diff --git a/config/boards/bananapif3.eos b/config/boards/bananapif3.csc
similarity index 91%
rename from config/boards/bananapif3.eos
rename to config/boards/bananapif3.csc
index e2d43b69b60c..71daaf234e69 100644
--- a/config/boards/bananapif3.eos
+++ b/config/boards/bananapif3.csc
@@ -1,13 +1,12 @@
 # SpacemiT K1 octa core RISC-V SoC 2GB/4GB RAM 8GB/16GB eMMC 4x USB3 2x GbE
-BOARD_NAME="Banana Pi F3"
+BOARD_NAME="BananaPi BPI-F3"
 BOARDFAMILY="spacemit"
 BOARD_MAINTAINER=""
-KERNEL_TARGET="legacy,current"
-KERNEL_TEST_TARGET="legacy"
+KERNEL_TARGET="current"
 BOOT_FDT_FILE="spacemit/k1-bananapi-f3.dtb"
 BOOTDELAY=1
 SRC_EXTLINUX="yes"
-SRC_CMDLINE="earlycon=sbi console=tty1 console=ttyS0,115200 clk_ignore_unused swiotlb=65536"
+SRC_CMDLINE="earlycon=sbi console=tty1 console=ttyS0,115200 clk_ignore_unused"
 PACKAGE_LIST_BOARD="rfkill bluetooth bluez bluez-tools"
 
 function post_config_uboot_target__extra_configs_for_bananapi_f3() {
diff --git a/config/kernel/linux-spacemit-6.1.config b/config/kernel/linux-spacemit-current.config
similarity index 81%
rename from config/kernel/linux-spacemit-6.1.config
rename to config/kernel/linux-spacemit-current.config
index 0f290ea84d3f..209a711df76e 100644
--- a/config/kernel/linux-spacemit-6.1.config
+++ b/config/kernel/linux-spacemit-current.config
@@ -1,10 +1,10 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/riscv 6.1.96 Kernel Configuration
+# Linux/riscv 6.6.68 Kernel Configuration
 #
-CONFIG_CC_VERSION_TEXT="riscv64-linux-gnu-gcc (Ubuntu 13.2.0-23ubuntu4) 13.2.0"
+CONFIG_CC_VERSION_TEXT="riscv64-linux-gnu-gcc (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
 CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=130200
+CONFIG_GCC_VERSION=130300
 CONFIG_CLANG_VERSION=0
 CONFIG_AS_IS_GNU=y
 CONFIG_AS_VERSION=24200
@@ -15,7 +15,6 @@ CONFIG_CC_CAN_LINK=y
 CONFIG_CC_CAN_LINK_STATIC=y
 CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
 CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y
-CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND=y
 CONFIG_CC_HAS_ASM_INLINE=y
 CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y
 CONFIG_PAHOLE_VERSION=125
@@ -53,17 +52,18 @@ CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
 CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
 CONFIG_GENERIC_IRQ_MIGRATION=y
 CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_CHIP=y
 CONFIG_IRQ_DOMAIN=y
 CONFIG_IRQ_DOMAIN_HIERARCHY=y
+CONFIG_GENERIC_IRQ_IPI=y
+CONFIG_GENERIC_IRQ_IPI_MUX=y
 CONFIG_GENERIC_MSI_IRQ=y
-CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
 CONFIG_IRQ_FORCED_THREADING=y
 CONFIG_SPARSE_IRQ=y
 # CONFIG_GENERIC_IRQ_DEBUGFS is not set
 # end of IRQ subsystem
 
 CONFIG_GENERIC_IRQ_MULTI_HANDLER=y
-CONFIG_ARCH_CLOCKSOURCE_INIT=y
 CONFIG_GENERIC_TIME_VSYSCALL=y
 CONFIG_GENERIC_CLOCKEVENTS=y
 CONFIG_ARCH_HAS_TICK_BROADCAST=y
@@ -97,10 +97,13 @@ CONFIG_BPF_UNPRIV_DEFAULT_OFF=y
 CONFIG_USERMODE_DRIVER=y
 # end of BPF subsystem
 
-CONFIG_PREEMPT_NONE_BUILD=y
-CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_BUILD=y
+# CONFIG_PREEMPT_NONE is not set
 # CONFIG_PREEMPT_VOLUNTARY is not set
-# CONFIG_PREEMPT is not set
+CONFIG_PREEMPT=y
+CONFIG_PREEMPT_COUNT=y
+CONFIG_PREEMPTION=y
+CONFIG_PREEMPT_DYNAMIC=y
 #
 # CPU/Task time and stats accounting
 #
@@ -119,10 +122,11 @@ CONFIG_CPU_ISOLATION=y
 # RCU Subsystem
 #
 CONFIG_TREE_RCU=y
+CONFIG_PREEMPT_RCU=y
 # CONFIG_RCU_EXPERT is not set
-CONFIG_SRCU=y
 CONFIG_TREE_SRCU=y
 CONFIG_TASKS_RCU_GENERIC=y
+CONFIG_TASKS_RCU=y
 CONFIG_TASKS_TRACE_RCU=y
 CONFIG_RCU_STALL_COMMON=y
 CONFIG_RCU_NEED_SEGCBLIST=y
@@ -133,7 +137,6 @@ CONFIG_IKCONFIG_PROC=y
 # CONFIG_IKHEADERS is not set
 CONFIG_LOG_BUF_SHIFT=17
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
-CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13
 # CONFIG_PRINTK_INDEX is not set
CONFIG_GENERIC_SCHED_CLOCK=y @@ -159,6 +162,7 @@ CONFIG_CGROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y @@ -169,7 +173,7 @@ CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y CONFIG_CGROUP_MISC=y -# CONFIG_CGROUP_DEBUG is not set +CONFIG_CGROUP_DEBUG=y CONFIG_SOCK_CGROUP_DATA=y CONFIG_NAMESPACES=y CONFIG_UTS_NS=y @@ -180,7 +184,6 @@ CONFIG_PID_NS=y CONFIG_NET_NS=y CONFIG_CHECKPOINT_RESTORE=y # CONFIG_SCHED_AUTOGROUP is not set -# CONFIG_SYSFS_DEPRECATED is not set # CONFIG_RELAY is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" @@ -191,13 +194,17 @@ CONFIG_RD_XZ=y CONFIG_RD_LZO=y CONFIG_RD_LZ4=y CONFIG_RD_ZSTD=y -CONFIG_BOOT_CONFIG=y -# CONFIG_BOOT_CONFIG_EMBED is not set +# CONFIG_BOOT_CONFIG is not set CONFIG_INITRAMFS_PRESERVE_MTIME=y CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_HAVE_LD_DEAD_CODE_DATA_ELIMINATION=y +# CONFIG_LD_DEAD_CODE_DATA_ELIMINATION is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" CONFIG_SYSCTL=y CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW=y CONFIG_EXPERT=y CONFIG_MULTIUSER=y # CONFIG_SGETMASK_SYSCALL is not set @@ -205,7 +212,7 @@ CONFIG_SYSFS_SYSCALL=y CONFIG_FHANDLE=y CONFIG_POSIX_TIMERS=y CONFIG_PRINTK=y -# CONFIG_BUG is not set +CONFIG_BUG=y CONFIG_ELF_CORE=y CONFIG_BASE_FULL=y CONFIG_FUTEX=y @@ -220,12 +227,14 @@ CONFIG_IO_URING=y CONFIG_ADVISE_SYSCALLS=y CONFIG_MEMBARRIER=y CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set CONFIG_KALLSYMS_ALL=y CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS=y CONFIG_KCMP=y CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y # CONFIG_DEBUG_RSEQ is not set -CONFIG_EMBEDDED=y CONFIG_HAVE_PERF_EVENTS=y # CONFIG_PC104 is not set @@ -239,10 +248,19 @@ CONFIG_PERF_EVENTS=y CONFIG_SYSTEM_DATA_VERIFICATION=y # CONFIG_PROFILING is not set CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +# CONFIG_KEXEC is not set +# CONFIG_KEXEC_FILE is not set +# CONFIG_CRASH_DUMP is not set +# end of Kexec and crash features # end of General setup CONFIG_64BIT=y CONFIG_RISCV=y +CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE=y CONFIG_ARCH_MMAP_RND_BITS_MIN=18 CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 CONFIG_ARCH_MMAP_RND_BITS_MAX=24 @@ -250,15 +268,19 @@ CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=17 CONFIG_RISCV_SBI=y CONFIG_MMU=y CONFIG_PAGE_OFFSET=0xff60000000000000 +CONFIG_ARCH_FORCE_MAX_ORDER=11 CONFIG_ARCH_FLATMEM_ENABLE=y CONFIG_ARCH_SPARSEMEM_ENABLE=y CONFIG_ARCH_SELECT_MEMORY_MODEL=y CONFIG_ARCH_SUPPORTS_UPROBES=y CONFIG_STACKTRACE_SUPPORT=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_CSUM=y CONFIG_GENERIC_HWEIGHT=y CONFIG_FIX_EARLYCON_MEM=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 CONFIG_PGTABLE_LEVELS=5 CONFIG_LOCKDEP_SUPPORT=y CONFIG_RISCV_DMA_NONCOHERENT=y @@ -269,8 +291,11 @@ CONFIG_AS_HAS_OPTION_ARCH=y # SoC selection # # CONFIG_SOC_MICROCHIP_POLARFIRE is not set +# CONFIG_ARCH_RENESAS is not set # CONFIG_SOC_SIFIVE is not set # CONFIG_SOC_STARFIVE is not set +# CONFIG_ARCH_SUNXI is not set +# CONFIG_ARCH_THEAD is not set # CONFIG_SOC_VIRT is not set CONFIG_SOC_SPACEMIT=y CONFIG_SOC_SPACEMIT_K1=y @@ -279,11 +304,13 @@ CONFIG_SOC_SPACEMIT_K1=y CONFIG_SOC_SPACEMIT_K1X=y # CONFIG_SOC_SPACEMIT_K1_FPGA is not set CONFIG_BIND_THREAD_TO_AICORES=y +CONFIG_SPACEMIT_ERRATA_LOAD_ATOMIC=y # end of SoC selection # # CPU errata 
selection # +# CONFIG_ERRATA_ANDES is not set # CONFIG_ERRATA_SIFIVE is not set # CONFIG_ERRATA_THEAD is not set # end of CPU errata selection @@ -297,26 +324,33 @@ CONFIG_ARCH_RV64I=y CONFIG_CMODEL_MEDANY=y CONFIG_MODULE_SECTIONS=y CONFIG_SMP=y +# CONFIG_SCHED_MC is not set CONFIG_NR_CPUS=8 CONFIG_HOTPLUG_CPU=y CONFIG_TUNE_GENERIC=y # CONFIG_NUMA is not set CONFIG_RISCV_ALTERNATIVE=y CONFIG_RISCV_ISA_C=y +CONFIG_RISCV_ISA_SVNAPOT=y CONFIG_RISCV_ISA_SVPBMT=y CONFIG_TOOLCHAIN_HAS_V=y CONFIG_RISCV_ISA_V=y +CONFIG_RISCV_ISA_V_DEFAULT_ENABLE=y +CONFIG_RISCV_ISA_V_UCOPY_THRESHOLD=768 +CONFIG_RISCV_ISA_V_PREEMPTIVE=y +CONFIG_RISCV_ISA_ZAWRS=y CONFIG_TOOLCHAIN_HAS_ZBB=y CONFIG_RISCV_ISA_ZBB=y -CONFIG_TOOLCHAIN_HAS_ZICBOM=y CONFIG_RISCV_ISA_ZICBOM=y -CONFIG_TOOLCHAIN_HAS_ZICBOZ=y CONFIG_RISCV_ISA_ZICBOZ=y -CONFIG_TOOLCHAIN_HAS_ZICBOP=y -CONFIG_RISCV_ISA_ZICBOP=y CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE=y CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI=y CONFIG_FPU=y +CONFIG_IRQ_STACKS=y +CONFIG_THREAD_SIZE_ORDER=2 +CONFIG_RISCV_MISALIGNED=y +CONFIG_RISCV_PROBE_UNALIGNED_ACCESS=y +# CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS is not set # end of Platform type # @@ -330,10 +364,13 @@ CONFIG_HZ=250 CONFIG_SCHED_HRTICK=y CONFIG_RISCV_SBI_V01=y # CONFIG_RISCV_BOOT_SPINWAIT is not set -# CONFIG_KEXEC is not set -# CONFIG_KEXEC_FILE is not set -# CONFIG_CRASH_DUMP is not set +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y +CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y CONFIG_COMPAT=y +# CONFIG_RELOCATABLE is not set +# CONFIG_RANDOMIZE_BASE is not set # end of Kernel features # @@ -344,10 +381,12 @@ CONFIG_EFI_STUB=y CONFIG_EFI=y CONFIG_CC_HAVE_STACKPROTECTOR_TLS=y CONFIG_STACKPROTECTOR_PER_TASK=y +CONFIG_RISCV_ISA_FALLBACK=y # end of Boot options CONFIG_PORTABLE=y -CONFIG_IMAGE_LOAD_OFFSET=0x1400000 +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_IMAGE_LOAD_OFFSET=0x200000 # # Power management options @@ -355,6 +394,7 @@ CONFIG_IMAGE_LOAD_OFFSET=0x1400000 CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y # CONFIG_SUSPEND_SKIP_SYNC is not set +# CONFIG_HIBERNATION is not set CONFIG_PM_SLEEP=y CONFIG_PM_SLEEP_SMP=y # CONFIG_PM_AUTOSLEEP is not set @@ -369,7 +409,7 @@ CONFIG_PM_GENERIC_DOMAINS_SLEEP=y CONFIG_PM_GENERIC_DOMAINS_OF=y CONFIG_CPU_PM=y # CONFIG_ENERGY_MODEL is not set -CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y # end of Power management options # @@ -419,8 +459,9 @@ CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y # CONFIG_CPUFREQ_DT=y CONFIG_CPUFREQ_DT_PLATDEV=y -CONFIG_ARM_BRCMSTB_AVS_CPUFREQ=y -CONFIG_ARM_MEDIATEK_CPUFREQ_HW=m +# CONFIG_ARM_APPLE_SOC_CPUFREQ is not set +# CONFIG_ARM_BRCMSTB_AVS_CPUFREQ is not set +# CONFIG_ARM_MEDIATEK_CPUFREQ_HW is not set # CONFIG_ARM_QCOM_CPUFREQ_HW is not set # CONFIG_ARM_RASPBERRYPI_CPUFREQ is not set # CONFIG_ARM_SCMI_CPUFREQ is not set @@ -429,20 +470,31 @@ CONFIG_SPACEMIT_K1X_CPUFREQ=y # end of CPU Frequency scaling # end of CPU Power Management +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y CONFIG_HAVE_KVM_EVENTFD=y CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL=y CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y CONFIG_VIRTUALIZATION=y -CONFIG_KVM=m +CONFIG_KVM=y +CONFIG_ARCH_SUPPORTS_ACPI=y +# CONFIG_ACPI is not set CONFIG_CPU_MITIGATIONS=y # # General architecture-dependent options # +CONFIG_HOTPLUG_CORE_SYNC=y +CONFIG_HOTPLUG_CORE_SYNC_DEAD=y +CONFIG_GENERIC_ENTRY=y # CONFIG_KPROBES is not set -# 
CONFIG_JUMP_LABEL is not set +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set CONFIG_UPROBES=y CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y CONFIG_HAVE_KPROBES=y @@ -466,6 +518,7 @@ CONFIG_HAVE_PERF_REGS=y CONFIG_HAVE_PERF_USER_STACK_DUMP=y CONFIG_HAVE_ARCH_JUMP_LABEL=y CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y CONFIG_HAVE_ARCH_SECCOMP=y CONFIG_HAVE_ARCH_SECCOMP_FILTER=y CONFIG_SECCOMP=y @@ -475,15 +528,21 @@ CONFIG_HAVE_STACKPROTECTOR=y CONFIG_STACKPROTECTOR=y CONFIG_STACKPROTECTOR_STRONG=y CONFIG_LTO_NONE=y +CONFIG_ARCH_SUPPORTS_CFI_CLANG=y CONFIG_HAVE_CONTEXT_TRACKING_USER=y CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y CONFIG_HAVE_MOVE_PUD=y CONFIG_HAVE_MOVE_PMD=y CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y CONFIG_HAVE_MOD_ARCH_SPECIFIC=y CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y +CONFIG_SOFTIRQ_ON_OWN_STACK=y CONFIG_ARCH_HAS_ELF_RANDOMIZE=y CONFIG_HAVE_ARCH_MMAP_RND_BITS=y CONFIG_ARCH_MMAP_RND_BITS=18 @@ -495,7 +554,7 @@ CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y CONFIG_CLONE_BACKWARDS=y CONFIG_COMPAT_32BIT_TIME=y CONFIG_HAVE_ARCH_VMAP_STACK=y -# CONFIG_VMAP_STACK is not set +CONFIG_VMAP_STACK=y CONFIG_ARCH_OPTIONAL_KERNEL_RWX=y CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT=y CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y @@ -504,6 +563,10 @@ CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y CONFIG_STRICT_MODULE_RWX=y CONFIG_ARCH_USE_MEMREMAP_PROT=y # CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_HAS_VDSO_DATA=y +CONFIG_HAVE_PREEMPT_DYNAMIC=y +CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y CONFIG_DYNAMIC_SIGFRAME=y @@ -514,11 +577,14 @@ CONFIG_DYNAMIC_SIGFRAME=y # CONFIG_GCOV_KERNEL is not set CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y # end of GCOV-based kernel profiling + +CONFIG_FUNCTION_ALIGNMENT=0 # end of General architecture-dependent options CONFIG_RT_MUTEXES=y CONFIG_BASE_SMALL=0 CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not set # CONFIG_MODULE_FORCE_LOAD is not set CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set @@ -536,9 +602,9 @@ CONFIG_MODULES_TREE_LOOKUP=y CONFIG_BLOCK=y CONFIG_BLOCK_LEGACY_AUTOLOAD=y CONFIG_BLK_CGROUP_RWSTAT=y -CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_CGROUP_PUNT_BIO=y CONFIG_BLK_ICQ=y -CONFIG_BLK_DEV_BSGLIB=y +# CONFIG_BLK_DEV_BSGLIB is not set CONFIG_BLK_DEV_INTEGRITY=y CONFIG_BLK_DEV_INTEGRITY_T10=y # CONFIG_BLK_DEV_ZONED is not set @@ -563,12 +629,11 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_ATARI_PARTITION is not set # CONFIG_MAC_PARTITION is not set CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set +CONFIG_BSD_DISKLABEL=y # CONFIG_MINIX_SUBPARTITION is not set # CONFIG_SOLARIS_X86_PARTITION is not set # CONFIG_UNIXWARE_DISKLABEL is not set -CONFIG_LDM_PARTITION=y -# CONFIG_LDM_DEBUG is not set +# CONFIG_LDM_PARTITION is not set # CONFIG_SGI_PARTITION is not set # CONFIG_ULTRIX_PARTITION is not set # CONFIG_SUN_PARTITION is not set @@ -578,7 +643,6 @@ CONFIG_EFI_PARTITION=y CONFIG_CMDLINE_PARTITION=y # end of Partition Types -CONFIG_BLOCK_COMPAT=y CONFIG_BLK_MQ_PCI=y CONFIG_BLK_MQ_VIRTIO=y CONFIG_BLK_PM=y @@ -589,19 +653,15 @@ CONFIG_BLK_MQ_STACKING=y # IO Schedulers # CONFIG_MQ_IOSCHED_DEADLINE=y -CONFIG_MQ_IOSCHED_KYBER=m -CONFIG_IOSCHED_BFQ=m +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y CONFIG_BFQ_GROUP_IOSCHED=y # CONFIG_BFQ_CGROUP_DEBUG is 
not set # end of IO Schedulers CONFIG_PREEMPT_NOTIFIERS=y CONFIG_ASN1=y -CONFIG_INLINE_SPIN_UNLOCK_IRQ=y -CONFIG_INLINE_READ_UNLOCK=y -CONFIG_INLINE_READ_UNLOCK_IRQ=y -CONFIG_INLINE_WRITE_UNLOCK=y -CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_UNINLINE_SPIN_UNLOCK=y CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y CONFIG_MUTEX_SPIN_ON_OWNER=y CONFIG_RWSEM_SPIN_ON_OWNER=y @@ -611,6 +671,7 @@ CONFIG_QUEUED_RWLOCKS=y CONFIG_ARCH_HAS_MMIOWB=y CONFIG_MMIOWB=y CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y CONFIG_FREEZER=y # @@ -622,10 +683,8 @@ CONFIG_ELFCORE=y CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_BINFMT_SCRIPT=y CONFIG_ARCH_HAS_BINFMT_FLAT=y -CONFIG_BINFMT_FLAT=y -CONFIG_BINFMT_FLAT_OLD=y -CONFIG_BINFMT_ZFLAT=y -CONFIG_BINFMT_MISC=m +# CONFIG_BINFMT_FLAT is not set +CONFIG_BINFMT_MISC=y CONFIG_COREDUMP=y # end of Executable file formats @@ -636,6 +695,7 @@ CONFIG_ZPOOL=y CONFIG_SWAP=y CONFIG_ZSWAP=y CONFIG_ZSWAP_DEFAULT_ON=y +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set # CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y # CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set @@ -644,34 +704,37 @@ CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y # CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y -# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED is not set # CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" CONFIG_ZBUD=y -CONFIG_Z3FOLD=m +# CONFIG_Z3FOLD_DEPRECATED is not set CONFIG_ZSMALLOC=m CONFIG_ZSMALLOC_STAT=y +CONFIG_ZSMALLOC_CHAIN_SIZE=8 # # SLAB allocator options # -# CONFIG_SLAB is not set +# CONFIG_SLAB_DEPRECATED is not set CONFIG_SLUB=y -# CONFIG_SLOB is not set +# CONFIG_SLUB_TINY is not set CONFIG_SLAB_MERGE_DEFAULT=y # CONFIG_SLAB_FREELIST_RANDOM is not set # CONFIG_SLAB_FREELIST_HARDENED is not set # CONFIG_SLUB_STATS is not set CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_RANDOM_KMALLOC_CACHES is not set # end of SLAB allocator options -CONFIG_SHUFFLE_PAGE_ALLOCATOR=y +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set CONFIG_COMPAT_BRK=y CONFIG_SELECT_MEMORY_MODEL=y CONFIG_FLATMEM_MANUAL=y # CONFIG_SPARSEMEM_MANUAL is not set CONFIG_FLATMEM=y CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y CONFIG_MEMORY_ISOLATION=y CONFIG_SPLIT_PTLOCK_CPUS=4 CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y @@ -680,17 +743,16 @@ CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 CONFIG_PAGE_REPORTING=y CONFIG_MIGRATION=y CONFIG_CONTIG_ALLOC=y +CONFIG_PCP_BATCH_SCALE_MAX=5 CONFIG_PHYS_ADDR_T_64BIT=y CONFIG_MMU_NOTIFIER=y # CONFIG_KSM is not set CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -CONFIG_ARCH_WANT_GENERAL_HUGETLB=y CONFIG_ARCH_WANTS_THP_SWAP=y # CONFIG_TRANSPARENT_HUGEPAGE is not set -CONFIG_FRONTSWAP=y CONFIG_CMA=y # CONFIG_CMA_DEBUG is not set -# CONFIG_CMA_DEBUGFS is not set +CONFIG_CMA_DEBUGFS=y CONFIG_CMA_SYSFS=y CONFIG_CMA_AREAS=7 CONFIG_GENERIC_EARLY_IOREMAP=y @@ -700,10 +762,15 @@ CONFIG_ZONE_DMA32=y CONFIG_VM_EVENT_COUNTERS=y # CONFIG_PERCPU_STATS is not set # CONFIG_GUP_TEST is not set +# CONFIG_DMAPOOL_TEST is not set CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MEMFD_CREATE=y +CONFIG_SECRETMEM=y # CONFIG_ANON_VMA_NAME is not set # CONFIG_USERFAULTFD is not set # CONFIG_LRU_GEN is not set +CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y +CONFIG_PER_VMA_LOCK=y CONFIG_LOCK_MM_AND_FIND_VMA=y # @@ -714,47 +781,52 @@ CONFIG_LOCK_MM_AND_FIND_VMA=y # end of Memory Management options CONFIG_NET=y 
+CONFIG_COMPAT_NETLINK_MESSAGES=y CONFIG_NET_INGRESS=y CONFIG_NET_EGRESS=y +CONFIG_NET_XGRESS=y CONFIG_SKB_EXTENSIONS=y # # Networking options # CONFIG_PACKET=y -CONFIG_PACKET_DIAG=y +CONFIG_PACKET_DIAG=m CONFIG_UNIX=y CONFIG_UNIX_SCM=y CONFIG_AF_UNIX_OOB=y -CONFIG_UNIX_DIAG=y -# CONFIG_TLS is not set +CONFIG_UNIX_DIAG=m +CONFIG_TLS=y +# CONFIG_TLS_DEVICE is not set +# CONFIG_TLS_TOE is not set CONFIG_XFRM=y -CONFIG_XFRM_ALGO=y -CONFIG_XFRM_USER=y +CONFIG_XFRM_ALGO=m +CONFIG_XFRM_USER=m # CONFIG_XFRM_INTERFACE is not set # CONFIG_XFRM_SUB_POLICY is not set # CONFIG_XFRM_MIGRATE is not set # CONFIG_XFRM_STATISTICS is not set -CONFIG_XFRM_AH=y -CONFIG_XFRM_ESP=y -CONFIG_XFRM_IPCOMP=y -CONFIG_NET_KEY=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m # CONFIG_NET_KEY_MIGRATE is not set CONFIG_XDP_SOCKETS=y -CONFIG_XDP_SOCKETS_DIAG=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_NET_HANDSHAKE=y CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y # CONFIG_IP_FIB_TRIE_STATS is not set CONFIG_IP_MULTIPLE_TABLES=y -# CONFIG_IP_ROUTE_MULTIPATH is not set -CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_MULTIPATH=y +# CONFIG_IP_ROUTE_VERBOSE is not set CONFIG_IP_ROUTE_CLASSID=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y -CONFIG_NET_IPIP=y +CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IP_TUNNEL=y CONFIG_NET_IPGRE=m @@ -762,12 +834,12 @@ CONFIG_NET_IPGRE_BROADCAST=y CONFIG_IP_MROUTE_COMMON=y CONFIG_IP_MROUTE=y # CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y +# CONFIG_IP_PIMSM_V1 is not set +# CONFIG_IP_PIMSM_V2 is not set CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_NET_UDP_TUNNEL=y -CONFIG_NET_FOU=y +CONFIG_NET_FOU=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -776,57 +848,51 @@ CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m CONFIG_INET_TABLE_PERTURB_ORDER=16 CONFIG_INET_XFRM_TUNNEL=m -CONFIG_INET_TUNNEL=y -CONFIG_INET_DIAG=y -CONFIG_INET_TCP_DIAG=y -CONFIG_INET_UDP_DIAG=y -CONFIG_INET_RAW_DIAG=y +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m # CONFIG_INET_DIAG_DESTROY is not set CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=m +# CONFIG_TCP_CONG_BIC is not set CONFIG_TCP_CONG_CUBIC=y -CONFIG_TCP_CONG_WESTWOOD=m +# CONFIG_TCP_CONG_WESTWOOD is not set CONFIG_TCP_CONG_HTCP=m CONFIG_TCP_CONG_HSTCP=m -CONFIG_TCP_CONG_HYBLA=m +# CONFIG_TCP_CONG_HYBLA is not set CONFIG_TCP_CONG_VEGAS=m -CONFIG_TCP_CONG_NV=m +# CONFIG_TCP_CONG_NV is not set CONFIG_TCP_CONG_SCALABLE=m -CONFIG_TCP_CONG_LP=m +# CONFIG_TCP_CONG_LP is not set CONFIG_TCP_CONG_VENO=m CONFIG_TCP_CONG_YEAH=m CONFIG_TCP_CONG_ILLINOIS=m -CONFIG_TCP_CONG_DCTCP=m +# CONFIG_TCP_CONG_DCTCP is not set CONFIG_TCP_CONG_CDG=m -CONFIG_TCP_CONG_BBR=y +# CONFIG_TCP_CONG_BBR is not set CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_BBR is not set # CONFIG_DEFAULT_RENO is not set CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_TCP_MD5SIG is not set CONFIG_IPV6=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y +# CONFIG_IPV6_ROUTER_PREF is not set # CONFIG_IPV6_OPTIMISTIC_DAD is not set -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -# CONFIG_INET6_ESP_OFFLOAD is not set -# CONFIG_INET6_ESPINTCP is not set -CONFIG_INET6_IPCOMP=y -CONFIG_IPV6_MIP6=y +# CONFIG_INET6_AH is not set +# CONFIG_INET6_ESP is not set +# CONFIG_INET6_IPCOMP is not set +# CONFIG_IPV6_MIP6 is not set # CONFIG_IPV6_ILA is not set -CONFIG_INET6_XFRM_TUNNEL=y -CONFIG_INET6_TUNNEL=y -CONFIG_IPV6_VTI=m -CONFIG_IPV6_SIT=y 
+CONFIG_INET6_TUNNEL=m +# CONFIG_IPV6_VTI is not set +CONFIG_IPV6_SIT=m # CONFIG_IPV6_SIT_6RD is not set CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_TUNNEL=m # CONFIG_IPV6_GRE is not set -CONFIG_IPV6_FOU=y -CONFIG_IPV6_FOU_TUNNEL=y -CONFIG_IPV6_MULTIPLE_TABLES=y -# CONFIG_IPV6_SUBTREES is not set +CONFIG_IPV6_FOU=m +CONFIG_IPV6_FOU_TUNNEL=m +# CONFIG_IPV6_MULTIPLE_TABLES is not set # CONFIG_IPV6_MROUTE is not set # CONFIG_IPV6_SEG6_LWTUNNEL is not set # CONFIG_IPV6_SEG6_HMAC is not set @@ -839,28 +905,28 @@ CONFIG_NET_PTP_CLASSIFY=y # CONFIG_NETWORK_PHY_TIMESTAMPING is not set CONFIG_NETFILTER=y CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m # # Core Netfilter Configuration # CONFIG_NETFILTER_INGRESS=y CONFIG_NETFILTER_EGRESS=y -CONFIG_NETFILTER_SKIP_EGRESS=y -CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK=m CONFIG_NETFILTER_FAMILY_BRIDGE=y CONFIG_NETFILTER_FAMILY_ARP=y -CONFIG_NETFILTER_NETLINK_HOOK=m +CONFIG_NETFILTER_BPF_LINK=y +# CONFIG_NETFILTER_NETLINK_HOOK is not set CONFIG_NETFILTER_NETLINK_ACCT=m -CONFIG_NETFILTER_NETLINK_QUEUE=y -CONFIG_NETFILTER_NETLINK_LOG=y -CONFIG_NETFILTER_NETLINK_OSF=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_LOG_SYSLOG=y -CONFIG_NETFILTER_CONNCOUNT=y +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m CONFIG_NF_CONNTRACK_MARK=y -# CONFIG_NF_CONNTRACK_ZONES is not set -CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_ZONES=y +# CONFIG_NF_CONNTRACK_PROCFS is not set CONFIG_NF_CONNTRACK_EVENTS=y CONFIG_NF_CONNTRACK_TIMEOUT=y CONFIG_NF_CONNTRACK_TIMESTAMP=y @@ -869,86 +935,86 @@ CONFIG_NF_CT_PROTO_DCCP=y CONFIG_NF_CT_PROTO_GRE=y CONFIG_NF_CT_PROTO_SCTP=y CONFIG_NF_CT_PROTO_UDPLITE=y -# CONFIG_NF_CONNTRACK_AMANDA is not set -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_H323=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_BROADCAST=y -CONFIG_NF_CONNTRACK_NETBIOS_NS=y -CONFIG_NF_CONNTRACK_SNMP=y -CONFIG_NF_CONNTRACK_PPTP=y -# CONFIG_NF_CONNTRACK_SANE is not set -CONFIG_NF_CONNTRACK_SIP=y -CONFIG_NF_CONNTRACK_TFTP=y -CONFIG_NF_CT_NETLINK=y -# CONFIG_NF_CT_NETLINK_TIMEOUT is not set -# CONFIG_NF_CT_NETLINK_HELPER is not set +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m CONFIG_NETFILTER_NETLINK_GLUE_CT=y -CONFIG_NF_NAT=y -CONFIG_NF_NAT_FTP=y -CONFIG_NF_NAT_IRC=y -CONFIG_NF_NAT_SIP=y -CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m CONFIG_NF_NAT_REDIRECT=y CONFIG_NF_NAT_MASQUERADE=y -CONFIG_NETFILTER_SYNPROXY=y -CONFIG_NF_TABLES=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=y CONFIG_NF_TABLES_NETDEV=y -CONFIG_NFT_NUMGEN=y -CONFIG_NFT_CT=y -CONFIG_NFT_FLOW_OFFLOAD=y -CONFIG_NFT_CONNLIMIT=y -CONFIG_NFT_LOG=y -CONFIG_NFT_LIMIT=y -CONFIG_NFT_MASQ=y -CONFIG_NFT_REDIR=y -CONFIG_NFT_NAT=y -CONFIG_NFT_TUNNEL=y -# CONFIG_NFT_OBJREF is not set -CONFIG_NFT_QUEUE=y -CONFIG_NFT_QUOTA=y -CONFIG_NFT_REJECT=y -CONFIG_NFT_REJECT_INET=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +# CONFIG_NFT_FLOW_OFFLOAD is not set 
+CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m CONFIG_NFT_COMPAT=m -CONFIG_NFT_HASH=y -CONFIG_NFT_FIB=y -CONFIG_NFT_FIB_INET=y +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m CONFIG_NFT_XFRM=m -CONFIG_NFT_SOCKET=y -CONFIG_NFT_OSF=y -CONFIG_NFT_TPROXY=y -CONFIG_NFT_SYNPROXY=y -CONFIG_NF_DUP_NETDEV=y -CONFIG_NFT_DUP_NETDEV=y -CONFIG_NFT_FWD_NETDEV=y -CONFIG_NFT_FIB_NETDEV=y -CONFIG_NFT_REJECT_NETDEV=y -CONFIG_NF_FLOW_TABLE_INET=y -CONFIG_NF_FLOW_TABLE=y +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m CONFIG_NF_FLOW_TABLE_PROCFS=y CONFIG_NETFILTER_XTABLES=m -CONFIG_NETFILTER_XTABLES_COMPAT=y +# CONFIG_NETFILTER_XTABLES_COMPAT is not set # # Xtables combined modules # CONFIG_NETFILTER_XT_MARK=m CONFIG_NETFILTER_XT_CONNMARK=m -CONFIG_NETFILTER_XT_SET=m +# CONFIG_NETFILTER_XT_SET is not set # # Xtables targets # -# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set -# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set -# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m # CONFIG_NETFILTER_XT_TARGET_CT is not set -# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_DSCP=m CONFIG_NETFILTER_XT_TARGET_HL=m -# CONFIG_NETFILTER_XT_TARGET_HMARK is not set -# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m CONFIG_NETFILTER_XT_TARGET_LED=m CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m @@ -962,7 +1028,7 @@ CONFIG_NETFILTER_XT_TARGET_REDIRECT=m CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m +# CONFIG_NETFILTER_XT_TARGET_TRACE is not set CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m @@ -1000,7 +1066,7 @@ CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m CONFIG_NETFILTER_XT_MATCH_NFACCT=m CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m +# CONFIG_NETFILTER_XT_MATCH_POLICY is not set CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m CONFIG_NETFILTER_XT_MATCH_QUOTA=m @@ -1017,24 +1083,24 @@ CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m # end of Core Netfilter Configuration -CONFIG_IP_SET=y +CONFIG_IP_SET=m CONFIG_IP_SET_MAX=256 -CONFIG_IP_SET_BITMAP_IP=y -CONFIG_IP_SET_BITMAP_IPMAC=y -CONFIG_IP_SET_BITMAP_PORT=y -CONFIG_IP_SET_HASH_IP=y -CONFIG_IP_SET_HASH_IPMARK=y -CONFIG_IP_SET_HASH_IPPORT=y -CONFIG_IP_SET_HASH_IPPORTIP=y -CONFIG_IP_SET_HASH_IPPORTNET=y -CONFIG_IP_SET_HASH_IPMAC=y -CONFIG_IP_SET_HASH_MAC=y -CONFIG_IP_SET_HASH_NETPORTNET=y -CONFIG_IP_SET_HASH_NET=y -CONFIG_IP_SET_HASH_NETNET=y -CONFIG_IP_SET_HASH_NETPORT=y -CONFIG_IP_SET_HASH_NETIFACE=y -CONFIG_IP_SET_LIST_SET=y +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m 
+CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m CONFIG_IP_VS=m CONFIG_IP_VS_IPV6=y # CONFIG_IP_VS_DEBUG is not set @@ -1088,21 +1154,21 @@ CONFIG_IP_VS_PE_SIP=m # # IP: Netfilter Configuration # -CONFIG_NF_DEFRAG_IPV4=y -CONFIG_NF_SOCKET_IPV4=y -CONFIG_NF_TPROXY_IPV4=y +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m CONFIG_NF_TABLES_IPV4=y -CONFIG_NFT_REJECT_IPV4=y -CONFIG_NFT_DUP_IPV4=y -CONFIG_NFT_FIB_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m CONFIG_NF_TABLES_ARP=y -CONFIG_NF_DUP_IPV4=y -CONFIG_NF_LOG_ARP=y -CONFIG_NF_LOG_IPV4=y -CONFIG_NF_REJECT_IPV4=y -# CONFIG_NF_NAT_SNMP_BASIC is not set -CONFIG_NF_NAT_PPTP=y -CONFIG_NF_NAT_H323=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -1116,7 +1182,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_TARGET_NETMAP=m CONFIG_IP_NF_TARGET_REDIRECT=m CONFIG_IP_NF_MANGLE=m -# CONFIG_IP_NF_TARGET_CLUSTERIP is not set CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_TTL=m CONFIG_IP_NF_RAW=m @@ -1129,15 +1194,15 @@ CONFIG_IP_NF_ARP_MANGLE=m # # IPv6: Netfilter Configuration # -CONFIG_NF_SOCKET_IPV6=y -CONFIG_NF_TPROXY_IPV6=y +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m CONFIG_NF_TABLES_IPV6=y -CONFIG_NFT_REJECT_IPV6=y -CONFIG_NFT_DUP_IPV6=y -CONFIG_NFT_FIB_IPV6=y -CONFIG_NF_DUP_IPV6=y -CONFIG_NF_REJECT_IPV6=y -CONFIG_NF_LOG_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -1161,11 +1226,11 @@ CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m # end of IPv6: Netfilter Configuration -CONFIG_NF_DEFRAG_IPV6=y -CONFIG_NF_TABLES_BRIDGE=y -CONFIG_NFT_BRIDGE_META=y -CONFIG_NFT_BRIDGE_REJECT=y -CONFIG_NF_CONNTRACK_BRIDGE=y +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_CONNTRACK_BRIDGE=m CONFIG_BRIDGE_NF_EBTABLES=m CONFIG_BRIDGE_EBT_BROUTE=m CONFIG_BRIDGE_EBT_T_FILTER=m @@ -1188,52 +1253,94 @@ CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_BPFILTER=y -CONFIG_BPFILTER_UMH=y +CONFIG_BPFILTER_UMH=m # CONFIG_IP_DCCP is not set -CONFIG_IP_SCTP=y -# CONFIG_SCTP_DBG_OBJCNT is not set -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set -CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set -CONFIG_SCTP_COOKIE_HMAC_MD5=y -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_INET_SCTP_DIAG=y +# CONFIG_IP_SCTP is not set CONFIG_RDS=m -# CONFIG_RDS_TCP is not set +CONFIG_RDS_TCP=m # CONFIG_RDS_DEBUG is not set -CONFIG_TIPC=y -CONFIG_TIPC_MEDIA_UDP=y -CONFIG_TIPC_CRYPTO=y -CONFIG_TIPC_DIAG=y -# CONFIG_ATM is not set -# CONFIG_L2TP is not set -CONFIG_STP=y -CONFIG_GARP=y -CONFIG_MRP=y -CONFIG_BRIDGE=y +# CONFIG_TIPC is not set +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +# CONFIG_ATM_BR2684 is not set +CONFIG_L2TP=m +# CONFIG_L2TP_DEBUGFS is not set +# CONFIG_L2TP_V3 is not set +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m CONFIG_BRIDGE_IGMP_SNOOPING=y CONFIG_BRIDGE_VLAN_FILTERING=y 
-CONFIG_BRIDGE_MRP=y -CONFIG_BRIDGE_CFM=y -# CONFIG_NET_DSA is not set -CONFIG_VLAN_8021Q=y +# CONFIG_BRIDGE_MRP is not set +# CONFIG_BRIDGE_CFM is not set +CONFIG_NET_DSA=m +# CONFIG_NET_DSA_TAG_NONE is not set +CONFIG_NET_DSA_TAG_AR9331=m +CONFIG_NET_DSA_TAG_BRCM_COMMON=m +CONFIG_NET_DSA_TAG_BRCM=m +CONFIG_NET_DSA_TAG_BRCM_LEGACY=m +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m +CONFIG_NET_DSA_TAG_HELLCREEK=m +CONFIG_NET_DSA_TAG_GSWIP=m +CONFIG_NET_DSA_TAG_DSA_COMMON=m +CONFIG_NET_DSA_TAG_DSA=m +CONFIG_NET_DSA_TAG_EDSA=m +CONFIG_NET_DSA_TAG_MTK=m +CONFIG_NET_DSA_TAG_KSZ=m +CONFIG_NET_DSA_TAG_OCELOT=m +CONFIG_NET_DSA_TAG_OCELOT_8021Q=m +CONFIG_NET_DSA_TAG_QCA=m +CONFIG_NET_DSA_TAG_RTL4_A=m +CONFIG_NET_DSA_TAG_RTL8_4=m +CONFIG_NET_DSA_TAG_RZN1_A5PSW=m +CONFIG_NET_DSA_TAG_LAN9303=m +CONFIG_NET_DSA_TAG_SJA1105=m +CONFIG_NET_DSA_TAG_TRAILER=m +CONFIG_NET_DSA_TAG_XRS700X=m +CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_VLAN_8021Q_MVRP=y -CONFIG_LLC=y -# CONFIG_LLC2 is not set -# CONFIG_ATALK is not set +CONFIG_LLC=m +CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y # CONFIG_X25 is not set # CONFIG_LAPB is not set # CONFIG_PHONET is not set -# CONFIG_6LOWPAN is not set -# CONFIG_IEEE802154 is not set +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +CONFIG_6LOWPAN_NHC=m +CONFIG_6LOWPAN_NHC_DEST=m +CONFIG_6LOWPAN_NHC_FRAGMENT=m +CONFIG_6LOWPAN_NHC_HOP=m +CONFIG_6LOWPAN_NHC_IPV6=m +CONFIG_6LOWPAN_NHC_MOBILITY=m +CONFIG_6LOWPAN_NHC_ROUTING=m +CONFIG_6LOWPAN_NHC_UDP=m +# CONFIG_6LOWPAN_GHC_EXT_HDR_HOP is not set +# CONFIG_6LOWPAN_GHC_UDP is not set +# CONFIG_6LOWPAN_GHC_ICMPV6 is not set +# CONFIG_6LOWPAN_GHC_EXT_HDR_DEST is not set +# CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG is not set +# CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +# CONFIG_IEEE802154_6LOWPAN is not set +# CONFIG_MAC802154 is not set CONFIG_NET_SCHED=y # # Queueing/Scheduling # -CONFIG_NET_SCH_HTB=y -CONFIG_NET_SCH_HFSC=y +# CONFIG_NET_SCH_HTB is not set +# CONFIG_NET_SCH_HFSC is not set # CONFIG_NET_SCH_PRIO is not set # CONFIG_NET_SCH_MULTIQ is not set # CONFIG_NET_SCH_RED is not set @@ -1254,10 +1361,9 @@ CONFIG_NET_SCH_HFSC=y # CONFIG_NET_SCH_CODEL is not set CONFIG_NET_SCH_FQ_CODEL=y # CONFIG_NET_SCH_CAKE is not set -CONFIG_NET_SCH_FQ=y +# CONFIG_NET_SCH_FQ is not set # CONFIG_NET_SCH_HHF is not set # CONFIG_NET_SCH_PIE is not set -# CONFIG_NET_SCH_INGRESS is not set # CONFIG_NET_SCH_PLUG is not set # CONFIG_NET_SCH_ETS is not set # CONFIG_NET_SCH_DEFAULT is not set @@ -1265,40 +1371,17 @@ CONFIG_NET_SCH_FQ=y # # Classification # -CONFIG_NET_CLS=y -CONFIG_NET_CLS_BASIC=y -CONFIG_NET_CLS_ROUTE4=y -CONFIG_NET_CLS_FW=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set # CONFIG_NET_CLS_U32 is not set # CONFIG_NET_CLS_FLOW is not set -CONFIG_NET_CLS_CGROUP=y +# CONFIG_NET_CLS_CGROUP is not set # CONFIG_NET_CLS_BPF is not set # CONFIG_NET_CLS_FLOWER is not set -CONFIG_NET_CLS_MATCHALL=y +# CONFIG_NET_CLS_MATCHALL is not set # CONFIG_NET_EMATCH is not set -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=y -CONFIG_NET_ACT_GACT=y -# CONFIG_GACT_PROB is not set -# CONFIG_NET_ACT_MIRRED is not set -# CONFIG_NET_ACT_SAMPLE is not set -# CONFIG_NET_ACT_IPT is not set -# CONFIG_NET_ACT_NAT is not set -# CONFIG_NET_ACT_PEDIT is not set -# CONFIG_NET_ACT_SIMP is not set -# CONFIG_NET_ACT_SKBEDIT is not set -# CONFIG_NET_ACT_CSUM is not set -# 
CONFIG_NET_ACT_MPLS is not set -# CONFIG_NET_ACT_VLAN is not set -# CONFIG_NET_ACT_BPF is not set -# CONFIG_NET_ACT_CONNMARK is not set -# CONFIG_NET_ACT_CTINFO is not set -# CONFIG_NET_ACT_SKBMOD is not set -# CONFIG_NET_ACT_IFE is not set -# CONFIG_NET_ACT_TUNNEL_KEY is not set -# CONFIG_NET_ACT_CT is not set -# CONFIG_NET_ACT_GATE is not set -# CONFIG_NET_TC_SKB_EXT is not set +# CONFIG_NET_CLS_ACT is not set CONFIG_NET_SCH_FIFO=y # CONFIG_DCB is not set CONFIG_DNS_RESOLVER=y @@ -1311,18 +1394,21 @@ CONFIG_BATMAN_ADV_MCAST=y # CONFIG_BATMAN_ADV_DEBUG is not set # CONFIG_BATMAN_ADV_TRACING is not set # CONFIG_OPENVSWITCH is not set -# CONFIG_VSOCKETS is not set -CONFIG_NETLINK_DIAG=y +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +# CONFIG_VIRTIO_VSOCKETS is not set +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m # CONFIG_MPLS is not set # CONFIG_NET_NSH is not set # CONFIG_HSR is not set CONFIG_NET_SWITCHDEV=y CONFIG_NET_L3_MASTER_DEV=y # CONFIG_QRTR is not set -CONFIG_NET_NCSI=y -CONFIG_NCSI_OEM_CMD_GET_MAC=y -CONFIG_NCSI_OEM_CMD_KEEP_PHY=y +# CONFIG_NET_NCSI is not set CONFIG_PCPU_DEV_REFCNT=y +CONFIG_MAX_SKB_FRAGS=17 CONFIG_RPS=y CONFIG_RFS_ACCEL=y CONFIG_SOCK_RX_QUEUE_MAPPING=y @@ -1342,29 +1428,9 @@ CONFIG_NET_FLOW_LIMIT=y # end of Network testing # end of Networking options -CONFIG_HAMRADIO=y - -# -# Packet Radio protocols -# -CONFIG_AX25=m -CONFIG_AX25_DAMA_SLAVE=y -CONFIG_NETROM=m -CONFIG_ROSE=m - -# -# AX.25 network device drivers -# -CONFIG_MKISS=m -CONFIG_6PACK=m -CONFIG_BPQETHER=m -CONFIG_BAYCOM_SER_FDX=m -CONFIG_BAYCOM_SER_HDX=m -CONFIG_YAM=m -# end of AX.25 network device drivers - +# CONFIG_HAMRADIO is not set # CONFIG_CAN is not set -CONFIG_BT=y +CONFIG_BT=m CONFIG_BT_BREDR=y CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y @@ -1372,12 +1438,13 @@ CONFIG_BT_BNEP=m CONFIG_BT_BNEP_MC_FILTER=y CONFIG_BT_BNEP_PROTO_FILTER=y CONFIG_BT_HIDP=m -# CONFIG_BT_HS is not set CONFIG_BT_LE=y +CONFIG_BT_LE_L2CAP_ECRED=y +# CONFIG_BT_6LOWPAN is not set CONFIG_BT_LEDS=y # CONFIG_BT_MSFTEXT is not set # CONFIG_BT_AOSPEXT is not set -CONFIG_BT_DEBUGFS=y +# CONFIG_BT_DEBUGFS is not set # CONFIG_BT_SELFTEST is not set # CONFIG_BT_FEATURE_DEBUG is not set @@ -1387,29 +1454,31 @@ CONFIG_BT_DEBUGFS=y CONFIG_BT_INTEL=m CONFIG_BT_BCM=m CONFIG_BT_RTL=m +CONFIG_BT_QCA=m CONFIG_BT_MTK=m CONFIG_BT_HCIBTUSB=m # CONFIG_BT_HCIBTUSB_AUTOSUSPEND is not set +CONFIG_BT_HCIBTUSB_POLL_SYNC=y CONFIG_BT_HCIBTUSB_BCM=y # CONFIG_BT_HCIBTUSB_MTK is not set CONFIG_BT_HCIBTUSB_RTL=y -# CONFIG_BT_HCIBTUSB_RTLBTUSB is not set CONFIG_BT_HCIBTSDIO=m CONFIG_BT_HCIUART=m CONFIG_BT_HCIUART_SERDEV=y CONFIG_BT_HCIUART_H4=y # CONFIG_BT_HCIUART_NOKIA is not set -# CONFIG_BT_HCIUART_BCSP is not set +CONFIG_BT_HCIUART_BCSP=y # CONFIG_BT_HCIUART_ATH3K is not set CONFIG_BT_HCIUART_LL=y CONFIG_BT_HCIUART_3WIRE=y # CONFIG_BT_HCIUART_INTEL is not set CONFIG_BT_HCIUART_BCM=y CONFIG_BT_HCIUART_RTL=y -# CONFIG_BT_HCIUART_QCA is not set +CONFIG_BT_HCIUART_QCA=y # CONFIG_BT_HCIUART_AG6XX is not set # CONFIG_BT_HCIUART_MRVL is not set CONFIG_BT_HCIBCM203X=m +# CONFIG_BT_HCIBCM4377 is not set # CONFIG_BT_HCIBPA10X is not set # CONFIG_BT_HCIBFUSB is not set # CONFIG_BT_HCIVHCI is not set @@ -1419,6 +1488,7 @@ CONFIG_BT_MTKSDIO=m CONFIG_BT_MTKUART=m # CONFIG_BT_QCOMSMD is not set # CONFIG_BT_VIRTIO is not set +# CONFIG_BT_NXPUART is not set # end of Bluetooth device drivers # CONFIG_AF_RXRPC is not set @@ -1427,6 +1497,10 @@ CONFIG_STREAM_PARSER=y # CONFIG_MCTP is not set CONFIG_FIB_RULES=y CONFIG_WIRELESS=y 
+CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_PRIV=y CONFIG_CFG80211=m # CONFIG_NL80211_TESTMODE is not set # CONFIG_CFG80211_DEVELOPER_WARNINGS is not set @@ -1436,13 +1510,15 @@ CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y CONFIG_CFG80211_DEFAULT_PS=y # CONFIG_CFG80211_DEBUGFS is not set CONFIG_CFG80211_CRDA_SUPPORT=y -# CONFIG_CFG80211_WEXT is not set +CONFIG_CFG80211_WEXT=y +CONFIG_LIB80211=m +# CONFIG_LIB80211_DEBUG is not set CONFIG_MAC80211=m CONFIG_MAC80211_HAS_RC=y CONFIG_MAC80211_RC_MINSTREL=y CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" -CONFIG_MAC80211_MESH=y +# CONFIG_MAC80211_MESH is not set CONFIG_MAC80211_LEDS=y # CONFIG_MAC80211_DEBUGFS is not set # CONFIG_MAC80211_MESSAGE_TRACING is not set @@ -1451,11 +1527,11 @@ CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 CONFIG_RFKILL=y CONFIG_RFKILL_LEDS=y CONFIG_RFKILL_INPUT=y -CONFIG_RFKILL_GPIO=y +CONFIG_RFKILL_GPIO=m # CONFIG_NET_9P is not set -CONFIG_CAIF=y +CONFIG_CAIF=m # CONFIG_CAIF_DEBUG is not set -CONFIG_CAIF_NETDEV=y +CONFIG_CAIF_NETDEV=m # CONFIG_CAIF_USB is not set CONFIG_CEPH_LIB=m # CONFIG_CEPH_LIB_PRETTYDEBUG is not set @@ -1511,8 +1587,9 @@ CONFIG_DST_CACHE=y CONFIG_GRO_CELLS=y CONFIG_NET_SELFTESTS=y CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y CONFIG_PAGE_POOL=y -# CONFIG_PAGE_POOL_STATS is not set +CONFIG_PAGE_POOL_STATS=y CONFIG_FAILOVER=y CONFIG_ETHTOOL_NETLINK=y @@ -1524,7 +1601,6 @@ CONFIG_PCI=y CONFIG_PCI_DOMAINS=y CONFIG_PCI_DOMAINS_GENERIC=y CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y CONFIG_PCIEAER=y # CONFIG_PCIEAER_INJECT is not set # CONFIG_PCIE_ECRC is not set @@ -1537,14 +1613,13 @@ CONFIG_PCIE_PME=y # CONFIG_PCIE_DPC is not set # CONFIG_PCIE_PTM is not set CONFIG_PCI_MSI=y -CONFIG_PCI_MSI_IRQ_DOMAIN=y CONFIG_PCI_QUIRKS=y # CONFIG_PCI_DEBUG is not set # CONFIG_PCI_STUB is not set -CONFIG_PCI_ECAM=y # CONFIG_PCI_IOV is not set # CONFIG_PCI_PRI is not set # CONFIG_PCI_PASID is not set +# CONFIG_PCI_DYNAMIC_OF_NODES is not set # CONFIG_PCIE_BUS_TUNE_OFF is not set CONFIG_PCIE_BUS_DEFAULT=y # CONFIG_PCIE_BUS_SAFE is not set @@ -1552,82 +1627,78 @@ CONFIG_PCIE_BUS_DEFAULT=y # CONFIG_PCIE_BUS_PEER2PEER is not set CONFIG_VGA_ARB=y CONFIG_VGA_ARB_MAX_GPUS=16 -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_CPCI=y -CONFIG_HOTPLUG_PCI_SHPC=y +# CONFIG_HOTPLUG_PCI is not set # # PCI controller drivers # # CONFIG_PCI_AARDVARK is not set -# CONFIG_PCIE_XILINX_NWL is not set -# CONFIG_PCI_FTPCI100 is not set -# CONFIG_PCI_TEGRA is not set -# CONFIG_PCIE_RCAR_HOST is not set -CONFIG_PCI_HOST_COMMON=m -CONFIG_PCI_HOST_GENERIC=m -# CONFIG_PCIE_XILINX is not set -# CONFIG_PCIE_XILINX_CPM is not set -# CONFIG_PCI_XGENE is not set -# CONFIG_PCI_V3_SEMI is not set -# CONFIG_PCI_VERSATILE is not set # CONFIG_PCIE_ALTERA is not set +# CONFIG_PCIE_APPLE is not set +# CONFIG_PCI_VERSATILE is not set +# CONFIG_PCIE_BRCMSTB is not set # CONFIG_PCI_HOST_THUNDER_PEM is not set # CONFIG_PCI_HOST_THUNDER_ECAM is not set -# CONFIG_PCIE_ROCKCHIP_HOST is not set +# CONFIG_PCI_FTPCI100 is not set +# CONFIG_PCI_HOST_GENERIC is not set +# CONFIG_PCI_LOONGSON is not set # CONFIG_PCIE_MEDIATEK is not set # CONFIG_PCIE_MEDIATEK_GEN3 is not set -# CONFIG_PCIE_BRCMSTB is not set -# CONFIG_PCI_LOONGSON is not set -# CONFIG_PCIE_MICROCHIP_HOST is not set -# CONFIG_PCIE_APPLE is not set # CONFIG_PCIE_MT7621 is not set +# CONFIG_PCIE_MICROCHIP_HOST is not set +# CONFIG_PCI_TEGRA is not set +# CONFIG_PCIE_RCAR_HOST is not set +# CONFIG_PCIE_ROCKCHIP_HOST is not set +# CONFIG_PCI_V3_SEMI is not set +# 
CONFIG_PCI_XGENE is not set +# CONFIG_PCIE_XILINX is not set +# CONFIG_PCIE_XILINX_NWL is not set +# CONFIG_PCIE_XILINX_CPM is not set + +# +# Cadence-based PCIe controllers +# +# CONFIG_PCIE_CADENCE_PLAT_HOST is not set +# CONFIG_PCI_J721E_HOST is not set +# end of Cadence-based PCIe controllers # -# DesignWare PCI Core Support +# DesignWare-based PCIe controllers # CONFIG_PCIE_DW=y CONFIG_PCIE_DW_HOST=y -CONFIG_PCI_K1X=y -CONFIG_PCI_K1X_HOST=y -CONFIG_PCIE_DW_PLAT=y -CONFIG_PCIE_DW_PLAT_HOST=y -# CONFIG_PCI_EXYNOS is not set -# CONFIG_PCI_IMX6 is not set -# CONFIG_PCIE_SPEAR13XX is not set -# CONFIG_PCI_KEYSTONE_HOST is not set +# CONFIG_PCIE_AL is not set +# CONFIG_PCI_MESON is not set +# CONFIG_PCIE_ARTPEC6_HOST is not set +# CONFIG_PCIE_BT1 is not set +# CONFIG_PCI_IMX6_HOST is not set # CONFIG_PCI_LAYERSCAPE is not set # CONFIG_PCI_HISI is not set -# CONFIG_PCIE_QCOM is not set -# CONFIG_PCIE_ARMADA_8K is not set -# CONFIG_PCIE_ARTPEC6_HOST is not set -# CONFIG_PCIE_ROCKCHIP_DW_HOST is not set -# CONFIG_PCIE_INTEL_GW is not set -# CONFIG_PCIE_KEEMBAY_HOST is not set # CONFIG_PCIE_KIRIN is not set # CONFIG_PCIE_HISI_STB is not set -# CONFIG_PCI_MESON is not set +# CONFIG_PCIE_INTEL_GW is not set +# CONFIG_PCIE_KEEMBAY_HOST is not set +# CONFIG_PCIE_ARMADA_8K is not set # CONFIG_PCIE_TEGRA194_HOST is not set -# CONFIG_PCIE_VISCONTI_HOST is not set -# CONFIG_PCIE_UNIPHIER is not set -# CONFIG_PCIE_AL is not set +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCIE_QCOM is not set +# CONFIG_PCIE_ROCKCHIP_DW_HOST is not set +# CONFIG_PCI_EXYNOS is not set # CONFIG_PCIE_FU740 is not set -CONFIG_PCIE_SPACEMIT=y -# end of DesignWare PCI Core Support +# CONFIG_PCIE_UNIPHIER is not set +# CONFIG_PCIE_SPEAR13XX is not set +# CONFIG_PCI_KEYSTONE_HOST is not set +# CONFIG_PCIE_VISCONTI_HOST is not set +CONFIG_PCI_K1X=y +CONFIG_PCI_K1X_HOST=y +# end of DesignWare-based PCIe controllers # -# Mobiveil PCIe Core Support +# Mobiveil-based PCIe controllers # -# CONFIG_PCIE_MOBIVEIL_PLAT is not set # CONFIG_PCIE_LAYERSCAPE_GEN4 is not set -# end of Mobiveil PCIe Core Support - -# -# Cadence PCIe controllers support -# -# CONFIG_PCIE_CADENCE_PLAT_HOST is not set -# CONFIG_PCI_J721E_HOST is not set -# end of Cadence PCIe controllers support +# CONFIG_PCIE_MOBIVEIL_PLAT is not set +# end of Mobiveil-based PCIe controllers # end of PCI controller drivers # @@ -1661,13 +1732,10 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # Firmware loader # CONFIG_FW_LOADER=y -CONFIG_FW_LOADER_PAGED_BUF=y CONFIG_EXTRA_FIRMWARE="esos.elf" CONFIG_EXTRA_FIRMWARE_DIR="firmware" # CONFIG_FW_LOADER_USER_HELPER is not set -CONFIG_FW_LOADER_COMPRESS=y -CONFIG_FW_LOADER_COMPRESS_XZ=y -CONFIG_FW_LOADER_COMPRESS_ZSTD=y +# CONFIG_FW_LOADER_COMPRESS is not set CONFIG_FW_CACHE=y # CONFIG_FW_UPLOAD is not set # end of Firmware loader @@ -1679,6 +1747,7 @@ CONFIG_DEV_COREDUMP=y # CONFIG_DEBUG_DEVRES is not set # CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set # CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_SOC_BUS=y CONFIG_REGMAP=y CONFIG_REGMAP_I2C=y CONFIG_REGMAP_SPI=y @@ -1687,6 +1756,7 @@ CONFIG_REGMAP_IRQ=y CONFIG_DMA_SHARED_BUFFER=y # CONFIG_DMA_FENCE_TRACE is not set CONFIG_GENERIC_ARCH_TOPOLOGY=y +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set # end of Generic Driver Options # @@ -1703,6 +1773,12 @@ CONFIG_GENERIC_ARCH_TOPOLOGY=y # CONFIG_MHI_BUS_EP is not set # end of Bus devices +# +# Cache Drivers +# +# CONFIG_AX45MP_L2_CACHE is not set +# end of Cache Drivers + # CONFIG_CONNECTOR is not set # @@ -1712,18 +1788,14 @@ 
CONFIG_GENERIC_ARCH_TOPOLOGY=y # # ARM System Control and Management Interface Protocol # -CONFIG_ARM_SCMI_PROTOCOL=y -CONFIG_ARM_SCMI_HAVE_TRANSPORT=y -CONFIG_ARM_SCMI_HAVE_SHMEM=y -CONFIG_ARM_SCMI_TRANSPORT_MAILBOX=y -# CONFIG_ARM_SCMI_TRANSPORT_VIRTIO is not set +# CONFIG_ARM_SCMI_PROTOCOL is not set # CONFIG_ARM_SCMI_POWER_DOMAIN is not set # CONFIG_ARM_SCMI_POWER_CONTROL is not set # end of ARM System Control and Management Interface Protocol # CONFIG_ARM_SCPI_PROTOCOL is not set -CONFIG_ARM_SCPI_POWER_DOMAIN=y -CONFIG_FIRMWARE_MEMMAP=y +# CONFIG_ARM_SCPI_POWER_DOMAIN is not set +# CONFIG_FIRMWARE_MEMMAP is not set # CONFIG_SYSFB_SIMPLEFB is not set # CONFIG_TURRIS_MOX_RWTM is not set # CONFIG_BCM47XX_NVRAM is not set @@ -1769,6 +1841,7 @@ CONFIG_MTD_OF_PARTS=y # CONFIG_MTD_OF_PARTS_BCM4908 is not set # CONFIG_MTD_OF_PARTS_LINKSYS_NS is not set # CONFIG_MTD_PARSER_IMAGETAG is not set +# CONFIG_MTD_PARSER_TPLINK_SAFELOADER is not set # CONFIG_MTD_PARSER_TRX is not set # CONFIG_MTD_SHARPSL_PARTS is not set # CONFIG_MTD_REDBOOT_PARTS is not set @@ -1824,7 +1897,7 @@ CONFIG_MTD_CFI_I2=y # CONFIG_MTD_DATAFLASH is not set # CONFIG_MTD_MCHP23K256 is not set # CONFIG_MTD_MCHP48L640 is not set -CONFIG_MTD_SPEAR_SMI=y +# CONFIG_MTD_SPEAR_SMI is not set # CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set @@ -1869,7 +1942,12 @@ CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y # CONFIG_MTD_SPI_NOR_SWP_KEEP is not set # CONFIG_SPI_HISI_SFC is not set # CONFIG_SPI_NXP_SPIFI is not set -# CONFIG_MTD_UBI is not set +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set # CONFIG_MTD_HYPERBUS is not set CONFIG_DTC=y CONFIG_OF=y @@ -1887,7 +1965,6 @@ CONFIG_OF_OVERLAY=y # CONFIG_PARPORT is not set CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_NULL_BLK is not set -CONFIG_CDROM=m # CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set CONFIG_ZRAM=m CONFIG_ZRAM_DEF_COMP_LZORLE=y @@ -1898,7 +1975,9 @@ CONFIG_ZRAM_DEF_COMP_LZORLE=y # CONFIG_ZRAM_DEF_COMP_842 is not set CONFIG_ZRAM_DEF_COMP="lzo-rle" # CONFIG_ZRAM_WRITEBACK is not set +CONFIG_ZRAM_TRACK_ENTRY_ACTIME=y CONFIG_ZRAM_MEMORY_TRACKING=y +# CONFIG_ZRAM_MULTI_COMP is not set CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 # CONFIG_BLK_DEV_DRBD is not set @@ -1913,33 +1992,29 @@ CONFIG_VIRTIO_BLK=y # # NVME Support # -CONFIG_NVME_CORE=m -CONFIG_BLK_DEV_NVME=m -CONFIG_NVME_MULTIPATH=y -# CONFIG_NVME_VERBOSE_ERRORS is not set -CONFIG_NVME_HWMON=y -CONFIG_NVME_FABRICS=m +CONFIG_NVME_CORE=y +CONFIG_BLK_DEV_NVME=y +# CONFIG_NVME_MULTIPATH is not set +CONFIG_NVME_VERBOSE_ERRORS=y +# CONFIG_NVME_HWMON is not set # CONFIG_NVME_FC is not set # CONFIG_NVME_TCP is not set # CONFIG_NVME_AUTH is not set -CONFIG_NVME_TARGET=m -CONFIG_NVME_TARGET_PASSTHRU=y -CONFIG_NVME_TARGET_LOOP=m -# CONFIG_NVME_TARGET_FC is not set -# CONFIG_NVME_TARGET_TCP is not set -# CONFIG_NVME_TARGET_AUTH is not set +# CONFIG_NVME_TARGET is not set # end of NVME Support # # Misc devices # # CONFIG_AD525X_DPOT is not set -CONFIG_DUMMY_IRQ=m +# CONFIG_DUMMY_IRQ is not set # CONFIG_PHANTOM is not set # CONFIG_TIFM_CORE is not set # CONFIG_ICS932S401 is not set # CONFIG_ATMEL_SSC is not set -CONFIG_ENCLOSURE_SERVICES=m +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_SMPRO_ERRMON is not set +# CONFIG_SMPRO_MISC is not set # CONFIG_GEHC_ACHC is not set # CONFIG_HP_ILO is not set # CONFIG_QCOM_COINCELL is not set @@ -1958,11 +2033,12 @@ 
CONFIG_SRAM=y # CONFIG_DW_XDATA_PCIE is not set # CONFIG_PCI_ENDPOINT_TEST is not set # CONFIG_XILINX_SDFEC is not set -CONFIG_MISC_RTSX=m # CONFIG_HISI_HIKEY_USB is not set # CONFIG_OPEN_DICE is not set # CONFIG_VCPU_STALL_DETECTOR is not set CONFIG_SPACEMIT_TCM=y +CONFIG_OCP2138=y +# CONFIG_ICM42607 is not set # CONFIG_C2PORT is not set # @@ -1972,7 +2048,7 @@ CONFIG_EEPROM_AT24=y # CONFIG_EEPROM_AT25 is not set # CONFIG_EEPROM_LEGACY is not set # CONFIG_EEPROM_MAX6875 is not set -# CONFIG_EEPROM_93CX6 is not set +CONFIG_EEPROM_93CX6=m # CONFIG_EEPROM_93XX46 is not set # CONFIG_EEPROM_IDT_89HPESX is not set # CONFIG_EEPROM_EE1004 is not set @@ -1993,9 +2069,9 @@ CONFIG_EEPROM_AT24=y # CONFIG_ECHO is not set # CONFIG_BCM_VK is not set # CONFIG_MISC_ALCOR_PCI is not set -CONFIG_MISC_RTSX_PCI=m -CONFIG_MISC_RTSX_USB=m -# CONFIG_HABANA_AI is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +# CONFIG_UACCE is not set # CONFIG_PVPANIC is not set # CONFIG_GP_PCI1XXXX is not set # end of Misc devices @@ -2004,7 +2080,7 @@ CONFIG_MISC_RTSX_USB=m # SCSI device support # CONFIG_SCSI_MOD=y -CONFIG_RAID_ATTRS=m +# CONFIG_RAID_ATTRS is not set CONFIG_SCSI_COMMON=y CONFIG_SCSI=y CONFIG_SCSI_DMA=y @@ -2014,83 +2090,28 @@ CONFIG_SCSI_PROC_FS=y # SCSI support type (disk, tape, CD-ROM) # CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=m -CONFIG_BLK_DEV_SR=m -CONFIG_CHR_DEV_SG=m +# CONFIG_CHR_DEV_ST is not set +# CONFIG_BLK_DEV_SR is not set +# CONFIG_CHR_DEV_SG is not set # CONFIG_BLK_DEV_BSG is not set -CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_ENCLOSURE=m +# CONFIG_CHR_DEV_SCH is not set # CONFIG_SCSI_CONSTANTS is not set # CONFIG_SCSI_LOGGING is not set -CONFIG_SCSI_SCAN_ASYNC=y +# CONFIG_SCSI_SCAN_ASYNC is not set # # SCSI Transports # # CONFIG_SCSI_SPI_ATTRS is not set # CONFIG_SCSI_FC_ATTRS is not set -CONFIG_SCSI_ISCSI_ATTRS=m +# CONFIG_SCSI_ISCSI_ATTRS is not set # CONFIG_SCSI_SAS_ATTRS is not set # CONFIG_SCSI_SAS_LIBSAS is not set # CONFIG_SCSI_SRP_ATTRS is not set # end of SCSI Transports -CONFIG_SCSI_LOWLEVEL=y -CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m -# CONFIG_SCSI_CXGB3_ISCSI is not set -# CONFIG_SCSI_CXGB4_ISCSI is not set -# CONFIG_SCSI_BNX2_ISCSI is not set -# CONFIG_BE2ISCSI is not set -# CONFIG_BLK_DEV_3W_XXXX_RAID is not set -# CONFIG_SCSI_HPSA is not set -# CONFIG_SCSI_3W_9XXX is not set -# CONFIG_SCSI_3W_SAS is not set -# CONFIG_SCSI_ACARD is not set -# CONFIG_SCSI_AACRAID is not set -# CONFIG_SCSI_AIC7XXX is not set -# CONFIG_SCSI_AIC79XX is not set -# CONFIG_SCSI_AIC94XX is not set -# CONFIG_SCSI_HISI_SAS is not set -# CONFIG_SCSI_MVSAS is not set -# CONFIG_SCSI_MVUMI is not set -# CONFIG_SCSI_ADVANSYS is not set -# CONFIG_SCSI_ARCMSR is not set -# CONFIG_SCSI_ESAS2R is not set -# CONFIG_MEGARAID_NEWGEN is not set -# CONFIG_MEGARAID_LEGACY is not set -# CONFIG_MEGARAID_SAS is not set -# CONFIG_SCSI_MPT3SAS is not set -# CONFIG_SCSI_MPT2SAS is not set -# CONFIG_SCSI_MPI3MR is not set -# CONFIG_SCSI_SMARTPQI is not set -# CONFIG_SCSI_HPTIOP is not set -# CONFIG_SCSI_BUSLOGIC is not set -# CONFIG_SCSI_MYRB is not set -# CONFIG_SCSI_MYRS is not set -# CONFIG_SCSI_SNIC is not set -# CONFIG_SCSI_DMX3191D is not set -# CONFIG_SCSI_FDOMAIN_PCI is not set -# CONFIG_SCSI_IPS is not set -# CONFIG_SCSI_INITIO is not set -# CONFIG_SCSI_INIA100 is not set -# CONFIG_SCSI_STEX is not set -# CONFIG_SCSI_SYM53C8XX_2 is not set -# CONFIG_SCSI_IPR is not set -# CONFIG_SCSI_QLOGIC_1280 is not set -# CONFIG_SCSI_QLA_ISCSI is not set -# CONFIG_SCSI_DC395x is not set -# 
CONFIG_SCSI_AM53C974 is not set -# CONFIG_SCSI_WD719X is not set -# CONFIG_SCSI_DEBUG is not set -# CONFIG_SCSI_PMCRAID is not set -# CONFIG_SCSI_PM8001 is not set -# CONFIG_SCSI_VIRTIO is not set -CONFIG_SCSI_DH=y -CONFIG_SCSI_DH_RDAC=m -CONFIG_SCSI_DH_HP_SW=m -CONFIG_SCSI_DH_EMC=m -CONFIG_SCSI_DH_ALUA=m +# CONFIG_SCSI_LOWLEVEL is not set +# CONFIG_SCSI_DH is not set # end of SCSI device support CONFIG_ATA=y @@ -2160,7 +2181,6 @@ CONFIG_ATA_BMDMA=y # CONFIG_PATA_ARTOP is not set # CONFIG_PATA_ATIIXP is not set # CONFIG_PATA_ATP867X is not set -# CONFIG_PATA_BK3710 is not set # CONFIG_PATA_CMD64X is not set # CONFIG_PATA_CS5520 is not set # CONFIG_PATA_CS5530 is not set @@ -2206,7 +2226,6 @@ CONFIG_ATA_BMDMA=y # CONFIG_PATA_OPTI is not set # CONFIG_PATA_OF_PLATFORM is not set # CONFIG_PATA_RZ1000 is not set -# CONFIG_PATA_SAMSUNG_CF is not set # # Generic fallback / legacy drivers @@ -2216,6 +2235,7 @@ CONFIG_ATA_BMDMA=y CONFIG_MD=y CONFIG_BLK_DEV_MD=y CONFIG_MD_AUTODETECT=y +CONFIG_MD_BITMAP_FILE=y CONFIG_MD_LINEAR=y CONFIG_MD_RAID0=y CONFIG_MD_RAID1=y @@ -2295,17 +2315,51 @@ CONFIG_VXLAN=y # CONFIG_GENEVE is not set # CONFIG_BAREUDP is not set # CONFIG_GTP is not set -CONFIG_AMT=m -CONFIG_MACSEC=m +# CONFIG_AMT is not set +# CONFIG_MACSEC is not set # CONFIG_NETCONSOLE is not set # CONFIG_TUN is not set # CONFIG_TUN_VNET_CROSS_LE is not set CONFIG_VETH=y CONFIG_VIRTIO_NET=y # CONFIG_NLMON is not set -# CONFIG_NET_VRF is not set # CONFIG_ARCNET is not set +CONFIG_ATM_DRIVERS=y +# CONFIG_ATM_DUMMY is not set +# CONFIG_ATM_TCP is not set +# CONFIG_ATM_LANAI is not set +# CONFIG_ATM_ENI is not set +# CONFIG_ATM_NICSTAR is not set +# CONFIG_ATM_IDT77252 is not set +# CONFIG_ATM_IA is not set +# CONFIG_ATM_FORE200E is not set +# CONFIG_ATM_HE is not set +# CONFIG_ATM_SOLOS is not set # CONFIG_CAIF_DRIVERS is not set + +# +# Distributed Switch Architecture drivers +# +# CONFIG_B53 is not set +# CONFIG_NET_DSA_BCM_SF2 is not set +# CONFIG_NET_DSA_LOOP is not set +# CONFIG_NET_DSA_LANTIQ_GSWIP is not set +# CONFIG_NET_DSA_MT7530 is not set +# CONFIG_NET_DSA_MV88E6060 is not set +# CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON is not set +# CONFIG_NET_DSA_MV88E6XXX is not set +# CONFIG_NET_DSA_AR9331 is not set +# CONFIG_NET_DSA_QCA8K is not set +# CONFIG_NET_DSA_SJA1105 is not set +# CONFIG_NET_DSA_XRS700X_I2C is not set +# CONFIG_NET_DSA_XRS700X_MDIO is not set +# CONFIG_NET_DSA_REALTEK is not set +# CONFIG_NET_DSA_SMSC_LAN9303_I2C is not set +# CONFIG_NET_DSA_SMSC_LAN9303_MDIO is not set +# CONFIG_NET_DSA_VITESSE_VSC73XX_SPI is not set +# CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM is not set +# end of Distributed Switch Architecture drivers + CONFIG_ETHERNET=y CONFIG_NET_VENDOR_3COM=y # CONFIG_VORTEX is not set @@ -2324,6 +2378,7 @@ CONFIG_NET_VENDOR_AMD=y # CONFIG_AMD8111_ETH is not set # CONFIG_PCNET32 is not set # CONFIG_AMD_XGBE is not set +# CONFIG_PDS_CORE is not set # CONFIG_NET_XGENE is not set # CONFIG_NET_XGENE_V2 is not set # CONFIG_NET_VENDOR_AQUANTIA is not set @@ -2345,9 +2400,7 @@ CONFIG_NET_VENDOR_CHELSIO=y # CONFIG_CHELSIO_T3 is not set # CONFIG_CHELSIO_T4 is not set # CONFIG_CHELSIO_T4VF is not set -CONFIG_NET_VENDOR_CIRRUS=y -# CONFIG_CS89x0_PLATFORM is not set -# CONFIG_EP93XX_ETH is not set +# CONFIG_NET_VENDOR_CIRRUS is not set CONFIG_NET_VENDOR_CISCO=y # CONFIG_ENIC is not set # CONFIG_NET_VENDOR_CORTINA is not set @@ -2362,27 +2415,11 @@ CONFIG_NET_VENDOR_EMULEX=y # CONFIG_BE2NET is not set # CONFIG_NET_VENDOR_ENGLEDER is not set # CONFIG_NET_VENDOR_EZCHIP is not set 
-CONFIG_NET_VENDOR_FARADAY=y -CONFIG_NET_VENDOR_FREESCALE=y -# CONFIG_FEC is not set -# CONFIG_FSL_FMAN is not set -# CONFIG_FSL_PQ_MDIO is not set -# CONFIG_FSL_XGMAC_MDIO is not set -# CONFIG_GIANFAR is not set -# CONFIG_FSL_DPAA2_SWITCH is not set -# CONFIG_FSL_ENETC is not set -# CONFIG_FSL_ENETC_VF is not set -# CONFIG_FSL_ENETC_IERB is not set -# CONFIG_FSL_ENETC_MDIO is not set +# CONFIG_NET_VENDOR_FARADAY is not set +# CONFIG_NET_VENDOR_FREESCALE is not set # CONFIG_NET_VENDOR_FUNGIBLE is not set # CONFIG_NET_VENDOR_GOOGLE is not set -CONFIG_NET_VENDOR_HISILICON=y -# CONFIG_HIX5HD2_GMAC is not set -# CONFIG_HISI_FEMAC is not set -# CONFIG_HIP04_ETH is not set -# CONFIG_HNS_DSAF is not set -# CONFIG_HNS_ENET is not set -# CONFIG_HNS3 is not set +# CONFIG_NET_VENDOR_HISILICON is not set # CONFIG_NET_VENDOR_HUAWEI is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_JME is not set @@ -2454,8 +2491,7 @@ CONFIG_NET_VENDOR_SUN=y # CONFIG_SUNGEM is not set # CONFIG_CASSINI is not set # CONFIG_NIU is not set -CONFIG_NET_VENDOR_SUNPLUS=y -# CONFIG_SP7021_EMAC is not set +# CONFIG_NET_VENDOR_SUNPLUS is not set # CONFIG_NET_VENDOR_SYNOPSYS is not set CONFIG_NET_VENDOR_TEHUTI=y # CONFIG_TEHUTI is not set @@ -2474,10 +2510,13 @@ CONFIG_NET_VENDOR_TI=y # CONFIG_NET_VENDOR_XILINX is not set # CONFIG_FDDI is not set # CONFIG_HIPPI is not set +CONFIG_PHYLINK=m CONFIG_PHYLIB=y CONFIG_SWPHY=y # CONFIG_LED_TRIGGER_PHY is not set +CONFIG_PHYLIB_LEDS=y CONFIG_FIXED_PHY=y +# CONFIG_SFP is not set # # MII PHY device drivers @@ -2503,17 +2542,21 @@ CONFIG_FIXED_PHY=y # CONFIG_LSI_ET1011C_PHY is not set # CONFIG_MARVELL_PHY is not set # CONFIG_MARVELL_10G_PHY is not set +# CONFIG_MARVELL_88Q2XXX_PHY is not set # CONFIG_MARVELL_88X2222_PHY is not set # CONFIG_MAXLINEAR_GPHY is not set # CONFIG_MEDIATEK_GE_PHY is not set # CONFIG_MICREL_PHY is not set +# CONFIG_MICROCHIP_T1S_PHY is not set # CONFIG_MICROCHIP_PHY is not set # CONFIG_MICROCHIP_T1_PHY is not set # CONFIG_MICROSEMI_PHY is not set # CONFIG_MOTORCOMM_PHY is not set # CONFIG_NATIONAL_PHY is not set +# CONFIG_NXP_CBTX_PHY is not set # CONFIG_NXP_C45_TJA11XX_PHY is not set # CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_NCN26000_PHY is not set # CONFIG_AT803X_PHY is not set # CONFIG_QSEMI_PHY is not set CONFIG_REALTEK_PHY=y @@ -2556,6 +2599,7 @@ CONFIG_MDIO_DEVRES=y # MDIO Multiplexers # # CONFIG_MDIO_BUS_MUX_MESON_G12A is not set +# CONFIG_MDIO_BUS_MUX_MESON_GXL is not set # CONFIG_MDIO_BUS_MUX_BCM6368 is not set # CONFIG_MDIO_BUS_MUX_BCM_IPROC is not set # CONFIG_MDIO_BUS_MUX_GPIO is not set @@ -2579,10 +2623,10 @@ CONFIG_USB_RTL8152=m # CONFIG_USB_LAN78XX is not set CONFIG_USB_USBNET=y # CONFIG_USB_NET_AX8817X is not set -CONFIG_USB_NET_AX88179_178A=y +# CONFIG_USB_NET_AX88179_178A is not set CONFIG_USB_NET_CDCETHER=y # CONFIG_USB_NET_CDC_EEM is not set -CONFIG_USB_NET_CDC_NCM=y +CONFIG_USB_NET_CDC_NCM=m # CONFIG_USB_NET_HUAWEI_CDC_NCM is not set # CONFIG_USB_NET_CDC_MBIM is not set # CONFIG_USB_NET_DM9601 is not set @@ -2606,6 +2650,7 @@ CONFIG_USB_ARMLINUX=y CONFIG_USB_NET_ZAURUS=y # CONFIG_USB_NET_CX82310_ETH is not set # CONFIG_USB_NET_KALMIA is not set +# CONFIG_USB_NET_QMI_WWAN_F is not set CONFIG_USB_NET_QMI_WWAN=m # CONFIG_USB_HSO is not set # CONFIG_USB_NET_INT51X1 is not set @@ -2615,6 +2660,7 @@ CONFIG_USB_NET_QMI_WWAN=m # CONFIG_USB_NET_CH9200 is not set # CONFIG_USB_NET_AQC111 is not set CONFIG_USB_RTL8153_ECM=m +CONFIG_USB_NET_ASIX=m CONFIG_WLAN=y # CONFIG_WLAN_VENDOR_ADMTEK is not set # CONFIG_WLAN_VENDOR_ATH is not set @@ -2627,10 
+2673,32 @@ CONFIG_WLAN=y # CONFIG_WLAN_VENDOR_MEDIATEK is not set # CONFIG_WLAN_VENDOR_MICROCHIP is not set # CONFIG_WLAN_VENDOR_PURELIFI is not set -# CONFIG_WLAN_VENDOR_RALINK is not set +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_RT2X00=m +# CONFIG_RT2400PCI is not set +# CONFIG_RT2500PCI is not set +# CONFIG_RT61PCI is not set +# CONFIG_RT2800PCI is not set +CONFIG_RT2500USB=m +CONFIG_RT73USB=m +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2800_LIB=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +# CONFIG_RT2X00_DEBUG is not set CONFIG_WLAN_VENDOR_REALTEK=y -# CONFIG_RTL8180 is not set -# CONFIG_RTL8187 is not set +CONFIG_RTL8180=m +CONFIG_RTL8187=m +CONFIG_RTL8187_LEDS=y CONFIG_RTL_CARDS=m # CONFIG_RTL8192CE is not set # CONFIG_RTL8192SE is not set @@ -2642,19 +2710,64 @@ CONFIG_RTL_CARDS=m # CONFIG_RTL8821AE is not set # CONFIG_RTL8192CU is not set # CONFIG_RTL8XXXU is not set -# CONFIG_RTW88 is not set -# CONFIG_RTW89 is not set +CONFIG_RTW88=m +CONFIG_RTW88_CORE=m +CONFIG_RTW88_PCI=m +CONFIG_RTW88_USB=m +CONFIG_RTW88_8822B=m +CONFIG_RTW88_8822C=m +CONFIG_RTW88_8723D=m +CONFIG_RTW88_8821C=m +CONFIG_RTW88_8822BE=m +# CONFIG_RTW88_8822BS is not set +CONFIG_RTW88_8822BU=m +CONFIG_RTW88_8822CE=m +# CONFIG_RTW88_8822CS is not set +CONFIG_RTW88_8822CU=m +CONFIG_RTW88_8723DE=m +# CONFIG_RTW88_8723DS is not set +CONFIG_RTW88_8723DU=m +CONFIG_RTW88_8821CE=m +# CONFIG_RTW88_8821CS is not set +CONFIG_RTW88_8821CU=m +# CONFIG_RTW88_DEBUG is not set +# CONFIG_RTW88_DEBUGFS is not set +CONFIG_RTW89=m +CONFIG_RTW89_CORE=m +CONFIG_RTW89_PCI=m +CONFIG_RTW89_8851B=m +CONFIG_RTW89_8852A=m +CONFIG_RTW89_8852B=m +CONFIG_RTW89_8852C=m +CONFIG_RTW89_8851BE=m +CONFIG_RTW89_8852AE=m +CONFIG_RTW89_8852BE=m +CONFIG_RTW89_8852CE=m +# CONFIG_RTW89_DEBUGMSG is not set +# CONFIG_RTW89_DEBUGFS is not set CONFIG_RTL8852BS=m +CONFIG_RTL8852BE=m # CONFIG_WLAN_VENDOR_RSI is not set # CONFIG_WLAN_VENDOR_SILABS is not set # CONFIG_WLAN_VENDOR_ST is not set # CONFIG_WLAN_VENDOR_TI is not set # CONFIG_WLAN_VENDOR_ZYDAS is not set # CONFIG_WLAN_VENDOR_QUANTENNA is not set -# CONFIG_MAC80211_HWSIM is not set +CONFIG_AIC_WLAN_SUPPORT=y +CONFIG_AIC_INTF_SDIO=y +# CONFIG_AIC_INTF_USB is not set +CONFIG_AIC_IRQ_ACTIVE_UNSET=y +# CONFIG_AIC_IRQ_ACTIVE_HIGH is not set +# CONFIG_AIC_IRQ_ACTIVE_RISING is not set +# CONFIG_AIC_IRQ_ACTIVE_LOW is not set +# CONFIG_AIC_IRQ_ACTIVE_FALLING is not set +CONFIG_AIC8800_WLAN_SUPPORT=m +CONFIG_AIC8800_BTLPM_SUPPORT=m # CONFIG_USB_NET_RNDIS_WLAN is not set +# CONFIG_MAC80211_HWSIM is not set # CONFIG_VIRT_WIFI is not set # CONFIG_WAN is not set +CONFIG_IEEE802154_DRIVERS=m # # Wireless WAN @@ -2696,6 +2809,7 @@ CONFIG_TOUCHSCREEN_GT9XX=y # CONFIG_TOUCHSCREEN_ADS7846 is not set # CONFIG_TOUCHSCREEN_AD7877 is not set # CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ADC is not set # CONFIG_TOUCHSCREEN_AR1021_I2C is not set # CONFIG_TOUCHSCREEN_ATMEL_MXT is not set # CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set @@ -2706,6 +2820,7 @@ CONFIG_TOUCHSCREEN_GT9XX=y # CONFIG_TOUCHSCREEN_CY8CTMG110 is not set # CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set # CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP5 is not set # CONFIG_TOUCHSCREEN_DYNAPRO is not set # CONFIG_TOUCHSCREEN_HAMPSHIRE is not set # CONFIG_TOUCHSCREEN_EETI is not set @@ -2716,6 +2831,7 @@ 
CONFIG_TOUCHSCREEN_GT9XX=y CONFIG_TOUCHSCREEN_GOODIX=y # CONFIG_TOUCHSCREEN_HIDEEP is not set # CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set +# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set # CONFIG_TOUCHSCREEN_ILI210X is not set # CONFIG_TOUCHSCREEN_ILITEK is not set # CONFIG_TOUCHSCREEN_IPROC is not set @@ -2732,10 +2848,10 @@ CONFIG_TOUCHSCREEN_GOODIX=y # CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set # CONFIG_TOUCHSCREEN_MSG2638 is not set # CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set # CONFIG_TOUCHSCREEN_IMAGIS is not set # CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set # CONFIG_TOUCHSCREEN_INEXIO is not set -# CONFIG_TOUCHSCREEN_MK712 is not set # CONFIG_TOUCHSCREEN_PENMOUNT is not set # CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set # CONFIG_TOUCHSCREEN_RASPBERRYPI_FW is not set @@ -2763,13 +2879,20 @@ CONFIG_TOUCHSCREEN_GOODIX=y # CONFIG_TOUCHSCREEN_TPS6507X is not set # CONFIG_TOUCHSCREEN_ZET6223 is not set # CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set # CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set # CONFIG_TOUCHSCREEN_IQS5XX is not set +# CONFIG_TOUCHSCREEN_IQS7211 is not set # CONFIG_TOUCHSCREEN_ZINITIX is not set +# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set +# CONFIG_TOUCHSCREEN_CHIPONE_TDDI is not set +# CONFIG_TOUCHSCREEN_CHSC5XXX is not set +# CONFIG_TOUCHSCREEN_FTS is not set CONFIG_INPUT_MISC=y # CONFIG_INPUT_AD714X is not set # CONFIG_INPUT_ARIEL_PWRBUTTON is not set # CONFIG_INPUT_ATMEL_CAPTOUCH is not set +# CONFIG_INPUT_BBNSM_PWRKEY is not set # CONFIG_INPUT_BMA150 is not set # CONFIG_INPUT_E3X0_BUTTON is not set # CONFIG_INPUT_MMA8450 is not set @@ -2801,6 +2924,7 @@ CONFIG_INPUT_UINPUT=y # CONFIG_INPUT_DRV2667_HAPTICS is not set # CONFIG_INPUT_HISI_POWERKEY is not set CONFIG_INPUT_SPACEMIT_POWERKEY=y +CONFIG_HALL_SENSOR_AS1911=y # CONFIG_INPUT_SC27XX_VIBRA is not set # CONFIG_INPUT_RT5120_PWRKEY is not set # CONFIG_RMI4_CORE is not set @@ -2811,7 +2935,7 @@ CONFIG_INPUT_SPACEMIT_POWERKEY=y CONFIG_SERIO=y CONFIG_SERIO_SERPORT=y # CONFIG_SERIO_PCIPS2 is not set -# CONFIG_SERIO_LIBPS2 is not set +CONFIG_SERIO_LIBPS2=y # CONFIG_SERIO_RAW is not set # CONFIG_SERIO_ALTERA_PS2 is not set # CONFIG_SERIO_PS2MULT is not set @@ -2837,6 +2961,7 @@ CONFIG_HW_CONSOLE=y CONFIG_VT_HW_CONSOLE_BINDING=y CONFIG_UNIX98_PTYS=y # CONFIG_LEGACY_PTYS is not set +CONFIG_LEGACY_TIOCSTI=y CONFIG_LDISC_AUTOLOAD=y # @@ -2849,6 +2974,7 @@ CONFIG_SERIAL_EARLYCON=y # Non-8250 serial port support # # CONFIG_SERIAL_AMBA_PL010 is not set +# CONFIG_SERIAL_EARLYCON_SEMIHOST is not set CONFIG_SERIAL_EARLYCON_RISCV_SBI=y # CONFIG_SERIAL_ATMEL is not set # CONFIG_SERIAL_MESON is not set @@ -2874,7 +3000,7 @@ CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_MSM is not set # CONFIG_SERIAL_VT8500 is not set # CONFIG_SERIAL_OMAP is not set -CONFIG_SERIAL_SIFIVE=m +# CONFIG_SERIAL_SIFIVE is not set # CONFIG_SERIAL_LANTIQ is not set # CONFIG_SERIAL_SCCNXP is not set # CONFIG_SERIAL_SC16IS7XX is not set @@ -2900,69 +3026,32 @@ CONFIG_SERIAL_SIFIVE=m # CONFIG_SERIAL_MILBEAUT_USIO is not set # CONFIG_SERIAL_LITEUART is not set # CONFIG_SERIAL_SUNPLUS is not set +# CONFIG_SERIAL_NUVOTON_MA35D1 is not set # end of Serial drivers # CONFIG_SERIAL_NONSTANDARD is not set # CONFIG_N_GSM is not set # CONFIG_NOZOMI is not set # CONFIG_NULL_TTY is not set -CONFIG_HVC_DRIVER=y -CONFIG_HVC_RISCV_SBI=y -CONFIG_RPMSG_TTY=m +# CONFIG_HVC_RISCV_SBI is not set +# CONFIG_RPMSG_TTY is not set CONFIG_SERIAL_DEV_BUS=y CONFIG_SERIAL_DEV_CTRL_TTYPORT=y # CONFIG_TTY_PRINTK is not 
set -CONFIG_VIRTIO_CONSOLE=m +# CONFIG_VIRTIO_CONSOLE is not set # CONFIG_IPMI_HANDLER is not set # CONFIG_ASPEED_KCS_IPMI_BMC is not set # CONFIG_NPCM7XX_KCS_IPMI_BMC is not set # CONFIG_ASPEED_BT_IPMI_BMC is not set +# CONFIG_SSIF_IPMI_BMC is not set # CONFIG_IPMB_DEVICE_INTERFACE is not set -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_TIMERIOMEM=y -# CONFIG_HW_RANDOM_ATMEL is not set -# CONFIG_HW_RANDOM_BA431 is not set -# CONFIG_HW_RANDOM_BCM2835 is not set -# CONFIG_HW_RANDOM_IPROC_RNG200 is not set -# CONFIG_HW_RANDOM_IXP4XX is not set -# CONFIG_HW_RANDOM_OMAP is not set -# CONFIG_HW_RANDOM_OMAP3_ROM is not set -# CONFIG_HW_RANDOM_VIRTIO is not set -# CONFIG_HW_RANDOM_IMX_RNGC is not set -# CONFIG_HW_RANDOM_NOMADIK is not set -# CONFIG_HW_RANDOM_STM32 is not set -# CONFIG_HW_RANDOM_MESON is not set -# CONFIG_HW_RANDOM_MTK is not set -# CONFIG_HW_RANDOM_EXYNOS is not set -# CONFIG_HW_RANDOM_NPCM is not set -# CONFIG_HW_RANDOM_KEYSTONE is not set -# CONFIG_HW_RANDOM_CCTRNG is not set -# CONFIG_HW_RANDOM_XIPHERA is not set -# CONFIG_HW_RANDOM_CN10K is not set +# CONFIG_HW_RANDOM is not set # CONFIG_APPLICOM is not set CONFIG_DEVMEM=y CONFIG_DEVPORT=y -CONFIG_TCG_TPM=y -CONFIG_HW_RANDOM_TPM=y -CONFIG_TCG_TIS_CORE=m -CONFIG_TCG_TIS=m -CONFIG_TCG_TIS_SPI=m -CONFIG_TCG_TIS_SPI_CR50=y -CONFIG_TCG_TIS_I2C=m -CONFIG_TCG_TIS_SYNQUACER=m -CONFIG_TCG_TIS_I2C_CR50=m -CONFIG_TCG_TIS_I2C_ATMEL=m -CONFIG_TCG_TIS_I2C_INFINEON=m -CONFIG_TCG_TIS_I2C_NUVOTON=m -CONFIG_TCG_ATMEL=m -CONFIG_TCG_VTPM_PROXY=m -CONFIG_TCG_TIS_ST33ZP24=m -CONFIG_TCG_TIS_ST33ZP24_I2C=m -CONFIG_TCG_TIS_ST33ZP24_SPI=m +# CONFIG_TCG_TPM is not set # CONFIG_XILLYBUS is not set # CONFIG_XILLYUSB is not set -CONFIG_RANDOM_TRUST_CPU=y -CONFIG_RANDOM_TRUST_BOOTLOADER=y # end of Character devices # @@ -2971,26 +3060,26 @@ CONFIG_RANDOM_TRUST_BOOTLOADER=y CONFIG_I2C=y CONFIG_I2C_BOARDINFO=y CONFIG_I2C_COMPAT=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MUX=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m # # Multiplexer I2C Chip support # -CONFIG_I2C_ARB_GPIO_CHALLENGE=y -CONFIG_I2C_MUX_GPIO=y -CONFIG_I2C_MUX_GPMUX=y -# CONFIG_I2C_MUX_LTC4306 is not set -# CONFIG_I2C_MUX_PCA9541 is not set -# CONFIG_I2C_MUX_PCA954x is not set -CONFIG_I2C_MUX_PINCTRL=y -CONFIG_I2C_MUX_REG=y -CONFIG_I2C_DEMUX_PINCTRL=y -# CONFIG_I2C_MUX_MLXCPLD is not set +CONFIG_I2C_ARB_GPIO_CHALLENGE=m +CONFIG_I2C_MUX_GPIO=m +CONFIG_I2C_MUX_GPMUX=m +CONFIG_I2C_MUX_LTC4306=m +CONFIG_I2C_MUX_PCA9541=m +CONFIG_I2C_MUX_PCA954x=m +CONFIG_I2C_MUX_PINCTRL=m +CONFIG_I2C_MUX_REG=m +CONFIG_I2C_DEMUX_PINCTRL=m +CONFIG_I2C_MUX_MLXCPLD=m # end of Multiplexer I2C Chip support +# CONFIG_I2C_ATR is not set CONFIG_I2C_HELPER_AUTO=y -CONFIG_I2C_ALGOBIT=y # # I2C Hardware Bus support @@ -3027,7 +3116,7 @@ CONFIG_I2C_SPACEMIT_K1X=y # CONFIG_I2C_BCM2835 is not set # CONFIG_I2C_BCM_IPROC is not set # CONFIG_I2C_BCM_KONA is not set -CONFIG_I2C_BRCMSTB=y +# CONFIG_I2C_BRCMSTB is not set # CONFIG_I2C_CADENCE is not set # CONFIG_I2C_CBUS_GPIO is not set # CONFIG_I2C_DAVINCI is not set @@ -3037,8 +3126,8 @@ CONFIG_I2C_BRCMSTB=y # CONFIG_I2C_EG20T is not set # CONFIG_I2C_EMEV2 is not set # CONFIG_I2C_EXYNOS5 is not set -CONFIG_I2C_GPIO=m -CONFIG_I2C_GPIO_FAULT_INJECTOR=y +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_GXP is not set # CONFIG_I2C_HIGHLANDER is not set # CONFIG_I2C_HISI is not set # CONFIG_I2C_IMG is not set @@ -3047,6 +3136,7 @@ CONFIG_I2C_GPIO_FAULT_INJECTOR=y # CONFIG_I2C_IOP3XX is not set # CONFIG_I2C_JZ4780 is not set # CONFIG_I2C_LPC2K is not set +# CONFIG_I2C_LS2X is not set # CONFIG_I2C_MESON is not set 
# CONFIG_I2C_MICROCHIP_CORE is not set # CONFIG_I2C_MT65XX is not set @@ -3094,7 +3184,7 @@ CONFIG_I2C_GPIO_FAULT_INJECTOR=y # CONFIG_I2C_PCI1XXXX is not set # CONFIG_I2C_ROBOTFUZZ_OSIF is not set # CONFIG_I2C_TAOS_EVM is not set -CONFIG_I2C_TINY_USB=m +# CONFIG_I2C_TINY_USB is not set # # Other I2C/SMBus bus drivers @@ -3123,6 +3213,7 @@ CONFIG_SPI_MEM=y # # CONFIG_SPI_ALTERA is not set # CONFIG_SPI_ALTERA_CORE is not set +# CONFIG_SPI_AMLOGIC_SPIFC_A1 is not set # CONFIG_SPI_AR934X is not set # CONFIG_SPI_ATH79 is not set # CONFIG_SPI_ARMADA_3700 is not set @@ -3135,11 +3226,13 @@ CONFIG_SPI_MEM=y # CONFIG_SPI_BCM63XX is not set # CONFIG_SPI_BCM63XX_HSSPI is not set # CONFIG_SPI_BCM_QSPI is not set +# CONFIG_SPI_BCMBCA_HSSPI is not set # CONFIG_SPI_BITBANG is not set # CONFIG_SPI_CADENCE is not set # CONFIG_SPI_CADENCE_QUADSPI is not set # CONFIG_SPI_CADENCE_XSPI is not set # CONFIG_SPI_CLPS711X is not set +# CONFIG_SPI_DAVINCI is not set # CONFIG_SPI_DESIGNWARE_EXT is not set CONFIG_SPI_K1X=y CONFIG_SPI_K1X_QSPI=y @@ -3158,6 +3251,8 @@ CONFIG_SPI_K1X_QSPI=y # CONFIG_SPI_INTEL_PCI is not set # CONFIG_SPI_INTEL_PLATFORM is not set # CONFIG_SPI_JCORE is not set +# CONFIG_SPI_LOONGSON_PCI is not set +# CONFIG_SPI_LOONGSON_PLATFORM is not set # CONFIG_SPI_LP8841_RTC is not set # CONFIG_SPI_FSL_SPI is not set # CONFIG_SPI_FSL_DSPI is not set @@ -3168,20 +3263,23 @@ CONFIG_SPI_K1X_QSPI=y # CONFIG_SPI_MT65XX is not set # CONFIG_SPI_MT7621 is not set # CONFIG_SPI_MTK_NOR is not set +# CONFIG_SPI_WPCM_FIU is not set # CONFIG_SPI_NPCM_FIU is not set # CONFIG_SPI_NPCM_PSPI is not set # CONFIG_SPI_LANTIQ_SSC is not set # CONFIG_SPI_OC_TINY is not set # CONFIG_SPI_OMAP24XX is not set # CONFIG_SPI_TI_QSPI is not set -# CONFIG_SPI_OMAP_100K is not set # CONFIG_SPI_ORION is not set +# CONFIG_SPI_PCI1XXXX is not set # CONFIG_SPI_PIC32 is not set # CONFIG_SPI_PIC32_SQI is not set # CONFIG_SPI_PXA2XX is not set # CONFIG_SPI_ROCKCHIP is not set # CONFIG_SPI_ROCKCHIP_SFC is not set # CONFIG_SPI_RSPI is not set +# CONFIG_SPI_RZV2M_CSI is not set +# CONFIG_SPI_QCOM_QSPI is not set # CONFIG_SPI_QUP is not set # CONFIG_SPI_S3C64XX is not set # CONFIG_SPI_SC18IS602 is not set @@ -3189,6 +3287,7 @@ CONFIG_SPI_K1X_QSPI=y # CONFIG_SPI_SH is not set # CONFIG_SPI_SH_HSPI is not set # CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_SN_F_OSPI is not set # CONFIG_SPI_SPRD is not set # CONFIG_SPI_SPRD_ADI is not set # CONFIG_SPI_STM32 is not set @@ -3257,6 +3356,7 @@ CONFIG_PTP_1588_CLOCK_QORIQ=y # CONFIG_PTP_1588_CLOCK_PCH is not set # CONFIG_PTP_1588_CLOCK_IDT82P33 is not set CONFIG_PTP_1588_CLOCK_IDTCM=y +# CONFIG_PTP_1588_CLOCK_MOCK is not set # end of PTP clock support CONFIG_PINCTRL=y @@ -3273,6 +3373,7 @@ CONFIG_GENERIC_PINCONF=y # CONFIG_PINCTRL_DA850_PUPD is not set # CONFIG_PINCTRL_EQUILIBRIUM is not set # CONFIG_PINCTRL_INGENIC is not set +# CONFIG_PINCTRL_LOONGSON2 is not set # CONFIG_PINCTRL_LPC18XX is not set # CONFIG_PINCTRL_MCP23S08 is not set # CONFIG_PINCTRL_MICROCHIP_SGPIO is not set @@ -3283,6 +3384,7 @@ CONFIG_PINCTRL_SPACEMIT_PMIC=y CONFIG_PINCTRL_SINGLE=y # CONFIG_PINCTRL_STMFX is not set # CONFIG_PINCTRL_SX150X is not set +# CONFIG_PINCTRL_MLXBF3 is not set # CONFIG_PINCTRL_OWL is not set # CONFIG_PINCTRL_ASPEED_G4 is not set # CONFIG_PINCTRL_ASPEED_G5 is not set @@ -3305,11 +3407,6 @@ CONFIG_PINCTRL_SINGLE=y # CONFIG_PINCTRL_AS370 is not set # CONFIG_PINCTRL_BERLIN_BG4CT is not set -# -# Intel pinctrl drivers -# -# end of Intel pinctrl drivers - # # MediaTek pinctrl drivers # @@ -3325,6 +3422,7 @@ 
CONFIG_PINCTRL_SINGLE=y # CONFIG_PINCTRL_MT6795 is not set # CONFIG_PINCTRL_MT6797 is not set # CONFIG_PINCTRL_MT7622 is not set +# CONFIG_PINCTRL_MT7981 is not set # CONFIG_PINCTRL_MT7986 is not set # CONFIG_PINCTRL_MT8167 is not set # CONFIG_PINCTRL_MT8173 is not set @@ -3357,7 +3455,6 @@ CONFIG_PINCTRL_SINGLE=y # CONFIG_PINCTRL_PFC_R8A77990 is not set # CONFIG_PINCTRL_PFC_R8A7779 is not set # CONFIG_PINCTRL_PFC_R8A7790 is not set -# CONFIG_PINCTRL_PFC_R8A77950 is not set # CONFIG_PINCTRL_PFC_R8A77951 is not set # CONFIG_PINCTRL_PFC_R8A7778 is not set # CONFIG_PINCTRL_PFC_R8A7793 is not set @@ -3403,10 +3500,11 @@ CONFIG_PINCTRL_SINGLE=y # end of Renesas pinctrl drivers # CONFIG_PINCTRL_EXYNOS is not set -# CONFIG_PINCTRL_S3C24XX is not set # CONFIG_PINCTRL_S3C64XX is not set # CONFIG_PINCTRL_SPRD_SC9860 is not set # CONFIG_PINCTRL_STARFIVE_JH7100 is not set +# CONFIG_PINCTRL_STARFIVE_JH7110_SYS is not set +# CONFIG_PINCTRL_STARFIVE_JH7110_AON is not set # CONFIG_PINCTRL_STM32F429 is not set # CONFIG_PINCTRL_STM32F469 is not set # CONFIG_PINCTRL_STM32F746 is not set @@ -3414,16 +3512,22 @@ CONFIG_PINCTRL_SINGLE=y # CONFIG_PINCTRL_STM32H743 is not set # CONFIG_PINCTRL_STM32MP135 is not set # CONFIG_PINCTRL_STM32MP157 is not set +# CONFIG_PINCTRL_STM32MP257 is not set # CONFIG_PINCTRL_TI_IODELAY is not set # CONFIG_PINCTRL_UNIPHIER is not set # CONFIG_PINCTRL_TMPV7700 is not set CONFIG_GPIOLIB=y CONFIG_GPIOLIB_FASTPATH_LIMIT=512 CONFIG_OF_GPIO=y +CONFIG_GPIOLIB_IRQCHIP=y # CONFIG_DEBUG_GPIO is not set CONFIG_GPIO_SYSFS=y CONFIG_GPIO_CDEV=y CONFIG_GPIO_CDEV_V1=y +CONFIG_GPIO_GENERIC=m +CONFIG_GPIO_REGMAP=m +CONFIG_GPIO_MAX730X=m +CONFIG_GPIO_IDIO_16=m # # Memory mapped GPIO drivers @@ -3442,14 +3546,14 @@ CONFIG_GPIO_CDEV_V1=y # CONFIG_GPIO_DWAPB is not set # CONFIG_GPIO_EIC_SPRD is not set # CONFIG_GPIO_EM is not set +# CONFIG_GPIO_GE_FPGA is not set # CONFIG_GPIO_FTGPIO010 is not set # CONFIG_GPIO_GENERIC_PLATFORM is not set # CONFIG_GPIO_GRGPIO is not set # CONFIG_GPIO_HISI is not set # CONFIG_GPIO_HLWD is not set -# CONFIG_GPIO_IOP is not set -CONFIG_GPIO_K1X=y # CONFIG_GPIO_LOGICVC is not set +# CONFIG_GPIO_LOONGSON_64BIT is not set # CONFIG_GPIO_LPC18XX is not set # CONFIG_GPIO_LPC32XX is not set # CONFIG_GPIO_MB86S7X is not set @@ -3457,7 +3561,6 @@ CONFIG_GPIO_K1X=y # CONFIG_GPIO_MT7621 is not set # CONFIG_GPIO_MXC is not set # CONFIG_GPIO_MXS is not set -# CONFIG_GPIO_PMIC_EIC_SPRD is not set # CONFIG_GPIO_PXA is not set # CONFIG_GPIO_RCAR is not set # CONFIG_GPIO_RDA is not set @@ -3467,14 +3570,14 @@ CONFIG_GPIO_K1X=y # CONFIG_GPIO_SNPS_CREG is not set # CONFIG_GPIO_SPRD is not set # CONFIG_GPIO_STP_XWAY is not set -# CONFIG_GPIO_SYSCON is not set +CONFIG_GPIO_SYSCON=m +CONFIG_GPIO_TANGIER=m # CONFIG_GPIO_TEGRA is not set # CONFIG_GPIO_TEGRA186 is not set # CONFIG_GPIO_TS4800 is not set # CONFIG_GPIO_THUNDERX is not set # CONFIG_GPIO_UNIPHIER is not set # CONFIG_GPIO_VISCONTI is not set -# CONFIG_GPIO_VX855 is not set # CONFIG_GPIO_XGENE_SB is not set # CONFIG_GPIO_XILINX is not set # CONFIG_GPIO_XLP is not set @@ -3485,47 +3588,53 @@ CONFIG_GPIO_K1X=y # # I2C GPIO expanders # -# CONFIG_GPIO_ADNP is not set -# CONFIG_GPIO_GW_PLD is not set -# CONFIG_GPIO_MAX7300 is not set -# CONFIG_GPIO_MAX732X is not set -# CONFIG_GPIO_PCA953X is not set -# CONFIG_GPIO_PCA9570 is not set -# CONFIG_GPIO_PCF857X is not set -# CONFIG_GPIO_TPIC2810 is not set -# CONFIG_GPIO_TS4900 is not set +CONFIG_GPIO_ADNP=m +CONFIG_GPIO_FXL6408=m +CONFIG_GPIO_DS4520=m +CONFIG_GPIO_GW_PLD=m 
+CONFIG_GPIO_MAX7300=m +CONFIG_GPIO_MAX732X=m +CONFIG_GPIO_PCA953X=y +CONFIG_GPIO_PCA953X_IRQ=y +CONFIG_GPIO_PCA9570=m +CONFIG_GPIO_PCF857X=m +CONFIG_GPIO_TPIC2810=m +CONFIG_GPIO_TS4900=m # end of I2C GPIO expanders # # MFD GPIO expanders # -# CONFIG_GPIO_SL28CPLD is not set -# CONFIG_GPIO_TQMX86 is not set +CONFIG_GPIO_ELKHARTLAKE=m +CONFIG_GPIO_PMIC_EIC_SPRD=m +CONFIG_GPIO_SL28CPLD=m +CONFIG_GPIO_TQMX86=m # end of MFD GPIO expanders # # PCI GPIO expanders # -# CONFIG_GPIO_AMD8111 is not set -# CONFIG_GPIO_BT8XX is not set -# CONFIG_GPIO_MLXBF is not set -# CONFIG_GPIO_MLXBF2 is not set -# CONFIG_GPIO_ML_IOH is not set -# CONFIG_GPIO_PCH is not set -# CONFIG_GPIO_PCI_IDIO_16 is not set -# CONFIG_GPIO_PCIE_IDIO_24 is not set -# CONFIG_GPIO_RDC321X is not set +CONFIG_GPIO_AMD8111=m +CONFIG_GPIO_BT8XX=m +CONFIG_GPIO_MLXBF=m +CONFIG_GPIO_MLXBF2=m +CONFIG_GPIO_MLXBF3=m +CONFIG_GPIO_ML_IOH=m +CONFIG_GPIO_PCH=m +CONFIG_GPIO_PCI_IDIO_16=m +CONFIG_GPIO_PCIE_IDIO_24=m +CONFIG_GPIO_RDC321X=m # end of PCI GPIO expanders # # SPI GPIO expanders # -# CONFIG_GPIO_74X164 is not set -# CONFIG_GPIO_MAX3191X is not set -# CONFIG_GPIO_MAX7301 is not set -# CONFIG_GPIO_MC33880 is not set -# CONFIG_GPIO_PISOSR is not set -# CONFIG_GPIO_XRA1403 is not set +CONFIG_GPIO_74X164=m +CONFIG_GPIO_MAX3191X=m +CONFIG_GPIO_MAX7301=m +CONFIG_GPIO_MC33880=m +CONFIG_GPIO_PISOSR=m +CONFIG_GPIO_XRA1403=m # end of SPI GPIO expanders # @@ -3537,50 +3646,76 @@ CONFIG_GPIO_K1X=y # Virtual GPIO drivers # # CONFIG_GPIO_AGGREGATOR is not set +# CONFIG_GPIO_LATCH is not set # CONFIG_GPIO_MOCKUP is not set # CONFIG_GPIO_VIRTIO is not set # CONFIG_GPIO_SIM is not set +CONFIG_GPIO_K1X=y # end of Virtual GPIO drivers -# CONFIG_W1 is not set -CONFIG_POWER_RESET=y -# CONFIG_POWER_RESET_BRCMKONA is not set -# CONFIG_POWER_RESET_BRCMSTB is not set -# CONFIG_POWER_RESET_GEMINI_POWEROFF is not set -CONFIG_POWER_RESET_GPIO=y -CONFIG_POWER_RESET_GPIO_RESTART=y -# CONFIG_POWER_RESET_LINKSTATION is not set -# CONFIG_POWER_RESET_OCELOT_RESET is not set -# CONFIG_POWER_RESET_PIIX4_POWEROFF is not set -# CONFIG_POWER_RESET_LTC2952 is not set -CONFIG_POWER_RESET_REGULATOR=y -# CONFIG_POWER_RESET_RESTART is not set -# CONFIG_POWER_RESET_KEYSTONE is not set -# CONFIG_POWER_RESET_SYSCON is not set -# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set -# CONFIG_POWER_RESET_RMOBILE is not set -# CONFIG_SYSCON_REBOOT_MODE is not set -# CONFIG_POWER_RESET_SC27XX is not set -# CONFIG_NVMEM_REBOOT_MODE is not set +CONFIG_W1=m + +# +# 1-wire Bus Masters +# +CONFIG_W1_MASTER_MATROX=m +CONFIG_W1_MASTER_DS2490=m +CONFIG_W1_MASTER_DS2482=m +CONFIG_W1_MASTER_MXC=m +CONFIG_W1_MASTER_GPIO=m +CONFIG_HDQ_MASTER_OMAP=m +CONFIG_W1_MASTER_SGI=m +# end of 1-wire Bus Masters + +# +# 1-wire Slaves +# +CONFIG_W1_SLAVE_THERM=m +CONFIG_W1_SLAVE_SMEM=m +CONFIG_W1_SLAVE_DS2405=m +CONFIG_W1_SLAVE_DS2408=m +CONFIG_W1_SLAVE_DS2408_READBACK=y +CONFIG_W1_SLAVE_DS2413=m +CONFIG_W1_SLAVE_DS2406=m +CONFIG_W1_SLAVE_DS2423=m +CONFIG_W1_SLAVE_DS2805=m +CONFIG_W1_SLAVE_DS2430=m +CONFIG_W1_SLAVE_DS2431=m +CONFIG_W1_SLAVE_DS2433=m +CONFIG_W1_SLAVE_DS2433_CRC=y +CONFIG_W1_SLAVE_DS2438=m +CONFIG_W1_SLAVE_DS250X=m +CONFIG_W1_SLAVE_DS2780=m +CONFIG_W1_SLAVE_DS2781=m +CONFIG_W1_SLAVE_DS28E04=m +CONFIG_W1_SLAVE_DS28E17=m +# end of 1-wire Slaves + +# CONFIG_POWER_RESET is not set CONFIG_POWER_SUPPLY=y # CONFIG_POWER_SUPPLY_DEBUG is not set CONFIG_POWER_SUPPLY_HWMON=y -# CONFIG_PDA_POWER is not set +# CONFIG_GENERIC_ADC_BATTERY is not set # CONFIG_IP5XXX_POWER is not set # CONFIG_TEST_POWER is not set # 
CONFIG_CHARGER_ADP5061 is not set # CONFIG_BATTERY_ACT8945A is not set # CONFIG_BATTERY_CW2015 is not set +# CONFIG_SPACEMIT_BATTERY_CW2015 is not set +# CONFIG_BATTERY_DS2760 is not set # CONFIG_BATTERY_DS2780 is not set # CONFIG_BATTERY_DS2781 is not set # CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_LEGO_EV3 is not set # CONFIG_BATTERY_SAMSUNG_SDI is not set -# CONFIG_BATTERY_SBS is not set +CONFIG_BATTERY_SBS=y # CONFIG_CHARGER_SBS is not set +CONFIG_CHARGER_SBS_VIRTUAL=y # CONFIG_MANAGER_SBS is not set # CONFIG_BATTERY_BQ27XXX is not set # CONFIG_BATTERY_MAX17040 is not set # CONFIG_BATTERY_MAX17042 is not set +# CONFIG_BATTERY_MAX1721X is not set # CONFIG_CHARGER_ISP1704 is not set # CONFIG_CHARGER_MAX8903 is not set # CONFIG_CHARGER_LP8727 is not set @@ -3604,10 +3739,14 @@ CONFIG_POWER_SUPPLY_HWMON=y # CONFIG_BATTERY_GOLDFISH is not set # CONFIG_BATTERY_RT5033 is not set # CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_RT9467 is not set +# CONFIG_CHARGER_RT9471 is not set # CONFIG_CHARGER_SC2731 is not set +# CONFIG_FUEL_GAUGE_SC27XX is not set # CONFIG_CHARGER_UCS1002 is not set # CONFIG_CHARGER_BD99954 is not set # CONFIG_BATTERY_UG3105 is not set +# CONFIG_CHARGER_SGM415XX is not set CONFIG_HWMON=y CONFIG_HWMON_VID=m # CONFIG_HWMON_DEBUG_CHIP is not set @@ -3636,7 +3775,6 @@ CONFIG_HWMON_VID=m # CONFIG_SENSORS_AS370 is not set # CONFIG_SENSORS_ASC7621 is not set # CONFIG_SENSORS_AXI_FAN_CONTROL is not set -# CONFIG_SENSORS_ARM_SCMI is not set # CONFIG_SENSORS_ASB100 is not set # CONFIG_SENSORS_ASPEED is not set # CONFIG_SENSORS_ATXP1 is not set @@ -3657,8 +3795,11 @@ CONFIG_SENSORS_DRIVETEMP=m # CONFIG_SENSORS_GL520SM is not set # CONFIG_SENSORS_G760A is not set # CONFIG_SENSORS_G762 is not set -CONFIG_SENSORS_GPIO_FAN=y +CONFIG_SENSORS_GPIO_FAN=m +# CONFIG_SENSORS_GXP_FAN_CTRL is not set # CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_HS3001 is not set +# CONFIG_SENSORS_IIO_HWMON is not set # CONFIG_SENSORS_IT87 is not set # CONFIG_SENSORS_JC42 is not set # CONFIG_SENSORS_POWR1220 is not set @@ -3684,6 +3825,7 @@ CONFIG_SENSORS_GPIO_FAN=y # CONFIG_SENSORS_MAX31722 is not set # CONFIG_SENSORS_MAX31730 is not set # CONFIG_SENSORS_MAX31760 is not set +# CONFIG_MAX31827 is not set # CONFIG_SENSORS_MAX6620 is not set # CONFIG_SENSORS_MAX6621 is not set # CONFIG_SENSORS_MAX6639 is not set @@ -3691,6 +3833,7 @@ CONFIG_SENSORS_GPIO_FAN=y # CONFIG_SENSORS_MAX6650 is not set # CONFIG_SENSORS_MAX6697 is not set # CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MC34VR500 is not set # CONFIG_SENSORS_MCP3021 is not set # CONFIG_SENSORS_TC654 is not set # CONFIG_SENSORS_TPS23861 is not set @@ -3714,7 +3857,9 @@ CONFIG_SENSORS_GPIO_FAN=y # CONFIG_SENSORS_LM95245 is not set # CONFIG_SENSORS_PC87360 is not set # CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set # CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set # CONFIG_SENSORS_NCT6775_I2C is not set # CONFIG_SENSORS_NCT7802 is not set # CONFIG_SENSORS_NCT7904 is not set @@ -3724,51 +3869,7 @@ CONFIG_SENSORS_GPIO_FAN=y # CONFIG_SENSORS_NZXT_SMART2 is not set # CONFIG_SENSORS_OCC_P8_I2C is not set # CONFIG_SENSORS_PCF8591 is not set -CONFIG_PMBUS=m -CONFIG_SENSORS_PMBUS=m -# CONFIG_SENSORS_ADM1266 is not set -# CONFIG_SENSORS_ADM1275 is not set -# CONFIG_SENSORS_BEL_PFE is not set -# CONFIG_SENSORS_BPA_RS600 is not set -# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set -# CONFIG_SENSORS_FSP_3Y is not set -# CONFIG_SENSORS_IBM_CFFPS is not set -# CONFIG_SENSORS_DPS920AB is not set 
-# CONFIG_SENSORS_INSPUR_IPSPS is not set -# CONFIG_SENSORS_IR35221 is not set -# CONFIG_SENSORS_IR36021 is not set -# CONFIG_SENSORS_IR38064 is not set -# CONFIG_SENSORS_IRPS5401 is not set -# CONFIG_SENSORS_ISL68137 is not set -# CONFIG_SENSORS_LM25066 is not set -# CONFIG_SENSORS_LT7182S is not set -# CONFIG_SENSORS_LTC2978 is not set -# CONFIG_SENSORS_LTC3815 is not set -# CONFIG_SENSORS_MAX15301 is not set -# CONFIG_SENSORS_MAX16064 is not set -# CONFIG_SENSORS_MAX16601 is not set -# CONFIG_SENSORS_MAX20730 is not set -# CONFIG_SENSORS_MAX20751 is not set -# CONFIG_SENSORS_MAX31785 is not set -# CONFIG_SENSORS_MAX34440 is not set -# CONFIG_SENSORS_MAX8688 is not set -# CONFIG_SENSORS_MP2888 is not set -# CONFIG_SENSORS_MP2975 is not set -# CONFIG_SENSORS_MP5023 is not set -# CONFIG_SENSORS_PIM4328 is not set -# CONFIG_SENSORS_PLI1209BC is not set -# CONFIG_SENSORS_PM6764TR is not set -# CONFIG_SENSORS_PXE1610 is not set -# CONFIG_SENSORS_Q54SJ108A2 is not set -# CONFIG_SENSORS_STPDDC60 is not set -# CONFIG_SENSORS_TPS40422 is not set -# CONFIG_SENSORS_TPS53679 is not set -# CONFIG_SENSORS_TPS546D24 is not set -# CONFIG_SENSORS_UCD9000 is not set -# CONFIG_SENSORS_UCD9200 is not set -# CONFIG_SENSORS_XDPE152 is not set -# CONFIG_SENSORS_XDPE122 is not set -# CONFIG_SENSORS_ZL6100 is not set +# CONFIG_PMBUS is not set CONFIG_SENSORS_PWM_FAN=y # CONFIG_SENSORS_RASPBERRYPI_HWMON is not set # CONFIG_SENSORS_SL28CPLD is not set @@ -3791,8 +3892,8 @@ CONFIG_SENSORS_SMSC47B397=m CONFIG_SENSORS_SCH56XX_COMMON=m CONFIG_SENSORS_SCH5627=m CONFIG_SENSORS_SCH5636=m -CONFIG_SENSORS_STTS751=m -# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SFCTEMP is not set # CONFIG_SENSORS_ADC128D818 is not set # CONFIG_SENSORS_ADS7828 is not set # CONFIG_SENSORS_ADS7871 is not set @@ -3801,7 +3902,7 @@ CONFIG_SENSORS_STTS751=m # CONFIG_SENSORS_INA2XX is not set # CONFIG_SENSORS_INA238 is not set # CONFIG_SENSORS_INA3221 is not set -CONFIG_SENSORS_TC74=m +# CONFIG_SENSORS_TC74 is not set # CONFIG_SENSORS_THMC50 is not set # CONFIG_SENSORS_TMP102 is not set # CONFIG_SENSORS_TMP103 is not set @@ -3842,7 +3943,7 @@ CONFIG_CPU_FREQ_THERMAL=y # CONFIG_CPU_HOTPLUG_THERMAL is not set # CONFIG_THERMAL_EMULATION is not set # CONFIG_THERMAL_MMIO is not set -CONFIG_HISI_THERMAL=y +# CONFIG_HISI_THERMAL is not set # CONFIG_IMX_THERMAL is not set # CONFIG_IMX8MM_THERMAL is not set # CONFIG_K3_THERMAL is not set @@ -3857,7 +3958,13 @@ CONFIG_HISI_THERMAL=y # CONFIG_DOVE_THERMAL is not set # CONFIG_ARMADA_THERMAL is not set # CONFIG_DA9062_THERMAL is not set -CONFIG_MTK_THERMAL=y + +# +# Mediatek thermal drivers +# +# CONFIG_MTK_THERMAL is not set +# end of Mediatek thermal drivers + CONFIG_K1X_THERMAL=y # @@ -3900,6 +4007,8 @@ CONFIG_K1X_THERMAL=y # CONFIG_TEGRA30_TSENSOR is not set # end of NVIDIA Tegra thermal drivers +# CONFIG_GENERIC_ADC_THERMAL is not set + # # Qualcomm thermal drivers # @@ -3907,6 +4016,7 @@ CONFIG_K1X_THERMAL=y # CONFIG_UNIPHIER_THERMAL is not set # CONFIG_SPRD_THERMAL is not set +# CONFIG_LOONGSON2_THERMAL is not set CONFIG_WATCHDOG=y CONFIG_WATCHDOG_CORE=y # CONFIG_WATCHDOG_NOWAYOUT is not set @@ -3931,7 +4041,9 @@ CONFIG_SOFT_WATCHDOG=m # CONFIG_GPIO_WATCHDOG is not set # CONFIG_MENF21BMC_WATCHDOG is not set # CONFIG_XILINX_WATCHDOG is not set +# CONFIG_XILINX_WINDOW_WATCHDOG is not set # CONFIG_ZIIRAVE_WATCHDOG is not set +# CONFIG_MLX_WDT is not set # CONFIG_SL28CPLD_WATCHDOG is not set # CONFIG_ARMADA_37XX_WATCHDOG is not set # CONFIG_ASM9260_WATCHDOG is 
not set @@ -3941,6 +4053,7 @@ CONFIG_SOFT_WATCHDOG=m # CONFIG_CADENCE_WATCHDOG is not set # CONFIG_FTWDT010_WATCHDOG is not set # CONFIG_S3C2410_WATCHDOG is not set +# CONFIG_SA1100_WATCHDOG is not set # CONFIG_DW_WATCHDOG is not set # CONFIG_EP93XX_WATCHDOG is not set # CONFIG_OMAP_WATCHDOG is not set @@ -3971,6 +4084,7 @@ CONFIG_SOFT_WATCHDOG=m # CONFIG_RENESAS_RZN1WDT is not set # CONFIG_RENESAS_RZG2LWDT is not set # CONFIG_ASPEED_WATCHDOG is not set +CONFIG_STM32_WATCHDOG=y # CONFIG_UNIPHIER_WATCHDOG is not set # CONFIG_RTD119X_WATCHDOG is not set # CONFIG_REALTEK_OTTO_WDT is not set @@ -3979,17 +4093,54 @@ CONFIG_SOFT_WATCHDOG=m # CONFIG_MSC313E_WATCHDOG is not set # CONFIG_APPLE_WATCHDOG is not set # CONFIG_SUNPLUS_WATCHDOG is not set +# CONFIG_ADVANTECH_WDT is not set +# CONFIG_ADVANTECH_EC_WDT is not set +# CONFIG_ALIM1535_WDT is not set # CONFIG_ALIM7101_WDT is not set +# CONFIG_EBC_C384_WDT is not set +# CONFIG_EXAR_WDT is not set +# CONFIG_F71808E_WDT is not set +# CONFIG_SP5100_TCO is not set # CONFIG_SC520_WDT is not set +# CONFIG_SBC_FITPC2_WATCHDOG is not set +# CONFIG_EUROTECH_WDT is not set +# CONFIG_IB700_WDT is not set +# CONFIG_IBMASR is not set +# CONFIG_WAFER_WDT is not set # CONFIG_I6300ESB_WDT is not set +# CONFIG_IE6XX_WDT is not set +# CONFIG_IT8712F_WDT is not set +# CONFIG_IT87_WDT is not set +# CONFIG_HP_WATCHDOG is not set +# CONFIG_SC1200_WDT is not set +# CONFIG_PC87413_WDT is not set +# CONFIG_NV_TCO is not set # CONFIG_RDC321X_WDT is not set +# CONFIG_60XX_WDT is not set +# CONFIG_CPU5_WDT is not set +# CONFIG_SMSC_SCH311X_WDT is not set +# CONFIG_SMSC37B787_WDT is not set +# CONFIG_TQMX86_WDT is not set +# CONFIG_VIA_WDT is not set +# CONFIG_W83627HF_WDT is not set +# CONFIG_W83877F_WDT is not set +# CONFIG_W83977F_WDT is not set +# CONFIG_MACHZ_WDT is not set +# CONFIG_SBC_EPX_C3_WATCHDOG is not set # CONFIG_BCM47XX_WDT is not set +# CONFIG_JZ4740_WDT is not set +CONFIG_MARVELL_GTI_WDT=y # CONFIG_BCM2835_WDT is not set # CONFIG_BCM_KONA_WDT is not set +# CONFIG_BCM_KONA_WDT_DEBUG is not set # CONFIG_BCM7038_WDT is not set # CONFIG_IMGPDC_WDT is not set +# CONFIG_LOONGSON1_WDT is not set +# CONFIG_GXP_WATCHDOG is not set +# CONFIG_MT7621_WDT is not set # CONFIG_MPC5200_WDT is not set # CONFIG_MEN_A21_WDT is not set +# CONFIG_STARFIVE_WATCHDOG is not set # CONFIG_UML_WATCHDOG is not set # @@ -4016,6 +4167,7 @@ CONFIG_MFD_CORE=y # CONFIG_MFD_ACT8945A is not set # CONFIG_MFD_SUN4I_GPADC is not set # CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_SMPRO is not set # CONFIG_MFD_AS3722 is not set # CONFIG_PMIC_ADP5520 is not set # CONFIG_MFD_AAT2870_CORE is not set @@ -4025,8 +4177,9 @@ CONFIG_MFD_CORE=y # CONFIG_MFD_BCM590XX is not set # CONFIG_MFD_BD9571MWV is not set # CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CS42L43_I2C is not set # CONFIG_MFD_MADERA is not set -# CONFIG_MFD_ASIC3 is not set +# CONFIG_MFD_MAX5970 is not set # CONFIG_PMIC_DA903X is not set # CONFIG_MFD_DA9052_SPI is not set # CONFIG_MFD_DA9052_I2C is not set @@ -4045,10 +4198,9 @@ CONFIG_MFD_CORE=y # CONFIG_MFD_MX25_TSADC is not set # CONFIG_MFD_HI6421_PMIC is not set # CONFIG_MFD_HI655X_PMIC is not set -# CONFIG_HTC_PASIC3 is not set -# CONFIG_HTC_I2CPLD is not set # CONFIG_LPC_ICH is not set # CONFIG_LPC_SCH is not set +# CONFIG_INTEL_SOC_PMIC is not set # CONFIG_MFD_IQS62X is not set # CONFIG_MFD_JANZ_CMODIO is not set # CONFIG_MFD_KEMPLD is not set @@ -4056,6 +4208,7 @@ CONFIG_MFD_CORE=y # CONFIG_MFD_88PM805 is not set # CONFIG_MFD_88PM860X is not set # CONFIG_MFD_MAX14577 is not set +# 
CONFIG_MFD_MAX77541 is not set # CONFIG_MFD_MAX77620 is not set # CONFIG_MFD_MAX77650 is not set # CONFIG_MFD_MAX77686 is not set @@ -4079,12 +4232,13 @@ CONFIG_MFD_CORE=y # CONFIG_MFD_PCF50633 is not set # CONFIG_MFD_PM8XXX is not set # CONFIG_MFD_SY7636A is not set -# CONFIG_MFD_RDC321X is not set +CONFIG_MFD_RDC321X=m # CONFIG_MFD_RT4831 is not set # CONFIG_MFD_RT5033 is not set # CONFIG_MFD_RT5120 is not set # CONFIG_MFD_RC5T583 is not set -# CONFIG_MFD_RK808 is not set +# CONFIG_MFD_RK8XX_I2C is not set +# CONFIG_MFD_RK8XX_SPI is not set # CONFIG_MFD_RN5T618 is not set # CONFIG_MFD_SEC_CORE is not set # CONFIG_MFD_SI476X_CORE is not set @@ -4092,6 +4246,7 @@ CONFIG_MFD_CORE=y # CONFIG_MFD_SM501 is not set # CONFIG_MFD_SKY81452 is not set # CONFIG_MFD_SC27XX_PMIC is not set +# CONFIG_RZ_MTU3 is not set # CONFIG_ABX500_CORE is not set # CONFIG_MFD_STMPE is not set # CONFIG_MFD_SUN6I_PRCM is not set @@ -4110,10 +4265,13 @@ CONFIG_MFD_SYSCON=y # CONFIG_MFD_TI_LP873X is not set # CONFIG_MFD_TI_LP87565 is not set # CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS65219 is not set # CONFIG_MFD_TPS6586X is not set # CONFIG_MFD_TPS65910 is not set # CONFIG_MFD_TPS65912_I2C is not set # CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_TPS6594_SPI is not set # CONFIG_TWL4030_CORE is not set # CONFIG_TWL6040_CORE is not set # CONFIG_MFD_WL1273_CORE is not set @@ -4144,7 +4302,7 @@ CONFIG_MFD_SYSCON=y # CONFIG_MFD_QCOM_PM8008 is not set CONFIG_MFD_SPACEMIT_PMIC=y # CONFIG_RAVE_SP_CORE is not set -# CONFIG_MFD_INTEL_M10_BMC is not set +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set # CONFIG_MFD_RSMU_I2C is not set # CONFIG_MFD_RSMU_SPI is not set # end of Multifunction device drivers @@ -4158,12 +4316,12 @@ CONFIG_REGULATOR_USERSPACE_CONSUMER=m # CONFIG_REGULATOR_ACT8865 is not set # CONFIG_REGULATOR_AD5398 is not set # CONFIG_REGULATOR_ANATOP is not set -# CONFIG_REGULATOR_ARM_SCMI is not set +# CONFIG_REGULATOR_AW37503 is not set # CONFIG_REGULATOR_DA9121 is not set # CONFIG_REGULATOR_DA9210 is not set # CONFIG_REGULATOR_DA9211 is not set -CONFIG_REGULATOR_FAN53555=m -CONFIG_REGULATOR_FAN53880=m +# CONFIG_REGULATOR_FAN53555 is not set +# CONFIG_REGULATOR_FAN53880 is not set CONFIG_REGULATOR_GPIO=m # CONFIG_REGULATOR_ISL9305 is not set # CONFIG_REGULATOR_ISL6271A is not set @@ -4176,6 +4334,7 @@ CONFIG_REGULATOR_GPIO=m # CONFIG_REGULATOR_MAX1586 is not set # CONFIG_REGULATOR_MAX77620 is not set # CONFIG_REGULATOR_MAX77650 is not set +# CONFIG_REGULATOR_MAX77857 is not set # CONFIG_REGULATOR_MAX8649 is not set # CONFIG_REGULATOR_MAX8660 is not set # CONFIG_REGULATOR_MAX8893 is not set @@ -4183,6 +4342,7 @@ CONFIG_REGULATOR_GPIO=m # CONFIG_REGULATOR_MAX8952 is not set # CONFIG_REGULATOR_MAX8973 is not set # CONFIG_REGULATOR_MAX20086 is not set +# CONFIG_REGULATOR_MAX20411 is not set # CONFIG_REGULATOR_MAX77686 is not set # CONFIG_REGULATOR_MAX77693 is not set # CONFIG_REGULATOR_MAX77802 is not set @@ -4201,18 +4361,24 @@ CONFIG_REGULATOR_GPIO=m # CONFIG_REGULATOR_PV88080 is not set # CONFIG_REGULATOR_PV88090 is not set CONFIG_REGULATOR_PWM=m +# CONFIG_REGULATOR_QCOM_REFGEN is not set # CONFIG_REGULATOR_QCOM_RPMH is not set # CONFIG_REGULATOR_QCOM_SPMI is not set # CONFIG_REGULATOR_QCOM_USB_VBUS is not set +# CONFIG_REGULATOR_RAA215300 is not set # CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY is not set # CONFIG_REGULATOR_RT4801 is not set +# CONFIG_REGULATOR_RT4803 is not set # CONFIG_REGULATOR_RT5190A is not set +# CONFIG_REGULATOR_RT5739 is not set # 
CONFIG_REGULATOR_RT5759 is not set # CONFIG_REGULATOR_RT6160 is not set +# CONFIG_REGULATOR_RT6190 is not set # CONFIG_REGULATOR_RT6245 is not set # CONFIG_REGULATOR_RTQ2134 is not set # CONFIG_REGULATOR_RTMV20 is not set # CONFIG_REGULATOR_RTQ6752 is not set +# CONFIG_REGULATOR_RTQ2208 is not set # CONFIG_REGULATOR_S2MPA01 is not set # CONFIG_REGULATOR_S2MPS11 is not set # CONFIG_REGULATOR_S5M8767 is not set @@ -4229,6 +4395,7 @@ CONFIG_REGULATOR_PWM=m # CONFIG_REGULATOR_TPS51632 is not set # CONFIG_REGULATOR_TPS62360 is not set # CONFIG_REGULATOR_TPS6286X is not set +# CONFIG_REGULATOR_TPS6287X is not set # CONFIG_REGULATOR_TPS65023 is not set # CONFIG_REGULATOR_TPS6507X is not set # CONFIG_REGULATOR_TPS65132 is not set @@ -4323,7 +4490,7 @@ CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y # CONFIG_VIDEO_GO7007 is not set # CONFIG_VIDEO_HDPVR is not set # CONFIG_VIDEO_PVRUSB2 is not set -# CONFIG_VIDEO_STK1160_COMMON is not set +# CONFIG_VIDEO_STK1160 is not set # # Analog/digital TV USB devices @@ -4402,6 +4569,10 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y # Mediatek media platform drivers # +# +# Microchip Technology, Inc. media platform drivers +# + # # NVidia media platform drivers # @@ -4409,7 +4580,10 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y # # NXP media platform drivers # +# CONFIG_VIDEO_IMX7_CSI is not set +# CONFIG_VIDEO_IMX8MQ_MIPI_CSI2 is not set # CONFIG_VIDEO_IMX_MIPI_CSIS is not set +# CONFIG_VIDEO_IMX8_ISI is not set # # Qualcomm media platform drivers @@ -4469,12 +4643,18 @@ CONFIG_SPACEMIT_K1X_ISP_V2=y CONFIG_SPACEMIT_K1X_CPP_V2=y CONFIG_SPACEMIT_K1X_SENSOR_V2=y +# +# SPACEMIT K1X Virtual Camera Driver +# +# CONFIG_SPACEMIT_K1X_VIR_CAMERA is not set + # # MMC/SDIO DVB adapters # # CONFIG_SMS_SDIO_DRV is not set # CONFIG_V4L_TEST_DRIVERS is not set # CONFIG_DVB_TEST_DRIVERS is not set +CONFIG_UVC_COMMON=y CONFIG_VIDEOBUF2_CORE=y CONFIG_VIDEOBUF2_V4L2=y CONFIG_VIDEOBUF2_MEMOPS=y @@ -4498,24 +4678,25 @@ CONFIG_VIDEO_CAMERA_SENSOR=y # CONFIG_VIDEO_IMX258 is not set # CONFIG_VIDEO_IMX274 is not set # CONFIG_VIDEO_IMX290 is not set +# CONFIG_VIDEO_IMX296 is not set # CONFIG_VIDEO_IMX319 is not set # CONFIG_VIDEO_IMX334 is not set # CONFIG_VIDEO_IMX335 is not set # CONFIG_VIDEO_IMX355 is not set # CONFIG_VIDEO_IMX412 is not set +# CONFIG_VIDEO_IMX415 is not set # CONFIG_VIDEO_MT9M001 is not set -# CONFIG_VIDEO_MT9M032 is not set # CONFIG_VIDEO_MT9M111 is not set # CONFIG_VIDEO_MT9P031 is not set -# CONFIG_VIDEO_MT9T001 is not set # CONFIG_VIDEO_MT9T112 is not set # CONFIG_VIDEO_MT9V011 is not set # CONFIG_VIDEO_MT9V032 is not set # CONFIG_VIDEO_MT9V111 is not set -# CONFIG_VIDEO_NOON010PC30 is not set # CONFIG_VIDEO_OG01A1B is not set +# CONFIG_VIDEO_OV01A10 is not set # CONFIG_VIDEO_OV02A10 is not set # CONFIG_VIDEO_OV08D10 is not set +# CONFIG_VIDEO_OV08X40 is not set # CONFIG_VIDEO_OV13858 is not set # CONFIG_VIDEO_OV13B10 is not set # CONFIG_VIDEO_OV2640 is not set @@ -4523,6 +4704,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y # CONFIG_VIDEO_OV2680 is not set # CONFIG_VIDEO_OV2685 is not set # CONFIG_VIDEO_OV2740 is not set +# CONFIG_VIDEO_OV4689 is not set # CONFIG_VIDEO_OV5640 is not set # CONFIG_VIDEO_OV5645 is not set # CONFIG_VIDEO_OV5647 is not set @@ -4538,6 +4720,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y # CONFIG_VIDEO_OV772X is not set # CONFIG_VIDEO_OV7740 is not set # CONFIG_VIDEO_OV8856 is not set +# CONFIG_VIDEO_OV8858 is not set # CONFIG_VIDEO_OV8865 is not set # CONFIG_VIDEO_OV9282 is not set # CONFIG_VIDEO_OV9640 is not set @@ -4547,15 +4730,11 @@ CONFIG_VIDEO_CAMERA_SENSOR=y # CONFIG_VIDEO_RDACM21 is 
not set # CONFIG_VIDEO_RJ54N1 is not set # CONFIG_VIDEO_S5C73M3 is not set -# CONFIG_VIDEO_S5K4ECGX is not set # CONFIG_VIDEO_S5K5BAF is not set # CONFIG_VIDEO_S5K6A3 is not set -# CONFIG_VIDEO_S5K6AA is not set -# CONFIG_VIDEO_SR030PC30 is not set -# CONFIG_VIDEO_VS6624 is not set +# CONFIG_VIDEO_ST_VGXY61 is not set # CONFIG_VIDEO_CCS is not set # CONFIG_VIDEO_ET8EK8 is not set -# CONFIG_VIDEO_M5MOLS is not set # # Lens drivers @@ -4563,6 +4742,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y # CONFIG_VIDEO_AD5820 is not set # CONFIG_VIDEO_AK7375 is not set # CONFIG_VIDEO_DW9714 is not set +# CONFIG_VIDEO_DW9719 is not set # CONFIG_VIDEO_DW9768 is not set # CONFIG_VIDEO_DW9807_VCM is not set # end of Lens drivers @@ -4620,6 +4800,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y # CONFIG_VIDEO_SAA7110 is not set # CONFIG_VIDEO_SAA711X is not set # CONFIG_VIDEO_TC358743 is not set +# CONFIG_VIDEO_TC358746 is not set # CONFIG_VIDEO_TVP514X is not set # CONFIG_VIDEO_TVP5150 is not set # CONFIG_VIDEO_TVP7002 is not set @@ -4639,7 +4820,6 @@ CONFIG_VIDEO_CAMERA_SENSOR=y # # Video encoders # -# CONFIG_VIDEO_AD9389B is not set # CONFIG_VIDEO_ADV7170 is not set # CONFIG_VIDEO_ADV7175 is not set # CONFIG_VIDEO_ADV7343 is not set @@ -4679,6 +4859,14 @@ CONFIG_VIDEO_CAMERA_SENSOR=y # CONFIG_VIDEO_THS7303 is not set # end of Miscellaneous helper chips +# +# Video serializers and deserializers +# +# CONFIG_VIDEO_DS90UB913 is not set +# CONFIG_VIDEO_DS90UB953 is not set +# CONFIG_VIDEO_DS90UB960 is not set +# end of Video serializers and deserializers + # # Media SPI Adapters # @@ -4901,6 +5089,10 @@ CONFIG_DVB_LGDT3306A=m # # Graphics support # +CONFIG_VIDEO_CMDLINE=y +CONFIG_VIDEO_NOMODESET=y +# CONFIG_AUXDISPLAY is not set +# CONFIG_TEGRA_HOST1X is not set # CONFIG_IMX_IPUV3_CORE is not set CONFIG_DRM=y CONFIG_DRM_MIPI_DSI=y @@ -4944,8 +5136,12 @@ CONFIG_DRM_DP_AUX_BUS=y # CONFIG_DRM_RCAR_DW_HDMI is not set # CONFIG_DRM_RCAR_USE_LVDS is not set # CONFIG_DRM_RCAR_USE_MIPI_DSI is not set +# CONFIG_DRM_RZG2L_MIPI_DSI is not set +# CONFIG_DRM_SHMOBILE is not set # CONFIG_DRM_SUN4I is not set # CONFIG_DRM_QXL is not set +# CONFIG_DRM_MSM is not set +# CONFIG_DRM_TEGRA is not set CONFIG_DRM_PANEL=y # @@ -4954,6 +5150,7 @@ CONFIG_DRM_PANEL=y # CONFIG_DRM_PANEL_ABT_Y030XX067A is not set # CONFIG_DRM_PANEL_ARM_VERSATILE is not set # CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596 is not set +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set # CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0 is not set # CONFIG_DRM_PANEL_BOE_HIMAX8279D is not set # CONFIG_DRM_PANEL_BOE_TV101WUM_NL6 is not set @@ -4965,11 +5162,13 @@ CONFIG_DRM_PANEL=y # CONFIG_DRM_PANEL_ELIDA_KD35T133 is not set # CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02 is not set # CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D is not set +# CONFIG_DRM_PANEL_HIMAX_HX8394 is not set # CONFIG_DRM_PANEL_ILITEK_IL9322 is not set # CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set # CONFIG_DRM_PANEL_ILITEK_ILI9881C is not set # CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set # CONFIG_DRM_PANEL_INNOLUX_P079ZCA is not set +# CONFIG_DRM_PANEL_JADARD_JD9365DA_H3 is not set # CONFIG_DRM_PANEL_JDI_LT070ME05000 is not set # CONFIG_DRM_PANEL_JDI_R63452 is not set # CONFIG_DRM_PANEL_KHADAS_TS050 is not set @@ -4979,15 +5178,19 @@ CONFIG_DRM_PANEL=y # CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set # CONFIG_DRM_PANEL_LG_LB035Q02 is not set # CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_MAGNACHIP_D53E6EA8966 is not set # CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set +# CONFIG_DRM_PANEL_NEWVISION_NV3051D is not set # 
CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set # CONFIG_DRM_PANEL_NOVATEK_NT35510 is not set # CONFIG_DRM_PANEL_NOVATEK_NT35560 is not set # CONFIG_DRM_PANEL_NOVATEK_NT35950 is not set +# CONFIG_DRM_PANEL_NOVATEK_NT36523 is not set # CONFIG_DRM_PANEL_NOVATEK_NT36672A is not set # CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set # CONFIG_DRM_PANEL_MANTIX_MLAF057WE51 is not set # CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set # CONFIG_DRM_PANEL_ORISETECH_OTM8009A is not set # CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS is not set # CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00 is not set @@ -4999,6 +5202,7 @@ CONFIG_DRM_PANEL=y # CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set # CONFIG_DRM_PANEL_SAMSUNG_S6D16D0 is not set # CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set # CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2 is not set # CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03 is not set # CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set @@ -5014,13 +5218,17 @@ CONFIG_DRM_PANEL=y # CONFIG_DRM_PANEL_SITRONIX_ST7703 is not set # CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set # CONFIG_DRM_PANEL_SONY_ACX565AKM is not set +# CONFIG_DRM_PANEL_SONY_TD4353_JDI is not set # CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521 is not set +# CONFIG_DRM_PANEL_STARTEK_KD070FHFID015 is not set # CONFIG_DRM_PANEL_TDO_TL070WSH30 is not set # CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set # CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set # CONFIG_DRM_PANEL_TPO_TPG110 is not set # CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA is not set # CONFIG_DRM_PANEL_VISIONOX_RM69299 is not set +# CONFIG_DRM_PANEL_VISIONOX_VTDR6130 is not set +# CONFIG_DRM_PANEL_VISIONOX_R66451 is not set # CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set # CONFIG_DRM_PANEL_XINPENG_XPP055C272 is not set # end of Display Panels @@ -5031,7 +5239,6 @@ CONFIG_DRM_PANEL_BRIDGE=y # # Display Interface Bridges # -# CONFIG_DRM_CDNS_DSI is not set # CONFIG_DRM_CHIPONE_ICN6211 is not set # CONFIG_DRM_CHRONTEL_CH7033 is not set # CONFIG_DRM_CROS_EC_ANX7688 is not set @@ -5049,6 +5256,7 @@ CONFIG_DRM_PANEL_BRIDGE=y # CONFIG_DRM_NXP_PTN3460 is not set # CONFIG_DRM_PARADE_PS8622 is not set # CONFIG_DRM_PARADE_PS8640 is not set +# CONFIG_DRM_SAMSUNG_DSIM is not set # CONFIG_DRM_SIL_SII8620 is not set # CONFIG_DRM_SII902X is not set # CONFIG_DRM_SII9234 is not set @@ -5068,6 +5276,7 @@ CONFIG_DRM_PANEL_BRIDGE=y # CONFIG_DRM_ANALOGIX_ANX78XX is not set # CONFIG_DRM_ANALOGIX_ANX7625 is not set # CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_CDNS_DSI is not set # CONFIG_DRM_CDNS_MHDP8546 is not set # CONFIG_DRM_IMX8QM_LDB is not set # CONFIG_DRM_IMX8QXP_LDB is not set @@ -5075,9 +5284,11 @@ CONFIG_DRM_PANEL_BRIDGE=y # CONFIG_DRM_IMX8QXP_PIXEL_LINK_TO_DPI is not set # end of Display Interface Bridges +# CONFIG_DRM_IMX_LCDC is not set # CONFIG_DRM_INGENIC is not set # CONFIG_DRM_V3D is not set # CONFIG_DRM_VC4 is not set +# CONFIG_DRM_LOONGSON is not set # CONFIG_DRM_ETNAVIV is not set # CONFIG_DRM_HISI_HIBMC is not set # CONFIG_DRM_LOGICVC is not set @@ -5087,6 +5298,7 @@ CONFIG_DRM_PANEL_BRIDGE=y # CONFIG_DRM_BOCHS is not set # CONFIG_DRM_CIRRUS_QEMU is not set # CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_OFDRM is not set # CONFIG_DRM_PANEL_MIPI_DBI is not set # CONFIG_DRM_SIMPLEDRM is not set # CONFIG_TINYDRM_HX8357D is not set @@ -5110,35 +5322,18 @@ CONFIG_DRM_PANEL_BRIDGE=y # CONFIG_DRM_SPRD is not set CONFIG_DRM_SPACEMIT=y CONFIG_SPACEMIT_MIPI_PANEL=y +CONFIG_SPACEMIT_HDMI=y CONFIG_DRM_LT8911EXB=y CONFIG_DRM_LT9711=y +# 
CONFIG_POWERVR_ROGUE_NULLDRMDISP is not set CONFIG_POWERVR_ROGUE=y # CONFIG_DRM_LEGACY is not set CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y -CONFIG_DRM_NOMODESET=y # # Frame buffer Devices # -CONFIG_FB_CMDLINE=y -CONFIG_FB_NOTIFY=y CONFIG_FB=y -# CONFIG_FIRMWARE_EDID is not set -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -CONFIG_FB_SYS_FILLRECT=y -CONFIG_FB_SYS_COPYAREA=y -CONFIG_FB_SYS_IMAGEBLIT=y -# CONFIG_FB_FOREIGN_ENDIAN is not set -CONFIG_FB_SYS_FOPS=y -CONFIG_FB_DEFERRED_IO=y -# CONFIG_FB_MODE_HELPERS is not set -# CONFIG_FB_TILEBLITTING is not set - -# -# Frame buffer hardware drivers -# # CONFIG_FB_CIRRUS is not set # CONFIG_FB_PM2 is not set # CONFIG_FB_CLPS711X is not set @@ -5176,9 +5371,7 @@ CONFIG_FB_DEFERRED_IO=y # CONFIG_FB_CARMINE is not set # CONFIG_FB_WM8505 is not set # CONFIG_FB_PXA168 is not set -# CONFIG_FB_W100 is not set # CONFIG_FB_SH_MOBILE_LCDC is not set -# CONFIG_FB_TMIO is not set # CONFIG_FB_S3C is not set # CONFIG_FB_SMSCUFX is not set # CONFIG_FB_UDL is not set @@ -5194,6 +5387,21 @@ CONFIG_FB_DEFERRED_IO=y # CONFIG_FB_SM712 is not set # CONFIG_FB_OMAP2 is not set # CONFIG_MMP_DISP is not set +CONFIG_FB_CORE=y +CONFIG_FB_NOTIFY=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_DEVICE=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_IOMEM_FOPS=y +CONFIG_FB_SYSMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set # end of Frame buffer Devices # @@ -5202,6 +5410,7 @@ CONFIG_FB_DEFERRED_IO=y # CONFIG_LCD_CLASS_DEVICE is not set CONFIG_BACKLIGHT_CLASS_DEVICE=y # CONFIG_BACKLIGHT_KTD253 is not set +# CONFIG_BACKLIGHT_KTZ8866 is not set # CONFIG_BACKLIGHT_OMAP1 is not set CONFIG_BACKLIGHT_PWM=y # CONFIG_BACKLIGHT_QCOM_WLED is not set @@ -5237,6 +5446,7 @@ CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y # CONFIG_LOGO is not set # end of Graphics support +# CONFIG_DRM_ACCEL is not set CONFIG_SOUND=y CONFIG_SND=y CONFIG_SND_TIMER=y @@ -5260,6 +5470,7 @@ CONFIG_SND_CTL_FAST_LOOKUP=y CONFIG_SND_DRIVERS=y # CONFIG_SND_DUMMY is not set # CONFIG_SND_ALOOP is not set +# CONFIG_SND_PCMTEST is not set # CONFIG_SND_MTPAV is not set # CONFIG_SND_SERIAL_U16550 is not set # CONFIG_SND_SERIAL_GENERIC is not set @@ -5374,17 +5585,25 @@ CONFIG_SND_SOC_COMPRESS=y # CONFIG_SND_IMX_SOC is not set # end of SoC Audio for Freescale CPUs +# CONFIG_SND_SOC_CHV3_I2S is not set # CONFIG_SND_I2S_HI6210_I2S is not set # CONFIG_SND_JZ4740_SOC_I2S is not set # CONFIG_SND_KIRKWOOD_SOC is not set + +# +# SoC Audio for Loongson CPUs +# +# CONFIG_SND_SOC_LOONGSON_I2S_PCI is not set +# CONFIG_SND_SOC_LOONGSON_CARD is not set +# end of SoC Audio for Loongson CPUs + # CONFIG_SND_SOC_IMG is not set -CONFIG_SND_SOC_INTEL_SST_TOPLEVEL=y +# CONFIG_SND_SOC_INTEL_SST_TOPLEVEL is not set # CONFIG_SND_SOC_INTEL_KEEMBAY is not set # CONFIG_SND_SOC_INTEL_AVS is not set -CONFIG_SND_SOC_INTEL_MACH=y -# CONFIG_SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES is not set # CONFIG_SND_SOC_MT8186 is not set # CONFIG_SND_SOC_MTK_BTCVSD is not set +# CONFIG_SND_SOC_MT8188 is not set # CONFIG_SND_SOC_MT8195 is not set # @@ -5421,6 +5640,7 @@ CONFIG_SND_SOC_INTEL_MACH=y # CONFIG_SND_SOC_SOF_TOPLEVEL is not set # CONFIG_SND_SOC_SPRD is not set +# CONFIG_SND_SOC_STARFIVE is not set # CONFIG_SND_SOC_STI is not set # @@ -5454,7 +5674,6 @@ CONFIG_SND_SOC_INTEL_MACH=y # # CONFIG_SND_SOC_DAVINCI_ASP is not set # 
CONFIG_SND_SOC_DAVINCI_MCASP is not set -# CONFIG_SND_SOC_DAVINCI_VCIF is not set # CONFIG_SND_SOC_OMAP_DMIC is not set # CONFIG_SND_SOC_OMAP_MCBSP is not set # CONFIG_SND_SOC_OMAP_MCPDM is not set @@ -5472,6 +5691,12 @@ CONFIG_SND_SOC_INTEL_MACH=y # CONFIG_SND_SOC_XILINX_SPDIF is not set # CONFIG_SND_SOC_XTFPGA_I2S is not set CONFIG_SND_SOC_SPACEMIT=y +# CONFIG_SPACEMIT_CARD is not set +# CONFIG_SPACEMIT_PCM is not set +# CONFIG_SPACEMIT_I2S is not set +# CONFIG_SPACEMIT_HDMIAUDIO is not set +# CONFIG_SPACEMIT_DUMMYCODEC is not set +# CONFIG_SPACEMIT_AUDIO_DATA_DEBUG is not set CONFIG_SND_SOC_I2C_AND_SPI=y # @@ -5497,9 +5722,13 @@ CONFIG_SND_SOC_I2C_AND_SPI=y # CONFIG_SND_SOC_AK5386 is not set # CONFIG_SND_SOC_AK5558 is not set # CONFIG_SND_SOC_ALC5623 is not set +# CONFIG_SND_SOC_AUDIO_IIO_AUX is not set # CONFIG_SND_SOC_AW8738 is not set +# CONFIG_SND_SOC_AW88395 is not set +# CONFIG_SND_SOC_AW88261 is not set # CONFIG_SND_SOC_BD28623 is not set # CONFIG_SND_SOC_BT_SCO is not set +# CONFIG_SND_SOC_CHV3_CODEC is not set # CONFIG_SND_SOC_CPCAP is not set # CONFIG_SND_SOC_CS35L32 is not set # CONFIG_SND_SOC_CS35L33 is not set @@ -5510,6 +5739,8 @@ CONFIG_SND_SOC_I2C_AND_SPI=y # CONFIG_SND_SOC_CS35L41_I2C is not set # CONFIG_SND_SOC_CS35L45_SPI is not set # CONFIG_SND_SOC_CS35L45_I2C is not set +# CONFIG_SND_SOC_CS35L56_I2C is not set +# CONFIG_SND_SOC_CS35L56_SPI is not set # CONFIG_SND_SOC_CS42L42 is not set # CONFIG_SND_SOC_CS42L51_I2C is not set # CONFIG_SND_SOC_CS42L52 is not set @@ -5534,25 +5765,29 @@ CONFIG_SND_SOC_I2C_AND_SPI=y # CONFIG_SND_SOC_DA7213 is not set # CONFIG_SND_SOC_DMIC is not set # CONFIG_SND_SOC_ES7134 is not set -# CONFIG_SND_SOC_ES7210 is not set +CONFIG_SND_SOC_ES7210=y # CONFIG_SND_SOC_ES7241 is not set -# CONFIG_SND_SOC_ES8156 is not set -CONFIG_SND_SOC_ES8316=y +CONFIG_SND_SOC_ES8156=y +# CONFIG_SND_SOC_ES8316 is not set +# CONFIG_SND_SOC_ES8323 is not set CONFIG_SND_SOC_ES8326=y # CONFIG_SND_SOC_ES8328_I2C is not set # CONFIG_SND_SOC_ES8328_SPI is not set # CONFIG_SND_SOC_GTM601 is not set # CONFIG_SND_SOC_HDA is not set # CONFIG_SND_SOC_ICS43432 is not set +# CONFIG_SND_SOC_IDT821034 is not set # CONFIG_SND_SOC_INNO_RK3036 is not set # CONFIG_SND_SOC_LOCHNAGAR_SC is not set # CONFIG_SND_SOC_MAX98088 is not set +# CONFIG_SND_SOC_MAX98090 is not set # CONFIG_SND_SOC_MAX98357A is not set # CONFIG_SND_SOC_MAX98504 is not set # CONFIG_SND_SOC_MAX9867 is not set # CONFIG_SND_SOC_MAX98927 is not set # CONFIG_SND_SOC_MAX98520 is not set # CONFIG_SND_SOC_MAX98373_I2C is not set +# CONFIG_SND_SOC_MAX98388 is not set # CONFIG_SND_SOC_MAX98390 is not set # CONFIG_SND_SOC_MAX98396 is not set # CONFIG_SND_SOC_MAX9860 is not set @@ -5571,6 +5806,7 @@ CONFIG_SND_SOC_ES8326=y # CONFIG_SND_SOC_PCM5102A is not set # CONFIG_SND_SOC_PCM512x_I2C is not set # CONFIG_SND_SOC_PCM512x_SPI is not set +# CONFIG_SND_SOC_PEB2466 is not set # CONFIG_SND_SOC_RK3328 is not set # CONFIG_SND_SOC_RK817 is not set # CONFIG_SND_SOC_RT5616 is not set @@ -5581,12 +5817,14 @@ CONFIG_SND_SOC_ES8326=y # CONFIG_SND_SOC_SGTL5000 is not set # CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set # CONFIG_SND_SOC_SIMPLE_MUX is not set +# CONFIG_SND_SOC_SMA1303 is not set # CONFIG_SND_SOC_SPDIF is not set # CONFIG_SND_SOC_SRC4XXX_I2C is not set # CONFIG_SND_SOC_SSM2305 is not set # CONFIG_SND_SOC_SSM2518 is not set # CONFIG_SND_SOC_SSM2602_SPI is not set # CONFIG_SND_SOC_SSM2602_I2C is not set +# CONFIG_SND_SOC_SSM3515 is not set # CONFIG_SND_SOC_SSM4567 is not set # CONFIG_SND_SOC_STA32X is not set # 
CONFIG_SND_SOC_STA350 is not set @@ -5596,6 +5834,7 @@ CONFIG_SND_SOC_ES8326=y # CONFIG_SND_SOC_TAS2764 is not set # CONFIG_SND_SOC_TAS2770 is not set # CONFIG_SND_SOC_TAS2780 is not set +# CONFIG_SND_SOC_TAS2781_I2C is not set # CONFIG_SND_SOC_TAS5086 is not set # CONFIG_SND_SOC_TAS571X is not set # CONFIG_SND_SOC_TAS5720 is not set @@ -5638,6 +5877,7 @@ CONFIG_SND_SOC_ES8326=y # CONFIG_SND_SOC_WM8904 is not set # CONFIG_SND_SOC_WM8940 is not set # CONFIG_SND_SOC_WM8960 is not set +# CONFIG_SND_SOC_WM8961 is not set # CONFIG_SND_SOC_WM8962 is not set # CONFIG_SND_SOC_WM8974 is not set # CONFIG_SND_SOC_WM8978 is not set @@ -5666,10 +5906,7 @@ CONFIG_SND_SIMPLE_CARD=y # CONFIG_SND_AUDIO_GRAPH_CARD2 is not set # CONFIG_SND_TEST_COMPONENT is not set # CONFIG_SND_VIRTIO is not set - -# -# HID support -# +CONFIG_HID_SUPPORT=y CONFIG_HID=y # CONFIG_HID_BATTERY_STRENGTH is not set # CONFIG_HIDRAW is not set @@ -5703,11 +5940,13 @@ CONFIG_HID_GENERIC=y # CONFIG_HID_ELAN is not set # CONFIG_HID_ELECOM is not set # CONFIG_HID_ELO is not set +# CONFIG_HID_EVISION is not set # CONFIG_HID_EZKEY is not set # CONFIG_HID_GEMBIRD is not set # CONFIG_HID_GFRM is not set # CONFIG_HID_GLORIOUS is not set # CONFIG_HID_HOLTEK is not set +# CONFIG_HID_GOOGLE_STADIA_FF is not set # CONFIG_HID_VIVALDI is not set # CONFIG_HID_GT683R is not set # CONFIG_HID_KEYTOUCH is not set @@ -5739,6 +5978,7 @@ CONFIG_HID_MULTITOUCH=y # CONFIG_HID_NINTENDO is not set # CONFIG_HID_NTI is not set # CONFIG_HID_NTRIG is not set +# CONFIG_HID_NVIDIA_SHIELD is not set # CONFIG_HID_ORTEK is not set # CONFIG_HID_PANTHERLORD is not set # CONFIG_HID_PENMOUNT is not set @@ -5768,7 +6008,6 @@ CONFIG_HID_MULTITOUCH=y # CONFIG_HID_THINGM is not set # CONFIG_HID_THRUSTMASTER is not set # CONFIG_HID_UDRAW_PS3 is not set -# CONFIG_HID_U2FZERO is not set # CONFIG_HID_WACOM is not set # CONFIG_HID_WIIMOTE is not set # CONFIG_HID_XINMO is not set @@ -5776,9 +6015,15 @@ CONFIG_HID_MULTITOUCH=y # CONFIG_HID_ZYDACRON is not set # CONFIG_HID_SENSOR_HUB is not set # CONFIG_HID_ALPS is not set +# CONFIG_HID_MCP2200 is not set # CONFIG_HID_MCP2221 is not set # end of Special HID drivers +# +# HID-BPF support +# +# end of HID-BPF support + # # USB HID support # @@ -5787,14 +6032,10 @@ CONFIG_USB_HID=y CONFIG_USB_HIDDEV=y # end of USB HID support -# -# I2C HID support -# +CONFIG_I2C_HID=y CONFIG_I2C_HID_OF=y # CONFIG_I2C_HID_OF_ELAN is not set # CONFIG_I2C_HID_OF_GOODIX is not set -# end of I2C HID support - CONFIG_I2C_HID_CORE=y # @@ -5807,14 +6048,13 @@ CONFIG_I2C_HID_CORE=y # # CONFIG_AMD_SFH_HID is not set # end of AMD SFH HID Support -# end of HID support CONFIG_USB_OHCI_LITTLE_ENDIAN=y CONFIG_USB_SUPPORT=y CONFIG_USB_COMMON=y # CONFIG_USB_LED_TRIG is not set # CONFIG_USB_ULPI_BUS is not set -# CONFIG_USB_CONN_GPIO is not set +CONFIG_USB_CONN_GPIO=y CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB=y CONFIG_USB_PCI=y @@ -5826,9 +6066,10 @@ CONFIG_USB_PCI=y CONFIG_USB_DEFAULT_PERSIST=y # CONFIG_USB_FEW_INIT_RETRIES is not set # CONFIG_USB_DYNAMIC_MINORS is not set -# CONFIG_USB_OTG is not set +CONFIG_USB_OTG=y # CONFIG_USB_OTG_PRODUCTLIST is not set # CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set +# CONFIG_USB_OTG_FSM is not set # CONFIG_USB_LEDS_TRIGGER_USBPORT is not set CONFIG_USB_AUTOSUSPEND_DELAY=2 # CONFIG_USB_MON is not set @@ -5861,12 +6102,10 @@ CONFIG_USB_EHCI_PCI=y # CONFIG_USB_EHCI_EXYNOS is not set # CONFIG_USB_EHCI_MV is not set CONFIG_USB_EHCI_K1X=y -# CONFIG_USB_CNS3XXX_EHCI is not set # CONFIG_USB_EHCI_HCD_PLATFORM is not set # 
CONFIG_USB_OXU210HP_HCD is not set # CONFIG_USB_ISP116X_HCD is not set # CONFIG_USB_ISP1362_HCD is not set -# CONFIG_USB_FOTG210_HCD is not set # CONFIG_USB_MAX3421_HCD is not set # CONFIG_USB_OHCI_HCD is not set # CONFIG_USB_UHCI_HCD is not set @@ -5913,7 +6152,12 @@ CONFIG_USB_UAS=y # CONFIG_USB_MDC800 is not set # CONFIG_USB_MICROTEK is not set # CONFIG_USBIP_CORE is not set + +# +# USB dual-mode controller drivers +# # CONFIG_USB_CDNS_SUPPORT is not set +# CONFIG_USB_FOTG210 is not set # CONFIG_USB_MTU3 is not set # CONFIG_USB_MUSB_HDRC is not set CONFIG_USB_DWC3=y @@ -5924,16 +6168,18 @@ CONFIG_USB_DWC3_DUAL_ROLE=y # # Platform Glue Driver Support # -CONFIG_USB_DWC3_OMAP=y -CONFIG_USB_DWC3_EXYNOS=y +# CONFIG_USB_DWC3_OMAP is not set +# CONFIG_USB_DWC3_EXYNOS is not set CONFIG_USB_DWC3_HAPS=y -CONFIG_USB_DWC3_KEYSTONE=y -CONFIG_USB_DWC3_MESON_G12A=y +# CONFIG_USB_DWC3_KEYSTONE is not set +# CONFIG_USB_DWC3_MESON_G12A is not set # CONFIG_USB_DWC3_OF_SIMPLE is not set -CONFIG_USB_DWC3_ST=y -CONFIG_USB_DWC3_QCOM=y -CONFIG_USB_DWC3_IMX8MP=y -CONFIG_USB_DWC3_AM62=y +# CONFIG_USB_DWC3_ST is not set +# CONFIG_USB_DWC3_QCOM is not set +# CONFIG_USB_DWC3_IMX8MP is not set +CONFIG_USB_DWC3_XILINX=y +# CONFIG_USB_DWC3_AM62 is not set +CONFIG_USB_DWC3_OCTEON=y CONFIG_USB_DWC3_SPACEMIT=y # CONFIG_USB_DWC2 is not set # CONFIG_USB_CHIPIDEA is not set @@ -6010,7 +6256,6 @@ CONFIG_USB_SERIAL_DEBUG=m # CONFIG_USB_CYPRESS_CY7C63 is not set # CONFIG_USB_CYTHERM is not set # CONFIG_USB_IDMOUSE is not set -# CONFIG_USB_FTDI_ELAN is not set # CONFIG_USB_APPLEDISPLAY is not set # CONFIG_USB_QCOM_EUD is not set # CONFIG_APPLE_MFI_FASTCHARGE is not set @@ -6027,22 +6272,21 @@ CONFIG_USB_EZUSB_FX2=m # CONFIG_USB_HSIC_USB3503 is not set # CONFIG_USB_HSIC_USB4604 is not set # CONFIG_USB_LINK_LAYER_TEST is not set -# CONFIG_USB_CHAOSKEY is not set # CONFIG_BRCM_USB_PINMAP is not set # CONFIG_USB_ONBOARD_HUB is not set CONFIG_SPACEMIT_ONBOARD_USB_HUB=y +# CONFIG_USB_ATM is not set # # USB Physical Layer drivers # CONFIG_USB_PHY=y # CONFIG_NOP_USB_XCEIV is not set -# CONFIG_USB_GPIO_VBUS is not set # CONFIG_USB_ISP1301 is not set +CONFIG_USB_K1XCI_OTG=y CONFIG_K1XCI_USB2_PHY=y # CONFIG_USB_TEGRA_PHY is not set # CONFIG_USB_ULPI is not set -# CONFIG_JZ4770_PHY is not set # end of USB Physical Layer drivers CONFIG_USB_GADGET=y @@ -6056,10 +6300,11 @@ CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 # USB Peripheral Controller # # CONFIG_USB_LPC32XX is not set -# CONFIG_USB_FOTG210_UDC is not set # CONFIG_USB_GR_UDC is not set # CONFIG_USB_R8A66597 is not set +# CONFIG_USB_RZV2M_USB3DRD is not set # CONFIG_USB_RENESAS_USB3 is not set +# CONFIG_USB_RENESAS_USBF is not set # CONFIG_USB_PXA27X is not set # CONFIG_USB_MV_UDC is not set CONFIG_USB_K1X_UDC=y @@ -6101,6 +6346,7 @@ CONFIG_USB_CONFIGFS_F_FS=y # CONFIG_USB_CONFIGFS_F_UAC1_LEGACY is not set # CONFIG_USB_CONFIGFS_F_UAC2 is not set # CONFIG_USB_CONFIGFS_F_MIDI is not set +# CONFIG_USB_CONFIGFS_F_MIDI2 is not set # CONFIG_USB_CONFIGFS_F_HID is not set CONFIG_USB_CONFIGFS_F_UVC=y # CONFIG_USB_CONFIGFS_F_PRINTER is not set @@ -6141,6 +6387,7 @@ CONFIG_MMC_BLOCK_MINORS=8 # MMC/SD/SDIO Host Controller Drivers # # CONFIG_MMC_DEBUG is not set +# CONFIG_MMC_SUNPLUS is not set CONFIG_MMC_SDHCI=y # CONFIG_MMC_SDHCI_PCI is not set CONFIG_MMC_SDHCI_PLTFM=y @@ -6153,7 +6400,6 @@ CONFIG_MMC_SDHCI_PLTFM=y # CONFIG_MMC_SDHCI_OF_K1PRO is not set CONFIG_MMC_SDHCI_OF_K1X=y # CONFIG_MMC_SDHCI_CADENCE is not set -# CONFIG_MMC_SDHCI_CNS3XXX is not set # CONFIG_MMC_SDHCI_ESDHC_IMX is not set # 
CONFIG_MMC_SDHCI_DOVE is not set # CONFIG_MMC_SDHCI_TEGRA is not set @@ -6175,9 +6421,7 @@ CONFIG_MMC_SDHCI_OF_K1X=y # CONFIG_MMC_TIFM_SD is not set # CONFIG_MMC_DAVINCI is not set # CONFIG_MMC_SPI is not set -# CONFIG_MMC_S3C is not set # CONFIG_MMC_SDHCI_SPRD is not set -# CONFIG_MMC_TMIO is not set # CONFIG_MMC_SDHI is not set # CONFIG_MMC_UNIPHIER is not set # CONFIG_MMC_CB710 is not set @@ -6187,13 +6431,12 @@ CONFIG_MMC_SDHCI_OF_K1X=y # CONFIG_MMC_VUB300 is not set # CONFIG_MMC_USHC is not set # CONFIG_MMC_USDHI6ROL0 is not set -# CONFIG_MMC_REALTEK_PCI is not set -# CONFIG_MMC_REALTEK_USB is not set # CONFIG_MMC_CQHCI is not set # CONFIG_MMC_HSQ is not set # CONFIG_MMC_TOSHIBA_PCI is not set # CONFIG_MMC_BCM2835 is not set # CONFIG_MMC_MTK is not set +# CONFIG_MMC_SDHCI_BRCMSTB is not set # CONFIG_MMC_SDHCI_XENON is not set # CONFIG_MMC_SDHCI_OMAP is not set # CONFIG_MMC_SDHCI_AM654 is not set @@ -6212,6 +6455,7 @@ CONFIG_LEDS_CLASS=y # # CONFIG_LEDS_AN30259A is not set # CONFIG_LEDS_ARIEL is not set +# CONFIG_LEDS_AW200XX is not set # CONFIG_LEDS_AW2013 is not set # CONFIG_LEDS_BCM6328 is not set # CONFIG_LEDS_BCM6358 is not set @@ -6221,7 +6465,6 @@ CONFIG_LEDS_CLASS=y # CONFIG_LEDS_LM3532 is not set # CONFIG_LEDS_LM3642 is not set # CONFIG_LEDS_LM3692X is not set -# CONFIG_LEDS_S3C24XX is not set # CONFIG_LEDS_COBALT_QUBE is not set # CONFIG_LEDS_COBALT_RAQ is not set # CONFIG_LEDS_PCA9532 is not set @@ -6233,13 +6476,15 @@ CONFIG_LEDS_GPIO=y # CONFIG_LEDS_LP8860 is not set # CONFIG_LEDS_PCA955X is not set # CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_PCA995X is not set # CONFIG_LEDS_DAC124S085 is not set # CONFIG_LEDS_PWM is not set # CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2606MVV is not set # CONFIG_LEDS_BD2802 is not set # CONFIG_LEDS_LT3593 is not set -CONFIG_LEDS_NS2=y -CONFIG_LEDS_NETXBIG=y +# CONFIG_LEDS_NS2 is not set +# CONFIG_LEDS_NETXBIG is not set # CONFIG_LEDS_TCA6507 is not set # CONFIG_LEDS_TLC591XX is not set # CONFIG_LEDS_LM355x is not set @@ -6256,6 +6501,7 @@ CONFIG_LEDS_NETXBIG=y # CONFIG_LEDS_USER is not set # CONFIG_LEDS_SPI_BYTE is not set # CONFIG_LEDS_TI_LMU_COMMON is not set +# CONFIG_LEDS_LM3697 is not set # CONFIG_LEDS_IP30 is not set # CONFIG_LEDS_BCM63138 is not set # CONFIG_LEDS_LGM is not set @@ -6280,7 +6526,6 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_LEDS_TRIGGER_BACKLIGHT=y # CONFIG_LEDS_TRIGGER_CPU is not set # CONFIG_LEDS_TRIGGER_ACTIVITY is not set -CONFIG_LEDS_TRIGGER_GPIO=y # CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set # @@ -6403,7 +6648,6 @@ CONFIG_RTC_I2C_AND_SPI=y # CONFIG_RTC_DRV_MSM6242 is not set # CONFIG_RTC_DRV_BQ4802 is not set # CONFIG_RTC_DRV_RP5C01 is not set -# CONFIG_RTC_DRV_V3020 is not set # CONFIG_RTC_DRV_GAMECUBE is not set # CONFIG_RTC_DRV_SC27XX is not set CONFIG_RTC_DRV_SPEAR=y @@ -6413,7 +6657,6 @@ CONFIG_RTC_DRV_SPEAR=y # on-CPU RTC drivers # # CONFIG_RTC_DRV_ASM9260 is not set -# CONFIG_RTC_DRV_DAVINCI is not set # CONFIG_RTC_DRV_DIGICOLOR is not set # CONFIG_RTC_DRV_FSL_FTM_ALARM is not set # CONFIG_RTC_DRV_MESON is not set @@ -6435,6 +6678,7 @@ CONFIG_RTC_DRV_SA1100=y # CONFIG_RTC_DRV_FTRTC010 is not set # CONFIG_RTC_DRV_STMP is not set # CONFIG_RTC_DRV_JZ4740 is not set +# CONFIG_RTC_DRV_LOONGSON is not set # CONFIG_RTC_DRV_LPC24XX is not set # CONFIG_RTC_DRV_LPC32XX is not set # CONFIG_RTC_DRV_PM8XXX is not set @@ -6442,6 +6686,7 @@ CONFIG_RTC_DRV_SA1100=y # CONFIG_RTC_DRV_MXC is not set # CONFIG_RTC_DRV_MXC_V2 is not set # CONFIG_RTC_DRV_SNVS is not set +# CONFIG_RTC_DRV_BBNSM is not set # 
CONFIG_RTC_DRV_MOXART is not set # CONFIG_RTC_DRV_MT2712 is not set # CONFIG_RTC_DRV_MT6397 is not set @@ -6479,7 +6724,6 @@ CONFIG_DMA_OF=y # CONFIG_HISI_DMA is not set # CONFIG_IMG_MDC_DMA is not set # CONFIG_INTEL_IDMA64 is not set -# CONFIG_INTEL_IOP_ADMA is not set # CONFIG_K3_DMA is not set # CONFIG_MCF_EDMA is not set # CONFIG_MILBEAUT_HDMAC is not set @@ -6499,13 +6743,15 @@ CONFIG_ADMA_SPACEMIT_K1X=y # CONFIG_STM32_DMAMUX is not set # CONFIG_STM32_MDMA is not set # CONFIG_SPRD_DMA is not set -# CONFIG_S3C24XX_DMAC is not set +# CONFIG_TEGRA186_GPC_DMA is not set # CONFIG_TEGRA20_APB_DMA is not set # CONFIG_TEGRA210_ADMA is not set # CONFIG_TIMB_DMA is not set # CONFIG_UNIPHIER_MDMAC is not set # CONFIG_UNIPHIER_XDMAC is not set # CONFIG_XGENE_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_XDMA is not set # CONFIG_XILINX_ZYNQMP_DMA is not set # CONFIG_XILINX_ZYNQMP_DPDMA is not set CONFIG_USERSPACE_DMA=y @@ -6516,7 +6762,6 @@ CONFIG_USERSPACE_DMA=y # CONFIG_DW_DMAC is not set # CONFIG_DW_DMAC_PCI is not set # CONFIG_DW_EDMA is not set -# CONFIG_DW_EDMA_PCIE is not set # CONFIG_SF_PDMA is not set # CONFIG_SH_DMAE_BASE is not set # CONFIG_RCAR_DMAC is not set @@ -6547,11 +6792,10 @@ CONFIG_DMABUF_HEAPS_SYSTEM=y CONFIG_DMABUF_HEAPS_CMA=y # end of DMABUF options -# CONFIG_AUXDISPLAY is not set -CONFIG_UIO=y +CONFIG_UIO=m # CONFIG_UIO_CIF is not set -CONFIG_UIO_PDRV_GENIRQ=y -CONFIG_UIO_DMEM_GENIRQ=y +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m # CONFIG_UIO_AEC is not set # CONFIG_UIO_SERCOS3 is not set # CONFIG_UIO_PCI_GENERIC is not set @@ -6573,7 +6817,82 @@ CONFIG_VIRTIO=y # CONFIG_GREYBUS is not set # CONFIG_COMEDI is not set -# CONFIG_STAGING is not set +CONFIG_STAGING=y +# CONFIG_PRISM2_USB is not set +CONFIG_RTL8192U=m +CONFIG_RTLLIB=m +CONFIG_RTLLIB_CRYPTO_CCMP=m +CONFIG_RTLLIB_CRYPTO_TKIP=m +CONFIG_RTLLIB_CRYPTO_WEP=m +CONFIG_RTL8192E=m +CONFIG_RTL8723BS=m +CONFIG_R8712U=m +# CONFIG_RTS5208 is not set +# CONFIG_OCTEON_ETHERNET is not set +# CONFIG_VT6655 is not set +# CONFIG_VT6656 is not set + +# +# IIO staging drivers +# + +# +# Accelerometers +# +CONFIG_ADIS16203=m +CONFIG_ADIS16240=m +# end of Accelerometers + +# +# Analog to digital converters +# +CONFIG_AD7816=m +# end of Analog to digital converters + +# +# Analog digital bi-direction converters +# +CONFIG_ADT7316=m +CONFIG_ADT7316_SPI=m +CONFIG_ADT7316_I2C=m +# end of Analog digital bi-direction converters + +# +# Direct Digital Synthesis +# +CONFIG_AD9832=m +CONFIG_AD9834=m +# end of Direct Digital Synthesis + +# +# Network Analyzer, Impedance Converters +# +CONFIG_AD5933=m +# end of Network Analyzer, Impedance Converters + +# +# Resolver to digital converters +# +CONFIG_AD2S1210=m +# end of Resolver to digital converters +# end of IIO staging drivers + +# CONFIG_FB_SM750 is not set +# CONFIG_USB_EMXX is not set +# CONFIG_STAGING_MEDIA is not set +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_FB_TFT is not set +# CONFIG_KS7010 is not set +CONFIG_BCM_VIDEOCORE=y +# CONFIG_BCM2835_VCHIQ is not set +# CONFIG_SND_BCM2835 is not set +# CONFIG_VIDEO_BCM2835 is not set +# CONFIG_PI433 is not set +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_QLGE is not set +# CONFIG_VME_BUS is not set # CONFIG_GOLDFISH is not set # CONFIG_CHROME_PLATFORMS is not set # CONFIG_MELLANOX_PLATFORM is not set @@ -6619,19 +6938,23 @@ CONFIG_COMMON_CLK=y # CONFIG_CLK_QORIQ is not set # CONFIG_CLK_LS1028A_PLLDIG is not set # CONFIG_COMMON_CLK_XGENE is not set +# 
CONFIG_COMMON_CLK_LOONGSON2 is not set # CONFIG_COMMON_CLK_PWM is not set -# CONFIG_COMMON_CLK_OXNAS is not set # CONFIG_COMMON_CLK_RS9_PCIE is not set +# CONFIG_COMMON_CLK_SI521XX is not set +# CONFIG_COMMON_CLK_VC3 is not set # CONFIG_COMMON_CLK_VC5 is not set # CONFIG_COMMON_CLK_VC7 is not set # CONFIG_COMMON_CLK_MMP2_AUDIO is not set # CONFIG_COMMON_CLK_FIXED_MMIO is not set +# CONFIG_COMMON_CLK_SP7021 is not set # CONFIG_CLK_ACTIONS is not set # CONFIG_CLK_BAIKAL_T1 is not set # CONFIG_CLK_BCM2711_DVP is not set # CONFIG_CLK_BCM2835 is not set # CONFIG_CLK_BCM_63XX is not set # CONFIG_CLK_BCM_63XX_GATE is not set +# CONFIG_CLK_BCM63268_TIMER is not set # CONFIG_CLK_BCM_KONA is not set # CONFIG_CLK_BCM_CYGNUS is not set # CONFIG_CLK_BCM_HR2 is not set @@ -6657,11 +6980,13 @@ CONFIG_COMMON_CLK=y # CONFIG_CLK_IMX8MQ is not set # CONFIG_CLK_IMX8ULP is not set # CONFIG_CLK_IMX93 is not set +# CONFIG_CLK_IMXRT1050 is not set # # Ingenic SoCs drivers # # CONFIG_INGENIC_CGU_JZ4740 is not set +# CONFIG_INGENIC_CGU_JZ4755 is not set # CONFIG_INGENIC_CGU_JZ4725B is not set # CONFIG_INGENIC_CGU_JZ4760 is not set # CONFIG_INGENIC_CGU_JZ4770 is not set @@ -6677,6 +7002,7 @@ CONFIG_COMMON_CLK=y # # Clock driver for MediaTek SoC # +# CONFIG_COMMON_CLK_MEDIATEK_FHCTL is not set # CONFIG_COMMON_CLK_MT2701 is not set # CONFIG_COMMON_CLK_MT2712 is not set # CONFIG_COMMON_CLK_MT6765 is not set @@ -6685,12 +7011,14 @@ CONFIG_COMMON_CLK=y # CONFIG_COMMON_CLK_MT6797 is not set # CONFIG_COMMON_CLK_MT7622 is not set # CONFIG_COMMON_CLK_MT7629 is not set +# CONFIG_COMMON_CLK_MT7981 is not set # CONFIG_COMMON_CLK_MT7986 is not set # CONFIG_COMMON_CLK_MT8135 is not set # CONFIG_COMMON_CLK_MT8167 is not set # CONFIG_COMMON_CLK_MT8173 is not set # CONFIG_COMMON_CLK_MT8183 is not set # CONFIG_COMMON_CLK_MT8186 is not set +# CONFIG_COMMON_CLK_MT8188 is not set # CONFIG_COMMON_CLK_MT8192 is not set # CONFIG_COMMON_CLK_MT8195 is not set # CONFIG_COMMON_CLK_MT8365 is not set @@ -6702,20 +7030,23 @@ CONFIG_COMMON_CLK=y # # end of Clock support for Amlogic platforms +# CONFIG_MSTAR_MSC313_CPUPLL is not set # CONFIG_MSTAR_MSC313_MPLL is not set # CONFIG_MCHP_CLK_MPFS is not set +CONFIG_COMMON_CLK_NUVOTON=y +CONFIG_CLK_MA35D1=y # CONFIG_COMMON_CLK_PISTACHIO is not set # CONFIG_COMMON_CLK_QCOM is not set # CONFIG_CLK_MT7621 is not set +# CONFIG_CLK_MTMIPS is not set # CONFIG_CLK_RENESAS is not set # CONFIG_COMMON_CLK_SAMSUNG is not set -# CONFIG_S3C2410_COMMON_CLK is not set -# CONFIG_S3C2412_COMMON_CLK is not set -# CONFIG_S3C2443_COMMON_CLK is not set # CONFIG_CLK_SIFIVE is not set # CONFIG_CLK_INTEL_SOCFPGA is not set # CONFIG_SPRD_COMMON_CLK is not set # CONFIG_CLK_STARFIVE_JH7100 is not set +# CONFIG_CLK_STARFIVE_JH7110_PLL is not set +# CONFIG_CLK_STARFIVE_JH7110_SYS is not set # CONFIG_CLK_SUNXI is not set # CONFIG_SUNXI_CCU is not set # CONFIG_COMMON_CLK_TI_ADPLL is not set @@ -6733,7 +7064,6 @@ CONFIG_SPACEMIT_K1X_CCU=y # CONFIG_TIMER_OF=y CONFIG_TIMER_PROBE=y -CONFIG_CLKSRC_MMIO=y # CONFIG_BCM2835_TIMER is not set # CONFIG_BCM_KONA_TIMER is not set # CONFIG_DAVINCI_TIMER is not set @@ -6745,7 +7075,7 @@ CONFIG_CLKSRC_MMIO=y # CONFIG_MESON6_TIMER is not set # CONFIG_OWL_TIMER is not set # CONFIG_RDA_TIMER is not set -CONFIG_SPACEMIT_K1X_TIMER=y +# CONFIG_SPACEMIT_K1X_TIMER is not set # CONFIG_SUN4I_TIMER is not set # CONFIG_SUN5I_HSTIMER is not set # CONFIG_TEGRA_TIMER is not set @@ -6770,8 +7100,8 @@ CONFIG_SPACEMIT_K1X_TIMER=y # CONFIG_ATMEL_ST is not set # CONFIG_CLKSRC_SAMSUNG_PWM is not set # 
CONFIG_FSL_FTM_TIMER is not set -# CONFIG_OXNAS_RPS_TIMER is not set # CONFIG_MTK_TIMER is not set +# CONFIG_MTK_CPUX_TIMER is not set # CONFIG_SPRD_TIMER is not set # CONFIG_CLKSRC_JCORE_PIT is not set # CONFIG_SH_TIMER_CMT is not set @@ -6782,6 +7112,7 @@ CONFIG_SPACEMIT_K1X_TIMER=y # CONFIG_CLKSRC_VERSATILE is not set # CONFIG_CLKSRC_PXA is not set # CONFIG_TIMER_IMX_SYS_CTR is not set +# CONFIG_CLKSRC_LOONGSON1_PWM is not set # CONFIG_CLKSRC_ST_LPC is not set # CONFIG_GXP_TIMER is not set CONFIG_RISCV_TIMER=y @@ -6790,7 +7121,6 @@ CONFIG_RISCV_TIMER=y # CONFIG_INGENIC_TIMER is not set # CONFIG_INGENIC_SYSOST is not set # CONFIG_INGENIC_OST is not set -# CONFIG_MICROCHIP_PIT64B is not set # end of Clock Source drivers CONFIG_MAILBOX=y @@ -6814,13 +7144,39 @@ CONFIG_MAILBOX=y CONFIG_SPACEMIT_MAILBOX=y # CONFIG_K1PRO_MAILBOX is not set CONFIG_K1X_MAILBOX=y -# CONFIG_IOMMU_SUPPORT is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# CONFIG_IOMMU_IO_PGTABLE_LPAE is not set +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set +# CONFIG_IOMMU_IO_PGTABLE_DART is not set +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +CONFIG_IOMMU_DEFAULT_DMA_STRICT=y +# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_OF_IOMMU=y +CONFIG_IOMMUFD=m +# CONFIG_OMAP_IOMMU is not set +# CONFIG_ROCKCHIP_IOMMU is not set +# CONFIG_SUN50I_IOMMU is not set +# CONFIG_EXYNOS_IOMMU is not set +# CONFIG_IPMMU_VMSA is not set +# CONFIG_APPLE_DART is not set +# CONFIG_ARM_SMMU is not set +# CONFIG_MTK_IOMMU is not set +# CONFIG_QCOM_IOMMU is not set +# CONFIG_SPRD_IOMMU is not set # # Remoteproc drivers # CONFIG_REMOTEPROC=y -CONFIG_REMOTEPROC_CDEV=y +# CONFIG_REMOTEPROC_CDEV is not set # CONFIG_INGENIC_VPU_RPROC is not set # CONFIG_MTK_SCP is not set # CONFIG_MESON_MX_AO_ARC_REMOTEPROC is not set @@ -6834,8 +7190,8 @@ CONFIG_K1X_REMOTEPROC=y # Rpmsg drivers # CONFIG_RPMSG=y -CONFIG_RPMSG_CHAR=y -CONFIG_RPMSG_CTRL=y +# CONFIG_RPMSG_CHAR is not set +# CONFIG_RPMSG_CTRL is not set CONFIG_RPMSG_NS=y # CONFIG_RPMSG_QCOM_GLINK_RPM is not set CONFIG_RPMSG_VIRTIO=y @@ -6901,6 +7257,11 @@ CONFIG_RPMSG_VIRTIO=y # # end of fujitsu SoC drivers +# +# Hisilicon SoC drivers +# +# end of Hisilicon SoC drivers + # # i.MX SoC drivers # @@ -6922,6 +7283,8 @@ CONFIG_RPMSG_VIRTIO=y # CONFIG_LITEX_SOC_CONTROLLER is not set # end of Enable LiteX SoC Builder specific drivers +# CONFIG_LOONGSON2_GUTS is not set + # # MediaTek SoC drivers # @@ -6929,11 +7292,14 @@ CONFIG_RPMSG_VIRTIO=y # CONFIG_MTK_DEVAPC is not set # CONFIG_MTK_INFRACFG is not set # CONFIG_MTK_PMIC_WRAP is not set +# CONFIG_MTK_REGULATOR_COUPLER is not set # CONFIG_MTK_SCPSYS is not set # CONFIG_MTK_SCPSYS_PM_DOMAINS is not set # CONFIG_MTK_MMSYS is not set # end of MediaTek SoC drivers +# CONFIG_WPCM450_SOC is not set + # # Qualcomm SoC drivers # @@ -6942,6 +7308,8 @@ CONFIG_RPMSG_VIRTIO=y # CONFIG_QCOM_GENI_SE is not set # CONFIG_QCOM_GSBI is not set # CONFIG_QCOM_LLCC is not set +# CONFIG_QCOM_RAMP_CTRL is not set +# CONFIG_QCOM_RPM_MASTER_STATS is not set # CONFIG_QCOM_RPMH is not set # CONFIG_QCOM_SMD_RPM is not set # CONFIG_QCOM_SPM is not set @@ -6955,6 +7323,8 @@ CONFIG_RPMSG_VIRTIO=y # CONFIG_ROCKCHIP_IODOMAIN is not set # CONFIG_ROCKCHIP_PM_DOMAINS is not set # CONFIG_SOC_SAMSUNG is not set +# CONFIG_JH71XX_PMU is not set +# CONFIG_SUN20I_PPU is not set # CONFIG_SOC_TEGRA20_VOLTAGE_COUPLER is not set # CONFIG_SOC_TEGRA30_VOLTAGE_COUPLER is not set # 
CONFIG_SOC_TI is not set @@ -6967,8 +7337,10 @@ CONFIG_RPMSG_VIRTIO=y CONFIG_SPACEMIT_PM_DOMAINS=y CONFIG_SPACEMIT_REBOOT_CONTROL=y -# CONFIG_SPACEMIT_LID_CONTROL is not set +CONFIG_SPACEMIT_LID_CONTROL=y CONFIG_SPACEMI_K1X_DMA_RANGE=y +CONFIG_SPACEMI_SOCINFO=y +# CONFIG_SPACEMIT_DDRBW is not set CONFIG_CHIP_MEDIA_JPU=y # CONFIG_JPU_ENABLE_DEBUG_MSG is not set CONFIG_SPACEMIT_V2D=y @@ -6981,39 +7353,593 @@ CONFIG_EXTCON=y # # Extcon Device Drivers # +# CONFIG_EXTCON_ADC_JACK is not set # CONFIG_EXTCON_FSA9480 is not set -# CONFIG_EXTCON_USB_K1XCI is not set -CONFIG_EXTCON_GPIO=y +CONFIG_EXTCON_USB_K1XCI=y +# CONFIG_EXTCON_GPIO is not set # CONFIG_EXTCON_MAX3355 is not set # CONFIG_EXTCON_PTN5150 is not set # CONFIG_EXTCON_QCOM_SPMI_MISC is not set # CONFIG_EXTCON_RT8973A is not set # CONFIG_EXTCON_SM5502 is not set CONFIG_EXTCON_USB_GPIO=y -CONFIG_MEMORY=y -# CONFIG_ATMEL_SDRAMC is not set -# CONFIG_ATMEL_EBI is not set -# CONFIG_BRCMSTB_DPFE is not set -# CONFIG_BRCMSTB_MEMC is not set -# CONFIG_BT1_L2_CTL is not set -# CONFIG_TI_AEMIF is not set -# CONFIG_TI_EMIF is not set -# CONFIG_OMAP_GPMC is not set -# CONFIG_MVEBU_DEVBUS is not set -# CONFIG_FSL_CORENET_CF is not set -# CONFIG_FSL_IFC is not set -# CONFIG_JZ4780_NEMC is not set -# CONFIG_MTK_SMI is not set -# CONFIG_DA8XX_DDRCTL is not set -# CONFIG_RENESAS_RPCIF is not set -# CONFIG_STM32_FMC2_EBI is not set -# CONFIG_SAMSUNG_MC is not set -# CONFIG_TEGRA_MC is not set -# CONFIG_IIO is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=y +CONFIG_IIO_BUFFER=y +CONFIG_IIO_BUFFER_CB=m +CONFIG_IIO_BUFFER_DMA=m +CONFIG_IIO_BUFFER_DMAENGINE=m +CONFIG_IIO_BUFFER_HW_CONSUMER=m +CONFIG_IIO_KFIFO_BUF=y +CONFIG_IIO_TRIGGERED_BUFFER=y +CONFIG_IIO_CONFIGFS=m +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +CONFIG_IIO_SW_DEVICE=m +CONFIG_IIO_SW_TRIGGER=m +CONFIG_IIO_TRIGGERED_EVENT=m + +# +# Accelerometers +# +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set +# CONFIG_ADXL313_I2C is not set +# CONFIG_ADXL313_SPI is not set +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set +# CONFIG_ADXL355_I2C is not set +# CONFIG_ADXL355_SPI is not set +# CONFIG_ADXL367_SPI is not set +# CONFIG_ADXL367_I2C is not set +# CONFIG_ADXL372_SPI is not set +# CONFIG_ADXL372_I2C is not set +# CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set +# CONFIG_BMA400 is not set +# CONFIG_BMC150_ACCEL is not set +# CONFIG_BMI088_ACCEL is not set +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD06 is not set +# CONFIG_DMARD09 is not set +# CONFIG_DMARD10 is not set +# CONFIG_FXLS8962AF_I2C is not set +# CONFIG_FXLS8962AF_SPI is not set +# CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_IIO_KX022A_SPI is not set +# CONFIG_IIO_KX022A_I2C is not set +# CONFIG_KXSD9 is not set +# CONFIG_KXCJK1013 is not set +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# CONFIG_MSA311 is not set +# CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_SCA3300 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set +# end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD4130 is not set +# CONFIG_AD7091R5 is not set +# CONFIG_AD7124 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7266 is not set +# CONFIG_AD7280 is not set +# CONFIG_AD7291 is not set +# CONFIG_AD7292 is not set +# CONFIG_AD7298 is not set 
+# CONFIG_AD7476 is not set +# CONFIG_AD7606_IFACE_PARALLEL is not set +# CONFIG_AD7606_IFACE_SPI is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7768_1 is not set +# CONFIG_AD7780 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# CONFIG_AD7923 is not set +# CONFIG_AD7949 is not set +# CONFIG_AD799X is not set +# CONFIG_AD9467 is not set +# CONFIG_ADI_AXI_ADC is not set +# CONFIG_ASPEED_ADC is not set +# CONFIG_AT91_ADC is not set +# CONFIG_AT91_SAMA5D2_ADC is not set +# CONFIG_BCM_IPROC_ADC is not set +# CONFIG_BERLIN2_ADC is not set +# CONFIG_CC10001_ADC is not set +# CONFIG_ENVELOPE_DETECTOR is not set +# CONFIG_EP93XX_ADC is not set +# CONFIG_EXYNOS_ADC is not set +# CONFIG_HI8435 is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_INGENIC_ADC is not set +# CONFIG_IMX7D_ADC is not set +# CONFIG_IMX8QXP_ADC is not set +# CONFIG_IMX93_ADC is not set +# CONFIG_LPC18XX_ADC is not set +# CONFIG_LPC32XX_ADC is not set +# CONFIG_LTC2471 is not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2496 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX11205 is not set +# CONFIG_MAX11410 is not set +# CONFIG_MAX1241 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set +# CONFIG_MCP3422 is not set +# CONFIG_MCP3911 is not set +# CONFIG_MEDIATEK_MT6577_AUXADC is not set +# CONFIG_MESON_SARADC is not set +# CONFIG_NAU7802 is not set +# CONFIG_NPCM_ADC is not set +# CONFIG_RCAR_GYRO_ADC is not set +# CONFIG_ROCKCHIP_SARADC is not set +# CONFIG_RICHTEK_RTQ6056 is not set +# CONFIG_RZG2L_ADC is not set +# CONFIG_SC27XX_ADC is not set +# CONFIG_SPEAR_ADC is not set +# CONFIG_SD_ADC_MODULATOR is not set +# CONFIG_STM32_ADC_CORE is not set +# CONFIG_STM32_DFSDM_CORE is not set +# CONFIG_STM32_DFSDM_ADC is not set +# CONFIG_SUN20I_GPADC is not set +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7924 is not set +# CONFIG_TI_ADS1100 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_ADS8344 is not set +# CONFIG_TI_ADS8688 is not set +# CONFIG_TI_ADS124S08 is not set +# CONFIG_TI_ADS131E08 is not set +# CONFIG_TI_LMP92064 is not set +# CONFIG_TI_TLC4541 is not set +# CONFIG_TI_TSC2046 is not set +# CONFIG_VF610_ADC is not set +# CONFIG_XILINX_XADC is not set +# CONFIG_XILINX_AMS is not set +CONFIG_SPACEMIT_P1_ADC=y +# end of Analog to digital converters + +# +# Analog to digital and digital to analog converters +# +# CONFIG_AD74115 is not set +# CONFIG_AD74413R is not set +# end of Analog to digital and digital to analog converters + +# +# Analog Front Ends +# +CONFIG_IIO_RESCALE=m +# end of Analog Front Ends + +# +# Amplifiers +# +# CONFIG_AD8366 is not set +# CONFIG_ADA4250 is not set +# CONFIG_HMC425 is not set +# end of Amplifiers + +# +# Capacitance to digital converters +# +# CONFIG_AD7150 is not set +# CONFIG_AD7746 is not set +# end of Capacitance to digital converters + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_ATLAS_EZO_SENSOR is not set +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# CONFIG_PMS7003 is not set +# CONFIG_SCD30_CORE is not set +# CONFIG_SCD4X is not set +# 
CONFIG_SENSIRION_SGP30 is not set +# CONFIG_SENSIRION_SGP40 is not set +# CONFIG_SPS30_I2C is not set +# CONFIG_SPS30_SERIAL is not set +# CONFIG_SENSEAIR_SUNRISE_CO2 is not set +# CONFIG_VZ89X is not set +# end of Chemical Sensors + +# +# Hid Sensor IIO Common +# +# end of Hid Sensor IIO Common + +# +# IIO SCMI Sensors +# +# end of IIO SCMI Sensors + +# +# SSP Sensor Common +# +# CONFIG_IIO_SSP_SENSORHUB is not set +# end of SSP Sensor Common + +# +# Digital to analog converters +# +# CONFIG_AD3552R is not set +# CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set +# CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set +# CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set +# CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2688 is not set +# CONFIG_AD5686_SPI is not set +# CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5766 is not set +# CONFIG_AD5770R is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7293 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set +# CONFIG_DPOT_DAC is not set +# CONFIG_DS4424 is not set +# CONFIG_LPC18XX_DAC is not set +# CONFIG_LTC1660 is not set +# CONFIG_LTC2632 is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MAX5522 is not set +# CONFIG_MAX5821 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4728 is not set +# CONFIG_MCP4922 is not set +# CONFIG_STM32_DAC is not set +# CONFIG_TI_DAC082S085 is not set +# CONFIG_TI_DAC5571 is not set +# CONFIG_TI_DAC7311 is not set +# CONFIG_TI_DAC7612 is not set +# CONFIG_VF610_DAC is not set +# end of Digital to analog converters + +# +# IIO dummy driver +# +# CONFIG_IIO_SIMPLE_DUMMY is not set +# end of IIO dummy driver + +# +# Filters +# +CONFIG_ADMV8818=m +# end of Filters + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +CONFIG_AD9523=m +# end of Clock Generator/Distribution + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +CONFIG_ADF4350=m +CONFIG_ADF4371=m +CONFIG_ADF4377=m +CONFIG_ADMV1013=m +CONFIG_ADMV1014=m +CONFIG_ADMV4420=m +CONFIG_ADRF6780=m +# end of Phase-Locked Loop (PLL) frequency synthesizers +# end of Frequency Synthesizers DDS/PLL + +# +# Digital gyroscope sensors +# +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS290 is not set +# CONFIG_ADXRS450 is not set +# CONFIG_BMG160 is not set +# CONFIG_FXAS21002C is not set +# CONFIG_MPU3050_I2C is not set +# CONFIG_IIO_ST_GYRO_3AXIS is not set +# CONFIG_ITG3200 is not set +# end of Digital gyroscope sensors + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +CONFIG_AFE4403=m +CONFIG_AFE4404=m +CONFIG_MAX30100=m +CONFIG_MAX30102=m +# end of Heart Rate Monitors +# end of Health Sensors + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +# CONFIG_DHT11 is not set +# CONFIG_HDC100X is not set +# CONFIG_HDC2010 is not set +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set +# end of Humidity sensors + +# +# Inertial measurement units +# +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16460 is not set +# CONFIG_ADIS16475 is not set +# CONFIG_ADIS16480 is not set +# CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set +# CONFIG_BOSCH_BNO055_SERIAL is not set +# CONFIG_BOSCH_BNO055_I2C is not set +# CONFIG_FXOS8700_I2C is not set 
+# CONFIG_FXOS8700_SPI is not set +# CONFIG_KMX61 is not set +# CONFIG_INV_ICM42600_I2C is not set +# CONFIG_INV_ICM42600_SPI is not set +# CONFIG_INV_MPU6050_I2C is not set +# CONFIG_INV_MPU6050_SPI is not set +# CONFIG_IIO_ST_LSM6DSX is not set +# CONFIG_IIO_ST_LSM9DS0 is not set +# end of Inertial measurement units + +CONFIG_IIO_ADIS_LIB=m +CONFIG_IIO_ADIS_LIB_BUFFER=y + +# +# Light sensors +# +# CONFIG_ADJD_S311 is not set +# CONFIG_ADUX1020 is not set +# CONFIG_AL3010 is not set +# CONFIG_AL3320A is not set +# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_AS73211 is not set +# CONFIG_BH1750 is not set +# CONFIG_BH1780 is not set +# CONFIG_CM32181 is not set +# CONFIG_CM3232 is not set +# CONFIG_CM3323 is not set +# CONFIG_CM3605 is not set +# CONFIG_CM36651 is not set +# CONFIG_GP2AP002 is not set +# CONFIG_GP2AP020A00F is not set +# CONFIG_IQS621_ALS is not set +# CONFIG_SENSORS_ISL29018 is not set +# CONFIG_SENSORS_ISL29028 is not set +# CONFIG_ISL29125 is not set +# CONFIG_JSA1212 is not set +# CONFIG_ROHM_BU27008 is not set +# CONFIG_ROHM_BU27034 is not set +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +# CONFIG_LTRF216A is not set +# CONFIG_LV0104CS is not set +# CONFIG_MAX44000 is not set +# CONFIG_MAX44009 is not set +# CONFIG_NOA1305 is not set +# CONFIG_OPT3001 is not set +# CONFIG_OPT4001 is not set +# CONFIG_PA12203001 is not set +# CONFIG_SI1133 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_ST_UVIS25 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL2591 is not set +# CONFIG_TSL2772 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VCNL4035 is not set +# CONFIG_VEML6030 is not set +# CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set +# CONFIG_ZOPT2201 is not set +# end of Light sensors + +# +# Magnetometer sensors +# +# CONFIG_AK8974 is not set +# CONFIG_AK8975 is not set +# CONFIG_AK09911 is not set +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set +# CONFIG_MAG3110 is not set +# CONFIG_MMC35240 is not set +# CONFIG_IIO_ST_MAGN_3AXIS is not set +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set +# CONFIG_SENSORS_RM3100_I2C is not set +# CONFIG_SENSORS_RM3100_SPI is not set +# CONFIG_TI_TMAG5273 is not set +# CONFIG_YAMAHA_YAS530 is not set +# end of Magnetometer sensors + +# +# Multiplexers +# +CONFIG_IIO_MUX=m +# end of Multiplexers + +# +# Inclinometer sensors +# +# end of Inclinometer sensors + +# +# Triggers - standalone +# +# CONFIG_IIO_HRTIMER_TRIGGER is not set +# CONFIG_IIO_INTERRUPT_TRIGGER is not set +# CONFIG_IIO_STM32_LPTIMER_TRIGGER is not set +# CONFIG_IIO_STM32_TIMER_TRIGGER is not set +# CONFIG_IIO_TIGHTLOOP_TRIGGER is not set +CONFIG_IIO_SYSFS_TRIGGER=m +# end of Triggers - standalone + +# +# Linear and angular position sensors +# +# CONFIG_IQS624_POS is not set +# end of Linear and angular position sensors + +# +# Digital potentiometers +# +# CONFIG_AD5110 is not set +# CONFIG_AD5272 is not set +# CONFIG_DS1803 is not set +# CONFIG_MAX5432 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set +# CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set +# CONFIG_MCP4531 is not set +# CONFIG_MCP41010 is not set +# CONFIG_TPL0102 is not set +# CONFIG_X9250 is not set +# end of Digital potentiometers + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set +# end 
of Digital potentiostats + +# +# Pressure sensors +# +# CONFIG_ABP060MG is not set +# CONFIG_BMP280 is not set +# CONFIG_DLHL60D is not set +# CONFIG_DPS310 is not set +# CONFIG_HP03 is not set +# CONFIG_ICP10100 is not set +# CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set +# CONFIG_MPL3115 is not set +# CONFIG_MPRLS0025PA is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +# CONFIG_IIO_ST_PRESS is not set +# CONFIG_T5403 is not set +# CONFIG_HP206C is not set +# CONFIG_ZPA2326 is not set +# end of Pressure sensors + +# +# Lightning sensors +# +# CONFIG_AS3935 is not set +# end of Lightning sensors + +# +# Proximity and distance sensors +# +# CONFIG_IRSD200 is not set +# CONFIG_ISL29501 is not set +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_MB1232 is not set +# CONFIG_PING is not set +# CONFIG_RFD77402 is not set +# CONFIG_SRF04 is not set +# CONFIG_SX9310 is not set +# CONFIG_SX9324 is not set +# CONFIG_SX9360 is not set +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set +# CONFIG_VCNL3020 is not set +# CONFIG_VL53L0X_I2C is not set +# end of Proximity and distance sensors + +# +# Resolver to digital converters +# +# CONFIG_AD2S90 is not set +# CONFIG_AD2S1200 is not set +# end of Resolver to digital converters + +# +# Temperature sensors +# +# CONFIG_IQS620AT_TEMP is not set +# CONFIG_LTC2983 is not set +# CONFIG_MAXIM_THERMOCOUPLE is not set +# CONFIG_MLX90614 is not set +# CONFIG_MLX90632 is not set +# CONFIG_TMP006 is not set +# CONFIG_TMP007 is not set +# CONFIG_TMP117 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +# CONFIG_MAX30208 is not set +# CONFIG_MAX31856 is not set +# CONFIG_MAX31865 is not set +# end of Temperature sensors + # CONFIG_NTB is not set CONFIG_PWM=y CONFIG_PWM_SYSFS=y # CONFIG_PWM_DEBUG is not set +# CONFIG_PWM_APPLE is not set # CONFIG_PWM_ATMEL is not set # CONFIG_PWM_ATMEL_TCB is not set # CONFIG_PWM_BCM_IPROC is not set @@ -7021,9 +7947,9 @@ CONFIG_PWM_SYSFS=y # CONFIG_PWM_BCM2835 is not set # CONFIG_PWM_BERLIN is not set # CONFIG_PWM_BRCMSTB is not set -CONFIG_PWM_CLK=y +# CONFIG_PWM_CLK is not set # CONFIG_PWM_CLPS711X is not set -CONFIG_PWM_DWC=y +# CONFIG_PWM_DWC is not set # CONFIG_PWM_DWC_K1PRO is not set # CONFIG_PWM_EP93XX is not set # CONFIG_PWM_FSL_FTM is not set @@ -7043,16 +7969,17 @@ CONFIG_PWM_DWC=y # CONFIG_PWM_MESON is not set # CONFIG_PWM_MTK_DISP is not set # CONFIG_PWM_MEDIATEK is not set +# CONFIG_PWM_MICROCHIP_CORE is not set # CONFIG_PWM_MXS is not set # CONFIG_PWM_OMAP_DMTIMER is not set # CONFIG_PWM_PCA9685 is not set -# CONFIG_PWM_PXA is not set +CONFIG_PWM_PXA=y # CONFIG_PWM_RASPBERRYPI_POE is not set # CONFIG_PWM_RCAR is not set # CONFIG_PWM_RENESAS_TPU is not set # CONFIG_PWM_ROCKCHIP is not set # CONFIG_PWM_SAMSUNG is not set -CONFIG_PWM_SIFIVE=y +# CONFIG_PWM_SIFIVE is not set # CONFIG_PWM_SL28CPLD is not set # CONFIG_PWM_SPEAR is not set # CONFIG_PWM_SPRD is not set @@ -7114,15 +8041,15 @@ CONFIG_RESET_CONTROLLER=y # CONFIG_RESET_MESON is not set # CONFIG_RESET_MESON_AUDIO_ARB is not set # CONFIG_RESET_NPCM is not set +# CONFIG_RESET_NUVOTON_MA35D1 is not set # CONFIG_RESET_PISTACHIO is not set # CONFIG_RESET_QCOM_AOSS is not set # CONFIG_RESET_QCOM_PDC is not set # CONFIG_RESET_RASPBERRYPI is not set # CONFIG_RESET_RZG2L_USBPHY_CTRL is not set # CONFIG_RESET_SCMI is not set -CONFIG_RESET_SIMPLE=y +# CONFIG_RESET_SIMPLE is not set # CONFIG_RESET_SOCFPGA is not set -CONFIG_RESET_STARFIVE_JH7100=y # CONFIG_RESET_SUNPLUS is not set # CONFIG_RESET_SUNXI is not set # 
CONFIG_RESET_TI_SCI is not set @@ -7134,6 +8061,7 @@ CONFIG_RESET_STARFIVE_JH7100=y # CONFIG_RESET_ZYNQ is not set CONFIG_RESET_K1X_SPACEMIT=y # CONFIG_RESET_K1MATRIX_SPACEMIT is not set +# CONFIG_RESET_STARFIVE_JH7100 is not set # CONFIG_COMMON_RESET_HI3660 is not set # CONFIG_COMMON_RESET_HI6220 is not set @@ -7198,7 +8126,7 @@ CONFIG_GENERIC_PHY=y # CONFIG_ARMADA375_USBCLUSTER_PHY is not set # CONFIG_PHY_BERLIN_SATA is not set # CONFIG_PHY_BERLIN_USB is not set -CONFIG_PHY_MVEBU_A3700_UTMI=y +# CONFIG_PHY_MVEBU_A3700_UTMI is not set # CONFIG_PHY_MVEBU_A38X_COMPHY is not set # CONFIG_PHY_MVEBU_CP110_UTMI is not set # CONFIG_PHY_PXA_28NM_HSIC is not set @@ -7215,6 +8143,7 @@ CONFIG_PHY_MVEBU_A3700_UTMI=y # CONFIG_PHY_MTK_DP is not set # CONFIG_PHY_SPARX5_SERDES is not set # CONFIG_PHY_LAN966X_SERDES is not set +# CONFIG_PHY_CPCAP_USB is not set # CONFIG_PHY_MAPPHONE_MDM6600 is not set # CONFIG_PHY_OCELOT_SERDES is not set # CONFIG_PHY_ATH79_USB is not set @@ -7223,12 +8152,17 @@ CONFIG_PHY_MVEBU_A3700_UTMI=y # CONFIG_PHY_QCOM_PCIE2 is not set # CONFIG_PHY_QCOM_QMP is not set # CONFIG_PHY_QCOM_QUSB2 is not set +# CONFIG_PHY_QCOM_SNPS_EUSB2 is not set +# CONFIG_PHY_QCOM_EUSB2_REPEATER is not set +# CONFIG_PHY_QCOM_M31_USB is not set # CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set # CONFIG_PHY_QCOM_USB_HS_28NM is not set # CONFIG_PHY_QCOM_USB_SS is not set # CONFIG_PHY_QCOM_IPQ806X_USB is not set +# CONFIG_PHY_QCOM_SGMII_ETH is not set # CONFIG_PHY_MT7621_PCI is not set # CONFIG_PHY_RALINK_USB is not set +# CONFIG_PHY_R8A779F0_ETHERNET_SERDES is not set # CONFIG_PHY_RCAR_GEN3_USB3 is not set # CONFIG_PHY_ROCKCHIP_DPHY_RX0 is not set # CONFIG_PHY_ROCKCHIP_INNO_HDMI is not set @@ -7243,7 +8177,6 @@ CONFIG_PHY_MVEBU_A3700_UTMI=y # CONFIG_PHY_EXYNOS_PCIE is not set # CONFIG_PHY_SAMSUNG_UFS is not set # CONFIG_PHY_SAMSUNG_USB2 is not set -# CONFIG_PHY_EXYNOS5_USBDRD is not set # CONFIG_PHY_UNIPHIER_USB2 is not set # CONFIG_PHY_UNIPHIER_USB3 is not set # CONFIG_PHY_UNIPHIER_PCIE is not set @@ -7252,6 +8185,9 @@ CONFIG_PHY_MVEBU_A3700_UTMI=y # CONFIG_PHY_ST_SPEAR1340_MIPHY is not set # CONFIG_PHY_STIH407_USB is not set # CONFIG_PHY_STM32_USBPHYC is not set +# CONFIG_PHY_STARFIVE_JH7110_DPHY_RX is not set +# CONFIG_PHY_STARFIVE_JH7110_PCIE is not set +# CONFIG_PHY_STARFIVE_JH7110_USB is not set # CONFIG_PHY_SUNPLUS_USB is not set # CONFIG_PHY_TEGRA194_P2U is not set # CONFIG_PHY_DA8XX_USB is not set @@ -7264,7 +8200,6 @@ CONFIG_PHY_MVEBU_A3700_UTMI=y # CONFIG_PHY_INTEL_KEEMBAY_USB is not set # CONFIG_PHY_INTEL_LGM_COMBO is not set # CONFIG_PHY_INTEL_LGM_EMMC is not set -# CONFIG_PHY_INTEL_THUNDERBAY_EMMC is not set # CONFIG_PHY_XILINX_ZYNQMP is not set CONFIG_PHY_SPACEMIT_K1X_COMBPHY=y # end of PHY Subsystem @@ -7288,6 +8223,8 @@ CONFIG_RISCV_PMU_SBI=y # CONFIG_ALIBABA_UNCORE_DRW_PMU is not set # CONFIG_HNS3_PMU is not set # CONFIG_MARVELL_CN10K_DDR_PMU is not set +# CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set +# CONFIG_MESON_DDR_PMU is not set # end of Performance monitor support CONFIG_RAS=y @@ -7299,20 +8236,24 @@ CONFIG_RAS=y # CONFIG_ANDROID_BINDER_IPC is not set # end of Android -CONFIG_LIBNVDIMM=y -CONFIG_BLK_DEV_PMEM=y -CONFIG_ND_CLAIM=y -CONFIG_ND_BTT=y -CONFIG_BTT=y -CONFIG_OF_PMEM=y -CONFIG_DAX=y +# CONFIG_LIBNVDIMM is not set +# CONFIG_DAX is not set CONFIG_NVMEM=y CONFIG_NVMEM_SYSFS=y + +# +# Layout Types +# +# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set +# end of Layout Types + # CONFIG_NVMEM_APPLE_EFUSES is not set # CONFIG_NVMEM_BCM_OCOTP 
is not set # CONFIG_NVMEM_BRCM_NVRAM is not set # CONFIG_NVMEM_IMX_IIM is not set # CONFIG_NVMEM_IMX_OCOTP is not set +# CONFIG_NVMEM_IMX_OCOTP_ELE is not set # CONFIG_NVMEM_JZ4780_EFUSE is not set # CONFIG_NVMEM_LAN9662_OTPC is not set # CONFIG_NVMEM_LAYERSCAPE_SFP is not set @@ -7324,17 +8265,20 @@ CONFIG_NVMEM_SYSFS=y # CONFIG_NVMEM_MXS_OCOTP is not set # CONFIG_NVMEM_NINTENDO_OTP is not set # CONFIG_NVMEM_QCOM_QFPROM is not set -CONFIG_NVMEM_RMEM=y +# CONFIG_NVMEM_QCOM_SEC_QFPROM is not set +# CONFIG_NVMEM_RMEM is not set # CONFIG_NVMEM_ROCKCHIP_EFUSE is not set # CONFIG_NVMEM_ROCKCHIP_OTP is not set # CONFIG_NVMEM_SC27XX_EFUSE is not set -CONFIG_NVMEM_SNVS_LPGPR=y +# CONFIG_NVMEM_SNVS_LPGPR is not set +CONFIG_NVMEM_SPACEMIT_EFUSE=y # CONFIG_NVMEM_SPRD_EFUSE is not set # CONFIG_NVMEM_STM32_ROMEM is not set # CONFIG_NVMEM_SUNPLUS_OCOTP is not set -CONFIG_NVMEM_U_BOOT_ENV=y +# CONFIG_NVMEM_U_BOOT_ENV is not set # CONFIG_NVMEM_UNIPHIER_EFUSE is not set # CONFIG_NVMEM_VF610_OCOTP is not set +# CONFIG_NVMEM_QORIQ_EFUSE is not set # # HW tracing support @@ -7347,15 +8291,15 @@ CONFIG_NVMEM_U_BOOT_ENV=y # CONFIG_FPGA is not set # CONFIG_FSI is not set # CONFIG_TEE is not set -CONFIG_MULTIPLEXER=y +CONFIG_MULTIPLEXER=m # # Multiplexer drivers # # CONFIG_MUX_ADG792A is not set # CONFIG_MUX_ADGS1408 is not set -CONFIG_MUX_GPIO=y -CONFIG_MUX_MMIO=y +# CONFIG_MUX_GPIO is not set +# CONFIG_MUX_MMIO is not set # end of Multiplexer drivers CONFIG_PM_OPP=y @@ -7373,6 +8317,8 @@ CONFIG_PM_OPP=y # CONFIG_VALIDATE_FS_PARSER=y CONFIG_FS_IOMAP=y +CONFIG_BUFFER_HEAD=y +CONFIG_LEGACY_DIRECT_IO=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -7391,8 +8337,9 @@ CONFIG_FS_MBCACHE=y # CONFIG_JFS_FS is not set CONFIG_XFS_FS=y CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y # CONFIG_XFS_QUOTA is not set -# CONFIG_XFS_POSIX_ACL is not set +CONFIG_XFS_POSIX_ACL=y # CONFIG_XFS_RT is not set # CONFIG_XFS_ONLINE_SCRUB is not set # CONFIG_XFS_WARN is not set @@ -7409,15 +8356,22 @@ CONFIG_BTRFS_FS_POSIX_ACL=y # CONFIG_NILFS2_FS is not set CONFIG_F2FS_FS=y CONFIG_F2FS_STAT_FS=y -# CONFIG_F2FS_FS_XATTR is not set +CONFIG_F2FS_FS_XATTR=y +CONFIG_F2FS_FS_POSIX_ACL=y +CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_CHECK_FS=y # CONFIG_F2FS_FAULT_INJECTION is not set -# CONFIG_F2FS_FS_COMPRESSION is not set +CONFIG_F2FS_FS_COMPRESSION=y +CONFIG_F2FS_FS_LZO=y +CONFIG_F2FS_FS_LZORLE=y +CONFIG_F2FS_FS_LZ4=y +CONFIG_F2FS_FS_LZ4HC=y +CONFIG_F2FS_FS_ZSTD=y CONFIG_F2FS_IOSTAT=y # CONFIG_F2FS_UNFAIR_RWSEM is not set CONFIG_FS_POSIX_ACL=y CONFIG_EXPORTFS=y -# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_EXPORTFS_BLOCK_OPS=y CONFIG_FILE_LOCKING=y # CONFIG_FS_ENCRYPTION is not set # CONFIG_FS_VERITY is not set @@ -7427,7 +8381,6 @@ CONFIG_INOTIFY_USER=y CONFIG_FANOTIFY=y # CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set # CONFIG_QUOTA is not set -CONFIG_AUTOFS4_FS=y CONFIG_AUTOFS_FS=y CONFIG_FUSE_FS=m # CONFIG_CUSE is not set @@ -7438,6 +8391,7 @@ CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y # CONFIG_OVERLAY_FS_INDEX is not set # CONFIG_OVERLAY_FS_XINO_AUTO is not set # CONFIG_OVERLAY_FS_METACOPY is not set +# CONFIG_OVERLAY_FS_DEBUG is not set # # Caches @@ -7493,9 +8447,9 @@ CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_TMPFS_XATTR=y # CONFIG_TMPFS_INODE64 is not set +# CONFIG_TMPFS_QUOTA is not set CONFIG_ARCH_SUPPORTS_HUGETLBFS=y # CONFIG_HUGETLBFS is not set -CONFIG_MEMFD_CREATE=y CONFIG_ARCH_HAS_GIGANTIC_PAGE=y CONFIG_CONFIGFS_FS=y # CONFIG_EFIVAR_FS is not set @@ -7512,13 +8466,16 @@ CONFIG_HFSPLUS_FS=m # 
CONFIG_BFS_FS is not set # CONFIG_EFS_FS is not set # CONFIG_JFFS2_FS is not set +# CONFIG_UBIFS_FS is not set # CONFIG_CRAMFS is not set CONFIG_SQUASHFS=y # CONFIG_SQUASHFS_FILE_CACHE is not set CONFIG_SQUASHFS_FILE_DIRECT=y -# CONFIG_SQUASHFS_DECOMP_SINGLE is not set -# CONFIG_SQUASHFS_DECOMP_MULTI is not set -CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set +CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set # CONFIG_SQUASHFS_XATTR is not set CONFIG_SQUASHFS_ZLIB=y CONFIG_SQUASHFS_LZ4=y @@ -7559,7 +8516,7 @@ CONFIG_NFS_FSCACHE=y # CONFIG_NFS_USE_LEGACY_DNS is not set CONFIG_NFS_USE_KERNEL_DNS=y CONFIG_NFS_DISABLE_UDP_SUPPORT=y -# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFS_V4_2_READ_PLUS=y # CONFIG_NFSD is not set CONFIG_GRACE_PERIOD=y CONFIG_LOCKD=y @@ -7570,6 +8527,7 @@ CONFIG_SUNRPC=y CONFIG_SUNRPC_GSS=y CONFIG_SUNRPC_BACKCHANNEL=y CONFIG_SUNRPC_SWAP=y +CONFIG_RPCSEC_GSS_KRB5=y # CONFIG_SUNRPC_DEBUG is not set # CONFIG_CEPH_FS is not set CONFIG_CIFS=y @@ -7585,10 +8543,7 @@ CONFIG_CIFS_DEBUG=y # CONFIG_CIFS_SWN_UPCALL is not set CONFIG_CIFS_FSCACHE=y # CONFIG_CIFS_ROOT is not set -CONFIG_SMB_SERVER=m -CONFIG_SMB_SERVER_SMBDIRECT=y -CONFIG_SMB_SERVER_CHECK_CAP_NET_ADMIN=y -CONFIG_SMB_SERVER_KERBEROS5=y +# CONFIG_SMB_SERVER is not set CONFIG_SMBFS=y # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set @@ -7643,6 +8598,7 @@ CONFIG_NLS_ISO8859_1=y # CONFIG_NLS_MAC_ROMANIAN is not set # CONFIG_NLS_MAC_TURKISH is not set CONFIG_NLS_UTF8=y +CONFIG_NLS_UCS2_UTILS=y # CONFIG_DLM is not set CONFIG_UNICODE=y # CONFIG_UNICODE_NORMALIZATION_SELFTEST is not set @@ -7660,12 +8616,14 @@ CONFIG_ENCRYPTED_KEYS=m # CONFIG_USER_DECRYPTED_DATA is not set CONFIG_KEY_DH_OPERATIONS=y # CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_PROC_MEM_ALWAYS_FORCE=y +# CONFIG_PROC_MEM_FORCE_PTRACE is not set +# CONFIG_PROC_MEM_NO_FORCE is not set CONFIG_SECURITY=y CONFIG_SECURITYFS=y CONFIG_SECURITY_NETWORK=y # CONFIG_SECURITY_NETWORK_XFRM is not set CONFIG_SECURITY_PATH=y -CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y # CONFIG_HARDENED_USERCOPY is not set # CONFIG_FORTIFY_SOURCE is not set # CONFIG_STATIC_USERMODEHELPER is not set @@ -7712,6 +8670,13 @@ CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y # CONFIG_ZERO_CALL_USED_REGS is not set # end of Memory initialization +# +# Hardening of kernel data structures +# +# CONFIG_LIST_HARDENED is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# end of Hardening of kernel data structures + CONFIG_RANDSTRUCT_NONE=y # end of Kernel hardening options # end of Security options @@ -7731,13 +8696,15 @@ CONFIG_CRYPTO_ALGAPI=y CONFIG_CRYPTO_ALGAPI2=y CONFIG_CRYPTO_AEAD=y CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG=y +CONFIG_CRYPTO_SIG2=y CONFIG_CRYPTO_SKCIPHER=y CONFIG_CRYPTO_SKCIPHER2=y CONFIG_CRYPTO_HASH=y CONFIG_CRYPTO_HASH2=y CONFIG_CRYPTO_RNG=y CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_RNG_DEFAULT=m CONFIG_CRYPTO_AKCIPHER2=y CONFIG_CRYPTO_AKCIPHER=y CONFIG_CRYPTO_KPP2=y @@ -7747,7 +8714,6 @@ CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_MANAGER2=y # CONFIG_CRYPTO_USER is not set CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y -CONFIG_CRYPTO_GF128MUL=y CONFIG_CRYPTO_NULL=y CONFIG_CRYPTO_NULL2=y # CONFIG_CRYPTO_PCRYPT is not set @@ -7762,8 +8728,8 @@ CONFIG_CRYPTO_AUTHENC=y CONFIG_CRYPTO_RSA=y CONFIG_CRYPTO_DH=y # CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set -CONFIG_CRYPTO_ECC=y -CONFIG_CRYPTO_ECDH=y 
+CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m # CONFIG_CRYPTO_ECDSA is not set # CONFIG_CRYPTO_ECRDSA is not set # CONFIG_CRYPTO_SM2 is not set @@ -7797,7 +8763,7 @@ CONFIG_CRYPTO_DES=y # CONFIG_CRYPTO_ADIANTUM is not set # CONFIG_CRYPTO_ARC4 is not set CONFIG_CRYPTO_CHACHA20=y -CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CBC=m # CONFIG_CRYPTO_CFB is not set CONFIG_CRYPTO_CTR=y # CONFIG_CRYPTO_CTS is not set @@ -7817,8 +8783,9 @@ CONFIG_CRYPTO_OFB=y CONFIG_CRYPTO_CHACHA20POLY1305=y CONFIG_CRYPTO_CCM=y CONFIG_CRYPTO_GCM=y -CONFIG_CRYPTO_SEQIV=y -CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_GENIV=m +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=m CONFIG_CRYPTO_ESSIV=y # end of AEAD (authenticated encryption with associated data) ciphers @@ -7831,7 +8798,7 @@ CONFIG_CRYPTO_GHASH=y CONFIG_CRYPTO_HMAC=y # CONFIG_CRYPTO_MD4 is not set CONFIG_CRYPTO_MD5=y -# CONFIG_CRYPTO_MICHAEL_MIC is not set +CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_POLY1305=y # CONFIG_CRYPTO_RMD160 is not set CONFIG_CRYPTO_SHA1=y @@ -7858,7 +8825,7 @@ CONFIG_CRYPTO_CRC64_ROCKSOFT=y # # Compression # -CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_DEFLATE=m CONFIG_CRYPTO_LZO=y CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m @@ -7869,13 +8836,14 @@ CONFIG_CRYPTO_ZSTD=y # # Random number generation # -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=m CONFIG_CRYPTO_DRBG_HMAC=y # CONFIG_CRYPTO_DRBG_HASH is not set # CONFIG_CRYPTO_DRBG_CTR is not set -CONFIG_CRYPTO_DRBG=y -CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_DRBG=m +CONFIG_CRYPTO_JITTERENTROPY=m +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set CONFIG_CRYPTO_KDF800108_CTR=y # end of Random number generation @@ -7885,7 +8853,8 @@ CONFIG_CRYPTO_KDF800108_CTR=y CONFIG_CRYPTO_USER_API=y CONFIG_CRYPTO_USER_API_HASH=y CONFIG_CRYPTO_USER_API_SKCIPHER=y -# CONFIG_CRYPTO_USER_API_RNG is not set +CONFIG_CRYPTO_USER_API_RNG=y +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set # CONFIG_CRYPTO_USER_API_AEAD is not set CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y # end of Userspace interface @@ -7901,6 +8870,12 @@ CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_ATMEL_SHA is not set # CONFIG_CRYPTO_DEV_ATMEL_ECC is not set # CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +# CONFIG_CAVIUM_CPT is not set +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +# CONFIG_CRYPTO_DEV_OCTEONTX_CPT is not set +# CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4 is not set +# CONFIG_CRYPTO_DEV_KEEMBAY_OCS_ECC is not set +# CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU is not set # CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set # CONFIG_CRYPTO_DEV_QAT_C3XXX is not set # CONFIG_CRYPTO_DEV_QAT_C62X is not set @@ -7908,9 +8883,6 @@ CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set # CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set # CONFIG_CRYPTO_DEV_QAT_C62XVF is not set -# CONFIG_CAVIUM_CPT is not set -# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set -# CONFIG_CRYPTO_DEV_OCTEONTX_CPT is not set # CONFIG_CRYPTO_DEV_CAVIUM_ZIP is not set # CONFIG_CRYPTO_DEV_QCE is not set # CONFIG_CRYPTO_DEV_QCOM_RNG is not set @@ -7923,10 +8895,8 @@ CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_HISI_SEC is not set # CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set # CONFIG_CRYPTO_DEV_SA2UL is not set -# CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4 is not set -# CONFIG_CRYPTO_DEV_KEEMBAY_OCS_ECC is not set -# CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU is not set # CONFIG_CRYPTO_DEV_ASPEED is not set +# CONFIG_CRYPTO_DEV_JH7110 is not set CONFIG_SPACEMIT_REE_ENGINE=y CONFIG_SPACEMIT_REE_AES=y # CONFIG_SPACEMIT_CRYPTO_DEBUG is not set @@ -7958,7 +8928,7 @@ 
CONFIG_BINARY_PRINTF=y CONFIG_RAID6_PQ=y CONFIG_RAID6_PQ_BENCHMARK=y CONFIG_LINEAR_RANGES=y -# CONFIG_PACKING is not set +CONFIG_PACKING=y CONFIG_BITREVERSE=y CONFIG_GENERIC_STRNCPY_FROM_USER=y CONFIG_GENERIC_STRNLEN_USER=y @@ -7974,6 +8944,7 @@ CONFIG_GENERIC_PCI_IOMAP=y CONFIG_CRYPTO_LIB_UTILS=y CONFIG_CRYPTO_LIB_AES=y CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=y CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y CONFIG_CRYPTO_LIB_CHACHA_GENERIC=y CONFIG_CRYPTO_LIB_CHACHA=m @@ -7988,7 +8959,7 @@ CONFIG_CRYPTO_LIB_SHA1=y CONFIG_CRYPTO_LIB_SHA256=y # end of Crypto library routines -CONFIG_CRC_CCITT=y +CONFIG_CRC_CCITT=m CONFIG_CRC16=y CONFIG_CRC_T10DIF=y CONFIG_CRC64_ROCKSOFT=y @@ -8003,7 +8974,7 @@ CONFIG_CRC64=y # CONFIG_CRC4 is not set # CONFIG_CRC7 is not set CONFIG_LIBCRC32C=y -# CONFIG_CRC8 is not set +CONFIG_CRC8=m CONFIG_XXHASH=y CONFIG_AUDIT_GENERIC=y # CONFIG_RANDOM32_SELFTEST is not set @@ -8013,8 +8984,8 @@ CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=y CONFIG_LZO_COMPRESS=y CONFIG_LZO_DECOMPRESS=y -CONFIG_LZ4_COMPRESS=m -CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_COMPRESS=y +CONFIG_LZ4HC_COMPRESS=y CONFIG_LZ4_DECOMPRESS=y CONFIG_ZSTD_COMMON=y CONFIG_ZSTD_COMPRESS=y @@ -8044,9 +9015,11 @@ CONFIG_TEXTSEARCH_KMP=m CONFIG_TEXTSEARCH_BM=m CONFIG_TEXTSEARCH_FSM=m CONFIG_INTERVAL_TREE=y +CONFIG_INTERVAL_TREE_SPAN_ITER=y CONFIG_XARRAY_MULTI=y CONFIG_ASSOCIATIVE_ARRAY=y CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y CONFIG_HAS_IOPORT_MAP=y CONFIG_HAS_DMA=y CONFIG_NEED_DMA_MAP_STATE=y @@ -8056,13 +9029,15 @@ CONFIG_ARCH_HAS_SETUP_DMA_OPS=y CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y +CONFIG_ARCH_DMA_DEFAULT_COHERENT=y CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y # CONFIG_DMA_RESTRICTED_POOL is not set CONFIG_DMA_NONCOHERENT_MMAP=y CONFIG_DMA_COHERENT_POOL=y CONFIG_DMA_DIRECT_REMAP=y CONFIG_DMA_CMA=y -# CONFIG_DMA_PERNUMA_CMA is not set # # Default contiguous memory area size: @@ -8076,7 +9051,6 @@ CONFIG_CMA_ALIGNMENT=8 # CONFIG_DMA_API_DEBUG is not set # CONFIG_DMA_MAP_BENCHMARK is not set CONFIG_SGL_ALLOC=y -# CONFIG_FORCE_NR_CPUS is not set CONFIG_CPU_RMAP=y CONFIG_DQL=y CONFIG_GLOB=y @@ -8096,7 +9070,7 @@ CONFIG_FONT_SUPPORT=y CONFIG_FONT_8x8=y CONFIG_FONT_8x16=y CONFIG_SG_POOL=y -CONFIG_MEMREGION=y +CONFIG_ARCH_HAS_PMEM_API=y CONFIG_ARCH_STACKWALK=y CONFIG_STACKDEPOT=y CONFIG_SBITMAP=y @@ -8124,6 +9098,7 @@ CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 # CONFIG_DYNAMIC_DEBUG is not set # CONFIG_DYNAMIC_DEBUG_CORE is not set CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y # end of printk and dmesg options CONFIG_DEBUG_KERNEL=y @@ -8143,6 +9118,7 @@ CONFIG_FRAME_WARN=2048 # CONFIG_HEADERS_INSTALL is not set # CONFIG_DEBUG_SECTION_MISMATCH is not set CONFIG_SECTION_MISMATCH_WARN_ONLY=y +# CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B is not set CONFIG_ARCH_WANT_FRAME_POINTERS=y CONFIG_FRAME_POINTER=y # CONFIG_VMLINUX_MAP is not set @@ -8191,10 +9167,11 @@ CONFIG_ARCH_HAS_DEBUG_WX=y # CONFIG_DEBUG_WX is not set CONFIG_GENERIC_PTDUMP=y # CONFIG_PTDUMP_DEBUGFS is not set -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_SHRINKER_DEBUG is not set CONFIG_HAVE_DEBUG_KMEMLEAK=y # CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_PER_VMA_LOCK_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SHRINKER_DEBUG is not set # CONFIG_DEBUG_STACK_USAGE is not set # CONFIG_SCHED_STACK_END_CHECK is not set CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y @@ -8204,8 +9181,11 @@ CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y # CONFIG_DEBUG_VIRTUAL is not set 
# CONFIG_DEBUG_MEMORY_INIT is not set # CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y CONFIG_CC_HAS_KASAN_GENERIC=y CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +# CONFIG_KASAN is not set CONFIG_HAVE_ARCH_KFENCE=y # CONFIG_KFENCE is not set # end of Memory Debugging @@ -8219,10 +9199,13 @@ CONFIG_HAVE_ARCH_KFENCE=y CONFIG_PANIC_ON_OOPS_VALUE=0 CONFIG_PANIC_TIMEOUT=0 # CONFIG_SOFTLOCKUP_DETECTOR is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_HARDLOCKUP_DETECTOR is not set CONFIG_DETECT_HUNG_TASK=y CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=60 # CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set # CONFIG_WQ_WATCHDOG is not set +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set # CONFIG_TEST_LOCKUP is not set # end of Debug Oops, Lockups and Hangs @@ -8234,6 +9217,7 @@ CONFIG_SCHED_DEBUG=y # end of Scheduler Debugging # CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set # # Lock Debugging (spinlocks, mutexes, etc...) @@ -8267,12 +9251,9 @@ CONFIG_STACKTRACE=y # CONFIG_DEBUG_PLIST is not set # CONFIG_DEBUG_SG is not set # CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_BUG_ON_DATA_CORRUPTION is not set # CONFIG_DEBUG_MAPLE_TREE is not set # end of Debug kernel data structures -# CONFIG_DEBUG_CREDENTIALS is not set - # # RCU Debugging # @@ -8281,6 +9262,7 @@ CONFIG_STACKTRACE=y # CONFIG_RCU_REF_SCALE_TEST is not set CONFIG_RCU_CPU_STALL_TIMEOUT=21 CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set # CONFIG_RCU_TRACE is not set # CONFIG_RCU_EQS_DEBUG is not set # end of RCU Debugging @@ -8289,8 +9271,9 @@ CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 # CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set # CONFIG_LATENCYTOP is not set CONFIG_NOP_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_RETHOOK=y CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y CONFIG_HAVE_DYNAMIC_FTRACE=y CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y @@ -8303,9 +9286,8 @@ CONFIG_TRACING=y CONFIG_TRACING_SUPPORT=y CONFIG_FTRACE=y # CONFIG_BOOTTIME_TRACING is not set -# CONFIG_FUNCTION_TRACER is not set -# CONFIG_STACK_TRACER is not set # CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set # CONFIG_SCHED_TRACER is not set # CONFIG_HWLAT_TRACER is not set # CONFIG_OSNOISE_TRACER is not set @@ -8357,7 +9339,11 @@ CONFIG_ARCH_USE_MEMTEST=y # Rust hacking # # end of Rust hacking +# end of Kernel hacking +# +# Documentation +# # CONFIG_WARN_MISSING_DOCUMENTS is not set # CONFIG_WARN_ABI_ERRORS is not set -# end of Kernel hacking +# end of Documentation diff --git a/config/kernel/linux-spacemit-legacy.config b/config/kernel/linux-spacemit-legacy.config deleted file mode 100644 index d800f4a1cbe8..000000000000 --- a/config/kernel/linux-spacemit-legacy.config +++ /dev/null @@ -1,8341 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. 
-# Linux/riscv 6.1.15 Kernel Configuration -# -CONFIG_CC_VERSION_TEXT="riscv64-linux-gnu-gcc (Ubuntu 13.2.0-23ubuntu4) 13.2.0" -CONFIG_CC_IS_GCC=y -CONFIG_GCC_VERSION=130200 -CONFIG_CLANG_VERSION=0 -CONFIG_AS_IS_GNU=y -CONFIG_AS_VERSION=24200 -CONFIG_LD_IS_BFD=y -CONFIG_LD_VERSION=24200 -CONFIG_LLD_VERSION=0 -CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y -CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y -CONFIG_CC_HAS_ASM_INLINE=y -CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y -CONFIG_PAHOLE_VERSION=125 -CONFIG_IRQ_WORK=y -CONFIG_BUILDTIME_TABLE_SORT=y -CONFIG_THREAD_INFO_IN_TASK=y - -# -# General setup -# -CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_COMPILE_TEST=y -# CONFIG_WERROR is not set -CONFIG_LOCALVERSION="" -CONFIG_BUILD_SALT="" -CONFIG_DEFAULT_INIT="" -CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_SYSVIPC_COMPAT=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -# CONFIG_WATCH_QUEUE is not set -CONFIG_CROSS_MEMORY_ATTACH=y -# CONFIG_USELIB is not set -CONFIG_AUDIT=y -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -CONFIG_AUDITSYSCALL=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_SHOW_LEVEL=y -CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y -CONFIG_GENERIC_IRQ_MIGRATION=y -CONFIG_HARDIRQS_SW_RESEND=y -CONFIG_IRQ_DOMAIN=y -CONFIG_IRQ_DOMAIN_HIERARCHY=y -CONFIG_GENERIC_MSI_IRQ=y -CONFIG_GENERIC_MSI_IRQ_DOMAIN=y -CONFIG_IRQ_FORCED_THREADING=y -CONFIG_SPARSE_IRQ=y -# CONFIG_GENERIC_IRQ_DEBUGFS is not set -# end of IRQ subsystem - -CONFIG_GENERIC_IRQ_MULTI_HANDLER=y -CONFIG_ARCH_CLOCKSOURCE_INIT=y -CONFIG_GENERIC_TIME_VSYSCALL=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_ARCH_HAS_TICK_BROADCAST=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y -CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y -CONFIG_CONTEXT_TRACKING=y -CONFIG_CONTEXT_TRACKING_IDLE=y - -# -# Timers subsystem -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ_COMMON=y -# CONFIG_HZ_PERIODIC is not set -CONFIG_NO_HZ_IDLE=y -# CONFIG_NO_HZ_FULL is not set -# CONFIG_NO_HZ is not set -CONFIG_HIGH_RES_TIMERS=y -# end of Timers subsystem - -CONFIG_BPF=y -CONFIG_HAVE_EBPF_JIT=y - -# -# BPF subsystem -# -CONFIG_BPF_SYSCALL=y -# CONFIG_BPF_JIT is not set -CONFIG_BPF_UNPRIV_DEFAULT_OFF=y -CONFIG_USERMODE_DRIVER=y -# end of BPF subsystem - -CONFIG_PREEMPT_NONE_BUILD=y -CONFIG_PREEMPT_NONE=y -# CONFIG_PREEMPT_VOLUNTARY is not set -# CONFIG_PREEMPT is not set - -# -# CPU/Task time and stats accounting -# -CONFIG_TICK_CPU_ACCOUNTING=y -# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set -# CONFIG_IRQ_TIME_ACCOUNTING is not set -# CONFIG_BSD_PROCESS_ACCT is not set -# CONFIG_TASKSTATS is not set -# CONFIG_PSI is not set -# end of CPU/Task time and stats accounting - -CONFIG_CPU_ISOLATION=y - -# -# RCU Subsystem -# -CONFIG_TREE_RCU=y -# CONFIG_RCU_EXPERT is not set -CONFIG_SRCU=y -CONFIG_TREE_SRCU=y -CONFIG_TASKS_RCU_GENERIC=y -CONFIG_TASKS_TRACE_RCU=y -CONFIG_RCU_STALL_COMMON=y -CONFIG_RCU_NEED_SEGCBLIST=y -# end of RCU Subsystem - -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -# CONFIG_IKHEADERS is not set -CONFIG_LOG_BUF_SHIFT=17 -CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 -CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 -# CONFIG_PRINTK_INDEX is not set -CONFIG_GENERIC_SCHED_CLOCK=y - -# -# Scheduler features -# -# CONFIG_UCLAMP_TASK is not set -# end of Scheduler features - -CONFIG_CC_HAS_INT128=y -CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" -CONFIG_GCC11_NO_ARRAY_BOUNDS=y -CONFIG_GCC12_NO_ARRAY_BOUNDS=y -CONFIG_ARCH_SUPPORTS_INT128=y -CONFIG_CGROUPS=y -CONFIG_PAGE_COUNTER=y -# CONFIG_CGROUP_FAVOR_DYNMODS is not set -CONFIG_MEMCG=y 
-CONFIG_MEMCG_KMEM=y -CONFIG_BLK_CGROUP=y -CONFIG_CGROUP_WRITEBACK=y -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_RDMA=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_PERF=y -CONFIG_CGROUP_BPF=y -CONFIG_CGROUP_MISC=y -# CONFIG_CGROUP_DEBUG is not set -CONFIG_SOCK_CGROUP_DATA=y -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_TIME_NS=y -CONFIG_IPC_NS=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -CONFIG_CHECKPOINT_RESTORE=y -# CONFIG_SCHED_AUTOGROUP is not set -# CONFIG_SYSFS_DEPRECATED is not set -# CONFIG_RELAY is not set -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -CONFIG_RD_LZ4=y -CONFIG_RD_ZSTD=y -CONFIG_BOOT_CONFIG=y -# CONFIG_BOOT_CONFIG_EMBED is not set -CONFIG_INITRAMFS_PRESERVE_MTIME=y -CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SYSCTL=y -CONFIG_SYSCTL_EXCEPTION_TRACE=y -CONFIG_EXPERT=y -CONFIG_MULTIUSER=y -# CONFIG_SGETMASK_SYSCALL is not set -CONFIG_SYSFS_SYSCALL=y -CONFIG_FHANDLE=y -CONFIG_POSIX_TIMERS=y -CONFIG_PRINTK=y -# CONFIG_BUG is not set -CONFIG_ELF_CORE=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_FUTEX_PI=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -CONFIG_IO_URING=y -CONFIG_ADVISE_SYSCALLS=y -CONFIG_MEMBARRIER=y -CONFIG_KALLSYMS=y -CONFIG_KALLSYMS_ALL=y -CONFIG_KALLSYMS_BASE_RELATIVE=y -CONFIG_KCMP=y -CONFIG_RSEQ=y -# CONFIG_DEBUG_RSEQ is not set -# CONFIG_EMBEDDED is not set -CONFIG_HAVE_PERF_EVENTS=y -# CONFIG_PC104 is not set - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -# end of Kernel Performance Events And Counters - -CONFIG_SYSTEM_DATA_VERIFICATION=y -# CONFIG_PROFILING is not set -CONFIG_TRACEPOINTS=y -# end of General setup - -CONFIG_64BIT=y -CONFIG_RISCV=y -CONFIG_ARCH_MMAP_RND_BITS_MIN=18 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 -CONFIG_ARCH_MMAP_RND_BITS_MAX=24 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=17 -CONFIG_RISCV_SBI=y -CONFIG_MMU=y -CONFIG_PAGE_OFFSET=0xff60000000000000 -CONFIG_ARCH_FLATMEM_ENABLE=y -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_SELECT_MEMORY_MODEL=y -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_GENERIC_CSUM=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_PGTABLE_LEVELS=5 -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_RISCV_DMA_NONCOHERENT=y -CONFIG_AS_HAS_INSN=y -CONFIG_AS_HAS_OPTION_ARCH=y - -# -# SoC selection -# -# CONFIG_SOC_MICROCHIP_POLARFIRE is not set -# CONFIG_SOC_SIFIVE is not set -# CONFIG_SOC_STARFIVE is not set -# CONFIG_SOC_VIRT is not set -CONFIG_SOC_SPACEMIT=y -CONFIG_SOC_SPACEMIT_K1=y -# CONFIG_SOC_SPACEMIT_K2 is not set -# CONFIG_SOC_SPACEMIT_K1PRO is not set -CONFIG_SOC_SPACEMIT_K1X=y -# CONFIG_SOC_SPACEMIT_K1_FPGA is not set -CONFIG_BIND_THREAD_TO_AICORES=y -# end of SoC selection - -# -# CPU errata selection -# -# CONFIG_ERRATA_SIFIVE is not set -# CONFIG_ERRATA_THEAD is not set -# end of CPU errata selection - -# -# Platform type -# -# CONFIG_NONPORTABLE is not set -CONFIG_ARCH_RV64I=y -# CONFIG_CMODEL_MEDLOW is not set -CONFIG_CMODEL_MEDANY=y -CONFIG_MODULE_SECTIONS=y -CONFIG_SMP=y -CONFIG_NR_CPUS=8 -CONFIG_HOTPLUG_CPU=y -CONFIG_TUNE_GENERIC=y -# CONFIG_NUMA is not set -CONFIG_RISCV_ALTERNATIVE=y 
-CONFIG_RISCV_ISA_C=y -CONFIG_RISCV_ISA_SVPBMT=y -CONFIG_TOOLCHAIN_HAS_V=y -CONFIG_RISCV_ISA_V=y -CONFIG_TOOLCHAIN_HAS_ZBB=y -CONFIG_RISCV_ISA_ZBB=y -CONFIG_TOOLCHAIN_HAS_ZICBOM=y -CONFIG_RISCV_ISA_ZICBOM=y -CONFIG_TOOLCHAIN_HAS_ZICBOZ=y -CONFIG_RISCV_ISA_ZICBOZ=y -CONFIG_TOOLCHAIN_HAS_ZICBOP=y -CONFIG_RISCV_ISA_ZICBOP=y -CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE=y -CONFIG_FPU=y -# end of Platform type - -# -# Kernel features -# -# CONFIG_HZ_100 is not set -CONFIG_HZ_250=y -# CONFIG_HZ_300 is not set -# CONFIG_HZ_1000 is not set -CONFIG_HZ=250 -CONFIG_SCHED_HRTICK=y -CONFIG_RISCV_SBI_V01=y -# CONFIG_RISCV_BOOT_SPINWAIT is not set -# CONFIG_KEXEC is not set -# CONFIG_KEXEC_FILE is not set -# CONFIG_CRASH_DUMP is not set -CONFIG_COMPAT=y -# end of Kernel features - -# -# Boot options -# -CONFIG_CMDLINE="" -CONFIG_EFI_STUB=y -CONFIG_EFI=y -CONFIG_CC_HAVE_STACKPROTECTOR_TLS=y -CONFIG_STACKPROTECTOR_PER_TASK=y -# end of Boot options - -CONFIG_PORTABLE=y -CONFIG_IMAGE_LOAD_OFFSET=0x1400000 - -# -# Power management options -# -CONFIG_SUSPEND=y -CONFIG_SUSPEND_FREEZER=y -# CONFIG_SUSPEND_SKIP_SYNC is not set -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -# CONFIG_PM_AUTOSLEEP is not set -# CONFIG_PM_USERSPACE_AUTOSLEEP is not set -# CONFIG_PM_WAKELOCKS is not set -CONFIG_PM=y -# CONFIG_PM_DEBUG is not set -CONFIG_PM_CLK=y -CONFIG_PM_GENERIC_DOMAINS=y -# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set -CONFIG_PM_GENERIC_DOMAINS_SLEEP=y -CONFIG_PM_GENERIC_DOMAINS_OF=y -CONFIG_CPU_PM=y -# CONFIG_ENERGY_MODEL is not set -CONFIG_ARCH_SUSPEND_POSSIBLE=y -# end of Power management options - -# -# CPU Power Management -# - -# -# CPU Idle -# -CONFIG_CPU_IDLE=y -CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y -# CONFIG_CPU_IDLE_GOV_LADDER is not set -CONFIG_CPU_IDLE_GOV_MENU=y -# CONFIG_CPU_IDLE_GOV_TEO is not set -CONFIG_DT_IDLE_STATES=y -CONFIG_DT_IDLE_GENPD=y - -# -# RISC-V CPU Idle Drivers -# -CONFIG_RISCV_SBI_CPUIDLE=y -# end of RISC-V CPU Idle Drivers -# end of CPU Idle - -# -# CPU Frequency scaling -# -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_ATTR_SET=y -CONFIG_CPU_FREQ_GOV_COMMON=y -# CONFIG_CPU_FREQ_STAT is not set -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y - -# -# CPU frequency scaling drivers -# -CONFIG_CPUFREQ_DT=y -CONFIG_CPUFREQ_DT_PLATDEV=y -CONFIG_ARM_BRCMSTB_AVS_CPUFREQ=y -CONFIG_ARM_MEDIATEK_CPUFREQ_HW=m -# CONFIG_ARM_QCOM_CPUFREQ_HW is not set -# CONFIG_ARM_RASPBERRYPI_CPUFREQ is not set -# CONFIG_ARM_SCMI_CPUFREQ is not set -# CONFIG_QORIQ_CPUFREQ is not set -CONFIG_SPACEMIT_K1X_CPUFREQ=y -# end of CPU Frequency scaling -# end of CPU Power Management - -CONFIG_HAVE_KVM_EVENTFD=y -CONFIG_KVM_MMIO=y -CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y -CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL=y -CONFIG_KVM_XFER_TO_GUEST_WORK=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=m - -# -# General architecture-dependent options -# -# CONFIG_KPROBES is not set -# CONFIG_JUMP_LABEL is not set -CONFIG_UPROBES=y -CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_KPROBES_ON_FTRACE=y -CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y -CONFIG_TRACE_IRQFLAGS_SUPPORT=y 
-CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_GENERIC_IDLE_POLL_SETUP=y -CONFIG_ARCH_HAS_FORTIFY_SOURCE=y -CONFIG_ARCH_HAS_SET_MEMORY=y -CONFIG_ARCH_HAS_SET_DIRECT_MAP=y -CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y -CONFIG_HAVE_ASM_MODVERSIONS=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_RSEQ=y -CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y -CONFIG_HAVE_ARCH_SECCOMP=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP=y -CONFIG_SECCOMP_FILTER=y -# CONFIG_SECCOMP_CACHE_DEBUG is not set -CONFIG_HAVE_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR_STRONG=y -CONFIG_LTO_NONE=y -CONFIG_HAVE_CONTEXT_TRACKING_USER=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_MOVE_PUD=y -CONFIG_HAVE_MOVE_PMD=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_HAVE_ARCH_MMAP_RND_BITS=y -CONFIG_ARCH_MMAP_RND_BITS=18 -CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y -CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 -CONFIG_PAGE_SIZE_LESS_THAN_64KB=y -CONFIG_PAGE_SIZE_LESS_THAN_256KB=y -CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y -CONFIG_CLONE_BACKWARDS=y -CONFIG_COMPAT_32BIT_TIME=y -CONFIG_HAVE_ARCH_VMAP_STACK=y -CONFIG_VMAP_STACK=y -CONFIG_ARCH_OPTIONAL_KERNEL_RWX=y -CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT=y -CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y -CONFIG_STRICT_KERNEL_RWX=y -CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y -CONFIG_STRICT_MODULE_RWX=y -CONFIG_ARCH_USE_MEMREMAP_PROT=y -# CONFIG_LOCK_EVENT_COUNTS is not set -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y -CONFIG_DYNAMIC_SIGFRAME=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -# end of GCOV-based kernel profiling -# end of General architecture-dependent options - -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -# CONFIG_MODULE_FORCE_LOAD is not set -CONFIG_MODULE_UNLOAD=y -# CONFIG_MODULE_FORCE_UNLOAD is not set -# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set -# CONFIG_MODVERSIONS is not set -# CONFIG_MODULE_SRCVERSION_ALL is not set -# CONFIG_MODULE_SIG is not set -CONFIG_MODULE_COMPRESS_NONE=y -# CONFIG_MODULE_COMPRESS_GZIP is not set -# CONFIG_MODULE_COMPRESS_XZ is not set -# CONFIG_MODULE_COMPRESS_ZSTD is not set -# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set -CONFIG_MODPROBE_PATH="/sbin/modprobe" -CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_BLOCK=y -CONFIG_BLOCK_LEGACY_AUTOLOAD=y -CONFIG_BLK_CGROUP_RWSTAT=y -CONFIG_BLK_DEV_BSG_COMMON=y -CONFIG_BLK_ICQ=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_INTEGRITY_T10=y -# CONFIG_BLK_DEV_ZONED is not set -CONFIG_BLK_DEV_THROTTLING=y -# CONFIG_BLK_DEV_THROTTLING_LOW is not set -# CONFIG_BLK_WBT is not set -# CONFIG_BLK_CGROUP_IOLATENCY is not set -# CONFIG_BLK_CGROUP_IOCOST is not set -# CONFIG_BLK_CGROUP_IOPRIO is not set -CONFIG_BLK_DEBUG_FS=y -# CONFIG_BLK_SED_OPAL is not set -# CONFIG_BLK_INLINE_ENCRYPTION is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION is not set -# CONFIG_AIX_PARTITION is not set -# CONFIG_OSF_PARTITION is not set -# CONFIG_AMIGA_PARTITION is not set -# CONFIG_ATARI_PARTITION is not set -# CONFIG_MAC_PARTITION is not set -CONFIG_MSDOS_PARTITION=y -# 
CONFIG_BSD_DISKLABEL is not set -# CONFIG_MINIX_SUBPARTITION is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set -CONFIG_LDM_PARTITION=y -# CONFIG_LDM_DEBUG is not set -# CONFIG_SGI_PARTITION is not set -# CONFIG_ULTRIX_PARTITION is not set -# CONFIG_SUN_PARTITION is not set -# CONFIG_KARMA_PARTITION is not set -CONFIG_EFI_PARTITION=y -# CONFIG_SYSV68_PARTITION is not set -CONFIG_CMDLINE_PARTITION=y -# end of Partition Types - -CONFIG_BLOCK_COMPAT=y -CONFIG_BLK_MQ_PCI=y -CONFIG_BLK_MQ_VIRTIO=y -CONFIG_BLK_PM=y -CONFIG_BLOCK_HOLDER_DEPRECATED=y -CONFIG_BLK_MQ_STACKING=y - -# -# IO Schedulers -# -CONFIG_MQ_IOSCHED_DEADLINE=y -CONFIG_MQ_IOSCHED_KYBER=m -CONFIG_IOSCHED_BFQ=m -CONFIG_BFQ_GROUP_IOSCHED=y -# CONFIG_BFQ_CGROUP_DEBUG is not set -# end of IO Schedulers - -CONFIG_PREEMPT_NOTIFIERS=y -CONFIG_ASN1=y -CONFIG_INLINE_SPIN_UNLOCK_IRQ=y -CONFIG_INLINE_READ_UNLOCK=y -CONFIG_INLINE_READ_UNLOCK_IRQ=y -CONFIG_INLINE_WRITE_UNLOCK=y -CONFIG_INLINE_WRITE_UNLOCK_IRQ=y -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_LOCK_SPIN_ON_OWNER=y -CONFIG_ARCH_USE_QUEUED_RWLOCKS=y -CONFIG_QUEUED_RWLOCKS=y -CONFIG_ARCH_HAS_MMIOWB=y -CONFIG_MMIOWB=y -CONFIG_FREEZER=y - -# -# Executable file formats -# -CONFIG_BINFMT_ELF=y -CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ELFCORE=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_BINFMT_SCRIPT=y -CONFIG_ARCH_HAS_BINFMT_FLAT=y -CONFIG_BINFMT_FLAT=y -CONFIG_BINFMT_FLAT_OLD=y -CONFIG_BINFMT_ZFLAT=y -CONFIG_BINFMT_MISC=m -CONFIG_COREDUMP=y -# end of Executable file formats - -# -# Memory Management options -# -CONFIG_ZPOOL=y -CONFIG_SWAP=y -CONFIG_ZSWAP=y -CONFIG_ZSWAP_DEFAULT_ON=y -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set -CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set -CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" -CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y -# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set -# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set -CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" -CONFIG_ZBUD=y -CONFIG_Z3FOLD=m -CONFIG_ZSMALLOC=m -CONFIG_ZSMALLOC_STAT=y - -# -# SLAB allocator options -# -# CONFIG_SLAB is not set -CONFIG_SLUB=y -# CONFIG_SLOB is not set -CONFIG_SLAB_MERGE_DEFAULT=y -# CONFIG_SLAB_FREELIST_RANDOM is not set -# CONFIG_SLAB_FREELIST_HARDENED is not set -# CONFIG_SLUB_STATS is not set -CONFIG_SLUB_CPU_PARTIAL=y -# end of SLAB allocator options - -CONFIG_SHUFFLE_PAGE_ALLOCATOR=y -CONFIG_COMPAT_BRK=y -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_FLATMEM_MANUAL=y -# CONFIG_SPARSEMEM_MANUAL is not set -CONFIG_FLATMEM=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_MEMORY_ISOLATION=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y -CONFIG_COMPACTION=y -CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 -CONFIG_PAGE_REPORTING=y -CONFIG_MIGRATION=y -CONFIG_CONTIG_ALLOC=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_MMU_NOTIFIER=y -# CONFIG_KSM is not set -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -CONFIG_ARCH_WANT_GENERAL_HUGETLB=y -CONFIG_ARCH_WANTS_THP_SWAP=y -# CONFIG_TRANSPARENT_HUGEPAGE is not set -CONFIG_FRONTSWAP=y -CONFIG_CMA=y -# CONFIG_CMA_DEBUG is not set -# CONFIG_CMA_DEBUGFS is not set -CONFIG_CMA_SYSFS=y -CONFIG_CMA_AREAS=7 -CONFIG_GENERIC_EARLY_IOREMAP=y -# CONFIG_IDLE_PAGE_TRACKING is not set -CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y -CONFIG_ZONE_DMA32=y -CONFIG_VM_EVENT_COUNTERS=y -# 
CONFIG_PERCPU_STATS is not set -# CONFIG_GUP_TEST is not set -CONFIG_ARCH_HAS_PTE_SPECIAL=y -CONFIG_SECRETMEM=y -# CONFIG_ANON_VMA_NAME is not set -# CONFIG_USERFAULTFD is not set -# CONFIG_LRU_GEN is not set - -# -# Data Access Monitoring -# -# CONFIG_DAMON is not set -# end of Data Access Monitoring -# end of Memory Management options - -CONFIG_NET=y -CONFIG_NET_INGRESS=y -CONFIG_NET_EGRESS=y -CONFIG_SKB_EXTENSIONS=y - -# -# Networking options -# -CONFIG_PACKET=y -CONFIG_PACKET_DIAG=y -CONFIG_UNIX=y -CONFIG_UNIX_SCM=y -CONFIG_AF_UNIX_OOB=y -CONFIG_UNIX_DIAG=y -# CONFIG_TLS is not set -CONFIG_XFRM=y -CONFIG_XFRM_ALGO=y -CONFIG_XFRM_USER=y -# CONFIG_XFRM_INTERFACE is not set -# CONFIG_XFRM_SUB_POLICY is not set -# CONFIG_XFRM_MIGRATE is not set -# CONFIG_XFRM_STATISTICS is not set -CONFIG_XFRM_AH=y -CONFIG_XFRM_ESP=y -CONFIG_XFRM_IPCOMP=y -CONFIG_NET_KEY=y -# CONFIG_NET_KEY_MIGRATE is not set -CONFIG_XDP_SOCKETS=y -CONFIG_XDP_SOCKETS_DIAG=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -# CONFIG_IP_FIB_TRIE_STATS is not set -CONFIG_IP_MULTIPLE_TABLES=y -# CONFIG_IP_ROUTE_MULTIPATH is not set -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_ROUTE_CLASSID=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_IP_PNP_RARP=y -CONFIG_NET_IPIP=y -CONFIG_NET_IPGRE_DEMUX=m -CONFIG_NET_IP_TUNNEL=y -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE_COMMON=y -CONFIG_IP_MROUTE=y -# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m -CONFIG_NET_UDP_TUNNEL=y -CONFIG_NET_FOU=y -CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -# CONFIG_INET_ESP_OFFLOAD is not set -# CONFIG_INET_ESPINTCP is not set -CONFIG_INET_IPCOMP=m -CONFIG_INET_TABLE_PERTURB_ORDER=16 -CONFIG_INET_XFRM_TUNNEL=m -CONFIG_INET_TUNNEL=y -CONFIG_INET_DIAG=y -CONFIG_INET_TCP_DIAG=y -CONFIG_INET_UDP_DIAG=y -CONFIG_INET_RAW_DIAG=y -# CONFIG_INET_DIAG_DESTROY is not set -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=m -CONFIG_TCP_CONG_CUBIC=y -CONFIG_TCP_CONG_WESTWOOD=m -CONFIG_TCP_CONG_HTCP=m -CONFIG_TCP_CONG_HSTCP=m -CONFIG_TCP_CONG_HYBLA=m -CONFIG_TCP_CONG_VEGAS=m -CONFIG_TCP_CONG_NV=m -CONFIG_TCP_CONG_SCALABLE=m -CONFIG_TCP_CONG_LP=m -CONFIG_TCP_CONG_VENO=m -CONFIG_TCP_CONG_YEAH=m -CONFIG_TCP_CONG_ILLINOIS=m -CONFIG_TCP_CONG_DCTCP=m -CONFIG_TCP_CONG_CDG=m -CONFIG_TCP_CONG_BBR=y -CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_BBR is not set -# CONFIG_DEFAULT_RENO is not set -CONFIG_DEFAULT_TCP_CONG="cubic" -# CONFIG_TCP_MD5SIG is not set -CONFIG_IPV6=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -# CONFIG_IPV6_OPTIMISTIC_DAD is not set -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -# CONFIG_INET6_ESP_OFFLOAD is not set -# CONFIG_INET6_ESPINTCP is not set -CONFIG_INET6_IPCOMP=y -CONFIG_IPV6_MIP6=y -# CONFIG_IPV6_ILA is not set -CONFIG_INET6_XFRM_TUNNEL=y -CONFIG_INET6_TUNNEL=y -CONFIG_IPV6_VTI=m -CONFIG_IPV6_SIT=y -# CONFIG_IPV6_SIT_6RD is not set -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=y -# CONFIG_IPV6_GRE is not set -CONFIG_IPV6_FOU=y -CONFIG_IPV6_FOU_TUNNEL=y -CONFIG_IPV6_MULTIPLE_TABLES=y -# CONFIG_IPV6_SUBTREES is not set -# CONFIG_IPV6_MROUTE is not set -# CONFIG_IPV6_SEG6_LWTUNNEL is not set -# CONFIG_IPV6_SEG6_HMAC is not set -# CONFIG_IPV6_RPL_LWTUNNEL is not set -# CONFIG_IPV6_IOAM6_LWTUNNEL is not set -# CONFIG_NETLABEL is not set -# CONFIG_MPTCP is not set -# CONFIG_NETWORK_SECMARK is not set -CONFIG_NET_PTP_CLASSIFY=y -# CONFIG_NETWORK_PHY_TIMESTAMPING is not set -CONFIG_NETFILTER=y 
-CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=y - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_INGRESS=y -CONFIG_NETFILTER_EGRESS=y -CONFIG_NETFILTER_SKIP_EGRESS=y -CONFIG_NETFILTER_NETLINK=y -CONFIG_NETFILTER_FAMILY_BRIDGE=y -CONFIG_NETFILTER_FAMILY_ARP=y -CONFIG_NETFILTER_NETLINK_HOOK=m -CONFIG_NETFILTER_NETLINK_ACCT=m -CONFIG_NETFILTER_NETLINK_QUEUE=y -CONFIG_NETFILTER_NETLINK_LOG=y -CONFIG_NETFILTER_NETLINK_OSF=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_LOG_SYSLOG=y -CONFIG_NETFILTER_CONNCOUNT=y -CONFIG_NF_CONNTRACK_MARK=y -# CONFIG_NF_CONNTRACK_ZONES is not set -CONFIG_NF_CONNTRACK_PROCFS=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_TIMEOUT=y -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_LABELS=y -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_GRE=y -CONFIG_NF_CT_PROTO_SCTP=y -CONFIG_NF_CT_PROTO_UDPLITE=y -# CONFIG_NF_CONNTRACK_AMANDA is not set -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_H323=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_BROADCAST=y -CONFIG_NF_CONNTRACK_NETBIOS_NS=y -CONFIG_NF_CONNTRACK_SNMP=y -CONFIG_NF_CONNTRACK_PPTP=y -# CONFIG_NF_CONNTRACK_SANE is not set -CONFIG_NF_CONNTRACK_SIP=y -CONFIG_NF_CONNTRACK_TFTP=y -CONFIG_NF_CT_NETLINK=y -# CONFIG_NF_CT_NETLINK_TIMEOUT is not set -# CONFIG_NF_CT_NETLINK_HELPER is not set -CONFIG_NETFILTER_NETLINK_GLUE_CT=y -CONFIG_NF_NAT=y -CONFIG_NF_NAT_FTP=y -CONFIG_NF_NAT_IRC=y -CONFIG_NF_NAT_SIP=y -CONFIG_NF_NAT_TFTP=y -CONFIG_NF_NAT_REDIRECT=y -CONFIG_NF_NAT_MASQUERADE=y -CONFIG_NETFILTER_SYNPROXY=y -CONFIG_NF_TABLES=y -CONFIG_NF_TABLES_INET=y -CONFIG_NF_TABLES_NETDEV=y -CONFIG_NFT_NUMGEN=y -CONFIG_NFT_CT=y -CONFIG_NFT_FLOW_OFFLOAD=y -CONFIG_NFT_CONNLIMIT=y -CONFIG_NFT_LOG=y -CONFIG_NFT_LIMIT=y -CONFIG_NFT_MASQ=y -CONFIG_NFT_REDIR=y -CONFIG_NFT_NAT=y -CONFIG_NFT_TUNNEL=y -# CONFIG_NFT_OBJREF is not set -CONFIG_NFT_QUEUE=y -CONFIG_NFT_QUOTA=y -CONFIG_NFT_REJECT=y -CONFIG_NFT_REJECT_INET=y -CONFIG_NFT_COMPAT=m -CONFIG_NFT_HASH=y -CONFIG_NFT_FIB=y -CONFIG_NFT_FIB_INET=y -CONFIG_NFT_XFRM=m -CONFIG_NFT_SOCKET=y -CONFIG_NFT_OSF=y -CONFIG_NFT_TPROXY=y -CONFIG_NFT_SYNPROXY=y -CONFIG_NF_DUP_NETDEV=y -CONFIG_NFT_DUP_NETDEV=y -CONFIG_NFT_FWD_NETDEV=y -CONFIG_NFT_FIB_NETDEV=y -CONFIG_NFT_REJECT_NETDEV=y -CONFIG_NF_FLOW_TABLE_INET=y -CONFIG_NF_FLOW_TABLE=y -CONFIG_NF_FLOW_TABLE_PROCFS=y -CONFIG_NETFILTER_XTABLES=m -CONFIG_NETFILTER_XTABLES_COMPAT=y - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=m -CONFIG_NETFILTER_XT_CONNMARK=m -CONFIG_NETFILTER_XT_SET=m - -# -# Xtables targets -# -# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set -# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set -# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -# CONFIG_NETFILTER_XT_TARGET_CT is not set -# CONFIG_NETFILTER_XT_TARGET_DSCP is not set -CONFIG_NETFILTER_XT_TARGET_HL=m -# CONFIG_NETFILTER_XT_TARGET_HMARK is not set -# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set -CONFIG_NETFILTER_XT_TARGET_LED=m -CONFIG_NETFILTER_XT_TARGET_LOG=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_NAT=m -CONFIG_NETFILTER_XT_TARGET_NETMAP=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set -CONFIG_NETFILTER_XT_TARGET_RATEEST=m -CONFIG_NETFILTER_XT_TARGET_REDIRECT=m -CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m -CONFIG_NETFILTER_XT_TARGET_TEE=m -CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m - -# -# Xtables matches -# 
-CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m -CONFIG_NETFILTER_XT_MATCH_CGROUP=m -CONFIG_NETFILTER_XT_MATCH_CLUSTER=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m -CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ECN=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_HL=m -CONFIG_NETFILTER_XT_MATCH_IPCOMP=m -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -# CONFIG_NETFILTER_XT_MATCH_IPVS is not set -CONFIG_NETFILTER_XT_MATCH_L2TP=m -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SCTP=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -# end of Core Netfilter Configuration - -CONFIG_IP_SET=y -CONFIG_IP_SET_MAX=256 -CONFIG_IP_SET_BITMAP_IP=y -CONFIG_IP_SET_BITMAP_IPMAC=y -CONFIG_IP_SET_BITMAP_PORT=y -CONFIG_IP_SET_HASH_IP=y -CONFIG_IP_SET_HASH_IPMARK=y -CONFIG_IP_SET_HASH_IPPORT=y -CONFIG_IP_SET_HASH_IPPORTIP=y -CONFIG_IP_SET_HASH_IPPORTNET=y -CONFIG_IP_SET_HASH_IPMAC=y -CONFIG_IP_SET_HASH_MAC=y -CONFIG_IP_SET_HASH_NETPORTNET=y -CONFIG_IP_SET_HASH_NET=y -CONFIG_IP_SET_HASH_NETNET=y -CONFIG_IP_SET_HASH_NETPORT=y -CONFIG_IP_SET_HASH_NETIFACE=y -CONFIG_IP_SET_LIST_SET=y -CONFIG_IP_VS=m -CONFIG_IP_VS_IPV6=y -# CONFIG_IP_VS_DEBUG is not set -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_AH_ESP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_PROTO_SCTP=y - -# -# IPVS scheduler -# -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_FO=m -CONFIG_IP_VS_OVF=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_MH=m -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m -CONFIG_IP_VS_TWOS=m - -# -# IPVS SH scheduler -# -CONFIG_IP_VS_SH_TAB_BITS=8 - -# -# IPVS MH scheduler -# -CONFIG_IP_VS_MH_TAB_INDEX=12 - -# -# IPVS application helper -# -CONFIG_IP_VS_FTP=m -CONFIG_IP_VS_NFCT=y -CONFIG_IP_VS_PE_SIP=m - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=y -CONFIG_NF_SOCKET_IPV4=y -CONFIG_NF_TPROXY_IPV4=y -CONFIG_NF_TABLES_IPV4=y -CONFIG_NFT_REJECT_IPV4=y -CONFIG_NFT_DUP_IPV4=y -CONFIG_NFT_FIB_IPV4=y -CONFIG_NF_TABLES_ARP=y -CONFIG_NF_DUP_IPV4=y -CONFIG_NF_LOG_ARP=y -CONFIG_NF_LOG_IPV4=y -CONFIG_NF_REJECT_IPV4=y -# CONFIG_NF_NAT_SNMP_BASIC is not set -CONFIG_NF_NAT_PPTP=y -CONFIG_NF_NAT_H323=y -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m 
-CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_SYNPROXY=m -CONFIG_IP_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_IP_NF_MANGLE=m -# CONFIG_IP_NF_TARGET_CLUSTERIP is not set -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_SECURITY=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m -# end of IP: Netfilter Configuration - -# -# IPv6: Netfilter Configuration -# -CONFIG_NF_SOCKET_IPV6=y -CONFIG_NF_TPROXY_IPV6=y -CONFIG_NF_TABLES_IPV6=y -CONFIG_NFT_REJECT_IPV6=y -CONFIG_NFT_DUP_IPV6=y -CONFIG_NFT_FIB_IPV6=y -CONFIG_NF_DUP_IPV6=y -CONFIG_NF_REJECT_IPV6=y -CONFIG_NF_LOG_IPV6=y -CONFIG_IP6_NF_IPTABLES=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m -CONFIG_IP6_NF_MATCH_RT=m -CONFIG_IP6_NF_MATCH_SRH=m -CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_TARGET_SYNPROXY=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_IP6_NF_SECURITY=m -CONFIG_IP6_NF_NAT=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m -# end of IPv6: Netfilter Configuration - -CONFIG_NF_DEFRAG_IPV6=y -CONFIG_NF_TABLES_BRIDGE=y -CONFIG_NFT_BRIDGE_META=y -CONFIG_NFT_BRIDGE_REJECT=y -CONFIG_NF_CONNTRACK_BRIDGE=y -CONFIG_BRIDGE_NF_EBTABLES=m -CONFIG_BRIDGE_EBT_BROUTE=m -CONFIG_BRIDGE_EBT_T_FILTER=m -CONFIG_BRIDGE_EBT_T_NAT=m -CONFIG_BRIDGE_EBT_802_3=m -CONFIG_BRIDGE_EBT_AMONG=m -CONFIG_BRIDGE_EBT_ARP=m -CONFIG_BRIDGE_EBT_IP=m -CONFIG_BRIDGE_EBT_IP6=m -CONFIG_BRIDGE_EBT_LIMIT=m -CONFIG_BRIDGE_EBT_MARK=m -CONFIG_BRIDGE_EBT_PKTTYPE=m -CONFIG_BRIDGE_EBT_STP=m -CONFIG_BRIDGE_EBT_VLAN=m -CONFIG_BRIDGE_EBT_ARPREPLY=m -CONFIG_BRIDGE_EBT_DNAT=m -CONFIG_BRIDGE_EBT_MARK_T=m -CONFIG_BRIDGE_EBT_REDIRECT=m -CONFIG_BRIDGE_EBT_SNAT=m -CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_NFLOG=m -CONFIG_BPFILTER=y -# CONFIG_IP_DCCP is not set -CONFIG_IP_SCTP=y -# CONFIG_SCTP_DBG_OBJCNT is not set -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set -CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set -CONFIG_SCTP_COOKIE_HMAC_MD5=y -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_INET_SCTP_DIAG=y -CONFIG_RDS=m -# CONFIG_RDS_TCP is not set -# CONFIG_RDS_DEBUG is not set -CONFIG_TIPC=y -CONFIG_TIPC_MEDIA_UDP=y -CONFIG_TIPC_CRYPTO=y -CONFIG_TIPC_DIAG=y -# CONFIG_ATM is not set -# CONFIG_L2TP is not set -CONFIG_STP=y -CONFIG_GARP=y -CONFIG_MRP=y -CONFIG_BRIDGE=y -CONFIG_BRIDGE_IGMP_SNOOPING=y -CONFIG_BRIDGE_VLAN_FILTERING=y -CONFIG_BRIDGE_MRP=y -CONFIG_BRIDGE_CFM=y -# CONFIG_NET_DSA is not set -CONFIG_VLAN_8021Q=y -CONFIG_VLAN_8021Q_GVRP=y -CONFIG_VLAN_8021Q_MVRP=y -CONFIG_LLC=y -# CONFIG_LLC2 is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_PHONET is not set -# CONFIG_6LOWPAN is not set -# CONFIG_IEEE802154 is not set -CONFIG_NET_SCHED=y - -# -# Queueing/Scheduling -# -# CONFIG_NET_SCH_CBQ is not set -CONFIG_NET_SCH_HTB=y -CONFIG_NET_SCH_HFSC=y -# CONFIG_NET_SCH_PRIO is not set -# CONFIG_NET_SCH_MULTIQ is not set -# CONFIG_NET_SCH_RED is not set -# CONFIG_NET_SCH_SFB is not set -# CONFIG_NET_SCH_SFQ is not set -# CONFIG_NET_SCH_TEQL is not set -# CONFIG_NET_SCH_TBF is not set -# CONFIG_NET_SCH_CBS is not set -# CONFIG_NET_SCH_ETF is not set -# 
CONFIG_NET_SCH_TAPRIO is not set -# CONFIG_NET_SCH_GRED is not set -# CONFIG_NET_SCH_DSMARK is not set -# CONFIG_NET_SCH_NETEM is not set -# CONFIG_NET_SCH_DRR is not set -# CONFIG_NET_SCH_MQPRIO is not set -# CONFIG_NET_SCH_SKBPRIO is not set -# CONFIG_NET_SCH_CHOKE is not set -# CONFIG_NET_SCH_QFQ is not set -# CONFIG_NET_SCH_CODEL is not set -CONFIG_NET_SCH_FQ_CODEL=y -# CONFIG_NET_SCH_CAKE is not set -CONFIG_NET_SCH_FQ=y -# CONFIG_NET_SCH_HHF is not set -# CONFIG_NET_SCH_PIE is not set -# CONFIG_NET_SCH_INGRESS is not set -# CONFIG_NET_SCH_PLUG is not set -# CONFIG_NET_SCH_ETS is not set -# CONFIG_NET_SCH_DEFAULT is not set - -# -# Classification -# -CONFIG_NET_CLS=y -CONFIG_NET_CLS_BASIC=y -# CONFIG_NET_CLS_TCINDEX is not set -CONFIG_NET_CLS_ROUTE4=y -CONFIG_NET_CLS_FW=y -# CONFIG_NET_CLS_U32 is not set -# CONFIG_NET_CLS_RSVP is not set -# CONFIG_NET_CLS_RSVP6 is not set -# CONFIG_NET_CLS_FLOW is not set -CONFIG_NET_CLS_CGROUP=y -# CONFIG_NET_CLS_BPF is not set -# CONFIG_NET_CLS_FLOWER is not set -CONFIG_NET_CLS_MATCHALL=y -# CONFIG_NET_EMATCH is not set -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=y -CONFIG_NET_ACT_GACT=y -# CONFIG_GACT_PROB is not set -# CONFIG_NET_ACT_MIRRED is not set -# CONFIG_NET_ACT_SAMPLE is not set -# CONFIG_NET_ACT_IPT is not set -# CONFIG_NET_ACT_NAT is not set -# CONFIG_NET_ACT_PEDIT is not set -# CONFIG_NET_ACT_SIMP is not set -# CONFIG_NET_ACT_SKBEDIT is not set -# CONFIG_NET_ACT_CSUM is not set -# CONFIG_NET_ACT_MPLS is not set -# CONFIG_NET_ACT_VLAN is not set -# CONFIG_NET_ACT_BPF is not set -# CONFIG_NET_ACT_CONNMARK is not set -# CONFIG_NET_ACT_CTINFO is not set -# CONFIG_NET_ACT_SKBMOD is not set -# CONFIG_NET_ACT_IFE is not set -# CONFIG_NET_ACT_TUNNEL_KEY is not set -# CONFIG_NET_ACT_CT is not set -# CONFIG_NET_ACT_GATE is not set -# CONFIG_NET_TC_SKB_EXT is not set -CONFIG_NET_SCH_FIFO=y -# CONFIG_DCB is not set -CONFIG_DNS_RESOLVER=y -CONFIG_BATMAN_ADV=m -CONFIG_BATMAN_ADV_BATMAN_V=y -CONFIG_BATMAN_ADV_BLA=y -CONFIG_BATMAN_ADV_DAT=y -# CONFIG_BATMAN_ADV_NC is not set -CONFIG_BATMAN_ADV_MCAST=y -# CONFIG_BATMAN_ADV_DEBUG is not set -# CONFIG_BATMAN_ADV_TRACING is not set -# CONFIG_OPENVSWITCH is not set -# CONFIG_VSOCKETS is not set -CONFIG_NETLINK_DIAG=y -# CONFIG_MPLS is not set -# CONFIG_NET_NSH is not set -# CONFIG_HSR is not set -CONFIG_NET_SWITCHDEV=y -CONFIG_NET_L3_MASTER_DEV=y -# CONFIG_QRTR is not set -CONFIG_NET_NCSI=y -CONFIG_NCSI_OEM_CMD_GET_MAC=y -CONFIG_NCSI_OEM_CMD_KEEP_PHY=y -CONFIG_PCPU_DEV_REFCNT=y -CONFIG_RPS=y -CONFIG_RFS_ACCEL=y -CONFIG_SOCK_RX_QUEUE_MAPPING=y -CONFIG_XPS=y -CONFIG_CGROUP_NET_PRIO=y -CONFIG_CGROUP_NET_CLASSID=y -CONFIG_NET_RX_BUSY_POLL=y -CONFIG_BQL=y -CONFIG_BPF_STREAM_PARSER=y -CONFIG_NET_FLOW_LIMIT=y - -# -# Network testing -# -# CONFIG_NET_PKTGEN is not set -# CONFIG_NET_DROP_MONITOR is not set -# end of Network testing -# end of Networking options - -CONFIG_HAMRADIO=y - -# -# Packet Radio protocols -# -CONFIG_AX25=m -CONFIG_AX25_DAMA_SLAVE=y -CONFIG_NETROM=m -CONFIG_ROSE=m - -# -# AX.25 network device drivers -# -CONFIG_MKISS=m -CONFIG_6PACK=m -CONFIG_BPQETHER=m -CONFIG_BAYCOM_SER_FDX=m -CONFIG_BAYCOM_SER_HDX=m -CONFIG_YAM=m -# end of AX.25 network device drivers - -# CONFIG_CAN is not set -CONFIG_BT=y -CONFIG_BT_BREDR=y -CONFIG_BT_RFCOMM=m -CONFIG_BT_RFCOMM_TTY=y -CONFIG_BT_BNEP=m -CONFIG_BT_BNEP_MC_FILTER=y -CONFIG_BT_BNEP_PROTO_FILTER=y -CONFIG_BT_HIDP=m -# CONFIG_BT_HS is not set -CONFIG_BT_LE=y -CONFIG_BT_LEDS=y -# CONFIG_BT_MSFTEXT is not set -# CONFIG_BT_AOSPEXT is not set -CONFIG_BT_DEBUGFS=y -# 
CONFIG_BT_SELFTEST is not set -# CONFIG_BT_FEATURE_DEBUG is not set - -# -# Bluetooth device drivers -# -CONFIG_BT_INTEL=m -CONFIG_BT_BCM=m -CONFIG_BT_RTL=m -CONFIG_BT_MTK=m -CONFIG_BT_HCIBTUSB=m -# CONFIG_BT_HCIBTUSB_AUTOSUSPEND is not set -CONFIG_BT_HCIBTUSB_BCM=y -# CONFIG_BT_HCIBTUSB_MTK is not set -CONFIG_BT_HCIBTUSB_RTL=y -# CONFIG_BT_HCIBTUSB_RTLBTUSB is not set -CONFIG_BT_HCIBTSDIO=m -CONFIG_BT_HCIUART=m -CONFIG_BT_HCIUART_SERDEV=y -CONFIG_BT_HCIUART_H4=y -# CONFIG_BT_HCIUART_NOKIA is not set -# CONFIG_BT_HCIUART_BCSP is not set -# CONFIG_BT_HCIUART_ATH3K is not set -CONFIG_BT_HCIUART_LL=y -CONFIG_BT_HCIUART_3WIRE=y -# CONFIG_BT_HCIUART_INTEL is not set -CONFIG_BT_HCIUART_BCM=y -CONFIG_BT_HCIUART_RTL=y -# CONFIG_BT_HCIUART_QCA is not set -# CONFIG_BT_HCIUART_AG6XX is not set -# CONFIG_BT_HCIUART_MRVL is not set -CONFIG_BT_HCIBCM203X=m -# CONFIG_BT_HCIBPA10X is not set -# CONFIG_BT_HCIBFUSB is not set -# CONFIG_BT_HCIVHCI is not set -# CONFIG_BT_MRVL is not set -# CONFIG_BT_ATH3K is not set -CONFIG_BT_MTKSDIO=m -CONFIG_BT_MTKUART=m -# CONFIG_BT_QCOMSMD is not set -# CONFIG_BT_VIRTIO is not set -# end of Bluetooth device drivers - -# CONFIG_AF_RXRPC is not set -# CONFIG_AF_KCM is not set -CONFIG_STREAM_PARSER=y -# CONFIG_MCTP is not set -CONFIG_FIB_RULES=y -CONFIG_WIRELESS=y -CONFIG_CFG80211=m -# CONFIG_NL80211_TESTMODE is not set -# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set -# CONFIG_CFG80211_CERTIFICATION_ONUS is not set -CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y -CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y -CONFIG_CFG80211_DEFAULT_PS=y -# CONFIG_CFG80211_DEBUGFS is not set -CONFIG_CFG80211_CRDA_SUPPORT=y -# CONFIG_CFG80211_WEXT is not set -CONFIG_MAC80211=m -CONFIG_MAC80211_HAS_RC=y -CONFIG_MAC80211_RC_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" -CONFIG_MAC80211_MESH=y -CONFIG_MAC80211_LEDS=y -# CONFIG_MAC80211_DEBUGFS is not set -# CONFIG_MAC80211_MESSAGE_TRACING is not set -# CONFIG_MAC80211_DEBUG_MENU is not set -CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 -CONFIG_RFKILL=y -CONFIG_RFKILL_LEDS=y -CONFIG_RFKILL_INPUT=y -CONFIG_RFKILL_GPIO=y -# CONFIG_NET_9P is not set -CONFIG_CAIF=y -# CONFIG_CAIF_DEBUG is not set -CONFIG_CAIF_NETDEV=y -# CONFIG_CAIF_USB is not set -CONFIG_CEPH_LIB=m -# CONFIG_CEPH_LIB_PRETTYDEBUG is not set -CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y -CONFIG_NFC=m -CONFIG_NFC_DIGITAL=m -CONFIG_NFC_NCI=m -CONFIG_NFC_NCI_SPI=m -CONFIG_NFC_NCI_UART=m -CONFIG_NFC_HCI=m -CONFIG_NFC_SHDLC=y - -# -# Near Field Communication (NFC) devices -# -CONFIG_NFC_TRF7970A=m -CONFIG_NFC_SIM=m -CONFIG_NFC_PORT100=m -CONFIG_NFC_VIRTUAL_NCI=m -CONFIG_NFC_FDP=m -CONFIG_NFC_FDP_I2C=m -CONFIG_NFC_PN544=m -CONFIG_NFC_PN544_I2C=m -CONFIG_NFC_PN533=m -CONFIG_NFC_PN533_USB=m -CONFIG_NFC_PN533_I2C=m -CONFIG_NFC_PN532_UART=m -CONFIG_NFC_MICROREAD=m -CONFIG_NFC_MICROREAD_I2C=m -CONFIG_NFC_MRVL=m -CONFIG_NFC_MRVL_USB=m -CONFIG_NFC_MRVL_UART=m -CONFIG_NFC_MRVL_I2C=m -CONFIG_NFC_MRVL_SPI=m -CONFIG_NFC_ST21NFCA=m -CONFIG_NFC_ST21NFCA_I2C=m -CONFIG_NFC_ST_NCI=m -CONFIG_NFC_ST_NCI_I2C=m -CONFIG_NFC_ST_NCI_SPI=m -CONFIG_NFC_NXP_NCI=m -CONFIG_NFC_NXP_NCI_I2C=m -CONFIG_NFC_S3FWRN5=m -CONFIG_NFC_S3FWRN5_I2C=m -CONFIG_NFC_S3FWRN82_UART=m -CONFIG_NFC_ST95HF=m -# end of Near Field Communication (NFC) devices - -# CONFIG_PSAMPLE is not set -# CONFIG_NET_IFE is not set -CONFIG_LWTUNNEL=y -CONFIG_LWTUNNEL_BPF=y -CONFIG_DST_CACHE=y -CONFIG_GRO_CELLS=y -CONFIG_NET_SELFTESTS=y -CONFIG_NET_SOCK_MSG=y -CONFIG_PAGE_POOL=y -# CONFIG_PAGE_POOL_STATS is not set -CONFIG_FAILOVER=y 
-CONFIG_ETHTOOL_NETLINK=y - -# -# Device Drivers -# -CONFIG_HAVE_PCI=y -CONFIG_PCI=y -CONFIG_PCI_DOMAINS=y -CONFIG_PCI_DOMAINS_GENERIC=y -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -CONFIG_PCIEAER=y -# CONFIG_PCIEAER_INJECT is not set -# CONFIG_PCIE_ECRC is not set -CONFIG_PCIEASPM=y -CONFIG_PCIEASPM_DEFAULT=y -# CONFIG_PCIEASPM_POWERSAVE is not set -# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set -# CONFIG_PCIEASPM_PERFORMANCE is not set -CONFIG_PCIE_PME=y -# CONFIG_PCIE_DPC is not set -# CONFIG_PCIE_PTM is not set -CONFIG_PCI_MSI=y -CONFIG_PCI_MSI_IRQ_DOMAIN=y -CONFIG_PCI_QUIRKS=y -# CONFIG_PCI_DEBUG is not set -# CONFIG_PCI_STUB is not set -CONFIG_PCI_ECAM=y -# CONFIG_PCI_IOV is not set -# CONFIG_PCI_PRI is not set -# CONFIG_PCI_PASID is not set -# CONFIG_PCIE_BUS_TUNE_OFF is not set -CONFIG_PCIE_BUS_DEFAULT=y -# CONFIG_PCIE_BUS_SAFE is not set -# CONFIG_PCIE_BUS_PERFORMANCE is not set -# CONFIG_PCIE_BUS_PEER2PEER is not set -CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=16 -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_CPCI=y -CONFIG_HOTPLUG_PCI_SHPC=y - -# -# PCI controller drivers -# -# CONFIG_PCI_AARDVARK is not set -# CONFIG_PCIE_XILINX_NWL is not set -# CONFIG_PCI_FTPCI100 is not set -# CONFIG_PCI_TEGRA is not set -# CONFIG_PCIE_RCAR_HOST is not set -CONFIG_PCI_HOST_COMMON=m -CONFIG_PCI_HOST_GENERIC=m -# CONFIG_PCIE_XILINX is not set -# CONFIG_PCIE_XILINX_CPM is not set -# CONFIG_PCI_XGENE is not set -# CONFIG_PCI_V3_SEMI is not set -# CONFIG_PCI_VERSATILE is not set -# CONFIG_PCIE_ALTERA is not set -# CONFIG_PCI_HOST_THUNDER_PEM is not set -# CONFIG_PCI_HOST_THUNDER_ECAM is not set -# CONFIG_PCIE_ROCKCHIP_HOST is not set -# CONFIG_PCIE_MEDIATEK is not set -# CONFIG_PCIE_MEDIATEK_GEN3 is not set -# CONFIG_PCIE_BRCMSTB is not set -# CONFIG_PCI_LOONGSON is not set -# CONFIG_PCIE_MICROCHIP_HOST is not set -# CONFIG_PCIE_APPLE is not set -# CONFIG_PCIE_MT7621 is not set - -# -# DesignWare PCI Core Support -# -CONFIG_PCIE_DW=y -CONFIG_PCIE_DW_HOST=y -CONFIG_PCI_K1X=y -CONFIG_PCI_K1X_HOST=y -CONFIG_PCIE_DW_PLAT=y -CONFIG_PCIE_DW_PLAT_HOST=y -# CONFIG_PCI_EXYNOS is not set -# CONFIG_PCI_IMX6 is not set -# CONFIG_PCIE_SPEAR13XX is not set -# CONFIG_PCI_KEYSTONE_HOST is not set -# CONFIG_PCI_LAYERSCAPE is not set -# CONFIG_PCI_HISI is not set -# CONFIG_PCIE_QCOM is not set -# CONFIG_PCIE_ARMADA_8K is not set -# CONFIG_PCIE_ARTPEC6_HOST is not set -# CONFIG_PCIE_ROCKCHIP_DW_HOST is not set -# CONFIG_PCIE_INTEL_GW is not set -# CONFIG_PCIE_KEEMBAY_HOST is not set -# CONFIG_PCIE_KIRIN is not set -# CONFIG_PCIE_HISI_STB is not set -# CONFIG_PCI_MESON is not set -# CONFIG_PCIE_TEGRA194_HOST is not set -# CONFIG_PCIE_VISCONTI_HOST is not set -# CONFIG_PCIE_UNIPHIER is not set -# CONFIG_PCIE_AL is not set -# CONFIG_PCIE_FU740 is not set -CONFIG_PCIE_SPACEMIT=y -# end of DesignWare PCI Core Support - -# -# Mobiveil PCIe Core Support -# -# CONFIG_PCIE_MOBIVEIL_PLAT is not set -# CONFIG_PCIE_LAYERSCAPE_GEN4 is not set -# end of Mobiveil PCIe Core Support - -# -# Cadence PCIe controllers support -# -# CONFIG_PCIE_CADENCE_PLAT_HOST is not set -# CONFIG_PCI_J721E_HOST is not set -# end of Cadence PCIe controllers support -# end of PCI controller drivers - -# -# PCI Endpoint -# -# CONFIG_PCI_ENDPOINT is not set -# end of PCI Endpoint - -# -# PCI switch controller drivers -# -# CONFIG_PCI_SW_SWITCHTEC is not set -# end of PCI switch controller drivers - -# CONFIG_CXL_BUS is not set -# CONFIG_PCCARD is not set -# CONFIG_RAPIDIO is not set - -# -# Generic Driver Options -# -CONFIG_AUXILIARY_BUS=y -# 
CONFIG_UEVENT_HELPER is not set -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -# CONFIG_DEVTMPFS_SAFE is not set -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y - -# -# Firmware loader -# -CONFIG_FW_LOADER=y -CONFIG_FW_LOADER_PAGED_BUF=y -CONFIG_EXTRA_FIRMWARE="esos.elf" -CONFIG_EXTRA_FIRMWARE_DIR="firmware" -# CONFIG_FW_LOADER_USER_HELPER is not set -CONFIG_FW_LOADER_COMPRESS=y -CONFIG_FW_LOADER_COMPRESS_XZ=y -CONFIG_FW_LOADER_COMPRESS_ZSTD=y -CONFIG_FW_CACHE=y -# CONFIG_FW_UPLOAD is not set -# end of Firmware loader - -CONFIG_WANT_DEV_COREDUMP=y -CONFIG_ALLOW_DEV_COREDUMP=y -CONFIG_DEV_COREDUMP=y -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set -# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set -CONFIG_REGMAP=y -CONFIG_REGMAP_I2C=y -CONFIG_REGMAP_SPI=y -CONFIG_REGMAP_MMIO=y -CONFIG_REGMAP_IRQ=y -CONFIG_DMA_SHARED_BUFFER=y -# CONFIG_DMA_FENCE_TRACE is not set -CONFIG_GENERIC_ARCH_TOPOLOGY=y -# end of Generic Driver Options - -# -# Bus devices -# -# CONFIG_ARM_INTEGRATOR_LM is not set -# CONFIG_BT1_APB is not set -# CONFIG_BT1_AXI is not set -# CONFIG_MOXTET is not set -# CONFIG_HISILICON_LPC is not set -# CONFIG_INTEL_IXP4XX_EB is not set -# CONFIG_QCOM_EBI2 is not set -# CONFIG_MHI_BUS is not set -# CONFIG_MHI_BUS_EP is not set -# end of Bus devices - -# CONFIG_CONNECTOR is not set - -# -# Firmware Drivers -# - -# -# ARM System Control and Management Interface Protocol -# -CONFIG_ARM_SCMI_PROTOCOL=y -CONFIG_ARM_SCMI_HAVE_TRANSPORT=y -CONFIG_ARM_SCMI_HAVE_SHMEM=y -CONFIG_ARM_SCMI_TRANSPORT_MAILBOX=y -# CONFIG_ARM_SCMI_TRANSPORT_VIRTIO is not set -# CONFIG_ARM_SCMI_POWER_DOMAIN is not set -# CONFIG_ARM_SCMI_POWER_CONTROL is not set -# end of ARM System Control and Management Interface Protocol - -# CONFIG_ARM_SCPI_PROTOCOL is not set -CONFIG_ARM_SCPI_POWER_DOMAIN=y -CONFIG_FIRMWARE_MEMMAP=y -# CONFIG_SYSFB_SIMPLEFB is not set -# CONFIG_TURRIS_MOX_RWTM is not set -# CONFIG_BCM47XX_NVRAM is not set -# CONFIG_GOOGLE_FIRMWARE is not set - -# -# EFI (Extensible Firmware Interface) Support -# -CONFIG_EFI_ESRT=y -CONFIG_EFI_PARAMS_FROM_FDT=y -CONFIG_EFI_RUNTIME_WRAPPERS=y -CONFIG_EFI_GENERIC_STUB=y -# CONFIG_EFI_ZBOOT is not set -# CONFIG_EFI_BOOTLOADER_CONTROL is not set -# CONFIG_EFI_CAPSULE_LOADER is not set -# CONFIG_EFI_TEST is not set -# CONFIG_RESET_ATTACK_MITIGATION is not set -# CONFIG_EFI_RCI2_TABLE is not set -# CONFIG_EFI_DISABLE_PCI_DMA is not set -CONFIG_EFI_EARLYCON=y -# CONFIG_EFI_DISABLE_RUNTIME is not set -# CONFIG_EFI_COCO_SECRET is not set -# end of EFI (Extensible Firmware Interface) Support - -# -# Tegra firmware driver -# -# end of Tegra firmware driver -# end of Firmware Drivers - -# CONFIG_GNSS is not set -CONFIG_MTD=y -# CONFIG_MTD_TESTS is not set - -# -# Partition parsers -# -# CONFIG_MTD_AR7_PARTS is not set -# CONFIG_MTD_BCM63XX_PARTS is not set -# CONFIG_MTD_BRCM_U_BOOT is not set -# CONFIG_MTD_CMDLINE_PARTS is not set -CONFIG_MTD_OF_PARTS=y -# CONFIG_MTD_OF_PARTS_BCM4908 is not set -# CONFIG_MTD_OF_PARTS_LINKSYS_NS is not set -# CONFIG_MTD_PARSER_IMAGETAG is not set -# CONFIG_MTD_PARSER_TRX is not set -# CONFIG_MTD_SHARPSL_PARTS is not set -# CONFIG_MTD_REDBOOT_PARTS is not set -# end of Partition parsers - -# -# User Modules And Translation Layers -# -CONFIG_MTD_BLKDEVS=y -CONFIG_MTD_BLOCK=y - -# -# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. 
-# -# CONFIG_FTL is not set -# CONFIG_NFTL is not set -# CONFIG_INFTL is not set -# CONFIG_RFD_FTL is not set -# CONFIG_SSFDC is not set -# CONFIG_SM_FTL is not set -# CONFIG_MTD_OOPS is not set -# CONFIG_MTD_SWAP is not set -# CONFIG_MTD_PARTITIONED_MASTER is not set - -# -# RAM/ROM/Flash chip drivers -# -# CONFIG_MTD_CFI is not set -# CONFIG_MTD_JEDECPROBE is not set -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -CONFIG_MTD_CFI_I1=y -CONFIG_MTD_CFI_I2=y -# CONFIG_MTD_RAM is not set -# CONFIG_MTD_ROM is not set -# CONFIG_MTD_ABSENT is not set -# end of RAM/ROM/Flash chip drivers - -# -# Mapping drivers for chip access -# -# CONFIG_MTD_COMPLEX_MAPPINGS is not set -# CONFIG_MTD_TS5500 is not set -# CONFIG_MTD_INTEL_VR_NOR is not set -# CONFIG_MTD_PLATRAM is not set -# end of Mapping drivers for chip access - -# -# Self-contained MTD device drivers -# -# CONFIG_MTD_PMC551 is not set -# CONFIG_MTD_DATAFLASH is not set -# CONFIG_MTD_MCHP23K256 is not set -# CONFIG_MTD_MCHP48L640 is not set -CONFIG_MTD_SPEAR_SMI=y -# CONFIG_MTD_SST25L is not set -# CONFIG_MTD_SLRAM is not set -# CONFIG_MTD_PHRAM is not set -# CONFIG_MTD_MTDRAM is not set -# CONFIG_MTD_BLOCK2MTD is not set - -# -# Disk-On-Chip Device Drivers -# -# CONFIG_MTD_DOCG3 is not set -# end of Self-contained MTD device drivers - -# -# NAND -# -CONFIG_MTD_NAND_CORE=y -# CONFIG_MTD_ONENAND is not set -# CONFIG_MTD_RAW_NAND is not set -CONFIG_MTD_SPI_NAND=y - -# -# ECC engine support -# -CONFIG_MTD_NAND_ECC=y -# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set -# CONFIG_MTD_NAND_ECC_SW_BCH is not set -# CONFIG_MTD_NAND_ECC_MXIC is not set -# CONFIG_MTD_NAND_ECC_MEDIATEK is not set -# end of ECC engine support -# end of NAND - -# -# LPDDR & LPDDR2 PCM memory drivers -# -# CONFIG_MTD_LPDDR is not set -# end of LPDDR & LPDDR2 PCM memory drivers - -CONFIG_MTD_SPI_NOR=y -CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y -# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set -CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y -# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set -# CONFIG_SPI_HISI_SFC is not set -# CONFIG_SPI_NXP_SPIFI is not set -# CONFIG_MTD_UBI is not set -# CONFIG_MTD_HYPERBUS is not set -CONFIG_DTC=y -CONFIG_OF=y -# CONFIG_OF_UNITTEST is not set -# CONFIG_OF_ALL_DTBS is not set -CONFIG_OF_FLATTREE=y -CONFIG_OF_EARLY_FLATTREE=y -CONFIG_OF_KOBJ=y -CONFIG_OF_DYNAMIC=y -CONFIG_OF_ADDRESS=y -CONFIG_OF_IRQ=y -CONFIG_OF_RESERVED_MEM=y -CONFIG_OF_RESOLVE=y -CONFIG_OF_OVERLAY=y -# CONFIG_PARPORT is not set -CONFIG_BLK_DEV=y -# CONFIG_BLK_DEV_NULL_BLK is not set -CONFIG_CDROM=m -# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set -CONFIG_ZRAM=m -CONFIG_ZRAM_DEF_COMP_LZORLE=y -# CONFIG_ZRAM_DEF_COMP_ZSTD is not set -# CONFIG_ZRAM_DEF_COMP_LZ4 is not set -# CONFIG_ZRAM_DEF_COMP_LZO is not set -# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set -# CONFIG_ZRAM_DEF_COMP_842 is not set -CONFIG_ZRAM_DEF_COMP="lzo-rle" -# CONFIG_ZRAM_WRITEBACK is not set -CONFIG_ZRAM_MEMORY_TRACKING=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 -# CONFIG_BLK_DEV_DRBD is not set -# CONFIG_BLK_DEV_NBD is not set -# CONFIG_BLK_DEV_RAM is not set -# CONFIG_CDROM_PKTCDVD is not set -# CONFIG_ATA_OVER_ETH is not set -CONFIG_VIRTIO_BLK=y -# CONFIG_BLK_DEV_RBD is not set -# CONFIG_BLK_DEV_UBLK is not set - -# -# NVME Support -# -CONFIG_NVME_CORE=m -CONFIG_BLK_DEV_NVME=m -CONFIG_NVME_MULTIPATH=y -# CONFIG_NVME_VERBOSE_ERRORS is not set -CONFIG_NVME_HWMON=y -CONFIG_NVME_FABRICS=m -# CONFIG_NVME_FC is not set -# CONFIG_NVME_TCP is not set -# CONFIG_NVME_AUTH is not set 
-CONFIG_NVME_TARGET=m -CONFIG_NVME_TARGET_PASSTHRU=y -CONFIG_NVME_TARGET_LOOP=m -# CONFIG_NVME_TARGET_FC is not set -# CONFIG_NVME_TARGET_TCP is not set -# CONFIG_NVME_TARGET_AUTH is not set -# end of NVME Support - -# -# Misc devices -# -# CONFIG_AD525X_DPOT is not set -CONFIG_DUMMY_IRQ=m -# CONFIG_PHANTOM is not set -# CONFIG_TIFM_CORE is not set -# CONFIG_ICS932S401 is not set -# CONFIG_ATMEL_SSC is not set -CONFIG_ENCLOSURE_SERVICES=m -# CONFIG_GEHC_ACHC is not set -# CONFIG_HP_ILO is not set -# CONFIG_QCOM_COINCELL is not set -# CONFIG_QCOM_FASTRPC is not set -# CONFIG_APDS9802ALS is not set -# CONFIG_ISL29003 is not set -# CONFIG_ISL29020 is not set -# CONFIG_SENSORS_TSL2550 is not set -# CONFIG_SENSORS_BH1770 is not set -# CONFIG_SENSORS_APDS990X is not set -# CONFIG_HMC6352 is not set -# CONFIG_DS1682 is not set -# CONFIG_PCH_PHUB is not set -# CONFIG_LATTICE_ECP3_CONFIG is not set -CONFIG_SRAM=y -# CONFIG_DW_XDATA_PCIE is not set -# CONFIG_PCI_ENDPOINT_TEST is not set -# CONFIG_XILINX_SDFEC is not set -CONFIG_MISC_RTSX=m -# CONFIG_HISI_HIKEY_USB is not set -# CONFIG_OPEN_DICE is not set -# CONFIG_VCPU_STALL_DETECTOR is not set -CONFIG_SPACEMIT_TCM=y -# CONFIG_C2PORT is not set - -# -# EEPROM support -# -CONFIG_EEPROM_AT24=y -# CONFIG_EEPROM_AT25 is not set -# CONFIG_EEPROM_LEGACY is not set -# CONFIG_EEPROM_MAX6875 is not set -# CONFIG_EEPROM_93CX6 is not set -# CONFIG_EEPROM_93XX46 is not set -# CONFIG_EEPROM_IDT_89HPESX is not set -# CONFIG_EEPROM_EE1004 is not set -# end of EEPROM support - -# CONFIG_CB710_CORE is not set - -# -# Texas Instruments shared transport line discipline -# -# CONFIG_TI_ST is not set -# end of Texas Instruments shared transport line discipline - -# CONFIG_SENSORS_LIS3_SPI is not set -# CONFIG_SENSORS_LIS3_I2C is not set -# CONFIG_ALTERA_STAPL is not set -# CONFIG_GENWQE is not set -# CONFIG_ECHO is not set -# CONFIG_BCM_VK is not set -# CONFIG_MISC_ALCOR_PCI is not set -CONFIG_MISC_RTSX_PCI=m -CONFIG_MISC_RTSX_USB=m -# CONFIG_HABANA_AI is not set -# CONFIG_PVPANIC is not set -# CONFIG_GP_PCI1XXXX is not set -# end of Misc devices - -# -# SCSI device support -# -CONFIG_SCSI_MOD=y -CONFIG_RAID_ATTRS=m -CONFIG_SCSI_COMMON=y -CONFIG_SCSI=y -CONFIG_SCSI_DMA=y -CONFIG_SCSI_PROC_FS=y - -# -# SCSI support type (disk, tape, CD-ROM) -# -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=m -CONFIG_BLK_DEV_SR=m -CONFIG_CHR_DEV_SG=m -# CONFIG_BLK_DEV_BSG is not set -CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_ENCLOSURE=m -# CONFIG_SCSI_CONSTANTS is not set -# CONFIG_SCSI_LOGGING is not set -CONFIG_SCSI_SCAN_ASYNC=y - -# -# SCSI Transports -# -# CONFIG_SCSI_SPI_ATTRS is not set -# CONFIG_SCSI_FC_ATTRS is not set -CONFIG_SCSI_ISCSI_ATTRS=m -# CONFIG_SCSI_SAS_ATTRS is not set -# CONFIG_SCSI_SAS_LIBSAS is not set -# CONFIG_SCSI_SRP_ATTRS is not set -# end of SCSI Transports - -CONFIG_SCSI_LOWLEVEL=y -CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m -# CONFIG_SCSI_CXGB3_ISCSI is not set -# CONFIG_SCSI_CXGB4_ISCSI is not set -# CONFIG_SCSI_BNX2_ISCSI is not set -# CONFIG_BE2ISCSI is not set -# CONFIG_BLK_DEV_3W_XXXX_RAID is not set -# CONFIG_SCSI_HPSA is not set -# CONFIG_SCSI_3W_9XXX is not set -# CONFIG_SCSI_3W_SAS is not set -# CONFIG_SCSI_ACARD is not set -# CONFIG_SCSI_AACRAID is not set -# CONFIG_SCSI_AIC7XXX is not set -# CONFIG_SCSI_AIC79XX is not set -# CONFIG_SCSI_AIC94XX is not set -# CONFIG_SCSI_HISI_SAS is not set -# CONFIG_SCSI_MVSAS is not set -# CONFIG_SCSI_MVUMI is not set -# CONFIG_SCSI_ADVANSYS is not set -# CONFIG_SCSI_ARCMSR is not set -# CONFIG_SCSI_ESAS2R is not set -# 
CONFIG_MEGARAID_NEWGEN is not set -# CONFIG_MEGARAID_LEGACY is not set -# CONFIG_MEGARAID_SAS is not set -# CONFIG_SCSI_MPT3SAS is not set -# CONFIG_SCSI_MPT2SAS is not set -# CONFIG_SCSI_MPI3MR is not set -# CONFIG_SCSI_SMARTPQI is not set -# CONFIG_SCSI_HPTIOP is not set -# CONFIG_SCSI_BUSLOGIC is not set -# CONFIG_SCSI_MYRB is not set -# CONFIG_SCSI_MYRS is not set -# CONFIG_SCSI_SNIC is not set -# CONFIG_SCSI_DMX3191D is not set -# CONFIG_SCSI_FDOMAIN_PCI is not set -# CONFIG_SCSI_IPS is not set -# CONFIG_SCSI_INITIO is not set -# CONFIG_SCSI_INIA100 is not set -# CONFIG_SCSI_STEX is not set -# CONFIG_SCSI_SYM53C8XX_2 is not set -# CONFIG_SCSI_IPR is not set -# CONFIG_SCSI_QLOGIC_1280 is not set -# CONFIG_SCSI_QLA_ISCSI is not set -# CONFIG_SCSI_DC395x is not set -# CONFIG_SCSI_AM53C974 is not set -# CONFIG_SCSI_WD719X is not set -# CONFIG_SCSI_DEBUG is not set -# CONFIG_SCSI_PMCRAID is not set -# CONFIG_SCSI_PM8001 is not set -# CONFIG_SCSI_VIRTIO is not set -CONFIG_SCSI_DH=y -CONFIG_SCSI_DH_RDAC=m -CONFIG_SCSI_DH_HP_SW=m -CONFIG_SCSI_DH_EMC=m -CONFIG_SCSI_DH_ALUA=m -# end of SCSI device support - -CONFIG_ATA=y -CONFIG_SATA_HOST=y -CONFIG_ATA_VERBOSE_ERROR=y -CONFIG_ATA_FORCE=y -CONFIG_SATA_PMP=y - -# -# Controllers with non-SFF native interface -# -CONFIG_SATA_AHCI=y -CONFIG_SATA_MOBILE_LPM_POLICY=0 -# CONFIG_SATA_AHCI_PLATFORM is not set -# CONFIG_AHCI_BRCM is not set -# CONFIG_AHCI_DA850 is not set -# CONFIG_AHCI_DM816 is not set -# CONFIG_AHCI_DWC is not set -# CONFIG_AHCI_ST is not set -# CONFIG_AHCI_IMX is not set -# CONFIG_AHCI_CEVA is not set -# CONFIG_AHCI_MTK is not set -# CONFIG_AHCI_MVEBU is not set -# CONFIG_AHCI_SUNXI is not set -# CONFIG_AHCI_TEGRA is not set -# CONFIG_AHCI_XGENE is not set -# CONFIG_AHCI_QORIQ is not set -# CONFIG_SATA_FSL is not set -# CONFIG_SATA_GEMINI is not set -# CONFIG_SATA_AHCI_SEATTLE is not set -# CONFIG_SATA_INIC162X is not set -# CONFIG_SATA_ACARD_AHCI is not set -# CONFIG_SATA_SIL24 is not set -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -# CONFIG_PDC_ADMA is not set -# CONFIG_SATA_QSTOR is not set -# CONFIG_SATA_SX4 is not set -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# -# CONFIG_ATA_PIIX is not set -# CONFIG_SATA_DWC is not set -# CONFIG_SATA_HIGHBANK is not set -# CONFIG_SATA_MV is not set -# CONFIG_SATA_NV is not set -# CONFIG_SATA_PROMISE is not set -# CONFIG_SATA_RCAR is not set -# CONFIG_SATA_SIL is not set -# CONFIG_SATA_SIS is not set -# CONFIG_SATA_SVW is not set -# CONFIG_SATA_ULI is not set -# CONFIG_SATA_VIA is not set -# CONFIG_SATA_VITESSE is not set - -# -# PATA SFF controllers with BMDMA -# -# CONFIG_PATA_ALI is not set -# CONFIG_PATA_AMD is not set -# CONFIG_PATA_ARASAN_CF is not set -# CONFIG_PATA_ARTOP is not set -# CONFIG_PATA_ATIIXP is not set -# CONFIG_PATA_ATP867X is not set -# CONFIG_PATA_BK3710 is not set -# CONFIG_PATA_CMD64X is not set -# CONFIG_PATA_CS5520 is not set -# CONFIG_PATA_CS5530 is not set -# CONFIG_PATA_CS5536 is not set -# CONFIG_PATA_CYPRESS is not set -# CONFIG_PATA_EFAR is not set -# CONFIG_PATA_HPT366 is not set -# CONFIG_PATA_HPT37X is not set -# CONFIG_PATA_HPT3X2N is not set -# CONFIG_PATA_HPT3X3 is not set -# CONFIG_PATA_IMX is not set -# CONFIG_PATA_IT8213 is not set -# CONFIG_PATA_IT821X is not set -# CONFIG_PATA_JMICRON is not set -# CONFIG_PATA_MARVELL is not set -# CONFIG_PATA_NETCELL is not set -# CONFIG_PATA_NINJA32 is not set -# CONFIG_PATA_NS87415 is not set -# CONFIG_PATA_OLDPIIX is not set -# CONFIG_PATA_OPTIDMA is not set -# 
CONFIG_PATA_PDC2027X is not set -# CONFIG_PATA_PDC_OLD is not set -# CONFIG_PATA_RADISYS is not set -# CONFIG_PATA_RDC is not set -# CONFIG_PATA_SC1200 is not set -# CONFIG_PATA_SCH is not set -# CONFIG_PATA_SERVERWORKS is not set -# CONFIG_PATA_SIL680 is not set -# CONFIG_PATA_SIS is not set -# CONFIG_PATA_TOSHIBA is not set -# CONFIG_PATA_TRIFLEX is not set -# CONFIG_PATA_VIA is not set -# CONFIG_PATA_PXA is not set -# CONFIG_PATA_WINBOND is not set - -# -# PIO-only SFF controllers -# -# CONFIG_PATA_CMD640_PCI is not set -# CONFIG_PATA_IXP4XX_CF is not set -# CONFIG_PATA_MPIIX is not set -# CONFIG_PATA_NS87410 is not set -# CONFIG_PATA_OPTI is not set -# CONFIG_PATA_OF_PLATFORM is not set -# CONFIG_PATA_RZ1000 is not set -# CONFIG_PATA_SAMSUNG_CF is not set - -# -# Generic fallback / legacy drivers -# -# CONFIG_ATA_GENERIC is not set -# CONFIG_PATA_LEGACY is not set -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_AUTODETECT=y -CONFIG_MD_LINEAR=y -CONFIG_MD_RAID0=y -CONFIG_MD_RAID1=y -CONFIG_MD_RAID10=y -CONFIG_MD_RAID456=y -# CONFIG_MD_MULTIPATH is not set -# CONFIG_MD_FAULTY is not set -CONFIG_BCACHE=y -# CONFIG_BCACHE_DEBUG is not set -# CONFIG_BCACHE_CLOSURES_DEBUG is not set -# CONFIG_BCACHE_ASYNC_REGISTRATION is not set -CONFIG_BLK_DEV_DM_BUILTIN=y -CONFIG_BLK_DEV_DM=y -# CONFIG_DM_DEBUG is not set -CONFIG_DM_BUFIO=y -# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set -CONFIG_DM_BIO_PRISON=y -CONFIG_DM_PERSISTENT_DATA=y -CONFIG_DM_UNSTRIPED=y -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=y -CONFIG_DM_THIN_PROVISIONING=y -CONFIG_DM_CACHE=y -CONFIG_DM_CACHE_SMQ=y -CONFIG_DM_WRITECACHE=y -# CONFIG_DM_EBS is not set -# CONFIG_DM_ERA is not set -# CONFIG_DM_CLONE is not set -CONFIG_DM_MIRROR=y -# CONFIG_DM_LOG_USERSPACE is not set -CONFIG_DM_RAID=y -CONFIG_DM_ZERO=y -CONFIG_DM_MULTIPATH=y -CONFIG_DM_MULTIPATH_QL=y -CONFIG_DM_MULTIPATH_ST=y -CONFIG_DM_MULTIPATH_HST=y -CONFIG_DM_MULTIPATH_IOA=y -CONFIG_DM_DELAY=y -CONFIG_DM_DUST=y -CONFIG_DM_INIT=y -CONFIG_DM_UEVENT=y -CONFIG_DM_FLAKEY=y -CONFIG_DM_VERITY=y -CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y -CONFIG_DM_VERITY_FEC=y -CONFIG_DM_SWITCH=y -CONFIG_DM_LOG_WRITES=y -CONFIG_DM_INTEGRITY=y -CONFIG_DM_AUDIT=y -# CONFIG_TARGET_CORE is not set -# CONFIG_FUSION is not set - -# -# IEEE 1394 (FireWire) support -# -# CONFIG_FIREWIRE is not set -# CONFIG_FIREWIRE_NOSY is not set -# end of IEEE 1394 (FireWire) support - -CONFIG_NETDEVICES=y -CONFIG_MII=y -CONFIG_NET_CORE=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -CONFIG_WIREGUARD=m -# CONFIG_WIREGUARD_DEBUG is not set -# CONFIG_EQUALIZER is not set -# CONFIG_NET_FC is not set -# CONFIG_IFB is not set -# CONFIG_NET_TEAM is not set -CONFIG_MACVLAN=y -# CONFIG_MACVTAP is not set -CONFIG_IPVLAN_L3S=y -CONFIG_IPVLAN=y -# CONFIG_IPVTAP is not set -CONFIG_VXLAN=y -# CONFIG_GENEVE is not set -# CONFIG_BAREUDP is not set -# CONFIG_GTP is not set -CONFIG_AMT=m -CONFIG_MACSEC=m -# CONFIG_NETCONSOLE is not set -# CONFIG_TUN is not set -# CONFIG_TUN_VNET_CROSS_LE is not set -CONFIG_VETH=y -CONFIG_VIRTIO_NET=y -# CONFIG_NLMON is not set -# CONFIG_NET_VRF is not set -# CONFIG_ARCNET is not set -# CONFIG_CAIF_DRIVERS is not set -CONFIG_ETHERNET=y -CONFIG_NET_VENDOR_3COM=y -# CONFIG_VORTEX is not set -# CONFIG_TYPHOON is not set -# CONFIG_NET_VENDOR_ACTIONS is not set -CONFIG_NET_VENDOR_ADAPTEC=y -# CONFIG_ADAPTEC_STARFIRE is not set -CONFIG_NET_VENDOR_AGERE=y -# CONFIG_ET131X is not set -# CONFIG_NET_VENDOR_ALACRITECH is not set -CONFIG_NET_VENDOR_ALTEON=y -# CONFIG_ACENIC is not set -# CONFIG_ALTERA_TSE is not set -# 
CONFIG_NET_VENDOR_AMAZON is not set -CONFIG_NET_VENDOR_AMD=y -# CONFIG_AMD8111_ETH is not set -# CONFIG_PCNET32 is not set -# CONFIG_AMD_XGBE is not set -# CONFIG_NET_XGENE is not set -# CONFIG_NET_XGENE_V2 is not set -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_ASIX is not set -CONFIG_NET_VENDOR_ATHEROS=y -# CONFIG_ATL2 is not set -# CONFIG_ATL1 is not set -# CONFIG_ATL1E is not set -# CONFIG_ATL1C is not set -# CONFIG_ALX is not set -# CONFIG_CX_ECAT is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_CALXEDA_XGMAC is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -CONFIG_NET_VENDOR_CHELSIO=y -# CONFIG_CHELSIO_T1 is not set -# CONFIG_CHELSIO_T3 is not set -# CONFIG_CHELSIO_T4 is not set -# CONFIG_CHELSIO_T4VF is not set -CONFIG_NET_VENDOR_CIRRUS=y -# CONFIG_CS89x0_PLATFORM is not set -# CONFIG_EP93XX_ETH is not set -CONFIG_NET_VENDOR_CISCO=y -# CONFIG_ENIC is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_DAVICOM is not set -# CONFIG_DNET is not set -CONFIG_NET_VENDOR_DEC=y -# CONFIG_NET_TULIP is not set -CONFIG_NET_VENDOR_DLINK=y -# CONFIG_DL2K is not set -# CONFIG_SUNDANCE is not set -CONFIG_NET_VENDOR_EMULEX=y -# CONFIG_BE2NET is not set -# CONFIG_NET_VENDOR_ENGLEDER is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -CONFIG_NET_VENDOR_FARADAY=y -CONFIG_NET_VENDOR_FREESCALE=y -# CONFIG_FEC is not set -# CONFIG_FSL_FMAN is not set -# CONFIG_FSL_PQ_MDIO is not set -# CONFIG_FSL_XGMAC_MDIO is not set -# CONFIG_GIANFAR is not set -# CONFIG_FSL_DPAA2_SWITCH is not set -# CONFIG_FSL_ENETC is not set -# CONFIG_FSL_ENETC_VF is not set -# CONFIG_FSL_ENETC_IERB is not set -# CONFIG_FSL_ENETC_MDIO is not set -# CONFIG_NET_VENDOR_FUNGIBLE is not set -# CONFIG_NET_VENDOR_GOOGLE is not set -CONFIG_NET_VENDOR_HISILICON=y -# CONFIG_HIX5HD2_GMAC is not set -# CONFIG_HISI_FEMAC is not set -# CONFIG_HIP04_ETH is not set -# CONFIG_HNS_DSAF is not set -# CONFIG_HNS_ENET is not set -# CONFIG_HNS3 is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_WANGXUN is not set -# CONFIG_JME is not set -# CONFIG_KORINA is not set -CONFIG_NET_VENDOR_ADI=y -# CONFIG_ADIN1110 is not set -# CONFIG_NET_VENDOR_LITEX is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MEDIATEK is not set -CONFIG_NET_VENDOR_MELLANOX=y -# CONFIG_MLX4_EN is not set -# CONFIG_MLX5_CORE is not set -# CONFIG_MLXSW_CORE is not set -# CONFIG_MLXFW is not set -# CONFIG_MLXBF_GIGE is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROCHIP is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_MICROSOFT is not set -CONFIG_NET_VENDOR_MYRI=y -# CONFIG_MYRI10GE is not set -# CONFIG_FEALNX is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -CONFIG_NET_VENDOR_NETERION=y -# CONFIG_S2IO is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -CONFIG_NET_VENDOR_NVIDIA=y -# CONFIG_FORCEDETH is not set -# CONFIG_LPC_ENET is not set -CONFIG_NET_VENDOR_OKI=y -# CONFIG_PCH_GBE is not set -# CONFIG_ETHOC is not set -CONFIG_NET_VENDOR_PACKET_ENGINES=y -# CONFIG_HAMACHI is not set -# CONFIG_YELLOWFIN is not set -# CONFIG_NET_VENDOR_PENSANDO is not set -CONFIG_NET_VENDOR_QLOGIC=y -# CONFIG_QLA3XXX is not set -# CONFIG_QLCNIC is not set -# CONFIG_NETXEN_NIC is not set -# CONFIG_QED is not set -CONFIG_NET_VENDOR_BROCADE=y -# CONFIG_BNA is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set 
-CONFIG_NET_VENDOR_RDC=y
-# CONFIG_R6040 is not set
-CONFIG_NET_VENDOR_REALTEK=y
-# CONFIG_8139CP is not set
-# CONFIG_8139TOO is not set
-# CONFIG_R8169 is not set
-# CONFIG_NET_VENDOR_RENESAS is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SAMSUNG is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-CONFIG_NET_VENDOR_SILAN=y
-# CONFIG_SC92031 is not set
-CONFIG_NET_VENDOR_SIS=y
-# CONFIG_SIS900 is not set
-# CONFIG_SIS190 is not set
-# CONFIG_NET_VENDOR_SOLARFLARE is not set
-# CONFIG_NET_VENDOR_SMSC is not set
-# CONFIG_NET_VENDOR_SOCIONEXT is not set
-CONFIG_NET_VENDOR_SPACEMIT=y
-CONFIG_K1X_EMAC=y
-# CONFIG_NET_VENDOR_STMICRO is not set
-CONFIG_NET_VENDOR_SUN=y
-# CONFIG_HAPPYMEAL is not set
-# CONFIG_SUNGEM is not set
-# CONFIG_CASSINI is not set
-# CONFIG_NIU is not set
-CONFIG_NET_VENDOR_SUNPLUS=y
-# CONFIG_SP7021_EMAC is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
-CONFIG_NET_VENDOR_TEHUTI=y
-# CONFIG_TEHUTI is not set
-CONFIG_NET_VENDOR_TI=y
-# CONFIG_TI_DAVINCI_EMAC is not set
-# CONFIG_TI_DAVINCI_MDIO is not set
-# CONFIG_TI_CPSW_PHY_SEL is not set
-# CONFIG_TI_CPSW is not set
-# CONFIG_TI_CPSW_SWITCHDEV is not set
-# CONFIG_TI_CPTS is not set
-# CONFIG_TLAN is not set
-# CONFIG_NET_VENDOR_VERTEXCOM is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_XILINX is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-CONFIG_PHYLIB=y
-CONFIG_SWPHY=y
-# CONFIG_LED_TRIGGER_PHY is not set
-CONFIG_FIXED_PHY=y
-
-#
-# MII PHY device drivers
-#
-# CONFIG_AMD_PHY is not set
-# CONFIG_MESON_GXL_PHY is not set
-# CONFIG_ADIN_PHY is not set
-# CONFIG_ADIN1100_PHY is not set
-# CONFIG_AQUANTIA_PHY is not set
-# CONFIG_AX88796B_PHY is not set
-# CONFIG_BROADCOM_PHY is not set
-# CONFIG_BCM54140_PHY is not set
-# CONFIG_BCM63XX_PHY is not set
-# CONFIG_BCM7XXX_PHY is not set
-# CONFIG_BCM84881_PHY is not set
-# CONFIG_BCM87XX_PHY is not set
-# CONFIG_CICADA_PHY is not set
-# CONFIG_CORTINA_PHY is not set
-# CONFIG_DAVICOM_PHY is not set
-# CONFIG_ICPLUS_PHY is not set
-# CONFIG_LXT_PHY is not set
-# CONFIG_INTEL_XWAY_PHY is not set
-# CONFIG_LSI_ET1011C_PHY is not set
-# CONFIG_MARVELL_PHY is not set
-# CONFIG_MARVELL_10G_PHY is not set
-# CONFIG_MARVELL_88X2222_PHY is not set
-# CONFIG_MAXLINEAR_GPHY is not set
-# CONFIG_MEDIATEK_GE_PHY is not set
-# CONFIG_MICREL_PHY is not set
-# CONFIG_MICROCHIP_PHY is not set
-# CONFIG_MICROCHIP_T1_PHY is not set
-# CONFIG_MICROSEMI_PHY is not set
-# CONFIG_MOTORCOMM_PHY is not set
-# CONFIG_NATIONAL_PHY is not set
-# CONFIG_NXP_C45_TJA11XX_PHY is not set
-# CONFIG_NXP_TJA11XX_PHY is not set
-# CONFIG_AT803X_PHY is not set
-# CONFIG_QSEMI_PHY is not set
-CONFIG_REALTEK_PHY=y
-# CONFIG_RENESAS_PHY is not set
-# CONFIG_ROCKCHIP_PHY is not set
-# CONFIG_SMSC_PHY is not set
-# CONFIG_STE10XP is not set
-# CONFIG_TERANETICS_PHY is not set
-# CONFIG_DP83822_PHY is not set
-# CONFIG_DP83TC811_PHY is not set
-# CONFIG_DP83848_PHY is not set
-# CONFIG_DP83867_PHY is not set
-# CONFIG_DP83869_PHY is not set
-# CONFIG_DP83TD510_PHY is not set
-# CONFIG_VITESSE_PHY is not set
-# CONFIG_XILINX_GMII2RGMII is not set
-# CONFIG_MICREL_KS8995MA is not set
-# CONFIG_PSE_CONTROLLER is not set
-CONFIG_MDIO_DEVICE=y
-CONFIG_MDIO_BUS=y
-CONFIG_FWNODE_MDIO=y
-CONFIG_OF_MDIO=y
-CONFIG_MDIO_DEVRES=y
-# CONFIG_MDIO_SUN4I is not set
-# CONFIG_MDIO_XGENE is not set
-# CONFIG_MDIO_ASPEED is not set
-# CONFIG_MDIO_BITBANG is not set
-# CONFIG_MDIO_BCM_IPROC is not set
-# CONFIG_MDIO_BCM_UNIMAC is not set
-# CONFIG_MDIO_HISI_FEMAC is not set
-# CONFIG_MDIO_MVUSB is not set
-# CONFIG_MDIO_MSCC_MIIM is not set
-# CONFIG_MDIO_MOXART is not set
-# CONFIG_MDIO_OCTEON is not set
-# CONFIG_MDIO_IPQ4019 is not set
-# CONFIG_MDIO_IPQ8064 is not set
-# CONFIG_MDIO_THUNDER is not set
-
-#
-# MDIO Multiplexers
-#
-# CONFIG_MDIO_BUS_MUX_MESON_G12A is not set
-# CONFIG_MDIO_BUS_MUX_BCM6368 is not set
-# CONFIG_MDIO_BUS_MUX_BCM_IPROC is not set
-# CONFIG_MDIO_BUS_MUX_GPIO is not set
-# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set
-# CONFIG_MDIO_BUS_MUX_MMIOREG is not set
-
-#
-# PCS device drivers
-#
-# CONFIG_PCS_RZN1_MIIC is not set
-# end of PCS device drivers
-
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-CONFIG_USB_NET_DRIVERS=y
-# CONFIG_USB_CATC is not set
-# CONFIG_USB_KAWETH is not set
-# CONFIG_USB_PEGASUS is not set
-# CONFIG_USB_RTL8150 is not set
-CONFIG_USB_RTL8152=m
-# CONFIG_USB_LAN78XX is not set
-CONFIG_USB_USBNET=y
-# CONFIG_USB_NET_AX8817X is not set
-CONFIG_USB_NET_AX88179_178A=y
-CONFIG_USB_NET_CDCETHER=y
-# CONFIG_USB_NET_CDC_EEM is not set
-CONFIG_USB_NET_CDC_NCM=y
-# CONFIG_USB_NET_HUAWEI_CDC_NCM is not set
-# CONFIG_USB_NET_CDC_MBIM is not set
-# CONFIG_USB_NET_DM9601 is not set
-# CONFIG_USB_NET_SR9700 is not set
-# CONFIG_USB_NET_SR9800 is not set
-# CONFIG_USB_NET_SMSC75XX is not set
-# CONFIG_USB_NET_SMSC95XX is not set
-# CONFIG_USB_NET_GL620A is not set
-# CONFIG_USB_NET_NET1080 is not set
-# CONFIG_USB_NET_PLUSB is not set
-# CONFIG_USB_NET_MCS7830 is not set
-# CONFIG_USB_NET_RNDIS_HOST is not set
-CONFIG_USB_NET_CDC_SUBSET_ENABLE=y
-CONFIG_USB_NET_CDC_SUBSET=y
-# CONFIG_USB_ALI_M5632 is not set
-# CONFIG_USB_AN2720 is not set
-CONFIG_USB_BELKIN=y
-CONFIG_USB_ARMLINUX=y
-# CONFIG_USB_EPSON2888 is not set
-# CONFIG_USB_KC2190 is not set
-CONFIG_USB_NET_ZAURUS=y
-# CONFIG_USB_NET_CX82310_ETH is not set
-# CONFIG_USB_NET_KALMIA is not set
-CONFIG_USB_NET_QMI_WWAN=m
-# CONFIG_USB_HSO is not set
-# CONFIG_USB_NET_INT51X1 is not set
-# CONFIG_USB_IPHETH is not set
-# CONFIG_USB_SIERRA_NET is not set
-# CONFIG_USB_VL600 is not set
-# CONFIG_USB_NET_CH9200 is not set
-# CONFIG_USB_NET_AQC111 is not set
-CONFIG_USB_RTL8153_ECM=m
-# CONFIG_USB_NET_ASIX is not set
-CONFIG_WLAN=y
-# CONFIG_WLAN_VENDOR_ADMTEK is not set
-# CONFIG_WLAN_VENDOR_ATH is not set
-# CONFIG_WLAN_VENDOR_ATMEL is not set
-# CONFIG_WLAN_VENDOR_BROADCOM is not set
-# CONFIG_WLAN_VENDOR_CISCO is not set
-# CONFIG_WLAN_VENDOR_INTEL is not set
-# CONFIG_WLAN_VENDOR_INTERSIL is not set
-# CONFIG_WLAN_VENDOR_MARVELL is not set
-# CONFIG_WLAN_VENDOR_MEDIATEK is not set
-# CONFIG_WLAN_VENDOR_MICROCHIP is not set
-# CONFIG_WLAN_VENDOR_PURELIFI is not set
-# CONFIG_WLAN_VENDOR_RALINK is not set
-CONFIG_WLAN_VENDOR_REALTEK=y
-# CONFIG_RTL8180 is not set
-# CONFIG_RTL8187 is not set
-CONFIG_RTL8852BS=m
-CONFIG_RTL_CARDS=m
-# CONFIG_RTL8192CE is not set
-# CONFIG_RTL8192SE is not set
-# CONFIG_RTL8192DE is not set
-# CONFIG_RTL8723AE is not set
-# CONFIG_RTL8723BE is not set
-# CONFIG_RTL8188EE is not set
-# CONFIG_RTL8192EE is not set
-# CONFIG_RTL8821AE is not set
-# CONFIG_RTL8192CU is not set
-# CONFIG_RTL8XXXU is not set
-# CONFIG_RTW88 is not set
-# CONFIG_RTW89 is not set
-# CONFIG_WLAN_VENDOR_RSI is not set
-# CONFIG_WLAN_VENDOR_SILABS is not set
-# CONFIG_WLAN_VENDOR_ST is not set
-# CONFIG_WLAN_VENDOR_TI is not set
-# CONFIG_WLAN_VENDOR_ZYDAS is not set
-# CONFIG_WLAN_VENDOR_QUANTENNA is not set
-# CONFIG_MAC80211_HWSIM is not set
-# CONFIG_USB_NET_RNDIS_WLAN is not set
-# CONFIG_VIRT_WIFI is not set
-#
CONFIG_WAN is not set - -# -# Wireless WAN -# -# CONFIG_WWAN is not set -# end of Wireless WAN - -# CONFIG_VMXNET3 is not set -# CONFIG_NETDEVSIM is not set -CONFIG_NET_FAILOVER=y -# CONFIG_ISDN is not set - -# -# Input device support -# -CONFIG_INPUT=y -CONFIG_INPUT_LEDS=y -# CONFIG_INPUT_FF_MEMLESS is not set -# CONFIG_INPUT_SPARSEKMAP is not set -# CONFIG_INPUT_MATRIXKMAP is not set - -# -# Userland interfaces -# -# CONFIG_INPUT_MOUSEDEV is not set -# CONFIG_INPUT_JOYDEV is not set -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TABLET is not set -CONFIG_INPUT_TOUCHSCREEN=y -CONFIG_TOUCHSCREEN_GT9XX=y -# CONFIG_TOUCHSCREEN_ADS7846 is not set -# CONFIG_TOUCHSCREEN_AD7877 is not set -# CONFIG_TOUCHSCREEN_AD7879 is not set -# CONFIG_TOUCHSCREEN_AR1021_I2C is not set -# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set -# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set -# CONFIG_TOUCHSCREEN_BU21013 is not set -# CONFIG_TOUCHSCREEN_BU21029 is not set -# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set -# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set -# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set -# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set -# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set -# CONFIG_TOUCHSCREEN_DYNAPRO is not set -# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set -# CONFIG_TOUCHSCREEN_EETI is not set -# CONFIG_TOUCHSCREEN_EGALAX is not set -# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set -# CONFIG_TOUCHSCREEN_EXC3000 is not set -# CONFIG_TOUCHSCREEN_FUJITSU is not set -CONFIG_TOUCHSCREEN_GOODIX=y -# CONFIG_TOUCHSCREEN_HIDEEP is not set -# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set -# CONFIG_TOUCHSCREEN_ILI210X is not set -# CONFIG_TOUCHSCREEN_ILITEK is not set -# CONFIG_TOUCHSCREEN_IPROC is not set -# CONFIG_TOUCHSCREEN_S6SY761 is not set -# CONFIG_TOUCHSCREEN_GUNZE is not set -# CONFIG_TOUCHSCREEN_EKTF2127 is not set -# CONFIG_TOUCHSCREEN_ELAN is not set -# CONFIG_TOUCHSCREEN_ELO is not set -# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set -# CONFIG_TOUCHSCREEN_WACOM_I2C is not set -# CONFIG_TOUCHSCREEN_MAX11801 is not set -# CONFIG_TOUCHSCREEN_MCS5000 is not set -# CONFIG_TOUCHSCREEN_MMS114 is not set -# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set -# CONFIG_TOUCHSCREEN_MSG2638 is not set -# CONFIG_TOUCHSCREEN_MTOUCH is not set -# CONFIG_TOUCHSCREEN_IMAGIS is not set -# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set -# CONFIG_TOUCHSCREEN_INEXIO is not set -# CONFIG_TOUCHSCREEN_MK712 is not set -# CONFIG_TOUCHSCREEN_PENMOUNT is not set -# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set -# CONFIG_TOUCHSCREEN_RASPBERRYPI_FW is not set -# CONFIG_TOUCHSCREEN_MIGOR is not set -# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set -# CONFIG_TOUCHSCREEN_TOUCHWIN is not set -# CONFIG_TOUCHSCREEN_PIXCIR is not set -# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set -# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set -# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set -# CONFIG_TOUCHSCREEN_TS4800 is not set -# CONFIG_TOUCHSCREEN_TSC_SERIO is not set -# CONFIG_TOUCHSCREEN_TSC2004 is not set -# CONFIG_TOUCHSCREEN_TSC2005 is not set -# CONFIG_TOUCHSCREEN_TSC2007 is not set -# CONFIG_TOUCHSCREEN_RM_TS is not set -# CONFIG_TOUCHSCREEN_SILEAD is not set -# CONFIG_TOUCHSCREEN_SIS_I2C is not set -# CONFIG_TOUCHSCREEN_ST1232 is not set -# CONFIG_TOUCHSCREEN_STMFTS is not set -# CONFIG_TOUCHSCREEN_SUN4I is not set -# CONFIG_TOUCHSCREEN_SUR40 is not set -# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set -# CONFIG_TOUCHSCREEN_SX8654 is 
not set -# CONFIG_TOUCHSCREEN_TPS6507X is not set -# CONFIG_TOUCHSCREEN_ZET6223 is not set -# CONFIG_TOUCHSCREEN_ZFORCE is not set -# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set -# CONFIG_TOUCHSCREEN_IQS5XX is not set -# CONFIG_TOUCHSCREEN_ZINITIX is not set -CONFIG_INPUT_MISC=y -# CONFIG_INPUT_AD714X is not set -# CONFIG_INPUT_ARIEL_PWRBUTTON is not set -# CONFIG_INPUT_ATMEL_CAPTOUCH is not set -# CONFIG_INPUT_BMA150 is not set -# CONFIG_INPUT_E3X0_BUTTON is not set -# CONFIG_INPUT_MMA8450 is not set -# CONFIG_INPUT_GPIO_BEEPER is not set -# CONFIG_INPUT_GPIO_DECODER is not set -# CONFIG_INPUT_GPIO_VIBRA is not set -# CONFIG_INPUT_ATI_REMOTE2 is not set -# CONFIG_INPUT_KEYSPAN_REMOTE is not set -# CONFIG_INPUT_KXTJ9 is not set -# CONFIG_INPUT_POWERMATE is not set -# CONFIG_INPUT_YEALINK is not set -# CONFIG_INPUT_CM109 is not set -# CONFIG_INPUT_REGULATOR_HAPTIC is not set -CONFIG_INPUT_UINPUT=y -# CONFIG_INPUT_PCF8574 is not set -# CONFIG_INPUT_PWM_BEEPER is not set -# CONFIG_INPUT_PWM_VIBRA is not set -# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set -# CONFIG_INPUT_DA7280_HAPTICS is not set -# CONFIG_INPUT_ADXL34X is not set -# CONFIG_INPUT_IBM_PANEL is not set -# CONFIG_INPUT_IMS_PCU is not set -# CONFIG_INPUT_IQS269A is not set -# CONFIG_INPUT_IQS626A is not set -# CONFIG_INPUT_IQS7222 is not set -# CONFIG_INPUT_CMA3000 is not set -# CONFIG_INPUT_DRV260X_HAPTICS is not set -# CONFIG_INPUT_DRV2665_HAPTICS is not set -# CONFIG_INPUT_DRV2667_HAPTICS is not set -# CONFIG_INPUT_HISI_POWERKEY is not set -CONFIG_INPUT_SPACEMIT_POWERKEY=y -# CONFIG_INPUT_SC27XX_VIBRA is not set -# CONFIG_INPUT_RT5120_PWRKEY is not set -# CONFIG_RMI4_CORE is not set - -# -# Hardware I/O ports -# -CONFIG_SERIO=y -CONFIG_SERIO_SERPORT=y -# CONFIG_SERIO_PCIPS2 is not set -# CONFIG_SERIO_LIBPS2 is not set -# CONFIG_SERIO_RAW is not set -# CONFIG_SERIO_ALTERA_PS2 is not set -# CONFIG_SERIO_PS2MULT is not set -# CONFIG_SERIO_ARC_PS2 is not set -# CONFIG_SERIO_APBPS2 is not set -# CONFIG_SERIO_OLPC_APSP is not set -# CONFIG_SERIO_SUN4I_PS2 is not set -# CONFIG_SERIO_GPIO_PS2 is not set -# CONFIG_USERIO is not set -# CONFIG_GAMEPORT is not set -# end of Hardware I/O ports -# end of Input device support - -# -# Character devices -# -CONFIG_TTY=y -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_VT_CONSOLE_SLEEP=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_UNIX98_PTYS=y -# CONFIG_LEGACY_PTYS is not set -CONFIG_LDISC_AUTOLOAD=y - -# -# Serial drivers -# -CONFIG_SERIAL_EARLYCON=y -# CONFIG_SERIAL_8250 is not set - -# -# Non-8250 serial port support -# -# CONFIG_SERIAL_AMBA_PL010 is not set -CONFIG_SERIAL_EARLYCON_RISCV_SBI=y -# CONFIG_SERIAL_ATMEL is not set -# CONFIG_SERIAL_MESON is not set -# CONFIG_SERIAL_CLPS711X is not set -# CONFIG_SERIAL_SAMSUNG is not set -# CONFIG_SERIAL_TEGRA is not set -# CONFIG_SERIAL_TEGRA_TCU is not set -# CONFIG_SERIAL_MAX3100 is not set -# CONFIG_SERIAL_MAX310X is not set -CONFIG_SERIAL_PXA=y -# CONFIG_SERIAL_PXA_NON8250 is not set -CONFIG_SERIAL_PXA_SPACEMIT_K1X=y -CONFIG_SERIAL_PXA_CONSOLE=y -# CONFIG_SERIAL_IMX is not set -# CONFIG_SERIAL_IMX_EARLYCON is not set -# CONFIG_SERIAL_UARTLITE is not set -# CONFIG_SERIAL_SH_SCI is not set -# CONFIG_SERIAL_HS_LPC32XX is not set -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -# CONFIG_SERIAL_ICOM is not set -# CONFIG_SERIAL_JSM is not set -# CONFIG_SERIAL_MSM is not set -# CONFIG_SERIAL_VT8500 is not set -# CONFIG_SERIAL_OMAP is not set -CONFIG_SERIAL_SIFIVE=m -# CONFIG_SERIAL_LANTIQ is not set -# 
CONFIG_SERIAL_SCCNXP is not set -# CONFIG_SERIAL_SC16IS7XX is not set -# CONFIG_SERIAL_TIMBERDALE is not set -# CONFIG_SERIAL_BCM63XX is not set -# CONFIG_SERIAL_ALTERA_JTAGUART is not set -# CONFIG_SERIAL_ALTERA_UART is not set -# CONFIG_SERIAL_PCH_UART is not set -# CONFIG_SERIAL_MXS_AUART is not set -# CONFIG_SERIAL_XILINX_PS_UART is not set -# CONFIG_SERIAL_MPS2_UART is not set -# CONFIG_SERIAL_ARC is not set -# CONFIG_SERIAL_RP2 is not set -# CONFIG_SERIAL_FSL_LPUART is not set -# CONFIG_SERIAL_FSL_LINFLEXUART is not set -# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set -# CONFIG_SERIAL_ST_ASC is not set -# CONFIG_SERIAL_SPRD is not set -# CONFIG_SERIAL_STM32 is not set -# CONFIG_SERIAL_MVEBU_UART is not set -# CONFIG_SERIAL_OWL is not set -# CONFIG_SERIAL_RDA is not set -# CONFIG_SERIAL_MILBEAUT_USIO is not set -# CONFIG_SERIAL_LITEUART is not set -# CONFIG_SERIAL_SUNPLUS is not set -# end of Serial drivers - -# CONFIG_SERIAL_NONSTANDARD is not set -# CONFIG_N_GSM is not set -# CONFIG_NOZOMI is not set -# CONFIG_NULL_TTY is not set -CONFIG_HVC_DRIVER=y -CONFIG_HVC_RISCV_SBI=y -CONFIG_RPMSG_TTY=m -CONFIG_SERIAL_DEV_BUS=y -CONFIG_SERIAL_DEV_CTRL_TTYPORT=y -# CONFIG_TTY_PRINTK is not set -CONFIG_VIRTIO_CONSOLE=m -# CONFIG_IPMI_HANDLER is not set -# CONFIG_ASPEED_KCS_IPMI_BMC is not set -# CONFIG_NPCM7XX_KCS_IPMI_BMC is not set -# CONFIG_ASPEED_BT_IPMI_BMC is not set -# CONFIG_IPMB_DEVICE_INTERFACE is not set -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_TIMERIOMEM=y -# CONFIG_HW_RANDOM_ATMEL is not set -# CONFIG_HW_RANDOM_BA431 is not set -# CONFIG_HW_RANDOM_BCM2835 is not set -# CONFIG_HW_RANDOM_IPROC_RNG200 is not set -# CONFIG_HW_RANDOM_IXP4XX is not set -# CONFIG_HW_RANDOM_OMAP is not set -# CONFIG_HW_RANDOM_OMAP3_ROM is not set -# CONFIG_HW_RANDOM_VIRTIO is not set -# CONFIG_HW_RANDOM_IMX_RNGC is not set -# CONFIG_HW_RANDOM_NOMADIK is not set -# CONFIG_HW_RANDOM_STM32 is not set -# CONFIG_HW_RANDOM_MESON is not set -# CONFIG_HW_RANDOM_MTK is not set -# CONFIG_HW_RANDOM_EXYNOS is not set -# CONFIG_HW_RANDOM_NPCM is not set -# CONFIG_HW_RANDOM_KEYSTONE is not set -# CONFIG_HW_RANDOM_CCTRNG is not set -# CONFIG_HW_RANDOM_XIPHERA is not set -# CONFIG_HW_RANDOM_CN10K is not set -# CONFIG_APPLICOM is not set -CONFIG_DEVMEM=y -CONFIG_DEVPORT=y -CONFIG_TCG_TPM=y -CONFIG_HW_RANDOM_TPM=y -CONFIG_TCG_TIS_CORE=m -CONFIG_TCG_TIS=m -CONFIG_TCG_TIS_SPI=m -CONFIG_TCG_TIS_SPI_CR50=y -CONFIG_TCG_TIS_I2C=m -CONFIG_TCG_TIS_SYNQUACER=m -CONFIG_TCG_TIS_I2C_CR50=m -CONFIG_TCG_TIS_I2C_ATMEL=m -CONFIG_TCG_TIS_I2C_INFINEON=m -CONFIG_TCG_TIS_I2C_NUVOTON=m -CONFIG_TCG_ATMEL=m -CONFIG_TCG_VTPM_PROXY=m -CONFIG_TCG_TIS_ST33ZP24=m -CONFIG_TCG_TIS_ST33ZP24_I2C=m -CONFIG_TCG_TIS_ST33ZP24_SPI=m -# CONFIG_XILLYBUS is not set -# CONFIG_XILLYUSB is not set -CONFIG_RANDOM_TRUST_CPU=y -CONFIG_RANDOM_TRUST_BOOTLOADER=y -# end of Character devices - -# -# I2C support -# -CONFIG_I2C=y -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MUX=y - -# -# Multiplexer I2C Chip support -# -CONFIG_I2C_ARB_GPIO_CHALLENGE=y -CONFIG_I2C_MUX_GPIO=y -CONFIG_I2C_MUX_GPMUX=y -# CONFIG_I2C_MUX_LTC4306 is not set -# CONFIG_I2C_MUX_PCA9541 is not set -# CONFIG_I2C_MUX_PCA954x is not set -CONFIG_I2C_MUX_PINCTRL=y -CONFIG_I2C_MUX_REG=y -CONFIG_I2C_DEMUX_PINCTRL=y -# CONFIG_I2C_MUX_MLXCPLD is not set -# end of Multiplexer I2C Chip support - -CONFIG_I2C_HELPER_AUTO=y -CONFIG_I2C_ALGOBIT=y - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -# CONFIG_I2C_ALI1535 is not set -# CONFIG_I2C_ALI1563 is not 
set -# CONFIG_I2C_ALI15X3 is not set -# CONFIG_I2C_AMD756 is not set -# CONFIG_I2C_AMD8111 is not set -# CONFIG_I2C_HIX5HD2 is not set -# CONFIG_I2C_I801 is not set -# CONFIG_I2C_ISCH is not set -# CONFIG_I2C_PIIX4 is not set -# CONFIG_I2C_NFORCE2 is not set -# CONFIG_I2C_NVIDIA_GPU is not set -# CONFIG_I2C_SIS5595 is not set -# CONFIG_I2C_SIS630 is not set -# CONFIG_I2C_SIS96X is not set -CONFIG_I2C_SPACEMIT_K1X=y -# CONFIG_I2C_VIA is not set -# CONFIG_I2C_VIAPRO is not set - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -# CONFIG_I2C_ALTERA is not set -# CONFIG_I2C_ASPEED is not set -# CONFIG_I2C_AT91 is not set -# CONFIG_I2C_AXXIA is not set -# CONFIG_I2C_BCM2835 is not set -# CONFIG_I2C_BCM_IPROC is not set -# CONFIG_I2C_BCM_KONA is not set -CONFIG_I2C_BRCMSTB=y -# CONFIG_I2C_CADENCE is not set -# CONFIG_I2C_CBUS_GPIO is not set -# CONFIG_I2C_DAVINCI is not set -# CONFIG_I2C_DESIGNWARE_PLATFORM is not set -# CONFIG_I2C_DESIGNWARE_PCI is not set -# CONFIG_I2C_DIGICOLOR is not set -# CONFIG_I2C_EG20T is not set -# CONFIG_I2C_EMEV2 is not set -# CONFIG_I2C_EXYNOS5 is not set -CONFIG_I2C_GPIO=m -CONFIG_I2C_GPIO_FAULT_INJECTOR=y -# CONFIG_I2C_HIGHLANDER is not set -# CONFIG_I2C_HISI is not set -# CONFIG_I2C_IMG is not set -# CONFIG_I2C_IMX is not set -# CONFIG_I2C_IMX_LPI2C is not set -# CONFIG_I2C_IOP3XX is not set -# CONFIG_I2C_JZ4780 is not set -# CONFIG_I2C_LPC2K is not set -# CONFIG_I2C_MESON is not set -# CONFIG_I2C_MICROCHIP_CORE is not set -# CONFIG_I2C_MT65XX is not set -# CONFIG_I2C_MT7621 is not set -# CONFIG_I2C_MV64XXX is not set -# CONFIG_I2C_MXS is not set -# CONFIG_I2C_NPCM is not set -# CONFIG_I2C_OCORES is not set -# CONFIG_I2C_OMAP is not set -# CONFIG_I2C_OWL is not set -# CONFIG_I2C_APPLE is not set -# CONFIG_I2C_PCA_PLATFORM is not set -# CONFIG_I2C_PNX is not set -# CONFIG_I2C_PXA is not set -# CONFIG_I2C_QCOM_CCI is not set -# CONFIG_I2C_QUP is not set -# CONFIG_I2C_RIIC is not set -# CONFIG_I2C_RK3X is not set -# CONFIG_I2C_RZV2M is not set -# CONFIG_I2C_S3C2410 is not set -# CONFIG_I2C_SH_MOBILE is not set -# CONFIG_I2C_SIMTEC is not set -# CONFIG_I2C_SPRD is not set -# CONFIG_I2C_ST is not set -# CONFIG_I2C_STM32F4 is not set -# CONFIG_I2C_STM32F7 is not set -# CONFIG_I2C_SUN6I_P2WI is not set -# CONFIG_I2C_SYNQUACER is not set -# CONFIG_I2C_TEGRA is not set -# CONFIG_I2C_TEGRA_BPMP is not set -# CONFIG_I2C_UNIPHIER is not set -# CONFIG_I2C_UNIPHIER_F is not set -# CONFIG_I2C_VERSATILE is not set -# CONFIG_I2C_WMT is not set -# CONFIG_I2C_THUNDERX is not set -# CONFIG_I2C_XILINX is not set -# CONFIG_I2C_XLP9XX is not set -# CONFIG_I2C_RCAR is not set - -# -# External I2C/SMBus adapter drivers -# -# CONFIG_I2C_DIOLAN_U2C is not set -# CONFIG_I2C_CP2615 is not set -# CONFIG_I2C_PCI1XXXX is not set -# CONFIG_I2C_ROBOTFUZZ_OSIF is not set -# CONFIG_I2C_TAOS_EVM is not set -CONFIG_I2C_TINY_USB=m - -# -# Other I2C/SMBus bus drivers -# -# CONFIG_I2C_MLXCPLD is not set -# CONFIG_I2C_VIRTIO is not set -# end of I2C Hardware Bus support - -# CONFIG_I2C_STUB is not set -CONFIG_I2C_SLAVE=y -CONFIG_I2C_SLAVE_EEPROM=y -# CONFIG_I2C_SLAVE_TESTUNIT is not set -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -# end of I2C support - -# CONFIG_I3C is not set -CONFIG_SPI=y -# CONFIG_SPI_DEBUG is not set -CONFIG_SPI_MASTER=y -CONFIG_SPI_MEM=y - -# -# SPI Master Controller Drivers -# -# CONFIG_SPI_ALTERA is not set -# CONFIG_SPI_ALTERA_CORE is not set -# CONFIG_SPI_AR934X is not set -# CONFIG_SPI_ATH79 is 
not set -# CONFIG_SPI_ARMADA_3700 is not set -# CONFIG_SPI_ASPEED_SMC is not set -# CONFIG_SPI_ATMEL is not set -# CONFIG_SPI_ATMEL_QUADSPI is not set -# CONFIG_SPI_AXI_SPI_ENGINE is not set -# CONFIG_SPI_BCM2835 is not set -# CONFIG_SPI_BCM2835AUX is not set -# CONFIG_SPI_BCM63XX is not set -# CONFIG_SPI_BCM63XX_HSSPI is not set -# CONFIG_SPI_BCM_QSPI is not set -# CONFIG_SPI_BITBANG is not set -# CONFIG_SPI_CADENCE is not set -# CONFIG_SPI_CADENCE_QUADSPI is not set -# CONFIG_SPI_CADENCE_XSPI is not set -# CONFIG_SPI_CLPS711X is not set -# CONFIG_SPI_DESIGNWARE_EXT is not set -CONFIG_SPI_K1X=y -CONFIG_SPI_K1X_QSPI=y -# CONFIG_SPI_DESIGNWARE is not set -# CONFIG_SPI_EP93XX is not set -# CONFIG_SPI_FSL_LPSPI is not set -# CONFIG_SPI_FSL_QUADSPI is not set -# CONFIG_SPI_GXP is not set -# CONFIG_SPI_HISI_KUNPENG is not set -# CONFIG_SPI_HISI_SFC_V3XX is not set -# CONFIG_SPI_NXP_FLEXSPI is not set -# CONFIG_SPI_GPIO is not set -# CONFIG_SPI_IMG_SPFI is not set -# CONFIG_SPI_IMX is not set -# CONFIG_SPI_INGENIC is not set -# CONFIG_SPI_INTEL_PCI is not set -# CONFIG_SPI_INTEL_PLATFORM is not set -# CONFIG_SPI_JCORE is not set -# CONFIG_SPI_LP8841_RTC is not set -# CONFIG_SPI_FSL_SPI is not set -# CONFIG_SPI_FSL_DSPI is not set -# CONFIG_SPI_MESON_SPICC is not set -# CONFIG_SPI_MESON_SPIFC is not set -# CONFIG_SPI_MICROCHIP_CORE is not set -# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set -# CONFIG_SPI_MT65XX is not set -# CONFIG_SPI_MT7621 is not set -# CONFIG_SPI_MTK_NOR is not set -# CONFIG_SPI_NPCM_FIU is not set -# CONFIG_SPI_NPCM_PSPI is not set -# CONFIG_SPI_LANTIQ_SSC is not set -# CONFIG_SPI_OC_TINY is not set -# CONFIG_SPI_OMAP24XX is not set -# CONFIG_SPI_TI_QSPI is not set -# CONFIG_SPI_OMAP_100K is not set -# CONFIG_SPI_ORION is not set -# CONFIG_SPI_PIC32 is not set -# CONFIG_SPI_PIC32_SQI is not set -# CONFIG_SPI_PXA2XX is not set -# CONFIG_SPI_ROCKCHIP is not set -# CONFIG_SPI_ROCKCHIP_SFC is not set -# CONFIG_SPI_RSPI is not set -# CONFIG_SPI_QUP is not set -# CONFIG_SPI_S3C64XX is not set -# CONFIG_SPI_SC18IS602 is not set -# CONFIG_SPI_SH_MSIOF is not set -# CONFIG_SPI_SH is not set -# CONFIG_SPI_SH_HSPI is not set -# CONFIG_SPI_SIFIVE is not set -# CONFIG_SPI_SPRD is not set -# CONFIG_SPI_SPRD_ADI is not set -# CONFIG_SPI_STM32 is not set -# CONFIG_SPI_STM32_QSPI is not set -# CONFIG_SPI_ST_SSC4 is not set -# CONFIG_SPI_SUN4I is not set -# CONFIG_SPI_SUN6I is not set -# CONFIG_SPI_SUNPLUS_SP7021 is not set -# CONFIG_SPI_SYNQUACER is not set -# CONFIG_SPI_MXIC is not set -# CONFIG_SPI_TEGRA210_QUAD is not set -# CONFIG_SPI_TEGRA114 is not set -# CONFIG_SPI_TEGRA20_SFLASH is not set -# CONFIG_SPI_TEGRA20_SLINK is not set -# CONFIG_SPI_THUNDERX is not set -# CONFIG_SPI_TOPCLIFF_PCH is not set -# CONFIG_SPI_UNIPHIER is not set -# CONFIG_SPI_XCOMM is not set -# CONFIG_SPI_XILINX is not set -# CONFIG_SPI_XLP is not set -# CONFIG_SPI_XTENSA_XTFPGA is not set -# CONFIG_SPI_ZYNQ_QSPI is not set -# CONFIG_SPI_ZYNQMP_GQSPI is not set -# CONFIG_SPI_AMD is not set - -# -# SPI Multiplexer support -# -# CONFIG_SPI_MUX is not set - -# -# SPI Protocol Masters -# -# CONFIG_SPI_SPIDEV is not set -# CONFIG_SPI_LOOPBACK_TEST is not set -# CONFIG_SPI_TLE62X0 is not set -# CONFIG_SPI_SLAVE is not set -CONFIG_SPI_DYNAMIC=y -# CONFIG_SPMI is not set -# CONFIG_HSI is not set -CONFIG_PPS=y -# CONFIG_PPS_DEBUG is not set - -# -# PPS clients support -# -# CONFIG_PPS_CLIENT_KTIMER is not set -# CONFIG_PPS_CLIENT_LDISC is not set -# CONFIG_PPS_CLIENT_GPIO is not set - -# -# PPS generators support -# - -# -# 
PTP clock support -# -CONFIG_PTP_1588_CLOCK=y -CONFIG_PTP_1588_CLOCK_OPTIONAL=y -CONFIG_PTP_1588_CLOCK_DTE=y -CONFIG_PTP_1588_CLOCK_QORIQ=y - -# -# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. -# -# CONFIG_PTP_1588_CLOCK_PCH is not set -# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set -CONFIG_PTP_1588_CLOCK_IDTCM=y -# end of PTP clock support - -CONFIG_PINCTRL=y -CONFIG_GENERIC_PINCTRL_GROUPS=y -CONFIG_PINMUX=y -CONFIG_GENERIC_PINMUX_FUNCTIONS=y -CONFIG_PINCONF=y -CONFIG_GENERIC_PINCONF=y -# CONFIG_DEBUG_PINCTRL is not set -# CONFIG_PINCTRL_AMD is not set -# CONFIG_PINCTRL_AT91PIO4 is not set -# CONFIG_PINCTRL_BM1880 is not set -# CONFIG_PINCTRL_CY8C95X0 is not set -# CONFIG_PINCTRL_DA850_PUPD is not set -# CONFIG_PINCTRL_EQUILIBRIUM is not set -# CONFIG_PINCTRL_INGENIC is not set -# CONFIG_PINCTRL_LPC18XX is not set -# CONFIG_PINCTRL_MCP23S08 is not set -# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set -# CONFIG_PINCTRL_OCELOT is not set -# CONFIG_PINCTRL_PISTACHIO is not set -CONFIG_PINCTRL_SPACEMIT_PMIC=y -# CONFIG_PINCTRL_ROCKCHIP is not set -CONFIG_PINCTRL_SINGLE=y -# CONFIG_PINCTRL_STMFX is not set -# CONFIG_PINCTRL_SX150X is not set -# CONFIG_PINCTRL_OWL is not set -# CONFIG_PINCTRL_ASPEED_G4 is not set -# CONFIG_PINCTRL_ASPEED_G5 is not set -# CONFIG_PINCTRL_ASPEED_G6 is not set -# CONFIG_PINCTRL_BCM281XX is not set -# CONFIG_PINCTRL_BCM2835 is not set -# CONFIG_PINCTRL_BCM4908 is not set -# CONFIG_PINCTRL_BCM6318 is not set -# CONFIG_PINCTRL_BCM6328 is not set -# CONFIG_PINCTRL_BCM6358 is not set -# CONFIG_PINCTRL_BCM6362 is not set -# CONFIG_PINCTRL_BCM6368 is not set -# CONFIG_PINCTRL_BCM63268 is not set -# CONFIG_PINCTRL_IPROC_GPIO is not set -# CONFIG_PINCTRL_CYGNUS_MUX is not set -# CONFIG_PINCTRL_NS is not set -# CONFIG_PINCTRL_NSP_GPIO is not set -# CONFIG_PINCTRL_NS2_MUX is not set -# CONFIG_PINCTRL_NSP_MUX is not set -# CONFIG_PINCTRL_AS370 is not set -# CONFIG_PINCTRL_BERLIN_BG4CT is not set - -# -# Intel pinctrl drivers -# -# end of Intel pinctrl drivers - -# -# MediaTek pinctrl drivers -# -# CONFIG_EINT_MTK is not set -# CONFIG_PINCTRL_MT2701 is not set -# CONFIG_PINCTRL_MT7623 is not set -# CONFIG_PINCTRL_MT7629 is not set -# CONFIG_PINCTRL_MT8135 is not set -# CONFIG_PINCTRL_MT8127 is not set -# CONFIG_PINCTRL_MT2712 is not set -# CONFIG_PINCTRL_MT6765 is not set -# CONFIG_PINCTRL_MT6779 is not set -# CONFIG_PINCTRL_MT6795 is not set -# CONFIG_PINCTRL_MT6797 is not set -# CONFIG_PINCTRL_MT7622 is not set -# CONFIG_PINCTRL_MT7986 is not set -# CONFIG_PINCTRL_MT8167 is not set -# CONFIG_PINCTRL_MT8173 is not set -# CONFIG_PINCTRL_MT8183 is not set -# CONFIG_PINCTRL_MT8186 is not set -# CONFIG_PINCTRL_MT8188 is not set -# CONFIG_PINCTRL_MT8192 is not set -# CONFIG_PINCTRL_MT8195 is not set -# CONFIG_PINCTRL_MT8365 is not set -# CONFIG_PINCTRL_MT8516 is not set -# CONFIG_PINCTRL_MT6397 is not set -# end of MediaTek pinctrl drivers - -# CONFIG_PINCTRL_MESON is not set -# CONFIG_PINCTRL_WPCM450 is not set -# CONFIG_PINCTRL_NPCM7XX is not set -# CONFIG_PINCTRL_PXA25X is not set -# CONFIG_PINCTRL_PXA27X is not set -# CONFIG_PINCTRL_MSM is not set -# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set -# CONFIG_PINCTRL_LPASS_LPI is not set - -# -# Renesas pinctrl drivers -# -# CONFIG_PINCTRL_RENESAS is not set -# CONFIG_PINCTRL_PFC_EMEV2 is not set -# CONFIG_PINCTRL_PFC_R8A77995 is not set -# CONFIG_PINCTRL_PFC_R8A7794 is not set -# CONFIG_PINCTRL_PFC_R8A77990 is not set -# CONFIG_PINCTRL_PFC_R8A7779 is not set -# CONFIG_PINCTRL_PFC_R8A7790 is not set -# 
CONFIG_PINCTRL_PFC_R8A77950 is not set -# CONFIG_PINCTRL_PFC_R8A77951 is not set -# CONFIG_PINCTRL_PFC_R8A7778 is not set -# CONFIG_PINCTRL_PFC_R8A7793 is not set -# CONFIG_PINCTRL_PFC_R8A7791 is not set -# CONFIG_PINCTRL_PFC_R8A77965 is not set -# CONFIG_PINCTRL_PFC_R8A77960 is not set -# CONFIG_PINCTRL_PFC_R8A77961 is not set -# CONFIG_PINCTRL_PFC_R8A779F0 is not set -# CONFIG_PINCTRL_PFC_R8A7792 is not set -# CONFIG_PINCTRL_PFC_R8A77980 is not set -# CONFIG_PINCTRL_PFC_R8A77970 is not set -# CONFIG_PINCTRL_PFC_R8A779A0 is not set -# CONFIG_PINCTRL_PFC_R8A779G0 is not set -# CONFIG_PINCTRL_PFC_R8A7740 is not set -# CONFIG_PINCTRL_PFC_R8A73A4 is not set -# CONFIG_PINCTRL_RZA1 is not set -# CONFIG_PINCTRL_RZA2 is not set -# CONFIG_PINCTRL_RZG2L is not set -# CONFIG_PINCTRL_PFC_R8A77470 is not set -# CONFIG_PINCTRL_PFC_R8A7745 is not set -# CONFIG_PINCTRL_PFC_R8A7742 is not set -# CONFIG_PINCTRL_PFC_R8A7743 is not set -# CONFIG_PINCTRL_PFC_R8A7744 is not set -# CONFIG_PINCTRL_PFC_R8A774C0 is not set -# CONFIG_PINCTRL_PFC_R8A774E1 is not set -# CONFIG_PINCTRL_PFC_R8A774A1 is not set -# CONFIG_PINCTRL_PFC_R8A774B1 is not set -# CONFIG_PINCTRL_RZN1 is not set -# CONFIG_PINCTRL_RZV2M is not set -# CONFIG_PINCTRL_PFC_SH7203 is not set -# CONFIG_PINCTRL_PFC_SH7264 is not set -# CONFIG_PINCTRL_PFC_SH7269 is not set -# CONFIG_PINCTRL_PFC_SH7720 is not set -# CONFIG_PINCTRL_PFC_SH7722 is not set -# CONFIG_PINCTRL_PFC_SH7734 is not set -# CONFIG_PINCTRL_PFC_SH7757 is not set -# CONFIG_PINCTRL_PFC_SH7785 is not set -# CONFIG_PINCTRL_PFC_SH7786 is not set -# CONFIG_PINCTRL_PFC_SH73A0 is not set -# CONFIG_PINCTRL_PFC_SH7723 is not set -# CONFIG_PINCTRL_PFC_SH7724 is not set -# CONFIG_PINCTRL_PFC_SHX3 is not set -# end of Renesas pinctrl drivers - -# CONFIG_PINCTRL_EXYNOS is not set -# CONFIG_PINCTRL_S3C24XX is not set -# CONFIG_PINCTRL_S3C64XX is not set -# CONFIG_PINCTRL_SPRD_SC9860 is not set -# CONFIG_PINCTRL_STARFIVE_JH7100 is not set -# CONFIG_PINCTRL_STM32F429 is not set -# CONFIG_PINCTRL_STM32F469 is not set -# CONFIG_PINCTRL_STM32F746 is not set -# CONFIG_PINCTRL_STM32F769 is not set -# CONFIG_PINCTRL_STM32H743 is not set -# CONFIG_PINCTRL_STM32MP135 is not set -# CONFIG_PINCTRL_STM32MP157 is not set -# CONFIG_PINCTRL_TI_IODELAY is not set -# CONFIG_PINCTRL_UNIPHIER is not set -# CONFIG_PINCTRL_TMPV7700 is not set -CONFIG_GPIOLIB=y -CONFIG_GPIOLIB_FASTPATH_LIMIT=512 -CONFIG_OF_GPIO=y -# CONFIG_DEBUG_GPIO is not set -CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_CDEV=y -CONFIG_GPIO_CDEV_V1=y - -# -# Memory mapped GPIO drivers -# -# CONFIG_GPIO_74XX_MMIO is not set -# CONFIG_GPIO_ALTERA is not set -# CONFIG_GPIO_ASPEED is not set -# CONFIG_GPIO_ASPEED_SGPIO is not set -# CONFIG_GPIO_ATH79 is not set -# CONFIG_GPIO_RASPBERRYPI_EXP is not set -# CONFIG_GPIO_BCM_KONA is not set -# CONFIG_GPIO_BCM_XGS_IPROC is not set -# CONFIG_GPIO_BRCMSTB is not set -# CONFIG_GPIO_CADENCE is not set -# CONFIG_GPIO_CLPS711X is not set -# CONFIG_GPIO_DWAPB is not set -# CONFIG_GPIO_EIC_SPRD is not set -# CONFIG_GPIO_EM is not set -# CONFIG_GPIO_FTGPIO010 is not set -# CONFIG_GPIO_GENERIC_PLATFORM is not set -# CONFIG_GPIO_GRGPIO is not set -# CONFIG_GPIO_HISI is not set -# CONFIG_GPIO_HLWD is not set -# CONFIG_GPIO_IOP is not set -CONFIG_GPIO_K1X=y -# CONFIG_GPIO_LOGICVC is not set -# CONFIG_GPIO_LPC18XX is not set -# CONFIG_GPIO_LPC32XX is not set -# CONFIG_GPIO_MB86S7X is not set -# CONFIG_GPIO_MPC8XXX is not set -# CONFIG_GPIO_MT7621 is not set -# CONFIG_GPIO_MXC is not set -# CONFIG_GPIO_MXS is not set -# 
CONFIG_GPIO_PMIC_EIC_SPRD is not set -# CONFIG_GPIO_PXA is not set -# CONFIG_GPIO_RCAR is not set -# CONFIG_GPIO_RDA is not set -# CONFIG_GPIO_ROCKCHIP is not set -# CONFIG_GPIO_SAMA5D2_PIOBU is not set -# CONFIG_GPIO_SIFIVE is not set -# CONFIG_GPIO_SNPS_CREG is not set -# CONFIG_GPIO_SPRD is not set -# CONFIG_GPIO_STP_XWAY is not set -# CONFIG_GPIO_SYSCON is not set -# CONFIG_GPIO_TEGRA is not set -# CONFIG_GPIO_TEGRA186 is not set -# CONFIG_GPIO_TS4800 is not set -# CONFIG_GPIO_THUNDERX is not set -# CONFIG_GPIO_UNIPHIER is not set -# CONFIG_GPIO_VISCONTI is not set -# CONFIG_GPIO_VX855 is not set -# CONFIG_GPIO_XGENE_SB is not set -# CONFIG_GPIO_XILINX is not set -# CONFIG_GPIO_XLP is not set -# CONFIG_GPIO_AMD_FCH is not set -# CONFIG_GPIO_IDT3243X is not set -# end of Memory mapped GPIO drivers - -# -# I2C GPIO expanders -# -# CONFIG_GPIO_ADNP is not set -# CONFIG_GPIO_GW_PLD is not set -# CONFIG_GPIO_MAX7300 is not set -# CONFIG_GPIO_MAX732X is not set -# CONFIG_GPIO_PCA953X is not set -# CONFIG_GPIO_PCA9570 is not set -# CONFIG_GPIO_PCF857X is not set -# CONFIG_GPIO_TPIC2810 is not set -# CONFIG_GPIO_TS4900 is not set -# end of I2C GPIO expanders - -# -# MFD GPIO expanders -# -# CONFIG_GPIO_SL28CPLD is not set -# CONFIG_GPIO_TQMX86 is not set -# end of MFD GPIO expanders - -# -# PCI GPIO expanders -# -# CONFIG_GPIO_AMD8111 is not set -# CONFIG_GPIO_BT8XX is not set -# CONFIG_GPIO_MLXBF is not set -# CONFIG_GPIO_MLXBF2 is not set -# CONFIG_GPIO_ML_IOH is not set -# CONFIG_GPIO_PCH is not set -# CONFIG_GPIO_PCI_IDIO_16 is not set -# CONFIG_GPIO_PCIE_IDIO_24 is not set -# CONFIG_GPIO_RDC321X is not set -# end of PCI GPIO expanders - -# -# SPI GPIO expanders -# -# CONFIG_GPIO_74X164 is not set -# CONFIG_GPIO_MAX3191X is not set -# CONFIG_GPIO_MAX7301 is not set -# CONFIG_GPIO_MC33880 is not set -# CONFIG_GPIO_PISOSR is not set -# CONFIG_GPIO_XRA1403 is not set -# end of SPI GPIO expanders - -# -# USB GPIO expanders -# -# end of USB GPIO expanders - -# -# Virtual GPIO drivers -# -# CONFIG_GPIO_AGGREGATOR is not set -# CONFIG_GPIO_MOCKUP is not set -# CONFIG_GPIO_VIRTIO is not set -# CONFIG_GPIO_SIM is not set -# end of Virtual GPIO drivers - -# CONFIG_W1 is not set -CONFIG_POWER_RESET=y -# CONFIG_POWER_RESET_BRCMKONA is not set -# CONFIG_POWER_RESET_BRCMSTB is not set -# CONFIG_POWER_RESET_GEMINI_POWEROFF is not set -# CONFIG_POWER_RESET_GPIO is not set -# CONFIG_POWER_RESET_GPIO_RESTART is not set -# CONFIG_POWER_RESET_LINKSTATION is not set -# CONFIG_POWER_RESET_OCELOT_RESET is not set -# CONFIG_POWER_RESET_PIIX4_POWEROFF is not set -# CONFIG_POWER_RESET_LTC2952 is not set -# CONFIG_POWER_RESET_REGULATOR is not set -# CONFIG_POWER_RESET_RESTART is not set -CONFIG_POWER_RESET_KEYSTONE=y -# CONFIG_POWER_RESET_SYSCON is not set -# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set -# CONFIG_POWER_RESET_RMOBILE is not set -# CONFIG_SYSCON_REBOOT_MODE is not set -# CONFIG_POWER_RESET_SC27XX is not set -# CONFIG_NVMEM_REBOOT_MODE is not set -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -CONFIG_POWER_SUPPLY_HWMON=y -# CONFIG_PDA_POWER is not set -# CONFIG_IP5XXX_POWER is not set -# CONFIG_TEST_POWER is not set -# CONFIG_CHARGER_ADP5061 is not set -# CONFIG_BATTERY_ACT8945A is not set -# CONFIG_BATTERY_CW2015 is not set -# CONFIG_BATTERY_DS2780 is not set -# CONFIG_BATTERY_DS2781 is not set -# CONFIG_BATTERY_DS2782 is not set -# CONFIG_BATTERY_SAMSUNG_SDI is not set -# CONFIG_BATTERY_SBS is not set -# CONFIG_CHARGER_SBS is not set -# CONFIG_MANAGER_SBS is not set -# 
CONFIG_BATTERY_BQ27XXX is not set -# CONFIG_BATTERY_MAX17040 is not set -# CONFIG_BATTERY_MAX17042 is not set -# CONFIG_CHARGER_ISP1704 is not set -# CONFIG_CHARGER_MAX8903 is not set -# CONFIG_CHARGER_LP8727 is not set -# CONFIG_CHARGER_GPIO is not set -# CONFIG_CHARGER_MANAGER is not set -# CONFIG_CHARGER_LT3651 is not set -# CONFIG_CHARGER_LTC4162L is not set -# CONFIG_CHARGER_DETECTOR_MAX14656 is not set -# CONFIG_CHARGER_MAX77976 is not set -# CONFIG_CHARGER_QCOM_SMBB is not set -# CONFIG_CHARGER_BQ2415X is not set -# CONFIG_CHARGER_BQ24190 is not set -# CONFIG_CHARGER_BQ24257 is not set -# CONFIG_CHARGER_BQ24735 is not set -# CONFIG_CHARGER_BQ2515X is not set -# CONFIG_CHARGER_BQ25890 is not set -# CONFIG_CHARGER_BQ25980 is not set -# CONFIG_CHARGER_BQ256XX is not set -# CONFIG_CHARGER_SMB347 is not set -# CONFIG_BATTERY_GAUGE_LTC2941 is not set -# CONFIG_BATTERY_GOLDFISH is not set -# CONFIG_BATTERY_RT5033 is not set -# CONFIG_CHARGER_RT9455 is not set -# CONFIG_CHARGER_SC2731 is not set -# CONFIG_CHARGER_UCS1002 is not set -# CONFIG_CHARGER_BD99954 is not set -# CONFIG_BATTERY_UG3105 is not set -CONFIG_HWMON=y -CONFIG_HWMON_VID=m -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -# CONFIG_SENSORS_AD7314 is not set -# CONFIG_SENSORS_AD7414 is not set -# CONFIG_SENSORS_AD7418 is not set -# CONFIG_SENSORS_ADM1021 is not set -# CONFIG_SENSORS_ADM1025 is not set -# CONFIG_SENSORS_ADM1026 is not set -# CONFIG_SENSORS_ADM1029 is not set -# CONFIG_SENSORS_ADM1031 is not set -# CONFIG_SENSORS_ADM1177 is not set -# CONFIG_SENSORS_ADM9240 is not set -# CONFIG_SENSORS_ADT7310 is not set -# CONFIG_SENSORS_ADT7410 is not set -# CONFIG_SENSORS_ADT7411 is not set -# CONFIG_SENSORS_ADT7462 is not set -# CONFIG_SENSORS_ADT7470 is not set -# CONFIG_SENSORS_ADT7475 is not set -# CONFIG_SENSORS_AHT10 is not set -# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set -# CONFIG_SENSORS_AS370 is not set -# CONFIG_SENSORS_ASC7621 is not set -# CONFIG_SENSORS_AXI_FAN_CONTROL is not set -# CONFIG_SENSORS_ARM_SCMI is not set -# CONFIG_SENSORS_ASB100 is not set -# CONFIG_SENSORS_ASPEED is not set -# CONFIG_SENSORS_ATXP1 is not set -# CONFIG_SENSORS_BT1_PVT is not set -# CONFIG_SENSORS_CORSAIR_CPRO is not set -# CONFIG_SENSORS_CORSAIR_PSU is not set -CONFIG_SENSORS_DRIVETEMP=m -# CONFIG_SENSORS_DS620 is not set -# CONFIG_SENSORS_DS1621 is not set -# CONFIG_SENSORS_I5K_AMB is not set -# CONFIG_SENSORS_SPARX5 is not set -# CONFIG_SENSORS_F71805F is not set -# CONFIG_SENSORS_F71882FG is not set -# CONFIG_SENSORS_F75375S is not set -# CONFIG_SENSORS_FSCHMD is not set -# CONFIG_SENSORS_FTSTEUTATES is not set -# CONFIG_SENSORS_GL518SM is not set -# CONFIG_SENSORS_GL520SM is not set -# CONFIG_SENSORS_G760A is not set -# CONFIG_SENSORS_G762 is not set -CONFIG_SENSORS_GPIO_FAN=y -# CONFIG_SENSORS_HIH6130 is not set -# CONFIG_SENSORS_IT87 is not set -# CONFIG_SENSORS_JC42 is not set -# CONFIG_SENSORS_POWR1220 is not set -# CONFIG_SENSORS_LAN966X is not set -# CONFIG_SENSORS_LINEAGE is not set -# CONFIG_SENSORS_LTC2945 is not set -# CONFIG_SENSORS_LTC2947_I2C is not set -# CONFIG_SENSORS_LTC2947_SPI is not set -# CONFIG_SENSORS_LTC2990 is not set -# CONFIG_SENSORS_LTC2992 is not set -# CONFIG_SENSORS_LTC4151 is not set -# CONFIG_SENSORS_LTC4215 is not set -# CONFIG_SENSORS_LTC4222 is not set -# CONFIG_SENSORS_LTC4245 is not set -# CONFIG_SENSORS_LTC4260 is not set -# CONFIG_SENSORS_LTC4261 is not set -# CONFIG_SENSORS_MAX1111 is not set -# CONFIG_SENSORS_MAX127 is not set -# CONFIG_SENSORS_MAX16065 is not set -# 
CONFIG_SENSORS_MAX1619 is not set -# CONFIG_SENSORS_MAX1668 is not set -# CONFIG_SENSORS_MAX197 is not set -# CONFIG_SENSORS_MAX31722 is not set -# CONFIG_SENSORS_MAX31730 is not set -# CONFIG_SENSORS_MAX31760 is not set -# CONFIG_SENSORS_MAX6620 is not set -# CONFIG_SENSORS_MAX6621 is not set -# CONFIG_SENSORS_MAX6639 is not set -# CONFIG_SENSORS_MAX6642 is not set -# CONFIG_SENSORS_MAX6650 is not set -# CONFIG_SENSORS_MAX6697 is not set -# CONFIG_SENSORS_MAX31790 is not set -# CONFIG_SENSORS_MCP3021 is not set -# CONFIG_SENSORS_TC654 is not set -# CONFIG_SENSORS_TPS23861 is not set -# CONFIG_SENSORS_MR75203 is not set -# CONFIG_SENSORS_ADCXX is not set -# CONFIG_SENSORS_LM63 is not set -# CONFIG_SENSORS_LM70 is not set -# CONFIG_SENSORS_LM73 is not set -# CONFIG_SENSORS_LM75 is not set -# CONFIG_SENSORS_LM77 is not set -# CONFIG_SENSORS_LM78 is not set -# CONFIG_SENSORS_LM80 is not set -# CONFIG_SENSORS_LM83 is not set -# CONFIG_SENSORS_LM85 is not set -# CONFIG_SENSORS_LM87 is not set -# CONFIG_SENSORS_LM90 is not set -# CONFIG_SENSORS_LM92 is not set -# CONFIG_SENSORS_LM93 is not set -# CONFIG_SENSORS_LM95234 is not set -# CONFIG_SENSORS_LM95241 is not set -# CONFIG_SENSORS_LM95245 is not set -# CONFIG_SENSORS_PC87360 is not set -# CONFIG_SENSORS_PC87427 is not set -# CONFIG_SENSORS_NCT6683 is not set -# CONFIG_SENSORS_NCT6775 is not set -# CONFIG_SENSORS_NCT6775_I2C is not set -# CONFIG_SENSORS_NCT7802 is not set -# CONFIG_SENSORS_NCT7904 is not set -# CONFIG_SENSORS_NPCM7XX is not set -# CONFIG_SENSORS_NSA320 is not set -# CONFIG_SENSORS_NZXT_KRAKEN2 is not set -# CONFIG_SENSORS_NZXT_SMART2 is not set -# CONFIG_SENSORS_OCC_P8_I2C is not set -# CONFIG_SENSORS_PCF8591 is not set -CONFIG_PMBUS=m -CONFIG_SENSORS_PMBUS=m -# CONFIG_SENSORS_ADM1266 is not set -# CONFIG_SENSORS_ADM1275 is not set -# CONFIG_SENSORS_BEL_PFE is not set -# CONFIG_SENSORS_BPA_RS600 is not set -# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set -# CONFIG_SENSORS_FSP_3Y is not set -# CONFIG_SENSORS_IBM_CFFPS is not set -# CONFIG_SENSORS_DPS920AB is not set -# CONFIG_SENSORS_INSPUR_IPSPS is not set -# CONFIG_SENSORS_IR35221 is not set -# CONFIG_SENSORS_IR36021 is not set -# CONFIG_SENSORS_IR38064 is not set -# CONFIG_SENSORS_IRPS5401 is not set -# CONFIG_SENSORS_ISL68137 is not set -# CONFIG_SENSORS_LM25066 is not set -# CONFIG_SENSORS_LT7182S is not set -# CONFIG_SENSORS_LTC2978 is not set -# CONFIG_SENSORS_LTC3815 is not set -# CONFIG_SENSORS_MAX15301 is not set -# CONFIG_SENSORS_MAX16064 is not set -# CONFIG_SENSORS_MAX16601 is not set -# CONFIG_SENSORS_MAX20730 is not set -# CONFIG_SENSORS_MAX20751 is not set -# CONFIG_SENSORS_MAX31785 is not set -# CONFIG_SENSORS_MAX34440 is not set -# CONFIG_SENSORS_MAX8688 is not set -# CONFIG_SENSORS_MP2888 is not set -# CONFIG_SENSORS_MP2975 is not set -# CONFIG_SENSORS_MP5023 is not set -# CONFIG_SENSORS_PIM4328 is not set -# CONFIG_SENSORS_PLI1209BC is not set -# CONFIG_SENSORS_PM6764TR is not set -# CONFIG_SENSORS_PXE1610 is not set -# CONFIG_SENSORS_Q54SJ108A2 is not set -# CONFIG_SENSORS_STPDDC60 is not set -# CONFIG_SENSORS_TPS40422 is not set -# CONFIG_SENSORS_TPS53679 is not set -# CONFIG_SENSORS_TPS546D24 is not set -# CONFIG_SENSORS_UCD9000 is not set -# CONFIG_SENSORS_UCD9200 is not set -# CONFIG_SENSORS_XDPE152 is not set -# CONFIG_SENSORS_XDPE122 is not set -# CONFIG_SENSORS_ZL6100 is not set -CONFIG_SENSORS_PWM_FAN=y -# CONFIG_SENSORS_RASPBERRYPI_HWMON is not set -# CONFIG_SENSORS_SL28CPLD is not set -# CONFIG_SENSORS_SBTSI is not set -# CONFIG_SENSORS_SBRMI is not 
set -# CONFIG_SENSORS_SHT15 is not set -# CONFIG_SENSORS_SHT21 is not set -# CONFIG_SENSORS_SHT3x is not set -# CONFIG_SENSORS_SHT4x is not set -# CONFIG_SENSORS_SHTC1 is not set -# CONFIG_SENSORS_SIS5595 is not set -CONFIG_SENSORS_DME1737=m -CONFIG_SENSORS_EMC1403=m -CONFIG_SENSORS_EMC2103=m -CONFIG_SENSORS_EMC2305=m -CONFIG_SENSORS_EMC6W201=m -CONFIG_SENSORS_SMSC47M1=m -CONFIG_SENSORS_SMSC47M192=m -CONFIG_SENSORS_SMSC47B397=m -CONFIG_SENSORS_SCH56XX_COMMON=m -CONFIG_SENSORS_SCH5627=m -CONFIG_SENSORS_SCH5636=m -CONFIG_SENSORS_STTS751=m -# CONFIG_SENSORS_SMM665 is not set -# CONFIG_SENSORS_ADC128D818 is not set -# CONFIG_SENSORS_ADS7828 is not set -# CONFIG_SENSORS_ADS7871 is not set -# CONFIG_SENSORS_AMC6821 is not set -# CONFIG_SENSORS_INA209 is not set -# CONFIG_SENSORS_INA2XX is not set -# CONFIG_SENSORS_INA238 is not set -# CONFIG_SENSORS_INA3221 is not set -CONFIG_SENSORS_TC74=m -# CONFIG_SENSORS_THMC50 is not set -# CONFIG_SENSORS_TMP102 is not set -# CONFIG_SENSORS_TMP103 is not set -# CONFIG_SENSORS_TMP108 is not set -# CONFIG_SENSORS_TMP401 is not set -# CONFIG_SENSORS_TMP421 is not set -# CONFIG_SENSORS_TMP464 is not set -# CONFIG_SENSORS_TMP513 is not set -# CONFIG_SENSORS_VIA686A is not set -# CONFIG_SENSORS_VT1211 is not set -# CONFIG_SENSORS_VT8231 is not set -# CONFIG_SENSORS_W83773G is not set -# CONFIG_SENSORS_W83781D is not set -# CONFIG_SENSORS_W83791D is not set -# CONFIG_SENSORS_W83792D is not set -# CONFIG_SENSORS_W83793 is not set -# CONFIG_SENSORS_W83795 is not set -# CONFIG_SENSORS_W83L785TS is not set -# CONFIG_SENSORS_W83L786NG is not set -# CONFIG_SENSORS_W83627HF is not set -# CONFIG_SENSORS_W83627EHF is not set -CONFIG_THERMAL=y -# CONFIG_THERMAL_NETLINK is not set -# CONFIG_THERMAL_STATISTICS is not set -CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 -CONFIG_THERMAL_HWMON=y -CONFIG_THERMAL_OF=y -# CONFIG_THERMAL_WRITABLE_TRIPS is not set -CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y -# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set -# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set -# CONFIG_THERMAL_GOV_FAIR_SHARE is not set -CONFIG_THERMAL_GOV_STEP_WISE=y -# CONFIG_THERMAL_GOV_BANG_BANG is not set -# CONFIG_THERMAL_GOV_USER_SPACE is not set -CONFIG_CPU_THERMAL=y -CONFIG_CPU_FREQ_THERMAL=y -# CONFIG_CPU_HOTPLUG_THERMAL is not set -# CONFIG_THERMAL_EMULATION is not set -# CONFIG_THERMAL_MMIO is not set -CONFIG_HISI_THERMAL=y -# CONFIG_IMX_THERMAL is not set -# CONFIG_IMX8MM_THERMAL is not set -# CONFIG_K3_THERMAL is not set -# CONFIG_QORIQ_THERMAL is not set -# CONFIG_SPEAR_THERMAL is not set -# CONFIG_SUN8I_THERMAL is not set -# CONFIG_ROCKCHIP_THERMAL is not set -# CONFIG_RCAR_THERMAL is not set -# CONFIG_RCAR_GEN3_THERMAL is not set -# CONFIG_RZG2L_THERMAL is not set -# CONFIG_KIRKWOOD_THERMAL is not set -# CONFIG_DOVE_THERMAL is not set -# CONFIG_ARMADA_THERMAL is not set -# CONFIG_DA9062_THERMAL is not set -CONFIG_MTK_THERMAL=y -CONFIG_K1X_THERMAL=y - -# -# Intel thermal drivers -# - -# -# ACPI INT340X thermal drivers -# -# end of ACPI INT340X thermal drivers -# end of Intel thermal drivers - -# -# Broadcom thermal drivers -# -# CONFIG_BCM2711_THERMAL is not set -# CONFIG_BCM2835_THERMAL is not set -# CONFIG_BRCMSTB_THERMAL is not set -# CONFIG_BCM_NS_THERMAL is not set -# CONFIG_BCM_SR_THERMAL is not set -# end of Broadcom thermal drivers - -# -# Texas Instruments thermal drivers -# -# CONFIG_TI_SOC_THERMAL is not set -# end of Texas Instruments thermal drivers - -# -# Samsung thermal drivers -# -# CONFIG_EXYNOS_THERMAL is not set -# end of Samsung thermal 
drivers - -# -# NVIDIA Tegra thermal drivers -# -# CONFIG_TEGRA_SOCTHERM is not set -# CONFIG_TEGRA_BPMP_THERMAL is not set -# CONFIG_TEGRA30_TSENSOR is not set -# end of NVIDIA Tegra thermal drivers - -# -# Qualcomm thermal drivers -# -# end of Qualcomm thermal drivers - -# CONFIG_UNIPHIER_THERMAL is not set -# CONFIG_SPRD_THERMAL is not set -CONFIG_WATCHDOG=y -CONFIG_WATCHDOG_CORE=y -# CONFIG_WATCHDOG_NOWAYOUT is not set -CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y -CONFIG_WATCHDOG_OPEN_TIMEOUT=0 -# CONFIG_WATCHDOG_SYSFS is not set -# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set - -# -# Watchdog Pretimeout Governors -# -# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set - -# -# Watchdog Device Drivers -# -CONFIG_SOFT_WATCHDOG=m -# CONFIG_DA9052_WATCHDOG is not set -# CONFIG_DA9055_WATCHDOG is not set -# CONFIG_DA9063_WATCHDOG is not set -# CONFIG_DA9062_WATCHDOG is not set -# CONFIG_GPIO_WATCHDOG is not set -# CONFIG_MENF21BMC_WATCHDOG is not set -# CONFIG_XILINX_WATCHDOG is not set -# CONFIG_ZIIRAVE_WATCHDOG is not set -# CONFIG_SL28CPLD_WATCHDOG is not set -# CONFIG_ARMADA_37XX_WATCHDOG is not set -# CONFIG_ASM9260_WATCHDOG is not set -# CONFIG_AT91RM9200_WATCHDOG is not set -# CONFIG_AT91SAM9X_WATCHDOG is not set -# CONFIG_SAMA5D4_WATCHDOG is not set -# CONFIG_CADENCE_WATCHDOG is not set -# CONFIG_FTWDT010_WATCHDOG is not set -# CONFIG_S3C2410_WATCHDOG is not set -# CONFIG_DW_WATCHDOG is not set -# CONFIG_EP93XX_WATCHDOG is not set -# CONFIG_OMAP_WATCHDOG is not set -# CONFIG_PNX4008_WATCHDOG is not set -# CONFIG_DAVINCI_WATCHDOG is not set -# CONFIG_K3_RTI_WATCHDOG is not set -# CONFIG_RN5T618_WATCHDOG is not set -# CONFIG_SUNXI_WATCHDOG is not set -# CONFIG_NPCM7XX_WATCHDOG is not set -# CONFIG_STMP3XXX_RTC_WATCHDOG is not set -# CONFIG_TS4800_WATCHDOG is not set -# CONFIG_TS72XX_WATCHDOG is not set -# CONFIG_MAX63XX_WATCHDOG is not set -# CONFIG_MAX77620_WATCHDOG is not set -# CONFIG_IMX2_WDT is not set -# CONFIG_IMX7ULP_WDT is not set -# CONFIG_MOXART_WDT is not set -# CONFIG_ST_LPC_WATCHDOG is not set -# CONFIG_TEGRA_WATCHDOG is not set -# CONFIG_QCOM_WDT is not set -# CONFIG_MESON_GXBB_WATCHDOG is not set -# CONFIG_MESON_WATCHDOG is not set -# CONFIG_MEDIATEK_WATCHDOG is not set -# CONFIG_DIGICOLOR_WATCHDOG is not set -# CONFIG_LPC18XX_WATCHDOG is not set -# CONFIG_RENESAS_WDT is not set -# CONFIG_RENESAS_RZAWDT is not set -# CONFIG_RENESAS_RZN1WDT is not set -# CONFIG_RENESAS_RZG2LWDT is not set -# CONFIG_ASPEED_WATCHDOG is not set -# CONFIG_UNIPHIER_WATCHDOG is not set -# CONFIG_RTD119X_WATCHDOG is not set -# CONFIG_REALTEK_OTTO_WDT is not set -# CONFIG_SPRD_WATCHDOG is not set -# CONFIG_VISCONTI_WATCHDOG is not set -# CONFIG_MSC313E_WATCHDOG is not set -# CONFIG_APPLE_WATCHDOG is not set -# CONFIG_SUNPLUS_WATCHDOG is not set -# CONFIG_ALIM7101_WDT is not set -# CONFIG_SC520_WDT is not set -# CONFIG_I6300ESB_WDT is not set -# CONFIG_RDC321X_WDT is not set -# CONFIG_BCM47XX_WDT is not set -# CONFIG_BCM2835_WDT is not set -# CONFIG_BCM_KONA_WDT is not set -# CONFIG_BCM7038_WDT is not set -# CONFIG_IMGPDC_WDT is not set -# CONFIG_MPC5200_WDT is not set -# CONFIG_MEN_A21_WDT is not set -# CONFIG_UML_WATCHDOG is not set - -# -# PCI-based Watchdog Cards -# -# CONFIG_PCIPCWATCHDOG is not set -# CONFIG_WDTPCI is not set - -# -# USB-based Watchdog Cards -# -# CONFIG_USBPCWATCHDOG is not set -CONFIG_SPACEMIT_WATCHDOG=y -# CONFIG_K1X_WDT_TEST is not set -CONFIG_SSB_POSSIBLE=y -# CONFIG_SSB is not set -CONFIG_BCMA_POSSIBLE=y -# CONFIG_BCMA is not set - -# -# Multifunction device drivers -# 
-CONFIG_MFD_CORE=y -# CONFIG_MFD_ACT8945A is not set -# CONFIG_MFD_SUN4I_GPADC is not set -# CONFIG_MFD_AS3711 is not set -# CONFIG_MFD_AS3722 is not set -# CONFIG_PMIC_ADP5520 is not set -# CONFIG_MFD_AAT2870_CORE is not set -# CONFIG_MFD_AT91_USART is not set -# CONFIG_MFD_ATMEL_FLEXCOM is not set -# CONFIG_MFD_ATMEL_HLCDC is not set -# CONFIG_MFD_BCM590XX is not set -# CONFIG_MFD_BD9571MWV is not set -# CONFIG_MFD_AXP20X_I2C is not set -# CONFIG_MFD_MADERA is not set -# CONFIG_MFD_ASIC3 is not set -# CONFIG_PMIC_DA903X is not set -# CONFIG_MFD_DA9052_SPI is not set -# CONFIG_MFD_DA9052_I2C is not set -# CONFIG_MFD_DA9055 is not set -# CONFIG_MFD_DA9062 is not set -# CONFIG_MFD_DA9063 is not set -# CONFIG_MFD_DA9150 is not set -# CONFIG_MFD_DLN2 is not set -# CONFIG_MFD_ENE_KB3930 is not set -# CONFIG_MFD_EXYNOS_LPASS is not set -# CONFIG_MFD_GATEWORKS_GSC is not set -# CONFIG_MFD_MC13XXX_SPI is not set -# CONFIG_MFD_MC13XXX_I2C is not set -# CONFIG_MFD_MP2629 is not set -# CONFIG_MFD_MXS_LRADC is not set -# CONFIG_MFD_MX25_TSADC is not set -# CONFIG_MFD_HI6421_PMIC is not set -# CONFIG_MFD_HI655X_PMIC is not set -# CONFIG_HTC_PASIC3 is not set -# CONFIG_HTC_I2CPLD is not set -# CONFIG_LPC_ICH is not set -# CONFIG_LPC_SCH is not set -# CONFIG_MFD_IQS62X is not set -# CONFIG_MFD_JANZ_CMODIO is not set -# CONFIG_MFD_KEMPLD is not set -# CONFIG_MFD_88PM800 is not set -# CONFIG_MFD_88PM805 is not set -# CONFIG_MFD_88PM860X is not set -# CONFIG_MFD_MAX14577 is not set -# CONFIG_MFD_MAX77620 is not set -# CONFIG_MFD_MAX77650 is not set -# CONFIG_MFD_MAX77686 is not set -# CONFIG_MFD_MAX77693 is not set -# CONFIG_MFD_MAX77714 is not set -# CONFIG_MFD_MAX77843 is not set -# CONFIG_MFD_MAX8907 is not set -# CONFIG_MFD_MAX8925 is not set -# CONFIG_MFD_MAX8997 is not set -# CONFIG_MFD_MAX8998 is not set -# CONFIG_MFD_MT6360 is not set -# CONFIG_MFD_MT6370 is not set -# CONFIG_MFD_MT6397 is not set -# CONFIG_MFD_MENF21BMC is not set -# CONFIG_MFD_OCELOT is not set -# CONFIG_EZX_PCAP is not set -# CONFIG_MFD_CPCAP is not set -# CONFIG_MFD_VIPERBOARD is not set -# CONFIG_MFD_NTXEC is not set -# CONFIG_MFD_RETU is not set -# CONFIG_MFD_PCF50633 is not set -# CONFIG_MFD_PM8XXX is not set -# CONFIG_MFD_SY7636A is not set -# CONFIG_MFD_RDC321X is not set -# CONFIG_MFD_RT4831 is not set -# CONFIG_MFD_RT5033 is not set -# CONFIG_MFD_RT5120 is not set -# CONFIG_MFD_RC5T583 is not set -# CONFIG_MFD_RK808 is not set -# CONFIG_MFD_RN5T618 is not set -# CONFIG_MFD_SEC_CORE is not set -# CONFIG_MFD_SI476X_CORE is not set -# CONFIG_MFD_SL28CPLD is not set -# CONFIG_MFD_SM501 is not set -# CONFIG_MFD_SKY81452 is not set -# CONFIG_MFD_SC27XX_PMIC is not set -# CONFIG_ABX500_CORE is not set -# CONFIG_MFD_STMPE is not set -# CONFIG_MFD_SUN6I_PRCM is not set -CONFIG_MFD_SYSCON=y -# CONFIG_MFD_TI_AM335X_TSCADC is not set -# CONFIG_MFD_LP3943 is not set -# CONFIG_MFD_LP8788 is not set -# CONFIG_MFD_TI_LMU is not set -# CONFIG_MFD_PALMAS is not set -# CONFIG_TPS6105X is not set -# CONFIG_TPS65010 is not set -# CONFIG_TPS6507X is not set -# CONFIG_MFD_TPS65086 is not set -# CONFIG_MFD_TPS65090 is not set -# CONFIG_MFD_TPS65217 is not set -# CONFIG_MFD_TI_LP873X is not set -# CONFIG_MFD_TI_LP87565 is not set -# CONFIG_MFD_TPS65218 is not set -# CONFIG_MFD_TPS6586X is not set -# CONFIG_MFD_TPS65910 is not set -# CONFIG_MFD_TPS65912_I2C is not set -# CONFIG_MFD_TPS65912_SPI is not set -# CONFIG_TWL4030_CORE is not set -# CONFIG_TWL6040_CORE is not set -# CONFIG_MFD_WL1273_CORE is not set -# CONFIG_MFD_LM3533 is not set -# 
CONFIG_MFD_TIMBERDALE is not set -# CONFIG_MFD_TC3589X is not set -# CONFIG_MFD_TQMX86 is not set -# CONFIG_MFD_VX855 is not set -# CONFIG_MFD_LOCHNAGAR is not set -# CONFIG_MFD_ARIZONA_I2C is not set -# CONFIG_MFD_ARIZONA_SPI is not set -# CONFIG_MFD_WM8400 is not set -# CONFIG_MFD_WM831X_I2C is not set -# CONFIG_MFD_WM831X_SPI is not set -# CONFIG_MFD_WM8350_I2C is not set -# CONFIG_MFD_WM8994 is not set -# CONFIG_MFD_STW481X is not set -# CONFIG_MFD_ROHM_BD718XX is not set -# CONFIG_MFD_ROHM_BD71828 is not set -# CONFIG_MFD_ROHM_BD957XMUF is not set -# CONFIG_MFD_STM32_LPTIMER is not set -# CONFIG_MFD_STM32_TIMERS is not set -# CONFIG_MFD_STPMIC1 is not set -# CONFIG_MFD_STMFX is not set -# CONFIG_MFD_ATC260X_I2C is not set -# CONFIG_MFD_KHADAS_MCU is not set -# CONFIG_MFD_ACER_A500_EC is not set -# CONFIG_MFD_QCOM_PM8008 is not set -CONFIG_MFD_SPACEMIT_PMIC=y -# CONFIG_RAVE_SP_CORE is not set -# CONFIG_MFD_INTEL_M10_BMC is not set -# CONFIG_MFD_RSMU_I2C is not set -# CONFIG_MFD_RSMU_SPI is not set -# end of Multifunction device drivers - -CONFIG_REGULATOR=y -# CONFIG_REGULATOR_DEBUG is not set -CONFIG_REGULATOR_FIXED_VOLTAGE=y -# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set -CONFIG_REGULATOR_USERSPACE_CONSUMER=m -# CONFIG_REGULATOR_88PG86X is not set -# CONFIG_REGULATOR_ACT8865 is not set -# CONFIG_REGULATOR_AD5398 is not set -# CONFIG_REGULATOR_ANATOP is not set -# CONFIG_REGULATOR_ARM_SCMI is not set -# CONFIG_REGULATOR_DA9121 is not set -# CONFIG_REGULATOR_DA9210 is not set -# CONFIG_REGULATOR_DA9211 is not set -CONFIG_REGULATOR_FAN53555=m -CONFIG_REGULATOR_FAN53880=m -CONFIG_REGULATOR_GPIO=m -# CONFIG_REGULATOR_ISL9305 is not set -# CONFIG_REGULATOR_ISL6271A is not set -# CONFIG_REGULATOR_LP3971 is not set -# CONFIG_REGULATOR_LP3972 is not set -# CONFIG_REGULATOR_LP872X is not set -# CONFIG_REGULATOR_LP8755 is not set -# CONFIG_REGULATOR_LTC3589 is not set -# CONFIG_REGULATOR_LTC3676 is not set -# CONFIG_REGULATOR_MAX1586 is not set -# CONFIG_REGULATOR_MAX77620 is not set -# CONFIG_REGULATOR_MAX77650 is not set -# CONFIG_REGULATOR_MAX8649 is not set -# CONFIG_REGULATOR_MAX8660 is not set -# CONFIG_REGULATOR_MAX8893 is not set -# CONFIG_REGULATOR_MAX8907 is not set -# CONFIG_REGULATOR_MAX8952 is not set -# CONFIG_REGULATOR_MAX8973 is not set -# CONFIG_REGULATOR_MAX20086 is not set -# CONFIG_REGULATOR_MAX77686 is not set -# CONFIG_REGULATOR_MAX77693 is not set -# CONFIG_REGULATOR_MAX77802 is not set -# CONFIG_REGULATOR_MAX77826 is not set -# CONFIG_REGULATOR_MCP16502 is not set -# CONFIG_REGULATOR_MP5416 is not set -# CONFIG_REGULATOR_MP8859 is not set -# CONFIG_REGULATOR_MP886X is not set -# CONFIG_REGULATOR_MPQ7920 is not set -# CONFIG_REGULATOR_MT6311 is not set -# CONFIG_REGULATOR_PBIAS is not set -# CONFIG_REGULATOR_PCA9450 is not set -# CONFIG_REGULATOR_PF8X00 is not set -# CONFIG_REGULATOR_PFUZE100 is not set -# CONFIG_REGULATOR_PV88060 is not set -# CONFIG_REGULATOR_PV88080 is not set -# CONFIG_REGULATOR_PV88090 is not set -CONFIG_REGULATOR_PWM=m -# CONFIG_REGULATOR_QCOM_RPMH is not set -# CONFIG_REGULATOR_QCOM_SPMI is not set -# CONFIG_REGULATOR_QCOM_USB_VBUS is not set -# CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY is not set -# CONFIG_REGULATOR_RT4801 is not set -# CONFIG_REGULATOR_RT5190A is not set -# CONFIG_REGULATOR_RT5759 is not set -# CONFIG_REGULATOR_RT6160 is not set -# CONFIG_REGULATOR_RT6245 is not set -# CONFIG_REGULATOR_RTQ2134 is not set -# CONFIG_REGULATOR_RTMV20 is not set -# CONFIG_REGULATOR_RTQ6752 is not set -# CONFIG_REGULATOR_S2MPA01 is not set -# 
CONFIG_REGULATOR_S2MPS11 is not set -# CONFIG_REGULATOR_S5M8767 is not set -# CONFIG_REGULATOR_SC2731 is not set -# CONFIG_REGULATOR_SLG51000 is not set -# CONFIG_REGULATOR_STM32_BOOSTER is not set -# CONFIG_REGULATOR_STM32_VREFBUF is not set -# CONFIG_REGULATOR_STM32_PWR is not set -# CONFIG_REGULATOR_TI_ABB is not set -# CONFIG_REGULATOR_STW481X_VMMC is not set -# CONFIG_REGULATOR_SY8106A is not set -# CONFIG_REGULATOR_SY8824X is not set -# CONFIG_REGULATOR_SY8827N is not set -# CONFIG_REGULATOR_TPS51632 is not set -# CONFIG_REGULATOR_TPS62360 is not set -# CONFIG_REGULATOR_TPS6286X is not set -# CONFIG_REGULATOR_TPS65023 is not set -# CONFIG_REGULATOR_TPS6507X is not set -# CONFIG_REGULATOR_TPS65132 is not set -# CONFIG_REGULATOR_TPS6524X is not set -# CONFIG_REGULATOR_TPS68470 is not set -# CONFIG_REGULATOR_UNIPHIER is not set -CONFIG_REGULATOR_VCTRL=m -# CONFIG_REGULATOR_QCOM_LABIBB is not set -CONFIG_REGULATOR_SPACEMIT=y -# CONFIG_RC_CORE is not set - -# -# CEC support -# -# CONFIG_MEDIA_CEC_SUPPORT is not set -# end of CEC support - -CONFIG_MEDIA_SUPPORT=y -# CONFIG_MEDIA_SUPPORT_FILTER is not set -# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set - -# -# Media device types -# -CONFIG_MEDIA_CAMERA_SUPPORT=y -CONFIG_MEDIA_ANALOG_TV_SUPPORT=y -CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y -CONFIG_MEDIA_RADIO_SUPPORT=y -CONFIG_MEDIA_SDR_SUPPORT=y -CONFIG_MEDIA_PLATFORM_SUPPORT=y -CONFIG_MEDIA_TEST_SUPPORT=y -# end of Media device types - -# -# Media core support -# -CONFIG_VIDEO_DEV=y -CONFIG_MEDIA_CONTROLLER=y -CONFIG_DVB_CORE=y -# end of Media core support - -# -# Video4Linux options -# -CONFIG_VIDEO_V4L2_I2C=y -CONFIG_VIDEO_V4L2_SUBDEV_API=y -# CONFIG_VIDEO_ADV_DEBUG is not set -# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set -# end of Video4Linux options - -# -# Media controller options -# -# CONFIG_MEDIA_CONTROLLER_DVB is not set -# end of Media controller options - -# -# Digital TV options -# -# CONFIG_DVB_MMAP is not set -# CONFIG_DVB_NET is not set -CONFIG_DVB_MAX_ADAPTERS=16 -# CONFIG_DVB_DYNAMIC_MINORS is not set -# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set -# CONFIG_DVB_ULE_DEBUG is not set -# end of Digital TV options - -# -# Media drivers -# - -# -# Media drivers -# -CONFIG_MEDIA_USB_SUPPORT=y - -# -# Webcam devices -# -# CONFIG_USB_GSPCA is not set -# CONFIG_USB_PWC is not set -# CONFIG_USB_S2255 is not set -# CONFIG_VIDEO_USBTV is not set -CONFIG_USB_VIDEO_CLASS=y -CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y - -# -# Analog TV USB devices -# -# CONFIG_VIDEO_GO7007 is not set -# CONFIG_VIDEO_HDPVR is not set -# CONFIG_VIDEO_PVRUSB2 is not set -# CONFIG_VIDEO_STK1160_COMMON is not set - -# -# Analog/digital TV USB devices -# -# CONFIG_VIDEO_AU0828 is not set -# CONFIG_VIDEO_CX231XX is not set - -# -# Digital TV USB devices -# -# CONFIG_DVB_AS102 is not set -# CONFIG_DVB_B2C2_FLEXCOP_USB is not set -# CONFIG_DVB_USB_V2 is not set -# CONFIG_SMS_USB_DRV is not set -# CONFIG_DVB_TTUSB_BUDGET is not set -# CONFIG_DVB_TTUSB_DEC is not set - -# -# Webcam, TV (analog/digital) USB devices -# -# CONFIG_VIDEO_EM28XX is not set - -# -# Software defined radio USB devices -# -# CONFIG_USB_AIRSPY is not set -# CONFIG_USB_HACKRF is not set -# CONFIG_USB_MSI2500 is not set -# CONFIG_MEDIA_PCI_SUPPORT is not set -# CONFIG_RADIO_ADAPTERS is not set -CONFIG_MEDIA_PLATFORM_DRIVERS=y -# CONFIG_V4L_PLATFORM_DRIVERS is not set -# CONFIG_SDR_PLATFORM_DRIVERS is not set -# CONFIG_DVB_PLATFORM_DRIVERS is not set -# CONFIG_V4L_MEM2MEM_DRIVERS is not set - -# -# Allegro DVT media platform drivers -# - -# -# Amlogic media 
platform drivers -# - -# -# Amphion drivers -# - -# -# Aspeed media platform drivers -# - -# -# Atmel media platform drivers -# - -# -# Cadence media platform drivers -# -# CONFIG_VIDEO_CADENCE_CSI2RX is not set -# CONFIG_VIDEO_CADENCE_CSI2TX is not set - -# -# Chips&Media media platform drivers -# - -# -# Intel media platform drivers -# - -# -# Marvell media platform drivers -# - -# -# Mediatek media platform drivers -# -# CONFIG_VIDEO_MEDIATEK_MDP3 is not set - -# -# NVidia media platform drivers -# - -# -# NXP media platform drivers -# -# CONFIG_VIDEO_IMX_MIPI_CSIS is not set - -# -# Qualcomm media platform drivers -# - -# -# Renesas media platform drivers -# - -# -# Rockchip media platform drivers -# - -# -# Samsung media platform drivers -# - -# -# STMicroelectronics media platform drivers -# - -# -# Sunxi media platform drivers -# - -# -# Texas Instruments drivers -# - -# -# Verisilicon media platform drivers -# - -# -# VIA media platform drivers -# - -# -# Xilinx media platform drivers -# - -# -# Spacemit media platform drivers -# -CONFIG_VIDEO_LINLON_K1X=y -# CONFIG_VIDEO_LINLON_FTRACE_K1X is not set -CONFIG_VIDEO_LINLON_PRINT_FILE_K1X=y - -# -# SPACEMIT K1X Camera And Video V2 -# -CONFIG_SPACEMIT_K1X_CAMERA_V2=y -CONFIG_SPACEMIT_K1X_CCIC_V2=y -CONFIG_SPACEMIT_K1X_VI_V2=y -CONFIG_SPACEMIT_K1X_VI_IOMMU=y -CONFIG_SPACEMIT_K1X_ISP_V2=y -CONFIG_SPACEMIT_K1X_CPP_V2=y -CONFIG_SPACEMIT_K1X_SENSOR_V2=y - -# -# MMC/SDIO DVB adapters -# -# CONFIG_SMS_SDIO_DRV is not set -# CONFIG_V4L_TEST_DRIVERS is not set -# CONFIG_DVB_TEST_DRIVERS is not set -CONFIG_VIDEOBUF2_CORE=y -CONFIG_VIDEOBUF2_V4L2=y -CONFIG_VIDEOBUF2_MEMOPS=y -CONFIG_VIDEOBUF2_DMA_CONTIG=y -CONFIG_VIDEOBUF2_VMALLOC=y -CONFIG_VIDEOBUF2_DMA_SG=y -# end of Media drivers - -# -# Media ancillary drivers -# -CONFIG_MEDIA_ATTACH=y - -# -# Camera sensor devices -# -# CONFIG_VIDEO_AR0521 is not set -# CONFIG_VIDEO_HI556 is not set -# CONFIG_VIDEO_HI846 is not set -# CONFIG_VIDEO_HI847 is not set -# CONFIG_VIDEO_IMX208 is not set -# CONFIG_VIDEO_IMX214 is not set -# CONFIG_VIDEO_IMX219 is not set -# CONFIG_VIDEO_IMX258 is not set -# CONFIG_VIDEO_IMX274 is not set -# CONFIG_VIDEO_IMX290 is not set -# CONFIG_VIDEO_IMX319 is not set -# CONFIG_VIDEO_IMX334 is not set -# CONFIG_VIDEO_IMX335 is not set -# CONFIG_VIDEO_IMX355 is not set -# CONFIG_VIDEO_IMX412 is not set -# CONFIG_VIDEO_MT9M001 is not set -# CONFIG_VIDEO_MT9M032 is not set -# CONFIG_VIDEO_MT9M111 is not set -# CONFIG_VIDEO_MT9P031 is not set -# CONFIG_VIDEO_MT9T001 is not set -# CONFIG_VIDEO_MT9T112 is not set -# CONFIG_VIDEO_MT9V011 is not set -# CONFIG_VIDEO_MT9V032 is not set -# CONFIG_VIDEO_MT9V111 is not set -# CONFIG_VIDEO_NOON010PC30 is not set -# CONFIG_VIDEO_OG01A1B is not set -# CONFIG_VIDEO_OV02A10 is not set -# CONFIG_VIDEO_OV08D10 is not set -# CONFIG_VIDEO_OV13858 is not set -# CONFIG_VIDEO_OV13B10 is not set -# CONFIG_VIDEO_OV2640 is not set -# CONFIG_VIDEO_OV2659 is not set -# CONFIG_VIDEO_OV2680 is not set -# CONFIG_VIDEO_OV2685 is not set -# CONFIG_VIDEO_OV2740 is not set -# CONFIG_VIDEO_OV5640 is not set -# CONFIG_VIDEO_OV5645 is not set -# CONFIG_VIDEO_OV5647 is not set -# CONFIG_VIDEO_OV5648 is not set -# CONFIG_VIDEO_OV5670 is not set -# CONFIG_VIDEO_OV5675 is not set -# CONFIG_VIDEO_OV5693 is not set -# CONFIG_VIDEO_OV5695 is not set -# CONFIG_VIDEO_OV6650 is not set -# CONFIG_VIDEO_OV7251 is not set -# CONFIG_VIDEO_OV7640 is not set -# CONFIG_VIDEO_OV7670 is not set -# CONFIG_VIDEO_OV772X is not set -# CONFIG_VIDEO_OV7740 is not set -# CONFIG_VIDEO_OV8856 is 
not set -# CONFIG_VIDEO_OV8865 is not set -# CONFIG_VIDEO_OV9282 is not set -# CONFIG_VIDEO_OV9640 is not set -# CONFIG_VIDEO_OV9650 is not set -# CONFIG_VIDEO_OV9734 is not set -# CONFIG_VIDEO_RDACM20 is not set -# CONFIG_VIDEO_RDACM21 is not set -# CONFIG_VIDEO_RJ54N1 is not set -# CONFIG_VIDEO_S5C73M3 is not set -# CONFIG_VIDEO_S5K4ECGX is not set -# CONFIG_VIDEO_S5K5BAF is not set -# CONFIG_VIDEO_S5K6A3 is not set -# CONFIG_VIDEO_S5K6AA is not set -# CONFIG_VIDEO_SR030PC30 is not set -# CONFIG_VIDEO_VS6624 is not set -# CONFIG_VIDEO_CCS is not set -# CONFIG_VIDEO_ET8EK8 is not set -# CONFIG_VIDEO_M5MOLS is not set -# end of Camera sensor devices - -# -# Lens drivers -# -# CONFIG_VIDEO_AD5820 is not set -# CONFIG_VIDEO_AK7375 is not set -# CONFIG_VIDEO_DW9714 is not set -# CONFIG_VIDEO_DW9768 is not set -# CONFIG_VIDEO_DW9807_VCM is not set -# end of Lens drivers - -# -# Flash devices -# -# CONFIG_VIDEO_ADP1653 is not set -# CONFIG_VIDEO_LM3560 is not set -# CONFIG_VIDEO_LM3646 is not set -# end of Flash devices - -# -# Audio decoders, processors and mixers -# -# CONFIG_VIDEO_CS3308 is not set -# CONFIG_VIDEO_CS5345 is not set -# CONFIG_VIDEO_CS53L32A is not set -# CONFIG_VIDEO_MSP3400 is not set -# CONFIG_VIDEO_SONY_BTF_MPX is not set -# CONFIG_VIDEO_TDA1997X is not set -# CONFIG_VIDEO_TDA7432 is not set -# CONFIG_VIDEO_TDA9840 is not set -# CONFIG_VIDEO_TEA6415C is not set -# CONFIG_VIDEO_TEA6420 is not set -# CONFIG_VIDEO_TLV320AIC23B is not set -# CONFIG_VIDEO_TVAUDIO is not set -# CONFIG_VIDEO_UDA1342 is not set -# CONFIG_VIDEO_VP27SMPX is not set -# CONFIG_VIDEO_WM8739 is not set -# CONFIG_VIDEO_WM8775 is not set -# end of Audio decoders, processors and mixers - -# -# RDS decoders -# -# CONFIG_VIDEO_SAA6588 is not set -# end of RDS decoders - -# -# Video decoders -# -# CONFIG_VIDEO_ADV7180 is not set -# CONFIG_VIDEO_ADV7183 is not set -# CONFIG_VIDEO_ADV748X is not set -# CONFIG_VIDEO_ADV7604 is not set -# CONFIG_VIDEO_ADV7842 is not set -# CONFIG_VIDEO_BT819 is not set -# CONFIG_VIDEO_BT856 is not set -# CONFIG_VIDEO_BT866 is not set -# CONFIG_VIDEO_ISL7998X is not set -# CONFIG_VIDEO_KS0127 is not set -# CONFIG_VIDEO_MAX9286 is not set -# CONFIG_VIDEO_ML86V7667 is not set -# CONFIG_VIDEO_SAA7110 is not set -# CONFIG_VIDEO_SAA711X is not set -# CONFIG_VIDEO_TC358743 is not set -# CONFIG_VIDEO_TVP514X is not set -# CONFIG_VIDEO_TVP5150 is not set -# CONFIG_VIDEO_TVP7002 is not set -# CONFIG_VIDEO_TW2804 is not set -# CONFIG_VIDEO_TW9903 is not set -# CONFIG_VIDEO_TW9906 is not set -# CONFIG_VIDEO_TW9910 is not set -# CONFIG_VIDEO_VPX3220 is not set - -# -# Video and audio decoders -# -# CONFIG_VIDEO_SAA717X is not set -# CONFIG_VIDEO_CX25840 is not set -# end of Video decoders - -# -# Video encoders -# -# CONFIG_VIDEO_AD9389B is not set -# CONFIG_VIDEO_ADV7170 is not set -# CONFIG_VIDEO_ADV7175 is not set -# CONFIG_VIDEO_ADV7343 is not set -# CONFIG_VIDEO_ADV7393 is not set -# CONFIG_VIDEO_ADV7511 is not set -# CONFIG_VIDEO_AK881X is not set -# CONFIG_VIDEO_SAA7127 is not set -# CONFIG_VIDEO_SAA7185 is not set -# CONFIG_VIDEO_THS8200 is not set -# end of Video encoders - -# -# Video improvement chips -# -# CONFIG_VIDEO_UPD64031A is not set -# CONFIG_VIDEO_UPD64083 is not set -# end of Video improvement chips - -# -# Audio/Video compression chips -# -# CONFIG_VIDEO_SAA6752HS is not set -# end of Audio/Video compression chips - -# -# SDR tuner chips -# -# CONFIG_SDR_MAX2175 is not set -# end of SDR tuner chips - -# -# Miscellaneous helper chips -# -# CONFIG_VIDEO_I2C is not set -# 
CONFIG_VIDEO_M52790 is not set -# CONFIG_VIDEO_ST_MIPID02 is not set -# CONFIG_VIDEO_THS7303 is not set -# end of Miscellaneous helper chips - -# -# Media SPI Adapters -# -# CONFIG_CXD2880_SPI_DRV is not set -# CONFIG_VIDEO_GS1662 is not set -# end of Media SPI Adapters - -CONFIG_MEDIA_TUNER=y - -# -# Customize TV tuners -# -# CONFIG_MEDIA_TUNER_E4000 is not set -# CONFIG_MEDIA_TUNER_FC0011 is not set -# CONFIG_MEDIA_TUNER_FC0012 is not set -# CONFIG_MEDIA_TUNER_FC0013 is not set -# CONFIG_MEDIA_TUNER_FC2580 is not set -# CONFIG_MEDIA_TUNER_IT913X is not set -# CONFIG_MEDIA_TUNER_M88RS6000T is not set -# CONFIG_MEDIA_TUNER_MAX2165 is not set -# CONFIG_MEDIA_TUNER_MC44S803 is not set -# CONFIG_MEDIA_TUNER_MSI001 is not set -# CONFIG_MEDIA_TUNER_MT2060 is not set -# CONFIG_MEDIA_TUNER_MT2063 is not set -# CONFIG_MEDIA_TUNER_MT20XX is not set -# CONFIG_MEDIA_TUNER_MT2131 is not set -# CONFIG_MEDIA_TUNER_MT2266 is not set -# CONFIG_MEDIA_TUNER_MXL301RF is not set -# CONFIG_MEDIA_TUNER_MXL5005S is not set -# CONFIG_MEDIA_TUNER_MXL5007T is not set -# CONFIG_MEDIA_TUNER_QM1D1B0004 is not set -# CONFIG_MEDIA_TUNER_QM1D1C0042 is not set -# CONFIG_MEDIA_TUNER_QT1010 is not set -# CONFIG_MEDIA_TUNER_R820T is not set -# CONFIG_MEDIA_TUNER_SI2157 is not set -# CONFIG_MEDIA_TUNER_SIMPLE is not set -# CONFIG_MEDIA_TUNER_TDA18212 is not set -# CONFIG_MEDIA_TUNER_TDA18218 is not set -# CONFIG_MEDIA_TUNER_TDA18250 is not set -# CONFIG_MEDIA_TUNER_TDA18271 is not set -# CONFIG_MEDIA_TUNER_TDA827X is not set -# CONFIG_MEDIA_TUNER_TDA8290 is not set -# CONFIG_MEDIA_TUNER_TDA9887 is not set -# CONFIG_MEDIA_TUNER_TEA5761 is not set -# CONFIG_MEDIA_TUNER_TEA5767 is not set -# CONFIG_MEDIA_TUNER_TUA9001 is not set -# CONFIG_MEDIA_TUNER_XC2028 is not set -# CONFIG_MEDIA_TUNER_XC4000 is not set -# CONFIG_MEDIA_TUNER_XC5000 is not set -# end of Customize TV tuners - -# -# Customise DVB Frontends -# - -# -# Multistandard (satellite) frontends -# -CONFIG_DVB_M88DS3103=m -# CONFIG_DVB_MXL5XX is not set -# CONFIG_DVB_STB0899 is not set -# CONFIG_DVB_STB6100 is not set -# CONFIG_DVB_STV090x is not set -# CONFIG_DVB_STV0910 is not set -# CONFIG_DVB_STV6110x is not set -# CONFIG_DVB_STV6111 is not set - -# -# Multistandard (cable + terrestrial) frontends -# -# CONFIG_DVB_DRXK is not set -# CONFIG_DVB_MN88472 is not set -# CONFIG_DVB_MN88473 is not set -# CONFIG_DVB_SI2165 is not set -# CONFIG_DVB_TDA18271C2DD is not set - -# -# DVB-S (satellite) frontends -# -# CONFIG_DVB_CX24110 is not set -# CONFIG_DVB_CX24116 is not set -# CONFIG_DVB_CX24117 is not set -# CONFIG_DVB_CX24120 is not set -# CONFIG_DVB_CX24123 is not set -# CONFIG_DVB_DS3000 is not set -# CONFIG_DVB_MB86A16 is not set -# CONFIG_DVB_MT312 is not set -# CONFIG_DVB_S5H1420 is not set -# CONFIG_DVB_SI21XX is not set -# CONFIG_DVB_STB6000 is not set -# CONFIG_DVB_STV0288 is not set -# CONFIG_DVB_STV0299 is not set -# CONFIG_DVB_STV0900 is not set -# CONFIG_DVB_STV6110 is not set -# CONFIG_DVB_TDA10071 is not set -# CONFIG_DVB_TDA10086 is not set -# CONFIG_DVB_TDA8083 is not set -# CONFIG_DVB_TDA8261 is not set -# CONFIG_DVB_TDA826X is not set -# CONFIG_DVB_TS2020 is not set -# CONFIG_DVB_TUA6100 is not set -# CONFIG_DVB_TUNER_CX24113 is not set -# CONFIG_DVB_TUNER_ITD1000 is not set -# CONFIG_DVB_VES1X93 is not set -# CONFIG_DVB_ZL10036 is not set -# CONFIG_DVB_ZL10039 is not set - -# -# DVB-T (terrestrial) frontends -# -CONFIG_DVB_AF9013=m -# CONFIG_DVB_CX22700 is not set -# CONFIG_DVB_CX22702 is not set -# CONFIG_DVB_CXD2820R is not set -# CONFIG_DVB_CXD2841ER 
is not set -# CONFIG_DVB_DIB3000MB is not set -# CONFIG_DVB_DIB3000MC is not set -# CONFIG_DVB_DIB7000M is not set -# CONFIG_DVB_DIB7000P is not set -# CONFIG_DVB_DIB9000 is not set -# CONFIG_DVB_DRXD is not set -# CONFIG_DVB_EC100 is not set -# CONFIG_DVB_L64781 is not set -# CONFIG_DVB_MT352 is not set -# CONFIG_DVB_NXT6000 is not set -CONFIG_DVB_RTL2830=m -CONFIG_DVB_RTL2832=m -CONFIG_DVB_RTL2832_SDR=m -# CONFIG_DVB_S5H1432 is not set -CONFIG_DVB_SI2168=m -# CONFIG_DVB_SP887X is not set -# CONFIG_DVB_STV0367 is not set -# CONFIG_DVB_TDA10048 is not set -# CONFIG_DVB_TDA1004X is not set -# CONFIG_DVB_ZD1301_DEMOD is not set -# CONFIG_DVB_ZL10353 is not set -# CONFIG_DVB_CXD2880 is not set - -# -# DVB-C (cable) frontends -# -# CONFIG_DVB_STV0297 is not set -# CONFIG_DVB_TDA10021 is not set -# CONFIG_DVB_TDA10023 is not set -# CONFIG_DVB_VES1820 is not set - -# -# ATSC (North American/Korean Terrestrial/Cable DTV) frontends -# -# CONFIG_DVB_AU8522_DTV is not set -# CONFIG_DVB_AU8522_V4L is not set -# CONFIG_DVB_BCM3510 is not set -# CONFIG_DVB_LG2160 is not set -# CONFIG_DVB_LGDT3305 is not set -CONFIG_DVB_LGDT3306A=m -# CONFIG_DVB_LGDT330X is not set -# CONFIG_DVB_MXL692 is not set -# CONFIG_DVB_NXT200X is not set -# CONFIG_DVB_OR51132 is not set -# CONFIG_DVB_OR51211 is not set -# CONFIG_DVB_S5H1409 is not set -# CONFIG_DVB_S5H1411 is not set - -# -# ISDB-T (terrestrial) frontends -# -# CONFIG_DVB_DIB8000 is not set -# CONFIG_DVB_MB86A20S is not set -# CONFIG_DVB_S921 is not set - -# -# ISDB-S (satellite) & ISDB-T (terrestrial) frontends -# -# CONFIG_DVB_MN88443X is not set -# CONFIG_DVB_TC90522 is not set - -# -# Digital terrestrial only tuners/PLL -# -# CONFIG_DVB_PLL is not set -# CONFIG_DVB_TUNER_DIB0070 is not set -# CONFIG_DVB_TUNER_DIB0090 is not set - -# -# SEC control devices for DVB-S -# -# CONFIG_DVB_A8293 is not set -# CONFIG_DVB_AF9033 is not set -# CONFIG_DVB_ASCOT2E is not set -# CONFIG_DVB_ATBM8830 is not set -# CONFIG_DVB_HELENE is not set -# CONFIG_DVB_HORUS3A is not set -# CONFIG_DVB_ISL6405 is not set -# CONFIG_DVB_ISL6421 is not set -# CONFIG_DVB_ISL6423 is not set -# CONFIG_DVB_IX2505V is not set -# CONFIG_DVB_LGS8GL5 is not set -# CONFIG_DVB_LGS8GXX is not set -# CONFIG_DVB_LNBH25 is not set -# CONFIG_DVB_LNBH29 is not set -# CONFIG_DVB_LNBP21 is not set -# CONFIG_DVB_LNBP22 is not set -# CONFIG_DVB_M88RS2000 is not set -# CONFIG_DVB_TDA665x is not set -# CONFIG_DVB_DRX39XYJ is not set - -# -# Common Interface (EN50221) controller drivers -# -# CONFIG_DVB_CXD2099 is not set -# CONFIG_DVB_SP2 is not set -# end of Customise DVB Frontends - -# -# Tools to develop new frontends -# -# CONFIG_DVB_DUMMY_FE is not set -# end of Media ancillary drivers - -# -# Graphics support -# -# CONFIG_IMX_IPUV3_CORE is not set -CONFIG_DRM=y -CONFIG_DRM_MIPI_DSI=y -# CONFIG_DRM_DEBUG_MM is not set -CONFIG_DRM_KMS_HELPER=y -# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set -# CONFIG_DRM_DEBUG_MODESET_LOCK is not set -CONFIG_DRM_FBDEV_EMULATION=y -CONFIG_DRM_FBDEV_OVERALLOC=100 -# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set -# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set -CONFIG_DRM_DP_AUX_BUS=y - -# -# I2C encoder or helper chips -# -# CONFIG_DRM_I2C_CH7006 is not set -# CONFIG_DRM_I2C_SIL164 is not set -# CONFIG_DRM_I2C_NXP_TDA998X is not set -# CONFIG_DRM_I2C_NXP_TDA9950 is not set -# end of I2C encoder or helper chips - -# -# ARM devices -# -# CONFIG_DRM_HDLCD is not set -# CONFIG_DRM_MALI_DISPLAY is not set -# CONFIG_DRM_KOMEDA is not set -# end of ARM devices - -# CONFIG_DRM_RADEON is 
not set -# CONFIG_DRM_AMDGPU is not set -# CONFIG_DRM_NOUVEAU is not set -# CONFIG_DRM_KMB_DISPLAY is not set -# CONFIG_DRM_VGEM is not set -# CONFIG_DRM_VKMS is not set -# CONFIG_DRM_EXYNOS is not set -# CONFIG_DRM_UDL is not set -# CONFIG_DRM_AST is not set -# CONFIG_DRM_MGAG200 is not set -# CONFIG_DRM_RCAR_DW_HDMI is not set -# CONFIG_DRM_RCAR_USE_LVDS is not set -# CONFIG_DRM_RCAR_USE_MIPI_DSI is not set -# CONFIG_DRM_SUN4I is not set -# CONFIG_DRM_QXL is not set -CONFIG_DRM_PANEL=y - -# -# Display Panels -# -# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set -# CONFIG_DRM_PANEL_ARM_VERSATILE is not set -# CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596 is not set -# CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0 is not set -# CONFIG_DRM_PANEL_BOE_HIMAX8279D is not set -# CONFIG_DRM_PANEL_BOE_TV101WUM_NL6 is not set -# CONFIG_DRM_PANEL_DSI_CM is not set -# CONFIG_DRM_PANEL_LVDS is not set -# CONFIG_DRM_PANEL_SIMPLE is not set -# CONFIG_DRM_PANEL_EDP is not set -# CONFIG_DRM_PANEL_EBBG_FT8719 is not set -# CONFIG_DRM_PANEL_ELIDA_KD35T133 is not set -# CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02 is not set -# CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D is not set -# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set -# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set -# CONFIG_DRM_PANEL_ILITEK_ILI9881C is not set -# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set -# CONFIG_DRM_PANEL_INNOLUX_P079ZCA is not set -# CONFIG_DRM_PANEL_JDI_LT070ME05000 is not set -# CONFIG_DRM_PANEL_JDI_R63452 is not set -# CONFIG_DRM_PANEL_KHADAS_TS050 is not set -# CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04 is not set -# CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W is not set -# CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829 is not set -# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set -# CONFIG_DRM_PANEL_LG_LB035Q02 is not set -# CONFIG_DRM_PANEL_LG_LG4573 is not set -# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set -# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set -# CONFIG_DRM_PANEL_NOVATEK_NT35510 is not set -# CONFIG_DRM_PANEL_NOVATEK_NT35560 is not set -# CONFIG_DRM_PANEL_NOVATEK_NT35950 is not set -# CONFIG_DRM_PANEL_NOVATEK_NT36672A is not set -# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set -# CONFIG_DRM_PANEL_MANTIX_MLAF057WE51 is not set -# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set -# CONFIG_DRM_PANEL_ORISETECH_OTM8009A is not set -# CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS is not set -# CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00 is not set -# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set -# CONFIG_DRM_PANEL_RAYDIUM_RM67191 is not set -# CONFIG_DRM_PANEL_RAYDIUM_RM68200 is not set -# CONFIG_DRM_PANEL_RONBO_RB070D30 is not set -# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set -# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6D16D0 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set -# CONFIG_DRM_PANEL_SAMSUNG_SOFEF00 is not set -# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set -# CONFIG_DRM_PANEL_SHARP_LQ101R1SX01 is not set -# CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set -# CONFIG_DRM_PANEL_SHARP_LS043T1LE01 is not set -# CONFIG_DRM_PANEL_SHARP_LS060T1SX01 is not set -# CONFIG_DRM_PANEL_SITRONIX_ST7701 is not set -# CONFIG_DRM_PANEL_SITRONIX_ST7703 is not set -# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set -# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set -# CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521 is not 
set -# CONFIG_DRM_PANEL_TDO_TL070WSH30 is not set -# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set -# CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set -# CONFIG_DRM_PANEL_TPO_TPG110 is not set -# CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA is not set -# CONFIG_DRM_PANEL_VISIONOX_RM69299 is not set -# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set -# CONFIG_DRM_PANEL_XINPENG_XPP055C272 is not set -# end of Display Panels - -CONFIG_DRM_BRIDGE=y -CONFIG_DRM_PANEL_BRIDGE=y - -# -# Display Interface Bridges -# -# CONFIG_DRM_CDNS_DSI is not set -# CONFIG_DRM_CHIPONE_ICN6211 is not set -# CONFIG_DRM_CHRONTEL_CH7033 is not set -# CONFIG_DRM_CROS_EC_ANX7688 is not set -# CONFIG_DRM_DISPLAY_CONNECTOR is not set -# CONFIG_DRM_FSL_LDB is not set -# CONFIG_DRM_ITE_IT6505 is not set -# CONFIG_DRM_LONTIUM_LT8912B is not set -# CONFIG_DRM_LONTIUM_LT9211 is not set -# CONFIG_DRM_LONTIUM_LT9611 is not set -# CONFIG_DRM_LONTIUM_LT9611UXC is not set -# CONFIG_DRM_ITE_IT66121 is not set -# CONFIG_DRM_LVDS_CODEC is not set -# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set -# CONFIG_DRM_NWL_MIPI_DSI is not set -# CONFIG_DRM_NXP_PTN3460 is not set -# CONFIG_DRM_PARADE_PS8622 is not set -# CONFIG_DRM_PARADE_PS8640 is not set -# CONFIG_DRM_SIL_SII8620 is not set -# CONFIG_DRM_SII902X is not set -# CONFIG_DRM_SII9234 is not set -# CONFIG_DRM_SIMPLE_BRIDGE is not set -# CONFIG_DRM_THINE_THC63LVD1024 is not set -# CONFIG_DRM_TOSHIBA_TC358762 is not set -# CONFIG_DRM_TOSHIBA_TC358764 is not set -# CONFIG_DRM_TOSHIBA_TC358767 is not set -# CONFIG_DRM_TOSHIBA_TC358768 is not set -# CONFIG_DRM_TOSHIBA_TC358775 is not set -# CONFIG_DRM_TI_DLPC3433 is not set -# CONFIG_DRM_TI_TFP410 is not set -# CONFIG_DRM_TI_SN65DSI83 is not set -# CONFIG_DRM_TI_SN65DSI86 is not set -# CONFIG_DRM_TI_TPD12S015 is not set -# CONFIG_DRM_ANALOGIX_ANX6345 is not set -# CONFIG_DRM_ANALOGIX_ANX78XX is not set -# CONFIG_DRM_ANALOGIX_ANX7625 is not set -# CONFIG_DRM_I2C_ADV7511 is not set -# CONFIG_DRM_CDNS_MHDP8546 is not set -# CONFIG_DRM_IMX8QM_LDB is not set -# CONFIG_DRM_IMX8QXP_LDB is not set -# CONFIG_DRM_IMX8QXP_PIXEL_COMBINER is not set -# CONFIG_DRM_IMX8QXP_PIXEL_LINK_TO_DPI is not set -# end of Display Interface Bridges - -# CONFIG_DRM_INGENIC is not set -# CONFIG_DRM_V3D is not set -# CONFIG_DRM_VC4 is not set -# CONFIG_DRM_ETNAVIV is not set -# CONFIG_DRM_HISI_HIBMC is not set -# CONFIG_DRM_LOGICVC is not set -# CONFIG_DRM_MXSFB is not set -# CONFIG_DRM_IMX_LCDIF is not set -# CONFIG_DRM_ARCPGU is not set -# CONFIG_DRM_BOCHS is not set -# CONFIG_DRM_CIRRUS_QEMU is not set -# CONFIG_DRM_GM12U320 is not set -# CONFIG_DRM_PANEL_MIPI_DBI is not set -# CONFIG_DRM_SIMPLEDRM is not set -# CONFIG_TINYDRM_HX8357D is not set -# CONFIG_TINYDRM_ILI9163 is not set -# CONFIG_TINYDRM_ILI9225 is not set -# CONFIG_TINYDRM_ILI9341 is not set -# CONFIG_TINYDRM_ILI9486 is not set -# CONFIG_TINYDRM_MI0283QT is not set -# CONFIG_TINYDRM_REPAPER is not set -# CONFIG_TINYDRM_ST7586 is not set -# CONFIG_TINYDRM_ST7735R is not set -# CONFIG_DRM_PL111 is not set -# CONFIG_DRM_TVE200 is not set -# CONFIG_DRM_LIMA is not set -# CONFIG_DRM_PANFROST is not set -# CONFIG_DRM_ASPEED_GFX is not set -# CONFIG_DRM_MCDE is not set -# CONFIG_DRM_TIDSS is not set -# CONFIG_DRM_GUD is not set -# CONFIG_DRM_SSD130X is not set -# CONFIG_DRM_SPRD is not set -CONFIG_DRM_SPACEMIT=y -CONFIG_SPACEMIT_MIPI_PANEL=y -CONFIG_DRM_LT8911EXB=y -CONFIG_DRM_LT9711=y -# CONFIG_POWERVR_ROGUE_NULLDRMDISP is not set -CONFIG_POWERVR_ROGUE=y -# CONFIG_DRM_LEGACY is not set 
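The removal lines above carry the board's display stack: CONFIG_DRM_SPACEMIT together with SPACEMIT_MIPI_PANEL, the LT8911EXB and LT9711 bridge drivers, the PowerVR Rogue GPU option, and DRM fbdev emulation. The snippet below is only a sanity-check sketch for a booted image, not something taken from this diff: it uses generic kernel interfaces, assumes CONFIG_IKCONFIG_PROC stays enabled so /proc/config.gz exists, and the connector names under /sys/class/drm will differ per board.

# sketch: confirm the DRM options are built in and a card actually probed
zcat /proc/config.gz | grep -E 'CONFIG_DRM_SPACEMIT|CONFIG_SPACEMIT_MIPI_PANEL|CONFIG_DRM_LT8911EXB|CONFIG_POWERVR_ROGUE'
ls /sys/class/drm/                             # expect a card0 entry once the DRM driver registers
for c in /sys/class/drm/card*-*/status; do     # per-connector hotplug state (HDMI/DSI/eDP)
  printf '%s: %s\n' "$c" "$(cat "$c")"
done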
-CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
-CONFIG_DRM_NOMODESET=y
-
-#
-# Frame buffer Devices
-#
-CONFIG_FB_CMDLINE=y
-CONFIG_FB_NOTIFY=y
-CONFIG_FB=y
-# CONFIG_FIRMWARE_EDID is not set
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_IMAGEBLIT=y
-CONFIG_FB_SYS_FILLRECT=y
-CONFIG_FB_SYS_COPYAREA=y
-CONFIG_FB_SYS_IMAGEBLIT=y
-# CONFIG_FB_FOREIGN_ENDIAN is not set
-CONFIG_FB_SYS_FOPS=y
-CONFIG_FB_DEFERRED_IO=y
-# CONFIG_FB_MODE_HELPERS is not set
-# CONFIG_FB_TILEBLITTING is not set
-
-#
-# Frame buffer hardware drivers
-#
-# CONFIG_FB_CIRRUS is not set
-# CONFIG_FB_PM2 is not set
-# CONFIG_FB_CLPS711X is not set
-# CONFIG_FB_IMX is not set
-# CONFIG_FB_CYBER2000 is not set
-# CONFIG_FB_ARC is not set
-# CONFIG_FB_CONTROL is not set
-# CONFIG_FB_ASILIANT is not set
-# CONFIG_FB_IMSTT is not set
-# CONFIG_FB_EFI is not set
-# CONFIG_FB_GBE is not set
-# CONFIG_FB_PVR2 is not set
-# CONFIG_FB_OPENCORES is not set
-# CONFIG_FB_S1D13XXX is not set
-# CONFIG_FB_ATMEL is not set
-# CONFIG_FB_NVIDIA is not set
-# CONFIG_FB_RIVA is not set
-# CONFIG_FB_I740 is not set
-# CONFIG_FB_MATROX is not set
-# CONFIG_FB_RADEON is not set
-# CONFIG_FB_ATY128 is not set
-# CONFIG_FB_ATY is not set
-# CONFIG_FB_S3 is not set
-# CONFIG_FB_SAVAGE is not set
-# CONFIG_FB_SIS is not set
-# CONFIG_FB_VIA is not set
-# CONFIG_FB_NEOMAGIC is not set
-# CONFIG_FB_KYRO is not set
-# CONFIG_FB_3DFX is not set
-# CONFIG_FB_VOODOO1 is not set
-# CONFIG_FB_VT8623 is not set
-# CONFIG_FB_TRIDENT is not set
-# CONFIG_FB_ARK is not set
-# CONFIG_FB_PM3 is not set
-# CONFIG_FB_CARMINE is not set
-# CONFIG_FB_WM8505 is not set
-# CONFIG_FB_PXA168 is not set
-# CONFIG_FB_W100 is not set
-# CONFIG_FB_SH_MOBILE_LCDC is not set
-# CONFIG_FB_TMIO is not set
-# CONFIG_FB_S3C is not set
-# CONFIG_FB_SMSCUFX is not set
-# CONFIG_FB_UDL is not set
-# CONFIG_FB_IBM_GXT4500 is not set
-# CONFIG_FB_GOLDFISH is not set
-# CONFIG_FB_DA8XX is not set
-# CONFIG_FB_VIRTUAL is not set
-# CONFIG_FB_METRONOME is not set
-# CONFIG_FB_MB862XX is not set
-# CONFIG_FB_BROADSHEET is not set
-# CONFIG_FB_SIMPLE is not set
-# CONFIG_FB_SSD1307 is not set
-# CONFIG_FB_SM712 is not set
-# CONFIG_FB_OMAP2 is not set
-# CONFIG_MMP_DISP is not set
-# end of Frame buffer Devices
-
-#
-# Backlight & LCD device support
-#
-# CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_BACKLIGHT_KTD253 is not set
-# CONFIG_BACKLIGHT_OMAP1 is not set
-CONFIG_BACKLIGHT_PWM=y
-# CONFIG_BACKLIGHT_QCOM_WLED is not set
-# CONFIG_BACKLIGHT_ADP8860 is not set
-# CONFIG_BACKLIGHT_ADP8870 is not set
-# CONFIG_BACKLIGHT_LM3630A is not set
-# CONFIG_BACKLIGHT_LM3639 is not set
-# CONFIG_BACKLIGHT_LP855X is not set
-# CONFIG_BACKLIGHT_GPIO is not set
-# CONFIG_BACKLIGHT_LV5207LP is not set
-# CONFIG_BACKLIGHT_BD6107 is not set
-# CONFIG_BACKLIGHT_ARCXCNN is not set
-# CONFIG_BACKLIGHT_LED is not set
-# end of Backlight & LCD device support
-
-CONFIG_VIDEOMODE_HELPERS=y
-CONFIG_HDMI=y
-
-#
-# Console display driver support
-#
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_DUMMY_CONSOLE_COLUMNS=80
-CONFIG_DUMMY_CONSOLE_ROWS=25
-CONFIG_FRAMEBUFFER_CONSOLE=y
-# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
-# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set
-# end of Console display driver support
-
-# CONFIG_LOGO is not set
-# end of Graphics support
-
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_TIMER=y
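With CONFIG_DRM_FBDEV_EMULATION, CONFIG_FB and CONFIG_FRAMEBUFFER_CONSOLE enabled in the graphics block above, the kernel console lands on the panel or HDMI output through the emulated fbdev, and CONFIG_BACKLIGHT_CLASS_DEVICE / CONFIG_BACKLIGHT_PWM expose panel brightness via sysfs. The lines below are a small follow-up check in the same spirit as the DRM sketch earlier, using only standard fbdev and backlight class nodes; the fb0 and backlight device names are assumptions and may differ on a given board.

# sketch: confirm the framebuffer console and backlight registered
cat /proc/fb                                   # "0 <driver name>" once an fbdev is registered
cat /sys/class/graphics/fb0/virtual_size       # resolution exposed by the fbdev emulation
ls /sys/class/backlight/                       # the PWM backlight should appear here when it probes
cat /sys/class/backlight/*/max_brightness 2>/dev/null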
-CONFIG_SND_PCM=y -CONFIG_SND_DMAENGINE_PCM=y -CONFIG_SND_COMPRESS_OFFLOAD=y -CONFIG_SND_JACK=y -CONFIG_SND_JACK_INPUT_DEV=y -# CONFIG_SND_OSSEMUL is not set -CONFIG_SND_PCM_TIMER=y -# CONFIG_SND_HRTIMER is not set -# CONFIG_SND_DYNAMIC_MINORS is not set -CONFIG_SND_SUPPORT_OLD_API=y -CONFIG_SND_PROC_FS=y -CONFIG_SND_VERBOSE_PROCFS=y -# CONFIG_SND_VERBOSE_PRINTK is not set -CONFIG_SND_CTL_FAST_LOOKUP=y -# CONFIG_SND_DEBUG is not set -# CONFIG_SND_CTL_INPUT_VALIDATION is not set -# CONFIG_SND_SEQUENCER is not set -CONFIG_SND_DRIVERS=y -# CONFIG_SND_DUMMY is not set -# CONFIG_SND_ALOOP is not set -# CONFIG_SND_MTPAV is not set -# CONFIG_SND_SERIAL_U16550 is not set -# CONFIG_SND_SERIAL_GENERIC is not set -# CONFIG_SND_MPU401 is not set -CONFIG_SND_PCI=y -# CONFIG_SND_AD1889 is not set -# CONFIG_SND_ATIIXP is not set -# CONFIG_SND_ATIIXP_MODEM is not set -# CONFIG_SND_AU8810 is not set -# CONFIG_SND_AU8820 is not set -# CONFIG_SND_AU8830 is not set -# CONFIG_SND_AW2 is not set -# CONFIG_SND_BT87X is not set -# CONFIG_SND_CA0106 is not set -# CONFIG_SND_CMIPCI is not set -# CONFIG_SND_OXYGEN is not set -# CONFIG_SND_CS4281 is not set -# CONFIG_SND_CS46XX is not set -# CONFIG_SND_CS5535AUDIO is not set -# CONFIG_SND_CTXFI is not set -# CONFIG_SND_DARLA20 is not set -# CONFIG_SND_GINA20 is not set -# CONFIG_SND_LAYLA20 is not set -# CONFIG_SND_DARLA24 is not set -# CONFIG_SND_GINA24 is not set -# CONFIG_SND_LAYLA24 is not set -# CONFIG_SND_MONA is not set -# CONFIG_SND_MIA is not set -# CONFIG_SND_ECHO3G is not set -# CONFIG_SND_INDIGO is not set -# CONFIG_SND_INDIGOIO is not set -# CONFIG_SND_INDIGODJ is not set -# CONFIG_SND_INDIGOIOX is not set -# CONFIG_SND_INDIGODJX is not set -# CONFIG_SND_ENS1370 is not set -# CONFIG_SND_ENS1371 is not set -# CONFIG_SND_FM801 is not set -# CONFIG_SND_HDSP is not set -# CONFIG_SND_HDSPM is not set -# CONFIG_SND_ICE1724 is not set -# CONFIG_SND_INTEL8X0 is not set -# CONFIG_SND_INTEL8X0M is not set -# CONFIG_SND_KORG1212 is not set -# CONFIG_SND_LOLA is not set -# CONFIG_SND_LX6464ES is not set -# CONFIG_SND_MIXART is not set -# CONFIG_SND_NM256 is not set -# CONFIG_SND_PCXHR is not set -# CONFIG_SND_RIPTIDE is not set -# CONFIG_SND_RME32 is not set -# CONFIG_SND_RME96 is not set -# CONFIG_SND_RME9652 is not set -# CONFIG_SND_SE6X is not set -# CONFIG_SND_VIA82XX is not set -# CONFIG_SND_VIA82XX_MODEM is not set -# CONFIG_SND_VIRTUOSO is not set -# CONFIG_SND_VX222 is not set -# CONFIG_SND_YMFPCI is not set - -# -# HD-Audio -# -# CONFIG_SND_HDA_INTEL is not set -# end of HD-Audio - -CONFIG_SND_HDA_PREALLOC_SIZE=64 -CONFIG_SND_SPI=y -CONFIG_SND_USB=y -# CONFIG_SND_USB_AUDIO is not set -# CONFIG_SND_USB_UA101 is not set -# CONFIG_SND_USB_CAIAQ is not set -# CONFIG_SND_USB_US122L is not set -# CONFIG_SND_USB_6FIRE is not set -# CONFIG_SND_USB_HIFACE is not set -# CONFIG_SND_BCD2000 is not set -# CONFIG_SND_USB_POD is not set -# CONFIG_SND_USB_PODHD is not set -# CONFIG_SND_USB_TONEPORT is not set -# CONFIG_SND_USB_VARIAX is not set -CONFIG_SND_SOC=y -CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y -CONFIG_SND_SOC_COMPRESS=y -# CONFIG_SND_SOC_ADI is not set -# CONFIG_SND_SOC_AMD_ACP is not set -# CONFIG_SND_AMD_ACP_CONFIG is not set -# CONFIG_SND_SOC_APPLE_MCA is not set -# CONFIG_SND_ATMEL_SOC is not set -# CONFIG_SND_BCM2835_SOC_I2S is not set -# CONFIG_SND_SOC_CYGNUS is not set -# CONFIG_SND_BCM63XX_I2S_WHISTLER is not set -# CONFIG_SND_EP93XX_SOC is not set -# CONFIG_SND_DESIGNWARE_I2S is not set - -# -# SoC Audio for Freescale CPUs -# - -# -# Common SoC Audio 
options for Freescale CPUs: -# -# CONFIG_SND_SOC_FSL_ASRC is not set -# CONFIG_SND_SOC_FSL_SAI is not set -# CONFIG_SND_SOC_FSL_AUDMIX is not set -# CONFIG_SND_SOC_FSL_SSI is not set -# CONFIG_SND_SOC_FSL_SPDIF is not set -# CONFIG_SND_SOC_FSL_ESAI is not set -# CONFIG_SND_SOC_FSL_MICFIL is not set -# CONFIG_SND_SOC_FSL_XCVR is not set -# CONFIG_SND_SOC_FSL_AUD2HTX is not set -# CONFIG_SND_SOC_FSL_RPMSG is not set -# CONFIG_SND_SOC_IMX_AUDMUX is not set -# CONFIG_SND_IMX_SOC is not set -# end of SoC Audio for Freescale CPUs - -# CONFIG_SND_I2S_HI6210_I2S is not set -# CONFIG_SND_JZ4740_SOC_I2S is not set -# CONFIG_SND_KIRKWOOD_SOC is not set -# CONFIG_SND_SOC_IMG is not set -CONFIG_SND_SOC_INTEL_SST_TOPLEVEL=y -# CONFIG_SND_SOC_INTEL_KEEMBAY is not set -# CONFIG_SND_SOC_INTEL_AVS is not set -CONFIG_SND_SOC_INTEL_MACH=y -# CONFIG_SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES is not set -# CONFIG_SND_SOC_MT8186 is not set -# CONFIG_SND_SOC_MTK_BTCVSD is not set -# CONFIG_SND_SOC_MT8195 is not set - -# -# ASoC support for Amlogic platforms -# -# CONFIG_SND_MESON_AIU is not set -# CONFIG_SND_MESON_AXG_FRDDR is not set -# CONFIG_SND_MESON_AXG_TODDR is not set -# CONFIG_SND_MESON_AXG_TDMIN is not set -# CONFIG_SND_MESON_AXG_TDMOUT is not set -# CONFIG_SND_MESON_AXG_SOUND_CARD is not set -# CONFIG_SND_MESON_AXG_SPDIFOUT is not set -# CONFIG_SND_MESON_AXG_SPDIFIN is not set -# CONFIG_SND_MESON_AXG_PDM is not set -# CONFIG_SND_MESON_GX_SOUND_CARD is not set -# CONFIG_SND_MESON_G12A_TOACODEC is not set -# CONFIG_SND_MESON_G12A_TOHDMITX is not set -# CONFIG_SND_SOC_MESON_T9015 is not set -# end of ASoC support for Amlogic platforms - -# CONFIG_SND_MXS_SOC is not set -# CONFIG_SND_PXA2XX_SOC is not set -# CONFIG_SND_SOC_QCOM is not set -# CONFIG_SND_SOC_ROCKCHIP is not set -# CONFIG_SND_SOC_SAMSUNG is not set - -# -# SoC Audio support for Renesas SoCs -# -# CONFIG_SND_SOC_SH4_FSI is not set -# CONFIG_SND_SOC_RCAR is not set -# CONFIG_SND_SOC_RZ is not set -# end of SoC Audio support for Renesas SoCs - -# CONFIG_SND_SOC_SOF_TOPLEVEL is not set -# CONFIG_SND_SOC_SPRD is not set -# CONFIG_SND_SOC_STI is not set - -# -# STMicroelectronics STM32 SOC audio support -# -# CONFIG_SND_SOC_STM32_SAI is not set -# CONFIG_SND_SOC_STM32_I2S is not set -# CONFIG_SND_SOC_STM32_SPDIFRX is not set -# end of STMicroelectronics STM32 SOC audio support - -# -# Allwinner SoC Audio support -# -# CONFIG_SND_SUN4I_CODEC is not set -# CONFIG_SND_SUN8I_CODEC is not set -# CONFIG_SND_SUN8I_CODEC_ANALOG is not set -# CONFIG_SND_SUN50I_CODEC_ANALOG is not set -# CONFIG_SND_SUN4I_I2S is not set -# CONFIG_SND_SUN4I_SPDIF is not set -# CONFIG_SND_SUN50I_DMIC is not set -# end of Allwinner SoC Audio support - -# CONFIG_SND_SOC_TEGRA is not set - -# -# Audio support for Texas Instruments SoCs -# - -# -# Texas Instruments DAI support for: -# -# CONFIG_SND_SOC_DAVINCI_ASP is not set -# CONFIG_SND_SOC_DAVINCI_MCASP is not set -# CONFIG_SND_SOC_DAVINCI_VCIF is not set -# CONFIG_SND_SOC_OMAP_DMIC is not set -# CONFIG_SND_SOC_OMAP_MCBSP is not set -# CONFIG_SND_SOC_OMAP_MCPDM is not set - -# -# Audio support for boards with Texas Instruments SoCs -# -# CONFIG_SND_SOC_OMAP_HDMI is not set -# CONFIG_SND_SOC_J721E_EVM is not set -# end of Audio support for Texas Instruments SoCs - -# CONFIG_SND_SOC_UNIPHIER is not set -# CONFIG_SND_SOC_XILINX_I2S is not set -# CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER is not set -# CONFIG_SND_SOC_XILINX_SPDIF is not set -# CONFIG_SND_SOC_XTFPGA_I2S is not set -CONFIG_SND_SOC_SPACEMIT=y -CONFIG_SND_SOC_I2C_AND_SPI=y - -# 
-# CODEC drivers -# -# CONFIG_SND_SOC_ALL_CODECS is not set -# CONFIG_SND_SOC_AC97_CODEC is not set -# CONFIG_SND_SOC_ADAU1372_I2C is not set -# CONFIG_SND_SOC_ADAU1372_SPI is not set -# CONFIG_SND_SOC_ADAU1701 is not set -# CONFIG_SND_SOC_ADAU1761_I2C is not set -# CONFIG_SND_SOC_ADAU1761_SPI is not set -# CONFIG_SND_SOC_ADAU7002 is not set -# CONFIG_SND_SOC_ADAU7118_HW is not set -# CONFIG_SND_SOC_ADAU7118_I2C is not set -# CONFIG_SND_SOC_AK4104 is not set -# CONFIG_SND_SOC_AK4118 is not set -# CONFIG_SND_SOC_AK4375 is not set -# CONFIG_SND_SOC_AK4458 is not set -# CONFIG_SND_SOC_AK4554 is not set -# CONFIG_SND_SOC_AK4613 is not set -# CONFIG_SND_SOC_AK4642 is not set -# CONFIG_SND_SOC_AK5386 is not set -# CONFIG_SND_SOC_AK5558 is not set -# CONFIG_SND_SOC_ALC5623 is not set -# CONFIG_SND_SOC_AW8738 is not set -# CONFIG_SND_SOC_BD28623 is not set -# CONFIG_SND_SOC_BT_SCO is not set -# CONFIG_SND_SOC_CPCAP is not set -# CONFIG_SND_SOC_CS35L32 is not set -# CONFIG_SND_SOC_CS35L33 is not set -# CONFIG_SND_SOC_CS35L34 is not set -# CONFIG_SND_SOC_CS35L35 is not set -# CONFIG_SND_SOC_CS35L36 is not set -# CONFIG_SND_SOC_CS35L41_SPI is not set -# CONFIG_SND_SOC_CS35L41_I2C is not set -# CONFIG_SND_SOC_CS35L45_SPI is not set -# CONFIG_SND_SOC_CS35L45_I2C is not set -# CONFIG_SND_SOC_CS42L42 is not set -# CONFIG_SND_SOC_CS42L51_I2C is not set -# CONFIG_SND_SOC_CS42L52 is not set -# CONFIG_SND_SOC_CS42L56 is not set -# CONFIG_SND_SOC_CS42L73 is not set -# CONFIG_SND_SOC_CS42L83 is not set -# CONFIG_SND_SOC_CS4234 is not set -# CONFIG_SND_SOC_CS4265 is not set -# CONFIG_SND_SOC_CS4270 is not set -# CONFIG_SND_SOC_CS4271_I2C is not set -# CONFIG_SND_SOC_CS4271_SPI is not set -# CONFIG_SND_SOC_CS42XX8_I2C is not set -# CONFIG_SND_SOC_CS43130 is not set -# CONFIG_SND_SOC_CS4341 is not set -# CONFIG_SND_SOC_CS4349 is not set -# CONFIG_SND_SOC_CS53L30 is not set -# CONFIG_SND_SOC_CX2072X is not set -# CONFIG_SND_SOC_JZ4740_CODEC is not set -# CONFIG_SND_SOC_JZ4725B_CODEC is not set -# CONFIG_SND_SOC_JZ4760_CODEC is not set -# CONFIG_SND_SOC_JZ4770_CODEC is not set -# CONFIG_SND_SOC_DA7213 is not set -# CONFIG_SND_SOC_DMIC is not set -# CONFIG_SND_SOC_ES7134 is not set -# CONFIG_SND_SOC_ES7210 is not set -# CONFIG_SND_SOC_ES7241 is not set -# CONFIG_SND_SOC_ES8156 is not set -CONFIG_SND_SOC_ES8316=y -CONFIG_SND_SOC_ES8326=y -# CONFIG_SND_SOC_ES8328_I2C is not set -# CONFIG_SND_SOC_ES8328_SPI is not set -# CONFIG_SND_SOC_GTM601 is not set -# CONFIG_SND_SOC_HDA is not set -# CONFIG_SND_SOC_ICS43432 is not set -# CONFIG_SND_SOC_INNO_RK3036 is not set -# CONFIG_SND_SOC_LOCHNAGAR_SC is not set -# CONFIG_SND_SOC_MAX98088 is not set -# CONFIG_SND_SOC_MAX98357A is not set -# CONFIG_SND_SOC_MAX98504 is not set -# CONFIG_SND_SOC_MAX9867 is not set -# CONFIG_SND_SOC_MAX98927 is not set -# CONFIG_SND_SOC_MAX98520 is not set -# CONFIG_SND_SOC_MAX98373_I2C is not set -# CONFIG_SND_SOC_MAX98390 is not set -# CONFIG_SND_SOC_MAX98396 is not set -# CONFIG_SND_SOC_MAX9860 is not set -# CONFIG_SND_SOC_MSM8916_WCD_ANALOG is not set -# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set -# CONFIG_SND_SOC_PCM1681 is not set -# CONFIG_SND_SOC_PCM1789_I2C is not set -# CONFIG_SND_SOC_PCM179X_I2C is not set -# CONFIG_SND_SOC_PCM179X_SPI is not set -# CONFIG_SND_SOC_PCM186X_I2C is not set -# CONFIG_SND_SOC_PCM186X_SPI is not set -# CONFIG_SND_SOC_PCM3060_I2C is not set -# CONFIG_SND_SOC_PCM3060_SPI is not set -# CONFIG_SND_SOC_PCM3168A_I2C is not set -# CONFIG_SND_SOC_PCM3168A_SPI is not set -# CONFIG_SND_SOC_PCM5102A is not set -# 
CONFIG_SND_SOC_PCM512x_I2C is not set -# CONFIG_SND_SOC_PCM512x_SPI is not set -# CONFIG_SND_SOC_RK3328 is not set -# CONFIG_SND_SOC_RK817 is not set -# CONFIG_SND_SOC_RT5616 is not set -# CONFIG_SND_SOC_RT5631 is not set -# CONFIG_SND_SOC_RT5640 is not set -# CONFIG_SND_SOC_RT5659 is not set -# CONFIG_SND_SOC_RT9120 is not set -# CONFIG_SND_SOC_SGTL5000 is not set -# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set -# CONFIG_SND_SOC_SIMPLE_MUX is not set -# CONFIG_SND_SOC_SPDIF is not set -# CONFIG_SND_SOC_SRC4XXX_I2C is not set -# CONFIG_SND_SOC_SSM2305 is not set -# CONFIG_SND_SOC_SSM2518 is not set -# CONFIG_SND_SOC_SSM2602_SPI is not set -# CONFIG_SND_SOC_SSM2602_I2C is not set -# CONFIG_SND_SOC_SSM4567 is not set -# CONFIG_SND_SOC_STA32X is not set -# CONFIG_SND_SOC_STA350 is not set -# CONFIG_SND_SOC_STI_SAS is not set -# CONFIG_SND_SOC_TAS2552 is not set -# CONFIG_SND_SOC_TAS2562 is not set -# CONFIG_SND_SOC_TAS2764 is not set -# CONFIG_SND_SOC_TAS2770 is not set -# CONFIG_SND_SOC_TAS2780 is not set -# CONFIG_SND_SOC_TAS5086 is not set -# CONFIG_SND_SOC_TAS571X is not set -# CONFIG_SND_SOC_TAS5720 is not set -# CONFIG_SND_SOC_TAS5805M is not set -# CONFIG_SND_SOC_TAS6424 is not set -# CONFIG_SND_SOC_TDA7419 is not set -# CONFIG_SND_SOC_TFA9879 is not set -# CONFIG_SND_SOC_TFA989X is not set -# CONFIG_SND_SOC_TLV320ADC3XXX is not set -# CONFIG_SND_SOC_TLV320AIC23_I2C is not set -# CONFIG_SND_SOC_TLV320AIC23_SPI is not set -# CONFIG_SND_SOC_TLV320AIC31XX is not set -# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set -# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set -# CONFIG_SND_SOC_TLV320AIC3X_I2C is not set -# CONFIG_SND_SOC_TLV320AIC3X_SPI is not set -# CONFIG_SND_SOC_TLV320ADCX140 is not set -# CONFIG_SND_SOC_TS3A227E is not set -# CONFIG_SND_SOC_TSCS42XX is not set -# CONFIG_SND_SOC_TSCS454 is not set -# CONFIG_SND_SOC_UDA1334 is not set -# CONFIG_SND_SOC_WM8510 is not set -# CONFIG_SND_SOC_WM8523 is not set -# CONFIG_SND_SOC_WM8524 is not set -# CONFIG_SND_SOC_WM8580 is not set -# CONFIG_SND_SOC_WM8711 is not set -# CONFIG_SND_SOC_WM8728 is not set -# CONFIG_SND_SOC_WM8731_I2C is not set -# CONFIG_SND_SOC_WM8731_SPI is not set -# CONFIG_SND_SOC_WM8737 is not set -# CONFIG_SND_SOC_WM8741 is not set -# CONFIG_SND_SOC_WM8750 is not set -# CONFIG_SND_SOC_WM8753 is not set -# CONFIG_SND_SOC_WM8770 is not set -# CONFIG_SND_SOC_WM8776 is not set -# CONFIG_SND_SOC_WM8782 is not set -# CONFIG_SND_SOC_WM8804_I2C is not set -# CONFIG_SND_SOC_WM8804_SPI is not set -# CONFIG_SND_SOC_WM8903 is not set -# CONFIG_SND_SOC_WM8904 is not set -# CONFIG_SND_SOC_WM8940 is not set -# CONFIG_SND_SOC_WM8960 is not set -# CONFIG_SND_SOC_WM8962 is not set -# CONFIG_SND_SOC_WM8974 is not set -# CONFIG_SND_SOC_WM8978 is not set -# CONFIG_SND_SOC_WM8985 is not set -# CONFIG_SND_SOC_ZL38060 is not set -# CONFIG_SND_SOC_MAX9759 is not set -# CONFIG_SND_SOC_MT6351 is not set -# CONFIG_SND_SOC_MT6358 is not set -# CONFIG_SND_SOC_MT6660 is not set -# CONFIG_SND_SOC_NAU8315 is not set -# CONFIG_SND_SOC_NAU8540 is not set -# CONFIG_SND_SOC_NAU8810 is not set -# CONFIG_SND_SOC_NAU8821 is not set -# CONFIG_SND_SOC_NAU8822 is not set -# CONFIG_SND_SOC_NAU8824 is not set -# CONFIG_SND_SOC_TPA6130A2 is not set -# CONFIG_SND_SOC_LPASS_WSA_MACRO is not set -# CONFIG_SND_SOC_LPASS_VA_MACRO is not set -# CONFIG_SND_SOC_LPASS_RX_MACRO is not set -# CONFIG_SND_SOC_LPASS_TX_MACRO is not set -# end of CODEC drivers - -CONFIG_SND_SIMPLE_CARD_UTILS=y -CONFIG_SND_SIMPLE_CARD=y -# CONFIG_SND_AUDIO_GRAPH_CARD is not set -# 
CONFIG_SND_AUDIO_GRAPH_CARD2 is not set -# CONFIG_SND_TEST_COMPONENT is not set -# CONFIG_SND_VIRTIO is not set - -# -# HID support -# -CONFIG_HID=y -# CONFIG_HID_BATTERY_STRENGTH is not set -# CONFIG_HIDRAW is not set -CONFIG_UHID=y -CONFIG_HID_GENERIC=y - -# -# Special HID drivers -# -# CONFIG_HID_A4TECH is not set -# CONFIG_HID_ACCUTOUCH is not set -# CONFIG_HID_ACRUX is not set -# CONFIG_HID_APPLE is not set -# CONFIG_HID_APPLEIR is not set -# CONFIG_HID_ASUS is not set -# CONFIG_HID_AUREAL is not set -# CONFIG_HID_BELKIN is not set -# CONFIG_HID_BETOP_FF is not set -# CONFIG_HID_BIGBEN_FF is not set -# CONFIG_HID_CHERRY is not set -# CONFIG_HID_CHICONY is not set -# CONFIG_HID_CORSAIR is not set -# CONFIG_HID_COUGAR is not set -# CONFIG_HID_MACALLY is not set -# CONFIG_HID_PRODIKEYS is not set -# CONFIG_HID_CMEDIA is not set -# CONFIG_HID_CREATIVE_SB0540 is not set -# CONFIG_HID_CYPRESS is not set -# CONFIG_HID_DRAGONRISE is not set -# CONFIG_HID_EMS_FF is not set -# CONFIG_HID_ELAN is not set -# CONFIG_HID_ELECOM is not set -# CONFIG_HID_ELO is not set -# CONFIG_HID_EZKEY is not set -# CONFIG_HID_GEMBIRD is not set -# CONFIG_HID_GFRM is not set -# CONFIG_HID_GLORIOUS is not set -# CONFIG_HID_HOLTEK is not set -# CONFIG_HID_VIVALDI is not set -# CONFIG_HID_GT683R is not set -# CONFIG_HID_KEYTOUCH is not set -# CONFIG_HID_KYE is not set -# CONFIG_HID_UCLOGIC is not set -# CONFIG_HID_WALTOP is not set -# CONFIG_HID_VIEWSONIC is not set -# CONFIG_HID_VRC2 is not set -# CONFIG_HID_XIAOMI is not set -# CONFIG_HID_GYRATION is not set -# CONFIG_HID_ICADE is not set -# CONFIG_HID_ITE is not set -# CONFIG_HID_JABRA is not set -# CONFIG_HID_TWINHAN is not set -# CONFIG_HID_KENSINGTON is not set -# CONFIG_HID_LCPOWER is not set -# CONFIG_HID_LED is not set -# CONFIG_HID_LENOVO is not set -# CONFIG_HID_LETSKETCH is not set -# CONFIG_HID_LOGITECH is not set -# CONFIG_HID_MAGICMOUSE is not set -# CONFIG_HID_MALTRON is not set -# CONFIG_HID_MAYFLASH is not set -# CONFIG_HID_MEGAWORLD_FF is not set -# CONFIG_HID_REDRAGON is not set -# CONFIG_HID_MICROSOFT is not set -# CONFIG_HID_MONTEREY is not set -CONFIG_HID_MULTITOUCH=y -# CONFIG_HID_NINTENDO is not set -# CONFIG_HID_NTI is not set -# CONFIG_HID_NTRIG is not set -# CONFIG_HID_ORTEK is not set -# CONFIG_HID_PANTHERLORD is not set -# CONFIG_HID_PENMOUNT is not set -# CONFIG_HID_PETALYNX is not set -# CONFIG_HID_PICOLCD is not set -# CONFIG_HID_PLANTRONICS is not set -# CONFIG_HID_PXRC is not set -# CONFIG_HID_RAZER is not set -# CONFIG_HID_PRIMAX is not set -# CONFIG_HID_RETRODE is not set -# CONFIG_HID_ROCCAT is not set -# CONFIG_HID_SAITEK is not set -# CONFIG_HID_SAMSUNG is not set -# CONFIG_HID_SEMITEK is not set -# CONFIG_HID_SIGMAMICRO is not set -# CONFIG_HID_SONY is not set -# CONFIG_HID_SPEEDLINK is not set -# CONFIG_HID_STEAM is not set -# CONFIG_HID_STEELSERIES is not set -# CONFIG_HID_SUNPLUS is not set -# CONFIG_HID_RMI is not set -# CONFIG_HID_GREENASIA is not set -# CONFIG_HID_SMARTJOYPLUS is not set -# CONFIG_HID_TIVO is not set -# CONFIG_HID_TOPSEED is not set -# CONFIG_HID_TOPRE is not set -# CONFIG_HID_THINGM is not set -# CONFIG_HID_THRUSTMASTER is not set -# CONFIG_HID_UDRAW_PS3 is not set -# CONFIG_HID_U2FZERO is not set -# CONFIG_HID_WACOM is not set -# CONFIG_HID_WIIMOTE is not set -# CONFIG_HID_XINMO is not set -# CONFIG_HID_ZEROPLUS is not set -# CONFIG_HID_ZYDACRON is not set -# CONFIG_HID_SENSOR_HUB is not set -# CONFIG_HID_ALPS is not set -# CONFIG_HID_MCP2221 is not set -# end of Special HID drivers - -# -# USB HID 
support -# -CONFIG_USB_HID=y -# CONFIG_HID_PID is not set -CONFIG_USB_HIDDEV=y -# end of USB HID support - -# -# I2C HID support -# -CONFIG_I2C_HID_OF=y -# CONFIG_I2C_HID_OF_ELAN is not set -# CONFIG_I2C_HID_OF_GOODIX is not set -# end of I2C HID support - -CONFIG_I2C_HID_CORE=y - -# -# Intel ISH HID support -# -# end of Intel ISH HID support - -# -# AMD SFH HID Support -# -# CONFIG_AMD_SFH_HID is not set -# end of AMD SFH HID Support -# end of HID support - -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_SUPPORT=y -CONFIG_USB_COMMON=y -# CONFIG_USB_LED_TRIG is not set -# CONFIG_USB_ULPI_BUS is not set -# CONFIG_USB_CONN_GPIO is not set -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB=y -CONFIG_USB_PCI=y -# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set - -# -# Miscellaneous USB options -# -CONFIG_USB_DEFAULT_PERSIST=y -# CONFIG_USB_FEW_INIT_RETRIES is not set -# CONFIG_USB_DYNAMIC_MINORS is not set -# CONFIG_USB_OTG is not set -# CONFIG_USB_OTG_PRODUCTLIST is not set -# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set -# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set -CONFIG_USB_AUTOSUSPEND_DELAY=2 -# CONFIG_USB_MON is not set - -# -# USB Host Controller Drivers -# -# CONFIG_USB_C67X00_HCD is not set -CONFIG_USB_XHCI_HCD=y -# CONFIG_USB_XHCI_DBGCAP is not set -CONFIG_USB_XHCI_PCI=y -# CONFIG_USB_XHCI_PCI_RENESAS is not set -CONFIG_USB_XHCI_PLATFORM=y -# CONFIG_USB_XHCI_HISTB is not set -# CONFIG_USB_XHCI_MTK is not set -# CONFIG_USB_XHCI_MVEBU is not set -# CONFIG_USB_XHCI_RCAR is not set -# CONFIG_USB_BRCMSTB is not set -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_EHCI_TT_NEWSCHED=y -CONFIG_USB_EHCI_PCI=y -# CONFIG_USB_EHCI_FSL is not set -# CONFIG_USB_EHCI_HCD_NPCM7XX is not set -# CONFIG_USB_EHCI_HCD_ORION is not set -# CONFIG_USB_EHCI_HCD_SPEAR is not set -# CONFIG_USB_EHCI_HCD_STI is not set -# CONFIG_USB_EHCI_HCD_AT91 is not set -# CONFIG_USB_EHCI_SH is not set -# CONFIG_USB_EHCI_EXYNOS is not set -# CONFIG_USB_EHCI_MV is not set -CONFIG_USB_EHCI_K1X=y -# CONFIG_USB_CNS3XXX_EHCI is not set -# CONFIG_USB_EHCI_HCD_PLATFORM is not set -# CONFIG_USB_OXU210HP_HCD is not set -# CONFIG_USB_ISP116X_HCD is not set -# CONFIG_USB_ISP1362_HCD is not set -# CONFIG_USB_FOTG210_HCD is not set -# CONFIG_USB_MAX3421_HCD is not set -# CONFIG_USB_OHCI_HCD is not set -# CONFIG_USB_UHCI_HCD is not set -# CONFIG_USB_SL811_HCD is not set -# CONFIG_USB_R8A66597_HCD is not set -# CONFIG_USB_HCD_TEST_MODE is not set -# CONFIG_USB_RENESAS_USBHS is not set - -# -# USB Device Class drivers -# -# CONFIG_USB_ACM is not set -# CONFIG_USB_PRINTER is not set -CONFIG_USB_WDM=m -# CONFIG_USB_TMC is not set - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# -CONFIG_USB_STORAGE=y -# CONFIG_USB_STORAGE_DEBUG is not set -# CONFIG_USB_STORAGE_REALTEK is not set -# CONFIG_USB_STORAGE_DATAFAB is not set -# CONFIG_USB_STORAGE_FREECOM is not set -# CONFIG_USB_STORAGE_ISD200 is not set -# CONFIG_USB_STORAGE_USBAT is not set -# CONFIG_USB_STORAGE_SDDR09 is not set -# CONFIG_USB_STORAGE_SDDR55 is not set -# CONFIG_USB_STORAGE_JUMPSHOT is not set -# CONFIG_USB_STORAGE_ALAUDA is not set -# CONFIG_USB_STORAGE_ONETOUCH is not set -# CONFIG_USB_STORAGE_KARMA is not set -# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set -# CONFIG_USB_STORAGE_ENE_UB6250 is not set -CONFIG_USB_UAS=y - -# -# USB Imaging devices -# -# CONFIG_USB_MDC800 is not set -# CONFIG_USB_MICROTEK is not set -# CONFIG_USBIP_CORE is not set -# CONFIG_USB_CDNS_SUPPORT is not set -# CONFIG_USB_MTU3 is 
not set -# CONFIG_USB_MUSB_HDRC is not set -CONFIG_USB_DWC3=y -# CONFIG_USB_DWC3_HOST is not set -# CONFIG_USB_DWC3_GADGET is not set -CONFIG_USB_DWC3_DUAL_ROLE=y - -# -# Platform Glue Driver Support -# -CONFIG_USB_DWC3_OMAP=y -CONFIG_USB_DWC3_EXYNOS=y -CONFIG_USB_DWC3_HAPS=y -CONFIG_USB_DWC3_KEYSTONE=y -CONFIG_USB_DWC3_MESON_G12A=y -# CONFIG_USB_DWC3_OF_SIMPLE is not set -CONFIG_USB_DWC3_ST=y -CONFIG_USB_DWC3_QCOM=y -CONFIG_USB_DWC3_IMX8MP=y -CONFIG_USB_DWC3_AM62=y -CONFIG_USB_DWC3_SPACEMIT=y -# CONFIG_USB_DWC2 is not set -# CONFIG_USB_CHIPIDEA is not set -# CONFIG_USB_ISP1760 is not set - -# -# USB port drivers -# -CONFIG_USB_SERIAL=m -CONFIG_USB_SERIAL_GENERIC=y -CONFIG_USB_SERIAL_SIMPLE=m -CONFIG_USB_SERIAL_AIRCABLE=m -CONFIG_USB_SERIAL_ARK3116=m -CONFIG_USB_SERIAL_BELKIN=m -CONFIG_USB_SERIAL_CH341=m -CONFIG_USB_SERIAL_WHITEHEAT=m -CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m -CONFIG_USB_SERIAL_CP210X=m -CONFIG_USB_SERIAL_CYPRESS_M8=m -CONFIG_USB_SERIAL_EMPEG=m -CONFIG_USB_SERIAL_FTDI_SIO=m -CONFIG_USB_SERIAL_VISOR=m -CONFIG_USB_SERIAL_IPAQ=m -CONFIG_USB_SERIAL_IR=m -CONFIG_USB_SERIAL_EDGEPORT=m -CONFIG_USB_SERIAL_EDGEPORT_TI=m -CONFIG_USB_SERIAL_F81232=m -CONFIG_USB_SERIAL_F8153X=m -CONFIG_USB_SERIAL_GARMIN=m -CONFIG_USB_SERIAL_IPW=m -CONFIG_USB_SERIAL_IUU=m -CONFIG_USB_SERIAL_KEYSPAN_PDA=m -CONFIG_USB_SERIAL_KEYSPAN=m -CONFIG_USB_SERIAL_KLSI=m -CONFIG_USB_SERIAL_KOBIL_SCT=m -CONFIG_USB_SERIAL_MCT_U232=m -CONFIG_USB_SERIAL_METRO=m -CONFIG_USB_SERIAL_MOS7720=m -CONFIG_USB_SERIAL_MOS7840=m -CONFIG_USB_SERIAL_MXUPORT=m -CONFIG_USB_SERIAL_NAVMAN=m -CONFIG_USB_SERIAL_PL2303=m -CONFIG_USB_SERIAL_OTI6858=m -CONFIG_USB_SERIAL_QCAUX=m -CONFIG_USB_SERIAL_QUALCOMM=m -CONFIG_USB_SERIAL_SPCP8X5=m -CONFIG_USB_SERIAL_SAFE=m -CONFIG_USB_SERIAL_SAFE_PADDED=y -CONFIG_USB_SERIAL_SIERRAWIRELESS=m -CONFIG_USB_SERIAL_SYMBOL=m -CONFIG_USB_SERIAL_TI=m -CONFIG_USB_SERIAL_CYBERJACK=m -CONFIG_USB_SERIAL_WWAN=m -CONFIG_USB_SERIAL_OPTION=m -CONFIG_USB_SERIAL_OMNINET=m -CONFIG_USB_SERIAL_OPTICON=m -CONFIG_USB_SERIAL_XSENS_MT=m -CONFIG_USB_SERIAL_WISHBONE=m -CONFIG_USB_SERIAL_SSU100=m -CONFIG_USB_SERIAL_QT2=m -CONFIG_USB_SERIAL_UPD78F0730=m -CONFIG_USB_SERIAL_XR=m -CONFIG_USB_SERIAL_DEBUG=m - -# -# USB Miscellaneous drivers -# -# CONFIG_USB_EMI62 is not set -# CONFIG_USB_EMI26 is not set -# CONFIG_USB_ADUTUX is not set -# CONFIG_USB_SEVSEG is not set -# CONFIG_USB_LEGOTOWER is not set -# CONFIG_USB_LCD is not set -# CONFIG_USB_CYPRESS_CY7C63 is not set -# CONFIG_USB_CYTHERM is not set -# CONFIG_USB_IDMOUSE is not set -# CONFIG_USB_FTDI_ELAN is not set -# CONFIG_USB_APPLEDISPLAY is not set -# CONFIG_USB_QCOM_EUD is not set -# CONFIG_APPLE_MFI_FASTCHARGE is not set -# CONFIG_USB_SISUSBVGA is not set -# CONFIG_USB_LD is not set -# CONFIG_USB_TRANCEVIBRATOR is not set -# CONFIG_USB_IOWARRIOR is not set -# CONFIG_USB_TEST is not set -# CONFIG_USB_EHSET_TEST_FIXTURE is not set -# CONFIG_USB_ISIGHTFW is not set -# CONFIG_USB_YUREX is not set -CONFIG_USB_EZUSB_FX2=m -# CONFIG_USB_HUB_USB251XB is not set -# CONFIG_USB_HSIC_USB3503 is not set -# CONFIG_USB_HSIC_USB4604 is not set -# CONFIG_USB_LINK_LAYER_TEST is not set -# CONFIG_USB_CHAOSKEY is not set -# CONFIG_BRCM_USB_PINMAP is not set -# CONFIG_USB_ONBOARD_HUB is not set -CONFIG_SPACEMIT_ONBOARD_USB_HUB=y - -# -# USB Physical Layer drivers -# -CONFIG_USB_PHY=y -# CONFIG_NOP_USB_XCEIV is not set -# CONFIG_USB_GPIO_VBUS is not set -# CONFIG_USB_ISP1301 is not set -CONFIG_K1XCI_USB2_PHY=y -# CONFIG_USB_TEGRA_PHY is not set -# CONFIG_USB_ULPI is not set -# CONFIG_JZ4770_PHY is not 
set -# end of USB Physical Layer drivers - -CONFIG_USB_GADGET=y -# CONFIG_USB_GADGET_DEBUG is not set -# CONFIG_USB_GADGET_DEBUG_FILES is not set -# CONFIG_USB_GADGET_DEBUG_FS is not set -CONFIG_USB_GADGET_VBUS_DRAW=2 -CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 - -# -# USB Peripheral Controller -# -# CONFIG_USB_LPC32XX is not set -# CONFIG_USB_FOTG210_UDC is not set -# CONFIG_USB_GR_UDC is not set -# CONFIG_USB_R8A66597 is not set -# CONFIG_USB_RENESAS_USB3 is not set -# CONFIG_USB_PXA27X is not set -# CONFIG_USB_MV_UDC is not set -CONFIG_USB_K1X_UDC=y -# CONFIG_USB_MV_U3D is not set -# CONFIG_USB_SNP_UDC_PLAT is not set -# CONFIG_USB_M66592 is not set -# CONFIG_USB_BDC_UDC is not set -# CONFIG_USB_AMD5536UDC is not set -# CONFIG_USB_NET2272 is not set -# CONFIG_USB_NET2280 is not set -# CONFIG_USB_GOKU is not set -# CONFIG_USB_EG20T is not set -# CONFIG_USB_GADGET_XILINX is not set -# CONFIG_USB_MAX3420_UDC is not set -# CONFIG_USB_ASPEED_UDC is not set -# CONFIG_USB_ASPEED_VHUB is not set -# CONFIG_USB_DUMMY_HCD is not set -# end of USB Peripheral Controller - -CONFIG_USB_LIBCOMPOSITE=y -CONFIG_USB_U_ETHER=y -CONFIG_USB_F_RNDIS=y -CONFIG_USB_F_MASS_STORAGE=y -CONFIG_USB_F_FS=y -CONFIG_USB_F_UVC=y -CONFIG_USB_CONFIGFS=y -# CONFIG_USB_CONFIGFS_SERIAL is not set -# CONFIG_USB_CONFIGFS_ACM is not set -# CONFIG_USB_CONFIGFS_OBEX is not set -# CONFIG_USB_CONFIGFS_NCM is not set -# CONFIG_USB_CONFIGFS_ECM is not set -# CONFIG_USB_CONFIGFS_ECM_SUBSET is not set -CONFIG_USB_CONFIGFS_RNDIS=y -# CONFIG_USB_CONFIGFS_EEM is not set -CONFIG_USB_CONFIGFS_MASS_STORAGE=y -# CONFIG_USB_CONFIGFS_F_LB_SS is not set -CONFIG_USB_CONFIGFS_F_FS=y -# CONFIG_USB_CONFIGFS_F_UAC1 is not set -# CONFIG_USB_CONFIGFS_F_UAC1_LEGACY is not set -# CONFIG_USB_CONFIGFS_F_UAC2 is not set -# CONFIG_USB_CONFIGFS_F_MIDI is not set -# CONFIG_USB_CONFIGFS_F_HID is not set -CONFIG_USB_CONFIGFS_F_UVC=y -# CONFIG_USB_CONFIGFS_F_PRINTER is not set - -# -# USB Gadget precomposed configurations -# -# CONFIG_USB_ZERO is not set -# CONFIG_USB_AUDIO is not set -# CONFIG_USB_ETH is not set -# CONFIG_USB_G_NCM is not set -# CONFIG_USB_GADGETFS is not set -# CONFIG_USB_FUNCTIONFS is not set -# CONFIG_USB_MASS_STORAGE is not set -# CONFIG_USB_G_SERIAL is not set -# CONFIG_USB_MIDI_GADGET is not set -# CONFIG_USB_G_PRINTER is not set -# CONFIG_USB_CDC_COMPOSITE is not set -# CONFIG_USB_G_ACM_MS is not set -# CONFIG_USB_G_MULTI is not set -# CONFIG_USB_G_HID is not set -# CONFIG_USB_G_DBGP is not set -# CONFIG_USB_G_WEBCAM is not set -# CONFIG_USB_RAW_GADGET is not set -# end of USB Gadget precomposed configurations - -# CONFIG_TYPEC is not set -CONFIG_USB_ROLE_SWITCH=y -CONFIG_MMC=y -CONFIG_PWRSEQ_EMMC=y -CONFIG_PWRSEQ_SIMPLE=y -CONFIG_MMC_BLOCK=y -CONFIG_MMC_BLOCK_MINORS=8 -# CONFIG_SDIO_UART is not set -# CONFIG_MMC_TEST is not set - -# -# MMC/SD/SDIO Host Controller Drivers -# -# CONFIG_MMC_DEBUG is not set -CONFIG_MMC_SDHCI=y -# CONFIG_MMC_SDHCI_PCI is not set -CONFIG_MMC_SDHCI_PLTFM=y -# CONFIG_MMC_SDHCI_OF_ARASAN is not set -# CONFIG_MMC_SDHCI_OF_ASPEED is not set -# CONFIG_MMC_SDHCI_OF_AT91 is not set -# CONFIG_MMC_SDHCI_OF_ESDHC is not set -# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set -# CONFIG_MMC_SDHCI_OF_SPARX5 is not set -# CONFIG_MMC_SDHCI_OF_K1PRO is not set -CONFIG_MMC_SDHCI_OF_K1X=y -# CONFIG_MMC_SDHCI_CADENCE is not set -# CONFIG_MMC_SDHCI_CNS3XXX is not set -# CONFIG_MMC_SDHCI_ESDHC_IMX is not set -# CONFIG_MMC_SDHCI_DOVE is not set -# CONFIG_MMC_SDHCI_TEGRA is not set -# CONFIG_MMC_SDHCI_S3C is not set -# CONFIG_MMC_SDHCI_PXAV3 is 
not set -# CONFIG_MMC_SDHCI_PXAV2 is not set -# CONFIG_MMC_SDHCI_SPEAR is not set -# CONFIG_MMC_SDHCI_BCM_KONA is not set -# CONFIG_MMC_SDHCI_F_SDH30 is not set -# CONFIG_MMC_SDHCI_MILBEAUT is not set -# CONFIG_MMC_SDHCI_IPROC is not set -# CONFIG_MMC_MESON_GX is not set -# CONFIG_MMC_MESON_MX_SDHC is not set -# CONFIG_MMC_MESON_MX_SDIO is not set -# CONFIG_MMC_MOXART is not set -# CONFIG_MMC_SDHCI_ST is not set -# CONFIG_MMC_OMAP_HS is not set -# CONFIG_MMC_SDHCI_MSM is not set -# CONFIG_MMC_TIFM_SD is not set -# CONFIG_MMC_DAVINCI is not set -# CONFIG_MMC_SPI is not set -# CONFIG_MMC_S3C is not set -# CONFIG_MMC_SDHCI_SPRD is not set -# CONFIG_MMC_TMIO is not set -# CONFIG_MMC_SDHI is not set -# CONFIG_MMC_UNIPHIER is not set -# CONFIG_MMC_CB710 is not set -# CONFIG_MMC_VIA_SDMMC is not set -# CONFIG_MMC_DW is not set -# CONFIG_MMC_SH_MMCIF is not set -# CONFIG_MMC_VUB300 is not set -# CONFIG_MMC_USHC is not set -# CONFIG_MMC_USDHI6ROL0 is not set -# CONFIG_MMC_REALTEK_PCI is not set -# CONFIG_MMC_REALTEK_USB is not set -# CONFIG_MMC_CQHCI is not set -# CONFIG_MMC_HSQ is not set -# CONFIG_MMC_TOSHIBA_PCI is not set -# CONFIG_MMC_BCM2835 is not set -# CONFIG_MMC_MTK is not set -# CONFIG_MMC_SDHCI_XENON is not set -# CONFIG_MMC_SDHCI_OMAP is not set -# CONFIG_MMC_SDHCI_AM654 is not set -# CONFIG_MMC_OWL is not set -# CONFIG_MMC_LITEX is not set -# CONFIG_SCSI_UFSHCD is not set -# CONFIG_MEMSTICK is not set -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y -# CONFIG_LEDS_CLASS_FLASH is not set -# CONFIG_LEDS_CLASS_MULTICOLOR is not set -# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set - -# -# LED drivers -# -# CONFIG_LEDS_AN30259A is not set -# CONFIG_LEDS_ARIEL is not set -# CONFIG_LEDS_AW2013 is not set -# CONFIG_LEDS_BCM6328 is not set -# CONFIG_LEDS_BCM6358 is not set -# CONFIG_LEDS_CR0014114 is not set -# CONFIG_LEDS_EL15203000 is not set -# CONFIG_LEDS_LM3530 is not set -# CONFIG_LEDS_LM3532 is not set -# CONFIG_LEDS_LM3642 is not set -# CONFIG_LEDS_LM3692X is not set -# CONFIG_LEDS_S3C24XX is not set -# CONFIG_LEDS_COBALT_QUBE is not set -# CONFIG_LEDS_COBALT_RAQ is not set -# CONFIG_LEDS_PCA9532 is not set -CONFIG_LEDS_GPIO=y -# CONFIG_LEDS_LP3944 is not set -# CONFIG_LEDS_LP3952 is not set -# CONFIG_LEDS_LP50XX is not set -# CONFIG_LEDS_LP55XX_COMMON is not set -# CONFIG_LEDS_LP8860 is not set -# CONFIG_LEDS_PCA955X is not set -# CONFIG_LEDS_PCA963X is not set -# CONFIG_LEDS_DAC124S085 is not set -# CONFIG_LEDS_PWM is not set -# CONFIG_LEDS_REGULATOR is not set -# CONFIG_LEDS_BD2802 is not set -# CONFIG_LEDS_LT3593 is not set -CONFIG_LEDS_NS2=y -CONFIG_LEDS_NETXBIG=y -# CONFIG_LEDS_TCA6507 is not set -# CONFIG_LEDS_TLC591XX is not set -# CONFIG_LEDS_LM355x is not set -# CONFIG_LEDS_OT200 is not set -# CONFIG_LEDS_IS31FL319X is not set -# CONFIG_LEDS_IS31FL32XX is not set - -# -# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) -# -# CONFIG_LEDS_BLINKM is not set -# CONFIG_LEDS_SYSCON is not set -# CONFIG_LEDS_MLXREG is not set -# CONFIG_LEDS_USER is not set -# CONFIG_LEDS_SPI_BYTE is not set -# CONFIG_LEDS_TI_LMU_COMMON is not set -# CONFIG_LEDS_IP30 is not set -# CONFIG_LEDS_BCM63138 is not set -# CONFIG_LEDS_LGM is not set - -# -# Flash and Torch LED drivers -# - -# -# RGB LED drivers -# - -# -# LED Triggers -# -CONFIG_LEDS_TRIGGERS=y -CONFIG_LEDS_TRIGGER_TIMER=y -CONFIG_LEDS_TRIGGER_ONESHOT=y -# CONFIG_LEDS_TRIGGER_DISK is not set -# CONFIG_LEDS_TRIGGER_MTD is not set -CONFIG_LEDS_TRIGGER_HEARTBEAT=y -CONFIG_LEDS_TRIGGER_BACKLIGHT=y -# CONFIG_LEDS_TRIGGER_CPU is 
not set -# CONFIG_LEDS_TRIGGER_ACTIVITY is not set -CONFIG_LEDS_TRIGGER_GPIO=y -# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set - -# -# iptables trigger is under Netfilter config (LED target) -# -# CONFIG_LEDS_TRIGGER_TRANSIENT is not set -# CONFIG_LEDS_TRIGGER_CAMERA is not set -# CONFIG_LEDS_TRIGGER_PANIC is not set -# CONFIG_LEDS_TRIGGER_NETDEV is not set -# CONFIG_LEDS_TRIGGER_PATTERN is not set -# CONFIG_LEDS_TRIGGER_AUDIO is not set -# CONFIG_LEDS_TRIGGER_TTY is not set - -# -# Simple LED drivers -# -# CONFIG_ACCESSIBILITY is not set -# CONFIG_INFINIBAND is not set -CONFIG_EDAC_SUPPORT=y -# CONFIG_EDAC is not set -CONFIG_RTC_LIB=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -CONFIG_RTC_SYSTOHC=y -CONFIG_RTC_SYSTOHC_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set -CONFIG_RTC_NVMEM=y - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -# CONFIG_RTC_DRV_ABB5ZES3 is not set -# CONFIG_RTC_DRV_ABEOZ9 is not set -# CONFIG_RTC_DRV_ABX80X is not set -# CONFIG_RTC_DRV_BRCMSTB is not set -# CONFIG_RTC_DRV_DS1307 is not set -# CONFIG_RTC_DRV_DS1374 is not set -# CONFIG_RTC_DRV_DS1672 is not set -# CONFIG_RTC_DRV_HYM8563 is not set -# CONFIG_RTC_DRV_MAX6900 is not set -# CONFIG_RTC_DRV_MAX8907 is not set -# CONFIG_RTC_DRV_MAX77686 is not set -# CONFIG_RTC_DRV_NCT3018Y is not set -# CONFIG_RTC_DRV_RS5C372 is not set -# CONFIG_RTC_DRV_ISL1208 is not set -# CONFIG_RTC_DRV_ISL12022 is not set -# CONFIG_RTC_DRV_ISL12026 is not set -# CONFIG_RTC_DRV_X1205 is not set -# CONFIG_RTC_DRV_PCF8523 is not set -# CONFIG_RTC_DRV_PCF85063 is not set -# CONFIG_RTC_DRV_PCF85363 is not set -# CONFIG_RTC_DRV_PCF8563 is not set -# CONFIG_RTC_DRV_PCF8583 is not set -# CONFIG_RTC_DRV_M41T80 is not set -# CONFIG_RTC_DRV_BQ32K is not set -# CONFIG_RTC_DRV_S35390A is not set -# CONFIG_RTC_DRV_FM3130 is not set -# CONFIG_RTC_DRV_RX8010 is not set -# CONFIG_RTC_DRV_RX8581 is not set -# CONFIG_RTC_DRV_RX8025 is not set -# CONFIG_RTC_DRV_EM3027 is not set -# CONFIG_RTC_DRV_RV3028 is not set -# CONFIG_RTC_DRV_RV3032 is not set -# CONFIG_RTC_DRV_RV8803 is not set -# CONFIG_RTC_DRV_S5M is not set -# CONFIG_RTC_DRV_SD3078 is not set -CONFIG_RTC_DRV_SPT_PMIC=y - -# -# SPI RTC drivers -# -# CONFIG_RTC_DRV_M41T93 is not set -# CONFIG_RTC_DRV_M41T94 is not set -# CONFIG_RTC_DRV_DS1302 is not set -# CONFIG_RTC_DRV_DS1305 is not set -# CONFIG_RTC_DRV_DS1343 is not set -# CONFIG_RTC_DRV_DS1347 is not set -# CONFIG_RTC_DRV_DS1390 is not set -# CONFIG_RTC_DRV_MAX6916 is not set -# CONFIG_RTC_DRV_R9701 is not set -# CONFIG_RTC_DRV_RX4581 is not set -# CONFIG_RTC_DRV_RS5C348 is not set -# CONFIG_RTC_DRV_MAX6902 is not set -# CONFIG_RTC_DRV_PCF2123 is not set -# CONFIG_RTC_DRV_MCP795 is not set -CONFIG_RTC_I2C_AND_SPI=y - -# -# SPI and I2C RTC drivers -# -# CONFIG_RTC_DRV_DS3232 is not set -# CONFIG_RTC_DRV_PCF2127 is not set -# CONFIG_RTC_DRV_RV3029C2 is not set -# CONFIG_RTC_DRV_RX6110 is not set - -# -# Platform RTC drivers -# -# CONFIG_RTC_DRV_DS1286 is not set -# CONFIG_RTC_DRV_DS1511 is not set -# CONFIG_RTC_DRV_DS1553 is not set -# CONFIG_RTC_DRV_DS1685_FAMILY is not set -# CONFIG_RTC_DRV_DS1742 is not set -# CONFIG_RTC_DRV_DS2404 is not set -# CONFIG_RTC_DRV_EFI is not set -# CONFIG_RTC_DRV_STK17TA8 is not set -# CONFIG_RTC_DRV_M48T86 is not set -# CONFIG_RTC_DRV_M48T35 is not set -# CONFIG_RTC_DRV_M48T59 is not set -# CONFIG_RTC_DRV_MSM6242 is not set -# 
CONFIG_RTC_DRV_BQ4802 is not set -# CONFIG_RTC_DRV_RP5C01 is not set -# CONFIG_RTC_DRV_V3020 is not set -# CONFIG_RTC_DRV_GAMECUBE is not set -# CONFIG_RTC_DRV_SC27XX is not set -CONFIG_RTC_DRV_SPEAR=y -# CONFIG_RTC_DRV_ZYNQMP is not set - -# -# on-CPU RTC drivers -# -# CONFIG_RTC_DRV_ASM9260 is not set -# CONFIG_RTC_DRV_DAVINCI is not set -# CONFIG_RTC_DRV_DIGICOLOR is not set -# CONFIG_RTC_DRV_FSL_FTM_ALARM is not set -# CONFIG_RTC_DRV_MESON is not set -# CONFIG_RTC_DRV_MESON_VRTC is not set -# CONFIG_RTC_DRV_OMAP is not set -# CONFIG_RTC_DRV_S3C is not set -# CONFIG_RTC_DRV_EP93XX is not set -CONFIG_RTC_DRV_SA1100=y -# CONFIG_RTC_DRV_AT91RM9200 is not set -# CONFIG_RTC_DRV_AT91SAM9 is not set -# CONFIG_RTC_DRV_RZN1 is not set -# CONFIG_RTC_DRV_GENERIC is not set -# CONFIG_RTC_DRV_VT8500 is not set -# CONFIG_RTC_DRV_SUN6I is not set -# CONFIG_RTC_DRV_SUNXI is not set -# CONFIG_RTC_DRV_MV is not set -# CONFIG_RTC_DRV_ARMADA38X is not set -# CONFIG_RTC_DRV_CADENCE is not set -# CONFIG_RTC_DRV_FTRTC010 is not set -# CONFIG_RTC_DRV_STMP is not set -# CONFIG_RTC_DRV_JZ4740 is not set -# CONFIG_RTC_DRV_LPC24XX is not set -# CONFIG_RTC_DRV_LPC32XX is not set -# CONFIG_RTC_DRV_PM8XXX is not set -# CONFIG_RTC_DRV_TEGRA is not set -# CONFIG_RTC_DRV_MXC is not set -# CONFIG_RTC_DRV_MXC_V2 is not set -# CONFIG_RTC_DRV_SNVS is not set -# CONFIG_RTC_DRV_MOXART is not set -# CONFIG_RTC_DRV_MT2712 is not set -# CONFIG_RTC_DRV_MT6397 is not set -# CONFIG_RTC_DRV_MT7622 is not set -# CONFIG_RTC_DRV_XGENE is not set -# CONFIG_RTC_DRV_R7301 is not set -# CONFIG_RTC_DRV_STM32 is not set -# CONFIG_RTC_DRV_RTD119X is not set -# CONFIG_RTC_DRV_ASPEED is not set -# CONFIG_RTC_DRV_TI_K3 is not set - -# -# HID Sensor RTC drivers -# -# CONFIG_RTC_DRV_GOLDFISH is not set -# CONFIG_RTC_DRV_MSC313 is not set -CONFIG_DMADEVICES=y -# CONFIG_DMADEVICES_DEBUG is not set - -# -# DMA Devices -# -CONFIG_DMA_ENGINE=y -CONFIG_DMA_VIRTUAL_CHANNELS=y -CONFIG_DMA_OF=y -# CONFIG_ALTERA_MSGDMA is not set -# CONFIG_APPLE_ADMAC is not set -# CONFIG_AXI_DMAC is not set -# CONFIG_BCM_SBA_RAID is not set -# CONFIG_DMA_JZ4780 is not set -# CONFIG_DMA_SA11X0 is not set -# CONFIG_DMA_SUN6I is not set -# CONFIG_DW_AXI_DMAC is not set -# CONFIG_EP93XX_DMA is not set -# CONFIG_FSL_EDMA is not set -# CONFIG_HISI_DMA is not set -# CONFIG_IMG_MDC_DMA is not set -# CONFIG_INTEL_IDMA64 is not set -# CONFIG_INTEL_IOP_ADMA is not set -# CONFIG_K3_DMA is not set -# CONFIG_MCF_EDMA is not set -# CONFIG_MILBEAUT_HDMAC is not set -# CONFIG_MILBEAUT_XDMAC is not set -CONFIG_MMP_PDMA_DRIVER=y -# CONFIG_MMP_PDMA is not set -CONFIG_MMP_PDMA_SPACEMIT_K1X=y -CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT=y -CONFIG_ADMA_SPACEMIT_K1X=y -# CONFIG_MMP_TDMA is not set -# CONFIG_MV_XOR is not set -# CONFIG_MXS_DMA is not set -# CONFIG_NBPFAXI_DMA is not set -# CONFIG_PCH_DMA is not set -# CONFIG_PLX_DMA is not set -# CONFIG_STM32_DMA is not set -# CONFIG_STM32_DMAMUX is not set -# CONFIG_STM32_MDMA is not set -# CONFIG_SPRD_DMA is not set -# CONFIG_S3C24XX_DMAC is not set -# CONFIG_TEGRA20_APB_DMA is not set -# CONFIG_TEGRA210_ADMA is not set -# CONFIG_TIMB_DMA is not set -# CONFIG_UNIPHIER_MDMAC is not set -# CONFIG_UNIPHIER_XDMAC is not set -# CONFIG_XGENE_DMA is not set -# CONFIG_XILINX_ZYNQMP_DMA is not set -# CONFIG_XILINX_ZYNQMP_DPDMA is not set -CONFIG_USERSPACE_DMA=y -# CONFIG_MTK_HSDMA is not set -# CONFIG_MTK_CQDMA is not set -# CONFIG_QCOM_HIDMA_MGMT is not set -# CONFIG_QCOM_HIDMA is not set -# CONFIG_DW_DMAC is not set -# CONFIG_DW_DMAC_PCI is not set -# 
CONFIG_DW_EDMA is not set -# CONFIG_DW_EDMA_PCIE is not set -# CONFIG_SF_PDMA is not set -CONFIG_RENESAS_DMA=y -CONFIG_SH_DMAE_BASE=y -# CONFIG_SH_DMAE is not set -# CONFIG_RCAR_DMAC is not set -# CONFIG_RENESAS_USB_DMAC is not set -# CONFIG_RZ_DMAC is not set -CONFIG_TI_EDMA=y -CONFIG_DMA_OMAP=y -CONFIG_TI_DMA_CROSSBAR=y -# CONFIG_INTEL_LDMA is not set - -# -# DMA Clients -# -# CONFIG_ASYNC_TX_DMA is not set -# CONFIG_DMATEST is not set - -# -# DMABUF options -# -CONFIG_SYNC_FILE=y -CONFIG_SW_SYNC=y -CONFIG_UDMABUF=y -# CONFIG_DMABUF_MOVE_NOTIFY is not set -# CONFIG_DMABUF_DEBUG is not set -# CONFIG_DMABUF_SELFTESTS is not set -CONFIG_DMABUF_HEAPS=y -# CONFIG_DMABUF_SYSFS_STATS is not set -CONFIG_DMABUF_HEAPS_SYSTEM=y -CONFIG_DMABUF_HEAPS_CMA=y -# end of DMABUF options - -# CONFIG_AUXDISPLAY is not set -# CONFIG_UIO is not set -# CONFIG_VFIO is not set -# CONFIG_VIRT_DRIVERS is not set -CONFIG_VIRTIO_ANCHOR=y -CONFIG_VIRTIO=y -# CONFIG_VIRTIO_MENU is not set -# CONFIG_VDPA is not set -# CONFIG_VHOST_MENU is not set - -# -# Microsoft Hyper-V guest support -# -# end of Microsoft Hyper-V guest support - -# CONFIG_GREYBUS is not set -# CONFIG_COMEDI is not set -# CONFIG_STAGING is not set -# CONFIG_GOLDFISH is not set -# CONFIG_CHROME_PLATFORMS is not set -# CONFIG_MELLANOX_PLATFORM is not set -# CONFIG_OLPC_XO175 is not set -CONFIG_SURFACE_PLATFORMS=y -CONFIG_HAVE_CLK=y -CONFIG_HAVE_CLK_PREPARE=y -CONFIG_COMMON_CLK=y - -# -# Clock driver for ARM Reference designs -# -# CONFIG_CLK_ICST is not set -# CONFIG_CLK_SP810 is not set -# end of Clock driver for ARM Reference designs - -# CONFIG_CLK_HSDK is not set -# CONFIG_LMK04832 is not set -# CONFIG_COMMON_CLK_APPLE_NCO is not set -# CONFIG_COMMON_CLK_MAX77686 is not set -# CONFIG_COMMON_CLK_MAX9485 is not set -# CONFIG_COMMON_CLK_HI655X is not set -# CONFIG_COMMON_CLK_SCMI is not set -# CONFIG_COMMON_CLK_SCPI is not set -# CONFIG_COMMON_CLK_SI5341 is not set -# CONFIG_COMMON_CLK_SI5351 is not set -# CONFIG_COMMON_CLK_SI514 is not set -# CONFIG_COMMON_CLK_SI544 is not set -# CONFIG_COMMON_CLK_SI570 is not set -# CONFIG_COMMON_CLK_BM1880 is not set -# CONFIG_COMMON_CLK_CDCE706 is not set -# CONFIG_COMMON_CLK_TPS68470 is not set -# CONFIG_COMMON_CLK_CDCE925 is not set -# CONFIG_COMMON_CLK_CS2000_CP is not set -# CONFIG_COMMON_CLK_EN7523 is not set -# CONFIG_COMMON_CLK_FSL_FLEXSPI is not set -# CONFIG_COMMON_CLK_FSL_SAI is not set -# CONFIG_COMMON_CLK_GEMINI is not set -# CONFIG_COMMON_CLK_LAN966X is not set -# CONFIG_COMMON_CLK_ASPEED is not set -# CONFIG_COMMON_CLK_S2MPS11 is not set -# CONFIG_COMMON_CLK_AXI_CLKGEN is not set -# CONFIG_CLK_QORIQ is not set -# CONFIG_CLK_LS1028A_PLLDIG is not set -# CONFIG_COMMON_CLK_XGENE is not set -# CONFIG_COMMON_CLK_PWM is not set -# CONFIG_COMMON_CLK_OXNAS is not set -# CONFIG_COMMON_CLK_RS9_PCIE is not set -# CONFIG_COMMON_CLK_VC5 is not set -# CONFIG_COMMON_CLK_VC7 is not set -# CONFIG_COMMON_CLK_MMP2_AUDIO is not set -# CONFIG_COMMON_CLK_FIXED_MMIO is not set -# CONFIG_CLK_ACTIONS is not set -# CONFIG_CLK_BAIKAL_T1 is not set -# CONFIG_CLK_BCM2711_DVP is not set -# CONFIG_CLK_BCM2835 is not set -# CONFIG_CLK_BCM_63XX is not set -# CONFIG_CLK_BCM_63XX_GATE is not set -# CONFIG_CLK_BCM_KONA is not set -# CONFIG_CLK_BCM_CYGNUS is not set -# CONFIG_CLK_BCM_HR2 is not set -# CONFIG_CLK_BCM_NSP is not set -# CONFIG_CLK_BCM_NS2 is not set -# CONFIG_CLK_BCM_SR is not set -# CONFIG_CLK_RASPBERRYPI is not set -# CONFIG_COMMON_CLK_HI3516CV300 is not set -# CONFIG_COMMON_CLK_HI3519 is not set -# 
CONFIG_COMMON_CLK_HI3559A is not set -# CONFIG_COMMON_CLK_HI3660 is not set -# CONFIG_COMMON_CLK_HI3670 is not set -# CONFIG_COMMON_CLK_HI3798CV200 is not set -# CONFIG_COMMON_CLK_HI6220 is not set -# CONFIG_RESET_HISI is not set -# CONFIG_STUB_CLK_HI6220 is not set -# CONFIG_STUB_CLK_HI3660 is not set -# CONFIG_COMMON_CLK_BOSTON is not set -# CONFIG_MXC_CLK is not set -# CONFIG_CLK_IMX8MM is not set -# CONFIG_CLK_IMX8MN is not set -# CONFIG_CLK_IMX8MP is not set -# CONFIG_CLK_IMX8MQ is not set -# CONFIG_CLK_IMX8ULP is not set -# CONFIG_CLK_IMX93 is not set - -# -# Ingenic SoCs drivers -# -# CONFIG_INGENIC_CGU_JZ4740 is not set -# CONFIG_INGENIC_CGU_JZ4725B is not set -# CONFIG_INGENIC_CGU_JZ4760 is not set -# CONFIG_INGENIC_CGU_JZ4770 is not set -# CONFIG_INGENIC_CGU_JZ4780 is not set -# CONFIG_INGENIC_CGU_X1000 is not set -# CONFIG_INGENIC_CGU_X1830 is not set -# CONFIG_INGENIC_TCU_CLK is not set -# end of Ingenic SoCs drivers - -# CONFIG_COMMON_CLK_KEYSTONE is not set -# CONFIG_TI_SYSCON_CLK is not set - -# -# Clock driver for MediaTek SoC -# -# CONFIG_COMMON_CLK_MT2701 is not set -# CONFIG_COMMON_CLK_MT2712 is not set -# CONFIG_COMMON_CLK_MT6765 is not set -# CONFIG_COMMON_CLK_MT6779 is not set -# CONFIG_COMMON_CLK_MT6795 is not set -# CONFIG_COMMON_CLK_MT6797 is not set -# CONFIG_COMMON_CLK_MT7622 is not set -# CONFIG_COMMON_CLK_MT7629 is not set -# CONFIG_COMMON_CLK_MT7986 is not set -# CONFIG_COMMON_CLK_MT8135 is not set -# CONFIG_COMMON_CLK_MT8167 is not set -# CONFIG_COMMON_CLK_MT8173 is not set -# CONFIG_COMMON_CLK_MT8183 is not set -# CONFIG_COMMON_CLK_MT8186 is not set -# CONFIG_COMMON_CLK_MT8192 is not set -# CONFIG_COMMON_CLK_MT8195 is not set -# CONFIG_COMMON_CLK_MT8365 is not set -# CONFIG_COMMON_CLK_MT8516 is not set -# end of Clock driver for MediaTek SoC - -# -# Clock support for Amlogic platforms -# -# end of Clock support for Amlogic platforms - -# CONFIG_MSTAR_MSC313_MPLL is not set -# CONFIG_MCHP_CLK_MPFS is not set -# CONFIG_COMMON_CLK_PISTACHIO is not set -# CONFIG_COMMON_CLK_QCOM is not set -# CONFIG_CLK_MT7621 is not set -# CONFIG_CLK_RENESAS is not set -# CONFIG_COMMON_CLK_SAMSUNG is not set -# CONFIG_S3C2410_COMMON_CLK is not set -# CONFIG_S3C2412_COMMON_CLK is not set -# CONFIG_S3C2443_COMMON_CLK is not set -# CONFIG_CLK_SIFIVE is not set -# CONFIG_CLK_INTEL_SOCFPGA is not set -# CONFIG_SPRD_COMMON_CLK is not set -# CONFIG_CLK_STARFIVE_JH7100 is not set -CONFIG_CLK_SUNXI=y -CONFIG_CLK_SUNXI_CLOCKS=y -CONFIG_CLK_SUNXI_PRCM_SUN6I=y -CONFIG_CLK_SUNXI_PRCM_SUN8I=y -CONFIG_CLK_SUNXI_PRCM_SUN9I=y -# CONFIG_SUNXI_CCU is not set -# CONFIG_COMMON_CLK_TI_ADPLL is not set -# CONFIG_CLK_UNIPHIER is not set -# CONFIG_COMMON_CLK_VISCONTI is not set -# CONFIG_CLK_LGM_CGU is not set -# CONFIG_XILINX_VCU is not set -# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set -# CONFIG_COMMON_CLK_ZYNQMP is not set -CONFIG_SPACEMIT_K1X_CCU=y -# CONFIG_HWSPINLOCK is not set - -# -# Clock Source drivers -# -CONFIG_TIMER_OF=y -CONFIG_TIMER_PROBE=y -# CONFIG_BCM2835_TIMER is not set -# CONFIG_BCM_KONA_TIMER is not set -# CONFIG_DAVINCI_TIMER is not set -# CONFIG_DIGICOLOR_TIMER is not set -# CONFIG_OMAP_DM_TIMER is not set -# CONFIG_DW_APB_TIMER is not set -# CONFIG_FTTMR010_TIMER is not set -# CONFIG_IXP4XX_TIMER is not set -# CONFIG_MESON6_TIMER is not set -# CONFIG_OWL_TIMER is not set -# CONFIG_RDA_TIMER is not set -# CONFIG_SPACEMIT_K1X_TIMER is not set -# CONFIG_SUN4I_TIMER is not set -# CONFIG_SUN5I_HSTIMER is not set -# CONFIG_TEGRA_TIMER is not set -# CONFIG_TEGRA186_TIMER is not set -# 
CONFIG_VT8500_TIMER is not set -# CONFIG_NPCM7XX_TIMER is not set -# CONFIG_CADENCE_TTC_TIMER is not set -# CONFIG_ASM9260_TIMER is not set -# CONFIG_CLKSRC_DBX500_PRCMU is not set -# CONFIG_CLPS711X_TIMER is not set -# CONFIG_MXS_TIMER is not set -# CONFIG_NSPIRE_TIMER is not set -# CONFIG_INTEGRATOR_AP_TIMER is not set -# CONFIG_CLKSRC_PISTACHIO is not set -# CONFIG_CLKSRC_TI_32K is not set -# CONFIG_CLKSRC_STM32_LP is not set -# CONFIG_CLKSRC_MPS2 is not set -# CONFIG_ARC_TIMERS is not set -# CONFIG_ARM_TIMER_SP804 is not set -# CONFIG_ARMV7M_SYSTICK is not set -# CONFIG_ATMEL_PIT is not set -# CONFIG_ATMEL_ST is not set -# CONFIG_CLKSRC_SAMSUNG_PWM is not set -# CONFIG_FSL_FTM_TIMER is not set -# CONFIG_OXNAS_RPS_TIMER is not set -# CONFIG_MTK_TIMER is not set -# CONFIG_SPRD_TIMER is not set -# CONFIG_CLKSRC_JCORE_PIT is not set -# CONFIG_SH_TIMER_CMT is not set -# CONFIG_SH_TIMER_MTU2 is not set -# CONFIG_RENESAS_OSTM is not set -# CONFIG_SH_TIMER_TMU is not set -# CONFIG_EM_TIMER_STI is not set -# CONFIG_CLKSRC_VERSATILE is not set -# CONFIG_CLKSRC_PXA is not set -# CONFIG_TIMER_IMX_SYS_CTR is not set -# CONFIG_CLKSRC_ST_LPC is not set -# CONFIG_GXP_TIMER is not set -CONFIG_RISCV_TIMER=y -# CONFIG_CLINT_TIMER is not set -# CONFIG_MSC313E_TIMER is not set -# CONFIG_INGENIC_TIMER is not set -# CONFIG_INGENIC_SYSOST is not set -# CONFIG_INGENIC_OST is not set -# CONFIG_MICROCHIP_PIT64B is not set -# end of Clock Source drivers - -CONFIG_MAILBOX=y -# CONFIG_IMX_MBOX is not set -# CONFIG_PLATFORM_MHU is not set -# CONFIG_ARMADA_37XX_RWTM_MBOX is not set -# CONFIG_ROCKCHIP_MBOX is not set -# CONFIG_ALTERA_MBOX is not set -# CONFIG_HI3660_MBOX is not set -# CONFIG_HI6220_MBOX is not set -# CONFIG_MAILBOX_TEST is not set -# CONFIG_POLARFIRE_SOC_MAILBOX is not set -# CONFIG_QCOM_APCS_IPC is not set -# CONFIG_BCM_PDC_MBOX is not set -# CONFIG_STM32_IPCC is not set -# CONFIG_MTK_ADSP_MBOX is not set -# CONFIG_MTK_CMDQ_MBOX is not set -# CONFIG_SUN6I_MSGBOX is not set -# CONFIG_SPRD_MBOX is not set -# CONFIG_QCOM_IPCC is not set -CONFIG_SPACEMIT_MAILBOX=y -# CONFIG_K1PRO_MAILBOX is not set -CONFIG_K1X_MAILBOX=y -# CONFIG_IOMMU_SUPPORT is not set - -# -# Remoteproc drivers -# -CONFIG_REMOTEPROC=y -# CONFIG_REMOTEPROC_CDEV is not set -# CONFIG_INGENIC_VPU_RPROC is not set -# CONFIG_MTK_SCP is not set -# CONFIG_MESON_MX_AO_ARC_REMOTEPROC is not set -# CONFIG_K1PRO_REMOTEPROC is not set -CONFIG_K1X_REMOTEPROC=y -# CONFIG_RCAR_REMOTEPROC is not set -# end of Remoteproc drivers - -# -# Rpmsg drivers -# -CONFIG_RPMSG=y -# CONFIG_RPMSG_CHAR is not set -# CONFIG_RPMSG_CTRL is not set -CONFIG_RPMSG_NS=y -# CONFIG_RPMSG_QCOM_GLINK_RPM is not set -CONFIG_RPMSG_VIRTIO=y -# end of Rpmsg drivers - -# CONFIG_SOUNDWIRE is not set - -# -# SOC (System On Chip) specific Drivers -# -# CONFIG_OWL_PM_DOMAINS is not set - -# -# Amlogic SoC drivers -# -# CONFIG_MESON_CANVAS is not set -# CONFIG_MESON_CLK_MEASURE is not set -# CONFIG_MESON_GX_SOCINFO is not set -# CONFIG_MESON_GX_PM_DOMAINS is not set -# CONFIG_MESON_EE_PM_DOMAINS is not set -# CONFIG_MESON_MX_SOCINFO is not set -# end of Amlogic SoC drivers - -# -# Apple SoC drivers -# -# CONFIG_APPLE_PMGR_PWRSTATE is not set -# CONFIG_APPLE_RTKIT is not set -# CONFIG_APPLE_SART is not set -# end of Apple SoC drivers - -# -# ASPEED SoC drivers -# -# CONFIG_ASPEED_LPC_CTRL is not set -# CONFIG_ASPEED_LPC_SNOOP is not set -# CONFIG_ASPEED_UART_ROUTING is not set -# CONFIG_ASPEED_P2A_CTRL is not set -# CONFIG_ASPEED_SOCINFO is not set -# end of ASPEED SoC drivers - -# 
CONFIG_AT91_SOC_ID is not set -# CONFIG_AT91_SOC_SFR is not set - -# -# Broadcom SoC drivers -# -# CONFIG_BCM2835_POWER is not set -# CONFIG_SOC_BCM63XX is not set -# CONFIG_SOC_BRCMSTB is not set -# CONFIG_BCM_PMB is not set -# end of Broadcom SoC drivers - -# -# NXP/Freescale QorIQ SoC drivers -# -# CONFIG_QUICC_ENGINE is not set -CONFIG_DPAA2_CONSOLE=y -# end of NXP/Freescale QorIQ SoC drivers - -# -# fujitsu SoC drivers -# -# end of fujitsu SoC drivers - -# -# i.MX SoC drivers -# -# CONFIG_IMX_GPCV2_PM_DOMAINS is not set -# CONFIG_SOC_IMX8M is not set -# CONFIG_SOC_IMX9 is not set -# end of i.MX SoC drivers - -# -# IXP4xx SoC drivers -# -# CONFIG_IXP4XX_QMGR is not set -# CONFIG_IXP4XX_NPE is not set -# end of IXP4xx SoC drivers - -# -# Enable LiteX SoC Builder specific drivers -# -# CONFIG_LITEX_SOC_CONTROLLER is not set -# end of Enable LiteX SoC Builder specific drivers - -# -# MediaTek SoC drivers -# -# CONFIG_MTK_CMDQ is not set -# CONFIG_MTK_DEVAPC is not set -# CONFIG_MTK_INFRACFG is not set -# CONFIG_MTK_PMIC_WRAP is not set -# CONFIG_MTK_SCPSYS is not set -# CONFIG_MTK_SCPSYS_PM_DOMAINS is not set -# CONFIG_MTK_MMSYS is not set -# end of MediaTek SoC drivers - -# -# Qualcomm SoC drivers -# -# CONFIG_QCOM_AOSS_QMP is not set -# CONFIG_QCOM_COMMAND_DB is not set -# CONFIG_QCOM_GENI_SE is not set -# CONFIG_QCOM_GSBI is not set -# CONFIG_QCOM_LLCC is not set -# CONFIG_QCOM_RPMH is not set -# CONFIG_QCOM_SMD_RPM is not set -# CONFIG_QCOM_SPM is not set -# CONFIG_QCOM_WCNSS_CTRL is not set -# CONFIG_QCOM_APR is not set -# CONFIG_QCOM_ICC_BWMON is not set -# end of Qualcomm SoC drivers - -# CONFIG_SOC_RENESAS is not set -# CONFIG_ROCKCHIP_GRF is not set -# CONFIG_ROCKCHIP_IODOMAIN is not set -# CONFIG_ROCKCHIP_PM_DOMAINS is not set -# CONFIG_SOC_SAMSUNG is not set -# CONFIG_SOC_TEGRA20_VOLTAGE_COUPLER is not set -# CONFIG_SOC_TEGRA30_VOLTAGE_COUPLER is not set -# CONFIG_SOC_TI is not set -# CONFIG_UX500_SOC_ID is not set - -# -# Xilinx SoC drivers -# -# end of Xilinx SoC drivers - -CONFIG_SPACEMIT_PM_DOMAINS=y -CONFIG_SPACEMIT_REBOOT_CONTROL=y -# CONFIG_SPACEMIT_LID_CONTROL is not set -CONFIG_SPACEMI_K1X_DMA_RANGE=y -CONFIG_CHIP_MEDIA_JPU=y -# CONFIG_JPU_ENABLE_DEBUG_MSG is not set -CONFIG_SPACEMIT_V2D=y -CONFIG_SPACEMIT_RFKILL=y -# end of SOC (System On Chip) specific Drivers - -# CONFIG_PM_DEVFREQ is not set -CONFIG_EXTCON=y - -# -# Extcon Device Drivers -# -# CONFIG_EXTCON_FSA9480 is not set -# CONFIG_EXTCON_USB_K1XCI is not set -# CONFIG_EXTCON_GPIO is not set -# CONFIG_EXTCON_MAX3355 is not set -# CONFIG_EXTCON_PTN5150 is not set -# CONFIG_EXTCON_QCOM_SPMI_MISC is not set -# CONFIG_EXTCON_RT8973A is not set -# CONFIG_EXTCON_SM5502 is not set -# CONFIG_EXTCON_USB_GPIO is not set -# CONFIG_MEMORY is not set -# CONFIG_IIO is not set -# CONFIG_NTB is not set -CONFIG_PWM=y -CONFIG_PWM_SYSFS=y -# CONFIG_PWM_DEBUG is not set -# CONFIG_PWM_ATMEL is not set -# CONFIG_PWM_ATMEL_TCB is not set -# CONFIG_PWM_BCM_IPROC is not set -# CONFIG_PWM_BCM_KONA is not set -# CONFIG_PWM_BCM2835 is not set -# CONFIG_PWM_BERLIN is not set -# CONFIG_PWM_BRCMSTB is not set -# CONFIG_PWM_CLK is not set -# CONFIG_PWM_CLPS711X is not set -# CONFIG_PWM_DWC is not set -# CONFIG_PWM_DWC_K1PRO is not set -# CONFIG_PWM_EP93XX is not set -# CONFIG_PWM_FSL_FTM is not set -# CONFIG_PWM_HIBVT is not set -# CONFIG_PWM_IMG is not set -# CONFIG_PWM_IMX1 is not set -# CONFIG_PWM_IMX27 is not set -# CONFIG_PWM_IMX_TPM is not set -# CONFIG_PWM_INTEL_LGM is not set -# CONFIG_PWM_IQS620A is not set -# CONFIG_PWM_JZ4740 is 
not set -# CONFIG_PWM_KEEMBAY is not set -# CONFIG_PWM_LPC18XX_SCT is not set -# CONFIG_PWM_LPC32XX is not set -# CONFIG_PWM_LPSS_PCI is not set -# CONFIG_PWM_LPSS_PLATFORM is not set -# CONFIG_PWM_MESON is not set -# CONFIG_PWM_MTK_DISP is not set -# CONFIG_PWM_MEDIATEK is not set -# CONFIG_PWM_MXS is not set -# CONFIG_PWM_OMAP_DMTIMER is not set -# CONFIG_PWM_PCA9685 is not set -# CONFIG_PWM_PXA is not set -# CONFIG_PWM_RASPBERRYPI_POE is not set -# CONFIG_PWM_RCAR is not set -# CONFIG_PWM_RENESAS_TPU is not set -# CONFIG_PWM_ROCKCHIP is not set -# CONFIG_PWM_SAMSUNG is not set -# CONFIG_PWM_SIFIVE is not set -# CONFIG_PWM_SL28CPLD is not set -# CONFIG_PWM_SPEAR is not set -# CONFIG_PWM_SPRD is not set -# CONFIG_PWM_STI is not set -# CONFIG_PWM_STM32 is not set -# CONFIG_PWM_STM32_LP is not set -# CONFIG_PWM_SUN4I is not set -# CONFIG_PWM_SUNPLUS is not set -# CONFIG_PWM_TEGRA is not set -# CONFIG_PWM_TIECAP is not set -# CONFIG_PWM_TIEHRPWM is not set -# CONFIG_PWM_VISCONTI is not set -# CONFIG_PWM_VT8500 is not set -# CONFIG_PWM_XILINX is not set - -# -# IRQ chip support -# -CONFIG_IRQCHIP=y -# CONFIG_AL_FIC is not set -# CONFIG_JCORE_AIC is not set -# CONFIG_RENESAS_INTC_IRQPIN is not set -# CONFIG_RENESAS_IRQC is not set -# CONFIG_RENESAS_RZA1_IRQC is not set -# CONFIG_RENESAS_RZG2L_IRQC is not set -# CONFIG_SL28CPLD_INTC is not set -# CONFIG_TS4800_IRQ is not set -# CONFIG_XILINX_INTC is not set -# CONFIG_INGENIC_TCU_IRQ is not set -# CONFIG_IRQ_UNIPHIER_AIDET is not set -# CONFIG_MESON_IRQ_GPIO is not set -# CONFIG_IMX_IRQSTEER is not set -# CONFIG_IMX_INTMUX is not set -# CONFIG_IMX_MU_MSI is not set -CONFIG_RISCV_INTC=y -CONFIG_SIFIVE_PLIC=y -# CONFIG_EXYNOS_IRQ_COMBINER is not set -# CONFIG_MST_IRQ is not set -# CONFIG_MCHP_EIC is not set -# CONFIG_SUNPLUS_SP7021_INTC is not set -# end of IRQ chip support - -# CONFIG_IPACK_BUS is not set -CONFIG_RESET_CONTROLLER=y -# CONFIG_RESET_A10SR is not set -# CONFIG_RESET_ATH79 is not set -# CONFIG_RESET_AXS10X is not set -# CONFIG_RESET_BCM6345 is not set -# CONFIG_RESET_BERLIN is not set -# CONFIG_RESET_BRCMSTB is not set -# CONFIG_RESET_BRCMSTB_RESCAL is not set -# CONFIG_RESET_HSDK is not set -# CONFIG_RESET_IMX7 is not set -# CONFIG_RESET_INTEL_GW is not set -# CONFIG_RESET_K210 is not set -# CONFIG_RESET_LANTIQ is not set -# CONFIG_RESET_LPC18XX is not set -# CONFIG_RESET_MCHP_SPARX5 is not set -# CONFIG_RESET_MESON is not set -# CONFIG_RESET_MESON_AUDIO_ARB is not set -# CONFIG_RESET_NPCM is not set -# CONFIG_RESET_PISTACHIO is not set -# CONFIG_RESET_QCOM_AOSS is not set -# CONFIG_RESET_QCOM_PDC is not set -CONFIG_RESET_RASPBERRYPI=y -# CONFIG_RESET_RZG2L_USBPHY_CTRL is not set -# CONFIG_RESET_SCMI is not set -# CONFIG_RESET_SIMPLE is not set -# CONFIG_RESET_SOCFPGA is not set -# CONFIG_RESET_STARFIVE_JH7100 is not set -# CONFIG_RESET_SUNPLUS is not set -# CONFIG_RESET_SUNXI is not set -# CONFIG_RESET_TI_SCI is not set -# CONFIG_RESET_TI_SYSCON is not set -# CONFIG_RESET_TI_TPS380X is not set -# CONFIG_RESET_TN48M_CPLD is not set -# CONFIG_RESET_UNIPHIER is not set -# CONFIG_RESET_UNIPHIER_GLUE is not set -# CONFIG_RESET_ZYNQ is not set -CONFIG_RESET_K1X_SPACEMIT=y -# CONFIG_RESET_K1MATRIX_SPACEMIT is not set -# CONFIG_COMMON_RESET_HI3660 is not set -# CONFIG_COMMON_RESET_HI6220 is not set - -# -# PHY Subsystem -# -CONFIG_GENERIC_PHY=y -# CONFIG_PHY_LPC18XX_USB_OTG is not set -# CONFIG_PHY_PISTACHIO_USB is not set -# CONFIG_PHY_XGENE is not set -# CONFIG_USB_LGM_PHY is not set -# CONFIG_PHY_CAN_TRANSCEIVER is not set -# 
CONFIG_PHY_SUN4I_USB is not set -# CONFIG_PHY_SUN6I_MIPI_DPHY is not set -# CONFIG_PHY_SUN9I_USB is not set -# CONFIG_PHY_SUN50I_USB3 is not set -# CONFIG_PHY_MESON8_HDMI_TX is not set -# CONFIG_PHY_MESON8B_USB2 is not set -# CONFIG_PHY_MESON_GXL_USB2 is not set -# CONFIG_PHY_MESON_G12A_MIPI_DPHY_ANALOG is not set -# CONFIG_PHY_MESON_G12A_USB2 is not set -# CONFIG_PHY_MESON_G12A_USB3_PCIE is not set -# CONFIG_PHY_MESON_AXG_PCIE is not set -# CONFIG_PHY_MESON_AXG_MIPI_PCIE_ANALOG is not set -# CONFIG_PHY_MESON_AXG_MIPI_DPHY is not set - -# -# PHY drivers for Broadcom platforms -# -# CONFIG_PHY_BCM63XX_USBH is not set -# CONFIG_PHY_CYGNUS_PCIE is not set -# CONFIG_PHY_BCM_SR_USB is not set -# CONFIG_BCM_KONA_USB2_PHY is not set -# CONFIG_PHY_BCM_NS_USB2 is not set -# CONFIG_PHY_BCM_NS_USB3 is not set -# CONFIG_PHY_NS2_PCIE is not set -# CONFIG_PHY_NS2_USB_DRD is not set -# CONFIG_PHY_BRCM_SATA is not set -# CONFIG_PHY_BRCM_USB is not set -# CONFIG_PHY_BCM_SR_PCIE is not set -# end of PHY drivers for Broadcom platforms - -# CONFIG_PHY_CADENCE_TORRENT is not set -# CONFIG_PHY_CADENCE_DPHY is not set -# CONFIG_PHY_CADENCE_DPHY_RX is not set -# CONFIG_PHY_CADENCE_SIERRA is not set -# CONFIG_PHY_CADENCE_SALVO is not set -# CONFIG_PHY_FSL_IMX8MQ_USB is not set -# CONFIG_PHY_MIXEL_LVDS_PHY is not set -# CONFIG_PHY_MIXEL_MIPI_DPHY is not set -# CONFIG_PHY_FSL_IMX8M_PCIE is not set -# CONFIG_PHY_FSL_LYNX_28G is not set -# CONFIG_PHY_HI6220_USB is not set -# CONFIG_PHY_HI3660_USB is not set -# CONFIG_PHY_HI3670_USB is not set -# CONFIG_PHY_HI3670_PCIE is not set -# CONFIG_PHY_HISTB_COMBPHY is not set -# CONFIG_PHY_HISI_INNO_USB2 is not set -# CONFIG_PHY_INGENIC_USB is not set -# CONFIG_PHY_LANTIQ_VRX200_PCIE is not set -# CONFIG_PHY_LANTIQ_RCU_USB2 is not set -# CONFIG_ARMADA375_USBCLUSTER_PHY is not set -# CONFIG_PHY_BERLIN_SATA is not set -# CONFIG_PHY_BERLIN_USB is not set -CONFIG_PHY_MVEBU_A3700_UTMI=y -# CONFIG_PHY_MVEBU_A38X_COMPHY is not set -# CONFIG_PHY_MVEBU_CP110_UTMI is not set -# CONFIG_PHY_PXA_28NM_HSIC is not set -# CONFIG_PHY_PXA_28NM_USB2 is not set -# CONFIG_PHY_PXA_USB is not set -# CONFIG_PHY_MMP3_USB is not set -# CONFIG_PHY_MMP3_HSIC is not set -# CONFIG_PHY_MTK_PCIE is not set -# CONFIG_PHY_MTK_TPHY is not set -# CONFIG_PHY_MTK_UFS is not set -# CONFIG_PHY_MTK_XSPHY is not set -# CONFIG_PHY_MTK_HDMI is not set -# CONFIG_PHY_MTK_MIPI_DSI is not set -# CONFIG_PHY_MTK_DP is not set -# CONFIG_PHY_SPARX5_SERDES is not set -# CONFIG_PHY_LAN966X_SERDES is not set -# CONFIG_PHY_MAPPHONE_MDM6600 is not set -# CONFIG_PHY_OCELOT_SERDES is not set -# CONFIG_PHY_ATH79_USB is not set -# CONFIG_PHY_QCOM_EDP is not set -# CONFIG_PHY_QCOM_IPQ4019_USB is not set -# CONFIG_PHY_QCOM_PCIE2 is not set -# CONFIG_PHY_QCOM_QMP is not set -# CONFIG_PHY_QCOM_QUSB2 is not set -# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set -# CONFIG_PHY_QCOM_USB_HS_28NM is not set -# CONFIG_PHY_QCOM_USB_SS is not set -# CONFIG_PHY_QCOM_IPQ806X_USB is not set -# CONFIG_PHY_MT7621_PCI is not set -# CONFIG_PHY_RALINK_USB is not set -# CONFIG_PHY_RCAR_GEN3_USB3 is not set -# CONFIG_PHY_ROCKCHIP_DPHY_RX0 is not set -# CONFIG_PHY_ROCKCHIP_INNO_HDMI is not set -# CONFIG_PHY_ROCKCHIP_INNO_USB2 is not set -# CONFIG_PHY_ROCKCHIP_INNO_CSIDPHY is not set -# CONFIG_PHY_ROCKCHIP_INNO_DSIDPHY is not set -# CONFIG_PHY_ROCKCHIP_PCIE is not set -# CONFIG_PHY_ROCKCHIP_SNPS_PCIE3 is not set -# CONFIG_PHY_ROCKCHIP_TYPEC is not set -# CONFIG_PHY_EXYNOS_DP_VIDEO is not set -# CONFIG_PHY_EXYNOS_MIPI_VIDEO is not set -# CONFIG_PHY_EXYNOS_PCIE is 
not set -# CONFIG_PHY_SAMSUNG_UFS is not set -# CONFIG_PHY_SAMSUNG_USB2 is not set -CONFIG_PHY_EXYNOS5_USBDRD=y -# CONFIG_PHY_UNIPHIER_USB2 is not set -# CONFIG_PHY_UNIPHIER_USB3 is not set -# CONFIG_PHY_UNIPHIER_PCIE is not set -# CONFIG_PHY_UNIPHIER_AHCI is not set -# CONFIG_PHY_ST_SPEAR1310_MIPHY is not set -# CONFIG_PHY_ST_SPEAR1340_MIPHY is not set -# CONFIG_PHY_STIH407_USB is not set -# CONFIG_PHY_STM32_USBPHYC is not set -# CONFIG_PHY_SUNPLUS_USB is not set -# CONFIG_PHY_TEGRA194_P2U is not set -# CONFIG_PHY_DA8XX_USB is not set -# CONFIG_PHY_DM816X_USB is not set -# CONFIG_PHY_AM654_SERDES is not set -# CONFIG_PHY_J721E_WIZ is not set -# CONFIG_OMAP_CONTROL_PHY is not set -# CONFIG_TI_PIPE3 is not set -# CONFIG_PHY_INTEL_KEEMBAY_EMMC is not set -# CONFIG_PHY_INTEL_KEEMBAY_USB is not set -# CONFIG_PHY_INTEL_LGM_COMBO is not set -# CONFIG_PHY_INTEL_LGM_EMMC is not set -# CONFIG_PHY_INTEL_THUNDERBAY_EMMC is not set -# CONFIG_PHY_XILINX_ZYNQMP is not set -CONFIG_PHY_SPACEMIT_K1X_COMBPHY=y -# end of PHY Subsystem - -# CONFIG_POWERCAP is not set -# CONFIG_MCB is not set - -# -# Performance monitor support -# -# CONFIG_ARM_CCN is not set -# CONFIG_ARM_CMN is not set -CONFIG_RISCV_PMU=y -CONFIG_RISCV_PMU_LEGACY=y -CONFIG_RISCV_PMU_SBI=y -# CONFIG_ARM_SMMU_V3_PMU is not set -# CONFIG_FSL_IMX8_DDR_PMU is not set -# CONFIG_XGENE_PMU is not set -# CONFIG_ARM_DMC620_PMU is not set -# CONFIG_MARVELL_CN10K_TAD_PMU is not set -# CONFIG_ALIBABA_UNCORE_DRW_PMU is not set -# CONFIG_HNS3_PMU is not set -# CONFIG_MARVELL_CN10K_DDR_PMU is not set -# end of Performance monitor support - -CONFIG_RAS=y -# CONFIG_USB4 is not set - -# -# Android -# -# CONFIG_ANDROID_BINDER_IPC is not set -# end of Android - -# CONFIG_LIBNVDIMM is not set -# CONFIG_DAX is not set -CONFIG_NVMEM=y -CONFIG_NVMEM_SYSFS=y -# CONFIG_NVMEM_APPLE_EFUSES is not set -# CONFIG_NVMEM_BCM_OCOTP is not set -# CONFIG_NVMEM_BRCM_NVRAM is not set -# CONFIG_NVMEM_IMX_IIM is not set -# CONFIG_NVMEM_IMX_OCOTP is not set -# CONFIG_NVMEM_JZ4780_EFUSE is not set -# CONFIG_NVMEM_LAN9662_OTPC is not set -# CONFIG_NVMEM_LAYERSCAPE_SFP is not set -# CONFIG_NVMEM_LPC18XX_EEPROM is not set -# CONFIG_NVMEM_LPC18XX_OTP is not set -# CONFIG_NVMEM_MESON_MX_EFUSE is not set -# CONFIG_NVMEM_MICROCHIP_OTPC is not set -# CONFIG_NVMEM_MTK_EFUSE is not set -# CONFIG_NVMEM_MXS_OCOTP is not set -# CONFIG_NVMEM_NINTENDO_OTP is not set -# CONFIG_NVMEM_QCOM_QFPROM is not set -# CONFIG_NVMEM_RMEM is not set -# CONFIG_NVMEM_ROCKCHIP_EFUSE is not set -# CONFIG_NVMEM_ROCKCHIP_OTP is not set -# CONFIG_NVMEM_SC27XX_EFUSE is not set -# CONFIG_NVMEM_SNVS_LPGPR is not set -# CONFIG_NVMEM_SPRD_EFUSE is not set -# CONFIG_NVMEM_STM32_ROMEM is not set -# CONFIG_NVMEM_SUNPLUS_OCOTP is not set -# CONFIG_NVMEM_U_BOOT_ENV is not set -# CONFIG_NVMEM_UNIPHIER_EFUSE is not set -# CONFIG_NVMEM_VF610_OCOTP is not set - -# -# HW tracing support -# -# CONFIG_STM is not set -# CONFIG_INTEL_TH is not set -# CONFIG_HISI_PTT is not set -# end of HW tracing support - -# CONFIG_FPGA is not set -# CONFIG_FSI is not set -# CONFIG_TEE is not set -CONFIG_MULTIPLEXER=y - -# -# Multiplexer drivers -# -# CONFIG_MUX_ADG792A is not set -# CONFIG_MUX_ADGS1408 is not set -# CONFIG_MUX_GPIO is not set -# CONFIG_MUX_MMIO is not set -# end of Multiplexer drivers - -CONFIG_PM_OPP=y -# CONFIG_SIOX is not set -# CONFIG_SLIMBUS is not set -# CONFIG_INTERCONNECT is not set -# CONFIG_COUNTER is not set -# CONFIG_MOST is not set -# CONFIG_PECI is not set -# CONFIG_HTE is not set -# end of Device Drivers - -# -# File 
systems -# -CONFIG_VALIDATE_FS_PARSER=y -CONFIG_FS_IOMAP=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -# CONFIG_EXT4_DEBUG is not set -CONFIG_JBD2=y -# CONFIG_JBD2_DEBUG is not set -CONFIG_FS_MBCACHE=y -# CONFIG_REISERFS_FS is not set -# CONFIG_JFS_FS is not set -CONFIG_XFS_FS=y -CONFIG_XFS_SUPPORT_V4=y -# CONFIG_XFS_QUOTA is not set -# CONFIG_XFS_POSIX_ACL is not set -# CONFIG_XFS_RT is not set -# CONFIG_XFS_ONLINE_SCRUB is not set -# CONFIG_XFS_WARN is not set -# CONFIG_XFS_DEBUG is not set -# CONFIG_GFS2_FS is not set -# CONFIG_OCFS2_FS is not set -CONFIG_BTRFS_FS=y -CONFIG_BTRFS_FS_POSIX_ACL=y -# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set -# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set -# CONFIG_BTRFS_DEBUG is not set -# CONFIG_BTRFS_ASSERT is not set -# CONFIG_BTRFS_FS_REF_VERIFY is not set -# CONFIG_NILFS2_FS is not set -CONFIG_F2FS_FS=y -CONFIG_F2FS_STAT_FS=y -# CONFIG_F2FS_FS_XATTR is not set -CONFIG_F2FS_CHECK_FS=y -# CONFIG_F2FS_FAULT_INJECTION is not set -# CONFIG_F2FS_FS_COMPRESSION is not set -CONFIG_F2FS_IOSTAT=y -# CONFIG_F2FS_UNFAIR_RWSEM is not set -CONFIG_FS_POSIX_ACL=y -CONFIG_EXPORTFS=y -# CONFIG_EXPORTFS_BLOCK_OPS is not set -CONFIG_FILE_LOCKING=y -# CONFIG_FS_ENCRYPTION is not set -# CONFIG_FS_VERITY is not set -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -CONFIG_FANOTIFY=y -# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set -# CONFIG_QUOTA is not set -CONFIG_AUTOFS4_FS=y -CONFIG_AUTOFS_FS=y -CONFIG_FUSE_FS=m -# CONFIG_CUSE is not set -# CONFIG_VIRTIO_FS is not set -CONFIG_OVERLAY_FS=y -# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set -CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y -# CONFIG_OVERLAY_FS_INDEX is not set -# CONFIG_OVERLAY_FS_XINO_AUTO is not set -# CONFIG_OVERLAY_FS_METACOPY is not set - -# -# Caches -# -CONFIG_NETFS_SUPPORT=y -CONFIG_NETFS_STATS=y -CONFIG_FSCACHE=y -# CONFIG_FSCACHE_STATS is not set -# CONFIG_FSCACHE_DEBUG is not set -CONFIG_CACHEFILES=y -# CONFIG_CACHEFILES_DEBUG is not set -# CONFIG_CACHEFILES_ERROR_INJECTION is not set -# CONFIG_CACHEFILES_ONDEMAND is not set -# end of Caches - -# -# CD-ROM/DVD Filesystems -# -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=y -# end of CD-ROM/DVD Filesystems - -# -# DOS/FAT/EXFAT/NT Filesystems -# -CONFIG_FAT_FS=y -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=y -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" -CONFIG_FAT_DEFAULT_UTF8=y -CONFIG_EXFAT_FS=m -CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8" -CONFIG_NTFS_FS=m -# CONFIG_NTFS_DEBUG is not set -CONFIG_NTFS_RW=y -# CONFIG_NTFS3_FS is not set -# end of DOS/FAT/EXFAT/NT Filesystems - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -# CONFIG_PROC_KCORE is not set -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -CONFIG_PROC_CHILDREN=y -CONFIG_KERNFS=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_XATTR=y -# CONFIG_TMPFS_INODE64 is not set -CONFIG_ARCH_SUPPORTS_HUGETLBFS=y -# CONFIG_HUGETLBFS is not set -CONFIG_MEMFD_CREATE=y -CONFIG_ARCH_HAS_GIGANTIC_PAGE=y -CONFIG_CONFIGFS_FS=y -CONFIG_EFIVAR_FS=m -# end of Pseudo filesystems - -CONFIG_MISC_FILESYSTEMS=y -# CONFIG_ORANGEFS_FS is not set -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_ECRYPT_FS is not set -CONFIG_HFS_FS=m -CONFIG_HFSPLUS_FS=m -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set 
-# CONFIG_JFFS2_FS is not set -# CONFIG_CRAMFS is not set -CONFIG_SQUASHFS=y -# CONFIG_SQUASHFS_FILE_CACHE is not set -CONFIG_SQUASHFS_FILE_DIRECT=y -# CONFIG_SQUASHFS_DECOMP_SINGLE is not set -# CONFIG_SQUASHFS_DECOMP_MULTI is not set -CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y -# CONFIG_SQUASHFS_XATTR is not set -CONFIG_SQUASHFS_ZLIB=y -CONFIG_SQUASHFS_LZ4=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -CONFIG_SQUASHFS_ZSTD=y -# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set -# CONFIG_SQUASHFS_EMBEDDED is not set -CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 -# CONFIG_VXFS_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_QNX6FS_FS is not set -# CONFIG_ROMFS_FS is not set -# CONFIG_PSTORE is not set -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -# CONFIG_EROFS_FS is not set -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=y -CONFIG_NFS_V2=y -CONFIG_NFS_V3=y -# CONFIG_NFS_V3_ACL is not set -CONFIG_NFS_V4=y -CONFIG_NFS_SWAP=y -CONFIG_NFS_V4_1=y -CONFIG_NFS_V4_2=y -CONFIG_PNFS_FILE_LAYOUT=y -CONFIG_PNFS_BLOCK=y -CONFIG_PNFS_FLEXFILE_LAYOUT=y -CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" -CONFIG_NFS_V4_1_MIGRATION=y -CONFIG_NFS_V4_SECURITY_LABEL=y -CONFIG_ROOT_NFS=y -CONFIG_NFS_FSCACHE=y -# CONFIG_NFS_USE_LEGACY_DNS is not set -CONFIG_NFS_USE_KERNEL_DNS=y -CONFIG_NFS_DISABLE_UDP_SUPPORT=y -# CONFIG_NFS_V4_2_READ_PLUS is not set -# CONFIG_NFSD is not set -CONFIG_GRACE_PERIOD=y -CONFIG_LOCKD=y -CONFIG_LOCKD_V4=y -CONFIG_NFS_COMMON=y -CONFIG_NFS_V4_2_SSC_HELPER=y -CONFIG_SUNRPC=y -CONFIG_SUNRPC_GSS=y -CONFIG_SUNRPC_BACKCHANNEL=y -CONFIG_SUNRPC_SWAP=y -# CONFIG_SUNRPC_DEBUG is not set -# CONFIG_CEPH_FS is not set -CONFIG_CIFS=y -CONFIG_CIFS_STATS2=y -CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y -# CONFIG_CIFS_UPCALL is not set -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_POSIX=y -CONFIG_CIFS_DEBUG=y -# CONFIG_CIFS_DEBUG2 is not set -# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set -# CONFIG_CIFS_DFS_UPCALL is not set -# CONFIG_CIFS_SWN_UPCALL is not set -CONFIG_CIFS_FSCACHE=y -# CONFIG_CIFS_ROOT is not set -CONFIG_SMB_SERVER=m -CONFIG_SMB_SERVER_SMBDIRECT=y -CONFIG_SMB_SERVER_CHECK_CAP_NET_ADMIN=y -CONFIG_SMB_SERVER_KERBEROS5=y -CONFIG_SMBFS_COMMON=y -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="utf8" -CONFIG_NLS_CODEPAGE_437=y -# CONFIG_NLS_CODEPAGE_737 is not set -# CONFIG_NLS_CODEPAGE_775 is not set -# CONFIG_NLS_CODEPAGE_850 is not set -# CONFIG_NLS_CODEPAGE_852 is not set -# CONFIG_NLS_CODEPAGE_855 is not set -# CONFIG_NLS_CODEPAGE_857 is not set -# CONFIG_NLS_CODEPAGE_860 is not set -# CONFIG_NLS_CODEPAGE_861 is not set -# CONFIG_NLS_CODEPAGE_862 is not set -# CONFIG_NLS_CODEPAGE_863 is not set -# CONFIG_NLS_CODEPAGE_864 is not set -# CONFIG_NLS_CODEPAGE_865 is not set -# CONFIG_NLS_CODEPAGE_866 is not set -# CONFIG_NLS_CODEPAGE_869 is not set -# CONFIG_NLS_CODEPAGE_936 is not set -# CONFIG_NLS_CODEPAGE_950 is not set -# CONFIG_NLS_CODEPAGE_932 is not set -# CONFIG_NLS_CODEPAGE_949 is not set -# CONFIG_NLS_CODEPAGE_874 is not set -# CONFIG_NLS_ISO8859_8 is not set -# CONFIG_NLS_CODEPAGE_1250 is not set -# CONFIG_NLS_CODEPAGE_1251 is not set -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -# CONFIG_NLS_ISO8859_2 is not set -# CONFIG_NLS_ISO8859_3 is not set -# CONFIG_NLS_ISO8859_4 is not set -# CONFIG_NLS_ISO8859_5 is not set -# CONFIG_NLS_ISO8859_6 is not set -# CONFIG_NLS_ISO8859_7 is not set -# CONFIG_NLS_ISO8859_9 is not set -# CONFIG_NLS_ISO8859_13 is not set -# 
CONFIG_NLS_ISO8859_14 is not set -# CONFIG_NLS_ISO8859_15 is not set -# CONFIG_NLS_KOI8_R is not set -# CONFIG_NLS_KOI8_U is not set -# CONFIG_NLS_MAC_ROMAN is not set -# CONFIG_NLS_MAC_CELTIC is not set -# CONFIG_NLS_MAC_CENTEURO is not set -# CONFIG_NLS_MAC_CROATIAN is not set -# CONFIG_NLS_MAC_CYRILLIC is not set -# CONFIG_NLS_MAC_GAELIC is not set -# CONFIG_NLS_MAC_GREEK is not set -# CONFIG_NLS_MAC_ICELAND is not set -# CONFIG_NLS_MAC_INUIT is not set -# CONFIG_NLS_MAC_ROMANIAN is not set -# CONFIG_NLS_MAC_TURKISH is not set -CONFIG_NLS_UTF8=y -# CONFIG_DLM is not set -CONFIG_UNICODE=y -# CONFIG_UNICODE_NORMALIZATION_SELFTEST is not set -CONFIG_IO_WQ=y -# end of File systems - -# -# Security options -# -CONFIG_KEYS=y -# CONFIG_KEYS_REQUEST_CACHE is not set -# CONFIG_PERSISTENT_KEYRINGS is not set -# CONFIG_TRUSTED_KEYS is not set -CONFIG_ENCRYPTED_KEYS=m -# CONFIG_USER_DECRYPTED_DATA is not set -CONFIG_KEY_DH_OPERATIONS=y -# CONFIG_SECURITY_DMESG_RESTRICT is not set -CONFIG_SECURITY=y -CONFIG_SECURITYFS=y -CONFIG_SECURITY_NETWORK=y -# CONFIG_SECURITY_NETWORK_XFRM is not set -CONFIG_SECURITY_PATH=y -CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y -# CONFIG_HARDENED_USERCOPY is not set -# CONFIG_FORTIFY_SOURCE is not set -# CONFIG_STATIC_USERMODEHELPER is not set -# CONFIG_SECURITY_SELINUX is not set -# CONFIG_SECURITY_SMACK is not set -# CONFIG_SECURITY_TOMOYO is not set -CONFIG_SECURITY_APPARMOR=y -# CONFIG_SECURITY_APPARMOR_DEBUG is not set -CONFIG_SECURITY_APPARMOR_INTROSPECT_POLICY=y -CONFIG_SECURITY_APPARMOR_HASH=y -CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y -CONFIG_SECURITY_APPARMOR_EXPORT_BINARY=y -CONFIG_SECURITY_APPARMOR_PARANOID_LOAD=y -# CONFIG_SECURITY_LOADPIN is not set -# CONFIG_SECURITY_YAMA is not set -# CONFIG_SECURITY_SAFESETID is not set -# CONFIG_SECURITY_LOCKDOWN_LSM is not set -# CONFIG_SECURITY_LANDLOCK is not set -CONFIG_INTEGRITY=y -# CONFIG_INTEGRITY_SIGNATURE is not set -CONFIG_INTEGRITY_AUDIT=y -# CONFIG_IMA is not set -# CONFIG_EVM is not set -CONFIG_DEFAULT_SECURITY_APPARMOR=y -# CONFIG_DEFAULT_SECURITY_DAC is not set -CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf" - -# -# Kernel hardening options -# - -# -# Memory initialization -# -CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y -CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y -CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y -CONFIG_INIT_STACK_NONE=y -# CONFIG_INIT_STACK_ALL_PATTERN is not set -# CONFIG_INIT_STACK_ALL_ZERO is not set -# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set -# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set -CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y -# CONFIG_ZERO_CALL_USED_REGS is not set -# end of Memory initialization - -CONFIG_RANDSTRUCT_NONE=y -# end of Kernel hardening options -# end of Security options - -CONFIG_XOR_BLOCKS=y -CONFIG_ASYNC_CORE=y -CONFIG_ASYNC_MEMCPY=y -CONFIG_ASYNC_XOR=y -CONFIG_ASYNC_PQ=y -CONFIG_ASYNC_RAID6_RECOV=y -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_SKCIPHER=y -CONFIG_CRYPTO_SKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_RNG_DEFAULT=y -CONFIG_CRYPTO_AKCIPHER2=y -CONFIG_CRYPTO_AKCIPHER=y -CONFIG_CRYPTO_KPP2=y -CONFIG_CRYPTO_KPP=y -CONFIG_CRYPTO_ACOMP2=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -# CONFIG_CRYPTO_USER is not set -CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y -CONFIG_CRYPTO_GF128MUL=y -CONFIG_CRYPTO_NULL=y -CONFIG_CRYPTO_NULL2=y -# CONFIG_CRYPTO_PCRYPT is not set -# 
CONFIG_CRYPTO_CRYPTD is not set -CONFIG_CRYPTO_AUTHENC=y -# CONFIG_CRYPTO_TEST is not set -# end of Crypto core or helper - -# -# Public-key cryptography -# -CONFIG_CRYPTO_RSA=y -CONFIG_CRYPTO_DH=y -# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set -CONFIG_CRYPTO_ECC=y -CONFIG_CRYPTO_ECDH=y -# CONFIG_CRYPTO_ECDSA is not set -# CONFIG_CRYPTO_ECRDSA is not set -# CONFIG_CRYPTO_SM2 is not set -CONFIG_CRYPTO_CURVE25519=y -# end of Public-key cryptography - -# -# Block ciphers -# -CONFIG_CRYPTO_AES=y -# CONFIG_CRYPTO_AES_TI is not set -# CONFIG_CRYPTO_ANUBIS is not set -# CONFIG_CRYPTO_ARIA is not set -# CONFIG_CRYPTO_BLOWFISH is not set -# CONFIG_CRYPTO_CAMELLIA is not set -# CONFIG_CRYPTO_CAST5 is not set -# CONFIG_CRYPTO_CAST6 is not set -CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_FCRYPT is not set -# CONFIG_CRYPTO_KHAZAD is not set -# CONFIG_CRYPTO_SEED is not set -# CONFIG_CRYPTO_SERPENT is not set -# CONFIG_CRYPTO_SM4_GENERIC is not set -# CONFIG_CRYPTO_TEA is not set -# CONFIG_CRYPTO_TWOFISH is not set -# end of Block ciphers - -# -# Length-preserving ciphers and modes -# -# CONFIG_CRYPTO_ADIANTUM is not set -# CONFIG_CRYPTO_ARC4 is not set -CONFIG_CRYPTO_CHACHA20=y -CONFIG_CRYPTO_CBC=y -# CONFIG_CRYPTO_CFB is not set -CONFIG_CRYPTO_CTR=y -# CONFIG_CRYPTO_CTS is not set -CONFIG_CRYPTO_ECB=y -# CONFIG_CRYPTO_HCTR2 is not set -# CONFIG_CRYPTO_KEYWRAP is not set -# CONFIG_CRYPTO_LRW is not set -CONFIG_CRYPTO_OFB=y -# CONFIG_CRYPTO_PCBC is not set -# CONFIG_CRYPTO_XTS is not set -# end of Length-preserving ciphers and modes - -# -# AEAD (authenticated encryption with associated data) ciphers -# -# CONFIG_CRYPTO_AEGIS128 is not set -CONFIG_CRYPTO_CHACHA20POLY1305=y -CONFIG_CRYPTO_CCM=y -CONFIG_CRYPTO_GCM=y -CONFIG_CRYPTO_SEQIV=y -CONFIG_CRYPTO_ECHAINIV=y -CONFIG_CRYPTO_ESSIV=y -# end of AEAD (authenticated encryption with associated data) ciphers - -# -# Hashes, digests, and MACs -# -CONFIG_CRYPTO_BLAKE2B=y -CONFIG_CRYPTO_CMAC=y -CONFIG_CRYPTO_GHASH=y -CONFIG_CRYPTO_HMAC=y -# CONFIG_CRYPTO_MD4 is not set -CONFIG_CRYPTO_MD5=y -# CONFIG_CRYPTO_MICHAEL_MIC is not set -CONFIG_CRYPTO_POLY1305=y -# CONFIG_CRYPTO_RMD160 is not set -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -CONFIG_CRYPTO_SHA3=y -# CONFIG_CRYPTO_SM3_GENERIC is not set -# CONFIG_CRYPTO_STREEBOG is not set -# CONFIG_CRYPTO_VMAC is not set -# CONFIG_CRYPTO_WP512 is not set -# CONFIG_CRYPTO_XCBC is not set -CONFIG_CRYPTO_XXHASH=y -# end of Hashes, digests, and MACs - -# -# CRCs (cyclic redundancy checks) -# -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CRC32=y -CONFIG_CRYPTO_CRCT10DIF=y -CONFIG_CRYPTO_CRC64_ROCKSOFT=y -# end of CRCs (cyclic redundancy checks) - -# -# Compression -# -CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRYPTO_LZO=y -CONFIG_CRYPTO_842=m -CONFIG_CRYPTO_LZ4=m -CONFIG_CRYPTO_LZ4HC=m -CONFIG_CRYPTO_ZSTD=y -# end of Compression - -# -# Random number generation -# -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=y -CONFIG_CRYPTO_DRBG_HMAC=y -# CONFIG_CRYPTO_DRBG_HASH is not set -# CONFIG_CRYPTO_DRBG_CTR is not set -CONFIG_CRYPTO_DRBG=y -CONFIG_CRYPTO_JITTERENTROPY=y -CONFIG_CRYPTO_KDF800108_CTR=y -# end of Random number generation - -# -# Userspace interface -# -CONFIG_CRYPTO_USER_API=y -CONFIG_CRYPTO_USER_API_HASH=y -CONFIG_CRYPTO_USER_API_SKCIPHER=y -# CONFIG_CRYPTO_USER_API_RNG is not set -# CONFIG_CRYPTO_USER_API_AEAD is not set -CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y -# end of Userspace interface - -CONFIG_CRYPTO_HASH_INFO=y -CONFIG_CRYPTO_HW=y -# CONFIG_CRYPTO_DEV_ALLWINNER is not set -# 
CONFIG_CRYPTO_DEV_SL3516 is not set -# CONFIG_CRYPTO_DEV_EXYNOS_RNG is not set -# CONFIG_CRYPTO_DEV_S5P is not set -# CONFIG_CRYPTO_DEV_ATMEL_AES is not set -# CONFIG_CRYPTO_DEV_ATMEL_TDES is not set -# CONFIG_CRYPTO_DEV_ATMEL_SHA is not set -# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set -# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set -# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set -# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set -# CONFIG_CRYPTO_DEV_QAT_C62X is not set -# CONFIG_CRYPTO_DEV_QAT_4XXX is not set -# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set -# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set -# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set -# CONFIG_CAVIUM_CPT is not set -# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set -# CONFIG_CRYPTO_DEV_OCTEONTX_CPT is not set -# CONFIG_CRYPTO_DEV_CAVIUM_ZIP is not set -# CONFIG_CRYPTO_DEV_QCE is not set -# CONFIG_CRYPTO_DEV_QCOM_RNG is not set -# CONFIG_CRYPTO_DEV_IMGTEC_HASH is not set -# CONFIG_CRYPTO_DEV_ZYNQMP_AES is not set -# CONFIG_CRYPTO_DEV_ZYNQMP_SHA3 is not set -# CONFIG_CRYPTO_DEV_VIRTIO is not set -# CONFIG_CRYPTO_DEV_SAFEXCEL is not set -# CONFIG_CRYPTO_DEV_CCREE is not set -# CONFIG_CRYPTO_DEV_HISI_SEC is not set -# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set -# CONFIG_CRYPTO_DEV_SA2UL is not set -# CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4 is not set -# CONFIG_CRYPTO_DEV_KEEMBAY_OCS_ECC is not set -# CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU is not set -# CONFIG_CRYPTO_DEV_ASPEED is not set -CONFIG_SPACEMIT_REE_ENGINE=y -CONFIG_SPACEMIT_REE_AES=y -# CONFIG_SPACEMIT_CRYPTO_DEBUG is not set -# CONFIG_SPACEMIT_CRYPTO_SELF_TEST is not set -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y -CONFIG_X509_CERTIFICATE_PARSER=y -# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set -CONFIG_PKCS7_MESSAGE_PARSER=y -# CONFIG_PKCS7_TEST_KEY is not set -# CONFIG_SIGNED_PE_FILE_VERIFICATION is not set -# CONFIG_FIPS_SIGNATURE_SELFTEST is not set - -# -# Certificates for signature checking -# -CONFIG_SYSTEM_TRUSTED_KEYRING=y -CONFIG_SYSTEM_TRUSTED_KEYS="" -# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set -# CONFIG_SECONDARY_TRUSTED_KEYRING is not set -# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set -# end of Certificates for signature checking - -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_RAID6_PQ=y -CONFIG_RAID6_PQ_BENCHMARK=y -CONFIG_LINEAR_RANGES=y -# CONFIG_PACKING is not set -CONFIG_BITREVERSE=y -CONFIG_GENERIC_STRNCPY_FROM_USER=y -CONFIG_GENERIC_STRNLEN_USER=y -CONFIG_GENERIC_NET_UTILS=y -# CONFIG_CORDIC is not set -# CONFIG_PRIME_NUMBERS is not set -CONFIG_RATIONAL=y -CONFIG_GENERIC_PCI_IOMAP=y - -# -# Crypto library routines -# -CONFIG_CRYPTO_LIB_UTILS=y -CONFIG_CRYPTO_LIB_AES=y -CONFIG_CRYPTO_LIB_ARC4=m -CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y -CONFIG_CRYPTO_LIB_CHACHA_GENERIC=y -CONFIG_CRYPTO_LIB_CHACHA=m -CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=y -CONFIG_CRYPTO_LIB_CURVE25519=m -CONFIG_CRYPTO_LIB_DES=y -CONFIG_CRYPTO_LIB_POLY1305_RSIZE=1 -CONFIG_CRYPTO_LIB_POLY1305_GENERIC=y -CONFIG_CRYPTO_LIB_POLY1305=m -CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m -CONFIG_CRYPTO_LIB_SHA1=y -CONFIG_CRYPTO_LIB_SHA256=y -# end of Crypto library routines - -CONFIG_CRC_CCITT=y -CONFIG_CRC16=y -CONFIG_CRC_T10DIF=y -CONFIG_CRC64_ROCKSOFT=y -CONFIG_CRC_ITU_T=y -CONFIG_CRC32=y -# CONFIG_CRC32_SELFTEST is not set -CONFIG_CRC32_SLICEBY8=y -# CONFIG_CRC32_SLICEBY4 is not set -# CONFIG_CRC32_SARWATE is not set -# CONFIG_CRC32_BIT is not set -CONFIG_CRC64=y -# CONFIG_CRC4 is not set -# CONFIG_CRC7 is not set -CONFIG_LIBCRC32C=y -# CONFIG_CRC8 is not set -CONFIG_XXHASH=y -CONFIG_AUDIT_GENERIC=y -# 
CONFIG_RANDOM32_SELFTEST is not set -CONFIG_842_COMPRESS=m -CONFIG_842_DECOMPRESS=m -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=y -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_LZ4_COMPRESS=m -CONFIG_LZ4HC_COMPRESS=m -CONFIG_LZ4_DECOMPRESS=y -CONFIG_ZSTD_COMMON=y -CONFIG_ZSTD_COMPRESS=y -CONFIG_ZSTD_DECOMPRESS=y -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -# CONFIG_XZ_DEC_MICROLZMA is not set -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_XZ=y -CONFIG_DECOMPRESS_LZO=y -CONFIG_DECOMPRESS_LZ4=y -CONFIG_DECOMPRESS_ZSTD=y -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_REED_SOLOMON=y -CONFIG_REED_SOLOMON_DEC8=y -CONFIG_TEXTSEARCH=y -CONFIG_TEXTSEARCH_KMP=m -CONFIG_TEXTSEARCH_BM=m -CONFIG_TEXTSEARCH_FSM=m -CONFIG_INTERVAL_TREE=y -CONFIG_XARRAY_MULTI=y -CONFIG_ASSOCIATIVE_ARRAY=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT_MAP=y -CONFIG_HAS_DMA=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_DMA_DECLARE_COHERENT=y -CONFIG_ARCH_HAS_SETUP_DMA_OPS=y -CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y -CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y -CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y -CONFIG_SWIOTLB=y -# CONFIG_DMA_RESTRICTED_POOL is not set -CONFIG_DMA_NONCOHERENT_MMAP=y -CONFIG_DMA_COHERENT_POOL=y -CONFIG_DMA_DIRECT_REMAP=y -CONFIG_DMA_CMA=y -# CONFIG_DMA_PERNUMA_CMA is not set - -# -# Default contiguous memory area size: -# -CONFIG_CMA_SIZE_MBYTES=16 -CONFIG_CMA_SIZE_SEL_MBYTES=y -# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set -# CONFIG_CMA_SIZE_SEL_MIN is not set -# CONFIG_CMA_SIZE_SEL_MAX is not set -CONFIG_CMA_ALIGNMENT=8 -# CONFIG_DMA_API_DEBUG is not set -# CONFIG_DMA_MAP_BENCHMARK is not set -CONFIG_SGL_ALLOC=y -# CONFIG_FORCE_NR_CPUS is not set -CONFIG_CPU_RMAP=y -CONFIG_DQL=y -CONFIG_GLOB=y -# CONFIG_GLOB_SELFTEST is not set -CONFIG_NLATTR=y -CONFIG_CLZ_TAB=y -# CONFIG_IRQ_POLL is not set -CONFIG_MPILIB=y -CONFIG_LIBFDT=y -CONFIG_OID_REGISTRY=y -CONFIG_UCS2_STRING=y -CONFIG_HAVE_GENERIC_VDSO=y -CONFIG_GENERIC_GETTIMEOFDAY=y -CONFIG_GENERIC_VDSO_TIME_NS=y -CONFIG_FONT_SUPPORT=y -# CONFIG_FONTS is not set -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y -CONFIG_SG_POOL=y -CONFIG_ARCH_STACKWALK=y -CONFIG_STACKDEPOT=y -CONFIG_SBITMAP=y -# CONFIG_PARMAN is not set -# CONFIG_OBJAGG is not set -# end of Library routines - -CONFIG_GENERIC_IOREMAP=y -CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y - -# -# Kernel hacking -# - -# -# printk and dmesg options -# -CONFIG_PRINTK_TIME=y -# CONFIG_PRINTK_CALLER is not set -# CONFIG_STACKTRACE_BUILD_ID is not set -CONFIG_CONSOLE_LOGLEVEL_DEFAULT=8 -CONFIG_CONSOLE_LOGLEVEL_QUIET=8 -CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 -# CONFIG_BOOT_PRINTK_DELAY is not set -# CONFIG_DYNAMIC_DEBUG is not set -# CONFIG_DYNAMIC_DEBUG_CORE is not set -CONFIG_SYMBOLIC_ERRNAME=y -# end of printk and dmesg options - -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_MISC=y - -# -# Compile-time checks and compiler options -# -CONFIG_AS_HAS_NON_CONST_LEB128=y -CONFIG_DEBUG_INFO_NONE=y -# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set -# CONFIG_DEBUG_INFO_DWARF4 is not set -# CONFIG_DEBUG_INFO_DWARF5 is not set -CONFIG_FRAME_WARN=2048 -# CONFIG_STRIP_ASM_SYMS is not set -# CONFIG_READABLE_ASM is not set -# CONFIG_HEADERS_INSTALL is not set -# CONFIG_DEBUG_SECTION_MISMATCH is not set -CONFIG_SECTION_MISMATCH_WARN_ONLY=y -CONFIG_ARCH_WANT_FRAME_POINTERS=y -CONFIG_FRAME_POINTER=y -# CONFIG_VMLINUX_MAP is not set -# 
CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -# end of Compile-time checks and compiler options - -# -# Generic Kernel Debugging Instruments -# -CONFIG_MAGIC_SYSRQ=y -CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 -CONFIG_MAGIC_SYSRQ_SERIAL=y -CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" -CONFIG_DEBUG_FS=y -CONFIG_DEBUG_FS_ALLOW_ALL=y -# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set -# CONFIG_DEBUG_FS_ALLOW_NONE is not set -CONFIG_HAVE_ARCH_KGDB=y -CONFIG_HAVE_ARCH_KGDB_QXFER_PKT=y -# CONFIG_KGDB is not set -CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y -# CONFIG_UBSAN is not set -CONFIG_HAVE_KCSAN_COMPILER=y -# end of Generic Kernel Debugging Instruments - -# -# Networking Debugging -# -# CONFIG_NET_DEV_REFCNT_TRACKER is not set -# CONFIG_NET_NS_REFCNT_TRACKER is not set -# CONFIG_DEBUG_NET is not set -# end of Networking Debugging - -# -# Memory Debugging -# -# CONFIG_PAGE_EXTENSION is not set -# CONFIG_DEBUG_PAGEALLOC is not set -CONFIG_SLUB_DEBUG=y -# CONFIG_SLUB_DEBUG_ON is not set -# CONFIG_PAGE_OWNER is not set -# CONFIG_PAGE_TABLE_CHECK is not set -# CONFIG_PAGE_POISONING is not set -# CONFIG_DEBUG_PAGE_REF is not set -# CONFIG_DEBUG_RODATA_TEST is not set -CONFIG_ARCH_HAS_DEBUG_WX=y -# CONFIG_DEBUG_WX is not set -CONFIG_GENERIC_PTDUMP=y -# CONFIG_PTDUMP_DEBUGFS is not set -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_SHRINKER_DEBUG is not set -CONFIG_HAVE_DEBUG_KMEMLEAK=y -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_SCHED_STACK_END_CHECK is not set -CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y -# CONFIG_DEBUG_VM is not set -# CONFIG_DEBUG_VM_PGTABLE is not set -CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y -# CONFIG_DEBUG_VIRTUAL is not set -# CONFIG_DEBUG_MEMORY_INIT is not set -# CONFIG_DEBUG_PER_CPU_MAPS is not set -CONFIG_CC_HAS_KASAN_GENERIC=y -CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y -CONFIG_HAVE_ARCH_KFENCE=y -# CONFIG_KFENCE is not set -# end of Memory Debugging - -# CONFIG_DEBUG_SHIRQ is not set - -# -# Debug Oops, Lockups and Hangs -# -# CONFIG_PANIC_ON_OOPS is not set -CONFIG_PANIC_ON_OOPS_VALUE=0 -CONFIG_PANIC_TIMEOUT=0 -# CONFIG_SOFTLOCKUP_DETECTOR is not set -CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=60 -# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set -# CONFIG_WQ_WATCHDOG is not set -# CONFIG_TEST_LOCKUP is not set -# end of Debug Oops, Lockups and Hangs - -# -# Scheduler Debugging -# -CONFIG_SCHED_DEBUG=y -# CONFIG_SCHEDSTATS is not set -# end of Scheduler Debugging - -# CONFIG_DEBUG_TIMEKEEPING is not set - -# -# Lock Debugging (spinlocks, mutexes, etc...) -# -CONFIG_LOCK_DEBUGGING_SUPPORT=y -# CONFIG_PROVE_LOCKING is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set -# CONFIG_DEBUG_RWSEMS is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_LOCK_TORTURE_TEST is not set -# CONFIG_WW_MUTEX_SELFTEST is not set -# CONFIG_SCF_TORTURE_TEST is not set -# CONFIG_CSD_LOCK_WAIT_DEBUG is not set -# end of Lock Debugging (spinlocks, mutexes, etc...) 
- -# CONFIG_DEBUG_IRQFLAGS is not set -CONFIG_STACKTRACE=y -# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set -# CONFIG_DEBUG_KOBJECT is not set - -# -# Debug kernel data structures -# -# CONFIG_DEBUG_LIST is not set -# CONFIG_DEBUG_PLIST is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_BUG_ON_DATA_CORRUPTION is not set -# CONFIG_DEBUG_MAPLE_TREE is not set -# end of Debug kernel data structures - -# CONFIG_DEBUG_CREDENTIALS is not set - -# -# RCU Debugging -# -# CONFIG_RCU_SCALE_TEST is not set -# CONFIG_RCU_TORTURE_TEST is not set -# CONFIG_RCU_REF_SCALE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=21 -CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 -# CONFIG_RCU_TRACE is not set -# CONFIG_RCU_EQS_DEBUG is not set -# end of RCU Debugging - -# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set -# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set -# CONFIG_LATENCYTOP is not set -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_TRACE_CLOCK=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_TRACING=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -# CONFIG_BOOTTIME_TRACING is not set -# CONFIG_FUNCTION_TRACER is not set -# CONFIG_STACK_TRACER is not set -# CONFIG_IRQSOFF_TRACER is not set -# CONFIG_SCHED_TRACER is not set -# CONFIG_HWLAT_TRACER is not set -# CONFIG_OSNOISE_TRACER is not set -# CONFIG_TIMERLAT_TRACER is not set -# CONFIG_ENABLE_DEFAULT_TRACERS is not set -# CONFIG_FTRACE_SYSCALLS is not set -# CONFIG_TRACER_SNAPSHOT is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -# CONFIG_PROFILE_ALL_BRANCHES is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -CONFIG_UPROBE_EVENTS=y -CONFIG_BPF_EVENTS=y -CONFIG_DYNAMIC_EVENTS=y -CONFIG_PROBE_EVENTS=y -# CONFIG_SYNTH_EVENTS is not set -# CONFIG_USER_EVENTS is not set -# CONFIG_TRACE_EVENT_INJECT is not set -# CONFIG_TRACEPOINT_BENCHMARK is not set -# CONFIG_RING_BUFFER_BENCHMARK is not set -# CONFIG_TRACE_EVAL_MAP_FILE is not set -# CONFIG_RING_BUFFER_STARTUP_TEST is not set -# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set -# CONFIG_PREEMPTIRQ_DELAY_TEST is not set -# CONFIG_RV is not set -# CONFIG_SAMPLES is not set -# CONFIG_STRICT_DEVMEM is not set - -# -# riscv Debugging -# -# end of riscv Debugging - -# -# Kernel Testing and Coverage -# -# CONFIG_KUNIT is not set -# CONFIG_NOTIFIER_ERROR_INJECTION is not set -# CONFIG_FAULT_INJECTION is not set -CONFIG_ARCH_HAS_KCOV=y -CONFIG_CC_HAS_SANCOV_TRACE_PC=y -# CONFIG_KCOV is not set -# CONFIG_RUNTIME_TESTING_MENU is not set -CONFIG_ARCH_USE_MEMTEST=y -# CONFIG_MEMTEST is not set -# end of Kernel Testing and Coverage - -# -# Rust hacking -# -# end of Rust hacking - -# CONFIG_WARN_MISSING_DOCUMENTS is not set -# CONFIG_WARN_ABI_ERRORS is not set -# end of Kernel hacking diff --git a/config/sources/families/spacemit.conf b/config/sources/families/spacemit.conf index 0d67fd252f28..62ebd3ba4269 100644 --- a/config/sources/families/spacemit.conf +++ b/config/sources/families/spacemit.conf @@ -14,44 +14,27 @@ declare -g GOVERNOR="performance" # Arm Trusted Firmware declare -g ATF_USE_GCC="> 8.0" declare -g ATF_COMPILER="riscv64-linux-gnu-" -declare -g ATFSOURCE="https://github.com/riscv-software-src/opensbi.git" +declare -g ATFSOURCE="https://gitee.com/bianbu-linux/opensbi.git" declare -g ATFDIR="opensbi" -declare -g 
ATFBRANCH="tag:v1.3"
+declare -g ATFBRANCH="tag:v2.0.4"
 declare -g ATF_TARGET_MAP="PLATFORM_DEFCONFIG=k1_defconfig PLATFORM=generic ;;build/platform/generic/firmware/fw_dynamic.itb"
-# declare -g ATFPATCHDIR="atf-spacemit"

 # U-Boot
 declare -g BOOTSOURCE="https://gitee.com/bianbu-linux/uboot-2022.10.git"
-declare -g BOOTBRANCH="${BOOTBRANCH_BOARD:-"tag:v1.0.15"}"
+declare -g BOOTBRANCH="${BOOTBRANCH_BOARD:-"tag:v2.0.4"}"
 declare -g BOOTDIR='u-boot-spacemit'
 declare -g BOOTPATCHDIR="${BOOTPATCHDIR:-"legacy/u-boot-spacemit-k1"}"
 declare -g UBOOT_TARGET_MAP=";;bootinfo_emmc.bin FSBL.bin u-boot.itb fw_dynamic.itb"
 declare -g BOOTCONFIG="k1_defconfig" # Boot with generic SpacemiT K1 config

-# Skip all wifi drivers
-declare -g KERNEL_DRIVERS_SKIP+=(driver_generic_bring_back_ipx driver_mt7921u_add_pids driver_rtl8152_rtl8153 driver_rtl8189ES
-    driver_rtl8189FS driver_rtl8192EU driver_rtl8811_rtl8812_rtl8814_rtl8821 driver_xradio_xr819 driver_rtl8811CU_rtl8821C
-    driver_rtl8188EU_rtl8188ETV driver_rtl88x2bu driver_rtw88 driver_rtl88x2cs driver_rtl8822cs_bt driver_rtl8723DS driver_rtl8723DU
-    driver_rtl8822BS driver_uwe5622 driver_rtl8723cs)
-
+# Linux
 case "${BRANCH}" in
-
-    legacy)
-        # Kernel
-        declare -g KERNELSOURCE='https://gitee.com/bianbu-linux/linux-6.1.git'
-        declare -g KERNELBRANCH='tag:v1.0.15'
-        declare -g KERNEL_MAJOR_MINOR="6.1"
-        declare -g LINUXCONFIG="linux-${LINUXFAMILY}-${BRANCH}"
-        declare -g KERNELPATCHDIR="${LINUXFAMILY}-legacy-${KERNEL_MAJOR_MINOR}" # Needed as long as both legacy and current are 6.1 kernels
-        declare -g EXTRAWIFI="no" # WiFi drivers are already included in th legacy kernel
-        ;;
-
     current)
-        # Kernel
-        declare -g KERNEL_MAJOR_MINOR="6.1"
-        declare -g KERNELBRANCH='tag:v6.1.102' # last known good tag on current patchset
-        declare -g LINUXCONFIG="linux-${LINUXFAMILY}-${KERNEL_MAJOR_MINOR}"
-        # No need to set KERNELPATCHDIR, since default is: KERNELPATCHDIR='archive/${LINUXFAMILY}-${KERNEL_MAJOR_MINOR}'
+        declare -g KERNELSOURCE='https://github.com/jmontleon/linux-bianbu.git'
+        declare -g KERNELBRANCH='commit:46927cc73d6e7261fb89dadc2295f1054c9a1fa9'
+        declare -g EXTRAWIFI="no" # WiFi drivers are already included in the kernel
+        declare -g KERNEL_MAJOR_MINOR="6.6"
+        declare -g LINUXCONFIG="linux-${LINUXFAMILY}-current"
         ;;
 esac
@@ -73,34 +56,21 @@ pre_prepare_partitions() {
 write_uboot_platform() {
     local device=${2}
-
-    declare -A d
-    d=(
-        [${1}/bootinfo_emmc.bin]="0:$(du -b ${1}/bootinfo_emmc.bin | awk '{print $1}')"
-        [${1}/FSBL.bin]="512:$(du -b ${1}/FSBL.bin | awk '{print $1}')"
-    )
-
     if [ -b ${2}boot0 ]; then
-        device=${2}boot0
-        echo 0 > /sys/block/$(basename ${device})/force_ro
-        sync
+        echo "eMMC"
+        DEVICE=`ls /dev/mmcblk*boot0 | sed 's/^.....//'`
+        echo 0 > /sys/block/${DEVICE}/force_ro
+        sleep .50
+        dd if="$1/bootinfo_emmc.bin" of="/dev/${DEVICE}" bs=512 conv=notrunc
+        dd if="$1/FSBL.bin" of="/dev/${DEVICE}" bs=512 seek=1 conv=notrunc
+        dd if="$1/FSBL.bin" of="/dev/${DEVICE}" bs=512 seek=512 conv=notrunc
+    else
+        echo "SD card"
+        dd if="$1/bootinfo_emmc.bin" of=$2 bs=512 conv=notrunc
+        dd if="$1/FSBL.bin" of="$2" bs=512 seek=1 conv=notrunc
+        dd if="$1/FSBL.bin" of="$2" bs=512 seek=512 conv=notrunc
     fi
-
-    for f in "${!d[@]}"
-    do
-        if $(dd if=${device} bs=1 skip="${d[$f]%:*}" count="${d[$f]#*:}" \
-            conv=notrunc status=noxfer 2>/dev/null | cmp --quiet "${f}")
-        then
-            echo "Skip $(basename $f), it is equal to the existing one"
-        else
-            echo "# Write =: $(basename $f) to ${device}"
-            dd if=$f of=${device} bs=1 seek="${d[$f]%:*}" conv=notrunc status=noxfer
- sync - fi - done - - dd if=$1/fw_dynamic.itb of=${2} bs=512 seek=1280 conv=notrunc - sync - dd if=$1/u-boot.itb of=${2} bs=512 seek=2048 conv=notrunc + dd if="$1/fw_dynamic.itb" of="$2" bs=512 seek=1280 conv=notrunc + dd if="$1/u-boot.itb" of="$2" bs=512 seek=2048 conv=notrunc sync } diff --git a/patch/atf/atf-spacemit/001-Update-for-v1.0alpha2.patch b/patch/atf/atf-spacemit/001-Update-for-v1.0alpha2.patch deleted file mode 100644 index 8092244a7eb8..000000000000 --- a/patch/atf/atf-spacemit/001-Update-for-v1.0alpha2.patch +++ /dev/null @@ -1,6923 +0,0 @@ -From 59ac4b4bb09bfb031369a2b40156f4a1c94b93cd Mon Sep 17 00:00:00 2001 -From: James Deng -Date: Fri, 1 Mar 2024 19:54:35 +0800 -Subject: Update for v1.0alpha2 - ---- - Makefile | 17 +- - firmware/fw_base.ldS | 5 + - include/sbi/riscv_encoding.h | 2 + - include/sbi/sbi_ecall_interface.h | 4 + - include/sbi/sbi_hsm.h | 8 +- - include/sbi_utils/cache/cacheflush.h | 192 ++++ - include/sbi_utils/cci/cci.h | 27 + - include/sbi_utils/irqchip/fdt_irqchip_plic.h | 2 + - .../psci/drivers/arm/css/css_mhu_doorbell.h | 14 + - .../sbi_utils/psci/drivers/arm/css/css_scp.h | 10 + - include/sbi_utils/psci/drivers/arm/css/scmi.h | 141 +++ - .../psci/drivers/arm/css/scmi_private.h | 146 +++ - .../arm/board/spacemit/include/platform_def.h | 10 + - .../sbi_utils/psci/plat/arm/common/arm_def.h | 19 + - .../sbi_utils/psci/plat/arm/common/plat_arm.h | 21 + - .../psci/plat/arm/css/common/css_pm.h | 36 + - include/sbi_utils/psci/plat/common/platform.h | 13 + - include/sbi_utils/psci/psci.h | 223 +++++ - include/sbi_utils/psci/psci_lib.h | 8 + - lib/sbi/sbi_ecall_base.c | 10 + - lib/sbi/sbi_ecall_hsm.c | 5 + - lib/sbi/sbi_hart.c | 2 + - lib/sbi/sbi_hsm.c | 80 +- - lib/sbi/sbi_init.c | 10 +- - lib/sbi/sbi_pmu.c | 30 +- - lib/sbi/sbi_scratch.c | 8 +- - lib/utils/Kconfig | 2 + - .../arm_scmi/board/spacemit/spacemit_pm.c | 41 + - lib/utils/arm_scmi/common/arm_pm.c | 68 ++ - lib/utils/arm_scmi/css/common/css_pm.c | 298 ++++++ - lib/utils/arm_scmi/css/mhu/css_mhu_doorbell.c | 27 + - lib/utils/arm_scmi/css/mhu/mhu.h | 130 +++ - lib/utils/arm_scmi/css/scmi/scmi_common.c | 228 +++++ - .../arm_scmi/css/scmi/scmi_pwr_dmn_proto.c | 102 ++ - .../arm_scmi/css/scmi/scmi_sys_pwr_proto.c | 90 ++ - lib/utils/arm_scmi/css/scp/css_pm_scmi.c | 418 +++++++++ - lib/utils/arm_scmi/objects.mk | 24 + - lib/utils/cci/bus-cci.c | 168 ++++ - lib/utils/cci/objects.mk | 7 + - lib/utils/ipi/aclint_mswi.c | 4 +- - lib/utils/irqchip/fdt_irqchip_plic.c | 5 + - lib/utils/psci/Kconfig | 21 + - lib/utils/psci/objects.mk | 30 + - lib/utils/psci/psci_common.c | 872 ++++++++++++++++++ - lib/utils/psci/psci_main.c | 188 ++++ - lib/utils/psci/psci_off.c | 173 ++++ - lib/utils/psci/psci_on.c | 246 +++++ - lib/utils/psci/psci_private.h | 198 ++++ - lib/utils/psci/psci_setup.c | 242 +++++ - lib/utils/psci/psci_suspend.c | 298 ++++++ - .../spacemit/plat/k1x/underly_implement.c | 345 +++++++ - lib/utils/psci/spacemit/plat/plat_pm.c | 258 ++++++ - .../psci/spacemit/plat/underly_implement.h | 14 + - lib/utils/psci/spacemit/spacemit_topology.c | 26 + - lib/utils/serial/uart8250.c | 5 + - lib/utils/timer/aclint_mtimer.c | 4 +- - platform/generic/Kconfig | 36 + - platform/generic/configs/defconfig | 3 +- - .../generic/configs/k1-x_fpga_1x4_defconfig | 16 + - .../generic/configs/k1-x_fpga_2x2_defconfig | 16 + - platform/generic/configs/k1-x_fpga_defconfig | 16 + - platform/generic/configs/k1_defconfig | 16 + - .../include/spacemit/k1x/core_common.h | 13 + - .../generic/include/spacemit/k1x/k1x_evb.h | 72 ++ - 
.../generic/include/spacemit/k1x/k1x_fpga.h | 73 ++ - .../include/spacemit/spacemit_config.h | 30 + - platform/generic/objects.mk | 2 +- - platform/generic/spacemit/fw_dynamic.its | 31 + - platform/generic/spacemit/objects.mk | 7 + - platform/generic/spacemit/spacemit_k1.c | 194 ++++ - 71 files changed, 6067 insertions(+), 33 deletions(-) - create mode 100644 include/sbi_utils/cache/cacheflush.h - create mode 100644 include/sbi_utils/cci/cci.h - create mode 100644 include/sbi_utils/psci/drivers/arm/css/css_mhu_doorbell.h - create mode 100644 include/sbi_utils/psci/drivers/arm/css/css_scp.h - create mode 100644 include/sbi_utils/psci/drivers/arm/css/scmi.h - create mode 100644 include/sbi_utils/psci/drivers/arm/css/scmi_private.h - create mode 100644 include/sbi_utils/psci/plat/arm/board/spacemit/include/platform_def.h - create mode 100644 include/sbi_utils/psci/plat/arm/common/arm_def.h - create mode 100644 include/sbi_utils/psci/plat/arm/common/plat_arm.h - create mode 100644 include/sbi_utils/psci/plat/arm/css/common/css_pm.h - create mode 100644 include/sbi_utils/psci/plat/common/platform.h - create mode 100644 include/sbi_utils/psci/psci.h - create mode 100644 include/sbi_utils/psci/psci_lib.h - create mode 100644 lib/utils/arm_scmi/board/spacemit/spacemit_pm.c - create mode 100644 lib/utils/arm_scmi/common/arm_pm.c - create mode 100644 lib/utils/arm_scmi/css/common/css_pm.c - create mode 100644 lib/utils/arm_scmi/css/mhu/css_mhu_doorbell.c - create mode 100644 lib/utils/arm_scmi/css/mhu/mhu.h - create mode 100644 lib/utils/arm_scmi/css/scmi/scmi_common.c - create mode 100644 lib/utils/arm_scmi/css/scmi/scmi_pwr_dmn_proto.c - create mode 100644 lib/utils/arm_scmi/css/scmi/scmi_sys_pwr_proto.c - create mode 100644 lib/utils/arm_scmi/css/scp/css_pm_scmi.c - create mode 100644 lib/utils/arm_scmi/objects.mk - create mode 100644 lib/utils/cci/bus-cci.c - create mode 100644 lib/utils/cci/objects.mk - create mode 100644 lib/utils/psci/Kconfig - create mode 100644 lib/utils/psci/objects.mk - create mode 100644 lib/utils/psci/psci_common.c - create mode 100644 lib/utils/psci/psci_main.c - create mode 100644 lib/utils/psci/psci_off.c - create mode 100644 lib/utils/psci/psci_on.c - create mode 100644 lib/utils/psci/psci_private.h - create mode 100644 lib/utils/psci/psci_setup.c - create mode 100644 lib/utils/psci/psci_suspend.c - create mode 100644 lib/utils/psci/spacemit/plat/k1x/underly_implement.c - create mode 100644 lib/utils/psci/spacemit/plat/plat_pm.c - create mode 100644 lib/utils/psci/spacemit/plat/underly_implement.h - create mode 100644 lib/utils/psci/spacemit/spacemit_topology.c - create mode 100644 platform/generic/configs/k1-x_fpga_1x4_defconfig - create mode 100644 platform/generic/configs/k1-x_fpga_2x2_defconfig - create mode 100644 platform/generic/configs/k1-x_fpga_defconfig - create mode 100644 platform/generic/configs/k1_defconfig - create mode 100644 platform/generic/include/spacemit/k1x/core_common.h - create mode 100644 platform/generic/include/spacemit/k1x/k1x_evb.h - create mode 100644 platform/generic/include/spacemit/k1x/k1x_fpga.h - create mode 100644 platform/generic/include/spacemit/spacemit_config.h - create mode 100755 platform/generic/spacemit/fw_dynamic.its - create mode 100644 platform/generic/spacemit/objects.mk - create mode 100644 platform/generic/spacemit/spacemit_k1.c - -diff --git a/Makefile b/Makefile -index 730dbd910e51..468f8a30e89a 100644 ---- a/Makefile -+++ b/Makefile -@@ -114,6 +114,7 @@ endif - CPP = $(CC) -E - AS = $(CC) - DTC = dtc -+MKIMAGE = 
mkimage - - ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) - CC_IS_CLANG = y -@@ -245,6 +246,9 @@ ifdef PLATFORM - libsbiutils-objs-path-y=$(foreach obj,$(libsbiutils-objs-y),$(platform_build_dir)/lib/utils/$(obj)) - platform-objs-path-y=$(foreach obj,$(platform-objs-y),$(platform_build_dir)/$(obj)) - firmware-bins-path-y=$(foreach bin,$(firmware-bins-y),$(platform_build_dir)/firmware/$(bin)) -+firmware-itb-path-y=$(foreach its,$(firmware-its-y),$(platform_build_dir)/firmware/$(basename $(notdir $(its))).itb) -+platform_build_itb_dir=$(patsubst %/,%,$(dir $(firstword $(firmware-itb-path-y)))) -+platform_src_its_dir=$(patsubst %/,%,$(platform_src_dir)/$(dir $(firstword $(firmware-its-y)))) - endif - firmware-elfs-path-y=$(firmware-bins-path-y:.bin=.elf) - firmware-objs-path-y=$(firmware-bins-path-y:.bin=.o) -@@ -342,7 +346,7 @@ CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls -mstrict-align - ifeq ($(CC_SUPPORT_SAVE_RESTORE),y) - CFLAGS += -mno-save-restore - endif --CFLAGS += -mabi=$(PLATFORM_RISCV_ABI) -march=$(PLATFORM_RISCV_ISA) -+CFLAGS += -mabi=$(PLATFORM_RISCV_ABI) -march=$(PLATFORM_RISCV_ISA)_zicbom - CFLAGS += -mcmodel=$(PLATFORM_RISCV_CODE_MODEL) - CFLAGS += $(RELAX_FLAG) - CFLAGS += $(GENFLAGS) -@@ -360,7 +364,7 @@ ASFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls -mstrict-align - ifeq ($(CC_SUPPORT_SAVE_RESTORE),y) - ASFLAGS += -mno-save-restore - endif --ASFLAGS += -mabi=$(PLATFORM_RISCV_ABI) -march=$(PLATFORM_RISCV_ISA) -+ASFLAGS += -mabi=$(PLATFORM_RISCV_ABI) -march=$(PLATFORM_RISCV_ISA)_zicbom - ASFLAGS += -mcmodel=$(PLATFORM_RISCV_CODE_MODEL) - ASFLAGS += $(RELAX_FLAG) - ifneq ($(CC_IS_CLANG),y) -@@ -468,12 +472,16 @@ compile_carray = $(CMD_PREFIX)mkdir -p `dirname $(1)`; \ - compile_gen_dep = $(CMD_PREFIX)mkdir -p `dirname $(1)`; \ - echo " GEN-DEP $(subst $(build_dir)/,,$(1))"; \ - echo "$(1:.dep=$(2)): $(3)" >> $(1) -+compile_itb = \ -+ $(CMD_PREFIX)echo " ITB $(subst $(build_dir)/,,$(1))"; \ -+ $(MKIMAGE) -f $(2) -r $(1) - - targets-y = $(build_dir)/lib/libsbi.a - ifdef PLATFORM - targets-y += $(platform_build_dir)/lib/libplatsbi.a - endif - targets-y += $(firmware-bins-path-y) -+targets-y += $(firmware-itb-path-y) - - # The default "make all" rule - .PHONY: all -@@ -579,6 +587,11 @@ $(platform_build_dir)/%.dep: $(src_dir)/%.S $(KCONFIG_CONFIG) - $(platform_build_dir)/%.o: $(src_dir)/%.S - $(call compile_as,$@,$<) - -+# Rules for fit image sources -+$(platform_build_itb_dir)/%.itb: $(platform_src_its_dir)/%.its $(firmware-bins-path-y) -+ $(call copy_file,$(dir $@)/$(notdir $<),$<) -+ $(call compile_itb,$@,$(basename $@).its) -+ - # Rule for "make docs" - $(build_dir)/docs/latex/refman.pdf: $(build_dir)/docs/latex/refman.tex - $(CMD_PREFIX)mkdir -p $(build_dir)/docs -diff --git a/firmware/fw_base.ldS b/firmware/fw_base.ldS -index 3d68484ba5e3..e214f9d63eaa 100644 ---- a/firmware/fw_base.ldS -+++ b/firmware/fw_base.ldS -@@ -96,6 +96,11 @@ - PROVIDE(_bss_end = .); - } - -+ /DISCARD/ : { -+ *(.eh_frame*) -+ *(.debug*) -+ } -+ - /* End of the read-write data sections */ - - . 
= ALIGN(0x1000); /* Need this to create proper sections */ -diff --git a/include/sbi/riscv_encoding.h b/include/sbi/riscv_encoding.h -index 4ebed97ab0a0..54e09d44528b 100644 ---- a/include/sbi/riscv_encoding.h -+++ b/include/sbi/riscv_encoding.h -@@ -708,6 +708,8 @@ - #define CSR_MVIPH 0x319 - #define CSR_MIPH 0x354 - -+#define CSR_TCMCFG 0x5DB -+ - /* ===== Trap/Exception Causes ===== */ - - #define CAUSE_MISALIGNED_FETCH 0x0 -diff --git a/include/sbi/sbi_ecall_interface.h b/include/sbi/sbi_ecall_interface.h -index 1fe469e37078..f29c22a106c2 100644 ---- a/include/sbi/sbi_ecall_interface.h -+++ b/include/sbi/sbi_ecall_interface.h -@@ -42,6 +42,10 @@ - #define SBI_EXT_BASE_GET_MARCHID 0x5 - #define SBI_EXT_BASE_GET_MIMPID 0x6 - -+#if defined(CONFIG_PLATFORM_SPACEMIT_K1PRO) || defined(CONFIG_PLATFORM_SPACEMIT_K1X) -+#define SBI_EXT_BASE_FLUSH_CACHE_ALL 0x7 -+#endif -+ - /* SBI function IDs for TIME extension*/ - #define SBI_EXT_TIME_SET_TIMER 0x0 - -diff --git a/include/sbi/sbi_hsm.h b/include/sbi/sbi_hsm.h -index 4b5601ba40ca..066456cb4382 100644 ---- a/include/sbi/sbi_hsm.h -+++ b/include/sbi/sbi_hsm.h -@@ -74,10 +74,16 @@ bool sbi_hsm_hart_change_state(struct sbi_scratch *scratch, long oldstate, - long newstate); - int __sbi_hsm_hart_get_state(u32 hartid); - int sbi_hsm_hart_get_state(const struct sbi_domain *dom, u32 hartid); -+ -+#ifdef CONFIG_ARM_PSCI_SUPPORT -+int __sbi_hsm_hart_get_psci_state(u32 hartid); -+int sbi_hsm_hart_get_psci_state(const struct sbi_domain *dom, u32 hartid); -+#endif -+ - int sbi_hsm_hart_interruptible_mask(const struct sbi_domain *dom, - ulong hbase, ulong *out_hmask); - void __sbi_hsm_suspend_non_ret_save(struct sbi_scratch *scratch); - void __noreturn sbi_hsm_hart_start_finish(struct sbi_scratch *scratch, -- u32 hartid); -+ u32 hartid, bool cool_boot); - - #endif -diff --git a/include/sbi_utils/cache/cacheflush.h b/include/sbi_utils/cache/cacheflush.h -new file mode 100644 -index 000000000000..c3e353229f75 ---- /dev/null -+++ b/include/sbi_utils/cache/cacheflush.h -@@ -0,0 +1,192 @@ -+#ifndef __CACHE_FLUSH__H__ -+#define __CACHE_FLUSH__H__ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define __ALWAYS_STATIC_INLINE __attribute__((always_inline)) static inline -+ -+/** -+ \brief Clear Dcache by addr -+ \details Clear Dcache by addr. -+ \param [in] addr operate addr -+ */ -+__ALWAYS_STATIC_INLINE void __DCACHE_CPA(uintptr_t addr) -+{ -+ uintptr_t __v = addr; -+ asm volatile ("cbo.clean" " 0(%0)" : : "rK"(__v) : "memory"); -+} -+ -+/** -+ \brief Invalid Dcache by addr -+ \details Invalid Dcache by addr. -+ \param [in] addr operate addr -+ */ -+__ALWAYS_STATIC_INLINE void __DCACHE_IPA(uintptr_t addr) -+{ -+ uintptr_t __v = addr; -+ asm volatile ("cbo.inval" " 0(%0)" : : "rK"(__v) : "memory"); -+} -+ -+/** -+ \brief Clear & Invalid Dcache by addr -+ \details Clear & Invalid Dcache by addr. -+ \param [in] addr operate addr -+ */ -+__ALWAYS_STATIC_INLINE void __DCACHE_CIPA(uintptr_t addr) -+{ -+ uintptr_t __v = addr; -+ asm volatile ("cbo.flush" " 0(%0)" : : "rK"(__v) : "memory"); -+} -+ -+/** -+ \brief Get MSTATUS -+ \details Returns the content of the MSTATUS Register. 
-+ \return MSTATUS Register value -+ */ -+__ALWAYS_STATIC_INLINE uintptr_t __get_CurrentSP(void) -+{ -+ uintptr_t result; -+ -+ asm volatile("move %0, sp" : "=r"(result)); -+ -+ return (result); -+} -+ -+__ALWAYS_STATIC_INLINE uintptr_t __get_Supervisor_isr(void) -+{ -+ uintptr_t result; -+ -+ asm volatile("csrr %0, mip" : "=r"(result)); -+ -+ return (result & 0x222); -+} -+/** -+ \brief D-Cache Clean by address -+ \details Cleans D-Cache for the given address -+ \param[in] addr address (aligned to 32-byte boundary) -+ \param[in] dsize size of memory block (in number of bytes) -+*/ -+static inline void csi_dcache_clean_range (uintptr_t addr, unsigned int dsize) -+{ -+ int op_size = dsize + addr % CACHE_LINE_SIZE; -+ uintptr_t op_addr = addr & CACHE_INV_ADDR_Msk; -+ -+ asm volatile("fence rw, rw"); -+ -+ while (op_size > 0) { -+ __DCACHE_CPA(op_addr); -+ op_addr += CACHE_LINE_SIZE; -+ op_size -= CACHE_LINE_SIZE; -+ } -+ -+ asm volatile("fence rw, rw"); -+ asm volatile("fence.i"); -+} -+ -+/** -+ \brief D-Cache Clean and Invalidate by address -+ \details Cleans and invalidates D_Cache for the given address -+ \param[in] addr address (aligned to 32-byte boundary) -+ \param[in] dsize size of memory block (aligned to 16-byte boundary) -+*/ -+static inline void csi_dcache_clean_invalid_range (uintptr_t addr, unsigned int dsize) -+{ -+ int op_size = dsize + addr % CACHE_LINE_SIZE; -+ uintptr_t op_addr = addr & CACHE_INV_ADDR_Msk; -+ -+ asm volatile("fence rw, rw"); -+ -+ while (op_size > 0) { -+ __DCACHE_CIPA(op_addr); -+ op_addr += CACHE_LINE_SIZE; -+ op_size -= CACHE_LINE_SIZE; -+ } -+ -+ asm volatile("fence rw, rw"); -+ asm volatile("fence.i"); -+} -+ -+/** -+ \brief D-Cache Invalidate by address -+ \details Invalidates D-Cache for the given address -+ \param[in] addr address (aligned to 32-byte boundary) -+ \param[in] dsize size of memory block (in number of bytes) -+*/ -+static inline void csi_dcache_invalid_range (uintptr_t addr, unsigned int dsize) -+{ -+ int op_size = dsize + addr % CACHE_LINE_SIZE; -+ uintptr_t op_addr = addr & CACHE_INV_ADDR_Msk; -+ -+ asm volatile("fence rw, rw"); -+ -+ while (op_size > 0) { -+ __DCACHE_IPA(op_addr); -+ op_addr += CACHE_LINE_SIZE; -+ op_size -= CACHE_LINE_SIZE; -+ } -+ -+ asm volatile("fence rw, rw"); -+ asm volatile("fence.i"); -+} -+ -+static inline void csi_enable_dcache(void) -+{ -+ csr_set(CSR_MSETUP, 0x10073); -+} -+ -+static inline void csi_disable_data_preftch(void) -+{ -+ csr_clear(CSR_MSETUP, 32); -+} -+ -+static inline void csi_disable_dcache(void) -+{ -+ csr_clear(CSR_MSETUP, 1); -+} -+ -+static inline void csi_flush_dcache_all(void) -+{ -+ asm volatile ("csrwi 0x7c2, 0x3"); -+} -+ -+static inline void csi_invalidate_dcache_all(void) -+{ -+ asm volatile ("csrwi 0x7c2, 0x2"); -+} -+ -+static inline void __mdelay(void) -+{ -+ unsigned long long i; -+ -+ for (i = 0; i < 0xffffffff; ++i) -+ cpu_relax(); -+} -+ -+static inline void csi_flush_l2_cache(void) -+{ -+ unsigned int hartid = current_hartid(); -+ -+ uintptr_t *cr =(MPIDR_AFFLVL1_VAL(hartid) == 0) ? 
(uintptr_t *)CLUSTER0_L2_CACHE_FLUSH_REG_BASE : -+ (uintptr_t *)CLUSTER1_L2_CACHE_FLUSH_REG_BASE; -+ -+ /* flush l2 cache */ -+ writel(readl(cr) | (1 << L2_CACHE_FLUSH_REQUEST_BIT_OFFSET), cr); -+ /* k1pro */ -+ if (L2_CACHE_FLUSH_REQUEST_BIT_OFFSET == L2_CACHE_FLUSH_DONE_BIT_OFFSET) -+ while (readl(cr) & (1 << L2_CACHE_FLUSH_DONE_BIT_OFFSET)); -+ else /* k1x */ { -+ /* clear the request */ -+ while (1) { -+ if ((readl(cr) & (1 << L2_CACHE_FLUSH_DONE_BIT_OFFSET)) == 0) -+ break; -+ __mdelay(); -+ } -+ writel(readl(cr) & ~(1 << L2_CACHE_FLUSH_REQUEST_BIT_OFFSET), cr); -+ } -+} -+#endif -diff --git a/include/sbi_utils/cci/cci.h b/include/sbi_utils/cci/cci.h -new file mode 100644 -index 000000000000..c5b8b576d6e6 ---- /dev/null -+++ b/include/sbi_utils/cci/cci.h -@@ -0,0 +1,27 @@ -+/* -+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved. -+ * -+ * SPDX-License-Identifier: BSD-3-Clause -+ */ -+ -+#ifndef __CCI_H__ -+#define __CCI_H__ -+ -+/* Function declarations */ -+ -+/* -+ * The ARM CCI driver needs the following: -+ * 1. Base address of the CCI product -+ * 2. An array of map between AMBA 4 master ids and ACE/ACE lite slave -+ * interfaces. -+ * 3. Size of the array. -+ * -+ * SLAVE_IF_UNUSED should be used in the map to represent no AMBA 4 master exists -+ * for that interface. -+ */ -+void cci_init(uintptr_t base, const int *map, unsigned int num_cci_masters); -+ -+void cci_enable_snoop_dvm_reqs(unsigned int master_id); -+void cci_disable_snoop_dvm_reqs(unsigned int master_id); -+ -+#endif /* CCI_H */ -diff --git a/include/sbi_utils/irqchip/fdt_irqchip_plic.h b/include/sbi_utils/irqchip/fdt_irqchip_plic.h -index df645dd00ee3..b892b0bc70f8 100644 ---- a/include/sbi_utils/irqchip/fdt_irqchip_plic.h -+++ b/include/sbi_utils/irqchip/fdt_irqchip_plic.h -@@ -28,6 +28,8 @@ void fdt_plic_context_save(bool smode, u32 *enable, u32 *threshold, u32 num); - void fdt_plic_context_restore(bool smode, const u32 *enable, u32 threshold, - u32 num); - -+void fdt_plic_context_exit(void); -+ - void thead_plic_restore(void); - - #endif -diff --git a/include/sbi_utils/psci/drivers/arm/css/css_mhu_doorbell.h b/include/sbi_utils/psci/drivers/arm/css/css_mhu_doorbell.h -new file mode 100644 -index 000000000000..e49c7e2dca64 ---- /dev/null -+++ b/include/sbi_utils/psci/drivers/arm/css/css_mhu_doorbell.h -@@ -0,0 +1,14 @@ -+/* -+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved. 
-+ * -+ * SPDX-License-Identifier: BSD-3-Clause -+ */ -+ -+#ifndef CSS_MHU_DOORBELL_H -+#define CSS_MHU_DOORBELL_H -+ -+#include -+ -+void mhu_ring_doorbell(struct scmi_channel_plat_info *plat_info); -+ -+#endif /* CSS_MHU_DOORBELL_H */ -diff --git a/include/sbi_utils/psci/drivers/arm/css/css_scp.h b/include/sbi_utils/psci/drivers/arm/css/css_scp.h -new file mode 100644 -index 000000000000..f75eae64b856 ---- /dev/null -+++ b/include/sbi_utils/psci/drivers/arm/css/css_scp.h -@@ -0,0 +1,10 @@ -+#ifndef __CSS_SCP_H__ -+#define __CSS_SCP_H__ -+ -+#include -+ -+void css_scp_off(const struct psci_power_state *target_state); -+void css_scp_on(u_register_t mpidr); -+void css_scp_suspend(const struct psci_power_state *target_state); -+ -+#endif -diff --git a/include/sbi_utils/psci/drivers/arm/css/scmi.h b/include/sbi_utils/psci/drivers/arm/css/scmi.h -new file mode 100644 -index 000000000000..1e8c370d4510 ---- /dev/null -+++ b/include/sbi_utils/psci/drivers/arm/css/scmi.h -@@ -0,0 +1,141 @@ -+#ifndef __DRIVER_SCMI_H__ -+#define __DRIVER_SCMI_H__ -+ -+#include -+#include -+#include -+ -+#define GET_SCMI_MAJOR_VER(ver) (((ver) >> 16) & 0xffff) -+#define GET_SCMI_MINOR_VER(ver) ((ver) & 0xffff) -+ -+#define MAKE_SCMI_VERSION(maj, min) \ -+ ((((maj) & 0xffff) << 16) | ((min) & 0xffff)) -+ -+/* Supported SCMI Protocol Versions */ -+#define SCMI_AP_CORE_PROTO_VER MAKE_SCMI_VERSION(1, 0) -+#define SCMI_PWR_DMN_PROTO_VER MAKE_SCMI_VERSION(2, 0) -+#define SCMI_SYS_PWR_PROTO_VER MAKE_SCMI_VERSION(1, 0) -+ -+/* -+ * Check that the driver's version is same or higher than the reported SCMI -+ * version. We accept lower major version numbers, as all affected protocols -+ * so far stay backwards compatible. This might need to be revisited in the -+ * future. -+ */ -+#define is_scmi_version_compatible(drv, scmi) \ -+ ((GET_SCMI_MAJOR_VER(drv) > GET_SCMI_MAJOR_VER(scmi)) || \ -+ ((GET_SCMI_MAJOR_VER(drv) == GET_SCMI_MAJOR_VER(scmi)) && \ -+ (GET_SCMI_MINOR_VER(drv) <= GET_SCMI_MINOR_VER(scmi)))) -+ -+/* Mandatory messages IDs for all SCMI protocols */ -+#define SCMI_PROTO_VERSION_MSG 0x0 -+#define SCMI_PROTO_ATTR_MSG 0x1 -+#define SCMI_PROTO_MSG_ATTR_MSG 0x2 -+ -+/* SCMI power domain management protocol message IDs */ -+#define SCMI_PWR_STATE_SET_MSG 0x4 -+#define SCMI_PWR_STATE_GET_MSG 0x5 -+ -+/* SCMI system power management protocol message IDs */ -+#define SCMI_SYS_PWR_STATE_SET_MSG 0x3 -+#define SCMI_SYS_PWR_STATE_GET_MSG 0x4 -+ -+/* SCMI Protocol identifiers */ -+#define SCMI_PWR_DMN_PROTO_ID 0x11 -+#define SCMI_SYS_PWR_PROTO_ID 0x12 -+ -+/* -+ * Macros to describe the bit-fields of the `attribute` of system power domain -+ * protocol PROTOCOL_MSG_ATTRIBUTE message. -+ */ -+#define SYS_PWR_ATTR_WARM_RESET_SHIFT 31 -+#define SCMI_SYS_PWR_WARM_RESET_SUPPORTED (1U << SYS_PWR_ATTR_WARM_RESET_SHIFT) -+ -+#define SYS_PWR_ATTR_SUSPEND_SHIFT 30 -+#define SCMI_SYS_PWR_SUSPEND_SUPPORTED (1 << SYS_PWR_ATTR_SUSPEND_SHIFT) -+ -+/* -+ * Macros to describe the bit-fields of the `flags` parameter of system power -+ * domain protocol SYSTEM_POWER_STATE_SET message. -+ */ -+#define SYS_PWR_SET_GRACEFUL_REQ_SHIFT 0 -+#define SCMI_SYS_PWR_GRACEFUL_REQ (1 << SYS_PWR_SET_GRACEFUL_REQ_SHIFT) -+#define SCMI_SYS_PWR_FORCEFUL_REQ (0 << SYS_PWR_SET_GRACEFUL_REQ_SHIFT) -+ -+/* -+ * Macros to describe the `system_state` parameter of system power -+ * domain protocol SYSTEM_POWER_STATE_SET message. 
-+ */ -+#define SCMI_SYS_PWR_SHUTDOWN 0x0 -+#define SCMI_SYS_PWR_COLD_RESET 0x1 -+#define SCMI_SYS_PWR_WARM_RESET 0x2 -+#define SCMI_SYS_PWR_POWER_UP 0x3 -+#define SCMI_SYS_PWR_SUSPEND 0x4 -+ -+/* SCMI Error code definitions */ -+#define SCMI_E_QUEUED 1 -+#define SCMI_E_SUCCESS 0 -+#define SCMI_E_NOT_SUPPORTED -1 -+#define SCMI_E_INVALID_PARAM -2 -+#define SCMI_E_DENIED -3 -+#define SCMI_E_NOT_FOUND -4 -+#define SCMI_E_OUT_OF_RANGE -5 -+#define SCMI_E_BUSY -6 -+ -+/* -+ * SCMI driver platform information. The details of the doorbell mechanism -+ * can be found in the SCMI specification. -+ */ -+typedef struct scmi_channel_plat_info { -+ /* SCMI mailbox memory */ -+ uintptr_t scmi_mbx_mem; -+ /* The door bell register address */ -+ uintptr_t db_reg_addr; -+ /* The bit mask that need to be preserved when ringing doorbell */ -+ uint32_t db_preserve_mask; -+ /* The bit mask that need to be set to ring doorbell */ -+ uint32_t db_modify_mask; -+ /* The handler for ringing doorbell */ -+ void (*ring_doorbell)(struct scmi_channel_plat_info *plat_info); -+ /* cookie is unused now. But added for future enhancements. */ -+ void *cookie; -+} scmi_channel_plat_info_t; -+ -+typedef spinlock_t scmi_lock_t; -+ -+/* -+ * Structure to represent an SCMI channel. -+ */ -+typedef struct scmi_channel { -+ scmi_channel_plat_info_t *info; -+ /* The lock for channel access */ -+ scmi_lock_t *lock; -+ /* Indicate whether the channel is initialized */ -+ int is_initialized; -+} scmi_channel_t; -+ -+/* External Common API */ -+void *scmi_init(scmi_channel_t *ch); -+/* API to override default PSCI callbacks for platforms that support SCMI. */ -+const plat_psci_ops_t *css_scmi_override_pm_ops(plat_psci_ops_t *ops); -+ -+/* -+ * Power domain protocol commands. Refer to the SCMI specification for more -+ * details on these commands. -+ */ -+int scmi_pwr_state_set(void *p, uint32_t domain_id, uint32_t scmi_pwr_state); -+int scmi_pwr_state_get(void *p, uint32_t domain_id, uint32_t *scmi_pwr_state); -+ -+int scmi_proto_version(void *p, uint32_t proto_id, uint32_t *version); -+int scmi_proto_msg_attr(void *p, uint32_t proto_id, uint32_t command_id, -+ uint32_t *attr); -+scmi_channel_plat_info_t *plat_css_get_scmi_info(unsigned int channel_id); -+ -+/* -+ * System power management protocol commands. Refer SCMI specification for more -+ * details on these commands. -+ */ -+int scmi_sys_pwr_state_set(void *p, uint32_t flags, uint32_t system_state); -+int scmi_sys_pwr_state_get(void *p, uint32_t *system_state); -+ -+#endif -diff --git a/include/sbi_utils/psci/drivers/arm/css/scmi_private.h b/include/sbi_utils/psci/drivers/arm/css/scmi_private.h -new file mode 100644 -index 000000000000..7b246e53cc38 ---- /dev/null -+++ b/include/sbi_utils/psci/drivers/arm/css/scmi_private.h -@@ -0,0 +1,146 @@ -+#ifndef __SCMI_PRIVATE_H__ -+#define __SCMI_PRIVATE_H__ -+ -+#include -+#include -+#include -+#include -+ -+/* -+ * SCMI power domain management protocol message and response lengths. It is -+ * calculated as sum of length in bytes of the message header (4) and payload -+ * area (the number of bytes of parameters or return values in the payload). 
-+ */ -+#define SCMI_PROTO_VERSION_MSG_LEN 4 -+#define SCMI_PROTO_VERSION_RESP_LEN 12 -+ -+#define SCMI_PROTO_MSG_ATTR_MSG_LEN 8 -+#define SCMI_PROTO_MSG_ATTR_RESP_LEN 12 -+ -+#define SCMI_PWR_STATE_GET_MSG_LEN 8 -+#define SCMI_PWR_STATE_GET_RESP_LEN 12 -+ -+/* SCMI power domain protocol `POWER_STATE_SET` message flags */ -+#define SCMI_PWR_STATE_SET_FLAG_SYNC 0 -+#define SCMI_PWR_STATE_SET_FLAG_ASYNC 1 -+ -+/* SCMI message header format bit field */ -+#define SCMI_MSG_ID_SHIFT 0 -+#define SCMI_MSG_ID_WIDTH 8 -+#define SCMI_MSG_ID_MASK ((1 << SCMI_MSG_ID_WIDTH) - 1) -+ -+#define SCMI_MSG_PROTO_ID_SHIFT 10 -+#define SCMI_MSG_PROTO_ID_WIDTH 8 -+#define SCMI_MSG_PROTO_ID_MASK ((1 << SCMI_MSG_PROTO_ID_WIDTH) - 1) -+ -+#define SCMI_MSG_TOKEN_SHIFT 18 -+#define SCMI_MSG_TOKEN_WIDTH 10 -+#define SCMI_MSG_TOKEN_MASK ((1 << SCMI_MSG_TOKEN_WIDTH) - 1) -+ -+#define SCMI_PWR_STATE_SET_MSG_LEN 16 -+#define SCMI_PWR_STATE_SET_RESP_LEN 8 -+ -+#define SCMI_SYS_PWR_STATE_SET_MSG_LEN 12 -+#define SCMI_SYS_PWR_STATE_SET_RESP_LEN 8 -+ -+#define SCMI_SYS_PWR_STATE_GET_MSG_LEN 4 -+#define SCMI_SYS_PWR_STATE_GET_RESP_LEN 12 -+ -+/* SCMI mailbox flags */ -+#define SCMI_FLAG_RESP_POLL 0 -+#define SCMI_FLAG_RESP_INT 1 -+ -+/* Helper macros to copy arguments to the mailbox payload */ -+#define SCMI_PAYLOAD_ARG1(payld_arr, arg1) \ -+ *((uint32_t *)&payld_arr[0]) = arg1 -+ -+#define SCMI_PAYLOAD_ARG2(payld_arr, arg1, arg2) do { \ -+ SCMI_PAYLOAD_ARG1(payld_arr, arg1); \ -+ *((uint32_t *)&payld_arr[1]) = arg2; \ -+ } while (0) -+ -+#define SCMI_PAYLOAD_ARG3(payld_arr, arg1, arg2, arg3) do { \ -+ SCMI_PAYLOAD_ARG2(payld_arr, arg1, arg2); \ -+ *((uint32_t *)&payld_arr[2]) = arg3; \ -+ } while (0) -+ -+/* Helper macros to read return values from the mailbox payload */ -+#define SCMI_PAYLOAD_RET_VAL1(payld_arr, val1) \ -+ (val1) = *((uint32_t *)&payld_arr[0]) -+ -+#define SCMI_PAYLOAD_RET_VAL2(payld_arr, val1, val2) do { \ -+ SCMI_PAYLOAD_RET_VAL1(payld_arr, val1); \ -+ (val2) = *((uint32_t *)&payld_arr[1]); \ -+ } while (0) -+ -+#define SCMI_PAYLOAD_RET_VAL3(payld_arr, val1, val2, val3) do { \ -+ SCMI_PAYLOAD_RET_VAL2(payld_arr, val1, val2); \ -+ (val3) = *((uint32_t *)&payld_arr[2]); \ -+ } while (0) -+ -+#define SCMI_PAYLOAD_RET_VAL4(payld_arr, val1, val2, val3, val4) do { \ -+ SCMI_PAYLOAD_RET_VAL3(payld_arr, val1, val2, val3); \ -+ (val4) = *((uint32_t *)&payld_arr[3]); \ -+ } while (0) -+ -+/* Helper macro to get the token from a SCMI message header */ -+#define SCMI_MSG_GET_TOKEN(_msg) \ -+ (((_msg) >> SCMI_MSG_TOKEN_SHIFT) & SCMI_MSG_TOKEN_MASK) -+ -+/* SCMI Channel Status bit fields */ -+#define SCMI_CH_STATUS_RES0_MASK 0xFFFFFFFE -+#define SCMI_CH_STATUS_FREE_SHIFT 0 -+#define SCMI_CH_STATUS_FREE_WIDTH 1 -+#define SCMI_CH_STATUS_FREE_MASK ((1 << SCMI_CH_STATUS_FREE_WIDTH) - 1) -+ -+/* Helper macros to check and write the channel status */ -+#define SCMI_IS_CHANNEL_FREE(status) \ -+ (!!(((status) >> SCMI_CH_STATUS_FREE_SHIFT) & SCMI_CH_STATUS_FREE_MASK)) -+ -+#define SCMI_MARK_CHANNEL_BUSY(status) do { \ -+ if (!SCMI_IS_CHANNEL_FREE(status)) \ -+ sbi_hart_hang(); \ -+ (status) &= ~(SCMI_CH_STATUS_FREE_MASK << \ -+ SCMI_CH_STATUS_FREE_SHIFT); \ -+ } while (0) -+ -+/* -+ * Helper macro to create an SCMI message header given protocol, message id -+ * and token. 
-+ */ -+#define SCMI_MSG_CREATE(_protocol, _msg_id, _token) \ -+ ((((_protocol) & SCMI_MSG_PROTO_ID_MASK) << SCMI_MSG_PROTO_ID_SHIFT) | \ -+ (((_msg_id) & SCMI_MSG_ID_MASK) << SCMI_MSG_ID_SHIFT) | \ -+ (((_token) & SCMI_MSG_TOKEN_MASK) << SCMI_MSG_TOKEN_SHIFT)) -+ -+#define MAILBOX_MEM_PAYLOAD_SIZE (0x80) -+#define MAILBOX_SECURE_PSCI_CHANNEL (0x1) -+ -+/* -+ * Private data structure for representing the mailbox memory layout. Refer -+ * the SCMI specification for more details. -+ */ -+typedef struct mailbox_mem { -+ uint32_t res_a; /* Reserved */ -+ volatile uint32_t status; -+ uint64_t res_b; /* Reserved */ -+ uint32_t flags; -+ volatile uint32_t len; -+ volatile uint32_t msg_header; -+ uint32_t payload[]; -+} mailbox_mem_t; -+ -+static inline void validate_scmi_channel(scmi_channel_t *ch) -+{ -+ if (!ch || !ch->is_initialized) -+ sbi_hart_hang(); -+ -+ if (!ch->info || !ch->info->scmi_mbx_mem) -+ sbi_hart_hang(); -+} -+ -+void scmi_send_sync_command(scmi_channel_t *ch); -+void scmi_get_channel(scmi_channel_t *ch); -+void scmi_put_channel(scmi_channel_t *ch); -+ -+#endif -diff --git a/include/sbi_utils/psci/plat/arm/board/spacemit/include/platform_def.h b/include/sbi_utils/psci/plat/arm/board/spacemit/include/platform_def.h -new file mode 100644 -index 000000000000..6287c8273c7e ---- /dev/null -+++ b/include/sbi_utils/psci/plat/arm/board/spacemit/include/platform_def.h -@@ -0,0 +1,10 @@ -+#ifndef __PLATFORM_DEFINE_H__ -+#define __PLATFORM_DEFINE_H__ -+ -+/* System power domain level */ -+#define CSS_SYSTEM_PWR_DMN_LVL ARM_PWR_LVL2 -+ -+/* Number of SCMI channels on the platform */ -+#define PLAT_ARM_SCMI_CHANNEL_COUNT 1U -+ -+#endif -diff --git a/include/sbi_utils/psci/plat/arm/common/arm_def.h b/include/sbi_utils/psci/plat/arm/common/arm_def.h -new file mode 100644 -index 000000000000..3cedcff21609 ---- /dev/null -+++ b/include/sbi_utils/psci/plat/arm/common/arm_def.h -@@ -0,0 +1,19 @@ -+#ifndef __ARM_DEF_H__ -+#define __ARM_DEF_H__ -+ -+#define MPIDR_AFFLVL0 0ULL -+#define MPIDR_AFFLVL1 1ULL -+#define MPIDR_AFFLVL2 2ULL -+#define MPIDR_AFFLVL3 3ULL -+ -+/* -+ * Macros mapping the MPIDR Affinity levels to ARM Platform Power levels. The -+ * power levels have a 1:1 mapping with the MPIDR affinity levels. 
-+ */ -+#define ARM_PWR_LVL0 MPIDR_AFFLVL0 -+#define ARM_PWR_LVL1 MPIDR_AFFLVL1 -+#define ARM_PWR_LVL2 MPIDR_AFFLVL2 -+#define ARM_PWR_LVL3 MPIDR_AFFLVL3 -+ -+ -+#endif -diff --git a/include/sbi_utils/psci/plat/arm/common/plat_arm.h b/include/sbi_utils/psci/plat/arm/common/plat_arm.h -new file mode 100644 -index 000000000000..fb7bf13f9ba4 ---- /dev/null -+++ b/include/sbi_utils/psci/plat/arm/common/plat_arm.h -@@ -0,0 +1,21 @@ -+#ifndef __PLAT_ARM_H__ -+#define __PLAT_ARM_H__ -+ -+#include -+#include -+#include -+ -+#define ARM_SCMI_INSTANTIATE_LOCK spinlock_t arm_scmi_lock -+ -+#define ARM_SCMI_LOCK_GET_INSTANCE (&arm_scmi_lock) -+ -+extern plat_psci_ops_t plat_arm_psci_pm_ops; -+ -+const plat_psci_ops_t *plat_arm_psci_override_pm_ops(plat_psci_ops_t *ops); -+ -+void plat_arm_pwrc_setup(void); -+ -+int arm_validate_power_state(unsigned int power_state, -+ psci_power_state_t *req_state); -+ -+#endif -diff --git a/include/sbi_utils/psci/plat/arm/css/common/css_pm.h b/include/sbi_utils/psci/plat/arm/css/common/css_pm.h -new file mode 100644 -index 000000000000..78d7f374d254 ---- /dev/null -+++ b/include/sbi_utils/psci/plat/arm/css/common/css_pm.h -@@ -0,0 +1,36 @@ -+#ifndef __CSS_ARM_H__ -+#define __CSS_ARM_H__ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#define SCMI_DOMAIN_ID_MASK 0xFFFFU -+#define SCMI_CHANNEL_ID_MASK 0xFFFFU -+#define SCMI_CHANNEL_ID_SHIFT 16U -+ -+#define SET_SCMI_CHANNEL_ID(n) (((n) & SCMI_CHANNEL_ID_MASK) << \ -+ SCMI_CHANNEL_ID_SHIFT) -+#define SET_SCMI_DOMAIN_ID(n) ((n) & SCMI_DOMAIN_ID_MASK) -+#define GET_SCMI_CHANNEL_ID(n) (((n) >> SCMI_CHANNEL_ID_SHIFT) & \ -+ SCMI_CHANNEL_ID_MASK) -+#define GET_SCMI_DOMAIN_ID(n) ((n) & SCMI_DOMAIN_ID_MASK) -+ -+/* Macros to read the CSS power domain state */ -+#define CSS_CORE_PWR_STATE(state) (state)->pwr_domain_state[ARM_PWR_LVL0] -+#define CSS_CLUSTER_PWR_STATE(state) (state)->pwr_domain_state[ARM_PWR_LVL1] -+ -+static inline unsigned int css_system_pwr_state(const psci_power_state_t *state) -+{ -+#if (PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL) -+ return state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL]; -+#else -+ return 0; -+#endif -+} -+ -+extern uint32_t plat_css_core_pos_to_scmi_dmn_id_map[PLATFORM_CLUSTER_COUNT][PLATFORM_CORE_COUNT]; -+ -+#endif -diff --git a/include/sbi_utils/psci/plat/common/platform.h b/include/sbi_utils/psci/plat/common/platform.h -new file mode 100644 -index 000000000000..7c5361230b55 ---- /dev/null -+++ b/include/sbi_utils/psci/plat/common/platform.h -@@ -0,0 +1,13 @@ -+#ifndef __PSCI_PLAT_COMMON_H__ -+#define __PSCI_PLAT_COMMON_H__ -+ -+#include -+#include -+ -+unsigned char *plat_get_power_domain_tree_desc(void); -+ -+int plat_setup_psci_ops(uintptr_t sec_entrypoint, -+ const struct plat_psci_ops **psci_ops); -+int plat_core_pos_by_mpidr(u_register_t mpidr); -+ -+#endif -diff --git a/include/sbi_utils/psci/psci.h b/include/sbi_utils/psci/psci.h -new file mode 100644 -index 000000000000..c76fd25d546b ---- /dev/null -+++ b/include/sbi_utils/psci/psci.h -@@ -0,0 +1,223 @@ -+#ifndef __PSCI_H__ -+#define __PSCI_H__ -+ -+#include -+#include -+ -+#define MPIDR_AFFLVL0_VAL(mpidr) \ -+ (((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFINITY0_MASK) -+#define MPIDR_AFFLVL1_VAL(mpidr) \ -+ (((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFINITY1_MASK) -+/* -+ * Macros for local power states in ARM platforms encoded by State-ID field -+ * within the power-state parameter. -+ */ -+/* Local power state for power domains in Run state. */ -+#define ARM_LOCAL_STATE_RUN 0U -+/* Local power state for retention. 
Valid only for CPU power domains */ -+#define ARM_LOCAL_STATE_RET 1U -+/* Local power state for OFF/power-down. Valid for CPU and cluster power -+ domains */ -+#define ARM_LOCAL_STATE_OFF 2U -+ -+/* -+ * This macro defines the deepest retention state possible. A higher state -+ * id will represent an invalid or a power down state. -+ */ -+#define PLAT_MAX_RET_STATE ARM_LOCAL_STATE_RET -+ -+/* -+ * This macro defines the deepest power down states possible. Any state ID -+ * higher than this is invalid. -+ */ -+#define PLAT_MAX_OFF_STATE ARM_LOCAL_STATE_OFF -+ -+/* -+ * Type for representing the local power state at a particular level. -+ */ -+typedef unsigned char plat_local_state_t; -+ -+/* The local state macro used to represent RUN state. */ -+#define PSCI_LOCAL_STATE_RUN 0U -+ -+typedef unsigned long u_register_t; -+ -+/******************************************************************************* -+ * PSCI error codes -+ ******************************************************************************/ -+#define PSCI_E_SUCCESS 0 -+#define PSCI_E_NOT_SUPPORTED -1 -+#define PSCI_E_INVALID_PARAMS -2 -+#define PSCI_E_DENIED -3 -+#define PSCI_E_ALREADY_ON -4 -+#define PSCI_E_ON_PENDING -5 -+#define PSCI_E_INTERN_FAIL -6 -+#define PSCI_E_NOT_PRESENT -7 -+#define PSCI_E_DISABLED -8 -+#define PSCI_E_INVALID_ADDRESS -9 -+ -+#define PSCI_INVALID_MPIDR ~((u_register_t)0) -+ -+ -+/* -+ * These are the states reported by the PSCI_AFFINITY_INFO API for the specified -+ * CPU. The definitions of these states can be found in Section 5.7.1 in the -+ * PSCI specification (ARM DEN 0022C). -+ */ -+typedef enum { -+ AFF_STATE_ON = 0U, -+ AFF_STATE_OFF = 1U, -+ AFF_STATE_ON_PENDING = 2U -+} aff_info_state_t; -+ -+/******************************************************************************* -+ * Structure used to store per-cpu information relevant to the PSCI service. -+ * It is populated in the per-cpu data array. In return we get a guarantee that -+ * this information will not reside on a cache line shared with another cpu. -+ ******************************************************************************/ -+typedef struct psci_cpu_data { -+ /* State as seen by PSCI Affinity Info API */ -+ aff_info_state_t aff_info_state; -+ -+ /* -+ * Highest power level which takes part in a power management -+ * operation. -+ */ -+ unsigned int target_pwrlvl; -+ -+ /* The local power state of this CPU */ -+ plat_local_state_t local_state; -+} psci_cpu_data_t; -+ -+/* -+ * Macro to represent invalid affinity level within PSCI. -+ */ -+#define PSCI_INVALID_PWR_LVL (PLAT_MAX_PWR_LVL + 1U) -+ -+/* -+ * These are the power states reported by PSCI_NODE_HW_STATE API for the -+ * specified CPU. The definitions of these states can be found in Section 5.15.3 -+ * of PSCI specification (ARM DEN 0022C). 
-+ */ -+#define HW_ON 0 -+#define HW_OFF 1 -+#define HW_STANDBY 2 -+ -+#define PSTATE_ID_SHIFT (0U) -+#define PSTATE_VALID_MASK (0xFCFE0000U) -+#define PSTATE_TYPE_SHIFT (16U) -+#define PSTATE_PWR_LVL_SHIFT (24U) -+#define PSTATE_ID_MASK (0xffffU) -+#define PSTATE_PWR_LVL_MASK (0x3U) -+ -+#define psci_get_pstate_pwrlvl(pstate) (((pstate) >> PSTATE_PWR_LVL_SHIFT) & \ -+ PSTATE_PWR_LVL_MASK) -+#define psci_make_powerstate(state_id, type, pwrlvl) \ -+ (((state_id) & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) |\ -+ (((type) & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) |\ -+ (((pwrlvl) & PSTATE_PWR_LVL_MASK) << PSTATE_PWR_LVL_SHIFT) -+ -+#define PSTATE_TYPE_STANDBY (0x0U) -+#define PSTATE_TYPE_POWERDOWN (0x1U) -+#define PSTATE_TYPE_MASK (0x1U) -+ -+/* RISCV suspend power state */ -+#define RSTATE_TYPE_SHIFT (31U) -+#define RSTATE_PWR_LVL_SHIFT (24U) -+#define RSTATE_COMMON_SHIFT (28U) -+ -+/***************************************************************************** -+ * This data structure defines the representation of the power state parameter -+ * for its exchange between the generic PSCI code and the platform port. For -+ * example, it is used by the platform port to specify the requested power -+ * states during a power management operation. It is used by the generic code to -+ * inform the platform about the target power states that each level should -+ * enter. -+ ****************************************************************************/ -+typedef struct psci_power_state { -+ /* -+ * The pwr_domain_state[] stores the local power state at each level -+ * for the CPU. -+ */ -+ plat_local_state_t pwr_domain_state[PLAT_MAX_PWR_LVL + 1U ]; -+} psci_power_state_t; -+ -+/* -+ * Function to test whether the plat_local_state is RUN state -+ */ -+static inline int is_local_state_run(unsigned int plat_local_state) -+{ -+ return (plat_local_state == PSCI_LOCAL_STATE_RUN) ? 1 : 0; -+} -+ -+/* -+ * Function to test whether the plat_local_state is OFF state -+ */ -+static inline int is_local_state_off(unsigned int plat_local_state) -+{ -+ return ((plat_local_state > PLAT_MAX_RET_STATE) && -+ (plat_local_state <= PLAT_MAX_OFF_STATE)) ? 
1 : 0; -+} -+ -+/* Power state helper functions */ -+ -+static inline unsigned int psci_check_power_state(unsigned int power_state) -+{ -+ return ((power_state) & PSTATE_VALID_MASK); -+} -+ -+static inline unsigned int psci_get_pstate_id(unsigned int power_state) -+{ -+ return ((power_state) >> PSTATE_ID_SHIFT) & PSTATE_ID_MASK; -+} -+ -+static inline unsigned int psci_get_pstate_type(unsigned int power_state) -+{ -+ return ((power_state) >> PSTATE_TYPE_SHIFT) & PSTATE_TYPE_MASK; -+} -+ -+/******************************************************************************* -+ * Structure populated by platform specific code to export routines which -+ * perform common low level power management functions -+ ******************************************************************************/ -+typedef struct plat_psci_ops { -+ void (*cpu_standby)(plat_local_state_t cpu_state); -+ int (*pwr_domain_on)(u_register_t mpidr); -+ void (*pwr_domain_off)(const psci_power_state_t *target_state); -+ int (*pwr_domain_off_early)(const psci_power_state_t *target_state); -+ void (*pwr_domain_suspend_pwrdown_early)( -+ const psci_power_state_t *target_state); -+ void (*pwr_domain_suspend)(const psci_power_state_t *target_state); -+ void (*pwr_domain_on_finish)(const psci_power_state_t *target_state); -+ void (*pwr_domain_on_finish_late)( -+ const psci_power_state_t *target_state); -+ void (*pwr_domain_suspend_finish)( -+ const psci_power_state_t *target_state); -+ void (*pwr_domain_pwr_down_wfi)( -+ const psci_power_state_t *target_state); -+ void (*system_off)(void); -+ void (*system_reset)(void); -+ int (*validate_power_state)(unsigned int power_state, -+ psci_power_state_t *req_state); -+ int (*validate_ns_entrypoint)(uintptr_t ns_entrypoint); -+ void (*get_sys_suspend_power_state)( -+ psci_power_state_t *req_state); -+ int (*get_pwr_lvl_state_idx)(plat_local_state_t pwr_domain_state, -+ int pwrlvl); -+ int (*translate_power_state_by_mpidr)(u_register_t mpidr, -+ unsigned int power_state, -+ psci_power_state_t *output_state); -+ int (*get_node_hw_state)(u_register_t mpidr, unsigned int power_level); -+ int (*mem_protect_chk)(uintptr_t base, u_register_t length); -+ int (*read_mem_protect)(int *val); -+ int (*write_mem_protect)(int val); -+ int (*system_reset2)(int is_vendor, -+ int reset_type, u_register_t cookie); -+} plat_psci_ops_t; -+ -+int psci_cpu_on(u_register_t target_cpu, uintptr_t entrypoint); -+int psci_cpu_off(void); -+int psci_affinity_info(u_register_t target_affinity, unsigned int lowest_affinity_level); -+int psci_cpu_suspend(unsigned int power_state, uintptr_t entrypoint, u_register_t context_id); -+ -+#endif -diff --git a/include/sbi_utils/psci/psci_lib.h b/include/sbi_utils/psci/psci_lib.h -new file mode 100644 -index 000000000000..15576b788cd0 ---- /dev/null -+++ b/include/sbi_utils/psci/psci_lib.h -@@ -0,0 +1,8 @@ -+#ifndef __PSCI_LIB_H__ -+#define __PSCI_LIB_H__ -+ -+int psci_setup(void); -+void psci_print_power_domain_map(void); -+void psci_warmboot_entrypoint(void); -+ -+#endif -diff --git a/lib/sbi/sbi_ecall_base.c b/lib/sbi/sbi_ecall_base.c -index 74f05eb26a35..b02bcc10322f 100644 ---- a/lib/sbi/sbi_ecall_base.c -+++ b/lib/sbi/sbi_ecall_base.c -@@ -14,6 +14,9 @@ - #include - #include - #include -+#if defined(CONFIG_PLATFORM_SPACEMIT_K1PRO) || defined(CONFIG_PLATFORM_SPACEMIT_K1X) -+#include -+#endif - - static int sbi_ecall_base_probe(unsigned long extid, unsigned long *out_val) - { -@@ -62,6 +65,13 @@ static int sbi_ecall_base_handler(unsigned long extid, unsigned long funcid, - case 
SBI_EXT_BASE_GET_MIMPID: - *out_val = csr_read(CSR_MIMPID); - break; -+#if defined(CONFIG_PLATFORM_SPACEMIT_K1PRO) || defined(CONFIG_PLATFORM_SPACEMIT_K1X) -+ case SBI_EXT_BASE_FLUSH_CACHE_ALL: -+ csi_flush_dcache_all(); -+ /* there has no need to flush l2 cache here */ -+ /* csi_flush_l2_cache(); */ -+ break; -+#endif - case SBI_EXT_BASE_PROBE_EXT: - ret = sbi_ecall_base_probe(regs->a0, out_val); - break; -diff --git a/lib/sbi/sbi_ecall_hsm.c b/lib/sbi/sbi_ecall_hsm.c -index 20705c395131..ed8c940e9188 100644 ---- a/lib/sbi/sbi_ecall_hsm.c -+++ b/lib/sbi/sbi_ecall_hsm.c -@@ -35,8 +35,13 @@ static int sbi_ecall_hsm_handler(unsigned long extid, unsigned long funcid, - ret = sbi_hsm_hart_stop(scratch, true); - break; - case SBI_EXT_HSM_HART_GET_STATUS: -+#ifndef CONFIG_ARM_PSCI_SUPPORT - ret = sbi_hsm_hart_get_state(sbi_domain_thishart_ptr(), - regs->a0); -+#else -+ ret = sbi_hsm_hart_get_psci_state(sbi_domain_thishart_ptr(), -+ regs->a0); -+#endif - break; - case SBI_EXT_HSM_HART_SUSPEND: - ret = sbi_hsm_hart_suspend(scratch, regs->a0, regs->a1, -diff --git a/lib/sbi/sbi_hart.c b/lib/sbi/sbi_hart.c -index 6e52cbd76d89..3a3265df7f20 100644 ---- a/lib/sbi/sbi_hart.c -+++ b/lib/sbi/sbi_hart.c -@@ -818,6 +818,8 @@ sbi_hart_switch_mode(unsigned long arg0, unsigned long arg1, - } - } - -+ csr_write(CSR_TCMCFG, 1); -+ - register unsigned long a0 asm("a0") = arg0; - register unsigned long a1 asm("a1") = arg1; - __asm__ __volatile__("mret" : : "r"(a0), "r"(a1)); -diff --git a/lib/sbi/sbi_hsm.c b/lib/sbi/sbi_hsm.c -index f870ca72bafc..acd3c9e04c87 100644 ---- a/lib/sbi/sbi_hsm.c -+++ b/lib/sbi/sbi_hsm.c -@@ -25,6 +25,8 @@ - #include - #include - #include -+#include -+#include - - #define __sbi_hsm_hart_change_state(hdata, oldstate, newstate) \ - ({ \ -@@ -76,6 +78,21 @@ int sbi_hsm_hart_get_state(const struct sbi_domain *dom, u32 hartid) - return __sbi_hsm_hart_get_state(hartid); - } - -+#ifdef CONFIG_ARM_PSCI_SUPPORT -+int __sbi_hsm_hart_get_psci_state(u32 hartid) -+{ -+ return psci_affinity_info(hartid, 0); -+} -+ -+int sbi_hsm_hart_get_psci_state(const struct sbi_domain *dom, u32 hartid) -+{ -+ if (!sbi_domain_is_assigned_hart(dom, hartid)) -+ return SBI_EINVAL; -+ -+ return __sbi_hsm_hart_get_psci_state(hartid); -+} -+#endif -+ - /* - * Try to acquire the ticket for the given target hart to make sure only - * one hart prepares the start of the target hart. 
-@@ -137,8 +154,13 @@ int sbi_hsm_hart_interruptible_mask(const struct sbi_domain *dom, - return 0; - } - -+extern unsigned char _data_start[]; -+extern unsigned char _data_end[]; -+extern unsigned char _bss_start[]; -+extern unsigned char _bss_end[]; -+ - void __noreturn sbi_hsm_hart_start_finish(struct sbi_scratch *scratch, -- u32 hartid) -+ u32 hartid, bool cool_boot) - { - unsigned long next_arg1; - unsigned long next_addr; -@@ -155,34 +177,54 @@ void __noreturn sbi_hsm_hart_start_finish(struct sbi_scratch *scratch, - next_mode = scratch->next_mode; - hsm_start_ticket_release(hdata); - -+ /** -+ * clean the cache : .data/bss section & local scratch & local sp -+ * let the second hart can view the data -+ * */ -+ if (cool_boot) { -+ csi_flush_dcache_all(); -+ csi_flush_l2_cache(); -+ } -+ - sbi_hart_switch_mode(hartid, next_arg1, next_addr, next_mode, false); - } - -+#ifdef CONFIG_ARM_PSCI_SUPPORT - static void sbi_hsm_hart_wait(struct sbi_scratch *scratch, u32 hartid) - { -- unsigned long saved_mie; - struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch, - hart_data_offset); -- /* Save MIE CSR */ -- saved_mie = csr_read(CSR_MIE); -- -- /* Set MSIE and MEIE bits to receive IPI */ -- csr_set(CSR_MIE, MIP_MSIP | MIP_MEIP); -- -- /* Wait for state transition requested by sbi_hsm_hart_start() */ -- while (atomic_read(&hdata->state) != SBI_HSM_STATE_START_PENDING) { -- wfi(); -- } -- -- /* Restore MIE CSR */ -- csr_write(CSR_MIE, saved_mie); - -- /* -- * No need to clear IPI here because the sbi_ipi_init() will -- * clear it for current HART via sbi_platform_ipi_init(). -- */ -+ while (atomic_read(&hdata->state) != SBI_HSM_STATE_START_PENDING); -+} -+#else -+static void sbi_hsm_hart_wait(struct sbi_scratch *scratch, u32 hartid) -+{ -+ unsigned long saved_mie; -+ struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch, -+ hart_data_offset); -+ /* Save MIE CSR */ -+ saved_mie = csr_read(CSR_MIE); -+ -+ /* Set MSIE and MEIE bits to receive IPI */ -+ csr_set(CSR_MIE, MIP_MSIP | MIP_MEIP); -+ -+ /* Wait for state transition requested by sbi_hsm_hart_start() */ -+ while (atomic_read(&hdata->state) != SBI_HSM_STATE_START_PENDING) { -+ wfi(); -+ } -+ -+ /* Restore MIE CSR */ -+ csr_write(CSR_MIE, saved_mie); -+ -+ /* -+ * No need to clear IPI here because the sbi_ipi_init() will -+ * clear it for current HART via sbi_platform_ipi_init(). 
-+ */ - } - -+#endif -+ - const struct sbi_hsm_device *sbi_hsm_get_device(void) - { - return hsm_dev; -diff --git a/lib/sbi/sbi_init.c b/lib/sbi/sbi_init.c -index 423e6d83650f..d36d5a096401 100644 ---- a/lib/sbi/sbi_init.c -+++ b/lib/sbi/sbi_init.c -@@ -185,6 +185,7 @@ static void sbi_boot_print_hart(struct sbi_scratch *scratch, u32 hartid) - sbi_hart_delegation_dump(scratch, "Boot HART ", " "); - } - -+#ifndef CONFIG_ARM_PSCI_SUPPORT - static spinlock_t coldboot_lock = SPIN_LOCK_INITIALIZER; - static struct sbi_hartmask coldboot_wait_hmask = { 0 }; - -@@ -257,6 +258,7 @@ static void wake_coldboot_harts(struct sbi_scratch *scratch, u32 hartid) - /* Release coldboot lock */ - spin_unlock(&coldboot_lock); - } -+#endif - - static unsigned long entry_count_offset; - static unsigned long init_count_offset; -@@ -392,12 +394,14 @@ static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid) - - sbi_boot_print_hart(scratch, hartid); - -+#ifndef CONFIG_ARM_PSCI_SUPPORT - wake_coldboot_harts(scratch, hartid); -+#endif - - count = sbi_scratch_offset_ptr(scratch, init_count_offset); - (*count)++; - -- sbi_hsm_hart_start_finish(scratch, hartid); -+ sbi_hsm_hart_start_finish(scratch, hartid, true); - } - - static void __noreturn init_warm_startup(struct sbi_scratch *scratch, -@@ -456,7 +460,7 @@ static void __noreturn init_warm_startup(struct sbi_scratch *scratch, - count = sbi_scratch_offset_ptr(scratch, init_count_offset); - (*count)++; - -- sbi_hsm_hart_start_finish(scratch, hartid); -+ sbi_hsm_hart_start_finish(scratch, hartid, false); - } - - static void __noreturn init_warm_resume(struct sbi_scratch *scratch, -@@ -481,7 +485,9 @@ static void __noreturn init_warmboot(struct sbi_scratch *scratch, u32 hartid) - { - int hstate; - -+#ifndef CONFIG_ARM_PSCI_SUPPORT - wait_for_coldboot(scratch, hartid); -+#endif - - hstate = sbi_hsm_hart_get_state(sbi_domain_thishart_ptr(), hartid); - if (hstate < 0) -diff --git a/lib/sbi/sbi_pmu.c b/lib/sbi/sbi_pmu.c -index c73e6ef3ff2d..80367b28005c 100644 ---- a/lib/sbi/sbi_pmu.c -+++ b/lib/sbi/sbi_pmu.c -@@ -557,8 +557,25 @@ int sbi_pmu_ctr_stop(unsigned long cbase, unsigned long cmask, - return ret; - } - -+#ifdef CONFIG_PLATFORM_SPACEMIT_K1X -+static inline int spacemit_mhpmevent_inhibit_flags_are_invalid(uint64_t mhpmevent_val) -+{ -+ uint64_t event_hw_idx = mhpmevent_val & ~MHPMEVENT_SSCOF_MASK; -+ -+ /* Inhibit flags in mhpmevents of L2 cache events are invalid. */ -+ if (event_hw_idx >= 184 && event_hw_idx <= 189) -+ return 1; -+ -+ return 0; -+} -+#endif /* CONFIG_PLATFORM_SPACEMIT_K1X */ -+ - static void pmu_update_inhibit_flags(unsigned long flags, uint64_t *mhpmevent_val) - { -+#ifdef CONFIG_PLATFORM_SPACEMIT_K1X -+ if (spacemit_mhpmevent_inhibit_flags_are_invalid(*mhpmevent_val)) -+ return; -+#endif - if (flags & SBI_PMU_CFG_FLAG_SET_VUINH) - *mhpmevent_val |= MHPMEVENT_VUINH; - if (flags & SBI_PMU_CFG_FLAG_SET_VSINH) -@@ -587,9 +604,16 @@ static int pmu_update_hw_mhpmevent(struct sbi_pmu_hw_event *hw_evt, int ctr_idx, - * Always set the OVF bit(disable interrupts) and inhibit counting of - * events in M-mode. The OVF bit should be enabled during the start call. 
- */ -- if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF)) -- mhpmevent_val = (mhpmevent_val & ~MHPMEVENT_SSCOF_MASK) | -- MHPMEVENT_MINH | MHPMEVENT_OF; -+ if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF)) { -+#ifdef CONFIG_PLATFORM_SPACEMIT_K1X -+ if (spacemit_mhpmevent_inhibit_flags_are_invalid(mhpmevent_val)) -+ mhpmevent_val = (mhpmevent_val & ~MHPMEVENT_SSCOF_MASK) | -+ MHPMEVENT_OF; -+ else -+#endif -+ mhpmevent_val = (mhpmevent_val & ~MHPMEVENT_SSCOF_MASK) | -+ MHPMEVENT_MINH | MHPMEVENT_OF; -+ } - - if (pmu_dev && pmu_dev->hw_counter_disable_irq) - pmu_dev->hw_counter_disable_irq(ctr_idx); -diff --git a/lib/sbi/sbi_scratch.c b/lib/sbi/sbi_scratch.c -index 87ef84cafd31..44917eb3fcc6 100644 ---- a/lib/sbi/sbi_scratch.c -+++ b/lib/sbi/sbi_scratch.c -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - - u32 last_hartid_having_scratch = SBI_HARTMASK_MAX_BITS - 1; - struct sbi_scratch *hartid_to_scratch_table[SBI_HARTMASK_MAX_BITS] = { 0 }; -@@ -59,11 +60,14 @@ unsigned long sbi_scratch_alloc_offset(unsigned long size) - if (!size) - return 0; - -- size += __SIZEOF_POINTER__ - 1; -- size &= ~((unsigned long)__SIZEOF_POINTER__ - 1); -+ size += CACHE_LINE_SIZE - 1; -+ size &= ~((unsigned long)CACHE_LINE_SIZE - 1); - - spin_lock(&extra_lock); - -+ extra_offset += CACHE_LINE_SIZE - 1; -+ extra_offset &= ~((unsigned long)CACHE_LINE_SIZE - 1); -+ - if (SBI_SCRATCH_SIZE < (extra_offset + size)) - goto done; - -diff --git a/lib/utils/Kconfig b/lib/utils/Kconfig -index 5a71e7509ca8..3ac04ab1ab4f 100644 ---- a/lib/utils/Kconfig -+++ b/lib/utils/Kconfig -@@ -22,4 +22,6 @@ source "$(OPENSBI_SRC_DIR)/lib/utils/sys/Kconfig" - - source "$(OPENSBI_SRC_DIR)/lib/utils/timer/Kconfig" - -+source "$(OPENSBI_SRC_DIR)/lib/utils/psci/Kconfig" -+ - endmenu -diff --git a/lib/utils/arm_scmi/board/spacemit/spacemit_pm.c b/lib/utils/arm_scmi/board/spacemit/spacemit_pm.c -new file mode 100644 -index 000000000000..96fc7bb7dee3 ---- /dev/null -+++ b/lib/utils/arm_scmi/board/spacemit/spacemit_pm.c -@@ -0,0 +1,41 @@ -+/* -+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved. -+ * -+ * SPDX-License-Identifier: BSD-3-Clause -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+const plat_psci_ops_t *plat_arm_psci_override_pm_ops(plat_psci_ops_t *ops) -+{ -+ return css_scmi_override_pm_ops(ops); -+} -+ -+static scmi_channel_plat_info_t spacemit_scmi_plat_info = { -+ .scmi_mbx_mem = SCMI_MAILBOX_SHARE_MEM, -+ .db_reg_addr = PLAT_MAILBOX_REG_BASE, -+ /* no used */ -+ .db_preserve_mask = 0xfffffffe, -+ /* no used */ -+ .db_modify_mask = 0x1, -+ .ring_doorbell = &mhu_ring_doorbell, -+}; -+ -+scmi_channel_plat_info_t *plat_css_get_scmi_info(unsigned int channel_id) -+{ -+ return &spacemit_scmi_plat_info; -+} -+ -+/* -+ * The array mapping platform core position (implemented by plat_my_core_pos()) -+ * to the SCMI power domain ID implemented by SCP. -+ */ -+uint32_t plat_css_core_pos_to_scmi_dmn_id_map[PLATFORM_CLUSTER_COUNT][PLATFORM_CORE_COUNT] = { -+ PLAT_SCMI_SINGLE_CLUSTER_DOMAIN_MAP, -+ PLAT_SCMI_DOUBLE_CLUSTER_DOMAIN_MAP -+}; -diff --git a/lib/utils/arm_scmi/common/arm_pm.c b/lib/utils/arm_scmi/common/arm_pm.c -new file mode 100644 -index 000000000000..7fffeced85ac ---- /dev/null -+++ b/lib/utils/arm_scmi/common/arm_pm.c -@@ -0,0 +1,68 @@ -+/* -+ * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved. 
-+ * -+ * SPDX-License-Identifier: BSD-3-Clause -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+/******************************************************************************* -+ * ARM standard platform handler called to check the validity of the power state -+ * parameter. -+ ******************************************************************************/ -+int arm_validate_power_state(unsigned int power_state, -+ psci_power_state_t *req_state) -+{ -+ unsigned int pstate = psci_get_pstate_type(power_state); -+ unsigned int pwr_lvl = psci_get_pstate_pwrlvl(power_state); -+ unsigned int i; -+ -+ if (req_state == NULL) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ if (pwr_lvl > PLAT_MAX_PWR_LVL) -+ return PSCI_E_INVALID_PARAMS; -+ -+ /* Sanity check the requested state */ -+ if (pstate == PSTATE_TYPE_STANDBY) { -+ /* -+ * It's possible to enter standby only on power level 0 -+ * Ignore any other power level. -+ */ -+ if (pwr_lvl != ARM_PWR_LVL0) -+ return PSCI_E_INVALID_PARAMS; -+ -+ req_state->pwr_domain_state[ARM_PWR_LVL0] = -+ ARM_LOCAL_STATE_RET; -+ } else { -+ for (i = ARM_PWR_LVL0; i <= pwr_lvl; i++) -+ req_state->pwr_domain_state[i] = -+ ARM_LOCAL_STATE_OFF; -+ } -+ -+ /* -+ * We expect the 'state id' to be zero. -+ */ -+ if (psci_get_pstate_id(power_state) != 0U) -+ return PSCI_E_INVALID_PARAMS; -+ -+ return PSCI_E_SUCCESS; -+} -+ -+/******************************************************************************* -+ * The ARM Standard platform definition of platform porting API -+ * `plat_setup_psci_ops`. -+ ******************************************************************************/ -+int plat_setup_psci_ops(uintptr_t sec_entrypoint, -+ const plat_psci_ops_t **psci_ops) -+{ -+ *psci_ops = plat_arm_psci_override_pm_ops(&plat_arm_psci_pm_ops); -+ -+ return 0; -+} -diff --git a/lib/utils/arm_scmi/css/common/css_pm.c b/lib/utils/arm_scmi/css/common/css_pm.c -new file mode 100644 -index 000000000000..8d17b6be442f ---- /dev/null -+++ b/lib/utils/arm_scmi/css/common/css_pm.c -@@ -0,0 +1,298 @@ -+/* -+ * Copyright (c) 2015-2022, Arm Limited and Contributors. All rights reserved. -+ * -+ * SPDX-License-Identifier: BSD-3-Clause -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* Allow CSS platforms to override `plat_arm_psci_pm_ops` */ -+#pragma weak plat_arm_psci_pm_ops -+ -+/******************************************************************************* -+ * Handler called when a power domain is about to be turned on. The -+ * level and mpidr determine the affinity instance. -+ ******************************************************************************/ -+int css_pwr_domain_on(u_register_t mpidr) -+{ -+ css_scp_on(mpidr); -+ -+ return PSCI_E_SUCCESS; -+} -+ -+static void css_pwr_domain_on_finisher_common( -+ const psci_power_state_t *target_state) -+{ -+ unsigned int clusterid; -+ unsigned int hartid = current_hartid(); -+ -+ if (CSS_CORE_PWR_STATE(target_state) != ARM_LOCAL_STATE_OFF) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ /* -+ * Perform the common cluster specific operations i.e enable coherency -+ * if this cluster was off. 
-+ */ -+ if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { -+ clusterid = MPIDR_AFFLVL1_VAL(hartid); -+ cci_enable_snoop_dvm_reqs(clusterid); -+ } -+} -+ -+/******************************************************************************* -+ * Handler called when a power level has just been powered on after -+ * being turned off earlier. The target_state encodes the low power state that -+ * each level has woken up from. This handler would never be invoked with -+ * the system power domain uninitialized as either the primary would have taken -+ * care of it as part of cold boot or the first core awakened from system -+ * suspend would have already initialized it. -+ ******************************************************************************/ -+void css_pwr_domain_on_finish(const psci_power_state_t *target_state) -+{ -+ /* Assert that the system power domain need not be initialized */ -+ if (css_system_pwr_state(target_state) != ARM_LOCAL_STATE_RUN) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ css_pwr_domain_on_finisher_common(target_state); -+} -+ -+/******************************************************************************* -+ * Handler called when a power domain has just been powered on and the cpu -+ * and its cluster are fully participating in coherent transaction on the -+ * interconnect. Data cache must be enabled for CPU at this point. -+ ******************************************************************************/ -+void css_pwr_domain_on_finish_late(const psci_power_state_t *target_state) -+{ -+#if 0 -+ /* Program the gic per-cpu distributor or re-distributor interface */ -+ plat_arm_gic_pcpu_init(); -+ -+ /* Enable the gic cpu interface */ -+ plat_arm_gic_cpuif_enable(); -+ -+ /* Setup the CPU power down request interrupt for secondary core(s) */ -+ css_setup_cpu_pwr_down_intr(); -+#endif -+} -+ -+/******************************************************************************* -+ * Common function called while turning a cpu off or suspending it. It is called -+ * from css_off() or css_suspend() when these functions in turn are called for -+ * power domain at the highest power level which will be powered down. It -+ * performs the actions common to the OFF and SUSPEND calls. -+ ******************************************************************************/ -+static void css_power_down_common(const psci_power_state_t *target_state) -+{ -+ unsigned int clusterid; -+ unsigned int hartid = current_hartid(); -+#if 0 -+ /* Prevent interrupts from spuriously waking up this cpu */ -+ plat_arm_gic_cpuif_disable(); -+#endif -+ /* Cluster is to be turned off, so disable coherency */ -+ if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { -+ clusterid = MPIDR_AFFLVL1_VAL(hartid); -+ cci_disable_snoop_dvm_reqs(clusterid); -+ } -+} -+ -+static int css_pwr_domain_off_early(const psci_power_state_t *target_state) -+{ -+ /* the ipi's pending is cleared before */ -+ /* disable the plic irq */ -+ fdt_plic_context_exit(); -+ /* clear the external irq pending */ -+ csr_clear(CSR_MIP, MIP_MEIP); -+ csr_clear(CSR_MIP, MIP_SEIP); -+ -+ /* here we clear the sstimer pending if this core have */ -+ if (sbi_hart_has_extension(sbi_scratch_thishart_ptr(), SBI_HART_EXT_SSTC)) { -+ csr_write(CSR_STIMECMP, 0xffffffffffffffff); -+ } -+ -+ return 0; -+} -+ -+/******************************************************************************* -+ * Handler called when a power domain is about to be turned off. 
The -+ * target_state encodes the power state that each level should transition to. -+ ******************************************************************************/ -+void css_pwr_domain_off(const psci_power_state_t *target_state) -+{ -+ if (CSS_CORE_PWR_STATE(target_state) != ARM_LOCAL_STATE_OFF) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ css_power_down_common(target_state); -+ css_scp_off(target_state); -+} -+ -+void css_pwr_down_wfi(const psci_power_state_t *target_state) -+{ -+ while (1) -+ wfi(); -+} -+ -+/* -+ * The system power domain suspend is only supported only via -+ * PSCI SYSTEM_SUSPEND API. PSCI CPU_SUSPEND request to system power domain -+ * will be downgraded to the lower level. -+ */ -+static int css_validate_power_state(unsigned int power_state, -+ psci_power_state_t *req_state) -+{ -+ int rc; -+ -+ rc = arm_validate_power_state(power_state, req_state); -+ -+ /* -+ * Ensure that we don't overrun the pwr_domain_state array in the case -+ * where the platform supported max power level is less than the system -+ * power level -+ */ -+ -+#if (PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL) -+ -+ /* -+ * Ensure that the system power domain level is never suspended -+ * via PSCI CPU SUSPEND API. Currently system suspend is only -+ * supported via PSCI SYSTEM SUSPEND API. -+ */ -+ -+ req_state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] = -+ ARM_LOCAL_STATE_RUN; -+#endif -+ -+ return rc; -+} -+ -+/******************************************************************************* -+ * Handler called when the CPU power domain is about to enter standby. -+ ******************************************************************************/ -+void css_cpu_standby(plat_local_state_t cpu_state) -+{ -+ /* unsigned int scr; */ -+ -+ if (cpu_state != ARM_LOCAL_STATE_RET) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ wfi(); -+#if 0 -+ scr = read_scr_el3(); -+ /* -+ * Enable the Non secure interrupt to wake the CPU. -+ * In GICv3 affinity routing mode, the non secure group1 interrupts use -+ * the PhysicalFIQ at EL3 whereas in GICv2, it uses the PhysicalIRQ. -+ * Enabling both the bits works for both GICv2 mode and GICv3 affinity -+ * routing mode. -+ */ -+ write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT); -+ isb(); -+ dsb(); -+ wfi(); -+ -+ /* -+ * Restore SCR to the original value, synchronisation of scr_el3 is -+ * done by eret while el3_exit to save some execution cycles. -+ */ -+ write_scr_el3(scr); -+#endif -+} -+ -+/******************************************************************************* -+ * Handler called when a power domain is about to be suspended. The -+ * target_state encodes the power state that each level should transition to. -+ ******************************************************************************/ -+void css_pwr_domain_suspend(const psci_power_state_t *target_state) -+{ -+ /* -+ * CSS currently supports retention only at cpu level. Just return -+ * as nothing is to be done for retention. 
-+ */ -+ if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET) -+ return; -+ -+ -+ if (CSS_CORE_PWR_STATE(target_state) != ARM_LOCAL_STATE_OFF) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ css_power_down_common(target_state); -+ -+ csr_clear(CSR_MIE, MIP_SSIP | MIP_MSIP | MIP_STIP | MIP_MTIP | MIP_SEIP | MIP_MEIP); -+ -+ /* Perform system domain state saving if issuing system suspend */ -+ if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF) { -+ /* arm_system_pwr_domain_save(); */ -+ -+ /* Power off the Redistributor after having saved its context */ -+ /* plat_arm_gic_redistif_off(); */ -+ } -+ -+ css_scp_suspend(target_state); -+} -+ -+/******************************************************************************* -+ * Handler called when a power domain has just been powered on after -+ * having been suspended earlier. The target_state encodes the low power state -+ * that each level has woken up from. -+ * TODO: At the moment we reuse the on finisher and reinitialize the secure -+ * context. Need to implement a separate suspend finisher. -+ ******************************************************************************/ -+void css_pwr_domain_suspend_finish( -+ const psci_power_state_t *target_state) -+{ -+ /* Return as nothing is to be done on waking up from retention. */ -+ if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET) -+ return; -+ -+ /* Perform system domain restore if woken up from system suspend */ -+ if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF) -+ /* -+ * At this point, the Distributor must be powered on to be ready -+ * to have its state restored. The Redistributor will be powered -+ * on as part of gicv3_rdistif_init_restore. -+ */ -+ /* arm_system_pwr_domain_resume() */; -+ -+ css_pwr_domain_on_finisher_common(target_state); -+ -+ /* Enable the gic cpu interface */ -+ /* plat_arm_gic_cpuif_enable() */; -+} -+ -+/******************************************************************************* -+ * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard -+ * platform will take care of registering the handlers with PSCI. -+ ******************************************************************************/ -+plat_psci_ops_t plat_arm_psci_pm_ops = { -+ .pwr_domain_on = css_pwr_domain_on, -+ .pwr_domain_on_finish = css_pwr_domain_on_finish, -+ .pwr_domain_on_finish_late = css_pwr_domain_on_finish_late, -+ .pwr_domain_off = css_pwr_domain_off, -+ .pwr_domain_off_early = css_pwr_domain_off_early, -+ .pwr_domain_pwr_down_wfi = css_pwr_down_wfi, -+ .validate_power_state = css_validate_power_state, -+ .cpu_standby = css_cpu_standby, -+ .pwr_domain_suspend = css_pwr_domain_suspend, -+ .pwr_domain_suspend_finish = css_pwr_domain_suspend_finish, -+}; -diff --git a/lib/utils/arm_scmi/css/mhu/css_mhu_doorbell.c b/lib/utils/arm_scmi/css/mhu/css_mhu_doorbell.c -new file mode 100644 -index 000000000000..887d0312f3f2 ---- /dev/null -+++ b/lib/utils/arm_scmi/css/mhu/css_mhu_doorbell.c -@@ -0,0 +1,27 @@ -+/* -+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved. 
-+ * -+ * SPDX-License-Identifier: BSD-3-Clause -+ */ -+#include -+#include -+#include "mhu.h" -+ -+void mhu_ring_doorbell(struct scmi_channel_plat_info *plat_info) -+{ -+ unsigned int msg; -+ mbox_reg_desc_t *regs = (mbox_reg_desc_t *)plat_info->db_reg_addr; -+ -+ /* clear the fifo */ -+ while (regs->msg_status[MAILBOX_SECURE_PSCI_CHANNEL + 2].bits.num_msg) { -+ msg = regs->mbox_msg[MAILBOX_SECURE_PSCI_CHANNEL + 2].val; -+ } -+ -+ /* clear pending */ -+ msg = regs->mbox_irq[0].irq_status_clr.val; -+ msg |= (1 << ((MAILBOX_SECURE_PSCI_CHANNEL + 2) * 2)); -+ regs->mbox_irq[0].irq_status_clr.val = msg; -+ -+ /* door bell the esos */ -+ regs->mbox_msg[MAILBOX_SECURE_PSCI_CHANNEL].val = 'c'; -+} -diff --git a/lib/utils/arm_scmi/css/mhu/mhu.h b/lib/utils/arm_scmi/css/mhu/mhu.h -new file mode 100644 -index 000000000000..fb3e7574c6c4 ---- /dev/null -+++ b/lib/utils/arm_scmi/css/mhu/mhu.h -@@ -0,0 +1,130 @@ -+/* -+ * Arm SCP/MCP Software -+ * Copyright (c) 2015-2021, Arm Limited and Contributors. All rights reserved. -+ * -+ * SPDX-License-Identifier: BSD-3-Clause -+ */ -+ -+#ifndef INTERNAL_MHU_H -+#define INTERNAL_MHU_H -+ -+/* mailbox register description */ -+/* mailbox sysconfig */ -+typedef union mbox_sysconfig { -+ unsigned int val; -+ struct { -+ unsigned int resetn:1; -+ unsigned int reserved:31; -+ } bits; -+} mbox_sysconfig_t; -+ -+ -+typedef union mbox_msg { -+ unsigned int val; -+ struct { -+ unsigned int msg:32; -+ } bits; -+} mbox_msg_t; -+ -+typedef union mbox_fifo_status { -+ unsigned int val; -+ struct { -+ unsigned int is_full:1; -+ unsigned int reserved:31; -+ } bits; -+} mbox_fifo_status_t; -+ -+ -+typedef union mbox_msg_status { -+ unsigned int val; -+ struct { -+ unsigned int num_msg:4; -+ unsigned int reserved:28; -+ } bits; -+} mbox_msg_status_t; -+ -+typedef union mbox_irq_status { -+ unsigned int val; -+ struct { -+ unsigned int new_msg0_status:1; -+ unsigned int not_msg0_full:1; -+ unsigned int new_msg1_status:1; -+ unsigned int not_msg1_full:1; -+ unsigned int new_msg2_status:1; -+ unsigned int not_msg2_full:1; -+ unsigned int new_msg3_status:1; -+ unsigned int not_msg3_full:1; -+ unsigned int reserved:24; -+ } bits; -+} mbox_irq_status_t; -+ -+typedef union mbox_irq_status_clr { -+ unsigned int val; -+ struct { -+ unsigned int new_msg0_clr:1; -+ unsigned int not_msg0_full_clr:1; -+ unsigned int new_msg1_clr:1; -+ unsigned int not_msg1_full_clr:1; -+ unsigned int new_msg2_clr:1; -+ unsigned int not_msg2_full_clr:1; -+ unsigned int new_msg3_clr:1; -+ unsigned int not_msg3_full_clr:1; -+ unsigned int reserved:24; -+ } bits; -+} mbox_irq_status_clr_t; -+ -+typedef union mbox_irq_enable_set { -+ unsigned int val; -+ struct { -+ unsigned int new_msg0_irq_en:1; -+ unsigned int not_msg0_full_irq_en:1; -+ unsigned int new_msg1_irq_en:1; -+ unsigned int not_msg1_full_irq_en:1; -+ unsigned int new_msg2_irq_en:1; -+ unsigned int not_msg2_full_irq_en:1; -+ unsigned int new_msg3_irq_en:1; -+ unsigned int not_msg3_full_irq_en:1; -+ unsigned int reserved:24; -+ } bits; -+} mbox_irq_enable_set_t; -+ -+typedef union mbox_irq_enable_clr { -+ unsigned int val; -+ struct { -+ unsigned int new_msg0_irq_clr:1; -+ unsigned int not_msg0_full_irq_clr:1; -+ unsigned int new_msg1_irq_clr:1; -+ unsigned int not_msg1_full_irq_clr:1; -+ unsigned int new_msg2_irq_clr:1; -+ unsigned int not_msg2_full_irq_clr:1; -+ unsigned int new_msg3_irq_clr:1; -+ unsigned int not_msg3_full_irq_clr:1; -+ unsigned int reserved:24; -+ } bits; -+} mbox_irq_enable_clr_t; -+ -+typedef struct mbox_irq { -+ 
mbox_irq_status_t irq_status; -+ mbox_irq_status_clr_t irq_status_clr; -+ mbox_irq_enable_set_t irq_en_set; -+ mbox_irq_enable_clr_t irq_en_clr; -+} mbox_irq_t; -+ -+/*! -+ * \brief MHU Register Definitions -+ */ -+typedef struct mhu_reg { -+ unsigned int mbox_version; /* 0x00 */ -+ unsigned int reserved0[3]; /* 0x4 0x8 0xc */ -+ mbox_sysconfig_t mbox_sysconfig; /* 0x10 */ -+ unsigned int reserved1[11]; /* 0x14, 0x18, 0x1c, 0x20, 0x24, 0x28, 0x2c, 0x30, 0x34, 0x38, 0x3c */ -+ mbox_msg_t mbox_msg[4]; /* 0x40, 0x44, 0x48, 0x4c */ -+ unsigned int reserved2[12]; -+ mbox_fifo_status_t fifo_status[4]; /* 0x80, 0x84, 0x88, 0x8c */ -+ unsigned int reserved3[12]; -+ mbox_msg_status_t msg_status[4]; /* 0xc0 */ -+ unsigned int reserved4[12]; -+ mbox_irq_t mbox_irq[2]; /* 0x100 */ -+} mbox_reg_desc_t; -+ -+#endif /* INTERNAL_MHU_H */ -diff --git a/lib/utils/arm_scmi/css/scmi/scmi_common.c b/lib/utils/arm_scmi/css/scmi/scmi_common.c -new file mode 100644 -index 000000000000..1c56da290f10 ---- /dev/null -+++ b/lib/utils/arm_scmi/css/scmi/scmi_common.c -@@ -0,0 +1,228 @@ -+/* -+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved. -+ * -+ * SPDX-License-Identifier: BSD-3-Clause -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define scmi_lock_init(lock) -+#define scmi_lock_get(lock) spin_lock(lock) -+#define scmi_lock_release(lock) spin_unlock(lock) -+ -+ -+/* -+ * Private helper function to get exclusive access to SCMI channel. -+ */ -+void scmi_get_channel(scmi_channel_t *ch) -+{ -+ if (!ch->lock) -+ sbi_hart_hang(); -+ -+ scmi_lock_get(ch->lock); -+ -+ /* Make sure any previous command has finished */ -+ if (!SCMI_IS_CHANNEL_FREE( -+ ((mailbox_mem_t *)(ch->info->scmi_mbx_mem))->status)) -+ sbi_hart_hang(); -+} -+ -+/* -+ * Private helper function to transfer ownership of channel from AP to SCP. -+ */ -+void scmi_send_sync_command(scmi_channel_t *ch) -+{ -+ mailbox_mem_t *mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem); -+ -+ SCMI_MARK_CHANNEL_BUSY(mbx_mem->status); -+ -+ /* -+ * Ensure that any write to the SCMI payload area is seen by SCP before -+ * we write to the doorbell register. If these 2 writes were reordered -+ * by the CPU then SCP would read stale payload data -+ */ -+ /* dmbst(); */ -+ asm volatile ("fence iorw, iorw"); -+ -+ ch->info->ring_doorbell(ch->info); -+ /* -+ * Ensure that the write to the doorbell register is ordered prior to -+ * checking whether the channel is free. -+ */ -+ /* dmbsy(); */ -+ asm volatile ("fence iorw, iorw"); -+ -+ /* Wait for channel to be free */ -+ while (!SCMI_IS_CHANNEL_FREE(mbx_mem->status)) -+ ; -+ -+ /* -+ * Ensure that any read to the SCMI payload area is done after reading -+ * mailbox status. If these 2 reads were reordered then the CPU would -+ * read invalid payload data -+ */ -+ /* dmbld(); */ -+ asm volatile ("fence iorw, iorw"); -+} -+ -+/* -+ * Private helper function to release exclusive access to SCMI channel. -+ */ -+void scmi_put_channel(scmi_channel_t *ch) -+{ -+ /* Make sure any previous command has finished */ -+ if (!SCMI_IS_CHANNEL_FREE( -+ ((mailbox_mem_t *)(ch->info->scmi_mbx_mem))->status)) -+ sbi_hart_hang(); -+ -+ if (!ch->lock) -+ sbi_hart_hang(); -+ -+ scmi_lock_release(ch->lock); -+} -+ -+/* -+ * API to query the SCMI protocol version. 
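A condensed sketch of the ordering contract scmi_get_channel()/scmi_send_sync_command()/scmi_put_channel() rely on: payload writes must reach the shared mailbox before the doorbell, and the response may only be read after the SCP has marked the channel free again. The struct and the "free" bit below are simplified stand-ins, not the patch's mailbox layout; the fence targets RISC-V.

/* Illustrative sketch, not part of the patch (RISC-V). */
#include <stdint.h>

#define MBOX_STATUS_FREE  (1u << 0)     /* assumed "channel free" bit */
#define io_fence()        __asm__ volatile("fence iorw, iorw" ::: "memory")

struct scmi_mbox {
    volatile uint32_t status;           /* bit 0: channel free */
    uint32_t len;
    uint32_t msg_header;
    uint32_t payload[4];
};

typedef void (*ring_doorbell_fn)(void); /* hypothetical platform hook */

void mbox_send_sync(struct scmi_mbox *mbx, ring_doorbell_fn ring)
{
    mbx->status &= ~MBOX_STATUS_FREE;   /* mark the channel busy */

    io_fence();     /* 1. payload visible to the SCP before the doorbell */
    ring();
    io_fence();     /* 2. doorbell ordered before polling for completion */

    while (!(mbx->status & MBOX_STATUS_FREE))
        ;           /* SCP sets "free" once the response is in place */

    io_fence();     /* 3. response payload read only after seeing "free" */
}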
-+ */ -+int scmi_proto_version(void *p, uint32_t proto_id, uint32_t *version) -+{ -+ mailbox_mem_t *mbx_mem; -+ unsigned int token = 0; -+ int ret; -+ scmi_channel_t *ch = (scmi_channel_t *)p; -+ -+ validate_scmi_channel(ch); -+ -+ scmi_get_channel(ch); -+ -+ mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem); -+ mbx_mem->msg_header = SCMI_MSG_CREATE(proto_id, SCMI_PROTO_VERSION_MSG, -+ token); -+ mbx_mem->len = SCMI_PROTO_VERSION_MSG_LEN; -+ mbx_mem->flags = SCMI_FLAG_RESP_POLL; -+ -+ csi_dcache_clean_invalid_range((uintptr_t)ch->info->scmi_mbx_mem, 0x80); -+ -+ scmi_send_sync_command(ch); -+ -+ /* Get the return values */ -+ SCMI_PAYLOAD_RET_VAL2(mbx_mem->payload, ret, *version); -+ if (mbx_mem->len != SCMI_PROTO_VERSION_RESP_LEN) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ if (token != SCMI_MSG_GET_TOKEN(mbx_mem->msg_header)) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ scmi_put_channel(ch); -+ -+ return ret; -+} -+ -+/* -+ * API to query the protocol message attributes for a SCMI protocol. -+ */ -+int scmi_proto_msg_attr(void *p, uint32_t proto_id, -+ uint32_t command_id, uint32_t *attr) -+{ -+ mailbox_mem_t *mbx_mem; -+ unsigned int token = 0; -+ int ret; -+ scmi_channel_t *ch = (scmi_channel_t *)p; -+ -+ validate_scmi_channel(ch); -+ -+ scmi_get_channel(ch); -+ -+ mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem); -+ mbx_mem->msg_header = SCMI_MSG_CREATE(proto_id, -+ SCMI_PROTO_MSG_ATTR_MSG, token); -+ mbx_mem->len = SCMI_PROTO_MSG_ATTR_MSG_LEN; -+ mbx_mem->flags = SCMI_FLAG_RESP_POLL; -+ SCMI_PAYLOAD_ARG1(mbx_mem->payload, command_id); -+ -+ csi_dcache_clean_invalid_range((uintptr_t)ch->info->scmi_mbx_mem, 0x80); -+ -+ scmi_send_sync_command(ch); -+ -+ /* Get the return values */ -+ SCMI_PAYLOAD_RET_VAL2(mbx_mem->payload, ret, *attr); -+ if (mbx_mem->len != SCMI_PROTO_MSG_ATTR_RESP_LEN) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ if (token != SCMI_MSG_GET_TOKEN(mbx_mem->msg_header)) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ scmi_put_channel(ch); -+ -+ return ret; -+} -+ -+/* -+ * SCMI Driver initialization API. Returns initialized channel on success -+ * or NULL on error. The return type is an opaque void pointer. 
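The SCMI_MSG_CREATE()/SCMI_MSG_GET_TOKEN() macros used above are defined in headers not shown in this hunk; as an assumption, the sketch below packs the header the way the SCMI specification lays it out (message id in bits [7:0], protocol id in bits [17:10], token in bits [27:18]).

/* Illustrative sketch, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define SCMI_MSG_ID_SHIFT     0    /* bits [7:0]   message id  */
#define SCMI_PROTO_ID_SHIFT  10    /* bits [17:10] protocol id */
#define SCMI_TOKEN_SHIFT     18    /* bits [27:18] token       */

static uint32_t scmi_msg_create(uint32_t proto, uint32_t msg_id, uint32_t token)
{
    return (msg_id << SCMI_MSG_ID_SHIFT) |
           (proto  << SCMI_PROTO_ID_SHIFT) |
           (token  << SCMI_TOKEN_SHIFT);
}

static uint32_t scmi_msg_get_token(uint32_t hdr)
{
    return (hdr >> SCMI_TOKEN_SHIFT) & 0x3ffu;
}

int main(void)
{
    /* PROTOCOL_VERSION (0x0) on the power domain protocol (0x11), token 0 */
    uint32_t hdr = scmi_msg_create(0x11, 0x0, 0);

    printf("header = 0x%08x, token = %u\n", hdr, scmi_msg_get_token(hdr));
    return 0;
}

Checking the echoed token against the header of the response, as the functions above do, is what detects a stale or mismatched reply in the shared-memory channel.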
-+ */ -+void *scmi_init(scmi_channel_t *ch) -+{ -+ uint32_t version; -+ int ret; -+ -+ if (!ch || !ch->info || !ch->info->db_reg_addr || !ch->info->db_modify_mask || -+ !ch->info->db_preserve_mask || !ch->info->ring_doorbell || -+ !ch->lock) -+ sbi_hart_hang(); -+ -+ scmi_lock_init(ch->lock); -+ -+ ch->is_initialized = 1; -+ -+ ret = scmi_proto_version(ch, SCMI_PWR_DMN_PROTO_ID, &version); -+ if (ret != SCMI_E_SUCCESS) { -+ sbi_printf("SCMI power domain protocol version message failed\n"); -+ goto error; -+ } -+ -+ if (!is_scmi_version_compatible(SCMI_PWR_DMN_PROTO_VER, version)) { -+ sbi_printf("SCMI power domain protocol version 0x%x incompatible with driver version 0x%x\n", -+ version, SCMI_PWR_DMN_PROTO_VER); -+ goto error; -+ } -+ -+ sbi_printf("SCMI power domain protocol version 0x%x detected\n", version); -+ -+ ret = scmi_proto_version(ch, SCMI_SYS_PWR_PROTO_ID, &version); -+ if ((ret != SCMI_E_SUCCESS)) { -+ sbi_printf("SCMI system power protocol version message failed\n"); -+ goto error; -+ } -+ -+ if (!is_scmi_version_compatible(SCMI_SYS_PWR_PROTO_VER, version)) { -+ sbi_printf("SCMI system power management protocol version 0x%x incompatible with driver version 0x%x\n", -+ version, SCMI_SYS_PWR_PROTO_VER); -+ goto error; -+ } -+ -+ sbi_printf("SCMI system power management protocol version 0x%x detected\n", -+ version); -+ -+ sbi_printf("SCMI driver initialized\n"); -+ -+ return (void *)ch; -+ -+error: -+ ch->is_initialized = 0; -+ return NULL; -+} -diff --git a/lib/utils/arm_scmi/css/scmi/scmi_pwr_dmn_proto.c b/lib/utils/arm_scmi/css/scmi/scmi_pwr_dmn_proto.c -new file mode 100644 -index 000000000000..5a4f7347ceff ---- /dev/null -+++ b/lib/utils/arm_scmi/css/scmi/scmi_pwr_dmn_proto.c -@@ -0,0 +1,102 @@ -+/* -+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved. -+ * -+ * SPDX-License-Identifier: BSD-3-Clause -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+/* -+ * API to set the SCMI power domain power state. -+ */ -+int scmi_pwr_state_set(void *p, uint32_t domain_id, -+ uint32_t scmi_pwr_state) -+{ -+ mailbox_mem_t *mbx_mem; -+ unsigned int token = 0; -+ int ret; -+ -+ /* -+ * Only asynchronous mode of `set power state` command is allowed on -+ * application processors. -+ */ -+ uint32_t pwr_state_set_msg_flag = SCMI_PWR_STATE_SET_FLAG_ASYNC; -+ scmi_channel_t *ch = (scmi_channel_t *)p; -+ -+ validate_scmi_channel(ch); -+ -+ scmi_get_channel(ch); -+ -+ mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem); -+ mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_PWR_DMN_PROTO_ID, -+ SCMI_PWR_STATE_SET_MSG, token); -+ mbx_mem->len = SCMI_PWR_STATE_SET_MSG_LEN; -+ mbx_mem->flags = SCMI_FLAG_RESP_POLL; -+ SCMI_PAYLOAD_ARG3(mbx_mem->payload, pwr_state_set_msg_flag, -+ domain_id, scmi_pwr_state); -+ -+ csi_dcache_clean_invalid_range((uintptr_t)ch->info->scmi_mbx_mem, 0x80); -+ scmi_send_sync_command(ch); -+ -+ /* Get the return values */ -+ SCMI_PAYLOAD_RET_VAL1(mbx_mem->payload, ret); -+ if (mbx_mem->len != SCMI_PWR_STATE_SET_RESP_LEN) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ if (token != SCMI_MSG_GET_TOKEN(mbx_mem->msg_header)) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ scmi_put_channel(ch); -+ -+ return ret; -+} -+ -+/* -+ * API to get the SCMI power domain power state. 
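scmi_init() above gates the driver on is_scmi_version_compatible(); that helper is not in this hunk, so the following is an assumption about its behaviour: the major version (upper 16 bits in the SCMI encoding) must match the driver's, and the firmware's minor version (lower 16 bits) must be at least the driver's.

/* Illustrative sketch, not part of the patch. */
#include <stdbool.h>
#include <stdint.h>

#define SCMI_VER_MAJOR(v)  (((v) >> 16) & 0xffffu)
#define SCMI_VER_MINOR(v)  ((v) & 0xffffu)

bool scmi_version_compatible(uint32_t driver_ver, uint32_t fw_ver)
{
    if (SCMI_VER_MAJOR(driver_ver) != SCMI_VER_MAJOR(fw_ver))
        return false;                 /* incompatible protocol generation */
    return SCMI_VER_MINOR(fw_ver) >= SCMI_VER_MINOR(driver_ver);
}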
-+ */ -+int scmi_pwr_state_get(void *p, uint32_t domain_id, -+ uint32_t *scmi_pwr_state) -+{ -+ mailbox_mem_t *mbx_mem; -+ unsigned int token = 0; -+ int ret; -+ scmi_channel_t *ch = (scmi_channel_t *)p; -+ -+ validate_scmi_channel(ch); -+ -+ scmi_get_channel(ch); -+ -+ mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem); -+ mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_PWR_DMN_PROTO_ID, -+ SCMI_PWR_STATE_GET_MSG, token); -+ mbx_mem->len = SCMI_PWR_STATE_GET_MSG_LEN; -+ mbx_mem->flags = SCMI_FLAG_RESP_POLL; -+ SCMI_PAYLOAD_ARG1(mbx_mem->payload, domain_id); -+ -+ csi_dcache_clean_invalid_range((uintptr_t)ch->info->scmi_mbx_mem, 0x80); -+ scmi_send_sync_command(ch); -+ -+ /* Get the return values */ -+ SCMI_PAYLOAD_RET_VAL2(mbx_mem->payload, ret, *scmi_pwr_state); -+ if (mbx_mem->len != SCMI_PWR_STATE_GET_RESP_LEN) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ if (token != SCMI_MSG_GET_TOKEN(mbx_mem->msg_header)) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ scmi_put_channel(ch); -+ -+ return ret; -+} -diff --git a/lib/utils/arm_scmi/css/scmi/scmi_sys_pwr_proto.c b/lib/utils/arm_scmi/css/scmi/scmi_sys_pwr_proto.c -new file mode 100644 -index 000000000000..fc7c30e50c6d ---- /dev/null -+++ b/lib/utils/arm_scmi/css/scmi/scmi_sys_pwr_proto.c -@@ -0,0 +1,90 @@ -+/* -+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved. -+ * -+ * SPDX-License-Identifier: BSD-3-Clause -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+/* -+ * API to set the SCMI system power state -+ */ -+int scmi_sys_pwr_state_set(void *p, uint32_t flags, uint32_t system_state) -+{ -+ mailbox_mem_t *mbx_mem; -+ unsigned int token = 0; -+ int ret; -+ scmi_channel_t *ch = (scmi_channel_t *)p; -+ -+ validate_scmi_channel(ch); -+ -+ scmi_get_channel(ch); -+ -+ mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem); -+ mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_SYS_PWR_PROTO_ID, -+ SCMI_SYS_PWR_STATE_SET_MSG, token); -+ mbx_mem->len = SCMI_SYS_PWR_STATE_SET_MSG_LEN; -+ mbx_mem->flags = SCMI_FLAG_RESP_POLL; -+ SCMI_PAYLOAD_ARG2(mbx_mem->payload, flags, system_state); -+ -+ scmi_send_sync_command(ch); -+ -+ /* Get the return values */ -+ SCMI_PAYLOAD_RET_VAL1(mbx_mem->payload, ret); -+ if (mbx_mem->len != SCMI_SYS_PWR_STATE_SET_RESP_LEN) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ if (token != SCMI_MSG_GET_TOKEN(mbx_mem->msg_header)) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ scmi_put_channel(ch); -+ -+ return ret; -+} -+ -+/* -+ * API to get the SCMI system power state -+ */ -+int scmi_sys_pwr_state_get(void *p, uint32_t *system_state) -+{ -+ mailbox_mem_t *mbx_mem; -+ unsigned int token = 0; -+ int ret; -+ scmi_channel_t *ch = (scmi_channel_t *)p; -+ -+ validate_scmi_channel(ch); -+ -+ scmi_get_channel(ch); -+ -+ mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem); -+ mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_SYS_PWR_PROTO_ID, -+ SCMI_SYS_PWR_STATE_GET_MSG, token); -+ mbx_mem->len = SCMI_SYS_PWR_STATE_GET_MSG_LEN; -+ mbx_mem->flags = SCMI_FLAG_RESP_POLL; -+ -+ scmi_send_sync_command(ch); -+ -+ /* Get the return values */ -+ SCMI_PAYLOAD_RET_VAL2(mbx_mem->payload, ret, *system_state); -+ if (mbx_mem->len != SCMI_SYS_PWR_STATE_GET_RESP_LEN) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ if (token != SCMI_MSG_GET_TOKEN(mbx_mem->msg_header)) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ 
scmi_put_channel(ch); -+ -+ return ret; -+} -diff --git a/lib/utils/arm_scmi/css/scp/css_pm_scmi.c b/lib/utils/arm_scmi/css/scp/css_pm_scmi.c -new file mode 100644 -index 000000000000..a88eb9f4ce0e ---- /dev/null -+++ b/lib/utils/arm_scmi/css/scp/css_pm_scmi.c -@@ -0,0 +1,418 @@ -+/* -+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved. -+ * -+ * SPDX-License-Identifier: BSD-3-Clause -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include <../../../psci/psci_private.h> -+ -+/* -+ * This file implements the SCP helper functions using SCMI protocol. -+ */ -+ -+/* -+ * SCMI power state parameter bit field encoding for ARM CSS platforms. -+ * -+ * 31 20 19 16 15 12 11 8 7 4 3 0 -+ * +-------------------------------------------------------------+ -+ * | SBZ | Max level | Level 3 | Level 2 | Level 1 | Level 0 | -+ * | | | state | state | state | state | -+ * +-------------------------------------------------------------+ -+ * -+ * `Max level` encodes the highest level that has a valid power state -+ * encoded in the power state. -+ */ -+#define SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT 16 -+#define SCMI_PWR_STATE_MAX_PWR_LVL_WIDTH 4 -+#define SCMI_PWR_STATE_MAX_PWR_LVL_MASK \ -+ ((1 << SCMI_PWR_STATE_MAX_PWR_LVL_WIDTH) - 1) -+#define SCMI_SET_PWR_STATE_MAX_PWR_LVL(_power_state, _max_level) \ -+ (_power_state) |= ((_max_level) & SCMI_PWR_STATE_MAX_PWR_LVL_MASK)\ -+ << SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT -+#define SCMI_GET_PWR_STATE_MAX_PWR_LVL(_power_state) \ -+ (((_power_state) >> SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT) \ -+ & SCMI_PWR_STATE_MAX_PWR_LVL_MASK) -+ -+#define SCMI_PWR_STATE_LVL_WIDTH 4 -+#define SCMI_PWR_STATE_LVL_MASK \ -+ ((1 << SCMI_PWR_STATE_LVL_WIDTH) - 1) -+#define SCMI_SET_PWR_STATE_LVL(_power_state, _level, _level_state) \ -+ (_power_state) |= ((_level_state) & SCMI_PWR_STATE_LVL_MASK) \ -+ << (SCMI_PWR_STATE_LVL_WIDTH * (_level)) -+#define SCMI_GET_PWR_STATE_LVL(_power_state, _level) \ -+ (((_power_state) >> (SCMI_PWR_STATE_LVL_WIDTH * (_level))) & \ -+ SCMI_PWR_STATE_LVL_MASK) -+ -+/* -+ * The SCMI power state enumeration for a power domain level -+ */ -+typedef enum { -+ scmi_power_state_off = 0, -+ scmi_power_state_on = 1, -+ scmi_power_state_sleep = 2, -+} scmi_power_state_t; -+ -+/* -+ * The global handles for invoking the SCMI driver APIs after the driver -+ * has been initialized. -+ */ -+static void *scmi_handles[PLAT_ARM_SCMI_CHANNEL_COUNT]; -+ -+/* The global SCMI channels array */ -+static scmi_channel_t scmi_channels[PLAT_ARM_SCMI_CHANNEL_COUNT]; -+ -+/* -+ * Channel ID for the default SCMI channel. -+ * The default channel is used to issue SYSTEM level SCMI requests and is -+ * initialized to the channel which has the boot cpu as its resource. -+ */ -+static uint32_t default_scmi_channel_id; -+ -+/* -+ * TODO: Allow use of channel specific lock instead of using a single lock for -+ * all the channels. -+ */ -+ARM_SCMI_INSTANTIATE_LOCK; -+ -+/* -+ * Function to obtain the SCMI Domain ID and SCMI Channel number from the linear -+ * core position. The SCMI Channel number is encoded in the upper 16 bits and -+ * the Domain ID is encoded in the lower 16 bits in each entry of the mapping -+ * array exported by the platform. 
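A small sketch of the composite-ID encoding just described: each entry of the platform's core-position-to-SCMI map carries the channel number in its upper 16 bits and the power-domain ID in its lower 16 bits. The macro names here are illustrative; the patch uses its own GET_SCMI_CHANNEL_ID()/GET_SCMI_DOMAIN_ID() helpers.

/* Illustrative sketch, not part of the patch. */
#include <stdint.h>

#define SCMI_CHANNEL_SHIFT  16
#define SCMI_DOMAIN_MASK    0xffffu

#define MAKE_SCMI_COMPOSITE_ID(ch, dmn)  (((ch) << SCMI_CHANNEL_SHIFT) | (dmn))
#define GET_CHANNEL_ID(id)               ((id) >> SCMI_CHANNEL_SHIFT)
#define GET_DOMAIN_ID(id)                ((id) & SCMI_DOMAIN_MASK)

/* hypothetical map: four cores, all on channel 0, domain id == core position */
static const uint32_t example_map[] = {
    MAKE_SCMI_COMPOSITE_ID(0, 0),
    MAKE_SCMI_COMPOSITE_ID(0, 1),
    MAKE_SCMI_COMPOSITE_ID(0, 2),
    MAKE_SCMI_COMPOSITE_ID(0, 3),
};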
-+ */ -+static void css_scp_core_pos_to_scmi_channel(unsigned int core_pos, -+ unsigned int *scmi_domain_id, unsigned int *scmi_channel_id) -+{ -+ unsigned int composite_id; -+ unsigned int *map_id = plat_get_power_domain_tree_desc()[CLUSTER_INDEX_IN_CPU_TOPOLOGY] > 1 ? -+ plat_css_core_pos_to_scmi_dmn_id_map[1] : -+ plat_css_core_pos_to_scmi_dmn_id_map[0]; -+ -+ composite_id = map_id[core_pos]; -+ -+ *scmi_channel_id = GET_SCMI_CHANNEL_ID(composite_id); -+ *scmi_domain_id = GET_SCMI_DOMAIN_ID(composite_id); -+} -+ -+/* -+ * Helper function to turn off a CPU power domain and its parent power domains -+ * if applicable. -+ */ -+void css_scp_off(const struct psci_power_state *target_state) -+{ -+ unsigned int lvl = 0, channel_id, domain_id; -+ int ret; -+ uint32_t scmi_pwr_state = 0, cpu_idx; -+ unsigned int hartid = current_hartid(); -+ -+ cpu_idx = plat_core_pos_by_mpidr(hartid); -+ -+ /* At-least the CPU level should be specified to be OFF */ -+ if (target_state->pwr_domain_state[ARM_PWR_LVL0] != ARM_LOCAL_STATE_OFF) { -+ sbi_printf("%s:%d, wrong power domain state\n", -+ __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ /* PSCI CPU OFF cannot be used to turn OFF system power domain */ -+ if (css_system_pwr_state(target_state) != ARM_LOCAL_STATE_RUN) { -+ sbi_printf("%s:%d, wrong power domain state\n", -+ __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ for (; lvl <= PLAT_MAX_PWR_LVL; lvl++) { -+ if (target_state->pwr_domain_state[lvl] == ARM_LOCAL_STATE_RUN) -+ break; -+ -+ if (target_state->pwr_domain_state[lvl] != ARM_LOCAL_STATE_OFF) { -+ sbi_printf("%s:%d, wrong power domain state\n", -+ __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, lvl, -+ scmi_power_state_off); -+ } -+ -+ SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, lvl - 1); -+ -+ css_scp_core_pos_to_scmi_channel(cpu_idx, &domain_id, &channel_id); -+ ret = scmi_pwr_state_set(scmi_handles[channel_id], -+ domain_id, scmi_pwr_state); -+ if (ret != SCMI_E_QUEUED && ret != SCMI_E_SUCCESS) { -+ sbi_printf("SCMI set power state command return 0x%x unexpected\n", -+ ret); -+ sbi_hart_hang(); -+ } -+} -+ -+/* -+ * Helper function to turn ON a CPU power domain and its parent power domains -+ * if applicable. -+ */ -+void css_scp_on(u_register_t mpidr) -+{ -+ unsigned int lvl = 0, channel_id, core_pos, domain_id; -+ int ret; -+ uint32_t scmi_pwr_state = 0; -+ -+ core_pos = plat_core_pos_by_mpidr(mpidr); -+ if (core_pos >= PLATFORM_CORE_COUNT) { -+ sbi_printf("%s:%d, node_idx beyond the boundary\n", -+ __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ for (; lvl <= PLAT_MAX_PWR_LVL; lvl++) -+ SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, lvl, -+ scmi_power_state_on); -+ -+ SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, lvl - 1); -+ -+ css_scp_core_pos_to_scmi_channel(core_pos, &domain_id, -+ &channel_id); -+ ret = scmi_pwr_state_set(scmi_handles[channel_id], -+ domain_id, scmi_pwr_state); -+ if (ret != SCMI_E_QUEUED && ret != SCMI_E_SUCCESS) { -+ sbi_printf("SCMI set power state command return 0x%x unexpected\n", -+ ret); -+ sbi_hart_hang(); -+ } -+} -+ -+/* -+ * Helper function to get the power state of a power domain node as reported -+ * by the SCP. 
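As a worked example of the SCMI composite power-state word that css_scp_off() and css_scp_on() assemble, the snippet below reuses the field constants defined earlier in this file (4 bits per level, "max level" at bit 16); only the example values are new.

/* Illustrative sketch, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT  16
#define SCMI_PWR_STATE_LVL_WIDTH          4

int main(void)
{
    const uint32_t scmi_off = 0, scmi_on = 1;
    uint32_t state = 0;

    /* core (level 0) and cluster (level 1) both OFF, max valid level = 1 */
    state |= scmi_off << (SCMI_PWR_STATE_LVL_WIDTH * 0);
    state |= scmi_off << (SCMI_PWR_STATE_LVL_WIDTH * 1);
    state |= 1u << SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT;
    printf("core+cluster OFF -> 0x%08x\n", state);   /* 0x00010000 */

    /* core and cluster both ON, max valid level = 1 */
    state  = scmi_on << (SCMI_PWR_STATE_LVL_WIDTH * 0);
    state |= scmi_on << (SCMI_PWR_STATE_LVL_WIDTH * 1);
    state |= 1u << SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT;
    printf("core+cluster ON  -> 0x%08x\n", state);   /* 0x00010011 */
    return 0;
}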
-+ */ -+int css_scp_get_power_state(u_register_t mpidr, unsigned int power_level) -+{ -+ int ret; -+ uint32_t scmi_pwr_state = 0, lvl_state; -+ unsigned int channel_id, cpu_idx, domain_id; -+ -+ cpu_idx = plat_core_pos_by_mpidr(mpidr); -+ -+ if (cpu_idx >= PLATFORM_CORE_COUNT) { -+ sbi_printf("%s:%d, node_idx beyond the boundary\n", -+ __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ /* We don't support get power state at the system power domain level */ -+ if ((power_level > PLAT_MAX_PWR_LVL) || -+ (power_level == CSS_SYSTEM_PWR_DMN_LVL)) { -+ sbi_printf("Invalid power level %u specified for SCMI get power state\n", -+ power_level); -+ return PSCI_E_INVALID_PARAMS; -+ } -+ -+ css_scp_core_pos_to_scmi_channel(cpu_idx, &domain_id, &channel_id); -+ ret = scmi_pwr_state_get(scmi_handles[channel_id], -+ domain_id, &scmi_pwr_state); -+ -+ if (ret != SCMI_E_SUCCESS) { -+ sbi_printf("SCMI get power state command return 0x%x unexpected\n", -+ ret); -+ return PSCI_E_INVALID_PARAMS; -+ } -+ -+ /* -+ * Find the maximum power level described in the get power state -+ * command. If it is less than the requested power level, then assume -+ * the requested power level is ON. -+ */ -+ if (SCMI_GET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state) < power_level) -+ return HW_ON; -+ -+ lvl_state = SCMI_GET_PWR_STATE_LVL(scmi_pwr_state, power_level); -+ if (lvl_state == scmi_power_state_on) -+ return HW_ON; -+ -+ if ((lvl_state != scmi_power_state_off) && -+ (lvl_state != scmi_power_state_sleep)) { -+ sbi_printf("wrong power state, :%d\n", ret); -+ sbi_hart_hang(); -+ -+ } -+ -+ return HW_OFF; -+} -+ -+void plat_arm_pwrc_setup(void) -+{ -+ unsigned int composite_id, idx, cpu_idx; -+ unsigned int hartid = current_hartid(); -+ -+ cpu_idx = plat_core_pos_by_mpidr(hartid); -+ -+ for (idx = 0; idx < PLAT_ARM_SCMI_CHANNEL_COUNT; idx++) { -+ sbi_printf("Initializing SCMI driver on channel %d\n", idx); -+ -+ scmi_channels[idx].info = plat_css_get_scmi_info(idx); -+ scmi_channels[idx].lock = ARM_SCMI_LOCK_GET_INSTANCE; -+ scmi_handles[idx] = scmi_init(&scmi_channels[idx]); -+ -+ if (scmi_handles[idx] == NULL) { -+ sbi_printf("SCMI Initialization failed on channel %d\n", idx); -+ sbi_hart_hang(); -+ } -+ } -+ -+ unsigned int *map_id = plat_get_power_domain_tree_desc()[CLUSTER_INDEX_IN_CPU_TOPOLOGY] > 1 ? -+ plat_css_core_pos_to_scmi_dmn_id_map[1] : -+ plat_css_core_pos_to_scmi_dmn_id_map[0]; -+ -+ composite_id = map_id[cpu_idx]; -+ default_scmi_channel_id = GET_SCMI_CHANNEL_ID(composite_id); -+} -+ -+/****************************************************************************** -+ * This function overrides the default definition for ARM platforms. Initialize -+ * the SCMI driver, query capability via SCMI and modify the PSCI capability -+ * based on that. -+ *****************************************************************************/ -+const plat_psci_ops_t *css_scmi_override_pm_ops(plat_psci_ops_t *ops) -+{ -+ uint32_t msg_attr; -+ int ret; -+ void *scmi_handle = scmi_handles[default_scmi_channel_id]; -+ -+ /* Check that power domain POWER_STATE_SET message is supported */ -+ ret = scmi_proto_msg_attr(scmi_handle, SCMI_PWR_DMN_PROTO_ID, -+ SCMI_PWR_STATE_SET_MSG, &msg_attr); -+ if (ret != SCMI_E_SUCCESS) { -+ sbi_printf("Set power state command is not supported by SCMI\n"); -+ sbi_hart_hang(); -+ } -+ -+ /* -+ * Don't support PSCI NODE_HW_STATE call if SCMI doesn't support -+ * POWER_STATE_GET message. 
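A minimal sketch of the capability-pruning pattern css_scmi_override_pm_ops() applies: each optional PSCI callback is kept only when the SCP advertises the SCMI message that backs it. The struct and flags below are simplified stand-ins for plat_psci_ops_t and the real PROTOCOL_MESSAGE_ATTRIBUTES queries.

/* Illustrative sketch, not part of the patch. */
#include <stddef.h>
#include <stdbool.h>

struct pm_ops_sketch {
    int  (*get_node_hw_state)(unsigned long mpidr, unsigned int lvl);
    void (*system_off)(void);
    void (*system_reset)(void);
    int  (*get_sys_suspend_power_state)(void *state);
};

void prune_ops(struct pm_ops_sketch *ops,
               bool scp_has_pwr_state_get,
               bool scp_has_sys_pwr_state_set,
               bool scp_supports_system_suspend)
{
    if (!scp_has_pwr_state_get)
        ops->get_node_hw_state = NULL;           /* NODE_HW_STATE unusable */

    if (!scp_has_sys_pwr_state_set) {
        ops->system_off   = NULL;                /* no SCP-backed off/reset */
        ops->system_reset = NULL;
        ops->get_sys_suspend_power_state = NULL;
    } else if (!scp_supports_system_suspend) {
        ops->get_sys_suspend_power_state = NULL; /* protocol ok, no SUSPEND */
    }
}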
-+ */ -+ ret = scmi_proto_msg_attr(scmi_handle, SCMI_PWR_DMN_PROTO_ID, -+ SCMI_PWR_STATE_GET_MSG, &msg_attr); -+ if (ret != SCMI_E_SUCCESS) -+ ops->get_node_hw_state = NULL; -+ -+ /* Check if the SCMI SYSTEM_POWER_STATE_SET message is supported */ -+ ret = scmi_proto_msg_attr(scmi_handle, SCMI_SYS_PWR_PROTO_ID, -+ SCMI_SYS_PWR_STATE_SET_MSG, &msg_attr); -+ if (ret != SCMI_E_SUCCESS) { -+ /* System power management operations are not supported */ -+ ops->system_off = NULL; -+ ops->system_reset = NULL; -+ ops->get_sys_suspend_power_state = NULL; -+ } else { -+ if (!(msg_attr & SCMI_SYS_PWR_SUSPEND_SUPPORTED)) { -+ /* -+ * System power management protocol is available, but -+ * it does not support SYSTEM SUSPEND. -+ */ -+ ops->get_sys_suspend_power_state = NULL; -+ } -+ if (!(msg_attr & SCMI_SYS_PWR_WARM_RESET_SUPPORTED)) { -+ /* -+ * WARM reset is not available. -+ */ -+ ops->system_reset2 = NULL; -+ } -+ } -+ -+ return ops; -+} -+ -+/* -+ * Helper function to suspend a CPU power domain and its parent power domains -+ * if applicable. -+ */ -+void css_scp_suspend(const struct psci_power_state *target_state) -+{ -+ int ret; -+ unsigned int curr_hart = current_hartid(); -+ -+ unsigned int core_pos = plat_core_pos_by_mpidr(curr_hart); -+ if (core_pos >= PLATFORM_CORE_COUNT) { -+ sbi_printf("%s:%d, node_idx beyond the boundary\n", -+ __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ -+ /* At least power domain level 0 should be specified to be suspended */ -+ if (target_state->pwr_domain_state[ARM_PWR_LVL0] != -+ ARM_LOCAL_STATE_OFF) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ /* Check if power down at system power domain level is requested */ -+ if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF) { -+ /* Issue SCMI command for SYSTEM_SUSPEND on all SCMI channels */ -+ ret = scmi_sys_pwr_state_set( -+ scmi_handles[default_scmi_channel_id], -+ SCMI_SYS_PWR_FORCEFUL_REQ, SCMI_SYS_PWR_SUSPEND); -+ if (ret != SCMI_E_SUCCESS) { -+ sbi_printf("SCMI system power domain suspend return 0x%x unexpected\n", -+ ret); -+ sbi_hart_hang(); -+ } -+ return; -+ } -+ -+ unsigned int lvl, channel_id, domain_id; -+ uint32_t scmi_pwr_state = 0; -+ /* -+ * If we reach here, then assert that power down at system power domain -+ * level is running. -+ */ -+ if (css_system_pwr_state(target_state) != ARM_LOCAL_STATE_RUN) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ /* For level 0, specify `scmi_power_state_sleep` as the power state */ -+ SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, ARM_PWR_LVL0, -+ scmi_power_state_sleep); -+ -+ for (lvl = ARM_PWR_LVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) { -+ if (target_state->pwr_domain_state[lvl] == ARM_LOCAL_STATE_RUN) -+ break; -+ -+ if (target_state->pwr_domain_state[lvl] != -+ ARM_LOCAL_STATE_OFF) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ /* -+ * Specify `scmi_power_state_off` as power state for higher -+ * levels. 
-+ */ -+ SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, lvl, -+ scmi_power_state_off); -+ } -+ -+ SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, lvl - 1); -+ -+ css_scp_core_pos_to_scmi_channel(core_pos, -+ &domain_id, &channel_id); -+ ret = scmi_pwr_state_set(scmi_handles[channel_id], -+ domain_id, scmi_pwr_state); -+ -+ if (ret != SCMI_E_SUCCESS) { -+ sbi_printf("SCMI set power state command return 0x%x unexpected\n", -+ ret); -+ sbi_hart_hang(); -+ } -+} -diff --git a/lib/utils/arm_scmi/objects.mk b/lib/utils/arm_scmi/objects.mk -new file mode 100644 -index 000000000000..532e0709bdee ---- /dev/null -+++ b/lib/utils/arm_scmi/objects.mk -@@ -0,0 +1,24 @@ -+# -+# SPDX-License-Identifier: BSD-2-Clause -+# -+# Copyright (c) 2020 Western Digital Corporation or its affiliates. -+# -+# Authors: -+# Anup Patel -+# -+ -+libsbiutils-objs-$(CONFIG_ARM_SCMI_PROTOCOL_SUPPORT) += arm_scmi/common/arm_pm.o -+ -+libsbiutils-objs-$(CONFIG_ARM_SCMI_PROTOCOL_SUPPORT) += arm_scmi/css/common/css_pm.o -+ -+libsbiutils-objs-$(CONFIG_ARM_SCMI_PROTOCOL_SUPPORT) += arm_scmi/css/scmi/scmi_common.o -+ -+libsbiutils-objs-$(CONFIG_ARM_SCMI_PROTOCOL_SUPPORT) += arm_scmi/css/scmi/scmi_pwr_dmn_proto.o -+ -+libsbiutils-objs-$(CONFIG_ARM_SCMI_PROTOCOL_SUPPORT) += arm_scmi/css/scmi/scmi_sys_pwr_proto.o -+ -+libsbiutils-objs-$(CONFIG_ARM_SCMI_PROTOCOL_SUPPORT) += arm_scmi/css/scp/css_pm_scmi.o -+ -+libsbiutils-objs-$(CONFIG_ARM_SCMI_PROTOCOL_SUPPORT) += arm_scmi/css/mhu/css_mhu_doorbell.o -+ -+libsbiutils-objs-$(CONFIG_ARM_SCMI_PROTOCOL_SUPPORT) += arm_scmi/board/spacemit/spacemit_pm.o -diff --git a/lib/utils/cci/bus-cci.c b/lib/utils/cci/bus-cci.c -new file mode 100644 -index 000000000000..7fe11d0d28c1 ---- /dev/null -+++ b/lib/utils/cci/bus-cci.c -@@ -0,0 +1,168 @@ -+/* -+ * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved. 
-+ * -+ * SPDX-License-Identifier: BSD-3-Clause -+ */ -+ -+#include -+#include -+ -+ -+/* Slave interface offsets from PERIPHBASE */ -+#define SLAVE_IFACE6_OFFSET (0x7000UL) -+#define SLAVE_IFACE5_OFFSET (0x6000UL) -+#define SLAVE_IFACE4_OFFSET (0x5000UL) -+#define SLAVE_IFACE3_OFFSET (0x4000UL) -+#define SLAVE_IFACE2_OFFSET (0x3000UL) -+#define SLAVE_IFACE1_OFFSET (0x2000UL) -+#define SLAVE_IFACE0_OFFSET (0x1000UL) -+#define SLAVE_IFACE_OFFSET(index) (SLAVE_IFACE0_OFFSET + \ -+ ((0x1000UL) * (index))) -+ -+/* Slave interface event and count register offsets from PERIPHBASE */ -+#define EVENT_SELECT7_OFFSET (0x80000UL) -+#define EVENT_SELECT6_OFFSET (0x70000UL) -+#define EVENT_SELECT5_OFFSET (0x60000UL) -+#define EVENT_SELECT4_OFFSET (0x50000UL) -+#define EVENT_SELECT3_OFFSET (0x40000UL) -+#define EVENT_SELECT2_OFFSET (0x30000UL) -+#define EVENT_SELECT1_OFFSET (0x20000UL) -+#define EVENT_SELECT0_OFFSET (0x10000UL) -+#define EVENT_OFFSET(index) (EVENT_SELECT0_OFFSET + \ -+ ((0x10000UL) * (index))) -+ -+/* Control and ID register offsets */ -+#define CTRL_OVERRIDE_REG (0x0U) -+#define SECURE_ACCESS_REG (0x8U) -+#define STATUS_REG (0xcU) -+#define IMPRECISE_ERR_REG (0x10U) -+#define PERFMON_CTRL_REG (0x100U) -+#define IFACE_MON_CTRL_REG (0x104U) -+ -+/* Component and peripheral ID registers */ -+#define PERIPHERAL_ID0 (0xFE0U) -+#define PERIPHERAL_ID1 (0xFE4U) -+#define PERIPHERAL_ID2 (0xFE8U) -+#define PERIPHERAL_ID3 (0xFECU) -+#define PERIPHERAL_ID4 (0xFD0U) -+#define PERIPHERAL_ID5 (0xFD4U) -+#define PERIPHERAL_ID6 (0xFD8U) -+#define PERIPHERAL_ID7 (0xFDCU) -+ -+#define COMPONENT_ID0 (0xFF0U) -+#define COMPONENT_ID1 (0xFF4U) -+#define COMPONENT_ID2 (0xFF8U) -+#define COMPONENT_ID3 (0xFFCU) -+#define COMPONENT_ID4 (0x1000U) -+#define COMPONENT_ID5 (0x1004U) -+#define COMPONENT_ID6 (0x1008U) -+#define COMPONENT_ID7 (0x100CU) -+ -+/* Slave interface register offsets */ -+#define SNOOP_CTRL_REG (0x0U) -+#define SH_OVERRIDE_REG (0x4U) -+#define READ_CHNL_QOS_VAL_OVERRIDE_REG (0x100U) -+#define WRITE_CHNL_QOS_VAL_OVERRIDE_REG (0x104U) -+#define MAX_OT_REG (0x110U) -+ -+/* Snoop Control register bit definitions */ -+#define DVM_EN_BIT (1U<<1) -+#define SNOOP_EN_BIT (1U<<0) -+#define SUPPORT_SNOOPS (1U<<30) -+#define SUPPORT_DVM (1U<<31) -+ -+/* Status register bit definitions */ -+#define CHANGE_PENDING_BIT (1U<<0) -+ -+/* Event and count register offsets */ -+#define EVENT_SELECT_REG (0x0U) -+#define EVENT_COUNT_REG (0x4U) -+#define COUNT_CNTRL_REG (0x8U) -+#define COUNT_OVERFLOW_REG (0xCU) -+ -+/* Slave interface monitor registers */ -+#define INT_MON_REG_SI0 (0x90000U) -+#define INT_MON_REG_SI1 (0x90004U) -+#define INT_MON_REG_SI2 (0x90008U) -+#define INT_MON_REG_SI3 (0x9000CU) -+#define INT_MON_REG_SI4 (0x90010U) -+#define INT_MON_REG_SI5 (0x90014U) -+#define INT_MON_REG_SI6 (0x90018U) -+ -+/* Master interface monitor registers */ -+#define INT_MON_REG_MI0 (0x90100U) -+#define INT_MON_REG_MI1 (0x90104U) -+#define INT_MON_REG_MI2 (0x90108U) -+#define INT_MON_REG_MI3 (0x9010cU) -+#define INT_MON_REG_MI4 (0x90110U) -+#define INT_MON_REG_MI5 (0x90114U) -+ -+#define SLAVE_IF_UNUSED (-1) -+ -+#define MAKE_CCI_PART_NUMBER(hi, lo) (((hi) << 8) | (lo)) -+#define CCI_PART_LO_MASK (0xffU) -+#define CCI_PART_HI_MASK (0xfU) -+ -+/* CCI part number codes read from Peripheral ID registers 0 and 1 */ -+#define CCI400_PART_NUM (0x420) -+#define CCI500_PART_NUM (0x422) -+#define CCI550_PART_NUM (0x423) -+ -+#define CCI400_SLAVE_PORTS (5) -+#define CCI500_SLAVE_PORTS (7) -+#define CCI550_SLAVE_PORTS (7) -+ 
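For reference, a worked example of where a snoop-control write lands given the slave-interface offsets defined above. The PERIPHBASE value and the cluster-to-interface mapping below are hypothetical; on the real platform they come from cci_init() and the platform's map array.

/* Illustrative sketch, not part of the patch. */
#include <stdio.h>

#define SLAVE_IFACE_OFFSET(i)  (0x1000UL + 0x1000UL * (unsigned long)(i))
#define SNOOP_CTRL_REG         0x0UL
#define DVM_EN_BIT             (1u << 1)
#define SNOOP_EN_BIT           (1u << 0)

int main(void)
{
    unsigned long periphbase = 0xD8000000UL;   /* hypothetical CCI base */
    int slave_if = 3;                          /* hypothetical cluster mapping */
    unsigned long reg = periphbase + SLAVE_IFACE_OFFSET(slave_if) + SNOOP_CTRL_REG;

    printf("snoop ctrl for IF%d at 0x%lx, write 0x%x to enable snoops+DVM\n",
           slave_if, reg, DVM_EN_BIT | SNOOP_EN_BIT);
    return 0;
}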
-+static void *cci_base; -+static const int *cci_slave_if_map; -+ -+ -+void cci_init(u32 base, const int *map, unsigned int num_cci_masters) -+{ -+ cci_base = (void *)(u64)base; -+ cci_slave_if_map = map; -+} -+ -+void cci_enable_snoop_dvm_reqs(unsigned int master_id) -+{ -+ int slave_if_id = cci_slave_if_map[master_id]; -+ -+ /* -+ * Enable Snoops and DVM messages, no need for Read/Modify/Write as -+ * rest of bits are write ignore -+ */ -+ writel(DVM_EN_BIT | SNOOP_EN_BIT, cci_base + -+ SLAVE_IFACE_OFFSET(slave_if_id) + SNOOP_CTRL_REG); -+ -+ /* -+ * Wait for the completion of the write to the Snoop Control Register -+ * before testing the change_pending bit -+ */ -+ mb(); -+ -+ /* Wait for the dust to settle down */ -+ while ((readl(cci_base + STATUS_REG) & CHANGE_PENDING_BIT) != 0U) -+ ; -+} -+ -+void cci_disable_snoop_dvm_reqs(unsigned int master_id) -+{ -+ int slave_if_id = cci_slave_if_map[master_id]; -+ -+ /* -+ * Disable Snoops and DVM messages, no need for Read/Modify/Write as -+ * rest of bits are write ignore. -+ */ -+ writel(~(DVM_EN_BIT | SNOOP_EN_BIT), cci_base + -+ SLAVE_IFACE_OFFSET(slave_if_id) + SNOOP_CTRL_REG); -+ -+ /* -+ * Wait for the completion of the write to the Snoop Control Register -+ * before testing the change_pending bit -+ */ -+ mb(); -+ -+ /* Wait for the dust to settle down */ -+ while ((readl(cci_base + STATUS_REG) & CHANGE_PENDING_BIT) != 0U) -+ ; -+} -+ -diff --git a/lib/utils/cci/objects.mk b/lib/utils/cci/objects.mk -new file mode 100644 -index 000000000000..08aac675e6da ---- /dev/null -+++ b/lib/utils/cci/objects.mk -@@ -0,0 +1,7 @@ -+# -+# SPDX-License-Identifier: BSD-2-Clause -+# -+# Copyright (C) 2020 Bin Meng -+# -+ -+libsbiutils-objs-y += cci/bus-cci.o -diff --git a/lib/utils/ipi/aclint_mswi.c b/lib/utils/ipi/aclint_mswi.c -index f47b3bcbbb44..63420302b1d7 100644 ---- a/lib/utils/ipi/aclint_mswi.c -+++ b/lib/utils/ipi/aclint_mswi.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - #include - - static unsigned long mswi_ptr_offset; -@@ -84,6 +85,7 @@ int aclint_mswi_cold_init(struct aclint_mswi_data *mswi) - struct sbi_scratch *scratch; - unsigned long pos, region_size; - struct sbi_domain_memregion reg; -+ const struct sbi_platform *sbi = sbi_platform_thishart_ptr(); - - /* Sanity checks */ - if (!mswi || (mswi->addr & (ACLINT_MSWI_ALIGN - 1)) || -@@ -100,7 +102,7 @@ int aclint_mswi_cold_init(struct aclint_mswi_data *mswi) - - /* Update MSWI pointer in scratch space */ - for (i = 0; i < mswi->hart_count; i++) { -- scratch = sbi_hartid_to_scratch(mswi->first_hartid + i); -+ scratch = sbi_hartid_to_scratch(sbi->hart_index2id[i]); - if (!scratch) - return SBI_ENOENT; - mswi_set_hart_data_ptr(scratch, mswi); -diff --git a/lib/utils/irqchip/fdt_irqchip_plic.c b/lib/utils/irqchip/fdt_irqchip_plic.c -index 829c5ee20341..0a2d61b0beca 100644 ---- a/lib/utils/irqchip/fdt_irqchip_plic.c -+++ b/lib/utils/irqchip/fdt_irqchip_plic.c -@@ -85,6 +85,11 @@ static int irqchip_plic_warm_init(void) - plic_get_hart_scontext(scratch)); - } - -+void fdt_plic_context_exit(void) -+{ -+ irqchip_plic_warm_init(); -+} -+ - static int irqchip_plic_update_hartid_table(void *fdt, int nodeoff, - struct plic_data *pd) - { -diff --git a/lib/utils/psci/Kconfig b/lib/utils/psci/Kconfig -new file mode 100644 -index 000000000000..a009597ba053 ---- /dev/null -+++ b/lib/utils/psci/Kconfig -@@ -0,0 +1,21 @@ -+# SPDX-License-Identifier: BSD-2-Clause -+ -+menu "ARM's power management framework Support" -+ -+config ARM_PSCI_SUPPORT -+ bool "Support psci protocol" -+ default 
n -+ -+if ARM_PSCI_SUPPORT -+ -+config ARM_SCMI_PROTOCOL_SUPPORT -+ bool "Using r-core and arm's scmi protocol to dealing with the pwr management" -+ default n -+ -+config ARM_NON_SCMI_SUPPORT -+ bool "dealing with the pwr management in Machine mode-opensbi" -+ default n -+ -+endif -+ -+endmenu -diff --git a/lib/utils/psci/objects.mk b/lib/utils/psci/objects.mk -new file mode 100644 -index 000000000000..617068038db8 ---- /dev/null -+++ b/lib/utils/psci/objects.mk -@@ -0,0 +1,30 @@ -+# -+# SPDX-License-Identifier: BSD-2-Clause -+# -+# Copyright (c) 2020 Western Digital Corporation or its affiliates. -+# -+# Authors: -+# Anup Patel -+# -+ -+libsbiutils-objs-$(CONFIG_ARM_PSCI_SUPPORT) += psci/psci_common.o -+ -+libsbiutils-objs-$(CONFIG_ARM_PSCI_SUPPORT) += psci/psci_setup.o -+ -+libsbiutils-objs-$(CONFIG_ARM_PSCI_SUPPORT) += psci/psci_main.o -+ -+libsbiutils-objs-$(CONFIG_ARM_PSCI_SUPPORT) += psci/psci_on.o -+ -+libsbiutils-objs-$(CONFIG_ARM_PSCI_SUPPORT) += psci/psci_off.o -+ -+libsbiutils-objs-${CONFIG_ARM_PSCI_SUPPORT} += psci/psci_suspend.o -+ -+libsbiutils-objs-$(CONFIG_ARM_PSCI_SUPPORT) += psci/spacemit/spacemit_topology.o -+ -+ifeq ($(CONFIG_ARM_NON_SCMI_SUPPORT), y) -+# common -+libsbiutils-objs-$(CONFIG_ARM_NON_SCMI_SUPPORT) += psci/spacemit/plat/plat_pm.o -+ -+# platform -+libsbiutils-objs-$(CONFIG_PLATFORM_SPACEMIT_K1X) += psci/spacemit/plat/k1x/underly_implement.o -+endif -diff --git a/lib/utils/psci/psci_common.c b/lib/utils/psci/psci_common.c -new file mode 100644 -index 000000000000..f4b4bee03ec4 ---- /dev/null -+++ b/lib/utils/psci/psci_common.c -@@ -0,0 +1,872 @@ -+/* -+ * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved. -+ * -+ * SPDX-License-Identifier: BSD-3-Clause -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "psci_private.h" -+ -+/* -+ * PSCI requested local power state map. This array is used to store the local -+ * power states requested by a CPU for power levels from level 1 to -+ * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power -+ * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a -+ * CPU are the same. -+ * -+ * During state coordination, the platform is passed an array containing the -+ * local states requested for a particular non cpu power domain by each cpu -+ * within the domain. -+ * -+ * TODO: Dense packing of the requested states will cause cache thrashing -+ * when multiple power domains write to it. If we allocate the requested -+ * states at each power level in a cache-line aligned per-domain memory, -+ * the cache thrashing can be avoided. -+ */ -+static plat_local_state_t -+ /* psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT] */ -+ psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][CACHE_LINE_SIZE] __attribute__((aligned(CACHE_LINE_SIZE))); -+ -+unsigned int psci_plat_core_count; -+ -+unsigned long psci_delta_off; -+ -+/******************************************************************************* -+ * Arrays that hold the platform's power domain tree information for state -+ * management of power domains. -+ * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain -+ * which is an ancestor of a CPU power domain. 
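A sketch of the coordination step these requested-state arrays feed: a shared (non-CPU) power domain can only go as deep as its most demanding child allows, so its target state is the shallowest state requested by any CPU underneath it. Since deeper states use larger numeric values, that is a minimum. This mirrors the usual TF-A default policy; a platform may override it, and the patch's exact coordination path is not shown in this hunk.

/* Illustrative sketch, not part of the patch. */
#include <stdint.h>

typedef uint8_t plat_local_state_t;

plat_local_state_t coordinate_pwr_state(const plat_local_state_t *req,
                                        unsigned int ncpus)
{
    plat_local_state_t target = req[0];

    for (unsigned int i = 1; i < ncpus; i++)
        if (req[i] < target)        /* a shallower request wins */
            target = req[i];
    return target;
}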
-+ * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain -+ ******************************************************************************/ -+non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]; -+ -+/* Lock for PSCI state coordination */ -+DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]); -+ -+cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT]; -+ -+/******************************************************************************* -+ * Pointer to functions exported by the platform to complete power mgmt. ops -+ ******************************************************************************/ -+const plat_psci_ops_t *psci_plat_pm_ops; -+ -+/* -+ * The plat_local_state used by the platform is one of these types: RUN, -+ * RETENTION and OFF. The platform can define further sub-states for each type -+ * apart from RUN. This categorization is done to verify the sanity of the -+ * psci_power_state passed by the platform and to print debug information. The -+ * categorization is done on the basis of the following conditions: -+ * -+ * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN. -+ * -+ * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is -+ * STATE_TYPE_RETN. -+ * -+ * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is -+ * STATE_TYPE_OFF. -+ */ -+typedef enum plat_local_state_type { -+ STATE_TYPE_RUN = 0, -+ STATE_TYPE_RETN, -+ STATE_TYPE_OFF -+} plat_local_state_type_t; -+ -+/* Function used to categorize plat_local_state. */ -+plat_local_state_type_t find_local_state_type(plat_local_state_t state) -+{ -+ if (state != 0U) { -+ if (state > PLAT_MAX_RET_STATE) { -+ return STATE_TYPE_OFF; -+ } else { -+ return STATE_TYPE_RETN; -+ } -+ } else { -+ return STATE_TYPE_RUN; -+ } -+} -+ -+/******************************************************************************* -+ * PSCI helper function to get the parent nodes corresponding to a cpu_index. -+ ******************************************************************************/ -+void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx, -+ unsigned int end_lvl, -+ unsigned int *node_index) -+{ -+ unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node; -+ unsigned int i; -+ unsigned int *node = node_index; -+ -+ for (i = PSCI_CPU_PWR_LVL + 1U; i <= end_lvl; i++) { -+ *node = parent_node; -+ node++; -+ parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node; -+ } -+} -+ -+/****************************************************************************** -+ * This function initializes the psci_req_local_pwr_states. 
-+ *****************************************************************************/ -+void psci_init_req_local_pwr_states(void) -+{ -+ /* Initialize the requested state of all non CPU power domains as OFF */ -+ unsigned int pwrlvl; -+ unsigned int core; -+ -+ for (pwrlvl = 0U; pwrlvl < PLAT_MAX_PWR_LVL; pwrlvl++) { -+ for (core = 0; core < psci_plat_core_count; core++) { -+ psci_req_local_pwr_states[pwrlvl][core] = -+ PLAT_MAX_OFF_STATE; -+ } -+ csi_dcache_clean_invalid_range( -+ (uintptr_t) psci_req_local_pwr_states[pwrlvl], -+ CACHE_LINE_SIZE); -+ } -+} -+ -+void set_non_cpu_pd_node_local_state(unsigned int parent_idx, -+ plat_local_state_t state) -+{ -+ psci_non_cpu_pd_nodes[parent_idx].local_state = state; -+ csi_dcache_clean_invalid_range( -+ (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx], -+ sizeof(psci_non_cpu_pd_nodes[parent_idx])); -+} -+ -+/****************************************************************************** -+ * Helper function to update the requested local power state array. This array -+ * does not store the requested state for the CPU power level. Hence an -+ * assertion is added to prevent us from accessing the CPU power level. -+ *****************************************************************************/ -+void psci_set_req_local_pwr_state(unsigned int pwrlvl, -+ unsigned int cpu_idx, -+ plat_local_state_t req_pwr_state) -+{ -+ if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) && -+ (cpu_idx < psci_plat_core_count)) { -+ psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx] = req_pwr_state; -+ csi_dcache_clean_invalid_range( -+ (uintptr_t) psci_req_local_pwr_states[pwrlvl - 1U], -+ CACHE_LINE_SIZE); -+ } -+} -+ -+/****************************************************************************** -+ * Helper function to set the target local power state that each power domain -+ * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will -+ * enter. This function will be called after coordination of requested power -+ * states has been done for each power level. -+ *****************************************************************************/ -+static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl, -+ const psci_power_state_t *target_state) -+{ -+ unsigned int parent_idx, lvl; -+ psci_cpu_data_t *svc_cpu_data; -+ const plat_local_state_t *pd_state = target_state->pwr_domain_state; -+ unsigned int hartid = current_hartid(); -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid); -+ -+ svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); -+ -+ psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]); -+ -+ /* -+ * Need to flush as local_state might be accessed with Data Cache -+ * disabled during power on -+ */ -+ csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->local_state, sizeof(plat_local_state_t)); -+ -+ parent_idx = psci_cpu_pd_nodes[plat_core_pos_by_mpidr(hartid)].parent_node; -+ -+ /* Copy the local_state from state_info */ -+ for (lvl = 1U; lvl <= end_pwrlvl; lvl++) { -+ set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]); -+ parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; -+ } -+} -+ -+/****************************************************************************** -+ * Helper function to return a reference to an array containing the local power -+ * states requested by each cpu for a power domain at 'pwrlvl'. The size of the -+ * array will be the number of cpu power domains of which this power domain is -+ * an ancestor. 
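The clean+invalidate calls scattered through this file follow one pattern: any PSCI bookkeeping that a freshly powered-on hart might read with its data cache still disabled is written back to memory immediately after the store. The sketch below names that pattern; the csi_dcache_clean_invalid_range() prototype is assumed from its call sites in the patch.

/* Illustrative sketch, not part of the patch. */
#include <stdint.h>
#include <stddef.h>

typedef uint8_t plat_local_state_t;

/* assumed prototype, matching the call sites in this file */
void csi_dcache_clean_invalid_range(uintptr_t addr, size_t size);

void publish_local_state(volatile plat_local_state_t *slot,
                         plat_local_state_t state)
{
    *slot = state;
    /* push the line out so a cache-off reader observes the new value */
    csi_dcache_clean_invalid_range((uintptr_t)slot, sizeof(*slot));
}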
These requested states will be used to determine a suitable -+ * target state for this power domain during psci state coordination. An -+ * assertion is added to prevent us from accessing the CPU power level. -+ *****************************************************************************/ -+static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl, -+ unsigned int cpu_idx) -+{ -+ if (pwrlvl <= PSCI_CPU_PWR_LVL) { -+ sbi_printf("%s:%d, err\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) && -+ (cpu_idx < psci_plat_core_count)) { -+ return &psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx]; -+ } else -+ return NULL; -+} -+ -+/* -+ * Helper functions to get/set the fields of PSCI per-cpu data. -+ */ -+void psci_set_aff_info_state(aff_info_state_t aff_state) -+{ -+ psci_cpu_data_t *svc_cpu_data; -+ unsigned int hartid = current_hartid(); -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid); -+ -+ svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); -+ -+ svc_cpu_data->aff_info_state = aff_state; -+} -+ -+aff_info_state_t psci_get_aff_info_state(void) -+{ -+ psci_cpu_data_t *svc_cpu_data; -+ unsigned int hartid = current_hartid(); -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid); -+ -+ svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); -+ -+ return svc_cpu_data->aff_info_state; -+} -+ -+aff_info_state_t psci_get_aff_info_state_by_idx(unsigned int idx) -+{ -+ psci_cpu_data_t *svc_cpu_data; -+ const struct sbi_platform *sbi = sbi_platform_thishart_ptr(); -+ unsigned int hartid = sbi->hart_index2id[idx]; -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid); -+ -+ svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); -+ -+ return svc_cpu_data->aff_info_state; -+} -+ -+void psci_set_aff_info_state_by_idx(unsigned int idx, -+ aff_info_state_t aff_state) -+{ -+ psci_cpu_data_t *svc_cpu_data; -+ const struct sbi_platform *sbi = sbi_platform_thishart_ptr(); -+ unsigned int hartid = sbi->hart_index2id[idx]; -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid); -+ -+ svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); -+ -+ svc_cpu_data->aff_info_state = aff_state; -+} -+ -+void psci_set_cpu_local_state(plat_local_state_t state) -+{ -+ psci_cpu_data_t *svc_cpu_data; -+ unsigned int hartid = current_hartid(); -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid); -+ -+ svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); -+ -+ svc_cpu_data->local_state = state; -+} -+ -+void psci_set_suspend_pwrlvl(unsigned int target_lvl) -+{ -+ psci_cpu_data_t *svc_cpu_data; -+ unsigned int hartid = current_hartid(); -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid); -+ -+ svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); -+ -+ svc_cpu_data->target_pwrlvl = target_lvl; -+} -+ -+static inline plat_local_state_t psci_get_cpu_local_state_by_idx( -+ unsigned int idx) -+{ -+ psci_cpu_data_t *svc_cpu_data; -+ const struct sbi_platform *sbi = sbi_platform_thishart_ptr(); -+ unsigned int hartid = sbi->hart_index2id[idx]; -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid); -+ -+ svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); -+ -+ return svc_cpu_data->local_state; -+} -+ -+static inline plat_local_state_t psci_get_cpu_local_state(void) -+{ -+ psci_cpu_data_t *svc_cpu_data; -+ unsigned int hartid = current_hartid(); -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid); 
-+ -+ svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); -+ -+ return svc_cpu_data->local_state; -+} -+ -+/****************************************************************************** -+ * This function is invoked post CPU power up and initialization. It sets the -+ * affinity info state, target power state and requested power state for the -+ * current CPU and all its ancestor power domains to RUN. -+ *****************************************************************************/ -+void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl) -+{ -+ unsigned int parent_idx, lvl; -+ unsigned int cpu_idx; -+ psci_cpu_data_t *svc_cpu_data; -+ unsigned int hartid = current_hartid(); -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid); -+ svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); -+ -+ cpu_idx = plat_core_pos_by_mpidr(hartid); -+ -+ parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node; -+ -+ /* Reset the local_state to RUN for the non cpu power domains. */ -+ for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) { -+ set_non_cpu_pd_node_local_state(parent_idx, -+ PSCI_LOCAL_STATE_RUN); -+ psci_set_req_local_pwr_state(lvl, -+ cpu_idx, -+ PSCI_LOCAL_STATE_RUN); -+ parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; -+ } -+ -+ /* Set the affinity info state to ON */ -+ psci_set_aff_info_state(AFF_STATE_ON); -+ -+ psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN); -+ -+ csi_dcache_clean_invalid_range((uintptr_t)svc_cpu_data, sizeof(psci_cpu_data_t)); -+} -+ -+/******************************************************************************* -+ * This function prints the state of all power domains present in the -+ * system -+ ******************************************************************************/ -+void psci_print_power_domain_map(void) -+{ -+ unsigned int idx; -+ plat_local_state_t state; -+ plat_local_state_type_t state_type; -+ -+ /* This array maps to the PSCI_STATE_X definitions in psci.h */ -+ static const char * const psci_state_type_str[] = { -+ "ON", -+ "RETENTION", -+ "OFF", -+ }; -+ -+ sbi_printf("PSCI Power Domain Map:\n"); -+ for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - psci_plat_core_count); -+ idx++) { -+ state_type = find_local_state_type( -+ psci_non_cpu_pd_nodes[idx].local_state); -+ sbi_printf(" Domain Node : Level %u, parent_node %u," -+ " State %s (0x%x)\n", -+ psci_non_cpu_pd_nodes[idx].level, -+ psci_non_cpu_pd_nodes[idx].parent_node, -+ psci_state_type_str[state_type], -+ psci_non_cpu_pd_nodes[idx].local_state); -+ } -+ -+ for (idx = 0; idx < psci_plat_core_count; idx++) { -+ state = psci_get_cpu_local_state_by_idx(idx); -+ state_type = find_local_state_type(state); -+ sbi_printf(" CPU Node : MPID 0x%llx, parent_node %u," -+ " State %s (0x%x)\n", -+ (unsigned long long)psci_cpu_pd_nodes[idx].mpidr, -+ psci_cpu_pd_nodes[idx].parent_node, -+ psci_state_type_str[state_type], -+ psci_get_cpu_local_state_by_idx(idx)); -+ } -+} -+ -+/******************************************************************************* -+ * Simple routine to determine whether a mpidr is valid or not. 
-+ ******************************************************************************/ -+int psci_validate_mpidr(u_register_t mpidr) -+{ -+ if (plat_core_pos_by_mpidr(mpidr) < 0) -+ return PSCI_E_INVALID_PARAMS; -+ -+ return PSCI_E_SUCCESS; -+} -+ -+static unsigned int psci_get_suspend_pwrlvl(void) -+{ -+ psci_cpu_data_t *svc_cpu_data; -+ unsigned int hartid = current_hartid(); -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid); -+ -+ svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); -+ -+ return svc_cpu_data->target_pwrlvl; -+} -+ -+/******************************************************************************* -+ * Routine to return the maximum power level to traverse to after a cpu has -+ * been physically powered up. It is expected to be called immediately after -+ * reset from assembler code. -+ ******************************************************************************/ -+static unsigned int get_power_on_target_pwrlvl(void) -+{ -+ unsigned int pwrlvl; -+ -+ /* -+ * Assume that this cpu was suspended and retrieve its target power -+ * level. If it is invalid then it could only have been turned off -+ * earlier. PLAT_MAX_PWR_LVL will be the highest power level a -+ * cpu can be turned off to. -+ */ -+ pwrlvl = psci_get_suspend_pwrlvl(); -+ if (pwrlvl == PSCI_INVALID_PWR_LVL) -+ pwrlvl = PLAT_MAX_PWR_LVL; -+ if (pwrlvl >= PSCI_INVALID_PWR_LVL) { -+ sbi_printf("%s:%d,\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ return pwrlvl; -+} -+ -+/******************************************************************************* -+ * This function is passed the highest level in the topology tree that the -+ * operation should be applied to and a list of node indexes. It picks up locks -+ * from the node index list in order of increasing power domain level in the -+ * range specified. -+ ******************************************************************************/ -+void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl, -+ const unsigned int *parent_nodes) -+{ -+ unsigned int parent_idx; -+ unsigned int level; -+ -+ /* No locking required for level 0. Hence start locking from level 1 */ -+ for (level = PSCI_CPU_PWR_LVL + 1U; level <= end_pwrlvl; level++) { -+ parent_idx = parent_nodes[level - 1U]; -+ psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]); -+ } -+} -+ -+/******************************************************************************* -+ * This function is passed the highest level in the topology tree that the -+ * operation should be applied to and a list of node indexes. It releases the -+ * locks in order of decreasing power domain level in the range specified. -+ ******************************************************************************/ -+void psci_release_pwr_domain_locks(unsigned int end_pwrlvl, -+ const unsigned int *parent_nodes) -+{ -+ unsigned int parent_idx; -+ unsigned int level; -+ -+ /* Unlock top down. No unlocking required for level 0. 
*/ -+ for (level = end_pwrlvl; level >= (PSCI_CPU_PWR_LVL + 1U); level--) { -+ parent_idx = parent_nodes[level - 1U]; -+ psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]); -+ } -+} -+ -+/****************************************************************************** -+ * This function finds the highest power level which will be powered down -+ * amongst all the power levels specified in the 'state_info' structure -+ *****************************************************************************/ -+unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info) -+{ -+ int i; -+ -+ for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) { -+ if (is_local_state_off(state_info->pwr_domain_state[i]) != 0) -+ return (unsigned int) i; -+ } -+ -+ return PSCI_INVALID_PWR_LVL; -+} -+ -+/* -+ * The PSCI generic code uses this API to let the platform participate in state -+ * coordination during a power management operation. It compares the platform -+ * specific local power states requested by each cpu for a given power domain -+ * and returns the coordinated target power state that the domain should -+ * enter. A platform assigns a number to a local power state. This default -+ * implementation assumes that the platform assigns these numbers in order of -+ * increasing depth of the power state i.e. for two power states X & Y, if X < Y -+ * then X represents a shallower power state than Y. As a result, the -+ * coordinated target local power state for a power domain will be the minimum -+ * of the requested local power states. -+ */ -+plat_local_state_t plat_get_target_pwr_state(unsigned int lvl, -+ const plat_local_state_t *states, -+ unsigned int ncpu) -+{ -+ plat_local_state_t target = PLAT_MAX_OFF_STATE, temp; -+ const plat_local_state_t *st = states; -+ unsigned int n = ncpu; -+ -+ if (ncpu <= 0U) { -+ sbi_printf("%s:%d, err\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ do { -+ temp = *st; -+ st++; -+ if (temp < target) -+ target = temp; -+ n--; -+ } while (n > 0U); -+ -+ return target; -+} -+ -+/* -+ * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent -+ * memory. -+ * -+ * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory, -+ * it's accessed by both cached and non-cached participants. To serve the common -+ * minimum, perform a cache flush before read and after write so that non-cached -+ * participants operate on latest data in main memory. -+ * -+ * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent -+ * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent. -+ * In both cases, no cache operations are required. -+ */ -+ -+/* -+ * Retrieve local state of non-CPU power domain node from a non-cached CPU, -+ * after any required cache maintenance operation. -+ */ -+static plat_local_state_t get_non_cpu_pd_node_local_state( -+ unsigned int parent_idx) -+{ -+ return psci_non_cpu_pd_nodes[parent_idx].local_state; -+} -+ -+/****************************************************************************** -+ * Helper function to return the current local power state of each power domain -+ * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This -+ * function will be called after a cpu is powered on to find the local state -+ * each power domain has emerged from. 
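The default plat_get_target_pwr_state() shown in this hunk coordinates a shared power domain by taking the minimum of the states requested by its CPUs, because shallower states are encoded with smaller values. The standalone sketch below only illustrates that reduction; the 0/1/2 encoding (RUN/retention/OFF) and the 4-CPU cluster are assumptions chosen for the example, not values taken from this patch.

#include <stdio.h>

typedef unsigned char plat_local_state_t;

/* Assumed encoding for illustration only: 0 = RUN, 1 = RETENTION, 2 = OFF. */
#define STATE_RUN 0U
#define STATE_RET 1U
#define STATE_OFF 2U

/* Same reduction as the default plat_get_target_pwr_state(): the shallowest
 * (numerically smallest) state requested by any CPU wins for the shared domain. */
static plat_local_state_t coordinate(const plat_local_state_t *states,
                                     unsigned int ncpu)
{
        plat_local_state_t target = STATE_OFF;

        while (ncpu-- > 0U) {
                if (*states < target)
                        target = *states;
                states++;
        }
        return target;
}

int main(void)
{
        /* 4 CPUs in one cluster: CPU0 and CPU2 request OFF, CPU1 retention,
         * CPU3 is still running. */
        plat_local_state_t req[4] = { STATE_OFF, STATE_RET, STATE_OFF, STATE_RUN };

        /* Prints 0 (RUN): the cluster cannot power down while CPU3 runs. */
        printf("coordinated cluster state = %u\n", coordinate(req, 4U));
        return 0;
}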
-+ *****************************************************************************/ -+void psci_get_target_local_pwr_states(unsigned int end_pwrlvl, -+ psci_power_state_t *target_state) -+{ -+ unsigned int parent_idx, lvl, cpu_idx; -+ plat_local_state_t *pd_state = target_state->pwr_domain_state; -+ unsigned int hartid = current_hartid(); -+ -+ cpu_idx = plat_core_pos_by_mpidr(hartid); -+ pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state(); -+ parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node; -+ -+ /* Copy the local power state from node to state_info */ -+ for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) { -+ pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx); -+ parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; -+ } -+ -+ /* Set the the higher levels to RUN */ -+ for (; lvl <= PLAT_MAX_PWR_LVL; lvl++) -+ target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN; -+} -+ -+/******************************************************************************* -+ * Generic handler which is called when a cpu is physically powered on. It -+ * traverses the node information and finds the highest power level powered -+ * off and performs generic, architectural, platform setup and state management -+ * to power on that power level and power levels below it. -+ * e.g. For a cpu that's been powered on, it will call the platform specific -+ * code to enable the gic cpu interface and for a cluster it will enable -+ * coherency at the interconnect level in addition to gic cpu interface. -+ ******************************************************************************/ -+void psci_warmboot_entrypoint(void) -+{ -+ unsigned int end_pwrlvl; -+ unsigned int cpu_idx; -+ unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0}; -+ psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} }; -+ unsigned int hartid = current_hartid(); -+ -+ cpu_idx = plat_core_pos_by_mpidr(hartid); -+ -+ /* if we resumed directly from CPU-non-ret because of the wakeup source in suspending process */ -+ if (psci_get_cpu_local_state() == PSCI_LOCAL_STATE_RUN) { -+ /* sbi_printf("%s:%d\n", __func__, __LINE__); */ -+ return; -+ } -+ -+ /* -+ * Verify that we have been explicitly turned ON or resumed from -+ * suspend. -+ */ -+ if (psci_get_aff_info_state() == AFF_STATE_OFF) { -+ sbi_printf("Unexpected affinity info state.\n"); -+ sbi_hart_hang(); -+ } -+ -+ /* -+ * Get the maximum power domain level to traverse to after this cpu -+ * has been physically powered up. -+ */ -+ end_pwrlvl = get_power_on_target_pwrlvl(); -+ -+ /* Get the parent nodes */ -+ psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes); -+ -+ /* -+ * This function acquires the lock corresponding to each power level so -+ * that by the time all locks are taken, the system topology is snapshot -+ * and state management can be done safely. -+ */ -+ psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes); -+ -+ psci_get_target_local_pwr_states(end_pwrlvl, &state_info); -+ -+#if ENABLE_PSCI_STAT -+ plat_psci_stat_accounting_stop(&state_info); -+#endif -+ -+ /* -+ * This CPU could be resuming from suspend or it could have just been -+ * turned on. To distinguish between these 2 cases, we examine the -+ * affinity state of the CPU: -+ * - If the affinity state is ON_PENDING then it has just been -+ * turned on. -+ * - Else it is resuming from suspend. 
-+ * -+ * Depending on the type of warm reset identified, choose the right set -+ * of power management handler and perform the generic, architecture -+ * and platform specific handling. -+ */ -+ if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING) -+ psci_cpu_on_finish(cpu_idx, &state_info); -+ else -+ psci_cpu_suspend_finish(cpu_idx, &state_info); -+ -+ /* -+ * Set the requested and target state of this CPU and all the higher -+ * power domains which are ancestors of this CPU to run. -+ */ -+ psci_set_pwr_domains_to_run(end_pwrlvl); -+ -+#if ENABLE_PSCI_STAT -+ /* -+ * Update PSCI stats. -+ * Caches are off when writing stats data on the power down path. -+ * Since caches are now enabled, it's necessary to do cache -+ * maintenance before reading that same data. -+ */ -+ psci_stats_update_pwr_up(end_pwrlvl, &state_info); -+#endif -+ -+ /* -+ * This loop releases the lock corresponding to each power level -+ * in the reverse order to which they were acquired. -+ */ -+ psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes); -+} -+ -+/****************************************************************************** -+ * This function is used in platform-coordinated mode. -+ * -+ * This function is passed the local power states requested for each power -+ * domain (state_info) between the current CPU domain and its ancestors until -+ * the target power level (end_pwrlvl). It updates the array of requested power -+ * states with this information. -+ * -+ * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it -+ * retrieves the states requested by all the cpus of which the power domain at -+ * that level is an ancestor. It passes this information to the platform to -+ * coordinate and return the target power state. If the target state for a level -+ * is RUN then subsequent levels are not considered. At the CPU level, state -+ * coordination is not required. Hence, the requested and the target states are -+ * the same. -+ * -+ * The 'state_info' is updated with the target state for each level between the -+ * CPU and the 'end_pwrlvl' and returned to the caller. -+ * -+ * This function will only be invoked with data cache enabled and while -+ * powering down a core. -+ *****************************************************************************/ -+void psci_do_state_coordination(unsigned int end_pwrlvl, -+ psci_power_state_t *state_info) -+{ -+ unsigned int lvl, parent_idx; -+ unsigned int start_idx; -+ unsigned int ncpus; -+ plat_local_state_t target_state, *req_states; -+ unsigned int hartid = current_hartid(); -+ unsigned int cpu_idx = plat_core_pos_by_mpidr(hartid);; -+ -+ if (end_pwrlvl > PLAT_MAX_PWR_LVL) { -+ sbi_printf("%s:%d, err\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node; -+ -+ /* For level 0, the requested state will be equivalent -+ to target state */ -+ for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) { -+ -+ /* First update the requested power state */ -+ psci_set_req_local_pwr_state(lvl, cpu_idx, -+ state_info->pwr_domain_state[lvl]); -+ -+ /* Get the requested power states for this power level */ -+ start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx; -+ req_states = psci_get_req_local_pwr_states(lvl, start_idx); -+ -+ /* -+ * Let the platform coordinate amongst the requested states at -+ * this power level and return the target local power state. 
-+ */ -+ ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus; -+ target_state = plat_get_target_pwr_state(lvl, -+ req_states, -+ ncpus); -+ -+ state_info->pwr_domain_state[lvl] = target_state; -+ -+ /* Break early if the negotiated target power state is RUN */ -+ if (is_local_state_run(state_info->pwr_domain_state[lvl]) != 0) -+ break; -+ -+ parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; -+ } -+ -+ /* -+ * This is for cases when we break out of the above loop early because -+ * the target power state is RUN at a power level < end_pwrlvl. -+ * We update the requested power state from state_info and then -+ * set the target state as RUN. -+ */ -+ for (lvl = lvl + 1U; lvl <= end_pwrlvl; lvl++) { -+ psci_set_req_local_pwr_state(lvl, cpu_idx, -+ state_info->pwr_domain_state[lvl]); -+ state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN; -+ -+ } -+ -+ /* Update the target state in the power domain nodes */ -+ psci_set_target_local_pwr_states(end_pwrlvl, state_info); -+} -+ -+/****************************************************************************** -+ * This function ensures that the power state parameter in a CPU_SUSPEND request -+ * is valid. If so, it returns the requested states for each power level. -+ *****************************************************************************/ -+int psci_validate_power_state(unsigned int power_state, -+ psci_power_state_t *state_info) -+{ -+ /* Check SBZ bits in power state are zero */ -+ if (psci_check_power_state(power_state) != 0U) -+ return PSCI_E_INVALID_PARAMS; -+ -+ if (psci_plat_pm_ops->validate_power_state == NULL) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ /* Validate the power_state using platform pm_ops */ -+ return psci_plat_pm_ops->validate_power_state(power_state, state_info); -+} -+ -+/****************************************************************************** -+ * This function finds the level of the highest power domain which will be -+ * placed in a low power state during a suspend operation. -+ *****************************************************************************/ -+unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info) -+{ -+ int i; -+ -+ for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) { -+ if (is_local_state_run(state_info->pwr_domain_state[i]) == 0) -+ return (unsigned int) i; -+ } -+ -+ return PSCI_INVALID_PWR_LVL; -+} -+ -+/****************************************************************************** -+ * This function validates a suspend request by making sure that if a standby -+ * state is requested then no power level is turned off and the highest power -+ * level is placed in a standby/retention state. -+ * -+ * It also ensures that the state that level X will enter is not shallower than -+ * the state that level X + 1 will enter. -+ * -+ * This validation will be enabled only for DEBUG builds as the platform is -+ * expected to perform these validations as well.
-+ *****************************************************************************/ -+int psci_validate_suspend_req(const psci_power_state_t *state_info, -+ unsigned int is_power_down_state) -+{ -+ unsigned int max_off_lvl, target_lvl, max_retn_lvl; -+ plat_local_state_t state; -+ plat_local_state_type_t req_state_type, deepest_state_type; -+ int i; -+ -+ /* Find the target suspend power level */ -+ target_lvl = psci_find_target_suspend_lvl(state_info); -+ if (target_lvl == PSCI_INVALID_PWR_LVL) -+ return PSCI_E_INVALID_PARAMS; -+ -+ /* All power domain levels are in a RUN state to begin with */ -+ deepest_state_type = STATE_TYPE_RUN; -+ -+ for (i = (int) target_lvl; i >= (int) PSCI_CPU_PWR_LVL; i--) { -+ state = state_info->pwr_domain_state[i]; -+ req_state_type = find_local_state_type(state); -+ -+ /* -+ * While traversing from the highest power level to the lowest, -+ * the state requested for lower levels has to be the same or -+ * deeper i.e. equal to or greater than the state at the higher -+ * levels. If this condition is true, then the requested state -+ * becomes the deepest state encountered so far. -+ */ -+ if (req_state_type < deepest_state_type) -+ return PSCI_E_INVALID_PARAMS; -+ deepest_state_type = req_state_type; -+ } -+ -+ /* Find the highest off power level */ -+ max_off_lvl = psci_find_max_off_lvl(state_info); -+ -+ /* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */ -+ max_retn_lvl = PSCI_INVALID_PWR_LVL; -+ if (target_lvl != max_off_lvl) -+ max_retn_lvl = target_lvl; -+ -+ /* -+ * If this is not a request for a power down state then max off level -+ * has to be invalid and max retention level has to be a valid power -+ * level. -+ */ -+ if ((is_power_down_state == 0U) && -+ ((max_off_lvl != PSCI_INVALID_PWR_LVL) || -+ (max_retn_lvl == PSCI_INVALID_PWR_LVL))) -+ return PSCI_E_INVALID_PARAMS; -+ -+ return PSCI_E_SUCCESS; -+} -+ -+void riscv_pwr_state_to_psci(unsigned int rstate, unsigned int *pstate) -+{ -+ *pstate = 0; -+ -+ /* suspend ? */ -+ if (rstate & (1 << RSTATE_TYPE_SHIFT)) -+ *pstate |= (1 << PSTATE_TYPE_SHIFT); -+ -+ /* cluster ? */ -+ if (rstate & (PSTATE_PWR_LVL_MASK << RSTATE_PWR_LVL_SHIFT)) -+ *pstate |= (rstate & (PSTATE_PWR_LVL_MASK << RSTATE_PWR_LVL_SHIFT)); -+} -diff --git a/lib/utils/psci/psci_main.c b/lib/utils/psci/psci_main.c -new file mode 100644 -index 000000000000..f2441f57e16e ---- /dev/null -+++ b/lib/utils/psci/psci_main.c -@@ -0,0 +1,188 @@ -+#include -+#include -+#include -+#include -+#include -+#include -+#include "psci_private.h" -+ -+/******************************************************************************* -+ * PSCI frontend api for servicing SMCs. Described in the PSCI spec. 
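Taken together, psci_find_target_suspend_lvl() and the loop in psci_validate_suspend_req() above enforce one simple rule: walking down from the target suspend level to the CPU level, the requested states may only get deeper (or stay equal). The sketch below replays that rule on two sample requests. The two-level topology (level 0 = CPU, level 1 = cluster) and the 0/1/2 state encoding are assumptions made for illustration, and it compares raw state values where the real code classifies them through find_local_state_type().

#include <stdio.h>

#define LVL_CPU     0
#define LVL_CLUSTER 1
#define NUM_LVLS    2

/* Assumed encoding: 0 = RUN, 1 = RETENTION, 2 = OFF. */
enum { RUN = 0, RET = 1, OFF = 2 };

/* Mirrors psci_find_target_suspend_lvl(): highest level not left in RUN. */
static int target_suspend_lvl(const int *state)
{
        for (int i = NUM_LVLS - 1; i >= LVL_CPU; i--)
                if (state[i] != RUN)
                        return i;
        return -1;
}

/* Mirrors the monotonicity rule in psci_validate_suspend_req(): from the
 * target level down to the CPU, states may only get deeper or stay equal. */
static int suspend_req_is_valid(const int *state)
{
        int deepest = RUN;
        int tgt = target_suspend_lvl(state);

        if (tgt < 0)
                return 0;
        for (int i = tgt; i >= LVL_CPU; i--) {
                if (state[i] < deepest)
                        return 0;
                deepest = state[i];
        }
        return 1;
}

int main(void)
{
        int ok[NUM_LVLS]  = { [LVL_CPU] = OFF, [LVL_CLUSTER] = RET }; /* valid   */
        int bad[NUM_LVLS] = { [LVL_CPU] = RET, [LVL_CLUSTER] = OFF }; /* invalid */

        printf("cpu=OFF cluster=RET -> %s\n", suspend_req_is_valid(ok) ? "valid" : "invalid");
        printf("cpu=RET cluster=OFF -> %s\n", suspend_req_is_valid(bad) ? "valid" : "invalid");
        return 0;
}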
-+ ******************************************************************************/ -+int psci_cpu_on(u_register_t target_cpu, -+ uintptr_t entrypoint) -+ -+{ -+ int rc; -+ -+ /* Determine if the cpu exists of not */ -+ rc = psci_validate_mpidr(target_cpu); -+ if (rc != PSCI_E_SUCCESS) -+ return PSCI_E_INVALID_PARAMS; -+ -+ /* -+ * To turn this cpu on, specify which power -+ * levels need to be turned on -+ */ -+ return psci_cpu_on_start(target_cpu, entrypoint); -+} -+ -+int psci_affinity_info(u_register_t target_affinity, -+ unsigned int lowest_affinity_level) -+{ -+ int ret; -+ unsigned int target_idx; -+ psci_cpu_data_t *svc_cpu_data; -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(target_affinity); -+ svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); -+ -+ /* We dont support level higher than PSCI_CPU_PWR_LVL */ -+ if (lowest_affinity_level > PSCI_CPU_PWR_LVL) -+ return PSCI_E_INVALID_PARAMS; -+ -+ /* Calculate the cpu index of the target */ -+ ret = plat_core_pos_by_mpidr(target_affinity); -+ if (ret == -1) { -+ return PSCI_E_INVALID_PARAMS; -+ } -+ target_idx = (unsigned int)ret; -+ -+ /* -+ * Generic management: -+ * Perform cache maintanence ahead of reading the target CPU state to -+ * ensure that the data is not stale. -+ * There is a theoretical edge case where the cache may contain stale -+ * data for the target CPU data - this can occur under the following -+ * conditions: -+ * - the target CPU is in another cluster from the current -+ * - the target CPU was the last CPU to shutdown on its cluster -+ * - the cluster was removed from coherency as part of the CPU shutdown -+ * -+ * In this case the cache maintenace that was performed as part of the -+ * target CPUs shutdown was not seen by the current CPU's cluster. And -+ * so the cache may contain stale data for the target CPU. -+ */ -+ csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->aff_info_state, sizeof(aff_info_state_t)); -+ -+ return psci_get_aff_info_state_by_idx(target_idx); -+} -+ -+int psci_cpu_off(void) -+{ -+ int rc; -+ unsigned int target_pwrlvl = PLAT_MAX_PWR_LVL; -+ -+ /* -+ * Do what is needed to power off this CPU and possible higher power -+ * levels if it able to do so. Upon success, enter the final wfi -+ * which will power down this CPU. -+ */ -+ rc = psci_do_cpu_off(target_pwrlvl); -+ -+ /* -+ * The only error cpu_off can return is E_DENIED. So check if that's -+ * indeed the case. -+ */ -+ if (rc != PSCI_E_DENIED) { -+ sbi_printf("%s:%d, err\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ return rc; -+} -+ -+int psci_cpu_suspend(unsigned int power_state, -+ uintptr_t entrypoint, -+ u_register_t context_id) -+{ -+ int rc; -+ unsigned int target_pwrlvl, is_power_down_state, pwr_state; -+ /* entry_point_info_t ep; */ -+ psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} }; -+ plat_local_state_t cpu_pd_state; -+ -+ riscv_pwr_state_to_psci(power_state, &pwr_state); -+ -+ /* Validate the power_state parameter */ -+ rc = psci_validate_power_state(pwr_state, &state_info); -+ if (rc != PSCI_E_SUCCESS) { -+ if (rc != PSCI_E_INVALID_PARAMS) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ return rc; -+ } -+ -+ /* -+ * Get the value of the state type bit from the power state parameter. 
-+ */ -+ is_power_down_state = psci_get_pstate_type(pwr_state); -+ -+ /* Sanity check the requested suspend levels */ -+ if (psci_validate_suspend_req(&state_info, is_power_down_state) -+ != PSCI_E_SUCCESS) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ target_pwrlvl = psci_find_target_suspend_lvl(&state_info); -+ if (target_pwrlvl == PSCI_INVALID_PWR_LVL) { -+ sbi_printf("Invalid target power level for suspend operation\n"); -+ sbi_hart_hang(); -+ } -+ -+ /* Fast path for CPU standby.*/ -+ if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) { -+ if (psci_plat_pm_ops->cpu_standby == NULL) -+ return PSCI_E_INVALID_PARAMS; -+ -+ /* -+ * Set the state of the CPU power domain to the platform -+ * specific retention state and enter the standby state. -+ */ -+ cpu_pd_state = state_info.pwr_domain_state[PSCI_CPU_PWR_LVL]; -+ psci_set_cpu_local_state(cpu_pd_state); -+ -+#if ENABLE_PSCI_STAT -+ plat_psci_stat_accounting_start(&state_info); -+#endif -+ -+ psci_plat_pm_ops->cpu_standby(cpu_pd_state); -+ -+ /* Upon exit from standby, set the state back to RUN. */ -+ psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN); -+ -+#if ENABLE_PSCI_STAT -+ plat_psci_stat_accounting_stop(&state_info); -+ -+ /* Update PSCI stats */ -+ psci_stats_update_pwr_up(PSCI_CPU_PWR_LVL, &state_info); -+#endif -+ -+ return PSCI_E_SUCCESS; -+ } -+ -+ /* -+ * If a power down state has been requested, we need to verify entry -+ * point and program entry information. -+ */ -+ if (is_power_down_state != 0U) { -+ /* rc = psci_validate_entry_point(&ep, entrypoint, context_id); -+ if (rc != PSCI_E_SUCCESS) -+ return rc; */; -+ } -+ -+ /* -+ * Do what is needed to enter the power down state. Upon success, -+ * enter the final wfi which will power down this CPU. This function -+ * might return if the power down was abandoned for any reason, e.g. -+ * arrival of an interrupt -+ */ -+ rc = psci_cpu_suspend_start(/* &ep */entrypoint, -+ target_pwrlvl, -+ &state_info, -+ is_power_down_state); -+ -+ return rc; -+} -+ -diff --git a/lib/utils/psci/psci_off.c b/lib/utils/psci/psci_off.c -new file mode 100644 -index 000000000000..e8b5be18dedb ---- /dev/null -+++ b/lib/utils/psci/psci_off.c -@@ -0,0 +1,173 @@ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "psci_private.h" -+ -+/****************************************************************************** -+ * Construct the psci_power_state to request power OFF at all power levels. -+ ******************************************************************************/ -+static void psci_set_power_off_state(psci_power_state_t *state_info) -+{ -+ unsigned int lvl; -+ -+ for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++) -+ state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE; -+} -+ -+/****************************************************************************** -+ * Top level handler which is called when a cpu wants to power itself down. -+ * It's assumed that along with turning the cpu power domain off, power -+ * domains at higher levels will be turned off as far as possible. It finds -+ * the highest level where a domain has to be powered off by traversing the -+ * node information and then performs generic, architectural, platform setup -+ * and state management required to turn OFF that power domain and domains -+ * below it. e.g. 
For a cpu that's to be powered OFF, it could mean programming -+ * the power controller whereas for a cluster that's to be powered off, it will -+ * call the platform specific code which will disable coherency at the -+ * interconnect level if the cpu is the last in the cluster and also the -+ * program the power controller. -+ ******************************************************************************/ -+int psci_do_cpu_off(unsigned int end_pwrlvl) -+{ -+ int rc = PSCI_E_SUCCESS; -+ unsigned int hartid = current_hartid(); -+ psci_cpu_data_t *svc_cpu_data; -+ unsigned int idx = plat_core_pos_by_mpidr(hartid);; -+ psci_power_state_t state_info; -+ unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0}; -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid); -+ svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); -+ -+ /* -+ * This function must only be called on platforms where the -+ * CPU_OFF platform hooks have been implemented. -+ */ -+ if (psci_plat_pm_ops->pwr_domain_off == NULL) { -+ sbi_printf("%s:%d, err\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ /* Construct the psci_power_state for CPU_OFF */ -+ psci_set_power_off_state(&state_info); -+ -+ /* -+ * Call the platform provided early CPU_OFF handler to allow -+ * platforms to perform any housekeeping activities before -+ * actually powering the CPU off. PSCI_E_DENIED indicates that -+ * the CPU off sequence should be aborted at this time. -+ */ -+ if (psci_plat_pm_ops->pwr_domain_off_early) { -+ rc = psci_plat_pm_ops->pwr_domain_off_early(&state_info); -+ if (rc == PSCI_E_DENIED) { -+ return rc; -+ } -+ } -+ -+ /* -+ * Get the parent nodes here, this is important to do before we -+ * initiate the power down sequence as after that point the core may -+ * have exited coherency and its cache may be disabled, any access to -+ * shared memory after that (such as the parent node lookup in -+ * psci_cpu_pd_nodes) can cause coherency issues on some platforms. -+ */ -+ psci_get_parent_pwr_domain_nodes(idx, end_pwrlvl, parent_nodes); -+ -+ /* -+ * This function acquires the lock corresponding to each power -+ * level so that by the time all locks are taken, the system topology -+ * is snapshot and state management can be done safely. -+ */ -+ psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes); -+ -+#if 0 -+ /* -+ * Call the cpu off handler registered by the Secure Payload Dispatcher -+ * to let it do any bookkeeping. Assume that the SPD always reports an -+ * E_DENIED error if SP refuse to power down -+ */ -+ if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_off != NULL)) { -+ rc = psci_spd_pm->svc_off(0); -+ if (rc != 0) -+ goto exit; -+ } -+#endif -+ -+ /* -+ * This function is passed the requested state info and -+ * it returns the negotiated state info for each power level upto -+ * the end level specified. -+ */ -+ psci_do_state_coordination(end_pwrlvl, &state_info); -+ -+#if ENABLE_PSCI_STAT -+ /* Update the last cpu for each level till end_pwrlvl */ -+ psci_stats_update_pwr_down(end_pwrlvl, &state_info); -+#endif -+ -+ /* -+ * Without hardware-assisted coherency, the CPU drivers disable data -+ * caches, then perform cache-maintenance operations in software. -+ * -+ * This also calls prepare_cpu_pwr_dwn() to initiate power down -+ * sequence, but that function will return with data caches disabled. -+ * We must ensure that the stack memory is flushed out to memory before -+ * we start popping from it again. 
-+ */ -+ psci_do_pwrdown_cache_maintenance(hartid, (uintptr_t)scratch, psci_find_max_off_lvl(&state_info)); -+ -+ /* -+ * Plat. management: Perform platform specific actions to turn this -+ * cpu off e.g. exit cpu coherency, program the power controller etc. -+ */ -+ psci_plat_pm_ops->pwr_domain_off(&state_info); -+ -+#if ENABLE_PSCI_STAT -+ plat_psci_stat_accounting_start(&state_info); -+#endif -+ -+#if 0 -+exit: -+#endif -+ /* -+ * Release the locks corresponding to each power level in the -+ * reverse order to which they were acquired. -+ */ -+ psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes); -+ -+ /* -+ * Check if all actions needed to safely power down this cpu have -+ * successfully completed. -+ */ -+ if (rc == PSCI_E_SUCCESS) { -+ /* -+ * Set the affinity info state to OFF. When caches are disabled, -+ * this writes directly to main memory, so cache maintenance is -+ * required to ensure that later cached reads of aff_info_state -+ * return AFF_STATE_OFF. A dsbish() ensures ordering of the -+ * update to the affinity info state prior to cache line -+ * invalidation. -+ */ -+ csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->aff_info_state, sizeof(aff_info_state_t)); -+ psci_set_aff_info_state(AFF_STATE_OFF); -+ /* psci_dsbish(); */ -+ asm volatile ("fence rw, rw"); -+ csi_dcache_invalid_range((uintptr_t)&svc_cpu_data->aff_info_state, sizeof(aff_info_state_t)); -+ -+ if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi != NULL) { -+ /* This function must not return */ -+ psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info); -+ } else { -+ /* -+ * Enter a wfi loop which will allow the power -+ * controller to physically power down this cpu. -+ */ -+ //psci_power_down_wfi(); -+ } -+ } -+ -+ return rc; -+} -diff --git a/lib/utils/psci/psci_on.c b/lib/utils/psci/psci_on.c -new file mode 100644 -index 000000000000..2dd4ff0d50a7 ---- /dev/null -+++ b/lib/utils/psci/psci_on.c -@@ -0,0 +1,246 @@ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "psci_private.h" -+ -+/* -+ * Helper functions for the CPU level spinlocks -+ */ -+static inline void psci_spin_lock_cpu(unsigned int idx) -+{ -+ spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock); -+} -+ -+static inline void psci_spin_unlock_cpu(unsigned int idx) -+{ -+ spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock); -+} -+ -+/******************************************************************************* -+ * This function checks whether a cpu which has been requested to be turned on -+ * is OFF to begin with. -+ ******************************************************************************/ -+static int cpu_on_validate_state(aff_info_state_t aff_state) -+{ -+ if (aff_state == AFF_STATE_ON) -+ return PSCI_E_ALREADY_ON; -+ -+ if (aff_state == AFF_STATE_ON_PENDING) -+ return PSCI_E_ON_PENDING; -+ -+ if (aff_state != AFF_STATE_OFF) { -+ sbi_printf("wrong aff state.\n"); -+ sbi_hart_hang(); -+ } -+ -+ return PSCI_E_SUCCESS; -+} -+ -+/******************************************************************************* -+ * Generic handler which is called to physically power on a cpu identified by -+ * its mpidr. It performs the generic, architectural, platform setup and state -+ * management to power on the target cpu e.g. it will ensure that -+ * enough information is stashed for it to resume execution in the non-secure -+ * security state. -+ * -+ * The state of all the relevant power domains are changed after calling the -+ * platform handler as it can return error. 
-+ ******************************************************************************/ -+int psci_cpu_on_start(u_register_t target, uintptr_t entrypoint) -+{ -+ int rc; -+ aff_info_state_t target_aff_state; -+ int ret = 0; -+ unsigned int target_idx; -+ psci_cpu_data_t *svc_cpu_data; -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(target); -+ svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); -+ -+ ret = plat_core_pos_by_mpidr(target); -+ -+ if ((ret < 0) || (ret >= (int)PLATFORM_CORE_COUNT)) { -+ sbi_printf("Unexpected core index.\n"); -+ sbi_hart_hang(); -+ } -+ -+ target_idx = (unsigned int)ret; -+ -+ /* -+ * This function must only be called on platforms where the -+ * CPU_ON platform hooks have been implemented. -+ */ -+ if (psci_plat_pm_ops->pwr_domain_on == NULL || -+ psci_plat_pm_ops->pwr_domain_on_finish == NULL) { -+ sbi_printf("%s:%d, invalid psci ops\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ /* Protect against multiple CPUs trying to turn ON the same target CPU */ -+ psci_spin_lock_cpu(target_idx); -+ -+ /* -+ * Generic management: Ensure that the cpu is off to be -+ * turned on. -+ * Perform cache maintanence ahead of reading the target CPU state to -+ * ensure that the data is not stale. -+ * There is a theoretical edge case where the cache may contain stale -+ * data for the target CPU data - this can occur under the following -+ * conditions: -+ * - the target CPU is in another cluster from the current -+ * - the target CPU was the last CPU to shutdown on its cluster -+ * - the cluster was removed from coherency as part of the CPU shutdown -+ * -+ * In this case the cache maintenace that was performed as part of the -+ * target CPUs shutdown was not seen by the current CPU's cluster. And -+ * so the cache may contain stale data for the target CPU. -+ */ -+ csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->aff_info_state, sizeof(aff_info_state_t)); -+ -+ rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx)); -+ if (rc != PSCI_E_SUCCESS) { -+ goto exit; -+ } -+#if 0 -+ /* -+ * Call the cpu on handler registered by the Secure Payload Dispatcher -+ * to let it do any bookeeping. If the handler encounters an error, it's -+ * expected to assert within -+ */ -+ if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on != NULL)) -+ psci_spd_pm->svc_on(target_cpu); -+#endif -+ /* -+ * Set the Affinity info state of the target cpu to ON_PENDING. -+ * Flush aff_info_state as it will be accessed with caches -+ * turned OFF. -+ */ -+ psci_set_aff_info_state_by_idx((uintptr_t)target_idx, AFF_STATE_ON_PENDING); -+ -+ csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->aff_info_state, sizeof(aff_info_state_t)); -+ -+ /* -+ * The cache line invalidation by the target CPU after setting the -+ * state to OFF (see psci_do_cpu_off()), could cause the update to -+ * aff_info_state to be invalidated. Retry the update if the target -+ * CPU aff_info_state is not ON_PENDING. 
-+ */ -+ target_aff_state = psci_get_aff_info_state_by_idx(target_idx); -+ if (target_aff_state != AFF_STATE_ON_PENDING) { -+ if (target_aff_state != AFF_STATE_OFF) { -+ sbi_printf("%s:%d, invalid psci state\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING); -+ -+ csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->aff_info_state, sizeof(aff_info_state_t)); -+ -+ if (psci_get_aff_info_state_by_idx(target_idx) != -+ AFF_STATE_ON_PENDING) { -+ sbi_printf("%s:%d, invalid psci state\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ } -+ -+ /* -+ * Perform generic, architecture and platform specific handling. -+ */ -+ /* -+ * Plat. management: Give the platform the current state -+ * of the target cpu to allow it to perform the necessary -+ * steps to power on. -+ */ -+ rc = psci_plat_pm_ops->pwr_domain_on(target); -+ if ((rc != PSCI_E_SUCCESS) && (rc != PSCI_E_INTERN_FAIL)) { -+ sbi_printf("%s:%d, power-on domain err\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ if (rc == PSCI_E_SUCCESS) { -+ /* Store the re-entry information for the non-secure world. */ -+ /**/; -+ } else { -+ /* Restore the state on error. */ -+ psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF); -+ csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->aff_info_state, sizeof(aff_info_state_t)); -+ } -+ -+exit: -+ psci_spin_unlock_cpu(target_idx); -+ return rc; -+} -+ -+/******************************************************************************* -+ * The following function finish an earlier power on request. They -+ * are called by the common finisher routine in psci_common.c. The `state_info` -+ * is the psci_power_state from which this CPU has woken up from. -+ ******************************************************************************/ -+void psci_cpu_on_finish(unsigned int cpu_idx, const psci_power_state_t *state_info) -+{ -+ const struct sbi_platform *sbi = sbi_platform_thishart_ptr(); -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(sbi->hart_index2id[cpu_idx]); -+ -+ /* -+ * Plat. management: Perform the platform specific actions -+ * for this cpu e.g. enabling the gic or zeroing the mailbox -+ * register. The actual state of this cpu has already been -+ * changed. -+ */ -+ psci_plat_pm_ops->pwr_domain_on_finish(state_info); -+ -+ /* -+ * Arch. management: Enable data cache and manage stack memory -+ */ -+ psci_do_pwrup_cache_maintenance((uintptr_t)scratch); -+ -+ /* -+ * Plat. management: Perform any platform specific actions which -+ * can only be done with the cpu and the cluster guaranteed to -+ * be coherent. -+ */ -+ if (psci_plat_pm_ops->pwr_domain_on_finish_late != NULL) -+ psci_plat_pm_ops->pwr_domain_on_finish_late(state_info); -+ -+#if 0 -+ /* -+ * All the platform specific actions for turning this cpu -+ * on have completed. Perform enough arch.initialization -+ * to run in the non-secure address space. -+ */ -+ psci_arch_setup(); -+#endif -+ -+ /* -+ * Lock the CPU spin lock to make sure that the context initialization -+ * is done. Since the lock is only used in this function to create -+ * a synchronization point with cpu_on_start(), it can be released -+ * immediately. 
-+ */ -+ psci_spin_lock_cpu(cpu_idx); -+ psci_spin_unlock_cpu(cpu_idx); -+ -+ /* Ensure we have been explicitly woken up by another cpu */ -+ if (psci_get_aff_info_state() != AFF_STATE_ON_PENDING) { -+ sbi_printf("%s:%d, err\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+#if 0 -+ /* -+ * Call the cpu on finish handler registered by the Secure Payload -+ * Dispatcher to let it do any bookeeping. If the handler encounters an -+ * error, it's expected to assert within -+ */ -+ if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on_finish != NULL)) -+ psci_spd_pm->svc_on_finish(0); -+ -+ PUBLISH_EVENT(psci_cpu_on_finish); -+#endif -+ -+ /* Populate the mpidr field within the cpu node array */ -+ /* This needs to be done only once */ -+ psci_cpu_pd_nodes[cpu_idx].mpidr = current_hartid(); -+} -diff --git a/lib/utils/psci/psci_private.h b/lib/utils/psci/psci_private.h -new file mode 100644 -index 000000000000..d1cd2ba84742 ---- /dev/null -+++ b/lib/utils/psci/psci_private.h -@@ -0,0 +1,198 @@ -+#ifndef __PSCI_PRIVATE_H__ -+#define __PSCI_PRIVATE_H__ -+ -+#include -+#include -+#include -+#include -+ -+/******************************************************************************* -+ * The following two data structures implement the power domain tree. The tree -+ * is used to track the state of all the nodes i.e. power domain instances -+ * described by the platform. The tree consists of nodes that describe CPU power -+ * domains i.e. leaf nodes and all other power domains which are parents of a -+ * CPU power domain i.e. non-leaf nodes. -+ ******************************************************************************/ -+typedef struct non_cpu_pwr_domain_node { -+ /* -+ * Index of the first CPU power domain node level 0 which has this node -+ * as its parent. -+ */ -+ unsigned int cpu_start_idx; -+ -+ /* -+ * Number of CPU power domains which are siblings of the domain indexed -+ * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx -+ * -> cpu_start_idx + ncpus' have this node as their parent. -+ */ -+ unsigned int ncpus; -+ -+ /* -+ * Index of the parent power domain node. -+ * TODO: Figure out whether to whether using pointer is more efficient. -+ */ -+ unsigned int parent_node; -+ -+ plat_local_state_t local_state; -+ -+ unsigned char level; -+ -+ /* For indexing the psci_lock array*/ -+ unsigned short lock_index; -+} __aligned(CACHE_LINE_SIZE) non_cpu_pd_node_t; -+ -+typedef struct cpu_pwr_domain_node { -+ u_register_t mpidr; -+ -+ /* -+ * Index of the parent power domain node. -+ * TODO: Figure out whether to whether using pointer is more efficient. -+ */ -+ unsigned int parent_node; -+ -+ /* -+ * A CPU power domain does not require state coordination like its -+ * parent power domains. Hence this node does not include a bakery -+ * lock. A spinlock is required by the CPU_ON handler to prevent a race -+ * when multiple CPUs try to turn ON the same target CPU. -+ */ -+ spinlock_t cpu_lock; -+} cpu_pd_node_t; -+ -+/* -+ * On systems where participant CPUs are cache-coherent, we can use spinlocks -+ * instead of bakery locks. 
-+ */ -+typedef struct psci_spinlock_t { -+ spinlock_t lock; -+} __aligned(CACHE_LINE_SIZE) _psci_spinlock_t; -+ -+#define DEFINE_PSCI_LOCK(_name) _psci_spinlock_t _name -+#define DECLARE_PSCI_LOCK(_name) extern DEFINE_PSCI_LOCK(_name) -+ -+/* One lock is required per non-CPU power domain node */ -+DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]); -+ -+static inline void psci_lock_init(non_cpu_pd_node_t *non_cpu_pd_node, unsigned short idx) -+{ -+ non_cpu_pd_node[idx].lock_index = idx; -+} -+ -+static inline void psci_lock_get(non_cpu_pd_node_t *non_cpu_pd_node) -+{ -+ spin_lock(&psci_locks[non_cpu_pd_node->lock_index].lock); -+} -+ -+static inline void psci_lock_release(non_cpu_pd_node_t *non_cpu_pd_node) -+{ -+ spin_unlock(&psci_locks[non_cpu_pd_node->lock_index].lock); -+} -+ -+/* common */ -+extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]; -+extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT]; -+extern unsigned int psci_plat_core_count; -+extern unsigned long psci_delta_off; -+extern const plat_psci_ops_t *psci_plat_pm_ops; -+ -+void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl, -+ const unsigned int *parent_nodes); -+void psci_release_pwr_domain_locks(unsigned int end_pwrlvl, -+ const unsigned int *parent_nodes); -+unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info); -+ -+int psci_validate_mpidr(u_register_t mpidr); -+void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx, -+ unsigned int end_lvl, -+ unsigned int *node_index); -+ -+void psci_init_req_local_pwr_states(void); -+void set_non_cpu_pd_node_local_state(unsigned int parent_idx, -+ plat_local_state_t state); -+void psci_set_req_local_pwr_state(unsigned int pwrlvl, -+ unsigned int cpu_idx, -+ plat_local_state_t req_pwr_state); -+void psci_set_aff_info_state(aff_info_state_t aff_state); -+aff_info_state_t psci_get_aff_info_state(void); -+aff_info_state_t psci_get_aff_info_state_by_idx(unsigned int idx); -+void psci_set_aff_info_state_by_idx(unsigned int idx, aff_info_state_t aff_state); -+void psci_set_cpu_local_state(plat_local_state_t state); -+void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl); -+ -+void psci_get_target_local_pwr_states(unsigned int end_pwrlvl, -+ psci_power_state_t *target_state); -+ -+void psci_do_state_coordination(unsigned int end_pwrlvl, -+ psci_power_state_t *state_info); -+ -+int plat_core_pos_by_mpidr(u_register_t mpidr); -+int psci_validate_power_state(unsigned int power_state, -+ psci_power_state_t *state_info); -+int psci_validate_suspend_req(const psci_power_state_t *state_info, -+ unsigned int is_power_down_state); -+unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info); -+unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info); -+ -+void psci_set_suspend_pwrlvl(unsigned int target_lvl); -+/* Private exported functions from psci_suspend.c */ -+int psci_cpu_suspend_start(/* const entry_point_info_t *ep */ uintptr_t entrypoint, -+ unsigned int end_pwrlvl, -+ psci_power_state_t *state_info, -+ unsigned int is_power_down_state); -+void psci_cpu_suspend_finish(unsigned int cpu_idx, const psci_power_state_t *state_info); -+void riscv_pwr_state_to_psci(unsigned int rstate, unsigned int *pstate); -+ -+/* Helper function to identify a CPU standby request in PSCI Suspend call */ -+static inline bool is_cpu_standby_req(unsigned int is_power_down_state, -+ unsigned int retn_lvl) -+{ -+ return (is_power_down_state == 0U) && (retn_lvl == 0U); -+} -+ -+static inline void 
psci_do_pwrup_cache_maintenance(uintptr_t scratch) -+{ -+ /* invalidate local cache */ -+ csi_invalidate_dcache_all(); -+ -+ /* enable dcache */ -+ csi_enable_dcache(); -+} -+ -+static inline void psci_disable_core_snoop(void) -+{ -+ unsigned int hartid = current_hartid(); -+ -+ csr_clear(CSR_ML2SETUP, 1 << (hartid % PLATFORM_MAX_CPUS_PER_CLUSTER)); -+} -+ -+static inline void psci_do_pwrdown_cache_maintenance(int hartid, uintptr_t scratch, int power_level) -+{ -+ /* disable data prefetch */ -+ csi_disable_data_preftch(); -+ -+ /* flush the entire dcache */ -+ csi_flush_dcache_all(); -+ -+ if (power_level >= PSCI_CPU_PWR_LVL + 1) { -+#if defined(CONFIG_PLATFORM_SPACEMIT_K1X) -+ /* disable the tcm */ -+ csr_write(CSR_TCMCFG, 0); -+#endif -+ csi_flush_l2_cache(); -+ } -+ -+ /* disable dcache */ -+ csi_disable_dcache(); -+ -+ /* disable core snoop */ -+ psci_disable_core_snoop(); -+ -+ asm volatile ("fence iorw, iorw"); -+} -+ -+/* psci cpu */ -+int psci_cpu_on_start(u_register_t target, uintptr_t entrypoint); -+void psci_cpu_on_finish(unsigned int cpu_idx, const psci_power_state_t *state_info); -+int psci_do_cpu_off(unsigned int end_pwrlvl); -+ -+#endif -diff --git a/lib/utils/psci/psci_setup.c b/lib/utils/psci/psci_setup.c -new file mode 100644 -index 000000000000..ba52c20a8c99 ---- /dev/null -+++ b/lib/utils/psci/psci_setup.c -@@ -0,0 +1,242 @@ -+/* -+ * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved. -+ * -+ * SPDX-License-Identifier: BSD-3-Clause -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "psci_private.h" -+ -+/******************************************************************************* -+ * Function which initializes the 'psci_non_cpu_pd_nodes' or the -+ * 'psci_cpu_pd_nodes' corresponding to the power level.
-+ ******************************************************************************/ -+static void psci_init_pwr_domain_node(uint16_t node_idx, -+ unsigned int parent_idx, -+ unsigned char level) -+{ -+ if (level > PSCI_CPU_PWR_LVL) { -+ if (node_idx >= PSCI_NUM_NON_CPU_PWR_DOMAINS) { -+ sbi_printf("%s:%d, node_idx beyond the boundary\n", -+ __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ -+ psci_non_cpu_pd_nodes[node_idx].level = level; -+ psci_lock_init(psci_non_cpu_pd_nodes, node_idx); -+ psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx; -+ psci_non_cpu_pd_nodes[node_idx].local_state = -+ PLAT_MAX_OFF_STATE; -+ } else { -+ psci_cpu_data_t *svc_cpu_data; -+ const struct sbi_platform *sbi = sbi_platform_thishart_ptr(); -+ -+ if (node_idx >= PLATFORM_CORE_COUNT) { -+ sbi_printf("%s:%d, node_idx beyond the boundary\n", -+ __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ unsigned int hartid = sbi->hart_index2id[node_idx]; -+ -+ psci_cpu_pd_nodes[node_idx].parent_node = parent_idx; -+ -+ /* Initialize with an invalid mpidr */ -+ psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR; -+ -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid); -+ -+ svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); -+ -+ /* Set the Affinity Info for the cores as OFF */ -+ svc_cpu_data->aff_info_state = AFF_STATE_OFF; -+ -+ /* Invalidate the suspend level for the cpu */ -+ svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL; -+ -+ /* Set the power state to OFF state */ -+ svc_cpu_data->local_state = PLAT_MAX_OFF_STATE; -+ -+ csi_dcache_clean_invalid_range((uintptr_t)svc_cpu_data, sizeof(psci_cpu_data_t)); -+ } -+} -+ -+/******************************************************************************* -+ * This functions updates cpu_start_idx and ncpus field for each of the node in -+ * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each of -+ * the CPUs and check whether they match with the parent of the previous -+ * CPU. The basic assumption for this work is that children of the same parent -+ * are allocated adjacent indices. The platform should ensure this though proper -+ * mapping of the CPUs to indices via plat_core_pos_by_mpidr() and -+ * plat_my_core_pos() APIs. -+ *******************************************************************************/ -+static void psci_update_pwrlvl_limits(void) -+{ -+ unsigned int cpu_idx; -+ int j; -+ unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0}; -+ unsigned int temp_index[PLAT_MAX_PWR_LVL]; -+ -+ for (cpu_idx = 0; cpu_idx < psci_plat_core_count; cpu_idx++) { -+ psci_get_parent_pwr_domain_nodes(cpu_idx, -+ PLAT_MAX_PWR_LVL, -+ temp_index); -+ for (j = (int)PLAT_MAX_PWR_LVL - 1; j >= 0; j--) { -+ if (temp_index[j] != nodes_idx[j]) { -+ nodes_idx[j] = temp_index[j]; -+ psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx -+ = cpu_idx; -+ } -+ psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++; -+ } -+ } -+} -+ -+/******************************************************************************* -+ * Core routine to populate the power domain tree. The tree descriptor passed by -+ * the platform is populated breadth-first and the first entry in the map -+ * informs the number of root power domains. The parent nodes of the root nodes -+ * will point to an invalid entry(-1). 
-+ ******************************************************************************/ -+static unsigned int populate_power_domain_tree(const unsigned char -+ *topology) -+{ -+ unsigned int i, j = 0U, num_nodes_at_lvl = 1U, num_nodes_at_next_lvl; -+ unsigned int node_index = 0U, num_children; -+ unsigned int parent_node_index = 0U; -+ int level = (int)PLAT_MAX_PWR_LVL; -+ -+ /* -+ * For each level the inputs are: -+ * - number of nodes at this level in plat_array i.e. num_nodes_at_level -+ * This is the sum of values of nodes at the parent level. -+ * - Index of first entry at this level in the plat_array i.e. -+ * parent_node_index. -+ * - Index of first free entry in psci_non_cpu_pd_nodes[] or -+ * psci_cpu_pd_nodes[] i.e. node_index depending upon the level. -+ */ -+ while (level >= (int) PSCI_CPU_PWR_LVL) { -+ num_nodes_at_next_lvl = 0U; -+ /* -+ * For each entry (parent node) at this level in the plat_array: -+ * - Find the number of children -+ * - Allocate a node in a power domain array for each child -+ * - Set the parent of the child to the parent_node_index - 1 -+ * - Increment parent_node_index to point to the next parent -+ * - Accumulate the number of children at next level. -+ */ -+ for (i = 0U; i < num_nodes_at_lvl; i++) { -+ if (parent_node_index > PSCI_NUM_NON_CPU_PWR_DOMAINS) { -+ sbi_printf("%s:%d, node_idx beyond the boundary\n", -+ __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ num_children = topology[parent_node_index]; -+ -+ for (j = node_index; -+ j < (node_index + num_children); j++) -+ psci_init_pwr_domain_node((uint16_t)j, -+ parent_node_index - 1U, -+ (unsigned char)level); -+ -+ node_index = j; -+ num_nodes_at_next_lvl += num_children; -+ parent_node_index++; -+ } -+ -+ num_nodes_at_lvl = num_nodes_at_next_lvl; -+ level--; -+ -+ /* Reset the index for the cpu power domain array */ -+ if (level == (int) PSCI_CPU_PWR_LVL) -+ node_index = 0; -+ } -+ -+ /* Validate the sanity of array exported by the platform */ -+ if (j > PLATFORM_CORE_COUNT) { -+ sbi_printf("%s:%d, invalidate core count\n", -+ __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ return j; -+} -+ -+/******************************************************************************* -+ * This function does the architectural setup and takes the warm boot -+ * entry-point `mailbox_ep` as an argument. The function also initializes the -+ * power domain topology tree by querying the platform. The power domain nodes -+ * higher than the CPU are populated in the array psci_non_cpu_pd_nodes[] and -+ * the CPU power domains are populated in psci_cpu_pd_nodes[]. The platform -+ * exports its static topology map through the -+ * populate_power_domain_topology_tree() API. The algorithm populates the -+ * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this -+ * topology map. 
On a platform that implements two clusters of 2 cpus each, -+ * and supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would -+ * look like this: -+ * -+ * --------------------------------------------------- -+ * | system node | cluster 0 node | cluster 1 node | -+ * --------------------------------------------------- -+ * -+ * And populated psci_cpu_pd_nodes would look like this : -+ * <- cpus cluster0 -><- cpus cluster1 -> -+ * ------------------------------------------------ -+ * | CPU 0 | CPU 1 | CPU 2 | CPU 3 | -+ * ------------------------------------------------ -+ ******************************************************************************/ -+int psci_setup(void) -+{ -+ unsigned int cpu_idx; -+ const unsigned char *topology_tree; -+ unsigned int hartid = current_hartid(); -+ -+ cpu_idx = plat_core_pos_by_mpidr(hartid); -+ -+ psci_delta_off = sbi_scratch_alloc_offset(sizeof(psci_cpu_data_t)); -+ if (!psci_delta_off) -+ return SBI_ENOMEM; -+ -+ /* Query the topology map from the platform */ -+ topology_tree = plat_get_power_domain_tree_desc(); -+ -+ /* Populate the power domain arrays using the platform topology map */ -+ psci_plat_core_count = populate_power_domain_tree(topology_tree); -+ -+ /* Update the CPU limits for each node in psci_non_cpu_pd_nodes */ -+ psci_update_pwrlvl_limits(); -+ -+ /* Populate the mpidr field of cpu node for this CPU */ -+ psci_cpu_pd_nodes[cpu_idx].mpidr = hartid; -+ -+ psci_init_req_local_pwr_states(); -+ -+ /* -+ * Set the requested and target state of this CPU and all the higher -+ * power domain levels for this CPU to run. -+ */ -+ psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL); -+ -+ psci_print_power_domain_map(); -+ -+ (void) plat_setup_psci_ops(0, &psci_plat_pm_ops); -+ if (psci_plat_pm_ops == NULL) { -+ sbi_printf("%s:%d, invalid psci ops\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ /* -+ * Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs -+ * during warm boot, possibly before data cache is enabled. -+ */ -+ csi_dcache_clean_invalid_range((uintptr_t)&psci_plat_pm_ops, sizeof(*psci_plat_pm_ops)); -+ -+ return 0; -+} -diff --git a/lib/utils/psci/psci_suspend.c b/lib/utils/psci/psci_suspend.c -new file mode 100644 -index 000000000000..1466acfc77b6 ---- /dev/null -+++ b/lib/utils/psci/psci_suspend.c -@@ -0,0 +1,298 @@ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "psci_private.h" -+ -+/******************************************************************************* -+ * This function does generic and platform specific operations after a wake-up -+ * from standby/retention states at multiple power levels. -+ ******************************************************************************/ -+static void psci_suspend_to_standby_finisher(unsigned int cpu_idx, -+ unsigned int end_pwrlvl) -+{ -+ unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0}; -+ psci_power_state_t state_info; -+ -+ /* Get the parent nodes */ -+ psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes); -+ -+ psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes); -+ -+ /* -+ * Find out which retention states this CPU has exited from until the -+ * 'end_pwrlvl'. The exit retention state could be deeper than the entry -+ * state as a result of state coordination amongst other CPUs post wfi. 
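populate_power_domain_tree() above consumes the descriptor returned by plat_get_power_domain_tree_desc() as a flat, breadth-first list of child counts: the first entry gives the number of root power domains and each subsequent entry gives the number of children of the next node in breadth-first order. For the two-clusters-of-two-CPUs layout sketched in the comment, a matching descriptor could look like the following; the real array is platform-specific, so treat this purely as an illustration assuming PLAT_MAX_PWR_LVL == 2 (system/cluster/CPU).

/* Illustrative topology descriptor: one system node, two clusters,
 * two CPUs per cluster.
 *
 *   index 0: 1  -> one root (system) power domain
 *   index 1: 2  -> the system node has two cluster children
 *   index 2: 2  -> cluster 0 has two CPU children
 *   index 3: 2  -> cluster 1 has two CPU children
 */
static const unsigned char example_power_domain_tree_desc[] = {
        1,      /* number of root power domains */
        2,      /* clusters under the system node */
        2,      /* CPUs in cluster 0 */
        2,      /* CPUs in cluster 1 */
};

/* A hypothetical platform hook could then simply return this array:
 *
 * const unsigned char *plat_get_power_domain_tree_desc(void)
 * {
 *         return example_power_domain_tree_desc;
 * }
 */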
-+ */ -+ psci_get_target_local_pwr_states(end_pwrlvl, &state_info); -+ -+#if ENABLE_PSCI_STAT -+ plat_psci_stat_accounting_stop(&state_info); -+ psci_stats_update_pwr_up(end_pwrlvl, &state_info); -+#endif -+ -+ /* -+ * Plat. management: Allow the platform to do operations -+ * on waking up from retention. -+ */ -+ psci_plat_pm_ops->pwr_domain_suspend_finish(&state_info); -+ -+ /* -+ * Set the requested and target state of this CPU and all the higher -+ * power domain levels for this CPU to run. -+ */ -+ psci_set_pwr_domains_to_run(end_pwrlvl); -+ -+ psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes); -+} -+ -+/******************************************************************************* -+ * This function does generic and platform specific suspend to power down -+ * operations. -+ ******************************************************************************/ -+static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl, -+ /* const entry_point_info_t *ep */ uintptr_t ep, -+ const psci_power_state_t *state_info) -+{ -+ unsigned int hartid = current_hartid(); -+ psci_cpu_data_t *svc_cpu_data; -+ /* unsigned int max_off_lvl = psci_find_max_off_lvl(state_info); */ -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid); -+ svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); -+ -+ /* save something ???? */ -+ /* PUBLISH_EVENT(psci_suspend_pwrdown_start); */ -+ -+ /* Save PSCI target power level for the suspend finisher handler */ -+ psci_set_suspend_pwrlvl(end_pwrlvl); -+ -+ /* -+ * Flush the target power level as it might be accessed on power up with -+ * Data cache disabled. -+ */ -+ csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->target_pwrlvl, sizeof(unsigned int)); -+ -+#if 0 -+ /* -+ * Call the cpu suspend handler registered by the Secure Payload -+ * Dispatcher to let it do any book-keeping. If the handler encounters an -+ * error, it's expected to assert within -+ */ -+ if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend != NULL)) -+ psci_spd_pm->svc_suspend(max_off_lvl); -+#endif -+ -+ /* -+ * Plat. management: Allow the platform to perform any early -+ * actions required to power down the CPU. This might be useful for -+ * HW_ASSISTED_COHERENCY = 0 platforms that can safely perform these -+ * actions with data caches enabled. -+ */ -+ if (psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early != NULL) -+ psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early(state_info); -+ -+ /* -+ * Store the re-entry information for the non-secure world. -+ */ -+ /* cm_init_my_context(ep); */ -+ -+ /* -+ * Arch. management. Initiate power down sequence. -+ * TODO : Introduce a mechanism to query the cache level to flush -+ * and the cpu-ops power down to perform from the platform. -+ */ -+ /* psci_pwrdown_cpu(max_off_lvl); */ -+ psci_do_pwrdown_cache_maintenance(hartid, (uintptr_t)scratch, psci_find_max_off_lvl(state_info)); -+} -+ -+/******************************************************************************* -+ * Top level handler which is called when a cpu wants to suspend its execution. -+ * It is assumed that along with suspending the cpu power domain, power domains -+ * at higher levels until the target power level will be suspended as well. It -+ * coordinates with the platform to negotiate the target state for each of -+ * the power domain level till the target power domain level. It then performs -+ * generic, architectural, platform setup and state management required to -+ * suspend that power domain level and power domain levels below it. 
-+ * e.g. For a cpu that's to be suspended, it could mean programming the -+ * power controller whereas for a cluster that's to be suspended, it will call -+ * the platform specific code which will disable coherency at the interconnect -+ * level if the cpu is the last in the cluster and also the program the power -+ * controller. -+ * -+ * All the required parameter checks are performed at the beginning and after -+ * the state transition has been done, no further error is expected and it is -+ * not possible to undo any of the actions taken beyond that point. -+ ******************************************************************************/ -+int psci_cpu_suspend_start(/* const entry_point_info_t *ep */uintptr_t ep, -+ unsigned int end_pwrlvl, -+ psci_power_state_t *state_info, -+ unsigned int is_power_down_state) -+{ -+ int rc = PSCI_E_SUCCESS; -+ bool skip_wfi = false; -+ unsigned int hartid = current_hartid(); -+ unsigned int idx = plat_core_pos_by_mpidr(hartid); -+ unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0}; -+ -+ /* -+ * This function must only be called on platforms where the -+ * CPU_SUSPEND platform hooks have been implemented. -+ */ -+ if ((psci_plat_pm_ops->pwr_domain_suspend == NULL) || -+ (psci_plat_pm_ops->pwr_domain_suspend_finish == NULL)) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ /* Get the parent nodes */ -+ psci_get_parent_pwr_domain_nodes(idx, end_pwrlvl, parent_nodes); -+ -+ /* -+ * This function acquires the lock corresponding to each power -+ * level so that by the time all locks are taken, the system topology -+ * is snapshot and state management can be done safely. -+ */ -+ psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes); -+ -+ /* -+ * We check if there are any pending interrupts after the delay -+ * introduced by lock contention to increase the chances of early -+ * detection that a wake-up interrupt has fired. -+ */ -+ if (__get_Supervisor_isr() != 0U) { -+ skip_wfi = true; -+ goto exit; -+ } -+ -+ /* -+ * This function is passed the requested state info and -+ * it returns the negotiated state info for each power level upto -+ * the end level specified. -+ */ -+ psci_do_state_coordination(end_pwrlvl, state_info); -+ -+#if ENABLE_PSCI_STAT -+ /* Update the last cpu for each level till end_pwrlvl */ -+ psci_stats_update_pwr_down(end_pwrlvl, state_info); -+#endif -+ -+ if (is_power_down_state != 0U) -+ psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info); -+ -+ /* -+ * Plat. management: Allow the platform to perform the -+ * necessary actions to turn off this cpu e.g. set the -+ * platform defined mailbox with the psci entrypoint, -+ * program the power controller etc. -+ */ -+ -+ psci_plat_pm_ops->pwr_domain_suspend(state_info); -+ -+#if ENABLE_PSCI_STAT -+ plat_psci_stat_accounting_start(state_info); -+#endif -+ -+exit: -+ /* -+ * Release the locks corresponding to each power level in the -+ * reverse order to which they were acquired. -+ */ -+ psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes); -+ -+ if (skip_wfi) { -+ return rc; -+ } -+ -+ if (is_power_down_state != 0U) { -+ /* The function calls below must not return */ -+ if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi != NULL) -+ psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info); -+ else -+ /* psci_power_down_wfi() */; -+ } -+ -+ /* -+ * We will reach here if only retention/standby states have been -+ * requested at multiple power levels. This means that the cpu -+ * context will be preserved. 
-+ */ -+ /* wfi(); */ -+ asm volatile ("wfi"); -+ -+ /* -+ * After we wake up from context retaining suspend, call the -+ * context retaining suspend finisher. -+ */ -+ psci_suspend_to_standby_finisher(idx, end_pwrlvl); -+ -+ return rc; -+} -+ -+/******************************************************************************* -+ * The following functions finish an earlier suspend request. They -+ * are called by the common finisher routine in psci_common.c. The `state_info` -+ * is the psci_power_state from which this CPU has woken up from. -+ ******************************************************************************/ -+void psci_cpu_suspend_finish(unsigned int cpu_idx, const psci_power_state_t *state_info) -+{ -+ /* unsigned int counter_freq; */ -+ /* unsigned int max_off_lvl; */ -+ unsigned int hartid = current_hartid(); -+ psci_cpu_data_t *svc_cpu_data; -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid); -+ svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); -+ -+ /* Ensure we have been woken up from a suspended state */ -+ if ((psci_get_aff_info_state() != AFF_STATE_ON) || -+ (is_local_state_off( -+ state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]) == 0)) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ /* -+ * Plat. management: Perform the platform specific actions -+ * before we change the state of the cpu e.g. enabling the -+ * gic or zeroing the mailbox register. If anything goes -+ * wrong then assert as there is no way to recover from this -+ * situation. -+ */ -+ psci_plat_pm_ops->pwr_domain_suspend_finish(state_info); -+ -+ /* Arch. management: Enable the data cache, stack memory maintenance. */ -+ psci_do_pwrup_cache_maintenance((uintptr_t)scratch); -+ -+#if 0 -+ /* Re-init the cntfrq_el0 register */ -+ counter_freq = plat_get_syscnt_freq2(); -+ write_cntfrq_el0(counter_freq); -+#endif -+ /* -+ * Call the cpu suspend finish handler registered by the Secure Payload -+ * Dispatcher to let it do any bookeeping. If the handler encounters an -+ * error, it's expected to assert within -+ */ -+#if 0 -+ if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend_finish != NULL)) { -+ max_off_lvl = psci_find_max_off_lvl(state_info); -+ assert(max_off_lvl != PSCI_INVALID_PWR_LVL); -+ psci_spd_pm->svc_suspend_finish(max_off_lvl); -+ } -+#endif -+ -+ /* Invalidate the suspend level for the cpu */ -+ psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL); -+ csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->target_pwrlvl, sizeof(unsigned int)); -+ -+ /* PUBLISH_EVENT(psci_suspend_pwrdown_finish); */ -+ -+ /* -+ * Generic management: Now we just need to retrieve the -+ * information that we had stashed away during the suspend -+ * call to set this cpu on its way. 
-+ */ -+ /* cm_prepare_el3_exit_ns(); */ -+} -diff --git a/lib/utils/psci/spacemit/plat/k1x/underly_implement.c b/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -new file mode 100644 -index 000000000000..9976b5774039 ---- /dev/null -+++ b/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -@@ -0,0 +1,345 @@ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define C1_CPU_RESET_BASE_ADDR (0xD4282B24) -+ -+#define PMU_CAP_CORE0_IDLE_CFG (0xd4282924) -+#define PMU_CAP_CORE1_IDLE_CFG (0xd4282928) -+#define PMU_CAP_CORE2_IDLE_CFG (0xd4282960) -+#define PMU_CAP_CORE3_IDLE_CFG (0xd4282964) -+#define PMU_CAP_CORE4_IDLE_CFG (0xd4282b04) -+#define PMU_CAP_CORE5_IDLE_CFG (0xd4282b08) -+#define PMU_CAP_CORE6_IDLE_CFG (0xd4282b0c) -+#define PMU_CAP_CORE7_IDLE_CFG (0xd4282b10) -+ -+#define PMU_C0_CAPMP_IDLE_CFG0 (0xd4282920) -+#define PMU_C0_CAPMP_IDLE_CFG1 (0xd42828e4) -+#define PMU_C0_CAPMP_IDLE_CFG2 (0xd4282950) -+#define PMU_C0_CAPMP_IDLE_CFG3 (0xd4282954) -+#define PMU_C1_CAPMP_IDLE_CFG0 (0xd4282b14) -+#define PMU_C1_CAPMP_IDLE_CFG1 (0xd4282b18) -+#define PMU_C1_CAPMP_IDLE_CFG2 (0xd4282b1c) -+#define PMU_C1_CAPMP_IDLE_CFG3 (0xd4282b20) -+ -+#define PMU_ACPR_CLUSTER0_REG (0xd4051090) -+#define PMU_ACPR_CLUSTER1_REG (0xd4051094) -+ -+#define CPU_PWR_DOWN_VALUE (0x3) -+#define CLUSTER_PWR_DOWN_VALUE (0x3) -+#define CLUSTER_AXISDO_OFFSET (31) -+ -+struct pmu_cap_wakeup { -+ unsigned int pmu_cap_core0_wakeup; -+ unsigned int pmu_cap_core1_wakeup; -+ unsigned int pmu_cap_core2_wakeup; -+ unsigned int pmu_cap_core3_wakeup; -+}; -+ -+/* D1P */ -+void spacemit_top_on(u_register_t mpidr) -+{ -+ unsigned int *cluster0_acpr = NULL; -+ unsigned int *cluster1_acpr = NULL; -+ -+ cluster0_acpr = (unsigned int *)PMU_ACPR_CLUSTER0_REG; -+ cluster1_acpr = (unsigned int *)PMU_ACPR_CLUSTER1_REG; -+ -+ unsigned int value = readl(cluster0_acpr); -+ value &= ~(1 << CLUSTER_AXISDO_OFFSET); -+ writel(value, cluster0_acpr); -+ -+ value = readl(cluster1_acpr); -+ value &= ~(1 << CLUSTER_AXISDO_OFFSET); -+ writel(value, cluster1_acpr); -+} -+ -+/* D1P */ -+void spacemit_top_off(u_register_t mpidr) -+{ -+ unsigned int *cluster0_acpr = NULL; -+ unsigned int *cluster1_acpr = NULL; -+ -+ cluster0_acpr = (unsigned int *)PMU_ACPR_CLUSTER0_REG; -+ cluster1_acpr = (unsigned int *)PMU_ACPR_CLUSTER1_REG; -+ -+ unsigned int value = readl(cluster0_acpr); -+ value |= (1 << CLUSTER_AXISDO_OFFSET); -+ writel(value, cluster0_acpr); -+ -+ value = readl(cluster1_acpr); -+ value |= (1 << CLUSTER_AXISDO_OFFSET); -+ writel(value, cluster1_acpr); -+} -+ -+/* M2 */ -+void spacemit_cluster_on(u_register_t mpidr) -+{ -+ unsigned int target_cpu_idx, value; -+ unsigned int *cluster_assert_base0 = NULL; -+ unsigned int *cluster_assert_base1 = NULL; -+ unsigned int *cluster_assert_base2 = NULL; -+ unsigned int *cluster_assert_base3 = NULL; -+ unsigned int *cluster_assert_base4 = NULL; -+ unsigned int *cluster_assert_base5 = NULL; -+ unsigned int *cluster_assert_base6 = NULL; -+ unsigned int *cluster_assert_base7 = NULL; -+ -+ target_cpu_idx = MPIDR_AFFLVL1_VAL(mpidr) * PLATFORM_MAX_CPUS_PER_CLUSTER -+ + MPIDR_AFFLVL0_VAL(mpidr); -+ -+ switch (target_cpu_idx) { -+ case 0: -+ case 1: -+ case 2: -+ case 3: -+ cluster_assert_base0 = (unsigned int *)PMU_C0_CAPMP_IDLE_CFG0; -+ cluster_assert_base1 = (unsigned int *)PMU_C0_CAPMP_IDLE_CFG1; -+ cluster_assert_base2 = (unsigned int *)PMU_C0_CAPMP_IDLE_CFG2; -+ cluster_assert_base3 = (unsigned int *)PMU_C0_CAPMP_IDLE_CFG3; -+ -+ /* cluster vote */ -+ /* M2 */ -+ value = 
readl(cluster_assert_base0); -+ value &= ~CLUSTER_PWR_DOWN_VALUE; -+ writel(value, cluster_assert_base0); -+ -+ value = readl(cluster_assert_base1); -+ value &= ~CLUSTER_PWR_DOWN_VALUE; -+ writel(value, cluster_assert_base1); -+ -+ value = readl(cluster_assert_base2); -+ value &= ~CLUSTER_PWR_DOWN_VALUE; -+ writel(value, cluster_assert_base2); -+ -+ value = readl(cluster_assert_base3); -+ value &= ~CLUSTER_PWR_DOWN_VALUE; -+ writel(value, cluster_assert_base3); -+ break; -+ case 4: -+ case 5: -+ case 6: -+ case 7: -+ cluster_assert_base4 = (unsigned int *)PMU_C1_CAPMP_IDLE_CFG0; -+ cluster_assert_base5 = (unsigned int *)PMU_C1_CAPMP_IDLE_CFG1; -+ cluster_assert_base6 = (unsigned int *)PMU_C1_CAPMP_IDLE_CFG2; -+ cluster_assert_base7 = (unsigned int *)PMU_C1_CAPMP_IDLE_CFG3; -+ -+ /* cluster vote */ -+ /* M2 */ -+ value = readl(cluster_assert_base4); -+ value &= ~CLUSTER_PWR_DOWN_VALUE; -+ writel(value, cluster_assert_base4); -+ -+ value = readl(cluster_assert_base5); -+ value &= ~CLUSTER_PWR_DOWN_VALUE; -+ writel(value, cluster_assert_base5); -+ -+ value = readl(cluster_assert_base6); -+ value &= ~CLUSTER_PWR_DOWN_VALUE; -+ writel(value, cluster_assert_base6); -+ -+ value = readl(cluster_assert_base7); -+ value &= ~CLUSTER_PWR_DOWN_VALUE; -+ writel(value, cluster_assert_base7); -+ break; -+ } -+} -+ -+/* M2 */ -+void spacemit_cluster_off(u_register_t mpidr) -+{ -+ unsigned int target_cpu_idx, value; -+ unsigned int *cluster_assert_base0 = NULL; -+ unsigned int *cluster_assert_base1 = NULL; -+ unsigned int *cluster_assert_base2 = NULL; -+ unsigned int *cluster_assert_base3 = NULL; -+ unsigned int *cluster_assert_base4 = NULL; -+ unsigned int *cluster_assert_base5 = NULL; -+ unsigned int *cluster_assert_base6 = NULL; -+ unsigned int *cluster_assert_base7 = NULL; -+ -+ target_cpu_idx = MPIDR_AFFLVL1_VAL(mpidr) * PLATFORM_MAX_CPUS_PER_CLUSTER -+ + MPIDR_AFFLVL0_VAL(mpidr); -+ -+ switch (target_cpu_idx) { -+ case 0: -+ case 1: -+ case 2: -+ case 3: -+ cluster_assert_base0 = (unsigned int *)PMU_C0_CAPMP_IDLE_CFG0; -+ cluster_assert_base1 = (unsigned int *)PMU_C0_CAPMP_IDLE_CFG1; -+ cluster_assert_base2 = (unsigned int *)PMU_C0_CAPMP_IDLE_CFG2; -+ cluster_assert_base3 = (unsigned int *)PMU_C0_CAPMP_IDLE_CFG3; -+ -+ /* cluster vote */ -+ /* M2 */ -+ value = readl(cluster_assert_base0); -+ value |= CLUSTER_PWR_DOWN_VALUE; -+ writel(value, cluster_assert_base0); -+ -+ value = readl(cluster_assert_base1); -+ value |= CLUSTER_PWR_DOWN_VALUE; -+ writel(value, cluster_assert_base1); -+ -+ value = readl(cluster_assert_base2); -+ value |= CLUSTER_PWR_DOWN_VALUE; -+ writel(value, cluster_assert_base2); -+ -+ value = readl(cluster_assert_base3); -+ value |= CLUSTER_PWR_DOWN_VALUE; -+ writel(value, cluster_assert_base3); -+ break; -+ case 4: -+ case 5: -+ case 6: -+ case 7: -+ cluster_assert_base4 = (unsigned int *)PMU_C1_CAPMP_IDLE_CFG0; -+ cluster_assert_base5 = (unsigned int *)PMU_C1_CAPMP_IDLE_CFG1; -+ cluster_assert_base6 = (unsigned int *)PMU_C1_CAPMP_IDLE_CFG2; -+ cluster_assert_base7 = (unsigned int *)PMU_C1_CAPMP_IDLE_CFG3; -+ -+ /* cluster vote */ -+ /* M2 */ -+ value = readl(cluster_assert_base4); -+ value |= CLUSTER_PWR_DOWN_VALUE; -+ writel(value, cluster_assert_base4); -+ -+ value = readl(cluster_assert_base5); -+ value |= CLUSTER_PWR_DOWN_VALUE; -+ writel(value, cluster_assert_base5); -+ -+ value = readl(cluster_assert_base6); -+ value |= CLUSTER_PWR_DOWN_VALUE; -+ writel(value, cluster_assert_base6); -+ -+ value = readl(cluster_assert_base7); -+ value |= CLUSTER_PWR_DOWN_VALUE; -+ 
writel(value, cluster_assert_base7); -+ break; -+ } -+} -+ -+void spacemit_wakeup_cpu(u_register_t mpidr) -+{ -+ unsigned int *cpu_reset_base; -+ struct pmu_cap_wakeup *pmu_cap_wakeup; -+ unsigned int cur_cluster, cur_cpu; -+ unsigned int target_cpu_idx; -+ unsigned int cur_hartid = current_hartid(); -+ -+ cur_cluster = MPIDR_AFFLVL1_VAL(cur_hartid); -+ cur_cpu = MPIDR_AFFLVL0_VAL(cur_hartid); -+ -+ pmu_cap_wakeup = (struct pmu_cap_wakeup *)((cur_cluster == 0) ? (unsigned int *)CPU_RESET_BASE_ADDR : -+ (unsigned int *)C1_CPU_RESET_BASE_ADDR); -+ -+ switch (cur_cpu) { -+ case 0: -+ cpu_reset_base = &pmu_cap_wakeup->pmu_cap_core0_wakeup; -+ break; -+ case 1: -+ cpu_reset_base = &pmu_cap_wakeup->pmu_cap_core1_wakeup; -+ break; -+ case 2: -+ cpu_reset_base = &pmu_cap_wakeup->pmu_cap_core2_wakeup; -+ break; -+ case 3: -+ cpu_reset_base = &pmu_cap_wakeup->pmu_cap_core3_wakeup; -+ break; -+ } -+ -+ target_cpu_idx = MPIDR_AFFLVL1_VAL(mpidr) * PLATFORM_MAX_CPUS_PER_CLUSTER -+ + MPIDR_AFFLVL0_VAL(mpidr); -+ -+ writel(1 << target_cpu_idx, cpu_reset_base); -+} -+ -+void spacemit_assert_cpu(u_register_t mpidr) -+{ -+ unsigned int target_cpu_idx; -+ unsigned int *cpu_assert_base = NULL; -+ -+ target_cpu_idx = MPIDR_AFFLVL1_VAL(mpidr) * PLATFORM_MAX_CPUS_PER_CLUSTER -+ + MPIDR_AFFLVL0_VAL(mpidr); -+ -+ switch (target_cpu_idx) { -+ case 0: -+ cpu_assert_base = (unsigned int *)PMU_CAP_CORE0_IDLE_CFG; -+ break; -+ case 1: -+ cpu_assert_base = (unsigned int *)PMU_CAP_CORE1_IDLE_CFG; -+ break; -+ case 2: -+ cpu_assert_base = (unsigned int *)PMU_CAP_CORE2_IDLE_CFG; -+ break; -+ case 3: -+ cpu_assert_base = (unsigned int *)PMU_CAP_CORE3_IDLE_CFG; -+ break; -+ case 4: -+ cpu_assert_base = (unsigned int *)PMU_CAP_CORE4_IDLE_CFG; -+ break; -+ case 5: -+ cpu_assert_base = (unsigned int *)PMU_CAP_CORE5_IDLE_CFG; -+ break; -+ case 6: -+ cpu_assert_base = (unsigned int *)PMU_CAP_CORE6_IDLE_CFG; -+ break; -+ case 7: -+ cpu_assert_base = (unsigned int *)PMU_CAP_CORE7_IDLE_CFG; -+ break; -+ } -+ -+ /* cpu vote */ -+ /* C2 */ -+ unsigned int value = readl(cpu_assert_base); -+ value |= CPU_PWR_DOWN_VALUE; -+ writel(value, cpu_assert_base); -+} -+ -+void spacemit_deassert_cpu(void) -+{ -+ unsigned int mpidr = current_hartid(); -+ -+ /* clear the idle bit */ -+ unsigned int target_cpu_idx; -+ unsigned int *cpu_assert_base = NULL; -+ -+ target_cpu_idx = MPIDR_AFFLVL1_VAL(mpidr) * PLATFORM_MAX_CPUS_PER_CLUSTER -+ + MPIDR_AFFLVL0_VAL(mpidr); -+ -+ switch (target_cpu_idx) { -+ case 0: -+ cpu_assert_base = (unsigned int *)PMU_CAP_CORE0_IDLE_CFG; -+ break; -+ case 1: -+ cpu_assert_base = (unsigned int *)PMU_CAP_CORE1_IDLE_CFG; -+ break; -+ case 2: -+ cpu_assert_base = (unsigned int *)PMU_CAP_CORE2_IDLE_CFG; -+ break; -+ case 3: -+ cpu_assert_base = (unsigned int *)PMU_CAP_CORE3_IDLE_CFG; -+ break; -+ case 4: -+ cpu_assert_base = (unsigned int *)PMU_CAP_CORE4_IDLE_CFG; -+ break; -+ case 5: -+ cpu_assert_base = (unsigned int *)PMU_CAP_CORE5_IDLE_CFG; -+ break; -+ case 6: -+ cpu_assert_base = (unsigned int *)PMU_CAP_CORE6_IDLE_CFG; -+ break; -+ case 7: -+ cpu_assert_base = (unsigned int *)PMU_CAP_CORE7_IDLE_CFG; -+ break; -+ } -+ -+ /* de-vote cpu */ -+ unsigned int value = readl(cpu_assert_base); -+ value &= ~CPU_PWR_DOWN_VALUE; -+ writel(value, cpu_assert_base); -+} -diff --git a/lib/utils/psci/spacemit/plat/plat_pm.c b/lib/utils/psci/spacemit/plat/plat_pm.c -new file mode 100644 -index 000000000000..464a56a277ef ---- /dev/null -+++ b/lib/utils/psci/spacemit/plat/plat_pm.c -@@ -0,0 +1,258 @@ -+#include -+#include -+#include 
-+#include -+#include -+#include -+#include -+#include -+#include -+#include "underly_implement.h" -+ -+#define CORE_PWR_STATE(state) \ -+ ((state)->pwr_domain_state[MPIDR_AFFLVL0]) -+#define CLUSTER_PWR_STATE(state) \ -+ ((state)->pwr_domain_state[MPIDR_AFFLVL1]) -+#define SYSTEM_PWR_STATE(state) \ -+ ((state)->pwr_domain_state[PLAT_MAX_PWR_LVL]) -+ -+static int spacemit_pwr_domain_on(u_register_t mpidr) -+{ -+ /* wakeup the cpu */ -+ spacemit_wakeup_cpu(mpidr); -+ -+ return 0; -+} -+ -+static void spacemit_pwr_domain_on_finish(const psci_power_state_t *target_state) -+{ -+ unsigned int hartid = current_hartid(); -+ -+ if (SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { -+ /* D1P */ -+ spacemit_top_on(hartid); -+ } -+ -+ /* -+ * Enable CCI coherency for this cluster. -+ * No need for locks as no other cpu is active at the moment. -+ */ -+ if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) { -+ spacemit_cluster_on(hartid); -+#if defined(CONFIG_PLATFORM_SPACEMIT_K1X) -+ /* disable the tcm */ -+ csr_write(CSR_TCMCFG, 0); -+#endif -+ cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(hartid)); -+#if defined(CONFIG_PLATFORM_SPACEMIT_K1X) -+ /* enable the tcm */ -+ csr_write(CSR_TCMCFG, 1); -+#endif -+ } -+} -+ -+static int spacemit_pwr_domain_off_early(const psci_power_state_t *target_state) -+{ -+ /* the ipi's pending is cleared before */ -+ /* disable the plic irq */ -+ fdt_plic_context_exit(); -+ /* clear the external irq pending */ -+ csr_clear(CSR_MIP, MIP_MEIP); -+ csr_clear(CSR_MIP, MIP_SEIP); -+ -+ /* here we clear the sstimer pending if this core have */ -+ if (sbi_hart_has_extension(sbi_scratch_thishart_ptr(), SBI_HART_EXT_SSTC)) { -+ csr_write(CSR_STIMECMP, 0xffffffffffffffff); -+ } -+ -+ return 0; -+} -+ -+static void spacemit_pwr_domain_off(const psci_power_state_t *target_state) -+{ -+ unsigned int hartid = current_hartid(); -+ -+ if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) { -+#if defined(CONFIG_PLATFORM_SPACEMIT_K1X) -+ /* disable the tcm */ -+ csr_write(CSR_TCMCFG, 0); -+#endif -+ cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(hartid)); -+ spacemit_cluster_off(hartid); -+ } -+ -+ if (SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { -+ spacemit_top_off(hartid); -+ } -+ -+ spacemit_assert_cpu(hartid); -+ -+} -+ -+static void spacemit_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state) -+{ -+ while (1) { -+ asm volatile ("wfi"); -+ } -+} -+ -+static void spacemit_pwr_domain_on_finish_late(const psci_power_state_t *target_state) -+{ -+ spacemit_deassert_cpu(); -+} -+ -+static int _spacemit_validate_power_state(unsigned int power_state, -+ psci_power_state_t *req_state) -+{ -+ unsigned int pstate = psci_get_pstate_type(power_state); -+ unsigned int pwr_lvl = psci_get_pstate_pwrlvl(power_state); -+ unsigned int i; -+ -+ if (req_state == NULL) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ if (pwr_lvl > PLAT_MAX_PWR_LVL) -+ return PSCI_E_INVALID_PARAMS; -+ -+ /* Sanity check the requested state */ -+ if (pstate == PSTATE_TYPE_STANDBY) { -+ /* -+ * It's possible to enter standby only on power level 0 -+ * Ignore any other power level. -+ */ -+ if (pwr_lvl != ARM_PWR_LVL0) -+ return PSCI_E_INVALID_PARAMS; -+ -+ req_state->pwr_domain_state[ARM_PWR_LVL0] = -+ ARM_LOCAL_STATE_RET; -+ } else { -+ for (i = ARM_PWR_LVL0; i <= pwr_lvl; i++) -+ req_state->pwr_domain_state[i] = -+ ARM_LOCAL_STATE_OFF; -+ } -+ -+ /* -+ * We expect the 'state id' to be zero. 
-+ */ -+ if (psci_get_pstate_id(power_state) != 0U) -+ return PSCI_E_INVALID_PARAMS; -+ -+ return PSCI_E_SUCCESS; -+} -+ -+static int spacemit_validate_power_state(unsigned int power_state, -+ psci_power_state_t *req_state) -+{ -+ int rc; -+ -+ rc = _spacemit_validate_power_state(power_state, req_state); -+ -+ return rc; -+} -+ -+static void spacemit_pwr_domain_suspend(const psci_power_state_t *target_state) -+{ -+ unsigned int clusterid; -+ unsigned int hartid = current_hartid(); -+ -+ /* -+ * CSS currently supports retention only at cpu level. Just return -+ * as nothing is to be done for retention. -+ */ -+ if (CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET) -+ return; -+ -+ -+ if (CORE_PWR_STATE(target_state) != ARM_LOCAL_STATE_OFF) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ /* Cluster is to be turned off, so disable coherency */ -+ if (CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { -+ clusterid = MPIDR_AFFLVL1_VAL(hartid); -+#if defined(CONFIG_PLATFORM_SPACEMIT_K1X) -+ /* disable the tcm */ -+ csr_write(CSR_TCMCFG, 0); -+#endif -+ cci_disable_snoop_dvm_reqs(clusterid); -+ -+ spacemit_cluster_off(hartid); -+ } -+ -+ if (SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { -+ /* D1P */ -+ spacemit_top_off(hartid); -+ } -+ -+ spacemit_assert_cpu(hartid); -+} -+ -+static void spacemit_pwr_domain_suspend_finish(const psci_power_state_t *target_state) -+{ -+ unsigned int clusterid; -+ unsigned int hartid = current_hartid(); -+ -+ /* Return as nothing is to be done on waking up from retention. */ -+ if (CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET) -+ return; -+ -+ if (CORE_PWR_STATE(target_state) != ARM_LOCAL_STATE_OFF) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ /* -+ * Perform the common cluster specific operations i.e enable coherency -+ * if this cluster was off. 
-+ */ -+ if (CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { -+ clusterid = MPIDR_AFFLVL1_VAL(hartid); -+#if defined(CONFIG_PLATFORM_SPACEMIT_K1X) -+ /* disable the tcm */ -+ csr_write(CSR_TCMCFG, 0); -+#endif -+ cci_enable_snoop_dvm_reqs(clusterid); -+#if defined(CONFIG_PLATFORM_SPACEMIT_K1X) -+ /* enable the tcm */ -+ csr_write(CSR_TCMCFG, 1); -+#endif -+ spacemit_cluster_on(hartid); -+ } -+ -+ if (SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { -+ /* D1P */ -+ spacemit_top_on(hartid); -+ } -+ -+ /* Do something */ -+ spacemit_deassert_cpu(); -+} -+ -+static void spacemit_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state) -+{ -+ csr_clear(CSR_MIE, MIP_SSIP | MIP_MSIP | MIP_STIP | MIP_MTIP | MIP_SEIP | MIP_MEIP); -+} -+ -+static const plat_psci_ops_t spacemit_psci_ops = { -+ .cpu_standby = NULL, -+ .pwr_domain_on = spacemit_pwr_domain_on, -+ .pwr_domain_on_finish = spacemit_pwr_domain_on_finish, -+ .pwr_domain_off_early = spacemit_pwr_domain_off_early, -+ .pwr_domain_off = spacemit_pwr_domain_off, -+ .pwr_domain_pwr_down_wfi = spacemit_pwr_domain_pwr_down_wfi, -+ .pwr_domain_on_finish_late = spacemit_pwr_domain_on_finish_late, -+ .validate_power_state = spacemit_validate_power_state, -+ .pwr_domain_suspend = spacemit_pwr_domain_suspend, -+ .pwr_domain_suspend_pwrdown_early = spacemit_pwr_domain_suspend_pwrdown_early, -+ .pwr_domain_suspend_finish = spacemit_pwr_domain_suspend_finish, -+}; -+ -+int plat_setup_psci_ops(uintptr_t sec_entrypoint, const plat_psci_ops_t **psci_ops) -+{ -+ *psci_ops = &spacemit_psci_ops; -+ -+ return 0; -+} -diff --git a/lib/utils/psci/spacemit/plat/underly_implement.h b/lib/utils/psci/spacemit/plat/underly_implement.h -new file mode 100644 -index 000000000000..dd6c972325bb ---- /dev/null -+++ b/lib/utils/psci/spacemit/plat/underly_implement.h -@@ -0,0 +1,14 @@ -+#ifndef __UNDERLY_IMPLEMENT__H__ -+#define __UNDERLY_IMPLEMENT__H__ -+ -+#include -+ -+void spacemit_top_on(u_register_t mpidr); -+void spacemit_top_off(u_register_t mpidr); -+void spacemit_cluster_on(u_register_t mpidr); -+void spacemit_cluster_off(u_register_t mpidr); -+void spacemit_wakeup_cpu(u_register_t mpidr); -+void spacemit_assert_cpu(u_register_t mpidr); -+void spacemit_deassert_cpu(void); -+ -+#endif -diff --git a/lib/utils/psci/spacemit/spacemit_topology.c b/lib/utils/psci/spacemit/spacemit_topology.c -new file mode 100644 -index 000000000000..de327d833377 ---- /dev/null -+++ b/lib/utils/psci/spacemit/spacemit_topology.c -@@ -0,0 +1,26 @@ -+#include -+ -+static unsigned char plat_power_domain_tree_desc[] = { -+ /* No of root nodes */ -+ 1, -+ /* Num of children for the root node */ -+ 0, -+ /* Num of children for the first cluster node */ -+ 0, -+ /* Num of children for the second cluster node */ -+ 0, -+}; -+ -+int plat_core_pos_by_mpidr(u_register_t mpidr) -+{ -+ unsigned int cluster = MPIDR_AFFLVL1_VAL(mpidr); -+ unsigned int core = MPIDR_AFFLVL0_VAL(mpidr); -+ -+ return (cluster == 0) ? core : -+ (plat_power_domain_tree_desc[2] + core); -+} -+ -+unsigned char *plat_get_power_domain_tree_desc(void) -+{ -+ return plat_power_domain_tree_desc; -+} -diff --git a/lib/utils/serial/uart8250.c b/lib/utils/serial/uart8250.c -index 99bf1bf733f2..87a6bc607cac 100644 ---- a/lib/utils/serial/uart8250.c -+++ b/lib/utils/serial/uart8250.c -@@ -131,6 +131,11 @@ int uart8250_init(unsigned long base, u32 in_freq, u32 baudrate, u32 reg_shift, - /* Set scratchpad */ - set_reg(UART_SCR_OFFSET, 0x00); - -+#ifdef CONFIG_PLATFORM_SPACEMIT_K1X -+ /* enable uart. 
*/ -+ set_reg(UART_IER_OFFSET, 0x40); -+#endif -+ - sbi_console_set_device(&uart8250_console); - - return 0; -diff --git a/lib/utils/timer/aclint_mtimer.c b/lib/utils/timer/aclint_mtimer.c -index 13af5d8232d3..a9dbea2526a6 100644 ---- a/lib/utils/timer/aclint_mtimer.c -+++ b/lib/utils/timer/aclint_mtimer.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - #include - - static unsigned long mtimer_ptr_offset; -@@ -183,6 +184,7 @@ int aclint_mtimer_cold_init(struct aclint_mtimer_data *mt, - u32 i; - int rc; - struct sbi_scratch *scratch; -+ const struct sbi_platform *sbi = sbi_platform_thishart_ptr(); - - /* Sanity checks */ - if (!mt || -@@ -218,7 +220,7 @@ int aclint_mtimer_cold_init(struct aclint_mtimer_data *mt, - - /* Update MTIMER pointer in scratch space */ - for (i = 0; i < mt->hart_count; i++) { -- scratch = sbi_hartid_to_scratch(mt->first_hartid + i); -+ scratch = sbi_hartid_to_scratch(sbi->hart_index2id[i]); - if (!scratch) - return SBI_ENOENT; - mtimer_set_hart_data_ptr(scratch, mt); -diff --git a/platform/generic/Kconfig b/platform/generic/Kconfig -index 72768edeb921..350e41e58325 100644 ---- a/platform/generic/Kconfig -+++ b/platform/generic/Kconfig -@@ -52,6 +52,42 @@ config PLATFORM_STARFIVE_JH7110 - bool "StarFive JH7110 support" - default n - -+config PLATFORM_SPACEMIT_K1PRO -+ bool "Spacemit K1pro support" -+ default n -+ -+if PLATFORM_SPACEMIT_K1PRO -+ config PLATFORM_SPACEMIT_K1PRO_FPGA -+ bool "Spacemit K1pro board fpga" -+ default n -+ -+ config PLATFORM_SPACEMIT_K1PRO_QEMU -+ bool "Spacemit K1pro board qemu" -+ default n -+ -+ config PLATFORM_SPACEMIT_K1PRO_SIM -+ bool "Spacemit K1pro board sim" -+ default n -+ -+ config PLATFORM_SPACEMIT_K1PRO_VERIFY -+ bool "Spacemit K1pro board verify" -+ default n -+endif -+ -+config PLATFORM_SPACEMIT_K1X -+ bool "Spacemit K1x support" -+ default n -+ -+if PLATFORM_SPACEMIT_K1X -+ config PLATFORM_SPACEMIT_K1X_FPGA -+ bool "Spacemit K1x board fpag" -+ default n -+ -+ config PLATFORM_SPACEMIT_K1X_EVB -+ bool "Spacemit K1x board evb" -+ default n -+endif -+ - source "$(OPENSBI_SRC_DIR)/platform/generic/andes/Kconfig" - - endif -diff --git a/platform/generic/configs/defconfig b/platform/generic/configs/defconfig -index ee0df38a22ee..6366769ef59a 100644 ---- a/platform/generic/configs/defconfig -+++ b/platform/generic/configs/defconfig -@@ -4,6 +4,7 @@ CONFIG_PLATFORM_RENESAS_RZFIVE=y - CONFIG_PLATFORM_SIFIVE_FU540=y - CONFIG_PLATFORM_SIFIVE_FU740=y - CONFIG_PLATFORM_STARFIVE_JH7110=y -+CONFIG_PLATFORM_SPACEMIT_K1PRO=y - CONFIG_FDT_GPIO=y - CONFIG_FDT_GPIO_SIFIVE=y - CONFIG_FDT_GPIO_STARFIVE=y -@@ -37,4 +38,4 @@ CONFIG_FDT_SERIAL_XILINX_UARTLITE=y - CONFIG_FDT_TIMER=y - CONFIG_FDT_TIMER_MTIMER=y - CONFIG_FDT_TIMER_PLMT=y --CONFIG_SERIAL_SEMIHOSTING=y -+CONFIG_SERIAL_SEMIHOSTING=n -diff --git a/platform/generic/configs/k1-x_fpga_1x4_defconfig b/platform/generic/configs/k1-x_fpga_1x4_defconfig -new file mode 100644 -index 000000000000..c9419d6336fc ---- /dev/null -+++ b/platform/generic/configs/k1-x_fpga_1x4_defconfig -@@ -0,0 +1,16 @@ -+CONFIG_PLATFORM_SPACEMIT_K1X=y -+CONFIG_PLATFORM_SPACEMIT_K1X_FPGA=y -+# CONFIG_SBI_ECALL_TIME is not set -+CONFIG_FDT_IPI=y -+CONFIG_FDT_IPI_MSWI=y -+CONFIG_FDT_IRQCHIP=y -+CONFIG_FDT_IRQCHIP_PLIC=y -+CONFIG_FDT_RESET=y -+CONFIG_FDT_RESET_HTIF=y -+CONFIG_FDT_RESET_SIFIVE_TEST=y -+CONFIG_FDT_RESET_SUNXI_WDT=y -+CONFIG_FDT_RESET_THEAD=y -+CONFIG_FDT_SERIAL=y -+CONFIG_FDT_SERIAL_UART8250=y -+CONFIG_ARM_PSCI_SUPPORT=y -+CONFIG_ARM_NON_SCMI_SUPPORT=y -diff --git 
a/platform/generic/configs/k1-x_fpga_2x2_defconfig b/platform/generic/configs/k1-x_fpga_2x2_defconfig -new file mode 100644 -index 000000000000..c9419d6336fc ---- /dev/null -+++ b/platform/generic/configs/k1-x_fpga_2x2_defconfig -@@ -0,0 +1,16 @@ -+CONFIG_PLATFORM_SPACEMIT_K1X=y -+CONFIG_PLATFORM_SPACEMIT_K1X_FPGA=y -+# CONFIG_SBI_ECALL_TIME is not set -+CONFIG_FDT_IPI=y -+CONFIG_FDT_IPI_MSWI=y -+CONFIG_FDT_IRQCHIP=y -+CONFIG_FDT_IRQCHIP_PLIC=y -+CONFIG_FDT_RESET=y -+CONFIG_FDT_RESET_HTIF=y -+CONFIG_FDT_RESET_SIFIVE_TEST=y -+CONFIG_FDT_RESET_SUNXI_WDT=y -+CONFIG_FDT_RESET_THEAD=y -+CONFIG_FDT_SERIAL=y -+CONFIG_FDT_SERIAL_UART8250=y -+CONFIG_ARM_PSCI_SUPPORT=y -+CONFIG_ARM_NON_SCMI_SUPPORT=y -diff --git a/platform/generic/configs/k1-x_fpga_defconfig b/platform/generic/configs/k1-x_fpga_defconfig -new file mode 100644 -index 000000000000..c9419d6336fc ---- /dev/null -+++ b/platform/generic/configs/k1-x_fpga_defconfig -@@ -0,0 +1,16 @@ -+CONFIG_PLATFORM_SPACEMIT_K1X=y -+CONFIG_PLATFORM_SPACEMIT_K1X_FPGA=y -+# CONFIG_SBI_ECALL_TIME is not set -+CONFIG_FDT_IPI=y -+CONFIG_FDT_IPI_MSWI=y -+CONFIG_FDT_IRQCHIP=y -+CONFIG_FDT_IRQCHIP_PLIC=y -+CONFIG_FDT_RESET=y -+CONFIG_FDT_RESET_HTIF=y -+CONFIG_FDT_RESET_SIFIVE_TEST=y -+CONFIG_FDT_RESET_SUNXI_WDT=y -+CONFIG_FDT_RESET_THEAD=y -+CONFIG_FDT_SERIAL=y -+CONFIG_FDT_SERIAL_UART8250=y -+CONFIG_ARM_PSCI_SUPPORT=y -+CONFIG_ARM_NON_SCMI_SUPPORT=y -diff --git a/platform/generic/configs/k1_defconfig b/platform/generic/configs/k1_defconfig -new file mode 100644 -index 000000000000..08f17036aa91 ---- /dev/null -+++ b/platform/generic/configs/k1_defconfig -@@ -0,0 +1,16 @@ -+CONFIG_PLATFORM_SPACEMIT_K1X=y -+CONFIG_PLATFORM_SPACEMIT_K1X_EVB=y -+# CONFIG_SBI_ECALL_TIME is not set -+CONFIG_FDT_IPI=y -+CONFIG_FDT_IPI_MSWI=y -+CONFIG_FDT_IRQCHIP=y -+CONFIG_FDT_IRQCHIP_PLIC=y -+CONFIG_FDT_RESET=y -+CONFIG_FDT_RESET_HTIF=y -+CONFIG_FDT_RESET_SIFIVE_TEST=y -+CONFIG_FDT_RESET_SUNXI_WDT=y -+CONFIG_FDT_RESET_THEAD=y -+CONFIG_FDT_SERIAL=y -+CONFIG_FDT_SERIAL_UART8250=y -+CONFIG_ARM_PSCI_SUPPORT=y -+CONFIG_ARM_NON_SCMI_SUPPORT=y -diff --git a/platform/generic/include/spacemit/k1x/core_common.h b/platform/generic/include/spacemit/k1x/core_common.h -new file mode 100644 -index 000000000000..4bee5e11b38b ---- /dev/null -+++ b/platform/generic/include/spacemit/k1x/core_common.h -@@ -0,0 +1,13 @@ -+#ifndef __K1X_CORE_COMMON_H__ -+#define __K1X_CORE_COMMON_H__ -+ -+ -+#define CSR_MHCR 0x7c1 -+#define CSR_MSETUP 0x7c0 -+#define CSR_MHINT 0x7c5 -+#define CSR_ML2SETUP 0x7F0 -+ -+#define CACHE_LINE_SIZE (64) -+#define CACHE_INV_ADDR_Msk (0xffffffffffffffff << 6) -+ -+#endif /* __K1X_CORE_COMMON_H__ */ -diff --git a/platform/generic/include/spacemit/k1x/k1x_evb.h b/platform/generic/include/spacemit/k1x/k1x_evb.h -new file mode 100644 -index 000000000000..b951105e0c04 ---- /dev/null -+++ b/platform/generic/include/spacemit/k1x/k1x_evb.h -@@ -0,0 +1,72 @@ -+#ifndef __K1X_EVB_CONFIG_H__ -+#define __K1X_EVB_CONFIG_H__ -+ -+/***************************cci******************************/ -+#define PLATFORM_CCI_ADDR (0xD8500000) -+ -+#define PLAT_CCI_CLUSTER0_IFACE_IX 0 -+#define PLAT_CCI_CLUSTER1_IFACE_IX 1 -+#define PLAT_CCI_CLUSTER2_IFACE_IX 2 -+#define PLAT_CCI_CLUSTER3_IFACE_IX 3 -+ -+#define PLAT_CCI_MAP static const int cci_map[] = { \ -+ PLAT_CCI_CLUSTER0_IFACE_IX, \ -+ PLAT_CCI_CLUSTER1_IFACE_IX, \ -+ PLAT_CCI_CLUSTER2_IFACE_IX, \ -+ PLAT_CCI_CLUSTER3_IFACE_IX, \ -+}; -+ -+/***************************cpu******************************/ -+#define CPU_RESET_BASE_ADDR (0xD428292C) -+#define 
C0_RVBADDR_LO_ADDR (0xD4282DB0) -+#define C0_RVBADDR_HI_ADDR (0xD4282DB4) -+ -+#define C1_RVBADDR_LO_ADDR (0xD4282C00 + 0x2B0) -+#define C1_RVBADDR_HI_ADDR (0xD4282C00 + 0X2B4) -+ -+/***************************mailbox***************************/ -+#define SCMI_MAILBOX_SHARE_MEM (0x2f902080) -+#define PLAT_MAILBOX_REG_BASE (0x2f824000) -+ -+/****************************scmi*****************************/ -+#define PLAT_SCMI_DOMAIN_MAP {0, 1, 2, 3} -+ -+/*************************cpu topology************************/ -+#define ARM_SYSTEM_COUNT (1U) -+/* this is the max cluster count of this platform */ -+#define PLATFORM_CLUSTER_COUNT (2U) -+/* this is the max core count of this platform */ -+#define PLATFORM_CORE_COUNT (8U) -+/* this is the max NUN CPU power domains */ -+#define PSCI_NUM_NON_CPU_PWR_DOMAINS (3U) -+/* this is the max cpu cores per cluster*/ -+#define PLATFORM_MAX_CPUS_PER_CLUSTER (4U) -+ -+#define CLUSTER_INDEX_IN_CPU_TOPOLOGY (1U) -+#define CLUSTER0_INDEX_IN_CPU_TOPOLOGY (2U) -+#define CLUSTER1_INDEX_IN_CPU_TOPOLOGY (3U) -+ -+#define PSCI_NUM_PWR_DOMAINS \ -+ (ARM_SYSTEM_COUNT + plat_get_power_domain_tree_desc()[CLUSTER_INDEX_IN_CPU_TOPOLOGY] \ -+ + plat_get_power_domain_tree_desc()[CLUSTER0_INDEX_IN_CPU_TOPOLOGY] + \ -+ plat_get_power_domain_tree_desc()[CLUSTER1_INDEX_IN_CPU_TOPOLOGY]) -+ -+/***************************psci pwr level********************/ -+/* This is the power level corresponding to a CPU */ -+#define PSCI_CPU_PWR_LVL 0U -+#define PLAT_MAX_PWR_LVL 2U -+ -+/***************************cpu affin*************************/ -+#define MPIDR_AFFINITY0_MASK 0x3U -+#define MPIDR_AFFINITY1_MASK 0xfU -+#define MPIDR_AFF0_SHIFT 0U -+#define MPIDR_AFF1_SHIFT 2U -+ -+/**************************cluster power domain***************/ -+#define CLUSTER0_L2_CACHE_FLUSH_REG_BASE (0xD84401B0) -+#define CLUSTER1_L2_CACHE_FLUSH_REG_BASE (0xD84401B4) -+ -+#define L2_CACHE_FLUSH_REQUEST_BIT_OFFSET (0x1) -+#define L2_CACHE_FLUSH_DONE_BIT_OFFSET (0x3) -+ -+#endif /* __K1X_EVB_CONFIG_H__ */ -diff --git a/platform/generic/include/spacemit/k1x/k1x_fpga.h b/platform/generic/include/spacemit/k1x/k1x_fpga.h -new file mode 100644 -index 000000000000..4748c86b69c2 ---- /dev/null -+++ b/platform/generic/include/spacemit/k1x/k1x_fpga.h -@@ -0,0 +1,73 @@ -+#ifndef __K1X_FPGA_CONFIG_H__ -+#define __K1X_FPGA_CONFIG_H__ -+ -+/***************************cci******************************/ -+#define PLATFORM_CCI_ADDR (0xD8500000) -+ -+#define PLAT_CCI_CLUSTER0_IFACE_IX 0 -+#define PLAT_CCI_CLUSTER1_IFACE_IX 1 -+#define PLAT_CCI_CLUSTER2_IFACE_IX 2 -+#define PLAT_CCI_CLUSTER3_IFACE_IX 3 -+ -+#define PLAT_CCI_MAP static const int cci_map[] = { \ -+ PLAT_CCI_CLUSTER0_IFACE_IX, \ -+ PLAT_CCI_CLUSTER1_IFACE_IX, \ -+ PLAT_CCI_CLUSTER2_IFACE_IX, \ -+ PLAT_CCI_CLUSTER3_IFACE_IX, \ -+}; -+ -+/***************************cpu******************************/ -+#define CPU_RESET_BASE_ADDR (0xD428292C) -+#define C0_RVBADDR_LO_ADDR (0xD4282DB0) -+#define C0_RVBADDR_HI_ADDR (0xD4282DB4) -+ -+#define C1_RVBADDR_LO_ADDR (0xD4282C00 + 0x2B0) -+#define C1_RVBADDR_HI_ADDR (0xD4282C00 + 0X2B4) -+ -+/***************************mailbox***************************/ -+#define SCMI_MAILBOX_SHARE_MEM (0x2f902080) -+#define PLAT_MAILBOX_REG_BASE (0x2f824000) -+ -+/****************************scmi*****************************/ -+#define PLAT_SCMI_SINGLE_CLUSTER_DOMAIN_MAP {0, 1, 2, 3} -+#define PLAT_SCMI_DOUBLE_CLUSTER_DOMAIN_MAP {0, 1, 4, 5} -+ -+/*************************cpu topology************************/ -+#define ARM_SYSTEM_COUNT 
(1U) -+/* this is the max cluster count of this platform */ -+#define PLATFORM_CLUSTER_COUNT (2U) -+/* this is the max core count of this platform */ -+#define PLATFORM_CORE_COUNT (8U) -+/* this is the max NUN CPU power domains */ -+#define PSCI_NUM_NON_CPU_PWR_DOMAINS (3U) -+/* this is the max cpu cores per cluster*/ -+#define PLATFORM_MAX_CPUS_PER_CLUSTER (4U) -+ -+#define CLUSTER_INDEX_IN_CPU_TOPOLOGY (1U) -+#define CLUSTER0_INDEX_IN_CPU_TOPOLOGY (2U) -+#define CLUSTER1_INDEX_IN_CPU_TOPOLOGY (3U) -+ -+#define PSCI_NUM_PWR_DOMAINS \ -+ (ARM_SYSTEM_COUNT + plat_get_power_domain_tree_desc()[CLUSTER_INDEX_IN_CPU_TOPOLOGY] \ -+ + plat_get_power_domain_tree_desc()[CLUSTER0_INDEX_IN_CPU_TOPOLOGY] + \ -+ plat_get_power_domain_tree_desc()[CLUSTER1_INDEX_IN_CPU_TOPOLOGY]) -+ -+/***************************psci pwr level********************/ -+/* This is the power level corresponding to a CPU */ -+#define PSCI_CPU_PWR_LVL 0U -+#define PLAT_MAX_PWR_LVL 2U -+ -+/***************************cpu affin*************************/ -+#define MPIDR_AFFINITY0_MASK 0x3U -+#define MPIDR_AFFINITY1_MASK 0xfU -+#define MPIDR_AFF0_SHIFT 0U -+#define MPIDR_AFF1_SHIFT 2U -+ -+/**************************cluster power domain***************/ -+#define CLUSTER0_L2_CACHE_FLUSH_REG_BASE (0xD84401B0) -+#define CLUSTER1_L2_CACHE_FLUSH_REG_BASE (0xD84401B4) -+ -+#define L2_CACHE_FLUSH_REQUEST_BIT_OFFSET (0x1) -+#define L2_CACHE_FLUSH_DONE_BIT_OFFSET (0x3) -+ -+#endif /* __K1X_FPGA_CONFIG_H__ */ -diff --git a/platform/generic/include/spacemit/spacemit_config.h b/platform/generic/include/spacemit/spacemit_config.h -new file mode 100644 -index 000000000000..d48869cc2ae5 ---- /dev/null -+++ b/platform/generic/include/spacemit/spacemit_config.h -@@ -0,0 +1,30 @@ -+#ifndef __SPACEMIT_CONFIG_H__ -+#define __SPACEMIT_CONFIG_H__ -+ -+#if defined(CONFIG_PLATFORM_SPACEMIT_K1PRO) -+#include "./k1pro/core_common.h" -+ -+#if defined(CONFIG_PLATFORM_SPACEMIT_K1PRO_FPGA) -+#include "./k1pro/k1pro_fpga.h" -+#elif defined(CONFIG_PLATFORM_SPACEMIT_K1PRO_QEMU) -+#include "./k1pro/k1pro_qemu.h" -+#elif defined(CONFIG_PLATFORM_SPACEMIT_K1PRO_SIM) -+#include "./k1pro/k1pro_sim.h" -+#elif defined(CONFIG_PLATFORM_SPACEMIT_K1PRO_VERIFY) -+#include "./k1pro/k1pro_verify.h" -+#endif -+ -+#endif -+ -+#if defined(CONFIG_PLATFORM_SPACEMIT_K1X) -+#include "./k1x/core_common.h" -+ -+#if defined(CONFIG_PLATFORM_SPACEMIT_K1X_FPGA) -+#include "./k1x/k1x_fpga.h" -+#elif defined(CONFIG_PLATFORM_SPACEMIT_K1X_EVB) -+#include "./k1x/k1x_evb.h" -+#endif -+ -+#endif -+ -+#endif /* __SPACEMIT_CONFIG_H__ */ -diff --git a/platform/generic/objects.mk b/platform/generic/objects.mk -index 136853eeb1d8..f3418efeb011 100644 ---- a/platform/generic/objects.mk -+++ b/platform/generic/objects.mk -@@ -22,7 +22,7 @@ platform-objs-y += platform.o - platform-objs-y += platform_override_modules.o - - # Blobs to build --FW_TEXT_START=0x80000000 -+FW_TEXT_START?=0x80000000 - FW_DYNAMIC=y - FW_JUMP=y - ifeq ($(PLATFORM_RISCV_XLEN), 32) -diff --git a/platform/generic/spacemit/fw_dynamic.its b/platform/generic/spacemit/fw_dynamic.its -new file mode 100755 -index 000000000000..f1159d4cfe0a ---- /dev/null -+++ b/platform/generic/spacemit/fw_dynamic.its -@@ -0,0 +1,31 @@ -+/dts-v1/; -+ -+/ { -+ description = "Configuration to load OpenSBI before U-Boot"; -+ #address-cells = <2>; -+ fit,fdt-list = "of-list"; -+ -+ images { -+ opensbi { -+ description = "OpenSBI fw_dynamic Firmware"; -+ type = "firmware"; -+ os = "opensbi"; -+ arch = "riscv"; -+ compression = "none"; -+ load = <0x0 0x0>; -+ 
entry = <0x0 0x0>; -+ data = /incbin/("./fw_dynamic.bin"); -+ hash-1 { -+ algo = "crc32"; -+ }; -+ }; -+ }; -+ configurations { -+ default = "config_1"; -+ -+ config_1 { -+ description = "opensbi FIT config"; -+ firmware = "opensbi"; -+ }; -+ }; -+}; -diff --git a/platform/generic/spacemit/objects.mk b/platform/generic/spacemit/objects.mk -new file mode 100644 -index 000000000000..92ef7eb26c92 ---- /dev/null -+++ b/platform/generic/spacemit/objects.mk -@@ -0,0 +1,7 @@ -+# -+# SPDX-License-Identifier: BSD-2-Clause -+# -+ -+carray-platform_override_modules-$(CONFIG_PLATFORM_SPACEMIT_K1PRO)$(CONFIG_PLATFORM_SPACEMIT_K1X) += spacemit_k1 -+platform-objs-$(CONFIG_PLATFORM_SPACEMIT_K1PRO)$(CONFIG_PLATFORM_SPACEMIT_K1X) += spacemit/spacemit_k1.o -+firmware-its-$(CONFIG_PLATFORM_SPACEMIT_K1PRO)$(CONFIG_PLATFORM_SPACEMIT_K1X) += spacemit/fw_dynamic.its -diff --git a/platform/generic/spacemit/spacemit_k1.c b/platform/generic/spacemit/spacemit_k1.c -new file mode 100644 -index 000000000000..8664e05e7910 ---- /dev/null -+++ b/platform/generic/spacemit/spacemit_k1.c -@@ -0,0 +1,194 @@ -+/* -+ * SPDX-License-Identifier: BSD-2-Clause -+ * -+ * Copyright (c) 2022 Spacemit. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include <../../../lib/utils/psci/psci_private.h> -+#include -+#include -+#include -+ -+extern struct sbi_platform platform; -+ -+PLAT_CCI_MAP -+ -+static void wakeup_other_core(void) -+{ -+ int i; -+ u32 hartid, clusterid, cluster_enabled = 0; -+ unsigned int cur_hartid = current_hartid(); -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(cur_hartid); -+ -+#if defined(CONFIG_PLATFORM_SPACEMIT_K1X) -+ /* set other cpu's boot-entry */ -+ writel(scratch->warmboot_addr & 0xffffffff, (u32 *)C0_RVBADDR_LO_ADDR); -+ writel((scratch->warmboot_addr >> 32) & 0xffffffff, (u32 *)C0_RVBADDR_HI_ADDR); -+ -+ writel(scratch->warmboot_addr & 0xffffffff, (u32 *)C1_RVBADDR_LO_ADDR); -+ writel((scratch->warmboot_addr >> 32) & 0xffffffff, (u32 *)C1_RVBADDR_HI_ADDR); -+#elif defined(CONFIG_PLATFORM_SPACEMIT_K1PRO) -+ for (i = 0; i < platform.hart_count; i++) { -+ hartid = platform.hart_index2id[i]; -+ -+ unsigned long core_index = MPIDR_AFFLVL1_VAL(hartid) * PLATFORM_MAX_CPUS_PER_CLUSTER -+ + MPIDR_AFFLVL0_VAL(hartid); -+ -+ writel(scratch->warmboot_addr & 0xffffffff, (u32 *)(CORE0_RVBADDR_LO_ADDR + core_index * CORE_RVBADDR_STEP)); -+ writel((scratch->warmboot_addr >> 32) & 0xffffffff, (u32 *)(CORE0_RVBADDR_HI_ADDR + core_index * CORE_RVBADDR_STEP)); -+ } -+#endif -+ -+#ifdef CONFIG_ARM_PSCI_SUPPORT -+ unsigned char *cpu_topology = plat_get_power_domain_tree_desc(); -+#endif -+ -+ // hart0 is already boot up -+ for (i = 0; i < platform.hart_count; i++) { -+ hartid = platform.hart_index2id[i]; -+ -+ clusterid = MPIDR_AFFLVL1_VAL(hartid); -+ -+ /* we only enable snoop of cluster0 */ -+ if (0 == (cluster_enabled & (1 << clusterid))) { -+ cluster_enabled |= 1 << clusterid; -+ if (0 == clusterid) { -+ cci_enable_snoop_dvm_reqs(clusterid); -+ } -+#ifdef CONFIG_ARM_PSCI_SUPPORT -+ cpu_topology[CLUSTER_INDEX_IN_CPU_TOPOLOGY]++; -+#endif -+ } -+ -+#ifdef CONFIG_ARM_PSCI_SUPPORT -+ /* we only support 2 cluster by now */ -+ if (clusterid == PLATFORM_CLUSTER_COUNT - 1) -+ cpu_topology[CLUSTER1_INDEX_IN_CPU_TOPOLOGY]++; -+ else -+ cpu_topology[CLUSTER0_INDEX_IN_CPU_TOPOLOGY]++; -+#endif -+ } -+} -+ -+/* -+ * Platform early initialization. 
-+ */ -+static int spacemit_k1_early_init(bool cold_boot, const struct fdt_match *match) -+{ -+ if (cold_boot) { -+ /* initiate cci */ -+ cci_init(PLATFORM_CCI_ADDR, cci_map, array_size(cci_map)); -+ /* enable dcache */ -+ csi_enable_dcache(); -+ /* wakeup other core ? */ -+ wakeup_other_core(); -+ /* initialize */ -+#ifdef CONFIG_ARM_SCMI_PROTOCOL_SUPPORT -+ plat_arm_pwrc_setup(); -+#endif -+ } else { -+#ifdef CONFIG_ARM_PSCI_SUPPORT -+ psci_warmboot_entrypoint(); -+#endif -+ } -+ -+ return 0; -+} -+ -+#ifdef CONFIG_ARM_PSCI_SUPPORT -+/** Start (or power-up) the given hart */ -+static int spacemit_hart_start(unsigned int hartid, unsigned long saddr) -+{ -+ return psci_cpu_on_start(hartid, saddr); -+} -+ -+/** -+ * Stop (or power-down) the current hart from running. This call -+ * doesn't expect to return if success. -+ */ -+static int spacemit_hart_stop(void) -+{ -+ psci_cpu_off(); -+ -+ return 0; -+} -+ -+static int spacemit_hart_suspend(unsigned int suspend_type) -+{ -+ psci_cpu_suspend(suspend_type, 0, 0); -+ -+ return 0; -+} -+ -+static void spacemit_hart_resume(void) -+{ -+ psci_warmboot_entrypoint(); -+} -+ -+static const struct sbi_hsm_device spacemit_hsm_ops = { -+ .name = "spacemit-hsm", -+ .hart_start = spacemit_hart_start, -+ .hart_stop = spacemit_hart_stop, -+ .hart_suspend = spacemit_hart_suspend, -+ .hart_resume = spacemit_hart_resume, -+}; -+#endif -+ -+/* -+ * Platform final initialization. -+ */ -+static int spacemit_k1_final_init(bool cold_boot, const struct fdt_match *match) -+{ -+#ifdef CONFIG_ARM_PSCI_SUPPORT -+ /* for clod boot, we build the cpu topology structure */ -+ if (cold_boot) { -+ sbi_hsm_set_device(&spacemit_hsm_ops); -+ return psci_setup(); -+ } -+#endif -+ -+ return 0; -+} -+ -+static bool spacemit_cold_boot_allowed(u32 hartid, const struct fdt_match *match) -+{ -+ /* enable core snoop */ -+ csr_set(CSR_ML2SETUP, 1 << (hartid % PLATFORM_MAX_CPUS_PER_CLUSTER)); -+ -+ /* dealing with resuming process */ -+ if ((__sbi_hsm_hart_get_state(hartid) == SBI_HSM_STATE_SUSPENDED) && (hartid == 0)) -+ return false; -+ -+ return ((hartid == 0) ? 
true : false); -+} -+ -+static const struct fdt_match spacemit_k1_match[] = { -+ { .compatible = "spacemit,k1-pro" }, -+ { .compatible = "spacemit,k1x" }, -+ { }, -+}; -+ -+const struct platform_override spacemit_k1 = { -+ .match_table = spacemit_k1_match, -+ .early_init = spacemit_k1_early_init, -+ .final_init = spacemit_k1_final_init, -+ .cold_boot_allowed = spacemit_cold_boot_allowed, -+}; --- -2.35.3 - diff --git a/patch/atf/atf-spacemit/002-Update-for-v1.0beta3.1.patch b/patch/atf/atf-spacemit/002-Update-for-v1.0beta3.1.patch deleted file mode 100644 index 7818e0ef9264..000000000000 --- a/patch/atf/atf-spacemit/002-Update-for-v1.0beta3.1.patch +++ /dev/null @@ -1,1201 +0,0 @@ -From 7aad08218f25d4eed674bc172995f38291ebfb5e Mon Sep 17 00:00:00 2001 -From: James Deng -Date: Mon, 15 Apr 2024 11:42:57 +0800 -Subject: Update for v1.0beta3.1 - ---- - debian/.gitignore | 1 + - debian/README.source | 7 + - debian/bin/git-snapshot | 18 ++ - debian/control | 24 +++ - debian/copyright | 177 ++++++++++++++++++ - debian/opensbi-spacemit.docs | 2 + - debian/opensbi-spacemit.install | 1 + - debian/opensbi-spacemit.lintian-overrides | 9 + - debian/opensbi-spacemit.postinst | 45 +++++ - debian/rules | 27 +++ - debian/source/format | 1 + - debian/upstream/metadata | 5 + - debian/watch | 3 + - include/sbi/riscv_encoding.h | 1 + - include/sbi_utils/irqchip/fdt_irqchip_plic.h | 2 - - lib/sbi/sbi_console.c | 7 + - lib/sbi/sbi_hart.c | 6 + - lib/utils/Kconfig | 7 + - lib/utils/arm_scmi/css/common/css_pm.c | 3 +- - lib/utils/irqchip/fdt_irqchip_plic.c | 5 - - lib/utils/psci/psci_common.c | 52 +++++ - lib/utils/psci/psci_main.c | 82 ++++++-- - lib/utils/psci/psci_private.h | 4 + - .../spacemit/plat/k1x/underly_implement.c | 61 +++++- - lib/utils/psci/spacemit/plat/plat_pm.c | 25 ++- - platform/generic/spacemit/spacemit_k1.c | 175 +++++++++++------ - 26 files changed, 659 insertions(+), 91 deletions(-) - create mode 100644 debian/.gitignore - create mode 100644 debian/README.source - create mode 100755 debian/bin/git-snapshot - create mode 100644 debian/control - create mode 100644 debian/copyright - create mode 100644 debian/opensbi-spacemit.docs - create mode 100644 debian/opensbi-spacemit.install - create mode 100644 debian/opensbi-spacemit.lintian-overrides - create mode 100755 debian/opensbi-spacemit.postinst - create mode 100755 debian/rules - create mode 100644 debian/source/format - create mode 100644 debian/upstream/metadata - create mode 100644 debian/watch - -diff --git a/debian/.gitignore b/debian/.gitignore -new file mode 100644 -index 000000000000..6d10dce740f7 ---- /dev/null -+++ b/debian/.gitignore -@@ -0,0 +1 @@ -+changelog -diff --git a/debian/README.source b/debian/README.source -new file mode 100644 -index 000000000000..bd33e1488fcf ---- /dev/null -+++ b/debian/README.source -@@ -0,0 +1,7 @@ -+Upstream git snapshots are produced with: -+ -+ ./debian/bin/git-snapshot COMMIT -+ -+Which produces an upstream version based on "git describe" output. 
-+ -+ -- Vagrant Cascadian , Thu, 30 May 2019 15:02:49 -0700 -diff --git a/debian/bin/git-snapshot b/debian/bin/git-snapshot -new file mode 100755 -index 000000000000..6db5eb241d78 ---- /dev/null -+++ b/debian/bin/git-snapshot -@@ -0,0 +1,18 @@ -+#!/bin/sh -+ -+set -e -+ -+commit="$1" -+test -z "$commit" && echo "invalid commit" && exit 1 -+package=opensbi -+archive=tar.gz -+version=$(git describe "$commit" | sed -e 's,-,+,' -e 's,-g,.,' -e 's,^v,,g') -+output=../${package}_${version}.orig.${archive} -+test -f "${output}" && echo "already present: ${output}" && exit 1 -+ -+git archive \ -+ --format=${archive} \ -+ --prefix=${package}-${version}/ \ -+ --output=${output} \ -+ ${commit} && \ -+ echo "successfully created: ${output}" -diff --git a/debian/control b/debian/control -new file mode 100644 -index 000000000000..6c4a1747b5f3 ---- /dev/null -+++ b/debian/control -@@ -0,0 +1,24 @@ -+Source: opensbi-spacemit -+Section: misc -+Priority: optional -+Maintainer: Vagrant Cascadian -+Uploaders: Karsten Merker -+Build-Depends: debhelper-compat (=13), -+ python3, -+ u-boot-tools, -+Standards-Version: 4.6.2 -+Rules-Requires-Root: no -+Vcs-Browser: https://salsa.debian.org/opensbi-team/opensbi -+Vcs-Git: https://salsa.debian.org/opensbi-team/opensbi.git -+Homepage: https://github.com/riscv-software-src/opensbi -+ -+Package: opensbi-spacemit -+Architecture: all -+Multi-Arch: foreign -+Depends: ${misc:Depends}, ${shlibs:Depends} -+Description: RISC-V Open Source Supervisor Binary Interface -+ An open-source reference implementation of the RISC-V SBI -+ specifications for platform-specific firmwares executing in M-mode. -+ . -+ The following firmware platforms are provided: -+ generic -diff --git a/debian/copyright b/debian/copyright -new file mode 100644 -index 000000000000..cfbcf75e0649 ---- /dev/null -+++ b/debian/copyright -@@ -0,0 +1,177 @@ -+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -+Upstream-Name: opensbi -+Source: https://github.com/riscv-software-src/opensbi -+ -+Files: * -+Copyright: 2019-2020 Western Digital Corporation or its affiliates and -+ other contributors. -+ 2019-2022 Western Digital Corporation or its affiliates. -+ 2021 Christoph Müllner -+ 2021 YADRO -+ 2021 Cobham Gaisler AB. -+ 2021 Gabriel Somlo -+ 2021-2022 SiFive -+ 2021-2022 Samuel Holland -+ 2022 Ventana Micro Systems Inc. -+ 2022 Andes Technology Corporation -+ 2022 StarFive Technology Co., Ltd. -+ 2022 Renesas Electronics Corporation -+ 2023 RISC-V International -+License: BSD-2-clause -+ -+Files: include/sbi_utils/sys/htif.h -+ lib/utils/sys/htif.c -+Copyright: 2010-2020, The Regents of the University of California -+License: BSD-3-clause -+ -+Files: debian/* -+Copyright: 2019-2022 Vagrant Cascadian -+License: BSD-2-clause -+ -+Files: platform/generic/renesas/rzfive/rzfive.c -+Copyright: 2022 Renesas Electronics Corp. -+License: GPL-2 -+ -+Files: platform/fpga/ariane/* -+Copyright: 2019 FORTH-ICS/CARV -+License: BSD-2-clause -+ -+Files: lib/utils/libfdt/* -+Copyright: 2006-2012 David Gibson, IBM Corporation. -+ 2012 Kim Phillips, Freescale Semiconductor. -+ 2014 David Gibson -+ 2016 Free Electrons -+ 2016 NextThing Co. -+ 2018 embedded brains GmbH -+License: BSD-2-clause or GPL-2+ -+ -+Files: lib/utils/libfdt/objects.mk -+Copyright: 2019 Western Digital Corporation or its affiliates. 
-+License: BSD-2-clause -+ -+Files: -+ lib/utils/libquad/divdi3.c -+ lib/utils/libquad/moddi3.c -+ lib/utils/libquad/qdivrem.c -+ lib/utils/libquad/quad.h -+ lib/utils/libquad/udivdi3.c -+ lib/utils/libquad/umoddi3.c -+Copyright: -+ 1992, 1993 The Regents of the University of California. -+License: BSD-3-clause -+ -+Files: -+ lib/utils/libquad/include/limits.h -+ lib/utils/libquad/include/sys/cdefs.h -+ lib/utils/libquad/include/sys/types.h -+ lib/utils/libquad/objects.mk -+Copyright: 2021 Jessica Clarke -+License: BSD-2-clause -+ -+Files: -+ include/sbi_utils/fdt/* -+ lib/utils/fdt/* -+Copyright: -+ 2020 Bin Meng -+ 2021 Western Digital Corporation or its affiliates. -+License: BSD-2-clause -+ -+Files: scripts/Kconfiglib/* -+Copyright: 2011-2019, Ulf Magnusson -+License: ISC -+ -+License: BSD-2-clause -+ Redistribution and use in source and binary forms, with or without -+ modification, are permitted provided that the following conditions are met: -+ . -+ 1. Redistributions of source code must retain the above copyright notice, this -+ list of conditions and the following disclaimer. -+ 2. Redistributions in binary form must reproduce the above copyright notice, -+ this list of conditions and the following disclaimer in the documentation -+ and/or other materials provided with the distribution. -+ . -+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ -+License: BSD-3-clause -+ Redistribution and use in source and binary forms, with or without -+ modification, are permitted provided that the following conditions -+ are met: -+ . -+ 1. Redistributions of source code must retain the above copyright -+ notice, this list of conditions and the following disclaimer. -+ . -+ 2. Redistributions in binary form must reproduce the above copyright -+ notice, this list of conditions and the following disclaimer in the -+ documentation and/or other materials provided with the distribution. -+ . -+ 3. Neither the name of the copyright holder nor the names of its -+ contributors may be used to endorse or promote products derived from -+ this software without specific prior written permission. -+ . -+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -+ BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS -+ OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED -+ AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY -+ WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ POSSIBILITY OF SUCH DAMAGE. -+ -+License: GPL-2+ -+ This library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU General Public License as -+ published by the Free Software Foundation; either version 2 of the -+ License, or (at your option) any later version. -+ . -+ This library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ GNU General Public License for more details. -+ . -+ You should have received a copy of the GNU General Public -+ License along with this library; if not, write to the Free -+ Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, -+ MA 02110-1301 USA -+ . -+ On Debian systems, the complete text of the GNU General Public -+ License Version 2.0 can be found in -+ `/usr/share/common-licenses/GPL-2'. -+ -+License: GPL-2 -+ This library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU General Public License as -+ published by the Free Software Foundation; version 2 of the -+ License. -+ . -+ This library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ GNU General Public License for more details. -+ . -+ You should have received a copy of the GNU General Public -+ License along with this library; if not, write to the Free -+ Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, -+ MA 02110-1301 USA -+ . -+ On Debian systems, the complete text of the GNU General Public -+ License Version 2.0 can be found in -+ `/usr/share/common-licenses/GPL-2'. -+ -+License: ISC -+ Permission to use, copy, modify, and/or distribute this software for -+ any purpose with or without fee is hereby granted, provided that the -+ above copyright notice and this permission notice appear in all -+ copies. -diff --git a/debian/opensbi-spacemit.docs b/debian/opensbi-spacemit.docs -new file mode 100644 -index 000000000000..85f6f20d235f ---- /dev/null -+++ b/debian/opensbi-spacemit.docs -@@ -0,0 +1,2 @@ -+docs/ -+CONTRIBUTORS.md -diff --git a/debian/opensbi-spacemit.install b/debian/opensbi-spacemit.install -new file mode 100644 -index 000000000000..d1897d557a11 ---- /dev/null -+++ b/debian/opensbi-spacemit.install -@@ -0,0 +1 @@ -+build/platform/generic/firmware/fw_*.itb /usr/lib/riscv64-linux-gnu/opensbi/generic/ -diff --git a/debian/opensbi-spacemit.lintian-overrides b/debian/opensbi-spacemit.lintian-overrides -new file mode 100644 -index 000000000000..8b6168d15e45 ---- /dev/null -+++ b/debian/opensbi-spacemit.lintian-overrides -@@ -0,0 +1,9 @@ -+# These are binary firmware for use with qemu. -+opensbi binary: arch-independent-package-contains-binary-or-object *usr/lib/*/opensbi/*/fw_*.elf* -+ -+# Needs to be statically linked. 
-+opensbi binary: statically-linked-binary *usr/lib/*/opensbi/*/fw_*.elf* -+ -+# Binary firmwares being installed into multi-arch directory for -+# future-proofing if riscv32 becomes a thing. -+opensbi binary: triplet-dir-and-architecture-mismatch is for riscv64 instead of all *usr/lib/riscv64-linux-gnu/* -diff --git a/debian/opensbi-spacemit.postinst b/debian/opensbi-spacemit.postinst -new file mode 100755 -index 000000000000..1f6feca80674 ---- /dev/null -+++ b/debian/opensbi-spacemit.postinst -@@ -0,0 +1,45 @@ -+#!/bin/sh -+set -e -+ -+case "$1" in -+configure) -+ target="" -+ if grep -q '^spacemit' /sys/firmware/devicetree/base/model; then -+ target="spacemit" -+ else -+ exit 0 -+ fi -+ -+ for x in $(cat /proc/cmdline); do -+ case $x in -+ root=*) -+ ROOT=${x#root=} -+ ;; -+ esac -+ done -+ -+ if [ -n $ROOT ]; then -+ case $ROOT in -+ "/dev/mmcblk0"*) -+ OPENSBI=/dev/mmcblk0p3 -+ ;; -+ "/dev/mmcblk2"*) -+ OPENSBI=/dev/mmcblk2p3 -+ ;; -+ *) -+ echo "Unsupported root=$ROOT" -+ exit 0 -+ ;; -+ esac -+ else -+ echo "Missing root= in cmdline" -+ exit 0 -+ fi -+ -+ if [ -n "$target" ] && [ -e $OPENSBI ]; then -+ dd if=/usr/lib/riscv64-linux-gnu/opensbi/generic/fw_dynamic.itb of=$OPENSBI bs=1 && sync -+ fi -+ ;; -+esac -+ -+exit 0 -diff --git a/debian/rules b/debian/rules -new file mode 100755 -index 000000000000..ab9cc10c406c ---- /dev/null -+++ b/debian/rules -@@ -0,0 +1,27 @@ -+#!/usr/bin/make -f -+# Always set CROSS_COMPILE, which also works for native builds. -+export CROSS_COMPILE=riscv64-unknown-linux-gnu- -+export ARCH=riscv -+ -+# Enable verbose build by default, disable when terse is specified. -+ifeq (,$(filter terse,$(DEB_BUILD_OPTIONS))) -+VERBOSE=1 -+else -+VERBOSE=0 -+endif -+ -+%: -+ dh $@ -+ -+override_dh_auto_build: -+ make \ -+ V=$(VERBOSE) \ -+ PLATFORM_DEFCONFIG=k1_defconfig \ -+ PLATFORM=generic ; \ -+ -+override_dh_installdocs: -+ dh_installdocs --exclude=doxygen.cfg -+ -+override_dh_install: -+ chmod -x build/platform/generic/firmware/fw_*.bin -+ dh_install -diff --git a/debian/source/format b/debian/source/format -new file mode 100644 -index 000000000000..163aaf8d82b6 ---- /dev/null -+++ b/debian/source/format -@@ -0,0 +1 @@ -+3.0 (quilt) -diff --git a/debian/upstream/metadata b/debian/upstream/metadata -new file mode 100644 -index 000000000000..f716ba77edcd ---- /dev/null -+++ b/debian/upstream/metadata -@@ -0,0 +1,5 @@ -+--- -+Bug-Database: https://github.com/riscv-software-src/opensbi/issues -+Bug-Submit: https://github.com/riscv-software-src/opensbi/issues/new -+Repository: https://github.com/riscv-software-src/opensbi.git -+Repository-Browse: https://github.com/riscv-software-src/opensbi -diff --git a/debian/watch b/debian/watch -new file mode 100644 -index 000000000000..508d476a75c0 ---- /dev/null -+++ b/debian/watch -@@ -0,0 +1,3 @@ -+version=4 -+opts=filenamemangle=s/\/(.*)v/@PACKAGE@-/ \ -+ https://github.com/riscv-software-src/@PACKAGE@/tags .*/v@ANY_VERSION@.tar.gz -diff --git a/include/sbi/riscv_encoding.h b/include/sbi/riscv_encoding.h -index 54e09d44528b..5abb8e4c776b 100644 ---- a/include/sbi/riscv_encoding.h -+++ b/include/sbi/riscv_encoding.h -@@ -709,6 +709,7 @@ - #define CSR_MIPH 0x354 - - #define CSR_TCMCFG 0x5DB -+#define CSR_FEATURECTL 0xbf9 - - /* ===== Trap/Exception Causes ===== */ - -diff --git a/include/sbi_utils/irqchip/fdt_irqchip_plic.h b/include/sbi_utils/irqchip/fdt_irqchip_plic.h -index b892b0bc70f8..df645dd00ee3 100644 ---- a/include/sbi_utils/irqchip/fdt_irqchip_plic.h -+++ b/include/sbi_utils/irqchip/fdt_irqchip_plic.h -@@ -28,8 
+28,6 @@ void fdt_plic_context_save(bool smode, u32 *enable, u32 *threshold, u32 num); - void fdt_plic_context_restore(bool smode, const u32 *enable, u32 threshold, - u32 num); - --void fdt_plic_context_exit(void); -- - void thead_plic_restore(void); - - #endif -diff --git a/lib/sbi/sbi_console.c b/lib/sbi/sbi_console.c -index 168dffd06429..9d917ec78927 100644 ---- a/lib/sbi/sbi_console.c -+++ b/lib/sbi/sbi_console.c -@@ -422,6 +422,7 @@ int sbi_snprintf(char *out, u32 out_sz, const char *format, ...) - return retval; - } - -+#ifdef CONFIG_ENABLE_LOGGING - int sbi_printf(const char *format, ...) - { - va_list args; -@@ -435,6 +436,12 @@ int sbi_printf(const char *format, ...) - - return retval; - } -+#else -+int sbi_printf(const char *format, ...) -+{ -+ return 0; -+} -+#endif - - int sbi_dprintf(const char *format, ...) - { -diff --git a/lib/sbi/sbi_hart.c b/lib/sbi/sbi_hart.c -index 3a3265df7f20..a3f752d27105 100644 ---- a/lib/sbi/sbi_hart.c -+++ b/lib/sbi/sbi_hart.c -@@ -819,6 +819,12 @@ sbi_hart_switch_mode(unsigned long arg0, unsigned long arg1, - } - - csr_write(CSR_TCMCFG, 1); -+ /* -+ * update 0xfb9 csr: -+ * bit9: for emprove fence operation -+ * bit23 for disable vector load/store dual-issue -+ */ -+ csr_set(CSR_FEATURECTL, (1<<9)|(1<<23)); - - register unsigned long a0 asm("a0") = arg0; - register unsigned long a1 asm("a1") = arg1; -diff --git a/lib/utils/Kconfig b/lib/utils/Kconfig -index 3ac04ab1ab4f..dab9bf956598 100644 ---- a/lib/utils/Kconfig -+++ b/lib/utils/Kconfig -@@ -24,4 +24,11 @@ source "$(OPENSBI_SRC_DIR)/lib/utils/timer/Kconfig" - - source "$(OPENSBI_SRC_DIR)/lib/utils/psci/Kconfig" - -+config ENABLE_LOGGING -+ bool "Enable Logging" -+ default n -+ help -+ Enables or disables logging throughout the system. -+ Enable this option to allow the system to print log messages. 
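The hunks above gate sbi_printf() behind the new ENABLE_LOGGING Kconfig option: when the option is off, the symbol stays but every call collapses into a return-0 stub, and only the stub branch is new code. A minimal sketch of the same compile-out pattern, using made-up names (demo_printf, DEMO_ENABLE_LOGGING) rather than the real OpenSBI symbols:

#include <stdarg.h>
#include <stdio.h>

#ifdef DEMO_ENABLE_LOGGING
/* Option enabled: forward to the real output routine (the SBI console in the patch). */
int demo_printf(const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vprintf(fmt, ap);
	va_end(ap);
	return n;
}
#else
/* Option disabled: keep the prototype so callers still link, but do nothing. */
int demo_printf(const char *fmt, ...)
{
	(void)fmt;
	return 0;
}
#endif

int main(void)
{
	demo_printf("hart %d online\n", 0);
	return 0;
}

Build with -DDEMO_ENABLE_LOGGING to get output, or without it to get the silent stub.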
-+ - endmenu -diff --git a/lib/utils/arm_scmi/css/common/css_pm.c b/lib/utils/arm_scmi/css/common/css_pm.c -index 8d17b6be442f..83908c747d8d 100644 ---- a/lib/utils/arm_scmi/css/common/css_pm.c -+++ b/lib/utils/arm_scmi/css/common/css_pm.c -@@ -114,8 +114,7 @@ static void css_power_down_common(const psci_power_state_t *target_state) - static int css_pwr_domain_off_early(const psci_power_state_t *target_state) - { - /* the ipi's pending is cleared before */ -- /* disable the plic irq */ -- fdt_plic_context_exit(); -+ csr_clear(CSR_MIE, MIP_SSIP | MIP_MSIP | MIP_STIP | MIP_MTIP | MIP_SEIP | MIP_MEIP); - /* clear the external irq pending */ - csr_clear(CSR_MIP, MIP_MEIP); - csr_clear(CSR_MIP, MIP_SEIP); -diff --git a/lib/utils/irqchip/fdt_irqchip_plic.c b/lib/utils/irqchip/fdt_irqchip_plic.c -index 0a2d61b0beca..829c5ee20341 100644 ---- a/lib/utils/irqchip/fdt_irqchip_plic.c -+++ b/lib/utils/irqchip/fdt_irqchip_plic.c -@@ -85,11 +85,6 @@ static int irqchip_plic_warm_init(void) - plic_get_hart_scontext(scratch)); - } - --void fdt_plic_context_exit(void) --{ -- irqchip_plic_warm_init(); --} -- - static int irqchip_plic_update_hartid_table(void *fdt, int nodeoff, - struct plic_data *pd) - { -diff --git a/lib/utils/psci/psci_common.c b/lib/utils/psci/psci_common.c -index f4b4bee03ec4..0a8ebd1319fd 100644 ---- a/lib/utils/psci/psci_common.c -+++ b/lib/utils/psci/psci_common.c -@@ -870,3 +870,55 @@ void riscv_pwr_state_to_psci(unsigned int rstate, unsigned int *pstate) - if (rstate & (PSTATE_PWR_LVL_MASK << RSTATE_PWR_LVL_SHIFT)) - *pstate |= (rstate & (PSTATE_PWR_LVL_MASK << RSTATE_PWR_LVL_SHIFT)); - } -+ -+/******************************************************************************* -+ * This function verifies that all the other cores in the system have been -+ * turned OFF and the current CPU is the last running CPU in the system. -+ * Returns true, if the current CPU is the last ON CPU or false otherwise. -+ ******************************************************************************/ -+bool psci_is_last_on_cpu(void) -+{ -+ unsigned int cpu_idx; -+ unsigned int hartid = current_hartid(); -+ int my_idx = plat_core_pos_by_mpidr(hartid); -+ -+ for (cpu_idx = 0; cpu_idx < psci_plat_core_count; cpu_idx++) { -+ if (cpu_idx == my_idx) { -+ if (psci_get_aff_info_state() != AFF_STATE_ON) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ continue; -+ } -+ -+ if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF) { -+ sbi_printf("core=%u other than current core=%u %s\n", -+ cpu_idx, my_idx, "running in the system"); -+ return false; -+ } -+ } -+ -+ return true; -+} -+ -+/****************************************************************************** -+ * This function retrieves the `psci_power_state_t` for system suspend from -+ * the platform. -+ *****************************************************************************/ -+void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info) -+{ -+ /* -+ * Assert that the required pm_ops hook is implemented to ensure that -+ * the capability detected during psci_setup() is valid. 
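psci_system_suspend(), added further below, may only proceed when the calling hart is the last one still online, which is what the psci_is_last_on_cpu() helper above establishes by walking the per-core affinity states. A compact sketch of that check over a plain state array, with invented names (core_state, NCORES) rather than the real PSCI bookkeeping:

#include <stdbool.h>
#include <stdio.h>

#define NCORES 8

enum aff_state { AFF_OFF, AFF_ON };

/* Invented stand-in for the per-core affinity-info states that PSCI tracks. */
static enum aff_state core_state[NCORES] = {
	AFF_ON, AFF_OFF, AFF_OFF, AFF_OFF,
	AFF_OFF, AFF_OFF, AFF_OFF, AFF_OFF,
};

/* True only if 'me' is still ON and every other core has reached OFF. */
static bool is_last_on_cpu(unsigned int me)
{
	for (unsigned int i = 0; i < NCORES; i++) {
		if (i == me)
			continue;
		if (core_state[i] != AFF_OFF)
			return false;	/* another core is still running */
	}
	return core_state[me] == AFF_ON;
}

int main(void)
{
	printf("cpu0 is last on: %s\n", is_last_on_cpu(0) ? "yes" : "no");
	return 0;
}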
-+ */ -+ if (psci_plat_pm_ops->get_sys_suspend_power_state == NULL) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ /* -+ * Query the platform for the power_state required for system suspend -+ */ -+ psci_plat_pm_ops->get_sys_suspend_power_state(state_info); -+} -+ -diff --git a/lib/utils/psci/psci_main.c b/lib/utils/psci/psci_main.c -index f2441f57e16e..a3ce138c00cc 100644 ---- a/lib/utils/psci/psci_main.c -+++ b/lib/utils/psci/psci_main.c -@@ -9,34 +9,32 @@ - /******************************************************************************* - * PSCI frontend api for servicing SMCs. Described in the PSCI spec. - ******************************************************************************/ --int psci_cpu_on(u_register_t target_cpu, -- uintptr_t entrypoint) -- -+int psci_cpu_on(u_register_t target_cpu, uintptr_t entrypoint) - { -- int rc; -+ int rc; - -- /* Determine if the cpu exists of not */ -- rc = psci_validate_mpidr(target_cpu); -- if (rc != PSCI_E_SUCCESS) -- return PSCI_E_INVALID_PARAMS; -+ /* Determine if the cpu exists of not */ -+ rc = psci_validate_mpidr(target_cpu); -+ if (rc != PSCI_E_SUCCESS) -+ return PSCI_E_INVALID_PARAMS; - -- /* -- * To turn this cpu on, specify which power -- * levels need to be turned on -- */ -- return psci_cpu_on_start(target_cpu, entrypoint); -+ /* -+ * To turn this cpu on, specify which power -+ * levels need to be turned on -+ */ -+ return psci_cpu_on_start(target_cpu, entrypoint); - } - - int psci_affinity_info(u_register_t target_affinity, - unsigned int lowest_affinity_level) - { -- int ret; -- unsigned int target_idx; -+ int ret; -+ unsigned int target_idx; - psci_cpu_data_t *svc_cpu_data; - struct sbi_scratch *scratch = sbi_hartid_to_scratch(target_affinity); - svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off); - -- /* We dont support level higher than PSCI_CPU_PWR_LVL */ -+ /* We dont support level higher than PSCI_CPU_PWR_LVL */ - if (lowest_affinity_level > PSCI_CPU_PWR_LVL) - return PSCI_E_INVALID_PARAMS; - -@@ -186,3 +184,55 @@ int psci_cpu_suspend(unsigned int power_state, - return rc; - } - -+int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id) -+{ -+ int rc; -+ psci_power_state_t state_info; -+ /* entry_point_info_t ep; */ -+ -+ /* Check if the current CPU is the last ON CPU in the system */ -+ if (!psci_is_last_on_cpu()) -+ return PSCI_E_DENIED; -+ -+ /* Validate the entry point and get the entry_point_info */ -+/** -+ * rc = psci_validate_entry_point(&ep, entrypoint, context_id); -+ * if (rc != PSCI_E_SUCCESS) -+ * return rc; -+ */ -+ -+ /* Query the psci_power_state for system suspend */ -+ psci_query_sys_suspend_pwrstate(&state_info); -+ -+ /* -+ * Check if platform allows suspend to Highest power level -+ * (System level) -+ */ -+ if (psci_find_target_suspend_lvl(&state_info) < PLAT_MAX_PWR_LVL) -+ return PSCI_E_DENIED; -+ -+ /* Ensure that the psci_power_state makes sense */ -+ if (psci_validate_suspend_req(&state_info, PSTATE_TYPE_POWERDOWN) -+ != PSCI_E_SUCCESS) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ if (is_local_state_off( -+ state_info.pwr_domain_state[PLAT_MAX_PWR_LVL]) == 0) { -+ sbi_printf("%s:%d\n", __func__, __LINE__); -+ sbi_hart_hang(); -+ } -+ -+ /* -+ * Do what is needed to enter the system suspend state. This function -+ * might return if the power down was abandoned for any reason, e.g. 
-+ * arrival of an interrupt -+ */ -+ rc = psci_cpu_suspend_start(/* &ep */entrypoint, -+ PLAT_MAX_PWR_LVL, -+ &state_info, -+ PSTATE_TYPE_POWERDOWN); -+ -+ return rc; -+} -diff --git a/lib/utils/psci/psci_private.h b/lib/utils/psci/psci_private.h -index d1cd2ba84742..c768d3f379ab 100644 ---- a/lib/utils/psci/psci_private.h -+++ b/lib/utils/psci/psci_private.h -@@ -142,6 +142,10 @@ int psci_cpu_suspend_start(/* const entry_point_info_t *ep */ uintptr_t entrypoi - void psci_cpu_suspend_finish(unsigned int cpu_idx, const psci_power_state_t *state_info); - void riscv_pwr_state_to_psci(unsigned int rstate, unsigned int *pstate); - -+bool psci_is_last_on_cpu(void); -+void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info); -+int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id); -+ - /* Helper function to identify a CPU standby request in PSCI Suspend call */ - static inline bool is_cpu_standby_req(unsigned int is_power_down_state, - unsigned int retn_lvl) -diff --git a/lib/utils/psci/spacemit/plat/k1x/underly_implement.c b/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -index 9976b5774039..279e6d5dc741 100644 ---- a/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -+++ b/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -@@ -27,10 +27,20 @@ - - #define PMU_ACPR_CLUSTER0_REG (0xd4051090) - #define PMU_ACPR_CLUSTER1_REG (0xd4051094) -+#define PMU_ACPR_UNKONW_REG (0xd4050038) -+ - - #define CPU_PWR_DOWN_VALUE (0x3) - #define CLUSTER_PWR_DOWN_VALUE (0x3) - #define CLUSTER_AXISDO_OFFSET (31) -+#define CLUSTER_DDRSD_OFFSET (27) -+#define CLUSTER_APBSD_OFFSET (26) -+#define CLUSTER_VCXOSD_OFFSET (19) -+#define CLUSTER_BIT29_OFFSET (29) -+#define CLUSTER_BIT14_OFFSET (14) -+#define CLUSTER_BIT30_OFFSET (30) -+#define CLUSTER_BIT25_OFFSET (25) -+#define CLUSTER_BIT13_OFFSET (13) - - struct pmu_cap_wakeup { - unsigned int pmu_cap_core0_wakeup; -@@ -39,7 +49,7 @@ struct pmu_cap_wakeup { - unsigned int pmu_cap_core3_wakeup; - }; - --/* D1P */ -+/* D1P & D2 ? */ - void spacemit_top_on(u_register_t mpidr) - { - unsigned int *cluster0_acpr = NULL; -@@ -49,15 +59,31 @@ void spacemit_top_on(u_register_t mpidr) - cluster1_acpr = (unsigned int *)PMU_ACPR_CLUSTER1_REG; - - unsigned int value = readl(cluster0_acpr); -- value &= ~(1 << CLUSTER_AXISDO_OFFSET); -+ value &= ~((1 << CLUSTER_AXISDO_OFFSET) | -+ (1 << CLUSTER_DDRSD_OFFSET) | -+ (1 << CLUSTER_APBSD_OFFSET) | -+ (1 << CLUSTER_VCXOSD_OFFSET) | -+ (1 << CLUSTER_BIT29_OFFSET) | -+ (1 << CLUSTER_BIT14_OFFSET) | -+ (1 << CLUSTER_BIT30_OFFSET) | -+ (1 << CLUSTER_BIT25_OFFSET) | -+ (1 << CLUSTER_BIT13_OFFSET)); - writel(value, cluster0_acpr); - - value = readl(cluster1_acpr); -- value &= ~(1 << CLUSTER_AXISDO_OFFSET); -+ value &= ~((1 << CLUSTER_AXISDO_OFFSET) | -+ (1 << CLUSTER_DDRSD_OFFSET) | -+ (1 << CLUSTER_APBSD_OFFSET) | -+ (1 << CLUSTER_VCXOSD_OFFSET) | -+ (1 << CLUSTER_BIT29_OFFSET) | -+ (1 << CLUSTER_BIT14_OFFSET) | -+ (1 << CLUSTER_BIT30_OFFSET) | -+ (1 << CLUSTER_BIT25_OFFSET) | -+ (1 << CLUSTER_BIT13_OFFSET)); - writel(value, cluster1_acpr); - } - --/* D1P */ -+/* D1P & D2 ? 
*/ - void spacemit_top_off(u_register_t mpidr) - { - unsigned int *cluster0_acpr = NULL; -@@ -67,12 +93,35 @@ void spacemit_top_off(u_register_t mpidr) - cluster1_acpr = (unsigned int *)PMU_ACPR_CLUSTER1_REG; - - unsigned int value = readl(cluster0_acpr); -- value |= (1 << CLUSTER_AXISDO_OFFSET); -+ value |= (1 << CLUSTER_AXISDO_OFFSET) | -+ (1 << CLUSTER_DDRSD_OFFSET) | -+ (1 << CLUSTER_APBSD_OFFSET) | -+ (1 << CLUSTER_VCXOSD_OFFSET) | -+ (1 << CLUSTER_BIT29_OFFSET) | -+ (1 << CLUSTER_BIT14_OFFSET) | -+ (1 << CLUSTER_BIT30_OFFSET) | -+ (1 << CLUSTER_BIT25_OFFSET) | -+ (1 << CLUSTER_BIT13_OFFSET); - writel(value, cluster0_acpr); - - value = readl(cluster1_acpr); -- value |= (1 << CLUSTER_AXISDO_OFFSET); -+ value |= (1 << CLUSTER_AXISDO_OFFSET) | -+ (1 << CLUSTER_DDRSD_OFFSET) | -+ (1 << CLUSTER_APBSD_OFFSET) | -+ (1 << CLUSTER_VCXOSD_OFFSET) | -+ (1 << CLUSTER_BIT29_OFFSET) | -+ (1 << CLUSTER_BIT14_OFFSET) | -+ (1 << CLUSTER_BIT30_OFFSET) | -+ (1 << CLUSTER_BIT25_OFFSET) | -+ (1 << CLUSTER_BIT13_OFFSET); - writel(value, cluster1_acpr); -+ -+ value = readl((unsigned int *)PMU_ACPR_UNKONW_REG); -+ value |= (1 << 2); -+ writel(value, (unsigned int *)PMU_ACPR_UNKONW_REG); -+ -+ /* for wakeup debug */ -+ writel(0xffff, (unsigned int *)0xd4051030); - } - - /* M2 */ -diff --git a/lib/utils/psci/spacemit/plat/plat_pm.c b/lib/utils/psci/spacemit/plat/plat_pm.c -index 464a56a277ef..da6f958157fa 100644 ---- a/lib/utils/psci/spacemit/plat/plat_pm.c -+++ b/lib/utils/psci/spacemit/plat/plat_pm.c -@@ -16,6 +16,9 @@ - #define SYSTEM_PWR_STATE(state) \ - ((state)->pwr_domain_state[PLAT_MAX_PWR_LVL]) - -+/* reserved for future used */ -+/* unsigned long __plic_regsave_offset_ptr; */ -+ - static int spacemit_pwr_domain_on(u_register_t mpidr) - { - /* wakeup the cpu */ -@@ -54,8 +57,7 @@ static void spacemit_pwr_domain_on_finish(const psci_power_state_t *target_state - static int spacemit_pwr_domain_off_early(const psci_power_state_t *target_state) - { - /* the ipi's pending is cleared before */ -- /* disable the plic irq */ -- fdt_plic_context_exit(); -+ csr_clear(CSR_MIE, MIP_SSIP | MIP_MSIP | MIP_STIP | MIP_MTIP | MIP_SEIP | MIP_MEIP); - /* clear the external irq pending */ - csr_clear(CSR_MIP, MIP_MEIP); - csr_clear(CSR_MIP, MIP_SEIP); -@@ -70,9 +72,9 @@ static int spacemit_pwr_domain_off_early(const psci_power_state_t *target_state) - - static void spacemit_pwr_domain_off(const psci_power_state_t *target_state) - { -- unsigned int hartid = current_hartid(); -+ unsigned int hartid = current_hartid(); - -- if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) { -+ if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) { - #if defined(CONFIG_PLATFORM_SPACEMIT_K1X) - /* disable the tcm */ - csr_write(CSR_TCMCFG, 0); -@@ -82,11 +84,11 @@ static void spacemit_pwr_domain_off(const psci_power_state_t *target_state) - } - - if (SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { -+ /* D1P */ - spacemit_top_off(hartid); - } - - spacemit_assert_cpu(hartid); -- - } - - static void spacemit_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state) -@@ -183,7 +185,7 @@ static void spacemit_pwr_domain_suspend(const psci_power_state_t *target_state) - } - - if (SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { -- /* D1P */ -+ /* D1P & D2 */ - spacemit_top_off(hartid); - } - -@@ -223,7 +225,7 @@ static void spacemit_pwr_domain_suspend_finish(const psci_power_state_t *target_ - } - - if (SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { -- /* D1P */ -+ /* D1P & D2 */ - 
spacemit_top_on(hartid); - } - -@@ -236,6 +238,14 @@ static void spacemit_pwr_domain_suspend_pwrdown_early(const psci_power_state_t * - csr_clear(CSR_MIE, MIP_SSIP | MIP_MSIP | MIP_STIP | MIP_MTIP | MIP_SEIP | MIP_MEIP); - } - -+static void spacemit_get_sys_suspend_power_state(psci_power_state_t *req_state) -+{ -+ int i; -+ -+ for (i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++) -+ req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE; -+} -+ - static const plat_psci_ops_t spacemit_psci_ops = { - .cpu_standby = NULL, - .pwr_domain_on = spacemit_pwr_domain_on, -@@ -248,6 +258,7 @@ static const plat_psci_ops_t spacemit_psci_ops = { - .pwr_domain_suspend = spacemit_pwr_domain_suspend, - .pwr_domain_suspend_pwrdown_early = spacemit_pwr_domain_suspend_pwrdown_early, - .pwr_domain_suspend_finish = spacemit_pwr_domain_suspend_finish, -+ .get_sys_suspend_power_state = spacemit_get_sys_suspend_power_state, - }; - - int plat_setup_psci_ops(uintptr_t sec_entrypoint, const plat_psci_ops_t **psci_ops) -diff --git a/platform/generic/spacemit/spacemit_k1.c b/platform/generic/spacemit/spacemit_k1.c -index 8664e05e7910..38794c2dfbb5 100644 ---- a/platform/generic/spacemit/spacemit_k1.c -+++ b/platform/generic/spacemit/spacemit_k1.c -@@ -21,6 +21,7 @@ - #include - #include - #include -+#include - #include - #include <../../../lib/utils/psci/psci_private.h> - #include -@@ -29,63 +30,110 @@ - - extern struct sbi_platform platform; - -+/* reserved for future use */ -+/* extern unsigned long __plic_regsave_offset_ptr; */ -+ - PLAT_CCI_MAP - - static void wakeup_other_core(void) - { -- int i; -- u32 hartid, clusterid, cluster_enabled = 0; -- unsigned int cur_hartid = current_hartid(); -- struct sbi_scratch *scratch = sbi_hartid_to_scratch(cur_hartid); -+ int i; -+ u32 hartid, clusterid, cluster_enabled = 0; -+ unsigned int cur_hartid = current_hartid(); -+ struct sbi_scratch *scratch = sbi_hartid_to_scratch(cur_hartid); - - #if defined(CONFIG_PLATFORM_SPACEMIT_K1X) -- /* set other cpu's boot-entry */ -- writel(scratch->warmboot_addr & 0xffffffff, (u32 *)C0_RVBADDR_LO_ADDR); -- writel((scratch->warmboot_addr >> 32) & 0xffffffff, (u32 *)C0_RVBADDR_HI_ADDR); -+ /* set other cpu's boot-entry */ -+ writel(scratch->warmboot_addr & 0xffffffff, (u32 *)C0_RVBADDR_LO_ADDR); -+ writel((scratch->warmboot_addr >> 32) & 0xffffffff, (u32 *)C0_RVBADDR_HI_ADDR); - -- writel(scratch->warmboot_addr & 0xffffffff, (u32 *)C1_RVBADDR_LO_ADDR); -- writel((scratch->warmboot_addr >> 32) & 0xffffffff, (u32 *)C1_RVBADDR_HI_ADDR); -+ writel(scratch->warmboot_addr & 0xffffffff, (u32 *)C1_RVBADDR_LO_ADDR); -+ writel((scratch->warmboot_addr >> 32) & 0xffffffff, (u32 *)C1_RVBADDR_HI_ADDR); - #elif defined(CONFIG_PLATFORM_SPACEMIT_K1PRO) -- for (i = 0; i < platform.hart_count; i++) { -- hartid = platform.hart_index2id[i]; -+ for (i = 0; i < platform.hart_count; i++) { -+ hartid = platform.hart_index2id[i]; - - unsigned long core_index = MPIDR_AFFLVL1_VAL(hartid) * PLATFORM_MAX_CPUS_PER_CLUSTER -- + MPIDR_AFFLVL0_VAL(hartid); -+ + MPIDR_AFFLVL0_VAL(hartid); - - writel(scratch->warmboot_addr & 0xffffffff, (u32 *)(CORE0_RVBADDR_LO_ADDR + core_index * CORE_RVBADDR_STEP)); - writel((scratch->warmboot_addr >> 32) & 0xffffffff, (u32 *)(CORE0_RVBADDR_HI_ADDR + core_index * CORE_RVBADDR_STEP)); -- } -+ } - #endif - - #ifdef CONFIG_ARM_PSCI_SUPPORT -- unsigned char *cpu_topology = plat_get_power_domain_tree_desc(); -+ unsigned char *cpu_topology = plat_get_power_domain_tree_desc(); - #endif - -- // hart0 is already boot up -- for (i = 0; i < 
platform.hart_count; i++) { -- hartid = platform.hart_index2id[i]; -+ // hart0 is already boot up -+ for (i = 0; i < platform.hart_count; i++) { -+ hartid = platform.hart_index2id[i]; - -- clusterid = MPIDR_AFFLVL1_VAL(hartid); -+ clusterid = MPIDR_AFFLVL1_VAL(hartid); - -- /* we only enable snoop of cluster0 */ -- if (0 == (cluster_enabled & (1 << clusterid))) { -- cluster_enabled |= 1 << clusterid; -- if (0 == clusterid) { -- cci_enable_snoop_dvm_reqs(clusterid); -- } -+ /* we only enable snoop of cluster0 */ -+ if (0 == (cluster_enabled & (1 << clusterid))) { -+ cluster_enabled |= 1 << clusterid; -+ if (0 == clusterid) { -+ cci_enable_snoop_dvm_reqs(clusterid); -+ } - #ifdef CONFIG_ARM_PSCI_SUPPORT -- cpu_topology[CLUSTER_INDEX_IN_CPU_TOPOLOGY]++; -+ cpu_topology[CLUSTER_INDEX_IN_CPU_TOPOLOGY]++; - #endif -- } -+ } - - #ifdef CONFIG_ARM_PSCI_SUPPORT -- /* we only support 2 cluster by now */ -- if (clusterid == PLATFORM_CLUSTER_COUNT - 1) -- cpu_topology[CLUSTER1_INDEX_IN_CPU_TOPOLOGY]++; -- else -- cpu_topology[CLUSTER0_INDEX_IN_CPU_TOPOLOGY]++; -+ /* we only support 2 cluster by now */ -+ if (clusterid == PLATFORM_CLUSTER_COUNT - 1) -+ cpu_topology[CLUSTER1_INDEX_IN_CPU_TOPOLOGY]++; -+ else -+ cpu_topology[CLUSTER0_INDEX_IN_CPU_TOPOLOGY]++; - #endif -- } -+ } -+ -+/** -+ * // reserved for future used -+ * // get the number of plic registers -+ * u32 *regnum_pos; -+ * int noff = -1, fdtlen, regnum, regsize; -+ * const fdt32_t *fdtval; -+ * void *fdt = fdt_get_address(); -+ * const struct fdt_match match_table = { .compatible = "riscv,plic0", }; -+ * -+ * noff = fdt_find_match(fdt, noff, &match_table, NULL); -+ * if (noff >= 0) { -+ * fdtval = fdt_getprop(fdt, noff, "riscv,ndev", &fdtlen); -+ * if (fdtlen > 0) { -+ * regnum = fdt32_to_cpu(*fdtval); -+ * regsize = -+ * // regnum + regsize -+ * sizeof(u32) + sizeof(u32) + -+ * // plic priority regisrer -+ * sizeof(u8) * regnum + -+ * // plic enable register -+ * (sizeof(u32) * (regnum / 32 + 1) + -+ * // plic threshold regisrer -+ * sizeof (u32) * 1) * 2; // smode and machine mode -+ * -+ * __plic_regsave_offset_ptr = sbi_scratch_alloc_offset(regsize); -+ * if (__plic_regsave_offset_ptr == 0) { -+ * sbi_hart_hang(); -+ * } -+ * } -+ * } -+ * -+ * if (__plic_regsave_offset_ptr) { -+ * for (i = 0; i < platform.hart_count; i++) { -+ * hartid = platform.hart_index2id[i]; -+ * scratch = sbi_hartid_to_scratch(hartid); -+ * u32 *regnum_pos = sbi_scratch_offset_ptr(scratch, __plic_regsave_offset_ptr); -+ * -+ * regnum_pos[0] = regnum; -+ * regnum_pos[1] = regsize; -+ * csi_dcache_clean_invalid_range((uintptr_t)regnum_pos, regsize); -+ * } -+ * } -+ */ - } - - /* -@@ -93,24 +141,24 @@ static void wakeup_other_core(void) - */ - static int spacemit_k1_early_init(bool cold_boot, const struct fdt_match *match) - { -- if (cold_boot) { -- /* initiate cci */ -- cci_init(PLATFORM_CCI_ADDR, cci_map, array_size(cci_map)); -- /* enable dcache */ -- csi_enable_dcache(); -- /* wakeup other core ? */ -- wakeup_other_core(); -- /* initialize */ -+ if (cold_boot) { -+ /* initiate cci */ -+ cci_init(PLATFORM_CCI_ADDR, cci_map, array_size(cci_map)); -+ /* enable dcache */ -+ csi_enable_dcache(); -+ /* wakeup other core ? 
*/ -+ wakeup_other_core(); -+ /* initialize */ - #ifdef CONFIG_ARM_SCMI_PROTOCOL_SUPPORT -- plat_arm_pwrc_setup(); -+ plat_arm_pwrc_setup(); - #endif -- } else { -+ } else { - #ifdef CONFIG_ARM_PSCI_SUPPORT -- psci_warmboot_entrypoint(); -+ psci_warmboot_entrypoint(); - #endif -- } -+ } - -- return 0; -+ return 0; - } - - #ifdef CONFIG_ARM_PSCI_SUPPORT -@@ -127,14 +175,12 @@ static int spacemit_hart_start(unsigned int hartid, unsigned long saddr) - static int spacemit_hart_stop(void) - { - psci_cpu_off(); -- - return 0; - } - - static int spacemit_hart_suspend(unsigned int suspend_type) - { - psci_cpu_suspend(suspend_type, 0, 0); -- - return 0; - } - -@@ -150,6 +196,27 @@ static const struct sbi_hsm_device spacemit_hsm_ops = { - .hart_suspend = spacemit_hart_suspend, - .hart_resume = spacemit_hart_resume, - }; -+ -+static int spacemit_system_suspend_check(u32 sleep_type) -+{ -+ return sleep_type == SBI_SUSP_SLEEP_TYPE_SUSPEND ? 0 : SBI_EINVAL; -+} -+ -+static int spacemit_system_suspend(u32 sleep_type, unsigned long mmode_resume_addr) -+{ -+ if (sleep_type != SBI_SUSP_SLEEP_TYPE_SUSPEND) -+ return SBI_EINVAL; -+ -+ psci_system_suspend(mmode_resume_addr, 0); -+ -+ return SBI_OK; -+} -+ -+static struct sbi_system_suspend_device spacemit_system_suspend_ops = { -+ .name = "spacemit-system-suspend", -+ .system_suspend_check = spacemit_system_suspend_check, -+ .system_suspend = spacemit_system_suspend, -+}; - #endif - - /* -@@ -158,14 +225,16 @@ static const struct sbi_hsm_device spacemit_hsm_ops = { - static int spacemit_k1_final_init(bool cold_boot, const struct fdt_match *match) - { - #ifdef CONFIG_ARM_PSCI_SUPPORT -- /* for clod boot, we build the cpu topology structure */ -- if (cold_boot) { -- sbi_hsm_set_device(&spacemit_hsm_ops); -- return psci_setup(); -- } -+ /* for clod boot, we build the cpu topology structure */ -+ if (cold_boot) { -+ sbi_hsm_set_device(&spacemit_hsm_ops); -+ /* register system-suspend ops */ -+ sbi_system_suspend_set_device(&spacemit_system_suspend_ops); -+ return psci_setup(); -+ } - #endif - -- return 0; -+ return 0; - } - - static bool spacemit_cold_boot_allowed(u32 hartid, const struct fdt_match *match) --- -2.35.3 - diff --git a/patch/atf/atf-spacemit/003-Update-for-v1.0rc1.patch b/patch/atf/atf-spacemit/003-Update-for-v1.0rc1.patch deleted file mode 100644 index 78eaea378fa5..000000000000 --- a/patch/atf/atf-spacemit/003-Update-for-v1.0rc1.patch +++ /dev/null @@ -1,331 +0,0 @@ -From 41a15ab971400502e93bbbf0d7336fa81daf25c9 Mon Sep 17 00:00:00 2001 -From: James Deng -Date: Tue, 30 Apr 2024 17:48:07 +0800 -Subject: Update for v1.0rc1 - ---- - include/sbi_utils/cache/cacheflush.h | 34 +++++++++----- - lib/sbi/sbi_hsm.c | 2 +- - lib/utils/psci/psci_private.h | 2 +- - .../spacemit/plat/k1x/underly_implement.c | 37 --------------- - lib/utils/psci/spacemit/plat/plat_pm.c | 4 +- - .../generic/include/spacemit/k1x/k1x_evb.h | 44 ++++++++++++++++- - .../generic/include/spacemit/k1x/k1x_fpga.h | 47 +++++++++++++++++-- - platform/generic/spacemit/spacemit_k1.c | 13 +++++ - 8 files changed, 126 insertions(+), 57 deletions(-) - -diff --git a/include/sbi_utils/cache/cacheflush.h b/include/sbi_utils/cache/cacheflush.h -index c3e353229f75..126931b25888 100644 ---- a/include/sbi_utils/cache/cacheflush.h -+++ b/include/sbi_utils/cache/cacheflush.h -@@ -167,26 +167,34 @@ static inline void __mdelay(void) - cpu_relax(); - } - --static inline void csi_flush_l2_cache(void) -+static inline void csi_flush_l2_cache(bool hw) - { - unsigned int hartid = current_hartid(); - - uintptr_t 
*cr =(MPIDR_AFFLVL1_VAL(hartid) == 0) ? (uintptr_t *)CLUSTER0_L2_CACHE_FLUSH_REG_BASE : - (uintptr_t *)CLUSTER1_L2_CACHE_FLUSH_REG_BASE; - -- /* flush l2 cache */ -- writel(readl(cr) | (1 << L2_CACHE_FLUSH_REQUEST_BIT_OFFSET), cr); -- /* k1pro */ -- if (L2_CACHE_FLUSH_REQUEST_BIT_OFFSET == L2_CACHE_FLUSH_DONE_BIT_OFFSET) -- while (readl(cr) & (1 << L2_CACHE_FLUSH_DONE_BIT_OFFSET)); -- else /* k1x */ { -- /* clear the request */ -- while (1) { -- if ((readl(cr) & (1 << L2_CACHE_FLUSH_DONE_BIT_OFFSET)) == 0) -- break; -- __mdelay(); -+ if (!hw) { -+ writel(0x0, cr); -+ /* flush l2 cache */ -+ writel(readl(cr) | (1 << L2_CACHE_FLUSH_REQUEST_BIT_OFFSET), cr); -+ /* k1pro */ -+ if (L2_CACHE_FLUSH_REQUEST_BIT_OFFSET == L2_CACHE_FLUSH_DONE_BIT_OFFSET) -+ while (readl(cr) & (1 << L2_CACHE_FLUSH_DONE_BIT_OFFSET)); -+ else /* k1x */ { -+ /* clear the request */ -+ while (1) { -+ if ((readl(cr) & (1 << L2_CACHE_FLUSH_DONE_BIT_OFFSET)) == 0) -+ break; -+ __mdelay(); -+ } -+ writel(readl(cr) & ~(1 << L2_CACHE_FLUSH_REQUEST_BIT_OFFSET), cr); - } -- writel(readl(cr) & ~(1 << L2_CACHE_FLUSH_REQUEST_BIT_OFFSET), cr); -+ } else { -+ /* k1pro */ -+ if (L2_CACHE_FLUSH_REQUEST_BIT_OFFSET == L2_CACHE_FLUSH_DONE_BIT_OFFSET) -+ return /* do nothing */; -+ writel((1 << L2_CACHE_FLUSH_HW_TYPE_BIT_OFFSET) | (1 << L2_CACHE_FLUSH_HW_EN_BIT_OFFSET), cr); - } - } - #endif -diff --git a/lib/sbi/sbi_hsm.c b/lib/sbi/sbi_hsm.c -index acd3c9e04c87..51c982ad7b78 100644 ---- a/lib/sbi/sbi_hsm.c -+++ b/lib/sbi/sbi_hsm.c -@@ -183,7 +183,7 @@ void __noreturn sbi_hsm_hart_start_finish(struct sbi_scratch *scratch, - * */ - if (cool_boot) { - csi_flush_dcache_all(); -- csi_flush_l2_cache(); -+ csi_flush_l2_cache(0); - } - - sbi_hart_switch_mode(hartid, next_arg1, next_addr, next_mode, false); -diff --git a/lib/utils/psci/psci_private.h b/lib/utils/psci/psci_private.h -index c768d3f379ab..0a3f260f5c39 100644 ---- a/lib/utils/psci/psci_private.h -+++ b/lib/utils/psci/psci_private.h -@@ -182,7 +182,7 @@ static inline void psci_do_pwrdown_cache_maintenance(int hartid, uintptr_t scrat - /* disable the tcm */ - csr_write(CSR_TCMCFG, 0); - #endif -- csi_flush_l2_cache(); -+ csi_flush_l2_cache(0); - } - - /* disable dcache */ -diff --git a/lib/utils/psci/spacemit/plat/k1x/underly_implement.c b/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -index 279e6d5dc741..73feec440d27 100644 ---- a/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -+++ b/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -@@ -5,43 +5,6 @@ - #include - #include - --#define C1_CPU_RESET_BASE_ADDR (0xD4282B24) -- --#define PMU_CAP_CORE0_IDLE_CFG (0xd4282924) --#define PMU_CAP_CORE1_IDLE_CFG (0xd4282928) --#define PMU_CAP_CORE2_IDLE_CFG (0xd4282960) --#define PMU_CAP_CORE3_IDLE_CFG (0xd4282964) --#define PMU_CAP_CORE4_IDLE_CFG (0xd4282b04) --#define PMU_CAP_CORE5_IDLE_CFG (0xd4282b08) --#define PMU_CAP_CORE6_IDLE_CFG (0xd4282b0c) --#define PMU_CAP_CORE7_IDLE_CFG (0xd4282b10) -- --#define PMU_C0_CAPMP_IDLE_CFG0 (0xd4282920) --#define PMU_C0_CAPMP_IDLE_CFG1 (0xd42828e4) --#define PMU_C0_CAPMP_IDLE_CFG2 (0xd4282950) --#define PMU_C0_CAPMP_IDLE_CFG3 (0xd4282954) --#define PMU_C1_CAPMP_IDLE_CFG0 (0xd4282b14) --#define PMU_C1_CAPMP_IDLE_CFG1 (0xd4282b18) --#define PMU_C1_CAPMP_IDLE_CFG2 (0xd4282b1c) --#define PMU_C1_CAPMP_IDLE_CFG3 (0xd4282b20) -- --#define PMU_ACPR_CLUSTER0_REG (0xd4051090) --#define PMU_ACPR_CLUSTER1_REG (0xd4051094) --#define PMU_ACPR_UNKONW_REG (0xd4050038) -- -- --#define CPU_PWR_DOWN_VALUE (0x3) --#define CLUSTER_PWR_DOWN_VALUE (0x3) --#define 
CLUSTER_AXISDO_OFFSET (31) --#define CLUSTER_DDRSD_OFFSET (27) --#define CLUSTER_APBSD_OFFSET (26) --#define CLUSTER_VCXOSD_OFFSET (19) --#define CLUSTER_BIT29_OFFSET (29) --#define CLUSTER_BIT14_OFFSET (14) --#define CLUSTER_BIT30_OFFSET (30) --#define CLUSTER_BIT25_OFFSET (25) --#define CLUSTER_BIT13_OFFSET (13) -- - struct pmu_cap_wakeup { - unsigned int pmu_cap_core0_wakeup; - unsigned int pmu_cap_core1_wakeup; -diff --git a/lib/utils/psci/spacemit/plat/plat_pm.c b/lib/utils/psci/spacemit/plat/plat_pm.c -index da6f958157fa..a5b91270834f 100644 ---- a/lib/utils/psci/spacemit/plat/plat_pm.c -+++ b/lib/utils/psci/spacemit/plat/plat_pm.c -@@ -7,6 +7,7 @@ - #include - #include - #include -+#include - #include "underly_implement.h" - - #define CORE_PWR_STATE(state) \ -@@ -81,6 +82,7 @@ static void spacemit_pwr_domain_off(const psci_power_state_t *target_state) - #endif - cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(hartid)); - spacemit_cluster_off(hartid); -+ csi_flush_l2_cache(1); - } - - if (SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { -@@ -180,8 +182,8 @@ static void spacemit_pwr_domain_suspend(const psci_power_state_t *target_state) - csr_write(CSR_TCMCFG, 0); - #endif - cci_disable_snoop_dvm_reqs(clusterid); -- - spacemit_cluster_off(hartid); -+ csi_flush_l2_cache(1); - } - - if (SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { -diff --git a/platform/generic/include/spacemit/k1x/k1x_evb.h b/platform/generic/include/spacemit/k1x/k1x_evb.h -index b951105e0c04..5f5b672a61a6 100644 ---- a/platform/generic/include/spacemit/k1x/k1x_evb.h -+++ b/platform/generic/include/spacemit/k1x/k1x_evb.h -@@ -24,6 +24,45 @@ - #define C1_RVBADDR_LO_ADDR (0xD4282C00 + 0x2B0) - #define C1_RVBADDR_HI_ADDR (0xD4282C00 + 0X2B4) - -+#define C1_CPU_RESET_BASE_ADDR (0xD4282B24) -+ -+#define PMU_CAP_CORE0_IDLE_CFG (0xd4282924) -+#define PMU_CAP_CORE1_IDLE_CFG (0xd4282928) -+#define PMU_CAP_CORE2_IDLE_CFG (0xd4282960) -+#define PMU_CAP_CORE3_IDLE_CFG (0xd4282964) -+#define PMU_CAP_CORE4_IDLE_CFG (0xd4282b04) -+#define PMU_CAP_CORE5_IDLE_CFG (0xd4282b08) -+#define PMU_CAP_CORE6_IDLE_CFG (0xd4282b0c) -+#define PMU_CAP_CORE7_IDLE_CFG (0xd4282b10) -+ -+#define PMU_C0_CAPMP_IDLE_CFG0 (0xd4282920) -+#define PMU_C0_CAPMP_IDLE_CFG1 (0xd42828e4) -+#define PMU_C0_CAPMP_IDLE_CFG2 (0xd4282950) -+#define PMU_C0_CAPMP_IDLE_CFG3 (0xd4282954) -+#define PMU_C1_CAPMP_IDLE_CFG0 (0xd4282b14) -+#define PMU_C1_CAPMP_IDLE_CFG1 (0xd4282b18) -+#define PMU_C1_CAPMP_IDLE_CFG2 (0xd4282b1c) -+#define PMU_C1_CAPMP_IDLE_CFG3 (0xd4282b20) -+ -+#define PMU_ACPR_CLUSTER0_REG (0xd4051090) -+#define PMU_ACPR_CLUSTER1_REG (0xd4051094) -+#define PMU_ACPR_UNKONW_REG (0xd4050038) -+ -+ -+#define CPU_PWR_DOWN_VALUE (0x3) -+#define CLUSTER_PWR_DOWN_VALUE (0x3) -+#define CLUSTER_AXISDO_OFFSET (31) -+#define CLUSTER_DDRSD_OFFSET (27) -+#define CLUSTER_APBSD_OFFSET (26) -+#define CLUSTER_VCXOSD_OFFSET (19) -+#define CLUSTER_BIT29_OFFSET (29) -+#define CLUSTER_BIT14_OFFSET (14) -+#define CLUSTER_BIT30_OFFSET (30) -+#define CLUSTER_BIT25_OFFSET (25) -+#define CLUSTER_BIT13_OFFSET (13) -+ -+#define L2_HARDWARE_CACHE_FLUSH_EN (13) -+ - /***************************mailbox***************************/ - #define SCMI_MAILBOX_SHARE_MEM (0x2f902080) - #define PLAT_MAILBOX_REG_BASE (0x2f824000) -@@ -66,7 +105,10 @@ - #define CLUSTER0_L2_CACHE_FLUSH_REG_BASE (0xD84401B0) - #define CLUSTER1_L2_CACHE_FLUSH_REG_BASE (0xD84401B4) - --#define L2_CACHE_FLUSH_REQUEST_BIT_OFFSET (0x1) -+#define L2_CACHE_FLUSH_REQUEST_BIT_OFFSET (0x1) /* sw flush l2 cache */ - 
#define L2_CACHE_FLUSH_DONE_BIT_OFFSET (0x3) - -+#define L2_CACHE_FLUSH_HW_TYPE_BIT_OFFSET (0) -+#define L2_CACHE_FLUSH_HW_EN_BIT_OFFSET (0x2) -+ - #endif /* __K1X_EVB_CONFIG_H__ */ -diff --git a/platform/generic/include/spacemit/k1x/k1x_fpga.h b/platform/generic/include/spacemit/k1x/k1x_fpga.h -index 4748c86b69c2..3d8964c861c4 100644 ---- a/platform/generic/include/spacemit/k1x/k1x_fpga.h -+++ b/platform/generic/include/spacemit/k1x/k1x_fpga.h -@@ -24,13 +24,51 @@ - #define C1_RVBADDR_LO_ADDR (0xD4282C00 + 0x2B0) - #define C1_RVBADDR_HI_ADDR (0xD4282C00 + 0X2B4) - -+#define C1_CPU_RESET_BASE_ADDR (0xD4282B24) -+ -+#define PMU_CAP_CORE0_IDLE_CFG (0xd4282924) -+#define PMU_CAP_CORE1_IDLE_CFG (0xd4282928) -+#define PMU_CAP_CORE2_IDLE_CFG (0xd4282960) -+#define PMU_CAP_CORE3_IDLE_CFG (0xd4282964) -+#define PMU_CAP_CORE4_IDLE_CFG (0xd4282b04) -+#define PMU_CAP_CORE5_IDLE_CFG (0xd4282b08) -+#define PMU_CAP_CORE6_IDLE_CFG (0xd4282b0c) -+#define PMU_CAP_CORE7_IDLE_CFG (0xd4282b10) -+ -+#define PMU_C0_CAPMP_IDLE_CFG0 (0xd4282920) -+#define PMU_C0_CAPMP_IDLE_CFG1 (0xd42828e4) -+#define PMU_C0_CAPMP_IDLE_CFG2 (0xd4282950) -+#define PMU_C0_CAPMP_IDLE_CFG3 (0xd4282954) -+#define PMU_C1_CAPMP_IDLE_CFG0 (0xd4282b14) -+#define PMU_C1_CAPMP_IDLE_CFG1 (0xd4282b18) -+#define PMU_C1_CAPMP_IDLE_CFG2 (0xd4282b1c) -+#define PMU_C1_CAPMP_IDLE_CFG3 (0xd4282b20) -+ -+#define PMU_ACPR_CLUSTER0_REG (0xd4051090) -+#define PMU_ACPR_CLUSTER1_REG (0xd4051094) -+#define PMU_ACPR_UNKONW_REG (0xd4050038) -+ -+ -+#define CPU_PWR_DOWN_VALUE (0x3) -+#define CLUSTER_PWR_DOWN_VALUE (0x3) -+#define CLUSTER_AXISDO_OFFSET (31) -+#define CLUSTER_DDRSD_OFFSET (27) -+#define CLUSTER_APBSD_OFFSET (26) -+#define CLUSTER_VCXOSD_OFFSET (19) -+#define CLUSTER_BIT29_OFFSET (29) -+#define CLUSTER_BIT14_OFFSET (14) -+#define CLUSTER_BIT30_OFFSET (30) -+#define CLUSTER_BIT25_OFFSET (25) -+#define CLUSTER_BIT13_OFFSET (13) -+ -+#define L2_HARDWARE_CACHE_FLUSH_EN (13) -+ - /***************************mailbox***************************/ - #define SCMI_MAILBOX_SHARE_MEM (0x2f902080) - #define PLAT_MAILBOX_REG_BASE (0x2f824000) - - /****************************scmi*****************************/ --#define PLAT_SCMI_SINGLE_CLUSTER_DOMAIN_MAP {0, 1, 2, 3} --#define PLAT_SCMI_DOUBLE_CLUSTER_DOMAIN_MAP {0, 1, 4, 5} -+#define PLAT_SCMI_DOMAIN_MAP {0, 1, 2, 3} - - /*************************cpu topology************************/ - #define ARM_SYSTEM_COUNT (1U) -@@ -67,7 +105,10 @@ - #define CLUSTER0_L2_CACHE_FLUSH_REG_BASE (0xD84401B0) - #define CLUSTER1_L2_CACHE_FLUSH_REG_BASE (0xD84401B4) - --#define L2_CACHE_FLUSH_REQUEST_BIT_OFFSET (0x1) -+#define L2_CACHE_FLUSH_REQUEST_BIT_OFFSET (0x1) /* sw flush l2 cache */ - #define L2_CACHE_FLUSH_DONE_BIT_OFFSET (0x3) - -+#define L2_CACHE_FLUSH_HW_TYPE_BIT_OFFSET (0) -+#define L2_CACHE_FLUSH_HW_EN_BIT_OFFSET (0x2) -+ - #endif /* __K1X_FPGA_CONFIG_H__ */ -diff --git a/platform/generic/spacemit/spacemit_k1.c b/platform/generic/spacemit/spacemit_k1.c -index 38794c2dfbb5..95218846715f 100644 ---- a/platform/generic/spacemit/spacemit_k1.c -+++ b/platform/generic/spacemit/spacemit_k1.c -@@ -65,6 +65,19 @@ static void wakeup_other_core(void) - unsigned char *cpu_topology = plat_get_power_domain_tree_desc(); - #endif - -+#if defined(CONFIG_PLATFORM_SPACEMIT_K1X) -+ /* enable the hw l2 cache flush method for each core */ -+ writel(readl((u32 *)PMU_C0_CAPMP_IDLE_CFG0) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C0_CAPMP_IDLE_CFG0); -+ writel(readl((u32 *)PMU_C0_CAPMP_IDLE_CFG1) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 
*)PMU_C0_CAPMP_IDLE_CFG1); -+ writel(readl((u32 *)PMU_C0_CAPMP_IDLE_CFG2) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C0_CAPMP_IDLE_CFG2); -+ writel(readl((u32 *)PMU_C0_CAPMP_IDLE_CFG3) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C0_CAPMP_IDLE_CFG3); -+ -+ writel(readl((u32 *)PMU_C1_CAPMP_IDLE_CFG0) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C1_CAPMP_IDLE_CFG0); -+ writel(readl((u32 *)PMU_C1_CAPMP_IDLE_CFG1) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C1_CAPMP_IDLE_CFG1); -+ writel(readl((u32 *)PMU_C1_CAPMP_IDLE_CFG2) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C1_CAPMP_IDLE_CFG2); -+ writel(readl((u32 *)PMU_C1_CAPMP_IDLE_CFG3) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C1_CAPMP_IDLE_CFG3); -+#endif -+ - // hart0 is already boot up - for (i = 0; i < platform.hart_count; i++) { - hartid = platform.hart_index2id[i]; --- -2.35.3 - diff --git a/patch/atf/atf-spacemit/004-Update-for-v1.0.patch b/patch/atf/atf-spacemit/004-Update-for-v1.0.patch deleted file mode 100644 index 542c4578cf25..000000000000 --- a/patch/atf/atf-spacemit/004-Update-for-v1.0.patch +++ /dev/null @@ -1,42 +0,0 @@ -From ce6e8eec55a62d9e4cb5f5a767e50e9d0c2659ff Mon Sep 17 00:00:00 2001 -From: James Deng -Date: Thu, 30 May 2024 23:19:43 +0800 -Subject: Update for v1.0 - ---- - debian/opensbi-spacemit.postinst | 9 ++++++++- - 1 file changed, 8 insertions(+), 1 deletion(-) - -diff --git a/debian/opensbi-spacemit.postinst b/debian/opensbi-spacemit.postinst -index 1f6feca80674..9ce082ef548b 100755 ---- a/debian/opensbi-spacemit.postinst -+++ b/debian/opensbi-spacemit.postinst -@@ -22,9 +22,16 @@ configure) - case $ROOT in - "/dev/mmcblk0"*) - OPENSBI=/dev/mmcblk0p3 -+ OPENSBI_SEEK=0 - ;; - "/dev/mmcblk2"*) - OPENSBI=/dev/mmcblk2p3 -+ OPENSBI_SEEK=0 -+ ;; -+ "/dev/nvme0n1"*) -+ OPENSBI=/dev/mtdblock0 -+ # 以KB为单位 -+ OPENSBI_SEEK=448 - ;; - *) - echo "Unsupported root=$ROOT" -@@ -37,7 +44,7 @@ configure) - fi - - if [ -n "$target" ] && [ -e $OPENSBI ]; then -- dd if=/usr/lib/riscv64-linux-gnu/opensbi/generic/fw_dynamic.itb of=$OPENSBI bs=1 && sync -+ dd if=/usr/lib/riscv64-linux-gnu/opensbi/generic/fw_dynamic.itb of=$OPENSBI seek=$OPENSBI_SEEK bs=1K && sync - fi - ;; - esac --- -2.35.3 - diff --git a/patch/atf/atf-spacemit/005-Update-for-v1.0.3.patch b/patch/atf/atf-spacemit/005-Update-for-v1.0.3.patch deleted file mode 100644 index c490680ebd1e..000000000000 --- a/patch/atf/atf-spacemit/005-Update-for-v1.0.3.patch +++ /dev/null @@ -1,86 +0,0 @@ -From 6f1344573d4ce0638d24d960e9a7d5ff1b0426b6 Mon Sep 17 00:00:00 2001 -From: James Deng -Date: Wed, 19 Jun 2024 15:18:09 +0800 -Subject: Update for v1.0.3 - ---- - .../spacemit/plat/k1x/underly_implement.c | 24 ++++++++++++++++++- - .../generic/include/spacemit/k1x/k1x_evb.h | 4 ++-- - 2 files changed, 25 insertions(+), 3 deletions(-) - -diff --git a/lib/utils/psci/spacemit/plat/k1x/underly_implement.c b/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -index 73feec440d27..654da2d1a926 100644 ---- a/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -+++ b/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -@@ -44,6 +44,12 @@ void spacemit_top_on(u_register_t mpidr) - (1 << CLUSTER_BIT25_OFFSET) | - (1 << CLUSTER_BIT13_OFFSET)); - writel(value, cluster1_acpr); -+ -+ /* enable the gpio edge detected function again -+ * */ -+ value = readl((unsigned int *)0xd4051000); -+ value &= ~(1 << 21); -+ writel(value, (unsigned int *)0xd4051000); - } - - /* D1P & D2 ? 
*/ -@@ -60,6 +66,7 @@ void spacemit_top_off(u_register_t mpidr) - (1 << CLUSTER_DDRSD_OFFSET) | - (1 << CLUSTER_APBSD_OFFSET) | - (1 << CLUSTER_VCXOSD_OFFSET) | -+ (1 << 3) | - (1 << CLUSTER_BIT29_OFFSET) | - (1 << CLUSTER_BIT14_OFFSET) | - (1 << CLUSTER_BIT30_OFFSET) | -@@ -72,6 +79,7 @@ void spacemit_top_off(u_register_t mpidr) - (1 << CLUSTER_DDRSD_OFFSET) | - (1 << CLUSTER_APBSD_OFFSET) | - (1 << CLUSTER_VCXOSD_OFFSET) | -+ (1 << 3) | - (1 << CLUSTER_BIT29_OFFSET) | - (1 << CLUSTER_BIT14_OFFSET) | - (1 << CLUSTER_BIT30_OFFSET) | -@@ -80,9 +88,23 @@ void spacemit_top_off(u_register_t mpidr) - writel(value, cluster1_acpr); - - value = readl((unsigned int *)PMU_ACPR_UNKONW_REG); -- value |= (1 << 2); -+ value |= (1 << 2) | (1 << 0); - writel(value, (unsigned int *)PMU_ACPR_UNKONW_REG); - -+ /* disable the gpio edge detect function -+ * this may cause the system cann't enter D2 -+ * */ -+ value = readl((unsigned int *)0xd4051000); -+ value |= (1 << 21); -+ writel(value, (unsigned int *)0xd4051000); -+ -+ /* enable the refbuf function which will enhance the -+ * driving capability of the internal 26M to PLL path -+ * */ -+ value = readl((unsigned int *)0xd4090104); -+ value |= (1 << 22); -+ writel(value, (unsigned int *)0xd4090104); -+ - /* for wakeup debug */ - writel(0xffff, (unsigned int *)0xd4051030); - } -diff --git a/platform/generic/include/spacemit/k1x/k1x_evb.h b/platform/generic/include/spacemit/k1x/k1x_evb.h -index 5f5b672a61a6..10e856965618 100644 ---- a/platform/generic/include/spacemit/k1x/k1x_evb.h -+++ b/platform/generic/include/spacemit/k1x/k1x_evb.h -@@ -49,8 +49,8 @@ - #define PMU_ACPR_UNKONW_REG (0xd4050038) - - --#define CPU_PWR_DOWN_VALUE (0x3) --#define CLUSTER_PWR_DOWN_VALUE (0x3) -+#define CPU_PWR_DOWN_VALUE (0x1b) -+#define CLUSTER_PWR_DOWN_VALUE (0x7) - #define CLUSTER_AXISDO_OFFSET (31) - #define CLUSTER_DDRSD_OFFSET (27) - #define CLUSTER_APBSD_OFFSET (26) --- -2.35.3 - diff --git a/patch/atf/atf-spacemit/006-Update-for-v1.0.7.patch b/patch/atf/atf-spacemit/006-Update-for-v1.0.7.patch deleted file mode 100644 index 81a001b7b1d8..000000000000 --- a/patch/atf/atf-spacemit/006-Update-for-v1.0.7.patch +++ /dev/null @@ -1,94 +0,0 @@ -From 94bf83cc0bd1c86e51f48174fa17e23427903c59 Mon Sep 17 00:00:00 2001 -From: James Deng -Date: Thu, 11 Jul 2024 14:56:36 +0800 -Subject: Update for v1.0.7 - ---- - debian/opensbi-spacemit.postinst | 24 ++++++++++++++----- - .../spacemit/plat/k1x/underly_implement.c | 13 ---------- - 2 files changed, 18 insertions(+), 19 deletions(-) - -diff --git a/debian/opensbi-spacemit.postinst b/debian/opensbi-spacemit.postinst -index 9ce082ef548b..dce7154ac8f4 100755 ---- a/debian/opensbi-spacemit.postinst -+++ b/debian/opensbi-spacemit.postinst -@@ -4,9 +4,11 @@ set -e - case "$1" in - configure) - target="" -- if grep -q '^spacemit' /sys/firmware/devicetree/base/model; then -+ if grep -q '^spacemit' /sys/firmware/devicetree/base/model || grep -q '^spacemit' /sys/devices/soc0/family; then - target="spacemit" - else -+ echo "Neither /sys/firmware/devicetree/base/model nor /sys/devices/soc0/family starts with 'spacemit'." -+ echo "This may indicate that you are installing this package in a chroot environment." 
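Across the power-management hunks above, the firmware votes for or against the deep idle states by setting and clearing individual bits in memory-mapped PMU registers with a read-modify-write, leaving the neighbouring bits untouched. A simplified sketch of that idiom against a stand-in register (the bit position, helper names and use of a plain variable are invented for illustration, not the K1 register map):

#include <stdint.h>
#include <stdio.h>

#define VOTE_SLEEP_BIT (1u << 3)	/* invented bit position */

/* In firmware this would be a fixed MMIO address; a plain variable lets the
 * sketch run anywhere. */
static uint32_t fake_acpr = 0x80000001u;

static uint32_t demo_readl(volatile uint32_t *reg)          { return *reg; }
static void demo_writel(uint32_t v, volatile uint32_t *reg) { *reg = v; }

/* Set the sleep vote without disturbing the other bits in the register. */
static void vote_sleep(volatile uint32_t *acpr)
{
	demo_writel(demo_readl(acpr) | VOTE_SLEEP_BIT, acpr);
}

/* Withdraw only the sleep vote on the resume path. */
static void unvote_sleep(volatile uint32_t *acpr)
{
	demo_writel(demo_readl(acpr) & ~VOTE_SLEEP_BIT, acpr);
}

int main(void)
{
	vote_sleep(&fake_acpr);
	printf("after vote:   0x%08x\n", (unsigned)fake_acpr);
	unvote_sleep(&fake_acpr);
	printf("after unvote: 0x%08x\n", (unsigned)fake_acpr);
	return 0;
}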
- exit 0 - fi - -@@ -35,17 +37,27 @@ configure) - ;; - *) - echo "Unsupported root=$ROOT" -- exit 0 -+ exit 1 - ;; - esac - else - echo "Missing root= in cmdline" -- exit 0 -+ exit 1 - fi - -- if [ -n "$target" ] && [ -e $OPENSBI ]; then -- dd if=/usr/lib/riscv64-linux-gnu/opensbi/generic/fw_dynamic.itb of=$OPENSBI seek=$OPENSBI_SEEK bs=1K && sync -- fi -+ # 待检查文件/分区列表 -+ files="/usr/lib/riscv64-linux-gnu/opensbi/generic/fw_dynamic.itb $OPENSBI" -+ for file in $files; do -+ if [ ! -e "$file" ]; then -+ # 任意不存在则退出 -+ echo "Missing $file" -+ exit 1 -+ fi -+ done -+ -+ # 此前已经做了所有检查 -+ dd if=/usr/lib/riscv64-linux-gnu/opensbi/generic/fw_dynamic.itb of=$OPENSBI seek=$OPENSBI_SEEK bs=1K && sync -+ - ;; - esac - -diff --git a/lib/utils/psci/spacemit/plat/k1x/underly_implement.c b/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -index 654da2d1a926..94d53bf51d0f 100644 ---- a/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -+++ b/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -@@ -44,12 +44,6 @@ void spacemit_top_on(u_register_t mpidr) - (1 << CLUSTER_BIT25_OFFSET) | - (1 << CLUSTER_BIT13_OFFSET)); - writel(value, cluster1_acpr); -- -- /* enable the gpio edge detected function again -- * */ -- value = readl((unsigned int *)0xd4051000); -- value &= ~(1 << 21); -- writel(value, (unsigned int *)0xd4051000); - } - - /* D1P & D2 ? */ -@@ -91,13 +85,6 @@ void spacemit_top_off(u_register_t mpidr) - value |= (1 << 2) | (1 << 0); - writel(value, (unsigned int *)PMU_ACPR_UNKONW_REG); - -- /* disable the gpio edge detect function -- * this may cause the system cann't enter D2 -- * */ -- value = readl((unsigned int *)0xd4051000); -- value |= (1 << 21); -- writel(value, (unsigned int *)0xd4051000); -- - /* enable the refbuf function which will enhance the - * driving capability of the internal 26M to PLL path - * */ --- -2.35.3 - diff --git a/patch/atf/atf-spacemit/007-Update-for-v1.0.11.patch b/patch/atf/atf-spacemit/007-Update-for-v1.0.11.patch deleted file mode 100644 index 1e1c8a0a2082..000000000000 --- a/patch/atf/atf-spacemit/007-Update-for-v1.0.11.patch +++ /dev/null @@ -1,554 +0,0 @@ -From 1600b3620dd8babffcfcc7d780a31723c94270bc Mon Sep 17 00:00:00 2001 -From: James Deng -Date: Thu, 1 Aug 2024 22:09:26 +0800 -Subject: Update for v1.0.11 - ---- - debian/control | 1 + - debian/rules | 10 ++ - include/sbi_utils/cache/cacheflush.h | 29 ++++ - lib/utils/psci/psci_main.c | 8 +- - .../spacemit/plat/k1x/underly_implement.c | 86 +++++++++++- - lib/utils/psci/spacemit/plat/plat_pm.c | 131 ++++++++++++++---- - .../psci/spacemit/plat/underly_implement.h | 2 + - lib/utils/serial/fdt_serial_uart8250.c | 1 + - .../generic/include/spacemit/k1x/k1x_evb.h | 1 + - platform/generic/spacemit/spacemit_k1.c | 23 +-- - 10 files changed, 246 insertions(+), 46 deletions(-) - -diff --git a/debian/control b/debian/control -index 6c4a1747b5f3..f0f186707ea7 100644 ---- a/debian/control -+++ b/debian/control -@@ -11,6 +11,7 @@ Rules-Requires-Root: no - Vcs-Browser: https://salsa.debian.org/opensbi-team/opensbi - Vcs-Git: https://salsa.debian.org/opensbi-team/opensbi.git - Homepage: https://github.com/riscv-software-src/opensbi -+XBS-Commit-Id: - - Package: opensbi-spacemit - Architecture: all -diff --git a/debian/rules b/debian/rules -index ab9cc10c406c..3d44b43fcc38 100755 ---- a/debian/rules -+++ b/debian/rules -@@ -10,9 +10,19 @@ else - VERBOSE=0 - endif - -+# 检查是否在 Git 仓库中,并获取 commit ID -+GIT_INSIDE := $(shell git rev-parse --is-inside-work-tree 2>/dev/null) -+ifeq ($(GIT_INSIDE),true) -+ COMMIT_ID := $(shell git 
rev-parse --short HEAD) -+endif -+ - %: - dh $@ - -+override_dh_auto_configure: -+ sed -i "s/XBS-Commit-Id:.*/XBS-Commit-Id: $(COMMIT_ID)/" debian/control -+ dh_auto_configure -+ - override_dh_auto_build: - make \ - V=$(VERBOSE) \ -diff --git a/include/sbi_utils/cache/cacheflush.h b/include/sbi_utils/cache/cacheflush.h -index 126931b25888..7887eef949ab 100644 ---- a/include/sbi_utils/cache/cacheflush.h -+++ b/include/sbi_utils/cache/cacheflush.h -@@ -197,4 +197,33 @@ static inline void csi_flush_l2_cache(bool hw) - writel((1 << L2_CACHE_FLUSH_HW_TYPE_BIT_OFFSET) | (1 << L2_CACHE_FLUSH_HW_EN_BIT_OFFSET), cr); - } - } -+ -+static inline void csi_flush_l2_cache_hart(bool hw, int hartid) -+{ -+ uintptr_t *cr =(MPIDR_AFFLVL1_VAL(hartid) == 0) ? (uintptr_t *)CLUSTER0_L2_CACHE_FLUSH_REG_BASE : -+ (uintptr_t *)CLUSTER1_L2_CACHE_FLUSH_REG_BASE; -+ -+ if (!hw) { -+ writel(0x0, cr); -+ /* flush l2 cache */ -+ writel(readl(cr) | (1 << L2_CACHE_FLUSH_REQUEST_BIT_OFFSET), cr); -+ /* k1pro */ -+ if (L2_CACHE_FLUSH_REQUEST_BIT_OFFSET == L2_CACHE_FLUSH_DONE_BIT_OFFSET) -+ while (readl(cr) & (1 << L2_CACHE_FLUSH_DONE_BIT_OFFSET)); -+ else /* k1x */ { -+ /* clear the request */ -+ while (1) { -+ if ((readl(cr) & (1 << L2_CACHE_FLUSH_DONE_BIT_OFFSET)) == 0) -+ break; -+ __mdelay(); -+ } -+ writel(readl(cr) & ~(1 << L2_CACHE_FLUSH_REQUEST_BIT_OFFSET), cr); -+ } -+ } else { -+ /* k1pro */ -+ if (L2_CACHE_FLUSH_REQUEST_BIT_OFFSET == L2_CACHE_FLUSH_DONE_BIT_OFFSET) -+ return /* do nothing */; -+ writel((1 << L2_CACHE_FLUSH_HW_TYPE_BIT_OFFSET) | (1 << L2_CACHE_FLUSH_HW_EN_BIT_OFFSET), cr); -+ } -+} - #endif -diff --git a/lib/utils/psci/psci_main.c b/lib/utils/psci/psci_main.c -index a3ce138c00cc..e89bb4ad3f39 100644 ---- a/lib/utils/psci/psci_main.c -+++ b/lib/utils/psci/psci_main.c -@@ -81,10 +81,10 @@ int psci_cpu_off(void) - * The only error cpu_off can return is E_DENIED. So check if that's - * indeed the case. 
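The new csi_flush_l2_cache_hart() above performs the software flush as a small handshake with the cluster's flush-control register: raise the request bit, poll until the in-progress bit drops, then clear the request again (the hardware variant instead just arms the hardware-triggered flush). A simplified sketch of that request/poll/acknowledge pattern against a simulated register, with invented bit names rather than the real K1 layout:

#include <stdint.h>
#include <stdio.h>

#define REQ_BIT		(1u << 1)	/* invented: "start flush" */
#define BUSY_BIT	(1u << 3)	/* invented: "flush in progress" */

/* Simulated flush-control register: real code polls hardware, so we fake the
 * busy bit dropping after a few reads. */
static uint32_t flush_reg;
static int reads_until_done = 3;

static uint32_t flush_readl(void)
{
	if (reads_until_done > 0 && --reads_until_done == 0)
		flush_reg &= ~BUSY_BIT;		/* pretend the flush completed */
	return flush_reg;
}

static void flush_writel(uint32_t v)
{
	flush_reg = v;
	if (v & REQ_BIT)
		flush_reg |= BUSY_BIT;		/* hardware would raise this itself */
}

static void l2_flush_sw(void)
{
	flush_writel(0);				/* start from a known idle state */
	flush_writel(flush_readl() | REQ_BIT);		/* raise the flush request */
	while (flush_readl() & BUSY_BIT)		/* wait for completion */
		;
	flush_writel(flush_readl() & ~REQ_BIT);		/* drop the request again */
}

int main(void)
{
	l2_flush_sw();
	printf("flush register after handshake: 0x%08x\n", (unsigned)flush_reg);
	return 0;
}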
- */ -- if (rc != PSCI_E_DENIED) { -- sbi_printf("%s:%d, err\n", __func__, __LINE__); -- sbi_hart_hang(); -- } -+// if (rc != PSCI_E_DENIED) { -+// sbi_printf("%s:%d, err\n", __func__, __LINE__); -+// sbi_hart_hang(); -+// } - - return rc; - } -diff --git a/lib/utils/psci/spacemit/plat/k1x/underly_implement.c b/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -index 94d53bf51d0f..f87bacc7297d 100644 ---- a/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -+++ b/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -@@ -30,7 +30,8 @@ void spacemit_top_on(u_register_t mpidr) - (1 << CLUSTER_BIT14_OFFSET) | - (1 << CLUSTER_BIT30_OFFSET) | - (1 << CLUSTER_BIT25_OFFSET) | -- (1 << CLUSTER_BIT13_OFFSET)); -+ (1 << CLUSTER_BIT13_OFFSET) | -+ (1 << CLUSTER_VOTE_AP_SLPEN)); - writel(value, cluster0_acpr); - - value = readl(cluster1_acpr); -@@ -42,7 +43,8 @@ void spacemit_top_on(u_register_t mpidr) - (1 << CLUSTER_BIT14_OFFSET) | - (1 << CLUSTER_BIT30_OFFSET) | - (1 << CLUSTER_BIT25_OFFSET) | -- (1 << CLUSTER_BIT13_OFFSET)); -+ (1 << CLUSTER_BIT13_OFFSET) | -+ (1 << CLUSTER_VOTE_AP_SLPEN)); - writel(value, cluster1_acpr); - } - -@@ -60,7 +62,7 @@ void spacemit_top_off(u_register_t mpidr) - (1 << CLUSTER_DDRSD_OFFSET) | - (1 << CLUSTER_APBSD_OFFSET) | - (1 << CLUSTER_VCXOSD_OFFSET) | -- (1 << 3) | -+ (1 << CLUSTER_VOTE_AP_SLPEN) | - (1 << CLUSTER_BIT29_OFFSET) | - (1 << CLUSTER_BIT14_OFFSET) | - (1 << CLUSTER_BIT30_OFFSET) | -@@ -73,7 +75,7 @@ void spacemit_top_off(u_register_t mpidr) - (1 << CLUSTER_DDRSD_OFFSET) | - (1 << CLUSTER_APBSD_OFFSET) | - (1 << CLUSTER_VCXOSD_OFFSET) | -- (1 << 3) | -+ (1 << CLUSTER_VOTE_AP_SLPEN) | - (1 << CLUSTER_BIT29_OFFSET) | - (1 << CLUSTER_BIT14_OFFSET) | - (1 << CLUSTER_BIT30_OFFSET) | -@@ -279,6 +281,82 @@ void spacemit_wakeup_cpu(u_register_t mpidr) - writel(1 << target_cpu_idx, cpu_reset_base); - } - -+int spacemit_core_enter_c2(u_register_t mpidr) -+{ -+ unsigned int value; -+ -+ /* wait the cpu enter c2 */ -+ value = readl((unsigned int *)0xd4282890); -+ -+ if (mpidr == 0) { -+ if (value & (1 << 6)) -+ return 1; -+ } else if (mpidr == 1) { -+ if (value & (1 << 9)) -+ return 1; -+ } else if (mpidr == 2) { -+ if (value & (1 << 12)) -+ return 1; -+ } else if (mpidr == 3) { -+ if (value & (1 << 15)) -+ return 1; -+ } else if (mpidr == 4) { -+ if (value & (1 << 22)) -+ return 1; -+ } else if (mpidr == 5) { -+ if (value & (1 << 25)) -+ return 1; -+ } else if (mpidr == 6) { -+ if (value & (1 << 28)) -+ return 1; -+ } else if (mpidr == 7) { -+ if (value & (1 << 31)) -+ return 1; -+ } else { -+ return 0; -+ } -+ -+ return 0; -+} -+ -+void spacemit_wait_core_enter_c2(u_register_t mpidr) -+{ -+ unsigned int value; -+ -+ while (1) { -+ /* wait the cpu enter c2 */ -+ value = readl((unsigned int *)0xd4282890); -+ -+ if (mpidr == 0) { -+ if (value & (1 << 6)) -+ return; -+ } else if (mpidr == 1) { -+ if (value & (1 << 9)) -+ return; -+ } else if (mpidr == 2) { -+ if (value & (1 << 12)) -+ return; -+ } else if (mpidr == 3) { -+ if (value & (1 << 15)) -+ return; -+ } else if (mpidr == 4) { -+ if (value & (1 << 22)) -+ return; -+ } else if (mpidr == 5) { -+ if (value & (1 << 25)) -+ return; -+ } else if (mpidr == 6) { -+ if (value & (1 << 28)) -+ return; -+ } else if (mpidr == 7) { -+ if (value & (1 << 31)) -+ return; -+ } else { -+ ; -+ } -+ } -+} -+ - void spacemit_assert_cpu(u_register_t mpidr) - { - unsigned int target_cpu_idx; -diff --git a/lib/utils/psci/spacemit/plat/plat_pm.c b/lib/utils/psci/spacemit/plat/plat_pm.c -index a5b91270834f..166bc3c7be1f 100644 ---- 
a/lib/utils/psci/spacemit/plat/plat_pm.c -+++ b/lib/utils/psci/spacemit/plat/plat_pm.c -@@ -1,14 +1,20 @@ - #include -+#include - #include - #include - #include - #include -+#include - #include -+#include -+#include - #include -+#include - #include - #include - #include - #include "underly_implement.h" -+#include "../../psci_private.h" - - #define CORE_PWR_STATE(state) \ - ((state)->pwr_domain_state[MPIDR_AFFLVL0]) -@@ -20,17 +26,39 @@ - /* reserved for future used */ - /* unsigned long __plic_regsave_offset_ptr; */ - -+static spinlock_t psciipi_lock = SPIN_LOCK_INITIALIZER; -+static struct sbi_hartmask psciipi_wait_hmask = { 0 }; -+ -+static void wake_idle_harts(struct sbi_scratch *scratch, u32 hartid) -+{ -+ spin_lock(&psciipi_lock); -+ -+ /* Send an IPI to all HARTs of the cluster that waiting for waked up */ -+ for (u32 i = 0; i < PLATFORM_MAX_CPUS_PER_CLUSTER * PLATFORM_CLUSTER_COUNT; i++) { -+ if (i != hartid) { -+ sbi_hartmask_set_hart(i, &psciipi_wait_hmask); -+ sbi_ipi_raw_send(i); -+ } -+ } -+ -+ spin_unlock(&psciipi_lock); -+} -+ - static int spacemit_pwr_domain_on(u_register_t mpidr) - { - /* wakeup the cpu */ -- spacemit_wakeup_cpu(mpidr); -+ if (spacemit_core_enter_c2(mpidr)) { -+ spacemit_wakeup_cpu(mpidr); -+ } else { -+ sbi_ipi_raw_send(mpidr); -+ } - - return 0; - } - - static void spacemit_pwr_domain_on_finish(const psci_power_state_t *target_state) - { -- unsigned int hartid = current_hartid(); -+ unsigned int hartid = current_hartid(); - - if (SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { - /* D1P */ -@@ -42,12 +70,12 @@ static void spacemit_pwr_domain_on_finish(const psci_power_state_t *target_state - * No need for locks as no other cpu is active at the moment. - */ - if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) { -- spacemit_cluster_on(hartid); -+ spacemit_cluster_on(hartid); - #if defined(CONFIG_PLATFORM_SPACEMIT_K1X) - /* disable the tcm */ - csr_write(CSR_TCMCFG, 0); - #endif -- cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(hartid)); -+ cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(hartid)); - #if defined(CONFIG_PLATFORM_SPACEMIT_K1X) - /* enable the tcm */ - csr_write(CSR_TCMCFG, 1); -@@ -62,6 +90,7 @@ static int spacemit_pwr_domain_off_early(const psci_power_state_t *target_state) - /* clear the external irq pending */ - csr_clear(CSR_MIP, MIP_MEIP); - csr_clear(CSR_MIP, MIP_SEIP); -+ csr_clear(CSR_MIP, MIP_MSIP); - - /* here we clear the sstimer pending if this core have */ - if (sbi_hart_has_extension(sbi_scratch_thishart_ptr(), SBI_HART_EXT_SSTC)) { -@@ -76,28 +105,65 @@ static void spacemit_pwr_domain_off(const psci_power_state_t *target_state) - unsigned int hartid = current_hartid(); - - if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) { --#if defined(CONFIG_PLATFORM_SPACEMIT_K1X) -- /* disable the tcm */ -- csr_write(CSR_TCMCFG, 0); --#endif -- cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(hartid)); -- spacemit_cluster_off(hartid); -- csi_flush_l2_cache(1); -+ /* power-off cluster */ -+ spacemit_cluster_off(hartid); - } - - if (SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { - /* D1P */ - spacemit_top_off(hartid); - } -- -- spacemit_assert_cpu(hartid); - } - - static void spacemit_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state) - { -- while (1) { -- asm volatile ("wfi"); -+ int hstate; -+ unsigned long saved_mie, cmip; -+ unsigned int hartid = current_hartid(); -+ -+ hstate = sbi_hsm_hart_get_state(sbi_domain_thishart_ptr(), hartid); -+ -+ /* Save MIE CSR */ -+ saved_mie = csr_read(CSR_MIE); -+ -+ /* 
Set MSIE and MEIE bits to receive IPI */ -+ if (hstate == SBI_HSM_STATE_SUSPENDED) { -+ csr_set(CSR_MIE, MIP_MSIP | MIP_MEIP); -+ -+ /* Wait for wakeup source to finish using WFI */ -+ do { -+ wfi(); -+ cmip = csr_read(CSR_MIP); -+ } while (!(cmip & (MIP_MSIP | MIP_MEIP))); -+ } else { -+ csr_set(CSR_MIE, MIP_MSIP); -+ -+ /* Wait for wakeup source to finish using WFI */ -+ do { -+ wfi(); -+ cmip = csr_read(CSR_MIP); -+ } while (!(cmip & (MIP_MSIP))); -+ -+ spin_lock(&psciipi_lock); -+ -+ if (sbi_hartmask_test_hart(hartid, &psciipi_wait_hmask)) { -+ sbi_ipi_raw_clear(hartid); -+ /* Restore MIE CSR */ -+ csr_write(CSR_MIE, saved_mie); -+ -+ spin_unlock(&psciipi_lock); -+ -+ spacemit_assert_cpu(hartid); -+ -+ while (1) -+ asm volatile ("wfi"); -+ } -+ -+ spin_unlock(&psciipi_lock); - } -+ -+ /* Restore MIE CSR */ -+ csr_write(CSR_MIE, saved_mie); - } - - static void spacemit_pwr_domain_on_finish_late(const psci_power_state_t *target_state) -@@ -158,9 +224,8 @@ static int spacemit_validate_power_state(unsigned int power_state, - - static void spacemit_pwr_domain_suspend(const psci_power_state_t *target_state) - { -- unsigned int clusterid; - unsigned int hartid = current_hartid(); -- -+ - /* - * CSS currently supports retention only at cpu level. Just return - * as nothing is to be done for retention. -@@ -168,30 +233,40 @@ static void spacemit_pwr_domain_suspend(const psci_power_state_t *target_state) - if (CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET) - return; - -- - if (CORE_PWR_STATE(target_state) != ARM_LOCAL_STATE_OFF) { - sbi_printf("%s:%d\n", __func__, __LINE__); - sbi_hart_hang(); - } - -- /* Cluster is to be turned off, so disable coherency */ -- if (CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { -- clusterid = MPIDR_AFFLVL1_VAL(hartid); -+ /* power-off cluster */ -+ if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) -+ spacemit_cluster_off(hartid); -+ -+ if (SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { - #if defined(CONFIG_PLATFORM_SPACEMIT_K1X) - /* disable the tcm */ - csr_write(CSR_TCMCFG, 0); - #endif -- cci_disable_snoop_dvm_reqs(clusterid); -- spacemit_cluster_off(hartid); -- csi_flush_l2_cache(1); -- } -+ wake_idle_harts(NULL, hartid); - -- if (SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) { - /* D1P & D2 */ -+ csi_flush_l2_cache_hart(0, 0); -+ csi_flush_l2_cache_hart(0, PLATFORM_MAX_CPUS_PER_CLUSTER); -+ -+ cci_disable_snoop_dvm_reqs(0); -+ cci_disable_snoop_dvm_reqs(1); -+ -+ /* assert othter cpu & wait other cpu enter c2 */ -+ for (u32 i = 0; i < PLATFORM_MAX_CPUS_PER_CLUSTER * PLATFORM_CLUSTER_COUNT; i++) { -+ if (i != hartid) { -+ spacemit_wait_core_enter_c2(i); -+ } -+ } -+ -+ spacemit_assert_cpu(hartid); -+ - spacemit_top_off(hartid); - } -- -- spacemit_assert_cpu(hartid); - } - - static void spacemit_pwr_domain_suspend_finish(const psci_power_state_t *target_state) -diff --git a/lib/utils/psci/spacemit/plat/underly_implement.h b/lib/utils/psci/spacemit/plat/underly_implement.h -index dd6c972325bb..7c11db518a3f 100644 ---- a/lib/utils/psci/spacemit/plat/underly_implement.h -+++ b/lib/utils/psci/spacemit/plat/underly_implement.h -@@ -9,6 +9,8 @@ void spacemit_cluster_on(u_register_t mpidr); - void spacemit_cluster_off(u_register_t mpidr); - void spacemit_wakeup_cpu(u_register_t mpidr); - void spacemit_assert_cpu(u_register_t mpidr); -+int spacemit_core_enter_c2(u_register_t mpidr); -+void spacemit_wait_core_enter_c2(u_register_t mpidr); - void spacemit_deassert_cpu(void); - - #endif -diff --git 
a/lib/utils/serial/fdt_serial_uart8250.c b/lib/utils/serial/fdt_serial_uart8250.c -index 7b5d6a4c2f18..51ea91c7665f 100644 ---- a/lib/utils/serial/fdt_serial_uart8250.c -+++ b/lib/utils/serial/fdt_serial_uart8250.c -@@ -30,6 +30,7 @@ static const struct fdt_match serial_uart8250_match[] = { - { .compatible = "ns16550" }, - { .compatible = "ns16550a" }, - { .compatible = "snps,dw-apb-uart" }, -+ { .compatible = "spacemit,pxa-uart" }, - { }, - }; - -diff --git a/platform/generic/include/spacemit/k1x/k1x_evb.h b/platform/generic/include/spacemit/k1x/k1x_evb.h -index 10e856965618..e7381ca245da 100644 ---- a/platform/generic/include/spacemit/k1x/k1x_evb.h -+++ b/platform/generic/include/spacemit/k1x/k1x_evb.h -@@ -60,6 +60,7 @@ - #define CLUSTER_BIT30_OFFSET (30) - #define CLUSTER_BIT25_OFFSET (25) - #define CLUSTER_BIT13_OFFSET (13) -+#define CLUSTER_VOTE_AP_SLPEN (3) - - #define L2_HARDWARE_CACHE_FLUSH_EN (13) - -diff --git a/platform/generic/spacemit/spacemit_k1.c b/platform/generic/spacemit/spacemit_k1.c -index 95218846715f..2f9deb1a7940 100644 ---- a/platform/generic/spacemit/spacemit_k1.c -+++ b/platform/generic/spacemit/spacemit_k1.c -@@ -67,15 +67,15 @@ static void wakeup_other_core(void) - - #if defined(CONFIG_PLATFORM_SPACEMIT_K1X) - /* enable the hw l2 cache flush method for each core */ -- writel(readl((u32 *)PMU_C0_CAPMP_IDLE_CFG0) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C0_CAPMP_IDLE_CFG0); -- writel(readl((u32 *)PMU_C0_CAPMP_IDLE_CFG1) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C0_CAPMP_IDLE_CFG1); -- writel(readl((u32 *)PMU_C0_CAPMP_IDLE_CFG2) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C0_CAPMP_IDLE_CFG2); -- writel(readl((u32 *)PMU_C0_CAPMP_IDLE_CFG3) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C0_CAPMP_IDLE_CFG3); -- -- writel(readl((u32 *)PMU_C1_CAPMP_IDLE_CFG0) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C1_CAPMP_IDLE_CFG0); -- writel(readl((u32 *)PMU_C1_CAPMP_IDLE_CFG1) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C1_CAPMP_IDLE_CFG1); -- writel(readl((u32 *)PMU_C1_CAPMP_IDLE_CFG2) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C1_CAPMP_IDLE_CFG2); -- writel(readl((u32 *)PMU_C1_CAPMP_IDLE_CFG3) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C1_CAPMP_IDLE_CFG3); -+ /* writel(readl((u32 *)PMU_C0_CAPMP_IDLE_CFG0) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C0_CAPMP_IDLE_CFG0); */ -+ /* writel(readl((u32 *)PMU_C0_CAPMP_IDLE_CFG1) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C0_CAPMP_IDLE_CFG1); */ -+ /* writel(readl((u32 *)PMU_C0_CAPMP_IDLE_CFG2) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C0_CAPMP_IDLE_CFG2); */ -+ /* writel(readl((u32 *)PMU_C0_CAPMP_IDLE_CFG3) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C0_CAPMP_IDLE_CFG3); */ -+ -+ /* writel(readl((u32 *)PMU_C1_CAPMP_IDLE_CFG0) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C1_CAPMP_IDLE_CFG0); */ -+ /* writel(readl((u32 *)PMU_C1_CAPMP_IDLE_CFG1) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C1_CAPMP_IDLE_CFG1); */ -+ /* writel(readl((u32 *)PMU_C1_CAPMP_IDLE_CFG2) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C1_CAPMP_IDLE_CFG2); */ -+ /* writel(readl((u32 *)PMU_C1_CAPMP_IDLE_CFG3) | (1 << L2_HARDWARE_CACHE_FLUSH_EN), (u32 *)PMU_C1_CAPMP_IDLE_CFG3); */ - #endif - - // hart0 is already boot up -@@ -188,7 +188,8 @@ static int spacemit_hart_start(unsigned int hartid, unsigned long saddr) - static int spacemit_hart_stop(void) - { - psci_cpu_off(); -- return 0; -+ -+ return SBI_ENOTSUPP; - } - - static int spacemit_hart_suspend(unsigned int suspend_type) -@@ -265,6 +266,8 @@ 
static bool spacemit_cold_boot_allowed(u32 hartid, const struct fdt_match *match - static const struct fdt_match spacemit_k1_match[] = { - { .compatible = "spacemit,k1-pro" }, - { .compatible = "spacemit,k1x" }, -+ { .compatible = "spacemit,k1-x" }, -+ { .compatible = "spacemit,k1" }, - { }, - }; - --- -2.35.3 - diff --git a/patch/atf/atf-spacemit/008-Update-for-v1.0.13.patch b/patch/atf/atf-spacemit/008-Update-for-v1.0.13.patch deleted file mode 100644 index cae45e98481b..000000000000 --- a/patch/atf/atf-spacemit/008-Update-for-v1.0.13.patch +++ /dev/null @@ -1,26 +0,0 @@ -From a26e37daaeb01f027e4753f854716f0f15eb3d13 Mon Sep 17 00:00:00 2001 -From: James Deng -Date: Fri, 16 Aug 2024 23:44:13 +0800 -Subject: Update for v1.0.13 - ---- - lib/utils/psci/spacemit/plat/plat_pm.c | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/lib/utils/psci/spacemit/plat/plat_pm.c b/lib/utils/psci/spacemit/plat/plat_pm.c -index 166bc3c7be1f..32aec9d308ab 100644 ---- a/lib/utils/psci/spacemit/plat/plat_pm.c -+++ b/lib/utils/psci/spacemit/plat/plat_pm.c -@@ -147,6 +147,9 @@ static void spacemit_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_st - spin_lock(&psciipi_lock); - - if (sbi_hartmask_test_hart(hartid, &psciipi_wait_hmask)) { -+ -+ sbi_hartmask_clear_hart(hartid, &psciipi_wait_hmask); -+ - sbi_ipi_raw_clear(hartid); - /* Restore MIE CSR */ - csr_write(CSR_MIE, saved_mie); --- -2.35.3 - diff --git a/patch/atf/atf-spacemit/009-Update-for-v1.0.14.patch b/patch/atf/atf-spacemit/009-Update-for-v1.0.14.patch deleted file mode 100644 index 4df612f402f6..000000000000 --- a/patch/atf/atf-spacemit/009-Update-for-v1.0.14.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 6cf0c8e6ed09841cdbff7b3788efa608ac5b08aa Mon Sep 17 00:00:00 2001 -From: James Deng -Date: Sat, 31 Aug 2024 14:23:34 +0800 -Subject: Update for v1.0.14 - ---- - debian/opensbi-spacemit.postinst | 15 +++++++++++---- - 1 file changed, 11 insertions(+), 4 deletions(-) - -diff --git a/debian/opensbi-spacemit.postinst b/debian/opensbi-spacemit.postinst -index dce7154ac8f4..0362a598a14b 100755 ---- a/debian/opensbi-spacemit.postinst -+++ b/debian/opensbi-spacemit.postinst -@@ -31,9 +31,15 @@ configure) - OPENSBI_SEEK=0 - ;; - "/dev/nvme0n1"*) -- OPENSBI=/dev/mtdblock0 -- # 以KB为单位 -- OPENSBI_SEEK=448 -+ if [ ! 
-e "/dev/mtdblock4" ]; then -+ OPENSBI=/dev/mtdblock0 -+ # 以KB为单位 -+ OPENSBI_SEEK=448 -+ else -+ OPENSBI=/dev/mtdblock4 -+ # 以KB为单位 -+ OPENSBI_SEEK=0 -+ fi - ;; - *) - echo "Unsupported root=$ROOT" -@@ -56,8 +62,9 @@ configure) - done - - # 此前已经做了所有检查 -+ set -x - dd if=/usr/lib/riscv64-linux-gnu/opensbi/generic/fw_dynamic.itb of=$OPENSBI seek=$OPENSBI_SEEK bs=1K && sync -- -+ set +x - ;; - esac - --- -2.35.3 - diff --git a/patch/atf/atf-spacemit/010-Update-for-v1.0.15.patch b/patch/atf/atf-spacemit/010-Update-for-v1.0.15.patch deleted file mode 100644 index 063996207546..000000000000 --- a/patch/atf/atf-spacemit/010-Update-for-v1.0.15.patch +++ /dev/null @@ -1,108 +0,0 @@ -From 08916e4fe06451080a8882d6955df9e5947e352e Mon Sep 17 00:00:00 2001 -From: James Deng -Date: Sat, 7 Sep 2024 21:08:45 +0800 -Subject: Update for v1.0.15 - ---- - .../spacemit/plat/k1x/underly_implement.c | 20 ++++++++++++++ - lib/utils/psci/spacemit/plat/plat_pm.c | 27 +++++++++++-------- - .../psci/spacemit/plat/underly_implement.h | 1 + - 3 files changed, 37 insertions(+), 11 deletions(-) - -diff --git a/lib/utils/psci/spacemit/plat/k1x/underly_implement.c b/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -index f87bacc7297d..825db86dddfe 100644 ---- a/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -+++ b/lib/utils/psci/spacemit/plat/k1x/underly_implement.c -@@ -319,6 +319,26 @@ int spacemit_core_enter_c2(u_register_t mpidr) - return 0; - } - -+int spacemit_cluster_enter_m2(u_register_t mpidr) -+{ -+ unsigned int value; -+ -+ /* wait the cpu enter M2 */ -+ value = readl((unsigned int *)0xd4282890); -+ -+ if (mpidr == 0 || mpidr == 1 || mpidr == 2 || mpidr == 3) { -+ if (value & (1 << 3)) -+ return 1; -+ } else if (mpidr == 4 || mpidr == 5 || mpidr == 6 || mpidr == 7) { -+ if (value & (1 << 19)) -+ return 1; -+ } else { -+ return 0; -+ } -+ -+ return 0; -+} -+ - void spacemit_wait_core_enter_c2(u_register_t mpidr) - { - unsigned int value; -diff --git a/lib/utils/psci/spacemit/plat/plat_pm.c b/lib/utils/psci/spacemit/plat/plat_pm.c -index 32aec9d308ab..e3f494065f23 100644 ---- a/lib/utils/psci/spacemit/plat/plat_pm.c -+++ b/lib/utils/psci/spacemit/plat/plat_pm.c -@@ -228,7 +228,7 @@ static int spacemit_validate_power_state(unsigned int power_state, - static void spacemit_pwr_domain_suspend(const psci_power_state_t *target_state) - { - unsigned int hartid = current_hartid(); -- -+ - /* - * CSS currently supports retention only at cpu level. Just return - * as nothing is to be done for retention. 
-@@ -250,22 +250,27 @@ static void spacemit_pwr_domain_suspend(const psci_power_state_t *target_state) - /* disable the tcm */ - csr_write(CSR_TCMCFG, 0); - #endif -- wake_idle_harts(NULL, hartid); -+ if (!spacemit_cluster_enter_m2(PLATFORM_MAX_CPUS_PER_CLUSTER)) { -+ wake_idle_harts(NULL, hartid); - -- /* D1P & D2 */ -- csi_flush_l2_cache_hart(0, 0); -- csi_flush_l2_cache_hart(0, PLATFORM_MAX_CPUS_PER_CLUSTER); -+ csi_flush_l2_cache_hart(0, 0); -+ csi_flush_l2_cache_hart(0, PLATFORM_MAX_CPUS_PER_CLUSTER); - -- cci_disable_snoop_dvm_reqs(0); -- cci_disable_snoop_dvm_reqs(1); -+ cci_disable_snoop_dvm_reqs(0); -+ cci_disable_snoop_dvm_reqs(1); - -- /* assert othter cpu & wait other cpu enter c2 */ -- for (u32 i = 0; i < PLATFORM_MAX_CPUS_PER_CLUSTER * PLATFORM_CLUSTER_COUNT; i++) { -- if (i != hartid) { -- spacemit_wait_core_enter_c2(i); -+ /* assert othter cpu & wait other cpu enter c2 */ -+ for (u32 i = 0; i < PLATFORM_MAX_CPUS_PER_CLUSTER * PLATFORM_CLUSTER_COUNT; i++) { -+ if (i != hartid) { -+ spacemit_wait_core_enter_c2(i); -+ } - } -+ } else { -+ csi_flush_l2_cache_hart(0, 0); -+ cci_disable_snoop_dvm_reqs(0); - } - -+ - spacemit_assert_cpu(hartid); - - spacemit_top_off(hartid); -diff --git a/lib/utils/psci/spacemit/plat/underly_implement.h b/lib/utils/psci/spacemit/plat/underly_implement.h -index 7c11db518a3f..80f1377d1116 100644 ---- a/lib/utils/psci/spacemit/plat/underly_implement.h -+++ b/lib/utils/psci/spacemit/plat/underly_implement.h -@@ -10,6 +10,7 @@ void spacemit_cluster_off(u_register_t mpidr); - void spacemit_wakeup_cpu(u_register_t mpidr); - void spacemit_assert_cpu(u_register_t mpidr); - int spacemit_core_enter_c2(u_register_t mpidr); -+int spacemit_cluster_enter_m2(u_register_t mpidr); - void spacemit_wait_core_enter_c2(u_register_t mpidr); - void spacemit_deassert_cpu(void); - --- -2.35.3 - diff --git a/patch/kernel/archive/spacemit-6.1/0000.patching_config.yaml b/patch/kernel/archive/spacemit-6.1/0000.patching_config.yaml deleted file mode 100644 index 72bcc958ba81..000000000000 --- a/patch/kernel/archive/spacemit-6.1/0000.patching_config.yaml +++ /dev/null @@ -1,45 +0,0 @@ -config: # This is file 'patch/kernel/spacemit-6.1/0000.patching_config.yaml' - - # PATCH NUMBERING INFO - # - # Patches should be ordered in such a way that general kernel patches are applied first, then SoC-related patches and at last board-specific patches - # - # Patch numbers in this folder are sorted by category: - # - # 000* for general patches - # 0** for Bianbu-Linux-related patches - # 1** for other SoC-related patches - # 5** for board specific patches - - # Just some info stuff; not used by the patching scripts - name: spacemit-6.1 - kind: kernel - type: mainline # or: vendor - branch: linux-6.1.y - last-known-good-tag: v6.1.96 - maintainers: - - { github: none, name: none, email: none, armbian-forum: none } - - # .dts files in these directories will be copied as-is to the build tree; later ones overwrite earlier ones. - # This is meant to provide a way to "add a board DTS" without having to null-patch them in. - dts-directories: - - { source: "dt", target: "arch/riscv/boot/dts/spacemit" } - - # Every file in these directories will be copied as-is to the build tree; later ones overwrite earlier ones - # This is meant as a way to have overlays, bare, in a directory, without having to null-patch them in. 
- # @TODO need a solution to auto-Makefile the overlays as well - overlay-directories: - - { source: "overlay", target: "arch/riscv/boot/dts/spacemit/overlay" } - - # The Makefile in each of these directories will be magically patched to include the dts files copied - # or patched-in; overlay subdir will be included "-y" if it exists. - # No more Makefile patching needed, yay! - auto-patch-dt-makefile: - - { directory: "arch/riscv/boot/dts/spacemit", config-var: "CONFIG_SOC_SPACEMIT_K1X" } - - # Configuration for when applying patches to git / auto-rewriting patches (development cycle helpers) - patches-to-git: - do-not-commit-files: - - "MAINTAINERS" # constant churn, drop them. sorry. - do-not-commit-regexes: # Python-style regexes - - "^arch/([a-zA-Z0-9]+)/boot/dts/([a-zA-Z0-9]+)/Makefile$" # ignore DT Makefile patches, we've an auto-patcher now diff --git a/patch/kernel/archive/spacemit-6.1/002-dts-spacemit-makefile.patch b/patch/kernel/archive/spacemit-6.1/002-dts-spacemit-makefile.patch deleted file mode 100644 index c1a63140ae85..000000000000 --- a/patch/kernel/archive/spacemit-6.1/002-dts-spacemit-makefile.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 8a2e5ced1c97d81c3d7cf230a63aac66076e1ca0 Mon Sep 17 00:00:00 2001 -From: ColorfulRhino -Date: Fri, 21 Jun 2024 11:54:06 -0400 -Subject: [PATCH] Add SpacemiT subdirectory to dts Makefile - ---- - arch/riscv/boot/dts/Makefile | 1 + - arch/riscv/boot/dts/spacemit/Makefile | 5 + - -diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile -index ff174996cdfd..cc6a06356338 100644 ---- a/arch/riscv/boot/dts/Makefile -+++ b/arch/riscv/boot/dts/Makefile -@@ -3,5 +3,6 @@ subdir-y += sifive - subdir-y += starfive - subdir-$(CONFIG_SOC_CANAAN_K210_DTB_BUILTIN) += canaan - subdir-y += microchip -+subdir-y += spacemit - - obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y)) -diff --git a/arch/riscv/boot/dts/spacemit/Makefile b/arch/riscv/boot/dts/spacemit/Makefile -new file mode 100644 -index 000000000000..2be95dbca797 ---- /dev/null -+++ b/arch/riscv/boot/dts/spacemit/Makefile -@@ -0,0 +1,5 @@ -+# SPDX-License-Identifier: GPL-2.0 -+ -+dtb-$(CONFIG_SOC_SPACEMIT_K1X) += k1-x_deb1.dtb -+ -+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y)) diff --git a/patch/kernel/archive/spacemit-6.1/003-arch-riscv.patch b/patch/kernel/archive/spacemit-6.1/003-arch-riscv.patch deleted file mode 100644 index 659474def761..000000000000 --- a/patch/kernel/archive/spacemit-6.1/003-arch-riscv.patch +++ /dev/null @@ -1,4785 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Fri, 21 Jun 2024 11:54:06 -0400 -Subject: add spacemit patch set - -source: https://gitee.com/bianbu-linux/linux-6.1 - -Signed-off-by: Patrick Yavitz ---- - arch/riscv/Kconfig | 110 ++- - arch/riscv/Kconfig.socs | 60 ++ - arch/riscv/Makefile | 22 +- - arch/riscv/boot/Makefile | 50 ++ - arch/riscv/generic/Image.its.S | 32 + - arch/riscv/generic/Platform | 15 + - arch/riscv/include/asm/cacheflush.h | 4 + - arch/riscv/include/asm/csr.h | 25 +- - arch/riscv/include/asm/elf.h | 14 + - arch/riscv/include/asm/errata_list.h | 4 +- - arch/riscv/include/asm/hwcap.h | 5 + - arch/riscv/include/asm/insn.h | 410 ++++++++++ - arch/riscv/include/asm/kvm_host.h | 2 + - arch/riscv/include/asm/kvm_vcpu_vector.h | 82 ++ - arch/riscv/include/asm/mmio.h | 38 + - arch/riscv/include/asm/module.h | 16 + - arch/riscv/include/asm/processor.h | 17 + - arch/riscv/include/asm/sbi.h | 16 + - arch/riscv/include/asm/simd.h | 44 + - 
arch/riscv/include/asm/switch_to.h | 9 +- - arch/riscv/include/asm/thread_info.h | 3 + - arch/riscv/include/asm/vdso/processor.h | 28 +- - arch/riscv/include/asm/vector.h | 210 +++++ - arch/riscv/include/asm/xor.h | 83 ++ - arch/riscv/include/uapi/asm/auxvec.h | 1 + - arch/riscv/include/uapi/asm/elf.h | 5 +- - arch/riscv/include/uapi/asm/hwcap.h | 1 + - arch/riscv/include/uapi/asm/kvm.h | 10 + - arch/riscv/include/uapi/asm/ptrace.h | 39 + - 29 files changed, 1322 insertions(+), 33 deletions(-) - -diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig -index 111111111111..222222222222 100644 ---- a/arch/riscv/Kconfig -+++ b/arch/riscv/Kconfig -@@ -75,8 +75,8 @@ config RISCV - select HAVE_ARCH_AUDITSYSCALL - select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL - select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL -- select HAVE_ARCH_KASAN if MMU && 64BIT -- select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT -+ # select HAVE_ARCH_KASAN if MMU && 64BIT -+ # select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT - select HAVE_ARCH_KFENCE if MMU && 64BIT - select HAVE_ARCH_KGDB if !XIP_KERNEL - select HAVE_ARCH_KGDB_QXFER_PKT -@@ -94,7 +94,7 @@ config RISCV - select HAVE_DMA_CONTIGUOUS if MMU - select HAVE_EBPF_JIT if MMU - select HAVE_FUNCTION_ERROR_INJECTION -- select HAVE_GCC_PLUGINS -+ # select HAVE_GCC_PLUGINS - select HAVE_GENERIC_VDSO if MMU && 64BIT - select HAVE_IRQ_TIME_ACCOUNTING - select HAVE_KPROBES if !XIP_KERNEL -@@ -118,7 +118,7 @@ config RISCV - select MODULES_USE_ELF_RELA if MODULES - select MODULE_SECTIONS if MODULES - select OF -- select OF_DMA_DEFAULT_COHERENT -+ # select OF_DMA_DEFAULT_COHERENT - select OF_EARLY_FLATTREE - select OF_IRQ - select PCI_DOMAINS_GENERIC if PCI -@@ -130,7 +130,8 @@ config RISCV - select THREAD_INFO_IN_TASK - select TRACE_IRQFLAGS_SUPPORT - select UACCESS_MEMCPY if !MMU -- select ZONE_DMA32 if 64BIT -+ select ZONE_DMA32 if 64BIT && !SOC_SPACEMIT_K1PRO -+ select ARCH_SUSPEND_POSSIBLE - - config ARCH_MMAP_RND_BITS_MIN - default 18 if 64BIT -@@ -235,6 +236,12 @@ config RISCV_DMA_NONCOHERENT - config AS_HAS_INSN - def_bool $(as-instr,.insn r 51$(comma) 0$(comma) 0$(comma) t0$(comma) t0$(comma) zero) - -+config AS_HAS_OPTION_ARCH -+ # https://reviews.llvm.org/D123515 -+ def_bool y -+ depends on $(as-instr, .option arch$(comma) +m) -+ depends on !$(as-instr, .option arch$(comma) -i) -+ - source "arch/riscv/Kconfig.socs" - source "arch/riscv/Kconfig.erratas" - -@@ -413,6 +420,50 @@ config RISCV_ISA_SVPBMT - - If you don't know what to do here, say Y. - -+config TOOLCHAIN_HAS_V -+ bool -+ default y -+ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64iv) -+ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32iv) -+ depends on LLD_VERSION >= 140000 || LD_VERSION >= 23800 -+ depends on AS_HAS_OPTION_ARCH -+ -+config RISCV_ISA_V -+ bool "VECTOR extension support" -+ depends on TOOLCHAIN_HAS_V -+ depends on FPU -+ select DYNAMIC_SIGFRAME -+ default y -+ help -+ Say N here if you want to disable all vector related procedure -+ in the kernel. -+ -+ If you don't know what to do here, say Y. 
-+ -+config TOOLCHAIN_HAS_ZBB -+ bool -+ default y -+ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zbb) -+ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zbb) -+ depends on LLD_VERSION >= 150000 || LD_VERSION >= 23900 -+ depends on AS_HAS_OPTION_ARCH -+ -+config RISCV_ISA_ZBB -+ bool "Zbb extension support for bit manipulation instructions" -+ depends on TOOLCHAIN_HAS_ZBB -+ depends on !XIP_KERNEL && MMU -+ select RISCV_ALTERNATIVE -+ default y -+ help -+ Adds support to dynamically detect the presence of the ZBB -+ extension (basic bit manipulation) and enable its usage. -+ -+ The Zbb extension provides instructions to accelerate a number -+ of bit-specific operations (count bit population, sign extending, -+ bitrotation, etc). -+ -+ If you don't know what to do here, say Y. -+ - config TOOLCHAIN_HAS_ZICBOM - bool - default y -@@ -437,6 +488,42 @@ config RISCV_ISA_ZICBOM - - If you don't know what to do here, say Y. - -+config TOOLCHAIN_HAS_ZICBOZ -+ bool -+ default y -+ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zicboz) -+ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zicboz) -+ depends on LLD_VERSION >= 150000 || LD_VERSION >= 23800 -+ -+config RISCV_ISA_ZICBOZ -+ bool "Zicboz extension support" -+ depends on TOOLCHAIN_HAS_ZICBOZ -+ depends on !XIP_KERNEL && MMU -+ default y -+ help -+ Adds support to dynamically detect the presence of the ZICBOZ -+ extension and enable its usage. -+ -+ If you don't know what to do here, say Y. -+ -+config TOOLCHAIN_HAS_ZICBOP -+ bool -+ default y -+ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zicbop) -+ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zicbop) -+ depends on LLD_VERSION >= 150000 || LD_VERSION >= 23800 -+ -+config RISCV_ISA_ZICBOP -+ bool "Zicboz extension support" -+ depends on TOOLCHAIN_HAS_ZICBOP -+ depends on !XIP_KERNEL && MMU -+ default y -+ help -+ Adds support to dynamically detect the presence of the ZICBOP -+ extension and enable its usage. -+ -+ If you don't know what to do here, say Y. -+ - config TOOLCHAIN_HAS_ZIHINTPAUSE - bool - default y -@@ -711,15 +798,28 @@ config PORTABLE - select OF - select MMU - -+config IMAGE_LOAD_OFFSET -+ hex "Image load offset from start of RAM when load kernel to RAM" -+ default 0x400000 if 32BIT -+ default 0x200000 if 64BIT -+ help -+ This is the RAM offset from start of ram. Bootloader would use -+ this offset to load kernel image to ram. -+ - menu "Power management options" - - source "kernel/power/Kconfig" - -+config ARCH_SUSPEND_POSSIBLE -+ depends on SOC_SPACEMIT -+ def_bool y -+ - endmenu # "Power management options" - - menu "CPU Power Management" - - source "drivers/cpuidle/Kconfig" -+source "drivers/cpufreq/Kconfig" - - endmenu # "CPU Power Management" - -diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs -index 111111111111..222222222222 100644 ---- a/arch/riscv/Kconfig.socs -+++ b/arch/riscv/Kconfig.socs -@@ -81,4 +81,64 @@ config SOC_CANAAN_K210_DTB_SOURCE - - endif # SOC_CANAAN - -+config SOC_SPACEMIT -+ bool "Spacemit SoCs" -+ select SIFIVE_PLIC -+ help -+ This enables support for Spacemit SoCs platform hardware. -+ -+if SOC_SPACEMIT -+ -+choice -+ prompt "Spacemit SOCs platform" -+ help -+ choice Spacemit soc platform -+ -+ config SOC_SPACEMIT_K1 -+ bool "k1" -+ help -+ select Spacemit k1 Platform SOCs. -+ -+ config SOC_SPACEMIT_K2 -+ bool "k2" -+ help -+ select Spacemit k2 Platform SOCs. 
-+ -+endchoice -+ -+if SOC_SPACEMIT_K1 -+ -+choice -+ prompt "Spacemit K1 serial SOCs" -+ help -+ choice Spacemit K1 soc platform -+ -+ config SOC_SPACEMIT_K1PRO -+ bool "k1-pro" -+ select DW_APB_TIMER_OF -+ help -+ This enables support for Spacemit k1-pro Platform Hardware. -+ -+ config SOC_SPACEMIT_K1X -+ bool "k1-x" -+ help -+ This enables support for Spacemit k1-x Platform Hardware. -+endchoice -+ -+config SOC_SPACEMIT_K1_FPGA -+ bool "Spacemit K1 serial SoC FPGA platform" -+ default n -+ help -+ This enable FPGA platform for K1 SoCs. -+ -+endif -+ -+config BIND_THREAD_TO_AICORES -+ bool "enable bind ai cores when use AI instruction" -+ default y -+ help -+ This enable bind ai cores when use AI instruction. -+ -+endif -+ - endmenu # "SoC selection" -diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile -index 111111111111..222222222222 100644 ---- a/arch/riscv/Makefile -+++ b/arch/riscv/Makefile -@@ -56,6 +56,7 @@ riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima - riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima - riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd - riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c -+riscv-march-$(CONFIG_RISCV_ISA_V) := $(riscv-march-y)v - - ifdef CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC - KBUILD_CFLAGS += -Wa,-misa-spec=2.2 -@@ -66,11 +67,16 @@ endif - - # Check if the toolchain supports Zicbom extension - riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZICBOM) := $(riscv-march-y)_zicbom -+riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZICBOZ) := $(riscv-march-y)_zicboz -+riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZICBOP) := $(riscv-march-y)_zicbop - - # Check if the toolchain supports Zihintpause extension - riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE) := $(riscv-march-y)_zihintpause - --KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y)) -+# Remove F,D,V from isa string for all. 
Keep extensions between "fd" and "v" by -+# matching non-v and non-multi-letter extensions out with the filter ([^v_]*) -+KBUILD_CFLAGS += -march=$(shell echo $(riscv-march-y) | sed -E 's/(rv32ima|rv64ima)fd([^v_]*)v?/\1\2/') -+ - KBUILD_AFLAGS += -march=$(riscv-march-y) - - KBUILD_CFLAGS += -mno-save-restore -@@ -151,7 +157,7 @@ ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN),yy) - KBUILD_IMAGE := $(boot)/loader.bin - else - ifeq ($(CONFIG_EFI_ZBOOT),) --KBUILD_IMAGE := $(boot)/Image.gz -+KBUILD_IMAGE := $(boot)/Image.gz.itb - else - KBUILD_IMAGE := $(boot)/vmlinuz.efi - endif -@@ -159,14 +165,20 @@ endif - endif - BOOT_TARGETS := Image Image.gz loader loader.bin xipImage vmlinuz.efi - --all: $(notdir $(KBUILD_IMAGE)) -+# -+# extra files -+# -+include $(srctree)/arch/riscv/generic/Platform -+bootvars-y = ITS_INPUTS="$(its-y)" -+ -+all: $(notdir $(KBUILD_IMAGE)) Image.itb Image.gz.itb - - $(BOOT_TARGETS): vmlinux - $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ - @$(kecho) ' Kernel: $(boot)/$@ is ready' - --Image.%: Image -- $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ -+Image.%: Image Image.gz -+ $(Q)$(MAKE) $(build)=$(boot) $(bootvars-y) $(boot)/$@ - - install: KBUILD_IMAGE := $(boot)/Image - zinstall: KBUILD_IMAGE := $(boot)/Image.gz -diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile -index 111111111111..222222222222 100644 ---- a/arch/riscv/boot/Makefile -+++ b/arch/riscv/boot/Makefile -@@ -59,6 +59,56 @@ $(obj)/Image.lzo: $(obj)/Image FORCE - $(obj)/loader.bin: $(obj)/loader FORCE - $(call if_changed,objcopy) - -+ifdef CONFIG_32BIT -+ADDR_BITS := 32 -+ADDR_CELLS := 1 -+else -+ADDR_BITS := 64 -+ADDR_CELLS := 2 -+endif -+ -+IMAGE_LOAD_ADDRESS := $(CONFIG_IMAGE_LOAD_OFFSET) -+IMAGE_ENTRY_ADDRESS := $(CONFIG_IMAGE_LOAD_OFFSET) -+IMAGE_ALGO := crc32 -+ -+quiet_cmd_its_cat = CAT $@ -+ cmd_its_cat = cat $(real-prereqs) >$@ -+ -+$(obj)/Image.its.S: $(addprefix $(srctree)/arch/riscv/generic/,$(ITS_INPUTS)) FORCE -+ $(call if_changed,its_cat) -+ -+quiet_cmd_cpp_its_S = ITS $@ -+ cmd_cpp_its_S = $(CPP) -P -C -o $@ $< \ -+ -DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \ -+ -DIMAGE_COMPRESSION="\"$(2)\"" \ -+ -DIMAGE_CHECK_ALGORITHM="\"$(3)\"" \ -+ -DIMAGE_BINARY="\"$(4)\"" \ -+ -DIMAGE_LOAD_ADDRESS=$(IMAGE_LOAD_ADDRESS) \ -+ -DIMAGE_ENTRY_ADDRESS=$(IMAGE_ENTRY_ADDRESS) \ -+ -DADDR_BITS=$(ADDR_BITS) \ -+ -DADDR_CELLS=$(ADDR_CELLS) -+ -+$(obj)/Image.its: $(obj)/Image.its.S $(obj)/Image FORCE -+ $(call if_changed,cpp_its_S,none,$(IMAGE_ALGO),Image) -+ -+$(obj)/Image.gz.its: $(obj)/Image.its.S $(obj)/Image.gz FORCE -+ $(call if_changed,cpp_its_S,gzip,$(IMAGE_ALGO),Image.gz) -+ -+quiet_cmd_itb-image = ITB $@ -+ cmd_itb-image = \ -+ env PATH="$(objtree)/scripts/dtc:$(PATH)" \ -+ $(BASH) $(MKIMAGE) \ -+ -D "-I dts -O dtb -p 500 \ -+ --include $(objtree)/arch/riscv \ -+ --warning no-unit_address_vs_reg" \ -+ -f $(2) $@ -+ -+$(obj)/Image.itb: $(obj)/Image.its $(obj)/Image FORCE -+ $(call if_changed,itb-image,$<) -+ -+$(obj)/Image.%.itb: $(obj)/Image.%.its $(obj)/Image.% FORCE -+ $(call if_changed,itb-image,$<) -+ - EFI_ZBOOT_PAYLOAD := Image - EFI_ZBOOT_BFD_TARGET := elf$(BITS)-littleriscv - EFI_ZBOOT_MACH_TYPE := RISCV$(BITS) -diff --git a/arch/riscv/generic/Image.its.S b/arch/riscv/generic/Image.its.S -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/arch/riscv/generic/Image.its.S -@@ -0,0 +1,32 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/dts-v1/; -+ -+/ { -+ description = KERNEL_NAME; -+ #address-cells = ; -+ -+ images { -+ kernel { -+ description = KERNEL_NAME; -+ data 
= /incbin/(IMAGE_BINARY); -+ type = "kernel"; -+ arch = "riscv"; -+ os = "linux"; -+ compression = IMAGE_COMPRESSION; -+ load = /bits/ ADDR_BITS ; -+ entry = /bits/ ADDR_BITS ; -+ hash { -+ algo = IMAGE_CHECK_ALGORITHM; -+ }; -+ }; -+ }; -+ -+ configurations { -+ default = "conf-default"; -+ -+ conf-default { -+ description = "Generic Linux kernel"; -+ kernel = "kernel"; -+ }; -+ }; -+}; -diff --git a/arch/riscv/generic/Platform b/arch/riscv/generic/Platform -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/arch/riscv/generic/Platform -@@ -0,0 +1,15 @@ -+# -+# SPDX-License-Identifier: GPL-2.0 -+# -+# Copyright (C) 2024 Spacemit -+# -+# This software is licensed under the terms of the GNU General Public -+# License version 2, as published by the Free Software Foundation, and -+# may be copied, distributed, and modified under those terms. -+# -+# This program is distributed in the hope that it will be useful, -+# but WITHOUT ANY WARRANTY; without even the implied warranty of -+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+# GNU General Public License for more details. -+ -+its-y := Image.its.S -diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/asm/cacheflush.h -+++ b/arch/riscv/include/asm/cacheflush.h -@@ -30,6 +30,10 @@ static inline void flush_dcache_page(struct page *page) - #define flush_icache_user_page(vma, pg, addr, len) \ - flush_icache_mm(vma->vm_mm, 0) - -+#ifdef CONFIG_64BIT -+#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end) -+#endif -+ - #ifndef CONFIG_SMP - - #define flush_icache_all() local_flush_icache_all() -diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/asm/csr.h -+++ b/arch/riscv/include/asm/csr.h -@@ -24,16 +24,24 @@ - #define SR_FS_CLEAN _AC(0x00004000, UL) - #define SR_FS_DIRTY _AC(0x00006000, UL) - -+#define SR_VS _AC(0x00000600, UL) /* Vector Status */ -+#define SR_VS_OFF _AC(0x00000000, UL) -+#define SR_VS_INITIAL _AC(0x00000200, UL) -+#define SR_VS_CLEAN _AC(0x00000400, UL) -+#define SR_VS_DIRTY _AC(0x00000600, UL) -+ - #define SR_XS _AC(0x00018000, UL) /* Extension Status */ - #define SR_XS_OFF _AC(0x00000000, UL) - #define SR_XS_INITIAL _AC(0x00008000, UL) - #define SR_XS_CLEAN _AC(0x00010000, UL) - #define SR_XS_DIRTY _AC(0x00018000, UL) - -+#define SR_FS_VS (SR_FS | SR_VS) /* Vector and Floating-Point Unit */ -+ - #ifndef CONFIG_64BIT --#define SR_SD _AC(0x80000000, UL) /* FS/XS dirty */ -+#define SR_SD _AC(0x80000000, UL) /* FS/VS/XS dirty */ - #else --#define SR_SD _AC(0x8000000000000000, UL) /* FS/XS dirty */ -+#define SR_SD _AC(0x8000000000000000, UL) /* FS/VS/XS dirty */ - #endif - - #ifdef CONFIG_64BIT -@@ -240,6 +248,7 @@ - #define CSR_SIE 0x104 - #define CSR_STVEC 0x105 - #define CSR_SCOUNTEREN 0x106 -+#define CSR_SENVCFG 0x10a - #define CSR_SSCRATCH 0x140 - #define CSR_SEPC 0x141 - #define CSR_SCAUSE 0x142 -@@ -297,6 +306,18 @@ - #define CSR_MIMPID 0xf13 - #define CSR_MHARTID 0xf14 - -+#define CSR_VSTART 0x8 -+#define CSR_VCSR 0xf -+#define CSR_VL 0xc20 -+#define CSR_VTYPE 0xc21 -+#define CSR_VLENB 0xc22 -+ -+#ifdef CONFIG_SOC_SPACEMIT_K1X -+/* TCM enable register */ -+#define CSR_TCMCFG 0x5db -+#define TCM_EN _AC(0x00000001, UL) /* TCM Access Enable */ -+#endif -+ - #ifdef CONFIG_RISCV_M_MODE - # define CSR_STATUS CSR_MSTATUS - # define CSR_IE CSR_MIE -diff --git 
a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/asm/elf.h -+++ b/arch/riscv/include/asm/elf.h -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - - /* - * These are used to set parameters in the core dumps. -@@ -30,6 +31,10 @@ - - #define ELF_DATA ELFDATA2LSB - -+#define ELF_PLAT_INIT(_r, load_addr) do { \ -+ riscv_v_csr_init(); \ -+} while (0) -+ - /* - * This is used to ensure we don't load something for the wrong architecture. - */ -@@ -103,6 +108,15 @@ do { \ - get_cache_size(3, CACHE_TYPE_UNIFIED)); \ - NEW_AUX_ENT(AT_L3_CACHEGEOMETRY, \ - get_cache_geometry(3, CACHE_TYPE_UNIFIED)); \ -+ /* \ -+ * Should always be nonzero unless there's a kernel bug. \ -+ * If we haven't determined a sensible value to give to \ -+ * userspace, omit the entry: \ -+ */ \ -+ if (likely(signal_minsigstksz)) \ -+ NEW_AUX_ENT(AT_MINSIGSTKSZ, signal_minsigstksz); \ -+ else \ -+ NEW_AUX_ENT(AT_IGNORE, 0); \ - } while (0) - #define ARCH_HAS_SETUP_ADDITIONAL_PAGES - struct linux_binprm; -diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/asm/errata_list.h -+++ b/arch/riscv/include/asm/errata_list.h -@@ -22,7 +22,9 @@ - - #define CPUFEATURE_SVPBMT 0 - #define CPUFEATURE_ZICBOM 1 --#define CPUFEATURE_NUMBER 2 -+#define CPUFEATURE_ZICBOZ 2 -+#define CPUFEATURE_ZICBOP 3 -+#define CPUFEATURE_NUMBER 4 - - #ifdef __ASSEMBLY__ - -diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/asm/hwcap.h -+++ b/arch/riscv/include/asm/hwcap.h -@@ -35,6 +35,7 @@ extern unsigned long elf_hwcap; - #define RISCV_ISA_EXT_m ('m' - 'a') - #define RISCV_ISA_EXT_s ('s' - 'a') - #define RISCV_ISA_EXT_u ('u' - 'a') -+#define RISCV_ISA_EXT_v ('v' - 'a') - - /* - * Increse this to higher value as kernel support more ISA extensions. 
-@@ -56,6 +57,8 @@ enum riscv_isa_ext_id { - RISCV_ISA_EXT_SSCOFPMF = RISCV_ISA_EXT_BASE, - RISCV_ISA_EXT_SVPBMT, - RISCV_ISA_EXT_ZICBOM, -+ RISCV_ISA_EXT_ZICBOZ, -+ RISCV_ISA_EXT_ZICBOP, - RISCV_ISA_EXT_ZIHINTPAUSE, - RISCV_ISA_EXT_SSTC, - RISCV_ISA_EXT_SVINVAL, -@@ -107,6 +110,8 @@ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit); - #define riscv_isa_extension_available(isa_bitmap, ext) \ - __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) - -+void riscv_user_isa_enable(void); -+ - #endif - - #endif /* _ASM_RISCV_HWCAP_H */ -diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/arch/riscv/include/asm/insn.h -@@ -0,0 +1,410 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* -+ * Copyright (C) 2020 SiFive -+ */ -+ -+#ifndef _ASM_RISCV_INSN_H -+#define _ASM_RISCV_INSN_H -+ -+#include -+ -+#define RV_INSN_FUNCT3_MASK GENMASK(14, 12) -+#define RV_INSN_FUNCT3_OPOFF 12 -+#define RV_INSN_OPCODE_MASK GENMASK(6, 0) -+#define RV_INSN_OPCODE_OPOFF 0 -+#define RV_INSN_FUNCT12_OPOFF 20 -+ -+#define RV_ENCODE_FUNCT3(f_) (RVG_FUNCT3_##f_ << RV_INSN_FUNCT3_OPOFF) -+#define RV_ENCODE_FUNCT12(f_) (RVG_FUNCT12_##f_ << RV_INSN_FUNCT12_OPOFF) -+ -+/* The bit field of immediate value in I-type instruction */ -+#define RV_I_IMM_SIGN_OPOFF 31 -+#define RV_I_IMM_11_0_OPOFF 20 -+#define RV_I_IMM_SIGN_OFF 12 -+#define RV_I_IMM_11_0_OFF 0 -+#define RV_I_IMM_11_0_MASK GENMASK(11, 0) -+ -+/* The bit field of immediate value in J-type instruction */ -+#define RV_J_IMM_SIGN_OPOFF 31 -+#define RV_J_IMM_10_1_OPOFF 21 -+#define RV_J_IMM_11_OPOFF 20 -+#define RV_J_IMM_19_12_OPOFF 12 -+#define RV_J_IMM_SIGN_OFF 20 -+#define RV_J_IMM_10_1_OFF 1 -+#define RV_J_IMM_11_OFF 11 -+#define RV_J_IMM_19_12_OFF 12 -+#define RV_J_IMM_10_1_MASK GENMASK(9, 0) -+#define RV_J_IMM_11_MASK GENMASK(0, 0) -+#define RV_J_IMM_19_12_MASK GENMASK(7, 0) -+ -+/* -+ * U-type IMMs contain the upper 20bits [31:20] of an immediate with -+ * the rest filled in by zeros, so no shifting required. Similarly, -+ * bit31 contains the signed state, so no sign extension necessary. 
-+ */ -+#define RV_U_IMM_SIGN_OPOFF 31 -+#define RV_U_IMM_31_12_OPOFF 0 -+#define RV_U_IMM_31_12_MASK GENMASK(31, 12) -+ -+/* The bit field of immediate value in B-type instruction */ -+#define RV_B_IMM_SIGN_OPOFF 31 -+#define RV_B_IMM_10_5_OPOFF 25 -+#define RV_B_IMM_4_1_OPOFF 8 -+#define RV_B_IMM_11_OPOFF 7 -+#define RV_B_IMM_SIGN_OFF 12 -+#define RV_B_IMM_10_5_OFF 5 -+#define RV_B_IMM_4_1_OFF 1 -+#define RV_B_IMM_11_OFF 11 -+#define RV_B_IMM_10_5_MASK GENMASK(5, 0) -+#define RV_B_IMM_4_1_MASK GENMASK(3, 0) -+#define RV_B_IMM_11_MASK GENMASK(0, 0) -+ -+/* The register offset in RVG instruction */ -+#define RVG_RS1_OPOFF 15 -+#define RVG_RS2_OPOFF 20 -+#define RVG_RD_OPOFF 7 -+#define RVG_RD_MASK GENMASK(4, 0) -+ -+/* The bit field of immediate value in RVC J instruction */ -+#define RVC_J_IMM_SIGN_OPOFF 12 -+#define RVC_J_IMM_4_OPOFF 11 -+#define RVC_J_IMM_9_8_OPOFF 9 -+#define RVC_J_IMM_10_OPOFF 8 -+#define RVC_J_IMM_6_OPOFF 7 -+#define RVC_J_IMM_7_OPOFF 6 -+#define RVC_J_IMM_3_1_OPOFF 3 -+#define RVC_J_IMM_5_OPOFF 2 -+#define RVC_J_IMM_SIGN_OFF 11 -+#define RVC_J_IMM_4_OFF 4 -+#define RVC_J_IMM_9_8_OFF 8 -+#define RVC_J_IMM_10_OFF 10 -+#define RVC_J_IMM_6_OFF 6 -+#define RVC_J_IMM_7_OFF 7 -+#define RVC_J_IMM_3_1_OFF 1 -+#define RVC_J_IMM_5_OFF 5 -+#define RVC_J_IMM_4_MASK GENMASK(0, 0) -+#define RVC_J_IMM_9_8_MASK GENMASK(1, 0) -+#define RVC_J_IMM_10_MASK GENMASK(0, 0) -+#define RVC_J_IMM_6_MASK GENMASK(0, 0) -+#define RVC_J_IMM_7_MASK GENMASK(0, 0) -+#define RVC_J_IMM_3_1_MASK GENMASK(2, 0) -+#define RVC_J_IMM_5_MASK GENMASK(0, 0) -+ -+/* The bit field of immediate value in RVC B instruction */ -+#define RVC_B_IMM_SIGN_OPOFF 12 -+#define RVC_B_IMM_4_3_OPOFF 10 -+#define RVC_B_IMM_7_6_OPOFF 5 -+#define RVC_B_IMM_2_1_OPOFF 3 -+#define RVC_B_IMM_5_OPOFF 2 -+#define RVC_B_IMM_SIGN_OFF 8 -+#define RVC_B_IMM_4_3_OFF 3 -+#define RVC_B_IMM_7_6_OFF 6 -+#define RVC_B_IMM_2_1_OFF 1 -+#define RVC_B_IMM_5_OFF 5 -+#define RVC_B_IMM_4_3_MASK GENMASK(1, 0) -+#define RVC_B_IMM_7_6_MASK GENMASK(1, 0) -+#define RVC_B_IMM_2_1_MASK GENMASK(1, 0) -+#define RVC_B_IMM_5_MASK GENMASK(0, 0) -+ -+#define RVC_INSN_FUNCT4_MASK GENMASK(15, 12) -+#define RVC_INSN_FUNCT4_OPOFF 12 -+#define RVC_INSN_FUNCT3_MASK GENMASK(15, 13) -+#define RVC_INSN_FUNCT3_OPOFF 13 -+#define RVC_INSN_J_RS2_MASK GENMASK(6, 2) -+#define RVC_INSN_OPCODE_MASK GENMASK(1, 0) -+#define RVC_ENCODE_FUNCT3(f_) (RVC_FUNCT3_##f_ << RVC_INSN_FUNCT3_OPOFF) -+#define RVC_ENCODE_FUNCT4(f_) (RVC_FUNCT4_##f_ << RVC_INSN_FUNCT4_OPOFF) -+ -+/* The register offset in RVC op=C0 instruction */ -+#define RVC_C0_RS1_OPOFF 7 -+#define RVC_C0_RS2_OPOFF 2 -+#define RVC_C0_RD_OPOFF 2 -+ -+/* The register offset in RVC op=C1 instruction */ -+#define RVC_C1_RS1_OPOFF 7 -+#define RVC_C1_RS2_OPOFF 2 -+#define RVC_C1_RD_OPOFF 7 -+ -+/* The register offset in RVC op=C2 instruction */ -+#define RVC_C2_RS1_OPOFF 7 -+#define RVC_C2_RS2_OPOFF 2 -+#define RVC_C2_RD_OPOFF 7 -+ -+/* parts of opcode for RVG*/ -+#define RVG_OPCODE_FENCE 0x0f -+#define RVG_OPCODE_AUIPC 0x17 -+#define RVG_OPCODE_BRANCH 0x63 -+#define RVG_OPCODE_JALR 0x67 -+#define RVG_OPCODE_JAL 0x6f -+#define RVG_OPCODE_SYSTEM 0x73 -+#define RVG_SYSTEM_CSR_OFF 20 -+#define RVG_SYSTEM_CSR_MASK GENMASK(12, 0) -+ -+/* parts of opcode for RVF, RVD and RVQ */ -+#define RVFDQ_FL_FS_WIDTH_OFF 12 -+#define RVFDQ_FL_FS_WIDTH_MASK GENMASK(3, 0) -+#define RVFDQ_FL_FS_WIDTH_W 2 -+#define RVFDQ_FL_FS_WIDTH_D 3 -+#define RVFDQ_LS_FS_WIDTH_Q 4 -+#define RVFDQ_OPCODE_FL 0x07 -+#define RVFDQ_OPCODE_FS 0x27 -+ -+/* parts of opcode 
for RVV */ -+#define RVV_OPCODE_VECTOR 0x57 -+#define RVV_VL_VS_WIDTH_8 0 -+#define RVV_VL_VS_WIDTH_16 5 -+#define RVV_VL_VS_WIDTH_32 6 -+#define RVV_VL_VS_WIDTH_64 7 -+#define RVV_OPCODE_VL RVFDQ_OPCODE_FL -+#define RVV_OPCODE_VS RVFDQ_OPCODE_FS -+ -+/* parts of opcode for RVC*/ -+#define RVC_OPCODE_C0 0x0 -+#define RVC_OPCODE_C1 0x1 -+#define RVC_OPCODE_C2 0x2 -+ -+/* parts of funct3 code for I, M, A extension*/ -+#define RVG_FUNCT3_JALR 0x0 -+#define RVG_FUNCT3_BEQ 0x0 -+#define RVG_FUNCT3_BNE 0x1 -+#define RVG_FUNCT3_BLT 0x4 -+#define RVG_FUNCT3_BGE 0x5 -+#define RVG_FUNCT3_BLTU 0x6 -+#define RVG_FUNCT3_BGEU 0x7 -+ -+/* parts of funct3 code for C extension*/ -+#define RVC_FUNCT3_C_BEQZ 0x6 -+#define RVC_FUNCT3_C_BNEZ 0x7 -+#define RVC_FUNCT3_C_J 0x5 -+#define RVC_FUNCT3_C_JAL 0x1 -+#define RVC_FUNCT4_C_JR 0x8 -+#define RVC_FUNCT4_C_JALR 0x9 -+#define RVC_FUNCT4_C_EBREAK 0x9 -+ -+#define RVG_FUNCT12_EBREAK 0x1 -+#define RVG_FUNCT12_SRET 0x102 -+ -+#define RVG_MATCH_AUIPC (RVG_OPCODE_AUIPC) -+#define RVG_MATCH_JALR (RV_ENCODE_FUNCT3(JALR) | RVG_OPCODE_JALR) -+#define RVG_MATCH_JAL (RVG_OPCODE_JAL) -+#define RVG_MATCH_FENCE (RVG_OPCODE_FENCE) -+#define RVG_MATCH_BEQ (RV_ENCODE_FUNCT3(BEQ) | RVG_OPCODE_BRANCH) -+#define RVG_MATCH_BNE (RV_ENCODE_FUNCT3(BNE) | RVG_OPCODE_BRANCH) -+#define RVG_MATCH_BLT (RV_ENCODE_FUNCT3(BLT) | RVG_OPCODE_BRANCH) -+#define RVG_MATCH_BGE (RV_ENCODE_FUNCT3(BGE) | RVG_OPCODE_BRANCH) -+#define RVG_MATCH_BLTU (RV_ENCODE_FUNCT3(BLTU) | RVG_OPCODE_BRANCH) -+#define RVG_MATCH_BGEU (RV_ENCODE_FUNCT3(BGEU) | RVG_OPCODE_BRANCH) -+#define RVG_MATCH_EBREAK (RV_ENCODE_FUNCT12(EBREAK) | RVG_OPCODE_SYSTEM) -+#define RVG_MATCH_SRET (RV_ENCODE_FUNCT12(SRET) | RVG_OPCODE_SYSTEM) -+#define RVC_MATCH_C_BEQZ (RVC_ENCODE_FUNCT3(C_BEQZ) | RVC_OPCODE_C1) -+#define RVC_MATCH_C_BNEZ (RVC_ENCODE_FUNCT3(C_BNEZ) | RVC_OPCODE_C1) -+#define RVC_MATCH_C_J (RVC_ENCODE_FUNCT3(C_J) | RVC_OPCODE_C1) -+#define RVC_MATCH_C_JAL (RVC_ENCODE_FUNCT3(C_JAL) | RVC_OPCODE_C1) -+#define RVC_MATCH_C_JR (RVC_ENCODE_FUNCT4(C_JR) | RVC_OPCODE_C2) -+#define RVC_MATCH_C_JALR (RVC_ENCODE_FUNCT4(C_JALR) | RVC_OPCODE_C2) -+#define RVC_MATCH_C_EBREAK (RVC_ENCODE_FUNCT4(C_EBREAK) | RVC_OPCODE_C2) -+ -+#define RVG_MASK_AUIPC (RV_INSN_OPCODE_MASK) -+#define RVG_MASK_JALR (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK) -+#define RVG_MASK_JAL (RV_INSN_OPCODE_MASK) -+#define RVG_MASK_FENCE (RV_INSN_OPCODE_MASK) -+#define RVC_MASK_C_JALR (RVC_INSN_FUNCT4_MASK | RVC_INSN_J_RS2_MASK | RVC_INSN_OPCODE_MASK) -+#define RVC_MASK_C_JR (RVC_INSN_FUNCT4_MASK | RVC_INSN_J_RS2_MASK | RVC_INSN_OPCODE_MASK) -+#define RVC_MASK_C_JAL (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK) -+#define RVC_MASK_C_J (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK) -+#define RVG_MASK_BEQ (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK) -+#define RVG_MASK_BNE (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK) -+#define RVG_MASK_BLT (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK) -+#define RVG_MASK_BGE (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK) -+#define RVG_MASK_BLTU (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK) -+#define RVG_MASK_BGEU (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK) -+#define RVC_MASK_C_BEQZ (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK) -+#define RVC_MASK_C_BNEZ (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK) -+#define RVC_MASK_C_EBREAK 0xffff -+#define RVG_MASK_EBREAK 0xffffffff -+#define RVG_MASK_SRET 0xffffffff -+ -+#define __INSN_LENGTH_MASK _UL(0x3) -+#define __INSN_LENGTH_GE_32 _UL(0x3) -+#define __INSN_OPCODE_MASK _UL(0x7F) -+#define __INSN_BRANCH_OPCODE 
_UL(RVG_OPCODE_BRANCH) -+ -+#define __RISCV_INSN_FUNCS(name, mask, val) \ -+static __always_inline bool riscv_insn_is_##name(u32 code) \ -+{ \ -+ BUILD_BUG_ON(~(mask) & (val)); \ -+ return (code & (mask)) == (val); \ -+} \ -+ -+#if __riscv_xlen == 32 -+/* C.JAL is an RV32C-only instruction */ -+__RISCV_INSN_FUNCS(c_jal, RVC_MASK_C_JAL, RVC_MATCH_C_JAL) -+#else -+#define riscv_insn_is_c_jal(opcode) 0 -+#endif -+__RISCV_INSN_FUNCS(auipc, RVG_MASK_AUIPC, RVG_MATCH_AUIPC) -+__RISCV_INSN_FUNCS(jalr, RVG_MASK_JALR, RVG_MATCH_JALR) -+__RISCV_INSN_FUNCS(jal, RVG_MASK_JAL, RVG_MATCH_JAL) -+__RISCV_INSN_FUNCS(c_jr, RVC_MASK_C_JR, RVC_MATCH_C_JR) -+__RISCV_INSN_FUNCS(c_jalr, RVC_MASK_C_JALR, RVC_MATCH_C_JALR) -+__RISCV_INSN_FUNCS(c_j, RVC_MASK_C_J, RVC_MATCH_C_J) -+__RISCV_INSN_FUNCS(beq, RVG_MASK_BEQ, RVG_MATCH_BEQ) -+__RISCV_INSN_FUNCS(bne, RVG_MASK_BNE, RVG_MATCH_BNE) -+__RISCV_INSN_FUNCS(blt, RVG_MASK_BLT, RVG_MATCH_BLT) -+__RISCV_INSN_FUNCS(bge, RVG_MASK_BGE, RVG_MATCH_BGE) -+__RISCV_INSN_FUNCS(bltu, RVG_MASK_BLTU, RVG_MATCH_BLTU) -+__RISCV_INSN_FUNCS(bgeu, RVG_MASK_BGEU, RVG_MATCH_BGEU) -+__RISCV_INSN_FUNCS(c_beqz, RVC_MASK_C_BEQZ, RVC_MATCH_C_BEQZ) -+__RISCV_INSN_FUNCS(c_bnez, RVC_MASK_C_BNEZ, RVC_MATCH_C_BNEZ) -+__RISCV_INSN_FUNCS(c_ebreak, RVC_MASK_C_EBREAK, RVC_MATCH_C_EBREAK) -+__RISCV_INSN_FUNCS(ebreak, RVG_MASK_EBREAK, RVG_MATCH_EBREAK) -+__RISCV_INSN_FUNCS(sret, RVG_MASK_SRET, RVG_MATCH_SRET) -+__RISCV_INSN_FUNCS(fence, RVG_MASK_FENCE, RVG_MATCH_FENCE); -+ -+/* special case to catch _any_ system instruction */ -+static __always_inline bool riscv_insn_is_system(u32 code) -+{ -+ return (code & RV_INSN_OPCODE_MASK) == RVG_OPCODE_SYSTEM; -+} -+ -+/* special case to catch _any_ branch instruction */ -+static __always_inline bool riscv_insn_is_branch(u32 code) -+{ -+ return (code & RV_INSN_OPCODE_MASK) == RVG_OPCODE_BRANCH; -+} -+ -+#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1)) -+#define RVC_IMM_SIGN(x) (-(((x) >> 12) & 1)) -+#define RV_X(X, s, mask) (((X) >> (s)) & (mask)) -+#define RVC_X(X, s, mask) RV_X(X, s, mask) -+ -+#define RV_EXTRACT_RD_REG(x) \ -+ ({typeof(x) x_ = (x); \ -+ (RV_X(x_, RVG_RD_OPOFF, RVG_RD_MASK)); }) -+ -+#define RV_EXTRACT_UTYPE_IMM(x) \ -+ ({typeof(x) x_ = (x); \ -+ (RV_X(x_, RV_U_IMM_31_12_OPOFF, RV_U_IMM_31_12_MASK)); }) -+ -+#define RV_EXTRACT_JTYPE_IMM(x) \ -+ ({typeof(x) x_ = (x); \ -+ (RV_X(x_, RV_J_IMM_10_1_OPOFF, RV_J_IMM_10_1_MASK) << RV_J_IMM_10_1_OFF) | \ -+ (RV_X(x_, RV_J_IMM_11_OPOFF, RV_J_IMM_11_MASK) << RV_J_IMM_11_OFF) | \ -+ (RV_X(x_, RV_J_IMM_19_12_OPOFF, RV_J_IMM_19_12_MASK) << RV_J_IMM_19_12_OFF) | \ -+ (RV_IMM_SIGN(x_) << RV_J_IMM_SIGN_OFF); }) -+ -+#define RV_EXTRACT_ITYPE_IMM(x) \ -+ ({typeof(x) x_ = (x); \ -+ (RV_X(x_, RV_I_IMM_11_0_OPOFF, RV_I_IMM_11_0_MASK)) | \ -+ (RV_IMM_SIGN(x_) << RV_I_IMM_SIGN_OFF); }) -+ -+#define RV_EXTRACT_BTYPE_IMM(x) \ -+ ({typeof(x) x_ = (x); \ -+ (RV_X(x_, RV_B_IMM_4_1_OPOFF, RV_B_IMM_4_1_MASK) << RV_B_IMM_4_1_OFF) | \ -+ (RV_X(x_, RV_B_IMM_10_5_OPOFF, RV_B_IMM_10_5_MASK) << RV_B_IMM_10_5_OFF) | \ -+ (RV_X(x_, RV_B_IMM_11_OPOFF, RV_B_IMM_11_MASK) << RV_B_IMM_11_OFF) | \ -+ (RV_IMM_SIGN(x_) << RV_B_IMM_SIGN_OFF); }) -+ -+#define RVC_EXTRACT_JTYPE_IMM(x) \ -+ ({typeof(x) x_ = (x); \ -+ (RVC_X(x_, RVC_J_IMM_3_1_OPOFF, RVC_J_IMM_3_1_MASK) << RVC_J_IMM_3_1_OFF) | \ -+ (RVC_X(x_, RVC_J_IMM_4_OPOFF, RVC_J_IMM_4_MASK) << RVC_J_IMM_4_OFF) | \ -+ (RVC_X(x_, RVC_J_IMM_5_OPOFF, RVC_J_IMM_5_MASK) << RVC_J_IMM_5_OFF) | \ -+ (RVC_X(x_, RVC_J_IMM_6_OPOFF, RVC_J_IMM_6_MASK) << RVC_J_IMM_6_OFF) | \ -+ (RVC_X(x_, RVC_J_IMM_7_OPOFF, 
RVC_J_IMM_7_MASK) << RVC_J_IMM_7_OFF) | \ -+ (RVC_X(x_, RVC_J_IMM_9_8_OPOFF, RVC_J_IMM_9_8_MASK) << RVC_J_IMM_9_8_OFF) | \ -+ (RVC_X(x_, RVC_J_IMM_10_OPOFF, RVC_J_IMM_10_MASK) << RVC_J_IMM_10_OFF) | \ -+ (RVC_IMM_SIGN(x_) << RVC_J_IMM_SIGN_OFF); }) -+ -+#define RVC_EXTRACT_BTYPE_IMM(x) \ -+ ({typeof(x) x_ = (x); \ -+ (RVC_X(x_, RVC_B_IMM_2_1_OPOFF, RVC_B_IMM_2_1_MASK) << RVC_B_IMM_2_1_OFF) | \ -+ (RVC_X(x_, RVC_B_IMM_4_3_OPOFF, RVC_B_IMM_4_3_MASK) << RVC_B_IMM_4_3_OFF) | \ -+ (RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \ -+ (RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \ -+ (RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); }) -+ -+#define RVG_EXTRACT_SYSTEM_CSR(x) \ -+ ({typeof(x) x_ = (x); RV_X(x_, RVG_SYSTEM_CSR_OFF, RVG_SYSTEM_CSR_MASK); }) -+ -+#define RVFDQ_EXTRACT_FL_FS_WIDTH(x) \ -+ ({typeof(x) x_ = (x); RV_X(x_, RVFDQ_FL_FS_WIDTH_OFF, \ -+ RVFDQ_FL_FS_WIDTH_MASK); }) -+ -+#define RVV_EXRACT_VL_VS_WIDTH(x) RVFDQ_EXTRACT_FL_FS_WIDTH(x) -+ -+/* -+ * Get the immediate from a J-type instruction. -+ * -+ * @insn: instruction to process -+ * Return: immediate -+ */ -+static inline s32 riscv_insn_extract_jtype_imm(u32 insn) -+{ -+ return RV_EXTRACT_JTYPE_IMM(insn); -+} -+ -+/* -+ * Update a J-type instruction with an immediate value. -+ * -+ * @insn: pointer to the jtype instruction -+ * @imm: the immediate to insert into the instruction -+ */ -+static inline void riscv_insn_insert_jtype_imm(u32 *insn, s32 imm) -+{ -+ /* drop the old IMMs, all jal IMM bits sit at 31:12 */ -+ *insn &= ~GENMASK(31, 12); -+ *insn |= (RV_X(imm, RV_J_IMM_10_1_OFF, RV_J_IMM_10_1_MASK) << RV_J_IMM_10_1_OPOFF) | -+ (RV_X(imm, RV_J_IMM_11_OFF, RV_J_IMM_11_MASK) << RV_J_IMM_11_OPOFF) | -+ (RV_X(imm, RV_J_IMM_19_12_OFF, RV_J_IMM_19_12_MASK) << RV_J_IMM_19_12_OPOFF) | -+ (RV_X(imm, RV_J_IMM_SIGN_OFF, 1) << RV_J_IMM_SIGN_OPOFF); -+} -+ -+/* -+ * Put together one immediate from a U-type and I-type instruction pair. -+ * -+ * The U-type contains an upper immediate, meaning bits[31:12] with [11:0] -+ * being zero, while the I-type contains a 12bit immediate. -+ * Combined these can encode larger 32bit values and are used for example -+ * in auipc + jalr pairs to allow larger jumps. -+ * -+ * @utype_insn: instruction containing the upper immediate -+ * @itype_insn: instruction -+ * Return: combined immediate -+ */ -+static inline s32 riscv_insn_extract_utype_itype_imm(u32 utype_insn, u32 itype_insn) -+{ -+ s32 imm; -+ -+ imm = RV_EXTRACT_UTYPE_IMM(utype_insn); -+ imm += RV_EXTRACT_ITYPE_IMM(itype_insn); -+ -+ return imm; -+} -+ -+/* -+ * Update a set of two instructions (U-type + I-type) with an immediate value. -+ * -+ * Used for example in auipc+jalrs pairs the U-type instructions contains -+ * a 20bit upper immediate representing bits[31:12], while the I-type -+ * instruction contains a 12bit immediate representing bits[11:0]. -+ * -+ * This also takes into account that both separate immediates are -+ * considered as signed values, so if the I-type immediate becomes -+ * negative (BIT(11) set) the U-type part gets adjusted. 
-+ * -+ * @utype_insn: pointer to the utype instruction of the pair -+ * @itype_insn: pointer to the itype instruction of the pair -+ * @imm: the immediate to insert into the two instructions -+ */ -+static inline void riscv_insn_insert_utype_itype_imm(u32 *utype_insn, u32 *itype_insn, s32 imm) -+{ -+ /* drop possible old IMM values */ -+ *utype_insn &= ~(RV_U_IMM_31_12_MASK); -+ *itype_insn &= ~(RV_I_IMM_11_0_MASK << RV_I_IMM_11_0_OPOFF); -+ -+ /* add the adapted IMMs */ -+ *utype_insn |= (imm & RV_U_IMM_31_12_MASK) + ((imm & BIT(11)) << 1); -+ *itype_insn |= ((imm & RV_I_IMM_11_0_MASK) << RV_I_IMM_11_0_OPOFF); -+} -+#endif /* _ASM_RISCV_INSN_H */ -diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/asm/kvm_host.h -+++ b/arch/riscv/include/asm/kvm_host.h -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -144,6 +145,7 @@ struct kvm_cpu_context { - unsigned long sstatus; - unsigned long hstatus; - union __riscv_fp_state fp; -+ struct __riscv_v_ext_state vector; - }; - - struct kvm_vcpu_csr { -diff --git a/arch/riscv/include/asm/kvm_vcpu_vector.h b/arch/riscv/include/asm/kvm_vcpu_vector.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/arch/riscv/include/asm/kvm_vcpu_vector.h -@@ -0,0 +1,82 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* -+ * Copyright (C) 2022 SiFive -+ * -+ * Authors: -+ * Vincent Chen -+ * Greentime Hu -+ */ -+ -+#ifndef __KVM_VCPU_RISCV_VECTOR_H -+#define __KVM_VCPU_RISCV_VECTOR_H -+ -+#include -+ -+#ifdef CONFIG_RISCV_ISA_V -+#include -+#include -+ -+static __always_inline void __kvm_riscv_vector_save(struct kvm_cpu_context *context) -+{ -+ __riscv_v_vstate_save(&context->vector, context->vector.datap); -+} -+ -+static __always_inline void __kvm_riscv_vector_restore(struct kvm_cpu_context *context) -+{ -+ __riscv_v_vstate_restore(&context->vector, context->vector.datap); -+} -+ -+void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu); -+void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx, -+ unsigned long *isa); -+void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx, -+ unsigned long *isa); -+void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx); -+void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx); -+int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu, -+ struct kvm_cpu_context *cntx); -+void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu); -+#else -+ -+struct kvm_cpu_context; -+ -+static inline void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu) -+{ -+} -+ -+static inline void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx, -+ unsigned long *isa) -+{ -+} -+ -+static inline void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx, -+ unsigned long *isa) -+{ -+} -+ -+static inline void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx) -+{ -+} -+ -+static inline void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx) -+{ -+} -+ -+static inline int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu, -+ struct kvm_cpu_context *cntx) -+{ -+ return 0; -+} -+ -+static inline void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu) -+{ -+} -+#endif -+ -+int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu, -+ const struct kvm_one_reg *reg, -+ unsigned long rtype); -+int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu, -+ const struct 
kvm_one_reg *reg, -+ unsigned long rtype); -+#endif -diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/asm/mmio.h -+++ b/arch/riscv/include/asm/mmio.h -@@ -148,4 +148,42 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) - #define writeq(v, c) ({ __io_bw(); writeq_cpu((v), (c)); __io_aw(); }) - #endif - -+ -+#ifdef CONFIG_SOC_SPACEMIT_K1X -+/* -+ on the spacemit k1x platform, there is some i/o area -+ is override by the tcm, so, need switch the tcm when -+ read or write these i/o area -+*/ -+#include -+ -+/* i/o read on the tcm override area */ -+static inline u32 tcm_override_readl(const volatile void __iomem *addr) -+{ -+ u32 val; -+ unsigned long flags, tcm_csr; -+ -+ flags = arch_local_irq_save(); -+ tcm_csr = csr_read_clear(CSR_TCMCFG, TCM_EN); -+ val = readl(addr); -+ csr_set(CSR_TCMCFG, tcm_csr); -+ arch_local_irq_restore(flags); -+ -+ return val; -+} -+ -+/* i/o write on the tcm override area */ -+static inline void tcm_override_writel(u32 val, volatile void __iomem *addr) -+{ -+ unsigned long flags, tcm_csr; -+ -+ flags = arch_local_irq_save(); -+ tcm_csr = csr_read_clear(CSR_TCMCFG, TCM_EN); -+ writel(val, addr); -+ csr_set(CSR_TCMCFG, tcm_csr); -+ arch_local_irq_restore(flags); -+} -+ -+#endif -+ - #endif /* _ASM_RISCV_MMIO_H */ -diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/asm/module.h -+++ b/arch/riscv/include/asm/module.h -@@ -5,6 +5,7 @@ - #define _ASM_RISCV_MODULE_H - - #include -+#include - - struct module; - unsigned long module_emit_got_entry(struct module *mod, unsigned long val); -@@ -111,4 +112,19 @@ static inline struct plt_entry *get_plt_entry(unsigned long val, - - #endif /* CONFIG_MODULE_SECTIONS */ - -+static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr, -+ const Elf_Shdr *sechdrs, -+ const char *name) -+{ -+ const Elf_Shdr *s, *se; -+ const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; -+ -+ for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) { -+ if (strcmp(name, secstrs + s->sh_name) == 0) -+ return s; -+ } -+ -+ return NULL; -+} -+ - #endif /* _ASM_RISCV_MODULE_H */ -diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/asm/processor.h -+++ b/arch/riscv/include/asm/processor.h -@@ -7,6 +7,7 @@ - #define _ASM_RISCV_PROCESSOR_H - - #include -+#include - - #include - -@@ -31,6 +32,17 @@ - struct task_struct; - struct pt_regs; - -+/* -+ * We use a flag to track in-kernel Vector context. Currently the flag has the -+ * following meaning: -+ * -+ * - bit 0: indicates whether the in-kernel Vector context is active. The -+ * activation of this state disables the preemption. On a non-RT kernel, it -+ * also disable bh. Currently only 0 and 1 are valid value for this field. -+ * Other values are reserved for future uses. 
-+ */ -+#define RISCV_KERNEL_MODE_V 0x1 -+ - /* CPU-specific state of a task */ - struct thread_struct { - /* Callee-saved registers */ -@@ -39,6 +51,9 @@ struct thread_struct { - unsigned long s[12]; /* s[0]: frame pointer */ - struct __riscv_d_ext_state fstate; - unsigned long bad_cause; -+ u32 riscv_v_flags; -+ u32 vstate_ctrl; -+ struct __riscv_v_ext_state vstate; - }; - - /* Whitelist the fstate from the task_struct for hardened usercopy */ -@@ -79,7 +94,9 @@ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid); - - extern void riscv_fill_hwcap(void); - extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); -+extern struct cpumask ai_core_mask_get(void); - -+extern unsigned long signal_minsigstksz __ro_after_init; - #endif /* __ASSEMBLY__ */ - - #endif /* _ASM_RISCV_PROCESSOR_H */ -diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/asm/sbi.h -+++ b/arch/riscv/include/asm/sbi.h -@@ -29,6 +29,7 @@ enum sbi_ext_id { - SBI_EXT_RFENCE = 0x52464E43, - SBI_EXT_HSM = 0x48534D, - SBI_EXT_SRST = 0x53525354, -+ SBI_EXT_SUSP = 0x53555350, - SBI_EXT_PMU = 0x504D55, - - /* Experimentals extensions must lie within this range */ -@@ -48,6 +49,9 @@ enum sbi_ext_base_fid { - SBI_EXT_BASE_GET_MVENDORID, - SBI_EXT_BASE_GET_MARCHID, - SBI_EXT_BASE_GET_MIMPID, -+#if defined(CONFIG_SOC_SPACEMIT_K1PRO) || defined(CONFIG_SOC_SPACEMIT_K1X) -+ SBI_EXT_BASE_FLUSH_CACHE_ALL, -+#endif - }; - - enum sbi_ext_time_fid { -@@ -113,6 +117,14 @@ enum sbi_srst_reset_reason { - SBI_SRST_RESET_REASON_SYS_FAILURE, - }; - -+enum sbi_ext_susp_fid { -+ SBI_EXT_SUSP_SYSTEM_SUSPEND = 0, -+}; -+ -+enum sbi_ext_susp_sleep_type { -+ SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM = 0, -+}; -+ - enum sbi_ext_pmu_fid { - SBI_EXT_PMU_NUM_COUNTERS = 0, - SBI_EXT_PMU_COUNTER_GET_INFO, -@@ -295,6 +307,10 @@ int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask, - unsigned long asid); - long sbi_probe_extension(int ext); - -+#if defined(CONFIG_SOC_SPACEMIT_K1PRO) || defined(CONFIG_SOC_SPACEMIT_K1X) -+void sbi_flush_local_dcache_all(void); -+#endif -+ - /* Check if current SBI specification version is 0.1 or not */ - static inline int sbi_spec_is_0_1(void) - { -diff --git a/arch/riscv/include/asm/simd.h b/arch/riscv/include/asm/simd.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/arch/riscv/include/asm/simd.h -@@ -0,0 +1,44 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* -+ * Copyright (C) 2017 Linaro Ltd. -+ * Copyright (C) 2023 SiFive -+ */ -+ -+#ifndef __ASM_SIMD_H -+#define __ASM_SIMD_H -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#ifdef CONFIG_RISCV_ISA_V -+/* -+ * may_use_simd - whether it is allowable at this time to issue vector -+ * instructions or access the vector register file -+ * -+ * Callers must not assume that the result remains true beyond the next -+ * preempt_enable() or return from softirq context. -+ */ -+static __must_check inline bool may_use_simd(void) -+{ -+ /* -+ * RISCV_KERNEL_MODE_V is only set while preemption is disabled, -+ * and is clear whenever preemption is enabled. -+ */ -+ return !in_hardirq() && !in_nmi() && !irqs_disabled() && !(riscv_v_flags() & RISCV_KERNEL_MODE_V); -+} -+ -+#else /* ! CONFIG_RISCV_ISA_V */ -+ -+static __must_check inline bool may_use_simd(void) -+{ -+ return false; -+} -+ -+#endif /* ! 
CONFIG_RISCV_ISA_V */ -+ -+#endif -\ No newline at end of file -diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/asm/switch_to.h -+++ b/arch/riscv/include/asm/switch_to.h -@@ -8,6 +8,7 @@ - - #include - #include -+#include - #include - #include - #include -@@ -46,7 +47,7 @@ static inline void fstate_restore(struct task_struct *task, - } - } - --static inline void __switch_to_aux(struct task_struct *prev, -+static inline void __switch_to_fpu(struct task_struct *prev, - struct task_struct *next) - { - struct pt_regs *regs; -@@ -65,7 +66,7 @@ static __always_inline bool has_fpu(void) - static __always_inline bool has_fpu(void) { return false; } - #define fstate_save(task, regs) do { } while (0) - #define fstate_restore(task, regs) do { } while (0) --#define __switch_to_aux(__prev, __next) do { } while (0) -+#define __switch_to_fpu(__prev, __next) do { } while (0) - #endif - - extern struct task_struct *__switch_to(struct task_struct *, -@@ -76,7 +77,9 @@ do { \ - struct task_struct *__prev = (prev); \ - struct task_struct *__next = (next); \ - if (has_fpu()) \ -- __switch_to_aux(__prev, __next); \ -+ __switch_to_fpu(__prev, __next); \ -+ if (has_vector()) \ -+ __switch_to_vector(__prev, __next); \ - ((last) = __switch_to(__prev, __next)); \ - } while (0) - -diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/asm/thread_info.h -+++ b/arch/riscv/include/asm/thread_info.h -@@ -80,6 +80,9 @@ struct thread_info { - .preempt_count = INIT_PREEMPT_COUNT, \ - } - -+void arch_release_task_struct(struct task_struct *tsk); -+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); -+ - #endif /* !__ASSEMBLY__ */ - - /* -diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/asm/vdso/processor.h -+++ b/arch/riscv/include/asm/vdso/processor.h -@@ -4,30 +4,26 @@ - - #ifndef __ASSEMBLY__ - --#include - #include --#include - - static inline void cpu_relax(void) - { -- if (!static_branch_likely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_ZIHINTPAUSE])) { - #ifdef __riscv_muldiv -- int dummy; -- /* In lieu of a halt instruction, induce a long-latency stall. */ -- __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy)); -+ int dummy; -+ /* In lieu of a halt instruction, induce a long-latency stall. */ -+ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy)); - #endif -- } else { -- /* -- * Reduce instruction retirement. -- * This assumes the PC changes. -- */ --#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE -- __asm__ __volatile__ ("pause"); -+ -+#ifdef __riscv_zihintpause -+ /* -+ * Reduce instruction retirement. -+ * This assumes the PC changes. 
-+ */ -+ __asm__ __volatile__ ("pause"); - #else -- /* Encoding of the pause instruction */ -- __asm__ __volatile__ (".4byte 0x100000F"); -+ /* Encoding of the pause instruction */ -+ __asm__ __volatile__ (".4byte 0x100000F"); - #endif -- } - barrier(); - } - -diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/arch/riscv/include/asm/vector.h -@@ -0,0 +1,210 @@ -+/* SPDX-License-Identifier: GPL-2.0-or-later */ -+/* -+ * Copyright (C) 2020 SiFive -+ */ -+ -+#ifndef __ASM_RISCV_VECTOR_H -+#define __ASM_RISCV_VECTOR_H -+ -+#include -+ -+#ifdef CONFIG_RISCV_ISA_V -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+extern unsigned long riscv_v_vsize; -+void riscv_v_setup_vsize(void); -+bool riscv_v_first_use_handler(struct pt_regs *regs); -+void kernel_vector_begin(void); -+void kernel_vector_end(void); -+void get_cpu_vector_context(void); -+void put_cpu_vector_context(void); -+ -+static inline u32 riscv_v_flags(void) -+{ -+ return current->thread.riscv_v_flags; -+} -+ -+static __always_inline bool has_vector(void) -+{ -+ //return riscv_has_extension_likely(RISCV_ISA_EXT_v); -+ return true; -+} -+ -+static inline void __riscv_v_vstate_clean(struct pt_regs *regs) -+{ -+ regs->status = (regs->status & ~SR_VS) | SR_VS_CLEAN; -+} -+ -+static inline void riscv_v_vstate_off(struct pt_regs *regs) -+{ -+ regs->status = (regs->status & ~SR_VS) | SR_VS_OFF; -+} -+ -+static inline void riscv_v_vstate_on(struct pt_regs *regs) -+{ -+ regs->status = (regs->status & ~SR_VS) | SR_VS_INITIAL; -+} -+ -+static inline bool riscv_v_vstate_query(struct pt_regs *regs) -+{ -+ return (regs->status & SR_VS) != 0; -+} -+ -+static __always_inline void riscv_v_enable(void) -+{ -+ csr_set(CSR_SSTATUS, SR_VS); -+} -+ -+static __always_inline void riscv_v_disable(void) -+{ -+ csr_clear(CSR_SSTATUS, SR_VS); -+} -+ -+static __always_inline void riscv_v_csr_init(void) -+{ -+ riscv_v_enable(); -+ asm volatile ( -+ "csrw " __stringify(CSR_VSTART) ", 0\n\t" -+ "csrw " __stringify(CSR_VCSR) ", 0\n\t" -+ : : :); -+ riscv_v_disable(); -+} -+ -+static __always_inline void __vstate_csr_save(struct __riscv_v_ext_state *dest) -+{ -+ asm volatile ( -+ "csrr %0, " __stringify(CSR_VSTART) "\n\t" -+ "csrr %1, " __stringify(CSR_VTYPE) "\n\t" -+ "csrr %2, " __stringify(CSR_VL) "\n\t" -+ "csrr %3, " __stringify(CSR_VCSR) "\n\t" -+ : "=r" (dest->vstart), "=r" (dest->vtype), "=r" (dest->vl), -+ "=r" (dest->vcsr) : :); -+} -+ -+static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src) -+{ -+ asm volatile ( -+ ".option push\n\t" -+ ".option arch, +v\n\t" -+ "vsetvl x0, %2, %1\n\t" -+ ".option pop\n\t" -+ "csrw " __stringify(CSR_VSTART) ", %0\n\t" -+ "csrw " __stringify(CSR_VCSR) ", %3\n\t" -+ : : "r" (src->vstart), "r" (src->vtype), "r" (src->vl), -+ "r" (src->vcsr) :); -+} -+ -+static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to, -+ void *datap) -+{ -+ unsigned long vl; -+ -+ riscv_v_enable(); -+ __vstate_csr_save(save_to); -+ asm volatile ( -+ ".option push\n\t" -+ ".option arch, +v\n\t" -+ "vsetvli %0, x0, e8, m8, ta, ma\n\t" -+ "vse8.v v0, (%1)\n\t" -+ "add %1, %1, %0\n\t" -+ "vse8.v v8, (%1)\n\t" -+ "add %1, %1, %0\n\t" -+ "vse8.v v16, (%1)\n\t" -+ "add %1, %1, %0\n\t" -+ "vse8.v v24, (%1)\n\t" -+ ".option pop\n\t" -+ : "=&r" (vl) : "r" (datap) : "memory"); -+ riscv_v_disable(); -+} -+ -+static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state 
*restore_from, -+ void *datap) -+{ -+ unsigned long vl; -+ -+ riscv_v_enable(); -+ asm volatile ( -+ ".option push\n\t" -+ ".option arch, +v\n\t" -+ "vsetvli %0, x0, e8, m8, ta, ma\n\t" -+ "vle8.v v0, (%1)\n\t" -+ "add %1, %1, %0\n\t" -+ "vle8.v v8, (%1)\n\t" -+ "add %1, %1, %0\n\t" -+ "vle8.v v16, (%1)\n\t" -+ "add %1, %1, %0\n\t" -+ "vle8.v v24, (%1)\n\t" -+ ".option pop\n\t" -+ : "=&r" (vl) : "r" (datap) : "memory"); -+ __vstate_csr_restore(restore_from); -+ riscv_v_disable(); -+} -+ -+static inline void riscv_v_vstate_save(struct task_struct *task, -+ struct pt_regs *regs) -+{ -+ if ((regs->status & SR_VS) == SR_VS_DIRTY) { -+ struct __riscv_v_ext_state *vstate = &task->thread.vstate; -+ -+ __riscv_v_vstate_save(vstate, vstate->datap); -+ __riscv_v_vstate_clean(regs); -+ } -+} -+ -+static inline void riscv_v_vstate_restore(struct task_struct *task, -+ struct pt_regs *regs) -+{ -+ if ((regs->status & SR_VS) != SR_VS_OFF) { -+ struct __riscv_v_ext_state *vstate = &task->thread.vstate; -+ -+ __riscv_v_vstate_restore(vstate, vstate->datap); -+ __riscv_v_vstate_clean(regs); -+ } -+} -+ -+static inline void __switch_to_vector(struct task_struct *prev, -+ struct task_struct *next) -+{ -+ struct pt_regs *regs; -+ -+ regs = task_pt_regs(prev); -+ riscv_v_vstate_save(prev, regs); -+ riscv_v_vstate_restore(next, task_pt_regs(next)); -+} -+ -+#else /* ! CONFIG_RISCV_ISA_V */ -+ -+struct pt_regs; -+ -+static __always_inline bool has_vector(void) { return false; } -+static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return false; } -+static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; } -+#define riscv_v_vsize (0) -+#define riscv_v_setup_vsize() do {} while (0) -+#define riscv_v_vstate_save(task, regs) do {} while (0) -+#define riscv_v_vstate_restore(task, regs) do {} while (0) -+#define __switch_to_vector(__prev, __next) do {} while (0) -+#define riscv_v_vstate_off(regs) do {} while (0) -+#define riscv_v_vstate_on(regs) do {} while (0) -+#define riscv_v_csr_init() do {} while (0) -+ -+#endif /* CONFIG_RISCV_ISA_V */ -+/* -+ * Return the implementation's vlen value. -+ * -+ * riscv_v_vsize contains the value of "32 vector registers with vlenb length" -+ * so rebuild the vlen value in bits from it. -+ */ -+static inline int riscv_vector_vlen(void) -+{ -+ return riscv_v_vsize / 32 * 8; -+} -+ -+#endif /* ! 
__ASM_RISCV_VECTOR_H */ -diff --git a/arch/riscv/include/asm/xor.h b/arch/riscv/include/asm/xor.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/arch/riscv/include/asm/xor.h -@@ -0,0 +1,83 @@ -+ -+/* SPDX-License-Identifier: GPL-2.0-or-later */ -+/* -+ * Copyright (C) 2021 SiFive -+ */ -+ -+#include -+#include -+#ifdef CONFIG_RISCV_ISA_V -+#include -+#include -+ -+void xor_regs_2_(unsigned long bytes, unsigned long *__restrict p1, -+ const unsigned long *__restrict p2); -+void xor_regs_3_(unsigned long bytes, unsigned long *__restrict p1, -+ const unsigned long *__restrict p2, -+ const unsigned long *__restrict p3); -+void xor_regs_4_(unsigned long bytes, unsigned long *__restrict p1, -+ const unsigned long *__restrict p2, -+ const unsigned long *__restrict p3, -+ const unsigned long *__restrict p4); -+void xor_regs_5_(unsigned long bytes, unsigned long *__restrict p1, -+ const unsigned long *__restrict p2, -+ const unsigned long *__restrict p3, -+ const unsigned long *__restrict p4, -+ const unsigned long *__restrict p5); -+ -+static void xor_vector_2(unsigned long bytes, unsigned long *__restrict p1, -+ const unsigned long *__restrict p2) -+{ -+ kernel_vector_begin(); -+ xor_regs_2_(bytes, p1, p2); -+ kernel_vector_end(); -+} -+ -+static void xor_vector_3(unsigned long bytes, unsigned long *__restrict p1, -+ const unsigned long *__restrict p2, -+ const unsigned long *__restrict p3) -+{ -+ kernel_vector_begin(); -+ xor_regs_3_(bytes, p1, p2, p3); -+ kernel_vector_end(); -+} -+ -+static void xor_vector_4(unsigned long bytes, unsigned long *__restrict p1, -+ const unsigned long *__restrict p2, -+ const unsigned long *__restrict p3, -+ const unsigned long *__restrict p4) -+{ -+ kernel_vector_begin(); -+ xor_regs_4_(bytes, p1, p2, p3, p4); -+ kernel_vector_end(); -+} -+ -+static void xor_vector_5(unsigned long bytes, unsigned long *__restrict p1, -+ const unsigned long *__restrict p2, -+ const unsigned long *__restrict p3, -+ const unsigned long *__restrict p4, -+ const unsigned long *__restrict p5) -+{ -+ kernel_vector_begin(); -+ xor_regs_5_(bytes, p1, p2, p3, p4, p5); -+ kernel_vector_end(); -+} -+ -+static struct xor_block_template xor_block_rvv = { -+ .name = "rvv", -+ .do_2 = xor_vector_2, -+ .do_3 = xor_vector_3, -+ .do_4 = xor_vector_4, -+ .do_5 = xor_vector_5 -+}; -+ -+#undef XOR_TRY_TEMPLATES -+#define XOR_TRY_TEMPLATES \ -+ do { \ -+ xor_speed(&xor_block_8regs); \ -+ xor_speed(&xor_block_32regs); \ -+ if (has_vector()) { \ -+ xor_speed(&xor_block_rvv);\ -+ } \ -+ } while (0) -+#endif -diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/uapi/asm/auxvec.h -+++ b/arch/riscv/include/uapi/asm/auxvec.h -@@ -35,5 +35,6 @@ - - /* entries in ARCH_DLINFO */ - #define AT_VECTOR_SIZE_ARCH 9 -+#define AT_MINSIGSTKSZ 51 - - #endif /* _UAPI_ASM_RISCV_AUXVEC_H */ -diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/uapi/asm/elf.h -+++ b/arch/riscv/include/uapi/asm/elf.h -@@ -49,6 +49,7 @@ typedef union __riscv_fp_state elf_fpregset_t; - #define R_RISCV_TLS_DTPREL64 9 - #define R_RISCV_TLS_TPREL32 10 - #define R_RISCV_TLS_TPREL64 11 -+#define R_RISCV_IRELATIVE 58 - - /* Relocation types not used by the dynamic linker */ - #define R_RISCV_BRANCH 16 -@@ -81,7 +82,6 @@ typedef union __riscv_fp_state elf_fpregset_t; - #define R_RISCV_ALIGN 43 - #define R_RISCV_RVC_BRANCH 44 
- #define R_RISCV_RVC_JUMP 45 --#define R_RISCV_LUI 46 - #define R_RISCV_GPREL_I 47 - #define R_RISCV_GPREL_S 48 - #define R_RISCV_TPREL_I 49 -@@ -93,6 +93,9 @@ typedef union __riscv_fp_state elf_fpregset_t; - #define R_RISCV_SET16 55 - #define R_RISCV_SET32 56 - #define R_RISCV_32_PCREL 57 -+#define R_RISCV_PLT32 59 -+#define R_RISCV_SET_ULEB128 60 -+#define R_RISCV_SUB_ULEB128 61 - - - #endif /* _UAPI_ASM_RISCV_ELF_H */ -diff --git a/arch/riscv/include/uapi/asm/hwcap.h b/arch/riscv/include/uapi/asm/hwcap.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/uapi/asm/hwcap.h -+++ b/arch/riscv/include/uapi/asm/hwcap.h -@@ -21,5 +21,6 @@ - #define COMPAT_HWCAP_ISA_F (1 << ('F' - 'A')) - #define COMPAT_HWCAP_ISA_D (1 << ('D' - 'A')) - #define COMPAT_HWCAP_ISA_C (1 << ('C' - 'A')) -+#define COMPAT_HWCAP_ISA_V (1 << ('V' - 'A')) - - #endif /* _UAPI_ASM_RISCV_HWCAP_H */ -diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/uapi/asm/kvm.h -+++ b/arch/riscv/include/uapi/asm/kvm.h -@@ -102,6 +102,9 @@ enum KVM_RISCV_ISA_EXT_ID { - KVM_RISCV_ISA_EXT_SVINVAL, - KVM_RISCV_ISA_EXT_ZIHINTPAUSE, - KVM_RISCV_ISA_EXT_ZICBOM, -+ KVM_RISCV_ISA_EXT_ZICBOZ, -+ KVM_RISCV_ISA_EXT_ZICBOP, -+ KVM_RISCV_ISA_EXT_V, - KVM_RISCV_ISA_EXT_MAX, - }; - -@@ -149,6 +152,13 @@ enum KVM_RISCV_ISA_EXT_ID { - /* ISA Extension registers are mapped as type 7 */ - #define KVM_REG_RISCV_ISA_EXT (0x07 << KVM_REG_RISCV_TYPE_SHIFT) - -+/* V extension registers are mapped as type 8 */ -+#define KVM_REG_RISCV_VECTOR (0x08 << KVM_REG_RISCV_TYPE_SHIFT) -+#define KVM_REG_RISCV_VECTOR_CSR_REG(name) \ -+ (offsetof(struct __riscv_v_ext_state, name) / sizeof(unsigned long)) -+#define KVM_REG_RISCV_VECTOR_REG(n) \ -+ ((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long)) -+ - #endif - - #endif /* __LINUX_KVM_RISCV_H */ -diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/uapi/asm/ptrace.h -+++ b/arch/riscv/include/uapi/asm/ptrace.h -@@ -71,12 +71,51 @@ struct __riscv_q_ext_state { - __u32 reserved[3]; - }; - -+struct __riscv_ctx_hdr { -+ __u32 magic; -+ __u32 size; -+}; -+ -+struct __riscv_extra_ext_header { -+ __u32 __padding[129] __attribute__((aligned(16))); -+ /* -+ * Reserved for expansion of sigcontext structure. Currently zeroed -+ * upon signal, and must be zero upon sigreturn. -+ */ -+ __u32 reserved; -+ struct __riscv_ctx_hdr hdr; -+}; -+ - union __riscv_fp_state { - struct __riscv_f_ext_state f; - struct __riscv_d_ext_state d; - struct __riscv_q_ext_state q; - }; - -+struct __riscv_v_ext_state { -+ unsigned long vstart; -+ unsigned long vl; -+ unsigned long vtype; -+ unsigned long vcsr; -+ void *datap; -+ /* -+ * In signal handler, datap will be set a correct user stack offset -+ * and vector registers will be copied to the address of datap -+ * pointer. -+ * -+ * In ptrace syscall, datap will be set to zero and the vector -+ * registers will be copied to the address right after this -+ * structure. 
-+ */ -+}; -+ -+/* -+ * According to spec: The number of bits in a single vector register, -+ * VLEN >= ELEN, which must be a power of 2, and must be no greater than -+ * 2^16 = 65536bits = 8192bytes -+ */ -+#define RISCV_MAX_VLENB (8192) -+ - #endif /* __ASSEMBLY__ */ - - #endif /* _UAPI_ASM_RISCV_PTRACE_H */ --- -Armbian - -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Sat, 22 Jun 2024 07:23:03 -0400 -Subject: arch: riscv: include: uapi: asm: setup: cmdline 2048 - -Signed-off-by: Patrick Yavitz ---- - arch/riscv/include/uapi/asm/setup.h | 2 +- - arch/riscv/include/uapi/asm/sigcontext.h | 16 +- - arch/riscv/kernel/Makefile | 2 + - arch/riscv/kernel/cpu-hotplug.c | 7 +- - arch/riscv/kernel/cpu.c | 8 +- - arch/riscv/kernel/cpu_ops_sbi.c | 30 ++- - arch/riscv/kernel/cpufeature.c | 103 +++++++- - arch/riscv/kernel/entry.S | 6 +- - arch/riscv/kernel/head.S | 50 ++-- - arch/riscv/kernel/kernel_mode_vector.c | 125 ++++++++++ - 10 files changed, 318 insertions(+), 31 deletions(-) - -diff --git a/arch/riscv/include/uapi/asm/setup.h b/arch/riscv/include/uapi/asm/setup.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/uapi/asm/setup.h -+++ b/arch/riscv/include/uapi/asm/setup.h -@@ -3,6 +3,6 @@ - #ifndef _UAPI_ASM_RISCV_SETUP_H - #define _UAPI_ASM_RISCV_SETUP_H - --#define COMMAND_LINE_SIZE 1024 -+#define COMMAND_LINE_SIZE 2048 - - #endif /* _UAPI_ASM_RISCV_SETUP_H */ -diff --git a/arch/riscv/include/uapi/asm/sigcontext.h b/arch/riscv/include/uapi/asm/sigcontext.h -index 111111111111..222222222222 100644 ---- a/arch/riscv/include/uapi/asm/sigcontext.h -+++ b/arch/riscv/include/uapi/asm/sigcontext.h -@@ -8,6 +8,17 @@ - - #include - -+/* The Magic number for signal context frame header. */ -+#define RISCV_V_MAGIC 0x53465457 -+#define END_MAGIC 0x0 -+ -+/* The size of END signal context header. 
*/ -+#define END_HDR_SIZE 0x0 -+ -+struct __sc_riscv_v_state { -+ struct __riscv_v_ext_state v_state; -+} __attribute__((aligned(16))); -+ - /* - * Signal context structure - * -@@ -16,7 +27,10 @@ - */ - struct sigcontext { - struct user_regs_struct sc_regs; -- union __riscv_fp_state sc_fpregs; -+ union { -+ union __riscv_fp_state sc_fpregs; -+ struct __riscv_extra_ext_header sc_extdesc; -+ }; - }; - - #endif /* _UAPI_ASM_RISCV_SIGCONTEXT_H */ -diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile -index 111111111111..222222222222 100644 ---- a/arch/riscv/kernel/Makefile -+++ b/arch/riscv/kernel/Makefile -@@ -58,6 +58,8 @@ obj-$(CONFIG_MMU) += vdso.o vdso/ - - obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o - obj-$(CONFIG_FPU) += fpu.o -+obj-$(CONFIG_RISCV_ISA_V) += vector.o -+obj-$(CONFIG_RISCV_ISA_V) += kernel_mode_vector.o - obj-$(CONFIG_SMP) += smpboot.o - obj-$(CONFIG_SMP) += smp.o - obj-$(CONFIG_SMP) += cpu_ops.o -diff --git a/arch/riscv/kernel/cpu-hotplug.c b/arch/riscv/kernel/cpu-hotplug.c -index 111111111111..222222222222 100644 ---- a/arch/riscv/kernel/cpu-hotplug.c -+++ b/arch/riscv/kernel/cpu-hotplug.c -@@ -74,9 +74,14 @@ void __cpu_die(unsigned int cpu) - void arch_cpu_idle_dead(void) - { - idle_task_exit(); -- -+#if defined(CONFIG_SOC_SPACEMIT_K1PRO) || defined(CONFIG_SOC_SPACEMIT_K1X) -+ sbi_flush_local_dcache_all(); -+#endif - (void)cpu_report_death(); - -+#if defined(CONFIG_SOC_SPACEMIT_K1PRO) || defined(CONFIG_SOC_SPACEMIT_K1X) -+ sbi_flush_local_dcache_all(); -+#endif - cpu_ops[smp_processor_id()]->cpu_stop(); - /* It should never reach here */ - BUG(); -diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c -index 111111111111..222222222222 100644 ---- a/arch/riscv/kernel/cpu.c -+++ b/arch/riscv/kernel/cpu.c -@@ -144,6 +144,8 @@ static struct riscv_isa_ext_data isa_ext_arr[] = { - __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL), - __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT), - __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM), -+ __RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ), -+ __RISCV_ISA_EXT_DATA(zicbop, RISCV_ISA_EXT_ZICBOP), - __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE), - __RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX), - }; -@@ -238,10 +240,14 @@ static int c_show(struct seq_file *m, void *v) - unsigned long cpu_id = (unsigned long)v - 1; - struct device_node *node = of_get_cpu_node(cpu_id, NULL); - struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id); -- const char *compat, *isa; -+ const char *compat, *isa, *model; - - seq_printf(m, "processor\t: %lu\n", cpu_id); - seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id)); -+ -+ if (!of_property_read_string(node, "model", &model)) -+ seq_printf(m, "model name\t: %s\n", model); -+ - if (!of_property_read_string(node, "riscv,isa", &isa)) - print_isa(m, isa); - print_mmu(m); -diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c -index 111111111111..222222222222 100644 ---- a/arch/riscv/kernel/cpu_ops_sbi.c -+++ b/arch/riscv/kernel/cpu_ops_sbi.c -@@ -12,6 +12,8 @@ - #include - #include - #include -+#include -+#include - - extern char secondary_start_sbi[]; - const struct cpu_operations cpu_ops_sbi; -@@ -108,11 +110,37 @@ static int sbi_cpu_is_stopped(unsigned int cpuid) - { - int rc; - unsigned long hartid = cpuid_to_hartid_map(cpuid); -- -+#ifndef CONFIG_ARM_SCMI_PROTOCOL - rc = sbi_hsm_hart_get_status(hartid); - - if (rc == SBI_HSM_STATE_STOPPED) - return 0; -+#else -+ unsigned long start, end; -+ -+ /* -+ * cpu_kill 
could race with cpu_die and we can -+ * potentially end up declaring this cpu undead -+ * while it is dying. So, try again a few times. -+ */ -+ start = jiffies; -+ end = start + msecs_to_jiffies(100); -+ do { -+ rc = sbi_hsm_hart_get_status(hartid); -+ if (rc == SBI_HSM_STATE_STOPPED) { -+ pr_info("CPU%d killed (polled %d ms)\n", cpuid, -+ jiffies_to_msecs(jiffies - start)); -+ return 0; -+ } -+ -+ usleep_range(100, 1000); -+ } while (time_before(jiffies, end)); -+ -+ pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n", -+ cpuid, rc); -+ rc = -ETIMEDOUT; -+ -+#endif - return rc; - } - #endif -diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c -index 111111111111..222222222222 100644 ---- a/arch/riscv/kernel/cpufeature.c -+++ b/arch/riscv/kernel/cpufeature.c -@@ -21,6 +21,7 @@ - #include - #include - #include -+#include - - #define NUM_ALPHA_EXTS ('z' - 'a' + 1) - -@@ -69,6 +70,33 @@ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit) - } - EXPORT_SYMBOL_GPL(__riscv_isa_extension_available); - -+struct cpumask ai_core_mask_get(void) -+{ -+ struct device_node *node; -+ const char *cpu_ai; -+ struct cpumask cpu_mask; -+ unsigned long hartid; -+ int rc; -+ -+ cpumask_clear(&cpu_mask); -+ -+ for_each_of_cpu_node(node) { -+ rc = riscv_of_processor_hartid(node, &hartid); -+ if (rc < 0) -+ continue; -+ -+ if (of_property_read_string(node, "cpu-ai", &cpu_ai)) { -+ continue; -+ } -+ -+ if(!strcmp(cpu_ai, "true")) { -+ cpumask_set_cpu(hartid, &cpu_mask); -+ } -+ } -+ -+ return cpu_mask; -+} -+ - void __init riscv_fill_hwcap(void) - { - struct device_node *node; -@@ -78,12 +106,13 @@ void __init riscv_fill_hwcap(void) - static unsigned long isa2hwcap[256] = {0}; - unsigned long hartid; - -- isa2hwcap['i'] = isa2hwcap['I'] = COMPAT_HWCAP_ISA_I; -- isa2hwcap['m'] = isa2hwcap['M'] = COMPAT_HWCAP_ISA_M; -- isa2hwcap['a'] = isa2hwcap['A'] = COMPAT_HWCAP_ISA_A; -- isa2hwcap['f'] = isa2hwcap['F'] = COMPAT_HWCAP_ISA_F; -- isa2hwcap['d'] = isa2hwcap['D'] = COMPAT_HWCAP_ISA_D; -- isa2hwcap['c'] = isa2hwcap['C'] = COMPAT_HWCAP_ISA_C; -+ isa2hwcap['i' - 'a'] = COMPAT_HWCAP_ISA_I; -+ isa2hwcap['m' - 'a'] = COMPAT_HWCAP_ISA_M; -+ isa2hwcap['a' - 'a'] = COMPAT_HWCAP_ISA_A; -+ isa2hwcap['f' - 'a'] = COMPAT_HWCAP_ISA_F; -+ isa2hwcap['d' - 'a'] = COMPAT_HWCAP_ISA_D; -+ isa2hwcap['c' - 'a'] = COMPAT_HWCAP_ISA_C; -+ isa2hwcap['v' - 'a'] = COMPAT_HWCAP_ISA_V; - - elf_hwcap = 0; - -@@ -197,12 +226,15 @@ void __init riscv_fill_hwcap(void) - if (unlikely(ext_err)) - continue; - if (!ext_long) { -- this_hwcap |= isa2hwcap[(unsigned char)(*ext)]; -- set_bit(*ext - 'a', this_isa); -+ int nr = *ext - 'a'; -+ this_hwcap |= isa2hwcap[nr]; -+ set_bit(nr, this_isa); - } else { - SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF); - SET_ISA_EXT_MAP("svpbmt", RISCV_ISA_EXT_SVPBMT); - SET_ISA_EXT_MAP("zicbom", RISCV_ISA_EXT_ZICBOM); -+ SET_ISA_EXT_MAP("zicboz", RISCV_ISA_EXT_ZICBOZ); -+ SET_ISA_EXT_MAP("zicbop", RISCV_ISA_EXT_ZICBOP); - SET_ISA_EXT_MAP("zihintpause", RISCV_ISA_EXT_ZIHINTPAUSE); - SET_ISA_EXT_MAP("sstc", RISCV_ISA_EXT_SSTC); - SET_ISA_EXT_MAP("svinval", RISCV_ISA_EXT_SVINVAL); -@@ -233,6 +265,17 @@ void __init riscv_fill_hwcap(void) - elf_hwcap &= ~COMPAT_HWCAP_ISA_F; - } - -+ if (elf_hwcap & COMPAT_HWCAP_ISA_V) { -+ riscv_v_setup_vsize(); -+ /* -+ * ISA string in device tree might have 'v' flag, but -+ * CONFIG_RISCV_ISA_V is disabled in kernel. -+ * Clear V flag in elf_hwcap if CONFIG_RISCV_ISA_V is disabled. 
-+ */ -+ if (!IS_ENABLED(CONFIG_RISCV_ISA_V)) -+ elf_hwcap &= ~COMPAT_HWCAP_ISA_V; -+ } -+ - memset(print_str, 0, sizeof(print_str)); - for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++) - if (riscv_isa[0] & BIT_MASK(i)) -@@ -252,6 +295,15 @@ void __init riscv_fill_hwcap(void) - } - } - -+void riscv_user_isa_enable(void) -+{ -+ if (riscv_isa_extension_available(NULL, ZICBOZ)) -+ csr_set(CSR_SENVCFG, ENVCFG_CBZE); -+ -+ if (riscv_isa_extension_available(NULL, ZICBOM)) -+ csr_set(CSR_SENVCFG, ENVCFG_CBCFE | (ENVCFG_CBIE_FLUSH << ENVCFG_CBIE_SHIFT)); -+} -+ - #ifdef CONFIG_RISCV_ALTERNATIVE - static bool __init_or_module cpufeature_probe_svpbmt(unsigned int stage) - { -@@ -279,6 +331,35 @@ static bool __init_or_module cpufeature_probe_zicbom(unsigned int stage) - return true; - } - -+static bool __init_or_module cpufeature_probe_zicboz(unsigned int stage) -+{ -+ if (!IS_ENABLED(CONFIG_RISCV_ISA_ZICBOZ)) -+ return false; -+ -+ if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) -+ return false; -+ -+ if (!riscv_isa_extension_available(NULL, ZICBOZ)) -+ return false; -+ -+ return true; -+} -+ -+static bool __init_or_module cpufeature_probe_zicbop(unsigned int stage) -+{ -+ if (!IS_ENABLED(CONFIG_RISCV_ISA_ZICBOP)) -+ return false; -+ -+ if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) -+ return false; -+ -+ if (!riscv_isa_extension_available(NULL, ZICBOP)) -+ return false; -+ -+ return true; -+} -+ -+ - /* - * Probe presence of individual extensions. - * -@@ -296,6 +377,12 @@ static u32 __init_or_module cpufeature_probe(unsigned int stage) - if (cpufeature_probe_zicbom(stage)) - cpu_req_feature |= BIT(CPUFEATURE_ZICBOM); - -+ if (cpufeature_probe_zicboz(stage)) -+ cpu_req_feature |= BIT(CPUFEATURE_ZICBOZ); -+ -+ if (cpufeature_probe_zicbop(stage)) -+ cpu_req_feature |= BIT(CPUFEATURE_ZICBOP); -+ - return cpu_req_feature; - } - -diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S -index 111111111111..222222222222 100644 ---- a/arch/riscv/kernel/entry.S -+++ b/arch/riscv/kernel/entry.S -@@ -77,10 +77,10 @@ _save_context: - * Disable user-mode memory access as it should only be set in the - * actual user copy routines. - * -- * Disable the FPU to detect illegal usage of floating point in kernel -- * space. -+ * Disable the FPU/Vector to detect illegal usage of floating point -+ * or vector in kernel space. 
- */ -- li t0, SR_SUM | SR_FS -+ li t0, SR_SUM | SR_FS_VS - - REG_L s0, TASK_TI_USER_SP(tp) - csrrc s1, CSR_STATUS, t0 -diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S -index 111111111111..222222222222 100644 ---- a/arch/riscv/kernel/head.S -+++ b/arch/riscv/kernel/head.S -@@ -42,13 +42,8 @@ ENTRY(_start) - /* Image load offset (0MB) from start of RAM for M-mode */ - .dword 0 - #else --#if __riscv_xlen == 64 -- /* Image load offset(2MB) from start of RAM */ -- .dword 0x200000 --#else -- /* Image load offset(4MB) from start of RAM */ -- .dword 0x400000 --#endif -+ /* Image load offset from start of RAM */ -+ .dword CONFIG_IMAGE_LOAD_OFFSET - #endif - /* Effective size of kernel image */ - .dword _end - _start -@@ -140,10 +135,10 @@ secondary_start_sbi: - .option pop - - /* -- * Disable FPU to detect illegal usage of -- * floating point in kernel space -+ * Disable FPU & VECTOR to detect illegal usage of -+ * floating point or vector in kernel space - */ -- li t0, SR_FS -+ li t0, SR_FS_VS - csrc CSR_STATUS, t0 - - /* Set trap vector to spin forever to help debug */ -@@ -234,10 +229,10 @@ pmp_done: - .option pop - - /* -- * Disable FPU to detect illegal usage of -- * floating point in kernel space -+ * Disable FPU & VECTOR to detect illegal usage of -+ * floating point or vector in kernel space - */ -- li t0, SR_FS -+ li t0, SR_FS_VS - csrc CSR_STATUS, t0 - - #ifdef CONFIG_RISCV_BOOT_SPINWAIT -@@ -301,6 +296,7 @@ clear_bss_done: - la tp, init_task - la sp, init_thread_union + THREAD_SIZE - XIP_FIXUP_OFFSET sp -+ addi sp, sp, -PT_SIZE_ON_STACK - #ifdef CONFIG_BUILTIN_DTB - la a0, __dtb_start - XIP_FIXUP_OFFSET a0 -@@ -318,6 +314,7 @@ clear_bss_done: - /* Restore C environment */ - la tp, init_task - la sp, init_thread_union + THREAD_SIZE -+ addi sp, sp, -PT_SIZE_ON_STACK - - #ifdef CONFIG_KASAN - call kasan_early_init -@@ -392,7 +389,7 @@ ENTRY(reset_regs) - #ifdef CONFIG_FPU - csrr t0, CSR_MISA - andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D) -- beqz t0, .Lreset_regs_done -+ beqz t0, .Lreset_regs_done_fpu - - li t1, SR_FS - csrs CSR_STATUS, t1 -@@ -430,8 +427,31 @@ ENTRY(reset_regs) - fmv.s.x f31, zero - csrw fcsr, 0 - /* note that the caller must clear SR_FS */ -+.Lreset_regs_done_fpu: - #endif /* CONFIG_FPU */ --.Lreset_regs_done: -+ -+#ifdef CONFIG_RISCV_ISA_V -+ csrr t0, CSR_MISA -+ li t1, COMPAT_HWCAP_ISA_V -+ and t0, t0, t1 -+ beqz t0, .Lreset_regs_done_vector -+ -+ /* -+ * Clear vector registers and reset vcsr -+ * VLMAX has a defined value, VLEN is a constant, -+ * and this form of vsetvli is defined to set vl to VLMAX. -+ */ -+ li t1, SR_VS -+ csrs CSR_STATUS, t1 -+ csrs CSR_VCSR, x0 -+ vsetvli t1, x0, e8, m8, ta, ma -+ vmv.v.i v0, 0 -+ vmv.v.i v8, 0 -+ vmv.v.i v16, 0 -+ vmv.v.i v24, 0 -+ /* note that the caller must clear SR_VS */ -+.Lreset_regs_done_vector: -+#endif /* CONFIG_RISCV_ISA_V */ - ret - END(reset_regs) - #endif /* CONFIG_RISCV_M_MODE */ -diff --git a/arch/riscv/kernel/kernel_mode_vector.c b/arch/riscv/kernel/kernel_mode_vector.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/arch/riscv/kernel/kernel_mode_vector.c -@@ -0,0 +1,125 @@ -+// SPDX-License-Identifier: GPL-2.0-or-later -+/* -+ * Copyright (C) 2012 ARM Ltd. -+ * Author: Catalin Marinas -+ * Copyright (C) 2017 Linaro Ltd. 
-+ * Copyright (C) 2021 SiFive -+ */ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+static inline void riscv_v_flags_set(u32 flags) -+{ -+ current->thread.riscv_v_flags = flags; -+} -+ -+static inline void riscv_v_start(u32 flags) -+{ -+ int orig; -+ -+ orig = riscv_v_flags(); -+ BUG_ON((orig & flags) != 0); -+ riscv_v_flags_set(orig | flags); -+} -+ -+static inline void riscv_v_stop(u32 flags) -+{ -+ int orig; -+ -+ orig = riscv_v_flags(); -+ BUG_ON((orig & flags) == 0); -+ riscv_v_flags_set(orig & ~flags); -+} -+ -+/* -+ * Claim ownership of the CPU vector context for use by the calling context. -+ * -+ * The caller may freely manipulate the vector context metadata until -+ * put_cpu_vector_context() is called. -+ */ -+void get_cpu_vector_context(void) -+{ -+ /* -+ * disable softirqs so it is impossible for softirqs to nest -+ * get_cpu_vector_context() when kernel is actively using Vector. -+ */ -+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) -+ local_bh_disable(); -+ else -+ preempt_disable(); -+ riscv_v_start(RISCV_KERNEL_MODE_V); -+} -+ -+/* -+ * Release the CPU vector context. -+ * -+ * Must be called from a context in which get_cpu_vector_context() was -+ * previously called, with no call to put_cpu_vector_context() in the -+ * meantime. -+ */ -+void put_cpu_vector_context(void) -+{ -+ riscv_v_stop(RISCV_KERNEL_MODE_V); -+ -+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) -+ local_bh_enable(); -+ else -+ preempt_enable(); -+} -+ -+/* -+ * kernel_vector_begin(): obtain the CPU vector registers for use by the calling -+ * context -+ * -+ * Must not be called unless may_use_simd() returns true. -+ * Task context in the vector registers is saved back to memory as necessary. -+ * -+ * A matching call to kernel_vector_end() must be made before returning from the -+ * calling context. -+ * -+ * The caller may freely use the vector registers until kernel_vector_end() is -+ * called. -+ */ -+void kernel_vector_begin(void) -+{ -+ if (WARN_ON(!has_vector())) -+ return; -+ -+ BUG_ON(!may_use_simd()); -+ -+ get_cpu_vector_context(); -+ -+ riscv_v_vstate_save(current, task_pt_regs(current)); -+ -+ riscv_v_enable(); -+} -+EXPORT_SYMBOL_GPL(kernel_vector_begin); -+ -+/* -+ * kernel_vector_end(): give the CPU vector registers back to the current task -+ * -+ * Must be called from a context in which kernel_vector_begin() was previously -+ * called, with no call to kernel_vector_end() in the meantime. -+ * -+ * The caller must not use the vector registers after this function is called, -+ * unless kernel_vector_begin() is called again in the meantime. 
-+ */ -+void kernel_vector_end(void) -+{ -+ if (WARN_ON(!has_vector())) -+ return; -+ -+ riscv_v_vstate_restore(current, task_pt_regs(current)); -+ -+ riscv_v_disable(); -+ -+ put_cpu_vector_context(); -+} -+EXPORT_SYMBOL_GPL(kernel_vector_end); -\ No newline at end of file --- -Armbian - -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Sat, 22 Jun 2024 07:28:17 -0400 -Subject: arch: riscv: kernel: module.c - -Signed-off-by: Patrick Yavitz ---- - arch/riscv/kernel/module.c | 708 ++++++++-- - arch/riscv/kernel/process.c | 19 + - arch/riscv/kernel/ptrace.c | 70 + - arch/riscv/kernel/sbi.c | 9 + - arch/riscv/kernel/setup.c | 5 + - 5 files changed, 692 insertions(+), 119 deletions(-) - -diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c -index 111111111111..222222222222 100644 ---- a/arch/riscv/kernel/module.c -+++ b/arch/riscv/kernel/module.c -@@ -7,6 +7,9 @@ - #include - #include - #include -+#include -+#include -+#include - #include - #include - #include -@@ -14,6 +17,29 @@ - #include - #include - -+struct used_bucket { -+ struct list_head head; -+ struct hlist_head *bucket; -+}; -+ -+struct relocation_head { -+ struct hlist_node node; -+ struct list_head *rel_entry; -+ void *location; -+}; -+ -+struct relocation_entry { -+ struct list_head head; -+ Elf_Addr value; -+ unsigned int type; -+}; -+ -+struct relocation_handlers { -+ int (*reloc_handler)(struct module *me, void *location, Elf_Addr v); -+ int (*accumulate_handler)(struct module *me, void *location, -+ long buffer); -+}; -+ - /* - * The auipc+jalr instruction pair can reach any PC-relative offset - * in the range [-2^31 - 2^11, 2^31 - 2^11) -@@ -27,68 +53,90 @@ static bool riscv_insn_valid_32bit_offset(ptrdiff_t val) - #endif - } - --static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v) -+static int riscv_insn_rmw(void *location, u32 keep, u32 set) -+{ -+ __le16 *parcel = location; -+ u32 insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16; -+ -+ insn &= keep; -+ insn |= set; -+ -+ parcel[0] = cpu_to_le16(insn); -+ parcel[1] = cpu_to_le16(insn >> 16); -+ return 0; -+} -+ -+static int riscv_insn_rvc_rmw(void *location, u16 keep, u16 set) -+{ -+ __le16 *parcel = location; -+ u16 insn = le16_to_cpu(*parcel); -+ -+ insn &= keep; -+ insn |= set; -+ -+ *parcel = cpu_to_le16(insn); -+ return 0; -+} -+ -+static int apply_r_riscv_32_rela(struct module *me, void *location, Elf_Addr v) - { - if (v != (u32)v) { - pr_err("%s: value %016llx out of range for 32-bit field\n", - me->name, (long long)v); - return -EINVAL; - } -- *location = v; -+ *(u32 *)location = v; - return 0; - } - --static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v) -+static int apply_r_riscv_64_rela(struct module *me, void *location, Elf_Addr v) - { - *(u64 *)location = v; - return 0; - } - --static int apply_r_riscv_branch_rela(struct module *me, u32 *location, -+static int apply_r_riscv_branch_rela(struct module *me, void *location, - Elf_Addr v) - { -- ptrdiff_t offset = (void *)v - (void *)location; -+ ptrdiff_t offset = (void *)v - location; - u32 imm12 = (offset & 0x1000) << (31 - 12); - u32 imm11 = (offset & 0x800) >> (11 - 7); - u32 imm10_5 = (offset & 0x7e0) << (30 - 10); - u32 imm4_1 = (offset & 0x1e) << (11 - 4); - -- *location = (*location & 0x1fff07f) | imm12 | imm11 | imm10_5 | imm4_1; -- return 0; -+ return riscv_insn_rmw(location, 0x1fff07f, imm12 | imm11 | imm10_5 | imm4_1); - } - --static int 
apply_r_riscv_jal_rela(struct module *me, u32 *location, -+static int apply_r_riscv_jal_rela(struct module *me, void *location, - Elf_Addr v) - { -- ptrdiff_t offset = (void *)v - (void *)location; -+ ptrdiff_t offset = (void *)v - location; - u32 imm20 = (offset & 0x100000) << (31 - 20); - u32 imm19_12 = (offset & 0xff000); - u32 imm11 = (offset & 0x800) << (20 - 11); - u32 imm10_1 = (offset & 0x7fe) << (30 - 10); - -- *location = (*location & 0xfff) | imm20 | imm19_12 | imm11 | imm10_1; -- return 0; -+ return riscv_insn_rmw(location, 0xfff, imm20 | imm19_12 | imm11 | imm10_1); - } - --static int apply_r_riscv_rvc_branch_rela(struct module *me, u32 *location, -+static int apply_r_riscv_rvc_branch_rela(struct module *me, void *location, - Elf_Addr v) - { -- ptrdiff_t offset = (void *)v - (void *)location; -+ ptrdiff_t offset = (void *)v - location; - u16 imm8 = (offset & 0x100) << (12 - 8); - u16 imm7_6 = (offset & 0xc0) >> (6 - 5); - u16 imm5 = (offset & 0x20) >> (5 - 2); - u16 imm4_3 = (offset & 0x18) << (12 - 5); - u16 imm2_1 = (offset & 0x6) << (12 - 10); - -- *(u16 *)location = (*(u16 *)location & 0xe383) | -- imm8 | imm7_6 | imm5 | imm4_3 | imm2_1; -- return 0; -+ return riscv_insn_rvc_rmw(location, 0xe383, -+ imm8 | imm7_6 | imm5 | imm4_3 | imm2_1); - } - --static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location, -+static int apply_r_riscv_rvc_jump_rela(struct module *me, void *location, - Elf_Addr v) - { -- ptrdiff_t offset = (void *)v - (void *)location; -+ ptrdiff_t offset = (void *)v - location; - u16 imm11 = (offset & 0x800) << (12 - 11); - u16 imm10 = (offset & 0x400) >> (10 - 8); - u16 imm9_8 = (offset & 0x300) << (12 - 11); -@@ -98,16 +146,14 @@ static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location, - u16 imm4 = (offset & 0x10) << (12 - 5); - u16 imm3_1 = (offset & 0xe) << (12 - 10); - -- *(u16 *)location = (*(u16 *)location & 0xe003) | -- imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1; -- return 0; -+ return riscv_insn_rvc_rmw(location, 0xe003, -+ imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1); - } - --static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location, -+static int apply_r_riscv_pcrel_hi20_rela(struct module *me, void *location, - Elf_Addr v) - { -- ptrdiff_t offset = (void *)v - (void *)location; -- s32 hi20; -+ ptrdiff_t offset = (void *)v - location; - - if (!riscv_insn_valid_32bit_offset(offset)) { - pr_err( -@@ -116,23 +162,20 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location, - return -EINVAL; - } - -- hi20 = (offset + 0x800) & 0xfffff000; -- *location = (*location & 0xfff) | hi20; -- return 0; -+ return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000); - } - --static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, u32 *location, -+static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, void *location, - Elf_Addr v) - { - /* - * v is the lo12 value to fill. It is calculated before calling this - * handler. 
- */ -- *location = (*location & 0xfffff) | ((v & 0xfff) << 20); -- return 0; -+ return riscv_insn_rmw(location, 0xfffff, (v & 0xfff) << 20); - } - --static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, u32 *location, -+static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, void *location, - Elf_Addr v) - { - /* -@@ -142,15 +185,12 @@ static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, u32 *location, - u32 imm11_5 = (v & 0xfe0) << (31 - 11); - u32 imm4_0 = (v & 0x1f) << (11 - 4); - -- *location = (*location & 0x1fff07f) | imm11_5 | imm4_0; -- return 0; -+ return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0); - } - --static int apply_r_riscv_hi20_rela(struct module *me, u32 *location, -+static int apply_r_riscv_hi20_rela(struct module *me, void *location, - Elf_Addr v) - { -- s32 hi20; -- - if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) { - pr_err( - "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", -@@ -158,22 +198,20 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location, - return -EINVAL; - } - -- hi20 = ((s32)v + 0x800) & 0xfffff000; -- *location = (*location & 0xfff) | hi20; -- return 0; -+ return riscv_insn_rmw(location, 0xfff, ((s32)v + 0x800) & 0xfffff000); - } - --static int apply_r_riscv_lo12_i_rela(struct module *me, u32 *location, -+static int apply_r_riscv_lo12_i_rela(struct module *me, void *location, - Elf_Addr v) - { - /* Skip medlow checking because of filtering by HI20 already */ - s32 hi20 = ((s32)v + 0x800) & 0xfffff000; - s32 lo12 = ((s32)v - hi20); -- *location = (*location & 0xfffff) | ((lo12 & 0xfff) << 20); -- return 0; -+ -+ return riscv_insn_rmw(location, 0xfffff, (lo12 & 0xfff) << 20); - } - --static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location, -+static int apply_r_riscv_lo12_s_rela(struct module *me, void *location, - Elf_Addr v) - { - /* Skip medlow checking because of filtering by HI20 already */ -@@ -181,20 +219,18 @@ static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location, - s32 lo12 = ((s32)v - hi20); - u32 imm11_5 = (lo12 & 0xfe0) << (31 - 11); - u32 imm4_0 = (lo12 & 0x1f) << (11 - 4); -- *location = (*location & 0x1fff07f) | imm11_5 | imm4_0; -- return 0; -+ -+ return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0); - } - --static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location, -+static int apply_r_riscv_got_hi20_rela(struct module *me, void *location, - Elf_Addr v) - { -- ptrdiff_t offset = (void *)v - (void *)location; -- s32 hi20; -+ ptrdiff_t offset = (void *)v - location; - - /* Always emit the got entry */ - if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) { -- offset = module_emit_got_entry(me, v); -- offset = (void *)offset - (void *)location; -+ offset = (void *)module_emit_got_entry(me, v) - location; - } else { - pr_err( - "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n", -@@ -202,22 +238,19 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location, - return -EINVAL; - } - -- hi20 = (offset + 0x800) & 0xfffff000; -- *location = (*location & 0xfff) | hi20; -- return 0; -+ return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000); - } - --static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location, -+static int apply_r_riscv_call_plt_rela(struct module *me, void *location, - Elf_Addr v) - { -- ptrdiff_t offset = (void *)v - (void *)location; -+ ptrdiff_t offset = (void *)v - location; - u32 hi20, lo12; - - if (!riscv_insn_valid_32bit_offset(offset)) { - /* Only emit 
the plt entry if offset over 32-bit range */ - if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) { -- offset = module_emit_plt_entry(me, v); -- offset = (void *)offset - (void *)location; -+ offset = (void *)module_emit_plt_entry(me, v) - location; - } else { - pr_err( - "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", -@@ -228,15 +261,14 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location, - - hi20 = (offset + 0x800) & 0xfffff000; - lo12 = (offset - hi20) & 0xfff; -- *location = (*location & 0xfff) | hi20; -- *(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20); -- return 0; -+ riscv_insn_rmw(location, 0xfff, hi20); -+ return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20); - } - --static int apply_r_riscv_call_rela(struct module *me, u32 *location, -+static int apply_r_riscv_call_rela(struct module *me, void *location, - Elf_Addr v) - { -- ptrdiff_t offset = (void *)v - (void *)location; -+ ptrdiff_t offset = (void *)v - location; - u32 hi20, lo12; - - if (!riscv_insn_valid_32bit_offset(offset)) { -@@ -248,18 +280,17 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location, - - hi20 = (offset + 0x800) & 0xfffff000; - lo12 = (offset - hi20) & 0xfff; -- *location = (*location & 0xfff) | hi20; -- *(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20); -- return 0; -+ riscv_insn_rmw(location, 0xfff, hi20); -+ return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20); - } - --static int apply_r_riscv_relax_rela(struct module *me, u32 *location, -+static int apply_r_riscv_relax_rela(struct module *me, void *location, - Elf_Addr v) - { - return 0; - } - --static int apply_r_riscv_align_rela(struct module *me, u32 *location, -+static int apply_r_riscv_align_rela(struct module *me, void *location, - Elf_Addr v) - { - pr_err( -@@ -268,75 +299,509 @@ static int apply_r_riscv_align_rela(struct module *me, u32 *location, - return -EINVAL; - } - --static int apply_r_riscv_add32_rela(struct module *me, u32 *location, -+static int apply_r_riscv_add8_rela(struct module *me, void *location, Elf_Addr v) -+{ -+ *(u8 *)location += (u8)v; -+ return 0; -+} -+ -+static int apply_r_riscv_add16_rela(struct module *me, void *location, -+ Elf_Addr v) -+{ -+ *(u16 *)location += (u16)v; -+ return 0; -+} -+ -+static int apply_r_riscv_add32_rela(struct module *me, void *location, - Elf_Addr v) - { - *(u32 *)location += (u32)v; - return 0; - } - --static int apply_r_riscv_add64_rela(struct module *me, u32 *location, -+static int apply_r_riscv_add64_rela(struct module *me, void *location, - Elf_Addr v) - { - *(u64 *)location += (u64)v; - return 0; - } - --static int apply_r_riscv_sub32_rela(struct module *me, u32 *location, -+static int apply_r_riscv_sub8_rela(struct module *me, void *location, Elf_Addr v) -+{ -+ *(u8 *)location -= (u8)v; -+ return 0; -+} -+ -+static int apply_r_riscv_sub16_rela(struct module *me, void *location, -+ Elf_Addr v) -+{ -+ *(u16 *)location -= (u16)v; -+ return 0; -+} -+ -+static int apply_r_riscv_sub32_rela(struct module *me, void *location, - Elf_Addr v) - { - *(u32 *)location -= (u32)v; - return 0; - } - --static int apply_r_riscv_sub64_rela(struct module *me, u32 *location, -+static int apply_r_riscv_sub64_rela(struct module *me, void *location, - Elf_Addr v) - { - *(u64 *)location -= (u64)v; - return 0; - } - --static int (*reloc_handlers_rela[]) (struct module *me, u32 *location, -- Elf_Addr v) = { -- [R_RISCV_32] = apply_r_riscv_32_rela, -- [R_RISCV_64] = apply_r_riscv_64_rela, -- [R_RISCV_BRANCH] = 
apply_r_riscv_branch_rela, -- [R_RISCV_JAL] = apply_r_riscv_jal_rela, -- [R_RISCV_RVC_BRANCH] = apply_r_riscv_rvc_branch_rela, -- [R_RISCV_RVC_JUMP] = apply_r_riscv_rvc_jump_rela, -- [R_RISCV_PCREL_HI20] = apply_r_riscv_pcrel_hi20_rela, -- [R_RISCV_PCREL_LO12_I] = apply_r_riscv_pcrel_lo12_i_rela, -- [R_RISCV_PCREL_LO12_S] = apply_r_riscv_pcrel_lo12_s_rela, -- [R_RISCV_HI20] = apply_r_riscv_hi20_rela, -- [R_RISCV_LO12_I] = apply_r_riscv_lo12_i_rela, -- [R_RISCV_LO12_S] = apply_r_riscv_lo12_s_rela, -- [R_RISCV_GOT_HI20] = apply_r_riscv_got_hi20_rela, -- [R_RISCV_CALL_PLT] = apply_r_riscv_call_plt_rela, -- [R_RISCV_CALL] = apply_r_riscv_call_rela, -- [R_RISCV_RELAX] = apply_r_riscv_relax_rela, -- [R_RISCV_ALIGN] = apply_r_riscv_align_rela, -- [R_RISCV_ADD32] = apply_r_riscv_add32_rela, -- [R_RISCV_ADD64] = apply_r_riscv_add64_rela, -- [R_RISCV_SUB32] = apply_r_riscv_sub32_rela, -- [R_RISCV_SUB64] = apply_r_riscv_sub64_rela, -+static int dynamic_linking_not_supported(struct module *me, void *location, -+ Elf_Addr v) -+{ -+ pr_err("%s: Dynamic linking not supported in kernel modules PC = %p\n", -+ me->name, location); -+ return -EINVAL; -+} -+ -+static int tls_not_supported(struct module *me, void *location, Elf_Addr v) -+{ -+ pr_err("%s: Thread local storage not supported in kernel modules PC = %p\n", -+ me->name, location); -+ return -EINVAL; -+} -+ -+static int apply_r_riscv_sub6_rela(struct module *me, void *location, Elf_Addr v) -+{ -+ u8 *byte = location; -+ u8 value = v; -+ -+ *byte = (*byte - (value & 0x3f)) & 0x3f; -+ return 0; -+} -+ -+static int apply_r_riscv_set6_rela(struct module *me, void *location, Elf_Addr v) -+{ -+ u8 *byte = location; -+ u8 value = v; -+ -+ *byte = (*byte & 0xc0) | (value & 0x3f); -+ return 0; -+} -+ -+static int apply_r_riscv_set8_rela(struct module *me, void *location, Elf_Addr v) -+{ -+ *(u8 *)location = (u8)v; -+ return 0; -+} -+ -+static int apply_r_riscv_set16_rela(struct module *me, void *location, -+ Elf_Addr v) -+{ -+ *(u16 *)location = (u16)v; -+ return 0; -+} -+ -+static int apply_r_riscv_set32_rela(struct module *me, void *location, -+ Elf_Addr v) -+{ -+ *(u32 *)location = (u32)v; -+ return 0; -+} -+ -+static int apply_r_riscv_32_pcrel_rela(struct module *me, void *location, -+ Elf_Addr v) -+{ -+ *(u32 *)location = v - (uintptr_t)location; -+ return 0; -+} -+ -+static int apply_r_riscv_plt32_rela(struct module *me, void *location, -+ Elf_Addr v) -+{ -+ ptrdiff_t offset = (void *)v - location; -+ -+ if (!riscv_insn_valid_32bit_offset(offset)) { -+ /* Only emit the plt entry if offset over 32-bit range */ -+ if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) { -+ offset = (void *)module_emit_plt_entry(me, v) - location; -+ } else { -+ pr_err("%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", -+ me->name, (long long)v, location); -+ return -EINVAL; -+ } -+ } -+ -+ *(u32 *)location = (u32)offset; -+ return 0; -+} -+ -+static int apply_r_riscv_set_uleb128(struct module *me, void *location, Elf_Addr v) -+{ -+ *(long *)location = v; -+ return 0; -+} -+ -+static int apply_r_riscv_sub_uleb128(struct module *me, void *location, Elf_Addr v) -+{ -+ *(long *)location -= v; -+ return 0; -+} -+ -+static int apply_6_bit_accumulation(struct module *me, void *location, long buffer) -+{ -+ u8 *byte = location; -+ u8 value = buffer; -+ -+ if (buffer > 0x3f) { -+ pr_err("%s: value %ld out of range for 6-bit relocation.\n", -+ me->name, buffer); -+ return -EINVAL; -+ } -+ -+ *byte = (*byte & 0xc0) | (value & 0x3f); -+ return 0; -+} -+ -+static int 
apply_8_bit_accumulation(struct module *me, void *location, long buffer) -+{ -+ if (buffer > U8_MAX) { -+ pr_err("%s: value %ld out of range for 8-bit relocation.\n", -+ me->name, buffer); -+ return -EINVAL; -+ } -+ *(u8 *)location = (u8)buffer; -+ return 0; -+} -+ -+static int apply_16_bit_accumulation(struct module *me, void *location, long buffer) -+{ -+ if (buffer > U16_MAX) { -+ pr_err("%s: value %ld out of range for 16-bit relocation.\n", -+ me->name, buffer); -+ return -EINVAL; -+ } -+ *(u16 *)location = (u16)buffer; -+ return 0; -+} -+ -+static int apply_32_bit_accumulation(struct module *me, void *location, long buffer) -+{ -+ if (buffer > U32_MAX) { -+ pr_err("%s: value %ld out of range for 32-bit relocation.\n", -+ me->name, buffer); -+ return -EINVAL; -+ } -+ *(u32 *)location = (u32)buffer; -+ return 0; -+} -+ -+static int apply_64_bit_accumulation(struct module *me, void *location, long buffer) -+{ -+ *(u64 *)location = (u64)buffer; -+ return 0; -+} -+ -+static int apply_uleb128_accumulation(struct module *me, void *location, long buffer) -+{ -+ /* -+ * ULEB128 is a variable length encoding. Encode the buffer into -+ * the ULEB128 data format. -+ */ -+ u8 *p = location; -+ -+ while (buffer != 0) { -+ u8 value = buffer & 0x7f; -+ -+ buffer >>= 7; -+ value |= (!!buffer) << 7; -+ -+ *p++ = value; -+ } -+ return 0; -+} -+ -+/* -+ * Relocations defined in the riscv-elf-psabi-doc. -+ * This handles static linking only. -+ */ -+static const struct relocation_handlers reloc_handlers[] = { -+ [R_RISCV_32] = { .reloc_handler = apply_r_riscv_32_rela }, -+ [R_RISCV_64] = { .reloc_handler = apply_r_riscv_64_rela }, -+ [R_RISCV_RELATIVE] = { .reloc_handler = dynamic_linking_not_supported }, -+ [R_RISCV_COPY] = { .reloc_handler = dynamic_linking_not_supported }, -+ [R_RISCV_JUMP_SLOT] = { .reloc_handler = dynamic_linking_not_supported }, -+ [R_RISCV_TLS_DTPMOD32] = { .reloc_handler = dynamic_linking_not_supported }, -+ [R_RISCV_TLS_DTPMOD64] = { .reloc_handler = dynamic_linking_not_supported }, -+ [R_RISCV_TLS_DTPREL32] = { .reloc_handler = dynamic_linking_not_supported }, -+ [R_RISCV_TLS_DTPREL64] = { .reloc_handler = dynamic_linking_not_supported }, -+ [R_RISCV_TLS_TPREL32] = { .reloc_handler = dynamic_linking_not_supported }, -+ [R_RISCV_TLS_TPREL64] = { .reloc_handler = dynamic_linking_not_supported }, -+ /* 12-15 undefined */ -+ [R_RISCV_BRANCH] = { .reloc_handler = apply_r_riscv_branch_rela }, -+ [R_RISCV_JAL] = { .reloc_handler = apply_r_riscv_jal_rela }, -+ [R_RISCV_CALL] = { .reloc_handler = apply_r_riscv_call_rela }, -+ [R_RISCV_CALL_PLT] = { .reloc_handler = apply_r_riscv_call_plt_rela }, -+ [R_RISCV_GOT_HI20] = { .reloc_handler = apply_r_riscv_got_hi20_rela }, -+ [R_RISCV_TLS_GOT_HI20] = { .reloc_handler = tls_not_supported }, -+ [R_RISCV_TLS_GD_HI20] = { .reloc_handler = tls_not_supported }, -+ [R_RISCV_PCREL_HI20] = { .reloc_handler = apply_r_riscv_pcrel_hi20_rela }, -+ [R_RISCV_PCREL_LO12_I] = { .reloc_handler = apply_r_riscv_pcrel_lo12_i_rela }, -+ [R_RISCV_PCREL_LO12_S] = { .reloc_handler = apply_r_riscv_pcrel_lo12_s_rela }, -+ [R_RISCV_HI20] = { .reloc_handler = apply_r_riscv_hi20_rela }, -+ [R_RISCV_LO12_I] = { .reloc_handler = apply_r_riscv_lo12_i_rela }, -+ [R_RISCV_LO12_S] = { .reloc_handler = apply_r_riscv_lo12_s_rela }, -+ [R_RISCV_TPREL_HI20] = { .reloc_handler = tls_not_supported }, -+ [R_RISCV_TPREL_LO12_I] = { .reloc_handler = tls_not_supported }, -+ [R_RISCV_TPREL_LO12_S] = { .reloc_handler = tls_not_supported }, -+ [R_RISCV_TPREL_ADD] = { .reloc_handler = 
tls_not_supported }, -+ [R_RISCV_ADD8] = { .reloc_handler = apply_r_riscv_add8_rela, -+ .accumulate_handler = apply_8_bit_accumulation }, -+ [R_RISCV_ADD16] = { .reloc_handler = apply_r_riscv_add16_rela, -+ .accumulate_handler = apply_16_bit_accumulation }, -+ [R_RISCV_ADD32] = { .reloc_handler = apply_r_riscv_add32_rela, -+ .accumulate_handler = apply_32_bit_accumulation }, -+ [R_RISCV_ADD64] = { .reloc_handler = apply_r_riscv_add64_rela, -+ .accumulate_handler = apply_64_bit_accumulation }, -+ [R_RISCV_SUB8] = { .reloc_handler = apply_r_riscv_sub8_rela, -+ .accumulate_handler = apply_8_bit_accumulation }, -+ [R_RISCV_SUB16] = { .reloc_handler = apply_r_riscv_sub16_rela, -+ .accumulate_handler = apply_16_bit_accumulation }, -+ [R_RISCV_SUB32] = { .reloc_handler = apply_r_riscv_sub32_rela, -+ .accumulate_handler = apply_32_bit_accumulation }, -+ [R_RISCV_SUB64] = { .reloc_handler = apply_r_riscv_sub64_rela, -+ .accumulate_handler = apply_64_bit_accumulation }, -+ /* 41-42 reserved for future standard use */ -+ [R_RISCV_ALIGN] = { .reloc_handler = apply_r_riscv_align_rela }, -+ [R_RISCV_RVC_BRANCH] = { .reloc_handler = apply_r_riscv_rvc_branch_rela }, -+ [R_RISCV_RVC_JUMP] = { .reloc_handler = apply_r_riscv_rvc_jump_rela }, -+ /* 46-50 reserved for future standard use */ -+ [R_RISCV_RELAX] = { .reloc_handler = apply_r_riscv_relax_rela }, -+ [R_RISCV_SUB6] = { .reloc_handler = apply_r_riscv_sub6_rela, -+ .accumulate_handler = apply_6_bit_accumulation }, -+ [R_RISCV_SET6] = { .reloc_handler = apply_r_riscv_set6_rela, -+ .accumulate_handler = apply_6_bit_accumulation }, -+ [R_RISCV_SET8] = { .reloc_handler = apply_r_riscv_set8_rela, -+ .accumulate_handler = apply_8_bit_accumulation }, -+ [R_RISCV_SET16] = { .reloc_handler = apply_r_riscv_set16_rela, -+ .accumulate_handler = apply_16_bit_accumulation }, -+ [R_RISCV_SET32] = { .reloc_handler = apply_r_riscv_set32_rela, -+ .accumulate_handler = apply_32_bit_accumulation }, -+ [R_RISCV_32_PCREL] = { .reloc_handler = apply_r_riscv_32_pcrel_rela }, -+ [R_RISCV_IRELATIVE] = { .reloc_handler = dynamic_linking_not_supported }, -+ [R_RISCV_PLT32] = { .reloc_handler = apply_r_riscv_plt32_rela }, -+ [R_RISCV_SET_ULEB128] = { .reloc_handler = apply_r_riscv_set_uleb128, -+ .accumulate_handler = apply_uleb128_accumulation }, -+ [R_RISCV_SUB_ULEB128] = { .reloc_handler = apply_r_riscv_sub_uleb128, -+ .accumulate_handler = apply_uleb128_accumulation }, -+ /* 62-191 reserved for future standard use */ -+ /* 192-255 nonstandard ABI extensions */ - }; - -+static void -+process_accumulated_relocations(struct module *me, -+ struct hlist_head **relocation_hashtable, -+ struct list_head *used_buckets_list) -+{ -+ /* -+ * Only ADD/SUB/SET/ULEB128 should end up here. -+ * -+ * Each bucket may have more than one relocation location. All -+ * relocations for a location are stored in a list in a bucket. -+ * -+ * Relocations are applied to a temp variable before being stored to the -+ * provided location to check for overflow. This also allows ULEB128 to -+ * properly decide how many entries are needed before storing to -+ * location. The final value is stored into location using the handler -+ * for the last relocation to an address. 
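 For example, a label difference emitted by a .uleb128 directive typically
 produces an R_RISCV_SET_ULEB128 relocation followed by an R_RISCV_SUB_ULEB128
 at the same location: both land in the same bucket list, the temporary buffer
 ends up holding (end - begin), and the ULEB128 accumulate handler then encodes
 that value in place.

 As an aside, a minimal user-space sketch of the same encoding loop used by
 apply_uleb128_accumulation() looks like this (uleb128_encode() is an
 illustrative helper name, not something defined by this patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Emit 7 bits at a time, low bits first, setting bit 7 while more
	 * significant bits remain; returns the number of bytes written. */
	static int uleb128_encode(uint8_t *p, unsigned long value)
	{
		int n = 0;

		while (value != 0) {
			uint8_t byte = value & 0x7f;

			value >>= 7;
			if (value)
				byte |= 0x80;
			p[n++] = byte;
		}
		return n;
	}

	int main(void)
	{
		uint8_t buf[10];
		int i, n = uleb128_encode(buf, 624485);

		for (i = 0; i < n; i++)
			printf("%02x ", buf[i]);	/* prints: e5 8e 26 */
		printf("\n");
		return 0;
	}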
-+ * -+ * Three layers of indexing: -+ * - Each of the buckets in use -+ * - Groups of relocations in each bucket by location address -+ * - Each relocation entry for a location address -+ */ -+ struct used_bucket *bucket_iter; -+ struct used_bucket *bucket_iter_tmp; -+ struct relocation_head *rel_head_iter; -+ struct hlist_node *rel_head_iter_tmp; -+ struct relocation_entry *rel_entry_iter; -+ struct relocation_entry *rel_entry_iter_tmp; -+ int curr_type; -+ void *location; -+ long buffer; -+ -+ list_for_each_entry_safe(bucket_iter, bucket_iter_tmp, -+ used_buckets_list, head) { -+ hlist_for_each_entry_safe(rel_head_iter, rel_head_iter_tmp, -+ bucket_iter->bucket, node) { -+ buffer = 0; -+ location = rel_head_iter->location; -+ list_for_each_entry_safe(rel_entry_iter, -+ rel_entry_iter_tmp, -+ rel_head_iter->rel_entry, -+ head) { -+ curr_type = rel_entry_iter->type; -+ reloc_handlers[curr_type].reloc_handler( -+ me, &buffer, rel_entry_iter->value); -+ kfree(rel_entry_iter); -+ } -+ reloc_handlers[curr_type].accumulate_handler( -+ me, location, buffer); -+ kfree(rel_head_iter); -+ } -+ kfree(bucket_iter); -+ } -+ -+ kfree(*relocation_hashtable); -+} -+ -+static int add_relocation_to_accumulate(struct module *me, int type, -+ void *location, -+ unsigned int hashtable_bits, Elf_Addr v, -+ struct hlist_head *relocation_hashtable, -+ struct list_head *used_buckets_list) -+{ -+ struct relocation_entry *entry; -+ struct relocation_head *rel_head; -+ struct hlist_head *current_head; -+ struct used_bucket *bucket; -+ unsigned long hash; -+ bool found = false; -+ struct relocation_head *rel_head_iter; -+ -+ entry = kmalloc(sizeof(*entry), GFP_KERNEL); -+ -+ if (!entry) -+ return -ENOMEM; -+ -+ INIT_LIST_HEAD(&entry->head); -+ entry->type = type; -+ entry->value = v; -+ -+ hash = hash_min((uintptr_t)location, hashtable_bits); -+ -+ current_head = &relocation_hashtable[hash]; -+ -+ /* -+ * Search for the relocation_head for the relocations that happen at the -+ * provided location -+ */ -+ hlist_for_each_entry(rel_head_iter, current_head, node) { -+ if (rel_head_iter->location == location) { -+ found = true; -+ rel_head = rel_head_iter; -+ break; -+ } -+ } -+ -+ /* -+ * If there has not yet been any relocations at the provided location, -+ * create a relocation_head for that location and populate it with this -+ * relocation_entry. 
-+ */ -+ if (!found) { -+ rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL); -+ -+ if (!rel_head) { -+ kfree(entry); -+ return -ENOMEM; -+ } -+ -+ rel_head->rel_entry = -+ kmalloc(sizeof(struct list_head), GFP_KERNEL); -+ -+ if (!rel_head->rel_entry) { -+ kfree(entry); -+ kfree(rel_head); -+ return -ENOMEM; -+ } -+ -+ INIT_LIST_HEAD(rel_head->rel_entry); -+ rel_head->location = location; -+ INIT_HLIST_NODE(&rel_head->node); -+ if (!current_head->first) { -+ bucket = -+ kmalloc(sizeof(struct used_bucket), GFP_KERNEL); -+ -+ if (!bucket) { -+ kfree(entry); -+ kfree(rel_head->rel_entry); -+ kfree(rel_head); -+ return -ENOMEM; -+ } -+ -+ INIT_LIST_HEAD(&bucket->head); -+ bucket->bucket = current_head; -+ list_add(&bucket->head, used_buckets_list); -+ } -+ hlist_add_head(&rel_head->node, current_head); -+ } -+ -+ /* Add relocation to head of discovered rel_head */ -+ list_add_tail(&entry->head, rel_head->rel_entry); -+ -+ return 0; -+} -+ -+static unsigned int -+initialize_relocation_hashtable(unsigned int num_relocations, -+ struct hlist_head **relocation_hashtable) -+{ -+ /* Can safely assume that bits is not greater than sizeof(long) */ -+ unsigned long hashtable_size = roundup_pow_of_two(num_relocations); -+ /* -+ * When hashtable_size == 1, hashtable_bits == 0. -+ * This is valid because the hashing algorithm returns 0 in this case. -+ */ -+ unsigned int hashtable_bits = ilog2(hashtable_size); -+ -+ /* -+ * Double size of hashtable if num_relocations * 1.25 is greater than -+ * hashtable_size. -+ */ -+ int should_double_size = ((num_relocations + (num_relocations >> 2)) > (hashtable_size)); -+ -+ hashtable_bits += should_double_size; -+ -+ hashtable_size <<= should_double_size; -+ -+ *relocation_hashtable = kmalloc_array(hashtable_size, -+ sizeof(**relocation_hashtable), -+ GFP_KERNEL); -+ if (!*relocation_hashtable) -+ return 0; -+ -+ __hash_init(*relocation_hashtable, hashtable_size); -+ -+ return hashtable_bits; -+} -+ - int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, - unsigned int symindex, unsigned int relsec, - struct module *me) - { - Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr; -- int (*handler)(struct module *me, u32 *location, Elf_Addr v); -+ int (*handler)(struct module *me, void *location, Elf_Addr v); - Elf_Sym *sym; -- u32 *location; -+ void *location; - unsigned int i, type; -+ unsigned int j_idx = 0; - Elf_Addr v; - int res; -+ unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel); -+ struct hlist_head *relocation_hashtable; -+ struct list_head used_buckets_list; -+ unsigned int hashtable_bits; -+ -+ hashtable_bits = initialize_relocation_hashtable(num_relocations, -+ &relocation_hashtable); -+ -+ if (!relocation_hashtable) -+ return -ENOMEM; -+ -+ INIT_LIST_HEAD(&used_buckets_list); - - pr_debug("Applying relocate section %u to %u\n", relsec, - sechdrs[relsec].sh_info); - -- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { -+ for (i = 0; i < num_relocations; i++) { - /* This is where to make the change */ - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr - + rel[i].r_offset; -@@ -354,8 +819,8 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, - - type = ELF_RISCV_R_TYPE(rel[i].r_info); - -- if (type < ARRAY_SIZE(reloc_handlers_rela)) -- handler = reloc_handlers_rela[type]; -+ if (type < ARRAY_SIZE(reloc_handlers)) -+ handler = reloc_handlers[type].reloc_handler; - else - handler = NULL; - -@@ -368,9 +833,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, - v = sym->st_value + 
rel[i].r_addend; - - if (type == R_RISCV_PCREL_LO12_I || type == R_RISCV_PCREL_LO12_S) { -- unsigned int j; -+ unsigned int j = j_idx; -+ bool found = false; - -- for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) { -+ do { - unsigned long hi20_loc = - sechdrs[sechdrs[relsec].sh_info].sh_addr - + rel[j].r_offset; -@@ -399,23 +865,42 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, - hi20 = (offset + 0x800) & 0xfffff000; - lo12 = offset - hi20; - v = lo12; -+ found = true; - - break; - } -- } -- if (j == sechdrs[relsec].sh_size / sizeof(*rel)) { -+ -+ j++; -+ if (j > sechdrs[relsec].sh_size / sizeof(*rel)) -+ j = 0; -+ -+ } while (j_idx != j); -+ -+ if (!found) { - pr_err( - "%s: Can not find HI20 relocation information\n", - me->name); - return -EINVAL; - } -+ -+ /* Record the previous j-loop end index */ -+ j_idx = j; - } - -- res = handler(me, location, v); -+ if (reloc_handlers[type].accumulate_handler) -+ res = add_relocation_to_accumulate(me, type, location, -+ hashtable_bits, v, -+ relocation_hashtable, -+ &used_buckets_list); -+ else -+ res = handler(me, location, v); - if (res) - return res; - } - -+ process_accumulated_relocations(me, &relocation_hashtable, -+ &used_buckets_list); -+ - return 0; - } - -@@ -430,21 +915,6 @@ void *module_alloc(unsigned long size) - } - #endif - --static const Elf_Shdr *find_section(const Elf_Ehdr *hdr, -- const Elf_Shdr *sechdrs, -- const char *name) --{ -- const Elf_Shdr *s, *se; -- const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; -- -- for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) { -- if (strcmp(name, secstrs + s->sh_name) == 0) -- return s; -- } -- -- return NULL; --} -- - int module_finalize(const Elf_Ehdr *hdr, - const Elf_Shdr *sechdrs, - struct module *me) -diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c -index 111111111111..222222222222 100644 ---- a/arch/riscv/kernel/process.c -+++ b/arch/riscv/kernel/process.c -@@ -24,6 +24,7 @@ - #include - #include - #include -+#include - - #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) - #include -@@ -146,12 +147,28 @@ void flush_thread(void) - fstate_off(current, task_pt_regs(current)); - memset(¤t->thread.fstate, 0, sizeof(current->thread.fstate)); - #endif -+#ifdef CONFIG_RISCV_ISA_V -+ /* Reset vector state */ -+ riscv_v_vstate_off(task_pt_regs(current)); -+ kfree(current->thread.vstate.datap); -+ memset(¤t->thread.vstate, 0, sizeof(struct __riscv_v_ext_state)); -+#endif -+} -+ -+void arch_release_task_struct(struct task_struct *tsk) -+{ -+ /* Free the vector context of datap. 
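 (The buffer that datap points at is allocated lazily, the first time a task
 executes a vector instruction and riscv_v_first_use_handler() calls
 riscv_v_thread_zalloc(). It is deliberately not shared across fork, because
 arch_dup_task_struct() below clears the copied vstate, and flush_thread()
 above frees and clears it again on exec; this hook covers the remaining case
 where the task itself is released.)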
*/ -+ if (has_vector()) -+ kfree(tsk->thread.vstate.datap); - } - - int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) - { - fstate_save(src, task_pt_regs(src)); - *dst = *src; -+ /* clear entire V context, including datap for a new task */ -+ memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state)); -+ - return 0; - } - -@@ -183,6 +200,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) - childregs->a0 = 0; /* Return value of fork() */ - p->thread.ra = (unsigned long)ret_from_fork; - } -+ p->thread.riscv_v_flags = 0; -+ riscv_v_vstate_off(childregs); - p->thread.sp = (unsigned long)childregs; /* kernel sp */ - return 0; - } -diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c -index 111111111111..222222222222 100644 ---- a/arch/riscv/kernel/ptrace.c -+++ b/arch/riscv/kernel/ptrace.c -@@ -7,6 +7,7 @@ - * Copied from arch/tile/kernel/ptrace.c - */ - -+#include - #include - #include - #include -@@ -27,6 +28,9 @@ enum riscv_regset { - #ifdef CONFIG_FPU - REGSET_F, - #endif -+#ifdef CONFIG_RISCV_ISA_V -+ REGSET_V, -+#endif - }; - - static int riscv_gpr_get(struct task_struct *target, -@@ -83,6 +87,61 @@ static int riscv_fpr_set(struct task_struct *target, - } - #endif - -+#ifdef CONFIG_RISCV_ISA_V -+static int riscv_vr_get(struct task_struct *target, -+ const struct user_regset *regset, -+ struct membuf to) -+{ -+ struct __riscv_v_ext_state *vstate = &target->thread.vstate; -+ -+ if (!riscv_v_vstate_query(task_pt_regs(target))) -+ return -EINVAL; -+ -+ /* -+ * Ensure the vector registers have been saved to the memory before -+ * copying them to membuf. -+ */ -+ if (target == current) -+ riscv_v_vstate_save(current, task_pt_regs(current)); -+ -+ /* Copy vector header from vstate. */ -+ membuf_write(&to, vstate, offsetof(struct __riscv_v_ext_state, datap)); -+ membuf_zero(&to, sizeof(void *)); -+ -+ /* Copy all the vector registers from vstate. */ -+ return membuf_write(&to, vstate->datap, riscv_v_vsize); -+} -+ -+static int riscv_vr_set(struct task_struct *target, -+ const struct user_regset *regset, -+ unsigned int pos, unsigned int count, -+ const void *kbuf, const void __user *ubuf) -+{ -+ int ret, size; -+ struct __riscv_v_ext_state *vstate = &target->thread.vstate; -+ -+ if (!riscv_v_vstate_query(task_pt_regs(target))) -+ return -EINVAL; -+ -+ /* Copy rest of the vstate except datap */ -+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vstate, 0, -+ offsetof(struct __riscv_v_ext_state, datap)); -+ if (unlikely(ret)) -+ return ret; -+ -+ /* Skip copy datap. */ -+ size = sizeof(vstate->datap); -+ count -= size; -+ ubuf += size; -+ -+ /* Copy all the vector registers. 
*/ -+ pos = 0; -+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vstate->datap, -+ 0, riscv_v_vsize); -+ return ret; -+} -+#endif -+ - static const struct user_regset riscv_user_regset[] = { - [REGSET_X] = { - .core_note_type = NT_PRSTATUS, -@@ -102,6 +161,17 @@ static const struct user_regset riscv_user_regset[] = { - .set = riscv_fpr_set, - }, - #endif -+#ifdef CONFIG_RISCV_ISA_V -+ [REGSET_V] = { -+ .core_note_type = NT_RISCV_VECTOR, -+ .align = 16, -+ .n = ((32 * RISCV_MAX_VLENB) + -+ sizeof(struct __riscv_v_ext_state)) / sizeof(__u32), -+ .size = sizeof(__u32), -+ .regset_get = riscv_vr_get, -+ .set = riscv_vr_set, -+ }, -+#endif - }; - - static const struct user_regset_view riscv_user_native_view = { -diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c -index 111111111111..222222222222 100644 ---- a/arch/riscv/kernel/sbi.c -+++ b/arch/riscv/kernel/sbi.c -@@ -529,6 +529,15 @@ int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask, - } - EXPORT_SYMBOL(sbi_remote_hfence_vvma); - -+#if defined(CONFIG_SOC_SPACEMIT_K1PRO) || defined(CONFIG_SOC_SPACEMIT_K1X) -+void sbi_flush_local_dcache_all(void) -+{ -+ sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_FLUSH_CACHE_ALL, 0, -+ 0, 0, 0, 0, 0); -+} -+EXPORT_SYMBOL(sbi_flush_local_dcache_all); -+#endif -+ - /** - * sbi_remote_hfence_vvma_asid() - Execute HFENCE.VVMA instructions on given - * remote harts for current guest virtual address range belonging to a specific -diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c -index 111111111111..222222222222 100644 ---- a/arch/riscv/kernel/setup.c -+++ b/arch/riscv/kernel/setup.c -@@ -262,6 +262,8 @@ static void __init parse_dtb(void) - #endif - } - -+extern void __init init_rt_signal_env(void); -+ - void __init setup_arch(char **cmdline_p) - { - parse_dtb(); -@@ -295,7 +297,10 @@ void __init setup_arch(char **cmdline_p) - - riscv_init_cbom_blocksize(); - riscv_fill_hwcap(); -+ init_rt_signal_env(); - apply_boot_alternatives(); -+ -+ riscv_user_isa_enable(); - } - - static int __init topology_init(void) --- -Armbian - -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Sat, 22 Jun 2024 07:34:04 -0400 -Subject: arch: riscv: kernel: signal.c - -Signed-off-by: Patrick Yavitz ---- - arch/riscv/kernel/signal.c | 220 ++++++++-- - arch/riscv/kernel/smpboot.c | 2 + - arch/riscv/kernel/suspend.c | 44 ++ - arch/riscv/kernel/traps.c | 42 +- - arch/riscv/kernel/vector.c | 111 +++++ - arch/riscv/kvm/Makefile | 1 + - arch/riscv/kvm/vcpu.c | 30 +- - arch/riscv/kvm/vcpu_vector.c | 186 ++++++++ - arch/riscv/lib/Makefile | 1 + - arch/riscv/lib/xor.S | 81 ++++ - 10 files changed, 687 insertions(+), 31 deletions(-) - -diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c -index 111111111111..222222222222 100644 ---- a/arch/riscv/kernel/signal.c -+++ b/arch/riscv/kernel/signal.c -@@ -18,10 +18,14 @@ - #include - #include - #include -+#include - #include - #include - -+unsigned long signal_minsigstksz __ro_after_init; -+ - extern u32 __user_rt_sigreturn[2]; -+static size_t riscv_v_sc_size __ro_after_init; - - #define DEBUG_SIG 0 - -@@ -39,26 +43,13 @@ static long restore_fp_state(struct pt_regs *regs, - { - long err; - struct __riscv_d_ext_state __user *state = &sc_fpregs->d; -- size_t i; - - err = __copy_from_user(¤t->thread.fstate, state, sizeof(*state)); - if (unlikely(err)) - return err; - - fstate_restore(current, regs); -- -- /* We support no other extension state at this time. 
*/ -- for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) { -- u32 value; -- -- err = __get_user(value, &sc_fpregs->q.reserved[i]); -- if (unlikely(err)) -- break; -- if (value != 0) -- return -EINVAL; -- } -- -- return err; -+ return 0; - } - - static long save_fp_state(struct pt_regs *regs, -@@ -66,52 +57,186 @@ static long save_fp_state(struct pt_regs *regs, - { - long err; - struct __riscv_d_ext_state __user *state = &sc_fpregs->d; -- size_t i; - - fstate_save(current, regs); - err = __copy_to_user(state, ¤t->thread.fstate, sizeof(*state)); -+ return err; -+} -+#else -+#define save_fp_state(task, regs) (0) -+#define restore_fp_state(task, regs) (0) -+#endif -+ -+#ifdef CONFIG_RISCV_ISA_V -+ -+static long save_v_state(struct pt_regs *regs, void __user **sc_vec) -+{ -+ struct __riscv_ctx_hdr __user *hdr; -+ struct __sc_riscv_v_state __user *state; -+ void __user *datap; -+ long err; -+ -+ hdr = *sc_vec; -+ /* Place state to the user's signal context space after the hdr */ -+ state = (struct __sc_riscv_v_state __user *)(hdr + 1); -+ /* Point datap right after the end of __sc_riscv_v_state */ -+ datap = state + 1; -+ -+ /* datap is designed to be 16 byte aligned for better performance */ -+ WARN_ON(unlikely(!IS_ALIGNED((unsigned long)datap, 16))); -+ -+ riscv_v_vstate_save(current, regs); -+ /* Copy everything of vstate but datap. */ -+ err = __copy_to_user(&state->v_state, ¤t->thread.vstate, -+ offsetof(struct __riscv_v_ext_state, datap)); -+ /* Copy the pointer datap itself. */ -+ err |= __put_user(datap, &state->v_state.datap); -+ /* Copy the whole vector content to user space datap. */ -+ err |= __copy_to_user(datap, current->thread.vstate.datap, riscv_v_vsize); -+ /* Copy magic to the user space after saving all vector conetext */ -+ err |= __put_user(RISCV_V_MAGIC, &hdr->magic); -+ err |= __put_user(riscv_v_sc_size, &hdr->size); - if (unlikely(err)) - return err; - -- /* We support no other extension state at this time. */ -- for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) { -- err = __put_user(0, &sc_fpregs->q.reserved[i]); -- if (unlikely(err)) -- break; -- } -+ /* Only progress the sv_vec if everything has done successfully */ -+ *sc_vec += riscv_v_sc_size; -+ return 0; -+} -+ -+/* -+ * Restore Vector extension context from the user's signal frame. This function -+ * assumes a valid extension header. So magic and size checking must be done by -+ * the caller. -+ */ -+static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec) -+{ -+ long err; -+ struct __sc_riscv_v_state __user *state = sc_vec; -+ void __user *datap; -+ -+ /* Copy everything of __sc_riscv_v_state except datap. */ -+ err = __copy_from_user(¤t->thread.vstate, &state->v_state, -+ offsetof(struct __riscv_v_ext_state, datap)); -+ if (unlikely(err)) -+ return err; -+ -+ /* Copy the pointer datap itself. */ -+ err = __get_user(datap, &state->v_state.datap); -+ if (unlikely(err)) -+ return err; -+ /* -+ * Copy the whole vector content from user space datap. Use -+ * copy_from_user to prevent information leak. 
-+ */ -+ err = copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize); -+ if (unlikely(err)) -+ return err; -+ -+ riscv_v_vstate_restore(current, regs); - - return err; - } - #else --#define save_fp_state(task, regs) (0) --#define restore_fp_state(task, regs) (0) -+#define save_v_state(task, regs) (0) -+#define __restore_v_state(task, regs) (0) - #endif - - static long restore_sigcontext(struct pt_regs *regs, - struct sigcontext __user *sc) - { -+ void __user *sc_ext_ptr = &sc->sc_extdesc.hdr; -+ __u32 rsvd; - long err; - /* sc_regs is structured the same as the start of pt_regs */ - err = __copy_from_user(regs, &sc->sc_regs, sizeof(sc->sc_regs)); -+ if (unlikely(err)) -+ return err; -+ - /* Restore the floating-point state. */ -- if (has_fpu()) -- err |= restore_fp_state(regs, &sc->sc_fpregs); -+ if (has_fpu()) { -+ err = restore_fp_state(regs, &sc->sc_fpregs); -+ if (unlikely(err)) -+ return err; -+ } -+ -+ /* Check the reserved word before extensions parsing */ -+ err = __get_user(rsvd, &sc->sc_extdesc.reserved); -+ if (unlikely(err)) -+ return err; -+ if (unlikely(rsvd)) -+ return -EINVAL; -+ -+ while (!err) { -+ __u32 magic, size; -+ struct __riscv_ctx_hdr __user *head = sc_ext_ptr; -+ -+ err |= __get_user(magic, &head->magic); -+ err |= __get_user(size, &head->size); -+ if (unlikely(err)) -+ return err; -+ -+ sc_ext_ptr += sizeof(*head); -+ switch (magic) { -+ case END_MAGIC: -+ if (size != END_HDR_SIZE) -+ return -EINVAL; -+ -+ return 0; -+ case RISCV_V_MAGIC: -+ if (!has_vector() || !riscv_v_vstate_query(regs) || -+ size != riscv_v_sc_size) -+ return -EINVAL; -+ -+ err = __restore_v_state(regs, sc_ext_ptr); -+ break; -+ default: -+ return -EINVAL; -+ } -+ sc_ext_ptr = (void __user *)head + size; -+ } - return err; - } - -+static size_t get_rt_frame_size(bool cal_all) -+{ -+ struct rt_sigframe __user *frame; -+ size_t frame_size; -+ size_t total_context_size = 0; -+ -+ frame_size = sizeof(*frame); -+ -+ if (has_vector()) { -+ if (cal_all || riscv_v_vstate_query(task_pt_regs(current))) -+ total_context_size += riscv_v_sc_size; -+ } -+ /* -+ * Preserved a __riscv_ctx_hdr for END signal context header if an -+ * extension uses __riscv_extra_ext_header -+ */ -+ if (total_context_size) -+ total_context_size += sizeof(struct __riscv_ctx_hdr); -+ -+ frame_size += total_context_size; -+ -+ frame_size = round_up(frame_size, 16); -+ return frame_size; -+} -+ - SYSCALL_DEFINE0(rt_sigreturn) - { - struct pt_regs *regs = current_pt_regs(); - struct rt_sigframe __user *frame; - struct task_struct *task; - sigset_t set; -+ size_t frame_size = get_rt_frame_size(false); - - /* Always make any pending restarted system calls return -EINTR */ - current->restart_block.fn = do_no_restart_syscall; - - frame = (struct rt_sigframe __user *)regs->sp; - -- if (!access_ok(frame, sizeof(*frame))) -+ if (!access_ok(frame, frame_size)) - goto badframe; - - if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) -@@ -145,12 +270,23 @@ static long setup_sigcontext(struct rt_sigframe __user *frame, - struct pt_regs *regs) - { - struct sigcontext __user *sc = &frame->uc.uc_mcontext; -+ struct __riscv_ctx_hdr __user *sc_ext_ptr = &sc->sc_extdesc.hdr; - long err; -+ - /* sc_regs is structured the same as the start of pt_regs */ - err = __copy_to_user(&sc->sc_regs, regs, sizeof(sc->sc_regs)); - /* Save the floating-point state. */ - if (has_fpu()) - err |= save_fp_state(regs, &sc->sc_fpregs); -+ /* Save the vector state. 
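 The extension records are appended after the fixed part of the sigcontext as
 a flat list of (header, payload) records: each record begins with a
 __riscv_ctx_hdr carrying a magic and a total size, the vector record then
 holds a __sc_riscv_v_state followed by the raw register image that its datap
 field points to, and a final header with END_MAGIC/END_HDR_SIZE terminates
 the list so that restore_sigcontext() knows where to stop parsing.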
*/ -+ if (has_vector() && riscv_v_vstate_query(regs)) -+ err |= save_v_state(regs, (void __user **)&sc_ext_ptr); -+ /* Write zero to fp-reserved space and check it on restore_sigcontext */ -+ err |= __put_user(0, &sc->sc_extdesc.reserved); -+ /* And put END __riscv_ctx_hdr at the end. */ -+ err |= __put_user(END_MAGIC, &sc_ext_ptr->magic); -+ err |= __put_user(END_HDR_SIZE, &sc_ext_ptr->size); -+ - return err; - } - -@@ -174,6 +310,13 @@ static inline void __user *get_sigframe(struct ksignal *ksig, - /* Align the stack frame. */ - sp &= ~0xfUL; - -+ /* -+ * Fail if the size of the altstack is not large enough for the -+ * sigframe construction. -+ */ -+ if (current->sas_ss_size && sp < current->sas_ss_sp) -+ return (void __user __force *)-1UL; -+ - return (void __user *)sp; - } - -@@ -182,10 +325,11 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, - { - struct rt_sigframe __user *frame; - long err = 0; -+ size_t frame_size = get_rt_frame_size(false); - unsigned long __maybe_unused addr; - -- frame = get_sigframe(ksig, regs, sizeof(*frame)); -- if (!access_ok(frame, sizeof(*frame))) -+ frame = get_sigframe(ksig, regs, frame_size); -+ if (!access_ok(frame, frame_size)) - return -EFAULT; - - err |= copy_siginfo_to_user(&frame->info, &ksig->info); -@@ -351,3 +495,23 @@ asmlinkage __visible void do_work_pending(struct pt_regs *regs, - thread_info_flags = read_thread_flags(); - } while (thread_info_flags & _TIF_WORK_MASK); - } -+ -+void init_rt_signal_env(void); -+void __init init_rt_signal_env(void) -+{ -+ riscv_v_sc_size = sizeof(struct __riscv_ctx_hdr) + -+ sizeof(struct __sc_riscv_v_state) + riscv_v_vsize; -+ /* -+ * Determine the stack space required for guaranteed signal delivery. -+ * The signal_minsigstksz will be populated into the AT_MINSIGSTKSZ entry -+ * in the auxiliary array at process startup. -+ */ -+ signal_minsigstksz = get_rt_frame_size(true); -+} -+ -+#ifdef CONFIG_DYNAMIC_SIGFRAME -+bool sigaltstack_size_valid(size_t ss_size) -+{ -+ return ss_size > get_rt_frame_size(false); -+} -+#endif /* CONFIG_DYNAMIC_SIGFRAME */ -diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c -index 111111111111..222222222222 100644 ---- a/arch/riscv/kernel/smpboot.c -+++ b/arch/riscv/kernel/smpboot.c -@@ -169,6 +169,8 @@ asmlinkage __visible void smp_callin(void) - numa_add_cpu(curr_cpuid); - set_cpu_online(curr_cpuid, 1); - -+ riscv_user_isa_enable(); -+ - /* - * Remote TLB flushes are ignored while the CPU is offline, so emit - * a local TLB flush right now just in case. -diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c -index 111111111111..222222222222 100644 ---- a/arch/riscv/kernel/suspend.c -+++ b/arch/riscv/kernel/suspend.c -@@ -4,8 +4,12 @@ - * Copyright (c) 2022 Ventana Micro Systems Inc. 
- */ - -+#define pr_fmt(fmt) "suspend: " fmt -+ - #include -+#include - #include -+#include - #include - - static void suspend_save_csrs(struct suspend_context *context) -@@ -85,3 +89,43 @@ int cpu_suspend(unsigned long arg, - - return rc; - } -+ -+#ifdef CONFIG_RISCV_SBI -+static int sbi_system_suspend(unsigned long sleep_type, -+ unsigned long resume_addr, -+ unsigned long opaque) -+{ -+ struct sbiret ret; -+ -+ ret = sbi_ecall(SBI_EXT_SUSP, SBI_EXT_SUSP_SYSTEM_SUSPEND, -+ sleep_type, resume_addr, opaque, 0, 0, 0); -+ if (ret.error) -+ return sbi_err_map_linux_errno(ret.error); -+ -+ return ret.value; -+} -+ -+static int sbi_system_suspend_enter(suspend_state_t state) -+{ -+ return cpu_suspend(SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM, sbi_system_suspend); -+} -+ -+static const struct platform_suspend_ops sbi_system_suspend_ops = { -+ .valid = suspend_valid_only_mem, -+ .enter = sbi_system_suspend_enter, -+}; -+ -+static int __init sbi_system_suspend_init(void) -+{ -+ if (sbi_spec_version >= sbi_mk_version(1, 0) && -+ sbi_probe_extension(SBI_EXT_SUSP) > 0) { -+ pr_info("SBI SUSP extension detected\n"); -+ if (IS_ENABLED(CONFIG_SUSPEND)) -+ suspend_set_ops(&sbi_system_suspend_ops); -+ } -+ -+ return 0; -+} -+ -+arch_initcall(sbi_system_suspend_init); -+#endif /* CONFIG_RISCV_SBI */ -diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c -index 111111111111..222222222222 100644 ---- a/arch/riscv/kernel/traps.c -+++ b/arch/riscv/kernel/traps.c -@@ -24,6 +24,7 @@ - #include - #include - #include -+#include - - int show_unhandled_signals = 1; - -@@ -112,8 +113,45 @@ DO_ERROR_INFO(do_trap_insn_misaligned, - SIGBUS, BUS_ADRALN, "instruction address misaligned"); - DO_ERROR_INFO(do_trap_insn_fault, - SIGSEGV, SEGV_ACCERR, "instruction access fault"); --DO_ERROR_INFO(do_trap_insn_illegal, -- SIGILL, ILL_ILLOPC, "illegal instruction"); -+ -+#ifdef CONFIG_BIND_THREAD_TO_AICORES -+#include -+#define AI_OPCODE_MASK0 0xFE0000FF -+#define AI_OPCODE_MATCH0 0xE200002B -+#define AI_OPCODE_MASK1 0xFE0000FF -+#define AI_OPCODE_MATCH1 0xE600002B -+#endif -+asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *regs) -+{ -+ int flag = 0; -+#ifdef CONFIG_BIND_THREAD_TO_AICORES -+ u32 epc; -+#endif -+ -+ if (has_vector() && user_mode(regs)) { -+ if (riscv_v_first_use_handler(regs)) { -+ flag = 1; -+ } -+ } -+ -+#ifdef CONFIG_BIND_THREAD_TO_AICORES -+ __get_user(epc, (u32 __user *)regs->epc); -+ if ((epc & AI_OPCODE_MASK0) == AI_OPCODE_MATCH0 || -+ (epc & AI_OPCODE_MASK1) == AI_OPCODE_MATCH1) { -+ struct cpumask mask; -+ pid_t pid = current->pid; -+ -+ mask = ai_core_mask_get(); -+ sched_setaffinity(pid, &mask); -+ flag = 1; -+ } -+#endif -+ if (!flag) { -+ do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc, -+ "Oops - illegal instruction"); -+ } -+} -+ - DO_ERROR_INFO(do_trap_load_fault, - SIGSEGV, SEGV_ACCERR, "load access fault"); - #ifndef CONFIG_RISCV_M_MODE -diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/arch/riscv/kernel/vector.c -@@ -0,0 +1,111 @@ -+// SPDX-License-Identifier: GPL-2.0-or-later -+/* -+ * Copyright (C) 2023 SiFive -+ * Author: Andy Chiu -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+unsigned long riscv_v_vsize __read_mostly; -+EXPORT_SYMBOL_GPL(riscv_v_vsize); -+ -+void riscv_v_setup_vsize(void) -+{ -+ /* There are 32 vector registers with vlenb length. 
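 CSR_VLENB reports the width of one register in bytes (VLEN / 8), so a core
 with VLEN = 128 gives vlenb = 16 and a 512-byte per-task save area, while
 VLEN = 256 gives 1 KiB.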
*/ -+ riscv_v_enable(); -+ riscv_v_vsize = csr_read(CSR_VLENB) * 32; -+ riscv_v_disable(); -+} -+ -+static bool insn_is_vector(u32 insn_buf) -+{ -+ u32 opcode = insn_buf & __INSN_OPCODE_MASK; -+ bool is_vector = false; -+ u32 width, csr; -+ -+ /* -+ * All V-related instructions, including CSR operations are 4-Byte. So, -+ * do not handle if the instruction length is not 4-Byte. -+ */ -+ if (unlikely(GET_INSN_LENGTH(insn_buf) != 4)) -+ return false; -+ -+ switch (opcode) { -+ case RVV_OPCODE_VECTOR: -+ is_vector = true; -+ break; -+ case RVV_OPCODE_VL: -+ case RVV_OPCODE_VS: -+ width = RVV_EXRACT_VL_VS_WIDTH(insn_buf); -+ if (width == RVV_VL_VS_WIDTH_8 || width == RVV_VL_VS_WIDTH_16 || -+ width == RVV_VL_VS_WIDTH_32 || width == RVV_VL_VS_WIDTH_64) -+ is_vector = true; -+ break; -+ case RVG_OPCODE_SYSTEM: -+ csr = RVG_EXTRACT_SYSTEM_CSR(insn_buf); -+ if ((csr >= CSR_VSTART && csr <= CSR_VCSR) || -+ (csr >= CSR_VL && csr <= CSR_VLENB)) -+ is_vector = true; -+ break; -+ } -+ return is_vector; -+} -+ -+static int riscv_v_thread_zalloc(void) -+{ -+ void *datap; -+ -+ datap = kzalloc(riscv_v_vsize, GFP_KERNEL); -+ if (!datap) -+ return -ENOMEM; -+ current->thread.vstate.datap = datap; -+ memset(¤t->thread.vstate, 0, offsetof(struct __riscv_v_ext_state, -+ datap)); -+ return 0; -+} -+ -+bool riscv_v_first_use_handler(struct pt_regs *regs) -+{ -+ u32 __user *epc = (u32 __user *)regs->epc; -+ u32 insn = (u32)regs->badaddr; -+ -+ /* If V has been enabled then it is not the first-use trap */ -+ if (riscv_v_vstate_query(regs)) -+ return false; -+ -+ /* Get the instruction */ -+ if (!insn) { -+ if (__get_user(insn, epc)) -+ return false; -+ } -+ /* Filter out non-V instructions */ -+ if (!insn_is_vector(insn)) -+ return false; -+ -+ /* Sanity check. datap should be null by the time of the first-use trap */ -+ WARN_ON(current->thread.vstate.datap); -+ /* -+ * Now we sure that this is a V instruction. And it executes in the -+ * context where VS has been off. So, try to allocate the user's V -+ * context and resume execution. 
-+ */ -+ if (riscv_v_thread_zalloc()) { -+ force_sig(SIGKILL); -+ return true; -+ } -+ riscv_v_vstate_on(regs); -+ riscv_v_csr_init(); -+ return true; -+} -diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile -index 111111111111..222222222222 100644 ---- a/arch/riscv/kvm/Makefile -+++ b/arch/riscv/kvm/Makefile -@@ -17,6 +17,7 @@ kvm-y += mmu.o - kvm-y += vcpu.o - kvm-y += vcpu_exit.o - kvm-y += vcpu_fp.o -+kvm-y += vcpu_vector.o - kvm-y += vcpu_insn.o - kvm-y += vcpu_switch.o - kvm-y += vcpu_sbi.o -diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c -index 111111111111..222222222222 100644 ---- a/arch/riscv/kvm/vcpu.c -+++ b/arch/riscv/kvm/vcpu.c -@@ -21,6 +21,9 @@ - #include - #include - #include -+#include -+#include -+#include - - const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { - KVM_GENERIC_VCPU_STATS(), -@@ -56,12 +59,15 @@ static const unsigned long kvm_isa_ext_arr[] = { - [KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h, - [KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i, - [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m, -+ [KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v, - - KVM_ISA_EXT_ARR(SSTC), - KVM_ISA_EXT_ARR(SVINVAL), - KVM_ISA_EXT_ARR(SVPBMT), - KVM_ISA_EXT_ARR(ZIHINTPAUSE), - KVM_ISA_EXT_ARR(ZICBOM), -+ KVM_ISA_EXT_ARR(ZICBOZ), -+ KVM_ISA_EXT_ARR(ZICBOP), - }; - - static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext) -@@ -132,6 +138,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu) - - kvm_riscv_vcpu_fp_reset(vcpu); - -+ kvm_riscv_vcpu_vector_reset(vcpu); -+ - kvm_riscv_vcpu_timer_reset(vcpu); - - WRITE_ONCE(vcpu->arch.irqs_pending, 0); -@@ -182,6 +190,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) - cntx->hstatus |= HSTATUS_SPVP; - cntx->hstatus |= HSTATUS_SPV; - -+ if (kvm_riscv_vcpu_alloc_vector_context(vcpu, cntx)) -+ return -ENOMEM; -+ - /* By default, make CY, TM, and IR counters accessible in VU mode */ - reset_csr->scounteren = 0x7; - -@@ -212,6 +223,9 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) - - /* Free unused pages pre-allocated for G-stage page table mappings */ - kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); -+ -+ /* Free vector context space for host and guest kernel */ -+ kvm_riscv_vcpu_free_vector_context(vcpu); - } - - int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) -@@ -560,6 +574,9 @@ static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, - KVM_REG_RISCV_FP_D); - else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT) - return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg); -+ else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_VECTOR) -+ return kvm_riscv_vcpu_set_reg_vector(vcpu, reg, -+ KVM_REG_RISCV_VECTOR); - - return -EINVAL; - } -@@ -583,6 +600,9 @@ static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu, - KVM_REG_RISCV_FP_D); - else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT) - return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg); -+ else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_VECTOR) -+ return kvm_riscv_vcpu_get_reg_vector(vcpu, reg, -+ KVM_REG_RISCV_VECTOR); - - return -EINVAL; - } -@@ -804,6 +824,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, - return -EINVAL; - } - -+/* - static void kvm_riscv_vcpu_update_config(const unsigned long *isa) - { - u64 henvcfg = 0; -@@ -822,6 +843,7 @@ static void kvm_riscv_vcpu_update_config(const unsigned long *isa) - csr_write(CSR_HENVCFGH, henvcfg >> 32); - #endif - } -+*/ - - void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) - { -@@ -837,7 +859,7 @@ void 
kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) - csr_write(CSR_HVIP, csr->hvip); - csr_write(CSR_VSATP, csr->vsatp); - -- kvm_riscv_vcpu_update_config(vcpu->arch.isa); -+// kvm_riscv_vcpu_update_config(vcpu->arch.isa); - - kvm_riscv_gstage_update_hgatp(vcpu); - -@@ -846,6 +868,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) - kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context); - kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context, - vcpu->arch.isa); -+ kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context); -+ kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context, -+ vcpu->arch.isa); - - vcpu->cpu = cpu; - } -@@ -861,6 +886,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) - kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context); - - kvm_riscv_vcpu_timer_save(vcpu); -+ kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context, -+ vcpu->arch.isa); -+ kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context); - - csr->vsstatus = csr_read(CSR_VSSTATUS); - csr->vsie = csr_read(CSR_VSIE); -diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/arch/riscv/kvm/vcpu_vector.c -@@ -0,0 +1,186 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2022 SiFive -+ * -+ * Authors: -+ * Vincent Chen -+ * Greentime Hu -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#ifdef CONFIG_RISCV_ISA_V -+void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu) -+{ -+ unsigned long *isa = vcpu->arch.isa; -+ struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; -+ -+ cntx->sstatus &= ~SR_VS; -+ if (riscv_isa_extension_available(isa, v)) { -+ cntx->sstatus |= SR_VS_INITIAL; -+ WARN_ON(!cntx->vector.datap); -+ memset(cntx->vector.datap, 0, riscv_v_vsize); -+ } else { -+ cntx->sstatus |= SR_VS_OFF; -+ } -+} -+ -+static void kvm_riscv_vcpu_vector_clean(struct kvm_cpu_context *cntx) -+{ -+ cntx->sstatus &= ~SR_VS; -+ cntx->sstatus |= SR_VS_CLEAN; -+} -+ -+void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx, -+ unsigned long *isa) -+{ -+ if ((cntx->sstatus & SR_VS) == SR_VS_DIRTY) { -+ if (riscv_isa_extension_available(isa, v)) -+ __kvm_riscv_vector_save(cntx); -+ kvm_riscv_vcpu_vector_clean(cntx); -+ } -+} -+ -+void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx, -+ unsigned long *isa) -+{ -+ if ((cntx->sstatus & SR_VS) != SR_VS_OFF) { -+ if (riscv_isa_extension_available(isa, v)) -+ __kvm_riscv_vector_restore(cntx); -+ kvm_riscv_vcpu_vector_clean(cntx); -+ } -+} -+ -+void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx) -+{ -+ /* No need to check host sstatus as it can be modified outside */ -+ if (riscv_isa_extension_available(NULL, v)) -+ __kvm_riscv_vector_save(cntx); -+} -+ -+void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx) -+{ -+ if (riscv_isa_extension_available(NULL, v)) -+ __kvm_riscv_vector_restore(cntx); -+} -+ -+int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu, -+ struct kvm_cpu_context *cntx) -+{ -+ cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL); -+ if (!cntx->vector.datap) -+ return -ENOMEM; -+ -+ vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL); -+ if (!vcpu->arch.host_context.vector.datap) -+ return -ENOMEM; -+ -+ return 0; -+} -+ -+void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu) -+{ -+ kfree(vcpu->arch.guest_reset_context.vector.datap); -+ kfree(vcpu->arch.host_context.vector.datap); -+} 
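/*
 * Note on the helpers above: guest vector state follows the same lazy scheme
 * as the FP context. It is written back only when sstatus.VS is DIRTY and
 * reloaded whenever it is not OFF, being marked CLEAN afterwards, so a guest
 * that never touches the V extension adds no cost on vcpu load/put. Host
 * state is saved and restored unconditionally (when the host has V at all),
 * since the host's sstatus can change outside KVM's view.
 */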
-+#endif -+ -+static void *kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu, -+ unsigned long reg_num, -+ size_t reg_size) -+{ -+ struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; -+ void *reg_val; -+ size_t vlenb = riscv_v_vsize / 32; -+ -+ if (reg_num < KVM_REG_RISCV_VECTOR_REG(0)) { -+ if (reg_size != sizeof(unsigned long)) -+ return NULL; -+ switch (reg_num) { -+ case KVM_REG_RISCV_VECTOR_CSR_REG(vstart): -+ reg_val = &cntx->vector.vstart; -+ break; -+ case KVM_REG_RISCV_VECTOR_CSR_REG(vl): -+ reg_val = &cntx->vector.vl; -+ break; -+ case KVM_REG_RISCV_VECTOR_CSR_REG(vtype): -+ reg_val = &cntx->vector.vtype; -+ break; -+ case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr): -+ reg_val = &cntx->vector.vcsr; -+ break; -+ case KVM_REG_RISCV_VECTOR_CSR_REG(datap): -+ default: -+ return NULL; -+ } -+ } else if (reg_num <= KVM_REG_RISCV_VECTOR_REG(31)) { -+ if (reg_size != vlenb) -+ return NULL; -+ reg_val = cntx->vector.datap -+ + (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb; -+ } else { -+ return NULL; -+ } -+ -+ return reg_val; -+} -+ -+int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu, -+ const struct kvm_one_reg *reg, -+ unsigned long rtype) -+{ -+ unsigned long *isa = vcpu->arch.isa; -+ unsigned long __user *uaddr = -+ (unsigned long __user *)(unsigned long)reg->addr; -+ unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | -+ KVM_REG_SIZE_MASK | -+ rtype); -+ void *reg_val = NULL; -+ size_t reg_size = KVM_REG_SIZE(reg->id); -+ -+ if (rtype == KVM_REG_RISCV_VECTOR && -+ riscv_isa_extension_available(isa, v)) { -+ reg_val = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size); -+ } -+ -+ if (!reg_val) -+ return -EINVAL; -+ -+ if (copy_to_user(uaddr, reg_val, reg_size)) -+ return -EFAULT; -+ -+ return 0; -+} -+ -+int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu, -+ const struct kvm_one_reg *reg, -+ unsigned long rtype) -+{ -+ unsigned long *isa = vcpu->arch.isa; -+ unsigned long __user *uaddr = -+ (unsigned long __user *)(unsigned long)reg->addr; -+ unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | -+ KVM_REG_SIZE_MASK | -+ rtype); -+ void *reg_val = NULL; -+ size_t reg_size = KVM_REG_SIZE(reg->id); -+ -+ if (rtype == KVM_REG_RISCV_VECTOR && -+ riscv_isa_extension_available(isa, v)) { -+ reg_val = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size); -+ } -+ -+ if (!reg_val) -+ return -EINVAL; -+ -+ if (copy_from_user(reg_val, uaddr, reg_size)) -+ return -EFAULT; -+ -+ return 0; -+} -diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile -index 111111111111..222222222222 100644 ---- a/arch/riscv/lib/Makefile -+++ b/arch/riscv/lib/Makefile -@@ -5,5 +5,6 @@ lib-y += memset.o - lib-y += memmove.o - lib-$(CONFIG_MMU) += uaccess.o - lib-$(CONFIG_64BIT) += tishift.o -+lib-$(CONFIG_RISCV_ISA_V) += xor.o - - obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o -diff --git a/arch/riscv/lib/xor.S b/arch/riscv/lib/xor.S -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/arch/riscv/lib/xor.S -@@ -0,0 +1,81 @@ -+/* SPDX-License-Identifier: GPL-2.0-or-later */ -+/* -+ * Copyright (C) 2021 SiFive -+ */ -+#include -+#include -+#include -+ -+ENTRY(xor_regs_2_) -+ vsetvli a3, a0, e8, m8, ta, ma -+ vle8.v v0, (a1) -+ vle8.v v8, (a2) -+ sub a0, a0, a3 -+ vxor.vv v16, v0, v8 -+ add a2, a2, a3 -+ vse8.v v16, (a1) -+ add a1, a1, a3 -+ bnez a0, xor_regs_2_ -+ ret -+END(xor_regs_2_) -+EXPORT_SYMBOL(xor_regs_2_) -+ -+ENTRY(xor_regs_3_) -+ vsetvli a4, a0, e8, m8, ta, ma -+ vle8.v v0, (a1) -+ vle8.v v8, (a2) -+ sub a0, a0, a4 -+ vxor.vv v0, v0, v8 -+ vle8.v v16, 
(a3) -+ add a2, a2, a4 -+ vxor.vv v16, v0, v16 -+ add a3, a3, a4 -+ vse8.v v16, (a1) -+ add a1, a1, a4 -+ bnez a0, xor_regs_3_ -+ ret -+END(xor_regs_3_) -+EXPORT_SYMBOL(xor_regs_3_) -+ -+ENTRY(xor_regs_4_) -+ vsetvli a5, a0, e8, m8, ta, ma -+ vle8.v v0, (a1) -+ vle8.v v8, (a2) -+ sub a0, a0, a5 -+ vxor.vv v0, v0, v8 -+ vle8.v v16, (a3) -+ add a2, a2, a5 -+ vxor.vv v0, v0, v16 -+ vle8.v v24, (a4) -+ add a3, a3, a5 -+ vxor.vv v16, v0, v24 -+ add a4, a4, a5 -+ vse8.v v16, (a1) -+ add a1, a1, a5 -+ bnez a0, xor_regs_4_ -+ ret -+END(xor_regs_4_) -+EXPORT_SYMBOL(xor_regs_4_) -+ -+ENTRY(xor_regs_5_) -+ vsetvli a6, a0, e8, m8, ta, ma -+ vle8.v v0, (a1) -+ vle8.v v8, (a2) -+ sub a0, a0, a6 -+ vxor.vv v0, v0, v8 -+ vle8.v v16, (a3) -+ add a2, a2, a6 -+ vxor.vv v0, v0, v16 -+ vle8.v v24, (a4) -+ add a3, a3, a6 -+ vxor.vv v0, v0, v24 -+ vle8.v v8, (a5) -+ add a4, a4, a6 -+ vxor.vv v16, v0, v8 -+ add a5, a5, a6 -+ vse8.v v16, (a1) -+ add a1, a1, a6 -+ bnez a0, xor_regs_5_ -+ ret -+END(xor_regs_5_) -+EXPORT_SYMBOL(xor_regs_5_) -\ No newline at end of file --- -Armbian - -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Sat, 22 Jun 2024 07:38:30 -0400 -Subject: arch: riscv: mm: init.c - -Signed-off-by: Patrick Yavitz ---- - arch/riscv/mm/init.c | 10 ++++++++++ - 1 file changed, 10 insertions(+) - -diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c -index 111111111111..222222222222 100644 ---- a/arch/riscv/mm/init.c -+++ b/arch/riscv/mm/init.c -@@ -232,7 +232,12 @@ static void __init setup_bootmem(void) - max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end); - high_memory = (void *)(__va(PFN_PHYS(max_low_pfn))); - -+ #ifdef CONFIG_SOC_SPACEMIT_K1X -+ /* 2GB~4GB is IO area on spacemit-k1x, will be reserved when early_init_fdt_scan_reserved_mem */ -+ dma32_phys_limit = min(2UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn)); -+ #else - dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn)); -+ #endif - set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); - - reserve_initrd_mem(); -@@ -253,7 +258,12 @@ static void __init setup_bootmem(void) - if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) - memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va)); - -+#ifdef CONFIG_ZONE_DMA32 - dma_contiguous_reserve(dma32_phys_limit); -+#else -+ dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); -+#endif -+ - if (IS_ENABLED(CONFIG_64BIT)) - hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT); - } --- -Armbian - diff --git a/patch/kernel/archive/spacemit-6.1/004-drivers-ata-libata-pmp.patch b/patch/kernel/archive/spacemit-6.1/004-drivers-ata-libata-pmp.patch deleted file mode 100644 index 60254d1ebbb5..000000000000 --- a/patch/kernel/archive/spacemit-6.1/004-drivers-ata-libata-pmp.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Fri, 21 Jun 2024 11:54:06 -0400 -Subject: add spacemit patch set - -source: https://gitee.com/bianbu-linux/linux-6.1 - -Signed-off-by: Patrick Yavitz ---- - drivers/ata/libata-pmp.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c -index 111111111111..222222222222 100644 ---- a/drivers/ata/libata-pmp.c -+++ b/drivers/ata/libata-pmp.c -@@ -446,10 +446,11 @@ static void sata_pmp_quirks(struct ata_port *ap) - * otherwise. Don't try hard to recover it. 
- */ - ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY; -- } else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) { -+ } else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325 || devid == 0x0585)) { - /* - * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350? - * 0x0325: jmicron JMB394. -+ * 0x0325: jmicron JMB585. - */ - ata_for_each_link(link, ap, EDGE) { - /* SRST breaks detection and disks get misclassified --- -Armbian - diff --git a/patch/kernel/archive/spacemit-6.1/005-drivers-base-firmware_loader-main.patch b/patch/kernel/archive/spacemit-6.1/005-drivers-base-firmware_loader-main.patch deleted file mode 100644 index edc3b8749a4d..000000000000 --- a/patch/kernel/archive/spacemit-6.1/005-drivers-base-firmware_loader-main.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Fri, 21 Jun 2024 11:54:06 -0400 -Subject: add spacemit patch set - -source: https://gitee.com/bianbu-linux/linux-6.1 - -Signed-off-by: Patrick Yavitz ---- - drivers/base/firmware_loader/main.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c -index 111111111111..222222222222 100644 ---- a/drivers/base/firmware_loader/main.c -+++ b/drivers/base/firmware_loader/main.c -@@ -1156,7 +1156,7 @@ request_firmware_nowait( - fw_work->device = device; - fw_work->context = context; - fw_work->cont = cont; -- fw_work->opt_flags = FW_OPT_NOWAIT | -+ fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_NO_WARN | - (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER); - - if (!uevent && fw_cache_is_setup(device, name)) { --- -Armbian - diff --git a/patch/kernel/archive/spacemit-6.1/006-drivers-bluetooth.patch b/patch/kernel/archive/spacemit-6.1/006-drivers-bluetooth.patch deleted file mode 100644 index 119edbed46e9..000000000000 --- a/patch/kernel/archive/spacemit-6.1/006-drivers-bluetooth.patch +++ /dev/null @@ -1,8649 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Fri, 21 Jun 2024 11:54:06 -0400 -Subject: add spacemit patch set - -source: https://gitee.com/bianbu-linux/linux-6.1 - -Signed-off-by: Patrick Yavitz ---- - drivers/bluetooth/Kconfig | 11 + - drivers/bluetooth/Makefile | 2 + - drivers/bluetooth/rtk_bt.c | 2311 +++++++ - drivers/bluetooth/rtk_bt.h | 151 + - drivers/bluetooth/rtk_coex.c | 3065 ++++++++++ - drivers/bluetooth/rtk_coex.h | 378 ++ - drivers/bluetooth/rtk_misc.c | 2517 ++++++++ - drivers/bluetooth/rtk_misc.h | 134 + - 8 files changed, 8569 insertions(+) - -diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig -index 111111111111..222222222222 100644 ---- a/drivers/bluetooth/Kconfig -+++ b/drivers/bluetooth/Kconfig -@@ -79,6 +79,17 @@ config BT_HCIBTUSB_RTL - - Say Y here to compile support for Realtek protocol. - -+config BT_HCIBTUSB_RTLBTUSB -+ tristate "Realtek HCI USB driver support" -+ depends on USB -+ help -+ Realtek Bluetooth HCI USB driver. -+ This driver is required if you want to use Realtek Bluetooth -+ device with USB interface. -+ -+ Say Y here to compile support for Bluetooth USB devices into the -+ kernel or say M to compile it as module (rtk_btusb). 
-+ - config BT_HCIBTSDIO - tristate "HCI SDIO driver" - depends on MMC -diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/bluetooth/Makefile -+++ b/drivers/bluetooth/Makefile -@@ -26,6 +26,8 @@ obj-$(CONFIG_BT_BCM) += btbcm.o - obj-$(CONFIG_BT_RTL) += btrtl.o - obj-$(CONFIG_BT_QCA) += btqca.o - obj-$(CONFIG_BT_MTK) += btmtk.o -+obj-$(CONFIG_BT_HCIBTUSB_RTLBTUSB) += rtk_btusb.o -+rtk_btusb-objs := rtk_bt.o rtk_misc.o rtk_coex.o - - obj-$(CONFIG_BT_VIRTIO) += virtio_bt.o - -diff --git a/drivers/bluetooth/rtk_bt.c b/drivers/bluetooth/rtk_bt.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/bluetooth/rtk_bt.c -@@ -0,0 +1,2311 @@ -+/* -+ * -+ * Realtek Bluetooth USB driver -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "rtk_bt.h" -+#include "rtk_misc.h" -+ -+#define VERSION "3.1.4a6937d.20230413-173859" -+ -+#ifdef BTCOEX -+#include "rtk_coex.h" -+#endif -+ -+#ifdef RTKBT_SWITCH_PATCH -+#include -+#include -+static DEFINE_SEMAPHORE(switch_sem); -+#endif -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 7, 1) -+static bool reset = true; -+#endif -+ -+static struct usb_driver btusb_driver; -+#if HCI_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) -+static u16 iso_min_conn_handle = 0x1b; -+#endif -+static struct usb_device_id btusb_table[] = { -+ { -+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | -+ USB_DEVICE_ID_MATCH_INT_INFO, -+ .idVendor = 0x0bda, -+ .bInterfaceClass = 0xe0, -+ .bInterfaceSubClass = 0x01, -+ .bInterfaceProtocol = 0x01 -+ }, { -+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | -+ USB_DEVICE_ID_MATCH_INT_INFO, -+ .idVendor = 0x13d3, -+ .bInterfaceClass = 0xe0, -+ .bInterfaceSubClass = 0x01, -+ .bInterfaceProtocol = 0x01 -+ }, { -+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | -+ USB_DEVICE_ID_MATCH_INT_INFO, -+ .idVendor = 0x0489, -+ .bInterfaceClass = 0xe0, -+ .bInterfaceSubClass = 0x01, -+ .bInterfaceProtocol = 0x01 -+ }, { -+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | -+ USB_DEVICE_ID_MATCH_INT_INFO, -+ .idVendor = 0x1358, -+ .bInterfaceClass = 0xe0, -+ .bInterfaceSubClass = 0x01, -+ .bInterfaceProtocol = 0x01 -+ }, { -+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | -+ USB_DEVICE_ID_MATCH_INT_INFO, -+ .idVendor = 0x04ca, -+ .bInterfaceClass = 0xe0, -+ .bInterfaceSubClass = 0x01, -+ .bInterfaceProtocol = 0x01 -+ }, { -+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | -+ USB_DEVICE_ID_MATCH_INT_INFO, -+ .idVendor = 0x2ff8, -+ .bInterfaceClass = 0xe0, -+ .bInterfaceSubClass = 0x01, -+ .bInterfaceProtocol = 0x01 -+ }, { -+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | -+ USB_DEVICE_ID_MATCH_INT_INFO, -+ .idVendor = 0x0b05, -+ 
.bInterfaceClass = 0xe0, -+ .bInterfaceSubClass = 0x01, -+ .bInterfaceProtocol = 0x01 -+ }, { -+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | -+ USB_DEVICE_ID_MATCH_INT_INFO, -+ .idVendor = 0x0930, -+ .bInterfaceClass = 0xe0, -+ .bInterfaceSubClass = 0x01, -+ .bInterfaceProtocol = 0x01 -+ }, { -+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | -+ USB_DEVICE_ID_MATCH_INT_INFO, -+ .idVendor = 0x10ec, -+ .bInterfaceClass = 0xe0, -+ .bInterfaceSubClass = 0x01, -+ .bInterfaceProtocol = 0x01 -+ }, { -+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | -+ USB_DEVICE_ID_MATCH_INT_INFO, -+ .idVendor = 0x04c5, -+ .bInterfaceClass = 0xe0, -+ .bInterfaceSubClass = 0x01, -+ .bInterfaceProtocol = 0x01 -+ }, { -+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | -+ USB_DEVICE_ID_MATCH_INT_INFO, -+ .idVendor = 0x0cb5, -+ .bInterfaceClass = 0xe0, -+ .bInterfaceSubClass = 0x01, -+ .bInterfaceProtocol = 0x01 -+ }, { -+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | -+ USB_DEVICE_ID_MATCH_INT_INFO, -+ .idVendor = 0x0cb8, -+ .bInterfaceClass = 0xe0, -+ .bInterfaceSubClass = 0x01, -+ .bInterfaceProtocol = 0x01 -+ }, { } -+}; -+ -+static void rtk_free(struct btusb_data *data) -+{ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 1) -+ kfree(data); -+#endif -+ return; -+} -+ -+static struct btusb_data *rtk_alloc(struct usb_interface *intf) -+{ -+ struct btusb_data *data; -+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 1) -+ data = kzalloc(sizeof(*data), GFP_KERNEL); -+#else -+ data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL); -+#endif -+ return data; -+} -+ -+MODULE_DEVICE_TABLE(usb, btusb_table); -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) -+static inline void btusb_free_frags(struct btusb_data *data) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&data->rxlock, flags); -+ -+ kfree_skb(data->evt_skb); -+ data->evt_skb = NULL; -+ -+ kfree_skb(data->acl_skb); -+ data->acl_skb = NULL; -+ -+ kfree_skb(data->sco_skb); -+ data->sco_skb = NULL; -+ -+ spin_unlock_irqrestore(&data->rxlock, flags); -+} -+ -+static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count) -+{ -+ struct sk_buff *skb; -+ int err = 0; -+ -+ spin_lock(&data->rxlock); -+ skb = data->evt_skb; -+ -+ while (count) { -+ int len; -+ -+ if (!skb) { -+ skb = bt_skb_alloc(HCI_MAX_EVENT_SIZE, GFP_ATOMIC); -+ if (!skb) { -+ err = -ENOMEM; -+ break; -+ } -+ -+ bt_cb(skb)->pkt_type = HCI_EVENT_PKT; -+ bt_cb(skb)->expect = HCI_EVENT_HDR_SIZE; -+ } -+ -+ len = min_t(uint, bt_cb(skb)->expect, count); -+#if HCI_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) -+ skb_put_data(skb, buffer, len); -+#else -+ memcpy(skb_put(skb, len), buffer, len); -+#endif -+ -+ count -= len; -+ buffer += len; -+ bt_cb(skb)->expect -= len; -+ -+ if (skb->len == HCI_EVENT_HDR_SIZE) { -+ /* Complete event header */ -+ bt_cb(skb)->expect = hci_event_hdr(skb)->plen; -+ -+ if (skb_tailroom(skb) < bt_cb(skb)->expect) { -+ kfree_skb(skb); -+ skb = NULL; -+ -+ err = -EILSEQ; -+ break; -+ } -+ } -+ -+ if (bt_cb(skb)->expect == 0) { -+ /* Complete frame */ -+ hci_recv_frame(data->hdev, skb); -+ skb = NULL; -+ } -+ } -+ -+ data->evt_skb = skb; -+ spin_unlock(&data->rxlock); -+ -+ return err; -+} -+ -+static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count) -+{ -+ struct sk_buff *skb; -+ int err = 0; -+ -+ spin_lock(&data->rxlock); -+ skb = data->acl_skb; -+ -+ while (count) { -+ int len; -+ -+ if (!skb) { -+ skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); -+ if (!skb) { -+ err = -ENOMEM; -+ break; -+ } -+ -+ bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; -+ 
bt_cb(skb)->expect = HCI_ACL_HDR_SIZE; -+ } -+ -+ len = min_t(uint, bt_cb(skb)->expect, count); -+#if HCI_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) -+ skb_put_data(skb, buffer, len); -+#else -+ memcpy(skb_put(skb, len), buffer, len); -+#endif -+ -+ count -= len; -+ buffer += len; -+ bt_cb(skb)->expect -= len; -+ -+ if (skb->len == HCI_ACL_HDR_SIZE) { -+ struct hci_acl_hdr *h = hci_acl_hdr(skb); -+ __le16 dlen = h->dlen; -+#if HCI_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) -+ __le16 handle = __le16_to_cpu(h->handle) & 0xfff; -+ -+ if(handle >= iso_min_conn_handle) { -+ bt_cb(skb)->pkt_type = HCI_ISODATA_PKT; -+ } -+#endif -+ /* Complete ACL header */ -+ bt_cb(skb)->expect = __le16_to_cpu(dlen); -+ -+ if (skb_tailroom(skb) < bt_cb(skb)->expect) { -+ kfree_skb(skb); -+ skb = NULL; -+ -+ err = -EILSEQ; -+ break; -+ } -+ } -+ -+ if (bt_cb(skb)->expect == 0) { -+ /* Complete frame */ -+ hci_recv_frame(data->hdev, skb); -+ skb = NULL; -+ } -+ } -+ -+ data->acl_skb = skb; -+ spin_unlock(&data->rxlock); -+ -+ return err; -+} -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) -+static int btrtl_usb_recv_isoc(u16 pos, u8 *data, u8 *p, int len, -+ u16 wMaxPacketSize) -+{ -+ u8 *prev; -+ -+ if (pos >= HCI_SCO_HDR_SIZE && pos >= wMaxPacketSize && -+ len == wMaxPacketSize && !(pos % wMaxPacketSize) && -+ wMaxPacketSize >= 10 && p[0] == data[0] && p[1] == data[1]) { -+ -+ prev = data + (pos - wMaxPacketSize); -+ -+ /* Detect the sco data of usb isoc pkt duplication. */ -+ if (!memcmp(p + 2, prev + 2, 8)) -+ return -EILSEQ; -+ -+ if (wMaxPacketSize >= 12 && -+ p[2] == prev[6] && p[3] == prev[7] && -+ p[4] == prev[4] && p[5] == prev[5] && -+ p[6] == prev[10] && p[7] == prev[11] && -+ p[8] == prev[8] && p[9] == prev[9]) { -+ return -EILSEQ; -+ } -+ } -+ -+ return 0; -+} -+#endif -+ -+static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count) -+{ -+ struct sk_buff *skb; -+ int err = 0; -+#if HCI_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) -+ u16 wMaxPacketSize = le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize); -+#endif -+ -+ spin_lock(&data->rxlock); -+ skb = data->sco_skb; -+ -+ while (count) { -+ int len; -+ -+ if (!skb) { -+ skb = bt_skb_alloc(HCI_MAX_SCO_SIZE, GFP_ATOMIC); -+ if (!skb) { -+ err = -ENOMEM; -+ break; -+ } -+ -+ bt_cb(skb)->pkt_type = HCI_SCODATA_PKT; -+ bt_cb(skb)->expect = HCI_SCO_HDR_SIZE; -+ } -+ -+ len = min_t(uint, bt_cb(skb)->expect, count); -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) -+ /* Gaps in audio could be heard while streaming WBS using USB -+ * alt settings 3 on some platforms. -+ * Add the function to detect it. 
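			 * The detector below compares each full-size isoc chunk
			 * with the one received just before it and returns
			 * -EILSEQ when the SCO handle and leading payload bytes
			 * repeat, treating the chunk as a duplicated USB isoc
			 * packet rather than new audio data.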
-+ */ -+ if (test_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags)) { -+ err = btrtl_usb_recv_isoc(skb->len, skb->data, buffer, -+ len, wMaxPacketSize); -+ if (err) -+ break; -+ } -+#endif -+#if HCI_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) -+ skb_put_data(skb, buffer, len); -+#else -+ memcpy(skb_put(skb, len), buffer, len); -+#endif -+ -+ count -= len; -+ buffer += len; -+ bt_cb(skb)->expect -= len; -+ -+ if (skb->len == HCI_SCO_HDR_SIZE) { -+ /* Complete SCO header */ -+ bt_cb(skb)->expect = hci_sco_hdr(skb)->dlen; -+ -+ if (skb_tailroom(skb) < bt_cb(skb)->expect) { -+ kfree_skb(skb); -+ skb = NULL; -+ -+ err = -EILSEQ; -+ break; -+ } -+ } -+ -+ if (bt_cb(skb)->expect == 0) { -+ /* Complete frame */ -+ hci_recv_frame(data->hdev, skb); -+ skb = NULL; -+ } -+ } -+ -+ data->sco_skb = skb; -+ spin_unlock(&data->rxlock); -+ -+ return err; -+} -+#else -+static int inc_tx(struct btusb_data *data) -+{ -+ unsigned long flags; -+ int rv; -+ -+ spin_lock_irqsave(&data->txlock, flags); -+ rv = test_bit(BTUSB_SUSPENDING, &data->flags); -+ if (!rv) -+ data->tx_in_flight++; -+ spin_unlock_irqrestore(&data->txlock, flags); -+ -+ return rv; -+} -+ -+#endif -+ -+static void btusb_intr_complete(struct urb *urb) -+{ -+ struct hci_dev *hdev = urb->context; -+ struct btusb_data *data = GET_DRV_DATA(hdev); -+ int err; -+ -+ //RTKBT_DBG("%s: urb %p status %d count %d ", __func__, -+ //urb, urb->status, urb->actual_length); -+ -+ if (!test_bit(HCI_RUNNING, &hdev->flags)) -+ return; -+ -+ if (urb->status == 0) { -+ hdev->stat.byte_rx += urb->actual_length; -+ -+#ifdef BTCOEX -+ rtk_btcoex_parse_event(urb->transfer_buffer, -+ urb->actual_length); -+#endif -+#if HCI_VERSION_CODE < KERNEL_VERSION(3, 18, 0) -+ if (hci_recv_fragment(hdev, HCI_EVENT_PKT, -+ urb->transfer_buffer, -+ urb->actual_length) < 0) { -+ RTKBT_ERR("%s: Corrupted event packet", __func__); -+ hdev->stat.err_rx++; -+ } -+#else -+ if (btusb_recv_intr(data, urb->transfer_buffer, -+ urb->actual_length) < 0) { -+ RTKBT_ERR("%s corrupted event packet", hdev->name); -+ hdev->stat.err_rx++; -+ } -+#endif -+ } -+ /* Avoid suspend failed when usb_kill_urb */ -+ else if (urb->status == -ENOENT) { -+ return; -+ } -+ -+ if (!test_bit(BTUSB_INTR_RUNNING, &data->flags)) -+ return; -+ -+ usb_mark_last_busy(data->udev); -+ usb_anchor_urb(urb, &data->intr_anchor); -+ -+ err = usb_submit_urb(urb, GFP_ATOMIC); -+ if (err < 0) { -+ /* -EPERM: urb is being killed; -+ * -ENODEV: device got disconnected */ -+ if (err != -EPERM && err != -ENODEV) -+ RTKBT_ERR("%s: Failed to re-submit urb %p, err %d", -+ __func__, urb, err); -+ usb_unanchor_urb(urb); -+ } -+} -+ -+static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) -+{ -+ struct btusb_data *data = GET_DRV_DATA(hdev); -+ struct urb *urb; -+ unsigned char *buf; -+ unsigned int pipe; -+ int err, size; -+ -+ //RTKBT_DBG("%s", hdev->name); -+ -+ if (!data->intr_ep) -+ return -ENODEV; -+ -+ urb = usb_alloc_urb(0, mem_flags); -+ if (!urb) -+ return -ENOMEM; -+ -+ size = le16_to_cpu(data->intr_ep->wMaxPacketSize); -+ -+ buf = kmalloc(size, mem_flags); -+ if (!buf) { -+ usb_free_urb(urb); -+ return -ENOMEM; -+ } -+ -+ pipe = usb_rcvintpipe(data->udev, data->intr_ep->bEndpointAddress); -+ -+ usb_fill_int_urb(urb, data->udev, pipe, buf, size, -+ btusb_intr_complete, hdev, data->intr_ep->bInterval); -+ -+ urb->transfer_flags |= URB_FREE_BUFFER; -+ -+ usb_anchor_urb(urb, &data->intr_anchor); -+ -+ err = usb_submit_urb(urb, mem_flags); -+ if (err < 0) { -+ RTKBT_ERR -+ ("btusb_submit_intr_urb %s urb %p submission failed 
(%d)", -+ hdev->name, urb, -err); -+ usb_unanchor_urb(urb); -+ } -+ -+ usb_free_urb(urb); -+ -+ return err; -+} -+ -+static void btusb_bulk_complete(struct urb *urb) -+{ -+ struct hci_dev *hdev = urb->context; -+ struct btusb_data *data = GET_DRV_DATA(hdev); -+ int err; -+ -+ //RTKBT_DBG("%s: urb %p status %d count %d", -+ //__func__, urb, urb->status, urb->actual_length); -+ -+ if (!test_bit(HCI_RUNNING, &hdev->flags)) -+ return; -+ -+#ifdef BTCOEX -+ if (urb->status == 0) -+ rtk_btcoex_parse_l2cap_data_rx(urb->transfer_buffer, -+ urb->actual_length); -+#endif -+ -+ if (urb->status == 0) { -+ hdev->stat.byte_rx += urb->actual_length; -+ -+#if HCI_VERSION_CODE < KERNEL_VERSION(3, 18, 0) -+ if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT, -+ urb->transfer_buffer, -+ urb->actual_length) < 0) { -+ RTKBT_ERR("%s: Corrupted ACL packet", __func__); -+ hdev->stat.err_rx++; -+ } -+#else -+ if (data->recv_bulk(data, urb->transfer_buffer, -+ urb->actual_length) < 0) { -+ RTKBT_ERR("%s corrupted ACL packet", hdev->name); -+ hdev->stat.err_rx++; -+ } -+#endif -+ } -+ /* Avoid suspend failed when usb_kill_urb */ -+ else if (urb->status == -ENOENT) { -+ return; -+ } -+ -+ if (!test_bit(BTUSB_BULK_RUNNING, &data->flags)) -+ return; -+ -+ usb_anchor_urb(urb, &data->bulk_anchor); -+ usb_mark_last_busy(data->udev); -+ -+ err = usb_submit_urb(urb, GFP_ATOMIC); -+ if (err < 0) { -+ /* -EPERM: urb is being killed; -+ * -ENODEV: device got disconnected */ -+ if (err != -EPERM && err != -ENODEV) -+ RTKBT_ERR -+ ("btusb_bulk_complete %s urb %p failed to resubmit (%d)", -+ hdev->name, urb, -err); -+ usb_unanchor_urb(urb); -+ } -+} -+ -+static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags) -+{ -+ struct btusb_data *data = GET_DRV_DATA(hdev); -+ struct urb *urb; -+ unsigned char *buf; -+ unsigned int pipe; -+ int err, size = HCI_MAX_FRAME_SIZE; -+ -+ //RTKBT_DBG("%s: hdev name %s", __func__, hdev->name); -+ -+ if (!data->bulk_rx_ep) -+ return -ENODEV; -+ -+ urb = usb_alloc_urb(0, mem_flags); -+ if (!urb) -+ return -ENOMEM; -+ -+ buf = kmalloc(size, mem_flags); -+ if (!buf) { -+ usb_free_urb(urb); -+ return -ENOMEM; -+ } -+ -+ pipe = usb_rcvbulkpipe(data->udev, data->bulk_rx_ep->bEndpointAddress); -+ -+ usb_fill_bulk_urb(urb, data->udev, pipe, -+ buf, size, btusb_bulk_complete, hdev); -+ -+ urb->transfer_flags |= URB_FREE_BUFFER; -+ -+ usb_mark_last_busy(data->udev); -+ usb_anchor_urb(urb, &data->bulk_anchor); -+ -+ err = usb_submit_urb(urb, mem_flags); -+ if (err < 0) { -+ RTKBT_ERR("%s: Failed to submit urb %p, err %d", __func__, urb, -+ err); -+ usb_unanchor_urb(urb); -+ } -+ -+ usb_free_urb(urb); -+ -+ return err; -+} -+ -+static void btusb_isoc_complete(struct urb *urb) -+{ -+ struct hci_dev *hdev = urb->context; -+ struct btusb_data *data = GET_DRV_DATA(hdev); -+ int i, err; -+ -+ /* -+ RTKBT_DBG("%s urb %p status %d count %d", hdev->name, -+ urb, urb->status, urb->actual_length); -+ */ -+ if (!test_bit(HCI_RUNNING, &hdev->flags)) -+ return; -+ -+ if (urb->status == 0) { -+ for (i = 0; i < urb->number_of_packets; i++) { -+ unsigned int offset = urb->iso_frame_desc[i].offset; -+ unsigned int length = -+ urb->iso_frame_desc[i].actual_length; -+ -+ if (urb->iso_frame_desc[i].status) -+ continue; -+ -+ hdev->stat.byte_rx += length; -+ -+#if HCI_VERSION_CODE < KERNEL_VERSION(3, 18, 0) -+ if (hci_recv_fragment(hdev, HCI_SCODATA_PKT, -+ urb->transfer_buffer + offset, -+ length) < 0) { -+ RTKBT_ERR("%s: Corrupted SCO packet", __func__); -+ hdev->stat.err_rx++; -+ } -+#else -+ if (btusb_recv_isoc(data, 
urb->transfer_buffer + offset, -+ length) < 0) { -+ RTKBT_ERR("%s corrupted SCO packet", -+ hdev->name); -+ hdev->stat.err_rx++; -+ } -+#endif -+ } -+ } -+ /* Avoid suspend failed when usb_kill_urb */ -+ else if (urb->status == -ENOENT) { -+ return; -+ } -+ -+ if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags)) -+ return; -+ -+ usb_anchor_urb(urb, &data->isoc_anchor); -+ i = 0; -+retry: -+ err = usb_submit_urb(urb, GFP_ATOMIC); -+ if (err < 0) { -+ /* -EPERM: urb is being killed; -+ * -ENODEV: device got disconnected */ -+ if (err != -EPERM && err != -ENODEV) -+ RTKBT_ERR -+ ("%s: Failed to re-sumbit urb %p, retry %d, err %d", -+ __func__, urb, i, err); -+ if (i < 10) { -+ i++; -+ mdelay(1); -+ goto retry; -+ } -+ -+ usb_unanchor_urb(urb); -+ } -+} -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) -+static inline void __fill_isoc_descriptor_msbc(struct urb *urb, int len, -+ int mtu, struct btusb_data *data) -+{ -+ int i = 0, offset = 0; -+ unsigned int interval; -+ -+ BT_DBG("len %d mtu %d", len, mtu); -+ -+ /* For mSBC ALT 6 settings some Realtek chips need to transmit the data -+ * continuously without the zero length of USB packets. -+ */ -+ if (btrealtek_test_flag(data->hdev, REALTEK_ALT6_CONTINUOUS_TX_CHIP)) -+ goto ignore_usb_alt6_packet_flow; -+ -+ /* For mSBC ALT 6 setting the host will send the packet at continuous -+ * flow. As per core spec 5, vol 4, part B, table 2.1. For ALT setting -+ * 6 the HCI PACKET INTERVAL should be 7.5ms for every usb packets. -+ * To maintain the rate we send 63bytes of usb packets alternatively for -+ * 7ms and 8ms to maintain the rate as 7.5ms. -+ */ -+ if (data->usb_alt6_packet_flow) { -+ interval = 7; -+ data->usb_alt6_packet_flow = false; -+ } else { -+ interval = 6; -+ data->usb_alt6_packet_flow = true; -+ } -+ -+ for (i = 0; i < interval; i++) { -+ urb->iso_frame_desc[i].offset = offset; -+ urb->iso_frame_desc[i].length = offset; -+ } -+ -+ignore_usb_alt6_packet_flow: -+ if (len && i < BTUSB_MAX_ISOC_FRAMES) { -+ urb->iso_frame_desc[i].offset = offset; -+ urb->iso_frame_desc[i].length = len; -+ i++; -+ } -+ -+ urb->number_of_packets = i; -+} -+#endif -+ -+static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu) -+{ -+ int i, offset = 0; -+ -+ //RTKBT_DBG("len %d mtu %d", len, mtu); -+ -+ for (i = 0; i < BTUSB_MAX_ISOC_FRAMES && len >= mtu; -+ i++, offset += mtu, len -= mtu) { -+ urb->iso_frame_desc[i].offset = offset; -+ urb->iso_frame_desc[i].length = mtu; -+ } -+ -+ if (len && i < BTUSB_MAX_ISOC_FRAMES) { -+ urb->iso_frame_desc[i].offset = offset; -+ urb->iso_frame_desc[i].length = len; -+ i++; -+ } -+ -+ urb->number_of_packets = i; -+} -+ -+static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags) -+{ -+ struct btusb_data *data = GET_DRV_DATA(hdev); -+ struct urb *urb; -+ unsigned char *buf; -+ unsigned int pipe; -+ int err, size; -+ -+ //RTKBT_DBG("%s", hdev->name); -+ -+ if (!data->isoc_rx_ep) -+ return -ENODEV; -+ -+ urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, mem_flags); -+ if (!urb) -+ return -ENOMEM; -+ -+ size = le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize) * -+ BTUSB_MAX_ISOC_FRAMES; -+ -+ buf = kmalloc(size, mem_flags); -+ if (!buf) { -+ usb_free_urb(urb); -+ return -ENOMEM; -+ } -+ -+ pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress); -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 2, 14) -+ usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_isoc_complete, -+ hdev, data->isoc_rx_ep->bInterval); -+ -+ urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP; -+#else -+ urb->dev 
= data->udev; -+ urb->pipe = pipe; -+ urb->context = hdev; -+ urb->complete = btusb_isoc_complete; -+ urb->interval = data->isoc_rx_ep->bInterval; -+ -+ urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP; -+ urb->transfer_buffer = buf; -+ urb->transfer_buffer_length = size; -+#endif -+ -+ __fill_isoc_descriptor(urb, size, -+ le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize)); -+ -+ usb_anchor_urb(urb, &data->isoc_anchor); -+ -+ err = usb_submit_urb(urb, mem_flags); -+ if (err < 0) { -+ RTKBT_ERR("%s %s urb %p submission failed (%d)", -+ __func__, hdev->name, urb, err); -+ usb_unanchor_urb(urb); -+ } -+ -+ usb_free_urb(urb); -+ -+ return err; -+} -+ -+static void btusb_tx_complete(struct urb *urb) -+{ -+ struct sk_buff *skb = urb->context; -+ struct hci_dev *hdev = (struct hci_dev *)skb->dev; -+ struct btusb_data *data = GET_DRV_DATA(hdev); -+ -+// RTKBT_DBG("btusb_tx_complete %s urb %p status %d count %d", hdev->name, -+// urb, urb->status, urb->actual_length); -+ -+ if (!test_bit(HCI_RUNNING, &hdev->flags)) -+ goto done; -+ -+ if (!urb->status) -+ hdev->stat.byte_tx += urb->transfer_buffer_length; -+ else -+ hdev->stat.err_tx++; -+ -+done: -+ spin_lock(&data->txlock); -+ data->tx_in_flight--; -+ spin_unlock(&data->txlock); -+ -+ kfree(urb->setup_packet); -+ -+ kfree_skb(skb); -+} -+ -+static void btusb_isoc_tx_complete(struct urb *urb) -+{ -+ struct sk_buff *skb = urb->context; -+ struct hci_dev *hdev = (struct hci_dev *)skb->dev; -+ -+ RTKBT_DBG("%s: urb %p status %d count %d", __func__, -+ urb, urb->status, urb->actual_length); -+ -+ if (!test_bit(HCI_RUNNING, &hdev->flags)) -+ goto done; -+ -+ if (!urb->status) -+ hdev->stat.byte_tx += urb->transfer_buffer_length; -+ else -+ hdev->stat.err_tx++; -+ -+done: -+ kfree(urb->setup_packet); -+ -+ kfree_skb(skb); -+} -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) -+static int rtl_read_iso_handle_range(struct hci_dev *hdev) -+{ -+ struct sk_buff *skb; -+ -+ skb = __hci_cmd_sync(hdev, 0xfdab, 0, NULL, HCI_CMD_TIMEOUT); -+ if (IS_ERR(skb)) { -+ return PTR_ERR(skb); -+ } -+ -+ if (skb->data[0]) { -+ RTKBT_ERR("%s: rtl: read failed", hdev->name); -+ kfree_skb(skb); -+ return -EIO; -+ } -+ -+ iso_min_conn_handle = skb->data[1] | skb->data[2] << 8; -+ RTKBT_DBG("rtl: read iso handle range done"); -+ -+ kfree_skb(skb); -+ -+ return 0; -+} -+#endif -+ -+static int btusb_open(struct hci_dev *hdev) -+{ -+ struct btusb_data *data = GET_DRV_DATA(hdev); -+ int err; -+ -+ err = usb_autopm_get_interface(data->intf); -+ if (err < 0) -+ return err; -+ -+ data->intf->needs_remote_wakeup = 1; -+ RTKBT_DBG("%s start", __func__); -+ -+ /*******************************/ -+ if (0 == atomic_read(&hdev->promisc)) { -+ RTKBT_ERR("btusb_open hdev->promisc ==0"); -+ //err = -1; -+ //goto failed; -+ } -+ -+ err = download_patch(data->intf); -+ if (err < 0) -+ goto failed; -+ /*******************************/ -+ -+ err = setup_btrealtek_flag(data->intf, hdev); -+ if (err < 0) -+ RTKBT_WARN("setup_btrealtek_flag incorrect!"); -+ -+ RTKBT_INFO("%s set HCI UP RUNNING", __func__); -+ if (test_and_set_bit(HCI_UP, &hdev->flags)) -+ goto done; -+ -+ if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) -+ goto done; -+ -+ if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags)) -+ goto done; -+ -+ err = btusb_submit_intr_urb(hdev, GFP_KERNEL); -+ if (err < 0) -+ goto failed; -+ -+ err = btusb_submit_bulk_urb(hdev, GFP_KERNEL); -+ if (err < 0) { -+ mdelay(URB_CANCELING_DELAY_MS); // Added by Realtek -+ usb_kill_anchored_urbs(&data->intr_anchor); -+ goto failed; -+ } -+ -+ 
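	/* A spare bulk RX URB is submitted below, right after the running
	 * flag is set; its return value is not checked because the URB
	 * submitted above already keeps bulk reception going if the extra
	 * one fails. */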
set_bit(BTUSB_BULK_RUNNING, &data->flags); -+ btusb_submit_bulk_urb(hdev, GFP_KERNEL); -+ -+done: -+ usb_autopm_put_interface(data->intf); -+ -+#ifdef BTCOEX -+ rtk_btcoex_open(hdev); -+#endif -+ RTKBT_DBG("%s end", __FUNCTION__); -+ -+ return 0; -+ -+failed: -+ clear_bit(BTUSB_INTR_RUNNING, &data->flags); -+ clear_bit(HCI_RUNNING, &hdev->flags); -+ usb_autopm_put_interface(data->intf); -+ RTKBT_ERR("%s failed", __FUNCTION__); -+ return err; -+} -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) -+static int btusb_setup(struct hci_dev *hdev) -+{ -+ rtl_read_iso_handle_range(hdev); -+ return 0; -+} -+#endif -+ -+static void btusb_stop_traffic(struct btusb_data *data) -+{ -+ mdelay(URB_CANCELING_DELAY_MS); // Added by Realtek -+ usb_kill_anchored_urbs(&data->intr_anchor); -+ usb_kill_anchored_urbs(&data->bulk_anchor); -+ usb_kill_anchored_urbs(&data->isoc_anchor); -+} -+ -+static int btusb_close(struct hci_dev *hdev) -+{ -+ struct btusb_data *data = GET_DRV_DATA(hdev); -+ int err; -+ -+#if HCI_VERSION_CODE < KERNEL_VERSION(4, 1, 0) -+ int i; -+#endif -+ -+ /* When in kernel 4.4.0 and greater, the HCI_RUNNING bit is -+ * cleared in hci_dev_do_close(). */ -+#if HCI_VERSION_CODE < KERNEL_VERSION(4, 4, 0) -+ if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) -+ return 0; -+#else -+ if (test_bit(HCI_RUNNING, &hdev->flags)) { -+ RTKBT_ERR("HCI_RUNNING is not cleared before."); -+ return -1; -+ } -+#endif -+ -+ RTKBT_DBG("btusb_close"); -+#if HCI_VERSION_CODE < KERNEL_VERSION(4, 1, 0) -+ /*******************************/ -+ for (i = 0; i < NUM_REASSEMBLY; i++) { -+ if (hdev->reassembly[i]) { -+ kfree_skb(hdev->reassembly[i]); -+ hdev->reassembly[i] = NULL; -+ RTKBT_DBG("%s free ressembly i=%d", __FUNCTION__, i); -+ } -+ } -+ /*******************************/ -+#endif -+ cancel_work_sync(&data->work); -+ cancel_work_sync(&data->waker); -+ -+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags); -+ clear_bit(BTUSB_BULK_RUNNING, &data->flags); -+ clear_bit(BTUSB_INTR_RUNNING, &data->flags); -+ -+ btusb_stop_traffic(data); -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) -+ btusb_free_frags(data); -+#endif -+ -+ err = usb_autopm_get_interface(data->intf); -+ if (err < 0) -+ goto failed; -+ -+ data->intf->needs_remote_wakeup = 0; -+ usb_autopm_put_interface(data->intf); -+ -+#ifdef BTCOEX -+ rtk_btcoex_close(); -+#endif -+ -+failed: -+ mdelay(URB_CANCELING_DELAY_MS); // Added by Realtek -+ usb_scuttle_anchored_urbs(&data->deferred); -+ -+#ifdef RTKBT_SWITCH_PATCH -+ down(&switch_sem); -+ if (data->context) { -+ struct api_context *ctx = data->context; -+ -+ if (ctx->flags & RTLBT_CLOSE) { -+ ctx->flags &= ~RTLBT_CLOSE; -+ ctx->status = 0; -+ complete(&ctx->done); -+ } -+ } -+ up(&switch_sem); -+#endif -+ -+ return 0; -+} -+ -+static int btusb_flush(struct hci_dev *hdev) -+{ -+ struct btusb_data *data = GET_DRV_DATA(hdev); -+ -+ RTKBT_DBG("%s add delay ", __FUNCTION__); -+ mdelay(URB_CANCELING_DELAY_MS); // Added by Realtek -+ usb_kill_anchored_urbs(&data->tx_anchor); -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) -+ btusb_free_frags(data); -+#endif -+ -+ return 0; -+} -+ -+static const char pkt_ind[][8] = { -+ [HCI_COMMAND_PKT] = "cmd", -+ [HCI_ACLDATA_PKT] = "acl", -+ [HCI_SCODATA_PKT] = "sco", -+#if HCI_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) -+ [HCI_ISODATA_PKT] = "iso", -+#endif -+}; -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) -+static struct urb *alloc_ctrl_urb(struct hci_dev *hdev, struct sk_buff *skb) -+{ -+ struct btusb_data *data = hci_get_drvdata(hdev); -+ struct usb_ctrlrequest *dr; 
-+ struct urb *urb; -+ unsigned int pipe; -+ -+ urb = usb_alloc_urb(0, GFP_KERNEL); -+ if (!urb) -+ return ERR_PTR(-ENOMEM); -+ -+ dr = kmalloc(sizeof(*dr), GFP_KERNEL); -+ if (!dr) { -+ usb_free_urb(urb); -+ return ERR_PTR(-ENOMEM); -+ } -+ -+ dr->bRequestType = data->cmdreq_type; -+ dr->bRequest = 0; -+ dr->wIndex = 0; -+ dr->wValue = 0; -+ dr->wLength = __cpu_to_le16(skb->len); -+ -+ pipe = usb_sndctrlpipe(data->udev, 0x00); -+ -+ usb_fill_control_urb(urb, data->udev, pipe, (void *)dr, -+ skb->data, skb->len, btusb_tx_complete, skb); -+ -+ skb->dev = (void *)hdev; -+ -+ return urb; -+} -+ -+static struct urb *alloc_bulk_urb(struct hci_dev *hdev, struct sk_buff *skb) -+{ -+ struct btusb_data *data = hci_get_drvdata(hdev); -+ struct urb *urb; -+ unsigned int pipe; -+ -+ if (!data->bulk_tx_ep) -+ return ERR_PTR(-ENODEV); -+ -+ urb = usb_alloc_urb(0, GFP_KERNEL); -+ if (!urb) -+ return ERR_PTR(-ENOMEM); -+ -+ pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress); -+ -+ usb_fill_bulk_urb(urb, data->udev, pipe, -+ skb->data, skb->len, btusb_tx_complete, skb); -+ -+ skb->dev = (void *)hdev; -+ -+ return urb; -+} -+ -+static struct urb *alloc_isoc_urb(struct hci_dev *hdev, struct sk_buff *skb) -+{ -+ struct btusb_data *data = hci_get_drvdata(hdev); -+ struct urb *urb; -+ unsigned int pipe; -+ -+ if (!data->isoc_tx_ep) -+ return ERR_PTR(-ENODEV); -+ -+ urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_KERNEL); -+ if (!urb) -+ return ERR_PTR(-ENOMEM); -+ -+ pipe = usb_sndisocpipe(data->udev, data->isoc_tx_ep->bEndpointAddress); -+ -+ usb_fill_int_urb(urb, data->udev, pipe, -+ skb->data, skb->len, btusb_isoc_tx_complete, -+ skb, data->isoc_tx_ep->bInterval); -+ -+ urb->transfer_flags = URB_ISO_ASAP; -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) -+ if (data->isoc_altsetting == 6) -+ __fill_isoc_descriptor_msbc(urb, skb->len, -+ le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize), -+ data); -+ else -+ __fill_isoc_descriptor(urb, skb->len, -+ le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize)); -+#else -+ __fill_isoc_descriptor(urb, skb->len, -+ le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize)); -+#endif -+ -+ skb->dev = (void *)hdev; -+ -+ return urb; -+} -+ -+static int submit_tx_urb(struct hci_dev *hdev, struct urb *urb) -+{ -+ struct btusb_data *data = hci_get_drvdata(hdev); -+ int err; -+ -+ usb_anchor_urb(urb, &data->tx_anchor); -+ -+ err = usb_submit_urb(urb, GFP_KERNEL); -+ if (err < 0) { -+ if (err != -EPERM && err != -ENODEV) -+ RTKBT_ERR("%s urb %p submission failed (%d)", -+ hdev->name, urb, -err); -+ kfree(urb->setup_packet); -+ usb_unanchor_urb(urb); -+ } else { -+ usb_mark_last_busy(data->udev); -+ } -+ -+ usb_free_urb(urb); -+ return err; -+} -+ -+static int submit_or_queue_tx_urb(struct hci_dev *hdev, struct urb *urb) -+{ -+ struct btusb_data *data = hci_get_drvdata(hdev); -+ unsigned long flags; -+ bool suspending; -+ -+ spin_lock_irqsave(&data->txlock, flags); -+ suspending = test_bit(BTUSB_SUSPENDING, &data->flags); -+ if (!suspending) -+ data->tx_in_flight++; -+ spin_unlock_irqrestore(&data->txlock, flags); -+ -+ if (!suspending) -+ return submit_tx_urb(hdev, urb); -+ -+ usb_anchor_urb(urb, &data->deferred); -+ schedule_work(&data->waker); -+ -+ usb_free_urb(urb); -+ return 0; -+} -+ -+#endif -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) -+int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) -+{ -+#else -+int btusb_send_frame(struct sk_buff *skb) -+{ -+ struct hci_dev *hdev = (struct hci_dev *)skb->dev; -+#endif -+ -+ struct urb *urb; -+#if HCI_VERSION_CODE 
< KERNEL_VERSION(3, 18, 0) -+ struct btusb_data *data = GET_DRV_DATA(hdev); -+ struct usb_ctrlrequest *dr; -+ unsigned int pipe; -+ int err; -+#endif -+ -+// RTKBT_DBG("%s", hdev->name); -+ -+ /* After Kernel version 4.4.0, move the check into the -+ * hci_send_frame function before calling hdev->send -+ */ -+#if HCI_VERSION_CODE < KERNEL_VERSION(4, 4, 0) -+ if (!test_bit(HCI_RUNNING, &hdev->flags)) { -+ /* If the parameter is wrong, the hdev isn't the correct -+ * one. Then no HCI commands can be sent. -+ * This issue is related to the wrong HCI_VERSION_CODE set */ -+ RTKBT_ERR("HCI is not running"); -+ return -EBUSY; -+ } -+#endif -+ -+ /* Before kernel/hci version 3.13.0, the skb->dev is set before -+ * entering btusb_send_frame(). So there is no need to set it here. -+ * -+ * The skb->dev will be used in the callbacks when urb transfer -+ * completes. See btusb_tx_complete() and btusb_isoc_tx_complete() */ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) && \ -+ HCI_VERSION_CODE < KERNEL_VERSION(3, 18, 0) -+ skb->dev = (void *)hdev; -+#endif -+ -+ switch (bt_cb(skb)->pkt_type) { -+ case HCI_COMMAND_PKT: -+ print_command(skb); -+ -+#ifdef BTCOEX -+ rtk_btcoex_parse_cmd(skb->data, skb->len); -+#endif -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) -+ urb = alloc_ctrl_urb(hdev, skb); -+ if (IS_ERR(urb)) -+ return PTR_ERR(urb); -+ -+ hdev->stat.cmd_tx++; -+ return submit_or_queue_tx_urb(hdev, urb); -+#else -+ urb = usb_alloc_urb(0, GFP_ATOMIC); -+ if (!urb) -+ return -ENOMEM; -+ -+ dr = kmalloc(sizeof(*dr), GFP_ATOMIC); -+ if (!dr) { -+ usb_free_urb(urb); -+ return -ENOMEM; -+ } -+ -+ dr->bRequestType = data->cmdreq_type; -+ dr->bRequest = 0; -+ dr->wIndex = 0; -+ dr->wValue = 0; -+ dr->wLength = __cpu_to_le16(skb->len); -+ -+ pipe = usb_sndctrlpipe(data->udev, 0x00); -+ -+ usb_fill_control_urb(urb, data->udev, pipe, (void *)dr, -+ skb->data, skb->len, btusb_tx_complete, -+ skb); -+ -+ hdev->stat.cmd_tx++; -+ break; -+ -+#endif -+#if HCI_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) -+ case HCI_ISODATA_PKT: -+#endif -+ case HCI_ACLDATA_PKT: -+ print_acl(skb, 1); -+#ifdef BTCOEX -+ if(bt_cb(skb)->pkt_type == HCI_ACLDATA_PKT) -+ rtk_btcoex_parse_l2cap_data_tx(skb->data, skb->len); -+#endif -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) -+ urb = alloc_bulk_urb(hdev, skb); -+ if (IS_ERR(urb)) -+ return PTR_ERR(urb); -+ -+ hdev->stat.acl_tx++; -+ return submit_or_queue_tx_urb(hdev, urb); -+#else -+ if (!data->bulk_tx_ep) -+ return -ENODEV; -+ -+ urb = usb_alloc_urb(0, GFP_ATOMIC); -+ if (!urb) -+ return -ENOMEM; -+ -+ pipe = usb_sndbulkpipe(data->udev, -+ data->bulk_tx_ep->bEndpointAddress); -+ -+ usb_fill_bulk_urb(urb, data->udev, pipe, -+ skb->data, skb->len, btusb_tx_complete, skb); -+ -+ hdev->stat.acl_tx++; -+ break; -+ -+#endif -+ case HCI_SCODATA_PKT: -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) -+ if (hci_conn_num(hdev, SCO_LINK) < 1) -+ return -ENODEV; -+ -+ urb = alloc_isoc_urb(hdev, skb); -+ if (IS_ERR(urb)) -+ return PTR_ERR(urb); -+ -+ hdev->stat.sco_tx++; -+ return submit_tx_urb(hdev, urb); -+ } -+ -+ return -EILSEQ; -+#else -+ if (!data->isoc_tx_ep || SCO_NUM < 1) -+ return -ENODEV; -+ -+ urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC); -+ if (!urb) -+ return -ENOMEM; -+ -+ pipe = usb_sndisocpipe(data->udev, -+ data->isoc_tx_ep->bEndpointAddress); -+ -+ usb_fill_int_urb(urb, data->udev, pipe, -+ skb->data, skb->len, btusb_isoc_tx_complete, -+ skb, data->isoc_tx_ep->bInterval); -+ -+ urb->transfer_flags = URB_ISO_ASAP; -+ -+ __fill_isoc_descriptor(urb, skb->len, -+ 
le16_to_cpu(data->isoc_tx_ep-> -+ wMaxPacketSize)); -+ -+ hdev->stat.sco_tx++; -+ goto skip_waking; -+ -+ default: -+ return -EILSEQ; -+ -+ } -+ -+ err = inc_tx(data); -+ if (err) { -+ usb_anchor_urb(urb, &data->deferred); -+ schedule_work(&data->waker); -+ err = 0; -+ goto done; -+ } -+ -+skip_waking: -+ usb_anchor_urb(urb, &data->tx_anchor); -+ err = usb_submit_urb(urb, GFP_ATOMIC); -+ if (err < 0) { -+ RTKBT_ERR("%s %s urb %p submission for %s failed, err %d", -+ __func__, hdev->name, urb, -+ pkt_ind[bt_cb(skb)->pkt_type], err); -+ kfree(urb->setup_packet); -+ usb_unanchor_urb(urb); -+ } else { -+ usb_mark_last_busy(data->udev); -+ } -+ -+done: -+ usb_free_urb(urb); -+ return err; -+#endif -+} -+ -+ -+#if HCI_VERSION_CODE < KERNEL_VERSION(3, 4, 0) -+static void btusb_destruct(struct hci_dev *hdev) -+{ -+ RTKBT_DBG("btusb_destruct %s", hdev->name); -+ hci_free_dev(hdev); -+} -+#endif -+ -+static void btusb_notify(struct hci_dev *hdev, unsigned int evt) -+{ -+ struct btusb_data *data = GET_DRV_DATA(hdev); -+ -+ RTKBT_DBG("%s: %s evt %d", __func__, hdev->name, evt); -+ -+ if (SCO_NUM != data->sco_num) { -+ data->sco_num = SCO_NUM; -+ RTKBT_DBG("%s: Update sco num %d", __func__, data->sco_num); -+#if HCI_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) -+ data->air_mode = evt; -+#endif -+ schedule_work(&data->work); -+ } -+} -+ -+static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting) -+{ -+ struct btusb_data *data = GET_DRV_DATA(hdev); -+ struct usb_interface *intf = data->isoc; -+ struct usb_endpoint_descriptor *ep_desc; -+ int i, err; -+ -+ if (!data->isoc) -+ return -ENODEV; -+ -+ RTKBT_INFO("set isoc interface: alt %d", altsetting); -+ -+ err = usb_set_interface(data->udev, 1, altsetting); -+ if (err < 0) { -+ RTKBT_ERR("%s setting interface failed (%d)", hdev->name, -err); -+ return err; -+ } -+ -+ data->isoc_altsetting = altsetting; -+ -+ data->isoc_tx_ep = NULL; -+ data->isoc_rx_ep = NULL; -+ -+ for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { -+ ep_desc = &intf->cur_altsetting->endpoint[i].desc; -+ -+ if (!data->isoc_tx_ep && usb_endpoint_is_isoc_out(ep_desc)) { -+ data->isoc_tx_ep = ep_desc; -+ continue; -+ } -+ -+ if (!data->isoc_rx_ep && usb_endpoint_is_isoc_in(ep_desc)) { -+ data->isoc_rx_ep = ep_desc; -+ continue; -+ } -+ } -+ -+ if (!data->isoc_tx_ep || !data->isoc_rx_ep) { -+ RTKBT_ERR("%s invalid SCO descriptors", hdev->name); -+ return -ENODEV; -+ } -+ -+ return 0; -+} -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) -+static int btusb_switch_alt_setting(struct hci_dev *hdev, int new_alts) -+{ -+ struct btusb_data *data = hci_get_drvdata(hdev); -+ int err; -+ -+ if (data->isoc_altsetting != new_alts) { -+ unsigned long flags; -+ -+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags); -+ usb_kill_anchored_urbs(&data->isoc_anchor); -+ -+ /* When isochronous alternate setting needs to be -+ * changed, because SCO connection has been added -+ * or removed, a packet fragment may be left in the -+ * reassembling state. This could lead to wrongly -+ * assembled fragments. -+ * -+ * Clear outstanding fragment when selecting a new -+ * alternate setting. 
-+ */ -+ spin_lock_irqsave(&data->rxlock, flags); -+ kfree_skb(data->sco_skb); -+ data->sco_skb = NULL; -+ spin_unlock_irqrestore(&data->rxlock, flags); -+ -+ err = __set_isoc_interface(hdev, new_alts); -+ if (err < 0) -+ return err; -+ } -+ -+ if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) { -+ if (btusb_submit_isoc_urb(hdev, GFP_KERNEL) < 0) -+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags); -+ else -+ btusb_submit_isoc_urb(hdev, GFP_KERNEL); -+ } -+ -+ return 0; -+} -+ -+static struct usb_host_interface *btusb_find_altsetting(struct btusb_data *data, -+ int alt) -+{ -+ struct usb_interface *intf = data->isoc; -+ int i; -+ -+ BT_DBG("Looking for Alt no :%d", alt); -+ -+ if (!intf) -+ return NULL; -+ -+ for (i = 0; i < intf->num_altsetting; i++) { -+ if (intf->altsetting[i].desc.bAlternateSetting == alt) -+ return &intf->altsetting[i]; -+ } -+ -+ return NULL; -+} -+#endif -+ -+static void btusb_work(struct work_struct *work) -+{ -+ struct btusb_data *data = container_of(work, struct btusb_data, work); -+ struct hci_dev *hdev = data->hdev; -+ int err; -+ int new_alts = 0; -+ -+ RTKBT_DBG("%s: sco num %d", __func__, data->sco_num); -+ if (data->sco_num > 0) { -+ if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) { -+ err = -+ usb_autopm_get_interface(data->isoc ? data-> -+ isoc : data->intf); -+ if (err < 0) { -+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags); -+ mdelay(URB_CANCELING_DELAY_MS); -+ usb_kill_anchored_urbs(&data->isoc_anchor); -+ return; -+ } -+ -+ set_bit(BTUSB_DID_ISO_RESUME, &data->flags); -+ } -+#if HCI_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) -+ if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_CVSD) { -+ if (hdev->voice_setting & 0x0020) { -+ static const int alts[3] = { 2, 4, 5 }; -+ new_alts = alts[data->sco_num - 1]; -+ } else { -+ new_alts = data->sco_num; -+ } -+ } else if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_TRANSP) { -+ if (btusb_find_altsetting(data, 6)) -+ new_alts = 6; -+ else if (btusb_find_altsetting(data, 3) && -+ hdev->sco_mtu >= 72 && -+ test_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags)) -+ new_alts = 3; -+ else -+ new_alts = 1; -+ } -+ -+ if (btusb_switch_alt_setting(hdev, new_alts) < 0) -+ RTKBT_ERR("set USB alt:(%d) failed!", new_alts); -+#else -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) -+ if (hdev->voice_setting & 0x0020) { -+ static const int alts[3] = { 2, 4, 5 }; -+ new_alts = alts[data->sco_num - 1]; -+ } else { -+ new_alts = data->sco_num; -+ } -+ if (data->isoc_altsetting != new_alts) { -+#else -+ if (data->isoc_altsetting != 2) { -+ new_alts = 2; -+#endif -+ -+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags); -+ mdelay(URB_CANCELING_DELAY_MS); -+ usb_kill_anchored_urbs(&data->isoc_anchor); -+ -+ if (__set_isoc_interface(hdev, new_alts) < 0) -+ return; -+ } -+ -+ if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) { -+ RTKBT_INFO("submit SCO RX urb."); -+ if (btusb_submit_isoc_urb(hdev, GFP_KERNEL) < 0) -+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags); -+ else -+ btusb_submit_isoc_urb(hdev, GFP_KERNEL); -+ } -+#endif -+ } else { -+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags); -+ mdelay(URB_CANCELING_DELAY_MS); -+ usb_kill_anchored_urbs(&data->isoc_anchor); -+ -+ __set_isoc_interface(hdev, 0); -+ if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags)) -+ usb_autopm_put_interface(data->isoc ? 
data-> -+ isoc : data->intf); -+ } -+} -+ -+static void btusb_waker(struct work_struct *work) -+{ -+ struct btusb_data *data = container_of(work, struct btusb_data, waker); -+ int err; -+ -+ err = usb_autopm_get_interface(data->intf); -+ RTKBT_DBG("%s start", __FUNCTION__); -+ if (err < 0) -+ return; -+ -+ usb_autopm_put_interface(data->intf); -+ RTKBT_DBG("%s end", __FUNCTION__); -+} -+ -+#ifdef RTKBT_TV_POWERON_WHITELIST -+static int rtkbt_lookup_le_device_poweron_whitelist(struct hci_dev *hdev, -+ struct usb_device *udev) -+{ -+ struct hci_conn_params *p; -+ u8 *cmd; -+ int result = 0; -+ -+ hci_dev_lock(hdev); -+ list_for_each_entry(p, &hdev->le_conn_params, list) { -+#if 0 // for debug message -+ RTKBT_DBG("%s(): auto_connect = %d", __FUNCTION__, p->auto_connect); -+ RTKBT_DBG("%s(): addr_type = 0x%02x", __FUNCTION__, p->addr_type); -+ RTKBT_DBG("%s(): addr=%02x:%02x:%02x:%02x:%02x:%02x", __FUNCTION__, -+ p->addr.b[5], p->addr.b[4], p->addr.b[3], -+ p->addr.b[2], p->addr.b[1], p->addr.b[0]); -+#endif -+ if ( p->auto_connect == HCI_AUTO_CONN_ALWAYS && -+ p->addr_type == ADDR_LE_DEV_PUBLIC ) { -+ -+ RTKBT_DBG("%s(): Set RTKBT LE Power-on Whitelist for " -+ "%02x:%02x:%02x:%02x:%02x:%02x", __FUNCTION__, -+ p->addr.b[5], p->addr.b[4], p->addr.b[3], -+ p->addr.b[2], p->addr.b[1], p->addr.b[0]); -+ -+ cmd = kzalloc(16, GFP_ATOMIC); -+ if (!cmd) { -+ RTKBT_ERR("Can't allocate memory for cmd"); -+ return -ENOMEM; -+ } -+ cmd[0] = 0x7b; -+ cmd[1] = 0xfc; -+ cmd[2] = 0x07; -+ cmd[3] = 0x00; -+ cmd[4] = p->addr.b[0]; -+ cmd[5] = p->addr.b[1]; -+ cmd[6] = p->addr.b[2]; -+ cmd[7] = p->addr.b[3]; -+ cmd[8] = p->addr.b[4]; -+ cmd[9] = p->addr.b[5]; -+ -+ result = __rtk_send_hci_cmd(udev, cmd, 10); -+ kfree(cmd); -+ } -+ } -+ hci_dev_unlock(hdev); -+ -+ return result; -+} -+#endif -+ -+static int rtkbt_pm_notify(struct notifier_block *notifier, -+ ulong pm_event, void *unused) -+{ -+ struct btusb_data *data; -+ struct usb_device *udev; -+ struct usb_interface *intf; -+ struct hci_dev *hdev; -+ /* int err; */ -+#if defined RTKBT_SWITCH_PATCH || defined RTKBT_TV_POWERON_WHITELIST -+ int result = 0; -+#endif -+#ifdef RTKBT_SWITCH_PATCH -+ u8 *cmd; -+ static u8 hci_state = 0; -+ struct api_context ctx; -+#endif -+ -+ data = container_of(notifier, struct btusb_data, pm_notifier); -+ udev = data->udev; -+ intf = data->intf; -+ hdev = data->hdev; -+ -+ RTKBT_DBG("%s: pm_event %ld", __func__, pm_event); -+ switch (pm_event) { -+ case PM_SUSPEND_PREPARE: -+ case PM_HIBERNATION_PREPARE: -+ /* No need to load firmware because the download firmware -+ * process is deprecated in resume. 
-+ * We use rebind after resume instead */ -+ /* err = usb_autopm_get_interface(data->intf); -+ * if (err < 0) -+ * return err; -+ * patch_entry->fw_len = -+ * load_firmware(dev_entry, &patch_entry->fw_cache); -+ * usb_autopm_put_interface(data->intf); -+ * if (patch_entry->fw_len <= 0) { -+ * RTKBT_DBG("rtkbt_pm_notify return NOTIFY_BAD"); -+ * return NOTIFY_BAD; -+ * } */ -+ -+ RTKBT_DBG("%s: suspend prepare", __func__); -+ -+ if (!device_may_wakeup(&udev->dev)) { -+#ifdef CONFIG_NEEDS_BINDING -+ intf->needs_binding = 1; -+ RTKBT_DBG("Remote wakeup not support, set " -+ "intf->needs_binding = 1"); -+#else -+ RTKBT_DBG("Remote wakeup not support, no needs binding"); -+#endif -+ } -+ -+#ifdef RTKBT_SWITCH_PATCH -+ if (test_bit(HCI_UP, &hdev->flags)) { -+ unsigned long expire; -+ -+ init_completion(&ctx.done); -+ hci_state = 1; -+ -+ down(&switch_sem); -+ data->context = &ctx; -+ ctx.flags = RTLBT_CLOSE; -+ queue_work(hdev->req_workqueue, &hdev->power_off.work); -+ up(&switch_sem); -+ -+ expire = msecs_to_jiffies(1000); -+ if (!wait_for_completion_timeout(&ctx.done, expire)) -+ RTKBT_ERR("hdev close timeout"); -+ -+ down(&switch_sem); -+ data->context = NULL; -+ up(&switch_sem); -+ } -+ -+ cmd = kzalloc(16, GFP_ATOMIC); -+ if (!cmd) { -+ RTKBT_ERR("Can't allocate memory for cmd"); -+ return -ENOMEM; -+ } -+ -+ /* Clear patch */ -+ cmd[0] = 0x66; -+ cmd[1] = 0xfc; -+ cmd[2] = 0x00; -+ -+ result = __rtk_send_hci_cmd(udev, cmd, 3); -+ kfree(cmd); -+ msleep(100); /* From FW colleague's recommendation */ -+ result = download_special_patch(intf, "lps_"); -+#endif -+ -+#ifdef RTKBT_TV_POWERON_WHITELIST -+ result = rtkbt_lookup_le_device_poweron_whitelist(hdev, udev); -+ if (result < 0) { -+ RTKBT_ERR("rtkbt_lookup_le_device_poweron_whitelist error: %d", result); -+ } -+#endif -+ -+#if defined RTKBT_SUSPEND_WAKEUP || defined RTKBT_SWITCH_PATCH -+#ifdef RTKBT_POWERKEY_WAKEUP -+ /* Tell the controller to wake up host if received special -+ * advertising packet -+ */ -+ set_scan(intf); -+#endif -+ /* Send special vendor commands */ -+#endif -+ -+ break; -+ -+ case PM_POST_SUSPEND: -+ case PM_POST_HIBERNATION: -+ case PM_POST_RESTORE: -+ /* if (patch_entry->fw_len > 0) { -+ * kfree(patch_entry->fw_cache); -+ * patch_entry->fw_cache = NULL; -+ * patch_entry->fw_len = 0; -+ * } */ -+ -+#ifdef RTKBT_SWITCH_PATCH -+ cmd = kzalloc(16, GFP_ATOMIC); -+ if (!cmd) { -+ RTKBT_ERR("Can't allocate memory for cmd"); -+ return -ENOMEM; -+ } -+ -+ /* Clear patch */ -+ cmd[0] = 0x66; -+ cmd[1] = 0xfc; -+ cmd[2] = 0x00; -+ -+ result = __rtk_send_hci_cmd(udev, cmd, 3); -+ kfree(cmd); -+ msleep(100); /* From FW colleague's recommendation */ -+ result = download_patch(intf); -+ if (hci_state) { -+ hci_state = 0; -+ queue_work(hdev->req_workqueue, &hdev->power_on); -+ } -+#endif -+ -+#ifdef BTUSB_RPM -+ RTKBT_DBG("%s: Re-enable autosuspend", __func__); -+ /* pm_runtime_use_autosuspend(&udev->dev); -+ * pm_runtime_set_autosuspend_delay(&udev->dev, 2000); -+ * pm_runtime_set_active(&udev->dev); -+ * pm_runtime_allow(&udev->dev); -+ * pm_runtime_mark_last_busy(&udev->dev); -+ * pm_runtime_autosuspend(&udev->dev); -+ * pm_runtime_put_autosuspend(&udev->dev); -+ * usb_disable_autosuspend(udev); */ -+ /* FIXME: usb_enable_autosuspend(udev) is useless here. 
-+ * Because it is always enabled after enabled in btusb_probe() -+ */ -+ usb_enable_autosuspend(udev); -+ pm_runtime_mark_last_busy(&udev->dev); -+#endif -+ break; -+ -+ default: -+ break; -+ } -+ -+ return NOTIFY_DONE; -+} -+ -+static int rtkbt_shutdown_notify(struct notifier_block *notifier, -+ ulong pm_event, void *unused) -+{ -+ struct btusb_data *data; -+ struct usb_device *udev; -+ struct usb_interface *intf; -+ struct hci_dev *hdev; -+ /* int err; */ -+ -+ data = container_of(notifier, struct btusb_data, shutdown_notifier); -+ udev = data->udev; -+ intf = data->intf; -+ hdev = data->hdev; -+ -+ RTKBT_DBG("%s: pm_event %ld", __func__, pm_event); -+ switch (pm_event) { -+ case SYS_POWER_OFF: -+ case SYS_RESTART: -+#ifdef RTKBT_SHUTDOWN_WAKEUP -+ RTKBT_DBG("%s: power off", __func__); -+ set_scan(intf); -+#endif -+ break; -+ -+ default: -+ break; -+ } -+ -+ return NOTIFY_DONE; -+} -+ -+static int btusb_probe(struct usb_interface *intf, -+ const struct usb_device_id *id) -+{ -+ struct usb_endpoint_descriptor *ep_desc; -+ struct btusb_data *data; -+ struct hci_dev *hdev; -+ int i, err, flag1, flag2; -+ struct usb_device *udev; -+ udev = interface_to_usbdev(intf); -+ -+ RTKBT_DBG("btusb_probe intf->cur_altsetting->desc.bInterfaceNumber %d", -+ intf->cur_altsetting->desc.bInterfaceNumber); -+ -+ /* interface numbers are hardcoded in the spec */ -+ if (intf->cur_altsetting->desc.bInterfaceNumber != 0) -+ return -ENODEV; -+ -+ /*******************************/ -+ flag1 = device_can_wakeup(&udev->dev); -+ flag2 = device_may_wakeup(&udev->dev); -+ RTKBT_DBG("btusb_probe can_wakeup %x, may wakeup %x", flag1, flag2); -+#ifdef BTUSB_WAKEUP_HOST -+ device_wakeup_enable(&udev->dev); -+#endif -+ //device_wakeup_enable(&udev->dev); -+ /*device_wakeup_disable(&udev->dev); -+ flag1=device_can_wakeup(&udev->dev); -+ flag2=device_may_wakeup(&udev->dev); -+ RTKBT_DBG("btusb_probe can_wakeup=%x flag2=%x",flag1,flag2); -+ */ -+ err = patch_add(intf); -+ if (err < 0) -+ return -1; -+ /*******************************/ -+ -+ data = rtk_alloc(intf); -+ if (!data) -+ return -ENOMEM; -+ -+ for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { -+ ep_desc = &intf->cur_altsetting->endpoint[i].desc; -+ if (!data->intr_ep && usb_endpoint_is_bulk_in(ep_desc) && (ep_desc->bEndpointAddress == 0x81)) { -+ data->intr_ep = ep_desc; -+ continue; -+ } -+ -+ if (!data->intr_ep && usb_endpoint_is_int_in(ep_desc)) { -+ data->intr_ep = ep_desc; -+ continue; -+ } -+ -+ if (!data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) { -+ data->bulk_tx_ep = ep_desc; -+ continue; -+ } -+ -+ if (!data->bulk_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) { -+ data->bulk_rx_ep = ep_desc; -+ continue; -+ } -+ } -+ -+ if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) { -+ rtk_free(data); -+ return -ENODEV; -+ } -+ -+ data->cmdreq_type = USB_TYPE_CLASS; -+ -+ data->udev = interface_to_usbdev(intf); -+ data->intf = intf; -+ -+ spin_lock_init(&data->lock); -+ -+ INIT_WORK(&data->work, btusb_work); -+ INIT_WORK(&data->waker, btusb_waker); -+ spin_lock_init(&data->txlock); -+ -+ init_usb_anchor(&data->tx_anchor); -+ init_usb_anchor(&data->intr_anchor); -+ init_usb_anchor(&data->bulk_anchor); -+ init_usb_anchor(&data->isoc_anchor); -+ init_usb_anchor(&data->deferred); -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) -+ spin_lock_init(&data->rxlock); -+ data->recv_bulk = btusb_recv_bulk; -+#endif -+ -+ hdev = hci_alloc_dev(); -+ if (!hdev) { -+ rtk_free(data); -+ return -ENOMEM; -+ } -+ -+ HDEV_BUS = HCI_USB; -+ -+ data->hdev 
= hdev; -+ -+ SET_HCIDEV_DEV(hdev, &intf->dev); -+ -+ hdev->open = btusb_open; -+ hdev->close = btusb_close; -+ hdev->flush = btusb_flush; -+ hdev->send = btusb_send_frame; -+ hdev->notify = btusb_notify; -+#if HCI_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) -+ hdev->setup = btusb_setup; -+#endif -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) -+ hci_set_drvdata(hdev, data); -+#else -+ hdev->driver_data = data; -+ hdev->destruct = btusb_destruct; -+ hdev->owner = THIS_MODULE; -+#endif -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) -+ set_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags); -+ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); -+#endif -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 7, 1) -+ if (!reset) -+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); -+#endif -+ -+ /* Interface numbers are hardcoded in the specification */ -+ data->isoc = usb_ifnum_to_if(data->udev, 1); -+ -+ if (data->isoc) { -+ err = usb_driver_claim_interface(&btusb_driver, -+ data->isoc, data); -+ if (err < 0) { -+ hci_free_dev(hdev); -+ rtk_free(data); -+ return err; -+ } -+ } -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) -+ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); -+#endif -+ -+ err = hci_register_dev(hdev); -+ if (err < 0) { -+ hci_free_dev(hdev); -+ rtk_free(data); -+ return err; -+ } -+ -+ usb_set_intfdata(intf, data); -+ -+ /* Register PM notifier */ -+ data->pm_notifier.notifier_call = rtkbt_pm_notify; -+ register_pm_notifier(&data->pm_notifier); -+ -+ /* Register POWER-OFF notifier */ -+ data->shutdown_notifier.notifier_call = rtkbt_shutdown_notify; -+ register_reboot_notifier(&data->shutdown_notifier); -+#ifdef BTCOEX -+ rtk_btcoex_probe(hdev); -+#endif -+ -+ RTKBT_DBG("%s: done", __func__); -+ -+ return 0; -+} -+ -+static void btusb_disconnect(struct usb_interface *intf) -+{ -+ struct btusb_data *data = usb_get_intfdata(intf); -+ struct hci_dev *hdev; -+ struct usb_device *udev; -+ udev = interface_to_usbdev(intf); -+ -+ if (intf->cur_altsetting->desc.bInterfaceNumber != 0) -+ return; -+ -+ if (!data) -+ return; -+ -+ RTKBT_DBG("btusb_disconnect"); -+ -+ /* Un-register PM notifier */ -+ unregister_pm_notifier(&data->pm_notifier); -+ unregister_reboot_notifier(&data->shutdown_notifier); -+ -+ /*******************************/ -+ patch_remove(intf); -+ /*******************************/ -+ -+ hdev = data->hdev; -+ -+#if HCI_VERSION_CODE < KERNEL_VERSION(3, 4, 0) -+ __hci_dev_hold(hdev); -+#endif -+ -+ usb_set_intfdata(data->intf, NULL); -+ -+ if (data->isoc) -+ usb_set_intfdata(data->isoc, NULL); -+ -+ hci_unregister_dev(hdev); -+ -+ if (intf == data->isoc) -+ usb_driver_release_interface(&btusb_driver, data->intf); -+ else if (data->isoc) -+ usb_driver_release_interface(&btusb_driver, data->isoc); -+ -+#if HCI_VERSION_CODE < KERNEL_VERSION(3, 4, 0) -+ __hci_dev_put(hdev); -+#endif -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) -+ btusb_free_frags(data); -+#endif -+ -+ hci_free_dev(hdev); -+ rtk_free(data); -+} -+ -+#ifdef CONFIG_PM -+static int btusb_suspend(struct usb_interface *intf, pm_message_t message) -+{ -+ struct btusb_data *data = usb_get_intfdata(intf); -+ -+ if (intf->cur_altsetting->desc.bInterfaceNumber != 0) -+ return 0; -+ -+ /*******************************/ -+ RTKBT_DBG("btusb_suspend message.event 0x%x, data->suspend_count %d", -+ message.event, data->suspend_count); -+ if (!test_bit(HCI_RUNNING, &data->hdev->flags)) { -+ RTKBT_INFO("%s: hdev is not HCI_RUNNING", __func__); -+ /* set_scan(data->intf); */ -+ } -+ 
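	/* Repeated suspend calls are tracked with suspend_count so traffic is
	 * only stopped on the first one; an autosuspend request is refused
	 * with -EBUSY below while TX URBs are still in flight. */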
/*******************************/ -+ -+ if (data->suspend_count++) -+ return 0; -+ -+ spin_lock_irq(&data->txlock); -+ if (!((message.event & PM_EVENT_AUTO) && data->tx_in_flight)) { -+ set_bit(BTUSB_SUSPENDING, &data->flags); -+ spin_unlock_irq(&data->txlock); -+ RTKBT_INFO("%s: suspending...", __func__); -+ } else { -+ spin_unlock_irq(&data->txlock); -+ data->suspend_count--; -+ return -EBUSY; -+ } -+ -+ cancel_work_sync(&data->work); -+ -+ btusb_stop_traffic(data); -+ mdelay(URB_CANCELING_DELAY_MS); // Added by Realtek -+ usb_kill_anchored_urbs(&data->tx_anchor); -+ -+ return 0; -+} -+ -+static void play_deferred(struct btusb_data *data) -+{ -+ struct urb *urb; -+ int err; -+ -+ while ((urb = usb_get_from_anchor(&data->deferred))) { -+ /************************************/ -+ usb_anchor_urb(urb, &data->tx_anchor); -+ err = usb_submit_urb(urb, GFP_ATOMIC); -+ if (err < 0) { -+ RTKBT_ERR("play_deferred urb %p submission failed", -+ urb); -+ kfree(urb->setup_packet); -+ usb_unanchor_urb(urb); -+ } else { -+ usb_mark_last_busy(data->udev); -+ } -+ usb_free_urb(urb); -+ /************************************/ -+ data->tx_in_flight++; -+ } -+ mdelay(URB_CANCELING_DELAY_MS); // Added by Realtek -+ usb_scuttle_anchored_urbs(&data->deferred); -+} -+ -+static int btusb_resume(struct usb_interface *intf) -+{ -+ struct btusb_data *data = usb_get_intfdata(intf); -+ struct hci_dev *hdev = data->hdev; -+ int err = 0; -+ -+ if (intf->cur_altsetting->desc.bInterfaceNumber != 0) -+ return 0; -+ -+ /*******************************/ -+ RTKBT_DBG("%s: data->suspend_count %d", __func__, data->suspend_count); -+ -+ /* if intf->needs_binding is set, driver will be rebind. -+ * The probe will be called instead of resume */ -+ /* if (!test_bit(HCI_RUNNING, &hdev->flags)) { -+ * RTKBT_DBG("btusb_resume-----bt is off,download patch"); -+ * download_patch(intf); -+ * } else -+ * RTKBT_DBG("btusb_resume,----bt is on"); -+ */ -+ /*******************************/ -+ if (--data->suspend_count) -+ return 0; -+ -+ if (test_bit(BTUSB_INTR_RUNNING, &data->flags)) { -+ err = btusb_submit_intr_urb(hdev, GFP_NOIO); -+ if (err < 0) { -+ clear_bit(BTUSB_INTR_RUNNING, &data->flags); -+ goto failed; -+ } -+ } -+ -+ if (test_bit(BTUSB_BULK_RUNNING, &data->flags)) { -+ err = btusb_submit_bulk_urb(hdev, GFP_NOIO); -+ if (err < 0) { -+ clear_bit(BTUSB_BULK_RUNNING, &data->flags); -+ goto failed; -+ } -+ -+ btusb_submit_bulk_urb(hdev, GFP_NOIO); -+ } -+ -+ if (test_bit(BTUSB_ISOC_RUNNING, &data->flags)) { -+ if (btusb_submit_isoc_urb(hdev, GFP_NOIO) < 0) -+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags); -+ else -+ btusb_submit_isoc_urb(hdev, GFP_NOIO); -+ } -+ -+ spin_lock_irq(&data->txlock); -+ play_deferred(data); -+ clear_bit(BTUSB_SUSPENDING, &data->flags); -+ spin_unlock_irq(&data->txlock); -+ schedule_work(&data->work); -+ -+ RTKBT_DBG("%s: data->suspend_count %d, done", __func__, -+ data->suspend_count); -+ -+ return 0; -+ -+failed: -+ mdelay(URB_CANCELING_DELAY_MS); // Added by Realtek -+ usb_scuttle_anchored_urbs(&data->deferred); -+//done: -+ spin_lock_irq(&data->txlock); -+ clear_bit(BTUSB_SUSPENDING, &data->flags); -+ spin_unlock_irq(&data->txlock); -+ RTKBT_DBG("%s: data->suspend_count %d, fail", __func__, -+ data->suspend_count); -+ -+ return err; -+} -+#endif -+ -+static struct usb_driver btusb_driver = { -+ .name = "rtk_btusb", -+ .probe = btusb_probe, -+ .disconnect = btusb_disconnect, -+#ifdef CONFIG_PM -+ .suspend = btusb_suspend, -+ .resume = btusb_resume, -+#if defined RTKBT_SWITCH_PATCH || defined 
RTKBT_SUSPEND_WAKEUP || defined \ -+ RTKBT_SHUTDOWN_WAKEUP -+ .reset_resume = btusb_resume, -+#endif -+#endif -+ .id_table = btusb_table, -+ .supports_autosuspend = 1, -+#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 7, 1) -+ .disable_hub_initiated_lpm = 1, -+#endif -+}; -+ -+static int __init btusb_init(void) -+{ -+ RTKBT_DBG("Realtek Bluetooth USB driver ver %s", VERSION); -+#ifdef BTCOEX -+ rtk_btcoex_init(); -+#endif -+ return usb_register(&btusb_driver); -+} -+ -+static void __exit btusb_exit(void) -+{ -+ RTKBT_DBG("rtk_btusb: btusb_exit"); -+ usb_deregister(&btusb_driver); -+ -+#ifdef BTCOEX -+ rtk_btcoex_exit(); -+#endif -+} -+ -+module_init(btusb_init); -+module_exit(btusb_exit); -+ -+MODULE_AUTHOR(""); -+MODULE_DESCRIPTION("Realtek Bluetooth USB driver ver " VERSION); -+MODULE_VERSION(VERSION); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/bluetooth/rtk_bt.h b/drivers/bluetooth/rtk_bt.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/bluetooth/rtk_bt.h -@@ -0,0 +1,151 @@ -+/* -+ * -+ * Realtek Bluetooth USB driver -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ * -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* #define HCI_VERSION_CODE KERNEL_VERSION(3, 14, 41) */ -+#define HCI_VERSION_CODE LINUX_VERSION_CODE -+ -+#ifdef CONFIG_BTCOEX -+#define BTCOEX -+#endif -+ -+/*********************************** -+** Realtek - For rtk_btusb driver ** -+***********************************/ -+#ifdef CONFIG_BTUSB_WAKEUP_HOST -+#define BTUSB_WAKEUP_HOST -+#endif -+ -+#define URB_CANCELING_DELAY_MS 10 // Added by Realtek -+#if HCI_VERSION_CODE > KERNEL_VERSION(2, 6, 33) -+#define HDEV_BUS hdev->bus -+#else -+#define HDEV_BUS hdev->type -+#endif -+ -+#if HCI_VERSION_CODE < KERNEL_VERSION(2, 6, 36) -+#define NUM_REASSEMBLY 3 -+#endif -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) -+#define GET_DRV_DATA(x) hci_get_drvdata(x) -+#else -+#define GET_DRV_DATA(x) x->driver_data -+#endif -+ -+#if HCI_VERSION_CODE < KERNEL_VERSION(3, 13, 0) -+#define SCO_NUM hdev->conn_hash.sco_num -+#else -+#define SCO_NUM hci_conn_num(hdev, SCO_LINK) -+#endif -+ -+int patch_add(struct usb_interface *intf); -+void patch_remove(struct usb_interface *intf); -+int download_patch(struct usb_interface *intf); -+int set_btoff(struct usb_interface *intf); -+void print_event(struct sk_buff *skb); -+void print_command(struct sk_buff *skb); -+void print_acl(struct sk_buff *skb, int dataOut); -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) -+int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb); -+#else -+int btusb_send_frame(struct sk_buff *skb); -+#endif -+ -+#define BTUSB_MAX_ISOC_FRAMES 10 -+#define BTUSB_INTR_RUNNING 0 
-+#define BTUSB_BULK_RUNNING 1 -+#define BTUSB_ISOC_RUNNING 2 -+#define BTUSB_SUSPENDING 3 -+#define BTUSB_DID_ISO_RESUME 4 -+#define BTUSB_USE_ALT3_FOR_WBS 15 -+ -+struct btusb_data { -+ struct hci_dev *hdev; -+ struct usb_device *udev; -+ struct usb_interface *intf; -+ struct usb_interface *isoc; -+ -+ spinlock_t lock; -+ -+ unsigned long flags; -+ -+ struct work_struct work; -+ struct work_struct waker; -+ -+ struct usb_anchor tx_anchor; -+ struct usb_anchor intr_anchor; -+ struct usb_anchor bulk_anchor; -+ struct usb_anchor isoc_anchor; -+ struct usb_anchor deferred; -+ int tx_in_flight; -+ spinlock_t txlock; -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) -+ spinlock_t rxlock; -+ struct sk_buff *evt_skb; -+ struct sk_buff *acl_skb; -+ struct sk_buff *sco_skb; -+#endif -+ -+ struct usb_endpoint_descriptor *intr_ep; -+ struct usb_endpoint_descriptor *bulk_tx_ep; -+ struct usb_endpoint_descriptor *bulk_rx_ep; -+ struct usb_endpoint_descriptor *isoc_tx_ep; -+ struct usb_endpoint_descriptor *isoc_rx_ep; -+ -+ __u8 cmdreq_type; -+ -+ unsigned int sco_num; -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) -+ unsigned int air_mode; -+ bool usb_alt6_packet_flow; -+#endif -+ int isoc_altsetting; -+ int suspend_count; -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) -+ int (*recv_bulk) (struct btusb_data * data, void *buffer, int count); -+#endif -+ struct notifier_block pm_notifier; -+ struct notifier_block shutdown_notifier; -+ void *context; -+}; -diff --git a/drivers/bluetooth/rtk_coex.c b/drivers/bluetooth/rtk_coex.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/bluetooth/rtk_coex.c -@@ -0,0 +1,3065 @@ -+/* -+* -+* Realtek Bluetooth USB driver -+* -+* -+* This program is free software; you can redistribute it and/or modify -+* it under the terms of the GNU General Public License as published by -+* the Free Software Foundation; either version 2 of the License, or -+* (at your option) any later version. -+* -+* This program is distributed in the hope that it will be useful, -+* but WITHOUT ANY WARRANTY; without even the implied warranty of -+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+* GNU General Public License for more details. -+* -+* You should have received a copy of the GNU General Public License -+* along with this program; if not, write to the Free Software -+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+* -+*/ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "rtk_coex.h" -+ -+/* Software coex message can be sent to and receive from WiFi driver by -+ * UDP socket or exported symbol */ -+/* #define RTK_COEX_OVER_SYMBOL */ -+ -+#if BTRTL_HCI_IF == BTRTL_HCIUSB -+#include -+#include "rtk_bt.h" -+#undef RTKBT_DBG -+#undef RTKBT_INFO -+#undef RTKBT_WARN -+#undef RTKBT_ERR -+ -+#elif BTRTL_HCI_IF == BTRTL_HCIUART -+/* #define HCI_VERSION_CODE KERNEL_VERSION(3, 14, 41) */ -+#define HCI_VERSION_CODE LINUX_VERSION_CODE -+ -+#else -+#error "Please set type of HCI interface" -+#endif -+ -+#define RTK_VERSION "1.2" -+ -+#define RTKBT_DBG(fmt, arg...) printk(KERN_DEBUG "rtk_btcoex: " fmt "\n" , ## arg) -+#define RTKBT_INFO(fmt, arg...) printk(KERN_INFO "rtk_btcoex: " fmt "\n" , ## arg) -+#define RTKBT_WARN(fmt, arg...) printk(KERN_WARNING "rtk_btcoex: " fmt "\n", ## arg) -+#define RTKBT_ERR(fmt, arg...) 
printk(KERN_ERR "rtk_btcoex: " fmt "\n", ## arg) -+ -+static struct rtl_coex_struct btrtl_coex; -+ -+#ifdef RTB_SOFTWARE_MAILBOX -+#ifdef RTK_COEX_OVER_SYMBOL -+static struct sk_buff_head rtw_q; -+static struct workqueue_struct *rtw_wq; -+static struct work_struct rtw_work; -+static u8 rtw_coex_on; -+#endif -+#endif -+ -+#define is_profile_connected(conn, profile) ((conn->profile_bitmap & BIT(profile)) > 0) -+#define is_profile_busy(conn, profile) ((conn->profile_status & BIT(profile)) > 0) -+ -+#ifdef RTB_SOFTWARE_MAILBOX -+static void rtk_handle_event_from_wifi(uint8_t * msg); -+#endif -+ -+static void count_a2dp_packet_timeout(struct work_struct *work); -+static void count_pan_packet_timeout(struct work_struct *work); -+static void count_hogp_packet_timeout(struct work_struct *work); -+ -+static int rtl_alloc_buff(struct rtl_coex_struct *coex) -+{ -+ struct rtl_hci_ev *ev; -+ struct rtl_l2_buff *l2; -+ int i; -+ int order; -+ unsigned long addr; -+ unsigned long addr2; -+ int ev_size; -+ int l2_size; -+ int n; -+ -+ spin_lock_init(&coex->buff_lock); -+ -+ INIT_LIST_HEAD(&coex->ev_used_list); -+ INIT_LIST_HEAD(&coex->ev_free_list); -+ -+ INIT_LIST_HEAD(&coex->l2_used_list); -+ INIT_LIST_HEAD(&coex->l2_free_list); -+ -+ n = NUM_RTL_HCI_EV * sizeof(struct rtl_hci_ev); -+ ev_size = ALIGN(n, sizeof(unsigned long)); -+ -+ n = L2_MAX_PKTS * sizeof(struct rtl_l2_buff); -+ l2_size = ALIGN(n, sizeof(unsigned long)); -+ -+ RTKBT_DBG("alloc buffers %d, %d for ev and l2", ev_size, l2_size); -+ -+ order = get_order(ev_size + l2_size); -+ addr = __get_free_pages(GFP_KERNEL, order); -+ if (!addr) { -+ RTKBT_ERR("failed to alloc buffers for ev and l2."); -+ return -ENOMEM; -+ } -+ memset((void *)addr, 0, ev_size + l2_size); -+ -+ coex->pages_addr = addr; -+ coex->buff_size = ev_size + l2_size; -+ -+ ev = (struct rtl_hci_ev *)addr; -+ for (i = 0; i < NUM_RTL_HCI_EV; i++) { -+ list_add_tail(&ev->list, &coex->ev_free_list); -+ ev++; -+ } -+ -+ addr2 = addr + ev_size; -+ l2 = (struct rtl_l2_buff *)addr2; -+ for (i = 0; i < L2_MAX_PKTS; i++) { -+ list_add_tail(&l2->list, &coex->l2_free_list); -+ l2++; -+ } -+ -+ return 0; -+} -+ -+static void rtl_free_buff(struct rtl_coex_struct *coex) -+{ -+ struct rtl_hci_ev *ev; -+ struct rtl_l2_buff *l2; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&coex->buff_lock, flags); -+ -+ while (!list_empty(&coex->ev_used_list)) { -+ ev = list_entry(coex->ev_used_list.next, struct rtl_hci_ev, -+ list); -+ list_del(&ev->list); -+ } -+ -+ while (!list_empty(&coex->ev_free_list)) { -+ ev = list_entry(coex->ev_free_list.next, struct rtl_hci_ev, -+ list); -+ list_del(&ev->list); -+ } -+ -+ while (!list_empty(&coex->l2_used_list)) { -+ l2 = list_entry(coex->l2_used_list.next, struct rtl_l2_buff, -+ list); -+ list_del(&l2->list); -+ } -+ -+ while (!list_empty(&coex->l2_free_list)) { -+ l2 = list_entry(coex->l2_free_list.next, struct rtl_l2_buff, -+ list); -+ list_del(&l2->list); -+ } -+ -+ spin_unlock_irqrestore(&coex->buff_lock, flags); -+ -+ if (coex->buff_size > 0) { -+ free_pages(coex->pages_addr, get_order(coex->buff_size)); -+ coex->pages_addr = 0; -+ coex->buff_size = 0; -+ } -+} -+ -+static struct rtl_hci_ev *rtl_ev_node_get(struct rtl_coex_struct *coex) -+{ -+ struct rtl_hci_ev *ev; -+ unsigned long flags; -+ -+ if (!coex->buff_size) -+ return NULL; -+ -+ spin_lock_irqsave(&coex->buff_lock, flags); -+ if (!list_empty(&coex->ev_free_list)) { -+ ev = list_entry(coex->ev_free_list.next, struct rtl_hci_ev, -+ list); -+ list_del(&ev->list); -+ } else -+ ev = NULL; -+ 
spin_unlock_irqrestore(&coex->buff_lock, flags); -+ return ev; -+} -+ -+static int rtl_ev_node_to_used(struct rtl_coex_struct *coex, -+ struct rtl_hci_ev *ev) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&coex->buff_lock, flags); -+ list_add_tail(&ev->list, &coex->ev_used_list); -+ spin_unlock_irqrestore(&coex->buff_lock, flags); -+ -+ return 0; -+} -+ -+static struct rtl_l2_buff *rtl_l2_node_get(struct rtl_coex_struct *coex) -+{ -+ struct rtl_l2_buff *l2; -+ unsigned long flags; -+ -+ if (!coex->buff_size) -+ return NULL; -+ -+ spin_lock_irqsave(&coex->buff_lock, flags); -+ -+ if(!list_empty(&coex->l2_free_list)) { -+ l2 = list_entry(coex->l2_free_list.next, struct rtl_l2_buff, -+ list); -+ list_del(&l2->list); -+ } else -+ l2 = NULL; -+ -+ spin_unlock_irqrestore(&coex->buff_lock, flags); -+ return l2; -+} -+ -+static int rtl_l2_node_to_used(struct rtl_coex_struct *coex, -+ struct rtl_l2_buff *l2) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&coex->buff_lock, flags); -+ list_add_tail(&l2->list, &coex->l2_used_list); -+ spin_unlock_irqrestore(&coex->buff_lock, flags); -+ -+ return 0; -+} -+ -+static uint8_t psm_to_profile_index(uint16_t psm) -+{ -+ switch (psm) { -+ case PSM_AVCTP: -+ case PSM_SDP: -+ return 0xFF; //ignore -+ -+ case PSM_HID: -+ case PSM_HID_INT: -+ return profile_hid; -+ -+ case PSM_AVDTP: -+ return profile_a2dp; -+ -+ case PSM_PAN: -+ case PSM_OPP: -+ case PSM_FTP: -+ case PSM_BIP: -+ case PSM_RFCOMM: -+ return profile_pan; -+ -+ default: -+ return profile_pan; -+ } -+} -+ -+static rtk_prof_info *find_by_psm(u16 handle, u16 psm) -+{ -+ struct list_head *head = &btrtl_coex.profile_list; -+ struct list_head *iter = NULL; -+ struct list_head *temp = NULL; -+ rtk_prof_info *desc = NULL; -+ -+ list_for_each_safe(iter, temp, head) { -+ desc = list_entry(iter, rtk_prof_info, list); -+ if ((handle & 0xfff) == (desc->handle & 0xfff) && -+ desc->psm == psm) -+ return desc; -+ } -+ -+ return NULL; -+} -+ -+static void rtk_check_setup_timer(rtk_conn_prof * phci_conn, uint8_t profile_index) -+{ -+ int delay = msecs_to_jiffies(1000); -+ if (profile_index == profile_a2dp) { -+ phci_conn->a2dp_packet_count = 0; -+ queue_delayed_work(btrtl_coex.timer_wq, &phci_conn->a2dp_count_work, delay); -+ } -+ -+ if (profile_index == profile_pan) { -+ phci_conn->pan_packet_count = 0; -+ queue_delayed_work(btrtl_coex.timer_wq, &phci_conn->pan_count_work, delay); -+ } -+ -+ /* hogp & voice share one timer now */ -+ if ((profile_index == profile_hogp) || (profile_index == profile_voice)) { -+ if ((0 == phci_conn->profile_refcount[profile_hogp]) -+ && (0 == phci_conn->profile_refcount[profile_voice])) { -+ phci_conn->hogp_packet_count = 0; -+ phci_conn->voice_packet_count = 0; -+ queue_delayed_work(btrtl_coex.timer_wq, &phci_conn->hogp_count_work, delay); -+ } -+ } -+} -+ -+static void rtk_check_del_timer(uint8_t profile_index, rtk_conn_prof * phci_conn) -+{ -+ RTKBT_DBG("%s: handle 0x%4x", __func__, phci_conn->handle); -+ if (profile_a2dp == profile_index) { -+ phci_conn->a2dp_packet_count = 0; -+ cancel_delayed_work_sync(&phci_conn->a2dp_count_work); -+ } -+ if (profile_pan == profile_index) { -+ phci_conn->pan_packet_count = 0; -+ cancel_delayed_work_sync(&phci_conn->pan_count_work); -+ } -+ if (profile_hogp == profile_index) { -+ phci_conn->hogp_packet_count = 0; -+ if (phci_conn->profile_refcount[profile_voice] == 0) { -+ cancel_delayed_work_sync(&phci_conn->hogp_count_work); -+ } -+ } -+ if (profile_voice == profile_index) { -+ phci_conn->voice_packet_count = 0; -+ if 
(phci_conn->profile_refcount[profile_hogp] == 0) { -+ cancel_delayed_work_sync(&phci_conn->hogp_count_work); -+ } -+ } -+} -+ -+ -+ -+static rtk_conn_prof *find_connection_by_handle(struct rtl_coex_struct * coex, -+ uint16_t handle) -+{ -+ struct list_head *head = &coex->conn_hash; -+ struct list_head *iter = NULL, *temp = NULL; -+ rtk_conn_prof *desc = NULL; -+ -+ list_for_each_safe(iter, temp, head) { -+ desc = list_entry(iter, rtk_conn_prof, list); -+ if ((handle & 0xFFF) == desc->handle) { -+ return desc; -+ } -+ } -+ return NULL; -+} -+ -+static rtk_conn_prof *allocate_connection_by_handle(uint16_t handle) -+{ -+ rtk_conn_prof *phci_conn = NULL; -+ phci_conn = kmalloc(sizeof(rtk_conn_prof), GFP_ATOMIC); -+ if (phci_conn) -+ phci_conn->handle = handle; -+ -+ return phci_conn; -+} -+ -+static void init_connection_hash(struct rtl_coex_struct * coex) -+{ -+ struct list_head *head = &coex->conn_hash; -+ INIT_LIST_HEAD(head); -+} -+ -+static void add_connection_to_hash(struct rtl_coex_struct * coex, -+ rtk_conn_prof * desc) -+{ -+ struct list_head *head = &coex->conn_hash; -+ list_add_tail(&desc->list, head); -+ INIT_DELAYED_WORK(&desc->a2dp_count_work, (void *)count_a2dp_packet_timeout); -+ INIT_DELAYED_WORK(&desc->pan_count_work, (void *)count_pan_packet_timeout); -+ INIT_DELAYED_WORK(&desc->hogp_count_work, (void *)count_hogp_packet_timeout); -+} -+ -+static void delete_connection_from_hash(rtk_conn_prof * desc) -+{ -+ if (desc) { -+ cancel_delayed_work_sync(&desc->a2dp_count_work); -+ cancel_delayed_work_sync(&desc->pan_count_work); -+ cancel_delayed_work_sync(&desc->hogp_count_work); -+ list_del(&desc->list); -+ kfree(desc); -+ } -+} -+ -+static void flush_connection_hash(struct rtl_coex_struct * coex) -+{ -+ struct list_head *head = &coex->conn_hash; -+ struct list_head *iter = NULL, *temp = NULL; -+ rtk_conn_prof *desc = NULL; -+ -+ list_for_each_safe(iter, temp, head) { -+ desc = list_entry(iter, rtk_conn_prof, list); -+ if (desc) { -+ cancel_delayed_work_sync(&desc->a2dp_count_work); -+ cancel_delayed_work_sync(&desc->pan_count_work); -+ cancel_delayed_work_sync(&desc->hogp_count_work); -+ list_del(&desc->list); -+ kfree(desc); -+ } -+ } -+ //INIT_LIST_HEAD(head); -+} -+ -+static void init_profile_hash(struct rtl_coex_struct * coex) -+{ -+ struct list_head *head = &coex->profile_list; -+ INIT_LIST_HEAD(head); -+} -+ -+static uint8_t list_allocate_add(uint16_t handle, uint16_t psm, -+ uint8_t profile_index, uint16_t dcid, -+ uint16_t scid) -+{ -+ rtk_prof_info *pprof_info = NULL; -+ -+ pprof_info = kmalloc(sizeof(rtk_prof_info), GFP_ATOMIC); -+ if (NULL == pprof_info) { -+ RTKBT_ERR("list_allocate_add: allocate error"); -+ return FALSE; -+ } -+ -+ /* Check if it is the second l2cap connection for a2dp -+ * a2dp signal channel will be created first than media channel. 
-+ */ -+ if (psm == PSM_AVDTP) { -+ rtk_prof_info *pinfo = find_by_psm(handle, psm); -+ if (!pinfo) { -+ pprof_info->flags = A2DP_SIGNAL; -+ RTKBT_INFO("%s: Add a2dp signal channel", __func__); -+ } else { -+ pprof_info->flags = A2DP_MEDIA; -+ RTKBT_INFO("%s: Add a2dp media channel", __func__); -+ } -+ } -+ -+ pprof_info->handle = handle; -+ pprof_info->psm = psm; -+ pprof_info->scid = scid; -+ pprof_info->dcid = dcid; -+ pprof_info->profile_index = profile_index; -+ list_add_tail(&(pprof_info->list), &(btrtl_coex.profile_list)); -+ -+ return TRUE; -+} -+ -+static void delete_profile_from_hash(rtk_prof_info * desc) -+{ -+ if (desc) { -+ RTKBT_DBG("Delete profile: hndl 0x%04x, psm 0x%04x, dcid 0x%04x, " -+ "scid 0x%04x", desc->handle, desc->psm, desc->dcid, -+ desc->scid); -+ -+ list_del(&desc->list); -+ kfree(desc); -+ desc = NULL; -+ } -+} -+ -+static void flush_profile_hash(struct rtl_coex_struct * coex) -+{ -+ struct list_head *head = &coex->profile_list; -+ struct list_head *iter = NULL, *temp = NULL; -+ rtk_prof_info *desc = NULL; -+ -+ spin_lock(&btrtl_coex.spin_lock_profile); -+ list_for_each_safe(iter, temp, head) { -+ desc = list_entry(iter, rtk_prof_info, list); -+ if (desc) { -+ RTKBT_DBG("Delete profile: hndl 0x%04x, psm 0x%04x, " -+ "dcid 0x%04x, scid 0x%04x", desc->handle, -+ desc->psm, desc->dcid, desc->scid); -+ -+ list_del(&desc->list); -+ kfree(desc); -+ desc = NULL; -+ } -+ } -+ //INIT_LIST_HEAD(head); -+ spin_unlock(&btrtl_coex.spin_lock_profile); -+} -+ -+static rtk_prof_info *find_profile_by_handle_scid(struct rtl_coex_struct * -+ coex, uint16_t handle, -+ uint16_t scid) -+{ -+ struct list_head *head = &coex->profile_list; -+ struct list_head *iter = NULL, *temp = NULL; -+ rtk_prof_info *desc = NULL; -+ -+ list_for_each_safe(iter, temp, head) { -+ desc = list_entry(iter, rtk_prof_info, list); -+ if (((handle & 0xFFF) == desc->handle) && (scid == desc->scid)) { -+ return desc; -+ } -+ } -+ return NULL; -+} -+ -+static rtk_prof_info *find_profile_by_handle_dcid(struct rtl_coex_struct * -+ coex, uint16_t handle, -+ uint16_t dcid) -+{ -+ struct list_head *head = &coex->profile_list; -+ struct list_head *iter = NULL, *temp = NULL; -+ rtk_prof_info *desc = NULL; -+ -+ list_for_each_safe(iter, temp, head) { -+ desc = list_entry(iter, rtk_prof_info, list); -+ if (((handle & 0xFFF) == desc->handle) && (dcid == desc->dcid)) { -+ return desc; -+ } -+ } -+ return NULL; -+} -+ -+static rtk_prof_info *find_profile_by_handle_dcid_scid(struct rtl_coex_struct -+ * coex, uint16_t handle, -+ uint16_t dcid, -+ uint16_t scid) -+{ -+ struct list_head *head = &coex->profile_list; -+ struct list_head *iter = NULL, *temp = NULL; -+ rtk_prof_info *desc = NULL; -+ -+ list_for_each_safe(iter, temp, head) { -+ desc = list_entry(iter, rtk_prof_info, list); -+ if (((handle & 0xFFF) == desc->handle) && (dcid == desc->dcid) -+ && (scid == desc->scid)) { -+ return desc; -+ } -+ } -+ return NULL; -+} -+ -+static void rtk_vendor_cmd_to_fw(uint16_t opcode, uint8_t parameter_len, -+ uint8_t * parameter) -+{ -+ int len = HCI_CMD_PREAMBLE_SIZE + parameter_len; -+ uint8_t *p; -+ struct sk_buff *skb; -+ struct hci_dev *hdev = btrtl_coex.hdev; -+ -+ if (!hdev) { -+ RTKBT_ERR("No HCI device"); -+ return; -+ } else if (!test_bit(HCI_UP, &hdev->flags)) { -+ RTKBT_WARN("HCI device is down"); -+ return; -+ } -+ -+ skb = bt_skb_alloc(len, GFP_ATOMIC); -+ if (!skb) { -+ RTKBT_DBG("there is no room for cmd 0x%x", opcode); -+ return; -+ } -+ -+ p = (uint8_t *) skb_put(skb, HCI_CMD_PREAMBLE_SIZE); -+ UINT16_TO_STREAM(p, 
opcode); -+ *p++ = parameter_len; -+ -+ if (parameter_len) -+ memcpy(skb_put(skb, parameter_len), parameter, parameter_len); -+ -+ bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; -+ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) -+#if HCI_VERSION_CODE < KERNEL_VERSION(4, 4, 0) -+ bt_cb(skb)->opcode = opcode; -+#else -+ bt_cb(skb)->hci.opcode = opcode; -+#endif -+#endif -+ -+ /* Stand-alone HCI commands must be flagged as -+ * single-command requests. -+ */ -+#if HCI_VERSION_CODE >= KERNEL_VERSION(3, 10, 0) -+#if HCI_VERSION_CODE < KERNEL_VERSION(4, 4, 0) -+ bt_cb(skb)->req.start = true; -+#else -+ -+#if HCI_VERSION_CODE < KERNEL_VERSION(4, 5, 0) -+ bt_cb(skb)->hci.req_start = true; -+#else -+ -+ bt_cb(skb)->hci.req_flags |= HCI_REQ_START; -+#endif -+ -+#endif /* 4.4.0 */ -+#endif /* 3.10.0 */ -+ RTKBT_DBG("%s: opcode 0x%x", __func__, opcode); -+ -+ /* It is harmless if set skb->dev twice. The dev will be used in -+ * btusb_send_frame() after or equal to kernel/hci 3.13.0, -+ * the hdev will not come from skb->dev. */ -+#if HCI_VERSION_CODE < KERNEL_VERSION(3, 13, 0) -+ skb->dev = (void *)btrtl_coex.hdev; -+#endif -+ /* Put the skb to the global hdev->cmd_q */ -+ skb_queue_tail(&hdev->cmd_q, skb); -+ -+#if HCI_VERSION_CODE < KERNEL_VERSION(3, 3, 0) -+ tasklet_schedule(&hdev->cmd_task); -+#else -+ queue_work(hdev->workqueue, &hdev->cmd_work); -+#endif -+ -+ return; -+} -+ -+static uint8_t profileinfo_cmd = 0; -+static void rtk_notify_profileinfo_to_fw(void) -+{ -+ struct list_head *head = NULL; -+ struct list_head *iter = NULL; -+ struct list_head *temp = NULL; -+ rtk_conn_prof *hci_conn = NULL; -+ uint8_t handle_number = 0; -+ uint32_t buffer_size = 0; -+ uint8_t *p_buf = NULL; -+ uint8_t *p = NULL; -+ -+ head = &btrtl_coex.conn_hash; -+ list_for_each_safe(iter, temp, head) { -+ hci_conn = list_entry(iter, rtk_conn_prof, list); -+ if (hci_conn && hci_conn->profile_bitmap) -+ handle_number++; -+ } -+ -+ if(!profileinfo_cmd) { -+ buffer_size = 1 + handle_number * 3 + 1; -+ } else { -+ buffer_size = 1 + handle_number * 6; -+ } -+ -+ p_buf = kmalloc(buffer_size, GFP_ATOMIC); -+ -+ if (NULL == p_buf) { -+ RTKBT_ERR("%s: alloc error", __func__); -+ return; -+ } -+ p = p_buf; -+ *p++ = handle_number; -+ -+ RTKBT_DBG("%s: BufferSize %u", __func__, buffer_size); -+ RTKBT_DBG("%s: NumberOfHandles %u", __func__, handle_number); -+ head = &btrtl_coex.conn_hash; -+ list_for_each(iter, head) { -+ hci_conn = list_entry(iter, rtk_conn_prof, list); -+ if (hci_conn && hci_conn->profile_bitmap) { -+ if(!profileinfo_cmd) { -+ UINT16_TO_STREAM(p, hci_conn->handle); -+ RTKBT_DBG("%s: handle 0x%04x", __func__, -+ hci_conn->handle); -+ *p++ = hci_conn->profile_bitmap; -+ btrtl_coex.profile_status |= hci_conn->profile_status; -+ } else { -+ UINT16_TO_STREAM(p, hci_conn->handle); -+ UINT16_TO_STREAM(p, hci_conn->profile_bitmap); -+ UINT16_TO_STREAM(p, hci_conn->profile_status); -+ RTKBT_DBG("%s: profile_status 0x%02x", __func__, -+ hci_conn->profile_status); -+ } -+ RTKBT_DBG("%s: profile_bitmap 0x%02x", __func__, -+ hci_conn->profile_bitmap); -+ handle_number--; -+ } -+ if (0 == handle_number) -+ break; -+ } -+ -+ if(!profileinfo_cmd) { -+ *p++ = btrtl_coex.profile_status; -+ btrtl_coex.profile_status = 0; -+ rtk_vendor_cmd_to_fw(HCI_VENDOR_SET_PROFILE_REPORT_LEGACY_COMMAND, buffer_size, -+ p_buf); -+ } else { -+ rtk_vendor_cmd_to_fw(HCI_VENDOR_SET_PROFILE_REPORT_COMMAND, buffer_size, -+ p_buf); -+ } -+ -+ kfree(p_buf); -+ return; -+} -+ -+static void update_profile_state(rtk_conn_prof * phci_conn, -+ uint8_t 
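rtk_vendor_cmd_to_fw() above hand-builds the HCI command packet inside the skb: a little-endian 16-bit opcode plus a one-byte parameter length (together HCI_CMD_PREAMBLE_SIZE bytes), followed by the parameters, before the skb is queued on hdev->cmd_q. A minimal userspace sketch of that wire layout; the 0xfc19 opcode and the bitpool payload are illustrative stand-ins, since the real HCI_VENDOR_* values sit in rtk_coex.h and are not part of this hunk.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HCI_CMD_PREAMBLE_SIZE 3   /* opcode (2 bytes, little endian) + length (1 byte) */

/* Serializes an HCI command the same way the UINT16_TO_STREAM()/skb_put()
 * sequence above does, just into a flat buffer. */
static int build_hci_cmd(uint8_t *out, size_t out_len,
			 uint16_t opcode, const uint8_t *param, uint8_t plen)
{
	if (out_len < (size_t)HCI_CMD_PREAMBLE_SIZE + plen)
		return -1;

	out[0] = opcode & 0xff;          /* least significant opcode byte first */
	out[1] = opcode >> 8;
	out[2] = plen;
	if (plen)
		memcpy(out + HCI_CMD_PREAMBLE_SIZE, param, plen);
	return HCI_CMD_PREAMBLE_SIZE + plen;
}

int main(void)
{
	uint8_t bitpool = 53;            /* example payload, like the SBC bitpool report */
	uint8_t buf[8];
	int n = build_hci_cmd(buf, sizeof(buf),
			      0xfc19 /* example vendor opcode, not the driver's */,
			      &bitpool, 1);

	for (int i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}
```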
profile_index, uint8_t is_busy) -+{ -+ uint8_t need_update = FALSE; -+ -+ RTKBT_DBG("%s: is_busy %d, profile_index %x", __func__, -+ is_busy, profile_index); -+ if ((phci_conn->profile_bitmap & BIT(profile_index)) == 0) { -+ RTKBT_ERR("%s: : ERROR!!! profile(Index: %x) does not exist", -+ __func__, profile_index); -+ return; -+ } -+ -+ -+ if (is_busy) { -+ if ((phci_conn->profile_status & BIT(profile_index)) == 0) { -+ need_update = TRUE; -+ phci_conn->profile_status |= BIT(profile_index); -+ } -+ } else { -+ if ((phci_conn->profile_status & BIT(profile_index)) > 0) { -+ need_update = TRUE; -+ phci_conn->profile_status &= ~(BIT(profile_index)); -+ } -+ } -+ -+ if (need_update) { -+ RTKBT_DBG("%s: phci_conn->profile_status 0x%02x", -+ __func__, phci_conn->profile_status); -+ rtk_notify_profileinfo_to_fw(); -+ } -+} -+ -+static void update_profile_connection(rtk_conn_prof * phci_conn, -+ uint8_t profile_index, uint8_t is_add) -+{ -+ uint8_t need_update = FALSE; -+ -+ RTKBT_DBG("%s: is_add %d, profile_index %x", __func__, -+ is_add, profile_index); -+ -+ if (is_add) { -+ -+ if (0 == phci_conn->profile_refcount[profile_index]) { -+ need_update = TRUE; -+ phci_conn->profile_bitmap |= BIT(profile_index); -+ /* SCO is always busy */ -+ if (profile_index == profile_sco) -+ phci_conn->profile_status |= -+ BIT(profile_index); -+ -+ rtk_check_setup_timer(phci_conn, profile_index); -+ } -+ phci_conn->profile_refcount[profile_index]++; -+ } else { -+ if (!phci_conn->profile_refcount[profile_index]) { -+ RTKBT_WARN("profile %u refcount is already zero", -+ profile_index); -+ return; -+ } -+ -+ phci_conn->profile_refcount[profile_index]--; -+ if (0 == phci_conn->profile_refcount[profile_index]) { -+ need_update = TRUE; -+ phci_conn->profile_bitmap &= ~(BIT(profile_index)); -+ -+ phci_conn->profile_status &= ~(BIT(profile_index)); -+ rtk_check_del_timer(profile_index, phci_conn); -+ /* clear profile_hid_interval if need */ -+ if ((profile_hid == profile_index) -+ && (phci_conn-> -+ profile_bitmap & (BIT(profile_hid_interval)))) { -+ phci_conn->profile_bitmap &= -+ ~(BIT(profile_hid_interval)); -+ } -+ } -+ } -+ -+ RTKBT_DBG("%s: phci_conn->profile_bitmap 0x%02x", __func__, -+ phci_conn->profile_bitmap); -+ -+ if (need_update) -+ rtk_notify_profileinfo_to_fw(); -+} -+ -+static void update_hid_active_state(uint16_t handle, uint16_t interval) -+{ -+ uint8_t need_update = 0; -+ rtk_conn_prof *phci_conn = -+ find_connection_by_handle(&btrtl_coex, handle); -+ -+ if (phci_conn == NULL) -+ return; -+ -+ RTKBT_DBG("%s: handle 0x%04x, interval %u", __func__, handle, interval); -+ if (((phci_conn->profile_bitmap) & (BIT(profile_hid))) == 0) { -+ RTKBT_DBG("HID not connected, nothing to be down"); -+ return; -+ } -+ -+ if (interval < 60) { -+ if ((phci_conn->profile_bitmap & (BIT(profile_hid_interval))) == -+ 0) { -+ need_update = 1; -+ phci_conn->profile_bitmap |= BIT(profile_hid_interval); -+ -+ phci_conn->profile_refcount[profile_hid_interval]++; -+ if (phci_conn-> -+ profile_refcount[profile_hid_interval] == 1) -+ phci_conn->profile_status |= -+ BIT(profile_hid); -+ } -+ } else { -+ if ((phci_conn->profile_bitmap & (BIT(profile_hid_interval)))) { -+ need_update = 1; -+ phci_conn->profile_bitmap &= -+ ~(BIT(profile_hid_interval)); -+ -+ phci_conn->profile_refcount[profile_hid_interval]--; -+ if (phci_conn-> -+ profile_refcount[profile_hid_interval] == 0) -+ phci_conn->profile_status &= -+ ~(BIT(profile_hid)); -+ } -+ } -+ -+ if (need_update) -+ rtk_notify_profileinfo_to_fw(); -+} -+ -+static uint8_t 
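update_profile_connection() above keeps a per-profile reference count next to the connection's profile bitmap and only pushes a new profile report to the firmware on 0 to 1 and 1 to 0 transitions of that count. A reduced userspace model of just that transition logic (the status bits, SCO special case and timers are left out):

```c
#include <stdint.h>
#include <stdio.h>

/* A profile is reflected in the bitmap only while its reference count is
 * non-zero; the return value models "need to re-notify the firmware". */
struct conn {
	uint8_t bitmap;
	uint8_t refcount[8];
};

static int profile_ref(struct conn *c, unsigned int idx, int add)
{
	int need_update = 0;

	if (add) {
		if (c->refcount[idx]++ == 0) {
			c->bitmap |= 1u << idx;
			need_update = 1;             /* first user of this profile */
		}
	} else if (c->refcount[idx] && --c->refcount[idx] == 0) {
		c->bitmap &= ~(1u << idx);
		need_update = 1;                     /* last user went away */
	}
	return need_update;
}

int main(void)
{
	struct conn c = { 0 };

	printf("add:    update=%d bitmap=%02x\n", profile_ref(&c, 0, 1), c.bitmap);
	printf("add:    update=%d bitmap=%02x\n", profile_ref(&c, 0, 1), c.bitmap);
	printf("remove: update=%d bitmap=%02x\n", profile_ref(&c, 0, 0), c.bitmap);
	printf("remove: update=%d bitmap=%02x\n", profile_ref(&c, 0, 0), c.bitmap);
	return 0;
}
```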
handle_l2cap_con_req(uint16_t handle, uint16_t psm, -+ uint16_t scid, uint8_t direction) -+{ -+ uint8_t status = FALSE; -+ rtk_prof_info *prof_info = NULL; -+ uint8_t profile_index = psm_to_profile_index(psm); -+ -+ if (profile_index == 0xFF) { -+ RTKBT_DBG("PSM(0x%04x) do not need parse", psm); -+ return status; -+ } -+ -+ spin_lock(&btrtl_coex.spin_lock_profile); -+ if (direction) //1: out -+ prof_info = -+ find_profile_by_handle_scid(&btrtl_coex, handle, scid); -+ else // 0:in -+ prof_info = -+ find_profile_by_handle_dcid(&btrtl_coex, handle, scid); -+ -+ if (prof_info) { -+ RTKBT_DBG("%s: this profile is already exist!", __func__); -+ spin_unlock(&btrtl_coex.spin_lock_profile); -+ return status; -+ } -+ -+ if (direction) //1: out -+ status = list_allocate_add(handle, psm, profile_index, 0, scid); -+ else // 0:in -+ status = list_allocate_add(handle, psm, profile_index, scid, 0); -+ -+ spin_unlock(&btrtl_coex.spin_lock_profile); -+ -+ if (!status) -+ RTKBT_ERR("%s: list_allocate_add failed!", __func__); -+ -+ return status; -+} -+ -+static uint8_t handle_l2cap_con_rsp(uint16_t handle, uint16_t dcid, -+ uint16_t scid, uint8_t direction, -+ uint8_t result) -+{ -+ rtk_prof_info *prof_info = NULL; -+ rtk_conn_prof *phci_conn = NULL; -+ -+ spin_lock(&btrtl_coex.spin_lock_profile); -+ if (!direction) //0, in -+ prof_info = -+ find_profile_by_handle_scid(&btrtl_coex, handle, scid); -+ else //1, out -+ prof_info = -+ find_profile_by_handle_dcid(&btrtl_coex, handle, scid); -+ -+ if (!prof_info) { -+ //RTKBT_DBG("handle_l2cap_con_rsp: prof_info Not Find!!"); -+ spin_unlock(&btrtl_coex.spin_lock_profile); -+ return FALSE; -+ } -+ -+ if (!result) { //success -+ RTKBT_DBG("l2cap connection success, update connection"); -+ if (!direction) //0, in -+ prof_info->dcid = dcid; -+ else //1, out -+ prof_info->scid = dcid; -+ -+ phci_conn = find_connection_by_handle(&btrtl_coex, handle); -+ if (phci_conn) -+ update_profile_connection(phci_conn, -+ prof_info->profile_index, -+ TRUE); -+ } -+ -+ spin_unlock(&btrtl_coex.spin_lock_profile); -+ return TRUE; -+} -+ -+static uint8_t handle_l2cap_discon_req(uint16_t handle, uint16_t dcid, -+ uint16_t scid, uint8_t direction) -+{ -+ rtk_prof_info *prof_info = NULL; -+ rtk_conn_prof *phci_conn = NULL; -+ RTKBT_DBG("%s: handle 0x%04x, dcid 0x%04x, scid 0x%04x, dir %u", -+ __func__, handle, dcid, scid, direction); -+ -+ spin_lock(&btrtl_coex.spin_lock_profile); -+ if (!direction) //0: in -+ prof_info = -+ find_profile_by_handle_dcid_scid(&btrtl_coex, handle, -+ scid, dcid); -+ else //1: out -+ prof_info = -+ find_profile_by_handle_dcid_scid(&btrtl_coex, handle, -+ dcid, scid); -+ -+ if (!prof_info) { -+ //LogMsg("handle_l2cap_discon_req: prof_info Not Find!"); -+ spin_unlock(&btrtl_coex.spin_lock_profile); -+ return 0; -+ } -+ -+ phci_conn = find_connection_by_handle(&btrtl_coex, handle); -+ if (!phci_conn) { -+ spin_unlock(&btrtl_coex.spin_lock_profile); -+ return 0; -+ } -+ -+ update_profile_connection(phci_conn, prof_info->profile_index, FALSE); -+ if (prof_info->profile_index == profile_a2dp && -+ (phci_conn->profile_bitmap & BIT(profile_sink))) -+ update_profile_connection(phci_conn, profile_sink, FALSE); -+ -+ delete_profile_from_hash(prof_info); -+ spin_unlock(&btrtl_coex.spin_lock_profile); -+ -+ return 1; -+} -+ -+static const char sample_freqs[4][8] = { -+ "16", "32", "44.1", "48" -+}; -+ -+static const uint8_t sbc_blocks[4] = { 4, 8, 12, 16 }; -+ -+static const char chan_modes[4][16] = { -+ "MONO", "DUAL_CHANNEL", "STEREO", "JOINT_STEREO" -+}; -+ -+static 
const char alloc_methods[2][12] = { -+ "LOUDNESS", "SNR" -+}; -+ -+static const uint8_t subbands[2] = { 4, 8 }; -+ -+static void print_sbc_header(struct sbc_frame_hdr *hdr) -+{ -+ RTKBT_DBG("syncword: %02x", hdr->syncword); -+ RTKBT_DBG("freq %skHz", sample_freqs[hdr->sampling_frequency]); -+ RTKBT_DBG("blocks %u", sbc_blocks[hdr->blocks]); -+ RTKBT_DBG("channel mode %s", chan_modes[hdr->channel_mode]); -+ RTKBT_DBG("allocation method %s", -+ alloc_methods[hdr->allocation_method]); -+ RTKBT_DBG("subbands %u", subbands[hdr->subbands]); -+} -+ -+static void packets_count(uint16_t handle, uint16_t scid, uint16_t length, -+ uint8_t direction, u8 *user_data) -+{ -+ rtk_prof_info *prof_info = NULL; -+ -+ rtk_conn_prof *hci_conn = -+ find_connection_by_handle(&btrtl_coex, handle); -+ if (NULL == hci_conn) -+ return; -+ -+ if (0 == hci_conn->type) { -+ if (!direction) //0: in -+ prof_info = -+ find_profile_by_handle_scid(&btrtl_coex, handle, -+ scid); -+ else //1: out -+ prof_info = -+ find_profile_by_handle_dcid(&btrtl_coex, handle, -+ scid); -+ -+ if (!prof_info) { -+ //RTKBT_DBG("packets_count: prof_info Not Find!"); -+ return; -+ } -+ -+ /* avdtp media data */ -+ if (prof_info->profile_index == profile_a2dp && -+ prof_info->flags == A2DP_MEDIA) { -+ if (!is_profile_busy(hci_conn, profile_a2dp)) { -+ struct sbc_frame_hdr *sbc_header; -+ struct rtp_header *rtph; -+ u8 bitpool; -+ -+ update_profile_state(hci_conn, profile_a2dp, TRUE); -+ if (!direction) { -+ if (!(hci_conn->profile_bitmap & BIT(profile_sink))) { -+ hci_conn->profile_bitmap |= BIT(profile_sink); -+ update_profile_connection(hci_conn, profile_sink, 1); -+ } -+ update_profile_state(hci_conn, profile_sink, TRUE); -+ } -+ -+ /* We assume it is SBC if the packet length -+ * is bigger than 100 bytes -+ */ -+ if (length > 100) { -+ RTKBT_INFO("Length %u", length); -+ rtph = (struct rtp_header *)user_data; -+ -+ RTKBT_DBG("rtp: v %u, cc %u, pt %u", -+ rtph->v, rtph->cc, rtph->pt); -+ /* move forward */ -+ user_data += sizeof(struct rtp_header) + -+ rtph->cc * 4 + 1; -+ -+ /* point to the sbc frame header */ -+ sbc_header = (struct sbc_frame_hdr *)user_data; -+ bitpool = sbc_header->bitpool; -+ -+ print_sbc_header(sbc_header); -+ -+ RTKBT_DBG("bitpool %u", bitpool); -+ -+ rtk_vendor_cmd_to_fw(HCI_VENDOR_SET_BITPOOL, -+ 1, &bitpool); -+ } -+ } -+ hci_conn->a2dp_packet_count++; -+ } -+ -+ if (prof_info->profile_index == profile_pan) -+ hci_conn->pan_packet_count++; -+ } -+} -+ -+static void count_a2dp_packet_timeout(struct work_struct *work) -+{ -+ rtk_conn_prof *hci_conn = container_of(work, rtk_conn_prof, -+ a2dp_count_work.work); -+ if (hci_conn->a2dp_packet_count) -+ RTKBT_DBG("%s: a2dp_packet_count %d", __func__, -+ hci_conn->a2dp_packet_count); -+ if (hci_conn->a2dp_packet_count == 0) { -+ if (is_profile_busy(hci_conn, profile_a2dp)) { -+ RTKBT_DBG("%s: a2dp busy->idle!", __func__); -+ update_profile_state(hci_conn, profile_a2dp, FALSE); -+ if (hci_conn->profile_bitmap & BIT(profile_sink)) -+ update_profile_state(hci_conn, profile_sink, FALSE); -+ } -+ } -+ hci_conn->a2dp_packet_count = 0; -+ -+ queue_delayed_work(btrtl_coex.timer_wq, &hci_conn->a2dp_count_work, msecs_to_jiffies(1000)); -+} -+ -+static void count_pan_packet_timeout(struct work_struct *work) -+{ -+ rtk_conn_prof *hci_conn = container_of(work, rtk_conn_prof, -+ pan_count_work.work); -+ if (hci_conn->pan_packet_count) -+ RTKBT_DBG("%s: pan_packet_count %d", __func__, -+ hci_conn->pan_packet_count); -+ if (hci_conn->pan_packet_count < PAN_PACKET_COUNT) { -+ if 
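packets_count() above peeks into A2DP media packets to report the SBC bitpool to the firmware: it skips the RTP header (12 fixed bytes plus 4 bytes per CSRC entry) and the one-byte A2DP payload header, then reads the third byte of the SBC frame header. A self-contained sketch of that offset arithmetic; the 0x9C syncword check is added here as a sanity guard, whereas the original only relies on the payload being longer than 100 bytes.

```c
#include <stdint.h>
#include <stdio.h>

#define RTP_HDR_LEN 12   /* fixed part of an RTP header; CSRC words follow */

/* Layout assumed by the code above:
 *   [RTP header][CSRC * 4][1-byte A2DP payload header][SBC frame header ...]
 * SBC frame header: syncword 0x9C, one packed parameter byte, then bitpool. */
static int sbc_bitpool(const uint8_t *payload, int len)
{
	int cc, off;

	if (len < RTP_HDR_LEN + 1 + 3)
		return -1;

	cc  = payload[0] & 0x0f;                  /* CSRC count from the first RTP byte */
	off = RTP_HDR_LEN + cc * 4 + 1;           /* skip RTP, CSRCs, A2DP payload header */

	if (len < off + 3 || payload[off] != 0x9c)
		return -1;                        /* no SBC frame where we expected one */

	return payload[off + 2];                  /* third SBC header byte is the bitpool */
}

int main(void)
{
	/* Hand-built example: RTP header with cc=0, one A2DP payload-header byte,
	 * then a minimal SBC header with bitpool 53. */
	uint8_t pkt[RTP_HDR_LEN + 1 + 4] = { 0x80 };

	pkt[RTP_HDR_LEN + 1 + 0] = 0x9c;
	pkt[RTP_HDR_LEN + 1 + 2] = 53;
	printf("bitpool %d\n", sbc_bitpool(pkt, sizeof(pkt)));
	return 0;
}
```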
(is_profile_busy(hci_conn, profile_pan)) { -+ RTKBT_DBG("%s: pan busy->idle!", __func__); -+ update_profile_state(hci_conn, profile_pan, FALSE); -+ } -+ } else { -+ if (!is_profile_busy(hci_conn, profile_pan)) { -+ RTKBT_DBG("timeout_handler: pan idle->busy!"); -+ update_profile_state(hci_conn, profile_pan, TRUE); -+ } -+ } -+ hci_conn->pan_packet_count = 0; -+ queue_delayed_work(btrtl_coex.timer_wq, &hci_conn->pan_count_work, msecs_to_jiffies(1000)); -+} -+ -+static void count_hogp_packet_timeout(struct work_struct *work) -+{ -+ rtk_conn_prof *hci_conn = container_of(work, rtk_conn_prof, -+ hogp_count_work.work); -+ if (hci_conn->hogp_packet_count) -+ RTKBT_DBG("%s: hogp_packet_count %d", __func__, -+ hci_conn->hogp_packet_count); -+ if (hci_conn->hogp_packet_count == 0) { -+ if (is_profile_busy(hci_conn, profile_hogp)) { -+ RTKBT_DBG("%s: hogp busy->idle!", __func__); -+ update_profile_state(hci_conn, profile_hogp, FALSE); -+ } -+ } -+ hci_conn->hogp_packet_count = 0; -+ -+ if (hci_conn->voice_packet_count) -+ RTKBT_DBG("%s: voice_packet_count %d", __func__, -+ hci_conn->voice_packet_count); -+ if (hci_conn->voice_packet_count == 0) { -+ if (is_profile_busy(hci_conn, profile_voice)) { -+ RTKBT_DBG("%s: voice busy->idle!", __func__); -+ update_profile_state(hci_conn, profile_voice, FALSE); -+ } -+ } -+ hci_conn->voice_packet_count = 0; -+ queue_delayed_work(btrtl_coex.timer_wq, &hci_conn->hogp_count_work, msecs_to_jiffies(1000)); -+} -+ -+#ifdef RTB_SOFTWARE_MAILBOX -+ -+#ifndef RTK_COEX_OVER_SYMBOL -+static int udpsocket_send(char *tx_msg, int msg_size) -+{ -+ u8 error = 0; -+ struct msghdr udpmsg; -+ mm_segment_t oldfs; -+ struct iovec iov; -+ -+ RTKBT_DBG("send msg %s with len:%d", tx_msg, msg_size); -+ -+ if (btrtl_coex.sock_open) { -+ iov.iov_base = (void *)tx_msg; -+ iov.iov_len = msg_size; -+ udpmsg.msg_name = &btrtl_coex.wifi_addr; -+ udpmsg.msg_namelen = sizeof(struct sockaddr_in); -+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) -+ udpmsg.msg_iov = &iov; -+ udpmsg.msg_iovlen = 1; -+#else -+ iov_iter_init(&udpmsg.msg_iter, WRITE, &iov, 1, msg_size); -+#endif -+ udpmsg.msg_control = NULL; -+ udpmsg.msg_controllen = 0; -+ udpmsg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; -+ oldfs = get_fs(); -+ set_fs(KERNEL_DS); -+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) -+ error = sock_sendmsg(btrtl_coex.udpsock, &udpmsg, msg_size); -+#else -+ error = sock_sendmsg(btrtl_coex.udpsock, &udpmsg); -+#endif -+ set_fs(oldfs); -+ -+ if (error < 0) -+ RTKBT_DBG("Error when sendimg msg, error:%d", error); -+ } -+ -+ return error; -+} -+#endif -+ -+#ifdef RTK_COEX_OVER_SYMBOL -+/* Receive message from WiFi */ -+u8 rtw_btcoex_wifi_to_bt(u8 *msg, u8 msg_size) -+{ -+ struct sk_buff *nskb; -+ -+ if (!rtw_coex_on) { -+ RTKBT_WARN("Bluetooth is closed"); -+ return 0; -+ } -+ -+ nskb = alloc_skb(msg_size, GFP_ATOMIC); -+ if (!nskb) { -+ RTKBT_ERR("Couldnt alloc skb for WiFi coex message"); -+ return 0; -+ } -+ -+ memcpy(skb_put(nskb, msg_size), msg, msg_size); -+ skb_queue_tail(&rtw_q, nskb); -+ -+ queue_work(rtw_wq, &rtw_work); -+ -+ return 1; -+} -+EXPORT_SYMBOL(rtw_btcoex_wifi_to_bt); -+ -+static int rtk_send_coexmsg2wifi(u8 *msg, u8 size) -+{ -+ u8 result; -+ u8 (*btmsg_to_wifi)(u8 *, u8); -+ -+ btmsg_to_wifi = __symbol_get(VMLINUX_SYMBOL_STR(rtw_btcoex_bt_to_wifi)); -+ -+ if (!btmsg_to_wifi) { -+ /* RTKBT_ERR("Couldnt get symbol"); */ -+ return -1; -+ } -+ -+ result = btmsg_to_wifi(msg, size); -+ __symbol_put(VMLINUX_SYMBOL_STR(rtw_btcoex_bt_to_wifi)); -+ if (!result) { -+ RTKBT_ERR("Couldnt send 
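The one-second timers above only ever demote A2DP (and HOGP/voice) to idle when no packets were counted in the window, since the data path is what promotes them to busy; PAN is both promoted and demoted in the timer itself against a threshold, PAN_PACKET_COUNT, whose real value comes from rtk_coex.h and is assumed below. A minimal model of the PAN window evaluation:

```c
#include <stdio.h>

#define PAN_PACKET_COUNT 50   /* assumed threshold; the real value lives in rtk_coex.h */

/* One evaluation of the one-second PAN window, mirroring
 * count_pan_packet_timeout(): below the threshold the profile is demoted to
 * idle, at or above it it is promoted to busy, and the counter restarts. */
static int pan_window_tick(int *packet_count, int currently_busy)
{
	int busy = (*packet_count >= PAN_PACKET_COUNT);

	if (busy != currently_busy)
		printf("pan %s -> %s\n", currently_busy ? "busy" : "idle",
		       busy ? "busy" : "idle");      /* the driver would notify the fw here */
	*packet_count = 0;                           /* start the next window */
	return busy;
}

int main(void)
{
	int count, busy = 0;

	count = 120;                 /* heavy traffic in this window */
	busy = pan_window_tick(&count, busy);
	count = 3;                   /* traffic died down */
	busy = pan_window_tick(&count, busy);
	return 0;
}
```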
coex msg to WiFi"); -+ return -1; -+ } else if (result == 1){ -+ /* successful to send message */ -+ return 0; -+ } else { -+ RTKBT_ERR("Unknown result %d", result); -+ return -1; -+ } -+} -+ -+static int rtkbt_process_coexskb(struct sk_buff *skb) -+{ -+ rtk_handle_event_from_wifi(skb->data); -+ return 0; -+} -+ -+static void rtw_work_func(struct work_struct *work) -+{ -+ struct sk_buff *skb; -+ -+ while ((skb = skb_dequeue(&rtw_q))) { -+ rtkbt_process_coexskb(skb); -+ kfree_skb(skb); -+ } -+} -+ -+#endif -+ -+static int rtkbt_coexmsg_send(char *tx_msg, int msg_size) -+{ -+#ifdef RTK_COEX_OVER_SYMBOL -+ return rtk_send_coexmsg2wifi((uint8_t *)tx_msg, (u8)msg_size); -+#else -+ return udpsocket_send(tx_msg, msg_size); -+#endif -+} -+ -+#ifndef RTK_COEX_OVER_SYMBOL -+static void udpsocket_recv_data(void) -+{ -+ u8 recv_data[512]; -+ u32 len = 0; -+ u16 recv_length; -+ struct sk_buff *skb; -+ -+ RTKBT_DBG("-"); -+ -+ spin_lock(&btrtl_coex.spin_lock_sock); -+ len = skb_queue_len(&btrtl_coex.sk->sk_receive_queue); -+ -+ while (len > 0) { -+ skb = skb_dequeue(&btrtl_coex.sk->sk_receive_queue); -+ -+ /*important: cut the udp header from skb->data! header length is 8 byte */ -+ recv_length = skb->len - 8; -+ memset(recv_data, 0, sizeof(recv_data)); -+ memcpy(recv_data, skb->data + 8, recv_length); -+ //RTKBT_DBG("received data: %s :with len %u", recv_data, recv_length); -+ -+ rtk_handle_event_from_wifi(recv_data); -+ -+ len--; -+ kfree_skb(skb); -+ } -+ -+ spin_unlock(&btrtl_coex.spin_lock_sock); -+} -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) -+static void udpsocket_recv(struct sock *sk, int bytes) -+#else -+static void udpsocket_recv(struct sock *sk) -+#endif -+{ -+ spin_lock(&btrtl_coex.spin_lock_sock); -+ btrtl_coex.sk = sk; -+ spin_unlock(&btrtl_coex.spin_lock_sock); -+ queue_delayed_work(btrtl_coex.sock_wq, &btrtl_coex.sock_work, 0); -+} -+ -+static void create_udpsocket(void) -+{ -+ int err; -+ RTKBT_DBG("%s: connect_port: %d", __func__, CONNECT_PORT); -+ btrtl_coex.sock_open = 0; -+ -+ err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_UDP, -+ &btrtl_coex.udpsock); -+ if (err < 0) { -+ RTKBT_ERR("%s: sock create error, err = %d", __func__, err); -+ return; -+ } -+ -+ memset(&btrtl_coex.addr, 0, sizeof(struct sockaddr_in)); -+ btrtl_coex.addr.sin_family = AF_INET; -+ btrtl_coex.addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); -+ btrtl_coex.addr.sin_port = htons(CONNECT_PORT); -+ -+ memset(&btrtl_coex.wifi_addr, 0, sizeof(struct sockaddr_in)); -+ btrtl_coex.wifi_addr.sin_family = AF_INET; -+ btrtl_coex.wifi_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); -+ btrtl_coex.wifi_addr.sin_port = htons(CONNECT_PORT_WIFI); -+ -+ err = -+ btrtl_coex.udpsock->ops->bind(btrtl_coex.udpsock, -+ (struct sockaddr *)&btrtl_coex. 
-+ addr, sizeof(struct sockaddr)); -+ if (err < 0) { -+ sock_release(btrtl_coex.udpsock); -+ RTKBT_ERR("%s: sock bind error, err = %d",__func__, err); -+ return; -+ } -+ -+ btrtl_coex.sock_open = 1; -+ btrtl_coex.udpsock->sk->sk_data_ready = udpsocket_recv; -+} -+#endif /* !RTK_COEX_OVER_SYMBOL */ -+ -+static void rtk_notify_extension_version_to_wifi(void) -+{ -+ uint8_t para_length = 2; -+ char p_buf[2 + HCI_CMD_PREAMBLE_SIZE]; -+ char *p = p_buf; -+ -+ if (!btrtl_coex.wifi_on) -+ return; -+ -+ UINT16_TO_STREAM(p, HCI_OP_HCI_EXTENSION_VERSION_NOTIFY); -+ *p++ = para_length; -+ UINT16_TO_STREAM(p, HCI_EXTENSION_VERSION); -+ RTKBT_DBG("extension version is 0x%x", HCI_EXTENSION_VERSION); -+ if (rtkbt_coexmsg_send(p_buf, para_length + HCI_CMD_PREAMBLE_SIZE) < 0) -+ RTKBT_ERR("%s: sock send error", __func__); -+} -+ -+static void rtk_notify_btpatch_version_to_wifi(void) -+{ -+ uint8_t para_length = 4; -+ char p_buf[para_length + HCI_CMD_PREAMBLE_SIZE]; -+ char *p = p_buf; -+ -+ if (!btrtl_coex.wifi_on) -+ return; -+ -+ UINT16_TO_STREAM(p, HCI_OP_HCI_BT_PATCH_VER_NOTIFY); -+ *p++ = para_length; -+ UINT16_TO_STREAM(p, btrtl_coex.hci_reversion); -+ UINT16_TO_STREAM(p, btrtl_coex.lmp_subversion); -+ RTKBT_DBG("btpatch ver: len %u, hci_rev 0x%04x, lmp_subver 0x%04x", -+ para_length, btrtl_coex.hci_reversion, -+ btrtl_coex.lmp_subversion); -+ -+ if (rtkbt_coexmsg_send(p_buf, para_length + HCI_CMD_PREAMBLE_SIZE) < 0) -+ RTKBT_ERR("%s: sock send error", __func__); -+} -+ -+static void rtk_notify_afhmap_to_wifi(void) -+{ -+ uint8_t para_length = 13; -+ char p_buf[para_length + HCI_CMD_PREAMBLE_SIZE]; -+ char *p = p_buf; -+ uint8_t kk = 0; -+ -+ if (!btrtl_coex.wifi_on) -+ return; -+ -+ UINT16_TO_STREAM(p, HCI_OP_HCI_BT_AFH_MAP_NOTIFY); -+ *p++ = para_length; -+ *p++ = btrtl_coex.piconet_id; -+ *p++ = btrtl_coex.mode; -+ *p++ = 10; -+ memcpy(p, btrtl_coex.afh_map, 10); -+ -+ RTKBT_DBG("afhmap, piconet_id is 0x%x, map type is 0x%x", -+ btrtl_coex.piconet_id, btrtl_coex.mode); -+ for (kk = 0; kk < 10; kk++) -+ RTKBT_DBG("afhmap data[%d] is 0x%x", kk, -+ btrtl_coex.afh_map[kk]); -+ -+ if (rtkbt_coexmsg_send(p_buf, para_length + HCI_CMD_PREAMBLE_SIZE) < 0) -+ RTKBT_ERR("%s: sock send error", __func__); -+} -+ -+static void rtk_notify_btcoex_to_wifi(uint8_t opcode, uint8_t status) -+{ -+ uint8_t para_length = 2; -+ char p_buf[para_length + HCI_CMD_PREAMBLE_SIZE]; -+ char *p = p_buf; -+ -+ if (!btrtl_coex.wifi_on) -+ return; -+ -+ UINT16_TO_STREAM(p, HCI_OP_HCI_BT_COEX_NOTIFY); -+ *p++ = para_length; -+ *p++ = opcode; -+ if (!status) -+ *p++ = 0; -+ else -+ *p++ = 1; -+ -+ RTKBT_DBG("btcoex, opcode is 0x%x, status is 0x%x", opcode, status); -+ -+ if (rtkbt_coexmsg_send(p_buf, para_length + HCI_CMD_PREAMBLE_SIZE) < 0) -+ RTKBT_ERR("%s: sock send error", __func__); -+} -+ -+static void rtk_notify_btoperation_to_wifi(uint8_t operation, -+ uint8_t append_data_length, -+ uint8_t * append_data) -+{ -+ uint8_t para_length = 3 + append_data_length; -+ char p_buf[para_length + HCI_CMD_PREAMBLE_SIZE]; -+ char *p = p_buf; -+ uint8_t kk = 0; -+ -+ if (!btrtl_coex.wifi_on) -+ return; -+ -+ UINT16_TO_STREAM(p, HCI_OP_BT_OPERATION_NOTIFY); -+ *p++ = para_length; -+ *p++ = operation; -+ *p++ = append_data_length; -+ if (append_data_length) -+ memcpy(p, append_data, append_data_length); -+ -+ RTKBT_DBG("btoperation: op 0x%02x, append_data_length %u", -+ operation, append_data_length); -+ if (append_data_length) { -+ for (kk = 0; kk < append_data_length; kk++) -+ RTKBT_DBG("append data is 0x%x", *(append_data + kk)); -+ } -+ 
-+ if (rtkbt_coexmsg_send(p_buf, para_length + HCI_CMD_PREAMBLE_SIZE) < 0) -+ RTKBT_ERR("%s: sock send error", __func__); -+} -+ -+static void rtk_notify_info_to_wifi(uint8_t reason, uint8_t length, -+ uint8_t *report_info) -+{ -+ uint8_t para_length = 4 + length; -+ char buf[para_length + HCI_CMD_PREAMBLE_SIZE]; -+ char *p = buf; -+ struct rtl_btinfo *report = (struct rtl_btinfo *)report_info; -+ -+ if (length) { -+ RTKBT_DBG("bt info: cmd %2.2X", report->cmd); -+ RTKBT_DBG("bt info: len %2.2X", report->len); -+ RTKBT_DBG("bt info: data %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X", -+ report->data[0], report->data[1], report->data[2], -+ report->data[3], report->data[4], report->data[5]); -+ } -+ RTKBT_DBG("bt info: reason 0x%2x, length 0x%2x", reason, length); -+ -+ if (!btrtl_coex.wifi_on) -+ return; -+ -+ UINT16_TO_STREAM(p, HCI_OP_HCI_BT_INFO_NOTIFY); -+ *p++ = para_length; -+ *p++ = btrtl_coex.polling_enable; -+ *p++ = btrtl_coex.polling_interval; -+ *p++ = reason; -+ *p++ = length; -+ -+ if (length) -+ memcpy(p, report_info, length); -+ -+ RTKBT_DBG("para length %2x, polling_enable %u, poiiling_interval %u", -+ para_length, btrtl_coex.polling_enable, -+ btrtl_coex.polling_interval); -+ /* send BT INFO to Wi-Fi driver */ -+ if (rtkbt_coexmsg_send(buf, para_length + HCI_CMD_PREAMBLE_SIZE) < 0) -+ RTKBT_ERR("%s: sock send error", __func__); -+} -+ -+static void rtk_notify_regester_to_wifi(uint8_t * reg_value) -+{ -+ uint8_t para_length = 9; -+ char p_buf[para_length + HCI_CMD_PREAMBLE_SIZE]; -+ char *p = p_buf; -+ hci_mailbox_register *reg = (hci_mailbox_register *) reg_value; -+ -+ if (!btrtl_coex.wifi_on) -+ return; -+ -+ UINT16_TO_STREAM(p, HCI_OP_HCI_BT_REGISTER_VALUE_NOTIFY); -+ *p++ = para_length; -+ memcpy(p, reg_value, para_length); -+ -+ RTKBT_DBG("bt register, register type is %x", reg->type); -+ RTKBT_DBG("bt register, register offset is %x", reg->offset); -+ RTKBT_DBG("bt register, register value is %x", reg->value); -+ -+ if (rtkbt_coexmsg_send(p_buf, para_length + HCI_CMD_PREAMBLE_SIZE) < 0) -+ RTKBT_ERR("%s: sock send error", __func__); -+} -+ -+#endif -+ -+void rtk_btcoex_parse_cmd(uint8_t *buffer, int count) -+{ -+ u16 opcode = (buffer[0]) + (buffer[1] << 8); -+ -+ if (!test_bit(RTL_COEX_RUNNING, &btrtl_coex.flags)) { -+ RTKBT_INFO("%s: Coex is closed, ignore", __func__); -+ return; -+ } -+ -+ switch (opcode) { -+ case HCI_OP_INQUIRY: -+ case HCI_OP_PERIODIC_INQ: -+ if (!btrtl_coex.isinquirying) { -+ btrtl_coex.isinquirying = 1; -+#ifdef RTB_SOFTWARE_MAILBOX -+ RTKBT_DBG("hci (periodic)inq, notify wifi " -+ "inquiry start"); -+ rtk_notify_btoperation_to_wifi(BT_OPCODE_INQUIRY_START, -+ 0, NULL); -+#else -+ RTKBT_INFO("hci (periodic)inq start"); -+#endif -+ } -+ break; -+ case HCI_OP_INQUIRY_CANCEL: -+ case HCI_OP_EXIT_PERIODIC_INQ: -+ if (btrtl_coex.isinquirying) { -+ btrtl_coex.isinquirying = 0; -+#ifdef RTB_SOFTWARE_MAILBOX -+ RTKBT_DBG("hci (periodic)inq cancel/exit, notify wifi " -+ "inquiry stop"); -+ rtk_notify_btoperation_to_wifi(BT_OPCODE_INQUIRY_END, 0, -+ NULL); -+#else -+ RTKBT_INFO("hci (periodic)inq cancel/exit"); -+#endif -+ } -+ break; -+ case HCI_OP_ACCEPT_CONN_REQ: -+ if (!btrtl_coex.ispaging) { -+ btrtl_coex.ispaging = 1; -+#ifdef RTB_SOFTWARE_MAILBOX -+ RTKBT_DBG("hci accept connreq, notify wifi page start"); -+ rtk_notify_btoperation_to_wifi(BT_OPCODE_PAGE_START, 0, -+ NULL); -+#else -+ RTKBT_INFO("hci accept conn req"); -+#endif -+ } -+ break; -+ case HCI_OP_DISCONNECT: -+ RTKBT_INFO("HCI Disconnect, handle %04x, reason 0x%02x", -+ ((u16)buffer[4] << 8 | 
buffer[3]), buffer[5]); -+ break; -+ default: -+ break; -+ } -+} -+ -+static void rtk_handle_inquiry_complete(void) -+{ -+ if (btrtl_coex.isinquirying) { -+ btrtl_coex.isinquirying = 0; -+#ifdef RTB_SOFTWARE_MAILBOX -+ RTKBT_DBG("inq complete, notify wifi inquiry end"); -+ rtk_notify_btoperation_to_wifi(BT_OPCODE_INQUIRY_END, 0, NULL); -+#else -+ RTKBT_INFO("inquiry complete"); -+#endif -+ } -+} -+ -+static void rtk_handle_pin_code_req(void) -+{ -+ if (!btrtl_coex.ispairing) { -+ btrtl_coex.ispairing = 1; -+#ifdef RTB_SOFTWARE_MAILBOX -+ RTKBT_DBG("pin code req, notify wifi pair start"); -+ rtk_notify_btoperation_to_wifi(BT_OPCODE_PAIR_START, 0, NULL); -+#else -+ RTKBT_INFO("pin code request"); -+#endif -+ } -+} -+ -+static void rtk_handle_io_capa_req(void) -+{ -+ if (!btrtl_coex.ispairing) { -+ btrtl_coex.ispairing = 1; -+#ifdef RTB_SOFTWARE_MAILBOX -+ RTKBT_DBG("io cap req, notify wifi pair start"); -+ rtk_notify_btoperation_to_wifi(BT_OPCODE_PAIR_START, 0, NULL); -+#else -+ RTKBT_INFO("io capability request"); -+#endif -+ } -+} -+ -+static void rtk_handle_auth_request(void) -+{ -+ if (btrtl_coex.ispairing) { -+ btrtl_coex.ispairing = 0; -+#ifdef RTB_SOFTWARE_MAILBOX -+ RTKBT_DBG("auth req, notify wifi pair end"); -+ rtk_notify_btoperation_to_wifi(BT_OPCODE_PAIR_END, 0, NULL); -+#else -+ RTKBT_INFO("authentication request"); -+#endif -+ } -+} -+ -+static void rtk_handle_link_key_notify(void) -+{ -+ if (btrtl_coex.ispairing) { -+ btrtl_coex.ispairing = 0; -+#ifdef RTB_SOFTWARE_MAILBOX -+ RTKBT_DBG("link key notify, notify wifi pair end"); -+ rtk_notify_btoperation_to_wifi(BT_OPCODE_PAIR_END, 0, NULL); -+#else -+ RTKBT_INFO("link key notify"); -+#endif -+ } -+} -+ -+static void rtk_handle_mode_change_evt(u8 * p) -+{ -+ u16 mode_change_handle, mode_interval; -+ -+ p++; -+ STREAM_TO_UINT16(mode_change_handle, p); -+ p++; -+ STREAM_TO_UINT16(mode_interval, p); -+ update_hid_active_state(mode_change_handle, mode_interval); -+} -+ -+#ifdef RTB_SOFTWARE_MAILBOX -+static void rtk_parse_vendor_mailbox_cmd_evt(u8 * p, u8 total_len) -+{ -+ u8 status, subcmd; -+ u8 temp_cmd[10]; -+ -+ status = *p++; -+ if (total_len <= 4) { -+ RTKBT_DBG("receive mailbox cmd from fw, total length <= 4"); -+ return; -+ } -+ subcmd = *p++; -+ RTKBT_DBG("receive mailbox cmd from fw, subcmd is 0x%x, status is 0x%x", -+ subcmd, status); -+ -+ switch (subcmd) { -+ case HCI_VENDOR_SUB_CMD_BT_REPORT_CONN_SCO_INQ_INFO: -+ if (status == 0) //success -+ rtk_notify_info_to_wifi(POLLING_RESPONSE, -+ RTL_BTINFO_LEN, (uint8_t *)p); -+ break; -+ -+ case HCI_VENDOR_SUB_CMD_WIFI_CHANNEL_AND_BANDWIDTH_CMD: -+ rtk_notify_btcoex_to_wifi(WIFI_BW_CHNL_NOTIFY, status); -+ break; -+ -+ case HCI_VENDOR_SUB_CMD_WIFI_FORCE_TX_POWER_CMD: -+ rtk_notify_btcoex_to_wifi(BT_POWER_DECREASE_CONTROL, status); -+ break; -+ -+ case HCI_VENDOR_SUB_CMD_BT_ENABLE_IGNORE_WLAN_ACT_CMD: -+ rtk_notify_btcoex_to_wifi(IGNORE_WLAN_ACTIVE_CONTROL, status); -+ break; -+ -+ case HCI_VENDOR_SUB_CMD_SET_BT_PSD_MODE: -+ rtk_notify_btcoex_to_wifi(BT_PSD_MODE_CONTROL, status); -+ break; -+ -+ case HCI_VENDOR_SUB_CMD_SET_BT_LNA_CONSTRAINT: -+ rtk_notify_btcoex_to_wifi(LNA_CONSTRAIN_CONTROL, status); -+ break; -+ -+ case HCI_VENDOR_SUB_CMD_BT_AUTO_REPORT_ENABLE: -+ break; -+ -+ case HCI_VENDOR_SUB_CMD_BT_SET_TXRETRY_REPORT_PARAM: -+ break; -+ -+ case HCI_VENDOR_SUB_CMD_BT_SET_PTATABLE: -+ break; -+ -+ case HCI_VENDOR_SUB_CMD_GET_AFH_MAP_L: -+ if (status == 0) { -+ memcpy(btrtl_coex.afh_map, p + 4, 4); /* cmd_idx, length, piconet_id, mode */ -+ temp_cmd[0] = 
HCI_VENDOR_SUB_CMD_GET_AFH_MAP_M; -+ temp_cmd[1] = 2; -+ temp_cmd[2] = btrtl_coex.piconet_id; -+ temp_cmd[3] = btrtl_coex.mode; -+ rtk_vendor_cmd_to_fw(HCI_VENDOR_MAILBOX_CMD, 4, -+ temp_cmd); -+ } else { -+ memset(btrtl_coex.afh_map, 0, 10); -+ rtk_notify_afhmap_to_wifi(); -+ } -+ break; -+ -+ case HCI_VENDOR_SUB_CMD_GET_AFH_MAP_M: -+ if (status == 0) { -+ memcpy(btrtl_coex.afh_map + 4, p + 4, 4); -+ temp_cmd[0] = HCI_VENDOR_SUB_CMD_GET_AFH_MAP_H; -+ temp_cmd[1] = 2; -+ temp_cmd[2] = btrtl_coex.piconet_id; -+ temp_cmd[3] = btrtl_coex.mode; -+ rtk_vendor_cmd_to_fw(HCI_VENDOR_MAILBOX_CMD, 4, -+ temp_cmd); -+ } else { -+ memset(btrtl_coex.afh_map, 0, 10); -+ rtk_notify_afhmap_to_wifi(); -+ } -+ break; -+ -+ case HCI_VENDOR_SUB_CMD_GET_AFH_MAP_H: -+ if (status == 0) -+ memcpy(btrtl_coex.afh_map + 8, p + 4, 2); -+ else -+ memset(btrtl_coex.afh_map, 0, 10); -+ -+ rtk_notify_afhmap_to_wifi(); -+ break; -+ -+ case HCI_VENDOR_SUB_CMD_RD_REG_REQ: -+ if (status == 0) -+ rtk_notify_regester_to_wifi(p + 3); /* cmd_idx,length,regist type */ -+ break; -+ -+ case HCI_VENDOR_SUB_CMD_WR_REG_REQ: -+ rtk_notify_btcoex_to_wifi(BT_REGISTER_ACCESS, status); -+ break; -+ -+ default: -+ break; -+ } -+} -+#endif /* RTB_SOFTWARE_MAILBOX */ -+ -+static void rtk_handle_cmd_complete_evt(u8 total_len, u8 * p) -+{ -+ u16 opcode; -+ -+ p++; -+ STREAM_TO_UINT16(opcode, p); -+ //RTKBT_DBG("cmd_complete, opcode is 0x%x", opcode); -+ -+ if (opcode == HCI_OP_PERIODIC_INQ) { -+ if (*p++ && btrtl_coex.isinquirying) { -+ btrtl_coex.isinquirying = 0; -+#ifdef RTB_SOFTWARE_MAILBOX -+ RTKBT_DBG("hci period inq, start error, notify wifi " -+ "inquiry stop"); -+ rtk_notify_btoperation_to_wifi(BT_OPCODE_INQUIRY_END, 0, -+ NULL); -+#else -+ RTKBT_INFO("hci period inquiry start error"); -+#endif -+ } -+ } -+ -+ if (opcode == HCI_OP_READ_LOCAL_VERSION) { -+ if (!(*p++)) { -+ p++; -+ STREAM_TO_UINT16(btrtl_coex.hci_reversion, p); -+ p += 3; -+ STREAM_TO_UINT16(btrtl_coex.lmp_subversion, p); -+ RTKBT_DBG("BTCOEX hci_rev 0x%04x", -+ btrtl_coex.hci_reversion); -+ RTKBT_DBG("BTCOEX lmp_subver 0x%04x", -+ btrtl_coex.lmp_subversion); -+ } -+ } -+ -+#ifdef RTB_SOFTWARE_MAILBOX -+ if (opcode == HCI_VENDOR_MAILBOX_CMD) { -+ rtk_parse_vendor_mailbox_cmd_evt(p, total_len); -+ } -+#endif -+ if (opcode == HCI_VENDOR_SET_PROFILE_REPORT_COMMAND) { -+ //0x01-unknown hci command -+ if((*p++) == 0x01) { -+ //RTKBT_DBG("unknown hci command"); -+ return; -+ } else { -+ profileinfo_cmd = 1; -+ } -+ } -+} -+ -+static void rtk_handle_cmd_status_evt(u8 * p) -+{ -+ u16 opcode; -+ u8 status; -+ -+ status = *p++; -+ p++; -+ STREAM_TO_UINT16(opcode, p); -+ //RTKBT_DBG("cmd_status, opcode is 0x%x", opcode); -+ if ((opcode == HCI_OP_INQUIRY) && (status)) { -+ if (btrtl_coex.isinquirying) { -+ btrtl_coex.isinquirying = 0; -+#ifdef RTB_SOFTWARE_MAILBOX -+ RTKBT_DBG("hci inq, start error, notify wifi inq stop"); -+ rtk_notify_btoperation_to_wifi(BT_OPCODE_INQUIRY_END, 0, -+ NULL); -+#else -+ RTKBT_INFO("hci inquiry start error"); -+#endif -+ } -+ } -+ -+ if (opcode == HCI_OP_CREATE_CONN) { -+ if (!status && !btrtl_coex.ispaging) { -+ btrtl_coex.ispaging = 1; -+#ifdef RTB_SOFTWARE_MAILBOX -+ RTKBT_DBG("hci create conn, notify wifi start page"); -+ rtk_notify_btoperation_to_wifi(BT_OPCODE_PAGE_START, 0, -+ NULL); -+#else -+ RTKBT_INFO("hci create connection, start paging"); -+#endif -+ } -+ } -+} -+ -+static void rtk_handle_connection_complete_evt(u8 * p) -+{ -+ u16 handle; -+ u8 status, link_type; -+ rtk_conn_prof *hci_conn = NULL; -+ -+ status = *p++; -+ 
STREAM_TO_UINT16(handle, p); -+ p += 6; -+ link_type = *p++; -+ -+ RTKBT_INFO("connected, handle %04x, status 0x%02x", handle, status); -+ -+ if (status == 0) { -+ if (btrtl_coex.ispaging) { -+ btrtl_coex.ispaging = 0; -+#ifdef RTB_SOFTWARE_MAILBOX -+ RTKBT_DBG("notify wifi page success end"); -+ rtk_notify_btoperation_to_wifi -+ (BT_OPCODE_PAGE_SUCCESS_END, 0, NULL); -+#else -+ RTKBT_INFO("Page success"); -+#endif -+ } -+ -+ hci_conn = find_connection_by_handle(&btrtl_coex, handle); -+ if (hci_conn == NULL) { -+ hci_conn = allocate_connection_by_handle(handle); -+ if (hci_conn) { -+ add_connection_to_hash(&btrtl_coex, -+ hci_conn); -+ hci_conn->profile_bitmap = 0; -+ memset(hci_conn->profile_refcount, 0, 8); -+ if ((0 == link_type) || (2 == link_type)) { //sco or esco -+ hci_conn->type = 1; -+ update_profile_connection(hci_conn, -+ profile_sco, -+ TRUE); -+ } else -+ hci_conn->type = 0; -+ } else { -+ RTKBT_ERR("hci connection allocate fail"); -+ } -+ } else { -+ RTKBT_DBG("hci conn handle 0x%04x already existed!", -+ handle); -+ hci_conn->profile_bitmap = 0; -+ memset(hci_conn->profile_refcount, 0, 8); -+ if ((0 == link_type) || (2 == link_type)) { //sco or esco -+ hci_conn->type = 1; -+ update_profile_connection(hci_conn, profile_sco, -+ TRUE); -+ } else -+ hci_conn->type = 0; -+ } -+ } else if (btrtl_coex.ispaging) { -+ btrtl_coex.ispaging = 0; -+#ifdef RTB_SOFTWARE_MAILBOX -+ RTKBT_DBG("notify wifi page unsuccess end"); -+ rtk_notify_btoperation_to_wifi(BT_OPCODE_PAGE_UNSUCCESS_END, 0, -+ NULL); -+#else -+ RTKBT_INFO("Page failed"); -+#endif -+ } -+} -+ -+static void rtk_handle_le_connection_complete_evt(u8 enhanced, u8 * p) -+{ -+ u16 handle, interval; -+ u8 status; -+ rtk_conn_prof *hci_conn = NULL; -+ -+ status = *p++; -+ STREAM_TO_UINT16(handle, p); -+ if (!enhanced) -+ p += 8; /* role, address type, address */ -+ else -+ p += (8 + 12); /* plus two bluetooth addresses */ -+ STREAM_TO_UINT16(interval, p); -+ -+ RTKBT_INFO("LE connected, handle %04x, status 0x%02x, interval %u", -+ handle, status, interval); -+ -+ if (status == 0) { -+ if (btrtl_coex.ispaging) { -+ btrtl_coex.ispaging = 0; -+#ifdef RTB_SOFTWARE_MAILBOX -+ RTKBT_DBG("notify wifi page success end"); -+ rtk_notify_btoperation_to_wifi -+ (BT_OPCODE_PAGE_SUCCESS_END, 0, NULL); -+#else -+ RTKBT_INFO("Page success end"); -+#endif -+ } -+ -+ hci_conn = find_connection_by_handle(&btrtl_coex, handle); -+ if (hci_conn == NULL) { -+ hci_conn = allocate_connection_by_handle(handle); -+ if (hci_conn) { -+ add_connection_to_hash(&btrtl_coex, -+ hci_conn); -+ hci_conn->profile_bitmap = 0; -+ memset(hci_conn->profile_refcount, 0, 8); -+ hci_conn->type = 2; -+ update_profile_connection(hci_conn, profile_hid, TRUE); //for coex, le is the same as hid -+ update_hid_active_state(handle, interval); -+ } else { -+ RTKBT_ERR("hci connection allocate fail"); -+ } -+ } else { -+ RTKBT_DBG("hci conn handle 0x%04x already existed!", -+ handle); -+ hci_conn->profile_bitmap = 0; -+ memset(hci_conn->profile_refcount, 0, 8); -+ hci_conn->type = 2; -+ update_profile_connection(hci_conn, profile_hid, TRUE); -+ update_hid_active_state(handle, interval); -+ } -+ } else if (btrtl_coex.ispaging) { -+ btrtl_coex.ispaging = 0; -+#ifdef RTB_SOFTWARE_MAILBOX -+ RTKBT_DBG("notify wifi page unsuccess end"); -+ rtk_notify_btoperation_to_wifi(BT_OPCODE_PAGE_UNSUCCESS_END, 0, -+ NULL); -+#else -+ RTKBT_INFO("Page failed"); -+#endif -+ } -+} -+ -+static void rtk_handle_le_connection_update_complete_evt(u8 * p) -+{ -+ u16 handle, interval; -+ /* u8 status; */ -+ -+ 
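rtk_handle_connection_complete_evt() above walks the fixed event layout (status, little-endian handle, six address bytes, link type) and treats link types 0x00 and 0x02 as SCO/eSCO voice links. A standalone sketch of that field walk, driven by a hand-made example event:

```c
#include <stdint.h>
#include <stdio.h>

/* Connection Complete parameter layout:
 * Status(1) | Handle(2, LE) | BD_ADDR(6) | Link_Type(1) | ...
 * Link types 0x00 (SCO) and 0x02 (eSCO) are lumped together as voice links. */
struct conn_info {
	uint8_t  status;
	uint16_t handle;
	int      is_sco;
};

static struct conn_info parse_conn_complete(const uint8_t *p)
{
	struct conn_info ci;

	ci.status = p[0];
	ci.handle = (uint16_t)(p[1] | (p[2] << 8));
	ci.is_sco = (p[9] == 0x00 || p[9] == 0x02);   /* link type follows the address */
	return ci;
}

int main(void)
{
	/* Example: success, handle 0x0040, eSCO link. */
	uint8_t evt[11] = { 0x00, 0x40, 0x00, 1, 2, 3, 4, 5, 6, 0x02, 0x00 };
	struct conn_info ci = parse_conn_complete(evt);

	printf("status %u handle 0x%04x sco %d\n", ci.status, ci.handle, ci.is_sco);
	return 0;
}
```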
/* status = *p++; */ -+ p++; -+ -+ STREAM_TO_UINT16(handle, p); -+ STREAM_TO_UINT16(interval, p); -+ update_hid_active_state(handle, interval); -+} -+ -+static void rtk_handle_le_meta_evt(u8 * p) -+{ -+ u8 sub_event = *p++; -+ switch (sub_event) { -+ case HCI_EV_LE_CONN_COMPLETE: -+ rtk_handle_le_connection_complete_evt(0, p); -+ break; -+ case HCI_EV_LE_ENHANCED_CONN_COMPLETE: -+ rtk_handle_le_connection_complete_evt(1, p); -+ break; -+ -+ case HCI_EV_LE_CONN_UPDATE_COMPLETE: -+ rtk_handle_le_connection_update_complete_evt(p); -+ break; -+ -+ default: -+ break; -+ } -+} -+ -+static u8 disconn_profile(struct rtl_hci_conn *conn, u8 pfe_index) -+{ -+ u8 need_update = 0; -+ -+ if (!conn->profile_refcount[pfe_index]) { -+ RTKBT_WARN("profile %u ref is 0", pfe_index); -+ return 0; -+ } -+ -+ RTKBT_INFO("%s: profile_ref[%u] %u", __func__, pfe_index, -+ conn->profile_refcount[pfe_index]); -+ -+ if (conn->profile_refcount[pfe_index]) -+ conn->profile_refcount[pfe_index]--; -+ else -+ RTKBT_INFO("%s: conn pfe ref[%u] is 0", __func__, -+ conn->profile_refcount[pfe_index]); -+ if (!conn->profile_refcount[pfe_index]) { -+ need_update = 1; -+ conn->profile_bitmap &= ~(BIT(pfe_index)); -+ -+ /* if profile does not exist, status is meaningless */ -+ conn->profile_status &= ~(BIT(pfe_index)); -+ rtk_check_del_timer(pfe_index, conn); -+ } -+ -+ return need_update; -+} -+ -+static void disconn_acl(u16 handle, struct rtl_hci_conn *conn) -+{ -+ struct rtl_coex_struct *coex = &btrtl_coex; -+ rtk_prof_info *prof_info = NULL; -+ struct list_head *iter = NULL, *temp = NULL; -+ u8 need_update = 0; -+ -+ spin_lock(&coex->spin_lock_profile); -+ -+ list_for_each_safe(iter, temp, &coex->profile_list) { -+ prof_info = list_entry(iter, rtk_prof_info, list); -+ if (handle == prof_info->handle) { -+ RTKBT_DBG("hci disconn, hndl %x, psm %x, dcid %x, " -+ "scid %x, profile %u", prof_info->handle, -+ prof_info->psm, prof_info->dcid, -+ prof_info->scid, prof_info->profile_index); -+ //If both scid and dcid > 0, L2cap connection is exist. 
-+ need_update |= disconn_profile(conn, -+ prof_info->profile_index); -+ if ((prof_info->flags & A2DP_MEDIA) && -+ (conn->profile_bitmap & BIT(profile_sink))) -+ need_update |= disconn_profile(conn, -+ profile_sink); -+ delete_profile_from_hash(prof_info); -+ } -+ } -+ if (need_update) -+ rtk_notify_profileinfo_to_fw(); -+ spin_unlock(&coex->spin_lock_profile); -+} -+ -+static void rtk_handle_disconnect_complete_evt(u8 * p) -+{ -+ u16 handle; -+ u8 status; -+ u8 reason; -+ rtk_conn_prof *hci_conn = NULL; -+ -+ if (btrtl_coex.ispairing) { //for slave: connection will be disconnected if authentication fail -+ btrtl_coex.ispairing = 0; -+#ifdef RTB_SOFTWARE_MAILBOX -+ RTKBT_DBG("hci disc complete, notify wifi pair end"); -+ rtk_notify_btoperation_to_wifi(BT_OPCODE_PAIR_END, 0, NULL); -+#else -+ RTKBT_INFO("hci disconnection complete"); -+#endif -+ } -+ -+ status = *p++; -+ STREAM_TO_UINT16(handle, p); -+ reason = *p; -+ -+ RTKBT_INFO("disconn cmpl evt: status 0x%02x, handle %04x, reason 0x%02x", -+ status, handle, reason); -+ -+ if (status == 0) { -+ RTKBT_DBG("process disconn complete event."); -+ hci_conn = find_connection_by_handle(&btrtl_coex, handle); -+ if (hci_conn) { -+ switch (hci_conn->type) { -+ case 0: -+ /* FIXME: If this is interrupted by l2cap rx, -+ * there may be deadlock on spin_lock_profile */ -+ disconn_acl(handle, hci_conn); -+ break; -+ -+ case 1: -+ update_profile_connection(hci_conn, profile_sco, -+ FALSE); -+ break; -+ -+ case 2: -+ update_profile_connection(hci_conn, profile_hid, -+ FALSE); -+ break; -+ -+ default: -+ break; -+ } -+ delete_connection_from_hash(hci_conn); -+ } else -+ RTKBT_ERR("hci conn handle 0x%04x not found", handle); -+ } -+} -+ -+static void rtk_handle_specific_evt(u8 * p) -+{ -+ u16 subcode; -+ -+ STREAM_TO_UINT16(subcode, p); -+ if (subcode == HCI_VENDOR_PTA_AUTO_REPORT_EVENT) { -+#ifdef RTB_SOFTWARE_MAILBOX -+ RTKBT_DBG("notify wifi driver with autoreport data"); -+ rtk_notify_info_to_wifi(AUTO_REPORT, RTL_BTINFO_LEN, -+ (uint8_t *)p); -+#else -+ RTKBT_INFO("auto report data"); -+#endif -+ } -+} -+ -+static void rtk_parse_event_data(struct rtl_coex_struct *coex, -+ u8 *data, u16 len) -+{ -+ u8 *p = data; -+ u8 event_code = *p++; -+ u8 total_len = *p++; -+ -+ (void)coex; -+ (void)&len; -+ -+ switch (event_code) { -+ case HCI_EV_INQUIRY_COMPLETE: -+ rtk_handle_inquiry_complete(); -+ break; -+ -+ case HCI_EV_PIN_CODE_REQ: -+ rtk_handle_pin_code_req(); -+ break; -+ -+ case HCI_EV_IO_CAPA_REQUEST: -+ rtk_handle_io_capa_req(); -+ break; -+ -+ case HCI_EV_AUTH_COMPLETE: -+ rtk_handle_auth_request(); -+ break; -+ -+ case HCI_EV_LINK_KEY_NOTIFY: -+ rtk_handle_link_key_notify(); -+ break; -+ -+ case HCI_EV_MODE_CHANGE: -+ rtk_handle_mode_change_evt(p); -+ break; -+ -+ case HCI_EV_CMD_COMPLETE: -+ rtk_handle_cmd_complete_evt(total_len, p); -+ break; -+ -+ case HCI_EV_CMD_STATUS: -+ rtk_handle_cmd_status_evt(p); -+ break; -+ -+ case HCI_EV_CONN_COMPLETE: -+ case HCI_EV_SYNC_CONN_COMPLETE: -+ rtk_handle_connection_complete_evt(p); -+ break; -+ -+ case HCI_EV_DISCONN_COMPLETE: -+ rtk_handle_disconnect_complete_evt(p); -+ break; -+ -+ case HCI_EV_LE_META: -+ rtk_handle_le_meta_evt(p); -+ break; -+ -+ case HCI_EV_VENDOR_SPECIFIC: -+ rtk_handle_specific_evt(p); -+ break; -+ -+ default: -+ break; -+ } -+} -+ -+static const char l2_dir_str[][4] = { -+ "RX", "TX", -+}; -+ -+static void rtl_process_l2_sig(struct rtl_l2_buff *l2) -+{ -+ /* u8 flag; */ -+ u8 code; -+ /* u8 identifier; */ -+ u16 handle; -+ /* u16 total_len; */ -+ /* u16 pdu_len, channel_id; */ -+ /* 
u16 command_len; */ -+ u16 psm, scid, dcid, result; -+ /* u16 status; */ -+ u8 *pp = l2->data; -+ -+ STREAM_TO_UINT16(handle, pp); -+ /* flag = handle >> 12; */ -+ handle = handle & 0x0FFF; -+ /* STREAM_TO_UINT16(total_len, pp); */ -+ pp += 2; /* data total length */ -+ -+ /* STREAM_TO_UINT16(pdu_len, pp); -+ * STREAM_TO_UINT16(channel_id, pp); */ -+ pp += 4; /* l2 len and channel id */ -+ -+ code = *pp++; -+ switch (code) { -+ case L2CAP_CONN_REQ: -+ /* identifier = *pp++; */ -+ pp++; -+ /* STREAM_TO_UINT16(command_len, pp); */ -+ pp += 2; -+ STREAM_TO_UINT16(psm, pp); -+ STREAM_TO_UINT16(scid, pp); -+ RTKBT_DBG("%s l2cap conn req, hndl 0x%04x, PSM 0x%04x, " -+ "scid 0x%04x", l2_dir_str[l2->out], handle, psm, -+ scid); -+ handle_l2cap_con_req(handle, psm, scid, l2->out); -+ break; -+ -+ case L2CAP_CONN_RSP: -+ /* identifier = *pp++; */ -+ pp++; -+ /* STREAM_TO_UINT16(command_len, pp); */ -+ pp += 2; -+ STREAM_TO_UINT16(dcid, pp); -+ STREAM_TO_UINT16(scid, pp); -+ STREAM_TO_UINT16(result, pp); -+ /* STREAM_TO_UINT16(status, pp); */ -+ pp += 2; -+ RTKBT_DBG("%s l2cap conn rsp, hndl 0x%04x, dcid 0x%04x, " -+ "scid 0x%04x, result 0x%04x", l2_dir_str[l2->out], -+ handle, dcid, scid, result); -+ handle_l2cap_con_rsp(handle, dcid, scid, l2->out, result); -+ break; -+ -+ case L2CAP_DISCONN_REQ: -+ /* identifier = *pp++; */ -+ pp++; -+ /* STREAM_TO_UINT16(command_len, pp); */ -+ pp += 2; -+ STREAM_TO_UINT16(dcid, pp); -+ STREAM_TO_UINT16(scid, pp); -+ RTKBT_DBG("%s l2cap disconn req, hndl 0x%04x, dcid 0x%04x, " -+ "scid 0x%04x", l2_dir_str[l2->out], handle, dcid, scid); -+ handle_l2cap_discon_req(handle, dcid, scid, l2->out); -+ break; -+ default: -+ RTKBT_DBG("undesired l2 command %u", code); -+ break; -+ } -+} -+ -+static void rtl_l2_data_process(u8 *pp, u16 len, int dir) -+{ -+ u8 code; -+ u8 flag; -+ u16 handle, pdu_len, channel_id; -+ /* u16 total_len; */ -+ struct rtl_l2_buff *l2 = NULL; -+ u8 *hd = pp; -+ rtk_conn_prof *hci_conn = NULL; -+ -+ /* RTKBT_DBG("l2 sig data %p, len %u, dir %d", pp, len, dir); */ -+ -+ STREAM_TO_UINT16(handle, pp); -+ flag = handle >> 12; -+ handle = handle & 0x0FFF; -+ /* STREAM_TO_UINT16(total_len, pp); */ -+ pp += 2; /* data total length */ -+ -+ STREAM_TO_UINT16(pdu_len, pp); -+ STREAM_TO_UINT16(channel_id, pp); -+ -+ hci_conn = -+ find_connection_by_handle(&btrtl_coex, handle); -+ if (NULL == hci_conn) -+ return; -+ -+ -+ if (channel_id == 0x0001) { -+ code = *pp++; -+ switch (code) { -+ case L2CAP_CONN_REQ: -+ case L2CAP_CONN_RSP: -+ case L2CAP_DISCONN_REQ: -+ RTKBT_DBG("l2cap op %u, len %u, out %d", code, len, -+ dir); -+ l2 = rtl_l2_node_get(&btrtl_coex); -+ if (l2) { -+ u16 n; -+ n = min_t(uint, len, L2_MAX_SUBSEC_LEN); -+ memcpy(l2->data, hd, n); -+ l2->out = dir; -+ rtl_l2_node_to_used(&btrtl_coex, l2); -+ queue_delayed_work(btrtl_coex.fw_wq, -+ &btrtl_coex.l2_work, 0); -+ } else -+ RTKBT_ERR("%s: failed to get l2 node", -+ __func__); -+ break; -+ case L2CAP_DISCONN_RSP: -+ break; -+ default: -+ break; -+ } -+ } else { -+ //RTKBT_DBG("%s: handle:%x, flag:%x, pan:%d, a2dp:%d", __func__, handle, flag, -+ // is_profile_connected(profile_a2dp), is_profile_connected(profile_pan)); -+ if ((flag != 0x01) && (is_profile_connected(hci_conn, profile_a2dp) || -+ is_profile_connected(hci_conn, profile_pan))) -+ /* Do not count the continuous packets */ -+ packets_count(handle, channel_id, pdu_len, dir, pp); -+ } -+ return; -+} -+ -+ -+static void rtl_l2_work(struct work_struct *work) -+{ -+ struct rtl_coex_struct *coex; -+ struct rtl_l2_buff *l2; -+ unsigned long 
flags; -+ -+ coex = container_of(work, struct rtl_coex_struct, l2_work.work); -+ -+ spin_lock_irqsave(&coex->buff_lock, flags); -+ while (!list_empty(&coex->l2_used_list)) { -+ l2 = list_entry(coex->l2_used_list.next, struct rtl_l2_buff, -+ list); -+ list_del(&l2->list); -+ -+ spin_unlock_irqrestore(&coex->buff_lock, flags); -+ -+ rtl_process_l2_sig(l2); -+ -+ spin_lock_irqsave(&coex->buff_lock, flags); -+ -+ list_add_tail(&l2->list, &coex->l2_free_list); -+ } -+ spin_unlock_irqrestore(&coex->buff_lock, flags); -+ -+ return; -+} -+ -+static void rtl_ev_work(struct work_struct *work) -+{ -+ struct rtl_coex_struct *coex; -+ struct rtl_hci_ev *ev; -+ unsigned long flags; -+ -+ coex = container_of(work, struct rtl_coex_struct, fw_work.work); -+ -+ spin_lock_irqsave(&coex->buff_lock, flags); -+ while (!list_empty(&coex->ev_used_list)) { -+ ev = list_entry(coex->ev_used_list.next, struct rtl_hci_ev, -+ list); -+ list_del(&ev->list); -+ spin_unlock_irqrestore(&coex->buff_lock, flags); -+ -+ rtk_parse_event_data(coex, ev->data, ev->len); -+ -+ spin_lock_irqsave(&coex->buff_lock, flags); -+ list_add_tail(&ev->list, &coex->ev_free_list); -+ } -+ spin_unlock_irqrestore(&coex->buff_lock, flags); -+} -+ -+static inline int cmd_cmplt_filter_out(u8 *buf) -+{ -+ u16 opcode; -+ -+ opcode = buf[3] | (buf[4] << 8); -+ switch (opcode) { -+ case HCI_OP_PERIODIC_INQ: -+ case HCI_OP_READ_LOCAL_VERSION: -+#ifdef RTB_SOFTWARE_MAILBOX -+ case HCI_VENDOR_MAILBOX_CMD: -+#endif -+ case HCI_VENDOR_SET_PROFILE_REPORT_COMMAND: -+ return 0; -+ default: -+ return 1; -+ } -+} -+ -+static inline int cmd_status_filter_out(u8 *buf) -+{ -+ u16 opcode; -+ -+ opcode = buf[4] | (buf[5] << 8); -+ switch (opcode) { -+ case HCI_OP_INQUIRY: -+ case HCI_OP_CREATE_CONN: -+ return 0; -+ default: -+ return 1; -+ } -+} -+ -+static int ev_filter_out(u8 *buf) -+{ -+ switch (buf[0]) { -+ case HCI_EV_INQUIRY_COMPLETE: -+ case HCI_EV_PIN_CODE_REQ: -+ case HCI_EV_IO_CAPA_REQUEST: -+ case HCI_EV_AUTH_COMPLETE: -+ case HCI_EV_LINK_KEY_NOTIFY: -+ case HCI_EV_MODE_CHANGE: -+ case HCI_EV_CONN_COMPLETE: -+ case HCI_EV_SYNC_CONN_COMPLETE: -+ case HCI_EV_DISCONN_COMPLETE: -+ case HCI_EV_VENDOR_SPECIFIC: -+ return 0; -+ case HCI_EV_LE_META: -+ /* Ignore frequent but not useful events that result in -+ * costing too much space. 
-+ */ -+ switch (buf[2]) { -+ case HCI_EV_LE_CONN_COMPLETE: -+ case HCI_EV_LE_ENHANCED_CONN_COMPLETE: -+ case HCI_EV_LE_CONN_UPDATE_COMPLETE: -+ return 0; -+ } -+ return 1; -+ case HCI_EV_CMD_COMPLETE: -+ return cmd_cmplt_filter_out(buf); -+ case HCI_EV_CMD_STATUS: -+ return cmd_status_filter_out(buf); -+ default: -+ return 1; -+ } -+} -+ -+static void rtk_btcoex_evt_enqueue(__u8 *s, __u16 count) -+{ -+ struct rtl_hci_ev *ev; -+ -+ if (ev_filter_out(s)) -+ return; -+ -+ ev = rtl_ev_node_get(&btrtl_coex); -+ if (!ev) { -+ RTKBT_ERR("%s: no free ev node.", __func__); -+ return; -+ } -+ -+ if (count > MAX_LEN_OF_HCI_EV) { -+ memcpy(ev->data, s, MAX_LEN_OF_HCI_EV); -+ ev->len = MAX_LEN_OF_HCI_EV; -+ } else { -+ memcpy(ev->data, s, count); -+ ev->len = count; -+ } -+ -+ rtl_ev_node_to_used(&btrtl_coex, ev); -+ -+ queue_delayed_work(btrtl_coex.fw_wq, &btrtl_coex.fw_work, 0); -+} -+ -+/* Context: in_interrupt() */ -+void rtk_btcoex_parse_event(uint8_t *buffer, int count) -+{ -+ struct rtl_coex_struct *coex = &btrtl_coex; -+ __u8 *tbuff; -+ __u16 elen = 0; -+ -+ /* RTKBT_DBG("%s: parse ev.", __func__); */ -+ if (!test_bit(RTL_COEX_RUNNING, &btrtl_coex.flags)) { -+ /* RTKBT_INFO("%s: Coex is closed, ignore", __func__); */ -+ RTKBT_INFO("%s: Coex is closed, ignore %x, %x", -+ __func__, buffer[0], buffer[1]); -+ return; -+ } -+ -+ spin_lock(&coex->rxlock); -+ -+ /* coex->tbuff will be set to NULL when initializing or -+ * there is a complete frame or there is start of a frame */ -+ tbuff = coex->tbuff; -+ -+ while (count) { -+ int len; -+ -+ /* Start of a frame */ -+ if (!tbuff) { -+ tbuff = coex->back_buff; -+ coex->tbuff = NULL; -+ coex->elen = 0; -+ -+ coex->pkt_type = HCI_EVENT_PKT; -+ coex->expect = HCI_EVENT_HDR_SIZE; -+ } -+ -+ len = min_t(uint, coex->expect, count); -+ memcpy(tbuff, buffer, len); -+ tbuff += len; -+ coex->elen += len; -+ -+ count -= len; -+ buffer += len; -+ coex->expect -= len; -+ -+ if (coex->elen == HCI_EVENT_HDR_SIZE) { -+ /* Complete event header */ -+ coex->expect = -+ ((struct hci_event_hdr *)coex->back_buff)->plen; -+ if (coex->expect > HCI_MAX_EVENT_SIZE - coex->elen) { -+ tbuff = NULL; -+ coex->elen = 0; -+ RTKBT_ERR("tbuff room is not enough"); -+ break; -+ } -+ } -+ -+ if (coex->expect == 0) { -+ /* Complete frame */ -+ elen = coex->elen; -+ spin_unlock(&coex->rxlock); -+ rtk_btcoex_evt_enqueue(coex->back_buff, elen); -+ spin_lock(&coex->rxlock); -+ -+ tbuff = NULL; -+ coex->elen = 0; -+ } -+ } -+ -+ /* coex->tbuff would be non-NULL if there isn't a complete frame -+ * And it will be updated next time */ -+ coex->tbuff = tbuff; -+ spin_unlock(&coex->rxlock); -+} -+ -+ -+void rtk_btcoex_parse_l2cap_data_tx(uint8_t *buffer, int count) -+{ -+ if (!test_bit(RTL_COEX_RUNNING, &btrtl_coex.flags)) { -+ RTKBT_INFO("%s: Coex is closed, ignore", __func__); -+ return; -+ } -+ -+ rtl_l2_data_process(buffer, count, 1); -+ //u16 handle, total_len, pdu_len, channel_ID, command_len, psm, scid, -+ // dcid, result, status; -+ //u8 flag, code, identifier; -+ //u8 *pp = (u8 *) (skb->data); -+ //STREAM_TO_UINT16(handle, pp); -+ //flag = handle >> 12; -+ //handle = handle & 0x0FFF; -+ //STREAM_TO_UINT16(total_len, pp); -+ //STREAM_TO_UINT16(pdu_len, pp); -+ //STREAM_TO_UINT16(channel_ID, pp); -+ -+ //if (channel_ID == 0x0001) { -+ // code = *pp++; -+ // switch (code) { -+ // case L2CAP_CONN_REQ: -+ // identifier = *pp++; -+ // STREAM_TO_UINT16(command_len, pp); -+ // STREAM_TO_UINT16(psm, pp); -+ // STREAM_TO_UINT16(scid, pp); -+ // RTKBT_DBG("TX l2cap conn req, hndl %x, PSM %x, 
scid=%x", -+ // handle, psm, scid); -+ // handle_l2cap_con_req(handle, psm, scid, 1); -+ // break; -+ -+ // case L2CAP_CONN_RSP: -+ // identifier = *pp++; -+ // STREAM_TO_UINT16(command_len, pp); -+ // STREAM_TO_UINT16(dcid, pp); -+ // STREAM_TO_UINT16(scid, pp); -+ // STREAM_TO_UINT16(result, pp); -+ // STREAM_TO_UINT16(status, pp); -+ // RTKBT_DBG("TX l2cap conn rsp, hndl %x, dcid %x, " -+ // "scid %x, result %x", -+ // handle, dcid, scid, result); -+ // handle_l2cap_con_rsp(handle, dcid, scid, 1, result); -+ // break; -+ -+ // case L2CAP_DISCONN_REQ: -+ // identifier = *pp++; -+ // STREAM_TO_UINT16(command_len, pp); -+ // STREAM_TO_UINT16(dcid, pp); -+ // STREAM_TO_UINT16(scid, pp); -+ // RTKBT_DBG("TX l2cap disconn req, hndl %x, dcid %x, " -+ // "scid %x", handle, dcid, scid); -+ // handle_l2cap_discon_req(handle, dcid, scid, 1); -+ // break; -+ -+ // case L2CAP_DISCONN_RSP: -+ // break; -+ -+ // default: -+ // break; -+ // } -+ //} else { -+ // if ((flag != 0x01) && (is_profile_connected(profile_a2dp) || is_profile_connected(profile_pan))) //Do not count the continuous packets -+ // packets_count(handle, channel_ID, pdu_len, 1, pp); -+ //} -+} -+ -+void rtk_btcoex_parse_l2cap_data_rx(uint8_t *buffer, int count) -+{ -+ if (!test_bit(RTL_COEX_RUNNING, &btrtl_coex.flags)) { -+ RTKBT_INFO("%s: Coex is closed, ignore", __func__); -+ return; -+ } -+ -+ rtl_l2_data_process(buffer, count, 0); -+ //u16 handle, total_len, pdu_len, channel_ID, command_len, psm, scid, -+ // dcid, result, status; -+ //u8 flag, code, identifier; -+ //u8 *pp = urb->transfer_buffer; -+ //STREAM_TO_UINT16(handle, pp); -+ //flag = handle >> 12; -+ //handle = handle & 0x0FFF; -+ //STREAM_TO_UINT16(total_len, pp); -+ //STREAM_TO_UINT16(pdu_len, pp); -+ //STREAM_TO_UINT16(channel_ID, pp); -+ -+ //if (channel_ID == 0x0001) { -+ // code = *pp++; -+ // switch (code) { -+ // case L2CAP_CONN_REQ: -+ // identifier = *pp++; -+ // STREAM_TO_UINT16(command_len, pp); -+ // STREAM_TO_UINT16(psm, pp); -+ // STREAM_TO_UINT16(scid, pp); -+ // RTKBT_DBG("RX l2cap conn req, hndl %x, PSM %x, scid %x", -+ // handle, psm, scid); -+ // handle_l2cap_con_req(handle, psm, scid, 0); -+ // break; -+ -+ // case L2CAP_CONN_RSP: -+ // identifier = *pp++; -+ // STREAM_TO_UINT16(command_len, pp); -+ // STREAM_TO_UINT16(dcid, pp); -+ // STREAM_TO_UINT16(scid, pp); -+ // STREAM_TO_UINT16(result, pp); -+ // STREAM_TO_UINT16(status, pp); -+ // RTKBT_DBG("RX l2cap conn rsp, hndl %x, dcid %x, " -+ // "scid %x, result %x", -+ // handle, dcid, scid, result); -+ // handle_l2cap_con_rsp(handle, dcid, scid, 0, result); -+ // break; -+ -+ // case L2CAP_DISCONN_REQ: -+ // identifier = *pp++; -+ // STREAM_TO_UINT16(command_len, pp); -+ // STREAM_TO_UINT16(dcid, pp); -+ // STREAM_TO_UINT16(scid, pp); -+ // RTKBT_DBG("RX l2cap disconn req, hndl %x, dcid %x, " -+ // "scid %x", handle, dcid, scid); -+ // handle_l2cap_discon_req(handle, dcid, scid, 0); -+ // break; -+ -+ // case L2CAP_DISCONN_RSP: -+ // break; -+ -+ // default: -+ // break; -+ // } -+ //} else { -+ // if ((flag != 0x01) && (is_profile_connected(profile_a2dp) || is_profile_connected(profile_pan))) //Do not count the continuous packets -+ // packets_count(handle, channel_ID, pdu_len, 0, pp); -+ //} -+} -+ -+#ifdef RTB_SOFTWARE_MAILBOX -+ -+#if LINUX_VERSION_CODE > KERNEL_VERSION(4, 14, 0) -+static void polling_bt_info(struct timer_list *unused) -+#else -+static void polling_bt_info(unsigned long data) -+#endif -+{ -+ uint8_t temp_cmd[1]; -+ RTKBT_DBG("polling timer"); -+ if (btrtl_coex.polling_enable) { 
-+ //temp_cmd[0] = HCI_VENDOR_SUB_CMD_BT_REPORT_CONN_SCO_INQ_INFO; -+ temp_cmd[0] = HCI_VENDOR_SUB_CMD_BT_AUTO_REPORT_STATUS_INFO; -+ rtk_vendor_cmd_to_fw(HCI_VENDOR_MAILBOX_CMD, 1, temp_cmd); -+ } -+ mod_timer(&btrtl_coex.polling_timer, -+ jiffies + msecs_to_jiffies(1000 * btrtl_coex.polling_interval)); -+} -+ -+static void rtk_handle_bt_info_control(uint8_t *p) -+{ -+ uint8_t temp_cmd[20]; -+ struct rtl_btinfo_ctl *ctl = (struct rtl_btinfo_ctl*)p; -+ RTKBT_DBG("Received polling_enable %u, polling_time %u, " -+ "autoreport_enable %u", ctl->polling_enable, -+ ctl->polling_time, ctl->autoreport_enable); -+ RTKBT_DBG("coex: original polling_enable %u", -+ btrtl_coex.polling_enable); -+ -+ if (ctl->polling_enable && !btrtl_coex.polling_enable) { -+ /* setup polling timer for getting bt info from firmware */ -+ btrtl_coex.polling_timer.expires = -+ jiffies + msecs_to_jiffies(ctl->polling_time * 1000); -+ mod_timer(&btrtl_coex.polling_timer, -+ btrtl_coex.polling_timer.expires); -+ } -+ -+ /* Close bt info polling timer */ -+ if (!ctl->polling_enable && btrtl_coex.polling_enable) -+ del_timer(&btrtl_coex.polling_timer); -+ -+ if (btrtl_coex.autoreport != ctl->autoreport_enable) { -+ temp_cmd[0] = HCI_VENDOR_SUB_CMD_BT_AUTO_REPORT_ENABLE; -+ temp_cmd[1] = 1; -+ temp_cmd[2] = ctl->autoreport_enable; -+ rtk_vendor_cmd_to_fw(HCI_VENDOR_MAILBOX_CMD, 3, temp_cmd); -+ } -+ -+ btrtl_coex.polling_enable = ctl->polling_enable; -+ btrtl_coex.polling_interval = ctl->polling_time; -+ btrtl_coex.autoreport = ctl->autoreport_enable; -+ -+ rtk_notify_info_to_wifi(HOST_RESPONSE, 0, NULL); -+} -+ -+static void rtk_handle_bt_coex_control(uint8_t * p) -+{ -+ uint8_t temp_cmd[20]; -+ uint8_t opcode, opcode_len, value, power_decrease, psd_mode, -+ access_type; -+ -+ opcode = *p++; -+ RTKBT_DBG("receive bt coex control event from wifi, op 0x%02x", opcode); -+ -+ switch (opcode) { -+ case BT_PATCH_VERSION_QUERY: -+ rtk_notify_btpatch_version_to_wifi(); -+ break; -+ -+ case IGNORE_WLAN_ACTIVE_CONTROL: -+ opcode_len = *p++; -+ value = *p++; -+ temp_cmd[0] = HCI_VENDOR_SUB_CMD_BT_ENABLE_IGNORE_WLAN_ACT_CMD; -+ temp_cmd[1] = 1; -+ temp_cmd[2] = value; -+ rtk_vendor_cmd_to_fw(HCI_VENDOR_MAILBOX_CMD, 3, temp_cmd); -+ break; -+ -+ case LNA_CONSTRAIN_CONTROL: -+ opcode_len = *p++; -+ value = *p++; -+ temp_cmd[0] = HCI_VENDOR_SUB_CMD_SET_BT_LNA_CONSTRAINT; -+ temp_cmd[1] = 1; -+ temp_cmd[2] = value; -+ rtk_vendor_cmd_to_fw(HCI_VENDOR_MAILBOX_CMD, 3, temp_cmd); -+ break; -+ -+ case BT_POWER_DECREASE_CONTROL: -+ opcode_len = *p++; -+ power_decrease = *p++; -+ temp_cmd[0] = HCI_VENDOR_SUB_CMD_WIFI_FORCE_TX_POWER_CMD; -+ temp_cmd[1] = 1; -+ temp_cmd[2] = power_decrease; -+ rtk_vendor_cmd_to_fw(HCI_VENDOR_MAILBOX_CMD, 3, temp_cmd); -+ break; -+ -+ case BT_PSD_MODE_CONTROL: -+ opcode_len = *p++; -+ psd_mode = *p++; -+ temp_cmd[0] = HCI_VENDOR_SUB_CMD_SET_BT_PSD_MODE; -+ temp_cmd[1] = 1; -+ temp_cmd[2] = psd_mode; -+ rtk_vendor_cmd_to_fw(HCI_VENDOR_MAILBOX_CMD, 3, temp_cmd); -+ break; -+ -+ case WIFI_BW_CHNL_NOTIFY: -+ opcode_len = *p++; -+ temp_cmd[0] = HCI_VENDOR_SUB_CMD_WIFI_CHANNEL_AND_BANDWIDTH_CMD; -+ temp_cmd[1] = 3; -+ memcpy(temp_cmd + 2, p, 3); //wifi_state, wifi_centralchannel, chnnels_btnotuse -+ rtk_vendor_cmd_to_fw(HCI_VENDOR_MAILBOX_CMD, 5, temp_cmd); -+ break; -+ -+ case QUERY_BT_AFH_MAP: -+ opcode_len = *p++; -+ btrtl_coex.piconet_id = *p++; -+ btrtl_coex.mode = *p++; -+ temp_cmd[0] = HCI_VENDOR_SUB_CMD_GET_AFH_MAP_L; -+ temp_cmd[1] = 2; -+ temp_cmd[2] = btrtl_coex.piconet_id; -+ temp_cmd[3] = btrtl_coex.mode; -+ 
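		/*
		 * Mailbox payload convention used throughout this handler:
		 * byte 0 is the vendor sub-command, byte 1 is the parameter
		 * length, and the parameters follow.  Here the low part of the
		 * AFH map is requested first (GET_AFH_MAP_L with piconet id
		 * and mode); the _M/_H parts are presumably fetched from the
		 * command-complete path.
		 */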
rtk_vendor_cmd_to_fw(HCI_VENDOR_MAILBOX_CMD, 4, temp_cmd); -+ break; -+ -+ case BT_REGISTER_ACCESS: -+ opcode_len = *p++; -+ access_type = *p++; -+ if (access_type == 0) { //read -+ temp_cmd[0] = HCI_VENDOR_SUB_CMD_RD_REG_REQ; -+ temp_cmd[1] = 5; -+ temp_cmd[2] = *p++; -+ memcpy(temp_cmd + 3, p, 4); -+ rtk_vendor_cmd_to_fw(HCI_VENDOR_MAILBOX_CMD, 7, -+ temp_cmd); -+ } else { //write -+ temp_cmd[0] = HCI_VENDOR_SUB_CMD_RD_REG_REQ; -+ temp_cmd[1] = 5; -+ temp_cmd[2] = *p++; -+ memcpy(temp_cmd + 3, p, 8); -+ rtk_vendor_cmd_to_fw(HCI_VENDOR_MAILBOX_CMD, 11, -+ temp_cmd); -+ } -+ break; -+ -+ default: -+ break; -+ } -+} -+ -+static void rtk_handle_event_from_wifi(uint8_t * msg) -+{ -+ uint8_t *p = msg; -+ uint8_t event_code = *p++; -+ uint8_t total_length; -+ uint8_t extension_event; -+ uint8_t operation; -+ uint16_t wifi_opcode; -+ uint8_t op_status; -+ -+ if (memcmp(msg, invite_rsp, sizeof(invite_rsp)) == 0) { -+ RTKBT_DBG("receive invite rsp from wifi, wifi is already on"); -+ btrtl_coex.wifi_on = 1; -+ rtk_notify_extension_version_to_wifi(); -+ } -+ -+ if (memcmp(msg, attend_req, sizeof(attend_req)) == 0) { -+ RTKBT_DBG("receive attend req from wifi, wifi turn on"); -+ btrtl_coex.wifi_on = 1; -+ rtkbt_coexmsg_send(attend_ack, sizeof(attend_ack)); -+ rtk_notify_extension_version_to_wifi(); -+ } -+ -+ if (memcmp(msg, wifi_leave, sizeof(wifi_leave)) == 0) { -+ RTKBT_DBG("receive wifi leave from wifi, wifi turn off"); -+ btrtl_coex.wifi_on = 0; -+ rtkbt_coexmsg_send(leave_ack, sizeof(leave_ack)); -+ if (btrtl_coex.polling_enable) { -+ btrtl_coex.polling_enable = 0; -+ del_timer(&btrtl_coex.polling_timer); -+ } -+ } -+ -+ if (memcmp(msg, leave_ack, sizeof(leave_ack)) == 0) { -+ RTKBT_DBG("receive leave ack from wifi"); -+ } -+ -+ if (event_code == 0xFE) { -+ total_length = *p++; -+ extension_event = *p++; -+ switch (extension_event) { -+ case RTK_HS_EXTENSION_EVENT_WIFI_SCAN: -+ operation = *p; -+ RTKBT_DBG("Recv WiFi scan notify event from WiFi, " -+ "op 0x%02x", operation); -+ break; -+ -+ case RTK_HS_EXTENSION_EVENT_HCI_BT_INFO_CONTROL: -+ rtk_handle_bt_info_control(p); -+ break; -+ -+ case RTK_HS_EXTENSION_EVENT_HCI_BT_COEX_CONTROL: -+ rtk_handle_bt_coex_control(p); -+ break; -+ -+ default: -+ break; -+ } -+ } -+ -+ if (event_code == 0x0E) { -+ p += 2; //length, number of complete packets -+ STREAM_TO_UINT16(wifi_opcode, p); -+ op_status = *p; -+ RTKBT_DBG("Recv cmd complete event from WiFi, op 0x%02x, " -+ "status 0x%02x", wifi_opcode, op_status); -+ } -+} -+#endif /* RTB_SOFTWARE_MAILBOX */ -+ -+static inline void rtl_free_frags(struct rtl_coex_struct *coex) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&coex->rxlock, flags); -+ -+ coex->elen = 0; -+ coex->tbuff = NULL; -+ -+ spin_unlock_irqrestore(&coex->rxlock, flags); -+} -+ -+static void check_profileinfo_cmd(void) -+{ -+ //1 + 6 * handle_bumfer, handle_number = 0 -+ uint8_t profileinfo_buf[] = {0x00}; -+ rtk_vendor_cmd_to_fw(HCI_VENDOR_SET_PROFILE_REPORT_COMMAND, 1, -+ profileinfo_buf); -+} -+ -+static void rtl_cmd_work(struct work_struct *work) -+{ -+ check_profileinfo_cmd(); -+} -+ -+void rtk_btcoex_open(struct hci_dev *hdev) -+{ -+ if (test_and_set_bit(RTL_COEX_RUNNING, &btrtl_coex.flags)) { -+ RTKBT_WARN("RTL COEX is already running."); -+ return; -+ } -+ -+ RTKBT_INFO("Open BTCOEX"); -+ -+ /* Just for test */ -+ //struct rtl_btinfo_ctl ctl; -+ -+ INIT_DELAYED_WORK(&btrtl_coex.fw_work, (void *)rtl_ev_work); -+ INIT_DELAYED_WORK(&btrtl_coex.cmd_work, rtl_cmd_work); -+#ifdef RTB_SOFTWARE_MAILBOX -+#ifdef RTK_COEX_OVER_SYMBOL 
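	/*
	 * Two transports exist for the software mailbox: with
	 * RTK_COEX_OVER_SYMBOL, messages are exchanged with the Wi-Fi driver
	 * through an skb queue drained by a plain work item; otherwise a UDP
	 * socket (see CONNECT_PORT / CONNECT_PORT_WIFI) is serviced from
	 * delayed work.
	 */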
-+ INIT_WORK(&rtw_work, rtw_work_func); -+ skb_queue_head_init(&rtw_q); -+ rtw_coex_on = 1; -+#else -+ INIT_DELAYED_WORK(&btrtl_coex.sock_work, -+ (void *)udpsocket_recv_data); -+#endif -+#endif /* RTB_SOFTWARE_MAILBOX */ -+ INIT_DELAYED_WORK(&btrtl_coex.l2_work, (void *)rtl_l2_work); -+ -+ btrtl_coex.hdev = hdev; -+#ifdef RTB_SOFTWARE_MAILBOX -+ btrtl_coex.wifi_on = 0; -+#endif -+ -+ init_profile_hash(&btrtl_coex); -+ init_connection_hash(&btrtl_coex); -+ -+ btrtl_coex.pkt_type = 0; -+ btrtl_coex.expect = 0; -+ btrtl_coex.elen = 0; -+ btrtl_coex.tbuff = NULL; -+ -+#ifdef RTB_SOFTWARE_MAILBOX -+#ifndef RTK_COEX_OVER_SYMBOL -+ create_udpsocket(); -+#endif -+ rtkbt_coexmsg_send(invite_req, sizeof(invite_req)); -+#endif -+ queue_delayed_work(btrtl_coex.fw_wq, &btrtl_coex.cmd_work, -+ msecs_to_jiffies(10)); -+ /* Just for test */ -+ //ctl.polling_enable = 1; -+ //ctl.polling_time = 1; -+ //ctl.autoreport_enable = 1; -+ //rtk_handle_bt_info_control((u8 *)&ctl); -+} -+ -+void rtk_btcoex_close(void) -+{ -+ -+ if (!test_and_clear_bit(RTL_COEX_RUNNING, &btrtl_coex.flags)) { -+ RTKBT_WARN("RTL COEX is already closed."); -+ return; -+ } -+ -+ RTKBT_INFO("Close BTCOEX"); -+ -+#ifdef RTB_SOFTWARE_MAILBOX -+ /* Close coex socket */ -+ if (btrtl_coex.wifi_on) -+ rtkbt_coexmsg_send(bt_leave, sizeof(bt_leave)); -+#ifdef RTK_COEX_OVER_SYMBOL -+ rtw_coex_on = 0; -+ skb_queue_purge(&rtw_q); -+ cancel_work_sync(&rtw_work); -+#else -+ cancel_delayed_work_sync(&btrtl_coex.sock_work); -+ if (btrtl_coex.sock_open) { -+ btrtl_coex.sock_open = 0; -+ RTKBT_DBG("release udp socket"); -+ sock_release(btrtl_coex.udpsock); -+ } -+#endif -+ -+ /* Delete all timers */ -+ if (btrtl_coex.polling_enable) { -+ btrtl_coex.polling_enable = 0; -+ del_timer_sync(&(btrtl_coex.polling_timer)); -+ } -+#endif /* RTB_SOFTWARE_MAILBOX */ -+ -+ cancel_delayed_work_sync(&btrtl_coex.fw_work); -+ cancel_delayed_work_sync(&btrtl_coex.l2_work); -+ cancel_delayed_work_sync(&btrtl_coex.cmd_work); -+ -+ flush_connection_hash(&btrtl_coex); -+ flush_profile_hash(&btrtl_coex); -+ btrtl_coex.profile_bitmap = 0; -+ btrtl_coex.profile_status = 0; -+ -+ rtl_free_frags(&btrtl_coex); -+ profileinfo_cmd = 0; -+ RTKBT_DBG("-x"); -+} -+ -+void rtk_btcoex_probe(struct hci_dev *hdev) -+{ -+ btrtl_coex.hdev = hdev; -+ spin_lock_init(&btrtl_coex.spin_lock_sock); -+ spin_lock_init(&btrtl_coex.spin_lock_profile); -+} -+ -+void rtk_btcoex_init(void) -+{ -+ RTKBT_DBG("%s: version: %s", __func__, RTK_VERSION); -+ RTKBT_DBG("create workqueue"); -+#ifdef RTB_SOFTWARE_MAILBOX -+#ifdef RTK_COEX_OVER_SYMBOL -+ RTKBT_INFO("Coex over Symbol"); -+ rtw_wq = create_workqueue("btcoexwork"); -+ skb_queue_head_init(&rtw_q); -+#else -+ RTKBT_INFO("Coex over UDP"); -+ btrtl_coex.sock_wq = create_workqueue("btudpwork"); -+#endif -+#endif /* RTB_SOFTWARE_MAILBOX */ -+ btrtl_coex.fw_wq = create_workqueue("btfwwork"); -+ btrtl_coex.timer_wq = create_workqueue("bttimerwork"); -+ rtl_alloc_buff(&btrtl_coex); -+ spin_lock_init(&btrtl_coex.rxlock); -+} -+ -+void rtk_btcoex_exit(void) -+{ -+ RTKBT_DBG("%s: destroy workqueue", __func__); -+#ifdef RTB_SOFTWARE_MAILBOX -+#ifdef RTK_COEX_OVER_SYMBOL -+ flush_workqueue(rtw_wq); -+ destroy_workqueue(rtw_wq); -+#else -+ flush_workqueue(btrtl_coex.sock_wq); -+ destroy_workqueue(btrtl_coex.sock_wq); -+#endif -+#endif -+ flush_workqueue(btrtl_coex.fw_wq); -+ destroy_workqueue(btrtl_coex.fw_wq); -+ flush_workqueue(btrtl_coex.timer_wq); -+ destroy_workqueue(btrtl_coex.timer_wq); -+ rtl_free_buff(&btrtl_coex); -+} -diff --git 
a/drivers/bluetooth/rtk_coex.h b/drivers/bluetooth/rtk_coex.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/bluetooth/rtk_coex.h -@@ -0,0 +1,378 @@ -+/* -+* -+* Realtek Bluetooth USB driver -+* -+* -+* This program is free software; you can redistribute it and/or modify -+* it under the terms of the GNU General Public License as published by -+* the Free Software Foundation; either version 2 of the License, or -+* (at your option) any later version. -+* -+* This program is distributed in the hope that it will be useful, -+* but WITHOUT ANY WARRANTY; without even the implied warranty of -+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+* GNU General Public License for more details. -+* -+* You should have received a copy of the GNU General Public License -+* along with this program; if not, write to the Free Software -+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+* -+*/ -+#include -+#include -+ -+/*********************************** -+** Realtek - For coexistence ** -+***********************************/ -+#define BTRTL_HCIUSB 0 -+#define BTRTL_HCIUART 1 -+ -+#define BTRTL_HCI_IF BTRTL_HCIUSB -+ -+#define TRUE 1 -+#define FALSE 0 -+ -+#define CONNECT_PORT 30001 -+#define CONNECT_PORT_WIFI 30000 -+ -+#define invite_req "INVITE_REQ" -+#define invite_rsp "INVITE_RSP" -+#define attend_req "ATTEND_REQ" -+#define attend_ack "ATTEND_ACK" -+#define wifi_leave "WIFI_LEAVE" -+#define leave_ack "LEAVE_ACK" -+#define bt_leave "BT_LEAVE" -+ -+#define HCI_OP_PERIODIC_INQ 0x0403 -+#define HCI_EV_LE_META 0x3e -+#define HCI_EV_LE_CONN_COMPLETE 0x01 -+#define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03 -+#define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a -+ -+//vendor cmd to fw -+#define HCI_VENDOR_ENABLE_PROFILE_REPORT_COMMAND 0xfc18 -+#define HCI_VENDOR_SET_PROFILE_REPORT_LEGACY_COMMAND 0xfc19 -+#define HCI_VENDOR_SET_PROFILE_REPORT_COMMAND 0xfc1B -+#define HCI_VENDOR_MAILBOX_CMD 0xfc8f -+#define HCI_VENDOR_SET_BITPOOL 0xfc51 -+ -+//subcmd to fw -+#define HCI_VENDOR_SUB_CMD_WIFI_CHANNEL_AND_BANDWIDTH_CMD 0x11 -+#define HCI_VENDOR_SUB_CMD_WIFI_FORCE_TX_POWER_CMD 0x17 -+#define HCI_VENDOR_SUB_CMD_BT_ENABLE_IGNORE_WLAN_ACT_CMD 0x1B -+#define HCI_VENDOR_SUB_CMD_BT_REPORT_CONN_SCO_INQ_INFO 0x23 -+#define HCI_VENDOR_SUB_CMD_BT_AUTO_REPORT_STATUS_INFO 0x27 -+#define HCI_VENDOR_SUB_CMD_BT_AUTO_REPORT_ENABLE 0x28 -+#define HCI_VENDOR_SUB_CMD_BT_SET_TXRETRY_REPORT_PARAM 0x29 -+#define HCI_VENDOR_SUB_CMD_BT_SET_PTATABLE 0x2A -+#define HCI_VENDOR_SUB_CMD_SET_BT_PSD_MODE 0x31 -+#define HCI_VENDOR_SUB_CMD_SET_BT_LNA_CONSTRAINT 0x32 -+#define HCI_VENDOR_SUB_CMD_GET_AFH_MAP_L 0x40 -+#define HCI_VENDOR_SUB_CMD_GET_AFH_MAP_M 0x41 -+#define HCI_VENDOR_SUB_CMD_GET_AFH_MAP_H 0x42 -+#define HCI_VENDOR_SUB_CMD_RD_REG_REQ 0x43 -+#define HCI_VENDOR_SUB_CMD_WR_REG_REQ 0x44 -+ -+#define HCI_EV_VENDOR_SPECIFIC 0xff -+ -+//sub event from fw start -+#define HCI_VENDOR_PTA_REPORT_EVENT 0x24 -+#define HCI_VENDOR_PTA_AUTO_REPORT_EVENT 0x25 -+ -+//vendor cmd to wifi driver -+#define HCI_GRP_VENDOR_SPECIFIC (0x3f << 10) -+#define HCI_OP_HCI_EXTENSION_VERSION_NOTIFY (0x0100 | HCI_GRP_VENDOR_SPECIFIC) -+#define HCI_OP_BT_OPERATION_NOTIFY (0x0102 | HCI_GRP_VENDOR_SPECIFIC) -+#define HCI_OP_HCI_BT_INFO_NOTIFY (0x0106 | HCI_GRP_VENDOR_SPECIFIC) -+#define HCI_OP_HCI_BT_COEX_NOTIFY (0x0107 | HCI_GRP_VENDOR_SPECIFIC) -+#define HCI_OP_HCI_BT_PATCH_VER_NOTIFY (0x0108 | HCI_GRP_VENDOR_SPECIFIC) -+#define HCI_OP_HCI_BT_AFH_MAP_NOTIFY (0x0109 | HCI_GRP_VENDOR_SPECIFIC) -+#define 
HCI_OP_HCI_BT_REGISTER_VALUE_NOTIFY (0x010a | HCI_GRP_VENDOR_SPECIFIC) -+ -+//bt info reason to wifi -+#define HOST_RESPONSE 0 //Host response when receive the BT Info Control Event -+#define POLLING_RESPONSE 1 //The BT Info response for polling by BT firmware. -+#define AUTO_REPORT 2 //BT auto report by BT firmware. -+#define STACK_REPORT_WHILE_DEVICE_D2 3 //Stack report when BT firmware is under power save state(ex:D2) -+ -+// vendor event from wifi -+#define RTK_HS_EXTENSION_EVENT_WIFI_SCAN 0x01 -+#define RTK_HS_EXTENSION_EVENT_RADIO_STATUS_NOTIFY 0x02 -+#define RTK_HS_EXTENSION_EVENT_HCI_BT_INFO_CONTROL 0x03 -+#define RTK_HS_EXTENSION_EVENT_HCI_BT_COEX_CONTROL 0x04 -+ -+//op code from wifi -+#define BT_PATCH_VERSION_QUERY 0x00 -+#define IGNORE_WLAN_ACTIVE_CONTROL 0x01 -+#define LNA_CONSTRAIN_CONTROL 0x02 -+#define BT_POWER_DECREASE_CONTROL 0x03 -+#define BT_PSD_MODE_CONTROL 0x04 -+#define WIFI_BW_CHNL_NOTIFY 0x05 -+#define QUERY_BT_AFH_MAP 0x06 -+#define BT_REGISTER_ACCESS 0x07 -+ -+//bt operation to notify -+#define BT_OPCODE_NONE 0 -+#define BT_OPCODE_INQUIRY_START 1 -+#define BT_OPCODE_INQUIRY_END 2 -+#define BT_OPCODE_PAGE_START 3 -+#define BT_OPCODE_PAGE_SUCCESS_END 4 -+#define BT_OPCODE_PAGE_UNSUCCESS_END 5 -+#define BT_OPCODE_PAIR_START 6 -+#define BT_OPCODE_PAIR_END 7 -+#define BT_OPCODE_ENABLE_BT 8 -+#define BT_OPCODE_DISABLE_BT 9 -+ -+#define HCI_EXTENSION_VERSION 0x0004 -+#define HCI_CMD_PREAMBLE_SIZE 3 -+#define PAN_PACKET_COUNT 5 -+ -+#define STREAM_TO_UINT16(u16, p) {u16 = ((uint16_t)(*(p)) + (((uint16_t)(*((p) + 1))) << 8)); (p) += 2;} -+#define UINT16_TO_STREAM(p, u16) {*(p)++ = (uint8_t)(u16); *(p)++ = (uint8_t)((u16) >> 8);} -+ -+#define PSM_SDP 0x0001 -+#define PSM_RFCOMM 0x0003 -+#define PSM_PAN 0x000F -+#define PSM_HID 0x0011 -+#define PSM_HID_INT 0x0013 -+#define PSM_AVCTP 0x0017 -+#define PSM_AVDTP 0x0019 -+#define PSM_FTP 0x1001 -+#define PSM_BIP 0x1003 -+#define PSM_OPP 0x1005 -+//--add more if needed--// -+ -+enum { -+ profile_sco = 0, -+ profile_hid = 1, -+ profile_a2dp = 2, -+ profile_pan = 3, -+ profile_hid_interval = 4, -+ profile_hogp = 5, -+ profile_voice = 6, -+ profile_sink = 7, -+ profile_max = 8 -+}; -+ -+#define A2DP_SIGNAL 0x01 -+#define A2DP_MEDIA 0x02 -+//profile info data -+typedef struct { -+ struct list_head list; -+ uint16_t handle; -+ uint16_t psm; -+ uint16_t dcid; -+ uint16_t scid; -+ uint8_t profile_index; -+ uint8_t flags; -+} rtk_prof_info, *prtk_prof_info; -+ -+//profile info for each connection -+typedef struct rtl_hci_conn { -+ struct list_head list; -+ uint16_t handle; -+ struct delayed_work a2dp_count_work; -+ struct delayed_work pan_count_work; -+ struct delayed_work hogp_count_work; -+ uint32_t a2dp_packet_count; -+ uint32_t pan_packet_count; -+ uint32_t hogp_packet_count; -+ uint32_t voice_packet_count; -+ uint8_t type; // 0:l2cap, 1:sco/esco, 2:le -+ uint16_t profile_bitmap; -+ uint16_t profile_status; -+ int8_t profile_refcount[8]; -+} rtk_conn_prof, *prtk_conn_prof; -+ -+#ifdef RTB_SOFTWARE_MAILBOX -+ -+struct rtl_btinfo { -+ u8 cmd; -+ u8 len; -+ u8 data[6]; -+}; -+#define RTL_BTINFO_LEN (sizeof(struct rtl_btinfo)) -+/* typedef struct { -+ * uint8_t cmd_index; -+ * uint8_t cmd_length; -+ * uint8_t link_status; -+ * uint8_t retry_cnt; -+ * uint8_t rssi; -+ * uint8_t mailbox_info; -+ * uint16_t acl_throughput; -+ * } hci_linkstatus_report; */ -+ -+typedef struct { -+ uint8_t type; -+ uint32_t offset; -+ uint32_t value; -+} hci_mailbox_register; -+ -+struct rtl_btinfo_ctl { -+ uint8_t polling_enable; -+ uint8_t polling_time; -+ 
uint8_t autoreport_enable; -+}; -+#endif /* RTB_SOFTWARE_MAILBOX */ -+ -+#define MAX_LEN_OF_HCI_EV 32 -+#define NUM_RTL_HCI_EV 32 -+struct rtl_hci_ev { -+ __u8 data[MAX_LEN_OF_HCI_EV]; -+ __u16 len; -+ struct list_head list; -+}; -+ -+#define L2_MAX_SUBSEC_LEN 128 -+#define L2_MAX_PKTS 16 -+struct rtl_l2_buff { -+ __u8 data[L2_MAX_SUBSEC_LEN]; -+ __u16 len; -+ __u16 out; -+ struct list_head list; -+}; -+ -+struct rtl_coex_struct { -+ struct list_head conn_hash; //hash for connections -+ struct list_head profile_list; //hash for profile info -+ struct hci_dev *hdev; -+#ifdef RTB_SOFTWARE_MAILBOX -+ struct socket *udpsock; -+ struct sockaddr_in addr; -+ struct sockaddr_in wifi_addr; -+ struct timer_list polling_timer; -+#endif -+#ifdef RTB_SOFTWARE_MAILBOX -+ struct workqueue_struct *sock_wq; -+ struct delayed_work sock_work; -+#endif -+ struct workqueue_struct *fw_wq; -+ struct workqueue_struct *timer_wq; -+ struct delayed_work fw_work; -+ struct delayed_work l2_work; -+ struct delayed_work cmd_work; -+#ifdef RTB_SOFTWARE_MAILBOX -+ struct sock *sk; -+#endif -+ struct urb *urb; -+ spinlock_t spin_lock_sock; -+ spinlock_t spin_lock_profile; -+ uint16_t profile_bitmap; -+ uint16_t profile_status; -+ int8_t profile_refcount[8]; -+ uint8_t ispairing; -+ uint8_t isinquirying; -+ uint8_t ispaging; -+#ifdef RTB_SOFTWARE_MAILBOX -+ uint8_t wifi_state; -+ uint8_t autoreport; -+ uint8_t polling_enable; -+ uint8_t polling_interval; -+ uint8_t piconet_id; -+ uint8_t mode; -+ uint8_t afh_map[10]; -+#endif -+ uint16_t hci_reversion; -+ uint16_t lmp_subversion; -+#ifdef RTB_SOFTWARE_MAILBOX -+ uint8_t wifi_on; -+ uint8_t sock_open; -+#endif -+ -+ unsigned long cmd_last_tx; -+ -+ /* hci ev buff */ -+ struct list_head ev_used_list; -+ struct list_head ev_free_list; -+ -+ spinlock_t rxlock; -+ __u8 pkt_type; -+ __u16 expect; -+ __u8 *tbuff; -+ __u16 elen; -+ __u8 back_buff[HCI_MAX_EVENT_SIZE]; -+ -+ /* l2cap rx buff */ -+ struct list_head l2_used_list; -+ struct list_head l2_free_list; -+ -+ /* buff addr and size */ -+ spinlock_t buff_lock; -+ unsigned long pages_addr; -+ unsigned long buff_size; -+ -+#define RTL_COEX_RUNNING (1 << 0) -+ unsigned long flags; -+ -+}; -+ -+#ifdef __LITTLE_ENDIAN -+struct sbc_frame_hdr { -+ uint8_t syncword:8; /* Sync word */ -+ uint8_t subbands:1; /* Subbands */ -+ uint8_t allocation_method:1; /* Allocation method */ -+ uint8_t channel_mode:2; /* Channel mode */ -+ uint8_t blocks:2; /* Blocks */ -+ uint8_t sampling_frequency:2; /* Sampling frequency */ -+ uint8_t bitpool:8; /* Bitpool */ -+ uint8_t crc_check:8; /* CRC check */ -+} __attribute__ ((packed)); -+ -+/* NOTE: The code is copied from pa. -+ * only the bit field in 8-bit is affected by endian, not the 16-bit or 32-bit. -+ * why? 
-+ */ -+struct rtp_header { -+ unsigned cc:4; -+ unsigned x:1; -+ unsigned p:1; -+ unsigned v:2; -+ -+ unsigned pt:7; -+ unsigned m:1; -+ -+ uint16_t sequence_number; -+ uint32_t timestamp; -+ uint32_t ssrc; -+ uint32_t csrc[0]; -+} __attribute__ ((packed)); -+ -+#else -+/* big endian */ -+struct sbc_frame_hdr { -+ uint8_t syncword:8; /* Sync word */ -+ uint8_t sampling_frequency:2; /* Sampling frequency */ -+ uint8_t blocks:2; /* Blocks */ -+ uint8_t channel_mode:2; /* Channel mode */ -+ uint8_t allocation_method:1; /* Allocation method */ -+ uint8_t subbands:1; /* Subbands */ -+ uint8_t bitpool:8; /* Bitpool */ -+ uint8_t crc_check:8; /* CRC check */ -+} __attribute__ ((packed)); -+ -+struct rtp_header { -+ unsigned v:2; -+ unsigned p:1; -+ unsigned x:1; -+ unsigned cc:4; -+ -+ unsigned m:1; -+ unsigned pt:7; -+ -+ uint16_t sequence_number; -+ uint32_t timestamp; -+ uint32_t ssrc; -+ uint32_t csrc[0]; -+} __attribute__ ((packed)); -+#endif /* __LITTLE_ENDIAN */ -+ -+void rtk_btcoex_parse_event(uint8_t *buffer, int count); -+void rtk_btcoex_parse_cmd(uint8_t *buffer, int count); -+void rtk_btcoex_parse_l2cap_data_tx(uint8_t *buffer, int count); -+void rtk_btcoex_parse_l2cap_data_rx(uint8_t *buffer, int count); -+ -+void rtk_btcoex_open(struct hci_dev *hdev); -+void rtk_btcoex_close(void); -+void rtk_btcoex_probe(struct hci_dev *hdev); -+void rtk_btcoex_init(void); -+void rtk_btcoex_exit(void); -diff --git a/drivers/bluetooth/rtk_misc.c b/drivers/bluetooth/rtk_misc.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/bluetooth/rtk_misc.c -@@ -0,0 +1,2517 @@ -+/* -+ * -+ * Realtek Bluetooth USB download firmware driver -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) -+#include -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "rtk_misc.h" -+ -+#include -+#include -+#define BDADDR_STRING_LEN 17 -+#define BDADDR_FILE "/opt/bdaddr" -+ -+struct cfg_list_item { -+ struct list_head list; -+ u16 offset; -+ u8 len; -+ u8 data[0]; -+}; -+ -+static struct list_head list_configs; -+ -+#define EXTRA_CONFIG_FILE "/opt/rtk_btconfig.txt" -+static struct list_head list_extracfgs; -+ -+#define CMD_CMP_EVT 0x0e -+#define PKT_LEN 300 -+#define MSG_TO 1000 //us -+#define PATCH_SEG_MAX 252 -+#define DATA_END 0x80 -+#define DOWNLOAD_OPCODE 0xfc20 -+/* This command is used only for TV patch -+ * if host is going to suspend state, it should send this command to -+ * Controller. 
Controller will scan the special advertising packet -+ * which indicates Controller to wake up host */ -+#define STARTSCAN_OPCODE 0xfc28 -+#define TRUE 1 -+#define FALSE 0 -+#define CMD_HDR_LEN sizeof(struct hci_command_hdr) -+#define EVT_HDR_LEN sizeof(struct hci_event_hdr) -+#define CMD_CMP_LEN sizeof(struct hci_ev_cmd_complete) -+ -+#define HCI_CMD_READ_BD_ADDR 0x1009 -+#define HCI_VENDOR_CHANGE_BDRATE 0xfc17 -+#define HCI_VENDOR_READ_RTK_ROM_VERISION 0xfc6d -+#define HCI_VENDOR_READ_LMP_VERISION 0x1001 -+#define HCI_VENDOR_READ_CMD 0xfc61 -+ -+#define ROM_LMP_NONE 0x0000 -+#define ROM_LMP_8723a 0x1200 -+#define ROM_LMP_8723b 0x8723 -+#define ROM_LMP_8821a 0X8821 -+#define ROM_LMP_8761a 0X8761 -+#define ROM_LMP_8822b 0X8822 -+#define ROM_LMP_8852a 0x8852 -+#define ROM_LMP_8851b 0x8851 -+ -+#define PATCH_SNIPPETS 0x01 -+#define PATCH_DUMMY_HEADER 0x02 -+#define PATCH_SECURITY_HEADER 0x03 -+#define PATCH_OTA_FLAG 0x04 -+#define SECTION_HEADER_SIZE 8 -+ -+struct rtk_eversion_evt { -+ uint8_t status; -+ uint8_t version; -+} __attribute__ ((packed)); -+ -+struct rtk_security_proj_evt { -+ uint8_t status; -+ uint8_t key_id; -+} __attribute__ ((packed)); -+ -+struct rtk_chip_type_evt { -+ uint8_t status; -+ uint16_t chip; -+} __attribute__ ((packed)); -+ -+enum rtk_read_class { -+ READ_NONE = 0, -+ READ_CHIP_TYPE = 1, -+ READ_CHIP_VER = 2, -+ READ_SEC_PROJ = 3 -+}; -+ -+struct rtk_epatch_entry { -+ uint16_t chipID; -+ uint16_t patch_length; -+ uint32_t start_offset; -+} __attribute__ ((packed)); -+ -+struct rtk_epatch { -+ uint8_t signature[8]; -+ __le32 fw_version; -+ __le16 number_of_total_patch; -+ struct rtk_epatch_entry entry[0]; -+} __attribute__ ((packed)); -+ -+struct rtk_extension_entry { -+ uint8_t opcode; -+ uint8_t length; -+ uint8_t *data; -+} __attribute__ ((packed)); -+ -+struct rtb_section_hdr { -+ uint32_t opcode; -+ uint32_t section_len; -+ uint32_t soffset; -+} __attribute__ ((packed)); -+ -+struct rtb_new_patch_hdr { -+ uint8_t signature[8]; -+ uint8_t fw_version[8]; -+ __le32 number_of_section; -+} __attribute__ ((packed)); -+ -+//signature: Realtech -+static const uint8_t RTK_EPATCH_SIGNATURE[8] = -+ { 0x52, 0x65, 0x61, 0x6C, 0x74, 0x65, 0x63, 0x68 }; -+ -+//signature: RTBTCore -+static const uint8_t RTK_EPATCH_SIGNATURE_NEW[8] = -+ { 0x52, 0x54, 0x42, 0x54, 0x43, 0x6F, 0x72, 0x65 }; -+ -+//Extension Section IGNATURE:0x77FD0451 -+static const uint8_t Extension_Section_SIGNATURE[4] = { 0x51, 0x04, 0xFD, 0x77 }; -+ -+static uint16_t project_id[] = { -+ ROM_LMP_8723a, -+ ROM_LMP_8723b, -+ ROM_LMP_8821a, -+ ROM_LMP_8761a, -+ ROM_LMP_NONE, -+ ROM_LMP_NONE, -+ ROM_LMP_NONE, -+ ROM_LMP_NONE, -+ ROM_LMP_8822b, -+ ROM_LMP_8723b, /* RTL8723DU */ -+ ROM_LMP_8821a, /* RTL8821CU */ -+ ROM_LMP_NONE, -+ ROM_LMP_NONE, -+ ROM_LMP_8822b, /* RTL8822CU */ -+ ROM_LMP_8761a, /* index 14 for 8761BU */ -+ ROM_LMP_NONE, -+ ROM_LMP_NONE, -+ ROM_LMP_NONE, -+ ROM_LMP_8852a, /* index 18 for 8852AU */ -+ ROM_LMP_8723b, /* index 19 for 8723FU */ -+ ROM_LMP_8852a, /* index 20 for 8852BU */ -+ ROM_LMP_NONE, -+ ROM_LMP_NONE, -+ ROM_LMP_NONE, -+ ROM_LMP_NONE, -+ ROM_LMP_8852a, /* index 25 for 8852CU */ -+ ROM_LMP_NONE, -+ ROM_LMP_NONE, -+ ROM_LMP_NONE, -+ ROM_LMP_NONE, -+ ROM_LMP_NONE, -+ ROM_LMP_NONE, -+ ROM_LMP_NONE, -+ ROM_LMP_8822b, /* index 33 for 8822EU */ -+ ROM_LMP_NONE, -+ ROM_LMP_NONE, -+ ROM_LMP_8851b, /* index 36 for 8851BU */ -+}; -+ -+enum rtk_endpoit { -+ CTRL_EP = 0, -+ INTR_EP = 1, -+ BULK_EP = 2, -+ ISOC_EP = 3 -+}; -+ -+/* software id */ -+#define RTLPREVIOUS 0x00 -+#define RTL8822BU 
0x70 -+#define RTL8723DU 0x71 -+#define RTL8821CU 0x72 -+#define RTL8822CU 0x73 -+#define RTL8761BU 0x74 -+#define RTL8852AU 0x75 -+#define RTL8723FU 0x76 -+#define RTL8852BU 0x77 -+#define RTL8852CU 0x78 -+#define RTL8822EU 0x79 -+#define RTL8851BU 0x7A -+ -+typedef struct { -+ uint16_t prod_id; -+ uint16_t lmp_sub; -+ char * mp_patch_name; -+ char * patch_name; -+ char * config_name; -+ u8 chip_type; -+} patch_info; -+ -+typedef struct { -+ struct list_head list_node; -+ struct usb_interface *intf; -+ struct usb_device *udev; -+ patch_info *patch_entry; -+} dev_data; -+ -+typedef struct { -+ dev_data *dev_entry; -+ int pipe_in, pipe_out; -+ uint8_t *send_pkt; -+ uint8_t *rcv_pkt; -+ struct hci_command_hdr *cmd_hdr; -+ struct hci_event_hdr *evt_hdr; -+ struct hci_ev_cmd_complete *cmd_cmp; -+ uint8_t *req_para, *rsp_para; -+ uint8_t *fw_data; -+ int pkt_len, fw_len; -+} xchange_data; -+ -+typedef struct { -+ uint8_t index; -+ uint8_t data[PATCH_SEG_MAX]; -+} __attribute__ ((packed)) download_cp; -+ -+typedef struct { -+ uint8_t status; -+ uint8_t index; -+} __attribute__ ((packed)) download_rp; -+ -+#define RTK_VENDOR_CONFIG_MAGIC 0x8723ab55 -+static const u8 cfg_magic[4] = { 0x55, 0xab, 0x23, 0x87 }; -+struct rtk_bt_vendor_config_entry { -+ __le16 offset; -+ uint8_t entry_len; -+ uint8_t entry_data[0]; -+} __attribute__ ((packed)); -+ -+struct rtk_bt_vendor_config { -+ __le32 signature; -+ __le16 data_len; -+ struct rtk_bt_vendor_config_entry entry[0]; -+} __attribute__ ((packed)); -+#define BT_CONFIG_HDRLEN sizeof(struct rtk_bt_vendor_config) -+ -+static uint8_t gEVersion = 0xFF; -+static uint8_t g_key_id = 0; -+ -+static dev_data *dev_data_find(struct usb_interface *intf); -+static patch_info *get_patch_entry(struct usb_device *udev); -+static int load_firmware(dev_data * dev_entry, uint8_t ** buff); -+static void init_xdata(xchange_data * xdata, dev_data * dev_entry); -+static int check_fw_version(xchange_data * xdata); -+static int download_data(xchange_data * xdata); -+static int send_hci_cmd(xchange_data * xdata); -+static int rcv_hci_evt(xchange_data * xdata); -+static uint8_t rtk_get_eversion(dev_data * dev_entry); -+static int rtk_vendor_read(dev_data * dev_entry, uint8_t class); -+ -+static patch_info fw_patch_table[] = { -+/* { pid, lmp_sub, mp_fw_name, fw_name, config_name, chip_type } */ -+ {0x1724, 0x1200, "mp_rtl8723a_fw", "rtl8723a_fw", "rtl8723a_config", RTLPREVIOUS}, /* RTL8723A */ -+ {0x8723, 0x1200, "mp_rtl8723a_fw", "rtl8723a_fw", "rtl8723a_config", RTLPREVIOUS}, /* 8723AE */ -+ {0xA723, 0x1200, "mp_rtl8723a_fw", "rtl8723a_fw", "rtl8723a_config", RTLPREVIOUS}, /* 8723AE for LI */ -+ {0x0723, 0x1200, "mp_rtl8723a_fw", "rtl8723a_fw", "rtl8723a_config", RTLPREVIOUS}, /* 8723AE */ -+ {0x3394, 0x1200, "mp_rtl8723a_fw", "rtl8723a_fw", "rtl8723a_config", RTLPREVIOUS}, /* 8723AE for Azurewave */ -+ -+ {0x0724, 0x1200, "mp_rtl8723a_fw", "rtl8723a_fw", "rtl8723a_config", RTLPREVIOUS}, /* 8723AU */ -+ {0x8725, 0x1200, "mp_rtl8723a_fw", "rtl8723a_fw", "rtl8723a_config", RTLPREVIOUS}, /* 8723AU */ -+ {0x872A, 0x1200, "mp_rtl8723a_fw", "rtl8723a_fw", "rtl8723a_config", RTLPREVIOUS}, /* 8723AU */ -+ {0x872B, 0x1200, "mp_rtl8723a_fw", "rtl8723a_fw", "rtl8723a_config", RTLPREVIOUS}, /* 8723AU */ -+ -+ {0xb720, 0x8723, "mp_rtl8723b_fw", "rtl8723b_fw", "rtl8723b_config", RTLPREVIOUS}, /* RTL8723BU */ -+ {0xb72A, 0x8723, "mp_rtl8723b_fw", "rtl8723b_fw", "rtl8723b_config", RTLPREVIOUS}, /* RTL8723BU */ -+ {0xb728, 0x8723, "mp_rtl8723b_fw", "rtl8723b_fw", "rtl8723b_config", RTLPREVIOUS}, 
/* RTL8723BE for LC */ -+ {0xb723, 0x8723, "mp_rtl8723b_fw", "rtl8723b_fw", "rtl8723b_config", RTLPREVIOUS}, /* RTL8723BE */ -+ {0xb72B, 0x8723, "mp_rtl8723b_fw", "rtl8723b_fw", "rtl8723b_config", RTLPREVIOUS}, /* RTL8723BE */ -+ {0xb001, 0x8723, "mp_rtl8723b_fw", "rtl8723b_fw", "rtl8723b_config", RTLPREVIOUS}, /* RTL8723BE for HP */ -+ {0xb002, 0x8723, "mp_rtl8723b_fw", "rtl8723b_fw", "rtl8723b_config", RTLPREVIOUS}, /* RTL8723BE */ -+ {0xb003, 0x8723, "mp_rtl8723b_fw", "rtl8723b_fw", "rtl8723b_config", RTLPREVIOUS}, /* RTL8723BE */ -+ {0xb004, 0x8723, "mp_rtl8723b_fw", "rtl8723b_fw", "rtl8723b_config", RTLPREVIOUS}, /* RTL8723BE */ -+ {0xb005, 0x8723, "mp_rtl8723b_fw", "rtl8723b_fw", "rtl8723b_config", RTLPREVIOUS}, /* RTL8723BE */ -+ -+ {0x3410, 0x8723, "mp_rtl8723b_fw", "rtl8723b_fw", "rtl8723b_config", RTLPREVIOUS}, /* RTL8723BE for Azurewave */ -+ {0x3416, 0x8723, "mp_rtl8723b_fw", "rtl8723b_fw", "rtl8723b_config", RTLPREVIOUS}, /* RTL8723BE for Azurewave */ -+ {0x3459, 0x8723, "mp_rtl8723b_fw", "rtl8723b_fw", "rtl8723b_config", RTLPREVIOUS}, /* RTL8723BE for Azurewave */ -+ {0xE085, 0x8723, "mp_rtl8723b_fw", "rtl8723b_fw", "rtl8723b_config", RTLPREVIOUS}, /* RTL8723BE for Foxconn */ -+ {0xE08B, 0x8723, "mp_rtl8723b_fw", "rtl8723b_fw", "rtl8723b_config", RTLPREVIOUS}, /* RTL8723BE for Foxconn */ -+ {0xE09E, 0x8723, "mp_rtl8723b_fw", "rtl8723b_fw", "rtl8723b_config", RTLPREVIOUS}, /* RTL8723BE for Foxconn */ -+ -+ {0xA761, 0x8761, "mp_rtl8761a_fw", "rtl8761au_fw", "rtl8761a_config", RTLPREVIOUS}, /* RTL8761AU only */ -+ {0x818B, 0x8761, "mp_rtl8761a_fw", "rtl8761aw_fw", "rtl8761aw_config", RTLPREVIOUS}, /* RTL8761AW + 8192EU */ -+ {0x818C, 0x8761, "mp_rtl8761a_fw", "rtl8761aw_fw", "rtl8761aw_config", RTLPREVIOUS}, /* RTL8761AW + 8192EU */ -+ {0x8760, 0x8761, "mp_rtl8761a_fw", "rtl8761au8192ee_fw", "rtl8761a_config", RTLPREVIOUS}, /* RTL8761AU + 8192EE */ -+ {0xB761, 0x8761, "mp_rtl8761a_fw", "rtl8761au_fw", "rtl8761a_config", RTLPREVIOUS}, /* RTL8761AUV only */ -+ {0x8761, 0x8761, "mp_rtl8761a_fw", "rtl8761au8192ee_fw", "rtl8761a_config", RTLPREVIOUS}, /* RTL8761AU + 8192EE for LI */ -+ {0x8A60, 0x8761, "mp_rtl8761a_fw", "rtl8761au8812ae_fw", "rtl8761a_config", RTLPREVIOUS}, /* RTL8761AU + 8812AE */ -+ {0x3527, 0x8761, "mp_rtl8761a_fw", "rtl8761au8192ee_fw", "rtl8761a_config", RTLPREVIOUS}, /* RTL8761AU + 8814AE */ -+ -+ {0x8821, 0x8821, "mp_rtl8821a_fw", "rtl8821a_fw", "rtl8821a_config", RTLPREVIOUS}, /* RTL8821AE */ -+ {0x0821, 0x8821, "mp_rtl8821a_fw", "rtl8821a_fw", "rtl8821a_config", RTLPREVIOUS}, /* RTL8821AE */ -+ {0x0823, 0x8821, "mp_rtl8821a_fw", "rtl8821a_fw", "rtl8821a_config", RTLPREVIOUS}, /* RTL8821AU */ -+ {0x3414, 0x8821, "mp_rtl8821a_fw", "rtl8821a_fw", "rtl8821a_config", RTLPREVIOUS}, /* RTL8821AE */ -+ {0x3458, 0x8821, "mp_rtl8821a_fw", "rtl8821a_fw", "rtl8821a_config", RTLPREVIOUS}, /* RTL8821AE */ -+ {0x3461, 0x8821, "mp_rtl8821a_fw", "rtl8821a_fw", "rtl8821a_config", RTLPREVIOUS}, /* RTL8821AE */ -+ {0x3462, 0x8821, "mp_rtl8821a_fw", "rtl8821a_fw", "rtl8821a_config", RTLPREVIOUS}, /* RTL8821AE */ -+ -+ {0xb82c, 0x8822, "mp_rtl8822bu_fw", "rtl8822bu_fw", "rtl8822bu_config", RTL8822BU}, /* RTL8822BU */ -+ -+ {0xd720, 0x8723, "mp_rtl8723du_fw", "rtl8723du_fw", "rtl8723du_config", RTL8723DU}, /* RTL8723DU */ -+ {0xd723, 0x8723, "mp_rtl8723du_fw", "rtl8723du_fw", "rtl8723du_config", RTL8723DU}, /* RTL8723DU */ -+ {0xd739, 0x8723, "mp_rtl8723du_fw", "rtl8723du_fw", "rtl8723du_config", RTL8723DU}, /* RTL8723DU */ -+ {0xb009, 0x8723, "mp_rtl8723du_fw", "rtl8723du_fw", 
"rtl8723du_config", RTL8723DU}, /* RTL8723DU */ -+ {0x0231, 0x8723, "mp_rtl8723du_fw", "rtl8723du_fw", "rtl8723du_config", RTL8723DU}, /* RTL8723DU for LiteOn */ -+ -+ {0xb820, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CU */ -+ {0xc820, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CU */ -+ {0xc821, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE */ -+ {0xc823, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE */ -+ {0xc824, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE */ -+ {0xc825, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE */ -+ {0xc827, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE */ -+ {0xc025, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE */ -+ {0xc024, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE */ -+ {0xc030, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE */ -+ {0xb00a, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE */ -+ {0xb00e, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE */ -+ {0xc032, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE */ -+ {0x4000, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE for LiteOn */ -+ {0x4001, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE for LiteOn */ -+ {0x3529, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE for Azurewave */ -+ {0x3530, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE for Azurewave */ -+ {0x3532, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE for Azurewave */ -+ {0x3533, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE for Azurewave */ -+ {0x3538, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE for Azurewave */ -+ {0x3539, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE for Azurewave */ -+ {0x3558, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE for Azurewave */ -+ {0x3559, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE for Azurewave */ -+ {0x3581, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE for Azurewave */ -+ {0x3540, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE */ -+ {0x3541, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE for GSD */ -+ {0x3543, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CE for GSD */ -+ {0xc80c, 0x8821, "mp_rtl8821cu_fw", "rtl8821cu_fw", "rtl8821cu_config", RTL8821CU}, /* RTL8821CUH */ -+ -+ {0xc82c, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CU */ -+ {0xc82e, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CU */ -+ {0xc81d, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CU */ -+ {0xd820, 0x8822, 
"mp_rtl8821du_fw", "rtl8821du_fw", "rtl8821du_config", RTL8822CU}, /* RTL8821DU */ -+ -+ {0xc822, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0xc82b, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0xb00c, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0xb00d, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0xc123, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0xc126, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0xc127, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0xc128, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0xc129, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0xc131, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0xc136, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0x3549, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE for Azurewave */ -+ {0x3548, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE for Azurewave */ -+ {0xc125, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0x4005, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE for LiteOn */ -+ {0x3051, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE for LiteOn */ -+ {0x18ef, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0x161f, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0x3053, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0xc547, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0x3553, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0x3555, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE */ -+ {0xc82f, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE-VS */ -+ {0xc02f, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE-VS */ -+ {0xc03f, 0x8822, "mp_rtl8822cu_fw", "rtl8822cu_fw", "rtl8822cu_config", RTL8822CU}, /* RTL8822CE-VS */ -+ -+ {0x8771, 0x8761, "mp_rtl8761b_fw", "rtl8761bu_fw", "rtl8761bu_config", RTL8761BU}, /* RTL8761BU only */ -+ {0xa725, 0x8761, "mp_rtl8761b_fw", "rtl8725au_fw", "rtl8725au_config", RTL8761BU}, /* RTL8725AU */ -+ {0xa72A, 0x8761, "mp_rtl8761b_fw", "rtl8725au_fw", "rtl8725au_config", RTL8761BU}, /* RTL8725AU BT only */ -+ -+ {0x885a, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AU */ -+ {0x8852, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0xa852, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0x2852, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0x385a, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* 
RTL8852AE */ -+ {0x3852, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0x1852, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0x4852, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0x4006, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0x3561, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0x3562, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0x588a, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0x589a, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0x590a, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0xc125, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0xe852, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0xb852, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0xc852, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0xc549, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0xc127, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ {0x3565, 0x8852, "mp_rtl8852au_fw", "rtl8852au_fw", "rtl8852au_config", RTL8852AU}, /* RTL8852AE */ -+ -+ {0xb733, 0x8723, "mp_rtl8723fu_fw", "rtl8723fu_fw", "rtl8723fu_config", RTL8723FU}, /* RTL8723FU */ -+ {0xb73a, 0x8723, "mp_rtl8723fu_fw", "rtl8723fu_fw", "rtl8723fu_config", RTL8723FU}, /* RTL8723FU */ -+ {0xf72b, 0x8723, "mp_rtl8723fu_fw", "rtl8723fu_fw", "rtl8723fu_config", RTL8723FU}, /* RTL8723FU */ -+ -+ {0x8851, 0x8852, "mp_rtl8851au_fw", "rtl8851au_fw", "rtl8851au_config", RTL8852BU}, /* RTL8851AU */ -+ {0xa85b, 0x8852, "mp_rtl8852bu_fw", "rtl8852bu_fw", "rtl8852bu_config", RTL8852BU}, /* RTL8852BU */ -+ {0xb85b, 0x8852, "mp_rtl8852bu_fw", "rtl8852bu_fw", "rtl8852bu_config", RTL8852BU}, /* RTL8852BE */ -+ {0xb85c, 0x8852, "mp_rtl8852bu_fw", "rtl8852bu_fw", "rtl8852bu_config", RTL8852BU}, /* RTL8852BE */ -+ {0x3571, 0x8852, "mp_rtl8852bu_fw", "rtl8852bu_fw", "rtl8852bu_config", RTL8852BU}, /* RTL8852BE */ -+ {0x3570, 0x8852, "mp_rtl8852bu_fw", "rtl8852bu_fw", "rtl8852bu_config", RTL8852BU}, /* RTL8852BE */ -+ {0x3572, 0x8852, "mp_rtl8852bu_fw", "rtl8852bu_fw", "rtl8852bu_config", RTL8852BU}, /* RTL8852BE */ -+ {0x4b06, 0x8852, "mp_rtl8852bu_fw", "rtl8852bu_fw", "rtl8852bu_config", RTL8852BU}, /* RTL8852BE */ -+ {0x885b, 0x8852, "mp_rtl8852bu_fw", "rtl8852bu_fw", "rtl8852bu_config", RTL8852BU}, /* RTL8852BE */ -+ {0x886b, 0x8852, "mp_rtl8852bu_fw", "rtl8852bu_fw", "rtl8852bu_config", RTL8852BU}, /* RTL8852BE */ -+ {0x887b, 0x8852, "mp_rtl8852bu_fw", "rtl8852bu_fw", "rtl8852bu_config", RTL8852BU}, /* RTL8852BE */ -+ {0xc559, 0x8852, "mp_rtl8852bu_fw", "rtl8852bu_fw", "rtl8852bu_config", RTL8852BU}, /* RTL8852BE */ -+ {0xb052, 0x8852, "mp_rtl8852bu_fw", "rtl8852bu_fw", "rtl8852bu_config", RTL8852BU}, /* RTL8852BE */ -+ {0xb152, 0x8852, "mp_rtl8852bu_fw", "rtl8852bu_fw", "rtl8852bu_config", RTL8852BU}, /* RTL8852BE */ -+ {0xb252, 0x8852, "mp_rtl8852bu_fw", "rtl8852bu_fw", "rtl8852bu_config", RTL8852BU}, /* RTL8852BE */ -+ {0x4853, 0x8852, 
"mp_rtl8852bu_fw", "rtl8852bu_fw", "rtl8852bu_config", RTL8852BU}, /* RTL8852BE */ -+ {0x1670, 0x8852, "mp_rtl8852bu_fw", "rtl8852bu_fw", "rtl8852bu_config", RTL8852BU}, /* RTL8852BE */ -+ -+ {0xc85a, 0x8852, "mp_rtl8852cu_fw", "rtl8852cu_fw", "rtl8852cu_config", RTL8852CU}, /* RTL8852CU */ -+ {0xc85d, 0x8852, "mp_rtl8852cu_fw", "rtl8852cu_fw", "rtl8852cu_config", RTL8852CU}, /* RTL8852CU */ -+ {0x0852, 0x8852, "mp_rtl8852cu_fw", "rtl8852cu_fw", "rtl8852cu_config", RTL8852CU}, /* RTL8852CE */ -+ {0x5852, 0x8852, "mp_rtl8852cu_fw", "rtl8852cu_fw", "rtl8852cu_config", RTL8852CU}, /* RTL8852CE */ -+ {0xc85c, 0x8852, "mp_rtl8852cu_fw", "rtl8852cu_fw", "rtl8852cu_config", RTL8852CU}, /* RTL8852CE */ -+ {0x885c, 0x8852, "mp_rtl8852cu_fw", "rtl8852cu_fw", "rtl8852cu_config", RTL8852CU}, /* RTL8852CE */ -+ {0x886c, 0x8852, "mp_rtl8852cu_fw", "rtl8852cu_fw", "rtl8852cu_config", RTL8852CU}, /* RTL8852CE */ -+ {0x887c, 0x8852, "mp_rtl8852cu_fw", "rtl8852cu_fw", "rtl8852cu_config", RTL8852CU}, /* RTL8852CE */ -+ {0x4007, 0x8852, "mp_rtl8852cu_fw", "rtl8852cu_fw", "rtl8852cu_config", RTL8852CU}, /* RTL8852CE */ -+ -+ {0xe822, 0x8822, "mp_rtl8822eu_fw", "rtl8822eu_fw", "rtl8822eu_config", RTL8822EU}, /* RTL8822EU */ -+ {0xa82a, 0x8822, "mp_rtl8822eu_fw", "rtl8822eu_fw", "rtl8822eu_config", RTL8822EU}, /* RTL8822EU */ -+ -+ {0xb851, 0x8851, "mp_rtl8851bu_fw", "rtl8851bu_fw", "rtl8851bu_config", RTL8851BU}, /* RTL8851BU */ -+ -+/* NOTE: must append patch entries above the null entry */ -+ {0, 0, NULL, NULL, NULL, 0} -+}; -+ -+static LIST_HEAD(dev_data_list); -+ -+static void util_hexdump(const u8 *buf, size_t len) -+{ -+ static const char hexdigits[] = "0123456789abcdef"; -+ char str[16 * 3]; -+ size_t i; -+ -+ if (!buf || !len) -+ return; -+ -+ for (i = 0; i < len; i++) { -+ str[((i % 16) * 3)] = hexdigits[buf[i] >> 4]; -+ str[((i % 16) * 3) + 1] = hexdigits[buf[i] & 0xf]; -+ str[((i % 16) * 3) + 2] = ' '; -+ if ((i + 1) % 16 == 0) { -+ str[16 * 3 - 1] = '\0'; -+ RTKBT_DBG("%s", str); -+ } -+ } -+ -+ if (i % 16 > 0) { -+ str[(i % 16) * 3 - 1] = '\0'; -+ RTKBT_DBG("%s", str); -+ } -+} -+ -+#if defined RTKBT_SWITCH_PATCH || defined RTKBT_TV_POWERON_WHITELIST -+int __rtk_send_hci_cmd(struct usb_device *udev, u8 *buf, u16 size) -+{ -+ int result; -+ unsigned int pipe = usb_sndctrlpipe(udev, 0); -+ -+ result = usb_control_msg(udev, pipe, 0, USB_TYPE_CLASS, 0, 0, -+ buf, size, 1000); /* 1000 msecs */ -+ -+ if (result < 0) -+ RTKBT_ERR("%s: Couldn't send hci cmd, err %d", -+ __func__, result); -+ -+ return result; -+} -+#endif -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0) -+static inline struct inode *file_inode(const struct file *f) -+{ -+ return f->f_path.dentry->d_inode; -+} -+#endif -+ -+static int config_lists_init(void) -+{ -+ INIT_LIST_HEAD(&list_configs); -+ INIT_LIST_HEAD(&list_extracfgs); -+ -+ return 0; -+} -+ -+static void config_lists_free(void) -+{ -+ struct list_head *iter; -+ struct list_head *tmp; -+ struct list_head *head; -+ struct cfg_list_item *n; -+ -+ if (!list_empty(&list_extracfgs)) -+ list_splice_tail(&list_extracfgs, &list_configs); -+ head = &list_configs; -+ list_for_each_safe(iter, tmp, head) { -+ n = list_entry(iter, struct cfg_list_item, list); -+ if (n) { -+ list_del(&n->list); -+ kfree(n); -+ } -+ } -+ -+ INIT_LIST_HEAD(&list_configs); -+ INIT_LIST_HEAD(&list_extracfgs); -+} -+ -+static void line_process(char *buf, int len) -+{ -+ char *argv[32]; -+ int argc = 0; -+ unsigned long offset; -+ u8 l; -+ u8 i = 0; -+ char *ptr = buf; -+ char *head = buf; -+ struct 
cfg_list_item *item; -+ -+ while ((ptr = strsep(&head, ", \t")) != NULL) { -+ if (!ptr[0]) -+ continue; -+ argv[argc++] = ptr; -+ if (argc >= 32) { -+ RTKBT_WARN("%s: Config item is too long", __func__); -+ break; -+ } -+ } -+ -+ if (argc < 4) { -+ RTKBT_WARN("%s: Invalid Config item, ignore", __func__); -+ return; -+ } -+ -+ offset = simple_strtoul(argv[0], NULL, 16); -+ offset = offset | (simple_strtoul(argv[1], NULL, 16) << 8); -+ l = (u8)simple_strtoul(argv[2], NULL, 16); -+ if (l != (u8)(argc - 3)) { -+ RTKBT_ERR("invalid len %u", l); -+ return; -+ } -+ -+ item = kzalloc(sizeof(*item) + l, GFP_KERNEL); -+ if (!item) { -+ RTKBT_WARN("%s: Cannot alloc mem for item, %04lx, %u", __func__, -+ offset, l); -+ return; -+ } -+ -+ item->offset = (u16)offset; -+ item->len = l; -+ for (i = 0; i < l; i++) -+ item->data[i] = (u8)simple_strtoul(argv[3 + i], NULL, 16); -+ list_add_tail(&item->list, &list_extracfgs); -+} -+ -+static void config_process(u8 *buff, int len) -+{ -+ char *head = (void *)buff; -+ char *ptr = (void *)buff; -+ -+ while ((ptr = strsep(&head, "\n\r")) != NULL) { -+ if (!ptr[0]) -+ continue; -+ line_process(ptr, strlen(ptr) + 1); -+ } -+} -+ -+static void config_file_proc(const char *path) -+{ -+ int size; -+ int rc; -+ struct file *file; -+ u8 tbuf[256]; -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) -+ loff_t pos = 0; -+#endif -+ -+ file = filp_open(path, O_RDONLY, 0); -+ if (IS_ERR(file)) -+ return; -+ -+ if (!S_ISREG(file_inode(file)->i_mode)) -+ return; -+ size = i_size_read(file_inode(file)); -+ if (size <= 0) -+ return; -+ -+ memset(tbuf, 0, sizeof(tbuf)); -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) -+ rc = kernel_read(file, tbuf, size, &pos); -+#else -+ rc = kernel_read(file, 0, tbuf, size); -+#endif -+ fput(file); -+ if (rc != size) { -+ if (rc >= 0) -+ rc = -EIO; -+ return; -+ } -+ -+ tbuf[rc++] = '\n'; -+ tbuf[rc++] = '\0'; -+ config_process(tbuf, rc); -+} -+ -+int patch_add(struct usb_interface *intf) -+{ -+ dev_data *dev_entry; -+ struct usb_device *udev; -+ -+ RTKBT_DBG("patch_add"); -+ dev_entry = dev_data_find(intf); -+ if (NULL != dev_entry) { -+ return -1; -+ } -+ -+ udev = interface_to_usbdev(intf); -+#ifdef BTUSB_RPM -+ RTKBT_DBG("auto suspend is enabled"); -+ usb_enable_autosuspend(udev); -+ pm_runtime_set_autosuspend_delay(&(udev->dev), 2000); -+#else -+ RTKBT_DBG("auto suspend is disabled"); -+ usb_disable_autosuspend(udev); -+#endif -+ -+ dev_entry = kzalloc(sizeof(dev_data), GFP_KERNEL); -+ dev_entry->intf = intf; -+ dev_entry->udev = udev; -+ dev_entry->patch_entry = get_patch_entry(udev); -+ if (NULL == dev_entry->patch_entry) { -+ kfree(dev_entry); -+ return -1; -+ } -+ list_add(&dev_entry->list_node, &dev_data_list); -+ -+ /* Should reset the gEVersion to 0xff, otherwise the stored gEVersion -+ * would cause rtk_get_eversion() returning previous gEVersion if -+ * change to different ECO chip. -+ * This would cause downloading wrong patch, and the controller can't -+ * work. 
*/ -+ RTKBT_DBG("%s: Reset gEVersion to 0xff", __func__); -+ gEVersion = 0xff; -+ -+ return 0; -+} -+ -+void patch_remove(struct usb_interface *intf) -+{ -+ dev_data *dev_entry; -+ struct usb_device *udev; -+ -+ udev = interface_to_usbdev(intf); -+#ifdef BTUSB_RPM -+ usb_disable_autosuspend(udev); -+#endif -+ -+ dev_entry = dev_data_find(intf); -+ if (NULL == dev_entry) { -+ return; -+ } -+ -+ RTKBT_DBG("patch_remove"); -+ list_del(&dev_entry->list_node); -+ kfree(dev_entry); -+} -+ -+static int send_reset_command(xchange_data *xdata) -+{ -+ int ret_val; -+ -+ RTKBT_DBG("HCI reset."); -+ -+ xdata->cmd_hdr->opcode = cpu_to_le16(HCI_OP_RESET); -+ xdata->cmd_hdr->plen = 0; -+ xdata->pkt_len = CMD_HDR_LEN; -+ -+ ret_val = send_hci_cmd(xdata); -+ if (ret_val < 0) { -+ RTKBT_ERR("failed to send hci cmd."); -+ return ret_val; -+ } -+ -+ ret_val = rcv_hci_evt(xdata); -+ if (ret_val < 0) { -+ RTKBT_ERR("failed to recv hci event."); -+ return ret_val; -+ } -+ -+ return 0; -+} -+ -+static inline int get_max_patch_size(u8 chip_type) -+{ -+ int max_patch_size = 0; -+ -+ switch (chip_type) { -+ case RTLPREVIOUS: -+ max_patch_size = 24 * 1024; -+ break; -+ case RTL8822BU: -+ max_patch_size = 25 * 1024; -+ break; -+ case RTL8723DU: -+ case RTL8822CU: -+ case RTL8761BU: -+ case RTL8821CU: -+ max_patch_size = 40 * 1024; -+ break; -+ case RTL8852AU: -+ max_patch_size = 0x114D0 + 529; /* 69.2KB */ -+ break; -+ case RTL8723FU: -+ max_patch_size = 0xC4Cf + 529; /* 49.2KB */ -+ break; -+ case RTL8852BU: -+ case RTL8851BU: -+ max_patch_size = 0x104D0 + 529; /* 65KB */ -+ break; -+ case RTL8852CU: -+ max_patch_size = 0x130D0 + 529; /* 76.2KB */ -+ break; -+ case RTL8822EU: -+ max_patch_size = 0x24620 + 529; /* 145KB */ -+ break; -+ default: -+ max_patch_size = 40 * 1024; -+ break; -+ } -+ -+ return max_patch_size; -+} -+ -+static int check_fw_chip_ver(dev_data * dev_entry, xchange_data * xdata) -+{ -+ int ret_val; -+ uint16_t chip = 0; -+ uint16_t chip_ver = 0; -+ -+ chip = rtk_vendor_read(dev_entry, READ_CHIP_TYPE); -+ if(chip == 0x8822) { -+ chip_ver = rtk_vendor_read(dev_entry, READ_CHIP_VER); -+ if(chip_ver == 0x000e) { -+ return 0; -+ } -+ } -+ -+ ret_val = check_fw_version(xdata); -+ if (ret_val < 0) { -+ RTKBT_ERR("Failed to get Local Version Information"); -+ return ret_val; -+ -+ } else if (ret_val > 0) { -+ RTKBT_DBG("Firmware already exists"); -+ /* Patch alread exists, just return */ -+ if (gEVersion == 0xff) { -+ RTKBT_DBG("global_version is not set, get it!"); -+ gEVersion = rtk_get_eversion(dev_entry); -+ } -+ return ret_val; -+ } -+ -+ return 0; -+} -+ -+int download_patch(struct usb_interface *intf) -+{ -+ dev_data *dev_entry; -+ patch_info *pinfo; -+ xchange_data *xdata = NULL; -+ uint8_t *fw_buf; -+ int ret_val; -+ int max_patch_size = 0; -+ -+ RTKBT_DBG("download_patch start"); -+ dev_entry = dev_data_find(intf); -+ if (NULL == dev_entry) { -+ ret_val = -1; -+ RTKBT_ERR("NULL == dev_entry"); -+ goto patch_end; -+ } -+ -+ xdata = kzalloc(sizeof(xchange_data), GFP_KERNEL); -+ if (NULL == xdata) { -+ ret_val = -1; -+ RTKBT_DBG("NULL == xdata"); -+ goto patch_end; -+ } -+ -+ init_xdata(xdata, dev_entry); -+ -+ ret_val = check_fw_chip_ver(dev_entry, xdata); -+ if (ret_val != 0 ) -+ goto patch_end; -+ -+ xdata->fw_len = load_firmware(dev_entry, &xdata->fw_data); -+ if (xdata->fw_len <= 0) { -+ RTKBT_ERR("load firmware failed!"); -+ ret_val = -1; -+ goto patch_end; -+ } -+ -+ fw_buf = xdata->fw_data; -+ -+ pinfo = dev_entry->patch_entry; -+ if (!pinfo) { -+ RTKBT_ERR("%s: No patch entry", __func__); 
-+ ret_val = -1; -+ goto patch_fail; -+ } -+ max_patch_size = get_max_patch_size(pinfo->chip_type); -+ if (xdata->fw_len > max_patch_size) { -+ RTKBT_ERR("FW/CONFIG total length larger than allowed %d", -+ max_patch_size); -+ ret_val = -1; -+ goto patch_fail; -+ } -+ -+ ret_val = download_data(xdata); -+ if (ret_val < 0) { -+ RTKBT_ERR("download_data failed, err %d", ret_val); -+ goto patch_fail; -+ } -+ -+ ret_val = check_fw_version(xdata); -+ if (ret_val <= 0) { -+ RTKBT_ERR("%s: Read Local Version Info failure after download", -+ __func__); -+ ret_val = -1; -+ goto patch_fail; -+ } -+ -+ ret_val = 0; -+patch_fail: -+ kfree(fw_buf); -+patch_end: -+ if (xdata != NULL) { -+ if (xdata->send_pkt) -+ kfree(xdata->send_pkt); -+ if (xdata->rcv_pkt) -+ kfree(xdata->rcv_pkt); -+ kfree(xdata); -+ } -+ RTKBT_DBG("Rtk patch end %d", ret_val); -+ return ret_val; -+} -+ -+#ifdef RTKBT_SWITCH_PATCH -+/* @return: -+ * -1: error -+ * 0: download patch successfully -+ * >0: patch already exists */ -+int download_special_patch(struct usb_interface *intf, const char *special_name) -+{ -+ dev_data *dev_entry; -+ patch_info *pinfo; -+ xchange_data *xdata = NULL; -+ uint8_t *fw_buf; -+ int result; -+ char name1[64]; -+ char *origin_name1; -+ char name2[64]; -+ char *origin_name2; -+ int max_patch_size = 0; -+ -+ RTKBT_DBG("Download LPS Patch start"); -+ dev_entry = dev_data_find(intf); -+ if (!dev_entry) { -+ RTKBT_ERR("No Patch found"); -+ return -1; -+ } -+ -+ xdata = kzalloc(sizeof(xchange_data), GFP_KERNEL); -+ if (!xdata) { -+ RTKBT_ERR("Couldn't alloc xdata"); -+ return -1; -+ } -+ -+ init_xdata(xdata, dev_entry); -+ -+ result = check_fw_version(xdata); -+ if (result < 0) { -+ RTKBT_ERR("Failed to get Local Version Information"); -+ goto patch_end; -+ -+ } else if (result > 0) { -+ RTKBT_DBG("Firmware already exists"); -+ /* Patch alread exists, just return */ -+ if (gEVersion == 0xff) { -+ RTKBT_DBG("global_version is not set, get it!"); -+ gEVersion = rtk_get_eversion(dev_entry); -+ } -+ goto patch_end; -+ } -+ memset(name1, 0, sizeof(name1)); -+ memset(name2, 0, sizeof(name2)); -+ origin_name1 = dev_entry->patch_entry->patch_name; -+ origin_name2 = dev_entry->patch_entry->config_name; -+ memcpy(name1, special_name, strlen(special_name)); -+ strncat(name1, origin_name1, sizeof(name1) - 1 - strlen(special_name)); -+ memcpy(name2, special_name, strlen(special_name)); -+ strncat(name2, origin_name2, sizeof(name2) - 1 - strlen(special_name)); -+ dev_entry->patch_entry->patch_name = name1; -+ dev_entry->patch_entry->config_name = name2; -+ RTKBT_INFO("Loading %s and %s", name1, name2); -+ xdata->fw_len = load_firmware(dev_entry, &xdata->fw_data); -+ dev_entry->patch_entry->patch_name = origin_name1; -+ dev_entry->patch_entry->config_name = origin_name2; -+ if (xdata->fw_len <= 0) { -+ result = -1; -+ RTKBT_ERR("load firmware failed!"); -+ goto patch_end; -+ } -+ -+ fw_buf = xdata->fw_data; -+ -+ pinfo = dev_entry->patch_entry; -+ if (!pinfo) { -+ RTKBT_ERR("%s: No patch entry", __func__); -+ result = -1; -+ goto patch_fail; -+ } -+ max_patch_size = get_max_patch_size(pinfo->chip_type); -+ if (xdata->fw_len > max_patch_size) { -+ result = -1; -+ RTKBT_ERR("FW/CONFIG total length larger than allowed %d", -+ max_patch_size); -+ goto patch_fail; -+ } -+ -+ result = download_data(xdata); -+ if (result < 0) { -+ RTKBT_ERR("download_data failed, err %d", result); -+ goto patch_fail; -+ } -+ -+ result = check_fw_version(xdata); -+ if (result <= 0) { -+ RTKBT_ERR("%s: Read Local Version Info failure after download", 
-+ __func__); -+ result = -1; -+ goto patch_fail; -+ } -+ -+ result = 0; -+ -+patch_fail: -+ kfree(fw_buf); -+patch_end: -+ if (xdata->send_pkt) -+ kfree(xdata->send_pkt); -+ if (xdata->rcv_pkt) -+ kfree(xdata->rcv_pkt); -+ kfree(xdata); -+ RTKBT_DBG("Download LPS Patch end %d", result); -+ -+ return result; -+} -+#endif -+ -+int setup_btrealtek_flag(struct usb_interface *intf, struct hci_dev *hdev) -+{ -+ dev_data *dev_entry; -+ patch_info *pinfo; -+ int ret_val = 0; -+ -+ dev_entry = dev_data_find(intf); -+ if (NULL == dev_entry) { -+ ret_val = -1; -+ RTKBT_ERR("%s: NULL == dev_entry", __func__); -+ return ret_val; -+ } -+ -+ pinfo = dev_entry->patch_entry; -+ if (!pinfo) { -+ RTKBT_ERR("%s: No patch entry", __func__); -+ ret_val = -1; -+ return ret_val; -+ } -+ -+ switch (pinfo->chip_type){ -+ case RTL8852CU: -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) -+ btrealtek_set_flag(hdev, REALTEK_ALT6_CONTINUOUS_TX_CHIP); -+#endif -+ break; -+ default: -+ break; -+ } -+ -+ return ret_val; -+} -+ -+#if defined RTKBT_SUSPEND_WAKEUP || defined RTKBT_SHUTDOWN_WAKEUP || defined RTKBT_SWITCH_PATCH -+int set_scan(struct usb_interface *intf) -+{ -+ dev_data *dev_entry; -+ xchange_data *xdata = NULL; -+ int result; -+ -+ RTKBT_DBG("%s", __func__); -+ dev_entry = dev_data_find(intf); -+ if (!dev_entry) -+ return -1; -+ -+ xdata = kzalloc(sizeof(xchange_data), GFP_KERNEL); -+ if (!xdata) { -+ RTKBT_ERR("Could not alloc xdata"); -+ return -1; -+ } -+ -+ init_xdata(xdata, dev_entry); -+ -+ if ( !xdata->send_pkt || !xdata->rcv_pkt ){ -+ result = -1; -+ goto end; -+ } -+ -+ xdata->cmd_hdr->opcode = cpu_to_le16(STARTSCAN_OPCODE); -+ xdata->cmd_hdr->plen = 1; -+ xdata->pkt_len = CMD_HDR_LEN + 1; -+ xdata->send_pkt[CMD_HDR_LEN] = 1; -+ -+ result = send_hci_cmd(xdata); -+ if (result < 0) -+ goto end; -+ -+end: -+ kfree(xdata->send_pkt); -+ kfree(xdata->rcv_pkt); -+ kfree(xdata); -+ -+ RTKBT_DBG("%s done", __func__); -+ -+ return result; -+} -+#endif -+ -+dev_data *dev_data_find(struct usb_interface * intf) -+{ -+ dev_data *dev_entry; -+ -+ list_for_each_entry(dev_entry, &dev_data_list, list_node) { -+ if (dev_entry->intf == intf) { -+ patch_info *patch = dev_entry->patch_entry; -+ if (!patch) -+ return NULL; -+ -+ RTKBT_INFO("chip type value: 0x%02x", patch->chip_type); -+ return dev_entry; -+ } -+ } -+ -+ return NULL; -+} -+ -+patch_info *get_patch_entry(struct usb_device * udev) -+{ -+ patch_info *patch_entry; -+ uint16_t pid; -+ -+ patch_entry = fw_patch_table; -+ pid = le16_to_cpu(udev->descriptor.idProduct); -+ RTKBT_DBG("pid = 0x%x", pid); -+ while (pid != patch_entry->prod_id) { -+ if (0 == patch_entry->prod_id) { -+ RTKBT_DBG -+ ("get_patch_entry =NULL, can not find device pid in patch_table"); -+ return NULL; //break; -+ } -+ patch_entry++; -+ } -+ -+ return patch_entry; -+} -+ -+static int is_mac(u8 chip_type, u16 offset) -+{ -+ int result = 0; -+ -+ switch (chip_type) { -+ case RTL8822BU: -+ case RTL8723DU: -+ case RTL8821CU: -+ if (offset == 0x0044) -+ return 1; -+ break; -+ case RTL8822CU: -+ case RTL8761BU: -+ case RTL8852AU: -+ case RTL8723FU: -+ case RTL8852BU: -+ case RTL8852CU: -+ case RTL8822EU: -+ case RTL8851BU: -+ if (offset == 0x0030) -+ return 1; -+ break; -+ case RTLPREVIOUS: -+ if (offset == 0x003c) -+ return 1; -+ break; -+ } -+ -+ return result; -+} -+ -+static uint16_t get_mac_offset(u8 chip_type) -+{ -+ switch (chip_type) { -+ case RTL8822BU: -+ case RTL8723DU: -+ case RTL8821CU: -+ return 0x0044; -+ case RTL8822CU: -+ case RTL8761BU: -+ case RTL8852AU: -+ case RTL8723FU: -+ 
case RTL8852BU: -+ case RTL8852CU: -+ case RTL8822EU: -+ case RTL8851BU: -+ return 0x0030; -+ case RTLPREVIOUS: -+ return 0x003c; -+ default: -+ return 0x003c; -+ } -+} -+ -+static void merge_configs(struct list_head *head, struct list_head *head2) -+{ -+ struct list_head *epos, *enext; -+ struct list_head *pos, *next; -+ struct cfg_list_item *n; -+ struct cfg_list_item *extra; -+ -+ if (!head || !head2) -+ return; -+ -+ if (list_empty(head2)) -+ return; -+ -+ if (list_empty(head)) { -+ list_splice_tail(head2, head); -+ INIT_LIST_HEAD(head2); -+ return; -+ } -+ -+ /* Add or update & replace */ -+ list_for_each_safe(epos, enext, head2) { -+ extra = list_entry(epos, struct cfg_list_item, list); -+ -+ list_for_each_safe(pos, next, head) { -+ n = list_entry(pos, struct cfg_list_item, list); -+ if (extra->offset == n->offset) { -+ if (extra->len < n->len) { -+ /* Update the cfg data */ -+ RTKBT_INFO("Update cfg: ofs %04x len %u", -+ n->offset, n->len); -+ memcpy(n->data, extra->data, -+ extra->len); -+ list_del(epos); -+ kfree(extra); -+ break; -+ } else { -+ /* Replace the item */ -+ list_del(epos); -+ list_replace_init(pos, epos); -+ /* free the old item */ -+ kfree(n); -+ } -+ } -+ -+ } -+ -+ } -+ -+ if (list_empty(head2)) -+ return; -+ list_for_each_safe(epos, enext, head2) { -+ extra = list_entry(epos, struct cfg_list_item, list); -+ RTKBT_INFO("Add new cfg: ofs %04x, len %u", extra->offset, -+ extra->len); -+ /* Add the item to list */ -+ list_del(epos); -+ list_add_tail(epos, head); -+ } -+} -+ -+static int rtk_parse_config_file(u8 *config_buf, int filelen) -+{ -+ struct rtk_bt_vendor_config *config = (void *)config_buf; -+ u16 config_len = 0, temp = 0; -+ struct rtk_bt_vendor_config_entry *entry = NULL; -+ u32 i = 0; -+ struct cfg_list_item *item; -+ -+ if (!config_buf) -+ return -EINVAL; -+ -+ config_len = le16_to_cpu(config->data_len); -+ entry = config->entry; -+ -+ if (le32_to_cpu(config->signature) != RTK_VENDOR_CONFIG_MAGIC) { -+ RTKBT_ERR("sig magic num %08x, not rtk vendor magic %08x", -+ config->signature, RTK_VENDOR_CONFIG_MAGIC); -+ return -1; -+ } -+ -+ if (config_len != filelen - BT_CONFIG_HDRLEN) { -+ RTKBT_ERR("config length %u is not right %u", config_len, -+ (u16)(filelen - BT_CONFIG_HDRLEN)); -+ return -1; -+ } -+ -+ for (i = 0; i < config_len;) { -+ /* Add config item to list */ -+ item = kzalloc(sizeof(*item) + entry->entry_len, GFP_KERNEL); -+ if (item) { -+ item->offset = le16_to_cpu(entry->offset); -+ item->len = entry->entry_len; -+ memcpy(item->data, entry->entry_data, item->len); -+ list_add_tail(&item->list, &list_configs); -+ } else { -+ RTKBT_ERR("Cannot alloc mem for entry %04x, %u", -+ entry->offset, entry->entry_len); -+ break; -+ } -+ -+ temp = entry->entry_len + -+ sizeof(struct rtk_bt_vendor_config_entry); -+ i += temp; -+ entry = -+ (struct rtk_bt_vendor_config_entry *)((uint8_t *) entry + -+ temp); -+ } -+ -+ return 0; -+} -+ -+static uint8_t rtk_get_fw_project_id(uint8_t * p_buf) -+{ -+ uint8_t opcode; -+ uint8_t len; -+ uint8_t data = 0; -+ -+ do { -+ opcode = *p_buf; -+ len = *(p_buf - 1); -+ if (opcode == 0x00) { -+ if (len == 1) { -+ data = *(p_buf - 2); -+ RTKBT_DBG -+ ("rtk_get_fw_project_id: opcode %d, len %d, data %d", -+ opcode, len, data); -+ break; -+ } else { -+ RTKBT_ERR -+ ("rtk_get_fw_project_id: invalid len %d", -+ len); -+ } -+ } -+ p_buf -= len + 2; -+ } while (*p_buf != 0xFF); -+ -+ return data; -+} -+ -+struct rtb_ota_flag { -+ uint8_t eco; -+ uint8_t enable; -+ uint16_t reserve; -+} __attribute__ ((packed)); -+ -+struct 
rtb_security_hdr { -+ uint8_t eco; -+ uint8_t pri; -+ uint8_t key_id; -+ uint8_t reserve; -+ uint32_t security_len; -+ uint8_t *payload; -+} __attribute__ ((packed)); -+ -+struct rtb_dummy_hdr { -+ uint8_t eco; -+ uint8_t pri; -+ uint8_t reserve; -+ uint32_t dummy_len; -+ uint8_t *payload; -+} __attribute__ ((packed)); -+ -+struct rtb_snippet_hdr { -+ uint8_t eco; -+ uint8_t pri; -+ uint16_t reserve; -+ uint32_t snippet_len; -+ uint8_t *payload; -+} __attribute__ ((packed)); -+ -+struct patch_node { -+ uint8_t eco; -+ uint8_t pri; -+ uint8_t key_id; -+ uint8_t reserve; -+ uint32_t len; -+ uint8_t *payload; -+ struct list_head list; -+} __attribute__ ((packed)); -+ -+/* Add a node to alist that is in ascending order. */ -+static void insert_queue_sort(struct list_head *head, struct patch_node *node) -+{ -+ struct list_head *pos; -+ struct list_head *next; -+ struct patch_node *tmp; -+ -+ if(!head || !node) { -+ return; -+ } -+ list_for_each_safe(pos, next, head) { -+ tmp = list_entry(pos, struct patch_node, list); -+ if(tmp->pri >= node->pri) -+ break; -+ } -+ __list_add(&node->list, pos->prev, pos); -+} -+ -+static int insert_patch(struct patch_node *patch_node_hdr, uint8_t *section_pos, -+ uint32_t opcode, uint32_t *patch_len, uint8_t *sec_flag) -+{ -+ struct patch_node *tmp; -+ int i; -+ uint32_t numbers; -+ uint32_t section_len = 0; -+ uint8_t eco = 0; -+ uint8_t *pos = section_pos + 8; -+ -+ numbers = get_unaligned_le16(pos); -+ RTKBT_DBG("number 0x%04x", numbers); -+ -+ pos += 4; -+ for (i = 0; i < numbers; i++) { -+ eco = (uint8_t)*(pos); -+ RTKBT_DBG("eco 0x%02x, Eversion:%02x", eco, gEVersion); -+ if (eco == gEVersion + 1) { -+ tmp = (struct patch_node*)kzalloc(sizeof(struct patch_node), GFP_KERNEL); -+ tmp->pri = (uint8_t)*(pos + 1); -+ if(opcode == PATCH_SECURITY_HEADER) -+ tmp->key_id = (uint8_t)*(pos + 1); -+ -+ section_len = get_unaligned_le32(pos + 4); -+ tmp->len = section_len; -+ *patch_len += section_len; -+ RTKBT_DBG("Pri:%d, Patch length 0x%04x", tmp->pri, tmp->len); -+ tmp->payload = pos + 8; -+ if(opcode != PATCH_SECURITY_HEADER) { -+ insert_queue_sort(&(patch_node_hdr->list), tmp); -+ } else { -+ if((g_key_id == tmp->key_id) && (g_key_id > 0)) { -+ insert_queue_sort(&(patch_node_hdr->list), tmp); -+ *sec_flag = 1; -+ } else { -+ pos += (8 + section_len); -+ kfree(tmp); -+ continue; -+ } -+ } -+ } else { -+ section_len = get_unaligned_le32(pos + 4); -+ RTKBT_DBG("Patch length 0x%04x", section_len); -+ } -+ pos += (8 + section_len); -+ } -+ return 0; -+} -+ -+static uint8_t *rtb_get_patch_header(int *len, -+ struct patch_node *patch_node_hdr, uint8_t * epatch_buf, -+ uint8_t key_id) -+{ -+ uint16_t i, j; -+ struct rtb_new_patch_hdr *new_patch; -+ uint8_t sec_flag = 0; -+ uint32_t number_of_ota_flag; -+ uint32_t patch_len = 0; -+ uint8_t *section_pos; -+ uint8_t *ota_flag_pos; -+ uint32_t number_of_section; -+ -+ struct rtb_section_hdr section_hdr; -+ struct rtb_ota_flag ota_flag; -+ -+ new_patch = (struct rtb_new_patch_hdr *)epatch_buf; -+ number_of_section = le32_to_cpu(new_patch->number_of_section); -+ -+ RTKBT_DBG("FW version 0x%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x", -+ *(epatch_buf + 8), *(epatch_buf + 9), *(epatch_buf + 10), -+ *(epatch_buf + 11),*(epatch_buf + 12), *(epatch_buf + 13), -+ *(epatch_buf + 14), *(epatch_buf + 15)); -+ -+ section_pos = epatch_buf + 20; -+ -+ for (i = 0; i < number_of_section; i++) { -+ section_hdr.opcode = get_unaligned_le32(section_pos); -+ section_hdr.section_len = get_unaligned_le32(section_pos + 4); -+ RTKBT_DBG("opcode 0x%04x", 
section_hdr.opcode); -+ -+ switch (section_hdr.opcode) { -+ case PATCH_SNIPPETS: -+ insert_patch(patch_node_hdr, section_pos, PATCH_SNIPPETS, &patch_len, NULL); -+ break; -+ case PATCH_SECURITY_HEADER: -+ if(!g_key_id) -+ break; -+ -+ sec_flag = 0; -+ insert_patch(patch_node_hdr, section_pos, PATCH_SECURITY_HEADER, &patch_len, &sec_flag); -+ if(sec_flag) -+ break; -+ -+ for (i = 0; i < number_of_section; i++) { -+ section_hdr.opcode = get_unaligned_le32(section_pos); -+ section_hdr.section_len = get_unaligned_le32(section_pos + 4); -+ if(section_hdr.opcode == PATCH_DUMMY_HEADER) { -+ insert_patch(patch_node_hdr, section_pos, PATCH_DUMMY_HEADER, &patch_len, NULL); -+ } -+ section_pos += (SECTION_HEADER_SIZE + section_hdr.section_len); -+ } -+ break; -+ case PATCH_DUMMY_HEADER: -+ if(g_key_id) { -+ break; -+ } -+ insert_patch(patch_node_hdr, section_pos, PATCH_DUMMY_HEADER, &patch_len, NULL); -+ break; -+ case PATCH_OTA_FLAG: -+ ota_flag_pos = section_pos + 4; -+ number_of_ota_flag = get_unaligned_le32(ota_flag_pos); -+ ota_flag.eco = (uint8_t)*(ota_flag_pos + 1); -+ if (ota_flag.eco == gEVersion + 1) { -+ for (j = 0; j < number_of_ota_flag; j++) { -+ if (ota_flag.eco == gEVersion + 1) { -+ ota_flag.enable = get_unaligned_le32(ota_flag_pos + 4); -+ } -+ } -+ } -+ break; -+ default: -+ RTKBT_ERR("Unknown Opcode. Ignore"); -+ } -+ section_pos += (SECTION_HEADER_SIZE + section_hdr.section_len); -+ } -+ *len = patch_len; -+ -+ return NULL; -+} -+ -+static int rtk_get_patch_entry(uint8_t * epatch_buf, -+ struct rtk_epatch_entry *entry) -+{ -+ uint32_t svn_ver; -+ uint32_t coex_ver; -+ uint32_t tmp; -+ uint16_t i; -+ uint16_t number_of_total_patch; -+ struct rtk_epatch *epatch_info = (struct rtk_epatch *)epatch_buf; -+ -+ number_of_total_patch = -+ le16_to_cpu(epatch_info->number_of_total_patch); -+ RTKBT_DBG("fw_version = 0x%x", le32_to_cpu(epatch_info->fw_version)); -+ RTKBT_DBG("number_of_total_patch = %d", number_of_total_patch); -+ -+ /* get right epatch entry */ -+ for (i = 0; i < number_of_total_patch; i++) { -+ if (get_unaligned_le16(epatch_buf + 14 + 2 * i) == -+ gEVersion + 1) { -+ entry->chipID = gEVersion + 1; -+ entry->patch_length = get_unaligned_le16(epatch_buf + -+ 14 + -+ 2 * number_of_total_patch + -+ 2 * i); -+ entry->start_offset = get_unaligned_le32(epatch_buf + -+ 14 + -+ 4 * number_of_total_patch + -+ 4 * i); -+ break; -+ } -+ } -+ -+ if (i >= number_of_total_patch) { -+ entry->patch_length = 0; -+ entry->start_offset = 0; -+ RTKBT_ERR("No corresponding patch found\n"); -+ return 0; -+ } -+ -+ svn_ver = get_unaligned_le32(epatch_buf + -+ entry->start_offset + -+ entry->patch_length - 8); -+ coex_ver = get_unaligned_le32(epatch_buf + -+ entry->start_offset + -+ entry->patch_length - 12); -+ -+ RTKBT_DBG("chipID %d", entry->chipID); -+ RTKBT_DBG("patch_length 0x%04x", entry->patch_length); -+ RTKBT_DBG("start_offset 0x%08x", entry->start_offset); -+ -+ RTKBT_DBG("Svn version: %8d", svn_ver); -+ tmp = ((coex_ver >> 16) & 0x7ff) + (coex_ver >> 27) * 10000; -+ RTKBT_DBG("Coexistence: BTCOEX_20%06d-%04x", -+ tmp, (coex_ver & 0xffff)); -+ -+ return 0; -+} -+ -+static int bachk(const char *str) -+{ -+ if (!str) -+ return -1; -+ -+ if (strlen(str) != 17) -+ return -1; -+ -+ while (*str) { -+ if (!isxdigit(*str++)) -+ return -1; -+ -+ if (!isxdigit(*str++)) -+ return -1; -+ -+ if (*str == 0) -+ break; -+ -+ if (*str++ != ':') -+ return -1; -+ } -+ -+ return 0; -+} -+ -+static int request_bdaddr(u8 *buf) -+{ -+ int size; -+ int rc; -+ struct file *file; -+ u8 tbuf[BDADDR_STRING_LEN + 
1]; -+ char *str; -+ int i; -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) -+ loff_t pos = 0; -+#endif -+ -+ if (!buf) -+ return -EINVAL; -+ -+ file = filp_open(BDADDR_FILE, O_RDONLY, 0); -+ if (IS_ERR(file)) -+ return -ENOENT; -+ -+ if (!S_ISREG(file_inode(file)->i_mode)) -+ return -EINVAL; -+ size = i_size_read(file_inode(file)); -+ if (size <= 0) -+ return -EINVAL; -+ -+ if (size > BDADDR_STRING_LEN) -+ size = BDADDR_STRING_LEN; -+ -+ memset(tbuf, 0, sizeof(tbuf)); -+ RTKBT_INFO("size = %d", size); -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) -+ rc = kernel_read(file, tbuf, size, &pos); -+#else -+ rc = kernel_read(file, 0, tbuf, size); -+#endif -+ fput(file); -+ if (rc != size) { -+ if (rc >= 0) -+ rc = -EIO; -+ goto fail; -+ } -+ -+ if (bachk(tbuf) < 0) { -+ rc = -EINVAL; -+ goto fail; -+ } -+ -+ str = tbuf; -+ for (i = 5; i >= 0; i--) { -+ buf[i] = simple_strtol(str, NULL, 16); -+ str += 3; -+ } -+ -+ return size; -+fail: -+ return rc; -+} -+ -+static u8 *load_config(dev_data *dev_entry, int *length) -+{ -+ patch_info *patch_entry; -+ const char *config_name; -+ const struct firmware *fw; -+ struct usb_device *udev; -+ int result; -+ u8 *buf; -+ u8 *p; -+ u16 config_len; -+ u16 dlen; -+ u8 tmp_buf[32]; -+ int file_sz; -+ struct cfg_list_item *n; -+ struct list_head *pos, *next; -+ u8 chip_type; -+ -+ config_lists_init(); -+ patch_entry = dev_entry->patch_entry; -+ config_name = patch_entry->config_name; -+ udev = dev_entry->udev; -+ chip_type = patch_entry->chip_type; -+ -+ RTKBT_INFO("config filename %s", config_name); -+ result = request_firmware(&fw, config_name, &udev->dev); -+ if (result < 0) -+ return NULL; -+ -+ file_sz = fw->size; -+ buf = (u8 *)fw->data; -+ -+ /* Load extra configs */ -+ config_file_proc(EXTRA_CONFIG_FILE); -+ list_for_each_safe(pos, next, &list_extracfgs) { -+ n = list_entry(pos, struct cfg_list_item, list); -+ RTKBT_INFO("extra cfg: ofs %04x, len %u", n->offset, n->len); -+ } -+ -+ /* Load extra bdaddr config */ -+ memset(tmp_buf, 0, sizeof(tmp_buf)); -+ result = request_bdaddr(tmp_buf); -+ if (result > 0) { -+ n = kzalloc(sizeof(*n) + 6, GFP_KERNEL); -+ if (n) { -+ n->offset = get_mac_offset(patch_entry->chip_type); -+ n->len = 6; -+ memcpy(n->data, tmp_buf, 6); -+ list_add_tail(&n->list, &list_extracfgs); -+ } else { -+ RTKBT_WARN("Couldn't alloc mem for bdaddr"); -+ } -+ } else { -+ if (result == -ENOENT) -+ RTKBT_WARN("no bdaddr file %s", BDADDR_FILE); -+ else -+ RTKBT_WARN("invalid customer bdaddr %d", result); -+ } -+ -+ RTKBT_INFO("Origin cfg len %u", (u16)file_sz); -+ util_hexdump((const u8 *)buf, file_sz); -+ -+ result = rtk_parse_config_file(buf, file_sz); -+ if (result < 0) { -+ RTKBT_ERR("Parse config file error"); -+ buf = NULL; -+ goto done; -+ } -+ -+ merge_configs(&list_configs, &list_extracfgs); -+ -+ /* Calculate the config_len */ -+ config_len = 4; /* magic word length */ -+ config_len += 2; /* data length field */ -+ dlen = 0; -+ list_for_each_safe(pos, next, &list_configs) { -+ n = list_entry(pos, struct cfg_list_item, list); -+ switch (n->offset) { -+ case 0x003c: -+ case 0x0030: -+ case 0x0044: -+ if (is_mac(chip_type, n->offset) && n->len == 6) { -+ char s[18]; -+ sprintf(s, "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X", -+ n->data[5], n->data[4], -+ n->data[3], n->data[2], -+ n->data[1], n->data[0]); -+ RTKBT_INFO("bdaddr ofs %04x, %s", n->offset, s); -+ } -+ break; -+ default: -+ break; -+ } -+ -+ config_len += (3 + n->len); -+ dlen += (3 + n->len); -+ } -+ -+ -+ buf = kzalloc(config_len, GFP_KERNEL); -+ if (!buf) { -+ 
RTKBT_ERR("Couldn't alloc buf for configs"); -+ goto done; -+ } -+ -+ /* Save configs to a buffer */ -+ memcpy(buf, cfg_magic, 4); -+ buf[4] = dlen & 0xff; -+ buf[5] = (dlen >> 8) & 0xff; -+ p = buf + 6; -+ list_for_each_safe(pos, next, &list_configs) { -+ n = list_entry(pos, struct cfg_list_item, list); -+ p[0] = n->offset & 0xff; -+ p[1] = (n->offset >> 8) & 0xff; -+ p[2] = n->len; -+ memcpy(p + 3, n->data, n->len); -+ p += (3 + n->len); -+ } -+ -+ RTKBT_INFO("New cfg len %u", config_len); -+ util_hexdump((const u8 *)buf, config_len); -+ -+ *length = config_len; -+ -+done: -+ config_lists_free(); -+ release_firmware(fw); -+ -+ return buf; -+} -+ -+static int rtk_vendor_read(dev_data * dev_entry, uint8_t class) -+{ -+ struct rtk_chip_type_evt *chip_type; -+ struct rtk_security_proj_evt *sec_proj; -+ patch_info *patch_entry; -+ int ret_val = 0; -+ xchange_data *xdata = NULL; -+ unsigned char cmd_ct_buf[] = {0x10, 0x38, 0x04, 0x28, 0x80}; -+ unsigned char cmd_cv_buf[] = {0x10, 0x3A, 0x04, 0x28, 0x80}; -+ unsigned char cmd_sec_buf[] = {0x10, 0xA4, 0x0D, 0x00, 0xb0}; -+ -+ xdata = kzalloc(sizeof(xchange_data), GFP_KERNEL); -+ if (NULL == xdata) { -+ ret_val = 0xFE; -+ RTKBT_DBG("NULL == xdata"); -+ return ret_val; -+ } -+ -+ init_xdata(xdata, dev_entry); -+ -+ xdata->cmd_hdr->opcode = cpu_to_le16(HCI_VENDOR_READ_CMD); -+ xdata->cmd_hdr->plen = 5; -+ memcpy(xdata->send_pkt, &(xdata->cmd_hdr->opcode), 2); -+ memcpy(xdata->send_pkt+2, &(xdata->cmd_hdr->plen), 1); -+ -+ switch (class) { -+ case READ_CHIP_TYPE: -+ memcpy(xdata->send_pkt+3, cmd_ct_buf, sizeof(cmd_ct_buf)); -+ break; -+ case READ_CHIP_VER: -+ memcpy(xdata->send_pkt+3, cmd_cv_buf, sizeof(cmd_cv_buf)); -+ break; -+ case READ_SEC_PROJ: -+ memcpy(xdata->send_pkt+3, cmd_sec_buf, sizeof(cmd_sec_buf)); -+ break; -+ default: -+ break; -+ } -+ -+ xdata->pkt_len = CMD_HDR_LEN + 5; -+ -+ ret_val = send_hci_cmd(xdata); -+ if (ret_val < 0) { -+ RTKBT_ERR("Failed to send read RTK chip_type cmd."); -+ ret_val = 0xFE; -+ goto read_end; -+ } -+ -+ ret_val = rcv_hci_evt(xdata); -+ if (ret_val < 0) { -+ RTKBT_ERR("Failed to receive HCI event for chip type."); -+ ret_val = 0xFE; -+ goto read_end; -+ } -+ -+ patch_entry = xdata->dev_entry->patch_entry; -+ if(class == READ_SEC_PROJ){ -+ sec_proj = (struct rtk_security_proj_evt *)(xdata->rsp_para); -+ RTKBT_DBG("sec_proj->status = 0x%x, sec_proj->key_id = 0x%x", -+ sec_proj->status, sec_proj->key_id); -+ if (sec_proj->status) { -+ ret_val = 0; -+ } else { -+ ret_val = sec_proj->key_id; -+ g_key_id = sec_proj->key_id; -+ } -+ } else { -+ chip_type = (struct rtk_chip_type_evt *)(xdata->rsp_para); -+ RTKBT_DBG("chip_type->status = 0x%x, chip_type->chip = 0x%x", -+ chip_type->status, chip_type->chip); -+ if (chip_type->status) { -+ ret_val = 0; -+ } else { -+ ret_val = chip_type->chip; -+ } -+ } -+ -+read_end: -+ if (xdata != NULL) { -+ if (xdata->send_pkt) -+ kfree(xdata->send_pkt); -+ if (xdata->rcv_pkt) -+ kfree(xdata->rcv_pkt); -+ kfree(xdata); -+ } -+ return ret_val; -+} -+ -+int load_firmware(dev_data * dev_entry, uint8_t ** buff) -+{ -+ const struct firmware *fw; -+ struct usb_device *udev; -+ patch_info *patch_entry; -+ char *fw_name; -+ int fw_len = 0, ret_val = 0, config_len = 0, buf_len = -1; -+ uint8_t *buf = NULL, *config_file_buf = NULL, *epatch_buf = NULL; -+ uint8_t proj_id = 0; -+ uint8_t need_download_fw = 1; -+ uint16_t lmp_version; -+ struct rtk_epatch_entry current_entry = { 0 }; -+ -+ struct list_head *pos, *next; -+ struct patch_node *tmp; -+ struct patch_node patch_node_hdr; -+ -+ 
RTKBT_DBG("load_firmware start"); -+ udev = dev_entry->udev; -+ patch_entry = dev_entry->patch_entry; -+ lmp_version = patch_entry->lmp_sub; -+ RTKBT_DBG("lmp_version = 0x%04x", lmp_version); -+ -+ config_file_buf = load_config(dev_entry, &config_len); -+ -+ fw_name = patch_entry->patch_name; -+ RTKBT_DBG("fw name is %s", fw_name); -+ ret_val = request_firmware(&fw, fw_name, &udev->dev); -+ if (ret_val < 0) { -+ RTKBT_ERR("request_firmware error"); -+ fw_len = 0; -+ kfree(config_file_buf); -+ config_file_buf = NULL; -+ goto fw_fail; -+ } -+ -+ INIT_LIST_HEAD(&patch_node_hdr.list); -+ -+ epatch_buf = kzalloc(fw->size, GFP_KERNEL); -+ if (NULL == epatch_buf) -+ goto alloc_fail; -+ -+ memcpy(epatch_buf, fw->data, fw->size); -+ buf_len = fw->size + config_len; -+ -+ if (lmp_version == ROM_LMP_8723a) { -+ RTKBT_DBG("This is 8723a, use old patch style!"); -+ -+ if (memcmp(epatch_buf, RTK_EPATCH_SIGNATURE, 8) == 0) { -+ RTKBT_ERR("8723a Check signature error!"); -+ need_download_fw = 0; -+ } else { -+ if (!(buf = kzalloc(buf_len, GFP_KERNEL))) { -+ RTKBT_ERR("Can't alloc memory for fw&config"); -+ buf_len = -1; -+ } else { -+ RTKBT_DBG("8723a, fw copy direct"); -+ memcpy(buf, epatch_buf, fw->size); -+ if (config_len) { -+ memcpy(&buf[buf_len - config_len], -+ config_file_buf, config_len); -+ } -+ } -+ } -+ } else { -+ RTKBT_ERR("This is not 8723a, use new patch style!"); -+ -+ /* Get version from ROM */ -+ gEVersion = rtk_get_eversion(dev_entry); -+ RTKBT_DBG("%s: New gEVersion %d", __func__, gEVersion); -+ if (gEVersion == 0xFE) { -+ RTKBT_ERR("%s: Read ROM version failure", __func__); -+ need_download_fw = 0; -+ fw_len = 0; -+ goto alloc_fail; -+ } -+ -+ /* check Signature and Extension Section Field */ -+ if (((memcmp(epatch_buf, RTK_EPATCH_SIGNATURE, 8) != 0) && (memcmp(epatch_buf, RTK_EPATCH_SIGNATURE_NEW, 8) != 0))|| -+ memcmp(epatch_buf + buf_len - config_len - 4, -+ Extension_Section_SIGNATURE, 4) != 0) { -+ RTKBT_ERR("Check SIGNATURE error! 
do not download fw"); -+ need_download_fw = 0; -+ } else { -+ proj_id = -+ rtk_get_fw_project_id(epatch_buf + buf_len - -+ config_len - 5); -+ -+ if (lmp_version != project_id[proj_id]) { -+ RTKBT_ERR -+ ("lmp_version is %x, project_id is %x, does not match!!!", -+ lmp_version, project_id[proj_id]); -+ need_download_fw = 0; -+ } else { -+ RTKBT_DBG -+ ("lmp_version is %x, project_id is %x, match!", -+ lmp_version, project_id[proj_id]); -+ -+ if(memcmp(epatch_buf, RTK_EPATCH_SIGNATURE_NEW, 8) == 0) { -+ int key_id = rtk_vendor_read(dev_entry, READ_SEC_PROJ); -+ RTKBT_DBG("%s: key id %d", __func__, key_id); -+ if (key_id < 0) { -+ RTKBT_ERR("%s: Read key id failure", __func__); -+ need_download_fw = 0; -+ fw_len = 0; -+ goto alloc_fail; -+ } -+ rtb_get_patch_header(&buf_len, &patch_node_hdr, epatch_buf, key_id); -+ if(buf_len == 0) -+ goto alloc_fail; -+ RTKBT_DBG("buf_len = 0x%x", buf_len); -+ buf_len += config_len; -+ } else { -+ rtk_get_patch_entry(epatch_buf, ¤t_entry); -+ -+ if (current_entry.patch_length == 0) -+ goto alloc_fail; -+ -+ buf_len = current_entry.patch_length + config_len; -+ RTKBT_DBG("buf_len = 0x%x", buf_len); -+ } -+ -+ if (!(buf = kzalloc(buf_len, GFP_KERNEL))) { -+ RTKBT_ERR -+ ("Can't alloc memory for multi fw&config"); -+ buf_len = -1; -+ } else { -+ if(memcmp(epatch_buf, RTK_EPATCH_SIGNATURE_NEW, 8) == 0) { -+ int tmp_len = 0; -+ list_for_each_safe(pos, next, &patch_node_hdr.list) -+ { -+ tmp = list_entry(pos, struct patch_node, list); -+ RTKBT_DBG("len = 0x%x", tmp->len); -+ memcpy(buf + tmp_len, tmp->payload, tmp->len); -+ tmp_len += tmp->len; -+ list_del_init(pos); -+ kfree(tmp); -+ } -+ if (config_len) { -+ memcpy(&buf -+ [buf_len - config_len], -+ config_file_buf, -+ config_len); -+ } -+ } else { -+ memcpy(buf, -+ epatch_buf + -+ current_entry.start_offset, -+ current_entry.patch_length); -+ memcpy(buf + current_entry.patch_length - 4, epatch_buf + 8, 4); /*fw version */ -+ if (config_len) { -+ memcpy(&buf -+ [buf_len - config_len], -+ config_file_buf, -+ config_len); -+ } -+ } -+ } -+ } -+ } -+ } -+ -+ RTKBT_DBG("fw:%s exists, config file:%s exists", -+ (buf_len > 0) ? "" : "not", (config_len > 0) ? 
"" : "not"); -+ if (buf && (buf_len > 0) && (need_download_fw)) { -+ fw_len = buf_len; -+ *buff = buf; -+ } -+ -+ RTKBT_DBG("load_firmware done"); -+ -+alloc_fail: -+ release_firmware(fw); -+ -+ if (epatch_buf) -+ kfree(epatch_buf); -+ -+ if (config_file_buf) -+ kfree(config_file_buf); -+fw_fail: -+ if (fw_len == 0) -+ kfree(buf); -+ -+ return fw_len; -+} -+ -+void init_xdata(xchange_data * xdata, dev_data * dev_entry) -+{ -+ memset(xdata, 0, sizeof(xchange_data)); -+ xdata->dev_entry = dev_entry; -+ xdata->pipe_in = usb_rcvintpipe(dev_entry->udev, INTR_EP); -+ xdata->pipe_out = usb_sndctrlpipe(dev_entry->udev, CTRL_EP); -+ xdata->send_pkt = kzalloc(PKT_LEN, GFP_KERNEL); -+ xdata->rcv_pkt = kzalloc(PKT_LEN, GFP_KERNEL); -+ xdata->cmd_hdr = (struct hci_command_hdr *)(xdata->send_pkt); -+ xdata->evt_hdr = (struct hci_event_hdr *)(xdata->rcv_pkt); -+ xdata->cmd_cmp = -+ (struct hci_ev_cmd_complete *)(xdata->rcv_pkt + EVT_HDR_LEN); -+ xdata->req_para = xdata->send_pkt + CMD_HDR_LEN; -+ xdata->rsp_para = xdata->rcv_pkt + EVT_HDR_LEN + CMD_CMP_LEN; -+} -+ -+int check_fw_version(xchange_data * xdata) -+{ -+ struct hci_rp_read_local_version *read_ver_rsp; -+ patch_info *patch_entry; -+ int ret_val; -+ int retry = 0; -+ uint16_t lmp_subver, hci_rev, manufacturer; -+ -+ /* Ensure that the first cmd is hci reset after system suspend -+ * or system reboot */ -+ send_reset_command(xdata); -+ -+get_ver: -+ xdata->cmd_hdr->opcode = cpu_to_le16(HCI_OP_READ_LOCAL_VERSION); -+ xdata->cmd_hdr->plen = 0; -+ xdata->pkt_len = CMD_HDR_LEN; -+ -+ ret_val = send_hci_cmd(xdata); -+ if (ret_val < 0) { -+ RTKBT_ERR("%s: Failed to send HCI command.", __func__); -+ goto version_end; -+ } -+ -+ ret_val = rcv_hci_evt(xdata); -+ if (ret_val < 0) { -+ RTKBT_ERR("%s: Failed to receive HCI event.", __func__); -+ goto version_end; -+ } -+ -+ patch_entry = xdata->dev_entry->patch_entry; -+ read_ver_rsp = (struct hci_rp_read_local_version *)(xdata->rsp_para); -+ lmp_subver = le16_to_cpu(read_ver_rsp->lmp_subver); -+ hci_rev = le16_to_cpu(read_ver_rsp->hci_rev); -+ manufacturer = le16_to_cpu(read_ver_rsp->manufacturer); -+ -+ RTKBT_DBG("read_ver_rsp->lmp_subver = 0x%x", lmp_subver); -+ RTKBT_DBG("read_ver_rsp->hci_rev = 0x%x", hci_rev); -+ RTKBT_DBG("patch_entry->lmp_sub = 0x%x", patch_entry->lmp_sub); -+ if (patch_entry->lmp_sub != lmp_subver) { -+ return 1; -+ } -+ -+ ret_val = 0; -+version_end: -+ if (ret_val) { -+ send_reset_command(xdata); -+ retry++; -+ if (retry < 2) -+ goto get_ver; -+ } -+ -+ return ret_val; -+} -+ -+uint8_t rtk_get_eversion(dev_data * dev_entry) -+{ -+ struct rtk_eversion_evt *eversion; -+ patch_info *patch_entry; -+ int ret_val = 0; -+ xchange_data *xdata = NULL; -+ -+ RTKBT_DBG("%s: gEVersion %d", __func__, gEVersion); -+ if (gEVersion != 0xFF && gEVersion != 0xFE) { -+ RTKBT_DBG("gEVersion != 0xFF, return it directly!"); -+ return gEVersion; -+ } -+ -+ xdata = kzalloc(sizeof(xchange_data), GFP_KERNEL); -+ if (NULL == xdata) { -+ ret_val = 0xFE; -+ RTKBT_DBG("NULL == xdata"); -+ return ret_val; -+ } -+ -+ init_xdata(xdata, dev_entry); -+ -+ xdata->cmd_hdr->opcode = cpu_to_le16(HCI_VENDOR_READ_RTK_ROM_VERISION); -+ xdata->cmd_hdr->plen = 0; -+ xdata->pkt_len = CMD_HDR_LEN; -+ -+ ret_val = send_hci_cmd(xdata); -+ if (ret_val < 0) { -+ RTKBT_ERR("Failed to send read RTK rom version cmd."); -+ ret_val = 0xFE; -+ goto version_end; -+ } -+ -+ ret_val = rcv_hci_evt(xdata); -+ if (ret_val < 0) { -+ RTKBT_ERR("Failed to receive HCI event for rom version."); -+ ret_val = 0xFE; -+ goto version_end; -+ } -+ -+ 
patch_entry = xdata->dev_entry->patch_entry; -+ eversion = (struct rtk_eversion_evt *)(xdata->rsp_para); -+ RTKBT_DBG("eversion->status = 0x%x, eversion->version = 0x%x", -+ eversion->status, eversion->version); -+ if (eversion->status) { -+ ret_val = 0; -+ //global_eversion = 0; -+ } else { -+ ret_val = eversion->version; -+ //global_eversion = eversion->version; -+ } -+ -+version_end: -+ if (xdata != NULL) { -+ if (xdata->send_pkt) -+ kfree(xdata->send_pkt); -+ if (xdata->rcv_pkt) -+ kfree(xdata->rcv_pkt); -+ kfree(xdata); -+ } -+ return ret_val; -+} -+ -+int download_data(xchange_data * xdata) -+{ -+ download_cp *cmd_para; -+ download_rp *evt_para; -+ uint8_t *pcur; -+ int pkt_len, frag_num, frag_len; -+ int i, ret_val; -+ int j = 0; -+ -+ RTKBT_DBG("download_data start"); -+ -+ cmd_para = (download_cp *) xdata->req_para; -+ evt_para = (download_rp *) xdata->rsp_para; -+ pcur = xdata->fw_data; -+ pkt_len = CMD_HDR_LEN + sizeof(download_cp); -+ frag_num = xdata->fw_len / PATCH_SEG_MAX + 1; -+ frag_len = PATCH_SEG_MAX; -+ -+ for (i = 0; i < frag_num; i++) { -+ cmd_para->index = j++; -+ -+ if(cmd_para->index == 0x7f) -+ j = 1; -+ -+ if (i == (frag_num - 1)) { -+ cmd_para->index |= DATA_END; -+ frag_len = xdata->fw_len % PATCH_SEG_MAX; -+ pkt_len -= (PATCH_SEG_MAX - frag_len); -+ } -+ xdata->cmd_hdr->opcode = cpu_to_le16(DOWNLOAD_OPCODE); -+ xdata->cmd_hdr->plen = sizeof(uint8_t) + frag_len; -+ xdata->pkt_len = pkt_len; -+ memcpy(cmd_para->data, pcur, frag_len); -+ -+ ret_val = send_hci_cmd(xdata); -+ if (ret_val < 0) { -+ return ret_val; -+ } -+ -+ ret_val = rcv_hci_evt(xdata); -+ if (ret_val < 0) { -+ return ret_val; -+ } -+ -+ if (0 != evt_para->status) { -+ return -1; -+ } -+ -+ pcur += PATCH_SEG_MAX; -+ } -+ -+ RTKBT_DBG("download_data done"); -+ return xdata->fw_len; -+} -+ -+int send_hci_cmd(xchange_data * xdata) -+{ -+ int ret_val; -+ -+ ret_val = usb_control_msg(xdata->dev_entry->udev, xdata->pipe_out, -+ 0, USB_TYPE_CLASS, 0, 0, -+ (void *)(xdata->send_pkt), -+ xdata->pkt_len, MSG_TO); -+ -+ if (ret_val < 0) -+ RTKBT_ERR("%s; failed to send ctl msg for hci cmd, err %d", -+ __func__, ret_val); -+ -+ return ret_val; -+} -+ -+int rcv_hci_evt(xchange_data * xdata) -+{ -+ int ret_len = 0, ret_val = 0; -+ int i; // Added by Realtek -+ -+ while (1) { -+ // **************************** Modifed by Realtek (begin) -+ for (i = 0; i < 5; i++) // Try to send USB interrupt message 5 times. 
-+ { -+ ret_val = -+ usb_interrupt_msg(xdata->dev_entry->udev, -+ xdata->pipe_in, -+ (void *)(xdata->rcv_pkt), PKT_LEN, -+ &ret_len, MSG_TO); -+ if (ret_val >= 0) -+ break; -+ } -+ // **************************** Modifed by Realtek (end) -+ -+ if (ret_val < 0) { -+ RTKBT_ERR("%s; no usb intr msg for hci event, err %d", -+ __func__, ret_val); -+ return ret_val; -+ } -+ -+ if (CMD_CMP_EVT == xdata->evt_hdr->evt) { -+ if (xdata->cmd_hdr->opcode == xdata->cmd_cmp->opcode) -+ return ret_len; -+ } -+ } -+} -+ -+void print_acl(struct sk_buff *skb, int dataOut) -+{ -+#if PRINT_ACL_DATA -+ uint wlength = skb->len; -+ uint icount = 0; -+ u16 *handle = (u16 *) (skb->data); -+ u16 dataLen = *(handle + 1); -+ u8 *acl_data = (u8 *) (skb->data); -+//if (0==dataOut) -+ printk("%d handle:%04x,len:%d,", dataOut, *handle, dataLen); -+//else -+// printk("In handle:%04x,len:%d,",*handle,dataLen); -+/* for(icount=4;(icountlen; -+ uint icount = 0; -+ u16 *opcode = (u16 *) (skb->data); -+ u8 *cmd_data = (u8 *) (skb->data); -+ u8 paramLen = *(cmd_data + 2); -+ -+ switch (*opcode) { -+ case HCI_OP_INQUIRY: -+ printk("HCI_OP_INQUIRY"); -+ break; -+ case HCI_OP_INQUIRY_CANCEL: -+ printk("HCI_OP_INQUIRY_CANCEL"); -+ break; -+ case HCI_OP_EXIT_PERIODIC_INQ: -+ printk("HCI_OP_EXIT_PERIODIC_INQ"); -+ break; -+ case HCI_OP_CREATE_CONN: -+ printk("HCI_OP_CREATE_CONN"); -+ break; -+ case HCI_OP_DISCONNECT: -+ printk("HCI_OP_DISCONNECT"); -+ break; -+ case HCI_OP_CREATE_CONN_CANCEL: -+ printk("HCI_OP_CREATE_CONN_CANCEL"); -+ break; -+ case HCI_OP_ACCEPT_CONN_REQ: -+ printk("HCI_OP_ACCEPT_CONN_REQ"); -+ break; -+ case HCI_OP_REJECT_CONN_REQ: -+ printk("HCI_OP_REJECT_CONN_REQ"); -+ break; -+ case HCI_OP_AUTH_REQUESTED: -+ printk("HCI_OP_AUTH_REQUESTED"); -+ break; -+ case HCI_OP_SET_CONN_ENCRYPT: -+ printk("HCI_OP_SET_CONN_ENCRYPT"); -+ break; -+ case HCI_OP_REMOTE_NAME_REQ: -+ printk("HCI_OP_REMOTE_NAME_REQ"); -+ break; -+ case HCI_OP_READ_REMOTE_FEATURES: -+ printk("HCI_OP_READ_REMOTE_FEATURES"); -+ break; -+ case HCI_OP_SNIFF_MODE: -+ printk("HCI_OP_SNIFF_MODE"); -+ break; -+ case HCI_OP_EXIT_SNIFF_MODE: -+ printk("HCI_OP_EXIT_SNIFF_MODE"); -+ break; -+ case HCI_OP_SWITCH_ROLE: -+ printk("HCI_OP_SWITCH_ROLE"); -+ break; -+ case HCI_OP_SNIFF_SUBRATE: -+ printk("HCI_OP_SNIFF_SUBRATE"); -+ break; -+ case HCI_OP_RESET: -+ printk("HCI_OP_RESET"); -+ break; -+ default: -+ printk("CMD"); -+ break; -+ } -+ printk(":%04x,len:%d,", *opcode, paramLen); -+ for (icount = 3; (icount < wlength) && (icount < 24); icount++) { -+ printk("%02x ", *(cmd_data + icount)); -+ } -+ printk("\n"); -+ -+#endif -+} -+ -+void print_event(struct sk_buff *skb) -+{ -+#if PRINT_CMD_EVENT -+ uint wlength = skb->len; -+ uint icount = 0; -+ u8 *opcode = (u8 *) (skb->data); -+ u8 paramLen = *(opcode + 1); -+ -+ switch (*opcode) { -+ case HCI_EV_INQUIRY_COMPLETE: -+ printk("HCI_EV_INQUIRY_COMPLETE"); -+ break; -+ case HCI_EV_INQUIRY_RESULT: -+ printk("HCI_EV_INQUIRY_RESULT"); -+ break; -+ case HCI_EV_CONN_COMPLETE: -+ printk("HCI_EV_CONN_COMPLETE"); -+ break; -+ case HCI_EV_CONN_REQUEST: -+ printk("HCI_EV_CONN_REQUEST"); -+ break; -+ case HCI_EV_DISCONN_COMPLETE: -+ printk("HCI_EV_DISCONN_COMPLETE"); -+ break; -+ case HCI_EV_AUTH_COMPLETE: -+ printk("HCI_EV_AUTH_COMPLETE"); -+ break; -+ case HCI_EV_REMOTE_NAME: -+ printk("HCI_EV_REMOTE_NAME"); -+ break; -+ case HCI_EV_ENCRYPT_CHANGE: -+ printk("HCI_EV_ENCRYPT_CHANGE"); -+ break; -+ case HCI_EV_CHANGE_LINK_KEY_COMPLETE: -+ printk("HCI_EV_CHANGE_LINK_KEY_COMPLETE"); -+ break; -+ case HCI_EV_REMOTE_FEATURES: -+ 
printk("HCI_EV_REMOTE_FEATURES"); -+ break; -+ case HCI_EV_REMOTE_VERSION: -+ printk("HCI_EV_REMOTE_VERSION"); -+ break; -+ case HCI_EV_QOS_SETUP_COMPLETE: -+ printk("HCI_EV_QOS_SETUP_COMPLETE"); -+ break; -+ case HCI_EV_CMD_COMPLETE: -+ printk("HCI_EV_CMD_COMPLETE"); -+ break; -+ case HCI_EV_CMD_STATUS: -+ printk("HCI_EV_CMD_STATUS"); -+ break; -+ case HCI_EV_ROLE_CHANGE: -+ printk("HCI_EV_ROLE_CHANGE"); -+ break; -+ case HCI_EV_NUM_COMP_PKTS: -+ printk("HCI_EV_NUM_COMP_PKTS"); -+ break; -+ case HCI_EV_MODE_CHANGE: -+ printk("HCI_EV_MODE_CHANGE"); -+ break; -+ case HCI_EV_PIN_CODE_REQ: -+ printk("HCI_EV_PIN_CODE_REQ"); -+ break; -+ case HCI_EV_LINK_KEY_REQ: -+ printk("HCI_EV_LINK_KEY_REQ"); -+ break; -+ case HCI_EV_LINK_KEY_NOTIFY: -+ printk("HCI_EV_LINK_KEY_NOTIFY"); -+ break; -+ case HCI_EV_CLOCK_OFFSET: -+ printk("HCI_EV_CLOCK_OFFSET"); -+ break; -+ case HCI_EV_PKT_TYPE_CHANGE: -+ printk("HCI_EV_PKT_TYPE_CHANGE"); -+ break; -+ case HCI_EV_PSCAN_REP_MODE: -+ printk("HCI_EV_PSCAN_REP_MODE"); -+ break; -+ case HCI_EV_INQUIRY_RESULT_WITH_RSSI: -+ printk("HCI_EV_INQUIRY_RESULT_WITH_RSSI"); -+ break; -+ case HCI_EV_REMOTE_EXT_FEATURES: -+ printk("HCI_EV_REMOTE_EXT_FEATURES"); -+ break; -+ case HCI_EV_SYNC_CONN_COMPLETE: -+ printk("HCI_EV_SYNC_CONN_COMPLETE"); -+ break; -+ case HCI_EV_SYNC_CONN_CHANGED: -+ printk("HCI_EV_SYNC_CONN_CHANGED"); -+ break; -+ case HCI_EV_SNIFF_SUBRATE: -+ printk("HCI_EV_SNIFF_SUBRATE"); -+ break; -+ case HCI_EV_EXTENDED_INQUIRY_RESULT: -+ printk("HCI_EV_EXTENDED_INQUIRY_RESULT"); -+ break; -+ case HCI_EV_IO_CAPA_REQUEST: -+ printk("HCI_EV_IO_CAPA_REQUEST"); -+ break; -+ case HCI_EV_SIMPLE_PAIR_COMPLETE: -+ printk("HCI_EV_SIMPLE_PAIR_COMPLETE"); -+ break; -+ case HCI_EV_REMOTE_HOST_FEATURES: -+ printk("HCI_EV_REMOTE_HOST_FEATURES"); -+ break; -+ default: -+ printk("event"); -+ break; -+ } -+ printk(":%02x,len:%d,", *opcode, paramLen); -+ for (icount = 2; (icount < wlength) && (icount < 24); icount++) { -+ printk("%02x ", *(opcode + icount)); -+ } -+ printk("\n"); -+ -+#endif -+} -diff --git a/drivers/bluetooth/rtk_misc.h b/drivers/bluetooth/rtk_misc.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/bluetooth/rtk_misc.h -@@ -0,0 +1,134 @@ -+/* -+ * -+ * Realtek Bluetooth USB download firmware driver -+ * -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* Download LPS patch when host suspends or power off -+ * LPS patch name: lps_rtl8xxx_fw -+ * LPS config name: lps_rtl8xxx_config -+ * Download normal patch when host resume or power on */ -+/* #define RTKBT_SWITCH_PATCH */ -+ -+/* RTKBT Power-on for sideband wake-up by LE Advertising from Remote. */ -+/* Note that it's necessary to apply TV FW Patch. 
*/ -+/* #define RTKBT_SUSPEND_WAKEUP */ -+/* #define RTKBT_SHUTDOWN_WAKEUP */ -+#define RTKBT_POWERKEY_WAKEUP -+ -+/* RTKBT Power-on Whitelist for sideband wake-up by LE Advertising from Remote. -+ * Note that it's necessary to apply TV FW Patch. */ -+/* #define RTKBT_TV_POWERON_WHITELIST */ -+ -+#if 1 -+#define RTKBT_DBG(fmt, arg...) printk(KERN_DEBUG "rtk_btusb: " fmt "\n" , ## arg) -+#define RTKBT_INFO(fmt, arg...) printk(KERN_INFO "rtk_btusb: " fmt "\n" , ## arg) -+#define RTKBT_WARN(fmt, arg...) printk(KERN_WARNING "rtk_btusb: " fmt "\n", ## arg) -+#else -+#define RTKBT_DBG(fmt, arg...) -+#endif -+ -+#if 1 -+#define RTKBT_ERR(fmt, arg...) printk(KERN_ERR "rtk_btusb: " fmt "\n" , ## arg) -+#else -+#define RTKBT_ERR(fmt, arg...) -+#endif -+ -+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 33) -+#define USB_RPM -+#endif -+ -+#define CONFIG_NEEDS_BINDING -+ -+/* If module is still powered when kernel suspended, there is no re-binding. */ -+#ifdef RTKBT_SWITCH_PATCH -+#undef CONFIG_NEEDS_BINDING -+#endif -+ -+/* USB SS */ -+#if (defined CONFIG_BTUSB_AUTOSUSPEND) && (defined USB_RPM) -+#define BTUSB_RPM -+#endif -+ -+#define PRINT_CMD_EVENT 0 -+#define PRINT_ACL_DATA 0 -+ -+extern int patch_add(struct usb_interface *intf); -+extern void patch_remove(struct usb_interface *intf); -+extern int download_patch(struct usb_interface *intf); -+extern void print_event(struct sk_buff *skb); -+extern void print_command(struct sk_buff *skb); -+extern void print_acl(struct sk_buff *skb, int dataOut); -+ -+#if defined RTKBT_SWITCH_PATCH || defined RTKBT_TV_POWERON_WHITELIST -+int __rtk_send_hci_cmd(struct usb_device *udev, u8 *buf, u16 size); -+#endif -+ -+#ifdef RTKBT_SWITCH_PATCH -+#define RTLBT_CLOSE (1 << 0) -+struct api_context { -+ u32 flags; -+ struct completion done; -+ int status; -+}; -+ -+int download_special_patch(struct usb_interface *intf, const char *special_name); -+#endif -+ -+int setup_btrealtek_flag(struct usb_interface *intf, struct hci_dev *hdev); -+ -+enum { -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) -+ REALTEK_ALT6_CONTINUOUS_TX_CHIP, -+#endif -+ -+ __REALTEK_NUM_FLAGS, -+}; -+ -+struct btrealtek_data { -+ DECLARE_BITMAP(flags, __REALTEK_NUM_FLAGS); -+}; -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0) -+static inline void *hci_get_priv(struct hci_dev *hdev) -+{ -+ return (char *)hdev + sizeof(*hdev); -+} -+#endif -+ -+#define btrealtek_set_flag(hdev, nr) \ -+ do { \ -+ struct btrealtek_data *realtek = hci_get_priv((hdev)); \ -+ set_bit((nr), realtek->flags); \ -+ } while (0) -+ -+#define btrealtek_get_flag(hdev) \ -+ (((struct btrealtek_data *)hci_get_priv(hdev))->flags) -+ -+#define btrealtek_test_flag(hdev, nr) test_bit((nr), btrealtek_get_flag(hdev)) -+ -+#if defined RTKBT_SUSPEND_WAKEUP || defined RTKBT_SHUTDOWN_WAKEUP || defined RTKBT_SWITCH_PATCH -+int set_scan(struct usb_interface *intf); -+#endif --- -Armbian - diff --git a/patch/kernel/archive/spacemit-6.1/007-drivers-clk-spacemit.patch b/patch/kernel/archive/spacemit-6.1/007-drivers-clk-spacemit.patch deleted file mode 100644 index c5643d5da7b7..000000000000 --- a/patch/kernel/archive/spacemit-6.1/007-drivers-clk-spacemit.patch +++ /dev/null @@ -1,3245 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Fri, 21 Jun 2024 11:54:06 -0400 -Subject: add spacemit patch set - -source: https://gitee.com/bianbu-linux/linux-6.1 - -Signed-off-by: Patrick Yavitz ---- - drivers/clk/Kconfig | 2 +- - drivers/clk/Makefile | 1 + - drivers/clk/spacemit/Kconfig | 15 + - 
drivers/clk/spacemit/Makefile | 8 + - drivers/clk/spacemit/ccu-spacemit-k1x.c | 1537 ++++++++++ - drivers/clk/spacemit/ccu-spacemit-k1x.h | 83 + - drivers/clk/spacemit/ccu_ddn.c | 170 + - drivers/clk/spacemit/ccu_ddn.h | 97 + - drivers/clk/spacemit/ccu_mix.c | 489 +++ - drivers/clk/spacemit/ccu_mix.h | 374 +++ - drivers/clk/spacemit/ccu_pll.c | 280 ++ - drivers/clk/spacemit/ccu_pll.h | 84 + - 12 files changed, 3139 insertions(+), 1 deletion(-) - -diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig -index 111111111111..222222222222 100644 ---- a/drivers/clk/Kconfig -+++ b/drivers/clk/Kconfig -@@ -471,7 +471,7 @@ source "drivers/clk/visconti/Kconfig" - source "drivers/clk/x86/Kconfig" - source "drivers/clk/xilinx/Kconfig" - source "drivers/clk/zynqmp/Kconfig" -- -+source "drivers/clk/spacemit/Kconfig" - # Kunit test cases - config CLK_KUNIT_TEST - tristate "Basic Clock Framework Kunit Tests" if !KUNIT_ALL_TESTS -diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/clk/Makefile -+++ b/drivers/clk/Makefile -@@ -132,3 +132,4 @@ endif - obj-y += xilinx/ - obj-$(CONFIG_ARCH_ZYNQ) += zynq/ - obj-$(CONFIG_COMMON_CLK_ZYNQMP) += zynqmp/ -+obj-y += spacemit/ -diff --git a/drivers/clk/spacemit/Kconfig b/drivers/clk/spacemit/Kconfig -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/clk/spacemit/Kconfig -@@ -0,0 +1,15 @@ -+# SPDX-License-Identifier: GPL-2.0 -+# common clock support for SPACEMIT SoC family. -+ -+config SPACEMIT_K1PRO_CCU -+ tristate "Clock support for Spacemit k1pro SoCs" -+ depends on SOC_SPACEMIT_K1PRO -+ help -+ Build the driver for K1pro Clock Driver. -+ -+config SPACEMIT_K1X_CCU -+ tristate "Clock support for Spacemit k1x SoCs" -+ depends on SOC_SPACEMIT_K1X -+ help -+ Build the driver for Spacemit K1x Clock Driver. -+ -diff --git a/drivers/clk/spacemit/Makefile b/drivers/clk/spacemit/Makefile -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/clk/spacemit/Makefile -@@ -0,0 +1,8 @@ -+# SPDX-License-Identifier: GPL-2.0 -+# -+# Spacemit Clock specific Makefile -+# -+ -+#SoC support -+obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu-spacemit-k1x.o ccu_mix.o ccu_pll.o ccu_ddn.o -+obj-$(CONFIG_SPACEMIT_K1PRO_CCU) += ccu-spacemit-k1pro.o -diff --git a/drivers/clk/spacemit/ccu-spacemit-k1x.c b/drivers/clk/spacemit/ccu-spacemit-k1x.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/clk/spacemit/ccu-spacemit-k1x.c -@@ -0,0 +1,1537 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Spacemit k1x clock controller driver -+ * -+ * Copyright (c) 2023, spacemit Corporation. -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "ccu-spacemit-k1x.h" -+#include "ccu_mix.h" -+#include "ccu_pll.h" -+#include "ccu_ddn.h" -+ -+#define LOG_INFO(fmt, arg...) 
pr_info("[K1X-CLK][%s][%d]:" fmt "\n", __func__, __LINE__, ##arg) -+ -+DEFINE_SPINLOCK(g_cru_lock); -+ -+/* APBS register offset */ -+//pll1 -+#define APB_SPARE1_REG 0x100 -+#define APB_SPARE2_REG 0x104 -+#define APB_SPARE3_REG 0x108 -+//pll2 -+#define APB_SPARE7_REG 0x118 -+#define APB_SPARE8_REG 0x11c -+#define APB_SPARE9_REG 0x120 -+//pll3 -+#define APB_SPARE10_REG 0x124 -+#define APB_SPARE11_REG 0x128 -+#define APB_SPARE12_REG 0x12c -+/* end of APBS register offset */ -+ -+/* APBC register offset */ -+#define APBC_UART1_CLK_RST 0x0 -+#define APBC_UART2_CLK_RST 0x4 -+#define APBC_GPIO_CLK_RST 0x8 -+#define APBC_PWM0_CLK_RST 0xc -+#define APBC_PWM1_CLK_RST 0x10 -+#define APBC_PWM2_CLK_RST 0x14 -+#define APBC_PWM3_CLK_RST 0x18 -+#define APBC_TWSI8_CLK_RST 0x20 -+#define APBC_UART3_CLK_RST 0x24 -+#define APBC_RTC_CLK_RST 0x28 //reserved -+#define APBC_TWSI0_CLK_RST 0x2c -+#define APBC_TWSI1_CLK_RST 0x30 -+#define APBC_TIMERS1_CLK_RST 0x34 -+#define APBC_TWSI2_CLK_RST 0x38 -+#define APBC_AIB_CLK_RST 0x3c -+#define APBC_TWSI4_CLK_RST 0x40 -+#define APBC_TIMERS2_CLK_RST 0x44 -+#define APBC_ONEWIRE_CLK_RST 0x48 -+#define APBC_TWSI5_CLK_RST 0x4c -+#define APBC_DRO_CLK_RST 0x58 -+#define APBC_IR_CLK_RST 0x5c -+#define APBC_TWSI6_CLK_RST 0x60 -+#define APBC_COUNTER_CLK_SEL 0x64 -+ -+#define APBC_TWSI7_CLK_RST 0x68 -+#define APBC_TSEN_CLK_RST 0x6c -+ -+#define APBC_UART4_CLK_RST 0x70 -+#define APBC_UART5_CLK_RST 0x74 -+#define APBC_UART6_CLK_RST 0x78 -+#define APBC_SSP3_CLK_RST 0x7c -+ -+#define APBC_SSPA0_CLK_RST 0x80 -+#define APBC_SSPA1_CLK_RST 0x84 -+ -+#define APBC_IPC_AP2AUD_CLK_RST 0x90 -+#define APBC_UART7_CLK_RST 0x94 -+#define APBC_UART8_CLK_RST 0x98 -+#define APBC_UART9_CLK_RST 0x9c -+ -+#define APBC_CAN0_CLK_RST 0xa0 -+#define APBC_PWM4_CLK_RST 0xa8 -+#define APBC_PWM5_CLK_RST 0xac -+#define APBC_PWM6_CLK_RST 0xb0 -+#define APBC_PWM7_CLK_RST 0xb4 -+#define APBC_PWM8_CLK_RST 0xb8 -+#define APBC_PWM9_CLK_RST 0xbc -+#define APBC_PWM10_CLK_RST 0xc0 -+#define APBC_PWM11_CLK_RST 0xc4 -+#define APBC_PWM12_CLK_RST 0xc8 -+#define APBC_PWM13_CLK_RST 0xcc -+#define APBC_PWM14_CLK_RST 0xd0 -+#define APBC_PWM15_CLK_RST 0xd4 -+#define APBC_PWM16_CLK_RST 0xd8 -+#define APBC_PWM17_CLK_RST 0xdc -+#define APBC_PWM18_CLK_RST 0xe0 -+#define APBC_PWM19_CLK_RST 0xe4 -+/* end of APBC register offset */ -+ -+/* MPMU register offset */ -+#define MPMU_POSR 0x10 //no define -+#define POSR_PLL1_LOCK BIT(27) -+#define POSR_PLL2_LOCK BIT(28) -+#define POSR_PLL3_LOCK BIT(29) -+ -+#define MPMU_VRCR 0x18 //no define -+#define MPMU_VRCR_REQ_EN0 BIT(0) -+#define MPMU_VRCR_REQ_EN2 BIT(2) -+#define MPMU_VRCR_REQ_POL2 BIT(6) -+#define MPMU_VRCR_VCXO_OUT_REQ_EN2 BIT(14) -+ -+#define MPMU_WDTPCR 0x200 -+#define MPMU_RIPCCR 0x210 //no define -+#define MPMU_ACGR 0x1024 -+#define MPMU_SUCCR 0x14 -+#define MPMU_ISCCR 0x44 -+#define MPMU_SUCCR_1 0x10b0 -+#define MPMU_APBCSCR 0x1050 -+ -+/* end of MPMU register offset */ -+ -+/* APMU register offset */ -+#define APMU_JPG_CLK_RES_CTRL 0x20 -+#define APMU_CSI_CCIC2_CLK_RES_CTRL 0x24 -+#define APMU_ISP_CLK_RES_CTRL 0x38 -+#define APMU_LCD_CLK_RES_CTRL1 0x44 -+#define APMU_LCD_SPI_CLK_RES_CTRL 0x48 -+#define APMU_LCD_CLK_RES_CTRL2 0x4c -+#define APMU_CCIC_CLK_RES_CTRL 0x50 -+#define APMU_SDH0_CLK_RES_CTRL 0x54 -+#define APMU_SDH1_CLK_RES_CTRL 0x58 -+#define APMU_USB_CLK_RES_CTRL 0x5c -+#define APMU_QSPI_CLK_RES_CTRL 0x60 -+#define APMU_USB_CLK_RES_CTRL 0x5c -+#define APMU_DMA_CLK_RES_CTRL 0x64 -+#define APMU_AES_CLK_RES_CTRL 0x68 -+#define APMU_VPU_CLK_RES_CTRL 0xa4 -+#define 
APMU_GPU_CLK_RES_CTRL 0xcc -+#define APMU_SDH2_CLK_RES_CTRL 0xe0 -+#define APMU_PMUA_MC_CTRL 0xe8 -+#define APMU_PMU_CC2_AP 0x100 -+#define APMU_PMUA_EM_CLK_RES_CTRL 0x104 -+ -+#define APMU_AUDIO_CLK_RES_CTRL 0x14c -+#define APMU_HDMI_CLK_RES_CTRL 0x1B8 -+#define APMU_CCI550_CLK_CTRL 0x300 -+#define APMU_ACLK_CLK_CTRL 0x388 -+#define APMU_CPU_C0_CLK_CTRL 0x38C -+#define APMU_CPU_C1_CLK_CTRL 0x390 -+ -+#define APMU_PCIE_CLK_RES_CTRL_0 0x3cc -+#define APMU_PCIE_CLK_RES_CTRL_1 0x3d4 -+#define APMU_PCIE_CLK_RES_CTRL_2 0x3dc -+ -+#define APMU_EMAC0_CLK_RES_CTRL 0x3e4 -+#define APMU_EMAC1_CLK_RES_CTRL 0x3ec -+/* end of APMU register offset */ -+ -+/* APBC2 register offset */ -+#define APBC2_UART1_CLK_RST 0x00 -+#define APBC2_SSP2_CLK_RST 0x04 -+#define APBC2_TWSI3_CLK_RST 0x08 -+#define APBC2_RTC_CLK_RST 0x0c -+#define APBC2_TIMERS0_CLK_RST 0x10 -+#define APBC2_KPC_CLK_RST 0x14 -+#define APBC2_GPIO_CLK_RST 0x1c -+/* end of APBC2 register offset */ -+ -+/* RCPU register offset */ -+#define RCPU_HDMI_CLK_RST 0x2044 -+#define RCPU_CAN_CLK_RST 0x4c -+/* end of RCPU register offset */ -+ -+/* RCPU2 register offset */ -+#define RCPU2_PWM_CLK_RST 0x08 -+/* end of RCPU2 register offset */ -+ -+struct spacemit_k1x_clk k1x_clock_controller; -+ -+//apbs -+static const struct ccu_pll_rate_tbl pll2_rate_tbl[] = { -+ PLL_RATE(3000000000UL, 0x66, 0xdd, 0x50, 0x00, 0x3f, 0xe00000), -+ PLL_RATE(3200000000UL, 0x67, 0xdd, 0x50, 0x00, 0x43, 0xeaaaab), -+ PLL_RATE(2457600000UL, 0x64, 0xdd, 0x50, 0x00, 0x33, 0x0ccccd), -+ PLL_RATE(2800000000UL, 0x66, 0xdd, 0x50, 0x00, 0x3a, 0x155555), -+}; -+ -+static const struct ccu_pll_rate_tbl pll3_rate_tbl[] = { -+ PLL_RATE(3000000000UL, 0x66, 0xdd, 0x50, 0x00, 0x3f, 0xe00000), -+ PLL_RATE(3200000000UL, 0x67, 0xdd, 0x50, 0x00, 0x43, 0xeaaaab), -+ PLL_RATE(2457600000UL, 0x64, 0xdd, 0x50, 0x00, 0x33, 0x0ccccd), -+}; -+ -+static SPACEMIT_CCU_PLL(pll2, "pll2", &pll2_rate_tbl, ARRAY_SIZE(pll2_rate_tbl), -+ BASE_TYPE_APBS, APB_SPARE7_REG, APB_SPARE8_REG, APB_SPARE9_REG, -+ MPMU_POSR, POSR_PLL2_LOCK, 1, -+ CLK_IGNORE_UNUSED); -+ -+static SPACEMIT_CCU_PLL(pll3, "pll3", &pll3_rate_tbl, ARRAY_SIZE(pll3_rate_tbl), -+ BASE_TYPE_APBS, APB_SPARE10_REG, APB_SPARE11_REG, APB_SPARE12_REG, -+ MPMU_POSR, POSR_PLL3_LOCK, 1, -+ CLK_IGNORE_UNUSED); -+ -+//pll1 -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d2, "pll1_d2", "pll1_2457p6_vco", -+ BASE_TYPE_APBS, APB_SPARE2_REG, -+ BIT(1), BIT(1), 0x0, -+ 2, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d3, "pll1_d3", "pll1_2457p6_vco", -+ BASE_TYPE_APBS, APB_SPARE2_REG, -+ BIT(2), BIT(2), 0x0, -+ 3, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d4, "pll1_d4", "pll1_2457p6_vco", -+ BASE_TYPE_APBS, APB_SPARE2_REG, -+ BIT(3), BIT(3), 0x0, -+ 4, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d5, "pll1_d5", "pll1_2457p6_vco", -+ BASE_TYPE_APBS, APB_SPARE2_REG, -+ BIT(4), BIT(4), 0x0, -+ 5, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d6, "pll1_d6", "pll1_2457p6_vco", -+ BASE_TYPE_APBS, APB_SPARE2_REG, -+ BIT(5), BIT(5), 0x0, -+ 6, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d7, "pll1_d7", "pll1_2457p6_vco", -+ BASE_TYPE_APBS, APB_SPARE2_REG, -+ BIT(6), BIT(6), 0x0, -+ 7, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d8, "pll1_d8", "pll1_2457p6_vco", -+ BASE_TYPE_APBS, APB_SPARE2_REG, -+ BIT(7), BIT(7), 0x0, -+ 8, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d11_223p4, "pll1_d11_223p4", "pll1_2457p6_vco", -+ BASE_TYPE_APBS, APB_SPARE2_REG, -+ BIT(15), 
BIT(15), 0x0, -+ 11, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d13_189, "pll1_d13_189", "pll1_2457p6_vco", -+ BASE_TYPE_APBS, APB_SPARE2_REG, -+ BIT(16), BIT(16), 0x0, -+ 13, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d23_106p8, "pll1_d23_106p8", "pll1_2457p6_vco", -+ BASE_TYPE_APBS, APB_SPARE2_REG, -+ BIT(20), BIT(20), 0x0, -+ 23, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d64_38p4, "pll1_d64_38p4", "pll1_2457p6_vco", -+ BASE_TYPE_APBS, APB_SPARE2_REG, -+ BIT(0), BIT(0), 0x0, -+ 64, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_aud_245p7, "pll1_aud_245p7", "pll1_2457p6_vco", -+ BASE_TYPE_APBS, APB_SPARE2_REG, -+ BIT(10), BIT(10), 0x0, -+ 10, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_aud_24p5, "pll1_aud_24p5", "pll1_2457p6_vco", -+ BASE_TYPE_APBS, APB_SPARE2_REG, -+ BIT(11), BIT(11), 0x0, -+ 100, 1, CLK_IGNORE_UNUSED); -+ -+//pll2 -+static SPACEMIT_CCU_GATE_FACTOR(pll2_d1, "pll2_d1", "pll2", -+ BASE_TYPE_APBS, APB_SPARE8_REG, -+ BIT(0), BIT(0), 0x0, -+ 1, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll2_d2, "pll2_d2", "pll2", -+ BASE_TYPE_APBS, APB_SPARE8_REG, -+ BIT(1), BIT(1), 0x0, -+ 2, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll2_d3, "pll2_d3", "pll2", -+ BASE_TYPE_APBS, APB_SPARE8_REG, -+ BIT(2), BIT(2), 0x0, -+ 3, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll2_d4, "pll2_d4", "pll2", -+ BASE_TYPE_APBS, APB_SPARE8_REG, -+ BIT(3), BIT(3), 0x0, -+ 4, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll2_d5, "pll2_d5", "pll2", -+ BASE_TYPE_APBS, APB_SPARE8_REG, -+ BIT(4), BIT(4), 0x0, -+ 5, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll2_d6, "pll2_d6", "pll2", -+ BASE_TYPE_APBS, APB_SPARE8_REG, -+ BIT(5), BIT(5), 0x0, -+ 6, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll2_d7, "pll2_d7", "pll2", -+ BASE_TYPE_APBS, APB_SPARE8_REG, -+ BIT(6), BIT(6), 0x0, -+ 7, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll2_d8, "pll2_d8", "pll2", -+ BASE_TYPE_APBS, APB_SPARE8_REG, -+ BIT(7), BIT(7), 0x0, -+ 8, 1, CLK_IGNORE_UNUSED); -+ -+//pll3 -+static SPACEMIT_CCU_GATE_FACTOR(pll3_d1, "pll3_d1", "pll3", -+ BASE_TYPE_APBS, APB_SPARE11_REG, -+ BIT(0), BIT(0), 0x0, -+ 1, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll3_d2, "pll3_d2", "pll3", -+ BASE_TYPE_APBS, APB_SPARE11_REG, -+ BIT(1), BIT(1), 0x0, -+ 2, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll3_d3, "pll3_d3", "pll3", -+ BASE_TYPE_APBS, APB_SPARE11_REG, -+ BIT(2), BIT(2), 0x0, -+ 3, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll3_d4, "pll3_d4", "pll3", -+ BASE_TYPE_APBS, APB_SPARE11_REG, -+ BIT(3), BIT(3), 0x0, -+ 4, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll3_d5, "pll3_d5", "pll3", -+ BASE_TYPE_APBS, APB_SPARE11_REG, -+ BIT(4), BIT(4), 0x0, -+ 5, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll3_d6, "pll3_d6", "pll3", -+ BASE_TYPE_APBS, APB_SPARE11_REG, -+ BIT(5), BIT(5), 0x0, -+ 6, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll3_d7, "pll3_d7", "pll3", -+ BASE_TYPE_APBS, APB_SPARE11_REG, -+ BIT(6), BIT(6), 0x0, -+ 7, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll3_d8, "pll3_d8", "pll3", -+ BASE_TYPE_APBS, APB_SPARE11_REG, -+ BIT(7), BIT(7), 0x0, -+ 8, 1, CLK_IGNORE_UNUSED); -+ -+//pll3_div -+static SPACEMIT_CCU_FACTOR(pll3_80, "pll3_80", "pll3_d8", -+ 5, 1); -+static SPACEMIT_CCU_FACTOR(pll3_40, "pll3_40", "pll3_d8", -+ 10, 1); -+static SPACEMIT_CCU_FACTOR(pll3_20, 
"pll3_20", "pll3_d8", -+ 20, 1); -+ -+//pll1_d8 -+static SPACEMIT_CCU_GATE(pll1_d8_307p2, "pll1_d8_307p2", "pll1_d8", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(13), BIT(13), 0x0, -+ CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_FACTOR(pll1_d32_76p8, "pll1_d32_76p8", "pll1_d8_307p2", -+ 4, 1); -+static SPACEMIT_CCU_FACTOR(pll1_d40_61p44, "pll1_d40_61p44", "pll1_d8_307p2", -+ 5, 1); -+static SPACEMIT_CCU_FACTOR(pll1_d16_153p6, "pll1_d16_153p6", "pll1_d8", -+ 2, 1); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d24_102p4, "pll1_d24_102p4", "pll1_d8", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(12), BIT(12), 0x0, -+ 3, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d48_51p2, "pll1_d48_51p2", "pll1_d8", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(7), BIT(7), 0x0, -+ 6, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d48_51p2_ap, "pll1_d48_51p2_ap", "pll1_d8", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(11), BIT(11), 0x0, -+ 6, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_m3d128_57p6, "pll1_m3d128_57p6", "pll1_d8", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(8), BIT(8), 0x0, -+ 16, 3, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d96_25p6, "pll1_d96_25p6", "pll1_d8", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(4), BIT(4), 0x0, -+ 12, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d192_12p8, "pll1_d192_12p8", "pll1_d8", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(3), BIT(3), 0x0, -+ 24, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d192_12p8_wdt, "pll1_d192_12p8_wdt", "pll1_d8", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(19), BIT(19), 0x0, -+ 24, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d384_6p4, "pll1_d384_6p4", "pll1_d8", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(2), BIT(2), 0x0, -+ 48, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_FACTOR(pll1_d768_3p2, "pll1_d768_3p2", "pll1_d384_6p4", -+ 2, 1); -+static SPACEMIT_CCU_FACTOR(pll1_d1536_1p6, "pll1_d1536_1p6", "pll1_d384_6p4", -+ 4, 1); -+static SPACEMIT_CCU_FACTOR(pll1_d3072_0p8, "pll1_d3072_0p8", "pll1_d384_6p4", -+ 8, 1); -+//pll1_d7 -+static SPACEMIT_CCU_FACTOR(pll1_d7_351p08, "pll1_d7_351p08", "pll1_d7", -+ 1, 1); -+//pll1_d6 -+static SPACEMIT_CCU_GATE(pll1_d6_409p6, "pll1_d6_409p6", "pll1_d6", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(0), BIT(0), 0x0, -+ CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d12_204p8, "pll1_d12_204p8", "pll1_d6", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(5), BIT(5), 0x0, -+ 2, 1, CLK_IGNORE_UNUSED); -+//pll1_d5 -+static SPACEMIT_CCU_GATE(pll1_d5_491p52, "pll1_d5_491p52", "pll1_d5", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(21), BIT(21), 0x0, -+ CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d10_245p76, "pll1_d10_245p76", "pll1_d5", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(18), BIT(18), 0x0, -+ 2, 1, CLK_IGNORE_UNUSED); -+//pll1_d4 -+static SPACEMIT_CCU_GATE(pll1_d4_614p4, "pll1_d4_614p4", "pll1_d4", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(15), BIT(15), 0x0, -+ CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d52_47p26, "pll1_d52_47p26", "pll1_d4", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(10), BIT(10), 0x0, -+ 13, 1, CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_GATE_FACTOR(pll1_d78_31p5, "pll1_d78_31p5", "pll1_d4", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(6), BIT(6), 0x0, -+ 39, 2, CLK_IGNORE_UNUSED); -+//pll1_d3 -+static SPACEMIT_CCU_GATE(pll1_d3_819p2, "pll1_d3_819p2", "pll1_d3", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(14), BIT(14), 0x0, -+ CLK_IGNORE_UNUSED); -+//pll1_d2 -+static SPACEMIT_CCU_GATE(pll1_d2_1228p8, "pll1_d2_1228p8", "pll1_d2", -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ 
BIT(16), BIT(16), 0x0, -+ CLK_IGNORE_UNUSED); -+ -+//mpmu -+static struct ccu_ddn_info uart_ddn_mask_info = { -+ .factor = 2, -+ .num_mask = 0x1fff, -+ .den_mask = 0x1fff, -+ .num_shift = 16, -+ .den_shift = 0, -+}; -+static struct ccu_ddn_tbl slow_uart1_tbl[] = { -+ {.num = 125, .den = 24}, /*rate = parent_rate*24/125/2) */ -+}; -+static struct ccu_ddn_tbl slow_uart2_tbl[] = { -+ {.num = 6144, .den = 960},/*rate = parent_rate*960/6144/2) */ -+}; -+ -+static SPACEMIT_CCU_GATE_NO_PARENT(slow_uart, "slow_uart", NULL, -+ BASE_TYPE_MPMU, MPMU_ACGR, -+ BIT(1), BIT(1), 0x0, -+ 0); -+static SPACEMIT_CCU_DDN(slow_uart1_14p74, "slow_uart1_14p74", "pll1_d16_153p6", -+ &uart_ddn_mask_info, &slow_uart1_tbl, ARRAY_SIZE(slow_uart1_tbl), -+ BASE_TYPE_MPMU, MPMU_SUCCR, -+ CLK_IGNORE_UNUSED); -+static SPACEMIT_CCU_DDN(slow_uart2_48, "slow_uart2_48", "pll1_d4_614p4", -+ &uart_ddn_mask_info, &slow_uart2_tbl, ARRAY_SIZE(slow_uart2_tbl), -+ BASE_TYPE_MPMU, MPMU_SUCCR_1, -+ CLK_IGNORE_UNUSED); -+ -+//apbc -+static const char * const uart_parent_names[] = { -+ "pll1_m3d128_57p6", "slow_uart1_14p74", "slow_uart2_48" -+}; -+static SPACEMIT_CCU_MUX_GATE(uart1_clk, "uart1_clk", uart_parent_names, -+ BASE_TYPE_APBC, APBC_UART1_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(uart2_clk, "uart2_clk", uart_parent_names, -+ BASE_TYPE_APBC, APBC_UART2_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(uart3_clk, "uart3_clk", uart_parent_names, -+ BASE_TYPE_APBC, APBC_UART3_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(uart4_clk, "uart4_clk", uart_parent_names, -+ BASE_TYPE_APBC, APBC_UART4_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(uart5_clk, "uart5_clk", uart_parent_names, -+ BASE_TYPE_APBC, APBC_UART5_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(uart6_clk, "uart6_clk", uart_parent_names, -+ BASE_TYPE_APBC, APBC_UART6_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(uart7_clk, "uart7_clk", uart_parent_names, -+ BASE_TYPE_APBC, APBC_UART7_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(uart8_clk, "uart8_clk", uart_parent_names, -+ BASE_TYPE_APBC, APBC_UART8_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(uart9_clk, "uart9_clk", uart_parent_names, -+ BASE_TYPE_APBC, APBC_UART9_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_GATE(gpio_clk, "gpio_clk", "vctcxo_24", -+ BASE_TYPE_APBC, APBC_GPIO_CLK_RST, -+ 0x3, 0x3, 0x0, -+ 0); -+static const char *pwm_parent_names[] = { -+ "pll1_d192_12p8", "clk_32k" -+}; -+static SPACEMIT_CCU_MUX_GATE(pwm0_clk, "pwm0_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM0_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm1_clk, "pwm1_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM1_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm2_clk, "pwm2_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM2_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm3_clk, "pwm3_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM3_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm4_clk, "pwm4_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM4_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm5_clk, "pwm5_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM5_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm6_clk, "pwm6_clk", pwm_parent_names, -+ BASE_TYPE_APBC, 
APBC_PWM6_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm7_clk, "pwm7_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM7_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm8_clk, "pwm8_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM8_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm9_clk, "pwm9_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM9_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm10_clk, "pwm10_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM10_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm11_clk, "pwm11_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM11_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm12_clk, "pwm12_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM12_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm13_clk, "pwm13_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM13_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm14_clk, "pwm14_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM14_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm15_clk, "pwm15_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM15_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm16_clk, "pwm16_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM16_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm17_clk, "pwm17_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM17_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm18_clk, "pwm18_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM18_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(pwm19_clk, "pwm19_clk", pwm_parent_names, -+ BASE_TYPE_APBC, APBC_PWM19_CLK_RST, -+ 4, 3, 0x2, 0x2, 0x0, -+ 0); -+static const char *ssp_parent_names[] = { "pll1_d384_6p4", "pll1_d192_12p8", "pll1_d96_25p6", -+ "pll1_d48_51p2", "pll1_d768_3p2", "pll1_d1536_1p6", "pll1_d3072_0p8" -+}; -+static SPACEMIT_CCU_MUX_GATE(ssp3_clk, "ssp3_clk", ssp_parent_names, -+ BASE_TYPE_APBC, APBC_SSP3_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_GATE(rtc_clk, "rtc_clk", "clk_32k", -+ BASE_TYPE_APBC, APBC_RTC_CLK_RST, -+ 0x83, 0x83, 0x0, 0); -+static const char *twsi_parent_names[] = { -+ "pll1_d78_31p5", "pll1_d48_51p2", "pll1_d40_61p44" -+}; -+static SPACEMIT_CCU_MUX_GATE(twsi0_clk, "twsi0_clk", twsi_parent_names, -+ BASE_TYPE_APBC, APBC_TWSI0_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(twsi1_clk, "twsi1_clk", twsi_parent_names, -+ BASE_TYPE_APBC, APBC_TWSI1_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(twsi2_clk, "twsi2_clk", twsi_parent_names, -+ BASE_TYPE_APBC, APBC_TWSI2_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(twsi4_clk, "twsi4_clk", twsi_parent_names, -+ BASE_TYPE_APBC, APBC_TWSI4_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(twsi5_clk, "twsi5_clk", twsi_parent_names, -+ BASE_TYPE_APBC, APBC_TWSI5_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(twsi6_clk, "twsi6_clk", twsi_parent_names, -+ BASE_TYPE_APBC, APBC_TWSI6_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(twsi7_clk, "twsi7_clk", twsi_parent_names, -+ BASE_TYPE_APBC, APBC_TWSI7_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(twsi8_clk, "twsi8_clk", twsi_parent_names, -+ BASE_TYPE_APBC, 
APBC_TWSI8_CLK_RST, -+ 4, 3, 0x7, 0x3, 0x4, -+ 0); -+static const char *timer_parent_names[] = { -+ "pll1_d192_12p8", "clk_32k", "pll1_d384_6p4", "vctcxo_3", "vctcxo_1" -+}; -+static SPACEMIT_CCU_MUX_GATE(timers1_clk, "timers1_clk", timer_parent_names, -+ BASE_TYPE_APBC, APBC_TIMERS1_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(timers2_clk, "timers2_clk", timer_parent_names, -+ BASE_TYPE_APBC, APBC_TIMERS2_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_GATE(aib_clk, "aib_clk", "vctcxo_24", -+ BASE_TYPE_APBC, APBC_AIB_CLK_RST, -+ 0x3, 0x3, 0x0, 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(onewire_clk, "onewire_clk", NULL, -+ BASE_TYPE_APBC, APBC_ONEWIRE_CLK_RST, -+ 0x3, 0x3, 0x0, 0); -+ -+static SPACEMIT_CCU_GATE_FACTOR(i2s_sysclk, "i2s_sysclk", "pll1_d16_153p6", -+ BASE_TYPE_MPMU, MPMU_ISCCR, -+ BIT(31), BIT(31), 0x0, -+ 50, 1, 0); -+static SPACEMIT_CCU_GATE_FACTOR(i2s_bclk, "i2s_bclk", "i2s_sysclk", -+ BASE_TYPE_MPMU, MPMU_ISCCR, -+ BIT(29), BIT(29), 0x0, -+ 1, 1, 0); -+static const char *sspa_parent_names[] = { "pll1_d384_6p4", "pll1_d192_12p8", "pll1_d96_25p6", -+ "pll1_d48_51p2", "pll1_d768_3p2", "pll1_d1536_1p6", "pll1_d3072_0p8", "i2s_bclk" -+}; -+static SPACEMIT_CCU_MUX_GATE(sspa0_clk, "sspa0_clk", sspa_parent_names, -+ BASE_TYPE_APBC, APBC_SSPA0_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_MUX_GATE(sspa1_clk, "sspa1_clk", sspa_parent_names, -+ BASE_TYPE_APBC, APBC_SSPA1_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(dro_clk, "dro_clk", NULL, -+ BASE_TYPE_APBC, APBC_DRO_CLK_RST, -+ 0x1, 0x1, 0x0, 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(ir_clk, "ir_clk", NULL, -+ BASE_TYPE_APBC, APBC_IR_CLK_RST, -+ 0x1, 0x1, 0x0, 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(tsen_clk, "tsen_clk", NULL, -+ BASE_TYPE_APBC, APBC_TSEN_CLK_RST, -+ 0x3, 0x3, 0x0, 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(ipc_ap2aud_clk, "ipc_ap2aud_clk", NULL, -+ BASE_TYPE_APBC, APBC_IPC_AP2AUD_CLK_RST, -+ 0x3, 0x3, 0x0, 0); -+static const char *can_parent_names[] = { -+ "pll3_20", "pll3_40", "pll3_80" -+}; -+static SPACEMIT_CCU_MUX_GATE(can0_clk, "can0_clk", can_parent_names, -+ BASE_TYPE_APBC, APBC_CAN0_CLK_RST, -+ 4, 3, BIT(1), BIT(1), 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(can0_bus_clk, "can0_bus_clk", NULL, -+ BASE_TYPE_APBC, APBC_CAN0_CLK_RST, -+ BIT(0), BIT(0), 0x0, 0); -+ -+//mpmu -+static SPACEMIT_CCU_GATE(wdt_clk, "wdt_clk", "pll1_d96_25p6", -+ BASE_TYPE_MPMU, MPMU_WDTPCR, -+ 0x3, 0x3, 0x0, 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(ripc_clk, "ripc_clk", NULL, -+ BASE_TYPE_MPMU, MPMU_RIPCCR, -+ 0x3, 0x3, 0x0, 0); -+ -+//apmu -+static const char * const jpg_parent_names[] = { -+ "pll1_d4_614p4", "pll1_d6_409p6", "pll1_d5_491p52", "pll1_d3_819p2", -+ "pll1_d2_1228p8", "pll2_d4", "pll2_d3" -+}; -+static SPACEMIT_CCU_DIV_FC_MUX_GATE(jpg_clk, "jpg_clk", jpg_parent_names, -+ BASE_TYPE_APMU, APMU_JPG_CLK_RES_CTRL, -+ 5, 3, BIT(15), -+ 2, 3, BIT(1), BIT(1), 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(jpg_4kafbc_clk, "jpg_4kafbc_clk", NULL, -+ BASE_TYPE_APMU, APMU_JPG_CLK_RES_CTRL, -+ BIT(16), BIT(16), 0x0, 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(jpg_2kafbc_clk, "jpg_2kafbc_clk", NULL, -+ BASE_TYPE_APMU, APMU_JPG_CLK_RES_CTRL, -+ BIT(17), BIT(17), 0x0, 0); -+static const char * const ccic2phy_parent_names[] = { -+ "pll1_d24_102p4", "pll1_d48_51p2_ap" -+}; -+static SPACEMIT_CCU_MUX_GATE(ccic2phy_clk, "ccic2phy_clk", ccic2phy_parent_names, -+ BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, -+ 7, 1, BIT(5), BIT(5), 0x0, -+ 0); -+static 
const char * const ccic3phy_parent_names[] = { -+ "pll1_d24_102p4", "pll1_d48_51p2_ap" -+}; -+static SPACEMIT_CCU_MUX_GATE(ccic3phy_clk, "ccic3phy_clk", ccic3phy_parent_names, -+ BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, -+ 31, 1, BIT(30), BIT(30), 0x0, 0); -+static const char * const csi_parent_names[] = { -+ "pll1_d5_491p52", "pll1_d6_409p6", "pll1_d4_614p4", "pll1_d3_819p2", -+ "pll2_d2", "pll2_d3", "pll2_d4", "pll1_d2_1228p8" -+}; -+static SPACEMIT_CCU_DIV_FC_MUX_GATE(csi_clk, "csi_clk", csi_parent_names, -+ BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, -+ 20, 3, BIT(15), -+ 16, 3, BIT(4), BIT(4), 0x0, -+ 0); -+static const char * const camm_parent_names[] = { -+ "pll1_d8_307p2", "pll2_d5", "pll1_d6_409p6", "vctcxo_24" -+}; -+static SPACEMIT_CCU_DIV_MUX_GATE(camm0_clk, "camm0_clk", camm_parent_names, -+ BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, -+ 23, 4, 8, 2, -+ BIT(28), BIT(28), 0x0, -+ 0); -+static SPACEMIT_CCU_DIV_MUX_GATE(camm1_clk, "camm1_clk", camm_parent_names, -+ BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, -+ 23, 4, 8, 2, -+ BIT(6), BIT(6), 0x0, -+ 0); -+static SPACEMIT_CCU_DIV_MUX_GATE(camm2_clk, "camm2_clk", camm_parent_names, -+ BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, -+ 23, 4, 8, 2, -+ BIT(3), BIT(3), 0x0, -+ 0); -+static const char * const isp_cpp_parent_names[] = { -+ "pll1_d8_307p2", "pll1_d6_409p6" -+}; -+static SPACEMIT_CCU_DIV_MUX_GATE(isp_cpp_clk, "isp_cpp_clk", isp_cpp_parent_names, -+ BASE_TYPE_APMU, APMU_ISP_CLK_RES_CTRL, -+ 24, 2, 26, 1, -+ BIT(28), BIT(28), 0x0, -+ 0); -+static const char * const isp_bus_parent_names[] = { -+ "pll1_d6_409p6", "pll1_d5_491p52", "pll1_d8_307p2", "pll1_d10_245p76" -+}; -+static SPACEMIT_CCU_DIV_FC_MUX_GATE(isp_bus_clk, "isp_bus_clk", isp_bus_parent_names, -+ BASE_TYPE_APMU, APMU_ISP_CLK_RES_CTRL, -+ 18, 3, BIT(23), -+ 21, 2, BIT(17), BIT(17), 0x0, -+ 0); -+static const char * const isp_parent_names[] = { -+ "pll1_d6_409p6", "pll1_d5_491p52", "pll1_d4_614p4", "pll1_d8_307p2" -+}; -+static SPACEMIT_CCU_DIV_FC_MUX_GATE(isp_clk, "isp_clk", isp_parent_names, -+ BASE_TYPE_APMU, APMU_ISP_CLK_RES_CTRL, -+ 4, 3, BIT(7), -+ 8, 2, BIT(1), BIT(1), 0x0, -+ 0); -+static const char * const dpumclk_parent_names[] = { -+ "pll1_d6_409p6", "pll1_d5_491p52", "pll1_d4_614p4", "pll1_d8_307p2" -+}; -+static SPACEMIT_CCU_DIV2_FC_MUX_GATE(dpu_mclk, "dpu_mclk", dpumclk_parent_names, -+ BASE_TYPE_APMU, APMU_LCD_CLK_RES_CTRL1, APMU_LCD_CLK_RES_CTRL2, -+ 1, 4, BIT(29), -+ 5, 3, BIT(0), BIT(0), 0x0, -+ 0); -+static const char * const dpuesc_parent_names[] = { -+ "pll1_d48_51p2_ap", "pll1_d52_47p26", "pll1_d96_25p6", "pll1_d32_76p8" -+}; -+static SPACEMIT_CCU_MUX_GATE(dpu_esc_clk, "dpu_esc_clk", dpuesc_parent_names, -+ BASE_TYPE_APMU, APMU_LCD_CLK_RES_CTRL1, -+ 0, 2, BIT(2), BIT(2), 0x0, -+ 0); -+static const char * const dpubit_parent_names[] = { "pll1_d3_819p2", "pll2_d2", "pll2_d3", -+ "pll1_d2_1228p8", "pll2_d4", "pll2_d5", "pll2_d8", "pll2_d8" //6 should be 429M? 
-+}; -+static SPACEMIT_CCU_DIV_FC_MUX_GATE(dpu_bit_clk, "dpu_bit_clk", dpubit_parent_names, -+ BASE_TYPE_APMU, APMU_LCD_CLK_RES_CTRL1, -+ 17, 3, BIT(31), -+ 20, 3, BIT(16), BIT(16), 0x0, -+ 0); -+static const char * const dpupx_parent_names[] = { -+ "pll1_d6_409p6", "pll1_d5_491p52", "pll1_d4_614p4", "pll1_d8_307p2", "pll2_d7", "pll2_d8" -+}; -+static SPACEMIT_CCU_DIV2_FC_MUX_GATE(dpu_pxclk, "dpu_pxclk", dpupx_parent_names, -+ BASE_TYPE_APMU, APMU_LCD_CLK_RES_CTRL1, APMU_LCD_CLK_RES_CTRL2, -+ 17, 4, BIT(30), -+ 21, 3, BIT(16), BIT(16), 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(dpu_hclk, "dpu_hclk", NULL, -+ BASE_TYPE_APMU, APMU_LCD_CLK_RES_CTRL1, -+ BIT(5), BIT(5), 0x0, -+ 0); -+static const char * const dpu_spi_parent_names[] = { -+ "pll1_d8_307p2", "pll1_d6_409p6", "pll1_d10_245p76", "pll1_d11_223p4", -+ "pll1_d13_189", "pll1_d23_106p8", "pll2_d3", "pll2_d5" -+}; -+static SPACEMIT_CCU_DIV_FC_MUX_GATE(dpu_spi_clk, "dpu_spi_clk", dpu_spi_parent_names, -+ BASE_TYPE_APMU, APMU_LCD_SPI_CLK_RES_CTRL, -+ 8, 3, BIT(7), -+ 12, 3, BIT(1), BIT(1), 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(dpu_spi_hbus_clk, "dpu_spi_hbus_clk", NULL, -+ BASE_TYPE_APMU, APMU_LCD_SPI_CLK_RES_CTRL, -+ BIT(3), BIT(3), 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(dpu_spi_bus_clk, "dpu_spi_bus_clk", NULL, -+ BASE_TYPE_APMU, APMU_LCD_SPI_CLK_RES_CTRL, -+ BIT(5), BIT(5), 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(dpu_spi_aclk, "dpu_spi_aclk", NULL, -+ BASE_TYPE_APMU, APMU_LCD_SPI_CLK_RES_CTRL, -+ BIT(6), BIT(6), 0x0, -+ 0); -+static const char * const v2d_parent_names[] = { -+ "pll1_d5_491p52", "pll1_d6_409p6", "pll1_d8_307p2", "pll1_d4_614p4", -+}; -+static SPACEMIT_CCU_DIV_FC_MUX_GATE(v2d_clk, "v2d_clk", v2d_parent_names, -+ BASE_TYPE_APMU, APMU_LCD_CLK_RES_CTRL1, -+ 9, 3, BIT(28), -+ 12, 2, BIT(8), BIT(8), 0x0, -+ 0); -+static const char * const ccic_4x_parent_names[] = { -+ "pll1_d5_491p52", "pll1_d6_409p6", "pll1_d4_614p4", "pll1_d3_819p2", -+ "pll2_d2", "pll2_d3", "pll2_d4", "pll1_d2_1228p8" -+}; -+static SPACEMIT_CCU_DIV_FC_MUX_GATE(ccic_4x_clk, "ccic_4x_clk", ccic_4x_parent_names, -+ BASE_TYPE_APMU, APMU_CCIC_CLK_RES_CTRL, -+ 18, 3, BIT(15), -+ 23, 2, BIT(4), BIT(4), 0x0, -+ 0); -+static const char * const ccic1phy_parent_names[] = { -+ "pll1_d24_102p4", "pll1_d48_51p2_ap" -+}; -+static SPACEMIT_CCU_MUX_GATE(ccic1phy_clk, "ccic1phy_clk", ccic1phy_parent_names, -+ BASE_TYPE_APMU, APMU_CCIC_CLK_RES_CTRL, -+ 7, 1, BIT(5), BIT(5), 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(sdh_axi_aclk, "sdh_axi_aclk", NULL, -+ BASE_TYPE_APMU, APMU_SDH0_CLK_RES_CTRL, -+ BIT(3), BIT(3), 0x0, -+ 0); -+static const char * const sdh01_parent_names[] = {"pll1_d6_409p6", -+ "pll1_d4_614p4", "pll2_d8", "pll2_d5", "pll1_d11_223p4", "pll1_d13_189", "pll1_d23_106p8" }; -+ -+static SPACEMIT_CCU_DIV_FC_MUX_GATE(sdh0_clk, "sdh0_clk", sdh01_parent_names, -+ BASE_TYPE_APMU, APMU_SDH0_CLK_RES_CTRL, -+ 8, 3, BIT(11), -+ 5, 3, BIT(4), BIT(4), 0x0, -+ 0); -+static SPACEMIT_CCU_DIV_FC_MUX_GATE(sdh1_clk, "sdh1_clk", sdh01_parent_names, -+ BASE_TYPE_APMU, APMU_SDH1_CLK_RES_CTRL, -+ 8, 3, BIT(11), -+ 5, 3, BIT(4), BIT(4), 0x0, -+ 0); -+static const char * const sdh2_parent_names[] = {"pll1_d6_409p6", -+ "pll1_d4_614p4", "pll2_d8", "pll1_d3_819p2", "pll1_d11_223p4", "pll1_d13_189", "pll1_d23_106p8" }; -+ -+static SPACEMIT_CCU_DIV_FC_MUX_GATE(sdh2_clk, "sdh2_clk", sdh2_parent_names, -+ BASE_TYPE_APMU, APMU_SDH2_CLK_RES_CTRL, -+ 8, 3, BIT(11), -+ 5, 3, BIT(4), BIT(4), 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(usb_axi_clk, 
"usb_axi_clk", NULL, -+ BASE_TYPE_APMU, APMU_USB_CLK_RES_CTRL, -+ BIT(1), BIT(1), 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(usb_p1_aclk, "usb_p1_aclk", NULL, -+ BASE_TYPE_APMU, APMU_USB_CLK_RES_CTRL, -+ BIT(5), BIT(5), 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(usb30_clk, "usb30_clk", NULL, -+ BASE_TYPE_APMU, APMU_USB_CLK_RES_CTRL, -+ BIT(8), BIT(8), 0x0, -+ 0); -+static const char * const qspi_parent_names[] = {"pll1_d6_409p6", "pll2_d8", "pll1_d8_307p2", -+ "pll1_d10_245p76", "pll1_d11_223p4", "pll1_d23_106p8", "pll1_d5_491p52", "pll1_d13_189"}; -+static SPACEMIT_CCU_DIV_MUX_GATE(qspi_clk, "qspi_clk", qspi_parent_names, -+ BASE_TYPE_APMU, APMU_QSPI_CLK_RES_CTRL, -+ 9, 3, -+ 6, 3, BIT(4), BIT(4), 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(qspi_bus_clk, "qspi_bus_clk", NULL, -+ BASE_TYPE_APMU, APMU_QSPI_CLK_RES_CTRL, -+ BIT(3), BIT(3), 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(dma_clk, "dma_clk", NULL, -+ BASE_TYPE_APMU, APMU_DMA_CLK_RES_CTRL, -+ BIT(3), BIT(3), 0x0, -+ 0); -+static const char * const aes_parent_names[] = { -+ "pll1_d12_204p8", "pll1_d24_102p4" -+}; -+static SPACEMIT_CCU_MUX_GATE(aes_clk, "aes_clk", aes_parent_names, -+ BASE_TYPE_APMU, APMU_AES_CLK_RES_CTRL, -+ 6, 1, BIT(5), BIT(5), 0x0, -+ 0); -+static const char * const vpu_parent_names[] = { -+ "pll1_d4_614p4", "pll1_d5_491p52", "pll1_d3_819p2", "pll1_d6_409p6", -+ "pll3_d6", "pll2_d3", "pll2_d4", "pll2_d5" -+}; -+static SPACEMIT_CCU_DIV_FC_MUX_GATE(vpu_clk, "vpu_clk", vpu_parent_names, -+ BASE_TYPE_APMU, APMU_VPU_CLK_RES_CTRL, -+ 13, 3, BIT(21), -+ 10, 3, -+ BIT(3), BIT(3), 0x0, -+ 0); -+static const char * const gpu_parent_names[] = { -+ "pll1_d4_614p4", "pll1_d5_491p52", "pll1_d3_819p2", "pll1_d6_409p6", -+ "pll3_d6", "pll2_d3", "pll2_d4", "pll2_d5" -+}; -+static SPACEMIT_CCU_DIV_FC_MUX_GATE(gpu_clk, "gpu_clk", gpu_parent_names, -+ BASE_TYPE_APMU, APMU_GPU_CLK_RES_CTRL, -+ 12, 3, BIT(15), -+ 18, 3, -+ BIT(4), BIT(4), 0x0, -+ 0); -+static const char * const emmc_parent_names[] = { -+ "pll1_d6_409p6", "pll1_d4_614p4", "pll1_d52_47p26", "pll1_d3_819p2" -+}; -+static SPACEMIT_CCU_DIV_FC_MUX_GATE(emmc_clk, "emmc_clk", emmc_parent_names, -+ BASE_TYPE_APMU, APMU_PMUA_EM_CLK_RES_CTRL, -+ 8, 3, BIT(11), -+ 6, 2, -+ 0x18, 0x18, 0x0, -+ 0); -+static SPACEMIT_CCU_DIV_GATE(emmc_x_clk, "emmc_x_clk", "pll1_d2_1228p8", -+ BASE_TYPE_APMU, APMU_PMUA_EM_CLK_RES_CTRL, -+ 12, 3, BIT(15), BIT(15), 0x0, -+ 0); -+static const char * const audio_parent_names[] = { -+ "pll1_aud_245p7", "pll1_d8_307p2", "pll1_d6_409p6" -+}; -+static SPACEMIT_CCU_DIV_FC_MUX_GATE(audio_clk, "audio_clk", audio_parent_names, -+ BASE_TYPE_APMU, APMU_AUDIO_CLK_RES_CTRL, -+ 4, 3, BIT(15), -+ 7, 3, -+ BIT(12), BIT(12), 0x0, -+ 0); -+static const char * const hdmi_parent_names[] = { -+ "pll1_d6_409p6", "pll1_d5_491p52", "pll1_d4_614p4", "pll1_d8_307p2" -+}; -+static SPACEMIT_CCU_DIV_FC_MUX_GATE(hdmi_mclk, "hdmi_mclk", hdmi_parent_names, -+ BASE_TYPE_APMU, APMU_HDMI_CLK_RES_CTRL, -+ 1, 4, BIT(29), -+ 5, 3, -+ BIT(0), BIT(0), 0x0, -+ 0); -+static const char * const cci550_parent_names[] = { -+ "pll1_d5_491p52", "pll1_d4_614p4", "pll1_d3_819p2", "pll2_d3" -+}; -+static SPACEMIT_CCU_DIV_FC_MUX(cci550_clk, "cci550_clk", cci550_parent_names, -+ BASE_TYPE_APMU, APMU_CCI550_CLK_CTRL, -+ 8, 3, BIT(12), -+ 0, 2, -+ 0); -+static const char * const pmua_aclk_parent_names[] = { -+ "pll1_d10_245p76", "pll1_d8_307p2" -+}; -+static SPACEMIT_CCU_DIV_FC_MUX(pmua_aclk, "pmua_aclk", pmua_aclk_parent_names, -+ BASE_TYPE_APMU, APMU_ACLK_CLK_CTRL, -+ 1, 2, BIT(4), 
-+ 0, 1, -+ 0); -+static const char * const cpu_c0_hi_parent_names[] = { -+ "pll3_d2", "pll3_d1" -+}; -+static SPACEMIT_CCU_MUX(cpu_c0_hi_clk, "cpu_c0_hi_clk", cpu_c0_hi_parent_names, -+ BASE_TYPE_APMU, APMU_CPU_C0_CLK_CTRL, -+ 13, 1, -+ 0); -+static const char * const cpu_c0_parent_names[] = { "pll1_d4_614p4", "pll1_d3_819p2", "pll1_d6_409p6", -+ "pll1_d5_491p52", "pll1_d2_1228p8", "pll3_d3", "pll2_d3", "cpu_c0_hi_clk" -+}; -+static SPACEMIT_CCU_MUX_FC(cpu_c0_core_clk, "cpu_c0_core_clk", cpu_c0_parent_names, -+ BASE_TYPE_APMU, APMU_CPU_C0_CLK_CTRL, -+ BIT(12), -+ 0, 3, -+ 0); -+static SPACEMIT_CCU_DIV(cpu_c0_ace_clk, "cpu_c0_ace_clk", "cpu_c0_core_clk", -+ BASE_TYPE_APMU, APMU_CPU_C0_CLK_CTRL, -+ 6, 3, -+ 0); -+static SPACEMIT_CCU_DIV(cpu_c0_tcm_clk, "cpu_c0_tcm_clk", "cpu_c0_core_clk", -+ BASE_TYPE_APMU, APMU_CPU_C0_CLK_CTRL, -+ 9, 3, -+ 0); -+static const char * const cpu_c1_hi_parent_names[] = { -+ "pll3_d2", "pll3_d1" -+}; -+static SPACEMIT_CCU_MUX(cpu_c1_hi_clk, "cpu_c1_hi_clk", cpu_c1_hi_parent_names, -+ BASE_TYPE_APMU, APMU_CPU_C1_CLK_CTRL, -+ 13, 1, -+ 0); -+static const char * const cpu_c1_parent_names[] = { "pll1_d4_614p4", "pll1_d3_819p2", "pll1_d6_409p6", -+ "pll1_d5_491p52", "pll1_d2_1228p8", "pll3_d3", "pll2_d3", "cpu_c1_hi_clk" -+}; -+static SPACEMIT_CCU_MUX_FC(cpu_c1_pclk, "cpu_c1_pclk", cpu_c1_parent_names, -+ BASE_TYPE_APMU, APMU_CPU_C1_CLK_CTRL, -+ BIT(12), -+ 0, 3, -+ 0); -+static SPACEMIT_CCU_DIV(cpu_c1_ace_clk, "cpu_c1_ace_clk", "cpu_c1_pclk", -+ BASE_TYPE_APMU, APMU_CPU_C1_CLK_CTRL, -+ 6, 3, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(pcie0_clk, "pcie0_clk", NULL, -+ BASE_TYPE_APMU, APMU_PCIE_CLK_RES_CTRL_0, -+ 0x7, 0x7, 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(pcie1_clk, "pcie1_clk", NULL, -+ BASE_TYPE_APMU, APMU_PCIE_CLK_RES_CTRL_1, -+ 0x7, 0x7, 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(pcie2_clk, "pcie2_clk", NULL, -+ BASE_TYPE_APMU, APMU_PCIE_CLK_RES_CTRL_2, -+ 0x7, 0x7, 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(emac0_bus_clk, "emac0_bus_clk", NULL, -+ BASE_TYPE_APMU, APMU_EMAC0_CLK_RES_CTRL, -+ BIT(0), BIT(0), 0x0, -+ 0); -+static SPACEMIT_CCU_GATE(emac0_ptp_clk, "emac0_ptp_clk", "pll2_d6", -+ BASE_TYPE_APMU, APMU_EMAC0_CLK_RES_CTRL, -+ BIT(15), BIT(15), 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(emac1_bus_clk, "emac1_bus_clk", NULL, -+ BASE_TYPE_APMU, APMU_EMAC1_CLK_RES_CTRL, -+ BIT(0), BIT(0), 0x0, -+ 0); -+static SPACEMIT_CCU_GATE(emac1_ptp_clk, "emac1_ptp_clk", "pll2_d6", -+ BASE_TYPE_APMU, APMU_EMAC1_CLK_RES_CTRL, -+ BIT(15), BIT(15), 0x0, -+ 0); -+ -+//apbc2 -+static const char * const uart1_sec_parent_names[] = { -+ "pll1_m3d128_57p6", "slow_uart1_14p74", "slow_uart2_48" -+}; -+static SPACEMIT_CCU_MUX_GATE(uart1_sec_clk, "uart1_sec_clk", uart1_sec_parent_names, -+ BASE_TYPE_APBC2, APBC2_UART1_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+ -+static const char *ssp2_sec_parent_names[] = { "pll1_d384_6p4", "pll1_d192_12p8", "pll1_d96_25p6", -+ "pll1_d48_51p2", "pll1_d768_3p2", "pll1_d1536_1p6", "pll1_d3072_0p8" -+}; -+static SPACEMIT_CCU_MUX_GATE(ssp2_sec_clk, "ssp2_sec_clk", ssp2_sec_parent_names, -+ BASE_TYPE_APBC2, APBC2_SSP2_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static const char *twsi3_sec_parent_names[] = { -+ "pll1_d78_31p5", "pll1_d48_51p2", "pll1_d40_61p44" -+}; -+static SPACEMIT_CCU_MUX_GATE(twsi3_sec_clk, "twsi3_sec_clk", twsi3_sec_parent_names, -+ BASE_TYPE_APBC2, APBC2_TWSI3_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_GATE(rtc_sec_clk, "rtc_sec_clk", "clk_32k", -+ BASE_TYPE_APBC2, APBC2_RTC_CLK_RST, 
-+ 0x83, 0x83, 0x0, 0); -+static const char *timer_sec_parent_names[] = { -+ "pll1_d192_12p8", "clk_32k", "pll1_d384_6p4", "vctcxo_3", "vctcxo_1" -+}; -+static SPACEMIT_CCU_MUX_GATE(timers0_sec_clk, "timers0_sec_clk", timer_sec_parent_names, -+ BASE_TYPE_APBC2, APBC2_TIMERS0_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static const char *kpc_sec_parent_names[] = { -+ "pll1_d192_12p8", "clk_32k", "pll1_d384_6p4", "vctcxo_3", "vctcxo_1" -+}; -+static SPACEMIT_CCU_MUX_GATE(kpc_sec_clk, "kpc_sec_clk", kpc_sec_parent_names, -+ BASE_TYPE_APBC2, APBC2_KPC_CLK_RST, -+ 4, 3, 0x3, 0x3, 0x0, -+ 0); -+static SPACEMIT_CCU_GATE(gpio_sec_clk, "gpio_sec_clk", "vctcxo_24", -+ BASE_TYPE_APBC2, APBC2_GPIO_CLK_RST, -+ 0x3, 0x3, 0x0, -+ 0); -+ -+static const char * const apb_parent_names[] = { -+ "pll1_d96_25p6", "pll1_d48_51p2", "pll1_d96_25p6", "pll1_d24_102p4" -+}; -+static SPACEMIT_CCU_MUX(apb_clk, "apb_clk", apb_parent_names, -+ BASE_TYPE_MPMU, MPMU_APBCSCR, -+ 0, 2, 0); -+//rcpu -+static const char *rhdmi_audio_parent_names[] = { -+ "pll1_aud_24p5", "pll1_aud_245p7" -+}; -+static SPACEMIT_CCU_DIV_MUX_GATE(rhdmi_audio_clk, "rhdmi_audio_clk", rhdmi_audio_parent_names, -+ BASE_TYPE_RCPU, RCPU_HDMI_CLK_RST, -+ 4, 11, 16, 2, -+ 0x6, 0x6, 0x0, -+ 0); -+ -+static const char *rcan_parent_names[] = { -+ "pll3_20", "pll3_40", "pll3_80" -+}; -+static SPACEMIT_CCU_DIV_MUX_GATE(rcan_clk, "rcan_clk", rcan_parent_names, -+ BASE_TYPE_RCPU, RCPU_CAN_CLK_RST, -+ 8, 11, 4, 2, -+ BIT(1), BIT(1), 0x0, -+ 0); -+static SPACEMIT_CCU_GATE_NO_PARENT(rcan_bus_clk, "rcan_bus_clk", NULL, -+ BASE_TYPE_RCPU, RCPU_CAN_CLK_RST, -+ BIT(2), BIT(2), 0x0, 0); -+//rcpu2 -+static const char *rpwm_parent_names[] = { -+ "pll1_aud_245p7", "pll1_aud_24p5" -+}; -+static SPACEMIT_CCU_DIV_MUX_GATE(rpwm_clk, "rpwm_clk", rpwm_parent_names, -+ BASE_TYPE_RCPU2, RCPU2_PWM_CLK_RST, -+ 8, 11, 4, 2, -+ BIT(1), BIT(1), 0x0, -+ 0); -+ -+static struct clk_hw_onecell_data spacemit_k1x_hw_clks = { -+ .hws = { -+ [CLK_PLL2] = &pll2.common.hw, -+ [CLK_PLL3] = &pll3.common.hw, -+ [CLK_PLL1_D2] = &pll1_d2.common.hw, -+ [CLK_PLL1_D3] = &pll1_d3.common.hw, -+ [CLK_PLL1_D4] = &pll1_d4.common.hw, -+ [CLK_PLL1_D5] = &pll1_d5.common.hw, -+ [CLK_PLL1_D6] = &pll1_d6.common.hw, -+ [CLK_PLL1_D7] = &pll1_d7.common.hw, -+ [CLK_PLL1_D8] = &pll1_d8.common.hw, -+ [CLK_PLL1_D11] = &pll1_d11_223p4.common.hw, -+ [CLK_PLL1_D13] = &pll1_d13_189.common.hw, -+ [CLK_PLL1_D23] = &pll1_d23_106p8.common.hw, -+ [CLK_PLL1_D64] = &pll1_d64_38p4.common.hw, -+ [CLK_PLL1_D10_AUD] = &pll1_aud_245p7.common.hw, -+ [CLK_PLL1_D100_AUD] = &pll1_aud_24p5.common.hw, -+ [CLK_PLL2_D1] = &pll2_d1.common.hw, -+ [CLK_PLL2_D2] = &pll2_d2.common.hw, -+ [CLK_PLL2_D3] = &pll2_d3.common.hw, -+ [CLK_PLL2_D4] = &pll2_d4.common.hw, -+ [CLK_PLL2_D5] = &pll2_d5.common.hw, -+ [CLK_PLL2_D6] = &pll2_d6.common.hw, -+ [CLK_PLL2_D7] = &pll2_d7.common.hw, -+ [CLK_PLL2_D8] = &pll2_d8.common.hw, -+ [CLK_PLL3_D1] = &pll3_d1.common.hw, -+ [CLK_PLL3_D2] = &pll3_d2.common.hw, -+ [CLK_PLL3_D3] = &pll3_d3.common.hw, -+ [CLK_PLL3_D4] = &pll3_d4.common.hw, -+ [CLK_PLL3_D5] = &pll3_d5.common.hw, -+ [CLK_PLL3_D6] = &pll3_d6.common.hw, -+ [CLK_PLL3_D7] = &pll3_d7.common.hw, -+ [CLK_PLL3_D8] = &pll3_d8.common.hw, -+ [CLK_PLL3_80] = &pll3_80.common.hw, -+ [CLK_PLL3_40] = &pll3_40.common.hw, -+ [CLK_PLL3_20] = &pll3_20.common.hw, -+ [CLK_PLL1_307P2] = &pll1_d8_307p2.common.hw, -+ [CLK_PLL1_76P8] = &pll1_d32_76p8.common.hw, -+ [CLK_PLL1_61P44] = &pll1_d40_61p44.common.hw, -+ [CLK_PLL1_153P6] = &pll1_d16_153p6.common.hw, -+ [CLK_PLL1_102P4] = 
&pll1_d24_102p4.common.hw, -+ [CLK_PLL1_51P2] = &pll1_d48_51p2.common.hw, -+ [CLK_PLL1_51P2_AP] = &pll1_d48_51p2_ap.common.hw, -+ [CLK_PLL1_57P6] = &pll1_m3d128_57p6.common.hw, -+ [CLK_PLL1_25P6] = &pll1_d96_25p6.common.hw, -+ [CLK_PLL1_12P8] = &pll1_d192_12p8.common.hw, -+ [CLK_PLL1_12P8_WDT] = &pll1_d192_12p8_wdt.common.hw, -+ [CLK_PLL1_6P4] = &pll1_d384_6p4.common.hw, -+ [CLK_PLL1_3P2] = &pll1_d768_3p2.common.hw, -+ [CLK_PLL1_1P6] = &pll1_d1536_1p6.common.hw, -+ [CLK_PLL1_0P8] = &pll1_d3072_0p8.common.hw, -+ [CLK_PLL1_351] = &pll1_d7_351p08.common.hw, -+ [CLK_PLL1_409P6] = &pll1_d6_409p6.common.hw, -+ [CLK_PLL1_204P8] = &pll1_d12_204p8.common.hw, -+ [CLK_PLL1_491] = &pll1_d5_491p52.common.hw, -+ [CLK_PLL1_245P76] = &pll1_d10_245p76.common.hw, -+ [CLK_PLL1_614] = &pll1_d4_614p4.common.hw, -+ [CLK_PLL1_47P26] = &pll1_d52_47p26.common.hw, -+ [CLK_PLL1_31P5] = &pll1_d78_31p5.common.hw, -+ [CLK_PLL1_819] = &pll1_d3_819p2.common.hw, -+ [CLK_PLL1_1228] = &pll1_d2_1228p8.common.hw, -+ [CLK_SLOW_UART1] = &slow_uart1_14p74.common.hw, -+ [CLK_SLOW_UART2] = &slow_uart2_48.common.hw, -+ [CLK_UART1] = &uart1_clk.common.hw, -+ [CLK_UART2] = &uart2_clk.common.hw, -+ [CLK_UART3] = &uart3_clk.common.hw, -+ [CLK_UART4] = &uart4_clk.common.hw, -+ [CLK_UART5] = &uart5_clk.common.hw, -+ [CLK_UART6] = &uart6_clk.common.hw, -+ [CLK_UART7] = &uart7_clk.common.hw, -+ [CLK_UART8] = &uart8_clk.common.hw, -+ [CLK_UART9] = &uart9_clk.common.hw, -+ [CLK_GPIO] = &gpio_clk.common.hw, -+ [CLK_PWM0] = &pwm0_clk.common.hw, -+ [CLK_PWM1] = &pwm1_clk.common.hw, -+ [CLK_PWM2] = &pwm2_clk.common.hw, -+ [CLK_PWM3] = &pwm3_clk.common.hw, -+ [CLK_PWM4] = &pwm4_clk.common.hw, -+ [CLK_PWM5] = &pwm5_clk.common.hw, -+ [CLK_PWM6] = &pwm6_clk.common.hw, -+ [CLK_PWM7] = &pwm7_clk.common.hw, -+ [CLK_PWM8] = &pwm8_clk.common.hw, -+ [CLK_PWM9] = &pwm9_clk.common.hw, -+ [CLK_PWM10] = &pwm10_clk.common.hw, -+ [CLK_PWM11] = &pwm11_clk.common.hw, -+ [CLK_PWM12] = &pwm12_clk.common.hw, -+ [CLK_PWM13] = &pwm13_clk.common.hw, -+ [CLK_PWM14] = &pwm14_clk.common.hw, -+ [CLK_PWM15] = &pwm15_clk.common.hw, -+ [CLK_PWM16] = &pwm16_clk.common.hw, -+ [CLK_PWM17] = &pwm17_clk.common.hw, -+ [CLK_PWM18] = &pwm18_clk.common.hw, -+ [CLK_PWM19] = &pwm19_clk.common.hw, -+ [CLK_SSP3] = &ssp3_clk.common.hw, -+ [CLK_RTC] = &rtc_clk.common.hw, -+ [CLK_TWSI0] = &twsi0_clk.common.hw, -+ [CLK_TWSI1] = &twsi1_clk.common.hw, -+ [CLK_TWSI2] = &twsi2_clk.common.hw, -+ [CLK_TWSI4] = &twsi4_clk.common.hw, -+ [CLK_TWSI5] = &twsi5_clk.common.hw, -+ [CLK_TWSI6] = &twsi6_clk.common.hw, -+ [CLK_TWSI7] = &twsi7_clk.common.hw, -+ [CLK_TWSI8] = &twsi8_clk.common.hw, -+ [CLK_TIMERS1] = &timers1_clk.common.hw, -+ [CLK_TIMERS2] = &timers2_clk.common.hw, -+ [CLK_AIB] = &aib_clk.common.hw, -+ [CLK_ONEWIRE] = &onewire_clk.common.hw, -+ [CLK_SSPA0] = &sspa0_clk.common.hw, -+ [CLK_SSPA1] = &sspa1_clk.common.hw, -+ [CLK_DRO] = &dro_clk.common.hw, -+ [CLK_IR] = &ir_clk.common.hw, -+ [CLK_TSEN] = &tsen_clk.common.hw, -+ [CLK_IPC_AP2AUD] = &ipc_ap2aud_clk.common.hw, -+ [CLK_CAN0] = &can0_clk.common.hw, -+ [CLK_CAN0_BUS] = &can0_bus_clk.common.hw, -+ [CLK_WDT] = &wdt_clk.common.hw, -+ [CLK_RIPC] = &ripc_clk.common.hw, -+ [CLK_JPG] = &jpg_clk.common.hw, -+ [CLK_JPF_4KAFBC] = &jpg_4kafbc_clk.common.hw, -+ [CLK_JPF_2KAFBC] = &jpg_2kafbc_clk.common.hw, -+ [CLK_CCIC2PHY] = &ccic2phy_clk.common.hw, -+ [CLK_CCIC3PHY] = &ccic3phy_clk.common.hw, -+ [CLK_CSI] = &csi_clk.common.hw, -+ [CLK_CAMM0] = &camm0_clk.common.hw, -+ [CLK_CAMM1] = &camm1_clk.common.hw, -+ [CLK_CAMM2] = &camm2_clk.common.hw, -+ 
[CLK_ISP_CPP] = &isp_cpp_clk.common.hw, -+ [CLK_ISP_BUS] = &isp_bus_clk.common.hw, -+ [CLK_ISP] = &isp_clk.common.hw, -+ [CLK_DPU_MCLK] = &dpu_mclk.common.hw, -+ [CLK_DPU_ESC] = &dpu_esc_clk.common.hw, -+ [CLK_DPU_BIT] = &dpu_bit_clk.common.hw, -+ [CLK_DPU_PXCLK] = &dpu_pxclk.common.hw, -+ [CLK_DPU_HCLK] = &dpu_hclk.common.hw, -+ [CLK_DPU_SPI] = &dpu_spi_clk.common.hw, -+ [CLK_DPU_SPI_HBUS] = &dpu_spi_hbus_clk.common.hw, -+ [CLK_DPU_SPIBUS] = &dpu_spi_bus_clk.common.hw, -+ [CLK_SPU_SPI_ACLK] = &dpu_spi_aclk.common.hw, -+ [CLK_V2D] = &v2d_clk.common.hw, -+ [CLK_CCIC_4X] = &ccic_4x_clk.common.hw, -+ [CLK_CCIC1PHY] = &ccic1phy_clk.common.hw, -+ [CLK_SDH_AXI] = &sdh_axi_aclk.common.hw, -+ [CLK_SDH0] = &sdh0_clk.common.hw, -+ [CLK_SDH1] = &sdh1_clk.common.hw, -+ [CLK_SDH2] = &sdh2_clk.common.hw, -+ [CLK_USB_P1] = &usb_p1_aclk.common.hw, -+ [CLK_USB_AXI] = &usb_axi_clk.common.hw, -+ [CLK_USB30] = &usb30_clk.common.hw, -+ [CLK_QSPI] = &qspi_clk.common.hw, -+ [CLK_QSPI_BUS] = &qspi_bus_clk.common.hw, -+ [CLK_DMA] = &dma_clk.common.hw, -+ [CLK_AES] = &aes_clk.common.hw, -+ [CLK_VPU] = &vpu_clk.common.hw, -+ [CLK_GPU] = &gpu_clk.common.hw, -+ [CLK_EMMC] = &emmc_clk.common.hw, -+ [CLK_EMMC_X] = &emmc_x_clk.common.hw, -+ [CLK_AUDIO] = &audio_clk.common.hw, -+ [CLK_HDMI] = &hdmi_mclk.common.hw, -+ [CLK_CCI550] = &cci550_clk.common.hw, -+ [CLK_PMUA_ACLK] = &pmua_aclk.common.hw, -+ [CLK_CPU_C0_HI] = &cpu_c0_hi_clk.common.hw, -+ [CLK_CPU_C0_CORE] = &cpu_c0_core_clk.common.hw, -+ [CLK_CPU_C0_ACE] = &cpu_c0_ace_clk.common.hw, -+ [CLK_CPU_C0_TCM] = &cpu_c0_tcm_clk.common.hw, -+ [CLK_CPU_C1_HI] = &cpu_c1_hi_clk.common.hw, -+ [CLK_CPU_C1_CORE] = &cpu_c1_pclk.common.hw, -+ [CLK_CPU_C1_ACE] = &cpu_c1_ace_clk.common.hw, -+ [CLK_PCIE0] = &pcie0_clk.common.hw, -+ [CLK_PCIE1] = &pcie1_clk.common.hw, -+ [CLK_PCIE2] = &pcie2_clk.common.hw, -+ [CLK_EMAC0_BUS] = &emac0_bus_clk.common.hw, -+ [CLK_EMAC0_PTP] = &emac0_ptp_clk.common.hw, -+ [CLK_EMAC1_BUS] = &emac1_bus_clk.common.hw, -+ [CLK_EMAC1_PTP] = &emac1_ptp_clk.common.hw, -+ [CLK_SEC_UART1] = &uart1_sec_clk.common.hw, -+ [CLK_SEC_SSP2] = &ssp2_sec_clk.common.hw, -+ [CLK_SEC_TWSI3] = &twsi3_sec_clk.common.hw, -+ [CLK_SEC_RTC] = &rtc_sec_clk.common.hw, -+ [CLK_SEC_TIMERS0] = &timers0_sec_clk.common.hw, -+ [CLK_SEC_KPC] = &kpc_sec_clk.common.hw, -+ [CLK_SEC_GPIO] = &gpio_sec_clk.common.hw, -+ [CLK_APB] = &apb_clk.common.hw, -+ [CLK_SLOW_UART] = &slow_uart.common.hw, -+ [CLK_I2S_SYSCLK] = &i2s_sysclk.common.hw, -+ [CLK_I2S_BCLK] = &i2s_bclk.common.hw, -+ [CLK_RCPU_HDMIAUDIO] = &rhdmi_audio_clk.common.hw, -+ [CLK_RCPU_CAN] = &rcan_clk.common.hw, -+ [CLK_RCPU_CAN_BUS] = &rcan_bus_clk.common.hw, -+ [CLK_RCPU2_PWM] = &rpwm_clk.common.hw, -+ }, -+ .num = CLK_MAX_NO, -+}; -+ -+static struct clk_hw_table bootup_enable_clk_table[] = { -+ {"pll1_d8_307p2", CLK_PLL1_307P2}, -+ {"pll1_d6_409p6", CLK_PLL1_409P6}, -+ {"pll1_d5_491p52", CLK_PLL1_491}, -+ {"pll1_d4_614p4", CLK_PLL1_614}, -+ {"pll1_d3_819p2", CLK_PLL1_819}, -+ {"pll1_d2_1228p8", CLK_PLL1_1228}, -+ {"pll1_d10_245p76", CLK_PLL1_245P76}, -+ {"pll1_d48_51p2", CLK_PLL1_51P2}, -+ {"pll1_d48_51p2_ap", CLK_PLL1_51P2_AP}, -+ {"pll1_d96_25p6", CLK_PLL1_25P6}, -+ {"pll3_d1", CLK_PLL3_D1}, -+ {"pll3_d2", CLK_PLL3_D2}, -+ {"pll3_d3", CLK_PLL3_D3}, -+ {"pll2_d3", CLK_PLL2_D3}, -+ {"apb_clk", CLK_APB}, -+ {"pmua_aclk", CLK_PMUA_ACLK}, -+}; -+ -+void spacemit_clocks_enable(struct clk_hw_table *tbl, int tbl_size) -+{ -+ int i; -+ struct clk *clk; -+ -+ for (i = 0; i < tbl_size; i++) { -+ clk = 
clk_hw_get_clk(spacemit_k1x_hw_clks.hws[tbl[i].clk_hw_id], tbl[i].name); -+ if (!IS_ERR_OR_NULL(clk)) -+ clk_prepare_enable(clk); -+ else -+ pr_err("%s : can't find clk %s\n", __func__, tbl[i].name); -+ } -+} -+ -+int ccu_common_init(struct clk_hw * hw, struct spacemit_k1x_clk *clk_info) -+{ -+ struct ccu_common *common = hw_to_ccu_common(hw); -+ struct ccu_pll *pll = hw_to_ccu_pll(hw); -+ -+ if (!common) -+ return -1; -+ -+ common->lock = &g_cru_lock; -+ -+ switch(common->base_type){ -+ case BASE_TYPE_MPMU: -+ common->base = clk_info->mpmu_base; -+ break; -+ case BASE_TYPE_APMU: -+ common->base = clk_info->apmu_base; -+ break; -+ case BASE_TYPE_APBC: -+ common->base = clk_info->apbc_base; -+ break; -+ case BASE_TYPE_APBS: -+ common->base = clk_info->apbs_base; -+ break; -+ case BASE_TYPE_CIU: -+ common->base = clk_info->ciu_base; -+ break; -+ case BASE_TYPE_DCIU: -+ common->base = clk_info->dciu_base; -+ break; -+ case BASE_TYPE_DDRC: -+ common->base = clk_info->ddrc_base; -+ break; -+ case BASE_TYPE_AUDC: -+ common->base = clk_info->audio_ctrl_base; -+ break; -+ case BASE_TYPE_APBC2: -+ common->base = clk_info->apbc2_base; -+ break; -+ case BASE_TYPE_RCPU: -+ common->base = clk_info->rcpu_base; -+ break; -+ case BASE_TYPE_RCPU2: -+ common->base = clk_info->rcpu2_base; -+ break; -+ default: -+ common->base = clk_info->apbc_base; -+ break; -+ -+ } -+ if(common->is_pll) -+ pll->pll.lock_base = clk_info->mpmu_base; -+ -+ return 0; -+} -+ -+int spacemit_ccu_probe(struct device_node *node, struct spacemit_k1x_clk *clk_info, -+ struct clk_hw_onecell_data *hw_clks) -+{ -+ int i, ret; -+ for (i = 0; i < hw_clks->num ; i++) { -+ struct clk_hw *hw = hw_clks->hws[i]; -+ const char *name; -+ if (!hw) -+ continue; -+ if (!hw->init) -+ continue; -+ -+ ccu_common_init(hw, clk_info); -+ name = hw->init->name; -+ -+ ret = of_clk_hw_register(node, hw); -+ if (ret) { -+ pr_err("Couldn't register clock %d - %s\n", i, name); -+ goto err_clk_unreg; -+ } -+ } -+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, -+ hw_clks); -+ if (ret) -+ goto err_clk_unreg; -+ -+ //enable some clocks -+ spacemit_clocks_enable(bootup_enable_clk_table, ARRAY_SIZE(bootup_enable_clk_table)); -+ -+ return 0; -+ -+err_clk_unreg: -+ while (--i >= 0) { -+ struct clk_hw *hw = hw_clks->hws[i]; -+ if (!hw) -+ continue; -+ clk_hw_unregister(hw); -+ } -+ LOG_INFO("clock init fail"); -+ return ret; -+} -+ -+static void spacemit_k1x_ccu_probe(struct device_node *np) -+{ -+ int ret; -+ struct spacemit_k1x_clk *clk_info; -+ struct clk_hw_onecell_data *hw_clks = &spacemit_k1x_hw_clks; -+ -+ //LOG_INFO("init clock"); -+ if (of_device_is_compatible(np, "spacemit,k1x-clock")){ -+ clk_info = &k1x_clock_controller; -+ -+ clk_info->mpmu_base = of_iomap(np, 0); -+ if (!clk_info->mpmu_base) { -+ pr_err("failed to map mpmu registers\n"); -+ goto out; -+ } -+ -+ clk_info->apmu_base = of_iomap(np, 1); -+ if (!clk_info->apmu_base) { -+ pr_err("failed to map apmu registers\n"); -+ goto out; -+ } -+ -+ clk_info->apbc_base = of_iomap(np, 2); -+ if (!clk_info->apbc_base) { -+ pr_err("failed to map apbc registers\n"); -+ goto out; -+ } -+ -+ clk_info->apbs_base = of_iomap(np, 3); -+ if (!clk_info->apbs_base) { -+ pr_err("failed to map apbs registers\n"); -+ goto out; -+ } -+ -+ clk_info->ciu_base = of_iomap(np, 4); -+ if (!clk_info->ciu_base) { -+ pr_err("failed to map ciu registers\n"); -+ goto out; -+ } -+ -+ clk_info->dciu_base = of_iomap(np, 5); -+ if (!clk_info->dciu_base) { -+ pr_err("failed to map dragon ciu registers\n"); -+ goto out; -+ } -+ -+ 
clk_info->ddrc_base = of_iomap(np, 6); -+ if (!clk_info->ddrc_base) { -+ pr_err("failed to map ddrc registers\n"); -+ goto out; -+ } -+ -+ clk_info->apbc2_base = of_iomap(np, 7); -+ if (!clk_info->apbc2_base) { -+ pr_err("failed to map apbc2 registers\n"); -+ goto out; -+ } -+ -+ clk_info->rcpu_base = of_iomap(np, 8); -+ if (!clk_info->rcpu_base) { -+ pr_err("failed to map rcpu registers\n"); -+ goto out; -+ } -+ -+ clk_info->rcpu2_base = of_iomap(np, 9); -+ if (!clk_info->rcpu2_base) { -+ pr_err("failed to map rcpu2 registers\n"); -+ goto out; -+ } -+ } -+ ret = spacemit_ccu_probe(np, clk_info, hw_clks); -+ //LOG_INFO("init clock finish"); -+ if (ret) -+ return; -+out: -+ return; -+} -+ -+CLK_OF_DECLARE(k1x_clock, "spacemit,k1x-clock", spacemit_k1x_ccu_probe); -+ -diff --git a/drivers/clk/spacemit/ccu-spacemit-k1x.h b/drivers/clk/spacemit/ccu-spacemit-k1x.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/clk/spacemit/ccu-spacemit-k1x.h -@@ -0,0 +1,83 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Copyright (c) 2023, spacemit Corporation. -+ * -+ */ -+ -+#ifndef _CCU_SPACEMIT_K1X_H_ -+#define _CCU_SPACEMIT_K1X_H_ -+ -+#include -+#include -+ -+enum ccu_base_type{ -+ BASE_TYPE_MPMU = 0, -+ BASE_TYPE_APMU = 1, -+ BASE_TYPE_APBC = 2, -+ BASE_TYPE_APBS = 3, -+ BASE_TYPE_CIU = 4, -+ BASE_TYPE_DCIU = 5, -+ BASE_TYPE_DDRC = 6, -+ BASE_TYPE_AUDC = 7, -+ BASE_TYPE_APBC2 = 8, -+ BASE_TYPE_RCPU = 9, -+ BASE_TYPE_RCPU2 = 10, -+}; -+ -+enum { -+ CLK_DIV_TYPE_1REG_NOFC_V1 = 0, -+ CLK_DIV_TYPE_1REG_FC_V2, -+ CLK_DIV_TYPE_2REG_NOFC_V3, -+ CLK_DIV_TYPE_2REG_FC_V4, -+ CLK_DIV_TYPE_1REG_FC_DIV_V5, -+ CLK_DIV_TYPE_1REG_FC_MUX_V6, -+}; -+ -+struct ccu_common { -+ void __iomem *base; -+ enum ccu_base_type base_type; -+ u32 reg_type; -+ u32 reg_ctrl; -+ u32 reg_sel; -+ u32 reg_xtc; -+ u32 fc; -+ bool is_pll; -+ const char *name; -+ const struct clk_ops *ops; -+ const char * const *parent_names; -+ u8 num_parents; -+ unsigned long flags; -+ spinlock_t *lock; -+ struct clk_hw hw; -+}; -+ -+struct spacemit_k1x_clk { -+ void __iomem *mpmu_base; -+ void __iomem *apmu_base; -+ void __iomem *apbc_base; -+ void __iomem *apbs_base; -+ void __iomem *ciu_base; -+ void __iomem *dciu_base; -+ void __iomem *ddrc_base; -+ void __iomem *audio_ctrl_base; -+ void __iomem *apbc2_base; -+ void __iomem *rcpu_base; -+ void __iomem *rcpu2_base; -+}; -+ -+struct clk_hw_table { -+ char *name; -+ u32 clk_hw_id; -+}; -+ -+extern spinlock_t g_cru_lock; -+ -+static inline struct ccu_common *hw_to_ccu_common(struct clk_hw *hw) -+{ -+ return container_of(hw, struct ccu_common, hw); -+} -+ -+int spacemit_ccu_probe(struct device_node *node, struct spacemit_k1x_clk *clk_info, -+ struct clk_hw_onecell_data *desc); -+ -+#endif /* _CCU_SPACEMIT_K1X_H_ */ -diff --git a/drivers/clk/spacemit/ccu_ddn.c b/drivers/clk/spacemit/ccu_ddn.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/clk/spacemit/ccu_ddn.c -@@ -0,0 +1,170 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Spacemit clock type ddn -+ * -+ * Copyright (c) 2023, spacemit Corporation. 
-+ * -+ */ -+ -+#include -+#include -+ -+#include "ccu_ddn.h" -+/* -+ * It is M/N clock -+ * -+ * Fout from synthesizer can be given from two equations: -+ * numerator/denominator = Fin / (Fout * factor) -+ */ -+ -+static void ccu_ddn_disable(struct clk_hw *hw) -+{ -+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw); -+ struct ccu_common * common = &ddn->common; -+ unsigned long flags; -+ u32 reg; -+ -+ if (!ddn->gate) -+ return; -+ -+ spin_lock_irqsave(common->lock, flags); -+ -+ reg = readl(common->base + common->reg_sel); -+ -+ writel(reg & ~ddn->gate, common->base + common->reg_sel); -+ -+ spin_unlock_irqrestore(common->lock, flags); -+} -+ -+static int ccu_ddn_enable(struct clk_hw *hw) -+{ -+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw); -+ struct ccu_common * common = &ddn->common; -+ unsigned long flags; -+ u32 reg; -+ -+ if (!ddn->gate) -+ return 0; -+ -+ spin_lock_irqsave(common->lock, flags); -+ -+ reg = readl(common->base + common->reg_sel); -+ -+ writel(reg | ddn->gate, common->base + common->reg_sel); -+ -+ spin_unlock_irqrestore(common->lock, flags); -+ -+ return 0; -+} -+ -+static int ccu_ddn_is_enabled(struct clk_hw *hw) -+{ -+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw); -+ struct ccu_common * common = &ddn->common; -+ -+ if (!ddn->gate) -+ return 1; -+ -+ return readl(common->base + common->reg_sel) & ddn->gate; -+} -+ -+static long clk_ddn_round_rate(struct clk_hw *hw, unsigned long drate, -+ unsigned long *prate) -+{ -+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw); -+ struct ccu_ddn_config *params = &ddn->ddn; -+ unsigned long rate = 0, prev_rate; -+ unsigned long result; -+ int i; -+ -+ for (i = 0; i < params->tbl_size; i++) { -+ prev_rate = rate; -+ rate = (((*prate / 10000) * params->tbl[i].den) / -+ (params->tbl[i].num * params->info->factor)) * 10000; -+ if (rate > drate) -+ break; -+ } -+ if ((i == 0) || (i == params->tbl_size)) { -+ result = rate; -+ } else { -+ if ((drate - prev_rate) > (rate - drate)) -+ result = rate; -+ else -+ result = prev_rate; -+ } -+ return result; -+} -+ -+static unsigned long clk_ddn_recalc_rate(struct clk_hw *hw, -+ unsigned long parent_rate) -+{ -+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw); -+ struct ccu_ddn_config *params = &ddn->ddn; -+ unsigned int val, num, den; -+ unsigned long rate; -+ -+ val = readl(ddn->common.base + ddn->common.reg_ctrl); -+ -+ /* calculate numerator */ -+ num = (val >> params->info->num_shift) & params->info->num_mask; -+ -+ /* calculate denominator */ -+ den = (val >> params->info->den_shift) & params->info->den_mask; -+ -+ if (!den) -+ return 0; -+ rate = (((parent_rate / 10000) * den) / -+ (num * params->info->factor)) * 10000; -+ return rate; -+} -+ -+/* Configures new clock rate*/ -+static int clk_ddn_set_rate(struct clk_hw *hw, unsigned long drate, -+ unsigned long prate) -+{ -+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw); -+ struct ccu_ddn_config *params = &ddn->ddn; -+ int i; -+ unsigned long val; -+ unsigned long prev_rate, rate = 0; -+ unsigned long flags = 0; -+ -+ for (i = 0; i < params->tbl_size; i++) { -+ prev_rate = rate; -+ rate = (((prate / 10000) * params->tbl[i].den) / -+ (params->tbl[i].num * params->info->factor)) * 10000; -+ if (rate > drate) -+ break; -+ } -+ -+ if (i > 0) -+ i--; -+ -+ if (ddn->common.lock) -+ spin_lock_irqsave(ddn->common.lock, flags); -+ -+ val = readl(ddn->common.base + ddn->common.reg_ctrl); -+ -+ val &= ~(params->info->num_mask << params->info->num_shift); -+ val |= (params->tbl[i].num & params->info->num_mask) << params->info->num_shift; -+ -+ val &= ~(params->info->den_mask << 
params->info->den_shift); -+ val |= (params->tbl[i].den & params->info->den_mask) << params->info->den_shift; -+ -+ writel(val, ddn->common.base + ddn->common.reg_ctrl); -+ -+ if (ddn->common.lock) -+ spin_unlock_irqrestore(ddn->common.lock, flags); -+ -+ return 0; -+} -+ -+const struct clk_ops ccu_ddn_ops = { -+ .disable = ccu_ddn_disable, -+ .enable = ccu_ddn_enable, -+ .is_enabled = ccu_ddn_is_enabled, -+ .recalc_rate = clk_ddn_recalc_rate, -+ .round_rate = clk_ddn_round_rate, -+ .set_rate = clk_ddn_set_rate, -+}; -+ -diff --git a/drivers/clk/spacemit/ccu_ddn.h b/drivers/clk/spacemit/ccu_ddn.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/clk/spacemit/ccu_ddn.h -@@ -0,0 +1,97 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Copyright (c) 2023, spacemit Corporation. -+ * -+ */ -+ -+#ifndef _CCU_DDN_H_ -+#define _CCU_DDN_H_ -+ -+ -+#include -+#include -+ -+#include "ccu-spacemit-k1x.h" -+ -+struct ccu_ddn_tbl { -+ unsigned int num; -+ unsigned int den; -+}; -+ -+struct ccu_ddn_info { -+ unsigned int factor; -+ unsigned int num_mask; -+ unsigned int den_mask; -+ unsigned int num_shift; -+ unsigned int den_shift; -+}; -+ -+struct ccu_ddn_config { -+ struct ccu_ddn_info * info; -+ struct ccu_ddn_tbl * tbl; -+ u32 tbl_size; -+}; -+ -+#define PLL_DDN_TBL(_num, _den) \ -+ { \ -+ .num = (_num), \ -+ .den = (_den), \ -+ } -+ -+struct ccu_ddn { -+ u32 gate; -+ struct ccu_ddn_config ddn; -+ struct ccu_common common; -+}; -+ -+#define _SPACEMIT_CCU_DDN_CONFIG(_info, _table, _size) \ -+ { \ -+ .info = (struct ccu_ddn_info *)_info, \ -+ .tbl = (struct ccu_ddn_tbl *)_table, \ -+ .tbl_size = _size, \ -+ } -+ -+#define SPACEMIT_CCU_DDN(_struct, _name, _parent, _info, _table, _size, \ -+ _base_type, _reg_ctrl, \ -+ _flags) \ -+ struct ccu_ddn _struct = { \ -+ .ddn = _SPACEMIT_CCU_DDN_CONFIG(_info, _table, _size), \ -+ .common = { \ -+ .reg_ctrl = _reg_ctrl, \ -+ .base_type = _base_type, \ -+ .hw.init = CLK_HW_INIT(_name, \ -+ _parent, \ -+ &ccu_ddn_ops, \ -+ _flags), \ -+ } \ -+ } -+ -+#define SPACEMIT_CCU_DDN_GATE(_struct, _name, _parent, _info, _table, _size, \ -+ _base_type, _reg_ddn, __reg_gate, _gate_mask, \ -+ _flags) \ -+ struct ccu_ddn _struct = { \ -+ .gate = _gate_mask, \ -+ .ddn = _SPACEMIT_CCU_DDN_CONFIG(_info, _table, _size), \ -+ .common = { \ -+ .reg_ctrl = _reg_ddn, \ -+ .reg_sel = __reg_gate, \ -+ .base_type = _base_type, \ -+ .hw.init = CLK_HW_INIT(_name, \ -+ _parent, \ -+ &ccu_ddn_ops, \ -+ _flags), \ -+ } \ -+ } -+ -+ -+static inline struct ccu_ddn *hw_to_ccu_ddn(struct clk_hw *hw) -+{ -+ struct ccu_common *common = hw_to_ccu_common(hw); -+ -+ return container_of(common, struct ccu_ddn, common); -+} -+ -+extern const struct clk_ops ccu_ddn_ops; -+ -+ -+#endif -diff --git a/drivers/clk/spacemit/ccu_mix.c b/drivers/clk/spacemit/ccu_mix.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/clk/spacemit/ccu_mix.c -@@ -0,0 +1,489 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Spacemit clock type mix(div/mux/gate/factor) -+ * -+ * Copyright (c) 2023, spacemit Corporation. 
-+ * -+ */ -+#include -+#include -+#include -+#include -+#include -+ -+#include "ccu_mix.h" -+ -+#define TIMEOUT_LIMIT (20000) /* max timeout 10000us */ -+static int twsi8_reg_val = 0x04; -+const char * tswi8_clk_name = "twsi8_clk"; -+static void ccu_mix_disable(struct clk_hw *hw) -+{ -+ struct ccu_mix *mix = hw_to_ccu_mix(hw); -+ struct ccu_common * common = &mix->common; -+ struct ccu_gate_config *gate = mix->gate; -+ unsigned long flags = 0; -+ unsigned long rate; -+ u32 tmp; -+ -+ if (!gate) -+ return; -+ -+ if (!strcmp(common->name, tswi8_clk_name)){ -+ twsi8_reg_val &= ~gate->gate_mask;; -+ twsi8_reg_val |= gate->val_disable; -+ tmp = twsi8_reg_val; -+ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) -+ writel(tmp, common->base + common->reg_sel); -+ else -+ writel(tmp, common->base + common->reg_ctrl); -+ return; -+ } -+ -+ if (common->lock) -+ spin_lock_irqsave(common->lock, flags); -+ -+ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) -+ tmp = readl(common->base + common->reg_sel); -+ else -+ tmp = readl(common->base + common->reg_ctrl); -+ -+ tmp &= ~gate->gate_mask; -+ tmp |= gate->val_disable; -+ -+ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) -+ writel(tmp, common->base + common->reg_sel); -+ else -+ writel(tmp, common->base + common->reg_ctrl); -+ -+ if (common->lock) -+ spin_unlock_irqrestore(common->lock, flags); -+ -+ if (gate->flags & SPACEMIT_CLK_GATE_NEED_DELAY) { -+ rate = clk_hw_get_rate(&common->hw); -+ -+ if (rate == 0) -+ pr_err("clock rate of %s is 0.\n", clk_hw_get_name(&common->hw)); -+ else -+ /* Need delay 2M cycles. */ -+ udelay(DIV_ROUND_UP(2000000, rate)); -+ } -+ -+ return; -+} -+ -+static int ccu_mix_enable(struct clk_hw *hw) -+{ -+ struct ccu_mix *mix = hw_to_ccu_mix(hw); -+ struct ccu_common * common = &mix->common; -+ struct ccu_gate_config *gate = mix->gate; -+ unsigned long flags = 0; -+ unsigned long rate; -+ u32 tmp; -+ u32 val = 0; -+ int timeout_power = 1; -+ -+ if (!gate) -+ return 0; -+ -+ if (!strcmp(common->name, tswi8_clk_name)){ -+ twsi8_reg_val &= ~gate->gate_mask;; -+ twsi8_reg_val |= gate->val_enable; -+ tmp = twsi8_reg_val; -+ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) -+ writel(tmp, common->base + common->reg_sel); -+ else -+ writel(tmp, common->base + common->reg_ctrl); -+ return 0; -+ } -+ -+ if (common->lock) -+ spin_lock_irqsave(common->lock, flags); -+ -+ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) -+ tmp = readl(common->base + common->reg_sel); -+ else -+ tmp = readl(common->base + common->reg_ctrl); -+ -+ tmp &= ~gate->gate_mask; -+ tmp |= gate->val_enable; -+ -+ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) -+ writel(tmp, common->base + common->reg_sel); -+ else -+ writel(tmp, common->base + common->reg_ctrl); -+ -+ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) -+ val = readl(common->base + common->reg_sel); -+ else -+ val = readl(common->base + common->reg_ctrl); -+ -+ if (common->lock) -+ spin_unlock_irqrestore(common->lock, flags); -+ -+ while ((val & gate->gate_mask) != gate->val_enable && (timeout_power < TIMEOUT_LIMIT)) { -+ udelay(timeout_power); -+ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 -+ || common->reg_type == 
CLK_DIV_TYPE_2REG_FC_V4) -+ val = readl(common->base + common->reg_sel); -+ else -+ val = readl(common->base + common->reg_ctrl); -+ timeout_power *= 10; -+ } -+ -+ if (timeout_power > 1) { -+ if (val == tmp) -+ pr_err("write clk_gate %s timeout occur, read pass after %d us delay\n", -+ clk_hw_get_name(&common->hw), timeout_power); -+ else -+ pr_err("write clk_gate %s timeout after %d us!\n", clk_hw_get_name(&common->hw), timeout_power); -+ } -+ -+ if (gate->flags & SPACEMIT_CLK_GATE_NEED_DELAY) { -+ rate = clk_hw_get_rate(&common->hw); -+ -+ if (rate == 0) -+ pr_err("clock rate of %s is 0.\n", clk_hw_get_name(&common->hw)); -+ else -+ /* Need delay 2M cycles. */ -+ udelay(DIV_ROUND_UP(2000000, rate)); -+ } -+ -+ return 0; -+} -+ -+static int ccu_mix_is_enabled(struct clk_hw *hw) -+{ -+ struct ccu_mix *mix = hw_to_ccu_mix(hw); -+ struct ccu_common * common = &mix->common; -+ struct ccu_gate_config *gate = mix->gate; -+ unsigned long flags = 0; -+ u32 tmp; -+ -+ if (!gate) -+ return 1; -+ -+ if (!strcmp(common->name, tswi8_clk_name)){ -+ return (twsi8_reg_val & gate->gate_mask) == gate->val_enable; -+ } -+ -+ if (common->lock) -+ spin_lock_irqsave(common->lock, flags); -+ -+ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) -+ tmp = readl(common->base + common->reg_sel); -+ else -+ tmp = readl(common->base + common->reg_ctrl); -+ -+ if (common->lock) -+ spin_unlock_irqrestore(common->lock, flags); -+ -+ return (tmp & gate->gate_mask) == gate->val_enable; -+} -+ -+static unsigned long ccu_mix_recalc_rate(struct clk_hw *hw, -+ unsigned long parent_rate) -+{ -+ struct ccu_mix *mix = hw_to_ccu_mix(hw); -+ struct ccu_common * common = &mix->common; -+ struct ccu_div_config *div = mix->div; -+ unsigned long val; -+ u32 reg; -+ -+ if (!div){ -+ if (mix->factor) -+ return parent_rate * mix->factor->mul / mix->factor->div; -+ else -+ return parent_rate; -+ } -+ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) -+ reg = readl(common->base + common->reg_sel); -+ else -+ reg = readl(common->base + common->reg_ctrl); -+ -+ val = reg >> div->shift; -+ val &= (1 << div->width) - 1; -+ -+ val = divider_recalc_rate(hw, parent_rate, val, div->table, -+ div->flags, div->width); -+ -+ return val; -+} -+ -+ -+static int ccu_mix_trigger_fc(struct clk_hw *hw) -+{ -+ struct ccu_mix *mix = hw_to_ccu_mix(hw); -+ struct ccu_common * common = &mix->common; -+ unsigned long val = 0; -+ -+ int ret = 0, timeout = 50; -+ -+ if (common->reg_type == CLK_DIV_TYPE_1REG_FC_V2 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4 -+ || common->reg_type == CLK_DIV_TYPE_1REG_FC_DIV_V5 -+ || common->reg_type == CLK_DIV_TYPE_1REG_FC_MUX_V6) { -+ -+ timeout = 50; -+ val = readl(common->base + common->reg_ctrl); -+ val |= common->fc; -+ writel(val, common->base + common->reg_ctrl); -+ -+ do { -+ val = readl(common->base + common->reg_ctrl); -+ timeout--; -+ if (!(val & (common->fc))) -+ break; -+ } while (timeout); -+ -+ if (timeout == 0) { -+ timeout = 5000; -+ do { -+ val = readl(common->base + common->reg_ctrl); -+ timeout--; -+ if (!(val & (common->fc))) -+ break; -+ } while (timeout); -+ if (timeout != 0) { -+ ret = 0; -+ -+ } else { -+ ret = -1; -+ } -+ } -+ } -+ -+ return ret; -+ -+} -+ -+static long ccu_mix_round_rate(struct clk_hw *hw, unsigned long rate, -+ unsigned long *prate) -+{ -+ return rate; -+} -+ -+unsigned long ccu_mix_calc_best_rate(struct clk_hw *hw, unsigned long rate, u32 *mux_val, u32 *div_val) -+{ -+ struct 
ccu_mix *mix = hw_to_ccu_mix(hw); -+ struct ccu_common * common = &mix->common; -+ struct ccu_div_config *div = mix->div? mix->div: NULL; -+ struct clk_hw *parent; -+ unsigned long parent_rate = 0, best_rate = 0; -+ u32 i, j, div_max; -+ -+ for (i = 0; i < common->num_parents; i++) { -+ -+ parent = clk_hw_get_parent_by_index(hw, i); -+ if (!parent) -+ continue; -+ parent_rate = clk_hw_get_rate(parent); -+ -+ if(div) -+ div_max = 1 << div->width; -+ else -+ div_max = 1; -+ -+ for(j = 1; j <= div_max; j++){ -+ if(abs(parent_rate/j - rate) < abs(best_rate - rate)){ -+ best_rate = DIV_ROUND_UP_ULL(parent_rate, j); -+ *mux_val = i; -+ *div_val = j - 1; -+ } -+ } -+ } -+ -+ return best_rate; -+} -+ -+static int ccu_mix_set_rate(struct clk_hw *hw, unsigned long rate, -+ unsigned long parent_rate) -+{ -+ struct ccu_mix *mix = hw_to_ccu_mix(hw); -+ struct ccu_common * common = &mix->common; -+ struct ccu_div_config *div = mix->div? mix->div: NULL; -+ struct ccu_mux_config *mux = mix->mux? mix->mux: NULL; -+ unsigned long best_rate = 0; -+ unsigned long flags; -+ u32 cur_mux, cur_div, mux_val = 0, div_val = 0; -+ u32 reg = 0; -+ int ret = 0; -+ -+ if(!div && !mux){ -+ return 0; -+ } -+ -+ best_rate = ccu_mix_calc_best_rate(hw, rate, &mux_val, &div_val); -+ if (!strcmp(common->name, tswi8_clk_name)){ -+ if(mux){ -+ cur_mux = twsi8_reg_val >> mux->shift; -+ cur_mux &= (1 << mux->width) - 1; -+ if(cur_mux != mux_val) -+ clk_hw_set_parent(hw, clk_hw_get_parent_by_index(hw, mux_val)); -+ } -+ return 0; -+ } -+ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) -+ reg = readl(common->base + common->reg_sel); -+ else -+ reg = readl(common->base + common->reg_ctrl); -+ -+ if(mux){ -+ cur_mux = reg >> mux->shift; -+ cur_mux &= (1 << mux->width) - 1; -+ if(cur_mux != mux_val) -+ clk_hw_set_parent(hw, clk_hw_get_parent_by_index(hw, mux_val)); -+ } -+ if(div){ -+ cur_div = reg >> div->shift; -+ cur_div &= (1 << div->width) - 1; -+ if(cur_div == div_val) -+ return 0; -+ }else{ -+ return 0; -+ } -+ -+ spin_lock_irqsave(common->lock, flags); -+ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) -+ reg = readl(common->base + common->reg_sel); -+ else -+ reg = readl(common->base + common->reg_ctrl); -+ -+ reg &= ~GENMASK(div->width + div->shift - 1, div->shift); -+ -+ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) -+ writel(reg | (div_val << div->shift), -+ common->base + common->reg_sel); -+ else -+ writel(reg | (div_val << div->shift), -+ common->base + common->reg_ctrl); -+ -+ if (common->reg_type == CLK_DIV_TYPE_1REG_FC_V2 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4 -+ || common->reg_type == CLK_DIV_TYPE_1REG_FC_DIV_V5) { -+ -+ ret = ccu_mix_trigger_fc(hw); -+ } -+ spin_unlock_irqrestore(common->lock, flags); -+ -+ if(ret) -+ pr_err("%s of %s timeout\n", __func__, clk_hw_get_name(&common->hw)); -+ return 0; -+} -+ -+static u8 ccu_mix_get_parent(struct clk_hw *hw) -+{ -+ struct ccu_mix *mix = hw_to_ccu_mix(hw); -+ struct ccu_common * common = &mix->common; -+ struct ccu_mux_config *mux = mix->mux; -+ u32 reg; -+ u8 parent; -+ -+ if(!mux) -+ return 0; -+ -+ if (!strcmp(common->name, tswi8_clk_name)){ -+ parent = twsi8_reg_val >> mux->shift; -+ parent &= (1 << mux->width) - 1; -+ return parent; -+ } -+ -+ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) -+ reg = readl(common->base + common->reg_sel); -+ 
else -+ reg = readl(common->base + common->reg_ctrl); -+ -+ parent = reg >> mux->shift; -+ parent &= (1 << mux->width) - 1; -+ -+ if (mux->table) { -+ int num_parents = clk_hw_get_num_parents(&common->hw); -+ int i; -+ -+ for (i = 0; i < num_parents; i++) -+ if (mux->table[i] == parent) -+ return i; -+ } -+ return parent; -+} -+ -+static int ccu_mix_set_parent(struct clk_hw *hw, u8 index) -+{ -+ struct ccu_mix *mix = hw_to_ccu_mix(hw); -+ struct ccu_common * common = &mix->common; -+ struct ccu_mux_config *mux = mix->mux; -+ unsigned long flags; -+ u32 reg = 0; -+ int ret = 0; -+ -+ if(!mux) -+ return 0; -+ -+ if (mux->table) -+ index = mux->table[index]; -+ -+ if (!strcmp(common->name, tswi8_clk_name)){ -+ twsi8_reg_val &= ~GENMASK(mux->width + mux->shift - 1, mux->shift); -+ twsi8_reg_val |= (index << mux->shift); -+ reg = twsi8_reg_val; -+ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) -+ writel(reg, common->base + common->reg_sel); -+ else -+ writel(reg, common->base + common->reg_ctrl); -+ return 0; -+ } -+ -+ spin_lock_irqsave(common->lock, flags); -+ -+ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) -+ reg = readl(common->base + common->reg_sel); -+ else -+ reg = readl(common->base + common->reg_ctrl); -+ -+ reg &= ~GENMASK(mux->width + mux->shift - 1, mux->shift); -+ -+ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) -+ writel(reg | (index << mux->shift), common->base + common->reg_sel); -+ else -+ writel(reg | (index << mux->shift), common->base + common->reg_ctrl); -+ -+ if (common->reg_type == CLK_DIV_TYPE_1REG_FC_V2 -+ || common->reg_type == CLK_DIV_TYPE_2REG_FC_V4 -+ || common->reg_type == CLK_DIV_TYPE_1REG_FC_MUX_V6) { -+ -+ ret = ccu_mix_trigger_fc(hw); -+ } -+ spin_unlock_irqrestore(common->lock, flags); -+ -+ if(ret) -+ pr_err("%s of %s timeout\n", __func__, clk_hw_get_name(&common->hw)); -+ -+ return 0; -+} -+ -+const struct clk_ops ccu_mix_ops = { -+ .disable = ccu_mix_disable, -+ .enable = ccu_mix_enable, -+ .is_enabled = ccu_mix_is_enabled, -+ .get_parent = ccu_mix_get_parent, -+ .set_parent = ccu_mix_set_parent, -+ .round_rate = ccu_mix_round_rate, -+ .recalc_rate = ccu_mix_recalc_rate, -+ .set_rate = ccu_mix_set_rate, -+}; -+ -diff --git a/drivers/clk/spacemit/ccu_mix.h b/drivers/clk/spacemit/ccu_mix.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/clk/spacemit/ccu_mix.h -@@ -0,0 +1,374 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Copyright (c) 2023, spacemit Corporation. 
-+ * -+ */ -+ -+#ifndef _CCU_MIX_H_ -+#define _CCU_MIX_H_ -+ -+#include -+#include "ccu-spacemit-k1x.h" -+ -+ -+#define SPACEMIT_CLK_GATE_NEED_DELAY BIT(0) -+ -+struct ccu_gate_config { -+ u32 gate_mask; -+ u32 val_enable; -+ u32 val_disable; -+ u32 flags; -+}; -+ -+struct ccu_factor_config { -+ u32 div; -+ u32 mul; -+}; -+ -+struct ccu_mux_config { -+ u8 shift; -+ u8 width; -+ const u8 *table; -+ u32 flags; -+}; -+ -+struct ccu_div_config { -+ u8 shift; -+ u8 width; -+ u32 max; -+ u32 offset; -+ u32 flags; -+ struct clk_div_table *table; -+}; -+ -+struct ccu_mix { -+ struct ccu_gate_config *gate; -+ struct ccu_factor_config *factor; -+ struct ccu_div_config *div; -+ struct ccu_mux_config *mux; -+ struct ccu_common common; -+}; -+ -+#define CCU_GATE_INIT(_gate_mask, _val_enable, _val_disable, _flags) \ -+ (&(struct ccu_gate_config) { \ -+ .gate_mask = _gate_mask, \ -+ .val_enable = _val_enable, \ -+ .val_disable = _val_disable, \ -+ .flags = _flags, \ -+ }) -+ -+#define CCU_FACTOR_INIT(_div, _mul) \ -+ (&(struct ccu_factor_config) { \ -+ .div = _div, \ -+ .mul = _mul, \ -+ }) -+ -+ -+#define CCU_MUX_INIT(_shift, _width, _table, _flags) \ -+ (&(struct ccu_mux_config) { \ -+ .shift = _shift, \ -+ .width = _width, \ -+ .table = _table, \ -+ .flags = _flags, \ -+ }) -+ -+#define CCU_DIV_INIT(_shift, _width, _table, _flags) \ -+ (&(struct ccu_div_config) { \ -+ .shift = _shift, \ -+ .width = _width, \ -+ .flags = _flags, \ -+ .table = _table, \ -+ }) -+ -+#define SPACEMIT_CCU_GATE(_struct, _name, _parent, _base_type, _reg, \ -+ _gate_mask, _val_enable, _val_disable, _flags) \ -+ struct ccu_mix _struct = { \ -+ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, _val_disable, 0), \ -+ .common = { \ -+ .reg_ctrl = _reg, \ -+ .base_type = _base_type, \ -+ .name = _name, \ -+ .num_parents = 1, \ -+ .hw.init = CLK_HW_INIT(_name, \ -+ _parent, \ -+ &ccu_mix_ops, \ -+ _flags), \ -+ } \ -+ } -+#define SPACEMIT_CCU_GATE_NO_PARENT(_struct, _name, _parent, _base_type, _reg, \ -+ _gate_mask, _val_enable, _val_disable, _flags) \ -+ struct ccu_mix _struct = { \ -+ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, _val_disable, 0), \ -+ .common = { \ -+ .reg_ctrl = _reg, \ -+ .base_type = _base_type, \ -+ .name = _name, \ -+ .num_parents = 0, \ -+ .hw.init = CLK_HW_INIT_NO_PARENT(_name, \ -+ &ccu_mix_ops, \ -+ _flags), \ -+ } \ -+ } -+ -+#define SPACEMIT_CCU_FACTOR(_struct, _name, _parent, \ -+ _div, _mul) \ -+ struct ccu_mix _struct = { \ -+ .factor = CCU_FACTOR_INIT(_div, _mul), \ -+ .common = { \ -+ .name = _name, \ -+ .num_parents = 1, \ -+ .hw.init = CLK_HW_INIT(_name, \ -+ _parent, \ -+ &ccu_mix_ops, \ -+ 0), \ -+ } \ -+ } -+ -+#define SPACEMIT_CCU_MUX(_struct, _name, _parents, _base_type, _reg, \ -+ _shift, _width, _flags) \ -+ struct ccu_mix _struct = { \ -+ .mux = CCU_MUX_INIT(_shift, _width, NULL, 0), \ -+ .common = { \ -+ .reg_ctrl = _reg, \ -+ .base_type = _base_type, \ -+ .name = _name, \ -+ .parent_names = _parents, \ -+ .num_parents = ARRAY_SIZE(_parents), \ -+ .hw.init = CLK_HW_INIT_PARENTS(_name, \ -+ _parents, \ -+ &ccu_mix_ops, \ -+ _flags|CLK_GET_RATE_NOCACHE), \ -+ } \ -+ } -+ -+#define SPACEMIT_CCU_DIV(_struct, _name, _parent, _base_type, _reg, \ -+ _shift, _width, _flags) \ -+ struct ccu_mix _struct = { \ -+ .div = CCU_DIV_INIT(_shift, _width, NULL, 0), \ -+ .common = { \ -+ .reg_ctrl = _reg, \ -+ .base_type = _base_type, \ -+ .name = _name, \ -+ .num_parents = 1, \ -+ .hw.init = CLK_HW_INIT(_name, \ -+ _parent, \ -+ &ccu_mix_ops, \ -+ _flags|CLK_GET_RATE_NOCACHE), \ -+ } \ -+ } -+ -+#define 
SPACEMIT_CCU_GATE_FACTOR(_struct, _name, _parent, _base_type, _reg, \ -+ _gate_mask, _val_enable, _val_disable, \ -+ _div, _mul, _flags) \ -+ struct ccu_mix _struct = { \ -+ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, _val_disable, 0), \ -+ .factor = CCU_FACTOR_INIT(_div, _mul), \ -+ .common = { \ -+ .reg_ctrl = _reg, \ -+ .base_type = _base_type, \ -+ .name = _name, \ -+ .num_parents = 1, \ -+ .hw.init = CLK_HW_INIT(_name, \ -+ _parent, \ -+ &ccu_mix_ops, \ -+ _flags), \ -+ } \ -+ } -+ -+ -+#define SPACEMIT_CCU_MUX_GATE(_struct, _name, _parents, _base_type, _reg, \ -+ _shift, _width, _gate_mask, _val_enable, _val_disable, _flags) \ -+ struct ccu_mix _struct = { \ -+ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, _val_disable, 0), \ -+ .mux = CCU_MUX_INIT(_shift, _width, NULL, 0), \ -+ .common = { \ -+ .reg_ctrl = _reg, \ -+ .base_type = _base_type, \ -+ .name = _name, \ -+ .parent_names = _parents, \ -+ .num_parents = ARRAY_SIZE(_parents), \ -+ .hw.init = CLK_HW_INIT_PARENTS(_name, \ -+ _parents, \ -+ &ccu_mix_ops, \ -+ _flags|CLK_GET_RATE_NOCACHE), \ -+ } \ -+ } -+ -+#define SPACEMIT_CCU_DIV_GATE(_struct, _name, _parent, _base_type, _reg, \ -+ _shift, _width, _gate_mask, _val_enable, _val_disable, _flags) \ -+ struct ccu_mix _struct = { \ -+ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, _val_disable, 0), \ -+ .div = CCU_DIV_INIT(_shift, _width, NULL, 0), \ -+ .common = { \ -+ .reg_ctrl = _reg, \ -+ .base_type = _base_type, \ -+ .name = _name, \ -+ .num_parents = 1, \ -+ .hw.init = CLK_HW_INIT(_name, \ -+ _parent, \ -+ &ccu_mix_ops, \ -+ _flags|CLK_GET_RATE_NOCACHE), \ -+ } \ -+ } -+ -+ -+#define SPACEMIT_CCU_DIV_MUX_GATE(_struct, _name, _parents, \ -+ _base_type, _reg_ctrl, \ -+ _mshift, _mwidth, \ -+ _muxshift, _muxwidth, \ -+ _gate_mask, _val_enable, _val_disable, _flags) \ -+ struct ccu_mix _struct = { \ -+ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, _val_disable, 0), \ -+ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ -+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ -+ .common = { \ -+ .reg_ctrl = _reg_ctrl, \ -+ .base_type = _base_type, \ -+ .name = _name, \ -+ .parent_names = _parents, \ -+ .num_parents = ARRAY_SIZE(_parents), \ -+ .hw.init = CLK_HW_INIT_PARENTS(_name, \ -+ _parents, \ -+ &ccu_mix_ops, \ -+ _flags|CLK_GET_RATE_NOCACHE), \ -+ }, \ -+ } -+ -+#define SPACEMIT_CCU_DIV2_FC_MUX_GATE(_struct, _name, _parents, _base_type, _reg_ctrl, _reg_sel, \ -+ _mshift, _mwidth, _fc, _muxshift, _muxwidth, _gate_mask, _val_enable, _val_disable, \ -+ _flags) \ -+ struct ccu_mix _struct = { \ -+ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, _val_disable, 0), \ -+ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ -+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ -+ .common = { \ -+ .reg_type = CLK_DIV_TYPE_2REG_FC_V4, \ -+ .reg_ctrl = _reg_ctrl, \ -+ .reg_sel = _reg_sel, \ -+ .fc = _fc, \ -+ .base_type = _base_type, \ -+ .name = _name, \ -+ .parent_names = _parents, \ -+ .num_parents = ARRAY_SIZE(_parents), \ -+ .hw.init = CLK_HW_INIT_PARENTS(_name, \ -+ _parents, \ -+ &ccu_mix_ops, \ -+ _flags|CLK_GET_RATE_NOCACHE), \ -+ }, \ -+ } -+ -+ -+#define SPACEMIT_CCU_DIV_FC_MUX_GATE(_struct, _name, _parents, _base_type, _reg_ctrl, \ -+ _mshift, _mwidth, _fc, _muxshift, _muxwidth, _gate_mask, _val_enable, _val_disable, \ -+ _flags) \ -+ struct ccu_mix _struct = { \ -+ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, _val_disable, 0), \ -+ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ -+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ -+ .common = { \ -+ .reg_type = 
CLK_DIV_TYPE_1REG_FC_V2, \ -+ .reg_ctrl = _reg_ctrl, \ -+ .fc = _fc, \ -+ .base_type = _base_type, \ -+ .name = _name, \ -+ .parent_names = _parents, \ -+ .num_parents = ARRAY_SIZE(_parents), \ -+ .hw.init = CLK_HW_INIT_PARENTS(_name, \ -+ _parents, \ -+ &ccu_mix_ops, \ -+ _flags|CLK_GET_RATE_NOCACHE), \ -+ }, \ -+ } -+ -+#define SPACEMIT_CCU_DIV_MFC_MUX_GATE(_struct, _name, _parents, _base_type, _reg_ctrl, \ -+ _mshift, _mwidth, _fc, _muxshift, _muxwidth, _gate_mask, _val_enable, _val_disable, \ -+ _flags) \ -+ struct ccu_mix _struct = { \ -+ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, _val_disable, 0), \ -+ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ -+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ -+ .common = { \ -+ .reg_type = CLK_DIV_TYPE_1REG_FC_MUX_V6, \ -+ .reg_ctrl = _reg_ctrl, \ -+ .fc = _fc, \ -+ .base_type = _base_type, \ -+ .name = _name, \ -+ .parent_names = _parents, \ -+ .num_parents = ARRAY_SIZE(_parents), \ -+ .hw.init = CLK_HW_INIT_PARENTS(_name, \ -+ _parents, \ -+ &ccu_mix_ops, \ -+ _flags|CLK_GET_RATE_NOCACHE), \ -+ }, \ -+ } -+ -+ -+#define SPACEMIT_CCU_DIV_FC_WITH_GATE(_struct, _name, _parent, _base_type, _reg_ctrl, \ -+ _mshift, _mwidth, _fc, _gate_mask, _val_enable, _val_disable, \ -+ _flags) \ -+ struct ccu_mix _struct = { \ -+ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, _val_disable, 0), \ -+ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ -+ .common = { \ -+ .reg_type = CLK_DIV_TYPE_1REG_FC_V2, \ -+ .reg_ctrl = _reg_ctrl, \ -+ .fc = _fc, \ -+ .base_type = _base_type, \ -+ .name = _name, \ -+ .num_parents = 1, \ -+ .hw.init = CLK_HW_INIT(_name, \ -+ _parent, \ -+ &ccu_mix_ops, \ -+ _flags|CLK_GET_RATE_NOCACHE), \ -+ }, \ -+ } -+ -+#define SPACEMIT_CCU_DIV_FC_MUX(_struct, _name, _parents, _base_type, _reg_ctrl, \ -+ _mshift, _mwidth, _fc, _muxshift, _muxwidth, _flags) \ -+ struct ccu_mix _struct = { \ -+ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ -+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ -+ .common = { \ -+ .reg_type = CLK_DIV_TYPE_1REG_FC_V2, \ -+ .reg_ctrl = _reg_ctrl, \ -+ .fc = _fc, \ -+ .base_type = _base_type, \ -+ .name = _name, \ -+ .parent_names = _parents, \ -+ .num_parents = ARRAY_SIZE(_parents), \ -+ .hw.init = CLK_HW_INIT_PARENTS(_name, \ -+ _parents, \ -+ &ccu_mix_ops, \ -+ _flags|CLK_GET_RATE_NOCACHE), \ -+ }, \ -+ } -+ -+#define SPACEMIT_CCU_MUX_FC(_struct, _name, _parents, _base_type, _reg_ctrl, \ -+ _fc, _muxshift, _muxwidth, _flags) \ -+ struct ccu_mix _struct = { \ -+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ -+ .common = { \ -+ .reg_type = CLK_DIV_TYPE_1REG_FC_V2, \ -+ .reg_ctrl = _reg_ctrl, \ -+ .fc = _fc, \ -+ .base_type = _base_type, \ -+ .name = _name, \ -+ .parent_names = _parents, \ -+ .num_parents = ARRAY_SIZE(_parents), \ -+ .hw.init = CLK_HW_INIT_PARENTS(_name, \ -+ _parents, \ -+ &ccu_mix_ops, \ -+ _flags|CLK_GET_RATE_NOCACHE), \ -+ }, \ -+ } -+ -+static inline struct ccu_mix *hw_to_ccu_mix(struct clk_hw *hw) -+{ -+ struct ccu_common *common = hw_to_ccu_common(hw); -+ -+ return container_of(common, struct ccu_mix, common); -+} -+ -+extern const struct clk_ops ccu_mix_ops; -+ -+#endif /* _CCU_DIV_H_ */ -diff --git a/drivers/clk/spacemit/ccu_pll.c b/drivers/clk/spacemit/ccu_pll.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/clk/spacemit/ccu_pll.c -@@ -0,0 +1,280 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Spacemit clock type pll -+ * -+ * Copyright (c) 2023, spacemit Corporation. 
-+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "ccu_pll.h" -+ -+#define PLL_MIN_FREQ 600000000 -+#define PLL_MAX_FREQ 3400000000 -+#define PLL_DELAYTIME 590 //(590*5)us -+ -+#define pll_readl(reg) readl(reg) -+#define pll_readl_pll_swcr1(p) pll_readl(p.base + p.reg_ctrl) -+#define pll_readl_pll_swcr2(p) pll_readl(p.base + p.reg_sel) -+#define pll_readl_pll_swcr3(p) pll_readl(p.base + p.reg_xtc) -+ -+#define pll_writel(val, reg) writel(val, reg) -+#define pll_writel_pll_swcr1(val, p) pll_writel(val, p.base + p.reg_ctrl) -+#define pll_writel_pll_swcr2(val, p) pll_writel(val, p.base + p.reg_sel) -+#define pll_writel_pll_swcr3(val, p) pll_writel(val, p.base + p.reg_xtc) -+ -+/* unified pllx_swcr1 for pll1~3 */ -+union pllx_swcr1 { -+ struct { -+ unsigned int reg5:8; -+ unsigned int reg6:8; -+ unsigned int reg7:8; -+ unsigned int reg8:8; -+ } b; -+ unsigned int v; -+}; -+ -+/* unified pllx_swcr2 for pll1~3 */ -+union pllx_swcr2 { -+ struct { -+ unsigned int div1_en:1; -+ unsigned int div2_en:1; -+ unsigned int div3_en:1; -+ unsigned int div4_en:1; -+ unsigned int div5_en:1; -+ unsigned int div6_en:1; -+ unsigned int div7_en:1; -+ unsigned int div8_en:1; -+ unsigned int reserved1:4; -+ unsigned int atest_en:1; -+ unsigned int cktest_en:1; -+ unsigned int dtest_en:1; -+ unsigned int rdo:2; -+ unsigned int mon_cfg:4; -+ unsigned int reserved2:11; -+ } b; -+ unsigned int v; -+}; -+ -+/* unified pllx_swcr3 for pll1~3 */ -+union pllx_swcr3{ -+ struct { -+ unsigned int div_frc:24; -+ unsigned int div_int:7; -+ unsigned int pll_en:1; -+ } b; -+ -+ unsigned int v; -+}; -+ -+static int ccu_pll_is_enabled(struct clk_hw *hw) -+{ -+ struct ccu_pll *p = hw_to_ccu_pll(hw); -+ union pllx_swcr3 swcr3; -+ unsigned int enabled; -+ -+ swcr3.v = pll_readl_pll_swcr3(p->common); -+ enabled = swcr3.b.pll_en; -+ -+ return enabled; -+} -+ -+/* frequency unit Mhz, return pll vco freq */ -+static unsigned long __get_vco_freq(struct clk_hw *hw) -+{ -+ unsigned int reg5, reg6, reg7, reg8, size, i; -+ unsigned int div_int, div_frc; -+ struct ccu_pll_rate_tbl *freq_pll_regs_table; -+ struct ccu_pll *p = hw_to_ccu_pll(hw); -+ union pllx_swcr1 swcr1; -+ union pllx_swcr3 swcr3; -+ -+ swcr1.v = pll_readl_pll_swcr1(p->common); -+ swcr3.v = pll_readl_pll_swcr3(p->common); -+ -+ reg5 = swcr1.b.reg5; -+ reg6 = swcr1.b.reg6; -+ reg7 = swcr1.b.reg7; -+ reg8 = swcr1.b.reg8; -+ -+ div_int = swcr3.b.div_int; -+ div_frc = swcr3.b.div_frc; -+ -+ freq_pll_regs_table = p->pll.rate_tbl; -+ size = p->pll.tbl_size; -+ -+ for (i = 0; i < size; i++) { -+ if ((freq_pll_regs_table[i].reg5 == reg5) -+ && (freq_pll_regs_table[i].reg6 == reg6) -+ && (freq_pll_regs_table[i].reg7 == reg7) -+ && (freq_pll_regs_table[i].reg8 == reg8) -+ && (freq_pll_regs_table[i].div_int == div_int) -+ && (freq_pll_regs_table[i].div_frac == div_frc)) -+ return freq_pll_regs_table[i].rate; -+ -+ } -+ -+ pr_err("Unknown rate for clock %s\n", __clk_get_name(hw->clk)); -+ -+ return 0; -+} -+ -+static int ccu_pll_enable(struct clk_hw *hw) -+{ -+ unsigned int delaytime = PLL_DELAYTIME; -+ unsigned long flags; -+ struct ccu_pll *p = hw_to_ccu_pll(hw); -+ union pllx_swcr3 swcr3; -+ -+ if (ccu_pll_is_enabled(hw)) -+ return 0; -+ -+ spin_lock_irqsave(p->common.lock, flags); -+ swcr3.v = pll_readl_pll_swcr3(p->common); -+ swcr3.b.pll_en = 1; -+ pll_writel_pll_swcr3(swcr3.v, p->common); -+ spin_unlock_irqrestore(p->common.lock, flags); -+ -+ /* check lock status */ -+ udelay(50); -+ -+ while ((!(readl(p->pll.lock_base + p->pll.reg_lock) & 
p->pll.lock_enable_bit)) -+ && delaytime) { -+ udelay(5); -+ delaytime--; -+ } -+ if (unlikely(!delaytime)) { -+ pr_err("%s enabling didn't get stable within 3000us!!!\n", __clk_get_name(hw->clk)); -+ //panic("pllx_r/w timeout!\n"); -+ } -+ -+ return 0; -+} -+ -+static void ccu_pll_disable(struct clk_hw *hw) -+{ -+ unsigned long flags; -+ struct ccu_pll *p = hw_to_ccu_pll(hw); -+ union pllx_swcr3 swcr3; -+ -+ spin_lock_irqsave(p->common.lock, flags); -+ swcr3.v = pll_readl_pll_swcr3(p->common); -+ swcr3.b.pll_en = 0; -+ pll_writel_pll_swcr3(swcr3.v, p->common); -+ spin_unlock_irqrestore(p->common.lock, flags); -+} -+ -+/* -+ * pll rate change requires sequence: -+ * clock off -> change rate setting -> clock on -+ * This function doesn't really change rate, but cache the config -+ */ -+static int ccu_pll_set_rate(struct clk_hw *hw, unsigned long rate, -+ unsigned long parent_rate) -+{ -+ unsigned int i, reg5 = 0, reg6 = 0, reg7 = 0, reg8 = 0; -+ unsigned int div_int, div_frc; -+ unsigned long flags; -+ unsigned long new_rate = rate, old_rate; -+ struct ccu_pll *p = hw_to_ccu_pll(hw); -+ struct ccu_pll_config *params = &p->pll; -+ union pllx_swcr1 swcr1; -+ union pllx_swcr3 swcr3; -+ bool found = false; -+ -+ if (ccu_pll_is_enabled(hw)) { -+ pr_err("%s %s is enabled, ignore the setrate!\n", -+ __func__, __clk_get_name(hw->clk)); -+ return 0; -+ } -+ -+ old_rate = __get_vco_freq(hw); -+ /* setp 1: calculate fbd frcd kvco and band */ -+ if (params->rate_tbl) { -+ for (i = 0; i < params->tbl_size; i++) { -+ if (rate == params->rate_tbl[i].rate) { -+ found = true; -+ -+ reg5 = params->rate_tbl[i].reg5; -+ reg6 = params->rate_tbl[i].reg6; -+ reg7 = params->rate_tbl[i].reg7; -+ reg8 = params->rate_tbl[i].reg8; -+ div_int = params->rate_tbl[i].div_int; -+ div_frc = params->rate_tbl[i].div_frac; -+ break; -+ } -+ } -+ -+ BUG_ON(!found); -+ } else { -+ pr_err("don't find freq table for pll\n"); -+ return -EINVAL; -+ } -+ -+ spin_lock_irqsave(p->common.lock, flags); -+ /* setp 2: set pll kvco/band and fbd/frcd setting */ -+ swcr1.v = pll_readl_pll_swcr1(p->common); -+ swcr1.b.reg5 = reg5; -+ swcr1.b.reg6 = reg6; -+ swcr1.b.reg7 = reg7; -+ swcr1.b.reg8 = reg8; -+ pll_writel_pll_swcr1(swcr1.v, p->common); -+ -+ swcr3.v = pll_readl_pll_swcr3(p->common); -+ swcr3.b.div_int = div_int; -+ swcr3.b.div_frc = div_frc; -+ pll_writel_pll_swcr3(swcr3.v, p->common); -+ -+ spin_unlock_irqrestore(p->common.lock, flags); -+ -+ pr_debug("%s %s rate %lu->%lu!\n", __func__, -+ __clk_get_name(hw->clk), old_rate, new_rate); -+ return 0; -+} -+ -+static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw, -+ unsigned long parent_rate) -+{ -+ return __get_vco_freq(hw); -+} -+ -+static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate, -+ unsigned long *prate) -+{ -+ struct ccu_pll *p = hw_to_ccu_pll(hw); -+ unsigned long max_rate = 0; -+ unsigned int i; -+ struct ccu_pll_config *params = &p->pll; -+ -+ if (rate > PLL_MAX_FREQ || rate < PLL_MIN_FREQ) { -+ pr_err("%lu rate out of range!\n", rate); -+ return -EINVAL; -+ } -+ -+ if (params->rate_tbl) { -+ for (i = 0; i < params->tbl_size; i++) { -+ if (params->rate_tbl[i].rate <= rate) { -+ if (max_rate < params->rate_tbl[i].rate) -+ max_rate = params->rate_tbl[i].rate; -+ } -+ } -+ } else { -+ pr_err("don't find freq table for pll\n"); -+ } -+ return max_rate; -+} -+ -+const struct clk_ops ccu_pll_ops = { -+ .enable = ccu_pll_enable, -+ .disable = ccu_pll_disable, -+ .set_rate = ccu_pll_set_rate, -+ .recalc_rate = ccu_pll_recalc_rate, -+ .round_rate = 
ccu_pll_round_rate, -+ .is_enabled = ccu_pll_is_enabled, -+}; -+ -diff --git a/drivers/clk/spacemit/ccu_pll.h b/drivers/clk/spacemit/ccu_pll.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/clk/spacemit/ccu_pll.h -@@ -0,0 +1,84 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Copyright (c) 2023, spacemit Corporation. -+ * -+ */ -+ -+#ifndef _CCU_PLL_H_ -+#define _CCU_PLL_H_ -+ -+#include -+#include -+#include "ccu-spacemit-k1x.h" -+ -+struct ccu_pll_rate_tbl { -+ unsigned long long rate; -+ u32 reg5; -+ u32 reg6; -+ u32 reg7; -+ u32 reg8; -+ unsigned int div_int; -+ unsigned int div_frac; -+}; -+ -+struct ccu_pll_config { -+ struct ccu_pll_rate_tbl * rate_tbl; -+ u32 tbl_size; -+ void __iomem *lock_base; -+ u32 reg_lock; -+ u32 lock_enable_bit; -+}; -+ -+#define PLL_RATE(_rate, _reg5, _reg6, _reg7, _reg8, _div_int, _div_frac) \ -+ { \ -+ .rate = (_rate), \ -+ .reg5 = (_reg5), \ -+ .reg6 = (_reg6), \ -+ .reg7 = (_reg7), \ -+ .reg8 = (_reg8), \ -+ .div_int = (_div_int), \ -+ .div_frac = (_div_frac), \ -+ } -+ -+struct ccu_pll { -+ struct ccu_pll_config pll; -+ struct ccu_common common; -+}; -+ -+#define _SPACEMIT_CCU_PLL_CONFIG(_table, _size, _reg_lock, _lock_enable_bit) \ -+ { \ -+ .rate_tbl = (struct ccu_pll_rate_tbl *)_table, \ -+ .tbl_size = _size, \ -+ .reg_lock = _reg_lock, \ -+ .lock_enable_bit = _lock_enable_bit, \ -+ } -+ -+#define SPACEMIT_CCU_PLL(_struct, _name, _table, _size, \ -+ _base_type, _reg_ctrl, _reg_sel, _reg_xtc,\ -+ _reg_lock, _lock_enable_bit, _is_pll, \ -+ _flags) \ -+ struct ccu_pll _struct = { \ -+ .pll = _SPACEMIT_CCU_PLL_CONFIG(_table, _size, _reg_lock, _lock_enable_bit), \ -+ .common = { \ -+ .reg_ctrl = _reg_ctrl, \ -+ .reg_sel = _reg_sel, \ -+ .reg_xtc = _reg_xtc, \ -+ .base_type = _base_type, \ -+ .is_pll = _is_pll, \ -+ .hw.init = CLK_HW_INIT_NO_PARENT(_name, \ -+ &ccu_pll_ops, \ -+ _flags), \ -+ } \ -+ } -+ -+ -+static inline struct ccu_pll *hw_to_ccu_pll(struct clk_hw *hw) -+{ -+ struct ccu_common *common = hw_to_ccu_common(hw); -+ -+ return container_of(common, struct ccu_pll, common); -+} -+ -+extern const struct clk_ops ccu_pll_ops; -+ -+#endif --- -Armbian - diff --git a/patch/kernel/archive/spacemit-6.1/008-drivers-clocksource.patch b/patch/kernel/archive/spacemit-6.1/008-drivers-clocksource.patch deleted file mode 100644 index 75240dcc6875..000000000000 --- a/patch/kernel/archive/spacemit-6.1/008-drivers-clocksource.patch +++ /dev/null @@ -1,938 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Fri, 21 Jun 2024 11:54:06 -0400 -Subject: add spacemit patch set - -source: https://gitee.com/bianbu-linux/linux-6.1 - -Signed-off-by: Patrick Yavitz ---- - drivers/clocksource/Kconfig | 7 + - drivers/clocksource/Makefile | 1 + - drivers/clocksource/dw_apb_timer.c | 14 +- - drivers/clocksource/timer-k1x.c | 703 ++++++++++ - 4 files changed, 723 insertions(+), 2 deletions(-) - -diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig -index 111111111111..222222222222 100644 ---- a/drivers/clocksource/Kconfig -+++ b/drivers/clocksource/Kconfig -@@ -134,6 +134,13 @@ config RDA_TIMER - help - Enables the support for the RDA Micro timer driver. - -+config SPACEMIT_K1X_TIMER -+ bool "Spacemit k1x timer driver" if COMPILE_TEST -+ select CLKSRC_MMIO -+ select TIMER_OF -+ help -+ Enables the support for the spacemit k1x timer driver. 
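For orientation while reading the timer driver added below: it registers via TIMER_OF_DECLARE against "spacemit,soc-timer" and takes its setup from vendor device-tree properties. The sketch that follows only mirrors the property names the probe code reads; the unit address, clock/reset phandles, frequencies and interrupt number are illustrative assumptions, not values taken from a real K1X device tree.

    timer@d4014000 {                                /* address is a placeholder */
            compatible = "spacemit,soc-timer";
            reg = <0x0 0xd4014000 0x0 0x100>;
            clocks = <&ccu CLK_TIMER0>;             /* consumed by of_clk_get(np, 0); names are placeholders */
            resets = <&reset RESET_TIMER0>;         /* consumed by of_reset_control_get(np, 0) */
            spacemit,timer-id = <0>;
            spacemit,timer-fastclk-frequency = <3250000>;   /* assumed fast-clock rate */
            spacemit,timer-apb-frequency = <26000000>;      /* assumed APB rate */
            spacemit,timer-frequency = <32768>;             /* 32 kHz counter clock, per the driver's table */

            counter0 {
                    compatible = "spacemit,timer-match";
                    interrupts = <13>;                      /* placeholder IRQ */
                    spacemit,timer-counter-id = <0>;
                    spacemit,timer-broadcast;               /* or: spacemit,timer-counter-cpu = <n> */
            };
    };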
-+ - config SUN4I_TIMER - bool "Sun4i timer driver" if COMPILE_TEST - depends on HAS_IOMEM -diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/clocksource/Makefile -+++ b/drivers/clocksource/Makefile -@@ -59,6 +59,7 @@ obj-$(CONFIG_MILBEAUT_TIMER) += timer-milbeaut.o - obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o - obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o - obj-$(CONFIG_RDA_TIMER) += timer-rda.o -+obj-$(CONFIG_SPACEMIT_K1X_TIMER) += timer-k1x.o - - obj-$(CONFIG_ARC_TIMERS) += arc_timer.o - obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o -diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c -index 111111111111..222222222222 100644 ---- a/drivers/clocksource/dw_apb_timer.c -+++ b/drivers/clocksource/dw_apb_timer.c -@@ -34,6 +34,8 @@ - #define APBTMR_CONTROL_MODE_PERIODIC (1 << 1) - #define APBTMR_CONTROL_INT (1 << 2) - -+static raw_spinlock_t dw_apb_timer_lock; -+ - static inline struct dw_apb_clock_event_device * - ced_to_dw_apb_ced(struct clock_event_device *evt) - { -@@ -102,8 +104,10 @@ static irqreturn_t dw_apb_clockevent_irq(int irq, void *data) - return IRQ_NONE; - } - -+ raw_spin_lock(&dw_ced->timer_lock); - if (dw_ced->eoi) - dw_ced->eoi(&dw_ced->timer); -+ raw_spin_unlock(&dw_ced->timer_lock); - - evt->event_handler(evt); - return IRQ_HANDLED; -@@ -207,15 +211,20 @@ static int apbt_next_event(unsigned long delta, - u32 ctrl; - struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); - -+ raw_spin_lock(&dw_ced->timer_lock); -+ - /* Disable timer */ - ctrl = apbt_readl_relaxed(&dw_ced->timer, APBTMR_N_CONTROL); - ctrl &= ~APBTMR_CONTROL_ENABLE; - apbt_writel_relaxed(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); -+ - /* write new count */ - apbt_writel_relaxed(&dw_ced->timer, delta, APBTMR_N_LOAD_COUNT); - ctrl |= APBTMR_CONTROL_ENABLE; - apbt_writel_relaxed(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); - -+ raw_spin_unlock(&dw_ced->timer_lock); -+ - return 0; - } - -@@ -248,6 +257,8 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, - if (!dw_ced) - return NULL; - -+ raw_spin_lock_init(&dw_ced->timer_lock); -+ - dw_ced->timer.base = base; - dw_ced->timer.irq = irq; - dw_ced->timer.freq = freq; -@@ -272,8 +283,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, - dw_ced->ced.name = name; - - dw_ced->eoi = apbt_eoi; -- err = request_irq(irq, dw_apb_clockevent_irq, -- IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, -+ err = request_irq(irq, dw_apb_clockevent_irq, IRQF_ONESHOT, - dw_ced->ced.name, &dw_ced->ced); - if (err) { - pr_err("failed to request timer irq\n"); -diff --git a/drivers/clocksource/timer-k1x.c b/drivers/clocksource/timer-k1x.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/clocksource/timer-k1x.c -@@ -0,0 +1,703 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * spacemit-k1x timer driver -+ * -+ * Copyright (C) 2023 Spacemit -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define TMR_CCR (0x000c) -+#define TMR_TN_MM(n, m) (0x0010 + ((n) << 4) + ((m) << 2)) -+#define TMR_CR(n) (0x0090 + ((n) << 2)) -+#define TMR_SR(n) (0x0080 + ((n) << 2)) -+#define TMR_IER(n) (0x0060 + ((n) << 2)) -+#define TMR_PLVR(n) (0x0040 + ((n) << 2)) -+#define TMR_PLCR(n) (0x0050 + ((n) << 2)) -+#define TMR_WMER (0x0068) -+#define TMR_WMR (0x006c) -+#define 
TMR_WVR (0x00cc) -+#define TMR_WSR (0x00c0) -+#define TMR_ICR(n) (0x0070 + ((n) << 2)) -+#define TMR_WICR (0x00c4) -+#define TMR_CER (0x0000) -+#define TMR_CMR (0x0004) -+#define TMR_WCR (0x00c8) -+#define TMR_WFAR (0x00b0) -+#define TMR_WSAR (0x00b4) -+#define TMR_CRSR (0x0008) -+ -+#define TMR_CCR_CS_0(x) (((x) & 0x3) << 0) -+#define TMR_CCR_CS_1(x) (((x) & 0x3) << 2) -+#define TMR_CCR_CS_2(x) (((x) & 0x3) << 5) -+ -+#define MAX_EVT_NUM 5 -+ -+#define MAX_DELTA (0xfffffffe) -+#define MIN_DELTA (5) -+ -+#define SPACEMIT_MAX_COUNTER 3 -+#define SPACEMIT_MAX_TIMER 3 -+ -+#define TMR_CER_COUNTER(cid) (1 << (cid)) -+#define SPACEMIT_ALL_COUNTERS ((1 << SPACEMIT_MAX_COUNTER) - 1) -+ -+#define SPACEMIT_TIMER_CLOCK_32KHZ 32768 -+ -+#define SPACEMIT_TIMER_COUNTER_CLKSRC (1 << 0) -+#define SPACEMIT_TIMER_COUNTER_CLKEVT (1 << 1) -+#define SPACEMIT_TIMER_COUNTER_DELAY (1 << 2) -+ -+#define SPACEMIT_TIMER_ALL_CPU (0xFFFFFFFF) -+ -+struct spacemit_timer; -+ -+struct spacemit_timer_evt { -+ struct clock_event_device ced; -+ struct irqaction irqa; -+ unsigned int freq; -+ unsigned int irq; -+ unsigned int cid; -+ unsigned int tid; -+ int cpu; -+ bool timer_enabled; -+ /* 0: timer set; 1: timer timeout(irq comes) */ -+ int timer_status; -+ unsigned int timeout; -+ struct spacemit_timer *timer; -+}; -+ -+struct spacemit_timer { -+ unsigned int id; -+ void __iomem *base; -+ struct spacemit_timer_evt evt[SPACEMIT_MAX_COUNTER]; -+ unsigned int flag; -+ int loop_delay_fastclk; -+ unsigned int fc_freq; -+ unsigned int freq; -+ struct clk *clk; -+ /* lock to protect hw operation. */ -+ spinlock_t tm_lock; -+}; -+ -+struct timer_werror_info { -+ u32 reg; -+ u32 target; -+ u32 val; -+ u32 mask; -+}; -+ -+/* record the last x write failures */ -+#define TIMER_ERR_NUM 10 -+static struct timer_werror_info werr_info[TIMER_ERR_NUM]; -+static int werr_index; -+ -+static struct spacemit_timer *spacemit_timers[SPACEMIT_MAX_TIMER]; -+static int timer_counter_switch_clock(struct spacemit_timer *tm, unsigned int freq); -+ -+void timer_dump_hwinfo(int tid) -+{ -+ struct spacemit_timer_evt *t_evt = &spacemit_timers[tid]->evt[0]; -+ void __iomem *base = spacemit_timers[tid]->base; -+ unsigned int sr, cid, cer, cmr, ccr, mr, ier, cr; -+ -+ cid = t_evt->cid; -+ -+ cer = __raw_readl(base + TMR_CER); -+ cmr = __raw_readl(base + TMR_CMR); -+ ccr = __raw_readl(base + TMR_CCR); -+ mr = __raw_readl(base + TMR_TN_MM(cid, 0)); -+ ier = __raw_readl(base + TMR_IER(cid)); -+ sr = __raw_readl(base + TMR_SR(cid)); -+ cr = __raw_readl(base + TMR_CR(cid)); -+ -+ pr_err("timer enable: %d. timeout: %d cycles. next event: %lld\n", !t_evt->timer_status, t_evt->timeout, t_evt->ced.next_event); -+ -+ pr_err("cer/cmr/ccr/mr/ier/sr/cr: (0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x)\n", cer, cmr, ccr, mr, ier, sr, cr); -+ -+ return; -+} -+ -+static void timer_write_error(u32 reg, u32 target, u32 val, u32 mask) -+{ -+ werr_info[werr_index].reg = reg; -+ werr_info[werr_index].target = target; -+ werr_info[werr_index].val = val; -+ werr_info[werr_index].mask = mask; -+ werr_index = (werr_index+1) % TIMER_ERR_NUM; -+ -+ pr_err("timer write fail: register = 0x%x: (0x%x, 0x%x, 0x%x)\n", reg, target, val, mask); -+} -+ -+static void timer_write_check(struct spacemit_timer *tm, u32 reg, u32 val, u32 mask, bool clr, bool clk_switch) -+{ -+ int loop = 3, retry = 100; -+ u32 t_read, t_check = clr ? 
!val : val; -+ -+reg_re_write: -+ __raw_writel(val, tm->base + reg); -+ -+ if (clk_switch) -+ timer_counter_switch_clock(tm, tm->fc_freq); -+ -+ t_read = __raw_readl(tm->base + reg); -+ -+ while (((t_read & mask) != (t_check & mask)) && loop) { -+ /* avoid trying frequently to worsen bus contention */ -+ udelay(30); -+ t_read = __raw_readl(tm->base + reg); -+ loop--; -+ -+ if (!loop) { -+ timer_write_error(reg, t_check, t_read, mask); -+ loop = 3; -+ if (--retry) -+ goto reg_re_write; -+ else -+ return; -+ } -+ } -+} -+ -+static int timer_counter_switch_clock(struct spacemit_timer *tm, unsigned int freq) -+{ -+ u32 ccr, val, mask, tid; -+ -+ tid = tm->id; -+ -+ ccr = __raw_readl(tm->base + TMR_CCR); -+ -+ switch (tid) { -+ case 0: -+ mask = TMR_CCR_CS_0(3); -+ break; -+ case 1: -+ mask = TMR_CCR_CS_1(3); -+ break; -+ case 2: -+ mask = TMR_CCR_CS_2(3); -+ break; -+ default: -+ pr_err("wrong timer id: 0x%x\n", tid); -+ return -EINVAL; -+ } -+ -+ ccr &= ~mask; -+ -+ if (freq == tm->fc_freq) -+ val = 0; -+ else if (freq == SPACEMIT_TIMER_CLOCK_32KHZ) -+ val = 1; -+ else { -+ pr_err("Timer %d: invalid clock rate %d\n", tid, freq); -+ return -EINVAL; -+ } -+ -+ switch (tid) { -+ case 0: -+ ccr |= TMR_CCR_CS_0(val); -+ break; -+ case 1: -+ ccr |= TMR_CCR_CS_1(val); -+ break; -+ case 2: -+ ccr |= TMR_CCR_CS_2(val); -+ break; -+ } -+ -+ timer_write_check(tm, TMR_CCR, ccr, mask, false, false); -+ -+ return 0; -+} -+ -+static void timer_counter_disable(struct spacemit_timer_evt *evt) -+{ -+ struct spacemit_timer *tm = evt->timer; -+ u32 cer; -+ bool clk_switch = false; -+ -+ if (evt->freq != tm->fc_freq) -+ clk_switch = true; -+ /* -+ * Stop the counter will need multiple timer clock to take effect. -+ * Some operations can only be done when counter is disabled. So -+ * add delay here. -+ */ -+ /* Step1: disable counter */ -+ cer = __raw_readl(tm->base + TMR_CER); -+ timer_write_check(tm, TMR_CER, (cer & ~(1 << evt->cid)), (1 << evt->cid), false, clk_switch); -+ -+ /* remove unnecesary write, check explicitly: 2 cycles (32k) */ -+ -+ evt->timer_status = 1; -+} -+ -+static void timer_counter_enable(struct spacemit_timer_evt *evt) -+{ -+ struct spacemit_timer *tm = evt->timer; -+ u32 cer; -+ -+ /* Switch to original clock */ -+ if (evt->freq != tm->fc_freq) -+ timer_counter_switch_clock(tm, evt->freq); -+ -+ /* Enable timer */ -+ cer = __raw_readl(tm->base + TMR_CER); -+ -+ timer_write_check(tm, TMR_CER, (cer | (1 << evt->cid)), (1 << evt->cid), false, false); -+ -+ evt->timer_status = 0; -+} -+ -+static irqreturn_t timer_interrupt(int irq, void *dev_id) -+{ -+ struct clock_event_device *c = dev_id; -+ struct spacemit_timer_evt *evt; -+ unsigned int cnt; -+ unsigned long flags; -+ void __iomem *base; -+ -+ evt = container_of(c, struct spacemit_timer_evt, ced); -+ cnt = evt->cid; -+ base = evt->timer->base; -+ -+ spin_lock_irqsave(&(evt->timer->tm_lock), flags); -+ /* We only use match #0 for the counter. */ -+ if (__raw_readl(base + TMR_SR(cnt)) & 0x1) { -+ timer_counter_disable(evt); -+ -+ /* Disable the interrupt. 
*/ -+ timer_write_check(evt->timer, TMR_IER(cnt), 0, 0x7, false, false); -+ /* Clear interrupt status */ -+ timer_write_check(evt->timer, TMR_ICR(cnt), 0x1, 0x7, true, false); -+ -+ spin_unlock_irqrestore(&(evt->timer->tm_lock), flags); -+ -+ c->event_handler(c); -+ -+ return IRQ_HANDLED; -+ } -+ -+ spin_unlock_irqrestore(&(evt->timer->tm_lock), flags); -+ return IRQ_NONE; -+} -+ -+static int timer_shutdown(struct clock_event_device *dev) -+{ -+ struct spacemit_timer_evt *evt; -+ unsigned long flags; -+ -+ evt = container_of(dev, struct spacemit_timer_evt, ced); -+ -+ spin_lock_irqsave(&(evt->timer->tm_lock), flags); -+ -+ evt->timer_enabled = !evt->timer_status; -+ -+ /* disable counter */ -+ timer_counter_disable(evt); -+ -+ spin_unlock_irqrestore(&(evt->timer->tm_lock), flags); -+ -+ return 0; -+} -+ -+static int timer_resume(struct clock_event_device *dev) -+{ -+ struct spacemit_timer_evt *evt; -+ unsigned long flags; -+ -+ evt = container_of(dev, struct spacemit_timer_evt, ced); -+ -+ spin_lock_irqsave(&(evt->timer->tm_lock), flags); -+ -+ /* check whether need to enable timer */ -+ if (evt->timer_enabled) -+ timer_counter_enable(evt); -+ -+ spin_unlock_irqrestore(&(evt->timer->tm_lock), flags); -+ -+ return 0; -+} -+ -+static int timer_set_next_event(unsigned long delta, -+ struct clock_event_device *dev) -+{ -+ struct spacemit_timer_evt *evt; -+ unsigned int cid; -+ unsigned long flags; -+ u32 cer; -+ void __iomem *base; -+ -+ evt = container_of(dev, struct spacemit_timer_evt, ced); -+ cid = evt->cid; -+ base = evt->timer->base; -+ -+ spin_lock_irqsave(&(evt->timer->tm_lock), flags); -+ -+ cer = __raw_readl(base + TMR_CER); -+ -+ /* If the timer counter is enabled, first disable it. */ -+ if (cer & (1 << cid)) -+ timer_counter_disable(evt); -+ -+ /* Setup new counter value */ -+ timer_write_check(evt->timer, TMR_TN_MM(cid, 0), (delta - 1), (u32)(-1), false, false); -+ -+ /* enable the matching interrupt */ -+ timer_write_check(evt->timer, TMR_IER(cid), 0x1, 0x1, false, false); -+ -+ timer_counter_enable(evt); -+ -+ evt->timeout = delta - 1; -+ -+ spin_unlock_irqrestore(&(evt->timer->tm_lock), flags); -+ return 0; -+} -+ -+int __init spacemit_timer_init(struct device_node *np, int tid, void __iomem *base, -+ unsigned int flag, unsigned int fc_freq, -+ unsigned int apb_freq, unsigned int freq) -+{ -+ struct spacemit_timer *tm = spacemit_timers[tid]; -+ struct clk *clk; -+ struct reset_control *resets; -+ u32 tmp, delay; -+ -+ if (tm) -+ return -EINVAL; -+ -+ tm = kzalloc(sizeof(*tm), GFP_KERNEL); -+ if (!tm) -+ return -ENOMEM; -+ -+ clk = of_clk_get(np, 0); -+ if (!clk) { -+ pr_err("%s: get clk failed! 
%s\n", __func__, np->name); -+ goto out; -+ } -+ -+ if (IS_ERR(clk)) { -+ pr_err("Timer %d: fail to get clock!\n", tid); -+ goto out; -+ } -+ -+ if (clk_prepare_enable(clk)) { -+ pr_err("Timer %d: fail to enable clock!\n", tid); -+ goto out; -+ } -+ -+ if (clk_set_rate(clk, fc_freq)) { -+ pr_err("Timer %d: fail to set clock rate to %uHz!\n", tid, fc_freq); -+ goto out; -+ } -+ -+ resets = of_reset_control_get(np, 0); -+ if(IS_ERR(resets)) { -+ clk_disable_unprepare(clk); -+ return PTR_ERR(resets); -+ } -+ reset_control_deassert(resets); -+ /* -+ * The calculation formula for the loop cycle is: -+ * -+ * (1) need wait for 2 timer's clock cycle: -+ * 1 2 -+ * ------- x 2 = ------- -+ * fc_freq fc_freq -+ * -+ * (2) convert to apb clock cycle: -+ * 2 1 apb_freq * 2 -+ * ------- / -------- = ---------------- -+ * fc_freq apb_freq fc_freq -+ * -+ * (3) every apb register's accessing will take 8 apb clock cycle, -+ * also consider add extral one more time for safe way; -+ * so finally need loop times for the apb register accessing: -+ * -+ * (apb_freq * 2) -+ * ------------------ / 8 + 1 -+ * fc_freq -+ */ -+ delay = ((apb_freq * 2) / fc_freq / 8) + 1; -+ pr_err("Timer %d: loop_delay_fastclk is %d\n", tid, delay); -+ -+ tm->id = tid; -+ tm->base = base; -+ tm->flag = flag; -+ tm->loop_delay_fastclk = delay; -+ tm->fc_freq = fc_freq; -+ tm->freq = freq; -+ spin_lock_init(&(tm->tm_lock)); -+ -+ spacemit_timers[tid] = tm; -+ -+ /* We will disable all counters. Switch to fastclk first. */ -+ timer_counter_switch_clock(tm, fc_freq); -+ -+ /* disalbe all counters */ -+ tmp = __raw_readl(base + TMR_CER) & ~SPACEMIT_ALL_COUNTERS; -+ __raw_writel(tmp, base + TMR_CER); -+ -+ /* disable matching interrupt */ -+ __raw_writel(0x00, base + TMR_IER(0)); -+ __raw_writel(0x00, base + TMR_IER(1)); -+ __raw_writel(0x00, base + TMR_IER(2)); -+ -+ while (delay--) { -+ /* Clear pending interrupt status */ -+ __raw_writel(0x1, base + TMR_ICR(0)); -+ __raw_writel(0x1, base + TMR_ICR(1)); -+ __raw_writel(0x1, base + TMR_ICR(2)); -+ __raw_writel(tmp, base + TMR_CER); -+ } -+ -+ return 0; -+out: -+ kfree(tm); -+ return -EINVAL; -+} -+ -+static int __init spacemit_timer_hw_init(struct spacemit_timer_evt *evt) -+{ -+ struct spacemit_timer *tm = evt->timer; -+ unsigned int tmp, delay, freq, cid, ratio; -+ int ret; -+ -+ cid = evt->cid; -+ freq = evt->freq; -+ -+ ret = timer_counter_switch_clock(tm, freq); -+ if (ret) -+ return ret; -+ -+ ratio = tm->fc_freq / freq; -+ delay = tm->loop_delay_fastclk * ratio; -+ -+ /* set timer to free-running mode */ -+ tmp = __raw_readl(tm->base + TMR_CMR) | TMR_CER_COUNTER(cid); -+ __raw_writel(tmp, tm->base + TMR_CMR); -+ -+ /* free-running */ -+ __raw_writel(0x0, tm->base + TMR_PLCR(cid)); -+ /* clear status */ -+ __raw_writel(0x7, tm->base + TMR_ICR(cid)); -+ -+ /* enable counter */ -+ tmp = __raw_readl(tm->base + TMR_CER) | TMR_CER_COUNTER(cid); -+ __raw_writel(tmp, tm->base + TMR_CER); -+ -+ while (delay--) -+ __raw_writel(tmp, tm->base + TMR_CER); -+ -+ return 0; -+} -+ -+ -+int __init spacemit_timer_setup(struct spacemit_timer_evt *evt) -+{ -+ int broadcast = 0; -+ int ret; -+ -+ if (evt->cpu == SPACEMIT_TIMER_ALL_CPU) -+ broadcast = 1; -+ else if (evt->cpu >= num_possible_cpus()) -+ return -EINVAL; -+ -+ evt->ced.name = "timer-spacemit"; -+ evt->ced.features = CLOCK_EVT_FEAT_ONESHOT; -+ evt->ced.rating = 200; -+ evt->ced.set_next_event = timer_set_next_event; -+ evt->ced.set_state_shutdown = timer_shutdown; -+ evt->ced.tick_resume = timer_resume; -+ evt->ced.irq = evt->irq; -+ -+ 
evt->irqa.flags = IRQF_TIMER | IRQF_IRQPOLL; -+ evt->irqa.handler = timer_interrupt; -+ evt->irqa.dev_id = &(evt->ced); -+ -+ ret = spacemit_timer_hw_init(evt); -+ if (ret) -+ return ret; -+ -+ if (broadcast) { -+ evt->irqa.name = "broadcast-timer"; -+ /* evt->ced.features |= CLOCK_EVT_FEAT_DYNIRQ; */ -+ evt->ced.cpumask = cpu_possible_mask; -+ ret = request_irq(evt->ced.irq, timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL | IRQF_ONESHOT, "broadcast-timer", evt->irqa.dev_id); -+ if (ret < 0) -+ return ret; -+ clockevents_config_and_register(&evt->ced, -+ evt->freq, MIN_DELTA, MAX_DELTA); -+ } else { -+ evt->irqa.name = "local-timer"; -+ evt->ced.cpumask = cpumask_of(evt->cpu); -+ evt->irqa.flags |= IRQF_PERCPU; -+ ret = request_irq(evt->ced.irq, timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, "local-timer", evt->irqa.dev_id); -+ if (ret < 0) -+ return ret; -+ /* Enable clock event device for boot CPU. */ -+ if (evt->cpu == smp_processor_id()) { -+ clockevents_config_and_register(&evt->ced, -+ evt->freq, MIN_DELTA, -+ MAX_DELTA); -+ /* Only online CPU can be set affinity. */ -+ irq_set_affinity_hint(evt->ced.irq, cpumask_of(evt->cpu)); -+ } else { -+ /* disable none boot CPU's irq at first */ -+ disable_irq(evt->ced.irq); -+ } -+ } -+ -+ return 0; -+} -+ -+#ifdef CONFIG_OF -+ -+const struct of_device_id spacemit_counter_of_id[] = { -+ { -+ .compatible = "spacemit,timer-match", -+ }, -+ { }, -+}; -+ -+static int __init spacemit_of_counter_init(struct device_node *np, int tid) -+{ -+ int irq, ret; -+ unsigned int cid, cpu; -+ struct spacemit_timer_evt *evt; -+ -+ if (!np) -+ return -EINVAL; -+ -+ ret = of_property_read_u32(np, "spacemit,timer-counter-id", &cid); -+ if (ret || cid >= SPACEMIT_MAX_TIMER) { -+ pr_err("Timer %d: fail to get counter id 0x%x\n", tid, cid); -+ return ret; -+ } -+ -+ if (of_property_read_bool(np, "spacemit,timer-broadcast")) -+ cpu = SPACEMIT_TIMER_ALL_CPU; -+ else { -+ ret = of_property_read_u32(np, -+ "spacemit,timer-counter-cpu", -+ &cpu); -+ if (ret) { -+ pr_err("Timer %d:%d: fail to get cpu\n", -+ tid, cid); -+ return ret; -+ } -+ } -+ irq = irq_of_parse_and_map(np, 0); -+ evt = &spacemit_timers[tid]->evt[cid]; -+ evt->timer = spacemit_timers[tid]; -+ evt->freq = spacemit_timers[tid]->freq; -+ evt->irq = irq; -+ evt->cpu = cpu; -+ evt->cid = cid; -+ evt->tid = tid; -+ ret = spacemit_timer_setup(evt); -+ if (ret) { -+ pr_err("Timer %d:%d: fail to create clkevt\n", -+ tid, cid); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int __init spacemit_of_timer_init(struct device_node *np) -+{ -+ unsigned int flag, tid, fc_freq, apb_freq, freq; -+ void __iomem *base; -+ struct device_node *child_np; -+ const struct of_device_id *match; -+ int ret = 0; -+ -+ /* timer initialization */ -+ base = of_iomap(np, 0); -+ if (!base) { -+ pr_err("Timer: fail to map register space\n"); -+ ret = -EINVAL; -+ goto out; -+ } -+ -+ flag = 0; -+ -+ /* get timer id */ -+ ret = of_property_read_u32(np, "spacemit,timer-id", &tid); -+ if (ret || tid >= SPACEMIT_MAX_TIMER) { -+ pr_err("Timer %d: fail to get timer-id with err %d\n", tid, ret); -+ goto out; -+ } -+ -+ /* timer's fast clock and apb frequency */ -+ ret = of_property_read_u32(np, "spacemit,timer-fastclk-frequency", &fc_freq); -+ if (ret) { -+ pr_err("Timer %d: fail to get fastclk-frequency with err %d\n", -+ tid, ret); -+ goto out; -+ } -+ -+ ret = of_property_read_u32(np, "spacemit,timer-apb-frequency", &apb_freq); -+ if (ret) { -+ pr_err("Timer %d: fail to get apb-frequency with err %d\n", -+ tid, ret); -+ goto out; -+ } -+ -+ ret = 
of_property_read_u32(np, "spacemit,timer-frequency", &freq); -+ if (ret) { -+ pr_err("Timer %d: fail to get timer frequency with err %d\n", -+ tid, ret); -+ goto out; -+ } -+ -+ /* -+ * Need use loop for more safe register's accessing, -+ * so at here dynamically calculate the loop time. -+ */ -+ if (!fc_freq || !apb_freq) { -+ pr_err("mmp timer's fast clock or apb freq are incorrect!\n"); -+ ret = -EINVAL; -+ goto out; -+ } -+ -+ ret = spacemit_timer_init(np, tid, base, flag, fc_freq, apb_freq, freq); -+ if (ret) -+ goto out; -+ -+ /* counter initialization */ -+ for_each_child_of_node(np, child_np) { -+ match = of_match_node(spacemit_counter_of_id, child_np); -+ if (!of_device_is_available(child_np)) -+ continue; -+ ret = spacemit_of_counter_init(child_np, tid); -+ if (ret) -+ goto out; -+ } -+ return 0; -+out: -+ if (ret) -+ pr_err("Failed to get timer from dtb with error:%d\n", ret); -+ return ret; -+} -+ -+TIMER_OF_DECLARE(spacemit_timer, "spacemit,soc-timer", spacemit_of_timer_init); -+#endif --- -Armbian - -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Sat, 22 Jun 2024 09:59:12 -0400 -Subject: drivers: clocksource: timer-riscv.c: fixups - -Signed-off-by: Patrick Yavitz ---- - drivers/clocksource/timer-riscv.c | 61 +++++++++- - 1 file changed, 55 insertions(+), 6 deletions(-) - -diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c -index 111111111111..222222222222 100644 ---- a/drivers/clocksource/timer-riscv.c -+++ b/drivers/clocksource/timer-riscv.c -@@ -33,8 +33,46 @@ static int riscv_clock_next_event(unsigned long delta, - struct clock_event_device *ce) - { - u64 next_tval = get_cycles64() + delta; -+ csr_set(CSR_IE, IE_TIE); -+ -+ if (static_branch_likely(&riscv_sstc_available)) { -+#if defined(CONFIG_32BIT) -+ csr_write(CSR_STIMECMP, next_tval & 0xFFFFFFFF); -+ csr_write(CSR_STIMECMPH, next_tval >> 32); -+#else -+ csr_write(CSR_STIMECMP, next_tval); -+#endif -+ } else -+ sbi_set_timer(next_tval); -+ -+ return 0; -+} -+ -+static int riscv_set_state_shutdown(struct clock_event_device *ce) -+{ -+ u64 next_tval = 0xffffffffffffffff; -+ -+ csr_clear(CSR_IE, IE_TIE); -+ -+ if (static_branch_likely(&riscv_sstc_available)) { -+#if defined(CONFIG_32BIT) -+ csr_write(CSR_STIMECMP, next_tval & 0xFFFFFFFF); -+ csr_write(CSR_STIMECMPH, next_tval >> 32); -+#else -+ csr_write(CSR_STIMECMP, next_tval); -+#endif -+ } else -+ sbi_set_timer(next_tval); -+ -+ return 0; -+} -+ -+static int riscv_set_state_oneshot(struct clock_event_device *ce) -+{ -+ u64 next_tval = 0xffffffffffffffff; - - csr_set(CSR_IE, IE_TIE); -+ - if (static_branch_likely(&riscv_sstc_available)) { - #if defined(CONFIG_32BIT) - csr_write(CSR_STIMECMP, next_tval & 0xFFFFFFFF); -@@ -51,9 +89,12 @@ static int riscv_clock_next_event(unsigned long delta, - static unsigned int riscv_clock_event_irq; - static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = { - .name = "riscv_timer_clockevent", -- .features = CLOCK_EVT_FEAT_ONESHOT, -+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP, - .rating = 100, - .set_next_event = riscv_clock_next_event, -+ .set_state_shutdown = riscv_set_state_shutdown, -+ .set_state_oneshot_stopped = riscv_set_state_shutdown, -+ .set_state_oneshot = riscv_set_state_oneshot, - }; - - /* -@@ -158,6 +199,13 @@ static int __init riscv_timer_init_dt(struct device_node *n) - return -ENODEV; - } - -+#ifdef CONFIG_SOC_SPACEMIT -+ if (riscv_isa_extension_available(NULL, SSTC)) { -+ pr_info("Timer interrupt in 
S-mode is available via sstc extension\n"); -+ static_branch_enable(&riscv_sstc_available); -+ } -+#endif -+ - pr_info("%s: Registering clocksource cpuid [%d] hartid [%lu]\n", - __func__, cpuid, hartid); - error = clocksource_register_hz(&riscv_clocksource, riscv_timebase); -@@ -177,11 +225,6 @@ static int __init riscv_timer_init_dt(struct device_node *n) - return error; - } - -- if (riscv_isa_extension_available(NULL, SSTC)) { -- pr_info("Timer interrupt in S-mode is available via sstc extension\n"); -- static_branch_enable(&riscv_sstc_available); -- } -- - error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING, - "clockevents/riscv/timer:starting", - riscv_timer_starting_cpu, riscv_timer_dying_cpu); -@@ -189,6 +232,12 @@ static int __init riscv_timer_init_dt(struct device_node *n) - pr_err("cpu hp setup state failed for RISCV timer [%d]\n", - error); - -+#ifndef CONFIG_SOC_SPACEMIT -+ if (riscv_isa_extension_available(NULL, SSTC)) { -+ pr_info("Timer interrupt in S-mode is available via sstc extension\n"); -+ static_branch_enable(&riscv_sstc_available); -+ } -+#endif - return error; - } - --- -Armbian - diff --git a/patch/kernel/archive/spacemit-6.1/009-drivers-cpufreq.patch b/patch/kernel/archive/spacemit-6.1/009-drivers-cpufreq.patch deleted file mode 100644 index 9336bc98d6b4..000000000000 --- a/patch/kernel/archive/spacemit-6.1/009-drivers-cpufreq.patch +++ /dev/null @@ -1,314 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Fri, 21 Jun 2024 11:54:06 -0400 -Subject: add spacemit patch set - -source: https://gitee.com/bianbu-linux/linux-6.1 - -Signed-off-by: Patrick Yavitz ---- - drivers/cpufreq/Kconfig | 11 +- - drivers/cpufreq/Makefile | 1 + - drivers/cpufreq/cpufreq-dt-platdev.c | 4 + - drivers/cpufreq/spacemit-cpufreq.c | 215 ++++++++++ - drivers/cpuidle/cpuidle-riscv-sbi.c | 4 + - 5 files changed, 234 insertions(+), 1 deletion(-) - -diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig -index 111111111111..222222222222 100644 ---- a/drivers/cpufreq/Kconfig -+++ b/drivers/cpufreq/Kconfig -@@ -231,7 +231,7 @@ if X86 - source "drivers/cpufreq/Kconfig.x86" - endif - --if ARM || ARM64 -+if ARM || ARM64 || RISCV - source "drivers/cpufreq/Kconfig.arm" - endif - -@@ -321,5 +321,14 @@ config QORIQ_CPUFREQ - This adds the CPUFreq driver support for Freescale QorIQ SoCs - which are capable of changing the CPU's frequency dynamically. - -+config SPACEMIT_K1X_CPUFREQ -+ tristate "CPU frequency scaling driver for Spacemit K1X" -+ depends on OF && COMMON_CLK -+ select CPUFREQ_DT -+ select CPUFREQ_DT_PLATDEV -+ help -+ This adds the CPUFreq driver support for Freescale QorIQ SoCs -+ which are capable of changing the CPU's frequency dynamically. 
-+ - endif - endmenu -diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/cpufreq/Makefile -+++ b/drivers/cpufreq/Makefile -@@ -114,3 +114,4 @@ obj-$(CONFIG_LOONGSON1_CPUFREQ) += loongson1-cpufreq.o - obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o - obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o - obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o -+obj-$(CONFIG_SPACEMIT_K1X_CPUFREQ) += spacemit-cpufreq.o -diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c -index 111111111111..222222222222 100644 ---- a/drivers/cpufreq/cpufreq-dt-platdev.c -+++ b/drivers/cpufreq/cpufreq-dt-platdev.c -@@ -93,6 +93,8 @@ static const struct of_device_id allowlist[] __initconst = { - { .compatible = "xlnx,zynq-7000", }, - { .compatible = "xlnx,zynqmp", }, - -+ { .compatible = "spacemit,k1-x", }, -+ - { } - }; - -@@ -168,6 +170,8 @@ static const struct of_device_id blocklist[] __initconst = { - { .compatible = "qcom,msm8974", }, - { .compatible = "qcom,msm8960", }, - -+ { .compatible = "spacemit,k1-x", }, -+ - { } - }; - -diff --git a/drivers/cpufreq/spacemit-cpufreq.c b/drivers/cpufreq/spacemit-cpufreq.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/cpufreq/spacemit-cpufreq.c -@@ -0,0 +1,215 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "../opp/opp.h" -+ -+struct per_device_qos { -+ struct regulator *regulator; -+ struct freq_qos_request qos; -+}; -+ -+static DEFINE_MUTEX(regulator_mutex); -+static struct notifier_block vol_constraints_notifier; -+static struct freq_constraints vol_constraints; -+static struct per_device_qos *vol_qos[CONFIG_NR_CPUS]; -+ -+#ifdef CONFIG_CPU_HOTPLUG_THERMAL -+struct thermal_cooling_device **ghotplug_cooling; -+extern struct thermal_cooling_device ** -+of_hotplug_cooling_register(struct cpufreq_policy *policy); -+#endif -+ -+static int spacemit_vol_qos_notifier_call(struct notifier_block *nb, unsigned long action, void *data) -+{ -+ regulator_set_voltage(vol_qos[0]->regulator, action * 1000, action * 1000); -+ -+ return 0; -+} -+ -+static int spacemit_policy_notifier(struct notifier_block *nb, -+ unsigned long event, void *data) -+{ -+ int cpu, err; -+ u64 rates; -+ static int cci_init; -+ struct clk *cci_clk; -+ struct device *cpu_dev; -+ struct cpufreq_policy *policy = data; -+ struct opp_table *opp_table; -+ const char *strings; -+ -+ cpu = cpumask_first(policy->related_cpus); -+ cpu_dev = get_cpu_device(cpu); -+ opp_table = _find_opp_table(cpu_dev); -+ -+ if (cci_init == 0) { -+ cci_clk = of_clk_get_by_name(opp_table->np, "cci"); -+ of_property_read_u64_array(opp_table->np, "cci-hz", &rates, 1); -+ clk_set_rate(cci_clk, rates); -+ clk_put(cci_clk); -+ cci_init = 1; -+ } -+ -+ vol_qos[cpu] = devm_kzalloc(cpu_dev, sizeof(struct per_device_qos), GFP_KERNEL); -+ if (!vol_qos[cpu]) -+ return -ENOMEM; -+ -+ err = of_property_read_string_array(cpu_dev->of_node, "vin-supply-names", -+ &strings, 1); -+ if (err >= 0) { -+ vol_qos[cpu]->regulator = devm_regulator_get(cpu_dev, strings); -+ if (IS_ERR(vol_qos[cpu]->regulator)) { -+ pr_err("regulator supply %s, get failed\n", strings); -+ return PTR_ERR(vol_qos[cpu]->regulator); -+ } -+ -+ err = regulator_enable(vol_qos[cpu]->regulator); -+ -+ } else { -+ /* using the same regulator */ -+ vol_qos[cpu]->regulator = vol_qos[0]->regulator; -+ } -+ -+ if 
(vol_qos[cpu]->regulator) -+ freq_qos_add_request(&vol_constraints, &vol_qos[cpu]->qos, FREQ_QOS_MIN, -+ regulator_get_voltage(vol_qos[cpu]->regulator) / 1000); -+ -+#ifdef CONFIG_CPU_HOTPLUG_THERMAL -+ ghotplug_cooling = of_hotplug_cooling_register(policy); -+ if (!ghotplug_cooling) { -+ pr_err("register hotplug cpu cooling failed\n"); -+ return -EINVAL; -+ } -+#endif -+ return 0; -+} -+ -+static int spacemit_processor_notifier(struct notifier_block *nb, -+ unsigned long event, void *data) -+{ -+ int cpu; -+ struct device *cpu_dev; -+ struct cpufreq_freqs *freqs = (struct cpufreq_freqs *)data; -+ struct cpufreq_policy *policy = ( struct cpufreq_policy *)freqs->policy; -+ struct opp_table *opp_table; -+ struct device_node *np; -+ struct clk *tcm_clk, *ace_clk; -+ u64 rates; -+ u32 microvol; -+ -+ cpu = cpumask_first(policy->related_cpus); -+ cpu_dev = get_cpu_device(cpu); -+ opp_table = _find_opp_table(cpu_dev); -+ -+ for_each_available_child_of_node(opp_table->np, np) { -+ of_property_read_u64_array(np, "opp-hz", &rates, 1); -+ if (rates == freqs->new * 1000) { -+ of_property_read_u32(np, "opp-microvolt", &microvol); -+ break; -+ } -+ } -+ -+ /* get the tcm/ace clk handler */ -+ tcm_clk = of_clk_get_by_name(opp_table->np, "tcm"); -+ ace_clk = of_clk_get_by_name(opp_table->np, "ace"); -+ -+ if (event == CPUFREQ_PRECHANGE) { -+ -+ mutex_lock(&regulator_mutex); -+ -+ if (freqs->new > freqs->old) { -+ /* increase voltage first */ -+ if (vol_qos[cpu]->regulator) -+ freq_qos_update_request(&vol_qos[cpu]->qos, microvol / 1000); -+ } -+ -+ /** -+ * change the tcm/ace's frequency first. -+ * binary division is safe -+ */ -+ if (!IS_ERR(ace_clk)) { -+ clk_set_rate(ace_clk, clk_get_rate(clk_get_parent(ace_clk)) / 2); -+ clk_put(ace_clk); -+ } -+ -+ if (!IS_ERR(tcm_clk)) { -+ clk_set_rate(tcm_clk, clk_get_rate(clk_get_parent(tcm_clk)) / 2); -+ clk_put(tcm_clk); -+ } -+ } -+ -+ if (event == CPUFREQ_POSTCHANGE) { -+ -+ if (!IS_ERR(tcm_clk)) { -+ clk_get_rate(clk_get_parent(tcm_clk)); -+ /* get the tcm-hz */ -+ of_property_read_u64_array(np, "tcm-hz", &rates, 1); -+ /* then set rate */ -+ clk_set_rate(tcm_clk, rates); -+ clk_put(tcm_clk); -+ } -+ -+ if (!IS_ERR(ace_clk)) { -+ clk_get_rate(clk_get_parent(ace_clk)); -+ /* get the ace-hz */ -+ of_property_read_u64_array(np, "ace-hz", &rates, 1); -+ /* then set rate */ -+ clk_set_rate(ace_clk, rates); -+ clk_put(ace_clk); -+ } -+ -+ if (freqs->new < freqs->old) { -+ /* decrease the voltage last */ -+ if (vol_qos[cpu]->regulator) -+ freq_qos_update_request(&vol_qos[cpu]->qos, microvol / 1000); -+ } -+ -+ mutex_unlock(&regulator_mutex); -+ } -+ -+ dev_pm_opp_put_opp_table(opp_table); -+ -+ return 0; -+} -+ -+static struct notifier_block spacemit_processor_notifier_block = { -+ .notifier_call = spacemit_processor_notifier, -+}; -+ -+static struct notifier_block spacemit_policy_notifier_block = { -+ .notifier_call = spacemit_policy_notifier, -+}; -+ -+static int __init spacemit_processor_driver_init(void) -+{ -+ int ret; -+ -+ ret = cpufreq_register_notifier(&spacemit_processor_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); -+ if (ret) { -+ pr_err("register cpufreq notifier failed\n"); -+ return -EINVAL; -+ } -+ -+ ret = cpufreq_register_notifier(&spacemit_policy_notifier_block, CPUFREQ_POLICY_NOTIFIER); -+ if (ret) { -+ pr_err("register cpufreq notifier failed\n"); -+ return -EINVAL; -+ } -+ -+ vol_constraints_notifier.notifier_call = spacemit_vol_qos_notifier_call; -+ freq_constraints_init(&vol_constraints); -+ freq_qos_add_notifier(&vol_constraints, FREQ_QOS_MIN, 
&vol_constraints_notifier); -+ -+ return 0; -+} -+ -+arch_initcall(spacemit_processor_driver_init); -diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c -index 111111111111..222222222222 100644 ---- a/drivers/cpuidle/cpuidle-riscv-sbi.c -+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c -@@ -79,6 +79,10 @@ static int sbi_suspend_finisher(unsigned long suspend_type, - { - struct sbiret ret; - -+#if defined(CONFIG_SOC_SPACEMIT_K1PRO) || defined(CONFIG_SOC_SPACEMIT_K1X) -+ /* flush the local cache */ -+ sbi_flush_local_dcache_all(); -+#endif - ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, - suspend_type, resume_addr, opaque, 0, 0, 0); - --- -Armbian - diff --git a/patch/kernel/archive/spacemit-6.1/010-drivers-crypto.patch b/patch/kernel/archive/spacemit-6.1/010-drivers-crypto.patch deleted file mode 100644 index 998f747daeed..000000000000 --- a/patch/kernel/archive/spacemit-6.1/010-drivers-crypto.patch +++ /dev/null @@ -1,2820 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Fri, 21 Jun 2024 11:54:06 -0400 -Subject: add spacemit patch set - -source: https://gitee.com/bianbu-linux/linux-6.1 - -Signed-off-by: Patrick Yavitz ---- - drivers/crypto/Kconfig | 1 + - drivers/crypto/Makefile | 1 + - drivers/crypto/spacemit/Kconfig | 29 + - drivers/crypto/spacemit/Makefile | 3 + - drivers/crypto/spacemit/spacemit-ce-glue.c | 479 +++ - drivers/crypto/spacemit/spacemit_ce_engine.c | 1917 ++++++++++ - drivers/crypto/spacemit/spacemit_engine.h | 321 ++ - 7 files changed, 2751 insertions(+) - -diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig -index 111111111111..222222222222 100644 ---- a/drivers/crypto/Kconfig -+++ b/drivers/crypto/Kconfig -@@ -823,5 +823,6 @@ config CRYPTO_DEV_SA2UL - - source "drivers/crypto/keembay/Kconfig" - source "drivers/crypto/aspeed/Kconfig" -+source "drivers/crypto/spacemit/Kconfig" - - endif # CRYPTO_HW -diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/crypto/Makefile -+++ b/drivers/crypto/Makefile -@@ -53,3 +53,4 @@ obj-y += xilinx/ - obj-y += hisilicon/ - obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/ - obj-y += keembay/ -+obj-y += spacemit/ -diff --git a/drivers/crypto/spacemit/Kconfig b/drivers/crypto/spacemit/Kconfig -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/crypto/spacemit/Kconfig -@@ -0,0 +1,29 @@ -+# SPDX-License-Identifier: GPL-2.0 -+ -+config SPACEMIT_REE_ENGINE -+ tristate "SPACEMIT REE Crypto Engine" -+ depends on SOC_SPACEMIT_K1X -+ default n -+ help -+ Support SPACEMIT REE Crypto Engine. -+ -+config SPACEMIT_REE_AES -+ tristate "Enable AES in SPACEMIT REE Crytpo Engine" -+ depends on SPACEMIT_REE_ENGINE -+ default n -+ help -+ Support AES using SPACEMIT REE Crypto Engine. 
-+ -+config SPACEMIT_CRYPTO_DEBUG -+ tristate "Enable SPACEMIT REE Crytpo Engine Debug Interface" -+ depends on SPACEMIT_REE_ENGINE -+ default n -+ help -+ Enable spacemit crypto engine debug interface in userspace -+ -+config SPACEMIT_CRYPTO_SELF_TEST -+ tristate "Enable SPACEMIT REE Crytpo Engine Selftest" -+ depends on SPACEMIT_REE_ENGINE -+ default n -+ help -+ SPACEMIT REE Crypto Engine support selftest when probe -diff --git a/drivers/crypto/spacemit/Makefile b/drivers/crypto/spacemit/Makefile -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/crypto/spacemit/Makefile -@@ -0,0 +1,3 @@ -+# SPDX-License-Identifier: GPL-2.0 -+obj-$(CONFIG_SPACEMIT_REE_ENGINE) += spacemit_ce_engine.o -+obj-$(CONFIG_SPACEMIT_REE_AES) += spacemit-ce-glue.o -diff --git a/drivers/crypto/spacemit/spacemit-ce-glue.c b/drivers/crypto/spacemit/spacemit-ce-glue.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/crypto/spacemit/spacemit-ce-glue.c -@@ -0,0 +1,479 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * spacemit aes skcipher driver -+ * -+ * Copyright (C) 2023 Spacemit -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "crypto/skcipher.h" -+#include "spacemit_engine.h" -+ -+int aes_expandkey_nouse(struct crypto_aes_ctx *key, u8 const in[], int size){return 0;} -+#define aes_expandkey aes_expandkey_nouse -+#define PRIO 500 -+#define MODE "spacemit-ce1" -+char __aligned(8) align[16] = {0}; -+ -+extern int spacemit_aes_ecb_encrypt(int index, const unsigned char *pt,unsigned char *ct, u8 *key, unsigned int len, unsigned int blocks); -+extern int spacemit_aes_ecb_decrypt(int index, const unsigned char *ct,unsigned char *pt, u8 *key, unsigned int len, unsigned int blocks); -+extern int spacemit_aes_cbc_encrypt(int index, const unsigned char *pt,unsigned char *ct, u8 *key, unsigned int len, u8 *IV,unsigned int blocks); -+extern int spacemit_aes_cbc_decrypt(int index, const unsigned char *ct,unsigned char *pt, u8 *key, unsigned int len, u8 *IV,unsigned int blocks); -+extern int spacemit_aes_xts_encrypt(int index, const unsigned char *pt, unsigned char *ct, -+ u8 *key1, u8 *key2, unsigned int len, u8 *IV, -+ unsigned int blocks); -+extern int spacemit_aes_xts_decrypt(int index, const unsigned char *ct, unsigned char *pt, -+ u8 *key1, u8 *key2, unsigned int len, u8 *iv, -+ unsigned int blocks); -+extern int spacemit_crypto_aes_set_key(int index, struct crypto_tfm *tfm, const u8 *key,unsigned int keylen); -+extern void spacemit_aes_getaddr(unsigned char **in, unsigned char **out); -+extern void spacemit_aes_reladdr(void); -+ -+int aes_setkey(struct crypto_skcipher *tfm, const u8 *key,unsigned int keylen) -+{ -+ return spacemit_crypto_aes_set_key(0, &tfm->base, key, keylen); -+} -+ -+static int ecb_encrypt(struct skcipher_request *req) -+{ -+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); -+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); -+ int i; -+ unsigned char* map_addr; -+ struct scatterlist* sg, *start_srcsg, *start_dstsg; -+ int len = 0,sgl_len; -+ unsigned char* sg_va,*in_buffer,*out_buffer; -+ int total_len = req->cryptlen; -+ int page_len = 0, singal_len = 0; -+ -+ spacemit_aes_getaddr(&in_buffer,&out_buffer); -+ start_srcsg = req->src; -+ start_dstsg = req->dst; -+ for(i = 0; total_len > 0; i++){ -+ if(total_len > SPACEMIT_AES_BUFFER_LEN) -+ page_len = singal_len = SPACEMIT_AES_BUFFER_LEN; -+ else -+ page_len = singal_len = total_len; -+ 
-+ if(singal_len % AES_BLOCK_SIZE) -+ singal_len = (total_len / AES_BLOCK_SIZE + 1) * AES_BLOCK_SIZE; -+ -+ map_addr = in_buffer; -+ for(sg = start_srcsg,len = 0;lenlength) -+ { -+ if(len != 0) -+ sg = sg_next(sg); -+ sg_va = (unsigned char*)(PageHighMem(sg_page(sg)) ? kmap_atomic(sg_page(sg)) : page_address(sg_page(sg))) + offset_in_page(sg->offset); -+ memcpy(map_addr,sg_va,sg->length); -+ sgl_len++; -+ map_addr += sg->length; -+ } -+ if(page_len != singal_len) -+ memcpy(map_addr, align, singal_len-page_len); -+ start_srcsg = sg_next(sg); -+ -+ spacemit_aes_ecb_encrypt(0,in_buffer, out_buffer,(u8 *)(ctx->key_enc), -+ (unsigned int)(ctx->key_length), singal_len / AES_BLOCK_SIZE); -+ -+ map_addr = out_buffer; -+ for(sg = start_dstsg,len = 0;lenlength) -+ { -+ if(len != 0) -+ sg = sg_next(sg); -+ sg_va = (unsigned char*)(PageHighMem(sg_page(sg)) ? kmap_atomic(sg_page(sg)) : page_address(sg_page(sg))) + offset_in_page(sg->offset); -+ memcpy(sg_va,map_addr,sg->length); -+ sgl_len++; -+ map_addr += sg->length; -+ flush_dcache_page(sg_page(sg)); -+ } -+ start_dstsg = sg_next(sg); -+ -+ total_len = total_len - singal_len; -+ -+ } -+ spacemit_aes_reladdr(); -+ -+ return 0; -+} -+ -+static int ecb_decrypt(struct skcipher_request *req) -+{ -+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); -+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); -+ int i; -+ unsigned char* map_addr; -+ struct scatterlist* sg, *start_srcsg, *start_dstsg; -+ int len = 0,sgl_len; -+ unsigned char* sg_va,*in_buffer,*out_buffer; -+ int total_len = req->cryptlen; -+ int page_len = 0, singal_len = 0; -+ -+ spacemit_aes_getaddr(&in_buffer,&out_buffer); -+ start_srcsg = req->src; -+ start_dstsg = req->dst; -+ for(i = 0; total_len > 0; i++){ -+ if(total_len > SPACEMIT_AES_BUFFER_LEN) -+ page_len = singal_len = SPACEMIT_AES_BUFFER_LEN; -+ else -+ page_len = singal_len = total_len; -+ if(singal_len % AES_BLOCK_SIZE) -+ singal_len = (total_len / AES_BLOCK_SIZE + 1) * AES_BLOCK_SIZE; -+ -+ map_addr = in_buffer; -+ for(sg = start_srcsg,len = 0;lenlength) -+ { -+ if(len != 0) -+ sg = sg_next(sg); -+ sg_va = (unsigned char*)(PageHighMem(sg_page(sg)) ? kmap_atomic(sg_page(sg)) : page_address(sg_page(sg))) + offset_in_page(sg->offset); -+ memcpy(map_addr,sg_va,sg->length); -+ sgl_len++; -+ map_addr += sg->length; -+ } -+ start_srcsg = sg_next(sg); -+ if(page_len != singal_len) -+ memcpy(map_addr, align, singal_len-page_len); -+ -+ spacemit_aes_ecb_decrypt(0,in_buffer, out_buffer,(u8 *)(ctx->key_dec), -+ (unsigned int)(ctx->key_length), singal_len / AES_BLOCK_SIZE); -+ -+ map_addr = out_buffer; -+ for(sg = start_dstsg,len = 0;lenlength) -+ { -+ if(len != 0) -+ sg = sg_next(sg); -+ sg_va = (unsigned char*)(PageHighMem(sg_page(sg)) ? 
kmap_atomic(sg_page(sg)) : page_address(sg_page(sg))) + offset_in_page(sg->offset); -+ memcpy(sg_va,map_addr,sg->length); -+ sgl_len++; -+ map_addr += sg->length; -+ flush_dcache_page(sg_page(sg)); -+ } -+ start_dstsg = sg_next(sg); -+ -+ total_len = total_len - singal_len; -+ } -+ spacemit_aes_reladdr(); -+ -+ return 0; -+} -+ -+static int cbc_encrypt(struct skcipher_request *req) -+{ -+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); -+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); -+ int i; -+ unsigned char* map_addr; -+ struct scatterlist* sg, *start_srcsg, *start_dstsg; -+ int len = 0,sgl_len; -+ unsigned char* sg_va,*in_buffer,*out_buffer; -+ int total_len = req->cryptlen; -+ int page_len = 0, singal_len = 0; -+ -+ spacemit_aes_getaddr(&in_buffer,&out_buffer); -+ start_srcsg = req->src; -+ start_dstsg = req->dst; -+ for(i = 0; total_len > 0; i++){ -+ if(total_len > SPACEMIT_AES_BUFFER_LEN) -+ page_len = singal_len = SPACEMIT_AES_BUFFER_LEN; -+ else -+ page_len = singal_len = total_len; -+ -+ if(singal_len % AES_BLOCK_SIZE) -+ singal_len = (total_len / AES_BLOCK_SIZE + 1) * AES_BLOCK_SIZE; -+ -+ map_addr = in_buffer; -+ for(sg = start_srcsg,len = 0;lenlength) -+ { -+ if(len != 0) -+ sg = sg_next(sg); -+ sg_va = (unsigned char*)(PageHighMem(sg_page(sg)) ? kmap_atomic(sg_page(sg)) : page_address(sg_page(sg))) + offset_in_page(sg->offset); -+ memcpy(map_addr,sg_va,sg->length); -+ sgl_len++; -+ map_addr += sg->length; -+ } -+ if(page_len != singal_len) -+ memcpy(map_addr, align, singal_len-page_len); -+ start_srcsg = sg_next(sg); -+ -+ spacemit_aes_cbc_encrypt(0,in_buffer, out_buffer,(u8 *)(ctx->key_enc), -+ (unsigned int)(ctx->key_length), (u8 *)req->iv,singal_len / AES_BLOCK_SIZE); -+ -+ map_addr = out_buffer; -+ for(sg = start_dstsg,len = 0;lenlength) -+ { -+ if(len != 0) -+ sg = sg_next(sg); -+ sg_va = (unsigned char*)(PageHighMem(sg_page(sg)) ? kmap_atomic(sg_page(sg)) : page_address(sg_page(sg))) + offset_in_page(sg->offset); -+ memcpy(sg_va,map_addr,sg->length); -+ sgl_len++; -+ map_addr += sg->length; -+ flush_dcache_page(sg_page(sg)); -+ } -+ start_dstsg = sg_next(sg); -+ -+ total_len = total_len - singal_len; -+ -+ } -+ spacemit_aes_reladdr(); -+ -+ return 0; -+} -+ -+static int cbc_decrypt(struct skcipher_request *req) -+{ -+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); -+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); -+ int i; -+ unsigned char* map_addr; -+ struct scatterlist* sg, *start_srcsg, *start_dstsg; -+ int len = 0,sgl_len; -+ unsigned char* sg_va,*in_buffer,*out_buffer; -+ int total_len = req->cryptlen; -+ int page_len = 0, singal_len = 0; -+ -+ spacemit_aes_getaddr(&in_buffer,&out_buffer); -+ start_srcsg = req->src; -+ start_dstsg = req->dst; -+ for(i = 0; total_len > 0; i++){ -+ if(total_len > SPACEMIT_AES_BUFFER_LEN) -+ page_len = singal_len = SPACEMIT_AES_BUFFER_LEN; -+ else -+ page_len = singal_len = total_len; -+ if(singal_len % AES_BLOCK_SIZE) -+ singal_len = (total_len / AES_BLOCK_SIZE + 1) * AES_BLOCK_SIZE; -+ -+ map_addr = in_buffer; -+ for(sg = start_srcsg,len = 0;lenlength) -+ { -+ if(len != 0) -+ sg = sg_next(sg); -+ sg_va = (unsigned char*)(PageHighMem(sg_page(sg)) ? 
kmap_atomic(sg_page(sg)) : page_address(sg_page(sg))) + offset_in_page(sg->offset); -+ memcpy(map_addr,sg_va,sg->length); -+ sgl_len++; -+ map_addr += sg->length; -+ } -+ start_srcsg = sg_next(sg); -+ if(page_len != singal_len) -+ memcpy(map_addr, align, singal_len-page_len); -+ -+ spacemit_aes_cbc_decrypt(0,in_buffer, out_buffer,(u8 *)(ctx->key_dec), -+ (unsigned int)(ctx->key_length), (u8 *)req->iv,singal_len / AES_BLOCK_SIZE); -+ -+ map_addr = out_buffer; -+ for(sg = start_dstsg,len = 0;lenlength) -+ { -+ if(len != 0) -+ sg = sg_next(sg); -+ sg_va = (unsigned char*)(PageHighMem(sg_page(sg)) ? kmap_atomic(sg_page(sg)) : page_address(sg_page(sg))) + offset_in_page(sg->offset); -+ memcpy(sg_va,map_addr,sg->length); -+ sgl_len++; -+ map_addr += sg->length; -+ flush_dcache_page(sg_page(sg)); -+ } -+ start_dstsg = sg_next(sg); -+ -+ total_len = total_len - singal_len; -+ } -+ spacemit_aes_reladdr(); -+ -+ return 0; -+} -+ -+static int xts_encrypt(struct skcipher_request *req) -+{ -+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); -+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); -+ int i; -+ unsigned char* map_addr; -+ struct scatterlist* sg, *start_srcsg, *start_dstsg; -+ int len = 0,sgl_len; -+ uint32_t xts_key_len = ctx->key_length / 2; -+ unsigned char* sg_va,*in_buffer,*out_buffer; -+ int total_len = req->cryptlen; -+ int page_len = 0, singal_len = 0; -+ -+ spacemit_aes_getaddr(&in_buffer,&out_buffer); -+ start_srcsg = req->src; -+ start_dstsg = req->dst; -+ for(i = 0; total_len > 0; i++){ -+ if(total_len > SPACEMIT_AES_BUFFER_LEN) -+ page_len = singal_len = SPACEMIT_AES_BUFFER_LEN; -+ else -+ page_len = singal_len = total_len; -+ -+ if(singal_len % AES_BLOCK_SIZE) -+ singal_len = (total_len / AES_BLOCK_SIZE + 1) * AES_BLOCK_SIZE; -+ -+ map_addr = in_buffer; -+ for(sg = start_srcsg,len = 0;lenlength) -+ { -+ if(len != 0) -+ sg = sg_next(sg); -+ sg_va = (unsigned char*)(PageHighMem(sg_page(sg)) ? kmap_atomic(sg_page(sg)) : page_address(sg_page(sg))) + offset_in_page(sg->offset); -+ memcpy(map_addr,sg_va,sg->length); -+ sgl_len++; -+ map_addr += sg->length; -+ } -+ if(page_len != singal_len) -+ memcpy(map_addr, align, singal_len-page_len); -+ start_srcsg = sg_next(sg); -+ -+ spacemit_aes_xts_encrypt(0,in_buffer, out_buffer,(u8 *)(ctx->key_enc), -+ (u8 *)(ctx->key_enc + xts_key_len), xts_key_len, (u8 *)req->iv,singal_len / AES_BLOCK_SIZE); -+ -+ map_addr = out_buffer; -+ for(sg = start_dstsg,len = 0;lenlength) -+ { -+ if(len != 0) -+ sg = sg_next(sg); -+ sg_va = (unsigned char*)(PageHighMem(sg_page(sg)) ? 
kmap_atomic(sg_page(sg)) : page_address(sg_page(sg))) + offset_in_page(sg->offset); -+ memcpy(sg_va,map_addr,sg->length); -+ sgl_len++; -+ map_addr += sg->length; -+ flush_dcache_page(sg_page(sg)); -+ } -+ start_dstsg = sg_next(sg); -+ -+ total_len = total_len - singal_len; -+ -+ } -+ spacemit_aes_reladdr(); -+ -+ return 0; -+} -+ -+static int xts_decrypt(struct skcipher_request *req) -+{ -+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); -+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); -+ int i; -+ unsigned char* map_addr; -+ struct scatterlist* sg, *start_srcsg, *start_dstsg; -+ int len = 0,sgl_len; -+ uint32_t xts_key_len = ctx->key_length / 2; -+ unsigned char* sg_va,*in_buffer,*out_buffer; -+ int total_len = req->cryptlen; -+ int page_len = 0, singal_len = 0; -+ -+ spacemit_aes_getaddr(&in_buffer,&out_buffer); -+ start_srcsg = req->src; -+ start_dstsg = req->dst; -+ for(i = 0; total_len > 0; i++){ -+ if(total_len > SPACEMIT_AES_BUFFER_LEN) -+ page_len = singal_len = SPACEMIT_AES_BUFFER_LEN; -+ else -+ page_len = singal_len = total_len; -+ if(singal_len % AES_BLOCK_SIZE) -+ singal_len = (total_len / AES_BLOCK_SIZE + 1) * AES_BLOCK_SIZE; -+ -+ map_addr = in_buffer; -+ for(sg = start_srcsg,len = 0;lenlength) -+ { -+ if(len != 0) -+ sg = sg_next(sg); -+ sg_va = (unsigned char*)(PageHighMem(sg_page(sg)) ? kmap_atomic(sg_page(sg)) : page_address(sg_page(sg))) + offset_in_page(sg->offset); -+ memcpy(map_addr,sg_va,sg->length); -+ sgl_len++; -+ map_addr += sg->length; -+ } -+ start_srcsg = sg_next(sg); -+ if(page_len != singal_len) -+ memcpy(map_addr, align, singal_len-page_len); -+ -+ spacemit_aes_xts_decrypt(0,in_buffer, out_buffer,(u8 *)(ctx->key_dec), -+ (u8 *)(ctx->key_dec + xts_key_len), xts_key_len, (u8 *)req->iv,singal_len / AES_BLOCK_SIZE); -+ -+ map_addr = out_buffer; -+ for(sg = start_dstsg,len = 0;lenlength) -+ { -+ if(len != 0) -+ sg = sg_next(sg); -+ sg_va = (unsigned char*)(PageHighMem(sg_page(sg)) ? 
kmap_atomic(sg_page(sg)) : page_address(sg_page(sg))) + offset_in_page(sg->offset); -+ memcpy(sg_va,map_addr,sg->length); -+ sgl_len++; -+ map_addr += sg->length; -+ flush_dcache_page(sg_page(sg)); -+ } -+ start_dstsg = sg_next(sg); -+ -+ total_len = total_len - singal_len; -+ } -+ spacemit_aes_reladdr(); -+ -+ return 0; -+} -+ -+static struct skcipher_alg aes_algs[] = { -+{ -+ .base.cra_name = "ecb(aes)", -+ .base.cra_driver_name = "__driver-ecb-aes-" MODE, -+ .base.cra_priority = PRIO, -+ .base.cra_flags = CRYPTO_ALG_ASYNC, -+ .base.cra_blocksize = AES_BLOCK_SIZE, -+ .base.cra_ctxsize = sizeof(struct crypto_aes_ctx), -+ .base.cra_alignmask = 0xf, -+ .base.cra_module = THIS_MODULE, -+ .min_keysize = AES_MIN_KEY_SIZE, -+ .max_keysize = AES_MAX_KEY_SIZE, -+ .ivsize = AES_BLOCK_SIZE, -+ .setkey = aes_setkey, -+ .encrypt = ecb_encrypt, -+ .decrypt = ecb_decrypt, -+}, { -+ .base.cra_name = "cbc(aes)", -+ .base.cra_driver_name = "__driver-cbc-aes-" MODE, -+ .base.cra_priority = PRIO, -+ .base.cra_flags = CRYPTO_ALG_ASYNC, -+ .base.cra_blocksize = AES_BLOCK_SIZE, -+ .base.cra_ctxsize = sizeof(struct crypto_aes_ctx), -+ .base.cra_alignmask = 0xf, -+ .base.cra_module = THIS_MODULE, -+ .min_keysize = AES_MIN_KEY_SIZE, -+ .max_keysize = AES_MAX_KEY_SIZE, -+ .ivsize = AES_BLOCK_SIZE, -+ .setkey = aes_setkey, -+ .encrypt = cbc_encrypt, -+ .decrypt = cbc_decrypt, -+},{ -+ .base.cra_name = "xts(aes)", -+ .base.cra_driver_name = "__driver-xts-aes-" MODE, -+ .base.cra_priority = PRIO, -+ .base.cra_flags = CRYPTO_ALG_ASYNC, -+ .base.cra_blocksize = AES_BLOCK_SIZE, -+ .base.cra_ctxsize = sizeof(struct crypto_aes_ctx), -+ .base.cra_alignmask = 0xf, -+ .base.cra_module = THIS_MODULE, -+ .min_keysize = 2 * AES_MIN_KEY_SIZE, -+ .max_keysize = 2 * AES_MAX_KEY_SIZE, -+ .ivsize = AES_BLOCK_SIZE, -+ .setkey = aes_setkey, -+ .encrypt = xts_encrypt, -+ .decrypt = xts_decrypt, -+}, -+}; -+ -+static int __init aes_init(void) -+{ -+ return crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); -+} -+ -+static void __exit aes_exit(void) -+{ -+ crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); -+} -+ -+module_init(aes_init); -+module_exit(aes_exit); -+ -+MODULE_DESCRIPTION("AES-ECB/CBC using Spacemit CE Engine"); -+MODULE_ALIAS_CRYPTO("ecb(aes)"); -+MODULE_ALIAS_CRYPTO("cbc(aes)"); -+MODULE_LICENSE("GPL v2"); -diff --git a/drivers/crypto/spacemit/spacemit_ce_engine.c b/drivers/crypto/spacemit/spacemit_ce_engine.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/crypto/spacemit/spacemit_ce_engine.c -@@ -0,0 +1,1917 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * CE engine for spacemit-k1x -+ * -+ * Copyright (C) 2023 Spacemit -+ */ -+ -+#include "linux/dma-direction.h" -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include <../crypto/internal.h> -+#include -+#include -+#include -+#include -+#include "spacemit_engine.h" -+#include -+#include -+ -+struct device *dev; -+unsigned char *in_buffer, *out_buffer; -+uint64_t dma_addr_in, dma_addr_out; -+static struct regmap *ciu_base; -+static struct engine_info engine[ENGINE_MAX]; -+ -+static void dma_write32(int index, size_t offset, uint32_t value) -+{ -+ tcm_override_writel(value, (void*)(engine[index].engine_base + CE_DMA_REG_OFFSET + offset)); -+} -+static uint32_t dma_read32(int index, size_t offset) -+{ -+ return 
tcm_override_readl((void*)(engine[index].engine_base + CE_DMA_REG_OFFSET + offset)); -+} -+static void biu_write32(int index, size_t offset, uint32_t value) -+{ -+ tcm_override_writel(value, (void*)(engine[index].engine_base + CE_BIU_REG_OFFSET + offset)); -+} -+static uint32_t biu_read32(int index, size_t offset) -+{ -+ return tcm_override_readl((void*)(engine[index].engine_base + CE_BIU_REG_OFFSET + offset)); -+} -+static void adec_write32(int index, size_t offset, uint32_t value) -+{ -+ tcm_override_writel(value, (void*)(engine[index].engine_base + CE_ADEC_REG_OFFSET + offset)); -+} -+static uint32_t adec_read32(int index, size_t offset) -+{ -+ return tcm_override_readl((void*)(engine[index].engine_base + CE_ADEC_REG_OFFSET + offset)); -+} -+static void abus_write32(int index, size_t offset, uint32_t value) -+{ -+ tcm_override_writel(value, (void*)(engine[index].engine_base + CE_ABUS_REG_OFFSET + offset)); -+} -+static uint32_t abus_read32(int index, size_t offset) -+{ -+ return tcm_override_readl((void*)(engine[index].engine_base + CE_ABUS_REG_OFFSET + offset)); -+} -+static void crypto_write32(int index, size_t offset, uint32_t value) -+{ -+ tcm_override_writel(value, (void*)(engine[index].engine_base + CE_CRYPTO_REG_OFFSET + offset)); -+} -+static uint32_t crypto_read32(int index, size_t offset) -+{ -+ return tcm_override_readl((void*)(engine[index].engine_base + CE_CRYPTO_REG_OFFSET + offset)); -+} -+ -+/* just port from syscon_regmap_lookup_by_compatible */ -+struct regmap *spacemit_syscon_regmap_lookup_by_compatible(const char *s) -+{ -+ struct device_node *syscon_np; -+ struct regmap *regmap; -+ -+ syscon_np = of_find_compatible_node(NULL, NULL, s); -+ if (!syscon_np) -+ return ERR_PTR(-ENODEV); -+ -+ regmap = syscon_node_to_regmap(syscon_np); -+ of_node_put(syscon_np); -+ -+ return regmap; -+} -+EXPORT_SYMBOL_GPL(spacemit_syscon_regmap_lookup_by_compatible); -+ -+void dump_data(const unsigned char *tag, const unsigned char *str, unsigned int len) -+{ -+ char *p_addr; -+ uint8_t *buff; -+ int i = 0; -+ uint32_t size = 0; -+ -+ p_addr = (char *) kmalloc(len * 2 + 1, GFP_KERNEL); -+ if (!p_addr) { -+ dev_err(dev, "kmalloc failed!\n"); -+ return; -+ } -+ -+ memset(p_addr, 0, len * 2 + 1); -+ buff = (uint8_t *)str; -+ for (i = 0; i < len; i++) { -+ size += sprintf(p_addr + size, "%02x", buff[i]); -+ } -+ dev_info(dev," %s:%s\n", tag, p_addr); -+ -+ kfree((void *)p_addr); -+} -+ -+static void engine_irq_enable(int index) -+{ -+ uint32_t val; -+ -+ /* clear aes INT */ -+ val = crypto_read32(index, CE_CRYPTO_AES_INTRPT_SRC_REG); -+ crypto_write32(index, CE_CRYPTO_AES_INTRPT_SRC_REG, val); -+ -+ val = crypto_read32(index, CE_CRYPTO_AES_INTRPT_SRC_EN_REG); -+ val |= AES_INTERRUPT_MASK; -+ crypto_write32(index, CE_CRYPTO_AES_INTRPT_SRC_EN_REG, val); -+ -+ val = dma_read32(index, CE_DMA_OUT_INT_MASK); -+ val &=~DMA_INTERRUPT_MASK; -+ dma_write32(index, CE_DMA_OUT_INT_MASK, val); -+ -+ val = dma_read32(index, CE_DMA_IN_INT_MASK); -+ val &=~DMA_INTERRUPT_MASK; -+ dma_write32(index, CE_DMA_IN_INT_MASK, val); -+} -+static void enable_biu_mask(int index) -+{ -+ uint32_t val; -+ val = biu_read32(index, SP_INTERRUPT_MASK); -+ val &=~BIU_MASK; -+ biu_write32(index, SP_INTERRUPT_MASK, val); -+} -+static void enable_adec_mask(int index) -+{ -+ uint32_t val; -+ val = adec_read32(index, CE_ADEC_INT_MSK); -+ val &=~ADEC_MASK; -+ adec_write32(index, CE_ADEC_INT_MSK, val); -+} -+ -+static void dma_output_start(int index) -+{ -+ uint32_t val; -+ -+ val = dma_read32(index, CE_DMA_OUT_INT); -+ 
dma_write32(index, CE_DMA_OUT_INT, val); -+ -+ val = dma_read32(index, CE_DMA_OUT_CTRL); -+ val |= 0x1; -+ dma_write32(index, CE_DMA_OUT_CTRL, val); -+ -+ return; -+} -+ -+static void dma_output_stop(int index) -+{ -+ uint32_t val; -+ -+ val = dma_read32(index, CE_DMA_OUT_CTRL); -+ val &= ~0x1; -+ dma_write32(index, CE_DMA_OUT_CTRL, val); -+ -+ return; -+} -+ -+static int dma_input_config(int index, int rid_ext, int rid) -+{ -+ uint32_t val; -+ -+ val = dma_read32(index, CE_DMA_IN_CTRL); -+ val &= 0x0f0f0000; -+ val |= (0x7 << 28) | /* dis error check */ -+ ((rid_ext & 0xF) << 20) | /* rid ext */ -+ (0x1 << 18) | /* dis out-of-order */ -+ (0x1 << 17) | /* data 64 Byte aligned */ -+ (0x1 << 15) | /* FIFO bus size 64bit */ -+ (0x1 << 13) | /* burst type: Inc */ -+ (0x8 << 8) | /* burst len */ -+ ((rid & 0xF) << 4); -+ -+ dma_write32(index, CE_DMA_IN_CTRL, val); -+ -+ return 0; -+} -+ -+static int dma_input_address(int index, uint32_t src_addr, uint32_t src_size, bool chained) -+{ -+ if (chained == true) { -+ dma_write32(index, CE_DMA_IN_NX_LL_ADR, src_addr); -+ dma_write32(index, CE_DMA_IN_SRC_ADR, 0x0); -+ dma_write32(index, CE_DMA_IN_XFER_CNTR, 0x0); -+ } else { -+ dma_write32(index, CE_DMA_IN_NX_LL_ADR, 0x0); -+ dma_write32(index, CE_DMA_IN_SRC_ADR, src_addr); -+ dma_write32(index, CE_DMA_IN_XFER_CNTR, src_size); -+ } -+ -+ return 0; -+} -+ -+static void dma_input_start(int index) -+{ -+ uint32_t val; -+ -+ val = dma_read32(index, CE_DMA_IN_INT); -+ dma_write32(index, CE_DMA_IN_INT, val); -+ -+ val = dma_read32(index, CE_DMA_IN_CTRL); -+ val |= 0x1; -+ dma_write32(index, CE_DMA_IN_CTRL, val); -+ -+ return; -+} -+ -+static void dma_input_stop(int index) -+{ -+ uint32_t val; -+ -+ val = dma_read32(index, CE_DMA_IN_CTRL); -+ val &= ~0x1; -+ dma_write32(index, CE_DMA_IN_CTRL, val); -+ -+ return; -+} -+ -+ -+static int __maybe_unused dma_wait_int_output_finish(int index) -+{ -+ wait_for_completion(&engine[index].dma_output_done); -+ if(engine[index].dma_out_status != DMA_INOUT_DONE) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static int __maybe_unused dma_wait_int_input_finish(int index) -+{ -+ wait_for_completion(&engine[index].dma_input_done); -+ if(engine[index].dma_in_status != DMA_INOUT_DONE) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static int dma_output_config(int index, int wid_ext, int wid) -+{ -+ uint32_t val; -+ -+ val = dma_read32(index, CE_DMA_OUT_CTRL); -+ val &= 0x0f0f0000; -+ val |= (0x7 << 28) | /* dis error check */ -+ ((wid_ext & 0xF) << 20) | /* rid ext */ -+ (0x1 << 18) | /* dis out-of-order */ -+ (0x1 << 17) | /* data 64 Byte aligned */ -+ (0x1 << 15) | /* FIFO bus size 64bit */ -+ (0x1 << 13) | /* burst type: Inc */ -+ (0x8 << 8) | /* burst len */ -+ ((wid & 0xF) << 4); -+ -+ dma_write32(index, CE_DMA_OUT_CTRL, val); -+ -+ return 0; -+} -+ -+static int dma_output_address(int index, uint32_t dst_addr, uint32_t dst_size, bool chained) -+{ -+ if (chained == true) { -+ dma_write32(index, CE_DMA_OUT_NX_LL_ADR, dst_addr); -+ dma_write32(index, CE_DMA_OUT_DEST_ADR, 0x0); -+ dma_write32(index, CE_DMA_OUT_XFER_CNTR, 0x0); -+ } else { -+ dma_write32(index, CE_DMA_OUT_NX_LL_ADR, 0x0); -+ dma_write32(index, CE_DMA_OUT_DEST_ADR, dst_addr); -+ dma_write32(index, CE_DMA_OUT_XFER_CNTR, dst_size); -+ } -+ -+ return 0; -+} -+ -+static int adec_engine_hw_reset(int index, ADEC_ACC_ENG_T engine) -+{ -+ uint32_t val; -+ int tmp; -+ -+ if (engine == E_ACC_ENG_ALL) -+ tmp = 0xffff; -+ else -+ tmp = 1 << engine; -+ -+ val = adec_read32(index, CE_ADEC_CTRL); -+ val |= tmp; -+ 
adec_write32(index, CE_ADEC_CTRL, val); -+ val &= ~tmp; -+ adec_write32(index, CE_ADEC_CTRL, val); -+ -+ if (engine == E_ACC_ENG_DMA) { -+ regmap_update_bits(ciu_base, ENGINE_DMA_ADDR_HIGH_OFFSET, -+ (WADDR_BIT32 | RADDR_BIT32), 0); -+ } -+ -+ return 0; -+} -+ -+static int abus_set_mode(int index, ABUS_GRP_A_T grp_a_mode, -+ ABUS_GRP_B_T grp_b_mode, -+ ABUS_CROSS_BAR_T input_bar, -+ ABUS_CROSS_BAR_T output_bar) -+{ -+ uint32_t val; -+ -+ val = abus_read32(index, CE_ABUS_BUS_CTRL); -+ -+ val &= ~(0x77 << 0x4); -+ val |= (grp_a_mode << 0x4) | (grp_b_mode << 0x8); -+ -+ if (input_bar == E_ABUS_STRAIGHT) { -+ val &= ~(0x1 << 0x0); -+ } else if (input_bar == E_ABUS_CROSS) { -+ val |= (0x1 << 0x0); -+ } else { -+ return -EINVAL; -+ } -+ -+ if (output_bar == E_ABUS_STRAIGHT) { -+ val &= ~(0x1 << 0x2); -+ } else if (output_bar == E_ABUS_CROSS) { -+ val |= (0x1 << 0x2); -+ } else { -+ return -EINVAL; -+ } -+ -+ abus_write32(index, CE_ABUS_BUS_CTRL, val); -+ -+ return 0; -+} -+ -+static void crypto_aes_sw_reset(int index) -+{ -+ uint32_t val; -+ -+ val = 0x1; -+ crypto_write32(index, CE_CRYPTO_AES_CONTROL_REG, val); -+ val = 0x0; -+ crypto_write32(index, CE_CRYPTO_AES_CONTROL_REG, val); -+ -+ return; -+} -+static void crypto_aes_start(int index) -+{ -+ uint32_t val; -+ -+ val = 0x1; -+ crypto_write32(index, CE_CRYPTO_AES_COMMAND_REG, val); -+ -+ return; -+} -+ -+ -+static int crypto_aes_wait(int index) -+{ -+ wait_for_completion(&engine[index].aes_done); -+ if(engine[index].aes_status != AES_DONE) -+ { -+ dev_err_once(dev, "%s : %d : engine[%d].status = %d\n",__func__,__LINE__,index,engine[index].aes_status); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+static int crypto_engine_select(int index, CRYPTO_ENG_SEL_T engine) -+{ -+ uint32_t val; -+ -+ val = crypto_read32(index, CE_CRYPTO_ENGINE_SEL_REG); -+ val &= ~0x3; -+ -+ switch (engine) { -+ case E_ENG_AES: -+ val |= (0x1); -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ crypto_write32(index, CE_CRYPTO_ENGINE_SEL_REG, val); -+ -+ return 0; -+} -+ -+static int crypto_aes_set_iv(int index, const uint8_t *iv) -+{ -+ uint32_t val; -+ int reg_index; -+ -+ if (iv == NULL) -+ return -EINVAL; -+ -+ for(reg_index = 0; reg_index < 4; reg_index++) -+ { -+ val = ((iv[(reg_index << 2) + 0] & 0xFF)<< 0)|\ -+ ((iv[(reg_index << 2) + 1] & 0xFF)<< 8)|\ -+ ((iv[(reg_index << 2) + 2] & 0xFF)<< 16)|\ -+ ((iv[(reg_index << 2) + 3] & 0xFF)<< 24); -+ crypto_write32(index, CE_CRYPTO_IV_REG(reg_index),val); -+ } -+ -+ return 0; -+} -+ -+static int crypto_aes_get_iv(int index, uint8_t *iv) -+{ -+ uint32_t val; -+ int reg_index; -+ -+ if (iv == NULL) -+ return -EINVAL; -+ -+ for(reg_index = 0; reg_index < 4; reg_index++) -+ { -+ val = crypto_read32(index, CE_CRYPTO_IV_REG(reg_index)); -+ iv[(reg_index << 2) + 0] = val & 0xFF; -+ iv[(reg_index << 2) + 1] = (val >> 8) & 0xFF; -+ iv[(reg_index << 2) + 2] = (val >> 16) & 0xFF; -+ iv[(reg_index << 2) + 3] = (val >> 24) & 0xFF; -+ } -+ -+ return 0; -+} -+ -+static int crypto_aes_set_mode(int index, AES_MODE_T mode, -+ AES_OP_MODE_T op_mode, -+ AES_KEY_LEN_T keylen, bool use_rkey) -+{ -+ uint32_t val; -+ -+ crypto_engine_select(index, E_ENG_AES); -+ -+ val = crypto_read32(index, CE_CRYPTO_AES_CONFIG_REG); -+ val &= ~(0x7 << 0x3); -+ switch (mode) { -+ case E_AES_ECB: -+ val |= (0x0 << 0x3); -+ break; -+ case E_AES_CBC: -+ val |= (0x1 << 0x3); -+ break; -+ case E_AES_XTS: -+ val |= (0x3 << 0x3); -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ val &= ~(0x3 << 0x1); -+ switch (keylen) { -+ case E_AES_128: -+ val |= (0x0 << 
0x1); -+ break; -+ case E_AES_192: -+ val |= (0x2 << 0x1); -+ break; -+ case E_AES_256: -+ val |= (0x1 << 0x1); -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ val &= ~(0x1 << 0x0); -+ if (op_mode == E_AES_DECRYPT) { -+ val |= (0x1 << 0x0); -+ } else { -+ val |= (0x0 << 0x0); -+ } -+ -+ val &= ~(0x1 << 0x6); -+ if (use_rkey == false) { -+ val |= (0x0 << 0x6); -+ } else { -+ val |= (0x1 << 0x6); -+ } -+ -+ crypto_write32(index, CE_CRYPTO_AES_CONFIG_REG, val); -+ -+ return 0; -+} -+ -+static int crypto_aes_set_key1(int index, const uint8_t *key, AES_KEY_LEN_T keylen) -+{ -+ uint32_t val; -+ int reg_index, key_end; -+ -+ if (!key) -+ return 0; -+ -+ switch (keylen) { -+ case E_AES_128: -+ key_end = 4; -+ break; -+ case E_AES_192: -+ key_end = 6; -+ break; -+ case E_AES_256: -+ key_end = 8; -+ break; -+ default: -+ key_end = 0; -+ return -EINVAL; -+ } -+ -+ for (reg_index = 0; reg_index < 8; reg_index++) { -+ if (reg_index < key_end) { -+ val = ((key[0 + (reg_index << 2)] & 0xFF) << 0) | -+ ((key[1 + (reg_index << 2)] & 0xFF) << 8) | -+ ((key[2 + (reg_index << 2)] & 0xFF) << 16) | -+ ((key[3 + (reg_index << 2)] & 0xFF) << 24); -+ } else { -+ val = 0; -+ } -+ crypto_write32(index, CE_CRYPTO_K1_W_REG(reg_index), val); -+ } -+ -+ return 0; -+} -+ -+static int crypto_aes_set_key2(int index, const uint8_t *key, AES_KEY_LEN_T keylen) -+{ -+ uint32_t val; -+ int reg_index, key_end; -+ -+ if (!key) -+ return 0; -+ -+ switch (keylen) { -+ case E_AES_128: -+ key_end = 4; -+ break; -+ case E_AES_192: -+ key_end = 6; -+ break; -+ case E_AES_256: -+ key_end = 8; -+ break; -+ default: -+ key_end = 0; -+ return -EINVAL; -+ } -+ -+ for (reg_index = 0; reg_index < 8; reg_index++) { -+ if (reg_index < key_end) { -+ val = ((key[0 + (reg_index << 2)] & 0xFF) << 0) | -+ ((key[1 + (reg_index << 2)] & 0xFF) << 8) | -+ ((key[2 + (reg_index << 2)] & 0xFF) << 16) | -+ ((key[3 + (reg_index << 2)] & 0xFF) << 24); -+ } else { -+ val = 0; -+ } -+ crypto_write32(index, CE_CRYPTO_K2_W_REG(reg_index), val); -+ } -+ -+ return 0; -+} -+ -+int ce_rijndael_setup_internal(int index, const unsigned char *key, int keylen) -+{ -+ if (!key || keylen <= 0) { -+ goto error; -+ } -+ -+ adec_engine_hw_reset(index, E_ACC_ENG_DMA); -+ adec_engine_hw_reset(index, E_ACC_ENG_CRYPTO); -+ abus_set_mode(index, E_ABUS_GRP_A_HASH, E_ABUS_GRP_B_AES, E_ABUS_STRAIGHT, E_ABUS_STRAIGHT); -+ crypto_aes_sw_reset(index); -+ -+ enable_biu_mask(index); -+ enable_adec_mask(index); -+ engine_irq_enable(index); -+ -+ return 0; -+error: -+ return -ENOKEY; -+} -+ -+BLOCKING_NOTIFIER_HEAD(spacemit_crypto_chain); -+ -+static struct crypto_alg *spacemit_crypto_mod_get(struct crypto_alg *alg) -+{ -+ return try_module_get(alg->cra_module) ? 
crypto_alg_get(alg) : NULL; -+} -+ -+static void spacemit_crypto_mod_put(struct crypto_alg *alg) -+{ -+ struct module *module = alg->cra_module; -+ -+ crypto_alg_put(alg); -+ module_put(module); -+} -+ -+static void spacemit_crypto_larval_destroy(struct crypto_alg *alg) -+{ -+ struct crypto_larval *larval = (void *)alg; -+ -+ BUG_ON(!crypto_is_larval(alg)); -+ if (!IS_ERR_OR_NULL(larval->adult)) -+ spacemit_crypto_mod_put(larval->adult); -+ kfree(larval); -+} -+ -+static struct crypto_larval *spacemit_crypto_larval_alloc(const char *name, u32 type, u32 mask) -+{ -+ struct crypto_larval *larval; -+ -+ larval = kzalloc(sizeof(*larval), GFP_KERNEL); -+ if (!larval) -+ return ERR_PTR(-ENOMEM); -+ -+ larval->mask = mask; -+ larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type; -+ larval->alg.cra_priority = -1; -+ larval->alg.cra_destroy = spacemit_crypto_larval_destroy; -+ -+ strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME); -+ init_completion(&larval->completion); -+ -+ return larval; -+} -+ -+static struct crypto_alg *__spacemit_crypto_alg_lookup(const char *name, u32 type, -+ u32 mask) -+{ -+ struct crypto_alg *q, *alg = NULL; -+ int best = -2; -+ -+ list_for_each_entry(q, &crypto_alg_list, cra_list) { -+ int exact, fuzzy; -+ -+ if (q->cra_flags & (CRYPTO_ALG_DEAD | CRYPTO_ALG_DYING)) -+ continue; -+ -+ if ((q->cra_flags ^ type) & mask) -+ continue; -+ -+ if ((q->cra_flags & CRYPTO_ALG_LARVAL) && -+ !crypto_is_test_larval((struct crypto_larval *)q) && -+ ((struct crypto_larval *)q)->mask != mask) -+ continue; -+ -+ exact = !strcmp(q->cra_driver_name, name); -+ fuzzy = !strcmp(q->cra_name, name); -+ if (!exact && !(fuzzy && q->cra_priority > best)) -+ continue; -+ -+ if (unlikely(!spacemit_crypto_mod_get(q))) -+ continue; -+ -+ best = q->cra_priority; -+ if (alg) -+ spacemit_crypto_mod_put(alg); -+ alg = q; -+ -+ if (exact) -+ break; -+ } -+ -+ return alg; -+} -+ -+static struct crypto_alg *spacemit_crypto_alg_lookup(const char *name, u32 type, -+ u32 mask) -+{ -+ struct crypto_alg *alg; -+ u32 test = 0; -+ -+ if (!((type | mask) & CRYPTO_ALG_TESTED)) -+ test |= CRYPTO_ALG_TESTED; -+ -+ down_read(&crypto_alg_sem); -+ alg = __spacemit_crypto_alg_lookup(name, type | test, mask | test); -+ if (!alg && test) { -+ alg = __spacemit_crypto_alg_lookup(name, type, mask); -+ if (alg && !crypto_is_larval(alg)) { -+ /* Test failed */ -+ spacemit_crypto_mod_put(alg); -+ alg = ERR_PTR(-ELIBBAD); -+ } -+ } -+ up_read(&crypto_alg_sem); -+ -+ return alg; -+} -+ -+static void spacemit_crypto_start_test(struct crypto_larval *larval) -+{ -+ if (!larval->alg.cra_driver_name[0]) -+ return; -+ -+ if (larval->test_started) -+ return; -+ -+ down_write(&crypto_alg_sem); -+ if (larval->test_started) { -+ up_write(&crypto_alg_sem); -+ return; -+ } -+ -+ larval->test_started = true; -+ up_write(&crypto_alg_sem); -+ -+ crypto_wait_for_test(larval); -+} -+ -+static struct crypto_alg *spacemit_crypto_larval_wait(struct crypto_alg *alg) -+{ -+ struct crypto_larval *larval = (void *)alg; -+ long timeout; -+ -+ if (!static_branch_likely(&crypto_boot_test_finished)) -+ spacemit_crypto_start_test(larval); -+ -+ timeout = wait_for_completion_killable_timeout( -+ &larval->completion, 60 * HZ); -+ -+ alg = larval->adult; -+ if (timeout < 0) -+ alg = ERR_PTR(-EINTR); -+ else if (!timeout) -+ alg = ERR_PTR(-ETIMEDOUT); -+ else if (!alg) -+ alg = ERR_PTR(-ENOENT); -+ else if (IS_ERR(alg)) -+ ; -+ else if (crypto_is_test_larval(larval) && -+ !(alg->cra_flags & CRYPTO_ALG_TESTED)) -+ alg = ERR_PTR(-EAGAIN); -+ else if 
(!spacemit_crypto_mod_get(alg)) -+ alg = ERR_PTR(-EAGAIN); -+ spacemit_crypto_mod_put(&larval->alg); -+ -+ return alg; -+} -+ -+static struct crypto_alg *spacemit_crypto_larval_add(const char *name, -+ u32 type, u32 mask) -+{ -+ struct crypto_alg *alg; -+ struct crypto_larval *larval; -+ -+ larval = spacemit_crypto_larval_alloc(name, type, mask); -+ if (IS_ERR(larval)) -+ return ERR_CAST(larval); -+ -+ refcount_set(&larval->alg.cra_refcnt, 2); -+ -+ down_write(&crypto_alg_sem); -+ alg = __spacemit_crypto_alg_lookup(name, type, mask); -+ if (!alg) { -+ alg = &larval->alg; -+ list_add(&alg->cra_list, &crypto_alg_list); -+ } -+ up_write(&crypto_alg_sem); -+ -+ if (alg != &larval->alg) { -+ kfree(larval); -+ if (crypto_is_larval(alg)) -+ alg = spacemit_crypto_larval_wait(alg); -+ } -+ -+ return alg; -+} -+ -+static void spacemit_crypto_larval_kill(struct crypto_alg *alg) -+{ -+ struct crypto_larval *larval = (void *)alg; -+ -+ down_write(&crypto_alg_sem); -+ list_del(&alg->cra_list); -+ up_write(&crypto_alg_sem); -+ complete_all(&larval->completion); -+ crypto_alg_put(alg); -+} -+ -+static struct crypto_alg *spacemit_crypto_larval_lookup(const char *name, -+ u32 type, u32 mask) -+{ -+ struct crypto_alg *alg; -+ -+ if (!name) -+ return ERR_PTR(-ENOENT); -+ -+ type &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); -+ mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); -+ -+ alg = spacemit_crypto_alg_lookup(name, type, mask); -+ if (!alg && !(mask & CRYPTO_NOLOAD)) { -+ request_module("crypto-%s", name); -+ -+ if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask & -+ CRYPTO_ALG_NEED_FALLBACK)) -+ request_module("crypto-%s-all", name); -+ -+ alg = spacemit_crypto_alg_lookup(name, type, mask); -+ } -+ -+ if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg)) -+ alg = spacemit_crypto_larval_wait(alg); -+ else if (!alg) -+ alg = spacemit_crypto_larval_add(name, type, mask); -+ -+ return alg; -+} -+ -+static int spacemit_crypto_probing_notify(unsigned long val, void *v) -+{ -+ int ok; -+ -+ ok = blocking_notifier_call_chain(&spacemit_crypto_chain, val, v); -+ if (ok == NOTIFY_DONE) { -+ request_module("cryptomgr"); -+ ok = blocking_notifier_call_chain(&spacemit_crypto_chain, val, v); -+ } -+ -+ return ok; -+} -+ -+static struct crypto_alg *spacemit_crypto_alg_mod_lookup(const char *name, -+ u32 type, u32 mask) -+{ -+ struct crypto_alg *alg; -+ struct crypto_alg *larval; -+ int ok; -+ -+ /* -+ * If the internal flag is set for a cipher, require a caller to -+ * to invoke the cipher with the internal flag to use that cipher. -+ * Also, if a caller wants to allocate a cipher that may or may -+ * not be an internal cipher, use type | CRYPTO_ALG_INTERNAL and -+ * !(mask & CRYPTO_ALG_INTERNAL). 
-+ */ -+ if (!((type | mask) & CRYPTO_ALG_INTERNAL)) -+ mask |= CRYPTO_ALG_INTERNAL; -+ -+ larval = spacemit_crypto_larval_lookup(name, type, mask); -+ if (IS_ERR(larval) || !crypto_is_larval(larval)) -+ return larval; -+ -+ ok = spacemit_crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval); -+ -+ if (ok == NOTIFY_STOP) { -+ alg = spacemit_crypto_larval_wait(larval); -+ } else { -+ spacemit_crypto_mod_put(larval); -+ alg = ERR_PTR(-ENOENT); -+ } -+ spacemit_crypto_larval_kill(larval); -+ return alg; -+} -+ -+static struct crypto_alg *spacemit_crypto_find_alg(const char *alg_name, -+ const struct crypto_type *frontend, -+ u32 type, u32 mask) -+{ -+ if (frontend) { -+ type &= frontend->maskclear; -+ mask &= frontend->maskclear; -+ type |= frontend->type; -+ mask |= frontend->maskset; -+ } -+ -+ return spacemit_crypto_alg_mod_lookup(alg_name, type, mask); -+} -+ -+static unsigned int spacemit_crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) -+{ -+ const struct crypto_type *type_obj = alg->cra_type; -+ unsigned int len; -+ -+ len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1); -+ if (type_obj) -+ return len + type_obj->ctxsize(alg, type, mask); -+ -+ switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { -+ default: -+ BUG(); -+ -+ case CRYPTO_ALG_TYPE_CIPHER: -+ len += alg->cra_ctxsize; -+ break; -+ -+ case CRYPTO_ALG_TYPE_COMPRESS: -+ len += alg->cra_ctxsize; -+ break; -+ } -+ -+ return len; -+} -+ -+static unsigned int sw_aes_ce_decrypt(unsigned char *in, unsigned char *out, -+ unsigned char *key, unsigned int keylen) -+{ -+ int ret = 0; -+ struct crypto_alg *alg; -+ struct crypto_tfm *tfm; -+ unsigned int tfm_size; -+ struct crypto_aes_ctx aes_ctx; -+ -+ alg = spacemit_crypto_find_alg("aes-generic", NULL, 0, 0); -+ if (IS_ERR(alg)) { -+ dev_err_once(dev, "%s : %d : find crypto sw-aes-ce failed!\n", -+ __func__,__LINE__); -+ ret = -1; -+ goto exit; -+ } -+ dev_err_once(dev, "%s : %d : algo drv name %s.\n",__func__,__LINE__, -+ alg->cra_driver_name); -+ -+ tfm_size = sizeof(*tfm) + spacemit_crypto_ctxsize(alg, 0, 0); -+ tfm = kzalloc(tfm_size, GFP_KERNEL); -+ if (tfm == NULL) { -+ dev_err_once(dev, "%s : %d : alloc tfm failed.\n",__func__, -+ __LINE__); -+ ret = -1; -+ goto exit; -+ } -+ tfm->__crt_ctx[0] = (void *)&aes_ctx; -+ -+ alg->cra_cipher.cia_setkey(tfm, (const uint8_t *)key, keylen); -+ alg->cra_cipher.cia_decrypt(tfm, out, in); -+ -+ kfree(tfm); -+exit: -+ return ret; -+} -+ -+static int ce_aes_process_nblocks(int index, const unsigned char *buf_in, unsigned char *buf_out, -+ unsigned long blocks, symmetric_key * skey1,symmetric_key * skey2, -+ AES_MODE_T mode,uint8_t *inv, AES_OP_MODE_T op) -+{ -+ int ret; -+ uint32_t dma_addr_in_low,dma_addr_in_high; -+ uint32_t dma_addr_out_low,dma_addr_out_high; -+ uint32_t val; -+ -+ dma_sync_single_for_device(dev,dma_addr_in,blocks*16,DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, dma_addr_in)) { -+ dev_err(dev, "failed to map buffer\n"); -+ return -EFAULT; -+ } -+ if (dma_mapping_error(dev, dma_addr_out)) { -+ dev_err(dev, "failed to map buffer\n"); -+ return -EFAULT; -+ } -+ -+ dma_addr_in_high = upper_32_bits(dma_addr_in); -+ dma_addr_in_low = lower_32_bits(dma_addr_in); -+ dma_addr_out_high = upper_32_bits(dma_addr_out); -+ dma_addr_out_low = lower_32_bits(dma_addr_out); -+ -+ /*reset the HW before using it*/ -+ adec_engine_hw_reset(index, E_ACC_ENG_DMA); -+ adec_engine_hw_reset(index, E_ACC_ENG_CRYPTO); -+ abus_set_mode(index, E_ABUS_GRP_A_HASH, E_ABUS_GRP_B_AES, E_ABUS_STRAIGHT, E_ABUS_STRAIGHT); -+ 
crypto_aes_sw_reset(index); -+ -+ /* -+ The CIU REGISTER(ENGINE_DMA_ADDR_HIGH_OFFSET,offset=0x70) is -+ represent for the high address. The bits' definition: -+ BIT4 : the write addr of engine1 -+ BIT5 : the read addr of engine1 -+ TODO:change below if had engine2 -+ BIT8-11 : the write addr of engine2 -+ BIT12-15 : the read addr of engine2 -+ */ -+ regmap_read(ciu_base, ENGINE_DMA_ADDR_HIGH_OFFSET, &val); -+ switch (index) { -+ case 0: -+ val &= ~(WADDR_BIT32 | RADDR_BIT32); -+ val |= ((dma_addr_out_high&0x1) << 4 | (dma_addr_in_high&0x1) << 5); -+ break; -+ case 1: -+ val &= ~0xFF00; -+ val |= ((dma_addr_out_high&0xF) << 8 | (dma_addr_in_high&0xF) << 12);; -+ break; -+ default: -+ ret = -EINVAL; -+ dev_err_once(dev, "%s : %d : index is error!\n",__func__,__LINE__); -+ goto error; -+ } -+ regmap_write(ciu_base, ENGINE_DMA_ADDR_HIGH_OFFSET, val); -+ -+ if ((unsigned long) dma_addr_in & 0x3 || (unsigned long) dma_addr_out & 0x3) { -+ ret = -EINVAL; -+ dev_err_once(dev, "%s : %d : dma_addr_in or dma_addr_out is unaligned!\n",__func__,__LINE__); -+ goto error; -+ } -+ -+ enable_biu_mask(index); -+ enable_adec_mask(index); -+ engine_irq_enable(index); -+ -+ dma_input_config(index, 0, 0); -+ dma_output_config(index, 0, 1); -+ -+ ret = dma_input_address(index, dma_addr_in_low, blocks << 2, false); -+ if (ret != 0) -+ { -+ ret = -EINVAL; -+ dev_err_once(dev, "%s : %d : dma_input_address failed!\n",__func__,__LINE__); -+ goto error; -+ } -+ -+ ret = dma_output_address(index, dma_addr_out_low, blocks << 2, false); -+ if (ret != 0) -+ { -+ ret = -EINVAL; -+ dev_err_once(dev, "%s : %d : dma_output_address failed!\n",__func__,__LINE__); -+ goto error; -+ } -+ -+ /* Process KEY*/ -+ if (skey1 == NULL) { -+ ret = -EINVAL; -+ dev_err_once(dev, "%s : %d : skey1 is NULL!\n",__func__,__LINE__); -+ goto error; -+ } -+ ret = crypto_aes_set_mode(index, mode, op, skey1->rijndael.Nr , false); -+ if (ret) { -+ dev_err_once(dev, "%s : %d : crypto_aes_set_mode failed!\n",__func__,__LINE__); -+ goto error; -+ } -+ switch(op) { -+ case E_AES_ENCRYPT: -+ ret = crypto_aes_set_key1(index, (uint8_t *)skey1->rijndael.eK, skey1->rijndael.Nr ); -+ break; -+ case E_AES_DECRYPT: -+ ret = crypto_aes_set_key1(index, (uint8_t *)skey1->rijndael.dK, skey1->rijndael.Nr ); -+ break; -+ default: -+ dev_err_once(dev, "%s : %d : cmd(op) is invalid!\n",__func__,__LINE__); -+ ret = -EINVAL; -+ } -+ if (ret) { -+ dev_err_once(dev, "%s : %d : set_key1 failed!\n",__func__,__LINE__); -+ goto error; -+ } -+ -+ /* Process IV*/ -+ switch(mode) { -+ case E_AES_XTS: -+ if (!skey2) { -+ dev_err_once(dev, "%s : %d: skey2 is invalid in xts mode.\n", __func__,__LINE__); -+ ret = -EINVAL; -+ goto error; -+ } -+ if (op == E_AES_ENCRYPT) -+ ret = crypto_aes_set_key2(index, (uint8_t *)skey2->rijndael.eK, skey2->rijndael.Nr); -+ else -+ ret = crypto_aes_set_key2(index, (uint8_t *)skey2->rijndael.dK, skey2->rijndael.Nr); -+ if (ret != 0) { -+ dev_err_once(dev, "%s : %d : crypto_aes_set_key2 failed!\n",__func__,__LINE__); -+ goto error; -+ } -+ break; -+ case E_AES_CBC: -+ case E_AES_CTR: -+ ret = crypto_aes_set_iv(index, inv); -+ if (ret != 0) { -+ dev_err_once(dev, "%s : %d : crypto_aes_set_iv failed!\n",__func__,__LINE__); -+ goto error; -+ } -+ break; -+ default: -+ break; -+ } -+ -+ crypto_write32(index, CE_CRYPTO_AES_STREAM_SIZE_REG, blocks << 4); -+ -+ dma_input_start(index); -+ dma_output_start(index); -+ crypto_aes_start(index); -+ -+ ret = dma_wait_int_input_finish(index); -+ if (ret) { -+ dev_err_once(dev, "%s : %d : dma_wait_input_finish failed! 
ret = %d\n",__func__,__LINE__,ret); -+ goto error; -+ } -+ -+ ret = crypto_aes_wait(index); -+ if (ret) { -+ dev_err_once(dev, "%s : %d : crypto_aes_wait failed! ret = %d\n",__func__,__LINE__,ret); -+ goto error; -+ } -+ ret = dma_wait_int_output_finish(index); -+ if (ret) { -+ dev_err_once(dev, "%s : %d : dma_wait_output_finish failed! ret = %d\n",__func__,__LINE__,ret); -+ goto error; -+ } -+ dma_sync_single_for_cpu(dev,dma_addr_out,blocks*16,DMA_FROM_DEVICE); -+ -+ /* Readback IV after operation*/ -+ switch(mode) { -+ case E_AES_XTS: -+ case E_AES_CBC: -+ case E_AES_CTR: -+ ret = crypto_aes_get_iv(index, inv); -+ if (ret != 0) { -+ dev_err_once(dev, "%s : %d : crypto_aes_get_iv failed!\n",__func__,__LINE__); -+ goto error; -+ } -+ break; -+ default: -+ break; -+ } -+ return 0; -+error: -+ dev_err_once(dev, "====================failed==============\n"); -+ dev_err_once(dev, "%s : %d : failed! mode=%s,op=%s,keylen=%d\n",__func__,__LINE__, -+ (mode==E_AES_CBC?"cbc":(mode==E_AES_CTR?"ctr":(mode==E_AES_ECB?"ecb":(mode==E_AES_XTS?"xts":"err")))), -+ (op==E_AES_ENCRYPT?"encrypt":(op==E_AES_DECRYPT?"decrypt":"err")), -+ (skey1==NULL?0:skey1->rijndael.Nr)); -+ return ret; -+} -+ -+static int ce_aes_process_nblocks_noalign(int index, const unsigned char *buf_in, unsigned char *buf_out, -+ unsigned long blocks, symmetric_key * skey1, symmetric_key * skey2, -+ AES_MODE_T mode, uint8_t *inv, AES_OP_MODE_T op) { -+ int ret; -+ int len_bytes = 0; -+ int step_bytes = 0; -+ unsigned char *in_cpy = NULL, *out_cpy = NULL; -+ unsigned char *in_work = NULL, *out_work = NULL; -+ unsigned char *aligned_buf_1 = &engine[index].internal_working_buffer[0]; -+ unsigned char *aligned_buf_2 = &engine[index].internal_working_buffer[WORK_BUF_SIZE]; -+ -+ if ((unsigned long) buf_in & 0x3 || (unsigned long) buf_out & 0x3) { -+ len_bytes = blocks << 4; -+ in_cpy = (unsigned char *) buf_in; -+ out_cpy = (unsigned char *) buf_out; -+ -+ while(len_bytes) { -+ step_bytes = len_bytes > WORK_BUF_SIZE ? WORK_BUF_SIZE : len_bytes; -+ if((unsigned long) buf_in & 0x3) { -+ memcpy(aligned_buf_1, in_cpy, step_bytes); -+ in_work = aligned_buf_1; -+ } else { -+ in_work = in_cpy; -+ } -+ len_bytes -= step_bytes; -+ in_cpy += step_bytes; -+ if((unsigned long) buf_out & 0x3) { -+ memset(aligned_buf_2, 0x0, WORK_BUF_SIZE); -+ out_work = aligned_buf_2; -+ } else { -+ out_work = out_cpy; -+ } -+ ret = ce_aes_process_nblocks(index, in_work, out_work, step_bytes >> 4, skey1, skey2, mode, inv, op); -+ if (ret != 0) -+ goto exit; -+ if((unsigned long) buf_out & 0x3) { -+ memcpy(out_cpy, aligned_buf_2, step_bytes); -+ } -+ out_cpy += step_bytes; -+ if ((mode == E_AES_XTS) && (len_bytes != 0) && (len_bytes > WORK_BUF_SIZE)) { -+ unsigned char key_local[32]; -+ unsigned int key_len = (skey2->rijndael.Nr < 32) ? skey2->rijndael.Nr : 32; -+ -+ if (op == E_AES_ENCRYPT) -+ memcpy(key_local, (unsigned char *)skey2->rijndael.eK, key_len); -+ else -+ memcpy(key_local, (unsigned char *)skey2->rijndael.dK, key_len); -+ sw_aes_ce_decrypt(inv, inv, key_local, key_len); -+ } -+ } -+ } else { -+ ret = ce_aes_process_nblocks(index, buf_in, buf_out, blocks, skey1, skey2, mode, inv, op); -+ if (!ret && (mode == E_AES_XTS)) { -+ unsigned char key_local[32]; -+ unsigned int key_len = (skey2->rijndael.Nr < 32) ? 
skey2->rijndael.Nr : 32; -+ -+ if (op == E_AES_ENCRYPT) -+ memcpy(key_local, (unsigned char *)skey2->rijndael.eK, key_len); -+ else -+ memcpy(key_local, (unsigned char *)skey2->rijndael.dK, key_len); -+ sw_aes_ce_decrypt(inv, inv, key_local, key_len); -+ } -+ } -+ -+exit: -+ memset(aligned_buf_1, 0x0, WORK_BUF_SIZE); -+ memset(aligned_buf_2, 0x0, WORK_BUF_SIZE); -+ return ret; -+} -+ -+//--------------------------------------------------------- -+int spacemit_crypto_aes_set_key(int index, struct crypto_tfm *tfm, const u8 *key,unsigned int keylen) -+{ -+ struct crypto_aes_ctx *ctx; -+ -+ if (!tfm || keylen <= 0) { -+ goto error; -+ } -+ -+ ctx = crypto_tfm_ctx(tfm); -+ -+ if ((!key) || (keylen > (int)(sizeof(ctx->key_enc))) -+ || (keylen > (int)(sizeof(ctx->key_dec)))){ -+ goto error; -+ } -+ -+ ctx->key_length = keylen; -+ memcpy(ctx->key_enc, key, ctx->key_length); -+ memcpy(ctx->key_dec, key, ctx->key_length); -+ -+ return 0; -+error: -+ return -EINVAL; -+} -+EXPORT_SYMBOL(spacemit_crypto_aes_set_key); -+ -+int spacemit_aes_ecb_encrypt(int index, const unsigned char *pt,unsigned char *ct, u8 *key, unsigned int len,unsigned int blocks) -+{ -+ symmetric_key skey1; -+ skey1.rijndael.Nr=len; -+ memcpy(skey1.rijndael.eK,key,sizeof(skey1.rijndael.eK)); -+ -+ return ce_aes_process_nblocks_noalign(index, pt,ct,blocks, &skey1,NULL,E_AES_ECB,NULL, E_AES_ENCRYPT); -+} -+EXPORT_SYMBOL(spacemit_aes_ecb_encrypt); -+ -+int spacemit_aes_ecb_decrypt(int index, const unsigned char *ct,unsigned char *pt, u8 *key, unsigned int len,unsigned int blocks) -+{ -+ symmetric_key skey1; -+ skey1.rijndael.Nr=len; -+ memcpy(skey1.rijndael.dK,key,sizeof(skey1.rijndael.dK)); -+ -+ return ce_aes_process_nblocks_noalign(index, ct,pt,blocks, &skey1,NULL,E_AES_ECB,NULL, E_AES_DECRYPT); -+} -+EXPORT_SYMBOL(spacemit_aes_ecb_decrypt); -+ -+int spacemit_aes_cbc_encrypt(int index, const unsigned char *pt,unsigned char *ct, u8 *key, unsigned int len, u8 *IV,unsigned int blocks) -+{ -+ symmetric_key skey1; -+ skey1.rijndael.Nr=len; -+ memcpy(skey1.rijndael.eK,key,sizeof(skey1.rijndael.eK)); -+ -+ return ce_aes_process_nblocks_noalign(index, pt,ct,blocks, &skey1,NULL,E_AES_CBC,IV,E_AES_ENCRYPT); -+} -+EXPORT_SYMBOL(spacemit_aes_cbc_encrypt); -+ -+int spacemit_aes_cbc_decrypt(int index, const unsigned char *ct,unsigned char *pt, u8 *key, unsigned int len, u8 *IV,unsigned int blocks) -+{ -+ symmetric_key skey1; -+ skey1.rijndael.Nr=len; -+ memcpy(skey1.rijndael.dK,key,sizeof(skey1.rijndael.dK)); -+ -+ return ce_aes_process_nblocks_noalign(index, ct,pt,blocks, &skey1,NULL,E_AES_CBC,IV,E_AES_DECRYPT); -+} -+EXPORT_SYMBOL(spacemit_aes_cbc_decrypt); -+ -+int spacemit_aes_xts_encrypt(int index, const unsigned char *pt, unsigned char *ct, -+ u8 *key1, u8 *key2, unsigned int len, u8 *IV, -+ unsigned int blocks) -+{ -+ symmetric_key skey1, skey2; -+ -+ skey1.rijndael.Nr = len; -+ memcpy(skey1.rijndael.eK, key1, sizeof(skey1.rijndael.eK)); -+ -+ skey2.rijndael.Nr = len; -+ memcpy(skey2.rijndael.eK, key2, sizeof(skey2.rijndael.eK)); -+ -+ return ce_aes_process_nblocks_noalign(index, pt, ct, blocks, &skey1, &skey2, -+ E_AES_XTS, IV, E_AES_ENCRYPT); -+} -+EXPORT_SYMBOL(spacemit_aes_xts_encrypt); -+ -+int spacemit_aes_xts_decrypt(int index, const unsigned char *ct, unsigned char *pt, -+ u8 *key1, u8 *key2, unsigned int len, u8 *IV, -+ unsigned int blocks) -+{ -+ symmetric_key skey1, skey2; -+ -+ skey1.rijndael.Nr = len; -+ memcpy(skey1.rijndael.dK, key1, sizeof(skey1.rijndael.dK)); -+ -+ skey2.rijndael.Nr = len; -+ memcpy(skey2.rijndael.dK, 
key2, sizeof(skey2.rijndael.dK)); -+ -+ return ce_aes_process_nblocks_noalign(index, ct, pt, blocks, &skey1, &skey2, -+ E_AES_XTS, IV, E_AES_DECRYPT); -+} -+EXPORT_SYMBOL(spacemit_aes_xts_decrypt); -+ -+void spacemit_aes_getaddr(unsigned char **in,unsigned char **out) -+{ -+ mutex_lock(&engine[0].eng_mutex); -+ *in = in_buffer; -+ *out = out_buffer; -+} -+EXPORT_SYMBOL(spacemit_aes_getaddr); -+ -+void spacemit_aes_reladdr(void) -+{ -+ mutex_unlock(&engine[0].eng_mutex); -+} -+EXPORT_SYMBOL(spacemit_aes_reladdr); -+ -+__maybe_unused static void engine_reg_dump(int index) -+{ -+ uint32_t val; -+ printk("======> engine[%d] reg dump start! <======\n", index); -+ -+ /*BIU*/ -+ val = biu_read32(index, SP_HST_INTERRUPT_MASK); -+ printk("BIU[%d] SP_HST_INTERRUPT_MASK: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + CE_BIU_REG_OFFSET + SP_HST_INTERRUPT_MASK, val); -+ val = biu_read32(index, SP_INTERRUPT_MASK); -+ printk("BIU[%d] SP_INTERRUPT_MASK: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + CE_BIU_REG_OFFSET + SP_INTERRUPT_MASK, val); -+ val = biu_read32(index, SP_CONTROL); -+ printk("BIU[%d] SP_CONTROL: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + CE_BIU_REG_OFFSET + SP_CONTROL, val); -+ -+ /*ADEC*/ -+ val = adec_read32(index, CE_ADEC_CTRL); -+ printk("ADEC[%d] CE_ADEC_CTRL: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + CE_ADEC_REG_OFFSET + CE_ADEC_CTRL, val); -+ val = adec_read32(index, CE_ADEC_CTRL2); -+ printk("ADEC[%d] CE_ADEC_CTRL2: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + CE_ADEC_REG_OFFSET + CE_ADEC_CTRL2, val); -+ val = adec_read32(index, CE_AXI_SL_CTRL); -+ printk("ADEC[%d] CE_AXI_SL_CTRL: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + CE_ADEC_REG_OFFSET + CE_AXI_SL_CTRL, val); -+ val = adec_read32(index, CE_ADEC_INT); -+ printk("ADEC[%d] CE_ADEC_INT: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + CE_ADEC_REG_OFFSET + CE_ADEC_INT, val); -+ val = adec_read32(index, CE_ADEC_INT_MSK); -+ printk("ADEC[%d] CE_ADEC_INT_MSK: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + CE_ADEC_REG_OFFSET + CE_ADEC_INT_MSK, val); -+ val = adec_read32(index, CE_ADEC_ACC_ERR_ADR); -+ printk("ADEC[%d] CE_ADEC_ACC_ERR_ADR: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + CE_ADEC_REG_OFFSET + CE_ADEC_ACC_ERR_ADR, val); -+ val = adec_read32(index, CE_ADEC_MP_FIFO_ERR_ADR); -+ printk("ADEC[%d] CE_ADEC_MP_FIFO_ERR_ADR: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + CE_ADEC_REG_OFFSET + CE_ADEC_MP_FIFO_ERR_ADR, val); -+ -+ /*ABUS*/ -+ val = abus_read32(index, CE_ABUS_BUS_CTRL); -+ printk("ABUS[%d] CE_ABUS_BUS_CTRL: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + CE_ABUS_REG_OFFSET + CE_ABUS_BUS_CTRL, val); -+ -+ /*DMA*/ -+ val = dma_read32(index, CE_DMA_IN_CTRL); -+ printk("DMA[%d] CE_DMA_IN_CTRL: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + CE_DMA_REG_OFFSET + CE_DMA_IN_CTRL, val); -+ val = dma_read32(index, CE_DMA_IN_STATUS); -+ printk("DMA[%d] CE_DMA_IN_STATUS: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + CE_DMA_REG_OFFSET + CE_DMA_IN_STATUS, val); -+ val = dma_read32(index, CE_DMA_IN_SRC_ADR); -+ printk("DMA[%d] CE_DMA_IN_SRC_ADR: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + CE_DMA_REG_OFFSET + CE_DMA_IN_SRC_ADR, val); -+ val = dma_read32(index, CE_DMA_IN_XFER_CNTR); -+ printk("DMA[%d] CE_DMA_IN_XFER_CNTR: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + 
CE_DMA_REG_OFFSET + CE_DMA_IN_XFER_CNTR, val); -+ val = dma_read32(index, CE_DMA_IN_NX_LL_ADR); -+ printk("DMA[%d] CE_DMA_IN_NX_LL_ADR: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + CE_DMA_REG_OFFSET + CE_DMA_IN_NX_LL_ADR, val); -+ val = dma_read32(index, CE_DMA_IN_INT); -+ printk("DMA[%d] CE_DMA_IN_INT: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + CE_DMA_REG_OFFSET + CE_DMA_IN_INT, val); -+ val = dma_read32(index, CE_DMA_IN_INT_MASK); -+ printk("DMA[%d] CE_DMA_IN_INT_MASK: reg = 0x%lx, val = 0x%x\n", index, engine[index].engine_base + CE_DMA_REG_OFFSET + CE_DMA_IN_INT_MASK, val); -+ -+ printk("======> engine[%d] reg dump finish! <======\n", index); -+} -+ -+static inline void clear_adec_biu_int_flag(int index) -+{ -+ volatile uint32_t val; -+ -+ val = adec_read32(index, CE_ADEC_INT); -+ adec_write32(index, CE_ADEC_INT, val); -+ -+ val = biu_read32(index, SP_INTERRUPT_RST); -+ biu_write32(index, SP_INTERRUPT_RST, val); -+} -+ -+static inline void engine_irq_handler(int index) -+{ -+ volatile uint32_t val_aes; -+ -+ /* aes */ -+ val_aes = crypto_read32(index, CE_CRYPTO_AES_INTRPT_SRC_REG); -+ if (val_aes & AES_INTERRUPT_MASK) -+ { -+ crypto_write32(index, CE_CRYPTO_AES_INTRPT_SRC_REG, val_aes); -+ clear_adec_biu_int_flag(index); -+ engine[index].aes_status = (val_aes & AES_INTERRUPT_FLAG) ? AES_DONE : AES_ERROR; -+ if(!(val_aes & AES_INTERRUPT_FLAG)) -+ dev_info(dev, "%s : %d : complete aes_done (0x%x) !\n",__func__,__LINE__,val_aes); -+ complete(&engine[index].aes_done); -+ return; -+ } -+ -+ /* dma output */ -+ val_aes = dma_read32(index, CE_DMA_OUT_INT); -+ if (val_aes & DMA_INTERRUPT_MASK) -+ { -+ dma_output_stop(index); -+ dma_write32(index, CE_DMA_OUT_INT, val_aes); -+ clear_adec_biu_int_flag(index); -+ engine[index].dma_out_status = (val_aes & BIT_DMA_INOUT_DONE) ? DMA_INOUT_DONE : DMA_INOUT_ERROR; -+ complete(&engine[index].dma_output_done); -+ return; -+ } -+ -+ /* dma input */ -+ val_aes = dma_read32(index, CE_DMA_IN_INT); -+ if (val_aes & DMA_INTERRUPT_MASK) -+ { -+ dma_input_stop(index); -+ dma_write32(index, CE_DMA_IN_INT, val_aes); -+ clear_adec_biu_int_flag(index); -+ engine[index].dma_in_status = (val_aes & BIT_DMA_INOUT_DONE) ? 
DMA_INOUT_DONE : DMA_INOUT_ERROR; -+ complete(&engine[index].dma_input_done); -+ return; -+ } -+} -+ -+ -+static irqreturn_t engine_irq_handler_0(int irq, void *nouse) -+{ -+ engine_irq_handler(0); -+ -+ return IRQ_HANDLED; -+} -+ -+static irqreturn_t engine_irq_handler_1(int irq, void *nouse) -+{ -+ engine_irq_handler(1); -+ -+ return IRQ_HANDLED; -+} -+ -+irqreturn_t (* irq_func[ENGINE_MAX])(int, void *) ={ -+ &engine_irq_handler_0, -+ &engine_irq_handler_1 -+}; -+ -+#ifdef CONFIG_SPACEMIT_CRYPTO_SELF_TEST -+static struct { -+ int keylen; -+ unsigned char key[32]; -+ const unsigned char pt[16]; -+ unsigned char ct[16]; -+} tests[] = { -+ { -+ 16, { -+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, -+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, -+ 0x0e, 0x0f}, { -+ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, -+ 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, -+ 0xee, 0xff}, { -+ 0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30, -+ 0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, -+ 0xc5, 0x5a} -+ }, { -+ 24, { -+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, -+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, -+ 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, -+ 0x14, 0x15, 0x16, 0x17}, { -+ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, -+ 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, -+ 0xee, 0xff}, { -+ 0xdd, 0xa9, 0x7c, 0xa4, 0x86, 0x4c, 0xdf, 0xe0, -+ 0x6e, 0xaf, 0x70, 0xa0, 0xec, 0x0d, -+ 0x71, 0x91} -+ }, { -+ 32, { -+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, -+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, -+ 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, -+ 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, -+ 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f}, { -+ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, -+ 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, -+ 0xee, 0xff}, { -+ 0x8e, 0xa2, 0xb7, 0xca, 0x51, 0x67, 0x45, 0xbf, -+ 0xea, 0xfc, 0x49, 0x90, 0x4b, 0x49, -+ 0x60, 0x89} -+ } -+}; -+#define PT_CT_SIZE 4096 -+ -+static int ce_aes_test(u32 num) -+{ -+ int err; -+ -+ unsigned char iv[16]; -+ int i, y, ret; -+ int index = num; -+ unsigned char *ct_buf; -+ unsigned char *pt_buf; -+ unsigned char *ct_buf_tmp; -+ unsigned char *pt_buf_tmp; -+ -+ ct_buf = kzalloc(PT_CT_SIZE, GFP_KERNEL); -+ pt_buf = kzalloc(PT_CT_SIZE, GFP_KERNEL); -+ -+ ct_buf_tmp = kzalloc(PT_CT_SIZE, GFP_KERNEL); -+ pt_buf_tmp = kzalloc(PT_CT_SIZE, GFP_KERNEL); -+ -+ while (--index >= 0) { -+ dev_info(dev,"================ aes test(%d) =============\n",index); -+ for (i = 0; i < (int)(sizeof(tests) / sizeof(tests[0])); i++) { -+ ret = ce_rijndael_setup_internal(index, tests[i].key, tests[i].keylen * BYTES_TO_BITS); -+ if (ret != 0) { -+ goto err; -+ } -+ memcpy(ct_buf, tests[i].ct , 16); -+ memcpy(pt_buf, tests[i].pt , 16); -+ spacemit_aes_ecb_encrypt(index, pt_buf,ct_buf_tmp, tests[i].key, tests[i].keylen, 1); -+ if (memcmp(ct_buf_tmp, tests[i].ct, 16)) { -+ dev_err(dev," (ecb test)failed : tmp[0] != tests[i].ct\n"); -+ dump_data("(ecb ct)", (const unsigned char *)ct_buf_tmp, 16); -+ ret = -EPERM; -+ goto err; -+ } -+ spacemit_aes_ecb_decrypt(index, ct_buf_tmp, pt_buf_tmp, tests[i].key, tests[i].keylen, 1); -+ dump_data("(ecb after encrypt-decrypt)", (const unsigned char *)pt_buf_tmp, 16); -+ if (memcmp(pt_buf_tmp, tests[i].pt, 16)) { -+ dev_err_once(dev," (ecb test)failed : tmp[1] != tests[i].pt\n"); -+ ret = -EPERM; -+ goto err; -+ } -+ -+ memset(ct_buf_tmp, 0, PT_CT_SIZE); -+ memcpy(iv, "1234567890123456", sizeof(iv)); -+ spacemit_aes_cbc_encrypt(index, pt_buf, ct_buf_tmp, tests[i].key, tests[i].keylen, iv, 1); -+ memset(pt_buf_tmp, 0, PT_CT_SIZE); -+ memcpy(iv, "1234567890123456", sizeof(iv)); -+ 
spacemit_aes_cbc_decrypt(index, ct_buf_tmp, pt_buf_tmp, tests[i].key, tests[i].keylen, iv, 1); -+ dump_data("(cbc after encrypt-decrypt)", (const unsigned char *)pt_buf_tmp, 16); -+ if (memcmp(pt_buf_tmp, tests[i].pt, 16)) { -+ dev_err_once(dev," (cbc test)failed : tmp[1] != tests[i].pt\n"); -+ ret = -EPERM; -+ goto err; -+ } -+ -+ /* now see if we can encrypt all zero bytes 1000 times, decrypt and come back where we started */ -+ memset(ct_buf_tmp, 0, PT_CT_SIZE); -+ for (y = 0; y < 100; y++) { -+ spacemit_aes_ecb_encrypt(index, ct_buf_tmp, ct_buf_tmp, tests[i].key, tests[i].keylen, 1); -+ memcpy(iv,"1234567890123456", sizeof(iv)); -+ spacemit_aes_cbc_encrypt(index, ct_buf_tmp, ct_buf_tmp, tests[i].key, tests[i].keylen, iv, 1); -+ } -+ for (y = 0; y < 100; y++) { -+ memcpy(iv,"1234567890123456", sizeof(iv)); -+ spacemit_aes_cbc_decrypt(index, ct_buf_tmp, ct_buf_tmp, tests[i].key, tests[i].keylen, iv, 1); -+ spacemit_aes_ecb_decrypt(index, ct_buf_tmp, ct_buf_tmp, tests[i].key, tests[i].keylen, 1); -+ } -+ for (y = 0; y < 16; y++) { -+ if (ct_buf_tmp[y] != 0) { -+ dev_err_once(dev," failed : encrypt & decrypt 100 times failed!\n"); -+ ret = -EPERM; -+ goto err; -+ } -+ } -+ } -+ dev_info(dev," successful \n"); -+ } -+ -+ return 0; -+err: -+ kfree(ct_buf); -+ kfree(pt_buf); -+ kfree(ct_buf_tmp); -+ kfree(pt_buf_tmp); -+ return ret; -+} -+#endif -+ -+#ifdef CONFIG_SPACEMIT_CRYPTO_DEBUG -+static enum engine_ddr_type check_addr_type(unsigned long pddr) -+{ -+ int i; -+ -+ for(i=0;i < sizeof(sram_reserved)/sizeof(struct sram_area);i++) -+ { -+ if(pddr >= sram_reserved[i].sram_start && pddr <= sram_reserved[i].sram_end) -+ return RESERVED_SRAM; -+ } -+ return RESERVED_DDR; -+} -+ -+static int aes_test_for_nsaid(unsigned char *pt,unsigned char *ct,unsigned long engine_number) -+{ -+ int err; -+ static struct { -+ int keylen; -+ unsigned char key[32]; -+ } tests = { -+ 32, -+ { -+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, -+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, -+ 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, -+ 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, -+ 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f -+ } -+ }; -+ -+ int index; -+ index = (int)engine_number; -+ -+ dev_info(dev,"================ aes test(%d) =============\n",index); -+ -+ if ((err = ce_rijndael_setup_internal(index, tests.key, tests.keylen * BYTES_TO_BITS)) != 0) { -+ dev_err_once(dev,"ce_rijndael_setup_internal failed!\n"); -+ return err; -+ } -+ -+ spacemit_aes_ecb_encrypt(index, pt, ct, tests.key, tests.keylen,1); -+ dump_data("(ecb after encrypt)===",ct,16); -+ -+ dev_info(dev,"================ aes test(%d) end=============\n",index); -+ return 0; -+} -+#endif -+ -+static ssize_t engine_store(struct device *dev, -+ struct device_attribute *attr, const char *buf, size_t count) -+{ -+#ifndef CONFIG_SPACEMIT_CRYPTO_DEBUG -+ (void)dev; -+ (void)buf; -+ (void)count; -+ dev_info(dev, "%s : %d : Debugging interface is not open !\n", __func__,__LINE__); -+#else -+ unsigned long pddr1,pddr2,index; -+ enum engine_ddr_type pddr1_type,pddr2_type; -+ unsigned char *pt,*ct; -+ sscanf(buf,"0x%lx 0x%lx 0x%lx",&pddr1,&pddr2,&index); -+ -+ pddr1_type = check_addr_type(pddr1); -+ pddr2_type = check_addr_type(pddr2); -+ if(pddr1_type == RESERVED_SRAM && pddr2_type == RESERVED_SRAM) -+ { -+ sram_phy_base_src = pddr1; -+ sram_phy_base_dst = pddr2; -+ engine[(int)index].ddr_type = RESERVED_SRAM; -+ pt = (char *)ioremap(pddr1, SRAM_MAP_SIZE); -+ if (!pt) -+ { -+ dev_err_once(dev,"engine_store ioremap pddr1 failed!\n"); -+ return -ENOMEM; -+ } -+ -+ ct = (char 
*)ioremap(pddr2, SRAM_MAP_SIZE); -+ if (!ct) -+ { -+ dev_err_once(dev,"engine_store ioremap pddr2 failed!\n"); -+ iounmap(pt); -+ return -ENOMEM; -+ } -+ } -+ else if(pddr1_type == RESERVED_DDR && pddr2_type == RESERVED_DDR) -+ { -+ engine[(int)index].ddr_type = RESERVED_DDR; -+ pt = (char *)phys_to_virt((unsigned long)pddr1); -+ ct = (char *)phys_to_virt((unsigned long)pddr2); -+ } -+ else -+ { -+ dev_err_once(dev,"engine_store pddr bad parameters!\n"); -+ return count; -+ } -+ -+ dev_dbg(dev,"engine_store (0x%lx,0x%lx)-->(0x%lx,0x%lx)\n",pddr1,pddr2,(unsigned long)pt,(unsigned long)ct); -+ aes_test_for_nsaid(pt,ct,index); -+ engine[(int)index].ddr_type = NORMAL_DDR; -+ -+ if(pddr1_type == RESERVED_SRAM && pddr2_type == RESERVED_SRAM) -+ { -+ iounmap(pt); -+ iounmap(ct); -+ } -+#endif -+ return count; -+} -+ -+static DEVICE_ATTR(engine_fun, S_IWUSR | S_IRUGO, NULL, engine_store); -+ -+static struct attribute *engine_operation[] = { -+ &dev_attr_engine_fun.attr, -+ NULL -+}; -+ -+static const struct attribute_group engine_operations = { -+ .name = "engine", -+ .attrs = engine_operation -+}; -+ -+static const char *eng_names[ENGINE_MAX] = { -+ "spacemit-crypto-engine-0", -+}; -+ -+/* ================================================ -+probe -+===================================================*/ -+static int crypto_engine_probe(struct platform_device *pdev) -+{ -+ struct device_node *np = pdev->dev.of_node; -+ int ret = 0; -+ int i; -+ uint32_t addr_range[2]; -+ unsigned int engine_irq; -+ struct aes_clk_reset_ctrl *ctrl; -+ char obj_name[32]; -+ const char *irq_name; -+ u32 num_engines; -+ dev = &pdev->dev; -+ -+ ret = of_property_read_u32(np, "num-engines", &num_engines); -+ if(ret){ -+ dev_err_once(dev, "can't get %s from dts!\n", "num-engines"); -+ return -ENODEV; -+ } -+ -+ in_buffer = dma_alloc_noncoherent(dev, SPACEMIT_AES_BUFFER_LEN, &dma_addr_in, DMA_TO_DEVICE, GFP_KERNEL); -+ out_buffer = dma_alloc_noncoherent(dev, SPACEMIT_AES_BUFFER_LEN, &dma_addr_out, DMA_FROM_DEVICE, GFP_KERNEL); -+ ctrl = kmalloc(sizeof(struct aes_clk_reset_ctrl), GFP_KERNEL); -+ ctrl->clk = devm_clk_get(&pdev->dev, NULL); -+ if (IS_ERR(ctrl->clk)) -+ return PTR_ERR(ctrl->clk); -+ clk_prepare_enable(ctrl->clk); -+ -+ ctrl->reset = devm_reset_control_get_optional(&pdev->dev, NULL); -+ if(IS_ERR(ctrl->reset)) -+ return PTR_ERR(ctrl->reset); -+ reset_control_deassert(ctrl->reset); -+ -+ pm_runtime_enable(&pdev->dev); -+ platform_set_drvdata(pdev, ctrl); -+ -+ for(i=0; i < num_engines; i++) -+ { -+ sprintf(obj_name,"spacemit-crypto-engine-%d",i); -+ init_completion(&engine[i].aes_done); -+ init_completion(&engine[i].dma_output_done); -+ init_completion(&engine[i].dma_input_done); -+ mutex_init(&engine[i].eng_mutex); -+ engine[i].ddr_type = NORMAL_DDR; -+ engine[i].handler = irq_func[i]; -+ -+ ret = of_property_read_u32_array(np, obj_name, &addr_range[0], 2); -+ if(0 != ret){ -+ dev_err_once(dev, "can't get %s from dts!\n", obj_name); -+ return -ENOMEM; -+ } -+ -+ engine[i].engine_base = (unsigned long)ioremap(addr_range[0], addr_range[1]); -+ if (engine[i].engine_base == 0) -+ { -+ dev_err_once(dev,"engine_mem ioremap failed. pyh_addr=0x%08x,pyh_size=0x%08x\n",addr_range[0], addr_range[1]); -+ goto err_ioremap; -+ } -+ dev_dbg(dev, "map %s successful. 
pyh_addr=0x%08x,pyh_size=0x%08x, vir_addr=0x%lx\n", obj_name,addr_range[0], addr_range[1],engine[0].engine_base); -+ -+ engine_irq = irq_of_parse_and_map(np, i); -+ if (!engine_irq) { -+ dev_err_once(dev,"%s: %s irq_of_parse_and_map failed\n",__FILE__,obj_name); -+ goto err_ioremap; -+ } -+ -+ irq_name = eng_names[i]; -+ ret = request_irq(engine_irq, engine[i].handler,IRQF_TRIGGER_HIGH | IRQF_ONESHOT, irq_name, NULL); -+ if (ret) { -+ dev_err_once(dev,"failed to request %s IRQ\n",obj_name); -+ goto err_ioremap; -+ } -+ -+ } -+ -+#ifdef CONFIG_SPACEMIT_CRYPTO_DEBUG -+ { -+ int j; -+ int sram_index = 0; -+ for (i = 0; i < SRAM_NUM; i++) { -+ for (j = 0; j < SRAM_NUM; j++) { -+ sprintf(obj_name,"spacemit-sub%d-sram%d",i,j); -+ ret = of_property_read_u32_array(np, obj_name, &addr_range[0], 2); -+ if(0 != ret){ -+ dev_err_once(dev, "can't get %s from dts!\n", obj_name); -+ return -ENOMEM; -+ } -+ -+ sram_reserved[sram_index].sram_start = addr_range[0]; -+ sram_reserved[sram_index].sram_end = addr_range[1]; -+ sram_reserved[sram_index].sram_size = addr_range[1] - addr_range[0]; -+ dev_dbg(dev, "sram_%d : 0x%lx 0x%lx 0x%lx\n", sram_index,sram_reserved[sram_index].sram_start, -+ sram_reserved[sram_index].sram_end,sram_reserved[sram_index].sram_size); -+ sram_index ++; -+ } -+ } -+ } -+#endif -+ -+ ciu_base = spacemit_syscon_regmap_lookup_by_compatible("spacemit,ciu"); -+ if (IS_ERR(ciu_base)) -+ { -+ dev_err_once(dev,"ciu_base has not mapped. \n"); -+ goto err_ioremap; -+ } -+ -+ regmap_update_bits(ciu_base, ENGINE_DMA_ADDR_HIGH_OFFSET, -+ (SW_RESETN | MASTER_CLK_EN | SLAVE_CLK_EN), -+ (SW_RESETN | MASTER_CLK_EN | SLAVE_CLK_EN)); -+ -+ ret = sysfs_create_group(&dev->kobj, &engine_operations); -+ if (ret) { -+ dev_err_once(dev,"sysfs_create_group failed\n"); -+ return ret; -+ } -+ -+ -+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); -+ if (ret) { -+ dev_err(dev, "Unable to set dma mask\n"); -+ return ret; -+ } -+ -+#ifdef CONFIG_SPACEMIT_CRYPTO_SELF_TEST -+ ce_aes_test(num_engines); -+#endif -+ return 0; -+ -+err_ioremap: -+ return -EINVAL; -+} -+ -+static int crypto_engine_remove(struct platform_device *pdev) -+{ -+ struct aes_clk_reset_ctrl *ctrl = dev_get_drvdata(&pdev->dev); -+ dma_free_noncoherent(dev, SPACEMIT_AES_BUFFER_LEN, in_buffer, dma_addr_in, DMA_TO_DEVICE); -+ dma_free_noncoherent(dev, SPACEMIT_AES_BUFFER_LEN, out_buffer, dma_addr_out, DMA_FROM_DEVICE); -+ clk_disable_unprepare(ctrl->clk); -+ reset_control_assert(ctrl->reset); -+ return 0; -+} -+ -+ -+static struct of_device_id crypto_engine_of_match[] = { -+ { .compatible = "spacemit,crypto_engine", }, -+ {} -+}; -+ -+#ifdef CONFIG_PM_SLEEP -+static int spacemit_aes_suspend_noirq(struct device *dev) -+{ -+ struct aes_clk_reset_ctrl *ctrl = dev_get_drvdata(dev); -+ -+ clk_disable_unprepare(ctrl->clk); -+ -+ return 0; -+} -+ -+static int spacemit_aes_resume_noirq(struct device *dev) -+{ -+ struct aes_clk_reset_ctrl *ctrl = dev_get_drvdata(dev); -+ -+ clk_prepare_enable(ctrl->clk); -+ -+ return 0; -+} -+ -+static const struct dev_pm_ops spacemit_aes_pm_qos = { -+ .suspend_noirq = spacemit_aes_suspend_noirq, -+ .resume_noirq = spacemit_aes_resume_noirq, -+}; -+#endif -+ -+static struct platform_driver crypto_engine_driver = { -+ .driver = { -+ .name = "crypto_engine", -+ .owner = THIS_MODULE, -+#ifdef CONFIG_PM_SLEEP -+ .pm = &spacemit_aes_pm_qos, -+#endif -+ .of_match_table = crypto_engine_of_match, -+ }, -+ .probe = crypto_engine_probe, -+ .remove = crypto_engine_remove, -+ -+}; -+ -+static int crypto_engine_init(void) -+{ -+ 
return platform_driver_register(&crypto_engine_driver); -+} -+ -+static void crypto_engine_exit(void) -+{ -+ platform_driver_unregister(&crypto_engine_driver); -+} -+ -+ -+module_init(crypto_engine_init); -+module_exit(crypto_engine_exit); -+ -+MODULE_LICENSE("GPL v2"); -diff --git a/drivers/crypto/spacemit/spacemit_engine.h b/drivers/crypto/spacemit/spacemit_engine.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/crypto/spacemit/spacemit_engine.h -@@ -0,0 +1,321 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * CE engine for spacemit -+ * -+ * Copyright (C) 2023 Spacemit -+ */ -+#include -+#include -+ -+#ifndef SPAECMIT_SECENG_H -+#define SPACEMIT_SECENG_H -+ -+#define SPACEMIT_AES_BUFFER_LEN 1024 * 256 -+ -+#define WORK_BUF_SIZE 2048 -+#define CTR_COUNTER_LITTLE_ENDIAN 0x0000 -+#define CTR_COUNTER_BIG_ENDIAN 0x1000 -+#define BYTES_TO_BITS 8 -+#define SPACEMIT_SECENG_SIZE 0x3000 -+ -+#define ENGINE_DMA_ADDR_HIGH_OFFSET 0x14c -+#define SW_RESETN BIT(0) -+#define MASTER_CLK_EN BIT(1) -+#define SLAVE_CLK_EN BIT(2) -+#define WADDR_BIT32 BIT(4) -+#define RADDR_BIT32 BIT(5) -+ -+#define CE_BIU_REG_OFFSET 0x00000000L -+#define CE_ADEC_REG_OFFSET 0x00000400L -+#define CE_DMA_REG_OFFSET 0x00000800L -+#define CE_ABUS_REG_OFFSET 0x00000C00L -+#define CE_CRYPTO_REG_OFFSET 0x00001000L -+#define CE_HASH_REG_OFFSET 0x00001800L -+ -+#define CE_ADEC_CTRL 0x0000 -+#define CE_ADEC_CTRL2 0x0004 -+#define CE_AXI_SL_CTRL 0x0008 -+#define CE_ADEC_INT 0x000C -+#define CE_ADEC_INT_MSK 0x0010 -+#define CE_ADEC_ACC_ERR_ADR 0x0014 -+#define CE_ADEC_MP_FIFO_ERR_ADR 0x0018 -+ -+#define CE_ABUS_BUS_CTRL 0x0000 -+ -+#define SP_HST_INTERRUPT_MASK 0x0cc -+#define SP_INTERRUPT_RST 0x218 -+#define SP_INTERRUPT_MASK 0x21c -+#define SP_CONTROL 0x220 -+ -+#define CE_HASH_CONFIG_REG 0x0000 -+#define CE_HASH_CONTROL_REG 0x0004 -+#define CE_HASH_COMMAND_REG 0x0008 -+#define CE_HASH_STATUS_REG 0x000C -+#define CE_HASH_INCOME_SEG_SZ_REG 0x0010 -+#define CE_HASH_TOTAL_MSG_SZ_L_REG 0x0018 -+#define CE_HASH_TOTAL_MSG_SZ_H_REG 0x001C -+#define CE_HASH_DIGEST_BASE 0x0020 -+#define CE_HASH_DIGEST_REG(a) \ -+ (CE_HASH_DIGEST_BASE + (a << 2)) -+#define CE_HASH_DIGEST_H_BASE 0x0040 -+#define CE_HASH_DIGEST_H_REG(a) \ -+ (CE_HASH_DIGEST_H_BASE + (a << 2)) -+#define CE_HASH_CONTEXTO_BASE 0x0064 -+#define CE_HASH_CONTEXTO_REG(a) \ -+ (CE_HASH_CONTEXTO_BASE + (a << 2)) -+#define CE_HASH_CONTEXTO_H_BASE 0x0080 -+#define CE_HASH_CONTEXTO_H_REG(a) \ -+ (CE_HASH_CONTEXTO_H_BASE + (a << 2)) -+#define CE_HASH_KEY_BASE 0x00A4 -+#define CE_HASH_KEY_REG(a) \ -+ (CE_HASH_KEY_BASE + (a << 2)) -+ -+#define CE_DMA_IN_CTRL 0x0000 -+#define CE_DMA_IN_STATUS 0x0004 -+#define CE_DMA_IN_SRC_ADR 0x0008 -+#define CE_DMA_IN_XFER_CNTR 0x000C -+#define CE_DMA_IN_NX_LL_ADR 0x0010 -+#define CE_DMA_IN_INT 0x0014 -+#define CE_DMA_IN_INT_MASK 0x0018 -+#define CE_DMA_OUT_CTRL 0x001C -+#define CE_DMA_OUT_STATUS 0x0020 -+#define CE_DMA_OUT_DEST_ADR 0x0024 -+#define CE_DMA_OUT_XFER_CNTR 0x0028 -+#define CE_DMA_OUT_NX_LL_ADR 0x002C -+#define CE_DMA_OUT_INT 0x0030 -+#define CE_DMA_OUT_INT_MASK 0x0034 -+#define CE_DMA_AXI_CTRL 0x0038 -+#define CE_DMA_IF_RCOUNT 0x003C -+#define CE_DMA_IF_RD_PTR_ERR 0x0040 -+#define CE_DMA_OF_SPACE 0x0044 -+#define CE_DMA_OF_RD_PTR_ERR 0x0048 -+#define CE_DMA_IF_RAM_BASE 0x0100 -+#define CE_DMA_IF_RAM_REG(a) \ -+ (CE_DMA_IF_RAM_BASE + a*0x4) -+#define CE_DMA_OF_RAM_BASE 0x0300 -+#define CE_DMA_OF_RAM_REG(a) \ -+ (CE_DMA_OF_RAM_BASE + a*0x4) -+ -+#define CE_BIU_HST_INTERRUPT_MASK 0x00CC -+#define 
CE_BIU_SP_INTERRUPT_MASK 0x021C -+#define CE_BIU_SP_CONTROL 0x0220 -+ -+#define CE_CRYPTO_AES_CONFIG_REG 0x0000 -+#define CE_CRYPTO_AES_CONTROL_REG 0x0004 -+#define CE_CRYPTO_AES_COMMAND_REG 0x0008 -+#define CE_CRYPTO_AES_STATUS_REG 0x000C -+#define CE_CRYPTO_AES_INTRPT_SRC_REG 0x0010 -+#define CE_CRYPTO_AES_INTRPT_SRC_EN_REG 0x0014 -+#define CE_CRYPTO_AES_STREAM_SIZE_REG 0x0018 -+#define CE_CRYPTO_ENGINE_SEL_REG 0x00A8 -+ -+#define CE_CRYPTO_K2_BASE 0x0058 -+#define CE_CRYPTO_K2_W_REG(a) \ -+ (CE_CRYPTO_K2_BASE + a*0x4) -+#define CE_CRYPTO_K1_BASE 0x0078 -+#define CE_CRYPTO_K1_W_REG(a) \ -+ (CE_CRYPTO_K1_BASE + a*0x4) -+#define CE_CRYPTO_IV_BASE 0x0098 -+#define CE_CRYPTO_IV_REG(a) \ -+ (CE_CRYPTO_IV_BASE + a*0x4) -+ -+#define BIT0 1<<0 -+#define BIT1 1<<1 -+#define BIT2 1<<2 -+#define BIT3 1<<3 -+#define BIT4 1<<4 -+#define BIT5 1<<5 -+ -+#define AES_INTERRUPT_FLAG BIT0 -+#define AES_ERR1_INTERRUPT_FLAG BIT1 -+#define AES_ERR2_INTERRUPT_FLAG BIT2 -+#define AES_INTERRUPT_MASK (AES_INTERRUPT_FLAG | AES_ERR1_INTERRUPT_FLAG | AES_ERR2_INTERRUPT_FLAG) -+ -+#define BIT_DMA_INOUT_DONE BIT0 -+#define BIT_DMA_INOUT_BUS_ERR BIT1 -+#define BIT_DMA_INOUT_LL_ERR BIT2 -+#define BIT_DMA_INOUT_PAR_ERR BIT3 -+#define BIT_DMA_INOUT_PAUSE_CMPL_ERR BIT4 -+#define BIT_DMA_INOUT_DATA_PAR_ERR BIT5 -+#define DMA_INTERRUPT_MASK (BIT_DMA_INOUT_DONE | BIT_DMA_INOUT_BUS_ERR | BIT_DMA_INOUT_LL_ERR \ -+ | BIT_DMA_INOUT_PAR_ERR | BIT_DMA_INOUT_PAUSE_CMPL_ERR | BIT_DMA_INOUT_DATA_PAR_ERR) -+ -+#define BIU_MASK BIT0 -+#define ADEC_MASK (BIT1 | BIT5) -+ -+typedef enum { -+ /* reset bit */ -+ E_ACC_ENG_DMA = 1, -+ E_ACC_ENG_HASH = 5, -+ E_ACC_ENG_CRYPTO = 3, -+ E_ACC_ENG_ALL, -+} ADEC_ACC_ENG_T; -+ -+typedef enum { -+ E_ABUS_GRP_A_HASH = 0x0, -+} ABUS_GRP_A_T; -+ -+typedef enum { -+ E_ABUS_GRP_B_AES = 0x0, -+ E_ABUS_GRP_B_BYPASS = 0x2, -+} ABUS_GRP_B_T; -+ -+typedef enum { -+ E_ABUS_STRAIGHT = 0, -+ E_ABUS_CROSS, -+} ABUS_CROSS_BAR_T; -+ -+typedef enum { -+ E_HASH_INIT = 0x1, -+ E_HASH_UPDATE = 0x2, -+ E_HASH_FINAL = 0x3, -+} HASH_OP_MODE_T; -+ -+typedef enum { -+ E_HASH_LEN_SHA1 = 20, -+ E_HASH_LEN_SHA256 = 32, -+ E_HASH_LEN_SHA224 = 28, -+ E_HASH_LEN_MD5 = 16, -+ E_HASH_LEN_SHA512 = 64, -+ E_HASH_LEN_SHA384 = 48, -+} HASH_LEN_T; -+ -+typedef enum { -+ E_HASH_SIMPLE = 0, -+ E_HASH_HMAC, -+} HASH_MODE_T; -+ -+typedef enum { -+ E_HASH_SHA1 = 0x0, -+ E_HASH_SHA256 = 0x1, -+ E_HASH_SHA224 = 0x2, -+ E_HASH_MD5 = 0x3, -+ E_HASH_SHA512 = 0x4, -+ E_HASH_SHA384 = 0x5, -+} HASH_ALGO_T; -+ -+typedef struct { -+ uint32_t addr; -+ uint32_t size; -+ uint32_t next_desc; -+ uint32_t reserved; -+} DMA_DESC_T; -+ -+typedef enum { -+ E_AES_128 = 128/8, -+ E_AES_192 = 192/8, -+ E_AES_256 = 256/8, -+} AES_KEY_LEN_T; -+ -+typedef enum { -+ E_AES_ECB = 0, -+ E_AES_CBC, -+ E_AES_CTR, -+ E_AES_XTS, -+} AES_MODE_T; -+ -+typedef enum { -+ E_AES_DECRYPT = 0, -+ E_AES_ENCRYPT, -+} AES_OP_MODE_T; -+ -+typedef enum { -+ E_ENG_AES = 0, -+} CRYPTO_ENG_SEL_T; -+ -+ -+struct rijndael_key { -+ uint32_t eK[60], dK[60]; -+ int Nr; -+}; -+ -+typedef union Symmetric_key { -+ struct rijndael_key rijndael; -+ void *data; -+} symmetric_key; -+ -+struct md5_state { -+ uint64_t length; -+ uint32_t state[4], curlen; -+ unsigned char buf[64]; -+}; -+struct sha512_state { -+ uint64_t length, state[8]; -+ unsigned long curlen; -+ unsigned char buf[128]; -+}; -+struct sha256_state { -+ uint64_t length; -+ uint32_t state[8], curlen; -+ unsigned char buf[64]; -+}; -+struct sha1_state { -+ uint64_t length; -+ uint32_t state[5], curlen; -+ unsigned char buf[64]; -+}; -+ 
-+typedef union Hash_state { -+ char dummy[1]; -+ -+ struct sha512_state sha512; -+ struct sha256_state sha256; -+ struct sha1_state sha1; -+ struct md5_state md5; -+ void *data; -+} hash_state; -+ -+enum engine_index{ -+ ENGINE_1, -+ ENGINE_2, -+ ENGINE_MAX, -+}; -+enum crypto_aes_status{ -+ AES_INVALID, -+ AES_DONE, -+ AES_ERROR -+}; -+enum crypto_dma_status{ -+ DMA_INVALID, -+ DMA_INOUT_DONE, -+ DMA_INOUT_ERROR -+}; -+ -+enum engine_ddr_type{ -+ NORMAL_DDR, -+ RESERVED_DDR, -+ RESERVED_SRAM -+}; -+ -+struct engine_info{ -+ unsigned long engine_base; -+ struct completion aes_done; -+ struct completion dma_output_done; -+ struct completion dma_input_done; -+ struct mutex eng_mutex; -+ enum crypto_aes_status aes_status; -+ enum crypto_dma_status dma_in_status; -+ enum crypto_dma_status dma_out_status; -+ enum engine_ddr_type ddr_type; -+ irqreturn_t (*handler)(int irq, void *nouse); -+ unsigned char internal_working_buffer[WORK_BUF_SIZE + WORK_BUF_SIZE] __attribute__ ((aligned(32))); -+}; -+ -+#ifdef SPACEMIT_CRYPTO_DEBUG -+#define SRAM_NUM 2 -+#define SUBSYS_MAX 2 -+#define SRAM_MAP_SIZE 0x1000 -+struct sram_area{ -+ unsigned long sram_start; -+ unsigned long sram_end; -+ unsigned long sram_size; -+}; -+static struct sram_area sram_reserved[SRAM_NUM * SUBSYS_MAX]; -+static unsigned long sram_phy_base_src,sram_phy_base_dst; -+#endif -+struct aes_clk_reset_ctrl { -+ struct clk *clk; -+ struct reset_control *reset; -+}; -+ -+#endif --- -Armbian - diff --git a/patch/kernel/archive/spacemit-6.1/011-drivers-dma.patch b/patch/kernel/archive/spacemit-6.1/011-drivers-dma.patch deleted file mode 100644 index ddcc6853cc13..000000000000 --- a/patch/kernel/archive/spacemit-6.1/011-drivers-dma.patch +++ /dev/null @@ -1,3826 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Fri, 21 Jun 2024 11:54:06 -0400 -Subject: add spacemit patch set - -source: https://gitee.com/bianbu-linux/linux-6.1 - -Signed-off-by: Patrick Yavitz ---- - drivers/dma/Kconfig | 35 +- - drivers/dma/Makefile | 3 + - drivers/dma/adma-spacemit.c | 696 ++++++++++ - drivers/dma/bcm2835-dma.c | 4 +- - drivers/dma/dma-axi-dmac.c | 4 +- - 5 files changed, 735 insertions(+), 7 deletions(-) - -diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig -index 111111111111..222222222222 100644 ---- a/drivers/dma/Kconfig -+++ b/drivers/dma/Kconfig -@@ -416,6 +416,12 @@ config MILBEAUT_XDMAC - Say yes here to support the Socionext Milbeaut - XDMAC device. - -+menuconfig MMP_PDMA_DRIVER -+ bool "MMP_PDMA driver" -+ help -+ choice mmp_pdma driver -+ -+if MMP_PDMA_DRIVER - config MMP_PDMA - tristate "MMP PDMA support" - depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST -@@ -423,6 +429,27 @@ config MMP_PDMA - help - Support the MMP PDMA engine for PXA and MMP platform. - -+config MMP_PDMA_SPACEMIT_K1X -+ bool "Spacemit mmp_pdma support" -+ depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST || SOC_SPACEMIT_K1X -+ select DMA_ENGINE -+ help -+ Support the MMP PDMA engine for Spacemit-k1x platform. -+endif -+ -+config SPACEMIT_PDMA_SUPPORT_64BIT -+ bool "MMP PDMA support the 64-bit address" -+ default y -+ help -+ Support 64-bit address in the MMP PDMA -+ -+config ADMA_SPACEMIT_K1X -+ bool "Spacemit adma support" -+ depends on SOC_SPACEMIT_K1X && RPMSG_VIRTIO -+ select DMA_ENGINE -+ help -+ Support the AMDA engine for Spacemit-k1x sspa. 
-+ - config MMP_TDMA - tristate "MMP Two-Channel DMA support" - depends on ARCH_MMP || COMPILE_TEST -@@ -439,7 +466,7 @@ config MOXART_DMA - select DMA_VIRTUAL_CHANNELS - help - Enable support for the MOXA ART SoC DMA controller. -- -+ - Say Y here if you enabled MMP ADMA, otherwise say N. - - config MPC512X_DMA -@@ -755,6 +782,12 @@ config XILINX_ZYNQMP_DPDMA - driver provides the dmaengine required by the DisplayPort subsystem - display driver. - -+config USERSPACE_DMA -+ bool "userspace dma driver support" -+ depends on DMA_ENGINE -+ help -+ Support dma operation in userspace -+ - # driver files - source "drivers/dma/bestcomm/Kconfig" - -diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/dma/Makefile -+++ b/drivers/dma/Makefile -@@ -50,6 +50,8 @@ obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o - obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o - obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o - obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o -+obj-$(CONFIG_MMP_PDMA_SPACEMIT_K1X) += mmp_pdma_k1x.o -+obj-$(CONFIG_ADMA_SPACEMIT_K1X) += adma-spacemit.o - obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o - obj-$(CONFIG_MOXART_DMA) += moxart-dma.o - obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o -@@ -81,6 +83,7 @@ obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o - obj-$(CONFIG_UNIPHIER_XDMAC) += uniphier-xdmac.o - obj-$(CONFIG_XGENE_DMA) += xgene-dma.o - obj-$(CONFIG_ST_FDMA) += st_fdma.o -+obj-$(CONFIG_USERSPACE_DMA) += udma.o - obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/ - obj-$(CONFIG_INTEL_LDMA) += lgm/ - -diff --git a/drivers/dma/adma-spacemit.c b/drivers/dma/adma-spacemit.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/dma/adma-spacemit.c -@@ -0,0 +1,696 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Copyright 2024 Spacemit K1x Adma Driver -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "dmaengine.h" -+ -+#include -+#include -+#include -+#include -+ -+#define BCR 0x0 -+#define SAR 0x10 -+#define DAR 0x20 -+#define NDR 0x30 -+#define DCR 0x40 -+#define IER 0x80 -+#define ADMA_SAMPLE_BITS_MASK (0x7 << 22) -+#define ADMA_SAMPLE_BITS(x) (((x) << 22) & ADMA_SAMPLE_BITS_MASK) -+#define ADMA_CH_ABORT (1 << 20) -+#define ADMA_CLOSE_DESC_EN (1 << 17) -+#define ADMA_UNPACK_SAMPLES (1 << 16) -+#define ADMA_CH_ACTIVE (1 << 14) -+#define ADMA_FETCH_NEXT_DESC (1 << 13) -+#define ADMA_CH_EN (1 << 12) -+#define ADMA_INTRRUPT_MODE (1 << 10) -+ -+#define ADMA_BURST_LIMIT_MASK (0x7 << 6) -+#define ADMA_BURST_LIMIT(x) (((x) << 6) & ADMA_BURST_LIMIT_MASK) -+ -+#define ADMA_DEST_ADDR_DIR_MASK (0x3 << 4) -+#define ADMA_DEST_ADDR_INCREMENT (0x0 << 4) -+#define ADMA_DEST_ADDR_DECREMENT (0x1 << 4) -+#define ADMA_DEST_ADDR_HOLD (0x2 << 4) -+ -+#define ADMA_SRC_ADDR_DIR_MASK (0x3 << 2) -+#define ADMA_SRC_ADDR_INCREMENT (0x0 << 2) -+#define ADMA_SRC_ADDR_DECREMENT (0x1 << 2) -+#define ADMA_SRC_ADDR_HOLD (0x2 << 2) -+ -+/* current descriptor register */ -+#define ADMA_CH_CUR_DESC_REG 0x70 -+ -+/* interrupt mask register */ -+#define ADMA_CH_INTR_MASK_REG 0x80 -+#define ADMA_FINISH_INTR_EN (0x1 << 0) -+ -+/* interrupt status register */ -+#define ADMA_CH_INTR_STATUS_REG 0xa0 -+#define ADMA_FINISH_INTR_DONE (0x1 << 0) -+ -+#define HDMI_ADMA 0x50 -+#define HDMI_ENABLE (1 << 0) -+#define HDMI_DISABLE (0 << 0) -+ -+#define DESC_BUF_BASE 0xc08d0000 -+#define DESC_BUF_SIZE 0x400 -+ 
-+#define tx_to_adma_desc(tx) \ -+ container_of(tx, struct adma_desc_sw, async_tx) -+#define to_adma_chan(dchan) \ -+ container_of(dchan, struct adma_ch, chan) -+#define to_adma_dev(dmadev) \ -+ container_of(dmadev, struct adma_dev, device) -+ -+#define STARTUP_MSG "startup" -+#define STARTUP_OK_MSG "startup-ok" -+//#define DESC_BUFFER_ADDR -+ -+enum { -+ AUDIO_SAMPLE_WORD_8BITS = 0x0, -+ AUDIO_SAMPLE_WORD_12BITS, -+ AUDIO_SAMPLE_WORD_16BITS, -+ AUDIO_SAMPLE_WORD_20BITS, -+ AUDIO_SAMPLE_WORD_24BITS, -+ AUDIO_SAMPLE_WORD_32BITS, -+}; -+ -+struct adma_desc_hw { -+ u32 byte_cnt; -+ u32 src_addr; -+ u32 dst_addr; -+ u32 nxt_desc; -+}; -+ -+struct adma_desc_sw { -+ struct adma_desc_hw desc; -+ struct list_head node; -+ struct list_head tx_list; -+ struct dma_async_tx_descriptor async_tx; -+}; -+ -+struct adma_pchan; -+ -+struct adma_ch { -+ struct device *dev; -+ struct dma_chan chan; -+ struct dma_async_tx_descriptor desc; -+ struct adma_pchan *phy; -+ struct dma_slave_config slave_config; -+ enum dma_transfer_direction dir; -+ struct adma_desc_sw *cyclic_first; -+ bool unpack_sample; -+ -+ struct tasklet_struct tasklet; -+ u32 dev_addr; -+ -+ spinlock_t desc_lock; -+ struct list_head chain_pending; -+ struct list_head chain_running; -+ enum dma_status status; -+ -+ struct gen_pool *desc_pool; -+}; -+ -+struct adma_pchan { -+ void __iomem *base; -+ void __iomem *ctrl_base; -+ struct adma_ch *vchan; -+}; -+ -+struct adma_dev { -+ int max_burst_size; -+ void __iomem *base; -+ void __iomem *ctrl_base; -+ void __iomem *desc_base; -+ struct dma_device device; -+ struct device *dev; -+ spinlock_t phy_lock; -+}; -+ -+static unsigned long long private_data[2]; -+ -+struct instance_data { -+ struct rpmsg_device *rpdev; -+ struct adma_ch *achan; -+}; -+ -+static void adma_ch_write_reg(struct adma_pchan *phy, u32 reg_offset, u32 value) -+{ -+ writel(value, phy->base + reg_offset); -+} -+ -+static u32 adma_ch_read_reg(struct adma_pchan *phy, u32 reg_offset) -+{ -+ u32 val; -+ return val = readl(phy->base + reg_offset); -+} -+ -+/*define adma-controller driver*/ -+static dma_cookie_t adma_tx_submit(struct dma_async_tx_descriptor *tx) -+{ -+ struct adma_ch *achan = to_adma_chan(tx->chan); -+ struct adma_desc_sw *desc = tx_to_adma_desc(tx); -+ struct adma_desc_sw *child; -+ unsigned long flags; -+ dma_cookie_t cookie = -EBUSY; -+ -+ spin_lock_irqsave(&achan->desc_lock, flags); -+ list_for_each_entry(child, &desc->tx_list, node) { -+ cookie = dma_cookie_assign(&child->async_tx); -+ } -+ -+ list_splice_tail_init(&desc->tx_list, &achan->chain_pending); -+ spin_unlock_irqrestore(&achan->desc_lock, flags); -+ -+ return cookie; -+} -+ -+static int adma_alloc_chan_resources(struct dma_chan *dchan) -+{ -+ struct adma_ch *achan = to_adma_chan(dchan); -+ struct adma_dev *adev = to_adma_dev(achan->chan.device); -+ if(achan->desc_pool) -+ return 1; -+ achan->desc_pool = gen_pool_create(7, -1); -+ if (!achan->desc_pool) { -+ pr_err("unable to allocate descriptor pool\n"); -+ return -ENOMEM; -+ } -+ if(gen_pool_add_virt(achan->desc_pool, (long)adev->desc_base, DESC_BUF_BASE, -+ DESC_BUF_SIZE, -1) != 0) { -+ pr_err("gen_pool_add mem error!\n"); -+ gen_pool_destroy(achan->desc_pool); -+ return -ENOMEM; -+ } -+ -+ achan->status = DMA_COMPLETE; -+ achan->dir = 0; -+ achan->dev_addr = 0; -+ return 1; -+} -+ -+static void adma_free_desc_list(struct adma_ch *chan, -+ struct list_head *list) -+{ -+ struct adma_desc_sw *desc, *_desc; -+ -+ list_for_each_entry_safe(desc, _desc, list, node) { -+ list_del(&desc->node); -+ 
gen_pool_free(chan->desc_pool, (long)desc, sizeof(struct adma_desc_sw)); -+ } -+} -+ -+static void adma_free_chan_resources(struct dma_chan *dchan) -+{ -+ struct adma_ch *achan = to_adma_chan(dchan); -+ struct adma_dev *adev = to_adma_dev(achan->chan.device); -+ unsigned long flags; -+ -+ spin_lock_irqsave(&achan->desc_lock, flags); -+ adma_free_desc_list(achan, &achan->chain_pending); -+ adma_free_desc_list(achan, &achan->chain_running); -+ spin_unlock_irqrestore(&achan->desc_lock, flags); -+ gen_pool_destroy(achan->desc_pool); -+ achan->desc_pool = NULL; -+ achan->status = DMA_COMPLETE; -+ achan->dir = 0; -+ achan->dev_addr = 0; -+ spin_lock_irqsave(&adev->phy_lock, flags); -+ spin_unlock_irqrestore(&adev->phy_lock, flags); -+ return; -+} -+ -+static struct adma_desc_sw *alloc_descriptor(struct adma_ch *achan) -+{ -+ struct adma_desc_sw *desc; -+ dma_addr_t pdesc; -+ -+ desc = (struct adma_desc_sw*)gen_pool_alloc(achan->desc_pool, sizeof(struct adma_desc_sw)); -+ if (!desc) { -+ dev_err(achan->dev, "out of memory for link descriptor\n"); -+ return NULL; -+ } -+ memset(desc, 0, sizeof(struct adma_desc_sw)); -+ pdesc = (dma_addr_t)gen_pool_virt_to_phys(achan->desc_pool, (long)desc); -+ -+ INIT_LIST_HEAD(&desc->tx_list); -+ dma_async_tx_descriptor_init(&desc->async_tx, &achan->chan); -+ desc->async_tx.tx_submit = adma_tx_submit; -+ desc->async_tx.phys = pdesc; -+ return desc; -+} -+ -+static struct dma_async_tx_descriptor * -+adma_prep_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr, -+ size_t len, size_t period_len, -+ enum dma_transfer_direction direction, -+ unsigned long flags) -+{ -+ struct adma_ch *achan; -+ struct adma_desc_sw *first = NULL, *prev = NULL, *new; -+ dma_addr_t adma_src, adma_dst; -+ -+ achan = to_adma_chan(dchan); -+ -+ switch(direction) { -+ case DMA_MEM_TO_DEV: -+ adma_src = buf_addr & 0xffffffff; -+ achan->dev_addr = achan->slave_config.dst_addr; -+ adma_dst = achan->dev_addr; -+ break; -+ case DMA_DEV_TO_MEM: -+ adma_dst = buf_addr & 0xffffffff; -+ achan->dev_addr = achan->slave_config.src_addr; -+ adma_src = achan->dev_addr; -+ break; -+ default: -+ dev_err(achan->dev, "Unsupported direction for cyclic DMA\n"); -+ return NULL; -+ } -+ achan->dir = direction; -+ do { -+ new = alloc_descriptor(achan); -+ if(!new) { -+ dev_err(achan->dev, "no memory for desc\n"); -+ -+ } -+ new->desc.byte_cnt = period_len; -+ new->desc.src_addr = adma_src; -+ new->desc.dst_addr = adma_dst; -+ if(!first) -+ first = new; -+ else -+ prev->desc.nxt_desc = new->async_tx.phys; -+ new->async_tx.cookie = 0; -+ prev = new; -+ len -= period_len; -+ -+ if(achan->dir == DMA_MEM_TO_DEV) -+ adma_src += period_len; -+ else -+ adma_dst += period_len; -+ list_add_tail(&new->node, &first->tx_list); -+ }while(len); -+ -+ first->async_tx.flags = flags; -+ first->async_tx.cookie = -EBUSY; -+ new->desc.nxt_desc = first->async_tx.phys; -+ achan->cyclic_first = first; -+ return &first->async_tx; -+} -+ -+static int adma_config(struct dma_chan *dchan, -+ struct dma_slave_config *cfg) -+{ -+ struct adma_ch *achan = to_adma_chan(dchan); -+ -+ memcpy(&achan->slave_config, cfg, sizeof(*cfg)); -+ return 0; -+} -+ -+static void set_desc(struct adma_pchan *phy, dma_addr_t addr) -+{ -+ adma_ch_write_reg(phy, NDR, addr); -+} -+ -+static void set_ctrl_reg(struct adma_pchan *phy) -+{ -+ u32 ctrl_reg_val; -+ u32 maxburst = 0, sample_bits = 0; -+ enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; -+ struct adma_ch *achan = phy->vchan; -+ -+ if(achan->dir == DMA_MEM_TO_DEV) { -+ maxburst = 
achan->slave_config.dst_maxburst; -+ width = achan->slave_config.dst_addr_width; -+ ctrl_reg_val |= ADMA_DEST_ADDR_HOLD | ADMA_SRC_ADDR_INCREMENT; -+ } -+ else if(achan->dir == DMA_DEV_TO_MEM) { -+ maxburst = achan->slave_config.src_maxburst; -+ width = achan->slave_config.src_addr_width; -+ ctrl_reg_val |= ADMA_SRC_ADDR_HOLD | ADMA_DEST_ADDR_INCREMENT; -+ } -+ else -+ ctrl_reg_val |= ADMA_SRC_ADDR_HOLD | ADMA_DEST_ADDR_HOLD; -+ -+ if(width == DMA_SLAVE_BUSWIDTH_1_BYTE) -+ sample_bits = AUDIO_SAMPLE_WORD_8BITS; -+ else if(width == DMA_SLAVE_BUSWIDTH_2_BYTES) -+ sample_bits = AUDIO_SAMPLE_WORD_16BITS; -+ else if(width == DMA_SLAVE_BUSWIDTH_3_BYTES) -+ sample_bits = AUDIO_SAMPLE_WORD_24BITS; -+ else if(width == DMA_SLAVE_BUSWIDTH_4_BYTES) -+ sample_bits = AUDIO_SAMPLE_WORD_32BITS; -+ ctrl_reg_val |= ADMA_SAMPLE_BITS(sample_bits); -+ -+ /*no burst function information,default 0*/ -+ ctrl_reg_val |= ADMA_BURST_LIMIT(0); -+ ctrl_reg_val |= ADMA_CH_ABORT; -+ if(achan->unpack_sample) -+ ctrl_reg_val |= ADMA_UNPACK_SAMPLES; -+ adma_ch_write_reg(phy, DCR, ctrl_reg_val); -+ if(!achan->unpack_sample) -+ writel(HDMI_ENABLE, phy->ctrl_base); -+} -+ -+static void enable_chan(struct adma_pchan *phy) -+{ -+ u32 ctrl_val; -+ struct adma_ch *achan = phy->vchan; -+ -+ if(achan->dir == DMA_MEM_TO_DEV) -+ adma_ch_write_reg(phy, DAR, achan->dev_addr); -+ else if(achan->dir == DMA_DEV_TO_MEM) -+ adma_ch_write_reg(phy, SAR, achan->dev_addr); -+ adma_ch_write_reg(phy, IER, 1); -+ ctrl_val = adma_ch_read_reg(phy, DCR); -+ ctrl_val |= ADMA_FETCH_NEXT_DESC; -+ ctrl_val |= ADMA_CH_EN; -+ adma_ch_write_reg(phy, DCR, ctrl_val); -+} -+ -+static void start_pending_queue(struct adma_ch *achan) -+{ -+ struct adma_dev *adev = to_adma_dev(achan->chan.device); -+ struct adma_pchan *phy; -+ struct adma_desc_sw *desc; -+ unsigned long flags; -+ -+ if(achan->status == DMA_IN_PROGRESS) { -+ dev_dbg(achan->dev, "DMA controller still busy\n"); -+ return; -+ } -+ spin_lock_irqsave(&adev->phy_lock, flags); -+ phy = achan->phy; -+ desc = list_first_entry(&achan->chain_pending, -+ struct adma_desc_sw, node); -+ list_splice_tail_init(&achan->chain_pending, &achan->chain_running); -+ set_desc(phy, desc->async_tx.phys); -+ set_ctrl_reg(phy); -+ enable_chan(phy); -+ spin_unlock_irqrestore(&adev->phy_lock, flags); -+ achan->status = DMA_IN_PROGRESS; -+} -+ -+static void adma_issue_pending(struct dma_chan *dchan) -+{ -+ struct adma_ch *achan = to_adma_chan(dchan); -+ unsigned long flags; -+ -+ spin_lock_irqsave(&achan->desc_lock, flags); -+ start_pending_queue(achan); -+ spin_unlock_irqrestore(&achan->desc_lock, flags); -+} -+ -+static enum dma_status adma_tx_status(struct dma_chan *dchan, -+ dma_cookie_t cookie, -+ struct dma_tx_state *txstate) -+{ -+ /*struct adma_ch *chan = to_adma_chan(dchan); -+ enum dma_status ret; -+ unsigned long flags; -+ spin_lock_irqsave(&chan->desc_lock, flags); -+ ret = dma_cookie_status(dchan, cookie, txstate); -+ if (likely(ret != DMA_ERROR)) -+ dma_set_residue(txstate, mmp_pdma_residue(chan, cookie)); -+ spin_unlock_irqrestore(&chan->desc_lock, flags); -+ if (ret == DMA_COMPLETE) -+ return ret; -+ else -+ return chan->status;*/ -+ return 0; -+} -+ -+static void disable_chan(struct adma_pchan *phy) -+{ -+ u32 reg_val = adma_ch_read_reg(phy,DCR); -+ reg_val |= ADMA_CH_ABORT; -+ adma_ch_write_reg(phy, DCR, reg_val); -+ -+ udelay(500); -+ reg_val = adma_ch_read_reg(phy, DCR); -+ reg_val &= ~ADMA_CH_EN; -+ adma_ch_write_reg(phy, DCR, reg_val); -+ adma_ch_write_reg(phy, IER, 0); -+ if((!phy->vchan->unpack_sample) && 
((readl(phy->ctrl_base) & HDMI_ENABLE) == 0x1)) -+ writel(HDMI_DISABLE, phy->ctrl_base); -+} -+ -+static int adma_terminate_all(struct dma_chan *dchan) -+{ -+ struct adma_ch *achan = to_adma_chan(dchan); -+ struct adma_dev *adev = to_adma_dev(achan->chan.device); -+ unsigned long flags; -+ -+ spin_lock_irqsave(&achan->desc_lock, flags); -+ disable_chan(achan->phy); -+ achan->status = DMA_COMPLETE; -+ spin_lock_irqsave(&adev->phy_lock, flags); -+ spin_unlock_irqrestore(&adev->phy_lock, flags); -+ -+ adma_free_desc_list(achan, &achan->chain_pending); -+ adma_free_desc_list(achan, &achan->chain_running); -+ //achan->bytes_residue = 0; -+ -+ spin_unlock_irqrestore(&achan->desc_lock, flags); -+ return 0; -+} -+ -+static struct dma_chan *adma_dma_xlate(struct of_phandle_args *dma_spec, -+ struct of_dma *ofdma) -+{ -+ struct adma_dev *d = ofdma->of_dma_data; -+ struct dma_chan *chan; -+ -+ chan = dma_get_any_slave_channel(&d->device); -+ if (!chan) -+ return NULL; -+ return chan; -+} -+ -+static const struct of_device_id adma_id_table[] = { -+ { .compatible = "spacemit,k1x-adma", .data =(void *)&private_data[0] }, -+ {}, -+}; -+ -+static int adma_probe(struct platform_device *pdev) -+{ -+ struct adma_dev *adev; -+ struct device *dev; -+ const struct of_device_id *of_id; -+ struct rpmsg_device *rpdev; -+ struct instance_data *idata; -+ struct adma_pchan *phy; -+ struct adma_ch *achan; -+ int ret; -+ const enum dma_slave_buswidth widths = -+ DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | -+ DMA_SLAVE_BUSWIDTH_3_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES; -+ -+ of_id = of_match_device(adma_id_table, &pdev->dev); -+ if (!of_id) { -+ pr_err("Unable to match OF ID\n"); -+ return -ENODEV; -+ } -+ idata = (struct instance_data *)((unsigned long long *)(of_id->data))[0]; -+ rpdev = idata->rpdev; -+ ret = rpmsg_send(rpdev->ept, STARTUP_MSG, strlen(STARTUP_MSG)); -+ if (ret) { -+ dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret); -+ return ret; -+ } -+ -+ /*get controller dts info*/ -+ dev = &pdev->dev; -+ adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL); -+ adev->dev = dev; -+ adev->base = devm_platform_ioremap_resource_byname(pdev, "adma_reg"); -+ if(IS_ERR(adev->base)) -+ return PTR_ERR(adev->base); -+ adev->ctrl_base = devm_platform_ioremap_resource_byname(pdev, "ctrl_reg"); -+ if(IS_ERR(adev->ctrl_base)) -+ return PTR_ERR(adev->ctrl_base); -+ adev->desc_base = devm_platform_ioremap_resource_byname(pdev, "buf_addr"); -+ if(IS_ERR(adev->desc_base)) -+ return PTR_ERR(adev->desc_base); -+ /*if(of_property_read_u32(pdev->dev->of_node, "max-burst-size", &adev->max_burst_size)) -+ adev->max_burst_size = DEFAULT_MAX_BURST_SIZE;*/ -+ -+ /*init adma-chan*/ -+ INIT_LIST_HEAD(&adev->device.channels); -+ achan = devm_kzalloc(dev, sizeof(struct adma_ch), GFP_KERNEL); -+ if(achan == NULL) -+ return -ENOMEM; -+ phy = devm_kzalloc(dev, sizeof(struct adma_pchan), GFP_KERNEL); -+ phy->base = adev->base; -+ phy->ctrl_base = adev->ctrl_base; -+ phy->vchan = achan; -+ achan->phy = phy; -+ achan->dev = adev->dev; -+ achan->chan.device = &adev->device; -+ spin_lock_init(&achan->desc_lock); -+ spin_lock_init(&adev->phy_lock); -+ INIT_LIST_HEAD(&achan->chain_pending); -+ INIT_LIST_HEAD(&achan->chain_running); -+ achan->status = DMA_COMPLETE; -+ achan->unpack_sample = !of_property_read_bool(pdev->dev.of_node, "hdmi-sample"); -+ -+ /* register virt channel to dma engine */ -+ list_add_tail(&achan->chan.device_node, &adev->device.channels); -+ idata->achan = achan; -+ -+ dma_cap_set(DMA_SLAVE, adev->device.cap_mask); -+ 
dma_cap_set(DMA_CYCLIC, adev->device.cap_mask); -+ adev->device.dev = dev; -+ adev->device.device_tx_status = adma_tx_status; -+ adev->device.device_alloc_chan_resources = adma_alloc_chan_resources; -+ adev->device.device_free_chan_resources = adma_free_chan_resources; -+ adev->device.device_prep_dma_cyclic = adma_prep_cyclic; -+ adev->device.device_issue_pending = adma_issue_pending; -+ adev->device.device_config = adma_config; -+ adev->device.device_terminate_all = adma_terminate_all; -+ adev->device.copy_align = DMAENGINE_ALIGN_8_BYTES; -+ adev->device.src_addr_widths = widths; -+ adev->device.dst_addr_widths = widths; -+ adev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); -+ -+ dma_set_mask(adev->dev, adev->dev->coherent_dma_mask); -+ -+ ret = dma_async_device_register(&adev->device); -+ if(ret) { -+ dev_err(adev->device.dev, "unable to register\n"); -+ return ret; -+ } -+ -+ if(pdev->dev.of_node) { -+ ret = of_dma_controller_register(pdev->dev.of_node, -+ adma_dma_xlate,adev); -+ if(ret < 0){ -+ dev_err(dev, "of_dma_controller_register failed\n"); -+ dma_async_device_unregister(&adev->device); -+ return ret; -+ } -+ } -+ -+ platform_set_drvdata(pdev, adev); -+ return 0; -+} -+ -+static int adma_remove(struct platform_device *pdev) -+{ -+ struct adma_dev *adev = platform_get_drvdata(pdev);; -+ -+ if(pdev->dev.of_node) -+ of_dma_controller_free(pdev->dev.of_node); -+ dma_async_device_unregister(&adev->device); -+ platform_set_drvdata(pdev, NULL); -+ return 0; -+} -+ -+static struct platform_driver adma_driver = { -+ .driver = { -+ .name = "k1x-adma", -+ .of_match_table = adma_id_table, -+ }, -+ .probe = adma_probe, -+ .remove = adma_remove, -+}; -+ -+static struct rpmsg_device_id rpmsg_driver_adma_id_table[] = { -+ { .name = "adma-service", .driver_data = 0 }, -+ { }, -+}; -+MODULE_DEVICE_TABLE(rpmsg, rpmsg_driver_adma_id_table); -+ -+static int rpmsg_adma_client_cb(struct rpmsg_device *rpdev, void *data, -+ int len, void *priv, u32 src) -+{ -+ struct instance_data *idata = dev_get_drvdata(&rpdev->dev); -+ struct adma_ch *chan = idata->achan; -+ -+#if 0 -+ if (strcmp(data, STARTUP_OK_MSG) == 0) { -+ dev_info(&rpdev->dev, "channel: 0x%x -> 0x%x startup ok!\n", -+ rpdev->src, rpdev->dst); -+ } -+ -+ if (strcmp(data, "#") == 0) { -+#endif -+ /* adma irq happend */ -+ struct adma_desc_sw *desc; -+ LIST_HEAD(chain_cleanup); -+ unsigned long flags; -+ struct dmaengine_desc_callback cb; -+ -+ spin_lock_irqsave(&chan->desc_lock, flags); -+ if (chan->status == DMA_COMPLETE) { -+ spin_unlock_irqrestore(&chan->desc_lock, flags); -+ return 0; -+ } -+ spin_unlock_irqrestore(&chan->desc_lock, flags); -+ -+ spin_lock_irqsave(&chan->desc_lock, flags); -+ desc = chan->cyclic_first; -+ dmaengine_desc_get_callback(&desc->async_tx, &cb); -+ spin_unlock_irqrestore(&chan->desc_lock, flags); -+ -+ dmaengine_desc_callback_invoke(&cb, NULL); -+// } -+ -+ return 0; -+} -+ -+static int rpmsg_adma_client_probe(struct rpmsg_device *rpdev) -+{ -+ struct instance_data *idata; -+ -+ dev_info(&rpdev->dev, "new channel: 0x%x -> 0x%x!\n", -+ rpdev->src, rpdev->dst); -+ -+ idata = devm_kzalloc(&rpdev->dev, sizeof(*idata), GFP_KERNEL); -+ if (!idata) -+ return -ENOMEM; -+ -+ dev_set_drvdata(&rpdev->dev, idata); -+ idata->rpdev = rpdev; -+ -+ ((unsigned long long *)(adma_id_table[0].data))[0] = (unsigned long long)idata; -+ -+ platform_driver_register(&adma_driver); -+ -+ return 0; -+} -+ -+static void rpmsg_adma_client_remove(struct rpmsg_device *rpdev) -+{ -+ dev_info(&rpdev->dev, "rpmsg adma client 
driver is removed\n"); -+ platform_driver_unregister(&adma_driver); -+} -+ -+static struct rpmsg_driver rpmsg_adma_client = { -+ .drv.name = KBUILD_MODNAME, -+ .id_table = rpmsg_driver_adma_id_table, -+ .probe = rpmsg_adma_client_probe, -+ .callback = rpmsg_adma_client_cb, -+ .remove = rpmsg_adma_client_remove, -+}; -+module_rpmsg_driver(rpmsg_adma_client); -diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/bcm2835-dma.c -+++ b/drivers/dma/bcm2835-dma.c -@@ -878,7 +878,6 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec, - static int bcm2835_dma_probe(struct platform_device *pdev) - { - struct bcm2835_dmadev *od; -- struct resource *res; - void __iomem *base; - int rc; - int i, j; -@@ -902,8 +901,7 @@ static int bcm2835_dma_probe(struct platform_device *pdev) - - dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF); - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- base = devm_ioremap_resource(&pdev->dev, res); -+ base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(base)) - return PTR_ERR(base); - -diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/dma-axi-dmac.c -+++ b/drivers/dma/dma-axi-dmac.c -@@ -910,7 +910,6 @@ static int axi_dmac_probe(struct platform_device *pdev) - { - struct dma_device *dma_dev; - struct axi_dmac *dmac; -- struct resource *res; - struct regmap *regmap; - unsigned int version; - int ret; -@@ -925,8 +924,7 @@ static int axi_dmac_probe(struct platform_device *pdev) - if (dmac->irq == 0) - return -EINVAL; - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- dmac->base = devm_ioremap_resource(&pdev->dev, res); -+ dmac->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(dmac->base)) - return PTR_ERR(dmac->base); - --- -Armbian - -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Sat, 22 Jun 2024 07:54:04 -0400 -Subject: drivers: dma: dw-axi-dmac: dw-axi-dmac-platform.c - -Signed-off-by: Patrick Yavitz ---- - drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 70 +- - drivers/dma/dw-axi-dmac/dw-axi-dmac.h | 5 + - drivers/dma/fsl-edma.c | 8 +- - drivers/dma/fsl-qdma.c | 10 +- - drivers/dma/idma64.c | 4 +- - drivers/dma/img-mdc-dma.c | 4 +- - drivers/dma/imx-dma.c | 4 +- - drivers/dma/imx-sdma.c | 4 +- - drivers/dma/mcf-edma.c | 5 +- - drivers/dma/mediatek/mtk-hsdma.c | 4 +- - drivers/dma/mmp_pdma.c | 4 +- - drivers/dma/mmp_pdma_k1x.c | 1665 ++++++++++ - drivers/dma/mmp_tdma.c | 4 +- - drivers/dma/moxart-dma.c | 4 +- - drivers/dma/mv_xor_v2.c | 7 +- - drivers/dma/mxs-dma.c | 4 +- - drivers/dma/nbpfaxi.c | 4 +- - drivers/dma/pxa_dma.c | 4 +- - drivers/dma/qcom/bam_dma.c | 4 +- - drivers/dma/sf-pdma/sf-pdma.c | 4 +- - drivers/dma/sh/usb-dmac.c | 4 +- - drivers/dma/stm32-dmamux.c | 4 +- - drivers/dma/stm32-mdma.c | 4 +- - drivers/dma/sun4i-dma.c | 4 +- - drivers/dma/sun6i-dma.c | 4 +- - drivers/dma/tegra210-adma.c | 4 +- - drivers/dma/ti/cppi41.c | 10 +- - drivers/dma/ti/omap-dma.c | 4 +- - drivers/dma/udma.c | 432 +++ - drivers/dma/xilinx/zynqmp_dma.c | 4 +- - 30 files changed, 2194 insertions(+), 102 deletions(-) - -diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c -+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c -@@ -21,10 +21,12 @@ - #include - #include - 
#include -+#include - #include - #include - #include - #include -+#include - #include - #include - -@@ -46,6 +48,10 @@ - DMA_SLAVE_BUSWIDTH_32_BYTES | \ - DMA_SLAVE_BUSWIDTH_64_BYTES) - -+#define AXI_DMA_FLAG_HAS_APB_REGS BIT(0) -+#define AXI_DMA_FLAG_HAS_RESETS BIT(1) -+#define AXI_DMA_FLAG_USE_CFG2 BIT(2) -+ - static inline void - axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val) - { -@@ -86,7 +92,7 @@ static inline void axi_chan_config_write(struct axi_dma_chan *chan, - - cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS | - config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS); -- if (chan->chip->dw->hdata->reg_map_8_channels) { -+ if (!chan->chip->dw->hdata->use_cfg2) { - cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS | - config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS | - config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS | -@@ -606,6 +612,7 @@ static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan, - size_t block_ts; - u32 ctllo, ctlhi; - u32 burst_len; -+ u32 burst_trans_len; - - axi_block_ts = chan->chip->dw->hdata->block_size[chan->id]; - -@@ -669,8 +676,14 @@ static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan, - - hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1); - -- ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS | -- DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS; -+ if (chan->fixed_burst_trans_len == true) -+ burst_trans_len = chan->burst_trans_len; -+ else -+ burst_trans_len = DWAXIDMAC_BURST_TRANS_LEN_4; -+ -+ ctllo |= burst_trans_len << CH_CTL_L_DST_MSIZE_POS | -+ burst_trans_len << CH_CTL_L_SRC_MSIZE_POS; -+ - hw_desc->lli->ctl_lo = cpu_to_le32(ctllo); - - set_desc_src_master(hw_desc); -@@ -1138,7 +1151,7 @@ static int dma_chan_terminate_all(struct dma_chan *dchan) - axi_chan_disable(chan); - - ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val, -- !(val & chan_active), 1000, 10000); -+ !(val & chan_active), 1000, 50000); - if (ret == -ETIMEDOUT) - dev_warn(dchan2dev(dchan), - "%s failed to stop\n", axi_chan_name(chan)); -@@ -1290,6 +1303,13 @@ static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec, - - chan = dchan_to_axi_dma_chan(dchan); - chan->hw_handshake_num = dma_spec->args[0]; -+ -+ /*some per may need fixed-burst_trans_len*/ -+ if (dma_spec->args_count == 2 && dma_spec->args[1] > 0) { -+ chan->fixed_burst_trans_len = true; -+ chan->burst_trans_len = dma_spec->args[1]; -+ } -+ - return dchan; - } - -@@ -1360,16 +1380,24 @@ static int parse_device_properties(struct axi_dma_chip *chip) - chip->dw->hdata->axi_rw_burst_len = tmp; - } - -+ /* get number of handshak interface and configure multi reg */ -+ ret = device_property_read_u32(dev, "snps,num-hs-if", &tmp); -+ if (!ret) -+ chip->dw->hdata->nr_hs_if = tmp; -+ if (chip->dw->hdata->nr_channels > DMA_REG_MAP_CH_REF || -+ chip->dw->hdata->nr_hs_if > DMA_REG_MAP_HS_IF_REF) -+ chip->dw->hdata->use_cfg2 = true; -+ - return 0; - } - - static int dw_probe(struct platform_device *pdev) - { -- struct device_node *node = pdev->dev.of_node; - struct axi_dma_chip *chip; -- struct resource *mem; - struct dw_axi_dma *dw; - struct dw_axi_dma_hcfg *hdata; -+ struct reset_control *resets; -+ unsigned int flags; - u32 i; - int ret; - -@@ -1393,17 +1421,27 @@ static int dw_probe(struct platform_device *pdev) - if (chip->irq < 0) - return chip->irq; - -- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- chip->regs = devm_ioremap_resource(chip->dev, mem); -+ chip->regs = devm_platform_ioremap_resource(pdev, 0); - if 
(IS_ERR(chip->regs)) - return PTR_ERR(chip->regs); - -- if (of_device_is_compatible(node, "intel,kmb-axi-dma")) { -+ flags = (uintptr_t)of_device_get_match_data(&pdev->dev); -+ if (flags & AXI_DMA_FLAG_HAS_APB_REGS) { - chip->apb_regs = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(chip->apb_regs)) - return PTR_ERR(chip->apb_regs); - } - -+ if (flags & AXI_DMA_FLAG_HAS_RESETS) { -+ resets = devm_reset_control_array_get_exclusive(&pdev->dev); -+ if (IS_ERR(resets)) -+ return PTR_ERR(resets); -+ -+ ret = reset_control_deassert(resets); -+ if (ret) -+ return ret; -+ } -+ - chip->core_clk = devm_clk_get(chip->dev, "core-clk"); - if (IS_ERR(chip->core_clk)) - return PTR_ERR(chip->core_clk); -@@ -1554,8 +1592,18 @@ static const struct dev_pm_ops dw_axi_dma_pm_ops = { - }; - - static const struct of_device_id dw_dma_of_id_table[] = { -- { .compatible = "snps,axi-dma-1.01a" }, -- { .compatible = "intel,kmb-axi-dma" }, -+ { -+ .compatible = "snps,axi-dma-1.01a" -+ }, { -+ .compatible = "intel,kmb-axi-dma", -+ .data = (void *)AXI_DMA_FLAG_HAS_APB_REGS, -+ }, { -+ .compatible = "starfive,jh7110-axi-dma", -+ .data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2), -+ }, { -+ .compatible = "spacemit,k1pro-axi-dma", -+ .data = (void *)AXI_DMA_FLAG_HAS_RESETS, -+ }, - {} - }; - MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); -diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h -index 111111111111..222222222222 100644 ---- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h -+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h -@@ -25,6 +25,7 @@ - struct dw_axi_dma_hcfg { - u32 nr_channels; - u32 nr_masters; -+ u32 nr_hs_if; - u32 m_data_width; - u32 block_size[DMAC_MAX_CHANNELS]; - u32 priority[DMAC_MAX_CHANNELS]; -@@ -33,6 +34,7 @@ struct dw_axi_dma_hcfg { - /* Register map for DMAX_NUM_CHANNELS <= 8 */ - bool reg_map_8_channels; - bool restrict_axi_burst_len; -+ bool use_cfg2; - }; - - struct axi_dma_chan { -@@ -40,6 +42,7 @@ struct axi_dma_chan { - void __iomem *chan_regs; - u8 id; - u8 hw_handshake_num; -+ s8 burst_trans_len; - atomic_t descs_allocated; - - struct dma_pool *desc_pool; -@@ -48,6 +51,7 @@ struct axi_dma_chan { - struct axi_dma_desc *desc; - struct dma_slave_config config; - enum dma_transfer_direction direction; -+ bool fixed_burst_trans_len; - bool cyclic; - /* these other elements are all protected by vc.lock */ - bool is_paused; -@@ -204,6 +208,7 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan) - #define DMA_APB_HS_SEL_MASK 0xFF /* HW handshake select masks */ - #define MAX_BLOCK_SIZE 0x1000 /* 1024 blocks * 4 bytes data width */ - #define DMA_REG_MAP_CH_REF 0x08 /* Channel count to choose register map */ -+#define DMA_REG_MAP_HS_IF_REF 0x10 /* handshake num to choose register map */ - - /* DMAC_CFG */ - #define DMAC_EN_POS 0 -diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/fsl-edma.c -+++ b/drivers/dma/fsl-edma.c -@@ -272,7 +272,6 @@ static int fsl_edma_probe(struct platform_device *pdev) - const struct fsl_edma_drvdata *drvdata = NULL; - struct fsl_edma_chan *fsl_chan; - struct edma_regs *regs; -- struct resource *res; - int len, chans; - int ret, i; - -@@ -298,8 +297,7 @@ static int fsl_edma_probe(struct platform_device *pdev) - fsl_edma->n_chans = chans; - mutex_init(&fsl_edma->fsl_edma_mutex); - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res); -+ fsl_edma->membase = 
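For context on the dw-axi-dmac changes above: the alternate CH_CFG2 register layout is selected when either the channel count or the newly parsed handshake-interface count ("snps,num-hs-if") exceeds what the original register map can describe, using the two thresholds defined in dw-axi-dmac.h. The following is a minimal standalone sketch of that decision only; the function name is illustrative, not part of the driver.

#include <stdbool.h>
#include <stdio.h>

#define DMA_REG_MAP_CH_REF    0x08  /* channel count limit of the original register map */
#define DMA_REG_MAP_HS_IF_REF 0x10  /* handshake-interface limit of the original register map */

/* Hypothetical helper mirroring the check added to parse_device_properties(). */
static bool needs_cfg2(unsigned nr_channels, unsigned nr_hs_if)
{
    return nr_channels > DMA_REG_MAP_CH_REF || nr_hs_if > DMA_REG_MAP_HS_IF_REF;
}

int main(void)
{
    printf("8 ch, 16 hs  -> use_cfg2=%d\n", needs_cfg2(8, 16));   /* 0: original map suffices */
    printf("16 ch, 32 hs -> use_cfg2=%d\n", needs_cfg2(16, 32));  /* 1: CFG2 layout needed */
    return 0;
}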
devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(fsl_edma->membase)) - return PTR_ERR(fsl_edma->membase); - -@@ -323,8 +321,8 @@ static int fsl_edma_probe(struct platform_device *pdev) - for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) { - char clkname[32]; - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i); -- fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res); -+ fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev, -+ 1 + i); - if (IS_ERR(fsl_edma->muxbase[i])) { - /* on error: disable all previously enabled clks */ - fsl_disable_clocks(fsl_edma, i); -diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/fsl-qdma.c -+++ b/drivers/dma/fsl-qdma.c -@@ -1121,7 +1121,6 @@ static int fsl_qdma_probe(struct platform_device *pdev) - int ret, i; - int blk_num, blk_off; - u32 len, chans, queues; -- struct resource *res; - struct fsl_qdma_chan *fsl_chan; - struct fsl_qdma_engine *fsl_qdma; - struct device_node *np = pdev->dev.of_node; -@@ -1185,18 +1184,15 @@ static int fsl_qdma_probe(struct platform_device *pdev) - if (!fsl_qdma->status[i]) - return -ENOMEM; - } -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res); -+ fsl_qdma->ctrl_base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(fsl_qdma->ctrl_base)) - return PTR_ERR(fsl_qdma->ctrl_base); - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 1); -- fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res); -+ fsl_qdma->status_base = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(fsl_qdma->status_base)) - return PTR_ERR(fsl_qdma->status_base); - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 2); -- fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res); -+ fsl_qdma->block_base = devm_platform_ioremap_resource(pdev, 2); - if (IS_ERR(fsl_qdma->block_base)) - return PTR_ERR(fsl_qdma->block_base); - fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma); -diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/idma64.c -+++ b/drivers/dma/idma64.c -@@ -635,7 +635,6 @@ static int idma64_platform_probe(struct platform_device *pdev) - struct idma64_chip *chip; - struct device *dev = &pdev->dev; - struct device *sysdev = dev->parent; -- struct resource *mem; - int ret; - - chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); -@@ -646,8 +645,7 @@ static int idma64_platform_probe(struct platform_device *pdev) - if (chip->irq < 0) - return chip->irq; - -- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- chip->regs = devm_ioremap_resource(dev, mem); -+ chip->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(chip->regs)) - return PTR_ERR(chip->regs); - -diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/img-mdc-dma.c -+++ b/drivers/dma/img-mdc-dma.c -@@ -886,7 +886,6 @@ static int img_mdc_runtime_resume(struct device *dev) - static int mdc_dma_probe(struct platform_device *pdev) - { - struct mdc_dma *mdma; -- struct resource *res; - unsigned int i; - u32 val; - int ret; -@@ -898,8 +897,7 @@ static int mdc_dma_probe(struct platform_device *pdev) - - mdma->soc = of_device_get_match_data(&pdev->dev); - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- mdma->regs = devm_ioremap_resource(&pdev->dev, res); -+ mdma->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(mdma->regs)) - 
return PTR_ERR(mdma->regs); - -diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/imx-dma.c -+++ b/drivers/dma/imx-dma.c -@@ -1038,7 +1038,6 @@ static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec, - static int __init imxdma_probe(struct platform_device *pdev) - { - struct imxdma_engine *imxdma; -- struct resource *res; - int ret, i; - int irq, irq_err; - -@@ -1049,8 +1048,7 @@ static int __init imxdma_probe(struct platform_device *pdev) - imxdma->dev = &pdev->dev; - imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev); - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- imxdma->base = devm_ioremap_resource(&pdev->dev, res); -+ imxdma->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(imxdma->base)) - return PTR_ERR(imxdma->base); - -diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/imx-sdma.c -+++ b/drivers/dma/imx-sdma.c -@@ -2169,7 +2169,6 @@ static int sdma_probe(struct platform_device *pdev) - const char *fw_name; - int ret; - int irq; -- struct resource *iores; - struct resource spba_res; - int i; - struct sdma_engine *sdma; -@@ -2192,8 +2191,7 @@ static int sdma_probe(struct platform_device *pdev) - if (irq < 0) - return irq; - -- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- sdma->regs = devm_ioremap_resource(&pdev->dev, iores); -+ sdma->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(sdma->regs)) - return PTR_ERR(sdma->regs); - -diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/mcf-edma.c -+++ b/drivers/dma/mcf-edma.c -@@ -182,7 +182,6 @@ static int mcf_edma_probe(struct platform_device *pdev) - struct fsl_edma_engine *mcf_edma; - struct fsl_edma_chan *mcf_chan; - struct edma_regs *regs; -- struct resource *res; - int ret, i, len, chans; - - pdata = dev_get_platdata(&pdev->dev); -@@ -211,9 +210,7 @@ static int mcf_edma_probe(struct platform_device *pdev) - - mutex_init(&mcf_edma->fsl_edma_mutex); - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- -- mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res); -+ mcf_edma->membase = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(mcf_edma->membase)) - return PTR_ERR(mcf_edma->membase); - -diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/mediatek/mtk-hsdma.c -+++ b/drivers/dma/mediatek/mtk-hsdma.c -@@ -896,7 +896,6 @@ static int mtk_hsdma_probe(struct platform_device *pdev) - struct mtk_hsdma_device *hsdma; - struct mtk_hsdma_vchan *vc; - struct dma_device *dd; -- struct resource *res; - int i, err; - - hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL); -@@ -905,8 +904,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev) - - dd = &hsdma->ddev; - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- hsdma->base = devm_ioremap_resource(&pdev->dev, res); -+ hsdma->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(hsdma->base)) - return PTR_ERR(hsdma->base); - -diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/mmp_pdma.c -+++ b/drivers/dma/mmp_pdma.c -@@ -1022,7 +1022,6 @@ static int mmp_pdma_probe(struct platform_device *op) - struct mmp_pdma_device *pdev; - const struct of_device_id *of_id; - struct mmp_dma_platdata *pdata = 
dev_get_platdata(&op->dev); -- struct resource *iores; - int i, ret, irq = 0; - int dma_channels = 0, irq_num = 0; - const enum dma_slave_buswidth widths = -@@ -1037,8 +1036,7 @@ static int mmp_pdma_probe(struct platform_device *op) - - spin_lock_init(&pdev->phy_lock); - -- iores = platform_get_resource(op, IORESOURCE_MEM, 0); -- pdev->base = devm_ioremap_resource(pdev->dev, iores); -+ pdev->base = devm_platform_ioremap_resource(op, 0); - if (IS_ERR(pdev->base)) - return PTR_ERR(pdev->base); - -diff --git a/drivers/dma/mmp_pdma_k1x.c b/drivers/dma/mmp_pdma_k1x.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/dma/mmp_pdma_k1x.c -@@ -0,0 +1,1665 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Copyright 2012 Marvell International Ltd. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "dmaengine.h" -+ -+#define DDADRH(n) (0x0300 + ((n) << 4)) -+#define DSADRH(n) (0x0304 + ((n) << 4)) -+#define DTADRH(n) (0x0308 + ((n) << 4)) -+#define DCSR_LPAEEN BIT(21) /* Long Physical Address Extension enable */ -+#define DRCMR_INVALID 100 /* Max DMA request number + 1 */ -+#define DCMD_BURST64 (4 << 16) /* 64 byte burst */ -+ -+#define DCSR 0x0000 -+#define DALGN 0x00a0 -+#define DINT 0x00f0 -+#define DDADR 0x0200 -+#define DSADR(n) (0x0204 + ((n) << 4)) -+#define DTADR(n) (0x0208 + ((n) << 4)) -+#define DCMD 0x020c -+ -+#define DCSR_RUN BIT(31) /* Run Bit (read / write) */ -+#define DCSR_NODESC BIT(30) /* No-Descriptor Fetch (read / write) */ -+#define DCSR_STOPIRQEN BIT(29) /* Stop Interrupt Enable (read / write) */ -+#define DCSR_REQPEND BIT(8) /* Request Pending (read-only) */ -+#define DCSR_STOPSTATE BIT(3) /* Stop State (read-only) */ -+#define DCSR_ENDINTR BIT(2) /* End Interrupt (read / write) */ -+#define DCSR_STARTINTR BIT(1) /* Start Interrupt (read / write) */ -+#define DCSR_BUSERR BIT(0) /* Bus Error Interrupt (read / write) */ -+ -+#define DCSR_EORIRQEN BIT(28) /* End of Receive Interrupt Enable (R/W) */ -+#define DCSR_EORJMPEN BIT(27) /* Jump to next descriptor on EOR */ -+#define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */ -+#define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */ -+#define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */ -+#define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */ -+#define DCSR_EORINTR BIT(9) /* The end of Receive */ -+ -+#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2)) -+#define DRCMR_MAPVLD BIT(7) /* Map Valid (read / write) */ -+#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ -+ -+#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */ -+#define DDADR_STOP BIT(0) /* Stop (read / write) */ -+ -+#define DCMD_INCSRCADDR BIT(31) /* Source Address Increment Setting. */ -+#define DCMD_INCTRGADDR BIT(30) /* Target Address Increment Setting. */ -+#define DCMD_FLOWSRC BIT(29) /* Flow Control by the source. */ -+#define DCMD_FLOWTRG BIT(28) /* Flow Control by the target. */ -+#define DCMD_STARTIRQEN BIT(22) /* Start Interrupt Enable */ -+#define DCMD_ENDIRQEN BIT(21) /* End Interrupt Enable */ -+#define DCMD_ENDIAN BIT(18) /* Device Endian-ness. 
*/ -+#define DCMD_BURST8 (1 << 16) /* 8 byte burst */ -+#define DCMD_BURST16 (2 << 16) /* 16 byte burst */ -+#define DCMD_BURST32 (3 << 16) /* 32 byte burst */ -+#define DCMD_WIDTH1 (1 << 14) /* 1 byte width */ -+#define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */ -+#define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */ -+#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ -+ -+#define PDMA_MAX_DESC_BYTES DCMD_LENGTH -+ -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+struct mmp_pdma_desc_hw { -+ u32 ddadr; /* Points to the next descriptor + flags */ -+ u32 dsadr; /* DSADR value for the current transfer */ -+ u32 dtadr; /* DTADR value for the current transfer */ -+ u32 dcmd; /* DCMD value for the current transfer */ -+ u32 ddadrh; /* Points to the next descriptor + flags */ -+ u32 dsadrh; /* DSADR value for the current transfer */ -+ u32 dtadrh; /* DTADR value for the current transfer */ -+ u32 rsvd; /* DCMD value for the current transfer */ -+} __aligned(64); -+#else -+struct mmp_pdma_desc_hw { -+ u32 ddadr; /* Points to the next descriptor + flags */ -+ u32 dsadr; /* DSADR value for the current transfer */ -+ u32 dtadr; /* DTADR value for the current transfer */ -+ u32 dcmd; /* DCMD value for the current transfer */ -+} __aligned(32); -+#endif -+ -+struct mmp_pdma_desc_sw { -+ struct mmp_pdma_desc_hw desc; -+ struct list_head node; -+ struct list_head tx_list; -+ struct dma_async_tx_descriptor async_tx; -+}; -+ -+struct mmp_pdma_phy; -+ -+struct mmp_pdma_chan { -+ struct device *dev; -+ struct dma_chan chan; -+ struct dma_async_tx_descriptor desc; -+ struct mmp_pdma_phy *phy; -+ enum dma_transfer_direction dir; -+ struct dma_slave_config slave_config; -+ -+ struct mmp_pdma_desc_sw *cyclic_first; /* first desc_sw if channel -+ * is in cyclic mode */ -+ -+ /* channel's basic info */ -+ struct tasklet_struct tasklet; -+ u32 dcmd; -+ u32 drcmr; -+ u32 dev_addr; -+ -+ /* list for desc */ -+ spinlock_t desc_lock; /* Descriptor list lock */ -+ struct list_head chain_pending; /* Link descriptors queue for pending */ -+ struct list_head chain_running; /* Link descriptors queue for running */ -+ bool idle; /* channel statue machine */ -+ bool byte_align; -+ -+ int user_do_qos; -+ int qos_count; /* Per-channel qos count */ -+ enum dma_status status; /* channel state machine */ -+ u32 bytes_residue; -+ -+ struct dma_pool *desc_pool; /* Descriptors pool */ -+}; -+ -+struct mmp_pdma_phy { -+ int idx; -+ void __iomem *base; -+ struct mmp_pdma_chan *vchan; -+}; -+ -+struct reserved_chan{ -+ int chan_id; -+ int drcmr; -+}; -+ -+struct mmp_pdma_device { -+ int dma_channels; -+ int nr_reserved_channels; -+ struct reserved_chan *reserved_channels; -+ s32 lpm_qos; -+ struct clk *clk; -+ struct reset_control *resets; -+ int max_burst_size; -+ void __iomem *base; -+ struct device *dev; -+ struct dma_device device; -+ struct mmp_pdma_phy *phy; -+ spinlock_t phy_lock; /* protect alloc/free phy channels */ -+}; -+ -+#define tx_to_mmp_pdma_desc(tx) \ -+ container_of(tx, struct mmp_pdma_desc_sw, async_tx) -+#define to_mmp_pdma_desc(lh) \ -+ container_of(lh, struct mmp_pdma_desc_sw, node) -+#define to_mmp_pdma_chan(dchan) \ -+ container_of(dchan, struct mmp_pdma_chan, chan) -+#define to_mmp_pdma_dev(dmadev) \ -+ container_of(dmadev, struct mmp_pdma_device, device) -+ -+static void mmp_pdma_qos_get(struct mmp_pdma_chan *chan); -+static void mmp_pdma_qos_put(struct mmp_pdma_chan *chan); -+ -+#define QSPI_PHY_CHAN 15 -+ -+static int mmp_pdma_config_write(struct dma_chan *dchan, -+ struct dma_slave_config 
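The slave-config path later in this patch (mmp_pdma_config_write) folds direction, bus width and burst size into a single DCMD word using the bit fields defined above, and the prep routines then OR in the per-descriptor length. A minimal standalone sketch of that encoding for the DEV_TO_MEM case follows; the helper name and values are illustrative, not part of the driver.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bit layout copied from the defines in this patch. */
#define DCMD_INCTRGADDR (1u << 30)
#define DCMD_FLOWSRC    (1u << 29)
#define DCMD_BURST8     (1u << 16)
#define DCMD_BURST16    (2u << 16)
#define DCMD_BURST32    (3u << 16)
#define DCMD_WIDTH1     (1u << 14)
#define DCMD_WIDTH2     (2u << 14)
#define DCMD_WIDTH4     (3u << 14)
#define DCMD_LENGTH     0x1fffu

/* Hypothetical helper: DEV_TO_MEM branch of the config path, plus segment length. */
static uint32_t build_dcmd_dev_to_mem(unsigned width, unsigned maxburst, size_t len)
{
    uint32_t dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;  /* increment target, source flow control */

    dcmd |= (width == 1) ? DCMD_WIDTH1 :
            (width == 2) ? DCMD_WIDTH2 : DCMD_WIDTH4;
    dcmd |= (maxburst == 8)  ? DCMD_BURST8 :
            (maxburst == 16) ? DCMD_BURST16 : DCMD_BURST32;
    return dcmd | (DCMD_LENGTH & (uint32_t)len);
}

int main(void)
{
    /* e.g. 4-byte bus width, 32-byte bursts, 4 KiB segment */
    printf("DCMD = 0x%08x\n", build_dcmd_dev_to_mem(4, 32, 4096));
    return 0;
}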
*cfg, -+ enum dma_transfer_direction direction); -+ -+static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) -+{ -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+ u32 ddadrh; -+#endif -+ u32 reg = (phy->idx << 4) + DDADR; -+ -+ writel(addr & 0xffffffff, phy->base + reg); -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+ /* config higher bits for desc address */ -+ ddadrh = (addr >> 32); -+ writel(ddadrh, phy->base + DDADRH(phy->idx)); -+#endif -+} -+ -+static void enable_chan(struct mmp_pdma_phy *phy) -+{ -+ u32 reg, dalgn; -+ u32 dcsr; -+ unsigned long flags; -+ struct mmp_pdma_device *pdev; -+ -+ if (phy == NULL) -+ return; -+ -+ if (!phy->vchan) -+ return; -+ -+ pdev = to_mmp_pdma_dev(phy->vchan->chan.device); -+ -+ spin_lock_irqsave(&pdev->phy_lock, flags); -+ -+ reg = DRCMR(phy->vchan->drcmr); -+ writel(DRCMR_MAPVLD | phy->idx, phy->base + reg); -+ -+ dalgn = readl(phy->base + DALGN); -+ if (phy->vchan->byte_align) -+ dalgn |= 1 << phy->idx; -+ else -+ dalgn &= ~(1 << phy->idx); -+ writel(dalgn, phy->base + DALGN); -+ -+ reg = (phy->idx << 2) + DCSR; -+ -+ dcsr = readl(phy->base + reg); -+ dcsr |= (DCSR_RUN | DCSR_EORIRQEN | DCSR_EORSTOPEN); -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+ /* use long descriptor mode: set DCSR_LPAEEN bit */ -+ dcsr |= DCSR_LPAEEN; -+#endif -+ writel(dcsr, phy->base + reg); -+ -+ spin_unlock_irqrestore(&pdev->phy_lock, flags); -+} -+ -+static void disable_chan(struct mmp_pdma_phy *phy) -+{ -+ u32 reg; -+ u32 dcsr, cnt = 1000; -+ -+ if (!phy) -+ return; -+ -+ reg = (phy->idx << 2) + DCSR; -+ -+ dcsr = readl(phy->base + reg); -+ dcsr &= ~(DCSR_RUN | DCSR_EORIRQEN | DCSR_EORSTOPEN); -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+ /* use long descriptor mode: set DCSR_LPAEEN bit */ -+ dcsr &= ~DCSR_LPAEEN; -+#endif -+ writel(dcsr, phy->base + reg); -+ -+ /* ensure dma is stopped. 
*/ -+ dcsr = readl(phy->base + reg); -+ while (!(dcsr & (0x1 << 3)) && --cnt) { -+ udelay(10); -+ dcsr = readl(phy->base + reg); -+ } -+ -+ WARN_ON(!cnt); -+} -+ -+static int clear_chan_irq(struct mmp_pdma_phy *phy) -+{ -+ u32 dcsr; -+ u32 dint = readl(phy->base + DINT); -+ u32 reg = (phy->idx << 2) + DCSR; -+ -+ if (!(dint & BIT(phy->idx))) -+ return -EAGAIN; -+ -+ /* clear irq */ -+ dcsr = readl(phy->base + reg); -+ writel(dcsr, phy->base + reg); -+ if ((dcsr & DCSR_BUSERR) && (phy->vchan)) -+ dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); -+ -+ return 0; -+} -+ -+static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id) -+{ -+ struct mmp_pdma_phy *phy = dev_id; -+ struct mmp_pdma_chan *pchan = phy->vchan; -+ -+ if (clear_chan_irq(phy) != 0) -+ return IRQ_NONE; -+ -+ if (pchan) -+ tasklet_schedule(&pchan->tasklet); -+ -+ return IRQ_HANDLED; -+} -+ -+static bool is_channel_reserved(struct mmp_pdma_device *pdev, int chan_id) -+{ -+ int i; -+ -+ for (i = 0; i < pdev->nr_reserved_channels; i++) { -+ if (chan_id == pdev->reserved_channels[i].chan_id) -+ return true; -+ } -+ -+ return false; -+} -+ -+static struct mmp_pdma_phy * lookup_phy_for_drcmr(struct mmp_pdma_device *pdev, int drcmr) -+{ -+ int i; -+ int chan_id; -+ struct mmp_pdma_phy *phy; -+ -+ for (i = 0; i < pdev->nr_reserved_channels; i++) { -+ if (drcmr == pdev->reserved_channels[i].drcmr) { -+ chan_id = pdev->reserved_channels[i].chan_id; -+ phy = &pdev->phy[chan_id]; -+ return phy; -+ } -+ } -+ -+ return NULL; -+} -+ -+static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) -+{ -+ struct mmp_pdma_device *pdev = dev_id; -+ struct mmp_pdma_phy *phy; -+ u32 dint = readl(pdev->base + DINT); -+ int i, ret; -+ int irq_num = 0; -+ unsigned long flags; -+ -+ while (dint) { -+ i = __ffs(dint); -+ /* only handle interrupts belonging to pdma driver*/ -+ if (i >= pdev->dma_channels) -+ break; -+ -+ dint &= (dint - 1); -+ phy = &pdev->phy[i]; -+ spin_lock_irqsave(&pdev->phy_lock, flags); -+ -+ ret = mmp_pdma_chan_handler(irq, phy); -+ -+ spin_unlock_irqrestore(&pdev->phy_lock, flags); -+ if (ret == IRQ_HANDLED) -+ irq_num++; -+ } -+ -+ if (irq_num) -+ return IRQ_HANDLED; -+ -+ return IRQ_NONE; -+} -+ -+/* lookup free phy channel as descending priority */ -+static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan) -+{ -+ int prio, i; -+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device); -+ struct mmp_pdma_phy *phy, *found = NULL; -+ unsigned long flags; -+ -+ /* -+ * dma channel priorities -+ * ch 0 - 3, 16 - 19 <--> (0) -+ * ch 4 - 7, 20 - 23 <--> (1) -+ * ch 8 - 11, 24 - 27 <--> (2) -+ * ch 12 - 15, 28 - 31 <--> (3) -+ */ -+ -+ spin_lock_irqsave(&pdev->phy_lock, flags); -+ -+ phy = lookup_phy_for_drcmr(pdev, pchan->drcmr); -+ -+ if (phy != NULL) { -+ if (!phy->vchan) { -+ phy->vchan = pchan; -+ found = phy; -+ } -+ -+ goto out_unlock; -+ } -+ -+ for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) { -+ for (i = 0; i < pdev->dma_channels; i++) { -+ if (prio != (i & 0xf) >> 2) -+ continue; -+ -+ if (is_channel_reserved(pdev, i)) -+ continue; -+ phy = &pdev->phy[i]; -+ if (!phy->vchan) { -+ phy->vchan = pchan; -+ found = phy; -+ goto out_unlock; -+ } -+ } -+ } -+ -+out_unlock: -+ spin_unlock_irqrestore(&pdev->phy_lock, flags); -+ return found; -+} -+ -+static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan) -+{ -+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device); -+ unsigned long flags; -+ u32 reg; -+ -+ if (!pchan->phy) -+ return; -+ -+ /* clear the channel mapping in 
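lookup_phy() above walks the physical channels in ascending priority groups, deriving a channel's priority from its index as (i & 0xf) >> 2, so channels 0-3 and 16-19 share priority 0, 4-7 and 20-23 share priority 1, and so on. A tiny standalone print-out of that mapping for the default 32 channels; illustrative only.

#include <stdio.h>

int main(void)
{
    /* Same formula as the comment and loop in lookup_phy(). */
    for (int i = 0; i < 32; i++)
        printf("channel %2d -> priority %d\n", i, (i & 0xf) >> 2);
    return 0;
}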
DRCMR */ -+ reg = DRCMR(pchan->drcmr); -+ writel(0, pchan->phy->base + reg); -+ -+ spin_lock_irqsave(&pdev->phy_lock, flags); -+ pchan->phy->vchan = NULL; -+ pchan->phy = NULL; -+ -+ spin_unlock_irqrestore(&pdev->phy_lock, flags); -+} -+ -+/* -+ * start_pending_queue - transfer any pending transactions -+ * pending list ==> running list -+ */ -+static int start_pending_queue(struct mmp_pdma_chan *chan) -+{ -+ struct mmp_pdma_desc_sw *desc; -+ struct mmp_pdma_desc_sw *_desc; -+ -+ /* still in running, irq will start the pending list */ -+ if (chan->status == DMA_IN_PROGRESS) { -+ dev_dbg(chan->dev, "DMA controller still busy\n"); -+ return -1; -+ } -+ -+ if (list_empty(&chan->chain_pending)) { -+ /* chance to re-fetch phy channel with higher prio */ -+ mmp_pdma_free_phy(chan); -+ dev_dbg(chan->dev, "no pending list\n"); -+ -+ return -1; -+ } -+ -+ if (!chan->phy) { -+ chan->phy = lookup_phy(chan); -+ if (!chan->phy) { -+ dev_dbg(chan->dev, "no free dma channel\n"); -+ -+ return -1; -+ } -+ } -+ -+ /* -+ * pending -> running -+ * reintilize pending list -+ */ -+ list_for_each_entry_safe(desc, _desc, &chan->chain_pending, node) { -+ list_del(&desc->node); -+ list_add_tail(&desc->node, &chan->chain_running); -+ if (desc->desc.ddadr & DDADR_STOP) -+ break; -+ } -+ -+ desc = list_first_entry(&chan->chain_running, -+ struct mmp_pdma_desc_sw, node); -+ -+ /* -+ * Program the descriptor's address into the DMA controller, -+ * then start the DMA transaction -+ */ -+ set_desc(chan->phy, desc->async_tx.phys); -+ enable_chan(chan->phy); -+ chan->idle = false; -+ chan->status = DMA_IN_PROGRESS; -+ chan->bytes_residue = 0; -+ return 0; -+} -+ -+ -+/* desc->tx_list ==> pending list */ -+static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx) -+{ -+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan); -+ struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx); -+ struct mmp_pdma_desc_sw *child; -+ unsigned long flags; -+ dma_cookie_t cookie = -EBUSY; -+ -+ spin_lock_irqsave(&chan->desc_lock, flags); -+ -+ list_for_each_entry(child, &desc->tx_list, node) { -+ cookie = dma_cookie_assign(&child->async_tx); -+ } -+ -+ /* softly link to pending list - desc->tx_list ==> pending list */ -+ list_splice_tail_init(&desc->tx_list, &chan->chain_pending); -+ -+ spin_unlock_irqrestore(&chan->desc_lock, flags); -+ -+ return cookie; -+} -+ -+static struct mmp_pdma_desc_sw * -+mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan) -+{ -+ struct mmp_pdma_desc_sw *desc; -+ dma_addr_t pdesc; -+ -+ desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc); -+ if (!desc) { -+ dev_err(chan->dev, "out of memory for link descriptor\n"); -+ return NULL; -+ } -+ -+ INIT_LIST_HEAD(&desc->tx_list); -+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); -+ /* each desc has submit */ -+ desc->async_tx.tx_submit = mmp_pdma_tx_submit; -+ desc->async_tx.phys = pdesc; -+ -+ return desc; -+} -+ -+/* -+ * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel. -+ * -+ * This function will create a dma pool for descriptor allocation. -+ * Request irq only when channel is requested -+ * Return - The number of allocated descriptors. 
-+ */ -+ -+static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan) -+{ -+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); -+ -+ if (chan->desc_pool) -+ return 1; -+ -+ chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device), -+ chan->dev, -+ sizeof(struct mmp_pdma_desc_sw), -+ __alignof__(struct mmp_pdma_desc_sw), -+ 0); -+ if (!chan->desc_pool) { -+ dev_err(chan->dev, "unable to allocate descriptor pool\n"); -+ return -ENOMEM; -+ } -+ -+ chan->status = DMA_COMPLETE; -+ chan->dir = 0; -+ chan->dcmd = 0; -+ -+ mmp_pdma_free_phy(chan); -+ -+ chan->idle = true; -+ chan->dev_addr = 0; -+ return 1; -+} -+ -+static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan, -+ struct list_head *list) -+{ -+ struct mmp_pdma_desc_sw *desc, *_desc; -+ -+ list_for_each_entry_safe(desc, _desc, list, node) { -+ list_del(&desc->node); -+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); -+ } -+} -+ -+static void mmp_pdma_free_chan_resources(struct dma_chan *dchan) -+{ -+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); -+ unsigned long flags; -+ -+ /* wait until task ends if necessary */ -+ tasklet_kill(&chan->tasklet); -+ -+ spin_lock_irqsave(&chan->desc_lock, flags); -+ mmp_pdma_free_desc_list(chan, &chan->chain_pending); -+ mmp_pdma_free_desc_list(chan, &chan->chain_running); -+ -+ spin_unlock_irqrestore(&chan->desc_lock, flags); -+ -+ dma_pool_destroy(chan->desc_pool); -+ chan->desc_pool = NULL; -+ chan->idle = true; -+ chan->dev_addr = 0; -+ -+ chan->status = DMA_COMPLETE; -+ chan->dir = 0; -+ chan->dcmd = 0; -+ -+ mmp_pdma_free_phy(chan); -+ return; -+} -+ -+#define INVALID_BURST_SETTING -1 -+#define DEFAULT_MAX_BURST_SIZE 32 -+ -+static int get_max_burst_setting(unsigned int max_burst_size) -+{ -+ switch (max_burst_size) { -+ case 8: -+ return DCMD_BURST8; -+ case 16: -+ return DCMD_BURST16; -+ case 32: -+ return DCMD_BURST32; -+ case 64: -+ return DCMD_BURST64; -+ default: -+ return INVALID_BURST_SETTING; -+ } -+} -+ -+static struct dma_async_tx_descriptor * -+mmp_pdma_prep_memcpy(struct dma_chan *dchan, -+ dma_addr_t dma_dst, dma_addr_t dma_src, -+ size_t len, unsigned long flags) -+{ -+ struct mmp_pdma_chan *chan; -+ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; -+ size_t copy = 0; -+ struct mmp_pdma_device *dev; -+ int value; -+ -+ if (!dchan) -+ return NULL; -+ -+ if (!len) -+ return NULL; -+ -+ chan = to_mmp_pdma_chan(dchan); -+ chan->byte_align = false; -+ -+ if (!chan->dir) { -+ chan->dir = DMA_MEM_TO_MEM; -+ chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR; -+ dev = to_mmp_pdma_dev(dchan->device); -+ value = get_max_burst_setting(dev->max_burst_size); -+ -+ BUG_ON(value == INVALID_BURST_SETTING); -+ -+ chan->dcmd |= value; -+ } -+ -+ do { -+ /* Allocate the link descriptor from DMA pool */ -+ new = mmp_pdma_alloc_descriptor(chan); -+ if (!new) { -+ dev_err(chan->dev, "no memory for desc\n"); -+ goto fail; -+ } -+ -+ copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES); -+ if (dma_src & 0x7 || dma_dst & 0x7) -+ chan->byte_align = true; -+ -+ new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy); -+ -+ /* -+ * Check whether descriptor/source-addr/target-addr is in -+ * region higher than 4G. If so, set related higher bits to 1. 
-+ */ -+ if (chan->dir == DMA_MEM_TO_DEV) { -+ new->desc.dsadr = dma_src & 0xffffffff; -+ new->desc.dtadr = dma_dst; -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+ new->desc.dsadrh = (dma_src >> 32); -+ new->desc.dtadrh = 0; -+#endif -+ } else if (chan->dir == DMA_DEV_TO_MEM) { -+ new->desc.dsadr = dma_src; -+ new->desc.dtadr = dma_dst & 0xffffffff; -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+ new->desc.dsadrh = 0; -+ new->desc.dtadrh = (dma_dst >> 32); -+#endif -+ } else if (chan->dir == DMA_MEM_TO_MEM) { -+ new->desc.dsadr = dma_src & 0xffffffff; -+ new->desc.dtadr = dma_dst & 0xffffffff; -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+ new->desc.dsadrh = (dma_src >> 32); -+ new->desc.dtadrh = (dma_dst >> 32); -+#endif -+ } else { -+ dev_err(chan->dev, "wrong direction: 0x%x\n", chan->dir); -+ goto fail; -+ } -+ -+ if (!first) -+ first = new; -+ else { -+ prev->desc.ddadr = new->async_tx.phys; -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+ prev->desc.ddadrh = (new->async_tx.phys >> 32); -+#endif -+ } -+ -+ new->async_tx.cookie = 0; -+ async_tx_ack(&new->async_tx); -+ -+ prev = new; -+ len -= copy; -+ -+ if (chan->dir == DMA_MEM_TO_DEV) { -+ dma_src += copy; -+ } else if (chan->dir == DMA_DEV_TO_MEM) { -+ dma_dst += copy; -+ } else if (chan->dir == DMA_MEM_TO_MEM) { -+ dma_src += copy; -+ dma_dst += copy; -+ } -+ -+ /* Insert the link descriptor to the LD ring */ -+ list_add_tail(&new->node, &first->tx_list); -+ } while (len); -+ -+ first->async_tx.flags = flags; /* client is in control of this ack */ -+ first->async_tx.cookie = -EBUSY; -+ -+ /* last desc and fire IRQ */ -+ new->desc.ddadr = DDADR_STOP; -+ new->desc.dcmd |= DCMD_ENDIRQEN; -+ -+ chan->cyclic_first = NULL; -+ -+ return &first->async_tx; -+ -+fail: -+ if (first) -+ mmp_pdma_free_desc_list(chan, &first->tx_list); -+ return NULL; -+} -+ -+static struct dma_async_tx_descriptor * -+mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, -+ unsigned int sg_len, enum dma_transfer_direction dir, -+ unsigned long flags, void *context) -+{ -+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); -+ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL; -+ size_t len, avail; -+ struct scatterlist *sg; -+ dma_addr_t addr; -+ int i; -+ -+ if ((sgl == NULL) || (sg_len == 0)) -+ return NULL; -+ -+ chan->byte_align = true; -+ -+ mmp_pdma_config_write(dchan, &chan->slave_config, dir); -+ -+ for_each_sg(sgl, sg, sg_len, i) { -+ addr = sg_dma_address(sg); -+ avail = sg_dma_len(sgl); -+ -+ do { -+ len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES); -+ if (addr & 0x7) -+ chan->byte_align = true; -+ -+ /* allocate and populate the descriptor */ -+ new = mmp_pdma_alloc_descriptor(chan); -+ if (!new) { -+ dev_err(chan->dev, "no memory for desc\n"); -+ goto fail; -+ } -+ -+ new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len); -+ -+ /* -+ * Check whether descriptor/source-addr/target-addr is in -+ * region higher than 4G. If so, set related higher bits to 1. 
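Because DCMD_LENGTH caps a single descriptor at 8K - 1 bytes, mmp_pdma_prep_memcpy() above chops a request into a chain of descriptors of at most PDMA_MAX_DESC_BYTES each. A quick standalone check of how many descriptors a given length needs, mirroring the do/while split; illustrative only.

#include <stddef.h>
#include <stdio.h>

#define PDMA_MAX_DESC_BYTES 0x1fff  /* DCMD_LENGTH: 8K - 1 bytes per descriptor */

int main(void)
{
    size_t len = 65536;             /* example: a 64 KiB memcpy request */
    size_t descs = 0;

    /* Same split as the loop in mmp_pdma_prep_memcpy(). */
    while (len) {
        size_t copy = len < PDMA_MAX_DESC_BYTES ? len : PDMA_MAX_DESC_BYTES;
        len -= copy;
        descs++;
    }
    printf("descriptors needed: %zu\n", descs);   /* 9 for 64 KiB */
    return 0;
}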
-+ */ -+ if (dir == DMA_MEM_TO_DEV) { -+ new->desc.dsadr = addr & 0xffffffff; -+ new->desc.dtadr = chan->dev_addr; -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+ new->desc.dsadrh = (addr >> 32); -+ new->desc.dtadrh = 0; -+#endif -+ } else if (dir == DMA_DEV_TO_MEM) { -+ new->desc.dsadr = chan->dev_addr; -+ new->desc.dtadr = addr & 0xffffffff; -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+ new->desc.dsadrh = 0; -+ new->desc.dtadrh = (addr >> 32); -+#endif -+ } else { -+ dev_err(chan->dev, "wrong direction: 0x%x\n", chan->dir); -+ goto fail; -+ } -+ -+ if (!first) -+ first = new; -+ else { -+ prev->desc.ddadr = new->async_tx.phys; -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+ prev->desc.ddadrh = (new->async_tx.phys >> 32); -+#endif -+ } -+ -+ new->async_tx.cookie = 0; -+ async_tx_ack(&new->async_tx); -+ prev = new; -+ -+ /* Insert the link descriptor to the LD ring */ -+ list_add_tail(&new->node, &first->tx_list); -+ -+ /* update metadata */ -+ addr += len; -+ avail -= len; -+ } while (avail); -+ } -+ -+ first->async_tx.cookie = -EBUSY; -+ first->async_tx.flags = flags; -+ -+ /* last desc and fire IRQ */ -+ new->desc.ddadr = DDADR_STOP; -+ new->desc.dcmd |= DCMD_ENDIRQEN; -+ -+ chan->dir = dir; -+ chan->cyclic_first = NULL; -+ -+ return &first->async_tx; -+ -+fail: -+ if (first) -+ mmp_pdma_free_desc_list(chan, &first->tx_list); -+ return NULL; -+} -+ -+static struct dma_async_tx_descriptor * -+mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan, -+ dma_addr_t buf_addr, size_t len, size_t period_len, -+ enum dma_transfer_direction direction, -+ unsigned long flags) -+{ -+ struct mmp_pdma_chan *chan; -+ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; -+ dma_addr_t dma_src, dma_dst; -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+ dma_addr_t dma_srch, dma_dsth; -+#endif -+ -+ if (!dchan || !len || !period_len) -+ return NULL; -+ -+ /* the buffer length must be a multiple of period_len */ -+ if (len % period_len != 0) -+ return NULL; -+ -+ if (period_len > PDMA_MAX_DESC_BYTES) -+ return NULL; -+ -+ chan = to_mmp_pdma_chan(dchan); -+ mmp_pdma_config_write(dchan, &chan->slave_config, direction); -+ -+ switch (direction) { -+ case DMA_MEM_TO_DEV: -+ dma_src = buf_addr & 0xffffffff; -+ dma_dst = chan->dev_addr; -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+ dma_srch = (buf_addr >> 32); -+ dma_dsth = 0; -+#endif -+ break; -+ case DMA_DEV_TO_MEM: -+ dma_dst = buf_addr & 0xffffffff; -+ dma_src = chan->dev_addr; -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+ dma_dsth = (buf_addr >> 32); -+ dma_srch = 0; -+#endif -+ break; -+ default: -+ dev_err(chan->dev, "Unsupported direction for cyclic DMA\n"); -+ return NULL; -+ } -+ -+ chan->dir = direction; -+ -+ do { -+ /* Allocate the link descriptor from DMA pool */ -+ new = mmp_pdma_alloc_descriptor(chan); -+ if (!new) { -+ dev_err(chan->dev, "no memory for desc\n"); -+ goto fail; -+ } -+ -+ new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN | -+ (DCMD_LENGTH & period_len)); -+ new->desc.dsadr = dma_src; -+ new->desc.dtadr = dma_dst; -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+ new->desc.dsadrh = dma_dsth; -+ new->desc.dtadrh = dma_srch; -+#endif -+ -+ if (!first) -+ first = new; -+ else { -+ prev->desc.ddadr = new->async_tx.phys; -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+ prev->desc.ddadrh = (new->async_tx.phys >> 32); -+#endif -+ } -+ -+ new->async_tx.cookie = 0; -+ async_tx_ack(&new->async_tx); -+ -+ prev = new; -+ len -= period_len; -+ -+ if (chan->dir == DMA_MEM_TO_DEV) -+ dma_src += period_len; -+ else -+ dma_dst += period_len; -+ -+ /* Insert the 
link descriptor to the LD ring */ -+ list_add_tail(&new->node, &first->tx_list); -+ } while (len); -+ -+ first->async_tx.flags = flags; /* client is in control of this ack */ -+ first->async_tx.cookie = -EBUSY; -+ -+ /* make the cyclic link */ -+ new->desc.ddadr = first->async_tx.phys; -+ chan->cyclic_first = first; -+ -+ return &first->async_tx; -+ -+fail: -+ if (first) -+ mmp_pdma_free_desc_list(chan, &first->tx_list); -+ return NULL; -+} -+ -+static int mmp_pdma_config_write(struct dma_chan *dchan, -+ struct dma_slave_config *cfg, -+ enum dma_transfer_direction direction) -+{ -+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); -+ u32 maxburst = 0, addr = 0; -+ enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; -+ -+ if (!dchan) -+ return -EINVAL; -+ -+ if (direction == DMA_DEV_TO_MEM) { -+ chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; -+ maxburst = cfg->src_maxburst; -+ width = cfg->src_addr_width; -+ addr = cfg->src_addr; -+ } else if (direction == DMA_MEM_TO_DEV) { -+ chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; -+ maxburst = cfg->dst_maxburst; -+ width = cfg->dst_addr_width; -+ addr = cfg->dst_addr; -+ } -+ -+ if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) -+ chan->dcmd |= DCMD_WIDTH1; -+ else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) -+ chan->dcmd |= DCMD_WIDTH2; -+ else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES) -+ chan->dcmd |= DCMD_WIDTH4; -+ -+ if (maxburst == 8) -+ chan->dcmd |= DCMD_BURST8; -+ else if (maxburst == 16) -+ chan->dcmd |= DCMD_BURST16; -+ else if (maxburst == 32) -+ chan->dcmd |= DCMD_BURST32; -+ -+ chan->dir = direction; -+ chan->dev_addr = addr; -+ -+ return 0; -+} -+ -+static int mmp_pdma_pause_chan(struct dma_chan *dchan) -+{ -+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); -+ -+ if (!chan->phy) -+ return -1; -+ -+ disable_chan(chan->phy); -+ chan->status = DMA_PAUSED; -+ -+ return 0; -+} -+ -+static int mmp_pdma_config(struct dma_chan *dchan, -+ struct dma_slave_config *cfg) -+{ -+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); -+ -+ memcpy(&chan->slave_config, cfg, sizeof(*cfg)); -+ return 0; -+} -+ -+static int mmp_pdma_terminate_all(struct dma_chan *dchan) -+{ -+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); -+ unsigned long flags; -+ -+ if (!dchan) -+ return -EINVAL; -+ -+ spin_lock_irqsave(&chan->desc_lock, flags); -+ disable_chan(chan->phy); -+ chan->status = DMA_COMPLETE; -+ mmp_pdma_free_phy(chan); -+ -+ mmp_pdma_free_desc_list(chan, &chan->chain_pending); -+ mmp_pdma_free_desc_list(chan, &chan->chain_running); -+ chan->bytes_residue = 0; -+ -+ spin_unlock_irqrestore(&chan->desc_lock, flags); -+ chan->idle = true; -+ -+ mmp_pdma_qos_put(chan); -+ -+ return 0; -+} -+ -+static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan, -+ dma_cookie_t cookie) -+{ -+ struct mmp_pdma_desc_sw *sw; -+ u32 curr, residue = 0; -+ bool passed = false; -+ bool cyclic = chan->cyclic_first != NULL; -+ -+ /* -+ * If the channel does not have a phy pointer anymore, it has already -+ * been completed. Therefore, its residue is 0. 
-+ */ -+ if (!chan->phy) -+ return chan->bytes_residue; /* special case for EORIRQEN */ -+ -+ if (chan->dir == DMA_DEV_TO_MEM) -+ curr = readl(chan->phy->base + DTADR(chan->phy->idx)); -+ else -+ curr = readl(chan->phy->base + DSADR(chan->phy->idx)); -+ -+ list_for_each_entry(sw, &chan->chain_running, node) { -+ u32 start, end, len; -+ -+ if (chan->dir == DMA_DEV_TO_MEM) -+ start = sw->desc.dtadr; -+ else -+ start = sw->desc.dsadr; -+ -+ len = sw->desc.dcmd & DCMD_LENGTH; -+ end = start + len; -+ -+ /* -+ * 'passed' will be latched once we found the descriptor which -+ * lies inside the boundaries of the curr pointer. All -+ * descriptors that occur in the list _after_ we found that -+ * partially handled descriptor are still to be processed and -+ * are hence added to the residual bytes counter. -+ */ -+ -+ if (passed) { -+ residue += len; -+ } else if (curr >= start && curr <= end) { -+ residue += end - curr; -+ passed = true; -+ } -+ -+ /* -+ * Descriptors that have the ENDIRQEN bit set mark the end of a -+ * transaction chain, and the cookie assigned with it has been -+ * returned previously from mmp_pdma_tx_submit(). -+ * -+ * In case we have multiple transactions in the running chain, -+ * and the cookie does not match the one the user asked us -+ * about, reset the state variables and start over. -+ * -+ * This logic does not apply to cyclic transactions, where all -+ * descriptors have the ENDIRQEN bit set, and for which we -+ * can't have multiple transactions on one channel anyway. -+ */ -+ if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN)) -+ continue; -+ -+ if (sw->async_tx.cookie == cookie) { -+ return residue; -+ } else { -+ residue = 0; -+ passed = false; -+ } -+ } -+ -+ /* We should only get here in case of cyclic transactions */ -+ return residue; -+} -+ -+static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, -+ dma_cookie_t cookie, -+ struct dma_tx_state *txstate) -+{ -+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); -+ enum dma_status ret; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&chan->desc_lock, flags); -+ ret = dma_cookie_status(dchan, cookie, txstate); -+ if (likely(ret != DMA_ERROR)) -+ dma_set_residue(txstate, mmp_pdma_residue(chan, cookie)); -+ -+ spin_unlock_irqrestore(&chan->desc_lock, flags); -+ -+ if (ret == DMA_COMPLETE) -+ return ret; -+ else -+ return chan->status; -+} -+ -+/* -+ * mmp_pdma_issue_pending - Issue the DMA start command -+ * pending list ==> running list -+ */ -+static void mmp_pdma_issue_pending(struct dma_chan *dchan) -+{ -+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); -+ unsigned long flags; -+ int ret = 0; -+ -+ mmp_pdma_qos_get(chan); -+ spin_lock_irqsave(&chan->desc_lock, flags); -+ ret = start_pending_queue(chan); -+ -+ spin_unlock_irqrestore(&chan->desc_lock, flags); -+ -+ if (ret) -+ mmp_pdma_qos_put(chan); -+} -+ -+/* -+ * dma_do_tasklet -+ * Do call back -+ * Start pending list -+ */ -+static void dma_do_tasklet(struct tasklet_struct *t) -+{ -+ struct mmp_pdma_chan *chan = from_tasklet(chan, t, tasklet); -+ struct mmp_pdma_desc_sw *desc, *_desc; -+ LIST_HEAD(chain_cleanup); -+ unsigned long flags; -+ struct dmaengine_desc_callback cb; -+ -+ int ret = 0; -+ -+ /* return if this channel has been stopped */ -+ spin_lock_irqsave(&chan->desc_lock, flags); -+ if (chan->status == DMA_COMPLETE) { -+ spin_unlock_irqrestore(&chan->desc_lock, flags); -+ return; -+ } -+ spin_unlock_irqrestore(&chan->desc_lock, flags); -+ -+ if (chan->cyclic_first) { -+ spin_lock_irqsave(&chan->desc_lock, flags); -+ desc = 
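The residue walk above adds the untransferred tail of the descriptor the hardware address pointer currently sits in, plus the full length of every descriptor still ahead of it in the running chain. The following standalone sketch reproduces just that walk over a fake descriptor list, ignoring the cookie/ENDIRQEN bookkeeping; all names and values are illustrative.

#include <stdint.h>
#include <stdio.h>

struct fake_desc { uint32_t start; uint32_t len; };

/* Hypothetical re-implementation of the accumulation in mmp_pdma_residue(). */
static uint32_t residue(const struct fake_desc *d, int n, uint32_t curr)
{
    uint32_t res = 0;
    int passed = 0;

    for (int i = 0; i < n; i++) {
        uint32_t end = d[i].start + d[i].len;

        if (passed)
            res += d[i].len;                 /* descriptor still entirely pending */
        else if (curr >= d[i].start && curr <= end) {
            res += end - curr;               /* partially transferred descriptor */
            passed = 1;
        }
    }
    return res;
}

int main(void)
{
    struct fake_desc chain[] = { {0x1000, 0x800}, {0x1800, 0x800}, {0x2000, 0x800} };
    /* hardware pointer sits 0x200 bytes into the second descriptor */
    printf("residue = 0x%x\n", residue(chain, 3, 0x1a00));  /* 0x600 + 0x800 = 0xe00 */
    return 0;
}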
chan->cyclic_first; -+ dmaengine_desc_get_callback(&desc->async_tx, &cb); -+ spin_unlock_irqrestore(&chan->desc_lock, flags); -+ -+ dmaengine_desc_callback_invoke(&cb, NULL); -+ -+ return; -+ } -+ -+ /* submit pending list; callback for each desc; free desc */ -+ spin_lock_irqsave(&chan->desc_lock, flags); -+ -+ /* special for the EORIRQEN case, residue is not 0 */ -+ list_for_each_entry(desc, &chan->chain_running, node) { -+ if (desc->desc.dcmd & DCMD_ENDIRQEN) { -+ chan->bytes_residue = -+ mmp_pdma_residue(chan, desc->async_tx.cookie); -+ break; -+ } -+ } -+ -+ list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) { -+ /* -+ * move the descriptors to a temporary list so we can drop -+ * the lock during the entire cleanup operation -+ */ -+ list_move(&desc->node, &chain_cleanup); -+ -+ /* -+ * Look for the first list entry which has the ENDIRQEN flag -+ * set. That is the descriptor we got an interrupt for, so -+ * complete that transaction and its cookie. -+ */ -+ if (desc->desc.dcmd & DCMD_ENDIRQEN) { -+ dma_cookie_t cookie = desc->async_tx.cookie; -+ dma_cookie_complete(&desc->async_tx); -+ dev_dbg(chan->dev, "completed_cookie=%d\n", cookie); -+ break; -+ } -+ } -+ -+ /* -+ * The hardware is idle and ready for more when the -+ * chain_running list is empty. -+ */ -+ chan->status = list_empty(&chan->chain_running) ? -+ DMA_COMPLETE : DMA_IN_PROGRESS; -+ -+ /* Start any pending transactions automatically */ -+ ret = start_pending_queue(chan); -+ -+ spin_unlock_irqrestore(&chan->desc_lock, flags); -+ -+ /* restart pending transactions failed, do not need qos anymore */ -+ if (ret) -+ mmp_pdma_qos_put(chan); -+ -+ /* Run the callback for each descriptor, in order */ -+ list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) { -+ struct dma_async_tx_descriptor *txd = &desc->async_tx; -+ -+ /* Remove from the list of transactions */ -+ list_del(&desc->node); -+ /* Run the link descriptor callback function */ -+ dmaengine_desc_get_callback(txd, &cb); -+ dmaengine_desc_callback_invoke(&cb, NULL); -+ -+ dma_pool_free(chan->desc_pool, desc, txd->phys); -+ } -+} -+ -+static int mmp_pdma_remove(struct platform_device *op) -+{ -+ struct mmp_pdma_device *pdev = platform_get_drvdata(op); -+ struct mmp_pdma_phy *phy; -+ int i, irq = 0, irq_num = 0; -+ -+ if (op->dev.of_node) -+ of_dma_controller_free(op->dev.of_node); -+ -+ for (i = 0; i < pdev->dma_channels; i++) { -+ if (platform_get_irq(op, i) > 0) -+ irq_num++; -+ } -+ -+ if (irq_num != pdev->dma_channels) { -+ irq = platform_get_irq(op, 0); -+ devm_free_irq(&op->dev, irq, pdev); -+ } else { -+ for (i = 0; i < pdev->dma_channels; i++) { -+ phy = &pdev->phy[i]; -+ irq = platform_get_irq(op, i); -+ devm_free_irq(&op->dev, irq, phy); -+ } -+ } -+ -+ dma_async_device_unregister(&pdev->device); -+ -+ reset_control_assert(pdev->resets); -+ clk_disable_unprepare(pdev->clk); -+ -+ kfree(pdev->reserved_channels); -+ platform_set_drvdata(op, NULL); -+ -+ return 0; -+} -+ -+static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq) -+{ -+ struct mmp_pdma_phy *phy = &pdev->phy[idx]; -+ struct mmp_pdma_chan *chan; -+ int ret; -+ -+ chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL); -+ if (chan == NULL) -+ return -ENOMEM; -+ -+ phy->idx = idx; -+ phy->base = pdev->base; -+ -+ if (irq) { -+ ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, -+ IRQF_SHARED, "pdma", phy); -+ if (ret) { -+ dev_err(pdev->dev, "channel request irq fail!\n"); -+ return ret; -+ } -+ } -+ -+ spin_lock_init(&chan->desc_lock); -+ 
chan->dev = pdev->dev; -+ chan->chan.device = &pdev->device; -+ tasklet_setup(&chan->tasklet, dma_do_tasklet); -+ INIT_LIST_HEAD(&chan->chain_pending); -+ INIT_LIST_HEAD(&chan->chain_running); -+ -+ chan->status = DMA_COMPLETE; -+ chan->bytes_residue = 0; -+ chan->qos_count = 0; -+ chan->user_do_qos = 1; -+ -+ /* register virt channel to dma engine */ -+ list_add_tail(&chan->chan.device_node, &pdev->device.channels); -+ -+ return 0; -+} -+ -+static const struct of_device_id mmp_pdma_dt_ids[] = { -+ { .compatible = "spacemit,pdma-1.0", }, -+ {} -+}; -+MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids); -+ -+static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec, -+ struct of_dma *ofdma) -+{ -+ struct mmp_pdma_device *d = ofdma->of_dma_data; -+ struct dma_chan *chan; -+#ifdef CONFIG_PM -+ struct mmp_pdma_chan *c; -+#endif -+ -+ chan = dma_get_any_slave_channel(&d->device); -+ if (!chan) -+ return NULL; -+ -+ to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0]; -+#ifdef CONFIG_PM -+ if (unlikely(dma_spec->args_count != 2)) -+ dev_err(d->dev, "#dma-cells should be 2!\n"); -+ -+ c = to_mmp_pdma_chan(chan); -+ c->user_do_qos = dma_spec->args[1] ? 1 : 0; -+ -+ if (c->user_do_qos) -+ dev_dbg(d->dev, "channel %d: user does qos itself\n", -+ c->chan.chan_id); -+ else -+ dev_dbg(d->dev, "channel %d: pdma does qos\n", -+ c->chan.chan_id); -+#endif -+ -+ return chan; -+} -+ -+static int mmp_pdma_probe(struct platform_device *op) -+{ -+ struct mmp_pdma_device *pdev; -+ const struct of_device_id *of_id; -+ struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev); -+ struct resource *iores; -+ int i, ret, irq = 0; -+ int dma_channels = 0, irq_num = 0; -+ const enum dma_slave_buswidth widths = -+ DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | -+ DMA_SLAVE_BUSWIDTH_4_BYTES; -+ -+ int nr_reserved_channels; -+ const int *list; -+ unsigned int max_burst_size = DEFAULT_MAX_BURST_SIZE; -+ -+ pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); -+ if (!pdev) -+ return -ENOMEM; -+ -+ pdev->dev = &op->dev; -+ -+ spin_lock_init(&pdev->phy_lock); -+ -+ iores = platform_get_resource(op, IORESOURCE_MEM, 0); -+ pdev->base = devm_ioremap_resource(pdev->dev, iores); -+ if (IS_ERR(pdev->base)) -+ return PTR_ERR(pdev->base); -+ -+ pdev->clk = devm_clk_get(pdev->dev,NULL); -+ if(IS_ERR(pdev->clk)) -+ return PTR_ERR(pdev->clk); -+ -+ ret = clk_prepare_enable(pdev->clk); -+ if (ret) -+ return dev_err_probe(pdev->dev, ret, "could not enable dma bus clock\n"); -+ -+ pdev->resets = devm_reset_control_get_optional(pdev->dev,NULL); -+ if(IS_ERR(pdev->resets)) { -+ ret = PTR_ERR(pdev->resets); -+ goto err_rst; -+ } -+ ret = reset_control_deassert(pdev->resets); -+ if(ret) -+ goto err_rst; -+ -+ of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); -+ -+ if (of_id) { -+ int n; -+ of_property_read_u32(pdev->dev->of_node, "#dma-channels", -+ &dma_channels); -+ -+ list = of_get_property(pdev->dev->of_node, "reserved-channels", -+ &n); -+ -+ if (of_property_read_u32(pdev->dev->of_node, "max-burst-size", -+ &max_burst_size)) { -+ dev_err(pdev->dev, "Cannot find the max-burst-size node " -+ "in the device tree, set it to %d\n", -+ DEFAULT_MAX_BURST_SIZE); -+ max_burst_size = DEFAULT_MAX_BURST_SIZE; -+ } -+ -+ if (get_max_burst_setting(max_burst_size) == INVALID_BURST_SETTING) { -+ dev_err(pdev->dev, "Unsupported max-burst-size value %d " -+ "in the device tree, set it to %d\n", -+ max_burst_size, DEFAULT_MAX_BURST_SIZE); -+ max_burst_size = DEFAULT_MAX_BURST_SIZE; -+ } -+ -+ if (list) { -+ int num_args = 2; -+ -+ 
nr_reserved_channels = n / (sizeof(u32) * num_args); -+ -+ pdev->nr_reserved_channels = nr_reserved_channels; -+ -+ pdev->reserved_channels = kzalloc(nr_reserved_channels * sizeof(struct reserved_chan), -+ GFP_KERNEL); -+ -+ if (pdev->reserved_channels == NULL) -+ return -ENOMEM; -+ -+ for (i = 0; i < nr_reserved_channels; i++) { -+ int value; -+ -+ of_property_read_u32_index(pdev->dev->of_node, "reserved-channels", i * num_args, &value); -+ pdev->reserved_channels[i].chan_id = value; -+ of_property_read_u32_index(pdev->dev->of_node, "reserved-channels", i * num_args + 1, &value); -+ pdev->reserved_channels[i].drcmr = value; -+ } -+ } -+ } else if (pdata && pdata->dma_channels) { -+ dma_channels = pdata->dma_channels; -+ } else { -+ dma_channels = 32; /* default 32 channel */ -+ } -+ pdev->dma_channels = dma_channels; -+ -+ pdev->max_burst_size = max_burst_size; -+ dev_dbg(pdev->dev, "set max burst size to %d\n", max_burst_size); -+ -+#ifdef CONFIG_PM -+ pm_runtime_enable(&op->dev); -+ /* -+ * We can't ensure the pm operations are always in non-atomic context. -+ * Actually it depends on the drivers' behavior. So mark it as irq safe. -+ */ -+ pm_runtime_irq_safe(&op->dev); -+#endif -+ for (i = 0; i < dma_channels; i++) { -+ if (platform_get_irq_optional(op, i) > 0) -+ irq_num++; -+ } -+ -+ pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy), -+ GFP_KERNEL); -+ if (pdev->phy == NULL) -+ return -ENOMEM; -+ -+ INIT_LIST_HEAD(&pdev->device.channels); -+ -+ if (irq_num != dma_channels) { -+ /* all chan share one irq, demux inside */ -+ irq = platform_get_irq(op, 0); -+ ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, -+ IRQF_SHARED, "pdma", pdev); -+ if (ret) -+ return ret; -+ } -+ -+ for (i = 0; i < dma_channels; i++) { -+ irq = (irq_num != dma_channels) ? 
0 : platform_get_irq(op, i); -+ ret = mmp_pdma_chan_init(pdev, i, irq); -+ if (ret) -+ return ret; -+ } -+ -+ dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); -+ dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask); -+ dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask); -+ dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask); -+ pdev->device.dev = &op->dev; -+ pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources; -+ pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources; -+ pdev->device.device_tx_status = mmp_pdma_tx_status; -+ pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy; -+ pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; -+ pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic; -+ pdev->device.device_issue_pending = mmp_pdma_issue_pending; -+ pdev->device.device_config = mmp_pdma_config; -+ pdev->device.device_pause = mmp_pdma_pause_chan; -+ pdev->device.device_terminate_all = mmp_pdma_terminate_all; -+ pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES; -+ pdev->device.src_addr_widths = widths; -+ pdev->device.dst_addr_widths = widths; -+ pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); -+ pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; -+ -+#ifdef CONFIG_SPACEMIT_PDMA_SUPPORT_64BIT -+ dma_set_mask(pdev->dev, DMA_BIT_MASK(64)); -+#else -+ dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask); -+#endif -+ -+ ret = dma_async_device_register(&pdev->device); -+ if (ret) { -+ dev_err(pdev->device.dev, "unable to register\n"); -+ return ret; -+ } -+ -+ if (op->dev.of_node) { -+ /* Device-tree DMA controller registration */ -+ ret = of_dma_controller_register(op->dev.of_node, -+ mmp_pdma_dma_xlate, pdev); -+ if (ret < 0) { -+ dev_err(&op->dev, "of_dma_controller_register failed\n"); -+ dma_async_device_unregister(&pdev->device); -+ return ret; -+ } -+ } -+ -+ platform_set_drvdata(op, pdev); -+ dev_dbg(pdev->device.dev, "initialized %d channels\n", dma_channels); -+ return 0; -+ -+err_rst: -+ clk_disable_unprepare(pdev->clk); -+ return ret; -+} -+ -+/* -+ * Per-channel qos get/put function. This function ensures that pm_ -+ * runtime_get/put are not called multi times for one channel. -+ * This guarantees pm_runtime_get/put always match for the entire device. -+ */ -+static void mmp_pdma_qos_get(struct mmp_pdma_chan *chan) -+{ -+ unsigned long flags; -+ -+ if (chan->user_do_qos) -+ return; -+ -+ spin_lock_irqsave(&chan->desc_lock, flags); -+ if (chan->qos_count == 0) { -+ chan->qos_count = 1; -+ /* -+ * Safe in spin_lock because it's marked as irq safe. -+ * Similar case for mmp_pdma_qos_put(). 
-+ */ -+ pm_runtime_get_sync(chan->dev); -+ } -+ -+ spin_unlock_irqrestore(&chan->desc_lock, flags); -+} -+ -+static void mmp_pdma_qos_put(struct mmp_pdma_chan *chan) -+{ -+ unsigned long flags; -+ -+ if (chan->user_do_qos) -+ return; -+ -+ spin_lock_irqsave(&chan->desc_lock, flags); -+ if (chan->qos_count == 1) { -+ chan->qos_count = 0; -+ pm_runtime_put_autosuspend(chan->dev); -+ } -+ -+ spin_unlock_irqrestore(&chan->desc_lock, flags); -+} -+ -+static const struct platform_device_id mmp_pdma_id_table[] = { -+ { "mmp-pdma", }, -+ { }, -+}; -+ -+#ifdef CONFIG_PM_SLEEP -+static int mmp_pdma_suspend_noirq(struct device *dev) -+{ -+ struct mmp_pdma_device *pdev = dev_get_drvdata(dev); -+ -+ clk_disable_unprepare(pdev->clk); -+ -+ return 0; -+} -+ -+static int mmp_pdma_resume_noirq(struct device *dev) -+{ -+ struct mmp_pdma_device *pdev = dev_get_drvdata(dev); -+ -+ clk_prepare_enable(pdev->clk); -+ -+ return 0; -+} -+ -+static const struct dev_pm_ops k1x_mmp_pdma_pm_qos = { -+ .suspend_noirq = mmp_pdma_suspend_noirq, -+ .resume_noirq = mmp_pdma_resume_noirq, -+}; -+#endif -+ -+static struct platform_driver mmp_pdma_driver = { -+ .driver = { -+ .name = "mmp-pdma", -+#ifdef CONFIG_PM_SLEEP -+ .pm = &k1x_mmp_pdma_pm_qos, -+#endif -+ .of_match_table = mmp_pdma_dt_ids, -+ }, -+ .id_table = mmp_pdma_id_table, -+ .probe = mmp_pdma_probe, -+ .remove = mmp_pdma_remove, -+}; -+ -+static int __init mmp_pdma_init(void) -+{ -+ return platform_driver_register(&mmp_pdma_driver); -+} -+ -+static void __exit mmp_pdma_exit(void) -+{ -+ platform_driver_unregister(&mmp_pdma_driver); -+} -+ -+subsys_initcall(mmp_pdma_init); -+module_exit(mmp_pdma_exit); -+ -+MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver"); -+MODULE_AUTHOR("Marvell International Ltd."); -+MODULE_LICENSE("GPL v2"); -diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/mmp_tdma.c -+++ b/drivers/dma/mmp_tdma.c -@@ -639,7 +639,6 @@ static int mmp_tdma_probe(struct platform_device *pdev) - enum mmp_tdma_type type; - const struct of_device_id *of_id; - struct mmp_tdma_device *tdev; -- struct resource *iores; - int i, ret; - int irq = 0, irq_num = 0; - int chan_num = TDMA_CHANNEL_NUM; -@@ -663,8 +662,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) - irq_num++; - } - -- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- tdev->base = devm_ioremap_resource(&pdev->dev, iores); -+ tdev->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(tdev->base)) - return PTR_ERR(tdev->base); - -diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/moxart-dma.c -+++ b/drivers/dma/moxart-dma.c -@@ -563,7 +563,6 @@ static int moxart_probe(struct platform_device *pdev) - { - struct device *dev = &pdev->dev; - struct device_node *node = dev->of_node; -- struct resource *res; - void __iomem *dma_base_addr; - int ret, i; - unsigned int irq; -@@ -580,8 +579,7 @@ static int moxart_probe(struct platform_device *pdev) - return -EINVAL; - } - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- dma_base_addr = devm_ioremap_resource(dev, res); -+ dma_base_addr = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(dma_base_addr)) - return PTR_ERR(dma_base_addr); - -diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/mv_xor_v2.c -+++ b/drivers/dma/mv_xor_v2.c -@@ -714,7 +714,6 @@ static int mv_xor_v2_resume(struct platform_device 
*dev) - static int mv_xor_v2_probe(struct platform_device *pdev) - { - struct mv_xor_v2_device *xor_dev; -- struct resource *res; - int i, ret = 0; - struct dma_device *dma_dev; - struct mv_xor_v2_sw_desc *sw_desc; -@@ -726,13 +725,11 @@ static int mv_xor_v2_probe(struct platform_device *pdev) - if (!xor_dev) - return -ENOMEM; - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res); -+ xor_dev->dma_base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(xor_dev->dma_base)) - return PTR_ERR(xor_dev->dma_base); - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 1); -- xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res); -+ xor_dev->glob_base = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(xor_dev->glob_base)) - return PTR_ERR(xor_dev->glob_base); - -diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/mxs-dma.c -+++ b/drivers/dma/mxs-dma.c -@@ -746,7 +746,6 @@ static int mxs_dma_probe(struct platform_device *pdev) - struct device_node *np = pdev->dev.of_node; - const struct mxs_dma_type *dma_type; - struct mxs_dma_engine *mxs_dma; -- struct resource *iores; - int ret, i; - - mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL); -@@ -763,8 +762,7 @@ static int mxs_dma_probe(struct platform_device *pdev) - mxs_dma->type = dma_type->type; - mxs_dma->dev_id = dma_type->id; - -- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- mxs_dma->base = devm_ioremap_resource(&pdev->dev, iores); -+ mxs_dma->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(mxs_dma->base)) - return PTR_ERR(mxs_dma->base); - -diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/nbpfaxi.c -+++ b/drivers/dma/nbpfaxi.c -@@ -1294,7 +1294,6 @@ static int nbpf_probe(struct platform_device *pdev) - struct device_node *np = dev->of_node; - struct nbpf_device *nbpf; - struct dma_device *dma_dev; -- struct resource *iomem; - const struct nbpf_config *cfg; - int num_channels; - int ret, irq, eirq, i; -@@ -1318,8 +1317,7 @@ static int nbpf_probe(struct platform_device *pdev) - dma_dev = &nbpf->dma_dev; - dma_dev->dev = dev; - -- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- nbpf->base = devm_ioremap_resource(dev, iomem); -+ nbpf->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(nbpf->base)) - return PTR_ERR(nbpf->base); - -diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/pxa_dma.c -+++ b/drivers/dma/pxa_dma.c -@@ -1345,7 +1345,6 @@ static int pxad_probe(struct platform_device *op) - const struct of_device_id *of_id; - const struct dma_slave_map *slave_map = NULL; - struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev); -- struct resource *iores; - int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0; - const enum dma_slave_buswidth widths = - DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | -@@ -1357,8 +1356,7 @@ static int pxad_probe(struct platform_device *op) - - spin_lock_init(&pdev->phy_lock); - -- iores = platform_get_resource(op, IORESOURCE_MEM, 0); -- pdev->base = devm_ioremap_resource(&op->dev, iores); -+ pdev->base = devm_platform_ioremap_resource(op, 0); - if (IS_ERR(pdev->base)) - return PTR_ERR(pdev->base); - -diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/qcom/bam_dma.c -+++ 
b/drivers/dma/qcom/bam_dma.c -@@ -1237,7 +1237,6 @@ static int bam_dma_probe(struct platform_device *pdev) - { - struct bam_device *bdev; - const struct of_device_id *match; -- struct resource *iores; - int ret, i; - - bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL); -@@ -1254,8 +1253,7 @@ static int bam_dma_probe(struct platform_device *pdev) - - bdev->layout = match->data; - -- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- bdev->regs = devm_ioremap_resource(&pdev->dev, iores); -+ bdev->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(bdev->regs)) - return PTR_ERR(bdev->regs); - -diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/sf-pdma/sf-pdma.c -+++ b/drivers/dma/sf-pdma/sf-pdma.c -@@ -493,7 +493,6 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma) - static int sf_pdma_probe(struct platform_device *pdev) - { - struct sf_pdma *pdma; -- struct resource *res; - int ret, n_chans; - const enum dma_slave_buswidth widths = - DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | -@@ -518,8 +517,7 @@ static int sf_pdma_probe(struct platform_device *pdev) - - pdma->n_chans = n_chans; - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- pdma->membase = devm_ioremap_resource(&pdev->dev, res); -+ pdma->membase = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(pdma->membase)) - return PTR_ERR(pdma->membase); - -diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/sh/usb-dmac.c -+++ b/drivers/dma/sh/usb-dmac.c -@@ -768,7 +768,6 @@ static int usb_dmac_probe(struct platform_device *pdev) - const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH; - struct dma_device *engine; - struct usb_dmac *dmac; -- struct resource *mem; - unsigned int i; - int ret; - -@@ -789,8 +788,7 @@ static int usb_dmac_probe(struct platform_device *pdev) - return -ENOMEM; - - /* Request resources. 
*/ -- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- dmac->iomem = devm_ioremap_resource(&pdev->dev, mem); -+ dmac->iomem = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(dmac->iomem)) - return PTR_ERR(dmac->iomem); - -diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/stm32-dmamux.c -+++ b/drivers/dma/stm32-dmamux.c -@@ -179,7 +179,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev) - const struct of_device_id *match; - struct device_node *dma_node; - struct stm32_dmamux_data *stm32_dmamux; -- struct resource *res; - void __iomem *iomem; - struct reset_control *rst; - int i, count, ret; -@@ -238,8 +237,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev) - } - pm_runtime_get_noresume(&pdev->dev); - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- iomem = devm_ioremap_resource(&pdev->dev, res); -+ iomem = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(iomem)) - return PTR_ERR(iomem); - -diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/stm32-mdma.c -+++ b/drivers/dma/stm32-mdma.c -@@ -1595,7 +1595,6 @@ static int stm32_mdma_probe(struct platform_device *pdev) - struct stm32_mdma_device *dmadev; - struct dma_device *dd; - struct device_node *of_node; -- struct resource *res; - struct reset_control *rst; - u32 nr_channels, nr_requests; - int i, count, ret; -@@ -1637,8 +1636,7 @@ static int stm32_mdma_probe(struct platform_device *pdev) - count); - dmadev->nr_ahb_addr_masks = count; - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- dmadev->base = devm_ioremap_resource(&pdev->dev, res); -+ dmadev->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(dmadev->base)) - return PTR_ERR(dmadev->base); - -diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/sun4i-dma.c -+++ b/drivers/dma/sun4i-dma.c -@@ -1144,15 +1144,13 @@ static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id) - static int sun4i_dma_probe(struct platform_device *pdev) - { - struct sun4i_dma_dev *priv; -- struct resource *res; - int i, j, ret; - - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- priv->base = devm_ioremap_resource(&pdev->dev, res); -+ priv->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); - -diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/sun6i-dma.c -+++ b/drivers/dma/sun6i-dma.c -@@ -1283,7 +1283,6 @@ static int sun6i_dma_probe(struct platform_device *pdev) - { - struct device_node *np = pdev->dev.of_node; - struct sun6i_dma_dev *sdc; -- struct resource *res; - int ret, i; - - sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL); -@@ -1294,8 +1293,7 @@ static int sun6i_dma_probe(struct platform_device *pdev) - if (!sdc->cfg) - return -ENODEV; - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- sdc->base = devm_ioremap_resource(&pdev->dev, res); -+ sdc->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(sdc->base)) - return PTR_ERR(sdc->base); - -diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/tegra210-adma.c -+++ b/drivers/dma/tegra210-adma.c -@@ -837,7 +837,6 @@ static int 
tegra_adma_probe(struct platform_device *pdev) - { - const struct tegra_adma_chip_data *cdata; - struct tegra_adma *tdma; -- struct resource *res; - int ret, i; - - cdata = of_device_get_match_data(&pdev->dev); -@@ -857,8 +856,7 @@ static int tegra_adma_probe(struct platform_device *pdev) - tdma->nr_channels = cdata->nr_channels; - platform_set_drvdata(pdev, tdma); - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- tdma->base_addr = devm_ioremap_resource(&pdev->dev, res); -+ tdma->base_addr = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(tdma->base_addr)) - return PTR_ERR(tdma->base_addr); - -diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/ti/cppi41.c -+++ b/drivers/dma/ti/cppi41.c -@@ -1039,7 +1039,6 @@ static int cppi41_dma_probe(struct platform_device *pdev) - struct cppi41_dd *cdd; - struct device *dev = &pdev->dev; - const struct cppi_glue_infos *glue_info; -- struct resource *mem; - int index; - int irq; - int ret; -@@ -1072,18 +1071,15 @@ static int cppi41_dma_probe(struct platform_device *pdev) - if (index < 0) - return index; - -- mem = platform_get_resource(pdev, IORESOURCE_MEM, index); -- cdd->ctrl_mem = devm_ioremap_resource(dev, mem); -+ cdd->ctrl_mem = devm_platform_ioremap_resource(pdev, index); - if (IS_ERR(cdd->ctrl_mem)) - return PTR_ERR(cdd->ctrl_mem); - -- mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 1); -- cdd->sched_mem = devm_ioremap_resource(dev, mem); -+ cdd->sched_mem = devm_platform_ioremap_resource(pdev, index + 1); - if (IS_ERR(cdd->sched_mem)) - return PTR_ERR(cdd->sched_mem); - -- mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 2); -- cdd->qmgr_mem = devm_ioremap_resource(dev, mem); -+ cdd->qmgr_mem = devm_platform_ioremap_resource(pdev, index + 2); - if (IS_ERR(cdd->qmgr_mem)) - return PTR_ERR(cdd->qmgr_mem); - -diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/ti/omap-dma.c -+++ b/drivers/dma/ti/omap-dma.c -@@ -1658,7 +1658,6 @@ static int omap_dma_probe(struct platform_device *pdev) - { - const struct omap_dma_config *conf; - struct omap_dmadev *od; -- struct resource *res; - int rc, i, irq; - u32 val; - -@@ -1666,8 +1665,7 @@ static int omap_dma_probe(struct platform_device *pdev) - if (!od) - return -ENOMEM; - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- od->base = devm_ioremap_resource(&pdev->dev, res); -+ od->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(od->base)) - return PTR_ERR(od->base); - -diff --git a/drivers/dma/udma.c b/drivers/dma/udma.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/dma/udma.c -@@ -0,0 +1,432 @@ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define USE_DMA_MALLOC -+// #define DMA_CONFIG_DEBUG -+ -+#define DEVICE_NAME "udma" -+#define IOC_MAGIC 'c' -+#define DMA_MEMCPY_CMD _IOR(IOC_MAGIC, 0, int) -+#define DMA_VA_TO_PA _IOR(IOC_MAGIC, 1, int) -+ -+static unsigned char dma_major; -+static struct class *dma_class; -+static struct dma_device *dma_dev; -+static struct dma_chan *dma_chan; -+static struct dma_async_tx_descriptor *dma_tx; -+static struct list_head dmabuf_list; -+static struct completion dma_m2m_ok; -+static struct mutex dma_mutex; -+ -+typedef struct { -+ void *src; -+ void *dst; -+ size_t size; -+#ifdef DMA_CONFIG_DEBUG -+ long long time[10]; -+ int 
time_cnt; -+ int dma_irq_subscript; -+#endif -+} memcpy_msg_t; -+ -+typedef struct { -+ void *user_addr; -+ void *dma_addr; -+} va_to_pa_msg_t; -+ -+typedef struct { -+ size_t size; // Size of the buffer -+ unsigned long user_addr; // User virtual address of the buffer -+ void *kern_addr; // Kernel virtual address of the buffer -+ dma_addr_t dma_addr; // DMA bus address of the buffer -+ struct list_head list; // List node pointers for dma alloc list -+} dma_map_info_t; -+ -+typedef struct { -+ dma_addr_t addr; -+ size_t size; -+ int dirty; -+} va2pa_t; -+ -+#ifdef DMA_CONFIG_DEBUG -+#include -+ -+#define DMA_TIME_STAMP() \ -+ do { \ -+ g_time[g_time_cnt++] = getus(); \ -+ } while (0) -+ -+static volatile int g_time_cnt; -+static volatile long long g_time[10]; -+static int g_dma_irq_subscript; -+ -+static long long getus(void) -+{ -+ return ktime_to_us(ktime_get()); -+} -+#else -+#define DMA_TIME_STAMP() -+#endif -+ -+static void dma_callback_func(void *priv) -+{ -+#ifdef DMA_CONFIG_DEBUG -+ g_dma_irq_subscript = g_time_cnt; -+#endif -+ DMA_TIME_STAMP(); -+ complete(&dma_m2m_ok); -+} -+ -+static int dma_open(struct inode *inode, struct file *filp) -+{ -+ return (dma_dev != NULL) ? 0 : -EPERM; -+} -+ -+static int dma_release(struct inode *inode, struct file *filp) -+{ -+ return 0; -+} -+ -+static ssize_t dma_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos) -+{ -+ return size; -+} -+ -+static ssize_t dma_write(struct file *filp, const char __user *buf, size_t size, loff_t *ppos) -+{ -+ return size; -+} -+ -+#ifdef USE_DMA_MALLOC -+static int dma_malloc(dma_map_info_t *dma_info, struct vm_area_struct *vma) -+{ -+ int ret; -+ -+ dma_info->kern_addr = dma_alloc_coherent(dma_dev->dev, dma_info->size, &dma_info->dma_addr, GFP_KERNEL); -+ if (!dma_info->kern_addr) { -+ dev_err(dma_dev->dev,"Unable to allocate contiguous DMA memory region of size " \ -+ "%zu.\n", dma_info->size); -+ return -ENOMEM; -+ } -+ -+ ret = dma_mmap_coherent(dma_dev->dev, vma, dma_info->kern_addr, -+ dma_info->dma_addr, dma_info->size); -+ if (ret < 0) { -+ dev_err(dma_dev->dev,"Unable to remap address %p to userspace address 0x%lx, size "\ -+ "%zu.\n", dma_info->kern_addr, dma_info->user_addr, \ -+ dma_info->size); -+ return -1; -+ } -+ -+ return 0; -+} -+ -+static int dma_free(dma_map_info_t *dma_info) -+{ -+ dma_free_coherent(dma_dev->dev, dma_info->size, dma_info->kern_addr, dma_info->dma_addr); -+ -+ return 0; -+} -+#else -+static int kernel_malloc(dma_map_info_t *dma_info, struct vm_area_struct *vma) -+{ -+ dma_info->kern_addr = kmalloc(dma_info->size, GFP_KERNEL); -+ if (!dma_info->kern_addr) { -+ dev_err(dma_dev->dev,"kmalloc failed\n"); -+ return -ENOMEM; -+ } -+ -+ if (remap_pfn_range(vma, -+ vma->vm_start, -+ (virt_to_phys(dma_info->kern_addr) >> PAGE_SHIFT), -+ vma->vm_end - vma->vm_start, -+ vma->vm_page_prot)) { -+ return -EAGAIN; -+ } -+ -+ dma_info->dma_addr = dma_map_single(dma_dev->dev, dma_info->kern_addr, dma_info->size, DMA_FROM_DEVICE); -+ if (dma_mapping_error(dma_dev->dev, dma_info->dma_addr)) { -+ dev_err(dma_dev->dev,"mapping buffer failed\n"); -+ return -1; -+ } -+ -+ return 0; -+} -+ -+static int kernel_free(dma_map_info_t *dma_info) -+{ -+ dma_unmap_single(dma_dev->dev, dma_info->dma_addr, dma_info->size, DMA_FROM_DEVICE); -+ kfree(dma_info->kern_addr); -+ -+ return 0; -+} -+#endif -+ -+static void dma_vma_close(struct vm_area_struct *vma) -+{ -+ dma_map_info_t *dma_info; -+ -+ dma_info = vma->vm_private_data; -+#ifdef USE_DMA_MALLOC -+ dma_free(dma_info); -+#else -+ 
kernel_free(dma_info); -+#endif -+ kfree(dma_info); -+ -+ return; -+} -+ -+static const struct vm_operations_struct dma_vm_ops = { -+ .close = dma_vma_close, -+}; -+ -+static int dma_mmap(struct file *file, struct vm_area_struct *vma) -+{ -+ int ret; -+ dma_map_info_t *dma_info; -+ -+ dma_info = kmalloc(sizeof(*dma_info), GFP_KERNEL); -+ if (dma_info == NULL) { -+ dev_err(dma_dev->dev,"Unable to allocate VMA data structure."); -+ return -ENOMEM; -+ } -+ -+ dma_info->size = vma->vm_end - vma->vm_start; -+ dma_info->user_addr = vma->vm_start; -+ -+#ifdef USE_DMA_MALLOC -+ ret = dma_malloc(dma_info, vma); -+#else -+ ret = kernel_malloc(dma_info, vma); -+#endif -+ -+ if (ret < 0) { -+ return -1; -+ } -+ -+ vma->vm_ops = &dma_vm_ops; -+ vma->vm_private_data = dma_info; -+ vma->vm_flags |= VM_DONTCOPY; -+ -+ list_add(&dma_info->list, &dmabuf_list); -+ return 0; -+} -+ -+static int va2pa(void *va, size_t size, va2pa_t **va2pa) -+{ -+ pmd_t *pmd; -+ pte_t *pte; -+ -+ unsigned long pg_offset; -+ unsigned long pg_address; -+ unsigned long old_pfn; -+ unsigned long paddr; -+ size_t total; -+ int flag = 0; -+ -+ va2pa_t *p; -+ int num = size/sizeof(PAGE_SIZE); -+ unsigned long vaddr = (unsigned long)va; -+ int i, j; -+ -+ p = kmalloc(num*sizeof(va2pa_t), GFP_KERNEL); -+ *va2pa = p; -+ -+ j = 0; -+ old_pfn = 0; -+ total = 0; -+ -+ for (i = 0; i < num; i++) { -+ memset(p +i, 0x00, sizeof(va2pa_t)); -+ pmd = pmd_off(current->mm, vaddr); -+ if(pmd_none(*pmd)) { -+ dev_err(dma_dev->dev, "not in the pmd!"); -+ flag = -1; -+ break; -+ } -+ -+ pte = pte_offset_map(pmd, vaddr); -+ if(pte_none(*pte)) { -+ dev_err(dma_dev->dev, "not in the pte!"); -+ flag = -1; -+ break; -+ } -+ -+ pg_offset = offset_in_page(vaddr); -+ pg_address = pte_pfn(__pte(pte_val(*pte))) << PAGE_SHIFT; -+ paddr = pg_address | pg_offset; -+ -+ if ((old_pfn + PAGE_SIZE) == pg_address) { -+ p[j].size += (PAGE_SIZE - (vaddr - (vaddr & PAGE_MASK))); -+ total += (PAGE_SIZE - (vaddr - (vaddr & PAGE_MASK))); -+ p[j].dirty = 1; -+ } else { -+ if (p[j].dirty == 1) { -+ j++; -+ } -+ p[j].addr = paddr; -+ p[j].size = (PAGE_SIZE - (vaddr - (vaddr & PAGE_MASK))); -+ total += p[j].size; -+ p[j].dirty = 1; -+ } -+ if (total >= size) { -+ j ++; -+ break; -+ } -+ old_pfn = pg_address; -+ vaddr = (vaddr & PAGE_MASK) + PAGE_SIZE; -+ } -+ -+ if (flag == -1) { -+ kfree(p); -+ *va2pa = NULL; -+ j = 0; -+ } -+ -+ return j; -+} -+ -+static int dma_memcpy(dma_addr_t dst, dma_addr_t src, size_t size) -+{ -+#ifndef USE_DMA_MALLOC -+ dma_sync_single_for_cpu(dma_dev->dev, (dma_addr_t)src, size, DMA_FROM_DEVICE); -+#endif -+ mutex_lock(&dma_mutex); -+ dma_tx = dma_dev->device_prep_dma_memcpy(dma_chan, -+ dst, -+ src, -+ size, -+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT); -+ if (!dma_tx){ -+ dev_err(dma_dev->dev, "Failed to prepare DMA memcpy"); -+ return -1; -+ } -+ -+ dma_tx->callback = dma_callback_func;//set call back function -+ dma_tx->callback_param = NULL; -+ if (dma_submit_error(dma_tx->tx_submit(dma_tx))){ -+ dev_err(dma_dev->dev, "Failed to do DMA tx_submit"); -+ return -1; -+ } -+ -+ init_completion(&dma_m2m_ok); -+ dma_async_issue_pending(dma_chan);//begin dma transfer -+ wait_for_completion(&dma_m2m_ok); -+ reinit_completion(&dma_m2m_ok); -+ -+#ifndef USE_DMA_MALLOC -+ dma_sync_single_for_device(dma_dev->dev, (dma_addr_t)msg.dst, msg.size, DMA_FROM_DEVICE); -+#endif -+ mutex_unlock(&dma_mutex); -+ -+ return 0; -+} -+ -+static long dma_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -+{ -+ if (cmd == DMA_MEMCPY_CMD) { -+ memcpy_msg_t msg; -+ va2pa_t 
*s_pa_l, *d_pa_l; -+ int i, loop, ret; -+ size_t off; -+ -+ if(copy_from_user(&msg, (void *)arg, sizeof(memcpy_msg_t))) { -+ return -EFAULT; -+ } -+ -+ ret = va2pa(msg.src, msg.size, &s_pa_l); -+ if (ret != 1) { -+ if (s_pa_l) { -+ kfree(s_pa_l); -+ } -+ return -1; -+ } -+ ret = va2pa(msg.dst, msg.size, &d_pa_l); -+ if (ret == 0) { -+ kfree(s_pa_l); -+ return -1; -+ } -+ -+ loop = ret; -+ off = 0; -+ -+ for (i = 0; i < loop; i++) { -+ dma_memcpy(d_pa_l[i].addr, s_pa_l[0].addr + off, d_pa_l[i].size); -+ off += d_pa_l[i].size; -+ } -+ -+ kfree(s_pa_l); -+ kfree(d_pa_l); -+ } -+ return 0; -+} -+ -+static const struct file_operations dma_fops = { -+ .owner = THIS_MODULE, -+ .read = dma_read, -+ .write = dma_write, -+ .open = dma_open, -+ .release = dma_release, -+ .mmap = dma_mmap, -+ .unlocked_ioctl = dma_ioctl, -+}; -+ -+static int dma_init(void) -+{ -+ dma_cap_mask_t mask; -+ struct device *dev_ret; -+ -+ dma_major = register_chrdev(0, DEVICE_NAME, &dma_fops); -+ if (dma_major < 0) -+ return dma_major; -+ -+ dma_class = class_create(THIS_MODULE, DEVICE_NAME); -+ if (IS_ERR(dma_class)) -+ return -1; -+ -+ dev_ret = device_create(dma_class, NULL, MKDEV(dma_major, 0), NULL, DEVICE_NAME); -+ if (IS_ERR(dev_ret)) -+ return PTR_ERR(dev_ret); -+ -+ dma_cap_zero(mask); -+ dma_cap_set(DMA_MEMCPY, mask);//direction:memory to memory -+ dma_chan = dma_request_channel(mask,NULL,NULL); //request a dma channel -+ if (!dma_chan) { -+ printk(KERN_ERR"dma request failed\n"); -+ return -1; -+ } -+ -+ dma_dev = dma_chan->device; -+ dma_set_mask(dma_dev->dev, DMA_BIT_MASK(32)); -+ -+ INIT_LIST_HEAD(&dmabuf_list); -+ dev_dbg(dma_dev->dev, "dma channel id = %d\n",dma_chan->chan_id); -+ mutex_init(&dma_mutex); -+ -+ return 0; -+} -+ -+static void dma_exit(void) -+{ -+ unregister_chrdev(dma_major, DEVICE_NAME); -+ device_destroy(dma_class, MKDEV(dma_major, 0)); -+ class_destroy(dma_class); -+ dma_release_channel(dma_chan); -+} -+ -+module_init(dma_init); -+module_exit(dma_exit); -+ -+MODULE_LICENSE("GPL"); -\ No newline at end of file -diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c -index 111111111111..222222222222 100644 ---- a/drivers/dma/xilinx/zynqmp_dma.c -+++ b/drivers/dma/xilinx/zynqmp_dma.c -@@ -890,7 +890,6 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, - struct platform_device *pdev) - { - struct zynqmp_dma_chan *chan; -- struct resource *res; - struct device_node *node = pdev->dev.of_node; - int err; - -@@ -900,8 +899,7 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, - chan->dev = zdev->dev; - chan->zdev = zdev; - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- chan->regs = devm_ioremap_resource(&pdev->dev, res); -+ chan->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(chan->regs)) - return PTR_ERR(chan->regs); - --- -Armbian - diff --git a/patch/kernel/archive/spacemit-6.1/012-drivers-extcon.patch b/patch/kernel/archive/spacemit-6.1/012-drivers-extcon.patch deleted file mode 100644 index e99cc0b61db7..000000000000 --- a/patch/kernel/archive/spacemit-6.1/012-drivers-extcon.patch +++ /dev/null @@ -1,411 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Fri, 21 Jun 2024 11:54:06 -0400 -Subject: add spacemit patch set - -source: https://gitee.com/bianbu-linux/linux-6.1 - -Signed-off-by: Patrick Yavitz ---- - drivers/extcon/Kconfig | 7 + - drivers/extcon/Makefile | 1 + - drivers/extcon/extcon-k1xci.c | 358 ++++++++++ - 3 files changed, 366 insertions(+) 
- -diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig -index 111111111111..222222222222 100644 ---- a/drivers/extcon/Kconfig -+++ b/drivers/extcon/Kconfig -@@ -41,6 +41,13 @@ config EXTCON_FSA9480 - I2C and enables USB data, stereo and mono audio, video, microphone - and UART data to use a common connector port. - -+config EXTCON_USB_K1XCI -+ tristate "Spacemit K1-x USB extcon support" -+ depends on GPIOLIB || COMPILE_TEST -+ help -+ say Y here to enable spacemit k1-x usb wakeup irq based USB cable detection extcon support. -+ Used typically if wakeup irq is used for USB ID pin detection. -+ - config EXTCON_GPIO - tristate "GPIO extcon support" - depends on GPIOLIB || COMPILE_TEST -diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/extcon/Makefile -+++ b/drivers/extcon/Makefile -@@ -22,6 +22,7 @@ obj-$(CONFIG_EXTCON_PTN5150) += extcon-ptn5150.o - obj-$(CONFIG_EXTCON_QCOM_SPMI_MISC) += extcon-qcom-spmi-misc.o - obj-$(CONFIG_EXTCON_RT8973A) += extcon-rt8973a.o - obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o -+obj-$(CONFIG_EXTCON_USB_K1XCI) += extcon-k1xci.o - obj-$(CONFIG_EXTCON_USB_GPIO) += extcon-usb-gpio.o - obj-$(CONFIG_EXTCON_USBC_CROS_EC) += extcon-usbc-cros-ec.o - obj-$(CONFIG_EXTCON_USBC_TUSB320) += extcon-usbc-tusb320.o -diff --git a/drivers/extcon/extcon-k1xci.c b/drivers/extcon/extcon-k1xci.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/extcon/extcon-k1xci.c -@@ -0,0 +1,358 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * extcon-k1xci.c - Driver for usb vbus/id detect -+ * -+ * Copyright (c) 2023, Spacemit Corporation. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define USB_GPIO_DEBOUNCE_MS 200 /* ms */ -+ -+/* PMU_SD_ROT_WAKE_CLR */ -+#define USB_VBUS_WK_MASK BIT(10) -+#define USB_ID_WK_MASK BIT(11) -+ -+#define USB_VBUS_WK_CLR BIT(18) -+#define USB_ID_WK_CLR BIT(19) -+ -+#define USB_VBUS_WK_STATUS BIT(26) -+#define USB_ID_WK_STATUS BIT(27) -+ -+/* PMUA_USB_PHY_READ */ -+#define USB_ID BIT(1) -+#define USB_VBUS BIT(2) -+ -+struct mv_usb_extcon_info { -+ struct device *dev; -+ struct extcon_dev *edev; -+ -+ void __iomem *pmuap_reg; -+ void __iomem *pin_state_reg; -+ -+ int irq; -+ -+ unsigned long debounce_jiffies; -+ struct delayed_work wq_detcable; -+ -+ struct freq_qos_request qos_idle; -+ u32 lpm_qos; -+ -+ /* debugfs interface for user-space */ -+ struct dentry *dbgfs; -+ char dbgfs_name[32]; -+ uint32_t dbgfs_qos_mode; -+}; -+ -+static const unsigned int usb_extcon_cable[] = { -+ EXTCON_USB, -+ EXTCON_USB_HOST, -+ EXTCON_NONE, -+}; -+ -+static void mv_enable_wakeup_irqs(struct mv_usb_extcon_info *info) -+{ -+ u32 reg; -+ reg = readl(info->pmuap_reg); -+ reg |= (USB_VBUS_WK_MASK | USB_ID_WK_MASK); -+ writel(reg, info->pmuap_reg); -+} -+ -+static void mv_disable_wakeup_irqs(struct mv_usb_extcon_info *info) -+{ -+ u32 reg; -+ reg = readl(info->pmuap_reg); -+ reg &= ~(USB_VBUS_WK_MASK | USB_ID_WK_MASK); -+ writel(reg, info->pmuap_reg); -+} -+ -+/* -+ * "USB" = VBUS and "USB-HOST" = !ID, so we have: -+ * Both "USB" and "USB-HOST" can't be set as active at the -+ * same time so if "USB-HOST" is active (i.e. ID is 0) we keep "USB" inactive -+ * even if VBUS is on. 
-+ * -+ * State | ID | VBUS -+ * ---------------------------------------- -+ * [1] USB | H | H -+ * [2] none | H | L -+ * [3] USB-HOST | L | H -+ * [4] USB-HOST | L | L -+ * -+ * In case we have only one of these signals: -+ * - VBUS only - we want to distinguish between [1] and [2], so ID is always 1. -+ * - ID only - we want to distinguish between [1] and [4], so VBUS = ID. -+*/ -+ -+static void usb_detect_cable(struct work_struct *work) -+{ -+ int id, vbus; -+ int id_state, vbus_state; -+ u32 reg; -+ u32 state; -+ -+ struct mv_usb_extcon_info *info = container_of( -+ to_delayed_work(work), struct mv_usb_extcon_info, wq_detcable); -+ -+ reg = readl(info->pmuap_reg); -+ id = reg & USB_ID_WK_STATUS; -+ vbus = reg & USB_VBUS_WK_STATUS; -+ -+ pr_info("info->pmuap_reg: 0x%x id: %d vbus: %d \n", reg, id, vbus); -+ if (id || vbus) { -+ state = readl(info->pin_state_reg); -+ id_state = state & USB_ID; -+ vbus_state = state & USB_VBUS; -+ -+ if (!id_state) { -+ dev_info(info->dev, "USB we as host connected\n"); -+ extcon_set_state_sync(info->edev, EXTCON_USB_HOST, -+ true); -+ } else { -+ dev_info(info->dev, "USB we as host disconnected\n"); -+ extcon_set_state_sync(info->edev, EXTCON_USB_HOST, -+ false); -+ -+ if (!vbus_state) { -+ dev_info(info->dev, -+ "USB we as peripheral disconnected\n"); -+ extcon_set_state_sync(info->edev, EXTCON_USB, -+ false); -+ } else { -+ dev_dbg(info->dev, "dbgfs_qos_mode = %d \n", -+ info->dbgfs_qos_mode); -+ dev_info(info->dev, -+ "USB we as peripheral connected\n"); -+ extcon_set_state_sync(info->edev, EXTCON_USB, -+ true); -+ } -+ } -+ } -+ -+ reg |= (USB_VBUS_WK_CLR | USB_ID_WK_CLR); -+ writel(reg, info->pmuap_reg); -+ mv_enable_wakeup_irqs(info); -+} -+ -+static irqreturn_t mv_wakeup_interrupt(int irq, void *_info) -+{ -+ struct mv_usb_extcon_info *info = (struct mv_usb_extcon_info *)_info; -+ -+ pr_info("extcon_mvci: mv_wakeup_interrupt... 
\n"); -+ mv_disable_wakeup_irqs(info); -+ -+ queue_delayed_work(system_power_efficient_wq, &info->wq_detcable, -+ info->debounce_jiffies); -+ -+ return IRQ_HANDLED; -+} -+ -+static ssize_t extcon_mvci_dbgfs_read(struct file *filp, char __user *user_buf, -+ size_t size, loff_t *ppos) -+{ -+ struct mv_usb_extcon_info *info = filp->private_data; -+ char buf[64]; -+ int ret, n, copy; -+ -+ n = min(sizeof(buf) - 1, size); -+ -+ if (info->dbgfs_qos_mode == 1) -+ copy = sprintf(buf, "enable mvci qos_hold\n"); -+ else -+ copy = sprintf(buf, "disable mvci qos_hold\n"); -+ -+ copy = min(n, copy); -+ ret = simple_read_from_buffer(user_buf, size, ppos, buf, copy); -+ -+ return ret; -+} -+ -+static ssize_t extcon_mvci_dbgfs_write(struct file *filp, -+ const char __user *user_buf, size_t size, -+ loff_t *ppos) -+{ -+ struct mv_usb_extcon_info *info = filp->private_data; -+ char buf[32]; -+ int buf_size, i = 0; -+ -+ buf_size = min(size, sizeof(buf) - 1); -+ if (copy_from_user(buf, user_buf, buf_size)) -+ return -EFAULT; -+ -+ *(buf + buf_size) = '\0'; -+ while (*(buf + i) != '\n' && *(buf + i) != '\0') -+ i++; -+ *(buf + i) = '\0'; -+ -+ i = 0; -+ while (*(buf + i) == ' ') -+ i++; -+ -+ if (!strncmp(buf + i, "enable", 6)) { -+ info->dbgfs_qos_mode = 1; -+ } else if (!strncmp(buf + i, "disable", 7)) { -+ info->dbgfs_qos_mode = 0; -+ dev_info(info->dev, "mvci qos release\n"); -+ } else { -+ dev_err(info->dev, "only accept: enable, disable\n"); -+ } -+ -+ return size; -+} -+ -+static const struct file_operations extcon_mvci_dbgfs_ops = { -+ .owner = THIS_MODULE, -+ .open = simple_open, -+ .read = extcon_mvci_dbgfs_read, -+ .write = extcon_mvci_dbgfs_write, -+}; -+ -+static int mv_usb_extcon_probe(struct platform_device *pdev) -+{ -+ struct device *dev = &pdev->dev; -+ struct device_node *np = dev->of_node; -+ struct resource *res; -+ struct mv_usb_extcon_info *info; -+ int ret; -+ u32 property; -+#ifdef CONFIG_PM -+ //struct freq_constraints *idle_qos; -+#endif -+ -+ if (!np) -+ return -EINVAL; -+ -+ dev_info(dev, "mv_usb_extcon_probe\n"); -+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); -+ if (!info) -+ return -ENOMEM; -+ -+ info->dev = dev; -+ info->irq = platform_get_irq(pdev, 0); -+ if (info->irq < 0) { -+ dev_err(dev, "missing IRQ resource\n"); -+ return -EINVAL; -+ } -+ -+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg_pmuap"); -+ if (!res) { -+ dev_err(dev, "missing memory base resource\n"); -+ return -ENODEV; -+ } -+ -+ info->pmuap_reg = -+ devm_ioremap(&pdev->dev, res->start, resource_size(res)); -+ if (!info->pmuap_reg) { -+ dev_err(dev, "ioremap failed\n"); -+ return -ENODEV; -+ } -+ -+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pin_state"); -+ if (!res) { -+ dev_err(dev, "missing memory base resource\n"); -+ return -ENODEV; -+ } -+ -+ info->pin_state_reg = -+ devm_ioremap(&pdev->dev, res->start, resource_size(res)); -+ if (!info->pin_state_reg) { -+ dev_err(dev, "ioremap failed\n"); -+ return -ENODEV; -+ } -+ -+ info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable); -+ if (IS_ERR(info->edev)) { -+ dev_err(dev, "failed to allocate extcon device\n"); -+ return -ENOMEM; -+ } -+ -+ ret = devm_extcon_dev_register(dev, info->edev); -+ if (ret < 0) { -+ dev_err(dev, "failed to register extcon device\n"); -+ return ret; -+ } -+ -+ info->dbgfs_qos_mode = 1; -+ -+ ret = devm_request_irq(dev, info->irq, mv_wakeup_interrupt, -+ IRQF_NO_SUSPEND, "mv-wakeup", info); -+ if (ret) { -+ dev_err(dev, "failed to request IRQ #%d --> %d\n", info->irq, -+ ret); -+ 
return ret; -+ } -+ -+ if (!of_property_read_s32(pdev->dev.of_node, "lpm-qos", &property)) -+ info->lpm_qos = property; -+ else -+ info->lpm_qos = 15; -+ -+ platform_set_drvdata(pdev, info); -+ device_init_wakeup(dev, 1); -+ -+ info->debounce_jiffies = msecs_to_jiffies(USB_GPIO_DEBOUNCE_MS); -+ -+ INIT_DELAYED_WORK(&info->wq_detcable, usb_detect_cable); -+ -+ mv_enable_wakeup_irqs(info); -+ -+ /* Perform initial detection */ -+ usb_detect_cable(&info->wq_detcable.work); -+ -+ info->dbgfs = debugfs_create_file("mvci_extcon_qos", 0644, NULL, info, -+ &extcon_mvci_dbgfs_ops); -+ if (!info->dbgfs) { -+ dev_err(info->dev, "failed to create debugfs\n"); -+ return -ENOMEM; -+ } -+ -+ return 0; -+} -+ -+static int mv_usb_extcon_remove(struct platform_device *pdev) -+{ -+ struct mv_usb_extcon_info *info = platform_get_drvdata(pdev); -+ -+ cancel_delayed_work_sync(&info->wq_detcable); -+ device_init_wakeup(&pdev->dev, 0); -+ -+ freq_qos_remove_request(&info->qos_idle); -+ -+ return 0; -+} -+ -+static const struct of_device_id mv_usb_extcon_dt_match[] = { -+ { -+ .compatible = "spacemit,vbus-id", -+ }, -+ {} -+}; -+ -+MODULE_DEVICE_TABLE(of, mv_usb_extcon_dt_match); -+ -+static struct platform_driver usb_extcon_driver = { -+ .probe = mv_usb_extcon_probe, -+ .remove = mv_usb_extcon_remove, -+ .driver = { -+ .name = "extcon-k1xci-usb", -+ .of_match_table = mv_usb_extcon_dt_match, -+ }, -+}; -+ -+module_platform_driver(usb_extcon_driver); -+ -+MODULE_LICENSE("GPL v2"); --- -Armbian - diff --git a/patch/kernel/archive/spacemit-6.1/013-drivers-firmware-arm_scmi.patch b/patch/kernel/archive/spacemit-6.1/013-drivers-firmware-arm_scmi.patch deleted file mode 100644 index cf0b23b55f3c..000000000000 --- a/patch/kernel/archive/spacemit-6.1/013-drivers-firmware-arm_scmi.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Fri, 21 Jun 2024 11:54:06 -0400 -Subject: add spacemit patch set - -source: https://gitee.com/bianbu-linux/linux-6.1 - -Signed-off-by: Patrick Yavitz ---- - drivers/firmware/arm_scmi/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig -index 111111111111..222222222222 100644 ---- a/drivers/firmware/arm_scmi/Kconfig -+++ b/drivers/firmware/arm_scmi/Kconfig -@@ -3,7 +3,7 @@ menu "ARM System Control and Management Interface Protocol" - - config ARM_SCMI_PROTOCOL - tristate "ARM System Control and Management Interface (SCMI) Message Protocol" -- depends on ARM || ARM64 || COMPILE_TEST -+ depends on ARM || ARM64 || COMPILE_TEST || SOC_SPACEMIT - help - ARM System Control and Management Interface (SCMI) protocol is a - set of operating system-independent software interfaces that are --- -Armbian - diff --git a/patch/kernel/archive/spacemit-6.1/014-drivers-gpio.patch b/patch/kernel/archive/spacemit-6.1/014-drivers-gpio.patch deleted file mode 100644 index 9e0cc992cb5a..000000000000 --- a/patch/kernel/archive/spacemit-6.1/014-drivers-gpio.patch +++ /dev/null @@ -1,476 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Fri, 21 Jun 2024 11:54:06 -0400 -Subject: add spacemit patch set - -source: https://gitee.com/bianbu-linux/linux-6.1 - -Signed-off-by: Patrick Yavitz ---- - drivers/gpio/Kconfig | 9 + - drivers/gpio/Makefile | 1 + - drivers/gpio/gpio-k1x.c | 424 ++++++++++ - 3 files changed, 434 insertions(+) - -diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig 
-index 111111111111..222222222222 100644 ---- a/drivers/gpio/Kconfig -+++ b/drivers/gpio/Kconfig -@@ -355,6 +355,15 @@ config GPIO_IOP - - If unsure, say N. - -+config GPIO_K1X -+ bool "SPACEMIT-K1X GPIO support" -+ depends on SOC_SPACEMIT_K1X -+ help -+ Say yes here to support the K1X GPIO device. -+ The K1X GPIO device may have several banks, and each -+ bank control at most 32 GPIO pins. The number of banks -+ is passed by device tree or platform data. -+ - config GPIO_IXP4XX - bool "Intel IXP4xx GPIO" - depends on ARCH_IXP4XX -diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/gpio/Makefile -+++ b/drivers/gpio/Makefile -@@ -187,3 +187,4 @@ obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o - obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o - obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o - obj-$(CONFIG_GPIO_ZYNQMP_MODEPIN) += gpio-zynqmp-modepin.o -+obj-$(CONFIG_GPIO_K1X) += gpio-k1x.o -diff --git a/drivers/gpio/gpio-k1x.c b/drivers/gpio/gpio-k1x.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpio/gpio-k1x.c -@@ -0,0 +1,424 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * spacemit-k1x gpio driver file -+ * -+ * Copyright (C) 2023 Spacemit -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define GPLR 0x0 -+#define GPDR 0xc -+#define GPSR 0x18 -+#define GPCR 0x24 -+#define GRER 0x30 -+#define GFER 0x3c -+#define GEDR 0x48 -+#define GSDR 0x54 -+#define GCDR 0x60 -+#define GSRER 0x6c -+#define GCRER 0x78 -+#define GSFER 0x84 -+#define GCFER 0x90 -+#define GAPMASK 0x9c -+#define GCPMASK 0xa8 -+ -+#define K1X_BANK_GPIO_NUMBER (32) -+#define BANK_GPIO_MASK (K1X_BANK_GPIO_NUMBER - 1) -+ -+#define k1x_gpio_to_bank_idx(gpio) ((gpio)/K1X_BANK_GPIO_NUMBER) -+#define k1x_gpio_to_bank_offset(gpio) ((gpio) & BANK_GPIO_MASK) -+#define k1x_bank_to_gpio(idx, offset) (((idx) * K1X_BANK_GPIO_NUMBER) \ -+ | ((offset) & BANK_GPIO_MASK)) -+ -+struct k1x_gpio_bank { -+ void __iomem *reg_bank; -+ u32 irq_mask; -+ u32 irq_rising_edge; -+ u32 irq_falling_edge; -+}; -+ -+struct k1x_gpio_chip { -+ struct gpio_chip chip; -+ void __iomem *reg_base; -+ int irq; -+ struct irq_domain *domain; -+ unsigned int ngpio; -+ unsigned int nbank; -+ struct k1x_gpio_bank *banks; -+}; -+ -+static int k1x_gpio_to_irq(struct gpio_chip *chip, unsigned offset) -+{ -+ struct k1x_gpio_chip *k1x_chip = -+ container_of(chip, struct k1x_gpio_chip, chip); -+ -+ return irq_create_mapping(k1x_chip->domain, offset); -+} -+ -+static int k1x_gpio_direction_input(struct gpio_chip *chip, unsigned offset) -+{ -+ struct k1x_gpio_chip *k1x_chip = -+ container_of(chip, struct k1x_gpio_chip, chip); -+ struct k1x_gpio_bank *bank = -+ &k1x_chip->banks[k1x_gpio_to_bank_idx(offset)]; -+ u32 bit = (1 << k1x_gpio_to_bank_offset(offset)); -+ -+ writel(bit, bank->reg_bank + GCDR); -+ -+ return 0; -+} -+ -+static int k1x_gpio_direction_output(struct gpio_chip *chip, -+ unsigned offset, int value) -+{ -+ struct k1x_gpio_chip *k1x_chip = -+ container_of(chip, struct k1x_gpio_chip, chip); -+ struct k1x_gpio_bank *bank = -+ &k1x_chip->banks[k1x_gpio_to_bank_idx(offset)]; -+ u32 bit = (1 << k1x_gpio_to_bank_offset(offset)); -+ -+ /* Set value first. */ -+ writel(bit, bank->reg_bank + (value ? 
GPSR : GPCR)); -+ -+ writel(bit, bank->reg_bank + GSDR); -+ -+ return 0; -+} -+ -+static int k1x_gpio_get(struct gpio_chip *chip, unsigned offset) -+{ -+ struct k1x_gpio_chip *k1x_chip = -+ container_of(chip, struct k1x_gpio_chip, chip); -+ struct k1x_gpio_bank *bank = -+ &k1x_chip->banks[k1x_gpio_to_bank_idx(offset)]; -+ u32 bit = (1 << k1x_gpio_to_bank_offset(offset)); -+ u32 gplr; -+ -+ gplr = readl(bank->reg_bank + GPLR); -+ -+ return !!(gplr & bit); -+} -+ -+static void k1x_gpio_set(struct gpio_chip *chip, unsigned offset, int value) -+{ -+ struct k1x_gpio_chip *k1x_chip = -+ container_of(chip, struct k1x_gpio_chip, chip); -+ struct k1x_gpio_bank *bank = -+ &k1x_chip->banks[k1x_gpio_to_bank_idx(offset)]; -+ u32 bit = (1 << k1x_gpio_to_bank_offset(offset)); -+ u32 gpdr; -+ -+ gpdr = readl(bank->reg_bank + GPDR); -+ /* Is it configured as output? */ -+ if (gpdr & bit) -+ writel(bit, bank->reg_bank + (value ? GPSR : GPCR)); -+} -+ -+#ifdef CONFIG_OF_GPIO -+static int k1x_gpio_of_xlate(struct gpio_chip *chip, -+ const struct of_phandle_args *gpiospec, -+ u32 *flags) -+{ -+ struct k1x_gpio_chip *k1x_chip = -+ container_of(chip, struct k1x_gpio_chip, chip); -+ -+ /* GPIO index start from 0. */ -+ if (gpiospec->args[0] >= k1x_chip->ngpio) -+ return -EINVAL; -+ -+ if (flags) -+ *flags = gpiospec->args[1]; -+ -+ return gpiospec->args[0]; -+} -+#endif -+ -+static int k1x_gpio_irq_type(struct irq_data *d, unsigned int type) -+{ -+ struct k1x_gpio_chip *k1x_chip = irq_data_get_irq_chip_data(d); -+ int gpio = irqd_to_hwirq(d); -+ struct k1x_gpio_bank *bank = -+ &k1x_chip->banks[k1x_gpio_to_bank_idx(gpio)]; -+ u32 bit = (1 << k1x_gpio_to_bank_offset(gpio)); -+ -+ if (type & IRQ_TYPE_EDGE_RISING) { -+ bank->irq_rising_edge |= bit; -+ writel(bit, bank->reg_bank + GSRER); -+ } else { -+ bank->irq_rising_edge &= ~bit; -+ writel(bit, bank->reg_bank + GCRER); -+ } -+ -+ if (type & IRQ_TYPE_EDGE_FALLING) { -+ bank->irq_falling_edge |= bit; -+ writel(bit, bank->reg_bank + GSFER); -+ } else { -+ bank->irq_falling_edge &= ~bit; -+ writel(bit, bank->reg_bank + GCFER); -+ } -+ -+ return 0; -+} -+ -+static irqreturn_t k1x_gpio_demux_handler(int irq, void *data) -+{ -+ struct k1x_gpio_chip *k1x_chip = (struct k1x_gpio_chip *)data; -+ struct k1x_gpio_bank *bank; -+ int i, n; -+ u32 gedr; -+ unsigned long pending = 0; -+ unsigned int irqs_handled = 0; -+ -+ for (i = 0; i < k1x_chip->nbank; i++) { -+ bank = &k1x_chip->banks[i]; -+ -+ gedr = readl(bank->reg_bank + GEDR); -+ if (!gedr) -+ continue; -+ -+ writel(gedr, bank->reg_bank + GEDR); -+ gedr = gedr & bank->irq_mask; -+ -+ if (!gedr) -+ continue; -+ pending = gedr; -+ for_each_set_bit(n, &pending, BITS_PER_LONG) { -+ generic_handle_irq(irq_find_mapping(k1x_chip->domain, -+ k1x_bank_to_gpio(i, n))); -+ } -+ irqs_handled++; -+ } -+ -+ return irqs_handled ? 
IRQ_HANDLED : IRQ_NONE; -+} -+ -+static void k1x_ack_muxed_gpio(struct irq_data *d) -+{ -+ struct k1x_gpio_chip *k1x_chip = irq_data_get_irq_chip_data(d); -+ int gpio = irqd_to_hwirq(d); -+ struct k1x_gpio_bank *bank = -+ &k1x_chip->banks[k1x_gpio_to_bank_idx(gpio)]; -+ u32 bit = (1 << k1x_gpio_to_bank_offset(gpio)); -+ -+ writel(bit, bank->reg_bank + GEDR); -+} -+ -+static void k1x_mask_muxed_gpio(struct irq_data *d) -+{ -+ struct k1x_gpio_chip *k1x_chip = irq_data_get_irq_chip_data(d); -+ int gpio = irqd_to_hwirq(d); -+ struct k1x_gpio_bank *bank = -+ &k1x_chip->banks[k1x_gpio_to_bank_idx(gpio)]; -+ u32 bit = (1 << k1x_gpio_to_bank_offset(gpio)); -+ -+ bank->irq_mask &= ~bit; -+ -+ /* Clear the bit of rising and falling edge detection. */ -+ writel(bit, bank->reg_bank + GCRER); -+ writel(bit, bank->reg_bank + GCFER); -+} -+ -+static void k1x_unmask_muxed_gpio(struct irq_data *d) -+{ -+ struct k1x_gpio_chip *k1x_chip = irq_data_get_irq_chip_data(d); -+ int gpio = irqd_to_hwirq(d); -+ struct k1x_gpio_bank *bank = -+ &k1x_chip->banks[k1x_gpio_to_bank_idx(gpio)]; -+ u32 bit = (1 << k1x_gpio_to_bank_offset(gpio)); -+ -+ bank->irq_mask |= bit; -+ -+ /* Set the bit of rising and falling edge detection if the gpio has. */ -+ writel(bit & bank->irq_rising_edge, bank->reg_bank + GSRER); -+ writel(bit & bank->irq_falling_edge, bank->reg_bank + GSFER); -+} -+ -+static struct irq_chip k1x_muxed_gpio_chip = { -+ .name = "k1x-gpio-irqchip", -+ .irq_ack = k1x_ack_muxed_gpio, -+ .irq_mask = k1x_mask_muxed_gpio, -+ .irq_unmask = k1x_unmask_muxed_gpio, -+ .irq_set_type = k1x_gpio_irq_type, -+ .flags = IRQCHIP_SKIP_SET_WAKE, -+}; -+ -+static const struct of_device_id k1x_gpio_dt_ids[] = { -+ { .compatible = "spacemit,k1x-gpio"}, -+ {} -+}; -+ -+static int k1x_irq_domain_map(struct irq_domain *d, unsigned int irq, -+ irq_hw_number_t hw) -+{ -+ irq_set_chip_and_handler(irq, &k1x_muxed_gpio_chip, -+ handle_edge_irq); -+ irq_set_chip_data(irq, d->host_data); -+ -+ return 0; -+} -+ -+static const struct irq_domain_ops k1x_gpio_irq_domain_ops = { -+ .map = k1x_irq_domain_map, -+ .xlate = irq_domain_xlate_twocell, -+}; -+ -+static int k1x_gpio_probe_dt(struct platform_device *pdev, -+ struct k1x_gpio_chip *k1x_chip) -+{ -+ struct device_node *np = pdev->dev.of_node; -+ struct device_node *child; -+ u32 offset; -+ int i, nbank, ret; -+ -+ nbank = of_get_child_count(np); -+ if (nbank == 0) -+ return -EINVAL; -+ -+ k1x_chip->banks = devm_kzalloc(&pdev->dev, -+ sizeof(*k1x_chip->banks) * nbank, -+ GFP_KERNEL); -+ if (k1x_chip->banks == NULL) -+ return -ENOMEM; -+ -+ i = 0; -+ for_each_child_of_node(np, child) { -+ ret = of_property_read_u32(child, "reg-offset", &offset); -+ if (ret) { -+ of_node_put(child); -+ return ret; -+ } -+ k1x_chip->banks[i].reg_bank = k1x_chip->reg_base + offset; -+ i++; -+ } -+ -+ k1x_chip->nbank = nbank; -+ k1x_chip->ngpio = k1x_chip->nbank * K1X_BANK_GPIO_NUMBER; -+ -+ return 0; -+} -+ -+static int k1x_gpio_probe(struct platform_device *pdev) -+{ -+ struct device *dev = &pdev->dev; -+ struct device_node *np; -+ struct k1x_gpio_chip *k1x_chip; -+ struct k1x_gpio_bank *bank; -+ struct resource *res; -+ struct irq_domain *domain; -+ struct clk *clk; -+ -+ int irq, i, ret; -+ void __iomem *base; -+ -+ np = pdev->dev.of_node; -+ if (!np) -+ return -EINVAL; -+ -+ k1x_chip = devm_kzalloc(dev, sizeof(*k1x_chip), GFP_KERNEL); -+ if (k1x_chip == NULL) -+ return -ENOMEM; -+ -+ irq = platform_get_irq(pdev, 0); -+ if (irq < 0) -+ return irq; -+ -+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ 
if (!res) -+ return -EINVAL; -+ base = devm_ioremap_resource(dev, res); -+ if (!base) -+ return -EINVAL; -+ -+ k1x_chip->irq = irq; -+ k1x_chip->reg_base = base; -+ -+ ret = k1x_gpio_probe_dt(pdev, k1x_chip); -+ if (ret) { -+ dev_err(dev, "Fail to initialize gpio unit, error %d.\n", ret); -+ return ret; -+ } -+ -+ clk = devm_clk_get(dev, NULL); -+ if (IS_ERR(clk)) { -+ dev_err(dev, "Fail to get gpio clock, error %ld.\n", -+ PTR_ERR(clk)); -+ return PTR_ERR(clk); -+ } -+ ret = clk_prepare_enable(clk); -+ if (ret) { -+ dev_err(dev, "Fail to enable gpio clock, error %d.\n", ret); -+ return ret; -+ } -+ -+ domain = irq_domain_add_linear(np, k1x_chip->ngpio, -+ &k1x_gpio_irq_domain_ops, k1x_chip); -+ if (domain == NULL) -+ return -EINVAL; -+ -+ k1x_chip->domain = domain; -+ -+ /* Initialize the gpio chip */ -+ k1x_chip->chip.label = "k1x-gpio"; -+ k1x_chip->chip.request = gpiochip_generic_request; -+ k1x_chip->chip.free = gpiochip_generic_free; -+ k1x_chip->chip.direction_input = k1x_gpio_direction_input; -+ k1x_chip->chip.direction_output = k1x_gpio_direction_output; -+ k1x_chip->chip.get = k1x_gpio_get; -+ k1x_chip->chip.set = k1x_gpio_set; -+ k1x_chip->chip.to_irq = k1x_gpio_to_irq; -+#ifdef CONFIG_OF_GPIO -+ k1x_chip->chip.of_node = np; -+ k1x_chip->chip.of_xlate = k1x_gpio_of_xlate; -+ k1x_chip->chip.of_gpio_n_cells = 2; -+#endif -+ k1x_chip->chip.ngpio = k1x_chip->ngpio; -+ -+ if (devm_request_irq(&pdev->dev, irq, -+ k1x_gpio_demux_handler, 0, k1x_chip->chip.label, k1x_chip)) { -+ dev_err(&pdev->dev, "failed to request high IRQ\n"); -+ ret = -ENOENT; -+ goto err; -+ } -+ -+ gpiochip_add(&k1x_chip->chip); -+ -+ /* clear all GPIO edge detects */ -+ for (i = 0; i < k1x_chip->nbank; i++) { -+ bank = &k1x_chip->banks[i]; -+ writel(0xffffffff, bank->reg_bank + GCFER); -+ writel(0xffffffff, bank->reg_bank + GCRER); -+ /* Unmask edge detection to AP. 
*/ -+ writel(0xffffffff, bank->reg_bank + GAPMASK); -+ } -+ -+ return 0; -+err: -+ irq_domain_remove(domain); -+ return ret; -+} -+ -+static struct platform_driver k1x_gpio_driver = { -+ .probe = k1x_gpio_probe, -+ .driver = { -+ .name = "k1x-gpio", -+ .of_match_table = k1x_gpio_dt_ids, -+ }, -+}; -+ -+static int __init k1x_gpio_init(void) -+{ -+ return platform_driver_register(&k1x_gpio_driver); -+} -+subsys_initcall(k1x_gpio_init); --- -Armbian - diff --git a/patch/kernel/archive/spacemit-6.1/015-drivers-gpu.patch b/patch/kernel/archive/spacemit-6.1/015-drivers-gpu.patch deleted file mode 100644 index 33906d6899c2..000000000000 --- a/patch/kernel/archive/spacemit-6.1/015-drivers-gpu.patch +++ /dev/null @@ -1,303187 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Patrick Yavitz -Date: Fri, 21 Jun 2024 11:54:06 -0400 -Subject: add spacemit patch set - -source: https://gitee.com/bianbu-linux/linux-6.1 - -Signed-off-by: Patrick Yavitz ---- - drivers/gpu/drm/Kconfig | 4 + - drivers/gpu/drm/Makefile | 2 + - drivers/gpu/drm/img-rogue/Kconfig | 10 + - drivers/gpu/drm/img-rogue/Makefile | 155 + - drivers/gpu/drm/img-rogue/allocmem.c | 436 + - drivers/gpu/drm/img-rogue/allocmem.h | 224 + - drivers/gpu/drm/img-rogue/apollo/apollo.mk | 4 + - drivers/gpu/drm/img-rogue/apollo/apollo_regs.h | 108 + - drivers/gpu/drm/img-rogue/apollo/bonnie_tcf.h | 68 + - drivers/gpu/drm/img-rogue/apollo/drm_pdp_crtc.c | 1104 ++ - drivers/gpu/drm/img-rogue/apollo/drm_pdp_debugfs.c | 184 + - drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.c | 889 + - drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.h | 242 + - drivers/gpu/drm/img-rogue/apollo/drm_pdp_dvi.c | 311 + - drivers/gpu/drm/img-rogue/apollo/drm_pdp_fb.c | 320 + - drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.c | 899 + - drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.h | 158 + - drivers/gpu/drm/img-rogue/apollo/drm_pdp_modeset.c | 468 + - drivers/gpu/drm/img-rogue/apollo/drm_pdp_plane.c | 385 + - drivers/gpu/drm/img-rogue/apollo/drm_pdp_tmds.c | 148 + - drivers/gpu/drm/img-rogue/apollo/odin_defs.h | 326 + - drivers/gpu/drm/img-rogue/apollo/odin_pdp_regs.h | 8540 +++++++++ - drivers/gpu/drm/img-rogue/apollo/odin_regs.h | 1026 ++ - drivers/gpu/drm/img-rogue/apollo/orion_defs.h | 183 + - drivers/gpu/drm/img-rogue/apollo/orion_regs.h | 439 + - drivers/gpu/drm/img-rogue/apollo/pdp_apollo.c | 329 + - drivers/gpu/drm/img-rogue/apollo/pdp_apollo.h | 88 + - drivers/gpu/drm/img-rogue/apollo/pdp_common.h | 107 + - drivers/gpu/drm/img-rogue/apollo/pdp_odin.c | 1231 ++ - drivers/gpu/drm/img-rogue/apollo/pdp_odin.h | 95 + - drivers/gpu/drm/img-rogue/apollo/pdp_plato.c | 339 + - drivers/gpu/drm/img-rogue/apollo/pdp_plato.h | 86 + - drivers/gpu/drm/img-rogue/apollo/pdp_regs.h | 75 + - drivers/gpu/drm/img-rogue/apollo/pfim_defs.h | 69 + - drivers/gpu/drm/img-rogue/apollo/pfim_regs.h | 265 + - drivers/gpu/drm/img-rogue/apollo/sysconfig.c | 1414 ++ - drivers/gpu/drm/img-rogue/apollo/sysinfo.h | 60 + - drivers/gpu/drm/img-rogue/apollo/tc_apollo.c | 1507 ++ - drivers/gpu/drm/img-rogue/apollo/tc_apollo.h | 77 + - drivers/gpu/drm/img-rogue/apollo/tc_clocks.h | 158 + - drivers/gpu/drm/img-rogue/apollo/tc_drv.c | 943 + - drivers/gpu/drm/img-rogue/apollo/tc_drv.h | 191 + - drivers/gpu/drm/img-rogue/apollo/tc_drv_internal.h | 204 + - drivers/gpu/drm/img-rogue/apollo/tc_odin.c | 2305 +++ - drivers/gpu/drm/img-rogue/apollo/tc_odin.h | 82 + - drivers/gpu/drm/img-rogue/apollo/tc_odin_common_regs.h | 105 + - drivers/gpu/drm/img-rogue/apollo/tcf_clk_ctrl.h | 1018 ++ - 
drivers/gpu/drm/img-rogue/apollo/tcf_pll.h | 311 + - drivers/gpu/drm/img-rogue/apollo/tcf_rgbpdp_regs.h | 559 + - drivers/gpu/drm/img-rogue/cache_km.c | 1631 ++ - drivers/gpu/drm/img-rogue/cache_km.h | 151 + - drivers/gpu/drm/img-rogue/cache_ops.h | 61 + - drivers/gpu/drm/img-rogue/client_cache_bridge.h | 80 + - drivers/gpu/drm/img-rogue/client_cache_direct_bridge.c | 112 + - drivers/gpu/drm/img-rogue/client_devicememhistory_bridge.h | 111 + - drivers/gpu/drm/img-rogue/client_devicememhistory_direct_bridge.c | 195 + - drivers/gpu/drm/img-rogue/client_htbuffer_bridge.h | 64 + - drivers/gpu/drm/img-rogue/client_htbuffer_direct_bridge.c | 70 + - drivers/gpu/drm/img-rogue/client_mm_bridge.h | 243 + - drivers/gpu/drm/img-rogue/client_mm_direct_bridge.c | 752 + - drivers/gpu/drm/img-rogue/client_pvrtl_bridge.h | 93 + - drivers/gpu/drm/img-rogue/client_pvrtl_direct_bridge.c | 175 + - drivers/gpu/drm/img-rogue/client_ri_bridge.h | 89 + - drivers/gpu/drm/img-rogue/client_ri_direct_bridge.c | 182 + - drivers/gpu/drm/img-rogue/client_sync_bridge.h | 102 + - drivers/gpu/drm/img-rogue/client_sync_direct_bridge.c | 262 + - drivers/gpu/drm/img-rogue/client_synctracking_bridge.h | 68 + - drivers/gpu/drm/img-rogue/client_synctracking_direct_bridge.c | 92 + - drivers/gpu/drm/img-rogue/common_cache_bridge.h | 126 + - drivers/gpu/drm/img-rogue/common_cmm_bridge.h | 114 + - drivers/gpu/drm/img-rogue/common_devicememhistory_bridge.h | 185 + - drivers/gpu/drm/img-rogue/common_di_bridge.h | 153 + - drivers/gpu/drm/img-rogue/common_dmabuf_bridge.h | 150 + - drivers/gpu/drm/img-rogue/common_htbuffer_bridge.h | 82 + - drivers/gpu/drm/img-rogue/common_mm_bridge.h | 779 + - drivers/gpu/drm/img-rogue/common_mmextmem_bridge.h | 80 + - drivers/gpu/drm/img-rogue/common_pvrtl_bridge.h | 214 + - drivers/gpu/drm/img-rogue/common_rgxbreakpoint_bridge.h | 150 + - drivers/gpu/drm/img-rogue/common_rgxcmp_bridge.h | 250 + - drivers/gpu/drm/img-rogue/common_rgxfwdbg_bridge.h | 339 + - drivers/gpu/drm/img-rogue/common_rgxhwperf_bridge.h | 248 + - drivers/gpu/drm/img-rogue/common_rgxkicksync_bridge.h | 143 + - drivers/gpu/drm/img-rogue/common_rgxregconfig_bridge.h | 146 + - drivers/gpu/drm/img-rogue/common_rgxta3d_bridge.h | 424 + - drivers/gpu/drm/img-rogue/common_rgxtimerquery_bridge.h | 112 + - drivers/gpu/drm/img-rogue/common_rgxtq2_bridge.h | 228 + - drivers/gpu/drm/img-rogue/common_rgxtq_bridge.h | 210 + - drivers/gpu/drm/img-rogue/common_ri_bridge.h | 225 + - drivers/gpu/drm/img-rogue/common_srvcore_bridge.h | 369 + - drivers/gpu/drm/img-rogue/common_sync_bridge.h | 254 + - drivers/gpu/drm/img-rogue/common_synctracking_bridge.h | 97 + - drivers/gpu/drm/img-rogue/config_kernel.h | 203 + - drivers/gpu/drm/img-rogue/config_kernel.mk | 53 + - drivers/gpu/drm/img-rogue/configs/rgxconfig_km_36.V.52.182.h | 104 + - drivers/gpu/drm/img-rogue/connection_server.c | 556 + - drivers/gpu/drm/img-rogue/connection_server.h | 145 + - drivers/gpu/drm/img-rogue/cores/rgxcore_km_36.29.52.182.h | 75 + - drivers/gpu/drm/img-rogue/debug_common.c | 2172 +++ - drivers/gpu/drm/img-rogue/debug_common.h | 69 + - drivers/gpu/drm/img-rogue/device.h | 647 + - drivers/gpu/drm/img-rogue/device_connection.h | 130 + - drivers/gpu/drm/img-rogue/devicemem.c | 2779 +++ - drivers/gpu/drm/img-rogue/devicemem.h | 694 + - drivers/gpu/drm/img-rogue/devicemem_heapcfg.c | 219 + - drivers/gpu/drm/img-rogue/devicemem_heapcfg.h | 223 + - drivers/gpu/drm/img-rogue/devicemem_history_server.c | 2313 +++ - drivers/gpu/drm/img-rogue/devicemem_history_server.h | 167 + - 
drivers/gpu/drm/img-rogue/devicemem_pdump.h | 363 + - drivers/gpu/drm/img-rogue/devicemem_server.c | 2326 +++ - drivers/gpu/drm/img-rogue/devicemem_server.h | 729 + - drivers/gpu/drm/img-rogue/devicemem_server_utils.h | 198 + - drivers/gpu/drm/img-rogue/devicemem_typedefs.h | 141 + - drivers/gpu/drm/img-rogue/devicemem_utils.c | 1267 ++ - drivers/gpu/drm/img-rogue/devicemem_utils.h | 582 + - drivers/gpu/drm/img-rogue/di_common.h | 236 + - drivers/gpu/drm/img-rogue/di_impl_brg.c | 889 + - drivers/gpu/drm/img-rogue/di_impl_brg.h | 92 + - drivers/gpu/drm/img-rogue/di_impl_brg_intern.h | 61 + - drivers/gpu/drm/img-rogue/di_server.c | 800 + - drivers/gpu/drm/img-rogue/di_server.h | 219 + - drivers/gpu/drm/img-rogue/dllist.h | 407 + - drivers/gpu/drm/img-rogue/dma_km.h | 83 + - drivers/gpu/drm/img-rogue/dma_support.c | 523 + - drivers/gpu/drm/img-rogue/dma_support.h | 117 + - drivers/gpu/drm/img-rogue/env_connection.h | 92 + - drivers/gpu/drm/img-rogue/event.c | 514 + - drivers/gpu/drm/img-rogue/event.h | 54 + - drivers/gpu/drm/img-rogue/fwload.c | 255 + - drivers/gpu/drm/img-rogue/fwload.h | 158 + - drivers/gpu/drm/img-rogue/fwtrace_string.h | 52 + - drivers/gpu/drm/img-rogue/handle.c | 2498 +++ - drivers/gpu/drm/img-rogue/handle.h | 206 + - drivers/gpu/drm/img-rogue/handle_idr.c | 440 + - drivers/gpu/drm/img-rogue/handle_impl.h | 89 + - drivers/gpu/drm/img-rogue/handle_types.h | 89 + - drivers/gpu/drm/img-rogue/hash.c | 734 + - drivers/gpu/drm/img-rogue/hash.h | 247 + - drivers/gpu/drm/img-rogue/htb_debug.c | 1189 ++ - drivers/gpu/drm/img-rogue/htb_debug.h | 72 + - drivers/gpu/drm/img-rogue/htbserver.c | 936 + - drivers/gpu/drm/img-rogue/htbserver.h | 240 + - drivers/gpu/drm/img-rogue/htbuffer.c | 106 + - drivers/gpu/drm/img-rogue/htbuffer.h | 92 + - drivers/gpu/drm/img-rogue/htbuffer_init.h | 92 + - drivers/gpu/drm/img-rogue/htbuffer_sf.h | 245 + - drivers/gpu/drm/img-rogue/htbuffer_types.h | 122 + - drivers/gpu/drm/img-rogue/img_3dtypes.h | 248 + - drivers/gpu/drm/img-rogue/img_defs.h | 599 + - drivers/gpu/drm/img-rogue/img_elf.h | 111 + - drivers/gpu/drm/img-rogue/img_types.h | 331 + - drivers/gpu/drm/img-rogue/img_types_check.h | 58 + - drivers/gpu/drm/img-rogue/info_page.h | 99 + - drivers/gpu/drm/img-rogue/info_page_client.h | 89 + - drivers/gpu/drm/img-rogue/info_page_defs.h | 133 + - drivers/gpu/drm/img-rogue/info_page_km.c | 142 + - drivers/gpu/drm/img-rogue/interrupt_support.c | 151 + - drivers/gpu/drm/img-rogue/interrupt_support.h | 103 + - drivers/gpu/drm/img-rogue/kernel_compatibility.h | 605 + - drivers/gpu/drm/img-rogue/kernel_config_compatibility.h | 54 + - drivers/gpu/drm/img-rogue/kernel_nospec.h | 71 + - drivers/gpu/drm/img-rogue/kernel_types.h | 137 + - drivers/gpu/drm/img-rogue/km/rgx_bvnc_defs_km.h | 393 + - drivers/gpu/drm/img-rogue/km/rgx_bvnc_table_km.h | 487 + - drivers/gpu/drm/img-rogue/km/rgx_cr_defs_km.h | 8472 +++++++++ - drivers/gpu/drm/img-rogue/km/rgxdefs_km.h | 365 + - drivers/gpu/drm/img-rogue/km/rgxmhdefs_km.h | 286 + - drivers/gpu/drm/img-rogue/km/rgxmmudefs_km.h | 216 + - drivers/gpu/drm/img-rogue/km_apphint.c | 1760 ++ - drivers/gpu/drm/img-rogue/km_apphint.h | 99 + - drivers/gpu/drm/img-rogue/km_apphint_defs.h | 162 + - drivers/gpu/drm/img-rogue/km_apphint_defs_common.h | 304 + - drivers/gpu/drm/img-rogue/linkage.h | 52 + - drivers/gpu/drm/img-rogue/linux_sw_sync.h | 52 + - drivers/gpu/drm/img-rogue/lists.c | 60 + - drivers/gpu/drm/img-rogue/lists.h | 367 + - drivers/gpu/drm/img-rogue/lock.h | 431 + - drivers/gpu/drm/img-rogue/lock_types.h | 92 + - 
drivers/gpu/drm/img-rogue/log2.h | 417 + - drivers/gpu/drm/img-rogue/mem_utils.c | 445 + - drivers/gpu/drm/img-rogue/mmu_common.c | 4800 ++++++ - drivers/gpu/drm/img-rogue/mmu_common.h | 831 + - drivers/gpu/drm/img-rogue/module_common.c | 767 + - drivers/gpu/drm/img-rogue/module_common.h | 109 + - drivers/gpu/drm/img-rogue/multicore_defs.h | 53 + - drivers/gpu/drm/img-rogue/opaque_types.h | 56 + - drivers/gpu/drm/img-rogue/os_apphint.h | 186 + - drivers/gpu/drm/img-rogue/os_cpu_cache.h | 69 + - drivers/gpu/drm/img-rogue/os_srvinit_param.h | 328 + - drivers/gpu/drm/img-rogue/osconnection_server.c | 157 + - drivers/gpu/drm/img-rogue/osconnection_server.h | 133 + - drivers/gpu/drm/img-rogue/osdi_impl.h | 211 + - drivers/gpu/drm/img-rogue/osfunc.c | 2878 ++++ - drivers/gpu/drm/img-rogue/osfunc.h | 1882 ++ - drivers/gpu/drm/img-rogue/osfunc_arm.c | 181 + - drivers/gpu/drm/img-rogue/osfunc_arm64.c | 272 + - drivers/gpu/drm/img-rogue/osfunc_common.h | 284 + - drivers/gpu/drm/img-rogue/osfunc_riscv.c | 231 + - drivers/gpu/drm/img-rogue/osfunc_x86.c | 135 + - drivers/gpu/drm/img-rogue/oskm_apphint.h | 186 + - drivers/gpu/drm/img-rogue/osmmap.h | 115 + - drivers/gpu/drm/img-rogue/osmmap_stub.c | 146 + - drivers/gpu/drm/img-rogue/ospvr_gputrace.h | 183 + - drivers/gpu/drm/img-rogue/pci_support.c | 726 + - drivers/gpu/drm/img-rogue/pci_support.h | 99 + - drivers/gpu/drm/img-rogue/pdp/drm_pdp.mk | 13 + - drivers/gpu/drm/img-rogue/pdp2_mmu_regs.h | 764 + - drivers/gpu/drm/img-rogue/pdp2_regs.h | 8565 ++++++++++ - drivers/gpu/drm/img-rogue/pdp_drm.h | 105 + - drivers/gpu/drm/img-rogue/pdump.h | 238 + - drivers/gpu/drm/img-rogue/pdump_km.h | 1146 ++ - drivers/gpu/drm/img-rogue/pdump_mmu.h | 147 + - drivers/gpu/drm/img-rogue/pdump_physmem.h | 300 + - drivers/gpu/drm/img-rogue/pdump_symbolicaddr.h | 55 + - drivers/gpu/drm/img-rogue/pdumpdefs.h | 268 + - drivers/gpu/drm/img-rogue/pdumpdesc.h | 230 + - drivers/gpu/drm/img-rogue/physheap.c | 1735 ++ - drivers/gpu/drm/img-rogue/physheap.h | 486 + - drivers/gpu/drm/img-rogue/physheap_config.h | 164 + - drivers/gpu/drm/img-rogue/physmem.c | 731 + - drivers/gpu/drm/img-rogue/physmem.h | 237 + - drivers/gpu/drm/img-rogue/physmem_dmabuf.c | 1297 ++ - drivers/gpu/drm/img-rogue/physmem_dmabuf.h | 124 + - drivers/gpu/drm/img-rogue/physmem_extmem.c | 71 + - drivers/gpu/drm/img-rogue/physmem_extmem.h | 76 + - drivers/gpu/drm/img-rogue/physmem_extmem_linux.c | 1026 ++ - drivers/gpu/drm/img-rogue/physmem_extmem_wrap.h | 115 + - drivers/gpu/drm/img-rogue/physmem_hostmem.c | 207 + - drivers/gpu/drm/img-rogue/physmem_hostmem.h | 65 + - drivers/gpu/drm/img-rogue/physmem_lma.c | 2934 ++++ - drivers/gpu/drm/img-rogue/physmem_lma.h | 94 + - drivers/gpu/drm/img-rogue/physmem_osmem.c | 91 + - drivers/gpu/drm/img-rogue/physmem_osmem.h | 151 + - drivers/gpu/drm/img-rogue/physmem_osmem_linux.c | 3940 +++++ - drivers/gpu/drm/img-rogue/physmem_osmem_linux.h | 49 + - drivers/gpu/drm/img-rogue/physmem_test.c | 1037 ++ - drivers/gpu/drm/img-rogue/physmem_test.h | 51 + - drivers/gpu/drm/img-rogue/plato_drv.h | 464 + - drivers/gpu/drm/img-rogue/pmr.c | 4189 +++++ - drivers/gpu/drm/img-rogue/pmr.h | 1137 ++ - drivers/gpu/drm/img-rogue/pmr_impl.h | 558 + - drivers/gpu/drm/img-rogue/pmr_os.c | 611 + - drivers/gpu/drm/img-rogue/pmr_os.h | 62 + - drivers/gpu/drm/img-rogue/power.c | 1301 ++ - drivers/gpu/drm/img-rogue/power.h | 457 + - drivers/gpu/drm/img-rogue/powervr/buffer_attribs.h | 193 + - drivers/gpu/drm/img-rogue/powervr/img_drm_fourcc.h | 143 + - 
drivers/gpu/drm/img-rogue/powervr/mem_types.h | 64 + - drivers/gpu/drm/img-rogue/powervr/pvrsrv_sync_ext.h | 72 + - drivers/gpu/drm/img-rogue/private_data.h | 59 + - drivers/gpu/drm/img-rogue/proc_stats.h | 153 + - drivers/gpu/drm/img-rogue/process_stats.c | 3312 ++++ - drivers/gpu/drm/img-rogue/process_stats.h | 214 + - drivers/gpu/drm/img-rogue/pvr_bridge.h | 457 + - drivers/gpu/drm/img-rogue/pvr_bridge_k.c | 635 + - drivers/gpu/drm/img-rogue/pvr_bridge_k.h | 111 + - drivers/gpu/drm/img-rogue/pvr_buffer_sync.c | 739 + - drivers/gpu/drm/img-rogue/pvr_buffer_sync.h | 125 + - drivers/gpu/drm/img-rogue/pvr_buffer_sync_shared.h | 57 + - drivers/gpu/drm/img-rogue/pvr_counting_timeline.c | 308 + - drivers/gpu/drm/img-rogue/pvr_counting_timeline.h | 68 + - drivers/gpu/drm/img-rogue/pvr_debug.c | 486 + - drivers/gpu/drm/img-rogue/pvr_debug.h | 1071 ++ - drivers/gpu/drm/img-rogue/pvr_debugfs.c | 622 + - drivers/gpu/drm/img-rogue/pvr_debugfs.h | 50 + - drivers/gpu/drm/img-rogue/pvr_dicommon.h | 59 + - drivers/gpu/drm/img-rogue/pvr_dma_resv.h | 80 + - drivers/gpu/drm/img-rogue/pvr_drm.c | 452 + - drivers/gpu/drm/img-rogue/pvr_drm.h | 146 + - drivers/gpu/drm/img-rogue/pvr_drv.h | 112 + - drivers/gpu/drm/img-rogue/pvr_fd_sync_kernel.h | 64 + - drivers/gpu/drm/img-rogue/pvr_fence.c | 1153 ++ - drivers/gpu/drm/img-rogue/pvr_fence.h | 240 + - drivers/gpu/drm/img-rogue/pvr_fence_trace.h | 225 + - drivers/gpu/drm/img-rogue/pvr_gputrace.c | 1685 ++ - drivers/gpu/drm/img-rogue/pvr_intrinsics.h | 70 + - drivers/gpu/drm/img-rogue/pvr_ion_stats.h | 91 + - drivers/gpu/drm/img-rogue/pvr_linux_fence.h | 103 + - drivers/gpu/drm/img-rogue/pvr_notifier.c | 657 + - drivers/gpu/drm/img-rogue/pvr_notifier.h | 326 + - drivers/gpu/drm/img-rogue/pvr_platform_drv.c | 337 + - drivers/gpu/drm/img-rogue/pvr_procfs.h | 50 + - drivers/gpu/drm/img-rogue/pvr_ricommon.h | 68 + - drivers/gpu/drm/img-rogue/pvr_sw_fence.c | 199 + - drivers/gpu/drm/img-rogue/pvr_sw_fence.h | 60 + - drivers/gpu/drm/img-rogue/pvr_sync.h | 120 + - drivers/gpu/drm/img-rogue/pvr_sync_api.h | 63 + - drivers/gpu/drm/img-rogue/pvr_sync_file.c | 1094 ++ - drivers/gpu/drm/img-rogue/pvr_sync_ioctl_common.c | 277 + - drivers/gpu/drm/img-rogue/pvr_sync_ioctl_common.h | 71 + - drivers/gpu/drm/img-rogue/pvr_sync_ioctl_drm.c | 168 + - drivers/gpu/drm/img-rogue/pvr_sync_ioctl_drm.h | 62 + - drivers/gpu/drm/img-rogue/pvr_uaccess.h | 99 + - drivers/gpu/drm/img-rogue/pvr_vmap.h | 83 + - drivers/gpu/drm/img-rogue/pvrmodule.h | 48 + - drivers/gpu/drm/img-rogue/pvrsrv.c | 3748 ++++ - drivers/gpu/drm/img-rogue/pvrsrv.h | 553 + - drivers/gpu/drm/img-rogue/pvrsrv_apphint.h | 71 + - drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.c | 390 + - drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.h | 53 + - drivers/gpu/drm/img-rogue/pvrsrv_cleanup.h | 256 + - drivers/gpu/drm/img-rogue/pvrsrv_device.h | 403 + - drivers/gpu/drm/img-rogue/pvrsrv_device_types.h | 60 + - drivers/gpu/drm/img-rogue/pvrsrv_devvar.h | 291 + - drivers/gpu/drm/img-rogue/pvrsrv_error.c | 61 + - drivers/gpu/drm/img-rogue/pvrsrv_error.h | 75 + - drivers/gpu/drm/img-rogue/pvrsrv_errors.h | 421 + - drivers/gpu/drm/img-rogue/pvrsrv_firmware_boot.h | 87 + - drivers/gpu/drm/img-rogue/pvrsrv_memalloc_physheap.h | 215 + - drivers/gpu/drm/img-rogue/pvrsrv_memallocflags.h | 1047 ++ - drivers/gpu/drm/img-rogue/pvrsrv_memallocflags_internal.h | 78 + - drivers/gpu/drm/img-rogue/pvrsrv_pool.c | 260 + - drivers/gpu/drm/img-rogue/pvrsrv_pool.h | 135 + - drivers/gpu/drm/img-rogue/pvrsrv_sync_km.h | 65 + - 
drivers/gpu/drm/img-rogue/pvrsrv_sync_server.h | 278 + - drivers/gpu/drm/img-rogue/pvrsrv_tlcommon.h | 260 + - drivers/gpu/drm/img-rogue/pvrsrv_tlstreams.h | 63 + - drivers/gpu/drm/img-rogue/pvrsrvkm.mk | 154 + - drivers/gpu/drm/img-rogue/pvrversion.h | 68 + - drivers/gpu/drm/img-rogue/ra.c | 3472 ++++ - drivers/gpu/drm/img-rogue/ra.h | 644 + - drivers/gpu/drm/img-rogue/rgx_bridge.h | 252 + - drivers/gpu/drm/img-rogue/rgx_bridge_init.c | 105 + - drivers/gpu/drm/img-rogue/rgx_bridge_init.h | 55 + - drivers/gpu/drm/img-rogue/rgx_common.h | 232 + - drivers/gpu/drm/img-rogue/rgx_common_asserts.h | 73 + - drivers/gpu/drm/img-rogue/rgx_compat_bvnc.h | 140 + - drivers/gpu/drm/img-rogue/rgx_fw_info.h | 144 + - drivers/gpu/drm/img-rogue/rgx_fwif_alignchecks.h | 192 + - drivers/gpu/drm/img-rogue/rgx_fwif_hwperf.h | 253 + - drivers/gpu/drm/img-rogue/rgx_fwif_km.h | 2666 +++ - drivers/gpu/drm/img-rogue/rgx_fwif_resetframework.h | 70 + - drivers/gpu/drm/img-rogue/rgx_fwif_sf.h | 995 ++ - drivers/gpu/drm/img-rogue/rgx_fwif_shared.h | 361 + - drivers/gpu/drm/img-rogue/rgx_heap_firmware.h | 126 + - drivers/gpu/drm/img-rogue/rgx_heaps.h | 68 + - drivers/gpu/drm/img-rogue/rgx_hwperf.h | 483 + - drivers/gpu/drm/img-rogue/rgx_hwperf_common.h | 1634 ++ - drivers/gpu/drm/img-rogue/rgx_hwperf_table.c | 635 + - drivers/gpu/drm/img-rogue/rgx_hwperf_table.h | 116 + - drivers/gpu/drm/img-rogue/rgx_memallocflags.h | 58 + - drivers/gpu/drm/img-rogue/rgx_meta.h | 379 + - drivers/gpu/drm/img-rogue/rgx_mips.h | 406 + - drivers/gpu/drm/img-rogue/rgx_options.h | 342 + - drivers/gpu/drm/img-rogue/rgx_pdump_panics.h | 64 + - drivers/gpu/drm/img-rogue/rgx_riscv.h | 248 + - drivers/gpu/drm/img-rogue/rgx_tq_shared.h | 61 + - drivers/gpu/drm/img-rogue/rgxapi_km.h | 336 + - drivers/gpu/drm/img-rogue/rgxbreakpoint.c | 292 + - drivers/gpu/drm/img-rogue/rgxbreakpoint.h | 142 + - drivers/gpu/drm/img-rogue/rgxbvnc.c | 969 ++ - drivers/gpu/drm/img-rogue/rgxbvnc.h | 90 + - drivers/gpu/drm/img-rogue/rgxccb.c | 2869 ++++ - drivers/gpu/drm/img-rogue/rgxccb.h | 356 + - drivers/gpu/drm/img-rogue/rgxcompute.c | 1562 ++ - drivers/gpu/drm/img-rogue/rgxcompute.h | 196 + - drivers/gpu/drm/img-rogue/rgxdebug.c | 4077 +++++ - drivers/gpu/drm/img-rogue/rgxdebug_common.c | 2219 +++ - drivers/gpu/drm/img-rogue/rgxdebug_common.h | 388 + - drivers/gpu/drm/img-rogue/rgxdevice.h | 912 + - drivers/gpu/drm/img-rogue/rgxfw_log_helper.h | 79 + - drivers/gpu/drm/img-rogue/rgxfwcmnctx.c | 755 + - drivers/gpu/drm/img-rogue/rgxfwcmnctx.h | 150 + - drivers/gpu/drm/img-rogue/rgxfwdbg.c | 608 + - drivers/gpu/drm/img-rogue/rgxfwdbg.h | 160 + - drivers/gpu/drm/img-rogue/rgxfwimageutils.c | 1154 ++ - drivers/gpu/drm/img-rogue/rgxfwimageutils.h | 224 + - drivers/gpu/drm/img-rogue/rgxfwmemctx.h | 163 + - drivers/gpu/drm/img-rogue/rgxfwriscv.c | 1076 ++ - drivers/gpu/drm/img-rogue/rgxfwriscv.h | 212 + - drivers/gpu/drm/img-rogue/rgxfwtrace_strings.c | 56 + - drivers/gpu/drm/img-rogue/rgxfwutils.c | 6946 ++++++++ - drivers/gpu/drm/img-rogue/rgxfwutils.h | 1292 ++ - drivers/gpu/drm/img-rogue/rgxheapconfig.h | 294 + - drivers/gpu/drm/img-rogue/rgxheapconfig_65273.h | 124 + - drivers/gpu/drm/img-rogue/rgxhwperf.c | 1037 ++ - drivers/gpu/drm/img-rogue/rgxhwperf.h | 96 + - drivers/gpu/drm/img-rogue/rgxhwperf_common.c | 4050 +++++ - drivers/gpu/drm/img-rogue/rgxhwperf_common.h | 635 + - drivers/gpu/drm/img-rogue/rgxinit.c | 5208 ++++++ - drivers/gpu/drm/img-rogue/rgxinit.h | 282 + - drivers/gpu/drm/img-rogue/rgxkicksync.c | 804 + - drivers/gpu/drm/img-rogue/rgxkicksync.h | 128 + 
- drivers/gpu/drm/img-rogue/rgxlayer.h | 850 + - drivers/gpu/drm/img-rogue/rgxlayer_impl.c | 1346 ++ - drivers/gpu/drm/img-rogue/rgxlayer_impl.h | 67 + - drivers/gpu/drm/img-rogue/rgxmem.c | 972 ++ - drivers/gpu/drm/img-rogue/rgxmem.h | 157 + - drivers/gpu/drm/img-rogue/rgxmipsmmuinit.c | 1068 ++ - drivers/gpu/drm/img-rogue/rgxmipsmmuinit.h | 97 + - drivers/gpu/drm/img-rogue/rgxmmuinit.c | 1147 ++ - drivers/gpu/drm/img-rogue/rgxmmuinit.h | 60 + - drivers/gpu/drm/img-rogue/rgxmulticore.c | 252 + - drivers/gpu/drm/img-rogue/rgxmulticore.h | 54 + - drivers/gpu/drm/img-rogue/rgxpower.c | 1691 ++ - drivers/gpu/drm/img-rogue/rgxpower.h | 286 + - drivers/gpu/drm/img-rogue/rgxregconfig.c | 319 + - drivers/gpu/drm/img-rogue/rgxregconfig.h | 130 + - drivers/gpu/drm/img-rogue/rgxshader.c | 308 + - drivers/gpu/drm/img-rogue/rgxshader.h | 83 + - drivers/gpu/drm/img-rogue/rgxsrvinit.c | 1863 ++ - drivers/gpu/drm/img-rogue/rgxstartstop.c | 1314 ++ - drivers/gpu/drm/img-rogue/rgxstartstop.h | 84 + - drivers/gpu/drm/img-rogue/rgxsyncutils.c | 184 + - drivers/gpu/drm/img-rogue/rgxsyncutils.h | 76 + - drivers/gpu/drm/img-rogue/rgxta3d.c | 5603 ++++++ - drivers/gpu/drm/img-rogue/rgxta3d.h | 510 + - drivers/gpu/drm/img-rogue/rgxtdmtransfer.c | 1374 ++ - drivers/gpu/drm/img-rogue/rgxtdmtransfer.h | 132 + - drivers/gpu/drm/img-rogue/rgxtimecorr.c | 729 + - drivers/gpu/drm/img-rogue/rgxtimecorr.h | 272 + - drivers/gpu/drm/img-rogue/rgxtimerquery.c | 225 + - drivers/gpu/drm/img-rogue/rgxtimerquery.h | 117 + - drivers/gpu/drm/img-rogue/rgxtransfer.c | 1839 ++ - drivers/gpu/drm/img-rogue/rgxtransfer.h | 159 + - drivers/gpu/drm/img-rogue/rgxtransfer_shader.h | 71 + - drivers/gpu/drm/img-rogue/rgxutils.c | 306 + - drivers/gpu/drm/img-rogue/rgxutils.h | 211 + - drivers/gpu/drm/img-rogue/ri_server.c | 2161 +++ - drivers/gpu/drm/img-rogue/ri_server.h | 110 + - drivers/gpu/drm/img-rogue/ri_typedefs.h | 52 + - drivers/gpu/drm/img-rogue/rogue_trace_events.h | 593 + - drivers/gpu/drm/img-rogue/server_cache_bridge.c | 449 + - drivers/gpu/drm/img-rogue/server_cmm_bridge.c | 409 + - drivers/gpu/drm/img-rogue/server_devicememhistory_bridge.c | 826 + - drivers/gpu/drm/img-rogue/server_di_bridge.c | 618 + - drivers/gpu/drm/img-rogue/server_dmabuf_bridge.c | 670 + - drivers/gpu/drm/img-rogue/server_htbuffer_bridge.c | 225 + - drivers/gpu/drm/img-rogue/server_mm_bridge.c | 3186 ++++ - drivers/gpu/drm/img-rogue/server_mmextmem_bridge.c | 164 + - drivers/gpu/drm/img-rogue/server_pvrtl_bridge.c | 814 + - drivers/gpu/drm/img-rogue/server_rgxbreakpoint_bridge.c | 371 + - drivers/gpu/drm/img-rogue/server_rgxcmp_bridge.c | 1314 ++ - drivers/gpu/drm/img-rogue/server_rgxfwdbg_bridge.c | 545 + - drivers/gpu/drm/img-rogue/server_rgxhwperf_bridge.c | 1016 ++ - drivers/gpu/drm/img-rogue/server_rgxkicksync_bridge.c | 586 + - drivers/gpu/drm/img-rogue/server_rgxregconfig_bridge.c | 239 + - drivers/gpu/drm/img-rogue/server_rgxta3d_bridge.c | 2447 +++ - drivers/gpu/drm/img-rogue/server_rgxtimerquery_bridge.c | 167 + - drivers/gpu/drm/img-rogue/server_rgxtq2_bridge.c | 1194 ++ - drivers/gpu/drm/img-rogue/server_rgxtq_bridge.c | 1288 ++ - drivers/gpu/drm/img-rogue/server_ri_bridge.c | 745 + - drivers/gpu/drm/img-rogue/server_srvcore_bridge.c | 1053 ++ - drivers/gpu/drm/img-rogue/server_sync_bridge.c | 738 + - drivers/gpu/drm/img-rogue/server_synctracking_bridge.c | 325 + - drivers/gpu/drm/img-rogue/services_kernel_client.h | 289 + - drivers/gpu/drm/img-rogue/services_km.h | 180 + - drivers/gpu/drm/img-rogue/servicesext.h | 168 + - 
drivers/gpu/drm/img-rogue/sofunc_pvr.h | 94 + - drivers/gpu/drm/img-rogue/spacemit/spacemit_init.c | 361 + - drivers/gpu/drm/img-rogue/spacemit/spacemit_init.h | 34 + - drivers/gpu/drm/img-rogue/spacemit/sysconfig.c | 342 + - drivers/gpu/drm/img-rogue/spacemit/sysconfig.h | 63 + - drivers/gpu/drm/img-rogue/spacemit/sysinfo.h | 58 + - drivers/gpu/drm/img-rogue/srvcore.c | 1643 ++ - drivers/gpu/drm/img-rogue/srvcore.h | 240 + - drivers/gpu/drm/img-rogue/srvinit.h | 68 + - drivers/gpu/drm/img-rogue/srvkm.h | 144 + - drivers/gpu/drm/img-rogue/sync.c | 824 + - drivers/gpu/drm/img-rogue/sync.h | 292 + - drivers/gpu/drm/img-rogue/sync_checkpoint.c | 3238 ++++ - drivers/gpu/drm/img-rogue/sync_checkpoint.h | 666 + - drivers/gpu/drm/img-rogue/sync_checkpoint_external.h | 83 + - drivers/gpu/drm/img-rogue/sync_checkpoint_init.h | 82 + - drivers/gpu/drm/img-rogue/sync_checkpoint_internal.h | 274 + - drivers/gpu/drm/img-rogue/sync_fallback_server.h | 204 + - drivers/gpu/drm/img-rogue/sync_internal.h | 112 + - drivers/gpu/drm/img-rogue/sync_prim_internal.h | 84 + - drivers/gpu/drm/img-rogue/sync_server.c | 1220 ++ - drivers/gpu/drm/img-rogue/sync_server.h | 249 + - drivers/gpu/drm/img-rogue/syscommon.h | 175 + - drivers/gpu/drm/img-rogue/sysconfig_cmn.c | 210 + - drivers/gpu/drm/img-rogue/sysvalidation.h | 63 + - drivers/gpu/drm/img-rogue/tlclient.c | 499 + - drivers/gpu/drm/img-rogue/tlclient.h | 257 + - drivers/gpu/drm/img-rogue/tlintern.c | 442 + - drivers/gpu/drm/img-rogue/tlintern.h | 345 + - drivers/gpu/drm/img-rogue/tlserver.c | 747 + - drivers/gpu/drm/img-rogue/tlserver.h | 97 + - drivers/gpu/drm/img-rogue/tlstream.c | 1625 ++ - drivers/gpu/drm/img-rogue/tlstream.h | 600 + - drivers/gpu/drm/img-rogue/trace_events.c | 269 + - drivers/gpu/drm/img-rogue/trace_events.h | 196 + - drivers/gpu/drm/img-rogue/uniq_key_splay_tree.c | 280 + - drivers/gpu/drm/img-rogue/uniq_key_splay_tree.h | 90 + - drivers/gpu/drm/img-rogue/vmm_impl.h | 203 + - drivers/gpu/drm/img-rogue/vmm_pvz_client.c | 131 + - drivers/gpu/drm/img-rogue/vmm_pvz_client.h | 76 + - drivers/gpu/drm/img-rogue/vmm_pvz_server.c | 241 + - drivers/gpu/drm/img-rogue/vmm_pvz_server.h | 121 + - drivers/gpu/drm/img-rogue/vmm_type_stub.c | 112 + - drivers/gpu/drm/img-rogue/vz_vm.h | 63 + - drivers/gpu/drm/img-rogue/vz_vmm_pvz.c | 196 + - drivers/gpu/drm/img-rogue/vz_vmm_pvz.h | 79 + - drivers/gpu/drm/img-rogue/vz_vmm_vm.c | 294 + - drivers/gpu/drm/spacemit/Kconfig | 36 + - drivers/gpu/drm/spacemit/Makefile | 32 + - drivers/gpu/drm/spacemit/dphy/spacemit_dphy_drv.c | 481 + - drivers/gpu/drm/spacemit/dpu/dpu_debug.c | 321 + - drivers/gpu/drm/spacemit/dpu/dpu_debug.h | 108 + - drivers/gpu/drm/spacemit/dpu/dpu_saturn.c | 1859 ++ - drivers/gpu/drm/spacemit/dpu/dpu_saturn.h | 69 + - drivers/gpu/drm/spacemit/dpu/dpu_trace.h | 473 + - drivers/gpu/drm/spacemit/dpu/saturn_fbcmem.c | 347 + - drivers/gpu/drm/spacemit/dpu/saturn_fbcmem.h | 40 + - drivers/gpu/drm/spacemit/dpu/saturn_regs/cmdlist.h | 95 + - drivers/gpu/drm/spacemit/dpu/saturn_regs/cmps_x.h | 515 + - drivers/gpu/drm/spacemit/dpu/saturn_regs/dma_top.h | 257 + - drivers/gpu/drm/spacemit/dpu/saturn_regs/dpu_crg.h | 64 + - drivers/gpu/drm/spacemit/dpu/saturn_regs/dpu_ctl.h | 516 + - drivers/gpu/drm/spacemit/dpu/saturn_regs/dpu_intp.h | 611 + - drivers/gpu/drm/spacemit/dpu/saturn_regs/dpu_top.h | 126 + - drivers/gpu/drm/spacemit/dpu/saturn_regs/mmu.h | 365 + - drivers/gpu/drm/spacemit/dpu/saturn_regs/outctrl_proc_x.h | 480 + - drivers/gpu/drm/spacemit/dpu/saturn_regs/outctrl_top_x.h | 432 + - 
drivers/gpu/drm/spacemit/dpu/saturn_regs/prepipe_layer_proc_x.h | 780 + - drivers/gpu/drm/spacemit/dpu/saturn_regs/rdma_path_x.h | 516 + - drivers/gpu/drm/spacemit/dpu/saturn_regs/reg_map.h | 139 + - drivers/gpu/drm/spacemit/dpu/saturn_regs/scaler_x.h | 222 + - drivers/gpu/drm/spacemit/dpu/saturn_regs/wb_top.h | 374 + - drivers/gpu/drm/spacemit/dsi/spacemit_dptc_drv.c | 232 + - drivers/gpu/drm/spacemit/dsi/spacemit_dptc_drv.h | 44 + - drivers/gpu/drm/spacemit/dsi/spacemit_dsi_drv.c | 903 + - drivers/gpu/drm/spacemit/dsi/spacemit_dsi_hw.h | 356 + - drivers/gpu/drm/spacemit/lt8911exb.c | 1581 ++ - drivers/gpu/drm/spacemit/spacemit_bootloader.c | 69 + - drivers/gpu/drm/spacemit/spacemit_bootloader.h | 18 + - drivers/gpu/drm/spacemit/spacemit_cmdlist.c | 247 + - drivers/gpu/drm/spacemit/spacemit_cmdlist.h | 72 + - drivers/gpu/drm/spacemit/spacemit_dmmu.c | 142 + - drivers/gpu/drm/spacemit/spacemit_dmmu.h | 93 + - drivers/gpu/drm/spacemit/spacemit_dphy.c | 155 + - drivers/gpu/drm/spacemit/spacemit_dphy.h | 103 + - drivers/gpu/drm/spacemit/spacemit_dpu.c | 1205 ++ - drivers/gpu/drm/spacemit/spacemit_dpu.h | 259 + - drivers/gpu/drm/spacemit/spacemit_dpu_reg.h | 40 + - drivers/gpu/drm/spacemit/spacemit_drm.c | 549 + - drivers/gpu/drm/spacemit/spacemit_drm.h | 54 + - drivers/gpu/drm/spacemit/spacemit_dsi.c | 748 + - drivers/gpu/drm/spacemit/spacemit_dsi.h | 227 + - drivers/gpu/drm/spacemit/spacemit_gem.c | 451 + - drivers/gpu/drm/spacemit/spacemit_gem.h | 37 + - drivers/gpu/drm/spacemit/spacemit_hdmi.c | 1062 ++ - drivers/gpu/drm/spacemit/spacemit_hdmi.h | 50 + - drivers/gpu/drm/spacemit/spacemit_lib.c | 243 + - drivers/gpu/drm/spacemit/spacemit_lib.h | 44 + - drivers/gpu/drm/spacemit/spacemit_mipi_panel.c | 728 + - drivers/gpu/drm/spacemit/spacemit_mipi_panel.h | 108 + - drivers/gpu/drm/spacemit/spacemit_planes.c | 702 + - drivers/gpu/drm/spacemit/spacemit_wb.c | 273 + - drivers/gpu/drm/spacemit/spacemit_wb.h | 59 + - drivers/gpu/drm/spacemit/sysfs/sysfs_class.c | 38 + - drivers/gpu/drm/spacemit/sysfs/sysfs_display.h | 20 + - drivers/gpu/drm/spacemit/sysfs/sysfs_dphy.c | 34 + - drivers/gpu/drm/spacemit/sysfs/sysfs_dpu.c | 157 + - drivers/gpu/drm/spacemit/sysfs/sysfs_dsi.c | 28 + - drivers/gpu/drm/spacemit/sysfs/sysfs_mipi_panel.c | 26 + - 556 files changed, 299273 insertions(+) - -diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig -index 111111111111..222222222222 100644 ---- a/drivers/gpu/drm/Kconfig -+++ b/drivers/gpu/drm/Kconfig -@@ -417,6 +417,10 @@ source "drivers/gpu/drm/solomon/Kconfig" - - source "drivers/gpu/drm/sprd/Kconfig" - -+source "drivers/gpu/drm/spacemit/Kconfig" -+ -+source "drivers/gpu/drm/img-rogue/Kconfig" -+ - config DRM_HYPERV - tristate "DRM Support for Hyper-V synthetic video device" - depends on DRM && PCI && MMU && HYPERV -diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile -index 111111111111..222222222222 100644 ---- a/drivers/gpu/drm/Makefile -+++ b/drivers/gpu/drm/Makefile -@@ -148,3 +148,5 @@ obj-y += gud/ - obj-$(CONFIG_DRM_HYPERV) += hyperv/ - obj-y += solomon/ - obj-$(CONFIG_DRM_SPRD) += sprd/ -+obj-$(CONFIG_DRM_SPACEMIT) += spacemit/ -+obj-$(CONFIG_POWERVR_ROGUE) += img-rogue/ -diff --git a/drivers/gpu/drm/img-rogue/Kconfig b/drivers/gpu/drm/img-rogue/Kconfig -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/Kconfig -@@ -0,0 +1,10 @@ -+config POWERVR_ROGUE -+ tristate "PowerVR GPU" -+ default n -+ depends on DRM -+ help -+ Driver for PowerVR graphics processor hardware. 
-+ -+ Say Y here if your SoC contains a PowerVR GPU. For more -+ information, see . -+ -diff --git a/drivers/gpu/drm/img-rogue/Makefile b/drivers/gpu/drm/img-rogue/Makefile -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/Makefile -@@ -0,0 +1,155 @@ -+ -+include $(srctree)/drivers/gpu/drm/img-rogue/config_kernel.mk -+ccflags-y += -include $(srctree)/drivers/gpu/drm/img-rogue/config_kernel.h \ -+ -I$(srctree)/$(src)/km \ -+ -I$(srctree)/$(src)/spacemit \ -+ -I$(srctree)/$(src) -+ -+obj-$(CONFIG_POWERVR_ROGUE) += pvrsrvkm.o -+pvrsrvkm-y += \ -+ client_cache_direct_bridge.o \ -+ server_cache_bridge.o \ -+ server_cmm_bridge.o \ -+ client_devicememhistory_direct_bridge.o \ -+ server_devicememhistory_bridge.o \ -+ server_di_bridge.o \ -+ server_dmabuf_bridge.o \ -+ client_mm_direct_bridge.o \ -+ server_mm_bridge.o \ -+ server_mmextmem_bridge.o \ -+ client_pvrtl_direct_bridge.o \ -+ server_pvrtl_bridge.o \ -+ server_rgxbreakpoint_bridge.o \ -+ server_rgxcmp_bridge.o \ -+ server_rgxfwdbg_bridge.o \ -+ server_rgxhwperf_bridge.o \ -+ server_rgxregconfig_bridge.o \ -+ server_rgxta3d_bridge.o \ -+ server_rgxtimerquery_bridge.o \ -+ server_rgxtq2_bridge.o \ -+ server_rgxtq_bridge.o \ -+ server_srvcore_bridge.o \ -+ client_sync_direct_bridge.o \ -+ server_sync_bridge.o \ -+ client_synctracking_direct_bridge.o \ -+ server_synctracking_bridge.o \ -+ cache_km.o \ -+ connection_server.o \ -+ debug_common.o \ -+ devicemem_heapcfg.o \ -+ devicemem_history_server.o \ -+ devicemem_server.o \ -+ di_impl_brg.o \ -+ di_server.o \ -+ handle.o \ -+ info_page_km.o \ -+ lists.o \ -+ mmu_common.o \ -+ physheap.o \ -+ physmem.o \ -+ physmem_extmem.o \ -+ physmem_hostmem.o \ -+ physmem_lma.o \ -+ physmem_osmem.o \ -+ pmr.o \ -+ power.o \ -+ process_stats.o \ -+ pvr_notifier.o \ -+ pvrsrv.o \ -+ pvrsrv_bridge_init.o \ -+ pvrsrv_pool.o \ -+ srvcore.o \ -+ sync_checkpoint.o \ -+ sync_server.o \ -+ tlintern.o \ -+ tlserver.o \ -+ tlstream.o \ -+ vmm_pvz_client.o \ -+ vmm_pvz_server.o \ -+ vz_vmm_pvz.o \ -+ vz_vmm_vm.o \ -+ rgx_bridge_init.o \ -+ rgxbreakpoint.o \ -+ rgxbvnc.o \ -+ rgxccb.o \ -+ rgxcompute.o \ -+ rgxdebug_common.o \ -+ rgxfwcmnctx.o \ -+ rgxfwdbg.o \ -+ rgxfwimageutils.o \ -+ rgxfwtrace_strings.o \ -+ rgxhwperf_common.o \ -+ rgxmem.o \ -+ rgxregconfig.o \ -+ rgxshader.o \ -+ rgxsyncutils.o \ -+ rgxtdmtransfer.o \ -+ rgxtimecorr.o \ -+ rgxtimerquery.o \ -+ rgxutils.o \ -+ rgxdebug.o \ -+ rgxfwriscv.o \ -+ rgxfwutils.o \ -+ rgxhwperf.o \ -+ rgxinit.o \ -+ rgxlayer_impl.o \ -+ rgxmipsmmuinit.o \ -+ rgxmmuinit.o \ -+ rgxmulticore.o \ -+ rgxpower.o \ -+ rgxsrvinit.o \ -+ rgxstartstop.o \ -+ rgxta3d.o \ -+ rgxtransfer.o \ -+ allocmem.o \ -+ event.o \ -+ fwload.o \ -+ handle_idr.o \ -+ km_apphint.o \ -+ module_common.o \ -+ osconnection_server.o \ -+ osfunc.o \ -+ osmmap_stub.o \ -+ physmem_dmabuf.o \ -+ physmem_extmem_linux.o \ -+ physmem_osmem_linux.o \ -+ physmem_test.o \ -+ pmr_os.o \ -+ pvr_bridge_k.o \ -+ pvr_buffer_sync.o \ -+ pvr_counting_timeline.o \ -+ pvr_debug.o \ -+ pvr_debugfs.o \ -+ pvr_drm.o \ -+ pvr_fence.o \ -+ pvr_platform_drv.o \ -+ pvr_sw_fence.o \ -+ pvr_sync_file.o \ -+ pvr_sync_ioctl_common.o \ -+ pvr_sync_ioctl_drm.o \ -+ devicemem.o \ -+ devicemem_utils.o \ -+ hash.o \ -+ mem_utils.o \ -+ pvrsrv_error.o \ -+ ra.o \ -+ sync.o \ -+ tlclient.o \ -+ uniq_key_splay_tree.o \ -+ rgx_hwperf_table.o \ -+ interrupt_support.o \ -+ pci_support.o \ -+ sysconfig_cmn.o \ -+ dma_support.o \ -+ vmm_type_stub.o \ -+ spacemit/sysconfig.o \ -+ 
spacemit/spacemit_init.o \ -+ server_rgxkicksync_bridge.o \ -+ rgxkicksync.o \ -+ pvr_sync_ioctl_common.o \ -+ pvr_sync_ioctl_drm.o -+pvrsrvkm-$(CONFIG_ARM) += osfunc_arm.o -+pvrsrvkm-$(CONFIG_ARM64) += osfunc_arm64.o -+pvrsrvkm-$(CONFIG_EVENT_TRACING) += trace_events.o pvr_gputrace.o -+pvrsrvkm-$(CONFIG_RISCV) += osfunc_riscv.o -+pvrsrvkm-$(CONFIG_X86) += osfunc_x86.o -diff --git a/drivers/gpu/drm/img-rogue/allocmem.c b/drivers/gpu/drm/img-rogue/allocmem.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/allocmem.c -@@ -0,0 +1,436 @@ -+/*************************************************************************/ /*! -+@File -+@Title Host memory management implementation for Linux -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include -+#include -+#include -+#include -+ -+#include "img_defs.h" -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "process_stats.h" -+#if defined(DEBUG) && defined(SUPPORT_VALIDATION) -+#include "pvrsrv.h" -+#endif -+#include "osfunc.h" -+ -+ -+/* -+ * When memory statistics are disabled, memory records are used instead. -+ * In order for these to work, the PID of the process that requested the -+ * allocation needs to be stored at the end of the kmalloc'd memory, making -+ * sure 4 extra bytes are allocated to fit the PID. 
-+ * -+ * There is no need for this extra allocation when memory statistics are -+ * enabled, since all allocations are tracked in DebugFS mem_area files. -+ */ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS) -+/* kmalloc guarantees a minimal alignment which is ARCH_KMALLOC_MINALIGN. This -+ * alignment is architecture specific and can be quite big, e.g. on Aarch64 -+ * it can be 64 bytes. This is too much for keeping a single PID field and could -+ * lead to a lot of wasted memory. This is a reason why we're defaulting to 8 -+ * bytes alignment which should be enough for any architecture. -+ */ -+#define ALLOCMEM_PID_SIZE_PADDING PVR_ALIGN(sizeof(IMG_UINT32), 8) -+#else -+#define ALLOCMEM_PID_SIZE_PADDING 0UL -+#endif -+ -+/* How many times kmalloc can fail before the allocation threshold is reduced */ -+static const IMG_UINT32 g_ui32kmallocFailLimit = 10; -+/* How many kmalloc failures happened since the last allocation threshold change */ -+static IMG_UINT32 g_ui32kmallocFailCount = 0; -+/* Current kmalloc threshold value in bytes */ -+static IMG_UINT32 g_ui32kmallocThreshold = PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD; -+/* Spinlock used so that the global variables above may not be modified by more than 1 thread at a time */ -+static DEFINE_SPINLOCK(kmalloc_lock); -+ -+#if defined(DEBUG) && defined(SUPPORT_VALIDATION) -+static DEFINE_SPINLOCK(kmalloc_leak_lock); -+static IMG_UINT32 g_ui32kmallocLeakCounter = 0; -+#endif -+ -+static inline void OSTryDecreaseKmallocThreshold(void) -+{ -+ unsigned long flags; -+ spin_lock_irqsave(&kmalloc_lock, flags); -+ -+ g_ui32kmallocFailCount++; -+ -+ if (g_ui32kmallocFailCount >= g_ui32kmallocFailLimit) -+ { -+ g_ui32kmallocFailCount = 0; -+ if (g_ui32kmallocThreshold > PAGE_SIZE) -+ { -+ g_ui32kmallocThreshold >>= 1; -+ printk(KERN_INFO "Threshold is now set to %d\n", g_ui32kmallocThreshold); -+ } -+ } -+ -+ spin_unlock_irqrestore(&kmalloc_lock, flags); -+} -+ -+static inline void OSResetKmallocFailCount(void) -+{ -+ unsigned long flags; -+ spin_lock_irqsave(&kmalloc_lock, flags); -+ -+ g_ui32kmallocFailCount = 0; -+ -+ spin_unlock_irqrestore(&kmalloc_lock, flags); -+} -+ -+static inline void _pvr_vfree(const void* pvAddr) -+{ -+#if defined(DEBUG) -+ /* Size harder to come by for vmalloc and since vmalloc allocates -+ * a whole number of pages, poison the minimum size known to have -+ * been allocated. 
-+ */ -+ OSCachedMemSet((void*)pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE, -+ PAGE_SIZE); -+#endif -+ vfree(pvAddr); -+} -+ -+static inline void _pvr_kfree(const void* pvAddr) -+{ -+#if defined(DEBUG) -+ /* Poison whole memory block */ -+ OSCachedMemSet((void*)pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE, -+ ksize(pvAddr)); -+#endif -+ kfree(pvAddr); -+} -+ -+static inline void *_pvr_alloc_stats_add(void *pvAddr, IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS) -+{ -+#if !defined(PVRSRV_ENABLE_PROCESS_STATS) -+ PVR_UNREFERENCED_PARAMETER(pvAddr); -+#else -+ if (!is_vmalloc_addr(pvAddr)) -+ { -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ IMG_CPU_PHYADDR sCpuPAddr; -+ sCpuPAddr.uiAddr = 0; -+ -+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, -+ pvAddr, -+ sCpuPAddr, -+ ksize(pvAddr), -+ OSGetCurrentClientProcessIDKM() -+ DEBUG_MEMSTATS_ARGS); -+#else -+ /* because clang has some features that allow detection out-of-bounds -+ * access we need to put the metadata in the beginning of the allocation */ -+ *(IMG_UINT32 *) pvAddr = OSGetCurrentClientProcessIDKM(); -+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvAddr), -+ *(IMG_UINT32 *) pvAddr); -+ -+ /* because metadata is kept in the beginning of the allocation we need -+ * to return address offset by the ALLOCMEM_PID_SIZE_PADDING */ -+ pvAddr = (IMG_UINT8 *) pvAddr + ALLOCMEM_PID_SIZE_PADDING; -+#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ -+ } -+ else -+ { -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ IMG_CPU_PHYADDR sCpuPAddr; -+ sCpuPAddr.uiAddr = 0; -+ -+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, -+ pvAddr, -+ sCpuPAddr, -+ PVR_ALIGN(ui32Size, PAGE_SIZE), -+ OSGetCurrentClientProcessIDKM() -+ DEBUG_MEMSTATS_ARGS); -+#else -+ PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, -+ PVR_ALIGN(ui32Size, PAGE_SIZE), -+ (IMG_UINT64)(uintptr_t) pvAddr, -+ OSGetCurrentClientProcessIDKM()); -+#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ -+ } -+#endif /* !defined(PVRSRV_ENABLE_PROCESS_STATS) */ -+ -+ return pvAddr; -+} -+ -+static inline void *_pvr_alloc_stats_remove(void *pvAddr) -+{ -+#if !defined(PVRSRV_ENABLE_PROCESS_STATS) -+ PVR_UNREFERENCED_PARAMETER(pvAddr); -+#else -+ if (!is_vmalloc_addr(pvAddr)) -+ { -+#if !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ /* because metadata is kept in the beginning of the allocation we need -+ * shift address offset by the ALLOCMEM_PID_SIZE_PADDING to the original -+ * value */ -+ pvAddr = (IMG_UINT8 *) pvAddr - ALLOCMEM_PID_SIZE_PADDING; -+ -+ /* first 4 bytes of the allocation are the process' PID */ -+ PVRSRVStatsDecrMemKAllocStat(ksize(pvAddr), *(IMG_UINT32 *) pvAddr); -+#else -+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, -+ (IMG_UINT64)(uintptr_t) pvAddr, -+ OSGetCurrentClientProcessIDKM()); -+#endif -+ } -+ else -+ { -+#if !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, -+ (IMG_UINT64)(uintptr_t) pvAddr); -+#else -+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, -+ (IMG_UINT64)(uintptr_t) pvAddr, -+ OSGetCurrentClientProcessIDKM()); -+#endif -+ } -+#endif /* !defined(PVRSRV_ENABLE_PROCESS_STATS) */ -+ -+ return pvAddr; -+} -+ -+void *(OSAllocMem)(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS) -+{ -+ void *pvRet = NULL; -+ -+ if ((ui32Size + ALLOCMEM_PID_SIZE_PADDING) <= g_ui32kmallocThreshold) -+ { -+ pvRet = kmalloc(ui32Size + ALLOCMEM_PID_SIZE_PADDING, GFP_KERNEL); -+ if (pvRet == NULL) -+ { -+ OSTryDecreaseKmallocThreshold(); -+ } -+ else -+ { -+ 
OSResetKmallocFailCount(); -+ } -+ } -+ -+ if (pvRet == NULL) -+ { -+ pvRet = vmalloc(ui32Size); -+ } -+ -+ if (pvRet != NULL) -+ { -+ pvRet = _pvr_alloc_stats_add(pvRet, ui32Size DEBUG_MEMSTATS_ARGS); -+ } -+ -+ return pvRet; -+} -+ -+void *(OSAllocZMem)(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS) -+{ -+ void *pvRet = NULL; -+ -+ if ((ui32Size + ALLOCMEM_PID_SIZE_PADDING) <= g_ui32kmallocThreshold) -+ { -+ pvRet = kzalloc(ui32Size + ALLOCMEM_PID_SIZE_PADDING, GFP_KERNEL); -+ if (pvRet == NULL) -+ { -+ OSTryDecreaseKmallocThreshold(); -+ } -+ else -+ { -+ OSResetKmallocFailCount(); -+ } -+ } -+ -+ if (pvRet == NULL) -+ { -+ pvRet = vzalloc(ui32Size); -+ } -+ -+ if (pvRet != NULL) -+ { -+ pvRet = _pvr_alloc_stats_add(pvRet, ui32Size DEBUG_MEMSTATS_ARGS); -+ } -+ -+ return pvRet; -+} -+ -+/* -+ * The parentheses around OSFreeMem prevent the macro in allocmem.h from -+ * applying, as it would break the function's definition. -+ */ -+void (OSFreeMem)(void *pvMem) -+{ -+#if defined(DEBUG) && defined(SUPPORT_VALIDATION) -+ unsigned long flags; -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ -+ if (psPVRSRVData) -+ { -+ IMG_UINT32 ui32kmallocLeakMax = psPVRSRVData->sMemLeakIntervals.ui32OSAlloc; -+ -+ spin_lock_irqsave(&kmalloc_leak_lock, flags); -+ -+ g_ui32kmallocLeakCounter++; -+ if (ui32kmallocLeakMax && (g_ui32kmallocLeakCounter >= ui32kmallocLeakMax)) -+ { -+ g_ui32kmallocLeakCounter = 0; -+ spin_unlock_irqrestore(&kmalloc_leak_lock, flags); -+ -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Skipped freeing of pointer 0x%p to trigger memory leak.", -+ __func__, -+ pvMem)); -+ return; -+ } -+ -+ spin_unlock_irqrestore(&kmalloc_leak_lock, flags); -+ } -+#endif -+ if (pvMem != NULL) -+ { -+ pvMem = _pvr_alloc_stats_remove(pvMem); -+ -+ if (!is_vmalloc_addr(pvMem)) -+ { -+ _pvr_kfree(pvMem); -+ } -+ else -+ { -+ _pvr_vfree(pvMem); -+ } -+ } -+} -+ -+void *OSAllocMemNoStats(IMG_UINT32 ui32Size) -+{ -+ void *pvRet = NULL; -+ -+ if (ui32Size <= g_ui32kmallocThreshold) -+ { -+ pvRet = kmalloc(ui32Size, GFP_KERNEL); -+ if (pvRet == NULL) -+ { -+ OSTryDecreaseKmallocThreshold(); -+ } -+ else -+ { -+ OSResetKmallocFailCount(); -+ } -+ } -+ -+ if (pvRet == NULL) -+ { -+ pvRet = vmalloc(ui32Size); -+ } -+ -+ return pvRet; -+} -+ -+void *OSAllocZMemNoStats(IMG_UINT32 ui32Size) -+{ -+ void *pvRet = NULL; -+ -+ if (ui32Size <= g_ui32kmallocThreshold) -+ { -+ pvRet = kzalloc(ui32Size, GFP_KERNEL); -+ if (pvRet == NULL) -+ { -+ OSTryDecreaseKmallocThreshold(); -+ } -+ else -+ { -+ OSResetKmallocFailCount(); -+ } -+ } -+ -+ if (pvRet == NULL) -+ { -+ pvRet = vzalloc(ui32Size); -+ } -+ -+ return pvRet; -+} -+ -+/* -+ * The parentheses around OSFreeMemNoStats prevent the macro in allocmem.h from -+ * applying, as it would break the function's definition. 
-+ */ -+void (OSFreeMemNoStats)(void *pvMem) -+{ -+#if defined(DEBUG) && defined(SUPPORT_VALIDATION) -+ unsigned long flags; -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ -+ if (psPVRSRVData) -+ { -+ IMG_UINT32 ui32kmallocLeakMax = psPVRSRVData->sMemLeakIntervals.ui32OSAlloc; -+ -+ spin_lock_irqsave(&kmalloc_leak_lock, flags); -+ -+ g_ui32kmallocLeakCounter++; -+ if (ui32kmallocLeakMax && (g_ui32kmallocLeakCounter >= ui32kmallocLeakMax)) -+ { -+ g_ui32kmallocLeakCounter = 0; -+ spin_unlock_irqrestore(&kmalloc_leak_lock, flags); -+ -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Skipped freeing of pointer 0x%p to trigger memory leak.", -+ __func__, -+ pvMem)); -+ return; -+ } -+ -+ spin_unlock_irqrestore(&kmalloc_leak_lock, flags); -+ } -+#endif -+ if (pvMem != NULL) -+ { -+ if (!is_vmalloc_addr(pvMem)) -+ { -+ _pvr_kfree(pvMem); -+ } -+ else -+ { -+ _pvr_vfree(pvMem); -+ } -+ } -+} -diff --git a/drivers/gpu/drm/img-rogue/allocmem.h b/drivers/gpu/drm/img-rogue/allocmem.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/allocmem.h -@@ -0,0 +1,224 @@ -+/*************************************************************************/ /*! -+@File allocmem.h -+@Title memory allocation header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Memory-Allocation API definitions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#ifndef ALLOCMEM_H -+#define ALLOCMEM_H -+ -+#include "img_types.h" -+#include "pvr_debug.h" -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+/* -+ * PVRSRV_ENABLE_PROCESS_STATS enables process statistics regarding events, -+ * resources and memory across all processes -+ * PVRSRV_ENABLE_MEMORY_STATS enables recording of Linux kernel memory -+ * allocations, provided that PVRSRV_ENABLE_PROCESS_STATS is enabled -+ * - Output can be found in: -+ * /(sys/kernel/debug|proc)/pvr/proc_stats/[live|retired]_pids_stats/mem_area -+ * PVRSRV_DEBUG_LINUX_MEMORY_STATS provides more details about memory -+ * statistics in conjunction with PVRSRV_ENABLE_MEMORY_STATS -+ * PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON is defined to encompass both memory -+ * allocation statistics functionalities described above in a single macro -+ */ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) && defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG) -+#define PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON -+#endif -+ -+/* -+ * When using detailed memory allocation statistics, the line number and -+ * file name where the allocation happened are also provided. -+ * When this feature is not used, these parameters are not needed. -+ */ -+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) -+#define DEBUG_MEMSTATS_PARAMS ,void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine -+#define DEBUG_MEMSTATS_ARGS ,pvAllocFromFile, ui32AllocFromLine -+#define DEBUG_MEMSTATS_UNREF (void)pvAllocFromFile; (void)ui32AllocFromLine; -+#define DEBUG_MEMSTATS_VALUES ,__FILE__, __LINE__ -+#else -+#define DEBUG_MEMSTATS_PARAMS /*!< -+ * Used for PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON -+ * build option. */ -+#define DEBUG_MEMSTATS_ARGS /*!< -+ * Used for PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON -+ * build option. */ -+#define DEBUG_MEMSTATS_UNREF /*!< -+ * Used for PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON -+ * build option. */ -+#define DEBUG_MEMSTATS_VALUES /*!< -+ * Used for PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON -+ * build option. */ -+#endif -+ -+ -+/**************************************************************************/ /*! -+@Function OSAllocMem -+@Description Allocates CPU memory. Contents are uninitialized. -+ If passed a size of zero, function should not assert, -+ but just return a NULL pointer. -+@Input ui32Size Size of required allocation (in bytes) -+@Return Pointer to allocated memory on success. -+ Otherwise NULL. -+ */ /**************************************************************************/ -+#if defined(DOXYGEN) -+void *OSAllocMem(IMG_UINT32 ui32Size); -+#else -+void *OSAllocMem(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS); -+#define OSAllocMem(_size) (OSAllocMem)((_size) DEBUG_MEMSTATS_VALUES) -+#endif -+ -+/**************************************************************************/ /*! -+@Function OSAllocZMem -+@Description Allocates CPU memory and initializes the contents to zero. -+ If passed a size of zero, function should not assert, -+ but just return a NULL pointer. -+@Input ui32Size Size of required allocation (in bytes) -+@Return Pointer to allocated memory on success. -+ Otherwise NULL. 
-+ */ /**************************************************************************/ -+#if defined(DOXYGEN) -+void *OSAllocZMem(IMG_UINT32 ui32Size); -+#else -+void *OSAllocZMem(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS); -+#define OSAllocZMem(_size) (OSAllocZMem)((_size) DEBUG_MEMSTATS_VALUES) -+#endif -+ -+ -+/**************************************************************************/ /*! -+@Function OSAllocMemNoStats -+@Description Allocates CPU memory. Contents are uninitialized. -+ If passed a size of zero, function should not assert, -+ but just return a NULL pointer. -+ The allocated memory is not accounted for by process stats. -+ Process stats are an optional feature (enabled only when -+ PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount -+ of memory allocated to help in debugging. Where this is not -+ required, OSAllocMem() and OSAllocMemNoStats() equate to -+ the same operation. -+@Input ui32Size Size of required allocation (in bytes) -+@Return Pointer to allocated memory on success. -+ Otherwise NULL. -+ */ /**************************************************************************/ -+void *OSAllocMemNoStats(IMG_UINT32 ui32Size); -+ -+/**************************************************************************/ /*! -+@Function OSAllocZMemNoStats -+@Description Allocates CPU memory and initializes the contents to zero. -+ If passed a size of zero, function should not assert, -+ but just return a NULL pointer. -+ The allocated memory is not accounted for by process stats. -+ Process stats are an optional feature (enabled only when -+ PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount -+ of memory allocated to help in debugging. Where this is not -+ required, OSAllocZMem() and OSAllocZMemNoStats() equate to -+ the same operation. -+@Input ui32Size Size of required allocation (in bytes) -+@Return Pointer to allocated memory on success. -+ Otherwise NULL. -+ */ /**************************************************************************/ -+void *OSAllocZMemNoStats(IMG_UINT32 ui32Size); -+ -+/**************************************************************************/ /*! -+@Function OSFreeMem -+@Description Frees previously allocated CPU memory. -+@Input pvCpuVAddr Pointer to the memory to be freed. -+@Return None. -+ */ /**************************************************************************/ -+void OSFreeMem(void *pvCpuVAddr); -+ -+/**************************************************************************/ /*! -+@Function OSFreeMemNoStats -+@Description Frees previously allocated CPU memory. -+ The freed memory does not update the figures in process stats. -+ Process stats are an optional feature (enabled only when -+ PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount -+ of memory allocated to help in debugging. Where this is not -+ required, OSFreeMem() and OSFreeMemNoStats() equate to the -+ same operation. -+@Input pvCpuVAddr Pointer to the memory to be freed. -+@Return None. -+ */ /**************************************************************************/ -+void OSFreeMemNoStats(void *pvCpuVAddr); -+ -+/* -+ * These macros allow us to catch double-free bugs on DEBUG builds and -+ * prevent crashes on RELEASE builds. -+ */ -+ -+/*! @cond Doxygen_Suppress */ -+#if defined(DEBUG) -+#define double_free_sentinel ((void *)&OSFreeMem) -+#define ALLOCMEM_ASSERT(exp) PVR_ASSERT(exp) -+#else -+#define double_free_sentinel NULL -+#define ALLOCMEM_ASSERT(exp) do {} while (0) -+#endif -+/*! @endcond */ -+ -+/*! Frees memory allocated by OSAllocMem(). 
*/ -+#define OSFreeMem(_ptr) do { \ -+ ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \ -+ (OSFreeMem)(_ptr); \ -+ (_ptr) = double_free_sentinel; \ -+ MSC_SUPPRESS_4127 \ -+ } while (0) -+ -+/*! Frees memory allocated by OSAllocMemNoStats(). */ -+#define OSFreeMemNoStats(_ptr) do { \ -+ ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \ -+ (OSFreeMemNoStats)(_ptr); \ -+ (_ptr) = double_free_sentinel; \ -+ MSC_SUPPRESS_4127 \ -+ } while (0) -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif /* ALLOCMEM_H */ -+ -+/****************************************************************************** -+ End of file (allocmem.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/apollo/apollo.mk b/drivers/gpu/drm/img-rogue/apollo/apollo.mk -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/apollo.mk -@@ -0,0 +1,4 @@ -+apollo-y += \ -+ tc_apollo.o \ -+ tc_drv.o \ -+ tc_odin.o -diff --git a/drivers/gpu/drm/img-rogue/apollo/apollo_regs.h b/drivers/gpu/drm/img-rogue/apollo/apollo_regs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/apollo_regs.h -@@ -0,0 +1,108 @@ -+/*************************************************************************/ /*! -+@File -+@Title System Description Header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description This header provides system-specific declarations and macros -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(APOLLO_REGS_H) -+#define APOLLO_REGS_H -+ -+#include "tc_clocks.h" -+ -+/* TC TCF5 */ -+#define TC5_SYS_APOLLO_REG_PCI_BASENUM (1) -+#define TC5_SYS_APOLLO_REG_PDP2_OFFSET (0x800000) -+#define TC5_SYS_APOLLO_REG_PDP2_SIZE (0x7C4) -+ -+#define TC5_SYS_APOLLO_REG_PDP2_FBDC_OFFSET (0xA00000) -+#define TC5_SYS_APOLLO_REG_PDP2_FBDC_SIZE (0x14) -+ -+#define TC5_SYS_APOLLO_REG_HDMI_OFFSET (0xC00000) -+#define TC5_SYS_APOLLO_REG_HDMI_SIZE (0x1C) -+ -+/* TC ES2 */ -+#define TCF_TEMP_SENSOR_SPI_OFFSET 0xe -+#define TCF_TEMP_SENSOR_TO_C(raw) (((raw) * 248 / 4096) - 54) -+ -+/* Number of bytes that are broken */ -+#define SYS_DEV_MEM_BROKEN_BYTES (1024 * 1024) -+#define SYS_DEV_MEM_REGION_SIZE (0x40000000 - SYS_DEV_MEM_BROKEN_BYTES) -+ -+/* Apollo reg on base register 0 */ -+#define SYS_APOLLO_REG_PCI_BASENUM (0) -+#define SYS_APOLLO_REG_REGION_SIZE (0x00010000) -+ -+#define SYS_APOLLO_REG_SYS_OFFSET (0x0000) -+#define SYS_APOLLO_REG_SYS_SIZE (0x0400) -+ -+#define SYS_APOLLO_REG_PLL_OFFSET (0x1000) -+#define SYS_APOLLO_REG_PLL_SIZE (0x0400) -+ -+#define SYS_APOLLO_REG_HOST_OFFSET (0x4050) -+#define SYS_APOLLO_REG_HOST_SIZE (0x0014) -+ -+#define SYS_APOLLO_REG_PDP1_OFFSET (0xC000) -+#define SYS_APOLLO_REG_PDP1_SIZE (0x2000) -+ -+/* Offsets for flashing Apollo PROMs from base 0 */ -+#define APOLLO_FLASH_STAT_OFFSET (0x4058) -+#define APOLLO_FLASH_DATA_WRITE_OFFSET (0x4050) -+#define APOLLO_FLASH_RESET_OFFSET (0x4060) -+ -+#define APOLLO_FLASH_FIFO_STATUS_MASK (0xF) -+#define APOLLO_FLASH_FIFO_STATUS_SHIFT (0) -+#define APOLLO_FLASH_PROGRAM_STATUS_MASK (0xF) -+#define APOLLO_FLASH_PROGRAM_STATUS_SHIFT (16) -+ -+#define APOLLO_FLASH_PROG_COMPLETE_BIT (0x1) -+#define APOLLO_FLASH_PROG_PROGRESS_BIT (0x2) -+#define APOLLO_FLASH_PROG_FAILED_BIT (0x4) -+#define APOLLO_FLASH_INV_FILETYPE_BIT (0x8) -+ -+#define APOLLO_FLASH_FIFO_SIZE (8) -+ -+/* RGX reg on base register 1 */ -+#define SYS_RGX_REG_PCI_BASENUM (1) -+#define SYS_RGX_REG_REGION_SIZE (0x7FFFF) -+ -+/* Device memory (including HP mapping) on base register 2 */ -+#define SYS_DEV_MEM_PCI_BASENUM (2) -+ -+#endif /* APOLLO_REGS_H */ -diff --git a/drivers/gpu/drm/img-rogue/apollo/bonnie_tcf.h b/drivers/gpu/drm/img-rogue/apollo/bonnie_tcf.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/bonnie_tcf.h -@@ -0,0 +1,68 @@ -+/*************************************************************************/ /*! -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
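apollo_regs.h above packs two 4-bit status fields into the word read back at APOLLO_FLASH_STAT_OFFSET: the FIFO fill level in bits 3:0 and the programming status in bits 19:16, with COMPLETE/PROGRESS/FAILED/INV_FILETYPE flag bits. The short sketch below shows how those mask/shift pairs decode a raw read; the register value is a made-up example and read_flash_stat() is a hypothetical stand-in for a real MMIO access.

#include <stdint.h>
#include <stdio.h>

/* Constants as defined in apollo_regs.h above. */
#define APOLLO_FLASH_FIFO_STATUS_MASK     (0xF)
#define APOLLO_FLASH_FIFO_STATUS_SHIFT    (0)
#define APOLLO_FLASH_PROGRAM_STATUS_MASK  (0xF)
#define APOLLO_FLASH_PROGRAM_STATUS_SHIFT (16)
#define APOLLO_FLASH_PROG_COMPLETE_BIT    (0x1)
#define APOLLO_FLASH_PROG_FAILED_BIT      (0x4)

/* Hypothetical stand-in for reading APOLLO_FLASH_STAT_OFFSET. */
static uint32_t read_flash_stat(void)
{
    return 0x00010003; /* example: programming complete, 3 FIFO entries used */
}

int main(void)
{
    uint32_t stat = read_flash_stat();
    uint32_t fifo = (stat >> APOLLO_FLASH_FIFO_STATUS_SHIFT) &
                    APOLLO_FLASH_FIFO_STATUS_MASK;
    uint32_t prog = (stat >> APOLLO_FLASH_PROGRAM_STATUS_SHIFT) &
                    APOLLO_FLASH_PROGRAM_STATUS_MASK;

    printf("FIFO level: %u of 8\n", fifo);
    printf("programming %s\n",
           (prog & APOLLO_FLASH_PROG_FAILED_BIT) ? "failed" :
           (prog & APOLLO_FLASH_PROG_COMPLETE_BIT) ? "complete" : "in progress");
    return 0;
}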
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+/* bonnie_tcf.h - Bonnie TCF register definitions */ -+ -+/* tab size 4 */ -+ -+#ifndef BONNIE_TCF_DEFS_H -+#define BONNIE_TCF_DEFS_H -+ -+#define BONNIE_TCF_OFFSET_BONNIETC_REGBANK 0x00000000 -+#define BONNIE_TCF_OFFSET_TC_IFACE_COUNTERS 0x00004000 -+#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_IMGV4_RTM_TOP 0x00008000 -+#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_TCF_SCRATCH_PAD_SECN 0x0000C000 -+#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_TCF_SCRATCH_PAD_DBG 0x00010000 -+#define BONNIE_TCF_OFFSET_MULTI_CLK_ALIGN 0x00014000 -+#define BONNIE_TCF_OFFSET_ALIGN_DATA_TX 0x00018000 -+#define BONNIE_TCF_OFFSET_SAI_RX_1 0x0001C000 -+#define BONNIE_TCF_OFFSET_SAI_RX_SDR 0x00040000 -+#define BONNIE_TCF_OFFSET_SAI_TX_1 0x00044000 -+#define BONNIE_TCF_OFFSET_SAI_TX_SDR 0x00068000 -+ -+#define BONNIE_TCF_OFFSET_SAI_RX_DELTA 0x00004000 -+#define BONNIE_TCF_OFFSET_SAI_TX_DELTA 0x00004000 -+ -+#define BONNIE_TCF_OFFSET_SAI_CLK_TAPS 0x0000000C -+#define BONNIE_TCF_OFFSET_SAI_EYES 0x00000010 -+#define BONNIE_TCF_OFFSET_SAI_TRAIN_ACK 0x00000018 -+ -+ -+#endif /* BONNIE_TCF_DEFS_H */ -diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_crtc.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_crtc.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_crtc.c -@@ -0,0 +1,1104 @@ -+/* -+ * @File -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
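bonnie_tcf.h above lists a first SAI RX/TX bank plus a per-bank delta (BONNIE_TCF_OFFSET_SAI_RX_1 / _SAI_RX_DELTA and the TX equivalents), which reads like a row of identical register banks at a fixed stride. Under that assumption, and it is only an assumption drawn from the naming, a bank's base offset would be computed along these lines; sai_rx_bank_base() is a hypothetical helper, not something the header defines.

#include <stdint.h>
#include <stdio.h>

/* Offsets as defined in bonnie_tcf.h above. */
#define BONNIE_TCF_OFFSET_SAI_RX_1     0x0001C000u
#define BONNIE_TCF_OFFSET_SAI_RX_DELTA 0x00004000u

/* Hypothetical helper: base offset of the nth SAI RX bank, assuming the
 * banks are tiled at a fixed BONNIE_TCF_OFFSET_SAI_RX_DELTA stride. */
static uint32_t sai_rx_bank_base(unsigned int bank)
{
    return BONNIE_TCF_OFFSET_SAI_RX_1 +
           bank * BONNIE_TCF_OFFSET_SAI_RX_DELTA;
}

int main(void)
{
    unsigned int i;

    for (i = 0; i < 4; i++)
        printf("SAI RX bank %u base: 0x%08x\n", i, sai_rx_bank_base(i));
    return 0;
}

A 0x4000 stride would place nine such banks between SAI_RX_1 (0x1C000) and SAI_RX_SDR (0x40000), which is at least consistent with this reading of the offsets.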
-+ */ -+ -+#include "pvr_linux_fence.h" -+#include "drm_pdp_drv.h" -+ -+#include -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) -+#include -+#else -+#include -+#endif -+ -+#include -+#include -+#include -+ -+#include "pvr_dma_resv.h" -+#include "drm_pdp_gem.h" -+ -+#include "pdp_apollo.h" -+#include "pdp_odin.h" -+#include "pdp_plato.h" -+ -+#include "plato_drv.h" -+ -+#if defined(PDP_USE_ATOMIC) -+#include -+#include -+#endif -+ -+#include "kernel_compatibility.h" -+ -+enum pdp_crtc_flip_status { -+ PDP_CRTC_FLIP_STATUS_NONE = 0, -+ PDP_CRTC_FLIP_STATUS_PENDING, -+ PDP_CRTC_FLIP_STATUS_DONE, -+}; -+ -+struct pdp_flip_data { -+ struct dma_fence_cb base; -+ struct drm_crtc *crtc; -+ struct dma_fence *wait_fence; -+}; -+ -+/* returns true for ok, false for fail */ -+static bool pdp_clocks_set(struct drm_crtc *crtc, -+ struct drm_display_mode *adjusted_mode) -+{ -+ struct pdp_drm_private *dev_priv = crtc->dev->dev_private; -+ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); -+ bool res; -+ -+ switch (dev_priv->version) { -+ case PDP_VERSION_ODIN: { -+ pdp_odin_set_updates_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, false); -+ res = pdp_odin_clocks_set(crtc->dev->dev, -+ pdp_crtc->pdp_reg, pdp_crtc->pll_reg, -+ 0, /* apollo only */ -+ dev_priv->outdev - 1, -+ pdp_crtc->odn_core_reg, /* odin only */ -+ adjusted_mode->hdisplay, -+ adjusted_mode->vdisplay, -+ dev_priv->subversion); -+ pdp_odin_set_updates_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, true); -+ -+ break; -+ } -+ case PDP_VERSION_APOLLO: { -+ int clock_in_mhz = adjusted_mode->clock / 1000; -+ -+ pdp_apollo_set_updates_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, false); -+ res = pdp_apollo_clocks_set(crtc->dev->dev, -+ pdp_crtc->pdp_reg, pdp_crtc->pll_reg, -+ clock_in_mhz, /* apollo only */ -+ NULL, /* odin only */ -+ adjusted_mode->hdisplay, -+ adjusted_mode->vdisplay); -+ pdp_apollo_set_updates_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, true); -+ -+ DRM_DEBUG_DRIVER("pdp clock set to %dMhz\n", clock_in_mhz); -+ -+ break; -+ } -+ case PDP_VERSION_PLATO: -+#if defined(SUPPORT_PLATO_DISPLAY) -+ plato_enable_pdp_clock(dev_priv->dev->dev->parent); -+ res = true; -+#else -+ DRM_ERROR("Trying to enable plato PDP clock on non-Plato build\n"); -+ res = false; -+#endif -+ break; -+ default: -+ BUG(); -+ } -+ -+ return res; -+} -+ -+void pdp_crtc_set_plane_enabled(struct drm_crtc *crtc, bool enable) -+{ -+ struct pdp_drm_private *dev_priv = crtc->dev->dev_private; -+ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); -+ -+ switch (dev_priv->version) { -+ case PDP_VERSION_ODIN: -+ pdp_odin_set_plane_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, -+ 0, enable); -+ break; -+ case PDP_VERSION_APOLLO: -+ pdp_apollo_set_plane_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, -+ 0, enable); -+ break; -+ case PDP_VERSION_PLATO: -+ pdp_plato_set_plane_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, -+ 0, enable); -+ break; -+ default: -+ BUG(); -+ } -+} -+ -+static void pdp_crtc_set_syncgen_enabled(struct drm_crtc *crtc, bool enable) -+{ -+ struct pdp_drm_private *dev_priv = crtc->dev->dev_private; -+ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); -+ -+ switch (dev_priv->version) { -+ case PDP_VERSION_ODIN: -+ pdp_odin_set_syncgen_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, -+ enable); -+ break; -+ case PDP_VERSION_APOLLO: -+ pdp_apollo_set_syncgen_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, -+ enable); -+ break; -+ case PDP_VERSION_PLATO: -+ pdp_plato_set_syncgen_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, -+ enable); -+ break; -+ default: 
-+ BUG(); -+ } -+} -+ -+static void pdp_crtc_set_enabled(struct drm_crtc *crtc, bool enable) -+{ -+ struct pdp_drm_private *dev_priv = crtc->dev->dev_private; -+ -+ if (enable) { -+ pdp_crtc_set_syncgen_enabled(crtc, enable); -+ pdp_crtc_set_plane_enabled(crtc, dev_priv->display_enabled); -+ drm_crtc_vblank_on(crtc); -+ } else { -+ drm_crtc_vblank_off(crtc); -+ pdp_crtc_set_plane_enabled(crtc, enable); -+ pdp_crtc_set_syncgen_enabled(crtc, enable); -+ } -+} -+ -+static void pdp_crtc_mode_set(struct drm_crtc *crtc, -+ struct drm_display_mode *adjusted_mode) -+{ -+ /* -+ * ht = horizontal total -+ * hbps = horizontal back porch start -+ * has = horizontal active start -+ * hlbs = horizontal left border start -+ * hfps = horizontal front porch start -+ * hrbs = horizontal right border start -+ * -+ * vt = vertical total -+ * vbps = vertical back porch start -+ * vas = vertical active start -+ * vtbs = vertical top border start -+ * vfps = vertical front porch start -+ * vbbs = vertical bottom border start -+ */ -+ struct pdp_drm_private *dev_priv = crtc->dev->dev_private; -+ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); -+ uint32_t ht = adjusted_mode->htotal; -+ uint32_t hbps = adjusted_mode->hsync_end - adjusted_mode->hsync_start; -+ uint32_t has = (adjusted_mode->htotal - adjusted_mode->hsync_start); -+ uint32_t hlbs = has; -+ uint32_t hfps = (hlbs + adjusted_mode->hdisplay); -+ uint32_t hrbs = hfps; -+ uint32_t vt = adjusted_mode->vtotal; -+ uint32_t vbps = adjusted_mode->vsync_end - adjusted_mode->vsync_start; -+ uint32_t vas = (adjusted_mode->vtotal - adjusted_mode->vsync_start); -+ uint32_t vtbs = vas; -+ uint32_t vfps = (vtbs + adjusted_mode->vdisplay); -+ uint32_t vbbs = vfps; -+ bool ok; -+ -+ ok = pdp_clocks_set(crtc, adjusted_mode); -+ -+ if (!ok) { -+ dev_info(crtc->dev->dev, "%s failed\n", __func__); -+ return; -+ } -+ -+ switch (dev_priv->version) { -+ case PDP_VERSION_ODIN: -+ pdp_odin_set_updates_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, false); -+ pdp_odin_reset_planes(crtc->dev->dev, -+ pdp_crtc->pdp_reg); -+ pdp_odin_mode_set(crtc->dev->dev, -+ pdp_crtc->pdp_reg, -+ adjusted_mode->hdisplay, adjusted_mode->vdisplay, -+ hbps, ht, has, -+ hlbs, hfps, hrbs, -+ vbps, vt, vas, -+ vtbs, vfps, vbbs, -+ adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC, -+ adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC, -+ pdp_crtc->pfim_reg); -+ pdp_odin_set_powerdwn_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, false); -+ pdp_odin_set_updates_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, true); -+ break; -+ case PDP_VERSION_APOLLO: -+ pdp_apollo_set_updates_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, false); -+ pdp_apollo_reset_planes(crtc->dev->dev, -+ pdp_crtc->pdp_reg); -+ pdp_apollo_mode_set(crtc->dev->dev, -+ pdp_crtc->pdp_reg, -+ adjusted_mode->hdisplay, adjusted_mode->vdisplay, -+ hbps, ht, has, -+ hlbs, hfps, hrbs, -+ vbps, vt, vas, -+ vtbs, vfps, vbbs, -+ adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC, -+ adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC); -+ pdp_apollo_set_powerdwn_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, false); -+ pdp_apollo_set_updates_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, true); -+ break; -+ case PDP_VERSION_PLATO: -+ pdp_plato_mode_set(crtc->dev->dev, -+ pdp_crtc->pdp_reg, -+ adjusted_mode->hdisplay, -+ adjusted_mode->vdisplay, -+ hbps, ht, has, -+ hlbs, hfps, hrbs, -+ vbps, vt, vas, -+ vtbs, vfps, vbbs, -+ adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC, -+ adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC); -+ break; -+ default: -+ BUG(); -+ } -+} -+ -+ -+static bool 
pdp_crtc_helper_mode_fixup(struct drm_crtc *crtc, -+ const struct drm_display_mode *mode, -+ struct drm_display_mode *adjusted_mode) -+{ -+ struct pdp_drm_private *dev_priv = crtc->dev->dev_private; -+ -+ if (dev_priv->version == PDP_VERSION_ODIN -+ && mode->hdisplay == 1920 -+ && mode->vdisplay == 1080) { -+ -+ /* 1080p 60Hz */ -+ const int h_total = 2200; -+ const int h_active_start = 192; -+ const int h_back_porch_start = 44; -+ const int v_total = 1125; -+ const int v_active_start = 41; -+ const int v_back_porch_start = 5; -+ -+ adjusted_mode->htotal = h_total; -+ adjusted_mode->hsync_start = adjusted_mode->htotal - -+ h_active_start; -+ adjusted_mode->hsync_end = adjusted_mode->hsync_start + -+ h_back_porch_start; -+ adjusted_mode->vtotal = v_total; -+ adjusted_mode->vsync_start = adjusted_mode->vtotal - -+ v_active_start; -+ adjusted_mode->vsync_end = adjusted_mode->vsync_start + -+ v_back_porch_start; -+ } -+ return true; -+} -+ -+static void pdp_crtc_flip_complete(struct drm_crtc *crtc); -+ -+#if defined(PDP_USE_ATOMIC) -+static void pdp_crtc_helper_mode_set_nofb(struct drm_crtc *crtc) -+{ -+ pdp_crtc_mode_set(crtc, &crtc->state->adjusted_mode); -+} -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) -+static void pdp_crtc_helper_atomic_flush(struct drm_crtc *crtc, -+ struct drm_crtc_state *old_crtc_state) -+{ -+#else -+static void pdp_crtc_helper_atomic_flush(struct drm_crtc *crtc, -+ struct drm_atomic_state *state) -+{ -+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); -+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ -+ struct drm_crtc_state *new_crtc_state = crtc->state; -+ -+ if (!new_crtc_state->active || !old_crtc_state->active) -+ return; -+ -+ if (crtc->state->event) { -+ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); -+ unsigned long flags; -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) -+ pdp_crtc->flip_async = new_crtc_state->async_flip; -+#else -+ pdp_crtc->flip_async = !!(new_crtc_state->pageflip_flags -+ & DRM_MODE_PAGE_FLIP_ASYNC); -+#endif -+ if (pdp_crtc->flip_async) -+ WARN_ON(drm_crtc_vblank_get(crtc) != 0); -+ -+ spin_lock_irqsave(&crtc->dev->event_lock, flags); -+ pdp_crtc->flip_event = crtc->state->event; -+ crtc->state->event = NULL; -+ -+ atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_DONE); -+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); -+ -+ if (pdp_crtc->flip_async) -+ pdp_crtc_flip_complete(crtc); -+ } -+} -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) -+static void pdp_crtc_helper_atomic_enable(struct drm_crtc *crtc, -+ struct drm_crtc_state *old_crtc_state) -+#else -+static void pdp_crtc_helper_atomic_enable(struct drm_crtc *crtc, -+ struct drm_atomic_state *state) -+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ -+{ -+ pdp_crtc_set_enabled(crtc, true); -+ -+ if (crtc->state->event) { -+ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); -+ unsigned long flags; -+ -+ WARN_ON(drm_crtc_vblank_get(crtc) != 0); -+ -+ spin_lock_irqsave(&crtc->dev->event_lock, flags); -+ pdp_crtc->flip_event = crtc->state->event; -+ crtc->state->event = NULL; -+ -+ atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_DONE); -+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); -+ } -+} -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) -+static void pdp_crtc_helper_atomic_disable(struct drm_crtc *crtc, -+ struct drm_crtc_state *old_crtc_state) -+#else -+static void pdp_crtc_helper_atomic_disable(struct drm_crtc *crtc, -+ struct drm_atomic_state *state) -+#endif 
/* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ -+{ -+ pdp_crtc_set_enabled(crtc, false); -+ -+ if (crtc->state->event) { -+ unsigned long flags; -+ -+ spin_lock_irqsave(&crtc->dev->event_lock, flags); -+ drm_crtc_send_vblank_event(crtc, crtc->state->event); -+ crtc->state->event = NULL; -+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); -+ } -+} -+#else -+static void pdp_crtc_helper_dpms(struct drm_crtc *crtc, int mode) -+{ -+} -+ -+static void pdp_crtc_helper_prepare(struct drm_crtc *crtc) -+{ -+ pdp_crtc_set_enabled(crtc, false); -+} -+ -+static void pdp_crtc_helper_commit(struct drm_crtc *crtc) -+{ -+ pdp_crtc_set_enabled(crtc, true); -+} -+ -+static int pdp_crtc_helper_mode_set_base_atomic(struct drm_crtc *crtc, -+ struct drm_framebuffer *fb, -+ int x, int y, -+ enum mode_set_atomic atomic) -+{ -+ if (x < 0 || y < 0) -+ return -EINVAL; -+ -+ pdp_plane_set_surface(crtc, crtc->primary, fb, -+ (uint32_t) x, (uint32_t) y); -+ -+ return 0; -+} -+ -+static int pdp_crtc_helper_mode_set_base(struct drm_crtc *crtc, -+ int x, int y, -+ struct drm_framebuffer *old_fb) -+{ -+ if (!crtc->primary->fb) { -+ DRM_ERROR("no framebuffer\n"); -+ return 0; -+ } -+ -+ return pdp_crtc_helper_mode_set_base_atomic(crtc, -+ crtc->primary->fb, -+ x, y, -+ 0); -+} -+ -+static int pdp_crtc_helper_mode_set(struct drm_crtc *crtc, -+ struct drm_display_mode *mode, -+ struct drm_display_mode *adjusted_mode, -+ int x, int y, -+ struct drm_framebuffer *old_fb) -+{ -+ pdp_crtc_mode_set(crtc, adjusted_mode); -+ -+ return pdp_crtc_helper_mode_set_base(crtc, x, y, old_fb); -+} -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) -+static void pdp_crtc_helper_load_lut(struct drm_crtc *crtc) -+{ -+} -+#endif -+ -+static void pdp_crtc_helper_disable(struct drm_crtc *crtc) -+{ -+ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); -+ enum pdp_crtc_flip_status status; -+ -+ pdp_crtc_set_enabled(crtc, false); -+ -+ status = atomic_read(&pdp_crtc->flip_status); -+ if (status != PDP_CRTC_FLIP_STATUS_NONE) { -+ long lerr; -+ -+ lerr = wait_event_timeout( -+ pdp_crtc->flip_pending_wait_queue, -+ atomic_read(&pdp_crtc->flip_status) -+ != PDP_CRTC_FLIP_STATUS_PENDING, -+ 30 * HZ); -+ if (!lerr) -+ DRM_ERROR("Failed to wait for pending flip\n"); -+ else if (!pdp_crtc->flip_async) -+ pdp_crtc_flip_complete(crtc); -+ } -+} -+#endif /* defined(PDP_USE_ATOMIC) */ -+ -+static int pfim_init(struct drm_device *dev, -+ struct pdp_crtc *pdp_crtc, -+ const char *crtc_name) -+{ -+ struct pdp_drm_private *dev_priv = dev->dev_private; -+ struct resource *regs; -+ int err; -+ -+ if (!dev_priv->pfim_capable) { -+ pdp_crtc->pfim_reg = NULL; -+ return 0; -+ } -+ -+ regs = platform_get_resource_byname(to_platform_device(dev->dev), -+ IORESOURCE_MEM, -+ "pfim-regs"); -+ if (!regs) { -+ DRM_ERROR("missing pfim register info\n"); -+ return -ENXIO; -+ } -+ -+ pdp_crtc->pfim_reg_phys_base = regs->start; -+ pdp_crtc->pfim_reg_size = resource_size(regs); -+ -+ if (!request_mem_region(pdp_crtc->pfim_reg_phys_base, -+ pdp_crtc->pfim_reg_size, -+ crtc_name)) { -+ DRM_ERROR("failed to reserve pfim registers\n"); -+ return -EBUSY; -+ } -+ -+ pdp_crtc->pfim_reg = -+ ioremap(pdp_crtc->pfim_reg_phys_base, pdp_crtc->pfim_reg_size); -+ if (!pdp_crtc->pfim_reg) { -+ DRM_ERROR("failed to map pfim registers\n"); -+ err = -ENOMEM; -+ goto err_release_mem; -+ } -+ return 0; -+ -+err_release_mem: -+ release_mem_region(pdp_crtc->pfim_reg_phys_base, -+ pdp_crtc->pfim_reg_size); -+ pdp_crtc->pfim_reg = NULL; -+ return err; -+} -+ -+static void pfim_deinit(struct 
pdp_crtc *pdp_crtc) -+{ -+ if (pdp_crtc->pfim_reg) { -+ iounmap(pdp_crtc->pfim_reg); -+ release_mem_region(pdp_crtc->pfim_reg_phys_base, -+ pdp_crtc->pfim_reg_size); -+ pdp_crtc->pfim_reg = NULL; -+ } -+} -+ -+static void pdp_crtc_destroy(struct drm_crtc *crtc) -+{ -+ struct drm_device *dev = crtc->dev; -+ struct pdp_drm_private *dev_priv = dev->dev_private; -+ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); -+ -+ DRM_DEBUG_DRIVER("[CRTC:%d]\n", crtc->base.id); -+ -+ drm_crtc_cleanup(crtc); -+ -+ iounmap(pdp_crtc->pll_reg); -+ -+ iounmap(pdp_crtc->pdp_reg); -+ release_mem_region(pdp_crtc->pdp_reg_phys_base, pdp_crtc->pdp_reg_size); -+ -+ pfim_deinit(pdp_crtc); -+ -+ kfree(pdp_crtc); -+ dev_priv->crtc = NULL; -+} -+ -+static void pdp_crtc_flip_complete(struct drm_crtc *crtc) -+{ -+ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); -+ unsigned long flags; -+ -+ spin_lock_irqsave(&crtc->dev->event_lock, flags); -+ -+ /* The flipping process has been completed so reset the flip state */ -+ atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_NONE); -+ pdp_crtc->flip_async = false; -+ -+#if !defined(PDP_USE_ATOMIC) -+ if (pdp_crtc->flip_data) { -+ dma_fence_put(pdp_crtc->flip_data->wait_fence); -+ kfree(pdp_crtc->flip_data); -+ pdp_crtc->flip_data = NULL; -+ } -+#endif -+ -+ if (pdp_crtc->flip_event) { -+ drm_crtc_send_vblank_event(crtc, pdp_crtc->flip_event); -+ pdp_crtc->flip_event = NULL; -+ } -+ -+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); -+} -+ -+#if !defined(PDP_USE_ATOMIC) -+static void pdp_crtc_flip(struct drm_crtc *crtc) -+{ -+ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); -+ struct drm_framebuffer *old_fb; -+ -+ WARN_ON(atomic_read(&to_pdp_crtc(crtc)->flip_status) -+ != PDP_CRTC_FLIP_STATUS_PENDING); -+ -+ old_fb = pdp_crtc->old_fb; -+ pdp_crtc->old_fb = NULL; -+ -+ /* -+ * The graphics stream registers latch on vsync so we can go ahead and -+ * do the flip now. 
-+ */ -+ (void) pdp_crtc_helper_mode_set_base(crtc, crtc->x, crtc->y, old_fb); -+ -+ atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_DONE); -+ wake_up(&pdp_crtc->flip_pending_wait_queue); -+ -+ if (pdp_crtc->flip_async) -+ pdp_crtc_flip_complete(crtc); -+} -+ -+static void pdp_crtc_flip_cb(struct dma_fence *fence, struct dma_fence_cb *cb) -+{ -+ struct pdp_flip_data *flip_data = -+ container_of(cb, struct pdp_flip_data, base); -+ -+ pdp_crtc_flip(flip_data->crtc); -+} -+ -+static void pdp_crtc_flip_schedule_cb(struct dma_fence *fence, -+ struct dma_fence_cb *cb) -+{ -+ struct pdp_flip_data *flip_data = -+ container_of(cb, struct pdp_flip_data, base); -+ int err = 0; -+ -+ if (flip_data->wait_fence) -+ err = dma_fence_add_callback(flip_data->wait_fence, -+ &flip_data->base, -+ pdp_crtc_flip_cb); -+ -+ if (!flip_data->wait_fence || err) { -+ if (err && err != -ENOENT) -+ DRM_ERROR("flip failed to wait on old buffer\n"); -+ pdp_crtc_flip_cb(flip_data->wait_fence, &flip_data->base); -+ } -+} -+ -+static int pdp_crtc_flip_schedule(struct drm_crtc *crtc, -+ struct drm_gem_object *obj, -+ struct drm_gem_object *old_obj) -+{ -+ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); -+ struct dma_resv *resv = pdp_gem_get_resv(obj); -+ struct dma_resv *old_resv = pdp_gem_get_resv(old_obj); -+ struct pdp_flip_data *flip_data; -+ struct dma_fence *fence; -+ int err; -+ -+ flip_data = kmalloc(sizeof(*flip_data), GFP_KERNEL); -+ if (!flip_data) -+ return -ENOMEM; -+ -+ flip_data->crtc = crtc; -+ -+ ww_mutex_lock(&old_resv->lock, NULL); -+ flip_data->wait_fence = -+ dma_fence_get(dma_resv_get_excl(old_resv)); -+ -+ if (old_resv != resv) { -+ ww_mutex_unlock(&old_resv->lock); -+ ww_mutex_lock(&resv->lock, NULL); -+ } -+ -+ fence = dma_fence_get(dma_resv_get_excl(resv)); -+ ww_mutex_unlock(&resv->lock); -+ -+ pdp_crtc->flip_data = flip_data; -+ atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_PENDING); -+ -+ if (fence) { -+ err = dma_fence_add_callback(fence, &flip_data->base, -+ pdp_crtc_flip_schedule_cb); -+ dma_fence_put(fence); -+ if (err && err != -ENOENT) -+ goto err_set_flip_status_none; -+ } -+ -+ if (!fence || err == -ENOENT) { -+ pdp_crtc_flip_schedule_cb(fence, &flip_data->base); -+ err = 0; -+ } -+ -+ return err; -+ -+err_set_flip_status_none: -+ atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_NONE); -+ dma_fence_put(flip_data->wait_fence); -+ kfree(flip_data); -+ return err; -+} -+ -+static int pdp_crtc_page_flip(struct drm_crtc *crtc, -+ struct drm_framebuffer *fb, -+ struct drm_pending_vblank_event *event, -+ uint32_t page_flip_flags -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) -+ , struct drm_modeset_acquire_ctx *ctx -+#endif -+ ) -+{ -+ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); -+ struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb); -+ struct pdp_framebuffer *pdp_old_fb = -+ to_pdp_framebuffer(crtc->primary->fb); -+ enum pdp_crtc_flip_status status; -+ unsigned long flags; -+ int err; -+ -+ spin_lock_irqsave(&crtc->dev->event_lock, flags); -+ status = atomic_read(&pdp_crtc->flip_status); -+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); -+ -+ if (status != PDP_CRTC_FLIP_STATUS_NONE) -+ return -EBUSY; -+ -+ if (!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)) { -+ err = drm_crtc_vblank_get(crtc); -+ if (err) -+ return err; -+ } -+ -+ pdp_crtc->old_fb = crtc->primary->fb; -+ pdp_crtc->flip_event = event; -+ pdp_crtc->flip_async = !!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC); -+ -+ /* Set the crtc primary plane to point to the new framebuffer */ -+ 
crtc->primary->fb = fb; -+ -+ err = pdp_crtc_flip_schedule(crtc, pdp_fb->obj[0], pdp_old_fb->obj[0]); -+ if (err) { -+ crtc->primary->fb = pdp_crtc->old_fb; -+ pdp_crtc->old_fb = NULL; -+ pdp_crtc->flip_event = NULL; -+ pdp_crtc->flip_async = false; -+ -+ DRM_ERROR("failed to schedule flip (err=%d)\n", err); -+ goto err_vblank_put; -+ } -+ -+ return 0; -+ -+err_vblank_put: -+ if (!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)) -+ drm_crtc_vblank_put(crtc); -+ return err; -+} -+#endif /* !defined(PDP_USE_ATOMIC) */ -+ -+static const struct drm_crtc_helper_funcs pdp_crtc_helper_funcs = { -+ .mode_fixup = pdp_crtc_helper_mode_fixup, -+#if defined(PDP_USE_ATOMIC) -+ .mode_set_nofb = pdp_crtc_helper_mode_set_nofb, -+ .atomic_flush = pdp_crtc_helper_atomic_flush, -+ .atomic_enable = pdp_crtc_helper_atomic_enable, -+ .atomic_disable = pdp_crtc_helper_atomic_disable, -+#else -+ .dpms = pdp_crtc_helper_dpms, -+ .prepare = pdp_crtc_helper_prepare, -+ .commit = pdp_crtc_helper_commit, -+ .mode_set = pdp_crtc_helper_mode_set, -+ .mode_set_base = pdp_crtc_helper_mode_set_base, -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) -+ .load_lut = pdp_crtc_helper_load_lut, -+#endif -+ .mode_set_base_atomic = pdp_crtc_helper_mode_set_base_atomic, -+ .disable = pdp_crtc_helper_disable, -+#endif -+}; -+ -+static const struct drm_crtc_funcs pdp_crtc_funcs = { -+ .destroy = pdp_crtc_destroy, -+#if defined(PDP_USE_ATOMIC) -+ .reset = drm_atomic_helper_crtc_reset, -+ .set_config = drm_atomic_helper_set_config, -+ .page_flip = drm_atomic_helper_page_flip, -+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, -+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, -+#else -+ .set_config = drm_crtc_helper_set_config, -+ .page_flip = pdp_crtc_page_flip, -+#endif -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) -+ .enable_vblank = pdp_enable_vblank, -+ .disable_vblank = pdp_disable_vblank, -+#endif -+}; -+ -+ -+struct drm_crtc *pdp_crtc_create(struct drm_device *dev, uint32_t number, -+ struct drm_plane *primary_plane) -+{ -+ struct pdp_drm_private *dev_priv = dev->dev_private; -+ struct pdp_crtc *pdp_crtc; -+ const char *crtc_name = "crtc-0"; -+ int err; -+ -+ pdp_crtc = kzalloc(sizeof(*pdp_crtc), GFP_KERNEL); -+ if (!pdp_crtc) { -+ err = -ENOMEM; -+ goto err_exit; -+ } -+ -+ init_waitqueue_head(&pdp_crtc->flip_pending_wait_queue); -+ atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_NONE); -+ pdp_crtc->number = number; -+ -+ switch (number) { -+ case 0: -+ { -+ struct resource *regs; -+ const char *pdp_resname = NULL; -+ -+ if (dev_priv->version == PDP_VERSION_ODIN) { -+ switch (dev_priv->outdev) { -+ case PDP_OUTPUT_PDP1: { -+ pdp_resname = "pdp-regs"; -+ break; -+ } -+ case PDP_OUTPUT_PDP2: { -+ pdp_resname = "pdp2-regs"; -+ break; -+ } -+ default: -+ DRM_ERROR("wrong PDP output device\n"); -+ err = -ENODEV; -+ goto err_exit; -+ } -+ } else { -+ pdp_resname = "pdp-regs"; -+ } -+ -+ regs = platform_get_resource_byname( -+ to_platform_device(dev->dev), -+ IORESOURCE_MEM, -+ pdp_resname); -+ if (!regs) { -+ DRM_ERROR("missing pdp register info\n"); -+ err = -ENXIO; -+ goto err_crtc_free; -+ } -+ -+ pdp_crtc->pdp_reg_phys_base = regs->start; -+ pdp_crtc->pdp_reg_size = resource_size(regs); -+ -+ if (dev_priv->version == PDP_VERSION_ODIN || -+ dev_priv->version == PDP_VERSION_APOLLO) { -+ regs = platform_get_resource_byname( -+ to_platform_device(dev->dev), -+ IORESOURCE_MEM, -+ "pll-regs"); -+ if (!regs) { -+ DRM_ERROR("missing pll register info\n"); -+ err = -ENXIO; -+ goto 
err_crtc_free; -+ } -+ -+ pdp_crtc->pll_reg_phys_base = regs->start; -+ pdp_crtc->pll_reg_size = resource_size(regs); -+ -+ pdp_crtc->pll_reg = ioremap(pdp_crtc->pll_reg_phys_base, -+ pdp_crtc->pll_reg_size); -+ if (!pdp_crtc->pll_reg) { -+ DRM_ERROR("failed to map pll registers\n"); -+ err = -ENOMEM; -+ goto err_crtc_free; -+ } -+ } else if (dev_priv->version == PDP_VERSION_PLATO) { -+ regs = platform_get_resource_byname( -+ to_platform_device(dev->dev), -+ IORESOURCE_MEM, -+ PLATO_PDP_RESOURCE_BIF_REGS); -+ if (!regs) { -+ DRM_ERROR("missing pdp-bif register info\n"); -+ err = -ENXIO; -+ goto err_crtc_free; -+ } -+ -+ pdp_crtc->pdp_bif_reg_phys_base = regs->start; -+ pdp_crtc->pdp_bif_reg_size = resource_size(regs); -+ -+ if (!request_mem_region(pdp_crtc->pdp_bif_reg_phys_base, -+ pdp_crtc->pdp_bif_reg_size, -+ crtc_name)) { -+ DRM_ERROR("failed to reserve pdp-bif registers\n"); -+ err = -EBUSY; -+ goto err_crtc_free; -+ } -+ -+ pdp_crtc->pdp_bif_reg = -+ ioremap(pdp_crtc->pdp_bif_reg_phys_base, -+ pdp_crtc->pdp_bif_reg_size); -+ if (!pdp_crtc->pdp_bif_reg) { -+ DRM_ERROR("failed to map pdp-bif registers\n"); -+ err = -ENOMEM; -+ goto err_iounmap_regs; -+ } -+ } -+ -+ if (dev_priv->version == PDP_VERSION_ODIN) { -+ regs = platform_get_resource_byname( -+ to_platform_device(dev->dev), -+ IORESOURCE_MEM, -+ "odn-core"); -+ if (!regs) { -+ DRM_ERROR("missing odn-core info\n"); -+ err = -ENXIO; -+ goto err_crtc_free; -+ } -+ -+ pdp_crtc->odn_core_phys_base = regs->start; -+ pdp_crtc->odn_core_size = resource_size(regs); -+ -+ pdp_crtc->odn_core_reg -+ = ioremap(pdp_crtc->odn_core_phys_base, -+ pdp_crtc->odn_core_size); -+ if (!pdp_crtc->odn_core_reg) { -+ DRM_ERROR("failed to map pdp reset register\n"); -+ err = -ENOMEM; -+ goto err_iounmap_regs; -+ } -+ -+ err = pfim_init(dev, pdp_crtc, crtc_name); -+ if (err) { -+ DRM_ERROR("failed to initialise PFIM\n"); -+ goto err_iounmap_regs; -+ } -+ } -+ -+ break; -+ } -+ default: -+ DRM_ERROR("invalid crtc number %u\n", number); -+ err = -EINVAL; -+ goto err_crtc_free; -+ } -+ -+ if (!request_mem_region(pdp_crtc->pdp_reg_phys_base, -+ pdp_crtc->pdp_reg_size, -+ crtc_name)) { -+ DRM_ERROR("failed to reserve pdp registers\n"); -+ err = -EBUSY; -+ goto err_crtc_free; -+ } -+ -+ pdp_crtc->pdp_reg = ioremap(pdp_crtc->pdp_reg_phys_base, -+ pdp_crtc->pdp_reg_size); -+ if (!pdp_crtc->pdp_reg) { -+ DRM_ERROR("failed to map pdp registers\n"); -+ err = -ENOMEM; -+ goto err_release_mem_region; -+ } -+ -+ err = drm_crtc_init_with_planes(dev, &pdp_crtc->base, primary_plane, -+ NULL, &pdp_crtc_funcs, NULL); -+ if (err) { -+ DRM_ERROR("CRTC init with planes failed"); -+ goto err_iounmap_regs; -+ } -+ -+ drm_crtc_helper_add(&pdp_crtc->base, &pdp_crtc_helper_funcs); -+ -+ DRM_DEBUG_DRIVER("[CRTC:%d]\n", pdp_crtc->base.base.id); -+ -+ return &pdp_crtc->base; -+ -+err_iounmap_regs: -+ iounmap(pdp_crtc->pdp_reg); -+ if (pdp_crtc->odn_core_reg) -+ iounmap(pdp_crtc->odn_core_reg); -+ if (pdp_crtc->pdp_bif_reg) -+ iounmap(pdp_crtc->pdp_bif_reg); -+err_release_mem_region: -+ release_mem_region(pdp_crtc->pdp_reg_phys_base, pdp_crtc->pdp_reg_size); -+ pfim_deinit(pdp_crtc); -+err_crtc_free: -+ kfree(pdp_crtc); -+err_exit: -+ return ERR_PTR(err); -+} -+ -+void pdp_crtc_set_vblank_enabled(struct drm_crtc *crtc, bool enable) -+{ -+ struct pdp_drm_private *dev_priv = crtc->dev->dev_private; -+ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); -+ -+ switch (dev_priv->version) { -+ case PDP_VERSION_ODIN: -+ pdp_odin_set_vblank_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, -+ enable); 
-+ break; -+ case PDP_VERSION_APOLLO: -+ pdp_apollo_set_vblank_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, -+ enable); -+ break; -+ case PDP_VERSION_PLATO: -+ pdp_plato_set_vblank_enabled(crtc->dev->dev, -+ pdp_crtc->pdp_reg, -+ enable); -+ break; -+ default: -+ BUG(); -+ } -+} -+ -+void pdp_crtc_irq_handler(struct drm_crtc *crtc) -+{ -+ struct drm_device *dev = crtc->dev; -+ struct pdp_drm_private *dev_priv = dev->dev_private; -+ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); -+ bool handled; -+ -+ switch (dev_priv->version) { -+ case PDP_VERSION_ODIN: -+ handled = pdp_odin_check_and_clear_vblank(dev->dev, -+ pdp_crtc->pdp_reg); -+ break; -+ case PDP_VERSION_APOLLO: -+ handled = pdp_apollo_check_and_clear_vblank(dev->dev, -+ pdp_crtc->pdp_reg); -+ break; -+ case PDP_VERSION_PLATO: -+ handled = pdp_plato_check_and_clear_vblank(dev->dev, -+ pdp_crtc->pdp_reg); -+ break; -+ default: -+ handled = false; -+ break; -+ } -+ -+ if (handled) { -+ enum pdp_crtc_flip_status status; -+ -+ drm_handle_vblank(dev, pdp_crtc->number); -+ -+ status = atomic_read(&pdp_crtc->flip_status); -+ if (status == PDP_CRTC_FLIP_STATUS_DONE) { -+ if (!pdp_crtc->flip_async) { -+ pdp_crtc_flip_complete(crtc); -+#if !defined(PDP_USE_ATOMIC) -+ drm_crtc_vblank_put(crtc); -+#endif -+ } -+ } -+ } -+} -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) -+void pdp_crtc_flip_event_cancel(struct drm_crtc *crtc, struct drm_file *file) -+{ -+ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); -+ unsigned long flags; -+ -+ spin_lock_irqsave(&crtc->dev->event_lock, flags); -+ -+ if (pdp_crtc->flip_event && -+ pdp_crtc->flip_event->base.file_priv == file) { -+ pdp_crtc->flip_event->base.destroy(&pdp_crtc->flip_event->base); -+ pdp_crtc->flip_event = NULL; -+ } -+ -+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); -+} -+#endif -diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_debugfs.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_debugfs.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_debugfs.c -@@ -0,0 +1,184 @@ -+/* -+ * @File -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. 
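drm_pdp_crtc.c above drives page flips through a small state machine: a flip request moves flip_status from NONE to PENDING, the dma_fence callback programs the new surface once the old buffer is idle and marks it DONE (the registers latch on vsync, so this is safe to do immediately), and the vblank handler delivers the completion event and drops back to NONE. The sketch below is a plain userspace model of that flow with simplified names; it omits the async-flip shortcut and all locking, and none of these functions exist in the driver.

#include <stdio.h>

/* Mirror of the driver's enum pdp_crtc_flip_status. */
enum flip_status {
    FLIP_STATUS_NONE = 0,
    FLIP_STATUS_PENDING,
    FLIP_STATUS_DONE,
};

static enum flip_status status = FLIP_STATUS_NONE;

/* Stand-in for pdp_crtc_page_flip(): only one flip may be in flight. */
static void page_flip_request(void)
{
    if (status != FLIP_STATUS_NONE) {
        printf("flip rejected: previous flip still in flight (-EBUSY)\n");
        return;
    }
    status = FLIP_STATUS_PENDING;
    printf("flip scheduled, waiting on old buffer's fence\n");
}

/* Stand-in for the fence callback: old buffer idle, program the surface. */
static void fence_signalled(void)
{
    if (status == FLIP_STATUS_PENDING) {
        status = FLIP_STATUS_DONE;
        printf("new surface programmed (latches on vsync)\n");
    }
}

/* Stand-in for pdp_crtc_irq_handler(): deliver the event on vblank. */
static void vblank_irq(void)
{
    if (status == FLIP_STATUS_DONE) {
        status = FLIP_STATUS_NONE;
        printf("vblank: flip event sent, CRTC idle again\n");
    }
}

int main(void)
{
    page_flip_request();
    page_flip_request();  /* second request while pending is refused */
    fence_signalled();
    vblank_irq();
    page_flip_request();  /* accepted again once the state is NONE */
    return 0;
}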
-+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include -+ -+#include -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) -+#include -+#include -+#include -+#endif -+ -+#include "drm_pdp_drv.h" -+ -+#define PDP_DEBUGFS_DISPLAY_ENABLED "display_enabled" -+ -+static int display_enabled_open(struct inode *inode, struct file *file) -+{ -+ file->private_data = inode->i_private; -+ -+ return 0; -+} -+ -+static ssize_t display_enabled_read(struct file *file, -+ char __user *user_buffer, -+ size_t count, -+ loff_t *position_ptr) -+{ -+ struct drm_device *dev = file->private_data; -+ struct pdp_drm_private *dev_priv = dev->dev_private; -+ loff_t position = *position_ptr; -+ char buffer[] = "N\n"; -+ size_t buffer_size = ARRAY_SIZE(buffer); -+ int err; -+ -+ if (position < 0) -+ return -EINVAL; -+ else if (position >= buffer_size || count == 0) -+ return 0; -+ -+ if (dev_priv->display_enabled) -+ buffer[0] = 'Y'; -+ -+ if (count > buffer_size - position) -+ count = buffer_size - position; -+ -+ err = copy_to_user(user_buffer, &buffer[position], count); -+ if (err) -+ return -EFAULT; -+ -+ *position_ptr = position + count; -+ -+ return count; -+} -+ -+static ssize_t display_enabled_write(struct file *file, -+ const char __user *user_buffer, -+ size_t count, -+ loff_t *position) -+{ -+ struct drm_device *dev = file->private_data; -+ struct pdp_drm_private *dev_priv = dev->dev_private; -+ char buffer[3]; -+ int err; -+ -+ count = min(count, ARRAY_SIZE(buffer) - 1); -+ -+ err = copy_from_user(buffer, user_buffer, count); -+ if (err) -+ return -EFAULT; -+ buffer[count] = '\0'; -+ -+ if (!strtobool(buffer, &dev_priv->display_enabled) && dev_priv->crtc) -+ pdp_crtc_set_plane_enabled(dev_priv->crtc, dev_priv->display_enabled); -+ -+ return count; -+} -+ -+static const struct file_operations pdp_display_enabled_fops = { -+ .owner = THIS_MODULE, -+ .open = display_enabled_open, -+ .read = display_enabled_read, -+ .write = display_enabled_write, -+ .llseek = default_llseek, -+}; -+ -+static int pdp_debugfs_create(struct drm_minor *minor, const char *name, -+ umode_t mode, const struct file_operations *fops) -+{ -+ struct drm_info_node *node; -+ -+ /* -+ * We can't get access to our driver private data when this function is -+ * called so we fake up a node so that we can clean up entries later on. 
-+ */ -+ node = kzalloc(sizeof(*node), GFP_KERNEL); -+ if (!node) -+ return -ENOMEM; -+ -+ node->dent = debugfs_create_file(name, mode, minor->debugfs_root, -+ minor->dev, fops); -+ if (!node->dent) { -+ kfree(node); -+ return -ENOMEM; -+ } -+ -+ node->minor = minor; -+ node->info_ent = (void *) fops; -+ -+ mutex_lock(&minor->debugfs_lock); -+ list_add(&node->list, &minor->debugfs_list); -+ mutex_unlock(&minor->debugfs_lock); -+ -+ return 0; -+} -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)) -+int pdp_debugfs_init(struct drm_minor *minor) -+#else -+void pdp_debugfs_init(struct drm_minor *minor) -+#endif -+{ -+ int err; -+ -+ err = pdp_debugfs_create(minor, PDP_DEBUGFS_DISPLAY_ENABLED, -+ 0100644, -+ &pdp_display_enabled_fops); -+ if (err) { -+ DRM_INFO("failed to create '%s' debugfs entry\n", -+ PDP_DEBUGFS_DISPLAY_ENABLED); -+ } -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)) -+ return err; -+#endif -+} -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+void pdp_debugfs_cleanup(struct drm_minor *minor) -+{ -+ drm_debugfs_remove_files((struct drm_info_list *) &pdp_display_enabled_fops, -+ 1, minor); -+} -+#endif -diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.c -@@ -0,0 +1,889 @@ -+/* -+ * @File -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". 
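The display_enabled debugfs file above parses writes with strtobool() and, on success, toggles the primary plane via pdp_crtc_set_plane_enabled(), so the output can be blanked without tearing down the mode. Below is a userspace sketch of the accepted spellings; parse_bool() is a hypothetical stand-in that only models the classic y/Y/1 and n/N/0 cases handled by strtobool().

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical userspace model of strtobool(): accept y/Y/1 and n/N/0,
 * reject anything else, as the write handler above relies on. */
static int parse_bool(const char *s, bool *res)
{
    switch (s[0]) {
    case 'y': case 'Y': case '1':
        *res = true;
        return 0;
    case 'n': case 'N': case '0':
        *res = false;
        return 0;
    default:
        return -1;
    }
}

int main(void)
{
    const char *inputs[] = { "Y", "0", "maybe" };
    unsigned int i;

    for (i = 0; i < 3; i++) {
        bool enabled;

        if (!parse_bool(inputs[i], &enabled))
            printf("'%s' -> display %s\n", inputs[i],
                   enabled ? "enabled" : "disabled");
        else
            printf("'%s' -> rejected\n", inputs[i]);
    }
    return 0;
}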
-+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) -+#include -+#include -+#include -+#include -+#include -+#include -+#else -+#include -+#endif -+ -+#include "tc_drv.h" -+#include "pvrversion.h" -+ -+#include "drm_pdp_drv.h" -+#include "drm_pdp_gem.h" -+#include "pdp_drm.h" -+ -+#include "odin_defs.h" -+ -+#if defined(SUPPORT_PLATO_DISPLAY) -+#include "plato_drv.h" -+#include "pdp2_regs.h" -+#include "pdp2_mmu_regs.h" -+#endif -+ -+#define DRIVER_NAME "pdp" -+#define DRIVER_DESC "Imagination Technologies PDP DRM Display Driver" -+#define DRIVER_DATE "20150612" -+ -+#if defined(PDP_USE_ATOMIC) -+#include -+ -+#define PVR_DRIVER_ATOMIC DRIVER_ATOMIC -+#else -+#define PVR_DRIVER_ATOMIC 0 -+#endif -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) -+#define PVR_DRIVER_PRIME 0 -+#else -+#define PVR_DRIVER_PRIME DRIVER_PRIME -+#endif -+ -+/* This header must always be included last */ -+#include "kernel_compatibility.h" -+ -+static bool display_enable = true; -+static unsigned int output_device = 1; -+ -+module_param(display_enable, bool, 0444); -+MODULE_PARM_DESC(display_enable, "Enable all displays (default: Y)"); -+ -+module_param(output_device, uint, 0444); -+MODULE_PARM_DESC(output_device, "PDP output device (default: PDP1)"); -+ -+struct pdp_gem_private *pdp_gem_get_private(struct drm_device *dev) -+{ -+ struct pdp_drm_private *dev_priv = dev->dev_private; -+ -+ return dev_priv->gem_priv; -+} -+ -+static void pdp_irq_handler(void *data) -+{ -+ struct drm_device *dev = data; -+ struct drm_crtc *crtc; -+ -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) -+ pdp_crtc_irq_handler(crtc); -+} -+ -+static int pdp_early_load(struct drm_device *dev) -+{ -+ struct pdp_drm_private *dev_priv; -+ int err; -+ -+ DRM_DEBUG("loading %s device\n", to_platform_device(dev->dev)->name); -+ -+ platform_set_drvdata(to_platform_device(dev->dev), dev); -+ -+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); -+ if (!dev_priv) -+ return -ENOMEM; -+ -+ dev->dev_private = dev_priv; -+ dev_priv->dev = dev; -+ dev_priv->version = (enum pdp_version) -+ to_platform_device(dev->dev)->id_entry->driver_data; -+ dev_priv->display_enabled = display_enable; -+ -+#if !defined(SUPPORT_PLATO_DISPLAY) -+ /* PDP output device selection */ -+ dev_priv->outdev = (enum pdp_output_device)output_device; -+ if (dev_priv->outdev == PDP_OUTPUT_PDP2 && -+ !tc_pdp2_compatible(dev->dev->parent)) { -+ DRM_ERROR("TC doesn't support PDP2\n"); -+ err = -ENODEV; -+ goto err_dev_priv_free; -+ } -+ -+ if (dev_priv->outdev == PDP_OUTPUT_PDP1) { -+ dev_priv->pdp_interrupt = TC_INTERRUPT_PDP; -+ } else if (dev_priv->outdev == PDP_OUTPUT_PDP2) { -+ dev_priv->pdp_interrupt = TC_INTERRUPT_PDP2; -+ } else { -+ DRM_ERROR("wrong PDP device number (outdev=%u)\n", -+ dev_priv->outdev); -+ err = -ENODEV; -+ goto err_dev_priv_free; -+ } -+ -+ /* PDP FBC module support detection */ -+ dev_priv->pfim_capable = (dev_priv->outdev 
== PDP_OUTPUT_PDP2 && -+ tc_pfim_capable(dev->dev->parent)); -+#endif -+ -+ if (dev_priv->version == PDP_VERSION_APOLLO || -+ dev_priv->version == PDP_VERSION_ODIN) { -+#if !defined(SUPPORT_PLATO_DISPLAY) -+ err = tc_enable(dev->dev->parent); -+ if (err) { -+ DRM_ERROR("failed to enable parent device (err=%d)\n", err); -+ goto err_dev_priv_free; -+ } -+ -+ /* -+ * check whether it's Orion PDP for picking -+ * the right display mode list later on -+ */ -+ if (dev_priv->version == PDP_VERSION_ODIN) -+ dev_priv->subversion = (enum pdp_odin_subversion) -+ tc_odin_subvers(dev->dev->parent); -+#endif -+ } -+ -+#if defined(SUPPORT_PLATO_DISPLAY) -+ else if (dev_priv->version == PDP_VERSION_PLATO) { -+// XXX do we need to do this? Plato driver has already enabled device. -+ err = plato_enable(dev->dev->parent); -+ if (err) { -+ DRM_ERROR("failed to enable parent device (err=%d)\n", err); -+ goto err_dev_priv_free; -+ } -+ } -+#endif -+ -+ dev_priv->gem_priv = pdp_gem_init(dev, 0); -+ if (!dev_priv->gem_priv) { -+ DRM_ERROR("gem initialisation failed\n"); -+ err = -ENOMEM; -+ goto err_disable_parent_device; -+ } -+ -+ err = pdp_modeset_early_init(dev_priv); -+ if (err) { -+ DRM_ERROR("early modeset initialisation failed (err=%d)\n", -+ err); -+ goto err_gem_cleanup; -+ } -+ -+ err = drm_vblank_init(dev_priv->dev, 1); -+ if (err) { -+ DRM_ERROR("failed to complete vblank init (err=%d)\n", err); -+ goto err_modeset_late_cleanup; -+ } -+ -+ if (dev_priv->version == PDP_VERSION_APOLLO || -+ dev_priv->version == PDP_VERSION_ODIN) { -+#if !defined(SUPPORT_PLATO_DISPLAY) -+ err = tc_set_interrupt_handler(dev->dev->parent, -+ dev_priv->pdp_interrupt, -+ pdp_irq_handler, -+ dev); -+ if (err) { -+ DRM_ERROR("failed to set interrupt handler (err=%d)\n", -+ err); -+ goto err_vblank_cleanup; -+ } -+ -+ err = tc_enable_interrupt(dev->dev->parent, -+ dev_priv->pdp_interrupt); -+ if (err) { -+ DRM_ERROR("failed to enable pdp interrupts (err=%d)\n", -+ err); -+ goto err_uninstall_interrupt_handle; -+ } -+#endif -+ } -+#if defined(SUPPORT_PLATO_DISPLAY) -+ else if (dev_priv->version == PDP_VERSION_PLATO) { -+ err = plato_set_interrupt_handler(dev->dev->parent, -+ PLATO_INTERRUPT_PDP, -+ pdp_irq_handler, -+ dev); -+ if (err) { -+ DRM_ERROR("failed to set interrupt handler (err=%d)\n", -+ err); -+ goto err_vblank_cleanup; -+ } -+ -+ err = plato_enable_interrupt(dev->dev->parent, PLATO_INTERRUPT_PDP); -+ if (err) { -+ DRM_ERROR("failed to enable pdp interrupts (err=%d)\n", -+ err); -+ goto err_uninstall_interrupt_handle; -+ } -+ } -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)) -+ dev->irq_enabled = true; -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) -+ dev->vblank_disable_allowed = 1; -+#endif -+ -+ return 0; -+ -+err_uninstall_interrupt_handle: -+ if (dev_priv->version == PDP_VERSION_APOLLO || -+ dev_priv->version == PDP_VERSION_ODIN) { -+#if !defined(SUPPORT_PLATO_DISPLAY) -+ tc_set_interrupt_handler(dev->dev->parent, -+ dev_priv->pdp_interrupt, -+ NULL, -+ NULL); -+#endif -+ } -+#if defined(SUPPORT_PLATO_DISPLAY) -+ else if (dev_priv->version == PDP_VERSION_PLATO) { -+ plato_set_interrupt_handler(dev->dev->parent, -+ PLATO_INTERRUPT_PDP, -+ NULL, -+ NULL); -+ } -+#endif -+err_vblank_cleanup: -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) -+ /* Called by drm_dev_fini in Linux 4.11.0 and later */ -+ drm_vblank_cleanup(dev_priv->dev); -+#endif -+err_modeset_late_cleanup: -+ pdp_modeset_late_cleanup(dev_priv); -+err_gem_cleanup: -+ pdp_gem_cleanup(dev_priv->gem_priv); 
-+err_disable_parent_device: -+ if (dev_priv->version == PDP_VERSION_APOLLO || -+ dev_priv->version == PDP_VERSION_ODIN) { -+#if !defined(SUPPORT_PLATO_DISPLAY) -+ tc_disable(dev->dev->parent); -+#endif -+ } -+#if defined(SUPPORT_PLATO_DISPLAY) -+ else if (dev_priv->version == PDP_VERSION_PLATO) -+ plato_disable(dev->dev->parent); -+#endif -+err_dev_priv_free: -+ kfree(dev_priv); -+ return err; -+} -+ -+static int pdp_late_load(struct drm_device *dev) -+{ -+ struct pdp_drm_private *dev_priv = dev->dev_private; -+ int err; -+ -+ err = pdp_modeset_late_init(dev_priv); -+ if (err) { -+ DRM_ERROR("late modeset initialisation failed (err=%d)\n", -+ err); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static void pdp_early_unload(struct drm_device *dev) -+{ -+ struct pdp_drm_private *dev_priv = dev->dev_private; -+ -+#if defined(CONFIG_DRM_FBDEV_EMULATION) && defined(PDP_USE_ATOMIC) -+ drm_atomic_helper_shutdown(dev); -+#endif -+ pdp_modeset_early_cleanup(dev_priv); -+} -+ -+static void pdp_late_unload(struct drm_device *dev) -+{ -+ struct pdp_drm_private *dev_priv = dev->dev_private; -+ -+ DRM_INFO("unloading %s device.\n", to_platform_device(dev->dev)->name); -+ if (dev_priv->version == PDP_VERSION_APOLLO || -+ dev_priv->version == PDP_VERSION_ODIN) { -+#if !defined(SUPPORT_PLATO_DISPLAY) -+ tc_disable_interrupt(dev->dev->parent, dev_priv->pdp_interrupt); -+ tc_set_interrupt_handler(dev->dev->parent, -+ dev_priv->pdp_interrupt, -+ NULL, -+ NULL); -+#endif -+ } -+#if defined(SUPPORT_PLATO_DISPLAY) -+ else if (dev_priv->version == PDP_VERSION_PLATO) { -+ plato_disable_interrupt(dev->dev->parent, PLATO_INTERRUPT_PDP); -+ plato_set_interrupt_handler(dev->dev->parent, -+ PLATO_INTERRUPT_PDP, -+ NULL, -+ NULL); -+ } -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) -+ /* Called by drm_dev_fini in Linux 4.11.0 and later */ -+ drm_vblank_cleanup(dev_priv->dev); -+#endif -+ pdp_modeset_late_cleanup(dev_priv); -+ pdp_gem_cleanup(dev_priv->gem_priv); -+ -+ if (dev_priv->version == PDP_VERSION_APOLLO || -+ dev_priv->version == PDP_VERSION_ODIN) { -+#if !defined(SUPPORT_PLATO_DISPLAY) -+ tc_disable(dev->dev->parent); -+#endif -+ } -+#if defined(SUPPORT_PLATO_DISPLAY) -+ else if (dev_priv->version == PDP_VERSION_PLATO) -+ plato_disable(dev->dev->parent); -+#endif -+ -+ kfree(dev_priv); -+} -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) -+static int pdp_load(struct drm_device *dev, unsigned long flags) -+{ -+ int err; -+ -+ err = pdp_early_load(dev); -+ if (err) -+ return err; -+ -+ err = pdp_late_load(dev); -+ if (err) { -+ pdp_late_unload(dev); -+ return err; -+ } -+ -+ return 0; -+} -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) -+static int pdp_unload(struct drm_device *dev) -+#else -+static void pdp_unload(struct drm_device *dev) -+#endif -+{ -+ pdp_early_unload(dev); -+ pdp_late_unload(dev); -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) -+ return 0; -+#endif -+} -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) -+static void pdp_preclose(struct drm_device *dev, struct drm_file *file) -+{ -+ struct drm_crtc *crtc; -+ -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) -+ pdp_crtc_flip_event_cancel(crtc, file); -+} -+#endif -+ -+#if !defined(CONFIG_DRM_FBDEV_EMULATION) -+static inline void pdp_teardown_drm_config(struct drm_device *dev) -+{ -+#if defined(PDP_USE_ATOMIC) -+ drm_atomic_helper_shutdown(dev); -+#else -+ struct drm_crtc *crtc; -+ -+ DRM_INFO("%s: %s device\n", __func__, to_platform_device(dev->dev)->name); -+ -+ /* -+ * 
When non atomic driver is in use, manually trigger ->set_config -+ * with an empty mode set associated to this crtc. -+ */ -+ drm_modeset_lock_all(dev); -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -+ if (crtc->primary->fb) { -+ struct drm_mode_set mode_set = { .crtc = crtc }; -+ int err; -+ -+ err = drm_mode_set_config_internal(&mode_set); -+ if (err) -+ DRM_ERROR("failed to disable crtc %p (err=%d)\n", -+ crtc, err); -+ } -+ } -+ drm_modeset_unlock_all(dev); -+#endif -+} -+#endif /* !defined(CONFIG_DRM_FBDEV_EMULATION) */ -+ -+static void pdp_lastclose(struct drm_device *dev) -+{ -+#if defined(CONFIG_DRM_FBDEV_EMULATION) -+ struct pdp_drm_private *dev_priv = dev->dev_private; -+ struct pdp_fbdev *fbdev = dev_priv->fbdev; -+ int err; -+ -+ if (fbdev) { -+ /* -+ * This is a fbdev driver, therefore never attempt to shutdown -+ * on a client disconnecting. -+ */ -+ err = drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->helper); -+ if (err) -+ DRM_ERROR("failed to restore mode (err=%d)\n", err); -+ } -+#else -+ pdp_teardown_drm_config(dev); -+#endif -+} -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) -+int pdp_enable_vblank(struct drm_crtc *crtc) -+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) -+static int pdp_enable_vblank(struct drm_device *dev, unsigned int pipe) -+#else -+static int pdp_enable_vblank(struct drm_device *dev, int pipe) -+#endif -+{ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) -+ struct drm_device *dev = crtc->dev; -+ unsigned int pipe = drm_crtc_index(crtc); -+#endif -+ struct pdp_drm_private *dev_priv = dev->dev_private; -+ -+ switch (pipe) { -+ case 0: -+ pdp_crtc_set_vblank_enabled(dev_priv->crtc, true); -+ break; -+ default: -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) -+ DRM_ERROR("invalid crtc %u\n", pipe); -+#else -+ DRM_ERROR("invalid crtc %d\n", pipe); -+#endif -+ return -EINVAL; -+ } -+ -+ DRM_DEBUG("vblank interrupts enabled for crtc %d\n", pipe); -+ -+ return 0; -+} -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) -+void pdp_disable_vblank(struct drm_crtc *crtc) -+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) -+static void pdp_disable_vblank(struct drm_device *dev, unsigned int pipe) -+#else -+static void pdp_disable_vblank(struct drm_device *dev, int pipe) -+#endif -+{ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) -+ struct drm_device *dev = crtc->dev; -+ unsigned int pipe = drm_crtc_index(crtc); -+#endif -+ struct pdp_drm_private *dev_priv = dev->dev_private; -+ -+ switch (pipe) { -+ case 0: -+ pdp_crtc_set_vblank_enabled(dev_priv->crtc, false); -+ break; -+ default: -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) -+ DRM_ERROR("invalid crtc %u\n", pipe); -+#else -+ DRM_ERROR("invalid crtc %d\n", pipe); -+#endif -+ return; -+ } -+ -+ DRM_DEBUG("vblank interrupts disabled for crtc %d\n", pipe); -+} -+ -+static int pdp_gem_object_create_ioctl(struct drm_device *dev, -+ void *data, -+ struct drm_file *file) -+{ -+ struct pdp_drm_private *dev_priv = dev->dev_private; -+ -+ return pdp_gem_object_create_ioctl_priv(dev, -+ dev_priv->gem_priv, -+ data, -+ file); -+} -+ -+static int pdp_gem_dumb_create(struct drm_file *file, -+ struct drm_device *dev, -+ struct drm_mode_create_dumb *args) -+{ -+ struct pdp_drm_private *dev_priv = dev->dev_private; -+ -+ return pdp_gem_dumb_create_priv(file, -+ dev, -+ dev_priv->gem_priv, -+ args); -+} -+ -+void pdp_gem_object_free(struct drm_gem_object *obj) -+{ -+ struct pdp_drm_private *dev_priv = obj->dev->dev_private; -+ -+ 
pdp_gem_object_free_priv(dev_priv->gem_priv, obj); -+} -+ -+static const struct drm_ioctl_desc pdp_ioctls[] = { -+ DRM_IOCTL_DEF_DRV(PDP_GEM_CREATE, pdp_gem_object_create_ioctl, -+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), -+ DRM_IOCTL_DEF_DRV(PDP_GEM_MMAP, pdp_gem_object_mmap_ioctl, -+ DRM_AUTH | DRM_UNLOCKED), -+ DRM_IOCTL_DEF_DRV(PDP_GEM_CPU_PREP, pdp_gem_object_cpu_prep_ioctl, -+ DRM_AUTH | DRM_UNLOCKED), -+ DRM_IOCTL_DEF_DRV(PDP_GEM_CPU_FINI, pdp_gem_object_cpu_fini_ioctl, -+ DRM_AUTH | DRM_UNLOCKED), -+}; -+ -+static const struct file_operations pdp_driver_fops = { -+ .owner = THIS_MODULE, -+ .open = drm_open, -+ .release = drm_release, -+ .unlocked_ioctl = drm_ioctl, -+ .mmap = drm_gem_mmap, -+ .poll = drm_poll, -+ .read = drm_read, -+ .llseek = noop_llseek, -+#ifdef CONFIG_COMPAT -+ .compat_ioctl = drm_compat_ioctl, -+#endif -+}; -+ -+static struct drm_driver pdp_drm_driver = { -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) -+ .load = NULL, -+ .unload = NULL, -+#else -+ .load = pdp_load, -+ .unload = pdp_unload, -+#endif -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) -+ .preclose = pdp_preclose, -+#endif -+ .lastclose = pdp_lastclose, -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && \ -+ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) -+ .set_busid = drm_platform_set_busid, -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) -+ .get_vblank_counter = drm_vblank_count, -+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ .get_vblank_counter = drm_vblank_no_hw_counter, -+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0)) -+ .enable_vblank = pdp_enable_vblank, -+ .disable_vblank = pdp_disable_vblank, -+#endif -+ -+ .debugfs_init = pdp_debugfs_init, -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ .debugfs_cleanup = pdp_debugfs_cleanup, -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)) -+ .gem_prime_export = pdp_gem_prime_export, -+ .gem_free_object = pdp_gem_object_free, -+ .gem_vm_ops = &pdp_gem_vm_ops, -+#endif -+ -+ .gem_prime_import = pdp_gem_prime_import, -+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, -+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, -+ .gem_prime_import_sg_table = pdp_gem_prime_import_sg_table, -+ -+ // Set dumb_create to NULL to avoid xorg owning the display (if xorg is running). 
-+ .dumb_create = pdp_gem_dumb_create, -+ .dumb_map_offset = pdp_gem_dumb_map_offset, -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) -+ .dumb_destroy = drm_gem_dumb_destroy, -+#endif -+ -+ .name = DRIVER_NAME, -+ .desc = DRIVER_DESC, -+ .date = DRIVER_DATE, -+ .major = PVRVERSION_MAJ, -+ .minor = PVRVERSION_MIN, -+ .patchlevel = PVRVERSION_BUILD, -+ -+ .driver_features = DRIVER_GEM | -+ DRIVER_MODESET | -+ PVR_DRIVER_PRIME | -+ PVR_DRIVER_ATOMIC, -+ .ioctls = pdp_ioctls, -+ .num_ioctls = ARRAY_SIZE(pdp_ioctls), -+ .fops = &pdp_driver_fops, -+}; -+ -+#if defined(SUPPORT_PLATO_DISPLAY) -+ -+static int compare_parent_dev(struct device *dev, void *data) -+{ -+ struct device *pdp_dev = data; -+ -+ return dev->parent && dev->parent == pdp_dev->parent; -+} -+ -+static int pdp_component_bind(struct device *dev) -+{ -+ struct platform_device *pdev = to_platform_device(dev); -+ struct drm_device *ddev; -+ int ret; -+ -+ dev_info(dev, "Loading platform device\n"); -+ ddev = drm_dev_alloc(&pdp_drm_driver, &pdev->dev); -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) -+ if (IS_ERR(ddev)) -+ return PTR_ERR(ddev); -+#else -+ if (!ddev) -+ return -ENOMEM; -+#endif -+ -+ // XXX no need to do this as happens in pdp_early_load -+ platform_set_drvdata(pdev, ddev); -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) -+ /* Needed by drm_platform_set_busid */ -+ ddev->platformdev = pdev; -+#endif -+ BUG_ON(pdp_drm_driver.load != NULL); -+ -+ ret = pdp_early_load(ddev); -+ if (ret) -+ goto err_drm_dev_put; -+ -+ DRM_DEBUG_DRIVER("Binding other components\n"); -+ /* Bind other components, including HDMI encoder/connector */ -+ ret = component_bind_all(dev, ddev); -+ if (ret) { -+ DRM_ERROR("Failed to bind other components (ret=%d)\n", ret); -+ goto err_drm_dev_late_unload; -+ } -+ -+ ret = drm_dev_register(ddev, 0); -+ if (ret) -+ goto err_drm_dev_late_unload; -+ -+ ret = pdp_late_load(ddev); -+ if (ret) -+ goto err_drm_dev_unregister; -+ -+ return 0; -+ -+err_drm_dev_unregister: -+ drm_dev_unregister(ddev); -+err_drm_dev_late_unload: -+ pdp_late_unload(ddev); -+err_drm_dev_put: -+ drm_dev_put(ddev); -+ return ret; -+} -+ -+static void pdp_component_unbind(struct device *dev) -+{ -+ struct drm_device *ddev = dev_get_drvdata(dev); -+ -+ dev_info(dev, "Unloading platform device\n"); -+ BUG_ON(pdp_drm_driver.unload != NULL); -+ pdp_early_unload(ddev); -+ drm_dev_unregister(ddev); -+ pdp_late_unload(ddev); -+ component_unbind_all(dev, ddev); -+ drm_dev_put(ddev); -+} -+ -+static const struct component_master_ops pdp_component_ops = { -+ .bind = pdp_component_bind, -+ .unbind = pdp_component_unbind, -+}; -+ -+ -+static int pdp_probe(struct platform_device *pdev) -+{ -+ struct device *dev = &pdev->dev; -+ struct component_match *match = NULL; -+ -+ component_match_add(dev, &match, compare_parent_dev, dev); -+ return component_master_add_with_match(dev, &pdp_component_ops, match); -+} -+ -+static int pdp_remove(struct platform_device *pdev) -+{ -+ component_master_del(&pdev->dev, &pdp_component_ops); -+ return 0; -+} -+ -+#else // !SUPPORT_PLATO_DISPLAY -+ -+static int pdp_probe(struct platform_device *pdev) -+{ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) -+ struct drm_device *ddev; -+ int ret; -+ -+ ddev = drm_dev_alloc(&pdp_drm_driver, &pdev->dev); -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) -+ if (IS_ERR(ddev)) -+ return PTR_ERR(ddev); -+#else -+ if (!ddev) -+ return -ENOMEM; -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) -+ /* Needed by drm_platform_set_busid */ 
-+ ddev->platformdev = pdev; -+#endif -+ /* -+ * The load callback, called from drm_dev_register, is deprecated, -+ * because of potential race conditions. -+ */ -+ BUG_ON(pdp_drm_driver.load != NULL); -+ -+ ret = pdp_early_load(ddev); -+ if (ret) -+ goto err_drm_dev_put; -+ -+ ret = drm_dev_register(ddev, 0); -+ if (ret) -+ goto err_drm_dev_late_unload; -+ -+ ret = pdp_late_load(ddev); -+ if (ret) -+ goto err_drm_dev_unregister; -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) -+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", -+ pdp_drm_driver.name, -+ pdp_drm_driver.major, -+ pdp_drm_driver.minor, -+ pdp_drm_driver.patchlevel, -+ pdp_drm_driver.date, -+ ddev->primary->index); -+#endif -+ return 0; -+ -+err_drm_dev_unregister: -+ drm_dev_unregister(ddev); -+err_drm_dev_late_unload: -+ pdp_late_unload(ddev); -+err_drm_dev_put: -+ drm_dev_put(ddev); -+ return ret; -+#else -+ return drm_platform_init(&pdp_drm_driver, pdev); -+#endif -+} -+ -+static int pdp_remove(struct platform_device *pdev) -+{ -+ struct drm_device *ddev = platform_get_drvdata(pdev); -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) -+ /* -+ * The unload callback, called from drm_dev_unregister, is -+ * deprecated. -+ */ -+ BUG_ON(pdp_drm_driver.unload != NULL); -+ -+ pdp_early_unload(ddev); -+ -+ drm_dev_unregister(ddev); -+ -+ pdp_late_unload(ddev); -+ -+ drm_dev_put(ddev); -+#else -+ drm_put_dev(ddev); -+#endif -+ return 0; -+} -+ -+#endif // SUPPORT_PLATO_DISPLAY -+ -+static void pdp_shutdown(struct platform_device *pdev) -+{ -+} -+ -+static struct platform_device_id pdp_platform_device_id_table[] = { -+ { .name = APOLLO_DEVICE_NAME_PDP, .driver_data = PDP_VERSION_APOLLO }, -+ { .name = ODN_DEVICE_NAME_PDP, .driver_data = PDP_VERSION_ODIN }, -+#if defined(SUPPORT_PLATO_DISPLAY) -+#if defined(PLATO_MULTI_DEVICE) -+ { .name = PLATO_MAKE_DEVICE_NAME_PDP(0), -+ .driver_data = PDP_VERSION_PLATO }, -+ { .name = PLATO_MAKE_DEVICE_NAME_PDP(1), -+ .driver_data = PDP_VERSION_PLATO }, -+ { .name = PLATO_MAKE_DEVICE_NAME_PDP(2), -+ .driver_data = PDP_VERSION_PLATO }, -+ { .name = PLATO_MAKE_DEVICE_NAME_PDP(3), -+ .driver_data = PDP_VERSION_PLATO }, -+#else -+ { .name = PLATO_DEVICE_NAME_PDP, .driver_data = PDP_VERSION_PLATO }, -+#endif -+#endif // SUPPORT_PLATO_DISPLAY -+ { }, -+}; -+ -+static struct platform_driver pdp_platform_driver = { -+ .probe = pdp_probe, -+ .remove = pdp_remove, -+ .shutdown = pdp_shutdown, -+ .driver = { -+ .owner = THIS_MODULE, -+ .name = DRIVER_NAME, -+ }, -+ .id_table = pdp_platform_device_id_table, -+}; -+ -+module_platform_driver(pdp_platform_driver); -+ -+MODULE_AUTHOR("Imagination Technologies Ltd. "); -+MODULE_DESCRIPTION(DRIVER_DESC); -+MODULE_DEVICE_TABLE(platform, pdp_platform_device_id_table); -+MODULE_LICENSE("Dual MIT/GPL"); -diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.h b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.h -@@ -0,0 +1,242 @@ -+/* -+ * @File -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ */ -+ -+#if !defined(__DRM_PDP_DRV_H__) -+#define __DRM_PDP_DRV_H__ -+ -+#include -+#include -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) -+#include -+#else -+#include -+#endif -+ -+#include -+#include -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) -+#include -+#endif -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)) -+#include -+#endif -+ -+#include "pdp_common.h" -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && \ -+ !defined(PVR_ANDROID_USE_PDP_LEGACY) -+#define PDP_USE_ATOMIC -+#endif -+ -+struct pdp_gem_context; -+enum pdp_crtc_flip_status; -+struct pdp_flip_data; -+struct pdp_gem_private; -+ -+#if !defined(SUPPORT_PLATO_DISPLAY) -+struct tc_pdp_platform_data; -+#else -+struct plato_pdp_platform_data; -+#endif -+ -+struct pdp_drm_private { -+ struct drm_device *dev; -+#if defined(CONFIG_DRM_FBDEV_EMULATION) -+ struct pdp_fbdev *fbdev; -+#endif -+ -+ enum pdp_version version; -+ -+ /* differentiate Orion from base Odin PDP */ -+ enum pdp_odin_subversion subversion; -+ -+ /* created by pdp_gem_init */ -+ struct pdp_gem_private *gem_priv; -+ -+ /* preferred output device */ -+ enum pdp_output_device outdev; -+ uint32_t pdp_interrupt; -+ -+ /* PDP FBC Decompression module support */ -+ bool pfim_capable; -+ -+ /* initialised by pdp_modeset_early_init */ -+ struct drm_plane *plane; -+ struct drm_crtc *crtc; -+ struct drm_connector *connector; -+ struct drm_encoder *encoder; -+ -+ bool display_enabled; -+}; -+ -+struct pdp_crtc { -+ struct drm_crtc base; -+ -+ uint32_t number; -+ -+ resource_size_t pdp_reg_size; -+ resource_size_t pdp_reg_phys_base; -+ void __iomem *pdp_reg; -+ -+ resource_size_t pdp_bif_reg_size; -+ resource_size_t pdp_bif_reg_phys_base; -+ void __iomem *pdp_bif_reg; -+ -+ resource_size_t pll_reg_size; -+ resource_size_t pll_reg_phys_base; -+ void __iomem *pll_reg; -+ -+ resource_size_t odn_core_size; /* needed for odin pdp clk reset */ -+ resource_size_t odn_core_phys_base; -+ void __iomem *odn_core_reg; -+ -+ resource_size_t pfim_reg_size; -+ resource_size_t pfim_reg_phys_base; -+ void __iomem *pfim_reg; -+ -+ wait_queue_head_t flip_pending_wait_queue; -+ -+ /* Reuse the drm_device event_lock to protect these */ -+ atomic_t flip_status; -+ struct drm_pending_vblank_event *flip_event; -+ struct drm_framebuffer *old_fb; -+ struct pdp_flip_data *flip_data; -+ bool flip_async; -+}; -+ -+#define to_pdp_crtc(crtc) container_of(crtc, struct pdp_crtc, base) -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) -+struct drm_gem_object; -+ -+struct pdp_framebuffer { -+ struct drm_framebuffer base; -+ struct drm_gem_object *obj[1]; -+}; -+ -+#define to_pdp_framebuffer(fb) container_of(fb, struct pdp_framebuffer, base) -+#define to_drm_framebuffer(fb) (&(fb)->base) -+#else -+#define pdp_framebuffer drm_framebuffer -+#define to_pdp_framebuffer(fb) (fb) -+#define to_drm_framebuffer(fb) (fb) -+#endif -+ -+#if defined(CONFIG_DRM_FBDEV_EMULATION) -+struct pdp_fbdev { -+ struct drm_fb_helper helper; -+ struct pdp_framebuffer fb; -+ struct pdp_drm_private *priv; -+}; -+#endif -+ -+static inline u32 pdp_drm_fb_cpp(struct drm_framebuffer *fb) -+{ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) -+ return fb->format->cpp[0]; -+#else -+ return fb->bits_per_pixel / 8; -+#endif -+} -+ -+static inline u32 pdp_drm_fb_format(struct drm_framebuffer *fb) -+{ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) -+ return fb->format->format; -+#else -+ return fb->pixel_format; -+#endif -+} -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)) 
-+int pdp_debugfs_init(struct drm_minor *minor); -+#else -+void pdp_debugfs_init(struct drm_minor *minor); -+#endif -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+void pdp_debugfs_cleanup(struct drm_minor *minor); -+#endif -+ -+struct drm_plane *pdp_plane_create(struct drm_device *dev, -+ enum drm_plane_type type); -+void pdp_plane_set_surface(struct drm_crtc *crtc, struct drm_plane *plane, -+ struct drm_framebuffer *fb, -+ const uint32_t src_x, const uint32_t src_y); -+ -+struct drm_crtc *pdp_crtc_create(struct drm_device *dev, uint32_t number, -+ struct drm_plane *primary_plane); -+void pdp_crtc_set_plane_enabled(struct drm_crtc *crtc, bool enable); -+void pdp_crtc_set_vblank_enabled(struct drm_crtc *crtc, bool enable); -+void pdp_crtc_irq_handler(struct drm_crtc *crtc); -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) -+void pdp_crtc_flip_event_cancel(struct drm_crtc *crtc, struct drm_file *file); -+#endif -+ -+struct drm_connector *pdp_dvi_connector_create(struct drm_device *dev); -+ -+struct drm_encoder *pdp_tmds_encoder_create(struct drm_device *dev); -+ -+int pdp_modeset_early_init(struct pdp_drm_private *dev_priv); -+int pdp_modeset_late_init(struct pdp_drm_private *dev_priv); -+void pdp_modeset_early_cleanup(struct pdp_drm_private *dev_priv); -+void pdp_modeset_late_cleanup(struct pdp_drm_private *dev_priv); -+ -+#if defined(CONFIG_DRM_FBDEV_EMULATION) -+struct pdp_fbdev *pdp_fbdev_create(struct pdp_drm_private *dev); -+void pdp_fbdev_destroy(struct pdp_fbdev *fbdev); -+#endif -+ -+int pdp_modeset_validate_init(struct pdp_drm_private *dev_priv, -+ struct drm_mode_fb_cmd2 *mode_cmd, -+ struct pdp_framebuffer *pdp_fb, -+ struct drm_gem_object *obj); -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) -+int pdp_enable_vblank(struct drm_crtc *crtc); -+void pdp_disable_vblank(struct drm_crtc *crtc); -+#endif -+ -+#endif /* !defined(__DRM_PDP_DRV_H__) */ -diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_dvi.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_dvi.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_dvi.c -@@ -0,0 +1,311 @@ -+/* -+ * @File -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. 
-+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include "drm_pdp_drv.h" -+ -+#include -+#include -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) -+#else -+#include -+#endif -+ -+#include -+#include -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)) -+#include -+#endif -+ -+#if defined(PDP_USE_ATOMIC) -+#include -+#endif -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0)) -+#include -+#endif -+ -+#include "kernel_compatibility.h" -+ -+struct pdp_mode_data { -+ int hdisplay; -+ int vdisplay; -+ int vrefresh; -+ bool reduced_blanking; -+ bool interlaced; -+ bool margins; -+}; -+ -+static const struct pdp_mode_data pdp_extra_modes[] = { -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)) -+ { -+ .hdisplay = 1280, -+ .vdisplay = 720, -+ .vrefresh = 60, -+ .reduced_blanking = false, -+ .interlaced = false, -+ .margins = false, -+ }, -+ { -+ .hdisplay = 1920, -+ .vdisplay = 1080, -+ .vrefresh = 60, -+ .reduced_blanking = false, -+ .interlaced = false, -+ .margins = false, -+ }, -+#endif -+}; -+ -+static char preferred_mode_name[DRM_DISPLAY_MODE_LEN] = "\0"; -+ -+module_param_string(dvi_preferred_mode, -+ preferred_mode_name, -+ DRM_DISPLAY_MODE_LEN, -+ 0444); -+ -+MODULE_PARM_DESC(dvi_preferred_mode, -+ "Specify the preferred mode (if supported), e.g. 
1280x1024."); -+ -+ -+static int pdp_dvi_add_extra_modes(struct drm_connector *connector) -+{ -+ struct drm_display_mode *mode; -+ int num_modes; -+ int i; -+ -+ for (i = 0, num_modes = 0; i < ARRAY_SIZE(pdp_extra_modes); i++) { -+ mode = drm_cvt_mode(connector->dev, -+ pdp_extra_modes[i].hdisplay, -+ pdp_extra_modes[i].vdisplay, -+ pdp_extra_modes[i].vrefresh, -+ pdp_extra_modes[i].reduced_blanking, -+ pdp_extra_modes[i].interlaced, -+ pdp_extra_modes[i].margins); -+ if (mode) { -+ drm_mode_probed_add(connector, mode); -+ num_modes++; -+ } -+ } -+ -+ return num_modes; -+} -+ -+static int pdp_dvi_connector_helper_get_modes(struct drm_connector *connector) -+{ -+ struct drm_device *dev = connector->dev; -+ int num_modes; -+ int len = strlen(preferred_mode_name); -+ -+ if (len) -+ dev_info(dev->dev, "detected dvi_preferred_mode=%s\n", -+ preferred_mode_name); -+ else -+ dev_info(dev->dev, "no dvi_preferred_mode\n"); -+ -+ num_modes = drm_add_modes_noedid(connector, -+ dev->mode_config.max_width, -+ dev->mode_config.max_height); -+ -+ num_modes += pdp_dvi_add_extra_modes(connector); -+ if (num_modes) { -+ struct drm_display_mode *pref_mode = NULL; -+ -+ if (len) { -+ struct drm_display_mode *mode; -+ struct list_head *entry; -+ -+ list_for_each(entry, &connector->probed_modes) { -+ mode = list_entry(entry, -+ struct drm_display_mode, -+ head); -+ if (!strcmp(mode->name, preferred_mode_name)) { -+ pref_mode = mode; -+ break; -+ } -+ } -+ } -+ -+ if (pref_mode) -+ pref_mode->type |= DRM_MODE_TYPE_PREFERRED; -+ else -+ drm_set_preferred_mode(connector, -+ dev->mode_config.max_width, -+ dev->mode_config.max_height); -+ } -+ -+ drm_mode_sort(&connector->probed_modes); -+ -+ DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s] found %d modes\n", -+ connector->base.id, -+ connector->name, -+ num_modes); -+ -+ return num_modes; -+} -+ -+static enum drm_mode_status -+pdp_dvi_connector_helper_mode_valid(struct drm_connector *connector, -+ struct drm_display_mode *mode) -+{ -+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) -+ return MODE_NO_INTERLACE; -+ else if (mode->flags & DRM_MODE_FLAG_DBLSCAN) -+ return MODE_NO_DBLESCAN; -+ -+ return MODE_OK; -+} -+ -+#if !defined(PDP_USE_ATOMIC) -+static struct drm_encoder * -+pdp_dvi_connector_helper_best_encoder(struct drm_connector *connector) -+{ -+ /* Pick the first encoder we find */ -+ if (connector->encoder_ids[0] != 0) { -+ struct drm_encoder *encoder; -+ -+ encoder = drm_encoder_find(connector->dev, -+ NULL, -+ connector->encoder_ids[0]); -+ if (encoder) { -+ DRM_DEBUG_DRIVER("[ENCODER:%d:%s] best for [CONNECTOR:%d:%s]\n", -+ encoder->base.id, -+ encoder->name, -+ connector->base.id, -+ connector->name); -+ return encoder; -+ } -+ } -+ -+ return NULL; -+} -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) -+static enum drm_connector_status -+pdp_dvi_connector_detect(struct drm_connector *connector, -+ bool force) -+{ -+ /* -+ * It appears that there is no way to determine if a monitor -+ * is connected. This needs to be set to connected otherwise -+ * DPMS never gets set to ON. 
-+ */ -+ return connector_status_connected; -+} -+#endif -+ -+static void pdp_dvi_connector_destroy(struct drm_connector *connector) -+{ -+ struct pdp_drm_private *dev_priv = connector->dev->dev_private; -+ -+ DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s]\n", -+ connector->base.id, -+ connector->name); -+ -+ drm_connector_cleanup(connector); -+ -+ kfree(connector); -+ dev_priv->connector = NULL; -+} -+ -+static void pdp_dvi_connector_force(struct drm_connector *connector) -+{ -+} -+ -+static struct drm_connector_helper_funcs pdp_dvi_connector_helper_funcs = { -+ .get_modes = pdp_dvi_connector_helper_get_modes, -+ .mode_valid = pdp_dvi_connector_helper_mode_valid, -+ /* -+ * For atomic, don't set atomic_best_encoder or best_encoder. This will -+ * cause the DRM core to fallback to drm_atomic_helper_best_encoder(). -+ * This is fine as we only have a single connector and encoder. -+ */ -+#if !defined(PDP_USE_ATOMIC) -+ .best_encoder = pdp_dvi_connector_helper_best_encoder, -+#endif -+}; -+ -+static const struct drm_connector_funcs pdp_dvi_connector_funcs = { -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) -+ .detect = pdp_dvi_connector_detect, -+#endif -+ .fill_modes = drm_helper_probe_single_connector_modes, -+ .destroy = pdp_dvi_connector_destroy, -+ .force = pdp_dvi_connector_force, -+#if defined(PDP_USE_ATOMIC) -+ .reset = drm_atomic_helper_connector_reset, -+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, -+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -+#else -+ .dpms = drm_helper_connector_dpms, -+#endif -+}; -+ -+ -+struct drm_connector * -+pdp_dvi_connector_create(struct drm_device *dev) -+{ -+ struct drm_connector *connector; -+ -+ connector = kzalloc(sizeof(*connector), GFP_KERNEL); -+ if (!connector) -+ return ERR_PTR(-ENOMEM); -+ -+ drm_connector_init(dev, -+ connector, -+ &pdp_dvi_connector_funcs, -+ DRM_MODE_CONNECTOR_DVID); -+ drm_connector_helper_add(connector, &pdp_dvi_connector_helper_funcs); -+ -+ connector->dpms = DRM_MODE_DPMS_OFF; -+ connector->interlace_allowed = false; -+ connector->doublescan_allowed = false; -+ connector->display_info.subpixel_order = SubPixelHorizontalRGB; -+ -+ DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s]\n", -+ connector->base.id, -+ connector->name); -+ -+ return connector; -+} -diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_fb.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_fb.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_fb.c -@@ -0,0 +1,320 @@ -+/* -+ * @File -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. 
-+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#if defined(CONFIG_DRM_FBDEV_EMULATION) -+#include -+#include -+#include -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)) -+#include -+#endif -+#include -+#include -+ -+#include "drm_pdp_gem.h" -+#include "kernel_compatibility.h" -+ -+#define FBDEV_NAME "pdpdrmfb" -+ -+static struct fb_ops pdp_fbdev_ops = { -+ .owner = THIS_MODULE, -+ .fb_check_var = drm_fb_helper_check_var, -+ .fb_set_par = drm_fb_helper_set_par, -+ .fb_fillrect = cfb_fillrect, -+ .fb_copyarea = cfb_copyarea, -+ .fb_imageblit = cfb_imageblit, -+ .fb_pan_display = drm_fb_helper_pan_display, -+ .fb_blank = drm_fb_helper_blank, -+ .fb_setcmap = drm_fb_helper_setcmap, -+ .fb_debug_enter = drm_fb_helper_debug_enter, -+ .fb_debug_leave = drm_fb_helper_debug_leave, -+}; -+ -+ -+static struct fb_info * -+pdp_fbdev_helper_alloc(struct drm_fb_helper *helper) -+{ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) -+ struct device *dev = helper->dev->dev; -+ struct fb_info *info; -+ int ret; -+ -+ info = framebuffer_alloc(0, dev); -+ if (!info) -+ return ERR_PTR(-ENOMEM); -+ -+ ret = fb_alloc_cmap(&info->cmap, 256, 0); -+ if (ret) -+ goto err_release; -+ -+ info->apertures = alloc_apertures(1); -+ if (!info->apertures) { -+ ret = -ENOMEM; -+ goto err_free_cmap; -+ } -+ -+ helper->fbdev = info; -+ -+ return info; -+ -+err_free_cmap: -+ fb_dealloc_cmap(&info->cmap); -+err_release: -+ framebuffer_release(info); -+ return ERR_PTR(ret); -+#else -+ return drm_fb_helper_alloc_info(helper); -+#endif -+} -+ -+static inline void -+pdp_fbdev_helper_fill_info(struct drm_fb_helper *helper, -+ struct drm_fb_helper_surface_size *sizes, -+ struct fb_info *info, -+ struct drm_mode_fb_cmd2 __maybe_unused *mode_cmd) -+{ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) -+ drm_fb_helper_fill_fix(info, mode_cmd->pitches[0], helper->fb->depth); -+ drm_fb_helper_fill_var(info, helper, sizes->fb_width, -+ sizes->fb_height); -+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) -+ drm_fb_helper_fill_fix(info, mode_cmd->pitches[0], -+ helper->fb->format->depth); -+ drm_fb_helper_fill_var(info, helper, helper->fb->width, -+ 
helper->fb->height); -+#else -+ drm_fb_helper_fill_info(info, helper, sizes); -+#endif -+} -+ -+static int pdp_fbdev_probe(struct drm_fb_helper *helper, -+ struct drm_fb_helper_surface_size *sizes) -+{ -+ struct pdp_fbdev *pdp_fbdev = -+ container_of(helper, struct pdp_fbdev, helper); -+ struct drm_framebuffer *fb = -+ to_drm_framebuffer(&pdp_fbdev->fb); -+ struct pdp_gem_private *gem_priv = pdp_fbdev->priv->gem_priv; -+ struct drm_device *dev = helper->dev; -+ struct drm_mode_fb_cmd2 mode_cmd; -+ struct pdp_gem_object *pdp_obj; -+ struct drm_gem_object *obj; -+ struct fb_info *info; -+ void __iomem *vaddr; -+ size_t obj_size; -+ int err; -+ -+ if (helper->fb) -+ return 0; -+ -+ mutex_lock(&dev->struct_mutex); -+ -+ /* Create a framebuffer */ -+ info = pdp_fbdev_helper_alloc(helper); -+ if (!info) { -+ err = -ENOMEM; -+ goto err_unlock_dev; -+ } -+ -+ memset(&mode_cmd, 0, sizeof(mode_cmd)); -+ mode_cmd.pitches[0] = -+ sizes->surface_width * DIV_ROUND_UP(sizes->surface_bpp, 8); -+ mode_cmd.width = sizes->surface_width; -+ mode_cmd.height = sizes->surface_height; -+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, -+ sizes->surface_depth); -+ obj_size = PAGE_ALIGN(mode_cmd.height * mode_cmd.pitches[0]); -+ -+ obj = pdp_gem_object_create(dev, gem_priv, obj_size, 0); -+ if (IS_ERR(obj)) { -+ err = PTR_ERR(obj); -+ goto err_unlock_dev; -+ } -+ -+ pdp_obj = to_pdp_obj(obj); -+ -+ vaddr = ioremap(pdp_obj->cpu_addr, obj->size); -+ if (!vaddr) { -+ err = PTR_ERR(vaddr); -+ goto err_gem_destroy; -+ } -+ -+ /* Zero fb memory, fb_memset accounts for iomem address space */ -+ fb_memset(vaddr, 0, obj_size); -+ -+ err = pdp_modeset_validate_init(pdp_fbdev->priv, &mode_cmd, -+ &pdp_fbdev->fb, obj); -+ if (err) -+ goto err_gem_unmap; -+ -+ helper->fb = fb; -+ helper->COMPAT_FB_INFO = info; -+ -+ /* Fill out the Linux framebuffer info */ -+ strlcpy(info->fix.id, FBDEV_NAME, sizeof(info->fix.id)); -+ pdp_fbdev_helper_fill_info(helper, sizes, info, &mode_cmd); -+ info->par = helper; -+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED; -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)) -+ info->flags |= FBINFO_CAN_FORCE_OUTPUT; -+#endif -+ info->fbops = &pdp_fbdev_ops; -+ info->fix.smem_start = pdp_obj->cpu_addr; -+ info->fix.smem_len = obj_size; -+ info->screen_base = vaddr; -+ info->screen_size = obj_size; -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)) -+ info->apertures->ranges[0].base = pdp_obj->cpu_addr; -+ info->apertures->ranges[0].size = obj_size; -+#endif -+ -+ mutex_unlock(&dev->struct_mutex); -+ return 0; -+ -+err_gem_unmap: -+ iounmap(vaddr); -+ -+err_gem_destroy: -+ pdp_gem_object_free_priv(gem_priv, obj); -+ -+err_unlock_dev: -+ mutex_unlock(&dev->struct_mutex); -+ -+ DRM_ERROR(FBDEV_NAME " - %s failed (err=%d)\n", __func__, err); -+ return err; -+} -+ -+static const struct drm_fb_helper_funcs pdp_fbdev_helper_funcs = { -+ .fb_probe = pdp_fbdev_probe, -+}; -+ -+struct pdp_fbdev *pdp_fbdev_create(struct pdp_drm_private *dev_priv) -+{ -+ struct pdp_fbdev *pdp_fbdev; -+ int err; -+ const u8 preferred_bpp = 32; -+ -+ pdp_fbdev = kzalloc(sizeof(*pdp_fbdev), GFP_KERNEL); -+ if (!pdp_fbdev) -+ return ERR_PTR(-ENOMEM); -+ -+ drm_fb_helper_prepare(dev_priv->dev, &pdp_fbdev->helper, -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 3, 0)) -+ preferred_bpp, -+#endif -+ &pdp_fbdev_helper_funcs); -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) -+ err = drm_fb_helper_init(dev_priv->dev, &pdp_fbdev->helper, 1, 1); -+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0)) 
-+ err = drm_fb_helper_init(dev_priv->dev, &pdp_fbdev->helper, 1); -+#else -+ err = drm_fb_helper_init(dev_priv->dev, &pdp_fbdev->helper); -+#endif -+ if (err) -+ goto err_free_fbdev; -+ -+ pdp_fbdev->priv = dev_priv; -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)) -+ drm_fb_helper_single_add_all_connectors(&pdp_fbdev->helper); -+#endif -+ -+ err = drm_fb_helper_initial_config(&pdp_fbdev->helper -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)) -+ , preferred_bpp -+#endif -+ ); -+ if (err) -+ goto err_fb_helper_fini; -+ -+ DRM_DEBUG_DRIVER(FBDEV_NAME " - fb device registered\n"); -+ return pdp_fbdev; -+ -+err_fb_helper_fini: -+ drm_fb_helper_fini(&pdp_fbdev->helper); -+ -+err_free_fbdev: -+ kfree(pdp_fbdev); -+ -+ DRM_ERROR(FBDEV_NAME " - %s, failed (err=%d)\n", __func__, err); -+ return ERR_PTR(err); -+} -+ -+void pdp_fbdev_destroy(struct pdp_fbdev *pdp_fbdev) -+{ -+ struct pdp_framebuffer *pdp_fb; -+ struct pdp_gem_object *pdp_obj; -+ struct drm_framebuffer *fb; -+ struct fb_info *info; -+ -+ if (!pdp_fbdev) -+ return; -+ -+ drm_fb_helper_unregister_info(&pdp_fbdev->helper); -+ pdp_fb = &pdp_fbdev->fb; -+ -+ pdp_obj = to_pdp_obj(pdp_fb->obj[0]); -+ if (pdp_obj) { -+ info = pdp_fbdev->helper.COMPAT_FB_INFO; -+ iounmap((void __iomem *)info->screen_base); -+ } -+ -+ drm_gem_object_put(pdp_fb->obj[0]); -+ -+ drm_fb_helper_fini(&pdp_fbdev->helper); -+ -+ fb = to_drm_framebuffer(pdp_fb); -+ -+ /** -+ * If the driver's probe function hasn't been called -+ * (due to deferred setup of the framebuffer device), -+ * then the framebuffer won't have been initialised. -+ * Check this before attempting to clean it up. -+ */ -+ if (fb && fb->dev) -+ drm_framebuffer_cleanup(fb); -+ -+ kfree(pdp_fbdev); -+} -+#endif /* CONFIG_DRM_FBDEV_EMULATION */ -diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.c -@@ -0,0 +1,899 @@ -+/* -+ * @File -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. 
If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) -+#include -+#include -+#include -+#endif -+ -+#include -+#include -+#include -+#include -+ -+#include -+ -+#if defined(SUPPORT_EXTERNAL_PHYSHEAP_INTERFACE) -+#include "physheap.h" -+#include "pvrsrv.h" -+#include "pvr_debug.h" -+#else -+#if defined(SUPPORT_PLATO_DISPLAY) -+#include "plato_drv.h" -+#else -+#include "tc_drv.h" -+#endif -+#endif -+ -+#include "drm_pdp_gem.h" -+#include "pdp_drm.h" -+#include "kernel_compatibility.h" -+ -+#if !defined(SUPPORT_EXTERNAL_PHYSHEAP_INTERFACE) -+#if defined(SUPPORT_PLATO_DISPLAY) -+#define pdp_gem_platform_data plato_pdp_platform_data -+#else -+#define pdp_gem_platform_data tc_pdp_platform_data -+#endif -+#endif -+ -+const struct vm_operations_struct pdp_gem_vm_ops = { -+ .fault = pdp_gem_object_vm_fault, -+ .open = drm_gem_vm_open, -+ .close = drm_gem_vm_close, -+}; -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) -+const struct drm_gem_object_funcs pdp_gem_funcs = { -+ .export = pdp_gem_prime_export, -+ .free = pdp_gem_object_free, -+ .vm_ops = &pdp_gem_vm_ops, -+}; -+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) */ -+ -+struct pdp_gem_private { -+ struct mutex vram_lock; -+ struct drm_mm vram; -+ resource_size_t memory_base; -+ bool dma_map_export_host_addr; -+#if defined(SUPPORT_EXTERNAL_PHYSHEAP_INTERFACE) -+ PVRSRV_DEVICE_NODE *pvr_dev_node; -+ PHYS_HEAP *pvr_phys_heap; -+#endif -+}; -+ -+static struct pdp_gem_object * -+pdp_gem_private_object_create(struct drm_device *dev, -+ size_t size, -+ struct dma_resv *resv) -+{ -+ struct pdp_gem_object *pdp_obj; -+ -+ WARN_ON(PAGE_ALIGN(size) != size); -+ -+ pdp_obj = kzalloc(sizeof(*pdp_obj), GFP_KERNEL); -+ if (!pdp_obj) -+ return ERR_PTR(-ENOMEM); -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) -+ if (!resv) -+ dma_resv_init(&pdp_obj->_resv); -+#else -+ pdp_obj->base.resv = resv; -+#endif -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) -+ pdp_obj->base.funcs = &pdp_gem_funcs; -+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) */ -+ -+ drm_gem_private_object_init(dev, &pdp_obj->base, size); -+ -+ return pdp_obj; -+} -+ -+struct drm_gem_object *pdp_gem_object_create(struct drm_device *dev, -+ struct pdp_gem_private *gem_priv, -+ size_t size, -+ u32 flags) -+{ -+ struct pdp_gem_object *pdp_obj; -+ struct drm_mm_node *node; -+ int err = 0; -+ -+ pdp_obj = pdp_gem_private_object_create(dev, size, NULL); -+ if (!pdp_obj) { -+ err = -ENOMEM; -+ goto err_exit; -+ } -+ -+ node = kzalloc(sizeof(*node), GFP_KERNEL); -+ if (!node) { -+ err = -ENOMEM; -+ goto err_unref; -+ } -+ -+ mutex_lock(&gem_priv->vram_lock); -+ err = drm_mm_insert_node(&gem_priv->vram, node, size); -+ 
mutex_unlock(&gem_priv->vram_lock); -+ if (err) -+ goto err_free_node; -+ -+ pdp_obj->vram = node; -+ pdp_obj->dev_addr = pdp_obj->vram->start; -+ pdp_obj->cpu_addr = gem_priv->memory_base + pdp_obj->dev_addr; -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) -+ pdp_obj->resv = &pdp_obj->_resv; -+#else -+ pdp_obj->resv = pdp_obj->base.resv; -+#endif -+ pdp_obj->dma_map_export_host_addr = gem_priv->dma_map_export_host_addr; -+ -+ return &pdp_obj->base; -+ -+err_free_node: -+ kfree(node); -+err_unref: -+ pdp_gem_object_free_priv(gem_priv, &pdp_obj->base); -+err_exit: -+ return ERR_PTR(err); -+} -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) -+vm_fault_t pdp_gem_object_vm_fault(struct vm_fault *vmf) -+#else -+int pdp_gem_object_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -+#endif -+{ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) -+ struct vm_area_struct *vma = vmf->vma; -+#endif -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) -+ unsigned long addr = vmf->address; -+#else -+ unsigned long addr = (unsigned long)vmf->virtual_address; -+#endif -+ struct drm_gem_object *obj = vma->vm_private_data; -+ struct pdp_gem_object *pdp_obj = to_pdp_obj(obj); -+ unsigned long off; -+ unsigned long pfn; -+ -+ off = addr - vma->vm_start; -+ pfn = (pdp_obj->cpu_addr + off) >> PAGE_SHIFT; -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) -+ return vmf_insert_pfn(vma, addr, pfn); -+#else -+ { -+ int err; -+ -+ err = vm_insert_pfn(vma, addr, pfn); -+ switch (err) { -+ case 0: -+ case -EBUSY: -+ return VM_FAULT_NOPAGE; -+ case -ENOMEM: -+ return VM_FAULT_OOM; -+ default: -+ return VM_FAULT_SIGBUS; -+ } -+ } -+#endif -+} -+ -+void pdp_gem_object_free_priv(struct pdp_gem_private *gem_priv, -+ struct drm_gem_object *obj) -+{ -+ struct pdp_gem_object *pdp_obj = to_pdp_obj(obj); -+ -+ drm_gem_free_mmap_offset(obj); -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) -+ if (&pdp_obj->_resv == pdp_obj->resv) -+ dma_resv_fini(&pdp_obj->_resv); -+#endif -+ if (pdp_obj->vram) { -+ mutex_lock(&gem_priv->vram_lock); -+ drm_mm_remove_node(pdp_obj->vram); -+ mutex_unlock(&gem_priv->vram_lock); -+ -+ kfree(pdp_obj->vram); -+ } else if (obj->import_attach) { -+ drm_prime_gem_destroy(obj, pdp_obj->sgt); -+ } -+ -+ drm_gem_object_release(&pdp_obj->base); -+ kfree(pdp_obj); -+} -+ -+static int pdp_gem_prime_attach(struct dma_buf *dma_buf, -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) -+ struct device *dev, -+#endif -+ struct dma_buf_attachment *attach) -+{ -+#if defined(SUPPORT_EXTERNAL_PHYSHEAP_INTERFACE) -+ (void) dma_buf; -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) -+ (void) dev; -+#endif -+ /* Restrict access to PVR Services */ -+ if (strcmp(attach->dev->driver->name, -+ PVR_LDM_DRIVER_REGISTRATION_NAME)) -+ return -EPERM; -+#else -+ struct drm_gem_object *obj = dma_buf->priv; -+ -+ /* Restrict access to Rogue */ -+ if (WARN_ON(!obj->dev->dev->parent) || -+ obj->dev->dev->parent != attach->dev->parent) -+ return -EPERM; -+#endif -+ return 0; -+} -+ -+static struct sg_table * -+pdp_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, -+ enum dma_data_direction dir) -+{ -+ struct drm_gem_object *obj = attach->dmabuf->priv; -+ struct pdp_gem_object *pdp_obj = to_pdp_obj(obj); -+ struct sg_table *sgt; -+ -+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); -+ if (!sgt) -+ return NULL; -+ -+ if (sg_alloc_table(sgt, 1, GFP_KERNEL)) -+ goto err_free_sgt; -+ -+ if (pdp_obj->dma_map_export_host_addr) -+ sg_dma_address(sgt->sgl) = pdp_obj->cpu_addr; -+ else -+ 
sg_dma_address(sgt->sgl) = pdp_obj->dev_addr; -+ -+ sg_dma_len(sgt->sgl) = obj->size; -+ -+ return sgt; -+ -+err_free_sgt: -+ kfree(sgt); -+ return NULL; -+} -+ -+static void pdp_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, -+ struct sg_table *sgt, -+ enum dma_data_direction dir) -+{ -+ sg_free_table(sgt); -+ kfree(sgt); -+} -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) -+static void *pdp_gem_prime_kmap_atomic(struct dma_buf *dma_buf, -+ unsigned long page_num) -+{ -+ return NULL; -+} -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) -+static void *pdp_gem_prime_kmap(struct dma_buf *dma_buf, -+ unsigned long page_num) -+{ -+ return NULL; -+} -+#endif -+ -+static int pdp_gem_prime_mmap(struct dma_buf *dma_buf, -+ struct vm_area_struct *vma) -+{ -+ struct drm_gem_object *obj = dma_buf->priv; -+ int err; -+ -+ mutex_lock(&obj->dev->struct_mutex); -+ err = drm_gem_mmap_obj(obj, obj->size, vma); -+ mutex_unlock(&obj->dev->struct_mutex); -+ -+ return err; -+} -+ -+#if defined(CONFIG_X86) -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) -+static void *pdp_gem_prime_vmap(struct dma_buf *dma_buf) -+#else -+static int pdp_gem_prime_vmap(struct dma_buf *dma_buf, struct iosys_map *map) -+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ -+{ -+ struct drm_gem_object *obj = dma_buf->priv; -+ struct pdp_gem_object *pdp_obj = to_pdp_obj(obj); -+ void __iomem *vaddr; -+ __maybe_unused int ret = 0; -+ -+ mutex_lock(&obj->dev->struct_mutex); -+ -+ /* -+ * On x86 platforms, the pointer returned by ioremap can be dereferenced -+ * directly. As such, explicitly cast away the __ioremap qualifier. -+ */ -+ vaddr = ioremap(pdp_obj->cpu_addr, obj->size); -+ if (vaddr == NULL) { -+ DRM_DEBUG_DRIVER("ioremap failed"); -+ ret = -ENOMEM; -+ } -+ -+ mutex_unlock(&obj->dev->struct_mutex); -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)) -+ if (ret == 0) -+ iosys_map_set_vaddr_iomem(map, vaddr); -+ return ret; -+#else -+ return (void __force *) vaddr; -+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0) */ -+} -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) -+static void pdp_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr) -+#else -+static void pdp_gem_prime_vunmap(struct dma_buf *dma_buf, struct iosys_map *map) -+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ -+{ -+ struct drm_gem_object *obj = dma_buf->priv; -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)) -+ void __iomem *vaddr = map->vaddr_iomem; -+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0) */ -+ -+ mutex_lock(&obj->dev->struct_mutex); -+ iounmap((void __iomem *)vaddr); -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)) -+ iosys_map_clear(map); -+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0) */ -+ -+ mutex_unlock(&obj->dev->struct_mutex); -+} -+#endif -+ -+static const struct dma_buf_ops pdp_gem_prime_dmabuf_ops = { -+ .attach = pdp_gem_prime_attach, -+ .map_dma_buf = pdp_gem_prime_map_dma_buf, -+ .unmap_dma_buf = pdp_gem_prime_unmap_dma_buf, -+ .release = drm_gem_dmabuf_release, -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) -+ .map_atomic = pdp_gem_prime_kmap_atomic, -+#endif -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) -+ .map = pdp_gem_prime_kmap, -+#endif -+#else -+ .kmap_atomic = pdp_gem_prime_kmap_atomic, -+ .kmap = pdp_gem_prime_kmap, -+#endif -+ .mmap = pdp_gem_prime_mmap, -+#if defined(CONFIG_X86) -+ .vmap = pdp_gem_prime_vmap, -+ .vunmap = 
pdp_gem_prime_vunmap -+#endif -+}; -+ -+ -+static int -+pdp_gem_lookup_our_object(struct drm_file *file, u32 handle, -+ struct drm_gem_object **objp) -+ -+{ -+ struct drm_gem_object *obj; -+ -+ obj = drm_gem_object_lookup(file, handle); -+ if (!obj) -+ return -ENOENT; -+ -+ if (obj->import_attach) { -+ /* -+ * The dmabuf associated with the object is not one of ours. -+ * Our own buffers are handled differently on import. -+ */ -+ drm_gem_object_put(obj); -+ return -EINVAL; -+ } -+ -+ *objp = obj; -+ return 0; -+} -+ -+struct dma_buf *pdp_gem_prime_export( -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) -+ struct drm_device *dev, -+#endif -+ struct drm_gem_object *obj, -+ int flags) -+{ -+ struct pdp_gem_object *pdp_obj = to_pdp_obj(obj); -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) -+ DEFINE_DMA_BUF_EXPORT_INFO(export_info); -+ -+ export_info.ops = &pdp_gem_prime_dmabuf_ops; -+ export_info.size = obj->size; -+ export_info.flags = flags; -+ export_info.resv = pdp_obj->resv; -+ export_info.priv = obj; -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) -+ return drm_gem_dmabuf_export(obj->dev, &export_info); -+#else -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) -+ return drm_gem_dmabuf_export(dev, &export_info); -+#else -+ return dma_buf_export(&export_info); -+#endif -+#endif -+#else -+ return dma_buf_export(obj, &pdp_gem_prime_dmabuf_ops, obj->size, -+ flags, pdp_obj->resv); -+#endif -+} -+ -+struct drm_gem_object * -+pdp_gem_prime_import(struct drm_device *dev, -+ struct dma_buf *dma_buf) -+{ -+ if (dma_buf->ops == &pdp_gem_prime_dmabuf_ops) { -+ struct drm_gem_object *obj = dma_buf->priv; -+ -+ if (obj->dev == dev) { -+ /* -+ * The dmabuf is one of ours, so return the associated -+ * PDP GEM object, rather than create a new one. 
-+ */ -+ drm_gem_object_get(obj); -+ -+ return obj; -+ } -+ } -+ -+ return drm_gem_prime_import(dev, dma_buf); -+} -+ -+struct drm_gem_object * -+pdp_gem_prime_import_sg_table(struct drm_device *dev, -+ struct dma_buf_attachment *attach, -+ struct sg_table *sgt) -+{ -+ struct pdp_gem_private *gem_priv = pdp_gem_get_private(dev); -+ struct pdp_gem_object *pdp_obj; -+ int err; -+ -+ pdp_obj = pdp_gem_private_object_create(dev, -+ attach->dmabuf->size, -+ attach->dmabuf->resv); -+ if (!pdp_obj) { -+ err = -ENOMEM; -+ goto err_exit; -+ } -+ -+ pdp_obj->sgt = sgt; -+ -+ /* We only expect a single entry for card memory */ -+ if (pdp_obj->sgt->nents != 1) { -+ err = -EINVAL; -+ goto err_obj_unref; -+ } -+ -+ pdp_obj->dev_addr = sg_dma_address(pdp_obj->sgt->sgl); -+ pdp_obj->cpu_addr = gem_priv->memory_base + pdp_obj->dev_addr; -+ pdp_obj->resv = attach->dmabuf->resv; -+ -+ return &pdp_obj->base; -+ -+err_obj_unref: -+ drm_gem_object_put(&pdp_obj->base); -+err_exit: -+ return ERR_PTR(err); -+} -+ -+int pdp_gem_dumb_create_priv(struct drm_file *file, -+ struct drm_device *dev, -+ struct pdp_gem_private *gem_priv, -+ struct drm_mode_create_dumb *args) -+{ -+ struct drm_gem_object *obj; -+ u32 handle; -+ u32 pitch; -+ size_t size; -+ int err; -+ -+ pitch = args->width * (ALIGN(args->bpp, 8) >> 3); -+ size = PAGE_ALIGN(pitch * args->height); -+ -+ obj = pdp_gem_object_create(dev, gem_priv, size, 0); -+ if (IS_ERR(obj)) -+ return PTR_ERR(obj); -+ -+ err = drm_gem_handle_create(file, obj, &handle); -+ if (err) -+ goto exit; -+ -+ args->handle = handle; -+ args->pitch = pitch; -+ args->size = size; -+ -+exit: -+ drm_gem_object_put(obj); -+ return err; -+} -+ -+int pdp_gem_dumb_map_offset(struct drm_file *file, -+ struct drm_device *dev, -+ uint32_t handle, -+ uint64_t *offset) -+{ -+ struct drm_gem_object *obj; -+ int err; -+ -+ mutex_lock(&dev->struct_mutex); -+ -+ err = pdp_gem_lookup_our_object(file, handle, &obj); -+ if (err) -+ goto exit_unlock; -+ -+ err = drm_gem_create_mmap_offset(obj); -+ if (err) -+ goto exit_obj_unref; -+ -+ *offset = drm_vma_node_offset_addr(&obj->vma_node); -+ -+exit_obj_unref: -+ drm_gem_object_put(obj); -+exit_unlock: -+ mutex_unlock(&dev->struct_mutex); -+ return err; -+} -+ -+#if defined(SUPPORT_EXTERNAL_PHYSHEAP_INTERFACE) -+static bool -+pdp_gem_init_platform(struct drm_device *dev, -+ struct pdp_gem_private *gem_priv, -+ unsigned int instance) -+{ -+ IMG_CPU_PHYADDR heap_cpu_paddr; -+ IMG_CPU_PHYADDR lma_cpu_paddr; -+ IMG_DEV_PHYADDR heap_dev_paddr; -+ IMG_UINT64 heap_size; -+ PVRSRV_ERROR pvr_err; -+ -+ gem_priv->pvr_dev_node = PVRSRVGetDeviceInstance(instance); -+ if (!gem_priv->pvr_dev_node) { -+ DRM_ERROR("%s can't get PVR device node for instance %d\n", -+ dev->driver->name, instance); -+ return false; -+ } -+ -+ pvr_err = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_DISPLAY, -+ gem_priv->pvr_dev_node, -+ &gem_priv->pvr_phys_heap); -+ if (pvr_err != PVRSRV_OK) { -+ DRM_ERROR("%s couldn't acquire display heap: %s\n", -+ dev->driver->name, PVRSRVGetErrorString(pvr_err)); -+ return false; -+ } -+ -+ if (PhysHeapGetType(gem_priv->pvr_phys_heap) != PHYS_HEAP_TYPE_LMA) { -+ DRM_ERROR("%s display heap is not LMA\n", dev->driver->name); -+ goto exit_release_heap; -+ } -+ -+ pvr_err = PhysHeapGetCpuPAddr(gem_priv->pvr_phys_heap, &heap_cpu_paddr); -+ if (pvr_err != PVRSRV_OK) { -+ DRM_ERROR("%s couldn't get display heap base CPU physical address: %s\n", -+ dev->driver->name, PVRSRVGetErrorString(pvr_err)); -+ goto exit_release_heap; -+ } -+ -+ 
PhysHeapCpuPAddrToDevPAddr(gem_priv->pvr_phys_heap, 1, -+ &heap_dev_paddr, &heap_cpu_paddr); -+ -+ pvr_err = PhysHeapGetSize(gem_priv->pvr_phys_heap, &heap_size); -+ if (pvr_err != PVRSRV_OK) { -+ DRM_ERROR("%s couldn't get display heap size: %s\n", -+ dev->driver->name, PVRSRVGetErrorString(pvr_err)); -+ goto exit_release_heap; -+ } -+ -+ lma_cpu_paddr = heap_cpu_paddr; -+ lma_cpu_paddr.uiAddr -= heap_dev_paddr.uiAddr; -+ -+ drm_mm_init(&gem_priv->vram, heap_dev_paddr.uiAddr, heap_size); -+ -+ DRM_INFO("%s has 0x%llx bytes of allocatable memory at LMA offset 0x%llx (CPU PA 0x%llx)\n", -+ dev->driver->name, (u64)heap_size, -+ (u64)heap_dev_paddr.uiAddr, (u64)heap_cpu_paddr.uiAddr); -+ -+ gem_priv->memory_base = lma_cpu_paddr.uiAddr; -+ gem_priv->dma_map_export_host_addr = false; -+ -+ return true; -+ -+exit_release_heap: -+ PhysHeapRelease(gem_priv->pvr_phys_heap); -+ return false; -+} -+ -+static void -+pdp_gem_cleanup_platform(struct pdp_gem_private *gem_priv) -+{ -+ drm_mm_takedown(&gem_priv->vram); -+ PhysHeapRelease(gem_priv->pvr_phys_heap); -+} -+#else -+static bool -+pdp_gem_init_platform(struct drm_device *dev, -+ struct pdp_gem_private *gem_priv, -+ unsigned int instance) -+{ -+ struct pdp_gem_platform_data *pdata = -+ to_platform_device(dev->dev)->dev.platform_data; -+ -+ /* Instance is only used by SUPPORT_EXTERNAL_PHYSHEAP_INTERFACE */ -+ WARN_ON(instance != 0); -+ -+#if defined(SUPPORT_ION) && !defined(SUPPORT_GEM_ALLOC) -+ drm_mm_init(&gem_priv->vram, 0, 0); -+ DRM_INFO("%s has no directly allocatable memory; the memory is managed by ION\n", -+ dev->driver->name); -+ -+#else -+ drm_mm_init(&gem_priv->vram, -+ pdata->pdp_heap_memory_base - pdata->memory_base, -+ pdata->pdp_heap_memory_size); -+ -+ DRM_INFO("%s has %pa bytes of allocatable memory at 0x%llx = (0x%llx - 0x%llx)\n", -+ dev->driver->name, &pdata->pdp_heap_memory_size, -+ (u64)(pdata->pdp_heap_memory_base - pdata->memory_base), -+ (u64)pdata->pdp_heap_memory_base, (u64)pdata->memory_base); -+#endif -+ gem_priv->memory_base = pdata->memory_base; -+ gem_priv->dma_map_export_host_addr = pdata->dma_map_export_host_addr; -+ -+ return true; -+} -+ -+static void -+pdp_gem_cleanup_platform(struct pdp_gem_private *gem_priv) -+{ -+ drm_mm_takedown(&gem_priv->vram); -+} -+#endif -+ -+struct pdp_gem_private *pdp_gem_init(struct drm_device *dev, unsigned int instance) -+{ -+ struct pdp_gem_private *gem_priv = kmalloc(sizeof(*gem_priv), GFP_KERNEL); -+ -+ if (!gem_priv) -+ return NULL; -+ -+ memset(&gem_priv->vram, 0, sizeof(gem_priv->vram)); -+ -+ mutex_init(&gem_priv->vram_lock); -+ -+ if (!pdp_gem_init_platform(dev, gem_priv, instance)) { -+ mutex_destroy(&gem_priv->vram_lock); -+ kfree(gem_priv); -+ return NULL; -+ } -+ -+ return gem_priv; -+} -+ -+void pdp_gem_cleanup(struct pdp_gem_private *gem_priv) -+{ -+ pdp_gem_cleanup_platform(gem_priv); -+ -+ mutex_destroy(&gem_priv->vram_lock); -+ -+ kfree(gem_priv); -+} -+ -+struct dma_resv *pdp_gem_get_resv(struct drm_gem_object *obj) -+{ -+ return (to_pdp_obj(obj)->resv); -+} -+ -+u64 pdp_gem_get_dev_addr(struct drm_gem_object *obj) -+{ -+ struct pdp_gem_object *pdp_obj = to_pdp_obj(obj); -+ -+ return pdp_obj->dev_addr; -+} -+ -+int pdp_gem_object_create_ioctl_priv(struct drm_device *dev, -+ struct pdp_gem_private *gem_priv, -+ void *data, -+ struct drm_file *file) -+{ -+ struct drm_pdp_gem_create *args = data; -+ struct drm_gem_object *obj; -+ int err; -+ -+ if (args->flags) { -+ DRM_ERROR("invalid flags: %#08x\n", args->flags); -+ return -EINVAL; -+ } -+ -+ if (args->handle) { 
-+ DRM_ERROR("invalid handle (this should always be 0)\n"); -+ return -EINVAL; -+ } -+ -+ obj = pdp_gem_object_create(dev, -+ gem_priv, -+ PAGE_ALIGN(args->size), -+ args->flags); -+ if (IS_ERR(obj)) -+ return PTR_ERR(obj); -+ -+ err = drm_gem_handle_create(file, obj, &args->handle); -+ drm_gem_object_put(obj); -+ -+ return err; -+ -+} -+ -+int pdp_gem_object_mmap_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file) -+{ -+ struct drm_pdp_gem_mmap *args = (struct drm_pdp_gem_mmap *)data; -+ -+ if (args->pad) { -+ DRM_ERROR("invalid pad (this should always be 0)\n"); -+ return -EINVAL; -+ } -+ -+ if (args->offset) { -+ DRM_ERROR("invalid offset (this should always be 0)\n"); -+ return -EINVAL; -+ } -+ -+ return pdp_gem_dumb_map_offset(file, dev, args->handle, &args->offset); -+} -+ -+int pdp_gem_object_cpu_prep_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file) -+{ -+ struct drm_pdp_gem_cpu_prep *args = (struct drm_pdp_gem_cpu_prep *)data; -+ struct drm_gem_object *obj; -+ struct pdp_gem_object *pdp_obj; -+ bool write = !!(args->flags & PDP_GEM_CPU_PREP_WRITE); -+ bool wait = !(args->flags & PDP_GEM_CPU_PREP_NOWAIT); -+ int err = 0; -+ -+ if (args->flags & ~(PDP_GEM_CPU_PREP_READ | -+ PDP_GEM_CPU_PREP_WRITE | -+ PDP_GEM_CPU_PREP_NOWAIT)) { -+ DRM_ERROR("invalid flags: %#08x\n", args->flags); -+ return -EINVAL; -+ } -+ -+ mutex_lock(&dev->struct_mutex); -+ -+ err = pdp_gem_lookup_our_object(file, args->handle, &obj); -+ if (err) -+ goto exit_unlock; -+ -+ pdp_obj = to_pdp_obj(obj); -+ -+ if (pdp_obj->cpu_prep) { -+ err = -EBUSY; -+ goto exit_unref; -+ } -+ -+ if (wait) { -+ long lerr; -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0)) -+ lerr = dma_resv_wait_timeout(pdp_obj->resv, write, -+ true, 30 * HZ); -+#else -+ lerr = dma_resv_wait_timeout_rcu(pdp_obj->resv, write, -+ true, 30 * HZ); -+#endif -+ if (!lerr) -+ err = -EBUSY; -+ else if (lerr < 0) -+ err = lerr; -+ } else { -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0)) -+ if (!dma_resv_test_signaled(pdp_obj->resv, write)) -+#else -+ if (!dma_resv_test_signaled_rcu(pdp_obj->resv, write)) -+#endif -+ err = -EBUSY; -+ } -+ -+ if (!err) -+ pdp_obj->cpu_prep = true; -+ -+exit_unref: -+ drm_gem_object_put(obj); -+exit_unlock: -+ mutex_unlock(&dev->struct_mutex); -+ return err; -+} -+ -+int pdp_gem_object_cpu_fini_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file) -+{ -+ struct drm_pdp_gem_cpu_fini *args = (struct drm_pdp_gem_cpu_fini *)data; -+ struct drm_gem_object *obj; -+ struct pdp_gem_object *pdp_obj; -+ int err = 0; -+ -+ if (args->pad) { -+ DRM_ERROR("invalid pad (this should always be 0)\n"); -+ return -EINVAL; -+ } -+ -+ mutex_lock(&dev->struct_mutex); -+ -+ err = pdp_gem_lookup_our_object(file, args->handle, &obj); -+ if (err) -+ goto exit_unlock; -+ -+ pdp_obj = to_pdp_obj(obj); -+ -+ if (!pdp_obj->cpu_prep) { -+ err = -EINVAL; -+ goto exit_unref; -+ } -+ -+ pdp_obj->cpu_prep = false; -+ -+exit_unref: -+ drm_gem_object_put(obj); -+exit_unlock: -+ mutex_unlock(&dev->struct_mutex); -+ return err; -+} -diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.h b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.h -@@ -0,0 +1,158 @@ -+/* -+ * @File -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ */ -+ -+#if !defined(__DRM_PDP_GEM_H__) -+#define __DRM_PDP_GEM_H__ -+ -+#include -+#include -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) -+#include -+#include -+#else -+#include -+#endif -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) -+#include -+#endif -+ -+#include "drm_pdp_drv.h" -+#include "pvr_dma_resv.h" -+ -+extern const struct vm_operations_struct pdp_gem_vm_ops; -+ -+struct pdp_gem_private; -+ -+struct pdp_gem_object { -+ struct drm_gem_object base; -+ -+ /* Non-null if backing originated from this driver */ -+ struct drm_mm_node *vram; -+ -+ /* Non-null if backing was imported */ -+ struct sg_table *sgt; -+ -+ bool dma_map_export_host_addr; -+ phys_addr_t cpu_addr; -+ dma_addr_t dev_addr; -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) -+ struct dma_resv _resv; -+#endif -+ struct dma_resv *resv; -+ -+ bool cpu_prep; -+}; -+ -+#define to_pdp_obj(obj) container_of(obj, struct pdp_gem_object, base) -+ -+struct pdp_gem_private *pdp_gem_init(struct drm_device *dev, -+ unsigned int instance); -+ -+void pdp_gem_cleanup(struct pdp_gem_private *dev_priv); -+ -+/* ioctl functions */ -+int pdp_gem_object_create_ioctl_priv(struct drm_device *dev, -+ struct pdp_gem_private *gem_priv, -+ void *data, -+ struct drm_file *file); -+int pdp_gem_object_mmap_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file); -+int pdp_gem_object_cpu_prep_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file); -+int pdp_gem_object_cpu_fini_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file); -+ -+/* drm driver functions */ -+struct drm_gem_object *pdp_gem_object_create(struct drm_device *dev, -+ struct pdp_gem_private *gem_priv, -+ size_t size, -+ u32 flags); -+ -+void pdp_gem_object_free_priv(struct pdp_gem_private *gem_priv, -+ struct drm_gem_object *obj); -+ -+void pdp_gem_object_free(struct drm_gem_object *obj); -+ -+struct dma_buf *pdp_gem_prime_export( -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) -+ struct drm_device *dev, -+#endif -+ struct drm_gem_object *obj, -+ int flags); -+ -+struct drm_gem_object *pdp_gem_prime_import(struct drm_device *dev, -+ struct dma_buf *dma_buf); -+ -+struct drm_gem_object * -+pdp_gem_prime_import_sg_table(struct drm_device *dev, -+ struct dma_buf_attachment *attach, -+ struct sg_table *sgt); -+ -+int pdp_gem_dumb_create_priv(struct drm_file *file, -+ struct drm_device *dev, -+ struct pdp_gem_private *gem_priv, -+ struct drm_mode_create_dumb *args); -+ -+int pdp_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, -+ uint32_t handle, uint64_t *offset); -+ -+/* vm operation functions */ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) -+typedef int vm_fault_t; -+#endif -+vm_fault_t pdp_gem_object_vm_fault(struct vm_fault *vmf); -+#else -+int pdp_gem_object_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf); -+#endif -+ -+/* internal interfaces */ -+struct dma_resv *pdp_gem_get_resv(struct drm_gem_object *obj); -+u64 pdp_gem_get_dev_addr(struct drm_gem_object *obj); -+ -+/* functions provided by clients of PDP GEM */ -+struct pdp_gem_private *pdp_gem_get_private(struct drm_device *dev); -+ -+#endif /* !defined(__DRM_PDP_GEM_H__) */ -diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_modeset.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_modeset.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_modeset.c -@@ -0,0 +1,468 @@ -+/* -+ * @File -+ * 
@Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include "drm_pdp_drv.h" -+ -+#include -+#include -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) -+#include -+#include -+#else -+#include -+#endif -+ -+#include -+#include -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) -+#include -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) -+#define drm_gem_fb_create(...) 
pdp_framebuffer_create(__VA_ARGS__) -+#else -+#include -+#endif -+ -+#if defined(PDP_USE_ATOMIC) -+#include -+#endif -+ -+#include -+ -+#include "kernel_compatibility.h" -+ -+#define PDP_WIDTH_MIN 640 -+#define PDP_WIDTH_MAX 1280 -+#define PDP_HEIGHT_MIN 480 -+#define PDP_HEIGHT_MAX 1024 -+ -+#define ODIN_PDP_WIDTH_MAX 1920 -+#define ODIN_PDP_HEIGHT_MAX 1080 -+ -+#define ORION_PDP_WIDTH_MAX 1280 -+#define ORION_PDP_HEIGHT_MAX 720 -+ -+#define PLATO_PDP_WIDTH_MAX 1920 -+#define PLATO_PDP_HEIGHT_MAX 1080 -+ -+static bool async_flip_enable = true; -+ -+module_param(async_flip_enable, bool, 0444); -+ -+MODULE_PARM_DESC(async_flip_enable, -+ "Enable support for 'faked' async flipping (default: Y)"); -+ -+static inline int -+drm_mode_fb_cmd2_validate(const struct drm_mode_fb_cmd2 *mode_cmd) -+{ -+ switch (mode_cmd->pixel_format) { -+ case DRM_FORMAT_ARGB8888: -+ case DRM_FORMAT_XRGB8888: -+ case DRM_FORMAT_RGB565: -+ break; -+ default: -+ DRM_ERROR_RATELIMITED("pixel format not supported (format = %u)\n", -+ mode_cmd->pixel_format); -+ return -EINVAL; -+ } -+ -+ if (mode_cmd->flags & DRM_MODE_FB_INTERLACED) { -+ DRM_ERROR_RATELIMITED("interlaced framebuffers not supported\n"); -+ return -EINVAL; -+ } -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) -+ switch (mode_cmd->modifier[0]) { -+ case DRM_FORMAT_MOD_PVR_FBCDC_8x8_V12: -+ case DRM_FORMAT_MOD_PVR_FBCDC_16x4_V12: -+ case DRM_FORMAT_MOD_LINEAR: -+ break; -+ default: -+ DRM_ERROR_RATELIMITED("format modifier 0x%llx is not supported\n", -+ mode_cmd->modifier[0]); -+ return -EINVAL; -+ } -+#endif -+ -+ return 0; -+} -+ -+static void pdp_framebuffer_destroy(struct drm_framebuffer *fb) -+{ -+ struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb); -+ -+ DRM_DEBUG_DRIVER("[FB:%d]\n", fb->base.id); -+ -+ drm_framebuffer_cleanup(fb); -+ -+ drm_gem_object_put(pdp_fb->obj[0]); -+ -+ kfree(pdp_fb); -+} -+ -+static int pdp_framebuffer_create_handle(struct drm_framebuffer *fb, -+ struct drm_file *file, -+ unsigned int *handle) -+{ -+ struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb); -+ -+ DRM_DEBUG_DRIVER("[FB:%d]\n", fb->base.id); -+ -+ return drm_gem_handle_create(file, pdp_fb->obj[0], handle); -+} -+ -+static const struct drm_framebuffer_funcs pdp_framebuffer_funcs = { -+ .destroy = pdp_framebuffer_destroy, -+ .create_handle = pdp_framebuffer_create_handle, -+ .dirty = NULL, -+}; -+ -+static inline int -+pdp_framebuffer_init(struct pdp_drm_private *dev_priv, -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || \ -+ (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) -+ const -+#endif -+ struct drm_mode_fb_cmd2 *mode_cmd, -+ struct pdp_framebuffer *pdp_fb, -+ struct drm_gem_object *obj) -+{ -+ struct drm_framebuffer *fb; -+ -+ if (!pdp_fb) -+ return -EINVAL; -+ -+ fb = to_drm_framebuffer(pdp_fb); -+ pdp_fb->obj[0] = obj; -+ -+ drm_helper_mode_fill_fb_struct(dev_priv->dev, fb, mode_cmd); -+ -+ return drm_framebuffer_init(dev_priv->dev, fb, &pdp_framebuffer_funcs); -+} -+ -+int pdp_modeset_validate_init(struct pdp_drm_private *dev_priv, -+ struct drm_mode_fb_cmd2 *mode_cmd, -+ struct pdp_framebuffer *pdp_fb, -+ struct drm_gem_object *obj) -+{ -+ int err; -+ -+ err = drm_mode_fb_cmd2_validate(mode_cmd); -+ if (err) -+ return err; -+ -+ return pdp_framebuffer_init(dev_priv, mode_cmd, pdp_fb, obj); -+} -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) -+static struct drm_framebuffer * -+pdp_framebuffer_create(struct drm_device *dev, -+ struct drm_file *file, -+#if (LINUX_VERSION_CODE >= 
KERNEL_VERSION(4, 5, 0)) || \ -+ (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) -+ const -+#endif -+ struct drm_mode_fb_cmd2 *mode_cmd) -+{ -+ struct pdp_drm_private *dev_priv = dev->dev_private; -+ struct drm_gem_object *obj; -+ struct pdp_framebuffer *pdp_fb; -+ int err; -+ -+ obj = drm_gem_object_lookup(file, mode_cmd->handles[0]); -+ if (!obj) { -+ DRM_ERROR("failed to find buffer with handle %u\n", -+ mode_cmd->handles[0]); -+ err = -ENOENT; -+ goto err_out; -+ } -+ -+ pdp_fb = kzalloc(sizeof(*pdp_fb), GFP_KERNEL); -+ if (!pdp_fb) { -+ err = -ENOMEM; -+ goto err_obj_put; -+ } -+ -+ err = pdp_framebuffer_init(dev_priv, mode_cmd, pdp_fb, obj); -+ if (err) { -+ DRM_ERROR("failed to initialise framebuffer (err=%d)\n", err); -+ goto err_free_fb; -+ } -+ -+ DRM_DEBUG_DRIVER("[FB:%d]\n", pdp_fb->base.base.id); -+ -+ return &pdp_fb->base; -+ -+err_free_fb: -+ kfree(pdp_fb); -+err_obj_put: -+ drm_gem_object_put(obj); -+err_out: -+ return ERR_PTR(err); -+} -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) */ -+ -+ -+/************************************************************************* -+ * DRM mode config callbacks -+ **************************************************************************/ -+ -+static struct drm_framebuffer * -+pdp_fb_create(struct drm_device *dev, -+ struct drm_file *file, -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || \ -+ (defined(CHROMIUMOS_KERNEL) && \ -+ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) -+ const -+#endif -+ struct drm_mode_fb_cmd2 *mode_cmd) -+{ -+ struct drm_framebuffer *fb; -+ int err; -+ -+ err = drm_mode_fb_cmd2_validate(mode_cmd); -+ if (err) -+ return ERR_PTR(err); -+ -+ fb = drm_gem_fb_create(dev, file, mode_cmd); -+ if (IS_ERR(fb)) -+ goto out; -+ -+ DRM_DEBUG_DRIVER("[FB:%d]\n", fb->base.id); -+ -+out: -+ return fb; -+} -+ -+static const struct drm_mode_config_funcs pdp_mode_config_funcs = { -+ .fb_create = pdp_fb_create, -+ .output_poll_changed = NULL, -+#if defined(PDP_USE_ATOMIC) -+ .atomic_check = drm_atomic_helper_check, -+ .atomic_commit = drm_atomic_helper_commit, -+#endif -+}; -+ -+ -+int pdp_modeset_early_init(struct pdp_drm_private *dev_priv) -+{ -+ struct drm_device *dev = dev_priv->dev; -+ int err; -+ -+ drm_mode_config_init(dev); -+ -+ dev->mode_config.funcs = &pdp_mode_config_funcs; -+ dev->mode_config.min_width = PDP_WIDTH_MIN; -+ dev->mode_config.min_height = PDP_HEIGHT_MIN; -+ -+ switch (dev_priv->version) { -+ case PDP_VERSION_APOLLO: -+ dev->mode_config.max_width = PDP_WIDTH_MAX; -+ dev->mode_config.max_height = PDP_HEIGHT_MAX; -+ break; -+ case PDP_VERSION_ODIN: -+ if (dev_priv->subversion == PDP_ODIN_ORION) { -+ dev->mode_config.max_width = ORION_PDP_WIDTH_MAX; -+ dev->mode_config.max_height = ORION_PDP_HEIGHT_MAX; -+ } else { -+ dev->mode_config.max_width = ODIN_PDP_WIDTH_MAX; -+ dev->mode_config.max_height = ODIN_PDP_HEIGHT_MAX; -+ } -+ break; -+ case PDP_VERSION_PLATO: -+ dev->mode_config.max_width = PLATO_PDP_WIDTH_MAX; -+ dev->mode_config.max_height = PLATO_PDP_HEIGHT_MAX; -+ break; -+ default: -+ BUG(); -+ } -+ -+ DRM_INFO("max_width is %d\n", -+ dev->mode_config.max_width); -+ DRM_INFO("max_height is %d\n", -+ dev->mode_config.max_height); -+ -+ dev->mode_config.async_page_flip = async_flip_enable; -+ -+ DRM_INFO("%s async flip support is %s\n", -+ dev->driver->name, async_flip_enable ? 
"enabled" : "disabled"); -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) && \ -+ (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)) -+ dev->mode_config.allow_fb_modifiers = true; -+#endif -+ -+ dev_priv->plane = pdp_plane_create(dev, DRM_PLANE_TYPE_PRIMARY); -+ if (IS_ERR(dev_priv->plane)) { -+ DRM_ERROR("failed to create a primary plane\n"); -+ err = PTR_ERR(dev_priv->plane); -+ goto err_config_cleanup; -+ } -+ -+ dev_priv->crtc = pdp_crtc_create(dev, 0, dev_priv->plane); -+ if (IS_ERR(dev_priv->crtc)) { -+ DRM_ERROR("failed to create a CRTC\n"); -+ err = PTR_ERR(dev_priv->crtc); -+ goto err_config_cleanup; -+ } -+ -+ switch (dev_priv->version) { -+ case PDP_VERSION_APOLLO: -+ case PDP_VERSION_ODIN: -+ dev_priv->connector = pdp_dvi_connector_create(dev); -+ if (IS_ERR(dev_priv->connector)) { -+ DRM_ERROR("failed to create a connector\n"); -+ err = PTR_ERR(dev_priv->connector); -+ goto err_config_cleanup; -+ } -+ -+ dev_priv->encoder = pdp_tmds_encoder_create(dev); -+ if (IS_ERR(dev_priv->encoder)) { -+ DRM_ERROR("failed to create an encoder\n"); -+ err = PTR_ERR(dev_priv->encoder); -+ goto err_config_cleanup; -+ } -+ -+ err = drm_connector_attach_encoder(dev_priv->connector, -+ dev_priv->encoder); -+ if (err) { -+ DRM_ERROR("can't attach [ENCODER:%d:%s] to [CONNECTOR:%d:%s] (err=%d)\n", -+ dev_priv->encoder->base.id, -+ dev_priv->encoder->name, -+ dev_priv->connector->base.id, -+ dev_priv->connector->name, -+ err); -+ goto err_config_cleanup; -+ } -+ break; -+ case PDP_VERSION_PLATO: -+ // PLATO connectors are created in HDMI component driver -+ break; -+ default: -+ BUG(); -+ } -+ -+ DRM_DEBUG_DRIVER("initialised\n"); -+ -+ return 0; -+ -+err_config_cleanup: -+ drm_mode_config_cleanup(dev); -+ -+ return err; -+} -+ -+static inline int pdp_modeset_init_fbdev(struct pdp_drm_private *dev_priv) -+{ -+#if defined(CONFIG_DRM_FBDEV_EMULATION) -+ struct pdp_fbdev *fbdev; -+ int err; -+ -+ fbdev = pdp_fbdev_create(dev_priv); -+ if (IS_ERR(fbdev)) { -+ DRM_ERROR("failed to create a fb device"); -+ return PTR_ERR(fbdev); -+ } -+ dev_priv->fbdev = fbdev; -+ -+ /* -+ * pdpdrmfb is registered and available for userspace to use. If this -+ * is the only or primary device, fbcon has already bound a tty to it, -+ * and the following call will take no effect. However, this may be -+ * essential in order to sync the display when fbcon was already bound -+ * to a different tty (and fbdev). This triggers ->set_config() which -+ * will in turn set up a config and then do a modeset. 
-+ */ -+ err = drm_fb_helper_restore_fbdev_mode_unlocked(&dev_priv->fbdev->helper); -+ if (err) { -+ DRM_ERROR("failed to set mode (err=%d)\n", err); -+ return err; -+ } -+#endif -+ return 0; -+} -+ -+int pdp_modeset_late_init(struct pdp_drm_private *dev_priv) -+{ -+ struct drm_device *ddev = dev_priv->dev; -+ int err; -+ -+ drm_mode_config_reset(ddev); -+ -+ err = pdp_modeset_init_fbdev(dev_priv); -+ if (err) -+ DRM_INFO("fbdev init failure is not fatal, continue anyway.\n"); -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) -+ if (dev_priv->connector != NULL) { -+ err = drm_connector_register(dev_priv->connector); -+ if (err) { -+ DRM_ERROR("[CONNECTOR:%d:%s] failed to register (err=%d)\n", -+ dev_priv->connector->base.id, -+ dev_priv->connector->name, -+ err); -+ return err; -+ } -+ } -+#endif -+ return 0; -+} -+ -+void pdp_modeset_early_cleanup(struct pdp_drm_private *dev_priv) -+{ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) -+ if (dev_priv->connector != NULL) -+ drm_connector_unregister(dev_priv->connector); -+#endif -+} -+ -+void pdp_modeset_late_cleanup(struct pdp_drm_private *dev_priv) -+{ -+#if defined(CONFIG_DRM_FBDEV_EMULATION) -+ pdp_fbdev_destroy(dev_priv->fbdev); -+#endif -+ drm_mode_config_cleanup(dev_priv->dev); -+ -+ DRM_DEBUG_DRIVER("cleaned up\n"); -+} -diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_plane.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_plane.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_plane.c -@@ -0,0 +1,385 @@ -+/* -+ * @File -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". 
-+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include -+ -+#include "drm_pdp_drv.h" -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)) -+#include -+#endif -+ -+#include -+ -+#if defined(PDP_USE_ATOMIC) -+#include -+#include -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)) -+#include -+#else -+#include -+#endif -+#endif -+ -+#include -+ -+#include "drm_pdp_gem.h" -+#include "pdp_apollo.h" -+#include "pdp_odin.h" -+#include "pdp_plato.h" -+#include "pfim_defs.h" -+ -+#include "kernel_compatibility.h" -+ -+ -+static const uint32_t apollo_plato_formats[] = { -+ DRM_FORMAT_XRGB8888, -+ DRM_FORMAT_ARGB8888, -+}; -+static const uint32_t odin_formats[] = { -+ DRM_FORMAT_XRGB8888, -+ DRM_FORMAT_ARGB8888, -+ DRM_FORMAT_RGB565, -+}; -+ -+static const uint64_t default_modifiers[] = { -+ DRM_FORMAT_MOD_LINEAR, -+ DRM_FORMAT_MOD_INVALID -+}; -+static const uint64_t odin_modifiers[] = { -+ DRM_FORMAT_MOD_LINEAR, -+ DRM_FORMAT_MOD_PVR_FBCDC_8x8_V12, -+ DRM_FORMAT_MOD_PVR_FBCDC_16x4_V12, -+ DRM_FORMAT_MOD_INVALID -+}; -+ -+#if defined(PDP_USE_ATOMIC) -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)) -+static int pdp_plane_helper_atomic_check(struct drm_plane *plane, -+ struct drm_atomic_state *atomic_state) -+#else -+static int pdp_plane_helper_atomic_check(struct drm_plane *plane, -+ struct drm_plane_state *state) -+#endif -+{ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)) -+ struct drm_plane_state *state = drm_atomic_get_new_plane_state(atomic_state, -+ plane); -+#endif -+ struct drm_crtc_state *crtc_new_state; -+ -+ if (!state->crtc) -+ return 0; -+ -+ crtc_new_state = drm_atomic_get_new_crtc_state(state->state, -+ state->crtc); -+ -+ return drm_atomic_helper_check_plane_state(state, crtc_new_state, -+ DRM_PLANE_NO_SCALING, -+ DRM_PLANE_NO_SCALING, -+ false, true); -+} -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)) -+static void pdp_plane_helper_atomic_update(struct drm_plane *plane, -+ struct drm_atomic_state *atomic_state) -+#else -+static void pdp_plane_helper_atomic_update(struct drm_plane *plane, -+ struct drm_plane_state *old_state) -+#endif -+{ -+ struct drm_plane_state *plane_state = plane->state; -+ struct drm_framebuffer *fb = plane_state->fb; -+ -+ if (fb) { -+ pdp_plane_set_surface(plane_state->crtc, plane, fb, -+ plane_state->src_x, plane_state->src_y); -+ } -+} -+ -+static bool pdp_plane_format_mod_supported(struct drm_plane *plane, -+ uint32_t format, -+ uint64_t modifier) -+{ -+ struct pdp_drm_private *dev_priv = plane->dev->dev_private; -+ const uint32_t *supported_formats; -+ const uint64_t *supported_modifiers; -+ unsigned int num_supported_formats; -+ unsigned int num_supported_modifiers; -+ unsigned int i; -+ -+ if (modifier == DRM_FORMAT_MOD_INVALID) -+ return false; -+ -+ switch (dev_priv->version) { -+ case PDP_VERSION_ODIN: -+ supported_formats = odin_formats; -+ num_supported_formats = ARRAY_SIZE(odin_formats); -+ supported_modifiers = odin_modifiers; -+ num_supported_modifiers = ARRAY_SIZE(odin_modifiers); -+ break; -+ 
case PDP_VERSION_APOLLO: -+ case PDP_VERSION_PLATO: -+ supported_formats = apollo_plato_formats; -+ num_supported_formats = ARRAY_SIZE(apollo_plato_formats); -+ supported_modifiers = default_modifiers; -+ num_supported_modifiers = ARRAY_SIZE(default_modifiers); -+ break; -+ default: -+ DRM_ERROR("Unsupported PDP version\n"); -+ return false; -+ } -+ -+ for (i = 0; i < num_supported_formats; i++) { -+ if (supported_formats[i] == format) { -+ unsigned int j; -+ -+ for (j = 0; j < num_supported_modifiers; j++) -+ if (supported_modifiers[j] == modifier) -+ return true; -+ } -+ } -+ -+ return false; -+} -+ -+static const struct drm_plane_helper_funcs pdp_plane_helper_funcs = { -+ .prepare_fb = drm_gem_plane_helper_prepare_fb, -+ .atomic_check = pdp_plane_helper_atomic_check, -+ .atomic_update = pdp_plane_helper_atomic_update, -+}; -+ -+static const struct drm_plane_funcs pdp_plane_funcs = { -+ .update_plane = drm_atomic_helper_update_plane, -+ .disable_plane = drm_atomic_helper_disable_plane, -+ .destroy = drm_plane_helper_destroy, -+ .reset = drm_atomic_helper_plane_reset, -+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, -+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, -+ .format_mod_supported = pdp_plane_format_mod_supported, -+}; -+#else -+#define pdp_plane_funcs drm_primary_helper_funcs -+#endif -+ -+struct drm_plane *pdp_plane_create(struct drm_device *dev, -+ enum drm_plane_type type) -+{ -+ struct pdp_drm_private *dev_priv = dev->dev_private; -+ struct drm_plane *plane; -+ const uint32_t *supported_formats; -+ const uint64_t *supported_modifiers; -+ uint32_t num_supported_formats; -+ int err; -+ -+ switch (dev_priv->version) { -+ case PDP_VERSION_ODIN: -+ supported_formats = odin_formats; -+ num_supported_formats = ARRAY_SIZE(odin_formats); -+ supported_modifiers = odin_modifiers; -+ break; -+ case PDP_VERSION_APOLLO: -+ case PDP_VERSION_PLATO: -+ supported_formats = apollo_plato_formats; -+ num_supported_formats = ARRAY_SIZE(apollo_plato_formats); -+ supported_modifiers = default_modifiers; -+ break; -+ default: -+ DRM_ERROR("Unsupported PDP version\n"); -+ err = -EINVAL; -+ goto err_exit; -+ } -+ -+ plane = kzalloc(sizeof(*plane), GFP_KERNEL); -+ if (!plane) { -+ err = -ENOMEM; -+ goto err_exit; -+ } -+ -+ err = drm_universal_plane_init(dev, plane, 0, &pdp_plane_funcs, -+ supported_formats, -+ num_supported_formats, -+ supported_modifiers, type, NULL); -+ if (err) -+ goto err_plane_free; -+ -+#if defined(PDP_USE_ATOMIC) -+ drm_plane_helper_add(plane, &pdp_plane_helper_funcs); -+#endif -+ -+ DRM_DEBUG_DRIVER("[PLANE:%d]\n", plane->base.id); -+ -+ return plane; -+ -+err_plane_free: -+ kfree(plane); -+err_exit: -+ return ERR_PTR(err); -+} -+ -+void pdp_plane_set_surface(struct drm_crtc *crtc, struct drm_plane *plane, -+ struct drm_framebuffer *fb, -+ const uint32_t src_x, const uint32_t src_y) -+{ -+ struct pdp_drm_private *dev_priv = plane->dev->dev_private; -+ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); -+ struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb); -+ unsigned int pitch = fb->pitches[0]; -+ uint64_t address = pdp_gem_get_dev_addr(pdp_fb->obj[0]); -+ uint64_t modifier = 0; -+ uint32_t format; -+ uint32_t fbc_mode; -+ -+ /* -+ * User space specifies 'x' and 'y' and this is used to tell the display -+ * to scan out from part way through a buffer. 
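 *
 * Editor's worked example (hypothetical numbers, not part of the original
 * patch): for an XRGB8888 framebuffer with pitch = 1920 * 4 = 7680 bytes
 * and cpp = 4, a source offset of src_x = 16, src_y = 8 advances the
 * scan-out address below by 8 * 7680 + 16 * 4 = 61504 bytes, i.e. the
 * display starts reading 8 lines down and 16 pixels in.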
-+ */ -+ address += ((src_y * pitch) + (src_x * (pdp_drm_fb_cpp(fb)))); -+ -+ /* -+ * NOTE: If the buffer dimensions are less than the current mode then -+ * the output will appear in the top left of the screen. This can be -+ * centered by adjusting horizontal active start, right border start, -+ * vertical active start and bottom border start. At this point it's -+ * not entirely clear where this should be done. On the one hand it's -+ * related to pdp_crtc_helper_mode_set but on the other hand there -+ * might not always be a call to pdp_crtc_helper_mode_set. This needs -+ * to be investigated. -+ */ -+ switch (dev_priv->version) { -+ case PDP_VERSION_APOLLO: -+ switch (pdp_drm_fb_format(fb)) { -+ case DRM_FORMAT_ARGB8888: -+ case DRM_FORMAT_XRGB8888: -+ format = 0xE; -+ break; -+ default: -+ DRM_ERROR("unsupported pixel format (format = %d)\n", -+ pdp_drm_fb_format(fb)); -+ return; -+ } -+ -+ pdp_apollo_set_surface(plane->dev->dev, -+ pdp_crtc->pdp_reg, -+ 0, -+ address, -+ 0, 0, -+ fb->width, fb->height, pitch, -+ format, -+ 255, -+ false); -+ break; -+ case PDP_VERSION_ODIN: -+ switch (pdp_drm_fb_format(fb)) { -+ case DRM_FORMAT_ARGB8888: -+ case DRM_FORMAT_XRGB8888: -+ format = ODN_PDP_SURF_PIXFMT_ARGB8888; -+ break; -+ case DRM_FORMAT_RGB565: -+ format = ODN_PDP_SURF_PIXFMT_RGB565; -+ break; -+ default: -+ DRM_ERROR("unsupported pixel format (format = %d)\n", -+ pdp_drm_fb_format(fb)); -+ return; -+ } -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) -+ modifier = fb->modifier; -+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) -+ modifier = fb->modifier[0]; -+#endif -+ -+ switch (modifier) { -+ case DRM_FORMAT_MOD_PVR_FBCDC_8x8_V12: -+ fbc_mode = ODIN_PFIM_FBCDC_8X8_V12; -+ break; -+ case DRM_FORMAT_MOD_PVR_FBCDC_16x4_V12: -+ fbc_mode = ODIN_PFIM_FBCDC_16X4_V12; -+ break; -+ case DRM_FORMAT_MOD_LINEAR: -+ fbc_mode = ODIN_PFIM_MOD_LINEAR; -+ break; -+ default: -+ DRM_ERROR("unsupported fbc format (format = %llu)\n", -+ modifier); -+ return; -+ } -+ -+ pdp_odin_set_surface(plane->dev->dev, -+ pdp_crtc->pdp_reg, -+ 0, -+ address, fb->offsets[0], -+ 0, 0, -+ fb->width, fb->height, pitch, -+ format, -+ 255, -+ false, -+ pdp_crtc->pfim_reg, fbc_mode); -+ break; -+ case PDP_VERSION_PLATO: -+ switch (pdp_drm_fb_format(fb)) { -+ case DRM_FORMAT_ARGB8888: -+ case DRM_FORMAT_XRGB8888: -+ format = PLATO_PDP_PIXEL_FORMAT_ARGB8; -+ break; -+ default: -+ DRM_ERROR("unsupported pixel format (format = %d)\n", -+ pdp_drm_fb_format(fb)); -+ return; -+ } -+ -+ pdp_plato_set_surface(crtc->dev->dev, -+ pdp_crtc->pdp_reg, -+ pdp_crtc->pdp_bif_reg, -+ 0, -+ address, -+ 0, 0, -+ fb->width, fb->height, pitch, -+ format, -+ 255, -+ false); -+ break; -+ default: -+ BUG(); -+ } -+} -diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_tmds.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_tmds.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_tmds.c -@@ -0,0 +1,148 @@ -+/* -+ * @File -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ */ -+ -+#include "drm_pdp_drv.h" -+ -+#include -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)) -+#include -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) -+#include -+#else -+#include -+#include -+#endif -+ -+#include "kernel_compatibility.h" -+ -+static void pdp_tmds_encoder_helper_dpms(struct drm_encoder *encoder, int mode) -+{ -+} -+ -+static bool -+pdp_tmds_encoder_helper_mode_fixup(struct drm_encoder *encoder, -+ const struct drm_display_mode *mode, -+ struct drm_display_mode *adjusted_mode) -+{ -+ return true; -+} -+ -+static void pdp_tmds_encoder_helper_prepare(struct drm_encoder *encoder) -+{ -+} -+ -+static void pdp_tmds_encoder_helper_commit(struct drm_encoder *encoder) -+{ -+} -+ -+static void -+pdp_tmds_encoder_helper_mode_set(struct drm_encoder *encoder, -+ struct drm_display_mode *mode, -+ struct drm_display_mode *adjusted_mode) -+{ -+} -+ -+static void pdp_tmds_encoder_destroy(struct drm_encoder *encoder) -+{ -+ struct pdp_drm_private *dev_priv = encoder->dev->dev_private; -+ -+ DRM_DEBUG_DRIVER("[ENCODER:%d:%s]\n", -+ encoder->base.id, -+ encoder->name); -+ -+ drm_encoder_cleanup(encoder); -+ -+ kfree(encoder); -+ dev_priv->encoder = NULL; -+} -+ -+static const struct drm_encoder_helper_funcs pdp_tmds_encoder_helper_funcs = { -+ .dpms = pdp_tmds_encoder_helper_dpms, -+ .mode_fixup = pdp_tmds_encoder_helper_mode_fixup, -+ .prepare = pdp_tmds_encoder_helper_prepare, -+ .commit = pdp_tmds_encoder_helper_commit, -+ .mode_set = pdp_tmds_encoder_helper_mode_set, -+ .detect = NULL, -+ .disable = NULL, -+}; -+ -+static const struct drm_encoder_funcs pdp_tmds_encoder_funcs = { -+ .reset = NULL, -+ .destroy = pdp_tmds_encoder_destroy, -+}; -+ -+struct drm_encoder * -+pdp_tmds_encoder_create(struct drm_device *dev) -+{ -+ struct drm_encoder *encoder; -+ int err; -+ -+ encoder = kzalloc(sizeof(*encoder), GFP_KERNEL); -+ if (!encoder) -+ return ERR_PTR(-ENOMEM); -+ -+ err = drm_encoder_init(dev, -+ encoder, -+ &pdp_tmds_encoder_funcs, -+ DRM_MODE_ENCODER_TMDS, -+ NULL); -+ if (err) { -+ DRM_ERROR("Failed to initialise encoder"); -+ return ERR_PTR(err); -+ } -+ drm_encoder_helper_add(encoder, &pdp_tmds_encoder_helper_funcs); -+ -+ /* -+ * This is a bit field that's used to determine which -+ * CRTCs can drive this encoder. -+ */ -+ encoder->possible_crtcs = 0x1; -+ -+ DRM_DEBUG_DRIVER("[ENCODER:%d:%s]\n", -+ encoder->base.id, -+ encoder->name); -+ -+ return encoder; -+} -diff --git a/drivers/gpu/drm/img-rogue/apollo/odin_defs.h b/drivers/gpu/drm/img-rogue/apollo/odin_defs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/odin_defs.h -@@ -0,0 +1,326 @@ -+/**************************************************************************** -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Odin Memory Map - View from PCIe -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+****************************************************************************/ -+ -+#ifndef _ODIN_DEFS_H_ -+#define _ODIN_DEFS_H_ -+ -+/* These defines have not been autogenerated */ -+ -+#define PCI_VENDOR_ID_ODIN (0x1AEE) -+#define DEVICE_ID_ODIN (0x1010) -+#define DEVICE_ID_TBA (0x1CF2) -+ -+/* PCI BAR 0 contains the PDP regs and the Odin system regs */ -+#define ODN_SYS_BAR 0 -+#define ODN_SYS_REGION_SIZE 0x000800000 /* 8MB */ -+ -+#define ODN_SYS_REGS_OFFSET 0 -+#define ODN_SYS_REGS_SIZE 0x000400000 /* 4MB */ -+ -+#define ODN_PDP_REGS_OFFSET 0x000440000 -+#define ODN_PDP_REGS_SIZE 0x000040000 /* 256k */ -+ -+#define ODN_PDP2_REGS_OFFSET 0x000480000 -+#define ODN_PDP2_REGS_SIZE 0x000040000 /* 256k */ -+ -+#define ODN_PDP2_PFIM_OFFSET 0x000500000 -+#define ODN_PDP2_PFIM_SIZE 0x000040000 /* 256k */ -+ -+#define ODIN_DMA_REGS_OFFSET 0x0004C0000 -+#define ODIN_DMA_REGS_SIZE 0x000040000 /* 256k */ -+ -+#define ODIN_DMA_CHAN_REGS_SIZE 0x000001000 /* 4k */ -+ -+/* PCI BAR 2 contains the Device Under Test SOCIF 64MB region */ -+#define ODN_DUT_SOCIF_BAR 2 -+#define ODN_DUT_SOCIF_OFFSET 0x000000000 -+#define ODN_DUT_SOCIF_SIZE 0x004000000 /* 64MB */ -+ -+/* PCI BAR 4 contains the on-board 1GB DDR memory */ -+#define ODN_DDR_BAR 4 -+#define ODN_DDR_MEM_OFFSET 0x000000000 -+#define ODN_DDR_MEM_SIZE 0x040000000 /* 1GB */ -+ -+/* Odin system register banks */ -+#define ODN_REG_BANK_CORE 0x00000 -+#define ODN_REG_BANK_TCF_SPI_MASTER 0x02000 -+#define ODN_REG_BANK_ODN_CLK_BLK 0x0A000 -+#define ODN_REG_BANK_ODN_MCU_COMMUNICATOR 0x0C000 -+#define ODN_REG_BANK_DB_TYPE_ID 0x0C200 -+#define ODN_REG_BANK_DB_TYPE_ID_TYPE_TCFVUOCTA 0x000000C6U -+#define ODN_REG_BANK_DB_TYPE_ID_TYPE_MASK 0x000000C0U -+#define ODN_REG_BANK_DB_TYPE_ID_TYPE_SHIFT 0x6 -+#define ODN_REG_BANK_ODN_I2C 0x0E000 -+#define ODN_REG_BANK_MULTI_CLK_ALIGN 0x20000 -+#define ODN_REG_BANK_ALIGN_DATA_TX 0x22000 -+#define ODN_REG_BANK_SAI_RX_DDR_0 0x24000 -+#define ODN_REG_BANK_SAI_RX_DDR(n) (ODN_REG_BANK_SAI_RX_DDR_0 + (0x02000*n)) -+#define ODN_REG_BANK_SAI_TX_DDR_0 0x3A000 -+#define ODN_REG_BANK_SAI_TX_DDR(n) (ODN_REG_BANK_SAI_TX_DDR_0 + (0x02000*n)) -+#define ODN_REG_BANK_SAI_TX_SDR 0x4E000 -+ -+/* Odin SPI regs */ -+#define ODN_SPI_MST_ADDR_RDNWR 0x0000 -+#define ODN_SPI_MST_WDATA 0x0004 -+#define ODN_SPI_MST_RDATA 0x0008 -+#define ODN_SPI_MST_STATUS 0x000C -+#define ODN_SPI_MST_GO 0x0010 -+ -+ -+/* -+ Odin CLK regs - the odn_clk_blk module defs are not auto generated -+ */ -+#define ODN_PDP_P_CLK_OUT_DIVIDER_REG1 0x620 -+#define ODN_PDP_PCLK_ODIV1_LO_TIME_MASK 0x0000003FU -+#define ODN_PDP_PCLK_ODIV1_LO_TIME_SHIFT 0 -+#define ODN_PDP_PCLK_ODIV1_HI_TIME_MASK 0x00000FC0U -+#define ODN_PDP_PCLK_ODIV1_HI_TIME_SHIFT 6 -+ -+#define ODN_PDP_P_CLK_OUT_DIVIDER_REG2 0x624 -+#define ODN_PDP_PCLK_ODIV2_NOCOUNT_MASK 0x00000040U -+#define ODN_PDP_PCLK_ODIV2_NOCOUNT_SHIFT 6 -+#define ODN_PDP_PCLK_ODIV2_EDGE_MASK 0x00000080U -+#define ODN_PDP_PCLK_ODIV2_EDGE_SHIFT 7 -+ -+#define ODN_PDP_P_CLK_OUT_DIVIDER_REG3 0x61C -+ -+#define ODN_PDP_M_CLK_OUT_DIVIDER_REG1 0x628 -+#define ODN_PDP_MCLK_ODIV1_LO_TIME_MASK 0x0000003FU -+#define ODN_PDP_MCLK_ODIV1_LO_TIME_SHIFT 0 -+#define ODN_PDP_MCLK_ODIV1_HI_TIME_MASK 0x00000FC0U -+#define ODN_PDP_MCLK_ODIV1_HI_TIME_SHIFT 6 -+ -+#define ODN_PDP_M_CLK_OUT_DIVIDER_REG2 0x62C -+#define ODN_PDP_MCLK_ODIV2_NOCOUNT_MASK 0x00000040U -+#define ODN_PDP_MCLK_ODIV2_NOCOUNT_SHIFT 6 -+#define ODN_PDP_MCLK_ODIV2_EDGE_MASK 0x00000080U -+#define ODN_PDP_MCLK_ODIV2_EDGE_SHIFT 7 -+ -+#define ODN_PDP_P_CLK_MULTIPLIER_REG1 0x650 -+#define 
ODN_PDP_PCLK_MUL1_LO_TIME_MASK 0x0000003FU -+#define ODN_PDP_PCLK_MUL1_LO_TIME_SHIFT 0 -+#define ODN_PDP_PCLK_MUL1_HI_TIME_MASK 0x00000FC0U -+#define ODN_PDP_PCLK_MUL1_HI_TIME_SHIFT 6 -+ -+#define ODN_PDP_P_CLK_MULTIPLIER_REG2 0x654 -+#define ODN_PDP_PCLK_MUL2_NOCOUNT_MASK 0x00000040U -+#define ODN_PDP_PCLK_MUL2_NOCOUNT_SHIFT 6 -+#define ODN_PDP_PCLK_MUL2_EDGE_MASK 0x00000080U -+#define ODN_PDP_PCLK_MUL2_EDGE_SHIFT 7 -+ -+#define ODN_PDP_P_CLK_MULTIPLIER_REG3 0x64C -+ -+#define ODN_PDP_P_CLK_IN_DIVIDER_REG 0x658 -+#define ODN_PDP_PCLK_IDIV_LO_TIME_MASK 0x0000003FU -+#define ODN_PDP_PCLK_IDIV_LO_TIME_SHIFT 0 -+#define ODN_PDP_PCLK_IDIV_HI_TIME_MASK 0x00000FC0U -+#define ODN_PDP_PCLK_IDIV_HI_TIME_SHIFT 6 -+#define ODN_PDP_PCLK_IDIV_NOCOUNT_MASK 0x00001000U -+#define ODN_PDP_PCLK_IDIV_NOCOUNT_SHIFT 12 -+#define ODN_PDP_PCLK_IDIV_EDGE_MASK 0x00002000U -+#define ODN_PDP_PCLK_IDIV_EDGE_SHIFT 13 -+ -+/* -+ * DUT core clock input divider, multiplier and out divider. -+ */ -+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1 (0x0028) -+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_MASK (0x00000FC0U) -+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT (6) -+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_MASK (0x0000003FU) -+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT (0) -+ -+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2 (0x002C) -+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_MASK (0x00000080U) -+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_SHIFT (7) -+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_MASK (0x00000040U) -+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT (6) -+ -+#define ODN_DUT_CORE_CLK_MULTIPLIER1 (0x0050) -+#define ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME_MASK (0x00000FC0U) -+#define ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME_SHIFT (6) -+#define ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME_MASK (0x0000003FU) -+#define ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME_SHIFT (0) -+ -+#define ODN_DUT_CORE_CLK_MULTIPLIER2 (0x0054) -+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_MASK (0x00007000U) -+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_SHIFT (12) -+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_EN_MASK (0x00000800U) -+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_EN_SHIFT (11) -+#define ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE_MASK (0x00000080U) -+#define ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE_SHIFT (7) -+#define ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT_MASK (0x00000040U) -+#define ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT_SHIFT (6) -+ -+#define ODN_DUT_CORE_CLK_IN_DIVIDER1 (0x0058) -+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE_MASK (0x00002000U) -+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE_SHIFT (13) -+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT_MASK (0x00001000U) -+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT_SHIFT (12) -+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_MASK (0x00000FC0U) -+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_SHIFT (6) -+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_MASK (0x0000003FU) -+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_SHIFT (0) -+ -+/* -+ * DUT interface clock input divider, multiplier and out divider. 
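 *
 * Editor's note (assumption, not stated in the original patch): these
 * divider/multiplier registers appear to follow the usual Xilinx 7-series
 * MMCM DRP encoding, where the programmed ratio is HI_TIME + LO_TIME,
 * NOCOUNT selects a ratio of 1, and EDGE accounts for odd ratios. Under
 * that assumption, a divide-by-8 would be written as HI_TIME = 4,
 * LO_TIME = 4 with NOCOUNT and EDGE clear, and a divide-by-9 would split
 * as 5 + 4 with EDGE set for the extra half period.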
-+ */ -+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1 (0x0220) -+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME_MASK (0x00000FC0U) -+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT (6) -+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME_MASK (0x0000003FU) -+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT (0) -+ -+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2 (0x0224) -+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE_MASK (0x00000080U) -+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE_SHIFT (7) -+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT_MASK (0x00000040U) -+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT (6) -+ -+#define ODN_DUT_IFACE_CLK_MULTIPLIER1 (0x0250) -+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME_MASK (0x00000FC0U) -+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME_SHIFT (6) -+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME_MASK (0x0000003FU) -+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME_SHIFT (0) -+ -+#define ODN_DUT_IFACE_CLK_MULTIPLIER2 (0x0254) -+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_MASK (0x00007000U) -+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_SHIFT (12) -+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_EN_MASK (0x00000800U) -+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_EN_SHIFT (11) -+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE_MASK (0x00000080U) -+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE_SHIFT (7) -+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT_MASK (0x00000040U) -+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT_SHIFT (6) -+ -+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1 (0x0258) -+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE_MASK (0x00002000U) -+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE_SHIFT (13) -+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT_MASK (0x00001000U) -+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT_SHIFT (12) -+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_MASK (0x00000FC0U) -+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_SHIFT (6) -+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME_MASK (0x0000003FU) -+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME_SHIFT (0) -+ -+ -+/* -+ * Min max values from Xilinx Virtex7 data sheet DS183, for speed grade 2 -+ * All in Hz -+ */ -+#define ODN_INPUT_CLOCK_SPEED (100000000U) -+#define ODN_INPUT_CLOCK_SPEED_MIN (10000000U) -+#define ODN_INPUT_CLOCK_SPEED_MAX (933000000U) -+#define ODN_OUTPUT_CLOCK_SPEED_MIN (4690000U) -+#define ODN_OUTPUT_CLOCK_SPEED_MAX (933000000U) -+#define ODN_VCO_MIN (600000000U) -+#define ODN_VCO_MAX (1440000000U) -+#define ODN_PFD_MIN (10000000U) -+#define ODN_PFD_MAX (500000000U) -+ -+/* -+ * Max values that can be set in DRP registers -+ */ -+#define ODN_OREG_VALUE_MAX (126.875f) -+#define ODN_MREG_VALUE_MAX (126.875f) -+#define ODN_DREG_VALUE_MAX (126U) -+ -+ -+#define ODN_MMCM_LOCK_STATUS_DUT_CORE (0x00000001U) -+#define ODN_MMCM_LOCK_STATUS_DUT_IF (0x00000002U) -+#define ODN_MMCM_LOCK_STATUS_PDPP (0x00000008U) -+ -+/* -+ Odin interrupt flags -+*/ -+#define ODN_INTERRUPT_ENABLE_PDP1 (1 << ODN_INTERRUPT_ENABLE_PDP1_SHIFT) -+#define ODN_INTERRUPT_ENABLE_PDP2 (1 << ODN_INTERRUPT_ENABLE_PDP2_SHIFT) -+#define ODN_INTERRUPT_ENABLE_DUT (1 << ODN_INTERRUPT_ENABLE_DUT_SHIFT) -+#define ODN_INTERRUPT_STATUS_PDP1 (1 << ODN_INTERRUPT_STATUS_PDP1_SHIFT) -+#define ODN_INTERRUPT_STATUS_PDP2 (1 << ODN_INTERRUPT_STATUS_PDP2_SHIFT) -+#define ODN_INTERRUPT_STATUS_DUT (1 << ODN_INTERRUPT_STATUS_DUT_SHIFT) -+#define ODN_INTERRUPT_CLEAR_PDP1 (1 << ODN_INTERRUPT_CLR_PDP1_SHIFT) -+#define ODN_INTERRUPT_CLEAR_PDP2 (1 << ODN_INTERRUPT_CLR_PDP2_SHIFT) -+#define ODN_INTERRUPT_CLEAR_DUT (1 << ODN_INTERRUPT_CLR_DUT_SHIFT) -+ -+#define 
ODN_INTERRUPT_ENABLE_CDMA (1 << ODN_INTERRUPT_ENABLE_CDMA_SHIFT) -+#define ODN_INTERRUPT_STATUS_CDMA (1 << ODN_INTERRUPT_STATUS_CDMA_SHIFT) -+#define ODN_INTERRUPT_CLEAR_CDMA (1 << ODN_INTERRUPT_CLR_CDMA_SHIFT) -+ -+#define ODN_INTERRUPT_ENABLE_CDMA2 (1 << (ODN_INTERRUPT_ENABLE_CDMA_SHIFT + 1)) -+#define ODN_INTERRUPT_STATUS_CDMA2 (1 << (ODN_INTERRUPT_STATUS_CDMA_SHIFT + 1)) -+#define ODN_INTERRUPT_CLEAR_CDMA2 (1 << (ODN_INTERRUPT_CLR_CDMA_SHIFT + 1)) -+ -+/* -+ Other defines -+*/ -+#define ODN_STREAM_OFF 0 -+#define ODN_STREAM_ON 1 -+#define ODN_SYNC_GEN_DISABLE 0 -+#define ODN_SYNC_GEN_ENABLE 1 -+#define ODN_INTERLACE_DISABLE 0 -+#define ODN_INTERLACE_ENABLE 1 -+#define ODN_PIXEL_CLOCK_INVERTED 1 -+#define ODN_HSYNC_POLARITY_ACTIVE_HIGH 1 -+ -+#define ODN_PDP_INTCLR_ALL 0x000FFFFFU -+#define ODN_PDP_INTSTAT_ALL_OURUN_MASK 0x000FFFF0U -+ -+/* -+ DMA defs -+*/ -+#define ODN_CDMA_ADDR_WIDTH 35 -+#define ODN_DMA_HW_DESC_HEAP_SIZE 0x100000 -+#define ODN_DMA_CHAN_RX 0 -+#define ODN_DMA_CHAN_TX 1 -+ -+#define ODIN_DMA_TX_CHAN_NAME "tx" -+#define ODIN_DMA_RX_CHAN_NAME "rx" -+ -+/* -+ FBC defs -+*/ -+#define ODIN_PFIM_RELNUM (005U) -+ -+#endif /* _ODIN_DEFS_H_ */ -+ -+/***************************************************************************** -+ End of file (odn_defs.h) -+*****************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/apollo/odin_pdp_regs.h b/drivers/gpu/drm/img-rogue/apollo/odin_pdp_regs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/odin_pdp_regs.h -@@ -0,0 +1,8540 @@ -+/*************************************************************************/ /*! -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+/* tab size 4 */ -+ -+#ifndef ODN_PDP_REGS_H -+#define ODN_PDP_REGS_H -+ -+/* Odin-PDP hardware register definitions */ -+ -+ -+#define ODN_PDP_GRPH1SURF_OFFSET (0x0000) -+ -+/* PDP, GRPH1SURF, GRPH1PIXFMT -+*/ -+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK (0xF8000000) -+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_LSBMASK (0x0000001F) -+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT (27) -+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_LENGTH (5) -+#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1SURF, GRPH1USEGAMMA -+*/ -+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_MASK (0x04000000) -+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_SHIFT (26) -+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_LENGTH (1) -+#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1SURF, GRPH1USECSC -+*/ -+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_MASK (0x02000000) -+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_SHIFT (25) -+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_LENGTH (1) -+#define ODN_PDP_GRPH1SURF_GRPH1USECSC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1SURF, GRPH1LUTRWCHOICE -+*/ -+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_MASK (0x01000000) -+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SHIFT (24) -+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LENGTH (1) -+#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1SURF, GRPH1USELUT -+*/ -+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_MASK (0x00800000) -+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_SHIFT (23) -+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_LENGTH (1) -+#define ODN_PDP_GRPH1SURF_GRPH1USELUT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH2SURF_OFFSET (0x0004) -+ -+/* PDP, GRPH2SURF, GRPH2PIXFMT -+*/ -+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_MASK (0xF8000000) -+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_LSBMASK (0x0000001F) -+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SHIFT (27) -+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_LENGTH (5) -+#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2SURF, GRPH2USEGAMMA -+*/ -+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_MASK (0x04000000) -+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_SHIFT (26) -+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_LENGTH (1) -+#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2SURF, GRPH2USECSC -+*/ -+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_MASK (0x02000000) -+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_SHIFT (25) -+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_LENGTH (1) -+#define ODN_PDP_GRPH2SURF_GRPH2USECSC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2SURF, GRPH2LUTRWCHOICE -+*/ 
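/*
 * Editor's sketch (not part of the original patch): the register field
 * definitions in this header follow a MASK/LSBMASK/SHIFT/LENGTH pattern.
 * A hypothetical read-modify-write helper using the GRPH1SURF pixel-format
 * field defined above might look like the following; pdp_read()/pdp_write()
 * are stand-ins for whatever register accessors the driver actually uses.
 */
#if 0	/* illustration only */
static void set_grph1_pixfmt(void __iomem *pdp_reg, u32 pixfmt)
{
	/* Read the current surface register, replace only the PIXFMT field. */
	u32 val = pdp_read(pdp_reg, ODN_PDP_GRPH1SURF_OFFSET);

	val &= ~ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK;
	val |= (pixfmt << ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT) &
	       ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK;

	pdp_write(pdp_reg, ODN_PDP_GRPH1SURF_OFFSET, val);
}
#endif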
-+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_MASK (0x01000000) -+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SHIFT (24) -+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LENGTH (1) -+#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2SURF, GRPH2USELUT -+*/ -+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_MASK (0x00800000) -+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_SHIFT (23) -+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_LENGTH (1) -+#define ODN_PDP_GRPH2SURF_GRPH2USELUT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH3SURF_OFFSET (0x0008) -+ -+/* PDP, GRPH3SURF, GRPH3PIXFMT -+*/ -+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_MASK (0xF8000000) -+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_LSBMASK (0x0000001F) -+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_SHIFT (27) -+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_LENGTH (5) -+#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3SURF, GRPH3USEGAMMA -+*/ -+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_MASK (0x04000000) -+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_SHIFT (26) -+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_LENGTH (1) -+#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3SURF, GRPH3USECSC -+*/ -+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_MASK (0x02000000) -+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_SHIFT (25) -+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_LENGTH (1) -+#define ODN_PDP_GRPH3SURF_GRPH3USECSC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3SURF, GRPH3LUTRWCHOICE -+*/ -+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_MASK (0x01000000) -+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SHIFT (24) -+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LENGTH (1) -+#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3SURF, GRPH3USELUT -+*/ -+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_MASK (0x00800000) -+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_SHIFT (23) -+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_LENGTH (1) -+#define ODN_PDP_GRPH3SURF_GRPH3USELUT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH4SURF_OFFSET (0x000C) -+ -+/* PDP, GRPH4SURF, GRPH4PIXFMT -+*/ -+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_MASK (0xF8000000) -+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_LSBMASK (0x0000001F) -+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SHIFT (27) -+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_LENGTH (5) -+#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4SURF, GRPH4USEGAMMA -+*/ -+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_MASK (0x04000000) -+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_SHIFT (26) -+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_LENGTH (1) -+#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4SURF, GRPH4USECSC -+*/ -+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_MASK (0x02000000) -+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_SHIFT (25) -+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_LENGTH (1) -+#define ODN_PDP_GRPH4SURF_GRPH4USECSC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4SURF, GRPH4LUTRWCHOICE -+*/ -+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_MASK (0x01000000) -+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LSBMASK 
(0x00000001) -+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SHIFT (24) -+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LENGTH (1) -+#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4SURF, GRPH4USELUT -+*/ -+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_MASK (0x00800000) -+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_SHIFT (23) -+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_LENGTH (1) -+#define ODN_PDP_GRPH4SURF_GRPH4USELUT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1SURF_OFFSET (0x0010) -+ -+/* PDP, VID1SURF, VID1PIXFMT -+*/ -+#define ODN_PDP_VID1SURF_VID1PIXFMT_MASK (0xF8000000) -+#define ODN_PDP_VID1SURF_VID1PIXFMT_LSBMASK (0x0000001F) -+#define ODN_PDP_VID1SURF_VID1PIXFMT_SHIFT (27) -+#define ODN_PDP_VID1SURF_VID1PIXFMT_LENGTH (5) -+#define ODN_PDP_VID1SURF_VID1PIXFMT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SURF, VID1USEGAMMA -+*/ -+#define ODN_PDP_VID1SURF_VID1USEGAMMA_MASK (0x04000000) -+#define ODN_PDP_VID1SURF_VID1USEGAMMA_LSBMASK (0x00000001) -+#define ODN_PDP_VID1SURF_VID1USEGAMMA_SHIFT (26) -+#define ODN_PDP_VID1SURF_VID1USEGAMMA_LENGTH (1) -+#define ODN_PDP_VID1SURF_VID1USEGAMMA_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SURF, VID1USECSC -+*/ -+#define ODN_PDP_VID1SURF_VID1USECSC_MASK (0x02000000) -+#define ODN_PDP_VID1SURF_VID1USECSC_LSBMASK (0x00000001) -+#define ODN_PDP_VID1SURF_VID1USECSC_SHIFT (25) -+#define ODN_PDP_VID1SURF_VID1USECSC_LENGTH (1) -+#define ODN_PDP_VID1SURF_VID1USECSC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SURF, VID1USEI2P -+*/ -+#define ODN_PDP_VID1SURF_VID1USEI2P_MASK (0x01000000) -+#define ODN_PDP_VID1SURF_VID1USEI2P_LSBMASK (0x00000001) -+#define ODN_PDP_VID1SURF_VID1USEI2P_SHIFT (24) -+#define ODN_PDP_VID1SURF_VID1USEI2P_LENGTH (1) -+#define ODN_PDP_VID1SURF_VID1USEI2P_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SURF, VID1COSITED -+*/ -+#define ODN_PDP_VID1SURF_VID1COSITED_MASK (0x00800000) -+#define ODN_PDP_VID1SURF_VID1COSITED_LSBMASK (0x00000001) -+#define ODN_PDP_VID1SURF_VID1COSITED_SHIFT (23) -+#define ODN_PDP_VID1SURF_VID1COSITED_LENGTH (1) -+#define ODN_PDP_VID1SURF_VID1COSITED_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SURF, VID1USEHQCD -+*/ -+#define ODN_PDP_VID1SURF_VID1USEHQCD_MASK (0x00400000) -+#define ODN_PDP_VID1SURF_VID1USEHQCD_LSBMASK (0x00000001) -+#define ODN_PDP_VID1SURF_VID1USEHQCD_SHIFT (22) -+#define ODN_PDP_VID1SURF_VID1USEHQCD_LENGTH (1) -+#define ODN_PDP_VID1SURF_VID1USEHQCD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SURF, VID1USEINSTREAM -+*/ -+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_MASK (0x00200000) -+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_LSBMASK (0x00000001) -+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_SHIFT (21) -+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_LENGTH (1) -+#define ODN_PDP_VID1SURF_VID1USEINSTREAM_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2SURF_OFFSET (0x0014) -+ -+/* PDP, VID2SURF, VID2PIXFMT -+*/ -+#define ODN_PDP_VID2SURF_VID2PIXFMT_MASK (0xF8000000) -+#define ODN_PDP_VID2SURF_VID2PIXFMT_LSBMASK (0x0000001F) -+#define ODN_PDP_VID2SURF_VID2PIXFMT_SHIFT (27) -+#define ODN_PDP_VID2SURF_VID2PIXFMT_LENGTH (5) -+#define ODN_PDP_VID2SURF_VID2PIXFMT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SURF, VID2COSITED -+*/ -+#define ODN_PDP_VID2SURF_VID2COSITED_MASK (0x00800000) -+#define ODN_PDP_VID2SURF_VID2COSITED_LSBMASK (0x00000001) -+#define ODN_PDP_VID2SURF_VID2COSITED_SHIFT (23) -+#define ODN_PDP_VID2SURF_VID2COSITED_LENGTH (1) -+#define ODN_PDP_VID2SURF_VID2COSITED_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SURF, VID2USEGAMMA -+*/ -+#define 
ODN_PDP_VID2SURF_VID2USEGAMMA_MASK (0x04000000) -+#define ODN_PDP_VID2SURF_VID2USEGAMMA_LSBMASK (0x00000001) -+#define ODN_PDP_VID2SURF_VID2USEGAMMA_SHIFT (26) -+#define ODN_PDP_VID2SURF_VID2USEGAMMA_LENGTH (1) -+#define ODN_PDP_VID2SURF_VID2USEGAMMA_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SURF, VID2USECSC -+*/ -+#define ODN_PDP_VID2SURF_VID2USECSC_MASK (0x02000000) -+#define ODN_PDP_VID2SURF_VID2USECSC_LSBMASK (0x00000001) -+#define ODN_PDP_VID2SURF_VID2USECSC_SHIFT (25) -+#define ODN_PDP_VID2SURF_VID2USECSC_LENGTH (1) -+#define ODN_PDP_VID2SURF_VID2USECSC_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3SURF_OFFSET (0x0018) -+ -+/* PDP, VID3SURF, VID3PIXFMT -+*/ -+#define ODN_PDP_VID3SURF_VID3PIXFMT_MASK (0xF8000000) -+#define ODN_PDP_VID3SURF_VID3PIXFMT_LSBMASK (0x0000001F) -+#define ODN_PDP_VID3SURF_VID3PIXFMT_SHIFT (27) -+#define ODN_PDP_VID3SURF_VID3PIXFMT_LENGTH (5) -+#define ODN_PDP_VID3SURF_VID3PIXFMT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SURF, VID3COSITED -+*/ -+#define ODN_PDP_VID3SURF_VID3COSITED_MASK (0x00800000) -+#define ODN_PDP_VID3SURF_VID3COSITED_LSBMASK (0x00000001) -+#define ODN_PDP_VID3SURF_VID3COSITED_SHIFT (23) -+#define ODN_PDP_VID3SURF_VID3COSITED_LENGTH (1) -+#define ODN_PDP_VID3SURF_VID3COSITED_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SURF, VID3USEGAMMA -+*/ -+#define ODN_PDP_VID3SURF_VID3USEGAMMA_MASK (0x04000000) -+#define ODN_PDP_VID3SURF_VID3USEGAMMA_LSBMASK (0x00000001) -+#define ODN_PDP_VID3SURF_VID3USEGAMMA_SHIFT (26) -+#define ODN_PDP_VID3SURF_VID3USEGAMMA_LENGTH (1) -+#define ODN_PDP_VID3SURF_VID3USEGAMMA_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SURF, VID3USECSC -+*/ -+#define ODN_PDP_VID3SURF_VID3USECSC_MASK (0x02000000) -+#define ODN_PDP_VID3SURF_VID3USECSC_LSBMASK (0x00000001) -+#define ODN_PDP_VID3SURF_VID3USECSC_SHIFT (25) -+#define ODN_PDP_VID3SURF_VID3USECSC_LENGTH (1) -+#define ODN_PDP_VID3SURF_VID3USECSC_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4SURF_OFFSET (0x001C) -+ -+/* PDP, VID4SURF, VID4PIXFMT -+*/ -+#define ODN_PDP_VID4SURF_VID4PIXFMT_MASK (0xF8000000) -+#define ODN_PDP_VID4SURF_VID4PIXFMT_LSBMASK (0x0000001F) -+#define ODN_PDP_VID4SURF_VID4PIXFMT_SHIFT (27) -+#define ODN_PDP_VID4SURF_VID4PIXFMT_LENGTH (5) -+#define ODN_PDP_VID4SURF_VID4PIXFMT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SURF, VID4COSITED -+*/ -+#define ODN_PDP_VID4SURF_VID4COSITED_MASK (0x00800000) -+#define ODN_PDP_VID4SURF_VID4COSITED_LSBMASK (0x00000001) -+#define ODN_PDP_VID4SURF_VID4COSITED_SHIFT (23) -+#define ODN_PDP_VID4SURF_VID4COSITED_LENGTH (1) -+#define ODN_PDP_VID4SURF_VID4COSITED_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SURF, VID4USEGAMMA -+*/ -+#define ODN_PDP_VID4SURF_VID4USEGAMMA_MASK (0x04000000) -+#define ODN_PDP_VID4SURF_VID4USEGAMMA_LSBMASK (0x00000001) -+#define ODN_PDP_VID4SURF_VID4USEGAMMA_SHIFT (26) -+#define ODN_PDP_VID4SURF_VID4USEGAMMA_LENGTH (1) -+#define ODN_PDP_VID4SURF_VID4USEGAMMA_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SURF, VID4USECSC -+*/ -+#define ODN_PDP_VID4SURF_VID4USECSC_MASK (0x02000000) -+#define ODN_PDP_VID4SURF_VID4USECSC_LSBMASK (0x00000001) -+#define ODN_PDP_VID4SURF_VID4USECSC_SHIFT (25) -+#define ODN_PDP_VID4SURF_VID4USECSC_LENGTH (1) -+#define ODN_PDP_VID4SURF_VID4USECSC_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH1CTRL_OFFSET (0x0020) -+ -+/* PDP, GRPH1CTRL, GRPH1STREN -+*/ -+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_MASK (0x80000000) -+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_SHIFT (31) -+#define ODN_PDP_GRPH1CTRL_GRPH1STREN_LENGTH (1) -+#define 
ODN_PDP_GRPH1CTRL_GRPH1STREN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1CTRL, GRPH1CKEYEN -+*/ -+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_MASK (0x40000000) -+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_SHIFT (30) -+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_LENGTH (1) -+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1CTRL, GRPH1CKEYSRC -+*/ -+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_MASK (0x20000000) -+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_SHIFT (29) -+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_LENGTH (1) -+#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1CTRL, GRPH1BLEND -+*/ -+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_MASK (0x18000000) -+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_LSBMASK (0x00000003) -+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_SHIFT (27) -+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_LENGTH (2) -+#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1CTRL, GRPH1BLENDPOS -+*/ -+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_MASK (0x07000000) -+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_LSBMASK (0x00000007) -+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SHIFT (24) -+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_LENGTH (3) -+#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1CTRL, GRPH1DITHEREN -+*/ -+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_MASK (0x00800000) -+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_SHIFT (23) -+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_LENGTH (1) -+#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH2CTRL_OFFSET (0x0024) -+ -+/* PDP, GRPH2CTRL, GRPH2STREN -+*/ -+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_MASK (0x80000000) -+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_SHIFT (31) -+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_LENGTH (1) -+#define ODN_PDP_GRPH2CTRL_GRPH2STREN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2CTRL, GRPH2CKEYEN -+*/ -+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_MASK (0x40000000) -+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_SHIFT (30) -+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_LENGTH (1) -+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2CTRL, GRPH2CKEYSRC -+*/ -+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_MASK (0x20000000) -+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_SHIFT (29) -+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_LENGTH (1) -+#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2CTRL, GRPH2BLEND -+*/ -+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_MASK (0x18000000) -+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_LSBMASK (0x00000003) -+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_SHIFT (27) -+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_LENGTH (2) -+#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2CTRL, GRPH2BLENDPOS -+*/ -+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_MASK (0x07000000) -+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_LSBMASK (0x00000007) -+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SHIFT (24) -+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_LENGTH (3) -+#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2CTRL, GRPH2DITHEREN -+*/ -+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_MASK (0x00800000) -+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_LSBMASK (0x00000001) -+#define 
ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_SHIFT (23) -+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_LENGTH (1) -+#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH3CTRL_OFFSET (0x0028) -+ -+/* PDP, GRPH3CTRL, GRPH3STREN -+*/ -+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_MASK (0x80000000) -+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_SHIFT (31) -+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_LENGTH (1) -+#define ODN_PDP_GRPH3CTRL_GRPH3STREN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3CTRL, GRPH3CKEYEN -+*/ -+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_MASK (0x40000000) -+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_SHIFT (30) -+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_LENGTH (1) -+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3CTRL, GRPH3CKEYSRC -+*/ -+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_MASK (0x20000000) -+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_SHIFT (29) -+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_LENGTH (1) -+#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3CTRL, GRPH3BLEND -+*/ -+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_MASK (0x18000000) -+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_LSBMASK (0x00000003) -+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_SHIFT (27) -+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_LENGTH (2) -+#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3CTRL, GRPH3BLENDPOS -+*/ -+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_MASK (0x07000000) -+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_LSBMASK (0x00000007) -+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_SHIFT (24) -+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_LENGTH (3) -+#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3CTRL, GRPH3DITHEREN -+*/ -+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_MASK (0x00800000) -+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_SHIFT (23) -+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_LENGTH (1) -+#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH4CTRL_OFFSET (0x002C) -+ -+/* PDP, GRPH4CTRL, GRPH4STREN -+*/ -+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_MASK (0x80000000) -+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_SHIFT (31) -+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_LENGTH (1) -+#define ODN_PDP_GRPH4CTRL_GRPH4STREN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4CTRL, GRPH4CKEYEN -+*/ -+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_MASK (0x40000000) -+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_SHIFT (30) -+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_LENGTH (1) -+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4CTRL, GRPH4CKEYSRC -+*/ -+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_MASK (0x20000000) -+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_SHIFT (29) -+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_LENGTH (1) -+#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4CTRL, GRPH4BLEND -+*/ -+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_MASK (0x18000000) -+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_LSBMASK (0x00000003) -+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_SHIFT (27) -+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_LENGTH (2) -+#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4CTRL, GRPH4BLENDPOS 
-+*/ -+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_MASK (0x07000000) -+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_LSBMASK (0x00000007) -+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SHIFT (24) -+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_LENGTH (3) -+#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4CTRL, GRPH4DITHEREN -+*/ -+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_MASK (0x00800000) -+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_SHIFT (23) -+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_LENGTH (1) -+#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1CTRL_OFFSET (0x0030) -+ -+/* PDP, VID1CTRL, VID1STREN -+*/ -+#define ODN_PDP_VID1CTRL_VID1STREN_MASK (0x80000000) -+#define ODN_PDP_VID1CTRL_VID1STREN_LSBMASK (0x00000001) -+#define ODN_PDP_VID1CTRL_VID1STREN_SHIFT (31) -+#define ODN_PDP_VID1CTRL_VID1STREN_LENGTH (1) -+#define ODN_PDP_VID1CTRL_VID1STREN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1CTRL, VID1CKEYEN -+*/ -+#define ODN_PDP_VID1CTRL_VID1CKEYEN_MASK (0x40000000) -+#define ODN_PDP_VID1CTRL_VID1CKEYEN_LSBMASK (0x00000001) -+#define ODN_PDP_VID1CTRL_VID1CKEYEN_SHIFT (30) -+#define ODN_PDP_VID1CTRL_VID1CKEYEN_LENGTH (1) -+#define ODN_PDP_VID1CTRL_VID1CKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1CTRL, VID1CKEYSRC -+*/ -+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_MASK (0x20000000) -+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_LSBMASK (0x00000001) -+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_SHIFT (29) -+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_LENGTH (1) -+#define ODN_PDP_VID1CTRL_VID1CKEYSRC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1CTRL, VID1BLEND -+*/ -+#define ODN_PDP_VID1CTRL_VID1BLEND_MASK (0x18000000) -+#define ODN_PDP_VID1CTRL_VID1BLEND_LSBMASK (0x00000003) -+#define ODN_PDP_VID1CTRL_VID1BLEND_SHIFT (27) -+#define ODN_PDP_VID1CTRL_VID1BLEND_LENGTH (2) -+#define ODN_PDP_VID1CTRL_VID1BLEND_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1CTRL, VID1BLENDPOS -+*/ -+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_MASK (0x07000000) -+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_LSBMASK (0x00000007) -+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_SHIFT (24) -+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_LENGTH (3) -+#define ODN_PDP_VID1CTRL_VID1BLENDPOS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1CTRL, VID1DITHEREN -+*/ -+#define ODN_PDP_VID1CTRL_VID1DITHEREN_MASK (0x00800000) -+#define ODN_PDP_VID1CTRL_VID1DITHEREN_LSBMASK (0x00000001) -+#define ODN_PDP_VID1CTRL_VID1DITHEREN_SHIFT (23) -+#define ODN_PDP_VID1CTRL_VID1DITHEREN_LENGTH (1) -+#define ODN_PDP_VID1CTRL_VID1DITHEREN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2CTRL_OFFSET (0x0034) -+ -+/* PDP, VID2CTRL, VID2STREN -+*/ -+#define ODN_PDP_VID2CTRL_VID2STREN_MASK (0x80000000) -+#define ODN_PDP_VID2CTRL_VID2STREN_LSBMASK (0x00000001) -+#define ODN_PDP_VID2CTRL_VID2STREN_SHIFT (31) -+#define ODN_PDP_VID2CTRL_VID2STREN_LENGTH (1) -+#define ODN_PDP_VID2CTRL_VID2STREN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2CTRL, VID2CKEYEN -+*/ -+#define ODN_PDP_VID2CTRL_VID2CKEYEN_MASK (0x40000000) -+#define ODN_PDP_VID2CTRL_VID2CKEYEN_LSBMASK (0x00000001) -+#define ODN_PDP_VID2CTRL_VID2CKEYEN_SHIFT (30) -+#define ODN_PDP_VID2CTRL_VID2CKEYEN_LENGTH (1) -+#define ODN_PDP_VID2CTRL_VID2CKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2CTRL, VID2CKEYSRC -+*/ -+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_MASK (0x20000000) -+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_LSBMASK (0x00000001) -+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_SHIFT (29) -+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_LENGTH (1) -+#define ODN_PDP_VID2CTRL_VID2CKEYSRC_SIGNED_FIELD 
IMG_FALSE -+ -+/* PDP, VID2CTRL, VID2BLEND -+*/ -+#define ODN_PDP_VID2CTRL_VID2BLEND_MASK (0x18000000) -+#define ODN_PDP_VID2CTRL_VID2BLEND_LSBMASK (0x00000003) -+#define ODN_PDP_VID2CTRL_VID2BLEND_SHIFT (27) -+#define ODN_PDP_VID2CTRL_VID2BLEND_LENGTH (2) -+#define ODN_PDP_VID2CTRL_VID2BLEND_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2CTRL, VID2BLENDPOS -+*/ -+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_MASK (0x07000000) -+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_LSBMASK (0x00000007) -+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_SHIFT (24) -+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_LENGTH (3) -+#define ODN_PDP_VID2CTRL_VID2BLENDPOS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2CTRL, VID2DITHEREN -+*/ -+#define ODN_PDP_VID2CTRL_VID2DITHEREN_MASK (0x00800000) -+#define ODN_PDP_VID2CTRL_VID2DITHEREN_LSBMASK (0x00000001) -+#define ODN_PDP_VID2CTRL_VID2DITHEREN_SHIFT (23) -+#define ODN_PDP_VID2CTRL_VID2DITHEREN_LENGTH (1) -+#define ODN_PDP_VID2CTRL_VID2DITHEREN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3CTRL_OFFSET (0x0038) -+ -+/* PDP, VID3CTRL, VID3STREN -+*/ -+#define ODN_PDP_VID3CTRL_VID3STREN_MASK (0x80000000) -+#define ODN_PDP_VID3CTRL_VID3STREN_LSBMASK (0x00000001) -+#define ODN_PDP_VID3CTRL_VID3STREN_SHIFT (31) -+#define ODN_PDP_VID3CTRL_VID3STREN_LENGTH (1) -+#define ODN_PDP_VID3CTRL_VID3STREN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3CTRL, VID3CKEYEN -+*/ -+#define ODN_PDP_VID3CTRL_VID3CKEYEN_MASK (0x40000000) -+#define ODN_PDP_VID3CTRL_VID3CKEYEN_LSBMASK (0x00000001) -+#define ODN_PDP_VID3CTRL_VID3CKEYEN_SHIFT (30) -+#define ODN_PDP_VID3CTRL_VID3CKEYEN_LENGTH (1) -+#define ODN_PDP_VID3CTRL_VID3CKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3CTRL, VID3CKEYSRC -+*/ -+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_MASK (0x20000000) -+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_LSBMASK (0x00000001) -+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_SHIFT (29) -+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_LENGTH (1) -+#define ODN_PDP_VID3CTRL_VID3CKEYSRC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3CTRL, VID3BLEND -+*/ -+#define ODN_PDP_VID3CTRL_VID3BLEND_MASK (0x18000000) -+#define ODN_PDP_VID3CTRL_VID3BLEND_LSBMASK (0x00000003) -+#define ODN_PDP_VID3CTRL_VID3BLEND_SHIFT (27) -+#define ODN_PDP_VID3CTRL_VID3BLEND_LENGTH (2) -+#define ODN_PDP_VID3CTRL_VID3BLEND_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3CTRL, VID3BLENDPOS -+*/ -+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_MASK (0x07000000) -+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_LSBMASK (0x00000007) -+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_SHIFT (24) -+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_LENGTH (3) -+#define ODN_PDP_VID3CTRL_VID3BLENDPOS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3CTRL, VID3DITHEREN -+*/ -+#define ODN_PDP_VID3CTRL_VID3DITHEREN_MASK (0x00800000) -+#define ODN_PDP_VID3CTRL_VID3DITHEREN_LSBMASK (0x00000001) -+#define ODN_PDP_VID3CTRL_VID3DITHEREN_SHIFT (23) -+#define ODN_PDP_VID3CTRL_VID3DITHEREN_LENGTH (1) -+#define ODN_PDP_VID3CTRL_VID3DITHEREN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4CTRL_OFFSET (0x003C) -+ -+/* PDP, VID4CTRL, VID4STREN -+*/ -+#define ODN_PDP_VID4CTRL_VID4STREN_MASK (0x80000000) -+#define ODN_PDP_VID4CTRL_VID4STREN_LSBMASK (0x00000001) -+#define ODN_PDP_VID4CTRL_VID4STREN_SHIFT (31) -+#define ODN_PDP_VID4CTRL_VID4STREN_LENGTH (1) -+#define ODN_PDP_VID4CTRL_VID4STREN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4CTRL, VID4CKEYEN -+*/ -+#define ODN_PDP_VID4CTRL_VID4CKEYEN_MASK (0x40000000) -+#define ODN_PDP_VID4CTRL_VID4CKEYEN_LSBMASK (0x00000001) -+#define ODN_PDP_VID4CTRL_VID4CKEYEN_SHIFT (30) -+#define ODN_PDP_VID4CTRL_VID4CKEYEN_LENGTH (1) -+#define 
ODN_PDP_VID4CTRL_VID4CKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4CTRL, VID4CKEYSRC -+*/ -+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_MASK (0x20000000) -+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_LSBMASK (0x00000001) -+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_SHIFT (29) -+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_LENGTH (1) -+#define ODN_PDP_VID4CTRL_VID4CKEYSRC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4CTRL, VID4BLEND -+*/ -+#define ODN_PDP_VID4CTRL_VID4BLEND_MASK (0x18000000) -+#define ODN_PDP_VID4CTRL_VID4BLEND_LSBMASK (0x00000003) -+#define ODN_PDP_VID4CTRL_VID4BLEND_SHIFT (27) -+#define ODN_PDP_VID4CTRL_VID4BLEND_LENGTH (2) -+#define ODN_PDP_VID4CTRL_VID4BLEND_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4CTRL, VID4BLENDPOS -+*/ -+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_MASK (0x07000000) -+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_LSBMASK (0x00000007) -+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_SHIFT (24) -+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_LENGTH (3) -+#define ODN_PDP_VID4CTRL_VID4BLENDPOS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4CTRL, VID4DITHEREN -+*/ -+#define ODN_PDP_VID4CTRL_VID4DITHEREN_MASK (0x00800000) -+#define ODN_PDP_VID4CTRL_VID4DITHEREN_LSBMASK (0x00000001) -+#define ODN_PDP_VID4CTRL_VID4DITHEREN_SHIFT (23) -+#define ODN_PDP_VID4CTRL_VID4DITHEREN_LENGTH (1) -+#define ODN_PDP_VID4CTRL_VID4DITHEREN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1UCTRL_OFFSET (0x0050) -+ -+/* PDP, VID1UCTRL, VID1UVHALFSTR -+*/ -+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_MASK (0xC0000000) -+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_LSBMASK (0x00000003) -+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_SHIFT (30) -+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_LENGTH (2) -+#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2UCTRL_OFFSET (0x0054) -+ -+/* PDP, VID2UCTRL, VID2UVHALFSTR -+*/ -+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_MASK (0xC0000000) -+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_LSBMASK (0x00000003) -+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_SHIFT (30) -+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_LENGTH (2) -+#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3UCTRL_OFFSET (0x0058) -+ -+/* PDP, VID3UCTRL, VID3UVHALFSTR -+*/ -+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_MASK (0xC0000000) -+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_LSBMASK (0x00000003) -+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_SHIFT (30) -+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_LENGTH (2) -+#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4UCTRL_OFFSET (0x005C) -+ -+/* PDP, VID4UCTRL, VID4UVHALFSTR -+*/ -+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_MASK (0xC0000000) -+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_LSBMASK (0x00000003) -+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_SHIFT (30) -+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_LENGTH (2) -+#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH1STRIDE_OFFSET (0x0060) -+ -+/* PDP, GRPH1STRIDE, GRPH1STRIDE -+*/ -+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_MASK (0xFFC00000) -+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT (22) -+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_LENGTH (10) -+#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH2STRIDE_OFFSET (0x0064) -+ -+/* PDP, GRPH2STRIDE, GRPH2STRIDE -+*/ -+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_MASK (0xFFC00000) -+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SHIFT (22) -+#define 
ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_LENGTH (10) -+#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH3STRIDE_OFFSET (0x0068) -+ -+/* PDP, GRPH3STRIDE, GRPH3STRIDE -+*/ -+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_MASK (0xFFC00000) -+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_SHIFT (22) -+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_LENGTH (10) -+#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH4STRIDE_OFFSET (0x006C) -+ -+/* PDP, GRPH4STRIDE, GRPH4STRIDE -+*/ -+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_MASK (0xFFC00000) -+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SHIFT (22) -+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_LENGTH (10) -+#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1STRIDE_OFFSET (0x0070) -+ -+/* PDP, VID1STRIDE, VID1STRIDE -+*/ -+#define ODN_PDP_VID1STRIDE_VID1STRIDE_MASK (0xFFC00000) -+#define ODN_PDP_VID1STRIDE_VID1STRIDE_LSBMASK (0x000003FF) -+#define ODN_PDP_VID1STRIDE_VID1STRIDE_SHIFT (22) -+#define ODN_PDP_VID1STRIDE_VID1STRIDE_LENGTH (10) -+#define ODN_PDP_VID1STRIDE_VID1STRIDE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2STRIDE_OFFSET (0x0074) -+ -+/* PDP, VID2STRIDE, VID2STRIDE -+*/ -+#define ODN_PDP_VID2STRIDE_VID2STRIDE_MASK (0xFFC00000) -+#define ODN_PDP_VID2STRIDE_VID2STRIDE_LSBMASK (0x000003FF) -+#define ODN_PDP_VID2STRIDE_VID2STRIDE_SHIFT (22) -+#define ODN_PDP_VID2STRIDE_VID2STRIDE_LENGTH (10) -+#define ODN_PDP_VID2STRIDE_VID2STRIDE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3STRIDE_OFFSET (0x0078) -+ -+/* PDP, VID3STRIDE, VID3STRIDE -+*/ -+#define ODN_PDP_VID3STRIDE_VID3STRIDE_MASK (0xFFC00000) -+#define ODN_PDP_VID3STRIDE_VID3STRIDE_LSBMASK (0x000003FF) -+#define ODN_PDP_VID3STRIDE_VID3STRIDE_SHIFT (22) -+#define ODN_PDP_VID3STRIDE_VID3STRIDE_LENGTH (10) -+#define ODN_PDP_VID3STRIDE_VID3STRIDE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4STRIDE_OFFSET (0x007C) -+ -+/* PDP, VID4STRIDE, VID4STRIDE -+*/ -+#define ODN_PDP_VID4STRIDE_VID4STRIDE_MASK (0xFFC00000) -+#define ODN_PDP_VID4STRIDE_VID4STRIDE_LSBMASK (0x000003FF) -+#define ODN_PDP_VID4STRIDE_VID4STRIDE_SHIFT (22) -+#define ODN_PDP_VID4STRIDE_VID4STRIDE_LENGTH (10) -+#define ODN_PDP_VID4STRIDE_VID4STRIDE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH1SIZE_OFFSET (0x0080) -+ -+/* PDP, GRPH1SIZE, GRPH1WIDTH -+*/ -+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_MASK (0x0FFF0000) -+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_LSBMASK (0x00000FFF) -+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT (16) -+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_LENGTH (12) -+#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1SIZE, GRPH1HEIGHT -+*/ -+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_MASK (0x00000FFF) -+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_LSBMASK (0x00000FFF) -+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT (0) -+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_LENGTH (12) -+#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH2SIZE_OFFSET (0x0084) -+ -+/* PDP, GRPH2SIZE, GRPH2WIDTH -+*/ -+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_MASK (0x0FFF0000) -+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_LSBMASK (0x00000FFF) -+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SHIFT (16) -+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_LENGTH (12) -+#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2SIZE, GRPH2HEIGHT -+*/ -+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_MASK (0x00000FFF) -+#define 
ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_LSBMASK (0x00000FFF) -+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SHIFT (0) -+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_LENGTH (12) -+#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH3SIZE_OFFSET (0x0088) -+ -+/* PDP, GRPH3SIZE, GRPH3WIDTH -+*/ -+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_MASK (0x0FFF0000) -+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_LSBMASK (0x00000FFF) -+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_SHIFT (16) -+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_LENGTH (12) -+#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3SIZE, GRPH3HEIGHT -+*/ -+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_MASK (0x00000FFF) -+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_LSBMASK (0x00000FFF) -+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_SHIFT (0) -+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_LENGTH (12) -+#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH4SIZE_OFFSET (0x008C) -+ -+/* PDP, GRPH4SIZE, GRPH4WIDTH -+*/ -+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_MASK (0x0FFF0000) -+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_LSBMASK (0x00000FFF) -+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SHIFT (16) -+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_LENGTH (12) -+#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4SIZE, GRPH4HEIGHT -+*/ -+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_MASK (0x00000FFF) -+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_LSBMASK (0x00000FFF) -+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SHIFT (0) -+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_LENGTH (12) -+#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1SIZE_OFFSET (0x0090) -+ -+/* PDP, VID1SIZE, VID1WIDTH -+*/ -+#define ODN_PDP_VID1SIZE_VID1WIDTH_MASK (0x0FFF0000) -+#define ODN_PDP_VID1SIZE_VID1WIDTH_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID1SIZE_VID1WIDTH_SHIFT (16) -+#define ODN_PDP_VID1SIZE_VID1WIDTH_LENGTH (12) -+#define ODN_PDP_VID1SIZE_VID1WIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SIZE, VID1HEIGHT -+*/ -+#define ODN_PDP_VID1SIZE_VID1HEIGHT_MASK (0x00000FFF) -+#define ODN_PDP_VID1SIZE_VID1HEIGHT_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID1SIZE_VID1HEIGHT_SHIFT (0) -+#define ODN_PDP_VID1SIZE_VID1HEIGHT_LENGTH (12) -+#define ODN_PDP_VID1SIZE_VID1HEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2SIZE_OFFSET (0x0094) -+ -+/* PDP, VID2SIZE, VID2WIDTH -+*/ -+#define ODN_PDP_VID2SIZE_VID2WIDTH_MASK (0x0FFF0000) -+#define ODN_PDP_VID2SIZE_VID2WIDTH_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID2SIZE_VID2WIDTH_SHIFT (16) -+#define ODN_PDP_VID2SIZE_VID2WIDTH_LENGTH (12) -+#define ODN_PDP_VID2SIZE_VID2WIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SIZE, VID2HEIGHT -+*/ -+#define ODN_PDP_VID2SIZE_VID2HEIGHT_MASK (0x00000FFF) -+#define ODN_PDP_VID2SIZE_VID2HEIGHT_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID2SIZE_VID2HEIGHT_SHIFT (0) -+#define ODN_PDP_VID2SIZE_VID2HEIGHT_LENGTH (12) -+#define ODN_PDP_VID2SIZE_VID2HEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3SIZE_OFFSET (0x0098) -+ -+/* PDP, VID3SIZE, VID3WIDTH -+*/ -+#define ODN_PDP_VID3SIZE_VID3WIDTH_MASK (0x0FFF0000) -+#define ODN_PDP_VID3SIZE_VID3WIDTH_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID3SIZE_VID3WIDTH_SHIFT (16) -+#define ODN_PDP_VID3SIZE_VID3WIDTH_LENGTH (12) -+#define ODN_PDP_VID3SIZE_VID3WIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SIZE, VID3HEIGHT -+*/ -+#define ODN_PDP_VID3SIZE_VID3HEIGHT_MASK (0x00000FFF) -+#define ODN_PDP_VID3SIZE_VID3HEIGHT_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID3SIZE_VID3HEIGHT_SHIFT (0) -+#define ODN_PDP_VID3SIZE_VID3HEIGHT_LENGTH (12) -+#define 
ODN_PDP_VID3SIZE_VID3HEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4SIZE_OFFSET (0x009C) -+ -+/* PDP, VID4SIZE, VID4WIDTH -+*/ -+#define ODN_PDP_VID4SIZE_VID4WIDTH_MASK (0x0FFF0000) -+#define ODN_PDP_VID4SIZE_VID4WIDTH_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID4SIZE_VID4WIDTH_SHIFT (16) -+#define ODN_PDP_VID4SIZE_VID4WIDTH_LENGTH (12) -+#define ODN_PDP_VID4SIZE_VID4WIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SIZE, VID4HEIGHT -+*/ -+#define ODN_PDP_VID4SIZE_VID4HEIGHT_MASK (0x00000FFF) -+#define ODN_PDP_VID4SIZE_VID4HEIGHT_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID4SIZE_VID4HEIGHT_SHIFT (0) -+#define ODN_PDP_VID4SIZE_VID4HEIGHT_LENGTH (12) -+#define ODN_PDP_VID4SIZE_VID4HEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH1POSN_OFFSET (0x00A0) -+ -+/* PDP, GRPH1POSN, GRPH1XSTART -+*/ -+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_MASK (0x0FFF0000) -+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_LSBMASK (0x00000FFF) -+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_SHIFT (16) -+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_LENGTH (12) -+#define ODN_PDP_GRPH1POSN_GRPH1XSTART_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1POSN, GRPH1YSTART -+*/ -+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_MASK (0x00000FFF) -+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_LSBMASK (0x00000FFF) -+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_SHIFT (0) -+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_LENGTH (12) -+#define ODN_PDP_GRPH1POSN_GRPH1YSTART_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH2POSN_OFFSET (0x00A4) -+ -+/* PDP, GRPH2POSN, GRPH2XSTART -+*/ -+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_MASK (0x0FFF0000) -+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_LSBMASK (0x00000FFF) -+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_SHIFT (16) -+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_LENGTH (12) -+#define ODN_PDP_GRPH2POSN_GRPH2XSTART_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2POSN, GRPH2YSTART -+*/ -+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_MASK (0x00000FFF) -+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_LSBMASK (0x00000FFF) -+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_SHIFT (0) -+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_LENGTH (12) -+#define ODN_PDP_GRPH2POSN_GRPH2YSTART_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH3POSN_OFFSET (0x00A8) -+ -+/* PDP, GRPH3POSN, GRPH3XSTART -+*/ -+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_MASK (0x0FFF0000) -+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_LSBMASK (0x00000FFF) -+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_SHIFT (16) -+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_LENGTH (12) -+#define ODN_PDP_GRPH3POSN_GRPH3XSTART_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3POSN, GRPH3YSTART -+*/ -+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_MASK (0x00000FFF) -+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_LSBMASK (0x00000FFF) -+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_SHIFT (0) -+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_LENGTH (12) -+#define ODN_PDP_GRPH3POSN_GRPH3YSTART_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH4POSN_OFFSET (0x00AC) -+ -+/* PDP, GRPH4POSN, GRPH4XSTART -+*/ -+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_MASK (0x0FFF0000) -+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_LSBMASK (0x00000FFF) -+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_SHIFT (16) -+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_LENGTH (12) -+#define ODN_PDP_GRPH4POSN_GRPH4XSTART_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4POSN, GRPH4YSTART -+*/ -+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_MASK (0x00000FFF) -+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_LSBMASK (0x00000FFF) -+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_SHIFT (0) -+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_LENGTH (12) -+#define ODN_PDP_GRPH4POSN_GRPH4YSTART_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1POSN_OFFSET 
(0x00B0) -+ -+/* PDP, VID1POSN, VID1XSTART -+*/ -+#define ODN_PDP_VID1POSN_VID1XSTART_MASK (0x0FFF0000) -+#define ODN_PDP_VID1POSN_VID1XSTART_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID1POSN_VID1XSTART_SHIFT (16) -+#define ODN_PDP_VID1POSN_VID1XSTART_LENGTH (12) -+#define ODN_PDP_VID1POSN_VID1XSTART_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1POSN, VID1YSTART -+*/ -+#define ODN_PDP_VID1POSN_VID1YSTART_MASK (0x00000FFF) -+#define ODN_PDP_VID1POSN_VID1YSTART_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID1POSN_VID1YSTART_SHIFT (0) -+#define ODN_PDP_VID1POSN_VID1YSTART_LENGTH (12) -+#define ODN_PDP_VID1POSN_VID1YSTART_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2POSN_OFFSET (0x00B4) -+ -+/* PDP, VID2POSN, VID2XSTART -+*/ -+#define ODN_PDP_VID2POSN_VID2XSTART_MASK (0x0FFF0000) -+#define ODN_PDP_VID2POSN_VID2XSTART_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID2POSN_VID2XSTART_SHIFT (16) -+#define ODN_PDP_VID2POSN_VID2XSTART_LENGTH (12) -+#define ODN_PDP_VID2POSN_VID2XSTART_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2POSN, VID2YSTART -+*/ -+#define ODN_PDP_VID2POSN_VID2YSTART_MASK (0x00000FFF) -+#define ODN_PDP_VID2POSN_VID2YSTART_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID2POSN_VID2YSTART_SHIFT (0) -+#define ODN_PDP_VID2POSN_VID2YSTART_LENGTH (12) -+#define ODN_PDP_VID2POSN_VID2YSTART_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3POSN_OFFSET (0x00B8) -+ -+/* PDP, VID3POSN, VID3XSTART -+*/ -+#define ODN_PDP_VID3POSN_VID3XSTART_MASK (0x0FFF0000) -+#define ODN_PDP_VID3POSN_VID3XSTART_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID3POSN_VID3XSTART_SHIFT (16) -+#define ODN_PDP_VID3POSN_VID3XSTART_LENGTH (12) -+#define ODN_PDP_VID3POSN_VID3XSTART_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3POSN, VID3YSTART -+*/ -+#define ODN_PDP_VID3POSN_VID3YSTART_MASK (0x00000FFF) -+#define ODN_PDP_VID3POSN_VID3YSTART_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID3POSN_VID3YSTART_SHIFT (0) -+#define ODN_PDP_VID3POSN_VID3YSTART_LENGTH (12) -+#define ODN_PDP_VID3POSN_VID3YSTART_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4POSN_OFFSET (0x00BC) -+ -+/* PDP, VID4POSN, VID4XSTART -+*/ -+#define ODN_PDP_VID4POSN_VID4XSTART_MASK (0x0FFF0000) -+#define ODN_PDP_VID4POSN_VID4XSTART_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID4POSN_VID4XSTART_SHIFT (16) -+#define ODN_PDP_VID4POSN_VID4XSTART_LENGTH (12) -+#define ODN_PDP_VID4POSN_VID4XSTART_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4POSN, VID4YSTART -+*/ -+#define ODN_PDP_VID4POSN_VID4YSTART_MASK (0x00000FFF) -+#define ODN_PDP_VID4POSN_VID4YSTART_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID4POSN_VID4YSTART_SHIFT (0) -+#define ODN_PDP_VID4POSN_VID4YSTART_LENGTH (12) -+#define ODN_PDP_VID4POSN_VID4YSTART_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH1GALPHA_OFFSET (0x00C0) -+ -+/* PDP, GRPH1GALPHA, GRPH1GALPHA -+*/ -+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_MASK (0x000003FF) -+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SHIFT (0) -+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_LENGTH (10) -+#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH2GALPHA_OFFSET (0x00C4) -+ -+/* PDP, GRPH2GALPHA, GRPH2GALPHA -+*/ -+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_MASK (0x000003FF) -+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SHIFT (0) -+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_LENGTH (10) -+#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH3GALPHA_OFFSET (0x00C8) -+ -+/* PDP, GRPH3GALPHA, GRPH3GALPHA -+*/ -+#define 
ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_MASK (0x000003FF) -+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_SHIFT (0) -+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_LENGTH (10) -+#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH4GALPHA_OFFSET (0x00CC) -+ -+/* PDP, GRPH4GALPHA, GRPH4GALPHA -+*/ -+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_MASK (0x000003FF) -+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SHIFT (0) -+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_LENGTH (10) -+#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1GALPHA_OFFSET (0x00D0) -+ -+/* PDP, VID1GALPHA, VID1GALPHA -+*/ -+#define ODN_PDP_VID1GALPHA_VID1GALPHA_MASK (0x000003FF) -+#define ODN_PDP_VID1GALPHA_VID1GALPHA_LSBMASK (0x000003FF) -+#define ODN_PDP_VID1GALPHA_VID1GALPHA_SHIFT (0) -+#define ODN_PDP_VID1GALPHA_VID1GALPHA_LENGTH (10) -+#define ODN_PDP_VID1GALPHA_VID1GALPHA_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2GALPHA_OFFSET (0x00D4) -+ -+/* PDP, VID2GALPHA, VID2GALPHA -+*/ -+#define ODN_PDP_VID2GALPHA_VID2GALPHA_MASK (0x000003FF) -+#define ODN_PDP_VID2GALPHA_VID2GALPHA_LSBMASK (0x000003FF) -+#define ODN_PDP_VID2GALPHA_VID2GALPHA_SHIFT (0) -+#define ODN_PDP_VID2GALPHA_VID2GALPHA_LENGTH (10) -+#define ODN_PDP_VID2GALPHA_VID2GALPHA_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3GALPHA_OFFSET (0x00D8) -+ -+/* PDP, VID3GALPHA, VID3GALPHA -+*/ -+#define ODN_PDP_VID3GALPHA_VID3GALPHA_MASK (0x000003FF) -+#define ODN_PDP_VID3GALPHA_VID3GALPHA_LSBMASK (0x000003FF) -+#define ODN_PDP_VID3GALPHA_VID3GALPHA_SHIFT (0) -+#define ODN_PDP_VID3GALPHA_VID3GALPHA_LENGTH (10) -+#define ODN_PDP_VID3GALPHA_VID3GALPHA_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4GALPHA_OFFSET (0x00DC) -+ -+/* PDP, VID4GALPHA, VID4GALPHA -+*/ -+#define ODN_PDP_VID4GALPHA_VID4GALPHA_MASK (0x000003FF) -+#define ODN_PDP_VID4GALPHA_VID4GALPHA_LSBMASK (0x000003FF) -+#define ODN_PDP_VID4GALPHA_VID4GALPHA_SHIFT (0) -+#define ODN_PDP_VID4GALPHA_VID4GALPHA_LENGTH (10) -+#define ODN_PDP_VID4GALPHA_VID4GALPHA_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH1CKEY_R_OFFSET (0x00E0) -+ -+/* PDP, GRPH1CKEY_R, GRPH1CKEY_R -+*/ -+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_MASK (0x000003FF) -+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_SHIFT (0) -+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_LENGTH (10) -+#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH1CKEY_GB_OFFSET (0x00E4) -+ -+/* PDP, GRPH1CKEY_GB, GRPH1CKEY_G -+*/ -+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_MASK (0x03FF0000) -+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SHIFT (16) -+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LENGTH (10) -+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1CKEY_GB, GRPH1CKEY_B -+*/ -+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_MASK (0x000003FF) -+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SHIFT (0) -+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LENGTH (10) -+#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH2CKEY_R_OFFSET (0x00E8) -+ -+/* PDP, GRPH2CKEY_R, GRPH2CKEY_R -+*/ -+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_MASK (0x000003FF) -+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_SHIFT (0) 
-+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_LENGTH (10) -+#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH2CKEY_GB_OFFSET (0x00EC) -+ -+/* PDP, GRPH2CKEY_GB, GRPH2CKEY_G -+*/ -+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_MASK (0x03FF0000) -+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SHIFT (16) -+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LENGTH (10) -+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2CKEY_GB, GRPH2CKEY_B -+*/ -+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_MASK (0x000003FF) -+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SHIFT (0) -+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LENGTH (10) -+#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH3CKEY_R_OFFSET (0x00F0) -+ -+/* PDP, GRPH3CKEY_R, GRPH3CKEY_R -+*/ -+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_MASK (0x000003FF) -+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_SHIFT (0) -+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_LENGTH (10) -+#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH3CKEY_GB_OFFSET (0x00F4) -+ -+/* PDP, GRPH3CKEY_GB, GRPH3CKEY_G -+*/ -+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_MASK (0x03FF0000) -+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SHIFT (16) -+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LENGTH (10) -+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3CKEY_GB, GRPH3CKEY_B -+*/ -+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_MASK (0x000003FF) -+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SHIFT (0) -+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LENGTH (10) -+#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH4CKEY_R_OFFSET (0x00F8) -+ -+/* PDP, GRPH4CKEY_R, GRPH4CKEY_R -+*/ -+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_MASK (0x000003FF) -+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_SHIFT (0) -+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_LENGTH (10) -+#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH4CKEY_GB_OFFSET (0x00FC) -+ -+/* PDP, GRPH4CKEY_GB, GRPH4CKEY_G -+*/ -+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_MASK (0x03FF0000) -+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SHIFT (16) -+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LENGTH (10) -+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4CKEY_GB, GRPH4CKEY_B -+*/ -+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_MASK (0x000003FF) -+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SHIFT (0) -+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LENGTH (10) -+#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1CKEY_R_OFFSET (0x0100) -+ -+/* PDP, VID1CKEY_R, VID1CKEY_R -+*/ -+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_MASK (0x000003FF) -+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_LSBMASK (0x000003FF) -+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_SHIFT (0) -+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_LENGTH (10) -+#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1CKEY_GB_OFFSET (0x0104) -+ -+/* PDP, VID1CKEY_GB, 
VID1CKEY_G -+*/ -+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_MASK (0x03FF0000) -+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_LSBMASK (0x000003FF) -+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_SHIFT (16) -+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_LENGTH (10) -+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1CKEY_GB, VID1CKEY_B -+*/ -+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_MASK (0x000003FF) -+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_LSBMASK (0x000003FF) -+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_SHIFT (0) -+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_LENGTH (10) -+#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2CKEY_R_OFFSET (0x0108) -+ -+/* PDP, VID2CKEY_R, VID2CKEY_R -+*/ -+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_MASK (0x000003FF) -+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_LSBMASK (0x000003FF) -+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_SHIFT (0) -+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_LENGTH (10) -+#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2CKEY_GB_OFFSET (0x010C) -+ -+/* PDP, VID2CKEY_GB, VID2CKEY_G -+*/ -+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_MASK (0x03FF0000) -+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_LSBMASK (0x000003FF) -+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_SHIFT (16) -+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_LENGTH (10) -+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2CKEY_GB, VID2CKEY_B -+*/ -+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_MASK (0x000003FF) -+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_LSBMASK (0x000003FF) -+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_SHIFT (0) -+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_LENGTH (10) -+#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3CKEY_R_OFFSET (0x0110) -+ -+/* PDP, VID3CKEY_R, VID3CKEY_R -+*/ -+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_MASK (0x000003FF) -+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_LSBMASK (0x000003FF) -+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_SHIFT (0) -+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_LENGTH (10) -+#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3CKEY_GB_OFFSET (0x0114) -+ -+/* PDP, VID3CKEY_GB, VID3CKEY_G -+*/ -+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_MASK (0x03FF0000) -+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_LSBMASK (0x000003FF) -+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_SHIFT (16) -+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_LENGTH (10) -+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3CKEY_GB, VID3CKEY_B -+*/ -+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_MASK (0x000003FF) -+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_LSBMASK (0x000003FF) -+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_SHIFT (0) -+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_LENGTH (10) -+#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4CKEY_R_OFFSET (0x0118) -+ -+/* PDP, VID4CKEY_R, VID4CKEY_R -+*/ -+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_MASK (0x000003FF) -+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_LSBMASK (0x000003FF) -+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_SHIFT (0) -+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_LENGTH (10) -+#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4CKEY_GB_OFFSET (0x011C) -+ -+/* PDP, VID4CKEY_GB, VID4CKEY_G -+*/ -+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_MASK (0x03FF0000) -+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_LSBMASK (0x000003FF) -+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_SHIFT (16) -+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_LENGTH (10) -+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_SIGNED_FIELD 
IMG_FALSE -+ -+/* PDP, VID4CKEY_GB, VID4CKEY_B -+*/ -+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_MASK (0x000003FF) -+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_LSBMASK (0x000003FF) -+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_SHIFT (0) -+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_LENGTH (10) -+#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH1BLND2_R_OFFSET (0x0120) -+ -+/* PDP, GRPH1BLND2_R, GRPH1PIXDBL -+*/ -+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_MASK (0x80000000) -+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_SHIFT (31) -+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_LENGTH (1) -+#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1BLND2_R, GRPH1LINDBL -+*/ -+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_MASK (0x20000000) -+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_SHIFT (29) -+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_LENGTH (1) -+#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1BLND2_R, GRPH1CKEYMASK_R -+*/ -+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_MASK (0x000003FF) -+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SHIFT (0) -+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LENGTH (10) -+#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH1BLND2_GB_OFFSET (0x0124) -+ -+/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_G -+*/ -+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_MASK (0x03FF0000) -+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SHIFT (16) -+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LENGTH (10) -+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_B -+*/ -+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_MASK (0x000003FF) -+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SHIFT (0) -+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LENGTH (10) -+#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH2BLND2_R_OFFSET (0x0128) -+ -+/* PDP, GRPH2BLND2_R, GRPH2PIXDBL -+*/ -+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_MASK (0x80000000) -+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_SHIFT (31) -+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_LENGTH (1) -+#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2BLND2_R, GRPH2LINDBL -+*/ -+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_MASK (0x20000000) -+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_SHIFT (29) -+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_LENGTH (1) -+#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2BLND2_R, GRPH2CKEYMASK_R -+*/ -+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_MASK (0x000003FF) -+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SHIFT (0) -+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LENGTH (10) -+#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH2BLND2_GB_OFFSET (0x012C) -+ -+/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_G -+*/ -+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_MASK (0x03FF0000) -+#define 
ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SHIFT (16) -+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LENGTH (10) -+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_B -+*/ -+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_MASK (0x000003FF) -+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SHIFT (0) -+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LENGTH (10) -+#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH3BLND2_R_OFFSET (0x0130) -+ -+/* PDP, GRPH3BLND2_R, GRPH3PIXDBL -+*/ -+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_MASK (0x80000000) -+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_SHIFT (31) -+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_LENGTH (1) -+#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3BLND2_R, GRPH3LINDBL -+*/ -+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_MASK (0x20000000) -+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_SHIFT (29) -+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_LENGTH (1) -+#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3BLND2_R, GRPH3CKEYMASK_R -+*/ -+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_MASK (0x000003FF) -+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SHIFT (0) -+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LENGTH (10) -+#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH3BLND2_GB_OFFSET (0x0134) -+ -+/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_G -+*/ -+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_MASK (0x03FF0000) -+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SHIFT (16) -+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LENGTH (10) -+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_B -+*/ -+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_MASK (0x000003FF) -+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SHIFT (0) -+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LENGTH (10) -+#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH4BLND2_R_OFFSET (0x0138) -+ -+/* PDP, GRPH4BLND2_R, GRPH4PIXDBL -+*/ -+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_MASK (0x80000000) -+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_SHIFT (31) -+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_LENGTH (1) -+#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4BLND2_R, GRPH4LINDBL -+*/ -+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_MASK (0x20000000) -+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_SHIFT (29) -+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_LENGTH (1) -+#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4BLND2_R, GRPH4CKEYMASK_R -+*/ -+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_MASK (0x000003FF) -+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SHIFT (0) -+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LENGTH 
(10) -+#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH4BLND2_GB_OFFSET (0x013C) -+ -+/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_G -+*/ -+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_MASK (0x03FF0000) -+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SHIFT (16) -+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LENGTH (10) -+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_B -+*/ -+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_MASK (0x000003FF) -+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SHIFT (0) -+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LENGTH (10) -+#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1BLND2_R_OFFSET (0x0140) -+ -+/* PDP, VID1BLND2_R, VID1CKEYMASK_R -+*/ -+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_MASK (0x000003FF) -+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_LSBMASK (0x000003FF) -+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_SHIFT (0) -+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_LENGTH (10) -+#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1BLND2_GB_OFFSET (0x0144) -+ -+/* PDP, VID1BLND2_GB, VID1CKEYMASK_G -+*/ -+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_MASK (0x03FF0000) -+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_LSBMASK (0x000003FF) -+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_SHIFT (16) -+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_LENGTH (10) -+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1BLND2_GB, VID1CKEYMASK_B -+*/ -+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_MASK (0x000003FF) -+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_LSBMASK (0x000003FF) -+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_SHIFT (0) -+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_LENGTH (10) -+#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2BLND2_R_OFFSET (0x0148) -+ -+/* PDP, VID2BLND2_R, VID2CKEYMASK_R -+*/ -+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_MASK (0x000003FF) -+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_LSBMASK (0x000003FF) -+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_SHIFT (0) -+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_LENGTH (10) -+#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2BLND2_GB_OFFSET (0x014C) -+ -+/* PDP, VID2BLND2_GB, VID2CKEYMASK_G -+*/ -+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_MASK (0x03FF0000) -+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_LSBMASK (0x000003FF) -+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_SHIFT (16) -+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_LENGTH (10) -+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2BLND2_GB, VID2CKEYMASK_B -+*/ -+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_MASK (0x000003FF) -+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_LSBMASK (0x000003FF) -+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_SHIFT (0) -+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_LENGTH (10) -+#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3BLND2_R_OFFSET (0x0150) -+ -+/* PDP, VID3BLND2_R, VID3CKEYMASK_R -+*/ -+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_MASK (0x000003FF) -+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_LSBMASK (0x000003FF) -+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_SHIFT (0) -+#define 
ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_LENGTH (10) -+#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3BLND2_GB_OFFSET (0x0154) -+ -+/* PDP, VID3BLND2_GB, VID3CKEYMASK_G -+*/ -+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_MASK (0x03FF0000) -+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_LSBMASK (0x000003FF) -+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_SHIFT (16) -+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_LENGTH (10) -+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3BLND2_GB, VID3CKEYMASK_B -+*/ -+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_MASK (0x000003FF) -+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_LSBMASK (0x000003FF) -+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_SHIFT (0) -+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_LENGTH (10) -+#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4BLND2_R_OFFSET (0x0158) -+ -+/* PDP, VID4BLND2_R, VID4CKEYMASK_R -+*/ -+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_MASK (0x000003FF) -+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_LSBMASK (0x000003FF) -+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_SHIFT (0) -+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_LENGTH (10) -+#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4BLND2_GB_OFFSET (0x015C) -+ -+/* PDP, VID4BLND2_GB, VID4CKEYMASK_G -+*/ -+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_MASK (0x03FF0000) -+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_LSBMASK (0x000003FF) -+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_SHIFT (16) -+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_LENGTH (10) -+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4BLND2_GB, VID4CKEYMASK_B -+*/ -+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_MASK (0x000003FF) -+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_LSBMASK (0x000003FF) -+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_SHIFT (0) -+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_LENGTH (10) -+#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_OFFSET (0x0160) -+ -+/* PDP, GRPH1INTERLEAVE_CTRL, GRPH1INTFIELD -+*/ -+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_MASK (0x00000001) -+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SHIFT (0) -+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LENGTH (1) -+#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_OFFSET (0x0164) -+ -+/* PDP, GRPH2INTERLEAVE_CTRL, GRPH2INTFIELD -+*/ -+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_MASK (0x00000001) -+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SHIFT (0) -+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LENGTH (1) -+#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_OFFSET (0x0168) -+ -+/* PDP, GRPH3INTERLEAVE_CTRL, GRPH3INTFIELD -+*/ -+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_MASK (0x00000001) -+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SHIFT (0) -+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LENGTH (1) -+#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_OFFSET (0x016C) -+ -+/* PDP, GRPH4INTERLEAVE_CTRL, GRPH4INTFIELD 
-+*/ -+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_MASK (0x00000001) -+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SHIFT (0) -+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LENGTH (1) -+#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1INTERLEAVE_CTRL_OFFSET (0x0170) -+ -+/* PDP, VID1INTERLEAVE_CTRL, VID1INTFIELD -+*/ -+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_MASK (0x00000001) -+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LSBMASK (0x00000001) -+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SHIFT (0) -+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LENGTH (1) -+#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2INTERLEAVE_CTRL_OFFSET (0x0174) -+ -+/* PDP, VID2INTERLEAVE_CTRL, VID2INTFIELD -+*/ -+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_MASK (0x00000001) -+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LSBMASK (0x00000001) -+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SHIFT (0) -+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LENGTH (1) -+#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3INTERLEAVE_CTRL_OFFSET (0x0178) -+ -+/* PDP, VID3INTERLEAVE_CTRL, VID3INTFIELD -+*/ -+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_MASK (0x00000001) -+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LSBMASK (0x00000001) -+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SHIFT (0) -+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LENGTH (1) -+#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4INTERLEAVE_CTRL_OFFSET (0x017C) -+ -+/* PDP, VID4INTERLEAVE_CTRL, VID4INTFIELD -+*/ -+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_MASK (0x00000001) -+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LSBMASK (0x00000001) -+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SHIFT (0) -+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LENGTH (1) -+#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH1BASEADDR_OFFSET (0x0180) -+ -+/* PDP, GRPH1BASEADDR, GRPH1BASEADDR -+*/ -+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK (0xFFFFFFE0) -+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_LSBMASK (0x07FFFFFF) -+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_SHIFT (5) -+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_LENGTH (27) -+#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH2BASEADDR_OFFSET (0x0184) -+ -+/* PDP, GRPH2BASEADDR, GRPH2BASEADDR -+*/ -+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_MASK (0xFFFFFFE0) -+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_LSBMASK (0x07FFFFFF) -+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_SHIFT (5) -+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_LENGTH (27) -+#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH3BASEADDR_OFFSET (0x0188) -+ -+/* PDP, GRPH3BASEADDR, GRPH3BASEADDR -+*/ -+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_MASK (0xFFFFFFE0) -+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_LSBMASK (0x07FFFFFF) -+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_SHIFT (5) -+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_LENGTH (27) -+#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH4BASEADDR_OFFSET (0x018C) -+ -+/* PDP, GRPH4BASEADDR, GRPH4BASEADDR -+*/ -+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_MASK (0xFFFFFFE0) -+#define 
ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_LSBMASK (0x07FFFFFF) -+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_SHIFT (5) -+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_LENGTH (27) -+#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1BASEADDR_OFFSET (0x0190) -+ -+/* PDP, VID1BASEADDR, VID1BASEADDR -+*/ -+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_MASK (0xFFFFFFE0) -+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_LSBMASK (0x07FFFFFF) -+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_SHIFT (5) -+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_LENGTH (27) -+#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2BASEADDR_OFFSET (0x0194) -+ -+/* PDP, VID2BASEADDR, VID2BASEADDR -+*/ -+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_MASK (0xFFFFFFE0) -+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_LSBMASK (0x07FFFFFF) -+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_SHIFT (5) -+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_LENGTH (27) -+#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3BASEADDR_OFFSET (0x0198) -+ -+/* PDP, VID3BASEADDR, VID3BASEADDR -+*/ -+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_MASK (0xFFFFFFE0) -+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_LSBMASK (0x07FFFFFF) -+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_SHIFT (5) -+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_LENGTH (27) -+#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4BASEADDR_OFFSET (0x019C) -+ -+/* PDP, VID4BASEADDR, VID4BASEADDR -+*/ -+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_MASK (0xFFFFFFE0) -+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_LSBMASK (0x07FFFFFF) -+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_SHIFT (5) -+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_LENGTH (27) -+#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1UBASEADDR_OFFSET (0x01B0) -+ -+/* PDP, VID1UBASEADDR, VID1UBASEADDR -+*/ -+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_MASK (0xFFFFFFE0) -+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LSBMASK (0x07FFFFFF) -+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_SHIFT (5) -+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH (27) -+#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2UBASEADDR_OFFSET (0x01B4) -+ -+/* PDP, VID2UBASEADDR, VID2UBASEADDR -+*/ -+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_MASK (0xFFFFFFE0) -+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_LSBMASK (0x07FFFFFF) -+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_SHIFT (5) -+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_LENGTH (27) -+#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3UBASEADDR_OFFSET (0x01B8) -+ -+/* PDP, VID3UBASEADDR, VID3UBASEADDR -+*/ -+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_MASK (0xFFFFFFE0) -+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_LSBMASK (0x07FFFFFF) -+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_SHIFT (5) -+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_LENGTH (27) -+#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4UBASEADDR_OFFSET (0x01BC) -+ -+/* PDP, VID4UBASEADDR, VID4UBASEADDR -+*/ -+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_MASK (0xFFFFFFE0) -+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_LSBMASK (0x07FFFFFF) -+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_SHIFT (5) -+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_LENGTH (27) -+#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1VBASEADDR_OFFSET (0x01D0) -+ -+/* PDP, 
VID1VBASEADDR, VID1VBASEADDR -+*/ -+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_MASK (0xFFFFFFE0) -+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LSBMASK (0x07FFFFFF) -+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_SHIFT (5) -+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH (27) -+#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2VBASEADDR_OFFSET (0x01D4) -+ -+/* PDP, VID2VBASEADDR, VID2VBASEADDR -+*/ -+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_MASK (0xFFFFFFE0) -+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_LSBMASK (0x07FFFFFF) -+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_SHIFT (5) -+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_LENGTH (27) -+#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3VBASEADDR_OFFSET (0x01D8) -+ -+/* PDP, VID3VBASEADDR, VID3VBASEADDR -+*/ -+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_MASK (0xFFFFFFE0) -+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_LSBMASK (0x07FFFFFF) -+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_SHIFT (5) -+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_LENGTH (27) -+#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4VBASEADDR_OFFSET (0x01DC) -+ -+/* PDP, VID4VBASEADDR, VID4VBASEADDR -+*/ -+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_MASK (0xFFFFFFE0) -+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_LSBMASK (0x07FFFFFF) -+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_SHIFT (5) -+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_LENGTH (27) -+#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1POSTSKIPCTRL_OFFSET (0x0230) -+ -+/* PDP, VID1POSTSKIPCTRL, VID1HPOSTCLIP -+*/ -+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_MASK (0x007F0000) -+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LSBMASK (0x0000007F) -+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SHIFT (16) -+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LENGTH (7) -+#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1POSTSKIPCTRL, VID1VPOSTCLIP -+*/ -+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_MASK (0x0000003F) -+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LSBMASK (0x0000003F) -+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SHIFT (0) -+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LENGTH (6) -+#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2POSTSKIPCTRL_OFFSET (0x0234) -+ -+/* PDP, VID2POSTSKIPCTRL, VID2HPOSTCLIP -+*/ -+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_MASK (0x007F0000) -+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LSBMASK (0x0000007F) -+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SHIFT (16) -+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LENGTH (7) -+#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2POSTSKIPCTRL, VID2VPOSTCLIP -+*/ -+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_MASK (0x0000003F) -+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LSBMASK (0x0000003F) -+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SHIFT (0) -+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LENGTH (6) -+#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3POSTSKIPCTRL_OFFSET (0x0238) -+ -+/* PDP, VID3POSTSKIPCTRL, VID3HPOSTCLIP -+*/ -+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_MASK (0x007F0000) -+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LSBMASK (0x0000007F) -+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SHIFT (16) -+#define 
ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LENGTH (7) -+#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3POSTSKIPCTRL, VID3VPOSTCLIP -+*/ -+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_MASK (0x0000003F) -+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LSBMASK (0x0000003F) -+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SHIFT (0) -+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LENGTH (6) -+#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4POSTSKIPCTRL_OFFSET (0x023C) -+ -+/* PDP, VID4POSTSKIPCTRL, VID4HPOSTCLIP -+*/ -+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_MASK (0x007F0000) -+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LSBMASK (0x0000007F) -+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SHIFT (16) -+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LENGTH (7) -+#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4POSTSKIPCTRL, VID4VPOSTCLIP -+*/ -+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_MASK (0x0000003F) -+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LSBMASK (0x0000003F) -+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SHIFT (0) -+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LENGTH (6) -+#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH1DECIMATE_CTRL_OFFSET (0x0240) -+ -+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_COUNT -+*/ -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_MODE -+*/ -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SHIFT (3) -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LENGTH (1) -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_PIXEL_HALVE -+*/ -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_MASK (0x00000004) -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SHIFT (2) -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LENGTH (1) -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_EN -+*/ -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_MASK (0x00000001) -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SHIFT (0) -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LENGTH (1) -+#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH2DECIMATE_CTRL_OFFSET (0x0244) -+ -+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_COUNT -+*/ -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) -+#define 
ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_MODE -+*/ -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SHIFT (3) -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LENGTH (1) -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_PIXEL_HALVE -+*/ -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_MASK (0x00000004) -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SHIFT (2) -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LENGTH (1) -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_EN -+*/ -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_MASK (0x00000001) -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SHIFT (0) -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LENGTH (1) -+#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH3DECIMATE_CTRL_OFFSET (0x0248) -+ -+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_COUNT -+*/ -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_MODE -+*/ -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SHIFT (3) -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LENGTH (1) -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_PIXEL_HALVE -+*/ -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_MASK (0x00000004) -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SHIFT (2) -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LENGTH (1) -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_EN -+*/ -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_MASK (0x00000001) -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SHIFT (0) -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LENGTH (1) -+#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SIGNED_FIELD IMG_FALSE -+ -+#define 
ODN_PDP_GRPH4DECIMATE_CTRL_OFFSET (0x024C) -+ -+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_COUNT -+*/ -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_MODE -+*/ -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SHIFT (3) -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LENGTH (1) -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_PIXEL_HALVE -+*/ -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_MASK (0x00000004) -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SHIFT (2) -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LENGTH (1) -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_EN -+*/ -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_MASK (0x00000001) -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SHIFT (0) -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LENGTH (1) -+#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1DECIMATE_CTRL_OFFSET (0x0250) -+ -+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_COUNT -+*/ -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_MODE -+*/ -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SHIFT (3) -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LENGTH (1) -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_PIXEL_HALVE -+*/ -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_MASK (0x00000004) -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SHIFT (2) -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LENGTH (1) -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_EN -+*/ -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_MASK (0x00000001) -+#define 
ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LSBMASK (0x00000001) -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SHIFT (0) -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LENGTH (1) -+#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2DECIMATE_CTRL_OFFSET (0x0254) -+ -+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_COUNT -+*/ -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_MODE -+*/ -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SHIFT (3) -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LENGTH (1) -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_PIXEL_HALVE -+*/ -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_MASK (0x00000004) -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SHIFT (2) -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LENGTH (1) -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_EN -+*/ -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_MASK (0x00000001) -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LSBMASK (0x00000001) -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SHIFT (0) -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LENGTH (1) -+#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3DECIMATE_CTRL_OFFSET (0x0258) -+ -+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_COUNT -+*/ -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_MODE -+*/ -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SHIFT (3) -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LENGTH (1) -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_PIXEL_HALVE -+*/ -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_MASK (0x00000004) -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SHIFT (2) -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LENGTH (1) -+#define 
ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_EN -+*/ -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_MASK (0x00000001) -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LSBMASK (0x00000001) -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SHIFT (0) -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LENGTH (1) -+#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4DECIMATE_CTRL_OFFSET (0x025C) -+ -+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_COUNT -+*/ -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_MODE -+*/ -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SHIFT (3) -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LENGTH (1) -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_PIXEL_HALVE -+*/ -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_MASK (0x00000004) -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SHIFT (2) -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LENGTH (1) -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_EN -+*/ -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_MASK (0x00000001) -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LSBMASK (0x00000001) -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SHIFT (0) -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LENGTH (1) -+#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1SKIPCTRL_OFFSET (0x0270) -+ -+/* PDP, VID1SKIPCTRL, VID1HSKIP -+*/ -+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_MASK (0x0FFF0000) -+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_SHIFT (16) -+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_LENGTH (12) -+#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SKIPCTRL, VID1VSKIP -+*/ -+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_MASK (0x00000FFF) -+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_SHIFT (0) -+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_LENGTH (12) -+#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2SKIPCTRL_OFFSET (0x0274) -+ -+/* PDP, VID2SKIPCTRL, VID2HSKIP -+*/ -+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_MASK (0x0FFF0000) -+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_SHIFT (16) -+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_LENGTH (12) -+#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SKIPCTRL, VID2VSKIP -+*/ -+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_MASK (0x00000FFF) -+#define 
ODN_PDP_VID2SKIPCTRL_VID2VSKIP_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_SHIFT (0) -+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_LENGTH (12) -+#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3SKIPCTRL_OFFSET (0x0278) -+ -+/* PDP, VID3SKIPCTRL, VID3HSKIP -+*/ -+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_MASK (0x0FFF0000) -+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_SHIFT (16) -+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_LENGTH (12) -+#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SKIPCTRL, VID3VSKIP -+*/ -+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_MASK (0x00000FFF) -+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_SHIFT (0) -+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_LENGTH (12) -+#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4SKIPCTRL_OFFSET (0x027C) -+ -+/* PDP, VID4SKIPCTRL, VID4HSKIP -+*/ -+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_MASK (0x0FFF0000) -+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_SHIFT (16) -+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_LENGTH (12) -+#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SKIPCTRL, VID4VSKIP -+*/ -+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_MASK (0x00000FFF) -+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_SHIFT (0) -+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_LENGTH (12) -+#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1SCALECTRL_OFFSET (0x0460) -+ -+/* PDP, VID1SCALECTRL, VID1HSCALEBP -+*/ -+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_MASK (0x80000000) -+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_LSBMASK (0x00000001) -+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_SHIFT (31) -+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_LENGTH (1) -+#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SCALECTRL, VID1VSCALEBP -+*/ -+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_MASK (0x40000000) -+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_LSBMASK (0x00000001) -+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_SHIFT (30) -+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_LENGTH (1) -+#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SCALECTRL, VID1HSBEFOREVS -+*/ -+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_MASK (0x20000000) -+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_LSBMASK (0x00000001) -+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_SHIFT (29) -+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_LENGTH (1) -+#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SCALECTRL, VID1VSURUNCTRL -+*/ -+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_MASK (0x08000000) -+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_LSBMASK (0x00000001) -+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_SHIFT (27) -+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_LENGTH (1) -+#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SCALECTRL, VID1PAN_EN -+*/ -+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_MASK (0x00040000) -+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_LSBMASK (0x00000001) -+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_SHIFT (18) -+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_LENGTH (1) -+#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SCALECTRL, VID1VORDER -+*/ -+#define 
ODN_PDP_VID1SCALECTRL_VID1VORDER_MASK (0x00030000) -+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_LSBMASK (0x00000003) -+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_SHIFT (16) -+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_LENGTH (2) -+#define ODN_PDP_VID1SCALECTRL_VID1VORDER_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SCALECTRL, VID1VPITCH -+*/ -+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_MASK (0x0000FFFF) -+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_SHIFT (0) -+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_LENGTH (16) -+#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1VSINIT_OFFSET (0x0464) -+ -+/* PDP, VID1VSINIT, VID1VINITIAL1 -+*/ -+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_MASK (0xFFFF0000) -+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_SHIFT (16) -+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_LENGTH (16) -+#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1VSINIT, VID1VINITIAL0 -+*/ -+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_MASK (0x0000FFFF) -+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_SHIFT (0) -+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_LENGTH (16) -+#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1VCOEFF0_OFFSET (0x0468) -+ -+/* PDP, VID1VCOEFF0, VID1VCOEFF0 -+*/ -+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_SHIFT (0) -+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_LENGTH (32) -+#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1VCOEFF1_OFFSET (0x046C) -+ -+/* PDP, VID1VCOEFF1, VID1VCOEFF1 -+*/ -+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_SHIFT (0) -+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_LENGTH (32) -+#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1VCOEFF2_OFFSET (0x0470) -+ -+/* PDP, VID1VCOEFF2, VID1VCOEFF2 -+*/ -+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_SHIFT (0) -+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_LENGTH (32) -+#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1VCOEFF3_OFFSET (0x0474) -+ -+/* PDP, VID1VCOEFF3, VID1VCOEFF3 -+*/ -+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_SHIFT (0) -+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_LENGTH (32) -+#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1VCOEFF4_OFFSET (0x0478) -+ -+/* PDP, VID1VCOEFF4, VID1VCOEFF4 -+*/ -+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_SHIFT (0) -+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_LENGTH (32) -+#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1VCOEFF5_OFFSET (0x047C) -+ -+/* PDP, VID1VCOEFF5, VID1VCOEFF5 -+*/ -+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_LSBMASK (0xFFFFFFFF) -+#define 
ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_SHIFT (0) -+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_LENGTH (32) -+#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1VCOEFF6_OFFSET (0x0480) -+ -+/* PDP, VID1VCOEFF6, VID1VCOEFF6 -+*/ -+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_SHIFT (0) -+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_LENGTH (32) -+#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1VCOEFF7_OFFSET (0x0484) -+ -+/* PDP, VID1VCOEFF7, VID1VCOEFF7 -+*/ -+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_SHIFT (0) -+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_LENGTH (32) -+#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1VCOEFF8_OFFSET (0x0488) -+ -+/* PDP, VID1VCOEFF8, VID1VCOEFF8 -+*/ -+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_MASK (0x000000FF) -+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_LSBMASK (0x000000FF) -+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_SHIFT (0) -+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_LENGTH (8) -+#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HSINIT_OFFSET (0x048C) -+ -+/* PDP, VID1HSINIT, VID1HINITIAL -+*/ -+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_MASK (0xFFFF0000) -+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_SHIFT (16) -+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_LENGTH (16) -+#define ODN_PDP_VID1HSINIT_VID1HINITIAL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1HSINIT, VID1HPITCH -+*/ -+#define ODN_PDP_VID1HSINIT_VID1HPITCH_MASK (0x0000FFFF) -+#define ODN_PDP_VID1HSINIT_VID1HPITCH_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID1HSINIT_VID1HPITCH_SHIFT (0) -+#define ODN_PDP_VID1HSINIT_VID1HPITCH_LENGTH (16) -+#define ODN_PDP_VID1HSINIT_VID1HPITCH_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HCOEFF0_OFFSET (0x0490) -+ -+/* PDP, VID1HCOEFF0, VID1HCOEFF0 -+*/ -+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_SHIFT (0) -+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_LENGTH (32) -+#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HCOEFF1_OFFSET (0x0494) -+ -+/* PDP, VID1HCOEFF1, VID1HCOEFF1 -+*/ -+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_SHIFT (0) -+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_LENGTH (32) -+#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HCOEFF2_OFFSET (0x0498) -+ -+/* PDP, VID1HCOEFF2, VID1HCOEFF2 -+*/ -+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_SHIFT (0) -+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_LENGTH (32) -+#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HCOEFF3_OFFSET (0x049C) -+ -+/* PDP, VID1HCOEFF3, VID1HCOEFF3 -+*/ -+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_SHIFT (0) -+#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_LENGTH (32) -+#define 
ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HCOEFF4_OFFSET (0x04A0) -+ -+/* PDP, VID1HCOEFF4, VID1HCOEFF4 -+*/ -+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_SHIFT (0) -+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_LENGTH (32) -+#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HCOEFF5_OFFSET (0x04A4) -+ -+/* PDP, VID1HCOEFF5, VID1HCOEFF5 -+*/ -+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_SHIFT (0) -+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_LENGTH (32) -+#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HCOEFF6_OFFSET (0x04A8) -+ -+/* PDP, VID1HCOEFF6, VID1HCOEFF6 -+*/ -+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_SHIFT (0) -+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_LENGTH (32) -+#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HCOEFF7_OFFSET (0x04AC) -+ -+/* PDP, VID1HCOEFF7, VID1HCOEFF7 -+*/ -+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_SHIFT (0) -+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_LENGTH (32) -+#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HCOEFF8_OFFSET (0x04B0) -+ -+/* PDP, VID1HCOEFF8, VID1HCOEFF8 -+*/ -+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_SHIFT (0) -+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_LENGTH (32) -+#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HCOEFF9_OFFSET (0x04B4) -+ -+/* PDP, VID1HCOEFF9, VID1HCOEFF9 -+*/ -+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_SHIFT (0) -+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_LENGTH (32) -+#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HCOEFF10_OFFSET (0x04B8) -+ -+/* PDP, VID1HCOEFF10, VID1HCOEFF10 -+*/ -+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_SHIFT (0) -+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_LENGTH (32) -+#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HCOEFF11_OFFSET (0x04BC) -+ -+/* PDP, VID1HCOEFF11, VID1HCOEFF11 -+*/ -+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_SHIFT (0) -+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_LENGTH (32) -+#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HCOEFF12_OFFSET (0x04C0) -+ -+/* PDP, VID1HCOEFF12, VID1HCOEFF12 -+*/ -+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_SHIFT (0) -+#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_LENGTH (32) -+#define 
ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HCOEFF13_OFFSET (0x04C4) -+ -+/* PDP, VID1HCOEFF13, VID1HCOEFF13 -+*/ -+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_SHIFT (0) -+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_LENGTH (32) -+#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HCOEFF14_OFFSET (0x04C8) -+ -+/* PDP, VID1HCOEFF14, VID1HCOEFF14 -+*/ -+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_SHIFT (0) -+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_LENGTH (32) -+#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HCOEFF15_OFFSET (0x04CC) -+ -+/* PDP, VID1HCOEFF15, VID1HCOEFF15 -+*/ -+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_SHIFT (0) -+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_LENGTH (32) -+#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1HCOEFF16_OFFSET (0x04D0) -+ -+/* PDP, VID1HCOEFF16, VID1HCOEFF16 -+*/ -+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_MASK (0x000000FF) -+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_LSBMASK (0x000000FF) -+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_SHIFT (0) -+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_LENGTH (8) -+#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1SCALESIZE_OFFSET (0x04D4) -+ -+/* PDP, VID1SCALESIZE, VID1SCALEWIDTH -+*/ -+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_MASK (0x0FFF0000) -+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_SHIFT (16) -+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_LENGTH (12) -+#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SCALESIZE, VID1SCALEHEIGHT -+*/ -+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_MASK (0x00000FFF) -+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SHIFT (0) -+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LENGTH (12) -+#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_CORE_ID_OFFSET (0x04E0) -+ -+/* PDP, PVR_ODN_PDP_CORE_ID, GROUP_ID -+*/ -+#define ODN_PDP_CORE_ID_GROUP_ID_MASK (0xFF000000) -+#define ODN_PDP_CORE_ID_GROUP_ID_LSBMASK (0x000000FF) -+#define ODN_PDP_CORE_ID_GROUP_ID_SHIFT (24) -+#define ODN_PDP_CORE_ID_GROUP_ID_LENGTH (8) -+#define ODN_PDP_CORE_ID_GROUP_ID_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PVR_ODN_PDP_CORE_ID, CORE_ID -+*/ -+#define ODN_PDP_CORE_ID_CORE_ID_MASK (0x00FF0000) -+#define ODN_PDP_CORE_ID_CORE_ID_LSBMASK (0x000000FF) -+#define ODN_PDP_CORE_ID_CORE_ID_SHIFT (16) -+#define ODN_PDP_CORE_ID_CORE_ID_LENGTH (8) -+#define ODN_PDP_CORE_ID_CORE_ID_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PVR_ODN_PDP_CORE_ID, CONFIG_ID -+*/ -+#define ODN_PDP_CORE_ID_CONFIG_ID_MASK (0x0000FFFF) -+#define ODN_PDP_CORE_ID_CONFIG_ID_LSBMASK (0x0000FFFF) -+#define ODN_PDP_CORE_ID_CONFIG_ID_SHIFT (0) -+#define ODN_PDP_CORE_ID_CONFIG_ID_LENGTH (16) -+#define ODN_PDP_CORE_ID_CONFIG_ID_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_CORE_REV_OFFSET (0x04F0) -+ -+/* PDP, PVR_ODN_PDP_CORE_REV, MAJOR_REV -+*/ -+#define ODN_PDP_CORE_REV_MAJOR_REV_MASK 
(0x00FF0000) -+#define ODN_PDP_CORE_REV_MAJOR_REV_LSBMASK (0x000000FF) -+#define ODN_PDP_CORE_REV_MAJOR_REV_SHIFT (16) -+#define ODN_PDP_CORE_REV_MAJOR_REV_LENGTH (8) -+#define ODN_PDP_CORE_REV_MAJOR_REV_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PVR_ODN_PDP_CORE_REV, MINOR_REV -+*/ -+#define ODN_PDP_CORE_REV_MINOR_REV_MASK (0x0000FF00) -+#define ODN_PDP_CORE_REV_MINOR_REV_LSBMASK (0x000000FF) -+#define ODN_PDP_CORE_REV_MINOR_REV_SHIFT (8) -+#define ODN_PDP_CORE_REV_MINOR_REV_LENGTH (8) -+#define ODN_PDP_CORE_REV_MINOR_REV_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PVR_ODN_PDP_CORE_REV, MAINT_REV -+*/ -+#define ODN_PDP_CORE_REV_MAINT_REV_MASK (0x000000FF) -+#define ODN_PDP_CORE_REV_MAINT_REV_LSBMASK (0x000000FF) -+#define ODN_PDP_CORE_REV_MAINT_REV_SHIFT (0) -+#define ODN_PDP_CORE_REV_MAINT_REV_LENGTH (8) -+#define ODN_PDP_CORE_REV_MAINT_REV_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2SCALECTRL_OFFSET (0x0500) -+ -+/* PDP, VID2SCALECTRL, VID2HSCALEBP -+*/ -+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_MASK (0x80000000) -+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_LSBMASK (0x00000001) -+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_SHIFT (31) -+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_LENGTH (1) -+#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SCALECTRL, VID2VSCALEBP -+*/ -+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_MASK (0x40000000) -+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_LSBMASK (0x00000001) -+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_SHIFT (30) -+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_LENGTH (1) -+#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SCALECTRL, VID2HSBEFOREVS -+*/ -+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_MASK (0x20000000) -+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_LSBMASK (0x00000001) -+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_SHIFT (29) -+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_LENGTH (1) -+#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SCALECTRL, VID2VSURUNCTRL -+*/ -+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_MASK (0x08000000) -+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_LSBMASK (0x00000001) -+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_SHIFT (27) -+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_LENGTH (1) -+#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SCALECTRL, VID2PAN_EN -+*/ -+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_MASK (0x00040000) -+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_LSBMASK (0x00000001) -+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_SHIFT (18) -+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_LENGTH (1) -+#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SCALECTRL, VID2VORDER -+*/ -+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_MASK (0x00030000) -+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_LSBMASK (0x00000003) -+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_SHIFT (16) -+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_LENGTH (2) -+#define ODN_PDP_VID2SCALECTRL_VID2VORDER_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SCALECTRL, VID2VPITCH -+*/ -+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_MASK (0x0000FFFF) -+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_SHIFT (0) -+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_LENGTH (16) -+#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2VSINIT_OFFSET (0x0504) -+ -+/* PDP, VID2VSINIT, VID2VINITIAL1 -+*/ -+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_MASK (0xFFFF0000) -+#define 
ODN_PDP_VID2VSINIT_VID2VINITIAL1_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_SHIFT (16) -+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_LENGTH (16) -+#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2VSINIT, VID2VINITIAL0 -+*/ -+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_MASK (0x0000FFFF) -+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_SHIFT (0) -+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_LENGTH (16) -+#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2VCOEFF0_OFFSET (0x0508) -+ -+/* PDP, VID2VCOEFF0, VID2VCOEFF0 -+*/ -+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_SHIFT (0) -+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_LENGTH (32) -+#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2VCOEFF1_OFFSET (0x050C) -+ -+/* PDP, VID2VCOEFF1, VID2VCOEFF1 -+*/ -+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_SHIFT (0) -+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_LENGTH (32) -+#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2VCOEFF2_OFFSET (0x0510) -+ -+/* PDP, VID2VCOEFF2, VID2VCOEFF2 -+*/ -+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_SHIFT (0) -+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_LENGTH (32) -+#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2VCOEFF3_OFFSET (0x0514) -+ -+/* PDP, VID2VCOEFF3, VID2VCOEFF3 -+*/ -+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_SHIFT (0) -+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_LENGTH (32) -+#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2VCOEFF4_OFFSET (0x0518) -+ -+/* PDP, VID2VCOEFF4, VID2VCOEFF4 -+*/ -+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_SHIFT (0) -+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_LENGTH (32) -+#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2VCOEFF5_OFFSET (0x051C) -+ -+/* PDP, VID2VCOEFF5, VID2VCOEFF5 -+*/ -+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_SHIFT (0) -+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_LENGTH (32) -+#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2VCOEFF6_OFFSET (0x0520) -+ -+/* PDP, VID2VCOEFF6, VID2VCOEFF6 -+*/ -+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_SHIFT (0) -+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_LENGTH (32) -+#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2VCOEFF7_OFFSET (0x0524) -+ -+/* PDP, VID2VCOEFF7, VID2VCOEFF7 -+*/ -+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_SHIFT (0) -+#define 
ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_LENGTH (32) -+#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2VCOEFF8_OFFSET (0x0528) -+ -+/* PDP, VID2VCOEFF8, VID2VCOEFF8 -+*/ -+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_MASK (0x000000FF) -+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_LSBMASK (0x000000FF) -+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_SHIFT (0) -+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_LENGTH (8) -+#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2HSINIT_OFFSET (0x052C) -+ -+/* PDP, VID2HSINIT, VID2HINITIAL -+*/ -+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_MASK (0xFFFF0000) -+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_SHIFT (16) -+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_LENGTH (16) -+#define ODN_PDP_VID2HSINIT_VID2HINITIAL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2HSINIT, VID2HPITCH -+*/ -+#define ODN_PDP_VID2HSINIT_VID2HPITCH_MASK (0x0000FFFF) -+#define ODN_PDP_VID2HSINIT_VID2HPITCH_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID2HSINIT_VID2HPITCH_SHIFT (0) -+#define ODN_PDP_VID2HSINIT_VID2HPITCH_LENGTH (16) -+#define ODN_PDP_VID2HSINIT_VID2HPITCH_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2HCOEFF0_OFFSET (0x0530) -+ -+/* PDP, VID2HCOEFF0, VID2HCOEFF0 -+*/ -+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_SHIFT (0) -+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_LENGTH (32) -+#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2HCOEFF1_OFFSET (0x0534) -+ -+/* PDP, VID2HCOEFF1, VID2HCOEFF1 -+*/ -+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_SHIFT (0) -+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_LENGTH (32) -+#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2HCOEFF2_OFFSET (0x0538) -+ -+/* PDP, VID2HCOEFF2, VID2HCOEFF2 -+*/ -+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_SHIFT (0) -+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_LENGTH (32) -+#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2HCOEFF3_OFFSET (0x053C) -+ -+/* PDP, VID2HCOEFF3, VID2HCOEFF3 -+*/ -+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_SHIFT (0) -+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_LENGTH (32) -+#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2HCOEFF4_OFFSET (0x0540) -+ -+/* PDP, VID2HCOEFF4, VID2HCOEFF4 -+*/ -+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_SHIFT (0) -+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_LENGTH (32) -+#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2HCOEFF5_OFFSET (0x0544) -+ -+/* PDP, VID2HCOEFF5, VID2HCOEFF5 -+*/ -+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_SHIFT (0) -+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_LENGTH (32) -+#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_SIGNED_FIELD IMG_FALSE -+ -+#define 
ODN_PDP_VID2HCOEFF6_OFFSET (0x0548) -+ -+/* PDP, VID2HCOEFF6, VID2HCOEFF6 -+*/ -+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_SHIFT (0) -+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_LENGTH (32) -+#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2HCOEFF7_OFFSET (0x054C) -+ -+/* PDP, VID2HCOEFF7, VID2HCOEFF7 -+*/ -+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_SHIFT (0) -+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_LENGTH (32) -+#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2HCOEFF8_OFFSET (0x0550) -+ -+/* PDP, VID2HCOEFF8, VID2HCOEFF8 -+*/ -+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_SHIFT (0) -+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_LENGTH (32) -+#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2HCOEFF9_OFFSET (0x0554) -+ -+/* PDP, VID2HCOEFF9, VID2HCOEFF9 -+*/ -+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_SHIFT (0) -+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_LENGTH (32) -+#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2HCOEFF10_OFFSET (0x0558) -+ -+/* PDP, VID2HCOEFF10, VID2HCOEFF10 -+*/ -+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_SHIFT (0) -+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_LENGTH (32) -+#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2HCOEFF11_OFFSET (0x055C) -+ -+/* PDP, VID2HCOEFF11, VID2HCOEFF11 -+*/ -+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_SHIFT (0) -+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_LENGTH (32) -+#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2HCOEFF12_OFFSET (0x0560) -+ -+/* PDP, VID2HCOEFF12, VID2HCOEFF12 -+*/ -+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_SHIFT (0) -+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_LENGTH (32) -+#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2HCOEFF13_OFFSET (0x0564) -+ -+/* PDP, VID2HCOEFF13, VID2HCOEFF13 -+*/ -+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_SHIFT (0) -+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_LENGTH (32) -+#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2HCOEFF14_OFFSET (0x0568) -+ -+/* PDP, VID2HCOEFF14, VID2HCOEFF14 -+*/ -+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_SHIFT (0) -+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_LENGTH (32) -+#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_SIGNED_FIELD IMG_FALSE -+ -+#define 
ODN_PDP_VID2HCOEFF15_OFFSET (0x056C) -+ -+/* PDP, VID2HCOEFF15, VID2HCOEFF15 -+*/ -+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_SHIFT (0) -+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_LENGTH (32) -+#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2HCOEFF16_OFFSET (0x0570) -+ -+/* PDP, VID2HCOEFF16, VID2HCOEFF16 -+*/ -+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_MASK (0x000000FF) -+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_LSBMASK (0x000000FF) -+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_SHIFT (0) -+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_LENGTH (8) -+#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2SCALESIZE_OFFSET (0x0574) -+ -+/* PDP, VID2SCALESIZE, VID2SCALEWIDTH -+*/ -+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_MASK (0x0FFF0000) -+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_SHIFT (16) -+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_LENGTH (12) -+#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SCALESIZE, VID2SCALEHEIGHT -+*/ -+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_MASK (0x00000FFF) -+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SHIFT (0) -+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LENGTH (12) -+#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3SCALECTRL_OFFSET (0x0578) -+ -+/* PDP, VID3SCALECTRL, VID3HSCALEBP -+*/ -+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_MASK (0x80000000) -+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_LSBMASK (0x00000001) -+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_SHIFT (31) -+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_LENGTH (1) -+#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SCALECTRL, VID3VSCALEBP -+*/ -+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_MASK (0x40000000) -+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_LSBMASK (0x00000001) -+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_SHIFT (30) -+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_LENGTH (1) -+#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SCALECTRL, VID3HSBEFOREVS -+*/ -+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_MASK (0x20000000) -+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_LSBMASK (0x00000001) -+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_SHIFT (29) -+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_LENGTH (1) -+#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SCALECTRL, VID3VSURUNCTRL -+*/ -+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_MASK (0x08000000) -+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_LSBMASK (0x00000001) -+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_SHIFT (27) -+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_LENGTH (1) -+#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SCALECTRL, VID3PAN_EN -+*/ -+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_MASK (0x00040000) -+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_LSBMASK (0x00000001) -+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_SHIFT (18) -+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_LENGTH (1) -+#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SCALECTRL, VID3VORDER -+*/ -+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_MASK (0x00030000) -+#define 
ODN_PDP_VID3SCALECTRL_VID3VORDER_LSBMASK (0x00000003) -+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_SHIFT (16) -+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_LENGTH (2) -+#define ODN_PDP_VID3SCALECTRL_VID3VORDER_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SCALECTRL, VID3VPITCH -+*/ -+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_MASK (0x0000FFFF) -+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_SHIFT (0) -+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_LENGTH (16) -+#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3VSINIT_OFFSET (0x057C) -+ -+/* PDP, VID3VSINIT, VID3VINITIAL1 -+*/ -+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_MASK (0xFFFF0000) -+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_SHIFT (16) -+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_LENGTH (16) -+#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3VSINIT, VID3VINITIAL0 -+*/ -+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_MASK (0x0000FFFF) -+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_SHIFT (0) -+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_LENGTH (16) -+#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3VCOEFF0_OFFSET (0x0580) -+ -+/* PDP, VID3VCOEFF0, VID3VCOEFF0 -+*/ -+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_SHIFT (0) -+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_LENGTH (32) -+#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3VCOEFF1_OFFSET (0x0584) -+ -+/* PDP, VID3VCOEFF1, VID3VCOEFF1 -+*/ -+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_SHIFT (0) -+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_LENGTH (32) -+#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3VCOEFF2_OFFSET (0x0588) -+ -+/* PDP, VID3VCOEFF2, VID3VCOEFF2 -+*/ -+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_SHIFT (0) -+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_LENGTH (32) -+#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3VCOEFF3_OFFSET (0x058C) -+ -+/* PDP, VID3VCOEFF3, VID3VCOEFF3 -+*/ -+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_SHIFT (0) -+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_LENGTH (32) -+#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3VCOEFF4_OFFSET (0x0590) -+ -+/* PDP, VID3VCOEFF4, VID3VCOEFF4 -+*/ -+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_SHIFT (0) -+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_LENGTH (32) -+#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3VCOEFF5_OFFSET (0x0594) -+ -+/* PDP, VID3VCOEFF5, VID3VCOEFF5 -+*/ -+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_SHIFT (0) -+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_LENGTH 
(32) -+#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3VCOEFF6_OFFSET (0x0598) -+ -+/* PDP, VID3VCOEFF6, VID3VCOEFF6 -+*/ -+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_SHIFT (0) -+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_LENGTH (32) -+#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3VCOEFF7_OFFSET (0x059C) -+ -+/* PDP, VID3VCOEFF7, VID3VCOEFF7 -+*/ -+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_SHIFT (0) -+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_LENGTH (32) -+#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3VCOEFF8_OFFSET (0x05A0) -+ -+/* PDP, VID3VCOEFF8, VID3VCOEFF8 -+*/ -+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_MASK (0x000000FF) -+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_LSBMASK (0x000000FF) -+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_SHIFT (0) -+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_LENGTH (8) -+#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HSINIT_OFFSET (0x05A4) -+ -+/* PDP, VID3HSINIT, VID3HINITIAL -+*/ -+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_MASK (0xFFFF0000) -+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_SHIFT (16) -+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_LENGTH (16) -+#define ODN_PDP_VID3HSINIT_VID3HINITIAL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3HSINIT, VID3HPITCH -+*/ -+#define ODN_PDP_VID3HSINIT_VID3HPITCH_MASK (0x0000FFFF) -+#define ODN_PDP_VID3HSINIT_VID3HPITCH_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID3HSINIT_VID3HPITCH_SHIFT (0) -+#define ODN_PDP_VID3HSINIT_VID3HPITCH_LENGTH (16) -+#define ODN_PDP_VID3HSINIT_VID3HPITCH_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HCOEFF0_OFFSET (0x05A8) -+ -+/* PDP, VID3HCOEFF0, VID3HCOEFF0 -+*/ -+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_SHIFT (0) -+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_LENGTH (32) -+#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HCOEFF1_OFFSET (0x05AC) -+ -+/* PDP, VID3HCOEFF1, VID3HCOEFF1 -+*/ -+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_SHIFT (0) -+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_LENGTH (32) -+#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HCOEFF2_OFFSET (0x05B0) -+ -+/* PDP, VID3HCOEFF2, VID3HCOEFF2 -+*/ -+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_SHIFT (0) -+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_LENGTH (32) -+#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HCOEFF3_OFFSET (0x05B4) -+ -+/* PDP, VID3HCOEFF3, VID3HCOEFF3 -+*/ -+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_SHIFT (0) -+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_LENGTH (32) -+#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HCOEFF4_OFFSET (0x05B8) -+ -+/* PDP, 
VID3HCOEFF4, VID3HCOEFF4 -+*/ -+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_SHIFT (0) -+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_LENGTH (32) -+#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HCOEFF5_OFFSET (0x05BC) -+ -+/* PDP, VID3HCOEFF5, VID3HCOEFF5 -+*/ -+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_SHIFT (0) -+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_LENGTH (32) -+#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HCOEFF6_OFFSET (0x05C0) -+ -+/* PDP, VID3HCOEFF6, VID3HCOEFF6 -+*/ -+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_SHIFT (0) -+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_LENGTH (32) -+#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HCOEFF7_OFFSET (0x05C4) -+ -+/* PDP, VID3HCOEFF7, VID3HCOEFF7 -+*/ -+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_SHIFT (0) -+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_LENGTH (32) -+#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HCOEFF8_OFFSET (0x05C8) -+ -+/* PDP, VID3HCOEFF8, VID3HCOEFF8 -+*/ -+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_SHIFT (0) -+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_LENGTH (32) -+#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HCOEFF9_OFFSET (0x05CC) -+ -+/* PDP, VID3HCOEFF9, VID3HCOEFF9 -+*/ -+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_SHIFT (0) -+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_LENGTH (32) -+#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HCOEFF10_OFFSET (0x05D0) -+ -+/* PDP, VID3HCOEFF10, VID3HCOEFF10 -+*/ -+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_SHIFT (0) -+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_LENGTH (32) -+#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HCOEFF11_OFFSET (0x05D4) -+ -+/* PDP, VID3HCOEFF11, VID3HCOEFF11 -+*/ -+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_SHIFT (0) -+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_LENGTH (32) -+#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HCOEFF12_OFFSET (0x05D8) -+ -+/* PDP, VID3HCOEFF12, VID3HCOEFF12 -+*/ -+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_SHIFT (0) -+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_LENGTH (32) -+#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HCOEFF13_OFFSET (0x05DC) -+ -+/* PDP, VID3HCOEFF13, VID3HCOEFF13 -+*/ 
-+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_SHIFT (0) -+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_LENGTH (32) -+#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HCOEFF14_OFFSET (0x05E0) -+ -+/* PDP, VID3HCOEFF14, VID3HCOEFF14 -+*/ -+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_SHIFT (0) -+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_LENGTH (32) -+#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HCOEFF15_OFFSET (0x05E4) -+ -+/* PDP, VID3HCOEFF15, VID3HCOEFF15 -+*/ -+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_SHIFT (0) -+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_LENGTH (32) -+#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3HCOEFF16_OFFSET (0x05E8) -+ -+/* PDP, VID3HCOEFF16, VID3HCOEFF16 -+*/ -+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_MASK (0x000000FF) -+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_LSBMASK (0x000000FF) -+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_SHIFT (0) -+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_LENGTH (8) -+#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3SCALESIZE_OFFSET (0x05EC) -+ -+/* PDP, VID3SCALESIZE, VID3SCALEWIDTH -+*/ -+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_MASK (0x0FFF0000) -+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_SHIFT (16) -+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_LENGTH (12) -+#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SCALESIZE, VID3SCALEHEIGHT -+*/ -+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_MASK (0x00000FFF) -+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SHIFT (0) -+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LENGTH (12) -+#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4SCALECTRL_OFFSET (0x05F0) -+ -+/* PDP, VID4SCALECTRL, VID4HSCALEBP -+*/ -+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_MASK (0x80000000) -+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_LSBMASK (0x00000001) -+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_SHIFT (31) -+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_LENGTH (1) -+#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SCALECTRL, VID4VSCALEBP -+*/ -+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_MASK (0x40000000) -+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_LSBMASK (0x00000001) -+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_SHIFT (30) -+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_LENGTH (1) -+#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SCALECTRL, VID4HSBEFOREVS -+*/ -+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_MASK (0x20000000) -+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_LSBMASK (0x00000001) -+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_SHIFT (29) -+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_LENGTH (1) -+#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SCALECTRL, VID4VSURUNCTRL -+*/ -+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_MASK (0x08000000) -+#define 
ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_LSBMASK (0x00000001) -+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_SHIFT (27) -+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_LENGTH (1) -+#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SCALECTRL, VID4PAN_EN -+*/ -+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_MASK (0x00040000) -+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_LSBMASK (0x00000001) -+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_SHIFT (18) -+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_LENGTH (1) -+#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SCALECTRL, VID4VORDER -+*/ -+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_MASK (0x00030000) -+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_LSBMASK (0x00000003) -+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_SHIFT (16) -+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_LENGTH (2) -+#define ODN_PDP_VID4SCALECTRL_VID4VORDER_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SCALECTRL, VID4VPITCH -+*/ -+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_MASK (0x0000FFFF) -+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_SHIFT (0) -+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_LENGTH (16) -+#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4VSINIT_OFFSET (0x05F4) -+ -+/* PDP, VID4VSINIT, VID4VINITIAL1 -+*/ -+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_MASK (0xFFFF0000) -+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_SHIFT (16) -+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_LENGTH (16) -+#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4VSINIT, VID4VINITIAL0 -+*/ -+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_MASK (0x0000FFFF) -+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_SHIFT (0) -+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_LENGTH (16) -+#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4VCOEFF0_OFFSET (0x05F8) -+ -+/* PDP, VID4VCOEFF0, VID4VCOEFF0 -+*/ -+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_SHIFT (0) -+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_LENGTH (32) -+#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4VCOEFF1_OFFSET (0x05FC) -+ -+/* PDP, VID4VCOEFF1, VID4VCOEFF1 -+*/ -+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_SHIFT (0) -+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_LENGTH (32) -+#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4VCOEFF2_OFFSET (0x0600) -+ -+/* PDP, VID4VCOEFF2, VID4VCOEFF2 -+*/ -+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_SHIFT (0) -+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_LENGTH (32) -+#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4VCOEFF3_OFFSET (0x0604) -+ -+/* PDP, VID4VCOEFF3, VID4VCOEFF3 -+*/ -+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_SHIFT (0) -+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_LENGTH (32) -+#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_SIGNED_FIELD IMG_FALSE -+ 
-+#define ODN_PDP_VID4VCOEFF4_OFFSET (0x0608) -+ -+/* PDP, VID4VCOEFF4, VID4VCOEFF4 -+*/ -+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_SHIFT (0) -+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_LENGTH (32) -+#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4VCOEFF5_OFFSET (0x060C) -+ -+/* PDP, VID4VCOEFF5, VID4VCOEFF5 -+*/ -+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_SHIFT (0) -+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_LENGTH (32) -+#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4VCOEFF6_OFFSET (0x0610) -+ -+/* PDP, VID4VCOEFF6, VID4VCOEFF6 -+*/ -+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_SHIFT (0) -+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_LENGTH (32) -+#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4VCOEFF7_OFFSET (0x0614) -+ -+/* PDP, VID4VCOEFF7, VID4VCOEFF7 -+*/ -+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_SHIFT (0) -+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_LENGTH (32) -+#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4VCOEFF8_OFFSET (0x0618) -+ -+/* PDP, VID4VCOEFF8, VID4VCOEFF8 -+*/ -+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_MASK (0x000000FF) -+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_LSBMASK (0x000000FF) -+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_SHIFT (0) -+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_LENGTH (8) -+#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HSINIT_OFFSET (0x061C) -+ -+/* PDP, VID4HSINIT, VID4HINITIAL -+*/ -+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_MASK (0xFFFF0000) -+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_SHIFT (16) -+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_LENGTH (16) -+#define ODN_PDP_VID4HSINIT_VID4HINITIAL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4HSINIT, VID4HPITCH -+*/ -+#define ODN_PDP_VID4HSINIT_VID4HPITCH_MASK (0x0000FFFF) -+#define ODN_PDP_VID4HSINIT_VID4HPITCH_LSBMASK (0x0000FFFF) -+#define ODN_PDP_VID4HSINIT_VID4HPITCH_SHIFT (0) -+#define ODN_PDP_VID4HSINIT_VID4HPITCH_LENGTH (16) -+#define ODN_PDP_VID4HSINIT_VID4HPITCH_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HCOEFF0_OFFSET (0x0620) -+ -+/* PDP, VID4HCOEFF0, VID4HCOEFF0 -+*/ -+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_SHIFT (0) -+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_LENGTH (32) -+#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HCOEFF1_OFFSET (0x0624) -+ -+/* PDP, VID4HCOEFF1, VID4HCOEFF1 -+*/ -+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_SHIFT (0) -+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_LENGTH (32) -+#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HCOEFF2_OFFSET (0x0628) -+ -+/* PDP, VID4HCOEFF2, VID4HCOEFF2 -+*/ -+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_MASK 
(0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_SHIFT (0) -+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_LENGTH (32) -+#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HCOEFF3_OFFSET (0x062C) -+ -+/* PDP, VID4HCOEFF3, VID4HCOEFF3 -+*/ -+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_SHIFT (0) -+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_LENGTH (32) -+#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HCOEFF4_OFFSET (0x0630) -+ -+/* PDP, VID4HCOEFF4, VID4HCOEFF4 -+*/ -+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_SHIFT (0) -+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_LENGTH (32) -+#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HCOEFF5_OFFSET (0x0634) -+ -+/* PDP, VID4HCOEFF5, VID4HCOEFF5 -+*/ -+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_SHIFT (0) -+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_LENGTH (32) -+#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HCOEFF6_OFFSET (0x0638) -+ -+/* PDP, VID4HCOEFF6, VID4HCOEFF6 -+*/ -+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_SHIFT (0) -+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_LENGTH (32) -+#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HCOEFF7_OFFSET (0x063C) -+ -+/* PDP, VID4HCOEFF7, VID4HCOEFF7 -+*/ -+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_SHIFT (0) -+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_LENGTH (32) -+#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HCOEFF8_OFFSET (0x0640) -+ -+/* PDP, VID4HCOEFF8, VID4HCOEFF8 -+*/ -+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_SHIFT (0) -+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_LENGTH (32) -+#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HCOEFF9_OFFSET (0x0644) -+ -+/* PDP, VID4HCOEFF9, VID4HCOEFF9 -+*/ -+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_SHIFT (0) -+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_LENGTH (32) -+#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HCOEFF10_OFFSET (0x0648) -+ -+/* PDP, VID4HCOEFF10, VID4HCOEFF10 -+*/ -+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_SHIFT (0) -+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_LENGTH (32) -+#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HCOEFF11_OFFSET (0x064C) -+ -+/* PDP, VID4HCOEFF11, VID4HCOEFF11 -+*/ -+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_MASK (0xFFFFFFFF) -+#define 
ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_SHIFT (0) -+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_LENGTH (32) -+#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HCOEFF12_OFFSET (0x0650) -+ -+/* PDP, VID4HCOEFF12, VID4HCOEFF12 -+*/ -+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_SHIFT (0) -+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_LENGTH (32) -+#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HCOEFF13_OFFSET (0x0654) -+ -+/* PDP, VID4HCOEFF13, VID4HCOEFF13 -+*/ -+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_SHIFT (0) -+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_LENGTH (32) -+#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HCOEFF14_OFFSET (0x0658) -+ -+/* PDP, VID4HCOEFF14, VID4HCOEFF14 -+*/ -+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_SHIFT (0) -+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_LENGTH (32) -+#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HCOEFF15_OFFSET (0x065C) -+ -+/* PDP, VID4HCOEFF15, VID4HCOEFF15 -+*/ -+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_MASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_LSBMASK (0xFFFFFFFF) -+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_SHIFT (0) -+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_LENGTH (32) -+#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4HCOEFF16_OFFSET (0x0660) -+ -+/* PDP, VID4HCOEFF16, VID4HCOEFF16 -+*/ -+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_MASK (0x000000FF) -+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_LSBMASK (0x000000FF) -+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_SHIFT (0) -+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_LENGTH (8) -+#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4SCALESIZE_OFFSET (0x0664) -+ -+/* PDP, VID4SCALESIZE, VID4SCALEWIDTH -+*/ -+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_MASK (0x0FFF0000) -+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_SHIFT (16) -+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_LENGTH (12) -+#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SCALESIZE, VID4SCALEHEIGHT -+*/ -+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_MASK (0x00000FFF) -+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SHIFT (0) -+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LENGTH (12) -+#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_PORTER_BLND0_OFFSET (0x0668) -+ -+/* PDP, PORTER_BLND0, BLND0BLENDTYPE -+*/ -+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_MASK (0x00000010) -+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_LSBMASK (0x00000001) -+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_SHIFT (4) -+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_LENGTH (1) -+#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PORTER_BLND0, BLND0PORTERMODE -+*/ -+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_MASK (0x0000000F) -+#define 
ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_LSBMASK (0x0000000F) -+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SHIFT (0) -+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_LENGTH (4) -+#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_PORTER_BLND1_OFFSET (0x066C) -+ -+/* PDP, PORTER_BLND1, BLND1BLENDTYPE -+*/ -+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_MASK (0x00000010) -+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_LSBMASK (0x00000001) -+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_SHIFT (4) -+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_LENGTH (1) -+#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PORTER_BLND1, BLND1PORTERMODE -+*/ -+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_MASK (0x0000000F) -+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_LSBMASK (0x0000000F) -+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_SHIFT (0) -+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_LENGTH (4) -+#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_PORTER_BLND2_OFFSET (0x0670) -+ -+/* PDP, PORTER_BLND2, BLND2BLENDTYPE -+*/ -+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_MASK (0x00000010) -+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_LSBMASK (0x00000001) -+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_SHIFT (4) -+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_LENGTH (1) -+#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PORTER_BLND2, BLND2PORTERMODE -+*/ -+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_MASK (0x0000000F) -+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_LSBMASK (0x0000000F) -+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_SHIFT (0) -+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_LENGTH (4) -+#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_PORTER_BLND3_OFFSET (0x0674) -+ -+/* PDP, PORTER_BLND3, BLND3BLENDTYPE -+*/ -+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_MASK (0x00000010) -+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_LSBMASK (0x00000001) -+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_SHIFT (4) -+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_LENGTH (1) -+#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PORTER_BLND3, BLND3PORTERMODE -+*/ -+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_MASK (0x0000000F) -+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_LSBMASK (0x0000000F) -+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_SHIFT (0) -+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_LENGTH (4) -+#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_PORTER_BLND4_OFFSET (0x0678) -+ -+/* PDP, PORTER_BLND4, BLND4BLENDTYPE -+*/ -+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_MASK (0x00000010) -+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_LSBMASK (0x00000001) -+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_SHIFT (4) -+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_LENGTH (1) -+#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PORTER_BLND4, BLND4PORTERMODE -+*/ -+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_MASK (0x0000000F) -+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_LSBMASK (0x0000000F) -+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_SHIFT (0) -+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_LENGTH (4) -+#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_PORTER_BLND5_OFFSET (0x067C) -+ -+/* PDP, PORTER_BLND5, BLND5BLENDTYPE -+*/ -+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_MASK (0x00000010) -+#define 
ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_LSBMASK (0x00000001) -+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_SHIFT (4) -+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_LENGTH (1) -+#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PORTER_BLND5, BLND5PORTERMODE -+*/ -+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_MASK (0x0000000F) -+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_LSBMASK (0x0000000F) -+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_SHIFT (0) -+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_LENGTH (4) -+#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_PORTER_BLND6_OFFSET (0x0680) -+ -+/* PDP, PORTER_BLND6, BLND6BLENDTYPE -+*/ -+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_MASK (0x00000010) -+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_LSBMASK (0x00000001) -+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_SHIFT (4) -+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_LENGTH (1) -+#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PORTER_BLND6, BLND6PORTERMODE -+*/ -+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_MASK (0x0000000F) -+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_LSBMASK (0x0000000F) -+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_SHIFT (0) -+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_LENGTH (4) -+#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_PORTER_BLND7_OFFSET (0x0684) -+ -+/* PDP, PORTER_BLND7, BLND7BLENDTYPE -+*/ -+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_MASK (0x00000010) -+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_LSBMASK (0x00000001) -+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_SHIFT (4) -+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_LENGTH (1) -+#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PORTER_BLND7, BLND7PORTERMODE -+*/ -+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_MASK (0x0000000F) -+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_LSBMASK (0x0000000F) -+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_SHIFT (0) -+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_LENGTH (4) -+#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06C8) -+ -+/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_TRANS -+*/ -+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_MASK (0x03FF0000) -+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) -+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SHIFT (16) -+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LENGTH (10) -+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_OPAQUE -+*/ -+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) -+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) -+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SHIFT (0) -+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LENGTH (10) -+#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06CC) -+ -+/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMAX -+*/ -+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_MASK (0x03FF0000) -+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LSBMASK (0x000003FF) -+#define 
ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SHIFT (16) -+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LENGTH (10) -+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMIN -+*/ -+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_MASK (0x000003FF) -+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LSBMASK (0x000003FF) -+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SHIFT (0) -+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LENGTH (10) -+#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1LUMAKEY_C_RG_OFFSET (0x06D0) -+ -+/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_R -+*/ -+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_MASK (0x0FFF0000) -+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SHIFT (16) -+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LENGTH (12) -+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_G -+*/ -+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_MASK (0x00000FFF) -+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SHIFT (0) -+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LENGTH (12) -+#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1LUMAKEY_C_B_OFFSET (0x06D4) -+ -+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYALPHAMULT -+*/ -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_MASK (0x20000000) -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LSBMASK (0x00000001) -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SHIFT (29) -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LENGTH (1) -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYEN -+*/ -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_MASK (0x10000000) -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LSBMASK (0x00000001) -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SHIFT (28) -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LENGTH (1) -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYOUTOFF -+*/ -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_MASK (0x03FF0000) -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LSBMASK (0x000003FF) -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SHIFT (16) -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LENGTH (10) -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYC_B -+*/ -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_MASK (0x00000FFF) -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SHIFT (0) -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LENGTH (12) -+#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06D8) -+ -+/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_TRANS -+*/ -+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_MASK (0x03FF0000) -+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) -+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SHIFT (16) -+#define 
ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LENGTH (10) -+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_OPAQUE -+*/ -+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) -+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) -+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SHIFT (0) -+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LENGTH (10) -+#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06DC) -+ -+/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMAX -+*/ -+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_MASK (0x03FF0000) -+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LSBMASK (0x000003FF) -+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SHIFT (16) -+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LENGTH (10) -+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMIN -+*/ -+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_MASK (0x000003FF) -+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LSBMASK (0x000003FF) -+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SHIFT (0) -+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LENGTH (10) -+#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2LUMAKEY_C_RG_OFFSET (0x06E0) -+ -+/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_R -+*/ -+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_MASK (0x0FFF0000) -+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SHIFT (16) -+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LENGTH (12) -+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_G -+*/ -+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_MASK (0x00000FFF) -+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SHIFT (0) -+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LENGTH (12) -+#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2LUMAKEY_C_B_OFFSET (0x06E4) -+ -+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYALPHAMULT -+*/ -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_MASK (0x20000000) -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LSBMASK (0x00000001) -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SHIFT (29) -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LENGTH (1) -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYEN -+*/ -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_MASK (0x10000000) -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LSBMASK (0x00000001) -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SHIFT (28) -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LENGTH (1) -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYOUTOFF -+*/ -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_MASK (0x03FF0000) -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LSBMASK (0x000003FF) -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SHIFT 
(16) -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LENGTH (10) -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYC_B -+*/ -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_MASK (0x00000FFF) -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SHIFT (0) -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LENGTH (12) -+#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06E8) -+ -+/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_TRANS -+*/ -+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_MASK (0x03FF0000) -+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) -+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SHIFT (16) -+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LENGTH (10) -+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_OPAQUE -+*/ -+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) -+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) -+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SHIFT (0) -+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LENGTH (10) -+#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06EC) -+ -+/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMAX -+*/ -+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_MASK (0x03FF0000) -+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LSBMASK (0x000003FF) -+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SHIFT (16) -+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LENGTH (10) -+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMIN -+*/ -+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_MASK (0x000003FF) -+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LSBMASK (0x000003FF) -+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SHIFT (0) -+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LENGTH (10) -+#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3LUMAKEY_C_RG_OFFSET (0x06F0) -+ -+/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_R -+*/ -+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_MASK (0x0FFF0000) -+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SHIFT (16) -+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LENGTH (12) -+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_G -+*/ -+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_MASK (0x00000FFF) -+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SHIFT (0) -+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LENGTH (12) -+#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3LUMAKEY_C_B_OFFSET (0x06F4) -+ -+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYALPHAMULT -+*/ -+#define 
ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_MASK (0x20000000) -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LSBMASK (0x00000001) -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SHIFT (29) -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LENGTH (1) -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYEN -+*/ -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_MASK (0x10000000) -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LSBMASK (0x00000001) -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SHIFT (28) -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LENGTH (1) -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYOUTOFF -+*/ -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_MASK (0x03FF0000) -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LSBMASK (0x000003FF) -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SHIFT (16) -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LENGTH (10) -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYC_B -+*/ -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_MASK (0x00000FFF) -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SHIFT (0) -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LENGTH (12) -+#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06F8) -+ -+/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_TRANS -+*/ -+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_MASK (0x03FF0000) -+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) -+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SHIFT (16) -+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LENGTH (10) -+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_OPAQUE -+*/ -+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) -+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) -+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SHIFT (0) -+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LENGTH (10) -+#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06FC) -+ -+/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMAX -+*/ -+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_MASK (0x03FF0000) -+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LSBMASK (0x000003FF) -+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SHIFT (16) -+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LENGTH (10) -+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMIN -+*/ -+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_MASK (0x000003FF) -+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LSBMASK (0x000003FF) -+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SHIFT (0) -+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LENGTH (10) -+#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE -+ 
-+#define ODN_PDP_VID4LUMAKEY_C_RG_OFFSET (0x0700) -+ -+/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_R -+*/ -+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_MASK (0x0FFF0000) -+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SHIFT (16) -+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LENGTH (12) -+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_G -+*/ -+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_MASK (0x00000FFF) -+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SHIFT (0) -+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LENGTH (12) -+#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4LUMAKEY_C_B_OFFSET (0x0704) -+ -+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYALPHAMULT -+*/ -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_MASK (0x20000000) -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LSBMASK (0x00000001) -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SHIFT (29) -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LENGTH (1) -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYEN -+*/ -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_MASK (0x10000000) -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LSBMASK (0x00000001) -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SHIFT (28) -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LENGTH (1) -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYOUTOFF -+*/ -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_MASK (0x03FF0000) -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LSBMASK (0x000003FF) -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SHIFT (16) -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LENGTH (10) -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYC_B -+*/ -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_MASK (0x00000FFF) -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LSBMASK (0x00000FFF) -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SHIFT (0) -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LENGTH (12) -+#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_CSCCOEFF0_OFFSET (0x0708) -+ -+/* PDP, CSCCOEFF0, CSCCOEFFRU -+*/ -+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_MASK (0x003FF800) -+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_LSBMASK (0x000007FF) -+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_SHIFT (11) -+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_LENGTH (11) -+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CSCCOEFF0, CSCCOEFFRY -+*/ -+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_MASK (0x000007FF) -+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_LSBMASK (0x000007FF) -+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_SHIFT (0) -+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_LENGTH (11) -+#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_CSCCOEFF1_OFFSET (0x070C) -+ -+/* PDP, CSCCOEFF1, CSCCOEFFGY -+*/ -+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_MASK (0x003FF800) -+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_LSBMASK (0x000007FF) -+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_SHIFT (11) -+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_LENGTH (11) -+#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CSCCOEFF1, CSCCOEFFRV -+*/ -+#define 
ODN_PDP_CSCCOEFF1_CSCCOEFFRV_MASK (0x000007FF) -+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_LSBMASK (0x000007FF) -+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_SHIFT (0) -+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_LENGTH (11) -+#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_CSCCOEFF2_OFFSET (0x0710) -+ -+/* PDP, CSCCOEFF2, CSCCOEFFGV -+*/ -+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_MASK (0x003FF800) -+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_LSBMASK (0x000007FF) -+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_SHIFT (11) -+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_LENGTH (11) -+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CSCCOEFF2, CSCCOEFFGU -+*/ -+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_MASK (0x000007FF) -+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_LSBMASK (0x000007FF) -+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_SHIFT (0) -+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_LENGTH (11) -+#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_CSCCOEFF3_OFFSET (0x0714) -+ -+/* PDP, CSCCOEFF3, CSCCOEFFBU -+*/ -+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_MASK (0x003FF800) -+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_LSBMASK (0x000007FF) -+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_SHIFT (11) -+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_LENGTH (11) -+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CSCCOEFF3, CSCCOEFFBY -+*/ -+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_MASK (0x000007FF) -+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_LSBMASK (0x000007FF) -+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_SHIFT (0) -+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_LENGTH (11) -+#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_CSCCOEFF4_OFFSET (0x0718) -+ -+/* PDP, CSCCOEFF4, CSCCOEFFBV -+*/ -+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_MASK (0x000007FF) -+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_LSBMASK (0x000007FF) -+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_SHIFT (0) -+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_LENGTH (11) -+#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_BGNDCOL_AR_OFFSET (0x071C) -+ -+/* PDP, BGNDCOL_AR, BGNDCOL_A -+*/ -+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_MASK (0x03FF0000) -+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_LSBMASK (0x000003FF) -+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SHIFT (16) -+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_LENGTH (10) -+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, BGNDCOL_AR, BGNDCOL_R -+*/ -+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_MASK (0x000003FF) -+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_LSBMASK (0x000003FF) -+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SHIFT (0) -+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_LENGTH (10) -+#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_BGNDCOL_GB_OFFSET (0x0720) -+ -+/* PDP, BGNDCOL_GB, BGNDCOL_G -+*/ -+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_MASK (0x03FF0000) -+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_LSBMASK (0x000003FF) -+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SHIFT (16) -+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_LENGTH (10) -+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, BGNDCOL_GB, BGNDCOL_B -+*/ -+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_MASK (0x000003FF) -+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_LSBMASK (0x000003FF) -+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SHIFT (0) -+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_LENGTH (10) -+#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_BORDCOL_R_OFFSET (0x0724) -+ -+/* PDP, BORDCOL_R, BORDCOL_R -+*/ -+#define ODN_PDP_BORDCOL_R_BORDCOL_R_MASK (0x000003FF) -+#define 
ODN_PDP_BORDCOL_R_BORDCOL_R_LSBMASK (0x000003FF) -+#define ODN_PDP_BORDCOL_R_BORDCOL_R_SHIFT (0) -+#define ODN_PDP_BORDCOL_R_BORDCOL_R_LENGTH (10) -+#define ODN_PDP_BORDCOL_R_BORDCOL_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_BORDCOL_GB_OFFSET (0x0728) -+ -+/* PDP, BORDCOL_GB, BORDCOL_G -+*/ -+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_MASK (0x03FF0000) -+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_LSBMASK (0x000003FF) -+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_SHIFT (16) -+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_LENGTH (10) -+#define ODN_PDP_BORDCOL_GB_BORDCOL_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, BORDCOL_GB, BORDCOL_B -+*/ -+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_MASK (0x000003FF) -+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_LSBMASK (0x000003FF) -+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_SHIFT (0) -+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_LENGTH (10) -+#define ODN_PDP_BORDCOL_GB_BORDCOL_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_LINESTAT_OFFSET (0x0734) -+ -+/* PDP, LINESTAT, LINENO -+*/ -+#define ODN_PDP_LINESTAT_LINENO_MASK (0x00001FFF) -+#define ODN_PDP_LINESTAT_LINENO_LSBMASK (0x00001FFF) -+#define ODN_PDP_LINESTAT_LINENO_SHIFT (0) -+#define ODN_PDP_LINESTAT_LINENO_LENGTH (13) -+#define ODN_PDP_LINESTAT_LINENO_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_OFFSET (0x0738) -+ -+/* PDP, CR_ODN_PDP_PROCAMP_C11C12, CR_PROCAMP_C12 -+*/ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_MASK (0x3FFF0000) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LSBMASK (0x00003FFF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SHIFT (16) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LENGTH (14) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CR_ODN_PDP_PROCAMP_C11C12, CR_PROCAMP_C11 -+*/ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_MASK (0x00003FFF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LSBMASK (0x00003FFF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SHIFT (0) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LENGTH (14) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_OFFSET (0x073C) -+ -+/* PDP, CR_ODN_PDP_PROCAMP_C13C21, CR_PROCAMP_C21 -+*/ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_MASK (0x3FFF0000) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LSBMASK (0x00003FFF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SHIFT (16) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LENGTH (14) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CR_ODN_PDP_PROCAMP_C13C21, CR_PROCAMP_C13 -+*/ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_MASK (0x00003FFF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LSBMASK (0x00003FFF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SHIFT (0) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LENGTH (14) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_OFFSET (0x0740) -+ -+/* PDP, CR_ODN_PDP_PROCAMP_C22C23, CR_PROCAMP_C23 -+*/ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_MASK (0x3FFF0000) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LSBMASK (0x00003FFF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SHIFT (16) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LENGTH (14) -+#define 
ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CR_ODN_PDP_PROCAMP_C22C23, CR_PROCAMP_C22 -+*/ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_MASK (0x00003FFF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LSBMASK (0x00003FFF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SHIFT (0) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LENGTH (14) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_OFFSET (0x0744) -+ -+/* PDP, CR_ODN_PDP_PROCAMP_C31C32, CR_PROCAMP_C32 -+*/ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_MASK (0x3FFF0000) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LSBMASK (0x00003FFF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SHIFT (16) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LENGTH (14) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CR_ODN_PDP_PROCAMP_C31C32, CR_PROCAMP_C31 -+*/ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_MASK (0x00003FFF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LSBMASK (0x00003FFF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SHIFT (0) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LENGTH (14) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_OFFSET (0x0748) -+ -+/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_C33 -+*/ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_MASK (0x3FFF0000) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_LSBMASK (0x00003FFF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_SHIFT (16) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_LENGTH (14) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_RANGE -+*/ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_MASK (0x00000030) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LSBMASK (0x00000003) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SHIFT (4) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LENGTH (2) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_EN -+*/ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_MASK (0x00000001) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_LSBMASK (0x00000001) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_SHIFT (0) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_LENGTH (1) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_OFFSET (0x074C) -+ -+/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_G -+*/ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_MASK (0x0FFF0000) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LSBMASK (0x00000FFF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SHIFT (16) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LENGTH (12) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_B -+*/ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_MASK (0x00000FFF) -+#define 
ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LSBMASK (0x00000FFF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SHIFT (0) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LENGTH (12) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_OFFSET (0x0750) -+ -+/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_R, CR_PROCAMP_OUTOFF_R -+*/ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_MASK (0x00000FFF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LSBMASK (0x00000FFF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SHIFT (0) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LENGTH (12) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_OFFSET (0x0754) -+ -+/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_G -+*/ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_MASK (0x03FF0000) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LSBMASK (0x000003FF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SHIFT (16) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LENGTH (10) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_B -+*/ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_MASK (0x000003FF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LSBMASK (0x000003FF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SHIFT (0) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LENGTH (10) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_OFFSET (0x0758) -+ -+/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_R, CR_PROCAMP_INOFF_R -+*/ -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_MASK (0x000003FF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LSBMASK (0x000003FF) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SHIFT (0) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LENGTH (10) -+#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_SIGNAT_R_OFFSET (0x075C) -+ -+/* PDP, SIGNAT_R, SIGNATURE_R -+*/ -+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_MASK (0x000003FF) -+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_LSBMASK (0x000003FF) -+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_SHIFT (0) -+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_LENGTH (10) -+#define ODN_PDP_SIGNAT_R_SIGNATURE_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_SIGNAT_GB_OFFSET (0x0760) -+ -+/* PDP, SIGNAT_GB, SIGNATURE_G -+*/ -+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_MASK (0x03FF0000) -+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_LSBMASK (0x000003FF) -+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_SHIFT (16) -+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_LENGTH (10) -+#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SIGNAT_GB, SIGNATURE_B -+*/ -+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_MASK (0x000003FF) -+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_LSBMASK (0x000003FF) -+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_SHIFT (0) -+#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_LENGTH (10) -+#define 
ODN_PDP_SIGNAT_GB_SIGNATURE_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_REGISTER_UPDATE_CTRL_OFFSET (0x0764) -+ -+/* PDP, REGISTER_UPDATE_CTRL, BYPASS_DOUBLE_BUFFERING -+*/ -+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_MASK (0x00000004) -+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LSBMASK (0x00000001) -+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SHIFT (2) -+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LENGTH (1) -+#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, REGISTER_UPDATE_CTRL, REGISTERS_VALID -+*/ -+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK (0x00000002) -+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LSBMASK (0x00000001) -+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT (1) -+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LENGTH (1) -+#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, REGISTER_UPDATE_CTRL, USE_VBLANK -+*/ -+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_MASK (0x00000001) -+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LSBMASK (0x00000001) -+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SHIFT (0) -+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LENGTH (1) -+#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_REGISTER_UPDATE_STATUS_OFFSET (0x0768) -+ -+/* PDP, REGISTER_UPDATE_STATUS, REGISTERS_UPDATED -+*/ -+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_MASK (0x00000002) -+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LSBMASK (0x00000001) -+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SHIFT (1) -+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LENGTH (1) -+#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DBGCTRL_OFFSET (0x076C) -+ -+/* PDP, DBGCTRL, DBG_READ -+*/ -+#define ODN_PDP_DBGCTRL_DBG_READ_MASK (0x00000002) -+#define ODN_PDP_DBGCTRL_DBG_READ_LSBMASK (0x00000001) -+#define ODN_PDP_DBGCTRL_DBG_READ_SHIFT (1) -+#define ODN_PDP_DBGCTRL_DBG_READ_LENGTH (1) -+#define ODN_PDP_DBGCTRL_DBG_READ_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DBGCTRL, DBG_ENAB -+*/ -+#define ODN_PDP_DBGCTRL_DBG_ENAB_MASK (0x00000001) -+#define ODN_PDP_DBGCTRL_DBG_ENAB_LSBMASK (0x00000001) -+#define ODN_PDP_DBGCTRL_DBG_ENAB_SHIFT (0) -+#define ODN_PDP_DBGCTRL_DBG_ENAB_LENGTH (1) -+#define ODN_PDP_DBGCTRL_DBG_ENAB_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DBGDATA_R_OFFSET (0x0770) -+ -+/* PDP, DBGDATA_R, DBG_DATA_R -+*/ -+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_MASK (0x000003FF) -+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_LSBMASK (0x000003FF) -+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_SHIFT (0) -+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_LENGTH (10) -+#define ODN_PDP_DBGDATA_R_DBG_DATA_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DBGDATA_GB_OFFSET (0x0774) -+ -+/* PDP, DBGDATA_GB, DBG_DATA_G -+*/ -+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_MASK (0x03FF0000) -+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_LSBMASK (0x000003FF) -+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_SHIFT (16) -+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_LENGTH (10) -+#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DBGDATA_GB, DBG_DATA_B -+*/ -+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_MASK (0x000003FF) -+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_LSBMASK (0x000003FF) -+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_SHIFT (0) -+#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_LENGTH (10) -+#define 
ODN_PDP_DBGDATA_GB_DBG_DATA_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DBGSIDE_OFFSET (0x0778) -+ -+/* PDP, DBGSIDE, DBG_VAL -+*/ -+#define ODN_PDP_DBGSIDE_DBG_VAL_MASK (0x00000008) -+#define ODN_PDP_DBGSIDE_DBG_VAL_LSBMASK (0x00000001) -+#define ODN_PDP_DBGSIDE_DBG_VAL_SHIFT (3) -+#define ODN_PDP_DBGSIDE_DBG_VAL_LENGTH (1) -+#define ODN_PDP_DBGSIDE_DBG_VAL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DBGSIDE, DBG_SIDE -+*/ -+#define ODN_PDP_DBGSIDE_DBG_SIDE_MASK (0x00000007) -+#define ODN_PDP_DBGSIDE_DBG_SIDE_LSBMASK (0x00000007) -+#define ODN_PDP_DBGSIDE_DBG_SIDE_SHIFT (0) -+#define ODN_PDP_DBGSIDE_DBG_SIDE_LENGTH (3) -+#define ODN_PDP_DBGSIDE_DBG_SIDE_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_OUTPUT_OFFSET (0x077C) -+ -+/* PDP, OUTPUT, EIGHT_BIT_OUTPUT -+*/ -+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_MASK (0x00000002) -+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_LSBMASK (0x00000001) -+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_SHIFT (1) -+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_LENGTH (1) -+#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, OUTPUT, OUTPUT_CONFIG -+*/ -+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_MASK (0x00000001) -+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_LSBMASK (0x00000001) -+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_SHIFT (0) -+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_LENGTH (1) -+#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_SYNCCTRL_OFFSET (0x0780) -+ -+/* PDP, SYNCCTRL, SYNCACTIVE -+*/ -+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_MASK (0x80000000) -+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_SHIFT (31) -+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_LENGTH (1) -+#define ODN_PDP_SYNCCTRL_SYNCACTIVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, ODN_PDP_RST -+*/ -+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_MASK (0x20000000) -+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_SHIFT (29) -+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_LENGTH (1) -+#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, POWERDN -+*/ -+#define ODN_PDP_SYNCCTRL_POWERDN_MASK (0x10000000) -+#define ODN_PDP_SYNCCTRL_POWERDN_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_POWERDN_SHIFT (28) -+#define ODN_PDP_SYNCCTRL_POWERDN_LENGTH (1) -+#define ODN_PDP_SYNCCTRL_POWERDN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, LOWPWRMODE -+*/ -+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_MASK (0x08000000) -+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_SHIFT (27) -+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_LENGTH (1) -+#define ODN_PDP_SYNCCTRL_LOWPWRMODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, UPDSYNCTRL -+*/ -+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_MASK (0x04000000) -+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_SHIFT (26) -+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_LENGTH (1) -+#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, UPDINTCTRL -+*/ -+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_MASK (0x02000000) -+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_SHIFT (25) -+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_LENGTH (1) -+#define ODN_PDP_SYNCCTRL_UPDINTCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, UPDCTRL -+*/ -+#define ODN_PDP_SYNCCTRL_UPDCTRL_MASK (0x01000000) -+#define ODN_PDP_SYNCCTRL_UPDCTRL_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_UPDCTRL_SHIFT (24) -+#define ODN_PDP_SYNCCTRL_UPDCTRL_LENGTH (1) -+#define 
ODN_PDP_SYNCCTRL_UPDCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, UPDWAIT -+*/ -+#define ODN_PDP_SYNCCTRL_UPDWAIT_MASK (0x000F0000) -+#define ODN_PDP_SYNCCTRL_UPDWAIT_LSBMASK (0x0000000F) -+#define ODN_PDP_SYNCCTRL_UPDWAIT_SHIFT (16) -+#define ODN_PDP_SYNCCTRL_UPDWAIT_LENGTH (4) -+#define ODN_PDP_SYNCCTRL_UPDWAIT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, FIELD_EN -+*/ -+#define ODN_PDP_SYNCCTRL_FIELD_EN_MASK (0x00002000) -+#define ODN_PDP_SYNCCTRL_FIELD_EN_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_FIELD_EN_SHIFT (13) -+#define ODN_PDP_SYNCCTRL_FIELD_EN_LENGTH (1) -+#define ODN_PDP_SYNCCTRL_FIELD_EN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, CSYNC_EN -+*/ -+#define ODN_PDP_SYNCCTRL_CSYNC_EN_MASK (0x00001000) -+#define ODN_PDP_SYNCCTRL_CSYNC_EN_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_CSYNC_EN_SHIFT (12) -+#define ODN_PDP_SYNCCTRL_CSYNC_EN_LENGTH (1) -+#define ODN_PDP_SYNCCTRL_CSYNC_EN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, CLKPOL -+*/ -+#define ODN_PDP_SYNCCTRL_CLKPOL_MASK (0x00000800) -+#define ODN_PDP_SYNCCTRL_CLKPOL_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_CLKPOL_SHIFT (11) -+#define ODN_PDP_SYNCCTRL_CLKPOL_LENGTH (1) -+#define ODN_PDP_SYNCCTRL_CLKPOL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, VS_SLAVE -+*/ -+#define ODN_PDP_SYNCCTRL_VS_SLAVE_MASK (0x00000080) -+#define ODN_PDP_SYNCCTRL_VS_SLAVE_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_VS_SLAVE_SHIFT (7) -+#define ODN_PDP_SYNCCTRL_VS_SLAVE_LENGTH (1) -+#define ODN_PDP_SYNCCTRL_VS_SLAVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, HS_SLAVE -+*/ -+#define ODN_PDP_SYNCCTRL_HS_SLAVE_MASK (0x00000040) -+#define ODN_PDP_SYNCCTRL_HS_SLAVE_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_HS_SLAVE_SHIFT (6) -+#define ODN_PDP_SYNCCTRL_HS_SLAVE_LENGTH (1) -+#define ODN_PDP_SYNCCTRL_HS_SLAVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, BLNKPOL -+*/ -+#define ODN_PDP_SYNCCTRL_BLNKPOL_MASK (0x00000020) -+#define ODN_PDP_SYNCCTRL_BLNKPOL_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_BLNKPOL_SHIFT (5) -+#define ODN_PDP_SYNCCTRL_BLNKPOL_LENGTH (1) -+#define ODN_PDP_SYNCCTRL_BLNKPOL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, BLNKDIS -+*/ -+#define ODN_PDP_SYNCCTRL_BLNKDIS_MASK (0x00000010) -+#define ODN_PDP_SYNCCTRL_BLNKDIS_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_BLNKDIS_SHIFT (4) -+#define ODN_PDP_SYNCCTRL_BLNKDIS_LENGTH (1) -+#define ODN_PDP_SYNCCTRL_BLNKDIS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, VSPOL -+*/ -+#define ODN_PDP_SYNCCTRL_VSPOL_MASK (0x00000008) -+#define ODN_PDP_SYNCCTRL_VSPOL_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_VSPOL_SHIFT (3) -+#define ODN_PDP_SYNCCTRL_VSPOL_LENGTH (1) -+#define ODN_PDP_SYNCCTRL_VSPOL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, VSDIS -+*/ -+#define ODN_PDP_SYNCCTRL_VSDIS_MASK (0x00000004) -+#define ODN_PDP_SYNCCTRL_VSDIS_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_VSDIS_SHIFT (2) -+#define ODN_PDP_SYNCCTRL_VSDIS_LENGTH (1) -+#define ODN_PDP_SYNCCTRL_VSDIS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, HSPOL -+*/ -+#define ODN_PDP_SYNCCTRL_HSPOL_MASK (0x00000002) -+#define ODN_PDP_SYNCCTRL_HSPOL_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_HSPOL_SHIFT (1) -+#define ODN_PDP_SYNCCTRL_HSPOL_LENGTH (1) -+#define ODN_PDP_SYNCCTRL_HSPOL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, HSDIS -+*/ -+#define ODN_PDP_SYNCCTRL_HSDIS_MASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_HSDIS_LSBMASK (0x00000001) -+#define ODN_PDP_SYNCCTRL_HSDIS_SHIFT (0) -+#define ODN_PDP_SYNCCTRL_HSDIS_LENGTH (1) -+#define ODN_PDP_SYNCCTRL_HSDIS_SIGNED_FIELD IMG_FALSE -+ 
-+#define ODN_PDP_HSYNC1_OFFSET (0x0784) -+ -+/* PDP, HSYNC1, HBPS -+*/ -+#define ODN_PDP_HSYNC1_HBPS_MASK (0x1FFF0000) -+#define ODN_PDP_HSYNC1_HBPS_LSBMASK (0x00001FFF) -+#define ODN_PDP_HSYNC1_HBPS_SHIFT (16) -+#define ODN_PDP_HSYNC1_HBPS_LENGTH (13) -+#define ODN_PDP_HSYNC1_HBPS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, HSYNC1, HT -+*/ -+#define ODN_PDP_HSYNC1_HT_MASK (0x00001FFF) -+#define ODN_PDP_HSYNC1_HT_LSBMASK (0x00001FFF) -+#define ODN_PDP_HSYNC1_HT_SHIFT (0) -+#define ODN_PDP_HSYNC1_HT_LENGTH (13) -+#define ODN_PDP_HSYNC1_HT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_HSYNC2_OFFSET (0x0788) -+ -+/* PDP, HSYNC2, HAS -+*/ -+#define ODN_PDP_HSYNC2_HAS_MASK (0x1FFF0000) -+#define ODN_PDP_HSYNC2_HAS_LSBMASK (0x00001FFF) -+#define ODN_PDP_HSYNC2_HAS_SHIFT (16) -+#define ODN_PDP_HSYNC2_HAS_LENGTH (13) -+#define ODN_PDP_HSYNC2_HAS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, HSYNC2, HLBS -+*/ -+#define ODN_PDP_HSYNC2_HLBS_MASK (0x00001FFF) -+#define ODN_PDP_HSYNC2_HLBS_LSBMASK (0x00001FFF) -+#define ODN_PDP_HSYNC2_HLBS_SHIFT (0) -+#define ODN_PDP_HSYNC2_HLBS_LENGTH (13) -+#define ODN_PDP_HSYNC2_HLBS_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_HSYNC3_OFFSET (0x078C) -+ -+/* PDP, HSYNC3, HFPS -+*/ -+#define ODN_PDP_HSYNC3_HFPS_MASK (0x1FFF0000) -+#define ODN_PDP_HSYNC3_HFPS_LSBMASK (0x00001FFF) -+#define ODN_PDP_HSYNC3_HFPS_SHIFT (16) -+#define ODN_PDP_HSYNC3_HFPS_LENGTH (13) -+#define ODN_PDP_HSYNC3_HFPS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, HSYNC3, HRBS -+*/ -+#define ODN_PDP_HSYNC3_HRBS_MASK (0x00001FFF) -+#define ODN_PDP_HSYNC3_HRBS_LSBMASK (0x00001FFF) -+#define ODN_PDP_HSYNC3_HRBS_SHIFT (0) -+#define ODN_PDP_HSYNC3_HRBS_LENGTH (13) -+#define ODN_PDP_HSYNC3_HRBS_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VSYNC1_OFFSET (0x0790) -+ -+/* PDP, VSYNC1, VBPS -+*/ -+#define ODN_PDP_VSYNC1_VBPS_MASK (0x1FFF0000) -+#define ODN_PDP_VSYNC1_VBPS_LSBMASK (0x00001FFF) -+#define ODN_PDP_VSYNC1_VBPS_SHIFT (16) -+#define ODN_PDP_VSYNC1_VBPS_LENGTH (13) -+#define ODN_PDP_VSYNC1_VBPS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VSYNC1, VT -+*/ -+#define ODN_PDP_VSYNC1_VT_MASK (0x00001FFF) -+#define ODN_PDP_VSYNC1_VT_LSBMASK (0x00001FFF) -+#define ODN_PDP_VSYNC1_VT_SHIFT (0) -+#define ODN_PDP_VSYNC1_VT_LENGTH (13) -+#define ODN_PDP_VSYNC1_VT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VSYNC2_OFFSET (0x0794) -+ -+/* PDP, VSYNC2, VAS -+*/ -+#define ODN_PDP_VSYNC2_VAS_MASK (0x1FFF0000) -+#define ODN_PDP_VSYNC2_VAS_LSBMASK (0x00001FFF) -+#define ODN_PDP_VSYNC2_VAS_SHIFT (16) -+#define ODN_PDP_VSYNC2_VAS_LENGTH (13) -+#define ODN_PDP_VSYNC2_VAS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VSYNC2, VTBS -+*/ -+#define ODN_PDP_VSYNC2_VTBS_MASK (0x00001FFF) -+#define ODN_PDP_VSYNC2_VTBS_LSBMASK (0x00001FFF) -+#define ODN_PDP_VSYNC2_VTBS_SHIFT (0) -+#define ODN_PDP_VSYNC2_VTBS_LENGTH (13) -+#define ODN_PDP_VSYNC2_VTBS_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VSYNC3_OFFSET (0x0798) -+ -+/* PDP, VSYNC3, VFPS -+*/ -+#define ODN_PDP_VSYNC3_VFPS_MASK (0x1FFF0000) -+#define ODN_PDP_VSYNC3_VFPS_LSBMASK (0x00001FFF) -+#define ODN_PDP_VSYNC3_VFPS_SHIFT (16) -+#define ODN_PDP_VSYNC3_VFPS_LENGTH (13) -+#define ODN_PDP_VSYNC3_VFPS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VSYNC3, VBBS -+*/ -+#define ODN_PDP_VSYNC3_VBBS_MASK (0x00001FFF) -+#define ODN_PDP_VSYNC3_VBBS_LSBMASK (0x00001FFF) -+#define ODN_PDP_VSYNC3_VBBS_SHIFT (0) -+#define ODN_PDP_VSYNC3_VBBS_LENGTH (13) -+#define ODN_PDP_VSYNC3_VBBS_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_INTSTAT_OFFSET (0x079C) -+ -+/* PDP, INTSTAT, INTS_VID4ORUN -+*/ -+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_MASK (0x00080000) 
-+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_SHIFT (19) -+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_VID4ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VID3ORUN -+*/ -+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_MASK (0x00040000) -+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_SHIFT (18) -+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_VID3ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VID2ORUN -+*/ -+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_MASK (0x00020000) -+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_SHIFT (17) -+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_VID2ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VID1ORUN -+*/ -+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_MASK (0x00010000) -+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_SHIFT (16) -+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_VID1ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_GRPH4ORUN -+*/ -+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_MASK (0x00008000) -+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_SHIFT (15) -+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_GRPH3ORUN -+*/ -+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_MASK (0x00004000) -+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_SHIFT (14) -+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_GRPH2ORUN -+*/ -+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_MASK (0x00002000) -+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_SHIFT (13) -+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_GRPH1ORUN -+*/ -+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_MASK (0x00001000) -+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_SHIFT (12) -+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VID4URUN -+*/ -+#define ODN_PDP_INTSTAT_INTS_VID4URUN_MASK (0x00000800) -+#define ODN_PDP_INTSTAT_INTS_VID4URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_VID4URUN_SHIFT (11) -+#define ODN_PDP_INTSTAT_INTS_VID4URUN_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_VID4URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VID3URUN -+*/ -+#define ODN_PDP_INTSTAT_INTS_VID3URUN_MASK (0x00000400) -+#define ODN_PDP_INTSTAT_INTS_VID3URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_VID3URUN_SHIFT (10) -+#define ODN_PDP_INTSTAT_INTS_VID3URUN_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_VID3URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VID2URUN -+*/ -+#define ODN_PDP_INTSTAT_INTS_VID2URUN_MASK (0x00000200) -+#define ODN_PDP_INTSTAT_INTS_VID2URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_VID2URUN_SHIFT (9) -+#define ODN_PDP_INTSTAT_INTS_VID2URUN_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_VID2URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VID1URUN -+*/ -+#define 
ODN_PDP_INTSTAT_INTS_VID1URUN_MASK (0x00000100) -+#define ODN_PDP_INTSTAT_INTS_VID1URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_VID1URUN_SHIFT (8) -+#define ODN_PDP_INTSTAT_INTS_VID1URUN_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_VID1URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_GRPH4URUN -+*/ -+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_MASK (0x00000080) -+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_SHIFT (7) -+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_GRPH3URUN -+*/ -+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_MASK (0x00000040) -+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_SHIFT (6) -+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_GRPH2URUN -+*/ -+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_MASK (0x00000020) -+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_SHIFT (5) -+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_GRPH1URUN -+*/ -+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_MASK (0x00000010) -+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_SHIFT (4) -+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VBLNK1 -+*/ -+#define ODN_PDP_INTSTAT_INTS_VBLNK1_MASK (0x00000008) -+#define ODN_PDP_INTSTAT_INTS_VBLNK1_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_VBLNK1_SHIFT (3) -+#define ODN_PDP_INTSTAT_INTS_VBLNK1_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_VBLNK1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VBLNK0 -+*/ -+#define ODN_PDP_INTSTAT_INTS_VBLNK0_MASK (0x00000004) -+#define ODN_PDP_INTSTAT_INTS_VBLNK0_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_VBLNK0_SHIFT (2) -+#define ODN_PDP_INTSTAT_INTS_VBLNK0_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_VBLNK0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_HBLNK1 -+*/ -+#define ODN_PDP_INTSTAT_INTS_HBLNK1_MASK (0x00000002) -+#define ODN_PDP_INTSTAT_INTS_HBLNK1_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_HBLNK1_SHIFT (1) -+#define ODN_PDP_INTSTAT_INTS_HBLNK1_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_HBLNK1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_HBLNK0 -+*/ -+#define ODN_PDP_INTSTAT_INTS_HBLNK0_MASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_HBLNK0_LSBMASK (0x00000001) -+#define ODN_PDP_INTSTAT_INTS_HBLNK0_SHIFT (0) -+#define ODN_PDP_INTSTAT_INTS_HBLNK0_LENGTH (1) -+#define ODN_PDP_INTSTAT_INTS_HBLNK0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_INTENAB_OFFSET (0x07A0) -+ -+/* PDP, INTENAB, INTEN_VID4ORUN -+*/ -+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_MASK (0x00080000) -+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_SHIFT (19) -+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_VID4ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_VID3ORUN -+*/ -+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_MASK (0x00040000) -+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_SHIFT (18) -+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_VID3ORUN_SIGNED_FIELD IMG_FALSE -+ 
-+/* PDP, INTENAB, INTEN_VID2ORUN -+*/ -+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_MASK (0x00020000) -+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_SHIFT (17) -+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_VID2ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_VID1ORUN -+*/ -+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_MASK (0x00010000) -+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_SHIFT (16) -+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_VID1ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_GRPH4ORUN -+*/ -+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_MASK (0x00008000) -+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_SHIFT (15) -+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_GRPH3ORUN -+*/ -+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_MASK (0x00004000) -+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_SHIFT (14) -+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_GRPH2ORUN -+*/ -+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_MASK (0x00002000) -+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_SHIFT (13) -+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_GRPH1ORUN -+*/ -+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_MASK (0x00001000) -+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_SHIFT (12) -+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_VID4URUN -+*/ -+#define ODN_PDP_INTENAB_INTEN_VID4URUN_MASK (0x00000800) -+#define ODN_PDP_INTENAB_INTEN_VID4URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_VID4URUN_SHIFT (11) -+#define ODN_PDP_INTENAB_INTEN_VID4URUN_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_VID4URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_VID3URUN -+*/ -+#define ODN_PDP_INTENAB_INTEN_VID3URUN_MASK (0x00000400) -+#define ODN_PDP_INTENAB_INTEN_VID3URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_VID3URUN_SHIFT (10) -+#define ODN_PDP_INTENAB_INTEN_VID3URUN_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_VID3URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_VID2URUN -+*/ -+#define ODN_PDP_INTENAB_INTEN_VID2URUN_MASK (0x00000200) -+#define ODN_PDP_INTENAB_INTEN_VID2URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_VID2URUN_SHIFT (9) -+#define ODN_PDP_INTENAB_INTEN_VID2URUN_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_VID2URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_VID1URUN -+*/ -+#define ODN_PDP_INTENAB_INTEN_VID1URUN_MASK (0x00000100) -+#define ODN_PDP_INTENAB_INTEN_VID1URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_VID1URUN_SHIFT (8) -+#define ODN_PDP_INTENAB_INTEN_VID1URUN_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_VID1URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_GRPH4URUN -+*/ -+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_MASK (0x00000080) -+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_SHIFT (7) 
-+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_GRPH3URUN -+*/ -+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_MASK (0x00000040) -+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_SHIFT (6) -+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_GRPH2URUN -+*/ -+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_MASK (0x00000020) -+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_SHIFT (5) -+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_GRPH1URUN -+*/ -+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_MASK (0x00000010) -+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_SHIFT (4) -+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_VBLNK1 -+*/ -+#define ODN_PDP_INTENAB_INTEN_VBLNK1_MASK (0x00000008) -+#define ODN_PDP_INTENAB_INTEN_VBLNK1_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_VBLNK1_SHIFT (3) -+#define ODN_PDP_INTENAB_INTEN_VBLNK1_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_VBLNK1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_VBLNK0 -+*/ -+#define ODN_PDP_INTENAB_INTEN_VBLNK0_MASK (0x00000004) -+#define ODN_PDP_INTENAB_INTEN_VBLNK0_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_VBLNK0_SHIFT (2) -+#define ODN_PDP_INTENAB_INTEN_VBLNK0_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_VBLNK0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_HBLNK1 -+*/ -+#define ODN_PDP_INTENAB_INTEN_HBLNK1_MASK (0x00000002) -+#define ODN_PDP_INTENAB_INTEN_HBLNK1_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_HBLNK1_SHIFT (1) -+#define ODN_PDP_INTENAB_INTEN_HBLNK1_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_HBLNK1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_HBLNK0 -+*/ -+#define ODN_PDP_INTENAB_INTEN_HBLNK0_MASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_HBLNK0_LSBMASK (0x00000001) -+#define ODN_PDP_INTENAB_INTEN_HBLNK0_SHIFT (0) -+#define ODN_PDP_INTENAB_INTEN_HBLNK0_LENGTH (1) -+#define ODN_PDP_INTENAB_INTEN_HBLNK0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_INTCLR_OFFSET (0x07A4) -+ -+/* PDP, INTCLR, INTCLR_VID4ORUN -+*/ -+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_MASK (0x00080000) -+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_SHIFT (19) -+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VID3ORUN -+*/ -+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_MASK (0x00040000) -+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_SHIFT (18) -+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VID2ORUN -+*/ -+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_MASK (0x00020000) -+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_SHIFT (17) -+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VID1ORUN -+*/ -+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_MASK (0x00010000) -+#define 
ODN_PDP_INTCLR_INTCLR_VID1ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_SHIFT (16) -+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_GRPH4ORUN -+*/ -+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_MASK (0x00008000) -+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_SHIFT (15) -+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_GRPH3ORUN -+*/ -+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_MASK (0x00004000) -+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_SHIFT (14) -+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_GRPH2ORUN -+*/ -+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_MASK (0x00002000) -+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_SHIFT (13) -+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_GRPH1ORUN -+*/ -+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_MASK (0x00001000) -+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_SHIFT (12) -+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VID4URUN -+*/ -+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_MASK (0x00000800) -+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_SHIFT (11) -+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_VID4URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VID3URUN -+*/ -+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_MASK (0x00000400) -+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_SHIFT (10) -+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_VID3URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VID2URUN -+*/ -+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_MASK (0x00000200) -+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_SHIFT (9) -+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_VID2URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VID1URUN -+*/ -+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_MASK (0x00000100) -+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_SHIFT (8) -+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_VID1URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_GRPH4URUN -+*/ -+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_MASK (0x00000080) -+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_SHIFT (7) -+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_GRPH3URUN -+*/ -+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_MASK (0x00000040) -+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_SHIFT (6) -+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_SIGNED_FIELD 
IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_GRPH2URUN -+*/ -+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_MASK (0x00000020) -+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_SHIFT (5) -+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_GRPH1URUN -+*/ -+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_MASK (0x00000010) -+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_SHIFT (4) -+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VBLNK1 -+*/ -+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_MASK (0x00000008) -+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_SHIFT (3) -+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_VBLNK1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VBLNK0 -+*/ -+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_MASK (0x00000004) -+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_SHIFT (2) -+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_VBLNK0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_HBLNK1 -+*/ -+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_MASK (0x00000002) -+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_SHIFT (1) -+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_HBLNK1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_HBLNK0 -+*/ -+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_MASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_LSBMASK (0x00000001) -+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_SHIFT (0) -+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_LENGTH (1) -+#define ODN_PDP_INTCLR_INTCLR_HBLNK0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_MEMCTRL_OFFSET (0x07A8) -+ -+/* PDP, MEMCTRL, MEMREFRESH -+*/ -+#define ODN_PDP_MEMCTRL_MEMREFRESH_MASK (0xC0000000) -+#define ODN_PDP_MEMCTRL_MEMREFRESH_LSBMASK (0x00000003) -+#define ODN_PDP_MEMCTRL_MEMREFRESH_SHIFT (30) -+#define ODN_PDP_MEMCTRL_MEMREFRESH_LENGTH (2) -+#define ODN_PDP_MEMCTRL_MEMREFRESH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, MEMCTRL, BURSTLEN -+*/ -+#define ODN_PDP_MEMCTRL_BURSTLEN_MASK (0x000000FF) -+#define ODN_PDP_MEMCTRL_BURSTLEN_LSBMASK (0x000000FF) -+#define ODN_PDP_MEMCTRL_BURSTLEN_SHIFT (0) -+#define ODN_PDP_MEMCTRL_BURSTLEN_LENGTH (8) -+#define ODN_PDP_MEMCTRL_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_MEM_THRESH_OFFSET (0x07AC) -+ -+/* PDP, MEM_THRESH, UVTHRESHOLD -+*/ -+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_MASK (0xFF000000) -+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_SHIFT (24) -+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_LENGTH (8) -+#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, MEM_THRESH, YTHRESHOLD -+*/ -+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_MASK (0x001FF000) -+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_SHIFT (12) -+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_LENGTH (9) -+#define ODN_PDP_MEM_THRESH_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, MEM_THRESH, THRESHOLD -+*/ -+#define ODN_PDP_MEM_THRESH_THRESHOLD_MASK (0x000001FF) -+#define ODN_PDP_MEM_THRESH_THRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_MEM_THRESH_THRESHOLD_SHIFT (0) -+#define 
ODN_PDP_MEM_THRESH_THRESHOLD_LENGTH (9) -+#define ODN_PDP_MEM_THRESH_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_ALTERNATE_3D_CTRL_OFFSET (0x07B0) -+ -+/* PDP, ALTERNATE_3D_CTRL, ALT3D_ON -+*/ -+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_MASK (0x00000010) -+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LSBMASK (0x00000001) -+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SHIFT (4) -+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LENGTH (1) -+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, ALTERNATE_3D_CTRL, ALT3D_BLENDSEL -+*/ -+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_MASK (0x00000007) -+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LSBMASK (0x00000007) -+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SHIFT (0) -+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LENGTH (3) -+#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA0_R_OFFSET (0x07B4) -+ -+/* PDP, GAMMA0_R, GAMMA0_R -+*/ -+#define ODN_PDP_GAMMA0_R_GAMMA0_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA0_R_GAMMA0_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA0_R_GAMMA0_R_SHIFT (0) -+#define ODN_PDP_GAMMA0_R_GAMMA0_R_LENGTH (10) -+#define ODN_PDP_GAMMA0_R_GAMMA0_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA0_GB_OFFSET (0x07B8) -+ -+/* PDP, GAMMA0_GB, GAMMA0_G -+*/ -+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_SHIFT (16) -+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_LENGTH (10) -+#define ODN_PDP_GAMMA0_GB_GAMMA0_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA0_GB, GAMMA0_B -+*/ -+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_SHIFT (0) -+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_LENGTH (10) -+#define ODN_PDP_GAMMA0_GB_GAMMA0_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA1_R_OFFSET (0x07BC) -+ -+/* PDP, GAMMA1_R, GAMMA1_R -+*/ -+#define ODN_PDP_GAMMA1_R_GAMMA1_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA1_R_GAMMA1_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA1_R_GAMMA1_R_SHIFT (0) -+#define ODN_PDP_GAMMA1_R_GAMMA1_R_LENGTH (10) -+#define ODN_PDP_GAMMA1_R_GAMMA1_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA1_GB_OFFSET (0x07C0) -+ -+/* PDP, GAMMA1_GB, GAMMA1_G -+*/ -+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_SHIFT (16) -+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_LENGTH (10) -+#define ODN_PDP_GAMMA1_GB_GAMMA1_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA1_GB, GAMMA1_B -+*/ -+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_SHIFT (0) -+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_LENGTH (10) -+#define ODN_PDP_GAMMA1_GB_GAMMA1_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA2_R_OFFSET (0x07C4) -+ -+/* PDP, GAMMA2_R, GAMMA2_R -+*/ -+#define ODN_PDP_GAMMA2_R_GAMMA2_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA2_R_GAMMA2_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA2_R_GAMMA2_R_SHIFT (0) -+#define ODN_PDP_GAMMA2_R_GAMMA2_R_LENGTH (10) -+#define ODN_PDP_GAMMA2_R_GAMMA2_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA2_GB_OFFSET (0x07C8) -+ -+/* PDP, GAMMA2_GB, GAMMA2_G -+*/ -+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_SHIFT (16) -+#define 
ODN_PDP_GAMMA2_GB_GAMMA2_G_LENGTH (10) -+#define ODN_PDP_GAMMA2_GB_GAMMA2_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA2_GB, GAMMA2_B -+*/ -+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_SHIFT (0) -+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_LENGTH (10) -+#define ODN_PDP_GAMMA2_GB_GAMMA2_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA3_R_OFFSET (0x07CC) -+ -+/* PDP, GAMMA3_R, GAMMA3_R -+*/ -+#define ODN_PDP_GAMMA3_R_GAMMA3_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA3_R_GAMMA3_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA3_R_GAMMA3_R_SHIFT (0) -+#define ODN_PDP_GAMMA3_R_GAMMA3_R_LENGTH (10) -+#define ODN_PDP_GAMMA3_R_GAMMA3_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA3_GB_OFFSET (0x07D0) -+ -+/* PDP, GAMMA3_GB, GAMMA3_G -+*/ -+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_SHIFT (16) -+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_LENGTH (10) -+#define ODN_PDP_GAMMA3_GB_GAMMA3_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA3_GB, GAMMA3_B -+*/ -+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_SHIFT (0) -+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_LENGTH (10) -+#define ODN_PDP_GAMMA3_GB_GAMMA3_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA4_R_OFFSET (0x07D4) -+ -+/* PDP, GAMMA4_R, GAMMA4_R -+*/ -+#define ODN_PDP_GAMMA4_R_GAMMA4_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA4_R_GAMMA4_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA4_R_GAMMA4_R_SHIFT (0) -+#define ODN_PDP_GAMMA4_R_GAMMA4_R_LENGTH (10) -+#define ODN_PDP_GAMMA4_R_GAMMA4_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA4_GB_OFFSET (0x07D8) -+ -+/* PDP, GAMMA4_GB, GAMMA4_G -+*/ -+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_SHIFT (16) -+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_LENGTH (10) -+#define ODN_PDP_GAMMA4_GB_GAMMA4_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA4_GB, GAMMA4_B -+*/ -+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_SHIFT (0) -+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_LENGTH (10) -+#define ODN_PDP_GAMMA4_GB_GAMMA4_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA5_R_OFFSET (0x07DC) -+ -+/* PDP, GAMMA5_R, GAMMA5_R -+*/ -+#define ODN_PDP_GAMMA5_R_GAMMA5_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA5_R_GAMMA5_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA5_R_GAMMA5_R_SHIFT (0) -+#define ODN_PDP_GAMMA5_R_GAMMA5_R_LENGTH (10) -+#define ODN_PDP_GAMMA5_R_GAMMA5_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA5_GB_OFFSET (0x07E0) -+ -+/* PDP, GAMMA5_GB, GAMMA5_G -+*/ -+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_SHIFT (16) -+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_LENGTH (10) -+#define ODN_PDP_GAMMA5_GB_GAMMA5_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA5_GB, GAMMA5_B -+*/ -+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_SHIFT (0) -+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_LENGTH (10) -+#define ODN_PDP_GAMMA5_GB_GAMMA5_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA6_R_OFFSET (0x07E4) -+ -+/* PDP, GAMMA6_R, GAMMA6_R -+*/ -+#define 
ODN_PDP_GAMMA6_R_GAMMA6_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA6_R_GAMMA6_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA6_R_GAMMA6_R_SHIFT (0) -+#define ODN_PDP_GAMMA6_R_GAMMA6_R_LENGTH (10) -+#define ODN_PDP_GAMMA6_R_GAMMA6_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA6_GB_OFFSET (0x07E8) -+ -+/* PDP, GAMMA6_GB, GAMMA6_G -+*/ -+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_SHIFT (16) -+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_LENGTH (10) -+#define ODN_PDP_GAMMA6_GB_GAMMA6_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA6_GB, GAMMA6_B -+*/ -+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_SHIFT (0) -+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_LENGTH (10) -+#define ODN_PDP_GAMMA6_GB_GAMMA6_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA7_R_OFFSET (0x07EC) -+ -+/* PDP, GAMMA7_R, GAMMA7_R -+*/ -+#define ODN_PDP_GAMMA7_R_GAMMA7_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA7_R_GAMMA7_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA7_R_GAMMA7_R_SHIFT (0) -+#define ODN_PDP_GAMMA7_R_GAMMA7_R_LENGTH (10) -+#define ODN_PDP_GAMMA7_R_GAMMA7_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA7_GB_OFFSET (0x07F0) -+ -+/* PDP, GAMMA7_GB, GAMMA7_G -+*/ -+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_SHIFT (16) -+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_LENGTH (10) -+#define ODN_PDP_GAMMA7_GB_GAMMA7_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA7_GB, GAMMA7_B -+*/ -+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_SHIFT (0) -+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_LENGTH (10) -+#define ODN_PDP_GAMMA7_GB_GAMMA7_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA8_R_OFFSET (0x07F4) -+ -+/* PDP, GAMMA8_R, GAMMA8_R -+*/ -+#define ODN_PDP_GAMMA8_R_GAMMA8_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA8_R_GAMMA8_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA8_R_GAMMA8_R_SHIFT (0) -+#define ODN_PDP_GAMMA8_R_GAMMA8_R_LENGTH (10) -+#define ODN_PDP_GAMMA8_R_GAMMA8_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA8_GB_OFFSET (0x07F8) -+ -+/* PDP, GAMMA8_GB, GAMMA8_G -+*/ -+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_SHIFT (16) -+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_LENGTH (10) -+#define ODN_PDP_GAMMA8_GB_GAMMA8_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA8_GB, GAMMA8_B -+*/ -+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_SHIFT (0) -+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_LENGTH (10) -+#define ODN_PDP_GAMMA8_GB_GAMMA8_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA9_R_OFFSET (0x07FC) -+ -+/* PDP, GAMMA9_R, GAMMA9_R -+*/ -+#define ODN_PDP_GAMMA9_R_GAMMA9_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA9_R_GAMMA9_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA9_R_GAMMA9_R_SHIFT (0) -+#define ODN_PDP_GAMMA9_R_GAMMA9_R_LENGTH (10) -+#define ODN_PDP_GAMMA9_R_GAMMA9_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA9_GB_OFFSET (0x0800) -+ -+/* PDP, GAMMA9_GB, GAMMA9_G -+*/ -+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_SHIFT (16) -+#define 
ODN_PDP_GAMMA9_GB_GAMMA9_G_LENGTH (10) -+#define ODN_PDP_GAMMA9_GB_GAMMA9_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA9_GB, GAMMA9_B -+*/ -+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_SHIFT (0) -+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_LENGTH (10) -+#define ODN_PDP_GAMMA9_GB_GAMMA9_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA10_R_OFFSET (0x0804) -+ -+/* PDP, GAMMA10_R, GAMMA10_R -+*/ -+#define ODN_PDP_GAMMA10_R_GAMMA10_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA10_R_GAMMA10_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA10_R_GAMMA10_R_SHIFT (0) -+#define ODN_PDP_GAMMA10_R_GAMMA10_R_LENGTH (10) -+#define ODN_PDP_GAMMA10_R_GAMMA10_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA10_GB_OFFSET (0x0808) -+ -+/* PDP, GAMMA10_GB, GAMMA10_G -+*/ -+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_SHIFT (16) -+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_LENGTH (10) -+#define ODN_PDP_GAMMA10_GB_GAMMA10_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA10_GB, GAMMA10_B -+*/ -+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_SHIFT (0) -+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_LENGTH (10) -+#define ODN_PDP_GAMMA10_GB_GAMMA10_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA11_R_OFFSET (0x080C) -+ -+/* PDP, GAMMA11_R, GAMMA11_R -+*/ -+#define ODN_PDP_GAMMA11_R_GAMMA11_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA11_R_GAMMA11_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA11_R_GAMMA11_R_SHIFT (0) -+#define ODN_PDP_GAMMA11_R_GAMMA11_R_LENGTH (10) -+#define ODN_PDP_GAMMA11_R_GAMMA11_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA11_GB_OFFSET (0x0810) -+ -+/* PDP, GAMMA11_GB, GAMMA11_G -+*/ -+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_SHIFT (16) -+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_LENGTH (10) -+#define ODN_PDP_GAMMA11_GB_GAMMA11_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA11_GB, GAMMA11_B -+*/ -+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_SHIFT (0) -+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_LENGTH (10) -+#define ODN_PDP_GAMMA11_GB_GAMMA11_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA12_R_OFFSET (0x0814) -+ -+/* PDP, GAMMA12_R, GAMMA12_R -+*/ -+#define ODN_PDP_GAMMA12_R_GAMMA12_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA12_R_GAMMA12_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA12_R_GAMMA12_R_SHIFT (0) -+#define ODN_PDP_GAMMA12_R_GAMMA12_R_LENGTH (10) -+#define ODN_PDP_GAMMA12_R_GAMMA12_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA12_GB_OFFSET (0x0818) -+ -+/* PDP, GAMMA12_GB, GAMMA12_G -+*/ -+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_SHIFT (16) -+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_LENGTH (10) -+#define ODN_PDP_GAMMA12_GB_GAMMA12_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA12_GB, GAMMA12_B -+*/ -+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_SHIFT (0) -+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_LENGTH (10) -+#define ODN_PDP_GAMMA12_GB_GAMMA12_B_SIGNED_FIELD IMG_FALSE -+ 
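Every register field in this block follows the same five-macro pattern (MASK, LSBMASK, SHIFT, LENGTH, SIGNED_FIELD). The short C sketch below shows how such MASK/SHIFT pairs are conventionally combined into generic get/set helpers; the PDP_GET_FIELD/PDP_SET_FIELD names, the register-bank pointer and the example function are illustrative assumptions, not part of this patch, and they assume the ODN_PDP_* defines above are in scope.

#include <stdint.h>

/* Generic field accessors built from the <REG>_<FIELD>_MASK / _SHIFT
 * pairs defined above. Names and register-bank indexing are hypothetical.
 */
#define PDP_GET_FIELD(val, reg, field) \
	(((val) & reg##_##field##_MASK) >> reg##_##field##_SHIFT)
#define PDP_SET_FIELD(val, reg, field, x) \
	(((val) & ~(uint32_t)reg##_##field##_MASK) | \
	 (((uint32_t)(x) << reg##_##field##_SHIFT) & reg##_##field##_MASK))

/* Example: program the packed 10-bit green/blue pair of gamma entry 12. */
static inline void pdp_set_gamma12_gb(volatile uint32_t *pdp_regs,
				      uint32_t g, uint32_t b)
{
	uint32_t v = pdp_regs[ODN_PDP_GAMMA12_GB_OFFSET / 4];

	v = PDP_SET_FIELD(v, ODN_PDP_GAMMA12_GB, GAMMA12_G, g);
	v = PDP_SET_FIELD(v, ODN_PDP_GAMMA12_GB, GAMMA12_B, b);
	pdp_regs[ODN_PDP_GAMMA12_GB_OFFSET / 4] = v;
}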
-+#define ODN_PDP_GAMMA13_R_OFFSET (0x081C) -+ -+/* PDP, GAMMA13_R, GAMMA13_R -+*/ -+#define ODN_PDP_GAMMA13_R_GAMMA13_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA13_R_GAMMA13_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA13_R_GAMMA13_R_SHIFT (0) -+#define ODN_PDP_GAMMA13_R_GAMMA13_R_LENGTH (10) -+#define ODN_PDP_GAMMA13_R_GAMMA13_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA13_GB_OFFSET (0x0820) -+ -+/* PDP, GAMMA13_GB, GAMMA13_G -+*/ -+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_SHIFT (16) -+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_LENGTH (10) -+#define ODN_PDP_GAMMA13_GB_GAMMA13_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA13_GB, GAMMA13_B -+*/ -+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_SHIFT (0) -+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_LENGTH (10) -+#define ODN_PDP_GAMMA13_GB_GAMMA13_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA14_R_OFFSET (0x0824) -+ -+/* PDP, GAMMA14_R, GAMMA14_R -+*/ -+#define ODN_PDP_GAMMA14_R_GAMMA14_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA14_R_GAMMA14_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA14_R_GAMMA14_R_SHIFT (0) -+#define ODN_PDP_GAMMA14_R_GAMMA14_R_LENGTH (10) -+#define ODN_PDP_GAMMA14_R_GAMMA14_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA14_GB_OFFSET (0x0828) -+ -+/* PDP, GAMMA14_GB, GAMMA14_G -+*/ -+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_SHIFT (16) -+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_LENGTH (10) -+#define ODN_PDP_GAMMA14_GB_GAMMA14_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA14_GB, GAMMA14_B -+*/ -+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_SHIFT (0) -+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_LENGTH (10) -+#define ODN_PDP_GAMMA14_GB_GAMMA14_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA15_R_OFFSET (0x082C) -+ -+/* PDP, GAMMA15_R, GAMMA15_R -+*/ -+#define ODN_PDP_GAMMA15_R_GAMMA15_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA15_R_GAMMA15_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA15_R_GAMMA15_R_SHIFT (0) -+#define ODN_PDP_GAMMA15_R_GAMMA15_R_LENGTH (10) -+#define ODN_PDP_GAMMA15_R_GAMMA15_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA15_GB_OFFSET (0x0830) -+ -+/* PDP, GAMMA15_GB, GAMMA15_G -+*/ -+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_SHIFT (16) -+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_LENGTH (10) -+#define ODN_PDP_GAMMA15_GB_GAMMA15_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA15_GB, GAMMA15_B -+*/ -+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_SHIFT (0) -+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_LENGTH (10) -+#define ODN_PDP_GAMMA15_GB_GAMMA15_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA16_R_OFFSET (0x0834) -+ -+/* PDP, GAMMA16_R, GAMMA16_R -+*/ -+#define ODN_PDP_GAMMA16_R_GAMMA16_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA16_R_GAMMA16_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA16_R_GAMMA16_R_SHIFT (0) -+#define ODN_PDP_GAMMA16_R_GAMMA16_R_LENGTH (10) -+#define ODN_PDP_GAMMA16_R_GAMMA16_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA16_GB_OFFSET (0x0838) -+ -+/* 
PDP, GAMMA16_GB, GAMMA16_G -+*/ -+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_SHIFT (16) -+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_LENGTH (10) -+#define ODN_PDP_GAMMA16_GB_GAMMA16_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA16_GB, GAMMA16_B -+*/ -+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_SHIFT (0) -+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_LENGTH (10) -+#define ODN_PDP_GAMMA16_GB_GAMMA16_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA17_R_OFFSET (0x083C) -+ -+/* PDP, GAMMA17_R, GAMMA17_R -+*/ -+#define ODN_PDP_GAMMA17_R_GAMMA17_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA17_R_GAMMA17_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA17_R_GAMMA17_R_SHIFT (0) -+#define ODN_PDP_GAMMA17_R_GAMMA17_R_LENGTH (10) -+#define ODN_PDP_GAMMA17_R_GAMMA17_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA17_GB_OFFSET (0x0840) -+ -+/* PDP, GAMMA17_GB, GAMMA17_G -+*/ -+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_SHIFT (16) -+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_LENGTH (10) -+#define ODN_PDP_GAMMA17_GB_GAMMA17_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA17_GB, GAMMA17_B -+*/ -+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_SHIFT (0) -+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_LENGTH (10) -+#define ODN_PDP_GAMMA17_GB_GAMMA17_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA18_R_OFFSET (0x0844) -+ -+/* PDP, GAMMA18_R, GAMMA18_R -+*/ -+#define ODN_PDP_GAMMA18_R_GAMMA18_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA18_R_GAMMA18_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA18_R_GAMMA18_R_SHIFT (0) -+#define ODN_PDP_GAMMA18_R_GAMMA18_R_LENGTH (10) -+#define ODN_PDP_GAMMA18_R_GAMMA18_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA18_GB_OFFSET (0x0848) -+ -+/* PDP, GAMMA18_GB, GAMMA18_G -+*/ -+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_SHIFT (16) -+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_LENGTH (10) -+#define ODN_PDP_GAMMA18_GB_GAMMA18_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA18_GB, GAMMA18_B -+*/ -+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_SHIFT (0) -+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_LENGTH (10) -+#define ODN_PDP_GAMMA18_GB_GAMMA18_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA19_R_OFFSET (0x084C) -+ -+/* PDP, GAMMA19_R, GAMMA19_R -+*/ -+#define ODN_PDP_GAMMA19_R_GAMMA19_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA19_R_GAMMA19_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA19_R_GAMMA19_R_SHIFT (0) -+#define ODN_PDP_GAMMA19_R_GAMMA19_R_LENGTH (10) -+#define ODN_PDP_GAMMA19_R_GAMMA19_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA19_GB_OFFSET (0x0850) -+ -+/* PDP, GAMMA19_GB, GAMMA19_G -+*/ -+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_SHIFT (16) -+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_LENGTH (10) -+#define ODN_PDP_GAMMA19_GB_GAMMA19_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA19_GB, GAMMA19_B -+*/ -+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_MASK (0x000003FF) 
-+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_SHIFT (0) -+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_LENGTH (10) -+#define ODN_PDP_GAMMA19_GB_GAMMA19_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA20_R_OFFSET (0x0854) -+ -+/* PDP, GAMMA20_R, GAMMA20_R -+*/ -+#define ODN_PDP_GAMMA20_R_GAMMA20_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA20_R_GAMMA20_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA20_R_GAMMA20_R_SHIFT (0) -+#define ODN_PDP_GAMMA20_R_GAMMA20_R_LENGTH (10) -+#define ODN_PDP_GAMMA20_R_GAMMA20_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA20_GB_OFFSET (0x0858) -+ -+/* PDP, GAMMA20_GB, GAMMA20_G -+*/ -+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_SHIFT (16) -+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_LENGTH (10) -+#define ODN_PDP_GAMMA20_GB_GAMMA20_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA20_GB, GAMMA20_B -+*/ -+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_SHIFT (0) -+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_LENGTH (10) -+#define ODN_PDP_GAMMA20_GB_GAMMA20_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA21_R_OFFSET (0x085C) -+ -+/* PDP, GAMMA21_R, GAMMA21_R -+*/ -+#define ODN_PDP_GAMMA21_R_GAMMA21_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA21_R_GAMMA21_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA21_R_GAMMA21_R_SHIFT (0) -+#define ODN_PDP_GAMMA21_R_GAMMA21_R_LENGTH (10) -+#define ODN_PDP_GAMMA21_R_GAMMA21_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA21_GB_OFFSET (0x0860) -+ -+/* PDP, GAMMA21_GB, GAMMA21_G -+*/ -+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_SHIFT (16) -+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_LENGTH (10) -+#define ODN_PDP_GAMMA21_GB_GAMMA21_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA21_GB, GAMMA21_B -+*/ -+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_SHIFT (0) -+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_LENGTH (10) -+#define ODN_PDP_GAMMA21_GB_GAMMA21_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA22_R_OFFSET (0x0864) -+ -+/* PDP, GAMMA22_R, GAMMA22_R -+*/ -+#define ODN_PDP_GAMMA22_R_GAMMA22_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA22_R_GAMMA22_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA22_R_GAMMA22_R_SHIFT (0) -+#define ODN_PDP_GAMMA22_R_GAMMA22_R_LENGTH (10) -+#define ODN_PDP_GAMMA22_R_GAMMA22_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA22_GB_OFFSET (0x0868) -+ -+/* PDP, GAMMA22_GB, GAMMA22_G -+*/ -+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_SHIFT (16) -+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_LENGTH (10) -+#define ODN_PDP_GAMMA22_GB_GAMMA22_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA22_GB, GAMMA22_B -+*/ -+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_SHIFT (0) -+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_LENGTH (10) -+#define ODN_PDP_GAMMA22_GB_GAMMA22_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA23_R_OFFSET (0x086C) -+ -+/* PDP, GAMMA23_R, GAMMA23_R -+*/ -+#define ODN_PDP_GAMMA23_R_GAMMA23_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA23_R_GAMMA23_R_LSBMASK 
(0x000003FF) -+#define ODN_PDP_GAMMA23_R_GAMMA23_R_SHIFT (0) -+#define ODN_PDP_GAMMA23_R_GAMMA23_R_LENGTH (10) -+#define ODN_PDP_GAMMA23_R_GAMMA23_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA23_GB_OFFSET (0x0870) -+ -+/* PDP, GAMMA23_GB, GAMMA23_G -+*/ -+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_SHIFT (16) -+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_LENGTH (10) -+#define ODN_PDP_GAMMA23_GB_GAMMA23_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA23_GB, GAMMA23_B -+*/ -+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_SHIFT (0) -+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_LENGTH (10) -+#define ODN_PDP_GAMMA23_GB_GAMMA23_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA24_R_OFFSET (0x0874) -+ -+/* PDP, GAMMA24_R, GAMMA24_R -+*/ -+#define ODN_PDP_GAMMA24_R_GAMMA24_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA24_R_GAMMA24_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA24_R_GAMMA24_R_SHIFT (0) -+#define ODN_PDP_GAMMA24_R_GAMMA24_R_LENGTH (10) -+#define ODN_PDP_GAMMA24_R_GAMMA24_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA24_GB_OFFSET (0x0878) -+ -+/* PDP, GAMMA24_GB, GAMMA24_G -+*/ -+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_SHIFT (16) -+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_LENGTH (10) -+#define ODN_PDP_GAMMA24_GB_GAMMA24_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA24_GB, GAMMA24_B -+*/ -+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_SHIFT (0) -+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_LENGTH (10) -+#define ODN_PDP_GAMMA24_GB_GAMMA24_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA25_R_OFFSET (0x087C) -+ -+/* PDP, GAMMA25_R, GAMMA25_R -+*/ -+#define ODN_PDP_GAMMA25_R_GAMMA25_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA25_R_GAMMA25_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA25_R_GAMMA25_R_SHIFT (0) -+#define ODN_PDP_GAMMA25_R_GAMMA25_R_LENGTH (10) -+#define ODN_PDP_GAMMA25_R_GAMMA25_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA25_GB_OFFSET (0x0880) -+ -+/* PDP, GAMMA25_GB, GAMMA25_G -+*/ -+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_SHIFT (16) -+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_LENGTH (10) -+#define ODN_PDP_GAMMA25_GB_GAMMA25_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA25_GB, GAMMA25_B -+*/ -+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_SHIFT (0) -+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_LENGTH (10) -+#define ODN_PDP_GAMMA25_GB_GAMMA25_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA26_R_OFFSET (0x0884) -+ -+/* PDP, GAMMA26_R, GAMMA26_R -+*/ -+#define ODN_PDP_GAMMA26_R_GAMMA26_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA26_R_GAMMA26_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA26_R_GAMMA26_R_SHIFT (0) -+#define ODN_PDP_GAMMA26_R_GAMMA26_R_LENGTH (10) -+#define ODN_PDP_GAMMA26_R_GAMMA26_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA26_GB_OFFSET (0x0888) -+ -+/* PDP, GAMMA26_GB, GAMMA26_G -+*/ -+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_LSBMASK (0x000003FF) -+#define 
ODN_PDP_GAMMA26_GB_GAMMA26_G_SHIFT (16) -+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_LENGTH (10) -+#define ODN_PDP_GAMMA26_GB_GAMMA26_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA26_GB, GAMMA26_B -+*/ -+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_SHIFT (0) -+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_LENGTH (10) -+#define ODN_PDP_GAMMA26_GB_GAMMA26_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA27_R_OFFSET (0x088C) -+ -+/* PDP, GAMMA27_R, GAMMA27_R -+*/ -+#define ODN_PDP_GAMMA27_R_GAMMA27_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA27_R_GAMMA27_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA27_R_GAMMA27_R_SHIFT (0) -+#define ODN_PDP_GAMMA27_R_GAMMA27_R_LENGTH (10) -+#define ODN_PDP_GAMMA27_R_GAMMA27_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA27_GB_OFFSET (0x0890) -+ -+/* PDP, GAMMA27_GB, GAMMA27_G -+*/ -+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_SHIFT (16) -+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_LENGTH (10) -+#define ODN_PDP_GAMMA27_GB_GAMMA27_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA27_GB, GAMMA27_B -+*/ -+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_SHIFT (0) -+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_LENGTH (10) -+#define ODN_PDP_GAMMA27_GB_GAMMA27_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA28_R_OFFSET (0x0894) -+ -+/* PDP, GAMMA28_R, GAMMA28_R -+*/ -+#define ODN_PDP_GAMMA28_R_GAMMA28_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA28_R_GAMMA28_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA28_R_GAMMA28_R_SHIFT (0) -+#define ODN_PDP_GAMMA28_R_GAMMA28_R_LENGTH (10) -+#define ODN_PDP_GAMMA28_R_GAMMA28_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA28_GB_OFFSET (0x0898) -+ -+/* PDP, GAMMA28_GB, GAMMA28_G -+*/ -+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_SHIFT (16) -+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_LENGTH (10) -+#define ODN_PDP_GAMMA28_GB_GAMMA28_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA28_GB, GAMMA28_B -+*/ -+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_SHIFT (0) -+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_LENGTH (10) -+#define ODN_PDP_GAMMA28_GB_GAMMA28_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA29_R_OFFSET (0x089C) -+ -+/* PDP, GAMMA29_R, GAMMA29_R -+*/ -+#define ODN_PDP_GAMMA29_R_GAMMA29_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA29_R_GAMMA29_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA29_R_GAMMA29_R_SHIFT (0) -+#define ODN_PDP_GAMMA29_R_GAMMA29_R_LENGTH (10) -+#define ODN_PDP_GAMMA29_R_GAMMA29_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA29_GB_OFFSET (0x08A0) -+ -+/* PDP, GAMMA29_GB, GAMMA29_G -+*/ -+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_SHIFT (16) -+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_LENGTH (10) -+#define ODN_PDP_GAMMA29_GB_GAMMA29_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA29_GB, GAMMA29_B -+*/ -+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_SHIFT (0) -+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_LENGTH (10) 
-+#define ODN_PDP_GAMMA29_GB_GAMMA29_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA30_R_OFFSET (0x08A4) -+ -+/* PDP, GAMMA30_R, GAMMA30_R -+*/ -+#define ODN_PDP_GAMMA30_R_GAMMA30_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA30_R_GAMMA30_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA30_R_GAMMA30_R_SHIFT (0) -+#define ODN_PDP_GAMMA30_R_GAMMA30_R_LENGTH (10) -+#define ODN_PDP_GAMMA30_R_GAMMA30_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA30_GB_OFFSET (0x08A8) -+ -+/* PDP, GAMMA30_GB, GAMMA30_G -+*/ -+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_SHIFT (16) -+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_LENGTH (10) -+#define ODN_PDP_GAMMA30_GB_GAMMA30_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA30_GB, GAMMA30_B -+*/ -+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_SHIFT (0) -+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_LENGTH (10) -+#define ODN_PDP_GAMMA30_GB_GAMMA30_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA31_R_OFFSET (0x08AC) -+ -+/* PDP, GAMMA31_R, GAMMA31_R -+*/ -+#define ODN_PDP_GAMMA31_R_GAMMA31_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA31_R_GAMMA31_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA31_R_GAMMA31_R_SHIFT (0) -+#define ODN_PDP_GAMMA31_R_GAMMA31_R_LENGTH (10) -+#define ODN_PDP_GAMMA31_R_GAMMA31_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA31_GB_OFFSET (0x08B0) -+ -+/* PDP, GAMMA31_GB, GAMMA31_G -+*/ -+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_SHIFT (16) -+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_LENGTH (10) -+#define ODN_PDP_GAMMA31_GB_GAMMA31_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA31_GB, GAMMA31_B -+*/ -+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_SHIFT (0) -+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_LENGTH (10) -+#define ODN_PDP_GAMMA31_GB_GAMMA31_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA32_R_OFFSET (0x08B4) -+ -+/* PDP, GAMMA32_R, GAMMA32_R -+*/ -+#define ODN_PDP_GAMMA32_R_GAMMA32_R_MASK (0x000003FF) -+#define ODN_PDP_GAMMA32_R_GAMMA32_R_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA32_R_GAMMA32_R_SHIFT (0) -+#define ODN_PDP_GAMMA32_R_GAMMA32_R_LENGTH (10) -+#define ODN_PDP_GAMMA32_R_GAMMA32_R_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GAMMA32_GB_OFFSET (0x08B8) -+ -+/* PDP, GAMMA32_GB, GAMMA32_G -+*/ -+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_MASK (0x03FF0000) -+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_SHIFT (16) -+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_LENGTH (10) -+#define ODN_PDP_GAMMA32_GB_GAMMA32_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA32_GB, GAMMA32_B -+*/ -+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_MASK (0x000003FF) -+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_LSBMASK (0x000003FF) -+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_SHIFT (0) -+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_LENGTH (10) -+#define ODN_PDP_GAMMA32_GB_GAMMA32_B_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VEVENT_OFFSET (0x08BC) -+ -+/* PDP, VEVENT, VEVENT -+*/ -+#define ODN_PDP_VEVENT_VEVENT_MASK (0x1FFF0000) -+#define ODN_PDP_VEVENT_VEVENT_LSBMASK (0x00001FFF) -+#define ODN_PDP_VEVENT_VEVENT_SHIFT (16) -+#define ODN_PDP_VEVENT_VEVENT_LENGTH (13) -+#define ODN_PDP_VEVENT_VEVENT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VEVENT, VFETCH 
-+*/ -+#define ODN_PDP_VEVENT_VFETCH_MASK (0x00001FFF) -+#define ODN_PDP_VEVENT_VFETCH_LSBMASK (0x00001FFF) -+#define ODN_PDP_VEVENT_VFETCH_SHIFT (0) -+#define ODN_PDP_VEVENT_VFETCH_LENGTH (13) -+#define ODN_PDP_VEVENT_VFETCH_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_HDECTRL_OFFSET (0x08C0) -+ -+/* PDP, HDECTRL, HDES -+*/ -+#define ODN_PDP_HDECTRL_HDES_MASK (0x1FFF0000) -+#define ODN_PDP_HDECTRL_HDES_LSBMASK (0x00001FFF) -+#define ODN_PDP_HDECTRL_HDES_SHIFT (16) -+#define ODN_PDP_HDECTRL_HDES_LENGTH (13) -+#define ODN_PDP_HDECTRL_HDES_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, HDECTRL, HDEF -+*/ -+#define ODN_PDP_HDECTRL_HDEF_MASK (0x00001FFF) -+#define ODN_PDP_HDECTRL_HDEF_LSBMASK (0x00001FFF) -+#define ODN_PDP_HDECTRL_HDEF_SHIFT (0) -+#define ODN_PDP_HDECTRL_HDEF_LENGTH (13) -+#define ODN_PDP_HDECTRL_HDEF_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VDECTRL_OFFSET (0x08C4) -+ -+/* PDP, VDECTRL, VDES -+*/ -+#define ODN_PDP_VDECTRL_VDES_MASK (0x1FFF0000) -+#define ODN_PDP_VDECTRL_VDES_LSBMASK (0x00001FFF) -+#define ODN_PDP_VDECTRL_VDES_SHIFT (16) -+#define ODN_PDP_VDECTRL_VDES_LENGTH (13) -+#define ODN_PDP_VDECTRL_VDES_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VDECTRL, VDEF -+*/ -+#define ODN_PDP_VDECTRL_VDEF_MASK (0x00001FFF) -+#define ODN_PDP_VDECTRL_VDEF_LSBMASK (0x00001FFF) -+#define ODN_PDP_VDECTRL_VDEF_SHIFT (0) -+#define ODN_PDP_VDECTRL_VDEF_LENGTH (13) -+#define ODN_PDP_VDECTRL_VDEF_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_OPMASK_R_OFFSET (0x08C8) -+ -+/* PDP, OPMASK_R, MASKLEVEL -+*/ -+#define ODN_PDP_OPMASK_R_MASKLEVEL_MASK (0x80000000) -+#define ODN_PDP_OPMASK_R_MASKLEVEL_LSBMASK (0x00000001) -+#define ODN_PDP_OPMASK_R_MASKLEVEL_SHIFT (31) -+#define ODN_PDP_OPMASK_R_MASKLEVEL_LENGTH (1) -+#define ODN_PDP_OPMASK_R_MASKLEVEL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, OPMASK_R, BLANKLEVEL -+*/ -+#define ODN_PDP_OPMASK_R_BLANKLEVEL_MASK (0x40000000) -+#define ODN_PDP_OPMASK_R_BLANKLEVEL_LSBMASK (0x00000001) -+#define ODN_PDP_OPMASK_R_BLANKLEVEL_SHIFT (30) -+#define ODN_PDP_OPMASK_R_BLANKLEVEL_LENGTH (1) -+#define ODN_PDP_OPMASK_R_BLANKLEVEL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, OPMASK_R, MASKR -+*/ -+#define ODN_PDP_OPMASK_R_MASKR_MASK (0x000003FF) -+#define ODN_PDP_OPMASK_R_MASKR_LSBMASK (0x000003FF) -+#define ODN_PDP_OPMASK_R_MASKR_SHIFT (0) -+#define ODN_PDP_OPMASK_R_MASKR_LENGTH (10) -+#define ODN_PDP_OPMASK_R_MASKR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_OPMASK_GB_OFFSET (0x08CC) -+ -+/* PDP, OPMASK_GB, MASKG -+*/ -+#define ODN_PDP_OPMASK_GB_MASKG_MASK (0x03FF0000) -+#define ODN_PDP_OPMASK_GB_MASKG_LSBMASK (0x000003FF) -+#define ODN_PDP_OPMASK_GB_MASKG_SHIFT (16) -+#define ODN_PDP_OPMASK_GB_MASKG_LENGTH (10) -+#define ODN_PDP_OPMASK_GB_MASKG_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, OPMASK_GB, MASKB -+*/ -+#define ODN_PDP_OPMASK_GB_MASKB_MASK (0x000003FF) -+#define ODN_PDP_OPMASK_GB_MASKB_LSBMASK (0x000003FF) -+#define ODN_PDP_OPMASK_GB_MASKB_SHIFT (0) -+#define ODN_PDP_OPMASK_GB_MASKB_LENGTH (10) -+#define ODN_PDP_OPMASK_GB_MASKB_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_REGLD_ADDR_CTRL_OFFSET (0x08D0) -+ -+/* PDP, REGLD_ADDR_CTRL, REGLD_ADDRIN -+*/ -+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_MASK (0xFFFFFFF0) -+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LSBMASK (0x0FFFFFFF) -+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SHIFT (4) -+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LENGTH (28) -+#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_REGLD_ADDR_STAT_OFFSET (0x08D4) -+ -+/* PDP, REGLD_ADDR_STAT, REGLD_ADDROUT -+*/ -+#define 
ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_MASK (0xFFFFFFF0) -+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LSBMASK (0x0FFFFFFF) -+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SHIFT (4) -+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LENGTH (28) -+#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_REGLD_STAT_OFFSET (0x08D8) -+ -+/* PDP, REGLD_STAT, REGLD_ADDREN -+*/ -+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_MASK (0x00800000) -+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_LSBMASK (0x00000001) -+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_SHIFT (23) -+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_LENGTH (1) -+#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_REGLD_CTRL_OFFSET (0x08DC) -+ -+/* PDP, REGLD_CTRL, REGLD_ADDRLEN -+*/ -+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_MASK (0xFF000000) -+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_LSBMASK (0x000000FF) -+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_SHIFT (24) -+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_LENGTH (8) -+#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, REGLD_CTRL, REGLD_VAL -+*/ -+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_MASK (0x00800000) -+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_LSBMASK (0x00000001) -+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_SHIFT (23) -+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_LENGTH (1) -+#define ODN_PDP_REGLD_CTRL_REGLD_VAL_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_UPDCTRL_OFFSET (0x08E0) -+ -+/* PDP, UPDCTRL, UPDFIELD -+*/ -+#define ODN_PDP_UPDCTRL_UPDFIELD_MASK (0x00000001) -+#define ODN_PDP_UPDCTRL_UPDFIELD_LSBMASK (0x00000001) -+#define ODN_PDP_UPDCTRL_UPDFIELD_SHIFT (0) -+#define ODN_PDP_UPDCTRL_UPDFIELD_LENGTH (1) -+#define ODN_PDP_UPDCTRL_UPDFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_INTCTRL_OFFSET (0x08E4) -+ -+/* PDP, PVR_ODN_PDP_INTCTRL, HBLNK_LINE -+*/ -+#define ODN_PDP_INTCTRL_HBLNK_LINE_MASK (0x00010000) -+#define ODN_PDP_INTCTRL_HBLNK_LINE_LSBMASK (0x00000001) -+#define ODN_PDP_INTCTRL_HBLNK_LINE_SHIFT (16) -+#define ODN_PDP_INTCTRL_HBLNK_LINE_LENGTH (1) -+#define ODN_PDP_INTCTRL_HBLNK_LINE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PVR_ODN_PDP_INTCTRL, HBLNK_LINENO -+*/ -+#define ODN_PDP_INTCTRL_HBLNK_LINENO_MASK (0x00001FFF) -+#define ODN_PDP_INTCTRL_HBLNK_LINENO_LSBMASK (0x00001FFF) -+#define ODN_PDP_INTCTRL_HBLNK_LINENO_SHIFT (0) -+#define ODN_PDP_INTCTRL_HBLNK_LINENO_LENGTH (13) -+#define ODN_PDP_INTCTRL_HBLNK_LINENO_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_PDISETUP_OFFSET (0x0900) -+ -+/* PDP, PDISETUP, PDI_BLNKLVL -+*/ -+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_MASK (0x00000040) -+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_LSBMASK (0x00000001) -+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_SHIFT (6) -+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_LENGTH (1) -+#define ODN_PDP_PDISETUP_PDI_BLNKLVL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDISETUP, PDI_BLNK -+*/ -+#define ODN_PDP_PDISETUP_PDI_BLNK_MASK (0x00000020) -+#define ODN_PDP_PDISETUP_PDI_BLNK_LSBMASK (0x00000001) -+#define ODN_PDP_PDISETUP_PDI_BLNK_SHIFT (5) -+#define ODN_PDP_PDISETUP_PDI_BLNK_LENGTH (1) -+#define ODN_PDP_PDISETUP_PDI_BLNK_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDISETUP, PDI_PWR -+*/ -+#define ODN_PDP_PDISETUP_PDI_PWR_MASK (0x00000010) -+#define ODN_PDP_PDISETUP_PDI_PWR_LSBMASK (0x00000001) -+#define ODN_PDP_PDISETUP_PDI_PWR_SHIFT (4) -+#define ODN_PDP_PDISETUP_PDI_PWR_LENGTH (1) -+#define ODN_PDP_PDISETUP_PDI_PWR_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDISETUP, PDI_EN -+*/ -+#define ODN_PDP_PDISETUP_PDI_EN_MASK (0x00000008) -+#define ODN_PDP_PDISETUP_PDI_EN_LSBMASK (0x00000001) -+#define 
ODN_PDP_PDISETUP_PDI_EN_SHIFT (3) -+#define ODN_PDP_PDISETUP_PDI_EN_LENGTH (1) -+#define ODN_PDP_PDISETUP_PDI_EN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDISETUP, PDI_GDEN -+*/ -+#define ODN_PDP_PDISETUP_PDI_GDEN_MASK (0x00000004) -+#define ODN_PDP_PDISETUP_PDI_GDEN_LSBMASK (0x00000001) -+#define ODN_PDP_PDISETUP_PDI_GDEN_SHIFT (2) -+#define ODN_PDP_PDISETUP_PDI_GDEN_LENGTH (1) -+#define ODN_PDP_PDISETUP_PDI_GDEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDISETUP, PDI_NFEN -+*/ -+#define ODN_PDP_PDISETUP_PDI_NFEN_MASK (0x00000002) -+#define ODN_PDP_PDISETUP_PDI_NFEN_LSBMASK (0x00000001) -+#define ODN_PDP_PDISETUP_PDI_NFEN_SHIFT (1) -+#define ODN_PDP_PDISETUP_PDI_NFEN_LENGTH (1) -+#define ODN_PDP_PDISETUP_PDI_NFEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDISETUP, PDI_CR -+*/ -+#define ODN_PDP_PDISETUP_PDI_CR_MASK (0x00000001) -+#define ODN_PDP_PDISETUP_PDI_CR_LSBMASK (0x00000001) -+#define ODN_PDP_PDISETUP_PDI_CR_SHIFT (0) -+#define ODN_PDP_PDISETUP_PDI_CR_LENGTH (1) -+#define ODN_PDP_PDISETUP_PDI_CR_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_PDITIMING0_OFFSET (0x0904) -+ -+/* PDP, PDITIMING0, PDI_PWRSVGD -+*/ -+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_MASK (0x0F000000) -+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_LSBMASK (0x0000000F) -+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_SHIFT (24) -+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_LENGTH (4) -+#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDITIMING0, PDI_LSDEL -+*/ -+#define ODN_PDP_PDITIMING0_PDI_LSDEL_MASK (0x007F0000) -+#define ODN_PDP_PDITIMING0_PDI_LSDEL_LSBMASK (0x0000007F) -+#define ODN_PDP_PDITIMING0_PDI_LSDEL_SHIFT (16) -+#define ODN_PDP_PDITIMING0_PDI_LSDEL_LENGTH (7) -+#define ODN_PDP_PDITIMING0_PDI_LSDEL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDITIMING0, PDI_PWRSV2GD2 -+*/ -+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_MASK (0x000003FF) -+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_LSBMASK (0x000003FF) -+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_SHIFT (0) -+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_LENGTH (10) -+#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_PDITIMING1_OFFSET (0x0908) -+ -+/* PDP, PDITIMING1, PDI_NLDEL -+*/ -+#define ODN_PDP_PDITIMING1_PDI_NLDEL_MASK (0x000F0000) -+#define ODN_PDP_PDITIMING1_PDI_NLDEL_LSBMASK (0x0000000F) -+#define ODN_PDP_PDITIMING1_PDI_NLDEL_SHIFT (16) -+#define ODN_PDP_PDITIMING1_PDI_NLDEL_LENGTH (4) -+#define ODN_PDP_PDITIMING1_PDI_NLDEL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDITIMING1, PDI_ACBDEL -+*/ -+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_MASK (0x000003FF) -+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_LSBMASK (0x000003FF) -+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_SHIFT (0) -+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_LENGTH (10) -+#define ODN_PDP_PDITIMING1_PDI_ACBDEL_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_PDICOREID_OFFSET (0x090C) -+ -+/* PDP, PDICOREID, PDI_GROUP_ID -+*/ -+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_MASK (0xFF000000) -+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_LSBMASK (0x000000FF) -+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_SHIFT (24) -+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_LENGTH (8) -+#define ODN_PDP_PDICOREID_PDI_GROUP_ID_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDICOREID, PDI_CORE_ID -+*/ -+#define ODN_PDP_PDICOREID_PDI_CORE_ID_MASK (0x00FF0000) -+#define ODN_PDP_PDICOREID_PDI_CORE_ID_LSBMASK (0x000000FF) -+#define ODN_PDP_PDICOREID_PDI_CORE_ID_SHIFT (16) -+#define ODN_PDP_PDICOREID_PDI_CORE_ID_LENGTH (8) -+#define ODN_PDP_PDICOREID_PDI_CORE_ID_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDICOREID, PDI_CONFIG_ID -+*/ -+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_MASK 
(0x0000FFFF) -+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_LSBMASK (0x0000FFFF) -+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_SHIFT (0) -+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_LENGTH (16) -+#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_PDICOREREV_OFFSET (0x0910) -+ -+/* PDP, PDICOREREV, PDI_MAJOR_REV -+*/ -+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_MASK (0x00FF0000) -+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_LSBMASK (0x000000FF) -+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_SHIFT (16) -+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_LENGTH (8) -+#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDICOREREV, PDI_MINOR_REV -+*/ -+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_MASK (0x0000FF00) -+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_LSBMASK (0x000000FF) -+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_SHIFT (8) -+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_LENGTH (8) -+#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDICOREREV, PDI_MAINT_REV -+*/ -+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_MASK (0x000000FF) -+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_LSBMASK (0x000000FF) -+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_SHIFT (0) -+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_LENGTH (8) -+#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DITHERMATRIX2_OFFSET (0x0920) -+ -+/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y1 -+*/ -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_MASK (0x000000C0) -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LSBMASK (0x00000003) -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SHIFT (6) -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LENGTH (2) -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y1 -+*/ -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_MASK (0x00000030) -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LSBMASK (0x00000003) -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SHIFT (4) -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LENGTH (2) -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y0 -+*/ -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_MASK (0x0000000C) -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LSBMASK (0x00000003) -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SHIFT (2) -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LENGTH (2) -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y0 -+*/ -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_MASK (0x00000003) -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LSBMASK (0x00000003) -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SHIFT (0) -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LENGTH (2) -+#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DITHERMATRIX4_0_OFFSET (0x0924) -+ -+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y1 -+*/ -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_MASK (0xF0000000) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LSBMASK (0x0000000F) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SHIFT (28) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LENGTH (4) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y1 -+*/ -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_MASK (0x0F000000) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LSBMASK (0x0000000F) 
-+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SHIFT (24) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LENGTH (4) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y1 -+*/ -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_MASK (0x00F00000) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LSBMASK (0x0000000F) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SHIFT (20) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LENGTH (4) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y1 -+*/ -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_MASK (0x000F0000) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LSBMASK (0x0000000F) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SHIFT (16) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LENGTH (4) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y0 -+*/ -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_MASK (0x0000F000) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LSBMASK (0x0000000F) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SHIFT (12) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LENGTH (4) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y0 -+*/ -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_MASK (0x00000F00) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LSBMASK (0x0000000F) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SHIFT (8) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LENGTH (4) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y0 -+*/ -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_MASK (0x000000F0) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LSBMASK (0x0000000F) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SHIFT (4) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LENGTH (4) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y0 -+*/ -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_MASK (0x0000000F) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LSBMASK (0x0000000F) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SHIFT (0) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LENGTH (4) -+#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DITHERMATRIX4_1_OFFSET (0x0928) -+ -+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y3 -+*/ -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_MASK (0xF0000000) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LSBMASK (0x0000000F) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SHIFT (28) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LENGTH (4) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y3 -+*/ -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_MASK (0x0F000000) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LSBMASK (0x0000000F) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SHIFT (24) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LENGTH (4) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y3 -+*/ -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_MASK (0x00F00000) 
-+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LSBMASK (0x0000000F) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SHIFT (20) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LENGTH (4) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y3 -+*/ -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_MASK (0x000F0000) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LSBMASK (0x0000000F) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SHIFT (16) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LENGTH (4) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y2 -+*/ -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_MASK (0x0000F000) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LSBMASK (0x0000000F) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SHIFT (12) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LENGTH (4) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y2 -+*/ -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_MASK (0x00000F00) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LSBMASK (0x0000000F) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SHIFT (8) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LENGTH (4) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y2 -+*/ -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_MASK (0x000000F0) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LSBMASK (0x0000000F) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SHIFT (4) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LENGTH (4) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y2 -+*/ -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_MASK (0x0000000F) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LSBMASK (0x0000000F) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SHIFT (0) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LENGTH (4) -+#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DITHERMATRIX8_0_OFFSET (0x092C) -+ -+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X4Y0 -+*/ -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_MASK (0x3F000000) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SHIFT (24) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X3Y0 -+*/ -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_MASK (0x00FC0000) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SHIFT (18) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X2Y0 -+*/ -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_MASK (0x0003F000) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SHIFT (12) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X1Y0 -+*/ 
-+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_MASK (0x00000FC0) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SHIFT (6) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X0Y0 -+*/ -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_MASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SHIFT (0) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DITHERMATRIX8_1_OFFSET (0x0930) -+ -+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X1Y1 -+*/ -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_MASK (0x3F000000) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SHIFT (24) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X0Y1 -+*/ -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_MASK (0x00FC0000) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SHIFT (18) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X7Y0 -+*/ -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_MASK (0x0003F000) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SHIFT (12) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X6Y0 -+*/ -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_MASK (0x00000FC0) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SHIFT (6) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X5Y0 -+*/ -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_MASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SHIFT (0) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DITHERMATRIX8_2_OFFSET (0x0934) -+ -+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X6Y1 -+*/ -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_MASK (0x3F000000) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SHIFT (24) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X5Y1 -+*/ -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_MASK (0x00FC0000) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SHIFT (18) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LENGTH (6) 
-+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X4Y1 -+*/ -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_MASK (0x0003F000) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SHIFT (12) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X3Y1 -+*/ -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_MASK (0x00000FC0) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SHIFT (6) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X2Y1 -+*/ -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_MASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SHIFT (0) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DITHERMATRIX8_3_OFFSET (0x0938) -+ -+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X3Y2 -+*/ -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_MASK (0x3F000000) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SHIFT (24) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X2Y2 -+*/ -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_MASK (0x00FC0000) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SHIFT (18) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X1Y2 -+*/ -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_MASK (0x0003F000) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SHIFT (12) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X0Y2 -+*/ -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_MASK (0x00000FC0) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SHIFT (6) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X7Y1 -+*/ -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_MASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SHIFT (0) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DITHERMATRIX8_4_OFFSET (0x093C) -+ -+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X0Y3 -+*/ -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_MASK (0x3F000000) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LSBMASK (0x0000003F) 
-+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SHIFT (24) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X7Y2 -+*/ -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_MASK (0x00FC0000) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SHIFT (18) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X6Y2 -+*/ -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_MASK (0x0003F000) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SHIFT (12) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X5Y2 -+*/ -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_MASK (0x00000FC0) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SHIFT (6) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X4Y2 -+*/ -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_MASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SHIFT (0) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DITHERMATRIX8_5_OFFSET (0x0940) -+ -+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X5Y3 -+*/ -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_MASK (0x3F000000) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SHIFT (24) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X4Y3 -+*/ -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_MASK (0x00FC0000) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SHIFT (18) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X3Y3 -+*/ -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_MASK (0x0003F000) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SHIFT (12) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X2Y3 -+*/ -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_MASK (0x00000FC0) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SHIFT (6) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X1Y3 -+*/ -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_MASK (0x0000003F) 
-+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SHIFT (0) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DITHERMATRIX8_6_OFFSET (0x0944) -+ -+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X2Y4 -+*/ -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_MASK (0x3F000000) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SHIFT (24) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X1Y4 -+*/ -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_MASK (0x00FC0000) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SHIFT (18) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X0Y4 -+*/ -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_MASK (0x0003F000) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SHIFT (12) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X7Y3 -+*/ -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_MASK (0x00000FC0) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SHIFT (6) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X6Y3 -+*/ -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_MASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SHIFT (0) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DITHERMATRIX8_7_OFFSET (0x0948) -+ -+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X7Y4 -+*/ -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_MASK (0x3F000000) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SHIFT (24) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X6Y4 -+*/ -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_MASK (0x00FC0000) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SHIFT (18) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X5Y4 -+*/ -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_MASK (0x0003F000) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SHIFT (12) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SIGNED_FIELD IMG_FALSE 
-+ -+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X4Y4 -+*/ -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_MASK (0x00000FC0) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SHIFT (6) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X3Y4 -+*/ -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_MASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SHIFT (0) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DITHERMATRIX8_8_OFFSET (0x094C) -+ -+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X4Y5 -+*/ -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_MASK (0x3F000000) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SHIFT (24) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X3Y5 -+*/ -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_MASK (0x00FC0000) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SHIFT (18) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X2Y5 -+*/ -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_MASK (0x0003F000) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SHIFT (12) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X1Y5 -+*/ -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_MASK (0x00000FC0) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SHIFT (6) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X0Y5 -+*/ -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_MASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SHIFT (0) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DITHERMATRIX8_9_OFFSET (0x0950) -+ -+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X1Y6 -+*/ -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_MASK (0x3F000000) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SHIFT (24) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X0Y6 -+*/ -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_MASK (0x00FC0000) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SHIFT (18) -+#define 
ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X7Y5 -+*/ -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_MASK (0x0003F000) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SHIFT (12) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X6Y5 -+*/ -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_MASK (0x00000FC0) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SHIFT (6) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X5Y5 -+*/ -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_MASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SHIFT (0) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DITHERMATRIX8_10_OFFSET (0x0954) -+ -+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X6Y6 -+*/ -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_MASK (0x3F000000) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SHIFT (24) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X5Y6 -+*/ -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_MASK (0x00FC0000) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SHIFT (18) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X4Y6 -+*/ -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_MASK (0x0003F000) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SHIFT (12) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X3Y6 -+*/ -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_MASK (0x00000FC0) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SHIFT (6) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X2Y6 -+*/ -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_MASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SHIFT (0) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DITHERMATRIX8_11_OFFSET (0x0958) -+ -+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X3Y7 -+*/ -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_MASK 
(0x3F000000) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SHIFT (24) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X2Y7 -+*/ -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_MASK (0x00FC0000) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SHIFT (18) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X1Y7 -+*/ -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_MASK (0x0003F000) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SHIFT (12) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X0Y7 -+*/ -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_MASK (0x00000FC0) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SHIFT (6) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X7Y6 -+*/ -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_MASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SHIFT (0) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_DITHERMATRIX8_12_OFFSET (0x095C) -+ -+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X7Y7 -+*/ -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_MASK (0x00FC0000) -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SHIFT (18) -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X6Y7 -+*/ -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_MASK (0x0003F000) -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SHIFT (12) -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X5Y7 -+*/ -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_MASK (0x00000FC0) -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SHIFT (6) -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X4Y7 -+*/ -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_MASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LSBMASK (0x0000003F) -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SHIFT (0) -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LENGTH (6) -+#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SIGNED_FIELD 
IMG_FALSE -+ -+#define ODN_PDP_GRPH1_MEMCTRL_OFFSET (0x0960) -+ -+/* PDP, GRPH1_MEMCTRL, GRPH1_LOCAL_GLOBAL_MEMCTRL -+*/ -+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) -+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) -+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) -+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1_MEMCTRL, GRPH1_BURSTLEN -+*/ -+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_MASK (0x000000FF) -+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LSBMASK (0x000000FF) -+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SHIFT (0) -+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LENGTH (8) -+#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH1_MEM_THRESH_OFFSET (0x0964) -+ -+/* PDP, GRPH1_MEM_THRESH, GRPH1_UVTHRESHOLD -+*/ -+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_MASK (0xFF000000) -+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SHIFT (24) -+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LENGTH (8) -+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1_MEM_THRESH, GRPH1_YTHRESHOLD -+*/ -+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_MASK (0x001FF000) -+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SHIFT (12) -+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LENGTH (9) -+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1_MEM_THRESH, GRPH1_THRESHOLD -+*/ -+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_MASK (0x000001FF) -+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SHIFT (0) -+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LENGTH (9) -+#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH2_MEMCTRL_OFFSET (0x0968) -+ -+/* PDP, GRPH2_MEMCTRL, GRPH2_LOCAL_GLOBAL_MEMCTRL -+*/ -+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) -+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) -+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) -+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2_MEMCTRL, GRPH2_BURSTLEN -+*/ -+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_MASK (0x000000FF) -+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LSBMASK (0x000000FF) -+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SHIFT (0) -+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LENGTH (8) -+#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH2_MEM_THRESH_OFFSET (0x096C) -+ -+/* PDP, GRPH2_MEM_THRESH, GRPH2_UVTHRESHOLD -+*/ -+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_MASK (0xFF000000) -+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SHIFT (24) -+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LENGTH (8) -+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2_MEM_THRESH, GRPH2_YTHRESHOLD -+*/ -+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_MASK (0x001FF000) 
-+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SHIFT (12) -+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LENGTH (9) -+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2_MEM_THRESH, GRPH2_THRESHOLD -+*/ -+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_MASK (0x000001FF) -+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SHIFT (0) -+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LENGTH (9) -+#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH3_MEMCTRL_OFFSET (0x0970) -+ -+/* PDP, GRPH3_MEMCTRL, GRPH3_LOCAL_GLOBAL_MEMCTRL -+*/ -+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) -+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) -+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) -+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3_MEMCTRL, GRPH3_BURSTLEN -+*/ -+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_MASK (0x000000FF) -+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LSBMASK (0x000000FF) -+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SHIFT (0) -+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LENGTH (8) -+#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH3_MEM_THRESH_OFFSET (0x0974) -+ -+/* PDP, GRPH3_MEM_THRESH, GRPH3_UVTHRESHOLD -+*/ -+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_MASK (0xFF000000) -+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SHIFT (24) -+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LENGTH (8) -+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3_MEM_THRESH, GRPH3_YTHRESHOLD -+*/ -+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_MASK (0x001FF000) -+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SHIFT (12) -+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LENGTH (9) -+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3_MEM_THRESH, GRPH3_THRESHOLD -+*/ -+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_MASK (0x000001FF) -+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SHIFT (0) -+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LENGTH (9) -+#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH4_MEMCTRL_OFFSET (0x0978) -+ -+/* PDP, GRPH4_MEMCTRL, GRPH4_LOCAL_GLOBAL_MEMCTRL -+*/ -+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) -+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) -+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) -+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4_MEMCTRL, GRPH4_BURSTLEN -+*/ -+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_MASK (0x000000FF) -+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LSBMASK (0x000000FF) -+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SHIFT (0) -+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LENGTH (8) 
-+#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH4_MEM_THRESH_OFFSET (0x097C) -+ -+/* PDP, GRPH4_MEM_THRESH, GRPH4_UVTHRESHOLD -+*/ -+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_MASK (0xFF000000) -+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SHIFT (24) -+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LENGTH (8) -+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4_MEM_THRESH, GRPH4_YTHRESHOLD -+*/ -+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_MASK (0x001FF000) -+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SHIFT (12) -+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LENGTH (9) -+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4_MEM_THRESH, GRPH4_THRESHOLD -+*/ -+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_MASK (0x000001FF) -+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SHIFT (0) -+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LENGTH (9) -+#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1_MEMCTRL_OFFSET (0x0980) -+ -+/* PDP, VID1_MEMCTRL, VID1_LOCAL_GLOBAL_MEMCTRL -+*/ -+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) -+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) -+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) -+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) -+#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1_MEMCTRL, VID1_BURSTLEN -+*/ -+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_MASK (0x000000FF) -+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_LSBMASK (0x000000FF) -+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_SHIFT (0) -+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_LENGTH (8) -+#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1_MEM_THRESH_OFFSET (0x0984) -+ -+/* PDP, VID1_MEM_THRESH, VID1_UVTHRESHOLD -+*/ -+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_MASK (0xFF000000) -+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SHIFT (24) -+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LENGTH (8) -+#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1_MEM_THRESH, VID1_YTHRESHOLD -+*/ -+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_MASK (0x001FF000) -+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SHIFT (12) -+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LENGTH (9) -+#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1_MEM_THRESH, VID1_THRESHOLD -+*/ -+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_MASK (0x000001FF) -+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SHIFT (0) -+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LENGTH (9) -+#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2_MEMCTRL_OFFSET (0x0988) -+ -+/* PDP, VID2_MEMCTRL, VID2_LOCAL_GLOBAL_MEMCTRL -+*/ -+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) -+#define 
ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) -+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) -+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) -+#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2_MEMCTRL, VID2_BURSTLEN -+*/ -+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_MASK (0x000000FF) -+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_LSBMASK (0x000000FF) -+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_SHIFT (0) -+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_LENGTH (8) -+#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2_MEM_THRESH_OFFSET (0x098C) -+ -+/* PDP, VID2_MEM_THRESH, VID2_UVTHRESHOLD -+*/ -+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_MASK (0xFF000000) -+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SHIFT (24) -+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LENGTH (8) -+#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2_MEM_THRESH, VID2_YTHRESHOLD -+*/ -+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_MASK (0x001FF000) -+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SHIFT (12) -+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LENGTH (9) -+#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2_MEM_THRESH, VID2_THRESHOLD -+*/ -+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_MASK (0x000001FF) -+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SHIFT (0) -+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LENGTH (9) -+#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3_MEMCTRL_OFFSET (0x0990) -+ -+/* PDP, VID3_MEMCTRL, VID3_LOCAL_GLOBAL_MEMCTRL -+*/ -+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) -+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) -+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) -+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) -+#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3_MEMCTRL, VID3_BURSTLEN -+*/ -+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_MASK (0x000000FF) -+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_LSBMASK (0x000000FF) -+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_SHIFT (0) -+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_LENGTH (8) -+#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3_MEM_THRESH_OFFSET (0x0994) -+ -+/* PDP, VID3_MEM_THRESH, VID3_UVTHRESHOLD -+*/ -+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_MASK (0xFF000000) -+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SHIFT (24) -+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LENGTH (8) -+#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3_MEM_THRESH, VID3_YTHRESHOLD -+*/ -+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_MASK (0x001FF000) -+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SHIFT (12) -+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LENGTH (9) -+#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3_MEM_THRESH, VID3_THRESHOLD -+*/ 
-+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_MASK (0x000001FF) -+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SHIFT (0) -+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LENGTH (9) -+#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4_MEMCTRL_OFFSET (0x0998) -+ -+/* PDP, VID4_MEMCTRL, VID4_LOCAL_GLOBAL_MEMCTRL -+*/ -+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) -+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) -+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) -+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) -+#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4_MEMCTRL, VID4_BURSTLEN -+*/ -+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_MASK (0x000000FF) -+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_LSBMASK (0x000000FF) -+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_SHIFT (0) -+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_LENGTH (8) -+#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4_MEM_THRESH_OFFSET (0x099C) -+ -+/* PDP, VID4_MEM_THRESH, VID4_UVTHRESHOLD -+*/ -+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_MASK (0xFF000000) -+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SHIFT (24) -+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LENGTH (8) -+#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4_MEM_THRESH, VID4_YTHRESHOLD -+*/ -+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_MASK (0x001FF000) -+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SHIFT (12) -+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LENGTH (9) -+#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4_MEM_THRESH, VID4_THRESHOLD -+*/ -+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_MASK (0x000001FF) -+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LSBMASK (0x000001FF) -+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SHIFT (0) -+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LENGTH (9) -+#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH1_PANIC_THRESH_OFFSET (0x09A0) -+ -+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_ENABLE -+*/ -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_MASK (0x80000000) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SHIFT (31) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LENGTH (1) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_ENABLE -+*/ -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_MASK (0x40000000) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SHIFT (30) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LENGTH (1) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MAX -+*/ -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) -+#define 
ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SHIFT (23) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LENGTH (7) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MIN -+*/ -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SHIFT (16) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LENGTH (7) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MAX -+*/ -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SHIFT (8) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LENGTH (8) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MIN -+*/ -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SHIFT (0) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LENGTH (8) -+#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH2_PANIC_THRESH_OFFSET (0x09A4) -+ -+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_ENABLE -+*/ -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_MASK (0x80000000) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SHIFT (31) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LENGTH (1) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_ENABLE -+*/ -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_MASK (0x40000000) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SHIFT (30) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LENGTH (1) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MAX -+*/ -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SHIFT (23) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LENGTH (7) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MIN -+*/ -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SHIFT (16) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LENGTH (7) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ 
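Every field in this header follows the same _MASK/_LSBMASK/_SHIFT/_LENGTH pattern (the MASK is the field in register position, the LSBMASK is the same mask shifted down to bit 0), so one pair of generic helpers covers read and update of any field. The sketch below is illustrative only, not part of the patch: the header include name is assumed from the ODN_PDP_REGS_H guard, and the volatile register pointer stands in for whatever ioremapped PDP base the real driver uses. It reuses the GRPH1_MEM_THRESH Y-threshold field defined earlier in this file.

#include <stdint.h>
#include "odn_pdp_regs.h"   /* header name assumed from the ODN_PDP_REGS_H guard */

/* Extract a field from a register value using its _MASK/_SHIFT pair. */
static inline uint32_t odn_field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Replace a field inside a register value, leaving the other bits intact. */
static inline uint32_t odn_field_set(uint32_t reg, uint32_t mask,
				     uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example: program GRPH1_YTHRESHOLD in GRPH1_MEM_THRESH (offset 0x0964).
 * 'regs' is a placeholder for the ioremapped PDP register bank.
 */
static void odn_pdp_set_grph1_ythreshold(volatile uint32_t *regs, uint32_t thresh)
{
	uint32_t v = regs[ODN_PDP_GRPH1_MEM_THRESH_OFFSET / 4];

	v = odn_field_set(v, ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_MASK,
			  ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SHIFT,
			  thresh);
	regs[ODN_PDP_GRPH1_MEM_THRESH_OFFSET / 4] = v;
}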
-+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MAX -+*/ -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SHIFT (8) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LENGTH (8) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MIN -+*/ -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SHIFT (0) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LENGTH (8) -+#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH3_PANIC_THRESH_OFFSET (0x09A8) -+ -+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_ENABLE -+*/ -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_MASK (0x80000000) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SHIFT (31) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LENGTH (1) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_ENABLE -+*/ -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_MASK (0x40000000) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SHIFT (30) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LENGTH (1) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MAX -+*/ -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SHIFT (23) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LENGTH (7) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MIN -+*/ -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SHIFT (16) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LENGTH (7) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MAX -+*/ -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SHIFT (8) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LENGTH (8) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MIN -+*/ -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) -+#define 
ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SHIFT (0) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LENGTH (8) -+#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_GRPH4_PANIC_THRESH_OFFSET (0x09AC) -+ -+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_ENABLE -+*/ -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_MASK (0x80000000) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SHIFT (31) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LENGTH (1) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_ENABLE -+*/ -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_MASK (0x40000000) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LSBMASK (0x00000001) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SHIFT (30) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LENGTH (1) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MAX -+*/ -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SHIFT (23) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LENGTH (7) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MIN -+*/ -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SHIFT (16) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LENGTH (7) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MAX -+*/ -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SHIFT (8) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LENGTH (8) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MIN -+*/ -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SHIFT (0) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LENGTH (8) -+#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID1_PANIC_THRESH_OFFSET (0x09B0) -+ -+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_ENABLE -+*/ -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_MASK (0x80000000) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LSBMASK (0x00000001) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SHIFT (31) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LENGTH (1) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE -+ 
-+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_ENABLE -+*/ -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_MASK (0x40000000) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LSBMASK (0x00000001) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SHIFT (30) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LENGTH (1) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MAX -+*/ -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SHIFT (23) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LENGTH (7) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MIN -+*/ -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SHIFT (16) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LENGTH (7) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MAX -+*/ -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SHIFT (8) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LENGTH (8) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MIN -+*/ -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SHIFT (0) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LENGTH (8) -+#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID2_PANIC_THRESH_OFFSET (0x09B4) -+ -+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_ENABLE -+*/ -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_MASK (0x80000000) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LSBMASK (0x00000001) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SHIFT (31) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LENGTH (1) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_ENABLE -+*/ -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_MASK (0x40000000) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LSBMASK (0x00000001) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SHIFT (30) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LENGTH (1) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MAX -+*/ -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SHIFT (23) -+#define 
ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LENGTH (7) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MIN -+*/ -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SHIFT (16) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LENGTH (7) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MAX -+*/ -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SHIFT (8) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LENGTH (8) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MIN -+*/ -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SHIFT (0) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LENGTH (8) -+#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID3_PANIC_THRESH_OFFSET (0x09B8) -+ -+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_ENABLE -+*/ -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_MASK (0x80000000) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LSBMASK (0x00000001) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SHIFT (31) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LENGTH (1) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_ENABLE -+*/ -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_MASK (0x40000000) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LSBMASK (0x00000001) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SHIFT (30) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LENGTH (1) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MAX -+*/ -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SHIFT (23) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LENGTH (7) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MIN -+*/ -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SHIFT (16) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LENGTH (7) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MAX -+*/ -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) -+#define 
ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SHIFT (8) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LENGTH (8) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MIN -+*/ -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SHIFT (0) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LENGTH (8) -+#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+#define ODN_PDP_VID4_PANIC_THRESH_OFFSET (0x09BC) -+ -+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_ENABLE -+*/ -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_MASK (0x80000000) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LSBMASK (0x00000001) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SHIFT (31) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LENGTH (1) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_ENABLE -+*/ -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_MASK (0x40000000) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LSBMASK (0x00000001) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SHIFT (30) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LENGTH (1) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MAX -+*/ -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SHIFT (23) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LENGTH (7) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MIN -+*/ -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SHIFT (16) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LENGTH (7) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MAX -+*/ -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SHIFT (8) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LENGTH (8) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MIN -+*/ -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SHIFT (0) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LENGTH (8) -+#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+#define 
ODN_PDP_BURST_BOUNDARY_OFFSET (0x09C0) -+ -+/* PDP, BURST_BOUNDARY, BURST_BOUNDARY -+*/ -+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_MASK (0x0000003F) -+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_LSBMASK (0x0000003F) -+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_SHIFT (0) -+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_LENGTH (6) -+#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_SIGNED_FIELD IMG_FALSE -+ -+ -+/* ---------------------- End of register definitions ---------------------- */ -+ -+/* NUMREG defines the extent of register address space. -+*/ -+ -+#define ODN_PDP_NUMREG ((0x09C0 >> 2)+1) -+ -+/* Info about video plane addresses */ -+#define ODN_PDP_YADDR_BITS (ODN_PDP_VID1BASEADDR_VID1BASEADDR_LENGTH) -+#define ODN_PDP_YADDR_ALIGN 5 -+#define ODN_PDP_UADDR_BITS (ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH) -+#define ODN_PDP_UADDR_ALIGN 5 -+#define ODN_PDP_VADDR_BITS (ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH) -+#define ODN_PDP_VADDR_ALIGN 5 -+ -+#define ODN_PDP_YSTRIDE_BITS (ODN_PDP_VID1STRIDE_VID1STRIDE_LENGTH) -+#define ODN_PDP_YSTRIDE_ALIGN 5 -+ -+#define ODN_PDP_MAX_INPUT_WIDTH (ODN_PDP_VID1SIZE_VID1WIDTH_LSBMASK + 1) -+#define ODN_PDP_MAX_INPUT_HEIGHT (ODN_PDP_VID1SIZE_VID1HEIGHT_LSBMASK + 1) -+ -+/* Maximum 6 bytes per pixel for RGB161616 */ -+#define ODN_PDP_MAX_IMAGE_BYTES (ODN_PDP_MAX_INPUT_WIDTH * ODN_PDP_MAX_INPUT_HEIGHT * 6) -+ -+/* Round up */ -+#define ODN_PDP_MAX_IMAGE_PAGES ((ODN_PDP_MAX_IMAGE_BYTES+PAGE_SIZE-1)/PAGE_SIZE) -+ -+#define ODN_PDP_YADDR_MAX (((1 << ODN_PDP_YADDR_BITS) - 1) << ODN_PDP_YADDR_ALIGN) -+#define ODN_PDP_UADDR_MAX (((1 << ODN_PDP_UADDR_BITS) - 1) << ODN_PDP_UADDR_ALIGN) -+#define ODN_PDP_VADDR_MAX (((1 << ODN_PDP_VADDR_BITS) - 1) << ODN_PDP_VADDR_ALIGN) -+#define ODN_PDP_YSTRIDE_MAX ((1 << ODN_PDP_YSTRIDE_BITS) << ODN_PDP_YSTRIDE_ALIGN) -+#define ODN_PDP_YADDR_ALIGNMASK ((1 << ODN_PDP_YADDR_ALIGN) - 1) -+#define ODN_PDP_UADDR_ALIGNMASK ((1 << ODN_PDP_UADDR_ALIGN) - 1) -+#define ODN_PDP_VADDR_ALIGNMASK ((1 << ODN_PDP_VADDR_ALIGN) - 1) -+#define ODN_PDP_YSTRIDE_ALIGNMASK ((1 << ODN_PDP_YSTRIDE_ALIGN) - 1) -+ -+/* Field Values (some are reserved for future use) */ -+#define ODN_PDP_SURF_PIXFMT_RGB332 0x3 -+#define ODN_PDP_SURF_PIXFMT_ARGB4444 0x4 -+#define ODN_PDP_SURF_PIXFMT_ARGB1555 0x5 -+#define ODN_PDP_SURF_PIXFMT_RGB888 0x6 -+#define ODN_PDP_SURF_PIXFMT_RGB565 0x7 -+#define ODN_PDP_SURF_PIXFMT_ARGB8888 0x8 -+#define ODN_PDP_SURF_PIXFMT_420_PL8 0x9 -+#define ODN_PDP_SURF_PIXFMT_420_PL8IVU 0xA -+#define ODN_PDP_SURF_PIXFMT_420_PL8IUV 0xB -+#define ODN_PDP_SURF_PIXFMT_422_UY0VY1_8888 0xC -+#define ODN_PDP_SURF_PIXFMT_422_VY0UY1_8888 0xD -+#define ODN_PDP_SURF_PIXFMT_422_Y0UY1V_8888 0xE -+#define ODN_PDP_SURF_PIXFMT_422_Y0VY1U_8888 0xF -+#define ODN_PDP_SURF_PIXFMT_AYUV8888 0x10 -+#define ODN_PDP_SURF_PIXFMT_YUV101010 0x15 -+#define ODN_PDP_SURF_PIXFMT_RGB101010 0x17 -+#define ODN_PDP_SURF_PIXFMT_420_PL10IUV 0x18 -+#define ODN_PDP_SURF_PIXFMT_420_PL10IVU 0x19 -+#define ODN_PDP_SURF_PIXFMT_422_PL10IUV 0x1A -+#define ODN_PDP_SURF_PIXFMT_422_PL10IVU 0x1B -+#define ODN_PDP_SURF_PIXFMT_RGB121212 0x1E -+#define ODN_PDP_SURF_PIXFMT_RGB161616 0x1F -+ -+#define ODN_PDP_CTRL_CKEYSRC_PREV 0x0 -+#define ODN_PDP_CTRL_CKEYSRC_CUR 0x1 -+ -+#define ODN_PDP_MEMCTRL_MEMREFRESH_ALWAYS 0x0 -+#define ODN_PDP_MEMCTRL_MEMREFRESH_HBLNK 0x1 -+#define ODN_PDP_MEMCTRL_MEMREFRESH_VBLNK 0x2 -+#define ODN_PDP_MEMCTRL_MEMREFRESH_BOTH 0x3 -+ -+#define ODN_PDP_3D_CTRL_BLENDSEL_BGND_WITH_POS0 0x0 -+#define ODN_PDP_3D_CTRL_BLENDSEL_POS0_WITH_POS1 0x1 -+#define 
ODN_PDP_3D_CTRL_BLENDSEL_POS1_WITH_POS2 0x2 -+#define ODN_PDP_3D_CTRL_BLENDSEL_POS2_WITH_POS3 0x3 -+#define ODN_PDP_3D_CTRL_BLENDSEL_POS3_WITH_POS4 0x4 -+#define ODN_PDP_3D_CTRL_BLENDSEL_POS4_WITH_POS5 0x5 -+#define ODN_PDP_3D_CTRL_BLENDSEL_POS5_WITH_POS6 0x6 -+#define ODN_PDP_3D_CTRL_BLENDSEL_POS6_WITH_POS7 0x7 -+ -+#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_Y_STRIDE 0x0 -+#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_DOUBLE_Y_STRIDE 0x1 -+#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_HALF_Y_STRIDE 0x2 -+ -+#define ODN_PDP_PROCAMP_OUTPUT_OFFSET_FRACTIONAL_BITS 1 -+#define ODN_PDP_PROCAMP_COEFFICIENT_FRACTIONAL_BITS 10 -+ -+/*---------------------------------------------------------------------------*/ -+ -+#endif /* ODN_PDP_REGS_H */ -diff --git a/drivers/gpu/drm/img-rogue/apollo/odin_regs.h b/drivers/gpu/drm/img-rogue/apollo/odin_regs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/odin_regs.h -@@ -0,0 +1,1026 @@ -+/****************************************************************************** -+@Title Odin system control register definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Odin FPGA register defs for IMG 3rd generation TCF -+ -+ Auto generated headers, eg. odn_core.h: -+ regconv -d . -a 8 odn_core.def -+ -+ Source files : -+ odn_core.def -+ mca_debug.def -+ sai_rx_debug.def -+ sai_tx_debug.def -+ ad_tx.def -+ -+ Changes: -+ Removed obsolete copyright dates -+ Changed lower case to upper case -+ (eg. odn_core changed to ODN_CORE) -+ Changed PVR5__ to ODN_ -+ Merged multiple .def files into one header -+ -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+******************************************************************************/ -+ -+/* tab size 4 */ -+ -+#ifndef _ODIN_REGS_H_ -+#define _ODIN_REGS_H_ -+ -+/****************************** -+ Generated from: odn_core.def -+*******************************/ -+ -+/* -+ Register ID -+*/ -+#define ODN_CORE_ID 0x0000 -+#define ODN_ID_VARIANT_MASK 0x0000FFFFU -+#define ODN_ID_VARIANT_SHIFT 0 -+#define ODN_ID_VARIANT_SIGNED 0 -+ -+#define ODN_ID_ID_MASK 0xFFFF0000U -+#define ODN_ID_ID_SHIFT 16 -+#define ODN_ID_ID_SIGNED 0 -+ -+/* -+ Register REL -+*/ -+#define ODN_CORE_REL 0x0004 -+#define ODN_REL_MINOR_MASK 0x0000FFFFU -+#define ODN_REL_MINOR_SHIFT 0 -+#define ODN_REL_MINOR_SIGNED 0 -+ -+#define ODN_REL_MAJOR_MASK 0xFFFF0000U -+#define ODN_REL_MAJOR_SHIFT 16 -+#define ODN_REL_MAJOR_SIGNED 0 -+ -+/* -+ Register CHANGE_SET -+*/ -+#define ODN_CORE_CHANGE_SET 0x0008 -+#define ODN_CHANGE_SET_SET_MASK 0xFFFFFFFFU -+#define ODN_CHANGE_SET_SET_SHIFT 0 -+#define ODN_CHANGE_SET_SET_SIGNED 0 -+ -+/* -+ Register USER_ID -+*/ -+#define ODN_CORE_USER_ID 0x000C -+#define ODN_USER_ID_ID_MASK 0x000000FFU -+#define ODN_USER_ID_ID_SHIFT 0 -+#define ODN_USER_ID_ID_SIGNED 0 -+ -+/* -+ Register USER_BUILD -+*/ -+#define ODN_CORE_USER_BUILD 0x0010 -+#define ODN_USER_BUILD_BUILD_MASK 0xFFFFFFFFU -+#define ODN_USER_BUILD_BUILD_SHIFT 0 -+#define ODN_USER_BUILD_BUILD_SIGNED 0 -+ -+/* -+ Register SW_IF_VERSION -+*/ -+#define ODN_CORE_SW_IF_VERSION 0x0014 -+#define ODN_SW_IF_VERSION_VERSION_MASK 0x0000FFFFU -+#define ODN_SW_IF_VERSION_VERSION_SHIFT 0 -+#define ODN_SW_IF_VERSION_VERSION_SIGNED 0 -+ -+/* -+ Register INTERNAL_RESETN -+*/ -+#define ODN_CORE_INTERNAL_RESETN 0x0080 -+#define ODN_INTERNAL_RESETN_DDR_MASK 0x00000001U -+#define ODN_INTERNAL_RESETN_DDR_SHIFT 0 -+#define ODN_INTERNAL_RESETN_DDR_SIGNED 0 -+ -+#define ODN_INTERNAL_RESETN_MIG0_MASK 0x00000002U -+#define ODN_INTERNAL_RESETN_MIG0_SHIFT 1 -+#define ODN_INTERNAL_RESETN_MIG0_SIGNED 0 -+ -+#define ODN_INTERNAL_RESETN_MIG1_MASK 0x00000004U -+#define ODN_INTERNAL_RESETN_MIG1_SHIFT 2 -+#define ODN_INTERNAL_RESETN_MIG1_SIGNED 0 -+ -+#define ODN_INTERNAL_RESETN_PDP1_MASK 0x00000008U -+#define ODN_INTERNAL_RESETN_PDP1_SHIFT 3 -+#define ODN_INTERNAL_RESETN_PDP1_SIGNED 0 -+ -+#define ODN_INTERNAL_RESETN_PDP2_MASK 0x00000010U -+#define ODN_INTERNAL_RESETN_PDP2_SHIFT 4 -+#define ODN_INTERNAL_RESETN_PDP2_SIGNED 0 -+ -+#define ODN_INTERNAL_RESETN_PERIP_MASK 0x00000020U -+#define ODN_INTERNAL_RESETN_PERIP_SHIFT 5 -+#define ODN_INTERNAL_RESETN_PERIP_SIGNED 0 -+ -+#define ODN_INTERNAL_RESETN_GIST_MASK 0x00000040U -+#define ODN_INTERNAL_RESETN_GIST_SHIFT 6 -+#define ODN_INTERNAL_RESETN_GIST_SIGNED 0 -+ -+#define ODN_INTERNAL_RESETN_PIKE_MASK 0x00000080U -+#define ODN_INTERNAL_RESETN_PIKE_SHIFT 7 -+#define ODN_INTERNAL_RESETN_PIKE_SIGNED 0 -+ -+/* -+ Register EXTERNAL_RESETN -+*/ -+#define ODN_CORE_EXTERNAL_RESETN 0x0084 -+#define ODN_EXTERNAL_RESETN_DUT_MASK 0x00000001U -+#define ODN_EXTERNAL_RESETN_DUT_SHIFT 0 -+#define 
ODN_EXTERNAL_RESETN_DUT_SIGNED 0 -+ -+#define ODN_EXTERNAL_RESETN_DUT_SPI_MASK 0x00000002U -+#define ODN_EXTERNAL_RESETN_DUT_SPI_SHIFT 1 -+#define ODN_EXTERNAL_RESETN_DUT_SPI_SIGNED 0 -+ -+#define ODN_EXTERNAL_RESETN_DUT_PEP_DDR_MASK 0x00000004U -+#define ODN_EXTERNAL_RESETN_DUT_PEP_DDR_SHIFT 2 -+#define ODN_EXTERNAL_RESETN_DUT_PEP_DDR_SIGNED 0 -+ -+#define ODN_EXTERNAL_RESETN_DUT_IF_MASK 0x00000008U -+#define ODN_EXTERNAL_RESETN_DUT_IF_SHIFT 3 -+#define ODN_EXTERNAL_RESETN_DUT_IF_SIGNED 0 -+ -+#define ODN_EXTERNAL_RESETN_DUT1_MASK 0x00000010U -+#define ODN_EXTERNAL_RESETN_DUT1_SHIFT 4 -+#define ODN_EXTERNAL_RESETN_DUT1_SIGNED 0 -+ -+#define ODN_EXTERNAL_RESETN_DUT2_MASK 0x00000020U -+#define ODN_EXTERNAL_RESETN_DUT2_SHIFT 5 -+#define ODN_EXTERNAL_RESETN_DUT2_SIGNED 0 -+ -+/* -+ Register EXTERNAL_RESET -+*/ -+#define ODN_CORE_EXTERNAL_RESET 0x0088 -+#define ODN_EXTERNAL_RESET_PVT_CAL_MASK 0x00000001U -+#define ODN_EXTERNAL_RESET_PVT_CAL_SHIFT 0 -+#define ODN_EXTERNAL_RESET_PVT_CAL_SIGNED 0 -+ -+#define ODN_EXTERNAL_RESET_PLL_MASK 0x00000002U -+#define ODN_EXTERNAL_RESET_PLL_SHIFT 1 -+#define ODN_EXTERNAL_RESET_PLL_SIGNED 0 -+ -+/* -+ Register INTERNAL_AUTO_RESETN -+*/ -+#define ODN_CORE_INTERNAL_AUTO_RESETN 0x008C -+#define ODN_INTERNAL_AUTO_RESETN_AUX_MASK 0x00000001U -+#define ODN_INTERNAL_AUTO_RESETN_AUX_SHIFT 0 -+#define ODN_INTERNAL_AUTO_RESETN_AUX_SIGNED 0 -+ -+/* -+ Register CLK_GEN_RESET -+*/ -+#define ODN_CORE_CLK_GEN_RESET 0x0090 -+#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_MASK 0x00000001U -+#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_SHIFT 0 -+#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_SIGNED 0 -+ -+#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_MASK 0x00000002U -+#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_SHIFT 1 -+#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_SIGNED 0 -+ -+#define ODN_CLK_GEN_RESET_MULTI_MMCM_MASK 0x00000004U -+#define ODN_CLK_GEN_RESET_MULTI_MMCM_SHIFT 2 -+#define ODN_CLK_GEN_RESET_MULTI_MMCM_SIGNED 0 -+ -+#define ODN_CLK_GEN_RESET_PDP_MMCM_MASK 0x00000008U -+#define ODN_CLK_GEN_RESET_PDP_MMCM_SHIFT 3 -+#define ODN_CLK_GEN_RESET_PDP_MMCM_SIGNED 0 -+ -+/* -+ Register INTERRUPT_STATUS -+*/ -+#define ODN_CORE_INTERRUPT_STATUS 0x0100 -+#define ODN_INTERRUPT_STATUS_DUT_MASK 0x00000001U -+#define ODN_INTERRUPT_STATUS_DUT_SHIFT 0 -+#define ODN_INTERRUPT_STATUS_DUT_SIGNED 0 -+ -+#define ODN_INTERRUPT_STATUS_PDP1_MASK 0x00000002U -+#define ODN_INTERRUPT_STATUS_PDP1_SHIFT 1 -+#define ODN_INTERRUPT_STATUS_PDP1_SIGNED 0 -+ -+#define ODN_INTERRUPT_STATUS_PDP2_MASK 0x00000004U -+#define ODN_INTERRUPT_STATUS_PDP2_SHIFT 2 -+#define ODN_INTERRUPT_STATUS_PDP2_SIGNED 0 -+ -+#define ODN_INTERRUPT_STATUS_PERIP_MASK 0x00000008U -+#define ODN_INTERRUPT_STATUS_PERIP_SHIFT 3 -+#define ODN_INTERRUPT_STATUS_PERIP_SIGNED 0 -+ -+#define ODN_INTERRUPT_STATUS_UART_MASK 0x00000010U -+#define ODN_INTERRUPT_STATUS_UART_SHIFT 4 -+#define ODN_INTERRUPT_STATUS_UART_SIGNED 0 -+ -+#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_MASK 0x00000020U -+#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_SHIFT 5 -+#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_SIGNED 0 -+ -+#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_MASK 0x00000040U -+#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_SHIFT 6 -+#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_SIGNED 0 -+ -+#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_MASK 0x00000080U -+#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_SHIFT 7 -+#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_SIGNED 0 -+ -+#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_MASK 0x00000100U -+#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_SHIFT 8 
-+#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_SIGNED 0 -+ -+#define ODN_INTERRUPT_STATUS_DUT2_MASK 0x00000200U -+#define ODN_INTERRUPT_STATUS_DUT2_SHIFT 9 -+#define ODN_INTERRUPT_STATUS_DUT2_SIGNED 0 -+ -+#define ODN_INTERRUPT_STATUS_AXI_LOCKUP_PROTECTION_MASK 0x00000400U -+#define ODN_INTERRUPT_STATUS_AXI_LOCKUP_PROTECTION_SHIFT 10 -+#define ODN_INTERRUPT_STATUS_AXI_LOCKUP_PROTECTION_SIGNED 0 -+ -+#define ODN_INTERRUPT_STATUS_CDMA_MASK 0x00001800U -+#define ODN_INTERRUPT_STATUS_CDMA_SHIFT 11 -+#define ODN_INTERRUPT_STATUS_CDMA_SIGNED 0 -+ -+#define ODN_INTERRUPT_STATUS_OS_IRQ_MASK 0x001FE000U -+#define ODN_INTERRUPT_STATUS_OS_IRQ_SHIFT 13 -+#define ODN_INTERRUPT_STATUS_OS_IRQ_SIGNED 0 -+ -+#define ODN_INTERRUPT_STATUS_IRQ_TEST_MASK 0x40000000U -+#define ODN_INTERRUPT_STATUS_IRQ_TEST_SHIFT 30 -+#define ODN_INTERRUPT_STATUS_IRQ_TEST_SIGNED 0 -+ -+#define ODN_INTERRUPT_STATUS_MASTER_STATUS_MASK 0x80000000U -+#define ODN_INTERRUPT_STATUS_MASTER_STATUS_SHIFT 31 -+#define ODN_INTERRUPT_STATUS_MASTER_STATUS_SIGNED 0 -+ -+/* -+ Register INTERRUPT_ENABLE -+*/ -+#define ODN_CORE_INTERRUPT_ENABLE 0x0104 -+#define ODN_INTERRUPT_ENABLE_DUT_MASK 0x00000001U -+#define ODN_INTERRUPT_ENABLE_DUT_SHIFT 0 -+#define ODN_INTERRUPT_ENABLE_DUT_SIGNED 0 -+ -+#define ODN_INTERRUPT_ENABLE_PDP1_MASK 0x00000002U -+#define ODN_INTERRUPT_ENABLE_PDP1_SHIFT 1 -+#define ODN_INTERRUPT_ENABLE_PDP1_SIGNED 0 -+ -+#define ODN_INTERRUPT_ENABLE_PDP2_MASK 0x00000004U -+#define ODN_INTERRUPT_ENABLE_PDP2_SHIFT 2 -+#define ODN_INTERRUPT_ENABLE_PDP2_SIGNED 0 -+ -+#define ODN_INTERRUPT_ENABLE_PERIP_MASK 0x00000008U -+#define ODN_INTERRUPT_ENABLE_PERIP_SHIFT 3 -+#define ODN_INTERRUPT_ENABLE_PERIP_SIGNED 0 -+ -+#define ODN_INTERRUPT_ENABLE_UART_MASK 0x00000010U -+#define ODN_INTERRUPT_ENABLE_UART_SHIFT 4 -+#define ODN_INTERRUPT_ENABLE_UART_SIGNED 0 -+ -+#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_MASK 0x00000020U -+#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_SHIFT 5 -+#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_SIGNED 0 -+ -+#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_MASK 0x00000040U -+#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_SHIFT 6 -+#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_SIGNED 0 -+ -+#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_MASK 0x00000080U -+#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_SHIFT 7 -+#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_SIGNED 0 -+ -+#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_MASK 0x00000100U -+#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_SHIFT 8 -+#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_SIGNED 0 -+ -+#define ODN_INTERRUPT_ENABLE_DUT2_MASK 0x00000200U -+#define ODN_INTERRUPT_ENABLE_DUT2_SHIFT 9 -+#define ODN_INTERRUPT_ENABLE_DUT2_SIGNED 0 -+ -+#define ODN_INTERRUPT_ENABLE_AXI_LOCKUP_PROTECTION_MASK 0x00000400U -+#define ODN_INTERRUPT_ENABLE_AXI_LOCKUP_PROTECTION_SHIFT 10 -+#define ODN_INTERRUPT_ENABLE_AXI_LOCKUP_PROTECTION_SIGNED 0 -+ -+#define ODN_INTERRUPT_ENABLE_CDMA_MASK 0x00001800U -+#define ODN_INTERRUPT_ENABLE_CDMA_SHIFT 11 -+#define ODN_INTERRUPT_ENABLE_CDMA_SIGNED 0 -+ -+#define ODN_INTERRUPT_ENABLE_OS_IRQ_MASK 0x001FE000U -+#define ODN_INTERRUPT_ENABLE_OS_IRQ_SHIFT 13 -+#define ODN_INTERRUPT_ENABLE_OS_IRQ_SIGNED 0 -+ -+#define ODN_INTERRUPT_ENABLE_IRQ_TEST_MASK 0x40000000U -+#define ODN_INTERRUPT_ENABLE_IRQ_TEST_SHIFT 30 -+#define ODN_INTERRUPT_ENABLE_IRQ_TEST_SIGNED 0 -+ -+#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_MASK 0x80000000U -+#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_SHIFT 31 -+#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_SIGNED 0 -+ -+/* -+ Register INTERRUPT_CLR 
-+*/ -+#define ODN_CORE_INTERRUPT_CLR 0x010C -+#define ODN_INTERRUPT_CLR_DUT_MASK 0x00000001U -+#define ODN_INTERRUPT_CLR_DUT_SHIFT 0 -+#define ODN_INTERRUPT_CLR_DUT_SIGNED 0 -+ -+#define ODN_INTERRUPT_CLR_PDP1_MASK 0x00000002U -+#define ODN_INTERRUPT_CLR_PDP1_SHIFT 1 -+#define ODN_INTERRUPT_CLR_PDP1_SIGNED 0 -+ -+#define ODN_INTERRUPT_CLR_PDP2_MASK 0x00000004U -+#define ODN_INTERRUPT_CLR_PDP2_SHIFT 2 -+#define ODN_INTERRUPT_CLR_PDP2_SIGNED 0 -+ -+#define ODN_INTERRUPT_CLR_PERIP_MASK 0x00000008U -+#define ODN_INTERRUPT_CLR_PERIP_SHIFT 3 -+#define ODN_INTERRUPT_CLR_PERIP_SIGNED 0 -+ -+#define ODN_INTERRUPT_CLR_UART_MASK 0x00000010U -+#define ODN_INTERRUPT_CLR_UART_SHIFT 4 -+#define ODN_INTERRUPT_CLR_UART_SIGNED 0 -+ -+#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_MASK 0x00000020U -+#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_SHIFT 5 -+#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_SIGNED 0 -+ -+#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_MASK 0x00000040U -+#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_SHIFT 6 -+#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_SIGNED 0 -+ -+#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_MASK 0x00000080U -+#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_SHIFT 7 -+#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_SIGNED 0 -+ -+#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_MASK 0x00000100U -+#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_SHIFT 8 -+#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_SIGNED 0 -+ -+#define ODN_INTERRUPT_CLR_DUT2_MASK 0x00000200U -+#define ODN_INTERRUPT_CLR_DUT2_SHIFT 9 -+#define PVR5__INTERRUPT_CLR_DUT2_SIGNED 0 -+ -+#define ODN_INTERRUPT_CLR_AXI_LOCKUP_PROTECTION_MASK 0x00000400U -+#define ODN_INTERRUPT_CLR_AXI_LOCKUP_PROTECTION_SHIFT 10 -+#define ODN_INTERRUPT_CLR_AXI_LOCKUP_PROTECTION_SIGNED 0 -+ -+#define ODN_INTERRUPT_CLR_CDMA_MASK 0x00001800U -+#define ODN_INTERRUPT_CLR_CDMA_SHIFT 11 -+#define ODN_INTERRUPT_CLR_CDMA_SIGNED 0 -+ -+#define ODN_INTERRUPT_CLR_OS_IRQ_MASK 0x001FE000U -+#define ODN_INTERRUPT_CLR_OS_IRQ_SHIFT 13 -+#define ODN_INTERRUPT_CLR_OS_IRQ_SIGNED 0 -+ -+#define ODN_INTERRUPT_CLR_IRQ_TEST_MASK 0x40000000U -+#define ODN_INTERRUPT_CLR_IRQ_TEST_SHIFT 30 -+#define ODN_INTERRUPT_CLR_IRQ_TEST_SIGNED 0 -+ -+#define ODN_INTERRUPT_CLR_MASTER_CLEAR_MASK 0x80000000U -+#define ODN_INTERRUPT_CLR_MASTER_CLEAR_SHIFT 31 -+#define ODN_INTERRUPT_CLR_MASTER_CLEAR_SIGNED 0 -+ -+/* -+ Register INTERRUPT_TEST -+*/ -+#define ODN_CORE_INTERRUPT_TEST 0x0110 -+#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_MASK 0x00000001U -+#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_SHIFT 0 -+#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_SIGNED 0 -+ -+/* -+ Register INTERRUPT_TIMEOUT_CLR -+*/ -+#define ODN_CORE_INTERRUPT_TIMEOUT_CLR 0x0114 -+#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_MASK 0x00000002U -+#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SHIFT 1 -+#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SIGNED 0 -+ -+#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_MASK 0x00000001U -+#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SHIFT 0 -+#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SIGNED 0 -+ -+/* -+ Register INTERRUPT_TIMEOUT -+*/ -+#define ODN_CORE_INTERRUPT_TIMEOUT 0x0118 -+#define ODN_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_MASK 0xFFFFFFFFU -+#define ODN_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SHIFT 0 -+#define ODN_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SIGNED 0 -+/* -+ Register SYSTEM_ID -+*/ -+#define ODN_CORE_SYSTEM_ID 0x011C -+#define ODN_SYSTEM_ID_ID_MASK 0x0000FFFFU -+#define 
ODN_SYSTEM_ID_ID_SHIFT 0 -+#define ODN_SYSTEM_ID_ID_SIGNED 0 -+ -+/* -+ Register SUPPORTED_FEATURES -+*/ -+#define ODN_CORE_SUPPORTED_FEATURES 0x0120 -+#define ODN_SUPPORTED_FEATURES_UNIMPLEMENTED_FREATURES_MASK 0xFFFFFFFEU -+#define ODN_SUPPORTED_FEATURES_UNIMPLEMENTED_FREATURES_SHIFT 1 -+#define ODN_SUPPORTED_FEATURES_UNIMPLEMENTED_FREATURES_SIGNED 0 -+ -+#define ODN_SUPPORTED_FEATURES_2X_CDMA_AND_IRQS_MASK 0x00000001U -+#define ODN_SUPPORTED_FEATURES_2X_CDMA_AND_IRQS_SHIFT 0 -+#define ODN_SUPPORTED_FEATURES_2X_CDMA_AND_IRQS_SIGNED 0 -+ -+/* -+ Register NUM_GPIO -+*/ -+#define ODN_CORE_NUM_GPIO 0x0180 -+#define ODN_NUM_GPIO_NUMBER_MASK 0x0000000FU -+#define ODN_NUM_GPIO_NUMBER_SHIFT 0 -+#define ODN_NUM_GPIO_NUMBER_SIGNED 0 -+ -+/* -+ Register GPIO_EN -+*/ -+#define ODN_CORE_GPIO_EN 0x0184 -+#define ODN_GPIO_EN_DIRECTION_MASK 0x000000FFU -+#define ODN_GPIO_EN_DIRECTION_SHIFT 0 -+#define ODN_GPIO_EN_DIRECTION_SIGNED 0 -+ -+/* -+ Register GPIO -+*/ -+#define ODN_CORE_GPIO 0x0188 -+#define ODN_GPIO_GPIO_MASK 0x000000FFU -+#define ODN_GPIO_GPIO_SHIFT 0 -+#define ODN_GPIO_GPIO_SIGNED 0 -+ -+/* -+ Register NUM_DUT_CTRL -+*/ -+#define ODN_CORE_NUM_DUT_CTRL 0x0190 -+#define ODN_NUM_DUT_CTRL_NUM_PINS_MASK 0xFFFFFFFFU -+#define ODN_NUM_DUT_CTRL_NUM_PINS_SHIFT 0 -+#define ODN_NUM_DUT_CTRL_NUM_PINS_SIGNED 0 -+ -+/* -+ Register DUT_CTRL1 -+*/ -+#define ODN_CORE_DUT_CTRL1 0x0194 -+#define ODN_DUT_CTRL1_CONTROL1_MASK 0x3FFFFFFFU -+#define ODN_DUT_CTRL1_CONTROL1_SHIFT 0 -+#define ODN_DUT_CTRL1_CONTROL1_SIGNED 0 -+ -+#define ODN_DUT_CTRL1_FBDC_BYPASS_MASK 0x40000000U -+#define ODN_DUT_CTRL1_FBDC_BYPASS_SHIFT 30 -+#define ODN_DUT_CTRL1_FBDC_BYPASS_SIGNED 0 -+ -+#define ODN_DUT_CTRL1_DUT_MST_OFFSET_MASK 0x80000000U -+#define ODN_DUT_CTRL1_DUT_MST_OFFSET_SHIFT 31 -+#define ODN_DUT_CTRL1_DUT_MST_OFFSET_SIGNED 0 -+ -+/* -+ Register DUT_CTRL2 -+*/ -+#define ODN_CORE_DUT_CTRL2 0x0198 -+#define ODN_DUT_CTRL2_CONTROL2_MASK 0xFFFFFFFFU -+#define ODN_DUT_CTRL2_CONTROL2_SHIFT 0 -+#define ODN_DUT_CTRL2_CONTROL2_SIGNED 0 -+ -+/* -+ Register NUM_DUT_STAT -+*/ -+#define ODN_CORE_NUM_DUT_STAT 0x019C -+#define ODN_NUM_DUT_STAT_NUM_PINS_MASK 0xFFFFFFFFU -+#define ODN_NUM_DUT_STAT_NUM_PINS_SHIFT 0 -+#define ODN_NUM_DUT_STAT_NUM_PINS_SIGNED 0 -+ -+/* -+ Register DUT_STAT1 -+*/ -+#define ODN_CORE_DUT_STAT1 0x01A0 -+#define ODN_DUT_STAT1_STATUS1_MASK 0xFFFFFFFFU -+#define ODN_DUT_STAT1_STATUS1_SHIFT 0 -+#define ODN_DUT_STAT1_STATUS1_SIGNED 0 -+ -+/* -+ Register DUT_STAT2 -+*/ -+#define ODN_CORE_DUT_STAT2 0x01A4 -+#define ODN_DUT_STAT2_STATUS2_MASK 0xFFFFFFFFU -+#define ODN_DUT_STAT2_STATUS2_SHIFT 0 -+#define ODN_DUT_STAT2_STATUS2_SIGNED 0 -+ -+/* -+ Register DASH_LEDS -+*/ -+#define ODN_CORE_DASH_LEDS 0x01A8 -+#define ODN_DASH_LEDS_REPA_MASK 0xFFF00000U -+#define ODN_DASH_LEDS_REPA_SHIFT 20 -+#define ODN_DASH_LEDS_REPA_SIGNED 0 -+ -+#define ODN_DASH_LEDS_PIKE_MASK 0x00000FFFU -+#define ODN_DASH_LEDS_PIKE_SHIFT 0 -+#define ODN_DASH_LEDS_PIKE_SIGNED 0 -+ -+/* -+ Register DUT_CLK_INFO -+*/ -+#define ODN_CORE_DUT_CLK_INFO 0x01B0 -+#define ODN_DUT_CLK_INFO_CORE_MASK 0x0000FFFFU -+#define ODN_DUT_CLK_INFO_CORE_SHIFT 0 -+#define ODN_DUT_CLK_INFO_CORE_SIGNED 0 -+ -+#define ODN_DUT_CLK_INFO_MEM_MASK 0xFFFF0000U -+#define ODN_DUT_CLK_INFO_MEM_SHIFT 16 -+#define ODN_DUT_CLK_INFO_MEM_SIGNED 0 -+ -+/* -+ Register DUT_CLK_PHSE -+*/ -+#define ODN_CORE_DUT_CLK_PHSE 0x01B4 -+#define ODN_DUT_CLK_PHSE_MEM_REQ_MASK 0x0000FFFFU -+#define ODN_DUT_CLK_PHSE_MEM_REQ_SHIFT 0 -+#define ODN_DUT_CLK_PHSE_MEM_REQ_SIGNED 0 -+ -+#define 
ODN_DUT_CLK_PHSE_MEM_RD_MASK 0xFFFF0000U -+#define ODN_DUT_CLK_PHSE_MEM_RD_SHIFT 16 -+#define ODN_DUT_CLK_PHSE_MEM_RD_SIGNED 0 -+ -+/* -+ Register CORE_STATUS -+*/ -+#define ODN_CORE_CORE_STATUS 0x0200 -+#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_MASK 0x00000001U -+#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_SHIFT 0 -+#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_SIGNED 0 -+ -+#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_MASK 0x00000010U -+#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_SHIFT 4 -+#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_SIGNED 0 -+ -+#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_MASK 0x00000020U -+#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_SHIFT 5 -+#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_SIGNED 0 -+ -+#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_MASK 0x00000040U -+#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_SHIFT 6 -+#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_SIGNED 0 -+ -+#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_MASK 0x00000080U -+#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_SHIFT 7 -+#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_SIGNED 0 -+ -+#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_MASK 0x00000100U -+#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_SHIFT 8 -+#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_SIGNED 0 -+ -+#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_MASK 0x00000200U -+#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_SHIFT 9 -+#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_SIGNED 0 -+ -+#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_MASK 0x00001000U -+#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SHIFT 12 -+#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SIGNED 0 -+ -+#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_MASK 0x00002000U -+#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SHIFT 13 -+#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SIGNED 0 -+ -+/* -+ Register CORE_CONTROL -+*/ -+#define ODN_CORE_CORE_CONTROL 0x0204 -+#define ODN_CORE_CONTROL_BAR4_OFFSET_MASK 0x0000001FU -+#define ODN_CORE_CONTROL_BAR4_OFFSET_SHIFT 0 -+#define ODN_CORE_CONTROL_BAR4_OFFSET_SIGNED 0 -+ -+#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_MASK 0x00000300U -+#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SHIFT 8 -+#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SIGNED 0 -+ -+#define ODN_CORE_CONTROL_HDMI_MODULE_EN_MASK 0x00001C00U -+#define ODN_CORE_CONTROL_HDMI_MODULE_EN_SHIFT 10 -+#define ODN_CORE_CONTROL_HDMI_MODULE_EN_SIGNED 0 -+ -+#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_MASK 0x00002000U -+#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SHIFT 13 -+#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SIGNED 0 -+ -+#define ODN_CORE_CONTROL_PDP1_OFFSET_MASK 0x00070000U -+#define ODN_CORE_CONTROL_PDP1_OFFSET_SHIFT 16 -+#define ODN_CORE_CONTROL_PDP1_OFFSET_SIGNED 0 -+ -+#define ODN_CORE_CONTROL_PDP2_OFFSET_MASK 0x00700000U -+#define ODN_CORE_CONTROL_PDP2_OFFSET_SHIFT 20 -+#define ODN_CORE_CONTROL_PDP2_OFFSET_SIGNED 0 -+ -+#define ODN_CORE_CONTROL_DUT_OFFSET_MASK 0x07000000U -+#define ODN_CORE_CONTROL_DUT_OFFSET_SHIFT 24 -+#define ODN_CORE_CONTROL_DUT_OFFSET_SIGNED 0 -+ -+/* -+ Register REG_BANK_STATUS -+*/ -+#define ODN_CORE_REG_BANK_STATUS 0x0208 -+#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_MASK 0xFFFFFFFFU -+#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SHIFT 0 -+#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SIGNED 0 -+ -+/* -+ Register MMCM_LOCK_STATUS -+*/ -+#define ODN_CORE_MMCM_LOCK_STATUS 0x020C -+#define ODN_MMCM_LOCK_STATUS_DUT_CORE_MASK 0x00000001U -+#define ODN_MMCM_LOCK_STATUS_DUT_CORE_SHIFT 0 -+#define 
ODN_MMCM_LOCK_STATUS_DUT_CORE_SIGNED 0 -+ -+#define ODN_MMCM_LOCK_STATUS_DUT_IF_MASK 0x00000002U -+#define ODN_MMCM_LOCK_STATUS_DUT_IF_SHIFT 1 -+#define ODN_MMCM_LOCK_STATUS_DUT_IF_SIGNED 0 -+ -+#define ODN_MMCM_LOCK_STATUS_MULTI_MASK 0x00000004U -+#define ODN_MMCM_LOCK_STATUS_MULTI_SHIFT 2 -+#define ODN_MMCM_LOCK_STATUS_MULTI_SIGNED 0 -+ -+#define ODN_MMCM_LOCK_STATUS_PDPP_MASK 0x00000008U -+#define ODN_MMCM_LOCK_STATUS_PDPP_SHIFT 3 -+#define ODN_MMCM_LOCK_STATUS_PDPP_SIGNED 0 -+ -+/* -+ Register GIST_STATUS -+*/ -+#define ODN_CORE_GIST_STATUS 0x0210 -+#define ODN_GIST_STATUS_MST_MASK 0x000001FFU -+#define ODN_GIST_STATUS_MST_SHIFT 0 -+#define ODN_GIST_STATUS_MST_SIGNED 0 -+ -+#define ODN_GIST_STATUS_SLV_MASK 0x001FF000U -+#define ODN_GIST_STATUS_SLV_SHIFT 12 -+#define ODN_GIST_STATUS_SLV_SIGNED 0 -+ -+#define ODN_GIST_STATUS_SLV_OUT_MASK 0x03000000U -+#define ODN_GIST_STATUS_SLV_OUT_SHIFT 24 -+#define ODN_GIST_STATUS_SLV_OUT_SIGNED 0 -+ -+#define ODN_GIST_STATUS_MST_OUT_MASK 0x70000000U -+#define ODN_GIST_STATUS_MST_OUT_SHIFT 28 -+#define ODN_GIST_STATUS_MST_OUT_SIGNED 0 -+ -+/* -+ Register DUT_MST_ADD -+*/ -+#define ODN_CORE_DUT_MST_ADD 0x0214 -+#define ODN_DUT_MST_ADD_SLV_OUT_MASK 0x0000003FU -+#define ODN_DUT_MST_ADD_SLV_OUT_SHIFT 0 -+#define ODN_DUT_MST_ADD_SLV_OUT_SIGNED 0 -+ -+/* -+ Register DUT_MULTIPLX_INFO -+*/ -+#define ODN_CORE_DUT_MULTIPLX_INFO 0x0218 -+#define ODN_DUT_MULTIPLX_INFO_MEM_MASK 0x000000FFU -+#define ODN_DUT_MULTIPLX_INFO_MEM_SHIFT 0 -+#define ODN_DUT_MULTIPLX_INFO_MEM_SIGNED 0 -+ -+/**************************** -+ Generated from: ad_tx.def -+*****************************/ -+ -+/* -+ Register ADT_CONTROL -+*/ -+#define ODN_AD_TX_DEBUG_ADT_CONTROL 0x0000 -+#define ODN_SET_ADTX_READY_MASK 0x00000004U -+#define ODN_SET_ADTX_READY_SHIFT 2 -+#define ODN_SET_ADTX_READY_SIGNED 0 -+ -+#define ODN_SEND_ALIGN_DATA_MASK 0x00000002U -+#define ODN_SEND_ALIGN_DATA_SHIFT 1 -+#define ODN_SEND_ALIGN_DATA_SIGNED 0 -+ -+#define ODN_ENABLE_FLUSHING_MASK 0x00000001U -+#define ODN_ENABLE_FLUSHING_SHIFT 0 -+#define ODN_ENABLE_FLUSHING_SIGNED 0 -+ -+/* -+ Register ADT_STATUS -+*/ -+#define ODN_AD_TX_DEBUG_ADT_STATUS 0x0004 -+#define ODN_REQUEST_COMPLETE_MASK 0x00000001U -+#define ODN_REQUEST_COMPLETE_SHIFT 0 -+#define ODN_REQUEST_COMPLETE_SIGNED 0 -+ -+ -+/****************************** -+ Generated from: mca_debug.def -+*******************************/ -+ -+/* -+ Register MCA_CONTROL -+*/ -+#define ODN_MCA_DEBUG_MCA_CONTROL 0x0000 -+#define ODN_ALIGN_START_MASK 0x00000001U -+#define ODN_ALIGN_START_SHIFT 0 -+#define ODN_ALIGN_START_SIGNED 0 -+ -+/* -+ Register MCA_STATUS -+*/ -+#define ODN_MCA_DEBUG_MCA_STATUS 0x0004 -+#define ODN_TCHECK_SDEBUG_MASK 0x40000000U -+#define ODN_TCHECK_SDEBUG_SHIFT 30 -+#define ODN_TCHECK_SDEBUG_SIGNED 0 -+ -+#define ODN_CHECK_SDEBUG_MASK 0x20000000U -+#define ODN_CHECK_SDEBUG_SHIFT 29 -+#define ODN_CHECK_SDEBUG_SIGNED 0 -+ -+#define ODN_ALIGN_SDEBUG_MASK 0x10000000U -+#define ODN_ALIGN_SDEBUG_SHIFT 28 -+#define ODN_ALIGN_SDEBUG_SIGNED 0 -+ -+#define ODN_FWAIT_SDEBUG_MASK 0x08000000U -+#define ODN_FWAIT_SDEBUG_SHIFT 27 -+#define ODN_FWAIT_SDEBUG_SIGNED 0 -+ -+#define ODN_IDLE_SDEBUG_MASK 0x04000000U -+#define ODN_IDLE_SDEBUG_SHIFT 26 -+#define ODN_IDLE_SDEBUG_SIGNED 0 -+ -+#define ODN_FIFO_FULL_MASK 0x03FF0000U -+#define ODN_FIFO_FULL_SHIFT 16 -+#define ODN_FIFO_FULL_SIGNED 0 -+ -+#define ODN_FIFO_EMPTY_MASK 0x0000FFC0U -+#define ODN_FIFO_EMPTY_SHIFT 6 -+#define ODN_FIFO_EMPTY_SIGNED 0 -+ -+#define ODN_TAG_CHECK_ERROR_MASK 0x00000020U -+#define 
ODN_TAG_CHECK_ERROR_SHIFT 5 -+#define ODN_TAG_CHECK_ERROR_SIGNED 0 -+ -+#define ODN_ALIGN_CHECK_ERROR_MASK 0x00000010U -+#define ODN_ALIGN_CHECK_ERROR_SHIFT 4 -+#define ODN_ALIGN_CHECK_ERROR_SIGNED 0 -+ -+#define ODN_ALIGN_ERROR_MASK 0x00000008U -+#define ODN_ALIGN_ERROR_SHIFT 3 -+#define ODN_ALIGN_ERROR_SIGNED 0 -+ -+#define ODN_TAG_CHECKING_OK_MASK 0x00000004U -+#define ODN_TAG_CHECKING_OK_SHIFT 2 -+#define ODN_TAG_CHECKING_OK_SIGNED 0 -+ -+#define ODN_ALIGN_CHECK_OK_MASK 0x00000002U -+#define ODN_ALIGN_CHECK_OK_SHIFT 1 -+#define ODN_ALIGN_CHECK_OK_SIGNED 0 -+ -+#define ODN_ALIGNMENT_FOUND_MASK 0x00000001U -+#define ODN_ALIGNMENT_FOUND_SHIFT 0 -+#define ODN_ALIGNMENT_FOUND_SIGNED 0 -+ -+ -+/********************************* -+ Generated from: sai_rx_debug.def -+**********************************/ -+ -+/* -+ Register SIG_RESULT -+*/ -+#define ODN_SAI_RX_DEBUG_SIG_RESULT 0x0000 -+#define ODN_SIG_RESULT_VALUE_MASK 0xFFFFFFFFU -+#define ODN_SIG_RESULT_VALUE_SHIFT 0 -+#define ODN_SIG_RESULT_VALUE_SIGNED 0 -+ -+/* -+ Register INIT_SIG -+*/ -+#define ODN_SAI_RX_DEBUG_INIT_SIG 0x0004 -+#define ODN_INIT_SIG_VALUE_MASK 0x00000001U -+#define ODN_INIT_SIG_VALUE_SHIFT 0 -+#define ODN_INIT_SIG_VALUE_SIGNED 0 -+ -+/* -+ Register SAI_BYPASS -+*/ -+#define ODN_SAI_RX_DEBUG_SAI_BYPASS 0x0008 -+#define ODN_BYPASS_CLK_TAPS_VALUE_MASK 0x000003FFU -+#define ODN_BYPASS_CLK_TAPS_VALUE_SHIFT 0 -+#define ODN_BYPASS_CLK_TAPS_VALUE_SIGNED 0 -+ -+#define ODN_BYPASS_SET_MASK 0x00010000U -+#define ODN_BYPASS_SET_SHIFT 16 -+#define ODN_BYPASS_SET_SIGNED 0 -+ -+#define ODN_BYPASS_EN_MASK 0x00100000U -+#define ODN_BYPASS_EN_SHIFT 20 -+#define ODN_BYPASS_EN_SIGNED 0 -+ -+#define ODN_EN_STATUS_MASK 0x01000000U -+#define ODN_EN_STATUS_SHIFT 24 -+#define ODN_EN_STATUS_SIGNED 0 -+ -+/* -+ Register SAI_CLK_TAPS -+*/ -+#define ODN_SAI_RX_DEBUG_SAI_CLK_TAPS 0x000C -+#define ODN_CLK_TAPS_VALUE_MASK 0x000003FFU -+#define ODN_CLK_TAPS_VALUE_SHIFT 0 -+#define ODN_CLK_TAPS_VALUE_SIGNED 0 -+ -+#define ODN_TRAINING_COMPLETE_MASK 0x00010000U -+#define ODN_TRAINING_COMPLETE_SHIFT 16 -+#define ODN_TRAINING_COMPLETE_SIGNED 0 -+ -+/* -+ Register SAI_EYES -+*/ -+#define ODN_SAI_RX_DEBUG_SAI_EYES 0x0010 -+#define ODN_MIN_EYE_END_MASK 0x0000FFFFU -+#define ODN_MIN_EYE_END_SHIFT 0 -+#define ODN_MIN_EYE_END_SIGNED 0 -+ -+#define ODN_MAX_EYE_START_MASK 0xFFFF0000U -+#define ODN_MAX_EYE_START_SHIFT 16 -+#define ODN_MAX_EYE_START_SIGNED 0 -+ -+/* -+ Register SAI_DDR_INVERT -+*/ -+#define ODN_SAI_RX_DEBUG_SAI_DDR_INVERT 0x0014 -+#define ODN_DDR_INVERT_MASK 0x00000001U -+#define ODN_DDR_INVERT_SHIFT 0 -+#define ODN_DDR_INVERT_SIGNED 0 -+ -+#define ODN_OVERIDE_VALUE_MASK 0x00010000U -+#define ODN_OVERIDE_VALUE_SHIFT 16 -+#define ODN_OVERIDE_VALUE_SIGNED 0 -+ -+#define ODN_INVERT_OVERIDE_MASK 0x00100000U -+#define ODN_INVERT_OVERIDE_SHIFT 20 -+#define ODN_INVERT_OVERIDE_SIGNED 0 -+ -+/* -+ Register SAI_TRAIN_ACK -+*/ -+#define ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK 0x0018 -+#define ODN_TRAIN_ACK_FAIL_MASK 0x00000001U -+#define ODN_TRAIN_ACK_FAIL_SHIFT 0 -+#define ODN_TRAIN_ACK_FAIL_SIGNED 0 -+ -+#define ODN_TRAIN_ACK_FAIL_COUNT_MASK 0x000000F0U -+#define ODN_TRAIN_ACK_FAIL_COUNT_SHIFT 4 -+#define ODN_TRAIN_ACK_FAIL_COUNT_SIGNED 0 -+ -+#define ODN_TRAIN_ACK_COMPLETE_MASK 0x00000100U -+#define ODN_TRAIN_ACK_COMPLETE_SHIFT 8 -+#define ODN_TRAIN_ACK_COMPLETE_SIGNED 0 -+ -+#define ODN_TRAIN_ACK_OVERIDE_MASK 0x00001000U -+#define ODN_TRAIN_ACK_OVERIDE_SHIFT 12 -+#define ODN_TRAIN_ACK_OVERIDE_SIGNED 0 -+ -+/* -+ Register SAI_TRAIN_ACK_COUNT -+*/ -+#define 
ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK_COUNT 0x001C -+#define ODN_TRAIN_COUNT_MASK 0xFFFFFFFFU -+#define ODN_TRAIN_COUNT_SHIFT 0 -+#define ODN_TRAIN_COUNT_SIGNED 0 -+ -+/* -+ Register SAI_CHANNEL_NUMBER -+*/ -+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_NUMBER 0x0020 -+#define ODN_CHANNEL_NUMBER_MASK 0x0000FFFFU -+#define ODN_CHANNEL_NUMBER_SHIFT 0 -+#define ODN_CHANNEL_NUMBER_SIGNED 0 -+ -+/* -+ Register SAI_CHANNEL_EYE_START -+*/ -+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_START 0x0024 -+#define ODN_CHANNEL_EYE_START_MASK 0xFFFFFFFFU -+#define ODN_CHANNEL_EYE_START_SHIFT 0 -+#define ODN_CHANNEL_EYE_START_SIGNED 0 -+ -+/* -+ Register SAI_CHANNEL_EYE_END -+*/ -+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_END 0x0028 -+#define ODN_CHANNEL_EYE_END_MASK 0xFFFFFFFFU -+#define ODN_CHANNEL_EYE_END_SHIFT 0 -+#define ODN_CHANNEL_EYE_END_SIGNED 0 -+ -+/* -+ Register SAI_CHANNEL_EYE_PATTERN -+*/ -+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_PATTERN 0x002C -+#define ODN_CHANNEL_EYE_PATTERN_MASK 0xFFFFFFFFU -+#define ODN_CHANNEL_EYE_PATTERN_SHIFT 0 -+#define ODN_CHANNEL_EYE_PATTERN_SIGNED 0 -+ -+/* -+ Register SAI_CHANNEL_EYE_DEBUG -+*/ -+#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_DEBUG 0x0030 -+#define ODN_CHANNEL_EYE_SENSE_MASK 0x00000001U -+#define ODN_CHANNEL_EYE_SENSE_SHIFT 0 -+#define ODN_CHANNEL_EYE_SENSE_SIGNED 0 -+ -+#define ODN_CHANNEL_EYE_COMPLETE_MASK 0x00000002U -+#define ODN_CHANNEL_EYE_COMPLETE_SHIFT 1 -+#define ODN_CHANNEL_EYE_COMPLETE_SIGNED 0 -+ -+ -+/********************************* -+ Generated from: sai_tx_debug.def -+**********************************/ -+ -+/* -+ Register SIG_RESULT -+*/ -+#define ODN_SAI_TX_DEBUG_SIG_RESULT 0x0000 -+#define ODN_TX_SIG_RESULT_VALUE_MASK 0xFFFFFFFFU -+#define ODN_TX_SIG_RESULT_VALUE_SHIFT 0 -+#define ODN_TX_SIG_RESULT_VALUE_SIGNED 0 -+ -+/* -+ Register INIT_SIG -+*/ -+#define ODN_SAI_TX_DEBUG_INIT_SIG 0x0004 -+#define ODN_TX_INIT_SIG_VALUE_MASK 0x00000001U -+#define ODN_TX_INIT_SIG_VALUE_SHIFT 0 -+#define ODN_TX_INIT_SIG_VALUE_SIGNED 0 -+ -+/* -+ Register SAI_BYPASS -+*/ -+#define ODN_SAI_TX_DEBUG_SAI_BYPASS 0x0008 -+#define ODN_TX_BYPASS_EN_MASK 0x00000001U -+#define ODN_TX_BYPASS_EN_SHIFT 0 -+#define ODN_TX_BYPASS_EN_SIGNED 0 -+ -+#define ODN_TX_ACK_RESEND_MASK 0x00000002U -+#define ODN_TX_ACK_RESEND_SHIFT 1 -+#define ODN_TX_ACK_RESEND_SIGNED 0 -+ -+#define ODN_TX_DISABLE_ACK_SEND_MASK 0x00000004U -+#define ODN_TX_DISABLE_ACK_SEND_SHIFT 2 -+#define ODN_TX_DISABLE_ACK_SEND_SIGNED 0 -+ -+/* -+ Register SAI_STATUS -+*/ -+#define ODN_SAI_TX_DEBUG_SAI_STATUS 0x000C -+#define ODN_TX_TRAINING_COMPLETE_MASK 0x00000001U -+#define ODN_TX_TRAINING_COMPLETE_SHIFT 0 -+#define ODN_TX_TRAINING_COMPLETE_SIGNED 0 -+ -+#define ODN_TX_TRAINING_ACK_COMPLETE_MASK 0x00000002U -+#define ODN_TX_TRAINING_ACK_COMPLETE_SHIFT 1 -+#define ODN_TX_TRAINING_ACK_COMPLETE_SIGNED 0 -+ -+ -+ -+#endif /* _ODIN_REGS_H_ */ -+ -+/****************************************************************************** -+ End of file (odin_regs.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/apollo/orion_defs.h b/drivers/gpu/drm/img-rogue/apollo/orion_defs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/orion_defs.h -@@ -0,0 +1,183 @@ -+/**************************************************************************** -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@Description Orion Memory Map - View from PCIe -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+****************************************************************************/ -+ -+#ifndef _ORION_DEFS_H_ -+#define _ORION_DEFS_H_ -+ -+/* -+ * These defines have not been autogenerated -+ * Only values different from Odin will be included here -+ */ -+ -+#define DEVICE_ID_ORION 0x1020 -+ -+/* Odin system register banks */ -+#define SRS_REG_BANK_ODN_CLK_BLK 0x02000 -+ -+/* -+ * Orion CLK regs - the srs_clk_blk module defs are not auto generated -+ */ -+#define SRS_PDP_P_CLK_OUT_DIVIDER_REG1 0x620 -+#define SRS_PDP_PCLK_ODIV1_LO_TIME_MASK 0x0000003FU -+#define SRS_PDP_PCLK_ODIV1_LO_TIME_SHIFT 0 -+#define SRS_PDP_PCLK_ODIV1_HI_TIME_MASK 0x00000FC0U -+#define SRS_PDP_PCLK_ODIV1_HI_TIME_SHIFT 6 -+ -+#define SRS_PDP_P_CLK_OUT_DIVIDER_REG2 0x624 -+#define SRS_PDP_PCLK_ODIV2_NOCOUNT_MASK 0x00000040U -+#define SRS_PDP_PCLK_ODIV2_NOCOUNT_SHIFT 6 -+#define SRS_PDP_PCLK_ODIV2_EDGE_MASK 0x00000080U -+#define SRS_PDP_PCLK_ODIV2_EDGE_SHIFT 7 -+#define SRS_PDP_PCLK_ODIV2_FRAC_MASK 0x00007C00U -+#define SRS_PDP_PCLK_ODIV2_FRAC_SHIFT 10 -+ -+#define SRS_PDP_P_CLK_OUT_DIVIDER_REG3 0x61C -+ -+#define SRS_PDP_M_CLK_OUT_DIVIDER_REG1 0x628 -+#define SRS_PDP_MCLK_ODIV1_LO_TIME_MASK 0x0000003FU -+#define SRS_PDP_MCLK_ODIV1_LO_TIME_SHIFT 0 -+#define SRS_PDP_MCLK_ODIV1_HI_TIME_MASK 0x00000FC0U -+#define SRS_PDP_MCLK_ODIV1_HI_TIME_SHIFT 6 -+ -+#define SRS_PDP_M_CLK_OUT_DIVIDER_REG2 0x62C -+#define SRS_PDP_MCLK_ODIV2_NOCOUNT_MASK 0x00000040U -+#define SRS_PDP_MCLK_ODIV2_NOCOUNT_SHIFT 6 -+#define SRS_PDP_MCLK_ODIV2_EDGE_MASK 0x00000080U -+#define SRS_PDP_MCLK_ODIV2_EDGE_SHIFT 7 -+ -+#define SRS_PDP_P_CLK_MULTIPLIER_REG1 0x650 -+#define SRS_PDP_PCLK_MUL1_LO_TIME_MASK 0x0000003FU -+#define SRS_PDP_PCLK_MUL1_LO_TIME_SHIFT 0 -+#define SRS_PDP_PCLK_MUL1_HI_TIME_MASK 0x00000FC0U -+#define SRS_PDP_PCLK_MUL1_HI_TIME_SHIFT 6 -+ -+#define SRS_PDP_P_CLK_MULTIPLIER_REG2 0x654 -+#define SRS_PDP_PCLK_MUL2_NOCOUNT_MASK 0x00000040U -+#define SRS_PDP_PCLK_MUL2_NOCOUNT_SHIFT 6 -+#define SRS_PDP_PCLK_MUL2_EDGE_MASK 0x00000080U -+#define SRS_PDP_PCLK_MUL2_EDGE_SHIFT 7 -+#define SRS_PDP_PCLK_MUL2_FRAC_MASK 0x00007C00U -+#define SRS_PDP_PCLK_MUL2_FRAC_SHIFT 10 -+ -+#define SRS_PDP_P_CLK_MULTIPLIER_REG3 0x64C -+ -+#define SRS_PDP_P_CLK_IN_DIVIDER_REG 0x658 -+#define SRS_PDP_PCLK_IDIV_LO_TIME_MASK 0x0000003FU -+#define SRS_PDP_PCLK_IDIV_LO_TIME_SHIFT 0 -+#define SRS_PDP_PCLK_IDIV_HI_TIME_MASK 0x00000FC0U -+#define SRS_PDP_PCLK_IDIV_HI_TIME_SHIFT 6 -+#define SRS_PDP_PCLK_IDIV_NOCOUNT_MASK 0x00001000U -+#define SRS_PDP_PCLK_IDIV_NOCOUNT_SHIFT 12 -+#define SRS_PDP_PCLK_IDIV_EDGE_MASK 0x00002000U -+#define SRS_PDP_PCLK_IDIV_EDGE_SHIFT 13 -+ -+/* -+ * DUT core clock input divider, DUT reference clock input divider -+ */ -+#define SRS_DUT_CORE_CLK_OUT_DIVIDER1 0x0020 -+#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_MASK 0x00000FC0U -+#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT 6 -+#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_MASK 0x0000003FU -+#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT 0 -+ -+#define SRS_DUT_CORE_CLK_OUT_DIVIDER2 0x0024 -+#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_MASK 0x00000080U -+#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_SHIFT 7 -+#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_MASK 0x00000040U -+#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT 6 -+ -+#define SRS_DUT_REF_CLK_OUT_DIVIDER1 0x0028 -+#define SRS_DUT_REF_CLK_OUT_DIVIDER1_HI_TIME_MASK 0x00000FC0U -+#define SRS_DUT_REF_CLK_OUT_DIVIDER1_HI_TIME_SHIFT 6 -+#define SRS_DUT_REF_CLK_OUT_DIVIDER1_LO_TIME_MASK 0x0000003FU -+#define 
SRS_DUT_REF_CLK_OUT_DIVIDER1_LO_TIME_SHIFT 0 -+ -+#define SRS_DUT_REF_CLK_OUT_DIVIDER2 0x002C -+#define SRS_DUT_REF_CLK_OUT_DIVIDER2_EDGE_MASK 0x00000080U -+#define SRS_DUT_REF_CLK_OUT_DIVIDER2_EDGE_SHIFT 7 -+#define SRS_DUT_REF_CLK_OUT_DIVIDER2_NOCOUNT_MASK 0x00000040U -+#define SRS_DUT_REF_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT 6 -+ -+/* -+ * DUT interface reference clock input divider -+ */ -+ -+#define SRS_DUT_MEM_CLK_OUT_DIVIDER1 0x0228 -+#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_HI_TIME_MASK 0x00000FC0U -+#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_HI_TIME_SHIFT 6 -+#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_LO_TIME_MASK 0x0000003FU -+#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_LO_TIME_SHIFT 0 -+ -+#define SRS_DUT_MEM_CLK_OUT_DIVIDER2 0x022C -+#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_EDGE_MASK 0x00000080U -+#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_EDGE_SHIFT 7 -+#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_NOCOUNT_MASK 0x00000040U -+#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT 6 -+ -+/* -+ * Min max values from Xilinx Virtex Ultrascale data sheet DS893, -+ * for speed grade 1. All in Hz. -+ */ -+#define SRS_INPUT_CLOCK_SPEED 100000000U -+#define SRS_INPUT_CLOCK_SPEED_MIN 10000000U -+#define SRS_INPUT_CLOCK_SPEED_MAX 800000000U -+#define SRS_OUTPUT_CLOCK_SPEED_MIN 4690000U -+#define SRS_OUTPUT_CLOCK_SPEED_MAX 630000000U -+#define SRS_VCO_MIN 600000000U -+#define SRS_VCO_MAX 1200000000U -+#define SRS_PFD_MIN 10000000U -+#define SRS_PFD_MAX 450000000U -+ -+/* -+ * Orion interrupt flags -+ */ -+#define SRS_INTERRUPT_ENABLE_PDP1 (1 << SRS_INTERRUPT_ENABLE_PDP_SHIFT) -+#define SRS_INTERRUPT_ENABLE_DUT (1 << SRS_INTERRUPT_ENABLE_DUT_SHIFT) -+#define SRS_INTERRUPT_STATUS_PDP1 (1 << SRS_INTERRUPT_STATUS_PDP_SHIFT) -+#define SRS_INTERRUPT_STATUS_DUT (1 << SRS_INTERRUPT_STATUS_DUT_SHIFT) -+#define SRS_INTERRUPT_CLEAR_PDP1 (1 << SRS_INTERRUPT_CLR_PDP_SHIFT) -+#define SRS_INTERRUPT_CLEAR_DUT (1 << SRS_INTERRUPT_CLR_DUT_SHIFT) -+ -+#endif /* _ORION_DEFS_H_ */ -+ -+/***************************************************************************** -+ End of file (orion_defs.h) -+*****************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/apollo/orion_regs.h b/drivers/gpu/drm/img-rogue/apollo/orion_regs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/orion_regs.h -@@ -0,0 +1,439 @@ -+/****************************************************************************** -+@Title Orion system control register definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Orion FPGA register defs for Sirius RTL -+@Author Autogenerated -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+******************************************************************************/ -+ -+#ifndef _OUT_DRV_H_ -+#define _OUT_DRV_H_ -+ -+/* -+ Register ID -+*/ -+#define SRS_CORE_ID 0x0000 -+#define SRS_ID_VARIANT_MASK 0x0000FFFFU -+#define SRS_ID_VARIANT_SHIFT 0 -+#define SRS_ID_VARIANT_SIGNED 0 -+ -+#define SRS_ID_ID_MASK 0xFFFF0000U -+#define SRS_ID_ID_SHIFT 16 -+#define SRS_ID_ID_SIGNED 0 -+ -+/* -+ Register REVISION -+*/ -+#define SRS_CORE_REVISION 0x0004 -+#define SRS_REVISION_MINOR_MASK 0x000000FFU -+#define SRS_REVISION_MINOR_SHIFT 0 -+#define SRS_REVISION_MINOR_SIGNED 0 -+ -+#define SRS_REVISION_MAJOR_MASK 0x00000F00U -+#define SRS_REVISION_MAJOR_SHIFT 8 -+#define SRS_REVISION_MAJOR_SIGNED 0 -+ -+/* -+ Register CHANGE_SET -+*/ -+#define SRS_CORE_CHANGE_SET 0x0008 -+#define SRS_CHANGE_SET_SET_MASK 0xFFFFFFFFU -+#define SRS_CHANGE_SET_SET_SHIFT 0 -+#define SRS_CHANGE_SET_SET_SIGNED 0 -+ -+/* -+ Register USER_ID -+*/ -+#define SRS_CORE_USER_ID 0x000C -+#define SRS_USER_ID_ID_MASK 0x0000000FU -+#define SRS_USER_ID_ID_SHIFT 0 -+#define SRS_USER_ID_ID_SIGNED 0 -+ -+/* -+ Register USER_BUILD -+*/ -+#define SRS_CORE_USER_BUILD 0x0010 -+#define SRS_USER_BUILD_BUILD_MASK 0xFFFFFFFFU -+#define SRS_USER_BUILD_BUILD_SHIFT 0 -+#define SRS_USER_BUILD_BUILD_SIGNED 0 -+ -+/* -+ Register SOFT_RESETN -+*/ -+#define SRS_CORE_SOFT_RESETN 0x0080 -+#define SRS_SOFT_RESETN_DDR_MASK 0x00000001U -+#define SRS_SOFT_RESETN_DDR_SHIFT 0 -+#define SRS_SOFT_RESETN_DDR_SIGNED 0 -+ -+#define SRS_SOFT_RESETN_USB_MASK 0x00000002U -+#define SRS_SOFT_RESETN_USB_SHIFT 1 -+#define SRS_SOFT_RESETN_USB_SIGNED 0 -+ -+#define SRS_SOFT_RESETN_PDP_MASK 0x00000004U -+#define SRS_SOFT_RESETN_PDP_SHIFT 2 -+#define SRS_SOFT_RESETN_PDP_SIGNED 0 -+ -+#define SRS_SOFT_RESETN_GIST_MASK 0x00000008U -+#define SRS_SOFT_RESETN_GIST_SHIFT 3 -+#define SRS_SOFT_RESETN_GIST_SIGNED 0 -+ -+/* -+ Register DUT_SOFT_RESETN -+*/ -+#define SRS_CORE_DUT_SOFT_RESETN 0x0084 -+#define SRS_DUT_SOFT_RESETN_EXTERNAL_MASK 0x00000001U -+#define SRS_DUT_SOFT_RESETN_EXTERNAL_SHIFT 0 -+#define SRS_DUT_SOFT_RESETN_EXTERNAL_SIGNED 0 -+ -+/* -+ Register SOFT_AUTO_RESETN -+*/ -+#define SRS_CORE_SOFT_AUTO_RESETN 0x0088 
-+#define SRS_SOFT_AUTO_RESETN_CFG_MASK 0x00000001U -+#define SRS_SOFT_AUTO_RESETN_CFG_SHIFT 0 -+#define SRS_SOFT_AUTO_RESETN_CFG_SIGNED 0 -+ -+/* -+ Register CLK_GEN_RESET -+*/ -+#define SRS_CORE_CLK_GEN_RESET 0x0090 -+#define SRS_CLK_GEN_RESET_DUT_CORE_MMCM_MASK 0x00000001U -+#define SRS_CLK_GEN_RESET_DUT_CORE_MMCM_SHIFT 0 -+#define SRS_CLK_GEN_RESET_DUT_CORE_MMCM_SIGNED 0 -+ -+#define SRS_CLK_GEN_RESET_DUT_IF_MMCM_MASK 0x00000002U -+#define SRS_CLK_GEN_RESET_DUT_IF_MMCM_SHIFT 1 -+#define SRS_CLK_GEN_RESET_DUT_IF_MMCM_SIGNED 0 -+ -+#define SRS_CLK_GEN_RESET_MULTI_MMCM_MASK 0x00000004U -+#define SRS_CLK_GEN_RESET_MULTI_MMCM_SHIFT 2 -+#define SRS_CLK_GEN_RESET_MULTI_MMCM_SIGNED 0 -+ -+#define SRS_CLK_GEN_RESET_PDP_MMCM_MASK 0x00000008U -+#define SRS_CLK_GEN_RESET_PDP_MMCM_SHIFT 3 -+#define SRS_CLK_GEN_RESET_PDP_MMCM_SIGNED 0 -+ -+/* -+ Register DUT_MEM -+*/ -+#define SRS_CORE_DUT_MEM 0x0120 -+#define SRS_DUT_MEM_READ_RESPONSE_LATENCY_MASK 0x0000FFFFU -+#define SRS_DUT_MEM_READ_RESPONSE_LATENCY_SHIFT 0 -+#define SRS_DUT_MEM_READ_RESPONSE_LATENCY_SIGNED 0 -+ -+#define SRS_DUT_MEM_WRITE_RESPONSE_LATENCY_MASK 0xFFFF0000U -+#define SRS_DUT_MEM_WRITE_RESPONSE_LATENCY_SHIFT 16 -+#define SRS_DUT_MEM_WRITE_RESPONSE_LATENCY_SIGNED 0 -+ -+/* -+ Register APM -+*/ -+#define SRS_CORE_APM 0x0150 -+#define SRS_APM_RESET_EVENT_MASK 0x00000001U -+#define SRS_APM_RESET_EVENT_SHIFT 0 -+#define SRS_APM_RESET_EVENT_SIGNED 0 -+ -+#define SRS_APM_CAPTURE_EVENT_MASK 0x00000002U -+#define SRS_APM_CAPTURE_EVENT_SHIFT 1 -+#define SRS_APM_CAPTURE_EVENT_SIGNED 0 -+ -+/* -+ Register NUM_GPIO -+*/ -+#define SRS_CORE_NUM_GPIO 0x0180 -+#define SRS_NUM_GPIO_NUMBER_MASK 0x0000000FU -+#define SRS_NUM_GPIO_NUMBER_SHIFT 0 -+#define SRS_NUM_GPIO_NUMBER_SIGNED 0 -+ -+/* -+ Register GPIO_EN -+*/ -+#define SRS_CORE_GPIO_EN 0x0184 -+#define SRS_GPIO_EN_DIRECTION_MASK 0x000000FFU -+#define SRS_GPIO_EN_DIRECTION_SHIFT 0 -+#define SRS_GPIO_EN_DIRECTION_SIGNED 0 -+ -+/* -+ Register GPIO -+*/ -+#define SRS_CORE_GPIO 0x0188 -+#define SRS_GPIO_GPIO_MASK 0x000000FFU -+#define SRS_GPIO_GPIO_SHIFT 0 -+#define SRS_GPIO_GPIO_SIGNED 0 -+ -+/* -+ Register SPI_MASTER_IFACE -+*/ -+#define SRS_CORE_SPI_MASTER_IFACE 0x018C -+#define SRS_SPI_MASTER_IFACE_ENABLE_MASK 0x00000001U -+#define SRS_SPI_MASTER_IFACE_ENABLE_SHIFT 0 -+#define SRS_SPI_MASTER_IFACE_ENABLE_SIGNED 0 -+ -+/* -+ Register SRS_IP_STATUS -+*/ -+#define SRS_CORE_SRS_IP_STATUS 0x0200 -+#define SRS_SRS_IP_STATUS_PCIE_USER_LNK_UP_MASK 0x00000001U -+#define SRS_SRS_IP_STATUS_PCIE_USER_LNK_UP_SHIFT 0 -+#define SRS_SRS_IP_STATUS_PCIE_USER_LNK_UP_SIGNED 0 -+ -+#define SRS_SRS_IP_STATUS_MIG_INIT_CALIB_COMPLETE_MASK 0x00000002U -+#define SRS_SRS_IP_STATUS_MIG_INIT_CALIB_COMPLETE_SHIFT 1 -+#define SRS_SRS_IP_STATUS_MIG_INIT_CALIB_COMPLETE_SIGNED 0 -+ -+#define SRS_SRS_IP_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_MASK 0x00000004U -+#define SRS_SRS_IP_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SHIFT 2 -+#define SRS_SRS_IP_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SIGNED 0 -+ -+#define SRS_SRS_IP_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_MASK 0x00000008U -+#define SRS_SRS_IP_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SHIFT 3 -+#define SRS_SRS_IP_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SIGNED 0 -+ -+/* -+ Register CORE_CONTROL -+*/ -+#define SRS_CORE_CORE_CONTROL 0x0204 -+#define SRS_CORE_CONTROL_BAR4_OFFSET_MASK 0x0000001FU -+#define SRS_CORE_CONTROL_BAR4_OFFSET_SHIFT 0 -+#define SRS_CORE_CONTROL_BAR4_OFFSET_SIGNED 0 -+ -+#define SRS_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_MASK 0x00000300U -+#define 
SRS_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SHIFT 8 -+#define SRS_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SIGNED 0 -+ -+#define SRS_CORE_CONTROL_HDMI_MODULE_EN_MASK 0x00001C00U -+#define SRS_CORE_CONTROL_HDMI_MODULE_EN_SHIFT 10 -+#define SRS_CORE_CONTROL_HDMI_MODULE_EN_SIGNED 0 -+ -+/* -+ Register REG_BANK_STATUS -+*/ -+#define SRS_CORE_REG_BANK_STATUS 0x0208 -+#define SRS_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_MASK 0xFFFFFFFFU -+#define SRS_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SHIFT 0 -+#define SRS_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SIGNED 0 -+ -+/* -+ Register MMCM_LOCK_STATUS -+*/ -+#define SRS_CORE_MMCM_LOCK_STATUS 0x020C -+#define SRS_MMCM_LOCK_STATUS_DUT_CORE_MASK 0x00000001U -+#define SRS_MMCM_LOCK_STATUS_DUT_CORE_SHIFT 0 -+#define SRS_MMCM_LOCK_STATUS_DUT_CORE_SIGNED 0 -+ -+#define SRS_MMCM_LOCK_STATUS_DUT_IF_MASK 0x00000002U -+#define SRS_MMCM_LOCK_STATUS_DUT_IF_SHIFT 1 -+#define SRS_MMCM_LOCK_STATUS_DUT_IF_SIGNED 0 -+ -+#define SRS_MMCM_LOCK_STATUS_MULTI_MASK 0x00000004U -+#define SRS_MMCM_LOCK_STATUS_MULTI_SHIFT 2 -+#define SRS_MMCM_LOCK_STATUS_MULTI_SIGNED 0 -+ -+#define SRS_MMCM_LOCK_STATUS_PDP_MASK 0x00000008U -+#define SRS_MMCM_LOCK_STATUS_PDP_SHIFT 3 -+#define SRS_MMCM_LOCK_STATUS_PDP_SIGNED 0 -+ -+/* -+ Register GIST_STATUS -+*/ -+#define SRS_CORE_GIST_STATUS 0x0210 -+#define SRS_GIST_STATUS_MST_MASK 0x000001FFU -+#define SRS_GIST_STATUS_MST_SHIFT 0 -+#define SRS_GIST_STATUS_MST_SIGNED 0 -+ -+#define SRS_GIST_STATUS_SLV_MASK 0x001FF000U -+#define SRS_GIST_STATUS_SLV_SHIFT 12 -+#define SRS_GIST_STATUS_SLV_SIGNED 0 -+ -+#define SRS_GIST_STATUS_SLV_OUT_MASK 0x03000000U -+#define SRS_GIST_STATUS_SLV_OUT_SHIFT 24 -+#define SRS_GIST_STATUS_SLV_OUT_SIGNED 0 -+ -+#define SRS_GIST_STATUS_MST_OUT_MASK 0x70000000U -+#define SRS_GIST_STATUS_MST_OUT_SHIFT 28 -+#define SRS_GIST_STATUS_MST_OUT_SIGNED 0 -+ -+/* -+ Register SENSOR_BOARD -+*/ -+#define SRS_CORE_SENSOR_BOARD 0x0214 -+#define SRS_SENSOR_BOARD_ID_MASK 0x00000003U -+#define SRS_SENSOR_BOARD_ID_SHIFT 0 -+#define SRS_SENSOR_BOARD_ID_SIGNED 0 -+ -+/* -+ Register INTERRUPT_STATUS -+*/ -+#define SRS_CORE_INTERRUPT_STATUS 0x0218 -+#define SRS_INTERRUPT_STATUS_DUT_MASK 0x00000001U -+#define SRS_INTERRUPT_STATUS_DUT_SHIFT 0 -+#define SRS_INTERRUPT_STATUS_DUT_SIGNED 0 -+ -+#define SRS_INTERRUPT_STATUS_PDP_MASK 0x00000002U -+#define SRS_INTERRUPT_STATUS_PDP_SHIFT 1 -+#define SRS_INTERRUPT_STATUS_PDP_SIGNED 0 -+ -+#define SRS_INTERRUPT_STATUS_I2C_MASK 0x00000004U -+#define SRS_INTERRUPT_STATUS_I2C_SHIFT 2 -+#define SRS_INTERRUPT_STATUS_I2C_SIGNED 0 -+ -+#define SRS_INTERRUPT_STATUS_SPI_MASK 0x00000008U -+#define SRS_INTERRUPT_STATUS_SPI_SHIFT 3 -+#define SRS_INTERRUPT_STATUS_SPI_SIGNED 0 -+ -+#define SRS_INTERRUPT_STATUS_APM_MASK 0x00000010U -+#define SRS_INTERRUPT_STATUS_APM_SHIFT 4 -+#define SRS_INTERRUPT_STATUS_APM_SIGNED 0 -+ -+#define SRS_INTERRUPT_STATUS_OS_IRQ_MASK 0x00001FE0U -+#define SRS_INTERRUPT_STATUS_OS_IRQ_SHIFT 5 -+#define SRS_INTERRUPT_STATUS_OS_IRQ_SIGNED 0 -+ -+#define SRS_INTERRUPT_STATUS_IRQ_TEST_MASK 0x40000000U -+#define SRS_INTERRUPT_STATUS_IRQ_TEST_SHIFT 30 -+#define SRS_INTERRUPT_STATUS_IRQ_TEST_SIGNED 0 -+ -+#define SRS_INTERRUPT_STATUS_MASTER_STATUS_MASK 0x80000000U -+#define SRS_INTERRUPT_STATUS_MASTER_STATUS_SHIFT 31 -+#define SRS_INTERRUPT_STATUS_MASTER_STATUS_SIGNED 0 -+ -+/* -+ Register INTERRUPT_ENABLE -+*/ -+#define SRS_CORE_INTERRUPT_ENABLE 0x021C -+#define SRS_INTERRUPT_ENABLE_DUT_MASK 0x00000001U -+#define SRS_INTERRUPT_ENABLE_DUT_SHIFT 0 -+#define SRS_INTERRUPT_ENABLE_DUT_SIGNED 0 -+ -+#define 
SRS_INTERRUPT_ENABLE_PDP_MASK 0x00000002U -+#define SRS_INTERRUPT_ENABLE_PDP_SHIFT 1 -+#define SRS_INTERRUPT_ENABLE_PDP_SIGNED 0 -+ -+#define SRS_INTERRUPT_ENABLE_I2C_MASK 0x00000004U -+#define SRS_INTERRUPT_ENABLE_I2C_SHIFT 2 -+#define SRS_INTERRUPT_ENABLE_I2C_SIGNED 0 -+ -+#define SRS_INTERRUPT_ENABLE_SPI_MASK 0x00000008U -+#define SRS_INTERRUPT_ENABLE_SPI_SHIFT 3 -+#define SRS_INTERRUPT_ENABLE_SPI_SIGNED 0 -+ -+#define SRS_INTERRUPT_ENABLE_APM_MASK 0x00000010U -+#define SRS_INTERRUPT_ENABLE_APM_SHIFT 4 -+#define SRS_INTERRUPT_ENABLE_APM_SIGNED 0 -+ -+#define SRS_INTERRUPT_ENABLE_OS_IRQ_MASK 0x00001FE0U -+#define SRS_INTERRUPT_ENABLE_OS_IRQ_SHIFT 5 -+#define SRS_INTERRUPT_ENABLE_OS_IRQ_SIGNED 0 -+ -+#define SRS_INTERRUPT_ENABLE_IRQ_TEST_MASK 0x40000000U -+#define SRS_INTERRUPT_ENABLE_IRQ_TEST_SHIFT 30 -+#define SRS_INTERRUPT_ENABLE_IRQ_TEST_SIGNED 0 -+ -+#define SRS_INTERRUPT_ENABLE_MASTER_ENABLE_MASK 0x80000000U -+#define SRS_INTERRUPT_ENABLE_MASTER_ENABLE_SHIFT 31 -+#define SRS_INTERRUPT_ENABLE_MASTER_ENABLE_SIGNED 0 -+ -+/* -+ Register INTERRUPT_CLR -+*/ -+#define SRS_CORE_INTERRUPT_CLR 0x0220 -+#define SRS_INTERRUPT_CLR_DUT_MASK 0x00000001U -+#define SRS_INTERRUPT_CLR_DUT_SHIFT 0 -+#define SRS_INTERRUPT_CLR_DUT_SIGNED 0 -+ -+#define SRS_INTERRUPT_CLR_PDP_MASK 0x00000002U -+#define SRS_INTERRUPT_CLR_PDP_SHIFT 1 -+#define SRS_INTERRUPT_CLR_PDP_SIGNED 0 -+ -+#define SRS_INTERRUPT_CLR_I2C_MASK 0x00000004U -+#define SRS_INTERRUPT_CLR_I2C_SHIFT 2 -+#define SRS_INTERRUPT_CLR_I2C_SIGNED 0 -+ -+#define SRS_INTERRUPT_CLR_SPI_MASK 0x00000008U -+#define SRS_INTERRUPT_CLR_SPI_SHIFT 3 -+#define SRS_INTERRUPT_CLR_SPI_SIGNED 0 -+ -+#define SRS_INTERRUPT_CLR_APM_MASK 0x00000010U -+#define SRS_INTERRUPT_CLR_APM_SHIFT 4 -+#define SRS_INTERRUPT_CLR_APM_SIGNED 0 -+ -+#define SRS_INTERRUPT_CLR_OS_IRQ_MASK 0x00001FE0U -+#define SRS_INTERRUPT_CLR_OS_IRQ_SHIFT 5 -+#define SRS_INTERRUPT_CLR_OS_IRQ_SIGNED 0 -+ -+#define SRS_INTERRUPT_CLR_IRQ_TEST_MASK 0x40000000U -+#define SRS_INTERRUPT_CLR_IRQ_TEST_SHIFT 30 -+#define SRS_INTERRUPT_CLR_IRQ_TEST_SIGNED 0 -+ -+#define SRS_INTERRUPT_CLR_MASTER_CLEAR_MASK 0x80000000U -+#define SRS_INTERRUPT_CLR_MASTER_CLEAR_SHIFT 31 -+#define SRS_INTERRUPT_CLR_MASTER_CLEAR_SIGNED 0 -+ -+/* -+ Register INTERRUPT_TEST -+*/ -+#define SRS_CORE_INTERRUPT_TEST 0x0224 -+#define SRS_INTERRUPT_TEST_INTERRUPT_TEST_MASK 0x00000001U -+#define SRS_INTERRUPT_TEST_INTERRUPT_TEST_SHIFT 0 -+#define SRS_INTERRUPT_TEST_INTERRUPT_TEST_SIGNED 0 -+ -+/* -+ Register INTERRUPT_TIMEOUT_CLR -+*/ -+#define SRS_CORE_INTERRUPT_TIMEOUT_CLR 0x0228 -+#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_MASK 0x00000002U -+#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SHIFT 1 -+#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SIGNED 0 -+ -+#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_MASK 0x00000001U -+#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SHIFT 0 -+#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SIGNED 0 -+ -+/* -+ Register INTERRUPT_TIMEOUT -+*/ -+#define SRS_CORE_INTERRUPT_TIMEOUT 0x022C -+#define SRS_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_MASK 0xFFFFFFFFU -+#define SRS_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SHIFT 0 -+#define SRS_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SIGNED 0 -+ -+#endif /* _OUT_DRV_H_ */ -+ -+/****************************************************************************** -+ End of file (orion_regs.h) -+******************************************************************************/ 
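The ODN_* and SRS_* banks above all follow the same pattern: one offset define per register, plus a _MASK/_SHIFT/_SIGNED triplet per field, so driver code updates a field with a read-modify-write. The short sketch below is not part of the patch; the helper names srs_field_get, srs_field_set and srs_enable_pdp_irq are invented here, loosely modelled on the REG_VALUE_GET/REG_VALUE_SET macros that pdp_apollo.c uses further down, and it only assumes the SRS_* defines from orion_regs.h above plus a mapped Orion core register bank.

/*
 * Illustration only -- how the *_MASK / *_SHIFT pairs are meant to be used.
 * Assumes the SRS_* defines from orion_regs.h; not part of the img-rogue patch.
 */
#include <linux/io.h>
#include <linux/types.h>

/* Extract a field from a register value already read from hardware. */
static inline u32 srs_field_get(u32 reg_val, u32 shift, u32 mask)
{
	return (reg_val & mask) >> shift;
}

/* Replace one field in a register value, leaving the other bits untouched. */
static inline u32 srs_field_set(u32 reg_val, u32 field_val, u32 shift, u32 mask)
{
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

/* Enable the PDP interrupt and the master enable bit via read-modify-write. */
static void srs_enable_pdp_irq(void __iomem *srs_core_regs)
{
	u32 val = ioread32(srs_core_regs + SRS_CORE_INTERRUPT_ENABLE);

	val = srs_field_set(val, 1, SRS_INTERRUPT_ENABLE_PDP_SHIFT,
			    SRS_INTERRUPT_ENABLE_PDP_MASK);
	val = srs_field_set(val, 1, SRS_INTERRUPT_ENABLE_MASTER_ENABLE_SHIFT,
			    SRS_INTERRUPT_ENABLE_MASTER_ENABLE_MASK);
	iowrite32(val, srs_core_regs + SRS_CORE_INTERRUPT_ENABLE);
}

The _SIGNED defines are 0 throughout these banks, so plain unsigned shifts as above are sufficient; a signed field would additionally need sign extension after the shift.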
-diff --git a/drivers/gpu/drm/img-rogue/apollo/pdp_apollo.c b/drivers/gpu/drm/img-rogue/apollo/pdp_apollo.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/pdp_apollo.c -@@ -0,0 +1,329 @@ -+/* -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include -+ -+#include "pdp_apollo.h" -+#include "pdp_common.h" -+#include "pdp_regs.h" -+#include "tcf_rgbpdp_regs.h" -+#include "tcf_pll.h" -+ -+/* Map a register to the "pll-regs" region */ -+#define PLL_REG(n) ((n) - TCF_PLL_PLL_PDP_CLK0) -+ -+bool pdp_apollo_clocks_set(struct device *dev, -+ void __iomem *pdp_reg, void __iomem *pll_reg, -+ u32 clock_in_mhz, -+ void __iomem *odn_core_reg, -+ u32 hdisplay, u32 vdisplay) -+{ -+ /* -+ * Setup TCF_CR_PLL_PDP_CLK1TO5 based on the main clock speed -+ * (clock 0 or 3) -+ */ -+ const u32 clock = (clock_in_mhz >= 50) ? 
0 : 0x3; -+ -+ /* Set phase 0, ratio 50:50 and frequency in MHz */ -+ pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_CLK0), clock_in_mhz); -+ -+ pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_CLK1TO5), clock); -+ -+ /* Now initiate reprogramming of the PLLs */ -+ pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_DRP_GO), 0x1); -+ -+ udelay(1000); -+ -+ pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_DRP_GO), 0x0); -+ -+ return true; -+} -+ -+void pdp_apollo_set_updates_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable) -+{ -+#ifdef PDP_VERBOSE -+ dev_info(dev, "Set updates: %s\n", enable ? "enable" : "disable"); -+#endif -+ /* nothing to do here */ -+} -+ -+void pdp_apollo_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable) -+{ -+ u32 value; -+ -+#ifdef PDP_VERBOSE -+ dev_info(dev, "Set syncgen: %s\n", enable ? "enable" : "disable"); -+#endif -+ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL); -+ value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, -+ SYNCACTIVE_SHIFT, SYNCACTIVE_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, value); -+} -+ -+void pdp_apollo_set_powerdwn_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable) -+{ -+ u32 value; -+ -+#ifdef PDP_VERBOSE -+ dev_info(dev, "Set powerdwn: %s\n", enable ? "enable" : "disable"); -+#endif -+ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL); -+ value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, -+ POWERDN_SHIFT, POWERDN_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, value); -+} -+ -+void pdp_apollo_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable) -+{ -+ u32 value; -+ -+#ifdef PDP_VERBOSE -+ dev_info(dev, "Set vblank: %s\n", enable ? "enable" : "disable"); -+#endif -+ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB); -+ value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, -+ INTEN_VBLNK0_SHIFT, INTEN_VBLNK0_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB, value); -+} -+ -+bool pdp_apollo_check_and_clear_vblank(struct device *dev, -+ void __iomem *pdp_reg) -+{ -+ u32 value; -+ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_INTSTAT); -+ -+ if (REG_VALUE_GET(value, INTS_VBLNK0_SHIFT, INTS_VBLNK0_MASK)) { -+ value = REG_VALUE_SET(0, 0x1, -+ INTCLR_VBLNK0_SHIFT, INTCLR_VBLNK0_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_INTCLEAR, value); -+ return true; -+ } -+ return false; -+} -+ -+void pdp_apollo_set_plane_enabled(struct device *dev, void __iomem *pdp_reg, -+ u32 plane, bool enable) -+{ -+ u32 value; -+ -+#ifdef PDP_VERBOSE -+ dev_info(dev, "Set plane %u: %s\n", -+ plane, enable ? "enable" : "disable"); -+#endif -+ -+ if (plane > 0) { -+ dev_err(dev, "Maximum of 1 plane is supported\n"); -+ return; -+ } -+ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL); -+ value = REG_VALUE_SET(value, enable ? 
0x1 : 0x0, -+ STR1STREN_SHIFT, STR1STREN_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL, value); -+} -+ -+void pdp_apollo_reset_planes(struct device *dev, void __iomem *pdp_reg) -+{ -+#ifdef PDP_VERBOSE -+ dev_info(dev, "Reset planes\n"); -+#endif -+ -+ pdp_apollo_set_plane_enabled(dev, pdp_reg, 0, false); -+} -+ -+void pdp_apollo_set_surface(struct device *dev, void __iomem *pdp_reg, -+ u32 plane, u32 address, -+ u32 posx, u32 posy, -+ u32 width, u32 height, u32 stride, -+ u32 format, u32 alpha, bool blend) -+{ -+ u32 value; -+ -+#ifdef PDP_VERBOSE -+ dev_info(dev, -+ "Set surface: size=%dx%d stride=%d format=%d address=0x%x\n", -+ width, height, stride, format, address); -+#endif -+ -+ if (plane > 0) { -+ dev_err(dev, "Maximum of 1 plane is supported\n"); -+ return; -+ } -+ -+ /* Size & format */ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF); -+ value = REG_VALUE_SET(value, width - 1, -+ STR1WIDTH_SHIFT, STR1WIDTH_MASK); -+ value = REG_VALUE_SET(value, height - 1, -+ STR1HEIGHT_SHIFT, STR1HEIGHT_MASK); -+ value = REG_VALUE_SET(value, format, -+ STR1PIXFMT_SHIFT, STR1PIXFMT_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF, value); -+ /* Stride */ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_PDP_STR1POSN); -+ value = REG_VALUE_SET(value, -+ (stride >> DCPDP_STR1POSN_STRIDE_SHIFT) - 1, -+ STR1STRIDE_SHIFT, STR1STRIDE_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_PDP_STR1POSN, value); -+ /* Disable interlaced output */ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL); -+ value = REG_VALUE_SET(value, 0x0, -+ STR1INTFIELD_SHIFT, -+ STR1INTFIELD_MASK); -+ /* Frame buffer base address */ -+ value = REG_VALUE_SET(value, -+ address >> DCPDP_STR1ADDRCTRL_BASE_ADDR_SHIFT, -+ STR1BASE_SHIFT, STR1BASE_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL, value); -+} -+ -+void pdp_apollo_mode_set(struct device *dev, void __iomem *pdp_reg, -+ u32 h_display, u32 v_display, -+ u32 hbps, u32 ht, u32 has, -+ u32 hlbs, u32 hfps, u32 hrbs, -+ u32 vbps, u32 vt, u32 vas, -+ u32 vtbs, u32 vfps, u32 vbbs, -+ bool nhsync, bool nvsync) -+{ -+ u32 value; -+ -+ dev_info(dev, "Set mode: %dx%d\n", h_display, v_display); -+#ifdef PDP_VERBOSE -+ dev_info(dev, " ht: %d hbps %d has %d hlbs %d hfps %d hrbs %d\n", -+ ht, hbps, has, hlbs, hfps, hrbs); -+ dev_info(dev, " vt: %d vbps %d vas %d vtbs %d vfps %d vbbs %d\n", -+ vt, vbps, vas, vtbs, vfps, vbbs); -+#endif -+ -+/* -+ * This gets set in dc_pdp_common.c, but hasn't been necessary here. 
-+ * -+ * if (pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL) -+ * != 0x0000C010) { -+ * // Buffer request threshold -+ * pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL, -+ * 0x00001C10); -+ * } -+ */ -+ -+ /* Border colour */ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL); -+ value = REG_VALUE_SET(value, 0x0, BORDCOL_SHIFT, BORDCOL_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL, value); -+ -+ /* Update control */ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL); -+ value = REG_VALUE_SET(value, 0x0, UPDFIELD_SHIFT, UPDFIELD_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL, value); -+ -+ /* Set hsync timings */ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1); -+ value = REG_VALUE_SET(value, hbps, HBPS_SHIFT, HBPS_MASK); -+ value = REG_VALUE_SET(value, ht, HT_SHIFT, HT_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1, value); -+ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2); -+ value = REG_VALUE_SET(value, has, HAS_SHIFT, HAS_MASK); -+ value = REG_VALUE_SET(value, hlbs, HLBS_SHIFT, HLBS_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2, value); -+ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3); -+ value = REG_VALUE_SET(value, hfps, HFPS_SHIFT, HFPS_MASK); -+ value = REG_VALUE_SET(value, hrbs, HRBS_SHIFT, HRBS_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3, value); -+ -+ /* Set vsync timings */ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1); -+ value = REG_VALUE_SET(value, vbps, VBPS_SHIFT, VBPS_MASK); -+ value = REG_VALUE_SET(value, vt, VT_SHIFT, VT_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1, value); -+ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2); -+ value = REG_VALUE_SET(value, vas, VAS_SHIFT, VAS_MASK); -+ value = REG_VALUE_SET(value, vtbs, VTBS_SHIFT, VTBS_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2, value); -+ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3); -+ value = REG_VALUE_SET(value, vfps, VFPS_SHIFT, VFPS_MASK); -+ value = REG_VALUE_SET(value, vbbs, VBBS_SHIFT, VBBS_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3, value); -+ -+ /* Horizontal data enable */ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL); -+ value = REG_VALUE_SET(value, hlbs, HDES_SHIFT, HDES_MASK); -+ value = REG_VALUE_SET(value, hfps, HDEF_SHIFT, HDEF_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL, value); -+ -+ /* Vertical data enable */ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL); -+ value = REG_VALUE_SET(value, vtbs, VDES_SHIFT, VDES_MASK); -+ value = REG_VALUE_SET(value, vfps, VDEF_SHIFT, VDEF_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL, value); -+ -+ /* Vertical event start and vertical fetch start */ -+ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT); -+ value = REG_VALUE_SET(value, vbps, VFETCH_SHIFT, VFETCH_MASK); -+ value = REG_VALUE_SET(value, vfps, VEVENT_SHIFT, VEVENT_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT, value); -+ -+ /* Set up polarities of sync/blank */ -+ value = REG_VALUE_SET(0, 0x1, BLNKPOL_SHIFT, BLNKPOL_MASK); -+ -+/* -+ * Enable this if you want vblnk1. You also need to change to vblnk1 -+ * in the interrupt handler. 
-+ * -+ * value = REG_VALUE_SET(value, 0x1, FIELDPOL_SHIFT, FIELDPOL_MASK); -+ */ -+ if (nhsync) -+ value = REG_VALUE_SET(value, 0x1, HSPOL_SHIFT, HSPOL_MASK); -+ if (nvsync) -+ value = REG_VALUE_SET(value, 0x1, VSPOL_SHIFT, VSPOL_MASK); -+ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, value); -+} -diff --git a/drivers/gpu/drm/img-rogue/apollo/pdp_apollo.h b/drivers/gpu/drm/img-rogue/apollo/pdp_apollo.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/pdp_apollo.h -@@ -0,0 +1,88 @@ -+/* -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ */ -+ -+#if !defined(__PDP_APOLLO_H__) -+#define __PDP_APOLLO_H__ -+ -+#include -+#include -+ -+bool pdp_apollo_clocks_set(struct device *dev, -+ void __iomem *pdp_reg, void __iomem *pll_reg, -+ u32 clock_in_mhz, -+ void __iomem *odn_core_reg, -+ u32 hdisplay, u32 vdisplay); -+ -+void pdp_apollo_set_updates_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable); -+ -+void pdp_apollo_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable); -+ -+void pdp_apollo_set_powerdwn_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable); -+ -+void pdp_apollo_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable); -+ -+bool pdp_apollo_check_and_clear_vblank(struct device *dev, -+ void __iomem *pdp_reg); -+ -+void pdp_apollo_set_plane_enabled(struct device *dev, void __iomem *pdp_reg, -+ u32 plane, bool enable); -+ -+void pdp_apollo_reset_planes(struct device *dev, void __iomem *pdp_reg); -+ -+void pdp_apollo_set_surface(struct device *dev, void __iomem *pdp_reg, -+ u32 plane, u32 address, -+ u32 posx, u32 posy, -+ u32 width, u32 height, u32 stride, -+ u32 format, u32 alpha, bool blend); -+ -+void pdp_apollo_mode_set(struct device *dev, void __iomem *pdp_reg, -+ u32 h_display, u32 v_display, -+ u32 hbps, u32 ht, u32 has, -+ u32 hlbs, u32 hfps, u32 hrbs, -+ u32 vbps, u32 vt, u32 vas, -+ u32 vtbs, u32 vfps, u32 vbbs, -+ bool nhsync, bool nvsync); -+ -+#endif /* __PDP_APOLLO_H__ */ -diff --git a/drivers/gpu/drm/img-rogue/apollo/pdp_common.h b/drivers/gpu/drm/img-rogue/apollo/pdp_common.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/pdp_common.h -@@ -0,0 +1,107 @@ -+/* -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". 
-+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#if !defined(__PDP_COMMON_H__) -+#define __PDP_COMMON_H__ -+ -+#include -+ -+/*#define PDP_VERBOSE*/ -+ -+#define REG_VALUE_GET(v, s, m) \ -+ (u32)(((v) & (m)) >> (s)) -+#define REG_VALUE_SET(v, b, s, m) \ -+ (u32)(((v) & (u32)~(m)) | (u32)(((b) << (s)) & (m))) -+/* Active low */ -+#define REG_VALUE_LO(v, b, s, m) \ -+ (u32)((v) & ~(u32)(((b) << (s)) & (m))) -+ -+enum pdp_version { -+ PDP_VERSION_APOLLO, -+ PDP_VERSION_ODIN, -+ PDP_VERSION_PLATO, -+}; -+ -+enum pdp_odin_subversion { -+ PDP_ODIN_NONE = 0, -+ PDP_ODIN_ORION, -+}; -+ -+enum pdp_output_device { -+ PDP_OUTPUT_PDP1 = 1, -+ PDP_OUTPUT_PDP2, -+}; -+ -+/* Register R-W */ -+static inline u32 core_rreg32(void __iomem *base, resource_size_t reg) -+{ -+ return ioread32(base + reg); -+} -+ -+static inline void core_wreg32(void __iomem *base, resource_size_t reg, -+ u32 value) -+{ -+ iowrite32(value, base + reg); -+} -+ -+static inline u32 pdp_rreg32(void __iomem *base, resource_size_t reg) -+{ -+ return ioread32(base + reg); -+} -+ -+static inline void pdp_wreg32(void __iomem *base, resource_size_t reg, -+ u32 value) -+{ -+ iowrite32(value, base + reg); -+} -+ -+static inline u32 pll_rreg32(void __iomem *base, resource_size_t reg) -+{ -+ return ioread32(base + reg); -+} -+ -+static inline void pll_wreg32(void __iomem *base, resource_size_t reg, -+ u32 value) -+{ -+ iowrite32(value, base + reg); -+} -+ -+#endif /* __PDP_COMMON_H__ */ -diff --git a/drivers/gpu/drm/img-rogue/apollo/pdp_odin.c b/drivers/gpu/drm/img-rogue/apollo/pdp_odin.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/pdp_odin.c -@@ -0,0 +1,1231 @@ -+/* -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. 
-+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include -+#include -+ -+#include "pdp_common.h" -+#include "pdp_odin.h" -+#include "odin_defs.h" -+#include "odin_regs.h" -+#include "orion_defs.h" -+#include "orion_regs.h" -+#include "pfim_defs.h" -+#include "pfim_regs.h" -+ -+#define ODIN_PLL_REG(n) ((n) - ODN_PDP_P_CLK_OUT_DIVIDER_REG1) -+ -+struct odin_displaymode { -+ int w; /* display width */ -+ int h; /* display height */ -+ int id; /* pixel clock input divider */ -+ int m; /* pixel clock multiplier */ -+ int od1; /* pixel clock output divider */ -+ int od2; /* mem clock output divider */ -+}; -+ -+struct pfim_property { -+ u32 tiles_per_line; -+ u32 tile_type; -+ u32 tile_xsize; -+ u32 tile_ysize; -+}; -+ -+/* -+ * For Odin, only the listed modes below are supported. -+ * 1080p id=5, m=37, od1=5, od2=5 -+ * 720p id=5, m=37, od1=10, od2=5 -+ * 1280x1024 id=1, m=14, od1=13, od2=8 -+ * 1440x900 id=5, m=53, od1=10, od2=8 -+ * 1280x960 id=3, m=40, od1=13, od2=9 -+ * 1024x768 id=1, m=13, od1=20, od2=10 -+ * 800x600 id=2, m=20, od1=25, od2=7 -+ * 640x480 id=1, m=12, od1=48, od2=9 -+ * ... where id is the PDP_P_CLK input divider, -+ * m is PDP_P_CLK multiplier regs 1 to 3 -+ * od1 is PDP_P_clk output divider regs 1 to 3 -+ * od2 is PDP_M_clk output divider regs 1 to 2 -+ */ -+static const struct odin_displaymode odin_modes[] = { -+ {.w = 1920, .h = 1080, .id = 5, .m = 37, .od1 = 5, .od2 = 5}, -+ {.w = 1280, .h = 720, .id = 5, .m = 37, .od1 = 10, .od2 = 5}, -+ {.w = 1280, .h = 1024, .id = 1, .m = 14, .od1 = 13, .od2 = 10}, -+ {.w = 1440, .h = 900, .id = 5, .m = 53, .od1 = 10, .od2 = 8}, -+ {.w = 1280, .h = 960, .id = 3, .m = 40, .od1 = 13, .od2 = 9}, -+ {.w = 1024, .h = 768, .id = 1, .m = 13, .od1 = 20, .od2 = 10}, -+ {.w = 800, .h = 600, .id = 2, .m = 20, .od1 = 25, .od2 = 7}, -+ {.w = 640, .h = 480, .id = 1, .m = 12, .od1 = 48, .od2 = 9}, -+ {.w = 0, .h = 0, .id = 0, .m = 0, .od1 = 0, .od2 = 0} -+}; -+ -+/* -+ * For Orion, only the listed modes below are supported. -+ * 1920x1080 mode is currently not supported. 
-+ */ -+static const struct odin_displaymode orion_modes[] = { -+ {.w = 1280, .h = 720, .id = 5, .m = 37, .od1 = 10, .od2 = 7}, -+ {.w = 1280, .h = 1024, .id = 1, .m = 12, .od1 = 11, .od2 = 10}, -+ {.w = 1440, .h = 900, .id = 5, .m = 53, .od1 = 10, .od2 = 9}, -+ {.w = 1280, .h = 960, .id = 5, .m = 51, .od1 = 10, .od2 = 9}, -+ {.w = 1024, .h = 768, .id = 3, .m = 33, .od1 = 17, .od2 = 10}, -+ {.w = 800, .h = 600, .id = 2, .m = 24, .od1 = 31, .od2 = 12}, -+ {.w = 640, .h = 480, .id = 1, .m = 12, .od1 = 50, .od2 = 12}, -+ {.w = 0, .h = 0, .id = 0, .m = 0, .od1 = 0, .od2 = 0} -+}; -+ -+static const struct pfim_property pfim_properties[] = { -+ [ODIN_PFIM_MOD_LINEAR] = {0}, -+ [ODIN_PFIM_FBCDC_8X8_V12] = {.tiles_per_line = 8, -+ .tile_type = ODN_PFIM_TILETYPE_8X8, -+ .tile_xsize = 8, -+ .tile_ysize = 8}, -+ [ODIN_PFIM_FBCDC_16X4_V12] = {.tiles_per_line = 16, -+ .tile_type = ODN_PFIM_TILETYPE_16X4, -+ .tile_xsize = 16, -+ .tile_ysize = 4}, -+}; -+ -+static const u32 GRPH_SURF_OFFSET[] = { -+ ODN_PDP_GRPH1SURF_OFFSET, -+ ODN_PDP_GRPH2SURF_OFFSET, -+ ODN_PDP_VID1SURF_OFFSET, -+ ODN_PDP_GRPH4SURF_OFFSET -+}; -+static const u32 GRPH_SURF_GRPH_PIXFMT_SHIFT[] = { -+ ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT, -+ ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SHIFT, -+ ODN_PDP_VID1SURF_VID1PIXFMT_SHIFT, -+ ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SHIFT -+}; -+static const u32 GRPH_SURF_GRPH_PIXFMT_MASK[] = { -+ ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK, -+ ODN_PDP_GRPH2SURF_GRPH2PIXFMT_MASK, -+ ODN_PDP_VID1SURF_VID1PIXFMT_MASK, -+ ODN_PDP_GRPH4SURF_GRPH4PIXFMT_MASK -+}; -+static const u32 GRPH_GALPHA_OFFSET[] = { -+ ODN_PDP_GRPH1GALPHA_OFFSET, -+ ODN_PDP_GRPH2GALPHA_OFFSET, -+ ODN_PDP_VID1GALPHA_OFFSET, -+ ODN_PDP_GRPH4GALPHA_OFFSET -+}; -+static const u32 GRPH_GALPHA_GRPH_GALPHA_SHIFT[] = { -+ ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SHIFT, -+ ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SHIFT, -+ ODN_PDP_VID1GALPHA_VID1GALPHA_SHIFT, -+ ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SHIFT -+}; -+static const u32 GRPH_GALPHA_GRPH_GALPHA_MASK[] = { -+ ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_MASK, -+ ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_MASK, -+ ODN_PDP_VID1GALPHA_VID1GALPHA_MASK, -+ ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_MASK -+}; -+static const u32 GRPH_CTRL_OFFSET[] = { -+ ODN_PDP_GRPH1CTRL_OFFSET, -+ ODN_PDP_GRPH2CTRL_OFFSET, -+ ODN_PDP_VID1CTRL_OFFSET, -+ ODN_PDP_GRPH4CTRL_OFFSET, -+}; -+static const u32 GRPH_CTRL_GRPH_BLEND_SHIFT[] = { -+ ODN_PDP_GRPH1CTRL_GRPH1BLEND_SHIFT, -+ ODN_PDP_GRPH2CTRL_GRPH2BLEND_SHIFT, -+ ODN_PDP_VID1CTRL_VID1BLEND_SHIFT, -+ ODN_PDP_GRPH4CTRL_GRPH4BLEND_SHIFT -+}; -+static const u32 GRPH_CTRL_GRPH_BLEND_MASK[] = { -+ ODN_PDP_GRPH1CTRL_GRPH1BLEND_MASK, -+ ODN_PDP_GRPH2CTRL_GRPH2BLEND_MASK, -+ ODN_PDP_VID1CTRL_VID1BLEND_MASK, -+ ODN_PDP_GRPH4CTRL_GRPH4BLEND_MASK -+}; -+static const u32 GRPH_CTRL_GRPH_BLENDPOS_SHIFT[] = { -+ ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SHIFT, -+ ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SHIFT, -+ ODN_PDP_VID1CTRL_VID1BLENDPOS_SHIFT, -+ ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SHIFT -+}; -+static const u32 GRPH_CTRL_GRPH_BLENDPOS_MASK[] = { -+ ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_MASK, -+ ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_MASK, -+ ODN_PDP_VID1CTRL_VID1BLENDPOS_MASK, -+ ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_MASK -+}; -+static const u32 GRPH_CTRL_GRPH_STREN_SHIFT[] = { -+ ODN_PDP_GRPH1CTRL_GRPH1STREN_SHIFT, -+ ODN_PDP_GRPH2CTRL_GRPH2STREN_SHIFT, -+ ODN_PDP_VID1CTRL_VID1STREN_SHIFT, -+ ODN_PDP_GRPH4CTRL_GRPH4STREN_SHIFT -+}; -+static const u32 GRPH_CTRL_GRPH_STREN_MASK[] = { -+ ODN_PDP_GRPH1CTRL_GRPH1STREN_MASK, -+ ODN_PDP_GRPH2CTRL_GRPH2STREN_MASK, -+ ODN_PDP_VID1CTRL_VID1STREN_MASK, -+ 
ODN_PDP_GRPH4CTRL_GRPH4STREN_MASK -+}; -+static const u32 GRPH_POSN_OFFSET[] = { -+ ODN_PDP_GRPH1POSN_OFFSET, -+ ODN_PDP_GRPH2POSN_OFFSET, -+ ODN_PDP_VID1POSN_OFFSET, -+ ODN_PDP_GRPH4POSN_OFFSET -+}; -+static const u32 GRPH_POSN_GRPH_XSTART_SHIFT[] = { -+ ODN_PDP_GRPH1POSN_GRPH1XSTART_SHIFT, -+ ODN_PDP_GRPH2POSN_GRPH2XSTART_SHIFT, -+ ODN_PDP_VID1POSN_VID1XSTART_SHIFT, -+ ODN_PDP_GRPH4POSN_GRPH4XSTART_SHIFT, -+}; -+static const u32 GRPH_POSN_GRPH_XSTART_MASK[] = { -+ ODN_PDP_GRPH1POSN_GRPH1XSTART_MASK, -+ ODN_PDP_GRPH2POSN_GRPH2XSTART_MASK, -+ ODN_PDP_VID1POSN_VID1XSTART_MASK, -+ ODN_PDP_GRPH4POSN_GRPH4XSTART_MASK, -+}; -+static const u32 GRPH_POSN_GRPH_YSTART_SHIFT[] = { -+ ODN_PDP_GRPH1POSN_GRPH1YSTART_SHIFT, -+ ODN_PDP_GRPH2POSN_GRPH2YSTART_SHIFT, -+ ODN_PDP_VID1POSN_VID1YSTART_SHIFT, -+ ODN_PDP_GRPH4POSN_GRPH4YSTART_SHIFT, -+}; -+static const u32 GRPH_POSN_GRPH_YSTART_MASK[] = { -+ ODN_PDP_GRPH1POSN_GRPH1YSTART_MASK, -+ ODN_PDP_GRPH2POSN_GRPH2YSTART_MASK, -+ ODN_PDP_VID1POSN_VID1YSTART_MASK, -+ ODN_PDP_GRPH4POSN_GRPH4YSTART_MASK, -+}; -+static const u32 GRPH_SIZE_OFFSET[] = { -+ ODN_PDP_GRPH1SIZE_OFFSET, -+ ODN_PDP_GRPH2SIZE_OFFSET, -+ ODN_PDP_VID1SIZE_OFFSET, -+ ODN_PDP_GRPH4SIZE_OFFSET, -+}; -+static const u32 GRPH_SIZE_GRPH_WIDTH_SHIFT[] = { -+ ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT, -+ ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SHIFT, -+ ODN_PDP_VID1SIZE_VID1WIDTH_SHIFT, -+ ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SHIFT -+}; -+static const u32 GRPH_SIZE_GRPH_WIDTH_MASK[] = { -+ ODN_PDP_GRPH1SIZE_GRPH1WIDTH_MASK, -+ ODN_PDP_GRPH2SIZE_GRPH2WIDTH_MASK, -+ ODN_PDP_VID1SIZE_VID1WIDTH_MASK, -+ ODN_PDP_GRPH4SIZE_GRPH4WIDTH_MASK -+}; -+static const u32 GRPH_SIZE_GRPH_HEIGHT_SHIFT[] = { -+ ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT, -+ ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SHIFT, -+ ODN_PDP_VID1SIZE_VID1HEIGHT_SHIFT, -+ ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SHIFT -+}; -+static const u32 GRPH_SIZE_GRPH_HEIGHT_MASK[] = { -+ ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_MASK, -+ ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_MASK, -+ ODN_PDP_VID1SIZE_VID1HEIGHT_MASK, -+ ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_MASK -+}; -+static const u32 GRPH_STRIDE_OFFSET[] = { -+ ODN_PDP_GRPH1STRIDE_OFFSET, -+ ODN_PDP_GRPH2STRIDE_OFFSET, -+ ODN_PDP_VID1STRIDE_OFFSET, -+ ODN_PDP_GRPH4STRIDE_OFFSET -+}; -+static const u32 GRPH_STRIDE_GRPH_STRIDE_SHIFT[] = { -+ ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT, -+ ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SHIFT, -+ ODN_PDP_VID1STRIDE_VID1STRIDE_SHIFT, -+ ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SHIFT -+}; -+static const u32 GRPH_STRIDE_GRPH_STRIDE_MASK[] = { -+ ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_MASK, -+ ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_MASK, -+ ODN_PDP_VID1STRIDE_VID1STRIDE_MASK, -+ ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_MASK -+}; -+static const u32 GRPH_INTERLEAVE_CTRL_OFFSET[] = { -+ ODN_PDP_GRPH1INTERLEAVE_CTRL_OFFSET, -+ ODN_PDP_GRPH2INTERLEAVE_CTRL_OFFSET, -+ ODN_PDP_VID1INTERLEAVE_CTRL_OFFSET, -+ ODN_PDP_GRPH4INTERLEAVE_CTRL_OFFSET -+}; -+static const u32 GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_SHIFT[] = { -+ ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SHIFT, -+ ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SHIFT, -+ ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SHIFT, -+ ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SHIFT -+}; -+static const u32 GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_MASK[] = { -+ ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_MASK, -+ ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_MASK, -+ ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_MASK, -+ ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_MASK -+}; -+static const u32 GRPH_BASEADDR_OFFSET[] = { -+ ODN_PDP_GRPH1BASEADDR_OFFSET, -+ ODN_PDP_GRPH2BASEADDR_OFFSET, 
-+ ODN_PDP_VID1BASEADDR_OFFSET, -+ ODN_PDP_GRPH4BASEADDR_OFFSET -+}; -+ -+static const u32 ODN_INTERNAL_RESETN_PDP_MASK[] = { -+ ODN_INTERNAL_RESETN_PDP1_MASK, -+ ODN_INTERNAL_RESETN_PDP2_MASK -+}; -+ -+static const u32 ODN_INTERNAL_RESETN_PDP_SHIFT[] = { -+ ODN_INTERNAL_RESETN_PDP1_SHIFT, -+ ODN_INTERNAL_RESETN_PDP2_SHIFT -+}; -+ -+static void get_odin_clock_settings(u32 value, u32 *lo_time, u32 *hi_time, -+ u32 *no_count, u32 *edge) -+{ -+ u32 lt, ht; -+ -+ /* If the value is 1, High Time & Low Time are both set to 1 -+ * and the NOCOUNT bit is set to 1. -+ */ -+ if (value == 1) { -+ *lo_time = 1; -+ *hi_time = 1; -+ -+ /* If od is an odd number then write 1 to NO_COUNT -+ * otherwise write 0. -+ */ -+ *no_count = 1; -+ -+ /* If m is and odd number then write 1 to EDGE bit of MR2 -+ * otherwise write 0. -+ * If id is an odd number then write 1 to EDGE bit of ID -+ * otherwise write 0. -+ */ -+ *edge = 0; -+ return; -+ } -+ *no_count = 0; -+ -+ /* High Time & Low time is half the value listed for each PDP mode */ -+ lt = value>>1; -+ ht = lt; -+ -+ /* If the value is odd, Low Time is rounded up to nearest integer -+ * and High Time is rounded down, and Edge is set to 1. -+ */ -+ if (value & 1) { -+ lt++; -+ -+ /* If m is and odd number then write 1 to EDGE bit of MR2 -+ * otherwise write 0. -+ * If id is an odd number then write 1 to EDGE bit of ID -+ * otherwise write 0. -+ */ -+ *edge = 1; -+ -+ } else { -+ *edge = 0; -+ } -+ *hi_time = ht; -+ *lo_time = lt; -+} -+ -+static const struct odin_displaymode *get_odin_mode(int w, int h, -+ enum pdp_odin_subversion pv) -+{ -+ struct odin_displaymode *pdp_modes; -+ int n = 0; -+ -+ if (pv == PDP_ODIN_ORION) -+ pdp_modes = (struct odin_displaymode *)orion_modes; -+ else -+ pdp_modes = (struct odin_displaymode *)odin_modes; -+ -+ do { -+ if ((pdp_modes[n].w == w) && (pdp_modes[n].h == h)) -+ return pdp_modes+n; -+ -+ } while (pdp_modes[n++].w); -+ -+ return NULL; -+} -+ -+bool pdp_odin_clocks_set(struct device *dev, -+ void __iomem *pdp_reg, void __iomem *pll_reg, -+ u32 clock_freq, u32 dev_num, -+ void __iomem *odn_core_reg, -+ u32 hdisplay, u32 vdisplay, -+ enum pdp_odin_subversion pdpsubv) -+{ -+ u32 value; -+ const struct odin_displaymode *odispl; -+ u32 hi_time, lo_time, no_count, edge; -+ u32 core_id, core_rev; -+ -+ core_id = pdp_rreg32(pdp_reg, ODN_PDP_CORE_ID_OFFSET); -+ dev_info(dev, "Odin-PDP CORE_ID %08X\n", core_id); -+ -+ core_rev = pdp_rreg32(odn_core_reg, ODN_PDP_CORE_REV_OFFSET); -+ dev_info(dev, "Odin-PDP CORE_REV %08X\n", core_rev); -+ -+ odispl = get_odin_mode(hdisplay, vdisplay, pdpsubv); -+ if (!odispl) { -+ dev_err(dev, "Display mode not supported.\n"); -+ return false; -+ } -+ -+ /* -+ * The PDP uses a Xilinx clock that requires read -+ * modify write for all registers. -+ * It is essential that only the specified bits are changed -+ * because other bits are in use. -+ * To change PDP clocks reset PDP & PDP mmcm (PLL) first, -+ * then apply changes and then un-reset mmcm & PDP. -+ * Warm reset will keep the changes. -+ * wr 0x000080 0x1f7 ; # reset pdp -+ * wr 0x000090 8 ; # reset pdp mmcm -+ * then apply clock changes, then -+ * wr 0x000090 0x0 ; # un-reset pdp mmcm -+ * wr 0x000080 0x1ff ; # un-reset pdp -+ */ -+ -+ /* -+ * Hold Odin PDP in reset while changing the clock regs. -+ * Set the PDP bit of ODN_CORE_INTERNAL_RESETN low to reset. 
-+ * set bit 3 to 0 (active low) -+ */ -+ if (pdpsubv == PDP_ODIN_ORION) { -+ value = core_rreg32(odn_core_reg, SRS_CORE_SOFT_RESETN); -+ value = REG_VALUE_LO(value, 1, SRS_SOFT_RESETN_PDP_SHIFT, -+ SRS_SOFT_RESETN_PDP_MASK); -+ core_wreg32(odn_core_reg, SRS_CORE_SOFT_RESETN, value); -+ } else { -+ value = core_rreg32(odn_core_reg, ODN_CORE_INTERNAL_RESETN); -+ value = REG_VALUE_LO(value, 1, -+ ODN_INTERNAL_RESETN_PDP_SHIFT[dev_num], -+ ODN_INTERNAL_RESETN_PDP_MASK[dev_num]); -+ core_wreg32(odn_core_reg, ODN_CORE_INTERNAL_RESETN, value); -+ } -+ -+ /* -+ * Hold the PDP MMCM in reset while changing the clock regs. -+ * Set the PDP bit of ODN_CORE_CLK_GEN_RESET high to reset. -+ */ -+ value = core_rreg32(odn_core_reg, ODN_CORE_CLK_GEN_RESET); -+ value = REG_VALUE_SET(value, 0x1, -+ ODN_CLK_GEN_RESET_PDP_MMCM_SHIFT, -+ ODN_CLK_GEN_RESET_PDP_MMCM_MASK); -+ core_wreg32(odn_core_reg, ODN_CORE_CLK_GEN_RESET, value); -+ -+ /* Pixel clock Input divider */ -+ get_odin_clock_settings(odispl->id, &lo_time, &hi_time, -+ &no_count, &edge); -+ -+ value = pll_rreg32(pll_reg, -+ ODIN_PLL_REG(ODN_PDP_P_CLK_IN_DIVIDER_REG)); -+ value = REG_VALUE_SET(value, lo_time, -+ ODN_PDP_PCLK_IDIV_LO_TIME_SHIFT, -+ ODN_PDP_PCLK_IDIV_LO_TIME_MASK); -+ value = REG_VALUE_SET(value, hi_time, -+ ODN_PDP_PCLK_IDIV_HI_TIME_SHIFT, -+ ODN_PDP_PCLK_IDIV_HI_TIME_MASK); -+ value = REG_VALUE_SET(value, no_count, -+ ODN_PDP_PCLK_IDIV_NOCOUNT_SHIFT, -+ ODN_PDP_PCLK_IDIV_NOCOUNT_MASK); -+ value = REG_VALUE_SET(value, edge, -+ ODN_PDP_PCLK_IDIV_EDGE_SHIFT, -+ ODN_PDP_PCLK_IDIV_EDGE_MASK); -+ pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_IN_DIVIDER_REG), -+ value); -+ -+ /* Pixel clock Output divider */ -+ get_odin_clock_settings(odispl->od1, &lo_time, &hi_time, -+ &no_count, &edge); -+ -+ /* Pixel clock Output divider reg1 */ -+ value = pll_rreg32(pll_reg, -+ ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG1)); -+ value = REG_VALUE_SET(value, lo_time, -+ ODN_PDP_PCLK_ODIV1_LO_TIME_SHIFT, -+ ODN_PDP_PCLK_ODIV1_LO_TIME_MASK); -+ value = REG_VALUE_SET(value, hi_time, -+ ODN_PDP_PCLK_ODIV1_HI_TIME_SHIFT, -+ ODN_PDP_PCLK_ODIV1_HI_TIME_MASK); -+ pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG1), -+ value); -+ -+ /* Pixel clock Output divider reg2 */ -+ value = pll_rreg32(pll_reg, -+ ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG2)); -+ value = REG_VALUE_SET(value, no_count, -+ ODN_PDP_PCLK_ODIV2_NOCOUNT_SHIFT, -+ ODN_PDP_PCLK_ODIV2_NOCOUNT_MASK); -+ value = REG_VALUE_SET(value, edge, -+ ODN_PDP_PCLK_ODIV2_EDGE_SHIFT, -+ ODN_PDP_PCLK_ODIV2_EDGE_MASK); -+ if (pdpsubv == PDP_ODIN_ORION) { -+ /* -+ * Fractional divide for PLL registers currently does not work -+ * on Sirius, as duly mentioned on the TRM. However, owing to -+ * what most likely is a design flaw in the RTL, the -+ * following register and a later one have their fractional -+ * divide fields set to values other than 0 by default, -+ * unlike on Odin. 
This prevents the PDP device from working -+ * on Orion -+ */ -+ value = REG_VALUE_LO(value, 0x1F, SRS_PDP_PCLK_ODIV2_FRAC_SHIFT, -+ SRS_PDP_PCLK_ODIV2_FRAC_MASK); -+ } -+ pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG2), -+ value); -+ -+ /* Pixel clock Multiplier */ -+ get_odin_clock_settings(odispl->m, &lo_time, &hi_time, -+ &no_count, &edge); -+ -+ /* Pixel clock Multiplier reg1 */ -+ value = pll_rreg32(pll_reg, -+ ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG1)); -+ value = REG_VALUE_SET(value, lo_time, -+ ODN_PDP_PCLK_MUL1_LO_TIME_SHIFT, -+ ODN_PDP_PCLK_MUL1_LO_TIME_MASK); -+ value = REG_VALUE_SET(value, hi_time, -+ ODN_PDP_PCLK_MUL1_HI_TIME_SHIFT, -+ ODN_PDP_PCLK_MUL1_HI_TIME_MASK); -+ pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG1), -+ value); -+ -+ /* Pixel clock Multiplier reg2 */ -+ value = pll_rreg32(pll_reg, -+ ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG2)); -+ value = REG_VALUE_SET(value, no_count, -+ ODN_PDP_PCLK_MUL2_NOCOUNT_SHIFT, -+ ODN_PDP_PCLK_MUL2_NOCOUNT_MASK); -+ value = REG_VALUE_SET(value, edge, -+ ODN_PDP_PCLK_MUL2_EDGE_SHIFT, -+ ODN_PDP_PCLK_MUL2_EDGE_MASK); -+ if (pdpsubv == PDP_ODIN_ORION) { -+ /* Zero out fractional divide fields */ -+ value = REG_VALUE_LO(value, 0x1F, SRS_PDP_PCLK_MUL2_FRAC_SHIFT, -+ SRS_PDP_PCLK_MUL2_FRAC_MASK); -+ } -+ pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG2), -+ value); -+ -+ /* Mem clock Output divider */ -+ get_odin_clock_settings(odispl->od2, &lo_time, &hi_time, -+ &no_count, &edge); -+ -+ /* Mem clock Output divider reg1 */ -+ value = pll_rreg32(pll_reg, -+ ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG1)); -+ value = REG_VALUE_SET(value, lo_time, -+ ODN_PDP_MCLK_ODIV1_LO_TIME_SHIFT, -+ ODN_PDP_MCLK_ODIV1_LO_TIME_MASK); -+ value = REG_VALUE_SET(value, hi_time, -+ ODN_PDP_MCLK_ODIV1_HI_TIME_SHIFT, -+ ODN_PDP_MCLK_ODIV1_HI_TIME_MASK); -+ pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG1), -+ value); -+ -+ /* Mem clock Output divider reg2 */ -+ value = pll_rreg32(pll_reg, -+ ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG2)); -+ value = REG_VALUE_SET(value, no_count, -+ ODN_PDP_MCLK_ODIV2_NOCOUNT_SHIFT, -+ ODN_PDP_MCLK_ODIV2_NOCOUNT_MASK); -+ value = REG_VALUE_SET(value, edge, -+ ODN_PDP_MCLK_ODIV2_EDGE_SHIFT, -+ ODN_PDP_MCLK_ODIV2_EDGE_MASK); -+ pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG2), -+ value); -+ -+ /* -+ * Take the PDP MMCM out of reset. -+ * Set the PDP bit of ODN_CORE_CLK_GEN_RESET to 0. -+ */ -+ value = core_rreg32(odn_core_reg, ODN_CORE_CLK_GEN_RESET); -+ value = REG_VALUE_LO(value, 1, ODN_CLK_GEN_RESET_PDP_MMCM_SHIFT, -+ ODN_CLK_GEN_RESET_PDP_MMCM_MASK); -+ core_wreg32(odn_core_reg, ODN_CORE_CLK_GEN_RESET, value); -+ -+ /* -+ * Wait until MMCM_LOCK_STATUS_PDPP bit is '1' in register -+ * MMCM_LOCK_STATUS. Issue an error if this does not -+ * go to '1' within 500ms. -+ */ -+ { -+ int count; -+ bool locked = false; -+ -+ for (count = 0; count < 10; count++) { -+ value = core_rreg32(odn_core_reg, -+ ODN_CORE_MMCM_LOCK_STATUS); -+ if (value & ODN_MMCM_LOCK_STATUS_PDPP) { -+ locked = true; -+ break; -+ } -+ msleep(50); -+ } -+ -+ if (!locked) { -+ dev_err(dev, "The MMCM pll did not lock\n"); -+ return false; -+ } -+ } -+ -+ /* -+ * Take Odin-PDP out of reset: -+ * Set the PDP bit of ODN_CORE_INTERNAL_RESETN to 1. 
-+ */ -+ if (pdpsubv == PDP_ODIN_ORION) { -+ value = core_rreg32(odn_core_reg, SRS_CORE_SOFT_RESETN); -+ value = REG_VALUE_SET(value, 1, SRS_SOFT_RESETN_PDP_SHIFT, -+ SRS_SOFT_RESETN_PDP_MASK); -+ core_wreg32(odn_core_reg, SRS_CORE_SOFT_RESETN, value); -+ } else { -+ value = core_rreg32(odn_core_reg, ODN_CORE_INTERNAL_RESETN); -+ value = REG_VALUE_SET(value, 1, -+ ODN_INTERNAL_RESETN_PDP_SHIFT[dev_num], -+ ODN_INTERNAL_RESETN_PDP_MASK[dev_num]); -+ core_wreg32(odn_core_reg, ODN_CORE_INTERNAL_RESETN, value); -+ } -+ -+ return true; -+} -+ -+void pdp_odin_set_updates_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable) -+{ -+ u32 value = enable ? -+ (1 << ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SHIFT | -+ 1 << ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT) : -+ 0x0; -+ -+#ifdef PDP_VERBOSE -+ dev_info(dev, "Set updates: %s\n", enable ? "enable" : "disable"); -+#endif -+ -+ pdp_wreg32(pdp_reg, ODN_PDP_REGISTER_UPDATE_CTRL_OFFSET, value); -+} -+ -+void pdp_odin_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable) -+{ -+ u32 value; -+ -+#ifdef PDP_VERBOSE -+ dev_info(dev, "Set syncgen: %s\n", enable ? "enable" : "disable"); -+#endif -+ -+ value = pdp_rreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET); -+ -+ value = REG_VALUE_SET(value, -+ enable ? ODN_SYNC_GEN_ENABLE : ODN_SYNC_GEN_DISABLE, -+ ODN_PDP_SYNCCTRL_SYNCACTIVE_SHIFT, -+ ODN_PDP_SYNCCTRL_SYNCACTIVE_MASK); -+ -+ /* Invert the pixel clock */ -+ value = REG_VALUE_SET(value, ODN_PIXEL_CLOCK_INVERTED, -+ ODN_PDP_SYNCCTRL_CLKPOL_SHIFT, -+ ODN_PDP_SYNCCTRL_CLKPOL_MASK); -+ -+ /* Set the Horizontal Sync Polarity to active high */ -+ value = REG_VALUE_LO(value, ODN_HSYNC_POLARITY_ACTIVE_HIGH, -+ ODN_PDP_SYNCCTRL_HSPOL_SHIFT, -+ ODN_PDP_SYNCCTRL_HSPOL_MASK); -+ -+ pdp_wreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET, value); -+ -+ /* Check for underruns when the sync generator -+ * is being turned off. -+ */ -+ if (!enable) { -+ value = pdp_rreg32(pdp_reg, ODN_PDP_INTSTAT_OFFSET); -+ value &= ODN_PDP_INTSTAT_ALL_OURUN_MASK; -+ -+ if (value) { -+ dev_warn(dev, "underruns detected. status=0x%08X\n", -+ value); -+ } else { -+ dev_info(dev, "no underruns detected\n"); -+ } -+ } -+} -+ -+void pdp_odin_set_powerdwn_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable) -+{ -+ u32 value; -+ -+#ifdef PDP_VERBOSE -+ dev_info(dev, "Set powerdwn: %s\n", enable ? "enable" : "disable"); -+#endif -+ -+ value = pdp_rreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET); -+ -+ value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, -+ ODN_PDP_SYNCCTRL_POWERDN_SHIFT, -+ ODN_PDP_SYNCCTRL_POWERDN_MASK); -+ -+ pdp_wreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET, value); -+} -+ -+void pdp_odin_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable) -+{ -+ u32 value; -+ -+#ifdef PDP_VERBOSE -+ dev_info(dev, "Set vblank: %s\n", enable ? "enable" : "disable"); -+#endif -+ -+ pdp_wreg32(pdp_reg, ODN_PDP_INTCLR_OFFSET, ODN_PDP_INTCLR_ALL); -+ -+ value = pdp_rreg32(pdp_reg, ODN_PDP_INTENAB_OFFSET); -+ value = REG_VALUE_SET(value, enable ? 
0x1 : 0x0, -+ ODN_PDP_INTENAB_INTEN_VBLNK0_SHIFT, -+ ODN_PDP_INTENAB_INTEN_VBLNK0_MASK); -+ pdp_wreg32(pdp_reg, ODN_PDP_INTENAB_OFFSET, value); -+} -+ -+bool pdp_odin_check_and_clear_vblank(struct device *dev, -+ void __iomem *pdp_reg) -+{ -+ u32 value; -+ -+ value = pdp_rreg32(pdp_reg, ODN_PDP_INTSTAT_OFFSET); -+ -+ if (REG_VALUE_GET(value, -+ ODN_PDP_INTSTAT_INTS_VBLNK0_SHIFT, -+ ODN_PDP_INTSTAT_INTS_VBLNK0_MASK)) { -+ pdp_wreg32(pdp_reg, ODN_PDP_INTCLR_OFFSET, -+ (1 << ODN_PDP_INTCLR_INTCLR_VBLNK0_SHIFT)); -+ -+ return true; -+ } -+ return false; -+} -+ -+void pdp_odin_set_plane_enabled(struct device *dev, void __iomem *pdp_reg, -+ u32 plane, bool enable) -+{ -+ u32 value; -+ -+#ifdef PDP_VERBOSE -+ dev_info(dev, "Set plane %u: %s\n", -+ plane, enable ? "enable" : "disable"); -+#endif -+ -+ if (plane > 3) { -+ dev_err(dev, "Maximum of 4 planes are supported\n"); -+ return; -+ } -+ -+ value = pdp_rreg32(pdp_reg, GRPH_CTRL_OFFSET[plane]); -+ value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, -+ GRPH_CTRL_GRPH_STREN_SHIFT[plane], -+ GRPH_CTRL_GRPH_STREN_MASK[plane]); -+ pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[plane], value); -+} -+ -+void pdp_odin_reset_planes(struct device *dev, void __iomem *pdp_reg) -+{ -+#ifdef PDP_VERBOSE -+ dev_info(dev, "Reset planes\n"); -+#endif -+ -+ pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[0], 0x00000000); -+ pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[1], 0x01000000); -+ pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[2], 0x02000000); -+ pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[3], 0x03000000); -+} -+ -+static unsigned int pfim_pixel_format(u32 pdp_format) -+{ -+ u32 pfim_pixformat; -+ -+ switch (pdp_format) { -+ case ODN_PDP_SURF_PIXFMT_ARGB8888: -+ pfim_pixformat = ODN_PFIM_PIXFMT_ARGB8888; -+ break; -+ case ODN_PDP_SURF_PIXFMT_RGB565: -+ pfim_pixformat = ODN_PFIM_PIXFMT_RGB565; -+ break; -+ default: -+ WARN(true, "Unknown Odin pixel format: %u defaulting to ARGB8888\n", -+ pdp_format); -+ pfim_pixformat = ODN_PFIM_PIXFMT_ARGB8888; -+ } -+ -+ return pfim_pixformat; -+} -+ -+static unsigned int pfim_tiles_line(u32 width, -+ u32 pfim_format, -+ u32 fbc_mode) -+{ -+ u32 bpp; -+ u32 tpl; -+ -+ switch (pfim_format) { -+ case ODN_PFIM_PIXFMT_ARGB8888: -+ bpp = 32; -+ break; -+ case ODN_PFIM_PIXFMT_RGB565: -+ bpp = 16; -+ break; -+ default: -+ WARN(true, "Unknown PFIM pixel format: %u, defaulting to 32 bpp\n", -+ pfim_format); -+ bpp = 32; -+ } -+ -+ if (fbc_mode < ODIN_PFIM_FBCDC_MAX) { -+ tpl = pfim_properties[fbc_mode].tiles_per_line; -+ } else { -+ WARN(true, "Unknown FBC compression format: %u, defaulting to 8X8_V12\n", -+ fbc_mode); -+ tpl = pfim_properties[ODIN_PFIM_FBCDC_8X8_V12].tiles_per_line; -+ } -+ -+ return ((width/tpl) / (32/bpp)); -+} -+ -+static void pfim_modeset(void __iomem *pfim_reg) -+{ -+ u32 value; -+ -+ /* -+ * Odin PDP can address up to 32 bits of PCI BAR4, -+ * so this register is not necessary -+ */ -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB, 0x00); -+ -+ /* -+ * Following registers are only used with YUV buffers, -+ * which we currently do not support -+ */ -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_UV_BASE_ADDR_LSB, 0x00); -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_UV_BASE_ADDR_MSB, 0x00); -+ pdp_wreg32(pfim_reg, CR_PFIM_PDP_Y_BASE_ADDR, 0x00); -+ pdp_wreg32(pfim_reg, CR_PFIM_PDP_UV_BASE_ADDR, 0x00); -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_CR_Y_VAL0, 0x00); -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_CR_UV_VAL0, 0x00); -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_CR_Y_VAL1, 0x00); -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_CR_UV_VAL1, 0x00); -+ -+ /* -+ * PFIM tags are used for 
distinguishing between Y and UV plane -+ * request when that is the kind of format we use. Thus, any -+ * random value will do, as explained in the TRM -+ */ -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_REQ_CONTEXT, 0x00); -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_REQ_TAG, PFIM_RND_TAG); -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_REQ_SB_TAG, 0x00); -+ -+ /* Default tile value if tile is found to be corrupted */ -+ value = REG_VALUE_SET(0, 0x01, -+ CR_PFIM_FBDC_FILTER_ENABLE_SHIFT, -+ CR_PFIM_FBDC_FILTER_ENABLE_MASK); -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_FILTER_ENABLE, value); -+ -+ /* Recommended values for corrupt tile substitution */ -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_CR_CH0123_VAL0, 0x00); -+ value = REG_VALUE_SET(0, 0x01000000, -+ CR_PFIM_FBDC_CR_CH0123_VAL1_SHIFT, -+ CR_PFIM_FBDC_CR_CH0123_VAL1_MASK); -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_CR_CH0123_VAL1, value); -+ -+ /* Only used when requesting a clear tile */ -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_CLEAR_COLOUR_LSB, 0x00); -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_CLEAR_COLOUR_MSB, 0x00); -+ -+ /* Current PDP revision does not support lossy formats */ -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_REQ_LOSSY, 0x00); -+ -+ /* Force invalidation of FBC headers at beginning of render */ -+ value = REG_VALUE_SET(0, 0x01, -+ CR_PFIM_FBDC_HDR_INVAL_REQ_SHIFT, -+ CR_PFIM_FBDC_HDR_INVAL_REQ_MASK); -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_HDR_INVAL_REQ, value); -+} -+ -+static unsigned int pfim_num_tiles(struct device *dev, u32 width, u32 height, -+ u32 pfim_format, u32 fbc_mode) -+{ -+ u32 phys_width, phys_height; -+ u32 walign, halign; -+ u32 tile_mult; -+ u32 num_tiles; -+ u32 bpp; -+ -+ switch (pfim_format) { -+ case ODN_PFIM_PIXFMT_ARGB8888: -+ bpp = 32; -+ tile_mult = 4; -+ break; -+ case ODN_PFIM_PIXFMT_RGB565: -+ bpp = 16; -+ tile_mult = 2; -+ break; -+ default: -+ dev_warn(dev, "WARNING: Wrong PFIM pixel format: %d\n", -+ pfim_format); -+ return 0; -+ } -+ -+ switch (fbc_mode) { -+ case ODIN_PFIM_FBCDC_8X8_V12: -+ switch (bpp) { -+ case 16: /* 16x8 */ -+ walign = 16; -+ break; -+ case 32: /* 8x8 */ -+ walign = 8; -+ break; -+ default: -+ dev_warn(dev, "WARNING: Wrong bit depth: %d\n", -+ bpp); -+ return 0; -+ } -+ halign = 8; -+ break; -+ case ODIN_PFIM_FBCDC_16X4_V12: -+ switch (bpp) { -+ case 16: /* 32x4 */ -+ walign = 32; -+ break; -+ case 32: /* 16x4 */ -+ walign = 16; -+ break; -+ default: -+ dev_warn(dev, "WARNING: Wrong bit depth: %d\n", -+ bpp); -+ return 0; -+ } -+ halign = 4; -+ break; -+ default: -+ dev_warn(dev, "WARNING: Wrong FBC compression format: %d\n", -+ fbc_mode); -+ return 0; -+ } -+ -+ phys_width = ALIGN(width, walign); -+ phys_height = ALIGN(height, halign); -+ num_tiles = phys_width / pfim_properties[fbc_mode].tile_xsize; -+ num_tiles *= phys_height / pfim_properties[fbc_mode].tile_ysize; -+ num_tiles *= tile_mult; -+ num_tiles /= 4; -+ -+ return num_tiles ? 
num_tiles : 1; -+} -+ -+static void pfim_set_surface(struct device *dev, -+ void __iomem *pfim_reg, -+ u32 width, -+ u32 height, -+ u32 pdp_format, -+ u32 fbc_mode) -+{ -+ u32 pfim_pixformat = pfim_pixel_format(pdp_format); -+ u32 tiles_line = pfim_tiles_line(width, pfim_pixformat, fbc_mode); -+ u32 tile_type = pfim_properties[fbc_mode].tile_type; -+ u32 num_tiles = pfim_num_tiles(dev, width, height, -+ pfim_pixformat, fbc_mode); -+ -+ pdp_wreg32(pfim_reg, CR_PFIM_NUM_TILES, num_tiles); -+ pdp_wreg32(pfim_reg, CR_PFIM_TILES_PER_LINE, tiles_line); -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_PIX_FORMAT, pfim_pixformat); -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_TILE_TYPE, tile_type); -+} -+ -+void pdp_odin_set_surface(struct device *dev, void __iomem *pdp_reg, -+ u32 plane, u32 address, u32 offset, -+ u32 posx, u32 posy, -+ u32 width, u32 height, u32 stride, -+ u32 format, u32 alpha, bool blend, -+ void __iomem *pfim_reg, u32 fbcm) -+{ -+ /* -+ * Use a blender based on the plane number (this defines the Z -+ * ordering) -+ */ -+ static const int GRPH_BLEND_POS[] = { 0x0, 0x1, 0x2, 0x3 }; -+ u32 blend_mode; -+ u32 value; -+ -+#ifdef PDP_VERBOSE -+ dev_info(dev, -+ "Set surface: plane=%d pos=%d:%d size=%dx%d stride=%d format=%d alpha=%d address=0x%x\n", -+ plane, posx, posy, width, height, stride, -+ format, alpha, address); -+#endif -+ -+ if (plane > 3) { -+ dev_err(dev, "Maximum of 4 planes are supported\n"); -+ return; -+ } -+ -+ if (address & 0xf) -+ dev_warn(dev, "The frame buffer address is not aligned\n"); -+ -+ if (fbcm && pfim_reg) { -+ pfim_set_surface(dev, pfim_reg, -+ width, height, -+ format, fbcm); -+ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB, -+ (address + offset) >> 6); -+ } else -+ pdp_wreg32(pdp_reg, GRPH_BASEADDR_OFFSET[plane], address); -+ -+ /* Pos */ -+ value = REG_VALUE_SET(0x0, posx, -+ GRPH_POSN_GRPH_XSTART_SHIFT[plane], -+ GRPH_POSN_GRPH_XSTART_MASK[plane]); -+ value = REG_VALUE_SET(value, posy, -+ GRPH_POSN_GRPH_YSTART_SHIFT[plane], -+ GRPH_POSN_GRPH_YSTART_MASK[plane]); -+ pdp_wreg32(pdp_reg, GRPH_POSN_OFFSET[plane], value); -+ -+ /* Size */ -+ value = REG_VALUE_SET(0x0, width - 1, -+ GRPH_SIZE_GRPH_WIDTH_SHIFT[plane], -+ GRPH_SIZE_GRPH_WIDTH_MASK[plane]); -+ value = REG_VALUE_SET(value, height - 1, -+ GRPH_SIZE_GRPH_HEIGHT_SHIFT[plane], -+ GRPH_SIZE_GRPH_HEIGHT_MASK[plane]); -+ pdp_wreg32(pdp_reg, GRPH_SIZE_OFFSET[plane], value); -+ -+ /* Stride */ -+ value = REG_VALUE_SET(0x0, (stride >> 4) - 1, -+ GRPH_STRIDE_GRPH_STRIDE_SHIFT[plane], -+ GRPH_STRIDE_GRPH_STRIDE_MASK[plane]); -+ pdp_wreg32(pdp_reg, GRPH_STRIDE_OFFSET[plane], value); -+ -+ /* Interlace mode: progressive */ -+ value = REG_VALUE_SET(0x0, ODN_INTERLACE_DISABLE, -+ GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_SHIFT[plane], -+ GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_MASK[plane]); -+ pdp_wreg32(pdp_reg, GRPH_INTERLEAVE_CTRL_OFFSET[plane], value); -+ -+ /* Format */ -+ value = REG_VALUE_SET(0x0, format, -+ GRPH_SURF_GRPH_PIXFMT_SHIFT[plane], -+ GRPH_SURF_GRPH_PIXFMT_MASK[plane]); -+ pdp_wreg32(pdp_reg, GRPH_SURF_OFFSET[plane], value); -+ -+ /* Global alpha (0...1023) */ -+ value = REG_VALUE_SET(0x0, ((1024 * 256) / 255 * alpha) / 256, -+ GRPH_GALPHA_GRPH_GALPHA_SHIFT[plane], -+ GRPH_GALPHA_GRPH_GALPHA_MASK[plane]); -+ pdp_wreg32(pdp_reg, GRPH_GALPHA_OFFSET[plane], value); -+ value = pdp_rreg32(pdp_reg, GRPH_CTRL_OFFSET[plane]); -+ -+ /* Blend mode */ -+ if (blend) { -+ if (alpha != 255) -+ blend_mode = 0x2; /* 0b10 = global alpha blending */ -+ else -+ blend_mode = 0x3; /* 0b11 = pixel alpha blending */ -+ } else { -+ 
blend_mode = 0x0; /* 0b00 = no blending */ -+ } -+ value = REG_VALUE_SET(value, blend_mode, -+ GRPH_CTRL_GRPH_BLEND_SHIFT[plane], -+ GRPH_CTRL_GRPH_BLEND_MASK[plane]); -+ -+ /* Blend position */ -+ value = REG_VALUE_SET(value, GRPH_BLEND_POS[plane], -+ GRPH_CTRL_GRPH_BLENDPOS_SHIFT[plane], -+ GRPH_CTRL_GRPH_BLENDPOS_MASK[plane]); -+ pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[plane], value); -+} -+ -+void pdp_odin_mode_set(struct device *dev, void __iomem *pdp_reg, -+ u32 h_display, u32 v_display, -+ u32 hbps, u32 ht, u32 has, -+ u32 hlbs, u32 hfps, u32 hrbs, -+ u32 vbps, u32 vt, u32 vas, -+ u32 vtbs, u32 vfps, u32 vbbs, -+ bool nhsync, bool nvsync, -+ void __iomem *pfim_reg) -+{ -+ u32 value; -+ -+ dev_info(dev, "Set mode: %dx%d\n", h_display, v_display); -+#ifdef PDP_VERBOSE -+ dev_info(dev, " ht: %d hbps %d has %d hlbs %d hfps %d hrbs %d\n", -+ ht, hbps, has, hlbs, hfps, hrbs); -+ dev_info(dev, " vt: %d vbps %d vas %d vtbs %d vfps %d vbbs %d\n", -+ vt, vbps, vas, vtbs, vfps, vbbs); -+#endif -+ -+ /* Border colour: 10bits per channel */ -+ pdp_wreg32(pdp_reg, ODN_PDP_BORDCOL_R_OFFSET, 0x0); -+ pdp_wreg32(pdp_reg, ODN_PDP_BORDCOL_GB_OFFSET, 0x0); -+ -+ /* Background: 10bits per channel */ -+ value = pdp_rreg32(pdp_reg, ODN_PDP_BGNDCOL_AR_OFFSET); -+ value = REG_VALUE_SET(value, 0x3ff, -+ ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SHIFT, -+ ODN_PDP_BGNDCOL_AR_BGNDCOL_A_MASK); -+ value = REG_VALUE_SET(value, 0x0, -+ ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SHIFT, -+ ODN_PDP_BGNDCOL_AR_BGNDCOL_R_MASK); -+ pdp_wreg32(pdp_reg, ODN_PDP_BGNDCOL_AR_OFFSET, value); -+ -+ value = pdp_rreg32(pdp_reg, ODN_PDP_BGNDCOL_GB_OFFSET); -+ value = REG_VALUE_SET(value, 0x0, -+ ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SHIFT, -+ ODN_PDP_BGNDCOL_GB_BGNDCOL_G_MASK); -+ value = REG_VALUE_SET(value, 0x0, -+ ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SHIFT, -+ ODN_PDP_BGNDCOL_GB_BGNDCOL_B_MASK); -+ pdp_wreg32(pdp_reg, ODN_PDP_BGNDCOL_GB_OFFSET, value); -+ pdp_wreg32(pdp_reg, ODN_PDP_BORDCOL_GB_OFFSET, 0x0); -+ -+ /* Update control */ -+ value = pdp_rreg32(pdp_reg, ODN_PDP_UPDCTRL_OFFSET); -+ value = REG_VALUE_SET(value, 0x0, -+ ODN_PDP_UPDCTRL_UPDFIELD_SHIFT, -+ ODN_PDP_UPDCTRL_UPDFIELD_MASK); -+ pdp_wreg32(pdp_reg, ODN_PDP_UPDCTRL_OFFSET, value); -+ -+ /* Horizontal timing */ -+ value = pdp_rreg32(pdp_reg, ODN_PDP_HSYNC1_OFFSET); -+ value = REG_VALUE_SET(value, hbps, -+ ODN_PDP_HSYNC1_HBPS_SHIFT, -+ ODN_PDP_HSYNC1_HBPS_MASK); -+ value = REG_VALUE_SET(value, ht, -+ ODN_PDP_HSYNC1_HT_SHIFT, -+ ODN_PDP_HSYNC1_HT_MASK); -+ pdp_wreg32(pdp_reg, ODN_PDP_HSYNC1_OFFSET, value); -+ -+ value = pdp_rreg32(pdp_reg, ODN_PDP_HSYNC2_OFFSET); -+ value = REG_VALUE_SET(value, has, -+ ODN_PDP_HSYNC2_HAS_SHIFT, -+ ODN_PDP_HSYNC2_HAS_MASK); -+ value = REG_VALUE_SET(value, hlbs, -+ ODN_PDP_HSYNC2_HLBS_SHIFT, -+ ODN_PDP_HSYNC2_HLBS_MASK); -+ pdp_wreg32(pdp_reg, ODN_PDP_HSYNC2_OFFSET, value); -+ -+ value = pdp_rreg32(pdp_reg, ODN_PDP_HSYNC3_OFFSET); -+ value = REG_VALUE_SET(value, hfps, -+ ODN_PDP_HSYNC3_HFPS_SHIFT, -+ ODN_PDP_HSYNC3_HFPS_MASK); -+ value = REG_VALUE_SET(value, hrbs, -+ ODN_PDP_HSYNC3_HRBS_SHIFT, -+ ODN_PDP_HSYNC3_HRBS_MASK); -+ pdp_wreg32(pdp_reg, ODN_PDP_HSYNC3_OFFSET, value); -+ -+ /* Vertical timing */ -+ value = pdp_rreg32(pdp_reg, ODN_PDP_VSYNC1_OFFSET); -+ value = REG_VALUE_SET(value, vbps, -+ ODN_PDP_VSYNC1_VBPS_SHIFT, -+ ODN_PDP_VSYNC1_VBPS_MASK); -+ value = REG_VALUE_SET(value, vt, -+ ODN_PDP_VSYNC1_VT_SHIFT, -+ ODN_PDP_VSYNC1_VT_MASK); -+ pdp_wreg32(pdp_reg, ODN_PDP_VSYNC1_OFFSET, value); -+ -+ value = pdp_rreg32(pdp_reg, ODN_PDP_VSYNC2_OFFSET); -+ value = 
REG_VALUE_SET(value, vas, -+ ODN_PDP_VSYNC2_VAS_SHIFT, -+ ODN_PDP_VSYNC2_VAS_MASK); -+ value = REG_VALUE_SET(value, vtbs, -+ ODN_PDP_VSYNC2_VTBS_SHIFT, -+ ODN_PDP_VSYNC2_VTBS_MASK); -+ pdp_wreg32(pdp_reg, ODN_PDP_VSYNC2_OFFSET, value); -+ -+ value = pdp_rreg32(pdp_reg, ODN_PDP_VSYNC3_OFFSET); -+ value = REG_VALUE_SET(value, vfps, -+ ODN_PDP_VSYNC3_VFPS_SHIFT, -+ ODN_PDP_VSYNC3_VFPS_MASK); -+ value = REG_VALUE_SET(value, vbbs, -+ ODN_PDP_VSYNC3_VBBS_SHIFT, -+ ODN_PDP_VSYNC3_VBBS_MASK); -+ pdp_wreg32(pdp_reg, ODN_PDP_VSYNC3_OFFSET, value); -+ -+ /* Horizontal data enable */ -+ value = pdp_rreg32(pdp_reg, ODN_PDP_HDECTRL_OFFSET); -+ value = REG_VALUE_SET(value, hlbs, -+ ODN_PDP_HDECTRL_HDES_SHIFT, -+ ODN_PDP_HDECTRL_HDES_MASK); -+ value = REG_VALUE_SET(value, hfps, -+ ODN_PDP_HDECTRL_HDEF_SHIFT, -+ ODN_PDP_HDECTRL_HDEF_MASK); -+ pdp_wreg32(pdp_reg, ODN_PDP_HDECTRL_OFFSET, value); -+ -+ /* Vertical data enable */ -+ value = pdp_rreg32(pdp_reg, ODN_PDP_VDECTRL_OFFSET); -+ value = REG_VALUE_SET(value, vtbs, -+ ODN_PDP_VDECTRL_VDES_SHIFT, -+ ODN_PDP_VDECTRL_VDES_MASK); -+ value = REG_VALUE_SET(value, vfps, -+ ODN_PDP_VDECTRL_VDEF_SHIFT, -+ ODN_PDP_VDECTRL_VDEF_MASK); -+ pdp_wreg32(pdp_reg, ODN_PDP_VDECTRL_OFFSET, value); -+ -+ /* Vertical event start and vertical fetch start */ -+ value = pdp_rreg32(pdp_reg, ODN_PDP_VEVENT_OFFSET); -+ value = REG_VALUE_SET(value, vbps, -+ ODN_PDP_VEVENT_VFETCH_SHIFT, -+ ODN_PDP_VEVENT_VFETCH_MASK); -+ pdp_wreg32(pdp_reg, ODN_PDP_VEVENT_OFFSET, value); -+ -+ /* Set up polarities of sync/blank */ -+ value = REG_VALUE_SET(0, 0x1, -+ ODN_PDP_SYNCCTRL_BLNKPOL_SHIFT, -+ ODN_PDP_SYNCCTRL_BLNKPOL_MASK); -+ if (nhsync) -+ value = REG_VALUE_SET(value, 0x1, -+ ODN_PDP_SYNCCTRL_HSPOL_SHIFT, -+ ODN_PDP_SYNCCTRL_HSPOL_MASK); -+ if (nvsync) -+ value = REG_VALUE_SET(value, 0x1, -+ ODN_PDP_SYNCCTRL_VSPOL_SHIFT, -+ ODN_PDP_SYNCCTRL_VSPOL_MASK); -+ pdp_wreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET, value); -+ -+ /* PDP framebuffer compression setup */ -+ if (pfim_reg) -+ pfim_modeset(pfim_reg); -+} -diff --git a/drivers/gpu/drm/img-rogue/apollo/pdp_odin.h b/drivers/gpu/drm/img-rogue/apollo/pdp_odin.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/pdp_odin.h -@@ -0,0 +1,95 @@ -+/* -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. 
-+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#if !defined(__PDP_ODIN_H__) -+#define __PDP_ODIN_H__ -+ -+#include -+#include -+ -+/* include here for ODN_PDP_SURF_PIXFMT_ARGB8888 as this is part of the API */ -+#include "odin_pdp_regs.h" -+#include "pdp_common.h" -+ -+bool pdp_odin_clocks_set(struct device *dev, -+ void __iomem *pdp_reg, void __iomem *pll_reg, -+ u32 clock_freq, u32 dev_num, -+ void __iomem *odn_core_reg, -+ u32 hdisplay, u32 vdisplay, -+ enum pdp_odin_subversion); -+ -+void pdp_odin_set_updates_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable); -+ -+void pdp_odin_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable); -+ -+void pdp_odin_set_powerdwn_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable); -+ -+void pdp_odin_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable); -+ -+bool pdp_odin_check_and_clear_vblank(struct device *dev, -+ void __iomem *pdp_reg); -+ -+void pdp_odin_set_plane_enabled(struct device *dev, void __iomem *pdp_reg, -+ u32 plane, bool enable); -+ -+void pdp_odin_reset_planes(struct device *dev, void __iomem *pdp_reg); -+ -+void pdp_odin_set_surface(struct device *dev, void __iomem *pdp_reg, -+ u32 plane, u32 address, u32 offset, -+ u32 posx, u32 posy, -+ u32 width, u32 height, u32 stride, -+ u32 format, u32 alpha, bool blend, -+ void __iomem *pfim_reg, u32 fbcf); -+ -+void pdp_odin_mode_set(struct device *dev, void __iomem *pdp_reg, -+ u32 h_display, u32 v_display, -+ u32 hbps, u32 ht, u32 has, -+ u32 hlbs, u32 hfps, u32 hrbs, -+ u32 vbps, u32 vt, u32 vas, -+ u32 vtbs, u32 vfps, u32 vbbs, -+ bool nhsync, bool nvsync, -+ void __iomem *pfim_reg); -+ -+#endif /* __PDP_ODIN_H__ */ -diff --git a/drivers/gpu/drm/img-rogue/apollo/pdp_plato.c b/drivers/gpu/drm/img-rogue/apollo/pdp_plato.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/pdp_plato.c -@@ -0,0 +1,339 @@ -+/* -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include "pdp_common.h" -+#include "pdp_plato.h" -+#include "pdp2_mmu_regs.h" -+#include "pdp2_regs.h" -+ -+#define PLATO_PDP_STRIDE_SHIFT 5 -+ -+ -+void pdp_plato_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable) -+{ -+ u32 value; -+ -+#ifdef PDP_VERBOSE -+ dev_info(dev, "Set syncgen: %s\n", enable ? "enable" : "disable"); -+#endif -+ -+ value = pdp_rreg32(pdp_reg, PDP_SYNCCTRL_OFFSET); -+ /* Starts Sync Generator. */ -+ value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, -+ PDP_SYNCCTRL_SYNCACTIVE_SHIFT, -+ PDP_SYNCCTRL_SYNCACTIVE_MASK); -+ /* Controls polarity of pixel clock: Pixel clock is inverted */ -+ value = REG_VALUE_SET(value, 0x01, -+ PDP_SYNCCTRL_CLKPOL_SHIFT, -+ PDP_SYNCCTRL_CLKPOL_MASK); -+ pdp_wreg32(pdp_reg, PDP_SYNCCTRL_OFFSET, value); -+} -+ -+void pdp_plato_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable) -+{ -+ u32 value; -+ -+#ifdef PDP_VERBOSE -+ dev_info(dev, "Set vblank: %s\n", enable ? "enable" : "disable"); -+#endif -+ -+ pdp_wreg32(pdp_reg, PDP_INTCLR_OFFSET, 0xFFFFFFFF); -+ -+ value = pdp_rreg32(pdp_reg, PDP_INTENAB_OFFSET); -+ value = REG_VALUE_SET(value, enable ? 
0x1 : 0x0, -+ PDP_INTENAB_INTEN_VBLNK0_SHIFT, -+ PDP_INTENAB_INTEN_VBLNK0_MASK); -+ pdp_wreg32(pdp_reg, PDP_INTENAB_OFFSET, value); -+} -+ -+bool pdp_plato_check_and_clear_vblank(struct device *dev, -+ void __iomem *pdp_reg) -+{ -+ u32 value; -+ -+ value = pdp_rreg32(pdp_reg, PDP_INTSTAT_OFFSET); -+ -+ if (REG_VALUE_GET(value, -+ PDP_INTSTAT_INTS_VBLNK0_SHIFT, -+ PDP_INTSTAT_INTS_VBLNK0_MASK)) { -+ pdp_wreg32(pdp_reg, PDP_INTCLR_OFFSET, -+ (1 << PDP_INTCLR_INTCLR_VBLNK0_SHIFT)); -+ return true; -+ } -+ -+ return false; -+} -+ -+void pdp_plato_set_plane_enabled(struct device *dev, void __iomem *pdp_reg, -+ u32 plane, bool enable) -+{ -+ u32 value; -+ -+#ifdef PDP_VERBOSE -+ dev_info(dev, "Set plane %u: %s\n", -+ plane, enable ? "enable" : "disable"); -+#endif -+ value = pdp_rreg32(pdp_reg, PDP_GRPH1CTRL_OFFSET); -+ value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, -+ PDP_GRPH1CTRL_GRPH1STREN_SHIFT, -+ PDP_GRPH1CTRL_GRPH1STREN_MASK); -+ pdp_wreg32(pdp_reg, PDP_GRPH1CTRL_OFFSET, value); -+} -+ -+void pdp_plato_set_surface(struct device *dev, -+ void __iomem *pdp_reg, void __iomem *pdp_bif_reg, -+ u32 plane, u64 address, -+ u32 posx, u32 posy, -+ u32 width, u32 height, u32 stride, -+ u32 format, u32 alpha, bool blend) -+{ -+ u32 value; -+ -+#ifdef PDP_VERBOSE -+ dev_info(dev, -+ "Set surface: size=%dx%d stride=%d format=%d address=0x%llx\n", -+ width, height, stride, format, address); -+#endif -+ -+ pdp_wreg32(pdp_reg, PDP_REGISTER_UPDATE_CTRL_OFFSET, 0x0); -+ /* -+ * Set the offset position to (0,0) as we've already added any offset -+ * to the base address. -+ */ -+ pdp_wreg32(pdp_reg, PDP_GRPH1POSN_OFFSET, 0); -+ -+ /* Set the frame buffer base address */ -+ if (address & 0xF) -+ dev_warn(dev, "The frame buffer address is not aligned\n"); -+ -+ pdp_wreg32(pdp_reg, PDP_GRPH1BASEADDR_OFFSET, -+ (u32)address & PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK); -+ -+ /* -+ * Write 8 msb of the address to address extension bits in the PDP -+ * MMU control register. 
-+ */ -+ value = pdp_rreg32(pdp_bif_reg, PDP_BIF_ADDRESS_CONTROL_OFFSET); -+ value = REG_VALUE_SET(value, address >> 32, -+ PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_SHIFT, -+ PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_MASK); -+ value = REG_VALUE_SET(value, 0x00, -+ PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_SHIFT, -+ PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_MASK); -+ value = REG_VALUE_SET(value, 0x01, -+ PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_SHIFT, -+ PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_MASK); -+ pdp_wreg32(pdp_bif_reg, PDP_BIF_ADDRESS_CONTROL_OFFSET, value); -+ -+ /* Set the framebuffer pixel format */ -+ value = pdp_rreg32(pdp_reg, PDP_GRPH1SURF_OFFSET); -+ value = REG_VALUE_SET(value, format, -+ PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT, -+ PDP_GRPH1SURF_GRPH1PIXFMT_MASK); -+ pdp_wreg32(pdp_reg, PDP_GRPH1SURF_OFFSET, value); -+ /* -+ * Set the framebuffer size (this might be smaller than the resolution) -+ */ -+ value = REG_VALUE_SET(0, width - 1, -+ PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT, -+ PDP_GRPH1SIZE_GRPH1WIDTH_MASK); -+ value = REG_VALUE_SET(value, height - 1, -+ PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT, -+ PDP_GRPH1SIZE_GRPH1HEIGHT_MASK); -+ pdp_wreg32(pdp_reg, PDP_GRPH1SIZE_OFFSET, value); -+ -+ /* Set the framebuffer stride in 16byte words */ -+ value = REG_VALUE_SET(0, (stride >> PLATO_PDP_STRIDE_SHIFT) - 1, -+ PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT, -+ PDP_GRPH1STRIDE_GRPH1STRIDE_MASK); -+ pdp_wreg32(pdp_reg, PDP_GRPH1STRIDE_OFFSET, value); -+ -+ /* Enable the register writes on the next vblank */ -+ pdp_wreg32(pdp_reg, PDP_REGISTER_UPDATE_CTRL_OFFSET, 0x3); -+ -+ /* -+ * Issues with NoC sending interleaved read responses to PDP require -+ * burst to be 1. -+ */ -+ value = REG_VALUE_SET(0, 0x02, -+ PDP_MEMCTRL_MEMREFRESH_SHIFT, -+ PDP_MEMCTRL_MEMREFRESH_MASK); -+ value = REG_VALUE_SET(value, 0x01, -+ PDP_MEMCTRL_BURSTLEN_SHIFT, -+ PDP_MEMCTRL_BURSTLEN_MASK); -+ pdp_wreg32(pdp_reg, PDP_MEMCTRL_OFFSET, value); -+} -+ -+void pdp_plato_mode_set(struct device *dev, void __iomem *pdp_reg, -+ u32 h_display, u32 v_display, -+ u32 hbps, u32 ht, u32 has, -+ u32 hlbs, u32 hfps, u32 hrbs, -+ u32 vbps, u32 vt, u32 vas, -+ u32 vtbs, u32 vfps, u32 vbbs, -+ bool nhsync, bool nvsync) -+{ -+ u32 value; -+ -+ dev_info(dev, "Set mode: %dx%d\n", h_display, v_display); -+#ifdef PDP_VERBOSE -+ dev_info(dev, " ht: %d hbps %d has %d hlbs %d hfps %d hrbs %d\n", -+ ht, hbps, has, hlbs, hfps, hrbs); -+ dev_info(dev, " vt: %d vbps %d vas %d vtbs %d vfps %d vbbs %d\n", -+ vt, vbps, vas, vtbs, vfps, vbbs); -+#endif -+ -+ /* Update control */ -+ value = pdp_rreg32(pdp_reg, PDP_REGISTER_UPDATE_CTRL_OFFSET); -+ value = REG_VALUE_SET(value, 0x0, -+ PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT, -+ PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK); -+ pdp_wreg32(pdp_reg, PDP_REGISTER_UPDATE_CTRL_OFFSET, value); -+ -+ /* Set hsync timings */ -+ value = pdp_rreg32(pdp_reg, PDP_HSYNC1_OFFSET); -+ value = REG_VALUE_SET(value, hbps, -+ PDP_HSYNC1_HBPS_SHIFT, -+ PDP_HSYNC1_HBPS_MASK); -+ value = REG_VALUE_SET(value, ht, -+ PDP_HSYNC1_HT_SHIFT, -+ PDP_HSYNC1_HT_MASK); -+ pdp_wreg32(pdp_reg, PDP_HSYNC1_OFFSET, value); -+ -+ value = pdp_rreg32(pdp_reg, PDP_HSYNC2_OFFSET); -+ value = REG_VALUE_SET(value, has, -+ PDP_HSYNC2_HAS_SHIFT, -+ PDP_HSYNC2_HAS_MASK); -+ value = REG_VALUE_SET(value, hlbs, -+ PDP_HSYNC2_HLBS_SHIFT, -+ PDP_HSYNC2_HLBS_MASK); -+ pdp_wreg32(pdp_reg, PDP_HSYNC2_OFFSET, value); -+ -+ value = pdp_rreg32(pdp_reg, PDP_HSYNC3_OFFSET); -+ value = REG_VALUE_SET(value, hfps, -+ PDP_HSYNC3_HFPS_SHIFT, -+ PDP_HSYNC3_HFPS_MASK); 
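pdp_plato_set_surface() above programs a 64-bit frame buffer address in two pieces: the low 32 bits go into GRPH1BASEADDR and the upper bits are written to the BIF address-extension field with the MMU bypassed. A minimal sketch of that split, using a hypothetical helper and assuming the 16-byte alignment the driver only warns about:

#include <stdint.h>

/* Illustrative split of a 64-bit card address, mirroring the register writes
 * above; the real code additionally masks the low word with the
 * GRPH1BASEADDR field mask. */
struct plato_fb_addr {
        uint32_t base_lo;  /* written to PDP_GRPH1BASEADDR */
        uint32_t upper;    /* written to the BIF upper-address field */
};

static struct plato_fb_addr plato_split_fb_addr(uint64_t address)
{
        struct plato_fb_addr s;

        /* The driver warns if (address & 0xF) is non-zero, i.e. the base is
         * expected to be 16-byte aligned. */
        s.base_lo = (uint32_t)address;
        s.upper = (uint32_t)(address >> 32);
        return s;
}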
-+ value = REG_VALUE_SET(value, hrbs, -+ PDP_HSYNC3_HRBS_SHIFT, -+ PDP_HSYNC3_HRBS_MASK); -+ pdp_wreg32(pdp_reg, PDP_HSYNC3_OFFSET, value); -+ -+ /* Set vsync timings */ -+ value = pdp_rreg32(pdp_reg, PDP_VSYNC1_OFFSET); -+ value = REG_VALUE_SET(value, vbps, -+ PDP_VSYNC1_VBPS_SHIFT, -+ PDP_VSYNC1_VBPS_MASK); -+ value = REG_VALUE_SET(value, vt, -+ PDP_VSYNC1_VT_SHIFT, -+ PDP_VSYNC1_VT_MASK); -+ pdp_wreg32(pdp_reg, PDP_VSYNC1_OFFSET, value); -+ -+ value = pdp_rreg32(pdp_reg, PDP_VSYNC2_OFFSET); -+ value = REG_VALUE_SET(value, vas, -+ PDP_VSYNC2_VAS_SHIFT, -+ PDP_VSYNC2_VAS_MASK); -+ value = REG_VALUE_SET(value, vtbs, -+ PDP_VSYNC2_VTBS_SHIFT, -+ PDP_VSYNC2_VTBS_MASK); -+ pdp_wreg32(pdp_reg, PDP_VSYNC2_OFFSET, value); -+ -+ value = pdp_rreg32(pdp_reg, PDP_VSYNC3_OFFSET); -+ value = REG_VALUE_SET(value, vfps, -+ PDP_VSYNC3_VFPS_SHIFT, -+ PDP_VSYNC3_VFPS_MASK); -+ value = REG_VALUE_SET(value, vbbs, -+ PDP_VSYNC3_VBBS_SHIFT, -+ PDP_VSYNC3_VBBS_MASK); -+ pdp_wreg32(pdp_reg, PDP_VSYNC3_OFFSET, value); -+ -+ /* Horizontal data enable */ -+ value = pdp_rreg32(pdp_reg, PDP_HDECTRL_OFFSET); -+ value = REG_VALUE_SET(value, has, -+ PDP_HDECTRL_HDES_SHIFT, -+ PDP_HDECTRL_HDES_MASK); -+ value = REG_VALUE_SET(value, hrbs, -+ PDP_HDECTRL_HDEF_SHIFT, -+ PDP_HDECTRL_HDEF_MASK); -+ pdp_wreg32(pdp_reg, PDP_HDECTRL_OFFSET, value); -+ -+ /* Vertical data enable */ -+ value = pdp_rreg32(pdp_reg, PDP_VDECTRL_OFFSET); -+ value = REG_VALUE_SET(value, vtbs, /* XXX: we're setting this to VAS */ -+ PDP_VDECTRL_VDES_SHIFT, -+ PDP_VDECTRL_VDES_MASK); -+ value = REG_VALUE_SET(value, vfps, /* XXX: set to VBBS */ -+ PDP_VDECTRL_VDEF_SHIFT, -+ PDP_VDECTRL_VDEF_MASK); -+ pdp_wreg32(pdp_reg, PDP_VDECTRL_OFFSET, value); -+ -+ /* Vertical event start and vertical fetch start */ -+ value = 0; -+ value = REG_VALUE_SET(value, 0, -+ PDP_VEVENT_VEVENT_SHIFT, -+ PDP_VEVENT_VEVENT_MASK); -+ value = REG_VALUE_SET(value, vbps, -+ PDP_VEVENT_VFETCH_SHIFT, -+ PDP_VEVENT_VFETCH_MASK); -+ value = REG_VALUE_SET(value, vfps, -+ PDP_VEVENT_VEVENT_SHIFT, -+ PDP_VEVENT_VEVENT_MASK); -+ pdp_wreg32(pdp_reg, PDP_VEVENT_OFFSET, value); -+ -+ /* Set up polarities of sync/blank */ -+ value = REG_VALUE_SET(0, 0x1, -+ PDP_SYNCCTRL_BLNKPOL_SHIFT, -+ PDP_SYNCCTRL_BLNKPOL_MASK); -+ -+ if (nhsync) -+ value = REG_VALUE_SET(value, 0x1, -+ PDP_SYNCCTRL_HSPOL_SHIFT, -+ PDP_SYNCCTRL_HSPOL_MASK); -+ -+ if (nvsync) -+ value = REG_VALUE_SET(value, 0x1, -+ PDP_SYNCCTRL_VSPOL_SHIFT, -+ PDP_SYNCCTRL_VSPOL_MASK); -+ -+ pdp_wreg32(pdp_reg, -+ PDP_SYNCCTRL_OFFSET, -+ value); -+} -diff --git a/drivers/gpu/drm/img-rogue/apollo/pdp_plato.h b/drivers/gpu/drm/img-rogue/apollo/pdp_plato.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/pdp_plato.h -@@ -0,0 +1,86 @@ -+/* -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ */ -+ -+#if !defined(__PDP_PLATO_H__) -+#define __PDP_PLATO_H__ -+ -+#include -+#include -+ -+#define PLATO_PDP_PIXEL_FORMAT_G (0x00) -+#define PLATO_PDP_PIXEL_FORMAT_ARGB4 (0x04) -+#define PLATO_PDP_PIXEL_FORMAT_ARGB1555 (0x05) -+#define PLATO_PDP_PIXEL_FORMAT_RGB8 (0x06) -+#define PLATO_PDP_PIXEL_FORMAT_RGB565 (0x07) -+#define PLATO_PDP_PIXEL_FORMAT_ARGB8 (0x08) -+#define PLATO_PDP_PIXEL_FORMAT_AYUV8 (0x10) -+#define PLATO_PDP_PIXEL_FORMAT_YUV10 (0x15) -+#define PLATO_PDP_PIXEL_FORMAT_RGBA8 (0x16) -+ -+ -+void pdp_plato_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable); -+ -+void pdp_plato_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg, -+ bool enable); -+ -+bool pdp_plato_check_and_clear_vblank(struct device *dev, -+ void __iomem *pdp_reg); -+ -+void pdp_plato_set_plane_enabled(struct device *dev, void __iomem *pdp_reg, -+ u32 plane, bool enable); -+ -+void pdp_plato_set_surface(struct device *dev, -+ void __iomem *pdp_reg, void __iomem *pdp_bif_reg, -+ u32 plane, u64 address, -+ u32 posx, u32 posy, -+ u32 width, u32 height, u32 stride, -+ u32 format, u32 alpha, bool blend); -+ -+void pdp_plato_mode_set(struct device *dev, void __iomem *pdp_reg, -+ u32 h_display, u32 v_display, -+ u32 hbps, u32 ht, u32 has, -+ u32 hlbs, u32 hfps, u32 hrbs, -+ u32 vbps, u32 vt, u32 vas, -+ u32 vtbs, u32 vfps, u32 vbbs, -+ bool nhsync, bool nvsync); -+ -+#endif /* __PDP_PLATO_H__ */ -diff --git a/drivers/gpu/drm/img-rogue/apollo/pdp_regs.h b/drivers/gpu/drm/img-rogue/apollo/pdp_regs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/pdp_regs.h -@@ -0,0 +1,75 @@ -+/*************************************************************************/ /*! -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(__PDP_REGS_H__) -+#define __PDP_REGS_H__ -+ -+/*************************************************************************/ /*! -+ PCI Device Information -+*/ /**************************************************************************/ -+ -+#define DCPDP_VENDOR_ID_POWERVR (0x1010) -+ -+#define DCPDP_DEVICE_ID_PCI_APOLLO_FPGA (0x1CF1) -+#define DCPDP_DEVICE_ID_PCIE_APOLLO_FPGA (0x1CF2) -+ -+/*************************************************************************/ /*! -+ PCI Device Base Address Information -+*/ /**************************************************************************/ -+ -+/* PLL and PDP registers on base address register 0 */ -+#define DCPDP_REG_PCI_BASENUM (0) -+ -+#define DCPDP_PCI_PLL_REG_OFFSET (0x1000) -+#define DCPDP_PCI_PLL_REG_SIZE (0x0400) -+ -+#define DCPDP_PCI_PDP_REG_OFFSET (0xC000) -+#define DCPDP_PCI_PDP_REG_SIZE (0x2000) -+ -+/*************************************************************************/ /*! -+ Misc register information -+*/ /**************************************************************************/ -+ -+/* This information isn't captured in tcf_rgbpdp_regs.h so define it here */ -+#define DCPDP_STR1SURF_FORMAT_ARGB8888 (0xE) -+#define DCPDP_STR1ADDRCTRL_BASE_ADDR_SHIFT (4) -+#define DCPDP_STR1POSN_STRIDE_SHIFT (4) -+ -+#endif /* !defined(__PDP_REGS_H__) */ -diff --git a/drivers/gpu/drm/img-rogue/apollo/pfim_defs.h b/drivers/gpu/drm/img-rogue/apollo/pfim_defs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/pfim_defs.h -@@ -0,0 +1,69 @@ -+/****************************************************************************** -+@Title Odin PFIM definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Odin register defs for PDP-FBDC Interface Module -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
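The pdp_regs.h offsets above describe where the Apollo PLL and PDP register blocks sit inside PCI BAR 0. The probe code that maps them is not in this hunk; the sketch below only shows how those constants would typically be combined with the BAR base, and the helper name is illustrative.

#include <linux/pci.h>

/* Values copied from pdp_regs.h above: PDP registers live 0xC000 bytes into
 * BAR 0 and span 0x2000 bytes. */
#define DCPDP_REG_PCI_BASENUM    (0)
#define DCPDP_PCI_PDP_REG_OFFSET (0xC000)

static resource_size_t dcpdp_pdp_regs_base(struct pci_dev *pdev)
{
        /* Physical CPU address of the PDP register block. */
        return pci_resource_start(pdev, DCPDP_REG_PCI_BASENUM) +
               DCPDP_PCI_PDP_REG_OFFSET;
}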
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+******************************************************************************/ -+ -+#ifndef _PFIM_DEFS_H_ -+#define _PFIM_DEFS_H_ -+ -+/* Supported FBC modes */ -+#define ODIN_PFIM_MOD_LINEAR (0x00) -+#define ODIN_PFIM_FBCDC_8X8_V12 (0x01) -+#define ODIN_PFIM_FBCDC_16X4_V12 (0x02) -+#define ODIN_PFIM_FBCDC_MAX (0x03) -+ -+/* Supported pixel formats */ -+#define ODN_PFIM_PIXFMT_NONE (0x00) -+#define ODN_PFIM_PIXFMT_ARGB8888 (0x0C) -+#define ODN_PFIM_PIXFMT_RGB565 (0x05) -+ -+/* Tile types */ -+#define ODN_PFIM_TILETYPE_8X8 (0x01) -+#define ODN_PFIM_TILETYPE_16X4 (0x02) -+#define ODN_PFIM_TILETYPE_32x2 (0x03) -+ -+#define PFIM_ROUNDUP(X, Y) (((X) + ((Y) - 1U)) & ~((Y) - 1U)) -+#define PFIM_RND_TAG (0x10) -+ -+#endif /* _PFIM_DEFS_H_ */ -+ -+/****************************************************************************** -+ End of file (pfim_defs.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/apollo/pfim_regs.h b/drivers/gpu/drm/img-rogue/apollo/pfim_regs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/pfim_regs.h -@@ -0,0 +1,265 @@ -+/****************************************************************************** -+@Title Odin PFIM control register definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Odin register defs for PDP-FBDC Interface Module -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
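PFIM_ROUNDUP() above is the standard power-of-two round-up. A small self-check of the arithmetic, using tile dimensions suggested by the ODN_PFIM_TILETYPE_* definitions (8x8 and 16x4 tiles):

#include <assert.h>

/* Copied from pfim_defs.h above. */
#define PFIM_ROUNDUP(X, Y) (((X) + ((Y) - 1U)) & ~((Y) - 1U))

int main(void)
{
        /* Y must be a power of two for the mask trick to hold. */
        assert(PFIM_ROUNDUP(1080u, 8u) == 1080u);  /* already a multiple of 8 */
        assert(PFIM_ROUNDUP(1086u, 8u) == 1088u);  /* rounded up to the next 8 */
        assert(PFIM_ROUNDUP(1920u, 16u) == 1920u);
        return 0;
}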
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+******************************************************************************/ -+#ifndef _PFIM_REGS_H_ -+#define _PFIM_REGS_H_ -+ -+/* -+ Register CR_PFIM_NUM_TILES -+*/ -+#define CR_PFIM_NUM_TILES 0x0000 -+#define CR_PFIM_NUM_TILES_MASK 0x007FFFFFU -+#define CR_PFIM_NUM_TILES_SHIFT 0 -+#define CR_PFIM_NUM_TILES_SIGNED 0 -+ -+/* -+ Register CR_PFIM_TILES_PER_LINE -+*/ -+#define CR_PFIM_TILES_PER_LINE 0x0004 -+#define CR_PFIM_TILES_PER_LINE_PFIM_TILES_PER_LINE_MASK 0x000000FFU -+#define CR_PFIM_TILES_PER_LINE_PFIM_TILES_PER_LINE_SHIFT 0 -+#define CR_PFIM_TILES_PER_LINE_PFIM_TILES_PER_LINE_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB -+*/ -+#define CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB 0x0008 -+#define CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB_MASK 0xFFFFFFFFU -+#define CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB_SHIFT 0 -+#define CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB -+*/ -+#define CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB 0x000C -+#define CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB_MASK 0x00000003U -+#define CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB_SHIFT 0 -+#define CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_UV_BASE_ADDR_LSB -+*/ -+#define CR_PFIM_FBDC_UV_BASE_ADDR_LSB 0x0010 -+#define CR_PFIM_FBDC_UV_BASE_ADDR_LSB_MASK 0xFFFFFFFFU -+#define CR_PFIM_FBDC_UV_BASE_ADDR_LSB_SHIFT 0 -+#define CR_PFIM_FBDC_UV_BASE_ADDR_LSB_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_UV_BASE_ADDR_MSB -+*/ -+#define CR_PFIM_FBDC_UV_BASE_ADDR_MSB 0x0014 -+#define CR_PFIM_FBDC_UV_BASE_ADDR_MSB_MASK 0x00000003U -+#define CR_PFIM_FBDC_UV_BASE_ADDR_MSB_SHIFT 0 -+#define CR_PFIM_FBDC_UV_BASE_ADDR_MSB_SIGNED 0 -+ -+/* -+ Register CR_PFIM_PDP_Y_BASE_ADDR -+*/ -+#define CR_PFIM_PDP_Y_BASE_ADDR 0x0018 -+#define CR_PFIM_PDP_Y_BASE_ADDR_MASK 0xFFFFFFFFU -+#define CR_PFIM_PDP_Y_BASE_ADDR_SHIFT 0 -+#define CR_PFIM_PDP_Y_BASE_ADDR_SIGNED 0 -+ -+/* -+ Register CR_PFIM_PDP_UV_BASE_ADDR -+*/ -+#define CR_PFIM_PDP_UV_BASE_ADDR 0x001C -+#define CR_PFIM_PDP_UV_BASE_ADDR_MASK 0xFFFFFFFFU -+#define CR_PFIM_PDP_UV_BASE_ADDR_SHIFT 0 -+#define CR_PFIM_PDP_UV_BASE_ADDR_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_REQ_CONTEXT -+*/ -+#define CR_PFIM_FBDC_REQ_CONTEXT 0x0020 -+#define CR_PFIM_FBDC_REQ_CONTEXT_MASK 0x00000007U -+#define CR_PFIM_FBDC_REQ_CONTEXT_SHIFT 0 -+#define CR_PFIM_FBDC_REQ_CONTEXT_SIGNED 0 -+ -+/* -+ Register 
CR_PFIM_FBDC_REQ_TAG -+*/ -+#define CR_PFIM_FBDC_REQ_TAG 0x0024 -+#define CR_PFIM_FBDC_REQ_TAG_YARGB_MASK 0x00000003U -+#define CR_PFIM_FBDC_REQ_TAG_YARGB_SHIFT 0 -+#define CR_PFIM_FBDC_REQ_TAG_YARGB_SIGNED 0 -+ -+#define CR_PFIM_FBDC_REQ_TAG_UV_MASK 0x00000030U -+#define CR_PFIM_FBDC_REQ_TAG_UV_SHIFT 4 -+#define CR_PFIM_FBDC_REQ_TAG_UV_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_REQ_SB_TAG -+*/ -+#define CR_PFIM_FBDC_REQ_SB_TAG 0x0028 -+#define CR_PFIM_FBDC_REQ_SB_TAG_YARGB_MASK 0x00000003U -+#define CR_PFIM_FBDC_REQ_SB_TAG_YARGB_SHIFT 0 -+#define CR_PFIM_FBDC_REQ_SB_TAG_YARGB_SIGNED 0 -+ -+#define CR_PFIM_FBDC_REQ_SB_TAG_UV_MASK 0x00000030U -+#define CR_PFIM_FBDC_REQ_SB_TAG_UV_SHIFT 4 -+#define CR_PFIM_FBDC_REQ_SB_TAG_UV_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_HDR_INVAL_REQ -+*/ -+#define CR_PFIM_FBDC_HDR_INVAL_REQ 0x002C -+#define CR_PFIM_FBDC_HDR_INVAL_REQ_MASK 0x00000001U -+#define CR_PFIM_FBDC_HDR_INVAL_REQ_SHIFT 0 -+#define CR_PFIM_FBDC_HDR_INVAL_REQ_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_PIX_FORMAT -+*/ -+#define CR_PFIM_FBDC_PIX_FORMAT 0x0030 -+#define CR_PFIM_FBDC_PIX_FORMAT_FBDC_PIX_FMT_MASK 0x0000007FU -+#define CR_PFIM_FBDC_PIX_FORMAT_FBDC_PIX_FMT_SHIFT 0 -+#define CR_PFIM_FBDC_PIX_FORMAT_FBDC_PIX_FMT_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_CR_CH0123_VAL0 -+*/ -+#define CR_PFIM_FBDC_CR_CH0123_VAL0 0x0034 -+#define CR_PFIM_FBDC_CR_CH0123_VAL0_MASK 0xFFFFFFFFU -+#define CR_PFIM_FBDC_CR_CH0123_VAL0_SHIFT 0 -+#define CR_PFIM_FBDC_CR_CH0123_VAL0_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_CR_CH0123_VAL1 -+*/ -+#define CR_PFIM_FBDC_CR_CH0123_VAL1 0x0038 -+#define CR_PFIM_FBDC_CR_CH0123_VAL1_MASK 0xFFFFFFFFU -+#define CR_PFIM_FBDC_CR_CH0123_VAL1_SHIFT 0 -+#define CR_PFIM_FBDC_CR_CH0123_VAL1_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_CR_Y_VAL0 -+*/ -+#define CR_PFIM_FBDC_CR_Y_VAL0 0x003C -+#define CR_PFIM_FBDC_CR_Y_VAL0_MASK 0x000003FFU -+#define CR_PFIM_FBDC_CR_Y_VAL0_SHIFT 0 -+#define CR_PFIM_FBDC_CR_Y_VAL0_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_CR_UV_VAL0 -+*/ -+#define CR_PFIM_FBDC_CR_UV_VAL0 0x0040 -+#define CR_PFIM_FBDC_CR_UV_VAL0_MASK 0x000003FFU -+#define CR_PFIM_FBDC_CR_UV_VAL0_SHIFT 0 -+#define CR_PFIM_FBDC_CR_UV_VAL0_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_CR_Y_VAL1 -+*/ -+#define CR_PFIM_FBDC_CR_Y_VAL1 0x0044 -+#define CR_PFIM_FBDC_CR_Y_VAL1_MASK 0x000003FFU -+#define CR_PFIM_FBDC_CR_Y_VAL1_SHIFT 0 -+#define CR_PFIM_FBDC_CR_Y_VAL1_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_CR_UV_VAL1 -+*/ -+#define CR_PFIM_FBDC_CR_UV_VAL1 0x0048 -+#define CR_PFIM_FBDC_CR_UV_VAL1_MASK 0x000003FFU -+#define CR_PFIM_FBDC_CR_UV_VAL1_SHIFT 0 -+#define CR_PFIM_FBDC_CR_UV_VAL1_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_FILTER_ENABLE -+*/ -+#define CR_PFIM_FBDC_FILTER_ENABLE 0x004C -+#define CR_PFIM_FBDC_FILTER_ENABLE_MASK 0x00000001U -+#define CR_PFIM_FBDC_FILTER_ENABLE_SHIFT 0 -+#define CR_PFIM_FBDC_FILTER_ENABLE_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_FILTER_STATUS -+*/ -+#define CR_PFIM_FBDC_FILTER_STATUS 0x0050 -+#define CR_PFIM_FBDC_FILTER_STATUS_MASK 0x0000000FU -+#define CR_PFIM_FBDC_FILTER_STATUS_SHIFT 0 -+#define CR_PFIM_FBDC_FILTER_STATUS_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_FILTER_CLEAR -+*/ -+#define CR_PFIM_FBDC_FILTER_CLEAR 0x0054 -+#define CR_PFIM_FBDC_FILTER_CLEAR_MASK 0x0000000FU -+#define CR_PFIM_FBDC_FILTER_CLEAR_SHIFT 0 -+#define CR_PFIM_FBDC_FILTER_CLEAR_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_TILE_TYPE -+*/ -+#define CR_PFIM_FBDC_TILE_TYPE 0x0058 -+#define CR_PFIM_FBDC_TILE_TYPE_MASK 0x00000003U -+#define CR_PFIM_FBDC_TILE_TYPE_SHIFT 0 -+#define 
CR_PFIM_FBDC_TILE_TYPE_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_CLEAR_COLOUR_LSB -+*/ -+#define CR_PFIM_FBDC_CLEAR_COLOUR_LSB 0x005C -+#define CR_PFIM_FBDC_CLEAR_COLOUR_LSB_MASK 0xFFFFFFFFU -+#define CR_PFIM_FBDC_CLEAR_COLOUR_LSB_SHIFT 0 -+#define CR_PFIM_FBDC_CLEAR_COLOUR_LSB_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_CLEAR_COLOUR_MSB -+*/ -+#define CR_PFIM_FBDC_CLEAR_COLOUR_MSB 0x0060 -+#define CR_PFIM_FBDC_CLEAR_COLOUR_MSB_MASK 0xFFFFFFFFU -+#define CR_PFIM_FBDC_CLEAR_COLOUR_MSB_SHIFT 0 -+#define CR_PFIM_FBDC_CLEAR_COLOUR_MSB_SIGNED 0 -+ -+/* -+ Register CR_PFIM_FBDC_REQ_LOSSY -+*/ -+#define CR_PFIM_FBDC_REQ_LOSSY 0x0064 -+#define CR_PFIM_FBDC_REQ_LOSSY_MASK 0x00000001U -+#define CR_PFIM_FBDC_REQ_LOSSY_SHIFT 0 -+#define CR_PFIM_FBDC_REQ_LOSSY_SIGNED 0 -+ -+#endif /* _PFIM_REGS_H_ */ -+ -+/****************************************************************************** -+ End of file (pfim_regs.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/apollo/sysconfig.c b/drivers/gpu/drm/img-rogue/apollo/sysconfig.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/sysconfig.c -@@ -0,0 +1,1414 @@ -+/*************************************************************************/ /*! -+@File -+@Title System Configuration -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description System Configuration functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#include -+ -+#include "sysinfo.h" -+#include "apollo_regs.h" -+ -+#include "pvrsrv.h" -+#include "pvrsrv_device.h" -+#include "rgxdevice.h" -+#include "syscommon.h" -+#include "allocmem.h" -+#include "pvr_debug.h" -+ -+#if defined(SUPPORT_ION) -+#include PVR_ANDROID_ION_HEADER -+#include "ion_support.h" -+#include "ion_sys.h" -+#endif -+ -+#include "tc_drv.h" -+ -+#include -+#include -+ -+#define SECURE_FW_MEM_SIZE (0x400000) /* 4MB */ -+#define SECURE_MEM_SIZE (0x4000000) /* 64MB */ -+ -+typedef struct -+{ -+ PHYS_HEAP_USAGE_FLAGS ui32UsageFlags; -+ IMG_UINT64 uiSize; -+ IMG_BOOL bUsed; -+} CARD_PHYS_HEAP_CONFIG_SPEC; -+ -+#define HEAP_SPEC_IDX_GPU_PRIVATE (0U) -+#define HEAP_SPEC_IDX_GPU_LOCAL (1U) -+ -+static const CARD_PHYS_HEAP_CONFIG_SPEC gasCardHeapTemplate[] = -+{ -+ { -+ PHYS_HEAP_USAGE_GPU_PRIVATE, -+ 0, /* determined at runtime by apphints */ -+ false /* determined at runtime by apphints */ -+ }, -+ { -+ PHYS_HEAP_USAGE_GPU_LOCAL, -+ 0, /* determined at runtime */ -+ true -+ }, -+ { -+ PHYS_HEAP_USAGE_GPU_SECURE, -+ SECURE_MEM_SIZE, -+#if defined(SUPPORT_SECURITY_VALIDATION) -+ true -+#else -+ false -+#endif -+ }, -+ { -+ PHYS_HEAP_USAGE_FW_PRIVATE, -+ SECURE_FW_MEM_SIZE, -+#if defined(SUPPORT_SECURITY_VALIDATION) -+ true -+#else -+ false -+#endif -+ }, -+ { -+ PHYS_HEAP_USAGE_FW_SHARED, -+#if defined(SUPPORT_SECURITY_VALIDATION) && defined(RGX_PREMAP_FW_HEAPS) -+ /* simultaneous virtualisation and security support requires premapped heaps, -+ * i.e. FW_PRIVATE and FW_SHARED must fit contiguously into Fw's VA heap (RGX_FIRMWARE_RAW_HEAP_SIZE) */ -+ (RGX_NUM_DRIVERS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE) - SECURE_FW_MEM_SIZE, -+#else -+ (RGX_NUM_DRIVERS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE), -+#endif -+#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) -+ true /* VZ drivers need dedicated Fw heaps */ -+#else -+ false /* Native drivers can fallback on GPU_LOCAL for Fw mem */ -+#endif -+ }, -+ { -+ PHYS_HEAP_USAGE_FW_PREMAP_PT, -+ RGX_FIRMWARE_MAX_PAGETABLE_SIZE, -+#if defined(RGX_PREMAP_FW_HEAPS) -+ true -+#else -+ false -+#endif -+ } -+}; -+ -+#define ODIN_MEMORY_HYBRID_DEVICE_BASE 0x400000000 -+ -+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (10) -+ -+#define UI64_TOPWORD_IS_ZERO(ui64) ((ui64 >> 32) == 0) -+ -+#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) -+ -+/* Fake DVFS configuration used purely for testing purposes */ -+ -+static const IMG_OPP asOPPTable[] = -+{ -+ { 8, 25000000}, -+ { 16, 50000000}, -+ { 32, 75000000}, -+ { 64, 100000000}, -+}; -+ -+#define LEVEL_COUNT (sizeof(asOPPTable) / sizeof(IMG_OPP)) -+ -+static void SetFrequency(IMG_HANDLE hSysData, IMG_UINT32 ui32Frequency) -+{ -+ PVR_UNREFERENCED_PARAMETER(hSysData); -+ -+ PVR_DPF((PVR_DBG_ERROR, "SetFrequency %u", ui32Frequency)); -+} -+ -+static void SetVoltage(IMG_HANDLE hSysData, IMG_UINT32 ui32Voltage) -+{ -+ PVR_UNREFERENCED_PARAMETER(hSysData); -+ -+ PVR_DPF((PVR_DBG_ERROR, "SetVoltage %u", ui32Voltage)); -+} -+ -+#endif -+ -+static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr); -+ -+static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr, -+ IMG_DEV_PHYADDR *psDevPAddr); -+ -+static PHYS_HEAP_FUNCTIONS gsLocalPhysHeapFuncs = -+{ -+ .pfnCpuPAddrToDevPAddr = TCLocalCpuPAddrToDevPAddr, -+ .pfnDevPAddrToCpuPAddr = 
TCLocalDevPAddrToCpuPAddr, -+}; -+ -+static void TCHostCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr); -+ -+static void TCHostDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr, -+ IMG_DEV_PHYADDR *psDevPAddr); -+ -+static PHYS_HEAP_FUNCTIONS gsHostPhysHeapFuncs = -+{ -+ .pfnCpuPAddrToDevPAddr = TCHostCpuPAddrToDevPAddr, -+ .pfnDevPAddrToCpuPAddr = TCHostDevPAddrToCpuPAddr, -+}; -+ -+static void TCHybridCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr); -+ -+static void TCHybridDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr, -+ IMG_DEV_PHYADDR *psDevPAddr); -+ -+static PHYS_HEAP_FUNCTIONS gsHybridPhysHeapFuncs = -+{ -+ .pfnCpuPAddrToDevPAddr = TCHybridCpuPAddrToDevPAddr, -+ .pfnDevPAddrToCpuPAddr = TCHybridDevPAddrToCpuPAddr -+}; -+ -+typedef struct _SYS_DATA_ SYS_DATA; -+ -+struct _SYS_DATA_ -+{ -+ struct platform_device *pdev; -+ -+ struct tc_rogue_platform_data *pdata; -+ -+ struct resource *registers; -+ -+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ struct ion_client *ion_client; -+ struct ion_handle *ion_rogue_allocation; -+#endif -+ -+#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__) -+ PVRSRV_DEVICE_CONFIG *psDevConfig; -+ -+ PHYS_HEAP_ITERATOR *psHeapIter; -+ void *pvS3Buffer; -+#endif /* defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__) */ -+}; -+ -+#define SYSTEM_INFO_FORMAT_STRING "FPGA Revision: %s - TCF Core Revision: %s - TCF Core Target Build ID: %s - PCI Version: %s - Macro Version: %s" -+#define FPGA_REV_MAX_LEN 8 /* current longest format: "x.y.z" */ -+#define TCF_CORE_REV_MAX_LEN 8 /* current longest format: "x.y.z" */ -+#define TCF_CORE_CFG_MAX_LEN 4 /* current longest format: "x" */ -+#define PCI_VERSION_MAX_LEN 4 /* current longest format: "x" */ -+#define MACRO_VERSION_MAX_LEN 8 /* current longest format: "x.yz" */ -+ -+static IMG_CHAR *GetDeviceVersionString(SYS_DATA *psSysData) -+{ -+ int err; -+ char str_fpga_rev[FPGA_REV_MAX_LEN]={0}; -+ char str_tcf_core_rev[TCF_CORE_REV_MAX_LEN]={0}; -+ char str_tcf_core_target_build_id[TCF_CORE_CFG_MAX_LEN]={0}; -+ char str_pci_ver[PCI_VERSION_MAX_LEN]={0}; -+ char str_macro_ver[MACRO_VERSION_MAX_LEN]={0}; -+ -+ IMG_CHAR *pszVersion; -+ IMG_UINT32 ui32StringLength; -+ -+ err = tc_sys_strings(psSysData->pdev->dev.parent, -+ str_fpga_rev, sizeof(str_fpga_rev), -+ str_tcf_core_rev, sizeof(str_tcf_core_rev), -+ str_tcf_core_target_build_id, sizeof(str_tcf_core_target_build_id), -+ str_pci_ver, sizeof(str_pci_ver), -+ str_macro_ver, sizeof(str_macro_ver)); -+ if (err) -+ { -+ return NULL; -+ } -+ -+ /* Calculate how much space we need to allocate for the string */ -+ ui32StringLength = OSStringLength(SYSTEM_INFO_FORMAT_STRING); -+ ui32StringLength += OSStringLength(str_fpga_rev); -+ ui32StringLength += OSStringLength(str_tcf_core_rev); -+ ui32StringLength += OSStringLength(str_tcf_core_target_build_id); -+ ui32StringLength += OSStringLength(str_pci_ver); -+ ui32StringLength += OSStringLength(str_macro_ver); -+ -+ /* Create the version string */ -+ pszVersion = OSAllocMem(ui32StringLength * sizeof(IMG_CHAR)); -+ if (pszVersion) -+ { -+ OSSNPrintf(&pszVersion[0], ui32StringLength, -+ SYSTEM_INFO_FORMAT_STRING, -+ str_fpga_rev, -+ str_tcf_core_rev, -+ str_tcf_core_target_build_id, -+ str_pci_ver, -+ 
str_macro_ver); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to create format string", __func__)); -+ } -+ -+ return pszVersion; -+} -+ -+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+static SYS_DATA *gpsIonPrivateData; -+ -+PVRSRV_ERROR IonInit(void *pvPrivateData) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ SYS_DATA *psSysData = pvPrivateData; -+ gpsIonPrivateData = psSysData; -+ -+ psSysData->ion_client = ion_client_create(psSysData->pdata->ion_device, SYS_RGX_DEV_NAME); -+ if (IS_ERR(psSysData->ion_client)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create ION client (%ld)", __func__, PTR_ERR(psSysData->ion_client))); -+ eError = PVRSRV_ERROR_ION_NO_CLIENT; -+ goto err_out; -+ } -+ /* Allocate the whole rogue ion heap and pass that to services to manage */ -+ psSysData->ion_rogue_allocation = ion_alloc(psSysData->ion_client, psSysData->pdata->rogue_heap_memory_size, 4096, (1 << psSysData->pdata->ion_heap_id), 0); -+ if (IS_ERR(psSysData->ion_rogue_allocation)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate ION rogue buffer (%ld)", __func__, PTR_ERR(psSysData->ion_rogue_allocation))); -+ eError = PVRSRV_ERROR_ION_FAILED_TO_ALLOC; -+ goto err_destroy_client; -+ -+ } -+ -+ return PVRSRV_OK; -+err_destroy_client: -+ ion_client_destroy(psSysData->ion_client); -+ psSysData->ion_client = NULL; -+err_out: -+ return eError; -+} -+ -+void IonDeinit(void) -+{ -+ SYS_DATA *psSysData = gpsIonPrivateData; -+ ion_free(psSysData->ion_client, psSysData->ion_rogue_allocation); -+ psSysData->ion_rogue_allocation = NULL; -+ ion_client_destroy(psSysData->ion_client); -+ psSysData->ion_client = NULL; -+} -+ -+struct ion_device *IonDevAcquire(void) -+{ -+ return gpsIonPrivateData->pdata->ion_device; -+} -+ -+void IonDevRelease(struct ion_device *ion_device) -+{ -+ PVR_ASSERT(ion_device == gpsIonPrivateData->pdata->ion_device); -+} -+#endif /* defined(SUPPORT_ION) */ -+ -+static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr) -+{ -+ PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; -+ SYS_DATA *psSysData = psDevConfig->hSysData; -+ IMG_UINT32 ui32Idx; -+ -+ for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++) -+ { -+ psDevPAddr[ui32Idx].uiAddr = -+ psCpuPAddr[ui32Idx].uiAddr - psSysData->pdata->tc_memory_base; -+ } -+} -+ -+static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr, -+ IMG_DEV_PHYADDR *psDevPAddr) -+{ -+ PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; -+ SYS_DATA *psSysData = psDevConfig->hSysData; -+ IMG_UINT32 ui32Idx; -+ -+ for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++) -+ { -+ psCpuPAddr[ui32Idx].uiAddr = -+ psDevPAddr[ui32Idx].uiAddr + psSysData->pdata->tc_memory_base; -+ } -+} -+ -+static void TCHostCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 uiNumOfAddr, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr) -+{ -+ if (sizeof(*psDevPAddr) == sizeof(*psCpuPAddr)) -+ { -+ OSCachedMemCopy(psDevPAddr, psCpuPAddr, uiNumOfAddr * sizeof(*psDevPAddr)); -+ return; -+ } -+ -+ /* In this case we may have a 32bit host, so we can't do a memcpy */ -+ /* Optimise common case */ -+ psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr; -+ if (uiNumOfAddr > 1) -+ { -+ IMG_UINT32 ui32Idx; -+ for (ui32Idx = 1; ui32Idx < uiNumOfAddr; ++ui32Idx) -+ { -+ psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr; -+ } -+ } -+} -+ -+static 
void TCHostDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 uiNumOfAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr, -+ IMG_DEV_PHYADDR *psDevPAddr) -+{ -+ if (sizeof(*psCpuPAddr) == sizeof(*psDevPAddr)) -+ { -+ OSCachedMemCopy(psCpuPAddr, psDevPAddr, uiNumOfAddr * sizeof(*psCpuPAddr)); -+ return; -+ } -+ -+ /* In this case we may have a 32bit host, so we can't do a memcpy. -+ * Check we are not dropping any data from the 64bit dev addr */ -+ PVR_ASSERT(UI64_TOPWORD_IS_ZERO(psDevPAddr[0].uiAddr)); -+ /* Optimise common case */ -+ psCpuPAddr[0].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[0].uiAddr); -+ if (uiNumOfAddr > 1) -+ { -+ IMG_UINT32 ui32Idx; -+ for (ui32Idx = 1; ui32Idx < uiNumOfAddr; ++ui32Idx) -+ { -+ PVR_ASSERT(UI64_TOPWORD_IS_ZERO(psDevPAddr[ui32Idx].uiAddr)); -+ psCpuPAddr[ui32Idx].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[ui32Idx].uiAddr); -+ } -+ } -+ -+} -+ -+static inline -+IMG_CHAR* GetHeapName(PHYS_HEAP_USAGE_FLAGS ui32Flags) -+{ -+ if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_GPU_LOCAL)) return "lma_gpu_local"; -+ if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_GPU_SECURE)) return "lma_gpu_secure"; -+ if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_GPU_PRIVATE)) return "lma_gpu_private"; -+ if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_FW_PRIVATE)) return "lma_fw_private"; -+ if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_FW_SHARED)) return "lma_fw_shared"; -+ if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_FW_PREMAP_PT)) return "lma_fw_pagetables"; -+ if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_CPU_LOCAL)) return "lma_cpu_local"; -+ if (BITMASK_HAS(ui32Flags,PHYS_HEAP_USAGE_DISPLAY)) return "lma_gpu_display"; -+ else return "Unexpected Heap"; -+} -+ -+static void TCHybridCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr) -+{ -+ PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; -+ SYS_DATA *psSysData = psDevConfig->hSysData; -+ IMG_UINT32 ui32Idx; -+ -+ for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++) -+ { -+ psDevPAddr[ui32Idx].uiAddr = -+ (psCpuPAddr[ui32Idx].uiAddr - psSysData->pdata->tc_memory_base) + -+ ODIN_MEMORY_HYBRID_DEVICE_BASE; -+ } -+} -+ -+static void TCHybridDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr, -+ IMG_DEV_PHYADDR *psDevPAddr) -+{ -+ PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; -+ SYS_DATA *psSysData = psDevConfig->hSysData; -+ IMG_UINT32 ui32Idx; -+ -+ for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++) -+ { -+ psCpuPAddr[ui32Idx].uiAddr = -+ (psDevPAddr[ui32Idx].uiAddr - ODIN_MEMORY_HYBRID_DEVICE_BASE) + -+ psSysData->pdata->tc_memory_base; -+ } -+} -+ -+static PVRSRV_ERROR -+InitLocalHeap(PHYS_HEAP_CONFIG *psPhysHeap, -+ IMG_UINT64 uiBaseAddr, IMG_UINT64 uiStartAddr, -+ IMG_UINT64 uiSize, PHYS_HEAP_FUNCTIONS *psFuncs, -+ PHYS_HEAP_USAGE_FLAGS ui32Flags) -+{ -+ psPhysHeap->sCardBase.uiAddr = uiBaseAddr; -+ psPhysHeap->sStartAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(uiStartAddr); -+ psPhysHeap->uiSize = uiSize; -+ psPhysHeap->eType = PHYS_HEAP_TYPE_LMA; -+ psPhysHeap->pszPDumpMemspaceName = "LMA"; -+ psPhysHeap->pszHeapName = GetHeapName(ui32Flags); -+ psPhysHeap->psMemFuncs = psFuncs; -+ psPhysHeap->ui32UsageFlags = ui32Flags; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR -+CreateCardGPUHeaps(const SYS_DATA *psSysData, -+ CARD_PHYS_HEAP_CONFIG_SPEC *pasCardHeapSpec, -+ PHYS_HEAP_CONFIG *pasPhysHeaps, -+ PHYS_HEAP_FUNCTIONS *psHeapFuncs, -+ IMG_UINT32 *pui32HeapIdx, -+ 
IMG_UINT64 ui64CardAddr) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT64 ui64StartAddr = psSysData->pdata->rogue_heap_memory_base; -+ IMG_UINT32 ui32SpecIdx; -+ -+ for (ui32SpecIdx = 0; ui32SpecIdx < ARRAY_SIZE(gasCardHeapTemplate); ui32SpecIdx++) -+ { -+ if (pasCardHeapSpec[ui32SpecIdx].bUsed) -+ { -+ IMG_UINT64 ui64HeapSize = pasCardHeapSpec[ui32SpecIdx].uiSize; -+ -+ if (BITMASK_HAS(pasCardHeapSpec[ui32SpecIdx].ui32UsageFlags, PHYS_HEAP_USAGE_FW_SHARED)) -+ { -+ /* The FW_SHARED size reserved initially covered the entire carveout meant to hold the Guest heaps, -+ * but the heap used by the driver has only RGX_FIRMWARE_RAW_HEAP_SIZE, trim the rest */ -+ ui64HeapSize -= (RGX_NUM_DRIVERS_SUPPORTED-1) * RGX_FIRMWARE_RAW_HEAP_SIZE; -+ } -+ -+ eError = InitLocalHeap(&pasPhysHeaps[*pui32HeapIdx], -+ ui64CardAddr, -+ IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr), -+ ui64HeapSize, -+ psHeapFuncs, -+ pasCardHeapSpec[ui32SpecIdx].ui32UsageFlags); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+ ui64CardAddr += pasCardHeapSpec[ui32SpecIdx].uiSize; -+ ui64StartAddr += pasCardHeapSpec[ui32SpecIdx].uiSize; -+ (*pui32HeapIdx)++; -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+#if TC_DISPLAY_MEM_SIZE != 0 -+static PVRSRV_ERROR -+CreateCardEXTHeap(const SYS_DATA *psSysData, -+ PHYS_HEAP_CONFIG *pasPhysHeaps, -+ PHYS_HEAP_FUNCTIONS *psHeapFuncs, -+ IMG_UINT32 *pui32HeapIdx, -+ IMG_UINT64 ui64CardBase) -+{ -+ IMG_UINT64 ui64StartAddr = psSysData->pdata->pdp_heap_memory_base; -+ IMG_UINT64 ui64Size = psSysData->pdata->pdp_heap_memory_size; -+ PVRSRV_ERROR eError; -+ -+ eError = InitLocalHeap(&pasPhysHeaps[*pui32HeapIdx], -+ ui64CardBase + psSysData->pdata->rogue_heap_memory_size, -+ IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr), -+ ui64Size, psHeapFuncs, -+ PHYS_HEAP_USAGE_EXTERNAL | PHYS_HEAP_USAGE_DISPLAY); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+ (*pui32HeapIdx)++; -+ -+ return PVRSRV_OK; -+} -+#endif -+ -+static PVRSRV_ERROR -+InitLocalHeaps(const SYS_DATA *psSysData, -+ CARD_PHYS_HEAP_CONFIG_SPEC *pasCardHeapSpec, -+ PHYS_HEAP_CONFIG *pasPhysHeaps, -+ IMG_UINT32 *pui32HeapIdx) -+{ -+ PHYS_HEAP_FUNCTIONS *psHeapFuncs; -+ PVRSRV_ERROR eError; -+ IMG_UINT64 ui64CardBase; -+ -+ if (psSysData->pdata->baseboard == TC_BASEBOARD_ODIN && -+ psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) -+ { -+ psHeapFuncs = &gsHybridPhysHeapFuncs; -+ ui64CardBase = ODIN_MEMORY_HYBRID_DEVICE_BASE; -+ } -+ else if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) -+ { -+ psHeapFuncs = &gsHostPhysHeapFuncs; -+ ui64CardBase = psSysData->pdata->tc_memory_base; -+ } -+ else -+ { -+ psHeapFuncs = &gsLocalPhysHeapFuncs; -+ ui64CardBase = 0; -+ } -+ -+ eError = CreateCardGPUHeaps(psSysData, pasCardHeapSpec, pasPhysHeaps, psHeapFuncs, pui32HeapIdx, ui64CardBase); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+#if TC_DISPLAY_MEM_SIZE != 0 -+ eError = CreateCardEXTHeap(psSysData, pasPhysHeaps, psHeapFuncs, pui32HeapIdx, ui64CardBase); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR -+InitHostHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps, IMG_UINT32 *pui32HeapIdx) -+{ -+ if (psSysData->pdata->mem_mode != TC_MEMORY_LOCAL) -+ { -+ pasPhysHeaps[*pui32HeapIdx].eType = PHYS_HEAP_TYPE_UMA; -+ pasPhysHeaps[*pui32HeapIdx].pszPDumpMemspaceName = "SYSMEM"; -+ pasPhysHeaps[*pui32HeapIdx].pszHeapName = "uma_cpu_local"; -+ pasPhysHeaps[*pui32HeapIdx].psMemFuncs = &gsHostPhysHeapFuncs; -+ pasPhysHeaps[*pui32HeapIdx].ui32UsageFlags = 
PHYS_HEAP_USAGE_CPU_LOCAL; -+ -+ (*pui32HeapIdx)++; -+ -+ PVR_DPF((PVR_DBG_WARNING, -+ "Initialising CPU_LOCAL UMA Host PhysHeaps with memory mode: %d", -+ psSysData->pdata->mem_mode)); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR -+PhysHeapsInit(const SYS_DATA *psSysData, -+ CARD_PHYS_HEAP_CONFIG_SPEC *pasCardHeapSpec, -+ PHYS_HEAP_CONFIG *pasPhysHeaps, -+ void *pvPrivData, IMG_UINT32 ui32NumHeaps) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 i; -+ IMG_UINT32 ui32HeapCounter = 0; -+ -+ eError = InitLocalHeaps(psSysData, pasCardHeapSpec, pasPhysHeaps, &ui32HeapCounter); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+ eError = InitHostHeaps(psSysData, pasPhysHeaps, &ui32HeapCounter); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+ PVR_LOG_RETURN_IF_FALSE((ui32HeapCounter == ui32NumHeaps), -+ "Number of PhysHeapConfigs set up doesn't match the initial requirement.", -+ PVRSRV_ERROR_PHYSHEAP_CONFIG); -+ -+ /* Initialise fields that don't change between memory modes. -+ * Fix up heap IDs. This is needed for multi-testchip systems to -+ * ensure the heap IDs are unique as this is what Services expects. -+ */ -+ for (i = 0; i < ui32NumHeaps; i++) -+ { -+ pasPhysHeaps[i].hPrivData = pvPrivData; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR -+PhysHeapSetRequirements(const SYS_DATA *psSysData, -+ CARD_PHYS_HEAP_CONFIG_SPEC *pasCardHeapSpec, -+ IMG_UINT32 *pui32CardPhysHeapCfgCount) -+{ -+ IMG_UINT32 i; -+ IMG_UINT64 ui64FreeCardMemory = psSysData->pdata->rogue_heap_memory_size; -+ -+ PVR_LOG_RETURN_IF_FALSE( -+ BITMASK_HAS(pasCardHeapSpec[HEAP_SPEC_IDX_GPU_PRIVATE].ui32UsageFlags, PHYS_HEAP_USAGE_GPU_PRIVATE) && -+ BITMASK_HAS(pasCardHeapSpec[HEAP_SPEC_IDX_GPU_LOCAL].ui32UsageFlags, PHYS_HEAP_USAGE_GPU_LOCAL), -+ "PhysHeapConfigs not set correctly in the system layer.", PVRSRV_ERROR_PHYSHEAP_CONFIG); -+ -+ for (i = 0; i < ARRAY_SIZE(gasCardHeapTemplate); i++) -+ { -+ if (pasCardHeapSpec[i].bUsed) -+ { -+ /* Determine the memory requirements of heaps with a fixed size */ -+ ui64FreeCardMemory -= pasCardHeapSpec[i].uiSize; -+ -+ /* Count card physheap configs used by the system */ -+ (*pui32CardPhysHeapCfgCount)++; -+ } -+ } -+ -+ if (SysRestrictGpuLocalAddPrivateHeap()) -+ { -+ IMG_UINT64 ui64GpuSharedMem = SysRestrictGpuLocalPhysheap(ui64FreeCardMemory); -+ -+ if (ui64GpuSharedMem == ui64FreeCardMemory) -+ { -+ /* No memory reserved for GPU private use, special heap not needed */ -+ } -+ else -+ { -+ /* Set up the GPU private heap */ -+ pasCardHeapSpec[HEAP_SPEC_IDX_GPU_PRIVATE].bUsed = true; -+ pasCardHeapSpec[HEAP_SPEC_IDX_GPU_PRIVATE].uiSize = ui64FreeCardMemory - ui64GpuSharedMem; -+ ui64FreeCardMemory = ui64GpuSharedMem; -+ (*pui32CardPhysHeapCfgCount)++; -+ } -+ } -+ -+ /* all remaining memory card memory goes to GPU_LOCAL */ -+ pasCardHeapSpec[HEAP_SPEC_IDX_GPU_LOCAL].uiSize = ui64FreeCardMemory; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR -+PhysHeapsCreate(const SYS_DATA *psSysData, PVRSRV_DEVICE_CONFIG *psDevConfig, -+ PHYS_HEAP_CONFIG **ppasPhysHeapsOut, -+ IMG_UINT32 *puiPhysHeapCountOut) -+{ -+ PHYS_HEAP_CONFIG *pasPhysHeaps; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32NumHeaps = 0; -+ CARD_PHYS_HEAP_CONFIG_SPEC asCardHeapSpec[ARRAY_SIZE(gasCardHeapTemplate)]; -+ -+ PVR_LOG_RETURN_IF_FALSE((psSysData->pdata->mem_mode == TC_MEMORY_LOCAL) || -+ (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID), -+ "Unsupported memory mode", PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ /* Initialise the local heap specs with the build-time template */ -+ 
memcpy(asCardHeapSpec, gasCardHeapTemplate, sizeof(gasCardHeapTemplate)); -+ -+ eError = PhysHeapSetRequirements(psSysData, asCardHeapSpec, &ui32NumHeaps); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+ psDevConfig->bHasNonMappableLocalMemory = asCardHeapSpec[HEAP_SPEC_IDX_GPU_PRIVATE].bUsed; -+ -+ if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) -+ { -+ /* CPU_LOCAL heap */ -+ ui32NumHeaps++; -+ } -+ -+#if TC_DISPLAY_MEM_SIZE != 0 -+ if (psSysData->pdata->mem_mode == TC_MEMORY_LOCAL || -+ psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) -+ { -+ /* EXTERNAL / DISPLAY heap */ -+ ui32NumHeaps++; -+ } -+#endif -+ -+ pasPhysHeaps = OSAllocMem(sizeof(*pasPhysHeaps) * ui32NumHeaps); -+ if (!pasPhysHeaps) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ eError = PhysHeapsInit(psSysData, asCardHeapSpec, pasPhysHeaps, psDevConfig, ui32NumHeaps); -+ if (eError != PVRSRV_OK) -+ { -+ OSFreeMem(pasPhysHeaps); -+ return eError; -+ } -+ -+ *ppasPhysHeapsOut = pasPhysHeaps; -+ *puiPhysHeapCountOut = ui32NumHeaps; -+ -+ return PVRSRV_OK; -+} -+ -+static void DeviceConfigDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig) -+{ -+ if (psDevConfig->pszVersion) -+ { -+ OSFreeMem(psDevConfig->pszVersion); -+ } -+ -+ OSFreeMem(psDevConfig->pasPhysHeaps); -+ -+ OSFreeMem(psDevConfig); -+} -+ -+static void odinTCDevPhysAddr2DmaAddr(PVRSRV_DEVICE_CONFIG *psDevConfig, -+ IMG_DMA_ADDR *psDmaAddr, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_BOOL *pbValid, -+ IMG_UINT32 ui32NumAddr, -+ IMG_BOOL bSparseAlloc) -+{ -+ IMG_CPU_PHYADDR sCpuPAddr = {0}; -+ IMG_UINT32 ui32Idx; -+ -+ /* Fast path */ -+ if (!bSparseAlloc) -+ { -+ /* In Odin, DMA address space is the same as host CPU */ -+ TCLocalDevPAddrToCpuPAddr(psDevConfig, -+ 1, -+ &sCpuPAddr, -+ psDevPAddr); -+ psDmaAddr->uiAddr = sCpuPAddr.uiAddr; -+ } -+ else -+ { -+ for (ui32Idx = 0; ui32Idx < ui32NumAddr; ui32Idx++) -+ { -+ if (pbValid[ui32Idx]) -+ { -+ TCLocalDevPAddrToCpuPAddr(psDevConfig, -+ 1, -+ &sCpuPAddr, -+ &psDevPAddr[ui32Idx]); -+ psDmaAddr[ui32Idx].uiAddr = sCpuPAddr.uiAddr; -+ } -+ else -+ { -+ /* Invalid DMA address marker */ -+ psDmaAddr[ui32Idx].uiAddr = ~((IMG_UINT64)0x0); -+ } -+ } -+ } -+} -+ -+static void * odinTCgetCDMAChan(PVRSRV_DEVICE_CONFIG *psDevConfig, char *name) -+{ -+ struct device* psDev = (struct device*) psDevConfig->pvOSDevice; -+ return tc_dma_chan(psDev->parent, name); -+} -+ -+static void odinTCFreeCDMAChan(PVRSRV_DEVICE_CONFIG *psDevConfig, -+ void* channel) -+{ -+ -+ struct device* psDev = (struct device*) psDevConfig->pvOSDevice; -+ struct dma_chan *chan = (struct dma_chan*) channel; -+ -+ tc_dma_chan_free(psDev->parent, chan); -+} -+ -+static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData, -+ PVRSRV_DEVICE_CONFIG **ppsDevConfigOut) -+{ -+ PVRSRV_DEVICE_CONFIG *psDevConfig; -+ RGX_DATA *psRGXData; -+ RGX_TIMING_INFORMATION *psRGXTimingInfo; -+ PHYS_HEAP_CONFIG *pasPhysHeaps; -+ IMG_UINT32 uiPhysHeapCount; -+ PVRSRV_ERROR eError; -+ -+ psDevConfig = OSAllocZMem(sizeof(*psDevConfig) + -+ sizeof(*psRGXData) + -+ sizeof(*psRGXTimingInfo)); -+ if (!psDevConfig) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ psRGXData = (RGX_DATA *) IMG_OFFSET_ADDR(psDevConfig, sizeof(*psDevConfig)); -+ psRGXTimingInfo = (RGX_TIMING_INFORMATION *) IMG_OFFSET_ADDR(psRGXData, sizeof(*psRGXData)); -+ -+ eError = PhysHeapsCreate(psSysData, psDevConfig, &pasPhysHeaps, &uiPhysHeapCount); -+ if (eError != PVRSRV_OK) -+ { -+ goto ErrorFreeDevConfig; -+ } -+ -+ /* Setup RGX specific timing data */ -+#if defined(TC_APOLLO_BONNIE) -+ /* For 
BonnieTC there seems to be an additional 5x multiplier that occurs to the clock as measured speed is 540Mhz not 108Mhz. */ -+ psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(&psSysData->pdev->dev) * 6 * 5; -+#elif defined(TC_APOLLO_ES2) -+ psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(&psSysData->pdev->dev) * 6; -+#else -+ psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(&psSysData->pdev->dev) / -+ tc_core_clock_multiplex(&psSysData->pdev->dev); -+#endif -+ psRGXTimingInfo->bEnableActivePM = IMG_FALSE; -+ psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE; -+ psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS; -+ -+ /* Set up the RGX data */ -+ psRGXData->psRGXTimingInfo = psRGXTimingInfo; -+ -+ /* Setup the device config */ -+ psDevConfig->pvOSDevice = &psSysData->pdev->dev; -+ psDevConfig->pszName = "tc"; -+ psDevConfig->pszVersion = GetDeviceVersionString(psSysData); -+ -+ psDevConfig->sRegsCpuPBase.uiAddr = psSysData->registers->start; -+ psDevConfig->ui32RegsSize = resource_size(psSysData->registers); -+ -+ if (psSysData->pdata->baseboard == TC_BASEBOARD_ODIN && -+ psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) -+ { -+ psDevConfig->eDefaultHeap = SysDefaultToCpuLocalHeap() ? -+ PVRSRV_PHYS_HEAP_CPU_LOCAL : PVRSRV_PHYS_HEAP_GPU_LOCAL; -+ } -+ else -+ { -+ psDevConfig->eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL; -+ } -+ -+ psDevConfig->ui32IRQ = TC_INTERRUPT_EXT; -+ -+ psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE; -+ -+ psDevConfig->pasPhysHeaps = pasPhysHeaps; -+ psDevConfig->ui32PhysHeapCount = uiPhysHeapCount; -+ -+ /* Only required for LMA but having this always set shouldn't be a problem */ -+ psDevConfig->bDevicePA0IsValid = IMG_TRUE; -+ -+ psDevConfig->hDevData = psRGXData; -+ psDevConfig->hSysData = psSysData; -+ -+#if defined(SUPPORT_ALT_REGBASE) -+ if (psSysData->pdata->mem_mode != TC_MEMORY_LOCAL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: alternative GPU register base is " -+ "supported only in LMA mode", __func__)); -+ goto ErrorFreeDevConfig; -+ } -+ else -+ { -+ /* Using display memory base as the alternative GPU register base, -+ * since the display memory range is not used by the firmware. 
*/ -+ IMG_CPU_PHYADDR sDisplayMemAddr; -+ -+ sDisplayMemAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psSysData->pdata->pdp_heap_memory_base); -+ TCLocalCpuPAddrToDevPAddr(psDevConfig, 1, -+ &psDevConfig->sAltRegsGpuPBase, -+ &sDisplayMemAddr); -+ } -+#endif -+ -+#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) -+ /* Fake DVFS configuration used purely for testing purposes */ -+ psDevConfig->sDVFS.sDVFSDeviceCfg.pasOPPTable = asOPPTable; -+ psDevConfig->sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = LEVEL_COUNT; -+ psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency; -+ psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetVoltage = SetVoltage; -+#endif -+#if defined(SUPPORT_LINUX_DVFS) -+ psDevConfig->sDVFS.sDVFSDeviceCfg.ui32PollMs = 1000; -+ psDevConfig->sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE; -+ psDevConfig->sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90; -+ psDevConfig->sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10; -+#endif -+ -+ /* DMA channel config */ -+ psDevConfig->pfnSlaveDMAGetChan = odinTCgetCDMAChan; -+ psDevConfig->pfnSlaveDMAFreeChan = odinTCFreeCDMAChan; -+ psDevConfig->pfnDevPhysAddr2DmaAddr = odinTCDevPhysAddr2DmaAddr; -+ psDevConfig->pszDmaTxChanName = psSysData->pdata->tc_dma_tx_chan_name; -+ psDevConfig->pszDmaRxChanName = psSysData->pdata->tc_dma_rx_chan_name; -+ psDevConfig->bHasDma = IMG_TRUE; -+ /* Following two values are expressed in number of bytes */ -+ psDevConfig->ui32DmaTransferUnit = 1; -+ psDevConfig->ui32DmaAlignment = 1; -+ -+ *ppsDevConfigOut = psDevConfig; -+ -+ return PVRSRV_OK; -+ -+ErrorFreeDevConfig: -+ OSFreeMem(psDevConfig); -+ return eError; -+} -+ -+#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__) -+/* #define _DBG(...) PVR_LOG((__VA_ARGS__)) */ -+#define _DBG(...) -+ -+static PVRSRV_ERROR PrePower(IMG_HANDLE hSysData, -+ PVRSRV_SYS_POWER_STATE eNewPowerState, -+ PVRSRV_SYS_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags) -+{ -+ SYS_DATA *psSysData = (SYS_DATA *) hSysData; -+ IMG_DEV_PHYADDR sDevPAddr = {0}; -+ IMG_UINT64 uiHeapTotalSize, uiHeapUsedSize, uiHeapFreeSize; -+ IMG_UINT64 uiSize = 0, uiOffset = 0; -+ PVRSRV_ERROR eError; -+ -+ _DBG("(%s()) state: current=%s, new=%s; flags: 0x%08x", __func__, -+ PVRSRVSysPowerStateToString(eCurrentPowerState), -+ PVRSRVSysPowerStateToString(eNewPowerState), ePwrFlags); -+ -+ /* The transition might be both from ON or OFF states to OFF state so check -+ * only for the *new* state. Also this is only valid for suspend requests. 
*/ -+ if (eNewPowerState != PVRSRV_SYS_POWER_STATE_OFF || -+ !BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_OSPM_SUSPEND_REQ)) -+ { -+ return PVRSRV_OK; -+ } -+ -+ eError = LMA_HeapIteratorCreate(psSysData->psDevConfig->psDevNode, -+ PVRSRV_PHYS_HEAP_GPU_LOCAL, -+ &psSysData->psHeapIter); -+ PVR_LOG_GOTO_IF_ERROR(eError, "LMA_HeapIteratorCreate", return_error); -+ -+ eError = LMA_HeapIteratorGetHeapStats(psSysData->psHeapIter, &uiHeapTotalSize, -+ &uiHeapUsedSize); -+ PVR_LOG_GOTO_IF_ERROR(eError, "LMA_HeapIteratorGetHeapStats", -+ return_error); -+ uiHeapFreeSize = uiHeapTotalSize - uiHeapUsedSize; -+ -+ _DBG("(%s()) heap stats: total=0x%" IMG_UINT64_FMTSPECx ", " -+ "used=0x%" IMG_UINT64_FMTSPECx ", free=0x%" IMG_UINT64_FMTSPECx, -+ __func__, uiHeapTotalSize, uiHeapUsedSize, uiHeapFreeSize); -+ -+ psSysData->pvS3Buffer = OSAllocMem(uiHeapUsedSize); -+ PVR_LOG_GOTO_IF_NOMEM(psSysData->pvS3Buffer, eError, destroy_iterator); -+ -+ while (LMA_HeapIteratorNext(psSysData->psHeapIter, &sDevPAddr, &uiSize)) -+ { -+ void *pvCpuVAddr; -+ IMG_CPU_PHYADDR sCpuPAddr = {0}; -+ -+ if (uiOffset + uiSize > uiHeapUsedSize) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "uiOffset = %" IMG_UINT64_FMTSPECx ", " -+ "uiSize = %" IMG_UINT64_FMTSPECx, uiOffset, uiSize)); -+ -+ PVR_LOG_GOTO_WITH_ERROR("LMA_HeapIteratorNext", eError, -+ PVRSRV_ERROR_INVALID_OFFSET, -+ free_buffer); -+ } -+ -+ TCLocalDevPAddrToCpuPAddr(psSysData->psDevConfig, 1, &sCpuPAddr, -+ &sDevPAddr); -+ -+ pvCpuVAddr = OSMapPhysToLin(sCpuPAddr, uiSize, -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC); -+ -+ _DBG("(%s()) iterator: dev_paddr=%px, cpu_paddr=%px, cpu_vaddr=%px, " -+ "size=0x%05" IMG_UINT64_FMTSPECx, __func__, -+ (void *) sDevPAddr.uiAddr, (void *) sCpuPAddr.uiAddr, -+ pvCpuVAddr, uiSize); -+ -+ /* copy memory */ -+ memcpy((IMG_BYTE *) psSysData->pvS3Buffer + uiOffset, pvCpuVAddr, -+ uiSize); -+ /* and now poison it */ -+ memset(pvCpuVAddr, 0x9b, uiSize); -+ -+ uiOffset += uiSize; -+ -+ OSUnMapPhysToLin(pvCpuVAddr, uiSize); -+ } -+ -+ return PVRSRV_OK; -+ -+free_buffer: -+ OSFreeMem(psSysData->pvS3Buffer); -+ psSysData->pvS3Buffer = NULL; -+destroy_iterator: -+ LMA_HeapIteratorDestroy(psSysData->psHeapIter); -+ psSysData->psHeapIter = NULL; -+return_error: -+ return eError; -+} -+ -+static PVRSRV_ERROR PostPower(IMG_HANDLE hSysData, -+ PVRSRV_SYS_POWER_STATE eNewPowerState, -+ PVRSRV_SYS_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags) -+{ -+ SYS_DATA *psSysData = (SYS_DATA *) hSysData; -+ IMG_DEV_PHYADDR sDevPAddr = {0}; -+ IMG_UINT64 uiSize = 0, uiOffset = 0; -+ PVRSRV_ERROR eError; -+ -+ _DBG("(%s()) state: current=%s, new=%s; flags=0x%08x; buffer=%px", __func__, -+ PVRSRVSysPowerStateToString(eCurrentPowerState), -+ PVRSRVSysPowerStateToString(eNewPowerState), ePwrFlags, -+ psSysData->pvS3Buffer); -+ -+ /* The transition might be both to ON or OFF states from OFF state so check -+ * only for the *current* state. Also this is only valid for resume requests. 
*/ -+ if (eCurrentPowerState != PVRSRV_SYS_POWER_STATE_OFF || -+ !BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_OSPM_RESUME_REQ) || -+ psSysData->pvS3Buffer == NULL) -+ { -+ return PVRSRV_OK; -+ } -+ -+ eError = LMA_HeapIteratorReset(psSysData->psHeapIter); -+ PVR_LOG_GOTO_IF_ERROR(eError, "LMA_HeapIteratorReset", free_buffer); -+ -+ while (LMA_HeapIteratorNext(psSysData->psHeapIter, &sDevPAddr, &uiSize)) -+ { -+ void *pvCpuVAddr; -+ IMG_CPU_PHYADDR sCpuPAddr = {0}; -+ -+ TCLocalDevPAddrToCpuPAddr(psSysData->psDevConfig, 1, &sCpuPAddr, -+ &sDevPAddr); -+ -+ pvCpuVAddr = OSMapPhysToLin(sCpuPAddr, uiSize, -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC); -+ -+ _DBG("(%s()) iterator: dev_paddr=%px, cpu_paddr=%px, cpu_vaddr=%px, " -+ "size=0x%05" IMG_UINT64_FMTSPECx, __func__, -+ (void *) sDevPAddr.uiAddr, (void *) sCpuPAddr.uiAddr, -+ pvCpuVAddr, uiSize); -+ -+ /* copy memory */ -+ memcpy(pvCpuVAddr, (IMG_BYTE *) psSysData->pvS3Buffer + uiOffset, -+ uiSize); -+ -+ uiOffset += uiSize; -+ -+ OSUnMapPhysToLin(pvCpuVAddr, uiSize); -+ } -+ -+ LMA_HeapIteratorDestroy(psSysData->psHeapIter); -+ psSysData->psHeapIter = NULL; -+ -+ OSFreeMem(psSysData->pvS3Buffer); -+ psSysData->pvS3Buffer = NULL; -+ -+ return PVRSRV_OK; -+ -+free_buffer: -+ OSFreeMem(psSysData->pvS3Buffer); -+ psSysData->pvS3Buffer = NULL; -+ -+ return eError; -+} -+#endif /* defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__) */ -+ -+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) -+{ -+ PVRSRV_DEVICE_CONFIG *psDevConfig; -+ SYS_DATA *psSysData; -+ resource_size_t uiRegistersSize; -+ PVRSRV_ERROR eError; -+ int err = 0; -+ -+ PVR_ASSERT(pvOSDevice); -+ -+ psSysData = OSAllocZMem(sizeof(*psSysData)); -+ if (psSysData == NULL) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ psSysData->pdev = to_platform_device((struct device *)pvOSDevice); -+ psSysData->pdata = psSysData->pdev->dev.platform_data; -+ -+ /* -+ * The device cannot address system memory, so there is no DMA -+ * limitation. -+ */ -+ if (psSysData->pdata->mem_mode == TC_MEMORY_LOCAL) -+ { -+ dma_set_mask(pvOSDevice, DMA_BIT_MASK(64)); -+ } -+ else -+ { -+ dma_set_mask(pvOSDevice, DMA_BIT_MASK(32)); -+ } -+ -+ err = tc_enable(psSysData->pdev->dev.parent); -+ if (err) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to enable PCI device (%d)", __func__, err)); -+ eError = PVRSRV_ERROR_PCI_CALL_FAILED; -+ goto ErrFreeSysData; -+ } -+ -+ psSysData->registers = platform_get_resource_byname(psSysData->pdev, -+ IORESOURCE_MEM, -+ "rogue-regs"); -+ if (!psSysData->registers) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to get Rogue register information", -+ __func__)); -+ eError = PVRSRV_ERROR_PCI_REGION_UNAVAILABLE; -+ goto ErrorDevDisable; -+ } -+ -+ /* Check the address range is large enough. 
*/ -+ uiRegistersSize = resource_size(psSysData->registers); -+ if (uiRegistersSize < SYS_RGX_REG_REGION_SIZE) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Rogue register region isn't big enough (was %pa, required 0x%08x)", -+ __func__, &uiRegistersSize, SYS_RGX_REG_REGION_SIZE)); -+ -+ eError = PVRSRV_ERROR_PCI_REGION_TOO_SMALL; -+ goto ErrorDevDisable; -+ } -+ -+ /* Reserve the address range */ -+ if (!request_mem_region(psSysData->registers->start, -+ resource_size(psSysData->registers), -+ SYS_RGX_DEV_NAME)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Rogue register memory region not available", -+ __func__)); -+ eError = PVRSRV_ERROR_PCI_CALL_FAILED; -+ -+ goto ErrorDevDisable; -+ } -+ -+ eError = DeviceConfigCreate(psSysData, &psDevConfig); -+ if (eError != PVRSRV_OK) -+ { -+ goto ErrorReleaseMemRegion; -+ } -+ -+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ eError = IonInit(psSysData); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise ION", __func__)); -+ goto ErrorDeviceConfigDestroy; -+ } -+#endif -+ -+ /* Set psDevConfig->pfnSysDevErrorNotify callback */ -+ psDevConfig->pfnSysDevErrorNotify = SysRGXErrorNotify; -+ -+#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__) -+ /* power functions */ -+ psDevConfig->pfnPrePowerState = PrePower; -+ psDevConfig->pfnPostPowerState = PostPower; -+ -+ psSysData->psDevConfig = psDevConfig; -+#endif /* defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__) */ -+ -+ *ppsDevConfig = psDevConfig; -+ -+ return PVRSRV_OK; -+ -+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ErrorDeviceConfigDestroy: -+ DeviceConfigDestroy(psDevConfig); -+#endif -+ErrorReleaseMemRegion: -+ release_mem_region(psSysData->registers->start, -+ resource_size(psSysData->registers)); -+ErrorDevDisable: -+ tc_disable(psSysData->pdev->dev.parent); -+ErrFreeSysData: -+ OSFreeMem(psSysData); -+ return eError; -+} -+ -+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig) -+{ -+ SYS_DATA *psSysData = (SYS_DATA *)psDevConfig->hSysData; -+ -+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ IonDeinit(); -+#endif -+ -+ DeviceConfigDestroy(psDevConfig); -+ -+ release_mem_region(psSysData->registers->start, -+ resource_size(psSysData->registers)); -+ tc_disable(psSysData->pdev->dev.parent); -+ -+ OSFreeMem(psSysData); -+} -+ -+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+#if defined(TC_APOLLO_TCF5) -+ PVR_UNREFERENCED_PARAMETER(psDevConfig); -+ PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); -+ return PVRSRV_OK; -+#else -+ SYS_DATA *psSysData = psDevConfig->hSysData; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ u32 tmp = 0; -+ u32 pll; -+ -+ PVR_DUMPDEBUG_LOG("------[ rgx_tc system debug ]------"); -+ -+ if (tc_sys_info(psSysData->pdev->dev.parent, &tmp, &pll)) -+ goto err_out; -+ -+ if (tmp > 0) -+ PVR_DUMPDEBUG_LOG("Chip temperature: %d degrees C", tmp); -+ PVR_DUMPDEBUG_LOG("PLL status: %x", pll); -+ -+err_out: -+ return eError; -+#endif -+} -+ -+typedef struct -+{ -+ struct device *psDev; -+ int iInterruptID; -+ void *pvData; -+ PFN_LISR pfnLISR; -+} LISR_DATA; -+ -+static void TCInterruptHandler(void* pvData) -+{ -+ LISR_DATA *psLISRData = pvData; -+ psLISRData->pfnLISR(psLISRData->pvData); -+} -+ -+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, -+ IMG_UINT32 ui32IRQ, -+ const IMG_CHAR *pszName, -+ PFN_LISR pfnLISR, -+ void *pvData, -+ IMG_HANDLE 
*phLISRData) -+{ -+ SYS_DATA *psSysData = (SYS_DATA *)hSysData; -+ LISR_DATA *psLISRData; -+ PVRSRV_ERROR eError; -+ int err; -+ -+ if (ui32IRQ != TC_INTERRUPT_EXT) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: No device matching IRQ %d", __func__, ui32IRQ)); -+ return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; -+ } -+ -+ psLISRData = OSAllocZMem(sizeof(*psLISRData)); -+ if (!psLISRData) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto err_out; -+ } -+ -+ psLISRData->pfnLISR = pfnLISR; -+ psLISRData->pvData = pvData; -+ psLISRData->iInterruptID = ui32IRQ; -+ psLISRData->psDev = psSysData->pdev->dev.parent; -+ -+ err = tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, TCInterruptHandler, psLISRData); -+ if (err) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: tc_set_interrupt_handler() failed (%d)", __func__, err)); -+ eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; -+ goto err_free_data; -+ } -+ -+ err = tc_enable_interrupt(psLISRData->psDev, psLISRData->iInterruptID); -+ if (err) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: tc_enable_interrupt() failed (%d)", __func__, err)); -+ eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; -+ goto err_unset_interrupt_handler; -+ } -+ -+ *phLISRData = psLISRData; -+ eError = PVRSRV_OK; -+ -+ PVR_TRACE(("Installed device LISR " IMG_PFN_FMTSPEC " with tc module to ID %u", -+ pfnLISR, ui32IRQ)); -+ -+err_out: -+ return eError; -+err_unset_interrupt_handler: -+ tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL); -+err_free_data: -+ OSFreeMem(psLISRData); -+ goto err_out; -+} -+ -+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData) -+{ -+ LISR_DATA *psLISRData = (LISR_DATA *) hLISRData; -+ int err; -+ -+ err = tc_disable_interrupt(psLISRData->psDev, psLISRData->iInterruptID); -+ if (err) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: tc_disable_interrupt() failed (%d)", __func__, err)); -+ } -+ -+ err = tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL); -+ if (err) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: tc_set_interrupt_handler() failed (%d)", __func__, err)); -+ } -+ -+ PVR_TRACE(("Uninstalled device LISR " IMG_PFN_FMTSPEC " with tc module from ID %u", -+ psLISRData->pfnLISR, psLISRData->iInterruptID)); -+ -+ OSFreeMem(psLISRData); -+ -+ return PVRSRV_OK; -+} -diff --git a/drivers/gpu/drm/img-rogue/apollo/sysinfo.h b/drivers/gpu/drm/img-rogue/apollo/sysinfo.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/sysinfo.h -@@ -0,0 +1,60 @@ -+/*************************************************************************/ /*! -+@File -+@Title System Description Header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description This header provides system-specific declarations and macros -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(__SYSINFO_H__) -+#define __SYSINFO_H__ -+ -+/*!< System specific poll/timeout details */ -+#if defined(VIRTUAL_PLATFORM) || defined(FPGA) -+#define MAX_HW_TIME_US (240000000) -+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (120000) -+#else -+#define MAX_HW_TIME_US (500000) -+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1500)//(10000) -+#endif -+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) -+#define WAIT_TRY_COUNT (10000) -+ -+#define SYS_RGX_DEV_NAME "tc_rogue" -+ -+#endif /* !defined(__SYSINFO_H__) */ -diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_apollo.c b/drivers/gpu/drm/img-rogue/apollo/tc_apollo.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/tc_apollo.c -@@ -0,0 +1,1507 @@ -+/* -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. 
-+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+/* -+ * This is a device driver for the apollo testchip framework. It creates -+ * platform devices for the pdp and ext sub-devices, and exports functions to -+ * manage the shared interrupt handling -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "tc_drv_internal.h" -+#include "tc_apollo.h" -+ -+#if defined(SUPPORT_DMA_HEAP) -+#include "tc_dmabuf_heap.h" -+#elif defined(SUPPORT_ION) -+#include "tc_ion.h" -+#endif -+ -+#include "apollo_regs.h" -+#include "tcf_clk_ctrl.h" -+#include "tcf_pll.h" -+#include "tc_clocks.h" -+ -+#if defined(SUPPORT_APOLLO_FPGA) -+#include "tc_apollo_debugfs.h" -+#endif /* defined(SUPPORT_APOLLO_FPGA) */ -+ -+/* -+ * kernel_compatibility.h: This header is a special case and should always be -+ * the last file included, as it can affect definitions/declarations in files -+ * included after it. 
-+ */ -+#include "kernel_compatibility.h" -+ -+#define TC_INTERRUPT_FLAG_PDP (1 << PDP1_INT_SHIFT) -+#define TC_INTERRUPT_FLAG_EXT (1 << EXT_INT_SHIFT) -+ -+#define PCI_VENDOR_ID_POWERVR 0x1010 -+#define DEVICE_ID_PCI_APOLLO_FPGA 0x1CF1 -+#define DEVICE_ID_PCIE_APOLLO_FPGA 0x1CF2 -+ -+#define APOLLO_MEM_PCI_BASENUM (2) -+ -+static struct { -+ struct thermal_zone_device *thermal_zone; -+ -+#if defined(SUPPORT_APOLLO_FPGA) -+ struct tc_io_region fpga; -+ struct apollo_debugfs_fpga_entries fpga_entries; -+#endif -+} apollo_pdata; -+ -+#if defined(SUPPORT_APOLLO_FPGA) -+ -+#define APOLLO_DEVICE_NAME_FPGA "apollo_fpga" -+ -+struct apollo_fpga_platform_data { -+ /* The testchip memory mode (LMA, HOST or HYBRID) */ -+ int mem_mode; -+ -+ resource_size_t tc_memory_base; -+ -+ resource_size_t pdp_heap_memory_base; -+ resource_size_t pdp_heap_memory_size; -+}; -+ -+#endif /* defined(SUPPORT_APOLLO_FPGA) */ -+ -+static void spi_write(struct tc_device *tc, u32 off, u32 val) -+{ -+ iowrite32(off, tc->tcf.registers -+ + TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR); -+ iowrite32(val, tc->tcf.registers -+ + TCF_CLK_CTRL_TCF_SPI_MST_WDATA); -+ iowrite32(TCF_SPI_MST_GO_MASK, tc->tcf.registers -+ + TCF_CLK_CTRL_TCF_SPI_MST_GO); -+ udelay(1000); -+} -+ -+static int spi_read(struct tc_device *tc, u32 off, u32 *val) -+{ -+ int cnt = 0; -+ u32 spi_mst_status; -+ -+ iowrite32(0x40000 | off, tc->tcf.registers -+ + TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR); -+ iowrite32(TCF_SPI_MST_GO_MASK, tc->tcf.registers -+ + TCF_CLK_CTRL_TCF_SPI_MST_GO); -+ -+ udelay(100); -+ -+ do { -+ spi_mst_status = ioread32(tc->tcf.registers -+ + TCF_CLK_CTRL_TCF_SPI_MST_STATUS); -+ -+ if (cnt++ > 10000) { -+ dev_err(&tc->pdev->dev, -+ "%s: Time out reading SPI reg (0x%x)\n", -+ __func__, off); -+ return -1; -+ } -+ -+ } while (spi_mst_status != 0x08); -+ -+ *val = ioread32(tc->tcf.registers -+ + TCF_CLK_CTRL_TCF_SPI_MST_RDATA); -+ -+ return 0; -+} -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) -+static int apollo_thermal_get_temp(struct thermal_zone_device *thermal, -+ unsigned long *t) -+#else -+static int apollo_thermal_get_temp(struct thermal_zone_device *thermal, -+ int *t) -+#endif -+{ -+ struct tc_device *tc; -+ int err = -ENODEV; -+ u32 tmp; -+ -+ if (!thermal) -+ goto err_out; -+ -+ tc = (struct tc_device *)thermal->devdata; -+ -+ if (!tc) -+ goto err_out; -+ -+ if (spi_read(tc, TCF_TEMP_SENSOR_SPI_OFFSET, &tmp)) { -+ dev_err(&tc->pdev->dev, -+ "Failed to read apollo temperature sensor\n"); -+ -+ goto err_out; -+ } -+ -+ /* Report this in millidegree Celsius */ -+ *t = TCF_TEMP_SENSOR_TO_C(tmp) * 1000; -+ -+ err = 0; -+ -+err_out: -+ return err; -+} -+ -+static struct thermal_zone_device_ops apollo_thermal_dev_ops = { -+ .get_temp = apollo_thermal_get_temp, -+}; -+ -+#if defined(SUPPORT_RGX) -+ -+static void pll_write_reg(struct tc_device *tc, -+ resource_size_t reg_offset, u32 reg_value) -+{ -+ BUG_ON(reg_offset < TCF_PLL_PLL_CORE_CLK0); -+ BUG_ON(reg_offset > tc->tcf_pll.region.size + -+ TCF_PLL_PLL_CORE_CLK0 - 4); -+ -+ /* Tweak the offset because we haven't mapped the full pll region */ -+ iowrite32(reg_value, tc->tcf_pll.registers + -+ reg_offset - TCF_PLL_PLL_CORE_CLK0); -+} -+ -+static u32 sai_read_es2(struct tc_device *tc, u32 addr) -+{ -+ iowrite32(0x200 | addr, tc->tcf.registers + 0x300); -+ iowrite32(0x1 | addr, tc->tcf.registers + 0x318); -+ return ioread32(tc->tcf.registers + 0x310); -+} -+ -+static int apollo_align_interface_es2(struct tc_device *tc) -+{ -+ u32 reg = 0; -+ u32 reg_reset_n; -+ int reset_cnt = 0; -+ int 
err = -EFAULT;
-+	bool aligned = false;
-+
-+	/* Try to enable the core clock PLL */
-+	spi_write(tc, 0x1, 0x0);
-+	reg = ioread32(tc->tcf.registers + 0x320);
-+	reg |= 0x1;
-+	iowrite32(reg, tc->tcf.registers + 0x320);
-+	reg &= 0xfffffffe;
-+	iowrite32(reg, tc->tcf.registers + 0x320);
-+	msleep(1000);
-+
-+	if (spi_read(tc, 0x2, &reg)) {
-+		dev_err(&tc->pdev->dev,
-+			"Unable to read PLL status\n");
-+		goto err_out;
-+	}
-+
-+	if (reg == 0x1) {
-+		/* Select DUT PLL as core clock */
-+		reg = ioread32(tc->tcf.registers +
-+			TCF_CLK_CTRL_DUT_CONTROL_1);
-+		reg &= 0xfffffff7;
-+		iowrite32(reg, tc->tcf.registers +
-+			TCF_CLK_CTRL_DUT_CONTROL_1);
-+	} else {
-+		dev_err(&tc->pdev->dev,
-+			"PLL has failed to lock, status = %x\n", reg);
-+		goto err_out;
-+	}
-+
-+	reg_reset_n = ioread32(tc->tcf.registers +
-+		TCF_CLK_CTRL_CLK_AND_RST_CTRL);
-+
-+	while (!aligned && reset_cnt < 10 &&
-+		tc->version != APOLLO_VERSION_TCF_5) {
-+		int bank;
-+		u32 eyes;
-+		u32 clk_taps;
-+		u32 train_ack;
-+
-+		++reset_cnt;
-+
-+		/* Reset the DUT to allow the SAI to retrain */
-+		reg_reset_n &= ~(0x1 << DUT_RESETN_SHIFT);
-+		iowrite32(reg_reset_n, tc->tcf.registers +
-+			TCF_CLK_CTRL_CLK_AND_RST_CTRL);
-+		udelay(100);
-+		reg_reset_n |= (0x1 << DUT_RESETN_SHIFT);
-+		iowrite32(reg_reset_n, tc->tcf.registers +
-+			TCF_CLK_CTRL_CLK_AND_RST_CTRL);
-+		udelay(100);
-+
-+		/* Assume alignment passed, if any bank fails on either DUT or
-+		 * FPGA we will set this to false and try again for a max of 10
-+		 * times.
-+		 */
-+		aligned = true;
-+
-+		/* For each of the banks */
-+		for (bank = 0; bank < 10; bank++) {
-+			int bank_aligned = 0;
-+			/* Check alignment on the DUT */
-+			u32 bank_base = 0x7000 + (0x1000 * bank);
-+
-+			spi_read(tc, bank_base + 0x4, &eyes);
-+			spi_read(tc, bank_base + 0x3, &clk_taps);
-+			spi_read(tc, bank_base + 0x6, &train_ack);
-+
-+			bank_aligned = tc_is_interface_aligned(
-+				eyes, clk_taps, train_ack);
-+			if (!bank_aligned) {
-+				dev_warn(&tc->pdev->dev,
-+					"Alignment check failed, retrying\n");
-+				aligned = false;
-+				break;
-+			}
-+
-+			/* Check alignment on the FPGA */
-+			bank_base = 0xb0 + (0x10 * bank);
-+
-+			eyes = sai_read_es2(tc, bank_base + 0x4);
-+			clk_taps = sai_read_es2(tc, bank_base + 0x3);
-+			train_ack = sai_read_es2(tc, bank_base + 0x6);
-+
-+			bank_aligned = tc_is_interface_aligned(
-+				eyes, clk_taps, train_ack);
-+
-+			if (!bank_aligned) {
-+				dev_warn(&tc->pdev->dev,
-+					"Alignment check failed, retrying\n");
-+				aligned = false;
-+				break;
-+			}
-+		}
-+	}
-+
-+	if (!aligned) {
-+		dev_err(&tc->pdev->dev, "Unable to initialise the testchip (interface alignment failure), please restart the system.\n");
-+		/* We are not returning an error here, cause VP doesn't
-+		 * implement the necessary registers although they claim to be
-+		 * TC compatible.
-+		 */
-+	}
-+
-+	if (reset_cnt > 1) {
-+		dev_dbg(&tc->pdev->dev, "Note: The testchip required more than one reset to find a good interface alignment!\n");
-+		dev_dbg(&tc->pdev->dev, " This should be harmless, but if you do suspect foul play, please reset the machine.\n");
-+		dev_dbg(&tc->pdev->dev, " If you continue to see this message you may want to report it to PowerVR Verification Platforms.\n");
-+	}
-+
-+	err = 0;
-+err_out:
-+	return err;
-+}
-+
-+static void apollo_set_clocks(struct tc_device *tc,
-+	int core_clock, int mem_clock, int sys_clock)
-+{
-+	u32 val;
-+
-+	/* This is disabled for TCF2 since the current FPGA builds do not
-+	 * like their core clocks being set (it takes apollo down).
-+	 */
-+	if (tc->version != APOLLO_VERSION_TCF_2) {
-+		val = core_clock / 1000000;
-+		pll_write_reg(tc, TCF_PLL_PLL_CORE_CLK0, val);
-+
-+		val = 0x1 << PLL_CORE_DRP_GO_SHIFT;
-+		pll_write_reg(tc, TCF_PLL_PLL_CORE_DRP_GO, val);
-+	}
-+
-+	val = mem_clock / 1000000;
-+	pll_write_reg(tc, TCF_PLL_PLL_MEMIF_CLK0, val);
-+
-+	val = 0x1 << PLL_MEM_DRP_GO_SHIFT;
-+	pll_write_reg(tc, TCF_PLL_PLL_MEM_DRP_GO, val);
-+
-+	if (tc->version == APOLLO_VERSION_TCF_5) {
-+		val = sys_clock / 1000000;
-+		pll_write_reg(tc, TCF_PLL_PLL_SYSIF_CLK0, val);
-+
-+		val = 0x1 << PLL_MEM_DRP_GO_SHIFT;
-+		pll_write_reg(tc, TCF_PLL_PLL_SYS_DRP_GO, val);
-+	}
-+
-+	udelay(400);
-+}
-+
-+static void apollo_set_mem_latency(struct tc_device *tc,
-+	int mem_latency, int mem_wresp_latency)
-+{
-+	u32 regval = 0;
-+
-+	if (mem_latency <= 4) {
-+		/* The total memory read latency cannot be lower than the
-+		 * amount of cycles consumed by the hardware to do a read.
-+		 * Set the memory read latency to 0 cycles.
-+		 */
-+		mem_latency = 0;
-+	} else {
-+		mem_latency -= 4;
-+
-+		dev_info(&tc->pdev->dev,
-+			"Setting memory read latency to %i cycles\n",
-+			mem_latency);
-+	}
-+
-+	if (mem_wresp_latency <= 2) {
-+		/* The total memory write latency cannot be lower than the
-+		 * amount of cycles consumed by the hardware to do a write.
-+		 * Set the memory write latency to 0 cycles.
-+		 */
-+		mem_wresp_latency = 0;
-+	} else {
-+		mem_wresp_latency -= 2;
-+
-+		dev_info(&tc->pdev->dev,
-+			"Setting memory write response latency to %i cycles\n",
-+			mem_wresp_latency);
-+	}
-+
-+	mem_latency |= mem_wresp_latency << 16;
-+
-+	spi_write(tc, 0x1009, mem_latency);
-+
-+	if (spi_read(tc, 0x1009, &regval) != 0) {
-+		dev_err(&tc->pdev->dev,
-+			"Failed to read back memory latency register");
-+		return;
-+	}
-+
-+	if (mem_latency != regval) {
-+		dev_err(&tc->pdev->dev,
-+			"Memory latency register doesn't match requested value (actual: %#08x, expected: %#08x)\n",
-+			regval, mem_latency);
-+	}
-+}
-+
-+static void apollo_fpga_update_dut_clk_freq(struct tc_device *tc,
-+	int *core_clock, int *mem_clock, int *clock_multiplex)
-+{
-+	struct device *dev = &tc->pdev->dev;
-+	u32 reg = 0;
-+
-+#if defined(SUPPORT_FPGA_DUT_CLK_INFO)
-+	/* DUT_CLK_INFO available only if SW_IF_VERSION >= 1 */
-+	reg = ioread32(tc->tcf.registers + TCF_CLK_CTRL_SW_IF_VERSION);
-+	reg = (reg & VERSION_MASK) >> VERSION_SHIFT;
-+#endif
-+	if (reg >= 1) {
-+		reg = ioread32(tc->tcf.registers + TCF_CLK_CTRL_DUT_CLK_INFO);
-+
-+		if ((reg != 0) && (reg != 0xbaadface) && (reg != 0xffffffff)) {
-+			dev_info(dev, "TCF_CLK_CTRL_DUT_CLK_INFO = %08x\n", reg);
-+
-+			if (*core_clock == 0) {
-+				*core_clock = ((reg & CORE_MASK) >> CORE_SHIFT) * 1000000;
-+				dev_info(dev, "Using register DUT core clock value: %i\n",
-+					*core_clock);
-+			} else {
-+				dev_info(dev, "Using module param DUT core clock value: %i\n",
-+					*core_clock);
-+			}
-+
-+			if (*mem_clock == 0) {
-+				*mem_clock = ((reg & MEM_MASK) >> MEM_SHIFT) * 1000000;
-+				dev_info(dev, "Using register DUT mem clock value: %i\n",
-+					*mem_clock);
-+			} else {
-+				dev_info(dev, "Using module param DUT mem clock value: %i\n",
-+					*mem_clock);
-+			}
-+
-+			return;
-+		}
-+	}
-+
-+	if (*core_clock == 0) {
-+		*core_clock = RGX_TC_CORE_CLOCK_SPEED;
-+		dev_info(dev, "Using default DUT core clock value: %i\n",
-+			*core_clock);
-+	} else {
-+		dev_info(dev, "Using module param DUT core clock value: %i\n",
-+			*core_clock);
-+	}
-+
-+	if (*mem_clock == 0) {
-+		*mem_clock = RGX_TC_MEM_CLOCK_SPEED;
-+		dev_info(dev, "Using default DUT mem clock value: %i\n",
-+			*mem_clock);
-+	} else
{ -+ dev_info(dev, "Using module param DUT mem clock value: %i\n", -+ *mem_clock); -+ } -+ -+ if (*clock_multiplex == 0) { -+ *clock_multiplex = RGX_TC_CLOCK_MULTIPLEX; -+ dev_info(dev, "Using default DUT clock multiplex: %i\n", -+ *clock_multiplex); -+ } else { -+ dev_info(dev, "Using module param DUT clock multiplex: %i\n", -+ *clock_multiplex); -+ } -+} -+ -+#endif /* defined(SUPPORT_RGX) */ -+ -+static int apollo_hard_reset(struct tc_device *tc, -+ int *core_clock, int *mem_clock, int sys_clock, int *clock_multiplex) -+{ -+ u32 reg; -+ u32 reg_reset_n = 0; -+ -+ int err = 0; -+ -+ /* This is required for SPI reset which is not yet implemented. */ -+ /*u32 aux_reset_n;*/ -+ -+ if (tc->version == APOLLO_VERSION_TCF_2) { -+ /* Power down */ -+ reg = ioread32(tc->tcf.registers + -+ TCF_CLK_CTRL_DUT_CONTROL_1); -+ reg &= ~DUT_CTRL_VCC_0V9EN; -+ reg &= ~DUT_CTRL_VCC_1V8EN; -+ reg |= DUT_CTRL_VCC_IO_INH; -+ reg |= DUT_CTRL_VCC_CORE_INH; -+ iowrite32(reg, tc->tcf.registers + -+ TCF_CLK_CTRL_DUT_CONTROL_1); -+ msleep(500); -+ } -+ -+ /* Put everything into reset */ -+ iowrite32(reg_reset_n, tc->tcf.registers + -+ TCF_CLK_CTRL_CLK_AND_RST_CTRL); -+ -+ /* Take PDP1 and PDP2 out of reset */ -+ reg_reset_n |= (0x1 << PDP1_RESETN_SHIFT); -+ reg_reset_n |= (0x1 << PDP2_RESETN_SHIFT); -+ -+ iowrite32(reg_reset_n, tc->tcf.registers + -+ TCF_CLK_CTRL_CLK_AND_RST_CTRL); -+ msleep(100); -+ -+ /* Take DDR out of reset */ -+ reg_reset_n |= (0x1 << DDR_RESETN_SHIFT); -+ iowrite32(reg_reset_n, tc->tcf.registers + -+ TCF_CLK_CTRL_CLK_AND_RST_CTRL); -+ -+#if defined(SUPPORT_RGX) -+ if (tc->version == APOLLO_VERSION_TCF_5) { -+ apollo_fpga_update_dut_clk_freq(tc, core_clock, mem_clock, clock_multiplex); -+ } else { -+ struct device *dev = &tc->pdev->dev; -+ -+ if (*core_clock == 0) { -+ *core_clock = RGX_TC_CORE_CLOCK_SPEED; -+ dev_info(dev, "Using default DUT core clock value: %i\n", -+ *core_clock); -+ } else { -+ dev_info(dev, "Using module param DUT core clock value: %i\n", -+ *core_clock); -+ } -+ -+ if (*mem_clock == 0) { -+ *mem_clock = RGX_TC_MEM_CLOCK_SPEED; -+ dev_info(dev, "Using default DUT mem clock value: %i\n", -+ *mem_clock); -+ } else { -+ dev_info(dev, "Using module param DUT mem clock value: %i\n", -+ *mem_clock); -+ } -+ -+ if (*clock_multiplex == 0) { -+ *clock_multiplex = RGX_TC_CLOCK_MULTIPLEX; -+ dev_info(dev, "Using default DUT clock multiplex: %i\n", -+ *clock_multiplex); -+ } else { -+ dev_info(dev, "Using module param DUT clock multiplex: %i\n", -+ *clock_multiplex); -+ } -+ } -+ -+ /* Set clock speed here, before reset. 
*/ -+ apollo_set_clocks(tc, *core_clock, *mem_clock, sys_clock); -+ -+ /* Put take GLB_CLKG and SCB out of reset */ -+ reg_reset_n |= (0x1 << GLB_CLKG_EN_SHIFT); -+ reg_reset_n |= (0x1 << SCB_RESETN_SHIFT); -+ iowrite32(reg_reset_n, tc->tcf.registers + -+ TCF_CLK_CTRL_CLK_AND_RST_CTRL); -+ msleep(100); -+ -+ if (tc->version == APOLLO_VERSION_TCF_2) { -+ /* Enable the voltage control regulators on DUT */ -+ reg = ioread32(tc->tcf.registers + -+ TCF_CLK_CTRL_DUT_CONTROL_1); -+ reg |= DUT_CTRL_VCC_0V9EN; -+ reg |= DUT_CTRL_VCC_1V8EN; -+ reg &= ~DUT_CTRL_VCC_IO_INH; -+ reg &= ~DUT_CTRL_VCC_CORE_INH; -+ iowrite32(reg, tc->tcf.registers + -+ TCF_CLK_CTRL_DUT_CONTROL_1); -+ msleep(300); -+ } -+ -+ /* Take DUT_DCM out of reset */ -+ reg_reset_n |= (0x1 << DUT_DCM_RESETN_SHIFT); -+ iowrite32(reg_reset_n, tc->tcf.registers + -+ TCF_CLK_CTRL_CLK_AND_RST_CTRL); -+ msleep(100); -+ -+ -+ err = tc_iopol32_nonzero(DCM_LOCK_STATUS_MASK, -+ tc->tcf.registers + TCF_CLK_CTRL_DCM_LOCK_STATUS); -+ -+ if (err != 0) -+ goto err_out; -+ -+ if (tc->version == APOLLO_VERSION_TCF_2) { -+ /* Set ODT to a specific value that seems to provide the most -+ * stable signals. -+ */ -+ spi_write(tc, 0x11, 0x413130); -+ } -+ -+ /* Take DUT out of reset */ -+ reg_reset_n |= (0x1 << DUT_RESETN_SHIFT); -+ iowrite32(reg_reset_n, tc->tcf.registers + -+ TCF_CLK_CTRL_CLK_AND_RST_CTRL); -+ msleep(100); -+ -+ if (tc->version != APOLLO_VERSION_TCF_5) { -+ u32 hood_ctrl; -+ -+ err = apollo_align_interface_es2(tc); -+ if (err) -+ goto err_out; -+ -+ spi_read(tc, 0xF, &hood_ctrl); -+ hood_ctrl |= 0x1; -+ spi_write(tc, 0xF, hood_ctrl); -+ } -+ -+#endif /* defined(SUPPORT_RGX) */ -+ -+ if (tc->version == APOLLO_VERSION_TCF_2) { -+ /* Enable the temperature sensor */ -+ spi_write(tc, 0xc, 0); /* power up */ -+ spi_write(tc, 0xc, 2); /* reset */ -+ spi_write(tc, 0xc, 6); /* init & run */ -+ -+ /* Register a new thermal zone */ -+ apollo_pdata.thermal_zone = -+ thermal_zone_device_register("apollo", 0, 0, tc, -+ &apollo_thermal_dev_ops, -+ NULL, 0, 0); -+ if (IS_ERR(apollo_pdata.thermal_zone)) { -+ dev_warn(&tc->pdev->dev, "Couldn't register thermal zone"); -+ apollo_pdata.thermal_zone = NULL; -+ } -+ } -+ -+ reg = ioread32(tc->tcf.registers + TCF_CLK_CTRL_SW_IF_VERSION); -+ reg = (reg & VERSION_MASK) >> VERSION_SHIFT; -+ -+ if (reg == 0) { -+ u32 build_inc; -+ u32 build_owner; -+ -+ /* Check the build */ -+ reg = ioread32(tc->tcf.registers + TCF_CLK_CTRL_FPGA_DES_REV_1); -+ build_inc = (reg >> 12) & 0xff; -+ build_owner = (reg >> 20) & 0xf; -+ -+ if (build_inc) { -+ dev_alert(&tc->pdev->dev, -+ "BE WARNED: You are not running a tagged release of the FPGA!\n"); -+ -+ dev_alert(&tc->pdev->dev, "Owner: 0x%01x, Inc: 0x%02x\n", -+ build_owner, build_inc); -+ } -+ -+ dev_info(&tc->pdev->dev, "FPGA Release: %u.%02u\n", -+ reg >> 8 & 0xf, reg & 0xff); -+ } -+ -+#if defined(SUPPORT_RGX) -+err_out: -+#endif /* defined(SUPPORT_RGX) */ -+ return err; -+} -+ -+static void apollo_set_mem_mode_lma(struct tc_device *tc) -+{ -+ u32 val; -+ -+ val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_TEST_CTRL); -+ val &= ~(ADDRESS_FORCE_MASK | PCI_TEST_MODE_MASK | HOST_ONLY_MODE_MASK -+ | HOST_PHY_MODE_MASK); -+ val |= (0x1 << ADDRESS_FORCE_SHIFT); -+ iowrite32(val, tc->tcf.registers + TCF_CLK_CTRL_TEST_CTRL); -+} -+ -+static void apollo_set_mem_mode_hybrid(struct tc_device *tc) -+{ -+ u32 val; -+ -+ val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_TEST_CTRL); -+ val &= ~(ADDRESS_FORCE_MASK | PCI_TEST_MODE_MASK | HOST_ONLY_MODE_MASK -+ | HOST_PHY_MODE_MASK); -+ val |= 
((0x1 << HOST_ONLY_MODE_SHIFT) | (0x1 << HOST_PHY_MODE_SHIFT));
-+	iowrite32(val, tc->tcf.registers + TCF_CLK_CTRL_TEST_CTRL);
-+
-+	/* Setup apollo to pass 1GB window of address space to the local memory.
-+	 * This is a sub-mode of the host only mode, meaning that the apollo TC
-+	 * can address the system memory with a 1GB window of address space
-+	 * routed to the device local memory. The simplest approach is to mirror
-+	 * the CPU physical address space, by moving the device local memory
-+	 * window where it is mapped in the CPU physical address space.
-+	 */
-+	iowrite32(tc->tc_mem.base,
-+		tc->tcf.registers + TCF_CLK_CTRL_HOST_PHY_OFFSET);
-+}
-+
-+static int apollo_set_mem_mode(struct tc_device *tc, int mem_mode)
-+{
-+	switch (mem_mode) {
-+	case TC_MEMORY_HYBRID:
-+		apollo_set_mem_mode_hybrid(tc);
-+		dev_info(&tc->pdev->dev, "Memory mode: TC_MEMORY_HYBRID\n");
-+		break;
-+	case TC_MEMORY_LOCAL:
-+		apollo_set_mem_mode_lma(tc);
-+		dev_info(&tc->pdev->dev, "Memory mode: TC_MEMORY_LOCAL\n");
-+		break;
-+	default:
-+		dev_err(&tc->pdev->dev, "unsupported memory mode = %d\n",
-+			mem_mode);
-+		return -EINVAL;
-+	};
-+
-+	tc->mem_mode = mem_mode;
-+
-+	return 0;
-+}
-+
-+static bool apollo_pdp_export_host_addr(struct tc_device *tc)
-+{
-+	return tc->mem_mode == TC_MEMORY_HYBRID;
-+}
-+
-+static u64 apollo_get_pdp_dma_mask(struct tc_device *tc)
-+{
-+	/* The PDP does not access system memory, so there is no
-+	 * DMA limitation.
-+	 */
-+	if ((tc->mem_mode == TC_MEMORY_LOCAL) ||
-+		(tc->mem_mode == TC_MEMORY_HYBRID))
-+		return DMA_BIT_MASK(64);
-+
-+	return DMA_BIT_MASK(32);
-+}
-+
-+#if defined(SUPPORT_RGX) || defined(SUPPORT_APOLLO_FPGA)
-+#if defined(SUPPORT_RGX)
-+static u64 apollo_get_rogue_dma_mask(struct tc_device *tc)
-+#else /* SUPPORT_APOLLO_FPGA */
-+static u64 apollo_get_fpga_dma_mask(struct tc_device *tc)
-+#endif /* defined(SUPPORT_RGX) */
-+{
-+	/* Does not access system memory, so there is no DMA limitation */
-+	if (tc->mem_mode == TC_MEMORY_LOCAL)
-+		return DMA_BIT_MASK(64);
-+
-+	return DMA_BIT_MASK(32);
-+}
-+#endif /* defined(SUPPORT_RGX) || defined(SUPPORT_APOLLO_FPGA) */
-+
-+static int apollo_hw_init(struct tc_device *tc,
-+	int *core_clock, int *mem_clock, int sys_clock, int *clock_multiplex,
-+	int mem_latency, int mem_wresp_latency, int mem_mode)
-+{
-+	int err = 0;
-+
-+	err = apollo_hard_reset(tc, core_clock, mem_clock, sys_clock, clock_multiplex);
-+	if (err)
-+		goto err_out;
-+
-+	err = apollo_set_mem_mode(tc, mem_mode);
-+	if (err)
-+		goto err_out;
-+
-+#if defined(SUPPORT_RGX)
-+	if (tc->version == APOLLO_VERSION_TCF_BONNIE) {
-+		u32 reg;
-+		/* Enable ASTC via SPI */
-+		if (spi_read(tc, 0xf, &reg)) {
-+			dev_err(&tc->pdev->dev,
-+				"Failed to read apollo ASTC register\n");
-+			err = -ENODEV;
-+			goto err_out;
-+		}
-+
-+		reg |= 0x1 << 4;
-+		spi_write(tc, 0xf, reg);
-+	} else if (tc->version == APOLLO_VERSION_TCF_5) {
-+		apollo_set_mem_latency(tc, mem_latency, mem_wresp_latency);
-+	}
-+#endif /* defined(SUPPORT_RGX) */
-+
-+err_out:
-+	return err;
-+}
-+
-+static int apollo_enable_irq(struct tc_device *tc)
-+{
-+	int err = 0;
-+
-+#if defined(TC_FAKE_INTERRUPTS)
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
-+	timer_setup(&tc->timer, tc_irq_fake_wrapper, 0);
-+#else
-+	setup_timer(&tc->timer, tc_irq_fake_wrapper, (unsigned long)tc);
-+#endif
-+
-+	mod_timer(&tc->timer,
-+		jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS));
-+#else
-+	{
-+		u32 val;
-+
-+		iowrite32(0, tc->tcf.registers +
-+			TCF_CLK_CTRL_INTERRUPT_ENABLE);
-+		iowrite32(0xffffffff,
tc->tcf.registers + -+ TCF_CLK_CTRL_INTERRUPT_CLEAR); -+ -+ /* Set sense to active high */ -+ val = ioread32(tc->tcf.registers + -+ TCF_CLK_CTRL_INTERRUPT_OP_CFG) & ~(INT_SENSE_MASK); -+ iowrite32(val, tc->tcf.registers + -+ TCF_CLK_CTRL_INTERRUPT_OP_CFG); -+ -+ err = request_irq(tc->pdev->irq, apollo_irq_handler, -+ IRQF_SHARED, DRV_NAME, tc); -+ } -+#endif -+ return err; -+} -+ -+static void apollo_disable_irq(struct tc_device *tc) -+{ -+#if defined(TC_FAKE_INTERRUPTS) -+ del_timer_sync(&tc->timer); -+#else -+ iowrite32(0, tc->tcf.registers + -+ TCF_CLK_CTRL_INTERRUPT_ENABLE); -+ iowrite32(0xffffffff, tc->tcf.registers + -+ TCF_CLK_CTRL_INTERRUPT_CLEAR); -+ -+ free_irq(tc->pdev->irq, tc); -+#endif -+} -+ -+static enum tc_version_t -+apollo_detect_tc_version(struct tc_device *tc) -+{ -+ u32 val = ioread32(tc->tcf.registers + -+ TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG); -+ -+ switch (val) { -+ default: -+ dev_err(&tc->pdev->dev, -+ "Unknown TCF core target build ID (0x%x) - assuming Hood ES2 - PLEASE REPORT TO ANDROID TEAM\n", -+ val); -+ __fallthrough; -+ case 5: -+ dev_err(&tc->pdev->dev, "Looks like a Hood ES2 TC\n"); -+ return APOLLO_VERSION_TCF_2; -+ case 1: -+ dev_err(&tc->pdev->dev, "Looks like a TCF5\n"); -+ return APOLLO_VERSION_TCF_5; -+ case 6: -+ dev_err(&tc->pdev->dev, "Looks like a Bonnie TC\n"); -+ return APOLLO_VERSION_TCF_BONNIE; -+ } -+} -+ -+static u32 apollo_interrupt_id_to_flag(int interrupt_id) -+{ -+ switch (interrupt_id) { -+ case TC_INTERRUPT_PDP: -+ return TC_INTERRUPT_FLAG_PDP; -+ case TC_INTERRUPT_EXT: -+ return TC_INTERRUPT_FLAG_EXT; -+ default: -+ BUG(); -+ } -+} -+ -+static int apollo_dev_init(struct tc_device *tc, struct pci_dev *pdev, -+ int pdp_mem_size, int secure_mem_size) -+{ -+ int err; -+ -+ /* Reserve and map the tcf_clk / "sys" registers */ -+ err = setup_io_region(pdev, &tc->tcf, -+ SYS_APOLLO_REG_PCI_BASENUM, -+ SYS_APOLLO_REG_SYS_OFFSET, SYS_APOLLO_REG_SYS_SIZE); -+ if (err) -+ goto err_out; -+ -+ /* Reserve and map the tcf_pll registers */ -+ err = setup_io_region(pdev, &tc->tcf_pll, -+ SYS_APOLLO_REG_PCI_BASENUM, -+ SYS_APOLLO_REG_PLL_OFFSET + TCF_PLL_PLL_CORE_CLK0, -+ TCF_PLL_PLL_DRP_STATUS - TCF_PLL_PLL_CORE_CLK0 + 4); -+ if (err) -+ goto err_unmap_sys_registers; -+ -+#if defined(SUPPORT_APOLLO_FPGA) -+#define FPGA_REGISTERS_SIZE 4 -+ /* If this is a special 'fgpa' build, have the apollo driver manage -+ * the second register bar. 
-+ */ -+ err = setup_io_region(pdev, &apollo_pdata.fpga, -+ SYS_RGX_REG_PCI_BASENUM, 0, FPGA_REGISTERS_SIZE); -+ if (err) -+ goto err_unmap_pll_registers; -+#endif -+ -+ /* Detect testchip version */ -+ tc->version = apollo_detect_tc_version(tc); -+ -+ /* Setup card memory */ -+ tc->tc_mem.base = -+ pci_resource_start(pdev, APOLLO_MEM_PCI_BASENUM); -+ tc->tc_mem.size = -+ pci_resource_len(pdev, APOLLO_MEM_PCI_BASENUM); -+ -+ if (tc->tc_mem.size < pdp_mem_size) { -+ dev_err(&pdev->dev, -+ "Apollo MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu", -+ APOLLO_MEM_PCI_BASENUM, -+ (unsigned long)tc->tc_mem.size, -+ (unsigned long)pdp_mem_size); -+ err = -EIO; -+ goto err_unmap_fpga_registers; -+ } -+ -+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) -+ if (tc->tc_mem.size < -+ (pdp_mem_size + secure_mem_size)) { -+ dev_err(&pdev->dev, -+ "Apollo MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu plus the requested secure heap size %lu", -+ APOLLO_MEM_PCI_BASENUM, -+ (unsigned long)tc->tc_mem.size, -+ (unsigned long)pdp_mem_size, -+ (unsigned long)secure_mem_size); -+ err = -EIO; -+ goto err_unmap_fpga_registers; -+ } -+#endif -+ -+ err = tc_mtrr_setup(tc); -+ if (err) -+ goto err_unmap_fpga_registers; -+ -+ /* Setup ranges for the device heaps */ -+ tc->pdp_heap_mem_size = pdp_mem_size; -+ -+ /* We know ext_heap_mem_size won't underflow as we've compared -+ * tc_mem.size against the pdp_mem_size value earlier -+ */ -+ tc->ext_heap_mem_size = -+ tc->tc_mem.size - tc->pdp_heap_mem_size; -+ -+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) -+ tc->ext_heap_mem_size -= secure_mem_size; -+#endif -+ -+ if (tc->ext_heap_mem_size < TC_EXT_MINIMUM_MEM_SIZE) { -+ dev_warn(&pdev->dev, -+ "Apollo MEM region (bar %d) has size of %lu, with %lu pdp_mem_size only %lu bytes are left for ext device, which looks too small", -+ APOLLO_MEM_PCI_BASENUM, -+ (unsigned long)tc->tc_mem.size, -+ (unsigned long)pdp_mem_size, -+ (unsigned long)tc->ext_heap_mem_size); -+ /* Continue as this is only a 'helpful warning' not a hard -+ * requirement -+ */ -+ } -+ -+ tc->ext_heap_mem_base = tc->tc_mem.base; -+ tc->pdp_heap_mem_base = -+ tc->tc_mem.base + tc->ext_heap_mem_size; -+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) -+ tc->secure_heap_mem_base = tc->pdp_heap_mem_base + -+ tc->pdp_heap_mem_size; -+ tc->secure_heap_mem_size = secure_mem_size; -+#endif -+ -+#if defined(SUPPORT_DMA_HEAP) -+ err = tc_dmabuf_heap_init(tc, APOLLO_MEM_PCI_BASENUM); -+ if (err) { -+ dev_err(&pdev->dev, "Failed to initialise DMA heap\n"); -+ goto err_unmap_fpga_registers; -+ } -+#elif defined(SUPPORT_ION) -+ err = tc_ion_init(tc, APOLLO_MEM_PCI_BASENUM); -+ if (err) { -+ dev_err(&pdev->dev, "Failed to initialise ION\n"); -+ goto err_unmap_fpga_registers; -+ } -+#endif /* defined(SUPPORT_ION) */ -+ -+#if defined(SUPPORT_APOLLO_FPGA) -+ apollo_debugfs_add_fpga_entries(tc, &apollo_pdata.fpga, -+ &apollo_pdata.fpga_entries); -+#endif /* defined(SUPPORT_APOLLO_FPGA) */ -+ -+err_out: -+ return err; -+err_unmap_fpga_registers: -+#if defined(SUPPORT_APOLLO_FPGA) -+ iounmap(apollo_pdata.fpga.registers); -+ release_pci_io_addr(pdev, SYS_RGX_REG_PCI_BASENUM, -+ apollo_pdata.fpga.region.base, apollo_pdata.fpga.region.size); -+err_unmap_pll_registers: -+#endif /* defined(SUPPORT_APOLLO_FPGA) */ -+ iounmap(tc->tcf_pll.registers); -+ release_pci_io_addr(pdev, SYS_APOLLO_REG_PCI_BASENUM, -+ tc->tcf_pll.region.base, tc->tcf_pll.region.size); -+err_unmap_sys_registers: -+ iounmap(tc->tcf.registers); -+ 
release_pci_io_addr(pdev, SYS_APOLLO_REG_PCI_BASENUM, -+ tc->tcf.region.base, tc->tcf.region.size); -+ goto err_out; -+} -+ -+static void apollo_dev_cleanup(struct tc_device *tc) -+{ -+#if defined(SUPPORT_APOLLO_FPGA) -+ apollo_debugfs_remove_fpga_entries(&apollo_pdata.fpga_entries); -+#endif -+ -+#if defined(SUPPORT_DMA_HEAP) -+ tc_dmabuf_heap_deinit(tc, APOLLO_MEM_PCI_BASENUM); -+#elif defined(SUPPORT_ION) -+ tc_ion_deinit(tc, APOLLO_MEM_PCI_BASENUM); -+#endif -+ -+ tc_mtrr_cleanup(tc); -+ -+#if defined(SUPPORT_APOLLO_FPGA) -+ iounmap(apollo_pdata.fpga.registers); -+ release_pci_io_addr(tc->pdev, SYS_RGX_REG_PCI_BASENUM, -+ apollo_pdata.fpga.region.base, apollo_pdata.fpga.region.size); -+#endif -+ -+ iounmap(tc->tcf_pll.registers); -+ release_pci_io_addr(tc->pdev, SYS_APOLLO_REG_PCI_BASENUM, -+ tc->tcf_pll.region.base, tc->tcf_pll.region.size); -+ -+ iounmap(tc->tcf.registers); -+ release_pci_io_addr(tc->pdev, SYS_APOLLO_REG_PCI_BASENUM, -+ tc->tcf.region.base, tc->tcf.region.size); -+ -+ if (apollo_pdata.thermal_zone) -+ thermal_zone_device_unregister(apollo_pdata.thermal_zone); -+} -+ -+int apollo_init(struct tc_device *tc, struct pci_dev *pdev, -+ int *core_clock, int *mem_clock, int sys_clock, int *clock_multiplex, -+ int pdp_mem_size, int secure_mem_size, -+ int mem_latency, int mem_wresp_latency, int mem_mode) -+{ -+ int err = 0; -+ -+ err = apollo_dev_init(tc, pdev, pdp_mem_size, secure_mem_size); -+ if (err) { -+ dev_err(&pdev->dev, "apollo_dev_init failed\n"); -+ goto err_out; -+ } -+ -+ err = apollo_hw_init(tc, core_clock, mem_clock, sys_clock, clock_multiplex, -+ mem_latency, mem_wresp_latency, mem_mode); -+ if (err) { -+ dev_err(&pdev->dev, "apollo_hw_init failed\n"); -+ goto err_dev_cleanup; -+ } -+ -+ err = apollo_enable_irq(tc); -+ if (err) { -+ dev_err(&pdev->dev, -+ "Failed to initialise IRQ\n"); -+ goto err_dev_cleanup; -+ } -+ -+err_out: -+ return err; -+ -+err_dev_cleanup: -+ apollo_dev_cleanup(tc); -+ goto err_out; -+} -+ -+int apollo_cleanup(struct tc_device *tc) -+{ -+ apollo_disable_irq(tc); -+ apollo_dev_cleanup(tc); -+ -+ return 0; -+} -+ -+int apollo_register_pdp_device(struct tc_device *tc) -+{ -+ int err = 0; -+ resource_size_t reg_start = -+ pci_resource_start(tc->pdev, -+ SYS_APOLLO_REG_PCI_BASENUM); -+ struct resource pdp_resources_es2[] = { -+ DEFINE_RES_MEM_NAMED(reg_start + SYS_APOLLO_REG_PDP1_OFFSET, -+ SYS_APOLLO_REG_PDP1_SIZE, "pdp-regs"), -+ DEFINE_RES_MEM_NAMED(reg_start + -+ SYS_APOLLO_REG_PLL_OFFSET + -+ TCF_PLL_PLL_PDP_CLK0, -+ TCF_PLL_PLL_PDP2_DRP_GO - -+ TCF_PLL_PLL_PDP_CLK0 + 4, "pll-regs"), -+ }; -+ struct resource pdp_resources_tcf5[] = { -+ DEFINE_RES_MEM_NAMED(reg_start + SYS_APOLLO_REG_PDP1_OFFSET, -+ SYS_APOLLO_REG_PDP1_SIZE, "pdp-regs"), -+ DEFINE_RES_MEM_NAMED(reg_start + -+ SYS_APOLLO_REG_PLL_OFFSET + -+ TCF_PLL_PLL_PDP_CLK0, -+ TCF_PLL_PLL_PDP2_DRP_GO - -+ TCF_PLL_PLL_PDP_CLK0 + 4, "pll-regs"), -+ DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev, -+ TC5_SYS_APOLLO_REG_PCI_BASENUM) -+ + TC5_SYS_APOLLO_REG_PDP2_OFFSET, -+ TC5_SYS_APOLLO_REG_PDP2_SIZE, "tc5-pdp2-regs"), -+ -+ DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev, -+ TC5_SYS_APOLLO_REG_PCI_BASENUM) -+ + TC5_SYS_APOLLO_REG_PDP2_FBDC_OFFSET, -+ TC5_SYS_APOLLO_REG_PDP2_FBDC_SIZE, -+ "tc5-pdp2-fbdc-regs"), -+ -+ DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev, -+ TC5_SYS_APOLLO_REG_PCI_BASENUM) -+ + TC5_SYS_APOLLO_REG_HDMI_OFFSET, -+ TC5_SYS_APOLLO_REG_HDMI_SIZE, -+ "tc5-adv5711-regs"), -+ }; -+ -+ struct tc_pdp_platform_data pdata = { -+#if defined(SUPPORT_ION) && 
(LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ .ion_device = tc->ion_device, -+ .ion_heap_id = ION_HEAP_TC_PDP, -+#endif -+ .memory_base = tc->tc_mem.base, -+ .pdp_heap_memory_base = tc->pdp_heap_mem_base, -+ .pdp_heap_memory_size = tc->pdp_heap_mem_size, -+ .dma_map_export_host_addr = apollo_pdp_export_host_addr(tc), -+ }; -+ struct platform_device_info pdp_device_info = { -+ .parent = &tc->pdev->dev, -+ .name = APOLLO_DEVICE_NAME_PDP, -+ .id = -2, -+ .data = &pdata, -+ .size_data = sizeof(pdata), -+ .dma_mask = apollo_get_pdp_dma_mask(tc), -+ }; -+ -+ if (tc->version == APOLLO_VERSION_TCF_5) { -+ pdp_device_info.res = pdp_resources_tcf5; -+ pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_tcf5); -+ } else if (tc->version == APOLLO_VERSION_TCF_2 || -+ tc->version == APOLLO_VERSION_TCF_BONNIE) { -+ pdp_device_info.res = pdp_resources_es2; -+ pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_es2); -+ } else { -+ dev_err(&tc->pdev->dev, -+ "Unable to set PDP resource info for unknown apollo device\n"); -+ } -+ -+ tc->pdp_dev = platform_device_register_full(&pdp_device_info); -+ if (IS_ERR(tc->pdp_dev)) { -+ err = PTR_ERR(tc->pdp_dev); -+ dev_err(&tc->pdev->dev, -+ "Failed to register PDP device (%d)\n", err); -+ tc->pdp_dev = NULL; -+ goto err; -+ } -+err: -+ return err; -+} -+ -+#if defined(SUPPORT_RGX) -+ -+int apollo_register_ext_device(struct tc_device *tc) -+{ -+ int err = 0; -+ struct resource rogue_resources[] = { -+ DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev, -+ SYS_RGX_REG_PCI_BASENUM), -+ SYS_RGX_REG_REGION_SIZE, "rogue-regs"), -+ }; -+ struct tc_rogue_platform_data pdata = { -+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ .ion_device = tc->ion_device, -+ .ion_heap_id = ION_HEAP_TC_ROGUE, -+#endif -+ .mem_mode = tc->mem_mode, -+ .baseboard = TC_BASEBOARD_APOLLO, -+ .tc_memory_base = tc->tc_mem.base, -+ .pdp_heap_memory_base = tc->pdp_heap_mem_base, -+ .pdp_heap_memory_size = tc->pdp_heap_mem_size, -+ .rogue_heap_memory_base = tc->ext_heap_mem_base, -+ .rogue_heap_memory_size = tc->ext_heap_mem_size, -+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) -+ .secure_heap_memory_base = tc->secure_heap_mem_base, -+ .secure_heap_memory_size = tc->secure_heap_mem_size, -+#endif -+ }; -+ struct platform_device_info rogue_device_info = { -+ .parent = &tc->pdev->dev, -+ .name = TC_DEVICE_NAME_ROGUE, -+ .id = -2, -+ .res = rogue_resources, -+ .num_res = ARRAY_SIZE(rogue_resources), -+ .data = &pdata, -+ .size_data = sizeof(pdata), -+ .dma_mask = apollo_get_rogue_dma_mask(tc), -+ }; -+ -+ tc->ext_dev -+ = platform_device_register_full(&rogue_device_info); -+ -+ if (IS_ERR(tc->ext_dev)) { -+ err = PTR_ERR(tc->ext_dev); -+ dev_err(&tc->pdev->dev, -+ "Failed to register rogue device (%d)\n", err); -+ tc->ext_dev = NULL; -+ } -+ return err; -+} -+ -+#elif defined(SUPPORT_APOLLO_FPGA) -+ -+int apollo_register_ext_device(struct tc_device *tc) -+{ -+ int err = 0; -+ struct resource fpga_resources[] = { -+ /* For the 'fpga' build, we don't use the Rogue, but reuse the -+ * define that mentions RGX. 
-+ */ -+ DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev, -+ SYS_RGX_REG_PCI_BASENUM), -+ SYS_RGX_REG_REGION_SIZE, "fpga-regs"), -+ }; -+ struct apollo_fpga_platform_data pdata = { -+ .mem_mode = tc->mem_mode, -+ .tc_memory_base = tc->tc_mem.base, -+ .pdp_heap_memory_base = tc->pdp_heap_mem_base, -+ .pdp_heap_memory_size = tc->pdp_heap_mem_size, -+ }; -+ struct platform_device_info fpga_device_info = { -+ .parent = &tc->pdev->dev, -+ .name = APOLLO_DEVICE_NAME_FPGA, -+ .id = -1, -+ .res = fpga_resources, -+ .num_res = ARRAY_SIZE(fpga_resources), -+ .data = &pdata, -+ .size_data = sizeof(pdata), -+ .dma_mask = apollo_get_fpga_dma_mask(tc), -+ }; -+ -+ tc->ext_dev = platform_device_register_full(&fpga_device_info); -+ if (IS_ERR(tc->ext_dev)) { -+ err = PTR_ERR(tc->ext_dev); -+ dev_err(&tc->pdev->dev, -+ "Failed to register fpga device (%d)\n", err); -+ tc->ext_dev = NULL; -+ /* Fall through */ -+ } -+ -+ return err; -+} -+ -+#else /* defined(SUPPORT_APOLLO_FPGA) */ -+ -+int apollo_register_ext_device(struct tc_device *tc) -+{ -+ return 0; -+} -+ -+#endif /* defined(SUPPORT_RGX) */ -+ -+void apollo_enable_interrupt_register(struct tc_device *tc, -+ int interrupt_id) -+{ -+ u32 val; -+ -+ if (interrupt_id == TC_INTERRUPT_PDP || -+ interrupt_id == TC_INTERRUPT_EXT) { -+ val = ioread32( -+ tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE); -+ val |= apollo_interrupt_id_to_flag(interrupt_id); -+ iowrite32(val, -+ tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE); -+ } -+} -+ -+void apollo_disable_interrupt_register(struct tc_device *tc, -+ int interrupt_id) -+{ -+ u32 val; -+ -+ if (interrupt_id == TC_INTERRUPT_PDP || -+ interrupt_id == TC_INTERRUPT_EXT) { -+ val = ioread32( -+ tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE); -+ val &= ~(apollo_interrupt_id_to_flag(interrupt_id)); -+ iowrite32(val, -+ tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE); -+ } -+} -+ -+irqreturn_t apollo_irq_handler(int irq, void *data) -+{ -+ u32 interrupt_status; -+ u32 interrupt_clear = 0; -+ unsigned long flags; -+ irqreturn_t ret = IRQ_NONE; -+ struct tc_device *tc = (struct tc_device *)data; -+ -+ spin_lock_irqsave(&tc->interrupt_handler_lock, flags); -+ -+#if defined(TC_FAKE_INTERRUPTS) -+ /* If we're faking interrupts pretend we got both ext and PDP ints */ -+ interrupt_status = TC_INTERRUPT_FLAG_EXT -+ | TC_INTERRUPT_FLAG_PDP; -+#else -+ interrupt_status = ioread32(tc->tcf.registers -+ + TCF_CLK_CTRL_INTERRUPT_STATUS); -+#endif -+ -+ if (interrupt_status & TC_INTERRUPT_FLAG_EXT) { -+ struct tc_interrupt_handler *ext_int = -+ &tc->interrupt_handlers[TC_INTERRUPT_EXT]; -+ -+ if (ext_int->enabled && ext_int->handler_function) { -+ ext_int->handler_function(ext_int->handler_data); -+ interrupt_clear |= TC_INTERRUPT_FLAG_EXT; -+ } -+ ret = IRQ_HANDLED; -+ } -+ if (interrupt_status & TC_INTERRUPT_FLAG_PDP) { -+ struct tc_interrupt_handler *pdp_int = -+ &tc->interrupt_handlers[TC_INTERRUPT_PDP]; -+ -+ if (pdp_int->enabled && pdp_int->handler_function) { -+ pdp_int->handler_function(pdp_int->handler_data); -+ interrupt_clear |= TC_INTERRUPT_FLAG_PDP; -+ } -+ ret = IRQ_HANDLED; -+ } -+ -+ if (tc->version == APOLLO_VERSION_TCF_5) { -+ /* On TC5 the interrupt is not by the TC framework, but -+ * by the PDP itself. So we always have to callback to the tc5 -+ * pdp code regardless of the interrupt status of the TCF. 
-+ */ -+ struct tc_interrupt_handler *pdp_int = -+ &tc->interrupt_handlers[TC_INTERRUPT_TC5_PDP]; -+ -+ if (pdp_int->enabled && pdp_int->handler_function) { -+ pdp_int->handler_function(pdp_int->handler_data); -+ ret = IRQ_HANDLED; -+ } -+ } -+ -+ if (interrupt_clear) -+ iowrite32(0xffffffff, -+ tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_CLEAR); -+ -+ spin_unlock_irqrestore(&tc->interrupt_handler_lock, flags); -+ -+ return ret; -+} -+ -+int apollo_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll) -+{ -+ int err = 0; -+ -+ *tmp = 0; -+ *pll = 0; -+ -+ if (tc->version == APOLLO_VERSION_TCF_5) -+ /* Not implemented on TCF5 */ -+ goto err_out; -+ else if (tc->version == APOLLO_VERSION_TCF_2) { -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) -+ unsigned long t; -+#else -+ int t; -+#endif -+ -+ err = apollo_thermal_get_temp(apollo_pdata.thermal_zone, &t); -+ if (err) -+ goto err_out; -+ *tmp = t / 1000; -+ } -+ -+ if (spi_read(tc, 0x2, pll)) { -+ dev_err(&tc->pdev->dev, "Failed to read PLL status\n"); -+ err = -ENODEV; -+ goto err_out; -+ } -+ -+err_out: -+ return err; -+} -+ -+int apollo_sys_strings(struct tc_device *tc, -+ char *str_fpga_rev, size_t size_fpga_rev, -+ char *str_tcf_core_rev, size_t size_tcf_core_rev, -+ char *str_tcf_core_target_build_id, -+ size_t size_tcf_core_target_build_id, -+ char *str_pci_ver, size_t size_pci_ver, -+ char *str_macro_ver, size_t size_macro_ver) -+{ -+ int err = 0; -+ u32 val; -+ resource_size_t host_fpga_base; -+ void __iomem *host_fpga_registers; -+ -+ /* To get some of the version information we need to read from a -+ * register that we don't normally have mapped. Map it temporarily -+ * (without trying to reserve it) to get the information we need. -+ */ -+ host_fpga_base = -+ pci_resource_start(tc->pdev, SYS_APOLLO_REG_PCI_BASENUM) -+ + 0x40F0; -+ -+ host_fpga_registers = ioremap(host_fpga_base, 0x04); -+ if (!host_fpga_registers) { -+ dev_err(&tc->pdev->dev, -+ "Failed to map host fpga registers\n"); -+ err = -EIO; -+ goto err_out; -+ } -+ -+ /* Create the components of the PCI and macro versions */ -+ val = ioread32(host_fpga_registers); -+ snprintf(str_pci_ver, size_pci_ver, "%d", -+ HEX2DEC((val & 0x00FF0000) >> 16)); -+ snprintf(str_macro_ver, size_macro_ver, "%d.%d", -+ (val & 0x00000F00) >> 8, -+ HEX2DEC((val & 0x000000FF) >> 0)); -+ -+ /* Unmap the register now that we no longer need it */ -+ iounmap(host_fpga_registers); -+ -+ /* -+ * Check bits 7:0 of register 0x28 (TCF_CORE_REV_REG or SW_IF_VERSION -+ * depending on its own value) to find out how the driver should -+ * generate the strings for FPGA and core revision. 
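For illustration only (not part of the removed patch): apollo_irq_handler above walks a table of registered sub-device handlers under a spinlock, calls the ones that are enabled and match the status bits, and then clears only the sources it consumed. A minimal standalone sketch of that dispatch-table idea follows; the flag values, handler names and table layout here are invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_PDP (1u << 0)   /* hypothetical status bit for the PDP display core */
#define FLAG_EXT (1u << 1)   /* hypothetical status bit for the ext/GPU core     */

struct int_handler {
	bool enabled;
	void (*fn)(void *data);
	void *data;
};

static void pdp_isr(void *data) { (void)data; puts("pdp handled"); }

/* Call every enabled handler whose status bit is set; return the bits consumed,
 * which is what the real handler writes back to the interrupt-clear register.
 */
static uint32_t dispatch(uint32_t status, struct int_handler *tbl,
			 const uint32_t *flags, int n)
{
	uint32_t consumed = 0;
	int i;

	for (i = 0; i < n; i++) {
		if ((status & flags[i]) && tbl[i].enabled && tbl[i].fn) {
			tbl[i].fn(tbl[i].data);
			consumed |= flags[i];
		}
	}
	return consumed;
}

int main(void)
{
	struct int_handler tbl[2] = { { true, pdp_isr, NULL }, { false, NULL, NULL } };
	const uint32_t flags[2] = { FLAG_PDP, FLAG_EXT };

	/* Only the PDP bit has a live handler, so only that bit is consumed. */
	printf("consumed = 0x%x\n", dispatch(FLAG_PDP | FLAG_EXT, tbl, flags, 2));
	return 0;
}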
-+ */ -+ val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_SW_IF_VERSION); -+ val = (val & VERSION_MASK) >> VERSION_SHIFT; -+ -+ if (val == 0) { -+ /* Create the components of the TCF core revision number */ -+ val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_TCF_CORE_REV_REG); -+ snprintf(str_tcf_core_rev, size_tcf_core_rev, "%d.%d.%d", -+ HEX2DEC((val & TCF_CORE_REV_REG_MAJOR_MASK) -+ >> TCF_CORE_REV_REG_MAJOR_SHIFT), -+ HEX2DEC((val & TCF_CORE_REV_REG_MINOR_MASK) -+ >> TCF_CORE_REV_REG_MINOR_SHIFT), -+ HEX2DEC((val & TCF_CORE_REV_REG_MAINT_MASK) -+ >> TCF_CORE_REV_REG_MAINT_SHIFT)); -+ -+ /* Create the components of the FPGA revision number */ -+ val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_FPGA_REV_REG); -+ snprintf(str_fpga_rev, size_fpga_rev, "%d.%d.%d", -+ HEX2DEC((val & FPGA_REV_REG_MAJOR_MASK) -+ >> FPGA_REV_REG_MAJOR_SHIFT), -+ HEX2DEC((val & FPGA_REV_REG_MINOR_MASK) -+ >> FPGA_REV_REG_MINOR_SHIFT), -+ HEX2DEC((val & FPGA_REV_REG_MAINT_MASK) -+ >> FPGA_REV_REG_MAINT_SHIFT)); -+ } else if (val == 1) { -+ /* Create the components of the TCF core revision number */ -+ snprintf(str_tcf_core_rev, size_tcf_core_rev, "%d", val); -+ -+ /* Create the components of the FPGA revision number */ -+ val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_REL); -+ snprintf(str_fpga_rev, size_fpga_rev, "%d.%d", -+ HEX2DEC((val & MAJOR_MASK) >> MAJOR_SHIFT), -+ HEX2DEC((val & MINOR_MASK) >> MINOR_SHIFT)); -+ } else { -+ dev_warn(&tc->pdev->dev, -+ "%s: unrecognised SW_IF_VERSION %#08x\n", -+ __func__, val); -+ -+ /* Create the components of the TCF core revision number */ -+ snprintf(str_tcf_core_rev, size_tcf_core_rev, "%d", val); -+ -+ /* Create the components of the FPGA revision number */ -+ snprintf(str_fpga_rev, size_fpga_rev, "N/A"); -+ } -+ -+ /* Create the component of the TCF core target build ID */ -+ val = ioread32(tc->tcf.registers + -+ TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG); -+ snprintf(str_tcf_core_target_build_id, size_tcf_core_target_build_id, -+ "%d", -+ (val & TCF_CORE_TARGET_BUILD_ID_MASK) -+ >> TCF_CORE_TARGET_BUILD_ID_SHIFT); -+ -+err_out: -+ return err; -+} -diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_apollo.h b/drivers/gpu/drm/img-rogue/apollo/tc_apollo.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/tc_apollo.h -@@ -0,0 +1,77 @@ -+/* -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. 
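For illustration only: apollo_sys_strings above builds printable revision strings by treating each byte of the packed revision registers as binary-coded decimal through the driver's HEX2DEC() helper (defined later in tc_drv_internal.h as ((((v) >> 4) * 10) + ((v) & 0x0F))). A tiny standalone sketch of that decode; the register value and field layout used here are invented.

#include <stdint.h>
#include <stdio.h>

/* Same definition as the driver's HEX2DEC(): each nibble is one decimal digit. */
#define HEX2DEC(v) ((((v) >> 4) * 10) + ((v) & 0x0F))

int main(void)
{
	/* Invented example layout: bits 15..8 = major (BCD), bits 7..0 = minor (BCD). */
	uint32_t rev = 0x0142;
	char buf[16];

	snprintf(buf, sizeof(buf), "%d.%d",
		 HEX2DEC((rev & 0xFF00) >> 8),   /* 0x01 -> 1  */
		 HEX2DEC(rev & 0x00FF));         /* 0x42 -> 42 */

	puts(buf); /* prints "1.42" */
	return 0;
}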
-+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#ifndef _APOLLO_DRV_H -+#define _APOLLO_DRV_H -+ -+#include "tc_drv_internal.h" -+#include "apollo_regs.h" -+ -+#if defined(SUPPORT_RGX) && defined(SUPPORT_APOLLO_FPGA) -+#error Define either SUPPORT_RGX or SUPPORT_APOLLO_FGPA, not both -+#endif -+ -+int apollo_init(struct tc_device *tc, struct pci_dev *pdev, -+ int *core_clock, int *mem_clock, int sys_clock, int *clock_multiplex, -+ int pdp_mem_size, int secure_mem_size, -+ int mem_latency, int mem_wresp_latency, int mem_mode); -+int apollo_cleanup(struct tc_device *tc); -+ -+int apollo_register_pdp_device(struct tc_device *tc); -+int apollo_register_ext_device(struct tc_device *tc); -+ -+void apollo_enable_interrupt_register(struct tc_device *tc, -+ int interrupt_id); -+void apollo_disable_interrupt_register(struct tc_device *tc, -+ int interrupt_id); -+ -+irqreturn_t apollo_irq_handler(int irq, void *data); -+ -+int apollo_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll); -+int apollo_sys_strings(struct tc_device *tc, -+ char *str_fpga_rev, size_t size_fpga_rev, -+ char *str_tcf_core_rev, size_t size_tcf_core_rev, -+ char *str_tcf_core_target_build_id, -+ size_t size_tcf_core_target_build_id, -+ char *str_pci_ver, size_t size_pci_ver, -+ char *str_macro_ver, size_t size_macro_ver); -+ -+#endif /* _APOLLO_DRV_H */ -diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_clocks.h b/drivers/gpu/drm/img-rogue/apollo/tc_clocks.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/tc_clocks.h -@@ -0,0 +1,158 @@ -+/*************************************************************************/ /*! -+@File -+@Title System Description Header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description This header provides system-specific declarations and macros -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(TC_CLOCKS_H) -+#define TC_CLOCKS_H -+ -+/* -+ * The core clock speed is passed through a multiplier depending on the TC -+ * version. -+ * -+ * On TC_ES1: Multiplier = x3, final speed = 270MHz -+ * On TC_ES2: Multiplier = x6, final speed = 540MHz -+ * On TCF5: Multiplier = 1x final speed = 45MHz -+ * -+ * -+ * The base (unmultiplied speed) can be adjusted using a module parameter -+ * called "sys_core_clk_speed", a number in Hz. -+ * As an example: -+ * -+ * PVR_SRVKM_PARAMS="sys_core_clk_speed=60000000" /etc/init.d/rc.pvr start -+ * -+ * would result in a core speed of 60MHz xMultiplier. -+ * -+ * -+ * The memory clock is unmultiplied and can be adjusted using a module -+ * parameter called "sys_mem_clk_speed", this should be the number in Hz for -+ * the memory clock speed. -+ * As an example: -+ * -+ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=100000000" /etc/init.d/rc.pvr start -+ * -+ * would attempt to start the driver with the memory clock speed set to 100MHz. -+ * -+ * -+ * Same applies to the system interface clock speed, "sys_sysif_clk_speed". -+ * Needed for TCF5 but not for TC_ES2/ES1. -+ * As an example: -+ * -+ * PVR_SRVKM_PARAMS="sys_sysif_clk_speed=45000000" /etc/init.d/rc.pvr start -+ * -+ * would attempt to start the driver with the system clock speed set to 45MHz. 
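For illustration only: the comment above says the base core clock is multiplied by a TC-version-dependent factor (x3 on ES1, x6 on ES2, x1 on TCF5), which is how a 90 MHz base (the ES1/ES2 RGX_TC_CORE_CLOCK_SPEED value defined just below) yields the quoted 270 MHz and 540 MHz figures. A small standalone check of that arithmetic; the enum and function names are invented for the example.

#include <stdio.h>

enum tc_flavour { TC_ES1, TC_ES2, TC_TCF5 };   /* invented names for the example */

static long multiplier(enum tc_flavour f)
{
	switch (f) {
	case TC_ES1: return 3;   /* per the comment above: x3 */
	case TC_ES2: return 6;   /* per the comment above: x6 */
	default:     return 1;   /* TCF5: x1                  */
	}
}

int main(void)
{
	long base_hz = 90000000;  /* ES1/ES2 base core clock from the defines below */

	printf("ES1: %ld MHz\n", base_hz * multiplier(TC_ES1) / 1000000);  /* 270 */
	printf("ES2: %ld MHz\n", base_hz * multiplier(TC_ES2) / 1000000);  /* 540 */
	return 0;
}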
-+ * -+ * -+ * All parameters can be specified at once, e.g., -+ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=MEMORY_SPEED sys_core_clk_speed=CORE_SPEED sys_sysif_clk_speed=SYSIF_SPEED" /etc/init.d/rc.pvr start -+ */ -+ -+#define RGX_TC_SYS_CLOCK_SPEED (25000000) /*< At the moment just used for TCF5 */ -+#define RGX_TC_CLOCK_MULTIPLEX (1) -+ -+#if defined(TC_APOLLO_TCF5_22_46_54_330) -+ #undef RGX_TC_SYS_CLOCK_SPEED -+ #define RGX_TC_CORE_CLOCK_SPEED (100000000) -+ #define RGX_TC_MEM_CLOCK_SPEED (45000000) -+ #define RGX_TC_SYS_CLOCK_SPEED (45000000) -+#elif defined(TC_APOLLO_TCF5_22_49_21_16) || \ -+ defined(TC_APOLLO_TCF5_22_60_22_29) || \ -+ defined(TC_APOLLO_TCF5_22_75_22_25) -+ #define RGX_TC_CORE_CLOCK_SPEED (20000000) -+ #define RGX_TC_MEM_CLOCK_SPEED (50000000) -+#elif defined(TC_APOLLO_TCF5_22_67_54_30) -+ #define RGX_TC_CORE_CLOCK_SPEED (100000000) -+ #define RGX_TC_MEM_CLOCK_SPEED (45000000) -+#elif defined(TC_APOLLO_TCF5_22_89_204_18) -+ #define RGX_TC_CORE_CLOCK_SPEED (50000000) -+ #define RGX_TC_MEM_CLOCK_SPEED (25000000) -+#elif defined(TC_APOLLO_TCF5_22_86_104_218) -+ #define RGX_TC_CORE_CLOCK_SPEED (30000000) -+ #define RGX_TC_MEM_CLOCK_SPEED (40000000) -+#elif defined(TC_APOLLO_TCF5_22_88_104_318) -+ #define RGX_TC_CORE_CLOCK_SPEED (28000000) -+ #define RGX_TC_MEM_CLOCK_SPEED (40000000) -+#elif defined(TC_APOLLO_TCF5_22_98_54_230) -+ #define RGX_TC_CORE_CLOCK_SPEED (100000000) -+ #define RGX_TC_MEM_CLOCK_SPEED (40000000) -+#elif defined(TC_APOLLO_TCF5_22_102_54_38) -+ #define RGX_TC_CORE_CLOCK_SPEED (80000000) -+ #define RGX_TC_MEM_CLOCK_SPEED (25000000) -+#elif defined(TC_APOLLO_TCF5_BVNC_NOT_SUPPORTED) -+ /* TC TCF5 (22.*) fallback frequencies */ -+ #undef RGX_TC_SYS_CLOCK_SPEED -+ #define RGX_TC_CORE_CLOCK_SPEED (20000000) -+ #define RGX_TC_MEM_CLOCK_SPEED (50000000) -+ #define RGX_TC_SYS_CLOCK_SPEED (25000000) -+#elif defined(TC_APOLLO_TCF5_33_8_22_1) -+ #define RGX_TC_CORE_CLOCK_SPEED (25000000) -+ #define RGX_TC_MEM_CLOCK_SPEED (45000000) -+#elif defined(TC_APOLLO_TCF5_REFERENCE) -+ /* TC TCF5 (Reference bitfile) */ -+ #undef RGX_TC_SYS_CLOCK_SPEED -+ #define RGX_TC_CORE_CLOCK_SPEED (50000000) -+ #define RGX_TC_MEM_CLOCK_SPEED (50000000) -+ #define RGX_TC_SYS_CLOCK_SPEED (45000000) -+#elif defined(TC_APOLLO_BONNIE) -+ /* TC Bonnie */ -+ #define RGX_TC_CORE_CLOCK_SPEED (18000000) -+ #define RGX_TC_MEM_CLOCK_SPEED (65000000) -+#elif defined(TC_APOLLO_ES2) -+ /* TC ES2 */ -+ #define RGX_TC_CORE_CLOCK_SPEED (90000000) -+ #define RGX_TC_MEM_CLOCK_SPEED (104000000) -+#elif defined(TC_ORION) -+ #define RGX_TC_CORE_CLOCK_SPEED (40000000) -+ #define RGX_TC_MEM_CLOCK_SPEED (100000000) -+ #define RGX_TC_SYS_CLOCK_SPEED (25000000) -+#elif defined(TC_APOLLO_TCF5_29_19_52_202) -+ #define RGX_TC_CORE_CLOCK_SPEED (25000000) -+ #define RGX_TC_MEM_CLOCK_SPEED (40000000) -+#elif defined(TC_APOLLO_TCF5_29_18_204_508) -+ #define RGX_TC_CORE_CLOCK_SPEED (15000000) -+ #define RGX_TC_MEM_CLOCK_SPEED (35000000) -+#else -+ /* TC ES1 */ -+ #define RGX_TC_CORE_CLOCK_SPEED (90000000) -+ #define RGX_TC_MEM_CLOCK_SPEED (65000000) -+#endif -+ -+#endif /* if !defined(TC_CLOCKS_H) */ -diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_drv.c b/drivers/gpu/drm/img-rogue/apollo/tc_drv.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/tc_drv.c -@@ -0,0 +1,943 @@ -+/* -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+/* -+ * This is a device driver for the testchip framework. It creates platform -+ * devices for the pdp and ext sub-devices, and exports functions to manage the -+ * shared interrupt handling -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#if defined(CONFIG_MTRR) -+#include -+#endif -+ -+#include "pvrmodule.h" -+ -+#include "tc_apollo.h" -+#include "tc_odin.h" -+#include "kernel_compatibility.h" -+ -+/* How much memory to give to the PDP heap (used for pdp buffers). */ -+#define TC_PDP_MEM_SIZE_BYTES ((TC_DISPLAY_MEM_SIZE)*1024*1024) -+ -+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) -+/* How much memory to give to the secure heap. 
*/ -+#define TC_SECURE_MEM_SIZE_BYTES ((TC_SECURE_MEM_SIZE)*1024*1024) -+#endif -+ -+#define PCI_VENDOR_ID_POWERVR 0x1010 -+#define DEVICE_ID_PCI_APOLLO_FPGA 0x1CF1 -+#define DEVICE_ID_PCIE_APOLLO_FPGA 0x1CF2 -+ -+MODULE_DESCRIPTION("PowerVR testchip framework driver"); -+MODULE_IMPORT_NS(DMA_BUF); -+ -+static int tc_core_clock; -+module_param(tc_core_clock, int, 0444); -+MODULE_PARM_DESC(tc_core_clock, "TC core clock speed"); -+ -+static int tc_mem_clock; -+module_param(tc_mem_clock, int, 0444); -+MODULE_PARM_DESC(tc_mem_clock, "TC memory clock speed"); -+ -+static int tc_clock_multiplex; -+module_param(tc_clock_multiplex, int, 0444); -+MODULE_PARM_DESC(tc_clock_multiplex, "TC core clock multiplex"); -+ -+static int tc_sys_clock = RGX_TC_SYS_CLOCK_SPEED; -+module_param(tc_sys_clock, int, 0444); -+MODULE_PARM_DESC(tc_sys_clock, "TC system clock speed (TCF5 only)"); -+ -+static int tc_mem_latency; -+module_param(tc_mem_latency, int, 0444); -+MODULE_PARM_DESC(tc_mem_latency, "TC memory read latency in cycles (TCF5 only)"); -+ -+static unsigned long tc_mem_mode = TC_MEMORY_CONFIG; -+module_param(tc_mem_mode, ulong, 0444); -+MODULE_PARM_DESC(tc_mem_mode, "TC memory mode (local = 1, hybrid = 2, host = 3)"); -+ -+static int tc_wresp_latency; -+module_param(tc_wresp_latency, int, 0444); -+MODULE_PARM_DESC(tc_wresp_latency, "TC memory write response latency in cycles (TCF5 only)"); -+ -+static unsigned long tc_pdp_mem_size = TC_PDP_MEM_SIZE_BYTES; -+module_param(tc_pdp_mem_size, ulong, 0444); -+MODULE_PARM_DESC(tc_pdp_mem_size, -+ "TC PDP reserved memory size in bytes"); -+ -+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) -+static unsigned long tc_secure_mem_size = TC_SECURE_MEM_SIZE_BYTES; -+module_param(tc_secure_mem_size, ulong, 0444); -+MODULE_PARM_DESC(tc_secure_mem_size, -+ "TC secure reserved memory size in bytes"); -+#endif -+ -+static bool fbc_bypass; -+module_param(fbc_bypass, bool, 0444); -+MODULE_PARM_DESC(fbc_bypass, "Force bypass of PDP2 FBC decompression"); -+ -+static struct debugfs_blob_wrapper tc_debugfs_rogue_name_blobs[] = { -+ [APOLLO_VERSION_TCF_2] = { -+ .data = "hood", /* probably */ -+ .size = sizeof("hood") - 1, -+ }, -+ [APOLLO_VERSION_TCF_5] = { -+ .data = "fpga (unknown)", -+ .size = sizeof("fpga (unknown)") - 1, -+ }, -+ [APOLLO_VERSION_TCF_BONNIE] = { -+ .data = "bonnie", -+ .size = sizeof("bonnie") - 1, -+ }, -+ [ODIN_VERSION_TCF_BONNIE] = { -+ .data = "bonnie", -+ .size = sizeof("bonnie") - 1, -+ }, -+ [ODIN_VERSION_FPGA] = { -+ .data = "fpga (unknown)", -+ .size = sizeof("fpga (unknown)") - 1, -+ }, -+ [ODIN_VERSION_ORION] = { -+ .data = "orion", -+ .size = sizeof("orion") - 1, -+ }, -+}; -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) -+/* forward declaration */ -+static void tc_devres_release(struct device *dev, void *res); -+ -+static ssize_t rogue_name_show(struct device_driver *drv, char *buf) -+{ -+ struct pci_dev *pci_dev; -+ struct tc_device *tc; -+ struct device *dev; -+ -+ dev = driver_find_next_device(drv, NULL); -+ if (!dev) -+ return -ENODEV; -+ -+ pci_dev = to_pci_dev(dev); -+ if (!pci_dev) -+ return -ENODEV; -+ -+ tc = devres_find(&pci_dev->dev, tc_devres_release, NULL, NULL); -+ if (!tc) -+ return -ENODEV; -+ -+ return sprintf(buf, "%s\n", (const char *) -+ tc_debugfs_rogue_name_blobs[tc->version].data); -+} -+ -+static DRIVER_ATTR_RO(rogue_name); -+ -+static struct attribute *tc_attrs[] = { -+ &driver_attr_rogue_name.attr, -+ NULL, -+}; -+ -+static struct attribute_group tc_attr_group = { -+ .attrs = tc_attrs, -+}; -+ -+static const struct 
attribute_group *tc_attr_groups[] = { -+ &tc_attr_group, -+ NULL, -+}; -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) */ -+ -+#if defined(CONFIG_MTRR) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) -+/* -+ * A return value of: -+ * 0 or more means success -+ * -1 means we were unable to add an mtrr but we should continue -+ * -2 means we were unable to add an mtrr but we shouldn't continue -+ */ -+static int mtrr_setup(struct pci_dev *pdev, -+ resource_size_t mem_start, -+ resource_size_t mem_size) -+{ -+ int err; -+ int mtrr; -+ -+ /* Reset MTRR */ -+ mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_UNCACHABLE, 0); -+ if (mtrr < 0) { -+ dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n", -+ __LINE__, __func__, mtrr); -+ mtrr = -2; -+ goto err_out; -+ } -+ -+ err = mtrr_del(mtrr, mem_start, mem_size); -+ if (err < 0) { -+ dev_err(&pdev->dev, "%d - %s: mtrr_del failed (%d)\n", -+ __LINE__, __func__, err); -+ mtrr = -2; -+ goto err_out; -+ } -+ -+ mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_WRBACK, 0); -+ if (mtrr < 0) { -+ /* Stop, but not an error as this may be already be setup */ -+ dev_dbg(&pdev->dev, -+ "%d - %s: mtrr_add failed (%d) - probably means the mtrr is already setup\n", -+ __LINE__, __func__, mtrr); -+ mtrr = -1; -+ goto err_out; -+ } -+ -+ err = mtrr_del(mtrr, mem_start, mem_size); -+ if (err < 0) { -+ dev_err(&pdev->dev, "%d - %s: mtrr_del failed (%d)\n", -+ __LINE__, __func__, err); -+ mtrr = -2; -+ goto err_out; -+ } -+ -+ if (mtrr == 0) { -+ /* Replace 0 with a non-overlapping WRBACK mtrr */ -+ err = mtrr_add(0, mem_start, MTRR_TYPE_WRBACK, 0); -+ if (err < 0) { -+ dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n", -+ __LINE__, __func__, err); -+ mtrr = -2; -+ goto err_out; -+ } -+ } -+ -+ mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_WRCOMB, 0); -+ if (mtrr < 0) { -+ dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n", -+ __LINE__, __func__, mtrr); -+ mtrr = -1; -+ } -+ -+err_out: -+ return mtrr; -+} -+#endif /* defined(CONFIG_MTRR) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) */ -+ -+int tc_mtrr_setup(struct tc_device *tc) -+{ -+ int err = 0; -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) -+ /* Register the LMA as write combined */ -+ err = arch_io_reserve_memtype_wc(tc->tc_mem.base, -+ tc->tc_mem.size); -+ if (err) -+ return -ENODEV; -+#endif -+ /* Enable write combining */ -+ tc->mtrr = arch_phys_wc_add(tc->tc_mem.base, -+ tc->tc_mem.size); -+ if (tc->mtrr < 0) { -+ err = -ENODEV; -+ goto err_out; -+ } -+ -+#elif defined(CONFIG_MTRR) -+ /* Enable mtrr region caching */ -+ tc->mtrr = mtrr_setup(tc->pdev, -+ tc->tc_mem.base, -+ tc->tc_mem.size); -+ if (tc->mtrr == -2) { -+ err = -ENODEV; -+ goto err_out; -+ } -+#endif -+ return err; -+ -+err_out: -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) -+ arch_io_free_memtype_wc(tc->tc_mem.base, -+ tc->tc_mem.size); -+#endif -+ return err; -+} -+ -+void tc_mtrr_cleanup(struct tc_device *tc) -+{ -+ if (tc->mtrr >= 0) { -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) -+ arch_phys_wc_del(tc->mtrr); -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) -+ arch_io_free_memtype_wc(tc->tc_mem.base, -+ tc->tc_mem.size); -+#endif -+#elif defined(CONFIG_MTRR) -+ int err; -+ -+ err = mtrr_del(tc->mtrr, -+ tc->tc_mem.base, -+ tc->tc_mem.size); -+ if (err < 0) -+ dev_err(&tc->pdev->dev, -+ "mtrr_del failed (%d)\n", err); -+#endif -+ } -+} -+ -+int tc_is_interface_aligned(u32 eyes, u32 clk_taps, u32 train_ack) -+{ -+ u32 max_eye_start = 
eyes >> 16; -+ u32 min_eye_end = eyes & 0xffff; -+ -+ /* If either the training or training ack failed, we haven't aligned */ -+ if (!(clk_taps & 0x10000) || !(train_ack & 0x100)) -+ return 0; -+ -+ /* If the max eye >= min eye it means the readings are nonsense */ -+ if (max_eye_start >= min_eye_end) -+ return 0; -+ -+ /* If we failed the ack pattern more than 4 times */ -+ if (((train_ack & 0xf0) >> 4) > 4) -+ return 0; -+ -+ /* If there is less than 7 taps (240ps @40ps/tap, this number should be -+ * lower for the fpga, since its taps are bigger We should really -+ * calculate the "7" based on the interface clock speed. -+ */ -+ if ((min_eye_end - max_eye_start) < 7) -+ return 0; -+ -+ return 1; -+} -+ -+int tc_iopol32_nonzero(u32 mask, void __iomem *addr) -+{ -+ int polnum; -+ u32 read_value; -+ -+ for (polnum = 0; polnum < 50; polnum++) { -+ read_value = ioread32(addr) & mask; -+ if (read_value != 0) -+ break; -+ msleep(20); -+ } -+ if (polnum == 50) { -+ pr_err(DRV_NAME " iopol32_nonzero timeout\n"); -+ return -ETIME; -+ } -+ return 0; -+} -+ -+int request_pci_io_addr(struct pci_dev *pdev, u32 index, -+ resource_size_t offset, resource_size_t length) -+{ -+ resource_size_t start, end; -+ -+ start = pci_resource_start(pdev, index); -+ end = pci_resource_end(pdev, index); -+ -+ if ((start + offset + length - 1) > end) -+ return -EIO; -+ if (pci_resource_flags(pdev, index) & IORESOURCE_IO) { -+ if (request_region(start + offset, length, DRV_NAME) == NULL) -+ return -EIO; -+ } else { -+ if (request_mem_region(start + offset, length, DRV_NAME) -+ == NULL) -+ return -EIO; -+ } -+ return 0; -+} -+ -+void release_pci_io_addr(struct pci_dev *pdev, u32 index, -+ resource_size_t start, resource_size_t length) -+{ -+ if (pci_resource_flags(pdev, index) & IORESOURCE_IO) -+ release_region(start, length); -+ else -+ release_mem_region(start, length); -+} -+ -+int setup_io_region(struct pci_dev *pdev, -+ struct tc_io_region *region, u32 index, -+ resource_size_t offset, resource_size_t size) -+{ -+ int err; -+ resource_size_t pci_phys_addr; -+ -+ err = request_pci_io_addr(pdev, index, offset, size); -+ if (err) { -+ dev_err(&pdev->dev, -+ "Failed to request tc registers (err=%d)\n", err); -+ return -EIO; -+ } -+ pci_phys_addr = pci_resource_start(pdev, index); -+ region->region.base = pci_phys_addr + offset; -+ region->region.size = size; -+ -+ region->registers = ioremap(region->region.base, region->region.size); -+ -+ if (!region->registers) { -+ dev_err(&pdev->dev, "Failed to map tc registers\n"); -+ release_pci_io_addr(pdev, index, -+ region->region.base, region->region.size); -+ return -EIO; -+ } -+ return 0; -+} -+ -+#if defined(TC_FAKE_INTERRUPTS) -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) -+void tc_irq_fake_wrapper(struct timer_list *t) -+{ -+ struct tc_device *tc = from_timer(tc, t, timer); -+#else -+void tc_irq_fake_wrapper(unsigned long data) -+{ -+ struct tc_device *tc = (struct tc_device *)data; -+#endif -+ -+ if (tc->odin) -+ odin_irq_handler(0, tc); -+ else -+ apollo_irq_handler(0, tc); -+ -+ mod_timer(&tc->timer, -+ jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS)); -+} -+#endif -+ -+static int tc_register_pdp_device(struct tc_device *tc) -+{ -+ int err = 0; -+ -+ if (tc->odin || tc->orion) -+ err = odin_register_pdp_device(tc); -+ else -+ err = apollo_register_pdp_device(tc); -+ -+ return err; -+} -+ -+static int tc_register_ext_device(struct tc_device *tc) -+{ -+ int err = 0; -+ -+ if (tc->odin || tc->orion) -+ err = odin_register_ext_device(tc); -+ else -+ err = 
apollo_register_ext_device(tc); -+ -+ return err; -+} -+ -+static int tc_register_dma_device(struct tc_device *tc) -+{ -+ int err = 0; -+ -+ if (tc->odin) -+ err = odin_register_dma_device(tc); -+ -+ return err; -+} -+ -+static void tc_devres_release(struct device *dev, void *res) -+{ -+ /* No extra cleanup needed */ -+} -+ -+static int tc_cleanup(struct pci_dev *pdev) -+{ -+ struct tc_device *tc = devres_find(&pdev->dev, -+ tc_devres_release, NULL, NULL); -+ int i, err = 0; -+ -+ if (!tc) { -+ dev_err(&pdev->dev, "No tc device resources found\n"); -+ return -ENODEV; -+ } -+ -+ debugfs_remove(tc->debugfs_rogue_name); -+ -+ for (i = 0; i < TC_INTERRUPT_COUNT; i++) -+ if (tc->interrupt_handlers[i].enabled) -+ tc_disable_interrupt(&pdev->dev, i); -+ -+ if (tc->odin || tc->orion) -+ err = odin_cleanup(tc); -+ else -+ err = apollo_cleanup(tc); -+ -+ debugfs_remove(tc->debugfs_tc_dir); -+ -+ return err; -+} -+ -+static int tc_init(struct pci_dev *pdev, const struct pci_device_id *id) -+{ -+ struct tc_device *tc; -+ int err = 0; -+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) -+ int sec_mem_size = TC_SECURE_MEM_SIZE_BYTES; -+#else /* defined(SUPPORT_FAKE_SECURE_ION_HEAP) */ -+ int sec_mem_size = 0; -+#endif /* defined(SUPPORT_FAKE_SECURE_ION_HEAP) */ -+ -+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) -+ return -ENOMEM; -+ -+ tc = devres_alloc(tc_devres_release, -+ sizeof(*tc), GFP_KERNEL); -+ if (!tc) { -+ err = -ENOMEM; -+ goto err_out; -+ } -+ -+ devres_add(&pdev->dev, tc); -+ -+ err = tc_enable(&pdev->dev); -+ if (err) { -+ dev_err(&pdev->dev, -+ "tc_enable failed %d\n", err); -+ goto err_release; -+ } -+ -+ tc->pdev = pdev; -+ -+ spin_lock_init(&tc->interrupt_handler_lock); -+ spin_lock_init(&tc->interrupt_enable_lock); -+ -+ tc->debugfs_tc_dir = debugfs_create_dir(DRV_NAME, NULL); -+ -+ if (pdev->vendor == PCI_VENDOR_ID_ODIN) { -+ -+ if (pdev->device == DEVICE_ID_ODIN) -+ tc->odin = true; -+ else if (pdev->device == DEVICE_ID_ORION) -+ tc->orion = true; -+ -+ dev_info(&pdev->dev, "%s detected\n", odin_tc_name(tc)); -+ -+ err = odin_init(tc, pdev, -+ &tc_core_clock, &tc_mem_clock, &tc_clock_multiplex, -+ tc_pdp_mem_size, sec_mem_size, -+ tc_mem_latency, tc_wresp_latency, -+ tc_mem_mode, fbc_bypass); -+ if (err) -+ goto err_dev_cleanup; -+ -+ } else { -+ dev_info(&pdev->dev, "Apollo detected"); -+ tc->odin = false; -+ -+ err = apollo_init(tc, pdev, -+ &tc_core_clock, &tc_mem_clock, tc_sys_clock, &tc_clock_multiplex, -+ tc_pdp_mem_size, sec_mem_size, -+ tc_mem_latency, tc_wresp_latency, -+ tc_mem_mode); -+ if (err) -+ goto err_dev_cleanup; -+ } -+ -+ /* Add the rogue name debugfs entry */ -+ tc->debugfs_rogue_name = -+ debugfs_create_blob("rogue-name", 0444, -+ tc->debugfs_tc_dir, -+ &tc_debugfs_rogue_name_blobs[tc->version]); -+ -+#if defined(TC_FAKE_INTERRUPTS) -+ dev_warn(&pdev->dev, "WARNING: Faking interrupts every %d ms", -+ FAKE_INTERRUPT_TIME_MS); -+#endif -+ -+ /* Register pdp and ext platform devices */ -+ err = tc_register_pdp_device(tc); -+ if (err) -+ goto err_dev_cleanup; -+ -+ err = tc_register_ext_device(tc); -+ if (err) -+ goto err_dev_cleanup; -+ -+ err = tc_register_dma_device(tc); -+ if (err) -+ goto err_dev_cleanup; -+ -+ devres_remove_group(&pdev->dev, NULL); -+ -+ pci_set_master(pdev); -+ -+err_out: -+ if (err) -+ dev_err(&pdev->dev, "%s: failed\n", __func__); -+ -+ return err; -+ -+err_dev_cleanup: -+ tc_cleanup(pdev); -+ tc_disable(&pdev->dev); -+err_release: -+ devres_release_group(&pdev->dev, NULL); -+ goto err_out; -+} -+ -+static void tc_exit(struct pci_dev 
*pdev) -+{ -+ struct tc_device *tc = devres_find(&pdev->dev, -+ tc_devres_release, NULL, NULL); -+ -+ if (!tc) { -+ dev_err(&pdev->dev, "No tc device resources found\n"); -+ return; -+ } -+ -+ if (tc->pdp_dev) -+ platform_device_unregister(tc->pdp_dev); -+ -+ if (tc->ext_dev) -+ platform_device_unregister(tc->ext_dev); -+ -+ if (tc->dma_dev) -+ platform_device_unregister(tc->dma_dev); -+ -+ pci_clear_master(pdev); -+ -+ tc_cleanup(pdev); -+ -+ tc_disable(&pdev->dev); -+} -+ -+static struct pci_device_id tc_pci_tbl[] = { -+ { PCI_VDEVICE(POWERVR, DEVICE_ID_PCI_APOLLO_FPGA) }, -+ { PCI_VDEVICE(POWERVR, DEVICE_ID_PCIE_APOLLO_FPGA) }, -+ { PCI_VDEVICE(POWERVR, DEVICE_ID_TBA) }, -+ { PCI_VDEVICE(ODIN, DEVICE_ID_ODIN) }, -+ { PCI_VDEVICE(ODIN, DEVICE_ID_ORION) }, -+ { }, -+}; -+ -+static struct pci_driver tc_pci_driver = { -+ .name = DRV_NAME, -+ .id_table = tc_pci_tbl, -+ .probe = tc_init, -+ .remove = tc_exit, -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) -+ .groups = tc_attr_groups, -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) */ -+}; -+ -+module_pci_driver(tc_pci_driver); -+ -+MODULE_DEVICE_TABLE(pci, tc_pci_tbl); -+ -+int tc_enable(struct device *dev) -+{ -+ struct pci_dev *pdev = to_pci_dev(dev); -+ -+ return pci_enable_device(pdev); -+} -+EXPORT_SYMBOL(tc_enable); -+ -+void tc_disable(struct device *dev) -+{ -+ struct pci_dev *pdev = to_pci_dev(dev); -+ -+ pci_disable_device(pdev); -+} -+EXPORT_SYMBOL(tc_disable); -+ -+int tc_set_interrupt_handler(struct device *dev, int interrupt_id, -+ void (*handler_function)(void *), void *data) -+{ -+ struct tc_device *tc = devres_find(dev, tc_devres_release, -+ NULL, NULL); -+ int err = 0; -+ unsigned long flags; -+ -+ if (!tc) { -+ dev_err(dev, "No tc device resources found\n"); -+ err = -ENODEV; -+ goto err_out; -+ } -+ -+ if (interrupt_id < 0 || interrupt_id >= TC_INTERRUPT_COUNT) { -+ dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id); -+ err = -EINVAL; -+ goto err_out; -+ } -+ -+ spin_lock_irqsave(&tc->interrupt_handler_lock, flags); -+ -+ tc->interrupt_handlers[interrupt_id].handler_function = -+ handler_function; -+ tc->interrupt_handlers[interrupt_id].handler_data = data; -+ -+ spin_unlock_irqrestore(&tc->interrupt_handler_lock, flags); -+ -+err_out: -+ return err; -+} -+EXPORT_SYMBOL(tc_set_interrupt_handler); -+ -+int tc_enable_interrupt(struct device *dev, int interrupt_id) -+{ -+ struct tc_device *tc = devres_find(dev, tc_devres_release, -+ NULL, NULL); -+ int err = 0; -+ unsigned long flags; -+ -+ if (!tc) { -+ dev_err(dev, "No tc device resources found\n"); -+ err = -ENODEV; -+ goto err_out; -+ } -+ if (interrupt_id < 0 || interrupt_id >= TC_INTERRUPT_COUNT) { -+ dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id); -+ err = -EINVAL; -+ goto err_out; -+ } -+ spin_lock_irqsave(&tc->interrupt_enable_lock, flags); -+ -+ if (tc->interrupt_handlers[interrupt_id].enabled) { -+ dev_warn(dev, "Interrupt ID %d already enabled\n", -+ interrupt_id); -+ err = -EEXIST; -+ goto err_unlock; -+ } -+ tc->interrupt_handlers[interrupt_id].enabled = true; -+ -+ if (tc->odin || tc->orion) -+ odin_enable_interrupt_register(tc, interrupt_id); -+ else -+ apollo_enable_interrupt_register(tc, interrupt_id); -+ -+err_unlock: -+ spin_unlock_irqrestore(&tc->interrupt_enable_lock, flags); -+err_out: -+ return err; -+} -+EXPORT_SYMBOL(tc_enable_interrupt); -+ -+int tc_disable_interrupt(struct device *dev, int interrupt_id) -+{ -+ struct tc_device *tc = devres_find(dev, tc_devres_release, -+ NULL, NULL); -+ int err = 0; -+ unsigned 
long flags; -+ -+ if (!tc) { -+ dev_err(dev, "No tc device resources found\n"); -+ err = -ENODEV; -+ goto err_out; -+ } -+ if (interrupt_id < 0 || interrupt_id >= TC_INTERRUPT_COUNT) { -+ dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id); -+ err = -EINVAL; -+ goto err_out; -+ } -+ spin_lock_irqsave(&tc->interrupt_enable_lock, flags); -+ -+ if (!tc->interrupt_handlers[interrupt_id].enabled) { -+ dev_warn(dev, "Interrupt ID %d already disabled\n", -+ interrupt_id); -+ } -+ tc->interrupt_handlers[interrupt_id].enabled = false; -+ -+ if (tc->odin || tc->orion) -+ odin_disable_interrupt_register(tc, interrupt_id); -+ else -+ apollo_disable_interrupt_register(tc, interrupt_id); -+ -+ spin_unlock_irqrestore(&tc->interrupt_enable_lock, flags); -+err_out: -+ return err; -+} -+EXPORT_SYMBOL(tc_disable_interrupt); -+ -+int tc_sys_info(struct device *dev, u32 *tmp, u32 *pll) -+{ -+ int err = -ENODEV; -+ struct tc_device *tc = devres_find(dev, tc_devres_release, -+ NULL, NULL); -+ -+ if (!tc) { -+ dev_err(dev, "No tc device resources found\n"); -+ goto err_out; -+ } -+ -+ if (tc->odin || tc->orion) -+ err = odin_sys_info(tc, tmp, pll); -+ else -+ err = apollo_sys_info(tc, tmp, pll); -+ -+err_out: -+ return err; -+} -+EXPORT_SYMBOL(tc_sys_info); -+ -+int tc_sys_strings(struct device *dev, -+ char *str_fpga_rev, size_t size_fpga_rev, -+ char *str_tcf_core_rev, size_t size_tcf_core_rev, -+ char *str_tcf_core_target_build_id, -+ size_t size_tcf_core_target_build_id, -+ char *str_pci_ver, size_t size_pci_ver, -+ char *str_macro_ver, size_t size_macro_ver) -+{ -+ int err = -ENODEV; -+ -+ struct tc_device *tc = devres_find(dev, tc_devres_release, -+ NULL, NULL); -+ -+ if (!tc) { -+ dev_err(dev, "No tc device resources found\n"); -+ goto err_out; -+ } -+ -+ if (!str_fpga_rev || -+ !size_fpga_rev || -+ !str_tcf_core_rev || -+ !size_tcf_core_rev || -+ !str_tcf_core_target_build_id || -+ !size_tcf_core_target_build_id || -+ !str_pci_ver || -+ !size_pci_ver || -+ !str_macro_ver || -+ !size_macro_ver) { -+ -+ err = -EINVAL; -+ goto err_out; -+ } -+ -+ if (tc->odin || tc->orion) { -+ err = odin_sys_strings(tc, -+ str_fpga_rev, size_fpga_rev, -+ str_tcf_core_rev, size_tcf_core_rev, -+ str_tcf_core_target_build_id, -+ size_tcf_core_target_build_id, -+ str_pci_ver, size_pci_ver, -+ str_macro_ver, size_macro_ver); -+ } else { -+ err = apollo_sys_strings(tc, -+ str_fpga_rev, size_fpga_rev, -+ str_tcf_core_rev, size_tcf_core_rev, -+ str_tcf_core_target_build_id, -+ size_tcf_core_target_build_id, -+ str_pci_ver, size_pci_ver, -+ str_macro_ver, size_macro_ver); -+ } -+ -+err_out: -+ return err; -+} -+EXPORT_SYMBOL(tc_sys_strings); -+ -+int tc_core_clock_speed(struct device *dev) -+{ -+ return tc_core_clock; -+} -+EXPORT_SYMBOL(tc_core_clock_speed); -+ -+int tc_core_clock_multiplex(struct device *dev) -+{ -+ return tc_clock_multiplex; -+} -+EXPORT_SYMBOL(tc_core_clock_multiplex); -+ -+unsigned int tc_odin_subvers(struct device *dev) -+{ -+ struct tc_device *tc = devres_find(dev, tc_devres_release, -+ NULL, NULL); -+ -+ if (tc->orion) -+ return 1; -+ else -+ return 0; -+} -+EXPORT_SYMBOL(tc_odin_subvers); -+ -+bool tc_pfim_capable(struct device *dev) -+{ -+ struct tc_device *tc = devres_find(dev, tc_devres_release, -+ NULL, NULL); -+ -+ if (tc->odin && !tc->orion) -+ return (!tc->fbc_bypass && -+ odin_pfim_compatible(tc)); -+ -+ return false; -+} -+EXPORT_SYMBOL(tc_pfim_capable); -+ -+bool tc_pdp2_compatible(struct device *dev) -+{ -+ struct tc_device *tc = devres_find(dev, tc_devres_release, -+ NULL, NULL); -+ -+ /* 
PDP2 is available in all versions of Sleipnir PCB / Odin RTL */ -+ return (tc->odin && !tc->orion); -+} -+EXPORT_SYMBOL(tc_pdp2_compatible); -+ -+struct dma_chan *tc_dma_chan(struct device *dev, char *name) -+ -+{ -+ struct tc_device *tc = devres_find(dev, tc_devres_release, -+ NULL, NULL); -+ -+ if (tc->odin) -+ return odin_cdma_chan(tc, name); -+ else -+ return NULL; -+} -+EXPORT_SYMBOL(tc_dma_chan); -+ -+void tc_dma_chan_free(struct device *dev, -+ void *chan_prv) -+{ -+ struct tc_device *tc = devres_find(dev, tc_devres_release, -+ NULL, NULL); -+ -+ if (tc->odin) -+ odin_cdma_chan_free(tc, chan_prv); -+} -+EXPORT_SYMBOL(tc_dma_chan_free); -diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_drv.h b/drivers/gpu/drm/img-rogue/apollo/tc_drv.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/tc_drv.h -@@ -0,0 +1,191 @@ -+/* -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ */ -+ -+#ifndef _TC_DRV_H -+#define _TC_DRV_H -+ -+/* -+ * This contains the hooks for the testchip driver, as used by the Rogue and -+ * PDP sub-devices, and the platform data passed to each of their drivers -+ */ -+ -+#include -+#include -+#include -+ -+/* Valid values for the TC_MEMORY_CONFIG configuration option */ -+#define TC_MEMORY_LOCAL 1 -+#define TC_MEMORY_HOST 2 -+#define TC_MEMORY_HYBRID 3 -+ -+/* Baseboard implementation enumeration */ -+#define TC_BASEBOARD_APOLLO 1 -+#define TC_BASEBOARD_ODIN 2 -+#define TC_BASEBOARD_ORION 3 -+ -+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ -+#include PVR_ANDROID_ION_HEADER -+ -+/* NOTE: This should be kept in sync with the user side (in buffer_generic.c) */ -+#if defined(SUPPORT_RGX) -+#define ION_HEAP_TC_ROGUE (ION_HEAP_TYPE_CUSTOM+1) -+#endif -+#define ION_HEAP_TC_PDP (ION_HEAP_TYPE_CUSTOM+2) -+ -+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) -+#define ION_HEAP_TC_SECURE (ION_HEAP_TYPE_CUSTOM+3) -+#endif -+ -+#endif /* defined(SUPPORT_ION) */ -+ -+#define TC_INTERRUPT_PDP 0 -+#define TC_INTERRUPT_EXT 1 -+#define TC_INTERRUPT_TC5_PDP 2 -+#define TC_INTERRUPT_PDP2 3 -+#define TC_INTERRUPT_CDMA 4 -+#define TC_INTERRUPT_CDMA2 5 -+#define TC_INTERRUPT_COUNT 6 -+ -+int tc_enable(struct device *dev); -+void tc_disable(struct device *dev); -+ -+int tc_enable_interrupt(struct device *dev, int interrupt_id); -+int tc_disable_interrupt(struct device *dev, int interrupt_id); -+ -+int tc_set_interrupt_handler(struct device *dev, int interrupt_id, -+ void (*handler_function)(void *), void *handler_data); -+ -+int tc_sys_info(struct device *dev, u32 *tmp, u32 *pll); -+int tc_sys_strings(struct device *dev, -+ char *str_fpga_rev, size_t size_fpga_rev, char *str_tcf_core_rev, -+ size_t size_tcf_core_rev, char *str_tcf_core_target_build_id, -+ size_t size_tcf_core_target_build_id, char *str_pci_ver, -+ size_t size_pci_ver, char *str_macro_ver, size_t size_macro_ver); -+int tc_core_clock_speed(struct device *dev); -+int tc_core_clock_multiplex(struct device *dev); -+ -+unsigned int tc_odin_subvers(struct device *dev); -+ -+bool tc_pfim_capable(struct device *dev); -+bool tc_pdp2_compatible(struct device *dev); -+ -+void tc_dma_chan_free(struct device *dev, void *chandata); -+struct dma_chan *tc_dma_chan(struct device *dev, char *name); -+ -+#define APOLLO_DEVICE_NAME_PDP "apollo_pdp" -+#define ODN_DEVICE_NAME_PDP "odin_pdp" -+#define ODN_DEVICE_NAME_CDMA "odin-cdma" -+ -+/* The following structs are initialised and passed down by the parent tc -+ * driver to the respective sub-drivers -+ */ -+ -+struct tc_pdp_platform_data { -+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ struct ion_device *ion_device; -+ int ion_heap_id; -+#endif -+ resource_size_t memory_base; -+ -+ /* The following is used by the drm_pdp driver as it manages the -+ * pdp memory -+ */ -+ resource_size_t pdp_heap_memory_base; -+ resource_size_t pdp_heap_memory_size; -+ -+ /* Used to export host address instead of pdp address, depends on the -+ * TC memory mode. -+ * -+ * PDP phys address space is from 0 to end of local device memory, -+ * however if the TC is configured to operate in hybrid mode then the -+ * GPU is configured to match the CPU phys address space view. 
-+ */ -+ bool dma_map_export_host_addr; -+}; -+ -+struct tc_dma_platform_data { -+ u32 addr_width; -+ u32 num_dmas; -+ bool has_dre; -+ bool has_sg; -+}; -+ -+#if defined(SUPPORT_RGX) -+ -+#define TC_DEVICE_NAME_ROGUE "tc_rogue" -+ -+struct tc_rogue_platform_data { -+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ struct ion_device *ion_device; -+ int ion_heap_id; -+#endif -+ /* The testchip memory mode (LOCAL, HOST or HYBRID) */ -+ int mem_mode; -+ -+ /* The testchip baseboard type (APOLLO, ODIN or ORION) */ -+ int baseboard; -+ -+ /* The base address of the testchip memory (CPU physical address) - -+ * used to convert from CPU-Physical to device-physical addresses -+ */ -+ resource_size_t tc_memory_base; -+ -+ /* The following is used to setup the services heaps that map to the -+ * ion heaps -+ */ -+ resource_size_t pdp_heap_memory_base; -+ resource_size_t pdp_heap_memory_size; -+ resource_size_t rogue_heap_memory_base; -+ resource_size_t rogue_heap_memory_size; -+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) -+ resource_size_t secure_heap_memory_base; -+ resource_size_t secure_heap_memory_size; -+#endif -+ -+ /* DMA channel names for RGX usage */ -+ char *tc_dma_tx_chan_name; -+ char *tc_dma_rx_chan_name; -+}; -+ -+#endif /* defined(SUPPORT_RGX) */ -+ -+#endif /* _TC_DRV_H */ -diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_drv_internal.h b/drivers/gpu/drm/img-rogue/apollo/tc_drv_internal.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/tc_drv_internal.h -@@ -0,0 +1,204 @@ -+/* -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". 
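tc_drv.h above is the interface the pdp and ext sub-drivers consume: they register a callback with tc_set_interrupt_handler() and then turn the shared line on with tc_enable_interrupt(), both called against the parent tc device. A hedged sketch of how a sub-driver's probe path might use it, based only on the declarations shown above; the callback body, function names and error handling here are illustrative and not taken from the real pdp/rogue drivers.

#include <linux/device.h>
#include "tc_drv.h"

static void my_pdp_isr(void *data)
{
	/* Illustrative callback: a real PDP driver would handle vsync etc. here. */
	struct device *dev = data;

	dev_dbg(dev, "PDP interrupt\n");
}

/* Called from a sub-driver probe; 'parent' is the tc PCI device, i.e. the
 * platform device's dev.parent as set up by tc_register_pdp_device().
 */
static int hook_pdp_interrupt(struct device *parent)
{
	int err;

	err = tc_set_interrupt_handler(parent, TC_INTERRUPT_PDP, my_pdp_isr, parent);
	if (err)
		return err;

	return tc_enable_interrupt(parent, TC_INTERRUPT_PDP);
}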
-+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#ifndef _TC_DRV_INTERNAL_H -+#define _TC_DRV_INTERNAL_H -+ -+#include "tc_drv.h" -+ -+#include -+ -+#if defined(TC_FAKE_INTERRUPTS) -+#define FAKE_INTERRUPT_TIME_MS 20 -+#include -+#include -+#endif -+ -+#define DRV_NAME "tc" -+ -+/* This is a guess of what's a minimum sensible size for the ext heap -+ * It is only used for a warning if the ext heap is smaller, and does -+ * not affect the functional logic in any way -+ */ -+#define TC_EXT_MINIMUM_MEM_SIZE (10*1024*1024) -+ -+#if defined(SUPPORT_DMA_HEAP) -+ #if defined(SUPPORT_FAKE_SECURE_DMA_HEAP) -+ #define TC_DMA_HEAP_COUNT 3 -+ #else -+ #define TC_DMA_HEAP_COUNT 2 -+ #endif -+#elif defined(SUPPORT_ION) -+ #if defined(SUPPORT_RGX) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ #define TC_ION_HEAP_BASE_COUNT 3 -+ #else -+ #define TC_ION_HEAP_BASE_COUNT 2 -+ #endif -+ -+ #if defined(SUPPORT_FAKE_SECURE_ION_HEAP) -+ #define TC_ION_HEAP_COUNT (TC_ION_HEAP_BASE_COUNT + 1) -+ #else -+ #define TC_ION_HEAP_COUNT TC_ION_HEAP_BASE_COUNT -+ #endif -+#endif /* defined(SUPPORT_ION) */ -+ -+/* Convert a byte offset to a 32 bit dword offset */ -+#define DWORD_OFFSET(byte_offset) ((byte_offset)>>2) -+ -+#define HEX2DEC(v) ((((v) >> 4) * 10) + ((v) & 0x0F)) -+ -+enum tc_version_t { -+ TC_INVALID_VERSION, -+ APOLLO_VERSION_TCF_2, -+ APOLLO_VERSION_TCF_5, -+ APOLLO_VERSION_TCF_BONNIE, -+ ODIN_VERSION_TCF_BONNIE, -+ ODIN_VERSION_FPGA, -+ ODIN_VERSION_ORION, -+}; -+ -+struct tc_interrupt_handler { -+ bool enabled; -+ void (*handler_function)(void *data); -+ void *handler_data; -+}; -+ -+struct tc_region { -+ resource_size_t base; -+ resource_size_t size; -+}; -+ -+struct tc_io_region { -+ struct tc_region region; -+ void __iomem *registers; -+}; -+ -+struct tc_device { -+ struct pci_dev *pdev; -+ -+ enum tc_version_t version; -+ bool odin; -+ bool orion; -+ -+ int mem_mode; -+ -+ struct tc_io_region tcf; -+ struct tc_io_region tcf_pll; -+ -+ struct tc_region tc_mem; -+ -+ struct platform_device *pdp_dev; -+ -+ resource_size_t pdp_heap_mem_base; -+ resource_size_t pdp_heap_mem_size; -+ -+ struct platform_device *ext_dev; -+ -+ resource_size_t ext_heap_mem_base; -+ resource_size_t ext_heap_mem_size; -+ -+ struct platform_device *dma_dev; -+ -+ struct dma_chan *dma_chans[2]; -+ unsigned int dma_refcnt[2]; -+ unsigned int dma_nchan; -+ struct mutex dma_mutex; -+ -+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) || \ -+ defined(SUPPORT_FAKE_SECURE_DMA_HEAP) -+ resource_size_t secure_heap_mem_base; -+ resource_size_t secure_heap_mem_size; -+#endif -+ -+#if defined(CONFIG_MTRR) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) -+ int mtrr; -+#endif -+ spinlock_t interrupt_handler_lock; -+ spinlock_t interrupt_enable_lock; -+ -+ struct tc_interrupt_handler -+ interrupt_handlers[TC_INTERRUPT_COUNT]; -+ -+#if defined(TC_FAKE_INTERRUPTS) -+ struct timer_list timer; -+#endif -+ -+#if defined(SUPPORT_DMA_HEAP) -+ struct dma_heap *dma_heaps[TC_DMA_HEAP_COUNT]; -+#elif defined(SUPPORT_ION) -+#if 
(LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ struct ion_device *ion_device; -+#endif -+ struct ion_heap *ion_heaps[TC_ION_HEAP_COUNT]; -+ int ion_heap_count; -+#endif /* defined(SUPPORT_ION) */ -+ -+ bool fbc_bypass; -+ -+ struct dentry *debugfs_tc_dir; -+ struct dentry *debugfs_rogue_name; -+}; -+ -+int tc_mtrr_setup(struct tc_device *tc); -+void tc_mtrr_cleanup(struct tc_device *tc); -+ -+int tc_is_interface_aligned(u32 eyes, u32 clk_taps, u32 train_ack); -+ -+int tc_iopol32_nonzero(u32 mask, void __iomem *addr); -+ -+int request_pci_io_addr(struct pci_dev *pdev, u32 index, -+ resource_size_t offset, resource_size_t length); -+void release_pci_io_addr(struct pci_dev *pdev, u32 index, -+ resource_size_t start, resource_size_t length); -+ -+int setup_io_region(struct pci_dev *pdev, -+ struct tc_io_region *region, u32 index, -+ resource_size_t offset, resource_size_t size); -+ -+#if defined(TC_FAKE_INTERRUPTS) -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) -+void tc_irq_fake_wrapper(struct timer_list *t); -+#else -+void tc_irq_fake_wrapper(unsigned long data); -+#endif -+#endif /* defined(TC_FAKE_INTERRUPTS) */ -+ -+#endif /* _TC_DRV_INTERNAL_H */ -diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_odin.c b/drivers/gpu/drm/img-rogue/apollo/tc_odin.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/tc_odin.c -@@ -0,0 +1,2305 @@ -+/* -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". 
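For illustration only: the setup_io_region()/release_pci_io_addr() pair declared above (and defined earlier in tc_drv.c) wraps the usual reserve-then-map sequence for a sub-range of a PCI BAR, undoing the reservation if the mapping fails. A minimal sketch of that sequence with plain kernel primitives; the function names and the "tc-example" resource name are invented.

#include <linux/io.h>
#include <linux/ioport.h>

/* Reserve a physical window and map it; undo the reservation on failure. */
static void __iomem *map_window(resource_size_t base, resource_size_t size)
{
	void __iomem *regs;

	if (!request_mem_region(base, size, "tc-example"))
		return NULL;            /* someone else already owns this range */

	regs = ioremap(base, size);
	if (!regs) {
		release_mem_region(base, size);
		return NULL;
	}
	return regs;
}

static void unmap_window(void __iomem *regs, resource_size_t base, resource_size_t size)
{
	iounmap(regs);
	release_mem_region(base, size);
}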
-+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+/* -+ * This is a device driver for the odin testchip framework. It creates -+ * platform devices for the pdp and ext sub-devices, and exports functions -+ * to manage the shared interrupt handling -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "tc_drv_internal.h" -+#include "tc_odin.h" -+#if defined(SUPPORT_DMA_HEAP) -+#include "tc_dmabuf_heap.h" -+#elif defined(SUPPORT_ION) -+#include "tc_ion.h" -+#endif -+ -+/* Odin (3rd gen TCF FPGA) */ -+#include "odin_defs.h" -+#include "odin_regs.h" -+#include "bonnie_tcf.h" -+#include "tc_clocks.h" -+ -+/* Orion demo platform */ -+#include "orion_defs.h" -+#include "orion_regs.h" -+ -+/* Odin/Orion common registers */ -+#include "tc_odin_common_regs.h" -+ -+/* Macros to set and get register fields */ -+#define REG_FIELD_GET(v, str) \ -+ (u32)(((v) & (str##_MASK)) >> (str##_SHIFT)) -+#define REG_FIELD_SET(v, f, str) \ -+ (v = (u32)(((v) & (u32)~(str##_MASK)) | \ -+ (u32)(((f) << (str##_SHIFT)) & (str##_MASK)))) -+ -+#define SAI_STATUS_UNALIGNED 0 -+#define SAI_STATUS_ALIGNED 1 -+#define SAI_STATUS_ERROR 2 -+ -+/* Odin/Orion shared masks */ -+static const u32 CHANGE_SET_SET_MASK[] = { -+ ODN_CHANGE_SET_SET_MASK, -+ SRS_CHANGE_SET_SET_MASK -+}; -+static const u32 CHANGE_SET_SET_SHIFT[] = { -+ ODN_CHANGE_SET_SET_SHIFT, -+ SRS_CHANGE_SET_SET_SHIFT -+}; -+static const u32 USER_ID_ID_MASK[] = { -+ ODN_USER_ID_ID_MASK, -+ SRS_USER_ID_ID_MASK -+}; -+static const u32 USER_ID_ID_SHIFT[] = { -+ ODN_USER_ID_ID_SHIFT, -+ SRS_USER_ID_ID_SHIFT -+}; -+static const u32 USER_BUILD_BUILD_MASK[] = { -+ ODN_USER_BUILD_BUILD_MASK, -+ SRS_USER_BUILD_BUILD_MASK -+}; -+static const u32 USER_BUILD_BUILD_SHIFT[] = { -+ ODN_USER_BUILD_BUILD_SHIFT, -+ SRS_USER_BUILD_BUILD_SHIFT -+}; -+static const u32 INPUT_CLOCK_SPEED_MIN[] = { -+ ODN_INPUT_CLOCK_SPEED_MIN, -+ SRS_INPUT_CLOCK_SPEED_MIN -+}; -+static const u32 INPUT_CLOCK_SPEED_MAX[] = { -+ ODN_INPUT_CLOCK_SPEED_MAX, -+ SRS_INPUT_CLOCK_SPEED_MAX -+}; -+static const u32 OUTPUT_CLOCK_SPEED_MIN[] = { -+ ODN_OUTPUT_CLOCK_SPEED_MIN, -+ SRS_OUTPUT_CLOCK_SPEED_MIN -+}; -+static const u32 OUTPUT_CLOCK_SPEED_MAX[] = { -+ ODN_OUTPUT_CLOCK_SPEED_MAX, -+ SRS_OUTPUT_CLOCK_SPEED_MAX -+}; -+static const u32 VCO_MIN[] = { -+ ODN_VCO_MIN, -+ SRS_VCO_MIN -+}; -+static const u32 VCO_MAX[] = { -+ ODN_VCO_MAX, -+ SRS_VCO_MAX -+}; -+static const u32 PFD_MIN[] = { -+ ODN_PFD_MIN, -+ SRS_PFD_MIN -+}; -+static const u32 PFD_MAX[] = { -+ ODN_PFD_MAX, -+ SRS_PFD_MAX -+}; -+ -+#if defined(SUPPORT_RGX) -+ -+static void spi_write(struct tc_device *tc, u32 off, u32 val) -+{ -+ iowrite32(off, tc->tcf.registers -+ + ODN_REG_BANK_TCF_SPI_MASTER -+ + ODN_SPI_MST_ADDR_RDNWR); -+ iowrite32(val, tc->tcf.registers -+ + ODN_REG_BANK_TCF_SPI_MASTER -+ + ODN_SPI_MST_WDATA); -+ iowrite32(0x1, tc->tcf.registers -+ + ODN_REG_BANK_TCF_SPI_MASTER -+ + ODN_SPI_MST_GO); -+ udelay(1000); -+} -+ -+static int spi_read(struct tc_device *tc, u32 off, u32 *val) -+{ -+ int 
cnt = 0; -+ u32 spi_mst_status; -+ -+ iowrite32(0x40000 | off, tc->tcf.registers -+ + ODN_REG_BANK_TCF_SPI_MASTER -+ + ODN_SPI_MST_ADDR_RDNWR); -+ iowrite32(0x1, tc->tcf.registers -+ + ODN_REG_BANK_TCF_SPI_MASTER -+ + ODN_SPI_MST_GO); -+ udelay(100); -+ -+ do { -+ spi_mst_status = ioread32(tc->tcf.registers -+ + ODN_REG_BANK_TCF_SPI_MASTER -+ + ODN_SPI_MST_STATUS); -+ -+ if (cnt++ > 10000) { -+ dev_err(&tc->pdev->dev, -+ "%s: Time out reading SPI reg (0x%x)\n", -+ __func__, off); -+ return -1; -+ } -+ -+ } while (spi_mst_status != 0x08); -+ -+ *val = ioread32(tc->tcf.registers -+ + ODN_REG_BANK_TCF_SPI_MASTER -+ + ODN_SPI_MST_RDATA); -+ -+ return 0; -+} -+ -+/* Returns 1 for aligned, 0 for unaligned */ -+static int get_odin_sai_status(struct tc_device *tc, int bank) -+{ -+ void __iomem *bank_addr = tc->tcf.registers -+ + ODN_REG_BANK_SAI_RX_DDR(bank); -+ void __iomem *reg_addr; -+ u32 eyes; -+ u32 clk_taps; -+ u32 train_ack; -+ -+ reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_EYES; -+ eyes = ioread32(reg_addr); -+ -+ reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_CLK_TAPS; -+ clk_taps = ioread32(reg_addr); -+ -+ reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK; -+ train_ack = ioread32(reg_addr); -+ -+#if 0 /* enable this to get debug info if the board is not aligning */ -+ dev_info(&tc->pdev->dev, -+ "odin bank %d align: eyes=%08x clk_taps=%08x train_ack=%08x\n", -+ bank, eyes, clk_taps, train_ack); -+#endif -+ -+ if (tc_is_interface_aligned(eyes, clk_taps, train_ack)) -+ return SAI_STATUS_ALIGNED; -+ -+ dev_warn(&tc->pdev->dev, "odin bank %d is unaligned\n", bank); -+ return SAI_STATUS_UNALIGNED; -+} -+ -+/* Read the odin multi clocked bank align status. -+ * Returns 1 for aligned, 0 for unaligned -+ */ -+static int read_odin_mca_status(struct tc_device *tc) -+{ -+ void __iomem *bank_addr = tc->tcf.registers -+ + ODN_REG_BANK_MULTI_CLK_ALIGN; -+ void __iomem *reg_addr = bank_addr + ODN_MCA_DEBUG_MCA_STATUS; -+ u32 mca_status; -+ -+ mca_status = ioread32(reg_addr); -+ -+#if 0 /* Enable this if there are alignment issues */ -+ dev_info(&tc->pdev->dev, -+ "Odin MCA_STATUS = %08x\n", mca_status); -+#endif -+ return mca_status & ODN_ALIGNMENT_FOUND_MASK; -+} -+ -+/* Read the DUT multi clocked bank align status. 
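spi_read() above busy-waits on the SPI master status register until it reads 0x08 ("transfer done"), giving up after 10000 iterations rather than spinning forever. A minimal user-space sketch of that bounded-poll pattern, with a stubbed status read standing in for ioread32(); every name below is invented for the illustration, not part of the driver:

```c
#include <stdio.h>

/* Stand-in for ioread32() on the SPI master status register:
 * pretend the transfer completes after a few polls.
 */
static unsigned int fake_spi_status_read(void)
{
        static int calls;

        return (++calls < 4) ? 0x0 : 0x08; /* 0x08 = transfer done */
}

/* Poll until the status reads "done" (0x08) or the retry cap is hit.
 * Returns 0 on success, -1 on timeout, mirroring spi_read()'s style.
 */
static int poll_spi_done(int max_tries)
{
        int cnt = 0;
        unsigned int status;

        do {
                status = fake_spi_status_read();
                if (cnt++ > max_tries) {
                        fprintf(stderr, "timed out waiting for SPI master\n");
                        return -1;
                }
        } while (status != 0x08);

        printf("SPI transfer done after %d polls\n", cnt);
        return 0;
}

int main(void)
{
        return poll_spi_done(10000) ? 1 : 0;
}
```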
-+ * Returns 1 for aligned, 0 for unaligned -+ */ -+static int read_dut_mca_status(struct tc_device *tc) -+{ -+ u32 mca_status; -+ const int mca_status_register_offset = 1; /* not in bonnie_tcf.h */ -+ int spi_address = DWORD_OFFSET(BONNIE_TCF_OFFSET_MULTI_CLK_ALIGN); -+ -+ spi_address = DWORD_OFFSET(BONNIE_TCF_OFFSET_MULTI_CLK_ALIGN) -+ + mca_status_register_offset; -+ -+ spi_read(tc, spi_address, &mca_status); -+ -+#if 0 /* Enable this if there are alignment issues */ -+ dev_info(&tc->pdev->dev, -+ "DUT MCA_STATUS = %08x\n", mca_status); -+#endif -+ return mca_status & 1; /* 'alignment found' status is in bit 1 */ -+} -+ -+/* Returns 1 for aligned, 0 for unaligned */ -+static int get_dut_sai_status(struct tc_device *tc, int bank) -+{ -+ u32 eyes; -+ u32 clk_taps; -+ u32 train_ack; -+ const u32 bank_base = DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_RX_1 -+ + (BONNIE_TCF_OFFSET_SAI_RX_DELTA * bank)); -+ int spi_timeout; -+ -+ spi_timeout = spi_read(tc, bank_base -+ + DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_EYES), &eyes); -+ if (spi_timeout) -+ return SAI_STATUS_ERROR; -+ -+ spi_read(tc, bank_base -+ + DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_CLK_TAPS), &clk_taps); -+ spi_read(tc, bank_base -+ + DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_TRAIN_ACK), &train_ack); -+ -+#if 0 /* enable this to get debug info if the board is not aligning */ -+ dev_info(&tc->pdev->dev, -+ "dut bank %d align: eyes=%08x clk_taps=%08x train_ack=%08x\n", -+ bank, eyes, clk_taps, train_ack); -+#endif -+ -+ if (tc_is_interface_aligned(eyes, clk_taps, train_ack)) -+ return SAI_STATUS_ALIGNED; -+ -+ dev_warn(&tc->pdev->dev, "dut bank %d is unaligned\n", bank); -+ return SAI_STATUS_UNALIGNED; -+} -+ -+/* -+ * Returns the divider group register fields for the specified counter value. -+ * See Xilinx Application Note xapp888. -+ */ -+static void odin_mmcm_reg_param_calc(u32 value, u32 *low, u32 *high, -+ u32 *edge, u32 *no_count) -+{ -+ if (value == 1U) { -+ *no_count = 1U; -+ *edge = 0; -+ *high = 0; -+ *low = 0; -+ } else { -+ *no_count = 0; -+ *edge = value % 2U; -+ *high = value >> 1; -+ *low = (value + *edge) >> 1U; -+ } -+} -+ -+/* -+ * Returns the MMCM Input Divider, FB Multiplier and Output Divider values for -+ * the specified input frequency and target output frequency. -+ * Function doesn't support fractional values for multiplier and output divider -+ * As per Xilinx 7 series FPGAs clocking resources user guide, aims for highest -+ * VCO and smallest D and M. -+ * Configured for Xilinx Virtex7 speed grade 2. 
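odin_mmcm_counter_calc() below looks for integer divider/multiplier values satisfying f_out = f_in * M / (D * O), keeping PFD = f_in / D and VCO = f_in * M / D inside the device limits and preferring the highest VCO (small D, large M). A stand-alone sketch of that first-pass search, using illustrative limits rather than the real ODN_ and SRS_ constants from odin_defs.h / orion_defs.h:

```c
#include <stdio.h>

/* Illustrative MMCM limits in Hz; the driver reads the real ones from
 * the ODN_ and SRS_ tables, these numbers only serve the demonstration.
 */
#define PFD_MIN_HZ   10000000u
#define PFD_MAX_HZ  450000000u
#define VCO_MIN_HZ  600000000u
#define VCO_MAX_HZ 1440000000u
#define DIV_MAX           106u
#define MUL_MAX            64u
#define OUT_MAX           128u

/* Exact-match search: smallest input divider and largest multiplier
 * first, so the highest legal VCO wins, as in the driver's first pass.
 * Returns 0 and fills d/m/o on success, -1 if no exact match exists.
 */
static int mmcm_calc(unsigned int fin, unsigned int fout,
                     unsigned int *d, unsigned int *m, unsigned int *o)
{
        unsigned int dc, mc;

        for (dc = 1; dc <= DIV_MAX; dc++) {
                unsigned int pfd = fin / dc;

                if (pfd < PFD_MIN_HZ || pfd > PFD_MAX_HZ)
                        continue;

                for (mc = MUL_MAX; mc >= 1; mc--) {
                        unsigned long long vco = (unsigned long long)pfd * mc;

                        if (vco < VCO_MIN_HZ || vco > VCO_MAX_HZ)
                                continue;
                        if (vco % fout)
                                continue; /* output divider must be integer */
                        if (vco / fout > OUT_MAX)
                                continue;

                        *d = dc;
                        *m = mc;
                        *o = (unsigned int)(vco / fout);
                        return 0;
                }
        }
        return -1;
}

int main(void)
{
        unsigned int d, m, o;

        if (!mmcm_calc(100000000u, 400000000u, &d, &m, &o))
                printf("100 MHz -> 400 MHz: D=%u M=%u O=%u (VCO=%u MHz)\n",
                       d, m, o, 100u / d * m);
        return 0;
}
```

With these illustrative numbers, 100 MHz in and 400 MHz out resolves to D=1, M=12, O=3 (a 1200 MHz VCO), which is the kind of result the driver then programs into the divider and multiplier registers via odin_mmcm_reg_param_calc().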
-+ */ -+static int odin_mmcm_counter_calc(struct device *dev, -+ u32 freq_input, u32 freq_output, -+ u32 *d, u32 *m, u32 *o) -+{ -+ u32 tcver = tc_odin_subvers(dev); -+ u32 best_diff, d_best, m_best, o_best; -+ u32 m_min, m_max, m_ideal; -+ u32 d_cur, m_cur, o_cur; -+ u32 d_min, d_max; -+ -+ /* -+ * Check specified input frequency is within range -+ */ -+ if (freq_input < INPUT_CLOCK_SPEED_MIN[tcver]) { -+ dev_err(dev, "Input frequency (%u hz) below minimum supported value (%u hz)\n", -+ freq_input, INPUT_CLOCK_SPEED_MIN[tcver]); -+ return -EINVAL; -+ } -+ if (freq_input > INPUT_CLOCK_SPEED_MAX[tcver]) { -+ dev_err(dev, "Input frequency (%u hz) above maximum supported value (%u hz)\n", -+ freq_input, INPUT_CLOCK_SPEED_MAX[tcver]); -+ return -EINVAL; -+ } -+ -+ /* -+ * Check specified target frequency is within range -+ */ -+ if (freq_output < OUTPUT_CLOCK_SPEED_MIN[tcver]) { -+ dev_err(dev, "Output frequency (%u hz) below minimum supported value (%u hz)\n", -+ freq_input, OUTPUT_CLOCK_SPEED_MIN[tcver]); -+ return -EINVAL; -+ } -+ if (freq_output > OUTPUT_CLOCK_SPEED_MAX[tcver]) { -+ dev_err(dev, "Output frequency (%u hz) above maximum supported value (%u hz)\n", -+ freq_output, OUTPUT_CLOCK_SPEED_MAX[tcver]); -+ return -EINVAL; -+ } -+ -+ /* -+ * Calculate min and max for Input Divider. -+ * Refer Xilinx 7 series FPGAs clocking resources user guide -+ * equation 3-6 and 3-7 -+ */ -+ d_min = DIV_ROUND_UP(freq_input, PFD_MAX[tcver]); -+ d_max = min(freq_input/PFD_MIN[tcver], (u32)ODN_DREG_VALUE_MAX); -+ -+ /* -+ * Calculate min and max for Input Divider. -+ * Refer Xilinx 7 series FPGAs clocking resources user guide. -+ * equation 3-8 and 3-9 -+ */ -+ m_min = DIV_ROUND_UP((VCO_MIN[tcver] * d_min), freq_input); -+ m_max = min(((VCO_MAX[tcver] * d_max) / freq_input), -+ (u32)ODN_MREG_VALUE_MAX); -+ -+ for (d_cur = d_min; d_cur <= d_max; d_cur++) { -+ /* -+ * Refer Xilinx 7 series FPGAs clocking resources user guide. -+ * equation 3-10 -+ */ -+ m_ideal = min(((d_cur * VCO_MAX[tcver])/freq_input), m_max); -+ -+ for (m_cur = m_ideal; m_cur >= m_min; m_cur -= 1) { -+ /** -+ * Skip if VCO for given 'm' and 'd' value is not an -+ * integer since fractional component is not supported -+ */ -+ if (((freq_input * m_cur) % d_cur) != 0) -+ continue; -+ -+ /** -+ * Skip if divider for given 'm' and 'd' value is not -+ * an integer since fractional component is not -+ * supported -+ */ -+ if ((freq_input * m_cur) % (d_cur * freq_output) != 0) -+ continue; -+ -+ /** -+ * Calculate output divider value. -+ */ -+ o_cur = (freq_input * m_cur)/(d_cur * freq_output); -+ -+ *d = d_cur; -+ *m = m_cur; -+ *o = o_cur; -+ return 0; -+ } -+ } -+ -+ /* -+ * Failed to find exact optimal solution with high VCO. Brute-force find -+ * a suitable config, again prioritising high VCO, to get lowest jitter -+ */ -+ d_min = 1; d_max = (u32)ODN_DREG_VALUE_MAX; -+ m_min = 1; m_max = (u32)ODN_MREG_VALUE_MAX; -+ best_diff = 0xFFFFFFFF; -+ -+ for (d_cur = d_min; d_cur <= d_max; d_cur++) { -+ for (m_cur = m_max; m_cur >= m_min; m_cur -= 1) { -+ u32 pfd, vco, o_avg, o_min, o_max; -+ -+ pfd = freq_input / d_cur; -+ vco = pfd * m_cur; -+ -+ if (pfd < PFD_MIN[tcver]) -+ continue; -+ -+ if (pfd > PFD_MAX[tcver]) -+ continue; -+ -+ if (vco < VCO_MIN[tcver]) -+ continue; -+ -+ if (vco > VCO_MAX[tcver]) -+ continue; -+ -+ /* -+ * A range of -1/+3 around o_avg gives us 100kHz granularity. -+ * It can be extended further. -+ */ -+ o_avg = vco / freq_output; -+ o_min = (o_avg >= 2) ? 
(o_avg - 1) : 1; -+ o_max = o_avg + 3; -+ if (o_max > (u32)ODN_OREG_VALUE_MAX) -+ o_max = (u32)ODN_OREG_VALUE_MAX; -+ -+ for (o_cur = o_min; o_cur <= o_max; o_cur++) { -+ u32 freq_cur, diff_cur; -+ -+ freq_cur = vco / o_cur; -+ -+ if (freq_cur > freq_output) -+ continue; -+ -+ diff_cur = freq_output - freq_cur; -+ -+ if (diff_cur == 0) { -+ /* Found an exact match */ -+ *d = d_cur; -+ *m = m_cur; -+ *o = o_cur; -+ return 0; -+ } -+ -+ if (diff_cur < best_diff) { -+ best_diff = diff_cur; -+ d_best = d_cur; -+ m_best = m_cur; -+ o_best = o_cur; -+ } -+ } -+ } -+ } -+ -+ if (best_diff != 0xFFFFFFFF) { -+ dev_warn(dev, "Odin: Found similar freq of %u Hz\n", freq_output - best_diff); -+ *d = d_best; -+ *m = m_best; -+ *o = o_best; -+ return 0; -+ } -+ -+ dev_err(dev, "Odin: Unable to find integer values for d, m and o for requested frequency (%u)\n", -+ freq_output); -+ -+ return -ERANGE; -+} -+ -+static int odin_fpga_set_dut_core_clk(struct tc_device *tc, -+ u32 input_clk, u32 output_clk) -+{ -+ int err = 0; -+ u32 in_div, mul, out_div; -+ u32 high_time, low_time, edge, no_count; -+ u32 value; -+ void __iomem *base = tc->tcf.registers; -+ void __iomem *clk_blk_base = base + ODN_REG_BANK_ODN_CLK_BLK; -+ struct device *dev = &tc->pdev->dev; -+ -+ err = odin_mmcm_counter_calc(dev, input_clk, output_clk, &in_div, -+ &mul, &out_div); -+ if (err != 0) -+ return err; -+ -+ /* Put DUT into reset */ -+ iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK, -+ base + ODN_CORE_EXTERNAL_RESETN); -+ msleep(20); -+ -+ /* Put DUT Core MMCM into reset */ -+ iowrite32(ODN_CLK_GEN_RESET_DUT_CORE_MMCM_MASK, -+ base + ODN_CORE_CLK_GEN_RESET); -+ msleep(20); -+ -+ /* Calculate the register fields for output divider */ -+ odin_mmcm_reg_param_calc(out_div, &high_time, &low_time, -+ &edge, &no_count); -+ -+ /* Read-modify-write the required fields to output divider register 1 */ -+ value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER1); -+ REG_FIELD_SET(value, high_time, -+ ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME); -+ REG_FIELD_SET(value, low_time, -+ ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME); -+ iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER1); -+ -+ /* Read-modify-write the required fields to output divider register 2 */ -+ value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER2); -+ REG_FIELD_SET(value, edge, -+ ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE); -+ REG_FIELD_SET(value, no_count, -+ ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT); -+ iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER2); -+ -+ /* Calculate the register fields for multiplier */ -+ odin_mmcm_reg_param_calc(mul, &high_time, &low_time, -+ &edge, &no_count); -+ -+ /* Read-modify-write the required fields to multiplier register 1 */ -+ value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER1); -+ REG_FIELD_SET(value, high_time, -+ ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME); -+ REG_FIELD_SET(value, low_time, -+ ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME); -+ iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER1); -+ -+ /* Read-modify-write the required fields to multiplier register 2 */ -+ value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER2); -+ REG_FIELD_SET(value, edge, -+ ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE); -+ REG_FIELD_SET(value, no_count, -+ ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT); -+ iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER2); -+ -+ /* Calculate the register fields for input divider */ -+ odin_mmcm_reg_param_calc(in_div, &high_time, &low_time, -+ &edge, &no_count); -+ -+ /* Read-modify-write the required 
fields to input divider register 1 */ -+ value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_IN_DIVIDER1); -+ REG_FIELD_SET(value, high_time, -+ ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME); -+ REG_FIELD_SET(value, low_time, -+ ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME); -+ REG_FIELD_SET(value, edge, -+ ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE); -+ REG_FIELD_SET(value, no_count, -+ ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT); -+ iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_IN_DIVIDER1); -+ -+ /* Bring DUT clock MMCM out of reset */ -+ iowrite32(0, tc->tcf.registers + ODN_CORE_CLK_GEN_RESET); -+ -+ err = tc_iopol32_nonzero(ODN_MMCM_LOCK_STATUS_DUT_CORE, -+ base + ODN_CORE_MMCM_LOCK_STATUS); -+ if (err != 0) { -+ dev_err(dev, "MMCM failed to lock for DUT core\n"); -+ return err; -+ } -+ -+ /* Bring DUT out of reset */ -+ iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK | -+ ODN_EXTERNAL_RESETN_DUT_MASK, -+ tc->tcf.registers + ODN_CORE_EXTERNAL_RESETN); -+ msleep(20); -+ -+ dev_info(dev, "DUT core clock set-up successful\n"); -+ -+ return err; -+} -+ -+static int odin_fpga_set_dut_if_clk(struct tc_device *tc, -+ u32 input_clk, u32 output_clk) -+{ -+ int err = 0; -+ u32 in_div, mul, out_div; -+ u32 high_time, low_time, edge, no_count; -+ u32 value; -+ void __iomem *base = tc->tcf.registers; -+ void __iomem *clk_blk_base = base + ODN_REG_BANK_ODN_CLK_BLK; -+ struct device *dev = &tc->pdev->dev; -+ -+ err = odin_mmcm_counter_calc(dev, input_clk, output_clk, -+ &in_div, &mul, &out_div); -+ if (err != 0) -+ return err; -+ -+ /* Put DUT into reset */ -+ iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK, -+ base + ODN_CORE_EXTERNAL_RESETN); -+ msleep(20); -+ -+ /* Put DUT Core MMCM into reset */ -+ iowrite32(ODN_CLK_GEN_RESET_DUT_IF_MMCM_MASK, -+ base + ODN_CORE_CLK_GEN_RESET); -+ msleep(20); -+ -+ /* Calculate the register fields for output divider */ -+ odin_mmcm_reg_param_calc(out_div, &high_time, &low_time, -+ &edge, &no_count); -+ -+ /* Read-modify-write the required fields to output divider register 1 */ -+ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER1); -+ REG_FIELD_SET(value, high_time, -+ ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME); -+ REG_FIELD_SET(value, low_time, -+ ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME); -+ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER1); -+ -+ /* Read-modify-write the required fields to output divider register 2 */ -+ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER2); -+ REG_FIELD_SET(value, edge, -+ ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE); -+ REG_FIELD_SET(value, no_count, -+ ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT); -+ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER2); -+ -+ /* Calculate the register fields for multiplier */ -+ odin_mmcm_reg_param_calc(mul, &high_time, &low_time, &edge, &no_count); -+ -+ /* Read-modify-write the required fields to multiplier register 1 */ -+ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER1); -+ REG_FIELD_SET(value, high_time, -+ ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME); -+ REG_FIELD_SET(value, low_time, -+ ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME); -+ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER1); -+ -+ /* Read-modify-write the required fields to multiplier register 2 */ -+ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER2); -+ REG_FIELD_SET(value, edge, -+ ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE); -+ REG_FIELD_SET(value, no_count, -+ ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT); -+ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER2); -+ -+ /* Calculate the register fields for 
input divider */ -+ odin_mmcm_reg_param_calc(in_div, &high_time, &low_time, -+ &edge, &no_count); -+ -+ /* Read-modify-write the required fields to input divider register 1 */ -+ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_IN_DIVIDER1); -+ REG_FIELD_SET(value, high_time, -+ ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME); -+ REG_FIELD_SET(value, low_time, -+ ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME); -+ REG_FIELD_SET(value, edge, -+ ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE); -+ REG_FIELD_SET(value, no_count, -+ ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT); -+ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_IN_DIVIDER1); -+ -+ /* Bring DUT interface clock MMCM out of reset */ -+ iowrite32(0, tc->tcf.registers + ODN_CORE_CLK_GEN_RESET); -+ -+ err = tc_iopol32_nonzero(ODN_MMCM_LOCK_STATUS_DUT_IF, -+ base + ODN_CORE_MMCM_LOCK_STATUS); -+ if (err != 0) { -+ dev_err(dev, "MMCM failed to lock for DUT IF\n"); -+ return err; -+ } -+ -+ /* Bring DUT out of reset */ -+ iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK | -+ ODN_EXTERNAL_RESETN_DUT_MASK, -+ tc->tcf.registers + ODN_CORE_EXTERNAL_RESETN); -+ msleep(20); -+ -+ dev_info(dev, "DUT IF clock set-up successful\n"); -+ -+ return err; -+} -+ -+static void odin_fpga_update_dut_clk_freq(struct tc_device *tc, -+ int *core_clock, int *mem_clock, int *clock_multiplex) -+{ -+ struct device *dev = &tc->pdev->dev; -+ int dut_clk_info = 0; -+ int dut_clk_multiplex = 0; -+ -+#if defined(SUPPORT_FPGA_DUT_CLK_INFO) -+ dut_clk_info = ioread32(tc->tcf.registers + ODN_CORE_DUT_CLK_INFO); -+#endif -+ -+ if ((dut_clk_info != 0) && (dut_clk_info != 0xbaadface) -+ && (dut_clk_info != 0xffffffff)) { -+ dev_info(dev, "ODN_DUT_CLK_INFO = %08x\n", dut_clk_info); -+ -+ if (*core_clock == 0) { -+ *core_clock = ((dut_clk_info & ODN_DUT_CLK_INFO_CORE_MASK) -+ >> ODN_DUT_CLK_INFO_CORE_SHIFT) * 1000000; -+ dev_info(dev, "Using register DUT core clock value: %i\n", -+ *core_clock); -+ } else { -+ dev_info(dev, "Using module param DUT core clock value: %i\n", -+ *core_clock); -+ } -+ -+ if (*mem_clock == 0) { -+ *mem_clock = ((dut_clk_info & ODN_DUT_CLK_INFO_MEM_MASK) -+ >> ODN_DUT_CLK_INFO_MEM_SHIFT) * 1000000; -+ dev_info(dev, "Using register DUT mem clock value: %i\n", -+ *mem_clock); -+ } else { -+ dev_info(dev, "Using module param DUT mem clock value: %i\n", -+ *mem_clock); -+ } -+ } else { -+ if (*core_clock == 0) { -+ *core_clock = RGX_TC_CORE_CLOCK_SPEED; -+ dev_info(dev, "Using default DUT core clock value: %i\n", -+ *core_clock); -+ } else { -+ dev_info(dev, "Using module param DUT core clock value: %i\n", -+ *core_clock); -+ } -+ -+ if (*mem_clock == 0) { -+ *mem_clock = RGX_TC_MEM_CLOCK_SPEED; -+ dev_info(dev, "Using default DUT mem clock value: %i\n", -+ *mem_clock); -+ } else { -+ dev_info(dev, "Using module param DUT mem clock value: %i\n", -+ *mem_clock); -+ } -+ } -+ -+#if defined(SUPPORT_FPGA_DUT_MULTIPLEX_INFO) -+ dut_clk_multiplex = ioread32(tc->tcf.registers + ODN_CORE_DUT_MULTIPLX_INFO); -+#endif -+ -+ if ((dut_clk_multiplex != 0) && (dut_clk_multiplex != 0xbaadface) -+ && (dut_clk_multiplex != 0xffffffff)) { -+ dev_info(dev, "ODN_DUT_MULTIPLX_INFO = %08x\n", dut_clk_multiplex); -+ if (*clock_multiplex == 0) { -+ *clock_multiplex = ((dut_clk_multiplex & ODN_DUT_MULTIPLX_INFO_MEM_MASK) -+ >> ODN_DUT_MULTIPLX_INFO_MEM_SHIFT); -+ dev_info(dev, "Using register DUT clock multiplex: %i\n", -+ *clock_multiplex); -+ } else { -+ dev_info(dev, "Using module param DUT clock multiplex: %i\n", -+ *clock_multiplex); -+ } -+ } else { -+ if (*clock_multiplex == 0) { -+ *clock_multiplex = 
RGX_TC_CLOCK_MULTIPLEX; -+ dev_info(dev, "Using default DUT clock multiplex: %i\n", -+ *clock_multiplex); -+ } else { -+ dev_info(dev, "Using module param DUT clock multiplex: %i\n", -+ *clock_multiplex); -+ } -+ } -+} -+ -+static int odin_hard_reset_fpga(struct tc_device *tc, -+ int *core_clock, int *mem_clock, int *clock_mulitplex) -+{ -+ int err = 0; -+ -+ odin_fpga_update_dut_clk_freq(tc, core_clock, mem_clock, clock_mulitplex); -+ -+ err = odin_fpga_set_dut_core_clk(tc, ODN_INPUT_CLOCK_SPEED, *core_clock); -+ if (err != 0) -+ goto err_out; -+ -+ err = odin_fpga_set_dut_if_clk(tc, ODN_INPUT_CLOCK_SPEED, *mem_clock); -+ -+err_out: -+ return err; -+} -+ -+static int odin_hard_reset_bonnie(struct tc_device *tc) -+{ -+ int reset_cnt = 0; -+ bool aligned = false; -+ int alignment_found; -+ -+ msleep(100); -+ -+ /* It is essential to do an SPI reset once on power-up before -+ * doing any DUT reads via the SPI interface. -+ */ -+ iowrite32(1, tc->tcf.registers /* set bit 1 low */ -+ + ODN_CORE_EXTERNAL_RESETN); -+ msleep(20); -+ -+ iowrite32(3, tc->tcf.registers /* set bit 1 high */ -+ + ODN_CORE_EXTERNAL_RESETN); -+ msleep(20); -+ -+ while (!aligned && (reset_cnt < 20)) { -+ -+ int bank; -+ -+ /* Reset the DUT to allow the SAI to retrain */ -+ iowrite32(2, /* set bit 0 low */ -+ tc->tcf.registers -+ + ODN_CORE_EXTERNAL_RESETN); -+ -+ /* Hold the DUT in reset for 50ms */ -+ msleep(50); -+ -+ /* Take the DUT out of reset */ -+ iowrite32(3, /* set bit 0 hi */ -+ tc->tcf.registers -+ + ODN_CORE_EXTERNAL_RESETN); -+ reset_cnt++; -+ -+ /* Wait 200ms for the DUT to stabilise */ -+ msleep(200); -+ -+ /* Check the odin Multi Clocked bank Align status */ -+ alignment_found = read_odin_mca_status(tc); -+ dev_info(&tc->pdev->dev, -+ "Odin mca_status indicates %s\n", -+ (alignment_found)?"aligned":"UNALIGNED"); -+ -+ /* Check the DUT MCA status */ -+ alignment_found = read_dut_mca_status(tc); -+ dev_info(&tc->pdev->dev, -+ "DUT mca_status indicates %s\n", -+ (alignment_found)?"aligned":"UNALIGNED"); -+ -+ /* If all banks have aligned then the reset was successful */ -+ for (bank = 0; bank < 10; bank++) { -+ -+ int dut_aligned = 0; -+ int odin_aligned = 0; -+ -+ odin_aligned = get_odin_sai_status(tc, bank); -+ dut_aligned = get_dut_sai_status(tc, bank); -+ -+ if (dut_aligned == SAI_STATUS_ERROR) -+ return SAI_STATUS_ERROR; -+ -+ if (!dut_aligned || !odin_aligned) { -+ aligned = false; -+ break; -+ } -+ aligned = true; -+ } -+ -+ if (aligned) { -+ dev_info(&tc->pdev->dev, -+ "all banks have aligned\n"); -+ break; -+ } -+ -+ dev_warn(&tc->pdev->dev, -+ "Warning- not all banks have aligned. Trying again.\n"); -+ } -+ -+ if (!aligned) -+ dev_warn(&tc->pdev->dev, "odin_hard_reset failed\n"); -+ -+ return (aligned) ? 0 : 1; /* return 0 for success */ -+} -+ -+static void odin_set_mem_latency(struct tc_device *tc, -+ int mem_latency, int mem_wresp_latency) -+{ -+ u32 regval = 0; -+ -+ if (mem_latency <= 4) { -+ /* The total memory read latency cannot be lower than the -+ * amount of cycles consumed by the hardware to do a read. -+ * Set the memory read latency to 0 cycles. -+ */ -+ mem_latency = 0; -+ } else { -+ mem_latency -= 4; -+ -+ dev_info(&tc->pdev->dev, -+ "Setting memory read latency to %i cycles\n", -+ mem_latency); -+ } -+ -+ if (mem_wresp_latency <= 2) { -+ /* The total memory write latency cannot be lower than the -+ * amount of cycles consumed by the hardware to do a write. -+ * Set the memory write latency to 0 cycles. 
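odin_set_mem_latency() subtracts the cycles the hardware already spends on an access (4 for reads, 2 for write responses), clamps the result at zero, and packs the write-response latency into the upper 16 bits before writing SPI register 0x1009. The arithmetic on its own, as a user-space sketch with illustrative request values:

```c
#include <stdio.h>
#include <stdint.h>

/* Convert requested total latencies into the packed register value:
 * low 16 bits = extra read cycles, high 16 bits = extra write-response
 * cycles, after subtracting what the hardware consumes anyway.
 */
static uint32_t pack_mem_latency(int mem_latency, int mem_wresp_latency)
{
        mem_latency = (mem_latency <= 4) ? 0 : mem_latency - 4;
        mem_wresp_latency = (mem_wresp_latency <= 2) ? 0 : mem_wresp_latency - 2;

        return (uint32_t)mem_latency | ((uint32_t)mem_wresp_latency << 16);
}

int main(void)
{
        /* A requested read latency of 20 cycles and write-response latency
         * of 10 cycles becomes 16 and 8 extra cycles: 0x00080010.
         */
        printf("packed = %#010x\n", (unsigned int)pack_mem_latency(20, 10));
        return 0;
}
```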
-+ */ -+ mem_wresp_latency = 0; -+ } else { -+ mem_wresp_latency -= 2; -+ -+ dev_info(&tc->pdev->dev, -+ "Setting memory write response latency to %i cycles\n", -+ mem_wresp_latency); -+ } -+ -+ mem_latency |= mem_wresp_latency << 16; -+ -+ spi_write(tc, 0x1009, mem_latency); -+ -+ if (spi_read(tc, 0x1009, &regval) != 0) { -+ dev_err(&tc->pdev->dev, -+ "Failed to read back memory latency register"); -+ return; -+ } -+ -+ if (mem_latency != regval) { -+ dev_err(&tc->pdev->dev, -+ "Memory latency register doesn't match requested value (actual: %#08x, expected: %#08x)\n", -+ regval, mem_latency); -+ } -+} -+ -+static int orion_set_dut_core_clk(struct tc_device *tc, -+ u32 input_clk, -+ u32 output_clk) -+{ -+ void __iomem *base = tc->tcf.registers; -+ void __iomem *clk_blk_base = base + SRS_REG_BANK_ODN_CLK_BLK; -+ struct device *dev = &tc->pdev->dev; -+ u32 high_time, low_time, edge, no_count; -+ u32 in_div, mul, out_div; -+ u32 value; -+ int err; -+ -+ err = odin_mmcm_counter_calc(dev, input_clk, output_clk, &in_div, -+ &mul, &out_div); -+ if (err != 0) -+ return err; -+ -+ /* Put DUT into reset */ -+ iowrite32(0, base + SRS_CORE_DUT_SOFT_RESETN); -+ msleep(20); -+ -+ /* Put DUT Core MMCM into reset */ -+ iowrite32(SRS_CLK_GEN_RESET_DUT_CORE_MMCM_MASK, -+ base + SRS_CORE_CLK_GEN_RESET); -+ msleep(20); -+ -+ /* Calculate the register fields for input divider */ -+ odin_mmcm_reg_param_calc(in_div, &high_time, &low_time, -+ &edge, &no_count); -+ -+ /* Read-modify-write the required fields to input divider register 1 */ -+ value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_IN_DIVIDER1); -+ REG_FIELD_SET(value, high_time, -+ ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME); -+ REG_FIELD_SET(value, low_time, -+ ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME); -+ REG_FIELD_SET(value, edge, -+ ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE); -+ REG_FIELD_SET(value, no_count, -+ ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT); -+ iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_IN_DIVIDER1); -+ -+ /* Calculate the register fields for multiplier */ -+ odin_mmcm_reg_param_calc(mul, &high_time, &low_time, -+ &edge, &no_count); -+ -+ /* Read-modify-write the required fields to multiplier register 1 */ -+ value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER1); -+ REG_FIELD_SET(value, high_time, -+ ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME); -+ REG_FIELD_SET(value, low_time, -+ ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME); -+ iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER1); -+ -+ /* Read-modify-write the required fields to multiplier register 2 */ -+ value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER2); -+ REG_FIELD_SET(value, edge, -+ ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE); -+ REG_FIELD_SET(value, no_count, -+ ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT); -+ iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER2); -+ -+ /* Calculate the register fields for output divider */ -+ odin_mmcm_reg_param_calc(out_div, &high_time, &low_time, -+ &edge, &no_count); -+ -+ /* -+ * Read-modify-write the required fields to -+ * core output divider register 1 -+ */ -+ value = ioread32(clk_blk_base + SRS_DUT_CORE_CLK_OUT_DIVIDER1); -+ REG_FIELD_SET(value, high_time, -+ SRS_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME); -+ REG_FIELD_SET(value, low_time, -+ SRS_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME); -+ iowrite32(value, clk_blk_base + SRS_DUT_CORE_CLK_OUT_DIVIDER1); -+ -+ /* -+ * Read-modify-write the required fields to core output -+ * divider register 2 -+ */ -+ value = ioread32(clk_blk_base + SRS_DUT_CORE_CLK_OUT_DIVIDER2); -+ REG_FIELD_SET(value, edge, -+
SRS_DUT_CORE_CLK_OUT_DIVIDER2_EDGE); -+ REG_FIELD_SET(value, no_count, -+ SRS_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT); -+ iowrite32(value, clk_blk_base + SRS_DUT_CORE_CLK_OUT_DIVIDER2); -+ -+ /* -+ * Read-modify-write the required fields to -+ * reference output divider register 1 -+ */ -+ value = ioread32(clk_blk_base + SRS_DUT_REF_CLK_OUT_DIVIDER1); -+ REG_FIELD_SET(value, high_time, -+ SRS_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME); -+ REG_FIELD_SET(value, low_time, -+ SRS_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME); -+ iowrite32(value, clk_blk_base + SRS_DUT_REF_CLK_OUT_DIVIDER1); -+ -+ /* -+ * Read-modify-write the required fields to -+ * reference output divider register 2 -+ */ -+ value = ioread32(clk_blk_base + SRS_DUT_REF_CLK_OUT_DIVIDER2); -+ REG_FIELD_SET(value, edge, -+ SRS_DUT_REF_CLK_OUT_DIVIDER2_EDGE); -+ REG_FIELD_SET(value, no_count, -+ SRS_DUT_REF_CLK_OUT_DIVIDER2_NOCOUNT); -+ iowrite32(value, clk_blk_base + SRS_DUT_REF_CLK_OUT_DIVIDER2); -+ -+ /* Bring DUT IF clock MMCM out of reset */ -+ iowrite32(0, tc->tcf.registers + SRS_CORE_CLK_GEN_RESET); -+ -+ err = tc_iopol32_nonzero(SRS_MMCM_LOCK_STATUS_DUT_CORE_MASK, -+ base + SRS_CORE_MMCM_LOCK_STATUS); -+ if (err != 0) { -+ dev_err(dev, "MMCM failed to lock for DUT core\n"); -+ return err; -+ } -+ -+ /* Bring DUT out of reset */ -+ iowrite32(SRS_DUT_SOFT_RESETN_EXTERNAL_MASK, -+ tc->tcf.registers + SRS_CORE_DUT_SOFT_RESETN); -+ msleep(20); -+ -+ dev_info(dev, "DUT core clock set-up successful\n"); -+ -+ return err; -+} -+ -+static int orion_set_dut_sys_mem_clk(struct tc_device *tc, -+ u32 input_clk, -+ u32 output_clk) -+{ -+ void __iomem *base = tc->tcf.registers; -+ void __iomem *clk_blk_base = base + SRS_REG_BANK_ODN_CLK_BLK; -+ struct device *dev = &tc->pdev->dev; -+ u32 high_time, low_time, edge, no_count; -+ u32 in_div, mul, out_div; -+ u32 value; -+ int err; -+ -+ err = odin_mmcm_counter_calc(dev, input_clk, output_clk, &in_div, -+ &mul, &out_div); -+ if (err != 0) -+ return err; -+ -+ /* Put DUT into reset */ -+ iowrite32(0, base + SRS_CORE_DUT_SOFT_RESETN); -+ msleep(20); -+ -+ /* Put DUT Core MMCM into reset */ -+ iowrite32(SRS_CLK_GEN_RESET_DUT_IF_MMCM_MASK, -+ base + SRS_CORE_CLK_GEN_RESET); -+ msleep(20); -+ -+ /* Calculate the register fields for input divider */ -+ odin_mmcm_reg_param_calc(in_div, &high_time, &low_time, -+ &edge, &no_count); -+ -+ /* Read-modify-write the required fields to input divider register 1 */ -+ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_IN_DIVIDER1); -+ REG_FIELD_SET(value, high_time, -+ ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME); -+ REG_FIELD_SET(value, low_time, -+ ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME); -+ REG_FIELD_SET(value, edge, -+ ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE); -+ REG_FIELD_SET(value, no_count, -+ ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT); -+ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_IN_DIVIDER1); -+ -+ /* Calculate the register fields for multiplier */ -+ odin_mmcm_reg_param_calc(mul, &high_time, &low_time, -+ &edge, &no_count); -+ -+ /* Read-modify-write the required fields to multiplier register 1 */ -+ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER1); -+ REG_FIELD_SET(value, high_time, -+ ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME); -+ REG_FIELD_SET(value, low_time, -+ ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME); -+ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER1); -+ -+ /* Read-modify-write the required fields to multiplier register 2 */ -+ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER2); -+ REG_FIELD_SET(value, edge, -+ 
ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE); -+ REG_FIELD_SET(value, no_count, -+ ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT); -+ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER2); -+ -+ /* Calculate the register fields for output divider */ -+ odin_mmcm_reg_param_calc(out_div, &high_time, &low_time, -+ &edge, &no_count); -+ -+ /* Read-modify-write the required fields to output divider register 1 */ -+ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER1); -+ REG_FIELD_SET(value, high_time, -+ ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME); -+ REG_FIELD_SET(value, low_time, -+ ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME); -+ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER1); -+ -+ /* Read-modify-write the required fields to output divider register 2 */ -+ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER2); -+ REG_FIELD_SET(value, edge, -+ ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE); -+ REG_FIELD_SET(value, no_count, -+ ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT); -+ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER2); -+ -+ /* -+ * New to Orion, registers undocumented in the TRM, assumed high_time, -+ * low_time, edge and no_count are in the same bit fields as the -+ * previous two registers Even though these registers seem to be -+ * undocumented, setting them is essential for the DUT not to show -+ * abnormal behaviour, like the firmware jumping to random addresses -+ */ -+ -+ /* -+ * Read-modify-write the required fields to memory clock output divider -+ * register 1 -+ */ -+ value = ioread32(clk_blk_base + SRS_DUT_MEM_CLK_OUT_DIVIDER1); -+ REG_FIELD_SET(value, high_time, -+ SRS_DUT_MEM_CLK_OUT_DIVIDER1_HI_TIME); -+ REG_FIELD_SET(value, low_time, -+ SRS_DUT_MEM_CLK_OUT_DIVIDER1_LO_TIME); -+ iowrite32(value, clk_blk_base + SRS_DUT_MEM_CLK_OUT_DIVIDER1); -+ -+ /* -+ * Read-modify-write the required fields to memory clock output divider -+ * register 1 -+ */ -+ value = ioread32(clk_blk_base + SRS_DUT_MEM_CLK_OUT_DIVIDER2); -+ REG_FIELD_SET(value, edge, -+ SRS_DUT_MEM_CLK_OUT_DIVIDER2_EDGE); -+ REG_FIELD_SET(value, no_count, -+ SRS_DUT_MEM_CLK_OUT_DIVIDER2_NOCOUNT); -+ iowrite32(value, clk_blk_base + SRS_DUT_MEM_CLK_OUT_DIVIDER2); -+ -+ /* Bring DUT clock MMCM out of reset */ -+ iowrite32(0, tc->tcf.registers + SRS_CORE_CLK_GEN_RESET); -+ -+ err = tc_iopol32_nonzero(SRS_MMCM_LOCK_STATUS_DUT_IF_MASK, -+ base + SRS_CORE_MMCM_LOCK_STATUS); -+ if (err != 0) { -+ dev_err(dev, "MMCM failed to lock for DUT IF\n"); -+ return err; -+ } -+ -+ /* Bring DUT out of reset */ -+ iowrite32(SRS_DUT_SOFT_RESETN_EXTERNAL_MASK, -+ tc->tcf.registers + SRS_CORE_DUT_SOFT_RESETN); -+ msleep(20); -+ -+ dev_info(dev, "DUT IF clock set-up successful\n"); -+ -+ return err; -+} -+ -+ -+static int orion_hard_reset(struct tc_device *tc, int *core_clock, int *mem_clock) -+{ -+ int err; -+ struct device *dev = &tc->pdev->dev; -+ -+ if (*core_clock == 0) { -+ *core_clock = RGX_TC_CORE_CLOCK_SPEED; -+ dev_info(dev, "Using default DUT core clock value: %i\n", -+ *core_clock); -+ } else { -+ dev_info(dev, "Using module param DUT core clock value: %i\n", -+ *core_clock); -+ } -+ -+ if (*mem_clock == 0) { -+ *mem_clock = RGX_TC_MEM_CLOCK_SPEED; -+ dev_info(dev, "Using default DUT mem clock value: %i\n", -+ *mem_clock); -+ } else { -+ dev_info(dev, "Using module param DUT mem clock value: %i\n", -+ *mem_clock); -+ } -+ -+ err = orion_set_dut_core_clk(tc, SRS_INPUT_CLOCK_SPEED, *core_clock); -+ if (err != 0) -+ goto err_out; -+ -+ err = orion_set_dut_sys_mem_clk(tc, SRS_INPUT_CLOCK_SPEED, *mem_clock); -+ 
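orion_hard_reset() and odin_fpga_update_dut_clk_freq() both treat a clock argument of zero as "not supplied": a non-zero module parameter wins, otherwise the Odin FPGA path falls back to the value advertised in ODN_CORE_DUT_CLK_INFO (when it is not 0, 0xffffffff or 0xbaadface) and finally to a compile-time default; the Orion path only has the parameter and the default. A compact sketch of that selection order, with a made-up fallback frequency:

```c
#include <stdio.h>

/* Pick a clock in priority order: explicit module parameter, then a
 * value advertised by the hardware (0, all-ones or 0xbaadface meaning
 * "invalid"), then a compile-time default.  Frequencies in Hz.
 */
static unsigned int pick_clock(unsigned int module_param,
                               unsigned int hw_reported,
                               unsigned int fallback)
{
        if (module_param != 0)
                return module_param;
        if (hw_reported != 0 && hw_reported != 0xffffffffu &&
            hw_reported != 0xbaadfaceu)
                return hw_reported;
        return fallback;
}

int main(void)
{
        /* No module param, register reads back as invalid -> default. */
        printf("core clock: %u Hz\n",
               pick_clock(0, 0xbaadfaceu, 400000000u));
        return 0;
}
```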
-+err_out: -+ return err; -+} -+ -+#endif /* defined(SUPPORT_RGX) */ -+ -+/* Do a hard reset on the DUT */ -+static int odin_hard_reset(struct tc_device *tc, int *core_clock, int *mem_clock, -+ int *clock_mulitplex) -+{ -+#if defined(SUPPORT_RGX) -+ if (tc->version == ODIN_VERSION_TCF_BONNIE) -+ return odin_hard_reset_bonnie(tc); -+ if (tc->version == ODIN_VERSION_FPGA) -+ return odin_hard_reset_fpga(tc, core_clock, mem_clock, clock_mulitplex); -+ if (tc->version == ODIN_VERSION_ORION) -+ return orion_hard_reset(tc, core_clock, mem_clock); -+ -+ dev_err(&tc->pdev->dev, "Invalid Odin version"); -+ return 1; -+#else /* defined(SUPPORT_RGX) */ -+ return 0; -+#endif /* defined(SUPPORT_RGX) */ -+} -+ -+static void odin_set_mem_mode_lma(struct tc_device *tc) -+{ -+ u32 val; -+ -+ if (tc->version != ODIN_VERSION_FPGA) -+ return; -+ -+ /* Enable memory offset to be applied to DUT and PDPs */ -+ iowrite32(0x80000A10, tc->tcf.registers + ODN_CORE_DUT_CTRL1); -+ -+ /* Apply memory offset to GPU and PDPs to point to DDR memory. -+ * Enable HDMI. -+ */ -+ val = (0x4 << ODN_CORE_CONTROL_DUT_OFFSET_SHIFT) | -+ (0x4 << ODN_CORE_CONTROL_PDP1_OFFSET_SHIFT) | -+ (0x4 << ODN_CORE_CONTROL_PDP2_OFFSET_SHIFT) | -+ (0x2 << ODN_CORE_CONTROL_HDMI_MODULE_EN_SHIFT) | -+ (0x1 << ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SHIFT); -+ iowrite32(val, tc->tcf.registers + ODN_CORE_CORE_CONTROL); -+} -+ -+/* -+ * For Hybrid mode we don't want to set the above DUT/PDP offset shifts -+ * because those are for 32bit address limitations or to make address translation -+ * easier by placing the device physical address range to start at 0. -+ * Odin will get 35 bits which will allow it to address all host memory -+ * through the bus master and all device memory. -+ */ -+static void odin_set_mem_mode_hybrid(struct tc_device *tc) -+{ -+ u32 val; -+ -+ /* Disable memory offset to be applied to DUT and PDPs */ -+ iowrite32(0x00000A10, tc->tcf.registers + ODN_CORE_DUT_CTRL1); -+ -+ /* Enable HDMI */ -+ val = (0x2 << ODN_CORE_CONTROL_HDMI_MODULE_EN_SHIFT) | -+ (0x1 << ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SHIFT); -+ iowrite32(val, tc->tcf.registers + ODN_CORE_CORE_CONTROL); -+} -+ -+static int odin_set_mem_mode(struct tc_device *tc, int mem_mode) -+{ -+ switch (mem_mode) { -+ case TC_MEMORY_LOCAL: -+ odin_set_mem_mode_lma(tc); -+ dev_info(&tc->pdev->dev, "Memory mode: TC_MEMORY_LOCAL\n"); -+ break; -+ case TC_MEMORY_HYBRID: -+ odin_set_mem_mode_hybrid(tc); -+ dev_info(&tc->pdev->dev, "Memory mode: TC_MEMORY_HYBRID\n"); -+ break; -+ default: -+ dev_err(&tc->pdev->dev, "unsupported memory mode = %d\n", -+ mem_mode); -+ return -EINVAL; -+ }; -+ -+ tc->mem_mode = mem_mode; -+ -+ return 0; -+} -+ -+static u64 odin_get_pdp_dma_mask(struct tc_device *tc) -+{ -+ /* Does not access system memory, so there is no DMA limitation */ -+ if ((tc->mem_mode == TC_MEMORY_LOCAL) || -+ (tc->mem_mode == TC_MEMORY_HYBRID)) -+ return DMA_BIT_MASK(64); -+ -+ return DMA_BIT_MASK(32); -+} -+ -+#if defined(SUPPORT_RGX) -+static u64 odin_get_rogue_dma_mask(struct tc_device *tc) -+{ -+ /* Does not access system memory, so there is no DMA limitation */ -+ if (tc->mem_mode == TC_MEMORY_LOCAL) -+ return DMA_BIT_MASK(64); -+ -+ return DMA_BIT_MASK(32); -+} -+#endif /* defined(SUPPORT_RGX) */ -+ -+static void odin_set_fbc_bypass(struct tc_device *tc, bool fbc_bypass) -+{ -+ u32 val; -+ -+ /* Register field is present whether TC has PFIM support or not */ -+ val = ioread32(tc->tcf.registers + ODN_CORE_DUT_CTRL1); -+ REG_FIELD_SET(val, fbc_bypass ? 
0x1 : 0x0, -+ ODN_DUT_CTRL1_FBDC_BYPASS); -+ iowrite32(val, tc->tcf.registers + ODN_CORE_DUT_CTRL1); -+ -+ tc->fbc_bypass = fbc_bypass; -+} -+ -+static int odin_hw_init(struct tc_device *tc, int *core_clock, -+ int *mem_clock, int *clock_mulitplex, int mem_latency, -+ int mem_wresp_latency, int mem_mode, -+ bool fbc_bypass) -+{ -+ int err; -+ -+ err = odin_hard_reset(tc, core_clock, mem_clock, clock_mulitplex); -+ if (err) { -+ dev_err(&tc->pdev->dev, "Failed to initialise Odin"); -+ goto err_out; -+ } -+ -+ err = odin_set_mem_mode(tc, mem_mode); -+ if (err) -+ goto err_out; -+ -+ odin_set_fbc_bypass(tc, fbc_bypass); -+ -+#if defined(SUPPORT_RGX) -+ if (tc->version == ODIN_VERSION_FPGA) -+ odin_set_mem_latency(tc, mem_latency, mem_wresp_latency); -+#endif /* defined(SUPPORT_RGX) */ -+ -+err_out: -+ return err; -+} -+ -+static int odin_enable_irq(struct tc_device *tc) -+{ -+ int err = 0; -+ -+#if defined(TC_FAKE_INTERRUPTS) -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) -+ timer_setup(&tc->timer, tc_irq_fake_wrapper, 0); -+#else -+ setup_timer(&tc->timer, tc_irq_fake_wrapper, (unsigned long)tc); -+#endif -+ mod_timer(&tc->timer, -+ jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS)); -+#else -+ iowrite32(0, tc->tcf.registers + -+ common_reg_offset(tc, CORE_INTERRUPT_ENABLE)); -+ iowrite32(0xffffffff, tc->tcf.registers + -+ common_reg_offset(tc, CORE_INTERRUPT_CLR)); -+ -+ dev_info(&tc->pdev->dev, -+ "Registering IRQ %d for use by %s\n", -+ tc->pdev->irq, -+ odin_tc_name(tc)); -+ -+ err = request_irq(tc->pdev->irq, odin_irq_handler, -+ IRQF_SHARED, DRV_NAME, tc); -+ -+ if (err) { -+ dev_err(&tc->pdev->dev, -+ "Error - IRQ %d failed to register\n", -+ tc->pdev->irq); -+ } else { -+ dev_info(&tc->pdev->dev, -+ "IRQ %d was successfully registered for use by %s\n", -+ tc->pdev->irq, -+ odin_tc_name(tc)); -+ } -+#endif -+ return err; -+} -+ -+static void odin_disable_irq(struct tc_device *tc) -+{ -+#if defined(TC_FAKE_INTERRUPTS) -+ del_timer_sync(&tc->timer); -+#else -+ iowrite32(0, tc->tcf.registers + -+ common_reg_offset(tc, CORE_INTERRUPT_ENABLE)); -+ iowrite32(0xffffffff, tc->tcf.registers + -+ common_reg_offset(tc, CORE_INTERRUPT_CLR)); -+ -+ free_irq(tc->pdev->irq, tc); -+#endif -+} -+ -+static enum tc_version_t -+odin_detect_daughterboard_version(struct tc_device *tc) -+{ -+ u32 reg = ioread32(tc->tcf.registers + ODN_REG_BANK_DB_TYPE_ID); -+ u32 val = reg; -+ -+ if (tc->orion) -+ return ODIN_VERSION_ORION; -+ -+ val = (val & ODN_REG_BANK_DB_TYPE_ID_TYPE_MASK) >> -+ ODN_REG_BANK_DB_TYPE_ID_TYPE_SHIFT; -+ -+ switch (val) { -+ default: -+ dev_err(&tc->pdev->dev, -+ "Unknown odin version ID type %#x (DB_TYPE_ID: %#08x)\n", -+ val, reg); -+ return TC_INVALID_VERSION; -+ case 1: -+ dev_info(&tc->pdev->dev, "DUT: Bonnie TC\n"); -+ return ODIN_VERSION_TCF_BONNIE; -+ case 2: -+ case 3: -+ dev_info(&tc->pdev->dev, "DUT: FPGA\n"); -+ return ODIN_VERSION_FPGA; -+ } -+} -+ -+#if defined(SUPPORT_BAR_RESIZE) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)) -+/* Should only be called with all BAR's resources released */ -+static int try_resize_mem_bar(struct pci_dev *pdev, int target_bar_num) -+{ -+ int err = 0; -+ u16 command; -+ u32 sizes, max_size, i; -+ -+ pci_disable_device(pdev); -+ -+ /* Disable memory decoding */ -+ pci_read_config_word(pdev, PCI_COMMAND, &command); -+ command &= ~PCI_COMMAND_MEMORY; -+ pci_write_config_word(pdev, PCI_COMMAND, command); -+ -+ sizes = pci_rebar_get_possible_sizes(pdev, target_bar_num); -+ if (sizes) { -+ max_size = fls(sizes); -+ err = 
pci_resize_resource(pdev, target_bar_num, max_size); -+ if (err) { -+ dev_err(&pdev->dev, -+ "error %u in pci_resize_resource (size requested 0x%x)\n", -+ err, max_size); -+ goto re_enable; -+ } -+ pci_assign_unassigned_bus_resources(pdev->bus); -+ } else { -+ dev_info(&pdev->dev, "PCI resize not supported on this platform\n"); -+ } -+ -+re_enable: -+ /* Re-enable memory decoding */ -+ pci_read_config_word(pdev, PCI_COMMAND, &command); -+ command &= PCI_COMMAND_MEMORY; -+ pci_write_config_word(pdev, PCI_COMMAND, command); -+ -+ err = pci_enable_device(pdev); -+ if (err) { -+ dev_err(&pdev->dev, "error %u in pci_enable_device\n", err); -+ goto err_out; -+ } -+ -+ return dev_err; -+ -+} -+#endif -+ -+static int odin_dev_init(struct tc_device *tc, struct pci_dev *pdev, -+ int pdp_mem_size, int secure_mem_size) -+{ -+ int err; -+ u32 val; -+ -+#if defined(SUPPORT_BAR_RESIZE) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)) -+ err = try_resize_mem_bar(pdev, ODN_DDR_BAR); -+ if (err) { -+ dev_err(&pdev->dev, -+ "Failed to resize BAR %d", ODN_DDR_BAR); -+ goto err_out; -+ } -+#endif -+ -+ /* Reserve and map the tcf system registers */ -+ err = setup_io_region(pdev, &tc->tcf, -+ ODN_SYS_BAR, ODN_SYS_REGS_OFFSET, ODN_SYS_REGS_SIZE); -+ if (err) -+ goto err_out; -+ -+ tc->version = odin_detect_daughterboard_version(tc); -+ if (tc->version == TC_INVALID_VERSION) { -+ err = -EIO; -+ goto err_odin_unmap_sys_registers; -+ } -+ -+ /* Setup card memory */ -+ tc->tc_mem.base = pci_resource_start(pdev, ODN_DDR_BAR); -+ tc->tc_mem.size = pci_resource_len(pdev, ODN_DDR_BAR); -+ -+ if (tc->tc_mem.size < pdp_mem_size) { -+ dev_err(&pdev->dev, -+ "%s MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu", -+ odin_tc_name(tc), -+ ODN_DDR_BAR, -+ (unsigned long)tc->tc_mem.size, -+ (unsigned long)pdp_mem_size); -+ -+ err = -EIO; -+ goto err_odin_unmap_sys_registers; -+ } -+ -+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) -+ if (tc->tc_mem.size < -+ (pdp_mem_size + secure_mem_size)) { -+ dev_err(&pdev->dev, -+ "Odin MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu" -+ " plus the requested secure heap size %lu", -+ ODN_DDR_BAR, -+ (unsigned long)tc->tc_mem.size, -+ (unsigned long)pdp_mem_size, -+ (unsigned long)secure_mem_size); -+ err = -EIO; -+ goto err_odin_unmap_sys_registers; -+ } -+#endif -+ -+ err = tc_mtrr_setup(tc); -+ if (err) -+ goto err_odin_unmap_sys_registers; -+ -+ /* Setup ranges for the device heaps */ -+ tc->pdp_heap_mem_size = pdp_mem_size; -+ -+ /* We know ext_heap_mem_size won't underflow as we've compared -+ * tc_mem.size against the pdp_mem_size value earlier -+ */ -+ tc->ext_heap_mem_size = -+ tc->tc_mem.size - tc->pdp_heap_mem_size; -+ -+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) -+ tc->ext_heap_mem_size -= secure_mem_size; -+#endif -+ -+ if (tc->ext_heap_mem_size < TC_EXT_MINIMUM_MEM_SIZE) { -+ dev_warn(&pdev->dev, -+ "%s MEM region (bar 4) has size of %lu, with %lu pdp_mem_size only %lu bytes are left for " -+ "ext device, which looks too small", -+ odin_tc_name(tc), -+ (unsigned long)tc->tc_mem.size, -+ (unsigned long)pdp_mem_size, -+ (unsigned long)tc->ext_heap_mem_size); -+ /* Continue as this is only a 'helpful warning' not a hard -+ * requirement -+ */ -+ } -+ tc->ext_heap_mem_base = tc->tc_mem.base; -+ tc->pdp_heap_mem_base = -+ tc->tc_mem.base + tc->ext_heap_mem_size; -+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) -+ tc->secure_heap_mem_base = tc->pdp_heap_mem_base + -+ tc->pdp_heap_mem_size; -+ 
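odin_dev_init() carves the DDR BAR into an ext (GPU) heap at the bottom, the PDP heap above it and, when the fake secure heap is enabled, a secure heap at the top, warning when the ext heap drops below TC_EXT_MINIMUM_MEM_SIZE. The same carve-up as a stand-alone calculation, with illustrative sizes:

```c
#include <stdio.h>
#include <stdint.h>

#define EXT_MINIMUM_MEM_SIZE (10u * 1024 * 1024) /* mirrors TC_EXT_MINIMUM_MEM_SIZE */

struct carveup {
        uint64_t ext_base, ext_size;
        uint64_t pdp_base, pdp_size;
        uint64_t sec_base, sec_size;
};

/* Lay out ext | pdp | secure from the bottom of the card memory up,
 * mirroring the base/size assignments in odin_dev_init().
 */
static int carve(uint64_t mem_base, uint64_t mem_size,
                 uint64_t pdp_size, uint64_t sec_size, struct carveup *c)
{
        if (mem_size < pdp_size + sec_size)
                return -1; /* card memory too small */

        c->ext_size = mem_size - pdp_size - sec_size;
        c->pdp_size = pdp_size;
        c->sec_size = sec_size;

        c->ext_base = mem_base;
        c->pdp_base = mem_base + c->ext_size;
        c->sec_base = c->pdp_base + pdp_size;

        if (c->ext_size < EXT_MINIMUM_MEM_SIZE)
                fprintf(stderr, "warning: only %llu bytes left for ext heap\n",
                        (unsigned long long)c->ext_size);
        return 0;
}

int main(void)
{
        struct carveup c;

        /* 1 GiB card memory, 512 MiB PDP heap, 128 MiB secure heap. */
        if (carve(0, 1ull << 30, 512ull << 20, 128ull << 20, &c) == 0)
                printf("ext %llu MiB @ %#llx, pdp @ %#llx, secure @ %#llx\n",
                       (unsigned long long)(c.ext_size >> 20),
                       (unsigned long long)c.ext_base,
                       (unsigned long long)c.pdp_base,
                       (unsigned long long)c.sec_base);
        return 0;
}
```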
tc->secure_heap_mem_size = secure_mem_size; -+#endif -+ -+#if defined(SUPPORT_DMA_HEAP) -+ err = tc_dmabuf_heap_init(tc, ODN_DDR_BAR); -+ if (err) { -+ dev_err(&pdev->dev, "Failed to initialise ION\n"); -+ goto err_odin_unmap_sys_registers; -+ } -+#elif defined(SUPPORT_ION) -+ err = tc_ion_init(tc, ODN_DDR_BAR); -+ if (err) { -+ dev_err(&pdev->dev, "Failed to initialise ION\n"); -+ goto err_odin_unmap_sys_registers; -+ } -+#endif /* defined(SUPPORT_ION) */ -+ -+ /* CDMA initialisation */ -+ val = ioread32(tc->tcf.registers + ODN_CORE_SUPPORTED_FEATURES); -+ tc->dma_nchan = REG_FIELD_GET(val, -+ ODN_SUPPORTED_FEATURES_2X_CDMA_AND_IRQS); -+ tc->dma_nchan++; -+ dev_info(&tc->pdev->dev, "Odin RTL has %u DMA(s)\n", tc->dma_nchan); -+ mutex_init(&tc->dma_mutex); -+ -+ if (tc->odin) { -+ val = ioread32(tc->tcf.registers + -+ ODN_CORE_REL); -+ dev_info(&pdev->dev, "%s = 0x%08x\n", -+ "ODN_CORE_REL", val); -+ } else { -+ val = ioread32(tc->tcf.registers + -+ SRS_CORE_REVISION); -+ dev_info(&pdev->dev, "%s = 0x%08x\n", -+ "SRS_CORE_REVISION", val); -+ } -+ -+ val = ioread32(tc->tcf.registers + -+ common_reg_offset(tc, CORE_CHANGE_SET)); -+ dev_info(&pdev->dev, "%s = 0x%08x\n", -+ common_reg_name(tc, CORE_CHANGE_SET), val); -+ -+ val = ioread32(tc->tcf.registers + -+ common_reg_offset(tc, CORE_USER_ID)); -+ dev_info(&pdev->dev, "%s = 0x%08x\n", -+ common_reg_name(tc, CORE_USER_ID), val); -+ -+ val = ioread32(tc->tcf.registers + -+ common_reg_offset(tc, CORE_USER_BUILD)); -+ dev_info(&pdev->dev, "%s = 0x%08x\n", -+ common_reg_name(tc, CORE_USER_BUILD), val); -+ -+err_out: -+ return err; -+ -+err_odin_unmap_sys_registers: -+ dev_info(&pdev->dev, -+ "%s: failed - unmapping the io regions.\n", __func__); -+ -+ iounmap(tc->tcf.registers); -+ release_pci_io_addr(pdev, ODN_SYS_BAR, -+ tc->tcf.region.base, tc->tcf.region.size); -+ goto err_out; -+} -+ -+static void odin_dev_cleanup(struct tc_device *tc) -+{ -+#if defined(SUPPORT_DMA_HEAP) -+ tc_dmabuf_heap_deinit(tc, ODN_DDR_BAR); -+#elif defined(SUPPORT_ION) -+ tc_ion_deinit(tc, ODN_DDR_BAR); -+#endif -+ -+ tc_mtrr_cleanup(tc); -+ -+ iounmap(tc->tcf.registers); -+ -+ release_pci_io_addr(tc->pdev, -+ ODN_SYS_BAR, -+ tc->tcf.region.base, -+ tc->tcf.region.size); -+} -+ -+static u32 odin_interrupt_id_to_flag(int interrupt_id) -+{ -+ switch (interrupt_id) { -+ case TC_INTERRUPT_PDP: -+ return ODN_INTERRUPT_ENABLE_PDP1; -+ case TC_INTERRUPT_EXT: -+ return ODN_INTERRUPT_ENABLE_DUT; -+ case TC_INTERRUPT_PDP2: -+ return ODN_INTERRUPT_ENABLE_PDP2; -+ case TC_INTERRUPT_CDMA: -+ return ODN_INTERRUPT_ENABLE_CDMA; -+ case TC_INTERRUPT_CDMA2: -+ return ODN_INTERRUPT_ENABLE_CDMA2; -+ default: -+ BUG(); -+ } -+} -+ -+int odin_init(struct tc_device *tc, struct pci_dev *pdev, -+ int *core_clock, int *mem_clock, int *clock_mulitplex, -+ int pdp_mem_size, int secure_mem_size, -+ int mem_latency, int mem_wresp_latency, int mem_mode, -+ bool fbc_bypass) -+{ -+ int err = 0; -+ -+ err = odin_dev_init(tc, pdev, pdp_mem_size, secure_mem_size); -+ if (err) { -+ dev_err(&pdev->dev, "odin_dev_init failed\n"); -+ goto err_out; -+ } -+ -+ err = odin_hw_init(tc, core_clock, mem_clock, clock_mulitplex, -+ mem_latency, mem_wresp_latency, mem_mode, -+ fbc_bypass); -+ if (err) { -+ dev_err(&pdev->dev, "odin_hw_init failed\n"); -+ goto err_dev_cleanup; -+ } -+ -+ err = odin_enable_irq(tc); -+ if (err) { -+ dev_err(&pdev->dev, -+ "Failed to initialise IRQ\n"); -+ goto err_dev_cleanup; -+ } -+ -+err_out: -+ return err; -+ -+err_dev_cleanup: -+ odin_dev_cleanup(tc); -+ goto err_out; -+} -+ -+int 
odin_cleanup(struct tc_device *tc) -+{ -+ /* -+ * Make sure we don't attempt to clean-up after an invalid device. -+ * We'll have already unmapped the PCI i/o space so cannot access -+ * anything now. -+ */ -+ if (tc->version != TC_INVALID_VERSION) { -+ odin_disable_irq(tc); -+ odin_dev_cleanup(tc); -+ } -+ -+ return 0; -+} -+ -+int odin_register_pdp_device(struct tc_device *tc) -+{ -+ int err = 0; -+ resource_size_t reg_start = pci_resource_start(tc->pdev, ODN_SYS_BAR); -+ struct resource pdp_resources_odin[] = { -+ DEFINE_RES_MEM_NAMED(reg_start + -+ ODN_PDP_REGS_OFFSET, /* start */ -+ ODN_PDP_REGS_SIZE, /* size */ -+ "pdp-regs"), -+ DEFINE_RES_MEM_NAMED(reg_start + -+ ODN_PDP2_REGS_OFFSET, /* start */ -+ ODN_PDP2_REGS_SIZE, /* size */ -+ "pdp2-regs"), -+ DEFINE_RES_MEM_NAMED(reg_start + -+ ODN_SYS_REGS_OFFSET + -+ common_reg_offset(tc, REG_BANK_ODN_CLK_BLK) + -+ ODN_PDP_P_CLK_OUT_DIVIDER_REG1, /* start */ -+ ODN_PDP_P_CLK_IN_DIVIDER_REG - -+ ODN_PDP_P_CLK_OUT_DIVIDER_REG1 + 4, /* size */ -+ "pll-regs"), -+ DEFINE_RES_MEM_NAMED(reg_start + -+ ODN_PDP2_PFIM_OFFSET, /* start */ -+ ODN_PDP2_PFIM_SIZE, /* size */ -+ "pfim-regs"), -+ DEFINE_RES_MEM_NAMED(reg_start + -+ ODN_SYS_REGS_OFFSET + -+ ODN_REG_BANK_CORE, /* start */ -+ ODN_CORE_MMCM_LOCK_STATUS + 4, /* size */ -+ "odn-core"), -+ }; -+ -+ struct tc_pdp_platform_data pdata = { -+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ .ion_device = tc->ion_device, -+ .ion_heap_id = ION_HEAP_TC_PDP, -+#endif -+ .memory_base = tc->tc_mem.base, -+ .pdp_heap_memory_base = tc->pdp_heap_mem_base, -+ .pdp_heap_memory_size = tc->pdp_heap_mem_size, -+ }; -+ struct platform_device_info pdp_device_info = { -+ .parent = &tc->pdev->dev, -+ .name = ODN_DEVICE_NAME_PDP, -+ .id = -2, -+ .data = &pdata, -+ .size_data = sizeof(pdata), -+ .dma_mask = odin_get_pdp_dma_mask(tc), -+ }; -+ -+ pdp_device_info.res = pdp_resources_odin; -+ pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_odin); -+ -+ tc->pdp_dev = platform_device_register_full(&pdp_device_info); -+ if (IS_ERR(tc->pdp_dev)) { -+ err = PTR_ERR(tc->pdp_dev); -+ dev_err(&tc->pdev->dev, -+ "Failed to register PDP device (%d)\n", err); -+ tc->pdp_dev = NULL; -+ goto err_out; -+ } -+ -+err_out: -+ return err; -+} -+ -+int odin_register_ext_device(struct tc_device *tc) -+{ -+#if defined(SUPPORT_RGX) -+ int err = 0; -+ struct resource odin_rogue_resources[] = { -+ DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev, -+ ODN_DUT_SOCIF_BAR), -+ ODN_DUT_SOCIF_SIZE, "rogue-regs"), -+ }; -+ struct tc_rogue_platform_data pdata = { -+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ .ion_device = tc->ion_device, -+ .ion_heap_id = ION_HEAP_TC_ROGUE, -+#endif -+ .mem_mode = tc->mem_mode, -+ .tc_memory_base = tc->tc_mem.base, -+ .pdp_heap_memory_base = tc->pdp_heap_mem_base, -+ .pdp_heap_memory_size = tc->pdp_heap_mem_size, -+ .rogue_heap_memory_base = tc->ext_heap_mem_base, -+ .rogue_heap_memory_size = tc->ext_heap_mem_size, -+#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) -+ .secure_heap_memory_base = tc->secure_heap_mem_base, -+ .secure_heap_memory_size = tc->secure_heap_mem_size, -+#endif -+ .tc_dma_tx_chan_name = ODIN_DMA_TX_CHAN_NAME, -+ .tc_dma_rx_chan_name = ODIN_DMA_RX_CHAN_NAME, -+ }; -+ struct platform_device_info odin_rogue_dev_info = { -+ .parent = &tc->pdev->dev, -+ .name = TC_DEVICE_NAME_ROGUE, -+ .id = -2, -+ .res = odin_rogue_resources, -+ .num_res = ARRAY_SIZE(odin_rogue_resources), -+ .data = &pdata, -+ .size_data = sizeof(pdata), -+ .dma_mask = 
odin_get_rogue_dma_mask(tc), -+ }; -+ -+ if (tc->odin) -+ pdata.baseboard = TC_BASEBOARD_ODIN; -+ else if (tc->orion) -+ pdata.baseboard = TC_BASEBOARD_ORION; -+ -+ tc->ext_dev -+ = platform_device_register_full(&odin_rogue_dev_info); -+ -+ if (IS_ERR(tc->ext_dev)) { -+ err = PTR_ERR(tc->ext_dev); -+ dev_err(&tc->pdev->dev, -+ "Failed to register rogue device (%d)\n", err); -+ tc->ext_dev = NULL; -+ } -+ return err; -+#else /* defined(SUPPORT_RGX) */ -+ return 0; -+#endif /* defined(SUPPORT_RGX) */ -+} -+ -+int odin_register_dma_device(struct tc_device *tc) -+{ -+ resource_size_t reg_start = pci_resource_start(tc->pdev, ODN_SYS_BAR); -+ int err = 0; -+ -+ struct resource odin_cdma_resources[] = { -+ DEFINE_RES_MEM_NAMED(reg_start + -+ ODIN_DMA_REGS_OFFSET, /* start */ -+ ODIN_DMA_REGS_SIZE, /* size */ -+ "cdma-regs"), -+ DEFINE_RES_IRQ_NAMED(TC_INTERRUPT_CDMA, -+ "cdma-irq"), -+ DEFINE_RES_IRQ_NAMED(TC_INTERRUPT_CDMA2, -+ "cdma-irq2"), -+ }; -+ -+ struct tc_dma_platform_data pdata = { -+ .addr_width = ODN_CDMA_ADDR_WIDTH, -+ .num_dmas = tc->dma_nchan, -+ .has_dre = true, -+ .has_sg = true, -+ }; -+ -+ struct platform_device_info odin_cdma_dev_info = { -+ .parent = &tc->pdev->dev, -+ .name = ODN_DEVICE_NAME_CDMA, -+ .id = -2, -+ .res = odin_cdma_resources, -+ .num_res = ARRAY_SIZE(odin_cdma_resources), -+ .dma_mask = DMA_BIT_MASK(ODN_CDMA_ADDR_WIDTH), -+ .data = &pdata, -+ .size_data = sizeof(pdata), -+ }; -+ -+ tc->dma_dev -+ = platform_device_register_full(&odin_cdma_dev_info); -+ -+ if (IS_ERR(tc->dma_dev)) { -+ err = PTR_ERR(tc->dma_dev); -+ dev_err(&tc->pdev->dev, -+ "Failed to register CDMA device (%d)\n", err); -+ tc->dma_dev = NULL; -+ } -+ -+ return err; -+} -+ -+void odin_enable_interrupt_register(struct tc_device *tc, -+ int interrupt_id) -+{ -+ u32 val; -+ u32 flag; -+ -+ switch (interrupt_id) { -+ case TC_INTERRUPT_PDP: -+ dev_info(&tc->pdev->dev, -+ "Enabling Odin PDP interrupts\n"); -+ break; -+ case TC_INTERRUPT_EXT: -+ dev_info(&tc->pdev->dev, -+ "Enabling Odin DUT interrupts\n"); -+ break; -+ case TC_INTERRUPT_PDP2: -+ dev_info(&tc->pdev->dev, -+ "Enabling Odin PDP2 interrupts\n"); -+ break; -+ case TC_INTERRUPT_CDMA: -+ dev_info(&tc->pdev->dev, -+ "Enabling Odin CDMA interrupts\n"); -+ break; -+ case TC_INTERRUPT_CDMA2: -+ dev_info(&tc->pdev->dev, -+ "Enabling Odin CDMA2 interrupts\n"); -+ break; -+ default: -+ dev_err(&tc->pdev->dev, -+ "Error - illegal interrupt id\n"); -+ return; -+ } -+ -+ val = ioread32(tc->tcf.registers + -+ common_reg_offset(tc, CORE_INTERRUPT_ENABLE)); -+ flag = odin_interrupt_id_to_flag(interrupt_id); -+ val |= flag; -+ iowrite32(val, tc->tcf.registers + -+ common_reg_offset(tc, CORE_INTERRUPT_ENABLE)); -+} -+ -+void odin_disable_interrupt_register(struct tc_device *tc, -+ int interrupt_id) -+{ -+ u32 val; -+ -+ switch (interrupt_id) { -+ case TC_INTERRUPT_PDP: -+ dev_info(&tc->pdev->dev, -+ "Disabling Odin PDP interrupts\n"); -+ break; -+ case TC_INTERRUPT_EXT: -+ dev_info(&tc->pdev->dev, -+ "Disabling Odin DUT interrupts\n"); -+ break; -+ case TC_INTERRUPT_PDP2: -+ dev_info(&tc->pdev->dev, -+ "Disabling Odin PDP2 interrupts\n"); -+ break; -+ case TC_INTERRUPT_CDMA: -+ dev_info(&tc->pdev->dev, -+ "Disabling Odin CDMA interrupts\n"); -+ break; -+ case TC_INTERRUPT_CDMA2: -+ dev_info(&tc->pdev->dev, -+ "Disabling Odin CDMA2 interrupts\n"); -+ break; -+ default: -+ dev_err(&tc->pdev->dev, -+ "Error - illegal interrupt id\n"); -+ return; -+ } -+ val = ioread32(tc->tcf.registers + -+ common_reg_offset(tc, CORE_INTERRUPT_ENABLE)); -+ val &= 
~(odin_interrupt_id_to_flag(interrupt_id)); -+ iowrite32(val, tc->tcf.registers + -+ common_reg_offset(tc, CORE_INTERRUPT_ENABLE)); -+} -+ -+irqreturn_t odin_irq_handler(int irq, void *data) -+{ -+ u32 interrupt_status; -+ u32 interrupt_clear = 0; -+ unsigned long flags; -+ irqreturn_t ret = IRQ_NONE; -+ struct tc_device *tc = (struct tc_device *)data; -+ -+ spin_lock_irqsave(&tc->interrupt_handler_lock, flags); -+ -+#if defined(TC_FAKE_INTERRUPTS) -+ /* If we're faking interrupts pretend we got both ext and PDP ints */ -+ interrupt_status = ODN_INTERRUPT_STATUS_DUT -+ | ODN_INTERRUPT_STATUS_PDP1; -+#else -+ interrupt_status = ioread32(tc->tcf.registers + -+ common_reg_offset(tc, -+ CORE_INTERRUPT_STATUS)); -+#endif -+ -+ if (interrupt_status & ODN_INTERRUPT_STATUS_DUT) { -+ struct tc_interrupt_handler *ext_int = -+ &tc->interrupt_handlers[TC_INTERRUPT_EXT]; -+ -+ if (ext_int->enabled && ext_int->handler_function) { -+ ext_int->handler_function(ext_int->handler_data); -+ interrupt_clear |= ODN_INTERRUPT_CLEAR_DUT; -+ } -+ ret = IRQ_HANDLED; -+ } -+ if (interrupt_status & ODN_INTERRUPT_STATUS_PDP1) { -+ struct tc_interrupt_handler *pdp_int = -+ &tc->interrupt_handlers[TC_INTERRUPT_PDP]; -+ -+ if (pdp_int->enabled && pdp_int->handler_function) { -+ pdp_int->handler_function(pdp_int->handler_data); -+ interrupt_clear |= ODN_INTERRUPT_CLEAR_PDP1; -+ } -+ ret = IRQ_HANDLED; -+ } -+ if (interrupt_status & ODN_INTERRUPT_STATUS_PDP2) { -+ struct tc_interrupt_handler *pdp_int = -+ &tc->interrupt_handlers[TC_INTERRUPT_PDP2]; -+ -+ if (pdp_int->enabled && pdp_int->handler_function) { -+ pdp_int->handler_function(pdp_int->handler_data); -+ interrupt_clear |= ODN_INTERRUPT_CLEAR_PDP2; -+ } -+ ret = IRQ_HANDLED; -+ } -+ -+ if (interrupt_status & ODN_INTERRUPT_STATUS_CDMA) { -+ struct tc_interrupt_handler *cdma_int = -+ &tc->interrupt_handlers[TC_INTERRUPT_CDMA]; -+ if (cdma_int->enabled && cdma_int->handler_function) { -+ cdma_int->handler_function(cdma_int->handler_data); -+ interrupt_clear |= ODN_INTERRUPT_CLEAR_CDMA; -+ } -+ ret = IRQ_HANDLED; -+ } -+ -+ if (interrupt_status & ODN_INTERRUPT_STATUS_CDMA2) { -+ struct tc_interrupt_handler *cdma_int = -+ &tc->interrupt_handlers[TC_INTERRUPT_CDMA2]; -+ if (cdma_int->enabled && cdma_int->handler_function) { -+ cdma_int->handler_function(cdma_int->handler_data); -+ interrupt_clear |= ODN_INTERRUPT_CLEAR_CDMA2; -+ } -+ ret = IRQ_HANDLED; -+ } -+ -+ -+ if (interrupt_clear) -+ iowrite32(interrupt_clear, -+ tc->tcf.registers + -+ common_reg_offset(tc, CORE_INTERRUPT_CLR)); -+ -+ /* -+ * Orion PDP interrupts are occasionally masked because, for unknown -+ * reasons, a vblank goes without being asserted for about 1000 ms. This -+ * feature is not present on Odin, and setting the -+ * INTERRUPT_TIMEOUT_THRESHOLD register to 0 does not seem to disable it -+ * either. This is probably caused by a bug in some versions of Sirius -+ * RTL. Also this bug seems to only affect PDP interrupts, but not the -+ * DUT. This might sometimes lead to a sudden jitter effect in the -+ * render. Further investigation is pending before this code can -+ * be safely removed. 
-+	 */
-+
-+	if (tc->orion) {
-+		if (REG_FIELD_GET(ioread32(tc->tcf.registers +
-+				SRS_CORE_INTERRUPT_TIMEOUT_CLR),
-+				SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT)) {
-+			dev_warn(&tc->pdev->dev,
-+				 "Orion PDP interrupts were masked, clearing now\n");
-+			iowrite32(SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_MASK,
-+				  tc->tcf.registers + SRS_CORE_INTERRUPT_TIMEOUT_CLR);
-+		}
-+	}
-+
-+	spin_unlock_irqrestore(&tc->interrupt_handler_lock, flags);
-+
-+	return ret;
-+}
-+
-+int odin_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll)
-+{
-+	*tmp = 0;
-+	*pll = 0;
-+	return 0;
-+}
-+
-+int odin_sys_strings(struct tc_device *tc,
-+		     char *str_fpga_rev, size_t size_fpga_rev,
-+		     char *str_tcf_core_rev, size_t size_tcf_core_rev,
-+		     char *str_tcf_core_target_build_id,
-+		     size_t size_tcf_core_target_build_id,
-+		     char *str_pci_ver, size_t size_pci_ver,
-+		     char *str_macro_ver, size_t size_macro_ver)
-+{
-+	u32 tcver = tc_odin_subvers(&tc->pdev->dev);
-+	char temp_str[12];
-+	u32 val;
-+
-+	if (tc->odin) {
-+		/* Read the Odin major and minor revision ID register Rx-xx */
-+		val = ioread32(tc->tcf.registers +
-+			       ODN_CORE_REL);
-+
-+		snprintf(str_tcf_core_rev,
-+			 size_tcf_core_rev,
-+			 "%d.%d",
-+			 HEX2DEC((val & ODN_REL_MAJOR_MASK)
-+				 >> ODN_REL_MAJOR_SHIFT),
-+			 HEX2DEC((val & ODN_REL_MINOR_MASK)
-+				 >> ODN_REL_MINOR_SHIFT));
-+	} else {
-+		/* Read the Orion major and minor revision ID register Rx-xx */
-+		val = ioread32(tc->tcf.registers +
-+			       SRS_CORE_REVISION);
-+
-+		snprintf(str_tcf_core_rev,
-+			 size_tcf_core_rev,
-+			 "%d.%d",
-+			 HEX2DEC((val & SRS_REVISION_MAJOR_MASK)
-+				 >> SRS_REVISION_MAJOR_SHIFT),
-+			 HEX2DEC((val & SRS_REVISION_MINOR_MASK)
-+				 >> SRS_REVISION_MINOR_SHIFT));
-+	}
-+
-+	dev_info(&tc->pdev->dev, "%s core revision %s\n",
-+		 odin_tc_name(tc), str_tcf_core_rev);
-+
-+	/* Read the Odin register containing the Perforce changelist
-+	 * value that the FPGA build was generated from
-+	 */
-+	val = ioread32(tc->tcf.registers +
-+		       common_reg_offset(tc, CORE_CHANGE_SET));
-+
-+	snprintf(str_tcf_core_target_build_id,
-+		 size_tcf_core_target_build_id,
-+		 "%d",
-+		 (val & CHANGE_SET_SET_MASK[tcver])
-+		 >> CHANGE_SET_SET_SHIFT[tcver]);
-+
-+	/* Read the Odin User_ID register containing the User ID for
-+	 * identification of a modified build
-+	 */
-+	val = ioread32(tc->tcf.registers + common_reg_offset(tc, CORE_USER_ID));
-+
-+	snprintf(temp_str,
-+		 sizeof(temp_str),
-+		 "%d",
-+		 HEX2DEC((val & USER_ID_ID_MASK[tcver])
-+			 >> USER_ID_ID_SHIFT[tcver]));
-+
-+	/* Read the Odin User_Build register containing the User build
-+	 * number for identification of modified builds
-+	 */
-+	val = ioread32(tc->tcf.registers +
-+		       common_reg_offset(tc, CORE_USER_BUILD));
-+
-+	snprintf(temp_str,
-+		 sizeof(temp_str),
-+		 "%d",
-+		 HEX2DEC((val & USER_BUILD_BUILD_MASK[tcver])
-+			 >> USER_BUILD_BUILD_SHIFT[tcver]));
-+
-+	return 0;
-+}
-+
-+const char *odin_tc_name(struct tc_device *tc)
-+{
-+	if (tc->odin)
-+		return "Odin";
-+	else if (tc->orion)
-+		return "Orion";
-+	else
-+		return "Unknown TC";
-+}
-+
-+bool odin_pfim_compatible(struct tc_device *tc)
-+{
-+	u32 val;
-+
-+	val = ioread32(tc->tcf.registers +
-+		       ODN_CORE_REL);
-+
-+	return ((REG_FIELD_GET(val, ODN_REL_MAJOR)
-+		 >= ODIN_PFIM_RELNUM));
-+}
-+
-+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) && !defined(TC_XILINX_DMA)
-+static bool odin_dma_chan_filter(struct dma_chan *chan, void *param)
-+{
-+	return false;
-+}
-+#endif
-+
-+struct dma_chan *odin_cdma_chan(struct tc_device *tc, char *name)
-+{
-+	struct dma_chan *chan;
-+	unsigned long chan_idx;
-+	int
err; -+ -+ if (!(strcmp("rx", name))) -+ chan_idx = ODN_DMA_CHAN_RX; -+ else if (!(strcmp("tx", name))) { -+ /* -+ * When Odin RTL has a single CDMA device, we simulate -+ * a second channel by always opening the first one. -+ * This is made possible because CDMA allows for -+ * transfers in both directions -+ */ -+ if (tc->dma_nchan == 1) { -+ name = "rx"; -+ chan_idx = ODN_DMA_CHAN_RX; -+ } else -+ chan_idx = ODN_DMA_CHAN_TX; -+ } else { -+ dev_err(&tc->pdev->dev, "Wrong CDMA channel name\n"); -+ return NULL; -+ } -+ -+ mutex_lock(&tc->dma_mutex); -+ -+ if (tc->dma_refcnt[chan_idx]) { -+ tc->dma_refcnt[chan_idx]++; -+ } else { -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) -+ chan = dma_request_chan(&tc->dma_dev->dev, name); -+#else -+ dma_cap_mask_t mask; -+ -+ dma_cap_zero(mask); -+ dma_cap_set(DMA_SLAVE, mask); -+ chan = dma_request_channel(mask, -+ odin_dma_chan_filter, -+ (void *)chan_idx); -+#endif -+ if (IS_ERR(chan)) { -+ err = PTR_ERR(chan); -+ dev_err(&tc->pdev->dev, -+ "dma channel request failed (%d)\n", err); -+ mutex_unlock(&tc->dma_mutex); -+ return NULL; -+ } -+ tc->dma_chans[chan_idx] = chan; -+ tc->dma_refcnt[chan_idx] = 1; -+ } -+ -+ mutex_unlock(&tc->dma_mutex); -+ -+ return tc->dma_chans[chan_idx]; -+} -+ -+void odin_cdma_chan_free(struct tc_device *tc, -+ void *chan_priv) -+{ -+ struct dma_chan *dma_chan = (struct dma_chan *)chan_priv; -+ u32 chan_idx; -+ -+ BUG_ON(dma_chan == NULL); -+ -+ mutex_lock(&tc->dma_mutex); -+ -+ if (dma_chan == tc->dma_chans[ODN_DMA_CHAN_RX]) -+ chan_idx = ODN_DMA_CHAN_RX; -+ else if (dma_chan == tc->dma_chans[ODN_DMA_CHAN_TX]) -+ chan_idx = ODN_DMA_CHAN_TX; -+ else -+ goto cdma_chan_free_exit; -+ -+ tc->dma_refcnt[chan_idx]--; -+ if (!tc->dma_refcnt[chan_idx]) { -+ tc->dma_chans[chan_idx] = NULL; -+ dma_release_channel(dma_chan); -+ } -+ -+cdma_chan_free_exit: -+ mutex_unlock(&tc->dma_mutex); -+} -diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_odin.h b/drivers/gpu/drm/img-rogue/apollo/tc_odin.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/tc_odin.h -@@ -0,0 +1,82 @@ -+/* -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. 
-+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#ifndef _ODIN_DRV_H -+#define _ODIN_DRV_H -+ -+#include "tc_drv_internal.h" -+#include "odin_defs.h" -+#include "orion_defs.h" -+ -+int odin_init(struct tc_device *tc, struct pci_dev *pdev, -+ int *core_clock, int *mem_clock, int *clock_multiplex, -+ int pdp_mem_size, int secure_mem_size, -+ int mem_latency, int mem_wresp_latency, int mem_mode, -+ bool fbc_bypass); -+int odin_cleanup(struct tc_device *tc); -+ -+int odin_register_pdp_device(struct tc_device *tc); -+int odin_register_ext_device(struct tc_device *tc); -+int odin_register_dma_device(struct tc_device *tc); -+ -+struct dma_chan *odin_cdma_chan(struct tc_device *tc, char *name); -+void odin_cdma_chan_free(struct tc_device *tc, void *chan_priv); -+ -+void odin_enable_interrupt_register(struct tc_device *tc, -+ int interrupt_id); -+void odin_disable_interrupt_register(struct tc_device *tc, -+ int interrupt_id); -+ -+irqreturn_t odin_irq_handler(int irq, void *data); -+ -+int odin_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll); -+int odin_sys_strings(struct tc_device *tc, -+ char *str_fpga_rev, size_t size_fpga_rev, -+ char *str_tcf_core_rev, size_t size_tcf_core_rev, -+ char *str_tcf_core_target_build_id, -+ size_t size_tcf_core_target_build_id, -+ char *str_pci_ver, size_t size_pci_ver, -+ char *str_macro_ver, size_t size_macro_ver); -+ -+const char *odin_tc_name(struct tc_device *tc); -+ -+bool odin_pfim_compatible(struct tc_device *tc); -+#endif /* _ODIN_DRV_H */ -diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_odin_common_regs.h b/drivers/gpu/drm/img-rogue/apollo/tc_odin_common_regs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/tc_odin_common_regs.h -@@ -0,0 +1,105 @@ -+/* -+ * @File odin_common_regs.h -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ */
-+
-+#ifndef __TC_ODIN_COMMON_REGS_H__
-+#define __TC_ODIN_COMMON_REGS_H__
-+
-+#include <linux/stringify.h>
-+#include <linux/types.h>
-+
-+struct tc_device;
-+
-+enum odin_common_regs {
-+	CORE_CHANGE_SET = 0,
-+	CORE_USER_ID,
-+	CORE_USER_BUILD,
-+	CORE_INTERRUPT_ENABLE,
-+	CORE_INTERRUPT_CLR,
-+	CORE_INTERRUPT_STATUS,
-+	REG_BANK_ODN_CLK_BLK,
-+};
-+
-+#define ODIN_REGNAME(REG_NAME) "ODN_" __stringify(REG_NAME)
-+#define ORION_REGNAME(REG_NAME) "SRS_" __stringify(REG_NAME)
-+
-+struct odin_orion_reg {
-+	u32 odin_offset;
-+	u32 orion_offset;
-+	const char *odin_name;
-+	const char *orion_name;
-+};
-+
-+#define COMMON_REG_ENTRY(REG) \
-+	[REG] = { \
-+		.odin_offset = ODN_##REG, \
-+		.orion_offset = SRS_##REG, \
-+		.odin_name = ODIN_REGNAME(REG), \
-+		.orion_name = ORION_REGNAME(REG), \
-+	}
-+
-+static const struct odin_orion_reg common_regs[] = {
-+	COMMON_REG_ENTRY(CORE_CHANGE_SET),
-+	COMMON_REG_ENTRY(CORE_USER_ID),
-+	COMMON_REG_ENTRY(CORE_USER_BUILD),
-+	COMMON_REG_ENTRY(CORE_INTERRUPT_ENABLE),
-+	COMMON_REG_ENTRY(CORE_INTERRUPT_CLR),
-+	COMMON_REG_ENTRY(CORE_INTERRUPT_STATUS),
-+	COMMON_REG_ENTRY(REG_BANK_ODN_CLK_BLK),
-+};
-+
-+static inline const u32 common_reg_offset(struct tc_device *tc, u32 reg)
-+{
-+	if (tc->odin)
-+		return common_regs[reg].odin_offset;
-+	else
-+		return common_regs[reg].orion_offset;
-+}
-+
-+static inline const char *common_reg_name(struct tc_device *tc, u32 reg)
-+{
-+	if (tc->odin)
-+		return common_regs[reg].odin_name;
-+	else
-+		return common_regs[reg].orion_name;
-+}
-+
-+#endif /* __TC_ODIN_COMMON_REGS_H__ */
-diff --git a/drivers/gpu/drm/img-rogue/apollo/tcf_clk_ctrl.h b/drivers/gpu/drm/img-rogue/apollo/tcf_clk_ctrl.h
-new file mode 100644
-index 000000000000..111111111111
---- /dev/null
-+++ b/drivers/gpu/drm/img-rogue/apollo/tcf_clk_ctrl.h
-@@ -0,0 +1,1018 @@
-+/*************************************************************************/ /*!
-+@Title          Test Chip Framework system control register definitions
-+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
-+@Description    Autogenerated C -- do not edit
-+                Generated from: tcf_clk_ctrl.def
-+@License        Dual MIT/GPLv2
-+
-+The contents of this file are subject to the MIT license as set out below.
-+
-+Permission is hereby granted, free of charge, to any person obtaining a copy
-+of this software and associated documentation files (the "Software"), to deal
-+in the Software without restriction, including without limitation the rights
-+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+copies of the Software, and to permit persons to whom the Software is
-+furnished to do so, subject to the following conditions:
-+
-+The above copyright notice and this permission notice shall be included in
-+all copies or substantial portions of the Software.
-+
-+Alternatively, the contents of this file may be used under the terms of
-+the GNU General Public License Version 2 ("GPL") in which case the provisions
-+of GPL are applicable instead of those above.
-+
-+If you wish to allow use of your version of this file only under the terms of
-+GPL, and not to allow others to use your version of this file under the terms
-+of the MIT license, indicate your decision by deleting the provisions above
-+and replace them with the notice and other provisions required by GPL as set
-+out in the file called "GPL-COPYING" included in this distribution. If you do
-+not delete the provisions above, a recipient may use your version of this file
-+under the terms of either the MIT license or GPL.
-+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(_TCF_CLK_CTRL_H_) -+#define _TCF_CLK_CTRL_H_ -+ -+/* -+ * The following register definitions are valid if register 0x28 has value 0. -+ */ -+ -+/* -+ Register FPGA_ID_REG -+*/ -+#define TCF_CLK_CTRL_FPGA_ID_REG 0x0000 -+#define FPGA_ID_REG_CORE_CFG_MASK 0x0000FFFFU -+#define FPGA_ID_REG_CORE_CFG_SHIFT 0 -+#define FPGA_ID_REG_CORE_CFG_SIGNED 0 -+ -+#define FPGA_ID_REG_CORE_ID_MASK 0xFFFF0000U -+#define FPGA_ID_REG_CORE_ID_SHIFT 16 -+#define FPGA_ID_REG_CORE_ID_SIGNED 0 -+ -+/* -+ Register FPGA_REV_REG -+*/ -+#define TCF_CLK_CTRL_FPGA_REV_REG 0x0008 -+#define FPGA_REV_REG_MAINT_MASK 0x000000FFU -+#define FPGA_REV_REG_MAINT_SHIFT 0 -+#define FPGA_REV_REG_MAINT_SIGNED 0 -+ -+#define FPGA_REV_REG_MINOR_MASK 0x0000FF00U -+#define FPGA_REV_REG_MINOR_SHIFT 8 -+#define FPGA_REV_REG_MINOR_SIGNED 0 -+ -+#define FPGA_REV_REG_MAJOR_MASK 0x00FF0000U -+#define FPGA_REV_REG_MAJOR_SHIFT 16 -+#define FPGA_REV_REG_MAJOR_SIGNED 0 -+ -+#define FPGA_REV_REG_DESIGNER_MASK 0xFF000000U -+#define FPGA_REV_REG_DESIGNER_SHIFT 24 -+#define FPGA_REV_REG_DESIGNER_SIGNED 0 -+ -+/* -+ Register FPGA_DES_REV_1 -+*/ -+#define TCF_CLK_CTRL_FPGA_DES_REV_1 0x0010 -+#define FPGA_DES_REV_1_MASK 0xFFFFFFFFU -+#define FPGA_DES_REV_1_SHIFT 0 -+#define FPGA_DES_REV_1_SIGNED 0 -+ -+/* -+ Register FPGA_DES_REV_2 -+*/ -+#define TCF_CLK_CTRL_FPGA_DES_REV_2 0x0018 -+#define FPGA_DES_REV_2_MASK 0xFFFFFFFFU -+#define FPGA_DES_REV_2_SHIFT 0 -+#define FPGA_DES_REV_2_SIGNED 0 -+ -+/* -+ Register TCF_CORE_ID_REG -+*/ -+#define TCF_CLK_CTRL_TCF_CORE_ID_REG 0x0020 -+#define TCF_CORE_ID_REG_CORE_CFG_MASK 0x0000FFFFU -+#define TCF_CORE_ID_REG_CORE_CFG_SHIFT 0 -+#define TCF_CORE_ID_REG_CORE_CFG_SIGNED 0 -+ -+#define TCF_CORE_ID_REG_CORE_ID_MASK 0xFFFF0000U -+#define TCF_CORE_ID_REG_CORE_ID_SHIFT 16 -+#define TCF_CORE_ID_REG_CORE_ID_SIGNED 0 -+ -+/* -+ Register TCF_CORE_REV_REG -+*/ -+#define TCF_CLK_CTRL_TCF_CORE_REV_REG 0x0028 -+#define TCF_CORE_REV_REG_MAINT_MASK 0x000000FFU -+#define TCF_CORE_REV_REG_MAINT_SHIFT 0 -+#define TCF_CORE_REV_REG_MAINT_SIGNED 0 -+ -+#define TCF_CORE_REV_REG_MINOR_MASK 0x0000FF00U -+#define TCF_CORE_REV_REG_MINOR_SHIFT 8 -+#define TCF_CORE_REV_REG_MINOR_SIGNED 0 -+ -+#define TCF_CORE_REV_REG_MAJOR_MASK 0x00FF0000U -+#define TCF_CORE_REV_REG_MAJOR_SHIFT 16 -+#define TCF_CORE_REV_REG_MAJOR_SIGNED 0 -+ -+#define TCF_CORE_REV_REG_DESIGNER_MASK 0xFF000000U -+#define TCF_CORE_REV_REG_DESIGNER_SHIFT 24 -+#define TCF_CORE_REV_REG_DESIGNER_SIGNED 0 -+ -+/* -+ Register TCF_CORE_DES_REV_1 -+*/ -+#define TCF_CLK_CTRL_TCF_CORE_DES_REV_1 0x0030 -+#define TCF_CORE_DES_REV_1_MASK 0xFFFFFFFFU -+#define TCF_CORE_DES_REV_1_SHIFT 0 -+#define TCF_CORE_DES_REV_1_SIGNED 0 -+ -+/* -+ Register TCF_CORE_DES_REV_2 -+*/ -+#define TCF_CLK_CTRL_TCF_CORE_DES_REV_2 0x0038 -+#define TCF_CORE_DES_REV_2_MASK 0xFFFFFFFFU -+#define 
TCF_CORE_DES_REV_2_SHIFT 0 -+#define TCF_CORE_DES_REV_2_SIGNED 0 -+ -+ -+/* -+ * The following register definitions are valid if register 0x28 has value 1. -+ */ -+ -+/* -+ Register ID -+*/ -+#define TCF_CLK_CTRL_ID 0x0000 -+#define VARIANT_MASK 0x0000FFFFU -+#define VARIANT_SHIFT 0 -+#define VARIANT_SIGNED 0 -+ -+#define ID_MASK 0xFFFF0000U -+#define ID_SHIFT 16 -+#define ID_SIGNED 0 -+ -+/* -+ Register REL -+*/ -+#define TCF_CLK_CTRL_REL 0x0008 -+#define MINOR_MASK 0x0000FFFFU -+#define MINOR_SHIFT 0 -+#define MINOR_SIGNED 0 -+ -+#define MAJOR_MASK 0xFFFF0000U -+#define MAJOR_SHIFT 16 -+#define MAJOR_SIGNED 0 -+ -+/* -+ Register CHANGE_SET -+*/ -+#define TCF_CLK_CTRL_CHANGE_SET 0x0010 -+#define SET_MASK 0xFFFFFFFFU -+#define SET_SHIFT 0 -+#define SET_SIGNED 0 -+ -+/* -+ Register USER_ID -+*/ -+#define TCF_CLK_CTRL_USER_ID 0x0018 -+#define USER_ID_MASK 0x0000000FU -+#define USER_ID_SHIFT 0 -+#define USER_ID_SIGNED 0 -+ -+/* -+ Register USER_BUILD -+*/ -+#define TCF_CLK_CTRL_USER_BUILD 0x0020 -+#define BUILD_MASK 0xFFFFFFFFU -+#define BUILD_SHIFT 0 -+#define BUILD_SIGNED 0 -+ -+/* -+ Register SW_IF_VERSION -+*/ -+#define TCF_CLK_CTRL_SW_IF_VERSION 0x0028 -+#define VERSION_MASK 0x0000FFFFU -+#define VERSION_SHIFT 0 -+#define VERSION_SIGNED 0 -+ -+/* -+ * The following register definitions are valid for all Apollo builds, -+ * even if some of the registers are not available for certain cores. -+ */ -+ -+/* -+ Register SCB_GENERAL_CONTROL -+*/ -+#define TCF_CLK_CTRL_SCB_GENERAL_CONTROL 0x0040 -+#define SCB_GC_TRANS_HALT_MASK 0x00000200U -+#define SCB_GC_TRANS_HALT_SHIFT 9 -+#define SCB_GC_TRANS_HALT_SIGNED 0 -+ -+#define SCB_GC_CKD_REGS_MASK 0x00000100U -+#define SCB_GC_CKD_REGS_SHIFT 8 -+#define SCB_GC_CKD_REGS_SIGNED 0 -+ -+#define SCB_GC_CKD_SLAVE_MASK 0x00000080U -+#define SCB_GC_CKD_SLAVE_SHIFT 7 -+#define SCB_GC_CKD_SLAVE_SIGNED 0 -+ -+#define SCB_GC_CKD_MASTER_MASK 0x00000040U -+#define SCB_GC_CKD_MASTER_SHIFT 6 -+#define SCB_GC_CKD_MASTER_SIGNED 0 -+ -+#define SCB_GC_CKD_XDATA_MASK 0x00000020U -+#define SCB_GC_CKD_XDATA_SHIFT 5 -+#define SCB_GC_CKD_XDATA_SIGNED 0 -+ -+#define SCB_GC_SFR_REG_MASK 0x00000010U -+#define SCB_GC_SFR_REG_SHIFT 4 -+#define SCB_GC_SFR_REG_SIGNED 0 -+ -+#define SCB_GC_SFR_SLAVE_MASK 0x00000008U -+#define SCB_GC_SFR_SLAVE_SHIFT 3 -+#define SCB_GC_SFR_SLAVE_SIGNED 0 -+ -+#define SCB_GC_SFR_MASTER_MASK 0x00000004U -+#define SCB_GC_SFR_MASTER_SHIFT 2 -+#define SCB_GC_SFR_MASTER_SIGNED 0 -+ -+#define SCB_GC_SFR_DET_DATA_MASK 0x00000002U -+#define SCB_GC_SFR_DET_DATA_SHIFT 1 -+#define SCB_GC_SFR_DET_DATA_SIGNED 0 -+ -+#define SCB_GC_SFR_GEN_DATA_MASK 0x00000001U -+#define SCB_GC_SFR_GEN_DATA_SHIFT 0 -+#define SCB_GC_SFR_GEN_DATA_SIGNED 0 -+ -+/* -+ Register SCB_MASTER_READ_COUNT -+*/ -+#define TCF_CLK_CTRL_SCB_MASTER_READ_COUNT 0x0048 -+#define MASTER_READ_COUNT_MASK 0x0000FFFFU -+#define MASTER_READ_COUNT_SHIFT 0 -+#define MASTER_READ_COUNT_SIGNED 0 -+ -+/* -+ Register SCB_MASTER_READ_DATA -+*/ -+#define TCF_CLK_CTRL_SCB_MASTER_READ_DATA 0x0050 -+#define MASTER_READ_DATA_MASK 0x000000FFU -+#define MASTER_READ_DATA_SHIFT 0 -+#define MASTER_READ_DATA_SIGNED 0 -+ -+/* -+ Register SCB_MASTER_ADDRESS -+*/ -+#define TCF_CLK_CTRL_SCB_MASTER_ADDRESS 0x0058 -+#define SCB_MASTER_ADDRESS_MASK 0x000003FFU -+#define SCB_MASTER_ADDRESS_SHIFT 0 -+#define SCB_MASTER_ADDRESS_SIGNED 0 -+ -+/* -+ Register SCB_MASTER_WRITE_DATA -+*/ -+#define TCF_CLK_CTRL_SCB_MASTER_WRITE_DATA 0x0060 -+#define MASTER_WRITE_DATA_MASK 0x000000FFU -+#define MASTER_WRITE_DATA_SHIFT 0 -+#define 
MASTER_WRITE_DATA_SIGNED 0 -+ -+/* -+ Register SCB_MASTER_WRITE_COUNT -+*/ -+#define TCF_CLK_CTRL_SCB_MASTER_WRITE_COUNT 0x0068 -+#define MASTER_WRITE_COUNT_MASK 0x0000FFFFU -+#define MASTER_WRITE_COUNT_SHIFT 0 -+#define MASTER_WRITE_COUNT_SIGNED 0 -+ -+/* -+ Register SCB_BUS_SELECT -+*/ -+#define TCF_CLK_CTRL_SCB_BUS_SELECT 0x0070 -+#define BUS_SELECT_MASK 0x00000003U -+#define BUS_SELECT_SHIFT 0 -+#define BUS_SELECT_SIGNED 0 -+ -+/* -+ Register SCB_MASTER_FILL_STATUS -+*/ -+#define TCF_CLK_CTRL_SCB_MASTER_FILL_STATUS 0x0078 -+#define MASTER_WRITE_FIFO_EMPTY_MASK 0x00000008U -+#define MASTER_WRITE_FIFO_EMPTY_SHIFT 3 -+#define MASTER_WRITE_FIFO_EMPTY_SIGNED 0 -+ -+#define MASTER_WRITE_FIFO_FULL_MASK 0x00000004U -+#define MASTER_WRITE_FIFO_FULL_SHIFT 2 -+#define MASTER_WRITE_FIFO_FULL_SIGNED 0 -+ -+#define MASTER_READ_FIFO_EMPTY_MASK 0x00000002U -+#define MASTER_READ_FIFO_EMPTY_SHIFT 1 -+#define MASTER_READ_FIFO_EMPTY_SIGNED 0 -+ -+#define MASTER_READ_FIFO_FULL_MASK 0x00000001U -+#define MASTER_READ_FIFO_FULL_SHIFT 0 -+#define MASTER_READ_FIFO_FULL_SIGNED 0 -+ -+/* -+ Register CLK_AND_RST_CTRL -+*/ -+#define TCF_CLK_CTRL_CLK_AND_RST_CTRL 0x0080 -+#define GLB_CLKG_EN_MASK 0x00020000U -+#define GLB_CLKG_EN_SHIFT 17 -+#define GLB_CLKG_EN_SIGNED 0 -+ -+#define CLK_GATE_CNTL_MASK 0x00010000U -+#define CLK_GATE_CNTL_SHIFT 16 -+#define CLK_GATE_CNTL_SIGNED 0 -+ -+#define DUT_DCM_RESETN_MASK 0x00000400U -+#define DUT_DCM_RESETN_SHIFT 10 -+#define DUT_DCM_RESETN_SIGNED 0 -+ -+#define MEM_RESYNC_BYPASS_MASK 0x00000200U -+#define MEM_RESYNC_BYPASS_SHIFT 9 -+#define MEM_RESYNC_BYPASS_SIGNED 0 -+ -+#define SYS_RESYNC_BYPASS_MASK 0x00000100U -+#define SYS_RESYNC_BYPASS_SHIFT 8 -+#define SYS_RESYNC_BYPASS_SIGNED 0 -+ -+#define SCB_RESETN_MASK 0x00000010U -+#define SCB_RESETN_SHIFT 4 -+#define SCB_RESETN_SIGNED 0 -+ -+#define PDP2_RESETN_MASK 0x00000008U -+#define PDP2_RESETN_SHIFT 3 -+#define PDP2_RESETN_SIGNED 0 -+ -+#define PDP1_RESETN_MASK 0x00000004U -+#define PDP1_RESETN_SHIFT 2 -+#define PDP1_RESETN_SIGNED 0 -+ -+#define DDR_RESETN_MASK 0x00000002U -+#define DDR_RESETN_SHIFT 1 -+#define DDR_RESETN_SIGNED 0 -+ -+#define DUT_RESETN_MASK 0x00000001U -+#define DUT_RESETN_SHIFT 0 -+#define DUT_RESETN_SIGNED 0 -+ -+/* -+ Register TEST_REG_OUT -+*/ -+#define TCF_CLK_CTRL_TEST_REG_OUT 0x0088 -+#define TEST_REG_OUT_MASK 0xFFFFFFFFU -+#define TEST_REG_OUT_SHIFT 0 -+#define TEST_REG_OUT_SIGNED 0 -+ -+/* -+ Register TEST_REG_IN -+*/ -+#define TCF_CLK_CTRL_TEST_REG_IN 0x0090 -+#define TEST_REG_IN_MASK 0xFFFFFFFFU -+#define TEST_REG_IN_SHIFT 0 -+#define TEST_REG_IN_SIGNED 0 -+ -+/* -+ Register TEST_CTRL -+*/ -+#define TCF_CLK_CTRL_TEST_CTRL 0x0098 -+#define PCI_TEST_OFFSET_MASK 0xF8000000U -+#define PCI_TEST_OFFSET_SHIFT 27 -+#define PCI_TEST_OFFSET_SIGNED 0 -+ -+#define PDP1_HOST_MEM_SELECT_MASK 0x00000200U -+#define PDP1_HOST_MEM_SELECT_SHIFT 9 -+#define PDP1_HOST_MEM_SELECT_SIGNED 0 -+ -+#define HOST_PHY_MODE_MASK 0x00000100U -+#define HOST_PHY_MODE_SHIFT 8 -+#define HOST_PHY_MODE_SIGNED 0 -+ -+#define HOST_ONLY_MODE_MASK 0x00000080U -+#define HOST_ONLY_MODE_SHIFT 7 -+#define HOST_ONLY_MODE_SIGNED 0 -+ -+#define PCI_TEST_MODE_MASK 0x00000040U -+#define PCI_TEST_MODE_SHIFT 6 -+#define PCI_TEST_MODE_SIGNED 0 -+ -+#define TURN_OFF_DDR_MASK 0x00000020U -+#define TURN_OFF_DDR_SHIFT 5 -+#define TURN_OFF_DDR_SIGNED 0 -+ -+#define SYS_RD_CLK_INV_MASK 0x00000010U -+#define SYS_RD_CLK_INV_SHIFT 4 -+#define SYS_RD_CLK_INV_SIGNED 0 -+ -+#define MEM_REQ_CLK_INV_MASK 0x00000008U -+#define MEM_REQ_CLK_INV_SHIFT 3 -+#define 
MEM_REQ_CLK_INV_SIGNED 0 -+ -+#define BURST_SPLIT_MASK 0x00000004U -+#define BURST_SPLIT_SHIFT 2 -+#define BURST_SPLIT_SIGNED 0 -+ -+#define CLK_INVERSION_MASK 0x00000002U -+#define CLK_INVERSION_SHIFT 1 -+#define CLK_INVERSION_SIGNED 0 -+ -+#define ADDRESS_FORCE_MASK 0x00000001U -+#define ADDRESS_FORCE_SHIFT 0 -+#define ADDRESS_FORCE_SIGNED 0 -+ -+/* -+ Register CLEAR_HOST_MEM_SIG -+*/ -+#define TCF_CLK_CTRL_CLEAR_HOST_MEM_SIG 0x00A0 -+#define SIGNATURE_TAG_ID_MASK 0x00000F00U -+#define SIGNATURE_TAG_ID_SHIFT 8 -+#define SIGNATURE_TAG_ID_SIGNED 0 -+ -+#define CLEAR_HOST_MEM_SIGNATURE_MASK 0x00000001U -+#define CLEAR_HOST_MEM_SIGNATURE_SHIFT 0 -+#define CLEAR_HOST_MEM_SIGNATURE_SIGNED 0 -+ -+/* -+ Register HOST_MEM_SIGNATURE -+*/ -+#define TCF_CLK_CTRL_HOST_MEM_SIGNATURE 0x00A8 -+#define HOST_MEM_SIGNATURE_MASK 0xFFFFFFFFU -+#define HOST_MEM_SIGNATURE_SHIFT 0 -+#define HOST_MEM_SIGNATURE_SIGNED 0 -+ -+/* -+ Register INTERRUPT_STATUS -+*/ -+#define TCF_CLK_CTRL_INTERRUPT_STATUS 0x00C8 -+#define INTERRUPT_MASTER_STATUS_MASK 0x80000000U -+#define INTERRUPT_MASTER_STATUS_SHIFT 31 -+#define INTERRUPT_MASTER_STATUS_SIGNED 0 -+ -+#define OTHER_INTS_MASK 0x7FFE0000U -+#define OTHER_INTS_SHIFT 17 -+#define OTHER_INTS_SIGNED 0 -+ -+#define HOST_MST_NORESPONSE_MASK 0x00010000U -+#define HOST_MST_NORESPONSE_SHIFT 16 -+#define HOST_MST_NORESPONSE_SIGNED 0 -+ -+#define PDP2_INT_MASK 0x00008000U -+#define PDP2_INT_SHIFT 15 -+#define PDP2_INT_SIGNED 0 -+ -+#define PDP1_INT_MASK 0x00004000U -+#define PDP1_INT_SHIFT 14 -+#define PDP1_INT_SIGNED 0 -+ -+#define EXT_INT_MASK 0x00002000U -+#define EXT_INT_SHIFT 13 -+#define EXT_INT_SIGNED 0 -+ -+#define SCB_MST_HLT_BIT_MASK 0x00001000U -+#define SCB_MST_HLT_BIT_SHIFT 12 -+#define SCB_MST_HLT_BIT_SIGNED 0 -+ -+#define SCB_SLV_EVENT_MASK 0x00000800U -+#define SCB_SLV_EVENT_SHIFT 11 -+#define SCB_SLV_EVENT_SIGNED 0 -+ -+#define SCB_TDONE_RX_MASK 0x00000400U -+#define SCB_TDONE_RX_SHIFT 10 -+#define SCB_TDONE_RX_SIGNED 0 -+ -+#define SCB_SLV_WT_RD_DAT_MASK 0x00000200U -+#define SCB_SLV_WT_RD_DAT_SHIFT 9 -+#define SCB_SLV_WT_RD_DAT_SIGNED 0 -+ -+#define SCB_SLV_WT_PRV_RD_MASK 0x00000100U -+#define SCB_SLV_WT_PRV_RD_SHIFT 8 -+#define SCB_SLV_WT_PRV_RD_SIGNED 0 -+ -+#define SCB_SLV_WT_WR_DAT_MASK 0x00000080U -+#define SCB_SLV_WT_WR_DAT_SHIFT 7 -+#define SCB_SLV_WT_WR_DAT_SIGNED 0 -+ -+#define SCB_MST_WT_RD_DAT_MASK 0x00000040U -+#define SCB_MST_WT_RD_DAT_SHIFT 6 -+#define SCB_MST_WT_RD_DAT_SIGNED 0 -+ -+#define SCB_ADD_ACK_ERR_MASK 0x00000020U -+#define SCB_ADD_ACK_ERR_SHIFT 5 -+#define SCB_ADD_ACK_ERR_SIGNED 0 -+ -+#define SCB_WR_ACK_ERR_MASK 0x00000010U -+#define SCB_WR_ACK_ERR_SHIFT 4 -+#define SCB_WR_ACK_ERR_SIGNED 0 -+ -+#define SCB_SDAT_LO_TIM_MASK 0x00000008U -+#define SCB_SDAT_LO_TIM_SHIFT 3 -+#define SCB_SDAT_LO_TIM_SIGNED 0 -+ -+#define SCB_SCLK_LO_TIM_MASK 0x00000004U -+#define SCB_SCLK_LO_TIM_SHIFT 2 -+#define SCB_SCLK_LO_TIM_SIGNED 0 -+ -+#define SCB_UNEX_START_BIT_MASK 0x00000002U -+#define SCB_UNEX_START_BIT_SHIFT 1 -+#define SCB_UNEX_START_BIT_SIGNED 0 -+ -+#define SCB_BUS_INACTIVE_MASK 0x00000001U -+#define SCB_BUS_INACTIVE_SHIFT 0 -+#define SCB_BUS_INACTIVE_SIGNED 0 -+ -+/* -+ Register INTERRUPT_OP_CFG -+*/ -+#define TCF_CLK_CTRL_INTERRUPT_OP_CFG 0x00D0 -+#define PULSE_NLEVEL_MASK 0x80000000U -+#define PULSE_NLEVEL_SHIFT 31 -+#define PULSE_NLEVEL_SIGNED 0 -+ -+#define INT_SENSE_MASK 0x40000000U -+#define INT_SENSE_SHIFT 30 -+#define INT_SENSE_SIGNED 0 -+ -+#define INTERRUPT_DEST_MASK 0x0000000FU -+#define INTERRUPT_DEST_SHIFT 0 -+#define 
INTERRUPT_DEST_SIGNED 0 -+ -+/* -+ Register INTERRUPT_ENABLE -+*/ -+#define TCF_CLK_CTRL_INTERRUPT_ENABLE 0x00D8 -+#define INTERRUPT_MASTER_ENABLE_MASK 0x80000000U -+#define INTERRUPT_MASTER_ENABLE_SHIFT 31 -+#define INTERRUPT_MASTER_ENABLE_SIGNED 0 -+ -+#define INTERRUPT_ENABLE_MASK 0x7FFFFFFFU -+#define INTERRUPT_ENABLE_SHIFT 0 -+#define INTERRUPT_ENABLE_SIGNED 0 -+ -+/* -+ Register INTERRUPT_CLEAR -+*/ -+#define TCF_CLK_CTRL_INTERRUPT_CLEAR 0x00E0 -+#define INTERRUPT_MASTER_CLEAR_MASK 0x80000000U -+#define INTERRUPT_MASTER_CLEAR_SHIFT 31 -+#define INTERRUPT_MASTER_CLEAR_SIGNED 0 -+ -+#define INTERRUPT_CLEAR_MASK 0x7FFFFFFFU -+#define INTERRUPT_CLEAR_SHIFT 0 -+#define INTERRUPT_CLEAR_SIGNED 0 -+ -+/* -+ Register YCC_RGB_CTRL -+*/ -+#define TCF_CLK_CTRL_YCC_RGB_CTRL 0x00E8 -+#define RGB_CTRL1_MASK 0x000001FFU -+#define RGB_CTRL1_SHIFT 0 -+#define RGB_CTRL1_SIGNED 0 -+ -+#define RGB_CTRL2_MASK 0x01FF0000U -+#define RGB_CTRL2_SHIFT 16 -+#define RGB_CTRL2_SIGNED 0 -+ -+/* -+ Register EXP_BRD_CTRL -+*/ -+#define TCF_CLK_CTRL_EXP_BRD_CTRL 0x00F8 -+#define PDP1_DATA_EN_MASK 0x00000003U -+#define PDP1_DATA_EN_SHIFT 0 -+#define PDP1_DATA_EN_SIGNED 0 -+ -+#define PDP2_DATA_EN_MASK 0x00000030U -+#define PDP2_DATA_EN_SHIFT 4 -+#define PDP2_DATA_EN_SIGNED 0 -+ -+#define EXP_BRD_OUTPUT_MASK 0xFFFFFF00U -+#define EXP_BRD_OUTPUT_SHIFT 8 -+#define EXP_BRD_OUTPUT_SIGNED 0 -+ -+/* -+ Register HOSTIF_CONTROL -+*/ -+#define TCF_CLK_CTRL_HOSTIF_CONTROL 0x0100 -+#define HOSTIF_CTRL_MASK 0x000000FFU -+#define HOSTIF_CTRL_SHIFT 0 -+#define HOSTIF_CTRL_SIGNED 0 -+ -+/* -+ Register DUT_CONTROL_1 -+*/ -+#define TCF_CLK_CTRL_DUT_CONTROL_1 0x0108 -+#define DUT_CTRL_1_MASK 0xFFFFFFFFU -+#define DUT_CTRL_1_SHIFT 0 -+#define DUT_CTRL_1_SIGNED 0 -+ -+/* TC ES2 additional needs those: */ -+#define DUT_CTRL_TEST_MODE_SHIFT 0 -+#define DUT_CTRL_TEST_MODE_MASK 0x3 -+ -+#define DUT_CTRL_VCC_0V9EN (1<<12) -+#define DUT_CTRL_VCC_1V8EN (1<<13) -+#define DUT_CTRL_VCC_IO_INH (1<<14) -+#define DUT_CTRL_VCC_CORE_INH (1<<15) -+ -+/* -+ Register DUT_STATUS_1 -+*/ -+#define TCF_CLK_CTRL_DUT_STATUS_1 0x0110 -+#define DUT_STATUS_1_MASK 0xFFFFFFFFU -+#define DUT_STATUS_1_SHIFT 0 -+#define DUT_STATUS_1_SIGNED 0 -+ -+/* -+ Register DUT_CTRL_NOT_STAT_1 -+*/ -+#define TCF_CLK_CTRL_DUT_CTRL_NOT_STAT_1 0x0118 -+#define DUT_STAT_NOT_CTRL_1_MASK 0xFFFFFFFFU -+#define DUT_STAT_NOT_CTRL_1_SHIFT 0 -+#define DUT_STAT_NOT_CTRL_1_SIGNED 0 -+ -+/* -+ Register DUT_CONTROL_2 -+*/ -+#define TCF_CLK_CTRL_DUT_CONTROL_2 0x0120 -+#define DUT_CTRL_2_MASK 0xFFFFFFFFU -+#define DUT_CTRL_2_SHIFT 0 -+#define DUT_CTRL_2_SIGNED 0 -+ -+/* -+ Register DUT_STATUS_2 -+*/ -+#define TCF_CLK_CTRL_DUT_STATUS_2 0x0128 -+#define DUT_STATUS_2_MASK 0xFFFFFFFFU -+#define DUT_STATUS_2_SHIFT 0 -+#define DUT_STATUS_2_SIGNED 0 -+ -+/* -+ Register DUT_CTRL_NOT_STAT_2 -+*/ -+#define TCF_CLK_CTRL_DUT_CTRL_NOT_STAT_2 0x0130 -+#define DUT_CTRL_NOT_STAT_2_MASK 0xFFFFFFFFU -+#define DUT_CTRL_NOT_STAT_2_SHIFT 0 -+#define DUT_CTRL_NOT_STAT_2_SIGNED 0 -+ -+/* -+ Register BUS_CAP_BASE_ADDR -+*/ -+#define TCF_CLK_CTRL_BUS_CAP_BASE_ADDR 0x0138 -+#define BUS_CAP_BASE_ADDR_MASK 0xFFFFFFFFU -+#define BUS_CAP_BASE_ADDR_SHIFT 0 -+#define BUS_CAP_BASE_ADDR_SIGNED 0 -+ -+/* -+ Register BUS_CAP_ENABLE -+*/ -+#define TCF_CLK_CTRL_BUS_CAP_ENABLE 0x0140 -+#define BUS_CAP_ENABLE_MASK 0x00000001U -+#define BUS_CAP_ENABLE_SHIFT 0 -+#define BUS_CAP_ENABLE_SIGNED 0 -+ -+/* -+ Register BUS_CAP_COUNT -+*/ -+#define TCF_CLK_CTRL_BUS_CAP_COUNT 0x0148 -+#define BUS_CAP_COUNT_MASK 0xFFFFFFFFU -+#define 
BUS_CAP_COUNT_SHIFT 0 -+#define BUS_CAP_COUNT_SIGNED 0 -+ -+/* -+ Register DCM_LOCK_STATUS -+*/ -+#define TCF_CLK_CTRL_DCM_LOCK_STATUS 0x0150 -+#define DCM_LOCK_STATUS_MASK 0x00000007U -+#define DCM_LOCK_STATUS_SHIFT 0 -+#define DCM_LOCK_STATUS_SIGNED 0 -+ -+/* -+ Register AUX_DUT_RESETNS -+*/ -+#define TCF_CLK_CTRL_AUX_DUT_RESETNS 0x0158 -+#define AUX_DUT_RESETNS_MASK 0x0000000FU -+#define AUX_DUT_RESETNS_SHIFT 0 -+#define AUX_DUT_RESETNS_SIGNED 0 -+ -+/* -+ Register TCF_SPI_MST_ADDR_RDNWR -+*/ -+#define TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR 0x0160 -+#define TCF_SPI_MST_ADDR_MASK 0x0003FFFFU -+#define TCF_SPI_MST_ADDR_SHIFT 0 -+#define TCF_SPI_MST_ADDR_SIGNED 0 -+ -+#define TCF_SPI_MST_RDNWR_MASK 0x00040000U -+#define TCF_SPI_MST_RDNWR_SHIFT 18 -+#define TCF_SPI_MST_RDNWR_SIGNED 0 -+ -+#define TCF_SPI_MST_SLAVE_ID_MASK 0x00080000U -+#define TCF_SPI_MST_SLAVE_ID_SHIFT 19 -+#define TCF_SPI_MST_SLAVE_ID_SIGNED 0 -+ -+#define TCF_SPI_MST_MASTER_ID_MASK 0x00300000U -+#define TCF_SPI_MST_MASTER_ID_SHIFT 20 -+#define TCF_SPI_MST_MASTER_ID_SIGNED 0 -+ -+/* -+ Register TCF_SPI_MST_WDATA -+*/ -+#define TCF_CLK_CTRL_TCF_SPI_MST_WDATA 0x0168 -+#define TCF_SPI_MST_WDATA_MASK 0xFFFFFFFFU -+#define TCF_SPI_MST_WDATA_SHIFT 0 -+#define TCF_SPI_MST_WDATA_SIGNED 0 -+ -+/* -+ Register TCF_SPI_MST_RDATA -+*/ -+#define TCF_CLK_CTRL_TCF_SPI_MST_RDATA 0x0170 -+#define TCF_SPI_MST_RDATA_MASK 0xFFFFFFFFU -+#define TCF_SPI_MST_RDATA_SHIFT 0 -+#define TCF_SPI_MST_RDATA_SIGNED 0 -+ -+/* -+ Register TCF_SPI_MST_STATUS -+*/ -+#define TCF_CLK_CTRL_TCF_SPI_MST_STATUS 0x0178 -+#define TCF_SPI_MST_STATUS_MASK 0x0000000FU -+#define TCF_SPI_MST_STATUS_SHIFT 0 -+#define TCF_SPI_MST_STATUS_SIGNED 0 -+ -+/* -+ Register TCF_SPI_MST_GO -+*/ -+#define TCF_CLK_CTRL_TCF_SPI_MST_GO 0x0180 -+#define TCF_SPI_MST_GO_MASK 0x00000001U -+#define TCF_SPI_MST_GO_SHIFT 0 -+#define TCF_SPI_MST_GO_SIGNED 0 -+ -+/* -+ Register EXT_SIG_CTRL -+*/ -+#define TCF_CLK_CTRL_EXT_SIG_CTRL 0x0188 -+#define EXT_SYS_REQ_SIG_START_MASK 0x00000001U -+#define EXT_SYS_REQ_SIG_START_SHIFT 0 -+#define EXT_SYS_REQ_SIG_START_SIGNED 0 -+ -+#define EXT_SYS_RD_SIG_START_MASK 0x00000002U -+#define EXT_SYS_RD_SIG_START_SHIFT 1 -+#define EXT_SYS_RD_SIG_START_SIGNED 0 -+ -+#define EXT_MEM_REQ_SIG_START_MASK 0x00000004U -+#define EXT_MEM_REQ_SIG_START_SHIFT 2 -+#define EXT_MEM_REQ_SIG_START_SIGNED 0 -+ -+#define EXT_MEM_RD_SIG_START_MASK 0x00000008U -+#define EXT_MEM_RD_SIG_START_SHIFT 3 -+#define EXT_MEM_RD_SIG_START_SIGNED 0 -+ -+/* -+ Register EXT_SYS_REQ_SIG -+*/ -+#define TCF_CLK_CTRL_EXT_SYS_REQ_SIG 0x0190 -+#define EXT_SYS_REQ_SIG_MASK 0xFFFFFFFFU -+#define EXT_SYS_REQ_SIG_SHIFT 0 -+#define EXT_SYS_REQ_SIG_SIGNED 0 -+ -+/* -+ Register EXT_SYS_RD_SIG -+*/ -+#define TCF_CLK_CTRL_EXT_SYS_RD_SIG 0x0198 -+#define EXT_SYS_RD_SIG_MASK 0xFFFFFFFFU -+#define EXT_SYS_RD_SIG_SHIFT 0 -+#define EXT_SYS_RD_SIG_SIGNED 0 -+ -+/* -+ Register EXT_MEM_REQ_SIG -+*/ -+#define TCF_CLK_CTRL_EXT_MEM_REQ_SIG 0x01A0 -+#define EXT_MEM_REQ_SIG_MASK 0xFFFFFFFFU -+#define EXT_MEM_REQ_SIG_SHIFT 0 -+#define EXT_MEM_REQ_SIG_SIGNED 0 -+ -+/* -+ Register EXT_MEM_RD_SIG -+*/ -+#define TCF_CLK_CTRL_EXT_MEM_RD_SIG 0x01A8 -+#define EXT_MEM_RD_SIG_MASK 0xFFFFFFFFU -+#define EXT_MEM_RD_SIG_SHIFT 0 -+#define EXT_MEM_RD_SIG_SIGNED 0 -+ -+/* -+ Register EXT_SYS_REQ_WR_CNT -+*/ -+#define TCF_CLK_CTRL_EXT_SYS_REQ_WR_CNT 0x01B0 -+#define EXT_SYS_REQ_WR_CNT_MASK 0xFFFFFFFFU -+#define EXT_SYS_REQ_WR_CNT_SHIFT 0 -+#define EXT_SYS_REQ_WR_CNT_SIGNED 0 -+ -+/* -+ Register EXT_SYS_REQ_RD_CNT -+*/ -+#define 
TCF_CLK_CTRL_EXT_SYS_REQ_RD_CNT 0x01B8 -+#define EXT_SYS_REQ_RD_CNT_MASK 0xFFFFFFFFU -+#define EXT_SYS_REQ_RD_CNT_SHIFT 0 -+#define EXT_SYS_REQ_RD_CNT_SIGNED 0 -+ -+/* -+ Register EXT_SYS_RD_CNT -+*/ -+#define TCF_CLK_CTRL_EXT_SYS_RD_CNT 0x01C0 -+#define EXT_SYS_RD_CNT_MASK 0xFFFFFFFFU -+#define EXT_SYS_RD_CNT_SHIFT 0 -+#define EXT_SYS_RD_CNT_SIGNED 0 -+ -+/* -+ Register EXT_MEM_REQ_WR_CNT -+*/ -+#define TCF_CLK_CTRL_EXT_MEM_REQ_WR_CNT 0x01C8 -+#define EXT_MEM_REQ_WR_CNT_MASK 0xFFFFFFFFU -+#define EXT_MEM_REQ_WR_CNT_SHIFT 0 -+#define EXT_MEM_REQ_WR_CNT_SIGNED 0 -+ -+/* -+ Register EXT_MEM_REQ_RD_CNT -+*/ -+#define TCF_CLK_CTRL_EXT_MEM_REQ_RD_CNT 0x01D0 -+#define EXT_MEM_REQ_RD_CNT_MASK 0xFFFFFFFFU -+#define EXT_MEM_REQ_RD_CNT_SHIFT 0 -+#define EXT_MEM_REQ_RD_CNT_SIGNED 0 -+ -+/* -+ Register EXT_MEM_RD_CNT -+*/ -+#define TCF_CLK_CTRL_EXT_MEM_RD_CNT 0x01D8 -+#define EXT_MEM_RD_CNT_MASK 0xFFFFFFFFU -+#define EXT_MEM_RD_CNT_SHIFT 0 -+#define EXT_MEM_RD_CNT_SIGNED 0 -+ -+/* -+ Register TCF_CORE_TARGET_BUILD_CFG -+*/ -+#define TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG 0x01E0 -+#define TCF_CORE_TARGET_BUILD_ID_MASK 0x000000FFU -+#define TCF_CORE_TARGET_BUILD_ID_SHIFT 0 -+#define TCF_CORE_TARGET_BUILD_ID_SIGNED 0 -+ -+/* -+ Register MEM_THROUGH_SYS -+*/ -+#define TCF_CLK_CTRL_MEM_THROUGH_SYS 0x01E8 -+#define MEM_THROUGH_SYS_MASK 0x00000001U -+#define MEM_THROUGH_SYS_SHIFT 0 -+#define MEM_THROUGH_SYS_SIGNED 0 -+ -+/* -+ Register HOST_PHY_OFFSET -+*/ -+#define TCF_CLK_CTRL_HOST_PHY_OFFSET 0x01F0 -+#define HOST_PHY_OFFSET_MASK 0xFFFFFFFFU -+#define HOST_PHY_OFFSET_SHIFT 0 -+#define HOST_PHY_OFFSET_SIGNED 0 -+ -+/* -+ Register DEBUG_REG_SEL -+*/ -+#define TCF_CLK_CTRL_DEBUG_REG_SEL 0x01F8 -+#define DEBUG_REG_SELECT_MASK 0xFFFFFFFFU -+#define DEBUG_REG_SELECT_SHIFT 0 -+#define DEBUG_REG_SELECT_SIGNED 0 -+ -+/* -+ Register DEBUG_REG -+*/ -+#define TCF_CLK_CTRL_DEBUG_REG 0x0200 -+#define DEBUG_REG_VALUE_MASK 0xFFFFFFFFU -+#define DEBUG_REG_VALUE_SHIFT 0 -+#define DEBUG_REG_VALUE_SIGNED 0 -+ -+/* -+ Register JTAG_CTRL -+*/ -+#define TCF_CLK_CTRL_JTAG_CTRL 0x0208 -+#define JTAG_TRST_MASK 0x00000001U -+#define JTAG_TRST_SHIFT 0 -+#define JTAG_TRST_SIGNED 0 -+ -+#define JTAG_TMS_MASK 0x00000002U -+#define JTAG_TMS_SHIFT 1 -+#define JTAG_TMS_SIGNED 0 -+ -+#define JTAG_TCK_MASK 0x00000004U -+#define JTAG_TCK_SHIFT 2 -+#define JTAG_TCK_SIGNED 0 -+ -+#define JTAG_TDO_MASK 0x00000008U -+#define JTAG_TDO_SHIFT 3 -+#define JTAG_TDO_SIGNED 0 -+ -+#define JTAG_TDI_MASK 0x00000010U -+#define JTAG_TDI_SHIFT 4 -+#define JTAG_TDI_SIGNED 0 -+ -+#define JTAG_DASH_N_REG_MASK 0x40000000U -+#define JTAG_DASH_N_REG_SHIFT 30 -+#define JTAG_DASH_N_REG_SIGNED 0 -+ -+#define JTAG_DISABLE_MASK 0x80000000U -+#define JTAG_DISABLE_SHIFT 31 -+#define JTAG_DISABLE_SIGNED 0 -+ -+/* -+ Register SAI_DEBUG_RDNWR -+*/ -+#define TCF_CLK_CTRL_SAI_DEBUG_RDNWR 0x0300 -+#define SAI_DEBUG_REG_ADDR_MASK 0x000001FFU -+#define SAI_DEBUG_REG_ADDR_SHIFT 0 -+#define SAI_DEBUG_REG_ADDR_SIGNED 0 -+ -+#define SAI_DEBUG_REG_RDNWR_MASK 0x00000200U -+#define SAI_DEBUG_REG_RDNWR_SHIFT 9 -+#define SAI_DEBUG_REG_RDNWR_SIGNED 0 -+ -+/* -+ Register SAI_DEBUG_WDATA -+*/ -+#define TCF_CLK_CTRL_SAI_DEBUG_WDATA 0x0308 -+#define SAI_DEBUG_REG_WDATA_MASK 0xFFFFFFFFU -+#define SAI_DEBUG_REG_WDATA_SHIFT 0 -+#define SAI_DEBUG_REG_WDATA_SIGNED 0 -+ -+/* -+ Register SAI_DEBUG_RDATA -+*/ -+#define TCF_CLK_CTRL_SAI_DEBUG_RDATA 0x0310 -+#define SAI_DEBUG_REG_RDATA_MASK 0xFFFFFFFFU -+#define SAI_DEBUG_REG_RDATA_SHIFT 0 -+#define SAI_DEBUG_REG_RDATA_SIGNED 0 -+ -+/* -+ Register 
SAI_DEBUG_GO -+*/ -+#define TCF_CLK_CTRL_SAI_DEBUG_GO 0x0318 -+#define SAI_DEBUG_REG_GO_MASK 0x00000001U -+#define SAI_DEBUG_REG_GO_SHIFT 0 -+#define SAI_DEBUG_REG_GO_SIGNED 0 -+ -+/* -+ Register AUX_DUT_RESETS -+*/ -+#define TCF_CLK_CTRL_AUX_DUT_RESETS 0x0320 -+#define AUX_DUT_RESETS_MASK 0x0000000FU -+#define AUX_DUT_RESETS_SHIFT 0 -+#define AUX_DUT_RESETS_SIGNED 0 -+ -+/* -+ Register DUT_CLK_CTRL -+*/ -+#define TCF_CLK_CTRL_DUT_CLK_CTRL 0x0328 -+#define MEM_REQ_PHSE_MASK 0x0000FFFFU -+#define MEM_REQ_PHSE_SHIFT 0 -+#define MEM_REQ_PHSE_SIGNED 0 -+ -+/* -+ Register DUT_CLK_STATUS -+*/ -+#define TCF_CLK_CTRL_DUT_CLK_STATUS 0x0330 -+#define MEM_REQ_PHSE_SET_MASK 0x00000003U -+#define MEM_REQ_PHSE_SET_SHIFT 0 -+#define MEM_REQ_PHSE_SET_SIGNED 0 -+ -+/* -+ Register DUT_CLK_INFO -+*/ -+#define TCF_CLK_CTRL_DUT_CLK_INFO 0x0340 -+#define CORE_MASK 0x0000FFFFU -+#define CORE_SHIFT 0 -+#define CORE_SIGNED 0 -+ -+#define MEM_MASK 0xFFFF0000U -+#define MEM_SHIFT 16 -+#define MEM_SIGNED 0 -+ -+/* -+ Register DUT_CLK_PHSE -+*/ -+#define TCF_CLK_CTRL_DUT_CLK_PHSE 0x0348 -+#define MEM_REQ_MASK 0x0000FFFFU -+#define MEM_REQ_SHIFT 0 -+#define MEM_REQ_SIGNED 0 -+ -+#define MEM_RD_MASK 0xFFFF0000U -+#define MEM_RD_SHIFT 16 -+#define MEM_RD_SIGNED 0 -+ -+#endif /* !defined(_TCF_CLK_CTRL_H_) */ -+ -+/***************************************************************************** -+ End of file (tcf_clk_ctrl.h) -+*****************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/apollo/tcf_pll.h b/drivers/gpu/drm/img-rogue/apollo/tcf_pll.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/tcf_pll.h -@@ -0,0 +1,311 @@ -+/*************************************************************************/ /*! -+@Title Test Chip Framework PDP register definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Autogenerated C -- do not edit -+ Generated from tcf_pll.def -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(_TCF_PLL_H_) -+#define _TCF_PLL_H_ -+ -+/* -+ Register PLL_DDR2_CLK0 -+*/ -+#define TCF_PLL_PLL_DDR2_CLK0 0x0000 -+#define DDR2_PLL_CLK0_PHS_MASK 0x00300000U -+#define DDR2_PLL_CLK0_PHS_SHIFT 20 -+#define DDR2_PLL_CLK0_PHS_SIGNED 0 -+ -+#define DDR2_PLL_CLK0_MS_MASK 0x00030000U -+#define DDR2_PLL_CLK0_MS_SHIFT 16 -+#define DDR2_PLL_CLK0_MS_SIGNED 0 -+ -+#define DDR2_PLL_CLK0_FREQ_MASK 0x000001FFU -+#define DDR2_PLL_CLK0_FREQ_SHIFT 0 -+#define DDR2_PLL_CLK0_FREQ_SIGNED 0 -+ -+/* -+ Register PLL_DDR2_CLK1TO5 -+*/ -+#define TCF_PLL_PLL_DDR2_CLK1TO5 0x0008 -+#define DDR2_PLL_CLK1TO5_PHS_MASK 0x3FF00000U -+#define DDR2_PLL_CLK1TO5_PHS_SHIFT 20 -+#define DDR2_PLL_CLK1TO5_PHS_SIGNED 0 -+ -+#define DDR2_PLL_CLK1TO5_MS_MASK 0x000FFC00U -+#define DDR2_PLL_CLK1TO5_MS_SHIFT 10 -+#define DDR2_PLL_CLK1TO5_MS_SIGNED 0 -+ -+#define DDR2_PLL_CLK1TO5_FREQ_MASK 0x000003FFU -+#define DDR2_PLL_CLK1TO5_FREQ_SHIFT 0 -+#define DDR2_PLL_CLK1TO5_FREQ_SIGNED 0 -+ -+/* -+ Register PLL_DDR2_DRP_GO -+*/ -+#define TCF_PLL_PLL_DDR2_DRP_GO 0x0010 -+#define PLL_DDR2_DRP_GO_MASK 0x00000001U -+#define PLL_DDR2_DRP_GO_SHIFT 0 -+#define PLL_DDR2_DRP_GO_SIGNED 0 -+ -+/* -+ Register PLL_PDP_CLK0 -+*/ -+#define TCF_PLL_PLL_PDP_CLK0 0x0018 -+#define PDP_PLL_CLK0_PHS_MASK 0x00300000U -+#define PDP_PLL_CLK0_PHS_SHIFT 20 -+#define PDP_PLL_CLK0_PHS_SIGNED 0 -+ -+#define PDP_PLL_CLK0_MS_MASK 0x00030000U -+#define PDP_PLL_CLK0_MS_SHIFT 16 -+#define PDP_PLL_CLK0_MS_SIGNED 0 -+ -+#define PDP_PLL_CLK0_FREQ_MASK 0x000001FFU -+#define PDP_PLL_CLK0_FREQ_SHIFT 0 -+#define PDP_PLL_CLK0_FREQ_SIGNED 0 -+ -+/* -+ Register PLL_PDP_CLK1TO5 -+*/ -+#define TCF_PLL_PLL_PDP_CLK1TO5 0x0020 -+#define PDP_PLL_CLK1TO5_PHS_MASK 0x3FF00000U -+#define PDP_PLL_CLK1TO5_PHS_SHIFT 20 -+#define PDP_PLL_CLK1TO5_PHS_SIGNED 0 -+ -+#define PDP_PLL_CLK1TO5_MS_MASK 0x000FFC00U -+#define PDP_PLL_CLK1TO5_MS_SHIFT 10 -+#define PDP_PLL_CLK1TO5_MS_SIGNED 0 -+ -+#define PDP_PLL_CLK1TO5_FREQ_MASK 0x000003FFU -+#define PDP_PLL_CLK1TO5_FREQ_SHIFT 0 -+#define PDP_PLL_CLK1TO5_FREQ_SIGNED 0 -+ -+/* -+ Register PLL_PDP_DRP_GO -+*/ -+#define TCF_PLL_PLL_PDP_DRP_GO 0x0028 -+#define PLL_PDP_DRP_GO_MASK 0x00000001U -+#define PLL_PDP_DRP_GO_SHIFT 0 -+#define PLL_PDP_DRP_GO_SIGNED 0 -+ -+/* -+ Register PLL_PDP2_CLK0 -+*/ -+#define TCF_PLL_PLL_PDP2_CLK0 0x0030 -+#define PDP2_PLL_CLK0_PHS_MASK 0x00300000U -+#define PDP2_PLL_CLK0_PHS_SHIFT 20 -+#define PDP2_PLL_CLK0_PHS_SIGNED 0 -+ -+#define PDP2_PLL_CLK0_MS_MASK 0x00030000U -+#define PDP2_PLL_CLK0_MS_SHIFT 16 -+#define PDP2_PLL_CLK0_MS_SIGNED 0 -+ -+#define PDP2_PLL_CLK0_FREQ_MASK 0x000001FFU -+#define PDP2_PLL_CLK0_FREQ_SHIFT 0 -+#define PDP2_PLL_CLK0_FREQ_SIGNED 0 -+ -+/* -+ Register PLL_PDP2_CLK1TO5 -+*/ -+#define TCF_PLL_PLL_PDP2_CLK1TO5 0x0038 -+#define PDP2_PLL_CLK1TO5_PHS_MASK 0x3FF00000U -+#define PDP2_PLL_CLK1TO5_PHS_SHIFT 20 -+#define PDP2_PLL_CLK1TO5_PHS_SIGNED 0 -+ -+#define 
PDP2_PLL_CLK1TO5_MS_MASK 0x000FFC00U -+#define PDP2_PLL_CLK1TO5_MS_SHIFT 10 -+#define PDP2_PLL_CLK1TO5_MS_SIGNED 0 -+ -+#define PDP2_PLL_CLK1TO5_FREQ_MASK 0x000003FFU -+#define PDP2_PLL_CLK1TO5_FREQ_SHIFT 0 -+#define PDP2_PLL_CLK1TO5_FREQ_SIGNED 0 -+ -+/* -+ Register PLL_PDP2_DRP_GO -+*/ -+#define TCF_PLL_PLL_PDP2_DRP_GO 0x0040 -+#define PLL_PDP2_DRP_GO_MASK 0x00000001U -+#define PLL_PDP2_DRP_GO_SHIFT 0 -+#define PLL_PDP2_DRP_GO_SIGNED 0 -+ -+/* -+ Register PLL_CORE_CLK0 -+*/ -+#define TCF_PLL_PLL_CORE_CLK0 0x0048 -+#define CORE_PLL_CLK0_PHS_MASK 0x00300000U -+#define CORE_PLL_CLK0_PHS_SHIFT 20 -+#define CORE_PLL_CLK0_PHS_SIGNED 0 -+ -+#define CORE_PLL_CLK0_MS_MASK 0x00030000U -+#define CORE_PLL_CLK0_MS_SHIFT 16 -+#define CORE_PLL_CLK0_MS_SIGNED 0 -+ -+#define CORE_PLL_CLK0_FREQ_MASK 0x000001FFU -+#define CORE_PLL_CLK0_FREQ_SHIFT 0 -+#define CORE_PLL_CLK0_FREQ_SIGNED 0 -+ -+/* -+ Register PLL_CORE_CLK1TO5 -+*/ -+#define TCF_PLL_PLL_CORE_CLK1TO5 0x0050 -+#define CORE_PLL_CLK1TO5_PHS_MASK 0x3FF00000U -+#define CORE_PLL_CLK1TO5_PHS_SHIFT 20 -+#define CORE_PLL_CLK1TO5_PHS_SIGNED 0 -+ -+#define CORE_PLL_CLK1TO5_MS_MASK 0x000FFC00U -+#define CORE_PLL_CLK1TO5_MS_SHIFT 10 -+#define CORE_PLL_CLK1TO5_MS_SIGNED 0 -+ -+#define CORE_PLL_CLK1TO5_FREQ_MASK 0x000003FFU -+#define CORE_PLL_CLK1TO5_FREQ_SHIFT 0 -+#define CORE_PLL_CLK1TO5_FREQ_SIGNED 0 -+ -+/* -+ Register PLL_CORE_DRP_GO -+*/ -+#define TCF_PLL_PLL_CORE_DRP_GO 0x0058 -+#define PLL_CORE_DRP_GO_MASK 0x00000001U -+#define PLL_CORE_DRP_GO_SHIFT 0 -+#define PLL_CORE_DRP_GO_SIGNED 0 -+ -+/* -+ Register PLL_SYSIF_CLK0 -+*/ -+#define TCF_PLL_PLL_SYSIF_CLK0 0x0060 -+#define SYSIF_PLL_CLK0_PHS_MASK 0x00300000U -+#define SYSIF_PLL_CLK0_PHS_SHIFT 20 -+#define SYSIF_PLL_CLK0_PHS_SIGNED 0 -+ -+#define SYSIF_PLL_CLK0_MS_MASK 0x00030000U -+#define SYSIF_PLL_CLK0_MS_SHIFT 16 -+#define SYSIF_PLL_CLK0_MS_SIGNED 0 -+ -+#define SYSIF_PLL_CLK0_FREQ_MASK 0x000001FFU -+#define SYSIF_PLL_CLK0_FREQ_SHIFT 0 -+#define SYSIF_PLL_CLK0_FREQ_SIGNED 0 -+ -+/* -+ Register PLL_SYSIF_CLK1TO5 -+*/ -+#define TCF_PLL_PLL_SYSIF_CLK1TO5 0x0068 -+#define SYSIF_PLL_CLK1TO5_PHS_MASK 0x3FF00000U -+#define SYSIF_PLL_CLK1TO5_PHS_SHIFT 20 -+#define SYSIF_PLL_CLK1TO5_PHS_SIGNED 0 -+ -+#define SYSIF_PLL_CLK1TO5_MS_MASK 0x000FFC00U -+#define SYSIF_PLL_CLK1TO5_MS_SHIFT 10 -+#define SYSIF_PLL_CLK1TO5_MS_SIGNED 0 -+ -+#define SYSIF_PLL_CLK1TO5_FREQ_MASK 0x000003FFU -+#define SYSIF_PLL_CLK1TO5_FREQ_SHIFT 0 -+#define SYSIF_PLL_CLK1TO5_FREQ_SIGNED 0 -+ -+/* -+ Register PLL_SYS_DRP_GO -+*/ -+#define TCF_PLL_PLL_SYS_DRP_GO 0x0070 -+#define PLL_SYS_DRP_GO_MASK 0x00000001U -+#define PLL_SYS_DRP_GO_SHIFT 0 -+#define PLL_SYS_DRP_GO_SIGNED 0 -+ -+/* -+ Register PLL_MEMIF_CLK0 -+*/ -+#define TCF_PLL_PLL_MEMIF_CLK0 0x0078 -+#define MEMIF_PLL_CLK0_PHS_MASK 0x00300000U -+#define MEMIF_PLL_CLK0_PHS_SHIFT 20 -+#define MEMIF_PLL_CLK0_PHS_SIGNED 0 -+ -+#define MEMIF_PLL_CLK0_MS_MASK 0x00030000U -+#define MEMIF_PLL_CLK0_MS_SHIFT 16 -+#define MEMIF_PLL_CLK0_MS_SIGNED 0 -+ -+#define MEMIF_PLL_CLK0_FREQ_MASK 0x000001FFU -+#define MEMIF_PLL_CLK0_FREQ_SHIFT 0 -+#define MEMIF_PLL_CLK0_FREQ_SIGNED 0 -+ -+/* -+ Register PLL_MEMIF_CLK1TO5 -+*/ -+#define TCF_PLL_PLL_MEMIF_CLK1TO5 0x0080 -+#define MEMIF_PLL_CLK1TO5_PHS_MASK 0x3FF00000U -+#define MEMIF_PLL_CLK1TO5_PHS_SHIFT 20 -+#define MEMIF_PLL_CLK1TO5_PHS_SIGNED 0 -+ -+#define MEMIF_PLL_CLK1TO5_MS_MASK 0x000FFC00U -+#define MEMIF_PLL_CLK1TO5_MS_SHIFT 10 -+#define MEMIF_PLL_CLK1TO5_MS_SIGNED 0 -+ -+#define MEMIF_PLL_CLK1TO5_FREQ_MASK 0x000003FFU -+#define 
MEMIF_PLL_CLK1TO5_FREQ_SHIFT 0 -+#define MEMIF_PLL_CLK1TO5_FREQ_SIGNED 0 -+ -+/* -+ Register PLL_MEM_DRP_GO -+*/ -+#define TCF_PLL_PLL_MEM_DRP_GO 0x0088 -+#define PLL_MEM_DRP_GO_MASK 0x00000001U -+#define PLL_MEM_DRP_GO_SHIFT 0 -+#define PLL_MEM_DRP_GO_SIGNED 0 -+ -+/* -+ Register PLL_ALL_DRP_GO -+*/ -+#define TCF_PLL_PLL_ALL_DRP_GO 0x0090 -+#define PLL_ALL_DRP_GO_MASK 0x00000001U -+#define PLL_ALL_DRP_GO_SHIFT 0 -+#define PLL_ALL_DRP_GO_SIGNED 0 -+ -+/* -+ Register PLL_DRP_STATUS -+*/ -+#define TCF_PLL_PLL_DRP_STATUS 0x0098 -+#define PLL_LOCKS_MASK 0x00003F00U -+#define PLL_LOCKS_SHIFT 8 -+#define PLL_LOCKS_SIGNED 0 -+ -+#define PLL_DRP_GOOD_MASK 0x0000003FU -+#define PLL_DRP_GOOD_SHIFT 0 -+#define PLL_DRP_GOOD_SIGNED 0 -+ -+#endif /* !defined(_TCF_PLL_H_) */ -+ -+/***************************************************************************** -+ End of file (tcf_pll.h) -+*****************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/apollo/tcf_rgbpdp_regs.h b/drivers/gpu/drm/img-rogue/apollo/tcf_rgbpdp_regs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/apollo/tcf_rgbpdp_regs.h -@@ -0,0 +1,559 @@ -+/*************************************************************************/ /*! -+@Title Test Chip Framework PDP register definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Autogenerated C -- do not edit -+ Generated from: tcf_rgbpdp_regs.def -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(_TCF_RGBPDP_REGS_H_) -+#define _TCF_RGBPDP_REGS_H_ -+ -+/* -+ Register PVR_TCF_RGBPDP_STR1SURF -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF 0x0000 -+#define STR1HEIGHT_MASK 0x000007FFU -+#define STR1HEIGHT_SHIFT 0 -+#define STR1HEIGHT_SIGNED 0 -+ -+#define STR1WIDTH_MASK 0x003FF800U -+#define STR1WIDTH_SHIFT 11 -+#define STR1WIDTH_SIGNED 0 -+ -+#define STR1PIXFMT_MASK 0x0F000000U -+#define STR1PIXFMT_SHIFT 24 -+#define STR1PIXFMT_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_STR1ADDRCTRL -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL 0x0004 -+#define STR1BASE_MASK 0x03FFFFFFU -+#define STR1BASE_SHIFT 0 -+#define STR1BASE_SIGNED 0 -+ -+#define STR1INTFIELD_MASK 0x40000000U -+#define STR1INTFIELD_SHIFT 30 -+#define STR1INTFIELD_SIGNED 0 -+ -+#define STR1STREN_MASK 0x80000000U -+#define STR1STREN_SHIFT 31 -+#define STR1STREN_SIGNED 0 -+ -+/* -+ Register PVR_PDP_STR1POSN -+*/ -+#define TCF_RGBPDP_PVR_PDP_STR1POSN 0x0008 -+#define STR1STRIDE_MASK 0x000003FFU -+#define STR1STRIDE_SHIFT 0 -+#define STR1STRIDE_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_MEMCTRL -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_MEMCTRL 0x000C -+#define MEMREFRESH_MASK 0xC0000000U -+#define MEMREFRESH_SHIFT 30 -+#define MEMREFRESH_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_STRCTRL -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL 0x0010 -+#define BURSTLEN_GFX_MASK 0x000000FFU -+#define BURSTLEN_GFX_SHIFT 0 -+#define BURSTLEN_GFX_SIGNED 0 -+ -+#define THRESHOLD_GFX_MASK 0x0000FF00U -+#define THRESHOLD_GFX_SHIFT 8 -+#define THRESHOLD_GFX_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_SYNCCTRL -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL 0x0014 -+#define HSDIS_MASK 0x00000001U -+#define HSDIS_SHIFT 0 -+#define HSDIS_SIGNED 0 -+ -+#define HSPOL_MASK 0x00000002U -+#define HSPOL_SHIFT 1 -+#define HSPOL_SIGNED 0 -+ -+#define VSDIS_MASK 0x00000004U -+#define VSDIS_SHIFT 2 -+#define VSDIS_SIGNED 0 -+ -+#define VSPOL_MASK 0x00000008U -+#define VSPOL_SHIFT 3 -+#define VSPOL_SIGNED 0 -+ -+#define BLNKDIS_MASK 0x00000010U -+#define BLNKDIS_SHIFT 4 -+#define BLNKDIS_SIGNED 0 -+ -+#define BLNKPOL_MASK 0x00000020U -+#define BLNKPOL_SHIFT 5 -+#define BLNKPOL_SIGNED 0 -+ -+#define HS_SLAVE_MASK 0x00000040U -+#define HS_SLAVE_SHIFT 6 -+#define HS_SLAVE_SIGNED 0 -+ -+#define VS_SLAVE_MASK 0x00000080U -+#define VS_SLAVE_SHIFT 7 -+#define VS_SLAVE_SIGNED 0 -+ -+#define INTERLACE_MASK 0x00000100U -+#define INTERLACE_SHIFT 8 -+#define INTERLACE_SIGNED 0 -+ -+#define FIELDPOL_MASK 0x00000200U -+#define FIELDPOL_SHIFT 9 -+#define FIELDPOL_SIGNED 0 -+ -+#define CLKPOL_MASK 0x00000800U -+#define CLKPOL_SHIFT 11 -+#define CLKPOL_SIGNED 0 -+ -+#define CSYNC_EN_MASK 0x00001000U -+#define CSYNC_EN_SHIFT 12 -+#define CSYNC_EN_SIGNED 0 -+ -+#define FIELD_EN_MASK 0x00002000U -+#define FIELD_EN_SHIFT 13 -+#define FIELD_EN_SIGNED 0 -+ -+#define UPDWAIT_MASK 0x000F0000U -+#define UPDWAIT_SHIFT 16 -+#define 
UPDWAIT_SIGNED 0 -+ -+#define UPDCTRL_MASK 0x01000000U -+#define UPDCTRL_SHIFT 24 -+#define UPDCTRL_SIGNED 0 -+ -+#define UPDINTCTRL_MASK 0x02000000U -+#define UPDINTCTRL_SHIFT 25 -+#define UPDINTCTRL_SIGNED 0 -+ -+#define UPDSYNCTRL_MASK 0x04000000U -+#define UPDSYNCTRL_SHIFT 26 -+#define UPDSYNCTRL_SIGNED 0 -+ -+#define POWERDN_MASK 0x10000000U -+#define POWERDN_SHIFT 28 -+#define POWERDN_SIGNED 0 -+ -+#define DISP_RST_MASK 0x20000000U -+#define DISP_RST_SHIFT 29 -+#define DISP_RST_SIGNED 0 -+ -+#define SYNCACTIVE_MASK 0x80000000U -+#define SYNCACTIVE_SHIFT 31 -+#define SYNCACTIVE_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_BORDCOL -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL 0x0018 -+#define BORDCOL_MASK 0x00FFFFFFU -+#define BORDCOL_SHIFT 0 -+#define BORDCOL_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_UPDCTRL -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL 0x001C -+#define UPDFIELD_MASK 0x00000001U -+#define UPDFIELD_SHIFT 0 -+#define UPDFIELD_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_HSYNC1 -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1 0x0020 -+#define HT_MASK 0x00000FFFU -+#define HT_SHIFT 0 -+#define HT_SIGNED 0 -+ -+#define HBPS_MASK 0x0FFF0000U -+#define HBPS_SHIFT 16 -+#define HBPS_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_HSYNC2 -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2 0x0024 -+#define HLBS_MASK 0x00000FFFU -+#define HLBS_SHIFT 0 -+#define HLBS_SIGNED 0 -+ -+#define HAS_MASK 0x0FFF0000U -+#define HAS_SHIFT 16 -+#define HAS_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_HSYNC3 -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3 0x0028 -+#define HRBS_MASK 0x00000FFFU -+#define HRBS_SHIFT 0 -+#define HRBS_SIGNED 0 -+ -+#define HFPS_MASK 0x0FFF0000U -+#define HFPS_SHIFT 16 -+#define HFPS_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_VSYNC1 -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1 0x002C -+#define VT_MASK 0x00000FFFU -+#define VT_SHIFT 0 -+#define VT_SIGNED 0 -+ -+#define VBPS_MASK 0x0FFF0000U -+#define VBPS_SHIFT 16 -+#define VBPS_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_VSYNC2 -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2 0x0030 -+#define VTBS_MASK 0x00000FFFU -+#define VTBS_SHIFT 0 -+#define VTBS_SIGNED 0 -+ -+#define VAS_MASK 0x0FFF0000U -+#define VAS_SHIFT 16 -+#define VAS_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_VSYNC3 -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3 0x0034 -+#define VBBS_MASK 0x00000FFFU -+#define VBBS_SHIFT 0 -+#define VBBS_SIGNED 0 -+ -+#define VFPS_MASK 0x0FFF0000U -+#define VFPS_SHIFT 16 -+#define VFPS_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_HDECTRL -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL 0x0038 -+#define HDEF_MASK 0x00000FFFU -+#define HDEF_SHIFT 0 -+#define HDEF_SIGNED 0 -+ -+#define HDES_MASK 0x0FFF0000U -+#define HDES_SHIFT 16 -+#define HDES_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_VDECTRL -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL 0x003C -+#define VDEF_MASK 0x00000FFFU -+#define VDEF_SHIFT 0 -+#define VDEF_SIGNED 0 -+ -+#define VDES_MASK 0x0FFF0000U -+#define VDES_SHIFT 16 -+#define VDES_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_VEVENT -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT 0x0040 -+#define VFETCH_MASK 0x00000FFFU -+#define VFETCH_SHIFT 0 -+#define VFETCH_SIGNED 0 -+ -+#define VEVENT_MASK 0x0FFF0000U -+#define VEVENT_SHIFT 16 -+#define VEVENT_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_OPMASK -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_OPMASK 0x0044 -+#define MASKR_MASK 0x000000FFU -+#define MASKR_SHIFT 0 -+#define MASKR_SIGNED 0 -+ -+#define MASKG_MASK 0x0000FF00U -+#define MASKG_SHIFT 8 -+#define 
MASKG_SIGNED 0 -+ -+#define MASKB_MASK 0x00FF0000U -+#define MASKB_SHIFT 16 -+#define MASKB_SIGNED 0 -+ -+#define BLANKLEVEL_MASK 0x40000000U -+#define BLANKLEVEL_SHIFT 30 -+#define BLANKLEVEL_SIGNED 0 -+ -+#define MASKLEVEL_MASK 0x80000000U -+#define MASKLEVEL_SHIFT 31 -+#define MASKLEVEL_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_INTSTAT -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTSTAT 0x0048 -+#define INTS_HBLNK0_MASK 0x00000001U -+#define INTS_HBLNK0_SHIFT 0 -+#define INTS_HBLNK0_SIGNED 0 -+ -+#define INTS_HBLNK1_MASK 0x00000002U -+#define INTS_HBLNK1_SHIFT 1 -+#define INTS_HBLNK1_SIGNED 0 -+ -+#define INTS_VBLNK0_MASK 0x00000004U -+#define INTS_VBLNK0_SHIFT 2 -+#define INTS_VBLNK0_SIGNED 0 -+ -+#define INTS_VBLNK1_MASK 0x00000008U -+#define INTS_VBLNK1_SHIFT 3 -+#define INTS_VBLNK1_SIGNED 0 -+ -+#define INTS_STR1URUN_MASK 0x00000010U -+#define INTS_STR1URUN_SHIFT 4 -+#define INTS_STR1URUN_SIGNED 0 -+ -+#define INTS_STR1ORUN_MASK 0x00000020U -+#define INTS_STR1ORUN_SHIFT 5 -+#define INTS_STR1ORUN_SIGNED 0 -+ -+#define INTS_DISPURUN_MASK 0x00000040U -+#define INTS_DISPURUN_SHIFT 6 -+#define INTS_DISPURUN_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_INTENAB -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB 0x004C -+#define INTEN_HBLNK0_MASK 0x00000001U -+#define INTEN_HBLNK0_SHIFT 0 -+#define INTEN_HBLNK0_SIGNED 0 -+ -+#define INTEN_HBLNK1_MASK 0x00000002U -+#define INTEN_HBLNK1_SHIFT 1 -+#define INTEN_HBLNK1_SIGNED 0 -+ -+#define INTEN_VBLNK0_MASK 0x00000004U -+#define INTEN_VBLNK0_SHIFT 2 -+#define INTEN_VBLNK0_SIGNED 0 -+ -+#define INTEN_VBLNK1_MASK 0x00000008U -+#define INTEN_VBLNK1_SHIFT 3 -+#define INTEN_VBLNK1_SIGNED 0 -+ -+#define INTEN_STR1URUN_MASK 0x00000010U -+#define INTEN_STR1URUN_SHIFT 4 -+#define INTEN_STR1URUN_SIGNED 0 -+ -+#define INTEN_STR1ORUN_MASK 0x00000020U -+#define INTEN_STR1ORUN_SHIFT 5 -+#define INTEN_STR1ORUN_SIGNED 0 -+ -+#define INTEN_DISPURUN_MASK 0x00000040U -+#define INTEN_DISPURUN_SHIFT 6 -+#define INTEN_DISPURUN_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_INTCLEAR -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTCLEAR 0x0050 -+#define INTCLR_HBLNK0_MASK 0x00000001U -+#define INTCLR_HBLNK0_SHIFT 0 -+#define INTCLR_HBLNK0_SIGNED 0 -+ -+#define INTCLR_HBLNK1_MASK 0x00000002U -+#define INTCLR_HBLNK1_SHIFT 1 -+#define INTCLR_HBLNK1_SIGNED 0 -+ -+#define INTCLR_VBLNK0_MASK 0x00000004U -+#define INTCLR_VBLNK0_SHIFT 2 -+#define INTCLR_VBLNK0_SIGNED 0 -+ -+#define INTCLR_VBLNK1_MASK 0x00000008U -+#define INTCLR_VBLNK1_SHIFT 3 -+#define INTCLR_VBLNK1_SIGNED 0 -+ -+#define INTCLR_STR1URUN_MASK 0x00000010U -+#define INTCLR_STR1URUN_SHIFT 4 -+#define INTCLR_STR1URUN_SIGNED 0 -+ -+#define INTCLR_STR1ORUN_MASK 0x00000020U -+#define INTCLR_STR1ORUN_SHIFT 5 -+#define INTCLR_STR1ORUN_SIGNED 0 -+ -+#define INTCLR_DISPURUN_MASK 0x00000040U -+#define INTCLR_DISPURUN_SHIFT 6 -+#define INTCLR_DISPURUN_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_INTCTRL -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTCTRL 0x0054 -+#define HBLNK_LINENO_MASK 0x00000FFFU -+#define HBLNK_LINENO_SHIFT 0 -+#define HBLNK_LINENO_SIGNED 0 -+ -+#define HBLNK_LINE_MASK 0x00010000U -+#define HBLNK_LINE_SHIFT 16 -+#define HBLNK_LINE_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_SIGNAT -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_SIGNAT 0x0058 -+#define SIGNATURE_MASK 0xFFFFFFFFU -+#define SIGNATURE_SHIFT 0 -+#define SIGNATURE_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_LINESTAT -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_LINESTAT 0x005C -+#define LINENO_MASK 0x00000FFFU -+#define LINENO_SHIFT 0 -+#define LINENO_SIGNED 0 -+ 
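The interrupt and line-status registers above follow the generator's pattern of one <FIELD>_MASK / <FIELD>_SHIFT / <FIELD>_SIGNED triple per field plus a TCF_RGBPDP_* byte offset per register. A minimal sketch of how a consumer would use them, assuming an ioremap'd register base (pvPdpRegs) and the standard readl/writel accessors; the helpers below are illustrative only and not part of the patch, and INTCLEAR is presumed write-one-to-clear from its per-interrupt bit layout:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative helper: field extraction is simply (value & MASK) >> SHIFT. */
static inline u32 pdp_field_get(u32 ui32Reg, u32 ui32Mask, u32 ui32Shift)
{
	return (ui32Reg & ui32Mask) >> ui32Shift;
}

/* Check for a pending VBLNK0 interrupt and acknowledge it if set. */
static bool pdp_vblank0_pending_and_ack(void __iomem *pvPdpRegs)
{
	u32 ui32Stat = readl(pvPdpRegs + TCF_RGBPDP_PVR_TCF_RGBPDP_INTSTAT);

	if (!pdp_field_get(ui32Stat, INTS_VBLNK0_MASK, INTS_VBLNK0_SHIFT))
		return false;

	writel(INTCLR_VBLNK0_MASK, pvPdpRegs + TCF_RGBPDP_PVR_TCF_RGBPDP_INTCLEAR);
	return true;
}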
-+/* -+ Register PVR_TCF_RGBPDP_DBGCTRL -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGCTRL 0x0060 -+#define DBG_ENAB_MASK 0x00000001U -+#define DBG_ENAB_SHIFT 0 -+#define DBG_ENAB_SIGNED 0 -+ -+#define DBG_READ_MASK 0x00000002U -+#define DBG_READ_SHIFT 1 -+#define DBG_READ_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_DBGDATA -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGDATA 0x0064 -+#define DBG_DATA_MASK 0x00FFFFFFU -+#define DBG_DATA_SHIFT 0 -+#define DBG_DATA_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_DBGSIDE -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGSIDE 0x0068 -+#define DBG_SIDE_MASK 0x00000007U -+#define DBG_SIDE_SHIFT 0 -+#define DBG_SIDE_SIGNED 0 -+ -+#define DBG_VAL_MASK 0x00000008U -+#define DBG_VAL_SHIFT 3 -+#define DBG_VAL_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_REGLD_STAT -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_REGLD_STAT 0x0070 -+#define REGLD_ADDROUT_MASK 0x00FFFFFFU -+#define REGLD_ADDROUT_SHIFT 0 -+#define REGLD_ADDROUT_SIGNED 0 -+ -+#define REGLD_ADDREN_MASK 0x80000000U -+#define REGLD_ADDREN_SHIFT 31 -+#define REGLD_ADDREN_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_REGLD_CTRL -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_REGLD_CTRL 0x0074 -+#define REGLD_ADDRIN_MASK 0x00FFFFFFU -+#define REGLD_ADDRIN_SHIFT 0 -+#define REGLD_ADDRIN_SIGNED 0 -+ -+#define REGLD_VAL_MASK 0x01000000U -+#define REGLD_VAL_SHIFT 24 -+#define REGLD_VAL_SIGNED 0 -+ -+#define REGLD_ADDRLEN_MASK 0xFE000000U -+#define REGLD_ADDRLEN_SHIFT 25 -+#define REGLD_ADDRLEN_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_CORE_ID -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_CORE_ID 0x0078 -+#define CONFIG_ID_MASK 0x0000FFFFU -+#define CONFIG_ID_SHIFT 0 -+#define CONFIG_ID_SIGNED 0 -+ -+#define CORE_ID_MASK 0x00FF0000U -+#define CORE_ID_SHIFT 16 -+#define CORE_ID_SIGNED 0 -+ -+#define GROUP_ID_MASK 0xFF000000U -+#define GROUP_ID_SHIFT 24 -+#define GROUP_ID_SIGNED 0 -+ -+/* -+ Register PVR_TCF_RGBPDP_CORE_REV -+*/ -+#define TCF_RGBPDP_PVR_TCF_RGBPDP_CORE_REV 0x007C -+#define MAINT_REV_MASK 0x000000FFU -+#define MAINT_REV_SHIFT 0 -+#define MAINT_REV_SIGNED 0 -+ -+#define MINOR_REV_MASK 0x0000FF00U -+#define MINOR_REV_SHIFT 8 -+#define MINOR_REV_SIGNED 0 -+ -+#define MAJOR_REV_MASK 0x00FF0000U -+#define MAJOR_REV_SHIFT 16 -+#define MAJOR_REV_SIGNED 0 -+ -+#endif /* !defined(_TCF_RGBPDP_REGS_H_) */ -+ -+/***************************************************************************** -+ End of file (tcf_rgbpdp_regs.h) -+*****************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/cache_km.c b/drivers/gpu/drm/img-rogue/cache_km.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/cache_km.c -@@ -0,0 +1,1631 @@ -+/*************************************************************************/ /*! -+@File cache_km.c -+@Title CPU d-cache maintenance operations framework -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements server side code for CPU d-cache maintenance taking -+ into account the idiosyncrasies of the various types of CPU -+ d-cache instruction-set architecture (ISA) maintenance -+ mechanisms. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#if defined(__linux__) -+#include -+#include -+#include -+#include -+#include -+#include -+#endif -+ -+#include "pmr.h" -+#include "log2.h" -+#include "device.h" -+#include "pvrsrv.h" -+#include "osfunc.h" -+#include "cache_km.h" -+#include "pvr_debug.h" -+#include "lock_types.h" -+#include "allocmem.h" -+#include "process_stats.h" -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+#include "ri_server.h" -+#endif -+#include "devicemem.h" -+#include "pvrsrv_apphint.h" -+#include "pvrsrv_sync_server.h" -+#include "km_apphint_defs.h" -+#include "km_apphint_defs_common.h" -+#include "os_apphint.h" -+#include "di_server.h" -+ -+/* This header must always be included last */ -+#if defined(__linux__) -+#include "kernel_compatibility.h" -+#endif -+ -+/* Top-level file-local build definitions */ -+#if defined(PVRSRV_ENABLE_CACHEOP_STATS) && defined(__linux__) -+#define CACHEOP_DEBUG -+#define CACHEOP_STATS_ITEMS_MAX 32 -+#define INCR_WRAP(x) ((x+1) >= CACHEOP_STATS_ITEMS_MAX ? 0 : (x+1)) -+#define DECR_WRAP(x) ((x-1) < 0 ? 
(CACHEOP_STATS_ITEMS_MAX-1) : (x-1)) -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+/* Refer to CacheOpStatsExecLogHeader() for header item names */ -+#define CACHEOP_RI_PRINTF_HEADER "%-8s %-8s %-10s %-10s %-5s %-16s %-16s %-10s %-10s %-18s" -+#define CACHEOP_RI_PRINTF "%-8d %-8d %-10s %-10s %-5s 0x%-14llx 0x%-14llx 0x%-8llx 0x%-8llx %-18llu\n" -+#else -+#define CACHEOP_PRINTF_HEADER "%-8s %-8s %-10s %-10s %-5s %-10s %-10s %-18s" -+#define CACHEOP_PRINTF "%-8d %-8d %-10s %-10s %-5s 0x%-8llx 0x%-8llx %-18llu\n" -+#endif -+#endif -+ -+//#define CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING /* Force OS page (not cache line) flush granularity */ -+#define CACHEOP_PVR_ASSERT(x) /* Define as PVR_ASSERT(x), enable for swdev & testing */ -+#define CACHEOP_DEVMEM_OOR_ERROR_STRING "cacheop device memory request is out of range" -+#define CACHEOP_MAX_DEBUG_MESSAGE_LEN 160 -+ -+typedef struct _CACHEOP_WORK_ITEM_ -+{ -+ PMR *psPMR; -+ IMG_DEVMEM_SIZE_T uiSize; -+ PVRSRV_CACHE_OP uiCacheOp; -+ IMG_DEVMEM_OFFSET_T uiOffset; -+ PVRSRV_TIMELINE iTimeline; -+ SYNC_TIMELINE_OBJ sSWTimelineObj; -+ PVRSRV_DEVICE_NODE *psDevNode; -+#if defined(CACHEOP_DEBUG) -+ IMG_UINT64 ui64StartTime; -+ IMG_UINT64 ui64EndTime; -+ IMG_BOOL bKMReq; -+ IMG_PID pid; -+#endif -+} CACHEOP_WORK_ITEM; -+ -+typedef struct _CACHEOP_STATS_EXEC_ITEM_ -+{ -+ IMG_UINT32 ui32DeviceID; -+ IMG_PID pid; -+ PVRSRV_CACHE_OP uiCacheOp; -+ IMG_DEVMEM_SIZE_T uiOffset; -+ IMG_DEVMEM_SIZE_T uiSize; -+ IMG_UINT64 ui64StartTime; -+ IMG_UINT64 ui64EndTime; -+ IMG_BOOL bKMReq; -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+ IMG_DEV_VIRTADDR sDevVAddr; -+ IMG_DEV_PHYADDR sDevPAddr; -+#endif -+} CACHEOP_STATS_EXEC_ITEM; -+ -+typedef enum _CACHEOP_CONFIG_ -+{ -+ CACHEOP_CONFIG_DEFAULT = 0, -+ /* cache flush mechanism types */ -+ CACHEOP_CONFIG_URBF = 4, -+ /* sw-emulated deferred flush mechanism */ -+ CACHEOP_CONFIG_KDF = 8, -+ /* pseudo configuration items */ -+ CACHEOP_CONFIG_LAST = 16, -+ CACHEOP_CONFIG_KLOG = 16, -+ CACHEOP_CONFIG_ALL = 31 -+} CACHEOP_CONFIG; -+ -+typedef struct _CACHEOP_WORK_QUEUE_ -+{ -+/* -+ * Init. state & primary device node framework -+ * is anchored on. -+ */ -+ IMG_BOOL bInit; -+/* -+ MMU page size/shift & d-cache line size -+ */ -+ size_t uiPageSize; -+ IMG_UINT32 uiLineSize; -+ IMG_UINT32 uiLineShift; -+ IMG_UINT32 uiPageShift; -+ PMR *psInfoPagePMR; -+ IMG_UINT32 *pui32InfoPage; -+ -+#if defined(CACHEOP_DEBUG) -+/* -+ CacheOp statistics -+ */ -+ DI_ENTRY *psDIEntry; -+ IMG_HANDLE hStatsExecLock; -+ -+ IMG_UINT32 ui32ServerOps; -+ IMG_UINT32 ui32ClientOps; -+ IMG_UINT32 ui32TotalOps; -+ IMG_UINT32 ui32ServerOpUsedUMVA; -+ IMG_UINT32 ui32AvgExecTime; -+ IMG_UINT32 ui32AvgExecTimeRemainder; -+ -+ IMG_INT32 i32StatsExecWriteIdx; -+ CACHEOP_STATS_EXEC_ITEM asStatsExecuted[CACHEOP_STATS_ITEMS_MAX]; -+#endif -+ -+ DI_ENTRY *psConfigTune; -+ IMG_HANDLE hConfigLock; -+ CACHEOP_CONFIG eConfig; -+ IMG_UINT32 ui32Config; -+ IMG_BOOL bSupportsUMFlush; -+} CACHEOP_WORK_QUEUE; -+ -+/* Top-level CacheOp framework object */ -+static CACHEOP_WORK_QUEUE gsCwq; -+ -+#define CacheOpConfigSupports(e) ((gsCwq.eConfig & (e)) ? 
IMG_TRUE : IMG_FALSE) -+ -+#if defined(CACHEOP_DEBUG) -+static INLINE void CacheOpStatsExecLogHeader(IMG_CHAR szBuffer[CACHEOP_MAX_DEBUG_MESSAGE_LEN]) -+{ -+ OSSNPrintf(szBuffer, CACHEOP_MAX_DEBUG_MESSAGE_LEN, -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+ CACHEOP_RI_PRINTF_HEADER, -+#else -+ CACHEOP_PRINTF_HEADER, -+#endif -+ "DevID", -+ "Pid", -+ "CacheOp", -+ "Type", -+ "Origin", -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+ "DevVAddr", -+ "DevPAddr", -+#endif -+ "Offset", -+ "Size", -+ "xTime (us)"); -+} -+ -+static void CacheOpStatsExecLogWrite(CACHEOP_WORK_ITEM *psCacheOpWorkItem) -+{ -+ IMG_INT32 i32WriteOffset; -+ IMG_UINT32 ui32ExecTime; -+ printk("log write\n"); -+ if (!psCacheOpWorkItem->uiCacheOp) -+ { -+ return; -+ } -+ else if (psCacheOpWorkItem->bKMReq && !CacheOpConfigSupports(CACHEOP_CONFIG_KLOG)) -+ { -+ /* KM logs spams the history due to frequency, this removes it completely */ -+ return; -+ } -+ -+ OSLockAcquire(gsCwq.hStatsExecLock); -+ -+ i32WriteOffset = gsCwq.i32StatsExecWriteIdx; -+ gsCwq.i32StatsExecWriteIdx = INCR_WRAP(gsCwq.i32StatsExecWriteIdx); -+ gsCwq.asStatsExecuted[i32WriteOffset].ui32DeviceID = psCacheOpWorkItem->psDevNode ? psCacheOpWorkItem->psDevNode->sDevId.ui32InternalID : -1; -+ gsCwq.asStatsExecuted[i32WriteOffset].pid = psCacheOpWorkItem->pid; -+ gsCwq.asStatsExecuted[i32WriteOffset].uiSize = psCacheOpWorkItem->uiSize; -+ gsCwq.asStatsExecuted[i32WriteOffset].bKMReq = psCacheOpWorkItem->bKMReq; -+ gsCwq.asStatsExecuted[i32WriteOffset].uiOffset = psCacheOpWorkItem->uiOffset; -+ gsCwq.asStatsExecuted[i32WriteOffset].uiCacheOp = psCacheOpWorkItem->uiCacheOp; -+ gsCwq.asStatsExecuted[i32WriteOffset].ui64StartTime = psCacheOpWorkItem->ui64StartTime; -+ gsCwq.asStatsExecuted[i32WriteOffset].ui64EndTime = psCacheOpWorkItem->ui64EndTime; -+ -+ CACHEOP_PVR_ASSERT(gsCwq.asStatsExecuted[i32WriteOffset].pid); -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+ if (gsCwq.bInit && psCacheOpWorkItem->psPMR) -+ { -+ IMG_CPU_PHYADDR sDevPAddr; -+ PVRSRV_ERROR eError, eLockError; -+ IMG_BOOL bValid; -+ -+ /* Get more detailed information regarding the sub allocations that -+ PMR has from RI manager for process that requested the CacheOp */ -+ eError = RIDumpProcessListKM(psCacheOpWorkItem->psPMR, -+ gsCwq.asStatsExecuted[i32WriteOffset].pid, -+ gsCwq.asStatsExecuted[i32WriteOffset].uiOffset, -+ &gsCwq.asStatsExecuted[i32WriteOffset].sDevVAddr); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ /* (Re)lock here as some PMR might have not been locked */ -+ eLockError = PMRLockSysPhysAddresses(psCacheOpWorkItem->psPMR); -+ PVR_GOTO_IF_ERROR(eLockError, e0); -+ -+ eError = PMR_CpuPhysAddr(psCacheOpWorkItem->psPMR, -+ gsCwq.uiPageShift, -+ 1, -+ gsCwq.asStatsExecuted[i32WriteOffset].uiOffset, -+ &sDevPAddr, -+ &bValid); -+ -+ eLockError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR); -+ PVR_LOG_IF_ERROR(eLockError, "PMRUnlockSysPhysAddresses"); -+ -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ -+ -+ gsCwq.asStatsExecuted[i32WriteOffset].sDevPAddr.uiAddr = sDevPAddr.uiAddr; -+ } -+#endif -+ -+ /* Calculate the approximate cumulative moving average execution time. -+ * This calculation is based on standard equation: -+ * -+ * CMAnext = (new + count * CMAprev) / (count + 1) -+ * -+ * but in simplified form: -+ * -+ * CMAnext = CMAprev + (new - CMAprev) / (count + 1) -+ * -+ * this gets rid of multiplication and prevents overflow. 
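To make the integer form of that update concrete before the remainder refinement described next, here is a small standalone sketch (illustrative numbers only; cma_update is not part of the patch and mirrors only the simplified equation above):

#include <stdint.h>
#include <stdio.h>

/* CMAnext = CMAprev + (new - CMAprev) / (count + 1), in integer arithmetic */
static uint32_t cma_update(uint32_t ui32AvgPrev, uint32_t ui32NewSample,
                           uint32_t ui32Count)
{
	int32_t i32Div = (int32_t)ui32NewSample - (int32_t)ui32AvgPrev;

	return ui32AvgPrev + i32Div / (int32_t)(ui32Count + 1);
}

int main(void)
{
	/* Previous average 100us over 3 samples, new sample 123us:
	 * 100 + (123 - 100) / 4 = 105us; the truncated 23 % 4 = 3 is what the
	 * driver carries forward in ui32AvgExecTimeRemainder. */
	printf("%u\n", cma_update(100, 123, 3));
	return 0;
}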
-+ * -+ * Also to increase accuracy that we lose with integer division, -+ * we hold the moving remainder of the division and add it. -+ * -+ * CMAnext = CMAprev + (new - CMAprev + CMRprev) / (count + 1) -+ * -+ * Multiple tests proved it to be the best solution for approximating -+ * CMA using integers. -+ * -+ */ -+ -+ ui32ExecTime = -+ gsCwq.asStatsExecuted[i32WriteOffset].ui64EndTime - -+ gsCwq.asStatsExecuted[i32WriteOffset].ui64StartTime; -+ -+ { -+ -+ IMG_INT32 i32Div = -+ (IMG_INT32) ui32ExecTime - -+ (IMG_INT32) gsCwq.ui32AvgExecTime + -+ (IMG_INT32) gsCwq.ui32AvgExecTimeRemainder; -+ -+ gsCwq.ui32AvgExecTime += i32Div / (IMG_INT32)(gsCwq.ui32TotalOps + 1); -+ gsCwq.ui32AvgExecTimeRemainder = i32Div % (IMG_INT32)(gsCwq.ui32TotalOps + 1); -+ -+ gsCwq.ui32TotalOps++; -+ -+ } -+ -+ if (!gsCwq.asStatsExecuted[i32WriteOffset].bKMReq) -+ { -+ /* This operation queues only UM CacheOp in per-PID process statistics database */ -+ PVRSRVStatsUpdateCacheOpStats( -+ gsCwq.asStatsExecuted[i32WriteOffset].uiCacheOp, -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+ gsCwq.asStatsExecuted[i32WriteOffset].sDevVAddr, -+ gsCwq.asStatsExecuted[i32WriteOffset].sDevPAddr, -+#endif -+ gsCwq.asStatsExecuted[i32WriteOffset].uiOffset, -+ gsCwq.asStatsExecuted[i32WriteOffset].uiSize, -+ ui32ExecTime, -+ !gsCwq.asStatsExecuted[i32WriteOffset].bKMReq, -+ psCacheOpWorkItem->pid); -+ } -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+e0: -+#endif -+ OSLockRelease(gsCwq.hStatsExecLock); -+} -+ -+static int CacheOpStatsExecLogRead(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ IMG_CHAR *pszFlushType; -+ IMG_CHAR *pszCacheOpType; -+ IMG_CHAR *pszFlushSource; -+ IMG_INT32 i32ReadOffset; -+ IMG_INT32 i32WriteOffset; -+ -+ IMG_CHAR szBuffer[CACHEOP_MAX_DEBUG_MESSAGE_LEN] = {0}; -+ PVR_UNREFERENCED_PARAMETER(pvData); -+ -+ OSLockAcquire(gsCwq.hStatsExecLock); -+ -+ DIPrintf(psEntry, -+ "Primary CPU d-cache architecture: LSZ: 0x%x, URBF: %s\n", -+ gsCwq.uiLineSize, -+ gsCwq.bSupportsUMFlush ? "Yes" : "No"); -+ -+ DIPrintf(psEntry, -+ "Configuration: UKT: %d, URBF: %s\n", -+ gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD], -+ gsCwq.eConfig & CACHEOP_CONFIG_URBF ? "Yes" : "No"); -+ -+ DIPrintf(psEntry, -+ "Summary: Total Ops [%d] - Server(using UMVA)/Client [%d(%d)/%d]. Avg execution time [%d]\n", -+ gsCwq.ui32TotalOps, gsCwq.ui32ServerOps, gsCwq.ui32ServerOpUsedUMVA, gsCwq.ui32ClientOps, gsCwq.ui32AvgExecTime); -+ -+ -+ CacheOpStatsExecLogHeader(szBuffer); -+ DIPrintf(psEntry, "%s\n", szBuffer); -+ -+ i32WriteOffset = gsCwq.i32StatsExecWriteIdx; -+ for (i32ReadOffset = DECR_WRAP(i32WriteOffset); -+ i32ReadOffset != i32WriteOffset; -+ i32ReadOffset = DECR_WRAP(i32ReadOffset)) -+ { -+ IMG_UINT64 ui64ExecTime = -+ gsCwq.asStatsExecuted[i32ReadOffset].ui64EndTime - -+ gsCwq.asStatsExecuted[i32ReadOffset].ui64StartTime; -+ -+ IMG_DEVMEM_SIZE_T ui64NumOfPages = -+ gsCwq.asStatsExecuted[i32ReadOffset].uiSize >> gsCwq.uiPageShift; -+ -+ -+ if (!gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp) -+ { -+ break; -+ } -+ if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC) -+ { -+ pszFlushType = "RBF.Fast"; -+ } -+ else -+ { -+ pszFlushType = "RBF.Slow"; -+ } -+ -+ pszFlushSource = gsCwq.asStatsExecuted[i32ReadOffset].bKMReq ? 
" KM" : " UM"; -+ -+ switch (gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp) -+ { -+ case PVRSRV_CACHE_OP_NONE: -+ pszCacheOpType = "None"; -+ break; -+ case PVRSRV_CACHE_OP_CLEAN: -+ pszCacheOpType = "Clean"; -+ break; -+ case PVRSRV_CACHE_OP_INVALIDATE: -+ pszCacheOpType = "Invalidate"; -+ break; -+ case PVRSRV_CACHE_OP_FLUSH: -+ pszCacheOpType = "Flush"; -+ break; -+ case PVRSRV_CACHE_OP_TIMELINE: -+ pszCacheOpType = "Timeline"; -+ pszFlushType = " "; -+ break; -+ default: -+ pszCacheOpType = "Unknown"; -+ break; -+ } -+ -+ DIPrintf(psEntry, -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+ CACHEOP_RI_PRINTF, -+#else -+ CACHEOP_PRINTF, -+#endif -+ gsCwq.asStatsExecuted[i32ReadOffset].ui32DeviceID, -+ gsCwq.asStatsExecuted[i32ReadOffset].pid, -+ pszCacheOpType, -+ pszFlushType, -+ pszFlushSource, -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+ gsCwq.asStatsExecuted[i32ReadOffset].sDevVAddr.uiAddr, -+ gsCwq.asStatsExecuted[i32ReadOffset].sDevPAddr.uiAddr, -+#endif -+ gsCwq.asStatsExecuted[i32ReadOffset].uiOffset, -+ gsCwq.asStatsExecuted[i32ReadOffset].uiSize, -+ ui64ExecTime); -+ -+ } -+ -+ OSLockRelease(gsCwq.hStatsExecLock); -+ -+ return 0; -+} -+#endif /* defined(CACHEOP_DEBUG) */ -+ -+static INLINE void CacheOpStatsReset(void) -+{ -+#if defined(CACHEOP_DEBUG) -+ gsCwq.ui32ServerOps = 0; -+ gsCwq.ui32ClientOps = 0; -+ gsCwq.ui32TotalOps = 0; -+ gsCwq.ui32ServerOpUsedUMVA = 0; -+ gsCwq.ui32AvgExecTime = 0; -+ gsCwq.ui32AvgExecTimeRemainder = 0; -+ -+ gsCwq.i32StatsExecWriteIdx = 0; -+ -+ OSCachedMemSet(gsCwq.asStatsExecuted, 0, sizeof(gsCwq.asStatsExecuted)); -+#endif -+} -+ -+static void CacheOpConfigUpdate(IMG_UINT32 ui32Config) -+{ -+ OSLockAcquire(gsCwq.hConfigLock); -+ -+ /* Step 0, set the gsCwq.eConfig bits */ -+ if (!(ui32Config & (CACHEOP_CONFIG_LAST - 1))) -+ { -+ gsCwq.eConfig = CACHEOP_CONFIG_KDF; -+ if (gsCwq.bSupportsUMFlush) -+ { -+ gsCwq.eConfig |= CACHEOP_CONFIG_URBF; -+ } -+ } -+ else -+ { -+ if (ui32Config & CACHEOP_CONFIG_KDF) -+ { -+ gsCwq.eConfig |= CACHEOP_CONFIG_KDF; -+ } -+ else -+ { -+ gsCwq.eConfig &= ~CACHEOP_CONFIG_KDF; -+ } -+ -+ if (gsCwq.bSupportsUMFlush && (ui32Config & CACHEOP_CONFIG_URBF)) -+ { -+ gsCwq.eConfig |= CACHEOP_CONFIG_URBF; -+ } -+ else -+ { -+ gsCwq.eConfig &= ~CACHEOP_CONFIG_URBF; -+ } -+ } -+ -+ if (ui32Config & CACHEOP_CONFIG_KLOG) -+ { -+ /* Suppress logs from KM caller */ -+ gsCwq.eConfig |= CACHEOP_CONFIG_KLOG; -+ } -+ else -+ { -+ gsCwq.eConfig &= ~CACHEOP_CONFIG_KLOG; -+ } -+ -+ /* Step 1, set gsCwq.ui32Config based on gsCwq.eConfig */ -+ ui32Config = 0; -+ -+ if (gsCwq.eConfig & CACHEOP_CONFIG_KDF) -+ { -+ ui32Config |= CACHEOP_CONFIG_KDF; -+ } -+ if (gsCwq.eConfig & CACHEOP_CONFIG_URBF) -+ { -+ ui32Config |= CACHEOP_CONFIG_URBF; -+ } -+ if (gsCwq.eConfig & CACHEOP_CONFIG_KLOG) -+ { -+ ui32Config |= CACHEOP_CONFIG_KLOG; -+ } -+ gsCwq.ui32Config = ui32Config; -+ -+ -+ /* Step 3, in certain cases where a CacheOp/VA is provided, this threshold determines at what point -+ the optimisation due to the presence of said VA (i.e. 
us not having to remap the PMR pages in KM) -+ is clawed-back because of the overhead of maintaining such large request which might stalls the -+ user thread; so to hide this latency have these CacheOps executed on deferred CacheOp thread */ -+ gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] = (IMG_UINT32)(PVR_DIRTY_BYTES_FLUSH_THRESHOLD >> 2); -+ -+ /* Step 4, if no UM support, all requests are done in KM so zero these forcing all client requests -+ to come down into the KM for maintenance */ -+ gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = 0; -+ -+ if (gsCwq.bSupportsUMFlush) -+ { -+ /* With URBF enabled we never go to the kernel */ -+ if (gsCwq.eConfig & CACHEOP_CONFIG_URBF) -+ { -+ gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = (IMG_UINT32)~0; -+ } -+ } -+ -+ /* Step 5, reset stats. */ -+ CacheOpStatsReset(); -+ -+ OSLockRelease(gsCwq.hConfigLock); -+} -+ -+static int CacheOpConfigRead(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ PVR_UNREFERENCED_PARAMETER(pvData); -+ -+ DIPrintf(psEntry, "URBF: %s\n", -+ gsCwq.eConfig & CACHEOP_CONFIG_URBF ? "Yes" : "No"); -+ -+ return 0; -+} -+ -+static INLINE PVRSRV_ERROR CacheOpConfigQuery(const PVRSRV_DEVICE_NODE *psDevNode, -+ const void *psPrivate, -+ IMG_UINT32 *pui32Value) -+{ -+ IMG_UINT32 ui32ID = (IMG_UINT32)(uintptr_t) psPrivate; -+ PVR_UNREFERENCED_PARAMETER(psDevNode); -+ -+ switch (ui32ID) -+ { -+ case APPHINT_ID_CacheOpConfig: -+ *pui32Value = gsCwq.ui32Config; -+ break; -+ -+ case APPHINT_ID_CacheOpUMKMThresholdSize: -+ *pui32Value = gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD]; -+ break; -+ -+ default: -+ break; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static INLINE PVRSRV_ERROR CacheOpConfigSet(const PVRSRV_DEVICE_NODE *psDevNode, -+ const void *psPrivate, -+ IMG_UINT32 ui32Value) -+{ -+ IMG_UINT32 ui32ID = (IMG_UINT32)(uintptr_t) psPrivate; -+ PVR_UNREFERENCED_PARAMETER(psDevNode); -+ -+ switch (ui32ID) -+ { -+ case APPHINT_ID_CacheOpConfig: -+ CacheOpConfigUpdate(ui32Value & CACHEOP_CONFIG_ALL); -+ break; -+ -+ -+ case APPHINT_ID_CacheOpUMKMThresholdSize: -+ { -+ if (!ui32Value || !gsCwq.bSupportsUMFlush) -+ { -+ /* CPU ISA does not support UM flush, therefore every request goes down into -+ the KM, silently ignore request to adjust threshold */ -+ PVR_ASSERT(! 
gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD]); -+ break; -+ } -+ else if (ui32Value < gsCwq.uiPageSize) -+ { -+ /* Silently round-up to OS page size */ -+ ui32Value = gsCwq.uiPageSize; -+ } -+ -+ /* Align to OS page size */ -+ ui32Value &= ~(gsCwq.uiPageSize - 1); -+ -+ gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = ui32Value; -+ -+ break; -+ } -+ -+ default: -+ break; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static INLINE PVRSRV_ERROR CacheOpTimelineBind(PVRSRV_DEVICE_NODE *psDevNode, -+ CACHEOP_WORK_ITEM *psCacheOpWorkItem, -+ PVRSRV_TIMELINE iTimeline) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Always default the incoming CacheOp work-item to safe values */ -+ SyncClearTimelineObj(&psCacheOpWorkItem->sSWTimelineObj); -+ psCacheOpWorkItem->iTimeline = PVRSRV_NO_TIMELINE; -+ psCacheOpWorkItem->psDevNode = psDevNode; -+ if (iTimeline == PVRSRV_NO_TIMELINE) -+ { -+ return PVRSRV_OK; -+ } -+ -+ psCacheOpWorkItem->iTimeline = iTimeline; -+ eError = SyncSWGetTimelineObj(iTimeline, &psCacheOpWorkItem->sSWTimelineObj); -+ PVR_LOG_IF_ERROR(eError, "SyncSWGetTimelineObj"); -+ -+ return eError; -+} -+ -+static INLINE PVRSRV_ERROR CacheOpTimelineExec(CACHEOP_WORK_ITEM *psCacheOpWorkItem) -+{ -+ PVRSRV_ERROR eError; -+ -+ if (psCacheOpWorkItem->iTimeline == PVRSRV_NO_TIMELINE) -+ { -+ return PVRSRV_OK; -+ } -+ CACHEOP_PVR_ASSERT(psCacheOpWorkItem->sSWTimelineObj.pvTlObj); -+ -+ eError = SyncSWTimelineAdvanceKM(psCacheOpWorkItem->psDevNode, -+ &psCacheOpWorkItem->sSWTimelineObj); -+ (void) SyncSWTimelineReleaseKM(&psCacheOpWorkItem->sSWTimelineObj); -+ -+ return eError; -+} -+ -+static INLINE void CacheOpExecRangeBased(PVRSRV_DEVICE_NODE *psDevNode, -+ PVRSRV_CACHE_OP uiCacheOp, -+ IMG_BYTE *pbCpuVirtAddr, -+ IMG_CPU_PHYADDR sCpuPhyAddr, -+ IMG_DEVMEM_OFFSET_T uiPgAlignedOffset, -+ IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset, -+ IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset) -+{ -+ IMG_BYTE *pbCpuVirtAddrEnd; -+ IMG_BYTE *pbCpuVirtAddrStart; -+ IMG_CPU_PHYADDR sCpuPhyAddrEnd; -+ IMG_CPU_PHYADDR sCpuPhyAddrStart; -+ IMG_DEVMEM_SIZE_T uiRelFlushSize; -+ IMG_DEVMEM_OFFSET_T uiRelFlushOffset; -+ IMG_DEVMEM_SIZE_T uiNextPgAlignedOffset; -+ -+ /* These quantities allows us to perform cache operations -+ at cache-line granularity thereby ensuring we do not -+ perform more than is necessary */ -+ CACHEOP_PVR_ASSERT(uiPgAlignedOffset < uiCLAlignedEndOffset); -+ uiRelFlushSize = (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize; -+ uiRelFlushOffset = 0; -+ -+ if (uiCLAlignedStartOffset > uiPgAlignedOffset) -+ { -+ /* Zero unless initially starting at an in-page offset */ -+ uiRelFlushOffset = uiCLAlignedStartOffset - uiPgAlignedOffset; -+ uiRelFlushSize -= uiRelFlushOffset; -+ } -+ -+ /* uiRelFlushSize is gsCwq.uiPageSize unless current outstanding CacheOp -+ size is smaller. The 1st case handles in-page CacheOp range and -+ the 2nd case handles multiple-page CacheOp range with a last -+ CacheOp size that is less than gsCwq.uiPageSize */ -+ uiNextPgAlignedOffset = uiPgAlignedOffset + (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize; -+ if (uiNextPgAlignedOffset < uiPgAlignedOffset) -+ { -+ /* uiNextPgAlignedOffset is greater than uiCLAlignedEndOffset -+ by implication of this wrap-round; this only happens when -+ uiPgAlignedOffset is the last page aligned offset */ -+ uiRelFlushSize = uiRelFlushOffset ? -+ uiCLAlignedEndOffset - uiCLAlignedStartOffset : -+ uiCLAlignedEndOffset - uiPgAlignedOffset; -+ } -+ else -+ { -+ if (uiNextPgAlignedOffset > uiCLAlignedEndOffset) -+ { -+ uiRelFlushSize = uiRelFlushOffset ? 
-+ uiCLAlignedEndOffset - uiCLAlignedStartOffset : -+ uiCLAlignedEndOffset - uiPgAlignedOffset; -+ } -+ } -+ -+ /* More efficient to request cache maintenance operation for full -+ relative range as opposed to multiple cache-aligned ranges */ -+ sCpuPhyAddrStart.uiAddr = sCpuPhyAddr.uiAddr + uiRelFlushOffset; -+ sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr + uiRelFlushSize; -+ if (pbCpuVirtAddr) -+ { -+ pbCpuVirtAddrStart = pbCpuVirtAddr + uiRelFlushOffset; -+ pbCpuVirtAddrEnd = pbCpuVirtAddrStart + uiRelFlushSize; -+ } -+ else -+ { -+ /* Some OS/Env layer support functions expect NULL(s) */ -+ pbCpuVirtAddrStart = NULL; -+ pbCpuVirtAddrEnd = NULL; -+ } -+ -+ /* Perform requested CacheOp on the CPU data cache for successive cache -+ line worth of bytes up to page or in-page cache-line boundary */ -+ switch (uiCacheOp) -+ { -+ case PVRSRV_CACHE_OP_CLEAN: -+ OSCPUCacheCleanRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd, -+ sCpuPhyAddrStart, sCpuPhyAddrEnd); -+ break; -+ case PVRSRV_CACHE_OP_INVALIDATE: -+ OSCPUCacheInvalidateRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd, -+ sCpuPhyAddrStart, sCpuPhyAddrEnd); -+ break; -+ case PVRSRV_CACHE_OP_FLUSH: -+ OSCPUCacheFlushRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd, -+ sCpuPhyAddrStart, sCpuPhyAddrEnd); -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d", -+ __func__, uiCacheOp)); -+ break; -+ } -+ -+} -+ -+static INLINE void CacheOpExecRangeBasedVA(PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_CPU_VIRTADDR pvAddress, -+ IMG_DEVMEM_SIZE_T uiSize, -+ PVRSRV_CACHE_OP uiCacheOp) -+{ -+ IMG_CPU_PHYADDR sCpuPhyAddrUnused = -+ { IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL) }; -+ IMG_BYTE *pbEnd = (IMG_BYTE*)((uintptr_t)pvAddress + (uintptr_t)uiSize); -+ IMG_BYTE *pbStart = (IMG_BYTE*)((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiLineSize-1)); -+ -+ /* -+ If the start/end address isn't aligned to cache line size, round it up to the -+ nearest multiple; this ensures that we flush all the cache lines affected by -+ unaligned start/end addresses. 
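For a concrete instance of that rounding (illustrative numbers; line_align is not part of the patch and simply restates the pbStart/pbEnd arithmetic with an assumed 64-byte line size):

#include <stdint.h>
#include <stdio.h>

#define LINE_SIZE 64u /* assumed d-cache line size for this example */

/* Start rounded down, end rounded up, so every cache line touched by
 * [addr, addr + size) is maintained. */
static void line_align(uintptr_t uiAddr, size_t uiSize,
                       uintptr_t *puiStart, uintptr_t *puiEnd)
{
	*puiStart = uiAddr & ~((uintptr_t)LINE_SIZE - 1);
	*puiEnd   = (uiAddr + uiSize + LINE_SIZE - 1) & ~((uintptr_t)LINE_SIZE - 1);
}

int main(void)
{
	uintptr_t uiStart, uiEnd;

	/* A 0x35-byte request at 0x1010 spans two 64-byte lines:
	 * start = 0x1000, end = 0x1080 (0x1045 rounded up). */
	line_align(0x1010, 0x35, &uiStart, &uiEnd);
	printf("0x%lx..0x%lx\n", (unsigned long)uiStart, (unsigned long)uiEnd);
	return 0;
}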
-+ */ -+ pbEnd = (IMG_BYTE *) PVR_ALIGN((uintptr_t)pbEnd, (uintptr_t)gsCwq.uiLineSize); -+ switch (uiCacheOp) -+ { -+ case PVRSRV_CACHE_OP_CLEAN: -+ OSCPUCacheCleanRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused); -+ break; -+ case PVRSRV_CACHE_OP_INVALIDATE: -+ OSCPUCacheInvalidateRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused); -+ break; -+ case PVRSRV_CACHE_OP_FLUSH: -+ OSCPUCacheFlushRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused); -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d", -+ __func__, uiCacheOp)); -+ break; -+ } -+ -+} -+ -+static INLINE PVRSRV_ERROR CacheOpValidateUMVA(PMR *psPMR, -+ IMG_CPU_VIRTADDR pvAddress, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ PVRSRV_CACHE_OP uiCacheOp, -+ void **ppvOutAddress) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+#if defined(__linux__) && !defined(CACHEFLUSH_NO_KMRBF_USING_UMVA) -+ struct mm_struct *mm = current->mm; -+ struct vm_area_struct *vma; -+#endif -+ void __user *pvAddr; -+ -+ IMG_BOOL bReadOnlyInvalidate = -+ (uiCacheOp == PVRSRV_CACHE_OP_INVALIDATE) && -+ !PVRSRV_CHECK_CPU_WRITEABLE(PMR_Flags(psPMR)); -+ -+ if (!pvAddress || bReadOnlyInvalidate) -+ { -+ /* As pvAddress is optional, NULL is expected from UM/KM requests */ -+ /* Also don't allow invalidates for UMVA of read-only memory */ -+ pvAddr = NULL; -+ goto e0; -+ } -+ -+ -+ -+#if !defined(__linux__) || defined(CACHEFLUSH_NO_KMRBF_USING_UMVA) -+ pvAddr = NULL; -+#else -+ /* Validate VA, assume most basic address limit access_ok() check */ -+ pvAddr = (void __user *)(uintptr_t)((uintptr_t)pvAddress + uiOffset); -+ if (!access_ok(pvAddr, uiSize)) -+ { -+ pvAddr = NULL; -+ if (! mm) -+ { -+ /* Bad KM request, don't silently ignore */ -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_CPU_ADDR, e0); -+ } -+ } -+ else if (mm) -+ { -+ mmap_read_lock(mm); -+ vma = find_vma(mm, (unsigned long)(uintptr_t)pvAddr); -+ -+ if (!vma || -+ vma->vm_start > (unsigned long)(uintptr_t)pvAddr || -+ vma->vm_end < (unsigned long)(uintptr_t)pvAddr + uiSize || -+ vma->vm_private_data != psPMR) -+ { -+ /* -+ * Request range is not fully mapped or is not matching the PMR -+ * Ignore request's VA. 
-+ */ -+ pvAddr = NULL; -+ } -+ mmap_read_unlock(mm); -+ } -+#endif -+ -+e0: -+ *ppvOutAddress = (IMG_CPU_VIRTADDR __force) pvAddr; -+ return eError; -+} -+ -+static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, -+ IMG_CPU_VIRTADDR pvAddress, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ PVRSRV_CACHE_OP uiCacheOp, -+ IMG_BOOL bIsRequestValidated) -+ -+{ -+ IMG_HANDLE hPrivOut = NULL; -+ IMG_BOOL bPMRIsSparse; -+ IMG_UINT32 ui32PageIndex; -+ IMG_UINT32 ui32NumOfPages; -+ size_t uiOutSize; /* Effectively unused */ -+ PVRSRV_DEVICE_NODE *psDevNode; -+ IMG_DEVMEM_SIZE_T uiPgAlignedSize; -+ IMG_DEVMEM_OFFSET_T uiPgAlignedOffset; -+ IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset; -+ IMG_DEVMEM_OFFSET_T uiPgAlignedEndOffset; -+ IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset; -+ IMG_DEVMEM_OFFSET_T uiPgAlignedStartOffset; -+ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; -+ IMG_CPU_PHYADDR asCpuPhyAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; -+ IMG_CPU_PHYADDR *psCpuPhyAddr = asCpuPhyAddr; -+ IMG_BOOL bIsPMRInfoValid = IMG_FALSE; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_BYTE *pbCpuVirtAddr = NULL; -+ IMG_BOOL *pbValid = abValid; -+ OS_CACHE_OP_ADDR_TYPE eCacheOpAddrType; -+ -+ psDevNode = PMR_DeviceNode(psPMR); -+ eCacheOpAddrType = OSCPUCacheOpAddressType(psDevNode); -+ -+ if (uiCacheOp == PVRSRV_CACHE_OP_NONE || uiCacheOp == PVRSRV_CACHE_OP_TIMELINE) -+ { -+ return PVRSRV_OK; -+ } -+ -+ if (! bIsRequestValidated) -+ { -+ IMG_DEVMEM_SIZE_T uiLPhysicalSize; -+ -+ /* Need to validate parameters before proceeding */ -+ eError = PMR_PhysicalSize(psPMR, &uiLPhysicalSize); -+ PVR_LOG_RETURN_IF_ERROR(eError, "uiLPhysicalSize"); -+ -+ PVR_LOG_RETURN_IF_FALSE(((uiOffset+uiSize) <= uiLPhysicalSize), CACHEOP_DEVMEM_OOR_ERROR_STRING, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE); -+ -+ eError = PMRLockSysPhysAddresses(psPMR); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PMRLockSysPhysAddresses"); -+ } -+ -+ /* Fast track the request if a CPU VA is provided and CPU ISA supports VA only maintenance */ -+ eError = CacheOpValidateUMVA(psPMR, pvAddress, uiOffset, uiSize, uiCacheOp, (void**)&pbCpuVirtAddr); -+ if (eError == PVRSRV_OK) -+ { -+ pvAddress = pbCpuVirtAddr; -+ -+ if (pvAddress && eCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_VIRTUAL) -+ { -+ CacheOpExecRangeBasedVA(PMR_DeviceNode(psPMR), pvAddress, uiSize, uiCacheOp); -+ -+ if (!bIsRequestValidated) -+ { -+ eError = PMRUnlockSysPhysAddresses(psPMR); -+ PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses"); -+ } -+#if defined(CACHEOP_DEBUG) -+ gsCwq.ui32ServerOpUsedUMVA += 1; -+#endif -+ return PVRSRV_OK; -+ } -+ else if (pvAddress) -+ { -+ /* Round down the incoming VA (if any) down to the nearest page aligned VA */ -+ pvAddress = (void*)((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiPageSize-1)); -+#if defined(CACHEOP_DEBUG) -+ gsCwq.ui32ServerOpUsedUMVA += 1; -+#endif -+ } -+ } -+ else -+ { -+ /* -+ * This validation pathway has been added to accommodate any/all requests that might -+ * cause the kernel to Oops; essentially, KM requests should prevalidate cache maint. -+ * parameters but if this fails then we would rather fail gracefully than cause the -+ * kernel to Oops so instead we log the fact that an invalid KM virtual address was -+ * supplied and what action was taken to mitigate against kernel Oops(ing) if any. -+ */ -+ CACHEOP_PVR_ASSERT(pbCpuVirtAddr == NULL); -+ -+ if (eCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_PHYSICAL) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Invalid vaddress 0x%p in CPU d-cache maint. 
op, using paddress", -+ __func__, -+ pvAddress)); -+ -+ /* We can still proceed as kernel/cpu uses CPU PA for d-cache maintenance */ -+ pvAddress = NULL; -+ } -+ else -+ { -+ /* -+ * The approach here is to attempt a reacquisition of the PMR kernel VA and see if -+ * said VA corresponds to the parameter VA, if so fail requested cache maint. op. -+ * cause this indicates some kind of internal, memory and/or meta-data corruption -+ * else we reissue the request using this (re)acquired alias PMR kernel VA. -+ */ -+ if (PMR_IsSparse(psPMR)) -+ { -+ eError = PMRAcquireSparseKernelMappingData(psPMR, -+ 0, -+ gsCwq.uiPageSize, -+ (void **)&pbCpuVirtAddr, -+ &uiOutSize, -+ &hPrivOut); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", e0); -+ } -+ else -+ { -+ eError = PMRAcquireKernelMappingData(psPMR, -+ 0, -+ gsCwq.uiPageSize, -+ (void **)&pbCpuVirtAddr, -+ &uiOutSize, -+ &hPrivOut); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", e0); -+ } -+ -+ /* Here, we only compare these CPU virtual addresses at granularity of the OS page size */ -+ if ((uintptr_t)pbCpuVirtAddr == ((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiPageSize-1))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid vaddress 0x%p in CPU d-cache maint. op, no alt. so failing request", -+ __func__, -+ pvAddress)); -+ -+ eError = PMRReleaseKernelMappingData(psPMR, hPrivOut); -+ PVR_LOG_GOTO_WITH_ERROR("PMRReleaseKernelMappingData", eError, PVRSRV_ERROR_INVALID_CPU_ADDR, e0); -+ } -+ else if (eCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_VIRTUAL) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Bad vaddress 0x%p in CPU d-cache maint. op, using reacquired vaddress 0x%p", -+ __func__, -+ pvAddress, -+ pbCpuVirtAddr)); -+ -+ /* Note that this might still fail if there is kernel memory/meta-data corruption; -+ there is not much we can do here but at the least we will be informed of this -+ before the kernel Oops(ing) */ -+ CacheOpExecRangeBasedVA(PMR_DeviceNode(psPMR), pbCpuVirtAddr, uiSize, uiCacheOp); -+ -+ eError = PMRReleaseKernelMappingData(psPMR, hPrivOut); -+ PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); -+ -+ eError = PVRSRV_OK; -+ goto e0; -+ } -+ else -+ { -+ /* At this junction, we have exhausted every possible work-around possible but we do -+ know that VA reacquisition returned another/alias page-aligned VA; so with this -+ future expectation of PMRAcquireKernelMappingData(), we proceed */ -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Bad vaddress %p in CPU d-cache maint. 
op, will use reacquired vaddress", -+ __func__, -+ pvAddress)); -+ -+ eError = PMRReleaseKernelMappingData(psPMR, hPrivOut); -+ PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); -+ -+ /* NULL this to force per-page reacquisition down-stream */ -+ pvAddress = NULL; -+ } -+ } -+ } -+ -+ /* NULL clobbered var., OK to proceed */ -+ pbCpuVirtAddr = NULL; -+ eError = PVRSRV_OK; -+ -+ /* Need this for kernel mapping */ -+ bPMRIsSparse = PMR_IsSparse(psPMR); -+ -+ /* Round the incoming offset down to the nearest cache-line / page aligned-address */ -+ uiCLAlignedEndOffset = uiOffset + uiSize; -+ uiCLAlignedEndOffset = PVR_ALIGN(uiCLAlignedEndOffset, (IMG_DEVMEM_SIZE_T)gsCwq.uiLineSize); -+ uiCLAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)gsCwq.uiLineSize-1)); -+ -+ uiPgAlignedEndOffset = uiCLAlignedEndOffset; -+ uiPgAlignedEndOffset = PVR_ALIGN(uiPgAlignedEndOffset, (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize); -+ uiPgAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)gsCwq.uiPageSize-1)); -+ uiPgAlignedSize = uiPgAlignedEndOffset - uiPgAlignedStartOffset; -+ -+#if defined(CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING) -+ /* For internal debug if cache-line optimised -+ flushing is suspected of causing data corruption */ -+ uiCLAlignedStartOffset = uiPgAlignedStartOffset; -+ uiCLAlignedEndOffset = uiPgAlignedEndOffset; -+#endif -+ -+ /* Type of allocation backing the PMR data */ -+ ui32NumOfPages = uiPgAlignedSize >> gsCwq.uiPageShift; -+ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) -+ { -+ /* The pbValid array is allocated first as it is needed in -+ both physical/virtual cache maintenance methods */ -+ pbValid = OSAllocZMem(ui32NumOfPages * sizeof(IMG_BOOL)); -+ if (! pbValid) -+ { -+ pbValid = abValid; -+ } -+ else if (eCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) -+ { -+ psCpuPhyAddr = OSAllocZMem(ui32NumOfPages * sizeof(IMG_CPU_PHYADDR)); -+ if (! psCpuPhyAddr) -+ { -+ psCpuPhyAddr = asCpuPhyAddr; -+ OSFreeMem(pbValid); -+ pbValid = abValid; -+ } -+ } -+ } -+ -+ /* We always retrieve PMR data in bulk, up-front if number of pages is within -+ PMR_MAX_TRANSLATION_STACK_ALLOC limits else we check to ensure that a -+ dynamic buffer has been allocated to satisfy requests outside limits */ -+ if (ui32NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC || pbValid != abValid) -+ { -+ if (eCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) -+ { -+ /* Look-up PMR CpuPhyAddr once, if possible */ -+ eError = PMR_CpuPhysAddr(psPMR, -+ gsCwq.uiPageShift, -+ ui32NumOfPages, -+ uiPgAlignedStartOffset, -+ psCpuPhyAddr, -+ pbValid); -+ if (eError == PVRSRV_OK) -+ { -+ bIsPMRInfoValid = IMG_TRUE; -+ } -+ } -+ else -+ { -+ /* Look-up PMR per-page validity once, if possible */ -+ eError = PMR_IsOffsetValid(psPMR, -+ gsCwq.uiPageShift, -+ ui32NumOfPages, -+ uiPgAlignedStartOffset, -+ pbValid); -+ bIsPMRInfoValid = (eError == PVRSRV_OK) ? IMG_TRUE : IMG_FALSE; -+ } -+ } -+ -+ /* For each (possibly non-contiguous) PMR page(s), carry out the requested cache maint. op. */ -+ for (uiPgAlignedOffset = uiPgAlignedStartOffset, ui32PageIndex = 0; -+ uiPgAlignedOffset < uiPgAlignedEndOffset; -+ uiPgAlignedOffset += (IMG_DEVMEM_OFFSET_T) gsCwq.uiPageSize, ui32PageIndex += 1) -+ { -+ -+ if (! 
bIsPMRInfoValid) -+ { -+ /* Never cross page boundary without looking up corresponding PMR page physical -+ address and/or page validity if these were not looked-up, in bulk, up-front */ -+ ui32PageIndex = 0; -+ if (eCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) -+ { -+ eError = PMR_CpuPhysAddr(psPMR, -+ gsCwq.uiPageShift, -+ 1, -+ uiPgAlignedOffset, -+ psCpuPhyAddr, -+ pbValid); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PMR_CpuPhysAddr", e0); -+ } -+ else -+ { -+ eError = PMR_IsOffsetValid(psPMR, -+ gsCwq.uiPageShift, -+ 1, -+ uiPgAlignedOffset, -+ pbValid); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PMR_IsOffsetValid", e0); -+ } -+ } -+ -+ /* Skip invalid PMR pages (i.e. sparse) */ -+ if (pbValid[ui32PageIndex] == IMG_FALSE) -+ { -+ CACHEOP_PVR_ASSERT(bPMRIsSparse); -+ continue; -+ } -+ -+ if (pvAddress) -+ { -+ /* The caller has supplied either a KM/UM CpuVA, so use it unconditionally */ -+ pbCpuVirtAddr = -+ (void *)(uintptr_t)((uintptr_t)pvAddress + (uintptr_t)(uiPgAlignedOffset-uiPgAlignedStartOffset)); -+ } -+ /* Skip CpuVA acquire if CacheOp can be maintained entirely using CpuPA */ -+ else if (eCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_PHYSICAL) -+ { -+ if (bPMRIsSparse) -+ { -+ eError = -+ PMRAcquireSparseKernelMappingData(psPMR, -+ uiPgAlignedOffset, -+ gsCwq.uiPageSize, -+ (void **)&pbCpuVirtAddr, -+ &uiOutSize, -+ &hPrivOut); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", e0); -+ } -+ else -+ { -+ eError = -+ PMRAcquireKernelMappingData(psPMR, -+ uiPgAlignedOffset, -+ gsCwq.uiPageSize, -+ (void **)&pbCpuVirtAddr, -+ &uiOutSize, -+ &hPrivOut); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", e0); -+ } -+ } -+ -+ /* Issue actual cache maintenance for PMR */ -+ CacheOpExecRangeBased(psDevNode, -+ uiCacheOp, -+ pbCpuVirtAddr, -+ (eCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) ? -+ psCpuPhyAddr[ui32PageIndex] : psCpuPhyAddr[0], -+ uiPgAlignedOffset, -+ uiCLAlignedStartOffset, -+ uiCLAlignedEndOffset); -+ -+ if (! pvAddress) -+ { -+ /* The caller has not supplied either a KM/UM CpuVA, release mapping */ -+ if (eCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_PHYSICAL) -+ { -+ eError = PMRReleaseKernelMappingData(psPMR, hPrivOut); -+ PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); -+ } -+ } -+ } -+ -+e0: -+ if (psCpuPhyAddr != asCpuPhyAddr) -+ { -+ OSFreeMem(psCpuPhyAddr); -+ } -+ -+ if (pbValid != abValid) -+ { -+ OSFreeMem(pbValid); -+ } -+ -+ if (! 
bIsRequestValidated) -+ { -+ eError = PMRUnlockSysPhysAddresses(psPMR); -+ PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses"); -+ } -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR CacheOpBatchExecTimeline(PVRSRV_DEVICE_NODE *psDevNode, -+ PVRSRV_TIMELINE iTimeline) -+{ -+ PVRSRV_ERROR eError; -+ CACHEOP_WORK_ITEM sCacheOpWorkItem = {NULL}; -+ -+ eError = CacheOpTimelineBind(psDevNode, &sCacheOpWorkItem, iTimeline); -+ PVR_LOG_RETURN_IF_ERROR(eError, "CacheOpTimelineBind"); -+ -+ eError = CacheOpTimelineExec(&sCacheOpWorkItem); -+ PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec"); -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR CacheOpBatchExecRangeBased(PVRSRV_DEVICE_NODE *psDevNode, -+ PMR **ppsPMR, -+ IMG_CPU_VIRTADDR *pvAddress, -+ IMG_DEVMEM_OFFSET_T *puiOffset, -+ IMG_DEVMEM_SIZE_T *puiSize, -+ PVRSRV_CACHE_OP *puiCacheOp, -+ IMG_UINT32 ui32NumCacheOps, -+ PVRSRV_TIMELINE uiTimeline) -+{ -+ IMG_UINT32 ui32Idx; -+ IMG_BOOL bBatchHasTimeline; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+#if defined(CACHEOP_DEBUG) -+ CACHEOP_WORK_ITEM sCacheOpWorkItem = {0}; -+ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM(); -+#endif -+ -+ /* Check if batch has an associated timeline update */ -+ bBatchHasTimeline = puiCacheOp[ui32NumCacheOps-1] & PVRSRV_CACHE_OP_TIMELINE; -+ puiCacheOp[ui32NumCacheOps-1] &= ~(PVRSRV_CACHE_OP_TIMELINE); -+ -+ for (ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++) -+ { -+ /* Fail UM request, don't silently ignore */ -+ PVR_GOTO_IF_INVALID_PARAM(puiSize[ui32Idx], eError, e0); -+ -+#if defined(CACHEOP_DEBUG) -+ sCacheOpWorkItem.ui64StartTime = OSClockus64(); -+#endif -+ -+ eError = CacheOpPMRExec(ppsPMR[ui32Idx], -+ pvAddress[ui32Idx], -+ puiOffset[ui32Idx], -+ puiSize[ui32Idx], -+ puiCacheOp[ui32Idx], -+ IMG_FALSE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpExecPMR", e0); -+ -+#if defined(CACHEOP_DEBUG) -+ sCacheOpWorkItem.ui64EndTime = OSClockus64(); -+ -+ sCacheOpWorkItem.psDevNode = psDevNode; -+ sCacheOpWorkItem.psPMR = ppsPMR[ui32Idx]; -+ sCacheOpWorkItem.uiSize = puiSize[ui32Idx]; -+ sCacheOpWorkItem.uiOffset = puiOffset[ui32Idx]; -+ sCacheOpWorkItem.uiCacheOp = puiCacheOp[ui32Idx]; -+ CacheOpStatsExecLogWrite(&sCacheOpWorkItem); -+ -+ gsCwq.ui32ServerOps += 1; -+#endif -+ } -+ -+e0: -+ if (bBatchHasTimeline) -+ { -+ eError = CacheOpBatchExecTimeline(psDevNode, uiTimeline); -+ } -+ -+ return eError; -+} -+ -+ -+PVRSRV_ERROR CacheOpExec (PPVRSRV_DEVICE_NODE psDevNode, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd, -+ PVRSRV_CACHE_OP uiCacheOp) -+{ -+#if defined(CACHEOP_DEBUG) -+ IMG_UINT64 ui64StartTime = OSClockus64(); -+#endif -+ -+ switch (uiCacheOp) -+ { -+ case PVRSRV_CACHE_OP_CLEAN: -+ OSCPUCacheCleanRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd); -+ break; -+ case PVRSRV_CACHE_OP_INVALIDATE: -+ OSCPUCacheInvalidateRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd); -+ break; -+ case PVRSRV_CACHE_OP_FLUSH: -+ OSCPUCacheFlushRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd); -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d", -+ __func__, uiCacheOp)); -+ break; -+ } -+ -+#if defined(CACHEOP_DEBUG) -+ if (CacheOpConfigSupports(CACHEOP_CONFIG_KLOG)) -+ { -+ CACHEOP_WORK_ITEM sCacheOpWorkItem = {0}; -+ -+ gsCwq.ui32ServerOps += 1; -+ -+ sCacheOpWorkItem.uiOffset = 0; -+ sCacheOpWorkItem.bKMReq = IMG_TRUE; -+ sCacheOpWorkItem.uiCacheOp = uiCacheOp; -+ /* Use information page PMR for logging KM 
request */ -+ sCacheOpWorkItem.psPMR = gsCwq.psInfoPagePMR; -+ sCacheOpWorkItem.psDevNode = psDevNode; -+ sCacheOpWorkItem.ui64StartTime = ui64StartTime; -+ sCacheOpWorkItem.ui64EndTime = OSClockus64(); -+ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM(); -+ sCacheOpWorkItem.uiSize = (sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr); -+ -+ CacheOpStatsExecLogWrite(&sCacheOpWorkItem); -+ } -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR CacheOpValExec(PMR *psPMR, -+ IMG_UINT64 uiAddress, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ PVRSRV_CACHE_OP uiCacheOp) -+{ -+ PVRSRV_ERROR eError; -+ IMG_CPU_VIRTADDR pvAddress = (IMG_CPU_VIRTADDR)(uintptr_t)uiAddress; -+#if defined(CACHEOP_DEBUG) -+ CACHEOP_WORK_ITEM sCacheOpWorkItem = {0}; -+ -+ sCacheOpWorkItem.ui64StartTime = OSClockus64(); -+#endif -+ -+ eError = CacheOpPMRExec(psPMR, -+ pvAddress, -+ uiOffset, -+ uiSize, -+ uiCacheOp, -+ IMG_FALSE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpPMRExec", e0); -+ -+#if defined(CACHEOP_DEBUG) -+ sCacheOpWorkItem.ui64EndTime = OSClockus64(); -+ -+ sCacheOpWorkItem.psDevNode = PMR_DeviceNode(psPMR); -+ sCacheOpWorkItem.psPMR = psPMR; -+ sCacheOpWorkItem.uiSize = uiSize; -+ sCacheOpWorkItem.uiOffset = uiOffset; -+ sCacheOpWorkItem.uiCacheOp = uiCacheOp; -+ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM(); -+ CacheOpStatsExecLogWrite(&sCacheOpWorkItem); -+ -+ gsCwq.ui32ServerOps += 1; -+#endif -+ -+e0: -+ return eError; -+} -+ -+PVRSRV_ERROR CacheOpQueue (CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT32 ui32NumCacheOps, -+ PMR **ppsPMR, -+ IMG_UINT64 *puiAddress, -+ IMG_DEVMEM_OFFSET_T *puiOffset, -+ IMG_DEVMEM_SIZE_T *puiSize, -+ PVRSRV_CACHE_OP *puiCacheOp, -+ IMG_UINT32 ui32OpTimeline) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_TIMELINE uiTimeline = (PVRSRV_TIMELINE)ui32OpTimeline; -+ IMG_CPU_VIRTADDR *pvAddress = (IMG_CPU_VIRTADDR*)(uintptr_t)puiAddress; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ if (!gsCwq.bInit) -+ { -+ PVR_LOG(("CacheOp framework not initialised, failing request")); -+ return PVRSRV_ERROR_NOT_INITIALISED; -+ } -+ else if (! 
ui32NumCacheOps) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ /* Ensure any single timeline CacheOp request is processed immediately */ -+ else if (ui32NumCacheOps == 1 && puiCacheOp[0] == PVRSRV_CACHE_OP_TIMELINE) -+ { -+ eError = CacheOpBatchExecTimeline(psDevNode, uiTimeline); -+ } -+ /* This is the default entry for all client requests */ -+ else -+ { -+ if (!(gsCwq.eConfig & (CACHEOP_CONFIG_LAST-1))) -+ { -+ /* default the configuration before execution */ -+ CacheOpConfigUpdate(CACHEOP_CONFIG_DEFAULT); -+ } -+ -+ eError = -+ CacheOpBatchExecRangeBased(psDevNode, -+ ppsPMR, -+ pvAddress, -+ puiOffset, -+ puiSize, -+ puiCacheOp, -+ ui32NumCacheOps, -+ uiTimeline); -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR CacheOpLog (PMR *psPMR, -+ IMG_UINT64 puiAddress, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT64 ui64StartTime, -+ IMG_UINT64 ui64EndTime, -+ PVRSRV_CACHE_OP uiCacheOp) -+{ -+#if defined(CACHEOP_DEBUG) -+ CACHEOP_WORK_ITEM sCacheOpWorkItem = {0}; -+ PVR_UNREFERENCED_PARAMETER(puiAddress); -+ -+ sCacheOpWorkItem.psDevNode = PMR_DeviceNode(psPMR); -+ sCacheOpWorkItem.psPMR = psPMR; -+ sCacheOpWorkItem.uiSize = uiSize; -+ sCacheOpWorkItem.uiOffset = uiOffset; -+ sCacheOpWorkItem.uiCacheOp = uiCacheOp; -+ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM(); -+ -+ sCacheOpWorkItem.ui64StartTime = ui64StartTime; -+ sCacheOpWorkItem.ui64EndTime = ui64EndTime; -+ -+ gsCwq.ui32ClientOps += 1; -+ -+ CacheOpStatsExecLogWrite(&sCacheOpWorkItem); -+#else -+ PVR_UNREFERENCED_PARAMETER(psPMR); -+ PVR_UNREFERENCED_PARAMETER(uiSize); -+ PVR_UNREFERENCED_PARAMETER(uiOffset); -+ PVR_UNREFERENCED_PARAMETER(uiCacheOp); -+ PVR_UNREFERENCED_PARAMETER(puiAddress); -+ PVR_UNREFERENCED_PARAMETER(ui64StartTime); -+ PVR_UNREFERENCED_PARAMETER(ui64EndTime); -+#endif -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR CacheOpInit2 (void) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ -+ /* Apphint read/write is not concurrent, so lock protects against this */ -+ eError = OSLockCreate((POS_LOCK*)&gsCwq.hConfigLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0); -+ -+ -+#if defined(CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH) -+ gsCwq.bSupportsUMFlush = IMG_TRUE; -+#else -+ gsCwq.bSupportsUMFlush = IMG_FALSE; -+#endif -+ -+ gsCwq.pui32InfoPage = psPVRSRVData->pui32InfoPage; -+ gsCwq.psInfoPagePMR = psPVRSRVData->psInfoPagePMR; -+ -+ /* Normally, platforms should use their default configurations, put exceptions here */ -+#if defined(__i386__) || defined(__x86_64__) -+#if !defined(TC_MEMORY_CONFIG) -+ CacheOpConfigUpdate(CACHEOP_CONFIG_URBF | CACHEOP_CONFIG_KDF); -+#else -+ CacheOpConfigUpdate(CACHEOP_CONFIG_KDF); -+#endif -+#else /* defined(__x86__) */ -+ CacheOpConfigUpdate(CACHEOP_CONFIG_DEFAULT); -+#endif -+ -+ /* Initialise the remaining occupants of the CacheOp information page */ -+ gsCwq.pui32InfoPage[CACHEOP_INFO_PGSIZE] = (IMG_UINT32)gsCwq.uiPageSize; -+ gsCwq.pui32InfoPage[CACHEOP_INFO_LINESIZE] = (IMG_UINT32)gsCwq.uiLineSize; -+ -+ /* Set before spawning thread */ -+ gsCwq.bInit = IMG_TRUE; -+ -+ { -+ DI_ITERATOR_CB sIterator = {.pfnShow = CacheOpConfigRead}; -+ /* Writing the unsigned integer binary encoding of CACHEOP_CONFIG -+ into this file cycles through avail. 
configuration(s) */ -+ eError = DICreateEntry("cacheop_config", NULL, &sIterator, NULL, -+ DI_ENTRY_TYPE_GENERIC, &gsCwq.psConfigTune); -+ PVR_LOG_GOTO_IF_FALSE(gsCwq.psConfigTune, "DICreateEntry", e0); -+ } -+ -+ /* Register the CacheOp framework (re)configuration handlers */ -+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_CacheOpConfig, -+ CacheOpConfigQuery, -+ CacheOpConfigSet, -+ APPHINT_OF_DRIVER_NO_DEVICE, -+ (void *) APPHINT_ID_CacheOpConfig); -+ -+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_CacheOpUMKMThresholdSize, -+ CacheOpConfigQuery, -+ CacheOpConfigSet, -+ APPHINT_OF_DRIVER_NO_DEVICE, -+ (void *) APPHINT_ID_CacheOpUMKMThresholdSize); -+ -+ return PVRSRV_OK; -+e0: -+ CacheOpDeInit2(); -+ return eError; -+} -+ -+void CacheOpDeInit2 (void) -+{ -+ gsCwq.bInit = IMG_FALSE; -+ -+ if (gsCwq.hConfigLock) -+ { -+ OSLockDestroy(gsCwq.hConfigLock); -+ gsCwq.hConfigLock = NULL; -+ } -+ -+ if (gsCwq.psConfigTune) -+ { -+ DIDestroyEntry(gsCwq.psConfigTune); -+ gsCwq.psConfigTune = NULL; -+ } -+ -+ gsCwq.pui32InfoPage = NULL; -+ gsCwq.psInfoPagePMR = NULL; -+} -+ -+PVRSRV_ERROR CacheOpInit (void) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ gsCwq.uiPageSize = OSGetPageSize(); -+ gsCwq.uiPageShift = OSGetPageShift(); -+ gsCwq.uiLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE); -+ gsCwq.uiLineShift = ExactLog2(gsCwq.uiLineSize); -+ PVR_LOG_RETURN_IF_FALSE((gsCwq.uiLineSize && gsCwq.uiPageSize && gsCwq.uiPageShift), "", PVRSRV_ERROR_INIT_FAILURE); -+ -+#if defined(CACHEOP_DEBUG) -+ /* debugfs file read-out is not concurrent, so lock protects against this */ -+ eError = OSLockCreate((POS_LOCK*)&gsCwq.hStatsExecLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0); -+ -+ gsCwq.i32StatsExecWriteIdx = 0; -+ OSCachedMemSet(gsCwq.asStatsExecuted, 0, sizeof(gsCwq.asStatsExecuted)); -+ -+ { -+ DI_ITERATOR_CB sIterator = {.pfnShow = CacheOpStatsExecLogRead}; -+ /* File captures the most recent subset of CacheOp(s) executed */ -+ eError = DICreateEntry("cacheop_history", NULL, &sIterator, NULL, -+ DI_ENTRY_TYPE_GENERIC, &gsCwq.psDIEntry); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", e0); -+ } -+e0: -+#endif -+ return eError; -+} -+ -+void CacheOpDeInit (void) -+{ -+#if defined(CACHEOP_DEBUG) -+ if (gsCwq.hStatsExecLock) -+ { -+ OSLockDestroy(gsCwq.hStatsExecLock); -+ gsCwq.hStatsExecLock = NULL; -+ } -+ -+ if (gsCwq.psDIEntry) -+ { -+ DIDestroyEntry(gsCwq.psDIEntry); -+ gsCwq.psDIEntry = NULL; -+ } -+#endif -+} -diff --git a/drivers/gpu/drm/img-rogue/cache_km.h b/drivers/gpu/drm/img-rogue/cache_km.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/cache_km.h -@@ -0,0 +1,151 @@ -+/*************************************************************************/ /*! -+@File cache_km.h -+@Title CPU cache management header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef CACHE_KM_H -+#define CACHE_KM_H -+ -+#if defined(__linux__) -+#include <linux/version.h> -+#else -+#define KERNEL_VERSION -+#endif -+ -+#include "pvrsrv_error.h" -+#include "os_cpu_cache.h" -+#include "img_types.h" -+#include "cache_ops.h" -+#include "device.h" -+#include "pmr.h" -+ -+typedef IMG_UINT32 PVRSRV_CACHE_OP_ADDR_TYPE; /*!< Represents CPU address type required for CPU d-cache maintenance */ -+#define PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL 0x1 /*!< Operation requires CPU virtual address only */ -+#define PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL 0x2 /*!< Operation requires CPU physical address only */ -+#define PVRSRV_CACHE_OP_ADDR_TYPE_BOTH 0x3 /*!< Operation requires both CPU virtual & physical addresses */ -+ -+#include "connection_server.h" -+ -+/* -+ * CacheOpInit() & CacheOpDeInit() -+ * -+ * This must be called to initialise the KM cache maintenance framework. -+ * This is called early during the driver/module (un)loading phase. -+ */ -+PVRSRV_ERROR CacheOpInit(void); -+void CacheOpDeInit(void); -+ -+/* -+ * CacheOpInit2() & CacheOpDeInit2() -+ * -+ * This must be called to initialise the UM cache maintenance framework. -+ * This is called when the driver is loaded/unloaded from the kernel.
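The bring-up described in these comments is two-stage and order-sensitive: CacheOpInit() only needs the page and cache-line geometry, while CacheOpInit2() additionally reads PVRSRVGetPVRSRVData() and the info-page PMR, so it can only run once those exist. A minimal sketch of the expected call order, assuming hypothetical load/unload hooks (MyDriverLoad/MyDriverUnload are illustrative names, not part of this patch):

/* Sketch only: MyDriverLoad()/MyDriverUnload() are hypothetical hooks used
 * for illustration; the CacheOp* calls are the ones declared in this header. */
#include "cache_km.h"

static PVRSRV_ERROR MyDriverLoad(void)
{
    PVRSRV_ERROR eError;

    /* Stage 1: KM framework (page/cache-line geometry, debug stats). */
    eError = CacheOpInit();
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    /* Stage 2: UM framework (config lock, info page, AppHint handlers);
     * valid only once PVRSRVGetPVRSRVData() and the info-page PMR exist.
     * CacheOpInit2() cleans up after itself on failure. */
    eError = CacheOpInit2();
    if (eError != PVRSRV_OK)
    {
        CacheOpDeInit();
    }

    return eError;
}

static void MyDriverUnload(void)
{
    /* Tear down in the reverse order of initialisation. */
    CacheOpDeInit2();
    CacheOpDeInit();
}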
-+ */ -+PVRSRV_ERROR CacheOpInit2(void); -+void CacheOpDeInit2(void); -+ -+/* -+ * CacheOpExec() -+ * -+ * This is the primary CPU data-cache maintenance interface and it is -+ * always guaranteed to be synchronous; the arguments supplied must be -+ * pre-validated for performance reasons else the d-cache maintenance -+ * operation might cause the underlying OS kernel to fault. -+ */ -+PVRSRV_ERROR CacheOpExec(PPVRSRV_DEVICE_NODE psDevNode, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd, -+ PVRSRV_CACHE_OP uiCacheOp); -+ -+/* -+ * CacheOpValExec() -+ * -+ * Same as CacheOpExec(), except arguments are _Validated_ before being -+ * presented to the underlying OS kernel for CPU data-cache maintenance. -+ * The uiAddress is the start CPU virtual address for the to-be d-cache -+ * maintained PMR, it can be NULL in which case a remap will be performed -+ * internally, if required for cache maintenance. This is primarily used -+ * as the services client bridge call handler for synchronous user-mode -+ * cache maintenance requests. -+ */ -+PVRSRV_ERROR CacheOpValExec(PMR *psPMR, -+ IMG_UINT64 uiAddress, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ PVRSRV_CACHE_OP uiCacheOp); -+ -+/* -+ * CacheOpQueue() -+ * -+ * This is the secondary cache maintenance interface and it is not -+ * guaranteed to be synchronous in that requests could be deferred -+ * and executed asynchronously. This interface is primarily meant -+ * as services client bridge call handler. Both uiInfoPgGFSeqNum -+ * and ui32[Current,Next]FenceSeqNum implements an internal client -+ * server queueing protocol so making use of this interface outside -+ * of services client is not recommended and should not be done. -+ */ -+PVRSRV_ERROR CacheOpQueue(CONNECTION_DATA *psConnection, -+ PPVRSRV_DEVICE_NODE psDevNode, -+ IMG_UINT32 ui32OpCount, -+ PMR **ppsPMR, -+ IMG_UINT64 *puiAddress, -+ IMG_DEVMEM_OFFSET_T *puiOffset, -+ IMG_DEVMEM_SIZE_T *puiSize, -+ PVRSRV_CACHE_OP *puiCacheOp, -+ IMG_UINT32 ui32OpTimeline); -+ -+/* -+ * CacheOpLog() -+ * -+ * This is used for logging client cache maintenance operations that -+ * was executed in user-space. -+ */ -+PVRSRV_ERROR CacheOpLog(PMR *psPMR, -+ IMG_UINT64 uiAddress, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT64 ui64StartTime, -+ IMG_UINT64 ui64EndTime, -+ PVRSRV_CACHE_OP uiCacheOp); -+ -+#endif /* CACHE_KM_H */ -diff --git a/drivers/gpu/drm/img-rogue/cache_ops.h b/drivers/gpu/drm/img-rogue/cache_ops.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/cache_ops.h -@@ -0,0 +1,61 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services cache management header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Defines for cache management which are visible internally -+ and externally -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
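The CacheOpValExec() contract above leaves the user-mode virtual address optional: passing 0/NULL asks the framework to remap the PMR internally if the selected maintenance path needs a CPU virtual address. A hedged sketch of a synchronous, server-side flush over a whole PMR (psPMR and uiPMRSize are assumed to come from the caller's existing allocation; they are not defined by this patch):

/* Sketch only: the caller is assumed to already hold a valid PMR. */
#include "cache_km.h"
#include "cache_ops.h"

static PVRSRV_ERROR FlushWholePMR(PMR *psPMR, IMG_DEVMEM_SIZE_T uiPMRSize)
{
    /* uiAddress == 0: no user-mode mapping supplied, so the framework may
     * remap the PMR internally if it needs a CPU virtual address. */
    return CacheOpValExec(psPMR,
                          0,                      /* uiAddress */
                          0,                      /* uiOffset */
                          uiPMRSize,              /* uiSize */
                          PVRSRV_CACHE_OP_FLUSH); /* flush = clean w/ invalidate */
}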
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef CACHE_OPS_H -+#define CACHE_OPS_H -+#include "img_types.h" -+/*! -+* @Defgroup CPUCacheAPIs -+* @{ -+*/ -+#define CACHE_BATCH_MAX (8U) -+#define MAX_DMA_OPS (34) -+typedef IMG_UINT32 PVRSRV_CACHE_OP; /*!< Type represents cache maintenance operation */ -+#define PVRSRV_CACHE_OP_NONE 0x0U /*!< No operation */ -+#define PVRSRV_CACHE_OP_CLEAN 0x1U /*!< Flush w/o invalidate */ -+#define PVRSRV_CACHE_OP_INVALIDATE 0x2U /*!< Invalidate w/o flush */ -+#define PVRSRV_CACHE_OP_FLUSH 0x3U /*!< Flush w/ invalidate */ -+/*! @} End of Defgroup CPUCacheAPIs */ -+ -+#endif /* CACHE_OPS_H */ -diff --git a/drivers/gpu/drm/img-rogue/client_cache_bridge.h b/drivers/gpu/drm/img-rogue/client_cache_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/client_cache_bridge.h -@@ -0,0 +1,80 @@ -+/******************************************************************************* -+@File -+@Title Client bridge header for cache -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Exports the client bridge functions for cache -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef CLIENT_CACHE_BRIDGE_H -+#define CLIENT_CACHE_BRIDGE_H -+ -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+ -+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) -+#include "pvr_bridge_client.h" -+#include "pvr_bridge.h" -+#endif -+ -+#include "common_cache_bridge.h" -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpQueue(IMG_HANDLE hBridge, -+ IMG_UINT32 ui32NumCacheOps, -+ IMG_HANDLE * phPMR, -+ IMG_UINT64 * pui64Address, -+ IMG_DEVMEM_OFFSET_T * puiOffset, -+ IMG_DEVMEM_SIZE_T * puiSize, -+ PVRSRV_CACHE_OP * piuCacheOp, -+ IMG_UINT32 ui32OpTimeline); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpExec(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, -+ IMG_UINT64 ui64Address, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, PVRSRV_CACHE_OP iuCacheOp); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpLog(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, -+ IMG_UINT64 ui64Address, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_INT64 i64StartTime, -+ IMG_INT64 i64EndTime, PVRSRV_CACHE_OP iuCacheOp); -+ -+#endif /* CLIENT_CACHE_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/client_cache_direct_bridge.c b/drivers/gpu/drm/img-rogue/client_cache_direct_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/client_cache_direct_bridge.c -@@ -0,0 +1,112 @@ -+/******************************************************************************* -+@File -+@Title Direct client bridge for cache -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the client side of the bridge for cache -+ which is used in calls from Server context. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#include "client_cache_bridge.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+ -+/* Module specific includes */ -+#include "cache_ops.h" -+ -+#include "cache_km.h" -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpQueue(IMG_HANDLE hBridge, -+ IMG_UINT32 ui32NumCacheOps, -+ IMG_HANDLE * phPMR, -+ IMG_UINT64 * pui64Address, -+ IMG_DEVMEM_OFFSET_T * puiOffset, -+ IMG_DEVMEM_SIZE_T * puiSize, -+ PVRSRV_CACHE_OP * piuCacheOp, -+ IMG_UINT32 ui32OpTimeline) -+{ -+ PVRSRV_ERROR eError; -+ PMR **psPMRInt; -+ -+ psPMRInt = (PMR **) phPMR; -+ -+ eError = -+ CacheOpQueue(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ ui32NumCacheOps, -+ psPMRInt, pui64Address, puiOffset, puiSize, piuCacheOp, ui32OpTimeline); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpExec(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, -+ IMG_UINT64 ui64Address, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, PVRSRV_CACHE_OP iuCacheOp) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psPMRInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psPMRInt = (PMR *) hPMR; -+ -+ eError = CacheOpValExec(psPMRInt, ui64Address, uiOffset, uiSize, iuCacheOp); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpLog(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, -+ IMG_UINT64 ui64Address, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_INT64 i64StartTime, -+ IMG_INT64 i64EndTime, PVRSRV_CACHE_OP iuCacheOp) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psPMRInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psPMRInt = (PMR *) hPMR; -+ -+ eError = -+ CacheOpLog(psPMRInt, -+ ui64Address, uiOffset, uiSize, i64StartTime, i64EndTime, iuCacheOp); -+ -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/client_devicememhistory_bridge.h b/drivers/gpu/drm/img-rogue/client_devicememhistory_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/client_devicememhistory_bridge.h -@@ -0,0 +1,111 @@ -+/******************************************************************************* -+@File -+@Title Client bridge header for devicememhistory -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Exports the client bridge functions for devicememhistory -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
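In the direct cache bridge above the handle plumbing is deliberately thin: hBridge is only consumed by BridgeCacheOpQueue(), where it is cast straight to the PVRSRV_DEVICE_NODE, and the PMR handles are cast back to PMR pointers. A hedged sketch of queueing a single clean from server context (psDevNode, psPMR and uiSize are assumed to be held by the caller; they are not part of this patch):

/* Sketch only: caller-owned device node, PMR and size are assumed. */
#include "client_cache_bridge.h"
#include "cache_ops.h"

static PVRSRV_ERROR CleanOnePMR(PVRSRV_DEVICE_NODE *psDevNode,
                                PMR *psPMR,
                                IMG_DEVMEM_SIZE_T uiSize)
{
    IMG_HANDLE hPMR = (IMG_HANDLE) psPMR;
    IMG_UINT64 ui64Address = 0;       /* no UM mapping; remap internally if needed */
    IMG_DEVMEM_OFFSET_T uiOffset = 0;
    PVRSRV_CACHE_OP uiCacheOp = PVRSRV_CACHE_OP_CLEAN;

    /* The timeline path in CacheOpQueue() only triggers when the last op
     * carries the PVRSRV_CACHE_OP_TIMELINE flag, so the final argument is
     * not consumed here. */
    return BridgeCacheOpQueue((IMG_HANDLE) psDevNode,
                              1,              /* ui32NumCacheOps */
                              &hPMR,
                              &ui64Address,
                              &uiOffset,
                              &uiSize,
                              &uiCacheOp,
                              0);             /* ui32OpTimeline */
}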
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef CLIENT_DEVICEMEMHISTORY_BRIDGE_H -+#define CLIENT_DEVICEMEMHISTORY_BRIDGE_H -+ -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+ -+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) -+#include "pvr_bridge_client.h" -+#include "pvr_bridge.h" -+#endif -+ -+#include "common_devicememhistory_bridge.h" -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMap(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, -+ IMG_DEVMEM_SIZE_T uiOffset, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR * puiText, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 * pui32AllocationIndexOut); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, -+ IMG_DEVMEM_SIZE_T uiOffset, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR * puiText, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 * pui32AllocationIndexOut); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge, -+ IMG_DEV_VIRTADDR sBaseDevVAddr, -+ IMG_UINT32 ui32ui32StartPage, -+ IMG_UINT32 ui32NumPages, -+ IMG_DEVMEM_SIZE_T uiAllocSize, -+ const IMG_CHAR * puiText, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 * pui32AllocationIndexOut); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge, -+ IMG_DEV_VIRTADDR sBaseDevVAddr, -+ IMG_UINT32 ui32ui32StartPage, -+ IMG_UINT32 ui32NumPages, -+ IMG_DEVMEM_SIZE_T uiAllocSize, -+ const IMG_CHAR * puiText, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 * pui32AllocationIndexOut); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, -+ IMG_DEVMEM_SIZE_T uiOffset, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR * puiText, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 * pui32AllocPageIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 * pui32FreePageIndices, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 * pui32AllocationIndexOut); -+ -+#endif /* CLIENT_DEVICEMEMHISTORY_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/client_devicememhistory_direct_bridge.c b/drivers/gpu/drm/img-rogue/client_devicememhistory_direct_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/client_devicememhistory_direct_bridge.c -@@ -0,0 +1,195 @@ -+/******************************************************************************* -+@File -+@Title Direct client bridge for devicememhistory -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the client side of the bridge for devicememhistory -+ which is used in calls from Server context. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include "client_devicememhistory_bridge.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+ -+/* Module specific includes */ -+#include "img_types.h" -+#include "img_defs.h" -+#include "devicemem_typedefs.h" -+ -+#include "pmr.h" -+#include "devicemem_history_server.h" -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMap(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, -+ IMG_DEVMEM_SIZE_T uiOffset, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR * puiText, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 * pui32AllocationIndexOut) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psPMRInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psPMRInt = (PMR *) hPMR; -+ -+ eError = -+ DevicememHistoryMapKM(psPMRInt, -+ uiOffset, -+ sDevVAddr, -+ uiSize, -+ puiText, -+ ui32Log2PageSize, ui32AllocationIndex, pui32AllocationIndexOut); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, -+ IMG_DEVMEM_SIZE_T uiOffset, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR * puiText, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 * pui32AllocationIndexOut) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psPMRInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psPMRInt = (PMR *) hPMR; -+ -+ eError = -+ DevicememHistoryUnmapKM(psPMRInt, -+ uiOffset, -+ sDevVAddr, -+ uiSize, -+ puiText, -+ ui32Log2PageSize, ui32AllocationIndex, pui32AllocationIndexOut); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge, -+ IMG_DEV_VIRTADDR sBaseDevVAddr, -+ IMG_UINT32 ui32ui32StartPage, -+ IMG_UINT32 ui32NumPages, -+ IMG_DEVMEM_SIZE_T uiAllocSize, -+ const IMG_CHAR * puiText, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 * pui32AllocationIndexOut) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = -+ DevicememHistoryMapVRangeKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ sBaseDevVAddr, -+ ui32ui32StartPage, -+ ui32NumPages, -+ uiAllocSize, -+ puiText, -+ ui32Log2PageSize, -+ ui32AllocationIndex, pui32AllocationIndexOut); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge, -+ IMG_DEV_VIRTADDR sBaseDevVAddr, -+ IMG_UINT32 ui32ui32StartPage, -+ IMG_UINT32 ui32NumPages, -+ IMG_DEVMEM_SIZE_T uiAllocSize, -+ const IMG_CHAR * puiText, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 * pui32AllocationIndexOut) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = -+ DevicememHistoryUnmapVRangeKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ sBaseDevVAddr, -+ ui32ui32StartPage, -+ ui32NumPages, -+ uiAllocSize, -+ puiText, -+ ui32Log2PageSize, -+ ui32AllocationIndex, pui32AllocationIndexOut); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, -+ IMG_DEVMEM_SIZE_T uiOffset, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR * puiText, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 * pui32AllocPageIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 * pui32FreePageIndices, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 * pui32AllocationIndexOut) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psPMRInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psPMRInt = (PMR *) hPMR; -+ -+ eError 
= -+ DevicememHistorySparseChangeKM(psPMRInt, -+ uiOffset, -+ sDevVAddr, -+ uiSize, -+ puiText, -+ ui32Log2PageSize, -+ ui32AllocPageCount, -+ pui32AllocPageIndices, -+ ui32FreePageCount, -+ pui32FreePageIndices, -+ ui32AllocationIndex, pui32AllocationIndexOut); -+ -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/client_htbuffer_bridge.h b/drivers/gpu/drm/img-rogue/client_htbuffer_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/client_htbuffer_bridge.h -@@ -0,0 +1,64 @@ -+/******************************************************************************* -+@File -+@Title Client bridge header for htbuffer -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Exports the client bridge functions for htbuffer -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
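As with the cache bridge, the devicememhistory direct bridge is a cast-and-forward layer: the PMR-based entry points ignore hBridge entirely, while the VRange variants cast it to the PVRSRV_DEVICE_NODE. A hedged sketch of recording a map/unmap pair, threading the allocation index returned by the first call into the second (psPMR, sDevVAddr, uiSize, ui32Log2PageSize and ui32Index are assumed to be tracked by the caller; the annotation string is purely illustrative):

/* Sketch only: all inputs are assumed to be tracked by the caller. */
#include "client_devicememhistory_bridge.h"

static PVRSRV_ERROR RecordMapThenUnmap(PMR *psPMR,
                                       IMG_DEV_VIRTADDR sDevVAddr,
                                       IMG_DEVMEM_SIZE_T uiSize,
                                       IMG_UINT32 ui32Log2PageSize,
                                       IMG_UINT32 ui32Index)
{
    PVRSRV_ERROR eError;

    /* hBridge is unused by the PMR-based entry points, so NULL is passed. */
    eError = BridgeDevicememHistoryMap(NULL,
                                       (IMG_HANDLE) psPMR,
                                       0,                /* uiOffset */
                                       sDevVAddr,
                                       uiSize,
                                       "ExampleAlloc",   /* illustrative text */
                                       ui32Log2PageSize,
                                       ui32Index,
                                       &ui32Index);      /* updated index out */
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    /* ...later, record the corresponding unmap against the same index. */
    return BridgeDevicememHistoryUnmap(NULL,
                                       (IMG_HANDLE) psPMR,
                                       0,
                                       sDevVAddr,
                                       uiSize,
                                       "ExampleAlloc",
                                       ui32Log2PageSize,
                                       ui32Index,
                                       &ui32Index);
}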
-+*******************************************************************************/ -+ -+#ifndef CLIENT_HTBUFFER_BRIDGE_H -+#define CLIENT_HTBUFFER_BRIDGE_H -+ -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+ -+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) -+#include "pvr_bridge_client.h" -+#include "pvr_bridge.h" -+#endif -+ -+#include "common_htbuffer_bridge.h" -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeHTBControl(IMG_HANDLE hBridge, -+ IMG_UINT32 ui32NumGroups, -+ IMG_UINT32 * pui32GroupEnable, -+ IMG_UINT32 ui32LogLevel, -+ IMG_UINT32 ui32EnablePID, -+ IMG_UINT32 ui32LogMode, IMG_UINT32 ui32OpMode); -+ -+#endif /* CLIENT_HTBUFFER_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/client_htbuffer_direct_bridge.c b/drivers/gpu/drm/img-rogue/client_htbuffer_direct_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/client_htbuffer_direct_bridge.c -@@ -0,0 +1,70 @@ -+/******************************************************************************* -+@File -+@Title Direct client bridge for htbuffer -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the client side of the bridge for htbuffer -+ which is used in calls from Server context. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include "client_htbuffer_bridge.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+ -+/* Module specific includes */ -+#include "devicemem_typedefs.h" -+#include "htbuffer_types.h" -+ -+#include "htbserver.h" -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeHTBControl(IMG_HANDLE hBridge, -+ IMG_UINT32 ui32NumGroups, -+ IMG_UINT32 * pui32GroupEnable, -+ IMG_UINT32 ui32LogLevel, -+ IMG_UINT32 ui32EnablePID, -+ IMG_UINT32 ui32LogMode, IMG_UINT32 ui32OpMode) -+{ -+ PVRSRV_ERROR eError; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ eError = -+ HTBControlKM(ui32NumGroups, -+ pui32GroupEnable, ui32LogLevel, ui32EnablePID, ui32LogMode, ui32OpMode); -+ -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/client_mm_bridge.h b/drivers/gpu/drm/img-rogue/client_mm_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/client_mm_bridge.h -@@ -0,0 +1,243 @@ -+/******************************************************************************* -+@File -+@Title Client bridge header for mm -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Exports the client bridge functions for mm -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef CLIENT_MM_BRIDGE_H -+#define CLIENT_MM_BRIDGE_H -+ -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+ -+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) -+#include "pvr_bridge_client.h" -+#include "pvr_bridge.h" -+#endif -+ -+#include "common_mm_bridge.h" -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRExportPMR(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, -+ IMG_HANDLE * phPMRExport, -+ IMG_UINT64 * pui64Size, -+ IMG_UINT32 * pui32Log2Contig, -+ IMG_UINT64 * pui64Password); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnexportPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMRExport); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRGetUID(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, IMG_UINT64 * pui64UID); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRMakeLocalImportHandle(IMG_HANDLE hBridge, -+ IMG_HANDLE hBuffer, IMG_HANDLE * phExtMem); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge, IMG_HANDLE hExtMem); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRImportPMR(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMRExport, -+ IMG_UINT64 ui64uiPassword, -+ IMG_UINT64 ui64uiSize, -+ IMG_UINT32 ui32uiLog2Contig, IMG_HANDLE * phPMR); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRLocalImportPMR(IMG_HANDLE hBridge, -+ IMG_HANDLE hExtHandle, -+ IMG_HANDLE * phPMR, -+ IMG_DEVMEM_SIZE_T * puiSize, -+ IMG_DEVMEM_ALIGN_T * puiAlign); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 * pui32MappingTable, -+ IMG_UINT32 ui32Log2PageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 ui32AnnotationLength, -+ const IMG_CHAR * puiAnnotation, -+ IMG_PID ui32PID, -+ IMG_HANDLE * phPMRPtr, -+ IMG_UINT32 ui32PDumpFlags, -+ PVRSRV_MEMALLOCFLAGS_T * puiOutFlags); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge, -+ IMG_BOOL bbKernelMemoryCtx, -+ IMG_HANDLE * phDevMemServerContext, -+ IMG_HANDLE * phPrivData, -+ IMG_UINT32 * pui32CPUCacheLineSize); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemServerContext); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemCtx, -+ IMG_UINT32 ui32HeapConfigIndex, -+ IMG_UINT32 ui32HeapIndex, -+ IMG_DEV_VIRTADDR sHeapBaseAddr, -+ IMG_UINT32 ui32Log2DataPageSize, -+ IMG_HANDLE * phDevmemHeapPtr); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge, IMG_HANDLE hDevmemHeap); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPMR(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemServerHeap, -+ IMG_HANDLE hReservation, -+ IMG_HANDLE hPMR, -+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags, -+ IMG_HANDLE * phMapping); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge, IMG_HANDLE hMapping); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRange(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemServerHeap, -+ IMG_DEV_VIRTADDR sAddress, -+ IMG_DEVMEM_SIZE_T uiLength, -+ IMG_HANDLE * phReservation); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRangeAndMapPMR(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemServerHeap, -+ IMG_DEV_VIRTADDR sAddress, -+ IMG_DEVMEM_SIZE_T uiLength, -+ IMG_HANDLE hPMR, -+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags, -+ IMG_HANDLE * phMapping); -+ -+IMG_INTERNAL 
PVRSRV_ERROR BridgeDevmemIntUnreserveRangeAndUnmapPMR(IMG_HANDLE hBridge, -+ IMG_HANDLE hMapping); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge, -+ IMG_HANDLE hReservation); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeChangeSparseMem(IMG_HANDLE hBridge, -+ IMG_HANDLE hSrvDevMemHeap, -+ IMG_HANDLE hPMR, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 * pui32AllocPageIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 * pui32FreePageIndices, -+ IMG_UINT32 ui32SparseFlags, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_UINT64 ui64CPUVAddr); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemCtx, -+ IMG_DEV_VIRTADDR sAddress); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemCtx, -+ IMG_UINT64 ui64FBSCEntries); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigCount(IMG_HANDLE hBridge, -+ IMG_UINT32 * pui32NumHeapConfigs); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapCount(IMG_HANDLE hBridge, -+ IMG_UINT32 ui32HeapConfigIndex, -+ IMG_UINT32 * pui32NumHeaps); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge, -+ IMG_UINT32 ui32HeapConfigIndex, -+ IMG_UINT32 ui32HeapConfigNameBufSz, -+ IMG_CHAR * puiHeapConfigName); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge, -+ IMG_UINT32 ui32HeapConfigIndex, -+ IMG_UINT32 ui32HeapIndex, -+ IMG_UINT32 ui32HeapNameBufSz, -+ IMG_CHAR * puiHeapNameOut, -+ IMG_DEV_VIRTADDR * psDevVAddrBase, -+ IMG_DEVMEM_SIZE_T * puiHeapLength, -+ IMG_DEVMEM_SIZE_T * puiReservedRegionLength, -+ IMG_UINT32 * pui32Log2DataPageSizeOut, -+ IMG_UINT32 * pui32Log2ImportAlignmentOut); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemCtx, -+ IMG_BOOL bRegister); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfo(IMG_HANDLE hBridge, -+ IMG_UINT32 ui32PhysHeapCount, -+ PVRSRV_PHYS_HEAP * peaPhysHeapID, -+ PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeGetDefaultPhysicalHeap(IMG_HANDLE hBridge, -+ PVRSRV_PHYS_HEAP * peHeap); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemCtx, -+ IMG_DEV_VIRTADDR * psFaultAddress); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVStatsUpdateOOMStat(IMG_HANDLE hBridge, -+ IMG_UINT32 ui32ui32StatType, -+ IMG_PID ui32pid); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntReserveRange(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemServerHeap, -+ IMG_DEV_VIRTADDR sAddress, -+ IMG_DEVMEM_SIZE_T uiLength, -+ IMG_HANDLE * phReservation); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnreserveRange(IMG_HANDLE hBridge, -+ IMG_HANDLE hReservation); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntMapPages(IMG_HANDLE hBridge, -+ IMG_HANDLE hReservation, -+ IMG_HANDLE hPMR, -+ IMG_UINT32 ui32PageCount, -+ IMG_UINT32 ui32PhysPageOffset, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 ui32VirtPageOffset); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnmapPages(IMG_HANDLE hBridge, -+ IMG_HANDLE hReservation, -+ IMG_UINT32 ui32VirtPageOffset, -+ IMG_UINT32 ui32PageCount); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntMapVRangeToBackingPage(IMG_HANDLE hBridge, -+ IMG_HANDLE hReservation, -+ IMG_UINT32 ui32PageCount, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 ui32VirtPageOffset); -+ -+#endif /* CLIENT_MM_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/client_mm_direct_bridge.c 
b/drivers/gpu/drm/img-rogue/client_mm_direct_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/client_mm_direct_bridge.c -@@ -0,0 +1,752 @@ -+/******************************************************************************* -+@File -+@Title Direct client bridge for mm -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the client side of the bridge for mm -+ which is used in calls from Server context. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include "client_mm_bridge.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+ -+/* Module specific includes */ -+#include "pvrsrv_memallocflags.h" -+#include "pvrsrv_memalloc_physheap.h" -+#include "devicemem_typedefs.h" -+ -+#include "pvrsrv_memalloc_physheap.h" -+#include "devicemem.h" -+#include "devicemem_server.h" -+#include "pmr.h" -+#include "devicemem_heapcfg.h" -+#include "physmem.h" -+#include "devicemem_utils.h" -+#include "process_stats.h" -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRExportPMR(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, -+ IMG_HANDLE * phPMRExport, -+ IMG_UINT64 * pui64Size, -+ IMG_UINT32 * pui32Log2Contig, -+ IMG_UINT64 * pui64Password) -+{ -+#if defined(SUPPORT_INSECURE_EXPORT) -+ PVRSRV_ERROR eError; -+ PMR *psPMRInt; -+ PMR_EXPORT *psPMRExportInt = NULL; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psPMRInt = (PMR *) hPMR; -+ -+ eError = PMRExportPMR(psPMRInt, &psPMRExportInt, pui64Size, pui32Log2Contig, pui64Password); -+ -+ *phPMRExport = psPMRExportInt; -+ return eError; -+#else -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ PVR_UNREFERENCED_PARAMETER(hPMR); -+ PVR_UNREFERENCED_PARAMETER(phPMRExport); -+ PVR_UNREFERENCED_PARAMETER(pui64Size); -+ PVR_UNREFERENCED_PARAMETER(pui32Log2Contig); -+ PVR_UNREFERENCED_PARAMETER(pui64Password); -+ -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+#endif -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnexportPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMRExport) -+{ -+#if defined(SUPPORT_INSECURE_EXPORT) -+ PVRSRV_ERROR eError; -+ PMR_EXPORT *psPMRExportInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psPMRExportInt = (PMR_EXPORT *) hPMRExport; -+ -+ eError = PMRUnexportPMR(psPMRExportInt); -+ -+ return eError; -+#else -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ PVR_UNREFERENCED_PARAMETER(hPMRExport); -+ -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+#endif -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRGetUID(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, IMG_UINT64 * pui64UID) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psPMRInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psPMRInt = (PMR *) hPMR; -+ -+ eError = PMRGetUID(psPMRInt, pui64UID); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRMakeLocalImportHandle(IMG_HANDLE hBridge, -+ IMG_HANDLE hBuffer, IMG_HANDLE * phExtMem) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psBufferInt; -+ PMR *psExtMemInt = NULL; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psBufferInt = (PMR *) hBuffer; -+ -+ eError = PMRMakeLocalImportHandle(psBufferInt, &psExtMemInt); -+ -+ *phExtMem = psExtMemInt; -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge, IMG_HANDLE hExtMem) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psExtMemInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psExtMemInt = (PMR *) hExtMem; -+ -+ eError = PMRUnmakeLocalImportHandle(psExtMemInt); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRImportPMR(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMRExport, -+ IMG_UINT64 ui64uiPassword, -+ IMG_UINT64 ui64uiSize, -+ IMG_UINT32 ui32uiLog2Contig, IMG_HANDLE * phPMR) -+{ -+#if defined(SUPPORT_INSECURE_EXPORT) -+ PVRSRV_ERROR eError; -+ PMR_EXPORT *psPMRExportInt; -+ PMR *psPMRInt = NULL; -+ -+ psPMRExportInt = (PMR_EXPORT *) hPMRExport; -+ -+ eError = -+ PhysmemImportPMR(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ psPMRExportInt, -+ ui64uiPassword, ui64uiSize, ui32uiLog2Contig, &psPMRInt); -+ -+ *phPMR = psPMRInt; -+ return eError; -+#else -+ 
PVR_UNREFERENCED_PARAMETER(hPMRExport); -+ PVR_UNREFERENCED_PARAMETER(ui64uiPassword); -+ PVR_UNREFERENCED_PARAMETER(ui64uiSize); -+ PVR_UNREFERENCED_PARAMETER(ui32uiLog2Contig); -+ PVR_UNREFERENCED_PARAMETER(phPMR); -+ -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+#endif -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRLocalImportPMR(IMG_HANDLE hBridge, -+ IMG_HANDLE hExtHandle, -+ IMG_HANDLE * phPMR, -+ IMG_DEVMEM_SIZE_T * puiSize, -+ IMG_DEVMEM_ALIGN_T * puiAlign) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psExtHandleInt; -+ PMR *psPMRInt = NULL; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psExtHandleInt = (PMR *) hExtHandle; -+ -+ eError = PMRLocalImportPMR(psExtHandleInt, &psPMRInt, puiSize, puiAlign); -+ -+ *phPMR = psPMRInt; -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psPMRInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psPMRInt = (PMR *) hPMR; -+ -+ eError = PMRUnrefPMR(psPMRInt); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psPMRInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psPMRInt = (PMR *) hPMR; -+ -+ eError = PMRUnrefUnlockPMR(psPMRInt); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 * pui32MappingTable, -+ IMG_UINT32 ui32Log2PageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 ui32AnnotationLength, -+ const IMG_CHAR * puiAnnotation, -+ IMG_PID ui32PID, -+ IMG_HANDLE * phPMRPtr, -+ IMG_UINT32 ui32PDumpFlags, -+ PVRSRV_MEMALLOCFLAGS_T * puiOutFlags) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psPMRPtrInt = NULL; -+ -+ eError = -+ PhysmemNewRamBackedPMR_direct(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ uiSize, -+ ui32NumPhysChunks, -+ ui32NumVirtChunks, -+ pui32MappingTable, -+ ui32Log2PageSize, -+ uiFlags, -+ ui32AnnotationLength, -+ puiAnnotation, -+ ui32PID, &psPMRPtrInt, ui32PDumpFlags, puiOutFlags); -+ -+ *phPMRPtr = psPMRPtrInt; -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge, -+ IMG_BOOL bbKernelMemoryCtx, -+ IMG_HANDLE * phDevMemServerContext, -+ IMG_HANDLE * phPrivData, -+ IMG_UINT32 * pui32CPUCacheLineSize) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_CTX *psDevMemServerContextInt = NULL; -+ IMG_HANDLE hPrivDataInt = NULL; -+ -+ eError = -+ DevmemIntCtxCreate(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ bbKernelMemoryCtx, -+ &psDevMemServerContextInt, &hPrivDataInt, pui32CPUCacheLineSize); -+ -+ *phDevMemServerContext = psDevMemServerContextInt; -+ *phPrivData = hPrivDataInt; -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemServerContext) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_CTX *psDevmemServerContextInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext; -+ -+ eError = DevmemIntCtxDestroy(psDevmemServerContextInt); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemCtx, -+ IMG_UINT32 ui32HeapConfigIndex, -+ IMG_UINT32 ui32HeapIndex, -+ IMG_DEV_VIRTADDR sHeapBaseAddr, -+ IMG_UINT32 ui32Log2DataPageSize, -+ IMG_HANDLE * phDevmemHeapPtr) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_CTX *psDevmemCtxInt; -+ DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL; -+ 
PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; -+ -+ eError = -+ DevmemIntHeapCreate(psDevmemCtxInt, -+ ui32HeapConfigIndex, -+ ui32HeapIndex, -+ sHeapBaseAddr, ui32Log2DataPageSize, &psDevmemHeapPtrInt); -+ -+ *phDevmemHeapPtr = psDevmemHeapPtrInt; -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge, IMG_HANDLE hDevmemHeap) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_HEAP *psDevmemHeapInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psDevmemHeapInt = (DEVMEMINT_HEAP *) hDevmemHeap; -+ -+ eError = DevmemIntHeapDestroy(psDevmemHeapInt); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPMR(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemServerHeap, -+ IMG_HANDLE hReservation, -+ IMG_HANDLE hPMR, -+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags, -+ IMG_HANDLE * phMapping) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_HEAP *psDevmemServerHeapInt; -+ DEVMEMINT_RESERVATION *psReservationInt; -+ PMR *psPMRInt; -+ DEVMEMINT_MAPPING *psMappingInt = NULL; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; -+ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; -+ psPMRInt = (PMR *) hPMR; -+ -+ eError = -+ DevmemIntMapPMR(psDevmemServerHeapInt, -+ psReservationInt, psPMRInt, uiMapFlags, &psMappingInt); -+ -+ *phMapping = psMappingInt; -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge, IMG_HANDLE hMapping) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_MAPPING *psMappingInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psMappingInt = (DEVMEMINT_MAPPING *) hMapping; -+ -+ eError = DevmemIntUnmapPMR(psMappingInt); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRange(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemServerHeap, -+ IMG_DEV_VIRTADDR sAddress, -+ IMG_DEVMEM_SIZE_T uiLength, -+ IMG_HANDLE * phReservation) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_HEAP *psDevmemServerHeapInt; -+ DEVMEMINT_RESERVATION *psReservationInt = NULL; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; -+ -+ eError = -+ DevmemIntReserveRange(psDevmemServerHeapInt, sAddress, uiLength, &psReservationInt); -+ -+ *phReservation = psReservationInt; -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRangeAndMapPMR(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemServerHeap, -+ IMG_DEV_VIRTADDR sAddress, -+ IMG_DEVMEM_SIZE_T uiLength, -+ IMG_HANDLE hPMR, -+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags, -+ IMG_HANDLE * phMapping) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_HEAP *psDevmemServerHeapInt; -+ PMR *psPMRInt; -+ DEVMEMINT_MAPPING *psMappingInt = NULL; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; -+ psPMRInt = (PMR *) hPMR; -+ -+ eError = -+ DevmemIntReserveRangeAndMapPMR(psDevmemServerHeapInt, -+ sAddress, uiLength, psPMRInt, uiMapFlags, &psMappingInt); -+ -+ *phMapping = psMappingInt; -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnreserveRangeAndUnmapPMR(IMG_HANDLE hBridge, -+ IMG_HANDLE hMapping) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_MAPPING *psMappingInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psMappingInt = (DEVMEMINT_MAPPING *) hMapping; -+ -+ eError = DevmemIntUnreserveRangeAndUnmapPMR(psMappingInt); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge, IMG_HANDLE hReservation) -+{ -+ PVRSRV_ERROR 
eError; -+ DEVMEMINT_RESERVATION *psReservationInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; -+ -+ eError = DevmemIntUnreserveRange(psReservationInt); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeChangeSparseMem(IMG_HANDLE hBridge, -+ IMG_HANDLE hSrvDevMemHeap, -+ IMG_HANDLE hPMR, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 * pui32AllocPageIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 * pui32FreePageIndices, -+ IMG_UINT32 ui32SparseFlags, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_DEV_VIRTADDR sDevVAddr, IMG_UINT64 ui64CPUVAddr) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_HEAP *psSrvDevMemHeapInt; -+ PMR *psPMRInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psSrvDevMemHeapInt = (DEVMEMINT_HEAP *) hSrvDevMemHeap; -+ psPMRInt = (PMR *) hPMR; -+ -+ eError = -+ DevmemIntChangeSparse(psSrvDevMemHeapInt, -+ psPMRInt, -+ ui32AllocPageCount, -+ pui32AllocPageIndices, -+ ui32FreePageCount, -+ pui32FreePageIndices, -+ ui32SparseFlags, uiFlags, sDevVAddr, ui64CPUVAddr); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemCtx, -+ IMG_DEV_VIRTADDR sAddress) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_CTX *psDevmemCtxInt; -+ -+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; -+ -+ eError = -+ DevmemIntIsVDevAddrValid(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ psDevmemCtxInt, sAddress); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemCtx, -+ IMG_UINT64 ui64FBSCEntries) -+{ -+#if defined(RGX_FEATURE_FBCDC) -+ PVRSRV_ERROR eError; -+ DEVMEMINT_CTX *psDevmemCtxInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; -+ -+ eError = DevmemIntInvalidateFBSCTable(psDevmemCtxInt, ui64FBSCEntries); -+ -+ return eError; -+#else -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ PVR_UNREFERENCED_PARAMETER(hDevmemCtx); -+ PVR_UNREFERENCED_PARAMETER(ui64FBSCEntries); -+ -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+#endif -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigCount(IMG_HANDLE hBridge, -+ IMG_UINT32 * pui32NumHeapConfigs) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = -+ HeapCfgHeapConfigCount(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ pui32NumHeapConfigs); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapCount(IMG_HANDLE hBridge, -+ IMG_UINT32 ui32HeapConfigIndex, -+ IMG_UINT32 * pui32NumHeaps) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = -+ HeapCfgHeapCount(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ ui32HeapConfigIndex, pui32NumHeaps); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge, -+ IMG_UINT32 ui32HeapConfigIndex, -+ IMG_UINT32 ui32HeapConfigNameBufSz, -+ IMG_CHAR * puiHeapConfigName) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = -+ HeapCfgHeapConfigName(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ ui32HeapConfigIndex, ui32HeapConfigNameBufSz, puiHeapConfigName); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge, -+ IMG_UINT32 ui32HeapConfigIndex, -+ IMG_UINT32 ui32HeapIndex, -+ IMG_UINT32 ui32HeapNameBufSz, -+ IMG_CHAR * puiHeapNameOut, -+ IMG_DEV_VIRTADDR * psDevVAddrBase, -+ IMG_DEVMEM_SIZE_T * puiHeapLength, -+ IMG_DEVMEM_SIZE_T * puiReservedRegionLength, -+ IMG_UINT32 * pui32Log2DataPageSizeOut, -+ IMG_UINT32 * pui32Log2ImportAlignmentOut) -+{ -+ PVRSRV_ERROR eError; -+ -+ 
eError = -+ HeapCfgHeapDetails(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ ui32HeapConfigIndex, -+ ui32HeapIndex, -+ ui32HeapNameBufSz, -+ puiHeapNameOut, -+ psDevVAddrBase, -+ puiHeapLength, -+ puiReservedRegionLength, -+ pui32Log2DataPageSizeOut, pui32Log2ImportAlignmentOut); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemCtx, -+ IMG_BOOL bRegister) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_CTX *psDevmemCtxInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; -+ -+ eError = DevmemIntRegisterPFNotifyKM(psDevmemCtxInt, bRegister); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfo(IMG_HANDLE hBridge, -+ IMG_UINT32 ui32PhysHeapCount, -+ PVRSRV_PHYS_HEAP * peaPhysHeapID, -+ PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = -+ PVRSRVPhysHeapGetMemInfoKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ ui32PhysHeapCount, peaPhysHeapID, pasapPhysHeapMemStats); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeGetDefaultPhysicalHeap(IMG_HANDLE hBridge, -+ PVRSRV_PHYS_HEAP * peHeap) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = -+ PVRSRVGetDefaultPhysicalHeapKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), peHeap); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemCtx, -+ IMG_DEV_VIRTADDR * psFaultAddress) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_CTX *psDevmemCtxInt; -+ -+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; -+ -+ eError = -+ DevmemIntGetFaultAddress(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ psDevmemCtxInt, psFaultAddress); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVStatsUpdateOOMStat(IMG_HANDLE hBridge, -+ IMG_UINT32 ui32ui32StatType, -+ IMG_PID ui32pid) -+{ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ PVRSRV_ERROR eError; -+ -+ eError = -+ PVRSRVStatsUpdateOOMStat(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ ui32ui32StatType, ui32pid); -+ -+ return eError; -+#else -+ PVR_UNREFERENCED_PARAMETER(ui32ui32StatType); -+ PVR_UNREFERENCED_PARAMETER(ui32pid); -+ -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+#endif -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntReserveRange(IMG_HANDLE hBridge, -+ IMG_HANDLE hDevmemServerHeap, -+ IMG_DEV_VIRTADDR sAddress, -+ IMG_DEVMEM_SIZE_T uiLength, -+ IMG_HANDLE * phReservation) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_HEAP *psDevmemServerHeapInt; -+ DEVMEMXINT_RESERVATION *psReservationInt = NULL; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; -+ -+ eError = -+ DevmemXIntReserveRange(psDevmemServerHeapInt, sAddress, uiLength, &psReservationInt); -+ -+ *phReservation = psReservationInt; -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnreserveRange(IMG_HANDLE hBridge, -+ IMG_HANDLE hReservation) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMXINT_RESERVATION *psReservationInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation; -+ -+ eError = DevmemXIntUnreserveRange(psReservationInt); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntMapPages(IMG_HANDLE hBridge, -+ IMG_HANDLE hReservation, -+ IMG_HANDLE hPMR, -+ IMG_UINT32 ui32PageCount, -+ IMG_UINT32 ui32PhysPageOffset, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 ui32VirtPageOffset) -+{ -+ PVRSRV_ERROR eError; -+ 
DEVMEMXINT_RESERVATION *psReservationInt; -+ PMR *psPMRInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation; -+ psPMRInt = (PMR *) hPMR; -+ -+ eError = -+ DevmemXIntMapPages(psReservationInt, -+ psPMRInt, -+ ui32PageCount, ui32PhysPageOffset, uiFlags, ui32VirtPageOffset); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnmapPages(IMG_HANDLE hBridge, -+ IMG_HANDLE hReservation, -+ IMG_UINT32 ui32VirtPageOffset, -+ IMG_UINT32 ui32PageCount) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMXINT_RESERVATION *psReservationInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation; -+ -+ eError = DevmemXIntUnmapPages(psReservationInt, ui32VirtPageOffset, ui32PageCount); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntMapVRangeToBackingPage(IMG_HANDLE hBridge, -+ IMG_HANDLE hReservation, -+ IMG_UINT32 ui32PageCount, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 ui32VirtPageOffset) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMXINT_RESERVATION *psReservationInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation; -+ -+ eError = -+ DevmemXIntMapVRangeToBackingPage(psReservationInt, -+ ui32PageCount, uiFlags, ui32VirtPageOffset); -+ -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/client_pvrtl_bridge.h b/drivers/gpu/drm/img-rogue/client_pvrtl_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/client_pvrtl_bridge.h -@@ -0,0 +1,93 @@ -+/******************************************************************************* -+@File -+@Title Client bridge header for pvrtl -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Exports the client bridge functions for pvrtl -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#ifndef CLIENT_PVRTL_BRIDGE_H -+#define CLIENT_PVRTL_BRIDGE_H -+ -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+ -+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) -+#include "pvr_bridge_client.h" -+#include "pvr_bridge.h" -+#endif -+ -+#include "common_pvrtl_bridge.h" -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeTLOpenStream(IMG_HANDLE hBridge, -+ const IMG_CHAR * puiName, -+ IMG_UINT32 ui32Mode, -+ IMG_HANDLE * phSD, IMG_HANDLE * phTLPMR); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeTLCloseStream(IMG_HANDLE hBridge, IMG_HANDLE hSD); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeTLAcquireData(IMG_HANDLE hBridge, -+ IMG_HANDLE hSD, -+ IMG_UINT32 * pui32ReadOffset, -+ IMG_UINT32 * pui32ReadLen); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeTLReleaseData(IMG_HANDLE hBridge, -+ IMG_HANDLE hSD, -+ IMG_UINT32 ui32ReadOffset, IMG_UINT32 ui32ReadLen); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeTLDiscoverStreams(IMG_HANDLE hBridge, -+ const IMG_CHAR * puiNamePattern, -+ IMG_UINT32 ui32Size, -+ IMG_CHAR * puiStreams, -+ IMG_UINT32 * pui32NumFound); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeTLReserveStream(IMG_HANDLE hBridge, -+ IMG_HANDLE hSD, -+ IMG_UINT32 * pui32BufferOffset, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32SizeMin, -+ IMG_UINT32 * pui32Available); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeTLCommitStream(IMG_HANDLE hBridge, -+ IMG_HANDLE hSD, IMG_UINT32 ui32ReqSize); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeTLWriteData(IMG_HANDLE hBridge, -+ IMG_HANDLE hSD, -+ IMG_UINT32 ui32Size, IMG_BYTE * pui8Data); -+ -+#endif /* CLIENT_PVRTL_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/client_pvrtl_direct_bridge.c b/drivers/gpu/drm/img-rogue/client_pvrtl_direct_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/client_pvrtl_direct_bridge.c -@@ -0,0 +1,175 @@ -+/******************************************************************************* -+@File -+@Title Direct client bridge for pvrtl -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the client side of the bridge for pvrtl -+ which is used in calls from Server context. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#include "client_pvrtl_bridge.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+ -+/* Module specific includes */ -+#include "devicemem_typedefs.h" -+#include "pvrsrv_tlcommon.h" -+ -+#include "tlserver.h" -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeTLOpenStream(IMG_HANDLE hBridge, -+ const IMG_CHAR * puiName, -+ IMG_UINT32 ui32Mode, -+ IMG_HANDLE * phSD, IMG_HANDLE * phTLPMR) -+{ -+ PVRSRV_ERROR eError; -+ TL_STREAM_DESC *psSDInt = NULL; -+ PMR *psTLPMRInt = NULL; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ eError = TLServerOpenStreamKM(puiName, ui32Mode, &psSDInt, &psTLPMRInt); -+ -+ *phSD = psSDInt; -+ *phTLPMR = psTLPMRInt; -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeTLCloseStream(IMG_HANDLE hBridge, IMG_HANDLE hSD) -+{ -+ PVRSRV_ERROR eError; -+ TL_STREAM_DESC *psSDInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psSDInt = (TL_STREAM_DESC *) hSD; -+ -+ eError = TLServerCloseStreamKM(psSDInt); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeTLAcquireData(IMG_HANDLE hBridge, -+ IMG_HANDLE hSD, -+ IMG_UINT32 * pui32ReadOffset, -+ IMG_UINT32 * pui32ReadLen) -+{ -+ PVRSRV_ERROR eError; -+ TL_STREAM_DESC *psSDInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psSDInt = (TL_STREAM_DESC *) hSD; -+ -+ eError = TLServerAcquireDataKM(psSDInt, pui32ReadOffset, pui32ReadLen); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeTLReleaseData(IMG_HANDLE hBridge, -+ IMG_HANDLE hSD, -+ IMG_UINT32 ui32ReadOffset, IMG_UINT32 ui32ReadLen) -+{ -+ PVRSRV_ERROR eError; -+ TL_STREAM_DESC *psSDInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psSDInt = (TL_STREAM_DESC *) hSD; -+ -+ eError = TLServerReleaseDataKM(psSDInt, ui32ReadOffset, ui32ReadLen); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeTLDiscoverStreams(IMG_HANDLE hBridge, -+ const IMG_CHAR * puiNamePattern, -+ IMG_UINT32 ui32Size, -+ IMG_CHAR * puiStreams, IMG_UINT32 * pui32NumFound) -+{ -+ PVRSRV_ERROR eError; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ eError = TLServerDiscoverStreamsKM(puiNamePattern, ui32Size, puiStreams, pui32NumFound); -+ -+ return 
eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeTLReserveStream(IMG_HANDLE hBridge, -+ IMG_HANDLE hSD, -+ IMG_UINT32 * pui32BufferOffset, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32SizeMin, IMG_UINT32 * pui32Available) -+{ -+ PVRSRV_ERROR eError; -+ TL_STREAM_DESC *psSDInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psSDInt = (TL_STREAM_DESC *) hSD; -+ -+ eError = -+ TLServerReserveStreamKM(psSDInt, -+ pui32BufferOffset, ui32Size, ui32SizeMin, pui32Available); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeTLCommitStream(IMG_HANDLE hBridge, -+ IMG_HANDLE hSD, IMG_UINT32 ui32ReqSize) -+{ -+ PVRSRV_ERROR eError; -+ TL_STREAM_DESC *psSDInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psSDInt = (TL_STREAM_DESC *) hSD; -+ -+ eError = TLServerCommitStreamKM(psSDInt, ui32ReqSize); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeTLWriteData(IMG_HANDLE hBridge, -+ IMG_HANDLE hSD, -+ IMG_UINT32 ui32Size, IMG_BYTE * pui8Data) -+{ -+ PVRSRV_ERROR eError; -+ TL_STREAM_DESC *psSDInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psSDInt = (TL_STREAM_DESC *) hSD; -+ -+ eError = TLServerWriteDataKM(psSDInt, ui32Size, pui8Data); -+ -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/client_ri_bridge.h b/drivers/gpu/drm/img-rogue/client_ri_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/client_ri_bridge.h -@@ -0,0 +1,89 @@ -+/******************************************************************************* -+@File -+@Title Client bridge header for ri -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Exports the client bridge functions for ri -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#ifndef CLIENT_RI_BRIDGE_H -+#define CLIENT_RI_BRIDGE_H -+ -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+ -+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) -+#include "pvr_bridge_client.h" -+#include "pvr_bridge.h" -+#endif -+ -+#include "common_ri_bridge.h" -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntry(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMRHandle, -+ IMG_UINT32 ui32TextBSize, -+ const IMG_CHAR * puiTextB, -+ IMG_UINT64 ui64Offset, -+ IMG_UINT64 ui64Size, -+ IMG_BOOL bIsImport, -+ IMG_BOOL bIsSuballoc, IMG_HANDLE * phRIHandle); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteProcListEntry(IMG_HANDLE hBridge, -+ IMG_UINT32 ui32TextBSize, -+ const IMG_CHAR * puiTextB, -+ IMG_UINT64 ui64Size, -+ IMG_UINT64 ui64DevVAddr, -+ IMG_HANDLE * phRIHandle); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge, -+ IMG_HANDLE hRIHandle, IMG_DEV_VIRTADDR sAddr); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge, IMG_HANDLE hRIHandle); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpList(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpAll(IMG_HANDLE hBridge); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpProcess(IMG_HANDLE hBridge, IMG_PID ui32Pid); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntryWithOwner(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMRHandle, IMG_PID ui32Owner); -+ -+#endif /* CLIENT_RI_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/client_ri_direct_bridge.c b/drivers/gpu/drm/img-rogue/client_ri_direct_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/client_ri_direct_bridge.c -@@ -0,0 +1,182 @@ -+/******************************************************************************* -+@File -+@Title Direct client bridge for ri -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the client side of the bridge for ri -+ which is used in calls from Server context. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#include "client_ri_bridge.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+ -+/* Module specific includes */ -+#include "ri_typedefs.h" -+ -+#include "ri_server.h" -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntry(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psPMRHandleInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psPMRHandleInt = (PMR *) hPMRHandle; -+ -+ eError = RIWritePMREntryKM(psPMRHandleInt); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMRHandle, -+ IMG_UINT32 ui32TextBSize, -+ const IMG_CHAR * puiTextB, -+ IMG_UINT64 ui64Offset, -+ IMG_UINT64 ui64Size, -+ IMG_BOOL bIsImport, -+ IMG_BOOL bIsSuballoc, IMG_HANDLE * phRIHandle) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psPMRHandleInt; -+ RI_HANDLE psRIHandleInt = NULL; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psPMRHandleInt = (PMR *) hPMRHandle; -+ -+ eError = -+ RIWriteMEMDESCEntryKM(psPMRHandleInt, -+ ui32TextBSize, -+ puiTextB, -+ ui64Offset, ui64Size, bIsImport, bIsSuballoc, &psRIHandleInt); -+ -+ *phRIHandle = psRIHandleInt; -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteProcListEntry(IMG_HANDLE hBridge, -+ IMG_UINT32 ui32TextBSize, -+ const IMG_CHAR * puiTextB, -+ IMG_UINT64 ui64Size, -+ IMG_UINT64 ui64DevVAddr, -+ IMG_HANDLE * phRIHandle) -+{ -+ PVRSRV_ERROR eError; -+ RI_HANDLE psRIHandleInt = NULL; -+ -+ eError = -+ RIWriteProcListEntryKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ ui32TextBSize, puiTextB, ui64Size, ui64DevVAddr, &psRIHandleInt); -+ -+ *phRIHandle = psRIHandleInt; -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge, -+ IMG_HANDLE hRIHandle, IMG_DEV_VIRTADDR sAddr) -+{ -+ PVRSRV_ERROR eError; -+ RI_HANDLE psRIHandleInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psRIHandleInt = (RI_HANDLE) hRIHandle; -+ -+ eError = RIUpdateMEMDESCAddrKM(psRIHandleInt, sAddr); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge, IMG_HANDLE hRIHandle) -+{ 
-+ PVRSRV_ERROR eError; -+ RI_HANDLE psRIHandleInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psRIHandleInt = (RI_HANDLE) hRIHandle; -+ -+ eError = RIDeleteMEMDESCEntryKM(psRIHandleInt); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpList(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psPMRHandleInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psPMRHandleInt = (PMR *) hPMRHandle; -+ -+ eError = RIDumpListKM(psPMRHandleInt); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpAll(IMG_HANDLE hBridge) -+{ -+ PVRSRV_ERROR eError; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ eError = RIDumpAllKM(); -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpProcess(IMG_HANDLE hBridge, IMG_PID ui32Pid) -+{ -+ PVRSRV_ERROR eError; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ eError = RIDumpProcessKM(ui32Pid); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntryWithOwner(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMRHandle, IMG_PID ui32Owner) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psPMRHandleInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psPMRHandleInt = (PMR *) hPMRHandle; -+ -+ eError = RIWritePMREntryWithOwnerKM(psPMRHandleInt, ui32Owner); -+ -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/client_sync_bridge.h b/drivers/gpu/drm/img-rogue/client_sync_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/client_sync_bridge.h -@@ -0,0 +1,102 @@ -+/******************************************************************************* -+@File -+@Title Client bridge header for sync -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Exports the client bridge functions for sync -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#ifndef CLIENT_SYNC_BRIDGE_H -+#define CLIENT_SYNC_BRIDGE_H -+ -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+ -+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) -+#include "pvr_bridge_client.h" -+#include "pvr_bridge.h" -+#endif -+ -+#include "common_sync_bridge.h" -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge, -+ IMG_HANDLE * phSyncHandle, -+ IMG_UINT32 * pui32SyncPrimVAddr, -+ IMG_UINT32 * pui32SyncPrimBlockSize, -+ IMG_HANDLE * phhSyncPMR); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge, IMG_HANDLE hSyncHandle); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimSet(IMG_HANDLE hBridge, -+ IMG_HANDLE hSyncHandle, -+ IMG_UINT32 ui32Index, IMG_UINT32 ui32Value); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDump(IMG_HANDLE hBridge, -+ IMG_HANDLE hSyncHandle, IMG_UINT32 ui32Offset); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge, -+ IMG_HANDLE hSyncHandle, -+ IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge, -+ IMG_HANDLE hSyncHandle, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge, -+ IMG_HANDLE hSyncHandle, -+ IMG_UINT32 ui32Offset, -+ IMG_DEVMEM_OFFSET_T uiWriteOffset, -+ IMG_DEVMEM_SIZE_T uiPacketSize, -+ IMG_DEVMEM_SIZE_T uiBufferSize); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncAllocEvent(IMG_HANDLE hBridge, -+ IMG_BOOL bServerSync, -+ IMG_UINT32 ui32FWAddr, -+ IMG_UINT32 ui32ClassNameSize, -+ const IMG_CHAR * puiClassName); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncFreeEvent(IMG_HANDLE hBridge, IMG_UINT32 ui32FWAddr); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncCheckpointSignalledPDumpPol(IMG_HANDLE hBridge, -+ PVRSRV_FENCE hFence); -+ -+#endif /* CLIENT_SYNC_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/client_sync_direct_bridge.c b/drivers/gpu/drm/img-rogue/client_sync_direct_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/client_sync_direct_bridge.c -@@ -0,0 +1,262 @@ -+/******************************************************************************* -+@File -+@Title Direct client bridge for sync -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the client side of the bridge for sync -+ which is used in calls from Server context. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include "client_sync_bridge.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+ -+/* Module specific includes */ -+#include "pdump.h" -+#include "pdumpdefs.h" -+#include "devicemem_typedefs.h" -+#include "pvrsrv_sync_km.h" -+#include -+ -+#include "sync.h" -+#include "sync_server.h" -+#include "pdump.h" -+#include "pvrsrv_sync_km.h" -+#include "sync_fallback_server.h" -+#include "sync_checkpoint.h" -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge, -+ IMG_HANDLE * phSyncHandle, -+ IMG_UINT32 * pui32SyncPrimVAddr, -+ IMG_UINT32 * pui32SyncPrimBlockSize, -+ IMG_HANDLE * phhSyncPMR) -+{ -+ PVRSRV_ERROR eError; -+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; -+ PMR *pshSyncPMRInt = NULL; -+ -+ eError = -+ PVRSRVAllocSyncPrimitiveBlockKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ &psSyncHandleInt, -+ pui32SyncPrimVAddr, -+ pui32SyncPrimBlockSize, &pshSyncPMRInt); -+ -+ *phSyncHandle = psSyncHandleInt; -+ *phhSyncPMR = pshSyncPMRInt; -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge, IMG_HANDLE hSyncHandle) -+{ -+ PVRSRV_ERROR eError; -+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; -+ -+ eError = PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimSet(IMG_HANDLE hBridge, -+ IMG_HANDLE hSyncHandle, -+ IMG_UINT32 ui32Index, IMG_UINT32 ui32Value) -+{ -+ PVRSRV_ERROR eError; -+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; -+ -+ eError = PVRSRVSyncPrimSetKM(psSyncHandleInt, ui32Index, ui32Value); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDump(IMG_HANDLE hBridge, -+ IMG_HANDLE hSyncHandle, IMG_UINT32 ui32Offset) -+{ -+#if defined(PDUMP) -+ PVRSRV_ERROR eError; -+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; -+ -+ eError = PVRSRVSyncPrimPDumpKM(psSyncHandleInt, ui32Offset); -+ -+ return eError; -+#else -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ PVR_UNREFERENCED_PARAMETER(hSyncHandle); -+ PVR_UNREFERENCED_PARAMETER(ui32Offset); -+ -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+#endif -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge, -+ IMG_HANDLE hSyncHandle, -+ IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value) -+{ -+#if defined(PDUMP) -+ PVRSRV_ERROR eError; -+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; -+ -+ eError = PVRSRVSyncPrimPDumpValueKM(psSyncHandleInt, ui32Offset, ui32Value); -+ -+ return eError; -+#else -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ PVR_UNREFERENCED_PARAMETER(hSyncHandle); -+ PVR_UNREFERENCED_PARAMETER(ui32Offset); -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+ -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+#endif -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge, -+ IMG_HANDLE hSyncHandle, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ PDUMP_FLAGS_T uiPDumpFlags) -+{ -+#if defined(PDUMP) -+ PVRSRV_ERROR eError; -+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psSyncHandleInt = 
(SYNC_PRIMITIVE_BLOCK *) hSyncHandle; -+ -+ eError = -+ PVRSRVSyncPrimPDumpPolKM(psSyncHandleInt, -+ ui32Offset, ui32Value, ui32Mask, eOperator, uiPDumpFlags); -+ -+ return eError; -+#else -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ PVR_UNREFERENCED_PARAMETER(hSyncHandle); -+ PVR_UNREFERENCED_PARAMETER(ui32Offset); -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+ PVR_UNREFERENCED_PARAMETER(ui32Mask); -+ PVR_UNREFERENCED_PARAMETER(eOperator); -+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); -+ -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+#endif -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge, -+ IMG_HANDLE hSyncHandle, -+ IMG_UINT32 ui32Offset, -+ IMG_DEVMEM_OFFSET_T uiWriteOffset, -+ IMG_DEVMEM_SIZE_T uiPacketSize, -+ IMG_DEVMEM_SIZE_T uiBufferSize) -+{ -+#if defined(PDUMP) -+ PVRSRV_ERROR eError; -+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; -+ -+ eError = -+ PVRSRVSyncPrimPDumpCBPKM(psSyncHandleInt, -+ ui32Offset, uiWriteOffset, uiPacketSize, uiBufferSize); -+ -+ return eError; -+#else -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ PVR_UNREFERENCED_PARAMETER(hSyncHandle); -+ PVR_UNREFERENCED_PARAMETER(ui32Offset); -+ PVR_UNREFERENCED_PARAMETER(uiWriteOffset); -+ PVR_UNREFERENCED_PARAMETER(uiPacketSize); -+ PVR_UNREFERENCED_PARAMETER(uiBufferSize); -+ -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+#endif -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncAllocEvent(IMG_HANDLE hBridge, -+ IMG_BOOL bServerSync, -+ IMG_UINT32 ui32FWAddr, -+ IMG_UINT32 ui32ClassNameSize, -+ const IMG_CHAR * puiClassName) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = -+ PVRSRVSyncAllocEventKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ bServerSync, ui32FWAddr, ui32ClassNameSize, puiClassName); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncFreeEvent(IMG_HANDLE hBridge, IMG_UINT32 ui32FWAddr) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = PVRSRVSyncFreeEventKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ui32FWAddr); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncCheckpointSignalledPDumpPol(IMG_HANDLE hBridge, -+ PVRSRV_FENCE hFence) -+{ -+#if defined(PDUMP) -+ PVRSRV_ERROR eError; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ eError = PVRSRVSyncCheckpointSignalledPDumpPolKM(hFence); -+ -+ return eError; -+#else -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ PVR_UNREFERENCED_PARAMETER(hFence); -+ -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+#endif -+} -diff --git a/drivers/gpu/drm/img-rogue/client_synctracking_bridge.h b/drivers/gpu/drm/img-rogue/client_synctracking_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/client_synctracking_bridge.h -@@ -0,0 +1,68 @@ -+/******************************************************************************* -+@File -+@Title Client bridge header for synctracking -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Exports the client bridge functions for synctracking -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#ifndef CLIENT_SYNCTRACKING_BRIDGE_H -+#define CLIENT_SYNCTRACKING_BRIDGE_H -+ -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+ -+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) -+#include "pvr_bridge_client.h" -+#include "pvr_bridge.h" -+#endif -+ -+#include "common_synctracking_bridge.h" -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge, IMG_HANDLE hhRecord); -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordAdd(IMG_HANDLE hBridge, -+ IMG_HANDLE * phhRecord, -+ IMG_HANDLE hhServerSyncPrimBlock, -+ IMG_UINT32 ui32ui32FwBlockAddr, -+ IMG_UINT32 ui32ui32SyncOffset, -+ IMG_BOOL bbServerSync, -+ IMG_UINT32 ui32ClassNameSize, -+ const IMG_CHAR * puiClassName); -+ -+#endif /* CLIENT_SYNCTRACKING_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/client_synctracking_direct_bridge.c b/drivers/gpu/drm/img-rogue/client_synctracking_direct_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/client_synctracking_direct_bridge.c -@@ -0,0 +1,92 @@ -+/******************************************************************************* -+@File -+@Title Direct client bridge for synctracking -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the client side of the bridge for synctracking -+ which is used in calls from Server context. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include "client_synctracking_bridge.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+ -+/* Module specific includes */ -+ -+#include "sync.h" -+#include "sync_server.h" -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge, IMG_HANDLE hhRecord) -+{ -+ PVRSRV_ERROR eError; -+ SYNC_RECORD_HANDLE pshRecordInt; -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ -+ pshRecordInt = (SYNC_RECORD_HANDLE) hhRecord; -+ -+ eError = PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordAdd(IMG_HANDLE hBridge, -+ IMG_HANDLE * phhRecord, -+ IMG_HANDLE hhServerSyncPrimBlock, -+ IMG_UINT32 ui32ui32FwBlockAddr, -+ IMG_UINT32 ui32ui32SyncOffset, -+ IMG_BOOL bbServerSync, -+ IMG_UINT32 ui32ClassNameSize, -+ const IMG_CHAR * puiClassName) -+{ -+ PVRSRV_ERROR eError; -+ SYNC_RECORD_HANDLE pshRecordInt = NULL; -+ SYNC_PRIMITIVE_BLOCK *pshServerSyncPrimBlockInt; -+ -+ pshServerSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK *) hhServerSyncPrimBlock; -+ -+ eError = -+ PVRSRVSyncRecordAddKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), -+ &pshRecordInt, -+ pshServerSyncPrimBlockInt, -+ ui32ui32FwBlockAddr, -+ ui32ui32SyncOffset, -+ bbServerSync, ui32ClassNameSize, puiClassName); -+ -+ *phhRecord = pshRecordInt; -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/common_cache_bridge.h b/drivers/gpu/drm/img-rogue/common_cache_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_cache_bridge.h -@@ -0,0 +1,126 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for cache -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for cache -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#ifndef COMMON_CACHE_BRIDGE_H -+#define COMMON_CACHE_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "cache_ops.h" -+ -+#define PVRSRV_BRIDGE_CACHE_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE PVRSRV_BRIDGE_CACHE_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_CACHE_CACHEOPEXEC PVRSRV_BRIDGE_CACHE_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_CACHE_CACHEOPLOG PVRSRV_BRIDGE_CACHE_CMD_FIRST+2 -+#define PVRSRV_BRIDGE_CACHE_CMD_LAST (PVRSRV_BRIDGE_CACHE_CMD_FIRST+2) -+ -+/******************************************* -+ CacheOpQueue -+ *******************************************/ -+ -+/* Bridge in structure for CacheOpQueue */ -+typedef struct PVRSRV_BRIDGE_IN_CACHEOPQUEUE_TAG -+{ -+ PVRSRV_CACHE_OP *piuCacheOp; -+ IMG_UINT64 *pui64Address; -+ IMG_DEVMEM_OFFSET_T *puiOffset; -+ IMG_DEVMEM_SIZE_T *puiSize; -+ IMG_HANDLE *phPMR; -+ IMG_UINT32 ui32NumCacheOps; -+ IMG_UINT32 ui32OpTimeline; -+} __packed PVRSRV_BRIDGE_IN_CACHEOPQUEUE; -+ -+/* Bridge out structure for CacheOpQueue */ -+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPQUEUE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_CACHEOPQUEUE; -+ -+/******************************************* -+ CacheOpExec -+ *******************************************/ -+ -+/* Bridge in structure for CacheOpExec */ -+typedef struct PVRSRV_BRIDGE_IN_CACHEOPEXEC_TAG -+{ -+ IMG_UINT64 ui64Address; -+ IMG_DEVMEM_OFFSET_T uiOffset; -+ IMG_DEVMEM_SIZE_T uiSize; -+ IMG_HANDLE hPMR; -+ PVRSRV_CACHE_OP iuCacheOp; -+} __packed PVRSRV_BRIDGE_IN_CACHEOPEXEC; -+ -+/* Bridge out structure for CacheOpExec */ -+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPEXEC_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_CACHEOPEXEC; -+ -+/******************************************* -+ CacheOpLog -+ *******************************************/ -+ -+/* Bridge in structure for CacheOpLog */ -+typedef struct PVRSRV_BRIDGE_IN_CACHEOPLOG_TAG -+{ -+ IMG_INT64 i64EndTime; -+ IMG_INT64 i64StartTime; -+ IMG_UINT64 ui64Address; -+ IMG_DEVMEM_OFFSET_T uiOffset; -+ IMG_DEVMEM_SIZE_T uiSize; -+ IMG_HANDLE hPMR; -+ PVRSRV_CACHE_OP iuCacheOp; -+} __packed PVRSRV_BRIDGE_IN_CACHEOPLOG; -+ -+/* Bridge out structure for CacheOpLog */ -+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPLOG_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_CACHEOPLOG; -+ -+#endif /* COMMON_CACHE_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_cmm_bridge.h b/drivers/gpu/drm/img-rogue/common_cmm_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_cmm_bridge.h -@@ -0,0 +1,114 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for cmm -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for cmm -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef COMMON_CMM_BRIDGE_H -+#define COMMON_CMM_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "devicemem_typedefs.h" -+ -+#define PVRSRV_BRIDGE_CMM_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX PVRSRV_BRIDGE_CMM_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX PVRSRV_BRIDGE_CMM_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX PVRSRV_BRIDGE_CMM_CMD_FIRST+2 -+#define PVRSRV_BRIDGE_CMM_CMD_LAST (PVRSRV_BRIDGE_CMM_CMD_FIRST+2) -+ -+/******************************************* -+ DevmemIntExportCtx -+ *******************************************/ -+ -+/* Bridge in structure for DevmemIntExportCtx */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX_TAG -+{ -+ IMG_HANDLE hContext; -+ IMG_HANDLE hPMR; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX; -+ -+/* Bridge out structure for DevmemIntExportCtx */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX_TAG -+{ -+ IMG_HANDLE hContextExport; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX; -+ -+/******************************************* -+ DevmemIntUnexportCtx -+ *******************************************/ -+ -+/* Bridge in structure for DevmemIntUnexportCtx */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX_TAG -+{ -+ IMG_HANDLE hContextExport; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX; -+ -+/* Bridge out structure for DevmemIntUnexportCtx */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX; -+ -+/******************************************* -+ DevmemIntAcquireRemoteCtx -+ *******************************************/ -+ -+/* Bridge in structure for DevmemIntAcquireRemoteCtx */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX_TAG -+{ -+ IMG_HANDLE hPMR; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX; -+ -+/* Bridge out structure for DevmemIntAcquireRemoteCtx */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX_TAG -+{ -+ IMG_HANDLE hContext; -+ IMG_HANDLE hPrivData; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX; -+ -+#endif /* COMMON_CMM_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_devicememhistory_bridge.h b/drivers/gpu/drm/img-rogue/common_devicememhistory_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_devicememhistory_bridge.h -@@ -0,0 +1,185 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for devicememhistory -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for devicememhistory -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef COMMON_DEVICEMEMHISTORY_BRIDGE_H -+#define COMMON_DEVICEMEMHISTORY_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "devicemem_typedefs.h" -+ -+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+2 -+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+3 -+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4 -+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4) -+ -+/******************************************* -+ DevicememHistoryMap -+ *******************************************/ -+ -+/* Bridge in structure for DevicememHistoryMap */ -+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP_TAG -+{ -+ IMG_DEV_VIRTADDR sDevVAddr; -+ IMG_DEVMEM_SIZE_T uiOffset; -+ IMG_DEVMEM_SIZE_T uiSize; -+ IMG_HANDLE hPMR; -+ const IMG_CHAR *puiText; -+ IMG_UINT32 ui32AllocationIndex; -+ IMG_UINT32 ui32Log2PageSize; -+} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP; -+ -+/* Bridge out structure for DevicememHistoryMap */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP_TAG -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32AllocationIndexOut; -+} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP; -+ -+/******************************************* -+ DevicememHistoryUnmap -+ *******************************************/ -+ -+/* Bridge in structure for DevicememHistoryUnmap */ -+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP_TAG -+{ -+ IMG_DEV_VIRTADDR sDevVAddr; -+ IMG_DEVMEM_SIZE_T uiOffset; -+ IMG_DEVMEM_SIZE_T uiSize; -+ IMG_HANDLE hPMR; -+ const IMG_CHAR *puiText; -+ IMG_UINT32 ui32AllocationIndex; -+ IMG_UINT32 ui32Log2PageSize; -+} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP; -+ -+/* Bridge out structure for DevicememHistoryUnmap */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP_TAG -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32AllocationIndexOut; -+} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP; -+ -+/******************************************* -+ DevicememHistoryMapVRange -+ *******************************************/ -+ -+/* Bridge in structure for DevicememHistoryMapVRange */ -+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE_TAG -+{ -+ IMG_DEV_VIRTADDR sBaseDevVAddr; -+ IMG_DEVMEM_SIZE_T uiAllocSize; -+ const IMG_CHAR *puiText; -+ IMG_UINT32 ui32AllocationIndex; -+ IMG_UINT32 ui32Log2PageSize; -+ IMG_UINT32 ui32NumPages; -+ IMG_UINT32 ui32ui32StartPage; -+} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE; -+ -+/* Bridge out structure for DevicememHistoryMapVRange */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE_TAG -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32AllocationIndexOut; -+} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE; -+ -+/******************************************* -+ DevicememHistoryUnmapVRange -+ *******************************************/ -+ -+/* Bridge in structure for DevicememHistoryUnmapVRange */ -+typedef struct 
PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE_TAG -+{ -+ IMG_DEV_VIRTADDR sBaseDevVAddr; -+ IMG_DEVMEM_SIZE_T uiAllocSize; -+ const IMG_CHAR *puiText; -+ IMG_UINT32 ui32AllocationIndex; -+ IMG_UINT32 ui32Log2PageSize; -+ IMG_UINT32 ui32NumPages; -+ IMG_UINT32 ui32ui32StartPage; -+} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE; -+ -+/* Bridge out structure for DevicememHistoryUnmapVRange */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE_TAG -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32AllocationIndexOut; -+} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE; -+ -+/******************************************* -+ DevicememHistorySparseChange -+ *******************************************/ -+ -+/* Bridge in structure for DevicememHistorySparseChange */ -+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE_TAG -+{ -+ IMG_DEV_VIRTADDR sDevVAddr; -+ IMG_DEVMEM_SIZE_T uiOffset; -+ IMG_DEVMEM_SIZE_T uiSize; -+ IMG_HANDLE hPMR; -+ IMG_UINT32 *pui32AllocPageIndices; -+ IMG_UINT32 *pui32FreePageIndices; -+ const IMG_CHAR *puiText; -+ IMG_UINT32 ui32AllocPageCount; -+ IMG_UINT32 ui32AllocationIndex; -+ IMG_UINT32 ui32FreePageCount; -+ IMG_UINT32 ui32Log2PageSize; -+} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE; -+ -+/* Bridge out structure for DevicememHistorySparseChange */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE_TAG -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32AllocationIndexOut; -+} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE; -+ -+#endif /* COMMON_DEVICEMEMHISTORY_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_di_bridge.h b/drivers/gpu/drm/img-rogue/common_di_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_di_bridge.h -@@ -0,0 +1,153 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for di -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for di -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#ifndef COMMON_DI_BRIDGE_H -+#define COMMON_DI_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "pvrsrv_tlcommon.h" -+#include "pvr_dicommon.h" -+ -+#define PVRSRV_BRIDGE_DI_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_DI_DICREATECONTEXT PVRSRV_BRIDGE_DI_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_DI_DIDESTROYCONTEXT PVRSRV_BRIDGE_DI_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_DI_DIREADENTRY PVRSRV_BRIDGE_DI_CMD_FIRST+2 -+#define PVRSRV_BRIDGE_DI_DIWRITEENTRY PVRSRV_BRIDGE_DI_CMD_FIRST+3 -+#define PVRSRV_BRIDGE_DI_DILISTALLENTRIES PVRSRV_BRIDGE_DI_CMD_FIRST+4 -+#define PVRSRV_BRIDGE_DI_CMD_LAST (PVRSRV_BRIDGE_DI_CMD_FIRST+4) -+ -+/******************************************* -+ DICreateContext -+ *******************************************/ -+ -+/* Bridge in structure for DICreateContext */ -+typedef struct PVRSRV_BRIDGE_IN_DICREATECONTEXT_TAG -+{ -+ IMG_CHAR *puiStreamName; -+} __packed PVRSRV_BRIDGE_IN_DICREATECONTEXT; -+ -+/* Bridge out structure for DICreateContext */ -+typedef struct PVRSRV_BRIDGE_OUT_DICREATECONTEXT_TAG -+{ -+ IMG_HANDLE hContext; -+ IMG_CHAR *puiStreamName; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DICREATECONTEXT; -+ -+/******************************************* -+ DIDestroyContext -+ *******************************************/ -+ -+/* Bridge in structure for DIDestroyContext */ -+typedef struct PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT_TAG -+{ -+ IMG_HANDLE hContext; -+} __packed PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT; -+ -+/* Bridge out structure for DIDestroyContext */ -+typedef struct PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT; -+ -+/******************************************* -+ DIReadEntry -+ *******************************************/ -+ -+/* Bridge in structure for DIReadEntry */ -+typedef struct PVRSRV_BRIDGE_IN_DIREADENTRY_TAG -+{ -+ IMG_UINT64 ui64Offset; -+ IMG_UINT64 ui64Size; -+ IMG_HANDLE hContext; -+ const IMG_CHAR *puiEntryPath; -+} __packed PVRSRV_BRIDGE_IN_DIREADENTRY; -+ -+/* Bridge out structure for DIReadEntry */ -+typedef struct PVRSRV_BRIDGE_OUT_DIREADENTRY_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DIREADENTRY; -+ -+/******************************************* -+ DIWriteEntry -+ *******************************************/ -+ -+/* Bridge in structure for DIWriteEntry */ -+typedef struct PVRSRV_BRIDGE_IN_DIWRITEENTRY_TAG -+{ -+ IMG_HANDLE hContext; -+ const IMG_CHAR *puiEntryPath; -+ const IMG_CHAR *puiValue; -+ IMG_UINT32 ui32ValueSize; -+} __packed PVRSRV_BRIDGE_IN_DIWRITEENTRY; -+ -+/* Bridge out structure for DIWriteEntry */ -+typedef struct 
PVRSRV_BRIDGE_OUT_DIWRITEENTRY_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DIWRITEENTRY; -+ -+/******************************************* -+ DIListAllEntries -+ *******************************************/ -+ -+/* Bridge in structure for DIListAllEntries */ -+typedef struct PVRSRV_BRIDGE_IN_DILISTALLENTRIES_TAG -+{ -+ IMG_HANDLE hContext; -+} __packed PVRSRV_BRIDGE_IN_DILISTALLENTRIES; -+ -+/* Bridge out structure for DIListAllEntries */ -+typedef struct PVRSRV_BRIDGE_OUT_DILISTALLENTRIES_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DILISTALLENTRIES; -+ -+#endif /* COMMON_DI_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_dmabuf_bridge.h b/drivers/gpu/drm/img-rogue/common_dmabuf_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_dmabuf_bridge.h -@@ -0,0 +1,150 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for dmabuf -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for dmabuf -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef COMMON_DMABUF_BRIDGE_H -+#define COMMON_DMABUF_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "pvrsrv_memallocflags.h" -+ -+#define PVRSRV_BRIDGE_DMABUF_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2 -+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+3 -+#define PVRSRV_BRIDGE_DMABUF_CMD_LAST (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+3) -+ -+/******************************************* -+ PhysmemImportDmaBuf -+ *******************************************/ -+ -+/* Bridge in structure for PhysmemImportDmaBuf */ -+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF_TAG -+{ -+ const IMG_CHAR *puiName; -+ IMG_INT ifd; -+ IMG_UINT32 ui32NameSize; -+ PVRSRV_MEMALLOCFLAGS_T uiFlags; -+} __packed PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF; -+ -+/* Bridge out structure for PhysmemImportDmaBuf */ -+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF_TAG -+{ -+ IMG_DEVMEM_ALIGN_T uiAlign; -+ IMG_DEVMEM_SIZE_T uiSize; -+ IMG_HANDLE hPMRPtr; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF; -+ -+/******************************************* -+ PhysmemImportDmaBufLocked -+ *******************************************/ -+ -+/* Bridge in structure for PhysmemImportDmaBufLocked */ -+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED_TAG -+{ -+ const IMG_CHAR *puiName; -+ IMG_INT ifd; -+ IMG_UINT32 ui32NameSize; -+ PVRSRV_MEMALLOCFLAGS_T uiFlags; -+} __packed PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED; -+ -+/* Bridge out structure for PhysmemImportDmaBufLocked */ -+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED_TAG -+{ -+ IMG_DEVMEM_ALIGN_T uiAlign; -+ IMG_DEVMEM_SIZE_T uiSize; -+ IMG_HANDLE hPMRPtr; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED; -+ -+/******************************************* -+ PhysmemExportDmaBuf -+ *******************************************/ -+ -+/* Bridge in structure for PhysmemExportDmaBuf */ -+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF_TAG -+{ -+ IMG_HANDLE hPMR; -+} __packed PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF; -+ -+/* Bridge out structure for PhysmemExportDmaBuf */ -+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF_TAG -+{ -+ PVRSRV_ERROR eError; -+ IMG_INT iFd; -+} __packed PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF; -+ -+/******************************************* -+ PhysmemImportSparseDmaBuf -+ *******************************************/ -+ -+/* Bridge in structure for PhysmemImportSparseDmaBuf */ -+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF_TAG -+{ -+ IMG_DEVMEM_SIZE_T uiChunkSize; -+ IMG_UINT32 *pui32MappingTable; -+ const IMG_CHAR *puiName; -+ IMG_INT ifd; -+ IMG_UINT32 ui32NameSize; -+ IMG_UINT32 ui32NumPhysChunks; -+ IMG_UINT32 ui32NumVirtChunks; -+ PVRSRV_MEMALLOCFLAGS_T uiFlags; -+} __packed PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF; -+ -+/* Bridge out structure for PhysmemImportSparseDmaBuf */ -+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF_TAG -+{ -+ IMG_DEVMEM_ALIGN_T uiAlign; -+ IMG_DEVMEM_SIZE_T uiSize; -+ IMG_HANDLE hPMRPtr; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF; -+ -+#endif /* COMMON_DMABUF_BRIDGE_H */ 
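All of these generated bridge headers follow the same shape: a per-module set of command IDs expressed as offsets from a *_CMD_FIRST macro (with *_CMD_LAST marking the end of the range), plus __packed "bridge in"/"bridge out" parameter structures for each call. As a rough illustration only — the sketch below is not part of the patch — a standalone C program can show how a dispatcher might range-check an incoming command against the dmabuf module's ID range. The macro values are copied from common_dmabuf_bridge.h in the hunk above; dispatch_dmabuf_cmd() and its return convention are assumptions made for the sketch, not the driver's actual dispatch code.

/*
 * Standalone sketch (not part of the patch): validate a bridge command
 * index against the dmabuf module's FIRST/LAST macros before dispatch.
 * Macro values are copied verbatim from common_dmabuf_bridge.h above;
 * dispatch_dmabuf_cmd() itself is hypothetical.
 */
#include <stdio.h>

#define PVRSRV_BRIDGE_DMABUF_CMD_FIRST                 0
#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF       (PVRSRV_BRIDGE_DMABUF_CMD_FIRST + 0)
#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED (PVRSRV_BRIDGE_DMABUF_CMD_FIRST + 1)
#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF       (PVRSRV_BRIDGE_DMABUF_CMD_FIRST + 2)
#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF (PVRSRV_BRIDGE_DMABUF_CMD_FIRST + 3)
#define PVRSRV_BRIDGE_DMABUF_CMD_LAST                  (PVRSRV_BRIDGE_DMABUF_CMD_FIRST + 3)

/* Hypothetical dispatcher: returns 0 for a known command, -1 otherwise. */
static int dispatch_dmabuf_cmd(unsigned int cmd)
{
    if (cmd > PVRSRV_BRIDGE_DMABUF_CMD_LAST)
        return -1; /* outside this bridge module's FIRST..LAST range */

    switch (cmd) {
    case PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF:
    case PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED:
    case PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF:
    case PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF:
        return 0; /* a real dispatcher would unpack the __packed in/out structs here */
    default:
        return -1;
    }
}

int main(void)
{
    printf("cmd 2 -> %d\n", dispatch_dmabuf_cmd(2)); /* known: PhysmemExportDmaBuf */
    printf("cmd 9 -> %d\n", dispatch_dmabuf_cmd(9)); /* rejected: out of range */
    return 0;
}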
-diff --git a/drivers/gpu/drm/img-rogue/common_htbuffer_bridge.h b/drivers/gpu/drm/img-rogue/common_htbuffer_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_htbuffer_bridge.h -@@ -0,0 +1,82 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for htbuffer -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for htbuffer -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef COMMON_HTBUFFER_BRIDGE_H -+#define COMMON_HTBUFFER_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "devicemem_typedefs.h" -+#include "htbuffer_types.h" -+ -+#define PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_HTBUFFER_CMD_LAST (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0) -+ -+/******************************************* -+ HTBControl -+ *******************************************/ -+ -+/* Bridge in structure for HTBControl */ -+typedef struct PVRSRV_BRIDGE_IN_HTBCONTROL_TAG -+{ -+ IMG_UINT32 *pui32GroupEnable; -+ IMG_UINT32 ui32EnablePID; -+ IMG_UINT32 ui32LogLevel; -+ IMG_UINT32 ui32LogMode; -+ IMG_UINT32 ui32NumGroups; -+ IMG_UINT32 ui32OpMode; -+} __packed PVRSRV_BRIDGE_IN_HTBCONTROL; -+ -+/* Bridge out structure for HTBControl */ -+typedef struct PVRSRV_BRIDGE_OUT_HTBCONTROL_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_HTBCONTROL; -+ -+#endif /* COMMON_HTBUFFER_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_mm_bridge.h b/drivers/gpu/drm/img-rogue/common_mm_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_mm_bridge.h -@@ -0,0 +1,779 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for mm -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for mm -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#ifndef COMMON_MM_BRIDGE_H -+#define COMMON_MM_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "pvrsrv_memallocflags.h" -+#include "pvrsrv_memalloc_physheap.h" -+#include "devicemem_typedefs.h" -+ -+#define PVRSRV_BRIDGE_MM_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_MM_PMREXPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_MM_PMRGETUID PVRSRV_BRIDGE_MM_CMD_FIRST+2 -+#define PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE PVRSRV_BRIDGE_MM_CMD_FIRST+3 -+#define PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE PVRSRV_BRIDGE_MM_CMD_FIRST+4 -+#define PVRSRV_BRIDGE_MM_PMRIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+5 -+#define PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+6 -+#define PVRSRV_BRIDGE_MM_PMRUNREFPMR PVRSRV_BRIDGE_MM_CMD_FIRST+7 -+#define PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR PVRSRV_BRIDGE_MM_CMD_FIRST+8 -+#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+9 -+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+10 -+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+11 -+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+12 -+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+13 -+#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+14 -+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+15 -+#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+16 -+#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGEANDMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+17 -+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGEANDUNMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+18 -+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+19 -+#define PVRSRV_BRIDGE_MM_CHANGESPARSEMEM PVRSRV_BRIDGE_MM_CMD_FIRST+20 -+#define PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID PVRSRV_BRIDGE_MM_CMD_FIRST+21 -+#define PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE PVRSRV_BRIDGE_MM_CMD_FIRST+22 -+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+23 -+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+24 -+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME PVRSRV_BRIDGE_MM_CMD_FIRST+25 -+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS PVRSRV_BRIDGE_MM_CMD_FIRST+26 -+#define PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM PVRSRV_BRIDGE_MM_CMD_FIRST+27 -+#define PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO PVRSRV_BRIDGE_MM_CMD_FIRST+28 -+#define PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP PVRSRV_BRIDGE_MM_CMD_FIRST+29 -+#define PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS PVRSRV_BRIDGE_MM_CMD_FIRST+30 -+#define PVRSRV_BRIDGE_MM_PVRSRVSTATSUPDATEOOMSTAT PVRSRV_BRIDGE_MM_CMD_FIRST+31 -+#define PVRSRV_BRIDGE_MM_DEVMEMXINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+32 -+#define PVRSRV_BRIDGE_MM_DEVMEMXINTUNRESERVERANGE 
PVRSRV_BRIDGE_MM_CMD_FIRST+33 -+#define PVRSRV_BRIDGE_MM_DEVMEMXINTMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+34 -+#define PVRSRV_BRIDGE_MM_DEVMEMXINTUNMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+35 -+#define PVRSRV_BRIDGE_MM_DEVMEMXINTMAPVRANGETOBACKINGPAGE PVRSRV_BRIDGE_MM_CMD_FIRST+36 -+#define PVRSRV_BRIDGE_MM_CMD_LAST (PVRSRV_BRIDGE_MM_CMD_FIRST+36) -+ -+/******************************************* -+ PMRExportPMR -+ *******************************************/ -+ -+/* Bridge in structure for PMRExportPMR */ -+typedef struct PVRSRV_BRIDGE_IN_PMREXPORTPMR_TAG -+{ -+ IMG_HANDLE hPMR; -+} __packed PVRSRV_BRIDGE_IN_PMREXPORTPMR; -+ -+/* Bridge out structure for PMRExportPMR */ -+typedef struct PVRSRV_BRIDGE_OUT_PMREXPORTPMR_TAG -+{ -+ IMG_UINT64 ui64Password; -+ IMG_UINT64 ui64Size; -+ IMG_HANDLE hPMRExport; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32Log2Contig; -+} __packed PVRSRV_BRIDGE_OUT_PMREXPORTPMR; -+ -+/******************************************* -+ PMRUnexportPMR -+ *******************************************/ -+ -+/* Bridge in structure for PMRUnexportPMR */ -+typedef struct PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR_TAG -+{ -+ IMG_HANDLE hPMRExport; -+} __packed PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR; -+ -+/* Bridge out structure for PMRUnexportPMR */ -+typedef struct PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR; -+ -+/******************************************* -+ PMRGetUID -+ *******************************************/ -+ -+/* Bridge in structure for PMRGetUID */ -+typedef struct PVRSRV_BRIDGE_IN_PMRGETUID_TAG -+{ -+ IMG_HANDLE hPMR; -+} __packed PVRSRV_BRIDGE_IN_PMRGETUID; -+ -+/* Bridge out structure for PMRGetUID */ -+typedef struct PVRSRV_BRIDGE_OUT_PMRGETUID_TAG -+{ -+ IMG_UINT64 ui64UID; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_PMRGETUID; -+ -+/******************************************* -+ PMRMakeLocalImportHandle -+ *******************************************/ -+ -+/* Bridge in structure for PMRMakeLocalImportHandle */ -+typedef struct PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE_TAG -+{ -+ IMG_HANDLE hBuffer; -+} __packed PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE; -+ -+/* Bridge out structure for PMRMakeLocalImportHandle */ -+typedef struct PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE_TAG -+{ -+ IMG_HANDLE hExtMem; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE; -+ -+/******************************************* -+ PMRUnmakeLocalImportHandle -+ *******************************************/ -+ -+/* Bridge in structure for PMRUnmakeLocalImportHandle */ -+typedef struct PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE_TAG -+{ -+ IMG_HANDLE hExtMem; -+} __packed PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE; -+ -+/* Bridge out structure for PMRUnmakeLocalImportHandle */ -+typedef struct PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE; -+ -+/******************************************* -+ PMRImportPMR -+ *******************************************/ -+ -+/* Bridge in structure for PMRImportPMR */ -+typedef struct PVRSRV_BRIDGE_IN_PMRIMPORTPMR_TAG -+{ -+ IMG_UINT64 ui64uiPassword; -+ IMG_UINT64 ui64uiSize; -+ IMG_HANDLE hPMRExport; -+ IMG_UINT32 ui32uiLog2Contig; -+} __packed PVRSRV_BRIDGE_IN_PMRIMPORTPMR; -+ -+/* Bridge out structure for PMRImportPMR */ -+typedef struct PVRSRV_BRIDGE_OUT_PMRIMPORTPMR_TAG -+{ -+ IMG_HANDLE hPMR; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_PMRIMPORTPMR; -+ 
-+/******************************************* -+ PMRLocalImportPMR -+ *******************************************/ -+ -+/* Bridge in structure for PMRLocalImportPMR */ -+typedef struct PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR_TAG -+{ -+ IMG_HANDLE hExtHandle; -+} __packed PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR; -+ -+/* Bridge out structure for PMRLocalImportPMR */ -+typedef struct PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR_TAG -+{ -+ IMG_DEVMEM_ALIGN_T uiAlign; -+ IMG_DEVMEM_SIZE_T uiSize; -+ IMG_HANDLE hPMR; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR; -+ -+/******************************************* -+ PMRUnrefPMR -+ *******************************************/ -+ -+/* Bridge in structure for PMRUnrefPMR */ -+typedef struct PVRSRV_BRIDGE_IN_PMRUNREFPMR_TAG -+{ -+ IMG_HANDLE hPMR; -+} __packed PVRSRV_BRIDGE_IN_PMRUNREFPMR; -+ -+/* Bridge out structure for PMRUnrefPMR */ -+typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFPMR_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_PMRUNREFPMR; -+ -+/******************************************* -+ PMRUnrefUnlockPMR -+ *******************************************/ -+ -+/* Bridge in structure for PMRUnrefUnlockPMR */ -+typedef struct PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR_TAG -+{ -+ IMG_HANDLE hPMR; -+} __packed PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR; -+ -+/* Bridge out structure for PMRUnrefUnlockPMR */ -+typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR; -+ -+/******************************************* -+ PhysmemNewRamBackedPMR -+ *******************************************/ -+ -+/* Bridge in structure for PhysmemNewRamBackedPMR */ -+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR_TAG -+{ -+ IMG_DEVMEM_SIZE_T uiSize; -+ IMG_UINT32 *pui32MappingTable; -+ const IMG_CHAR *puiAnnotation; -+ IMG_UINT32 ui32AnnotationLength; -+ IMG_UINT32 ui32Log2PageSize; -+ IMG_UINT32 ui32NumPhysChunks; -+ IMG_UINT32 ui32NumVirtChunks; -+ IMG_UINT32 ui32PDumpFlags; -+ IMG_PID ui32PID; -+ PVRSRV_MEMALLOCFLAGS_T uiFlags; -+} __packed PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR; -+ -+/* Bridge out structure for PhysmemNewRamBackedPMR */ -+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR_TAG -+{ -+ IMG_HANDLE hPMRPtr; -+ PVRSRV_ERROR eError; -+ PVRSRV_MEMALLOCFLAGS_T uiOutFlags; -+} __packed PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR; -+ -+/******************************************* -+ DevmemIntCtxCreate -+ *******************************************/ -+ -+/* Bridge in structure for DevmemIntCtxCreate */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE_TAG -+{ -+ IMG_BOOL bbKernelMemoryCtx; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE; -+ -+/* Bridge out structure for DevmemIntCtxCreate */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE_TAG -+{ -+ IMG_HANDLE hDevMemServerContext; -+ IMG_HANDLE hPrivData; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32CPUCacheLineSize; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE; -+ -+/******************************************* -+ DevmemIntCtxDestroy -+ *******************************************/ -+ -+/* Bridge in structure for DevmemIntCtxDestroy */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY_TAG -+{ -+ IMG_HANDLE hDevmemServerContext; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY; -+ -+/* Bridge out structure for DevmemIntCtxDestroy */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY; -+ 
-+/******************************************* -+ DevmemIntHeapCreate -+ *******************************************/ -+ -+/* Bridge in structure for DevmemIntHeapCreate */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE_TAG -+{ -+ IMG_DEV_VIRTADDR sHeapBaseAddr; -+ IMG_HANDLE hDevmemCtx; -+ IMG_UINT32 ui32HeapConfigIndex; -+ IMG_UINT32 ui32HeapIndex; -+ IMG_UINT32 ui32Log2DataPageSize; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE; -+ -+/* Bridge out structure for DevmemIntHeapCreate */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE_TAG -+{ -+ IMG_HANDLE hDevmemHeapPtr; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE; -+ -+/******************************************* -+ DevmemIntHeapDestroy -+ *******************************************/ -+ -+/* Bridge in structure for DevmemIntHeapDestroy */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY_TAG -+{ -+ IMG_HANDLE hDevmemHeap; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY; -+ -+/* Bridge out structure for DevmemIntHeapDestroy */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY; -+ -+/******************************************* -+ DevmemIntMapPMR -+ *******************************************/ -+ -+/* Bridge in structure for DevmemIntMapPMR */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR_TAG -+{ -+ IMG_HANDLE hDevmemServerHeap; -+ IMG_HANDLE hPMR; -+ IMG_HANDLE hReservation; -+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR; -+ -+/* Bridge out structure for DevmemIntMapPMR */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR_TAG -+{ -+ IMG_HANDLE hMapping; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR; -+ -+/******************************************* -+ DevmemIntUnmapPMR -+ *******************************************/ -+ -+/* Bridge in structure for DevmemIntUnmapPMR */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR_TAG -+{ -+ IMG_HANDLE hMapping; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR; -+ -+/* Bridge out structure for DevmemIntUnmapPMR */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR; -+ -+/******************************************* -+ DevmemIntReserveRange -+ *******************************************/ -+ -+/* Bridge in structure for DevmemIntReserveRange */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE_TAG -+{ -+ IMG_DEV_VIRTADDR sAddress; -+ IMG_DEVMEM_SIZE_T uiLength; -+ IMG_HANDLE hDevmemServerHeap; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE; -+ -+/* Bridge out structure for DevmemIntReserveRange */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE_TAG -+{ -+ IMG_HANDLE hReservation; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE; -+ -+/******************************************* -+ DevmemIntReserveRangeAndMapPMR -+ *******************************************/ -+ -+/* Bridge in structure for DevmemIntReserveRangeAndMapPMR */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGEANDMAPPMR_TAG -+{ -+ IMG_DEV_VIRTADDR sAddress; -+ IMG_DEVMEM_SIZE_T uiLength; -+ IMG_HANDLE hDevmemServerHeap; -+ IMG_HANDLE hPMR; -+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGEANDMAPPMR; -+ -+/* Bridge out structure for DevmemIntReserveRangeAndMapPMR */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGEANDMAPPMR_TAG -+{ -+ IMG_HANDLE hMapping; 
-+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGEANDMAPPMR; -+ -+/******************************************* -+ DevmemIntUnreserveRangeAndUnmapPMR -+ *******************************************/ -+ -+/* Bridge in structure for DevmemIntUnreserveRangeAndUnmapPMR */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGEANDUNMAPPMR_TAG -+{ -+ IMG_HANDLE hMapping; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGEANDUNMAPPMR; -+ -+/* Bridge out structure for DevmemIntUnreserveRangeAndUnmapPMR */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGEANDUNMAPPMR_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGEANDUNMAPPMR; -+ -+/******************************************* -+ DevmemIntUnreserveRange -+ *******************************************/ -+ -+/* Bridge in structure for DevmemIntUnreserveRange */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE_TAG -+{ -+ IMG_HANDLE hReservation; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE; -+ -+/* Bridge out structure for DevmemIntUnreserveRange */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE; -+ -+/******************************************* -+ ChangeSparseMem -+ *******************************************/ -+ -+/* Bridge in structure for ChangeSparseMem */ -+typedef struct PVRSRV_BRIDGE_IN_CHANGESPARSEMEM_TAG -+{ -+ IMG_DEV_VIRTADDR sDevVAddr; -+ IMG_UINT64 ui64CPUVAddr; -+ IMG_HANDLE hPMR; -+ IMG_HANDLE hSrvDevMemHeap; -+ IMG_UINT32 *pui32AllocPageIndices; -+ IMG_UINT32 *pui32FreePageIndices; -+ IMG_UINT32 ui32AllocPageCount; -+ IMG_UINT32 ui32FreePageCount; -+ IMG_UINT32 ui32SparseFlags; -+ PVRSRV_MEMALLOCFLAGS_T uiFlags; -+} __packed PVRSRV_BRIDGE_IN_CHANGESPARSEMEM; -+ -+/* Bridge out structure for ChangeSparseMem */ -+typedef struct PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM; -+ -+/******************************************* -+ DevmemIsVDevAddrValid -+ *******************************************/ -+ -+/* Bridge in structure for DevmemIsVDevAddrValid */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID_TAG -+{ -+ IMG_DEV_VIRTADDR sAddress; -+ IMG_HANDLE hDevmemCtx; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID; -+ -+/* Bridge out structure for DevmemIsVDevAddrValid */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID; -+ -+/******************************************* -+ DevmemInvalidateFBSCTable -+ *******************************************/ -+ -+/* Bridge in structure for DevmemInvalidateFBSCTable */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE_TAG -+{ -+ IMG_UINT64 ui64FBSCEntries; -+ IMG_HANDLE hDevmemCtx; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE; -+ -+/* Bridge out structure for DevmemInvalidateFBSCTable */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE; -+ -+/******************************************* -+ HeapCfgHeapConfigCount -+ *******************************************/ -+ -+/* Bridge in structure for HeapCfgHeapConfigCount */ -+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT; -+ -+/* Bridge out structure for HeapCfgHeapConfigCount */ 
-+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT_TAG -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32NumHeapConfigs; -+} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT; -+ -+/******************************************* -+ HeapCfgHeapCount -+ *******************************************/ -+ -+/* Bridge in structure for HeapCfgHeapCount */ -+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT_TAG -+{ -+ IMG_UINT32 ui32HeapConfigIndex; -+} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT; -+ -+/* Bridge out structure for HeapCfgHeapCount */ -+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT_TAG -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32NumHeaps; -+} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT; -+ -+/******************************************* -+ HeapCfgHeapConfigName -+ *******************************************/ -+ -+/* Bridge in structure for HeapCfgHeapConfigName */ -+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME_TAG -+{ -+ IMG_CHAR *puiHeapConfigName; -+ IMG_UINT32 ui32HeapConfigIndex; -+ IMG_UINT32 ui32HeapConfigNameBufSz; -+} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME; -+ -+/* Bridge out structure for HeapCfgHeapConfigName */ -+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME_TAG -+{ -+ IMG_CHAR *puiHeapConfigName; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME; -+ -+/******************************************* -+ HeapCfgHeapDetails -+ *******************************************/ -+ -+/* Bridge in structure for HeapCfgHeapDetails */ -+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS_TAG -+{ -+ IMG_CHAR *puiHeapNameOut; -+ IMG_UINT32 ui32HeapConfigIndex; -+ IMG_UINT32 ui32HeapIndex; -+ IMG_UINT32 ui32HeapNameBufSz; -+} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS; -+ -+/* Bridge out structure for HeapCfgHeapDetails */ -+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS_TAG -+{ -+ IMG_DEV_VIRTADDR sDevVAddrBase; -+ IMG_DEVMEM_SIZE_T uiHeapLength; -+ IMG_DEVMEM_SIZE_T uiReservedRegionLength; -+ IMG_CHAR *puiHeapNameOut; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32Log2DataPageSizeOut; -+ IMG_UINT32 ui32Log2ImportAlignmentOut; -+} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS; -+ -+/******************************************* -+ DevmemIntRegisterPFNotifyKM -+ *******************************************/ -+ -+/* Bridge in structure for DevmemIntRegisterPFNotifyKM */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM_TAG -+{ -+ IMG_HANDLE hDevmemCtx; -+ IMG_BOOL bRegister; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM; -+ -+/* Bridge out structure for DevmemIntRegisterPFNotifyKM */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM; -+ -+/******************************************* -+ PhysHeapGetMemInfo -+ *******************************************/ -+ -+/* Bridge in structure for PhysHeapGetMemInfo */ -+typedef struct PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO_TAG -+{ -+ PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats; -+ PVRSRV_PHYS_HEAP *peaPhysHeapID; -+ IMG_UINT32 ui32PhysHeapCount; -+} __packed PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO; -+ -+/* Bridge out structure for PhysHeapGetMemInfo */ -+typedef struct PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO_TAG -+{ -+ PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO; -+ -+/******************************************* -+ GetDefaultPhysicalHeap -+ *******************************************/ -+ -+/* Bridge in structure for 
GetDefaultPhysicalHeap */ -+typedef struct PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP; -+ -+/* Bridge out structure for GetDefaultPhysicalHeap */ -+typedef struct PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP_TAG -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_PHYS_HEAP eHeap; -+} __packed PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP; -+ -+/******************************************* -+ DevmemGetFaultAddress -+ *******************************************/ -+ -+/* Bridge in structure for DevmemGetFaultAddress */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS_TAG -+{ -+ IMG_HANDLE hDevmemCtx; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS; -+ -+/* Bridge out structure for DevmemGetFaultAddress */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS_TAG -+{ -+ IMG_DEV_VIRTADDR sFaultAddress; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS; -+ -+/******************************************* -+ PVRSRVStatsUpdateOOMStat -+ *******************************************/ -+ -+/* Bridge in structure for PVRSRVStatsUpdateOOMStat */ -+typedef struct PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT_TAG -+{ -+ IMG_PID ui32pid; -+ IMG_UINT32 ui32ui32StatType; -+} __packed PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT; -+ -+/* Bridge out structure for PVRSRVStatsUpdateOOMStat */ -+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT; -+ -+/******************************************* -+ DevmemXIntReserveRange -+ *******************************************/ -+ -+/* Bridge in structure for DevmemXIntReserveRange */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE_TAG -+{ -+ IMG_DEV_VIRTADDR sAddress; -+ IMG_DEVMEM_SIZE_T uiLength; -+ IMG_HANDLE hDevmemServerHeap; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE; -+ -+/* Bridge out structure for DevmemXIntReserveRange */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE_TAG -+{ -+ IMG_HANDLE hReservation; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE; -+ -+/******************************************* -+ DevmemXIntUnreserveRange -+ *******************************************/ -+ -+/* Bridge in structure for DevmemXIntUnreserveRange */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE_TAG -+{ -+ IMG_HANDLE hReservation; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE; -+ -+/* Bridge out structure for DevmemXIntUnreserveRange */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE; -+ -+/******************************************* -+ DevmemXIntMapPages -+ *******************************************/ -+ -+/* Bridge in structure for DevmemXIntMapPages */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES_TAG -+{ -+ IMG_HANDLE hPMR; -+ IMG_HANDLE hReservation; -+ IMG_UINT32 ui32PageCount; -+ IMG_UINT32 ui32PhysPageOffset; -+ IMG_UINT32 ui32VirtPageOffset; -+ PVRSRV_MEMALLOCFLAGS_T uiFlags; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES; -+ -+/* Bridge out structure for DevmemXIntMapPages */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES; -+ -+/******************************************* -+ DevmemXIntUnmapPages -+ *******************************************/ -+ -+/* Bridge in structure for DevmemXIntUnmapPages 
*/ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES_TAG -+{ -+ IMG_HANDLE hReservation; -+ IMG_UINT32 ui32PageCount; -+ IMG_UINT32 ui32VirtPageOffset; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES; -+ -+/* Bridge out structure for DevmemXIntUnmapPages */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES; -+ -+/******************************************* -+ DevmemXIntMapVRangeToBackingPage -+ *******************************************/ -+ -+/* Bridge in structure for DevmemXIntMapVRangeToBackingPage */ -+typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTMAPVRANGETOBACKINGPAGE_TAG -+{ -+ IMG_HANDLE hReservation; -+ IMG_UINT32 ui32PageCount; -+ IMG_UINT32 ui32VirtPageOffset; -+ PVRSRV_MEMALLOCFLAGS_T uiFlags; -+} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTMAPVRANGETOBACKINGPAGE; -+ -+/* Bridge out structure for DevmemXIntMapVRangeToBackingPage */ -+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPVRANGETOBACKINGPAGE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPVRANGETOBACKINGPAGE; -+ -+#endif /* COMMON_MM_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_mmextmem_bridge.h b/drivers/gpu/drm/img-rogue/common_mmextmem_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_mmextmem_bridge.h -@@ -0,0 +1,80 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for mmextmem -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for mmextmem -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#ifndef COMMON_MMEXTMEM_BRIDGE_H -+#define COMMON_MMEXTMEM_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "pvrsrv_memallocflags.h" -+#include "devicemem_typedefs.h" -+ -+#define PVRSRV_BRIDGE_MMEXTMEM_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_MMEXTMEM_PHYSMEMWRAPEXTMEM PVRSRV_BRIDGE_MMEXTMEM_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_MMEXTMEM_CMD_LAST (PVRSRV_BRIDGE_MMEXTMEM_CMD_FIRST+0) -+ -+/******************************************* -+ PhysmemWrapExtMem -+ *******************************************/ -+ -+/* Bridge in structure for PhysmemWrapExtMem */ -+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMWRAPEXTMEM_TAG -+{ -+ IMG_UINT64 ui64CpuVAddr; -+ IMG_DEVMEM_SIZE_T uiSize; -+ PVRSRV_MEMALLOCFLAGS_T uiFlags; -+} __packed PVRSRV_BRIDGE_IN_PHYSMEMWRAPEXTMEM; -+ -+/* Bridge out structure for PhysmemWrapExtMem */ -+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMWRAPEXTMEM_TAG -+{ -+ IMG_HANDLE hPMRPtr; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_PHYSMEMWRAPEXTMEM; -+ -+#endif /* COMMON_MMEXTMEM_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_pvrtl_bridge.h b/drivers/gpu/drm/img-rogue/common_pvrtl_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_pvrtl_bridge.h -@@ -0,0 +1,214 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for pvrtl -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for pvrtl -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#ifndef COMMON_PVRTL_BRIDGE_H -+#define COMMON_PVRTL_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "devicemem_typedefs.h" -+#include "pvrsrv_tlcommon.h" -+ -+#define PVRSRV_BRIDGE_PVRTL_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+2 -+#define PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+3 -+#define PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS PVRSRV_BRIDGE_PVRTL_CMD_FIRST+4 -+#define PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+5 -+#define PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+6 -+#define PVRSRV_BRIDGE_PVRTL_TLWRITEDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7 -+#define PVRSRV_BRIDGE_PVRTL_CMD_LAST (PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7) -+ -+/******************************************* -+ TLOpenStream -+ *******************************************/ -+ -+/* Bridge in structure for TLOpenStream */ -+typedef struct PVRSRV_BRIDGE_IN_TLOPENSTREAM_TAG -+{ -+ const IMG_CHAR *puiName; -+ IMG_UINT32 ui32Mode; -+} __packed PVRSRV_BRIDGE_IN_TLOPENSTREAM; -+ -+/* Bridge out structure for TLOpenStream */ -+typedef struct PVRSRV_BRIDGE_OUT_TLOPENSTREAM_TAG -+{ -+ IMG_HANDLE hSD; -+ IMG_HANDLE hTLPMR; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_TLOPENSTREAM; -+ -+/******************************************* -+ TLCloseStream -+ *******************************************/ -+ -+/* Bridge in structure for TLCloseStream */ -+typedef struct PVRSRV_BRIDGE_IN_TLCLOSESTREAM_TAG -+{ -+ IMG_HANDLE hSD; -+} __packed PVRSRV_BRIDGE_IN_TLCLOSESTREAM; -+ -+/* Bridge out structure for TLCloseStream */ -+typedef struct PVRSRV_BRIDGE_OUT_TLCLOSESTREAM_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_TLCLOSESTREAM; -+ -+/******************************************* -+ TLAcquireData -+ *******************************************/ -+ -+/* Bridge in structure for TLAcquireData */ -+typedef struct PVRSRV_BRIDGE_IN_TLACQUIREDATA_TAG -+{ -+ IMG_HANDLE hSD; -+} __packed PVRSRV_BRIDGE_IN_TLACQUIREDATA; -+ -+/* Bridge out structure for TLAcquireData */ -+typedef struct 
PVRSRV_BRIDGE_OUT_TLACQUIREDATA_TAG -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32ReadLen; -+ IMG_UINT32 ui32ReadOffset; -+} __packed PVRSRV_BRIDGE_OUT_TLACQUIREDATA; -+ -+/******************************************* -+ TLReleaseData -+ *******************************************/ -+ -+/* Bridge in structure for TLReleaseData */ -+typedef struct PVRSRV_BRIDGE_IN_TLRELEASEDATA_TAG -+{ -+ IMG_HANDLE hSD; -+ IMG_UINT32 ui32ReadLen; -+ IMG_UINT32 ui32ReadOffset; -+} __packed PVRSRV_BRIDGE_IN_TLRELEASEDATA; -+ -+/* Bridge out structure for TLReleaseData */ -+typedef struct PVRSRV_BRIDGE_OUT_TLRELEASEDATA_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_TLRELEASEDATA; -+ -+/******************************************* -+ TLDiscoverStreams -+ *******************************************/ -+ -+/* Bridge in structure for TLDiscoverStreams */ -+typedef struct PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS_TAG -+{ -+ const IMG_CHAR *puiNamePattern; -+ IMG_CHAR *puiStreams; -+ IMG_UINT32 ui32Size; -+} __packed PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS; -+ -+/* Bridge out structure for TLDiscoverStreams */ -+typedef struct PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS_TAG -+{ -+ IMG_CHAR *puiStreams; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32NumFound; -+} __packed PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS; -+ -+/******************************************* -+ TLReserveStream -+ *******************************************/ -+ -+/* Bridge in structure for TLReserveStream */ -+typedef struct PVRSRV_BRIDGE_IN_TLRESERVESTREAM_TAG -+{ -+ IMG_HANDLE hSD; -+ IMG_UINT32 ui32Size; -+ IMG_UINT32 ui32SizeMin; -+} __packed PVRSRV_BRIDGE_IN_TLRESERVESTREAM; -+ -+/* Bridge out structure for TLReserveStream */ -+typedef struct PVRSRV_BRIDGE_OUT_TLRESERVESTREAM_TAG -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32Available; -+ IMG_UINT32 ui32BufferOffset; -+} __packed PVRSRV_BRIDGE_OUT_TLRESERVESTREAM; -+ -+/******************************************* -+ TLCommitStream -+ *******************************************/ -+ -+/* Bridge in structure for TLCommitStream */ -+typedef struct PVRSRV_BRIDGE_IN_TLCOMMITSTREAM_TAG -+{ -+ IMG_HANDLE hSD; -+ IMG_UINT32 ui32ReqSize; -+} __packed PVRSRV_BRIDGE_IN_TLCOMMITSTREAM; -+ -+/* Bridge out structure for TLCommitStream */ -+typedef struct PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM; -+ -+/******************************************* -+ TLWriteData -+ *******************************************/ -+ -+/* Bridge in structure for TLWriteData */ -+typedef struct PVRSRV_BRIDGE_IN_TLWRITEDATA_TAG -+{ -+ IMG_HANDLE hSD; -+ IMG_BYTE *pui8Data; -+ IMG_UINT32 ui32Size; -+} __packed PVRSRV_BRIDGE_IN_TLWRITEDATA; -+ -+/* Bridge out structure for TLWriteData */ -+typedef struct PVRSRV_BRIDGE_OUT_TLWRITEDATA_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_TLWRITEDATA; -+ -+#endif /* COMMON_PVRTL_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_rgxbreakpoint_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxbreakpoint_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_rgxbreakpoint_bridge.h -@@ -0,0 +1,150 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for rgxbreakpoint -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for rgxbreakpoint -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef COMMON_RGXBREAKPOINT_BRIDGE_H -+#define COMMON_RGXBREAKPOINT_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "rgx_bridge.h" -+ -+#define PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+2 -+#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+3 -+#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+4 -+#define PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_LAST (PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+4) -+ -+/******************************************* -+ RGXSetBreakpoint -+ *******************************************/ -+ -+/* Bridge in structure for RGXSetBreakpoint */ -+typedef struct PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT_TAG -+{ -+ IMG_UINT64 ui64TempSpillingAddr; -+ IMG_HANDLE hPrivData; -+ IMG_UINT32 eFWDataMaster; -+ IMG_UINT32 ui32BreakpointAddr; -+ IMG_UINT32 ui32DM; -+ IMG_UINT32 ui32HandlerAddr; -+} __packed PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT; -+ -+/* Bridge out structure for RGXSetBreakpoint */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT; -+ -+/******************************************* -+ RGXClearBreakpoint -+ *******************************************/ -+ -+/* Bridge in structure for RGXClearBreakpoint */ -+typedef struct PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT_TAG -+{ -+ IMG_HANDLE hPrivData; -+} __packed PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT; -+ -+/* Bridge out structure for RGXClearBreakpoint */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT; -+ -+/******************************************* -+ RGXEnableBreakpoint -+ *******************************************/ -+ -+/* Bridge in structure for RGXEnableBreakpoint */ -+typedef struct PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT_TAG -+{ -+ IMG_HANDLE hPrivData; -+} __packed PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT; -+ -+/* Bridge out structure for RGXEnableBreakpoint */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT; -+ -+/******************************************* -+ RGXDisableBreakpoint -+ *******************************************/ -+ -+/* Bridge in structure for RGXDisableBreakpoint */ -+typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT_TAG -+{ -+ IMG_HANDLE hPrivData; -+} __packed PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT; -+ -+/* Bridge out structure for RGXDisableBreakpoint */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT; -+ -+/******************************************* -+ RGXOverallocateBPRegisters -+ *******************************************/ -+ -+/* Bridge in structure for RGXOverallocateBPRegisters */ -+typedef struct PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS_TAG -+{ -+ IMG_UINT32 ui32SharedRegs; -+ IMG_UINT32 ui32TempRegs; -+} __packed PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS; -+ -+/* Bridge out structure for RGXOverallocateBPRegisters */ -+typedef struct 
PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS; -+ -+#endif /* COMMON_RGXBREAKPOINT_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_rgxcmp_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxcmp_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_rgxcmp_bridge.h -@@ -0,0 +1,250 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for rgxcmp -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for rgxcmp -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/
-+
-+#ifndef COMMON_RGXCMP_BRIDGE_H
-+#define COMMON_RGXCMP_BRIDGE_H
-+
-+#include
-+
-+#include "img_defs.h"
-+#include "img_types.h"
-+#include "pvrsrv_error.h"
-+
-+#include "rgx_bridge.h"
-+#include "pvrsrv_sync_km.h"
-+
-+#define PVRSRV_BRIDGE_RGXCMP_CMD_FIRST 0
-+#define PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+0
-+#define PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+1
-+#define PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+2
-+#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+3
-+#define PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+4
-+#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2 PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5
-+#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+6
-+#define PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7
-+#define PVRSRV_BRIDGE_RGXCMP_RGXKICKTIMESTAMPQUERY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+8
-+#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST (PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+8)
-+
-+/*******************************************
-+ RGXCreateComputeContext
-+ *******************************************/
-+
-+/* Bridge in structure for RGXCreateComputeContext */
-+typedef struct PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT_TAG
-+{
-+ IMG_UINT64 ui64RobustnessAddress;
-+ IMG_HANDLE hPrivData;
-+ IMG_BYTE *pui8FrameworkCmd;
-+ IMG_BYTE *pui8StaticComputeContextState;
-+ IMG_INT32 i32Priority;
-+ IMG_UINT32 ui32ContextFlags;
-+ IMG_UINT32 ui32FrameworkCmdSize;
-+ IMG_UINT32 ui32MaxDeadlineMS;
-+ IMG_UINT32 ui32PackedCCBSizeU88;
-+ IMG_UINT32 ui32StaticComputeContextStateSize;
-+} __packed PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT;
-+
-+/* Bridge out structure for RGXCreateComputeContext */
-+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT_TAG
-+{
-+ IMG_HANDLE hComputeContext;
-+ PVRSRV_ERROR eError;
-+} __packed PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT;
-+
-+/*******************************************
-+ RGXDestroyComputeContext
-+ *******************************************/
-+
-+/* Bridge in structure for RGXDestroyComputeContext */
-+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT_TAG
-+{
-+ IMG_HANDLE hComputeContext;
-+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT;
-+
-+/* Bridge out structure for RGXDestroyComputeContext */
-+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT_TAG
-+{
-+ PVRSRV_ERROR eError;
-+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT;
-+
-+/*******************************************
-+ RGXFlushComputeData
-+ *******************************************/
-+
-+/* Bridge in structure for RGXFlushComputeData */
-+typedef struct PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA_TAG
-+{
-+ IMG_HANDLE hComputeContext;
-+} __packed PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA;
-+
-+/* Bridge out structure for RGXFlushComputeData */
-+typedef struct PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA_TAG
-+{
-+ PVRSRV_ERROR eError;
-+} __packed PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA;
-+
-+/*******************************************
-+ RGXSetComputeContextPriority
-+ *******************************************/
-+
-+/* Bridge in structure for RGXSetComputeContextPriority */
-+typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY_TAG
-+{
-+ IMG_HANDLE hComputeContext;
-+ IMG_INT32 i32Priority;
-+} __packed
PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY; -+ -+/* Bridge out structure for RGXSetComputeContextPriority */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY; -+ -+/******************************************* -+ RGXNotifyComputeWriteOffsetUpdate -+ *******************************************/ -+ -+/* Bridge in structure for RGXNotifyComputeWriteOffsetUpdate */ -+typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG -+{ -+ IMG_HANDLE hComputeContext; -+} __packed PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE; -+ -+/* Bridge out structure for RGXNotifyComputeWriteOffsetUpdate */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE; -+ -+/******************************************* -+ RGXKickCDM2 -+ *******************************************/ -+ -+/* Bridge in structure for RGXKickCDM2 */ -+typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM2_TAG -+{ -+ IMG_UINT64 ui64DeadlineInus; -+ IMG_HANDLE hComputeContext; -+ IMG_UINT32 *pui32ClientUpdateOffset; -+ IMG_UINT32 *pui32ClientUpdateValue; -+ IMG_UINT32 *pui32SyncPMRFlags; -+ IMG_BYTE *pui8DMCmd; -+ IMG_CHAR *puiUpdateFenceName; -+ IMG_HANDLE *phClientUpdateUFOSyncPrimBlock; -+ IMG_HANDLE *phSyncPMRs; -+ PVRSRV_FENCE hCheckFenceFd; -+ PVRSRV_TIMELINE hUpdateTimeline; -+ IMG_UINT32 ui32ClientUpdateCount; -+ IMG_UINT32 ui32CmdSize; -+ IMG_UINT32 ui32ExtJobRef; -+ IMG_UINT32 ui32NumOfWorkgroups; -+ IMG_UINT32 ui32NumOfWorkitems; -+ IMG_UINT32 ui32PDumpFlags; -+ IMG_UINT32 ui32SyncPMRCount; -+} __packed PVRSRV_BRIDGE_IN_RGXKICKCDM2; -+ -+/* Bridge out structure for RGXKickCDM2 */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKCDM2_TAG -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_FENCE hUpdateFence; -+} __packed PVRSRV_BRIDGE_OUT_RGXKICKCDM2; -+ -+/******************************************* -+ RGXSetComputeContextProperty -+ *******************************************/ -+ -+/* Bridge in structure for RGXSetComputeContextProperty */ -+typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY_TAG -+{ -+ IMG_UINT64 ui64Input; -+ IMG_HANDLE hComputeContext; -+ IMG_UINT32 ui32Property; -+} __packed PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY; -+ -+/* Bridge out structure for RGXSetComputeContextProperty */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY_TAG -+{ -+ IMG_UINT64 ui64Output; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY; -+ -+/******************************************* -+ RGXGetLastDeviceError -+ *******************************************/ -+ -+/* Bridge in structure for RGXGetLastDeviceError */ -+typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR; -+ -+/* Bridge out structure for RGXGetLastDeviceError */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR_TAG -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32Error; -+} __packed PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR; -+ -+/******************************************* -+ RGXKickTimestampQuery -+ *******************************************/ -+ -+/* Bridge in structure for RGXKickTimestampQuery */ -+typedef struct PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY_TAG -+{ -+ IMG_HANDLE hComputeContext; -+ IMG_BYTE *pui8DMCmd; -+ PVRSRV_FENCE hCheckFenceFd; -+ IMG_UINT32 ui32CmdSize; -+ IMG_UINT32 ui32ExtJobRef; -+} 
__packed PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY; -+ -+/* Bridge out structure for RGXKickTimestampQuery */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY; -+ -+#endif /* COMMON_RGXCMP_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_rgxfwdbg_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxfwdbg_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_rgxfwdbg_bridge.h -@@ -0,0 +1,339 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for rgxfwdbg -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for rgxfwdbg -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef COMMON_RGXFWDBG_BRIDGE_H -+#define COMMON_RGXFWDBG_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "devicemem_typedefs.h" -+#include "rgx_bridge.h" -+#include "pvrsrv_memallocflags.h" -+ -+#define PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSUSPENDDEVICE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+2 -+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGRESUMEDEVICE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+3 -+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+4 -+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+5 -+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERPRIORITY PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+6 -+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+7 -+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+8 -+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERISOLATIONGROUP PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+9 -+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+10 -+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGMAPGUESTHEAP PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+11 -+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+12 -+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+13 -+#define PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+14 -+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGINJECTFAULT PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+15 -+#define PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST (PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+15) -+ -+/******************************************* -+ RGXFWDebugSetFWLog -+ *******************************************/ -+ -+/* Bridge in structure for RGXFWDebugSetFWLog */ -+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG_TAG -+{ -+ IMG_UINT32 ui32RGXFWLogType; -+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG; -+ -+/* Bridge out structure for RGXFWDebugSetFWLog */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG; -+ -+/******************************************* -+ RGXFWDebugDumpFreelistPageList -+ *******************************************/ -+ -+/* Bridge in structure for RGXFWDebugDumpFreelistPageList */ -+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST; -+ -+/* Bridge out structure for RGXFWDebugDumpFreelistPageList */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST; -+ -+/******************************************* -+ RGXFWDebugSuspendDevice -+ *******************************************/ -+ -+/* Bridge in structure for RGXFWDebugSuspendDevice */ -+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSUSPENDDEVICE_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSUSPENDDEVICE; -+ -+/* Bridge out structure for RGXFWDebugSuspendDevice */ -+typedef struct 
PVRSRV_BRIDGE_OUT_RGXFWDEBUGSUSPENDDEVICE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSUSPENDDEVICE; -+ -+/******************************************* -+ RGXFWDebugResumeDevice -+ *******************************************/ -+ -+/* Bridge in structure for RGXFWDebugResumeDevice */ -+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGRESUMEDEVICE_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGRESUMEDEVICE; -+ -+/* Bridge out structure for RGXFWDebugResumeDevice */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGRESUMEDEVICE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGRESUMEDEVICE; -+ -+/******************************************* -+ RGXFWDebugSetVzConnectionCooldownPeriodInSec -+ *******************************************/ -+ -+/* Bridge in structure for RGXFWDebugSetVzConnectionCooldownPeriodInSec */ -+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC_TAG -+{ -+ IMG_UINT32 ui32ui32VzConnectionCooldownPeriodInSec; -+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC; -+ -+/* Bridge out structure for RGXFWDebugSetVzConnectionCooldownPeriodInSec */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC; -+ -+/******************************************* -+ RGXFWDebugSetHCSDeadline -+ *******************************************/ -+ -+/* Bridge in structure for RGXFWDebugSetHCSDeadline */ -+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE_TAG -+{ -+ IMG_UINT32 ui32RGXHCSDeadline; -+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE; -+ -+/* Bridge out structure for RGXFWDebugSetHCSDeadline */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE; -+ -+/******************************************* -+ RGXFWDebugSetDriverPriority -+ *******************************************/ -+ -+/* Bridge in structure for RGXFWDebugSetDriverPriority */ -+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY_TAG -+{ -+ IMG_UINT32 ui32DriverID; -+ IMG_UINT32 ui32Priority; -+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY; -+ -+/* Bridge out structure for RGXFWDebugSetDriverPriority */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY; -+ -+/******************************************* -+ RGXFWDebugSetDriverTimeSlice -+ *******************************************/ -+ -+/* Bridge in structure for RGXFWDebugSetDriverTimeSlice */ -+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICE_TAG -+{ -+ IMG_UINT32 ui32DriverID; -+ IMG_UINT32 ui32TimeSlice; -+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICE; -+ -+/* Bridge out structure for RGXFWDebugSetDriverTimeSlice */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICE; -+ -+/******************************************* -+ RGXFWDebugSetDriverTimeSliceInterval -+ *******************************************/ -+ -+/* Bridge in structure for RGXFWDebugSetDriverTimeSliceInterval */ -+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL_TAG -+{ -+ IMG_UINT32 ui32TimeSliceInterval; -+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL; -+ -+/* Bridge 
out structure for RGXFWDebugSetDriverTimeSliceInterval */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL; -+ -+/******************************************* -+ RGXFWDebugSetDriverIsolationGroup -+ *******************************************/ -+ -+/* Bridge in structure for RGXFWDebugSetDriverIsolationGroup */ -+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP_TAG -+{ -+ IMG_UINT32 ui32DriverID; -+ IMG_UINT32 ui32IsolationGroup; -+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP; -+ -+/* Bridge out structure for RGXFWDebugSetDriverIsolationGroup */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP; -+ -+/******************************************* -+ RGXFWDebugSetOSNewOnlineState -+ *******************************************/ -+ -+/* Bridge in structure for RGXFWDebugSetOSNewOnlineState */ -+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE_TAG -+{ -+ IMG_UINT32 ui32DriverID; -+ IMG_UINT32 ui32OSNewState; -+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE; -+ -+/* Bridge out structure for RGXFWDebugSetOSNewOnlineState */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE; -+ -+/******************************************* -+ RGXFWDebugMapGuestHeap -+ *******************************************/ -+ -+/* Bridge in structure for RGXFWDebugMapGuestHeap */ -+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP_TAG -+{ -+ IMG_UINT64 ui64ui64GuestHeapBase; -+ IMG_UINT32 ui32DriverID; -+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP; -+ -+/* Bridge out structure for RGXFWDebugMapGuestHeap */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGMAPGUESTHEAP_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGMAPGUESTHEAP; -+ -+/******************************************* -+ RGXFWDebugPHRConfigure -+ *******************************************/ -+ -+/* Bridge in structure for RGXFWDebugPHRConfigure */ -+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE_TAG -+{ -+ IMG_UINT32 ui32ui32PHRMode; -+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE; -+ -+/* Bridge out structure for RGXFWDebugPHRConfigure */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE; -+ -+/******************************************* -+ RGXFWDebugWdgConfigure -+ *******************************************/ -+ -+/* Bridge in structure for RGXFWDebugWdgConfigure */ -+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE_TAG -+{ -+ IMG_UINT32 ui32ui32WdgPeriodUs; -+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE; -+ -+/* Bridge out structure for RGXFWDebugWdgConfigure */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE; -+ -+/******************************************* -+ RGXCurrentTime -+ *******************************************/ -+ -+/* Bridge in structure for RGXCurrentTime */ -+typedef struct PVRSRV_BRIDGE_IN_RGXCURRENTTIME_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_RGXCURRENTTIME; -+ -+/* Bridge out structure for RGXCurrentTime */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXCURRENTTIME_TAG -+{ 
-+ IMG_UINT64 ui64Time; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXCURRENTTIME; -+ -+/******************************************* -+ RGXFWDebugInjectFault -+ *******************************************/ -+ -+/* Bridge in structure for RGXFWDebugInjectFault */ -+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGINJECTFAULT_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGINJECTFAULT; -+ -+/* Bridge out structure for RGXFWDebugInjectFault */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT; -+ -+#endif /* COMMON_RGXFWDBG_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_rgxhwperf_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxhwperf_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_rgxhwperf_bridge.h -@@ -0,0 +1,248 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for rgxhwperf -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for rgxhwperf -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef COMMON_RGXHWPERF_BRIDGE_H -+#define COMMON_RGXHWPERF_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "rgx_bridge.h" -+#include "rgx_hwperf.h" -+ -+#define PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGMUXHWPERFCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+2 -+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3 -+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+4 -+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+5 -+#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFMUXCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+6 -+#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+7 -+#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+8 -+#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFTIMESTAMP PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+9 -+#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST (PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+9) -+ -+/******************************************* -+ RGXCtrlHWPerf -+ *******************************************/ -+ -+/* Bridge in structure for RGXCtrlHWPerf */ -+typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERF_TAG -+{ -+ IMG_UINT64 ui64Mask; -+ IMG_UINT32 ui32StreamId; -+ IMG_BOOL bToggle; -+} __packed PVRSRV_BRIDGE_IN_RGXCTRLHWPERF; -+ -+/* Bridge out structure for RGXCtrlHWPerf */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF; -+ -+/******************************************* -+ RGXGetHWPerfBvncFeatureFlags -+ *******************************************/ -+ -+/* Bridge in structure for RGXGetHWPerfBvncFeatureFlags */ -+typedef struct PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS; -+ -+/* Bridge out structure for RGXGetHWPerfBvncFeatureFlags */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS_TAG -+{ -+ RGX_HWPERF_BVNC sBVNC; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS; -+ -+/******************************************* -+ RGXConfigMuxHWPerfCounters -+ *******************************************/ -+ -+/* Bridge in structure for RGXConfigMuxHWPerfCounters */ -+typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS_TAG -+{ -+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psBlockConfigs; -+ IMG_UINT32 ui32ArrayLen; -+} __packed PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS; -+ -+/* Bridge out structure for RGXConfigMuxHWPerfCounters */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS; -+ -+/******************************************* -+ RGXControlHWPerfBlocks -+ *******************************************/ -+ -+/* Bridge in structure for RGXControlHWPerfBlocks */ -+typedef struct PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS_TAG -+{ -+ IMG_UINT16 *pui16BlockIDs; -+ IMG_UINT32 ui32ArrayLen; -+ IMG_BOOL bEnable; -+} __packed PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS; -+ -+/* Bridge out structure for 
RGXControlHWPerfBlocks */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS; -+ -+/******************************************* -+ RGXConfigCustomCounters -+ *******************************************/ -+ -+/* Bridge in structure for RGXConfigCustomCounters */ -+typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS_TAG -+{ -+ IMG_UINT32 *pui32CustomCounterIDs; -+ IMG_UINT16 ui16CustomBlockID; -+ IMG_UINT16 ui16NumCustomCounters; -+} __packed PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS; -+ -+/* Bridge out structure for RGXConfigCustomCounters */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS; -+ -+/******************************************* -+ RGXConfigureHWPerfBlocks -+ *******************************************/ -+ -+/* Bridge in structure for RGXConfigureHWPerfBlocks */ -+typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS_TAG -+{ -+ RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigs; -+ IMG_UINT32 ui32ArrayLen; -+ IMG_UINT32 ui32CtrlWord; -+} __packed PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS; -+ -+/* Bridge out structure for RGXConfigureHWPerfBlocks */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS; -+ -+/******************************************* -+ RGXGetConfiguredHWPerfMuxCounters -+ *******************************************/ -+ -+/* Bridge in structure for RGXGetConfiguredHWPerfMuxCounters */ -+typedef struct PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFMUXCOUNTERS_TAG -+{ -+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psConfiguredMuxCounters; -+ IMG_UINT32 ui32BlockID; -+} __packed PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFMUXCOUNTERS; -+ -+/* Bridge out structure for RGXGetConfiguredHWPerfMuxCounters */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFMUXCOUNTERS_TAG -+{ -+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psConfiguredMuxCounters; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFMUXCOUNTERS; -+ -+/******************************************* -+ RGXGetConfiguredHWPerfCounters -+ *******************************************/ -+ -+/* Bridge in structure for RGXGetConfiguredHWPerfCounters */ -+typedef struct PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS_TAG -+{ -+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters; -+ IMG_UINT32 ui32BlockID; -+} __packed PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS; -+ -+/* Bridge out structure for RGXGetConfiguredHWPerfCounters */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS_TAG -+{ -+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS; -+ -+/******************************************* -+ RGXGetEnabledHWPerfBlocks -+ *******************************************/ -+ -+/* Bridge in structure for RGXGetEnabledHWPerfBlocks */ -+typedef struct PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS_TAG -+{ -+ IMG_UINT32 *pui32EnabledBlockIDs; -+ IMG_UINT32 ui32ArrayLen; -+} __packed PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS; -+ -+/* Bridge out structure for RGXGetEnabledHWPerfBlocks */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS_TAG -+{ -+ IMG_UINT32 *pui32EnabledBlockIDs; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32BlockCount; -+} __packed PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS; -+ -+/******************************************* -+ 
RGXGetHWPerfTimeStamp -+ *******************************************/ -+ -+/* Bridge in structure for RGXGetHWPerfTimeStamp */ -+typedef struct PVRSRV_BRIDGE_IN_RGXGETHWPERFTIMESTAMP_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_RGXGETHWPERFTIMESTAMP; -+ -+/* Bridge out structure for RGXGetHWPerfTimeStamp */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXGETHWPERFTIMESTAMP_TAG -+{ -+ IMG_UINT64 ui64TimeStamp; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXGETHWPERFTIMESTAMP; -+ -+#endif /* COMMON_RGXHWPERF_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_rgxkicksync_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxkicksync_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_rgxkicksync_bridge.h -@@ -0,0 +1,143 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for rgxkicksync -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for rgxkicksync -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef COMMON_RGXKICKSYNC_BRIDGE_H -+#define COMMON_RGXKICKSYNC_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "rgx_bridge.h" -+#include "pvrsrv_sync_km.h" -+ -+#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2 PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+2 -+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3 -+#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3) -+ -+/******************************************* -+ RGXCreateKickSyncContext -+ *******************************************/ -+ -+/* Bridge in structure for RGXCreateKickSyncContext */ -+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT_TAG -+{ -+ IMG_HANDLE hPrivData; -+ IMG_UINT32 ui32ContextFlags; -+ IMG_UINT32 ui32PackedCCBSizeU88; -+} __packed PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT; -+ -+/* Bridge out structure for RGXCreateKickSyncContext */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT_TAG -+{ -+ IMG_HANDLE hKickSyncContext; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT; -+ -+/******************************************* -+ RGXDestroyKickSyncContext -+ *******************************************/ -+ -+/* Bridge in structure for RGXDestroyKickSyncContext */ -+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT_TAG -+{ -+ IMG_HANDLE hKickSyncContext; -+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT; -+ -+/* Bridge out structure for RGXDestroyKickSyncContext */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT; -+ -+/******************************************* -+ RGXKickSync2 -+ *******************************************/ -+ -+/* Bridge in structure for RGXKickSync2 */ -+typedef struct PVRSRV_BRIDGE_IN_RGXKICKSYNC2_TAG -+{ -+ IMG_HANDLE hKickSyncContext; -+ IMG_UINT32 *pui32UpdateDevVarOffset; -+ IMG_UINT32 *pui32UpdateValue; -+ IMG_CHAR *puiUpdateFenceName; -+ IMG_HANDLE *phUpdateUFODevVarBlock; -+ PVRSRV_FENCE hCheckFenceFD; -+ PVRSRV_TIMELINE hTimelineFenceFD; -+ IMG_UINT32 ui32ClientUpdateCount; -+ IMG_UINT32 ui32ExtJobRef; -+} __packed PVRSRV_BRIDGE_IN_RGXKICKSYNC2; -+ -+/* Bridge out structure for RGXKickSync2 */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKSYNC2_TAG -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_FENCE hUpdateFenceFD; -+} __packed PVRSRV_BRIDGE_OUT_RGXKICKSYNC2; -+ -+/******************************************* -+ RGXSetKickSyncContextProperty -+ *******************************************/ -+ -+/* Bridge in structure for RGXSetKickSyncContextProperty */ -+typedef struct PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY_TAG -+{ -+ IMG_UINT64 ui64Input; -+ IMG_HANDLE hKickSyncContext; -+ IMG_UINT32 ui32Property; -+} __packed PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY; -+ -+/* Bridge out structure for RGXSetKickSyncContextProperty */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY_TAG -+{ -+ IMG_UINT64 ui64Output; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY; -+ -+#endif /* COMMON_RGXKICKSYNC_BRIDGE_H */ -diff --git 
a/drivers/gpu/drm/img-rogue/common_rgxregconfig_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxregconfig_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_rgxregconfig_bridge.h -@@ -0,0 +1,146 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for rgxregconfig -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for rgxregconfig -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/
-+
-+#ifndef COMMON_RGXREGCONFIG_BRIDGE_H
-+#define COMMON_RGXREGCONFIG_BRIDGE_H
-+
-+#include
-+
-+#include "img_defs.h"
-+#include "img_types.h"
-+#include "pvrsrv_error.h"
-+
-+#include "rgx_bridge.h"
-+
-+#define PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST 0
-+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+0
-+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+1
-+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+2
-+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+3
-+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+4
-+#define PVRSRV_BRIDGE_RGXREGCONFIG_CMD_LAST (PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+4)
-+
-+/*******************************************
-+ RGXSetRegConfigType
-+ *******************************************/
-+
-+/* Bridge in structure for RGXSetRegConfigType */
-+typedef struct PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE_TAG
-+{
-+ IMG_UINT8 ui8RegPowerIsland;
-+} __packed PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE;
-+
-+/* Bridge out structure for RGXSetRegConfigType */
-+typedef struct PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE_TAG
-+{
-+ PVRSRV_ERROR eError;
-+} __packed PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE;
-+
-+/*******************************************
-+ RGXAddRegconfig
-+ *******************************************/
-+
-+/* Bridge in structure for RGXAddRegconfig */
-+typedef struct PVRSRV_BRIDGE_IN_RGXADDREGCONFIG_TAG
-+{
-+ IMG_UINT64 ui64RegMask;
-+ IMG_UINT64 ui64RegValue;
-+ IMG_UINT32 ui32RegAddr;
-+} __packed PVRSRV_BRIDGE_IN_RGXADDREGCONFIG;
-+
-+/* Bridge out structure for RGXAddRegconfig */
-+typedef struct PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG_TAG
-+{
-+ PVRSRV_ERROR eError;
-+} __packed PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG;
-+
-+/*******************************************
-+ RGXClearRegConfig
-+ *******************************************/
-+
-+/* Bridge in structure for RGXClearRegConfig */
-+typedef struct PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG_TAG
-+{
-+ IMG_UINT32 ui32EmptyStructPlaceholder;
-+} __packed PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG;
-+
-+/* Bridge out structure for RGXClearRegConfig */
-+typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG_TAG
-+{
-+ PVRSRV_ERROR eError;
-+} __packed PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG;
-+
-+/*******************************************
-+ RGXEnableRegConfig
-+ *******************************************/
-+
-+/* Bridge in structure for RGXEnableRegConfig */
-+typedef struct PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG_TAG
-+{
-+ IMG_UINT32 ui32EmptyStructPlaceholder;
-+} __packed PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG;
-+
-+/* Bridge out structure for RGXEnableRegConfig */
-+typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG_TAG
-+{
-+ PVRSRV_ERROR eError;
-+} __packed PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG;
-+
-+/*******************************************
-+ RGXDisableRegConfig
-+ *******************************************/
-+
-+/* Bridge in structure for RGXDisableRegConfig */
-+typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG_TAG
-+{
-+ IMG_UINT32 ui32EmptyStructPlaceholder;
-+} __packed PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG;
-+
-+/* Bridge out structure for RGXDisableRegConfig */
-+typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG_TAG
-+{
-+ PVRSRV_ERROR eError;
-+} __packed PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG;
-+
-+#endif /* COMMON_RGXREGCONFIG_BRIDGE_H
*/ -diff --git a/drivers/gpu/drm/img-rogue/common_rgxta3d_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxta3d_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_rgxta3d_bridge.h -@@ -0,0 +1,424 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for rgxta3d -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for rgxta3d -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef COMMON_RGXTA3D_BRIDGE_H -+#define COMMON_RGXTA3D_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "rgx_bridge.h" -+#include "rgx_fwif_shared.h" -+#include "devicemem_typedefs.h" -+#include "pvrsrv_sync_km.h" -+ -+#define PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+2 -+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+3 -+#define PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+4 -+#define PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+5 -+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+6 -+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+7 -+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+8 -+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+9 -+#define PVRSRV_BRIDGE_RGXTA3D_RGXSENDZSSTOREDISABLE PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+10 -+#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+11 -+#define PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+12 -+#define PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2 PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13 -+#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+14 -+#define PVRSRV_BRIDGE_RGXTA3D_CMD_LAST (PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+14) -+ -+/******************************************* -+ RGXCreateHWRTDataSet -+ *******************************************/ -+ -+/* Bridge in structure for RGXCreateHWRTDataSet */ -+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET_TAG -+{ -+ IMG_UINT64 ui64FlippedMultiSampleCtl; -+ IMG_UINT64 ui64MultiSampleCtl; -+ IMG_DEV_VIRTADDR *psMacrotileArrayDevVAddr; -+ IMG_DEV_VIRTADDR *psPMMlistDevVAddr; -+ IMG_DEV_VIRTADDR *psRTCDevVAddr; -+ IMG_DEV_VIRTADDR *psRgnHeaderDevVAddr; -+ IMG_DEV_VIRTADDR *psTailPtrsDevVAddr; -+ IMG_DEV_VIRTADDR *psVHeapTableDevVAddr; -+ IMG_HANDLE *phKmHwRTDataSet; -+ IMG_HANDLE *phapsFreeLists; -+ IMG_UINT32 ui32ISPMergeLowerX; -+ IMG_UINT32 ui32ISPMergeLowerY; -+ IMG_UINT32 ui32ISPMergeScaleX; -+ IMG_UINT32 ui32ISPMergeScaleY; -+ IMG_UINT32 ui32ISPMergeUpperX; -+ IMG_UINT32 ui32ISPMergeUpperY; -+ IMG_UINT32 ui32ISPMtileSize; -+ IMG_UINT32 ui32MTileStride; -+ IMG_UINT32 ui32PPPScreen; -+ IMG_UINT32 ui32RgnHeaderSize; -+ IMG_UINT32 ui32TEAA; -+ IMG_UINT32 ui32TEMTILE1; -+ IMG_UINT32 ui32TEMTILE2; -+ IMG_UINT32 ui32TEScreen; -+ IMG_UINT32 ui32TPCSize; -+ IMG_UINT32 ui32TPCStride; -+ IMG_UINT16 ui16MaxRTs; -+} __packed PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET; -+ -+/* Bridge out structure for RGXCreateHWRTDataSet */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET_TAG -+{ -+ IMG_HANDLE *phKmHwRTDataSet; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET; -+ -+/******************************************* -+ RGXDestroyHWRTDataSet -+ *******************************************/ -+ -+/* Bridge in structure for RGXDestroyHWRTDataSet */ -+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET_TAG -+{ -+ IMG_HANDLE hKmHwRTDataSet; -+} __packed 
PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET; -+ -+/* Bridge out structure for RGXDestroyHWRTDataSet */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET; -+ -+/******************************************* -+ RGXCreateZSBuffer -+ *******************************************/ -+ -+/* Bridge in structure for RGXCreateZSBuffer */ -+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER_TAG -+{ -+ IMG_HANDLE hPMR; -+ IMG_HANDLE hReservation; -+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags; -+} __packed PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER; -+ -+/* Bridge out structure for RGXCreateZSBuffer */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER_TAG -+{ -+ IMG_HANDLE hsZSBufferKM; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER; -+ -+/******************************************* -+ RGXDestroyZSBuffer -+ *******************************************/ -+ -+/* Bridge in structure for RGXDestroyZSBuffer */ -+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER_TAG -+{ -+ IMG_HANDLE hsZSBufferMemDesc; -+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER; -+ -+/* Bridge out structure for RGXDestroyZSBuffer */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER; -+ -+/******************************************* -+ RGXPopulateZSBuffer -+ *******************************************/ -+ -+/* Bridge in structure for RGXPopulateZSBuffer */ -+typedef struct PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER_TAG -+{ -+ IMG_HANDLE hsZSBufferKM; -+} __packed PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER; -+ -+/* Bridge out structure for RGXPopulateZSBuffer */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER_TAG -+{ -+ IMG_HANDLE hsPopulation; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER; -+ -+/******************************************* -+ RGXUnpopulateZSBuffer -+ *******************************************/ -+ -+/* Bridge in structure for RGXUnpopulateZSBuffer */ -+typedef struct PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER_TAG -+{ -+ IMG_HANDLE hsPopulation; -+} __packed PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER; -+ -+/* Bridge out structure for RGXUnpopulateZSBuffer */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER; -+ -+/******************************************* -+ RGXCreateFreeList -+ *******************************************/ -+ -+/* Bridge in structure for RGXCreateFreeList */ -+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEFREELIST_TAG -+{ -+ IMG_DEV_VIRTADDR spsFreeListDevVAddr; -+ IMG_DEVMEM_OFFSET_T uiPMROffset; -+ IMG_HANDLE hMemCtxPrivData; -+ IMG_HANDLE hsFreeListPMR; -+ IMG_HANDLE hsGlobalFreeList; -+ IMG_UINT32 ui32GrowFLPages; -+ IMG_UINT32 ui32GrowParamThreshold; -+ IMG_UINT32 ui32InitFLPages; -+ IMG_UINT32 ui32MaxFLPages; -+ IMG_BOOL bbFreeListCheck; -+} __packed PVRSRV_BRIDGE_IN_RGXCREATEFREELIST; -+ -+/* Bridge out structure for RGXCreateFreeList */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST_TAG -+{ -+ IMG_HANDLE hCleanupCookie; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST; -+ -+/******************************************* -+ RGXDestroyFreeList -+ *******************************************/ -+ -+/* Bridge in structure for RGXDestroyFreeList */ -+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST_TAG -+{ -+ IMG_HANDLE hCleanupCookie; -+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST; -+ -+/* 
Bridge out structure for RGXDestroyFreeList */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST; -+ -+/******************************************* -+ RGXCreateRenderContext -+ *******************************************/ -+ -+/* Bridge in structure for RGXCreateRenderContext */ -+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT_TAG -+{ -+ IMG_DEV_VIRTADDR sVDMCallStackAddr; -+ IMG_UINT64 ui64RobustnessAddress; -+ IMG_HANDLE hPrivData; -+ IMG_BYTE *pui8FrameworkCmd; -+ IMG_BYTE *pui8StaticRenderContextState; -+ IMG_INT32 i32Priority; -+ IMG_UINT32 ui32ContextFlags; -+ IMG_UINT32 ui32FrameworkCmdSize; -+ IMG_UINT32 ui32Max3DDeadlineMS; -+ IMG_UINT32 ui32MaxTADeadlineMS; -+ IMG_UINT32 ui32PackedCCBSizeU8888; -+ IMG_UINT32 ui32StaticRenderContextStateSize; -+ IMG_UINT32 ui32ui32CallStackDepth; -+} __packed PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT; -+ -+/* Bridge out structure for RGXCreateRenderContext */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT_TAG -+{ -+ IMG_HANDLE hRenderContext; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT; -+ -+/******************************************* -+ RGXDestroyRenderContext -+ *******************************************/ -+ -+/* Bridge in structure for RGXDestroyRenderContext */ -+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT_TAG -+{ -+ IMG_HANDLE hCleanupCookie; -+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT; -+ -+/* Bridge out structure for RGXDestroyRenderContext */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT; -+ -+/******************************************* -+ RGXSendZSStoreDisable -+ *******************************************/ -+ -+/* Bridge in structure for RGXSendZSStoreDisable */ -+typedef struct PVRSRV_BRIDGE_IN_RGXSENDZSSTOREDISABLE_TAG -+{ -+ IMG_HANDLE hRenderContext; -+ IMG_INT32 i32ExtJobRefToDisableZSStore; -+ IMG_BOOL bDisableDepthStore; -+ IMG_BOOL bDisableStencilStore; -+} __packed PVRSRV_BRIDGE_IN_RGXSENDZSSTOREDISABLE; -+ -+/* Bridge out structure for RGXSendZSStoreDisable */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXSENDZSSTOREDISABLE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXSENDZSSTOREDISABLE; -+ -+/******************************************* -+ RGXSetRenderContextPriority -+ *******************************************/ -+ -+/* Bridge in structure for RGXSetRenderContextPriority */ -+typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY_TAG -+{ -+ IMG_HANDLE hRenderContext; -+ IMG_INT32 i32Priority; -+} __packed PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY; -+ -+/* Bridge out structure for RGXSetRenderContextPriority */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY; -+ -+/******************************************* -+ RGXRenderContextStalled -+ *******************************************/ -+ -+/* Bridge in structure for RGXRenderContextStalled */ -+typedef struct PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED_TAG -+{ -+ IMG_HANDLE hRenderContext; -+} __packed PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED; -+ -+/* Bridge out structure for RGXRenderContextStalled */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED; -+ -+/******************************************* -+ 
RGXKickTA3D2 -+ *******************************************/ -+ -+/* Bridge in structure for RGXKickTA3D2 */ -+typedef struct PVRSRV_BRIDGE_IN_RGXKICKTA3D2_TAG -+{ -+ IMG_UINT64 ui64Deadline; -+ IMG_HANDLE hKMHWRTDataSet; -+ IMG_HANDLE hMSAAScratchBuffer; -+ IMG_HANDLE hPRFenceUFOSyncPrimBlock; -+ IMG_HANDLE hRenderContext; -+ IMG_HANDLE hZSBuffer; -+ IMG_UINT32 *pui32Client3DUpdateSyncOffset; -+ IMG_UINT32 *pui32Client3DUpdateValue; -+ IMG_UINT32 *pui32ClientTAFenceSyncOffset; -+ IMG_UINT32 *pui32ClientTAFenceValue; -+ IMG_UINT32 *pui32ClientTAUpdateSyncOffset; -+ IMG_UINT32 *pui32ClientTAUpdateValue; -+ IMG_UINT32 *pui32SyncPMRFlags; -+ IMG_BYTE *pui83DCmd; -+ IMG_BYTE *pui83DPRCmd; -+ IMG_BYTE *pui8TACmd; -+ IMG_CHAR *puiUpdateFenceName; -+ IMG_CHAR *puiUpdateFenceName3D; -+ IMG_HANDLE *phClient3DUpdateSyncPrimBlock; -+ IMG_HANDLE *phClientTAFenceSyncPrimBlock; -+ IMG_HANDLE *phClientTAUpdateSyncPrimBlock; -+ IMG_HANDLE *phSyncPMRs; -+ PVRSRV_FENCE hCheckFence; -+ PVRSRV_FENCE hCheckFence3D; -+ PVRSRV_TIMELINE hUpdateTimeline; -+ PVRSRV_TIMELINE hUpdateTimeline3D; -+ IMG_UINT32 ui323DCmdSize; -+ IMG_UINT32 ui323DPRCmdSize; -+ IMG_UINT32 ui32Client3DUpdateCount; -+ IMG_UINT32 ui32ClientTAFenceCount; -+ IMG_UINT32 ui32ClientTAUpdateCount; -+ IMG_UINT32 ui32ExtJobRef; -+ IMG_UINT32 ui32NumberOfDrawCalls; -+ IMG_UINT32 ui32NumberOfIndices; -+ IMG_UINT32 ui32NumberOfMRTs; -+ IMG_UINT32 ui32PDumpFlags; -+ IMG_UINT32 ui32PRFenceUFOSyncOffset; -+ IMG_UINT32 ui32PRFenceValue; -+ IMG_UINT32 ui32RenderTargetSize; -+ IMG_UINT32 ui32SyncPMRCount; -+ IMG_UINT32 ui32TACmdSize; -+ IMG_BOOL bbAbort; -+ IMG_BOOL bbKick3D; -+ IMG_BOOL bbKickPR; -+ IMG_BOOL bbKickTA; -+} __packed PVRSRV_BRIDGE_IN_RGXKICKTA3D2; -+ -+/* Bridge out structure for RGXKickTA3D2 */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTA3D2_TAG -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_FENCE hUpdateFence; -+ PVRSRV_FENCE hUpdateFence3D; -+} __packed PVRSRV_BRIDGE_OUT_RGXKICKTA3D2; -+ -+/******************************************* -+ RGXSetRenderContextProperty -+ *******************************************/ -+ -+/* Bridge in structure for RGXSetRenderContextProperty */ -+typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY_TAG -+{ -+ IMG_UINT64 ui64Input; -+ IMG_HANDLE hRenderContext; -+ IMG_UINT32 ui32Property; -+} __packed PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY; -+ -+/* Bridge out structure for RGXSetRenderContextProperty */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY_TAG -+{ -+ IMG_UINT64 ui64Output; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY; -+ -+#endif /* COMMON_RGXTA3D_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_rgxtimerquery_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxtimerquery_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_rgxtimerquery_bridge.h -@@ -0,0 +1,112 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for rgxtimerquery -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for rgxtimerquery -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/
-+
-+#ifndef COMMON_RGXTIMERQUERY_BRIDGE_H
-+#define COMMON_RGXTIMERQUERY_BRIDGE_H
-+
-+#include
-+
-+#include "img_defs.h"
-+#include "img_types.h"
-+#include "pvrsrv_error.h"
-+
-+#include "rgx_bridge.h"
-+
-+#define PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST 0
-+#define PVRSRV_BRIDGE_RGXTIMERQUERY_RGXBEGINTIMERQUERY PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+0
-+#define PVRSRV_BRIDGE_RGXTIMERQUERY_RGXENDTIMERQUERY PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+1
-+#define PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+2
-+#define PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_LAST (PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+2)
-+
-+/*******************************************
-+ RGXBeginTimerQuery
-+ *******************************************/
-+
-+/* Bridge in structure for RGXBeginTimerQuery */
-+typedef struct PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY_TAG
-+{
-+ IMG_UINT32 ui32QueryId;
-+} __packed PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY;
-+
-+/* Bridge out structure for RGXBeginTimerQuery */
-+typedef struct PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY_TAG
-+{
-+ PVRSRV_ERROR eError;
-+} __packed PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY;
-+
-+/*******************************************
-+ RGXEndTimerQuery
-+ *******************************************/
-+
-+/* Bridge in structure for RGXEndTimerQuery */
-+typedef struct PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY_TAG
-+{
-+ IMG_UINT32 ui32EmptyStructPlaceholder;
-+} __packed PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY;
-+
-+/* Bridge out structure for RGXEndTimerQuery */
-+typedef struct PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY_TAG
-+{
-+ PVRSRV_ERROR eError;
-+} __packed PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY;
-+
-+/*******************************************
-+ RGXQueryTimer
-+ *******************************************/
-+
-+/* Bridge in structure for RGXQueryTimer */
-+typedef struct PVRSRV_BRIDGE_IN_RGXQUERYTIMER_TAG
-+{
-+ IMG_UINT32 ui32QueryId;
-+} __packed PVRSRV_BRIDGE_IN_RGXQUERYTIMER;
-+
-+/* Bridge out structure for RGXQueryTimer */
-+typedef struct PVRSRV_BRIDGE_OUT_RGXQUERYTIMER_TAG
-+{
-+ IMG_UINT64 ui64EndTime;
-+ IMG_UINT64 ui64StartTime;
-+ PVRSRV_ERROR eError;
-+} __packed PVRSRV_BRIDGE_OUT_RGXQUERYTIMER;
-+
-+#endif /* COMMON_RGXTIMERQUERY_BRIDGE_H */
-diff --git a/drivers/gpu/drm/img-rogue/common_rgxtq2_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxtq2_bridge.h
-new file mode 100644
-index 000000000000..111111111111
---- /dev/null
-+++ b/drivers/gpu/drm/img-rogue/common_rgxtq2_bridge.h
-@@ -0,0 +1,228 @@
-+/*******************************************************************************
-+@File
-+@Title Common bridge header for rgxtq2
-+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
-+@Description Declares common defines and structures used by both the client
-+ and server side of the bridge for rgxtq2
-+@License Dual MIT/GPLv2
-+
-+The contents of this file are subject to the MIT license as set out below.
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef COMMON_RGXTQ2_BRIDGE_H -+#define COMMON_RGXTQ2_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "rgx_bridge.h" -+#include "pvrsrv_sync_km.h" -+ -+#define PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+2 -+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+3 -+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2 PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+4 -+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+5 -+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+6 -+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7 -+#define PVRSRV_BRIDGE_RGXTQ2_CMD_LAST (PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7) -+ -+/******************************************* -+ RGXTDMCreateTransferContext -+ *******************************************/ -+ -+/* Bridge in structure for RGXTDMCreateTransferContext */ -+typedef struct PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT_TAG -+{ -+ IMG_UINT64 ui64RobustnessAddress; -+ IMG_HANDLE hPrivData; -+ IMG_BYTE *pui8FrameworkCmd; -+ IMG_INT32 i32Priority; -+ IMG_UINT32 ui32ContextFlags; -+ IMG_UINT32 ui32FrameworkCmdSize; -+ IMG_UINT32 ui32PackedCCBSizeU88; -+} __packed PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT; -+ -+/* Bridge out structure for RGXTDMCreateTransferContext */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT_TAG -+{ -+ IMG_HANDLE hTransferContext; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT; -+ -+/******************************************* -+ RGXTDMDestroyTransferContext -+ *******************************************/ -+ -+/* Bridge in structure for RGXTDMDestroyTransferContext */ -+typedef struct PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT_TAG -+{ -+ IMG_HANDLE hTransferContext; -+} __packed PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT; -+ -+/* Bridge out structure for RGXTDMDestroyTransferContext */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT; -+ -+/******************************************* -+ RGXTDMSetTransferContextPriority -+ *******************************************/ -+ -+/* Bridge in structure for RGXTDMSetTransferContextPriority */ -+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG -+{ -+ IMG_HANDLE hTransferContext; -+ IMG_INT32 i32Priority; -+} __packed PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY; -+ -+/* Bridge out structure for RGXTDMSetTransferContextPriority */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY; -+ -+/******************************************* -+ RGXTDMNotifyWriteOffsetUpdate -+ *******************************************/ -+ -+/* Bridge in structure for RGXTDMNotifyWriteOffsetUpdate */ -+typedef struct PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG -+{ -+ IMG_HANDLE hTransferContext; -+ IMG_UINT32 ui32PDumpFlags; -+} __packed 
PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE; -+ -+/* Bridge out structure for RGXTDMNotifyWriteOffsetUpdate */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE; -+ -+/******************************************* -+ RGXTDMSubmitTransfer2 -+ *******************************************/ -+ -+/* Bridge in structure for RGXTDMSubmitTransfer2 */ -+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2_TAG -+{ -+ IMG_UINT64 ui64DeadlineInus; -+ IMG_HANDLE hTransferContext; -+ IMG_UINT32 *pui32SyncPMRFlags; -+ IMG_UINT32 *pui32UpdateSyncOffset; -+ IMG_UINT32 *pui32UpdateValue; -+ IMG_UINT8 *pui8FWCommand; -+ IMG_CHAR *puiUpdateFenceName; -+ IMG_HANDLE *phSyncPMRs; -+ IMG_HANDLE *phUpdateUFOSyncPrimBlock; -+ PVRSRV_FENCE hCheckFenceFD; -+ PVRSRV_TIMELINE hUpdateTimeline; -+ IMG_UINT32 ui32Characteristic1; -+ IMG_UINT32 ui32Characteristic2; -+ IMG_UINT32 ui32ClientUpdateCount; -+ IMG_UINT32 ui32CommandSize; -+ IMG_UINT32 ui32ExternalJobReference; -+ IMG_UINT32 ui32PDumpFlags; -+ IMG_UINT32 ui32SyncPMRCount; -+} __packed PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2; -+ -+/* Bridge out structure for RGXTDMSubmitTransfer2 */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2_TAG -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_FENCE hUpdateFence; -+} __packed PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2; -+ -+/******************************************* -+ RGXTDMGetSharedMemory -+ *******************************************/ -+ -+/* Bridge in structure for RGXTDMGetSharedMemory */ -+typedef struct PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY; -+ -+/* Bridge out structure for RGXTDMGetSharedMemory */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY_TAG -+{ -+ IMG_HANDLE hCLIPMRMem; -+ IMG_HANDLE hUSCPMRMem; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY; -+ -+/******************************************* -+ RGXTDMReleaseSharedMemory -+ *******************************************/ -+ -+/* Bridge in structure for RGXTDMReleaseSharedMemory */ -+typedef struct PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY_TAG -+{ -+ IMG_HANDLE hPMRMem; -+} __packed PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY; -+ -+/* Bridge out structure for RGXTDMReleaseSharedMemory */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY; -+ -+/******************************************* -+ RGXTDMSetTransferContextProperty -+ *******************************************/ -+ -+/* Bridge in structure for RGXTDMSetTransferContextProperty */ -+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY_TAG -+{ -+ IMG_UINT64 ui64Input; -+ IMG_HANDLE hTransferContext; -+ IMG_UINT32 ui32Property; -+} __packed PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY; -+ -+/* Bridge out structure for RGXTDMSetTransferContextProperty */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY_TAG -+{ -+ IMG_UINT64 ui64Output; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY; -+ -+#endif /* COMMON_RGXTQ2_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_rgxtq_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxtq_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_rgxtq_bridge.h -@@ -0,0 +1,210 @@ 
-+/******************************************************************************* -+@File -+@Title Common bridge header for rgxtq -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for rgxtq -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef COMMON_RGXTQ_BRIDGE_H -+#define COMMON_RGXTQ_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "rgx_bridge.h" -+#include "pvrsrv_sync_km.h" -+ -+#define PVRSRV_BRIDGE_RGXTQ_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+2 -+#define PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2 PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+3 -+#define PVRSRV_BRIDGE_RGXTQ_RGXTQGETSHAREDMEMORY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4 -+#define PVRSRV_BRIDGE_RGXTQ_RGXTQRELEASESHAREDMEMORY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+5 -+#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+6 -+#define PVRSRV_BRIDGE_RGXTQ_CMD_LAST (PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+6) -+ -+/******************************************* -+ RGXCreateTransferContext -+ *******************************************/ -+ -+/* Bridge in structure for RGXCreateTransferContext */ -+typedef struct PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT_TAG -+{ -+ IMG_UINT64 ui64RobustnessAddress; -+ IMG_HANDLE hPrivData; -+ IMG_BYTE *pui8FrameworkCmd; -+ IMG_INT32 i32Priority; -+ IMG_UINT32 ui32ContextFlags; -+ IMG_UINT32 ui32FrameworkCmdize; -+ IMG_UINT32 ui32PackedCCBSizeU8888; -+} __packed PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT; -+ -+/* Bridge out structure for RGXCreateTransferContext */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT_TAG -+{ -+ IMG_HANDLE hTransferContext; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT; -+ -+/******************************************* -+ RGXDestroyTransferContext -+ *******************************************/ -+ -+/* Bridge in structure for RGXDestroyTransferContext */ -+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT_TAG -+{ -+ IMG_HANDLE hTransferContext; -+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT; -+ -+/* Bridge out structure for RGXDestroyTransferContext */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT; -+ -+/******************************************* -+ RGXSetTransferContextPriority -+ *******************************************/ -+ -+/* Bridge in structure for RGXSetTransferContextPriority */ -+typedef struct PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY_TAG -+{ -+ IMG_HANDLE hTransferContext; -+ IMG_INT32 i32Priority; -+} __packed PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY; -+ -+/* Bridge out structure for RGXSetTransferContextPriority */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY; -+ -+/******************************************* -+ RGXSubmitTransfer2 -+ *******************************************/ -+ -+/* Bridge in structure for RGXSubmitTransfer2 */ -+typedef struct PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2_TAG -+{ -+ IMG_HANDLE hTransferContext; -+ IMG_UINT32 *pui32ClientUpdateCount; -+ IMG_UINT32 *pui32CommandSize; -+ IMG_UINT32 *pui32SyncPMRFlags; -+ IMG_UINT32 *pui32TQPrepareFlags; -+ IMG_UINT32 **pui32UpdateSyncOffset; -+ IMG_UINT32 **pui32UpdateValue; -+ IMG_UINT8 **pui8FWCommand; -+ IMG_CHAR *puiUpdateFenceName; -+ IMG_HANDLE *phSyncPMRs; -+ 
IMG_HANDLE **phUpdateUFOSyncPrimBlock; -+ PVRSRV_TIMELINE h2DUpdateTimeline; -+ PVRSRV_TIMELINE h3DUpdateTimeline; -+ PVRSRV_FENCE hCheckFenceFD; -+ IMG_UINT32 ui32ExtJobRef; -+ IMG_UINT32 ui32PrepareCount; -+ IMG_UINT32 ui32SyncPMRCount; -+} __packed PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2; -+ -+/* Bridge out structure for RGXSubmitTransfer2 */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2_TAG -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_FENCE h2DUpdateFence; -+ PVRSRV_FENCE h3DUpdateFence; -+} __packed PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2; -+ -+/******************************************* -+ RGXTQGetSharedMemory -+ *******************************************/ -+ -+/* Bridge in structure for RGXTQGetSharedMemory */ -+typedef struct PVRSRV_BRIDGE_IN_RGXTQGETSHAREDMEMORY_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_RGXTQGETSHAREDMEMORY; -+ -+/* Bridge out structure for RGXTQGetSharedMemory */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXTQGETSHAREDMEMORY_TAG -+{ -+ IMG_HANDLE hCLIPMRMem; -+ IMG_HANDLE hUSCPMRMem; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXTQGETSHAREDMEMORY; -+ -+/******************************************* -+ RGXTQReleaseSharedMemory -+ *******************************************/ -+ -+/* Bridge in structure for RGXTQReleaseSharedMemory */ -+typedef struct PVRSRV_BRIDGE_IN_RGXTQRELEASESHAREDMEMORY_TAG -+{ -+ IMG_HANDLE hPMRMem; -+} __packed PVRSRV_BRIDGE_IN_RGXTQRELEASESHAREDMEMORY; -+ -+/* Bridge out structure for RGXTQReleaseSharedMemory */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXTQRELEASESHAREDMEMORY_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXTQRELEASESHAREDMEMORY; -+ -+/******************************************* -+ RGXSetTransferContextProperty -+ *******************************************/ -+ -+/* Bridge in structure for RGXSetTransferContextProperty */ -+typedef struct PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY_TAG -+{ -+ IMG_UINT64 ui64Input; -+ IMG_HANDLE hTransferContext; -+ IMG_UINT32 ui32Property; -+} __packed PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY; -+ -+/* Bridge out structure for RGXSetTransferContextProperty */ -+typedef struct PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY_TAG -+{ -+ IMG_UINT64 ui64Output; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY; -+ -+#endif /* COMMON_RGXTQ_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_ri_bridge.h b/drivers/gpu/drm/img-rogue/common_ri_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_ri_bridge.h -@@ -0,0 +1,225 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for ri -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for ri -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef COMMON_RI_BRIDGE_H -+#define COMMON_RI_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "ri_typedefs.h" -+ -+#define PVRSRV_BRIDGE_RI_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+2 -+#define PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR PVRSRV_BRIDGE_RI_CMD_FIRST+3 -+#define PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+4 -+#define PVRSRV_BRIDGE_RI_RIDUMPLIST PVRSRV_BRIDGE_RI_CMD_FIRST+5 -+#define PVRSRV_BRIDGE_RI_RIDUMPALL PVRSRV_BRIDGE_RI_CMD_FIRST+6 -+#define PVRSRV_BRIDGE_RI_RIDUMPPROCESS PVRSRV_BRIDGE_RI_CMD_FIRST+7 -+#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER PVRSRV_BRIDGE_RI_CMD_FIRST+8 -+#define PVRSRV_BRIDGE_RI_CMD_LAST (PVRSRV_BRIDGE_RI_CMD_FIRST+8) -+ -+/******************************************* -+ RIWritePMREntry -+ *******************************************/ -+ -+/* Bridge in structure for RIWritePMREntry */ -+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY_TAG -+{ -+ IMG_HANDLE hPMRHandle; -+} __packed PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY; -+ -+/* Bridge out structure for RIWritePMREntry */ -+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY; -+ -+/******************************************* -+ RIWriteMEMDESCEntry -+ *******************************************/ -+ -+/* Bridge in structure for RIWriteMEMDESCEntry */ -+typedef struct PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY_TAG -+{ -+ IMG_UINT64 ui64Offset; -+ IMG_UINT64 ui64Size; -+ IMG_HANDLE hPMRHandle; -+ const IMG_CHAR *puiTextB; -+ IMG_UINT32 ui32TextBSize; -+ IMG_BOOL bIsImport; -+ IMG_BOOL bIsSuballoc; -+} __packed PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY; -+ -+/* Bridge out structure for RIWriteMEMDESCEntry */ -+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY_TAG -+{ -+ IMG_HANDLE hRIHandle; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY; -+ -+/******************************************* -+ RIWriteProcListEntry -+ *******************************************/ -+ -+/* Bridge in structure for RIWriteProcListEntry */ -+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY_TAG -+{ -+ IMG_UINT64 ui64DevVAddr; -+ IMG_UINT64 ui64Size; -+ const IMG_CHAR *puiTextB; -+ IMG_UINT32 ui32TextBSize; -+} __packed PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY; -+ -+/* Bridge out structure for RIWriteProcListEntry */ -+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY_TAG -+{ -+ IMG_HANDLE hRIHandle; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY; -+ -+/******************************************* -+ RIUpdateMEMDESCAddr -+ *******************************************/ -+ -+/* Bridge in structure for RIUpdateMEMDESCAddr */ -+typedef struct PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR_TAG -+{ -+ IMG_DEV_VIRTADDR sAddr; -+ IMG_HANDLE hRIHandle; -+} __packed PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR; -+ -+/* Bridge out structure for RIUpdateMEMDESCAddr */ -+typedef struct PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR; -+ -+/******************************************* -+ RIDeleteMEMDESCEntry -+ *******************************************/ -+ -+/* Bridge in structure for RIDeleteMEMDESCEntry */ 
-+typedef struct PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY_TAG -+{ -+ IMG_HANDLE hRIHandle; -+} __packed PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY; -+ -+/* Bridge out structure for RIDeleteMEMDESCEntry */ -+typedef struct PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY; -+ -+/******************************************* -+ RIDumpList -+ *******************************************/ -+ -+/* Bridge in structure for RIDumpList */ -+typedef struct PVRSRV_BRIDGE_IN_RIDUMPLIST_TAG -+{ -+ IMG_HANDLE hPMRHandle; -+} __packed PVRSRV_BRIDGE_IN_RIDUMPLIST; -+ -+/* Bridge out structure for RIDumpList */ -+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPLIST_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RIDUMPLIST; -+ -+/******************************************* -+ RIDumpAll -+ *******************************************/ -+ -+/* Bridge in structure for RIDumpAll */ -+typedef struct PVRSRV_BRIDGE_IN_RIDUMPALL_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_RIDUMPALL; -+ -+/* Bridge out structure for RIDumpAll */ -+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPALL_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RIDUMPALL; -+ -+/******************************************* -+ RIDumpProcess -+ *******************************************/ -+ -+/* Bridge in structure for RIDumpProcess */ -+typedef struct PVRSRV_BRIDGE_IN_RIDUMPPROCESS_TAG -+{ -+ IMG_PID ui32Pid; -+} __packed PVRSRV_BRIDGE_IN_RIDUMPPROCESS; -+ -+/* Bridge out structure for RIDumpProcess */ -+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPPROCESS_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RIDUMPPROCESS; -+ -+/******************************************* -+ RIWritePMREntryWithOwner -+ *******************************************/ -+ -+/* Bridge in structure for RIWritePMREntryWithOwner */ -+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER_TAG -+{ -+ IMG_HANDLE hPMRHandle; -+ IMG_PID ui32Owner; -+} __packed PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER; -+ -+/* Bridge out structure for RIWritePMREntryWithOwner */ -+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER; -+ -+#endif /* COMMON_RI_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_srvcore_bridge.h b/drivers/gpu/drm/img-rogue/common_srvcore_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_srvcore_bridge.h -@@ -0,0 +1,369 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for srvcore -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for srvcore -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef COMMON_SRVCORE_BRIDGE_H -+#define COMMON_SRVCORE_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "pvrsrv_device_types.h" -+#include "cache_ops.h" -+ -+#define PVRSRV_BRIDGE_SRVCORE_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_SRVCORE_CONNECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_SRVCORE_DISCONNECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+2 -+#define PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+3 -+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+4 -+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+5 -+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+6 -+#define PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+7 -+#define PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+8 -+#define PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+9 -+#define PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+10 -+#define PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+11 -+#define PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+12 -+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+13 -+#define PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+14 -+#define PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+15 -+#define PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+16 -+#define PVRSRV_BRIDGE_SRVCORE_CMD_LAST (PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+16) -+ -+/******************************************* -+ Connect -+ *******************************************/ -+ -+/* Bridge in structure for Connect */ -+typedef struct PVRSRV_BRIDGE_IN_CONNECT_TAG -+{ -+ IMG_UINT32 ui32ClientBuildOptions; -+ IMG_UINT32 ui32ClientDDKBuild; -+ IMG_UINT32 ui32ClientDDKVersion; -+ IMG_UINT32 ui32Flags; -+} __packed PVRSRV_BRIDGE_IN_CONNECT; -+ -+/* Bridge out structure for Connect */ -+typedef struct PVRSRV_BRIDGE_OUT_CONNECT_TAG -+{ -+ IMG_UINT64 ui64PackedBvnc; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32CapabilityFlags; -+ IMG_UINT8 ui8KernelArch; -+} __packed PVRSRV_BRIDGE_OUT_CONNECT; -+ -+/******************************************* -+ Disconnect -+ *******************************************/ -+ -+/* Bridge in structure for Disconnect */ -+typedef struct PVRSRV_BRIDGE_IN_DISCONNECT_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_DISCONNECT; -+ -+/* Bridge out structure for Disconnect */ -+typedef struct PVRSRV_BRIDGE_OUT_DISCONNECT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DISCONNECT; -+ -+/******************************************* -+ AcquireGlobalEventObject -+ *******************************************/ -+ -+/* Bridge in structure for AcquireGlobalEventObject */ -+typedef struct PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT; -+ -+/* Bridge out structure for AcquireGlobalEventObject */ -+typedef struct PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT_TAG -+{ -+ IMG_HANDLE hGlobalEventObject; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT; -+ 
-+/******************************************* -+ ReleaseGlobalEventObject -+ *******************************************/ -+ -+/* Bridge in structure for ReleaseGlobalEventObject */ -+typedef struct PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT_TAG -+{ -+ IMG_HANDLE hGlobalEventObject; -+} __packed PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT; -+ -+/* Bridge out structure for ReleaseGlobalEventObject */ -+typedef struct PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT; -+ -+/******************************************* -+ EventObjectOpen -+ *******************************************/ -+ -+/* Bridge in structure for EventObjectOpen */ -+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN_TAG -+{ -+ IMG_HANDLE hEventObject; -+} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN; -+ -+/* Bridge out structure for EventObjectOpen */ -+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN_TAG -+{ -+ IMG_HANDLE hOSEvent; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN; -+ -+/******************************************* -+ EventObjectWait -+ *******************************************/ -+ -+/* Bridge in structure for EventObjectWait */ -+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT_TAG -+{ -+ IMG_HANDLE hOSEventKM; -+} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT; -+ -+/* Bridge out structure for EventObjectWait */ -+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT; -+ -+/******************************************* -+ EventObjectClose -+ *******************************************/ -+ -+/* Bridge in structure for EventObjectClose */ -+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE_TAG -+{ -+ IMG_HANDLE hOSEventKM; -+} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE; -+ -+/* Bridge out structure for EventObjectClose */ -+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE; -+ -+/******************************************* -+ DumpDebugInfo -+ *******************************************/ -+ -+/* Bridge in structure for DumpDebugInfo */ -+typedef struct PVRSRV_BRIDGE_IN_DUMPDEBUGINFO_TAG -+{ -+ IMG_UINT32 ui32VerbLevel; -+} __packed PVRSRV_BRIDGE_IN_DUMPDEBUGINFO; -+ -+/* Bridge out structure for DumpDebugInfo */ -+typedef struct PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO; -+ -+/******************************************* -+ GetDevClockSpeed -+ *******************************************/ -+ -+/* Bridge in structure for GetDevClockSpeed */ -+typedef struct PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED; -+ -+/* Bridge out structure for GetDevClockSpeed */ -+typedef struct PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED_TAG -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32ClockSpeed; -+} __packed PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED; -+ -+/******************************************* -+ HWOpTimeout -+ *******************************************/ -+ -+/* Bridge in structure for HWOpTimeout */ -+typedef struct PVRSRV_BRIDGE_IN_HWOPTIMEOUT_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_HWOPTIMEOUT; -+ -+/* Bridge out structure for HWOpTimeout */ -+typedef struct PVRSRV_BRIDGE_OUT_HWOPTIMEOUT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_HWOPTIMEOUT; -+ -+/******************************************* 
-+ AlignmentCheck -+ *******************************************/ -+ -+/* Bridge in structure for AlignmentCheck */ -+typedef struct PVRSRV_BRIDGE_IN_ALIGNMENTCHECK_TAG -+{ -+ IMG_UINT32 *pui32AlignChecks; -+ IMG_UINT32 ui32AlignChecksSize; -+} __packed PVRSRV_BRIDGE_IN_ALIGNMENTCHECK; -+ -+/* Bridge out structure for AlignmentCheck */ -+typedef struct PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK; -+ -+/******************************************* -+ GetDeviceStatus -+ *******************************************/ -+ -+/* Bridge in structure for GetDeviceStatus */ -+typedef struct PVRSRV_BRIDGE_IN_GETDEVICESTATUS_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_GETDEVICESTATUS; -+ -+/* Bridge out structure for GetDeviceStatus */ -+typedef struct PVRSRV_BRIDGE_OUT_GETDEVICESTATUS_TAG -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32DeviceSatus; -+} __packed PVRSRV_BRIDGE_OUT_GETDEVICESTATUS; -+ -+/******************************************* -+ GetMultiCoreInfo -+ *******************************************/ -+ -+/* Bridge in structure for GetMultiCoreInfo */ -+typedef struct PVRSRV_BRIDGE_IN_GETMULTICOREINFO_TAG -+{ -+ IMG_UINT64 *pui64Caps; -+ IMG_UINT32 ui32CapsSize; -+} __packed PVRSRV_BRIDGE_IN_GETMULTICOREINFO; -+ -+/* Bridge out structure for GetMultiCoreInfo */ -+typedef struct PVRSRV_BRIDGE_OUT_GETMULTICOREINFO_TAG -+{ -+ IMG_UINT64 *pui64Caps; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32NumCores; -+} __packed PVRSRV_BRIDGE_OUT_GETMULTICOREINFO; -+ -+/******************************************* -+ EventObjectWaitTimeout -+ *******************************************/ -+ -+/* Bridge in structure for EventObjectWaitTimeout */ -+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT_TAG -+{ -+ IMG_UINT64 ui64uiTimeoutus; -+ IMG_HANDLE hOSEventKM; -+} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT; -+ -+/* Bridge out structure for EventObjectWaitTimeout */ -+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT; -+ -+/******************************************* -+ FindProcessMemStats -+ *******************************************/ -+ -+/* Bridge in structure for FindProcessMemStats */ -+typedef struct PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS_TAG -+{ -+ IMG_UINT64 *pui64MemStatsArray; -+ IMG_UINT32 ui32ArrSize; -+ IMG_UINT32 ui32PID; -+ IMG_BOOL bbAllProcessStats; -+} __packed PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS; -+ -+/* Bridge out structure for FindProcessMemStats */ -+typedef struct PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS_TAG -+{ -+ IMG_UINT64 *pui64MemStatsArray; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS; -+ -+/******************************************* -+ AcquireInfoPage -+ *******************************************/ -+ -+/* Bridge in structure for AcquireInfoPage */ -+typedef struct PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE; -+ -+/* Bridge out structure for AcquireInfoPage */ -+typedef struct PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE_TAG -+{ -+ IMG_HANDLE hPMR; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE; -+ -+/******************************************* -+ ReleaseInfoPage -+ *******************************************/ -+ -+/* Bridge in structure for ReleaseInfoPage */ -+typedef struct PVRSRV_BRIDGE_IN_RELEASEINFOPAGE_TAG -+{ -+ IMG_HANDLE hPMR; -+} __packed 
PVRSRV_BRIDGE_IN_RELEASEINFOPAGE; -+ -+/* Bridge out structure for ReleaseInfoPage */ -+typedef struct PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE; -+ -+#endif /* COMMON_SRVCORE_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_sync_bridge.h b/drivers/gpu/drm/img-rogue/common_sync_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_sync_bridge.h -@@ -0,0 +1,254 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for sync -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for sync -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef COMMON_SYNC_BRIDGE_H -+#define COMMON_SYNC_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "pdump.h" -+#include "pdumpdefs.h" -+#include "devicemem_typedefs.h" -+#include "pvrsrv_sync_km.h" -+#include -+ -+#define PVRSRV_BRIDGE_SYNC_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK PVRSRV_BRIDGE_SYNC_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK PVRSRV_BRIDGE_SYNC_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMSET PVRSRV_BRIDGE_SYNC_CMD_FIRST+2 -+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP PVRSRV_BRIDGE_SYNC_CMD_FIRST+3 -+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE PVRSRV_BRIDGE_SYNC_CMD_FIRST+4 -+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL PVRSRV_BRIDGE_SYNC_CMD_FIRST+5 -+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP PVRSRV_BRIDGE_SYNC_CMD_FIRST+6 -+#define PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT PVRSRV_BRIDGE_SYNC_CMD_FIRST+7 -+#define PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT PVRSRV_BRIDGE_SYNC_CMD_FIRST+8 -+#define PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL PVRSRV_BRIDGE_SYNC_CMD_FIRST+9 -+#define PVRSRV_BRIDGE_SYNC_CMD_LAST (PVRSRV_BRIDGE_SYNC_CMD_FIRST+9) -+ -+/******************************************* -+ AllocSyncPrimitiveBlock -+ *******************************************/ -+ -+/* Bridge in structure for AllocSyncPrimitiveBlock */ -+typedef struct PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK_TAG -+{ -+ IMG_UINT32 ui32EmptyStructPlaceholder; -+} __packed PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK; -+ -+/* Bridge out structure for AllocSyncPrimitiveBlock */ -+typedef struct PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK_TAG -+{ -+ IMG_HANDLE hSyncHandle; -+ IMG_HANDLE hhSyncPMR; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32SyncPrimBlockSize; -+ IMG_UINT32 ui32SyncPrimVAddr; -+} __packed PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK; -+ -+/******************************************* -+ FreeSyncPrimitiveBlock -+ *******************************************/ -+ -+/* Bridge in structure for FreeSyncPrimitiveBlock */ -+typedef struct PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK_TAG -+{ -+ IMG_HANDLE hSyncHandle; -+} __packed PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK; -+ -+/* Bridge out structure for FreeSyncPrimitiveBlock */ -+typedef struct PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK; -+ -+/******************************************* -+ SyncPrimSet -+ *******************************************/ -+ -+/* Bridge in structure for SyncPrimSet */ -+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMSET_TAG -+{ -+ IMG_HANDLE hSyncHandle; -+ IMG_UINT32 ui32Index; -+ IMG_UINT32 ui32Value; -+} __packed PVRSRV_BRIDGE_IN_SYNCPRIMSET; -+ -+/* Bridge out structure for SyncPrimSet */ -+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMSET_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMSET; -+ -+/******************************************* -+ SyncPrimPDump -+ *******************************************/ -+ -+/* Bridge in structure for SyncPrimPDump */ -+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP_TAG -+{ -+ IMG_HANDLE hSyncHandle; -+ IMG_UINT32 ui32Offset; -+} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP; -+ -+/* Bridge out structure for SyncPrimPDump */ -+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP; -+ -+/******************************************* -+ SyncPrimPDumpValue -+ 
*******************************************/ -+ -+/* Bridge in structure for SyncPrimPDumpValue */ -+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE_TAG -+{ -+ IMG_HANDLE hSyncHandle; -+ IMG_UINT32 ui32Offset; -+ IMG_UINT32 ui32Value; -+} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE; -+ -+/* Bridge out structure for SyncPrimPDumpValue */ -+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE; -+ -+/******************************************* -+ SyncPrimPDumpPol -+ *******************************************/ -+ -+/* Bridge in structure for SyncPrimPDumpPol */ -+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL_TAG -+{ -+ IMG_HANDLE hSyncHandle; -+ PDUMP_POLL_OPERATOR eOperator; -+ IMG_UINT32 ui32Mask; -+ IMG_UINT32 ui32Offset; -+ IMG_UINT32 ui32Value; -+ PDUMP_FLAGS_T uiPDumpFlags; -+} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL; -+ -+/* Bridge out structure for SyncPrimPDumpPol */ -+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL; -+ -+/******************************************* -+ SyncPrimPDumpCBP -+ *******************************************/ -+ -+/* Bridge in structure for SyncPrimPDumpCBP */ -+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP_TAG -+{ -+ IMG_DEVMEM_SIZE_T uiBufferSize; -+ IMG_DEVMEM_SIZE_T uiPacketSize; -+ IMG_DEVMEM_OFFSET_T uiWriteOffset; -+ IMG_HANDLE hSyncHandle; -+ IMG_UINT32 ui32Offset; -+} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP; -+ -+/* Bridge out structure for SyncPrimPDumpCBP */ -+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP; -+ -+/******************************************* -+ SyncAllocEvent -+ *******************************************/ -+ -+/* Bridge in structure for SyncAllocEvent */ -+typedef struct PVRSRV_BRIDGE_IN_SYNCALLOCEVENT_TAG -+{ -+ const IMG_CHAR *puiClassName; -+ IMG_UINT32 ui32ClassNameSize; -+ IMG_UINT32 ui32FWAddr; -+ IMG_BOOL bServerSync; -+} __packed PVRSRV_BRIDGE_IN_SYNCALLOCEVENT; -+ -+/* Bridge out structure for SyncAllocEvent */ -+typedef struct PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT; -+ -+/******************************************* -+ SyncFreeEvent -+ *******************************************/ -+ -+/* Bridge in structure for SyncFreeEvent */ -+typedef struct PVRSRV_BRIDGE_IN_SYNCFREEEVENT_TAG -+{ -+ IMG_UINT32 ui32FWAddr; -+} __packed PVRSRV_BRIDGE_IN_SYNCFREEEVENT; -+ -+/* Bridge out structure for SyncFreeEvent */ -+typedef struct PVRSRV_BRIDGE_OUT_SYNCFREEEVENT_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_SYNCFREEEVENT; -+ -+/******************************************* -+ SyncCheckpointSignalledPDumpPol -+ *******************************************/ -+ -+/* Bridge in structure for SyncCheckpointSignalledPDumpPol */ -+typedef struct PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL_TAG -+{ -+ PVRSRV_FENCE hFence; -+} __packed PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL; -+ -+/* Bridge out structure for SyncCheckpointSignalledPDumpPol */ -+typedef struct PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL; -+ -+#endif /* COMMON_SYNC_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/common_synctracking_bridge.h b/drivers/gpu/drm/img-rogue/common_synctracking_bridge.h -new file mode 100644 -index 
000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/common_synctracking_bridge.h -@@ -0,0 +1,97 @@ -+/******************************************************************************* -+@File -+@Title Common bridge header for synctracking -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declares common defines and structures used by both the client -+ and server side of the bridge for synctracking -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#ifndef COMMON_SYNCTRACKING_BRIDGE_H -+#define COMMON_SYNCTRACKING_BRIDGE_H -+ -+#include -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST 0 -+#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+0 -+#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1 -+#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST (PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1) -+ -+/******************************************* -+ SyncRecordRemoveByHandle -+ *******************************************/ -+ -+/* Bridge in structure for SyncRecordRemoveByHandle */ -+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE_TAG -+{ -+ IMG_HANDLE hhRecord; -+} __packed PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE; -+ -+/* Bridge out structure for SyncRecordRemoveByHandle */ -+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE_TAG -+{ -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE; -+ -+/******************************************* -+ SyncRecordAdd -+ *******************************************/ -+ -+/* Bridge in structure for SyncRecordAdd */ -+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDADD_TAG -+{ -+ IMG_HANDLE hhServerSyncPrimBlock; -+ const IMG_CHAR *puiClassName; -+ IMG_UINT32 ui32ClassNameSize; -+ IMG_UINT32 ui32ui32FwBlockAddr; -+ IMG_UINT32 ui32ui32SyncOffset; -+ IMG_BOOL bbServerSync; -+} __packed PVRSRV_BRIDGE_IN_SYNCRECORDADD; -+ -+/* Bridge out structure for SyncRecordAdd */ -+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDADD_TAG -+{ -+ IMG_HANDLE hhRecord; -+ PVRSRV_ERROR eError; -+} __packed PVRSRV_BRIDGE_OUT_SYNCRECORDADD; -+ -+#endif /* COMMON_SYNCTRACKING_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/config_kernel.h b/drivers/gpu/drm/img-rogue/config_kernel.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/config_kernel.h -@@ -0,0 +1,203 @@ -+#define PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY 5 -+#define RGX_FW_FILENAME "rgx.fw" -+#define RGX_SH_FILENAME "rgx.sh" -+#define PVR_BUILD_DIR "spacemit" -+#define PVR_BUILD_TYPE "release" -+#define PVRSRV_MODNAME "pvrsrvkm" -+#define PVRSYNC_MODNAME "pvr_sync" -+#define SUPPORT_RGX 1 -+#define PVRSRV_MAX_DEVICES 4 -+#define PVRSRV_HWPERF_COUNTERS_PERBLK 12 -+#define RGX_BVNC_CORE_KM_HEADER "cores/rgxcore_km_36.29.52.182.h" -+#define RGX_BNC_CONFIG_KM_HEADER "configs/rgxconfig_km_36.V.52.182.h" -+#define PVRSRV_NEED_PVR_DPF -+#define PVRSRV_TRACE_ROGUE_EVENTS -+#define SUPPORT_PHYSMEM_TEST -+#define SUPPORT_RGXTQ_BRIDGE -+#define PVRSRV_POISON_ON_ALLOC_VALUE 0xd9 -+#define PVRSRV_POISON_ON_FREE_VALUE 0x63 -+#define RGX_NUM_DRIVERS_SUPPORTED 1 -+#define RGX_DRIVERID_0_DEFAULT_PRIORITY (1 - 0) -+#define RGX_DRIVERID_1_DEFAULT_PRIORITY (1 - 1) -+#define RGX_DRIVERID_2_DEFAULT_PRIORITY (1 - 2) -+#define RGX_DRIVERID_3_DEFAULT_PRIORITY (1 - 3) -+#define RGX_DRIVERID_4_DEFAULT_PRIORITY (1 - 4) -+#define RGX_DRIVERID_5_DEFAULT_PRIORITY (1 - 5) -+#define RGX_DRIVERID_6_DEFAULT_PRIORITY (1 - 6) -+#define RGX_DRIVERID_7_DEFAULT_PRIORITY (1 - 7) -+#define RGX_DRIVERID_0_DEFAULT_ISOLATION_GROUP 0 -+#define RGX_DRIVERID_1_DEFAULT_ISOLATION_GROUP 0 -+#define RGX_DRIVERID_2_DEFAULT_ISOLATION_GROUP 0 -+#define RGX_DRIVERID_3_DEFAULT_ISOLATION_GROUP 0 -+#define RGX_DRIVERID_4_DEFAULT_ISOLATION_GROUP 0 -+#define RGX_DRIVERID_5_DEFAULT_ISOLATION_GROUP 0 -+#define 
RGX_DRIVERID_6_DEFAULT_ISOLATION_GROUP 0 -+#define RGX_DRIVERID_7_DEFAULT_ISOLATION_GROUP 0 -+#define RGX_DRIVERID_0_DEFAULT_TIME_SLICE 0 -+#define RGX_DRIVERID_1_DEFAULT_TIME_SLICE 0 -+#define RGX_DRIVERID_2_DEFAULT_TIME_SLICE 0 -+#define RGX_DRIVERID_3_DEFAULT_TIME_SLICE 0 -+#define RGX_DRIVERID_4_DEFAULT_TIME_SLICE 0 -+#define RGX_DRIVERID_5_DEFAULT_TIME_SLICE 0 -+#define RGX_DRIVERID_6_DEFAULT_TIME_SLICE 0 -+#define RGX_DRIVERID_7_DEFAULT_TIME_SLICE 0 -+#define RGX_DRIVER_DEFAULT_TIME_SLICE_INTERVAL 0 -+#define RGX_HCS_DEFAULT_DEADLINE_MS 0xFFFFFFFFU -+#define DRIVER0_SECURITY_SUPPORT 0 -+#define DRIVER1_SECURITY_SUPPORT 0 -+#define DRIVER2_SECURITY_SUPPORT 0 -+#define DRIVER3_SECURITY_SUPPORT 0 -+#define DRIVER4_SECURITY_SUPPORT 0 -+#define DRIVER5_SECURITY_SUPPORT 0 -+#define DRIVER6_SECURITY_SUPPORT 0 -+#define DRIVER7_SECURITY_SUPPORT 0 -+#define RGX_FW_HEAP_USES_FIRMWARE_OSID 0 -+#define RGX_FW_HEAP_USES_HOST_OSID 1 -+#define RGX_FW_HEAP_USES_DEDICATED_OSID 2 -+#define RGX_FW_HEAP_OSID_ASSIGNMENT RGX_FW_HEAP_USES_FIRMWARE_OSID -+#define PVRSRV_APPHINT_PHYSHEAPMINMEMONCONNECTION 0 -+#define RGX_FW_PHYSHEAP_MINMEM_ON_CONNECTION 512 -+#define PVRSRV_APPHINT_DRIVERMODE 0x7FFFFFFF -+#define RGX_FW_HEAP_SHIFT 25 -+#define RGX_VZ_CONNECTION_TIMEOUT_US 60000000 -+#define GPUVIRT_VALIDATION_NUM_OS 8 -+#define PVRSRV_ENABLE_CCCB_GROW -+#define FIX_DUSTS_POW_ON_INIT -+#define SUPPORT_POWMON_COMPONENT -+#define PVR_POWER_ACTOR_MEASUREMENT_PERIOD_MS 10U -+#define PVR_POWER_MONITOR_HWPERF -+#define PVR_LDM_PLATFORM_PRE_REGISTERED -+#define PVR_LDM_DRIVER_REGISTRATION_NAME "pvrsrvkm" -+#define PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN 256 -+#define ION_DEFAULT_HEAP_NAME "ion_system_heap" -+#define ION_DEFAULT_HEAP_ID_MASK (1 << ION_HEAP_TYPE_SYSTEM) -+#define PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT APPHNT_BLDVAR_DBGDUMPLIMIT -+#define PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG IMG_FALSE -+#define PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE 0x4000 -+#define PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE 786432 -+#define PVRSRV_APPHINT_ENABLESIGNATURECHECKS APPHNT_BLDVAR_ENABLESIGNATURECHECKS -+#define PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE RGXFW_SIG_BUFFER_SIZE_MIN -+#define PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING IMG_FALSE -+#define PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG -+#define PVRSRV_APPHINT_VALIDATEIRQ 0 -+#define PVRSRV_APPHINT_DISABLECLOCKGATING 0 -+#define PVRSRV_APPHINT_DISABLEDMOVERLAP 0 -+#define PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE 0 -+#define PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH 0 -+#define PVRSRV_APPHINT_ENABLESOFTRESETCONTEXTSWITCH 0 -+#define PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL -+#define PVRSRV_APPHINT_ENABLERDPOWERISLAND RGX_RD_POWER_ISLAND_DEFAULT -+#define PVRSRV_APPHINT_ENABLESPUCLOCKGATING IMG_FALSE -+#define PVRSRV_APPHINT_FIRMWAREPERF FW_PERF_CONF_NONE -+#define PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN -+#define PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER 0 -+#define PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB 2048 -+#define PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB 2048 -+#define PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS 50 -+#define PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP 1 -+#define PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLSCHEME 0 -+#define PVRSRV_APPHINT_TFBCVERSION 0 -+#define PVRSRV_APPHINT_JONESDISABLEMASK 0 -+#define PVRSRV_APPHINT_NEWFILTERINGMODE 1 -+#define PVRSRV_APPHINT_TRUNCATEMODE 0 -+#define PVRSRV_APPHINT_EMUMAXFREQ 0 -+#define PVRSRV_APPHINT_GPIOVALIDATIONMODE 0 -+#define 
PVRSRV_APPHINT_RGXBVNC "" -+#define PVRSRV_APPHINT_CLEANUPTHREADPRIORITY 5 -+#define PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY 0 -+#define PVRSRV_APPHINT_CACHEOPTHREADPRIORITY 1 -+#define PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2 11 -+#define PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES 10000 -+#define PVRSRV_APPHINT_ASSERTONHWRTRIGGER IMG_FALSE -+#define PVRSRV_APPHINT_ASSERTOUTOFMEMORY IMG_FALSE -+#define PVRSRV_APPHINT_CHECKMLIST APPHNT_BLDVAR_DEBUG -+#define PVRSRV_APPHINT_DISABLEFEDLOGGING IMG_FALSE -+#define PVRSRV_APPHINT_KCCB_SIZE_LOG2 10 -+#define PVRSRV_APPHINT_ENABLEAPM RGX_ACTIVEPM_DEFAULT -+#define PVRSRV_APPHINT_ENABLEHTBLOGGROUP 0 -+#define PVRSRV_APPHINT_ENABLELOGGROUP RGXFWIF_LOG_TYPE_NONE -+#define PVRSRV_APPHINT_FIRMWARELOGTYPE 0 -+#define PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS -+#define PVRSRV_APPHINT_DEBUGDUMPFWTLOGTYPE 1 -+#define PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE 0 -+#define PVRSRV_APPHINT_HTBOPERATIONMODE HTB_OPMODE_DROPOLDEST -+#define PVRSRV_APPHINT_HTBUFFERSIZE 64 -+#define PVRSRV_APPHINT_ENABLEFTRACEGPU IMG_FALSE -+#define PVRSRV_APPHINT_HWPERFFWFILTER 0 -+#define PVRSRV_APPHINT_HWPERFHOSTFILTER 0 -+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES 0 -+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL 0 -+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES 0 -+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL 0 -+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN 0 -+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGL 0 -+#define PVRSRV_APPHINT_TIMECORRCLOCK 0 -+#define PVRSRV_APPHINT_ENABLEFWPOISONONFREE IMG_FALSE -+#define PVRSRV_APPHINT_FWPOISONONFREEVALUE 0xBD -+#define PVRSRV_APPHINT_ZEROFREELIST IMG_FALSE -+#define PVRSRV_APPHINT_GPUUNITSPOWERCHANGE IMG_FALSE -+#define PVRSRV_APPHINT_DISABLEPDUMPPANIC IMG_FALSE -+#define PVRSRV_APPHINT_CACHEOPCONFIG 0 -+#define PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE 0 -+#define PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC IMG_FALSE -+#define PVRSRV_APPHINT_PHYSMEMTESTPASSES APPHNT_PHYSMEMTEST_ENABLE -+#define PVRSRV_APPHINT_TESTSLRINTERVAL 0 -+#define PVRSRV_APPHINT_RISCVDMITEST 0 -+#define PVRSRV_APPHINT_VALIDATESOCUSCTIMERS 0 -+#define PVRSRV_APPHINT_CHECKPOINTPOOLMAXLOG2 8 -+#define PVRSRV_APPHINT_CHECKPOINTPOOLINITLOG2 7 -+#define SOC_TIMER_FREQ 20 -+#define PDVFS_COM_HOST 1 -+#define PDVFS_COM_AP 2 -+#define PDVFS_COM_PMC 3 -+#define PDVFS_COM_IMG_CLKDIV 4 -+#define PDVFS_COM PDVFS_COM_HOST -+#define PVR_GPIO_MODE_GENERAL 1 -+#define PVR_GPIO_MODE_POWMON_PIN 2 -+#define PVR_GPIO_MODE PVR_GPIO_MODE_GENERAL -+#define PVRSRV_ENABLE_PROCESS_STATS -+#define SUPPORT_USC_BREAKPOINT -+#define SUPPORT_AGP -+#define RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US 2000000 -+#define PVR_ANNOTATION_MAX_LEN 63 -+#define PVRSRV_DEVICE_INIT_MODE PVRSRV_LINUX_DEV_INIT_ON_CONNECT -+#define SUPPORT_DI_BRG_IMPL -+#define PVR_LINUX_PHYSMEM_MAX_POOL_PAGES 10240 -+#define PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES 20480 -+#define PVR_DIRTY_BYTES_FLUSH_THRESHOLD 524288 -+#define PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD 256 -+#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 2 -+#define PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD 16384 -+#define SUPPORT_PMR_DEFERRED_FREE -+#define SUPPORT_MMU_DEFERRED_FREE -+#define PVRSRV_USE_LINUX_CONFIG_INIT_ON_ALLOC 1 -+#define SUPPORT_NATIVE_FENCE_SYNC -+#define PVRSRV_STALLED_CCB_ACTION -+#define UPDATE_FENCE_CHECKPOINT_COUNT 1 -+#define RGX_VZ_CONNECTION_COOLDOWN_PERIOD 0 -+#define PVR_DRM_NAME "pvr" -+#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES 16 -+#define RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS 0 -+#define 
PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D 14 -+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D 14 -+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM 13 -+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA 15 -+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D 16 -+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC 13 -+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM 14 -+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_RDM 13 -+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ3D 17 -+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ2D 17 -+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM 15 -+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA 16 -+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_3D 17 -+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_KICKSYNC 13 -+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TDM 17 -+#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_RDM 15 -+#define SUPPORT_BUFFER_SYNC 1 -+#define PVRSRV_ENABLE_SYNC_POISONING 1 -+#define SUPPORT_RGXKICKSYNC_BRIDGE -+#define SUPPORT_TQ -+#define TRACK_FW_BOOT -+#define LMA -+#define PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY -diff --git a/drivers/gpu/drm/img-rogue/config_kernel.mk b/drivers/gpu/drm/img-rogue/config_kernel.mk -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/config_kernel.mk -@@ -0,0 +1,53 @@ -+override PVRSRV_DIR := services -+override HOST_PRIMARY_ARCH := host_x86_64 -+override HOST_32BIT_ARCH := host_i386 -+override HOST_FORCE_32BIT := -m32 -+override HOST_ALL_ARCH := host_x86_64 host_i386 -+override TARGET_PRIMARY_ARCH := target_riscv64 -+override TARGET_SECONDARY_ARCH := -+override TARGET_ALL_ARCH := target_riscv64 -+override TARGET_FORCE_32BIT := -+override PVR_ARCH := rogue -+override METAG_VERSION_NEEDED := 2.8.1.0.3 -+override MIPS_VERSION_NEEDED := 2014.07-1 -+override RISCV_VERSION_NEEDED := 1.0.1 -+override KERNELDIR := /home/likaiyang/work/Jindie/out/bootable/fpga/linux -+override KERNEL_ID := 6.1.15-00003-g57b93bb899b2-dirty -+override PVRSRV_MODULE_BASEDIR := /lib/modules/6.1.15-00003-g57b93bb899b2-dirty/extra/ -+override KERNEL_COMPONENTS := srvkm -+override KERNEL_CROSS_COMPILE := riscv64-unknown-linux-gnu- -+override WINDOW_SYSTEM := wayland -+override PVRSRV_MODNAME := pvrsrvkm -+override PVR_BUILD_DIR := spacemit -+override PVR_BUILD_TYPE := release -+override SUPPORT_RGX := 1 -+override PVR_SYSTEM := spacemit -+override PVR_LOADER := -+override BUILD := release -+override SORT_BRIDGE_STRUCTS := 1 -+override DEBUGLINK := 1 -+override RGX_BNC := 36.V.52.182 -+override PVRSRV_TRACE_ROGUE_EVENTS := 1 -+override SUPPORT_PHYSMEM_TEST := 1 -+override SUPPORT_MIPS_64K_PAGE_SIZE := -+override RGX_NUM_DRIVERS_SUPPORTED := 1 -+override RGX_FW_HEAP_OSID_ASSIGNMENT := RGX_FW_HEAP_USES_FIRMWARE_OSID -+override VMM_TYPE := stub -+override SUPPORT_POWMON_COMPONENT := 1 -+override RGX_TIMECORR_CLOCK := mono -+override PDVFS_COM_HOST := 1 -+override PDVFS_COM_AP := 2 -+override PDVFS_COM_PMC := 3 -+override PDVFS_COM_IMG_CLKDIV := 4 -+override PDVFS_COM := PDVFS_COM_HOST -+override PVR_GPIO_MODE_GENERAL := 1 -+override PVR_GPIO_MODE_POWMON_PIN := 2 -+override PVR_GPIO_MODE := PVR_GPIO_MODE_GENERAL -+override PVR_HANDLE_BACKEND := idr -+override SUPPORT_DMABUF_BRIDGE := 1 -+override SUPPORT_USC_BREAKPOINT := 1 -+override SUPPORT_DI_BRG_IMPL := 1 -+override SUPPORT_WRAP_EXTMEM := 1 -+override SUPPORT_NATIVE_FENCE_SYNC := 1 -+override SUPPORT_DMA_FENCE := 1 -+override SUPPORT_BUFFER_SYNC := 1 -diff --git a/drivers/gpu/drm/img-rogue/configs/rgxconfig_km_36.V.52.182.h b/drivers/gpu/drm/img-rogue/configs/rgxconfig_km_36.V.52.182.h 
-new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/configs/rgxconfig_km_36.V.52.182.h -@@ -0,0 +1,104 @@ -+/*************************************************************************/ /*! -+@Title RGX Configuration for BVNC 36.V.52.182 (kernel defines) -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RGXCONFIG_KM_36_V_52_182_H -+#define RGXCONFIG_KM_36_V_52_182_H -+ -+/***** Automatically generated file. 
Do not edit manually ********************/ -+ -+/****************************************************************************** -+ * B.V.N.C Validation defines -+ *****************************************************************************/ -+#define RGX_BNC_KM_B 36 -+#define RGX_BNC_KM_N 52 -+#define RGX_BNC_KM_C 182 -+ -+/****************************************************************************** -+ * DDK Defines -+ *****************************************************************************/ -+#define RGX_FEATURE_AXI_ACELITE -+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) -+#define RGX_FEATURE_COMPUTE -+#define RGX_FEATURE_COMPUTE_OVERLAP -+#define RGX_FEATURE_COREID_PER_OS -+#define RGX_FEATURE_FBCDC (50U) -+#define RGX_FEATURE_FBCDC_ALGORITHM (50U) -+#define RGX_FEATURE_FBCDC_ARCHITECTURE (7U) -+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U) -+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U) -+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT -+#define RGX_FEATURE_GPU_VIRTUALISATION -+#define RGX_FEATURE_GS_RTA_SUPPORT -+#define RGX_FEATURE_IRQ_PER_OS -+#define RGX_FEATURE_LAYOUT_MARS (1U) -+#define RGX_FEATURE_MIPS -+#define RGX_FEATURE_NUM_CLUSTERS (1U) -+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2U) -+#define RGX_FEATURE_NUM_OSIDS (8U) -+#define RGX_FEATURE_NUM_RASTER_PIPES (1U) -+#define RGX_FEATURE_PBE2_IN_XE -+#define RGX_FEATURE_PBVNC_COREID_REG -+#define RGX_FEATURE_PERFBUS -+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U) -+#define RGX_FEATURE_ROGUEXE -+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT -+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2 -+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U) -+#define RGX_FEATURE_SLC_BANKS (1U) -+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) -+#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE /* Specifies the SLC is */ -+ /* customer-configurable. True SLC */ -+ /* size must be sourced from */ -+ /* register. */ -+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U) -+#define RGX_FEATURE_SOC_TIMER -+#define RGX_FEATURE_SYS_BUS_SECURE_RESET -+#define RGX_FEATURE_TFBC_VERSION (10U) -+#define RGX_FEATURE_TILE_SIZE_X (16U) -+#define RGX_FEATURE_TILE_SIZE_Y (16U) -+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) -+#define RGX_FEATURE_XE_ARCHITECTURE (1U) -+#define RGX_FEATURE_XE_MEMORY_HIERARCHY -+#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH (19U) -+#define RGX_FEATURE_XPU_MAX_SLAVES (3U) -+#define RGX_FEATURE_XPU_REGISTER_BROADCAST (1U) -+ -+#endif /* RGXCONFIG_KM_36_V_52_182_H */ -diff --git a/drivers/gpu/drm/img-rogue/connection_server.c b/drivers/gpu/drm/img-rogue/connection_server.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/connection_server.c -@@ -0,0 +1,556 @@ -+/*************************************************************************/ /*! -+@File -+@Title Server side connection management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Handles connections coming from the client and the management -+ connection based information -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "handle.h" -+#include "pvrsrv.h" -+#include "connection_server.h" -+#include "osconnection_server.h" -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "sync_server.h" -+#include "process_stats.h" -+#include "pdump_km.h" -+#include "osfunc.h" -+#include "tlstream.h" -+#include "rgxhwperf_common.h" -+ -+/* PID associated with Connection currently being purged by Cleanup thread */ -+static IMG_PID gCurrentPurgeConnectionPid; -+ -+static PVRSRV_ERROR ConnectionDataDestroy(CONNECTION_DATA *psConnection) -+{ -+ PVRSRV_ERROR eError; -+ PROCESS_HANDLE_BASE *psProcessHandleBase; -+ IMG_UINT64 ui64MaxBridgeTime; -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_DEVICE_NODE *psDevNode = OSGetDevNode(psConnection); -+ -+ if (psPVRSRVData->bUnload) -+ { -+ /* driver is unloading so do not allow the bridge lock to be released */ -+ ui64MaxBridgeTime = 0; -+ } -+ else -+ { -+ ui64MaxBridgeTime = CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS; -+ } -+ -+ PVR_ASSERT(psConnection != NULL); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psConnection, "psConnection"); -+ -+ /* Close HWPerfClient stream here even though we created it in -+ * PVRSRVConnectKM(). 
*/ -+ if (psConnection->hClientTLStream) -+ { -+ TLStreamClose(psConnection->hClientTLStream); -+ psConnection->hClientTLStream = NULL; -+ PVR_DPF((PVR_DBG_MESSAGE, "Destroyed private stream.")); -+ } -+ -+ /* Get process handle base to decrement the refcount */ -+ psProcessHandleBase = psConnection->psProcessHandleBase; -+ -+ if (psProcessHandleBase != NULL) -+ { -+ /* PVRSRVReleaseProcessHandleBase() calls PVRSRVFreeKernelHendles() -+ * and PVRSRVFreeHandleBase() for the process handle base. -+ * Releasing kernel handles can never return RETRY error because -+ * release function for those handles are NOPs and PVRSRVFreeKernelHendles() -+ * doesn't even call pfnReleaseData() callback. -+ * Process handles can potentially return RETRY hence additional check -+ * below. */ -+ eError = PVRSRVReleaseProcessHandleBase(psProcessHandleBase, psConnection->pid, -+ ui64MaxBridgeTime); -+ if (PVRSRVIsRetryError(eError)) -+ { -+ return eError; -+ } -+ else -+ { -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVReleaseProcessHandleBase"); -+ } -+ -+ psConnection->psProcessHandleBase = NULL; -+ } -+ -+ /* Free handle base for this connection */ -+ if (psConnection->psHandleBase != NULL) -+ { -+ eError = PVRSRVFreeHandleBase(psConnection->psHandleBase, ui64MaxBridgeTime); -+ /* -+ * If we get PVRSRV_ERROR_RETRY we need to pass this back to the caller -+ * who will schedule a retry. -+ * Do not log this as it is an expected exception. -+ * This can occur if the Firmware is still processing a workload from -+ * the client when a tear-down request is received. -+ * Retrying will allow the in-flight work to be completed and the -+ * tear-down request can be completed when the FW is no longer busy. -+ */ -+ if (PVRSRVIsRetryError(eError)) -+ { -+ return eError; -+ } -+ else -+ { -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVFreeHandleBase"); -+ } -+ -+ psConnection->psHandleBase = NULL; -+ } -+ -+ if (psConnection->psSyncConnectionData != NULL) -+ { -+ SyncUnregisterConnection(psConnection->psSyncConnectionData); -+ psConnection->psSyncConnectionData = NULL; -+ } -+ -+ if (psConnection->psPDumpConnectionData != NULL) -+ { -+ PDumpUnregisterConnection(psDevNode, -+ psConnection->psPDumpConnectionData); -+ psConnection->psPDumpConnectionData = NULL; -+ } -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ PVRSRVStatsDeviceDisconnect(psDevNode); -+#endif -+ -+ /* Call environment specific connection data deinit function */ -+ if (psConnection->hOsPrivateData != NULL) -+ { -+ eError = OSConnectionPrivateDataDeInit(psConnection->hOsPrivateData); -+ PVR_LOG_RETURN_IF_ERROR(eError, "OSConnectionPrivateDataDeInit"); -+ -+ psConnection->hOsPrivateData = NULL; -+ } -+ -+ /* Close the PID stats entry as late as possible to catch all frees */ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) -+ if (psConnection->hProcessStats != NULL) -+ { -+ PVRSRVStatsDeregisterProcess(psConnection->hProcessStats); -+ psConnection->hProcessStats = NULL; -+ } -+#endif -+ -+ OSFreeMemNoStats(psConnection); -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ /* Kick the Firmware to invalidate caches to clear all the zombie PMRs. -+ * If there are not zombie PMRs or no mappings were freed the kick will not -+ * be executed. -+ * -+ * This is needed: -+ * - when the process is killed and the connection cleanup has to clean up -+ * all dangling handles. 
-+ * - there are any outstanding PMRs in the zombie list due to no -+ * invalidation being executed before connection destruction -+ */ -+ eError = MMU_CacheInvalidateKick(psDevNode, NULL); -+ PVR_LOG_IF_ERROR(eError, "MMU_CacheInvalidateKick"); -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVCommonConnectionConnect(void **ppvPrivData, void *pvOSData) -+{ -+ CONNECTION_DATA *psConnection; -+ PVRSRV_ERROR eError; -+ PROCESS_HANDLE_BASE *psProcessHandleBase; -+ -+ /* Allocate connection data area, no stats since process not registered yet */ -+ psConnection = OSAllocZMemNoStats(sizeof(*psConnection)); -+ PVR_LOG_RETURN_IF_NOMEM(psConnection, "psConnection"); -+ psConnection->bSyncConnection = IMG_FALSE; -+ -+ /* Allocate process statistics as early as possible to catch all allocs */ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) -+ eError = PVRSRVStatsRegisterProcess(&psConnection->hProcessStats); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVStatsRegisterProcess", failure); -+#endif -+ -+ /* Call environment specific connection data init function */ -+ eError = OSConnectionPrivateDataInit(&psConnection->hOsPrivateData, pvOSData); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSConnectionPrivateDataInit", failure); -+ -+ psConnection->pid = OSGetCurrentClientProcessIDKM(); -+ psConnection->vpid = OSGetCurrentVirtualProcessID(); -+ psConnection->tid = (IMG_UINT32)OSGetCurrentClientThreadIDKM(); -+ OSStringLCopy(psConnection->pszProcName, OSGetCurrentClientProcessNameKM(), PVRSRV_CONNECTION_PROCESS_NAME_LEN); -+ -+#if defined(SUPPORT_DMA_TRANSFER) -+ OSLockCreate(&psConnection->hDmaReqLock); -+ -+ eError = OSEventObjectCreate("Dma transfer cleanup event object", -+ &psConnection->hDmaEventObject); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", failure); -+ -+ OSAtomicWrite(&psConnection->ui32NumDmaTransfersInFlight, 0); -+ psConnection->bAcceptDmaRequests = IMG_TRUE; -+#endif -+ -+ /* Register this connection with the sync core */ -+ eError = SyncRegisterConnection(&psConnection->psSyncConnectionData); -+ PVR_LOG_GOTO_IF_ERROR(eError, "SyncRegisterConnection", failure); -+ -+ /* -+ * Register this connection and Sync PDump callback with -+ * the pdump core. Pass in the Sync connection data. 
-+ */ -+ eError = PDumpRegisterConnection(OSGetDevNode(psConnection), -+ psConnection->psSyncConnectionData, -+ SyncConnectionPDumpSyncBlocks, -+ &psConnection->psPDumpConnectionData); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PDumpRegisterConnection", failure); -+ -+ /* Allocate handle base for this connection */ -+ eError = PVRSRVAllocHandleBase(&psConnection->psHandleBase, -+ PVRSRV_HANDLE_BASE_TYPE_CONNECTION); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandleBase", failure); -+ -+ /* get process handle base (if it doesn't exist it will be allocated) */ -+ eError = PVRSRVAcquireProcessHandleBase(psConnection->pid, &psProcessHandleBase); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAcquireProcessHandleBase", failure); -+ -+ /* hConnectionsLock now resides in PVRSRV_DEVICE_NODE */ -+ { -+ IMG_BOOL bHostStreamIsNull; -+ PVRSRV_RGXDEV_INFO *psRgxDevInfo; -+ PVRSRV_DEVICE_NODE *psDevNode = OSGetDevNode(psConnection); -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ eError = PVRSRVStatsDeviceConnect(psDevNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVStatsDeviceConnect", failure); -+#endif -+ -+ OSLockAcquire(psDevNode->hConnectionsLock); -+ dllist_add_to_tail(&psDevNode->sConnections, &psConnection->sConnectionListNode); -+#if defined(DEBUG) || defined(PDUMP) -+ PVR_LOG(("%s connected - (devID = %u)", psConnection->pszProcName, -+ psDevNode->sDevId.ui32InternalID)); -+#endif -+ OSLockRelease(psDevNode->hConnectionsLock); -+ -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ psRgxDevInfo = _RGX_DEVICE_INFO_FROM_NODE(psDevNode); -+ -+ OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); -+ bHostStreamIsNull = (IMG_BOOL)(psRgxDevInfo->hHWPerfHostStream == NULL); -+ OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); -+ -+ if (!bHostStreamIsNull) -+ { -+ if (TLStreamIsOpenForReading(psRgxDevInfo->hHWPerfHostStream)) -+ { -+ /* Announce this client connection in the host stream, if event mask is set */ -+ RGXSRV_HWPERF_HOST_CLIENT_INFO_PROCESS_NAME(psDevNode, psConnection->pid, psConnection->pszProcName); -+ } -+ } -+ } -+ } -+ -+ psConnection->psProcessHandleBase = psProcessHandleBase; -+ -+ *ppvPrivData = psConnection; -+ -+ return PVRSRV_OK; -+ -+failure: -+ ConnectionDataDestroy(psConnection); -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR _CleanupThreadPurgeConnectionData(void *pvConnectionData) -+{ -+ PVRSRV_ERROR eErrorConnection, eErrorKernel; -+ CONNECTION_DATA *psConnectionData = pvConnectionData; -+ -+ gCurrentPurgeConnectionPid = psConnectionData->pid; -+ -+ eErrorConnection = ConnectionDataDestroy(psConnectionData); -+ if (eErrorConnection != PVRSRV_OK) -+ { -+ if (PVRSRVIsRetryError(eErrorConnection)) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Failed to purge connection data %p " -+ "(deferring destruction)", __func__, psConnectionData)); -+ } -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Connection data %p deferred destruction " -+ "finished", __func__, psConnectionData)); -+ } -+ -+ /* Check if possible resize the global handle base */ -+ eErrorKernel = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE); -+ PVR_LOG_IF_ERROR(eErrorKernel, "PVRSRVPurgeHandles"); -+ -+ gCurrentPurgeConnectionPid = 0; -+ -+ return eErrorConnection; -+} -+ -+#if defined(SUPPORT_DMA_TRANSFER) -+static void WaitForOutstandingDma(CONNECTION_DATA *psConnectionData) -+{ -+ -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hEvent; -+ IMG_UINT32 ui32Tries = 100; -+ -+#if defined(DMA_VERBOSE) -+ PVR_DPF((PVR_DBG_ERROR, -+ "Waiting on %d DMA transfers in flight...", OSAtomicRead(&psConnectionData->ui32NumDmaTransfersInFlight))); -+#endif -+ -+ 
eError = OSEventObjectOpen(psConnectionData->hDmaEventObject, &hEvent); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__)); -+ return; -+ } -+ -+ while (OSAtomicRead(&psConnectionData->ui32NumDmaTransfersInFlight) != 0) -+ { -+ /* -+ #define DMA_TRANSFER_TIMEOUT_US (5000000ULL) -+ -+ This currently doesn't work properly. Wait time is not as requested. -+ Using OSSleepms instead -+ -+ OSEventObjectWaitKernel(hEvent, DMA_TRANSFER_TIMEOUT_US); -+ */ -+ OSSleepms(50); -+ if (!ui32Tries) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Timeout while waiting on outstanding DMA transfers!", __func__)); -+ break; -+ } -+ -+ ui32Tries--; -+ } -+ -+ OSEventObjectClose(hEvent); -+} -+#endif -+ -+void PVRSRVCommonConnectionDisconnect(void *pvDataPtr) -+{ -+ CONNECTION_DATA *psConnectionData = pvDataPtr; -+ PVRSRV_DEVICE_NODE *psDevNode = OSGetDevNode(psConnectionData); -+ -+ OSLockAcquire(psDevNode->hConnectionsLock); -+ dllist_remove_node(&psConnectionData->sConnectionListNode); -+ OSLockRelease(psDevNode->hConnectionsLock); -+ -+ /* Notify the PDump core if the pdump control client is disconnecting */ -+ if (psConnectionData->ui32ClientFlags & SRV_FLAGS_PDUMPCTRL) -+ { -+ PDumpDisconnectionNotify(psDevNode); -+ } -+ -+ /* Add a HOST_CLIENT_INFO event to match the one on connection */ -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ IMG_BOOL bHostStreamIsNull; -+ PVRSRV_RGXDEV_INFO *psRgxDevInfo; -+ -+ psRgxDevInfo = _RGX_DEVICE_INFO_FROM_NODE(psDevNode); -+ -+ OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); -+ bHostStreamIsNull = (IMG_BOOL)(psRgxDevInfo->hHWPerfHostStream == NULL); -+ OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); -+ -+ if (!bHostStreamIsNull) -+ { -+ if (TLStreamIsOpenForReading(psRgxDevInfo->hHWPerfHostStream)) -+ { -+ /* Announce this client connection in the host stream, if event mask is set */ -+ RGXSRV_HWPERF_HOST_CLIENT_INFO_PROCESS_NAME(psDevNode, psConnectionData->pid, psConnectionData->pszProcName); -+ } -+ } -+ } -+ -+#if defined(SUPPORT_DMA_TRANSFER) -+ OSLockAcquire(psConnectionData->hDmaReqLock); -+ -+ psConnectionData->bAcceptDmaRequests = IMG_FALSE; -+ -+ OSLockRelease(psConnectionData->hDmaReqLock); -+ -+ WaitForOutstandingDma(psConnectionData); -+ -+ OSEventObjectDestroy(psConnectionData->hDmaEventObject); -+ OSLockDestroy(psConnectionData->hDmaReqLock); -+#endif -+ -+#if defined(DEBUG) || defined(PDUMP) -+ PVR_LOG(("%s disconnected - (devID = %u)", psConnectionData->pszProcName, -+ psDevNode->sDevId.ui32InternalID)); -+#endif -+ -+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) -+ if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK) -+#endif -+ { -+ /* Defer the release of the connection data */ -+ psConnectionData->sCleanupThreadFn.pfnFree = _CleanupThreadPurgeConnectionData; -+ psConnectionData->sCleanupThreadFn.pvData = psConnectionData; -+ /* Some resources in HANDLE_BASE may need FW idle confirmation -+ * hence setting to TRUE to use the global EO for retries which is -+ * signalled by the device MISR */ -+ psConnectionData->sCleanupThreadFn.bDependsOnHW = IMG_TRUE; -+ psConnectionData->sCleanupThreadFn.eCleanupType = PVRSRV_CLEANUP_TYPE_CONNECTION; -+ CLEANUP_THREAD_SET_RETRY_COUNT(&psConnectionData->sCleanupThreadFn, -+ CLEANUP_THREAD_RETRY_COUNT_DEFAULT); -+ PVRSRVCleanupThreadAddWork(psDevNode, &psConnectionData->sCleanupThreadFn); -+ } -+} -+ -+IMG_PID PVRSRVGetPurgeConnectionPid(void) -+{ -+ return gCurrentPurgeConnectionPid; -+} -+ -+/* Prefix for debug messages about Active Connections */ 
-+#define DEBUG_DUMP_CONNECTION_FORMAT_STR " P%d-V%d-T%d-%s," -+#define CONNECTIONS_PREFIX "Connections Device ID:%u(%d)" -+#define MAX_CONNECTIONS_PREFIX (29) -+#define MAX_DEBUG_DUMP_CONNECTION_STR_LEN (1+10+10+10+7+PVRSRV_CONNECTION_PROCESS_NAME_LEN) -+#define MAX_DEBUG_DUMP_STRING_LEN (1+MAX_CONNECTIONS_PREFIX+(3*MAX_DEBUG_DUMP_CONNECTION_STR_LEN)) -+ -+void PVRSRVConnectionDebugNotify(PVRSRV_DEVICE_NODE *psDevNode, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ PDLLIST_NODE pNext, pNode; -+ -+ /* We must check for an initialised device before accessing its mutex. -+ * The mutex is initialised as part of DeviceInitialize() which occurs -+ * on first access to the device node. -+ */ -+ if ((psDevNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) && -+ (psDevNode->eDevState != PVRSRV_DEVICE_STATE_FROZEN)) -+ { -+ PVR_DUMPDEBUG_LOG("Connections: No Devices: No active connections"); -+ return; -+ } -+ -+ OSLockAcquire(psDevNode->hConnectionsLock); -+ if (dllist_is_empty(&psDevNode->sConnections)) -+ { -+ PVR_DUMPDEBUG_LOG(CONNECTIONS_PREFIX " No active connections", -+ (unsigned char)psDevNode->sDevId.ui32InternalID, -+ (unsigned char)psDevNode->sDevId.i32KernelDeviceID); -+ } -+ else -+ { -+ IMG_CHAR sActiveConnections[MAX_DEBUG_DUMP_STRING_LEN]; -+ IMG_UINT16 i, uiPos = 0; -+ IMG_BOOL bPrinted = IMG_FALSE; -+ size_t uiSize = sizeof(sActiveConnections); -+ -+ IMG_CHAR szTmpConBuff[MAX_CONNECTIONS_PREFIX + 1]; -+ i = OSSNPrintf(szTmpConBuff, -+ MAX_CONNECTIONS_PREFIX, -+ CONNECTIONS_PREFIX, -+ (unsigned char)psDevNode->sDevId.ui32InternalID, -+ (unsigned char)psDevNode->sDevId.i32KernelDeviceID); -+ OSStringLCopy(sActiveConnections+uiPos, szTmpConBuff, uiSize); -+ -+ /* Move the write offset to the end of the current string */ -+ uiPos += i; -+ /* Update the amount of remaining space available to copy into */ -+ uiSize -= i; -+ -+ dllist_foreach_node(&psDevNode->sConnections, pNode, pNext) -+ { -+ CONNECTION_DATA *sData = IMG_CONTAINER_OF(pNode, CONNECTION_DATA, sConnectionListNode); -+ -+ IMG_CHAR sTmpBuff[MAX_DEBUG_DUMP_CONNECTION_STR_LEN]; -+ i = OSSNPrintf(sTmpBuff, MAX_DEBUG_DUMP_CONNECTION_STR_LEN, -+ DEBUG_DUMP_CONNECTION_FORMAT_STR, sData->pid, sData->vpid, sData->tid, sData->pszProcName); -+ i = MIN(MAX_DEBUG_DUMP_CONNECTION_STR_LEN, i); -+ bPrinted = IMG_FALSE; -+ -+ OSStringLCopy(sActiveConnections+uiPos, sTmpBuff, uiSize); -+ -+ /* Move the write offset to the end of the current string */ -+ uiPos += i; -+ /* Update the amount of remaining space available to copy into */ -+ uiSize -= i; -+ -+ /* If there is not enough space to add another connection to this line, output the line */ -+ if (uiSize <= MAX_DEBUG_DUMP_CONNECTION_STR_LEN) -+ { -+ PVR_DUMPDEBUG_LOG("%s", sActiveConnections); -+ -+ /* -+ * Remove the "Connections:" prefix from the buffer. -+ * Leave the subsequent buffer contents indented by the same -+ * amount to aid in interpreting the debug output. 
-+ */ -+ uiPos = sizeof(CONNECTIONS_PREFIX) - 1; -+ /* Reset the amount of space available to copy into */ -+ uiSize = MAX_DEBUG_DUMP_STRING_LEN - uiPos; -+ bPrinted = IMG_TRUE; -+ } -+ } -+ -+ /* Only print the current line if it hasn't already been printed */ -+ if (!bPrinted) -+ { -+ /* Strip off the final comma */ -+ sActiveConnections[OSStringNLength(sActiveConnections, MAX_DEBUG_DUMP_STRING_LEN) - 1] = '\0'; -+ PVR_DUMPDEBUG_LOG("%s", sActiveConnections); -+ } -+#undef MAX_DEBUG_DUMP_STRING_LEN -+#undef MAX_DEBUG_DUMP_CONNECTIONS_PER_LINE -+ } -+ OSLockRelease(psDevNode->hConnectionsLock); -+} -diff --git a/drivers/gpu/drm/img-rogue/connection_server.h b/drivers/gpu/drm/img-rogue/connection_server.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/connection_server.h -@@ -0,0 +1,145 @@ -+/*************************************************************************/ /*! -+@File -+@Title Server side connection management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description API for server side connection management -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#if !defined(CONNECTION_SERVER_H) -+#define CONNECTION_SERVER_H -+ -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "handle.h" -+#include "pvrsrv_cleanup.h" -+ -+/* Variable used to hold in memory the timeout for the current time slice*/ -+extern IMG_UINT64 gui64TimesliceLimit; -+/* Counter number of handle data freed during the current time slice */ -+extern IMG_UINT32 gui32HandleDataFreeCounter; -+/* Set the maximum time the freeing of the resources can keep the lock */ -+#define CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS (3000 * 1000) /* 3ms */ -+ -+typedef struct _CONNECTION_DATA_ -+{ -+ PVRSRV_HANDLE_BASE *psHandleBase; -+ PROCESS_HANDLE_BASE *psProcessHandleBase; -+ struct _SYNC_CONNECTION_DATA_ *psSyncConnectionData; -+ struct _PDUMP_CONNECTION_DATA_ *psPDumpConnectionData; -+ -+ /* Holds the client flags supplied at connection time */ -+ IMG_UINT32 ui32ClientFlags; -+ -+ /* -+ * OS specific data can be stored via this handle. -+ * See osconnection_server.h for a generic mechanism -+ * for initialising this field. -+ */ -+ IMG_HANDLE hOsPrivateData; -+ -+#define PVRSRV_CONNECTION_PROCESS_NAME_LEN (16) -+ IMG_PID pid; -+ IMG_PID vpid; -+ IMG_UINT32 tid; -+ IMG_CHAR pszProcName[PVRSRV_CONNECTION_PROCESS_NAME_LEN]; -+ -+ IMG_HANDLE hProcessStats; -+ -+ IMG_HANDLE hClientTLStream; -+ -+#if defined(SUPPORT_CUSTOM_OSID_EMISSION) -+ /* -+ * Connection-based values per application which can be modified by the -+ * AppHint settings 'OSid, OSidReg, bOSidAxiProtReg' for each application. -+ * These control where the connection's memory allocation is sourced from. -+ * ui32OSid, ui32OSidReg range from 0..(GPUVIRT_VALIDATION_NUM_OS - 1). -+ */ -+ IMG_UINT32 ui32OSid; -+ IMG_UINT32 ui32OSidReg; -+ IMG_BOOL bOSidAxiProtReg; -+#endif /* defined(SUPPORT_CUSTOM_OSID_EMISSION) */ -+ -+#if defined(SUPPORT_DMA_TRANSFER) -+ IMG_BOOL bAcceptDmaRequests; -+ ATOMIC_T ui32NumDmaTransfersInFlight; -+ POS_LOCK hDmaReqLock; -+ IMG_HANDLE hDmaEventObject; -+#endif -+ /* Structure which is hooked into the cleanup thread work list */ -+ PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn; -+ -+ DLLIST_NODE sConnectionListNode; -+ -+ /* List navigation for deferred freeing of connection data */ -+ struct _CONNECTION_DATA_ **ppsThis; -+ struct _CONNECTION_DATA_ *psNext; -+ IMG_BOOL bSyncConnection; -+} CONNECTION_DATA; -+ -+#include "osconnection_server.h" -+ -+PVRSRV_ERROR PVRSRVCommonConnectionConnect(void **ppvPrivData, void *pvOSData); -+void PVRSRVCommonConnectionDisconnect(void *pvPrivData); -+ -+/**************************************************************************/ /*! -+@Function PVRSRVGetPurgeConnectionPid -+ -+@Description Returns PID associated with Connection currently being purged by -+ Cleanup Thread. If no Connection is purged 0 is returned. -+ -+@Return PID associated with currently purged connection or 0 if no -+ connection is being purged -+*/ /***************************************************************************/ -+IMG_PID PVRSRVGetPurgeConnectionPid(void); -+ -+void PVRSRVConnectionDebugNotify(PVRSRV_DEVICE_NODE *psDevNode, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile); -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PVRSRVConnectionPrivateData) -+#endif -+static INLINE -+IMG_HANDLE PVRSRVConnectionPrivateData(CONNECTION_DATA *psConnection) -+{ -+ return (psConnection != NULL) ? 
psConnection->hOsPrivateData : NULL; -+} -+ -+#endif /* !defined(CONNECTION_SERVER_H) */ -diff --git a/drivers/gpu/drm/img-rogue/cores/rgxcore_km_36.29.52.182.h b/drivers/gpu/drm/img-rogue/cores/rgxcore_km_36.29.52.182.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/cores/rgxcore_km_36.29.52.182.h -@@ -0,0 +1,75 @@ -+/*************************************************************************/ /*! -+@Title RGX Core BVNC 36.29.52.182 -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#ifndef RGXCORE_KM_36_29_52_182_H -+#define RGXCORE_KM_36_29_52_182_H -+ -+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */ -+/* CS: @5908879 */ -+ -+/****************************************************************************** -+ * BVNC = 36.29.52.182 -+ *****************************************************************************/ -+#define RGX_BVNC_KM_B 36 -+#define RGX_BVNC_KM_V 29 -+#define RGX_BVNC_KM_N 52 -+#define RGX_BVNC_KM_C 182 -+ -+/****************************************************************************** -+ * Errata -+ *****************************************************************************/ -+ -+#define FIX_HW_BRN_63553 -+#define FIX_HW_BRN_71317 -+ -+ -+ -+/****************************************************************************** -+ * Enhancements -+ *****************************************************************************/ -+#define HW_ERN_42290 -+#define HW_ERN_42606 -+#define HW_ERN_47025 -+#define HW_ERN_57596 -+ -+ -+ -+#endif /* RGXCORE_KM_36_29_52_182_H */ -diff --git a/drivers/gpu/drm/img-rogue/debug_common.c b/drivers/gpu/drm/img-rogue/debug_common.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/debug_common.c -@@ -0,0 +1,2172 @@ -+/*************************************************************************/ /*! -+@File -+@Title Debug Functionality -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Creates common debug info entries. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(__linux__) -+#include -+#endif /* #if !defined(__linux__) */ -+ -+#include "debug_common.h" -+#include "pvrsrv.h" -+#include "di_server.h" -+#include "lists.h" -+#include "pvrversion.h" -+#include "rgx_options.h" -+#include "allocmem.h" -+#include "rgxfwutils.h" -+#include "rgxfwriscv.h" -+#include "osfunc.h" -+#if defined(SUPPORT_RGX) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) -+#include "rgxfwdbg.h" -+#endif -+ -+#ifdef SUPPORT_RGX -+#include "rgxdevice.h" -+#include "rgxdebug_common.h" -+#include "rgxinit.h" -+#include "rgxmmudefs_km.h" -+#endif -+ -+static DI_ENTRY *gpsVersionDIEntry; -+static DI_ENTRY *gpsStatusDIEntry; -+ -+#ifdef SUPPORT_VALIDATION -+static DI_ENTRY *gpsTestMemLeakDIEntry; -+#endif /* SUPPORT_VALIDATION */ -+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) -+static DI_ENTRY *gpsDebugLevelDIEntry; -+#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */ -+ -+#if defined(SUPPORT_RGX) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) -+struct DI_VZ_DATA { -+ PVRSRV_DEVICE_NODE *psDevNode; -+ IMG_UINT32 ui32DriverID; -+}; -+#endif -+ -+static void _DumpDebugDIPrintfWrapper(void *pvDumpDebugFile, const IMG_CHAR *pszFormat, ...) -+{ -+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; -+ va_list ArgList; -+ -+ OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, "%s\n", pszFormat); -+ -+ va_start(ArgList, pszFormat); -+ DIVPrintf(pvDumpDebugFile, szBuffer, ArgList); -+ va_end(ArgList); -+} -+ -+/*************************************************************************/ /*! -+ Version DebugFS entry -+*/ /**************************************************************************/ -+ -+static void *_DebugVersionCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, -+ va_list va) -+{ -+ IMG_UINT64 *pui64CurrentPosition = va_arg(va, IMG_UINT64 *); -+ IMG_UINT64 ui64Position = va_arg(va, IMG_UINT64); -+ IMG_UINT64 ui64CurrentPosition = *pui64CurrentPosition; -+ -+ (*pui64CurrentPosition)++; -+ -+ return (ui64CurrentPosition == ui64Position) ? 
psDevNode : NULL; -+} -+ -+static void *_VersionDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) -+{ -+ PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); -+ IMG_UINT64 uiCurrentPosition = 1; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ PVR_UNREFERENCED_PARAMETER(psEntry); -+ -+ if (psPVRSRVData == NULL) { -+ PVR_DPF((PVR_DBG_ERROR, "psPVRSRVData = NULL")); -+ return NULL; -+ } -+ -+ if (*pui64Pos == 0) -+ { -+ return DI_START_TOKEN; -+ } -+ -+ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); -+ psDeviceNode = List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, -+ _DebugVersionCompare_AnyVaCb, -+ &uiCurrentPosition, -+ *pui64Pos); -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ -+ return psDeviceNode; -+} -+ -+static void _VersionDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvPriv) -+{ -+ PVR_UNREFERENCED_PARAMETER(psEntry); -+ PVR_UNREFERENCED_PARAMETER(pvPriv); -+} -+ -+static void *_VersionDINext(OSDI_IMPL_ENTRY *psEntry,void *pvPriv, -+ IMG_UINT64 *pui64Pos) -+{ -+ PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); -+ IMG_UINT64 uiCurrentPosition = 1; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ (*pui64Pos)++; -+ -+ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); -+ psDeviceNode = List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, -+ _DebugVersionCompare_AnyVaCb, -+ &uiCurrentPosition, -+ *pui64Pos); -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ -+ return psDeviceNode; -+} -+ -+#define DI_PRINT_VERSION_FMTSPEC \ -+ "%s Version: %u.%u @ %u (%s) build options: 0x%08x %s\n" -+#define STR_DEBUG "debug" -+#define STR_RELEASE "release" -+ -+#if defined(DEBUG) || defined(SUPPORT_VALIDATION) -+#define BUILD_OPT_LEN 80 -+ -+static inline void _AppendOptionStr(IMG_CHAR pszBuildOptions[], const IMG_CHAR* str, OSDI_IMPL_ENTRY *psEntry, IMG_UINT32* pui32BuildOptionLen) -+{ -+ IMG_UINT32 ui32BuildOptionLen = *pui32BuildOptionLen; -+ const IMG_UINT32 strLen = OSStringLength(str); -+ const IMG_UINT32 optStrLen = sizeof(IMG_CHAR) * (BUILD_OPT_LEN-1); -+ -+ if ((ui32BuildOptionLen + strLen) > optStrLen) -+ { -+ pszBuildOptions[ui32BuildOptionLen] = '\0'; -+ DIPrintf(psEntry, "%s\n", pszBuildOptions); -+ ui32BuildOptionLen = 0; -+ } -+ if (strLen < optStrLen) -+ { -+ OSStringLCopy(pszBuildOptions+ui32BuildOptionLen, str, strLen); -+ ui32BuildOptionLen += strLen - 1; -+ } -+ *pui32BuildOptionLen = ui32BuildOptionLen; -+} -+#endif /* DEBUG || SUPPORT_VALIDATION */ -+ -+static int _VersionDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvPriv) -+{ -+ PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); -+ -+ if (pvPriv == DI_START_TOKEN) -+ { -+ if (psPVRSRVData->sDriverInfo.bIsNoMatch) -+ { -+ const BUILD_INFO *psBuildInfo; -+ -+ psBuildInfo = &psPVRSRVData->sDriverInfo.sUMBuildInfo; -+ DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC, -+ "UM Driver", -+ PVRVERSION_UNPACK_MAJ(psBuildInfo->ui32BuildVersion), -+ PVRVERSION_UNPACK_MIN(psBuildInfo->ui32BuildVersion), -+ psBuildInfo->ui32BuildRevision, -+ (psBuildInfo->ui32BuildType == BUILD_TYPE_DEBUG) ? -+ STR_DEBUG : STR_RELEASE, -+ psBuildInfo->ui32BuildOptions, -+ PVR_BUILD_DIR); -+ -+ psBuildInfo = &psPVRSRVData->sDriverInfo.sKMBuildInfo; -+ DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC, -+ "KM Driver (" PVR_ARCH_NAME ")", -+ PVRVERSION_UNPACK_MAJ(psBuildInfo->ui32BuildVersion), -+ PVRVERSION_UNPACK_MIN(psBuildInfo->ui32BuildVersion), -+ psBuildInfo->ui32BuildRevision, -+ (psBuildInfo->ui32BuildType == BUILD_TYPE_DEBUG) ? 
-+ STR_DEBUG : STR_RELEASE, -+ psBuildInfo->ui32BuildOptions, -+ PVR_BUILD_DIR); -+ } -+ else -+ { -+ /* bIsNoMatch is `false` in one of the following cases: -+ * - UM & KM version parameters actually match. -+ * - A comparison between UM & KM has not been made yet, because no -+ * client ever connected. -+ * -+ * In both cases, available (KM) version info is the best output we -+ * can provide. -+ */ -+ DIPrintf(psEntry, "Driver Version: %s (%s) (%s) build options: " -+ "0x%08lx %s\n", PVRVERSION_STRING, PVR_ARCH_NAME, -+ PVR_BUILD_TYPE, RGX_BUILD_OPTIONS_KM, PVR_BUILD_DIR); -+ } -+ } -+ else if (pvPriv != NULL) -+ { -+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) pvPriv; -+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; -+#ifdef SUPPORT_RGX -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; -+#if defined(DEBUG) || defined(SUPPORT_VALIDATION) -+ IMG_CHAR pszBuildOptions[BUILD_OPT_LEN]; -+ IMG_UINT32 ui32BuildOptionLen = 0; -+ static const char* aszOptions[] = RGX_BUILD_OPTIONS_LIST; -+ int i = 0; -+#endif -+#endif /* SUPPORT_RGX */ -+ IMG_BOOL bFwVersionInfoPrinted = IMG_FALSE; -+ -+ DIPrintf(psEntry, "\nDevice Name: %s\n", psDevConfig->pszName); -+ DIPrintf(psEntry, "Device ID: %u:%d\n", psDevNode->sDevId.ui32InternalID, -+ psDevNode->sDevId.i32KernelDeviceID); -+ -+ if (psDevConfig->pszVersion) -+ { -+ DIPrintf(psEntry, "Device Version: %s\n", -+ psDevConfig->pszVersion); -+ } -+ -+ if (psDevNode->pfnDeviceVersionString) -+ { -+ IMG_CHAR *pszVerStr; -+ -+ if (psDevNode->pfnDeviceVersionString(psDevNode, -+ &pszVerStr) == PVRSRV_OK) -+ { -+ DIPrintf(psEntry, "%s\n", pszVerStr); -+ -+ OSFreeMem(pszVerStr); -+ } -+ } -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+#ifdef SUPPORT_RGX -+ /* print device's firmware version info */ -+ if (psDevInfo->psRGXFWIfOsInitMemDesc != NULL) -+ { -+ /* psDevInfo->psRGXFWIfOsInitMemDesc should be permanently mapped */ -+ if (psDevInfo->psRGXFWIfOsInit != NULL) -+ { -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks, -+ INVALIDATE); -+ if (psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated) -+ { -+ const RGXFWIF_COMPCHECKS *psRGXCompChecks = -+ &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks; -+ IMG_UINT32 ui32DDKVer = psRGXCompChecks->ui32DDKVersion; -+ -+ DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC, -+ "Firmware", -+ PVRVERSION_UNPACK_MAJ(ui32DDKVer), -+ PVRVERSION_UNPACK_MIN(ui32DDKVer), -+ psRGXCompChecks->ui32DDKBuild, -+ ((psRGXCompChecks->ui32BuildOptions & -+ OPTIONS_DEBUG_EN) ? STR_DEBUG : STR_RELEASE), -+ psRGXCompChecks->ui32BuildOptions, -+ PVR_BUILD_DIR); -+ bFwVersionInfoPrinted = IMG_TRUE; -+ -+#if defined(DEBUG) || defined(SUPPORT_VALIDATION) -+ DIPrintf(psEntry, "Firmware Build Options:\n"); -+ -+ for (i = 0; i < ARRAY_SIZE(aszOptions); i++) -+ { -+ if ((psRGXCompChecks->ui32BuildOptions & 1<sFWInfoHeader; -+ DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC, -+ "Firmware", -+ psFWInfoHeader->ui16PVRVersionMajor, -+ psFWInfoHeader->ui16PVRVersionMinor, -+ psFWInfoHeader->ui32PVRVersionBuild, -+ ((psFWInfoHeader->ui32Flags & -+ OPTIONS_DEBUG_EN) ? 
STR_DEBUG : STR_RELEASE), -+ psFWInfoHeader->ui32Flags, -+ PVR_BUILD_DIR); -+ -+ bFwVersionInfoPrinted = IMG_TRUE; -+#if defined(DEBUG) || defined(SUPPORT_VALIDATION) -+ DIPrintf(psEntry, "Firmware Build Options:\n"); -+ -+ for (i = 0; i < ARRAY_SIZE(aszOptions); i++) -+ { -+ if ((psFWInfoHeader->ui32Flags & 1<pvDevice, -+ RGXFWIF_DM_GP, -+ &sCounterDumpCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ pui32kCCBCommandSlot); -+ PVR_LOG_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot"); -+ -+ return eError; -+} -+ -+static int _DebugPowerDataDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry); -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ int eError = 0; -+ -+ PVR_UNREFERENCED_PARAMETER(pvData); -+ -+ if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Device not initialised when " -+ "power counter data was requested!")); -+ return -EIO; -+ } -+ -+ OSLockAcquire(psDevInfo->hCounterDumpingLock); -+ -+ eError = SendPowerCounterCommand(psDeviceNode, -+ RGXFWIF_PWR_COUNTER_DUMP_SAMPLE, -+ &ui32kCCBCommandSlot); -+ -+ if (eError != PVRSRV_OK) -+ { -+ OSLockRelease(psDevInfo->hCounterDumpingLock); -+ return -EIO; -+ } -+ -+ /* Wait for FW complete completion */ -+ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, -+ ui32kCCBCommandSlot, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); -+ OSLockRelease(psDevInfo->hCounterDumpingLock); -+ return -EIO; -+ } -+ -+ /* Read back the buffer */ -+ { -+ IMG_UINT32* pui32PowerBuffer; -+ IMG_UINT32 ui32NumOfRegs, ui32SamplePeriod, ui32NumOfCores; -+ IMG_UINT32 i, j; -+ -+ if (!psDevInfo->psCounterBufferMemDesc) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Counter buffer not allocated!")); -+ return -EINVAL; -+ } -+ -+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCounterBufferMemDesc, -+ (void**)&pui32PowerBuffer); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "DevmemAcquireCpuVirtAddr"); -+ OSLockRelease(psDevInfo->hCounterDumpingLock); -+ return -EIO; -+ } -+ -+ RGXFwSharedMemCacheOpExec(pui32PowerBuffer, PAGE_SIZE, PVRSRV_CACHE_OP_INVALIDATE); -+ -+ ui32NumOfRegs = *pui32PowerBuffer++; -+ ui32SamplePeriod = *pui32PowerBuffer++; -+ ui32NumOfCores = *pui32PowerBuffer++; -+ PVR_DPF((PVR_DBG_MESSAGE, "Number of power counters: %u.", ui32NumOfRegs)); -+ -+ if (ui32NumOfCores == 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "No GPU cores enabled!")); -+ eError = -EINVAL; -+ } -+ -+ if (ui32NumOfRegs && ui32NumOfCores) -+ { -+ DIPrintf(psEntry, "Power counter data for device\n"); -+ DIPrintf(psEntry, "Sample period: 0x%08x\n", ui32SamplePeriod); -+ -+ for (i = 0; i < ui32NumOfRegs; i++) -+ { -+ IMG_UINT32 ui32High, ui32Low; -+ IMG_UINT32 ui32RegOffset = *pui32PowerBuffer++; -+ IMG_UINT32 ui32NumOfInstances = *pui32PowerBuffer++; -+ -+ PVR_ASSERT(ui32NumOfInstances); -+ -+ DIPrintf(psEntry, "0x%08x:", ui32RegOffset); -+ -+ for (j = 0; j < ui32NumOfInstances * ui32NumOfCores; j++) -+ { -+ ui32Low = *pui32PowerBuffer++; -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CATURIX_XTP_TOP_INFRASTRUCTURE)) -+ { -+ /* Power counters have 32-bit range */ -+ DIPrintf(psEntry, " 0x%08x", ui32Low); -+ } -+ else -+ { -+ /* Power counters have 64-bit range */ -+ ui32High = *pui32PowerBuffer++; -+ -+ DIPrintf(psEntry, " 0x%016" IMG_UINT64_FMTSPECx, -+ (IMG_UINT64) ui32Low | (IMG_UINT64) ui32High << 32); -+ } -+ } -+ -+ DIPrintf(psEntry, "\n"); -+ } -+ } -+ -+ 
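/*
 * Illustrative sketch of the buffer layout consumed by the read-back loop
 * above: three header words (register count, sample period, core count),
 * then per register an offset word, an instance-count word, and
 * num_instances * num_cores samples, each a single 32-bit word or a lo/hi
 * pair when the counters are 64 bits wide. Stand-alone parser over a plain
 * uint32_t array; the 64-bit case is selected by a caller flag here, whereas
 * the driver derives it from a feature query. Names are hypothetical.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void dump_power_counters(const uint32_t *buf, bool counters_64bit)
{
    uint32_t num_regs      = *buf++;
    uint32_t sample_period = *buf++;
    uint32_t num_cores     = *buf++;

    printf("Sample period: 0x%08" PRIx32 "\n", sample_period);

    for (uint32_t r = 0; r < num_regs; r++)
    {
        uint32_t reg_offset    = *buf++;
        uint32_t num_instances = *buf++;

        printf("0x%08" PRIx32 ":", reg_offset);

        for (uint32_t s = 0; s < num_instances * num_cores; s++)
        {
            uint32_t lo = *buf++;
            if (counters_64bit)
            {
                uint64_t hi = *buf++;   /* high word follows the low word */
                printf(" 0x%016" PRIx64, (uint64_t)lo | (hi << 32));
            }
            else
            {
                printf(" 0x%08" PRIx32, lo);
            }
        }
        printf("\n");
    }
}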
DevmemReleaseCpuVirtAddr(psDevInfo->psCounterBufferMemDesc); -+ } -+ -+ OSLockRelease(psDevInfo->hCounterDumpingLock); -+ -+ return eError; -+} -+ -+static IMG_INT64 PowerDataSet(const IMG_CHAR __user *pcBuffer, -+ IMG_UINT64 ui64Count, IMG_UINT64 *pui64Pos, -+ void *pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*)pvData; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGXFWIF_COUNTER_DUMP_REQUEST eRequest; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ -+ PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); -+ PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); -+ PVR_RETURN_IF_FALSE(ui64Count >= 1, -EINVAL); -+ PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); -+ -+ if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Device not initialised when " -+ "power counter data was requested!")); -+ return -EIO; -+ } -+ -+ if (pcBuffer[0] == '1') -+ { -+ eRequest = RGXFWIF_PWR_COUNTER_DUMP_START; -+ } -+ else if (pcBuffer[0] == '0') -+ { -+ eRequest = RGXFWIF_PWR_COUNTER_DUMP_STOP; -+ } -+ else -+ { -+ return -EINVAL; -+ } -+ -+ OSLockAcquire(psDevInfo->hCounterDumpingLock); -+ -+ SendPowerCounterCommand(psDeviceNode, -+ eRequest, -+ &ui32kCCBCommandSlot); -+ -+ OSLockRelease(psDevInfo->hCounterDumpingLock); -+ -+ *pui64Pos += ui64Count; -+ return ui64Count; -+} -+ -+#endif /* defined(SUPPORT_RGX) && defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) */ -+ -+/*************************************************************************/ /*! -+ Status DebugFS entry -+*/ /**************************************************************************/ -+ -+static void *_DebugStatusCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, -+ va_list va) -+{ -+ IMG_UINT64 *pui64CurrentPosition = va_arg(va, IMG_UINT64 *); -+ IMG_UINT64 ui64Position = va_arg(va, IMG_UINT64); -+ IMG_UINT64 ui64CurrentPosition = *pui64CurrentPosition; -+ -+ (*pui64CurrentPosition)++; -+ -+ return (ui64CurrentPosition == ui64Position) ? 
psDevNode : NULL; -+} -+ -+static void *_DebugStatusDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) -+{ -+ PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); -+ IMG_UINT64 uiCurrentPosition = 1; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ if (*pui64Pos == 0) -+ { -+ return DI_START_TOKEN; -+ } -+ -+ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); -+ psDeviceNode = List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, -+ _DebugStatusCompare_AnyVaCb, -+ &uiCurrentPosition, -+ *pui64Pos); -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ -+ return psDeviceNode; -+} -+ -+static void _DebugStatusDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ PVR_UNREFERENCED_PARAMETER(psEntry); -+ PVR_UNREFERENCED_PARAMETER(pvData); -+} -+ -+static void *_DebugStatusDINext(OSDI_IMPL_ENTRY *psEntry, -+ void *pvData, -+ IMG_UINT64 *pui64Pos) -+{ -+ PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); -+ IMG_UINT64 uiCurrentPosition = 1; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ PVR_UNREFERENCED_PARAMETER(pvData); -+ -+ (*pui64Pos)++; -+ -+ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); -+ psDeviceNode = List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, -+ _DebugStatusCompare_AnyVaCb, -+ &uiCurrentPosition, -+ *pui64Pos); -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ -+ return psDeviceNode; -+} -+ -+static int _DebugStatusDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ if (pvData == DI_START_TOKEN) -+ { -+ PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); -+ -+ if (psPVRSRVData != NULL) -+ { -+ switch (psPVRSRVData->eServicesState) -+ { -+ case PVRSRV_SERVICES_STATE_OK: -+ DIPrintf(psEntry, "Driver Status: OK\n"); -+ break; -+ case PVRSRV_SERVICES_STATE_BAD: -+ DIPrintf(psEntry, "Driver Status: BAD\n"); -+ break; -+ case PVRSRV_SERVICES_STATE_UNDEFINED: -+ DIPrintf(psEntry, "Driver Status: UNDEFINED\n"); -+ break; -+ default: -+ DIPrintf(psEntry, "Driver Status: UNKNOWN (%d)\n", -+ psPVRSRVData->eServicesState); -+ break; -+ } -+ } -+ } -+ else if (pvData != NULL) -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; -+ IMG_CHAR *pszStatus = ""; -+ IMG_CHAR *pszReason = ""; -+ PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus; -+ PVRSRV_DEVICE_HEALTH_REASON eHealthReason; -+ -+ DIPrintf(psEntry, "\nDevice ID: %u:%d\n", psDeviceNode->sDevId.ui32InternalID, -+ psDeviceNode->sDevId.i32KernelDeviceID); -+ -+ /* Update the health status now if possible... 
*/ -+ if (psDeviceNode->pfnUpdateHealthStatus) -+ { -+ psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, IMG_FALSE); -+ } -+ eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus); -+ eHealthReason = OSAtomicRead(&psDeviceNode->eHealthReason); -+ -+ switch (eHealthStatus) -+ { -+ case PVRSRV_DEVICE_HEALTH_STATUS_OK: pszStatus = "OK"; break; -+ case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: pszStatus = "NOT RESPONDING"; break; -+ case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: pszStatus = "DEAD"; break; -+ case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: pszStatus = "FAULT"; break; -+ case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: pszStatus = "UNDEFINED"; break; -+ default: pszStatus = "UNKNOWN"; break; -+ } -+ -+ switch (eHealthReason) -+ { -+ case PVRSRV_DEVICE_HEALTH_REASON_NONE: pszReason = ""; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: pszReason = " (Asserted)"; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: pszReason = " (Poll failing)"; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: pszReason = " (Global Event Object timeouts rising)"; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: pszReason = " (KCCB offset invalid)"; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: pszReason = " (KCCB stalled)"; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_IDLING: pszReason = " (Idling)"; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: pszReason = " (Restarting)"; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS: pszReason = " (Missing interrupts)"; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_PCI_ERROR: pszReason = " (PCI error)"; break; -+ default: pszReason = " (Unknown reason)"; break; -+ } -+ -+ DIPrintf(psEntry, "Firmware Status: %s%s\n", pszStatus, pszReason); -+ if (PVRSRV_ERROR_LIMIT_REACHED) -+ { -+ DIPrintf(psEntry, "Server Errors: %d+\n", IMG_UINT32_MAX); -+ } -+ else -+ { -+ DIPrintf(psEntry, "Server Errors: %d\n", PVRSRV_KM_ERRORS); -+ } -+ -+ -+ /* Write other useful stats to aid the test cycle... */ -+ if (psDeviceNode->pvDevice != NULL) -+ { -+#ifdef SUPPORT_RGX -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ const RGXFWIF_HWRINFOBUF *psHWRInfoBuf; -+ const RGXFWIF_SYSDATA *psFwSysData; -+ -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfHWRInfoBufCtl, INVALIDATE); -+ psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBufCtl; -+ -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, INVALIDATE); -+ psFwSysData = psDevInfo->psRGXFWIfFwSysData; -+ -+#ifdef PVRSRV_DEBUG_LISR_EXECUTION -+ /* Show the detected #LISR, #MISR scheduled calls */ -+ DIPrintf(psEntry, "RGX #LISR: %" IMG_UINT64_FMTSPEC "\n", psDeviceNode->ui64nLISR); -+ DIPrintf(psEntry, "RGX #MISR: %" IMG_UINT64_FMTSPEC "\n", psDeviceNode->ui64nMISR); -+#endif /* PVRSRV_DEBUG_LISR_EXECUTION */ -+ -+ /* Calculate the number of HWR events in total across all the DMs... */ -+ if (psHWRInfoBuf != NULL) -+ { -+ IMG_UINT32 ui32HWREventCount = 0; -+ IMG_UINT32 ui32CRREventCount = 0; -+ IMG_UINT32 ui32DMIndex; -+ -+ for (ui32DMIndex = 0; ui32DMIndex < RGXFWIF_DM_MAX; ui32DMIndex++) -+ { -+ ui32HWREventCount += psHWRInfoBuf->aui32HwrDmLockedUpCount[ui32DMIndex]; -+ ui32CRREventCount += psHWRInfoBuf->aui32HwrDmOverranCount[ui32DMIndex]; -+ } -+ -+ DIPrintf(psEntry, "HWR Event Count: %d\n", ui32HWREventCount); -+ DIPrintf(psEntry, "CRR Event Count: %d\n", ui32CRREventCount); -+#ifdef PVRSRV_STALLED_CCB_ACTION -+ /* Write the number of Sync Lockup Recovery (SLR) events... 
*/ -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwOsData->ui32ForcedUpdatesRequested, -+ INVALIDATE); -+ DIPrintf(psEntry, "SLR Event Count: %d\n", psDevInfo->psRGXFWIfFwOsData->ui32ForcedUpdatesRequested); -+#endif /* PVRSRV_STALLED_CCB_ACTION */ -+ } -+ -+ /* Show error counts */ -+ DIPrintf(psEntry, "WGP Error Count: %d\n", psDevInfo->sErrorCounts.ui32WGPErrorCount); -+ DIPrintf(psEntry, "TRP Error Count: %d\n", psDevInfo->sErrorCounts.ui32TRPErrorCount); -+ -+ /* -+ * Guest drivers do not support the following functionality: -+ * - Perform actual on-chip fw tracing. -+ * - Collect actual on-chip GPU utilization stats. -+ * - Perform actual on-chip GPU power/dvfs management. -+ * - As a result no more information can be provided. -+ */ -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ if (psFwSysData != NULL) -+ { -+ DIPrintf(psEntry, "FWF Event Count: %d\n", psFwSysData->ui32FWFaults); -+ } -+ -+ /* Write the number of APM events... */ -+ DIPrintf(psEntry, "APM Event Count: %d\n", psDevInfo->ui32ActivePMReqTotal); -+ -+ /* Write the current GPU Utilisation values... */ -+ if (psDevInfo->pfnGetGpuUtilStats && -+ eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_OK) -+ { -+ PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo; -+ RGXFWIF_GPU_UTIL_STATS *psGpuUtilStats = OSAllocMem(sizeof(*psGpuUtilStats)); -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (psGpuUtilStats == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate GPU stats memory", __func__)); -+ goto return_; -+ } -+ -+ eError = psDevInfo->pfnGetGpuUtilStats(psDeviceNode, -+ psDebugInfo->hGpuUtilUserDebugFS, -+ psGpuUtilStats); -+ -+ if ((eError == PVRSRV_OK) && -+ ((IMG_UINT32)psGpuUtilStats->ui64GpuStatCumulative)) -+ { -+ const IMG_CHAR *apszDmNames[RGXFWIF_DM_MAX] = {"GP", "TDM", "GEOM", "3D", "CDM", "RAY", "GEOM2", "GEOM3", "GEOM4"}; -+ IMG_UINT64 util; -+ IMG_UINT32 rem; -+ IMG_UINT32 ui32DriverID; -+ RGXFWIF_DM eDM; -+ -+ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))) -+ { -+ apszDmNames[RGXFWIF_DM_TDM] = "2D"; -+ } -+ -+ util = 100 * psGpuUtilStats->ui64GpuStatActive; -+ util = OSDivide64(util, (IMG_UINT32)psGpuUtilStats->ui64GpuStatCumulative, &rem); -+ -+ DIPrintf(psEntry, "GPU Utilisation: %u%%\n", (IMG_UINT32)util); -+ -+ DIPrintf(psEntry, " "); -+ -+ FOREACH_SUPPORTED_DRIVER(ui32DriverID) -+ { -+ DIPrintf(psEntry, " VM%u", ui32DriverID); -+ } -+ -+ DIPrintf(psEntry, "\n"); -+ -+ for (eDM = RGXFWIF_DM_TDM; eDM < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; eDM++) -+ { -+ DIPrintf(psEntry, "%-5s Utilisation: ", apszDmNames[eDM]); -+ -+ FOREACH_SUPPORTED_DRIVER(ui32DriverID) -+ { -+ IMG_UINT32 uiDivisor = (IMG_UINT32)psGpuUtilStats->aaui64DMOSStatCumulative[eDM][ui32DriverID]; -+ -+ if (uiDivisor == 0U) -+ { -+ DIPrintf(psEntry, " - "); -+ continue; -+ } -+ -+ util = 100 * psGpuUtilStats->aaui64DMOSStatActive[eDM][ui32DriverID]; -+ util = OSDivide64(util, uiDivisor, &rem); -+ -+ DIPrintf(psEntry, "%3u%% ", (IMG_UINT32)util); -+ } -+ -+ -+ DIPrintf(psEntry, "\n"); -+ } -+ } -+ else -+ { -+ DIPrintf(psEntry, "GPU Utilisation: -\n"); -+ } -+ -+ OSFreeMem(psGpuUtilStats); -+ } -+ } -+#endif /* SUPPORT_RGX */ -+ } -+ } -+ -+#ifdef SUPPORT_RGX -+return_: -+#endif -+ return 0; -+} -+ -+#if defined(DEBUG) -+static IMG_INT64 DebugStatusSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, -+ IMG_UINT64 *pui64Pos, void *pvData) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ -+ PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); -+ PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); -+ 
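/*
 * Illustrative sketch of the validation contract shared by the DI write
 * handlers in this file: non-NULL buffer, non-NULL position currently at
 * zero, a byte count within an expected bound, and a caller-supplied
 * terminating NUL, after which the handler parses the payload, advances the
 * position by the full count and returns it. Generic stand-alone handler;
 * the error codes and the decimal parse step are assumptions, not the
 * driver's API.
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_WRITE_LEN 12U   /* e.g. a 32-bit decimal value plus the NUL */

static int64_t example_write(const char *buf, uint64_t count,
                             uint64_t *pos, uint32_t *out_value)
{
    char *end = NULL;
    unsigned long val;

    if (buf == NULL || pos == NULL || *pos != 0)
        return -EIO;                      /* malformed request */
    if (count == 0 || count >= MAX_WRITE_LEN)
        return -EINVAL;                   /* payload size out of bounds */
    if (buf[count - 1] != '\0')
        return -EINVAL;                   /* caller must NUL-terminate */

    errno = 0;
    val = strtoul(buf, &end, 10);
    if (errno != 0 || end == buf || val > UINT32_MAX)
        return -EINVAL;                   /* not a valid 32-bit decimal */

    *out_value = (uint32_t)val;
    *pos += count;                        /* consume the whole write */
    return (int64_t)count;
}

/*
 * A single-character toggle handler such as the 'k'/'K' check in
 * DebugStatusSet above would keep the same guard clauses and simply replace
 * the decimal parse with a comparison on buf[0].
 */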
PVR_RETURN_IF_FALSE(ui64Count >= 1, -EINVAL); -+ PVR_RETURN_IF_FALSE(pcBuffer[0] == 'k' || pcBuffer[0] == 'K', -EINVAL); -+ PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); -+ -+ psPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_BAD; -+ -+ *pui64Pos += ui64Count; -+ return ui64Count; -+} -+#endif -+ -+const IMG_CHAR *PVRSRVGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState) -+{ -+ static const char *const _pszDeviceStateStrings[] = { -+ #define X(_name) #_name, -+ PVRSRV_DEVICE_STATE_LIST -+ #undef X -+ }; -+ -+ if (eDevState < 0 || eDevState > PVRSRV_DEVICE_STATE_LAST) -+ { -+ return "Undefined"; -+ } -+ -+ return _pszDeviceStateStrings[eDevState]; -+} -+ -+/*************************************************************************/ /*! -+ Dump Debug DebugFS entry -+*/ /**************************************************************************/ -+ -+static int _DebugDumpDebugDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry); -+ -+ PVR_UNREFERENCED_PARAMETER(pvData); -+ -+ if (psDeviceNode->pvDevice != NULL) -+ { -+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, -+ _DumpDebugDIPrintfWrapper, psEntry); -+ } -+ -+ return 0; -+} -+ -+#ifdef SUPPORT_RGX -+ -+/*************************************************************************/ /*! -+ Firmware Trace DebugFS entry -+*/ /**************************************************************************/ -+ -+static int _DebugFWTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry); -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ if (psDevInfo != NULL) -+ { -+ RGXDumpFirmwareTrace(_DumpDebugDIPrintfWrapper, psEntry, psDevInfo); -+ } -+ -+ return 0; -+} -+ -+/*************************************************************************/ /*! 
-+ Firmware Translated Page Tables DebugFS entry -+*/ /**************************************************************************/ -+ -+static int _FirmwareMappingsDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ IMG_UINT32 ui32FwVA; -+ IMG_UINT32 ui32FwPageSize; -+ IMG_UINT32 ui32DriverID; -+ -+ psDeviceNode = DIGetPrivData(psEntry); -+ -+ if ((psDeviceNode == NULL) || -+ (psDeviceNode->pvDevice == NULL) || -+ (((PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice)->psKernelMMUCtx == NULL)) -+ { -+ /* The Kernel MMU context containing the Firmware mappings is not initialised */ -+ return 0; -+ } -+ -+ psDevInfo = psDeviceNode->pvDevice; -+ -+ DIPrintf(psEntry, "+-----------------+------------------------+------------------------+--------------+\n" -+ "| Firmware | CPU | Device | PTE |\n" -+ "| Virtual Address | Physical Address | Physical Address | Flags |\n" -+ "+-----------------+------------------------+------------------------+ +\n"); -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ DIPrintf(psEntry, "| RI/XI = Read / Execution Inhibit |\n" -+ "| C = Cache Coherent |\n" -+ "| D = Dirty Page Table Entry |\n" -+ "| V = Valid Page Table Entry |\n" -+ "| G = Global Page Table Entry |\n" -+ "+-----------------+------------------------+------------------------+--------------+\n"); -+ -+ /* MIPS uses the same page size as the OS */ -+ ui32FwPageSize = OSGetPageSize(); -+ } -+ else -+#endif -+ { -+ DIPrintf(psEntry, "| P = Pending Page Table Entry |\n" -+ "| PM = Parameter Manager Source |\n" -+ "| B = Bypass SLC |\n" -+ "| C = Cache Coherent |\n" -+ "| RW/RO = Device Access Rights |\n" -+ "| V = Valid Page Table Entry |\n" -+ "+-----------------+------------------------+------------------------+--------------+\n"); -+ -+ ui32FwPageSize = BIT(RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT); -+ } -+ -+ FOREACH_SUPPORTED_DRIVER(ui32DriverID) -+ { -+ IMG_UINT32 ui32FwHeapBase = (IMG_UINT32) ((RGX_FIRMWARE_RAW_HEAP_BASE + -+ (ui32DriverID * RGX_FIRMWARE_RAW_HEAP_SIZE)) & UINT_MAX); -+ IMG_UINT32 ui32FwHeapEnd = ui32FwHeapBase + (IMG_UINT32) (RGX_FIRMWARE_RAW_HEAP_SIZE & UINT_MAX); -+ -+ DIPrintf(psEntry, "| OS ID %u |\n" -+ "+-----------------+------------------------+------------------------+--------------+\n", ui32DriverID); -+ -+ for (ui32FwVA = ui32FwHeapBase; -+ ui32FwVA < ui32FwHeapEnd; -+ ui32FwVA += ui32FwPageSize) -+ { -+ PVRSRV_ERROR eError; -+ IMG_UINT64 ui64PTE = 0U; -+ IMG_CPU_PHYADDR sCpuPA = {0U}; -+ IMG_DEV_PHYADDR sDevPA = {0U}; -+ -+ eError = RGXGetFwMapping(psDevInfo, ui32FwVA, &sCpuPA, &sDevPA, &ui64PTE); -+ -+ if (eError == PVRSRV_OK) -+ { -+ RGXDocumentFwMapping(psDevInfo, _DumpDebugDIPrintfWrapper, psEntry, -+ ui32FwVA, sCpuPA, sDevPA, ui64PTE); -+ } -+ else if (eError != PVRSRV_ERROR_DEVICEMEM_NO_MAPPING) -+ { -+ PVR_LOG_ERROR(eError, "RGXGetFwMapping"); -+ return -EIO; -+ } -+ } -+ -+ DIPrintf(psEntry, "+-----------------+------------------------+------------------------+--------------+\n"); -+ -+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) -+ if (PVRSRV_VZ_MODE_IS(NATIVE)) -+ { -+ break; -+ } -+#endif -+ } -+ -+ return 0; -+} -+ -+#ifdef SUPPORT_FIRMWARE_GCOV -+ -+static void *_FirmwareGcovDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry); -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ if (psDevInfo != NULL) -+ { -+ if 
(psDevInfo->psFirmwareGcovBufferMemDesc != NULL) -+ { -+ void *pvCpuVirtAddr; -+ DevmemAcquireCpuVirtAddr(psDevInfo->psFirmwareGcovBufferMemDesc, &pvCpuVirtAddr); -+ return *pui64Pos ? NULL : pvCpuVirtAddr; -+ } -+ } -+ -+ return NULL; -+} -+ -+static void _FirmwareGcovDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry); -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ PVR_UNREFERENCED_PARAMETER(pvData); -+ -+ if (psDevInfo != NULL) -+ { -+ if (psDevInfo->psFirmwareGcovBufferMemDesc != NULL) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psFirmwareGcovBufferMemDesc); -+ } -+ } -+} -+ -+static void *_FirmwareGcovDINext(OSDI_IMPL_ENTRY *psEntry, -+ void *pvData, -+ IMG_UINT64 *pui64Pos) -+{ -+ PVR_UNREFERENCED_PARAMETER(psEntry); -+ PVR_UNREFERENCED_PARAMETER(pvData); -+ PVR_UNREFERENCED_PARAMETER(pui64Pos); -+ return NULL; -+} -+ -+static int _FirmwareGcovDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry); -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ if (psDevInfo != NULL) -+ { -+ DIWrite(psEntry, pvData, psDevInfo->ui32FirmwareGcovSize); -+ } -+ return 0; -+} -+ -+#endif /* SUPPORT_FIRMWARE_GCOV */ -+ -+#ifdef SUPPORT_VALIDATION -+ -+#ifndef SYS_RGX_DEV_UNMAPPED_FW_REG -+#define SYS_RGX_DEV_UNMAPPED_FW_REG 0XFFFFFFFF -+#endif -+#define DI_RGXREGS_TIMEOUT_MS 1000 -+ -+/*************************************************************************/ /*! -+ RGX Registers Dump DebugFS entry -+*/ /**************************************************************************/ -+ -+static IMG_INT64 _RgxRegsSeek(IMG_UINT64 ui64Offset, void *pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*)pvData; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", -1); -+ -+ psDevInfo = psDeviceNode->pvDevice; -+ -+ PVR_LOG_RETURN_IF_FALSE(ui64Offset <= (psDevInfo->ui32RegSize - 4), -+ "register offset is too big", -1); -+ -+ return ui64Offset; -+} -+ -+static IMG_INT64 _RgxRegsRead(IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, -+ IMG_UINT64 *pui64Pos, void *pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*)pvData; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT64 ui64RegVal = 0; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ IMG_UINT64 ui64CompRes; -+ -+ PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", -ENXIO); -+ PVR_LOG_RETURN_IF_FALSE(ui64Count == 4 || ui64Count == 8, -+ "wrong RGX register size", -EIO); -+ PVR_LOG_RETURN_IF_FALSE(!(*pui64Pos & (ui64Count - 1)), -+ "register read offset isn't aligned", -EINVAL); -+ -+ psDevInfo = psDeviceNode->pvDevice; -+ -+ if (*pui64Pos >= SYS_RGX_DEV_UNMAPPED_FW_REG) -+ { -+ if (!psDevInfo->bFirmwareInitialised) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGX Register offset is above PCI mapped range but " -+ "Firmware isn't yet initialised\n")); -+ return -EIO; -+ } -+ -+ reinit_completion(&psDevInfo->sFwRegs.sRegComp); -+ -+ eError = RGXScheduleRgxRegCommand(psDevInfo, -+ 0x00, -+ ui64Count, -+ (IMG_UINT32) *pui64Pos, -+ IMG_FALSE); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "RGXScheduleRgxRegCommand"); -+ return -EIO; -+ } -+ -+ ui64CompRes = wait_for_completion_timeout(&psDevInfo->sFwRegs.sRegComp, -+ msecs_to_jiffies(DI_RGXREGS_TIMEOUT_MS)); -+ if (!ui64CompRes) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "FW RGX Register access timeout %#x\n", -+ (IMG_UINT32) *pui64Pos)); -+ return -EIO; -+ } -+ -+ OSCachedMemCopy(pcBuffer, 
&psDevInfo->sFwRegs.ui64RegVal, ui64Count); -+ } -+ else -+ { -+ ui64RegVal = ui64Count == 4 ? -+ OSReadHWReg32(psDevInfo->pvRegsBaseKM, *pui64Pos) : -+ OSReadHWReg64(psDevInfo->pvRegsBaseKM, *pui64Pos); -+ OSCachedMemCopy(pcBuffer, &ui64RegVal, ui64Count); -+ } -+ -+ return ui64Count; -+} -+ -+static IMG_INT64 _RgxRegsWrite(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, -+ IMG_UINT64 *pui64Pos, void *pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*)pvData; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT64 ui64RegVal = 0; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ /* ignore the '\0' character */ -+ ui64Count -= 1; -+ -+ PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", -ENXIO); -+ PVR_LOG_RETURN_IF_FALSE(ui64Count == 4 || ui64Count == 8, -+ "wrong RGX register size", -EIO); -+ PVR_LOG_RETURN_IF_FALSE(!(*pui64Pos & (ui64Count - 1)), -+ "register read offset isn't aligned", -EINVAL); -+ -+ psDevInfo = psDeviceNode->pvDevice; -+ -+ if (*pui64Pos >= SYS_RGX_DEV_UNMAPPED_FW_REG) -+ { -+ if (!psDevInfo->bFirmwareInitialised) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGX Register offset is above PCI mapped range but " -+ "Firmware isn't yet initialised\n")); -+ return -EIO; -+ } -+ -+ if (ui64Count == 4) -+ ui64RegVal = (IMG_UINT64) *((IMG_UINT32 *) pcBuffer); -+ else -+ ui64RegVal = *((IMG_UINT64 *) pcBuffer); -+ -+ eError = RGXScheduleRgxRegCommand(psDevInfo, -+ ui64RegVal, -+ ui64Count, -+ (IMG_UINT32) *pui64Pos, -+ IMG_TRUE); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "RGXScheduleRgxRegCommand"); -+ return -EIO; -+ } -+ -+ } -+ else -+ { -+ if (ui64Count == 4) -+ { -+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, *pui64Pos, -+ *((IMG_UINT32 *) (void *) pcBuffer)); -+ } -+ else -+ { -+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, *pui64Pos, -+ *((IMG_UINT64 *) (void *) pcBuffer)); -+ } -+ } -+ -+ return ui64Count; -+} -+ -+#endif /* SUPPORT_VALIDATION */ -+ -+#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB) -+#define RISCV_DMI_SIZE (8U) -+ -+static IMG_INT64 _RiscvDmiRead(IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, -+ IMG_UINT64 *pui64Pos, void *pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; -+ PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo; -+ -+ ui64Count = MIN(RISCV_DMI_SIZE, ui64Count); -+ memcpy(pcBuffer, &psDebugInfo->ui64RiscvDmi, ui64Count); -+ -+ return ui64Count; -+} -+ -+static IMG_INT64 _RiscvDmiWrite(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, -+ IMG_UINT64 *pui64Pos, void *pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo; -+ -+ if (psDevInfo == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: devinfo is NULL", __func__)); -+ return 0; -+ } -+ -+ ui64Count -= 1; /* Drop `\0` */ -+ ui64Count = MIN(RISCV_DMI_SIZE, ui64Count); -+ -+ memcpy(&psDebugInfo->ui64RiscvDmi, pcBuffer, ui64Count); -+ -+ RGXRiscvDmiOp(psDevInfo, &psDebugInfo->ui64RiscvDmi); -+ -+ return ui64Count; -+} -+#endif -+ -+#endif /* SUPPORT_RGX */ -+ -+#ifdef SUPPORT_VALIDATION -+ -+static int TestMemLeakDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ -+ PVR_UNREFERENCED_PARAMETER(pvData); -+ -+ PVR_RETURN_IF_FALSE(pvData != NULL, -EINVAL); -+ -+ DIPrintf(psEntry, "os: %s, %u\ngpu: %s, %u\nmmu: %s, %u\n", -+ psPVRSRVData->sMemLeakIntervals.ui32OSAlloc ? 
"enabled" : "disabled", -+ psPVRSRVData->sMemLeakIntervals.ui32OSAlloc, -+ psPVRSRVData->sMemLeakIntervals.ui32GPU ? "enabled" : "disabled", -+ psPVRSRVData->sMemLeakIntervals.ui32GPU, -+ psPVRSRVData->sMemLeakIntervals.ui32MMU ? "enabled" : "disabled", -+ psPVRSRVData->sMemLeakIntervals.ui32MMU); -+ -+ return 0; -+} -+ -+static IMG_INT64 TestMemLeakDISet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, -+ IMG_UINT64 *pui64Pos, void *pvData) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ IMG_CHAR *pcTemp; -+ unsigned long ui32MemLeakInterval; -+ -+ PVR_UNREFERENCED_PARAMETER(pvData); -+ -+ PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); -+ PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); -+ PVR_RETURN_IF_FALSE(ui64Count <= 16, -EINVAL); -+ PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); -+ -+ pcTemp = strchr(pcBuffer, ','); -+ -+ if (kstrtoul(pcTemp+1, 0, &ui32MemLeakInterval) != 0) -+ { -+ return -EINVAL; -+ } -+ -+ if (strncmp(pcBuffer, "os", pcTemp-pcBuffer) == 0) -+ { -+ psPVRSRVData->sMemLeakIntervals.ui32OSAlloc = ui32MemLeakInterval; -+ } -+ else if (strncmp(pcBuffer, "gpu", pcTemp-pcBuffer) == 0) -+ { -+ psPVRSRVData->sMemLeakIntervals.ui32GPU = ui32MemLeakInterval; -+ } -+ else if (strncmp(pcBuffer, "mmu", pcTemp-pcBuffer) == 0) -+ { -+ psPVRSRVData->sMemLeakIntervals.ui32MMU = ui32MemLeakInterval; -+ } -+ else -+ { -+ return -EINVAL; -+ } -+ -+ *pui64Pos += ui64Count; -+ return ui64Count; -+} -+ -+#endif /* SUPPORT_VALIDATION */ -+ -+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) -+ -+/*************************************************************************/ /*! -+ Debug level DebugFS entry -+*/ /**************************************************************************/ -+ -+static int DebugLevelDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ DIPrintf(psEntry, "%u\n", OSDebugLevel()); -+ -+ return 0; -+} -+ -+#ifndef __GNUC__ -+static int __builtin_ffsl(long int x) -+{ -+ for (size_t i = 0; i < sizeof(x) * 8; i++) -+ { -+ if (x & (1 << i)) -+ { -+ return i + 1; -+ } -+ } -+ return 0; -+} -+#endif /* __GNUC__ */ -+ -+static IMG_INT64 DebugLevelSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, -+ IMG_UINT64 *pui64Pos, void *pvData) -+{ -+ const IMG_UINT uiMaxBufferSize = 6; -+ IMG_UINT32 ui32Level; -+ -+ PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); -+ PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); -+ PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL); -+ PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); -+ -+ if (sscanf(pcBuffer, "%u", &ui32Level) == 0) -+ { -+ return -EINVAL; -+ } -+ -+ OSSetDebugLevel(ui32Level & ((1 << __builtin_ffsl(DBGPRIV_LAST)) - 1)); -+ -+ *pui64Pos += ui64Count; -+ return ui64Count; -+} -+#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */ -+ -+#if defined(SUPPORT_RGX) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) -+static int VZPriorityDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ DI_VZ_DATA *psVZDriverData = DIGetPrivData(psEntry); -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg; -+ IMG_UINT32 ui32DriverID; -+ -+ PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); -+ PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); -+ -+ psDevInfo = psVZDriverData->psDevNode->pvDevice; -+ PVR_RETURN_IF_FALSE(psDevInfo != NULL, -EIO); -+ -+ psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; -+ PVR_RETURN_IF_FALSE(psRuntimeCfg != NULL, -EIO); -+ -+ ui32DriverID = 
psVZDriverData->ui32DriverID; -+ PVR_RETURN_IF_FALSE(ui32DriverID < (RGXFW_HOST_DRIVER_ID + RGX_NUM_DRIVERS_SUPPORTED), -+ -EINVAL); -+ -+ RGXFwSharedMemCacheOpValue(psRuntimeCfg->aui32DriverPriority[ui32DriverID], INVALIDATE); -+ DIPrintf(psEntry, "%u\n", psRuntimeCfg->aui32DriverPriority[ui32DriverID]); -+ -+ return 0; -+} -+ -+static IMG_INT64 VZPrioritySet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, -+ IMG_UINT64 *pui64Pos, void *pvData) -+{ -+ const DI_VZ_DATA *psVZDriverData = (const DI_VZ_DATA*)pvData; -+ const IMG_UINT32 uiMaxBufferSize = 12; -+ IMG_UINT32 ui32Priority; -+ PVRSRV_ERROR eError; -+ -+ PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); -+ PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL); -+ PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); -+ PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); -+ PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); -+ -+ if (OSStringToUINT32(pcBuffer, 10, &ui32Priority) != PVRSRV_OK) -+ { -+ return -EINVAL; -+ } -+ -+ eError = PVRSRVRGXFWDebugSetDriverPriorityKM(NULL, psVZDriverData->psDevNode, -+ psVZDriverData->ui32DriverID, ui32Priority); -+ if (eError != PVRSRV_OK) -+ { -+ return -EIO; -+ } -+ -+ *pui64Pos += ui64Count; -+ return ui64Count; -+} -+ -+static int VZTimeSliceIntervalDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ DI_VZ_DATA *psVZDriverData = DIGetPrivData(psEntry); -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg; -+ -+ PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); -+ PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); -+ -+ psDevInfo = psVZDriverData->psDevNode->pvDevice; -+ PVR_RETURN_IF_FALSE(psDevInfo != NULL, -EIO); -+ -+ psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; -+ PVR_RETURN_IF_FALSE(psRuntimeCfg != NULL, -EIO); -+ -+ DIPrintf(psEntry, "%u ms\n", psRuntimeCfg->ui32DriverTimeSliceInterval); -+ -+ return 0; -+} -+ -+static IMG_INT64 VZTimeSliceIntervalSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, -+ IMG_UINT64 *pui64Pos, void *pvData) -+{ -+ const DI_VZ_DATA *psVZDriverData = (const DI_VZ_DATA*)pvData; -+ const IMG_UINT32 uiMaxBufferSize = 12; -+ IMG_UINT32 ui32TimeSliceInterval; -+ PVRSRV_ERROR eError; -+ -+ PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); -+ PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL); -+ PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); -+ PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); -+ PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); -+ -+ if (OSStringToUINT32(pcBuffer, 10, &ui32TimeSliceInterval) != PVRSRV_OK) -+ { -+ return -EINVAL; -+ } -+ -+ eError = PVRSRVRGXFWDebugSetDriverTimeSliceIntervalKM(NULL, psVZDriverData->psDevNode, -+ ui32TimeSliceInterval); -+ if (eError != PVRSRV_OK) -+ { -+ return -EIO; -+ } -+ -+ *pui64Pos += ui64Count; -+ return ui64Count; -+} -+ -+static int VZTimeSliceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ DI_VZ_DATA *psVZDriverData = DIGetPrivData(psEntry); -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg; -+ IMG_UINT32 ui32DriverID; -+ -+ PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); -+ PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); -+ -+ psDevInfo = psVZDriverData->psDevNode->pvDevice; -+ PVR_RETURN_IF_FALSE(psDevInfo != NULL, -EIO); -+ -+ psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; -+ PVR_RETURN_IF_FALSE(psRuntimeCfg != NULL, -EIO); -+ -+ ui32DriverID = psVZDriverData->ui32DriverID; -+ PVR_RETURN_IF_FALSE(ui32DriverID < 
(RGXFW_HOST_DRIVER_ID + RGX_NUM_DRIVERS_SUPPORTED), -+ -EINVAL); -+ -+ DIPrintf(psEntry, "%u (0: disable; 1pc to 100pc)\n", psRuntimeCfg->aui32DriverTimeSlice[ui32DriverID]); -+ -+ return 0; -+} -+ -+static IMG_INT64 VZTimeSliceSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, -+ IMG_UINT64 *pui64Pos, void *pvData) -+{ -+ const DI_VZ_DATA *psVZDriverData = (const DI_VZ_DATA*)pvData; -+ const IMG_UINT32 uiMaxBufferSize = 12; -+ IMG_UINT32 ui32TimeSlice; -+ PVRSRV_ERROR eError; -+ -+ PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); -+ PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL); -+ PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); -+ PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); -+ PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); -+ -+ if (OSStringToUINT32(pcBuffer, 10, &ui32TimeSlice) != PVRSRV_OK) -+ { -+ return -EINVAL; -+ } -+ -+ eError = PVRSRVRGXFWDebugSetDriverTimeSliceKM(NULL, psVZDriverData->psDevNode, -+ psVZDriverData->ui32DriverID, ui32TimeSlice); -+ if (eError != PVRSRV_OK) -+ { -+ return -EIO; -+ } -+ -+ *pui64Pos += ui64Count; -+ return ui64Count; -+} -+ -+static int VZIsolationGroupDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ DI_VZ_DATA *psVZDriverData = DIGetPrivData(psEntry); -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg; -+ IMG_UINT32 ui32DriverID; -+ -+ PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); -+ PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); -+ -+ psDevInfo = psVZDriverData->psDevNode->pvDevice; -+ PVR_RETURN_IF_FALSE(psDevInfo != NULL, -EIO); -+ -+ psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; -+ PVR_RETURN_IF_FALSE(psRuntimeCfg != NULL, -EIO); -+ -+ ui32DriverID = psVZDriverData->ui32DriverID; -+ PVR_RETURN_IF_FALSE(ui32DriverID < (RGXFW_HOST_DRIVER_ID + RGX_NUM_DRIVERS_SUPPORTED), -+ -EINVAL); -+ -+ RGXFwSharedMemCacheOpValue(psRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID], INVALIDATE); -+ DIPrintf(psEntry, "%u\n", psRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID]); -+ -+ return 0; -+} -+ -+static IMG_INT64 VZIsolationGroupSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, -+ IMG_UINT64 *pui64Pos, void *pvData) -+{ -+ const DI_VZ_DATA *psVZDriverData = (const DI_VZ_DATA*)pvData; -+ const IMG_UINT32 uiMaxBufferSize = 12; -+ IMG_UINT32 ui32IsolationGroup; -+ PVRSRV_ERROR eError; -+ -+ PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); -+ PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL); -+ PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); -+ PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); -+ PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); -+ -+ if (OSStringToUINT32(pcBuffer, 10, &ui32IsolationGroup) != PVRSRV_OK) -+ { -+ return -EINVAL; -+ } -+ -+ eError = PVRSRVRGXFWDebugSetDriverIsolationGroupKM(NULL, psVZDriverData->psDevNode, -+ psVZDriverData->ui32DriverID, ui32IsolationGroup); -+ if (eError != PVRSRV_OK) -+ { -+ return -EIO; -+ } -+ -+ *pui64Pos += ui64Count; -+ return ui64Count; -+} -+ -+static int VZConnectionCooldownPeriodDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ DI_VZ_DATA *psVZDriverData = DIGetPrivData(psEntry); -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg; -+ -+ PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); -+ PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); -+ -+ psDevInfo = psVZDriverData->psDevNode->pvDevice; -+ PVR_RETURN_IF_FALSE(psDevInfo != NULL, -EIO); -+ -+ psRuntimeCfg = 
psDevInfo->psRGXFWIfRuntimeCfg; -+ PVR_RETURN_IF_FALSE(psRuntimeCfg != NULL, -EIO); -+ -+ DIPrintf(psEntry, "%u sec\n", psRuntimeCfg->ui32VzConnectionCooldownPeriodInSec); -+ -+ return 0; -+} -+ -+static IMG_INT64 VZConnectionCooldownPeriodSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, -+ IMG_UINT64 *pui64Pos, void *pvData) -+{ -+ const DI_VZ_DATA *psVZDriverData = (const DI_VZ_DATA*)pvData; -+ const IMG_UINT32 uiMaxBufferSize = 12; -+ IMG_UINT32 ui32VzConnectionCooldownPeriodInSec; -+ PVRSRV_ERROR eError; -+ -+ PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); -+ PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL); -+ PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); -+ PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL); -+ PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO); -+ -+ if (OSStringToUINT32(pcBuffer, 10, &ui32VzConnectionCooldownPeriodInSec) != PVRSRV_OK) -+ { -+ return -EINVAL; -+ } -+ -+ eError = PVRSRVRGXFWDebugSetVzConnectionCooldownPeriodInSecKM(NULL, psVZDriverData->psDevNode, -+ ui32VzConnectionCooldownPeriodInSec); -+ if (eError != PVRSRV_OK) -+ { -+ return -EIO; -+ } -+ -+ *pui64Pos += ui64Count; -+ return ui64Count; -+} -+ -+#endif -+ -+PVRSRV_ERROR DebugCommonInitDriver(void) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psPVRSRVData != NULL); -+ -+ /* -+ * The DebugFS entries are designed to work in a single device system but -+ * this function will be called multiple times in a multi-device system. -+ * Return an error in this case. -+ */ -+ if (gpsVersionDIEntry) -+ { -+ return -EEXIST; -+ } -+ -+ { -+ DI_ITERATOR_CB sIterator = { -+ .pfnStart = _VersionDIStart, -+ .pfnStop = _VersionDIStop, -+ .pfnNext = _VersionDINext, -+ .pfnShow = _VersionDIShow -+ }; -+ -+ eError = DICreateEntry("version", NULL, &sIterator, psPVRSRVData, -+ DI_ENTRY_TYPE_GENERIC, &gpsVersionDIEntry); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ } -+ -+ { -+ DI_ITERATOR_CB sIterator = { -+ .pfnStart = _DebugStatusDIStart, -+ .pfnStop = _DebugStatusDIStop, -+ .pfnNext = _DebugStatusDINext, -+ .pfnShow = _DebugStatusDIShow, -+#if defined(DEBUG) -+ .pfnWrite = DebugStatusSet, -+ //'K' expected + Null terminator -+#endif -+ .ui32WriteLenMax= ((1U)+1U) -+ }; -+ eError = DICreateEntry("status", NULL, &sIterator, psPVRSRVData, -+ DI_ENTRY_TYPE_GENERIC, &gpsStatusDIEntry); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ } -+ -+#ifdef SUPPORT_VALIDATION -+ { -+ DI_ITERATOR_CB sIterator = { -+ .pfnShow = TestMemLeakDIShow, -+ .pfnWrite = TestMemLeakDISet, -+ //Function only allows max 15 chars + Null terminator -+ .ui32WriteLenMax = ((15U)+1U) -+ }; -+ eError = DICreateEntry("test_memleak", NULL, &sIterator, psPVRSRVData, -+ DI_ENTRY_TYPE_GENERIC, &gpsTestMemLeakDIEntry); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ } -+#endif /* SUPPORT_VALIDATION */ -+ -+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) -+ { -+ DI_ITERATOR_CB sIterator = { -+ .pfnShow = DebugLevelDIShow, -+ .pfnWrite = DebugLevelSet, -+ //Max value of 255(3 char) + Null terminator -+ .ui32WriteLenMax =((3U)+1U) -+ }; -+ eError = DICreateEntry("debug_level", NULL, &sIterator, NULL, -+ DI_ENTRY_TYPE_GENERIC, &gpsDebugLevelDIEntry); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ } -+#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */ -+ -+ return PVRSRV_OK; -+ -+return_error_: -+ DebugCommonDeInitDriver(); -+ -+ return eError; -+} -+ -+void DebugCommonDeInitDriver(void) -+{ -+#if defined(DEBUG) || 
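All five VZ write handlers above share the same validation-and-parse preamble before forwarding the value to their PVRSRVRGXFWDebugSet*KM() counterpart. A minimal sketch of that common step, factored into a helper; the helper name _VZParseWriteValue is hypothetical, everything else appears in the handlers above:

/* Hypothetical helper (not part of the patch): the shared buffer checks and
 * base-10 parse used by the VZ DebugFS write handlers. Returns 0 on success
 * or a negative errno value, as the handlers do. */
static IMG_INT64 _VZParseWriteValue(const IMG_CHAR *pcBuffer,
                                    IMG_UINT64 ui64Count,
                                    IMG_UINT32 *pui32Value)
{
	/* Same exclusive bound as the handlers above. */
	const IMG_UINT32 uiMaxBufferSize = 12;

	PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO);
	PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL);
	PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL);

	if (OSStringToUINT32(pcBuffer, 10, pui32Value) != PVRSRV_OK)
	{
		return -EINVAL;
	}

	return 0;
}

On success each handler then calls the matching KM setter, advances *pui64Pos by ui64Count and returns ui64Count, exactly as in the handlers above.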
defined(PVR_DPF_ADHOC_DEBUG_ON) -+ if (gpsDebugLevelDIEntry != NULL) -+ { -+ DIDestroyEntry(gpsDebugLevelDIEntry); -+ } -+#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */ -+ -+#ifdef SUPPORT_VALIDATION -+ if (gpsTestMemLeakDIEntry != NULL) -+ { -+ DIDestroyEntry(gpsTestMemLeakDIEntry); -+ } -+#endif /* SUPPORT_VALIDATION */ -+ -+ if (gpsStatusDIEntry != NULL) -+ { -+ DIDestroyEntry(gpsStatusDIEntry); -+ } -+ -+ if (gpsVersionDIEntry != NULL) -+ { -+ DIDestroyEntry(gpsVersionDIEntry); -+ } -+} -+ -+PVRSRV_ERROR DebugCommonInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo; -+ PVRSRV_ERROR eError; -+ IMG_CHAR pszDeviceId[sizeof("gpu4294967296")]; -+ -+ OSSNPrintf(pszDeviceId, sizeof(pszDeviceId), "gpu%02d", -+ psDeviceNode->sDevId.ui32InternalID); -+ eError = DICreateGroup(pszDeviceId, NULL, &psDebugInfo->psGroup); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ -+#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE) -+ eError = SORgxGpuUtilStatsRegister(&psDebugInfo->hGpuUtilUserDebugFS); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+#endif -+ -+ { -+ DI_ITERATOR_CB sIterator = {.pfnShow = _DebugDumpDebugDIShow}; -+ eError = DICreateEntry("debug_dump", psDebugInfo->psGroup, &sIterator, -+ psDeviceNode, DI_ENTRY_TYPE_GENERIC, -+ &psDebugInfo->psDumpDebugEntry); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ } -+ -+#ifdef SUPPORT_RGX -+ if (! PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ { -+ DI_ITERATOR_CB sIterator = {.pfnShow = _DebugFWTraceDIShow}; -+ eError = DICreateEntry("firmware_trace", psDebugInfo->psGroup, &sIterator, -+ psDeviceNode, DI_ENTRY_TYPE_GENERIC, -+ &psDebugInfo->psFWTraceEntry); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ } -+ -+#ifdef SUPPORT_FIRMWARE_GCOV -+ { -+ DI_ITERATOR_CB sIterator = { -+ .pfnStart = _FirmwareGcovDIStart, -+ .pfnStop = _FirmwareGcovDIStop, -+ .pfnNext = _FirmwareGcovDINext, -+ .pfnShow = _FirmwareGcovDIShow -+ }; -+ -+ eError = DICreateEntry("firmware_gcov", psDebugInfo->psGroup, &sIterator, -+ psDeviceNode, DI_ENTRY_TYPE_GENERIC, -+ &psDebugInfo->psFWGCOVEntry); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ } -+#endif /* SUPPORT_FIRMWARE_GCOV */ -+ -+ { -+ DI_ITERATOR_CB sIterator = {.pfnShow = _FirmwareMappingsDIShow}; -+ eError = DICreateEntry("firmware_mappings", psDebugInfo->psGroup, &sIterator, -+ psDeviceNode, DI_ENTRY_TYPE_GENERIC, -+ &psDebugInfo->psFWMappingsEntry); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ } -+ -+#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB) -+ { -+ DI_ITERATOR_CB sIterator = { -+ .pfnRead = _RiscvDmiRead, -+ .pfnWrite = _RiscvDmiWrite, -+ .ui32WriteLenMax = ((RISCV_DMI_SIZE)+1U) -+ }; -+ eError = DICreateEntry("riscv_dmi", psDebugInfo->psGroup, &sIterator, psDeviceNode, -+ DI_ENTRY_TYPE_RANDOM_ACCESS, &psDebugInfo->psRiscvDmiDIEntry); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ psDebugInfo->ui64RiscvDmi = 0ULL; -+ } -+#endif /* SUPPORT_VALIDATION || SUPPORT_RISCV_GDB */ -+ -+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) -+ if (PVRSRV_VZ_MODE_IS(HOST)) -+ { -+ eError = DICreateGroup("vz", psDebugInfo->psGroup, &psDebugInfo->psVZGroup); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ -+ { -+ IMG_UINT32 ui32DriverID; -+ -+ DI_ITERATOR_CB sPriorityIterator = { -+ .pfnShow = VZPriorityDIShow, -+ .pfnWrite = VZPrioritySet, -+ //Max value of UINT_MAX (10 chars) + Null terminator -+ .ui32WriteLenMax = sizeof("4294967295") -+ }; -+ -+ DI_ITERATOR_CB sTimeSliceIntervalIterator = { -+ .pfnShow = 
VZTimeSliceIntervalDIShow, -+ .pfnWrite = VZTimeSliceIntervalSet, -+ //Max value of UINT_MAX (10 chars) + Null terminator -+ .ui32WriteLenMax = sizeof("4294967295") -+ }; -+ -+ DI_ITERATOR_CB sTimeSliceIterator = { -+ .pfnShow = VZTimeSliceDIShow, -+ .pfnWrite = VZTimeSliceSet, -+ //Max value of UINT_MAX (10 chars) + Null terminator -+ .ui32WriteLenMax = sizeof("4294967295") -+ }; -+ -+ DI_ITERATOR_CB sIsolationGroupIterator = { -+ .pfnShow = VZIsolationGroupDIShow, -+ .pfnWrite = VZIsolationGroupSet, -+ //Max value of UINT_MAX (10 chars) + Null terminator -+ .ui32WriteLenMax = sizeof("4294967295") -+ }; -+ -+ DI_ITERATOR_CB sVzConnectionCooldownPeriodIterator = { -+ .pfnShow = VZConnectionCooldownPeriodDIShow, -+ .pfnWrite = VZConnectionCooldownPeriodSet, -+ //Max value of UINT_MAX (10 chars) + Null terminator -+ .ui32WriteLenMax = sizeof("4294967295") -+ }; -+ -+ FOREACH_SUPPORTED_DRIVER(ui32DriverID) -+ { -+ IMG_CHAR szDriverID[2]; -+ OSSNPrintf(szDriverID, 2, "%u", ui32DriverID); -+ -+ eError = DICreateGroup(szDriverID, psDebugInfo->psVZGroup, &psDebugInfo->apsVZDriverGroups[ui32DriverID]); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ -+ psDebugInfo->apsVZDriverData[ui32DriverID] = OSAllocMem(sizeof(PVRSRV_DEVICE_DEBUG_INFO)); -+ PVR_GOTO_IF_NOMEM(psDebugInfo->apsVZDriverData[ui32DriverID], eError, return_error_); -+ -+ psDebugInfo->apsVZDriverData[ui32DriverID]->psDevNode = psDeviceNode; -+ psDebugInfo->apsVZDriverData[ui32DriverID]->ui32DriverID = ui32DriverID; -+ -+ eError = DICreateEntry("priority", psDebugInfo->apsVZDriverGroups[ui32DriverID], -+ &sPriorityIterator, psDebugInfo->apsVZDriverData[ui32DriverID], DI_ENTRY_TYPE_GENERIC, -+ &psDebugInfo->apsVZDriverPriorityDIEntries[ui32DriverID]); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ -+ eError = DICreateEntry("time_slice", psDebugInfo->apsVZDriverGroups[ui32DriverID], -+ &sTimeSliceIterator, psDebugInfo->apsVZDriverData[ui32DriverID], DI_ENTRY_TYPE_GENERIC, -+ &psDebugInfo->apsVZDriverTimeSliceDIEntries[ui32DriverID]); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ -+ if (ui32DriverID == RGXFW_HOST_DRIVER_ID) -+ { -+ eError = DICreateEntry("time_slice_interval", psDebugInfo->apsVZDriverGroups[ui32DriverID], -+ &sTimeSliceIntervalIterator, psDebugInfo->apsVZDriverData[ui32DriverID], DI_ENTRY_TYPE_GENERIC, -+ &psDebugInfo->psVZDriverTimeSliceIntervalDIEntry); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ -+ eError = DICreateEntry("vz_connection_cooldown_period", psDebugInfo->apsVZDriverGroups[ui32DriverID], -+ &sVzConnectionCooldownPeriodIterator, psDebugInfo->apsVZDriverData[ui32DriverID], DI_ENTRY_TYPE_GENERIC, -+ &psDebugInfo->psVZDriverConnectionCooldownPeriodDIEntry); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ } -+ -+ eError = DICreateEntry("isolation_group", psDebugInfo->apsVZDriverGroups[ui32DriverID], -+ &sIsolationGroupIterator, psDebugInfo->apsVZDriverData[ui32DriverID], DI_ENTRY_TYPE_GENERIC, -+ &psDebugInfo->apsVZDriverIsolationGroupDIEntries[ui32DriverID]); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ } -+ } -+ } -+#endif -+ } -+#ifdef SUPPORT_VALIDATION -+ { -+ DI_ITERATOR_CB sIterator = { -+ .pfnSeek = _RgxRegsSeek, -+ .pfnRead = _RgxRegsRead, -+ .pfnWrite = _RgxRegsWrite, -+ //Max size of input binary data is 4 bytes (UINT32) or 8 bytes (UINT64) -+ .ui32WriteLenMax = ((8U)+1U) -+ }; -+ eError = DICreateEntry("rgxregs", psDebugInfo->psGroup, &sIterator, psDeviceNode, -+ DI_ENTRY_TYPE_RANDOM_ACCESS, &psDebugInfo->psRGXRegsEntry); -+ -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ } -+#endif /* 
SUPPORT_VALIDATION */ -+ -+#ifdef SUPPORT_POWER_SAMPLING_VIA_DEBUGFS -+ { -+ DI_ITERATOR_CB sIterator = { -+ .pfnShow = _DebugPowerDataDIShow, -+ .pfnWrite = PowerDataSet, -+ //Expects '0' or '1' plus Null terminator -+ .ui32WriteLenMax = ((1U)+1U) -+ }; -+ eError = DICreateEntry("power_data", psDebugInfo->psGroup, &sIterator, psDeviceNode, -+ DI_ENTRY_TYPE_GENERIC, &psDebugInfo->psPowerDataEntry); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ } -+#endif /* SUPPORT_POWER_SAMPLING_VIA_DEBUGFS */ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ { -+ DI_ITERATOR_CB sIterator = { -+ .pfnShow = PVRSRVPowerStatsPrintElements, -+ }; -+ eError = DICreateEntry("power_timing_stats", psDebugInfo->psGroup, &sIterator, psDeviceNode, -+ DI_ENTRY_TYPE_GENERIC, &psDebugInfo->psPowerTimingStatsEntry); -+ PVR_GOTO_IF_ERROR(eError, return_error_); -+ } -+#endif -+#endif /* SUPPORT_RGX */ -+ -+ return PVRSRV_OK; -+ -+return_error_: -+ DebugCommonDeInitDevice(psDeviceNode); -+ -+ return eError; -+} -+ -+void DebugCommonDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo; -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ if (psDebugInfo->psPowerTimingStatsEntry != NULL) -+ { -+ DIDestroyEntry(psDebugInfo->psPowerTimingStatsEntry); -+ psDebugInfo->psPowerTimingStatsEntry = NULL; -+ } -+#endif -+ -+#ifdef SUPPORT_POWER_SAMPLING_VIA_DEBUGFS -+ if (psDebugInfo->psPowerDataEntry != NULL) -+ { -+ DIDestroyEntry(psDebugInfo->psPowerDataEntry); -+ psDebugInfo->psPowerDataEntry = NULL; -+ } -+#endif /* SUPPORT_POWER_SAMPLING_VIA_DEBUGFS */ -+ -+#ifdef SUPPORT_VALIDATION -+ if (psDebugInfo->psRGXRegsEntry != NULL) -+ { -+ DIDestroyEntry(psDebugInfo->psRGXRegsEntry); -+ psDebugInfo->psRGXRegsEntry = NULL; -+ } -+#endif /* SUPPORT_VALIDATION */ -+ -+#ifdef SUPPORT_RGX -+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) -+ if (PVRSRV_VZ_MODE_IS(HOST)) -+ { -+ IMG_UINT32 ui32DriverID; -+ -+ FOREACH_SUPPORTED_DRIVER(ui32DriverID) -+ { -+ if (psDebugInfo->apsVZDriverIsolationGroupDIEntries[ui32DriverID] != NULL) -+ { -+ DIDestroyEntry(psDebugInfo->apsVZDriverIsolationGroupDIEntries[ui32DriverID]); -+ psDebugInfo->apsVZDriverIsolationGroupDIEntries[ui32DriverID] = NULL; -+ } -+ -+ if (psDebugInfo->apsVZDriverPriorityDIEntries[ui32DriverID] != NULL) -+ { -+ DIDestroyEntry(psDebugInfo->apsVZDriverPriorityDIEntries[ui32DriverID]); -+ psDebugInfo->apsVZDriverPriorityDIEntries[ui32DriverID] = NULL; -+ } -+ -+ if (psDebugInfo->apsVZDriverTimeSliceDIEntries[ui32DriverID] != NULL) -+ { -+ DIDestroyEntry(psDebugInfo->apsVZDriverTimeSliceDIEntries[ui32DriverID]); -+ psDebugInfo->apsVZDriverTimeSliceDIEntries[ui32DriverID] = NULL; -+ } -+ -+ if (ui32DriverID == RGXFW_HOST_DRIVER_ID) -+ { -+ if (psDebugInfo->psVZDriverTimeSliceIntervalDIEntry != NULL) -+ { -+ DIDestroyEntry(psDebugInfo->psVZDriverTimeSliceIntervalDIEntry); -+ psDebugInfo->psVZDriverTimeSliceIntervalDIEntry = NULL; -+ } -+ -+ if (psDebugInfo->psVZDriverConnectionCooldownPeriodDIEntry != NULL) -+ { -+ DIDestroyEntry(psDebugInfo->psVZDriverConnectionCooldownPeriodDIEntry); -+ psDebugInfo->psVZDriverConnectionCooldownPeriodDIEntry = NULL; -+ } -+ } -+ -+ if (psDebugInfo->apsVZDriverData[ui32DriverID] != NULL) -+ { -+ OSFreeMem(psDebugInfo->apsVZDriverData[ui32DriverID]); -+ psDebugInfo->apsVZDriverData[ui32DriverID] = NULL; -+ } -+ -+ if (psDebugInfo->apsVZDriverGroups[ui32DriverID] != NULL) -+ { -+ DIDestroyGroup(psDebugInfo->apsVZDriverGroups[ui32DriverID]); -+ 
psDebugInfo->apsVZDriverGroups[ui32DriverID] = NULL; -+ } -+ } -+ -+ if (psDebugInfo->psVZGroup != NULL) -+ { -+ DIDestroyGroup(psDebugInfo->psVZGroup); -+ psDebugInfo->psVZGroup = NULL; -+ } -+ } -+#endif -+ -+ if (psDebugInfo->psFWTraceEntry != NULL) -+ { -+ DIDestroyEntry(psDebugInfo->psFWTraceEntry); -+ psDebugInfo->psFWTraceEntry = NULL; -+ } -+ -+#ifdef SUPPORT_FIRMWARE_GCOV -+ if (psDebugInfo->psFWGCOVEntry != NULL) -+ { -+ DIDestroyEntry(psDebugInfo->psFWGCOVEntry); -+ psDebugInfo->psFWGCOVEntry = NULL; -+ } -+#endif -+ -+ if (psDebugInfo->psFWMappingsEntry != NULL) -+ { -+ DIDestroyEntry(psDebugInfo->psFWMappingsEntry); -+ psDebugInfo->psFWMappingsEntry = NULL; -+ } -+ -+#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB) -+ if (psDebugInfo->psRiscvDmiDIEntry != NULL) -+ { -+ DIDestroyEntry(psDebugInfo->psRiscvDmiDIEntry); -+ psDebugInfo->psRiscvDmiDIEntry = NULL; -+ } -+#endif -+#endif /* SUPPORT_RGX */ -+ -+ if (psDebugInfo->psDumpDebugEntry != NULL) -+ { -+ DIDestroyEntry(psDebugInfo->psDumpDebugEntry); -+ psDebugInfo->psDumpDebugEntry = NULL; -+ } -+ -+#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE) -+ if (psDebugInfo->hGpuUtilUserDebugFS != NULL) -+ { -+ SORgxGpuUtilStatsUnregister(psDebugInfo->hGpuUtilUserDebugFS); -+ psDebugInfo->hGpuUtilUserDebugFS = NULL; -+ } -+#endif /* defined(SUPPORT_RGX) && !defined(NO_HARDWARE) */ -+ -+ if (psDebugInfo->psGroup != NULL) -+ { -+ DIDestroyGroup(psDebugInfo->psGroup); -+ psDebugInfo->psGroup = NULL; -+ } -+} -+ -+/* -+ Appends flags strings to a null-terminated string buffer -+*/ -+void DebugCommonFlagStrings(IMG_CHAR *psDesc, -+ IMG_UINT32 ui32DescSize, -+ const IMG_FLAGS2DESC *psConvTable, -+ IMG_UINT32 ui32TableSize, -+ IMG_UINT32 ui32Flags) -+{ -+ IMG_UINT32 ui32Idx; -+ -+ for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++) -+ { -+ if (BITMASK_HAS(ui32Flags, psConvTable[ui32Idx].uiFlag)) -+ { -+ OSStringLCat(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize); -+ } -+ } -+} -diff --git a/drivers/gpu/drm/img-rogue/debug_common.h b/drivers/gpu/drm/img-rogue/debug_common.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/debug_common.h -@@ -0,0 +1,69 @@ -+/*************************************************************************/ /*! -+@File -+@Title Common debug definitions and functions. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef DEBUG_COMMON_H -+#define DEBUG_COMMON_H -+ -+#include "pvrsrv_error.h" -+#include "device.h" -+ -+PVRSRV_ERROR DebugCommonInitDriver(void); -+void DebugCommonDeInitDriver(void); -+ -+const IMG_CHAR *PVRSRVGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState); -+ -+PVRSRV_ERROR DebugCommonInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode); -+void DebugCommonDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+typedef struct _IMG_FLAGS2DESC_ -+{ -+ IMG_UINT32 uiFlag; -+ const IMG_CHAR *pszLabel; -+} IMG_FLAGS2DESC; -+ -+void DebugCommonFlagStrings(IMG_CHAR *psDesc, -+ IMG_UINT32 ui32DescSize, -+ const IMG_FLAGS2DESC *psConvTable, -+ IMG_UINT32 ui32TableSize, -+ IMG_UINT32 ui32Flags); -+ -+#endif /* DEBUG_COMMON_H */ -diff --git a/drivers/gpu/drm/img-rogue/device.h b/drivers/gpu/drm/img-rogue/device.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/device.h -@@ -0,0 +1,647 @@ -+/**************************************************************************/ /*! -+@File -+@Title Common Device header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device related function templates and defines -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
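DebugCommonFlagStrings() and the IMG_FLAGS2DESC table it walks (both shown above) are generic. A minimal usage sketch, assuming an example table; the flag values, labels and function name below are illustrative, not taken from the patch:

/* Illustrative only: build a flags-to-label table and expand a flags word. */
static void ExampleFlagPrint(IMG_UINT32 ui32Flags)
{
	static const IMG_FLAGS2DESC asExampleTable[] =
	{
		{0x1U, "POWERED "},   /* example flag bit, not a real driver flag */
		{0x2U, "IDLE "},      /* example flag bit, not a real driver flag */
	};
	IMG_CHAR szDesc[64] = "";

	/* Appends the label of every flag set in ui32Flags; the destination
	 * must start out null-terminated and is bounded by its size. */
	DebugCommonFlagStrings(szDesc, sizeof(szDesc), asExampleTable,
	                       sizeof(asExampleTable) / sizeof(asExampleTable[0]),
	                       ui32Flags);
}

The empty initialiser matters: the implementation appends with OSStringLCat(), so the buffer has to be a valid null-terminated string before the first call.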
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+#ifndef DEVICE_H -+#define DEVICE_H -+ -+#include "devicemem_heapcfg.h" -+#include "mmu_common.h" -+#include "ra.h" /* RA_ARENA */ -+#include "pvrsrv_device.h" -+#include "sync_checkpoint.h" -+#include "srvkm.h" -+#include "physheap.h" -+#include "sync_internal.h" -+#include "dllist.h" -+ -+#include "rgx_bvnc_defs_km.h" -+ -+#include "lock.h" -+ -+#include "power.h" -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+#include "virt_validation_defs.h" -+#endif -+ -+typedef struct _PVRSRV_POWER_DEV_TAG_ *PPVRSRV_POWER_DEV; -+ -+struct SYNC_RECORD; -+ -+struct _CONNECTION_DATA_; -+ -+/*************************************************************************/ /*! -+ @Function AllocUFOBlockCallback -+ @Description Device specific callback for allocation of a UFO block -+ -+ @Input psDeviceNode Pointer to device node to allocate -+ the UFO for. -+ @Output ppsMemDesc Pointer to pointer for the memdesc of -+ the allocation -+ @Output pui32SyncAddr FW Base address of the UFO block -+ @Output puiSyncPrimBlockSize Size of the UFO block -+ -+ @Return PVRSRV_OK if allocation was successful -+*/ /**************************************************************************/ -+typedef PVRSRV_ERROR (*AllocUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, -+ DEVMEM_MEMDESC **ppsMemDesc, -+ IMG_UINT32 *pui32SyncAddr, -+ IMG_UINT32 *puiSyncPrimBlockSize); -+ -+/*************************************************************************/ /*! -+ @Function FreeUFOBlockCallback -+ @Description Device specific callback for freeing of a UFO -+ -+ @Input psDeviceNode Pointer to device node that the UFO block was -+ allocated from. -+ @Input psMemDesc Pointer to pointer for the memdesc of the UFO -+ block to free. 
-+*/ /**************************************************************************/ -+typedef void (*FreeUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, -+ DEVMEM_MEMDESC *psMemDesc); -+ -+typedef struct _PVRSRV_DEVICE_IDENTIFIER_ -+{ -+ /* Pdump memory and register bank names */ -+ IMG_CHAR *pszPDumpDevName; -+ IMG_CHAR *pszPDumpRegName; -+ -+ /* Under Linux, this is the minor number of RenderNode corresponding to this Device */ -+ IMG_INT32 i32KernelDeviceID; -+ /* Services layer enumeration of the device used in pvrdebug */ -+ IMG_UINT32 ui32InternalID; -+} PVRSRV_DEVICE_IDENTIFIER; -+ -+typedef struct _DEVICE_MEMORY_INFO_ -+{ -+ /* Heap count. Doesn't include additional heaps from PVRSRVCreateDeviceMemHeap */ -+ IMG_UINT32 ui32HeapCount; -+ -+ /* Blueprints for creating new device memory contexts */ -+ IMG_UINT32 uiNumHeapConfigs; -+ DEVMEM_HEAP_CONFIG *psDeviceMemoryHeapConfigArray; -+ DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeap; -+} DEVICE_MEMORY_INFO; -+ -+#define MMU_BAD_PHYS_ADDR (0xbadbad00badULL) -+ -+typedef struct __DEFAULT_PAGE__ -+{ -+ /*Page handle for the page allocated (UMA/LMA)*/ -+ PG_HANDLE sPageHandle; -+ POS_LOCK psPgLock; -+ /*Default page size in terms of log2 */ -+ IMG_UINT32 ui32Log2PgSize; -+ IMG_UINT64 ui64PgPhysAddr; -+#if defined(PDUMP) -+ IMG_HANDLE hPdumpPg; -+#endif -+} PVRSRV_DEF_PAGE; -+ -+#define PVRSRV_DEVICE_STATE_LIST \ -+ X(UNDEFINED) \ -+ X(CREATING) \ -+ X(CREATED) \ -+ X(ACTIVE) \ -+ X(FROZEN) \ -+ X(DEINIT) \ -+ X(DEINIT_POWERED_OFF) \ -+ X(BAD) \ -+ X(PCI_ERROR) \ -+ X(LAST) \ -+ -+typedef enum _PVRSRV_DEVICE_STATE_ -+{ -+#define X(_name) PVRSRV_DEVICE_STATE_ ## _name, -+ PVRSRV_DEVICE_STATE_LIST -+#undef X -+ -+} PVRSRV_DEVICE_STATE; -+ -+typedef enum _PVRSRV_DEVICE_HEALTH_STATUS_ -+{ -+ PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED = 0, -+ PVRSRV_DEVICE_HEALTH_STATUS_OK, -+ PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING, -+ PVRSRV_DEVICE_HEALTH_STATUS_DEAD, -+ PVRSRV_DEVICE_HEALTH_STATUS_FAULT -+} PVRSRV_DEVICE_HEALTH_STATUS; -+ -+typedef enum _PVRSRV_DEVICE_HEALTH_REASON_ -+{ -+ PVRSRV_DEVICE_HEALTH_REASON_NONE = 0, -+ PVRSRV_DEVICE_HEALTH_REASON_ASSERTED, -+ PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING, -+ PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS, -+ PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT, -+ PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED, -+ PVRSRV_DEVICE_HEALTH_REASON_IDLING, -+ PVRSRV_DEVICE_HEALTH_REASON_RESTARTING, -+ PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS, -+ PVRSRV_DEVICE_HEALTH_REASON_PCI_ERROR -+} PVRSRV_DEVICE_HEALTH_REASON; -+ -+typedef enum _PVRSRV_DEVICE_DEBUG_DUMP_STATUS_ -+{ -+ PVRSRV_DEVICE_DEBUG_DUMP_NONE = 0, -+ PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE -+} PVRSRV_DEVICE_DEBUG_DUMP_STATUS; -+ -+#ifndef DI_GROUP_DEFINED -+#define DI_GROUP_DEFINED -+typedef struct DI_GROUP DI_GROUP; -+#endif -+#ifndef DI_ENTRY_DEFINED -+#define DI_ENTRY_DEFINED -+typedef struct DI_ENTRY DI_ENTRY; -+#endif -+ -+#if (RGX_NUM_DRIVERS_SUPPORTED > 1) -+#ifndef DI_VZ_DATA_DEFINED -+#define DI_VZ_DATA_DEFINED -+typedef struct DI_VZ_DATA DI_VZ_DATA; -+#endif -+#endif -+ -+typedef struct _PVRSRV_DEVICE_DEBUG_INFO_ -+{ -+ DI_GROUP *psGroup; -+ DI_ENTRY *psDumpDebugEntry; -+#ifdef SUPPORT_RGX -+ DI_ENTRY *psFWTraceEntry; -+#ifdef SUPPORT_FIRMWARE_GCOV -+ DI_ENTRY *psFWGCOVEntry; -+#endif -+ DI_ENTRY *psFWMappingsEntry; -+#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB) -+ DI_ENTRY *psRiscvDmiDIEntry; -+ IMG_UINT64 ui64RiscvDmi; -+#endif -+ DI_ENTRY *psDevMemEntry; -+ IMG_HANDLE hGpuUtilUserDebugFS; -+#endif /* SUPPORT_RGX */ -+#ifdef SUPPORT_VALIDATION 
-+ DI_ENTRY *psRGXRegsEntry; -+#endif /* SUPPORT_VALIDATION */ -+#ifdef SUPPORT_POWER_SAMPLING_VIA_DEBUGFS -+ DI_ENTRY *psPowerDataEntry; -+#endif -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ DI_ENTRY *psPowerTimingStatsEntry; -+#endif -+#if (RGX_NUM_DRIVERS_SUPPORTED > 1) -+ DI_GROUP *psVZGroup; -+ DI_GROUP *apsVZDriverGroups[RGX_NUM_DRIVERS_SUPPORTED]; -+ DI_ENTRY *apsVZDriverPriorityDIEntries[RGX_NUM_DRIVERS_SUPPORTED]; -+ DI_ENTRY *apsVZDriverTimeSliceDIEntries[RGX_NUM_DRIVERS_SUPPORTED]; -+ DI_ENTRY *psVZDriverTimeSliceIntervalDIEntry; -+ DI_ENTRY *apsVZDriverIsolationGroupDIEntries[RGX_NUM_DRIVERS_SUPPORTED]; -+ DI_VZ_DATA *apsVZDriverData[RGX_NUM_DRIVERS_SUPPORTED]; -+ DI_ENTRY *psVZDriverConnectionCooldownPeriodDIEntry; -+#endif -+#if defined(PVR_TESTING_UTILS) -+ DI_ENTRY *psTestLBistDIEntry; -+ DI_ENTRY *psLBistNumWaitersDIEntry; -+#endif -+} PVRSRV_DEVICE_DEBUG_INFO; -+ -+#if defined(PVRSRV_DEBUG_LISR_EXECUTION) -+#define RGX_LISR_INIT (0U) -+#define RGX_LISR_DEVICE_NOT_POWERED (1U) -+#define RGX_LISR_NOT_TRIGGERED_BY_HW (2U) -+#define RGX_LISR_FW_IRQ_COUNTER_NOT_UPDATED (3U) -+#define RGX_LISR_PROCESSED (4U) -+ -+typedef IMG_UINT32 LISR_STATUS; -+ -+typedef struct _LISR_EXECUTION_INFO_ -+{ -+ /* status of last LISR invocation */ -+ LISR_STATUS ui32Status; -+ -+ /* snapshot from the last LISR invocation */ -+#if defined(RGX_FW_IRQ_OS_COUNTERS) -+ IMG_UINT32 aui32InterruptCountSnapshot[RGX_NUM_DRIVERS_SUPPORTED]; -+#else -+ IMG_UINT32 aui32InterruptCountSnapshot[RGXFW_THREAD_NUM]; -+#endif -+ -+ /* time of the last LISR invocation */ -+ IMG_UINT64 ui64Clockns; -+} LISR_EXECUTION_INFO; -+ -+#define UPDATE_LISR_DBG_STATUS(status) \ -+ do { \ -+ psDeviceNode->sLISRExecutionInfo.ui32Status = (status); \ -+ if ((status > RGX_LISR_INIT) && (status < RGX_LISR_PROCESSED)) \ -+ { \ -+ PVR_DPF((PVR_DBG_ERROR, "%s: IRQ %" IMG_UINT64_FMTSPEC " rejected: %s", __func__, psDeviceNode->ui64nLISR, #status)); \ -+ } \ -+ } while (0) -+ -+#define UPDATE_LISR_DBG_SNAPSHOT(idx, val) psDeviceNode->sLISRExecutionInfo.aui32InterruptCountSnapshot[idx] = (val) -+#define UPDATE_LISR_DBG_TIMESTAMP() psDeviceNode->sLISRExecutionInfo.ui64Clockns = OSClockns64() -+#define UPDATE_LISR_DBG_COUNTER() psDeviceNode->ui64nLISR++ -+#define UPDATE_MISR_DBG_COUNTER() psDeviceNode->ui64nMISR++ -+#else -+#define UPDATE_LISR_DBG_STATUS(status) -+#define UPDATE_LISR_DBG_SNAPSHOT(idx, val) -+#define UPDATE_LISR_DBG_TIMESTAMP() -+#define UPDATE_LISR_DBG_COUNTER() -+#define UPDATE_MISR_DBG_COUNTER() -+#endif /* defined(PVRSRV_DEBUG_LISR_EXECUTION) */ -+ -+typedef struct _PVRSRV_DEVICE_NODE_ -+{ -+ PVRSRV_DEVICE_IDENTIFIER sDevId; -+ PVRSRV_DEVICE_STATE eDevState; /* Set using PVRSRVDeviceSetState, not directly. */ -+ PVRSRV_DEVICE_FABRIC_TYPE eDevFabricType; -+ -+ ATOMIC_T eHealthStatus; /* Holds values from PVRSRV_DEVICE_HEALTH_STATUS */ -+ ATOMIC_T eHealthReason; /* Holds values from PVRSRV_DEVICE_HEALTH_REASON */ -+ ATOMIC_T eDebugDumpRequested; /* Holds values from PVRSRV_DEVICE_DEBUG_DUMP_STATUS */ -+ -+ IMG_HANDLE *hDebugTable; -+ -+ /* device specific MMU attributes */ -+ MMU_DEVICEATTRIBS *psMMUDevAttrs; -+ /* Device specific MMU firmware attributes, used only in some devices */ -+ MMU_DEVICEATTRIBS *psFirmwareMMUDevAttrs; -+ /* Physical Heap where MMU PT pages are allocated from, normally the -+ * system's default physical heap but can be different for AutoVz driver. 
*/ -+ PHYS_HEAP *psMMUPhysHeap; -+ -+ /* lock for power state transitions */ -+ POS_LOCK hPowerLock; -+ IMG_PID uiPwrLockOwnerPID; /* Only valid between lock and corresponding unlock -+ operations of hPowerLock */ -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) || defined(SUPPORT_MMU_DEFERRED_FREE) -+ IMG_UINT32 uiPowerOffCounter; /* Counts how many times the device has been powered -+ off. Incremented in PVRSRVSetDeviceCurrentPowerState().*/ -+ IMG_UINT32 uiPowerOffCounterNext; /* Value of next update to uiPowerOffCounter. */ -+#endif -+ -+ /* current system device power state */ -+ PVRSRV_SYS_POWER_STATE eCurrentSysPowerState; -+ PPVRSRV_POWER_DEV psPowerDev; -+ -+ /* -+ callbacks the device must support: -+ */ -+ -+ PVRSRV_ERROR (*pfnInvalFBSCTable)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, -+ MMU_CONTEXT *psMMUContext, -+ IMG_UINT64 ui64FBSCEntries); -+ -+ PVRSRV_ERROR (*pfnValidateOrTweakPhysAddrs)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, -+ MMU_DEVICEATTRIBS *psDevAttrs, -+ IMG_UINT64 *pui64Addr); -+ -+ void (*pfnMMUCacheInvalidate)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, -+ MMU_CONTEXT *psMMUContext, -+ MMU_LEVEL eLevel, -+ IMG_BOOL bUnmap); -+ -+ PVRSRV_ERROR (*pfnMMUCacheInvalidateKick)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, -+ IMG_UINT32 *pui32NextMMUInvalidateUpdate); -+ -+ IMG_UINT32 (*pfnMMUCacheGetInvalidateCounter)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); -+ -+ /* Callback pfnMMUTopLevelPxWorkarounds may be NULL if not required */ -+ void (*pfnMMUTopLevelPxWorkarounds)(struct _CONNECTION_DATA_ *psConnection, -+ struct _PVRSRV_DEVICE_NODE_ *psDevNode, -+ IMG_DEV_PHYADDR sPhysAddrL1Px, -+ void *pxL1PxCpuVAddr); -+ -+ /* Callback pfnMMUTweakProtFlags may be NULL if not required */ -+ void (*pfnMMUTweakProtFlags)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, -+ MMU_DEVICEATTRIBS *psDevAttrs, -+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, -+ MMU_PROTFLAGS_T *uiMMUProtFlags); -+ -+ -+ void (*pfnDumpDebugInfo)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); -+ -+ PVRSRV_ERROR (*pfnUpdateHealthStatus)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, -+ IMG_BOOL bIsTimerPoll); -+ -+#if defined(SUPPORT_AUTOVZ) -+ void (*pfnUpdateAutoVzWatchdog)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); -+#endif -+ -+ PVRSRV_ERROR (*pfnValidationGPUUnitsPowerChange)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32NewState); -+ -+ PVRSRV_ERROR (*pfnResetHWRLogs)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); -+ -+ PVRSRV_ERROR (*pfnVerifyBVNC)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64GivenBVNC, IMG_UINT64 ui64CoreIdMask); -+ -+ /* Method to drain device HWPerf packets from firmware buffer to host buffer */ -+ PVRSRV_ERROR (*pfnServiceHWPerf)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); -+ -+ PVRSRV_ERROR (*pfnDeviceVersionString)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_CHAR **ppszVersionString); -+ -+ PVRSRV_ERROR (*pfnDeviceClockSpeed)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_PUINT32 pui32RGXClockSpeed); -+ -+ PVRSRV_ERROR (*pfnSoftReset)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64ResetValue1, IMG_UINT64 ui64ResetValue2); -+ -+ PVRSRV_ERROR (*pfnAlignmentCheck)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32FWAlignChecksSize, IMG_UINT32 aui32FWAlignChecks[]); -+ IMG_BOOL (*pfnCheckDeviceFeature)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64FeatureMask); -+ -+ IMG_INT32 (*pfnGetDeviceFeatureValue)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, enum _RGX_FEATURE_WITH_VALUE_INDEX_ eFeatureIndex); -+ -+ PVRSRV_ERROR (*pfnGetMultiCoreInfo)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 
ui32CapsSize, -+ IMG_UINT32 *pui32NumCores, IMG_UINT64 *pui64Caps); -+ -+ IMG_BOOL (*pfnHasFBCDCVersion31)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); -+ -+ IMG_UINT32 (*pfnGetTFBCLossyGroup)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); -+ -+ MMU_DEVICEATTRIBS* (*pfnGetMMUDeviceAttributes)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_BOOL bKernelMemoryCtx); -+ -+ PVRSRV_DEVICE_CONFIG *psDevConfig; -+ -+ /* device post-finalise compatibility check */ -+ PVRSRV_ERROR (*pfnInitDeviceCompatCheck) (struct _PVRSRV_DEVICE_NODE_*); -+ -+ /* initialise device-specific physheaps */ -+ PVRSRV_ERROR (*pfnPhysMemDeviceHeapsInit) (struct _PVRSRV_DEVICE_NODE_ *); -+ -+ /* determining the appropriate LMA allocation policy */ -+ PHYS_HEAP_POLICY (*pfnPhysHeapGetLMAPolicy) (PHYS_HEAP_USAGE_FLAGS); -+ -+ /* initialise fw mmu, if FW not using GPU mmu, NULL otherwise. */ -+ PVRSRV_ERROR (*pfnFwMMUInit) (struct _PVRSRV_DEVICE_NODE_ *); -+ -+ /* Check device's FW Main physheap free memory */ -+ PVRSRV_ERROR (*pfnCheckForSufficientFWPhysMem) (struct _PVRSRV_DEVICE_NODE_ *); -+ -+ /* information about the device's address space and heaps */ -+ DEVICE_MEMORY_INFO sDevMemoryInfo; -+ -+ /* device's shared-virtual-memory heap max virtual address */ -+ IMG_UINT64 ui64GeneralSVMHeapTopVA; -+ -+ ATOMIC_T iNumClockSpeedChanges; -+ -+ /* private device information */ -+ void *pvDevice; -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+ RA_ARENA *psOSSharedArena; -+ RA_ARENA *psOSidSubArena[GPUVIRT_VALIDATION_NUM_OS]; -+#endif -+ -+ /* When virtualisation support is enabled the Firmware heaps of virtualised -+ * drivers can be entirely premapped into the Fw's VA space, during init -+ * or during runtime on explicit request from Guest drivers. */ -+ PHYS_HEAP *apsFWPremapPhysHeap[RGX_NUM_DRIVERS_SUPPORTED]; -+ -+ /* Head of the physical heap list. Tracks PhysHeap objects created from -+ * the PHYS_HEAP_CONFIG definitions supplied by the system layer at -+ * device creation time. There could be 1 or more and varies from system -+ * to system. -+ */ -+ struct _PHYS_HEAP_ *psPhysHeapList; -+ POS_LOCK hPhysHeapLock; -+ -+ /* The apsPhysHeap array is a mapping table to the system's, often fewer, -+ * physical memory heaps defined for this device. It contains -+ * PVRSRV_PHYS_HEAP_LAST entries, one for each possible physical -+ * heaps allowed in the design. Each PhysHeap in the design is acquired -+ * and stored in the mapping table during device create. Fall-back logic -+ * is employed to ensure a valid heap is always found from the set defined -+ * in the system layer for the device. Responsibility for this is shared -+ * between the common layer (PhysHeapInitDeviceHeaps) and sub-device -+ * layer (pfnPhysMemDeviceHeapsInit). -+ * It is used in the PhysMem module to create PMRs from a given PhysHeap -+ * of memory. See PhysHeapCreatePMR() -+ */ -+ PHYS_HEAP *apsPhysHeap[PVRSRV_PHYS_HEAP_LAST]; -+ IMG_UINT32 ui32UserAllocHeapCount; -+ -+ /* Flag indicating if the firmware has been initialised during the -+ * 1st boot of the Host driver according to the AutoVz life-cycle. 
*/ -+ IMG_BOOL bAutoVzFwIsUp; -+ -+#if defined(SUPPORT_AUTOVZ) -+ IMG_BOOL bAutoVzAllowGPUPowerdown; -+#endif -+ -+ /* Flags indicating VM state and if PVZ calls originating from it are valid */ -+ IMG_UINT32 ui32VmState; -+ -+ struct _PVRSRV_DEVICE_NODE_ *psNext; -+ struct _PVRSRV_DEVICE_NODE_ **ppsThis; -+ -+ /* Functions for notification about memory contexts */ -+ PVRSRV_ERROR (*pfnRegisterMemoryContext)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, -+ MMU_CONTEXT *psMMUContext, -+ IMG_HANDLE *hPrivData); -+ void (*pfnUnregisterMemoryContext)(IMG_HANDLE hPrivData); -+ -+ /* Functions for allocation/freeing of UFOs */ -+ AllocUFOBlockCallback pfnAllocUFOBlock; /*!< Callback for allocation of a block of UFO memory */ -+ FreeUFOBlockCallback pfnFreeUFOBlock; /*!< Callback for freeing of a block of UFO memory */ -+ -+ IMG_HANDLE hSyncServerRecordNotify; -+ POS_LOCK hSyncServerRecordLock; -+ IMG_UINT32 ui32SyncServerRecordCount; -+ IMG_UINT32 ui32SyncServerRecordCountHighWatermark; -+ DLLIST_NODE sSyncServerRecordList; -+ struct SYNC_RECORD *apsSyncServerRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN]; -+ IMG_UINT32 uiSyncServerRecordFreeIdx; -+ -+ IMG_HANDLE hSyncCheckpointRecordNotify; -+ POS_LOCK hSyncCheckpointRecordLock; -+ IMG_UINT32 ui32SyncCheckpointRecordCount; -+ IMG_UINT32 ui32SyncCheckpointRecordCountHighWatermark; -+ DLLIST_NODE sSyncCheckpointRecordList; -+ struct SYNC_CHECKPOINT_RECORD *apsSyncCheckpointRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN]; -+ IMG_UINT32 uiSyncCheckpointRecordFreeIdx; -+ -+ IMG_HANDLE hSyncCheckpointNotify; -+ POS_SPINLOCK hSyncCheckpointListLock; /*!< Protects sSyncCheckpointSyncsList */ -+ DLLIST_NODE sSyncCheckpointSyncsList; -+ -+ PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext; -+ PSYNC_PRIM_CONTEXT hSyncPrimContext; -+ -+ /* With this sync-prim we make sure the MMU cache is flushed -+ * before we free the page table memory */ -+ PVRSRV_CLIENT_SYNC_PRIM *psMMUCacheSyncPrim; -+ IMG_UINT32 ui32NextMMUInvalidateUpdate; -+ -+ IMG_HANDLE hCmdCompNotify; -+ IMG_HANDLE hDbgReqNotify; -+ IMG_HANDLE hAppHintDbgReqNotify; -+ IMG_HANDLE hPhysHeapDbgReqNotify; -+ -+ /* Device MMU common module can support one other larger page size -+ * (e.g. 16KB) in addition to the default OS page size (often 4KB). -+ * If supported, device code will store the size here as a log2 value -+ * during device creation/registration. A 0 value will imply it is -+ * not supported. -+ */ -+ IMG_UINT32 ui32Non4KPageSizeLog2; -+ -+ PVRSRV_DEF_PAGE sScratchPage; -+ PVRSRV_DEF_PAGE sDevZeroPage; -+ -+ POSWR_LOCK hMemoryContextPageFaultNotifyListLock; -+ DLLIST_NODE sMemoryContextPageFaultNotifyListHead; -+ -+ /* System DMA capability */ -+ IMG_BOOL bHasSystemDMA; -+ IMG_HANDLE hDmaTxChan; -+ IMG_HANDLE hDmaRxChan; -+ -+#if defined(PDUMP) -+ /* -+ * FBC clear color register default value to use. -+ */ -+ IMG_UINT64 ui64FBCClearColour; -+ -+ /* Device-level callback which is called when pdump.exe starts. -+ * Should be implemented in device-specific init code, e.g. rgxinit.c -+ */ -+ PVRSRV_ERROR (*pfnPDumpInitDevice)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); -+ /* device-level callback to return pdump ID associated to a memory context */ -+ IMG_UINT32 (*pfnMMUGetContextID)(IMG_HANDLE hDevMemContext); -+ -+ IMG_UINT8 *pui8DeferredSyncCPSignal; /*! Deferred fence events buffer */ -+ -+ IMG_UINT16 ui16SyncCPReadIdx; /*! Read index in the above deferred fence events buffer */ -+ -+ IMG_UINT16 ui16SyncCPWriteIdx; /*! 
Write index in the above deferred fence events buffer */ -+ -+ POS_LOCK hSyncCheckpointSignalLock; /*! Guards data shared between an sleepable-contexts */ -+ -+ void *pvSyncCPMISR; /*! MISR to emit pending/deferred fence signals */ -+ -+ void *hTransition; /*!< SyncCheckpoint PdumpTransition Cookie */ -+ -+ DLLIST_NODE sSyncCheckpointContextListHead; /*!< List head for the sync chkpt contexts */ -+ -+ POS_LOCK hSyncCheckpointContextListLock; /*! lock for accessing sync chkpt contexts list */ -+ -+#endif -+ -+#if defined(SUPPORT_VALIDATION) -+ POS_LOCK hValidationLock; -+#endif -+ -+ /* Members for linking which connections are open on this device */ -+ POS_LOCK hConnectionsLock; /*!< Lock protecting sConnections */ -+ DLLIST_NODE sConnections; /*!< The list of currently active connection objects for this device node */ -+ -+#if defined(PVRSRV_DEBUG_LISR_EXECUTION) -+ LISR_EXECUTION_INFO sLISRExecutionInfo; /*!< Information about the last execution of the LISR */ -+ IMG_UINT64 ui64nLISR; /*!< Number of LISR calls seen */ -+ IMG_UINT64 ui64nMISR; /*!< Number of MISR calls made */ -+#endif -+ -+ PVRSRV_DEVICE_DEBUG_INFO sDebugInfo; -+ IMG_BOOL bEnablePFDebug; /*!< EnablePageFaultDebug AppHint setting for device */ -+ -+ DLLIST_NODE sCleanupThreadWorkList; /*!< List of work for the cleanup thread associated with the device */ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ /* Data for the deferred freeing of a PMR physical pages for a given device */ -+ DLLIST_NODE sPMRZombieList; /*!< List of PMRs to free */ -+ POS_LOCK hPMRZombieListLock; /*!< List lock */ -+ IMG_UINT32 uiPMRZombieCount; /*!< Number of elements in the list */ -+ IMG_UINT32 uiPMRZombieCountInCleanup; /*!< Number of elements in cleanup items */ -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ ATOMIC_T eFrozen; /*< Frozen / Unfrozen indicator */ -+ IMG_HANDLE hDeviceThreadEvObj; /*< Event Object for Freeze indicator */ -+ IMG_HANDLE hDeviceFreezeThaw; /*< Event handle for Freeze/Thaw */ -+ POS_LOCK hFreezeThawLock; /*< Freeze/Thaw lock */ -+ ATOMIC_T iFreezeCount; /*< Number of blocked on frozen tasks */ -+ ATOMIC_T iTotalFreezes; /*< Total number of times device frozen */ -+ ATOMIC_T iThreadsActive; /*< Number of threads active on this device */ -+} PVRSRV_DEVICE_NODE; -+ -+/* -+ * Macros to be used instead of calling directly the pfns since these macros -+ * will expand the feature passed as argument into the bitmask/index to work -+ * with the macros defined in rgx_bvnc_defs_km.h -+ */ -+#define PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, Feature) \ -+ psDevNode->pfnCheckDeviceFeature(psDevNode, RGX_FEATURE_##Feature##_BIT_MASK) -+#define PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, Feature) \ -+ psDevNode->pfnGetDeviceFeatureValue(psDevNode, RGX_FEATURE_##Feature##_IDX) -+ -+PVRSRV_ERROR PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_BOOL bInitSuccessful); -+ -+PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32ClientBuildOptions); -+ -+void PVRSRVDeviceSetState(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_STATE eNewDevState); -+ -+#define PVRSRVIsStatusRecoverable(eStatus) \ -+ (((eStatus == PVRSRV_DEVICE_HEALTH_STATUS_DEAD)) ? \ -+ IMG_FALSE : IMG_TRUE) -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) || defined(SUPPORT_MMU_DEFERRED_FREE) -+/* Determines if a 32-bit `uiCurrent` counter advanced to or beyond -+ * `uiRequired` value. 
The function takes into consideration that the -+ * counter could have wrapped around. */ -+static INLINE IMG_BOOL PVRSRVHasCounter32Advanced(IMG_UINT32 uiCurrent, -+ IMG_UINT32 uiRequired) -+{ -+ return uiCurrent >= uiRequired ? -+ /* ... with the counter wrapped around ... -+ * There can't be ~4 billion transactions completed, so consider wrapped */ -+ (((uiCurrent - uiRequired) > 0xF0000000UL) ? IMG_FALSE : IMG_TRUE) : -+ /* There can't be ~4 billion transactions pending, so consider wrapped */ -+ (((uiRequired - uiCurrent) > 0xF0000000UL) ? IMG_TRUE : IMG_FALSE); -+} -+#endif -+ -+#endif /* DEVICE_H */ -+ -+/****************************************************************************** -+ End of file (device.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/device_connection.h b/drivers/gpu/drm/img-rogue/device_connection.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/device_connection.h -@@ -0,0 +1,130 @@ -+/*************************************************************************/ /*! -+@File device_connection.h -+@Title -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
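PVRSRVHasCounter32Advanced() above treats a difference larger than 0xF0000000 as evidence of wrap-around rather than of ~4 billion outstanding transactions. A few concrete cases as a sketch; the values and the self-check function name are illustrative only, and the function itself is only available under the SUPPORT_PMR_DEFERRED_FREE / SUPPORT_MMU_DEFERRED_FREE guard shown above:

/* Illustrative self-check, not part of the patch. */
static void _ExampleCounter32Checks(void)
{
	PVR_ASSERT( PVRSRVHasCounter32Advanced(10U, 7U));  /* reached, no wrap */
	PVR_ASSERT(!PVRSRVHasCounter32Advanced(7U, 10U));  /* still pending */
	/* 0xFFFFFFF0 - 3 = 0xFFFFFFED > 0xF0000000, so the counter is treated
	 * as having wrapped past the target and therefore as advanced. */
	PVR_ASSERT( PVRSRVHasCounter32Advanced(3U, 0xFFFFFFF0U));
}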
-+*/ /**************************************************************************/ -+ -+#if !defined(DEVICE_CONNECTION_H) -+#define DEVICE_CONNECTION_H -+ -+#include "img_types.h" -+#include "img_defs.h" -+ -+#if defined(__KERNEL__) -+typedef struct _PVRSRV_DEVICE_NODE_ *SHARED_DEV_CONNECTION; -+#else -+#include "connection.h" -+typedef const struct PVRSRV_DEV_CONNECTION_TAG *SHARED_DEV_CONNECTION; -+#endif -+ -+/****************************************************************************** -+ * Device capability flags and masks -+ * -+ * Following bitmask shows allocated ranges and values for our device -+ * capability settings: -+ * -+ * 31 27 23 19 15 11 7 3 0 -+ * |...|...|...|...|...|...|...|... -+ * ** CACHE_COHERENT [0x1..0x2] -+ * x PVRSRV_CACHE_COHERENT_DEVICE_FLAG -+ * x. PVRSRV_CACHE_COHERENT_CPU_FLAG -+ * *... NONMAPPABLE_MEMORY [0x8] -+ * x... PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG -+ * *.... PDUMP_IS_RECORDING [0x10] -+ * x.... PVRSRV_PDUMP_IS_RECORDING -+ * ***........ DEVMEM_SVM_ALLOC [0x100..0x400] -+ * x........ PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED -+ * x......... PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED -+ * x.......... PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL -+ * *........... FBCDC_V3_1 [0x800] -+ * x........... FBCDC_V3_1_USED -+ * *............ PVRSRV_SYSTEM_DMA -+ * x............ PVRSRV_SYSTEM_DMA_USED -+ * *............. TFBC_LOSSY_GROUP -+ * x............. TFBC_LOSSY_GROUP_1 -+ * |...|...|...|...|...|...|...|... -+ *****************************************************************************/ -+ -+/* Flag to be passed over the bridge during connection stating whether CPU cache coherent is available*/ -+#define PVRSRV_CACHE_COHERENT_SHIFT (0) -+#define PVRSRV_CACHE_COHERENT_DEVICE_FLAG (1U << PVRSRV_CACHE_COHERENT_SHIFT) -+#define PVRSRV_CACHE_COHERENT_CPU_FLAG (2U << PVRSRV_CACHE_COHERENT_SHIFT) -+#define PVRSRV_CACHE_COHERENT_EMULATE_FLAG (4U << PVRSRV_CACHE_COHERENT_SHIFT) -+#define PVRSRV_CACHE_COHERENT_MASK (7U << PVRSRV_CACHE_COHERENT_SHIFT) -+ -+/* Flag to be passed over the bridge during connection stating whether CPU non-mappable memory is present */ -+#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT (7) -+#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG (1U << PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT) -+ -+/* Flag to be passed over the bridge to indicate PDump activity */ -+#define PVRSRV_PDUMP_IS_RECORDING_SHIFT (4) -+#define PVRSRV_PDUMP_IS_RECORDING (1U << PVRSRV_PDUMP_IS_RECORDING_SHIFT) -+ -+/* Flag to be passed over the bridge during connection stating SVM allocation availability */ -+#define PVRSRV_DEVMEM_SVM_ALLOC_SHIFT (8) -+#define PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED (1U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT) -+#define PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED (2U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT) -+#define PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL (4U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT) -+ -+/* Flag to be passed over the bridge during connection stating whether GPU uses FBCDC v3.1 */ -+#define PVRSRV_FBCDC_V3_1_USED_SHIFT (11) -+#define PVRSRV_FBCDC_V3_1_USED (1U << PVRSRV_FBCDC_V3_1_USED_SHIFT) -+ -+/* Flag to be passed over the bridge during connection stating whether System has -+ DMA transfer capability to and from device memory */ -+#define PVRSRV_SYSTEM_DMA_SHIFT (12) -+#define PVRSRV_SYSTEM_DMA_USED (1U << PVRSRV_SYSTEM_DMA_SHIFT) -+ -+/* Flag to be passed over the bridge during connection stating whether GPU supports TFBC and is -+ configured to use lossy compression control group 1 (25% / 37.5% / 50%) */ -+#define PVRSRV_TFBC_LOSSY_GROUP_SHIFT (13) -+#define 
PVRSRV_TFBC_LOSSY_GROUP_1 (1U << PVRSRV_TFBC_LOSSY_GROUP_SHIFT) -+ -+static INLINE IMG_HANDLE GetBridgeHandle(SHARED_DEV_CONNECTION hDevConnection) -+{ -+#if defined(__KERNEL__) -+ return hDevConnection; -+#else -+ return hDevConnection->hServices; -+#endif -+} -+ -+ -+#endif /* !defined(DEVICE_CONNECTION_H) */ -diff --git a/drivers/gpu/drm/img-rogue/devicemem.c b/drivers/gpu/drm/img-rogue/devicemem.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/devicemem.c -@@ -0,0 +1,2779 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device Memory Management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Front End (nominally Client side part, but now invokable -+ from server too) of device memory management -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
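The capability bitmask laid out above is consumed by clients as simple field tests on the value exchanged over the bridge at connection time. A short sketch; the parameter name ui32Caps and the wrapper function names are illustrative, the flag macros are the ones defined above:

/* Illustrative wrappers, not part of the patch. */
static IMG_BOOL ConnectionReportsCacheCoherency(IMG_UINT32 ui32Caps)
{
	/* Any bit in the CACHE_COHERENT field (device, CPU or emulated)
	 * means some form of coherency was advertised. */
	return (ui32Caps & PVRSRV_CACHE_COHERENT_MASK) ? IMG_TRUE : IMG_FALSE;
}

static IMG_BOOL ConnectionUsesFBCDC31(IMG_UINT32 ui32Caps)
{
	return (ui32Caps & PVRSRV_FBCDC_V3_1_USED) ? IMG_TRUE : IMG_FALSE;
}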
-+ */ /**************************************************************************/ -+ -+#include "devicemem.h" -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include "pvrsrv_error.h" -+#include "allocmem.h" -+#include "ra.h" -+#include "osfunc.h" -+#include "osmmap.h" -+#include "devicemem_utils.h" -+#include "client_mm_bridge.h" -+#include "client_cache_bridge.h" -+#include "services_km.h" -+ -+#if defined(PDUMP) -+#if defined(__KERNEL__) -+#include "pdump_km.h" -+#else -+#include "pdump_um.h" -+#endif -+#include "devicemem_pdump.h" -+#endif -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+#include "client_ri_bridge.h" -+#endif -+#include "client_devicememhistory_bridge.h" -+#include "info_page_client.h" -+ -+#include "rgx_heaps.h" -+#if defined(__KERNEL__) -+#include "pvrsrv.h" -+#include "rgxdefs_km.h" -+#include "rgx_bvnc_defs_km.h" -+#include "device.h" -+#include "rgxdevice.h" -+#include "pvr_ricommon.h" -+#include "pvrsrv_apphint.h" -+#include "os_apphint.h" -+#include "srvcore.h" -+#if defined(__linux__) -+#include "linux/kernel.h" -+#endif -+#else -+#include "srvcore_intern.h" -+#include "rgxdefs.h" -+#endif -+ -+#if defined(__KERNEL__) && defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+extern PVRSRV_ERROR RIDumpAllKM(void); -+#endif -+ -+#if defined(__KERNEL__) -+#define GET_ERROR_STRING(eError) PVRSRVGetErrorString(eError) -+#else -+#define GET_ERROR_STRING(eError) PVRSRVGetErrorString(eError) -+#endif -+ -+#if defined(__KERNEL__) -+/* Derive the virtual from the hPMR */ -+static -+IMG_UINT64 _GetPremappedVA(PMR *psPMR, PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS; -+ -+ IMG_DEV_PHYADDR sDevAddr; -+ IMG_BOOL bValid; -+ PHYS_HEAP *psPhysHeap = psDevNode->apsPhysHeap[FIRST_PHYSHEAP_MAPPED_TO_FW_MAIN_DEVMEM]; -+ IMG_DEV_PHYADDR sHeapAddr; -+ -+ eError = PhysHeapGetDevPAddr(psPhysHeap, &sHeapAddr); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapGetDevPAddr", fail); -+ -+#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES) -+{ -+ if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_UMA || -+ PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_DMA) -+ { -+ IMG_DEV_PHYADDR sDevPAddrCorrected; -+ -+ PhysHeapCpuPAddrToDevPAddr(psPhysHeap, 1, &sDevPAddrCorrected, (IMG_CPU_PHYADDR *)&sHeapAddr); -+ sHeapAddr.uiAddr = sDevPAddrCorrected.uiAddr; -+ } -+} -+#endif -+ -+ eError = PMRLockSysPhysAddresses(psPMR); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddr", fail); -+ -+ eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sDevAddr, &bValid, DEVICE_USE); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_IF_ERROR(eError, "PMR_DevPhysAddr"); -+ eError = PMRUnlockSysPhysAddresses(psPMR); -+ PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddr"); -+ goto fail; -+ } -+ -+ eError = PMRUnlockSysPhysAddresses(psPMR); -+ PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddr"); -+ -+ ui64OptionalMapAddress = RGX_FIRMWARE_RAW_HEAP_BASE | (sDevAddr.uiAddr - sHeapAddr.uiAddr); -+ -+ PVR_DPF((PVR_DBG_ALLOC, "%s: sDevAddr.uiAddr = 0x%"IMG_UINT64_FMTSPECx" sHeapAddr.uiAddr = 0x%"IMG_UINT64_FMTSPECx" => ui64OptionalMapAddress = 0x%"IMG_UINT64_FMTSPECx, -+ __func__, sDevAddr.uiAddr, sHeapAddr.uiAddr, ui64OptionalMapAddress)); -+fail: -+ return ui64OptionalMapAddress; -+} -+#endif -+ -+/***************************************************************************** -+ * Sub allocation internals * -+ *****************************************************************************/ -+static INLINE PVRSRV_MEMALLOCFLAGS_T 
-+DevmemOverrideFlagsOrPassThrough(SHARED_DEV_CONNECTION hDevConnection, PVRSRV_MEMALLOCFLAGS_T uiFlags) -+{ -+#if defined(__KERNEL__) && defined(RGX_FEATURE_GPU_CPU_COHERENCY_BIT_MASK) -+ /* -+ * Override the requested memory flags of FW allocations only, -+ * non-FW allocations pass-through unmodified. -+ * -+ * On fully coherent platforms: -+ * - We upgrade uncached, CPU-only cached or GPU-only cached to -+ * full coherency. This gives caching improvements for free. -+ * -+ * On ace-lite platforms: -+ * - If the allocation is not CPU cached, then there is nothing -+ * for the GPU to snoop regardless of the GPU cache setting. -+ * - If the allocation is not GPU cached, then the SLC will not -+ * be used and will not snoop the CPU even if it is CPU cached. -+ * - Therefore only the GPU setting can be upgraded to coherent -+ * if it is already GPU cached incoherent and the CPU is cached. -+ * -+ * All other platforms: -+ * - Do not modify the allocation flags. -+ */ -+ -+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDevConnection; -+ -+ if (psDevNode->pvDevice != NULL && PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, GPU_CPU_COHERENCY)) -+ { -+ if (PVRSRV_CHECK_FW_MAIN(uiFlags)) -+ { -+ if (PVRSRVSystemSnoopingOfDeviceCache(psDevNode->psDevConfig) && -+ PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig)) -+ { -+ /* Clear existing flags, mark the allocation as fully coherent. */ -+ uiFlags &= ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK); -+ uiFlags |= PVRSRV_MEMALLOCFLAG_CACHE_COHERENT; -+ } -+ else if ((PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) || PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags)) && -+ (PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags)) && -+ PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig) && -+ psDevNode->eDevFabricType == PVRSRV_DEVICE_FABRIC_ACELITE) -+ { -+ /* Upgrade the allocation from GPU cached incoherent to GPU cached coherent. 
*/ -+ uiFlags &= ~PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK; -+ uiFlags |= PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT; -+ } -+ } -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(hDevConnection); -+#endif -+ -+ return uiFlags; -+} -+ -+static INLINE void -+CheckAnnotationLength(const IMG_CHAR *pszAnnotation) -+{ -+ IMG_UINT32 length = OSStringLength(pszAnnotation); -+ -+ if (length >= DEVMEM_ANNOTATION_MAX_LEN) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Annotation \"%s\" has been truncated to %d characters from %d characters", -+ __func__, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN - 1, length)); -+ } -+} -+ -+static PVRSRV_ERROR -+AllocateDeviceMemory(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_UINT32 uiLog2Quantum, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_BOOL bExportable, -+ const IMG_CHAR *pszAnnotation, -+ DEVMEM_IMPORT **ppsImport) -+{ -+ DEVMEM_IMPORT *psImport; -+ PVRSRV_MEMALLOCFLAGS_T uiOutFlags; -+ IMG_HANDLE hPMR; -+ PVRSRV_ERROR eError; -+ -+ eError = DevmemImportStructAlloc(hDevConnection, -+ &psImport); -+ PVR_GOTO_IF_ERROR(eError, failAlloc); -+ -+ /* check if shift value is not too big (sizeof(1ULL)) */ -+ PVR_ASSERT(uiLog2Quantum < sizeof(unsigned long long) * 8); -+ /* Check the size is a multiple of the quantum */ -+ PVR_ASSERT((uiSize & ((1ULL<psImport; -+ SHARED_DEV_CONNECTION hDevConnection; -+ IMG_HANDLE hPMR; -+ DEVMEM_HEAP *psHeap; -+ IMG_DEV_VIRTADDR sDevVAddr; -+ IMG_CPU_VIRTADDR pvCpuVAddr; -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ DEVMEM_PROPERTIES_T uiProperties; -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ PVR_ASSERT(psImport != NULL); -+ -+ hDevConnection = psImport->hDevConnection; -+ hPMR = psImport->hPMR; -+ psHeap = psImport->sDeviceImport.psHeap; -+ sDevVAddr = psImport->sDeviceImport.sDevVAddr; -+ pvCpuVAddr = psImport->sCPUImport.pvCPUVAddr; -+ -+ PVR_ASSERT(hDevConnection != NULL); -+ PVR_ASSERT(hPMR != NULL); -+ PVR_ASSERT(psHeap != NULL); -+ PVR_ASSERT(!BITMASK_HAS(uiSparseFlags, SPARSE_RESIZE_BOTH) || (sDevVAddr.uiAddr != 0)); -+ PVR_ASSERT(!BITMASK_HAS(uiSparseFlags, SPARSE_MAP_CPU_ADDR) || (pvCpuVAddr == NULL)); -+ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ uiProperties = GetImportProperties(psMemDesc->psImport); -+ -+ PVR_ASSERT(!BITMASK_HAS(uiProperties, DEVMEM_PROPERTIES_SECURE)); -+ PVR_ASSERT(!BITMASK_HAS(uiProperties, DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE)); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE -+ PVR_ASSERT(psMemDesc->sCPUMemDesc.ui32RefCount == 0); -+#endif -+ -+ OSLockAcquire(psImport->hLock); -+ -+ eError = BridgeChangeSparseMem(GetBridgeHandle(hDevConnection), -+ psHeap->hDevMemServerHeap, -+ hPMR, -+ ui32AllocPageCount, -+ paui32AllocPageIndices, -+ ui32FreePageCount, -+ pauiFreePageIndices, -+ uiSparseFlags, -+ psImport->uiFlags, -+ sDevVAddr, -+ (IMG_UINT64) ((uintptr_t) pvCpuVAddr)); -+ -+ OSLockRelease(psImport->hLock); -+ -+ PVR_RETURN_IF_ERROR(eError); -+ -+ if (GetInfoPageDebugFlags(hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) -+ { -+ BridgeDevicememHistorySparseChange(GetBridgeHandle(hDevConnection), -+ hPMR, -+ psMemDesc->uiOffset, -+ psMemDesc->sDeviceMemDesc.sDevVAddr, -+ psMemDesc->uiAllocSize, -+ psMemDesc->szText, -+ DevmemGetHeapLog2PageSize(psHeap), -+ ui32AllocPageCount, -+ paui32AllocPageIndices, -+ ui32FreePageCount, -+ pauiFreePageIndices, -+ psMemDesc->ui32AllocationIndex, -+ &psMemDesc->ui32AllocationIndex); -+ } -+ -+ return 
PVRSRV_OK; -+} -+ -+static void -+FreeDeviceMemory(DEVMEM_IMPORT *psImport) -+{ -+ DevmemImportStructRelease(psImport); -+} -+ -+static PVRSRV_ERROR -+SubAllocImportAlloc(RA_PERARENA_HANDLE hArena, -+ RA_LENGTH_T uiSize, -+ RA_FLAGS_T _flags, -+ RA_LENGTH_T uBaseAlignment, -+ const IMG_CHAR *pszAnnotation, -+ /* returned data */ -+ RA_BASE_T *puiBase, -+ RA_LENGTH_T *puiActualSize, -+ RA_PERISPAN_HANDLE *phImport) -+{ -+ /* When suballocations need a new lump of memory, the RA calls -+ back here. Later, in the kernel, we must construct a new PMR -+ and a pairing between the new lump of virtual memory and the -+ PMR (whether or not such PMR is backed by physical memory) */ -+ DEVMEM_HEAP *psHeap; -+ DEVMEM_IMPORT *psImport; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32MappingTable = 0; -+ PVRSRV_MEMALLOCFLAGS_T uiFlags = (PVRSRV_MEMALLOCFLAGS_T) _flags; -+ IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS; -+ -+ /* Per-arena private handle is, for us, the heap */ -+ psHeap = hArena; -+ -+ /* The RA should not have invoked us with a size that is not a -+ multiple of the quantum anyway */ -+ PVR_ASSERT((uiSize & ((1ULL<<psHeap->uiLog2Quantum)-1)) == 0); -+ -+ eError = AllocateDeviceMemory(psHeap->psCtx->hDevConnection, -+ psHeap->uiLog2Quantum, -+ uiSize, -+ 1, -+ 1, -+ &ui32MappingTable, -+ uBaseAlignment, -+ uiFlags, -+ IMG_FALSE, -+ "PMR sub-allocated", -+ &psImport); -+ PVR_GOTO_IF_ERROR(eError, failAlloc); -+ -+#if defined(PDUMP) && defined(DEBUG) -+#if defined(__KERNEL__) -+ PDUMPCOMMENTWITHFLAGS(PMR_DeviceNode((PMR*)psImport->hPMR), PDUMP_CONT, -+ "Created PMR for sub-allocations with handle ID: 0x%p Annotation: \"%s\" (PID %u)", -+ psImport->hPMR, pszAnnotation, OSGetCurrentProcessID()); -+#else -+ PDUMPCOMMENTF(psHeap->psCtx->hDevConnection, PDUMP_FLAGS_CONTINUOUS, -+ "Created PMR for sub-allocations with handle ID: %p Annotation: \"%s\" (PID %u)", -+ psImport->hPMR, pszAnnotation, OSGetCurrentProcessID()); -+#endif -+#else -+ PVR_UNREFERENCED_PARAMETER(pszAnnotation); -+#endif -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) -+ { -+#if defined(__KERNEL__) -+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection; -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevNode->pvDevice; -+ -+ PVR_ASSERT(PVRSRV_CHECK_FW_MAIN(uiFlags)); -+ -+ /* If allocation is made by the Kernel from the firmware heap, account for it -+ * under the PVR_SYS_ALLOC_PID.
-+ */ -+ if ((psHeap == psDevInfo->psFirmwareMainHeap) || (psHeap == psDevInfo->psFirmwareConfigHeap)) -+ { -+ eError = BridgeRIWritePMREntryWithOwner (GetBridgeHandle(psImport->hDevConnection), -+ psImport->hPMR, -+ PVR_SYS_ALLOC_PID); -+ PVR_LOG_IF_ERROR(eError, "BridgeRIWritePMREntryWithOwner"); -+ } -+ else -+#endif -+ { -+ eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection), -+ psImport->hPMR); -+ PVR_LOG_IF_ERROR(eError, "BridgeRIWritePMREntry"); -+ } -+ } -+#endif -+ -+#if defined(__KERNEL__) -+ if (psHeap->bPremapped) -+ { -+ ui64OptionalMapAddress = _GetPremappedVA(psImport->hPMR, psHeap->psCtx->hDevConnection); -+ } -+#endif -+ -+ /* -+ Suballocations always get mapped into the device was we need to -+ key the RA off something and as we can't export suballocations -+ there is no valid reason to request an allocation an not map it -+ */ -+ eError = DevmemImportStructDevMap(psHeap, -+ IMG_TRUE, -+ psImport, -+ ui64OptionalMapAddress); -+ PVR_GOTO_IF_ERROR(eError, failMap); -+ -+ OSLockAcquire(psImport->hLock); -+ /* Mark this import struct as zeroed so we can save some PDump LDBs -+ * and do not have to CPU map + mem set()*/ -+ if (uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) -+ { -+ psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_ZEROED; -+ } -+ else if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) -+ { -+ psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_POISONED; -+ } -+ psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_CLEAN; -+ OSLockRelease(psImport->hLock); -+ -+ *puiBase = psImport->sDeviceImport.sDevVAddr.uiAddr; -+ *puiActualSize = uiSize; -+ *phImport = psImport; -+ -+ return PVRSRV_OK; -+ -+ /* error exit paths follow */ -+ -+failMap: -+ FreeDeviceMemory(psImport); -+failAlloc: -+ -+ return eError; -+} -+ -+static void -+SubAllocImportFree(RA_PERARENA_HANDLE hArena, -+ RA_BASE_T uiBase, -+ RA_PERISPAN_HANDLE hImport) -+{ -+ DEVMEM_IMPORT *psImport = hImport; -+#if !defined(PVRSRV_NEED_PVR_ASSERT) -+ PVR_UNREFERENCED_PARAMETER(hArena); -+ PVR_UNREFERENCED_PARAMETER(uiBase); -+#endif -+ -+ PVR_ASSERT(psImport != NULL); -+ PVR_ASSERT(hArena == psImport->sDeviceImport.psHeap); -+ PVR_ASSERT(uiBase == psImport->sDeviceImport.sDevVAddr.uiAddr); -+ -+ (void) DevmemImportStructDevUnmap(psImport); -+ (void) DevmemImportStructRelease(psImport); -+} -+ -+/***************************************************************************** -+ * Devmem context internals * -+ *****************************************************************************/ -+ -+static PVRSRV_ERROR -+PopulateContextFromBlueprint(struct DEVMEM_CONTEXT_TAG *psCtx, -+ DEVMEM_HEAPCFGID uiHeapBlueprintID) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_ERROR eError2; -+ struct DEVMEM_HEAP_TAG **ppsHeapArray; -+ IMG_UINT32 uiNumHeaps; -+ IMG_UINT32 uiHeapsToUnwindOnError; -+ IMG_UINT32 uiHeapIndex; -+ IMG_DEV_VIRTADDR sDevVAddrBase; -+ IMG_CHAR aszHeapName[DEVMEM_HEAPNAME_MAXLENGTH]; -+ IMG_DEVMEM_SIZE_T uiHeapLength; -+ IMG_DEVMEM_SIZE_T uiReservedRegionLength; -+ IMG_DEVMEM_LOG2ALIGN_T uiLog2DataPageSize; -+ IMG_DEVMEM_LOG2ALIGN_T uiLog2ImportAlignment; -+ -+ eError = DevmemHeapCount(psCtx->hDevConnection, -+ uiHeapBlueprintID, -+ &uiNumHeaps); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ if (uiNumHeaps == 0) -+ { -+ ppsHeapArray = NULL; -+ } -+ else -+ { -+ ppsHeapArray = OSAllocMem(sizeof(*ppsHeapArray) * uiNumHeaps); -+ PVR_GOTO_IF_NOMEM(ppsHeapArray, eError, e0); -+ } -+ -+ uiHeapsToUnwindOnError = 0; -+ -+ for (uiHeapIndex = 0; uiHeapIndex < uiNumHeaps; uiHeapIndex++) -+ { -+ eError = 
DevmemHeapDetails(psCtx->hDevConnection, -+ uiHeapBlueprintID, -+ uiHeapIndex, -+ &aszHeapName[0], -+ sizeof(aszHeapName), -+ &sDevVAddrBase, -+ &uiHeapLength, -+ &uiReservedRegionLength, -+ &uiLog2DataPageSize, -+ &uiLog2ImportAlignment); -+ PVR_GOTO_IF_ERROR(eError, e1); -+ -+ eError = DevmemCreateHeap(psCtx, -+ sDevVAddrBase, -+ uiHeapLength, -+ uiReservedRegionLength, -+ uiLog2DataPageSize, -+ uiLog2ImportAlignment, -+ aszHeapName, -+ uiHeapBlueprintID, -+ uiHeapIndex, -+ &ppsHeapArray[uiHeapIndex]); -+ PVR_GOTO_IF_ERROR(eError, e1); -+ -+ uiHeapsToUnwindOnError = uiHeapIndex + 1; -+ } -+ -+ psCtx->uiAutoHeapCount = uiNumHeaps; -+ psCtx->ppsAutoHeapArray = ppsHeapArray; -+ -+ PVR_ASSERT(psCtx->uiNumHeaps >= psCtx->uiAutoHeapCount); -+ PVR_ASSERT(psCtx->uiAutoHeapCount == uiNumHeaps); -+ -+ return PVRSRV_OK; -+ -+ /* error exit paths */ -+e1: -+ for (uiHeapIndex = 0; uiHeapIndex < uiHeapsToUnwindOnError; uiHeapIndex++) -+ { -+ eError2 = DevmemDestroyHeap(ppsHeapArray[uiHeapIndex]); -+ PVR_ASSERT(eError2 == PVRSRV_OK); -+ } -+ -+ if (uiNumHeaps != 0) -+ { -+ OSFreeMem(ppsHeapArray); -+ } -+ -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+static PVRSRV_ERROR -+UnpopulateContextFromBlueprint(struct DEVMEM_CONTEXT_TAG *psCtx) -+{ -+ PVRSRV_ERROR eReturn = PVRSRV_OK; -+ PVRSRV_ERROR eError2; -+ IMG_UINT32 uiHeapIndex; -+ IMG_BOOL bDoCheck = IMG_TRUE; -+#if defined(__KERNEL__) -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) -+ { -+ bDoCheck = IMG_FALSE; -+ } -+#endif -+ -+ for (uiHeapIndex = 0; uiHeapIndex < psCtx->uiAutoHeapCount; uiHeapIndex++) -+ { -+ if (!psCtx->ppsAutoHeapArray[uiHeapIndex]) -+ { -+ continue; -+ } -+ -+ eError2 = DevmemDestroyHeap(psCtx->ppsAutoHeapArray[uiHeapIndex]); -+ if (eError2 != PVRSRV_OK) -+ { -+ eReturn = eError2; -+ } -+ else -+ { -+ psCtx->ppsAutoHeapArray[uiHeapIndex] = NULL; -+ } -+ } -+ -+ if ((!bDoCheck || (eReturn == PVRSRV_OK)) && psCtx->ppsAutoHeapArray) -+ { -+ OSFreeMem(psCtx->ppsAutoHeapArray); -+ psCtx->ppsAutoHeapArray = NULL; -+ psCtx->uiAutoHeapCount = 0; -+ } -+ -+ return eReturn; -+} -+ -+/***************************************************************************** -+ * Devmem context functions * -+ *****************************************************************************/ -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection, -+ DEVMEM_HEAPCFGID uiHeapBlueprintID, -+ DEVMEM_CONTEXT **ppsCtxPtr) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEM_CONTEXT *psCtx; -+ /* handle to the server-side counterpart of the device memory -+ context (specifically, for handling mapping to device MMU) */ -+ IMG_HANDLE hDevMemServerContext; -+ IMG_HANDLE hPrivData; -+ IMG_BOOL bHeapCfgFWId = (uiHeapBlueprintID == DEVMEM_HEAPCFG_FORFW); -+ -+ PVR_GOTO_IF_NOMEM(ppsCtxPtr, eError, e0); -+ -+ psCtx = OSAllocMem(sizeof(*psCtx)); -+ PVR_GOTO_IF_NOMEM(psCtx, eError, e0); -+ -+ psCtx->uiNumHeaps = 0; -+ -+ psCtx->hDevConnection = hDevConnection; -+ -+ /* Create (server-side) Device Memory context */ -+ eError = BridgeDevmemIntCtxCreate(GetBridgeHandle(psCtx->hDevConnection), -+ bHeapCfgFWId, -+ &hDevMemServerContext, -+ &hPrivData, -+ &psCtx->ui32CPUCacheLineSize); -+ PVR_GOTO_IF_ERROR(eError, e1); -+ -+ psCtx->hDevMemServerContext = hDevMemServerContext; -+ psCtx->hPrivData = hPrivData; -+ -+ /* automagic heap creation */ -+ psCtx->uiAutoHeapCount = 0; -+ -+ eError = PopulateContextFromBlueprint(psCtx, uiHeapBlueprintID); -+ 
PVR_GOTO_IF_ERROR(eError, e2); -+ -+ *ppsCtxPtr = psCtx; -+ -+ PVR_ASSERT(psCtx->uiNumHeaps == psCtx->uiAutoHeapCount); -+ return PVRSRV_OK; -+ -+ /* error exit paths follow */ -+ -+e2: -+ PVR_ASSERT(psCtx->uiAutoHeapCount == 0); -+ PVR_ASSERT(psCtx->uiNumHeaps == 0); -+ BridgeDevmemIntCtxDestroy(GetBridgeHandle(psCtx->hDevConnection), hDevMemServerContext); -+ -+e1: -+ OSFreeMem(psCtx); -+ -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx, -+ IMG_HANDLE *hPrivData) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_GOTO_IF_INVALID_PARAM(psCtx, eError, e0); -+ PVR_GOTO_IF_INVALID_PARAM(hPrivData, eError, e0); -+ -+ *hPrivData = psCtx->hPrivData; -+ return PVRSRV_OK; -+ -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_GOTO_IF_INVALID_PARAM(psCtx, eError, e0); -+ return PVRSRV_OK; -+ -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemFindHeapByName(const struct DEVMEM_CONTEXT_TAG *psCtx, -+ const IMG_CHAR *pszHeapName, -+ struct DEVMEM_HEAP_TAG **ppsHeapRet) -+{ -+ IMG_UINT32 uiHeapIndex; -+ -+ /* N.B. This func is only useful for finding "automagic" heaps by name */ -+ for (uiHeapIndex = 0; -+ uiHeapIndex < psCtx->uiAutoHeapCount; -+ uiHeapIndex++) -+ { -+ if (!OSStringNCompare(psCtx->ppsAutoHeapArray[uiHeapIndex]->pszName, pszHeapName, OSStringLength(psCtx->ppsAutoHeapArray[uiHeapIndex]->pszName) + 1)) -+ { -+ *ppsHeapRet = psCtx->ppsAutoHeapArray[uiHeapIndex]; -+ return PVRSRV_OK; -+ } -+ } -+ -+ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemDestroyContext(DEVMEM_CONTEXT *psCtx) -+{ -+ PVRSRV_ERROR eError; -+ IMG_BOOL bDoCheck = IMG_TRUE; -+ -+#if defined(__KERNEL__) -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) -+ { -+ bDoCheck = IMG_FALSE; -+ } -+#endif -+ -+ PVR_RETURN_IF_INVALID_PARAM(psCtx); -+ -+ eError = UnpopulateContextFromBlueprint(psCtx); -+ if (bDoCheck && eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: UnpopulateContextFromBlueprint failed (%d) leaving %d heaps", -+ __func__, eError, psCtx->uiNumHeaps)); -+ goto e1; -+ } -+ -+ eError = DestroyServerResource(psCtx->hDevConnection, -+ NULL, -+ BridgeDevmemIntCtxDestroy, -+ psCtx->hDevMemServerContext); -+ if (bDoCheck) -+ { -+ PVR_LOG_GOTO_IF_ERROR(eError, "BridgeDevMemIntCtxDestroy", e1); -+ -+ /* should be no more heaps left */ -+ if (psCtx->uiNumHeaps) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Additional heaps remain in DEVMEM_CONTEXT", -+ __func__)); -+ eError = PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT; -+ goto e1; -+ } -+ } -+ -+ OSCachedMemSet(psCtx, 0, sizeof(*psCtx)); -+ OSFreeMem(psCtx); -+ -+e1: -+ return eError; -+} -+ -+/***************************************************************************** -+ * Devmem heap query functions * -+ *****************************************************************************/ -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemHeapConfigCount(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_UINT32 *puiNumHeapConfigsOut) -+{ -+ PVRSRV_ERROR eError; -+ eError = BridgeHeapCfgHeapConfigCount(GetBridgeHandle(hDevConnection), -+ puiNumHeapConfigsOut); -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_UINT32 uiHeapConfigIndex, -+ 
IMG_UINT32 *puiNumHeapsOut) -+{ -+ PVRSRV_ERROR eError; -+ eError = BridgeHeapCfgHeapCount(GetBridgeHandle(hDevConnection), -+ uiHeapConfigIndex, -+ puiNumHeapsOut); -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemHeapConfigName(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_UINT32 uiHeapConfigIndex, -+ IMG_CHAR *pszConfigNameOut, -+ IMG_UINT32 uiConfigNameBufSz) -+{ -+ PVRSRV_ERROR eError; -+ eError = BridgeHeapCfgHeapConfigName(GetBridgeHandle(hDevConnection), -+ uiHeapConfigIndex, -+ uiConfigNameBufSz, -+ pszConfigNameOut); -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_UINT32 uiHeapConfigIndex, -+ IMG_UINT32 uiHeapIndex, -+ IMG_CHAR *pszHeapNameOut, -+ IMG_UINT32 uiHeapNameBufSz, -+ IMG_DEV_VIRTADDR *psDevVAddrBaseOut, -+ IMG_DEVMEM_SIZE_T *puiHeapLengthOut, -+ IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut, -+ IMG_UINT32 *puiLog2DataPageSizeOut, -+ IMG_UINT32 *puiLog2ImportAlignmentOut) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = BridgeHeapCfgHeapDetails(GetBridgeHandle(hDevConnection), -+ uiHeapConfigIndex, -+ uiHeapIndex, -+ uiHeapNameBufSz, -+ pszHeapNameOut, -+ psDevVAddrBaseOut, -+ puiHeapLengthOut, -+ puiReservedRegionLengthOut, -+ puiLog2DataPageSizeOut, -+ puiLog2ImportAlignmentOut); -+ -+ VG_MARK_INITIALIZED(pszHeapNameOut, uiHeapNameBufSz); -+ -+ return eError; -+} -+ -+/***************************************************************************** -+ * Devmem heap functions * -+ *****************************************************************************/ -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemGetHeapInt(DEVMEM_HEAP *psHeap, -+ IMG_HANDLE *phDevmemHeap) -+{ -+ PVR_RETURN_IF_INVALID_PARAM(psHeap); -+ *phDevmemHeap = psHeap->hDevMemServerHeap; -+ return PVRSRV_OK; -+} -+ -+/* See devicemem.h for important notes regarding the arguments -+ to this function */ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemCreateHeap(DEVMEM_CONTEXT *psCtx, -+ IMG_DEV_VIRTADDR sBaseAddress, -+ IMG_DEVMEM_SIZE_T uiLength, -+ IMG_DEVMEM_SIZE_T uiReservedRegionLength, -+ IMG_UINT32 ui32Log2Quantum, -+ IMG_UINT32 ui32Log2ImportAlignment, -+ const IMG_CHAR *pszName, -+ DEVMEM_HEAPCFGID uiHeapBlueprintID, -+ IMG_UINT32 uiHeapIndex, -+ DEVMEM_HEAP **ppsHeapPtr) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_ERROR eError2; -+ DEVMEM_HEAP *psHeap; -+ /* handle to the server-side counterpart of the device memory heap -+ (specifically, for handling mapping to device MMU) */ -+ IMG_HANDLE hDevMemServerHeap; -+ IMG_UINT32 ui32Policy = RA_POLICY_DEFAULT, ui32PolicyVMRA; -+ -+ IMG_CHAR aszBuf[100]; -+ IMG_CHAR *pszStr; -+ IMG_UINT32 ui32pszStrSize; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(ppsHeapPtr, "ppsHeapPtr"); -+ -+ /* Reserved VA space of a heap must always be multiple of DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY. -+ * Granularity has been chosen to support the max possible practically used OS page size. 
*/ -+ PVR_LOG_RETURN_IF_INVALID_PARAM((uiReservedRegionLength % DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY) == 0, "uiReservedRegionLength"); -+ -+ ui32PolicyVMRA = RA_POLICY_DEFAULT; -+ -+ PVR_ASSERT(uiReservedRegionLength + DEVMEM_HEAP_MINIMUM_SIZE <= uiLength); -+ -+ psHeap = OSAllocMem(sizeof(*psHeap)); -+ PVR_GOTO_IF_NOMEM(psHeap, eError, e0); -+ -+ /* Need to keep local copy of heap name, so caller may free theirs */ -+ ui32pszStrSize = OSStringLength(pszName) + 1; -+ pszStr = OSAllocMem(ui32pszStrSize); -+ PVR_GOTO_IF_NOMEM(pszStr, eError, e1); -+ OSStringLCopy(pszStr, pszName, ui32pszStrSize); -+ psHeap->pszName = pszStr; -+ -+ psHeap->uiSize = uiLength; -+ psHeap->uiReservedRegionSize = uiReservedRegionLength; -+ psHeap->sBaseAddress = sBaseAddress; -+ psHeap->bPremapped = IMG_FALSE; -+ OSAtomicWrite(&psHeap->hImportCount, 0); -+ -+ OSSNPrintf(aszBuf, sizeof(aszBuf), -+ "NDM heap '%s' (suballocs) ctx:%p", -+ pszName, psCtx); -+ ui32pszStrSize = OSStringLength(aszBuf) + 1; -+ pszStr = OSAllocMem(ui32pszStrSize); -+ PVR_GOTO_IF_NOMEM(pszStr, eError, e2); -+ OSStringLCopy(pszStr, aszBuf, ui32pszStrSize); -+ psHeap->pszSubAllocRAName = pszStr; -+ -+#if defined(__KERNEL__) -+ if (uiHeapBlueprintID == DEVMEM_HEAPCFG_FORFW) -+ { -+ void *pvAppHintState = NULL; -+ IMG_UINT32 ui32FirmwarePolicydefault = PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY; -+ IMG_UINT32 ui32FirmwarePolicy = PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY; -+ OSCreateAppHintState(&pvAppHintState); -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, DevMemFWHeapPolicy, -+ &ui32FirmwarePolicydefault, &ui32FirmwarePolicy); -+ ui32PolicyVMRA = ui32Policy = ui32FirmwarePolicy; -+ OSFreeAppHintState(pvAppHintState); -+ -+ /* Flag the change from default setting */ -+ if (ui32FirmwarePolicy != ui32FirmwarePolicydefault) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: %s: DevMemFWHeapPolicy set to %u, default %u", -+ __func__, pszStr, -+ ui32FirmwarePolicy, ui32FirmwarePolicydefault)); -+ } -+ } -+#endif -+ -+#if defined(PDUMP) -+ /* The META heap is shared globally so a single physical memory import -+ * may be used to satisfy allocations of different processes. -+ * This is problematic when PDumping because the physical memory -+ * import used to satisfy a new allocation may actually have been -+ * imported (and thus the PDump MALLOC generated) before the PDump -+ * client was started, leading to the MALLOC being missing. 
-+ * -+ * This is solved by disabling splitting of imports for the META physmem -+ * RA, meaning that every firmware allocation gets its own import, thus -+ * ensuring the MALLOC is present for every allocation made within the -+ * pdump capture range -+ */ -+ if (uiHeapBlueprintID == DEVMEM_HEAPCFG_FORFW) -+ { -+ ui32Policy |= RA_POLICY_NO_SPLIT; -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(uiHeapBlueprintID); -+#endif -+ -+ psHeap->psSubAllocRA = RA_Create(psHeap->pszSubAllocRAName, -+ /* Subsequent imports: */ -+ ui32Log2Quantum, -+ RA_LOCKCLASS_2, -+ SubAllocImportAlloc, -+ SubAllocImportFree, -+ (RA_PERARENA_HANDLE) psHeap, -+ ui32Policy); -+ if (psHeap->psSubAllocRA == NULL) -+ { -+ eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA; -+ goto e3; -+ } -+ -+ psHeap->uiLog2ImportAlignment = ui32Log2ImportAlignment; -+ psHeap->uiLog2Quantum = ui32Log2Quantum; -+ -+ if (!OSStringNCompare(pszName, RGX_GENERAL_SVM_HEAP_IDENT, sizeof(RGX_GENERAL_SVM_HEAP_IDENT))) -+ { -+ /* The SVM heap normally starts out as this type though -+ it may transition to DEVMEM_HEAP_MANAGER_USER -+ on platforms with more processor virtual address -+ bits than device virtual address bits */ -+ psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_KERNEL; -+ } -+ else if (uiReservedRegionLength != 0) -+ { -+ /* Heaps which specify reserved VA space range are dual managed: -+ * - sBaseAddress to (sBaseAddress+uiReservedRegionLength-1): User managed -+ * - (sBaseAddress+uiReservedRegionLength) to uiLength: RA managed -+ */ -+ psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_DUAL_USER_RA; -+ } -+ else -+ { -+ /* Otherwise, heap manager is decided (USER or RA) at first map */ -+ psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_UNKNOWN; -+ } -+ -+ /* Mark the heap to be managed by RA */ -+ if (!OSStringNCompare(pszName, RGX_VK_CAPT_REPLAY_HEAP_IDENT, -+ sizeof(RGX_VK_CAPT_REPLAY_HEAP_IDENT))) -+ { -+ psHeap->ui32HeapManagerFlags |= DEVMEM_HEAP_MANAGER_RA; -+ } -+ -+ OSSNPrintf(aszBuf, sizeof(aszBuf), -+ "NDM heap '%s' (QVM) ctx:%p", -+ pszName, psCtx); -+ ui32pszStrSize = OSStringLength(aszBuf) + 1; -+ pszStr = OSAllocMem(ui32pszStrSize); -+ if (pszStr == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto e4; -+ } -+ OSStringLCopy(pszStr, aszBuf, ui32pszStrSize); -+ psHeap->pszQuantizedVMRAName = pszStr; -+ -+ psHeap->psQuantizedVMRA = RA_Create(psHeap->pszQuantizedVMRAName, -+ /* Subsequent import: */ -+ 0, RA_LOCKCLASS_1, NULL, NULL, -+ (RA_PERARENA_HANDLE) psHeap, -+ ui32PolicyVMRA); -+ if (psHeap->psQuantizedVMRA == NULL) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA, e5); -+ } -+ -+ if (!RA_Add(psHeap->psQuantizedVMRA, -+ /* Make sure the VMRA doesn't allocate from reserved VAs */ -+ (RA_BASE_T)sBaseAddress.uiAddr + uiReservedRegionLength, -+ (RA_LENGTH_T)uiLength, -+ (RA_FLAGS_T)0, /* This RA doesn't use or need flags */ -+ NULL /* per ispan handle */)) -+ { -+ RA_Delete(psHeap->psQuantizedVMRA); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA, e5); -+ } -+ -+ psHeap->psCtx = psCtx; -+ -+ -+ /* Create server-side counterpart of Device Memory heap */ -+ eError = BridgeDevmemIntHeapCreate(GetBridgeHandle(psCtx->hDevConnection), -+ psCtx->hDevMemServerContext, -+ uiHeapBlueprintID, -+ uiHeapIndex, -+ sBaseAddress, -+ ui32Log2Quantum, -+ &hDevMemServerHeap); -+ PVR_GOTO_IF_ERROR(eError, e6); -+ -+ psHeap->hDevMemServerHeap = hDevMemServerHeap; -+ -+ eError = OSLockCreate(&psHeap->hLock); -+ PVR_GOTO_IF_ERROR(eError, e7); -+ -+ 
psHeap->psCtx->uiNumHeaps++; -+ *ppsHeapPtr = psHeap; -+ -+#if defined(PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING) -+ psHeap->psMemDescList = NULL; -+#endif /* PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING */ -+ -+ return PVRSRV_OK; -+ -+ /* error exit paths */ -+e7: -+ eError2 = BridgeDevmemIntHeapDestroy(GetBridgeHandle(psCtx->hDevConnection), -+ psHeap->hDevMemServerHeap); -+ PVR_ASSERT (eError2 == PVRSRV_OK); -+e6: -+ if (psHeap->psQuantizedVMRA) -+ RA_Delete(psHeap->psQuantizedVMRA); -+e5: -+ if (psHeap->pszQuantizedVMRAName) -+ OSFreeMem(psHeap->pszQuantizedVMRAName); -+e4: -+ RA_Delete(psHeap->psSubAllocRA); -+e3: -+ OSFreeMem(psHeap->pszSubAllocRAName); -+e2: -+ OSFreeMem(psHeap->pszName); -+e1: -+ OSFreeMem(psHeap); -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemGetHeapBaseDevVAddr(struct DEVMEM_HEAP_TAG *psHeap, -+ IMG_DEV_VIRTADDR *pDevVAddr) -+{ -+ PVR_RETURN_IF_INVALID_PARAM(psHeap); -+ -+ *pDevVAddr = psHeap->sBaseAddress; -+ -+ return PVRSRV_OK; -+} -+ -+IMG_INTERNAL DEVMEM_SIZE_T -+DevmemGetHeapSize(struct DEVMEM_HEAP_TAG *psHeap) -+{ -+ PVR_RETURN_IF_INVALID_PARAM(psHeap != NULL); -+ -+ return psHeap->uiSize; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum, -+ IMG_DEVMEM_SIZE_T *puiSize, -+ IMG_DEVMEM_ALIGN_T *puiAlign) -+{ -+ IMG_DEVMEM_SIZE_T uiSize = *puiSize; -+ IMG_DEVMEM_ALIGN_T uiAlign = *puiAlign; -+ -+ /* Just in case someone changes definition of IMG_DEVMEM_ALIGN_T. */ -+ static_assert(sizeof(unsigned long long) == sizeof(uiAlign), -+ "invalid uiAlign size"); -+ /* This value is used for shifting so it cannot be greater than number -+ * of bits in unsigned long long (sizeof(1ULL)). Using greater value is -+ * undefined behaviour. */ -+ if (uiLog2Quantum >= sizeof(unsigned long long) * 8) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if ((1ULL << uiLog2Quantum) > uiAlign) -+ { -+ uiAlign = 1ULL << uiLog2Quantum; -+ } -+ uiSize = PVR_ALIGN(uiSize, uiAlign); -+ -+ *puiSize = uiSize; -+ *puiAlign = uiAlign; -+ -+ return PVRSRV_OK; -+} -+ -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemDestroyHeap(DEVMEM_HEAP *psHeap) -+{ -+ PVRSRV_ERROR eError; -+ IMG_INT uiImportCount; -+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) -+ IMG_BOOL bDoCheck = IMG_TRUE; -+#if defined(__KERNEL__) -+ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) -+ { -+ bDoCheck = IMG_FALSE; -+ } -+#endif -+#endif -+ -+ PVR_RETURN_IF_INVALID_PARAM(psHeap); -+ -+ uiImportCount = OSAtomicRead(&psHeap->hImportCount); -+ if (uiImportCount > 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%d(%s) leaks remain", uiImportCount, psHeap->pszName)); -+#if defined(__KERNEL__) -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ PVR_DPF((PVR_DBG_ERROR, "Details of remaining allocated device memory (for all processes):")); -+ RIDumpAllKM(); -+#else -+ PVR_DPF((PVR_DBG_ERROR, "Compile with PVRSRV_ENABLE_GPU_MEMORY_INFO=1 to get a full " -+ "list of all driver allocations.")); -+#endif -+#endif -+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) -+ if (bDoCheck) -+#endif -+ { -+ return PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP; -+ } -+ } -+ -+ eError = DestroyServerResource(psHeap->psCtx->hDevConnection, -+ NULL, -+ BridgeDevmemIntHeapDestroy, -+ psHeap->hDevMemServerHeap); -+ -+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) -+ if (bDoCheck) -+#endif -+ { -+ PVR_LOG_RETURN_IF_ERROR(eError, "BridgeDevmemIntHeapDestroy"); -+ } -+ -+ PVR_ASSERT(psHeap->psCtx->uiNumHeaps > 0); -+ psHeap->psCtx->uiNumHeaps--; -+ -+ 
OSLockDestroy(psHeap->hLock); -+ -+ if (psHeap->psQuantizedVMRA) -+ { -+ RA_Delete(psHeap->psQuantizedVMRA); -+ } -+ if (psHeap->pszQuantizedVMRAName) -+ { -+ OSFreeMem(psHeap->pszQuantizedVMRAName); -+ } -+ -+ RA_Delete(psHeap->psSubAllocRA); -+ OSFreeMem(psHeap->pszSubAllocRAName); -+ -+ OSFreeMem(psHeap->pszName); -+ -+ OSCachedMemSet(psHeap, 0, sizeof(*psHeap)); -+ OSFreeMem(psHeap); -+ -+ return PVRSRV_OK; -+} -+ -+/***************************************************************************** -+ * Devmem allocation/free functions * -+ *****************************************************************************/ -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemSubAllocateAndMap(IMG_UINT8 uiPreAllocMultiplier, -+ DEVMEM_HEAP *psHeap, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszText, -+ DEVMEM_MEMDESC **ppsMemDescPtr, -+ IMG_DEV_VIRTADDR *psDevVirtAddr) -+{ -+ PVRSRV_ERROR eError; -+ eError = DevmemSubAllocate(uiPreAllocMultiplier, -+ psHeap, -+ uiSize, -+ uiAlign, -+ uiFlags, -+ pszText, -+ ppsMemDescPtr); -+ PVR_GOTO_IF_ERROR(eError, fail_alloc); -+ -+ eError = DevmemMapToDevice(*ppsMemDescPtr, -+ psHeap, -+ psDevVirtAddr); -+ PVR_GOTO_IF_ERROR(eError, fail_map); -+ -+ return PVRSRV_OK; -+ -+fail_map: -+ DevmemFree(*ppsMemDescPtr); -+fail_alloc: -+ *ppsMemDescPtr = NULL; -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+ -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier, -+ DEVMEM_HEAP *psHeap, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszText, -+ DEVMEM_MEMDESC **ppsMemDescPtr) -+{ -+ RA_BASE_T uiAllocatedAddr = 0; -+ RA_LENGTH_T uiAllocatedSize; -+ RA_PERISPAN_HANDLE hImport; /* the "import" from which this sub-allocation came */ -+ PVRSRV_ERROR eError; -+ DEVMEM_MEMDESC *psMemDesc = NULL; -+ IMG_DEVMEM_OFFSET_T uiOffset = 0; -+ DEVMEM_IMPORT *psImport; -+ IMG_UINT32 ui32CPUCacheLineSize; -+ void *pvAddr = NULL; -+ -+ IMG_BOOL bImportClean; -+ IMG_BOOL bCPUCleanFlag = PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags); -+ IMG_BOOL bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags); -+ IMG_BOOL bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags); -+ IMG_BOOL bCPUCached = (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) || -+ PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags)); -+ IMG_BOOL bGPUCached = (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) || -+ PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags)); -+ IMG_BOOL bAlign = ! (PVRSRV_CHECK_NO_CACHE_LINE_ALIGN(uiFlags)); -+ PVRSRV_CACHE_OP eOp = PVRSRV_CACHE_OP_INVALIDATE; -+ IMG_UINT32 ui32CacheLineSize = 0; -+ DEVMEM_PROPERTIES_T uiProperties; -+ -+/* On nohw PDump builds, we try to minimise the amount -+ * of uninitialised data in captures. 
-+ */ -+#if defined(PDUMP) && defined(NO_HARDWARE) -+ if (PVRSRV_CHECK_CPU_WRITEABLE(uiFlags) && !PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags)) -+ { -+ uiFlags |= PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC; -+ bPoisonOnAlloc = IMG_TRUE; -+ } -+#endif -+ -+ if (uiFlags & PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC) -+ { -+ /* Deferred Allocation not supported on SubAllocs*/ -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failParams); -+ } -+ -+ PVR_GOTO_IF_INVALID_PARAM(psHeap, eError, failParams); -+ PVR_GOTO_IF_INVALID_PARAM(psHeap->psCtx, eError, failParams); -+ PVR_GOTO_IF_INVALID_PARAM(ppsMemDescPtr, eError, failParams); -+ -+ uiFlags = DevmemOverrideFlagsOrPassThrough(psHeap->psCtx->hDevConnection, uiFlags); -+ -+#if defined(__KERNEL__) -+ { -+ /* The hDevConnection holds two different types of pointers depending on the -+ * address space in which it is used. -+ * In this instance the variable points to the device node in server */ -+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection; -+ ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, SLC_CACHE_LINE_SIZE_BITS)); -+ } -+#else -+ ui32CacheLineSize = ROGUE_CACHE_LINE_SIZE; -+#endif -+ -+ /* The following logic makes sure that any cached memory is aligned to both the CPU and GPU. -+ * To be aligned on both you have to take the Lowest Common Multiple (LCM) of the cache line sizes of each. -+ * As the possibilities are all powers of 2 then simply the largest number can be picked as the LCM. -+ * Therefore this algorithm just picks the highest from the CPU, GPU and given alignments. -+ */ -+ ui32CPUCacheLineSize = psHeap->psCtx->ui32CPUCacheLineSize; -+ /* If the CPU cache line size is larger than the alignment given then it is the lowest common multiple -+ * Also checking if the allocation is going to be cached on the CPU -+ * Currently there is no check for the validity of the cache coherent option. -+ * In this case, the alignment could be applied but the mode could still fall back to uncached. -+ */ -+ if (bAlign && ui32CPUCacheLineSize > uiAlign && bCPUCached) -+ { -+ uiAlign = ui32CPUCacheLineSize; -+ } -+ -+ /* If the GPU cache line size is larger than the alignment given then it is the lowest common multiple -+ * Also checking if the allocation is going to be cached on the GPU via checking for any of the cached options. -+ * Currently there is no check for the validity of the cache coherent option. -+ * In this case, the alignment could be applied but the mode could still fall back to uncached. -+ */ -+ if (bAlign && ui32CacheLineSize > uiAlign && bGPUCached) -+ { -+ uiAlign = ui32CacheLineSize; -+ } -+ -+ eError = DevmemValidateParams(uiSize, -+ uiAlign, -+ &uiFlags); -+ PVR_GOTO_IF_ERROR(eError, failParams); -+ -+ eError = DevmemMemDescAlloc(&psMemDesc); -+ PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); -+ -+ /* No request for exportable memory so use the RA */ -+ eError = RA_Alloc(psHeap->psSubAllocRA, -+ uiSize, -+ uiPreAllocMultiplier, -+ uiFlags, -+ uiAlign, -+ pszText, -+ &uiAllocatedAddr, -+ &uiAllocatedSize, -+ &hImport); -+ PVR_GOTO_IF_ERROR(eError, failDeviceMemAlloc); -+ -+ psImport = hImport; -+ -+ /* This assignment is assuming the RA returns an hImport where suballocations -+ * can be made from if uiSize is NOT a page multiple of the passed heap. -+ * -+ * So we check if uiSize is a page multiple and mark it as exportable -+ * if it is not. 
-+ * */ -+ OSLockAcquire(psImport->hLock); -+ if (!(uiSize & ((1ULL << psHeap->uiLog2Quantum) - 1)) && -+ (uiPreAllocMultiplier == RA_NO_IMPORT_MULTIPLIER)) -+ { -+ psImport->uiProperties |= DEVMEM_PROPERTIES_EXPORTABLE; -+ } -+ psImport->uiProperties |= DEVMEM_PROPERTIES_SUBALLOCATABLE; -+ uiProperties = psImport->uiProperties; -+ OSLockRelease(psImport->hLock); -+ -+ uiOffset = uiAllocatedAddr - psImport->sDeviceImport.sDevVAddr.uiAddr; -+ -+#if defined(PDUMP) && defined(DEBUG) -+#if defined(__KERNEL__) -+ PDUMPCOMMENTWITHFLAGS(PMR_DeviceNode((PMR*)psImport->hPMR), PDUMP_CONT, -+ "Suballocated %u Byte for \"%s\" from PMR with handle ID: 0x%p (PID %u)", -+ (IMG_UINT32) uiSize, pszText, psImport->hPMR, OSGetCurrentProcessID()); -+#else -+ PDUMPCOMMENTF(psHeap->psCtx->hDevConnection, PDUMP_FLAGS_CONTINUOUS, -+ "Suballocated %u Byte for \"%s\" from PMR with handle ID: %p (PID %u)", -+ (IMG_UINT32) uiSize, -+ pszText, -+ psImport->hPMR, -+ OSGetCurrentProcessID()); -+#endif -+#endif -+ -+ DevmemMemDescInit(psMemDesc, -+ uiOffset, -+ psImport, -+ uiSize); -+ -+#if defined(DEBUG) -+ DevmemMemDescSetPoF(psMemDesc, uiFlags); -+#endif -+ -+ bImportClean = ((uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_CLEAN) != 0); -+ -+ /* Zero the memory */ -+ if (bZero) -+ { -+ /* Has the import been zeroed on allocation and were no suballocations returned to it so far? */ -+ bImportClean = bImportClean && ((uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_ZEROED) != 0); -+ -+ if (!bImportClean) -+ { -+ eOp = PVRSRV_CACHE_OP_FLUSH; -+ -+ eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvAddr); -+ PVR_GOTO_IF_ERROR(eError, failMaintenance); -+ -+ /* uiSize is a 64-bit quantity whereas the 3rd argument -+ * to OSDeviceMemSet is a 32-bit quantity on 32-bit systems -+ * hence a compiler warning of implicit cast and loss of data. -+ * Added explicit cast and assert to remove warning. -+ */ -+ PVR_ASSERT(uiSize < IMG_UINT32_MAX); -+ -+ DevmemCPUMemSet(pvAddr, 0, uiSize, uiFlags); -+ -+#if defined(PDUMP) -+ DevmemPDumpLoadZeroMem(psMemDesc, 0, uiSize, PDUMP_FLAGS_CONTINUOUS); -+#endif -+ } -+ } -+ else if (bPoisonOnAlloc) -+ { -+ /* Has the import been poisoned on allocation and were no suballocations returned to it so far? 
*/ -+ bPoisonOnAlloc = (uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_POISONED) != 0; -+ -+ if (!bPoisonOnAlloc) -+ { -+ eOp = PVRSRV_CACHE_OP_FLUSH; -+ -+ eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvAddr); -+ PVR_GOTO_IF_ERROR(eError, failMaintenance); -+ -+ DevmemCPUMemSet(pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE, uiSize, uiFlags); -+ -+ bPoisonOnAlloc = IMG_TRUE; -+ } -+ } -+ -+ /* Flush or invalidate */ -+ if (bCPUCached && !bImportClean && (bZero || bCPUCleanFlag || bPoisonOnAlloc)) -+ { -+ eError = BridgeCacheOpExec (GetBridgeHandle(psMemDesc->psImport->hDevConnection), -+ psMemDesc->psImport->hPMR, -+ (IMG_UINT64)(uintptr_t) -+ pvAddr - psMemDesc->uiOffset, -+ psMemDesc->uiOffset, -+ psMemDesc->uiAllocSize, -+ eOp); -+ PVR_GOTO_IF_ERROR(eError, failMaintenance); -+ } -+ -+ if (pvAddr) -+ { -+ DevmemReleaseCpuVirtAddr(psMemDesc); -+ pvAddr = NULL; -+ } -+ -+ /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when -+ * the allocation gets mapped/unmapped -+ */ -+ CheckAnnotationLength(pszText); -+ OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI)) -+ { -+ /* Attach RI information */ -+ eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection), -+ psMemDesc->psImport->hPMR, -+ OSStringNLength(psMemDesc->szText, DEVMEM_ANNOTATION_MAX_LEN), -+ psMemDesc->szText, -+ psMemDesc->uiOffset, -+ uiAllocatedSize, -+ IMG_FALSE, -+ IMG_TRUE, -+ &(psMemDesc->hRIHandle)); -+ PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); -+ } -+#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ -+ PVR_UNREFERENCED_PARAMETER (pszText); -+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ -+ -+ *ppsMemDescPtr = psMemDesc; -+ -+ return PVRSRV_OK; -+ -+ /* error exit paths follow */ -+ -+failMaintenance: -+ if (pvAddr) -+ { -+ DevmemReleaseCpuVirtAddr(psMemDesc); -+ pvAddr = NULL; -+ } -+ DevmemMemDescRelease(psMemDesc); -+ psMemDesc = NULL; /* Make sure we don't do a discard after the release */ -+failDeviceMemAlloc: -+ if (psMemDesc) -+ { -+ DevmemMemDescDiscard(psMemDesc); -+ } -+failMemDescAlloc: -+failParams: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed! Error is %s. Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC, -+ __func__, -+ PVRSRVGETERRORSTRING(eError), -+ uiSize)); -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ IMG_UINT32 uiLog2HeapPageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszText, -+ DEVMEM_MEMDESC **ppsMemDescPtr) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEM_MEMDESC *psMemDesc = NULL; -+ DEVMEM_IMPORT *psImport; -+ IMG_UINT32 ui32MappingTable = 0; -+ -+/* On nohw PDump builds, we try to minimise the amount -+ * of uninitialised data in captures. 
-+ */ -+#if defined(PDUMP) && defined(NO_HARDWARE) -+ if (PVRSRV_CHECK_CPU_WRITEABLE(uiFlags) && !PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags)) -+ { -+ uiFlags |= PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC; -+ } -+#endif -+ -+ eError = DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize, -+ &uiSize, -+ &uiAlign); -+ PVR_GOTO_IF_ERROR(eError, failParams); -+ -+ uiFlags = DevmemOverrideFlagsOrPassThrough(hDevConnection, uiFlags); -+ -+ eError = DevmemValidateParams(uiSize, -+ uiAlign, -+ &uiFlags); -+ PVR_GOTO_IF_ERROR(eError, failParams); -+ -+ eError = DevmemMemDescAlloc(&psMemDesc); -+ PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); -+ -+ eError = AllocateDeviceMemory(hDevConnection, -+ uiLog2HeapPageSize, -+ uiSize, -+ 1, -+ 1, -+ &ui32MappingTable, -+ uiAlign, -+ uiFlags, -+ IMG_TRUE, -+ pszText, -+ &psImport); -+ PVR_GOTO_IF_ERROR(eError, failDeviceMemAlloc); -+ -+ DevmemMemDescInit(psMemDesc, -+ 0, -+ psImport, -+ uiSize); -+ -+#if defined(DEBUG) -+ DevmemMemDescSetPoF(psMemDesc, uiFlags); -+#endif -+ -+ *ppsMemDescPtr = psMemDesc; -+ -+ /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when -+ * the allocation gets mapped/unmapped -+ */ -+ CheckAnnotationLength(pszText); -+ OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) -+ { -+ eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection), -+ psImport->hPMR); -+ PVR_LOG_IF_ERROR(eError, "BridgeRIWritePMREntry"); -+ -+ /* Attach RI information */ -+ eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psImport->hDevConnection), -+ psImport->hPMR, -+ sizeof("^"), -+ "^", -+ psMemDesc->uiOffset, -+ uiSize, -+ IMG_FALSE, -+ IMG_FALSE, -+ &psMemDesc->hRIHandle); -+ PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); -+ } -+#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ -+ PVR_UNREFERENCED_PARAMETER (pszText); -+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ -+ -+ return PVRSRV_OK; -+ -+ /* error exit paths follow */ -+ -+failDeviceMemAlloc: -+ DevmemMemDescDiscard(psMemDesc); -+ -+failMemDescAlloc: -+failParams: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed! Error is %s. Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC, -+ __func__, -+ PVRSRVGETERRORSTRING(eError), -+ uiSize)); -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ IMG_UINT32 uiLog2HeapPageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszText, -+ DEVMEM_MEMDESC **ppsMemDescPtr) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEM_MEMDESC *psMemDesc = NULL; -+ DEVMEM_IMPORT *psImport; -+ -+/* On nohw PDump builds, we try to minimise the amount -+ * of uninitialised data in captures. 
-+ */ -+#if defined(PDUMP) && defined(NO_HARDWARE) -+ if (PVRSRV_CHECK_CPU_WRITEABLE(uiFlags) && !PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags)) -+ { -+ uiFlags |= PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC; -+ } -+#endif -+ -+ eError = DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize, -+ &uiSize, -+ &uiAlign); -+ PVR_GOTO_IF_ERROR(eError, failParams); -+ -+ uiFlags = DevmemOverrideFlagsOrPassThrough(hDevConnection, uiFlags); -+ -+ eError = DevmemValidateParams(uiSize, -+ uiAlign, -+ &uiFlags); -+ PVR_GOTO_IF_ERROR(eError, failParams); -+ -+ eError = DevmemMemDescAlloc(&psMemDesc); -+ PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); -+ -+ eError = AllocateDeviceMemory(hDevConnection, -+ uiLog2HeapPageSize, -+ uiSize, -+ ui32NumPhysChunks, -+ ui32NumVirtChunks, -+ pui32MappingTable, -+ uiAlign, -+ uiFlags, -+ IMG_TRUE, -+ pszText, -+ &psImport); -+ PVR_GOTO_IF_ERROR(eError, failDeviceMemAlloc); -+ -+ DevmemMemDescInit(psMemDesc, -+ 0, -+ psImport, -+ uiSize); -+ -+#if defined(DEBUG) -+ DevmemMemDescSetPoF(psMemDesc, uiFlags); -+#endif -+ -+ /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when -+ * the allocation gets mapped/unmapped -+ */ -+ CheckAnnotationLength(pszText); -+ OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) -+ { -+ eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection), -+ psImport->hPMR); -+ PVR_LOG_IF_ERROR(eError, "BridgeRIWritePMREntry"); -+ -+ /* Attach RI information */ -+ eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection), -+ psMemDesc->psImport->hPMR, -+ sizeof("^"), -+ "^", -+ psMemDesc->uiOffset, -+ uiSize, -+ IMG_FALSE, -+ IMG_FALSE, -+ &psMemDesc->hRIHandle); -+ PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); -+ } -+#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ -+ PVR_UNREFERENCED_PARAMETER (pszText); -+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ -+ -+ *ppsMemDescPtr = psMemDesc; -+ -+ return PVRSRV_OK; -+ -+ /* error exit paths follow */ -+ -+failDeviceMemAlloc: -+ DevmemMemDescDiscard(psMemDesc); -+ -+failMemDescAlloc: -+failParams: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed! Error is %s. 
Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC, -+ __func__, -+ PVRSRVGETERRORSTRING(eError), -+ uiSize)); -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hServerHandle, -+ IMG_HANDLE *hLocalImportHandle) -+{ -+ return BridgePMRMakeLocalImportHandle(GetBridgeHandle(hDevConnection), -+ hServerHandle, -+ hLocalImportHandle); -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hLocalImportHandle) -+{ -+ return DestroyServerResource(hDevConnection, -+ NULL, -+ BridgePMRUnmakeLocalImportHandle, -+ hLocalImportHandle); -+} -+ -+/***************************************************************************** -+ * Devmem unsecure export functions * -+ *****************************************************************************/ -+ -+#if defined(SUPPORT_INSECURE_EXPORT) -+ -+static PVRSRV_ERROR -+_Mapping_Export(DEVMEM_IMPORT *psImport, -+ DEVMEM_EXPORTHANDLE *phPMRExportHandlePtr, -+ DEVMEM_EXPORTKEY *puiExportKeyPtr, -+ DEVMEM_SIZE_T *puiSize, -+ DEVMEM_LOG2ALIGN_T *puiLog2Contig) -+{ -+ /* Gets an export handle and key for the PMR used for this mapping */ -+ /* Can only be done if there are no suballocations for this mapping */ -+ -+ PVRSRV_ERROR eError; -+ DEVMEM_EXPORTHANDLE hPMRExportHandle; -+ DEVMEM_EXPORTKEY uiExportKey; -+ IMG_DEVMEM_SIZE_T uiSize; -+ IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig; -+ -+ PVR_GOTO_IF_INVALID_PARAM(psImport, eError, failParams); -+ -+ if ((GetImportProperties(psImport) & DEVMEM_PROPERTIES_EXPORTABLE) == 0) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION, failParams); -+ } -+ -+ eError = BridgePMRExportPMR(GetBridgeHandle(psImport->hDevConnection), -+ psImport->hPMR, -+ &hPMRExportHandle, -+ &uiSize, -+ &uiLog2Contig, -+ &uiExportKey); -+ PVR_GOTO_IF_ERROR(eError, failExport); -+ -+ PVR_ASSERT(uiSize == psImport->uiSize); -+ -+ *phPMRExportHandlePtr = hPMRExportHandle; -+ *puiExportKeyPtr = uiExportKey; -+ *puiSize = uiSize; -+ *puiLog2Contig = uiLog2Contig; -+ -+ return PVRSRV_OK; -+ -+ /* error exit paths follow */ -+ -+failExport: -+failParams: -+ -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+ -+} -+ -+static void -+_Mapping_Unexport(DEVMEM_IMPORT *psImport, -+ DEVMEM_EXPORTHANDLE hPMRExportHandle) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT (psImport != NULL); -+ -+ eError = DestroyServerResource(psImport->hDevConnection, -+ NULL, -+ BridgePMRUnexportPMR, -+ hPMRExportHandle); -+ PVR_ASSERT(eError == PVRSRV_OK); -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemExport(DEVMEM_MEMDESC *psMemDesc, -+ DEVMEM_EXPORTCOOKIE *psExportCookie) -+{ -+ /* Caller to provide storage for export cookie struct */ -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hPMRExportHandle = 0; -+ IMG_UINT64 uiPMRExportPassword = 0; -+ IMG_DEVMEM_SIZE_T uiSize = 0; -+ IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig = 0; -+ -+ PVR_GOTO_IF_INVALID_PARAM(psMemDesc, eError, e0); -+ PVR_GOTO_IF_INVALID_PARAM(psExportCookie, eError, e0); -+ -+ if (DEVMEM_PROPERTIES_EXPORTABLE != -+ (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_EXPORTABLE)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: This Memory (0x%p) cannot be exported!...", -+ __func__, psMemDesc)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_REQUEST, e0); -+ } -+ -+ eError = _Mapping_Export(psMemDesc->psImport, -+ &hPMRExportHandle, -+ &uiPMRExportPassword, -+ &uiSize, -+ &uiLog2Contig); -+ if (eError != PVRSRV_OK) -+ { -+ psExportCookie->uiSize = 0; -+ goto e0; 
-+ } -+ -+ psExportCookie->hPMRExportHandle = hPMRExportHandle; -+ psExportCookie->uiPMRExportPassword = uiPMRExportPassword; -+ psExportCookie->uiSize = uiSize; -+ psExportCookie->uiLog2ContiguityGuarantee = uiLog2Contig; -+ -+ return PVRSRV_OK; -+ -+ /* error exit paths follow */ -+ -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+IMG_INTERNAL void -+DevmemUnexport(DEVMEM_MEMDESC *psMemDesc, -+ DEVMEM_EXPORTCOOKIE *psExportCookie) -+{ -+ _Mapping_Unexport(psMemDesc->psImport, -+ psExportCookie->hPMRExportHandle); -+ -+ psExportCookie->uiSize = 0; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemImport(SHARED_DEV_CONNECTION hDevConnection, -+ DEVMEM_EXPORTCOOKIE *psCookie, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ DEVMEM_MEMDESC **ppsMemDescPtr) -+{ -+ DEVMEM_MEMDESC *psMemDesc = NULL; -+ DEVMEM_IMPORT *psImport; -+ IMG_HANDLE hPMR; -+ PVRSRV_ERROR eError; -+ -+ PVR_GOTO_IF_INVALID_PARAM(ppsMemDescPtr, eError, failParams); -+ -+ eError = DevmemMemDescAlloc(&psMemDesc); -+ PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); -+ -+ eError = DevmemImportStructAlloc(hDevConnection, -+ &psImport); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, failImportAlloc); -+ } -+ -+ /* Get a handle to the PMR (inc refcount) */ -+ eError = BridgePMRImportPMR(GetBridgeHandle(hDevConnection), -+ psCookie->hPMRExportHandle, -+ psCookie->uiPMRExportPassword, -+ psCookie->uiSize, /* not trusted - just for validation */ -+ psCookie->uiLog2ContiguityGuarantee, /* not trusted - just for validation */ -+ &hPMR); -+ PVR_GOTO_IF_ERROR(eError, failImport); -+ -+ DevmemImportStructInit(psImport, -+ psCookie->uiSize, -+ 1ULL << psCookie->uiLog2ContiguityGuarantee, -+ uiFlags, -+ hPMR, -+ DEVMEM_PROPERTIES_IMPORTED | -+ DEVMEM_PROPERTIES_EXPORTABLE); -+ -+ DevmemMemDescInit(psMemDesc, -+ 0, -+ psImport, -+ psImport->uiSize); -+ -+ *ppsMemDescPtr = psMemDesc; -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI)) -+ { -+ /* Attach RI information */ -+ eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection), -+ psMemDesc->psImport->hPMR, -+ sizeof("^"), -+ "^", -+ psMemDesc->uiOffset, -+ psMemDesc->psImport->uiSize, -+ IMG_TRUE, -+ IMG_TRUE, -+ &psMemDesc->hRIHandle); -+ PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); -+ } -+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ -+ -+ return PVRSRV_OK; -+ -+ /* error exit paths follow */ -+ -+failImport: -+ DevmemImportDiscard(psImport); -+failImportAlloc: -+ DevmemMemDescDiscard(psMemDesc); -+failMemDescAlloc: -+failParams: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ -+ return eError; -+} -+ -+#endif /* SUPPORT_INSECURE_EXPORT */ -+ -+/***************************************************************************** -+ * Common MemDesc functions * -+ *****************************************************************************/ -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemGetSize(DEVMEM_MEMDESC *psMemDesc, IMG_DEVMEM_SIZE_T* puiSize) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ *puiSize = psMemDesc->uiAllocSize; -+ -+ return eError; -+} -+ -+IMG_INTERNAL void -+DevmemGetAnnotation(DEVMEM_MEMDESC *psMemDesc, IMG_CHAR **pszAnnotation) -+{ -+ /* -+ * It is expected that psMemDesc->szText is a valid NUL-terminated string, -+ * since DevmemMemDescAlloc uses OSAllocZMem to create the memdesc. 
-+ */ -+ *pszAnnotation = psMemDesc->szText; -+} -+ -+/* -+ This function is called for freeing any class of memory -+ */ -+IMG_INTERNAL IMG_BOOL -+DevmemFree(DEVMEM_MEMDESC *psMemDesc) -+{ -+ if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_SECURE) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Please use methods dedicated to secure buffers.", -+ __func__)); -+ return IMG_FALSE; -+ } -+ -+ return DevmemMemDescRelease(psMemDesc); -+} -+ -+IMG_INTERNAL IMG_BOOL -+DevmemReleaseDevAddrAndFree(DEVMEM_MEMDESC *psMemDesc) -+{ -+ DevmemReleaseDevVirtAddr(psMemDesc); -+ return DevmemFree(psMemDesc); -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc, -+ DEVMEM_HEAP *psHeap, -+ IMG_DEV_VIRTADDR *psDevVirtAddr) -+{ -+ DEVMEM_IMPORT *psImport; -+ IMG_DEV_VIRTADDR sDevVAddr; -+ PVRSRV_ERROR eError; -+ IMG_BOOL bMap = IMG_TRUE; -+ IMG_BOOL bDestroyed = IMG_FALSE; -+ IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS; -+ DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psMemDesc->psImport); -+ -+ OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock); -+ PVR_GOTO_IF_INVALID_PARAM(psHeap, eError, failParams); -+ -+ if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED, failCheck); -+ } -+ -+ /* Don't map memory for deferred allocations */ -+ if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC) -+ { -+ PVR_ASSERT(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE); -+ bMap = IMG_FALSE; -+ } -+ -+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", -+ __func__, -+ psMemDesc, -+ psMemDesc->sDeviceMemDesc.ui32RefCount, -+ psMemDesc->sDeviceMemDesc.ui32RefCount+1); -+ -+ psImport = psMemDesc->psImport; -+ DevmemMemDescAcquire(psMemDesc); -+ -+#if defined(__KERNEL__) -+ if (psHeap->bPremapped) -+ { -+ ui64OptionalMapAddress = _GetPremappedVA(psImport->hPMR, psHeap->psCtx->hDevConnection); -+ } -+#endif -+ -+ eError = DevmemImportStructDevMap(psHeap, -+ bMap, -+ psImport, -+ ui64OptionalMapAddress); -+ PVR_GOTO_IF_ERROR(eError, failMap); -+ -+ sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr; -+ sDevVAddr.uiAddr += psMemDesc->uiOffset; -+ psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr; -+ psMemDesc->sDeviceMemDesc.ui32RefCount++; -+ -+ *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr; -+ -+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); -+ -+ if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) -+ { -+ BridgeDevicememHistoryMap(GetBridgeHandle(psMemDesc->psImport->hDevConnection), -+ psMemDesc->psImport->hPMR, -+ psMemDesc->uiOffset, -+ psMemDesc->sDeviceMemDesc.sDevVAddr, -+ psMemDesc->uiAllocSize, -+ psMemDesc->szText, -+ DevmemGetHeapLog2PageSize(psHeap), -+ psMemDesc->ui32AllocationIndex, -+ &psMemDesc->ui32AllocationIndex); -+ } -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) -+ { -+ if (psMemDesc->hRIHandle) -+ { -+ eError = BridgeRIUpdateMEMDESCAddr(GetBridgeHandle(psImport->hDevConnection), -+ psMemDesc->hRIHandle, -+ psImport->sDeviceImport.sDevVAddr); -+ PVR_LOG_IF_ERROR(eError, "BridgeRIUpdateMEMDESCAddr"); -+ } -+ } -+#endif -+ -+ return PVRSRV_OK; -+ -+failMap: -+ bDestroyed = DevmemMemDescRelease(psMemDesc); -+failCheck: -+failParams: -+ if (!bDestroyed) -+ { -+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); -+ } -+ PVR_ASSERT(eError != PVRSRV_OK); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR 
-+DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc, -+ DEVMEM_HEAP *psHeap, -+ IMG_DEV_VIRTADDR sDevVirtAddr) -+{ -+ DEVMEM_IMPORT *psImport; -+ IMG_DEV_VIRTADDR sDevVAddr; -+ PVRSRV_ERROR eError; -+ IMG_BOOL bMap = IMG_TRUE; -+ IMG_BOOL bDestroyed = IMG_FALSE; -+ DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psMemDesc->psImport); -+ -+ OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock); -+ PVR_GOTO_IF_INVALID_PARAM(psHeap, eError, failParams); -+ -+ if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED, failCheck); -+ } -+ -+ /* Don't map memory for deferred allocations */ -+ if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC) -+ { -+ PVR_ASSERT(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE); -+ bMap = IMG_FALSE; -+ } -+ -+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", -+ __func__, -+ psMemDesc, -+ psMemDesc->sDeviceMemDesc.ui32RefCount, -+ psMemDesc->sDeviceMemDesc.ui32RefCount+1); -+ -+ psImport = psMemDesc->psImport; -+ DevmemMemDescAcquire(psMemDesc); -+ -+ eError = DevmemImportStructDevMap(psHeap, -+ bMap, -+ psImport, -+ sDevVirtAddr.uiAddr); -+ PVR_GOTO_IF_ERROR(eError, failMap); -+ -+ sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr; -+ sDevVAddr.uiAddr += psMemDesc->uiOffset; -+ psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr; -+ psMemDesc->sDeviceMemDesc.ui32RefCount++; -+ -+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); -+ -+ if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) -+ { -+ BridgeDevicememHistoryMap(GetBridgeHandle(psMemDesc->psImport->hDevConnection), -+ psMemDesc->psImport->hPMR, -+ psMemDesc->uiOffset, -+ psMemDesc->sDeviceMemDesc.sDevVAddr, -+ psMemDesc->uiAllocSize, -+ psMemDesc->szText, -+ DevmemGetHeapLog2PageSize(psHeap), -+ psMemDesc->ui32AllocationIndex, -+ &psMemDesc->ui32AllocationIndex); -+ } -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) -+ { -+ if (psMemDesc->hRIHandle) -+ { -+ eError = BridgeRIUpdateMEMDESCAddr(GetBridgeHandle(psImport->hDevConnection), -+ psMemDesc->hRIHandle, -+ psImport->sDeviceImport.sDevVAddr); -+ PVR_LOG_IF_ERROR(eError, "BridgeRIUpdateMEMDESCAddr"); -+ } -+ } -+#endif -+ -+ return PVRSRV_OK; -+ -+failMap: -+ bDestroyed = DevmemMemDescRelease(psMemDesc); -+failCheck: -+failParams: -+ if (!bDestroyed) -+ { -+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); -+ } -+ PVR_ASSERT(eError != PVRSRV_OK); -+ -+ return eError; -+} -+ -+IMG_INTERNAL IMG_DEV_VIRTADDR -+DevmemGetDevVirtAddr(DEVMEM_MEMDESC *psMemDesc) -+{ -+ if (psMemDesc->sDeviceMemDesc.ui32RefCount == 0) -+ { -+ PVR_LOG_ERROR(PVRSRV_ERROR_DEVICEMEM_NO_MAPPING, "DevmemGetDevVirtAddr"); -+ } -+ -+ PVR_ASSERT(psMemDesc->sDeviceMemDesc.sDevVAddr.uiAddr !=0 ); -+ -+ return psMemDesc->sDeviceMemDesc.sDevVAddr; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEV_VIRTADDR *psDevVirtAddr) -+{ -+ PVRSRV_ERROR eError; -+ -+ OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock); -+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", -+ __func__, -+ psMemDesc, -+ psMemDesc->sDeviceMemDesc.ui32RefCount, -+ psMemDesc->sDeviceMemDesc.ui32RefCount+1); -+ -+ if (psMemDesc->sDeviceMemDesc.ui32RefCount == 0) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_NO_MAPPING, failRelease); -+ } -+ psMemDesc->sDeviceMemDesc.ui32RefCount++; -+ -+ *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr; -+ 
OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); -+ -+ return PVRSRV_OK; -+ -+failRelease: -+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); -+ PVR_ASSERT(eError != PVRSRV_OK); -+ -+ return eError; -+} -+ -+IMG_INTERNAL void -+DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc) -+{ -+ PVR_ASSERT(psMemDesc != NULL); -+ -+ OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock); -+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", -+ __func__, -+ psMemDesc, -+ psMemDesc->sDeviceMemDesc.ui32RefCount, -+ psMemDesc->sDeviceMemDesc.ui32RefCount-1); -+ -+ PVR_ASSERT(psMemDesc->sDeviceMemDesc.ui32RefCount != 0); -+ -+ if (--psMemDesc->sDeviceMemDesc.ui32RefCount == 0) -+ { -+ if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) -+ { -+ BridgeDevicememHistoryUnmap(GetBridgeHandle(psMemDesc->psImport->hDevConnection), -+ psMemDesc->psImport->hPMR, -+ psMemDesc->uiOffset, -+ psMemDesc->sDeviceMemDesc.sDevVAddr, -+ psMemDesc->uiAllocSize, -+ psMemDesc->szText, -+ DevmemGetHeapLog2PageSize(psMemDesc->psImport->sDeviceImport.psHeap), -+ psMemDesc->ui32AllocationIndex, -+ &psMemDesc->ui32AllocationIndex); -+ } -+ -+ /* When device mapping destroyed, zero Dev VA so DevmemGetDevVirtAddr() -+ * returns 0 */ -+ if (DevmemImportStructDevUnmap(psMemDesc->psImport) == IMG_TRUE) -+ { -+ psMemDesc->sDeviceMemDesc.sDevVAddr.uiAddr = 0; -+ } -+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); -+ -+ DevmemMemDescRelease(psMemDesc); -+ } -+ else -+ { -+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); -+ } -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc, -+ void **ppvCpuVirtAddr) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psMemDesc != NULL); -+ PVR_ASSERT(ppvCpuVirtAddr != NULL); -+ -+ eError = DevmemCPUMapCheckImportProperties(psMemDesc); -+ PVR_LOG_RETURN_IF_ERROR(eError, "DevmemCPUMapCheckImportProperties"); -+ -+ OSLockAcquire(psMemDesc->sCPUMemDesc.hLock); -+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", -+ __func__, -+ psMemDesc, -+ psMemDesc->sCPUMemDesc.ui32RefCount, -+ psMemDesc->sCPUMemDesc.ui32RefCount+1); -+ -+ if (psMemDesc->sCPUMemDesc.ui32RefCount++ == 0) -+ { -+ DEVMEM_IMPORT *psImport = psMemDesc->psImport; -+ IMG_UINT8 *pui8CPUVAddr; -+ -+ DevmemMemDescAcquire(psMemDesc); -+ eError = DevmemImportStructCPUMap(psImport); -+ PVR_GOTO_IF_ERROR(eError, failMap); -+ -+ pui8CPUVAddr = psImport->sCPUImport.pvCPUVAddr; -+ pui8CPUVAddr += psMemDesc->uiOffset; -+ psMemDesc->sCPUMemDesc.pvCPUVAddr = pui8CPUVAddr; -+ } -+ *ppvCpuVirtAddr = psMemDesc->sCPUMemDesc.pvCPUVAddr; -+ -+ VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize); -+ -+ OSLockRelease(psMemDesc->sCPUMemDesc.hLock); -+ -+ return PVRSRV_OK; -+ -+failMap: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ psMemDesc->sCPUMemDesc.ui32RefCount--; -+ -+ if (!DevmemMemDescRelease(psMemDesc)) -+ { -+ OSLockRelease(psMemDesc->sCPUMemDesc.hLock); -+ } -+ return eError; -+} -+ -+IMG_INTERNAL void -+DevmemReacquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc, -+ void **ppvCpuVirtAddr) -+{ -+ PVR_ASSERT(psMemDesc != NULL); -+ PVR_ASSERT(ppvCpuVirtAddr != NULL); -+ -+ if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_NO_CPU_MAPPING) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: CPU UnMapping is not possible on this allocation!", -+ __func__)); -+ return; -+ } -+ -+ OSLockAcquire(psMemDesc->sCPUMemDesc.hLock); -+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", -+ __func__, -+ psMemDesc, -+ psMemDesc->sCPUMemDesc.ui32RefCount, -+ psMemDesc->sCPUMemDesc.ui32RefCount+1); -+ -+ *ppvCpuVirtAddr = NULL; 
-+ if (psMemDesc->sCPUMemDesc.ui32RefCount) -+ { -+ *ppvCpuVirtAddr = psMemDesc->sCPUMemDesc.pvCPUVAddr; -+ psMemDesc->sCPUMemDesc.ui32RefCount += 1; -+ } -+ -+ VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize); -+ OSLockRelease(psMemDesc->sCPUMemDesc.hLock); -+} -+ -+IMG_INTERNAL void -+DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc) -+{ -+ PVR_ASSERT(psMemDesc != NULL); -+ -+ if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_NO_CPU_MAPPING) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: CPU UnMapping is not possible on this allocation!", -+ __func__)); -+ return; -+ } -+ -+ OSLockAcquire(psMemDesc->sCPUMemDesc.hLock); -+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", -+ __func__, -+ psMemDesc, -+ psMemDesc->sCPUMemDesc.ui32RefCount, -+ psMemDesc->sCPUMemDesc.ui32RefCount-1); -+ -+ PVR_ASSERT(psMemDesc->sCPUMemDesc.ui32RefCount != 0); -+ -+ if (--psMemDesc->sCPUMemDesc.ui32RefCount == 0) -+ { -+ OSLockRelease(psMemDesc->sCPUMemDesc.hLock); -+ DevmemImportStructCPUUnmap(psMemDesc->psImport); -+ DevmemMemDescRelease(psMemDesc); -+ } -+ else -+ { -+ OSLockRelease(psMemDesc->sCPUMemDesc.hLock); -+ } -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc, -+ IMG_HANDLE *phImport) -+{ -+ if ((GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_EXPORTABLE) == 0) -+ { -+ return PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION; -+ } -+ -+ *phImport = psMemDesc->psImport->hPMR; -+ -+ return PVRSRV_OK; -+} -+ -+#if !defined(__KERNEL__) -+IMG_INTERNAL PVRSRV_ERROR -+DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc, -+ IMG_UINT64 *pui64UID) -+{ -+ DEVMEM_IMPORT *psImport = psMemDesc->psImport; -+ PVRSRV_ERROR eError; -+ -+ if (!(GetImportProperties(psImport) & (DEVMEM_PROPERTIES_IMPORTED | -+ DEVMEM_PROPERTIES_EXPORTABLE))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: This Memory (0x%p) doesn't support the functionality requested...", -+ __func__, psMemDesc)); -+ return PVRSRV_ERROR_INVALID_REQUEST; -+ } -+ -+ eError = BridgePMRGetUID(GetBridgeHandle(psImport->hDevConnection), -+ psImport->hPMR, -+ pui64UID); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc, -+ IMG_HANDLE *hReservation) -+{ -+ DEVMEM_IMPORT *psImport; -+ -+ PVR_ASSERT(psMemDesc); -+ psImport = psMemDesc->psImport; -+ -+ PVR_ASSERT(psImport); -+ *hReservation = psImport->sDeviceImport.hReservation; -+ -+ return PVRSRV_OK; -+} -+ -+#endif /* !__KERNEL__ */ -+ -+/* Kernel usage of this function will only work with -+ * memdescs of buffers allocated in the FW memory context -+ * that is created in the Server -+ */ -+void -+DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc, -+ IMG_HANDLE *phPMR, -+ IMG_DEVMEM_OFFSET_T *puiPMROffset) -+{ -+ DEVMEM_IMPORT *psImport; -+ -+ PVR_ASSERT(psMemDesc); -+ *puiPMROffset = psMemDesc->uiOffset; -+ psImport = psMemDesc->psImport; -+ -+ PVR_ASSERT(psImport); -+ *phPMR = psImport->hPMR; -+} -+ -+#if defined(__KERNEL__) -+IMG_INTERNAL void -+DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc, -+ PVRSRV_MEMALLOCFLAGS_T *puiFlags) -+{ -+ DEVMEM_IMPORT *psImport; -+ -+ PVR_ASSERT(psMemDesc); -+ psImport = psMemDesc->psImport; -+ -+ PVR_ASSERT(psImport); -+ *puiFlags = psImport->uiFlags; -+} -+ -+IMG_INTERNAL SHARED_DEV_CONNECTION -+DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc) -+{ -+ return psMemDesc->psImport->hDevConnection; -+} -+#endif /* __KERNEL__ */ -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemLocalImport(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hExtHandle, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ 
DEVMEM_MEMDESC **ppsMemDescPtr, -+ IMG_DEVMEM_SIZE_T *puiSizePtr, -+ const IMG_CHAR *pszAnnotation) -+{ -+ DEVMEM_MEMDESC *psMemDesc = NULL; -+ DEVMEM_IMPORT *psImport; -+ IMG_DEVMEM_SIZE_T uiSize; -+ IMG_DEVMEM_ALIGN_T uiAlign; -+ IMG_HANDLE hPMR; -+ PVRSRV_ERROR eError; -+ -+ PVR_GOTO_IF_INVALID_PARAM(ppsMemDescPtr, eError, failParams); -+ -+ eError = DevmemMemDescAlloc(&psMemDesc); -+ PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); -+ -+ eError = DevmemImportStructAlloc(hDevConnection, -+ &psImport); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, failImportAlloc); -+ } -+ -+ /* Get the PMR handle and its size from the server */ -+ eError = BridgePMRLocalImportPMR(GetBridgeHandle(hDevConnection), -+ hExtHandle, -+ &hPMR, -+ &uiSize, -+ &uiAlign); -+ PVR_GOTO_IF_ERROR(eError, failImport); -+ -+ DevmemImportStructInit(psImport, -+ uiSize, -+ uiAlign, -+ uiFlags, -+ hPMR, -+ DEVMEM_PROPERTIES_IMPORTED | -+ DEVMEM_PROPERTIES_EXPORTABLE); -+ -+ DevmemMemDescInit(psMemDesc, -+ 0, -+ psImport, -+ uiSize); -+ -+ *ppsMemDescPtr = psMemDesc; -+ if (puiSizePtr) -+ *puiSizePtr = uiSize; -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI)) -+ { -+ /* Attach RI information. -+ * Set backed size to 0 since this allocation has been allocated -+ * by the same process and has been accounted for. */ -+ eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection), -+ psMemDesc->psImport->hPMR, -+ sizeof("^"), -+ "^", -+ psMemDesc->uiOffset, -+ psMemDesc->psImport->uiSize, -+ IMG_TRUE, -+ IMG_FALSE, -+ &(psMemDesc->hRIHandle)); -+ PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); -+ } -+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ -+ -+ -+ /* Copy the allocation descriptive name and size so it can be passed -+ * to DevicememHistory when the allocation gets mapped/unmapped -+ */ -+ CheckAnnotationLength(pszAnnotation); -+ OSStringLCopy(psMemDesc->szText, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN); -+ -+ return PVRSRV_OK; -+ -+failImport: -+ DevmemImportDiscard(psImport); -+failImportAlloc: -+ DevmemMemDescDiscard(psMemDesc); -+failMemDescAlloc: -+failParams: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ -+ return eError; -+} -+ -+#if !defined(__KERNEL__) -+IMG_INTERNAL PVRSRV_ERROR -+DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext, -+ IMG_DEV_VIRTADDR sDevVAddr) -+{ -+ return BridgeDevmemIsVDevAddrValid(GetBridgeHandle(psContext->hDevConnection), -+ psContext->hDevMemServerContext, -+ sDevVAddr); -+} -+ -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemGetFaultAddress(DEVMEM_CONTEXT *psContext, -+ IMG_DEV_VIRTADDR *psFaultAddress) -+{ -+ return BridgeDevmemGetFaultAddress(GetBridgeHandle(psContext->hDevConnection), -+ psContext->hDevMemServerContext, -+ psFaultAddress); -+} -+ -+#if defined(RGX_FEATURE_FBCDC) -+IMG_INTERNAL PVRSRV_ERROR -+DevmemInvalidateFBSCTable(DEVMEM_CONTEXT *psContext, -+ IMG_UINT64 ui64FBSCEntries) -+{ -+ return BridgeDevmemInvalidateFBSCTable(GetBridgeHandle(psContext->hDevConnection), -+ psContext->hDevMemServerContext, -+ ui64FBSCEntries); -+} -+#endif -+ -+#endif /* !__KERNEL__ */ -+ -+IMG_INTERNAL IMG_UINT32 -+DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap) -+{ -+ return psHeap->uiLog2Quantum; -+} -+ -+IMG_INTERNAL PVRSRV_MEMALLOCFLAGS_T -+DevmemGetMemAllocFlags(DEVMEM_MEMDESC *psMemDesc) -+{ -+ return psMemDesc->psImport->uiFlags; -+} -+ -+IMG_INTERNAL IMG_DEVMEM_SIZE_T -+DevmemGetHeapReservedSize(DEVMEM_HEAP *psHeap) 
-+{ -+ return psHeap->uiReservedRegionSize; -+} -+ -+#if !defined(__KERNEL__) -+/**************************************************************************/ /*! -+@Function RegisterDevMemPFNotify -+@Description Registers that the application wants to be signaled when a page -+ fault occurs. -+ -+@Input psContext Memory context the process that would like to -+ be notified about. -+@Input bRegister If true, register. If false, de-register. -+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_ -+ error code -+ */ /***************************************************************************/ -+IMG_INTERNAL PVRSRV_ERROR -+RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext, -+ IMG_BOOL bRegister) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = BridgeDevmemIntRegisterPFNotifyKM(GetBridgeHandle(psContext->hDevConnection), -+ psContext->hDevMemServerContext, -+ bRegister); -+ if (eError == PVRSRV_ERROR_BRIDGE_CALL_FAILED) -+ { -+ PVR_LOG_ERROR(eError, "BridgeDevmemIntRegisterPFNotifyKM"); -+ } -+ -+ return eError; -+} -+#endif /* !__KERNEL__ */ -+ -+IMG_INTERNAL void -+DevmemHeapSetPremapStatus(DEVMEM_HEAP *psHeap, IMG_BOOL IsPremapped) -+{ -+ psHeap->bPremapped = IsPremapped; -+} -diff --git a/drivers/gpu/drm/img-rogue/devicemem.h b/drivers/gpu/drm/img-rogue/devicemem.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/devicemem.h -@@ -0,0 +1,694 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device Memory Management core internal -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Services internal interface to core device memory management -+ functions that are shared between client and server code. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef SRVCLIENT_DEVICEMEM_H -+#define SRVCLIENT_DEVICEMEM_H -+ -+/****************************************************************************** -+ * * -+ * +------------+ +------------+ +--------------+ +--------------+ * -+ * | a sub- | | a sub- | | an | | allocation | * -+ * | allocation | | allocation | | allocation | | also mapped | * -+ * | | | | | in proc 1 | | into proc 2 | * -+ * +------------+ +------------+ +--------------+ +--------------+ * -+ * | | | | * -+ * +--------------+ +--------------+ +--------------+ * -+ * | page gran- | | page gran- | | page gran- | * -+ * | ular mapping | | ular mapping | | ular mapping | * -+ * +--------------+ +--------------+ +--------------+ * -+ * | | | * -+ * | | | * -+ * | | | * -+ * +--------------+ +--------------+ * -+ * | | | | * -+ * | A "P.M.R." | | A "P.M.R." | * -+ * | | | | * -+ * +--------------+ +--------------+ * -+ * * -+ ******************************************************************************/ -+ -+/* -+ All device memory allocations are ultimately a view upon (not -+ necessarily the whole of) a "PMR". -+ -+ A PMR is a "Physical Memory Resource", which may be a -+ "pre-faulted" lump of physical memory, or it may be a -+ representation of some physical memory that will be instantiated -+ at some future time. -+ -+ PMRs always represent multiple of some power-of-2 "contiguity" -+ promised by the PMR, which will allow them to be mapped in whole -+ pages into the device MMU. As memory allocations may be smaller -+ than a page, these mappings may be suballocated and thus shared -+ between multiple allocations in one process. A PMR may also be -+ mapped simultaneously into multiple device memory contexts -+ (cross-process scenario), however, for security reasons, it is not -+ legal to share a PMR "both ways" at once, that is, mapped into -+ multiple processes and divided up amongst several suballocations. -+ -+ This PMR terminology is introduced here for background -+ information, but is generally of little concern to the caller of -+ this API. This API handles suballocations and mappings, and the -+ caller thus deals primarily with MEMORY DESCRIPTORS representing -+ an allocation or suballocation, HEAPS representing ranges of -+ virtual addresses in a CONTEXT. -+*/ -+ -+/* -+ |<---------------------------context------------------------------>| -+ |<-------heap------->| |<-------heap------->|<-------heap------->| -+ |<-alloc->| | |<-alloc->|<-alloc->|| |<-alloc->| | -+*/ -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "devicemem_typedefs.h" -+#include "pdumpdefs.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_memallocflags.h" -+ -+#include "pdump.h" -+ -+#include "device_connection.h" -+ -+ -+typedef IMG_UINT32 DEVMEM_HEAPCFGID; -+#define DEVMEM_HEAPCFG_FORCLIENTS 0 -+#define DEVMEM_HEAPCFG_FORFW 1 -+ -+ -+/* -+ In order to call the server side functions, we need a bridge handle. 
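Editor's note: to make the context/heap/memdesc/PMR relationships described in the comment above concrete, here is a minimal client-side sketch of the typical lifecycle, using only functions declared in this header. It is illustrative, not taken from the patch: it assumes an already-established hDevConnection and caller-chosen allocation flags, and the heap name "General", the sizes and the annotation string are placeholder values. It only compiles against the headers added by this patch.

#include "devicemem.h"

/* Allocate a small buffer, map it to the GPU, touch it from the CPU, then
 * tear everything down in reverse order. */
static PVRSRV_ERROR ExampleAllocMapUse(SHARED_DEV_CONNECTION hDevConnection,
                                       PVRSRV_MEMALLOCFLAGS_T uiFlags)
{
	DEVMEM_CONTEXT *psCtx = NULL;
	DEVMEM_HEAP *psHeap = NULL;
	DEVMEM_MEMDESC *psMemDesc = NULL;
	IMG_DEV_VIRTADDR sDevVAddr;
	void *pvCpu = NULL;
	PVRSRV_ERROR eError;

	/* Create the memory context; heaps are auto-instantiated from the blueprint */
	eError = DevmemCreateContext(hDevConnection, DEVMEM_HEAPCFG_FORCLIENTS, &psCtx);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* "General" is a placeholder heap name for this sketch */
	eError = DevmemFindHeapByName(psCtx, "General", &psHeap);
	if (eError != PVRSRV_OK)
	{
		goto destroy_ctx;
	}

	/* 4 KiB suballocation, routed through the client-side suballocation RA */
	eError = DevmemAllocate(psHeap, 4096, 4096, uiFlags, "example-buffer", &psMemDesc);
	if (eError != PVRSRV_OK)
	{
		goto destroy_ctx;
	}

	/* Bind the allocation to the heap and obtain its device virtual address */
	eError = DevmemMapToDevice(psMemDesc, psHeap, &sDevVAddr);
	if (eError != PVRSRV_OK)
	{
		goto free_mem;
	}

	/* CPU access is refcounted: acquire, use, release */
	eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvCpu);
	if (eError == PVRSRV_OK)
	{
		/* ... fill pvCpu ... */
		DevmemReleaseCpuVirtAddr(psMemDesc);
	}

	DevmemReleaseDevVirtAddr(psMemDesc);
free_mem:
	DevmemFree(psMemDesc);
destroy_ctx:
	DevmemDestroyContext(psCtx);
	return eError;
}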
-+ We abstract that here, as we may wish to change its form. -+ */ -+ -+typedef IMG_HANDLE DEVMEM_BRIDGE_HANDLE; -+ -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemGetHeapInt(DEVMEM_HEAP *psHeap, -+ IMG_HANDLE *phDevmemHeap); -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemGetSize(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_SIZE_T* puiSize); -+ -+IMG_INTERNAL void -+DevmemGetAnnotation(DEVMEM_MEMDESC *psMemDesc, -+ IMG_CHAR **pszAnnotation); -+ -+/* -+ * DevmemCreateContext() -+ * -+ * Create a device memory context -+ * -+ * This must be called before any heap is created in this context -+ * -+ * Caller to provide bridge handle which will be recorded internally and used -+ * for all future operations on items from this memory context. Caller also -+ * to provide devicenode handle, as this is used for MMU configuration and -+ * also to determine the heap configuration for the auto-instantiated heaps. -+ * -+ * Note that when compiled in services/server, the hBridge is not used and -+ * is thrown away by the "fake" direct bridge. (This may change. It is -+ * recommended that NULL be passed for the handle for now.) -+ * -+ * hDeviceNode and uiHeapBlueprintID shall together dictate which heap-config -+ * to use. -+ * -+ * This will cause the server side counterpart to be created also. -+ * -+ * If you call DevmemCreateContext() (and the call succeeds) you are promising -+ * that you will later call Devmem_ContextDestroy(), except for abnormal -+ * process termination in which case it is expected it will be destroyed as -+ * part of handle clean up. -+ * -+ * Caller to provide storage for the pointer to the newly created -+ * NEWDEVMEM_CONTEXT object. -+ */ -+PVRSRV_ERROR -+DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection, -+ DEVMEM_HEAPCFGID uiHeapBlueprintID, -+ DEVMEM_CONTEXT **ppsCtxPtr); -+ -+/* -+ * DevmemAcquireDevPrivData() -+ * -+ * Acquire the device private data for this memory context -+ */ -+PVRSRV_ERROR -+DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx, -+ IMG_HANDLE *hPrivData); -+ -+/* -+ * DevmemReleaseDevPrivData() -+ * -+ * Release the device private data for this memory context -+ */ -+PVRSRV_ERROR -+DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx); -+ -+/* -+ * DevmemDestroyContext() -+ * -+ * Undoes that done by DevmemCreateContext() -+ */ -+PVRSRV_ERROR -+DevmemDestroyContext(DEVMEM_CONTEXT *psCtx); -+ -+/* -+ * DevmemCreateHeap() -+ * -+ * Create a heap in the given context. -+ * -+ * N.B. Not intended to be called directly, though it can be. -+ * Normally, heaps are instantiated at context creation time according -+ * to the specified blueprint. See DevmemCreateContext() for details. -+ * -+ * This will cause MMU code to set up data structures for the heap, -+ * but may not cause page tables to be modified until allocations are -+ * made from the heap. -+ * -+ * uiReservedRegionLength Reserved address space for static VAs shared -+ * between clients and firmware -+ * -+ * The "Quantum" is both the device MMU page size to be configured for -+ * this heap, and the unit multiples of which "quantized" allocations -+ * are made (allocations smaller than this, known as "suballocations" -+ * will be made from a "sub alloc RA" and will "import" chunks -+ * according to this quantum) -+ * -+ * Where imported PMRs (or, for example, PMRs created by device class -+ * buffers) are mapped into this heap, it is important that the -+ * physical contiguity guarantee offered by the PMR is greater than or -+ * equal to the quantum size specified here, otherwise the attempt to -+ * map it will fail. 
"Normal" allocations via Devmem_Allocate -+ * shall automatically meet this requirement, as each "import" will -+ * trigger the creation of a PMR with the desired contiguity. The -+ * supported quantum sizes in that case shall be dictated by the OS -+ * specific implementation of PhysmemNewOSRamBackedPMR() (see) -+ */ -+PVRSRV_ERROR -+DevmemCreateHeap(DEVMEM_CONTEXT *psCtxPtr, -+ /* base and length of heap */ -+ IMG_DEV_VIRTADDR sBaseAddress, -+ IMG_DEVMEM_SIZE_T uiLength, -+ IMG_DEVMEM_SIZE_T uiReservedRegionLength, -+ /* log2 of allocation quantum, i.e. "page" size. -+ All allocations (that go to server side) are -+ multiples of this. We use a client-side RA to -+ make sub-allocations from this */ -+ IMG_UINT32 ui32Log2Quantum, -+ /* The minimum import alignment for this heap */ -+ IMG_UINT32 ui32Log2ImportAlignment, -+ /* Name of heap for debug */ -+ /* N.B. Okay to exist on caller's stack - this -+ func takes a copy if it needs it. */ -+ const IMG_CHAR *pszName, -+ DEVMEM_HEAPCFGID uiHeapBlueprintID, -+ IMG_UINT32 uiHeapIndex, -+ DEVMEM_HEAP **ppsHeapPtr); -+/* -+ * DevmemDestroyHeap() -+ * -+ * Reverses DevmemCreateHeap() -+ * -+ * N.B. All allocations must have been freed and all mappings must -+ * have been unmapped before invoking this call -+ */ -+PVRSRV_ERROR -+DevmemDestroyHeap(DEVMEM_HEAP *psHeap); -+ -+/* -+ * DevmemExportalignAdjustSizeAndAlign() -+ * Compute the Size and Align passed to avoid suballocations -+ * (used when allocation with PVRSRV_MEMALLOCFLAG_EXPORTALIGN). -+ * -+ * Returns PVRSRV_ERROR_INVALID_PARAMS if uiLog2Quantum has invalid value. -+ */ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum, -+ IMG_DEVMEM_SIZE_T *puiSize, -+ IMG_DEVMEM_ALIGN_T *puiAlign); -+ -+/* -+ * DevmemSubAllocate() -+ * -+ * Makes an allocation (possibly a "suballocation", as described -+ * below) of device virtual memory from this heap. -+ * -+ * The size and alignment of the allocation will be honoured by the RA -+ * that allocates the "suballocation". The resulting allocation will -+ * be mapped into GPU virtual memory and the physical memory to back -+ * it will exist, by the time this call successfully completes. -+ * -+ * The size must be a positive integer multiple of the alignment. -+ * (i.e. the alignment specifies the alignment of both the start and -+ * the end of the resulting allocation.) -+ * -+ * Allocations made via this API are routed through a "suballocation -+ * RA" which is responsible for ensuring that small allocations can be -+ * made without wasting physical memory in the server. Furthermore, -+ * such suballocations can be made entirely client side without -+ * needing to go to the server unless the allocation spills into a new -+ * page. -+ * -+ * Such suballocations cause many allocations to share the same "PMR". -+ * This happens only when the flags match exactly. -+ * -+ */ -+ -+PVRSRV_ERROR -+DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier, -+ DEVMEM_HEAP *psHeap, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszText, -+ DEVMEM_MEMDESC **ppsMemDescPtr); -+ -+#define DevmemAllocate(...) 
\ -+ DevmemSubAllocate(DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER, __VA_ARGS__) -+ -+PVRSRV_ERROR -+DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ IMG_UINT32 uiLog2HeapPageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszText, -+ DEVMEM_MEMDESC **ppsMemDescPtr); -+ -+PVRSRV_ERROR -+DeviceMemChangeSparse(DEVMEM_MEMDESC *psMemDesc, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *paui32AllocPageIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pauiFreePageIndices, -+ SPARSE_MEM_RESIZE_FLAGS uiFlags); -+ -+PVRSRV_ERROR -+DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ IMG_UINT32 uiLog2HeapPageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszText, -+ DEVMEM_MEMDESC **ppsMemDescPtr); -+ -+PVRSRV_ERROR -+DevmemSubAllocateAndMap(IMG_UINT8 uiPreAllocMultiplier, -+ DEVMEM_HEAP *psHeap, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszText, -+ DEVMEM_MEMDESC **ppsMemDescPtr, -+ IMG_DEV_VIRTADDR *psDevVirtAddr); -+ -+#define DevmemAllocateAndMap(...) \ -+ DevmemSubAllocateAndMap(DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER, __VA_ARGS__) -+ -+/* -+ * DevmemFree() -+ * -+ * Reverses that done by DevmemSubAllocate() N.B. The underlying -+ * mapping and server side allocation _may_ not be torn down, for -+ * example, if the allocation has been exported, or if multiple -+ * allocations were suballocated from the same mapping, but this is -+ * properly refcounted, so the caller does not have to care. -+ */ -+ -+IMG_BOOL -+DevmemFree(DEVMEM_MEMDESC *psMemDesc); -+ -+IMG_BOOL -+DevmemReleaseDevAddrAndFree(DEVMEM_MEMDESC *psMemDesc); -+ -+/* -+ DevmemMapToDevice: -+ -+ Map an allocation to the device it was allocated from. -+ This function _must_ be called before any call to -+ DevmemAcquireDevVirtAddr is made as it binds the allocation -+ to the heap. -+ DevmemReleaseDevVirtAddr is used to release the reference -+ to the device mapping this function created, but it doesn't -+ mean that the memory will actually be unmapped from the -+ device as other references to the mapping obtained via -+ DevmemAcquireDevVirtAddr could still be active. -+*/ -+PVRSRV_ERROR DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc, -+ DEVMEM_HEAP *psHeap, -+ IMG_DEV_VIRTADDR *psDevVirtAddr); -+ -+/* -+ DevmemMapToDeviceAddress: -+ -+ Same as DevmemMapToDevice but the caller chooses the address -+ to map to. -+*/ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc, -+ DEVMEM_HEAP *psHeap, -+ IMG_DEV_VIRTADDR sDevVirtAddr); -+ -+/* -+ DevmemGetDevVirtAddr -+ -+ Obtain the MemDesc's device virtual address. -+ This function _must_ be called after DevmemMapToDevice(Address) -+ and is expected to be used be functions which didn't allocate -+ the MemDesc but need to know it's address. -+ It will PVR_ASSERT if no device mapping exists and 0 is returned. -+ */ -+IMG_DEV_VIRTADDR -+DevmemGetDevVirtAddr(DEVMEM_MEMDESC *psMemDesc); -+ -+/* -+ DevmemAcquireDevVirtAddr -+ -+ Acquire the MemDesc's device virtual address. 
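Editor's note: because the header splits the mapping call (DevmemMapToDevice) from the refcounted address lookup (DevmemAcquireDevVirtAddr), a short sketch of the consumer side may help. It is hedged and illustrative: it assumes some other component has already mapped the memdesc, exactly the situation the surrounding comments describe.

#include "devicemem.h"

/* A consumer that did not create psMemDesc takes an extra reference on its
 * existing device mapping. If no mapping exists yet this fails with
 * PVRSRV_ERROR_DEVICEMEM_NO_MAPPING. */
static PVRSRV_ERROR ExampleUseExistingMapping(DEVMEM_MEMDESC *psMemDesc)
{
	IMG_DEV_VIRTADDR sDevVAddr;
	PVRSRV_ERROR eError;

	eError = DevmemAcquireDevVirtAddr(psMemDesc, &sDevVAddr);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* ... program sDevVAddr.uiAddr into a hardware command, for example ... */

	/* Every successful Acquire (and the original MapToDevice) must be balanced
	 * by a Release before the allocation can finally be unmapped and freed. */
	DevmemReleaseDevVirtAddr(psMemDesc);
	return PVRSRV_OK;
}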
-+ This function _must_ be called after DevmemMapToDevice -+ and is expected to be used be functions which didn't allocate -+ the MemDesc but need to know it's address -+ */ -+PVRSRV_ERROR DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEV_VIRTADDR *psDevVirtAddrRet); -+ -+/* -+ * DevmemReleaseDevVirtAddr() -+ * -+ * give up the licence to use the device virtual address that was -+ * acquired by "Acquire" or "MapToDevice" -+ */ -+void -+DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc); -+ -+/* -+ * DevmemAcquireCpuVirtAddr() -+ * -+ * Acquires a license to use the cpu virtual address of this mapping. -+ * Note that the memory may not have been mapped into cpu virtual -+ * memory prior to this call. On first "acquire" the memory will be -+ * mapped in (if it wasn't statically mapped in) and on last put it -+ * _may_ become unmapped. Later calling "Acquire" again, _may_ cause -+ * the memory to be mapped at a different address. -+ */ -+PVRSRV_ERROR DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc, -+ void **ppvCpuVirtAddr); -+ -+/* -+ * DevmemReacquireCpuVirtAddr() -+ * -+ * (Re)acquires license to use the cpu virtual address of this mapping -+ * if (and only if) there is already a pre-existing license to use the -+ * cpu virtual address for the mapping, returns NULL otherwise. -+ */ -+void DevmemReacquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc, -+ void **ppvCpuVirtAddr); -+ -+/* -+ * DevmemReleaseDevVirtAddr() -+ * -+ * give up the licence to use the cpu virtual address that was granted -+ * with the "Get" call. -+ */ -+void -+DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc); -+ -+#if defined(SUPPORT_INSECURE_EXPORT) -+/* -+ * DevmemExport() -+ * -+ * Given a memory allocation allocated with DevmemAllocateExportable() -+ * create a "cookie" that can be passed intact by the caller's own choice -+ * of secure IPC to another process and used as the argument to "map" -+ * to map this memory into a heap in the target processes. N.B. This can -+ * also be used to map into multiple heaps in one process, though that's not -+ * the intention. -+ * -+ * Note, the caller must later call Unexport before freeing the -+ * memory. -+ */ -+PVRSRV_ERROR DevmemExport(DEVMEM_MEMDESC *psMemDesc, -+ DEVMEM_EXPORTCOOKIE *psExportCookie); -+ -+ -+void DevmemUnexport(DEVMEM_MEMDESC *psMemDesc, -+ DEVMEM_EXPORTCOOKIE *psExportCookie); -+ -+PVRSRV_ERROR -+DevmemImport(SHARED_DEV_CONNECTION hDevConnection, -+ DEVMEM_EXPORTCOOKIE *psCookie, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ DEVMEM_MEMDESC **ppsMemDescPtr); -+#endif /* SUPPORT_INSECURE_EXPORT */ -+ -+/* -+ * DevmemMakeLocalImportHandle() -+ * -+ * This is a "special case" function for making a server export cookie -+ * which went through the direct bridge into an export cookie that can -+ * be passed through the client bridge. -+ */ -+PVRSRV_ERROR -+DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hServerExport, -+ IMG_HANDLE *hClientExport); -+ -+/* -+ * DevmemUnmakeLocalImportHandle() -+ * -+ * Free any resource associated with the Make operation -+ */ -+PVRSRV_ERROR -+DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hClientExport); -+ -+/* -+ * -+ * The following set of functions is specific to the heap "blueprint" -+ * stuff, for automatic creation of heaps when a context is created -+ * -+ */ -+ -+ -+/* Devmem_HeapConfigCount: returns the number of heap configs that -+ this device has. 
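Editor's note: the SUPPORT_INSECURE_EXPORT cookie flow declared above spans two processes, so a hedged two-sided sketch is added here. The IPC transport ("the caller's own choice of secure IPC") is out of scope and is only hinted at in comments; the helper names ExampleExport/ExampleImport are invented for this illustration.

#include "devicemem.h"

#if defined(SUPPORT_INSECURE_EXPORT)
/* Process A: psMemDesc must come from DevmemAllocateExportable() */
static PVRSRV_ERROR ExampleExport(DEVMEM_MEMDESC *psMemDesc,
                                  DEVMEM_EXPORTCOOKIE *psCookie)
{
	PVRSRV_ERROR eError = DevmemExport(psMemDesc, psCookie);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}
	/* ... pass *psCookie to the peer over IPC (not shown) ... */
	/* Later, before freeing psMemDesc: DevmemUnexport(psMemDesc, psCookie); */
	return PVRSRV_OK;
}

/* Process B: turn the received cookie into a local memdesc. The cookie's
 * size/contiguity fields are treated as untrusted and validated server-side. */
static PVRSRV_ERROR ExampleImport(SHARED_DEV_CONNECTION hDevConnection,
                                  DEVMEM_EXPORTCOOKIE *psCookie,
                                  PVRSRV_MEMALLOCFLAGS_T uiFlags,
                                  DEVMEM_MEMDESC **ppsMemDesc)
{
	return DevmemImport(hDevConnection, psCookie, uiFlags, ppsMemDesc);
}
#endif /* SUPPORT_INSECURE_EXPORT */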
Note that there is no acquire/release semantics -+ required, as this data is guaranteed to be constant for the -+ lifetime of the device node */ -+PVRSRV_ERROR -+DevmemHeapConfigCount(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_UINT32 *puiNumHeapConfigsOut); -+ -+/* Devmem_HeapCount: returns the number of heaps that a given heap -+ config on this device has. Note that there is no acquire/release -+ semantics required, as this data is guaranteed to be constant for -+ the lifetime of the device node */ -+PVRSRV_ERROR -+DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_UINT32 uiHeapConfigIndex, -+ IMG_UINT32 *puiNumHeapsOut); -+/* Devmem_HeapConfigName: return the name of the given heap config. -+ The caller is to provide the storage for the returned string and -+ indicate the number of bytes (including null terminator) for such -+ string in the BufSz arg. Note that there is no acquire/release -+ semantics required, as this data is guaranteed to be constant for -+ the lifetime of the device node. -+ */ -+PVRSRV_ERROR -+DevmemHeapConfigName(SHARED_DEV_CONNECTION hsDevConnection, -+ IMG_UINT32 uiHeapConfigIndex, -+ IMG_CHAR *pszConfigNameOut, -+ IMG_UINT32 uiConfigNameBufSz); -+ -+/* Devmem_HeapDetails: fetches all the metadata that is recorded in -+ this heap "blueprint". Namely: heap name (caller to provide -+ storage, and indicate buffer size (including null terminator) in -+ BufSz arg), device virtual address and length, log2 of data page -+ size (will be one of 12, 14, 16, 18, 20, 21, at time of writing). -+ Note that there is no acquire/release semantics required, as this -+ data is guaranteed to be constant for the lifetime of the device -+ node. */ -+PVRSRV_ERROR -+DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_UINT32 uiHeapConfigIndex, -+ IMG_UINT32 uiHeapIndex, -+ IMG_CHAR *pszHeapNameOut, -+ IMG_UINT32 uiHeapNameBufSz, -+ IMG_DEV_VIRTADDR *psDevVAddrBaseOut, -+ IMG_DEVMEM_SIZE_T *puiHeapLengthOut, -+ IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut, -+ IMG_UINT32 *puiLog2DataPageSize, -+ IMG_UINT32 *puiLog2ImportAlignmentOut); -+ -+/* -+ * Devmem_FindHeapByName() -+ * -+ * returns the heap handle for the named _automagic_ heap in this -+ * context. "automagic" heaps are those that are born with the -+ * context from a blueprint -+ */ -+PVRSRV_ERROR -+DevmemFindHeapByName(const DEVMEM_CONTEXT *psCtx, -+ const IMG_CHAR *pszHeapName, -+ DEVMEM_HEAP **ppsHeapRet); -+ -+/* -+ * DevmemGetHeapBaseDevVAddr() -+ * -+ * returns the device virtual address of the base of the heap. -+ */ -+PVRSRV_ERROR -+DevmemGetHeapBaseDevVAddr(DEVMEM_HEAP *psHeap, -+ IMG_DEV_VIRTADDR *pDevVAddr); -+ -+/* -+ * DevmemGetHeapSize() -+ * -+ * returns the size of the heap. 
-+ */ -+IMG_INTERNAL DEVMEM_SIZE_T -+DevmemGetHeapSize(struct DEVMEM_HEAP_TAG *psHeap); -+ -+PVRSRV_ERROR -+DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc, -+ IMG_HANDLE *phImport); -+ -+PVRSRV_ERROR -+DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc, -+ IMG_UINT64 *pui64UID); -+ -+PVRSRV_ERROR -+DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc, -+ IMG_HANDLE *hReservation); -+ -+IMG_INTERNAL void -+DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc, -+ IMG_HANDLE *hPMR, -+ IMG_DEVMEM_OFFSET_T *puiPMROffset); -+ -+IMG_INTERNAL void -+DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc, -+ PVRSRV_MEMALLOCFLAGS_T *puiFlags); -+ -+IMG_INTERNAL SHARED_DEV_CONNECTION -+DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc); -+ -+PVRSRV_ERROR -+DevmemLocalImport(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hExtHandle, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ DEVMEM_MEMDESC **ppsMemDescPtr, -+ IMG_DEVMEM_SIZE_T *puiSizePtr, -+ const IMG_CHAR *pszAnnotation); -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext, -+ IMG_DEV_VIRTADDR sDevVAddr); -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemGetFaultAddress(DEVMEM_CONTEXT *psContext, -+ IMG_DEV_VIRTADDR *psFaultAddress); -+ -+IMG_INTERNAL PVRSRV_ERROR -+DevmemInvalidateFBSCTable(DEVMEM_CONTEXT *psContext, -+ IMG_UINT64 ui64FBSCEntries); -+ -+/* DevmemGetHeapLog2PageSize() -+ * -+ * Get the page size used for a certain heap. -+ */ -+IMG_UINT32 -+DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap); -+ -+/* DevmemGetMemFlags() -+ * -+ * Get the memalloc flags for a certain memdesc. -+ */ -+PVRSRV_MEMALLOCFLAGS_T -+DevmemGetMemAllocFlags(DEVMEM_MEMDESC *psMemDesc); -+ -+/* DevmemGetHeapReservedSize() -+ * -+ * Get the reserved size used for a certain heap. -+ */ -+IMG_DEVMEM_SIZE_T -+DevmemGetHeapReservedSize(DEVMEM_HEAP *psHeap); -+ -+/*************************************************************************/ /*! -+@Function RegisterDevMemPFNotify -+@Description Registers that the application wants to be signaled when a page -+ fault occurs. -+ -+@Input psContext Memory context the process that would like to -+ be notified about. -+@Input bRegister If true, register. If false, de-register. -+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_ -+ error code -+*/ /**************************************************************************/ -+IMG_INTERNAL PVRSRV_ERROR -+RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext, -+ IMG_BOOL bRegister); -+ -+/*************************************************************************/ /*! -+@Function DevmemHeapSetPremapStatus -+@Description In some special cases like virtualisation, a device memory heap -+ must be entirely backed by physical memory and mapped into the -+ device's virtual address space. This is done at context creation. -+ When objects are allocated from such a heap, the mapping part -+ must be skipped. The 'bPremapped' flag dictates if allocations -+ are to be mapped or not. 
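Editor's note: the heap "blueprint" enquiry functions declared above (DevmemHeapConfigCount, DevmemHeapCount, DevmemHeapDetails) are most easily understood as a nested walk, sketched below. This is illustrative only; the 64-byte name buffer is an arbitrary choice for the example and the reporting step is left as a comment.

#include "devicemem.h"

/* Walk every heap blueprint visible on a connection and inspect its geometry. */
static void ExampleDumpHeapConfigs(SHARED_DEV_CONNECTION hDevConnection)
{
	IMG_UINT32 uiNumConfigs, uiConfig;

	if (DevmemHeapConfigCount(hDevConnection, &uiNumConfigs) != PVRSRV_OK)
	{
		return;
	}

	for (uiConfig = 0; uiConfig < uiNumConfigs; uiConfig++)
	{
		IMG_UINT32 uiNumHeaps, uiHeap;

		if (DevmemHeapCount(hDevConnection, uiConfig, &uiNumHeaps) != PVRSRV_OK)
		{
			continue;
		}

		for (uiHeap = 0; uiHeap < uiNumHeaps; uiHeap++)
		{
			IMG_CHAR aszName[64];
			IMG_DEV_VIRTADDR sBase;
			IMG_DEVMEM_SIZE_T uiLen, uiReserved;
			IMG_UINT32 uiLog2Page, uiLog2ImportAlign;

			if (DevmemHeapDetails(hDevConnection, uiConfig, uiHeap,
			                      aszName, sizeof(aszName),
			                      &sBase, &uiLen, &uiReserved,
			                      &uiLog2Page, &uiLog2ImportAlign) != PVRSRV_OK)
			{
				continue;
			}

			/* ... e.g. report aszName, sBase.uiAddr, uiLen, 1 << uiLog2Page ... */
		}
	}
}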
-+ -+@Input psHeap Device memory heap to be updated -+@Input IsPremapped The premapping status to be set -+*/ /**************************************************************************/ -+IMG_INTERNAL void -+DevmemHeapSetPremapStatus(DEVMEM_HEAP *psHeap, IMG_BOOL IsPremapped); -+ -+#endif /* #ifndef SRVCLIENT_DEVICEMEM_H */ -diff --git a/drivers/gpu/drm/img-rogue/devicemem_heapcfg.c b/drivers/gpu/drm/img-rogue/devicemem_heapcfg.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/devicemem_heapcfg.c -@@ -0,0 +1,219 @@ -+/*************************************************************************/ /*! -+@File devicemem_heapcfg.c -+@Title Device Heap Configuration Helper Functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device memory management -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /***************************************************************************/ -+ -+/* our exported API */ -+#include "devicemem_heapcfg.h" -+#include "devicemem_utils.h" -+ -+#include "device.h" -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include "pvrsrv_error.h" -+#include "osfunc.h" -+ -+#include "connection_server.h" -+ -+static INLINE void _CheckBlueprintHeapAlignment(DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint) -+{ -+ IMG_UINT32 ui32OSPageSize = OSGetPageSize(); -+ -+ /* Any heap length should at least match OS page size at the minimum or -+ * a multiple of OS page size */ -+ if ((psHeapBlueprint->uiHeapLength < DEVMEM_HEAP_MINIMUM_SIZE) || -+ (psHeapBlueprint->uiHeapLength & (ui32OSPageSize - 1))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid Heap \"%s\" Size: " -+ "%"IMG_UINT64_FMTSPEC -+ "("IMG_DEVMEM_SIZE_FMTSPEC")", -+ __func__, -+ psHeapBlueprint->pszName, -+ psHeapBlueprint->uiHeapLength, -+ psHeapBlueprint->uiHeapLength)); -+ PVR_DPF((PVR_DBG_ERROR, -+ "Heap Size should always be at least the DevMem minimum size and a " -+ "multiple of OS Page Size:%u(0x%x)", -+ ui32OSPageSize, ui32OSPageSize)); -+ PVR_ASSERT(psHeapBlueprint->uiHeapLength >= ui32OSPageSize); -+ } -+ -+ -+ PVR_ASSERT(psHeapBlueprint->uiReservedRegionLength % DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY == 0); -+} -+ -+void HeapCfgBlueprintInit(const IMG_CHAR *pszName, -+ IMG_UINT64 ui64HeapBaseAddr, -+ IMG_DEVMEM_SIZE_T uiHeapLength, -+ IMG_DEVMEM_SIZE_T uiReservedRegionLength, -+ IMG_UINT32 ui32Log2DataPageSize, -+ IMG_UINT32 uiLog2ImportAlignment, -+ PFN_HEAP_INIT pfnInit, -+ PFN_HEAP_DEINIT pfnDeInit, -+ DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint) -+{ -+ psHeapBlueprint->pszName = pszName; -+ psHeapBlueprint->sHeapBaseAddr.uiAddr = ui64HeapBaseAddr; -+ psHeapBlueprint->uiHeapLength = uiHeapLength; -+ psHeapBlueprint->uiReservedRegionLength = uiReservedRegionLength; -+ psHeapBlueprint->uiLog2DataPageSize = ui32Log2DataPageSize; -+ psHeapBlueprint->uiLog2ImportAlignment = uiLog2ImportAlignment; -+ psHeapBlueprint->pfnInit = pfnInit; -+ psHeapBlueprint->pfnDeInit = pfnDeInit; -+ -+ _CheckBlueprintHeapAlignment(psHeapBlueprint); -+} -+ -+PVRSRV_ERROR -+HeapCfgHeapConfigCount(CONNECTION_DATA * psConnection, -+ const PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 *puiNumHeapConfigsOut) -+{ -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ *puiNumHeapConfigsOut = psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+HeapCfgHeapCount(CONNECTION_DATA * psConnection, -+ const PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 uiHeapConfigIndex, -+ IMG_UINT32 *puiNumHeapsOut) -+{ -+ if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs) -+ { -+ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX; -+ } -+ -+ *puiNumHeapsOut = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+HeapCfgHeapConfigName(CONNECTION_DATA * psConnection, -+ const PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 uiHeapConfigIndex, -+ IMG_UINT32 uiHeapConfigNameBufSz, -+ IMG_CHAR *pszHeapConfigNameOut) -+{ -+ if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs) -+ { -+ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX; -+ } -+ -+ OSSNPrintf(pszHeapConfigNameOut, uiHeapConfigNameBufSz, "%s", psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].pszName); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+HeapCfgGetCallbacks(const 
PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 uiHeapConfigIndex, -+ IMG_UINT32 uiHeapIndex, -+ PFN_HEAP_INIT *ppfnInit, -+ PFN_HEAP_DEINIT *ppfnDeinit) -+{ -+ DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode, "psDeviceNode"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(ppfnInit, "ppfnInit"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(ppfnDeinit, "ppfnDeinit"); -+ -+ if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs) -+ { -+ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX; -+ } -+ -+ if (uiHeapIndex >= psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps) -+ { -+ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX; -+ } -+ -+ psHeapBlueprint = &psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].psHeapBlueprintArray[uiHeapIndex]; -+ -+ *ppfnInit = psHeapBlueprint->pfnInit; -+ *ppfnDeinit = psHeapBlueprint->pfnDeInit; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+HeapCfgHeapDetails(CONNECTION_DATA * psConnection, -+ const PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 uiHeapConfigIndex, -+ IMG_UINT32 uiHeapIndex, -+ IMG_UINT32 uiHeapNameBufSz, -+ IMG_CHAR *pszHeapNameOut, -+ IMG_DEV_VIRTADDR *psDevVAddrBaseOut, -+ IMG_DEVMEM_SIZE_T *puiHeapLengthOut, -+ IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut, -+ IMG_UINT32 *puiLog2DataPageSizeOut, -+ IMG_UINT32 *puiLog2ImportAlignmentOut) -+{ -+ DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint; -+ -+ if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs) -+ { -+ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX; -+ } -+ -+ if (uiHeapIndex >= psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps) -+ { -+ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX; -+ } -+ -+ psHeapBlueprint = &psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].psHeapBlueprintArray[uiHeapIndex]; -+ -+ OSSNPrintf(pszHeapNameOut, uiHeapNameBufSz, "%s", psHeapBlueprint->pszName); -+ *psDevVAddrBaseOut = psHeapBlueprint->sHeapBaseAddr; -+ *puiHeapLengthOut = psHeapBlueprint->uiHeapLength; -+ *puiReservedRegionLengthOut = psHeapBlueprint->uiReservedRegionLength; -+ *puiLog2DataPageSizeOut = psHeapBlueprint->uiLog2DataPageSize; -+ *puiLog2ImportAlignmentOut = psHeapBlueprint->uiLog2ImportAlignment; -+ -+ return PVRSRV_OK; -+} -diff --git a/drivers/gpu/drm/img-rogue/devicemem_heapcfg.h b/drivers/gpu/drm/img-rogue/devicemem_heapcfg.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/devicemem_heapcfg.h -@@ -0,0 +1,223 @@ -+/**************************************************************************/ /*! -+@File -+@Title Device Heap Configuration Helper Functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device memory management -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+#ifndef DEVICEMEMHEAPCFG_H -+#define DEVICEMEMHEAPCFG_H -+ -+#include -+ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+/* -+ * Supported log2 page size values for RGX_GENERAL_NON_4K_HEAP_ID -+ */ -+#define RGX_HEAP_4KB_PAGE_SHIFT (12U) -+#define RGX_HEAP_16KB_PAGE_SHIFT (14U) -+#define RGX_HEAP_64KB_PAGE_SHIFT (16U) -+#define RGX_HEAP_256KB_PAGE_SHIFT (18U) -+#define RGX_HEAP_1MB_PAGE_SHIFT (20U) -+#define RGX_HEAP_2MB_PAGE_SHIFT (21U) -+ -+struct _PVRSRV_DEVICE_NODE_; -+struct _CONNECTION_DATA_; -+struct _DEVMEMINT_HEAP_; -+ -+/*************************************************************************/ /*! -+@Function Callback function PFN_HEAP_INIT -+@Description Device heap initialisation function. Called in server devmem -+ heap create if the callback pointer in RGX_HEAP_INFO is -+ not NULL. -+@Input psDeviceNode The device node. -+@Input psDevmemHeap Server internal devmem heap. -+@Output phPrivData Private data handle. Allocated resources -+ can be freed in PFN_HEAP_DEINIT. -+@Return PVRSRV_ERROR PVRSRV_OK or error code -+*/ /**************************************************************************/ -+typedef PVRSRV_ERROR (*PFN_HEAP_INIT)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, -+ struct _DEVMEMINT_HEAP_ *psDevmemHeap, -+ IMG_HANDLE *phPrivData); -+ -+/*************************************************************************/ /*! -+@Function Callback function PFN_HEAP_DEINIT -+@Description Device heap deinit function. 
Called in server devmem -+ heap create if the callback pointer in RGX_HEAP_INFO is -+ not NULL. -+@Input hPrivData Private data handle. To free any resources. -+*/ /**************************************************************************/ -+typedef void (*PFN_HEAP_DEINIT)(IMG_HANDLE hPrivData); -+ -+/* -+ A "heap config" is a blueprint to be used for initial setting up of heaps -+ when a device memory context is created. -+ -+ We define a data structure to define this, but it's really down to the -+ caller to populate it. This is all expected to be in-kernel. We provide an -+ API that client code can use to enquire about the blueprint, such that it may -+ do the heap set-up during the context creation call on behalf of the user. -+*/ -+ -+/* Blueprint for a single heap */ -+typedef struct _DEVMEM_HEAP_BLUEPRINT_ -+{ -+ /* Name of this heap - for debug purposes, and perhaps for lookup -+ by name */ -+ const IMG_CHAR *pszName; -+ -+ /* Virtual address of the beginning of the heap. This _must_ be a -+ multiple of the data page size for the heap. It is -+ _recommended_ that it be coarser than that - especially, it -+ should begin on a boundary appropriate to the MMU for the -+ device. For Rogue, this is a Page Directory boundary, or 1GB -+ (virtual address a multiple of 0x0040000000). */ -+ IMG_DEV_VIRTADDR sHeapBaseAddr; -+ -+ /* Length of the heap. Given that the END address of the heap has -+ a similar restriction to that of the _beginning_ of the heap. -+ That is the heap length _must_ be a whole number of data pages. -+ Again, the recommendation is that it ends on a 1GB boundary. -+ Again, this is not essential, but we do know that (at the time -+ of writing) the current implementation of mmu_common.c is such -+ that no two heaps may share a page directory, thus the -+ remaining virtual space would be wasted if the length were not -+ a multiple of 1GB */ -+ IMG_DEVMEM_SIZE_T uiHeapLength; -+ -+ /* VA space starting sHeapBaseAddr to uiReservedRegionLength-1 are reserved -+ for statically defined addresses (shared/known between clients and FW). -+ Services never maps allocations into this reserved address space _unless_ -+ explicitly requested via PVRSRVMapToDeviceAddress by passing sDevVirtAddr -+ which falls within this reserved range. Since this range is completely for -+ clients to manage (where allocations are page granular), it _must_ again be -+ a whole number of data pages. Additionally, another constraint enforces this -+ to be a multiple of DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY (which evaluates to -+ max page size supported) to support varied pages sizes */ -+ IMG_DEVMEM_SIZE_T uiReservedRegionLength; -+ -+ /* Data page size. This is the page size that is going to get -+ programmed into the MMU, so it needs to be a valid one for the -+ device. Importantly, the start address and length _must_ be -+ multiples of this page size. Note that the page size is -+ specified as the log 2 relative to 1 byte (e.g. 12 indicates -+ 4kB) */ -+ IMG_UINT32 uiLog2DataPageSize; -+ -+ /* Import alignment. Force imports to this heap to be -+ aligned to at least this value */ -+ IMG_UINT32 uiLog2ImportAlignment; -+ -+ /* Callback function for device specific heap init. */ -+ PFN_HEAP_INIT pfnInit; -+ -+ /* Callback function for device specific heap deinit. 
*/ -+ PFN_HEAP_DEINIT pfnDeInit; -+ -+} DEVMEM_HEAP_BLUEPRINT; -+ -+void HeapCfgBlueprintInit(const IMG_CHAR *pszName, -+ IMG_UINT64 ui64HeapBaseAddr, -+ IMG_DEVMEM_SIZE_T uiHeapLength, -+ IMG_DEVMEM_SIZE_T uiReservedRegionLength, -+ IMG_UINT32 ui32Log2DataPageSize, -+ IMG_UINT32 uiLog2ImportAlignment, -+ PFN_HEAP_INIT pfnInit, -+ PFN_HEAP_DEINIT pfnDeInit, -+ DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint); -+ -+/* Entire named heap config */ -+typedef struct _DEVMEM_HEAP_CONFIG_ -+{ -+ /* Name of this heap config - for debug and maybe lookup */ -+ const IMG_CHAR *pszName; -+ -+ /* Number of heaps in this config */ -+ IMG_UINT32 uiNumHeaps; -+ -+ /* Array of individual heap blueprints as defined above */ -+ DEVMEM_HEAP_BLUEPRINT *psHeapBlueprintArray; -+} DEVMEM_HEAP_CONFIG; -+ -+ -+PVRSRV_ERROR -+HeapCfgHeapConfigCount(struct _CONNECTION_DATA_ *psConnection, -+ const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, -+ IMG_UINT32 *puiNumHeapConfigsOut -+); -+ -+PVRSRV_ERROR -+HeapCfgHeapCount(struct _CONNECTION_DATA_ *psConnection, -+ const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, -+ IMG_UINT32 uiHeapConfigIndex, -+ IMG_UINT32 *puiNumHeapsOut -+); -+ -+PVRSRV_ERROR -+HeapCfgHeapConfigName(struct _CONNECTION_DATA_ *psConnection, -+ const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, -+ IMG_UINT32 uiHeapConfigIndex, -+ IMG_UINT32 uiHeapConfigNameBufSz, -+ IMG_CHAR *pszHeapConfigNameOut -+); -+ -+PVRSRV_ERROR -+HeapCfgHeapDetails(struct _CONNECTION_DATA_ *psConnection, -+ const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, -+ IMG_UINT32 uiHeapConfigIndex, -+ IMG_UINT32 uiHeapIndex, -+ IMG_UINT32 uiHeapNameBufSz, -+ IMG_CHAR *pszHeapNameOut, -+ IMG_DEV_VIRTADDR *psDevVAddrBaseOut, -+ IMG_DEVMEM_SIZE_T *puiHeapLengthOut, -+ IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut, -+ IMG_UINT32 *puiLog2DataPageSizeOut, -+ IMG_UINT32 *puiLog2ImportAlignmentOut -+); -+ -+PVRSRV_ERROR -+HeapCfgGetCallbacks(const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, -+ IMG_UINT32 uiHeapConfigIndex, -+ IMG_UINT32 uiHeapIndex, -+ PFN_HEAP_INIT *ppfnInit, -+ PFN_HEAP_DEINIT *ppfnDeinit); -+ -+#endif -diff --git a/drivers/gpu/drm/img-rogue/devicemem_history_server.c b/drivers/gpu/drm/img-rogue/devicemem_history_server.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/devicemem_history_server.c -@@ -0,0 +1,2313 @@ -+/*************************************************************************/ /*! -+@File -+@Title Devicemem history functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Devicemem history functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
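Editor's note: the DEVMEM_HEAP_BLUEPRINT/DEVMEM_HEAP_CONFIG structures and HeapCfgBlueprintInit() defined above are populated by the device layer; a minimal, hypothetical population is sketched here. Every name and number below is an example value chosen only to satisfy the documented constraints (1 GB-aligned base, page-multiple length, reserved region a multiple of the granularity, 4 KB data pages); it is not a configuration taken from this patch.

#include "devicemem_heapcfg.h"

static DEVMEM_HEAP_BLUEPRINT gasExampleBlueprints[1];

static DEVMEM_HEAP_CONFIG gasExampleHeapConfigs[] =
{
	{
		.pszName = "EXAMPLE_GENERAL",           /* hypothetical config name */
		.uiNumHeaps = 1,
		.psHeapBlueprintArray = gasExampleBlueprints,
	},
};

/* Fill in the single blueprint; gasExampleHeapConfigs would then be handed to
 * the device node's sDevMemoryInfo by the device-specific setup code. */
static void ExampleInitHeapConfig(void)
{
	HeapCfgBlueprintInit("EXAMPLE_GENERAL_HEAP",   /* heap name (example)    */
	                     0x0040000000ULL,          /* 1 GB-aligned base      */
	                     0x0040000000ULL,          /* 1 GB heap length       */
	                     0,                        /* no reserved region     */
	                     RGX_HEAP_4KB_PAGE_SHIFT,  /* log2 data page = 12    */
	                     RGX_HEAP_4KB_PAGE_SHIFT,  /* log2 import alignment  */
	                     NULL, NULL,               /* no init/deinit hooks   */
	                     &gasExampleBlueprints[0]);
}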
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "allocmem.h" -+#include "img_defs.h" -+#include "pmr.h" -+#include "pvrsrv.h" -+#include "pvrsrv_device.h" -+#include "pvr_debug.h" -+#include "devicemem_server.h" -+#include "lock.h" -+#include "di_server.h" -+#include "devicemem_history_server.h" -+#include "pdump_km.h" -+ -+ -+#if (PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES < 5000) -+#error PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES is too low. -+#elif (PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES > 250000) -+#error PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES is too high. -+#else -+#define ALLOCATION_LIST_NUM_ENTRIES PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES -+#endif -+ -+ -+/* data type to hold an allocation index. -+ * we make it 16 bits wide if possible -+ */ -+#if ALLOCATION_LIST_NUM_ENTRIES <= 0xFFFF -+typedef uint16_t ALLOC_INDEX_T; -+#else -+typedef uint32_t ALLOC_INDEX_T; -+#endif -+ -+/* a record describing a single allocation known to DeviceMemHistory. -+ * this is an element in a doubly linked list of allocations -+ */ -+typedef struct _RECORD_ALLOCATION_ -+{ -+ /* time when this RECORD_ALLOCATION was created/initialised */ -+ IMG_UINT64 ui64CreationTime; -+ /* serial number of the PMR relating to this allocation */ -+ IMG_UINT64 ui64Serial; -+ /* base DevVAddr of this allocation */ -+ IMG_DEV_VIRTADDR sDevVAddr; -+ /* size in bytes of this allocation */ -+ IMG_DEVMEM_SIZE_T uiSize; -+ /* Log2 page size of this allocation's GPU pages */ -+ IMG_UINT32 ui32Log2PageSize; -+ /* Process ID (PID) this allocation belongs to */ -+ IMG_PID uiPID; -+ /* index of previous allocation in the list */ -+ ALLOC_INDEX_T ui32Prev; -+ /* index of next allocation in the list */ -+ ALLOC_INDEX_T ui32Next; -+ /* annotation/name of this allocation */ -+ IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN]; -+} RECORD_ALLOCATION; -+ -+/* each command in the circular buffer is prefixed with an 8-bit value -+ * denoting the command type -+ */ -+typedef enum _COMMAND_TYPE_ -+{ -+ COMMAND_TYPE_NONE, -+ COMMAND_TYPE_TIMESTAMP, -+ COMMAND_TYPE_MAP_ALL, -+ COMMAND_TYPE_UNMAP_ALL, -+ COMMAND_TYPE_MAP_RANGE, -+ COMMAND_TYPE_UNMAP_RANGE, -+ /* sentinel value */ -+ COMMAND_TYPE_COUNT, -+} COMMAND_TYPE; -+ -+/* Timestamp command: -+ * This command is inserted into the circular buffer to provide an updated -+ * timestamp. 
-+ * The nanosecond-accuracy timestamp is packed into a 56-bit integer, in order -+ * for the whole command to fit into 10 bytes. -+ */ -+typedef struct __attribute__((__packed__))_COMMAND_TIMESTAMP_ -+{ -+ IMG_UINT8 aui8TimeNs[7]; -+} COMMAND_TIMESTAMP; -+ -+/* MAP_ALL command: -+ * This command denotes the allocation at the given index was wholly mapped -+ * in to the GPU MMU -+ */ -+typedef struct __attribute__((__packed__))_COMMAND_MAP_ALL_ -+{ -+ ALLOC_INDEX_T uiAllocIndex; -+} COMMAND_MAP_ALL; -+ -+/* UNMAP_ALL command: -+ * This command denotes the allocation at the given index was wholly unmapped -+ * from the GPU MMU -+ * Note: COMMAND_MAP_ALL and COMMAND_UNMAP_ALL commands have the same layout. -+ */ -+typedef COMMAND_MAP_ALL COMMAND_UNMAP_ALL; -+ -+// This shift allows room for 512GiB virtual memory regions at 4Kb pages. -+#define VM_RANGE_SHIFT 27 -+ -+/* packing attributes for the MAP_RANGE command */ -+#define MAP_RANGE_MAX_START ((1 << VM_RANGE_SHIFT) - 1) -+#define MAP_RANGE_MAX_RANGE ((1 << VM_RANGE_SHIFT) - 1) -+ -+/* MAP_RANGE command: -+ * Denotes a range of pages within the given allocation being mapped. -+ * The range is expressed as [Page Index] + [Page Count] -+ * This information is packed into a 72/88-bit struct, in order to make -+ * the command size 9/11 bytes. -+ */ -+ -+typedef struct __attribute__((__packed__))_COMMAND_MAP_RANGE_ -+{ -+ IMG_UINT8 aui8Data[7]; -+ ALLOC_INDEX_T uiAllocIndex; -+} COMMAND_MAP_RANGE; -+ -+/* UNMAP_RANGE command: -+ * Denotes a range of pages within the given allocation being unmapped. -+ * The range is expressed as [Page Index] + [Page Count] -+ * This information is packed into a 72/88-bit struct, in order to make -+ * the command size 9/11 bytes. -+ */ -+typedef COMMAND_MAP_RANGE COMMAND_UNMAP_RANGE; -+ -+/* wrapper structure for a command */ -+typedef struct __attribute__((__packed__))_COMMAND_WRAPPER_ -+{ -+ IMG_UINT8 ui8Type; -+ union { -+ COMMAND_TIMESTAMP sTimeStamp; -+ COMMAND_MAP_ALL sMapAll; -+ COMMAND_UNMAP_ALL sUnmapAll; -+ COMMAND_MAP_RANGE sMapRange; -+ COMMAND_UNMAP_RANGE sUnmapRange; -+ } u; -+} COMMAND_WRAPPER; -+ -+/* target size for the circular buffer of commands */ -+#if (PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2 < 5) -+#error PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2 is too low. -+#elif (PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2 > 18) -+#error PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2 is too high. 
-+#else -+#define CIRCULAR_BUFFER_SIZE_KB (1 << PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2) -+#endif -+ -+ -+/* turn the circular buffer target size into a number of commands */ -+#define CIRCULAR_BUFFER_NUM_COMMANDS ((CIRCULAR_BUFFER_SIZE_KB * 1024) / sizeof(COMMAND_WRAPPER)) -+ -+/* index value denoting the end of a list */ -+#define END_OF_LIST 0xFFFFFFFF -+#define ALLOC_INDEX_TO_PTR(psDevHData, idx) (&((psDevHData)->sRecords.pasAllocations[idx])) -+#define CHECK_ALLOC_INDEX(idx) (idx < ALLOCATION_LIST_NUM_ENTRIES) -+ -+/* wrapper structure for the allocation records and the commands circular buffer */ -+typedef struct _RECORDS_ -+{ -+ RECORD_ALLOCATION *pasAllocations; -+ IMG_UINT32 ui32AllocationsListHead; -+ -+ IMG_UINT32 ui32Head; -+ IMG_UINT32 ui32Tail; -+ COMMAND_WRAPPER *pasCircularBuffer; -+ /* Times the CB has wrapped back to start */ -+ IMG_UINT64 ui64CBWrapCount; -+ /* Records of CB commands sent */ -+ IMG_UINT64 ui64MapAllCount;//Incremented by InsertMapAllCommand() -+ IMG_UINT64 ui64UnMapAllCount;//Incremented by InsertUnmapAllCommand() -+ IMG_UINT64 ui64MapRangeCount;//Incremented by InsertMapRangeCommand() -+ IMG_UINT64 ui64UnMapRangeCount;//Incremented by InsertUnmapRangeCommand() -+ IMG_UINT64 ui64TimeStampCount;//Incremented by InsertTimeStampCommand() -+} RECORDS; -+ -+typedef struct _DEVICEMEM_HISTORY_DATA_ -+{ -+ RECORDS sRecords; -+ POS_LOCK hLock; -+} DEVICEMEM_HISTORY_DATA; -+ -+/* Maximum number of device instances supported. This should be DDK global */ -+static DEVICEMEM_HISTORY_DATA *gapsDevicememHistoryData[PVRSRV_MAX_DEVICES] = { NULL }; -+ -+/* DevmemFindDataFromDev -+ * -+ * Return the address of the associated DEVICEMEM_HISTORY_DATA for the given -+ * device. If psDevNode associated unit is out of range we return NULL. -+ */ -+static DEVICEMEM_HISTORY_DATA *DevmemFindDataFromDev(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ DEVICEMEM_HISTORY_DATA *psDevmemData = NULL; -+ -+ IMG_UINT32 uiUnit = psDevNode->sDevId.ui32InternalID; -+ -+ PVR_ASSERT(uiUnit < PVRSRV_MAX_DEVICES); -+ if ((uiUnit < PVRSRV_MAX_DEVICES) && (gapsDevicememHistoryData[uiUnit] != NULL)) -+ { -+ psDevmemData = gapsDevicememHistoryData[uiUnit]; -+ } -+ -+ return psDevmemData; -+} -+ -+/* gsDevicememHistoryData is static, hLock is NULL unless -+ * EnablePageFaultDebug is set and DevicememHistoryInitKM() -+ * was called. -+ */ -+static void DevicememHistoryLock(DEVICEMEM_HISTORY_DATA *psDevHData) -+{ -+ if (psDevHData->hLock) -+ { -+ OSLockAcquire(psDevHData->hLock); -+ } -+} -+ -+static void DevicememHistoryUnlock(DEVICEMEM_HISTORY_DATA *psDevHData) -+{ -+ if (psDevHData->hLock) -+ { -+ OSLockRelease(psDevHData->hLock); -+ } -+} -+ -+/* given a time stamp, calculate the age in nanoseconds */ -+static IMG_UINT64 _CalculateAge(IMG_UINT64 ui64Now, -+ IMG_UINT64 ui64Then, -+ IMG_UINT64 ui64Max) -+{ -+ if (ui64Now >= ui64Then) -+ { -+ /* no clock wrap */ -+ return ui64Now - ui64Then; -+ } -+ else -+ { -+ /* clock has wrapped */ -+ return (ui64Max - ui64Then) + ui64Now + 1; -+ } -+} -+ -+/* AcquireCBSlot: -+ * Acquire the next slot in the circular buffer and -+ * move the circular buffer head along by one -+ * Returns a pointer to the acquired slot. 
-+ */ -+static COMMAND_WRAPPER *AcquireCBSlot(DEVICEMEM_HISTORY_DATA *psDevHData) -+{ -+ COMMAND_WRAPPER *psSlot; -+ -+ psSlot = &psDevHData->sRecords.pasCircularBuffer[psDevHData->sRecords.ui32Head]; -+ -+ psDevHData->sRecords.ui32Head = -+ (psDevHData->sRecords.ui32Head + 1) -+ % CIRCULAR_BUFFER_NUM_COMMANDS; -+ -+ if (psDevHData->sRecords.ui32Head == 0) -+ { -+ psDevHData->sRecords.ui64CBWrapCount++; -+ } -+ -+ return psSlot; -+} -+ -+/* TimeStampPack: -+ * Packs the given timestamp value into the COMMAND_TIMESTAMP structure. -+ * This takes a 64-bit nanosecond timestamp and packs it in to a 56-bit -+ * integer in the COMMAND_TIMESTAMP command. -+ */ -+static void TimeStampPack(COMMAND_TIMESTAMP *psTimeStamp, IMG_UINT64 ui64Now) -+{ -+ IMG_UINT32 i; -+ -+ for (i = 0; i < ARRAY_SIZE(psTimeStamp->aui8TimeNs); i++) -+ { -+ psTimeStamp->aui8TimeNs[i] = ui64Now & 0xFF; -+ ui64Now >>= 8; -+ } -+} -+ -+/* packing a 64-bit nanosecond into a 7-byte integer loses the -+ * top 8 bits of data. This must be taken into account when -+ * comparing a full timestamp against an unpacked timestamp -+ */ -+#define TIME_STAMP_MASK ((1LLU << 56) - 1) -+#define DO_TIME_STAMP_MASK(ns64) (ns64 & TIME_STAMP_MASK) -+ -+/* TimeStampUnpack: -+ * Unpack the timestamp value from the given COMMAND_TIMESTAMP command -+ */ -+static IMG_UINT64 TimeStampUnpack(COMMAND_TIMESTAMP *psTimeStamp) -+{ -+ IMG_UINT64 ui64TimeNs = 0; -+ IMG_UINT32 i; -+ -+ for (i = ARRAY_SIZE(psTimeStamp->aui8TimeNs); i > 0; i--) -+ { -+ ui64TimeNs <<= 8; -+ ui64TimeNs |= (IMG_UINT64) psTimeStamp->aui8TimeNs[i - 1]; -+ } -+ -+ return ui64TimeNs; -+} -+ -+#if defined(PDUMP) -+ -+static void EmitPDumpAllocation(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32AllocationIndex, -+ RECORD_ALLOCATION *psAlloc) -+{ -+ PDUMPCOMMENT(psDeviceNode, -+ "[SrvPFD] Allocation: %u" -+ " Addr: " IMG_DEV_VIRTADDR_FMTSPEC -+ " Size: " IMG_DEVMEM_SIZE_FMTSPEC -+ " Page size: %u" -+ " PID: %u" -+ " Process: %s" -+ " Name: %s", -+ ui32AllocationIndex, -+ psAlloc->sDevVAddr.uiAddr, -+ psAlloc->uiSize, -+ 1U << psAlloc->ui32Log2PageSize, -+ psAlloc->uiPID, -+ OSGetCurrentClientProcessNameKM(), -+ psAlloc->szName); -+} -+ -+static void EmitPDumpMapUnmapAll(PVRSRV_DEVICE_NODE *psDeviceNode, -+ COMMAND_TYPE eType, -+ IMG_UINT32 ui32AllocationIndex) -+{ -+ const IMG_CHAR *pszOpName; -+ -+ switch (eType) -+ { -+ case COMMAND_TYPE_MAP_ALL: -+ pszOpName = "MAP_ALL"; -+ break; -+ case COMMAND_TYPE_UNMAP_ALL: -+ pszOpName = "UNMAP_ALL"; -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapAll: Invalid type: %u", -+ eType)); -+ return; -+ -+ } -+ -+ PDUMPCOMMENT(psDeviceNode, -+ "[SrvPFD] Op: %s Allocation: %u", -+ pszOpName, -+ ui32AllocationIndex); -+} -+ -+static void EmitPDumpMapUnmapRange(PVRSRV_DEVICE_NODE *psDeviceNode, -+ COMMAND_TYPE eType, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 ui32StartPage, -+ IMG_UINT32 ui32Count) -+{ -+ const IMG_CHAR *pszOpName; -+ -+ switch (eType) -+ { -+ case COMMAND_TYPE_MAP_RANGE: -+ pszOpName = "MAP_RANGE"; -+ break; -+ case COMMAND_TYPE_UNMAP_RANGE: -+ pszOpName = "UNMAP_RANGE"; -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapRange: Invalid type: %u", -+ eType)); -+ return; -+ } -+ -+ PDUMPCOMMENT(psDeviceNode, -+ "[SrvPFD] Op: %s Allocation: %u Start Page: %u Count: %u", -+ pszOpName, -+ ui32AllocationIndex, -+ ui32StartPage, -+ ui32Count); -+} -+ -+#endif -+ -+/* InsertTimeStampCommand: -+ * Insert a timestamp command into the circular buffer. 
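/* A minimal standalone sketch of the 56-bit timestamp packing implemented by
 * TimeStampPack()/TimeStampUnpack() above. It uses plain C99 types in place of
 * the IMG_* typedefs and is illustrative only, not part of the driver sources:
 * the low 7 bytes of a nanosecond timestamp are stored least-significant byte
 * first, so a full 64-bit value must be masked (as DO_TIME_STAMP_MASK() does)
 * before being compared with an unpacked one.
 */
#include <stdint.h>
#include <stdio.h>

#define TS_BYTES 7                               /* 56 bits of timestamp */
#define TS_MASK  ((1ULL << (TS_BYTES * 8)) - 1)  /* same role as TIME_STAMP_MASK */

/* Pack the low 56 bits, least significant byte first. */
static void ts_pack(uint8_t out[TS_BYTES], uint64_t ns)
{
    for (int i = 0; i < TS_BYTES; i++)
    {
        out[i] = (uint8_t)(ns & 0xFF);
        ns >>= 8;
    }
}

/* Reassemble the 56-bit value; the top 8 bits of the original are lost. */
static uint64_t ts_unpack(const uint8_t in[TS_BYTES])
{
    uint64_t ns = 0;
    for (int i = TS_BYTES; i > 0; i--)
    {
        ns <<= 8;
        ns |= in[i - 1];
    }
    return ns;
}

int main(void)
{
    uint64_t now = 0x01AABBCCDDEEFF11ULL;  /* arbitrary 64-bit ns value */
    uint8_t packed[TS_BYTES];

    ts_pack(packed, now);

    /* Both lines print aabbccddeeff11: the packed form only ever holds the
     * masked value, so comparisons must mask the full timestamp first. */
    printf("masked original: %llx\n", (unsigned long long)(now & TS_MASK));
    printf("unpacked       : %llx\n", (unsigned long long)ts_unpack(packed));
    return 0;
}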
-+ */ -+static void InsertTimeStampCommand(IMG_UINT64 ui64Now, PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ COMMAND_WRAPPER *psCommand; -+ DEVICEMEM_HISTORY_DATA *psDevHData = DevmemFindDataFromDev(psDevNode); -+ -+ if (psDevHData == NULL) -+ { -+ return; -+ } -+ -+ psCommand = AcquireCBSlot(psDevHData); -+ -+ psCommand->ui8Type = COMMAND_TYPE_TIMESTAMP; -+ psDevHData->sRecords.ui64TimeStampCount++; -+ TimeStampPack(&psCommand->u.sTimeStamp, ui64Now); -+} -+ -+/* InsertMapAllCommand: -+ * Insert a "MAP_ALL" command for the given allocation into the circular buffer -+ */ -+static void InsertMapAllCommand(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32AllocIndex) -+{ -+ COMMAND_WRAPPER *psCommand; -+ DEVICEMEM_HISTORY_DATA *psDevHData = DevmemFindDataFromDev(psDeviceNode); -+ -+ if (psDevHData == NULL) -+ { -+ return; -+ } -+ -+ psCommand = AcquireCBSlot(psDevHData); -+ -+ psCommand->ui8Type = COMMAND_TYPE_MAP_ALL; -+ psCommand->u.sMapAll.uiAllocIndex = ui32AllocIndex; -+ psDevHData->sRecords.ui64MapAllCount++; -+ -+#if defined(PDUMP) -+ EmitPDumpMapUnmapAll(psDeviceNode, COMMAND_TYPE_MAP_ALL, ui32AllocIndex); -+#endif -+} -+ -+/* InsertUnmapAllCommand: -+ * Insert a "UNMAP_ALL" command for the given allocation into the circular buffer -+ */ -+static void InsertUnmapAllCommand(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32AllocIndex) -+{ -+ COMMAND_WRAPPER *psCommand; -+ DEVICEMEM_HISTORY_DATA *psDevHData = DevmemFindDataFromDev(psDeviceNode); -+ -+ if (psDevHData == NULL) -+ { -+ return; -+ } -+ -+ psCommand = AcquireCBSlot(psDevHData); -+ -+ psCommand->ui8Type = COMMAND_TYPE_UNMAP_ALL; -+ psCommand->u.sUnmapAll.uiAllocIndex = ui32AllocIndex; -+ psDevHData->sRecords.ui64UnMapAllCount++; -+ -+#if defined(PDUMP) -+ EmitPDumpMapUnmapAll(psDeviceNode, COMMAND_TYPE_UNMAP_ALL, ui32AllocIndex); -+#endif -+} -+ -+/* MapRangePack: -+ * Pack the given StartPage and Count values into the 75-bit representation -+ * in the MAP_RANGE command. -+ */ -+static void MapRangePack(COMMAND_MAP_RANGE *psMapRange, -+ IMG_UINT32 ui32StartPage, -+ IMG_UINT32 ui32Count) -+ -+{ -+ IMG_UINT64 ui64Data; -+ IMG_UINT32 i; -+ -+ /* we must encode the data into 54 bits: -+ * 27 bits for the start page index -+ * 27 bits for the range -+ */ -+ -+ PVR_ASSERT(ui32StartPage <= MAP_RANGE_MAX_START); -+ PVR_ASSERT(ui32Count <= MAP_RANGE_MAX_RANGE); -+ -+ ui32StartPage &= MAP_RANGE_MAX_START; -+ ui32Count &= MAP_RANGE_MAX_RANGE; -+ -+ ui64Data = (((IMG_UINT64) ui32StartPage) << VM_RANGE_SHIFT) | ui32Count; -+ -+ for (i = 0; i < ARRAY_SIZE(psMapRange->aui8Data); i++) -+ { -+ psMapRange->aui8Data[i] = ui64Data & 0xFF; -+ ui64Data >>= 8; -+ } -+} -+ -+/* MapRangeUnpack: -+ * Unpack the StartPage and Count values from the 75-bit representation -+ * in the MAP_RANGE command. -+ */ -+static void MapRangeUnpack(COMMAND_MAP_RANGE *psMapRange, -+ IMG_UINT32 *pui32StartPage, -+ IMG_UINT32 *pui32Count) -+{ -+ IMG_UINT64 ui64Data = 0; -+ IMG_UINT32 i; -+ -+ for (i = ARRAY_SIZE(psMapRange->aui8Data); i > 0; i--) -+ { -+ ui64Data <<= 8; -+ ui64Data |= (IMG_UINT64) psMapRange->aui8Data[i - 1]; -+ } -+ -+ *pui32StartPage = (IMG_UINT32)(ui64Data >> VM_RANGE_SHIFT); -+ *pui32Count = (IMG_UINT32) ui64Data & (MAP_RANGE_MAX_RANGE); -+} -+ -+/* InsertMapRangeCommand: -+ * Insert a MAP_RANGE command into the circular buffer with the given -+ * StartPage and Count values. 
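/* A minimal standalone sketch of the MAP_RANGE/UNMAP_RANGE payload encoding
 * performed by MapRangePack()/MapRangeUnpack() above: a 27-bit start page and
 * a 27-bit page count (VM_RANGE_SHIFT wide each) are combined into one 54-bit
 * value and stored in 7 little-endian bytes. Plain C99 types are used instead
 * of the IMG_* typedefs; this is illustrative only, not driver code.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RANGE_SHIFT 27                          /* same role as VM_RANGE_SHIFT */
#define RANGE_MAX   ((1u << RANGE_SHIFT) - 1)   /* max start page and max count */
#define DATA_BYTES  7

static void range_pack(uint8_t out[DATA_BYTES], uint32_t start, uint32_t count)
{
    uint64_t v;

    assert(start <= RANGE_MAX && count <= RANGE_MAX);
    v = ((uint64_t)start << RANGE_SHIFT) | count;

    for (int i = 0; i < DATA_BYTES; i++)
    {
        out[i] = (uint8_t)(v & 0xFF);
        v >>= 8;
    }
}

static void range_unpack(const uint8_t in[DATA_BYTES],
                         uint32_t *start, uint32_t *count)
{
    uint64_t v = 0;

    for (int i = DATA_BYTES; i > 0; i--)
    {
        v = (v << 8) | in[i - 1];
    }
    *start = (uint32_t)(v >> RANGE_SHIFT);
    *count = (uint32_t)(v & RANGE_MAX);
}

int main(void)
{
    uint8_t buf[DATA_BYTES];
    uint32_t s, c;

    range_pack(buf, 123456u, 7890u);
    range_unpack(buf, &s, &c);
    printf("start=%u count=%u\n", s, c);  /* prints start=123456 count=7890 */
    return 0;
}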
-+ */ -+static void InsertMapRangeCommand(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32AllocIndex, -+ IMG_UINT32 ui32StartPage, -+ IMG_UINT32 ui32Count) -+{ -+ COMMAND_WRAPPER *psCommand; -+ DEVICEMEM_HISTORY_DATA *psDevHData = DevmemFindDataFromDev(psDeviceNode); -+ -+ if (psDevHData == NULL) -+ { -+ return; -+ } -+ -+ psCommand = AcquireCBSlot(psDevHData); -+ -+ psCommand->ui8Type = COMMAND_TYPE_MAP_RANGE; -+ psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex; -+ psDevHData->sRecords.ui64MapRangeCount++; -+ -+ MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count); -+ -+#if defined(PDUMP) -+ EmitPDumpMapUnmapRange(psDeviceNode, -+ COMMAND_TYPE_MAP_RANGE, -+ ui32AllocIndex, -+ ui32StartPage, -+ ui32Count); -+#endif -+} -+ -+/* InsertUnmapRangeCommand: -+ * Insert a UNMAP_RANGE command into the circular buffer with the given -+ * StartPage and Count values. -+ */ -+static void InsertUnmapRangeCommand(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32AllocIndex, -+ IMG_UINT32 ui32StartPage, -+ IMG_UINT32 ui32Count) -+{ -+ COMMAND_WRAPPER *psCommand; -+ DEVICEMEM_HISTORY_DATA *psDevHData = DevmemFindDataFromDev(psDeviceNode); -+ -+ if (psDevHData == NULL) -+ { -+ return; -+ } -+ -+ psCommand = AcquireCBSlot(psDevHData); -+ -+ psCommand->ui8Type = COMMAND_TYPE_UNMAP_RANGE; -+ psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex; -+ psDevHData->sRecords.ui64UnMapRangeCount++; -+ -+ MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count); -+ -+#if defined(PDUMP) -+ EmitPDumpMapUnmapRange(psDeviceNode, -+ COMMAND_TYPE_UNMAP_RANGE, -+ ui32AllocIndex, -+ ui32StartPage, -+ ui32Count); -+#endif -+} -+ -+/* InsertAllocationToList: -+ * Helper function for the allocation list. -+ * Inserts the given allocation at the head of the list, whose current head is -+ * pointed to by pui32ListHead -+ */ -+static void InsertAllocationToList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc) -+{ -+ RECORD_ALLOCATION *psAlloc; -+ DEVICEMEM_HISTORY_DATA *psDevHData; -+ -+ psDevHData = IMG_CONTAINER_OF(pui32ListHead, DEVICEMEM_HISTORY_DATA, -+ sRecords.ui32AllocationsListHead); -+ -+ psAlloc = ALLOC_INDEX_TO_PTR(psDevHData, ui32Alloc); -+ -+ if (*pui32ListHead == END_OF_LIST) -+ { -+ /* list is currently empty, so just replace it */ -+ *pui32ListHead = ui32Alloc; -+ psAlloc->ui32Next = psAlloc->ui32Prev = *pui32ListHead; -+ } -+ else -+ { -+ RECORD_ALLOCATION *psHeadAlloc; -+ RECORD_ALLOCATION *psTailAlloc; -+ -+ psHeadAlloc = ALLOC_INDEX_TO_PTR(psDevHData, *pui32ListHead); -+ psTailAlloc = ALLOC_INDEX_TO_PTR(psDevHData, psHeadAlloc->ui32Prev); -+ -+ /* make the new alloc point forwards to the previous head */ -+ psAlloc->ui32Next = *pui32ListHead; -+ /* make the new alloc point backwards to the previous tail */ -+ psAlloc->ui32Prev = psHeadAlloc->ui32Prev; -+ -+ /* the head is now our new alloc */ -+ *pui32ListHead = ui32Alloc; -+ -+ /* the old head now points back to the new head */ -+ psHeadAlloc->ui32Prev = *pui32ListHead; -+ -+ /* the tail now points forward to the new head */ -+ psTailAlloc->ui32Next = ui32Alloc; -+ } -+} -+ -+static void InsertAllocationToBusyList(DEVICEMEM_HISTORY_DATA *psDevHData, -+ IMG_UINT32 ui32Alloc) -+{ -+ InsertAllocationToList(&psDevHData->sRecords.ui32AllocationsListHead, ui32Alloc); -+} -+ -+/* RemoveAllocationFromList: -+ * Helper function for the allocation list. 
-+ * Removes the given allocation from the list, whose head is -+ * pointed to by pui32ListHead -+ */ -+static void RemoveAllocationFromList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc) -+{ -+ RECORD_ALLOCATION *psAlloc; -+ -+ DEVICEMEM_HISTORY_DATA *psDevHData; -+ -+ psDevHData = IMG_CONTAINER_OF(pui32ListHead, DEVICEMEM_HISTORY_DATA, -+ sRecords.ui32AllocationsListHead); -+ -+ psAlloc = ALLOC_INDEX_TO_PTR(psDevHData, ui32Alloc); -+ -+ /* if this is the only element in the list then just make the list empty */ -+ if ((*pui32ListHead == ui32Alloc) && (psAlloc->ui32Next == ui32Alloc)) -+ { -+ *pui32ListHead = END_OF_LIST; -+ } -+ else -+ { -+ RECORD_ALLOCATION *psPrev, *psNext; -+ -+ psPrev = ALLOC_INDEX_TO_PTR(psDevHData, psAlloc->ui32Prev); -+ psNext = ALLOC_INDEX_TO_PTR(psDevHData, psAlloc->ui32Next); -+ -+ /* remove the allocation from the list */ -+ psPrev->ui32Next = psAlloc->ui32Next; -+ psNext->ui32Prev = psAlloc->ui32Prev; -+ -+ /* if this allocation is the head then update the head */ -+ if (*pui32ListHead == ui32Alloc) -+ { -+ *pui32ListHead = psAlloc->ui32Prev; -+ } -+ } -+} -+ -+static void RemoveAllocationFromBusyList(DEVICEMEM_HISTORY_DATA *psDevHData, IMG_UINT32 ui32Alloc) -+{ -+ RemoveAllocationFromList(&psDevHData->sRecords.ui32AllocationsListHead, ui32Alloc); -+} -+ -+/* TouchBusyAllocation: -+ * Move the given allocation to the head of the list -+ */ -+static void TouchBusyAllocation(DEVICEMEM_HISTORY_DATA *psDevHData, IMG_UINT32 ui32Alloc) -+{ -+ RemoveAllocationFromBusyList(psDevHData, ui32Alloc); -+ InsertAllocationToBusyList(psDevHData, ui32Alloc); -+} -+ -+/* GetOldestBusyAllocation: -+ * Returns the index of the oldest allocation in the MRU list -+ */ -+static IMG_UINT32 GetOldestBusyAllocation(DEVICEMEM_HISTORY_DATA *psDevHData) -+{ -+ IMG_UINT32 ui32Alloc; -+ RECORD_ALLOCATION *psAlloc; -+ -+ if (psDevHData != NULL) -+ { -+ ui32Alloc = psDevHData->sRecords.ui32AllocationsListHead; -+ } -+ else -+ { -+ ui32Alloc = END_OF_LIST; /* Default if no psDevHData */ -+ } -+ -+ if (ui32Alloc == END_OF_LIST) -+ { -+ return END_OF_LIST; -+ } -+ -+ psAlloc = ALLOC_INDEX_TO_PTR(psDevHData, ui32Alloc); -+ -+ return psAlloc->ui32Prev; -+} -+ -+static IMG_UINT32 GetFreeAllocation(DEVICEMEM_HISTORY_DATA *psDevHData) -+{ -+ IMG_UINT32 ui32Alloc; -+ -+ ui32Alloc = GetOldestBusyAllocation(psDevHData); -+ -+ return ui32Alloc; -+} -+ -+ -+/* InitialiseAllocation: -+ * Initialise the given allocation structure with the given properties -+ */ -+static void InitialiseAllocation(RECORD_ALLOCATION *psAlloc, -+ const IMG_CHAR *pszName, -+ IMG_UINT64 ui64Serial, -+ IMG_PID uiPID, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32Log2PageSize) -+{ -+ OSStringLCopy(psAlloc->szName, pszName, sizeof(psAlloc->szName)); -+ psAlloc->ui64Serial = ui64Serial; -+ psAlloc->uiPID = uiPID; -+ psAlloc->sDevVAddr = sDevVAddr; -+ psAlloc->uiSize = uiSize; -+ psAlloc->ui32Log2PageSize = ui32Log2PageSize; -+ psAlloc->ui64CreationTime = OSClockns64(); -+} -+ -+/* CreateAllocation: -+ * Creates a new allocation with the given properties then outputs the -+ * index of the allocation -+ */ -+static PVRSRV_ERROR CreateAllocation(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszName, -+ IMG_UINT64 ui64Serial, -+ IMG_PID uiPID, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_BOOL bAutoPurge, -+ IMG_UINT32 *puiAllocationIndex) -+{ -+ IMG_UINT32 ui32Alloc; -+ RECORD_ALLOCATION *psAlloc; -+ -+ DEVICEMEM_HISTORY_DATA *psDevHData; -+ 
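/* A minimal standalone sketch of the index-linked MRU list used for
 * RECORD_ALLOCATION recycling by InsertAllocationToList(),
 * RemoveAllocationFromList(), TouchBusyAllocation() and
 * GetOldestBusyAllocation() above: records live in a fixed array, prev/next
 * hold array indices rather than pointers, the list head is the most recently
 * used entry, and head->prev is the oldest entry, which is the one recycled
 * when a new record is needed. This is a close analogue, not a copy of the
 * driver code, and uses plain C99 types.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_ENTRIES 8
#define END_OF_LIST 0xFFFFFFFFu

typedef struct { uint32_t prev, next; char tag; } entry_t;

static entry_t  entries[NUM_ENTRIES];
static uint32_t head = END_OF_LIST;

static void list_insert_head(uint32_t idx)
{
    if (head == END_OF_LIST)
    {
        entries[idx].prev = entries[idx].next = idx;  /* single-element ring */
    }
    else
    {
        uint32_t tail = entries[head].prev;

        entries[idx].next  = head;
        entries[idx].prev  = tail;
        entries[head].prev = idx;
        entries[tail].next = idx;
    }
    head = idx;
}

static void list_remove(uint32_t idx)
{
    if (entries[idx].next == idx)        /* only element in the ring */
    {
        head = END_OF_LIST;
        return;
    }
    entries[entries[idx].prev].next = entries[idx].next;
    entries[entries[idx].next].prev = entries[idx].prev;
    if (head == idx)
    {
        head = entries[idx].next;
    }
}

/* Move an entry to the head: the equivalent of TouchBusyAllocation(). */
static void list_touch(uint32_t idx)
{
    list_remove(idx);
    list_insert_head(idx);
}

int main(void)
{
    for (uint32_t i = 0; i < NUM_ENTRIES; i++)
    {
        entries[i].tag = (char)('A' + i);
        list_insert_head(i);             /* MRU order after the loop: H ... A */
    }

    list_touch(2);                       /* entry 'C' becomes the most recent */

    printf("MRU: %c, oldest (next to be recycled): %c\n",
           entries[head].tag, entries[entries[head].prev].tag);  /* C and A */
    return 0;
}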
-+ psDevHData = DevmemFindDataFromDev(psDeviceNode); -+ -+ if (psDevHData == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ ui32Alloc = GetFreeAllocation(psDevHData); -+ -+ psAlloc = ALLOC_INDEX_TO_PTR(psDevHData, ui32Alloc); -+ -+ InitialiseAllocation(ALLOC_INDEX_TO_PTR(psDevHData, ui32Alloc), -+ pszName, -+ ui64Serial, -+ uiPID, -+ sDevVAddr, -+ uiSize, -+ ui32Log2PageSize); -+ -+ /* put the newly initialised allocation at the front of the MRU list */ -+ TouchBusyAllocation(psDevHData, ui32Alloc); -+ -+ *puiAllocationIndex = ui32Alloc; -+ -+#if defined(PDUMP) -+ EmitPDumpAllocation(psDeviceNode, ui32Alloc, psAlloc); -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+/* MatchAllocation: -+ * Tests if the allocation at the given index matches the supplied properties. -+ * Returns IMG_TRUE if it is a match, otherwise IMG_FALSE. -+ */ -+static IMG_BOOL MatchAllocation(DEVICEMEM_HISTORY_DATA *psDevHData, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT64 ui64Serial, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR *pszName, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_PID uiPID) -+{ -+ RECORD_ALLOCATION *psAlloc; -+ -+ if (psDevHData == NULL) -+ { -+ return IMG_FALSE; -+ } -+ -+ psAlloc = ALLOC_INDEX_TO_PTR(psDevHData, ui32AllocationIndex); -+ -+ return (psAlloc->ui64Serial == ui64Serial) && -+ (psAlloc->sDevVAddr.uiAddr == sDevVAddr.uiAddr) && -+ (psAlloc->uiSize == uiSize) && -+ (psAlloc->ui32Log2PageSize == ui32Log2PageSize) && -+ (OSStringNCompare(psAlloc->szName, pszName, DEVMEM_ANNOTATION_MAX_LEN) == 0); -+} -+ -+/* FindOrCreateAllocation: -+ * Convenience function. -+ * Given a set of allocation properties (serial, DevVAddr, size, name, etc), -+ * this function will look for an existing record of this allocation and -+ * create the allocation if there is no existing record -+ */ -+static PVRSRV_ERROR FindOrCreateAllocation(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32AllocationIndexHint, -+ IMG_UINT64 ui64Serial, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const char *pszName, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_PID uiPID, -+ IMG_BOOL bSparse, -+ IMG_UINT32 *pui32AllocationIndexOut, -+ IMG_BOOL *pbCreated) -+{ -+ IMG_UINT32 ui32AllocationIndex; -+ PVRSRV_ERROR eError; -+ -+ if (ui32AllocationIndexHint != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) -+ { -+ IMG_BOOL bHaveAllocation; -+ -+ /* first, try to match against the index given by the client. 
-+ * if the caller provided a hint but the allocation record is no longer -+ * there, it must have been purged, so go ahead and create a new allocation -+ */ -+ bHaveAllocation = MatchAllocation(DevmemFindDataFromDev(psDeviceNode), -+ ui32AllocationIndexHint, -+ ui64Serial, -+ sDevVAddr, -+ uiSize, -+ pszName, -+ ui32Log2PageSize, -+ uiPID); -+ if (bHaveAllocation) -+ { -+ *pbCreated = IMG_FALSE; -+ *pui32AllocationIndexOut = ui32AllocationIndexHint; -+ return PVRSRV_OK; -+ } -+ } -+ -+ /* if there is no record of the allocation then we -+ * create it now -+ */ -+ eError = CreateAllocation(psDeviceNode, -+ pszName, -+ ui64Serial, -+ uiPID, -+ sDevVAddr, -+ uiSize, -+ ui32Log2PageSize, -+ IMG_TRUE, -+ &ui32AllocationIndex); -+ -+ if (eError == PVRSRV_OK) -+ { -+ *pui32AllocationIndexOut = ui32AllocationIndex; -+ *pbCreated = IMG_TRUE; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to create record for allocation %s", -+ __func__, -+ pszName)); -+ } -+ -+ return eError; -+} -+ -+/* GenerateMapUnmapCommandsForSparsePMR: -+ * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the PMR's -+ * current mapping table -+ * -+ * PMR: The PMR whose mapping table to read. -+ * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP range commands to. -+ * bMap: Set to TRUE for mapping or IMG_FALSE for unmapping -+ * -+ * This function goes through every page in the PMR's mapping table and looks for -+ * virtually contiguous ranges to record as being mapped or unmapped. -+ */ -+static void GenerateMapUnmapCommandsForSparsePMR(PMR *psPMR, -+ IMG_UINT32 ui32AllocIndex, -+ IMG_BOOL bMap) -+{ -+ PMR_MAPPING_TABLE *psMappingTable; -+ IMG_UINT32 ui32DonePages = 0; -+ IMG_UINT32 ui32NumPages; -+ IMG_UINT32 i; -+ IMG_BOOL bInARun = IMG_FALSE; -+ IMG_UINT32 ui32CurrentStart = 0; -+ IMG_UINT32 ui32RunCount = 0; -+ -+ psMappingTable = PMR_GetMappingTable(psPMR); -+ ui32NumPages = psMappingTable->ui32NumPhysChunks; -+ -+ if (ui32NumPages == 0) -+ { -+ /* nothing to do */ -+ return; -+ } -+ -+ for (i = 0; i < psMappingTable->ui32NumVirtChunks; i++) -+ { -+ if (psMappingTable->aui32Translation[i] != TRANSLATION_INVALID) -+ { -+ if (!bInARun) -+ { -+ bInARun = IMG_TRUE; -+ ui32CurrentStart = i; -+ ui32RunCount = 1; -+ } -+ else -+ { -+ ui32RunCount++; -+ } -+ } -+ -+ if (bInARun) -+ { -+ /* test if we need to end this current run and generate the command, -+ * either because the next page is not virtually contiguous -+ * to the current page, we have reached the maximum range, -+ * or this is the last page in the mapping table -+ */ -+ if ((psMappingTable->aui32Translation[i] == TRANSLATION_INVALID) || -+ (ui32RunCount == MAP_RANGE_MAX_RANGE) || -+ (i == (psMappingTable->ui32NumVirtChunks - 1))) -+ { -+ if (bMap) -+ { -+ InsertMapRangeCommand(PMR_DeviceNode(psPMR), -+ ui32AllocIndex, -+ ui32CurrentStart, -+ ui32RunCount); -+ } -+ else -+ { -+ InsertUnmapRangeCommand(PMR_DeviceNode(psPMR), -+ ui32AllocIndex, -+ ui32CurrentStart, -+ ui32RunCount); -+ } -+ -+ ui32DonePages += ui32RunCount; -+ -+ if (ui32DonePages == ui32NumPages) -+ { -+ break; -+ } -+ -+ bInARun = IMG_FALSE; -+ } -+ } -+ } -+ -+} -+ -+/* GenerateMapUnmapCommandsForChangeList: -+ * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the -+ * list of page change (page map or page unmap) indices given. -+ * -+ * ui32NumPages: Number of pages which have changed. -+ * pui32PageList: List of indices of the pages which have changed. 
-+ * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP range commands to. -+ * bMap: Set to TRUE for mapping or IMG_FALSE for unmapping -+ * -+ * This function goes through every page in the list and looks for -+ * virtually contiguous ranges to record as being mapped or unmapped. -+ */ -+static void GenerateMapUnmapCommandsForChangeList(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32NumPages, -+ IMG_UINT32 *pui32PageList, -+ IMG_UINT32 ui32AllocIndex, -+ IMG_BOOL bMap) -+{ -+ IMG_UINT32 i; -+ IMG_BOOL bInARun = IMG_FALSE; -+ IMG_UINT32 ui32CurrentStart = 0; -+ IMG_UINT32 ui32RunCount = 0; -+ -+ for (i = 0; i < ui32NumPages; i++) -+ { -+ if (!bInARun) -+ { -+ bInARun = IMG_TRUE; -+ ui32CurrentStart = pui32PageList[i]; -+ } -+ -+ ui32RunCount++; -+ -+ /* we flush if: -+ * - the next page in the list is not one greater than the current page -+ * - this is the last page in the list -+ * - we have reached the maximum range size -+ */ -+ if ((i == (ui32NumPages - 1)) || -+ ((pui32PageList[i] + 1) != pui32PageList[i + 1]) || -+ (ui32RunCount == MAP_RANGE_MAX_RANGE)) -+ { -+ if (bMap) -+ { -+ InsertMapRangeCommand(psDeviceNode, -+ ui32AllocIndex, -+ ui32CurrentStart, -+ ui32RunCount); -+ } -+ else -+ { -+ InsertUnmapRangeCommand(psDeviceNode, -+ ui32AllocIndex, -+ ui32CurrentStart, -+ ui32RunCount); -+ } -+ -+ bInARun = IMG_FALSE; -+ ui32RunCount = 0; -+ } -+ } -+} -+ -+/* DevicememHistoryMapKM: -+ * Entry point for when an allocation is mapped into the MMU GPU -+ * -+ * psPMR: The PMR to which the allocation belongs. -+ * ui32Offset: The offset within the PMR at which the allocation begins. -+ * sDevVAddr: The DevVAddr at which the allocation begins. -+ * szName: Annotation/name for the allocation. -+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form. -+ * ui32AllocationIndex: Allocation index as provided by the client. -+ * We will use this as a short-cut to find the allocation -+ * in our records. -+ * pui32AllocationIndexOut: An updated allocation index for the client. -+ * This may be a new value if we just created the -+ * allocation record. 
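/* A minimal standalone sketch of the run-length grouping performed by
 * GenerateMapUnmapCommandsForChangeList() above (and, over a mapping table,
 * by GenerateMapUnmapCommandsForSparsePMR()): a sorted list of page indices
 * is collapsed into (start, count) ranges, and a range is flushed when the
 * next index is not contiguous, when the maximum encodable count is reached,
 * or at the end of the list. Illustrative only; the constant and helper names
 * here are invented for the sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_RANGE 4  /* kept tiny here; the driver uses MAP_RANGE_MAX_RANGE */

static void emit_range(uint32_t start, uint32_t count)
{
    printf("range: start=%u count=%u\n", start, count);
}

static void group_pages(const uint32_t *pages, uint32_t num)
{
    uint32_t start = 0, count = 0;

    for (uint32_t i = 0; i < num; i++)
    {
        if (count == 0)
        {
            start = pages[i];            /* begin a new run */
        }
        count++;

        /* flush if this is the last index, the next index is not
         * contiguous, or the run has hit the maximum encodable size */
        if (i == num - 1 ||
            pages[i] + 1 != pages[i + 1] ||
            count == MAX_RANGE)
        {
            emit_range(start, count);
            count = 0;
        }
    }
}

int main(void)
{
    const uint32_t pages[] = { 4, 5, 6, 10, 11, 20, 21, 22, 23, 24 };

    group_pages(pages, sizeof(pages) / sizeof(pages[0]));
    /* emits: (4,3) (10,2) (20,4) (24,1) */
    return 0;
}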
-+ */ -+PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR, -+ IMG_UINT32 ui32Offset, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const char szName[DEVMEM_ANNOTATION_MAX_LEN], -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 *pui32AllocationIndexOut) -+{ -+ IMG_BOOL bSparse = PMR_IsSparse(psPMR); -+ IMG_UINT64 ui64Serial; -+ IMG_PID uiPID = OSGetCurrentClientProcessIDKM(); -+ PVRSRV_ERROR eError; -+ IMG_BOOL bCreated; -+ DEVICEMEM_HISTORY_DATA *psDevHData; -+ -+ if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) && -+ !CHECK_ALLOC_INDEX(ui32AllocationIndex)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u", -+ __func__, -+ ui32AllocationIndex)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ PMRGetUID(psPMR, &ui64Serial); -+ -+ psDevHData = DevmemFindDataFromDev(PMR_DeviceNode(psPMR)); -+ -+ if (psDevHData == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ DevicememHistoryLock(psDevHData); -+ -+ eError = FindOrCreateAllocation(PMR_DeviceNode(psPMR), -+ ui32AllocationIndex, -+ ui64Serial, -+ sDevVAddr, -+ uiSize, -+ szName, -+ ui32Log2PageSize, -+ uiPID, -+ bSparse, -+ &ui32AllocationIndex, -+ &bCreated); -+ -+ if ((eError == PVRSRV_OK) && !bCreated) -+ { -+ /* touch the allocation so it goes to the head of our MRU list */ -+ TouchBusyAllocation(psDevHData, ui32AllocationIndex); -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)", -+ __func__, -+ szName, -+ PVRSRVGETERRORSTRING(eError))); -+ goto out_unlock; -+ } -+ -+ if (!bSparse) -+ { -+ InsertMapAllCommand(PMR_DeviceNode(psPMR), ui32AllocationIndex); -+ } -+ else -+ { -+ GenerateMapUnmapCommandsForSparsePMR(psPMR, -+ ui32AllocationIndex, -+ IMG_TRUE); -+ } -+ -+ InsertTimeStampCommand(OSClockns64(), PMR_DeviceNode(psPMR)); -+ -+ *pui32AllocationIndexOut = ui32AllocationIndex; -+ -+out_unlock: -+ DevicememHistoryUnlock(psDevHData); -+ -+ return eError; -+} -+ -+static void VRangeInsertMapUnmapCommands(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_BOOL bMap, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_DEV_VIRTADDR sBaseDevVAddr, -+ IMG_UINT32 ui32StartPage, -+ IMG_UINT32 ui32NumPages, -+ const IMG_CHAR *pszName) -+{ -+ while (ui32NumPages > 0) -+ { -+ IMG_UINT32 ui32PagesToAdd; -+ -+ ui32PagesToAdd = MIN(ui32NumPages, MAP_RANGE_MAX_RANGE); -+ -+ if (ui32StartPage > MAP_RANGE_MAX_START) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "Cannot record %s range beginning at page " -+ "%u on allocation %s", -+ bMap ? 
"map" : "unmap", -+ ui32StartPage, -+ pszName)); -+ return; -+ } -+ -+ if (bMap) -+ { -+ InsertMapRangeCommand(psDeviceNode, -+ ui32AllocationIndex, -+ ui32StartPage, -+ ui32PagesToAdd); -+ } -+ else -+ { -+ InsertUnmapRangeCommand(psDeviceNode, -+ ui32AllocationIndex, -+ ui32StartPage, -+ ui32PagesToAdd); -+ } -+ -+ ui32StartPage += ui32PagesToAdd; -+ ui32NumPages -= ui32PagesToAdd; -+ } -+} -+ -+PVRSRV_ERROR DevicememHistoryMapVRangeKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_DEV_VIRTADDR sBaseDevVAddr, -+ IMG_UINT32 ui32StartPage, -+ IMG_UINT32 ui32NumPages, -+ IMG_DEVMEM_SIZE_T uiAllocSize, -+ const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN], -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 *pui32AllocationIndexOut) -+{ -+ IMG_PID uiPID = OSGetCurrentClientProcessIDKM(); -+ PVRSRV_ERROR eError; -+ IMG_BOOL bCreated; -+ DEVICEMEM_HISTORY_DATA *psDevHData; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) && -+ !CHECK_ALLOC_INDEX(ui32AllocationIndex)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u", -+ __func__, -+ ui32AllocationIndex)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevHData = DevmemFindDataFromDev(psDeviceNode); -+ -+ if (psDevHData == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ DevicememHistoryLock(psDevHData); -+ -+ eError = FindOrCreateAllocation(psDeviceNode, -+ ui32AllocationIndex, -+ 0, -+ sBaseDevVAddr, -+ uiAllocSize, -+ szName, -+ ui32Log2PageSize, -+ uiPID, -+ IMG_FALSE, -+ &ui32AllocationIndex, -+ &bCreated); -+ -+ if ((eError == PVRSRV_OK) && !bCreated) -+ { -+ /* touch the allocation so it goes to the head of our MRU list */ -+ TouchBusyAllocation(psDevHData, ui32AllocationIndex); -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)", -+ __func__, -+ szName, -+ PVRSRVGETERRORSTRING(eError))); -+ goto out_unlock; -+ } -+ -+ VRangeInsertMapUnmapCommands(psDeviceNode, -+ IMG_TRUE, -+ ui32AllocationIndex, -+ sBaseDevVAddr, -+ ui32StartPage, -+ ui32NumPages, -+ szName); -+ -+ *pui32AllocationIndexOut = ui32AllocationIndex; -+ -+out_unlock: -+ DevicememHistoryUnlock(psDevHData); -+ -+ return eError; -+ -+} -+ -+PVRSRV_ERROR DevicememHistoryUnmapVRangeKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_DEV_VIRTADDR sBaseDevVAddr, -+ IMG_UINT32 ui32StartPage, -+ IMG_UINT32 ui32NumPages, -+ IMG_DEVMEM_SIZE_T uiAllocSize, -+ const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN], -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 *pui32AllocationIndexOut) -+{ -+ IMG_PID uiPID = OSGetCurrentClientProcessIDKM(); -+ PVRSRV_ERROR eError; -+ IMG_BOOL bCreated; -+ DEVICEMEM_HISTORY_DATA *psDevHData; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) && -+ !CHECK_ALLOC_INDEX(ui32AllocationIndex)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u", -+ __func__, -+ ui32AllocationIndex)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevHData = DevmemFindDataFromDev(psDeviceNode); -+ -+ if (psDevHData == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ DevicememHistoryLock(psDevHData); -+ -+ eError = FindOrCreateAllocation(psDeviceNode, -+ ui32AllocationIndex, -+ 0, -+ sBaseDevVAddr, -+ uiAllocSize, -+ szName, -+ ui32Log2PageSize, -+ uiPID, -+ IMG_FALSE, -+ &ui32AllocationIndex, -+ 
&bCreated); -+ -+ if ((eError == PVRSRV_OK) && !bCreated) -+ { -+ /* touch the allocation so it goes to the head of our MRU list */ -+ TouchBusyAllocation(psDevHData, ui32AllocationIndex); -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)", -+ __func__, -+ szName, -+ PVRSRVGETERRORSTRING(eError))); -+ goto out_unlock; -+ } -+ -+ VRangeInsertMapUnmapCommands(psDeviceNode, -+ IMG_FALSE, -+ ui32AllocationIndex, -+ sBaseDevVAddr, -+ ui32StartPage, -+ ui32NumPages, -+ szName); -+ -+ *pui32AllocationIndexOut = ui32AllocationIndex; -+ -+out_unlock: -+ DevicememHistoryUnlock(psDevHData); -+ -+ return eError; -+} -+ -+ -+ -+/* DevicememHistoryUnmapKM: -+ * Entry point for when an allocation is unmapped from the MMU GPU -+ * -+ * psPMR: The PMR to which the allocation belongs. -+ * ui32Offset: The offset within the PMR at which the allocation begins. -+ * sDevVAddr: The DevVAddr at which the allocation begins. -+ * szName: Annotation/name for the allocation. -+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form. -+ * ui32AllocationIndex: Allocation index as provided by the client. -+ * We will use this as a short-cut to find the allocation -+ * in our records. -+ * pui32AllocationIndexOut: An updated allocation index for the client. -+ * This may be a new value if we just created the -+ * allocation record. -+ */ -+PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR, -+ IMG_UINT32 ui32Offset, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const char szName[DEVMEM_ANNOTATION_MAX_LEN], -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 *pui32AllocationIndexOut) -+{ -+ IMG_BOOL bSparse = PMR_IsSparse(psPMR); -+ IMG_UINT64 ui64Serial; -+ IMG_PID uiPID = OSGetCurrentClientProcessIDKM(); -+ PVRSRV_ERROR eError; -+ IMG_BOOL bCreated; -+ DEVICEMEM_HISTORY_DATA *psDevHData; -+ -+ if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) && -+ !CHECK_ALLOC_INDEX(ui32AllocationIndex)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u", -+ __func__, -+ ui32AllocationIndex)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ PMRGetUID(psPMR, &ui64Serial); -+ -+ psDevHData = DevmemFindDataFromDev(PMR_DeviceNode(psPMR)); -+ -+ if (psDevHData == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ DevicememHistoryLock(psDevHData); -+ -+ eError = FindOrCreateAllocation(PMR_DeviceNode(psPMR), -+ ui32AllocationIndex, -+ ui64Serial, -+ sDevVAddr, -+ uiSize, -+ szName, -+ ui32Log2PageSize, -+ uiPID, -+ bSparse, -+ &ui32AllocationIndex, -+ &bCreated); -+ -+ if ((eError == PVRSRV_OK) && !bCreated) -+ { -+ /* touch the allocation so it goes to the head of our MRU list */ -+ TouchBusyAllocation(psDevHData, ui32AllocationIndex); -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)", -+ __func__, -+ szName, -+ PVRSRVGETERRORSTRING(eError))); -+ goto out_unlock; -+ } -+ -+ if (!bSparse) -+ { -+ InsertUnmapAllCommand(PMR_DeviceNode(psPMR), ui32AllocationIndex); -+ } -+ else -+ { -+ GenerateMapUnmapCommandsForSparsePMR(psPMR, -+ ui32AllocationIndex, -+ IMG_FALSE); -+ } -+ -+ InsertTimeStampCommand(OSClockns64(), PMR_DeviceNode(psPMR)); -+ -+ *pui32AllocationIndexOut = ui32AllocationIndex; -+ -+out_unlock: -+ DevicememHistoryUnlock(psDevHData); -+ -+ return eError; -+} -+ -+/* DevicememHistorySparseChangeKM: -+ * Entry point for when a sparse allocation is changed, such that some of the -+ * pages within 
the sparse allocation are mapped or unmapped. -+ * -+ * psPMR: The PMR to which the allocation belongs. -+ * ui32Offset: The offset within the PMR at which the allocation begins. -+ * sDevVAddr: The DevVAddr at which the allocation begins. -+ * szName: Annotation/name for the allocation. -+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form. -+ * ui32AllocPageCount: Number of pages which have been mapped. -+ * paui32AllocPageIndices: Indices of pages which have been mapped. -+ * ui32FreePageCount: Number of pages which have been unmapped. -+ * paui32FreePageIndices: Indices of pages which have been unmapped. -+ * ui32AllocationIndex: Allocation index as provided by the client. -+ * We will use this as a short-cut to find the allocation -+ * in our records. -+ * pui32AllocationIndexOut: An updated allocation index for the client. -+ * This may be a new value if we just created the -+ * allocation record. -+ */ -+PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR, -+ IMG_UINT32 ui32Offset, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const char szName[DEVMEM_ANNOTATION_MAX_LEN], -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *paui32AllocPageIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *paui32FreePageIndices, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 *pui32AllocationIndexOut) -+{ -+ IMG_UINT64 ui64Serial; -+ IMG_PID uiPID = OSGetCurrentClientProcessIDKM(); -+ PVRSRV_ERROR eError; -+ IMG_BOOL bCreated; -+ DEVICEMEM_HISTORY_DATA *psDevHData; -+ -+ if (!PMRValidateSize((IMG_UINT64) ui32AllocPageCount << ui32Log2PageSize)) -+ { -+ PVR_LOG_VA(PVR_DBG_ERROR, -+ "PMR size exceeds limit #Chunks: %u ChunkSz %"IMG_UINT64_FMTSPECX"", -+ ui32AllocPageCount, -+ (IMG_UINT64) 1ULL << ui32Log2PageSize); -+ return PVRSRV_ERROR_PMR_TOO_LARGE; -+ } -+ -+ if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) && -+ !CHECK_ALLOC_INDEX(ui32AllocationIndex)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u", -+ __func__, -+ ui32AllocationIndex)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ PMRGetUID(psPMR, &ui64Serial); -+ -+ psDevHData = DevmemFindDataFromDev(PMR_DeviceNode(psPMR)); -+ -+ if (psDevHData == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ DevicememHistoryLock(psDevHData); -+ -+ eError = FindOrCreateAllocation(PMR_DeviceNode(psPMR), -+ ui32AllocationIndex, -+ ui64Serial, -+ sDevVAddr, -+ uiSize, -+ szName, -+ ui32Log2PageSize, -+ uiPID, -+ IMG_TRUE /* bSparse */, -+ &ui32AllocationIndex, -+ &bCreated); -+ -+ if ((eError == PVRSRV_OK) && !bCreated) -+ { -+ /* touch the allocation so it goes to the head of our MRU list */ -+ TouchBusyAllocation(psDevHData, ui32AllocationIndex); -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)", -+ __func__, -+ szName, -+ PVRSRVGETERRORSTRING(eError))); -+ goto out_unlock; -+ } -+ -+ GenerateMapUnmapCommandsForChangeList(PMR_DeviceNode(psPMR), -+ ui32AllocPageCount, -+ paui32AllocPageIndices, -+ ui32AllocationIndex, -+ IMG_TRUE); -+ -+ GenerateMapUnmapCommandsForChangeList(PMR_DeviceNode(psPMR), -+ ui32FreePageCount, -+ paui32FreePageIndices, -+ ui32AllocationIndex, -+ IMG_FALSE); -+ -+ InsertTimeStampCommand(OSClockns64(), PMR_DeviceNode(psPMR)); -+ -+ *pui32AllocationIndexOut = ui32AllocationIndex; -+ -+out_unlock: -+ DevicememHistoryUnlock(psDevHData); -+ -+ return eError; -+ -+} -+ -+/* CircularBufferIterateStart: -+ * Initialise local state for iterating over 
the circular buffer -+ */ -+static void CircularBufferIterateStart(DEVICEMEM_HISTORY_DATA *psHData, IMG_UINT32 *pui32Head, IMG_UINT32 *pui32Iter) -+{ -+ *pui32Head = psHData->sRecords.ui32Head; -+ -+ if (*pui32Head != 0) -+ { -+ *pui32Iter = *pui32Head - 1; -+ } -+ else -+ { -+ *pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1; -+ } -+} -+ -+/* CircularBufferIteratePrevious: -+ * Iterate to the previous item in the circular buffer. -+ * This is called repeatedly to iterate over the whole circular buffer. -+ */ -+static COMMAND_WRAPPER *CircularBufferIteratePrevious(DEVICEMEM_HISTORY_DATA *psHData, -+ IMG_UINT32 ui32Head, -+ IMG_UINT32 *pui32Iter, -+ COMMAND_TYPE *peType, -+ IMG_BOOL *pbLast) -+{ -+ IMG_UINT8 *pui8Header; -+ COMMAND_WRAPPER *psOut = NULL; -+ -+ psOut = psHData->sRecords.pasCircularBuffer + *pui32Iter; -+ -+ pui8Header = (void *) psOut; -+ -+ /* Check the command looks valid. -+ * this condition should never happen, but check for it anyway -+ * and try to handle it -+ */ -+ if (*pui8Header >= COMMAND_TYPE_COUNT) -+ { -+ /* invalid header detected. Circular buffer corrupted? */ -+ PVR_DPF((PVR_DBG_ERROR, "CircularBufferIteratePrevious: " -+ "Invalid header: %u", -+ *pui8Header)); -+ *pbLast = IMG_TRUE; -+ return NULL; -+ } -+ -+ *peType = *pui8Header; -+ -+ if (*pui32Iter != 0) -+ { -+ (*pui32Iter)--; -+ } -+ else -+ { -+ *pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1; -+ } -+ -+ -+ /* inform the caller this is the last command if either we have reached -+ * the head (where we started) or if we have reached an empty command, -+ * which means we have covered all populated entries -+ */ -+ if ((*pui32Iter == ui32Head) || (*peType == COMMAND_TYPE_NONE)) -+ { -+ /* this is the final iteration */ -+ *pbLast = IMG_TRUE; -+ } -+ -+ return psOut; -+} -+ -+/* MapUnmapCommandGetInfo: -+ * Helper function to get the address and mapping information from a MAP_ALL, UNMAP_ALL, -+ * MAP_RANGE or UNMAP_RANGE command -+ */ -+static void MapUnmapCommandGetInfo(DEVICEMEM_HISTORY_DATA *psHData, -+ COMMAND_WRAPPER *psCommand, -+ COMMAND_TYPE eType, -+ IMG_DEV_VIRTADDR *psDevVAddrStart, -+ IMG_DEV_VIRTADDR *psDevVAddrEnd, -+ IMG_BOOL *pbMap, -+ IMG_UINT32 *pui32AllocIndex) -+{ -+ if ((eType == COMMAND_TYPE_MAP_ALL) || ((eType == COMMAND_TYPE_UNMAP_ALL))) -+ { -+ COMMAND_MAP_ALL *psMapAll = &psCommand->u.sMapAll; -+ RECORD_ALLOCATION *psAlloc; -+ -+ *pbMap = (eType == COMMAND_TYPE_MAP_ALL); -+ *pui32AllocIndex = psMapAll->uiAllocIndex; -+ -+ psAlloc = ALLOC_INDEX_TO_PTR(psHData, psMapAll->uiAllocIndex); -+ -+ *psDevVAddrStart = psAlloc->sDevVAddr; -+ psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr + psAlloc->uiSize - 1; -+ } -+ else if ((eType == COMMAND_TYPE_MAP_RANGE) || ((eType == COMMAND_TYPE_UNMAP_RANGE))) -+ { -+ COMMAND_MAP_RANGE *psMapRange = &psCommand->u.sMapRange; -+ RECORD_ALLOCATION *psAlloc; -+ IMG_UINT32 ui32StartPage, ui32Count; -+ -+ *pbMap = (eType == COMMAND_TYPE_MAP_RANGE); -+ *pui32AllocIndex = psMapRange->uiAllocIndex; -+ -+ psAlloc = ALLOC_INDEX_TO_PTR(psHData, psMapRange->uiAllocIndex); -+ -+ MapRangeUnpack(psMapRange, &ui32StartPage, &ui32Count); -+ -+ psDevVAddrStart->uiAddr = psAlloc->sDevVAddr.uiAddr + -+ ((1ULL << psAlloc->ui32Log2PageSize) * ui32StartPage); -+ -+ psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr + -+ ((1ULL << psAlloc->ui32Log2PageSize) * ui32Count) - 1; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid command type: %u", -+ __func__, -+ eType)); -+ } -+} -+ -+void DevicememHistoryDumpRecordStats(PVRSRV_DEVICE_NODE *psDevNode, -+ DUMPDEBUG_PRINTF_FUNC 
*pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ DEVICEMEM_HISTORY_DATA *psDevHData; -+ psDevHData = DevmemFindDataFromDev(psDevNode); -+ -+ if (psDevHData) -+ { -+ PVR_DUMPDEBUG_LOG(" DevmemHistoryRecordStats -" -+ " CBWC:%"IMG_UINT64_FMTSPEC -+ " MAC:%"IMG_UINT64_FMTSPEC -+ " UMAC:%"IMG_UINT64_FMTSPEC -+ " MRC:%"IMG_UINT64_FMTSPEC -+ " UMRC:%"IMG_UINT64_FMTSPEC -+ " TSC:%"IMG_UINT64_FMTSPEC -+ " MAX:%"IMG_UINT64_FMTSPEC -+ " CHD:%u", -+ psDevHData->sRecords.ui64CBWrapCount, -+ psDevHData->sRecords.ui64MapAllCount, -+ psDevHData->sRecords.ui64UnMapAllCount, -+ psDevHData->sRecords.ui64MapRangeCount, -+ psDevHData->sRecords.ui64UnMapRangeCount, -+ psDevHData->sRecords.ui64TimeStampCount, -+ (IMG_UINT64)CIRCULAR_BUFFER_NUM_COMMANDS, -+ psDevHData->sRecords.ui32Head); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG(" DevmemHistoryRecordStats - None"); -+ } -+} -+ -+/* DevicememHistoryQuery: -+ * Entry point for rgxdebug to look up addresses relating to a page fault -+ */ -+IMG_BOOL DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn, -+ DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut, -+ IMG_UINT32 ui32PageSizeBytes, -+ IMG_BOOL bMatchAnyAllocInPage) -+{ -+ IMG_UINT32 ui32Head, ui32Iter; -+ COMMAND_TYPE eType = COMMAND_TYPE_NONE; -+ COMMAND_WRAPPER *psCommand = NULL; -+ IMG_BOOL bLast = IMG_FALSE; -+ IMG_UINT64 ui64StartTime = OSClockns64(); -+ IMG_UINT64 ui64TimeNs = 0; -+ DEVICEMEM_HISTORY_DATA *psDevHData; -+ -+ /* initialise the results count for the caller */ -+ psQueryOut->ui32NumResults = 0; -+ psQueryOut->ui64SearchCount = 0; -+ -+ psDevHData = DevmemFindDataFromDev(psQueryIn->psDevNode); -+ -+ if (psDevHData == NULL) -+ { -+ return IMG_FALSE; -+ } -+ -+ DevicememHistoryLock(psDevHData); -+ -+ /* if the search is constrained to a particular PID then we -+ * first search the list of allocations to see if this -+ * PID is known to us -+ */ -+ if (psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY) -+ { -+ IMG_UINT32 ui32Alloc; -+ ui32Alloc = psDevHData->sRecords.ui32AllocationsListHead; -+ -+ while (ui32Alloc != END_OF_LIST) -+ { -+ RECORD_ALLOCATION *psAlloc; -+ -+ psAlloc = ALLOC_INDEX_TO_PTR(psDevHData, ui32Alloc); -+ -+ if (psAlloc->uiPID == psQueryIn->uiPID) -+ { -+ goto found_pid; -+ } -+ -+ if (ui32Alloc == psDevHData->sRecords.ui32AllocationsListHead) -+ { -+ /* gone through whole list */ -+ break; -+ } -+ } -+ -+ /* PID not found, so we do not have any suitable data for this -+ * page fault -+ */ -+ goto out_unlock; -+ } -+ -+found_pid: -+ -+ CircularBufferIterateStart(psDevHData, &ui32Head, &ui32Iter); -+ -+ while (!bLast) -+ { -+ psCommand = CircularBufferIteratePrevious(psDevHData, ui32Head, &ui32Iter, &eType, &bLast); -+ if (psCommand == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: CircularBufferIteratePrevious returned NULL psCommand", -+ __func__)); -+ return IMG_FALSE; -+ } -+ -+ if (eType == COMMAND_TYPE_TIMESTAMP) -+ { -+ ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp); -+ continue; -+ } -+ -+ if ((eType == COMMAND_TYPE_MAP_ALL) || -+ (eType == COMMAND_TYPE_UNMAP_ALL) || -+ (eType == COMMAND_TYPE_MAP_RANGE) || -+ (eType == COMMAND_TYPE_UNMAP_RANGE)) -+ { -+ RECORD_ALLOCATION *psAlloc; -+ IMG_DEV_VIRTADDR sAllocStartAddrOrig, sAllocEndAddrOrig; -+ IMG_DEV_VIRTADDR sAllocStartAddr, sAllocEndAddr; -+ IMG_BOOL bMap; -+ IMG_UINT32 ui32AllocIndex; -+ -+ MapUnmapCommandGetInfo(psDevHData, -+ psCommand, -+ eType, -+ &sAllocStartAddrOrig, -+ &sAllocEndAddrOrig, -+ &bMap, -+ &ui32AllocIndex); -+ -+ sAllocStartAddr = sAllocStartAddrOrig; -+ sAllocEndAddr = sAllocEndAddrOrig; -+ 
-+ psAlloc = ALLOC_INDEX_TO_PTR(psDevHData, ui32AllocIndex); -+ -+ /* skip this command if we need to search within -+ * a particular PID, and this allocation is not from -+ * that PID -+ */ -+ if ((psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY) && -+ (psAlloc->uiPID != psQueryIn->uiPID)) -+ { -+ continue; -+ } -+ -+ /* if the allocation was created after this event, then this -+ * event must be for an old/removed allocation, so skip it -+ */ -+ if (DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs) -+ { -+ continue; -+ } -+ -+ psQueryOut->ui64SearchCount++; -+ -+ /* if the caller wants us to match any allocation in the -+ * same page as the allocation then tweak the real start/end -+ * addresses of the allocation here -+ */ -+ if (bMatchAnyAllocInPage) -+ { -+ sAllocStartAddr.uiAddr = sAllocStartAddr.uiAddr & ~(IMG_UINT64) (ui32PageSizeBytes - 1); -+ sAllocEndAddr.uiAddr = PVR_ALIGN(sAllocEndAddr.uiAddr, (IMG_UINT64)ui32PageSizeBytes); -+ } -+ -+ if ((psQueryIn->sDevVAddr.uiAddr >= sAllocStartAddr.uiAddr) && -+ (psQueryIn->sDevVAddr.uiAddr < sAllocEndAddr.uiAddr)) -+ { -+ DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult = &psQueryOut->sResults[psQueryOut->ui32NumResults]; -+ -+ OSStringLCopy(psResult->szString, psAlloc->szName, sizeof(psResult->szString)); -+ psResult->sBaseDevVAddr = psAlloc->sDevVAddr; -+ psResult->uiSize = psAlloc->uiSize; -+ psResult->bMap = bMap; -+ psResult->ui64Age = _CalculateAge(ui64StartTime, ui64TimeNs, TIME_STAMP_MASK); -+ psResult->ui64When = ui64TimeNs; -+ /* write the responsible PID in the placeholder */ -+ psResult->sProcessInfo.uiPID = psAlloc->uiPID; -+ -+ if ((eType == COMMAND_TYPE_MAP_ALL) || (eType == COMMAND_TYPE_UNMAP_ALL)) -+ { -+ psResult->bRange = IMG_FALSE; -+ psResult->bAll = IMG_TRUE; -+ } -+ else -+ { -+ psResult->bRange = IMG_TRUE; -+ MapRangeUnpack(&psCommand->u.sMapRange, -+ &psResult->ui32StartPage, -+ &psResult->ui32PageCount); -+ psResult->bAll = (psResult->ui32PageCount * (1U << psAlloc->ui32Log2PageSize)) -+ == psAlloc->uiSize; -+ psResult->sMapStartAddr = sAllocStartAddrOrig; -+ psResult->sMapEndAddr = sAllocEndAddrOrig; -+ } -+ -+ psQueryOut->ui32NumResults++; -+ -+ if (psQueryOut->ui32NumResults == DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS) -+ { -+ break; -+ } -+ } -+ } -+ } -+ -+out_unlock: -+ DevicememHistoryUnlock(psDevHData); -+ -+ return psQueryOut->ui32NumResults > 0; -+} -+ -+#if defined(SUPPORT_RGX) -+static void DeviceMemHistoryFmt(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN], -+ IMG_PID uiPID, -+ const IMG_CHAR *pszName, -+ const IMG_CHAR *pszAction, -+ IMG_DEV_VIRTADDR sDevVAddrStart, -+ IMG_DEV_VIRTADDR sDevVAddrEnd, -+ IMG_UINT64 ui64TimeNs) -+{ -+ -+ OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, -+ /* PID NAME MAP/UNMAP MIN-MAX SIZE AbsUS AgeUS*/ -+ "%04u %-40s %-10s " -+ IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC " " -+ "0x%08" IMG_UINT64_FMTSPECX " " -+ "%013" IMG_UINT64_FMTSPEC, /* 13 digits is over 2 hours of ns */ -+ uiPID, -+ pszName, -+ pszAction, -+ sDevVAddrStart.uiAddr, -+ sDevVAddrEnd.uiAddr, -+ sDevVAddrEnd.uiAddr - sDevVAddrStart.uiAddr + 1, -+ ui64TimeNs); -+} -+ -+static void DeviceMemHistoryFmtHeader(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]) -+{ -+ OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, -+ "%-4s %-40s %-6s %10s %10s %8s %13s", -+ "PID", -+ "NAME", -+ "ACTION", -+ "ADDR MIN", -+ "ADDR MAX", -+ "SIZE", -+ "ABS NS"); -+} -+ -+static const char *CommandTypeToString(COMMAND_TYPE eType) -+{ -+ switch (eType) -+ { -+ case COMMAND_TYPE_MAP_ALL: -+ return "MapAll"; -+ case 
COMMAND_TYPE_UNMAP_ALL: -+ return "UnmapAll"; -+ case COMMAND_TYPE_MAP_RANGE: -+ return "MapRange"; -+ case COMMAND_TYPE_UNMAP_RANGE: -+ return "UnmapRange"; -+ case COMMAND_TYPE_TIMESTAMP: -+ return "TimeStamp"; -+ default: -+ return "???"; -+ } -+} -+ -+static void DevicememHistoryPrintAll(OSDI_IMPL_ENTRY *psEntry) -+{ -+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; -+ IMG_UINT32 ui32Iter; -+ IMG_UINT32 ui32Head; -+ IMG_BOOL bLast = IMG_FALSE; -+ IMG_UINT64 ui64TimeNs = 0; -+ IMG_UINT64 ui64StartTime = OSClockns64(); -+ DEVICEMEM_HISTORY_DATA *psDevHData; -+ PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry); -+ -+ DeviceMemHistoryFmtHeader(szBuffer); -+ DIPrintf(psEntry, "%s\n", szBuffer); -+ -+ psDevHData = DevmemFindDataFromDev(psDeviceNode); -+ -+ if (psDevHData == NULL) -+ { -+ return; -+ } -+ -+ CircularBufferIterateStart(psDevHData, &ui32Head, &ui32Iter); -+ -+ while (!bLast) -+ { -+ COMMAND_WRAPPER *psCommand; -+ COMMAND_TYPE eType = COMMAND_TYPE_NONE; -+ -+ psCommand = CircularBufferIteratePrevious(psDevHData, ui32Head, &ui32Iter, -+ &eType, &bLast); -+ if (psCommand == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: CircularBufferIteratePrevious returned NULL psCommand", -+ __func__)); -+ return; -+ } -+ -+ if (eType == COMMAND_TYPE_TIMESTAMP) -+ { -+ ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp); -+ continue; -+ } -+ -+ -+ if ((eType == COMMAND_TYPE_MAP_ALL) || -+ (eType == COMMAND_TYPE_UNMAP_ALL) || -+ (eType == COMMAND_TYPE_MAP_RANGE) || -+ (eType == COMMAND_TYPE_UNMAP_RANGE)) -+ { -+ RECORD_ALLOCATION *psAlloc; -+ IMG_DEV_VIRTADDR sDevVAddrStart, sDevVAddrEnd; -+ IMG_BOOL bMap; -+ IMG_UINT32 ui32AllocIndex; -+ -+ MapUnmapCommandGetInfo(psDevHData, -+ psCommand, -+ eType, -+ &sDevVAddrStart, -+ &sDevVAddrEnd, -+ &bMap, -+ &ui32AllocIndex); -+ -+ psAlloc = ALLOC_INDEX_TO_PTR(psDevHData, ui32AllocIndex); -+ -+ if (DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs) -+ { -+ /* if this event relates to an allocation we -+ * are no longer tracking then do not print it -+ */ -+ continue; -+ } -+ -+ DeviceMemHistoryFmt(szBuffer, -+ psAlloc->uiPID, -+ psAlloc->szName, -+ CommandTypeToString(eType), -+ sDevVAddrStart, -+ sDevVAddrEnd, -+ ui64TimeNs); -+ -+ DIPrintf(psEntry, "%s\n", szBuffer); -+ } -+ } -+ -+ DIPrintf(psEntry, "\nTimestamp reference: %013" IMG_UINT64_FMTSPEC "\n", -+ ui64StartTime); -+} -+ -+static int DevicememHistoryPrintAllWrapper(OSDI_IMPL_ENTRY *psEntry, -+ void *pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)DIGetPrivData(psEntry); -+ DEVICEMEM_HISTORY_DATA *psDevHData; -+ -+ /* Get the backing store associated with the device. If we are -+ * called before the device has been started (i.e. FW loaded) -+ * then we haven't yet had this data allocated. -+ * Return to provide a NULL data stream to the consumer. 
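/* A minimal standalone sketch of the backwards walk over the command ring
 * buffer implemented by CircularBufferIterateStart() and
 * CircularBufferIteratePrevious() above, as used by DevicememHistoryQuery()
 * and DevicememHistoryPrintAll(): the newest entry sits just before the head
 * index, iteration proceeds towards older entries with wrap-around, and stops
 * once it reaches the head again or hits a slot that was never written (the
 * analogue of COMMAND_TYPE_NONE). The exact stop-condition ordering differs
 * slightly from the driver; this is illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_SLOTS  8
#define SLOT_EMPTY 0

typedef struct { int type; int payload; } slot_t;

int main(void)
{
    slot_t   ring[NUM_SLOTS] = { 0 };
    uint32_t head = 0;

    /* write 5 entries; head always points at the next slot to overwrite */
    for (int n = 1; n <= 5; n++)
    {
        ring[head].type = 1;
        ring[head].payload = n;
        head = (head + 1) % NUM_SLOTS;
    }

    /* iterate from newest to oldest */
    uint32_t iter = (head != 0) ? head - 1 : NUM_SLOTS - 1;
    for (;;)
    {
        if (ring[iter].type == SLOT_EMPTY)
        {
            break;                      /* reached slots never written to */
        }
        printf("payload %d\n", ring[iter].payload);   /* prints 5,4,3,2,1 */

        iter = (iter != 0) ? iter - 1 : NUM_SLOTS - 1;
        if (iter == head)
        {
            break;                      /* wrapped all the way round */
        }
    }
    return 0;
}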
-+ */ -+ psDevHData = DevmemFindDataFromDev(psDeviceNode); -+ if (psDevHData == NULL) -+ { -+ return 0; -+ } -+ PVR_UNREFERENCED_PARAMETER(pvData); -+ -+ DevicememHistoryLock(psDevHData); -+ DevicememHistoryPrintAll(psEntry); -+ DevicememHistoryUnlock(psDevHData); -+ -+ return 0; -+} -+#endif /* defined(SUPPORT_RGX) */ -+ -+static PVRSRV_ERROR CreateRecords(DEVICEMEM_HISTORY_DATA *psDevHData) -+{ -+ psDevHData->sRecords.pasAllocations = -+ OSAllocMemNoStats(sizeof(RECORD_ALLOCATION) * ALLOCATION_LIST_NUM_ENTRIES); -+ -+ PVR_RETURN_IF_NOMEM(psDevHData->sRecords.pasAllocations); -+ -+ /* Allocated and initialise the circular buffer with zeros so every -+ * command is initialised as a command of type COMMAND_TYPE_NONE. */ -+ psDevHData->sRecords.pasCircularBuffer = -+ OSAllocZMemNoStats(sizeof(COMMAND_WRAPPER) * CIRCULAR_BUFFER_NUM_COMMANDS); -+ -+ if (psDevHData->sRecords.pasCircularBuffer == NULL) -+ { -+ OSFreeMemNoStats(psDevHData->sRecords.pasAllocations); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static void DestroyRecords(DEVICEMEM_HISTORY_DATA *psDevHData) -+{ -+ OSFreeMemNoStats(psDevHData->sRecords.pasCircularBuffer); -+ OSFreeMemNoStats(psDevHData->sRecords.pasAllocations); -+} -+ -+static void InitialiseRecords(DEVICEMEM_HISTORY_DATA *psDevHData) -+{ -+ IMG_UINT32 i; -+ -+ /* initialise the allocations list */ -+ -+ psDevHData->sRecords.pasAllocations[0].ui32Prev = ALLOCATION_LIST_NUM_ENTRIES - 1; -+ psDevHData->sRecords.pasAllocations[0].ui32Next = 1; -+ -+ for (i = 1; i < ALLOCATION_LIST_NUM_ENTRIES; i++) -+ { -+ psDevHData->sRecords.pasAllocations[i].ui32Prev = i - 1; -+ psDevHData->sRecords.pasAllocations[i].ui32Next = i + 1; -+ } -+ -+ psDevHData->sRecords.pasAllocations[ALLOCATION_LIST_NUM_ENTRIES - 1].ui32Next = 0; -+ -+ psDevHData->sRecords.ui32AllocationsListHead = 0; -+} -+ -+static void DevicememHistoryDevDeInitUnit(IMG_UINT32 uiUnit); -+static PVRSRV_ERROR DevicememHistoryDevInitUnit(IMG_UINT32 uiUnit); -+ -+PVRSRV_ERROR DevicememHistoryInitKM(void) -+{ -+ IMG_UINT32 ui; -+ -+ /* Zero-fill the gapsDevicememHistoryData array entries */ -+ for (ui = 0; ui < PVRSRV_MAX_DEVICES; ui++) -+ { -+ gapsDevicememHistoryData[ui] = NULL; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+void DevicememHistoryDeInitKM(void) -+{ -+ IMG_UINT32 uiUnit; -+ -+ /* Iterate over all potential units and remove their data. 
-+ * DI entry is removed by DevicememHistoryDeviceDestroy() -+ */ -+ for (uiUnit = 0; uiUnit < PVRSRV_MAX_DEVICES; uiUnit++) -+ { -+ DevicememHistoryDevDeInitUnit(uiUnit); -+ } -+} -+ -+/* Allocate DEVICEMEM_HISTORY_DATA entry for the specified unit */ -+static PVRSRV_ERROR DevicememHistoryDevInitUnit(IMG_UINT32 uiUnit) -+{ -+ PVRSRV_ERROR eError; -+ DEVICEMEM_HISTORY_DATA *psDevicememHistoryData; -+ -+ if (uiUnit >= PVRSRV_MAX_DEVICES) -+ { -+ PVR_LOG_RETURN_IF_FALSE(uiUnit < PVRSRV_MAX_DEVICES, "Invalid Unit", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* Valid unit, try and allocate and fill all structure members */ -+ -+ psDevicememHistoryData = OSAllocZMemNoStats(sizeof(DEVICEMEM_HISTORY_DATA)); -+ PVR_RETURN_IF_NOMEM(psDevicememHistoryData); -+ -+ eError = OSLockCreate(&psDevicememHistoryData->hLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", err_lock); -+ -+ eError = CreateRecords(psDevicememHistoryData); -+ PVR_LOG_GOTO_IF_ERROR(eError, "CreateRecords", err_allocations); -+ -+ InitialiseRecords(psDevicememHistoryData); -+ -+ gapsDevicememHistoryData[uiUnit] = psDevicememHistoryData; -+ -+ return PVRSRV_OK; -+ -+err_allocations: -+ OSLockDestroy(psDevicememHistoryData->hLock); -+ psDevicememHistoryData->hLock = NULL; -+err_lock: -+ OSFreeMemNoStats(psDevicememHistoryData); -+ return eError; -+} -+ -+/* Allocate DI entry for specified psDeviceNode */ -+PVRSRV_ERROR DevicememHistoryDeviceCreate(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 uiUnit = psDeviceNode->sDevId.ui32InternalID; -+#if defined(SUPPORT_RGX) -+ PVRSRV_DEVICE_DEBUG_INFO *psDevDebugInfo = &psDeviceNode->sDebugInfo; -+ DI_ITERATOR_CB sIterator = {.pfnShow = DevicememHistoryPrintAllWrapper}; -+#endif -+ -+ if (uiUnit >= PVRSRV_MAX_DEVICES) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* Create the DI entry for the device's devicemem_history handle */ -+ -+ -+#if defined(SUPPORT_RGX) -+ eError = DICreateEntry("devicemem_history", psDevDebugInfo->psGroup, -+ &sIterator, psDeviceNode, -+ DI_ENTRY_TYPE_GENERIC, -+ &psDevDebugInfo->psDevMemEntry); -+#endif /* defined(SUPPORT_RGX) */ -+ -+ return eError; -+} -+ -+/* Allocate the DEVICEMEM_HISTORY_DATA for specified psDeviceNode */ -+PVRSRV_ERROR DevicememHistoryDeviceInit(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiUnit = psDeviceNode->sDevId.ui32InternalID; -+ -+ if (uiUnit >= PVRSRV_MAX_DEVICES) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ eError = DevicememHistoryDevInitUnit(uiUnit); -+ -+ return eError; -+} -+ -+static void DevicememHistoryDevDeInitUnit(IMG_UINT32 uiUnit) -+{ -+ DEVICEMEM_HISTORY_DATA *psDevicememHistoryData; -+ -+ if (uiUnit >= PVRSRV_MAX_DEVICES) -+ { -+ return; -+ } -+ -+ psDevicememHistoryData = gapsDevicememHistoryData[uiUnit]; -+ -+ if (psDevicememHistoryData == NULL) -+ { -+ return; -+ } -+ -+ DestroyRecords(psDevicememHistoryData); -+ -+ if (psDevicememHistoryData->hLock != NULL) -+ { -+ OSLockDestroy(psDevicememHistoryData->hLock); -+ psDevicememHistoryData->hLock = NULL; -+ } -+ -+ OSFreeMemNoStats(psDevicememHistoryData); -+ gapsDevicememHistoryData[uiUnit] = NULL; -+} -+ -+void DevicememHistoryDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+#if defined(SUPPORT_RGX) -+ PVRSRV_DEVICE_DEBUG_INFO *psDevDebugInfo = &psDeviceNode->sDebugInfo; -+ -+ /* Remove the DI entry associated with this device */ -+ DIDestroyEntry(psDevDebugInfo->psDevMemEntry); -+#endif /* defined(SUPPORT_RGX) */ -+ -+} -diff 
--git a/drivers/gpu/drm/img-rogue/devicemem_history_server.h b/drivers/gpu/drm/img-rogue/devicemem_history_server.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/devicemem_history_server.h -@@ -0,0 +1,167 @@ -+/*************************************************************************/ /*! -+@File devicemem_history_server.h -+@Title Resource Information abstraction -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Devicemem History functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#ifndef DEVICEMEM_HISTORY_SERVER_H -+#define DEVICEMEM_HISTORY_SERVER_H -+ -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+#include "rgxmem.h" -+#include "devicemem_utils.h" -+#include "connection_server.h" -+ -+PVRSRV_ERROR DevicememHistoryInitKM(void); -+ -+void DevicememHistoryDeInitKM(void); -+ -+PVRSRV_ERROR DevicememHistoryDeviceInit(PVRSRV_DEVICE_NODE *psDevNode); -+PVRSRV_ERROR DevicememHistoryDeviceCreate(PVRSRV_DEVICE_NODE *psDevNode); -+void DevicememHistoryDeviceDestroy(PVRSRV_DEVICE_NODE *psDevNode); -+ -+PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR, -+ IMG_UINT32 ui32Offset, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const char szName[DEVMEM_ANNOTATION_MAX_LEN], -+ IMG_UINT32 ui32PageSize, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 *pui32AllocationIndexOut); -+ -+PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR, -+ IMG_UINT32 ui32Offset, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const char szName[DEVMEM_ANNOTATION_MAX_LEN], -+ IMG_UINT32 ui32PageSize, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 *pui32AllocationIndexOut); -+ -+PVRSRV_ERROR DevicememHistoryMapVRangeKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_DEV_VIRTADDR sBaseDevVAddr, -+ IMG_UINT32 ui32StartPage, -+ IMG_UINT32 ui32NumPages, -+ IMG_DEVMEM_SIZE_T uiAllocSize, -+ const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN], -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 *ui32AllocationIndexOut); -+ -+PVRSRV_ERROR DevicememHistoryUnmapVRangeKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_DEV_VIRTADDR sBaseDevVAddr, -+ IMG_UINT32 ui32StartPage, -+ IMG_UINT32 ui32NumPages, -+ IMG_DEVMEM_SIZE_T uiAllocSize, -+ const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN], -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32AllocationIndex, -+ IMG_UINT32 *ui32AllocationIndexOut); -+ -+PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR, -+ IMG_UINT32 ui32Offset, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const char szName[DEVMEM_ANNOTATION_MAX_LEN], -+ IMG_UINT32 ui32PageSize, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *paui32AllocPageIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pauiFreePageIndices, -+ IMG_UINT32 AllocationIndex, -+ IMG_UINT32 *pui32AllocationIndexOut); -+ -+/* used when the PID does not matter */ -+#define DEVICEMEM_HISTORY_PID_ANY 0xFFFFFFFE -+ -+typedef struct _DEVICEMEM_HISTORY_QUERY_IN_ -+{ -+ IMG_PID uiPID; -+ IMG_DEV_VIRTADDR sDevVAddr; -+ PVRSRV_DEVICE_NODE *psDevNode; -+} DEVICEMEM_HISTORY_QUERY_IN; -+ -+/* Store up to 4 results for a lookup. In the case of the faulting page being -+ * re-mapped between the page fault occurring on HW and the page fault analysis -+ * being done, the second result entry will show the allocation being unmapped. -+ * A further 2 entries are added to cater for multiple buffers in the same page. -+ */ -+#define DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS 4 -+ -+typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_RESULT_ -+{ -+ IMG_CHAR szString[DEVMEM_ANNOTATION_MAX_LEN]; -+ IMG_DEV_VIRTADDR sBaseDevVAddr; -+ IMG_DEVMEM_SIZE_T uiSize; -+ IMG_BOOL bMap; -+ IMG_BOOL bRange; -+ IMG_BOOL bAll; -+ IMG_UINT64 ui64When; -+ IMG_UINT64 ui64Age; -+ /* info for sparse map/unmap operations (i.e. 
bRange=IMG_TRUE) */ -+ IMG_UINT32 ui32StartPage; -+ IMG_UINT32 ui32PageCount; -+ IMG_DEV_VIRTADDR sMapStartAddr; -+ IMG_DEV_VIRTADDR sMapEndAddr; -+ RGXMEM_PROCESS_INFO sProcessInfo; -+} DEVICEMEM_HISTORY_QUERY_OUT_RESULT; -+ -+typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_ -+{ -+ IMG_UINT32 ui32NumResults; -+ IMG_UINT64 ui64SearchCount; -+ /* result 0 is the newest */ -+ DEVICEMEM_HISTORY_QUERY_OUT_RESULT sResults[DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS]; -+} DEVICEMEM_HISTORY_QUERY_OUT; -+ -+void DevicememHistoryDumpRecordStats(PVRSRV_DEVICE_NODE *psDevNode, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile); -+ -+IMG_BOOL -+DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn, -+ DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut, -+ IMG_UINT32 ui32PageSizeBytes, -+ IMG_BOOL bMatchAnyAllocInPage); -+ -+#endif -diff --git a/drivers/gpu/drm/img-rogue/devicemem_pdump.h b/drivers/gpu/drm/img-rogue/devicemem_pdump.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/devicemem_pdump.h -@@ -0,0 +1,363 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device Memory Management PDump internal -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Services internal interface to PDump device memory management -+ functions that are shared between client and server code. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#ifndef DEVICEMEM_PDUMP_H -+#define DEVICEMEM_PDUMP_H -+ -+#include "devicemem.h" -+#include "pdumpdefs.h" -+#include "pdump.h" -+ -+#if defined(PDUMP) -+/* -+ * DevmemPDumpLoadMem() -+ * -+ * takes a memory descriptor, offset, and size, and takes the current contents -+ * of the memory at that location and writes it to the prm pdump file, and -+ * emits a pdump LDB to load the data from that file. The intention here is -+ * that the contents of the simulated buffer upon pdump playback will be made -+ * to be the same as they are when this command is run, enabling pdump of -+ * cases where the memory has been modified externally, i.e. by the host cpu -+ * or by a third party. -+ */ -+void -+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+/* -+ * DevmemPDumpLoadZeroMem() -+ * -+ * As DevmemPDumpLoadMem() but the PDump allocation will be populated with -+ * zeros from the zero page in the parameter stream -+ */ -+void -+DevmemPDumpLoadZeroMem(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+/* -+ * DevmemPDumpLoadMemValue32() -+ * -+ * As above but dumps the value at a dword-aligned address in plain text to -+ * the pdump script2 file. Useful for patching a buffer at pdump playback by -+ * simply editing the script output file. -+ * -+ * (The same functionality can be achieved by the above function but the -+ * binary PARAM file must be patched in that case.) -+ */ -+IMG_INTERNAL void -+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT32 ui32Value, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+/* -+ * DevmemPDumpMemValue64() -+ * -+ * As above but dumps the 64bit-value at a dword-aligned address in plain text -+ * to the pdump script2 file. Useful for patching a buffer at pdump playback by -+ * simply editing the script output file. -+ * -+ * (The same functionality can be achieved by the above function but the -+ * binary PARAM file must be patched in that case.) -+ */ -+IMG_INTERNAL void -+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT64 ui64Value, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+/* -+ * DevmemPDumpPageCatBaseToSAddr() -+ * -+ * Returns the symbolic address of a piece of memory represented by an offset -+ * into the mem descriptor. -+ */ -+PVRSRV_ERROR -+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T *puiMemOffset, -+ IMG_CHAR *pszName, -+ IMG_UINT32 ui32Size); -+ -+/* -+ * DevmemPDumpSaveToFile() -+ * -+ * Emits a pdump SAB to cause the current contents of the memory to be written -+ * to the given file during playback -+ */ -+void -+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 uiFileOffset); -+ -+/* -+ * DevmemPDumpSaveToFileVirtual() -+ * -+ * Emits a pdump SAB, just like DevmemPDumpSaveToFile(), but uses the virtual -+ * address and device MMU context to cause the pdump player to traverse the -+ * MMU page tables itself. 
-+ */ -+void -+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 ui32FileOffset, -+ IMG_UINT32 ui32PdumpFlags); -+ -+/* -+ * DevmemPDumpDataDescriptor() -+ * -+ * Emits a pdump CMD:OutputData, using the virtual address and device MMU -+ * context. Provides more flexibility than a pdump SAB because metadata can -+ * be passed to an external pdump player library via the command header. -+ */ -+void -+DevmemPDumpDataDescriptor(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 ui32HeaderType, -+ IMG_UINT32 ui32ElementType, -+ IMG_UINT32 ui32ElementCount, -+ IMG_UINT32 ui32PdumpFlags); -+ -+ -+/* -+ * -+ * DevmemPDumpDevmemPol32() -+ * -+ * Writes a PDump 'POL' command to wait for a masked 32-bit memory location to -+ * become the specified value. -+ */ -+PVRSRV_ERROR -+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ PDUMP_FLAGS_T ui32PDumpFlags); -+ -+#if defined(__KERNEL__) -+/* -+ * -+ * DevmemPDumpDevmemCheck32() -+ * -+ * Writes a PDump 'POL' command to run a single-shot check for a masked -+ * 32-bit memory location to match the specified value. -+ */ -+PVRSRV_ERROR -+DevmemPDumpDevmemCheck32(const DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ PDUMP_FLAGS_T ui32PDumpFlags); -+#endif -+ -+/* -+ * DevmemPDumpCBP() -+ * -+ * Polls for space in circular buffer. Reads the read offset from memory and -+ * waits until there is enough space to write the packet. 
-+ * -+ * psMemDesc - MemDesc which contains the read offset -+ * uiReadOffset - Offset into MemDesc to the read offset -+ * uiWriteOffset - Current write offset -+ * uiPacketSize - Size of packet to write -+ * uiBufferSize - Size of circular buffer -+ */ -+PVRSRV_ERROR -+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiReadOffset, -+ IMG_DEVMEM_OFFSET_T uiWriteOffset, -+ IMG_DEVMEM_SIZE_T uiPacketSize, -+ IMG_DEVMEM_SIZE_T uiBufferSize); -+ -+#else /* PDUMP */ -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(DevmemPDumpLoadMem) -+#endif -+static INLINE void -+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ PDUMP_FLAGS_T uiPDumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psMemDesc); -+ PVR_UNREFERENCED_PARAMETER(uiOffset); -+ PVR_UNREFERENCED_PARAMETER(uiSize); -+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(DevmemPDumpLoadMemValue32) -+#endif -+static INLINE void -+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT32 ui32Value, -+ PDUMP_FLAGS_T uiPDumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psMemDesc); -+ PVR_UNREFERENCED_PARAMETER(uiOffset); -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(DevmemPDumpLoadMemValue64) -+#endif -+static INLINE void -+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT64 ui64Value, -+ PDUMP_FLAGS_T uiPDumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psMemDesc); -+ PVR_UNREFERENCED_PARAMETER(uiOffset); -+ PVR_UNREFERENCED_PARAMETER(ui64Value); -+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(DevmemPDumpPageCatBaseToSAddr) -+#endif -+static INLINE PVRSRV_ERROR -+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T *puiMemOffset, -+ IMG_CHAR *pszName, -+ IMG_UINT32 ui32Size) -+{ -+ PVR_UNREFERENCED_PARAMETER(psMemDesc); -+ PVR_UNREFERENCED_PARAMETER(puiMemOffset); -+ PVR_UNREFERENCED_PARAMETER(pszName); -+ PVR_UNREFERENCED_PARAMETER(ui32Size); -+ -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(DevmemPDumpSaveToFile) -+#endif -+static INLINE void -+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 uiFileOffset) -+{ -+ PVR_UNREFERENCED_PARAMETER(psMemDesc); -+ PVR_UNREFERENCED_PARAMETER(uiOffset); -+ PVR_UNREFERENCED_PARAMETER(uiSize); -+ PVR_UNREFERENCED_PARAMETER(pszFilename); -+ PVR_UNREFERENCED_PARAMETER(uiFileOffset); -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(DevmemPDumpSaveToFileVirtual) -+#endif -+static INLINE void -+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 ui32FileOffset, -+ IMG_UINT32 ui32PdumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psMemDesc); -+ PVR_UNREFERENCED_PARAMETER(uiOffset); -+ PVR_UNREFERENCED_PARAMETER(uiSize); -+ PVR_UNREFERENCED_PARAMETER(pszFilename); -+ PVR_UNREFERENCED_PARAMETER(ui32FileOffset); -+ PVR_UNREFERENCED_PARAMETER(ui32PdumpFlags); -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(DevmemPDumpDevmemPol32) -+#endif -+static INLINE PVRSRV_ERROR -+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ 
PDUMP_POLL_OPERATOR eOperator, -+ PDUMP_FLAGS_T ui32PDumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psMemDesc); -+ PVR_UNREFERENCED_PARAMETER(uiOffset); -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+ PVR_UNREFERENCED_PARAMETER(ui32Mask); -+ PVR_UNREFERENCED_PARAMETER(eOperator); -+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); -+ -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(DevmemPDumpCBP) -+#endif -+static INLINE PVRSRV_ERROR -+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiReadOffset, -+ IMG_DEVMEM_OFFSET_T uiWriteOffset, -+ IMG_DEVMEM_SIZE_T uiPacketSize, -+ IMG_DEVMEM_SIZE_T uiBufferSize) -+{ -+ PVR_UNREFERENCED_PARAMETER(psMemDesc); -+ PVR_UNREFERENCED_PARAMETER(uiReadOffset); -+ PVR_UNREFERENCED_PARAMETER(uiWriteOffset); -+ PVR_UNREFERENCED_PARAMETER(uiPacketSize); -+ PVR_UNREFERENCED_PARAMETER(uiBufferSize); -+ -+ return PVRSRV_OK; -+} -+#endif /* PDUMP */ -+#endif /* DEVICEMEM_PDUMP_H */ -diff --git a/drivers/gpu/drm/img-rogue/devicemem_server.c b/drivers/gpu/drm/img-rogue/devicemem_server.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/devicemem_server.c -@@ -0,0 +1,2326 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device Memory Management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Server-side component of the Device Memory Management. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+/* our exported API */ -+#include "devicemem_server.h" -+#include "devicemem_utils.h" -+#include "devicemem.h" -+ -+#include "device.h" /* For device node */ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include "pvrsrv_error.h" -+ -+#include "mmu_common.h" -+#include "pdump_km.h" -+#include "pmr.h" -+#include "physmem.h" -+#include "pdumpdesc.h" -+ -+#include "allocmem.h" -+#include "osfunc.h" -+#include "lock.h" -+ -+#include "pvrsrv.h" /* for PVRSRVGetPVRSRVData() */ -+ -+#define DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE (1 << 0) -+#define DEVMEMHEAP_REFCOUNT_MIN 1 -+#define DEVMEMHEAP_REFCOUNT_MAX IMG_INT32_MAX -+#define DEVMEMRESERVATION_REFCOUNT_MIN 0 -+#define DEVMEMRESERVATION_REFCOUNT_MAX IMG_INT32_MAX -+ -+struct _DEVMEMINT_CTX_ -+{ -+ PVRSRV_DEVICE_NODE *psDevNode; -+ -+ /* MMU common code needs to have a context. There's a one-to-one -+ correspondence between device memory context and MMU context, -+ but we have the abstraction here so that we don't need to care -+ what the MMU does with its context, and the MMU code need not -+ know about us at all. */ -+ MMU_CONTEXT *psMMUContext; -+ -+ ATOMIC_T hRefCount; -+ -+ /* This handle is for devices that require notification when a new -+ memory context is created and they need to store private data that -+ is associated with the context. */ -+ IMG_HANDLE hPrivData; -+ -+ /* Protects access to sProcessNotifyListHead */ -+ POSWR_LOCK hListLock; -+ -+ /* The following tracks UM applications that need to be notified of a -+ * page fault */ -+ DLLIST_NODE sProcessNotifyListHead; -+ /* The following is a node for the list of registered devmem contexts */ -+ DLLIST_NODE sPageFaultNotifyListElem; -+ -+ /* Device virtual address of a page fault on this context */ -+ IMG_DEV_VIRTADDR sFaultAddress; -+ -+ /* General purpose flags */ -+ IMG_UINT32 ui32Flags; -+}; -+ -+struct _DEVMEMINT_CTX_EXPORT_ -+{ -+ DEVMEMINT_CTX *psDevmemCtx; -+ PMR *psPMR; -+ ATOMIC_T hRefCount; -+ DLLIST_NODE sNode; -+}; -+ -+struct _DEVMEMINT_HEAP_ -+{ -+ struct _DEVMEMINT_CTX_ *psDevmemCtx; -+ IMG_UINT32 uiLog2PageSize; -+ IMG_DEV_VIRTADDR sBaseAddr; -+ IMG_DEV_VIRTADDR sLastAddr; -+ ATOMIC_T uiRefCount; -+ -+ /* Private data for callback functions */ -+ IMG_HANDLE hPrivData; -+ -+ /* Callback function init */ -+ PFN_HEAP_INIT pfnInit; -+ -+ /* Callback function deinit */ -+ PFN_HEAP_DEINIT pfnDeInit; -+}; -+ -+struct _DEVMEMINT_RESERVATION_ -+{ -+ struct _DEVMEMINT_HEAP_ *psDevmemHeap; -+ IMG_DEV_VIRTADDR sBase; -+ IMG_DEVMEM_SIZE_T uiLength; -+ /* lock used to guard against potential race when freeing reservation */ -+ POS_LOCK hLock; -+ IMG_INT32 i32RefCount; -+}; -+ -+struct _DEVMEMINT_MAPPING_ -+{ -+ struct _DEVMEMINT_RESERVATION_ *psReservation; -+ PMR *psPMR; -+ IMG_UINT32 uiNumPages; -+}; -+ -+/*! Object representing a virtual range reservation and mapping between -+ * the virtual range and a set of PMRs. -+ * -+ * The physical allocations may be mapped entirely or partially to the entire -+ * or partial virtual range. */ -+struct _DEVMEMXINT_RESERVATION_ -+{ -+ /*! Pointer to a device memory heap this reservation is made on. */ -+ struct _DEVMEMINT_HEAP_ *psDevmemHeap; -+ /*! Base device virtual address of this reservation. */ -+ IMG_DEV_VIRTADDR sBase; -+ /*! Size of this reservation (in bytes). */ -+ IMG_DEVMEM_SIZE_T uiLength; -+ /*! Lock for protecting concurrent operations on the mapping. */ -+ POS_LOCK hLock; -+ /*! 
Array of PMRs of size `uiNumPages`. This array represents how the -+ * physical memory is mapped to the virtual range. Each entry in the array -+ * represents to one device page which means that one PMR may be spread -+ * across many indices. */ -+ PMR **ppsPMR; -+}; -+ -+struct _DEVMEMINT_PF_NOTIFY_ -+{ -+ IMG_UINT32 ui32PID; -+ DLLIST_NODE sProcessNotifyListElem; -+}; -+ -+/*************************************************************************/ /*! -+@Function DevmemIntCtxAcquire -+@Description Acquire a reference to the provided device memory context. -+@Return None -+*/ /**************************************************************************/ -+static INLINE void DevmemIntCtxAcquire(DEVMEMINT_CTX *psDevmemCtx) -+{ -+ OSAtomicIncrement(&psDevmemCtx->hRefCount); -+} -+ -+/*************************************************************************/ /*! -+@Function DevmemIntCtxRelease -+@Description Release the reference to the provided device memory context. -+ If this is the last reference which was taken then the -+ memory context will be freed. -+@Return None -+*/ /**************************************************************************/ -+static INLINE void DevmemIntCtxRelease(DEVMEMINT_CTX *psDevmemCtx) -+{ -+ if (OSAtomicDecrement(&psDevmemCtx->hRefCount) == 0) -+ { -+ /* The last reference has gone, destroy the context */ -+ PVRSRV_DEVICE_NODE *psDevNode = psDevmemCtx->psDevNode; -+ DLLIST_NODE *psNode, *psNodeNext; -+ -+ /* If there are any PIDs registered for page fault notification. -+ * Loop through the registered PIDs and free each one */ -+ dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext) -+ { -+ DEVMEMINT_PF_NOTIFY *psNotifyNode = -+ IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem); -+ dllist_remove_node(psNode); -+ OSFreeMem(psNotifyNode); -+ } -+ -+ /* If this context is in the list registered for a debugger, remove -+ * from that list */ -+ if (dllist_node_is_in_list(&psDevmemCtx->sPageFaultNotifyListElem)) -+ { -+ dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem); -+ } -+ -+ if (psDevNode->pfnUnregisterMemoryContext) -+ { -+ psDevNode->pfnUnregisterMemoryContext(psDevmemCtx->hPrivData); -+ } -+ MMU_ContextDestroy(psDevmemCtx->psMMUContext); -+ -+ OSWRLockDestroy(psDevmemCtx->hListLock); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed memory context %p", -+ __func__, psDevmemCtx)); -+ OSFreeMem(psDevmemCtx); -+ } -+} -+ -+/*************************************************************************/ /*! -+@Function DevmemIntHeapAcquire -+@Description Acquire a reference to the provided device memory heap. -+@Return IMG_TRUE if referenced and IMG_FALSE in case of error -+*/ /**************************************************************************/ -+static INLINE IMG_BOOL DevmemIntHeapAcquire(DEVMEMINT_HEAP *psDevmemHeap) -+{ -+ IMG_BOOL bSuccess = OSAtomicAddUnless(&psDevmemHeap->uiRefCount, 1, -+ DEVMEMHEAP_REFCOUNT_MAX); -+ -+ if (!bSuccess) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to acquire the device memory " -+ "heap, reference count has overflowed.", __func__)); -+ return IMG_FALSE; -+ } -+ -+ return IMG_TRUE; -+} -+ -+/*************************************************************************/ /*! -+@Function DevmemIntHeapRelease -+@Description Release the reference to the provided device memory heap. -+ If this is the last reference which was taken then the -+ memory context will be freed. 
-+@Return None -+*/ /**************************************************************************/ -+static INLINE void DevmemIntHeapRelease(DEVMEMINT_HEAP *psDevmemHeap) -+{ -+ IMG_BOOL bSuccess = OSAtomicSubtractUnless(&psDevmemHeap->uiRefCount, 1, -+ DEVMEMHEAP_REFCOUNT_MIN); -+ -+ if (!bSuccess) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to acquire the device memory " -+ "heap, reference count has underflowed.", __func__)); -+ } -+} -+ -+/*************************************************************************/ /*! -+@Function DevmemIntReservationAcquire -+@Description Acquire a reference to the provided device memory reservation. -+@Return IMG_TRUE if referenced and IMG_FALSE in case of error -+*/ /**************************************************************************/ -+IMG_BOOL DevmemIntReservationAcquire(DEVMEMINT_RESERVATION *psDevmemReservation) -+{ -+ IMG_BOOL bSuccess; -+ -+ OSLockAcquire(psDevmemReservation->hLock); -+ -+ bSuccess = (psDevmemReservation->i32RefCount < DEVMEMRESERVATION_REFCOUNT_MAX); -+ -+ if (!bSuccess) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to acquire the device memory " -+ "reservation, reference count has overflowed.", __func__)); -+ } -+ else -+ { -+ psDevmemReservation->i32RefCount++; -+ } -+ -+ OSLockRelease(psDevmemReservation->hLock); -+ return bSuccess; -+} -+ -+/*************************************************************************/ /*! -+@Function DevmemIntReservationRelease -+@Description Release the reference to the provided device memory reservation. -+ If this is the last reference which was taken then the -+ reservation will be freed. -+@Return None. -+*/ /**************************************************************************/ -+void DevmemIntReservationRelease(DEVMEMINT_RESERVATION *psDevmemReservation) -+{ -+ OSLockAcquire(psDevmemReservation->hLock); -+ -+ if (psDevmemReservation->i32RefCount == DEVMEMRESERVATION_REFCOUNT_MIN) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to release the device memory " -+ "reservation, reference count has underflowed.", __func__)); -+ } -+ else -+ { -+ /* Decrement reservation reference count and free it -+ * if this was the final reference -+ */ -+ if (--psDevmemReservation->i32RefCount == DEVMEMRESERVATION_REFCOUNT_MIN) -+ { -+ /* Destroy lock */ -+ OSLockRelease(psDevmemReservation->hLock); -+ OSLockDestroy(psDevmemReservation->hLock); -+ OSFreeMem(psDevmemReservation); -+ goto exit_noderef; -+ } -+ } -+ -+ OSLockRelease(psDevmemReservation->hLock); -+exit_noderef: -+ return; -+} -+ -+/*************************************************************************/ /*! -+@Function DevmemServerGetImportHandle -+@Description For given exportable memory descriptor returns PMR handle. -+@Return Memory is exportable - Success -+ PVRSRV_ERROR failure code -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc, -+ IMG_HANDLE *phImport) -+{ -+ PVRSRV_ERROR eError; -+ -+ if ((GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_EXPORTABLE) == 0) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION, e0); -+ } -+ -+ /* A new handle means a new import tracking the PMR. 
-+ * Hence the source PMR memory layout should be marked fixed -+ * to make sure the importer view of the memory is the same as -+ * the exporter throughout its lifetime */ -+ PMR_SetLayoutFixed((PMR *)psMemDesc->psImport->hPMR, IMG_TRUE); -+ -+ *phImport = psMemDesc->psImport->hPMR; -+ return PVRSRV_OK; -+ -+e0: -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function DevmemServerGetHeapHandle -+@Description For given reservation returns the Heap handle. -+@Return PVRSRV_ERROR failure code -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation, -+ IMG_HANDLE *phHeap) -+{ -+ if (psReservation == NULL || phHeap == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ *phHeap = psReservation->psDevmemHeap; -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function DevmemServerGetContext -+@Description For given heap returns the context. -+@Return PVRSRV_ERROR failure code -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+DevmemServerGetContext(DEVMEMINT_HEAP *psDevmemHeap, -+ DEVMEMINT_CTX **ppsDevmemCtxPtr) -+{ -+ if (psDevmemHeap == NULL || ppsDevmemCtxPtr == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ *ppsDevmemCtxPtr = psDevmemHeap->psDevmemCtx; -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function DevmemServerGetPrivData -+@Description For given context returns the private data handle. -+@Return PVRSRV_ERROR failure code -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+DevmemServerGetPrivData(DEVMEMINT_CTX *psDevmemCtx, -+ IMG_HANDLE *phPrivData) -+{ -+ if (psDevmemCtx == NULL || phPrivData == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ *phPrivData = psDevmemCtx->hPrivData; -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function DevmemIntCtxCreate -+@Description Creates and initialises a device memory context. -+@Return valid Device Memory context handle - Success -+ PVRSRV_ERROR failure code -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+DevmemIntCtxCreate(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_BOOL bKernelFWMemoryCtx, -+ DEVMEMINT_CTX **ppsDevmemCtxPtr, -+ IMG_HANDLE *hPrivData, -+ IMG_UINT32 *pui32CPUCacheLineSize) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_CTX *psDevmemCtx; -+ IMG_HANDLE hPrivDataInt = NULL; -+ MMU_DEVICEATTRIBS *psMMUDevAttrs = psDeviceNode->pfnGetMMUDeviceAttributes(psDeviceNode, -+ bKernelFWMemoryCtx); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s", __func__)); -+ -+ /* Only allow request for a kernel context that comes from a direct bridge -+ * (psConnection == NULL). Only the FW/KM Ctx is created over the direct bridge. */ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(!bKernelFWMemoryCtx || psConnection == NULL, -+ "bKernelFWMemoryCtx && psConnection"); -+ -+ /* -+ * Ensure that we are safe to perform unaligned accesses on memory -+ * we mark write-combine, as the compiler might generate -+ * instructions operating on this memory which require this -+ * assumption to be true. 
-+ */ -+ PVR_ASSERT(OSIsWriteCombineUnalignedSafe()); -+ -+ /* allocate a Devmem context */ -+ psDevmemCtx = OSAllocMem(sizeof(*psDevmemCtx)); -+ PVR_LOG_GOTO_IF_NOMEM(psDevmemCtx, eError, fail_alloc); -+ -+ OSAtomicWrite(&psDevmemCtx->hRefCount, 1); -+ psDevmemCtx->psDevNode = psDeviceNode; -+ -+ /* Call down to MMU context creation */ -+ -+ eError = MMU_ContextCreate(psConnection, -+ psDeviceNode, -+ &psDevmemCtx->psMMUContext, -+ psMMUDevAttrs); -+ PVR_LOG_GOTO_IF_ERROR(eError, "MMU_ContextCreate", fail_mmucontext); -+ -+ if (psDeviceNode->pfnRegisterMemoryContext) -+ { -+ eError = psDeviceNode->pfnRegisterMemoryContext(psDeviceNode, psDevmemCtx->psMMUContext, &hPrivDataInt); -+ PVR_LOG_GOTO_IF_ERROR(eError, "pfnRegisterMemoryContext", fail_register); -+ } -+ -+ /* Store the private data as it is required to unregister the memory context */ -+ psDevmemCtx->hPrivData = hPrivDataInt; -+ *hPrivData = hPrivDataInt; -+ *ppsDevmemCtxPtr = psDevmemCtx; -+ -+ /* Pass the CPU cache line size through the bridge to the user mode as it can't be queried in user mode.*/ -+ *pui32CPUCacheLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE); -+ -+ /* Initialise the PID notify list */ -+ OSWRLockCreate(&psDevmemCtx->hListLock); -+ dllist_init(&(psDevmemCtx->sProcessNotifyListHead)); -+ psDevmemCtx->sPageFaultNotifyListElem.psNextNode = NULL; -+ psDevmemCtx->sPageFaultNotifyListElem.psPrevNode = NULL; -+ -+ /* Initialise page fault address */ -+ psDevmemCtx->sFaultAddress.uiAddr = 0ULL; -+ -+ /* Initialise flags */ -+ psDevmemCtx->ui32Flags = 0; -+ -+ return PVRSRV_OK; -+ -+fail_register: -+ MMU_ContextDestroy(psDevmemCtx->psMMUContext); -+fail_mmucontext: -+ OSFreeMem(psDevmemCtx); -+fail_alloc: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function DevmemIntHeapCreate -+@Description Creates and initialises a device memory heap. 
-+@Return valid Device Memory heap handle - Success -+ PVRSRV_ERROR failure code -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx, -+ IMG_UINT32 uiHeapConfigIndex, -+ IMG_UINT32 uiHeapIndex, -+ IMG_DEV_VIRTADDR sHeapBaseAddr, -+ IMG_UINT32 uiLog2DataPageSize, -+ DEVMEMINT_HEAP **ppsDevmemHeapPtr) -+{ -+ DEVMEMINT_HEAP *psDevmemHeap; -+ PVRSRV_ERROR eError; -+ IMG_DEV_VIRTADDR sBlueprintHeapBaseAddr; -+ IMG_DEVMEM_SIZE_T uiBlueprintHeapLength; -+ IMG_DEVMEM_SIZE_T uiBlueprintResRgnLength; -+ IMG_UINT32 ui32BlueprintLog2DataPageSize; -+ IMG_UINT32 ui32BlueprintLog2ImportAlignment; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s", __func__)); -+ -+ /* allocate a Devmem context */ -+ psDevmemHeap = OSAllocMem(sizeof(*psDevmemHeap)); -+ PVR_LOG_RETURN_IF_NOMEM(psDevmemHeap, "psDevmemHeap"); -+ -+ psDevmemHeap->psDevmemCtx = psDevmemCtx; -+ -+ DevmemIntCtxAcquire(psDevmemHeap->psDevmemCtx); -+ -+ OSAtomicWrite(&psDevmemHeap->uiRefCount, 1); -+ -+ /* Check page size and base addr match the heap blueprint */ -+ eError = HeapCfgHeapDetails(NULL, -+ psDevmemHeap->psDevmemCtx->psDevNode, -+ uiHeapConfigIndex, -+ uiHeapIndex, -+ 0, NULL, -+ &sBlueprintHeapBaseAddr, -+ &uiBlueprintHeapLength, -+ &uiBlueprintResRgnLength, -+ &ui32BlueprintLog2DataPageSize, -+ &ui32BlueprintLog2ImportAlignment); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get details for HeapConfig:%d HeapIndex:%d.", -+ __func__, uiHeapConfigIndex, uiHeapIndex)); -+ goto ErrorCtxRelease; -+ } -+ -+ if (uiLog2DataPageSize != ui32BlueprintLog2DataPageSize) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Incorrect page size passed - Passed: %d, Expected: %d for HeapConfig:%d HeapIndex:%d.", -+ uiLog2DataPageSize, ui32BlueprintLog2DataPageSize, uiHeapConfigIndex, uiHeapIndex)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto ErrorCtxRelease; -+ } -+ -+ if (sHeapBaseAddr.uiAddr != sBlueprintHeapBaseAddr.uiAddr) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Incorrect heap address passed - Passed: "IMG_DEV_VIRTADDR_FMTSPEC", Expected: "IMG_DEV_VIRTADDR_FMTSPEC" for HeapConfig: %d HeapIndex: %d.", -+ (IMG_UINT64)sHeapBaseAddr.uiAddr, -+ (IMG_UINT64)sBlueprintHeapBaseAddr.uiAddr, uiHeapConfigIndex, uiHeapIndex)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto ErrorCtxRelease; -+ } -+ -+ psDevmemHeap->uiLog2PageSize = uiLog2DataPageSize; -+ psDevmemHeap->sBaseAddr = sHeapBaseAddr; -+ /* Store the first non-accessible address as our LastAddr. 
We can access -+ * every address between sHeapBaseAddr and sHeapBaseAddr + HeapLength - 1 -+ */ -+ psDevmemHeap->sLastAddr = sHeapBaseAddr; -+ psDevmemHeap->sLastAddr.uiAddr = sHeapBaseAddr.uiAddr + uiBlueprintHeapLength; -+ -+ eError = HeapCfgGetCallbacks(psDevmemHeap->psDevmemCtx->psDevNode, -+ uiHeapConfigIndex, -+ uiHeapIndex, -+ &psDevmemHeap->pfnInit, -+ &psDevmemHeap->pfnDeInit); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get callbacks for HeapConfig:%d HeapIndex:%d.", -+ __func__, uiHeapConfigIndex, uiHeapIndex)); -+ goto ErrorCtxRelease; -+ } -+ -+ if (psDevmemHeap->pfnInit != NULL) -+ { -+ eError = psDevmemHeap->pfnInit(psDevmemHeap->psDevmemCtx->psDevNode, -+ psDevmemHeap, -+ &psDevmemHeap->hPrivData); -+ PVR_GOTO_IF_ERROR(eError, ErrorCtxRelease); -+ } -+ -+ PVR_DPF((PVR_DBG_VERBOSE, "%s: sBaseAddr = %" IMG_UINT64_FMTSPECX ", " -+ "sLastAddr = %" IMG_UINT64_FMTSPECX, __func__, -+ psDevmemHeap->sBaseAddr.uiAddr, psDevmemHeap->sLastAddr.uiAddr)); -+ -+ *ppsDevmemHeapPtr = psDevmemHeap; -+ -+ return PVRSRV_OK; -+ -+ErrorCtxRelease: -+ DevmemIntCtxRelease(psDevmemHeap->psDevmemCtx); -+ OSFreeMem(psDevmemHeap); -+ -+ return eError; -+} -+ -+static INLINE IMG_UINT32 -+_ReservationPageCount(DEVMEMXINT_RESERVATION *psRsrv) -+{ -+ return psRsrv->uiLength >> psRsrv->psDevmemHeap->uiLog2PageSize; -+} -+ -+static INLINE IMG_DEV_VIRTADDR -+_ReservationPageAddress(DEVMEMXINT_RESERVATION *psRsrv, IMG_UINT32 uiVirtPageOffset) -+{ -+ IMG_DEV_VIRTADDR sAddr = { -+ .uiAddr = psRsrv->sBase.uiAddr + (uiVirtPageOffset << psRsrv->psDevmemHeap->uiLog2PageSize) -+ }; -+ -+ return sAddr; -+} -+ -+PVRSRV_ERROR -+DevmemXIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap, -+ IMG_DEV_VIRTADDR sAllocationDevVAddr, -+ IMG_DEVMEM_SIZE_T uiAllocationSize, -+ DEVMEMXINT_RESERVATION **ppsRsrv) -+{ -+ DEVMEMXINT_RESERVATION *psRsrv; -+ IMG_UINT32 uiNumPages; -+ PVRSRV_ERROR eError; -+ -+ if (!DevmemIntHeapAcquire(psDevmemHeap)) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_REFCOUNT_OVERFLOW, ErrorReturnError); -+ } -+ -+ /* align address to full device page size */ -+ uiAllocationSize = PVR_ALIGN(uiAllocationSize, IMG_UINT64_C(1) << psDevmemHeap->uiLog2PageSize); -+ uiNumPages = uiAllocationSize >> psDevmemHeap->uiLog2PageSize; -+ -+ /* Check that requested address + size fits in our heap */ -+ PVR_LOG_GOTO_IF_INVALID_PARAM(sAllocationDevVAddr.uiAddr >= psDevmemHeap->sBaseAddr.uiAddr, -+ eError, ErrorUnreferenceHeap); -+ -+ PVR_LOG_GOTO_IF_INVALID_PARAM(sAllocationDevVAddr.uiAddr + uiAllocationSize <= psDevmemHeap->sLastAddr.uiAddr, -+ eError, ErrorUnreferenceHeap); -+ -+ psRsrv = OSAllocZMem(sizeof(*psRsrv->ppsPMR) * uiNumPages + sizeof(*psRsrv)); -+ PVR_LOG_GOTO_IF_NOMEM(psRsrv, eError, ErrorUnreferenceHeap); -+ -+ eError = OSLockCreate(&psRsrv->hLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", ErrorFreeReservation); -+ -+ psRsrv->sBase = sAllocationDevVAddr; -+ psRsrv->uiLength = uiAllocationSize; -+ psRsrv->ppsPMR = IMG_OFFSET_ADDR(psRsrv, sizeof(*psRsrv)); -+ -+ eError = MMU_Alloc(psDevmemHeap->psDevmemCtx->psMMUContext, -+ uiAllocationSize, -+ &uiAllocationSize, -+ 0, /* IMG_UINT32 uiProtFlags */ -+ 0, /* alignment is n/a since we supply devvaddr */ -+ &sAllocationDevVAddr, -+ psDevmemHeap->uiLog2PageSize); -+ PVR_GOTO_IF_ERROR(eError, ErrorDestroyLock); -+ -+ /* since we supplied the virt addr, MMU_Alloc shouldn't have -+ chosen a new one for us */ -+ PVR_ASSERT(sAllocationDevVAddr.uiAddr == psRsrv->sBase.uiAddr); -+ -+ psRsrv->psDevmemHeap = psDevmemHeap; -+ *ppsRsrv = 
psRsrv; -+ -+ return PVRSRV_OK; -+ -+ErrorDestroyLock: -+ OSLockDestroy(psRsrv->hLock); -+ErrorFreeReservation: -+ OSFreeMem(psRsrv); -+ErrorUnreferenceHeap: -+ DevmemIntHeapRelease(psDevmemHeap); -+ErrorReturnError: -+ return eError; -+} -+ -+PVRSRV_ERROR -+DevmemXIntUnreserveRange(DEVMEMXINT_RESERVATION *psRsrv) -+{ -+ IMG_UINT32 i; -+ -+ MMU_Free(psRsrv->psDevmemHeap->psDevmemCtx->psMMUContext, -+ psRsrv->sBase, -+ psRsrv->uiLength, -+ psRsrv->psDevmemHeap->uiLog2PageSize); -+ -+ /* No need to lock the mapping here since this is a handle destruction path which can not be -+ * executed while there are outstanding handle lookups, i.e. other operations are performed -+ * on the mapping. Bridge and handle framework also make sure this path can also not be executed -+ * concurrently. */ -+ -+ for (i = 0; i < _ReservationPageCount(psRsrv); i++) -+ { -+ if (psRsrv->ppsPMR[i] != NULL) -+ { -+ PMRUnrefPMR2(psRsrv->ppsPMR[i]); -+ } -+ } -+ -+ /* Don't bother with refcount on reservation, as a reservation only ever -+ * holds one mapping, so we directly decrement the refcount on the heap -+ * instead. -+ * Function will print an error if the heap could not be unreferenced. */ -+ DevmemIntHeapRelease(psRsrv->psDevmemHeap); -+ -+ OSLockDestroy(psRsrv->hLock); -+ OSFreeMem(psRsrv); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+DevmemXIntMapPages(DEVMEMXINT_RESERVATION *psRsrv, -+ PMR *psPMR, -+ IMG_UINT32 uiPageCount, -+ IMG_UINT32 uiPhysPageOffset, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 uiVirtPageOffset) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiPMRMaxChunkCount = PMRGetMaxChunkCount(psPMR); -+ DEVMEMINT_HEAP *psDevmemHeap = psRsrv->psDevmemHeap; -+ IMG_UINT32 uiLog2PageSize = psDevmemHeap->uiLog2PageSize; -+ IMG_UINT32 i; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM((uiPageCount + uiPhysPageOffset) <= uiPMRMaxChunkCount, "uiPageCount+uiPhysPageOffset"); -+ -+ /* The range is not valid for the given virtual descriptor */ -+ PVR_LOG_RETURN_IF_FALSE((uiVirtPageOffset + uiPageCount) <= _ReservationPageCount(psRsrv), -+ "mapping offset out of range", PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE); -+ PVR_LOG_RETURN_IF_FALSE((uiFlags & ~PVRSRV_MEMALLOCFLAGS_DEVMEMX_VIRTUAL_MASK) == 0, -+ "invalid flags", PVRSRV_ERROR_INVALID_FLAGS); -+ -+ if (uiLog2PageSize > PMR_GetLog2Contiguity(psPMR)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Device heap and PMR have incompatible " -+ "Log2Contiguity (%u - %u). 
PMR contiguity must be a multiple " -+ "of the heap contiguity!", __func__, uiLog2PageSize, -+ PMR_GetLog2Contiguity(psPMR))); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ OSLockAcquire(psRsrv->hLock); -+ -+ eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext, -+ uiFlags, -+ _ReservationPageAddress(psRsrv, uiVirtPageOffset), -+ psPMR, -+ uiPhysPageOffset, -+ uiPageCount, -+ NULL, -+ psDevmemHeap->uiLog2PageSize); -+ PVR_GOTO_IF_ERROR(eError, ErrUnlock); -+ -+ for (i = uiVirtPageOffset; i < (uiVirtPageOffset + uiPageCount); i++) -+ { -+ PMRRefPMR2(psPMR); -+ -+ if (psRsrv->ppsPMR[i] != NULL) -+ { -+ PMRUnrefPMR2(psRsrv->ppsPMR[i]); -+ } -+ -+ psRsrv->ppsPMR[i] = psPMR; -+ } -+ -+ OSLockRelease(psRsrv->hLock); -+ -+ return PVRSRV_OK; -+ -+ErrUnlock: -+ OSLockRelease(psRsrv->hLock); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+DevmemXIntUnmapPages(DEVMEMXINT_RESERVATION *psRsrv, -+ IMG_UINT32 uiVirtPageOffset, -+ IMG_UINT32 uiPageCount) -+{ -+ DEVMEMINT_HEAP *psDevmemHeap = psRsrv->psDevmemHeap; -+ IMG_UINT32 i; -+ -+ PVR_LOG_RETURN_IF_FALSE((uiVirtPageOffset + uiPageCount) <= _ReservationPageCount(psRsrv), -+ "mapping offset out of range", PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE); -+ -+ OSLockAcquire(psRsrv->hLock); -+ -+ /* Unmap the pages and mark them invalid in the MMU PTE */ -+ MMU_UnmapPages(psDevmemHeap->psDevmemCtx->psMMUContext, -+ 0, -+ _ReservationPageAddress(psRsrv, uiVirtPageOffset), -+ uiPageCount, -+ NULL, -+ psDevmemHeap->uiLog2PageSize, -+ 0); -+ -+ for (i = uiVirtPageOffset; i < (uiVirtPageOffset + uiPageCount); i++) -+ { -+ if (psRsrv->ppsPMR[i] != NULL) -+ { -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ /* If PMR is allocated on demand the backing memory is freed by -+ * pfnUnlockPhysAddresses(). */ -+ if (!PVRSRV_CHECK_ON_DEMAND(PMR_Flags(psRsrv->ppsPMR[i]))) -+ { -+ PMRMarkForDeferFree(psRsrv->ppsPMR[i]); -+ } -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ PMRUnrefPMR2(psRsrv->ppsPMR[i]); -+ psRsrv->ppsPMR[i] = NULL; -+ } -+ } -+ -+ OSLockRelease(psRsrv->hLock); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+DevmemXIntMapVRangeToBackingPage(DEVMEMXINT_RESERVATION *psRsrv, -+ IMG_UINT32 uiPageCount, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 uiVirtPageOffset) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_HEAP *psDevmemHeap = psRsrv->psDevmemHeap; -+ IMG_UINT32 i; -+ -+ /* The range is not valid for the given virtual descriptor */ -+ PVR_LOG_RETURN_IF_FALSE((uiVirtPageOffset + uiPageCount) <= _ReservationPageCount(psRsrv), -+ "mapping offset out of range", PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE); -+ PVR_LOG_RETURN_IF_FALSE((uiFlags & ~(PVRSRV_MEMALLOCFLAGS_DEVMEMX_VIRTUAL_MASK | -+ PVRSRV_MEMALLOCFLAG_ZERO_BACKING)) == 0, -+ "invalid flags", PVRSRV_ERROR_INVALID_FLAGS); -+ -+ OSLockAcquire(psRsrv->hLock); -+ -+ eError = MMUX_MapVRangeToBackingPage(psDevmemHeap->psDevmemCtx->psMMUContext, -+ uiFlags, -+ _ReservationPageAddress(psRsrv, uiVirtPageOffset), -+ uiPageCount, -+ psDevmemHeap->uiLog2PageSize); -+ if (eError == PVRSRV_OK) -+ { -+ for (i = uiVirtPageOffset; i < (uiVirtPageOffset + uiPageCount); i++) -+ { -+ if (psRsrv->ppsPMR[i] != NULL) -+ { -+ PMRUnrefPMR2(psRsrv->ppsPMR[i]); -+ psRsrv->ppsPMR[i] = NULL; -+ } -+ } -+ } -+ -+ OSLockRelease(psRsrv->hLock); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap, -+ DEVMEMINT_RESERVATION *psReservation, -+ PMR *psPMR, -+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags, -+ DEVMEMINT_MAPPING **ppsMappingPtr) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_MAPPING *psMapping; -+ /* number of 
pages (device pages) that allocation spans */ -+ IMG_UINT32 ui32NumDevPages; -+ /* device virtual address of start of allocation */ -+ IMG_DEV_VIRTADDR sAllocationDevVAddr; -+ /* and its length */ -+ IMG_DEVMEM_SIZE_T uiAllocationSize; -+ IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize; -+ IMG_BOOL bIsSparse = IMG_FALSE; -+ -+ if (uiLog2HeapContiguity > PMR_GetLog2Contiguity(psPMR)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Device heap and PMR have incompatible contiguity (%u - %u). " -+ "Heap contiguity must be a multiple of the heap contiguity!", -+ __func__, -+ uiLog2HeapContiguity, -+ PMR_GetLog2Contiguity(psPMR) )); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, ErrorReturnError); -+ } -+ -+ if (!DevmemIntReservationAcquire(psReservation)) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_REFCOUNT_OVERFLOW, ErrorReturnError); -+ } -+ -+ /* allocate memory to record the mapping info */ -+ psMapping = OSAllocMem(sizeof(*psMapping)); -+ PVR_LOG_GOTO_IF_NOMEM(psMapping, eError, ErrorUnreference); -+ -+ uiAllocationSize = psReservation->uiLength; -+ -+ ui32NumDevPages = 0xffffffffU & ( ( (uiAllocationSize - 1) >> uiLog2HeapContiguity) + 1); -+ PVR_ASSERT((IMG_DEVMEM_SIZE_T) ui32NumDevPages << uiLog2HeapContiguity == uiAllocationSize); -+ -+ eError = PMRLockSysPhysAddresses(psPMR); -+ PVR_GOTO_IF_ERROR(eError, ErrorFreeMapping); -+ -+ sAllocationDevVAddr = psReservation->sBase; -+ -+ /*Check if the PMR that needs to be mapped is sparse */ -+ bIsSparse = PMR_IsSparse(psPMR); -+ if (bIsSparse) -+ { -+ /* N.B. We pass mapping permission flags to MMU_MapPages and let -+ * it reject the mapping if the permissions on the PMR are not compatible. */ -+ eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext, -+ uiMapFlags, -+ sAllocationDevVAddr, -+ psPMR, -+ 0, -+ ui32NumDevPages, -+ NULL, -+ uiLog2HeapContiguity); -+ PVR_GOTO_IF_ERROR(eError, ErrorUnlockPhysAddr); -+ } -+ else -+ { -+ eError = MMU_MapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext, -+ sAllocationDevVAddr, -+ psPMR, -+ (IMG_DEVMEM_SIZE_T) ui32NumDevPages << uiLog2HeapContiguity, -+ uiMapFlags, -+ uiLog2HeapContiguity); -+ PVR_GOTO_IF_ERROR(eError, ErrorUnlockPhysAddr); -+ } -+ -+ psMapping->psReservation = psReservation; -+ psMapping->uiNumPages = ui32NumDevPages; -+ psMapping->psPMR = psPMR; -+ -+ *ppsMappingPtr = psMapping; -+ -+ return PVRSRV_OK; -+ -+ErrorUnlockPhysAddr: -+ { -+ PVRSRV_ERROR eError1 = PVRSRV_OK; -+ eError1 = PMRUnlockSysPhysAddresses(psPMR); -+ PVR_LOG_IF_ERROR(eError1, "PMRUnlockSysPhysAddresses"); -+ -+ *ppsMappingPtr = NULL; -+ } -+ -+ErrorFreeMapping: -+ OSFreeMem(psMapping); -+ -+ErrorUnreference: -+ /* if fails there's not much to do (the function will print an error) */ -+ DevmemIntReservationRelease(psReservation); -+ -+ErrorReturnError: -+ PVR_ASSERT (eError != PVRSRV_OK); -+ return eError; -+} -+ -+PVRSRV_ERROR -+DevmemIntReserveRangeAndMapPMR(DEVMEMINT_HEAP *psDevmemHeap, -+ IMG_DEV_VIRTADDR sAllocationDevVAddr, -+ IMG_DEVMEM_SIZE_T uiAllocationSize, -+ PMR *psPMR, -+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags, -+ DEVMEMINT_MAPPING **ppsMappingPtr) -+{ -+ PVRSRV_ERROR eError, eUnreserveError; -+ -+ DEVMEMINT_RESERVATION* psReservation; -+ -+ eError = DevmemIntReserveRange(psDevmemHeap, sAllocationDevVAddr, uiAllocationSize, &psReservation); -+ PVR_GOTO_IF_ERROR(eError, ErrorReturnError); -+ -+ eError = DevmemIntMapPMR(psDevmemHeap, psReservation, psPMR, uiMapFlags, ppsMappingPtr); -+ PVR_GOTO_IF_ERROR(eError, ErrorUnreserve); -+ -+ return PVRSRV_OK; -+ -+ErrorUnreserve: -+ 
eUnreserveError = DevmemIntUnreserveRange(psReservation); -+ PVR_LOG_IF_ERROR(eUnreserveError, "DevmemIntUnreserveRange"); -+ErrorReturnError: -+ return eError; -+} -+ -+PVRSRV_ERROR -+DevmemIntUnreserveRangeAndUnmapPMR(DEVMEMINT_MAPPING *psMappingPtr) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_RESERVATION *psReservation = psMappingPtr->psReservation; -+ -+ eError = DevmemIntUnmapPMR(psMappingPtr); -+ PVR_GOTO_IF_ERROR(eError, ErrorReturnError); -+ eError = DevmemIntUnreserveRange(psReservation); -+ PVR_GOTO_IF_ERROR(eError, ErrorReturnError); -+ -+ return PVRSRV_OK; -+ -+ErrorReturnError: -+ return eError; -+} -+ -+PVRSRV_ERROR -+DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_HEAP *psDevmemHeap = psMapping->psReservation->psDevmemHeap; -+ /* device virtual address of start of allocation */ -+ IMG_DEV_VIRTADDR sAllocationDevVAddr; -+ /* number of pages (device pages) that allocation spans */ -+ IMG_UINT32 ui32NumDevPages; -+ IMG_BOOL bIsSparse = IMG_FALSE; -+ -+ ui32NumDevPages = psMapping->uiNumPages; -+ sAllocationDevVAddr = psMapping->psReservation->sBase; -+ -+ /*Check if the PMR that needs to be mapped is sparse */ -+ bIsSparse = PMR_IsSparse(psMapping->psPMR); -+ -+ if (bIsSparse) -+ { -+ MMU_UnmapPages(psDevmemHeap->psDevmemCtx->psMMUContext, -+ 0, -+ sAllocationDevVAddr, -+ ui32NumDevPages, -+ NULL, -+ psDevmemHeap->uiLog2PageSize, -+ 0); -+ } -+ else -+ { -+ MMU_UnmapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext, -+ sAllocationDevVAddr, -+ ui32NumDevPages, -+ psDevmemHeap->uiLog2PageSize); -+ } -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ /* If PMR is allocated on demand the backing memory is freed by -+ * pfnUnlockPhysAddresses(). */ -+ if (!PVRSRV_CHECK_ON_DEMAND(PMR_Flags(psMapping->psPMR))) -+ { -+ PMRMarkForDeferFree(psMapping->psPMR); -+ } -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+ eError = PMRUnlockSysPhysAddresses(psMapping->psPMR); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ DevmemIntReservationRelease(psMapping->psReservation); -+ -+ OSFreeMem(psMapping); -+ -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR -+DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap, -+ IMG_DEV_VIRTADDR sAllocationDevVAddr, -+ IMG_DEVMEM_SIZE_T uiAllocationSize, -+ DEVMEMINT_RESERVATION **ppsReservationPtr) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEMINT_RESERVATION *psReservation; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(sAllocationDevVAddr.uiAddr >= psDevmemHeap->sBaseAddr.uiAddr, "sAllocationDevVAddr"); -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM((sAllocationDevVAddr.uiAddr + uiAllocationSize) <= psDevmemHeap->sLastAddr.uiAddr, "uiAllocationSize"); -+ -+ if (!DevmemIntHeapAcquire(psDevmemHeap)) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_REFCOUNT_OVERFLOW, -+ ErrorReturnError); -+ } -+ -+ /* allocate memory to record the reservation info */ -+ psReservation = OSAllocMem(sizeof(*psReservation)); -+ PVR_LOG_GOTO_IF_NOMEM(psReservation, eError, ErrorUnreference); -+ -+ /* Create lock */ -+ eError = OSLockCreate(&psReservation->hLock); -+ -+ /* Initialise refcount */ -+ psReservation->i32RefCount = 1; -+ -+ psReservation->sBase = sAllocationDevVAddr; -+ psReservation->uiLength = uiAllocationSize; -+ -+ eError = MMU_Alloc(psDevmemHeap->psDevmemCtx->psMMUContext, -+ uiAllocationSize, -+ &uiAllocationSize, -+ 0, /* IMG_UINT32 uiProtFlags */ -+ 0, /* alignment is n/a since we supply devvaddr */ -+ &sAllocationDevVAddr, -+ psDevmemHeap->uiLog2PageSize); -+ PVR_GOTO_IF_ERROR(eError, ErrorFreeReservation); -+ -+ /* since we supplied the virt addr, MMU_Alloc 
shouldn't have -+ chosen a new one for us */ -+ PVR_ASSERT(sAllocationDevVAddr.uiAddr == psReservation->sBase.uiAddr); -+ -+ psReservation->psDevmemHeap = psDevmemHeap; -+ *ppsReservationPtr = psReservation; -+ -+ return PVRSRV_OK; -+ -+ /* -+ * error exit paths follow -+ */ -+ -+ErrorFreeReservation: -+ OSFreeMem(psReservation); -+ -+ErrorUnreference: -+ /* if fails there's not much to do (the function will print an error) */ -+ DevmemIntHeapRelease(psDevmemHeap); -+ -+ErrorReturnError: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+PVRSRV_ERROR -+DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psReservation) -+{ -+ IMG_DEV_VIRTADDR sBase = psReservation->sBase; -+ IMG_UINT32 uiLength = psReservation->uiLength; -+ DEVMEMINT_HEAP *psDevmemHeap = psReservation->psDevmemHeap; -+ IMG_UINT32 uiLog2DataPageSize = psDevmemHeap->uiLog2PageSize; -+ -+ MMU_Free(psDevmemHeap->psDevmemCtx->psMMUContext, -+ sBase, -+ uiLength, -+ uiLog2DataPageSize); -+ -+ DevmemIntReservationRelease(psReservation); -+ DevmemIntHeapRelease(psDevmemHeap); -+ -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR -+DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap) -+{ -+ if (psDevmemHeap->pfnDeInit != NULL) -+ { -+ psDevmemHeap->pfnDeInit(psDevmemHeap->hPrivData); -+ psDevmemHeap->pfnDeInit = NULL; -+ } -+ -+ if (OSAtomicRead(&psDevmemHeap->uiRefCount) != DEVMEMHEAP_REFCOUNT_MIN) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "BUG! %s called but has too many references (%d) " -+ "which probably means reservations & mappings have been made from " -+ "the heap and not freed", __func__, -+ OSAtomicRead(&psDevmemHeap->uiRefCount))); -+ -+ /* -+ * Try again later when you've freed all the memory -+ * -+ * Note: -+ * We don't expect the application to retry (after all this call would -+ * succeed if the client had freed all the memory which it should have -+ * done before calling this function). However, given there should be -+ * an associated handle, when the handle base is destroyed it will free -+ * any allocations leaked by the client and then it will retry this call, -+ * which should then succeed. 
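-+ *
-+ * To illustrate, the expected client-driven teardown order is: unmap any
-+ * remaining mappings (DevmemIntUnmapPMR()), release the reservations
-+ * (DevmemIntUnreserveRange(), which drops the heap reference), and only
-+ * then call DevmemIntHeapDestroy(), which will find the refcount at its
-+ * minimum and succeed.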
-+ */ -+ return PVRSRV_ERROR_RETRY; -+ } -+ -+ PVR_ASSERT(OSAtomicRead(&psDevmemHeap->uiRefCount) == DEVMEMHEAP_REFCOUNT_MIN); -+ -+ DevmemIntCtxRelease(psDevmemHeap->psDevmemCtx); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed heap %p", __func__, psDevmemHeap)); -+ OSFreeMem(psDevmemHeap); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_DEV_VIRTADDR -+DevmemIntHeapGetBaseAddr(DEVMEMINT_HEAP *psDevmemHeap) -+{ -+ PVR_ASSERT(psDevmemHeap != NULL); -+ -+ return psDevmemHeap->sBaseAddr; -+} -+ -+PVRSRV_ERROR -+DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap, -+ PMR *psPMR, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *pai32AllocIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pai32FreeIndices, -+ SPARSE_MEM_RESIZE_FLAGS uiSparseFlags, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_DEV_VIRTADDR sDevVAddrBase, -+ IMG_UINT64 sCpuVAddrBase) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ IMG_UINT32 uiLog2PMRContiguity = PMR_GetLog2Contiguity(psPMR); -+ IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize; -+ IMG_UINT32 uiOrderDiff = uiLog2PMRContiguity - uiLog2HeapContiguity; -+ IMG_UINT32 uiPagesPerOrder = 1 << uiOrderDiff; -+ -+ IMG_UINT32 *pai32MapIndices = pai32AllocIndices; -+ IMG_UINT32 *pai32UnmapIndices = pai32FreeIndices; -+ IMG_UINT32 uiMapPageCount = ui32AllocPageCount; -+ IMG_UINT32 uiUnmapPageCount = ui32FreePageCount; -+ -+ /* Special case: -+ * Adjust indices if we map into a heap that uses smaller page sizes -+ * than the physical allocation itself. -+ * The incoming parameters are all based on the page size of the PMR -+ * but the mapping functions expects parameters to be in terms of heap page sizes. */ -+ if (uiOrderDiff != 0) -+ { -+ IMG_UINT32 uiPgIdx, uiPgOffset; -+ -+ uiMapPageCount = (uiMapPageCount << uiOrderDiff); -+ uiUnmapPageCount = (uiUnmapPageCount << uiOrderDiff); -+ -+ pai32MapIndices = OSAllocMem(uiMapPageCount * sizeof(*pai32MapIndices)); -+ PVR_GOTO_IF_NOMEM(pai32MapIndices, eError, e0); -+ -+ pai32UnmapIndices = OSAllocMem(uiUnmapPageCount * sizeof(*pai32UnmapIndices)); -+ if (!pai32UnmapIndices) -+ { -+ OSFreeMem(pai32MapIndices); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0); -+ } -+ -+ /* Every chunk index needs to be translated from physical indices -+ * into heap based indices. */ -+ for (uiPgIdx = 0; uiPgIdx < ui32AllocPageCount; uiPgIdx++) -+ { -+ for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++) -+ { -+ pai32MapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] = -+ pai32AllocIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset; -+ } -+ } -+ -+ for (uiPgIdx = 0; uiPgIdx < ui32FreePageCount; uiPgIdx++) -+ { -+ for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++) -+ { -+ pai32UnmapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] = -+ pai32FreeIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset; -+ } -+ } -+ } -+ -+ /* -+ * The order of steps in which this request is done is given below. The order of -+ * operations is very important in this case: -+ * -+ * 1. The parameters are validated in function PMR_ChangeSparseMem below. -+ * A successful response indicates all the parameters are correct. -+ * In failure case we bail out from here without processing further. -+ * 2. On success, get the PMR specific operations done. this includes page alloc, page free -+ * and the corresponding PMR status changes. -+ * when this call fails, it is ensured that the state of the PMR before is -+ * not disturbed. If it succeeds, then we can go ahead with the subsequent steps. -+ * 3. 
Invalidate the GPU page table entries for the pages to be freed. -+ * 4. Write the GPU page table entries for the pages that got allocated. -+ * 5. Change the corresponding CPU space map. -+ * -+ * The above steps can be selectively controlled using flags. -+ */ -+ if (uiSparseFlags & SPARSE_RESIZE_BOTH) -+ { -+ /* Do the PMR specific changes first */ -+ eError = PMR_ChangeSparseMem(psPMR, -+ ui32AllocPageCount, -+ pai32AllocIndices, -+ ui32FreePageCount, -+ pai32FreeIndices, -+ uiSparseFlags); -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Failed to do PMR specific changes.", -+ __func__)); -+ goto e1; -+ } -+ -+ /* Invalidate the page table entries for the free pages. -+ * Optimisation later would be not to touch the ones that gets re-mapped */ -+ if ((0 != ui32FreePageCount) && (uiSparseFlags & SPARSE_RESIZE_FREE)) -+ { -+ PMR_FLAGS_T uiPMRFlags; -+ -+ /*Get the flags*/ -+ uiPMRFlags = PMR_Flags(psPMR); -+ -+ /* Unmap the pages and mark them invalid in the MMU PTE */ -+ MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext, -+ uiFlags, -+ sDevVAddrBase, -+ uiUnmapPageCount, -+ pai32UnmapIndices, -+ uiLog2HeapContiguity, -+ uiPMRFlags); -+ } -+ -+ /* Wire the pages tables that got allocated */ -+ if ((0 != ui32AllocPageCount) && (uiSparseFlags & SPARSE_RESIZE_ALLOC)) -+ { -+ /* Map the pages and mark them Valid in the MMU PTE */ -+ eError = MMU_MapPages (psDevmemHeap->psDevmemCtx->psMMUContext, -+ uiFlags, -+ sDevVAddrBase, -+ psPMR, -+ 0, -+ uiMapPageCount, -+ pai32MapIndices, -+ uiLog2HeapContiguity); -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Failed to map alloc indices.", -+ __func__)); -+ goto e1; -+ } -+ } -+ } -+ -+#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE -+ /* Do the changes in sparse on to the CPU virtual map accordingly */ -+ if (uiSparseFlags & SPARSE_MAP_CPU_ADDR) -+ { -+ if (sCpuVAddrBase != 0) -+ { -+ eError = PMR_ChangeSparseMemCPUMap(psPMR, -+ sCpuVAddrBase, -+ ui32AllocPageCount, -+ pai32AllocIndices, -+ ui32FreePageCount, -+ pai32FreeIndices); -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Failed to map to CPU addr space.", -+ __func__)); -+ goto e0; -+ } -+ } -+ } -+#endif -+ -+e1: -+ if (pai32MapIndices != pai32AllocIndices) -+ { -+ OSFreeMem(pai32MapIndices); -+ } -+ if (pai32UnmapIndices != pai32FreeIndices) -+ { -+ OSFreeMem(pai32UnmapIndices); -+ } -+e0: -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function DevmemIntCtxDestroy -+@Description Destroy that created by DevmemIntCtxCreate -+@Input psDevmemCtx Device Memory context -+@Return cannot fail. -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+DevmemIntCtxDestroy(DEVMEMINT_CTX *psDevmemCtx) -+{ -+ /* -+ We can't determine if we should be freeing the context here -+ as a refcount!=1 could be due to either the fact that heap(s) -+ remain with allocations on them, or that this memory context -+ has been exported. -+ As the client couldn't do anything useful with this information -+ anyway and the fact that the refcount will ensure we only -+ free the context when _all_ references have been released -+ don't bother checking and just return OK regardless. -+ */ -+ DevmemIntCtxRelease(psDevmemCtx); -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function DevmemIntGetVDevAddrPageSize -+@Description Get the page size for a virtual address. 
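-+ The lookup walks every heap blueprint in the device node's heap
-+ configs and returns the data page size of the heap whose range
-+ contains sDevAddr.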
-+@Input psConnection -+@Input psDevNode -+@Input psDevmemCtx Device Memory context -+@Input sDevAddr Get the page size for this virtual address. -+@Output puiLog2HeapPageSize On success returns log2 of the page size. -+@Return Failure code if the virtual address is outside any heap. -+*/ /**************************************************************************/ -+static -+PVRSRV_ERROR DevmemIntGetVDevAddrPageSize(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ DEVMEMINT_CTX *psDevMemContext, -+ IMG_DEV_VIRTADDR sDevAddr, -+ IMG_PUINT32 puiLog2HeapPageSize) -+{ -+ IMG_UINT32 i, j, uiLog2HeapPageSize = 0; -+ DEVICE_MEMORY_INFO *psDinfo = &psDevNode->sDevMemoryInfo; -+ DEVMEM_HEAP_CONFIG *psConfig = psDinfo->psDeviceMemoryHeapConfigArray; -+ -+ IMG_BOOL bFound = IMG_FALSE; -+ -+ for (i = 0; -+ i < psDinfo->uiNumHeapConfigs && !bFound; -+ i++) -+ { -+ for (j = 0; -+ j < psConfig[i].uiNumHeaps && !bFound; -+ j++) -+ { -+ IMG_DEV_VIRTADDR uiBase = -+ psConfig[i].psHeapBlueprintArray[j].sHeapBaseAddr; -+ IMG_DEVMEM_SIZE_T uiSize = -+ psConfig[i].psHeapBlueprintArray[j].uiHeapLength; -+ -+ if ((sDevAddr.uiAddr >= uiBase.uiAddr) && -+ (sDevAddr.uiAddr < (uiBase.uiAddr + uiSize))) -+ { -+ uiLog2HeapPageSize = -+ psConfig[i].psHeapBlueprintArray[j].uiLog2DataPageSize; -+ bFound = IMG_TRUE; -+ } -+ } -+ } -+ -+ if (uiLog2HeapPageSize == 0) -+ { -+ return PVRSRV_ERROR_INVALID_GPU_ADDR; -+ } -+ -+ *puiLog2HeapPageSize = uiLog2HeapPageSize; -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function DevmemIntIsVDevAddrValid -+@Description Checks if a virtual address is valid for access. -+@Input psConnection -+@Input psDevNode -+@Input psDevmemCtx Device Memory context -+@Input sDevAddr Virtual address to check. -+@Return Failure code if the virtual address is invalid. -+*/ /**************************************************************************/ -+PVRSRV_ERROR DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ DEVMEMINT_CTX *psDevMemContext, -+ IMG_DEV_VIRTADDR sDevAddr) -+{ -+ IMG_UINT32 uiLog2HeapPageSize = 0; -+ PVRSRV_ERROR eError; -+ eError = DevmemIntGetVDevAddrPageSize(psConnection, -+ psDevNode, -+ psDevMemContext, -+ sDevAddr, -+ &uiLog2HeapPageSize); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+ return MMU_IsVDevAddrValid(psDevMemContext->psMMUContext, -+ uiLog2HeapPageSize, -+ sDevAddr) ? 
PVRSRV_OK : PVRSRV_ERROR_INVALID_GPU_ADDR; -+} -+ -+PVRSRV_ERROR -+DevmemIntInvalidateFBSCTable(DEVMEMINT_CTX *psDevMemContext, -+ IMG_UINT64 ui64FBSCEntryMask) -+{ -+ PVRSRV_DEVICE_NODE *psDevNode = psDevMemContext->psDevNode; -+ MMU_CONTEXT *psMMUContext = psDevMemContext->psMMUContext; -+ -+ if (psDevNode->pfnInvalFBSCTable) -+ { -+ return psDevNode->pfnInvalFBSCTable(psDevNode, -+ psMMUContext, -+ ui64FBSCEntryMask); -+ } -+ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+} -+ -+PVRSRV_ERROR DevmemIntGetFaultAddress(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ DEVMEMINT_CTX *psDevMemContext, -+ IMG_DEV_VIRTADDR *psFaultAddress) -+{ -+ if ((psDevMemContext->ui32Flags & DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE) == 0) -+ { -+ return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; -+ } -+ -+ *psFaultAddress = psDevMemContext->sFaultAddress; -+ psDevMemContext->ui32Flags &= ~DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE; -+ -+ return PVRSRV_OK; -+} -+ -+static POSWR_LOCK g_hExportCtxListLock; -+static DLLIST_NODE g_sExportCtxList; -+ -+PVRSRV_ERROR -+DevmemIntInit(void) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ dllist_init(&g_sExportCtxList); -+ -+ eError = OSWRLockCreate(&g_hExportCtxListLock); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+DevmemIntDeInit(void) -+{ -+ PVR_ASSERT(dllist_is_empty(&g_sExportCtxList)); -+ -+ OSWRLockDestroy(g_hExportCtxListLock); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+DevmemIntExportCtx(DEVMEMINT_CTX *psContext, -+ PMR *psPMR, -+ DEVMEMINT_CTX_EXPORT **ppsContextExport) -+{ -+ DEVMEMINT_CTX_EXPORT *psCtxExport; -+ -+ psCtxExport = OSAllocMem(sizeof(DEVMEMINT_CTX_EXPORT)); -+ PVR_LOG_RETURN_IF_NOMEM(psCtxExport, "psCtxExport"); -+ -+ DevmemIntCtxAcquire(psContext); -+ PMRRefPMR(psPMR); -+ /* Now that the source PMR is exported, the layout -+ * can't change as there could be outstanding importers -+ * This is to make sure both exporter and importers view of -+ * the memory is same */ -+ PMR_SetLayoutFixed(psPMR, IMG_TRUE); -+ psCtxExport->psDevmemCtx = psContext; -+ psCtxExport->psPMR = psPMR; -+ OSWRLockAcquireWrite(g_hExportCtxListLock); -+ dllist_add_to_tail(&g_sExportCtxList, &psCtxExport->sNode); -+ OSWRLockReleaseWrite(g_hExportCtxListLock); -+ -+ *ppsContextExport = psCtxExport; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport) -+{ -+ PMRUnrefPMR(psContextExport->psPMR); -+ DevmemIntCtxRelease(psContextExport->psDevmemCtx); -+ OSWRLockAcquireWrite(g_hExportCtxListLock); -+ dllist_remove_node(&psContextExport->sNode); -+ OSWRLockReleaseWrite(g_hExportCtxListLock); -+ OSFreeMem(psContextExport); -+ -+ /* Unable to find exported context, return error */ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+DevmemIntAcquireRemoteCtx(PMR *psPMR, -+ DEVMEMINT_CTX **ppsContext, -+ IMG_HANDLE *phPrivData) -+{ -+ PDLLIST_NODE psListNode, psListNodeNext; -+ DEVMEMINT_CTX_EXPORT *psCtxExport; -+ -+ OSWRLockAcquireRead(g_hExportCtxListLock); -+ /* Find context from list using PMR as key */ -+ dllist_foreach_node(&g_sExportCtxList, psListNode, psListNodeNext) -+ { -+ psCtxExport = IMG_CONTAINER_OF(psListNode, DEVMEMINT_CTX_EXPORT, sNode); -+ if (psCtxExport->psPMR == psPMR) -+ { -+ DevmemIntCtxAcquire(psCtxExport->psDevmemCtx); -+ *ppsContext = psCtxExport->psDevmemCtx; -+ *phPrivData = psCtxExport->psDevmemCtx->hPrivData; -+ -+ OSWRLockReleaseRead(g_hExportCtxListLock); -+ -+ /* PMR should have been already exported to import it -+ * If a PMR is exported, its immutable and the same is -+ * checked here */ -+ 
PVR_ASSERT(IMG_TRUE == PMR_IsMemLayoutFixed(psPMR)); -+ -+ return PVRSRV_OK; -+ } -+ } -+ OSWRLockReleaseRead(g_hExportCtxListLock); -+ -+ /* Unable to find exported context, return error */ -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to acquire remote context. Could not retrieve context with given PMR", -+ __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+} -+ -+/*************************************************************************/ /*! -+@Function DevmemIntRegisterPFNotify -+@Description Registers a PID to be notified when a page fault occurs on a -+ specific device memory context. -+@Input psDevmemCtx The context to be notified about. -+@Input bRegister If true, register. If false, de-register. -+@Return PVRSRV_ERROR. -+*/ /**************************************************************************/ -+PVRSRV_ERROR DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx, -+ IMG_BOOL bRegister) -+{ -+ PVRSRV_DEVICE_NODE *psDevNode; -+ DLLIST_NODE *psNode, *psNodeNext; -+ DEVMEMINT_PF_NOTIFY *psNotifyNode; -+ IMG_BOOL bPresent = IMG_FALSE; -+ PVRSRV_ERROR eError; -+ IMG_PID ui32PID; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDevmemCtx, "psDevmemCtx"); -+ -+ /* Acquire write lock for the duration, to avoid resource free -+ * while trying to read (no need to then also acquire the read lock -+ * as we have exclusive access while holding the write lock) -+ */ -+ OSWRLockAcquireWrite(psDevmemCtx->hListLock); -+ -+ psDevNode = psDevmemCtx->psDevNode; -+ -+ if (bRegister) -+ { -+ /* If this is the first PID in the list, the device memory context -+ * needs to be registered for notification */ -+ if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead)) -+ { -+ OSWRLockAcquireWrite(psDevNode->hMemoryContextPageFaultNotifyListLock); -+ dllist_add_to_tail(&psDevNode->sMemoryContextPageFaultNotifyListHead, -+ &psDevmemCtx->sPageFaultNotifyListElem); -+ OSWRLockReleaseWrite(psDevNode->hMemoryContextPageFaultNotifyListLock); -+ } -+ } -+ -+ /* Obtain current client PID */ -+ ui32PID = OSGetCurrentClientProcessIDKM(); -+ -+ /* Loop through the registered PIDs and check whether this one is -+ * present */ -+ dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext) -+ { -+ psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem); -+ -+ if (psNotifyNode->ui32PID == ui32PID) -+ { -+ bPresent = IMG_TRUE; -+ break; -+ } -+ } -+ -+ if (bRegister) -+ { -+ if (bPresent) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Trying to register a PID that is already registered", -+ __func__)); -+ eError = PVRSRV_ERROR_PID_ALREADY_REGISTERED; -+ goto err_already_registered; -+ } -+ -+ psNotifyNode = OSAllocMem(sizeof(*psNotifyNode)); -+ if (psNotifyNode == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Unable to allocate memory for the notify list", -+ __func__)); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto err_out_of_mem; -+ } -+ psNotifyNode->ui32PID = ui32PID; -+ /* Write lock is already held */ -+ dllist_add_to_tail(&(psDevmemCtx->sProcessNotifyListHead), &(psNotifyNode->sProcessNotifyListElem)); -+ } -+ else -+ { -+ if (!bPresent) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Trying to unregister a PID that is not registered", -+ __func__)); -+ eError = PVRSRV_ERROR_PID_NOT_REGISTERED; -+ goto err_not_registered; -+ } -+ /* Write lock is already held */ -+ dllist_remove_node(psNode); -+ psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem); -+ OSFreeMem(psNotifyNode); -+ -+ /* If the last process in the list is being unregistered, then also -+ * 
unregister the device memory context from the notify list. */ -+ if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead)) -+ { -+ OSWRLockAcquireWrite(psDevNode->hMemoryContextPageFaultNotifyListLock); -+ dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem); -+ OSWRLockReleaseWrite(psDevNode->hMemoryContextPageFaultNotifyListLock); -+ } -+ } -+ eError = PVRSRV_OK; -+ -+err_already_registered: -+err_out_of_mem: -+err_not_registered: -+ -+ OSWRLockReleaseWrite(psDevmemCtx->hListLock); -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function DevmemIntPFNotify -+@Description Notifies any processes that have registered themselves to be -+ notified when a page fault happens on a specific device memory -+ context. -+@Input *psDevNode The device node. -+@Input ui64FaultedPCAddress The page catalogue address that faulted. -+@Input sFaultAddress The address that triggered the fault. -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT64 ui64FaultedPCAddress, -+ IMG_DEV_VIRTADDR sFaultAddress) -+{ -+ DLLIST_NODE *psNode, *psNodeNext; -+ DEVMEMINT_PF_NOTIFY *psNotifyNode; -+ PVRSRV_ERROR eError; -+ DEVMEMINT_CTX *psDevmemCtx = NULL; -+ IMG_BOOL bFailed = IMG_FALSE; -+ -+ OSWRLockAcquireRead(psDevNode->hMemoryContextPageFaultNotifyListLock); -+ if (dllist_is_empty(&(psDevNode->sMemoryContextPageFaultNotifyListHead))) -+ { -+ OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock); -+ return PVRSRV_OK; -+ } -+ -+ dllist_foreach_node(&(psDevNode->sMemoryContextPageFaultNotifyListHead), psNode, psNodeNext) -+ { -+ DEVMEMINT_CTX *psThisContext = -+ IMG_CONTAINER_OF(psNode, DEVMEMINT_CTX, sPageFaultNotifyListElem); -+ IMG_DEV_PHYADDR sPCDevPAddr; -+ -+ eError = MMU_AcquireBaseAddr(psThisContext->psMMUContext, &sPCDevPAddr); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "MMU_AcquireBaseAddr"); -+ OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock); -+ return eError; -+ } -+ -+ if (sPCDevPAddr.uiAddr == ui64FaultedPCAddress) -+ { -+ psDevmemCtx = psThisContext; -+ break; -+ } -+ } -+ OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock); -+ -+ if (psDevmemCtx == NULL) -+ { -+ /* Not found, just return */ -+ return PVRSRV_OK; -+ } -+ OSWRLockAcquireRead(psDevmemCtx->hListLock); -+ -+ /* -+ * Store the first occurrence of a page fault address, -+ * until that address is consumed by a client. 
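-+ * (The stored address is handed back to the client, and the flag
-+ * cleared, by DevmemIntGetFaultAddress().)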
-+ */ -+ if ((psDevmemCtx->ui32Flags & DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE) == 0) -+ { -+ psDevmemCtx->sFaultAddress = sFaultAddress; -+ psDevmemCtx->ui32Flags |= DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE; -+ } -+ -+ /* Loop through each registered PID and send a signal to the process */ -+ dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext) -+ { -+ psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem); -+ -+ eError = OSDebugSignalPID(psNotifyNode->ui32PID); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Unable to signal process for PID: %u", -+ __func__, -+ psNotifyNode->ui32PID)); -+ -+ PVR_ASSERT(!"Unable to signal process"); -+ -+ bFailed = IMG_TRUE; -+ } -+ } -+ OSWRLockReleaseRead(psDevmemCtx->hListLock); -+ -+ if (bFailed) -+ { -+ return PVRSRV_ERROR_SIGNAL_FAILED; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+#if defined(PDUMP) -+typedef struct _DEVMEMINT_PDUMP_VALID_REGION_ -+{ -+ DLLIST_NODE sNode; -+ IMG_DEV_VIRTADDR sDevVAddr; -+ IMG_DEVMEM_SIZE_T uiSize; -+} DEVMEMINT_PDUMP_VALID_REGION; -+ -+IMG_UINT32 DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext) -+{ -+ IMG_UINT32 ui32MMUContextID; -+ MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32MMUContextID, PDUMP_FLAGS_CONTINUOUS); -+ return ui32MMUContextID; -+} -+ -+PVRSRV_ERROR -+DevmemIntPDumpGetValidRegions(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_CTX *psDevmemCtx, -+ IMG_DEV_VIRTADDR sDevAddrStart, -+ IMG_DEVMEM_SIZE_T uiSize, -+ DLLIST_NODE *psValidRegionsList) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiLog2HeapPageSize; -+ IMG_UINT32 uiHeapPageSize; -+ IMG_DEV_VIRTADDR sValidStart, sValidEnd, sCurrent, sEnd, sStartPage; -+ -+ /* Get the page size for heap containing the start virtual address. */ -+ eError = DevmemIntGetVDevAddrPageSize(psConnection, -+ psDeviceNode, -+ psDevmemCtx, -+ sDevAddrStart, -+ &uiLog2HeapPageSize); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ uiHeapPageSize = 1 << uiLog2HeapPageSize; -+ -+ /* Iterate every page in the region to dump... */ -+ sValidStart.uiAddr = sValidEnd.uiAddr = 0; /* Start/end of the current region which is valid to read. */ -+ sStartPage.uiAddr = sDevAddrStart.uiAddr & ~((IMG_UINT64) uiHeapPageSize - 1); /* Page aligned start of the region to dump. */ -+ sCurrent = sStartPage; -+ sEnd.uiAddr = sDevAddrStart.uiAddr + uiSize; /* End of the region to dump. */ -+ -+ while (IMG_TRUE) -+ { -+ IMG_BOOL bAtEnd = sCurrent.uiAddr >= sEnd.uiAddr; -+ IMG_BOOL bValid = IMG_FALSE; -+ -+ if (!bAtEnd) -+ { -+ /* Check if the page starting at the current address is valid for reading. */ -+ eError = DevmemIntIsVDevAddrValid(psConnection, -+ psDeviceNode, -+ psDevmemCtx, -+ sCurrent); -+ if (eError == PVRSRV_OK) -+ { -+ /* If the current valid region is empty then set the start -+ * to the current page. */ -+ if (sValidStart.uiAddr == 0) -+ { -+ if (sCurrent.uiAddr == sStartPage.uiAddr) -+ { -+ /* Use the start of the region to dump if it doesn't -+ * start page aligned. */ -+ sValidStart = sDevAddrStart; -+ } -+ else -+ { -+ sValidStart = sCurrent; -+ } -+ } -+ /* Set the end of the valid region. */ -+ sValidEnd.uiAddr = sCurrent.uiAddr + uiHeapPageSize; -+ /* Restrict to the region to dump. */ -+ if (sValidEnd.uiAddr > sEnd.uiAddr) -+ { -+ sValidEnd = sEnd; -+ } -+ bValid = IMG_TRUE; -+ } -+ /* Move to the next page. 
*/ -+ sCurrent.uiAddr += uiHeapPageSize; -+ } -+ -+ /* If the current page is invalid or we've reached the end of the region -+ * to dump then pdump the current valid region. */ -+ if (!bValid && sValidEnd.uiAddr > sValidStart.uiAddr) -+ { -+ DEVMEMINT_PDUMP_VALID_REGION *psRegion = OSAllocMem(sizeof(*psRegion)); -+ PVR_LOG_GOTO_IF_NOMEM(psRegion, eError, ErrFreeRegions); -+ -+ psRegion->sDevVAddr = sValidStart; -+ psRegion->uiSize = sValidEnd.uiAddr - sValidStart.uiAddr; -+ -+ dllist_add_to_tail(psValidRegionsList, &psRegion->sNode); -+ -+ sValidStart.uiAddr = sValidEnd.uiAddr = 0; -+ } -+ -+ if (bAtEnd) -+ { -+ break; -+ } -+ } -+ -+ return PVRSRV_OK; -+ -+ErrFreeRegions: -+ DevmemIntPDumpFreeValidRegions(psValidRegionsList); -+ return eError; -+} -+ -+void -+DevmemIntPDumpFreeValidRegions(DLLIST_NODE *psValidRegionsList) -+{ -+ DLLIST_NODE *psThis, *psNext; -+ -+ dllist_foreach_node(psValidRegionsList, psThis, psNext) -+ { -+ DEVMEMINT_PDUMP_VALID_REGION *psRegion = -+ IMG_CONTAINER_OF(psThis, DEVMEMINT_PDUMP_VALID_REGION, sNode); -+ -+ dllist_remove_node(psThis); -+ OSFreeMem(psRegion); -+ } -+} -+ -+PVRSRV_ERROR -+DevmemIntPDumpSaveFromRegionListToFileVirtual(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_CTX *psDevmemCtx, -+ DLLIST_NODE *psDevAddrRegions, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 ui32FileOffset, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiPDumpMMUCtx; -+ DLLIST_NODE *psThis, *psNext; -+ -+ /* Confirm that the device node's ui32InternalID matches the bound -+ * PDump device stored in PVRSRV_DATA. -+ */ -+ if (psDevmemCtx->psDevNode->sDevId.ui32InternalID != -+ (PVRSRVGetPVRSRVData())->ui32PDumpBoundDevice) -+ { -+ return PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE; -+ } -+ -+ eError = MMU_AcquirePDumpMMUContext(psDevmemCtx->psMMUContext, -+ &uiPDumpMMUCtx, -+ ui32PDumpFlags); -+ -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ /* The following SYSMEM refers to the 'MMU Context', hence it -+ * should be the MMU context, not the PMR, that says what the PDump -+ * MemSpace tag is? -+ * From a PDump P.O.V. 
it doesn't matter which name space we use as long -+ * as that MemSpace is used on the 'MMU Context' we're dumping from -+ */ -+ -+ dllist_foreach_node(psDevAddrRegions, psThis, psNext) -+ { -+ DEVMEMINT_PDUMP_VALID_REGION *psRegion = -+ IMG_CONTAINER_OF(psThis, DEVMEMINT_PDUMP_VALID_REGION, sNode); -+ -+ eError = PDumpMMUSAB(psDevmemCtx->psDevNode, -+ psDevmemCtx->psDevNode->sDevId.pszPDumpDevName, -+ uiPDumpMMUCtx, -+ psRegion->sDevVAddr, -+ psRegion->uiSize, -+ pszFilename, -+ ui32FileOffset, -+ ui32PDumpFlags); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ ui32FileOffset += psRegion->uiSize; -+ -+ dllist_remove_node(psThis); -+ OSFreeMem(psRegion); -+ } -+ -+ MMU_ReleasePDumpMMUContext(psDevmemCtx->psMMUContext, ui32PDumpFlags); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+DevmemIntPDumpSaveToFileVirtual(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_CTX *psDevmemCtx, -+ IMG_DEV_VIRTADDR sDevAddrStart, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32ArraySize, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 ui32FileOffset, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiPDumpMMUCtx; -+ IMG_UINT32 uiLog2HeapPageSize; -+ IMG_UINT32 uiHeapPageSize; -+ IMG_DEV_VIRTADDR sValidStart, sValidEnd, sCurrent, sEnd, sStartPage; -+ IMG_UINT64 ui64PageMask; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32ArraySize); -+ -+ /* Confirm that the device node's ui32InternalID matches the bound -+ * PDump device stored in PVRSRV_DATA. -+ */ -+ if (psDevmemCtx->psDevNode->sDevId.ui32InternalID != -+ (PVRSRVGetPVRSRVData())->ui32PDumpBoundDevice) -+ { -+ return PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE; -+ } -+ -+ eError = MMU_AcquirePDumpMMUContext(psDevmemCtx->psMMUContext, -+ &uiPDumpMMUCtx, -+ ui32PDumpFlags); -+ -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ /* -+ Get the page size for heap containing the start virtual address. -+ */ -+ eError = DevmemIntGetVDevAddrPageSize(psConnection, -+ psDeviceNode, -+ psDevmemCtx, -+ sDevAddrStart, -+ &uiLog2HeapPageSize); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ uiHeapPageSize = 1 << uiLog2HeapPageSize; -+ ui64PageMask = uiHeapPageSize - 1; -+ -+ /* -+ Iterate every page in the region to dump... -+ */ -+ sValidStart.uiAddr = sValidEnd.uiAddr = 0; /* Start/end of the current region which is valid to read. */ -+ sStartPage.uiAddr = sDevAddrStart.uiAddr & ~ui64PageMask; /* Page aligned start of the region to dump. */ -+ sCurrent = sStartPage; -+ sEnd.uiAddr = sDevAddrStart.uiAddr + uiSize; /* End of the region to dump. */ -+ for (;;) -+ { -+ IMG_BOOL bAtEnd = sCurrent.uiAddr >= sEnd.uiAddr; -+ IMG_BOOL bValid = IMG_FALSE; -+ -+ if (!bAtEnd) -+ { -+ /* Check if the page starting at the current address is valid for reading. */ -+ eError = DevmemIntIsVDevAddrValid(psConnection, -+ psDeviceNode, -+ psDevmemCtx, -+ sCurrent); -+ if (eError == PVRSRV_OK) -+ { -+ /* If the current valid region is empty then set the start to the current page. */ -+ if (sValidStart.uiAddr == 0) -+ { -+ if (sCurrent.uiAddr == sStartPage.uiAddr) -+ { -+ /* Use the start of the region to dump if it doesn't start page aligned. */ -+ sValidStart = sDevAddrStart; -+ } -+ else -+ { -+ sValidStart = sCurrent; -+ } -+ } -+ /* Set the end of the valid region. */ -+ sValidEnd.uiAddr = sCurrent.uiAddr + uiHeapPageSize; -+ /* Restrict to the region to dump. */ -+ if (sValidEnd.uiAddr > sEnd.uiAddr) -+ { -+ sValidEnd = sEnd; -+ } -+ bValid = IMG_TRUE; -+ } -+ /* Move to the next page. 
*/ -+ sCurrent.uiAddr += uiHeapPageSize; -+ } -+ /* -+ If the current page is invalid or we've reached the end of the region to dump then pdump the current valid region. -+ */ -+ if (!bValid && sValidEnd.uiAddr > sValidStart.uiAddr) -+ { -+ IMG_DEVMEM_SIZE_T uiValidSize = sValidEnd.uiAddr - sValidStart.uiAddr; -+ eError = PDumpMMUSAB(psDevmemCtx->psDevNode, -+ psDevmemCtx->psDevNode->sDevId.pszPDumpDevName, -+ uiPDumpMMUCtx, -+ sValidStart, -+ uiValidSize, -+ pszFilename, -+ ui32FileOffset, -+ ui32PDumpFlags); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ ui32FileOffset += uiValidSize; -+ -+ sValidStart.uiAddr = sValidEnd.uiAddr = 0; -+ } -+ -+ if (bAtEnd) -+ { -+ break; -+ } -+ } -+ -+ MMU_ReleasePDumpMMUContext(psDevmemCtx->psMMUContext, ui32PDumpFlags); -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+DevmemIntPDumpSaveToFileVirtualNoValidate(PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_CTX *psDevmemCtx, -+ IMG_DEV_VIRTADDR sDevAddrStart, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 ui32FileOffset, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiPDumpMMUCtx; -+ -+ /* Confirm that the device node's ui32InternalID matches the bound -+ * PDump device stored in PVRSRV_DATA. -+ */ -+ if (psDevmemCtx->psDevNode->sDevId.ui32InternalID != -+ (PVRSRVGetPVRSRVData())->ui32PDumpBoundDevice) -+ { -+ return PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE; -+ } -+ -+ eError = MMU_AcquirePDumpMMUContext(psDevmemCtx->psMMUContext, -+ &uiPDumpMMUCtx, -+ ui32PDumpFlags); -+ -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ /* The following SYSMEM refers to the 'MMU Context', hence it -+ * should be the MMU context, not the PMR, that says what the PDump -+ * MemSpace tag is? -+ * From a PDump P.O.V. it doesn't matter which name space we use as long -+ * as that MemSpace is used on the 'MMU Context' we're dumping from -+ */ -+ eError = PDumpMMUSAB(psDevmemCtx->psDevNode, -+ psDevmemCtx->psDevNode->sDevId.pszPDumpDevName, -+ uiPDumpMMUCtx, -+ sDevAddrStart, -+ uiSize, -+ pszFilename, -+ ui32FileOffset, -+ ui32PDumpFlags); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ MMU_ReleasePDumpMMUContext(psDevmemCtx->psMMUContext, ui32PDumpFlags); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_CTX *psDevMemContext, -+ IMG_UINT32 ui32Size, -+ const IMG_CHAR *pszFileName, -+ IMG_DEV_VIRTADDR sData, -+ IMG_UINT32 ui32DataSize, -+ IMG_UINT32 ui32LogicalWidth, -+ IMG_UINT32 ui32LogicalHeight, -+ IMG_UINT32 ui32PhysicalWidth, -+ IMG_UINT32 ui32PhysicalHeight, -+ PDUMP_PIXEL_FORMAT ePixFmt, -+ IMG_MEMLAYOUT eMemLayout, -+ IMG_FB_COMPRESSION eFBCompression, -+ const IMG_UINT32 *paui32FBCClearColour, -+ PDUMP_FBC_SWIZZLE eFBCSwizzle, -+ IMG_DEV_VIRTADDR sHeader, -+ IMG_UINT32 ui32HeaderSize, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ IMG_UINT32 ui32ContextID; -+ PVRSRV_ERROR eError; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVR_UNREFERENCED_PARAMETER(ui32Size); -+ -+ eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID, ui32PDumpFlags); -+ PVR_LOG_RETURN_IF_ERROR(eError, "MMU_AcquirePDumpMMUContext"); -+ -+ eError = PDumpImageDescriptor(psDeviceNode, -+ ui32ContextID, -+ (IMG_CHAR *)pszFileName, -+ sData, -+ ui32DataSize, -+ ui32LogicalWidth, -+ ui32LogicalHeight, -+ ui32PhysicalWidth, -+ ui32PhysicalHeight, -+ ePixFmt, -+ eMemLayout, -+ eFBCompression, -+ paui32FBCClearColour, -+ eFBCSwizzle, -+ sHeader, -+ ui32HeaderSize, -+ ui32PDumpFlags); 
-+ PVR_LOG_IF_ERROR(eError, "PDumpImageDescriptor"); -+ -+ /* Don't care about return value */ -+ (void) MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext, ui32PDumpFlags); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_CTX *psDevMemContext, -+ IMG_UINT32 ui32Size, -+ const IMG_CHAR *pszFileName, -+ IMG_DEV_VIRTADDR sData, -+ IMG_UINT32 ui32DataSize, -+ IMG_UINT32 ui32HeaderType, -+ IMG_UINT32 ui32ElementType, -+ IMG_UINT32 ui32ElementCount, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ IMG_UINT32 ui32ContextID; -+ PVRSRV_ERROR eError; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVR_UNREFERENCED_PARAMETER(ui32Size); -+ -+ if ((ui32HeaderType != IBIN_HEADER_TYPE) && -+ (ui32HeaderType != DATA_HEADER_TYPE)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid header type (%u)", -+ __func__, -+ ui32HeaderType)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID, ui32PDumpFlags); -+ PVR_LOG_RETURN_IF_ERROR(eError, "MMU_AcquirePDumpMMUContext"); -+ -+ eError = PDumpDataDescriptor(psDeviceNode, -+ ui32ContextID, -+ (IMG_CHAR *)pszFileName, -+ sData, -+ ui32DataSize, -+ ui32HeaderType, -+ ui32ElementType, -+ ui32ElementCount, -+ ui32PDumpFlags); -+ PVR_LOG_IF_ERROR(eError, "PDumpDataDescriptor"); -+ -+ /* Don't care about return value */ -+ (void) MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext, ui32PDumpFlags); -+ -+ return eError; -+} -+ -+#endif -diff --git a/drivers/gpu/drm/img-rogue/devicemem_server.h b/drivers/gpu/drm/img-rogue/devicemem_server.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/devicemem_server.h -@@ -0,0 +1,729 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device Memory Management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Server side component for device memory management -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. 
-+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef DEVICEMEM_SERVER_H -+#define DEVICEMEM_SERVER_H -+ -+#include "device.h" /* For device node */ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include "pvrsrv_error.h" -+ -+#include "connection_server.h" -+#include "pmr.h" -+ -+typedef struct _DEVMEMINT_CTX_ DEVMEMINT_CTX; -+typedef struct _DEVMEMINT_CTX_EXPORT_ DEVMEMINT_CTX_EXPORT; -+typedef struct _DEVMEMINT_HEAP_ DEVMEMINT_HEAP; -+ -+typedef struct _DEVMEMINT_RESERVATION_ DEVMEMINT_RESERVATION; -+typedef struct _DEVMEMINT_MAPPING_ DEVMEMINT_MAPPING; -+typedef struct _DEVMEMXINT_RESERVATION_ DEVMEMXINT_RESERVATION; -+typedef struct _DEVMEMINT_PF_NOTIFY_ DEVMEMINT_PF_NOTIFY; -+ -+/* -+ * DevmemServerGetImportHandle() -+ * -+ * For given exportable memory descriptor returns PMR handle -+ * -+ */ -+PVRSRV_ERROR -+DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc, -+ IMG_HANDLE *phImport); -+ -+/* -+ * DevmemServerGetHeapHandle() -+ * -+ * For given reservation returns the Heap handle -+ * -+ */ -+PVRSRV_ERROR -+DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation, -+ IMG_HANDLE *phHeap); -+ -+/* -+ * DevmemServerGetContext() -+ * -+ * For given heap returns the context. -+ * -+ */ -+PVRSRV_ERROR -+DevmemServerGetContext(DEVMEMINT_HEAP *psDevmemHeap, -+ DEVMEMINT_CTX **ppsDevmemCtxPtr); -+ -+/* -+ * DevmemServerGetPrivData() -+ * -+ * For given context returns the private data handle. -+ * -+ */ -+PVRSRV_ERROR -+DevmemServerGetPrivData(DEVMEMINT_CTX *psDevmemCtx, -+ IMG_HANDLE *phPrivData); -+ -+/* -+ * DevmemIntCtxCreate() -+ * -+ * Create a Server-side Device Memory Context. This is usually the counterpart -+ * of the client side memory context, and indeed is usually created at the -+ * same time. -+ * -+ * You must have one of these before creating any heaps. -+ * -+ * All heaps must have been destroyed before calling -+ * DevmemIntCtxDestroy() -+ * -+ * If you call DevmemIntCtxCreate() (and it succeeds) you are promising to -+ * later call DevmemIntCtxDestroy() -+ * -+ * Note that this call will cause the device MMU code to do some work for -+ * creating the device memory context, but it does not guarantee that a page -+ * catalogue will have been created, as this may be deferred until the first -+ * allocation. -+ * -+ * Caller to provide storage for a pointer to the DEVMEM_CTX object that will -+ * be created by this call. -+ */ -+PVRSRV_ERROR -+DevmemIntCtxCreate(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ /* devnode / perproc etc */ -+ IMG_BOOL bKernelMemoryCtx, -+ DEVMEMINT_CTX **ppsDevmemCtxPtr, -+ IMG_HANDLE *hPrivData, -+ IMG_UINT32 *pui32CPUCacheLineSize); -+/* -+ * DevmemIntCtxDestroy() -+ * -+ * Undoes a prior DevmemIntCtxCreate or DevmemIntCtxImport. 
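-+ *
-+ * All heaps created on the context must have been destroyed before this
-+ * is called (see DevmemIntCtxCreate() above).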
-+ */ -+PVRSRV_ERROR -+DevmemIntCtxDestroy(DEVMEMINT_CTX *psDevmemCtx); -+ -+/* -+ * DevmemIntHeapCreate() -+ * -+ * Creates a new heap in this device memory context. This will cause a call -+ * into the MMU code to allocate various data structures for managing this -+ * heap. It will not necessarily cause any page tables to be set up, as this -+ * can be deferred until first allocation. (i.e. we shouldn't care - it's up -+ * to the MMU code) -+ * -+ * Note that the data page size must be specified (as log 2). The data page -+ * size as specified here will be communicated to the mmu module, and thus may -+ * determine the page size configured in page directory entries for subsequent -+ * allocations from this heap. It is essential that the page size here is less -+ * than or equal to the "minimum contiguity guarantee" of any PMR that you -+ * subsequently attempt to map to this heap. -+ * -+ * If you call DevmemIntHeapCreate() (and the call succeeds) you are promising -+ * that you shall subsequently call DevmemIntHeapDestroy() -+ * -+ * Caller to provide storage for a pointer to the DEVMEM_HEAP object that will -+ * be created by this call. -+ */ -+PVRSRV_ERROR -+DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx, -+ IMG_UINT32 uiHeapConfigIndex, -+ IMG_UINT32 uiHeapIndex, -+ IMG_DEV_VIRTADDR sHeapBaseAddr, -+ IMG_UINT32 uiLog2DataPageSize, -+ DEVMEMINT_HEAP **ppsDevmemHeapPtr); -+/* -+ * DevmemIntHeapDestroy() -+ * -+ * Destroys a heap previously created with DevmemIntHeapCreate() -+ * -+ * All allocations from his heap must have been freed before this -+ * call. -+ */ -+PVRSRV_ERROR -+DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap); -+ -+/* DevmemIntHeapGetBaseAddr() -+ * -+ * Get heap base address pre carveouts. -+ */ -+IMG_DEV_VIRTADDR -+DevmemIntHeapGetBaseAddr(DEVMEMINT_HEAP *psDevmemHeap); -+ -+/* -+ * DevmemIntMapPMR() -+ * -+ * Maps the given PMR to the virtual range previously allocated with -+ * DevmemIntReserveRange() -+ * -+ * If appropriate, the PMR must have had its physical backing committed, as -+ * this call will call into the MMU code to set up the page tables for this -+ * allocation, which shall in turn request the physical addresses from the -+ * PMR. Alternatively, the PMR implementation can choose to do so off the -+ * the back of the "lock" callback, which it will receive as a result -+ * (indirectly) of this call. -+ * -+ * This function makes no promise w.r.t. the circumstances that it can be -+ * called, and these would be "inherited" from the implementation of the PMR. -+ * For example if the PMR "lock" callback causes pages to be pinned at that -+ * time (which may cause scheduling or disk I/O etc.) then it would not be -+ * legal to "Map" the PMR in a context where scheduling events are disallowed. -+ * -+ * If you call DevmemIntMapPMR() (and the call succeeds) then you are promising -+ * that you shall later call DevmemIntUnmapPMR() -+ */ -+PVRSRV_ERROR -+DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap, -+ DEVMEMINT_RESERVATION *psReservation, -+ PMR *psPMR, -+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags, -+ DEVMEMINT_MAPPING **ppsMappingPtr); -+/* -+ * DevmemIntUnmapPMR() -+ * -+ * Reverses the mapping caused by DevmemIntMapPMR() -+ */ -+PVRSRV_ERROR -+DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping); -+ -+ -+ -+/* -+ * DevmemIntReserveRangeAndMapPMR() -+ * Bundled call to reserve range and map. 
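-+ *
-+ * For example, a caller wanting a PMR mapped at a known device virtual
-+ * address would typically do (illustrative sketch, error handling and
-+ * variable declarations omitted):
-+ *
-+ *   eError = DevmemIntReserveRangeAndMapPMR(psHeap, sDevVAddr, uiSize,
-+ *                                           psPMR, uiMapFlags, &psMapping);
-+ *   ... use the mapping ...
-+ *   eError = DevmemIntUnreserveRangeAndUnmapPMR(psMapping);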
-+ */
-+PVRSRV_ERROR
-+DevmemIntReserveRangeAndMapPMR(DEVMEMINT_HEAP *psDevmemHeap,
-+ IMG_DEV_VIRTADDR sAllocationDevVAddr,
-+ IMG_DEVMEM_SIZE_T uiAllocationSize,
-+ PMR *psPMR,
-+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
-+ DEVMEMINT_MAPPING **ppsMappingPtr);
-+
-+PVRSRV_ERROR
-+DevmemIntUnreserveRangeAndUnmapPMR(DEVMEMINT_MAPPING *psMappingPtr);
-+
-+/*
-+ * DevmemIntReserveRange()
-+ *
-+ * Indicates that the specified range should be reserved from the given heap.
-+ *
-+ * In turn causes the page tables to be allocated to cover the specified range.
-+ *
-+ * If you call DevmemIntReserveRange() (and the call succeeds) then you are
-+ * promising that you shall later call DevmemIntUnreserveRange()
-+ */
-+PVRSRV_ERROR
-+DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
-+ IMG_DEV_VIRTADDR sAllocationDevVAddr,
-+ IMG_DEVMEM_SIZE_T uiAllocationSize,
-+ DEVMEMINT_RESERVATION **ppsReservationPtr);
-+/*
-+ * DevmemIntUnreserveRange()
-+ *
-+ * Undoes the state change caused by DevmemIntReserveRange()
-+ */
-+PVRSRV_ERROR
-+DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psDevmemReservation);
-+
-+/*************************************************************************/ /*!
-+ * @Function DevmemXIntReserveRange()
-+ * @Description Indicates that the specified range should be reserved from the
-+ * given heap.
-+ *
-+ * In turn causes the page tables to be allocated to cover the
-+ * specified range.
-+ *
-+ * If you call DevmemXIntReserveRange() (and the call succeeds)
-+ * then you are promising that you shall later call
-+ * DevmemXIntUnreserveRange().
-+ *
-+ * @Input psDevmemHeap Pointer to the heap the reservation is made
-+ * on
-+ * @Input sAllocationDevVAddr Virtual address of the reservation
-+ * @Input uiAllocationSize Size of the reservation (in bytes)
-+ * @Input ppsRsrv Return pointer to the reservation object
-+ *
-+ * @Return PVRSRV_ERROR
-+*/ /**************************************************************************/
-+PVRSRV_ERROR
-+DevmemXIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
-+ IMG_DEV_VIRTADDR sAllocationDevVAddr,
-+ IMG_DEVMEM_SIZE_T uiAllocationSize,
-+ DEVMEMXINT_RESERVATION **ppsRsrv);
-+
-+/*************************************************************************/ /*!
-+ * @Function DevmemXIntUnreserveRange()
-+ * @Description Undoes the state change caused by DevmemXIntReserveRange()
-+ *
-+ * @Input psRsrv Reservation handle for the range
-+ *
-+ * @Return PVRSRV_ERROR
-+*/ /**************************************************************************/
-+PVRSRV_ERROR
-+DevmemXIntUnreserveRange(DEVMEMXINT_RESERVATION *psRsrv);
-+
-+/*************************************************************************/ /*!
-+@Function DevmemIntReservationAcquire
-+@Description Acquire a reference to the provided device memory reservation.
-+@Return IMG_TRUE if referenced and IMG_FALSE in case of error
-+*/ /**************************************************************************/
-+IMG_BOOL
-+DevmemIntReservationAcquire(DEVMEMINT_RESERVATION *psDevmemReservation);
-+
-+/*************************************************************************/ /*!
-+@Function DevmemIntReservationRelease
-+@Description Release the reference to the provided device memory reservation.
-+ If this is the last reference which was taken then the
-+ reservation will be freed.
-+@Return None.
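-+ The reference being dropped must previously have been taken with
-+ DevmemIntReservationAcquire() or be the initial reference created by
-+ DevmemIntReserveRange().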
-+*/ /**************************************************************************/ -+void -+DevmemIntReservationRelease(DEVMEMINT_RESERVATION *psDevmemReservation); -+ -+/*************************************************************************/ /*! -+ * @Function DevmemXIntMapPages() -+ * @Description Maps an arbitrary amount of pages from a PMR to a reserved range -+ * and takes references on the PMR. -+ * -+ * @Input psRsrv Reservation handle for the range -+ * @Input psPMR PMR that is mapped -+ * @Input uiPageCount Number of consecutive pages that are -+ * mapped -+ * @Input uiPhysPageOffset Logical offset in the PMR (measured in pages) -+ * @Input uiFlags Mapping flags -+ * @Input uiVirtPageOffset Offset from the reservation base to start the -+ * mapping from (measured in pages) -+ * -+ * @Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+DevmemXIntMapPages(DEVMEMXINT_RESERVATION *psRsrv, -+ PMR *psPMR, -+ IMG_UINT32 uiPageCount, -+ IMG_UINT32 uiPhysPageOffset, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 uiVirtPageOffset); -+ -+/*************************************************************************/ /*! -+ * @Function DevmemXIntUnmapPages() -+ * @Description Unmaps an arbitrary amount of pages from a reserved range and -+ * releases references on associated PMRs. -+ * -+ * @Input psRsrv Reservation handle for the range -+ * @Input uiVirtPageOffset Offset from the reservation base to start the -+ * mapping from (measured in pages) -+ * @Input uiPageCount Number of consecutive pages that are -+ * unmapped -+ * -+ * @Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+DevmemXIntUnmapPages(DEVMEMXINT_RESERVATION *psRsrv, -+ IMG_UINT32 uiVirtPageOffset, -+ IMG_UINT32 uiPageCount); -+ -+/*************************************************************************/ /*! -+ * @Function DevmemXIntMapVRangeToBackingPage() -+ * @Description Maps a kernel internal backing page to a reserved range. -+ * -+ * @Input psRsrv Reservation handle for the range -+ * @Input uiPageCount Number of consecutive pages that are -+ * mapped -+ * @Input uiFlags Mapping flags -+ * @Input uiVirtPageOffset Offset from the reservation base to start the -+ * mapping from (measured in pages) -+ * -+ * @Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+DevmemXIntMapVRangeToBackingPage(DEVMEMXINT_RESERVATION *psRsrv, -+ IMG_UINT32 uiPageCount, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 uiVirtPageOffset); -+ -+/*************************************************************************/ /*! -+@Function DevmemIntChangeSparse -+@Description Changes the sparse allocations of a PMR by allocating and freeing -+ pages and changing their corresponding CPU and GPU mappings. -+ -+@input psDevmemHeap Pointer to the heap we map on -+@input psPMR The PMR we want to map -+@input ui32AllocPageCount Number of pages to allocate -+@input pai32AllocIndices The logical PMR indices where pages will -+ be allocated. May be NULL. -+@input ui32FreePageCount Number of pages to free -+@input pai32FreeIndices The logical PMR indices where pages will -+ be freed. May be NULL. -+@input uiSparseFlags Flags passed in to determine which kind -+ of sparse change the user wanted. -+ See devicemem_typedefs.h for details. -+@input uiFlags Memalloc flags for this virtual range. 
-+@input sDevVAddrBase The base address of the virtual range of -+ this sparse allocation. -+@input sCpuVAddrBase The CPU base address of this allocation. -+ May be 0 if not existing. -+@Return PVRSRV_ERROR failure code -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap, -+ PMR *psPMR, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *pai32AllocIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pai32FreeIndices, -+ SPARSE_MEM_RESIZE_FLAGS uiSparseFlags, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_DEV_VIRTADDR sDevVAddrBase, -+ IMG_UINT64 sCpuVAddrBase); -+ -+/* -+ * DevmemIntRGXInvalidateFBSCTable() -+ * -+ * Invalidate selected FBSC table indices. -+ * -+ */ -+PVRSRV_ERROR -+DevmemIntInvalidateFBSCTable(DEVMEMINT_CTX *psDevmemCtx, -+ IMG_UINT64 ui64FBSCEntryMask); -+ -+PVRSRV_ERROR -+DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ DEVMEMINT_CTX *psDevMemContext, -+ IMG_DEV_VIRTADDR sDevAddr); -+ -+PVRSRV_ERROR -+DevmemIntGetFaultAddress(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ DEVMEMINT_CTX *psDevMemContext, -+ IMG_DEV_VIRTADDR *psFaultAddress); -+ -+/*************************************************************************/ /*! -+@Function DevmemIntRegisterPFNotifyKM -+@Description Registers a PID to be notified when a page fault occurs on a -+ specific device memory context. -+@Input psDevmemCtx The context to be notified about. -+@Input bRegister If true, register. If false, de-register. -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx, -+ IMG_BOOL bRegister); -+ -+/*************************************************************************/ /*! -+@Function DevmemIntPFNotify -+@Description Notifies any processes that have registered themselves to be -+ notified when a page fault happens on a specific device memory -+ context. -+@Input *psDevNode The device node. -+@Input ui64FaultedPCAddress The page catalogue address that faulted. -+@Input sFaultAddress The address that triggered the fault. -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT64 ui64FaultedPCAddress, -+ IMG_DEV_VIRTADDR sFaultAddress); -+ -+#if defined(PDUMP) -+PVRSRV_ERROR -+DevmemIntPDumpGetValidRegions(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_CTX *psDevmemCtx, -+ IMG_DEV_VIRTADDR sDevAddrStart, -+ IMG_DEVMEM_SIZE_T uiSize, -+ DLLIST_NODE *psValidRegionsList); -+ -+void -+DevmemIntPDumpFreeValidRegions(DLLIST_NODE *psValidRegionsList); -+ -+PVRSRV_ERROR -+DevmemIntPDumpSaveFromRegionListToFileVirtual(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_CTX *psDevmemCtx, -+ DLLIST_NODE *psDevAddrRegions, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 ui32FileOffset, -+ IMG_UINT32 ui32PDumpFlags); -+ -+/* -+ * DevmemIntPDumpSaveToFileVirtual() -+ * -+ * Writes out PDump "SAB" commands with the data found in memory at -+ * the given virtual address. 
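-+ * Each page in the range is first checked with DevmemIntIsVDevAddrValid()
-+ * and only the valid (mapped) sub-regions are written out.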
-+ */ -+PVRSRV_ERROR -+DevmemIntPDumpSaveToFileVirtual(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_CTX *psDevmemCtx, -+ IMG_DEV_VIRTADDR sDevAddrStart, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 uiArraySize, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 ui32FileOffset, -+ IMG_UINT32 ui32PDumpFlags); -+ -+/* -+ * DevmemIntPDumpSaveToFileVirtualNoValidate() -+ * -+ * Writes out PDump "SAB" commands with the data found in memory at -+ * the given virtual address. Doesn't perform address validation. -+ */ -+PVRSRV_ERROR -+DevmemIntPDumpSaveToFileVirtualNoValidate(PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_CTX *psDevmemCtx, -+ IMG_DEV_VIRTADDR sDevAddrStart, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 ui32FileOffset, -+ IMG_UINT32 ui32PDumpFlags); -+ -+IMG_UINT32 -+DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext); -+ -+PVRSRV_ERROR -+DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_CTX *psDevMemContext, -+ IMG_UINT32 ui32Size, -+ const IMG_CHAR *pszFileName, -+ IMG_DEV_VIRTADDR sData, -+ IMG_UINT32 ui32DataSize, -+ IMG_UINT32 ui32LogicalWidth, -+ IMG_UINT32 ui32LogicalHeight, -+ IMG_UINT32 ui32PhysicalWidth, -+ IMG_UINT32 ui32PhysicalHeight, -+ PDUMP_PIXEL_FORMAT ePixFmt, -+ IMG_MEMLAYOUT eMemLayout, -+ IMG_FB_COMPRESSION eFBCompression, -+ const IMG_UINT32 *paui32FBCClearColour, -+ PDUMP_FBC_SWIZZLE eFBCSwizzle, -+ IMG_DEV_VIRTADDR sHeader, -+ IMG_UINT32 ui32HeaderSize, -+ IMG_UINT32 ui32PDumpFlags); -+ -+PVRSRV_ERROR -+DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_CTX *psDevMemContext, -+ IMG_UINT32 ui32Size, -+ const IMG_CHAR *pszFileName, -+ IMG_DEV_VIRTADDR sData, -+ IMG_UINT32 ui32DataSize, -+ IMG_UINT32 ui32HeaderType, -+ IMG_UINT32 ui32ElementType, -+ IMG_UINT32 ui32ElementCount, -+ IMG_UINT32 ui32PDumpFlags); -+#else /* PDUMP */ -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(DevmemIntPDumpGetValidRegions) -+#endif -+static INLINE PVRSRV_ERROR -+DevmemIntPDumpGetValidRegions(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_CTX *psDevmemCtx, -+ IMG_DEV_VIRTADDR sDevAddrStart, -+ IMG_DEVMEM_SIZE_T uiSize) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(psDevmemCtx); -+ PVR_UNREFERENCED_PARAMETER(sDevAddrStart); -+ PVR_UNREFERENCED_PARAMETER(uiSize); -+ -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(DevmemIntPDumpFreeValidRegions) -+#endif -+static INLINE void -+DevmemIntPDumpFreeValidRegions(DLLIST_NODE *psDevAddrRegions) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDevAddrRegions); -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(DevmemIntPDumpSaveFromRegionListToFileVirtual) -+#endif -+static INLINE PVRSRV_ERROR -+DevmemIntPDumpSaveFromRegionListToFileVirtual(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_CTX *psDevmemCtx, -+ DLLIST_NODE *psDevAddrRegions, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 ui32FileOffset, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(psDevmemCtx); -+ PVR_UNREFERENCED_PARAMETER(psDevAddrRegions); -+ PVR_UNREFERENCED_PARAMETER(pszFilename); -+ PVR_UNREFERENCED_PARAMETER(ui32FileOffset); -+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); -+ -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA 
-+#pragma inline(DevmemIntPDumpSaveToFileVirtual) -+#endif -+static INLINE PVRSRV_ERROR -+DevmemIntPDumpSaveToFileVirtual(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_CTX *psDevmemCtx, -+ IMG_DEV_VIRTADDR sDevAddrStart, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 uiArraySize, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 ui32FileOffset, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(psDevmemCtx); -+ PVR_UNREFERENCED_PARAMETER(sDevAddrStart); -+ PVR_UNREFERENCED_PARAMETER(uiSize); -+ PVR_UNREFERENCED_PARAMETER(uiArraySize); -+ PVR_UNREFERENCED_PARAMETER(pszFilename); -+ PVR_UNREFERENCED_PARAMETER(ui32FileOffset); -+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(DevmemIntPDumpImageDescriptor) -+#endif -+static INLINE PVRSRV_ERROR -+DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_CTX *psDevMemContext, -+ IMG_UINT32 ui32Size, -+ const IMG_CHAR *pszFileName, -+ IMG_DEV_VIRTADDR sData, -+ IMG_UINT32 ui32DataSize, -+ IMG_UINT32 ui32LogicalWidth, -+ IMG_UINT32 ui32LogicalHeight, -+ IMG_UINT32 ui32PhysicalWidth, -+ IMG_UINT32 ui32PhysicalHeight, -+ PDUMP_PIXEL_FORMAT ePixFmt, -+ IMG_MEMLAYOUT eMemLayout, -+ IMG_FB_COMPRESSION eFBCompression, -+ const IMG_UINT32 *paui32FBCClearColour, -+ PDUMP_FBC_SWIZZLE eFBCSwizzle, -+ IMG_DEV_VIRTADDR sHeader, -+ IMG_UINT32 ui32HeaderSize, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(psDevMemContext); -+ PVR_UNREFERENCED_PARAMETER(ui32Size); -+ PVR_UNREFERENCED_PARAMETER(pszFileName); -+ PVR_UNREFERENCED_PARAMETER(sData); -+ PVR_UNREFERENCED_PARAMETER(ui32DataSize); -+ PVR_UNREFERENCED_PARAMETER(ui32LogicalWidth); -+ PVR_UNREFERENCED_PARAMETER(ui32LogicalHeight); -+ PVR_UNREFERENCED_PARAMETER(ui32PhysicalWidth); -+ PVR_UNREFERENCED_PARAMETER(ui32PhysicalHeight); -+ PVR_UNREFERENCED_PARAMETER(ePixFmt); -+ PVR_UNREFERENCED_PARAMETER(eMemLayout); -+ PVR_UNREFERENCED_PARAMETER(eFBCompression); -+ PVR_UNREFERENCED_PARAMETER(paui32FBCClearColour); -+ PVR_UNREFERENCED_PARAMETER(eFBCSwizzle); -+ PVR_UNREFERENCED_PARAMETER(sHeader); -+ PVR_UNREFERENCED_PARAMETER(ui32HeaderSize); -+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(DevmemIntPDumpDataDescriptor) -+#endif -+static INLINE PVRSRV_ERROR -+DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_CTX *psDevMemContext, -+ IMG_UINT32 ui32Size, -+ const IMG_CHAR *pszFileName, -+ IMG_DEV_VIRTADDR sData, -+ IMG_UINT32 ui32DataSize, -+ IMG_UINT32 ui32ElementType, -+ IMG_UINT32 ui32ElementCount, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(psDevMemContext); -+ PVR_UNREFERENCED_PARAMETER(ui32Size); -+ PVR_UNREFERENCED_PARAMETER(pszFileName); -+ PVR_UNREFERENCED_PARAMETER(sData); -+ PVR_UNREFERENCED_PARAMETER(ui32DataSize); -+ PVR_UNREFERENCED_PARAMETER(ui32ElementType); -+ PVR_UNREFERENCED_PARAMETER(ui32ElementCount); -+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); -+ return PVRSRV_OK; -+} -+ -+#endif /* PDUMP */ -+ -+PVRSRV_ERROR -+DevmemIntInit(void); -+ -+PVRSRV_ERROR -+DevmemIntDeInit(void); -+ -+PVRSRV_ERROR 
-+DevmemIntExportCtx(DEVMEMINT_CTX *psContext, -+ PMR *psPMR, -+ DEVMEMINT_CTX_EXPORT **ppsContextExport); -+ -+PVRSRV_ERROR -+DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport); -+ -+PVRSRV_ERROR -+DevmemIntAcquireRemoteCtx(PMR *psPMR, -+ DEVMEMINT_CTX **ppsContext, -+ IMG_HANDLE *phPrivData); -+ -+#endif /* DEVICEMEM_SERVER_H */ -diff --git a/drivers/gpu/drm/img-rogue/devicemem_server_utils.h b/drivers/gpu/drm/img-rogue/devicemem_server_utils.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/devicemem_server_utils.h -@@ -0,0 +1,198 @@ -+/**************************************************************************/ /*! -+@File -+@Title Device Memory Management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header file utilities that are specific to device memory functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
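[Illustrative only: one plausible export/unexport sequence built from the DevmemIntExportCtx()/DevmemIntUnexportCtx() prototypes above. The wrapper name ExportThenUnexport is hypothetical, and the context and PMR are assumed to have been obtained elsewhere.]

/* Hypothetical flow: publish a device memory context through a PMR so a
 * remote party could later call DevmemIntAcquireRemoteCtx() on that PMR. */
static PVRSRV_ERROR ExportThenUnexport(DEVMEMINT_CTX *psContext, PMR *psPMR)
{
	DEVMEMINT_CTX_EXPORT *psExport = NULL;
	PVRSRV_ERROR eError;

	eError = DevmemIntExportCtx(psContext, psPMR, &psExport);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* ... remote side may acquire the context via the PMR here ... */

	return DevmemIntUnexportCtx(psExport);
}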
-+*/ /***************************************************************************/ -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "device.h" -+#include "pvrsrv_memallocflags.h" -+#include "pvrsrv.h" -+ -+static INLINE PVRSRV_ERROR DevmemCPUCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode, -+ PVRSRV_MEMALLOCFLAGS_T ulFlags, -+ IMG_UINT32 *pui32Ret) -+{ -+ IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags); -+ IMG_UINT32 ui32Ret; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags)); -+ -+ switch (ui32CPUCacheMode) -+ { -+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: -+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED; -+ break; -+ -+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: -+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; -+ break; -+ -+ case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT: -+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED; -+ break; -+ -+ case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT: -+ -+ /* -+ * If system has no coherency but coherency has been requested for CPU -+ * and GPU we currently fall back to write-combine. -+ * This avoids errors on arm64 when uncached is turned into ordered device memory -+ * and suffers from problems with unaligned access. -+ */ -+ if ( (PVRSRV_GPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) && -+ !(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) ) -+ { -+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; -+ } -+ else -+ { -+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED; -+ } -+ break; -+ -+ default: -+ PVR_LOG(("DevmemCPUCacheMode: Unknown CPU cache mode 0x%08x", ui32CPUCacheMode)); -+ PVR_ASSERT(0); -+ /* -+ We should never get here, but if we do then setting the mode -+ to uncached is the safest thing to do. -+ */ -+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED; -+ eError = PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE; -+ break; -+ } -+ -+ *pui32Ret = ui32Ret; -+ -+ return eError; -+} -+ -+static INLINE PVRSRV_ERROR DevmemDeviceCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode, -+ PVRSRV_MEMALLOCFLAGS_T ulFlags, -+ IMG_UINT32 *pui32Ret) -+{ -+ IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags); -+ IMG_UINT32 ui32Ret; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags)); -+ -+ switch (ui32DeviceCacheMode) -+ { -+ case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED: -+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED; -+ break; -+ -+ case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC: -+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC; -+ break; -+ -+ case PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT: -+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED; -+ break; -+ -+ case PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT: -+ -+ /* -+ * If system has no coherency but coherency has been requested for CPU -+ * and GPU we currently fall back to write-combine. -+ * This avoids errors on arm64 when uncached is turned into ordered device memory -+ * and suffers from problems with unaligned access. 
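[A small sketch of how the DevmemCPUCacheMode() helper shown above might be consumed; the wrapper ResolveCPUCacheMode is hypothetical caller code, not from the driver. It illustrates that the requested CPU cache mode is resolved against the system's snooping capabilities and may legitimately come back as write-combine even when coherence was requested.]

/* Hypothetical: resolve the effective CPU cache mode for an allocation. */
static PVRSRV_ERROR ResolveCPUCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode,
                                        PVRSRV_MEMALLOCFLAGS_T uiFlags,
                                        IMG_UINT32 *pui32Mode)
{
	PVRSRV_ERROR eError = DevmemCPUCacheMode(psDeviceNode, uiFlags, pui32Mode);

	if (eError == PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE)
	{
		/* The helper has already fallen back to an uncached mode;
		 * whether to continue or fail is caller policy. */
		PVR_DPF((PVR_DBG_WARNING,
		         "%s: unsupported CPU cache mode requested, using uncached",
		         __func__));
	}

	return eError;
}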
-+ */ -+ if ( (PVRSRV_CPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) && -+ !(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) ) -+ { -+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC; -+ } -+ else -+ { -+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED; -+ } -+ break; -+ -+ default: -+ PVR_LOG(("DevmemDeviceCacheMode: Unknown device cache mode 0x%08x", ui32DeviceCacheMode)); -+ PVR_ASSERT(0); -+ /* -+ We should never get here, but if we do then setting the mode -+ to uncached is the safest thing to do. -+ */ -+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED; -+ eError = PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE; -+ break; -+ } -+ -+ *pui32Ret = ui32Ret; -+ -+ return eError; -+} -+ -+static INLINE IMG_BOOL DevmemCPUCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode, -+ PVRSRV_MEMALLOCFLAGS_T ulFlags) -+{ -+ IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags); -+ IMG_BOOL bRet = IMG_FALSE; -+ -+ PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags)); -+ -+ if (ui32CPUCacheMode == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) -+ { -+ bRet = PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig); -+ } -+ return bRet; -+} -+ -+static INLINE IMG_BOOL DevmemDeviceCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode, -+ PVRSRV_MEMALLOCFLAGS_T ulFlags) -+{ -+ IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags); -+ IMG_BOOL bRet = IMG_FALSE; -+ -+ PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags)); -+ -+ if (ui32DeviceCacheMode == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) -+ { -+ bRet = PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig); -+ } -+ return bRet; -+} -diff --git a/drivers/gpu/drm/img-rogue/devicemem_typedefs.h b/drivers/gpu/drm/img-rogue/devicemem_typedefs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/devicemem_typedefs.h -@@ -0,0 +1,141 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device Memory Management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Client side part of device memory management -- this file -+ is forked from new_devmem_allocation.h as this one has to -+ reside in the top level include so that client code is able -+ to make use of the typedefs. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
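[For orientation only: one plausible way a caller could combine the two coherency predicates defined above; the helper name CanSkipCacheMaintenance and the surrounding policy are assumptions, not taken from this patch.]

/* Hypothetical policy: treat the buffer as fully coherent, and so skip
 * explicit cache maintenance, only if both predicates report snooping
 * for the requested coherent modes. */
static IMG_BOOL CanSkipCacheMaintenance(PVRSRV_DEVICE_NODE *psDeviceNode,
                                        PVRSRV_MEMALLOCFLAGS_T uiFlags)
{
	return (IMG_BOOL)(DevmemCPUCacheCoherency(psDeviceNode, uiFlags) &&
	                  DevmemDeviceCacheCoherency(psDeviceNode, uiFlags));
}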
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef DEVICEMEM_TYPEDEFS_H -+#define DEVICEMEM_TYPEDEFS_H -+ -+#include -+#include "img_types.h" -+#include "pvrsrv_memallocflags.h" -+ -+typedef struct DEVMEM_CONTEXT_TAG DEVMEM_CONTEXT; /*!< Convenience typedef for struct DEVMEM_CONTEXT_TAG */ -+typedef struct DEVMEM_HEAP_TAG DEVMEM_HEAP; /*!< Convenience typedef for struct DEVMEM_HEAP_TAG */ -+typedef struct DEVMEM_MEMDESC_TAG DEVMEM_MEMDESC; /*!< Convenience typedef for struct DEVMEM_MEMDESC_TAG */ -+typedef struct DEVMEM_PAGELIST_TAG DEVMEM_PAGELIST; /*!< Convenience typedef for struct DEVMEM_PAGELIST_TAG */ -+ -+typedef IMG_HANDLE DEVMEM_EXPORTHANDLE; /*!< Typedef for DeviceMem Export Handle */ -+typedef IMG_UINT64 DEVMEM_EXPORTKEY; /*!< Typedef for DeviceMem Export Key */ -+typedef IMG_DEVMEM_SIZE_T DEVMEM_SIZE_T; /*!< Typedef for DeviceMem SIZE_T */ -+typedef IMG_DEVMEM_LOG2ALIGN_T DEVMEM_LOG2ALIGN_T; /*!< Typedef for DeviceMem LOG2 Alignment */ -+ -+typedef struct DEVMEMX_PHYS_MEMDESC_TAG DEVMEMX_PHYSDESC; /*!< Convenience typedef for DevmemX physical */ -+typedef struct DEVMEMX_VIRT_MEMDESC_TAG DEVMEMX_VIRTDESC; /*!< Convenience typedef for DevmemX virtual */ -+ -+/*! calling code needs all the info in this struct, to be able to pass it around */ -+typedef struct -+{ -+ /*! A handle to the PMR. */ -+ IMG_HANDLE hPMRExportHandle; -+ /*! The "key" to prove we have authorisation to use this PMR */ -+ IMG_UINT64 uiPMRExportPassword; -+ /*! Size and alignment properties for this PMR. Note, these -+ numbers are not trusted in kernel, but we need to cache them -+ client-side in order to allocate from the VM arena. The kernel -+ will know the actual alignment and size of the PMR and thus -+ would prevent client code from breaching security here. Ditto -+ for physmem granularity (aka page size) if this is different -+ from alignment */ -+ IMG_DEVMEM_SIZE_T uiSize; -+ /*! We call this "contiguity guarantee" to be more precise than -+ calling it "alignment" or "page size", terms which may seem -+ similar but have different emphasis. The number reported here -+ is the minimum contiguity guarantee from the creator of the -+ PMR. Now, there is no requirement to allocate that coarsely -+ from the RA. 
The alignment given to the RA simply needs to be -+ at least as coarse as the device page size for the heap we -+ ultimately intend to map into. What is important is that the -+ device MMU data page size is not greater than the minimum -+ contiguity guarantee from the PMR. This value is reported to -+ the client in order that it can choose to make early checks and -+ perhaps decide which heap (in a variable page size scenario) it -+ would be safe to map this PMR into. For convenience, the -+ client may choose to use this argument as the alignment of the -+ virtual range he chooses to allocate, but this is _not_ -+ necessary and in many cases would be able to get away with a -+ finer alignment, should the heap into which this PMR will be -+ mapped support it. */ -+ IMG_DEVMEM_LOG2ALIGN_T uiLog2ContiguityGuarantee; -+} DEVMEM_EXPORTCOOKIE; -+ -+/* Enum that describes the operation associated with changing sparse memory */ -+typedef IMG_UINT32 SPARSE_MEM_RESIZE_FLAGS; -+#define SPARSE_RESIZE_NONE 0U -+ -+ /* This should be set to indicate the change needs allocation */ -+#define SPARSE_RESIZE_ALLOC 1U -+ -+ /* This should be set to indicate the change needs free */ -+#define SPARSE_RESIZE_FREE 2U -+ -+#define SPARSE_RESIZE_BOTH (SPARSE_RESIZE_ALLOC | SPARSE_RESIZE_FREE) -+ -+ /* Should be set to get the sparse changes appear in cpu virtual map */ -+#define SPARSE_MAP_CPU_ADDR 8U -+ -+ -+/* To be used with all the sparse allocations that gets mapped to CPU Virtual -+ * space. The sparse allocation CPU mapping is torn down and re-mapped every -+ * time the sparse allocation layout changes. -+ */ -+#define PVRSRV_UNMAP_ON_SPARSE_CHANGE 1 -+ -+/* To use with DevmemSubAllocate() as the default factor if no over-allocation -+ * is desired. -+ */ -+#define DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER (1U) -+ -+/* Defines the max length for PMR, MemDesc, Device memory History and RI debug -+ * annotations stored in memory, including the null terminator. -+ */ -+#define DEVMEM_ANNOTATION_MAX_LEN ((IMG_UINT32)PVR_ANNOTATION_MAX_LEN + 1U) -+ -+ -+/* Reserved VA space of a heap must always be multiple of DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY -+ * Granularity has been chosen to support the max possible practically used OS page size. -+ */ -+#define DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY 0x10000 /* 64KB is MAX anticipated OS page size */ -+ -+#endif /* #ifndef DEVICEMEM_TYPEDEFS_H */ -diff --git a/drivers/gpu/drm/img-rogue/devicemem_utils.c b/drivers/gpu/drm/img-rogue/devicemem_utils.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/devicemem_utils.c -@@ -0,0 +1,1267 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device Memory Management internal utility functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Utility functions used internally by device memory management -+ code. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
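[Not part of the patch: a minimal example of composing the sparse-resize flag bits defined above in devicemem_typedefs.h for a change that both allocates and frees pages and also wants the CPU mapping refreshed. Such a value would be passed as the uiSparseFlags argument of DevmemIntChangeSparse() declared earlier.]

/* Hypothetical: request a sparse change that allocates some pages, frees
 * others, and is reflected in the CPU virtual mapping as well. */
SPARSE_MEM_RESIZE_FLAGS uiSparseFlags =
	SPARSE_RESIZE_ALLOC | SPARSE_RESIZE_FREE | SPARSE_MAP_CPU_ADDR;

/* Equivalent, using the convenience define: */
SPARSE_MEM_RESIZE_FLAGS uiSparseFlagsAlt =
	SPARSE_RESIZE_BOTH | SPARSE_MAP_CPU_ADDR;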
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ /**************************************************************************/ -+ -+#include "allocmem.h" -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+#include "ra.h" -+#include "devicemem_utils.h" -+#include "client_mm_bridge.h" -+#include "client_cache_bridge.h" -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+#include "client_ri_bridge.h" -+#if defined(__KERNEL__) -+#include "pvrsrv.h" -+#else -+#include "pvr_bridge_client.h" -+#endif -+#endif -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#include "proc_stats.h" -+#endif -+ -+#if defined(__KERNEL__) -+#include "srvcore.h" -+#else -+#include "srvcore_intern.h" -+#endif -+ -+/* -+ SVM heap management support functions for CPU (un)mapping -+ */ -+#define DEVMEM_MAP_SVM_USER_MANAGED_RETRY 2 -+ -+static inline PVRSRV_ERROR -+DevmemCPUMapSVMKernelManaged(DEVMEM_HEAP *psHeap, -+ DEVMEM_IMPORT *psImport, -+ IMG_UINT64 *ui64MapAddress) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT64 ui64SvmMapAddr; -+ IMG_UINT64 ui64SvmMapAddrEnd; -+ IMG_UINT64 ui64SvmHeapAddrEnd; -+ -+ /* SVM heap management always has XXX_MANAGER_KERNEL unless we -+ have triggered the fall back code-path in which case we -+ should not be calling into this code-path */ -+ PVR_ASSERT(psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_KERNEL); -+ -+ /* By acquiring the CPU virtual address here, it essentially -+ means we lock-down the virtual address for the duration -+ of the life-cycle of the allocation until a de-allocation -+ request comes in. 
Thus the allocation is guaranteed not to -+ change its virtual address on the CPU during its life-time. -+ NOTE: Import might have already been CPU Mapped before now, -+ normally this is not a problem, see fall back */ -+ eError = DevmemImportStructCPUMap(psImport); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "DevmemImportStructCPUMap"); -+ eError = PVRSRV_ERROR_DEVICEMEM_MAP_FAILED; -+ goto failSVM; -+ } -+ -+ /* Supplied kernel mmap virtual address is also device virtual address; -+ calculate the heap & kernel supplied mmap virtual address limits */ -+ ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr; -+ ui64SvmHeapAddrEnd = psHeap->sBaseAddress.uiAddr + psHeap->uiSize; -+ ui64SvmMapAddrEnd = ui64SvmMapAddr + psImport->uiSize; -+ PVR_ASSERT(ui64SvmMapAddr != (IMG_UINT64)0); -+ -+ /* SVM limit test may fail if processor has more virtual address bits than device */ -+ if ((ui64SvmMapAddr >= ui64SvmHeapAddrEnd || ui64SvmMapAddrEnd > ui64SvmHeapAddrEnd) || -+ (ui64SvmMapAddr & ~(ui64SvmHeapAddrEnd - 1))) -+ { -+ /* Unmap incompatible SVM virtual address, this -+ may not release address if it was elsewhere -+ CPU Mapped before call into this function */ -+ DevmemImportStructCPUUnmap(psImport); -+ -+ /* Flag incompatible SVM mapping */ -+ eError = PVRSRV_ERROR_BAD_MAPPING; -+ goto failSVM; -+ } -+ -+ *ui64MapAddress = ui64SvmMapAddr; -+failSVM: -+ /* either OK, MAP_FAILED or BAD_MAPPING */ -+ return eError; -+} -+ -+static inline void -+DevmemCPUUnmapSVMKernelManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport) -+{ -+ PVR_UNREFERENCED_PARAMETER(psHeap); -+ DevmemImportStructCPUUnmap(psImport); -+} -+ -+static inline PVRSRV_ERROR -+DevmemCPUMapSVMUserManaged(DEVMEM_HEAP *psHeap, -+ DEVMEM_IMPORT *psImport, -+ IMG_UINT uiAlign, -+ IMG_UINT64 *ui64MapAddress) -+{ -+ RA_LENGTH_T uiAllocatedSize; -+ RA_BASE_T uiAllocatedAddr; -+ IMG_UINT64 ui64SvmMapAddr; -+ IMG_UINT uiRetry = 0; -+ PVRSRV_ERROR eError; -+ -+ /* If SVM heap management has transitioned to XXX_MANAGER_USER, -+ this is essentially a fall back approach that ensures we -+ continue to satisfy SVM alloc. This approach is not without -+ hazards in that we may specify a virtual address that is -+ already in use by the user process */ -+ PVR_ASSERT(psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER); -+ -+ /* Normally, for SVM heap allocations, CPUMap _must_ be done -+ before DevMap; ideally the initial CPUMap should be done by -+ SVM functions though this is not a hard requirement as long -+ as the prior elsewhere obtained CPUMap virtual address meets -+ SVM address requirements. This is a fall-back code-pathway -+ so we have to test that this assumption holds before we -+ progress any further */ -+ OSLockAcquire(psImport->sCPUImport.hLock); -+ -+ if (psImport->sCPUImport.ui32RefCount) -+ { -+ /* Already CPU Mapped SVM heap allocation, this prior elsewhere -+ obtained virtual address is responsible for the above -+ XXX_MANAGER_KERNEL failure. 
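[An illustrative restatement, with a hypothetical helper name SvmAddrWithinHeap, of the SVM limit test performed in DevmemCPUMapSVMKernelManaged() above: the CPU-mapped address plus the import size must sit entirely inside the heap's device-virtual window, otherwise the mapping is rejected with PVRSRV_ERROR_BAD_MAPPING.]

/* Hypothetical mirror of the acceptance condition implied by the test
 * above: the range [ui64Addr, ui64Addr + ui64Size) must end at or below
 * the heap end, and the address must carry no bits above it (the mask
 * term is meaningful when the heap end is a power of two). */
static IMG_BOOL SvmAddrWithinHeap(IMG_UINT64 ui64Addr,
                                  IMG_UINT64 ui64Size,
                                  IMG_UINT64 ui64HeapEnd)
{
	return (IMG_BOOL)(ui64Addr < ui64HeapEnd &&
	                  (ui64Addr + ui64Size) <= ui64HeapEnd &&
	                  (ui64Addr & ~(ui64HeapEnd - 1)) == 0);
}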
As we are not responsible for -+ this, we cannot progress any further so need to fail */ -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Previously obtained CPU map address not SVM compatible" -+ , __func__)); -+ -+ /* Revert SVM heap to DEVMEM_HEAP_MANAGER_KERNEL */ -+ psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_KERNEL; -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Reverting SVM heap back to kernel managed", -+ __func__)); -+ -+ OSLockRelease(psImport->sCPUImport.hLock); -+ -+ /* Do we need a more specific error code here */ -+ eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED; -+ goto failSVM; -+ } -+ -+ OSLockRelease(psImport->sCPUImport.hLock); -+ -+ do -+ { -+ /* Next we proceed to instruct the kernel to use the RA_Alloc supplied -+ virtual address to map-in this SVM import suballocation; there is no -+ guarantee that this RA_Alloc virtual address may not collide with an -+ already in-use VMA range in the process */ -+ eError = RA_Alloc(psHeap->psQuantizedVMRA, -+ psImport->uiSize, -+ RA_NO_IMPORT_MULTIPLIER, -+ 0, /* flags: this RA doesn't use flags*/ -+ uiAlign, -+ "SVM_Virtual_Alloc", -+ &uiAllocatedAddr, -+ &uiAllocatedSize, -+ NULL /* don't care about per-import priv data */); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "RA_Alloc"); -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ if (eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) -+ { -+ PVRSRV_ERROR eErr; -+ eErr = BridgePVRSRVStatsUpdateOOMStat(GetBridgeHandle(psHeap->psCtx->hDevConnection), -+ PVRSRV_DEVICE_STAT_TYPE_OOM_VIRTMEM_COUNT, -+ OSGetCurrentProcessID()); -+ PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVStatsUpdateOOMStat"); -+ } -+#endif -+ goto failSVM; -+ } -+ -+ /* No reason for allocated virtual size to be different from -+ the PMR's size */ -+ psImport->sCPUImport.pvCPUVAddr = (void*)(uintptr_t)uiAllocatedAddr; -+ PVR_ASSERT(uiAllocatedSize == psImport->uiSize); -+ -+ /* Map the import or allocation using the RA_Alloc virtual address; -+ the kernel may fail the request if the supplied virtual address -+ is already in-use in which case we re-try using another virtual -+ address obtained from the RA_Alloc */ -+ eError = DevmemImportStructCPUMap(psImport); -+ if (eError != PVRSRV_OK) -+ { -+ /* For now we simply discard failed RA_Alloc() obtained virtual -+ address (i.e. 
plenty of virtual space), this prevents us from -+ re-using these and furthermore essentially blacklists these -+ addresses from future SVM consideration; We exit fall-back -+ attempt if retry exceeds the fall-back retry limit */ -+ if (uiRetry++ > DEVMEM_MAP_SVM_USER_MANAGED_RETRY) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Cannot find SVM compatible address, bad mapping", -+ __func__)); -+ eError = PVRSRV_ERROR_BAD_MAPPING; -+ goto failSVM; -+ } -+ } -+ else -+ { -+ /* Found compatible SVM virtual address, set as device virtual address */ -+ ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr; -+ } -+ } while (eError != PVRSRV_OK); -+ -+ *ui64MapAddress = ui64SvmMapAddr; -+failSVM: -+ return eError; -+} -+ -+static inline void -+DevmemCPUUnmapSVMUserManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport) -+{ -+ RA_BASE_T uiAllocatedAddr; -+ -+ /* We only free SVM compatible addresses, all addresses in -+ the blacklist are essentially excluded from future RA_Alloc */ -+ uiAllocatedAddr = psImport->sDeviceImport.sDevVAddr.uiAddr; -+ RA_Free(psHeap->psQuantizedVMRA, uiAllocatedAddr); -+ -+ DevmemImportStructCPUUnmap(psImport); -+} -+ -+static inline PVRSRV_ERROR -+DevmemImportStructDevMapSVM(DEVMEM_HEAP *psHeap, -+ DEVMEM_IMPORT *psImport, -+ IMG_UINT uiAlign, -+ IMG_UINT64 *ui64MapAddress) -+{ -+ PVRSRV_ERROR eError; -+ -+ switch (psHeap->ui32HeapManagerFlags) -+ { -+ case DEVMEM_HEAP_MANAGER_KERNEL: -+ eError = DevmemCPUMapSVMKernelManaged(psHeap, -+ psImport, -+ ui64MapAddress); -+ if (eError == PVRSRV_ERROR_BAD_MAPPING) -+ { -+ /* If the SVM map address is outside of SVM heap limits, -+ change heap type to DEVMEM_HEAP_MANAGER_USER */ -+ psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_USER; -+ -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Kernel managed SVM heap is now user managed", -+ __func__)); -+ -+ /* Retry using user managed fall-back approach */ -+ eError = DevmemCPUMapSVMUserManaged(psHeap, -+ psImport, -+ uiAlign, -+ ui64MapAddress); -+ } -+ break; -+ -+ case DEVMEM_HEAP_MANAGER_USER: -+ eError = DevmemCPUMapSVMUserManaged(psHeap, -+ psImport, -+ uiAlign, -+ ui64MapAddress); -+ break; -+ -+ default: -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ break; -+ } -+ -+ return eError; -+} -+ -+static inline void -+DevmemImportStructDevUnmapSVM(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport) -+{ -+ switch (psHeap->ui32HeapManagerFlags) -+ { -+ case DEVMEM_HEAP_MANAGER_KERNEL: -+ DevmemCPUUnmapSVMKernelManaged(psHeap, psImport); -+ break; -+ -+ case DEVMEM_HEAP_MANAGER_USER: -+ DevmemCPUUnmapSVMUserManaged(psHeap, psImport); -+ break; -+ -+ default: -+ break; -+ } -+} -+ -+/* -+ The Devmem import structure is the structure we use -+ to manage memory that is "imported" (which is page -+ granular) from the server into our process, this -+ includes allocations. -+ -+ This allows memory to be imported without requiring -+ any CPU or device mapping. Memory can then be mapped -+ into the device or CPU on demand, but neither is -+ required. 
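[To make the "mapped on demand" point in the comment above concrete: a hypothetical sketch (TouchImportOnCPU is not a driver function) of briefly CPU-mapping an import and dropping the mapping again, using DevmemImportStructCPUMap()/DevmemImportStructCPUUnmap(), which are used above and defined toward the end of this file.]

/* Hypothetical: briefly CPU-map an import, touch it, and unmap it again.
 * The import itself exists independently of any live CPU or device mapping. */
static PVRSRV_ERROR TouchImportOnCPU(DEVMEM_IMPORT *psImport)
{
	PVRSRV_ERROR eError = DevmemImportStructCPUMap(psImport);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* ... access psImport->sCPUImport.pvCPUVAddr here ... */

	DevmemImportStructCPUUnmap(psImport);
	return PVRSRV_OK;
}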
-+ */ -+ -+IMG_INTERNAL -+void DevmemImportStructAcquire(DEVMEM_IMPORT *psImport) -+{ -+ IMG_INT iRefCount = OSAtomicIncrement(&psImport->hRefCount); -+ PVR_UNREFERENCED_PARAMETER(iRefCount); -+ PVR_ASSERT(iRefCount != 1); -+ -+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", -+ __func__, -+ psImport, -+ iRefCount-1, -+ iRefCount); -+} -+ -+IMG_INTERNAL -+IMG_BOOL DevmemImportStructRelease(DEVMEM_IMPORT *psImport) -+{ -+ IMG_INT iRefCount = OSAtomicDecrement(&psImport->hRefCount); -+ PVR_ASSERT(iRefCount >= 0); -+ -+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", -+ __func__, -+ psImport, -+ iRefCount+1, -+ iRefCount); -+ -+ if (iRefCount == 0) -+ { -+ PVRSRV_ERROR eError = DestroyServerResource(psImport->hDevConnection, -+ NULL, -+ BridgePMRUnrefPMR, -+ psImport->hPMR); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ OSLockDestroy(psImport->sCPUImport.hLock); -+ OSLockDestroy(psImport->sDeviceImport.hLock); -+ OSLockDestroy(psImport->hLock); -+ OSFreeMem(psImport); -+ -+ return IMG_TRUE; -+ } -+ -+ return IMG_FALSE; -+} -+ -+IMG_INTERNAL -+void DevmemImportDiscard(DEVMEM_IMPORT *psImport) -+{ -+ PVR_ASSERT(OSAtomicRead(&psImport->hRefCount) == 0); -+ OSLockDestroy(psImport->sCPUImport.hLock); -+ OSLockDestroy(psImport->sDeviceImport.hLock); -+ OSLockDestroy(psImport->hLock); -+ OSFreeMem(psImport); -+} -+ -+IMG_INTERNAL -+PVRSRV_ERROR DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc) -+{ -+ DEVMEM_MEMDESC *psMemDesc; -+ PVRSRV_ERROR eError; -+ -+ /* Must be zeroed in case it needs to be freed before it is initialised */ -+ psMemDesc = OSAllocZMem(sizeof(DEVMEM_MEMDESC)); -+ PVR_GOTO_IF_NOMEM(psMemDesc, eError, failAlloc); -+ -+ eError = OSLockCreate(&psMemDesc->hLock); -+ PVR_GOTO_IF_ERROR(eError, failMDLock); -+ -+ eError = OSLockCreate(&psMemDesc->sDeviceMemDesc.hLock); -+ PVR_GOTO_IF_ERROR(eError, failDMDLock); -+ -+ eError = OSLockCreate(&psMemDesc->sCPUMemDesc.hLock); -+ PVR_GOTO_IF_ERROR(eError, failCMDLock); -+ -+ OSAtomicWrite(&psMemDesc->hRefCount, 0); -+ -+ *ppsMemDesc = psMemDesc; -+ -+ return PVRSRV_OK; -+ -+failCMDLock: -+ OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock); -+failDMDLock: -+ OSLockDestroy(psMemDesc->hLock); -+failMDLock: -+ OSFreeMem(psMemDesc); -+failAlloc: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ -+ return eError; -+} -+ -+/* -+ Init the MemDesc structure -+ */ -+IMG_INTERNAL -+void DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ DEVMEM_IMPORT *psImport, -+ IMG_DEVMEM_SIZE_T uiSize) -+{ -+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", -+ __func__, -+ psMemDesc, -+ 0, -+ 1); -+ -+ psMemDesc->psImport = psImport; -+ psMemDesc->uiOffset = uiOffset; -+ -+ psMemDesc->sDeviceMemDesc.ui32RefCount = 0; -+ psMemDesc->sCPUMemDesc.ui32RefCount = 0; -+ psMemDesc->uiAllocSize = uiSize; -+ psMemDesc->hPrivData = NULL; -+ psMemDesc->ui32AllocationIndex = DEVICEMEM_HISTORY_ALLOC_INDEX_NONE; -+ -+#if defined(DEBUG) -+ psMemDesc->bPoisonOnFree = IMG_FALSE; -+#endif -+ -+ OSAtomicWrite(&psMemDesc->hRefCount, 1); -+} -+ -+#if defined(DEBUG) -+IMG_INTERNAL -+void DevmemMemDescSetPoF(DEVMEM_MEMDESC *psMemDesc, PVRSRV_MEMALLOCFLAGS_T uiFlags) -+{ -+ if (PVRSRV_CHECK_POISON_ON_FREE(uiFlags)) -+ { -+ psMemDesc->bPoisonOnFree = IMG_TRUE; -+ } -+} -+#endif -+ -+IMG_INTERNAL -+void DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc) -+{ -+ IMG_INT iRefCount = 0; -+ -+ iRefCount = OSAtomicIncrement(&psMemDesc->hRefCount); -+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", -+ __func__, -+ psMemDesc, -+ iRefCount-1, -+ iRefCount); -+ -+ PVR_UNREFERENCED_PARAMETER(iRefCount); -+} -+ -+#if 
defined(DEBUG) -+static void _DevmemPoisonOnFree(DEVMEM_MEMDESC *psMemDesc) -+{ -+ void *pvAddr = NULL; -+ IMG_UINT8 *pui8CPUVAddr; -+ PVRSRV_ERROR eError; -+ -+ eError = DevmemCPUMapCheckImportProperties(psMemDesc); -+ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "DevmemCPUMapCheckImportProperties"); -+ -+ OSLockAcquire(psMemDesc->sCPUMemDesc.hLock); -+ eError = DevmemImportStructCPUMap(psMemDesc->psImport); -+ OSLockRelease(psMemDesc->sCPUMemDesc.hLock); -+ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "DevmemImportStructCPUMap"); -+ -+ pui8CPUVAddr = psMemDesc->psImport->sCPUImport.pvCPUVAddr; -+ pui8CPUVAddr += psMemDesc->uiOffset; -+ pvAddr = pui8CPUVAddr; -+ -+ DevmemCPUMemSet(pvAddr, -+ PVRSRV_POISON_ON_FREE_VALUE, -+ psMemDesc->uiAllocSize, -+ psMemDesc->psImport->uiFlags); -+ -+ if (PVRSRV_CHECK_CPU_CACHE_COHERENT(psMemDesc->psImport->uiFlags) || -+ PVRSRV_CHECK_CPU_CACHE_INCOHERENT(psMemDesc->psImport->uiFlags)) -+ { -+ eError = BridgeCacheOpExec(GetBridgeHandle(psMemDesc->psImport->hDevConnection), -+ psMemDesc->psImport->hPMR, -+ (IMG_UINT64) (uintptr_t) -+ pvAddr - psMemDesc->uiOffset, -+ psMemDesc->uiOffset, -+ psMemDesc->uiAllocSize, -+ PVRSRV_CACHE_OP_FLUSH); -+ PVR_LOG_IF_ERROR(eError, "BridgeCacheOpExec"); -+ } -+ -+ DevmemImportStructCPUUnmap(psMemDesc->psImport); -+ pvAddr = NULL; -+} -+#endif -+ -+IMG_INTERNAL -+IMG_BOOL DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc) -+{ -+ IMG_INT iRefCount; -+ PVR_ASSERT(psMemDesc != NULL); -+ -+ iRefCount = OSAtomicDecrement(&psMemDesc->hRefCount); -+ PVR_ASSERT(iRefCount >= 0); -+ -+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", -+ __func__, -+ psMemDesc, -+ iRefCount+1, -+ iRefCount); -+ -+ if (iRefCount == 0) -+ { -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI) && -+ (psMemDesc->hRIHandle)) -+ { -+ PVRSRV_ERROR eError; -+ -+ eError = DestroyServerResource(psMemDesc->psImport->hDevConnection, -+ NULL, -+ BridgeRIDeleteMEMDESCEntry, -+ psMemDesc->hRIHandle); -+ PVR_LOG_IF_ERROR(eError, "BridgeRIDeleteMEMDESCEntry"); -+ } -+#endif -+ -+ OSLockAcquire(psMemDesc->psImport->hLock); -+ if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_SUBALLOCATABLE) -+ { -+ /* As soon as the first sub-allocation on the psImport is freed -+ * we might get dirty memory when reusing it. 
-+ * We have to delete the ZEROED, CLEAN & POISONED flag */ -+ psMemDesc->psImport->uiProperties &= -+ ~(DEVMEM_PROPERTIES_IMPORT_IS_ZEROED | -+ DEVMEM_PROPERTIES_IMPORT_IS_CLEAN | -+ DEVMEM_PROPERTIES_IMPORT_IS_POISONED); -+ -+ OSLockRelease(psMemDesc->psImport->hLock); -+ -+#if defined(DEBUG) -+ if (psMemDesc->bPoisonOnFree) -+ { -+ _DevmemPoisonOnFree(psMemDesc); -+ } -+#endif -+ -+ RA_Free(psMemDesc->psImport->sDeviceImport.psHeap->psSubAllocRA, -+ psMemDesc->psImport->sDeviceImport.sDevVAddr.uiAddr + -+ psMemDesc->uiOffset); -+ } -+ else -+ { -+ OSLockRelease(psMemDesc->psImport->hLock); -+ DevmemImportStructRelease(psMemDesc->psImport); -+ } -+ -+ OSLockDestroy(psMemDesc->sCPUMemDesc.hLock); -+ OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock); -+ OSLockDestroy(psMemDesc->hLock); -+ OSFreeMem(psMemDesc); -+ -+ return IMG_TRUE; -+ } -+ -+ return IMG_FALSE; -+} -+ -+IMG_INTERNAL -+void DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc) -+{ -+ PVR_ASSERT(OSAtomicRead(&psMemDesc->hRefCount) == 0); -+ -+ OSLockDestroy(psMemDesc->sCPUMemDesc.hLock); -+ OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock); -+ OSLockDestroy(psMemDesc->hLock); -+ OSFreeMem(psMemDesc); -+} -+ -+ -+IMG_INTERNAL -+PVRSRV_ERROR DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize, -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ PVRSRV_MEMALLOCFLAGS_T *puiFlags) -+{ -+ if ((*puiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) && -+ (*puiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Zero on Alloc and Poison on Alloc are mutually exclusive.", -+ __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if ((*puiFlags & PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC) && -+ (*puiFlags & PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Defer Alloc and Alloc Now are mutually exclusive.", -+ __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (uiAlign & (uiAlign-1)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: The requested alignment is not a power of two.", -+ __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (uiSize == 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Please request a non-zero size value.", -+ __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* If zero or poison flags are set we have to have write access to the page. 
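[A quick worked illustration of the alignment test used earlier in DevmemValidateParams(): uiAlign & (uiAlign - 1) is zero exactly when at most one bit is set. The helper name IsPowerOfTwoAlign and the example values are hypothetical.]

/* Hypothetical: e.g. 0x1000 & 0x0FFF == 0 (accepted), while
 * 0x3000 & 0x2FFF == 0x2000 (rejected as an invalid alignment). */
static IMG_BOOL IsPowerOfTwoAlign(IMG_DEVMEM_ALIGN_T uiAlign)
{
	return (IMG_BOOL)((uiAlign & (uiAlign - 1)) == 0);
}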
*/ -+ if (PVRSRV_CHECK_ZERO_ON_ALLOC(*puiFlags) || -+ PVRSRV_CHECK_POISON_ON_ALLOC(*puiFlags) || -+#if defined(DEBUG) -+ PVRSRV_CHECK_POISON_ON_FREE(*puiFlags) || -+#endif -+ PVRSRV_CHECK_CPU_WRITEABLE(*puiFlags)) -+ { -+ (*puiFlags) |= PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_READABLE; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ Allocate and init an import structure -+ */ -+IMG_INTERNAL -+PVRSRV_ERROR DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection, -+ DEVMEM_IMPORT **ppsImport) -+{ -+ DEVMEM_IMPORT *psImport; -+ PVRSRV_ERROR eError; -+ -+ psImport = OSAllocMem(sizeof(*psImport)); -+ PVR_RETURN_IF_FALSE(psImport != NULL, PVRSRV_ERROR_OUT_OF_MEMORY); -+ -+ /* Setup some known bad values for things we don't have yet */ -+ psImport->sDeviceImport.hReservation = LACK_OF_RESERVATION_POISON; -+ psImport->sDeviceImport.hMapping = LACK_OF_MAPPING_POISON; -+ psImport->sDeviceImport.psHeap = NULL; -+ psImport->sDeviceImport.bMapped = IMG_FALSE; -+ -+ eError = OSLockCreate(&psImport->sDeviceImport.hLock); -+ PVR_GOTO_IF_ERROR(eError, failDIOSLockCreate); -+ -+ psImport->sCPUImport.hOSMMapData = NULL; -+ psImport->sCPUImport.pvCPUVAddr = NULL; -+ -+ eError = OSLockCreate(&psImport->sCPUImport.hLock); -+ PVR_GOTO_IF_ERROR(eError, failCIOSLockCreate); -+ -+ /* Set up common elements */ -+ psImport->hDevConnection = hDevConnection; -+ -+ /* Setup properties */ -+ psImport->uiProperties = 0; -+ -+ /* Setup refcounts */ -+ psImport->sDeviceImport.ui32RefCount = 0; -+ psImport->sCPUImport.ui32RefCount = 0; -+ OSAtomicWrite(&psImport->hRefCount, 0); -+ -+ /* Create the lock */ -+ eError = OSLockCreate(&psImport->hLock); -+ PVR_GOTO_IF_ERROR(eError, failILockAlloc); -+ -+ *ppsImport = psImport; -+ -+ return PVRSRV_OK; -+ -+failILockAlloc: -+ OSLockDestroy(psImport->sCPUImport.hLock); -+failCIOSLockCreate: -+ OSLockDestroy(psImport->sDeviceImport.hLock); -+failDIOSLockCreate: -+ OSFreeMem(psImport); -+ PVR_ASSERT(eError != PVRSRV_OK); -+ -+ return eError; -+} -+ -+/* -+ Initialise the import structure -+ */ -+IMG_INTERNAL -+void DevmemImportStructInit(DEVMEM_IMPORT *psImport, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_HANDLE hPMR, -+ DEVMEM_PROPERTIES_T uiProperties) -+{ -+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", -+ __func__, -+ psImport, -+ 0, -+ 1); -+ -+ psImport->uiSize = uiSize; -+ psImport->uiAlign = uiAlign; -+ psImport->uiFlags = uiFlags; -+ psImport->hPMR = hPMR; -+ psImport->uiProperties = uiProperties; -+ OSAtomicWrite(&psImport->hRefCount, 1); -+} -+ -+/* Allocate the requested device virtual address region -+ * from the heap */ -+static PVRSRV_ERROR DevmemReserveVARange(DEVMEM_HEAP *psHeap, -+ DEVMEM_SIZE_T uiSize, -+ IMG_UINT uiAlign, -+ RA_LENGTH_T *puiAllocatedSize, -+ IMG_UINT64 ui64OptionalMapAddress) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Allocate space in the VM */ -+ eError = RA_Alloc_Range(psHeap->psQuantizedVMRA, -+ uiSize, -+ 0, -+ uiAlign, -+ ui64OptionalMapAddress, -+ puiAllocatedSize); -+ -+ if (PVRSRV_OK != eError) -+ { -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ if ((eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) || -+ (eError == PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL)) -+ { -+ PVRSRV_ERROR eErr; -+ eErr = BridgePVRSRVStatsUpdateOOMStat(GetBridgeHandle(psHeap->psCtx->hDevConnection), -+ PVRSRV_DEVICE_STAT_TYPE_INVALID_VIRTMEM, -+ OSGetCurrentProcessID()); -+ PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVStatsUpdateOOMStat"); -+ } -+#endif -+ return eError; -+ } -+ -+ /* No reason for the allocated 
virtual size to be different from -+ the PMR's size */ -+ PVR_ASSERT(*puiAllocatedSize == uiSize); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ Map an import to the device -+ */ -+IMG_INTERNAL -+PVRSRV_ERROR DevmemImportStructDevMap(DEVMEM_HEAP *psHeap, -+ IMG_BOOL bMap, -+ DEVMEM_IMPORT *psImport, -+ IMG_UINT64 ui64OptionalMapAddress) -+{ -+ DEVMEM_DEVICE_IMPORT *psDeviceImport; -+ RA_BASE_T uiAllocatedAddr; -+ RA_LENGTH_T uiAllocatedSize; -+ IMG_DEV_VIRTADDR sBase; -+ PVRSRV_ERROR eError; -+ IMG_UINT uiAlign; -+ IMG_BOOL bDestroyed = IMG_FALSE; -+ -+ /* Round the provided import alignment to the configured heap alignment */ -+ uiAlign = 1ULL << psHeap->uiLog2ImportAlignment; -+ uiAlign = PVR_ALIGN(psImport->uiAlign, uiAlign); -+ -+ psDeviceImport = &psImport->sDeviceImport; -+ -+ OSLockAcquire(psDeviceImport->hLock); -+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", -+ __func__, -+ psImport, -+ psDeviceImport->ui32RefCount, -+ psDeviceImport->ui32RefCount+1); -+ -+ if (psDeviceImport->ui32RefCount++ == 0) -+ { -+ DevmemImportStructAcquire(psImport); -+ -+ OSAtomicIncrement(&psHeap->hImportCount); -+ -+ if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags)) -+ { -+ /* SVM (shared virtual memory) imports or allocations always -+ need to acquire CPU virtual address first as address is -+ used to map the allocation into the device virtual address -+ space; i.e. the virtual address of the allocation for both -+ the CPU/GPU must be identical. */ -+ eError = DevmemImportStructDevMapSVM(psHeap, -+ psImport, -+ uiAlign, -+ &ui64OptionalMapAddress); -+ PVR_GOTO_IF_ERROR(eError, failVMRAAlloc); -+ } -+ -+ if (ui64OptionalMapAddress == 0) -+ { -+ /* If heap is _completely_ managed by USER or KERNEL, we shouldn't -+ * be here, as this is RA manager code-path */ -+ if (psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER || -+ psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_KERNEL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER ? -+ "%s: Heap is user managed, please use PVRSRVMapToDeviceAddress().": -+ "%s: Heap is kernel managed, use right allocation flags (e.g. SVM).", -+ __func__)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc); -+ } -+ -+ if (psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_UNKNOWN) -+ { -+ /* Only set the heap manager (to RA) at first map when heap manager -+ * is unknown. 
It might be a dual heap (both, user and RA managed), -+ * in which case heap manager is set at creation time */ -+ psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_RA; -+ } -+ -+ /* Allocate space in the VM */ -+ eError = RA_Alloc(psHeap->psQuantizedVMRA, -+ psImport->uiSize, -+ RA_NO_IMPORT_MULTIPLIER, -+ 0, /* flags: this RA doesn't use flags*/ -+ uiAlign, -+ "Virtual_Alloc", -+ &uiAllocatedAddr, -+ &uiAllocatedSize, -+ NULL /* don't care about per-import priv data */ -+ ); -+ if (PVRSRV_OK != eError) -+ { -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ if (eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) -+ { -+ PVRSRV_ERROR eErr; -+ eErr = BridgePVRSRVStatsUpdateOOMStat(GetBridgeHandle(psHeap->psCtx->hDevConnection), -+ PVRSRV_DEVICE_STAT_TYPE_OOM_VIRTMEM_COUNT, -+ OSGetCurrentProcessID()); -+ PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVStatsUpdateOOMStat"); -+ } -+#endif -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM, failVMRAAlloc); -+ } -+ -+ /* No reason for the allocated virtual size to be different from -+ the PMR's size */ -+ PVR_ASSERT(uiAllocatedSize == psImport->uiSize); -+ -+ sBase.uiAddr = uiAllocatedAddr; -+ -+ } -+ else -+ { -+ IMG_UINT64 ui64ValidEndAddr; -+ -+ /* Ensure supplied ui64OptionalMapAddress is within heap range */ -+ ui64ValidEndAddr = psHeap->sBaseAddress.uiAddr + psHeap->uiSize; -+ if ((ui64OptionalMapAddress + psImport->uiSize > ui64ValidEndAddr) || -+ (ui64OptionalMapAddress < psHeap->sBaseAddress.uiAddr)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: ui64OptionalMapAddress %p is outside of heap limits <%p:%p>." -+ , __func__ -+ , (void*)(uintptr_t)ui64OptionalMapAddress -+ , (void*)(uintptr_t)psHeap->sBaseAddress.uiAddr -+ , (void*)(uintptr_t)ui64ValidEndAddr)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc); -+ } -+ -+ switch (psHeap->ui32HeapManagerFlags) -+ { -+ case DEVMEM_HEAP_MANAGER_UNKNOWN: -+ /* DEVMEM_HEAP_MANAGER_USER can apply to _any_ heap and can only -+ * be determined here. This heap type transitions from -+ * DEVMEM_HEAP_MANAGER_UNKNOWN to DEVMEM_HEAP_MANAGER_USER on -+ * 1st alloc. */ -+ psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_USER; -+ break; -+ -+ case DEVMEM_HEAP_MANAGER_USER: -+ case DEVMEM_HEAP_MANAGER_KERNEL: -+ if (! psHeap->uiSize) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER ? -+ "%s: Heap DEVMEM_HEAP_MANAGER_USER is disabled.": -+ "%s: Heap DEVMEM_HEAP_MANAGER_KERNEL is disabled." 
-+ , __func__)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_HEAP, failVMRAAlloc); -+ } -+ break; -+ -+ case DEVMEM_HEAP_MANAGER_DUAL_USER_RA: -+ /* When the heap is dual managed, ensure supplied ui64OptionalMapAddress -+ * and import size are within heap address space range */ -+ if (ui64OptionalMapAddress + psImport->uiSize <= -+ psHeap->sBaseAddress.uiAddr + psHeap->uiReservedRegionSize) -+ { -+ break; -+ } -+ else -+ { -+ /* Allocate requested VM range */ -+ eError = DevmemReserveVARange(psHeap, -+ psImport->uiSize, -+ uiAlign, -+ &uiAllocatedSize, -+ ui64OptionalMapAddress); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_VA_ALLOC_FAILED, failVMRAAlloc); -+ } -+ -+ } -+ break; -+ case DEVMEM_HEAP_MANAGER_RA: -+ /* Allocate requested VM range */ -+ eError = DevmemReserveVARange(psHeap, -+ psImport->uiSize, -+ uiAlign, -+ &uiAllocatedSize, -+ ui64OptionalMapAddress); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_VA_ALLOC_FAILED, failVMRAAlloc); -+ } -+ break; -+ -+ default: -+ break; -+ } -+ -+ if (ui64OptionalMapAddress & ((1 << psHeap->uiLog2Quantum) - 1)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid address to map to. Please provide an " -+ "address aligned to a page multiple of the heap." -+ , __func__)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc); -+ } -+ -+ if (psImport->uiSize & ((1 << psHeap->uiLog2Quantum) - 1)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid heap to map to. " -+ "Please choose a heap that can handle smaller page sizes." -+ , __func__)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc); -+ } -+ -+ uiAllocatedAddr = ui64OptionalMapAddress; -+ uiAllocatedSize = psImport->uiSize; -+ sBase.uiAddr = uiAllocatedAddr; -+ } -+ -+ if (psHeap->bPremapped) -+ { -+ /* No virtual address reservation and mapping are required for -+ * memory that is already pre-mapped e.g. 
FW heaps in VZ configs */ -+ psDeviceImport->hReservation = LACK_OF_RESERVATION_POISON; -+ psDeviceImport->hMapping = LACK_OF_MAPPING_POISON; -+ } -+ else -+ { -+ if (bMap) -+ { -+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags; -+ uiMapFlags = psImport->uiFlags & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK; -+ -+ eError = BridgeDevmemIntReserveRangeAndMapPMR(GetBridgeHandle(psHeap->psCtx->hDevConnection), -+ psHeap->hDevMemServerHeap, -+ sBase, -+ uiAllocatedSize, -+ psImport->hPMR, -+ uiMapFlags, -+ &psDeviceImport->hMapping); -+ PVR_GOTO_IF_ERROR(eError, failReserve); -+ -+ psDeviceImport->hReservation = LACK_OF_RESERVATION_POISON; -+ psDeviceImport->bMapped = IMG_TRUE; -+ } -+ else -+ { -+ /* Setup page tables for the allocated VM space */ -+ eError = BridgeDevmemIntReserveRange(GetBridgeHandle(psHeap->psCtx->hDevConnection), -+ psHeap->hDevMemServerHeap, -+ sBase, -+ uiAllocatedSize, -+ &psDeviceImport->hReservation); -+ PVR_GOTO_IF_ERROR(eError, failReserve); -+ } -+ } -+ -+ /* Setup device mapping specific parts of the mapping info */ -+ psDeviceImport->sDevVAddr.uiAddr = uiAllocatedAddr; -+ psDeviceImport->psHeap = psHeap; -+ } -+ else -+ { -+ /* -+ Check that we've been asked to map it into the -+ same heap 2nd time around -+ */ -+ if (psHeap != psDeviceImport->psHeap) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_HEAP, failParams); -+ } -+ } -+ OSLockRelease(psDeviceImport->hLock); -+ -+ return PVRSRV_OK; -+ -+failReserve: -+ if (ui64OptionalMapAddress == 0) -+ { -+ RA_Free(psHeap->psQuantizedVMRA, -+ uiAllocatedAddr); -+ } -+failVMRAAlloc: -+ if ((ui64OptionalMapAddress) && PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags)) -+ { -+ DevmemImportStructDevUnmapSVM(psHeap, psImport); -+ } -+ bDestroyed = DevmemImportStructRelease(psImport); -+ OSAtomicDecrement(&psHeap->hImportCount); -+failParams: -+ if (!bDestroyed) -+ { -+ psDeviceImport->ui32RefCount--; -+ OSLockRelease(psDeviceImport->hLock); -+ } -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+/* -+ Unmap an import from the Device -+ */ -+IMG_INTERNAL -+IMG_BOOL DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEM_DEVICE_IMPORT *psDeviceImport; -+ -+ psDeviceImport = &psImport->sDeviceImport; -+ -+ OSLockAcquire(psDeviceImport->hLock); -+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", -+ __func__, -+ psImport, -+ psDeviceImport->ui32RefCount, -+ psDeviceImport->ui32RefCount-1); -+ -+ if (--psDeviceImport->ui32RefCount == 0) -+ { -+ DEVMEM_HEAP *psHeap = psDeviceImport->psHeap; -+ -+ if (!psHeap->bPremapped) -+ { -+ if (psDeviceImport->bMapped) -+ { -+ PVR_ASSERT(psDeviceImport->hReservation == LACK_OF_RESERVATION_POISON); -+ -+ eError = DestroyServerResource(psImport->hDevConnection, -+ NULL, -+ BridgeDevmemIntUnreserveRangeAndUnmapPMR, -+ psDeviceImport->hMapping); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ } -+ else -+ { -+ eError = DestroyServerResource(psImport->hDevConnection, -+ NULL, -+ BridgeDevmemIntUnreserveRange, -+ psDeviceImport->hReservation); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ } -+ -+ } -+ -+ psDeviceImport->bMapped = IMG_FALSE; -+ psDeviceImport->hMapping = LACK_OF_MAPPING_POISON; -+ psDeviceImport->hReservation = LACK_OF_RESERVATION_POISON; -+ -+ /* DEVMEM_HEAP_MANAGER_RA can also come from a dual managed heap in which case, -+ we need to check if the allocated VA falls within RA managed range */ -+ if ((psHeap->ui32HeapManagerFlags & DEVMEM_HEAP_MANAGER_RA) && -+ psDeviceImport->sDevVAddr.uiAddr >= (psHeap->sBaseAddress.uiAddr + psHeap->uiReservedRegionSize) && -+ 
psDeviceImport->sDevVAddr.uiAddr < (psHeap->sBaseAddress.uiAddr + psHeap->uiSize)) -+ { -+ RA_Free(psHeap->psQuantizedVMRA, psDeviceImport->sDevVAddr.uiAddr); -+ } -+ -+ if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags)) -+ { -+ DevmemImportStructDevUnmapSVM(psHeap, psImport); -+ } -+ -+ OSLockRelease(psDeviceImport->hLock); -+ -+ DevmemImportStructRelease(psImport); -+ -+ OSAtomicDecrement(&psHeap->hImportCount); -+ -+ return IMG_TRUE; -+ } -+ else -+ { -+ OSLockRelease(psDeviceImport->hLock); -+ return IMG_FALSE; -+ } -+} -+ -+/* -+ Map an import into the CPU -+ */ -+IMG_INTERNAL -+PVRSRV_ERROR DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEM_CPU_IMPORT *psCPUImport; -+ size_t uiMappingLength; -+ -+ psCPUImport = &psImport->sCPUImport; -+ -+ OSLockAcquire(psCPUImport->hLock); -+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", -+ __func__, -+ psImport, -+ psCPUImport->ui32RefCount, -+ psCPUImport->ui32RefCount+1); -+ -+ if (psCPUImport->ui32RefCount++ == 0) -+ { -+ DevmemImportStructAcquire(psImport); -+ -+ eError = OSMMapPMR(GetBridgeHandle(psImport->hDevConnection), -+ psImport->hPMR, -+ psImport->uiSize, -+ psImport->uiFlags, -+ &psCPUImport->hOSMMapData, -+ &psCPUImport->pvCPUVAddr, -+ &uiMappingLength); -+ PVR_GOTO_IF_ERROR(eError, failMap); -+ -+ /* MappingLength might be rounded up to page size */ -+ PVR_ASSERT(uiMappingLength >= psImport->uiSize); -+ } -+ OSLockRelease(psCPUImport->hLock); -+ -+ return PVRSRV_OK; -+ -+failMap: -+ psCPUImport->ui32RefCount--; -+ if (!DevmemImportStructRelease(psImport)) -+ { -+ OSLockRelease(psCPUImport->hLock); -+ } -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+/* -+ Unmap an import from the CPU -+ */ -+IMG_INTERNAL -+void DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport) -+{ -+ DEVMEM_CPU_IMPORT *psCPUImport; -+ -+ psCPUImport = &psImport->sCPUImport; -+ -+ OSLockAcquire(psCPUImport->hLock); -+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", -+ __func__, -+ psImport, -+ psCPUImport->ui32RefCount, -+ psCPUImport->ui32RefCount-1); -+ -+ if (--psCPUImport->ui32RefCount == 0) -+ { -+ /* psImport->uiSize is a 64-bit quantity whereas the 5th -+ * argument to OSUnmapPMR is a 32-bit quantity on 32-bit systems -+ * hence a compiler warning of implicit cast and loss of data. -+ * Added explicit cast and assert to remove warning. -+ */ -+#if defined(__linux__) && defined(__i386__) -+ PVR_ASSERT(psImport->uiSizehDevConnection), -+ psImport->hPMR, -+ psCPUImport->hOSMMapData, -+ psCPUImport->pvCPUVAddr, -+ (size_t)psImport->uiSize); -+ -+ psCPUImport->hOSMMapData = NULL; -+ psCPUImport->pvCPUVAddr = NULL; -+ -+ OSLockRelease(psCPUImport->hLock); -+ -+ DevmemImportStructRelease(psImport); -+ } -+ else -+ { -+ OSLockRelease(psCPUImport->hLock); -+ } -+} -diff --git a/drivers/gpu/drm/img-rogue/devicemem_utils.h b/drivers/gpu/drm/img-rogue/devicemem_utils.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/devicemem_utils.h -@@ -0,0 +1,582 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device Memory Management internal utility functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Utility functions used internally by device memory management -+ code. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef DEVICEMEM_UTILS_H -+#define DEVICEMEM_UTILS_H -+ -+#include "devicemem.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+#include "pvr_debug.h" -+#include "allocmem.h" -+#include "ra.h" -+#include "osfunc.h" -+#include "lock.h" -+#include "osmmap.h" -+#include "pvrsrv_memallocflags_internal.h" -+ -+#define DEVMEM_HEAPNAME_MAXLENGTH 160 -+ -+/* -+ * VA heap size should be at least OS page size. This check is validated in the DDK. -+ */ -+#define DEVMEM_HEAP_MINIMUM_SIZE 0x10000 /* 64KB is MAX anticipated OS page size */ -+ -+#if defined(DEVMEM_DEBUG) && defined(REFCOUNT_DEBUG) -+#define DEVMEM_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_ERROR, __FILE__, __LINE__, fmt, __VA_ARGS__) -+#else -+#define DEVMEM_REFCOUNT_PRINT(fmt, ...) -+#endif -+ -+/* If we need a "hMapping" but we don't have a server-side mapping, we poison -+ * the entry with this value so that it's easily recognised in the debugger. -+ * Note that this is potentially a valid handle, but then so is NULL, which is -+ * no better, indeed worse, as it's not obvious in the debugger. The value -+ * doesn't matter. We _never_ use it (and because it's valid, we never assert -+ * it isn't this) but it's nice to have a value in the source code that we can -+ * grep for if things go wrong. 
-+ */ -+#define LACK_OF_MAPPING_POISON ((IMG_HANDLE)0x6116dead) -+#define LACK_OF_RESERVATION_POISON ((IMG_HANDLE)0x7117dead) -+ -+#define DEVICEMEM_HISTORY_ALLOC_INDEX_NONE 0xFFFFFFFF -+ -+struct DEVMEM_CONTEXT_TAG -+{ -+ -+ SHARED_DEV_CONNECTION hDevConnection; -+ -+ /* Number of heaps that have been created in this context -+ * (regardless of whether they have allocations) -+ */ -+ IMG_UINT32 uiNumHeaps; -+ -+ /* Each "DEVMEM_CONTEXT" has a counterpart in the server, which -+ * is responsible for handling the mapping into device MMU. -+ * We have a handle to that here. -+ */ -+ IMG_HANDLE hDevMemServerContext; -+ -+ /* Number of automagically created heaps in this context, -+ * i.e. those that are born at context creation time from the -+ * chosen "heap config" or "blueprint" -+ */ -+ IMG_UINT32 uiAutoHeapCount; -+ -+ /* Pointer to array of such heaps */ -+ struct DEVMEM_HEAP_TAG **ppsAutoHeapArray; -+ -+ /* The cache line size for use when allocating memory, -+ * as it is not queryable on the client side -+ */ -+ IMG_UINT32 ui32CPUCacheLineSize; -+ -+ /* Private data handle for device specific data */ -+ IMG_HANDLE hPrivData; -+}; -+ -+/* Flags that record how a heaps virtual address space is managed. */ -+#define DEVMEM_HEAP_MANAGER_UNKNOWN 0 -+/* Heap VAs assigned by the client of Services APIs, heap's RA not used at all. */ -+#define DEVMEM_HEAP_MANAGER_USER (1U << 0) -+/* Heap VAs managed by the OSs kernel, VA from CPU mapping call used */ -+#define DEVMEM_HEAP_MANAGER_KERNEL (1U << 1) -+/* Heap VAs managed by the heap's own RA */ -+#define DEVMEM_HEAP_MANAGER_RA (1U << 2) -+/* Heap VAs managed jointly by Services and the client of Services. -+ * The reserved region of the heap is managed explicitly by the client of Services -+ * The non-reserved region of the heap is managed by the heap's own RA */ -+#define DEVMEM_HEAP_MANAGER_DUAL_USER_RA (DEVMEM_HEAP_MANAGER_USER | DEVMEM_HEAP_MANAGER_RA) -+ -+struct DEVMEM_HEAP_TAG -+{ -+ /* Name of heap - for debug and lookup purposes. */ -+ IMG_CHAR *pszName; -+ -+ /* Number of live imports in the heap */ -+ ATOMIC_T hImportCount; -+ -+ /* Base address and size of heap, required by clients due to some -+ * requesters not being full range -+ */ -+ IMG_DEV_VIRTADDR sBaseAddress; -+ DEVMEM_SIZE_T uiSize; -+ -+ DEVMEM_SIZE_T uiReservedRegionSize; /* uiReservedRegionLength in DEVMEM_HEAP_BLUEPRINT */ -+ -+ /* The heap manager, describing if the space is managed by the user, an RA, -+ * kernel or combination */ -+ IMG_UINT32 ui32HeapManagerFlags; -+ -+ /* This RA is for managing sub-allocations within the imports (PMRs) -+ * within the heap's virtual space. RA only used in DevmemSubAllocate() -+ * to track sub-allocated buffers. -+ * -+ * Resource Span - a PMR import added when the RA calls the -+ * imp_alloc CB (SubAllocImportAlloc) which returns the -+ * PMR import and size (span length). -+ * Resource - an allocation/buffer i.e. a MemDesc. Resource size represents -+ * the size of the sub-allocation. -+ */ -+ RA_ARENA *psSubAllocRA; -+ IMG_CHAR *pszSubAllocRAName; -+ -+ /* The psQuantizedVMRA is for the coarse allocation (PMRs) of virtual -+ * space from the heap. -+ * -+ * Resource Span - the heap's VM space from base to base+length, -+ * only one is added at heap creation. -+ * Resource - a PMR import associated with the heap. Dynamic number -+ * as memory is allocated/freed from or mapped/unmapped to -+ * the heap. Resource size follows PMR logical size. 
-+ */ -+ RA_ARENA *psQuantizedVMRA; -+ IMG_CHAR *pszQuantizedVMRAName; -+ -+ /* We also need to store a copy of the quantum size in order to feed -+ * this down to the server. -+ */ -+ IMG_UINT32 uiLog2Quantum; -+ -+ /* Store a copy of the minimum import alignment */ -+ IMG_UINT32 uiLog2ImportAlignment; -+ -+ /* The parent memory context for this heap */ -+ struct DEVMEM_CONTEXT_TAG *psCtx; -+ -+ /* Lock to protect this structure */ -+ POS_LOCK hLock; -+ -+ /* Each "DEVMEM_HEAP" has a counterpart in the server, which is -+ * responsible for handling the mapping into device MMU. -+ * We have a handle to that here. -+ */ -+ IMG_HANDLE hDevMemServerHeap; -+ -+ /* This heap is fully allocated and premapped into the device address space. -+ * Used in virtualisation for firmware heaps of Guest and optionally Host -+ * drivers. */ -+ IMG_BOOL bPremapped; -+}; -+ -+typedef IMG_UINT32 DEVMEM_PROPERTIES_T; /*!< Typedef for Devicemem properties */ -+#define DEVMEM_PROPERTIES_EXPORTABLE (1UL<<0) /*!< Is it exportable? */ -+#define DEVMEM_PROPERTIES_IMPORTED (1UL<<1) /*!< Is it imported from another process? */ -+#define DEVMEM_PROPERTIES_SUBALLOCATABLE (1UL<<2) /*!< Is it suballocatable? */ -+#define DEVMEM_PROPERTIES_IMPORT_IS_ZEROED (1UL<<4) /*!< Is the memory fully zeroed? */ -+#define DEVMEM_PROPERTIES_IMPORT_IS_CLEAN (1UL<<5) /*!< Is the memory clean, i.e. not been used before? */ -+#define DEVMEM_PROPERTIES_SECURE (1UL<<6) /*!< Is it a special secure buffer? No CPU maps allowed! */ -+#define DEVMEM_PROPERTIES_IMPORT_IS_POISONED (1UL<<7) /*!< Is the memory fully poisoned? */ -+#define DEVMEM_PROPERTIES_NO_CPU_MAPPING (1UL<<8) /* No CPU Mapping is allowed, RW attributes -+ are further derived from allocation memory flags */ -+#define DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE (1UL<<9) /* No sparse resizing allowed, once a memory -+ layout is chosen, no change allowed later */ -+ -+ -+typedef struct DEVMEM_DEVICE_IMPORT_TAG -+{ -+ DEVMEM_HEAP *psHeap; /*!< Heap this import is bound to */ -+ IMG_DEV_VIRTADDR sDevVAddr; /*!< Device virtual address of the import */ -+ IMG_UINT32 ui32RefCount; /*!< Refcount of the device virtual address */ -+ IMG_HANDLE hReservation; /*!< Device memory reservation handle */ -+ IMG_HANDLE hMapping; /*!< Device mapping handle */ -+ IMG_BOOL bMapped; /*!< This is import mapped? 
*/ -+ POS_LOCK hLock; /*!< Lock to protect the device import */ -+} DEVMEM_DEVICE_IMPORT; -+ -+typedef struct DEVMEM_CPU_IMPORT_TAG -+{ -+ void *pvCPUVAddr; /*!< CPU virtual address of the import */ -+ IMG_UINT32 ui32RefCount; /*!< Refcount of the CPU virtual address */ -+ IMG_HANDLE hOSMMapData; /*!< CPU mapping handle */ -+ POS_LOCK hLock; /*!< Lock to protect the CPU import */ -+} DEVMEM_CPU_IMPORT; -+ -+typedef struct DEVMEM_IMPORT_TAG -+{ -+ SHARED_DEV_CONNECTION hDevConnection; -+ IMG_DEVMEM_ALIGN_T uiAlign; /*!< Alignment of the PMR */ -+ DEVMEM_SIZE_T uiSize; /*!< Size of import */ -+ ATOMIC_T hRefCount; /*!< Refcount for this import */ -+ DEVMEM_PROPERTIES_T uiProperties; /*!< Stores properties of an import like if -+ it is exportable, pinned or suballocatable */ -+ IMG_HANDLE hPMR; /*!< Handle to the PMR */ -+ PVRSRV_MEMALLOCFLAGS_T uiFlags; /*!< Flags for this import */ -+ POS_LOCK hLock; /*!< Lock to protect the import */ -+ -+ DEVMEM_DEVICE_IMPORT sDeviceImport; /*!< Device specifics of the import */ -+ DEVMEM_CPU_IMPORT sCPUImport; /*!< CPU specifics of the import */ -+} DEVMEM_IMPORT; -+ -+typedef struct DEVMEM_DEVICE_MEMDESC_TAG -+{ -+ IMG_DEV_VIRTADDR sDevVAddr; /*!< Device virtual address of the allocation */ -+ IMG_UINT32 ui32RefCount; /*!< Refcount of the device virtual address */ -+ POS_LOCK hLock; /*!< Lock to protect device memdesc */ -+} DEVMEM_DEVICE_MEMDESC; -+ -+typedef struct DEVMEM_CPU_MEMDESC_TAG -+{ -+ void *pvCPUVAddr; /*!< CPU virtual address of the import */ -+ IMG_UINT32 ui32RefCount; /*!< Refcount of the device CPU address */ -+ POS_LOCK hLock; /*!< Lock to protect CPU memdesc */ -+} DEVMEM_CPU_MEMDESC; -+ -+struct DEVMEM_MEMDESC_TAG -+{ -+ DEVMEM_IMPORT *psImport; /*!< Import this memdesc is on */ -+ IMG_DEVMEM_OFFSET_T uiOffset; /*!< Offset into import where our allocation starts */ -+ IMG_DEVMEM_SIZE_T uiAllocSize; /*!< Size of the allocation */ -+ ATOMIC_T hRefCount; /*!< Refcount of the memdesc */ -+ POS_LOCK hLock; /*!< Lock to protect memdesc */ -+ IMG_HANDLE hPrivData; -+ -+ DEVMEM_DEVICE_MEMDESC sDeviceMemDesc; /*!< Device specifics of the memdesc */ -+ DEVMEM_CPU_MEMDESC sCPUMemDesc; /*!< CPU specifics of the memdesc */ -+ -+ IMG_CHAR szText[DEVMEM_ANNOTATION_MAX_LEN]; /*!< Annotation for this memdesc */ -+ -+ IMG_UINT32 ui32AllocationIndex; -+ -+#if defined(DEBUG) -+ IMG_BOOL bPoisonOnFree; -+#endif -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ IMG_HANDLE hRIHandle; /*!< Handle to RI information */ -+#endif -+}; -+ -+/* The physical descriptor used to store handles and information of device -+ * physical allocations. -+ */ -+struct DEVMEMX_PHYS_MEMDESC_TAG -+{ -+ IMG_UINT32 uiNumPages; /*!< Number of pages that the import has*/ -+ IMG_UINT32 uiLog2PageSize; /*!< Page size */ -+ ATOMIC_T hRefCount; /*!< Refcount of the memdesc */ -+ PVRSRV_MEMALLOCFLAGS_T uiFlags; /*!< Flags for this import */ -+ IMG_HANDLE hPMR; /*!< Handle to the PMR */ -+ DEVMEM_CPU_IMPORT sCPUImport; /*!< CPU specifics of the memdesc */ -+ SHARED_DEV_CONNECTION hConnection; /*!< Services connection for the server */ -+ void *pvUserData; /*!< User data */ -+}; -+ -+/* The virtual descriptor used to store handles and information of a device -+ * virtual range and the mappings to it. 
-+ */ -+struct DEVMEMX_VIRT_MEMDESC_TAG -+{ -+ IMG_UINT32 uiNumPages; /*!< Number of pages that the import has*/ -+ PVRSRV_MEMALLOCFLAGS_T uiFlags; /*!< Flags for this import */ -+ DEVMEMX_PHYSDESC **apsPhysDescTable; /*!< Table to store links to physical descs */ -+ DEVMEM_DEVICE_IMPORT sDeviceImport; /*!< Device specifics of the memdesc */ -+ -+ IMG_CHAR szText[DEVMEM_ANNOTATION_MAX_LEN]; /*!< Annotation for this virt memdesc */ -+ IMG_UINT32 ui32AllocationIndex; /*!< To track mappings in this range */ -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ IMG_HANDLE hRIHandle; /*!< Handle to RI information */ -+#endif -+}; -+ -+#define DEVICEMEM_UTILS_NO_ADDRESS 0 -+ -+/****************************************************************************** -+@Function DevmemValidateParams -+@Description Check if flags are conflicting and if align is a size multiple. -+ -+@Input uiSize Size of the import. -+@Input uiAlign Alignment of the import. -+@Input puiFlags Pointer to the flags for the import. -+@return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize, -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ PVRSRV_MEMALLOCFLAGS_T *puiFlags); -+ -+/****************************************************************************** -+@Function DevmemImportStructAlloc -+@Description Allocates memory for an import struct. Does not allocate a PMR! -+ Create locks for CPU and Devmem mappings. -+ -+@Input hDevConnection Connection to use for calls from the import. -+@Input ppsImport The import to allocate. -+@return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection, -+ DEVMEM_IMPORT **ppsImport); -+ -+/****************************************************************************** -+@Function DevmemImportStructInit -+@Description Initialises the import struct with the given parameters. -+ Set it's refcount to 1! -+ -+@Input psImport The import to initialise. -+@Input uiSize Size of the import. -+@Input uiAlign Alignment of allocations in the import. -+@Input uiMapFlags -+@Input hPMR Reference to the PMR of this import struct. -+@Input uiProperties Properties of the import. Is it exportable, -+ imported, suballocatable? -+******************************************************************************/ -+void DevmemImportStructInit(DEVMEM_IMPORT *psImport, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags, -+ IMG_HANDLE hPMR, -+ DEVMEM_PROPERTIES_T uiProperties); -+ -+/****************************************************************************** -+@Function DevmemImportStructDevMap -+@Description NEVER call after the last DevmemMemDescRelease() -+ Maps the PMR referenced by the import struct to the device's -+ virtual address space. -+ Does nothing but increase the cpu mapping refcount if the -+ import struct was already mapped. -+ -+@Input psHeap The heap to map to. -+@Input bMap Caller can choose if the import should be really -+ mapped in the page tables or if just a virtual range -+ should be reserved and the refcounts increased. -+@Input psImport The import we want to map. -+@Input uiOptionalMapAddress An optional address to map to. -+ Pass DEVICEMEM_UTILS_NOADDRESS if not used. 
-+@return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR DevmemImportStructDevMap(DEVMEM_HEAP *psHeap, -+ IMG_BOOL bMap, -+ DEVMEM_IMPORT *psImport, -+ IMG_UINT64 uiOptionalMapAddress); -+ -+/****************************************************************************** -+@Function DevmemImportStructDevUnmap -+@Description Unmaps the PMR referenced by the import struct from the -+ device's virtual address space. -+ If this was not the last remaining CPU mapping on the import -+ struct only the cpu mapping refcount is decreased. -+@return A boolean to signify if the import was unmapped. -+******************************************************************************/ -+IMG_BOOL DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport); -+ -+/****************************************************************************** -+@Function DevmemImportStructCPUMap -+@Description NEVER call after the last DevmemMemDescRelease() -+ Maps the PMR referenced by the import struct to the CPU's -+ virtual address space. -+ Does nothing but increase the cpu mapping refcount if the -+ import struct was already mapped. -+@return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport); -+ -+/****************************************************************************** -+@Function DevmemImportStructCPUUnmap -+@Description Unmaps the PMR referenced by the import struct from the CPU's -+ virtual address space. -+ If this was not the last remaining CPU mapping on the import -+ struct only the cpu mapping refcount is decreased. -+******************************************************************************/ -+void DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport); -+ -+ -+/****************************************************************************** -+@Function DevmemImportStructAcquire -+@Description Acquire an import struct by increasing it's refcount. -+******************************************************************************/ -+void DevmemImportStructAcquire(DEVMEM_IMPORT *psImport); -+ -+/****************************************************************************** -+@Function DevmemImportStructRelease -+@Description Reduces the refcount of the import struct. -+ Destroys the import in the case it was the last reference. -+ Destroys underlying PMR if this import was the last reference -+ to it. -+@return A boolean to signal if the import was destroyed. True = yes. -+******************************************************************************/ -+IMG_BOOL DevmemImportStructRelease(DEVMEM_IMPORT *psImport); -+ -+/****************************************************************************** -+@Function DevmemImportDiscard -+@Description Discard a created, but uninitialised import structure. -+ This must only be called before DevmemImportStructInit -+ after which DevmemImportStructRelease must be used to -+ "free" the import structure. -+******************************************************************************/ -+void DevmemImportDiscard(DEVMEM_IMPORT *psImport); -+ -+/****************************************************************************** -+@Function DevmemMemDescAlloc -+@Description Allocates a MemDesc and create it's various locks. -+ Zero the allocated memory. 
-+@return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc); -+ -+#if defined(DEBUG) -+/****************************************************************************** -+@Function DevmemMemDescSetPoF -+@Description Sets the Poison on Free flag to true for this MemDesc if the -+ given MemAllocFlags have the Poison on Free bit set. -+ Poison on Free is a debug only feature. -+******************************************************************************/ -+void DevmemMemDescSetPoF(DEVMEM_MEMDESC *psMemDesc, PVRSRV_MEMALLOCFLAGS_T uiFlags); -+#endif -+ -+/****************************************************************************** -+@Function DevmemMemDescInit -+@Description Sets the given offset and import struct fields in the MemDesc. -+ Initialises refcount to 1 and other values to 0. -+ -+@Input psMemDesc MemDesc to initialise. -+@Input uiOffset Offset in the import structure. -+@Input psImport Import the MemDesc is on. -+@Input uiAllocSize Size of the allocation -+******************************************************************************/ -+void DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ DEVMEM_IMPORT *psImport, -+ IMG_DEVMEM_SIZE_T uiAllocSize); -+ -+/****************************************************************************** -+@Function DevmemMemDescAcquire -+@Description Acquires the MemDesc by increasing it's refcount. -+******************************************************************************/ -+void DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc); -+ -+/****************************************************************************** -+@Function DevmemMemDescRelease -+@Description Releases the MemDesc by reducing it's refcount. -+ Destroy the MemDesc if it's recount is 0. -+ Destroy the import struct the MemDesc is on if that was the -+ last MemDesc on the import, probably following the destruction -+ of the underlying PMR. -+@return A boolean to signal if the MemDesc was destroyed. True = yes. -+******************************************************************************/ -+IMG_BOOL DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc); -+ -+/****************************************************************************** -+@Function DevmemMemDescDiscard -+@Description Discard a created, but uninitialised MemDesc structure. -+ This must only be called before DevmemMemDescInit after -+ which DevmemMemDescRelease must be used to "free" the -+ MemDesc structure. -+******************************************************************************/ -+void DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc); -+ -+ -+/****************************************************************************** -+@Function GetImportProperties -+@Description Atomically read psImport->uiProperties -+ It's possible that another thread modifies uiProperties -+ immediately after this function returns, making its result -+ stale. So, it's recommended to use this function only to -+ check if certain non-volatile flags were set. 
-+******************************************************************************/ -+static INLINE DEVMEM_PROPERTIES_T GetImportProperties(DEVMEM_IMPORT *psImport) -+{ -+ DEVMEM_PROPERTIES_T uiProperties; -+ -+ OSLockAcquire(psImport->hLock); -+ uiProperties = psImport->uiProperties; -+ OSLockRelease(psImport->hLock); -+ return uiProperties; -+} -+ -+/****************************************************************************** -+@Function DevmemCPUMemSet -+@Description Given a CPU Mapped Devmem address, set the memory at that -+ range (address, address + size) to the uiPattern provided. -+ Flags determine the OS abstracted MemSet method to use. -+******************************************************************************/ -+static INLINE void DevmemCPUMemSet(void *pvMem, -+ IMG_UINT8 uiPattern, -+ IMG_DEVMEM_SIZE_T uiSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags) -+{ -+ if (PVRSRV_CHECK_CPU_UNCACHED(uiFlags)) -+ { -+ OSDeviceMemSet(pvMem, uiPattern, uiSize); -+ } -+ else -+ { -+ /* it's safe to use OSCachedMemSet() for cached and wc memory */ -+ OSCachedMemSet(pvMem, uiPattern, uiSize); -+ } -+} -+ -+/****************************************************************************** -+@Function DevmemCPUMapCheckImportProperties -+@Description Given a MemDesc check that the import properties are correct -+ to allow for mapping the MemDesc to the CPU. -+ Returns PVRSRV_OK on success. -+******************************************************************************/ -+static INLINE PVRSRV_ERROR DevmemCPUMapCheckImportProperties(DEVMEM_MEMDESC *psMemDesc) -+{ -+ DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psMemDesc->psImport); -+ -+#if defined(SUPPORT_SECURITY_VALIDATION) -+ if (uiProperties & DEVMEM_PROPERTIES_SECURE) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Allocation is a secure buffer. " -+ "It should not be possible to map to CPU, but for security " -+ "validation this will be allowed for testing purposes, " -+ "as long as the buffer is pinned.", -+ __func__)); -+ } -+#endif -+ -+ if (uiProperties & DEVMEM_PROPERTIES_NO_CPU_MAPPING) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: CPU Mapping is not possible on this allocation!", -+ __func__)); -+ return PVRSRV_ERROR_INVALID_MAP_REQUEST; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+#endif /* DEVICEMEM_UTILS_H */ -diff --git a/drivers/gpu/drm/img-rogue/di_common.h b/drivers/gpu/drm/img-rogue/di_common.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/di_common.h -@@ -0,0 +1,236 @@ -+/*************************************************************************/ /*! -+@File -+@Title Common types for Debug Info framework. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
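Taken together, the function headers in devicemem_utils.h describe a fixed import life-cycle: allocate the struct, initialise it (which takes the first reference), map it to the device and optionally to the CPU, then unmap and release in reverse order. The fragment below sketches that sequence using only the signatures declared above; it is illustrative rather than buildable on its own (it needs the driver tree), the heap, PMR, size and flags are assumed to come from elsewhere, and error handling is trimmed to early returns.

/* Illustrative fragment only: driving the devicemem_utils.h import life-cycle. */
#include "devicemem_utils.h"

static PVRSRV_ERROR MapImportExample(SHARED_DEV_CONNECTION hConn,
                                     DEVMEM_HEAP *psHeap,
                                     IMG_HANDLE hPMR,
                                     IMG_DEVMEM_SIZE_T uiSize,
                                     IMG_DEVMEM_ALIGN_T uiAlign,
                                     PVRSRV_MEMALLOCFLAGS_T uiFlags)
{
    DEVMEM_IMPORT *psImport;
    PVRSRV_ERROR eError;

    /* 1. Allocate the import struct (no PMR is allocated here). */
    eError = DevmemImportStructAlloc(hConn, &psImport);
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    /* 2. Initialise it; this sets the import's refcount to 1. */
    DevmemImportStructInit(psImport, uiSize, uiAlign, uiFlags, hPMR,
                           0 /* no special properties */);

    /* 3. Map into the device VA space (bMap = IMG_TRUE: real page tables,
     *    not just a reservation), letting the heap's RA pick the address. */
    eError = DevmemImportStructDevMap(psHeap, IMG_TRUE, psImport,
                                      DEVICEMEM_UTILS_NO_ADDRESS);
    if (eError != PVRSRV_OK)
    {
        DevmemImportStructRelease(psImport);
        return eError;
    }

    /* 4. Optionally map into the CPU as well. */
    eError = DevmemImportStructCPUMap(psImport);
    if (eError != PVRSRV_OK)
    {
        DevmemImportStructDevUnmap(psImport);
        DevmemImportStructRelease(psImport);
        return eError;
    }

    /* ... use psImport->sCPUImport.pvCPUVAddr / sDeviceImport.sDevVAddr ... */

    /* 5. Tear down in reverse order; the last release frees the import. */
    DevmemImportStructCPUUnmap(psImport);
    DevmemImportStructDevUnmap(psImport);
    DevmemImportStructRelease(psImport);

    return PVRSRV_OK;
}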
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef DI_COMMON_H -+#define DI_COMMON_H -+ -+#include "img_types.h" -+ -+/* Token that signals that a header should be printed. */ -+#define DI_START_TOKEN ((void *) 1) -+ -+/* This is a public handle to an entry. */ -+#ifndef DI_GROUP_DEFINED -+#define DI_GROUP_DEFINED -+typedef struct DI_GROUP DI_GROUP; -+#endif -+#ifndef DI_ENTRY_DEFINED -+#define DI_ENTRY_DEFINED -+typedef struct DI_ENTRY DI_ENTRY; -+#endif -+typedef struct OSDI_IMPL_ENTRY OSDI_IMPL_ENTRY; -+ -+/*! Debug Info entries types. */ -+typedef enum DI_ENTRY_TYPE -+{ -+ DI_ENTRY_TYPE_GENERIC, /*!< generic entry type, implements -+ start/stop/next/show iterator -+ interface */ -+ DI_ENTRY_TYPE_RANDOM_ACCESS, /*!< random access entry, implements -+ seek/read iterator interface */ -+} DI_ENTRY_TYPE; -+ -+/*! @Function DI_PFN_START -+ * -+ * @Description -+ * Start operation returns first entry and passes it to Show operation. -+ * -+ * @Input psEntry pointer to the implementation entry -+ * @InOut pui64Pos current data position in the entry -+ * -+ * @Return pointer to data that will be passed to the other iterator -+ * functions in pvData argument -+ */ -+typedef void *(*DI_PFN_START)(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos); -+ -+/*! @Function DI_PFN_STOP -+ * -+ * @Description -+ * Stop operations is called after iterator reaches end of data. -+ * -+ * If pvData was allocated in pfnStart it should be freed here. -+ * -+ * @Input psEntry pointer to the implementation entry -+ * @Input pvData pointer to data returned from pfnStart/pfnNext -+ */ -+typedef void (*DI_PFN_STOP)(OSDI_IMPL_ENTRY *psEntry, void *pvData); -+ -+/*! @Function DI_PFN_NEXT -+ * -+ * @Description -+ * Next returns next data entry and passes it to Show operation. -+ * -+ * @Input psEntry pointer to the implementation entry -+ * @Input pvData pointer to data returned from pfnStart/pfnNext -+ * @InOut pui64Pos current data position in the entry -+ */ -+typedef void *(*DI_PFN_NEXT)(OSDI_IMPL_ENTRY *psEntry, void *pvData, -+ IMG_UINT64 *pui64Pos); -+ -+/*! 
@Function DI_PFN_SHOW -+ * -+ * @Description -+ * Outputs the data element. -+ * -+ * @Input psEntry pointer to the implementation entry -+ * @Input pvData pointer to data returned from pfnStart/pfnNext -+ */ -+typedef int (*DI_PFN_SHOW)(OSDI_IMPL_ENTRY *psEntry, void *pvData); -+ -+/*! @Function DI_PFN_SEEK -+ * -+ * @Description -+ * Changes position of the entry data pointer -+ * -+ * @Input uiOffset new entry offset (absolute) -+ * @Input pvData private data provided during entry creation -+ */ -+typedef IMG_INT64 (*DI_PFN_SEEK)(IMG_UINT64 ui64Offset, void *pvData); -+ -+/*! @Function DI_PFN_READ -+ * -+ * @Description -+ * Retrieves data from the entry from position previously set by Seek. -+ * -+ * @Input pszBuffer output buffer -+ * @Input ui64Count length of the output buffer -+ * @InOut pui64Pos pointer to the current position in the entry -+ * @Input pvData private data provided during entry creation -+ */ -+typedef IMG_INT64 (*DI_PFN_READ)(IMG_CHAR *pszBuffer, IMG_UINT64 ui64Count, -+ IMG_UINT64 *pui64Pos, void *pvData); -+ -+/*! @Function DI_PFN_WRITE -+ * -+ * @Description -+ * Handle writes operation to the entry. -+ * -+ * @Input pszBuffer NUL-terminated buffer containing written data -+ * @Input ui64Count length of the data in pszBuffer (length of the buffer) -+ * @InOut pui64Pos pointer to the current position in the entry -+ * @Input pvData private data provided during entry creation -+ */ -+typedef IMG_INT64 (*DI_PFN_WRITE)(const IMG_CHAR *pszBuffer, -+ IMG_UINT64 ui64Count, IMG_UINT64 *pui64Pos, -+ void *pvData); -+ -+/*! Debug info entry iterator. -+ * -+ * This covers all entry types: GENERIC and RANDOM_ACCESS. -+ * -+ * The GENERIC entry type -+ * -+ * The GENERIC type should implement either a full set of following callbacks: -+ * pfnStart, pfnStop, pfnNext and pfnShow, or pfnShow only. If only pfnShow -+ * callback is given the framework will use default handlers in place of the -+ * other ones. -+ * -+ * e.g. for generic entry: -+ * -+ * struct sIter = { -+ * .pfnStart = StartCb, .pfnStop = StopCb, pfnNext = NextCb, -+ * .pfnShow = ShowCb -+ * }; -+ * -+ * The use case for implementing pfnShow only is if the data for the given -+ * entry is short and can be printed in one go because the pfnShow callback -+ * will be called only once. -+ * -+ * e.g. for one-shot print generic entry: -+ * -+ * struct sIter = { -+ * .pfnShow = SingleShowCb -+ * }; -+ * -+ * The DICreateEntry() function will return error if DI_ENTRY_TYPE_GENERIC -+ * type is used and invalid combination of callbacks is given. -+ * -+ * The RANDOM_ACCESS entry -+ * -+ * The RANDOM_ACCESS type should implement either both pfnSeek and pfnRead -+ * or pfnRead only callbacks. -+ * -+ * e.g. of seekable and readable random access entry: -+ * -+ * struct sIter = { -+ * .pfnSeek = SeekCb, .pfnRead = ReadCb -+ * }; -+ * -+ * The DICreateEntry() function will return error if DI_ENTRY_TYPE_RANDOM_ACCESS -+ * type is used and invalid combination of callbacks is given. -+ * -+ * Writing to file (optional) -+ * -+ * The iterator allows also to pass a pfnWrite callback that allows implementing -+ * write operation on the entry. The write operation is entry type agnostic -+ * which means that it can be defined for both GENERIC and RANDOM_ACCESS -+ * entries. -+ * -+ * e.g. for writable one-shot print generic entry -+ * -+ * struct sIter = { -+ * .pfnShow = SingleShowCb, .pfnWrite = WriteCb -+ * }; -+ */ -+typedef struct DI_ITERATOR_CB -+{ -+ /* Generic entry interface. 
*/ -+ -+ DI_PFN_START pfnStart; /*!< Starts iteration and returns first element -+ of entry's data. */ -+ DI_PFN_STOP pfnStop; /*!< Stops iteration. */ -+ DI_PFN_NEXT pfnNext; /*!< Returns next element of entry's data. */ -+ DI_PFN_SHOW pfnShow; /*!< Shows current data element of an entry. */ -+ -+ /* Optional random access entry interface. */ -+ -+ DI_PFN_SEEK pfnSeek; /*!< Sets data pointer in an entry. */ -+ DI_PFN_READ pfnRead; /*!< Reads data from an entry. */ -+ -+ /* Optional writing to entry interface. Null terminated. */ -+ -+ DI_PFN_WRITE pfnWrite; /*!< Performs write operation on an entry. */ -+ IMG_UINT32 ui32WriteLenMax; /*!< Maximum char length of entry -+ accepted for write. Includes \0 */ -+} DI_ITERATOR_CB; -+ -+#endif /* DI_COMMON_H */ -diff --git a/drivers/gpu/drm/img-rogue/di_impl_brg.c b/drivers/gpu/drm/img-rogue/di_impl_brg.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/di_impl_brg.c -@@ -0,0 +1,889 @@ -+/*************************************************************************/ /*! -+@File -+@Title OS agnostic implementation of Debug Info interface. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements osdi_impl.h API to provide access to driver's -+ debug data via pvrdebug. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
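The DI_ITERATOR_CB notes above single out the simplest configuration: a generic entry that supplies only pfnShow, so the framework installs default start/stop/next handlers and calls the show callback exactly once. A sketch of such a one-shot entry follows; the DI_PFN_SHOW signature comes from di_common.h, while DIPrintf() and DICreateEntry() belong to di_server.h, which is outside this hunk, so their use here is an assumption about the wider DDK rather than something this patch defines.

/* Sketch of a pfnShow-only generic Debug Info entry (assumes di_server.h). */
#include "di_common.h"

static int _VersionShow(OSDI_IMPL_ENTRY *psEntry, void *pvData)
{
    /* pvData is the private pointer supplied at entry-creation time. */
    const IMG_CHAR *pszBuild = pvData;

    /* DIPrintf() is declared in di_server.h, not in this hunk. */
    DIPrintf(psEntry, "build: %s\n", pszBuild);
    return 0; /* a negative return aborts the iteration */
}

static const DI_ITERATOR_CB g_sVersionIterCb = {
    .pfnShow = _VersionShow, /* pfnStart/pfnStop/pfnNext intentionally NULL */
};

/* Registration would then look roughly like:
 *   DICreateEntry("version", psGroup, &g_sVersionIterCb,
 *                 (void *) "example-build", DI_ENTRY_TYPE_GENERIC, &psEntry);
 * (the exact DICreateEntry() signature is defined in di_server.h).
 */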
-+*/ /**************************************************************************/ -+ -+#include "allocmem.h" -+#include "hash.h" -+#include "img_defs.h" -+#include "img_types.h" -+#include "lock.h" -+#include "osfunc_common.h" -+#include "osfunc.h" /* for thread */ -+#include "tlstream.h" -+#include "dllist.h" -+ -+#include "osdi_impl.h" -+#include "di_impl_brg.h" -+#include "di_impl_brg_intern.h" -+#include "pvr_dicommon.h" -+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) -+#include "pvrsrv.h" -+#endif -+ -+#define ENTRIES_TABLE_INIT_SIZE 64 -+#define STREAM_BUFFER_SIZE 0x4000 /* 16KB */ -+#define STREAM_LINE_LENGTH 512 -+ -+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) -+#define WRITER_THREAD_SLEEP_TIMEOUT 0ull -+#else -+#define WRITER_THREAD_SLEEP_TIMEOUT 28800000000ull -+#endif -+#define WRITER_THREAD_DESTROY_TIMEOUT 100000ull -+#define WRITER_THREAD_DESTROY_RETRIES 10u -+ -+#define WRITE_RETRY_COUNT 10 /* retry a write to a TL buffer 10 times */ -+#define WRITE_RETRY_WAIT_TIME 100 /* wait 10ms between write retries */ -+ -+typedef enum THREAD_STATE -+{ -+ THREAD_STATE_NULL, -+ THREAD_STATE_ALIVE, -+ THREAD_STATE_TERMINATED, -+} THREAD_STATE; -+ -+static struct DIIB_IMPL -+{ -+ HASH_TABLE *psEntriesTable; /*!< Table of entries. */ -+ POS_LOCK psEntriesLock; /*!< Protects psEntriesTable. */ -+ IMG_HANDLE hWriterThread; -+ IMG_HANDLE hWriterEventObject; -+ ATOMIC_T eThreadState; -+ -+ DLLIST_NODE sWriterQueue; -+ POS_LOCK psWriterLock; /*!< Protects sWriterQueue. */ -+} *_g_psImpl; -+ -+struct DIIB_GROUP -+{ -+ const IMG_CHAR *pszName; -+ struct DIIB_GROUP *psParentGroup; -+}; -+ -+struct DIIB_ENTRY -+{ -+ struct DIIB_GROUP *psParentGroup; -+ OSDI_IMPL_ENTRY sImplEntry; -+ DI_ITERATOR_CB sIterCb; -+ DI_ENTRY_TYPE eType; -+ IMG_CHAR pszFullPath[DI_IMPL_BRG_PATH_LEN]; -+ void *pvPrivData; -+ -+ POS_LOCK hLock; /*!< Protects access to entry's iterator. */ -+}; -+ -+struct DI_CONTEXT_TAG -+{ -+ IMG_HANDLE hStream; -+ ATOMIC_T iRefCnt; -+ IMG_BOOL bClientConnected; /*!< Indicated that the client is or is not -+ connected to the DI. */ -+}; -+ -+struct DIIB_WORK_ITEM -+{ -+ DI_CONTEXT *psContext; -+ DIIB_ENTRY *psEntry; -+ IMG_UINT64 ui64Size; -+ IMG_UINT64 ui64Offset; -+ -+ DLLIST_NODE sQueueElement; -+}; -+ -+/* Declaring function here to avoid dependencies that are introduced by -+ * including osfunc.h. */ -+IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2, -+ size_t uiSize); -+ -+/* djb2 hash function is public domain */ -+static IMG_UINT32 _Hash(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen) -+{ -+ IMG_CHAR *pszStr = pKey; -+ IMG_UINT32 ui32Hash = 5381, ui32Char; -+ -+ PVR_UNREFERENCED_PARAMETER(uKeySize); -+ PVR_UNREFERENCED_PARAMETER(uHashTabLen); -+ -+ while ((ui32Char = *pszStr++) != '\0') -+ { -+ ui32Hash = ((ui32Hash << 5) + ui32Hash) + ui32Char; /* hash * 33 + c */ -+ } -+ -+ return ui32Hash; -+} -+ -+static IMG_BOOL _Compare(size_t uKeySize, void *pKey1, void *pKey2) -+{ -+ IMG_CHAR *pszKey1 = pKey1, *pszKey2 = pKey2; -+ -+ return OSStringNCompare(pszKey1, pszKey2, uKeySize) == 0; -+} -+ -+/* ----- native callbacks interface ----------------------------------------- */ -+ -+static void _WriteWithRetires(void *pvNativeHandle, const IMG_CHAR *pszStr, -+ IMG_UINT uiLen) -+{ -+ PVRSRV_ERROR eError; -+ IMG_INT iRetry = 0; -+ IMG_UINT32 ui32Flags = TL_FLAG_NO_WRITE_FAILED; -+ -+ do -+ { -+ /* Try to write to the buffer but don't inject MOST_RECENT_WRITE_FAILED -+ * packet in case of failure because we're going to retry. 
*/ -+ eError = TLStreamWriteRetFlags(pvNativeHandle, (IMG_UINT8 *) pszStr, -+ uiLen, &ui32Flags); -+ if (eError == PVRSRV_ERROR_STREAM_FULL) -+ { -+ // wait to give the client a change to read -+ OSSleepms(WRITE_RETRY_WAIT_TIME); -+ } -+ } -+ while (eError == PVRSRV_ERROR_STREAM_FULL && iRetry++ < WRITE_RETRY_COUNT); -+ -+ /* One last try to write to the buffer. In this case upon failure -+ * a MOST_RECENT_WRITE_FAILED packet will be inject to the buffer to -+ * indicate data loss. */ -+ if (eError == PVRSRV_ERROR_STREAM_FULL) -+ { -+ eError = TLStreamWrite(pvNativeHandle, (IMG_UINT8 *) pszStr, uiLen); -+ } -+ -+ PVR_LOG_IF_ERROR(eError, "TLStreamWrite"); -+} -+ -+static void _WriteData(void *pvNativeHandle, const void *pvData, -+ IMG_UINT32 uiSize) -+{ -+ _WriteWithRetires(pvNativeHandle, pvData, uiSize); -+} -+ -+__printf(2, 0) -+static void _VPrintf(void *pvNativeHandle, const IMG_CHAR *pszFmt, -+ va_list pArgs) -+{ -+ IMG_CHAR pcBuffer[STREAM_LINE_LENGTH]; -+ IMG_UINT uiLen = OSVSNPrintf(pcBuffer, sizeof(pcBuffer) - 1, pszFmt, pArgs); -+ pcBuffer[uiLen] = '\0'; -+ -+ _WriteWithRetires(pvNativeHandle, pcBuffer, uiLen + 1); -+} -+ -+static void _Puts(void *pvNativeHandle, const IMG_CHAR *pszStr) -+{ -+ _WriteWithRetires(pvNativeHandle, pszStr, OSStringLength(pszStr) + 1); -+} -+ -+static IMG_BOOL _HasOverflowed(void *pvNativeHandle) -+{ -+ PVR_UNREFERENCED_PARAMETER(pvNativeHandle); -+ return IMG_FALSE; -+} -+ -+static OSDI_IMPL_ENTRY_CB _g_sEntryCallbacks = { -+ .pfnWrite = _WriteData, -+ .pfnVPrintf = _VPrintf, -+ .pfnPuts = _Puts, -+ .pfnHasOverflowed = _HasOverflowed, -+}; -+ -+/* ----- entry operations --------------------------------------------------- */ -+ -+static PVRSRV_ERROR _ContextUnrefAndMaybeDestroy(DI_CONTEXT *psContext) -+{ -+ if (OSAtomicDecrement(&psContext->iRefCnt) == 0) -+ { -+ TLStreamClose(psContext->hStream); -+ OSFreeMem(psContext); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static IMG_INT64 _ReadGeneric(const DI_CONTEXT *psContext, DIIB_ENTRY *psEntry) -+{ -+ IMG_INT64 iRet = 0; -+ IMG_UINT64 ui64Pos = 0; -+ DI_ITERATOR_CB *psIter = &psEntry->sIterCb; -+ OSDI_IMPL_ENTRY *psImplEntry = &psEntry->sImplEntry; -+ PVRSRV_ERROR eError; -+ -+ if (psIter->pfnStart != NULL) -+ { -+ /* this is a full sequence of the operation */ -+ void *pvData = psIter->pfnStart(psImplEntry, &ui64Pos); -+ -+ while (pvData != NULL && psContext->bClientConnected) -+ { -+ iRet = psIter->pfnShow(psImplEntry, pvData); -+ if (iRet < 0) -+ { -+ break; -+ } -+ -+ pvData = psIter->pfnNext(psImplEntry, pvData, &ui64Pos); -+ } -+ -+ psIter->pfnStop(psImplEntry, pvData); -+ } -+ else if (psIter->pfnShow != NULL) -+ { -+ /* this is a simplified sequence of the operation */ -+ iRet = psIter->pfnShow(psImplEntry, NULL); -+ } -+ -+ eError = TLStreamMarkEOS(psImplEntry->pvNative, IMG_FALSE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamMarkEOS", return_error_); -+ -+ return iRet; -+ -+return_error_: -+ return -1; -+} -+ -+static IMG_INT64 _ReadRndAccess(DIIB_ENTRY *psEntry, IMG_UINT64 ui64Count, -+ IMG_UINT64 *pui64Pos, void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT8 *pui8Buffer; -+ IMG_HANDLE hStream = psEntry->sImplEntry.pvNative; -+ -+ if (psEntry->sIterCb.pfnRead == NULL) -+ { -+ return -1; -+ } -+ -+ eError = TLStreamReserve(hStream, &pui8Buffer, ui64Count); -+ PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamReserve", return_error_); -+ -+ psEntry->sIterCb.pfnRead((IMG_CHAR *) pui8Buffer, ui64Count, pui64Pos, -+ pvData); -+ -+ eError = TLStreamCommit(hStream, ui64Count); -+ PVR_LOG_GOTO_IF_ERROR(eError, 
"TLStreamCommit", return_error_); -+ -+ eError = TLStreamMarkEOS(psEntry->sImplEntry.pvNative, IMG_FALSE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamMarkEOS", return_error_); -+ -+ return 0; -+ -+return_error_: -+ return -1; -+} -+ -+static void _WriterThread(void *pvArg) -+{ -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hEvent; -+ DLLIST_NODE *psNode; -+ -+ eError = OSEventObjectOpen(_g_psImpl->hWriterEventObject, &hEvent); -+ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen"); -+ -+#ifdef PVRSRV_FORCE_UNLOAD_IF_BAD_STATE -+ while (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK && -+ OSAtomicRead(&_g_psImpl->eThreadState) == THREAD_STATE_ALIVE) -+#else -+ while (OSAtomicRead(&_g_psImpl->eThreadState) == THREAD_STATE_ALIVE) -+#endif -+ { -+ struct DIIB_WORK_ITEM *psItem = NULL; -+ -+ OSLockAcquire(_g_psImpl->psWriterLock); -+ /* Get element from list tail so that we always get the oldest element -+ * (elements are added to head). */ -+ while ((psNode = dllist_get_prev_node(&_g_psImpl->sWriterQueue)) != NULL) -+ { -+ IMG_INT64 i64Ret; -+ DIIB_ENTRY *psEntry; -+ OSDI_IMPL_ENTRY *psImplEntry; -+ -+ dllist_remove_node(psNode); -+ OSLockRelease(_g_psImpl->psWriterLock); -+ -+ psItem = IMG_CONTAINER_OF(psNode, struct DIIB_WORK_ITEM, -+ sQueueElement); -+ -+ psEntry = psItem->psEntry; -+ psImplEntry = &psItem->psEntry->sImplEntry; -+ -+ /* if client has already disconnected we can just drop this item */ -+ if (psItem->psContext->bClientConnected) -+ { -+ -+ PVR_ASSERT(psItem->psContext->hStream != NULL); -+ -+ psImplEntry->pvNative = psItem->psContext->hStream; -+ -+ if (psEntry->eType == DI_ENTRY_TYPE_GENERIC) -+ { -+ i64Ret = _ReadGeneric(psItem->psContext, psEntry); -+ PVR_LOG_IF_FALSE(i64Ret >= 0, "generic access read operation " -+ "failed"); -+ } -+ else if (psEntry->eType == DI_ENTRY_TYPE_RANDOM_ACCESS) -+ { -+ IMG_UINT64 ui64Pos = psItem->ui64Offset; -+ -+ i64Ret = _ReadRndAccess(psEntry, psItem->ui64Size, &ui64Pos, -+ psEntry->pvPrivData); -+ PVR_LOG_IF_FALSE(i64Ret >= 0, "random access read operation " -+ "failed"); -+ } -+ else -+ { -+ PVR_ASSERT(psEntry->eType == DI_ENTRY_TYPE_GENERIC || -+ psEntry->eType == DI_ENTRY_TYPE_RANDOM_ACCESS); -+ } -+ -+ psImplEntry->pvNative = NULL; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "client reading entry \"%s\" has " -+ "disconnected", psEntry->pszFullPath)); -+ } -+ -+ _ContextUnrefAndMaybeDestroy(psItem->psContext); -+ OSFreeMemNoStats(psItem); -+ -+ OSLockAcquire(_g_psImpl->psWriterLock); -+ } -+ OSLockRelease(_g_psImpl->psWriterLock); -+ -+ eError = OSEventObjectWaitKernel(hEvent, WRITER_THREAD_SLEEP_TIMEOUT); -+ if (eError != PVRSRV_OK && eError != PVRSRV_ERROR_TIMEOUT) -+ { -+ PVR_LOG_ERROR(eError, "OSEventObjectWaitKernel"); -+ } -+ } -+ -+ OSLockAcquire(_g_psImpl->psWriterLock); -+ /* clear the queue if there are any items pending */ -+ while ((psNode = dllist_get_prev_node(&_g_psImpl->sWriterQueue)) != NULL) -+ { -+ struct DIIB_WORK_ITEM *psItem = IMG_CONTAINER_OF(psNode, -+ struct DIIB_WORK_ITEM, -+ sQueueElement); -+ -+ dllist_remove_node(psNode); -+ _ContextUnrefAndMaybeDestroy(psItem->psContext); -+ OSFreeMem(psItem); -+ } -+ OSLockRelease(_g_psImpl->psWriterLock); -+ -+ eError = OSEventObjectClose(hEvent); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectClose"); -+ -+ OSAtomicWrite(&_g_psImpl->eThreadState, THREAD_STATE_TERMINATED); -+} -+ -+/* ----- DI internal API ---------------------------------------------------- */ -+ -+DIIB_ENTRY *DIImplBrgFind(const IMG_CHAR *pszPath) -+{ -+ DIIB_ENTRY *psEntry; -+ -+ 
OSLockAcquire(_g_psImpl->psEntriesLock); -+ psEntry = (void *) HASH_Retrieve_Extended(_g_psImpl->psEntriesTable, -+ (IMG_CHAR *) pszPath); -+ OSLockRelease(_g_psImpl->psEntriesLock); -+ -+ return psEntry; -+} -+ -+/* ----- DI bridge interface ------------------------------------------------ */ -+ -+static PVRSRV_ERROR _CreateStream(IMG_CHAR *pszStreamName, IMG_HANDLE *phStream) -+{ -+ IMG_UINT32 iRet; -+ IMG_HANDLE hStream; -+ PVRSRV_ERROR eError; -+ -+ /* for now only one stream can be created. Should we be able to create -+ * per context stream? */ -+ iRet = OSSNPrintf(pszStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE, -+ "di_stream_%x", OSGetCurrentClientProcessIDKM()); -+ if (iRet >= PRVSRVTL_MAX_STREAM_NAME_SIZE) -+ { -+ /* this check is superfluous because it can never happen but in case -+ * someone changes the definition of PRVSRVTL_MAX_STREAM_NAME_SIZE -+ * handle this case */ -+ pszStreamName[0] = '\0'; -+ return PVRSRV_ERROR_INTERNAL_ERROR; -+ } -+ -+ eError = TLStreamCreate(&hStream, pszStreamName, STREAM_BUFFER_SIZE, -+ TL_OPMODE_DROP_NEWER, NULL, NULL, NULL, NULL); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ *phStream = hStream; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR DICreateContextKM(IMG_CHAR *pszStreamName, DI_CONTEXT **ppsContext) -+{ -+ PVRSRV_ERROR eError; -+ DI_CONTEXT *psContext; -+ IMG_HANDLE hStream = NULL; -+ THREAD_STATE eTState; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(ppsContext != NULL, "ppsContext"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pszStreamName != NULL, "pszStreamName"); -+ -+ psContext = OSAllocMem(sizeof(*psContext)); -+ PVR_LOG_GOTO_IF_NOMEM(psContext, eError, return_); -+ -+ eError = _CreateStream(pszStreamName, &hStream); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_CreateStream", free_desc_); -+ -+ psContext->hStream = hStream; -+ /* indicated to the write thread if the client is still connected and -+ * waiting for the data */ -+ psContext->bClientConnected = IMG_TRUE; -+ OSAtomicWrite(&psContext->iRefCnt, 1); -+ -+ eTState = OSAtomicCompareExchange(&_g_psImpl->eThreadState, -+ THREAD_STATE_NULL, -+ THREAD_STATE_ALIVE); -+ -+ /* if the thread has not been started yet do it */ -+ if (eTState == THREAD_STATE_NULL) -+ { -+ PVR_ASSERT(_g_psImpl->hWriterThread == NULL); -+ -+ eError = OSThreadCreate(&_g_psImpl->hWriterThread, "di_writer", -+ _WriterThread, NULL, IMG_FALSE, NULL); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreate", free_close_stream_); -+ } -+ -+ *ppsContext = psContext; -+ -+ return PVRSRV_OK; -+ -+free_close_stream_: -+ TLStreamClose(psContext->hStream); -+ OSAtomicWrite(&_g_psImpl->eThreadState, THREAD_STATE_TERMINATED); -+free_desc_: -+ OSFreeMem(psContext); -+return_: -+ return eError; -+} -+ -+PVRSRV_ERROR DIDestroyContextKM(DI_CONTEXT *psContext) -+{ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psContext != NULL, "psContext"); -+ -+ /* pass the information to the write thread that the client has -+ * disconnected */ -+ psContext->bClientConnected = IMG_FALSE; -+ -+ return _ContextUnrefAndMaybeDestroy(psContext); -+} -+ -+PVRSRV_ERROR DIReadEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath, -+ IMG_UINT64 ui64Offset, IMG_UINT64 ui64Size) -+{ -+ PVRSRV_ERROR eError; -+ struct DIIB_WORK_ITEM *psItem; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psContext != NULL, "psContext"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pszEntryPath != NULL, "pszEntryPath"); -+ -+ /* 'no stats' to avoid acquiring the process stats locks */ -+ psItem = OSAllocMemNoStats(sizeof(*psItem)); -+ PVR_LOG_GOTO_IF_NOMEM(psItem, eError, return_); -+ -+ psItem->psContext = psContext; -+ 
psItem->psEntry = DIImplBrgFind(pszEntryPath); -+ PVR_LOG_GOTO_IF_FALSE_VA(psItem->psEntry != NULL, free_item_, -+ "entry %s does not exist", pszEntryPath); -+ psItem->ui64Size = ui64Size; -+ psItem->ui64Offset = ui64Offset; -+ -+ /* increment ref count on the context so that it doesn't get freed -+ * before it gets processed by the writer thread. */ -+ OSAtomicIncrement(&psContext->iRefCnt); -+ -+ OSLockAcquire(_g_psImpl->psWriterLock); -+ dllist_add_to_head(&_g_psImpl->sWriterQueue, &psItem->sQueueElement); -+ OSLockRelease(_g_psImpl->psWriterLock); -+ -+ eError = OSEventObjectSignal(_g_psImpl->hWriterEventObject); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); -+ -+ return PVRSRV_OK; -+ -+free_item_: -+ eError = PVRSRV_ERROR_NOT_FOUND; -+ OSFreeMemNoStats(psItem); -+return_: -+ return eError; -+} -+ -+PVRSRV_ERROR DIWriteEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath, -+ IMG_UINT32 ui32ValueSize, const IMG_CHAR *pszValue) -+{ -+ DIIB_ENTRY *psEntry; -+ DI_PFN_WRITE pfnEntryPuts; -+ IMG_INT64 i64Length = 0; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psContext != NULL, "psContext"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pszEntryPath != NULL, "pszEntryPath"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pszValue != NULL, "pszValue"); -+ -+ psEntry = DIImplBrgFind(pszEntryPath); -+ PVR_LOG_RETURN_IF_FALSE_VA(psEntry != NULL, PVRSRV_ERROR_NOT_FOUND, -+ "entry %s does not exist", pszEntryPath); -+ -+ pfnEntryPuts = psEntry->sIterCb.pfnWrite; -+ if (pfnEntryPuts != NULL) -+ { -+ i64Length = pfnEntryPuts(pszValue, ui32ValueSize, (IMG_UINT64*)&i64Length, psEntry->pvPrivData); -+ -+ /* To deal with -EINVAL being returned */ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(i64Length >= 0, pszValue); -+ } -+ else -+ { -+ PVR_LOG_MSG(PVR_DBG_WARNING, "Unable to write to Entry. 
Write callback not enabled"); -+ return PVRSRV_ERROR_INVALID_REQUEST; -+ } -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR _listName(uintptr_t k, -+ uintptr_t v, -+ void* hStream) -+{ -+ PVRSRV_ERROR eError; -+ DIIB_ENTRY *psEntry; -+ IMG_UINT32 ui32Size; -+ IMG_CHAR aszName[DI_IMPL_BRG_PATH_LEN]; -+ -+ psEntry = (DIIB_ENTRY*) v; -+ PVR_ASSERT(psEntry != NULL); -+ PVR_UNREFERENCED_PARAMETER(k); -+ -+ ui32Size = OSSNPrintf(aszName, DI_IMPL_BRG_PATH_LEN, "%s\n", psEntry->pszFullPath); -+ PVR_LOG_IF_FALSE(ui32Size > 5, "ui32Size too small, Error suspected!"); -+ eError = TLStreamWrite(hStream, (IMG_UINT8 *)aszName, ui32Size+1); -+ -+ return eError; -+} -+ -+ -+PVRSRV_ERROR DIListAllEntriesKM(DI_CONTEXT *psContext) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psContext != NULL, "psContext"); -+ -+ eError = HASH_Iterate(_g_psImpl->psEntriesTable, _listName, psContext->hStream); -+ PVR_LOG_IF_ERROR(eError, "HASH_Iterate_Extended"); -+ -+ eError = TLStreamMarkEOS(psContext->hStream, IMG_FALSE); -+ return eError; -+} -+ -+/* ----- DI implementation interface ---------------------------------------- */ -+ -+static PVRSRV_ERROR _Init(void) -+{ -+ PVRSRV_ERROR eError; -+ -+ _g_psImpl = OSAllocMem(sizeof(*_g_psImpl)); -+ PVR_LOG_GOTO_IF_NOMEM(_g_psImpl, eError, return_); -+ -+ _g_psImpl->psEntriesTable = HASH_Create_Extended(ENTRIES_TABLE_INIT_SIZE, -+ DI_IMPL_BRG_PATH_LEN, -+ _Hash, _Compare); -+ PVR_LOG_GOTO_IF_NOMEM(_g_psImpl->psEntriesTable, eError, free_impl_); -+ -+ eError = OSLockCreate(&_g_psImpl->psEntriesLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSCreateLock", free_table_); -+ -+ eError = OSLockCreate(&_g_psImpl->psWriterLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSCreateLock", free_entries_lock_); -+ -+ eError = OSEventObjectCreate("DI_WRITER_EO", -+ &_g_psImpl->hWriterEventObject); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", free_writer_lock_); -+ -+ _g_psImpl->hWriterThread = NULL; -+ OSAtomicWrite(&_g_psImpl->eThreadState, THREAD_STATE_NULL); -+ -+ dllist_init(&_g_psImpl->sWriterQueue); -+ -+ return PVRSRV_OK; -+ -+free_writer_lock_: -+ OSLockDestroy(_g_psImpl->psWriterLock); -+free_entries_lock_: -+ OSLockDestroy(_g_psImpl->psEntriesLock); -+free_table_: -+ HASH_Delete_Extended(_g_psImpl->psEntriesTable, IMG_FALSE); -+free_impl_: -+ OSFreeMem(_g_psImpl); -+ _g_psImpl = NULL; -+return_: -+ return eError; -+} -+ -+static void _DeInit(void) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ THREAD_STATE eTState; -+ -+ eTState = OSAtomicCompareExchange(&_g_psImpl->eThreadState, -+ THREAD_STATE_ALIVE, -+ THREAD_STATE_TERMINATED); -+ -+ if (eTState == THREAD_STATE_ALIVE) -+ { -+ if (_g_psImpl->hWriterEventObject != NULL) -+ { -+ eError = OSEventObjectSignal(_g_psImpl->hWriterEventObject); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); -+ } -+ -+ LOOP_UNTIL_TIMEOUT(WRITER_THREAD_DESTROY_TIMEOUT) -+ { -+ eError = OSThreadDestroy(_g_psImpl->hWriterThread); -+ if (eError == PVRSRV_OK) -+ { -+ break; -+ } -+ OSWaitus(WRITER_THREAD_DESTROY_TIMEOUT/WRITER_THREAD_DESTROY_RETRIES); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); -+ } -+ -+ if (_g_psImpl->hWriterEventObject != NULL) -+ { -+ eError = OSEventObjectDestroy(_g_psImpl->hWriterEventObject); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); -+ } -+ -+ HASH_Delete_Extended(_g_psImpl->psEntriesTable, IMG_FALSE); -+ OSLockDestroy(_g_psImpl->psWriterLock); -+ OSLockDestroy(_g_psImpl->psEntriesLock); -+ OSFreeMem(_g_psImpl); -+ _g_psImpl = NULL; -+} -+ -+/* Recursively traverses the 
ancestors list up to the root group and -+ * appends their names preceded by "/" to the path in reverse order -+ * (root group's name first and psGroup group's name last). -+ * Returns current offset in the path (the current path length without the -+ * NUL character). If there is no more space in the path returns -1 -+ * to indicate an error (the path is too long to fit into the buffer). */ -+static IMG_INT _BuildGroupPath(IMG_CHAR *pszPath, const DIIB_GROUP *psGroup) -+{ -+ IMG_INT iOff; -+ -+ if (psGroup == NULL) -+ { -+ return 0; -+ } -+ -+ PVR_ASSERT(pszPath != NULL); -+ -+ iOff = _BuildGroupPath(pszPath, psGroup->psParentGroup); -+ PVR_RETURN_IF_FALSE(iOff != -1, -1); -+ -+ iOff += OSStringLCopy(pszPath + iOff, "/", -+ DI_IMPL_BRG_PATH_LEN - iOff); -+ PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, -1); -+ -+ iOff += OSStringLCopy(pszPath + iOff, psGroup->pszName, -+ DI_IMPL_BRG_PATH_LEN - iOff); -+ PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, -1); -+ -+ return iOff; -+} -+ -+static PVRSRV_ERROR _BuildEntryPath(IMG_CHAR *pszPath, const IMG_CHAR *pszName, -+ const DIIB_GROUP *psGroup) -+{ -+ IMG_INT iOff = _BuildGroupPath(pszPath, psGroup); -+ PVR_RETURN_IF_FALSE(iOff != -1, PVRSRV_ERROR_INVALID_OFFSET); -+ -+ iOff += OSStringLCopy(pszPath + iOff, "/", DI_IMPL_BRG_PATH_LEN - iOff); -+ PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, -+ PVRSRV_ERROR_INVALID_OFFSET); -+ -+ iOff += OSStringLCopy(pszPath + iOff, pszName, DI_IMPL_BRG_PATH_LEN - iOff); -+ PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, -+ PVRSRV_ERROR_INVALID_OFFSET); -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR _CreateEntry(const IMG_CHAR *pszName, -+ DI_ENTRY_TYPE eType, -+ const DI_ITERATOR_CB *psIterCb, -+ void *pvPrivData, -+ void *pvParentGroup, -+ void **pvEntry) -+{ -+ DIIB_GROUP *psParentGroup = pvParentGroup; -+ DIIB_ENTRY *psEntry; -+ PVRSRV_ERROR eError; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pvEntry != NULL, "pvEntry"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pvParentGroup != NULL, "pvParentGroup"); -+ -+ switch (eType) -+ { -+ case DI_ENTRY_TYPE_GENERIC: -+ break; -+ case DI_ENTRY_TYPE_RANDOM_ACCESS: -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "eType invalid in %s()", __func__)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, return_); -+ } -+ -+ psEntry = OSAllocMem(sizeof(*psEntry)); -+ PVR_LOG_GOTO_IF_NOMEM(psEntry, eError, return_); -+ -+ eError = OSLockCreate(&psEntry->hLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", free_entry_); -+ -+ psEntry->eType = eType; -+ psEntry->sIterCb = *psIterCb; -+ psEntry->pvPrivData = pvPrivData; -+ psEntry->psParentGroup = psParentGroup; -+ psEntry->pszFullPath[0] = '\0'; -+ -+ psEntry->sImplEntry.pvPrivData = pvPrivData; -+ psEntry->sImplEntry.pvNative = NULL; -+ psEntry->sImplEntry.psCb = &_g_sEntryCallbacks; -+ -+ eError = _BuildEntryPath(psEntry->pszFullPath, pszName, -+ psEntry->psParentGroup); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s() failed in _BuildEntryPath() for \"%s\" " -+ "entry", __func__, pszName)); -+ goto destroy_lock_; -+ } -+ -+ OSLockAcquire(_g_psImpl->psEntriesLock); -+ eError = HASH_Insert_Extended(_g_psImpl->psEntriesTable, -+ psEntry->pszFullPath, -+ (uintptr_t) psEntry) ? 
-+ PVRSRV_OK : PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; -+ OSLockRelease(_g_psImpl->psEntriesLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "HASH_Insert_Extended failed", destroy_lock_); -+ -+ *pvEntry = psEntry; -+ -+ return PVRSRV_OK; -+ -+destroy_lock_: -+ OSLockDestroy(psEntry->hLock); -+free_entry_: -+ OSFreeMem(psEntry); -+return_: -+ return eError; -+} -+ -+static void _DestroyEntry(void *pvEntry) -+{ -+ DIIB_ENTRY *psEntry = pvEntry; -+ PVR_ASSERT(psEntry != NULL); -+ -+ OSLockAcquire(_g_psImpl->psEntriesLock); -+ HASH_Remove_Extended(_g_psImpl->psEntriesTable, psEntry->pszFullPath); -+ OSLockRelease(_g_psImpl->psEntriesLock); -+ -+ OSLockDestroy(psEntry->hLock); -+ OSFreeMem(psEntry); -+} -+ -+static PVRSRV_ERROR _CreateGroup(const IMG_CHAR *pszName, -+ void *pvParentGroup, -+ void **ppvGroup) -+{ -+ DIIB_GROUP *psNewGroup; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(ppvGroup != NULL, "ppvGroup"); -+ -+ psNewGroup = OSAllocMem(sizeof(*psNewGroup)); -+ PVR_LOG_RETURN_IF_NOMEM(psNewGroup, "OSAllocMem"); -+ -+ psNewGroup->pszName = pszName; -+ psNewGroup->psParentGroup = pvParentGroup; -+ -+ *ppvGroup = psNewGroup; -+ -+ return PVRSRV_OK; -+} -+ -+static void _DestroyGroup(void *pvGroup) -+{ -+ DIIB_GROUP *psGroup = pvGroup; -+ PVR_ASSERT(psGroup != NULL); -+ -+ OSFreeMem(psGroup); -+} -+ -+PVRSRV_ERROR PVRDIImplBrgRegister(void) -+{ -+ OSDI_IMPL_CB sImplCb = { -+ .pfnInit = _Init, -+ .pfnDeInit = _DeInit, -+ .pfnCreateEntry = _CreateEntry, -+ .pfnDestroyEntry = _DestroyEntry, -+ .pfnCreateGroup = _CreateGroup, -+ .pfnDestroyGroup = _DestroyGroup -+ }; -+ -+ return DIRegisterImplementation("impl_brg", &sImplCb); -+} -diff --git a/drivers/gpu/drm/img-rogue/di_impl_brg.h b/drivers/gpu/drm/img-rogue/di_impl_brg.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/di_impl_brg.h -@@ -0,0 +1,92 @@ -+/*************************************************************************/ /*! -+@File -+@Title OS agnostic implementation of Debug Info interface. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PVR_IMPL_BRG_H -+#define PVR_IMPL_BRG_H -+ -+#include "pvrsrv_error.h" -+ -+typedef struct DI_CONTEXT_TAG DI_CONTEXT; -+typedef struct DI_ENTRY_DESC DI_ENTRY_DESC; -+ -+PVRSRV_ERROR PVRDIImplBrgRegister(void); -+ -+/*! @Function DICreateContextKM -+ * -+ * @Description -+ * Creates DI context which among others also creates a TL stream for reading -+ * entries. -+ * -+ * @Output pszStreamName: name of the TL stream created in this context -+ * @Output ppsContext: pointer to the new context -+ * -+ * @Return PVRSRV_ERROR error code -+ * PVRSRV_OK in case of a success -+ * PVRSRV_ERROR_INVALID_PARAMS if any of the parameters is invalid -+ * PVRSRV_ERROR_OUT_OF_MEMORY if any of the memory allocations failed -+ * error codes returned by TLStreamCreate() -+ */ -+PVRSRV_ERROR DICreateContextKM(IMG_CHAR *pszStreamName, -+ DI_CONTEXT **ppsContext); -+ -+/*! @Function DIDestroyContextKM -+ * -+ * @Description -+ * Destroy the DI context and all underlying dependencies. -+ * -+ * @Input psContext: pointer to the context -+ * -+ * @Return PVRSRV_ERROR error code -+ * PVRSRV_OK in case of a success -+ * PVRSRV_ERROR_INVALID_PARAMS if invalid context pointer given -+ */ -+PVRSRV_ERROR DIDestroyContextKM(DI_CONTEXT *psContext); -+ -+PVRSRV_ERROR DIReadEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath, -+ IMG_UINT64 ui64Offset, IMG_UINT64 ui64Size); -+ -+PVRSRV_ERROR DIWriteEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath, -+ IMG_UINT32 ui32ValueSize, const IMG_CHAR *pszValue); -+ -+PVRSRV_ERROR DIListAllEntriesKM(DI_CONTEXT *psContext); -+ -+#endif /* PVR_IMPL_BRG_H */ -diff --git a/drivers/gpu/drm/img-rogue/di_impl_brg_intern.h b/drivers/gpu/drm/img-rogue/di_impl_brg_intern.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/di_impl_brg_intern.h -@@ -0,0 +1,61 @@ -+/*************************************************************************/ /*! -+@File -+@Title OS agnostic implementation of Debug Info internal interface. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PVR_IMPL_BRG_INTERN_H -+#define PVR_IMPL_BRG_INTERN_H -+ -+typedef struct DIIB_GROUP DIIB_GROUP; -+typedef struct DIIB_ENTRY DIIB_ENTRY; -+ -+/*! @Function DIImplBrgFind -+ * -+ * @Description -+ * Retrieves an entry based on a given path. -+ * -+ * @Input pszPath: Full entry path in form of -+ * /rootGroup/.../parentGroup/entryName. -+ * -+ * @Return Returns entry object if exists or NULL otherwise. -+ */ -+DIIB_ENTRY *DIImplBrgFind(const IMG_CHAR *pszPath); -+ -+#endif /* PVR_IMPL_BRG_INTERN_H */ -diff --git a/drivers/gpu/drm/img-rogue/di_server.c b/drivers/gpu/drm/img-rogue/di_server.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/di_server.c -@@ -0,0 +1,800 @@ -+/*************************************************************************/ /*! -+@File -+@Title Debug Info framework functions and types. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "di_server.h" -+#include "osdi_impl.h" -+#include "pvrsrv_error.h" -+#include "dllist.h" -+#include "lock.h" -+#include "allocmem.h" -+#include "osfunc.h" -+ -+#define ROOT_GROUP_NAME PVR_DRM_NAME -+ -+/*! Implementation object. */ -+typedef struct DI_IMPL -+{ -+ const IMG_CHAR *pszName; /*pszName = OSAllocMemNoStats(sizeof(ROOT_GROUP_NAME)); -+ PVR_LOG_GOTO_IF_NOMEM(_g_psRootGroup->pszName, eError, cleanup_name_); -+ OSStringLCopy(_g_psRootGroup->pszName, ROOT_GROUP_NAME, -+ sizeof(ROOT_GROUP_NAME)); -+ -+ dllist_init(&_g_psRootGroup->sListNode); -+ dllist_init(&_g_psRootGroup->sGroupList); -+ dllist_init(&_g_psRootGroup->sEntryList); -+ dllist_init(&_g_psRootGroup->sNativeHandleList); -+ -+ return PVRSRV_OK; -+ -+cleanup_name_: -+ OSFreeMemNoStats(_g_psRootGroup); -+destroy_lock_: -+#if defined(__linux__) && defined(__KERNEL__) -+ OSLockDestroyNoStats(_g_hLock); -+#else -+ OSLockDestroy(_g_hLock); -+#endif -+return_: -+ return eError; -+} -+ -+/* Destroys the whole tree of group and entries for a given group as a root. 
*/ -+static void _DeInitGroupRecursively(DI_GROUP *psGroup) -+{ -+ DLLIST_NODE *psThis, *psNext; -+ -+ dllist_foreach_node(&psGroup->sEntryList, psThis, psNext) -+ { -+ DI_ENTRY *psThisEntry = IMG_CONTAINER_OF(psThis, DI_ENTRY, sListNode); -+ DIDestroyEntry(psThisEntry); -+ } -+ -+ dllist_foreach_node(&psGroup->sGroupList, psThis, psNext) -+ { -+ DI_GROUP *psThisGroup = IMG_CONTAINER_OF(psThis, DI_GROUP, sListNode); -+ -+ _DeInitGroupRecursively(psThisGroup); -+ } -+ -+ DIDestroyGroup(psGroup); -+} -+ -+void DIDeInit(void) -+{ -+ DLLIST_NODE *psThis, *psNext; -+ -+ OSLockAcquire(_g_hLock); -+ -+ if (!dllist_is_empty(&_g_psRootGroup->sGroupList) || -+ !dllist_is_empty(&_g_psRootGroup->sEntryList)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: entries or groups still exist during " -+ "de-initialisation process, destroying all", __func__)); -+ } -+ -+ _DeInitGroupRecursively(_g_psRootGroup); -+ _g_psRootGroup = NULL; -+ -+ /* Remove all of the implementations. */ -+ dllist_foreach_node(&_g_sImpls, psThis, psNext) -+ { -+ DI_IMPL *psDiImpl = IMG_CONTAINER_OF(psThis, DI_IMPL, sListNode); -+ -+ if (psDiImpl->bInitialised) -+ { -+ psDiImpl->sCb.pfnDeInit(); -+ psDiImpl->bInitialised = IMG_FALSE; -+ } -+ -+ dllist_remove_node(&psDiImpl->sListNode); -+ OSFreeMem(psDiImpl); -+ } -+ -+ OSLockRelease(_g_hLock); -+ -+ /* all resources freed so free the lock itself too */ -+ -+#if defined(__linux__) && defined(__KERNEL__) -+ OSLockDestroyNoStats(_g_hLock); -+#else -+ OSLockDestroy(_g_hLock); -+#endif -+} -+ -+static IMG_BOOL _ValidateIteratorCb(const DI_ITERATOR_CB *psIterCb, -+ DI_ENTRY_TYPE eType) -+{ -+ IMG_UINT32 uiFlags = 0; -+ -+ if (psIterCb == NULL) -+ { -+ return IMG_FALSE; -+ } -+ -+ if (eType == DI_ENTRY_TYPE_GENERIC) -+ { -+ uiFlags |= psIterCb->pfnShow != NULL ? BIT(0) : 0; -+ uiFlags |= psIterCb->pfnStart != NULL ? BIT(1) : 0; -+ uiFlags |= psIterCb->pfnStop != NULL ? BIT(2) : 0; -+ uiFlags |= psIterCb->pfnNext != NULL ? BIT(3) : 0; -+ -+ /* either only pfnShow or all callbacks need to be set */ -+ if (uiFlags != BIT(0) && !BITMASK_HAS(uiFlags, 0x0f)) -+ { -+ return IMG_FALSE; -+ } -+ } -+ else if (eType == DI_ENTRY_TYPE_RANDOM_ACCESS) -+ { -+ uiFlags |= psIterCb->pfnRead != NULL ? BIT(0) : 0; -+ uiFlags |= psIterCb->pfnSeek != NULL ? 
BIT(1) : 0; -+ -+ /* either only pfnRead or all callbacks need to be set */ -+ if (uiFlags != BIT(0) && !BITMASK_HAS(uiFlags, 0x03)) -+ { -+ return IMG_FALSE; -+ } -+ } -+ else -+ { -+ return IMG_FALSE; -+ } -+ -+ return IMG_TRUE; -+} -+ -+static PVRSRV_ERROR _CreateNativeEntry(DI_ENTRY *psEntry, -+ const DI_NATIVE_HANDLE *psNativeParent) -+{ -+ PVRSRV_ERROR eError; -+ DI_IMPL *psImpl = psNativeParent->psDiImpl; -+ -+ DI_NATIVE_HANDLE *psNativeEntry = OSAllocMem(sizeof(*psNativeEntry)); -+ PVR_LOG_GOTO_IF_NOMEM(psNativeEntry, eError, return_); -+ -+ eError = psImpl->sCb.pfnCreateEntry(psEntry->pszName, -+ psEntry->eType, -+ &psEntry->sIterCb, -+ psEntry->pvPrivData, -+ psNativeParent->pvHandle, -+ &psNativeEntry->pvHandle); -+ PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->sCb.pfnCreateEntry", free_memory_); -+ -+ psNativeEntry->psDiImpl = psImpl; -+ -+ dllist_add_to_head(&psEntry->sNativeHandleList, &psNativeEntry->sListNode); -+ -+ return PVRSRV_OK; -+ -+free_memory_: -+ OSFreeMem(psNativeEntry); -+return_: -+ return eError; -+} -+ -+static void _DestroyNativeEntry(DI_NATIVE_HANDLE *psNativeEntry) -+{ -+ dllist_remove_node(&psNativeEntry->sListNode); -+ OSFreeMem(psNativeEntry); -+} -+ -+PVRSRV_ERROR DICreateEntry(const IMG_CHAR *pszName, -+ DI_GROUP *psGroup, -+ const DI_ITERATOR_CB *psIterCb, -+ void *pvPriv, -+ DI_ENTRY_TYPE eType, -+ DI_ENTRY **ppsEntry) -+{ -+ PVRSRV_ERROR eError; -+ DLLIST_NODE *psThis, *psNext; -+ DI_ENTRY *psEntry; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(_ValidateIteratorCb(psIterCb, eType), -+ "psIterCb"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(ppsEntry != NULL, "psEntry"); -+ -+ psEntry = OSAllocMem(sizeof(*psEntry)); -+ PVR_LOG_RETURN_IF_NOMEM(psEntry, "OSAllocMem"); -+ -+ if (psGroup == NULL) -+ { -+ psGroup = _g_psRootGroup; -+ } -+ -+ psEntry->pszName = pszName; -+ psEntry->pvPrivData = pvPriv; -+ psEntry->eType = eType; -+ psEntry->sIterCb = *psIterCb; -+ dllist_init(&psEntry->sNativeHandleList); -+ -+ OSLockAcquire(_g_hLock); -+ -+ dllist_add_to_tail(&psGroup->sEntryList, &psEntry->sListNode); -+ -+ /* Iterate over all of the native handles of parent group to create -+ * the entry for every registered implementation. */ -+ dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext) -+ { -+ DI_NATIVE_HANDLE *psNativeGroup = -+ IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); -+ -+ eError = _CreateNativeEntry(psEntry, psNativeGroup); -+ PVR_GOTO_IF_ERROR(eError, cleanup_); -+ } -+ -+ OSLockRelease(_g_hLock); -+ -+ *ppsEntry = psEntry; -+ -+ return PVRSRV_OK; -+ -+cleanup_: -+ OSLockRelease(_g_hLock); -+ -+ /* Something went wrong so if there were any native entries created remove -+ * them from the list, free them and free the DI entry itself. */ -+ dllist_foreach_node(&psEntry->sNativeHandleList, psThis, psNext) -+ { -+ DI_NATIVE_HANDLE *psNativeEntry = -+ IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); -+ -+ _DestroyNativeEntry(psNativeEntry); -+ } -+ -+ OSFreeMem(psEntry); -+ -+ return eError; -+} -+ -+void DIDestroyEntry(DI_ENTRY *psEntry) -+{ -+ DLLIST_NODE *psThis, *psNext; -+ -+ PVR_LOG_RETURN_VOID_IF_FALSE(psEntry != NULL, -+ "psEntry invalid in DIDestroyEntry()"); -+ -+ /* Iterate through all of the native entries of the DI entry, remove -+ * them from the list and then destroy them. After that, destroy the -+ * DI entry itself. 
*/ -+ dllist_foreach_node(&psEntry->sNativeHandleList, psThis, psNext) -+ { -+ DI_NATIVE_HANDLE *psNative = IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, -+ sListNode); -+ -+ /* The implementation must ensure that entry is not removed if any -+ * operations are being executed on the entry. If this is the case -+ * the implementation should block until all of them are finished -+ * and prevent any further operations. -+ * This will guarantee proper synchronisation between the DI framework -+ * and underlying implementations and prevent destruction/access -+ * races. */ -+ psNative->psDiImpl->sCb.pfnDestroyEntry(psNative->pvHandle); -+ dllist_remove_node(&psNative->sListNode); -+ OSFreeMem(psNative); -+ } -+ -+ dllist_remove_node(&psEntry->sListNode); -+ -+ OSFreeMem(psEntry); -+} -+ -+static PVRSRV_ERROR _CreateNativeGroup(DI_GROUP *psGroup, -+ const DI_NATIVE_HANDLE *psNativeParent, -+ DI_NATIVE_HANDLE **ppsNativeGroup) -+{ -+ PVRSRV_ERROR eError; -+ DI_IMPL *psImpl = psNativeParent->psDiImpl; -+ -+ DI_NATIVE_HANDLE *psNativeGroup = OSAllocMem(sizeof(*psNativeGroup)); -+ PVR_LOG_GOTO_IF_NOMEM(psNativeGroup, eError, return_); -+ -+ eError = psImpl->sCb.pfnCreateGroup(psGroup->pszName, -+ psNativeParent->pvHandle, -+ &psNativeGroup->pvHandle); -+ PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->sCb.pfnCreateGroup", free_memory_); -+ -+ psNativeGroup->psDiImpl = psImpl; -+ -+ dllist_add_to_head(&psGroup->sNativeHandleList, &psNativeGroup->sListNode); -+ -+ *ppsNativeGroup = psNativeGroup; -+ -+ return PVRSRV_OK; -+ -+free_memory_: -+ OSFreeMem(psNativeGroup); -+return_: -+ return eError; -+} -+ -+static void _DestroyNativeGroup(DI_NATIVE_HANDLE *psNativeEntry) -+{ -+ dllist_remove_node(&psNativeEntry->sListNode); -+ OSFreeMem(psNativeEntry); -+} -+ -+PVRSRV_ERROR DICreateGroup(const IMG_CHAR *pszName, -+ DI_GROUP *psParent, -+ DI_GROUP **ppsGroup) -+{ -+ PVRSRV_ERROR eError; -+ DLLIST_NODE *psThis, *psNext; -+ DI_GROUP *psGroup; -+ size_t uSize; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(ppsGroup != NULL, "ppsDiGroup"); -+ -+ psGroup = OSAllocMem(sizeof(*psGroup)); -+ PVR_LOG_RETURN_IF_NOMEM(psGroup, "OSAllocMem"); -+ -+ if (psParent == NULL) -+ { -+ psParent = _g_psRootGroup; -+ } -+ -+ uSize = OSStringLength(pszName) + 1; -+ psGroup->pszName = OSAllocMem(uSize * sizeof(*psGroup->pszName)); -+ PVR_LOG_GOTO_IF_NOMEM(psGroup->pszName, eError, cleanup_name_); -+ OSStringLCopy(psGroup->pszName, pszName, uSize); -+ -+ psGroup->psParent = psParent; -+ dllist_init(&psGroup->sGroupList); -+ dllist_init(&psGroup->sEntryList); -+ dllist_init(&psGroup->sNativeHandleList); -+ -+ OSLockAcquire(_g_hLock); -+ -+ dllist_add_to_tail(&psParent->sGroupList, &psGroup->sListNode); -+ -+ /* Iterate over all of the native handles of parent group to create -+ * the group for every registered implementation. */ -+ dllist_foreach_node(&psParent->sNativeHandleList, psThis, psNext) -+ { -+ DI_NATIVE_HANDLE *psNativeGroup = NULL, *psNativeParent = -+ IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); -+ -+ eError = _CreateNativeGroup(psGroup, psNativeParent, &psNativeGroup); -+ PVR_GOTO_IF_ERROR(eError, cleanup_); -+ } -+ -+ OSLockRelease(_g_hLock); -+ -+ *ppsGroup = psGroup; -+ -+ return PVRSRV_OK; -+ -+cleanup_: -+ OSLockRelease(_g_hLock); -+ -+ /* Something went wrong so if there were any native groups created remove -+ * them from the list, free them and free the DI group itself. 
*/ -+ dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext) -+ { -+ DI_NATIVE_HANDLE *psNativeGroup = -+ IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); -+ -+ dllist_remove_node(&psNativeGroup->sListNode); -+ OSFreeMem(psNativeGroup); -+ } -+ -+ OSFreeMem(psGroup->pszName); -+cleanup_name_: -+ OSFreeMem(psGroup); -+ -+ return eError; -+} -+ -+void DIDestroyGroup(DI_GROUP *psGroup) -+{ -+ DLLIST_NODE *psThis, *psNext; -+ -+ PVR_LOG_RETURN_VOID_IF_FALSE(psGroup != NULL, -+ "psGroup invalid in DIDestroyGroup()"); -+ -+ /* Iterate through all of the native groups of the DI group, remove -+ * them from the list and then destroy them. After that destroy the -+ * DI group itself. */ -+ dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext) -+ { -+ DI_NATIVE_HANDLE *psNative = IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, -+ sListNode); -+ -+ psNative->psDiImpl->sCb.pfnDestroyGroup(psNative->pvHandle); -+ dllist_remove_node(&psNative->sListNode); -+ OSFreeMem(psNative); -+ } -+ -+ dllist_remove_node(&psGroup->sListNode); -+ -+ if (psGroup == _g_psRootGroup) -+ { -+ OSFreeMemNoStats(psGroup->pszName); -+ OSFreeMemNoStats(psGroup); -+ } -+ else -+ { -+ OSFreeMem(psGroup->pszName); -+ OSFreeMem(psGroup); -+ } -+} -+ -+void *DIGetPrivData(const OSDI_IMPL_ENTRY *psEntry) -+{ -+ PVR_ASSERT(psEntry != NULL); -+ -+ return psEntry->pvPrivData; -+} -+ -+void DIWrite(const OSDI_IMPL_ENTRY *psEntry, const void *pvData, -+ IMG_UINT32 uiSize) -+{ -+ PVR_ASSERT(psEntry != NULL); -+ PVR_ASSERT(psEntry->psCb != NULL); -+ PVR_ASSERT(psEntry->psCb->pfnWrite != NULL); -+ PVR_ASSERT(psEntry->pvNative != NULL); -+ -+ psEntry->psCb->pfnWrite(psEntry->pvNative, pvData, uiSize); -+} -+ -+void DIPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt, ...) 
-+{ -+ va_list args; -+ -+ PVR_ASSERT(psEntry != NULL); -+ PVR_ASSERT(psEntry->psCb != NULL); -+ PVR_ASSERT(psEntry->psCb->pfnVPrintf != NULL); -+ PVR_ASSERT(psEntry->pvNative != NULL); -+ -+ va_start(args, pszFmt); -+ psEntry->psCb->pfnVPrintf(psEntry->pvNative, pszFmt, args); -+ va_end(args); -+} -+ -+void DIVPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt, -+ va_list pArgs) -+{ -+ PVR_ASSERT(psEntry != NULL); -+ PVR_ASSERT(psEntry->psCb != NULL); -+ PVR_ASSERT(psEntry->psCb->pfnVPrintf != NULL); -+ PVR_ASSERT(psEntry->pvNative != NULL); -+ -+ psEntry->psCb->pfnVPrintf(psEntry->pvNative, pszFmt, pArgs); -+} -+ -+void DIPuts(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszStr) -+{ -+ PVR_ASSERT(psEntry != NULL); -+ PVR_ASSERT(psEntry->psCb != NULL); -+ PVR_ASSERT(psEntry->psCb->pfnPuts != NULL); -+ PVR_ASSERT(psEntry->pvNative != NULL); -+ -+ psEntry->psCb->pfnPuts(psEntry->pvNative, pszStr); -+} -+ -+IMG_BOOL DIHasOverflowed(const OSDI_IMPL_ENTRY *psEntry) -+{ -+ PVR_ASSERT(psEntry != NULL); -+ PVR_ASSERT(psEntry->psCb != NULL); -+ PVR_ASSERT(psEntry->psCb->pfnHasOverflowed != NULL); -+ PVR_ASSERT(psEntry->pvNative != NULL); -+ -+ return psEntry->psCb->pfnHasOverflowed(psEntry->pvNative); -+} -+ -+/* ---- OS implementation API ---------------------------------------------- */ -+ -+static IMG_BOOL _ValidateImplCb(const OSDI_IMPL_CB *psImplCb) -+{ -+ PVR_GOTO_IF_FALSE(psImplCb->pfnInit != NULL, failed_); -+ PVR_GOTO_IF_FALSE(psImplCb->pfnDeInit != NULL, failed_); -+ PVR_GOTO_IF_FALSE(psImplCb->pfnCreateGroup != NULL, failed_); -+ PVR_GOTO_IF_FALSE(psImplCb->pfnDestroyGroup != NULL, failed_); -+ PVR_GOTO_IF_FALSE(psImplCb->pfnCreateEntry != NULL, failed_); -+ PVR_GOTO_IF_FALSE(psImplCb->pfnDestroyEntry != NULL, failed_); -+ -+ return IMG_TRUE; -+ -+failed_: -+ return IMG_FALSE; -+} -+ -+/* Walks the tree of groups and entries and create all of the native handles -+ * for the given implementation for all of the already existing groups and -+ * entries. */ -+static PVRSRV_ERROR _InitNativeHandlesRecursively(DI_IMPL *psImpl, -+ DI_GROUP *psGroup, -+ DI_NATIVE_HANDLE *psNativeParent) -+{ -+ PVRSRV_ERROR eError; -+ DLLIST_NODE *psThis, *psNext; -+ DI_NATIVE_HANDLE *psNativeGroup; -+ -+ psNativeGroup = OSAllocMem(sizeof(*psNativeGroup)); -+ PVR_LOG_RETURN_IF_NOMEM(psNativeGroup, "OSAllocMem"); -+ -+ eError = psImpl->sCb.pfnCreateGroup(psGroup->pszName, -+ psNativeParent ? psNativeParent->pvHandle : NULL, -+ &psNativeGroup->pvHandle); -+ PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->sCb.pfnCreateGroup", free_memory_); -+ -+ psNativeGroup->psDiImpl = psImpl; -+ -+ dllist_add_to_head(&psGroup->sNativeHandleList, -+ &psNativeGroup->sListNode); -+ -+ dllist_foreach_node(&psGroup->sGroupList, psThis, psNext) -+ { -+ DI_GROUP *psThisGroup = IMG_CONTAINER_OF(psThis, DI_GROUP, sListNode); -+ -+ // and then walk the new group -+ eError = _InitNativeHandlesRecursively(psImpl, psThisGroup, -+ psNativeGroup); -+ PVR_LOG_RETURN_IF_ERROR(eError, "_InitNativeHandlesRecursively"); -+ } -+ -+ dllist_foreach_node(&psGroup->sEntryList, psThis, psNext) -+ { -+ DI_ENTRY *psThisEntry = IMG_CONTAINER_OF(psThis, DI_ENTRY, sListNode); -+ -+ eError = _CreateNativeEntry(psThisEntry, psNativeGroup); -+ PVR_LOG_RETURN_IF_ERROR(eError, "_CreateNativeEntry"); -+ } -+ -+ return PVRSRV_OK; -+ -+free_memory_: -+ OSFreeMem(psNativeGroup); -+ -+ return eError; -+} -+ -+/* Walks the tree of groups and entries and destroys all of the native handles -+ * for the given implementation. 
*/ -+static void _DeInitNativeHandlesRecursively(DI_IMPL *psImpl, DI_GROUP *psGroup) -+{ -+ DLLIST_NODE *psThis, *psNext; -+ -+ dllist_foreach_node(&psGroup->sEntryList, psThis, psNext) -+ { -+ DI_ENTRY *psThisEntry = IMG_CONTAINER_OF(psThis, DI_ENTRY, sListNode); -+ -+ // free all of the native entries that belong to this implementation -+ dllist_foreach_node(&psThisEntry->sNativeHandleList, psThis, psNext) -+ { -+ DI_NATIVE_HANDLE *psNativeEntry = -+ IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); -+ -+ if (psNativeEntry->psDiImpl == psImpl) -+ { -+ _DestroyNativeEntry(psNativeEntry); -+ // there can be only one entry on the list for a given -+ // implementation -+ break; -+ } -+ } -+ } -+ -+ dllist_foreach_node(&psGroup->sGroupList, psThis, psNext) -+ { -+ DI_GROUP *psThisGroup = IMG_CONTAINER_OF(psThis, DI_GROUP, sListNode); -+ -+ // and then walk the new group -+ _DeInitNativeHandlesRecursively(psImpl, psThisGroup); -+ } -+ -+ // free all of the native entries that belong to this implementation -+ dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext) -+ { -+ DI_NATIVE_HANDLE *psNativeGroup = -+ IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); -+ -+ if (psNativeGroup->psDiImpl == psImpl) -+ { -+ _DestroyNativeGroup(psNativeGroup); -+ // there can be only one entry on the list for a given -+ // implementation -+ break; -+ } -+ } -+} -+ -+static PVRSRV_ERROR _InitImpl(DI_IMPL *psImpl) -+{ -+ PVRSRV_ERROR eError; -+ // DI_NATIVE_HANDLE *psNativeGroup; -+ -+ eError = psImpl->sCb.pfnInit(); -+ PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->pfnInit()", return_); -+ -+ /* if the implementation is being created after any groups or entries -+ * have been created we need to walk the current tree and create -+ * native groups and entries for all of the existing ones */ -+ eError = _InitNativeHandlesRecursively(psImpl, _g_psRootGroup, NULL); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_InitNativeHandlesRecursively", -+ free_native_handles_and_deinit_); -+ -+ psImpl->bInitialised = IMG_TRUE; -+ -+ return PVRSRV_OK; -+ -+free_native_handles_and_deinit_: -+ /* something went wrong so we need to walk the tree and remove all of the -+ * native entries and groups that we've created before we can destroy -+ * the implementation */ -+ _DeInitNativeHandlesRecursively(psImpl, _g_psRootGroup); -+ psImpl->sCb.pfnDeInit(); -+return_: -+ return eError; -+} -+ -+PVRSRV_ERROR DIRegisterImplementation(const IMG_CHAR *pszName, -+ const OSDI_IMPL_CB *psImplCb) -+{ -+ DI_IMPL *psImpl; -+ PVRSRV_ERROR eError; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(_ValidateImplCb(psImplCb), "psImplCb"); -+ /* if root group does not exist it can mean 2 things: -+ * - DIInit() was not called so initialisation order is incorrect and needs -+ * to be fixed -+ * - DIInit() failed but if that happens we should never make it here */ -+ PVR_ASSERT(_g_psRootGroup != NULL); -+ -+ psImpl = OSAllocMem(sizeof(*psImpl)); -+ PVR_LOG_RETURN_IF_NOMEM(psImpl, "OSAllocMem"); -+ -+ psImpl->pszName = pszName; -+ psImpl->sCb = *psImplCb; -+ -+ OSLockAcquire(_g_hLock); -+ -+ eError = _InitImpl(psImpl); -+ if (eError != PVRSRV_OK) -+ { -+ /* implementation could not be initialised so remove it from the -+ * list, free the memory and forget about it */ -+ -+ PVR_DPF((PVR_DBG_ERROR, "%s: could not initialise \"%s\" debug " -+ "info implementation, discarding", __func__, -+ psImpl->pszName)); -+ -+ goto free_impl_; -+ } -+ -+ psImpl->bInitialised = IMG_TRUE; -+ -+ 
dllist_add_to_tail(&_g_sImpls, &psImpl->sListNode);
-+
-+ OSLockRelease(_g_hLock);
-+
-+ return PVRSRV_OK;
-+
-+free_impl_:
-+ OSLockRelease(_g_hLock);
-+
-+ OSFreeMem(psImpl);
-+
-+ return eError;
-+}
-diff --git a/drivers/gpu/drm/img-rogue/di_server.h b/drivers/gpu/drm/img-rogue/di_server.h
-new file mode 100644
-index 000000000000..111111111111
---- /dev/null
-+++ b/drivers/gpu/drm/img-rogue/di_server.h
-@@ -0,0 +1,219 @@
-+/*************************************************************************/ /*!
-+@File
-+@Title Functions for creating Debug Info groups and entries.
-+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
-+@License Dual MIT/GPLv2
-+
-+The contents of this file are subject to the MIT license as set out below.
-+
-+Permission is hereby granted, free of charge, to any person obtaining a copy
-+of this software and associated documentation files (the "Software"), to deal
-+in the Software without restriction, including without limitation the rights
-+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+copies of the Software, and to permit persons to whom the Software is
-+furnished to do so, subject to the following conditions:
-+
-+The above copyright notice and this permission notice shall be included in
-+all copies or substantial portions of the Software.
-+
-+Alternatively, the contents of this file may be used under the terms of
-+the GNU General Public License Version 2 ("GPL") in which case the provisions
-+of GPL are applicable instead of those above.
-+
-+If you wish to allow use of your version of this file only under the terms of
-+GPL, and not to allow others to use your version of this file under the terms
-+of the MIT license, indicate your decision by deleting the provisions above
-+and replace them with the notice and other provisions required by GPL as set
-+out in the file called "GPL-COPYING" included in this distribution. If you do
-+not delete the provisions above, a recipient may use your version of this file
-+under the terms of either the MIT license or GPL.
-+
-+This License is also included in this distribution in the file called
-+"MIT-COPYING".
-+
-+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
-+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
-+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
-+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
-+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-+*/ /**************************************************************************/
-+
-+#ifndef DI_SERVER_H
-+#define DI_SERVER_H
-+
-+#if defined(__linux__)
-+ #include <linux/version.h>
-+
-+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
-+ #include <linux/stdarg.h>
-+ #else
-+ #include <stdarg.h>
-+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
-+#else
-+ #include <stdarg.h>
-+#endif /* __linux__ */
-+
-+#include "di_common.h"
-+#include "pvrsrv_error.h"
-+#include "img_defs.h"
-+
-+/*! @Function DIInit
-+ *
-+ * @Description
-+ * Initialises Debug Info framework. This function will create common resources
-+ * for the framework.
-+ *
-+ * Note: This function must be called before DIRegisterImplementation() is
-+ * called for any of the implementations.
-+ */
-+PVRSRV_ERROR DIInit(void);
-+
-+/*!
@Function DIDeInit -+ * -+ * @Description -+ * De-initialises Debug Info framework. This function will call pfnDeInit() -+ * on each implementation and clean up common resources. -+ * -+ * In case some of the entries and groups have not been cleaned up this function -+ * will also perform recursive sweep and remove all entries and group for -+ * all implementations. -+ */ -+void DIDeInit(void); -+ -+/*! @Function DICreateEntry -+ * -+ * @Description -+ * Creates debug info entry. Depending on different implementations the entry -+ * might be for example a DebugFS file or something totally different. -+ * -+ * The entry will belong to a parent group if provided or to the root group -+ * if not. -+ * -+ * @Input pszName: name of the new entry -+ * @Input psDiGroup: parent group, if NULL entry will belong to the root group -+ * @Input psIterCb: implementation of the iterator for the entry -+ * @Input psPriv: private data that will be passed to the iterator operations -+ * @Input eType: type of the entry -+ * -+ * @Output ppsEntry: handle to the newly created entry -+ * -+ * @Return PVRSRV_ERROR error code -+ */ -+PVRSRV_ERROR DICreateEntry(const IMG_CHAR *pszName, -+ DI_GROUP *psGroup, -+ const DI_ITERATOR_CB *psIterCb, -+ void *psPriv, -+ DI_ENTRY_TYPE eType, -+ DI_ENTRY **ppsEntry); -+ -+/*! @Function DIDestroyEntry -+ * -+ * @Description -+ * Destroys debug info entry. -+ * -+ * @Input psEntry: handle to the entry -+ */ -+void DIDestroyEntry(DI_ENTRY *psEntry); -+ -+/*! @Function DICreateGroup -+ * -+ * @Description -+ * Creates debug info group. Depending on different implementations the group -+ * might be for example a DebugFS directory or something totally different. -+ * -+ * The group will belong to a parent group if provided or to the root group -+ * if not. -+ * -+ * @Input pszName: name of the new entry -+ * @Input psParent: parent group, if NULL entry will belong to the root group -+ * -+ * @Output ppsGroup: handle to the newly created entry -+ * -+ * @Return PVRSRV_ERROR error code -+ */ -+PVRSRV_ERROR DICreateGroup(const IMG_CHAR *pszName, -+ DI_GROUP *psParent, -+ DI_GROUP **ppsGroup); -+ -+/*! @Function DIDestroyGroup -+ * -+ * @Description -+ * Destroys debug info group. -+ * -+ * @Input psGroup: handle to the group -+ */ -+void DIDestroyGroup(DI_GROUP *psGroup); -+ -+/*! @Function DIGetPrivData -+ * -+ * @Description -+ * Retrieves private data from psEntry. The data is either passed during -+ * entry creation via psPriv parameter of DICreateEntry() function -+ * or by explicitly setting it with DIGetPrivData() function. -+ * -+ * @Input psEntry pointer to OSDI_IMPL_ENTRY object -+ * -+ * @Returns pointer to the private data (can be NULL if private data -+ * has not been specified) -+ */ -+void *DIGetPrivData(const OSDI_IMPL_ENTRY *psEntry); -+ -+/*! @Function DIWrite -+ * -+ * @Description -+ * Writes the binary data of the DI entry to the output sync, whatever that may -+ * be for the DI implementation. -+ * -+ * @Input psEntry pointer to OSDI_IMPL_ENTRY object -+ * @Input pvData data -+ * @Input uiSize pvData length -+ */ -+void DIWrite(const OSDI_IMPL_ENTRY *psEntry, const void *pvData, -+ IMG_UINT32 uiSize); -+ -+/*! @Function DIPrintf -+ * -+ * @Description -+ * Prints formatted string to the DI entry. -+ * -+ * @Input psEntry pointer to OSDI_IMPL_ENTRY object -+ * @Input pszFmt NUL-terminated format string -+ */ -+void DIPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt, ...) -+ __printf(2, 3); -+ -+/*! 
@Function DIVPrintf
-+ *
-+ * @Description
-+ * Prints formatted string to the DI entry. Equivalent to DIPrintf but takes
-+ * va_list instead of a variable number of arguments.
-+ *
-+ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
-+ * @Input pszFmt NUL-terminated format string
-+ * @Input pArgs va_list object
-+ */
-+void DIVPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt,
-+ va_list pArgs);
-+
-+/*! @Function DIPuts
-+ *
-+ * @Description
-+ * Prints a string to the DI entry.
-+ *
-+ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
-+ * @Input pszStr NUL-terminated string
-+ */
-+void DIPuts(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszStr);
-+
-+/*! @Function DIHasOverflowed
-+ *
-+ * @Description
-+ * Checks if the DI buffer has overflowed.
-+ *
-+ * @Return IMG_TRUE if buffer overflowed
-+ */
-+IMG_BOOL DIHasOverflowed(const OSDI_IMPL_ENTRY *psEntry);
-+
-+#endif /* DI_SERVER_H */
-diff --git a/drivers/gpu/drm/img-rogue/dllist.h b/drivers/gpu/drm/img-rogue/dllist.h
-new file mode 100644
-index 000000000000..111111111111
---- /dev/null
-+++ b/drivers/gpu/drm/img-rogue/dllist.h
-@@ -0,0 +1,407 @@
-+/*************************************************************************/ /*!
-+@File
-+@Title Double linked list header
-+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
-+@Description Double linked list interface
-+@License Dual MIT/GPLv2
-+
-+The contents of this file are subject to the MIT license as set out below.
-+
-+Permission is hereby granted, free of charge, to any person obtaining a copy
-+of this software and associated documentation files (the "Software"), to deal
-+in the Software without restriction, including without limitation the rights
-+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+copies of the Software, and to permit persons to whom the Software is
-+furnished to do so, subject to the following conditions:
-+
-+The above copyright notice and this permission notice shall be included in
-+all copies or substantial portions of the Software.
-+
-+Alternatively, the contents of this file may be used under the terms of
-+the GNU General Public License Version 2 ("GPL") in which case the provisions
-+of GPL are applicable instead of those above.
-+
-+If you wish to allow use of your version of this file only under the terms of
-+GPL, and not to allow others to use your version of this file under the terms
-+of the MIT license, indicate your decision by deleting the provisions above
-+and replace them with the notice and other provisions required by GPL as set
-+out in the file called "GPL-COPYING" included in this distribution. If you do
-+not delete the provisions above, a recipient may use your version of this file
-+under the terms of either the MIT license or GPL.
-+
-+This License is also included in this distribution in the file called
-+"MIT-COPYING".
-+
-+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
-+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
-+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
-+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
-+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-+*/ /**************************************************************************/ -+ -+#ifndef DLLIST_H -+#define DLLIST_H -+ -+#include "img_types.h" -+#include "img_defs.h" -+ -+/*! -+ Pointer to a linked list node -+*/ -+typedef struct DLLIST_NODE_TAG *PDLLIST_NODE; -+ -+ -+/*! -+ Node in a linked list -+*/ -+/* -+ * Note: the following structure's size is architecture-dependent and clients -+ * may need to create a mirror of the structure definition if it needs to be -+ * used in a structure shared between host and device. -+ * Consider such clients if any changes are made to this structure. -+ */ -+typedef struct DLLIST_NODE_TAG -+{ -+ struct DLLIST_NODE_TAG *psPrevNode; -+ struct DLLIST_NODE_TAG *psNextNode; -+} DLLIST_NODE; -+ -+ -+/*! -+ Static initialiser -+*/ -+#define DECLARE_DLLIST(n) \ -+DLLIST_NODE (n) = {&(n), &(n)} -+ -+/*************************************************************************/ /*! -+@Function dllist_foreach_node -+ -+@Description Walk through all the nodes on the list. -+ Safe against removal of (node). -+ -+@Input list_head List node to start the operation -+@Input node Current list node -+@Input next Node after the current one -+ -+*/ -+/*****************************************************************************/ -+#define dllist_foreach_node(list_head, node, next) \ -+ for ((node) = (list_head)->psNextNode, (next) = (node)->psNextNode; \ -+ (node) != (list_head); \ -+ (node) = (next), (next) = (node)->psNextNode) -+ -+#define dllist_foreach_node_backwards(list_head, node, prev) \ -+ for ((node) = (list_head)->psPrevNode, (prev) = (node)->psPrevNode; \ -+ (node) != (list_head); \ -+ (node) = (prev), (prev) = (node)->psPrevNode) -+ -+ -+/*************************************************************************/ /*! -+@Function dllist_foreach -+ -+@Description Simplification of dllist_foreach_node. -+ Walk through all the nodes on the list. -+ Safe against removal of currently-iterated node. -+ -+ Adds utility-macro dllist_cur() to typecast the current node. -+ -+@Input list_head List node to start the operation -+ -+*/ -+/*****************************************************************************/ -+#define dllist_foreach(list_head) \ -+ for (DLLIST_NODE *DllCurNode = (list_head).psNextNode, *DllNextNode = DllCurNode->psNextNode; \ -+ DllCurNode != &(list_head); \ -+ DllCurNode = DllNextNode, DllNextNode = DllCurNode->psNextNode) -+ -+#define dllist_foreach_backwards(list_head) \ -+ for (DLLIST_NODE *DllCurNode = (list_head).psPrevNode, *DllPrevNode = DllCurNode->psPrevNode; \ -+ DllCurNode != &(list_head); \ -+ DllCurNode = DllPrevNode, DllPrevNode = DllCurNode->psPrevNode) -+ -+#define dllist_cur(type, member) IMG_CONTAINER_OF(DllCurNode, type, member) -+ -+/*************************************************************************/ /*! -+@Function dllist_init -+ -+@Description Initialize a new double linked list -+ -+@Input psListHead List head Node -+ -+*/ -+/*****************************************************************************/ -+static INLINE -+void dllist_init(PDLLIST_NODE psListHead) -+{ -+ psListHead->psPrevNode = psListHead; -+ psListHead->psNextNode = psListHead; -+} -+ -+/*************************************************************************/ /*! 
-+@Function dllist_is_empty -+ -+@Description Returns whether the list is empty -+ -+@Input psListHead List head Node -+ -+*/ -+/*****************************************************************************/ -+static INLINE -+bool dllist_is_empty(const DLLIST_NODE *const psListHead) -+{ -+ return (psListHead->psPrevNode == psListHead); -+} -+ -+/*************************************************************************/ /*! -+@Function dllist_add_to_head -+ -+@Description Add psNewNode to head of list psListHead -+ -+@Input psListHead Head Node -+@Input psNewNode New Node -+ -+*/ -+/*****************************************************************************/ -+static INLINE -+void dllist_add_to_head(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode) -+{ -+ PDLLIST_NODE psTmp; -+ -+ psTmp = psListHead->psNextNode; -+ -+ psListHead->psNextNode = psNewNode; -+ psNewNode->psNextNode = psTmp; -+ -+ psTmp->psPrevNode = psNewNode; -+ psNewNode->psPrevNode = psListHead; -+} -+ -+ -+/*************************************************************************/ /*! -+@Function dllist_add_to_tail -+ -+@Description Add psNewNode to tail of list psListHead -+ -+@Input psListHead Head Node -+@Input psNewNode New Node -+ -+*/ -+/*****************************************************************************/ -+static INLINE -+void dllist_add_to_tail(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode) -+{ -+ PDLLIST_NODE psTmp; -+ -+ psTmp = psListHead->psPrevNode; -+ -+ psListHead->psPrevNode = psNewNode; -+ psNewNode->psPrevNode = psTmp; -+ -+ psTmp->psNextNode = psNewNode; -+ psNewNode->psNextNode = psListHead; -+} -+ -+/*************************************************************************/ /*! -+@Function dllist_node_is_in_list -+ -+@Description Returns true if psNode is in a list -+ -+@Input psNode List node -+ -+*/ -+/*****************************************************************************/ -+static INLINE -+bool dllist_node_is_in_list(const DLLIST_NODE *const psNode) -+{ -+ return (psNode->psNextNode != NULL); -+} -+ -+/*************************************************************************/ /*! -+@Function dllist_get_next_node -+ -+@Description Returns the list node after psListHead or NULL psListHead is -+ the only element in the list. -+ -+@Input psListHead List node to start the operation -+ -+*/ -+/*****************************************************************************/ -+static INLINE -+PDLLIST_NODE dllist_get_next_node(PDLLIST_NODE psListHead) -+{ -+ if (psListHead->psNextNode == psListHead) -+ { -+ return NULL; -+ } -+ else -+ { -+ return psListHead->psNextNode; -+ } -+} -+ -+/*************************************************************************/ /*! -+@Function dllist_get_prev_node -+ -+@Description Returns the list node preceding psListHead or NULL if -+ psListHead is the only element in the list. -+ -+@Input psListHead List node to start the operation -+ -+*/ -+/*****************************************************************************/ -+static INLINE -+PDLLIST_NODE dllist_get_prev_node(PDLLIST_NODE psListHead) -+{ -+ if (psListHead->psPrevNode == psListHead) -+ { -+ return NULL; -+ } -+ else -+ { -+ return psListHead->psPrevNode; -+ } -+} -+ -+/*************************************************************************/ /*! 
-+@Function dllist_remove_node -+ -+@Description Removes psListNode from the list where it currently belongs -+ -+@Input psListNode List node to be removed -+ -+*/ -+/*****************************************************************************/ -+static INLINE -+void dllist_remove_node(PDLLIST_NODE psListNode) -+{ -+ psListNode->psNextNode->psPrevNode = psListNode->psPrevNode; -+ psListNode->psPrevNode->psNextNode = psListNode->psNextNode; -+ -+ /* Clear the node to show it's not in a list */ -+ psListNode->psPrevNode = NULL; -+ psListNode->psNextNode = NULL; -+} -+ -+/*************************************************************************/ /*! -+@Function dllist_replace_head -+ -+@Description Moves the list from psOldHead to psNewHead -+ -+@Input psOldHead List node to be replaced. Will become a -+ head node of an empty list. -+@Input psNewHead List node to be inserted. Must be an -+ empty list head. -+ -+*/ -+/*****************************************************************************/ -+static INLINE -+void dllist_replace_head(PDLLIST_NODE psOldHead, PDLLIST_NODE psNewHead) -+{ -+ if (dllist_is_empty(psOldHead)) -+ { -+ psNewHead->psNextNode = psNewHead; -+ psNewHead->psPrevNode = psNewHead; -+ } -+ else -+ { -+ /* Change the neighbouring nodes */ -+ psOldHead->psNextNode->psPrevNode = psNewHead; -+ psOldHead->psPrevNode->psNextNode = psNewHead; -+ -+ /* Copy the old data to the new node */ -+ psNewHead->psNextNode = psOldHead->psNextNode; -+ psNewHead->psPrevNode = psOldHead->psPrevNode; -+ -+ /* Remove links to the previous list */ -+ psOldHead->psNextNode = psOldHead; -+ psOldHead->psPrevNode = psOldHead; -+ } -+} -+ -+/**************************************************************************/ /*! -+@Function dllist_insert_list_at_head -+ -+@Description Inserts psInHead list into the head of the psOutHead list. -+ After this operation psOutHead will contain psInHead at the -+ head of the list and the remaining elements that were -+ already in psOutHead will be places after the psInList (so -+ at a tail of the original list). -+ -+@Input psOutHead List node psInHead will be inserted to. -+@Input psInHead List node to be inserted to psOutHead. -+ After this operation this becomes an empty list. -+*/ /***************************************************************************/ -+static INLINE -+void dllist_insert_list_at_head(PDLLIST_NODE psOutHead, PDLLIST_NODE psInHead) -+{ -+ PDLLIST_NODE psInHeadNextNode = psInHead->psNextNode; -+ PDLLIST_NODE psOutHeadNextNode = psOutHead->psNextNode; -+ -+ if (!dllist_is_empty(psInHead)) -+ { -+ psOutHead->psNextNode = psInHeadNextNode; -+ psInHeadNextNode->psPrevNode = psOutHead; -+ -+ psInHead->psPrevNode->psNextNode = psOutHeadNextNode; -+ psOutHeadNextNode->psPrevNode = psInHead->psPrevNode; -+ -+ dllist_init(psInHead); -+ } -+ } -+ -+/*************************************************************************/ /*! -+@Description Pointer to a dllist comparison callback function. -+@Input psNode Pointer to a node in a dllist. -+@Input psNext Pointer to psNode's next neighbour. -+*/ /**************************************************************************/ -+typedef bool (*DLLIST_CMP_CB)(const DLLIST_NODE *psNode, const DLLIST_NODE *psNext); -+ -+/*************************************************************************/ /*! 
-+@Function dllist_sort -+ -+@Description Insert-sorts the List in place -+ The cmpr function passes the current and next node, -+ From which the user writes the function responsible -+ for choosing to swap order or not. -+ The function returns true if a swap is required -+ -+@Input psListHead List Head to be sorted. -+ -+@Input cmpr Function pointer to use for sorting -+ -+*/ -+/*****************************************************************************/ -+static INLINE void dllist_sort(PDLLIST_NODE psListHead, -+ DLLIST_CMP_CB cmpr) -+{ -+ DLLIST_NODE *node, *next; -+ DLLIST_NODE sTempHead; -+ -+ dllist_init(&sTempHead); -+ -+ dllist_foreach_node(psListHead, node, next) -+ { -+ dllist_remove_node(node); -+ dllist_add_to_head(&sTempHead, node); -+ } -+ -+ while (!dllist_is_empty(&sTempHead)) -+ { -+ DLLIST_NODE *psSmallestNode = NULL; -+ -+ dllist_foreach_node(&sTempHead, node, next) -+ { -+ if (!psSmallestNode || cmpr(psSmallestNode, node)) -+ { -+ psSmallestNode = node; -+ } -+ } -+ -+ dllist_remove_node(psSmallestNode); -+ dllist_add_to_tail(psListHead, psSmallestNode); -+ } -+} -+ -+#endif /* DLLIST_H */ -diff --git a/drivers/gpu/drm/img-rogue/dma_km.h b/drivers/gpu/drm/img-rogue/dma_km.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/dma_km.h -@@ -0,0 +1,83 @@ -+/*************************************************************************/ /*! -+@File dma_km.h -+@Title DMA transfer module header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+
-+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
-+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
-+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
-+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
-+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-+*/ /**************************************************************************/
-+
-+#ifndef DMA_KM_H
-+#define DMA_KM_H
-+
-+#if defined(__linux__)
-+#include <linux/version.h>
-+#else
-+#define KERNEL_VERSION
-+#endif
-+
-+#include "pvrsrv_error.h"
-+#include "img_types.h"
-+#include "cache_ops.h"
-+#include "device.h"
-+#include "pmr.h"
-+#include "pvrsrv_sync_km.h"
-+#include "connection_server.h"
-+
-+PVRSRV_ERROR DmaDeviceParams(CONNECTION_DATA *psConnection,
-+ PVRSRV_DEVICE_NODE *psDevNode,
-+ IMG_UINT32 *ui32DmaBuffAlign,
-+ IMG_UINT32 *ui32DmaTransferMult);
-+
-+PVRSRV_ERROR DmaSparseMappingTable(PMR *psPMR,
-+ IMG_DEVMEM_OFFSET_T uiOffset,
-+ IMG_UINT32 ui32SizeInPages,
-+ IMG_BOOL *pbTable);
-+
-+PVRSRV_ERROR DmaTransfer(CONNECTION_DATA *psConnection,
-+ PVRSRV_DEVICE_NODE *psDevNode,
-+ IMG_UINT32 uiNumDMAs,
-+ PMR** ppsPMR,
-+ IMG_UINT64 *puiAddress,
-+ IMG_DEVMEM_OFFSET_T *puiOffset,
-+ IMG_DEVMEM_SIZE_T *puiSize,
-+ IMG_UINT32 uiFlags,
-+ PVRSRV_TIMELINE iUpdateTimeline);
-+
-+PVRSRV_ERROR PVRSRVInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode);
-+void PVRSRVDeInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode);
-+
-+#endif /* DMA_KM_H */
-diff --git a/drivers/gpu/drm/img-rogue/dma_support.c b/drivers/gpu/drm/img-rogue/dma_support.c
-new file mode 100644
-index 000000000000..111111111111
---- /dev/null
-+++ b/drivers/gpu/drm/img-rogue/dma_support.c
-@@ -0,0 +1,523 @@
-+/*************************************************************************/ /*!
-+@File dma_support.c
-+@Title System DMA support
-+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
-+@Description Provides a contiguous memory allocator (i.e. DMA allocator);
-+ APIs are used for allocation/ioremapping (DMA/PA <-> CPU/VA)
-+@License Dual MIT/GPLv2
-+
-+The contents of this file are subject to the MIT license as set out below.
-+
-+Permission is hereby granted, free of charge, to any person obtaining a copy
-+of this software and associated documentation files (the "Software"), to deal
-+in the Software without restriction, including without limitation the rights
-+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+copies of the Software, and to permit persons to whom the Software is
-+furnished to do so, subject to the following conditions:
-+
-+The above copyright notice and this permission notice shall be included in
-+all copies or substantial portions of the Software.
-+
-+Alternatively, the contents of this file may be used under the terms of
-+the GNU General Public License Version 2 ("GPL") in which case the provisions
-+of GPL are applicable instead of those above.
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "allocmem.h" -+#include "dma_support.h" -+#include "pvr_vmap.h" -+#include "kernel_compatibility.h" -+ -+#define DMA_MAX_IOREMAP_ENTRIES 2 -+static IMG_BOOL gbEnableDmaIoRemapping = IMG_FALSE; -+static DMA_ALLOC gsDmaIoRemapArray[DMA_MAX_IOREMAP_ENTRIES] = {{0}}; -+ -+static void* -+SysDmaAcquireKernelAddress(struct page *psPage, IMG_UINT64 ui64Size, DMA_ALLOC *psDmaAlloc) -+{ -+ IMG_BOOL bPageByPage = IMG_TRUE; -+ IMG_UINT32 uiIdx; -+ void *pvVirtAddr = NULL; -+ IMG_UINT32 ui32PgCount = (IMG_UINT32)(ui64Size >> OSGetPageShift()); -+ PVRSRV_DEVICE_NODE *psDevNode = OSAllocZMemNoStats(sizeof(*psDevNode)); -+ PVRSRV_DEVICE_CONFIG *psDevConfig = OSAllocZMemNoStats(sizeof(*psDevConfig)); -+ struct page **pagearray = OSAllocZMemNoStats(ui32PgCount * sizeof(struct page *)); -+ void *pvOSDevice = psDmaAlloc->pvOSDevice; -+#if defined(CONFIG_ARM64) -+ pgprot_t prot = pgprot_writecombine(PAGE_KERNEL); -+#else -+ pgprot_t prot = pgprot_noncached(PAGE_KERNEL); -+#endif -+ -+ /* Validate all required dynamic tmp buffer allocations */ -+ if (psDevNode == NULL || psDevConfig == NULL || pagearray == NULL) -+ { -+ if (psDevNode) -+ { -+ OSFreeMem(psDevNode); -+ } -+ -+ if (psDevConfig) -+ { -+ OSFreeMem(psDevConfig); -+ } -+ -+ if (pagearray) -+ { -+ OSFreeMem(pagearray); -+ } -+ -+ goto e0; -+ } -+ -+ /* Fake psDevNode->psDevConfig->pvOSDevice */ -+ psDevConfig->pvOSDevice = pvOSDevice; -+ psDevNode->psDevConfig = psDevConfig; -+ -+ /* Evict any page data contents from d-cache */ -+ for (uiIdx = 0; uiIdx < ui32PgCount; uiIdx++) -+ { -+ void *pvVirtStart, *pvVirtEnd; -+ IMG_CPU_PHYADDR sCPUPhysStart, sCPUPhysEnd; -+ -+ /* Prepare array required for vmap */ -+ pagearray[uiIdx] = &psPage[uiIdx]; -+ -+ if (bPageByPage) -+ { -+#if defined(CONFIG_64BIT) -+ bPageByPage = IMG_FALSE; -+ -+ pvVirtStart = kmap(&psPage[uiIdx]); -+ pvVirtEnd = pvVirtStart + ui64Size; -+ -+ sCPUPhysStart.uiAddr = page_to_phys(&psPage[uiIdx]); -+ sCPUPhysEnd.uiAddr = sCPUPhysStart.uiAddr + ui64Size; -+ /* all pages have a kernel linear address, flush entire range */ -+#else -+ pvVirtStart = kmap(&psPage[uiIdx]); -+ pvVirtEnd = pvVirtStart + PAGE_SIZE; -+ -+ sCPUPhysStart.uiAddr = page_to_phys(&psPage[uiIdx]); -+ 
sCPUPhysEnd.uiAddr = sCPUPhysStart.uiAddr + PAGE_SIZE; -+ /* pages might be from HIGHMEM, need to kmap/flush per page */ -+#endif -+ -+ /* Fallback to range-based d-cache flush */ -+ OSCPUCacheInvalidateRangeKM(psDevNode, -+ pvVirtStart, pvVirtEnd, -+ sCPUPhysStart, sCPUPhysEnd); -+ -+ kunmap(&psPage[uiIdx]); -+ } -+ } -+ -+ /* Remap pages into VMALLOC space */ -+ pvVirtAddr = pvr_vmap(pagearray, ui32PgCount, VM_MAP, prot); -+ psDmaAlloc->PageProps = prot; -+ -+ /* Clean-up tmp buffers */ -+ OSFreeMem(psDevConfig); -+ OSFreeMem(psDevNode); -+ OSFreeMem(pagearray); -+ -+e0: -+ return pvVirtAddr; -+} -+ -+static void SysDmaReleaseKernelAddress(void *pvVirtAddr, IMG_UINT64 ui64Size, pgprot_t pgprot) -+{ -+ pvr_vunmap(pvVirtAddr, ui64Size >> OSGetPageShift(), pgprot); -+} -+ -+/*! -+****************************************************************************** -+ @Function SysDmaAllocMem -+ -+ @Description Allocates physically contiguous memory -+ -+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ -+ error code -+ ******************************************************************************/ -+PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ struct device *psDev; -+ struct page *psPage; -+ size_t uiSize; -+ -+ if (psDmaAlloc == NULL || -+ psDmaAlloc->hHandle || -+ psDmaAlloc->pvVirtAddr || -+ psDmaAlloc->ui64Size == 0 || -+ psDmaAlloc->sBusAddr.uiAddr || -+ psDmaAlloc->pvOSDevice == NULL) -+ { -+ PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter"); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE); -+ psDev = (struct device *)psDmaAlloc->pvOSDevice; -+ -+ psDmaAlloc->hHandle = dma_alloc_coherent(psDev, uiSize, (dma_addr_t *)&psDmaAlloc->sBusAddr.uiAddr, GFP_KERNEL); -+ -+ if (psDmaAlloc->hHandle) -+ { -+ psDmaAlloc->pvVirtAddr = psDmaAlloc->hHandle; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "Allocated DMA buffer V:0x%p P:0x%llx S:0x"IMG_SIZE_FMTSPECX, -+ psDmaAlloc->pvVirtAddr, -+ psDmaAlloc->sBusAddr.uiAddr, -+ uiSize)); -+ } -+ else if ((psPage = alloc_pages(GFP_KERNEL, get_order(uiSize)))) -+ { -+ psDmaAlloc->sBusAddr.uiAddr = dma_map_page(psDev, psPage, 0, uiSize, DMA_BIDIRECTIONAL); -+ if (dma_mapping_error(psDev, psDmaAlloc->sBusAddr.uiAddr)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "dma_map_page() failed, page 0x%p order %d", -+ psPage, -+ get_order(uiSize))); -+ __free_pages(psPage, get_order(uiSize)); -+ goto e0; -+ } -+ psDmaAlloc->psPage = psPage; -+ -+ psDmaAlloc->pvVirtAddr = SysDmaAcquireKernelAddress(psPage, uiSize, psDmaAlloc); -+ if (! psDmaAlloc->pvVirtAddr) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "SysDmaAcquireKernelAddress() failed, page 0x%p order %d", -+ psPage, -+ get_order(uiSize))); -+ dma_unmap_page(psDev, psDmaAlloc->sBusAddr.uiAddr, uiSize, DMA_BIDIRECTIONAL); -+ __free_pages(psPage, get_order(uiSize)); -+ goto e0; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "Allocated contiguous buffer V:0x%p P:0x%llx S:0x"IMG_SIZE_FMTSPECX, -+ psDmaAlloc->pvVirtAddr, -+ psDmaAlloc->sBusAddr.uiAddr, -+ uiSize)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Unable to allocate contiguous buffer, size: 0x"IMG_SIZE_FMTSPECX, uiSize)); -+ eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES; -+ } -+ -+e0: -+ PVR_LOG_RETURN_IF_FALSE((psDmaAlloc->pvVirtAddr), "DMA/CMA allocation failed", PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES); -+ return eError; -+} -+ -+/*! 
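SysDmaAllocMem() above first asks dma_alloc_coherent() for the buffer and only falls back to alloc_pages() plus dma_map_page() and a manual remap when that fails; either way the caller ends up with a CPU mapping and a bus address in the same DMA_ALLOC descriptor. Below is a hedged sketch of the expected calling pattern; it only builds inside this driver/kernel tree, and psMyDev plus the 64 KiB size are placeholder values, not anything mandated by the driver.

/*
 * Hedged usage sketch (driver-tree code, not a standalone program):
 * how a caller is expected to drive SysDmaAllocMem()/SysDmaFreeMem()
 * given the parameter checks above.  "psMyDev" and the 64 KiB size
 * are placeholders for this illustration.
 */
#include <linux/device.h>
#include <linux/string.h>
#include "dma_support.h"

static PVRSRV_ERROR AllocSharedDmaBuffer(struct device *psMyDev, DMA_ALLOC *psAlloc)
{
	/* The allocator rejects descriptors that are not zeroed apart
	 * from the size and owning device (see the checks above). */
	memset(psAlloc, 0, sizeof(*psAlloc));
	psAlloc->ui64Size   = 64 * 1024;
	psAlloc->pvOSDevice = psMyDev;

	/* On success psAlloc->pvVirtAddr is the CPU mapping and
	 * psAlloc->sBusAddr.uiAddr the device address, whichever of
	 * the two allocation paths was taken. */
	return SysDmaAllocMem(psAlloc);
}

static void FreeSharedDmaBuffer(DMA_ALLOC *psAlloc)
{
	/* SysDmaFreeMem() picks the matching release path by checking
	 * whether hHandle (set only by the dma_alloc_coherent() path)
	 * is valid. */
	SysDmaFreeMem(psAlloc);
}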
-+****************************************************************************** -+ @Function SysDmaFreeMem -+ -+ @Description Free physically contiguous memory -+ -+ @Return void -+ ******************************************************************************/ -+void SysDmaFreeMem(DMA_ALLOC *psDmaAlloc) -+{ -+ size_t uiSize; -+ struct device *psDev; -+ -+ if (psDmaAlloc == NULL || -+ psDmaAlloc->ui64Size == 0 || -+ psDmaAlloc->pvOSDevice == NULL || -+ psDmaAlloc->pvVirtAddr == NULL || -+ psDmaAlloc->sBusAddr.uiAddr == 0) -+ { -+ PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter"); -+ return; -+ } -+ -+ uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE); -+ psDev = (struct device *)psDmaAlloc->pvOSDevice; -+ -+ if (psDmaAlloc->pvVirtAddr != psDmaAlloc->hHandle) -+ { -+ SysDmaReleaseKernelAddress(psDmaAlloc->pvVirtAddr, uiSize, psDmaAlloc->PageProps); -+ } -+ -+ if (! psDmaAlloc->hHandle) -+ { -+ struct page *psPage; -+ dma_unmap_page(psDev, psDmaAlloc->sBusAddr.uiAddr, uiSize, DMA_BIDIRECTIONAL); -+ psPage = psDmaAlloc->psPage; -+ __free_pages(psPage, get_order(uiSize)); -+ return; -+ } -+ -+ dma_free_coherent(psDev, uiSize, psDmaAlloc->hHandle, (dma_addr_t )psDmaAlloc->sBusAddr.uiAddr); -+} -+ -+/*! -+****************************************************************************** -+ @Function SysDmaRegisterForIoRemapping -+ -+ @Description Registers DMA_ALLOC for manual I/O remapping -+ -+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ -+ error code -+ ******************************************************************************/ -+PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psDmaAlloc) -+{ -+ size_t uiSize; -+ IMG_UINT32 ui32Idx; -+ IMG_BOOL bTabEntryFound = IMG_TRUE; -+ PVRSRV_ERROR eError = PVRSRV_ERROR_TOO_FEW_BUFFERS; -+ -+ if (psDmaAlloc == NULL || -+ psDmaAlloc->ui64Size == 0 || -+ psDmaAlloc->pvOSDevice == NULL || -+ psDmaAlloc->pvVirtAddr == NULL || -+ psDmaAlloc->sBusAddr.uiAddr == 0) -+ { -+ PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter"); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE); -+ -+ for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) -+ { -+ /* Check if an I/O remap entry exists for remapping */ -+ if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr == NULL) -+ { -+ PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr == 0); -+ PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].ui64Size == 0); -+ break; -+ } -+ } -+ -+ if (ui32Idx >= DMA_MAX_IOREMAP_ENTRIES) -+ { -+ bTabEntryFound = IMG_FALSE; -+ } -+ -+ if (bTabEntryFound) -+ { -+ IMG_BOOL bSameVAddr, bSamePAddr, bSameSize; -+ -+ bSamePAddr = gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr == psDmaAlloc->sBusAddr.uiAddr; -+ bSameVAddr = gsDmaIoRemapArray[ui32Idx].pvVirtAddr == psDmaAlloc->pvVirtAddr; -+ bSameSize = gsDmaIoRemapArray[ui32Idx].ui64Size == uiSize; -+ -+ if (bSameVAddr) -+ { -+ if (bSamePAddr && bSameSize) -+ { -+ eError = PVRSRV_OK; -+ } -+ else -+ { -+ eError = PVRSRV_ERROR_ALREADY_EXISTS; -+ } -+ } -+ else -+ { -+ PVR_ASSERT(bSamePAddr == IMG_FALSE); -+ -+ gsDmaIoRemapArray[ui32Idx].ui64Size = uiSize; -+ gsDmaIoRemapArray[ui32Idx].sBusAddr = psDmaAlloc->sBusAddr; -+ gsDmaIoRemapArray[ui32Idx].pvVirtAddr = psDmaAlloc->pvVirtAddr; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "DMA: register I/O remap: " -+ "VA: 0x%p, PA: 0x%llx, Size: 0x"IMG_SIZE_FMTSPECX, -+ psDmaAlloc->pvVirtAddr, -+ psDmaAlloc->sBusAddr.uiAddr, -+ uiSize)); -+ -+ gbEnableDmaIoRemapping = IMG_TRUE; -+ eError = PVRSRV_OK; -+ } -+ } -+ -+ return eError; -+} -+ -+/*! 
-+****************************************************************************** -+ @Function SysDmaDeregisterForIoRemapping -+ -+ @Description Deregisters DMA_ALLOC from manual I/O remapping -+ -+ @Return void -+ ******************************************************************************/ -+void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psDmaAlloc) -+{ -+ size_t uiSize; -+ IMG_UINT32 ui32Idx; -+ -+ if (psDmaAlloc == NULL || -+ psDmaAlloc->ui64Size == 0 || -+ psDmaAlloc->pvOSDevice == NULL || -+ psDmaAlloc->pvVirtAddr == NULL || -+ psDmaAlloc->sBusAddr.uiAddr == 0) -+ { -+ PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter"); -+ return; -+ } -+ -+ uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE); -+ -+ /* Remove specified entries from list of I/O remap entries */ -+ for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) -+ { -+ if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr == psDmaAlloc->pvVirtAddr) -+ { -+ gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr = 0; -+ gsDmaIoRemapArray[ui32Idx].pvVirtAddr = NULL; -+ gsDmaIoRemapArray[ui32Idx].ui64Size = 0; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "DMA: deregister I/O remap: " -+ "VA: 0x%p, PA: 0x%llx, Size: 0x"IMG_SIZE_FMTSPECX, -+ psDmaAlloc->pvVirtAddr, -+ psDmaAlloc->sBusAddr.uiAddr, -+ uiSize)); -+ -+ break; -+ } -+ } -+ -+ /* Check if no other I/O remap entries exists for remapping */ -+ for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) -+ { -+ if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr != NULL) -+ { -+ break; -+ } -+ } -+ -+ if (ui32Idx == DMA_MAX_IOREMAP_ENTRIES) -+ { -+ /* No entries found so disable remapping */ -+ gbEnableDmaIoRemapping = IMG_FALSE; -+ } -+} -+ -+/*! -+****************************************************************************** -+ @Function SysDmaDevPAddrToCpuVAddr -+ -+ @Description Maps a DMA_ALLOC physical address to CPU virtual address -+ -+ @Return IMG_CPU_VIRTADDR on success. Otherwise, a NULL -+ ******************************************************************************/ -+IMG_CPU_VIRTADDR SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size) -+{ -+ IMG_CPU_VIRTADDR pvDMAVirtAddr = NULL; -+ DMA_ALLOC *psHeapDmaAlloc; -+ IMG_UINT32 ui32Idx; -+ -+ if (gbEnableDmaIoRemapping == IMG_FALSE) -+ { -+ return pvDMAVirtAddr; -+ } -+ -+ for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) -+ { -+ psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx]; -+ if (psHeapDmaAlloc->sBusAddr.uiAddr && uiAddr >= psHeapDmaAlloc->sBusAddr.uiAddr) -+ { -+ IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size; -+ IMG_UINT64 uiOffset = uiAddr - psHeapDmaAlloc->sBusAddr.uiAddr; -+ -+ if (uiOffset < uiSpan) -+ { -+ PVR_ASSERT((uiOffset+ui64Size-1) < uiSpan); -+ pvDMAVirtAddr = psHeapDmaAlloc->pvVirtAddr + uiOffset; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "DMA: remap: PA: 0x%llx => VA: 0x%p", -+ uiAddr, pvDMAVirtAddr)); -+ -+ break; -+ } -+ } -+ } -+ -+ return pvDMAVirtAddr; -+} -+ -+/*! -+****************************************************************************** -+ @Function SysDmaCpuVAddrToDevPAddr -+ -+ @Description Maps a DMA_ALLOC CPU virtual address to physical address -+ -+ @Return Non-zero value on success. 
Otherwise, a 0 -+ ******************************************************************************/ -+IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr) -+{ -+ IMG_UINT64 uiAddr = 0; -+ DMA_ALLOC *psHeapDmaAlloc; -+ IMG_UINT32 ui32Idx; -+ -+ if (gbEnableDmaIoRemapping == IMG_FALSE) -+ { -+ return uiAddr; -+ } -+ -+ for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) -+ { -+ psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx]; -+ if (psHeapDmaAlloc->pvVirtAddr && pvDMAVirtAddr >= psHeapDmaAlloc->pvVirtAddr) -+ { -+ IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size; -+ IMG_UINT64 uiOffset = pvDMAVirtAddr - psHeapDmaAlloc->pvVirtAddr; -+ -+ if (uiOffset < uiSpan) -+ { -+ uiAddr = psHeapDmaAlloc->sBusAddr.uiAddr + uiOffset; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "DMA: remap: VA: 0x%p => PA: 0x%llx", -+ pvDMAVirtAddr, uiAddr)); -+ -+ break; -+ } -+ } -+ } -+ -+ return uiAddr; -+} -+ -+/****************************************************************************** -+ End of file (dma_support.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/dma_support.h b/drivers/gpu/drm/img-rogue/dma_support.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/dma_support.h -@@ -0,0 +1,117 @@ -+/*************************************************************************/ /*! -+@File dma_support.h -+@Title Device contiguous memory allocator and I/O re-mapper -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description This header provides a contiguous memory allocator API; mainly -+ used for allocating / ioremapping (DMA/PA <-> CPU/VA) -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
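SysDmaDevPAddrToCpuVAddr() and SysDmaCpuVAddrToDevPAddr() above both walk the small gsDmaIoRemapArray table and translate by offset within whichever registered span contains the address. The standalone C sketch below demonstrates the same bookkeeping with invented addresses; nothing is really DMA-mapped, it only shows the offset arithmetic and the use of NULL/0 as the "no match" results.

/*
 * Standalone sketch of the offset-based translation used above:
 * remember (bus address, CPU address, span) per registered buffer and
 * translate by offset.  Addresses are invented for the demonstration.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_REMAP_ENTRIES 2

struct remap_entry {
    uint64_t  bus_addr;   /* device-visible address */
    uint8_t  *cpu_addr;   /* CPU mapping of the same memory */
    uint64_t  span;       /* size of the mapping in bytes */
};

static struct remap_entry remap_table[MAX_REMAP_ENTRIES];

static int remap_register(uint64_t bus, uint8_t *cpu, uint64_t span)
{
    for (int i = 0; i < MAX_REMAP_ENTRIES; i++) {
        if (remap_table[i].cpu_addr == NULL) {
            remap_table[i] = (struct remap_entry){ bus, cpu, span };
            return 0;
        }
    }
    return -1;                      /* table full */
}

static uint8_t *bus_to_cpu(uint64_t bus)
{
    for (int i = 0; i < MAX_REMAP_ENTRIES; i++) {
        struct remap_entry *e = &remap_table[i];
        if (e->cpu_addr && bus >= e->bus_addr && bus - e->bus_addr < e->span)
            return e->cpu_addr + (bus - e->bus_addr);
    }
    return NULL;                    /* not covered by any registration */
}

static uint64_t cpu_to_bus(const uint8_t *cpu)
{
    for (int i = 0; i < MAX_REMAP_ENTRIES; i++) {
        struct remap_entry *e = &remap_table[i];
        if (e->cpu_addr && cpu >= e->cpu_addr &&
            (uint64_t)(cpu - e->cpu_addr) < e->span)
            return e->bus_addr + (uint64_t)(cpu - e->cpu_addr);
    }
    return 0;                       /* 0 doubles as "no match", as in the driver */
}

int main(void)
{
    uint8_t *buf = calloc(1, 4096);
    uint64_t fake_bus = 0x80000000ULL;   /* invented bus address */

    remap_register(fake_bus, buf, 4096);

    printf("bus 0x%llx + 0x100 -> CPU %p\n",
           (unsigned long long)fake_bus, (void *)bus_to_cpu(fake_bus + 0x100));
    printf("CPU %p -> bus 0x%llx\n",
           (void *)(buf + 0x200), (unsigned long long)cpu_to_bus(buf + 0x200));

    free(buf);
    return 0;
}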
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef DMA_SUPPORT_H -+#define DMA_SUPPORT_H -+ -+#include "osfunc.h" -+#include "pvrsrv.h" -+ -+typedef struct _DMA_ALLOC_ -+{ -+ IMG_UINT64 ui64Size; -+ IMG_CPU_VIRTADDR pvVirtAddr; -+ IMG_DEV_PHYADDR sBusAddr; -+ IMG_HANDLE hHandle; -+#if defined(__linux__) -+ struct page *psPage; -+ pgprot_t PageProps; -+#endif -+ void *pvOSDevice; -+} DMA_ALLOC; -+ -+/*! -+******************************************************************************* -+ @Function SysDmaAllocMem -+ @Description Allocates physically contiguous memory -+ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code -+******************************************************************************/ -+PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc); -+ -+/*! -+******************************************************************************* -+ @Function SysDmaFreeMem -+ @Description Free physically contiguous memory -+ @Return void -+******************************************************************************/ -+void SysDmaFreeMem(DMA_ALLOC *psCmaAlloc); -+ -+/*! -+******************************************************************************* -+ @Function SysDmaRegisterForIoRemapping -+ @Description Registers DMA_ALLOC for manual I/O remapping -+ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code -+******************************************************************************/ -+PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc); -+ -+/*! -+******************************************************************************* -+ @Function SysDmaDeregisterForIoRemapping -+ @Description Deregisters DMA_ALLOC from manual I/O remapping -+ @Return void -+******************************************************************************/ -+void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc); -+ -+/*! -+******************************************************************************* -+ @Function SysDmaDevPAddrToCpuVAddr -+ @Description Maps a DMA_ALLOC physical address to CPU virtual address -+ @Return IMG_CPU_VIRTADDR on success. Otherwise, a NULL -+******************************************************************************/ -+IMG_CPU_VIRTADDR -+SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size); -+ -+/*! -+******************************************************************************* -+ @Function SysDmaCpuVAddrToDevPAddr -+ @Description Maps a DMA_ALLOC CPU virtual address to physical address -+ @Return Non-zero value on success. 
Otherwise, a 0 -+******************************************************************************/ -+IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr); -+ -+#endif /* DMA_SUPPORT_H */ -+ -+/****************************************************************************** -+ End of file (dma_support.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/env_connection.h b/drivers/gpu/drm/img-rogue/env_connection.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/env_connection.h -@@ -0,0 +1,92 @@ -+/*************************************************************************/ /*! -+@File -+@Title Server side connection management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Linux specific server side connection management -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#if !defined(ENV_CONNECTION_H) -+#define ENV_CONNECTION_H -+ -+#include -+#include -+#include -+ -+#include "handle.h" -+#include "pvr_debug.h" -+#include "device.h" -+ -+#if defined(SUPPORT_ION) -+#include PVR_ANDROID_ION_HEADER -+#include "ion_sys.h" -+#include "allocmem.h" -+#endif -+ -+typedef struct _ENV_CONNECTION_PRIVATE_DATA_ -+{ -+ PVRSRV_DEVICE_NODE *psDevNode; -+} ENV_CONNECTION_PRIVATE_DATA; -+ -+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+#define ION_CLIENT_NAME_SIZE 50 -+ -+typedef struct _ENV_ION_CONNECTION_DATA_ -+{ -+ IMG_CHAR azIonClientName[ION_CLIENT_NAME_SIZE]; -+ struct ion_device *psIonDev; -+ struct ion_client *psIonClient; -+} ENV_ION_CONNECTION_DATA; -+#endif -+ -+typedef struct _ENV_CONNECTION_DATA_ -+{ -+ pid_t owner; -+ -+ PVRSRV_DEVICE_NODE *psDevNode; -+ -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) -+ void *pvPvrSyncPrivateData; -+#endif -+ -+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ ENV_ION_CONNECTION_DATA *psIonData; -+#endif -+} ENV_CONNECTION_DATA; -+ -+#endif /* !defined(ENV_CONNECTION_H) */ -diff --git a/drivers/gpu/drm/img-rogue/event.c b/drivers/gpu/drm/img-rogue/event.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/event.c -@@ -0,0 +1,514 @@ -+/*************************************************************************/ /*! -+@File -+@Title Event Object -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0) -+#include -+#endif -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+#include "allocmem.h" -+#include "event.h" -+#include "pvr_debug.h" -+#include "pvrsrv.h" -+#include "pvr_bridge_k.h" -+ -+#include "osfunc.h" -+ -+/* Uncomment to enable event object stats that are useful for debugging. -+ * The stats can be gotten at any time (during lifetime of event object) -+ * using OSEventObjectDumpdebugInfo API */ -+// #define LINUX_EVENT_OBJECT_STATS -+ -+ -+typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG -+{ -+ rwlock_t sLock; -+ /* Counts how many times event object was signalled i.e. how many times -+ * LinuxEventObjectSignal() was called on a given event object. -+ * Used for detecting pending signals. -+ * Note that this is in no way related to OS signals. */ -+ atomic_t sEventSignalCount; -+ struct list_head sList; -+} PVRSRV_LINUX_EVENT_OBJECT_LIST; -+ -+ -+typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG -+{ -+ IMG_UINT32 ui32EventSignalCountPrevious; -+#if defined(DEBUG) -+ IMG_UINT ui32Stats; -+#endif -+ -+#ifdef LINUX_EVENT_OBJECT_STATS -+ POS_LOCK hLock; -+ IMG_UINT32 ui32ScheduleAvoided; -+ IMG_UINT32 ui32ScheduleCalled; -+ IMG_UINT32 ui32ScheduleSleptFully; -+ IMG_UINT32 ui32ScheduleSleptPartially; -+ IMG_UINT32 ui32ScheduleReturnedImmediately; -+#endif -+ wait_queue_head_t sWait; -+ struct list_head sList; -+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList; -+} PVRSRV_LINUX_EVENT_OBJECT; -+ -+/*! -+****************************************************************************** -+ -+ @Function LinuxEventObjectListCreate -+ -+ @Description -+ -+ Linux wait object list creation -+ -+ @Output hOSEventKM : Pointer to the event object list handle -+ -+ @Return PVRSRV_ERROR : Error code -+ -+******************************************************************************/ -+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList) -+{ -+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList; -+ -+ psEvenObjectList = OSAllocMem(sizeof(*psEvenObjectList)); -+ if (psEvenObjectList == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectCreate: failed to allocate memory for event list")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ INIT_LIST_HEAD(&psEvenObjectList->sList); -+ -+ rwlock_init(&psEvenObjectList->sLock); -+ atomic_set(&psEvenObjectList->sEventSignalCount, 0); -+ -+ *phEventObjectList = (IMG_HANDLE *) psEvenObjectList; -+ -+ return PVRSRV_OK; -+} -+ -+/*! 
-+****************************************************************************** -+ -+ @Function LinuxEventObjectListDestroy -+ -+ @Description -+ -+ Linux wait object list destruction -+ -+ @Input hOSEventKM : Event object list handle -+ -+ @Return PVRSRV_ERROR : Error code -+ -+******************************************************************************/ -+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList) -+{ -+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList; -+ -+ if (psEvenObjectList) -+ { -+ if (!list_empty(&psEvenObjectList->sList)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event List is not empty")); -+ return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT; -+ } -+ OSFreeMem(psEvenObjectList); -+ /*not nulling pointer, copy on stack*/ -+ } -+ return PVRSRV_OK; -+} -+ -+ -+/*! -+****************************************************************************** -+ -+ @Function LinuxEventObjectDelete -+ -+ @Description -+ -+ Linux wait object removal -+ -+ @Input hOSEventObject : Event object handle -+ -+ @Return PVRSRV_ERROR : Error code -+ -+******************************************************************************/ -+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject) -+{ -+ if (hOSEventObject) -+ { -+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject; -+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList; -+ -+ write_lock_bh(&psLinuxEventObjectList->sLock); -+ list_del(&psLinuxEventObject->sList); -+ write_unlock_bh(&psLinuxEventObjectList->sLock); -+ -+#ifdef LINUX_EVENT_OBJECT_STATS -+ OSLockDestroy(psLinuxEventObject->hLock); -+#endif -+ -+#if defined(DEBUG) -+// PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDelete: Event object waits: %u", psLinuxEventObject->ui32Stats)); -+#endif -+ -+ OSFreeMem(psLinuxEventObject); -+ /*not nulling pointer, copy on stack*/ -+ -+ return PVRSRV_OK; -+ } -+ return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT; -+} -+ -+/*! 
-+****************************************************************************** -+ -+ @Function LinuxEventObjectAdd -+ -+ @Description -+ -+ Linux wait object addition -+ -+ @Input hOSEventObjectList : Event object list handle -+ @Output phOSEventObject : Pointer to the event object handle -+ -+ @Return PVRSRV_ERROR : Error code -+ -+******************************************************************************/ -+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject) -+ { -+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject; -+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList; -+ -+ /* allocate completion variable */ -+ psLinuxEventObject = OSAllocMem(sizeof(*psLinuxEventObject)); -+ if (psLinuxEventObject == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ INIT_LIST_HEAD(&psLinuxEventObject->sList); -+ -+ /* Start with the timestamp at which event object was added to the list */ -+ psLinuxEventObject->ui32EventSignalCountPrevious = atomic_read(&psLinuxEventObjectList->sEventSignalCount); -+ -+#ifdef LINUX_EVENT_OBJECT_STATS -+ PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&psLinuxEventObject->hLock), "OSLockCreate"); -+ psLinuxEventObject->ui32ScheduleAvoided = 0; -+ psLinuxEventObject->ui32ScheduleCalled = 0; -+ psLinuxEventObject->ui32ScheduleSleptFully = 0; -+ psLinuxEventObject->ui32ScheduleSleptPartially = 0; -+ psLinuxEventObject->ui32ScheduleReturnedImmediately = 0; -+#endif -+ -+#if defined(DEBUG) -+ psLinuxEventObject->ui32Stats = 0; -+#endif -+ init_waitqueue_head(&psLinuxEventObject->sWait); -+ -+ psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList; -+ -+ write_lock_bh(&psLinuxEventObjectList->sLock); -+ list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList); -+ write_unlock_bh(&psLinuxEventObjectList->sLock); -+ -+ *phOSEventObject = psLinuxEventObject; -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+****************************************************************************** -+ -+ @Function LinuxEventObjectSignal -+ -+ @Description -+ -+ Linux wait object signaling function -+ -+ @Input hOSEventObjectList : Event object list handle -+ -+ @Return PVRSRV_ERROR : Error code -+ -+******************************************************************************/ -+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList) -+{ -+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject; -+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList; -+ struct list_head *psListEntry, *psListEntryTemp, *psList; -+ psList = &psLinuxEventObjectList->sList; -+ -+ /* Move the timestamp ahead for this call, so a potential "Wait" from any -+ * EventObject/s doesn't wait for the signal to occur before returning. 
Early -+ * setting/incrementing of timestamp reduces the window where a concurrent -+ * "Wait" call might block while "this" Signal call is being processed */ -+ atomic_inc(&psLinuxEventObjectList->sEventSignalCount); -+ -+ read_lock_bh(&psLinuxEventObjectList->sLock); -+ list_for_each_safe(psListEntry, psListEntryTemp, psList) -+ { -+ psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList); -+ wake_up_interruptible(&psLinuxEventObject->sWait); -+ } -+ read_unlock_bh(&psLinuxEventObjectList->sLock); -+ -+ return PVRSRV_OK; -+} -+ -+static void _TryToFreeze(void) -+{ -+ /* if we reach zero it means that all of the threads called try_to_freeze */ -+ LinuxBridgeNumActiveKernelThreadsDecrement(); -+ -+ /* Returns true if the thread was frozen, should we do anything with this -+ * information? What do we return? Which one is the error case? */ -+ try_to_freeze(); -+ -+ LinuxBridgeNumActiveKernelThreadsIncrement(); -+} -+ -+void LinuxEventObjectDumpDebugInfo(IMG_HANDLE hOSEventObject) -+{ -+#ifdef LINUX_EVENT_OBJECT_STATS -+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject; -+ -+ OSLockAcquire(psLinuxEventObject->hLock); -+ PVR_LOG(("%s: EvObj(%p) schedule: Avoided(%u) Called(%u) ReturnedImmediately(%u) SleptFully(%u) SleptPartially(%u)", -+ __func__, psLinuxEventObject, psLinuxEventObject->ui32ScheduleAvoided, -+ psLinuxEventObject->ui32ScheduleCalled, psLinuxEventObject->ui32ScheduleReturnedImmediately, -+ psLinuxEventObject->ui32ScheduleSleptFully, psLinuxEventObject->ui32ScheduleSleptPartially)); -+ OSLockRelease(psLinuxEventObject->hLock); -+#else -+ PVR_LOG(("%s: LINUX_EVENT_OBJECT_STATS disabled!", __func__)); -+#endif -+} -+ -+/*! -+****************************************************************************** -+ -+ @Function LinuxEventObjectWait -+ -+ @Description -+ -+ Linux wait object routine -+ -+ @Input hOSEventObject : Event object handle -+ -+ @Input ui64Timeoutus : Time out value in usec -+ -+ @Return PVRSRV_ERROR : Error code -+ -+******************************************************************************/ -+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, -+ IMG_UINT64 ui64Timeoutus, -+ IMG_BOOL bFreezable) -+{ -+ IMG_UINT32 ui32EventSignalCount; -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ IMG_UINT32 ui32Remainder; -+ long timeOutJiffies; -+#ifdef LINUX_EVENT_OBJECT_STATS -+ long totalTimeoutJiffies; -+ IMG_BOOL bScheduleCalled = IMG_FALSE; -+#endif -+ -+ DEFINE_WAIT(sWait); -+ -+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject; -+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList; -+ -+ /* Check if the driver is good shape */ -+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) -+ { -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ /* usecs_to_jiffies only takes an uint. So if our timeout is bigger than an -+ * uint use the msec version. With such a long timeout we really don't need -+ * the high resolution of usecs. 
*/ -+ if (ui64Timeoutus > 0xffffffffULL) -+ timeOutJiffies = msecs_to_jiffies(OSDivide64(ui64Timeoutus, 1000, &ui32Remainder)); -+ else -+ timeOutJiffies = usecs_to_jiffies(ui64Timeoutus); -+ -+#ifdef LINUX_EVENT_OBJECT_STATS -+ totalTimeoutJiffies = timeOutJiffies; -+#endif -+ -+ do -+ { -+ prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE); -+ ui32EventSignalCount = (IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount); -+ -+ if (psLinuxEventObject->ui32EventSignalCountPrevious != ui32EventSignalCount) -+ { -+ /* There is a pending event signal i.e. LinuxEventObjectSignal() -+ * was called on the event object since the last time we checked. -+ * Return without waiting. */ -+ break; -+ } -+ -+ if (signal_pending(current)) -+ { -+ /* There is an OS signal pending so return. -+ * This allows to kill/interrupt user space processes which -+ * are waiting on this event object. */ -+ break; -+ } -+ -+#ifdef LINUX_EVENT_OBJECT_STATS -+ bScheduleCalled = IMG_TRUE; -+#endif -+ timeOutJiffies = schedule_timeout(timeOutJiffies); -+ -+ if (bFreezable) -+ { -+ _TryToFreeze(); -+ } -+ -+#if defined(DEBUG) -+ psLinuxEventObject->ui32Stats++; -+#endif -+ -+ -+ } while (timeOutJiffies); -+ -+ finish_wait(&psLinuxEventObject->sWait, &sWait); -+ -+ psLinuxEventObject->ui32EventSignalCountPrevious = ui32EventSignalCount; -+ -+#ifdef LINUX_EVENT_OBJECT_STATS -+ OSLockAcquire(psLinuxEventObject->hLock); -+ if (bScheduleCalled) -+ { -+ psLinuxEventObject->ui32ScheduleCalled++; -+ if (totalTimeoutJiffies == timeOutJiffies) -+ { -+ psLinuxEventObject->ui32ScheduleReturnedImmediately++; -+ } -+ else if (timeOutJiffies == 0) -+ { -+ psLinuxEventObject->ui32ScheduleSleptFully++; -+ } -+ else -+ { -+ psLinuxEventObject->ui32ScheduleSleptPartially++; -+ } -+ } -+ else -+ { -+ psLinuxEventObject->ui32ScheduleAvoided++; -+ } -+ OSLockRelease(psLinuxEventObject->hLock); -+#endif -+ -+ if (signal_pending(current) && test_tsk_thread_flag(current, TIF_SIGPENDING)) -+ { -+ return PVRSRV_ERROR_INTERRUPTED; -+ } -+ else -+ { -+ return timeOutJiffies ? 
PVRSRV_OK : PVRSRV_ERROR_TIMEOUT; -+ } -+} -+ -+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) -+ -+PVRSRV_ERROR LinuxEventObjectWaitUntilSignalled(IMG_HANDLE hOSEventObject) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ -+ DEFINE_WAIT(sWait); -+ -+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = -+ (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject; -+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = -+ psLinuxEventObject->psLinuxEventObjectList; -+ -+ /* Check if the driver is in good shape */ -+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) -+ { -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE); -+ -+ if (psLinuxEventObject->ui32EventSignalCountPrevious != -+ (IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount)) -+ { -+ /* There is a pending signal, so return without waiting */ -+ goto finish; -+ } -+ -+ schedule(); -+ -+ _TryToFreeze(); -+ -+finish: -+ finish_wait(&psLinuxEventObject->sWait, &sWait); -+ -+ psLinuxEventObject->ui32EventSignalCountPrevious = -+ (IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount); -+ -+ return PVRSRV_OK; -+} -+ -+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ -diff --git a/drivers/gpu/drm/img-rogue/event.h b/drivers/gpu/drm/img-rogue/event.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/event.h -@@ -0,0 +1,54 @@ -+/*************************************************************************/ /*! -+@File -+@Title Event Object -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
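The event code above avoids lost wake-ups by having every LinuxEventObjectSignal() bump a shared count while each waiter remembers the count it last consumed: if the count has moved since then, the wait returns without sleeping. The standalone pthread sketch below demonstrates that pattern with a mutex and condition variable standing in for the kernel wait queue; the names are invented, and freezing, timeouts and OS signals are left out.

/*
 * Standalone sketch of the "signal count" pattern used above: every
 * signal bumps a counter, every waiter remembers the count it last
 * consumed, and a wait returns immediately if the counter has moved,
 * so a signal that lands just before the sleep is not lost.
 */
#include <pthread.h>
#include <stdio.h>

struct event_list {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    unsigned long   signal_count;   /* advanced by every signal */
};

struct event_object {
    struct event_list *list;
    unsigned long      last_seen;   /* count last consumed by this waiter */
};

static void event_signal(struct event_list *el)
{
    pthread_mutex_lock(&el->lock);
    el->signal_count++;             /* advance before waking, as LinuxEventObjectSignal() does */
    pthread_cond_broadcast(&el->cond);
    pthread_mutex_unlock(&el->lock);
}

static void event_wait(struct event_object *eo)
{
    struct event_list *el = eo->list;

    pthread_mutex_lock(&el->lock);
    /* A pending signal (count moved since our last wait) means we
     * return without sleeping, like the early "break" in
     * LinuxEventObjectWait(). */
    while (el->signal_count == eo->last_seen)
        pthread_cond_wait(&el->cond, &el->lock);
    eo->last_seen = el->signal_count;
    pthread_mutex_unlock(&el->lock);
}

static struct event_list g_list = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

static void *waiter(void *arg)
{
    struct event_object eo = { &g_list, 0 };
    (void)arg;
    event_wait(&eo);
    printf("waiter consumed signal %lu\n", eo.last_seen);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, waiter, NULL);
    event_signal(&g_list);   /* even if this runs before the waiter sleeps,
                                the counter check prevents a lost wake-up */
    pthread_join(t, NULL);
    return 0;
}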
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList); -+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList); -+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject); -+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject); -+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList); -+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, -+ IMG_UINT64 ui64Timeoutus, -+ IMG_BOOL bFreezable); -+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) -+PVRSRV_ERROR LinuxEventObjectWaitUntilSignalled(IMG_HANDLE hOSEventObject); -+#endif -+void LinuxEventObjectDumpDebugInfo(IMG_HANDLE hOSEventObject); -diff --git a/drivers/gpu/drm/img-rogue/fwload.c b/drivers/gpu/drm/img-rogue/fwload.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/fwload.c -@@ -0,0 +1,255 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services firmware load and access routines for Linux -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device specific functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
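Taken together, the event.h prototypes above are driven as create-list, add-object, signal, wait, then tear down. A hedged lifecycle sketch follows; it only builds inside the driver tree, error handling is trimmed, and the 100 ms timeout is an arbitrary example value rather than anything the driver prescribes.

/*
 * Hedged lifecycle sketch (driver-tree code, not standalone): how the
 * event.h API above fits together.  Error handling is trimmed and the
 * 100 ms timeout is an arbitrary example value.
 */
#include "img_defs.h"
#include "pvrsrv_error.h"
#include "event.h"

static PVRSRV_ERROR ExampleEventRoundTrip(void)
{
	IMG_HANDLE hList, hObject;
	PVRSRV_ERROR eError;

	eError = LinuxEventObjectListCreate(&hList);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	eError = LinuxEventObjectAdd(hList, &hObject);
	if (eError != PVRSRV_OK)
	{
		goto destroy_list;
	}

	/* Normally another thread signals when work completes; signalling
	 * first simply means the wait below returns without sleeping. */
	LinuxEventObjectSignal(hList);

	/* Wait up to 100 ms (the timeout is in microseconds), non-freezable. */
	eError = LinuxEventObjectWait(hObject, 100000ULL, IMG_FALSE);

	LinuxEventObjectDelete(hObject);

destroy_list:
	LinuxEventObjectListDestroy(hList);
	return eError;
}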
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include -+#include -+#include -+#include -+ -+#include "device.h" -+#include "module_common.h" -+#include "fwload.h" -+#include "pvr_debug.h" -+#include "srvkm.h" -+ -+#if defined(RGX_FW_SIGNED) -+ -+#include -+#include -+#include -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) -+#include -+#else -+#define PKEY_ID_PKCS7 2 -+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) */ -+ -+#include "signfw.h" -+#endif /* RGX_FW_SIGNED */ -+ -+struct OS_FW_IMAGE_t -+{ -+ const struct firmware *psFW; -+ size_t uSignatureSize; -+}; -+ -+#if defined(RGX_FW_SIGNED) -+ -+static int OSCheckSignature(const struct FirmwareSignatureHeader *psHeader, size_t uSize) -+{ -+ if (be32_to_cpu(psHeader->ui32SignatureLen) >= uSize - sizeof(*psHeader)) -+ { -+ return -EBADMSG; -+ } -+ -+ if (psHeader->ui8IDType != PKEY_ID_PKCS7) -+ { -+ return -ENOPKG; -+ } -+ -+ if (psHeader->ui8Algo != 0 || psHeader->ui8HashAlgo != 0 || -+ psHeader->ui8SignerLen != 0 || psHeader->ui8KeyIDLen != 0 || -+ psHeader->__ui8Padding[0] != 0 || psHeader->__ui8Padding[1] != 0 || -+ psHeader->__ui8Padding[2] != 0) -+ { -+ return -EBADMSG; -+ } -+ -+ return 0; -+} -+ -+bool OSVerifyFirmware(OS_FW_IMAGE *psFWImage) -+{ -+ const struct firmware *psFW = psFWImage->psFW; -+ const u8 *pui8FWData = psFW->data; -+ size_t uFWSize = psFW->size; -+ uint32_t ui32MagicLen = sizeof(MODULE_SIG_STRING) - 1; -+ struct FirmwareSignatureHeader sHeader; -+ int err; -+ -+ if (uFWSize <= ui32MagicLen) -+ { -+ return false; -+ } -+ -+ /* -+ * Linux Kernel's sign-file utility is primarily intended for signing -+ * modules, and so appends the MODULE_SIG_STRING magic at the end of -+ * the signature. Only proceed with verification if this magic is found. -+ */ -+ if (memcmp(pui8FWData + uFWSize - ui32MagicLen, MODULE_SIG_STRING, ui32MagicLen) != 0) -+ { -+ return false; -+ } -+ -+ uFWSize -= ui32MagicLen; -+ if (uFWSize <= sizeof(sHeader)) -+ { -+ return false; -+ } -+ -+ /* -+ * After the magic, a header is placed which informs about the digest / -+ * crypto algorithm etc. Copy that header and ensure that it has valid -+ * contents (We only support RSA Crypto, SHA Hash, X509 certificate and -+ * PKCS#7 signature). -+ */ -+ memcpy(&sHeader, pui8FWData + (uFWSize - sizeof(sHeader)), sizeof(sHeader)); -+ if (OSCheckSignature(&sHeader, uFWSize) != 0) -+ { -+ return false; -+ } -+ -+ /* -+ * As all information is now extracted, we can go ahead and ask PKCS -+ * module to verify the sign. 
-+ */ -+ uFWSize -= be32_to_cpu(sHeader.ui32SignatureLen) + sizeof(sHeader); -+ err = verify_pkcs7_signature(pui8FWData, uFWSize, pui8FWData + uFWSize, -+ be32_to_cpu(sHeader.ui32SignatureLen), NULL, -+ VERIFYING_UNSPECIFIED_SIGNATURE, NULL, NULL); -+ if (err == 0) -+ { -+ psFWImage->uSignatureSize = psFW->size - uFWSize; -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware Successfully Verified", -+ __func__)); -+ return true; -+ } -+ -+ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Verification Failed (%d)", -+ __func__, err)); -+ return false; -+} -+ -+#else /* defined(RGX_FW_SIGNED) */ -+ -+inline bool OSVerifyFirmware(OS_FW_IMAGE *psFWImage) -+{ -+ return true; -+} -+ -+#endif /* defined(RGX_FW_SIGNED) */ -+ -+PVRSRV_ERROR -+OSLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, const IMG_CHAR *pszBVNCString, -+ bool (*pfnVerifyFirmware)(OS_FW_IMAGE*), OS_FW_IMAGE **ppsFWImage) -+{ -+ const struct firmware *psFW = NULL; -+ OS_FW_IMAGE *psFWImage; -+ IMG_INT32 res; -+ PVRSRV_ERROR eError; -+ -+ res = request_firmware(&psFW, pszBVNCString, psDeviceNode->psDevConfig->pvOSDevice); -+ if (res != 0) -+ { -+ release_firmware(psFW); -+ if (res == -ENOENT) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: request_firmware('%s') not found (%d)", -+ __func__, pszBVNCString, res)); -+ eError = PVRSRV_ERROR_NOT_FOUND; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: request_firmware('%s') not ready (%d)", -+ __func__, pszBVNCString, res)); -+ eError = PVRSRV_ERROR_NOT_READY; -+ } -+ goto err_exit; -+ } -+ -+ psFWImage = OSAllocZMem(sizeof(*psFWImage)); -+ if (psFWImage == NULL) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: OSAllocZMem('%s') failed.", -+ __func__, pszBVNCString)); -+ -+ release_firmware(psFW); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto err_exit; -+ } -+ -+ psFWImage->psFW = psFW; -+ if (pfnVerifyFirmware != NULL && !pfnVerifyFirmware(psFWImage)) -+ { -+ release_firmware(psFW); -+ OSFreeMem(psFWImage); -+ eError = PVRSRV_ERROR_NOT_AUTHENTICATED; -+ goto err_exit; -+ } -+ -+ *ppsFWImage = psFWImage; -+ return PVRSRV_OK; -+ -+err_exit: -+ *ppsFWImage = NULL; -+ return eError; -+} -+ -+void -+OSUnloadFirmware(OS_FW_IMAGE *psFWImage) -+{ -+ const struct firmware *psFW = psFWImage->psFW; -+ -+ release_firmware(psFW); -+ OSFreeMem(psFWImage); -+} -+ -+size_t -+OSFirmwareSize(OS_FW_IMAGE *psFWImage) -+{ -+ const struct firmware *psFW = psFWImage->psFW; -+ return psFW->size - psFWImage->uSignatureSize; -+} -+ -+const void * -+OSFirmwareData(OS_FW_IMAGE *psFWImage) -+{ -+ const struct firmware *psFW = psFWImage->psFW; -+ -+ return psFW->data; -+} -+ -+/****************************************************************************** -+ End of file (fwload.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/fwload.h b/drivers/gpu/drm/img-rogue/fwload.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/fwload.h -@@ -0,0 +1,158 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services RGX OS Interface for loading the firmware -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description This file defines the OS interface through which the RGX -+ device initialisation code in the kernel/server will obtain -+ the RGX firmware binary image. The API is used during the -+ initialisation of an RGX device via the -+ PVRSRVCommonDeviceInitialise() -+ call sequence. 
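OSVerifyFirmware() above assumes the signed image layout "payload | PKCS#7 signature | signature header | magic string" and peels it apart from the end before handing the signature to verify_pkcs7_signature(). The standalone sketch below parses the same trailer layout from a byte buffer; the header struct is a simplified stand-in for FirmwareSignatureHeader, the magic mirrors the kernel's MODULE_SIG_STRING, and no cryptographic verification is attempted.

/*
 * Standalone sketch of the trailer layout OSVerifyFirmware() expects:
 * payload | signature | header | magic.  The header struct here is a
 * simplified stand-in and no cryptographic check is performed.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* ntohl()/htonl() for the big-endian length field */

#define SIG_MAGIC "~Module signature appended~\n"

struct sig_header {            /* simplified stand-in for the real header */
    uint8_t  algo, hash, id_type, signer_len, key_id_len, pad[3];
    uint32_t sig_len;          /* big-endian length of the signature blob */
};

/* Returns the payload length, or 0 if the trailer does not parse. */
static size_t split_signed_image(const uint8_t *img, size_t len,
                                 const uint8_t **sig, size_t *sig_len)
{
    const size_t magic_len = sizeof(SIG_MAGIC) - 1;
    struct sig_header hdr;

    if (len <= magic_len ||
        memcmp(img + len - magic_len, SIG_MAGIC, magic_len) != 0)
        return 0;                               /* no signature appended */
    len -= magic_len;

    if (len <= sizeof(hdr))
        return 0;
    memcpy(&hdr, img + len - sizeof(hdr), sizeof(hdr));
    len -= sizeof(hdr);

    *sig_len = ntohl(hdr.sig_len);
    if (*sig_len >= len)
        return 0;                               /* signature longer than image */
    len -= *sig_len;

    *sig = img + len;                           /* signature sits right after the payload */
    return len;                                 /* payload bytes to hand to the device */
}

int main(void)
{
    /* Tiny fabricated "image": 4-byte payload, 3-byte "signature". */
    uint8_t img[4 + 3 + sizeof(struct sig_header) + sizeof(SIG_MAGIC) - 1] = {
        0xde, 0xad, 0xbe, 0xef, 0x01, 0x02, 0x03
    };
    struct sig_header hdr = { 0 };
    const uint8_t *sig;
    size_t sig_len;

    hdr.sig_len = htonl(3);
    memcpy(img + 7, &hdr, sizeof(hdr));
    memcpy(img + 7 + sizeof(hdr), SIG_MAGIC, sizeof(SIG_MAGIC) - 1);

    printf("payload bytes: %zu\n", split_signed_image(img, sizeof(img), &sig, &sig_len));
    printf("signature bytes: %zu\n", sig_len);
    return 0;
}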
-+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef FWLOAD_H -+#define FWLOAD_H -+ -+#include "img_defs.h" -+#include "device_connection.h" -+#include "device.h" -+ -+/*! Opaque type handle defined and known to the OS layer implementation of this -+ * fwload.h OS API. This private data is allocated in the implementation of -+ * OSLoadFirmware() and contains whatever data and information needed to be -+ * able to acquire and return the firmware binary image to the Services -+ * kernel/server during initialisation. -+ * It is no longer required and may be freed when OSUnloadFirmware() is called. -+ */ -+typedef struct OS_FW_IMAGE_t OS_FW_IMAGE; -+ -+#if defined(__linux__) -+ -+bool OSVerifyFirmware(OS_FW_IMAGE* psFWImage); -+ -+#endif -+ -+/*************************************************************************/ /*! -+@Function OSLoadFirmware -+@Description The OS implementation must load or acquire the firmware (FW) -+ image binary needed by the driver stack. -+ A handle to the common layer device node is given to identify -+ which device instance in the system is being initialised. The -+ BVNC string is also supplied so that the implementation knows -+ which FW image to retrieve since each FW image only supports one -+ GPU type/revision. -+ The calling server code supports multiple GPU types and revisions -+ and will detect the specific GPU type and revision before calling -+ this API. 
It will also have runtime configuration of the VZ mode, -+ hence this API must be able to retrieve different FW binary -+ images based on the pszBVNCString given. The purpose of the end -+ platform/system is key to understand which FW images must be -+ available to the kernel server. -+ On exit the implementation must return a pointer to some private -+ data it uses to hold the FW image information and data. It will -+ be passed onto later API calls by the kernel server code. -+ NULL should be returned if the FW image could not be retrieved. -+ The format of the BVNC string is as follows ([x] denotes -+ optional field): -+ "rgx.fw[.signed].B.V[p].N.C[.vz]" -+ The implementation must first try to load the FW identified -+ by the pszBVpNCString parameter. If this is not available then it -+ should drop back to retrieving the FW identified by the -+ pszBVNCString parameter. The fields in the string are: -+ B, V, N, C are all unsigned integer identifying type/revision. -+ [.signed] is present when RGX_FW_SIGNED=1 is defined in the -+ server build. -+ [p] denotes a provisional (pre-silicon) GPU configuration. -+ [.vz] is present when the kernel server is loaded on the HOST -+ of a virtualised platform. See the DriverMode server -+ AppHint for details. -+ -+@Input psDeviceNode Device instance identifier. -+@Input pszBVNCString Identifier string of the FW image to -+ be loaded/acquired in production driver. -+@Input pfnVerifyFirmware Callback which checks validity of FW image. -+@Output ppsFWImage Ptr to private data on success, -+ NULL otherwise. -+@Return PVRSRV_ERROR PVRSRV_OK on success, -+ PVRSRV_ERROR_NOT_READY if filesystem is not -+ ready/initialised, -+ PVRSRV_ERROR_NOT_FOUND if no suitable FW -+ image could be found -+ PVRSRV_ERROR_OUT_OF_MEMORY if unable to alloc -+ memory for FW image -+ PVRSRV_ERROR_NOT_AUTHENTICATED if FW image -+ cannot be verified. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszBVNCString, -+ bool (*pfnVerifyFirmware)(OS_FW_IMAGE*), -+ OS_FW_IMAGE **ppsFWImage); -+ -+/*************************************************************************/ /*! -+@Function OSFirmwareData -+@Description This function returns a pointer to the start of the FW image -+ binary data held in memory. It must remain valid until -+ OSUnloadFirmware() is called. -+@Input psFWImage Private data opaque handle -+@Return void* Ptr to FW binary image to start on GPU. -+*/ /**************************************************************************/ -+const void* OSFirmwareData(OS_FW_IMAGE *psFWImage); -+ -+/*************************************************************************/ /*! -+@Function OSFirmwareSize -+@Description This function returns the size of the FW image binary data. -+@Input psFWImage Private data opaque handle -+@Return size_t Size in bytes of the firmware binary image -+*/ /**************************************************************************/ -+size_t OSFirmwareSize(OS_FW_IMAGE *psFWImage); -+ -+/*************************************************************************/ /*! -+@Function OSUnloadFirmware -+@Description This is called when the server has completed firmware -+ initialisation and no longer needs the private data, possibly -+ allocated by OSLoadFirmware(). 
-+@Input psFWImage Private data opaque handle -+*/ /**************************************************************************/ -+void OSUnloadFirmware(OS_FW_IMAGE *psFWImage); -+ -+#endif /* FWLOAD_H */ -+ -+/****************************************************************************** -+ End of file (fwload.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/fwtrace_string.h b/drivers/gpu/drm/img-rogue/fwtrace_string.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/fwtrace_string.h -@@ -0,0 +1,52 @@ -+/*************************************************************************/ /*! -+@File fwtrace_string.h -+@Title RGX Firmware trace strings for KM -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Platform Generic -+@Description This file defines SFs tuple. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
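Taken together, the fwload.h entry points above form a simple load -> use -> unload lifecycle. The sketch below is a caller-side illustration only, not DDK code: LoadBootAndReleaseFW() and StartFirmwareOnGPU() are invented names, while the OSLoadFirmware()/OSFirmwareData()/OSFirmwareSize()/OSUnloadFirmware() calls and the error codes are exactly those documented in the header.

```c
#include "fwload.h"

/* Hypothetical consumer of the firmware blob -- not part of the DDK. */
static PVRSRV_ERROR StartFirmwareOnGPU(PVRSRV_DEVICE_NODE *psDeviceNode,
                                       const void *pvFW, size_t uiFWSize);

static PVRSRV_ERROR LoadBootAndReleaseFW(PVRSRV_DEVICE_NODE *psDeviceNode,
                                         const IMG_CHAR *pszBVNCString)
{
	OS_FW_IMAGE *psFWImage = NULL;
	bool (*pfnVerify)(OS_FW_IMAGE *) = NULL;
	PVRSRV_ERROR eError;

#if defined(__linux__)
	pfnVerify = OSVerifyFirmware;   /* only declared for Linux builds */
#endif

	/* Ask the OS layer to resolve the BVNC string to a firmware binary. */
	eError = OSLoadFirmware(psDeviceNode, pszBVNCString, pfnVerify, &psFWImage);
	if (eError != PVRSRV_OK)
	{
		/* e.g. NOT_READY, NOT_FOUND, OUT_OF_MEMORY or NOT_AUTHENTICATED,
		 * as listed in the OSLoadFirmware() documentation above. */
		return eError;
	}

	/* The returned data stays valid until OSUnloadFirmware() is called. */
	eError = StartFirmwareOnGPU(psDeviceNode,
	                            OSFirmwareData(psFWImage),
	                            OSFirmwareSize(psFWImage));

	/* Initialisation is complete; the private FW image data can go. */
	OSUnloadFirmware(psFWImage);

	return eError;
}
```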
-+*/ /**************************************************************************/ -+#ifndef KM_TRACE_STRING_H -+#define KM_TRACE_STRING_H -+ -+#include "rgx_fwif_sf.h" -+ -+extern const RGXKM_STID_FMT SFs[]; -+extern const IMG_UINT32 g_ui32SFsCount; -+ -+#endif /* KM_TRACE_STRING_H */ -diff --git a/drivers/gpu/drm/img-rogue/handle.c b/drivers/gpu/drm/img-rogue/handle.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/handle.c -@@ -0,0 +1,2498 @@ -+/*************************************************************************/ /*! -+@File -+@Title Resource Handle Manager -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Provide resource handle management -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+/* See handle.h for a description of the handle API. */ -+ -+/* -+ * The implementation supports movable handle structures, allowing the address -+ * of a handle structure to change without having to fix up pointers in -+ * any of the handle structures. For example, the linked list mechanism -+ * used to link subhandles together uses handle array indices rather than -+ * pointers to the structures themselves. 
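The comment above captures the central design decision in handle.c: list links store handles (effectively array indices) rather than pointers, so the backing storage can move or be reallocated without any link fix-up. The toy sketch below restates that idea in isolation; it is illustrative only and every name in it is invented. HANDLE_LIST further down applies the same pattern with IMG_HANDLE values in place of the raw indices.

```c
#include <stddef.h>

/* Toy illustration only: entries are linked by index into a single array
 * that may be reallocated, so moving the array never breaks the links. */
typedef struct
{
	size_t uiPrev;   /* index of previous entry (self when the list is empty) */
	size_t uiNext;   /* index of next entry     (self when the list is empty) */
} TOY_LINK;

typedef struct
{
	TOY_LINK sLink;
	void    *pvPayload;
} TOY_ENTRY;

/* Insert entry uiNew immediately after entry uiAt. Only indices are stored,
 * so the operation is unaffected by the array being moved in memory. */
static void ToyInsertAfter(TOY_ENTRY *pasEntries, size_t uiAt, size_t uiNew)
{
	size_t uiNext = pasEntries[uiAt].sLink.uiNext;

	pasEntries[uiNew].sLink.uiPrev  = uiAt;
	pasEntries[uiNew].sLink.uiNext  = uiNext;
	pasEntries[uiAt].sLink.uiNext   = uiNew;
	pasEntries[uiNext].sLink.uiPrev = uiNew;
}
```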
-+ */ -+ -+#if defined(__linux__) -+#include -+#else -+#include -+#endif -+ -+#include "img_defs.h" -+#include "handle.h" -+#include "handle_impl.h" -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "osfunc.h" -+#include "lock.h" -+#include "connection_server.h" -+ -+#define HANDLE_HASH_TAB_INIT_SIZE 32 -+#define HANDLE_PROC_HANDLE_HASH_INIT_SIZE 10 -+ -+#define TEST_FLAG(v, f) BITMASK_HAS(v, f) -+#define TEST_ALLOC_FLAG(psHandleData, f) BITMASK_HAS((psHandleData)->eFlag, f) -+ -+ -+/* Linked list structure. Used for both the list head and list items */ -+typedef struct _HANDLE_LIST_ -+{ -+ IMG_HANDLE hPrev; -+ IMG_HANDLE hNext; -+ IMG_HANDLE hParent; -+} HANDLE_LIST; -+ -+typedef struct _HANDLE_DATA_ -+{ -+ /* The handle that represents this structure */ -+ IMG_HANDLE hHandle; -+ -+ /* Handle type */ -+ PVRSRV_HANDLE_TYPE eType; -+ -+ /* Flags specified when the handle was allocated */ -+ PVRSRV_HANDLE_ALLOC_FLAG eFlag; -+ -+ /* Pointer to the data that the handle represents */ -+ void *pvData; -+ -+ /* -+ * Callback specified at handle allocation time to -+ * release/destroy/free the data represented by the -+ * handle when it's reference count reaches 0. This -+ * should always be NULL for subhandles. -+ */ -+ PFN_HANDLE_RELEASE pfnReleaseData; -+ -+ /* List head for subhandles of this handle */ -+ HANDLE_LIST sChildren; -+ -+ /* List entry for sibling subhandles */ -+ HANDLE_LIST sSiblings; -+ -+ /* Reference count of lookups made. It helps track which resources are in -+ * use in concurrent bridge calls. */ -+ IMG_INT32 iLookupCount; -+ /* State of a handle. If the handle was already destroyed this is false. -+ * If this is false and iLookupCount is 0 the pfnReleaseData callback is -+ * called on the handle. */ -+ IMG_BOOL bCanLookup; -+ -+#if defined(PVRSRV_DEBUG_HANDLE_LOCK) -+ /* Store the handle base used for this handle, so we -+ * can later access the handle base lock (or check if -+ * it has been already acquired) -+ */ -+ PVRSRV_HANDLE_BASE *psBase; -+#endif -+ -+} HANDLE_DATA; -+ -+struct _HANDLE_BASE_ -+{ -+ /* Pointer to a handle implementations base structure */ -+ HANDLE_IMPL_BASE *psImplBase; -+ -+ /* -+ * Pointer to handle hash table. -+ * The hash table is used to do reverse lookups, converting data -+ * pointers to handles. -+ */ -+ HASH_TABLE *psHashTab; -+ -+ /* Type specific (connection/global/process) Lock handle */ -+ POS_LOCK hLock; -+ -+ /* Can be connection, process, global */ -+ PVRSRV_HANDLE_BASE_TYPE eType; -+}; -+ -+/* -+ * The key for the handle hash table is an array of three elements, the -+ * pointer to the resource, the resource type and the parent handle (or -+ * NULL if there is no parent). The eHandKey enumeration gives the -+ * array indices of the elements making up the key. 
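The eHandKey enumeration and the InitKey()/FindHandle() helpers further down implement exactly this key. The sketch below is illustrative only and mirrors FindHandle(), relying on the types and helpers declared in this file: because all three elements are hashed and compared, the same resource pointer registered under a different type or parent resolves to a different handle (or to none at all).

```c
/* Illustration only: compose the three-part reverse-lookup key and query
 * the hash table, as InitKey()/FindHandle() below do. */
static IMG_HANDLE ToyFindHandle(PVRSRV_HANDLE_BASE *psBase,
                                void *pvData,
                                PVRSRV_HANDLE_TYPE eType,
                                IMG_HANDLE hParent)
{
	HAND_KEY aKey;

	aKey[HAND_KEY_DATA]   = (uintptr_t)pvData;   /* the resource pointer */
	aKey[HAND_KEY_TYPE]   = (uintptr_t)eType;    /* the resource type    */
	aKey[HAND_KEY_PARENT] = (uintptr_t)hParent;  /* parent, or NULL      */

	/* A lookup with hParent == NULL will not find a handle that was
	 * registered with a parent in its key, and vice versa. */
	return (IMG_HANDLE)HASH_Retrieve_Extended(psBase->psHashTab, aKey);
}
```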
-+ */ -+enum eHandKey -+{ -+ HAND_KEY_DATA = 0, -+ HAND_KEY_TYPE, -+ HAND_KEY_PARENT, -+ HAND_KEY_LEN /* Must be last item in list */ -+}; -+ -+/* HAND_KEY is the type of the hash table key */ -+typedef uintptr_t HAND_KEY[HAND_KEY_LEN]; -+ -+typedef struct FREE_HANDLE_DATA_TAG -+{ -+ PVRSRV_HANDLE_BASE *psBase; -+ PVRSRV_HANDLE_TYPE eHandleFreeType; -+ /* timing data (ns) to release bridge lock upon the deadline */ -+ IMG_UINT64 ui64TimeStart; -+ IMG_UINT64 ui64MaxBridgeTime; -+} FREE_HANDLE_DATA; -+ -+typedef struct FREE_KERNEL_HANDLE_DATA_TAG -+{ -+ PVRSRV_HANDLE_BASE *psBase; -+ HANDLE_DATA *psProcessHandleData; -+ IMG_HANDLE hKernelHandle; -+} FREE_KERNEL_HANDLE_DATA; -+ -+/* Stores a pointer to the function table of the handle back-end in use */ -+static HANDLE_IMPL_FUNCTAB const *gpsHandleFuncs; -+ -+static POS_LOCK gKernelHandleLock; -+static IMG_BOOL gbLockInitialised = IMG_FALSE; -+/* Pointer to process handle base currently being freed */ -+static PVRSRV_HANDLE_BASE *g_psProcessHandleBaseBeingFreed; -+/* Lock for the process handle base table */ -+static POS_LOCK g_hProcessHandleBaseLock; -+/* Hash table with process handle bases */ -+static HASH_TABLE *g_psProcessHandleBaseTable; -+ -+void LockHandle(PVRSRV_HANDLE_BASE *psBase) -+{ -+ OSLockAcquire(psBase->hLock); -+} -+ -+void UnlockHandle(PVRSRV_HANDLE_BASE *psBase) -+{ -+ OSLockRelease(psBase->hLock); -+} -+ -+/* -+ * Kernel handle base structure. This is used for handles that are not -+ * allocated on behalf of a particular process. -+ */ -+PVRSRV_HANDLE_BASE *gpsKernelHandleBase = NULL; -+ -+/* Increase the lookup reference count on the given handle. -+ * The handle lock must already be acquired. -+ * Returns: the reference count after the increment -+ */ -+static inline IMG_UINT32 HandleGet(HANDLE_DATA *psHandleData) -+{ -+#if defined(PVRSRV_DEBUG_HANDLE_LOCK) -+ if (!OSLockIsLocked(psHandleData->psBase->hLock)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__)); -+ OSDumpStack(); -+ } -+#endif -+ -+#ifdef DEBUG_REFCNT -+ PVR_DPF((PVR_DBG_ERROR, "%s: bCanLookup = %u, iLookupCount %d -> %d", -+ __func__, psHandleData->bCanLookup, psHandleData->iLookupCount, -+ psHandleData->iLookupCount + 1)); -+#endif /* DEBUG_REFCNT */ -+ -+ PVR_ASSERT(psHandleData->bCanLookup); -+ -+ return ++psHandleData->iLookupCount; -+} -+ -+/* Decrease the lookup reference count on the given handle. -+ * The handle lock must already be acquired. 
-+ * Returns: the reference count after the decrement -+ */ -+static inline IMG_UINT32 HandlePut(HANDLE_DATA *psHandleData) -+{ -+#if defined(PVRSRV_DEBUG_HANDLE_LOCK) -+ if (!OSLockIsLocked(psHandleData->psBase->hLock)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__)); -+ OSDumpStack(); -+ } -+#endif -+ -+#ifdef DEBUG_REFCNT -+ PVR_DPF((PVR_DBG_ERROR, "%s: bCanLookup = %u, iLookupCount %d -> %d", -+ __func__, psHandleData->bCanLookup, psHandleData->iLookupCount, -+ psHandleData->iLookupCount - 1)); -+#endif /* DEBUG_REFCNT */ -+ -+ /* psHandleData->bCanLookup can be false at this point */ -+ PVR_ASSERT(psHandleData->iLookupCount > 0); -+ -+ return --psHandleData->iLookupCount; -+} -+ -+#if defined(PVRSRV_NEED_PVR_DPF) -+static const IMG_CHAR *HandleTypeToString(PVRSRV_HANDLE_TYPE eType) -+{ -+ #define HANDLETYPE(x) \ -+ case PVRSRV_HANDLE_TYPE_##x: \ -+ return #x; -+ switch (eType) -+ { -+ #include "handle_types.h" -+ #undef HANDLETYPE -+ -+ default: -+ return "INVALID"; -+ } -+} -+ -+static const IMG_CHAR *HandleBaseTypeToString(PVRSRV_HANDLE_BASE_TYPE eType) -+{ -+ #define HANDLEBASETYPE(x) \ -+ case PVRSRV_HANDLE_BASE_TYPE_##x: \ -+ return #x; -+ switch (eType) -+ { -+ HANDLEBASETYPE(CONNECTION); -+ HANDLEBASETYPE(PROCESS); -+ HANDLEBASETYPE(GLOBAL); -+ #undef HANDLEBASETYPE -+ -+ default: -+ return "INVALID"; -+ } -+} -+#endif -+ -+static PVRSRV_ERROR HandleUnrefAndMaybeMarkForFree(PVRSRV_HANDLE_BASE *psBase, -+ HANDLE_DATA *psHandleData, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType); -+ -+static PVRSRV_ERROR HandleFreePrivData(PVRSRV_HANDLE_BASE *psBase, -+ HANDLE_DATA *psHandleData, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType); -+ -+static PVRSRV_ERROR HandleFreeDestroy(PVRSRV_HANDLE_BASE *psBase, -+ HANDLE_DATA *psHandleData, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType); -+ -+/*! -+******************************************************************************* -+ @Function GetHandleData -+ @Description Get the handle data structure for a given handle -+ @Input psBase - pointer to handle base structure -+ hHandle - handle from client -+ eType - handle type or PVRSRV_HANDLE_TYPE_NONE if the handle -+ type is not to be checked. -+ @Output ppsHandleData - pointer to a pointer to the handle data struct -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(GetHandleData) -+#endif -+static INLINE -+PVRSRV_ERROR GetHandleData(PVRSRV_HANDLE_BASE *psBase, -+ HANDLE_DATA **ppsHandleData, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType) -+{ -+ HANDLE_DATA *psHandleData; -+ PVRSRV_ERROR eError; -+ -+ eError = gpsHandleFuncs->pfnGetHandleData(psBase->psImplBase, -+ hHandle, -+ (void **)&psHandleData); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ /* -+ * Unless PVRSRV_HANDLE_TYPE_NONE was passed in to this function, -+ * check handle is of the correct type. -+ */ -+ if (unlikely(eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandleData->eType)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "GetHandleData: Type mismatch. Lookup request: Handle %p, type: %s (%u) but stored handle is type %s (%u)", -+ hHandle, -+ HandleTypeToString(eType), -+ eType, -+ HandleTypeToString(psHandleData->eType), -+ psHandleData->eType)); -+ return PVRSRV_ERROR_HANDLE_TYPE_MISMATCH; -+ } -+ -+ /* Return the handle structure */ -+ *ppsHandleData = psHandleData; -+ -+ return PVRSRV_OK; -+} -+ -+/*! 
-+******************************************************************************* -+ @Function HandleListInit -+ @Description Initialise a linked list structure embedded in a handle -+ structure. -+ @Input hHandle - handle containing the linked list structure -+ psList - pointer to linked list structure -+ hParent - parent handle or NULL -+******************************************************************************/ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(HandleListInit) -+#endif -+static INLINE -+void HandleListInit(IMG_HANDLE hHandle, HANDLE_LIST *psList, IMG_HANDLE hParent) -+{ -+ psList->hPrev = hHandle; -+ psList->hNext = hHandle; -+ psList->hParent = hParent; -+} -+ -+/*! -+******************************************************************************* -+ @Function InitParentList -+ @Description Initialise the children list head in a handle structure. -+ The children are the subhandles of this handle. -+ @Input psHandleData - pointer to handle data structure -+******************************************************************************/ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(InitParentList) -+#endif -+static INLINE -+void InitParentList(HANDLE_DATA *psHandleData) -+{ -+ IMG_HANDLE hParent = psHandleData->hHandle; -+ -+ HandleListInit(hParent, &psHandleData->sChildren, hParent); -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function InitChildEntry -+ @Description Initialise the child list entry in a handle structure. The list -+ entry is used to link together subhandles of a given handle. -+ @Input psHandleData - pointer to handle data structure -+******************************************************************************/ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(InitChildEntry) -+#endif -+static INLINE -+void InitChildEntry(HANDLE_DATA *psHandleData) -+{ -+ HandleListInit(psHandleData->hHandle, &psHandleData->sSiblings, NULL); -+} -+ -+/*! -+******************************************************************************* -+ @Function HandleListIsEmpty -+ @Description Determine whether a given linked list is empty. -+ @Input hHandle - handle containing the list head -+ psList - pointer to the list head -+ @Return IMG_TRUE if the list is empty, IMG_FALSE if it isn't. -+******************************************************************************/ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(HandleListIsEmpty) -+#endif -+static INLINE -+IMG_BOOL HandleListIsEmpty(IMG_HANDLE hHandle, HANDLE_LIST *psList) /* Instead of passing in the handle can we not just do (psList->hPrev == psList->hNext) ? IMG_TRUE : IMG_FALSE ??? */ -+{ -+ IMG_BOOL bIsEmpty; -+ -+ bIsEmpty = (IMG_BOOL)(psList->hNext == hHandle); -+ -+#ifdef DEBUG -+ { -+ IMG_BOOL bIsEmpty2; -+ -+ bIsEmpty2 = (IMG_BOOL)(psList->hPrev == hHandle); -+ PVR_ASSERT(bIsEmpty == bIsEmpty2); -+ } -+#endif -+ -+ return bIsEmpty; -+} -+ -+#ifdef DEBUG -+/*! -+******************************************************************************* -+ @Function NoChildren -+ @Description Determine whether a handle has any subhandles -+ @Input psHandleData - pointer to handle data structure -+ @Return IMG_TRUE if the handle has no subhandles, IMG_FALSE if it does. 
-+******************************************************************************/ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(NoChildren) -+#endif -+static INLINE -+IMG_BOOL NoChildren(HANDLE_DATA *psHandleData) -+{ -+ PVR_ASSERT(psHandleData->sChildren.hParent == psHandleData->hHandle); -+ -+ return HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sChildren); -+} -+ -+/*! -+******************************************************************************* -+ @Function NoParent -+ @Description Determine whether a handle is a subhandle -+ @Input psHandleData - pointer to handle data structure -+ @Return IMG_TRUE if the handle is not a subhandle, IMG_FALSE if it is. -+******************************************************************************/ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(NoParent) -+#endif -+static INLINE -+IMG_BOOL NoParent(HANDLE_DATA *psHandleData) -+{ -+ if (HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sSiblings)) -+ { -+ PVR_ASSERT(psHandleData->sSiblings.hParent == NULL); -+ -+ return IMG_TRUE; -+ } -+ -+ PVR_ASSERT(psHandleData->sSiblings.hParent != NULL); -+ return IMG_FALSE; -+} -+#endif /*DEBUG*/ -+ -+/*! -+******************************************************************************* -+ @Function ParentHandle -+ @Description Determine the parent of a handle -+ @Input psHandleData - pointer to handle data structure -+ @Return Parent handle, or NULL if the handle is not a subhandle. -+******************************************************************************/ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(ParentHandle) -+#endif -+static INLINE -+IMG_HANDLE ParentHandle(HANDLE_DATA *psHandleData) -+{ -+ return psHandleData->sSiblings.hParent; -+} -+ -+/* -+ * GetHandleListFromHandleAndOffset is used to generate either a -+ * pointer to the subhandle list head, or a pointer to the linked list -+ * structure of an item on a subhandle list. -+ * The list head is itself on the list, but is at a different offset -+ * in the handle structure to the linked list structure for items on -+ * the list. The two linked list structures are differentiated by -+ * the third parameter, containing the parent handle. The parent field -+ * in the list head structure references the handle structure that contains -+ * it. For items on the list, the parent field in the linked list structure -+ * references the parent handle, which will be different from the handle -+ * containing the linked list structure. -+ */ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(GetHandleListFromHandleAndOffset) -+#endif -+static INLINE -+HANDLE_LIST *GetHandleListFromHandleAndOffset(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE hEntry, -+ IMG_HANDLE hParent, -+ size_t uiParentOffset, -+ size_t uiEntryOffset) -+{ -+ HANDLE_DATA *psHandleData = NULL; -+ -+ PVR_ASSERT(psBase != NULL); -+ -+ if (GetHandleData(psBase, &psHandleData, hEntry, -+ PVRSRV_HANDLE_TYPE_NONE) != PVRSRV_OK) -+ { -+ return NULL; -+ } -+ -+ if (hEntry == hParent) -+ { -+ return (HANDLE_LIST *)IMG_OFFSET_ADDR(psHandleData, uiParentOffset); -+ } -+ else -+ { -+ return (HANDLE_LIST *)IMG_OFFSET_ADDR(psHandleData, uiEntryOffset); -+ } -+} -+ -+/*! -+******************************************************************************* -+ @Function HandleListInsertBefore -+ @Description Insert a handle before a handle currently on the list. 
-+ @Input hEntry - handle to be inserted after -+ psEntry - pointer to handle structure to be inserted after -+ uiParentOffset - offset to list head struct in handle structure -+ hNewEntry - handle to be inserted -+ psNewEntry - pointer to handle structure of item to be inserted -+ uiEntryOffset - offset of list item struct in handle structure -+ hParent - parent handle of hNewEntry -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(HandleListInsertBefore) -+#endif -+static INLINE -+PVRSRV_ERROR HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE hEntry, -+ HANDLE_LIST *psEntry, -+ size_t uiParentOffset, -+ IMG_HANDLE hNewEntry, -+ HANDLE_LIST *psNewEntry, -+ size_t uiEntryOffset, -+ IMG_HANDLE hParent) -+{ -+ HANDLE_LIST *psPrevEntry; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psEntry != NULL, "psEntry"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psNewEntry != NULL, "psNewEntry"); -+ -+ psPrevEntry = GetHandleListFromHandleAndOffset(psBase, -+ psEntry->hPrev, -+ hParent, -+ uiParentOffset, -+ uiEntryOffset); -+ if (psPrevEntry == NULL) -+ { -+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; -+ } -+ -+ PVR_ASSERT(psNewEntry->hParent == NULL); -+ PVR_ASSERT(hEntry == psPrevEntry->hNext); -+ -+#if defined(DEBUG) -+ { -+ HANDLE_LIST *psParentList; -+ -+ psParentList = GetHandleListFromHandleAndOffset(psBase, -+ hParent, -+ hParent, -+ uiParentOffset, -+ uiParentOffset); -+ PVR_ASSERT(psParentList && psParentList->hParent == hParent); -+ } -+#endif /* defined(DEBUG) */ -+ -+ psNewEntry->hPrev = psEntry->hPrev; -+ psEntry->hPrev = hNewEntry; -+ -+ psNewEntry->hNext = hEntry; -+ psPrevEntry->hNext = hNewEntry; -+ -+ psNewEntry->hParent = hParent; -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+******************************************************************************* -+ @Function AdoptChild -+ @Description Assign a subhandle to a handle -+ @Input psParentData - pointer to handle structure of parent handle -+ psChildData - pointer to handle structure of child subhandle -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(AdoptChild) -+#endif -+static INLINE -+PVRSRV_ERROR AdoptChild(PVRSRV_HANDLE_BASE *psBase, -+ HANDLE_DATA *psParentData, -+ HANDLE_DATA *psChildData) -+{ -+ IMG_HANDLE hParent = psParentData->sChildren.hParent; -+ -+ PVR_ASSERT(hParent == psParentData->hHandle); -+ -+ return HandleListInsertBefore(psBase, -+ hParent, -+ &psParentData->sChildren, -+ offsetof(HANDLE_DATA, sChildren), -+ psChildData->hHandle, -+ &psChildData->sSiblings, -+ offsetof(HANDLE_DATA, sSiblings), -+ hParent); -+} -+ -+/*! 
-+******************************************************************************* -+ @Function HandleListRemove -+ @Description Remove a handle from a list -+ @Input hEntry - handle to be removed -+ psEntry - pointer to handle structure of item to be removed -+ uiEntryOffset - offset of list item struct in handle structure -+ uiParentOffset - offset to list head struct in handle structure -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(HandleListRemove) -+#endif -+static INLINE -+PVRSRV_ERROR HandleListRemove(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE hEntry, -+ HANDLE_LIST *psEntry, -+ size_t uiEntryOffset, -+ size_t uiParentOffset) -+{ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psEntry != NULL, "psEntry"); -+ -+ if (!HandleListIsEmpty(hEntry, psEntry)) -+ { -+ HANDLE_LIST *psPrev; -+ HANDLE_LIST *psNext; -+ -+ psPrev = GetHandleListFromHandleAndOffset(psBase, -+ psEntry->hPrev, -+ psEntry->hParent, -+ uiParentOffset, -+ uiEntryOffset); -+ if (psPrev == NULL) -+ { -+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; -+ } -+ -+ psNext = GetHandleListFromHandleAndOffset(psBase, -+ psEntry->hNext, -+ psEntry->hParent, -+ uiParentOffset, -+ uiEntryOffset); -+ if (psNext == NULL) -+ { -+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; -+ } -+ -+ /* -+ * The list head is on the list, and we don't want to -+ * remove it. -+ */ -+ PVR_ASSERT(psEntry->hParent != NULL); -+ -+ psPrev->hNext = psEntry->hNext; -+ psNext->hPrev = psEntry->hPrev; -+ -+ HandleListInit(hEntry, psEntry, NULL); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+******************************************************************************* -+ @Function UnlinkFromParent -+ @Description Remove a subhandle from its parents list -+ @Input psHandleData - pointer to handle data structure of child -+ subhandle. -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(UnlinkFromParent) -+#endif -+static INLINE -+PVRSRV_ERROR UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase, -+ HANDLE_DATA *psHandleData) -+{ -+ return HandleListRemove(psBase, -+ psHandleData->hHandle, -+ &psHandleData->sSiblings, -+ offsetof(HANDLE_DATA, sSiblings), -+ offsetof(HANDLE_DATA, sChildren)); -+} -+ -+/*! -+******************************************************************************* -+ @Function HandleListIterate -+ @Description Iterate over the items in a list -+ @Input psHead - pointer to list head -+ uiParentOffset - offset to list head struct in handle structure -+ uiEntryOffset - offset of list item struct in handle structure -+ pfnIterFunc - function to be called for each handle in the list -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(HandleListIterate) -+#endif -+static INLINE -+PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase, -+ HANDLE_LIST *psHead, -+ size_t uiParentOffset, -+ size_t uiEntryOffset, -+ PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE)) -+{ -+ IMG_HANDLE hHandle = psHead->hNext; -+ IMG_HANDLE hParent = psHead->hParent; -+ IMG_HANDLE hNext; -+ -+ PVR_ASSERT(psHead->hParent != NULL); -+ -+ /* -+ * Follow the next chain from the list head until we reach -+ * the list head again, which signifies the end of the list. 
-+ */ -+ while (hHandle != hParent) -+ { -+ HANDLE_LIST *psEntry; -+ PVRSRV_ERROR eError; -+ -+ psEntry = GetHandleListFromHandleAndOffset(psBase, -+ hHandle, -+ hParent, -+ uiParentOffset, -+ uiEntryOffset); -+ if (psEntry == NULL) -+ { -+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; -+ } -+ -+ PVR_ASSERT(psEntry->hParent == psHead->hParent); -+ -+ /* -+ * Get the next index now, in case the list item is -+ * modified by the iteration function. -+ */ -+ hNext = psEntry->hNext; -+ -+ eError = (*pfnIterFunc)(psBase, hHandle); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ hHandle = hNext; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+******************************************************************************* -+ @Function IterateOverChildren -+ @Description Iterate over the subhandles of a parent handle -+ @Input psParentData - pointer to parent handle structure -+ pfnIterFunc - function to be called for each subhandle -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(IterateOverChildren) -+#endif -+static INLINE -+PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase, -+ HANDLE_DATA *psParentData, -+ PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE)) -+{ -+ return HandleListIterate(psBase, -+ &psParentData->sChildren, -+ offsetof(HANDLE_DATA, sChildren), -+ offsetof(HANDLE_DATA, sSiblings), -+ pfnIterFunc); -+} -+ -+/*! -+******************************************************************************* -+ @Function ParentIfPrivate -+ @Description Return the parent handle if the handle was allocated with -+ PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE, else return NULL. -+ @Input psHandleData - pointer to handle data structure -+ @Return Parent handle or NULL -+******************************************************************************/ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(ParentIfPrivate) -+#endif -+static INLINE -+IMG_HANDLE ParentIfPrivate(HANDLE_DATA *psHandleData) -+{ -+ return TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? -+ ParentHandle(psHandleData) : NULL; -+} -+ -+/*! -+******************************************************************************* -+ @Function InitKey -+ @Description Initialise a hash table key for the current process -+ @Input aKey - pointer to key -+ psBase - pointer to handle base structure -+ pvData - pointer to the resource the handle represents -+ eType - type of resource -+******************************************************************************/ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(InitKey) -+#endif -+static INLINE -+void InitKey(HAND_KEY aKey, -+ PVRSRV_HANDLE_BASE *psBase, -+ void *pvData, -+ PVRSRV_HANDLE_TYPE eType, -+ IMG_HANDLE hParent) -+{ -+ PVR_UNREFERENCED_PARAMETER(psBase); -+ -+ aKey[HAND_KEY_DATA] = (uintptr_t)pvData; -+ aKey[HAND_KEY_TYPE] = (uintptr_t)eType; -+ aKey[HAND_KEY_PARENT] = (uintptr_t)hParent; -+} -+ -+/*! 
-+******************************************************************************* -+ @Function FindHandle -+ @Description Find handle corresponding to a resource pointer -+ @Input psBase - pointer to handle base structure -+ pvData - pointer to resource to be associated with the handle -+ eType - the type of resource -+ @Return The handle, or NULL if not found -+******************************************************************************/ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(FindHandle) -+#endif -+static INLINE -+IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE *psBase, -+ void *pvData, -+ PVRSRV_HANDLE_TYPE eType, -+ IMG_HANDLE hParent) -+{ -+ HAND_KEY aKey; -+ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ -+ InitKey(aKey, psBase, pvData, eType, hParent); -+ -+ return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey); -+} -+ -+/*! -+******************************************************************************* -+ @Function AllocHandle -+ @Description Allocate a new handle -+ @Input phHandle - location for new handle -+ pvData - pointer to resource to be associated with the handle -+ eType - the type of resource -+ hParent - parent handle or NULL -+ pfnReleaseData - Function to release resource at handle release -+ time -+ @Output phHandle - points to new handle -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE *phHandle, -+ void *pvData, -+ PVRSRV_HANDLE_TYPE eType, -+ PVRSRV_HANDLE_ALLOC_FLAG eFlag, -+ IMG_HANDLE hParent, -+ PFN_HANDLE_RELEASE pfnReleaseData) -+{ -+ HANDLE_DATA *psNewHandleData; -+ IMG_HANDLE hHandle; -+ PVRSRV_ERROR eError; -+ -+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ PVR_ASSERT(psBase != NULL && psBase->psHashTab != NULL); -+ PVR_ASSERT(gpsHandleFuncs); -+ -+ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) -+ { -+ /* Handle must not already exist */ -+ PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == NULL); -+ } -+ -+ psNewHandleData = OSAllocZMem(sizeof(*psNewHandleData)); -+ PVR_LOG_RETURN_IF_NOMEM(psNewHandleData, "OSAllocZMem"); -+ -+ eError = gpsHandleFuncs->pfnAcquireHandle(psBase->psImplBase, &hHandle, -+ psNewHandleData); -+ PVR_LOG_GOTO_IF_ERROR(eError, "pfnAcquireHandle", -+ ErrorFreeHandleData); -+ -+ /* -+ * If a data pointer can be associated with multiple handles, we -+ * don't put the handle in the hash table, as the data pointer -+ * may not map to a unique handle -+ */ -+ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) -+ { -+ HAND_KEY aKey; -+ -+ /* Initialise hash key */ -+ InitKey(aKey, psBase, pvData, eType, hParent); -+ -+ /* Put the new handle in the hash table */ -+ eError = HASH_Insert_Extended(psBase->psHashTab, aKey, (uintptr_t)hHandle) ? 
-+ PVRSRV_OK : PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; -+ PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "couldn't add handle to hash table", -+ ErrorReleaseHandle); -+ } -+ -+ psNewHandleData->hHandle = hHandle; -+ psNewHandleData->eType = eType; -+ psNewHandleData->eFlag = eFlag; -+ psNewHandleData->pvData = pvData; -+ psNewHandleData->pfnReleaseData = pfnReleaseData; -+ psNewHandleData->iLookupCount = 0; -+ psNewHandleData->bCanLookup = IMG_TRUE; -+ -+#ifdef DEBUG_REFCNT -+ PVR_DPF((PVR_DBG_ERROR, "%s: bCanLookup = true", __func__)); -+#endif /* DEBUG_REFCNT */ -+ -+ InitParentList(psNewHandleData); -+#if defined(DEBUG) -+ PVR_ASSERT(NoChildren(psNewHandleData)); -+#endif -+ -+ InitChildEntry(psNewHandleData); -+#if defined(DEBUG) -+ PVR_ASSERT(NoParent(psNewHandleData)); -+#endif -+ -+#if defined(PVRSRV_DEBUG_HANDLE_LOCK) -+ psNewHandleData->psBase = psBase; -+#endif -+ -+ /* Return the new handle to the client */ -+ *phHandle = psNewHandleData->hHandle; -+ -+ return PVRSRV_OK; -+ -+ErrorReleaseHandle: -+ (void)gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase, hHandle, NULL); -+ -+ErrorFreeHandleData: -+ OSFreeMem(psNewHandleData); -+ -+ return eError; -+} -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVAllocHandle -+ @Description Allocate a handle -+ @Input psBase - pointer to handle base structure -+ pvData - pointer to resource to be associated with the handle -+ eType - the type of resource -+ pfnReleaseData - Function to release resource at handle release -+ time -+ @Output phHandle - points to new handle -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE *phHandle, -+ void *pvData, -+ PVRSRV_HANDLE_TYPE eType, -+ PVRSRV_HANDLE_ALLOC_FLAG eFlag, -+ PFN_HANDLE_RELEASE pfnReleaseData) -+{ -+ PVRSRV_ERROR eError; -+ -+ LockHandle(psBase); -+ eError = PVRSRVAllocHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, pfnReleaseData); -+ UnlockHandle(psBase); -+ -+ return eError; -+} -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVAllocHandleUnlocked -+ @Description Allocate a handle without acquiring/releasing the handle lock. -+ The function assumes you hold the lock when called. -+ @Input phHandle - location for new handle -+ pvData - pointer to resource to be associated with the handle -+ eType - the type of resource -+ pfnReleaseData - Function to release resource at handle release -+ time -+ @Output phHandle - points to new handle -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE *phHandle, -+ void *pvData, -+ PVRSRV_HANDLE_TYPE eType, -+ PVRSRV_HANDLE_ALLOC_FLAG eFlag, -+ PFN_HANDLE_RELEASE pfnReleaseData) -+{ -+ *phHandle = NULL; -+ -+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ PVR_ASSERT(gpsHandleFuncs); -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pfnReleaseData != NULL, "pfnReleaseData"); -+ -+ return AllocHandle(psBase, phHandle, pvData, eType, eFlag, NULL, pfnReleaseData); -+} -+ -+/*! 
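One subtlety worth calling out in AllocHandle()/PVRSRVAllocHandle() above is PVRSRV_HANDLE_ALLOC_FLAG_MULTI: with the flag set the handle is kept out of the reverse-lookup hash, so several handles may wrap the same data pointer, whereas without it a duplicate (data, type, parent) tuple trips the FindHandle() assert. The sketch below is not DDK code; every parameter is assumed to be supplied by the caller.

```c
/* Sketch of the PVRSRV_HANDLE_ALLOC_FLAG_MULTI behaviour described above. */
static PVRSRV_ERROR ToyAllocTwoHandles(PVRSRV_HANDLE_BASE *psBase,
                                       void *pvData,
                                       PVRSRV_HANDLE_TYPE eType,
                                       PFN_HANDLE_RELEASE pfnRelease,
                                       IMG_HANDLE *phA,
                                       IMG_HANDLE *phB)
{
	PVRSRV_ERROR eError;

	/* First handle: fine with or without ..._MULTI. */
	eError = PVRSRVAllocHandle(psBase, phA, pvData, eType,
	                           PVRSRV_HANDLE_ALLOC_FLAG_MULTI, pfnRelease);
	PVR_RETURN_IF_ERROR(eError);

	/* Second handle to the same data: only legal because ..._MULTI keeps the
	 * handle out of the reverse-lookup hash (see AllocHandle() above);
	 * without the flag the duplicate would trip the FindHandle() assert. */
	return PVRSRVAllocHandle(psBase, phB, pvData, eType,
	                         PVRSRV_HANDLE_ALLOC_FLAG_MULTI, pfnRelease);
}
```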
-+******************************************************************************* -+ @Function PVRSRVAllocSubHandle -+ @Description Allocate a subhandle -+ @Input pvData - pointer to resource to be associated with the subhandle -+ eType - the type of resource -+ hParent - parent handle -+ @Output phHandle - points to new subhandle -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE *phHandle, -+ void *pvData, -+ PVRSRV_HANDLE_TYPE eType, -+ PVRSRV_HANDLE_ALLOC_FLAG eFlag, -+ IMG_HANDLE hParent) -+{ -+ PVRSRV_ERROR eError; -+ -+ LockHandle(psBase); -+ eError = PVRSRVAllocSubHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, hParent); -+ UnlockHandle(psBase); -+ -+ return eError; -+} -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVAllocSubHandleUnlocked -+ @Description Allocate a subhandle without acquiring/releasing the handle -+ lock. The function assumes you hold the lock when called. -+ @Input pvData - pointer to resource to be associated with the subhandle -+ eType - the type of resource -+ hParent - parent handle -+ @Output phHandle - points to new subhandle -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE *phHandle, -+ void *pvData, -+ PVRSRV_HANDLE_TYPE eType, -+ PVRSRV_HANDLE_ALLOC_FLAG eFlag, -+ IMG_HANDLE hParent) -+{ -+ HANDLE_DATA *psPHandleData = NULL; -+ HANDLE_DATA *psCHandleData = NULL; -+ IMG_HANDLE hParentKey; -+ IMG_HANDLE hHandle; -+ PVRSRV_ERROR eError; -+ -+ *phHandle = NULL; -+ -+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ PVR_ASSERT(gpsHandleFuncs); -+ -+ PVR_LOG_GOTO_IF_INVALID_PARAM(psBase, eError, Exit); -+ -+ hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? hParent : NULL; -+ -+ /* Lookup the parent handle */ -+ eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE); -+ PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "failed to get parent handle structure", -+ Exit); -+ -+ eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey, NULL); -+ PVR_GOTO_IF_ERROR(eError, Exit); -+ -+ eError = GetHandleData(psBase, &psCHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE); -+ /* If we were able to allocate the handle then there should be no reason why we -+ * can't also get it's handle structure. Otherwise something has gone badly wrong. -+ */ -+ PVR_ASSERT(eError == PVRSRV_OK); -+ PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "Failed to get parent handle structure", -+ ExitFreeHandle); -+ -+ /* -+ * Get the parent handle structure again, in case the handle -+ * structure has moved (depending on the implementation -+ * of AllocHandle). -+ */ -+ eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE); -+ PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "failed to get parent handle structure", -+ ExitFreeHandle); -+ -+ eError = AdoptChild(psBase, psPHandleData, psCHandleData); -+ PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "parent handle failed to adopt subhandle", -+ ExitFreeHandle); -+ -+ *phHandle = hHandle; -+ -+ return PVRSRV_OK; -+ -+ExitFreeHandle: -+ PVRSRVDestroyHandleUnlocked(psBase, hHandle, eType); -+Exit: -+ return eError; -+} -+ -+/*! 
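PVRSRVAllocSubHandle() above ties a derived object to an existing parent handle; subhandles carry no release callback of their own (pfnReleaseData is NULL for them, as noted in the HANDLE_DATA comment) and are torn down together with their parent. A minimal sketch follows, with pvDerived and eDerivedType assumed to be caller-supplied; PVRSRVLookupSubHandle(), shown further down, can later resolve *phSub back to pvDerived given the parent (or any ancestor) handle.

```c
/* Sketch: attach a derived object to an existing parent handle. */
static PVRSRV_ERROR ToyAttachSubObject(PVRSRV_HANDLE_BASE *psBase,
                                       IMG_HANDLE hParent,
                                       void *pvDerived,
                                       PVRSRV_HANDLE_TYPE eDerivedType,
                                       IMG_HANDLE *phSub)
{
	/* ..._FLAG_PRIVATE adds the parent to the reverse-lookup key, so the
	 * data pointer can only be mapped back to this subhandle via hParent. */
	return PVRSRVAllocSubHandle(psBase, phSub, pvDerived, eDerivedType,
	                            PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE, hParent);
}
```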
-+******************************************************************************* -+ @Function PVRSRVFindHandle -+ @Description Find handle corresponding to a resource pointer -+ @Input pvData - pointer to resource to be associated with the handle -+ eType - the type of resource -+ @Output phHandle - points to returned handle -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE *phHandle, -+ void *pvData, -+ PVRSRV_HANDLE_TYPE eType) -+{ -+ PVRSRV_ERROR eError; -+ -+ LockHandle(psBase); -+ eError = PVRSRVFindHandleUnlocked(psBase, phHandle, pvData, eType); -+ UnlockHandle(psBase); -+ -+ return eError; -+} -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVFindHandleUnlocked -+ @Description Find handle corresponding to a resource pointer without -+ acquiring/releasing the handle lock. The function assumes you -+ hold the lock when called. -+ @Input pvData - pointer to resource to be associated with the handle -+ eType - the type of resource -+ @Output phHandle - points to the returned handle -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE *phHandle, -+ void *pvData, -+ PVRSRV_HANDLE_TYPE eType) -+{ -+ IMG_HANDLE hHandle; -+ -+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ PVR_ASSERT(gpsHandleFuncs); -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); -+ -+ /* See if there is a handle for this data pointer */ -+ hHandle = FindHandle(psBase, pvData, eType, NULL); -+ if (hHandle == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Error finding handle. Type %u", -+ __func__, -+ eType)); -+ -+ return PVRSRV_ERROR_HANDLE_NOT_FOUND; -+ } -+ -+ *phHandle = hHandle; -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVLookupHandle -+ @Description Lookup the data pointer corresponding to a handle -+ @Input hHandle - handle from client -+ eType - handle type -+ bRef - If TRUE, a reference will be added on the handle if the -+ lookup is successful. -+ @Output ppvData - points to the return data pointer -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, -+ void **ppvData, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType, -+ IMG_BOOL bRef) -+{ -+ PVRSRV_ERROR eError; -+ -+ LockHandle(psBase); -+ eError = PVRSRVLookupHandleUnlocked(psBase, ppvData, hHandle, eType, bRef); -+ UnlockHandle(psBase); -+ -+ return eError; -+} -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVLookupHandleUnlocked -+ @Description Lookup the data pointer corresponding to a handle without -+ acquiring/releasing the handle lock. The function assumes you -+ hold the lock when called. -+ @Input hHandle - handle from client -+ eType - handle type -+ bRef - If TRUE, a reference will be added on the handle if the -+ lookup is successful. 
-+ @Output ppvData - points to the returned data pointer -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, -+ void **ppvData, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType, -+ IMG_BOOL bRef) -+{ -+ HANDLE_DATA *psHandleData = NULL; -+ PVRSRV_ERROR eError; -+ -+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ PVR_ASSERT(gpsHandleFuncs); -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); -+ -+ eError = GetHandleData(psBase, &psHandleData, hHandle, eType); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Error looking up handle (%s) for base %p of type %s. Handle %p, type %s", -+ __func__, -+ PVRSRVGetErrorString(eError), -+ psBase, -+ HandleBaseTypeToString(psBase->eType), -+ (void*) hHandle, -+ HandleTypeToString(eType))); -+#if defined(DEBUG) //|| defined(PVRSRV_NEED_PVR_DPF) -+ OSDumpStack(); -+#endif -+ return eError; -+ } -+ -+ /* If bCanLookup is false it means that a destroy operation was already -+ * called on this handle; therefore it can no longer be looked up. */ -+ if (!psHandleData->bCanLookup) -+ { -+ return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED; -+ } -+ -+ if (bRef) -+ { -+ HandleGet(psHandleData); -+ } -+ -+ *ppvData = psHandleData->pvData; -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVLookupSubHandle -+ @Description Lookup the data pointer corresponding to a subhandle -+ @Input hHandle - handle from client -+ eType - handle type -+ hAncestor - ancestor handle -+ @Output ppvData - points to the returned data pointer -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, -+ void **ppvData, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType, -+ IMG_HANDLE hAncestor) -+{ -+ HANDLE_DATA *psPHandleData = NULL; -+ HANDLE_DATA *psCHandleData = NULL; -+ PVRSRV_ERROR eError; -+ -+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ PVR_ASSERT(gpsHandleFuncs); -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); -+ -+ LockHandle(psBase); -+ -+ eError = GetHandleData(psBase, &psCHandleData, hHandle, eType); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Error looking up subhandle (%s). Handle %p, type %u", -+ __func__, -+ PVRSRVGetErrorString(eError), -+ (void*) hHandle, -+ eType)); -+ OSDumpStack(); -+ goto ExitUnlock; -+ } -+ -+ /* Look for hAncestor among the handle's ancestors */ -+ for (psPHandleData = psCHandleData; ParentHandle(psPHandleData) != hAncestor; ) -+ { -+ eError = GetHandleData(psBase, &psPHandleData, ParentHandle(psPHandleData), PVRSRV_HANDLE_TYPE_NONE); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "GetHandleData"); -+ eError = PVRSRV_ERROR_INVALID_SUBHANDLE; -+ goto ExitUnlock; -+ } -+ } -+ -+ *ppvData = psCHandleData->pvData; -+ -+ eError = PVRSRV_OK; -+ -+ExitUnlock: -+ UnlockHandle(psBase); -+ -+ return eError; -+} -+ -+ -+/*! 
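The bRef parameter of PVRSRVLookupHandle*() is what feeds the iLookupCount/bCanLookup scheme described earlier: a bridge call takes a lookup reference for the duration of its work and drops it with PVRSRVReleaseHandle*() (declared just below). A sketch of that pattern, assuming the caller already holds the base lock as the *Unlocked variants require; UseMyObject() is an invented consumer of the looked-up pointer.

```c
/* Hypothetical consumer of the looked-up object -- not DDK code. */
static PVRSRV_ERROR UseMyObject(void *pvObj);

static PVRSRV_ERROR ToyUseObject(PVRSRV_HANDLE_BASE *psBase,
                                 IMG_HANDLE hHandle,
                                 PVRSRV_HANDLE_TYPE eType)
{
	void *pvObj;
	PVRSRV_ERROR eError;

	/* bRef = IMG_TRUE bumps iLookupCount so a concurrent destroy cannot
	 * free the object while this call is still using it. */
	eError = PVRSRVLookupHandleUnlocked(psBase, &pvObj, hHandle, eType, IMG_TRUE);
	PVR_RETURN_IF_ERROR(eError);

	eError = UseMyObject(pvObj);

	/* Drops the lookup reference only; the resource itself is freed by a
	 * later PVRSRVDestroyHandle*() call (see below). */
	PVRSRVReleaseHandleUnlocked(psBase, hHandle, eType);

	return eError;
}
```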
-+******************************************************************************* -+ @Function PVRSRVReleaseHandle -+ @Description Release a handle that is no longer needed -+ @Input hHandle - handle from client -+ eType - handle type -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+void PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType) -+{ -+ LockHandle(psBase); -+ PVRSRVReleaseHandleUnlocked(psBase, hHandle, eType); -+ UnlockHandle(psBase); -+} -+ -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVReleaseHandleUnlocked -+ @Description Release a handle that is no longer needed without -+ acquiring/releasing the handle lock. The function assumes you -+ hold the lock when called. -+ @Input hHandle - handle from client -+ eType - handle type -+******************************************************************************/ -+void PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType) -+{ -+ HANDLE_DATA *psHandleData = NULL; -+ PVRSRV_ERROR eError; -+ -+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ -+ PVR_ASSERT(psBase != NULL); -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ PVR_ASSERT(gpsHandleFuncs); -+ -+ PVR_LOG_RETURN_VOID_IF_FALSE(psBase != NULL, "invalid psBase"); -+ -+ eError = GetHandleData(psBase, &psHandleData, hHandle, eType); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Error (%s) looking up handle %p of type %s " -+ "for base %p of type %s.", __func__, PVRSRVGetErrorString(eError), -+ (void*) hHandle, HandleTypeToString(eType), psBase, -+ HandleBaseTypeToString(psBase->eType))); -+ -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ return; -+ } -+ -+ PVR_ASSERT(psHandleData->bCanLookup); -+ PVR_ASSERT(psHandleData->iLookupCount > 0); -+ -+ /* If there are still outstanding lookups for this handle or the handle -+ * has not been destroyed yet, return early */ -+ HandlePut(psHandleData); -+} -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVPurgeHandles -+ @Description Purge handles for a given handle base -+ @Input psBase - pointer to handle base structure -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(gpsHandleFuncs); -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); -+ -+ LockHandle(psBase); -+ eError = gpsHandleFuncs->pfnPurgeHandles(psBase->psImplBase); -+ UnlockHandle(psBase); -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR HandleUnrefAndMaybeMarkForFreeWrapper(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE hHandle) -+{ -+ HANDLE_DATA *psHandleData; -+ PVRSRV_ERROR eError = GetHandleData(psBase, &psHandleData, hHandle, -+ PVRSRV_HANDLE_TYPE_NONE); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ return HandleUnrefAndMaybeMarkForFree(psBase, psHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE); -+} -+ -+static PVRSRV_ERROR HandleUnrefAndMaybeMarkForFree(PVRSRV_HANDLE_BASE *psBase, -+ HANDLE_DATA *psHandleData, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* If bCanLookup is false it means that the destructor was called more than -+ * once on this handle. 
*/ -+ if (!psHandleData->bCanLookup) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Handle %p of type %s already freed.", -+ __func__, psHandleData->hHandle, -+ HandleTypeToString(psHandleData->eType))); -+ return PVRSRV_ERROR_HANDLE_NOT_FOUND; -+ } -+ -+ if (psHandleData->iLookupCount > 0) -+ { -+ return PVRSRV_ERROR_OBJECT_STILL_REFERENCED; -+ } -+ -+ /* Mark this handle as freed only if it's no longer referenced by any -+ * lookup. The user space should retry freeing this handle once there are -+ * no outstanding lookups. */ -+ psHandleData->bCanLookup = IMG_FALSE; -+ -+#ifdef DEBUG_REFCNT -+ PVR_DPF((PVR_DBG_ERROR, "%s: bCanLookup = false, iLookupCount = %d", __func__, -+ psHandleData->iLookupCount)); -+#endif /* DEBUG_REFCNT */ -+ -+ /* Prepare children for destruction */ -+ eError = IterateOverChildren(psBase, psHandleData, -+ HandleUnrefAndMaybeMarkForFreeWrapper); -+ PVR_LOG_RETURN_IF_ERROR(eError, "HandleUnrefAndMaybeMarkForFreeWrapper"); -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR HandleFreePrivDataWrapper(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE hHandle) -+{ -+ HANDLE_DATA *psHandleData; -+ PVRSRV_ERROR eError = GetHandleData(psBase, &psHandleData, hHandle, -+ PVRSRV_HANDLE_TYPE_NONE); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ return HandleFreePrivData(psBase, psHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE); -+} -+ -+static PVRSRV_ERROR HandleFreePrivData(PVRSRV_HANDLE_BASE *psBase, -+ HANDLE_DATA *psHandleData, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Call the release data callback for each reference on the handle */ -+ if (psHandleData->pfnReleaseData != NULL) -+ { -+ eError = psHandleData->pfnReleaseData(psHandleData->pvData); -+ if (eError != PVRSRV_OK) -+ { -+ if (PVRSRVIsRetryError(eError)) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Got retry while calling release " -+ "data callback for handle %p of type = %s", __func__, -+ hHandle, HandleTypeToString(psHandleData->eType))); -+ } -+ else -+ { -+ PVR_LOG_ERROR(eError, "pfnReleaseData"); -+ } -+ -+ return eError; -+ } -+ -+ /* we don't need this so make sure it's not called on -+ * the pvData for the second time -+ */ -+ psHandleData->pfnReleaseData = NULL; -+ } -+ -+ /* Free children's data */ -+ eError = IterateOverChildren(psBase, psHandleData, -+ HandleFreePrivDataWrapper); -+ PVR_LOG_RETURN_IF_ERROR(eError, "IterateOverChildren->HandleFreePrivData"); -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR HandleFreeDestroyWrapper(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE hHandle) -+{ -+ HANDLE_DATA *psHandleData; -+ PVRSRV_ERROR eError = GetHandleData(psBase, &psHandleData, hHandle, -+ PVRSRV_HANDLE_TYPE_NONE); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ return HandleFreeDestroy(psBase, psHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE); -+} -+ -+static PVRSRV_ERROR HandleFreeDestroy(PVRSRV_HANDLE_BASE *psBase, -+ HANDLE_DATA *psHandleData, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType) -+{ -+ HANDLE_DATA *psReleasedHandleData; -+ PVRSRV_ERROR eError; -+ -+ eError = UnlinkFromParent(psBase, psHandleData); -+ PVR_LOG_RETURN_IF_ERROR(eError, "UnlinkFromParent"); -+ -+ if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) -+ { -+ HAND_KEY aKey; -+ IMG_HANDLE hRemovedHandle; -+ -+ InitKey(aKey, psBase, psHandleData->pvData, psHandleData->eType, -+ ParentIfPrivate(psHandleData)); -+ -+ hRemovedHandle = (IMG_HANDLE) HASH_Remove_Extended(psBase->psHashTab, -+ aKey); -+ -+ PVR_ASSERT(hRemovedHandle != NULL); -+ PVR_ASSERT(hRemovedHandle == psHandleData->hHandle); -+ 
PVR_UNREFERENCED_PARAMETER(hRemovedHandle); -+ } -+ -+ /* Free children */ -+ eError = IterateOverChildren(psBase, psHandleData, HandleFreeDestroyWrapper); -+ PVR_LOG_RETURN_IF_ERROR(eError, "IterateOverChildren->HandleFreeDestroy"); -+ -+ eError = gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase, -+ psHandleData->hHandle, -+ (void **)&psReleasedHandleData); -+ OSFreeMem(psHandleData); -+ PVR_LOG_RETURN_IF_ERROR(eError, "pfnReleaseHandle"); -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR DestroyHandle(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType, -+ IMG_BOOL bReleaseLock) -+{ -+ PVRSRV_ERROR eError; -+ HANDLE_DATA *psHandleData = NULL; -+ -+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); -+ PVR_ASSERT(gpsHandleFuncs); -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); -+ -+ eError = GetHandleData(psBase, &psHandleData, hHandle, eType); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ eError = HandleUnrefAndMaybeMarkForFree(psBase, psHandleData, hHandle, eType); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ if (bReleaseLock) -+ { -+ UnlockHandle(psBase); -+ } -+ -+ eError = HandleFreePrivData(psBase, psHandleData, hHandle, eType); -+ if (eError != PVRSRV_OK) -+ { -+ if (bReleaseLock) -+ { -+ LockHandle(psBase); -+ } -+ -+ /* If the data could not be freed due to a temporary condition the -+ * handle must be kept alive so that the next destroy call can try again */ -+ if (PVRSRVIsRetryError(eError)) -+ { -+ psHandleData->bCanLookup = IMG_TRUE; -+ } -+ -+ return eError; -+ } -+ -+ if (bReleaseLock) -+ { -+ LockHandle(psBase); -+ } -+ -+ return HandleFreeDestroy(psBase, psHandleData, hHandle, eType); -+} -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVDestroyHandle -+ @Description Destroys a handle that is no longer needed. Will -+ acquiring the handle lock for duration of the call. -+ Can return RETRY or KERNEL_CCB_FULL if resource could not be -+ destroyed, caller should retry sometime later. -+ @Input psBase - pointer to handle base structure -+ hHandle - handle from client -+ eType - handle type -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVDestroyHandle(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType) -+{ -+ PVRSRV_ERROR eError; -+ -+ LockHandle(psBase); -+ eError = DestroyHandle(psBase, hHandle, eType, IMG_FALSE); -+ UnlockHandle(psBase); -+ -+ return eError; -+} -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVDestroyHandleUnlocked -+ @Description Destroys a handle that is no longer needed without -+ acquiring/releasing the handle lock. The function assumes you -+ hold the lock when called. -+ Can return RETRY or KERNEL_CCB_FULL if resource could not be -+ destroyed, caller should retry sometime later. -+ @Input psBase - pointer to handle base structure -+ hHandle - handle from client -+ eType - handle type -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVDestroyHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType) -+{ -+ return DestroyHandle(psBase, hHandle, eType, IMG_FALSE); -+} -+ -+/*! 
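As the comments above note, PVRSRVDestroyHandle*() may fail with a retry-class error (for example when the kernel CCB is full) and the caller is expected to try again later; DestroyHandle() re-arms bCanLookup in that case so the handle remains destroyable. A sketch of that caller-side contract, using only the functions shown here; the bound on attempts is arbitrary and a real caller would normally back off between tries.

```c
/* Sketch of the retry contract for handle destruction described above. */
static PVRSRV_ERROR ToyDestroyWithRetry(PVRSRV_HANDLE_BASE *psBase,
                                        IMG_HANDLE hHandle,
                                        PVRSRV_HANDLE_TYPE eType)
{
	PVRSRV_ERROR eError;
	IMG_UINT32 uiTries = 10;   /* arbitrary bound for the sketch */

	do
	{
		/* PVRSRVIsRetryError() is the same predicate DestroyHandle()
		 * itself uses when deciding to keep the handle alive. */
		eError = PVRSRVDestroyHandle(psBase, hHandle, eType);
	} while (PVRSRVIsRetryError(eError) && --uiTries != 0);

	return eError;
}
```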
-+******************************************************************************* -+ @Function PVRSRVDestroyHandleStagedUnlocked -+ @Description Destroys a handle that is no longer needed without -+ acquiring/releasing the handle lock. The function assumes you -+ hold the lock when called. This function, unlike -+ PVRSRVDestroyHandleUnlocked(), releases the handle lock while -+ destroying handle private data. This is done to open the -+ bridge for other bridge calls. -+ Can return RETRY or KERNEL_CCB_FULL if resource could not be -+ destroyed, caller should retry sometime later. -+ @Input psBase - pointer to handle base structure -+ hHandle - handle from client -+ eType - handle type -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVDestroyHandleStagedUnlocked(PVRSRV_HANDLE_BASE *psBase, -+ IMG_HANDLE hHandle, -+ PVRSRV_HANDLE_TYPE eType) -+{ -+ return DestroyHandle(psBase, hHandle, eType, IMG_TRUE); -+} -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVAllocHandleBase -+ @Description Allocate a handle base structure for a process -+ @Input eType - handle type -+ @Output ppsBase - points to handle base structure pointer -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase, -+ PVRSRV_HANDLE_BASE_TYPE eType) -+{ -+ PVRSRV_HANDLE_BASE *psBase; -+ PVRSRV_ERROR eError; -+ -+ PVR_LOG_RETURN_IF_FALSE(gpsHandleFuncs != NULL, "handle management not initialised", -+ PVRSRV_ERROR_NOT_READY); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(ppsBase != NULL, "ppsBase"); -+ -+ psBase = OSAllocZMem(sizeof(*psBase)); -+ PVR_LOG_RETURN_IF_NOMEM(psBase, "psBase"); -+ -+ eError = OSLockCreate(&psBase->hLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", ErrorFreeHandleBase); -+ -+ psBase->eType = eType; -+ -+ LockHandle(psBase); -+ -+ eError = gpsHandleFuncs->pfnCreateHandleBase(&psBase->psImplBase); -+ PVR_GOTO_IF_ERROR(eError, ErrorUnlock); -+ -+ psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE, -+ sizeof(HAND_KEY), -+ HASH_Func_Default, -+ HASH_Key_Comp_Default); -+ PVR_LOG_GOTO_IF_FALSE(psBase->psHashTab != NULL, "couldn't create data pointer" -+ " hash table", ErrorDestroyHandleBase); -+ -+ *ppsBase = psBase; -+ -+ UnlockHandle(psBase); -+ -+ return PVRSRV_OK; -+ -+ErrorDestroyHandleBase: -+ (void)gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase); -+ -+ErrorUnlock: -+ UnlockHandle(psBase); -+ OSLockDestroy(psBase->hLock); -+ -+ErrorFreeHandleBase: -+ OSFreeMem(psBase); -+ -+ return eError; -+} -+ -+#if defined(DEBUG) -+typedef struct _COUNT_HANDLE_DATA_ -+{ -+ PVRSRV_HANDLE_BASE *psBase; -+ IMG_UINT32 uiHandleDataCount; -+} COUNT_HANDLE_DATA; -+ -+/* Used to count the number of handles that have data associated with them */ -+static PVRSRV_ERROR CountHandleDataWrapper(IMG_HANDLE hHandle, void *pvData) -+{ -+ COUNT_HANDLE_DATA *psData = (COUNT_HANDLE_DATA *)pvData; -+ HANDLE_DATA *psHandleData = NULL; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(gpsHandleFuncs); -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psData != NULL, "psData"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psData->psBase != NULL, "psData->psBase"); -+ -+ eError = GetHandleData(psData->psBase, -+ &psHandleData, -+ hHandle, -+ PVRSRV_HANDLE_TYPE_NONE); -+ PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData"); -+ -+ if (psHandleData != NULL) -+ { -+ 
psData->uiHandleDataCount++; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/* Print a handle in the handle base. Used with the iterator callback. */ -+static PVRSRV_ERROR ListHandlesInBase(IMG_HANDLE hHandle, void *pvData) -+{ -+ PVRSRV_HANDLE_BASE *psBase = (PVRSRV_HANDLE_BASE*) pvData; -+ HANDLE_DATA *psHandleData = NULL; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(gpsHandleFuncs); -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); -+ -+ eError = GetHandleData(psBase, -+ &psHandleData, -+ hHandle, -+ PVRSRV_HANDLE_TYPE_NONE); -+ PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData"); -+ -+ if (psHandleData != NULL) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ " Handle: %6u, CanLookup: %u, LookupCount: %3u, Type: %s (%u), pvData<%p>", -+ (IMG_UINT32) (uintptr_t) psHandleData->hHandle, psHandleData->bCanLookup, -+ psHandleData->iLookupCount, HandleTypeToString(psHandleData->eType), -+ psHandleData->eType, psHandleData->pvData)); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+#endif /* defined(DEBUG) */ -+ -+static INLINE IMG_BOOL _CheckIfMaxTimeExpired(IMG_UINT64 ui64TimeStart, IMG_UINT64 ui64MaxBridgeTime) -+{ -+ /* unsigned arithmetic is well defined so this will wrap around correctly */ -+ return (IMG_BOOL)((OSClockns64() - ui64TimeStart) >= ui64MaxBridgeTime); -+} -+ -+static PVRSRV_ERROR FreeKernelHandlesWrapperIterKernel(IMG_HANDLE hHandle, void *pvData) -+{ -+ FREE_KERNEL_HANDLE_DATA *psData = (FREE_KERNEL_HANDLE_DATA *)pvData; -+ HANDLE_DATA *psKernelHandleData = NULL; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(gpsHandleFuncs); -+ -+ /* Get kernel handle data. */ -+ eError = GetHandleData(KERNEL_HANDLE_BASE, -+ &psKernelHandleData, -+ hHandle, -+ PVRSRV_HANDLE_TYPE_NONE); -+ PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData"); -+ -+ if (psKernelHandleData->pvData == psData->psProcessHandleData->pvData) -+ { -+ /* This kernel handle belongs to our process handle. */ -+ psData->hKernelHandle = hHandle; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR FreeKernelHandlesWrapperIterProcess(IMG_HANDLE hHandle, void *pvData) -+{ -+ FREE_KERNEL_HANDLE_DATA *psData = (FREE_KERNEL_HANDLE_DATA *)pvData; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(gpsHandleFuncs); -+ -+ /* Get process handle data. */ -+ eError = GetHandleData(psData->psBase, -+ &psData->psProcessHandleData, -+ hHandle, -+ PVRSRV_HANDLE_TYPE_NONE); -+ PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData"); -+ -+ if (psData->psProcessHandleData->eFlag == PVRSRV_HANDLE_ALLOC_FLAG_MULTI -+#if defined(SUPPORT_INSECURE_EXPORT) -+ || psData->psProcessHandleData->eType == PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT -+#endif -+ ) -+ { -+ /* Only multi alloc process handles might be in kernel handle base. */ -+ psData->hKernelHandle = NULL; -+ /* Iterate over kernel handles. */ -+ eError = gpsHandleFuncs->pfnIterateOverHandles(KERNEL_HANDLE_BASE->psImplBase, -+ &FreeKernelHandlesWrapperIterKernel, -+ (void *)psData); -+ PVR_LOG_RETURN_IF_FALSE(eError == PVRSRV_OK, "failed to iterate over kernel handles", -+ eError); -+ -+ if (psData->hKernelHandle) -+ { -+ /* Release kernel handle which belongs to our process handle. 
*/ -+ eError = gpsHandleFuncs->pfnReleaseHandle(KERNEL_HANDLE_BASE->psImplBase, -+ psData->hKernelHandle, -+ NULL); -+ PVR_LOG_RETURN_IF_FALSE(eError == PVRSRV_OK, "couldn't release kernel handle", -+ eError); -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR FreeHandleDataWrapper(IMG_HANDLE hHandle, void *pvData) -+{ -+ FREE_HANDLE_DATA *psData = (FREE_HANDLE_DATA *)pvData; -+ HANDLE_DATA *psHandleData = NULL; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(gpsHandleFuncs); -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psData != NULL, "psData"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psData->psBase != NULL, "psData->psBase"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psData->eHandleFreeType != PVRSRV_HANDLE_TYPE_NONE, -+ "psData->eHandleFreeType"); -+ -+ eError = GetHandleData(psData->psBase, -+ &psHandleData, -+ hHandle, -+ PVRSRV_HANDLE_TYPE_NONE); -+ PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData"); -+ -+ if (psHandleData == NULL || psHandleData->eType != psData->eHandleFreeType) -+ { -+ return PVRSRV_OK; -+ } -+ -+ PVR_ASSERT(psHandleData->bCanLookup && psHandleData->iLookupCount == 0); -+ -+ if (psHandleData->bCanLookup) -+ { -+ if (psHandleData->pfnReleaseData != NULL) -+ { -+ eError = psHandleData->pfnReleaseData(psHandleData->pvData); -+ if (PVRSRVIsRetryError(eError)) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Got retry while calling release " -+ "data callback for handle %p of type = %s", __func__, -+ hHandle, HandleTypeToString(psHandleData->eType))); -+ -+ return eError; -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Unexpected error from pfnReleaseData (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ } -+ } -+ -+ psHandleData->bCanLookup = IMG_FALSE; -+ } -+ -+ if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) -+ { -+ HAND_KEY aKey; -+ IMG_HANDLE hRemovedHandle; -+ -+ InitKey(aKey, -+ psData->psBase, -+ psHandleData->pvData, -+ psHandleData->eType, -+ ParentIfPrivate(psHandleData)); -+ -+ hRemovedHandle = (IMG_HANDLE)HASH_Remove_Extended(psData->psBase->psHashTab, aKey); -+ -+ PVR_ASSERT(hRemovedHandle != NULL); -+ PVR_ASSERT(hRemovedHandle == psHandleData->hHandle); -+ PVR_UNREFERENCED_PARAMETER(hRemovedHandle); -+ } -+ -+ eError = gpsHandleFuncs->pfnSetHandleData(psData->psBase->psImplBase, hHandle, NULL); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ OSFreeMem(psHandleData); -+ -+ /* If we reach the end of the time slice release we can release the global -+ * lock, invoke the scheduler and reacquire the lock */ -+ if ((psData->ui64MaxBridgeTime != 0) && _CheckIfMaxTimeExpired(psData->ui64TimeStart, psData->ui64MaxBridgeTime)) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Lock timeout (timeout: %" IMG_UINT64_FMTSPEC")", -+ __func__, -+ psData->ui64MaxBridgeTime)); -+ UnlockHandle(psData->psBase); -+ /* Invoke the scheduler to check if other processes are waiting for the lock */ -+ OSReleaseThreadQuanta(); -+ LockHandle(psData->psBase); -+ /* Set again lock timeout and reset the counter */ -+ psData->ui64TimeStart = OSClockns64(); -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Lock acquired again", __func__)); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/* The Ordered Array of PVRSRV_HANDLE_TYPE Enum Entries. -+ * -+ * Some handles must be destroyed prior to other handles, -+ * such relationships are established with respect to handle types. -+ * Therefore elements of this array have to maintain specific order, -+ * e.g. the PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET must be placed -+ * before PVRSRV_HANDLE_TYPE_RGX_FREELIST. 
-+ * -+ * If ordering is incorrect driver may fail on the ground of cleanup -+ * routines. Unfortunately, we can mainly rely on the actual definition of -+ * the array, there is no explicit information about all relationships -+ * between handle types. These relationships do not necessarily come from -+ * bridge-specified handle attributes such as 'sub handle' and 'parent -+ * handle'. They may come from internal/private ref-counters contained by -+ * objects referenced by our kernel handles. -+ * -+ * For example, at the bridge level, PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET -+ * and PVRSRV_HANDLE_TYPE_RGX_FREELIST have no explicit relationship, meaning -+ * none of them is a sub-handle for the other. -+ * However the freelist contains internal ref-count that is decremented by -+ * the destroy routine for KM_HW_RT_DATASET. -+ * -+ * BE CAREFUL when adding/deleting/moving handle types. -+ */ -+static const PVRSRV_HANDLE_TYPE g_aeOrderedFreeList[] = -+{ -+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, -+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT, -+ PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC, -+ PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, -+ PVRSRV_HANDLE_TYPE_RGX_FREELIST, -+ PVRSRV_HANDLE_TYPE_RGX_MEMORY_BLOCK, -+ PVRSRV_HANDLE_TYPE_RGX_POPULATION, -+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, -+#if defined(PVR_TESTING_UTILS) && defined(SUPPORT_VALIDATION) -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_GPUMAP_CONTEXT, -+#endif -+ PVRSRV_HANDLE_TYPE_RI_HANDLE, -+ PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE, -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, -+ PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER, -+ PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT, -+ PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, -+ PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT, -+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_PAGELIST, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_SECURE_EXPORT, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, -+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, -+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, -+ PVRSRV_HANDLE_TYPE_DC_PIN_HANDLE, -+ PVRSRV_HANDLE_TYPE_DC_BUFFER, -+ PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT, -+ PVRSRV_HANDLE_TYPE_DC_DEVICE, -+ PVRSRV_HANDLE_TYPE_PVR_TL_SD, -+ PVRSRV_HANDLE_TYPE_DI_CONTEXT, -+ PVRSRV_HANDLE_TYPE_MM_PLAT_CLEANUP -+}; -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVFreeKernelHandles -+ @Description Free kernel handles which belongs to process handles -+ @Input psBase - pointer to handle base structure -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVFreeKernelHandles(PVRSRV_HANDLE_BASE *psBase) -+{ -+ FREE_KERNEL_HANDLE_DATA sHandleData = {NULL}; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(gpsHandleFuncs); -+ -+ LockHandle(psBase); -+ -+ sHandleData.psBase = psBase; -+ /* Iterate over process handles. 
*/ -+ eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase, -+ &FreeKernelHandlesWrapperIterProcess, -+ (void *)&sHandleData); -+ PVR_LOG_GOTO_IF_ERROR(eError, "pfnIterateOverHandles", ExitUnlock); -+ -+ eError = PVRSRV_OK; -+ -+ExitUnlock: -+ UnlockHandle(psBase); -+ -+ return eError; -+} -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVRetrieveProcessHandleBase -+ @Description Returns a pointer to the process handle base for the current -+ process. If the current process is the cleanup thread, then the -+ process handle base for the process currently being cleaned up -+ is returned -+ @Return Pointer to the process handle base, or NULL if not found. -+******************************************************************************/ -+PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void) -+{ -+ PVRSRV_HANDLE_BASE *psHandleBase = NULL; -+ PROCESS_HANDLE_BASE *psProcHandleBase = NULL; -+ IMG_PID ui32PurgePid = PVRSRVGetPurgeConnectionPid(); -+ IMG_PID uiCleanupPid = PVRSRVCleanupThreadGetPid(); -+ uintptr_t uiCleanupTid = PVRSRVCleanupThreadGetTid(); -+ -+ OSLockAcquire(g_hProcessHandleBaseLock); -+ -+ /* Check to see if we're being called from the cleanup thread... */ -+ if ((OSGetCurrentProcessID() == uiCleanupPid) && -+ (OSGetCurrentThreadID() == uiCleanupTid) && -+ (ui32PurgePid > 0)) -+ { -+ /* Check to see if the cleanup thread has already removed the -+ * process handle base from the HASH table. -+ */ -+ psHandleBase = g_psProcessHandleBaseBeingFreed; -+ /* psHandleBase shouldn't be null, as cleanup thread -+ * should be removing this from the HASH table before -+ * we get here, so assert if not. -+ */ -+ PVR_ASSERT(psHandleBase); -+ } -+ else -+ { -+ /* Not being called from the cleanup thread, so return the process -+ * handle base for the current process. -+ */ -+ psProcHandleBase = (PROCESS_HANDLE_BASE *) -+ HASH_Retrieve(g_psProcessHandleBaseTable, OSGetCurrentClientProcessIDKM()); -+ } -+ -+ OSLockRelease(g_hProcessHandleBaseLock); -+ -+ if (psHandleBase == NULL && psProcHandleBase != NULL) -+ { -+ psHandleBase = psProcHandleBase->psHandleBase; -+ } -+ return psHandleBase; -+} -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVAcquireProcessHandleBase -+ @Description Increments reference count on a process handle base identified -+ by uiPid and returns pointer to the base. If the handle base -+ does not exist it will be allocated. 
-+ @Inout uiPid - PID of a process -+ @Output ppsBase - pointer to a handle base for the process identified by -+ uiPid -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(IMG_PID uiPid, PROCESS_HANDLE_BASE **ppsBase) -+{ -+ PROCESS_HANDLE_BASE *psBase; -+ PVRSRV_ERROR eError; -+ -+ OSLockAcquire(g_hProcessHandleBaseLock); -+ -+ psBase = (PROCESS_HANDLE_BASE*) HASH_Retrieve(g_psProcessHandleBaseTable, uiPid); -+ -+ /* In case there is none we are going to allocate one */ -+ if (psBase == NULL) -+ { -+ IMG_BOOL bSuccess; -+ -+ psBase = OSAllocZMem(sizeof(*psBase)); -+ PVR_LOG_GOTO_IF_NOMEM(psBase, eError, ErrorUnlock); -+ -+ /* Allocate handle base for this process */ -+ eError = PVRSRVAllocHandleBase(&psBase->psHandleBase, PVRSRV_HANDLE_BASE_TYPE_PROCESS); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandleBase", ErrorFreeProcessHandleBase); -+ -+ /* Insert the handle base into the global hash table */ -+ bSuccess = HASH_Insert(g_psProcessHandleBaseTable, uiPid, (uintptr_t) psBase); -+ PVR_LOG_GOTO_IF_FALSE(bSuccess, "HASH_Insert failed", ErrorFreeHandleBase); -+ } -+ -+ OSAtomicIncrement(&psBase->iRefCount); -+ -+ OSLockRelease(g_hProcessHandleBaseLock); -+ -+ *ppsBase = psBase; -+ -+ return PVRSRV_OK; -+ -+ErrorFreeHandleBase: -+ PVRSRVFreeHandleBase(psBase->psHandleBase, 0); -+ErrorFreeProcessHandleBase: -+ OSFreeMem(psBase); -+ErrorUnlock: -+ OSLockRelease(g_hProcessHandleBaseLock); -+ -+ return eError; -+} -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVReleaseProcessHandleBase -+ @Description Decrements reference count on a process handle base psBase -+ for a process identified by uiPid. If the reference count -+ reaches 0 the handle base will be freed.. -+ @Input psBase - pointer to a process handle base -+ @Inout uiPid - PID of a process -+ @Inout ui64MaxBridgeTime - maximum time a handle destroy operation -+ can hold the handle base lock (after that -+ time a lock will be release and reacquired -+ for another time slice) -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase, IMG_PID uiPid, -+ IMG_UINT64 ui64MaxBridgeTime) -+{ -+ PVRSRV_ERROR eError; -+ IMG_INT iRefCount; -+ uintptr_t uiHashValue; -+ -+ OSLockAcquire(g_hProcessHandleBaseLock); -+ -+ iRefCount = OSAtomicDecrement(&psBase->iRefCount); -+ -+ if (iRefCount != 0) -+ { -+ OSLockRelease(g_hProcessHandleBaseLock); -+ return PVRSRV_OK; -+ } -+ -+ /* in case the refcount becomes 0 we can remove the process handle base -+ * and all related objects */ -+ -+ uiHashValue = HASH_Remove(g_psProcessHandleBaseTable, uiPid); -+ OSLockRelease(g_hProcessHandleBaseLock); -+ -+ PVR_LOG_RETURN_IF_FALSE(uiHashValue != 0, "HASH_Remove failed", -+ PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE); -+ -+ eError = PVRSRVFreeKernelHandles(psBase->psHandleBase); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVFreeKernelHandles"); -+ -+ eError = PVRSRVFreeHandleBase(psBase->psHandleBase, ui64MaxBridgeTime); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVFreeHandleBase"); -+ -+ OSFreeMem(psBase); -+ -+ return PVRSRV_OK; -+} -+ -+/*! 
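A short sketch of the acquire/release pairing described above, assuming a hypothetical per-connection setup path; the PID argument and the zero bridge-time value are placeholders.

/* Sketch only: pairing PVRSRVAcquireProcessHandleBase with its release. */
static PVRSRV_ERROR ExampleConnectionSetup(IMG_PID uiPid)
{
	PROCESS_HANDLE_BASE *psProcBase;
	PVRSRV_ERROR eError;

	/* Takes a reference; the base is allocated on first use for this PID. */
	eError = PVRSRVAcquireProcessHandleBase(uiPid, &psProcBase);
	PVR_RETURN_IF_ERROR(eError);

	/* ... use psProcBase->psHandleBase for per-process allocations ... */

	/* Drops the reference; the base (and its kernel handles) is freed
	 * once the count reaches zero. Passing 0 disables the time-sliced
	 * release of the handle lock during that free. */
	return PVRSRVReleaseProcessHandleBase(psProcBase, uiPid, 0);
}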
-+******************************************************************************* -+ @Function PVRSRVFreeHandleBase -+ @Description Free a handle base structure -+ @Input psBase - pointer to handle base structure -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime) -+{ -+#if defined(DEBUG) -+ COUNT_HANDLE_DATA sCountData = {NULL}; -+#endif -+ FREE_HANDLE_DATA sHandleData = {NULL}; -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError; -+ IMG_PID uiCleanupPid = PVRSRVCleanupThreadGetPid(); -+ uintptr_t uiCleanupTid = PVRSRVCleanupThreadGetTid(); -+ IMG_UINT32 ui32ErrorCount = 0; -+ -+ PVR_ASSERT(gpsHandleFuncs); -+ -+ LockHandle(psBase); -+ -+ /* If this is a process handle base being freed by the cleanup -+ * thread, store this in g_psProcessHandleBaseBeingFreed -+ */ -+ if ((OSGetCurrentProcessID() == uiCleanupPid) && -+ (OSGetCurrentThreadID() == uiCleanupTid) && -+ (psBase->eType == PVRSRV_HANDLE_BASE_TYPE_PROCESS)) -+ { -+ g_psProcessHandleBaseBeingFreed = psBase; -+ } -+ -+ sHandleData.psBase = psBase; -+ sHandleData.ui64TimeStart = OSClockns64(); -+ sHandleData.ui64MaxBridgeTime = ui64MaxBridgeTime; -+ -+ -+#if defined(DEBUG) -+ -+ sCountData.psBase = psBase; -+ -+ eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase, -+ &CountHandleDataWrapper, -+ (void *)&sCountData); -+ PVR_LOG_GOTO_IF_ERROR(eError, "pfnIterateOverHandles", ExitUnlock); -+ -+ if (sCountData.uiHandleDataCount != 0) -+ { -+ IMG_BOOL bList = (IMG_BOOL)(sCountData.uiHandleDataCount < HANDLE_DEBUG_LISTING_MAX_NUM); -+ -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: %u remaining handles in handle base 0x%p " -+ "(PVRSRV_HANDLE_BASE_TYPE %u).%s", -+ __func__, -+ sCountData.uiHandleDataCount, -+ psBase, -+ psBase->eType, -+ bList ? "": " Skipping details, too many items...")); -+ -+ if (bList) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "-------- Listing Handles --------")); -+ (void) gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase, -+ &ListHandlesInBase, -+ psBase); -+ PVR_DPF((PVR_DBG_WARNING, "-------- Done Listing --------")); -+ } -+ } -+ -+#endif /* defined(DEBUG) */ -+ -+ /* -+ * As we're freeing handles based on type, make sure all -+ * handles have actually had their data freed to avoid -+ * resources being leaked -+ */ -+ for (i = 0; i < ARRAY_SIZE(g_aeOrderedFreeList); i++) -+ { -+ sHandleData.eHandleFreeType = g_aeOrderedFreeList[i]; -+ -+ /* Make sure all handles have been freed before destroying the handle base */ -+ eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase, -+ &FreeHandleDataWrapper, -+ (void *)&sHandleData); -+ -+ /* On retry error return without destroying handle base. Caller may retry. */ -+ if (PVRSRVIsRetryError(eError)) -+ { -+ PVR_GOTO_IF_ERROR(eError, ExitUnlock); -+ } -+ -+ /* Retry is not possible. Continue freeing remaining handles. 
*/ -+ if (eError != PVRSRV_OK) -+ { -+ ui32ErrorCount++; -+ } -+ } -+ -+ if (ui32ErrorCount > 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Error freeing %d handles.", __func__, ui32ErrorCount)); -+ } -+ -+ if (psBase->psHashTab != NULL) -+ { -+ HASH_Delete(psBase->psHashTab); -+ } -+ -+ eError = gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase); -+ PVR_GOTO_IF_ERROR(eError, ExitUnlock); -+ -+ UnlockHandle(psBase); -+ OSLockDestroy(psBase->hLock); -+ OSFreeMem(psBase); -+ -+ return eError; -+ -+ExitUnlock: -+ if ((OSGetCurrentProcessID() == uiCleanupPid) && -+ (OSGetCurrentThreadID() == uiCleanupTid)) -+ { -+ g_psProcessHandleBaseBeingFreed = NULL; -+ } -+ UnlockHandle(psBase); -+ -+ return eError; -+} -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVHandleInit -+ @Description Initialise handle management -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVHandleInit(void) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(gpsKernelHandleBase == NULL); -+ PVR_ASSERT(gpsHandleFuncs == NULL); -+ PVR_ASSERT(g_hProcessHandleBaseLock == NULL); -+ PVR_ASSERT(g_psProcessHandleBaseTable == NULL); -+ PVR_ASSERT(!gbLockInitialised); -+ -+ eError = OSLockCreate(&gKernelHandleLock); -+ PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate:1"); -+ -+ eError = OSLockCreate(&g_hProcessHandleBaseLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate:2", ErrorHandleDeinit); -+ -+ gbLockInitialised = IMG_TRUE; -+ -+ eError = PVRSRVHandleGetFuncTable(&gpsHandleFuncs); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVHandleGetFuncTable", ErrorHandleDeinit); -+ -+ eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase, -+ PVRSRV_HANDLE_BASE_TYPE_GLOBAL); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandleBase", ErrorHandleDeinit); -+ -+ g_psProcessHandleBaseTable = HASH_Create(HANDLE_PROC_HANDLE_HASH_INIT_SIZE); -+ PVR_LOG_GOTO_IF_NOMEM(g_psProcessHandleBaseTable, eError, ErrorHandleDeinit); -+ -+ eError = gpsHandleFuncs->pfnEnableHandlePurging(gpsKernelHandleBase->psImplBase); -+ PVR_LOG_GOTO_IF_ERROR(eError, "pfnEnableHandlePurging", ErrorHandleDeinit); -+ -+ return PVRSRV_OK; -+ -+ErrorHandleDeinit: -+ (void) PVRSRVHandleDeInit(); -+ -+ return eError; -+} -+ -+/*! 
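A minimal sketch of the expected init/deinit pairing, assuming hypothetical driver load/unload hooks; PVRSRVHandleInit() already tears itself down on failure, so the caller only needs the matching deinit on unload.

/* Sketch only: hypothetical driver load/unload hooks. */
static PVRSRV_ERROR ExampleDriverLoad(void)
{
	/* On failure PVRSRVHandleInit() has already invoked
	 * PVRSRVHandleDeInit() internally, so just propagate the error. */
	return PVRSRVHandleInit();
}

static void ExampleDriverUnload(void)
{
	PVRSRV_ERROR eError = PVRSRVHandleDeInit();

	if (eError != PVRSRV_OK)
	{
		PVR_LOG_ERROR(eError, "PVRSRVHandleDeInit");
	}
}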
-+******************************************************************************* -+ @Function PVRSRVHandleDeInit -+ @Description De-initialise handle management -+ @Return Error code or PVRSRV_OK -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVHandleDeInit(void) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (gpsHandleFuncs != NULL) -+ { -+ if (gpsKernelHandleBase != NULL) -+ { -+ eError = PVRSRVFreeHandleBase(gpsKernelHandleBase, 0 /* do not release bridge lock */); -+ if (eError == PVRSRV_OK) -+ { -+ gpsKernelHandleBase = NULL; -+ } -+ else -+ { -+ PVR_LOG_ERROR(eError, "PVRSRVFreeHandleBase"); -+ } -+ } -+ -+ if (eError == PVRSRV_OK) -+ { -+ gpsHandleFuncs = NULL; -+ } -+ } -+ else -+ { -+ /* If we don't have a handle function table we shouldn't have a handle base either */ -+ PVR_ASSERT(gpsKernelHandleBase == NULL); -+ } -+ -+ if (g_psProcessHandleBaseTable != NULL) -+ { -+ HASH_Delete(g_psProcessHandleBaseTable); -+ g_psProcessHandleBaseTable = NULL; -+ } -+ -+ if (g_hProcessHandleBaseLock != NULL) -+ { -+ OSLockDestroy(g_hProcessHandleBaseLock); -+ g_hProcessHandleBaseLock = NULL; -+ } -+ -+ if (gKernelHandleLock != NULL) -+ { -+ OSLockDestroy(gKernelHandleLock); -+ gbLockInitialised = IMG_FALSE; -+ } -+ -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/handle.h b/drivers/gpu/drm/img-rogue/handle.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/handle.h -@@ -0,0 +1,206 @@ -+/**************************************************************************/ /*! -+@File -+@Title Handle Manager API -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Provide handle management -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+#if !defined(HANDLE_API_H) -+#define HANDLE_API_H -+ -+#include "lock_types.h" -+ -+/* -+ * Handle API -+ * ---------- -+ * The handle API is intended to provide handles for kernel resources, which -+ * can then be passed back to user space processes. -+ * -+ * The following functions comprise the API. Each function takes a pointer to -+ * a PVRSRV_HANDLE_BASE structure, one of which is allocated for each process, -+ * and stored in the per-process data area. Use KERNEL_HANDLE_BASE for handles -+ * not allocated for a particular process, or for handles that need to be -+ * allocated before the PVRSRV_HANDLE_BASE structure for the process is -+ * available. -+ * -+ * PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, -+ * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, -+ * PVRSRV_HANDLE_ALLOC_FLAG eFlag); -+ * -+ * Allocate a handle phHandle, for the resource of type eType pointed to by -+ * pvData. -+ * -+ * For handles that have a definite lifetime, where the corresponding resource -+ * is explicitly created and destroyed, eFlag should be zero. -+ * -+ * If a particular resource may be referenced multiple times by a given -+ * process, setting eFlag to PVRSRV_HANDLE_ALLOC_FLAG_MULTI will allow multiple -+ * handles to be allocated for the resource. Such handles cannot be found with -+ * PVRSRVFindHandle. -+ * -+ * PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, -+ * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, -+ * PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent); -+ * -+ * This function is similar to PVRSRVAllocHandle, except that the allocated -+ * handles are associated with a parent handle, hParent, that has been -+ * allocated previously. Subhandles are automatically deallocated when their -+ * parent handle is deallocated. -+ * Subhandles can be treated as ordinary handles. For example, they may have -+ * subhandles of their own, and may be explicitly deallocated using -+ * PVRSRVReleaseHandle (see below). -+ * -+ * PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, -+ * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType); -+ * -+ * Find the handle previously allocated for the resource pointed to by pvData, -+ * of type eType. Handles allocated with the flag -+ * PVRSRV_HANDLE_ALLOC_FLAG_MULTI cannot be found using this function. -+ * -+ * PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, -+ * void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); -+ * -+ * Given a handle for a resource of type eType, return the pointer to the -+ * resource. -+ * -+ * PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, -+ * void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, -+ * IMH_HANDLE hAncestor); -+ * -+ * Similar to PVRSRVLookupHandle, but checks the handle is a descendant -+ * of hAncestor. 
-+ * -+ * PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, -+ * IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); -+ * -+ * Deallocate a handle of given type. -+ * -+ * Return the parent of a handle in *phParent, or NULL if the handle has -+ * no parent. -+ */ -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "hash.h" -+ -+typedef enum -+{ -+ #define HANDLETYPE(x) PVRSRV_HANDLE_TYPE_##x, -+ #include "handle_types.h" -+ #undef HANDLETYPE -+} PVRSRV_HANDLE_TYPE; -+ -+static_assert(PVRSRV_HANDLE_TYPE_NONE == 0, "PVRSRV_HANDLE_TYPE_NONE must be zero"); -+ -+typedef enum -+{ -+ PVRSRV_HANDLE_BASE_TYPE_CONNECTION, -+ PVRSRV_HANDLE_BASE_TYPE_PROCESS, -+ PVRSRV_HANDLE_BASE_TYPE_GLOBAL -+} PVRSRV_HANDLE_BASE_TYPE; -+ -+ -+typedef enum -+{ -+ /* No flags */ -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE = 0, -+ /* Multiple handles can point at the given data pointer */ -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 0x01, -+ /* Subhandles are allocated in a private handle space */ -+ PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 0x02 -+} PVRSRV_HANDLE_ALLOC_FLAG; -+ -+typedef struct _HANDLE_BASE_ PVRSRV_HANDLE_BASE; -+ -+typedef struct _PROCESS_HANDLE_BASE_ -+{ -+ PVRSRV_HANDLE_BASE *psHandleBase; -+ ATOMIC_T iRefCount; -+} PROCESS_HANDLE_BASE; -+ -+extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase; -+#define KERNEL_HANDLE_BASE (gpsKernelHandleBase) -+ -+#define HANDLE_DEBUG_LISTING_MAX_NUM 20 -+ -+typedef PVRSRV_ERROR (*PFN_HANDLE_RELEASE)(void *pvData); -+ -+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData); -+PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData); -+ -+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent); -+PVRSRV_ERROR PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent); -+ -+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType); -+PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType); -+ -+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef); -+PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef); -+ -+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor); -+ -+void PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); -+void PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); -+ -+PVRSRV_ERROR PVRSRVDestroyHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); -+PVRSRV_ERROR PVRSRVDestroyHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); -+PVRSRV_ERROR PVRSRVDestroyHandleStagedUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); -+ -+PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase); -+ 
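A compact usage sketch of the API documented above, assuming a hypothetical server-side PMR object exported to user space; the function names, the psPMR pointer and the release callback are illustrative, while the prototypes match the declarations above.

/* Sketch only: allocate, look up, release and destroy one handle. */
static PVRSRV_ERROR ExampleExportPMR(PVRSRV_HANDLE_BASE *psBase,
                                     void *psPMR,
                                     PFN_HANDLE_RELEASE pfnPMRRelease,
                                     IMG_HANDLE *phPMROut)
{
	/* NONE flag: one handle per resource, findable via PVRSRVFindHandle. */
	return PVRSRVAllocHandle(psBase, phPMROut, psPMR,
	                         PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
	                         PVRSRV_HANDLE_ALLOC_FLAG_NONE,
	                         pfnPMRRelease);
}

static PVRSRV_ERROR ExampleUsePMR(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hPMR)
{
	void *psPMR;
	PVRSRV_ERROR eError;

	/* bRef=IMG_TRUE takes a lookup reference; a referenced handle cannot
	 * be destroyed until the reference is dropped again. */
	eError = PVRSRVLookupHandle(psBase, &psPMR, hPMR,
	                            PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
	PVR_RETURN_IF_ERROR(eError);

	/* ... use psPMR ... */

	PVRSRVReleaseHandle(psBase, hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);

	/* Destroy once the caller is finished with the handle entirely. */
	return PVRSRVDestroyHandle(psBase, hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
}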
-+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase, -+ PVRSRV_HANDLE_BASE_TYPE eType); -+ -+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime); -+ -+PVRSRV_ERROR PVRSRVFreeKernelHandles(PVRSRV_HANDLE_BASE *psBase); -+ -+PVRSRV_ERROR PVRSRVHandleInit(void); -+ -+PVRSRV_ERROR PVRSRVHandleDeInit(void); -+ -+PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void); -+ -+PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(IMG_PID uiPid, PROCESS_HANDLE_BASE **ppsBase); -+PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase, IMG_PID uiPid, IMG_UINT64 ui64MaxBridgeTime); -+ -+void LockHandle(PVRSRV_HANDLE_BASE *psBase); -+void UnlockHandle(PVRSRV_HANDLE_BASE *psBase); -+ -+#endif /* !defined(HANDLE_API_H) */ -diff --git a/drivers/gpu/drm/img-rogue/handle_idr.c b/drivers/gpu/drm/img-rogue/handle_idr.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/handle_idr.c -@@ -0,0 +1,440 @@ -+/*************************************************************************/ /*! -+@File -+@Title Resource Handle Manager - IDR Back-end -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Provide IDR based resource handle management back-end -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /***************************************************************************/ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "handle_impl.h" -+#include "allocmem.h" -+#include "osfunc.h" -+#include "pvr_debug.h" -+ -+#define ID_VALUE_MIN 1 -+#define ID_VALUE_MAX INT_MAX -+ -+#define ID_TO_HANDLE(i) ((IMG_HANDLE)(uintptr_t)(i)) -+#define HANDLE_TO_ID(h) ((IMG_INT)(uintptr_t)(h)) -+ -+struct _HANDLE_IMPL_BASE_ -+{ -+ struct idr sIdr; -+ -+ IMG_UINT32 ui32MaxHandleValue; -+ -+ IMG_UINT32 ui32TotalHandCount; -+}; -+ -+typedef struct _HANDLE_ITER_DATA_WRAPPER_ -+{ -+ PFN_HANDLE_ITER pfnHandleIter; -+ void *pvHandleIterData; -+} HANDLE_ITER_DATA_WRAPPER; -+ -+ -+static int HandleIterFuncWrapper(int id, void *data, void *iter_data) -+{ -+ HANDLE_ITER_DATA_WRAPPER *psIterData = (HANDLE_ITER_DATA_WRAPPER *)iter_data; -+ -+ PVR_UNREFERENCED_PARAMETER(data); -+ -+ return (int)psIterData->pfnHandleIter(ID_TO_HANDLE(id), psIterData->pvHandleIterData); -+} -+ -+/*! -+****************************************************************************** -+ -+ @Function AcquireHandle -+ -+ @Description Acquire a new handle -+ -+ @Input psBase - Pointer to handle base structure -+ phHandle - Points to a handle pointer -+ pvData - Pointer to resource to be associated with the handle -+ -+ @Output phHandle - Points to a handle pointer -+ -+ @Return Error code or PVRSRV_OK -+ -+******************************************************************************/ -+static PVRSRV_ERROR AcquireHandle(HANDLE_IMPL_BASE *psBase, -+ IMG_HANDLE *phHandle, -+ void *pvData) -+{ -+ int id; -+ int result; -+ -+ PVR_ASSERT(psBase != NULL); -+ PVR_ASSERT(phHandle != NULL); -+ PVR_ASSERT(pvData != NULL); -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) -+ idr_preload(GFP_KERNEL); -+ id = idr_alloc(&psBase->sIdr, pvData, ID_VALUE_MIN, psBase->ui32MaxHandleValue + 1, 0); -+ idr_preload_end(); -+ -+ result = id; -+#else -+ do -+ { -+ if (idr_pre_get(&psBase->sIdr, GFP_KERNEL) == 0) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ result = idr_get_new_above(&psBase->sIdr, pvData, ID_VALUE_MIN, &id); -+ } while (result == -EAGAIN); -+ -+ if ((IMG_UINT32)id > psBase->ui32MaxHandleValue) -+ { -+ idr_remove(&psBase->sIdr, id); -+ result = -ENOSPC; -+ } -+#endif -+ -+ if (result < 0) -+ { -+ if (result == -ENOSPC) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Limit of %u handles reached", -+ __func__, psBase->ui32MaxHandleValue)); -+ -+ return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; -+ } -+ -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ psBase->ui32TotalHandCount++; -+ -+ *phHandle = ID_TO_HANDLE(id); -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+****************************************************************************** -+ -+ @Function ReleaseHandle -+ -+ @Description Release a handle that is no longer needed. -+ -+ @Input psBase - Pointer to handle base structure -+ hHandle - Handle to release -+ ppvData - Points to a void data pointer -+ -+ @Output ppvData - Points to a void data pointer -+ -+ @Return PVRSRV_OK or PVRSRV_ERROR -+ -+******************************************************************************/ -+static PVRSRV_ERROR ReleaseHandle(HANDLE_IMPL_BASE *psBase, -+ IMG_HANDLE hHandle, -+ void **ppvData) -+{ -+ int id = HANDLE_TO_ID(hHandle); -+ void *pvData; -+ -+ PVR_ASSERT(psBase); -+ -+ /* Get the data associated with the handle. 
If we get back NULL then -+ it's an invalid handle */ -+ -+ pvData = idr_find(&psBase->sIdr, id); -+ if (likely(pvData)) -+ { -+ idr_remove(&psBase->sIdr, id); -+ psBase->ui32TotalHandCount--; -+ } -+ -+ if (unlikely(pvData == NULL)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Handle out of range (%u > %u)", -+ __func__, id, psBase->ui32TotalHandCount)); -+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; -+ } -+ -+ if (ppvData) -+ { -+ *ppvData = pvData; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+****************************************************************************** -+ -+ @Function GetHandleData -+ -+ @Description Get the data associated with the given handle -+ -+ @Input psBase - Pointer to handle base structure -+ hHandle - Handle from which data should be retrieved -+ ppvData - Points to a void data pointer -+ -+ @Output ppvData - Points to a void data pointer -+ -+ @Return Error code or PVRSRV_OK -+ -+******************************************************************************/ -+static PVRSRV_ERROR GetHandleData(HANDLE_IMPL_BASE *psBase, -+ IMG_HANDLE hHandle, -+ void **ppvData) -+{ -+ int id = HANDLE_TO_ID(hHandle); -+ void *pvData; -+ -+ PVR_ASSERT(psBase); -+ PVR_ASSERT(ppvData); -+ -+ pvData = idr_find(&psBase->sIdr, id); -+ if (likely(pvData)) -+ { -+ *ppvData = pvData; -+ -+ return PVRSRV_OK; -+ } -+ else -+ { -+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; -+ } -+} -+ -+/*! -+****************************************************************************** -+ -+ @Function SetHandleData -+ -+ @Description Set the data associated with the given handle -+ -+ @Input psBase - Pointer to handle base structure -+ hHandle - Handle for which data should be changed -+ pvData - Pointer to new data to be associated with the handle -+ -+ @Return Error code or PVRSRV_OK -+ -+******************************************************************************/ -+static PVRSRV_ERROR SetHandleData(HANDLE_IMPL_BASE *psBase, -+ IMG_HANDLE hHandle, -+ void *pvData) -+{ -+ int id = HANDLE_TO_ID(hHandle); -+ void *pvOldData; -+ -+ PVR_ASSERT(psBase); -+ -+ pvOldData = idr_replace(&psBase->sIdr, pvData, id); -+ if (IS_ERR(pvOldData)) -+ { -+ if (PTR_ERR(pvOldData) == -ENOENT) -+ { -+ return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED; -+ } -+ else -+ { -+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR IterateOverHandles(HANDLE_IMPL_BASE *psBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData) -+{ -+ HANDLE_ITER_DATA_WRAPPER sIterData; -+ -+ PVR_ASSERT(psBase); -+ PVR_ASSERT(pfnHandleIter); -+ -+ sIterData.pfnHandleIter = pfnHandleIter; -+ sIterData.pvHandleIterData = pvHandleIterData; -+ -+ return (PVRSRV_ERROR)idr_for_each(&psBase->sIdr, HandleIterFuncWrapper, &sIterData); -+} -+ -+/*! -+****************************************************************************** -+ -+ @Function EnableHandlePurging -+ -+ @Description Enable purging for a given handle base -+ -+ @Input psBase - pointer to handle base structure -+ -+ @Return Error code or PVRSRV_OK -+ -+******************************************************************************/ -+static PVRSRV_ERROR EnableHandlePurging(HANDLE_IMPL_BASE *psBase) -+{ -+ PVR_UNREFERENCED_PARAMETER(psBase); -+ PVR_ASSERT(psBase); -+ -+ return PVRSRV_OK; -+} -+ -+/*! 
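For reference, a standalone sketch of the idr_preload()/idr_alloc() pattern that AcquireHandle() uses on v3.9+ kernels; the ID bounds and GFP flags here are illustrative, not the driver's exact values.

/* Sketch only: the preload-based ID allocation pattern in isolation. */
#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_idr);

static int example_alloc_id(void *object)
{
	int id;

	idr_preload(GFP_KERNEL);   /* preallocate while sleeping is allowed */
	id = idr_alloc(&example_idr, object, 1, INT_MAX, GFP_NOWAIT);
	idr_preload_end();

	return id;                 /* >= 1 on success, -errno on failure */
}

static void example_free_id(int id)
{
	idr_remove(&example_idr, id);
}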
-+****************************************************************************** -+ -+ @Function PurgeHandles -+ -+ @Description Purge handles for a given handle base -+ -+ @Input psBase - Pointer to handle base structure -+ -+ @Return Error code or PVRSRV_OK -+ -+******************************************************************************/ -+static PVRSRV_ERROR PurgeHandles(HANDLE_IMPL_BASE *psBase) -+{ -+ PVR_UNREFERENCED_PARAMETER(psBase); -+ PVR_ASSERT(psBase); -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+****************************************************************************** -+ -+ @Function CreateHandleBase -+ -+ @Description Create a handle base structure -+ -+ @Input ppsBase - pointer to handle base structure pointer -+ -+ @Output ppsBase - points to handle base structure pointer -+ -+ @Return Error code or PVRSRV_OK -+ -+******************************************************************************/ -+static PVRSRV_ERROR CreateHandleBase(HANDLE_IMPL_BASE **ppsBase) -+{ -+ HANDLE_IMPL_BASE *psBase; -+ -+ PVR_ASSERT(ppsBase); -+ -+ psBase = OSAllocZMem(sizeof(*psBase)); -+ if (psBase == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't allocate generic handle base", -+ __func__)); -+ -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ idr_init(&psBase->sIdr); -+ -+ psBase->ui32MaxHandleValue = ID_VALUE_MAX; -+ psBase->ui32TotalHandCount = 0; -+ -+ *ppsBase = psBase; -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+****************************************************************************** -+ -+ @Function DestroyHandleBase -+ -+ @Description Destroy a handle base structure -+ -+ @Input psBase - pointer to handle base structure -+ -+ @Return Error code or PVRSRV_OK -+ -+******************************************************************************/ -+static PVRSRV_ERROR DestroyHandleBase(HANDLE_IMPL_BASE *psBase) -+{ -+ PVR_ASSERT(psBase); -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) -+ idr_remove_all(&psBase->sIdr); -+#endif -+ -+ /* Finally destroy the idr */ -+ idr_destroy(&psBase->sIdr); -+ -+ OSFreeMem(psBase); -+ -+ return PVRSRV_OK; -+} -+ -+ -+static const HANDLE_IMPL_FUNCTAB g_sHandleFuncTab = -+{ -+ .pfnAcquireHandle = AcquireHandle, -+ .pfnReleaseHandle = ReleaseHandle, -+ .pfnGetHandleData = GetHandleData, -+ .pfnSetHandleData = SetHandleData, -+ .pfnIterateOverHandles = IterateOverHandles, -+ .pfnEnableHandlePurging = EnableHandlePurging, -+ .pfnPurgeHandles = PurgeHandles, -+ .pfnCreateHandleBase = CreateHandleBase, -+ .pfnDestroyHandleBase = DestroyHandleBase -+}; -+ -+PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs) -+{ -+ static IMG_BOOL bAcquired = IMG_FALSE; -+ -+ if (bAcquired) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Function table already acquired", -+ __func__)); -+ return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; -+ } -+ -+ if (ppsFuncs == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ *ppsFuncs = &g_sHandleFuncTab; -+ -+ bAcquired = IMG_TRUE; -+ -+ return PVRSRV_OK; -+} -diff --git a/drivers/gpu/drm/img-rogue/handle_impl.h b/drivers/gpu/drm/img-rogue/handle_impl.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/handle_impl.h -@@ -0,0 +1,89 @@ -+/**************************************************************************/ /*! -+@File -+@Title Implementation Callbacks for Handle Manager API -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Part of the handle manager API. 
This file is for declarations -+ and definitions that are private/internal to the handle manager -+ API but need to be shared between the generic handle manager -+ code and the various handle manager backends, i.e. the code that -+ implements the various callbacks. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /***************************************************************************/ -+ -+#if !defined(HANDLE_IMPL_H) -+#define HANDLE_IMPL_H -+ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+typedef struct _HANDLE_IMPL_BASE_ HANDLE_IMPL_BASE; -+ -+typedef PVRSRV_ERROR (*PFN_HANDLE_ITER)(IMG_HANDLE hHandle, void *pvData); -+ -+typedef struct _HANDLE_IMPL_FUNCTAB_ -+{ -+ /* Acquire a new handle which is associated with the given data */ -+ PVRSRV_ERROR (*pfnAcquireHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE *phHandle, void *pvData); -+ -+ /* Release the given handle (optionally returning the data associated with it) */ -+ PVRSRV_ERROR (*pfnReleaseHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData); -+ -+ /* Get the data associated with the given handle */ -+ PVRSRV_ERROR (*pfnGetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData); -+ -+ /* Set the data associated with the given handle */ -+ PVRSRV_ERROR (*pfnSetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void *pvData); -+ -+ PVRSRV_ERROR (*pfnIterateOverHandles)(HANDLE_IMPL_BASE *psHandleBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData); -+ -+ /* Enable handle purging on the given handle base */ -+ PVRSRV_ERROR (*pfnEnableHandlePurging)(HANDLE_IMPL_BASE *psHandleBase); -+ -+ /* Purge handles on the given handle base */ -+ PVRSRV_ERROR (*pfnPurgeHandles)(HANDLE_IMPL_BASE *psHandleBase); -+ -+ /* Create handle base */ -+ PVRSRV_ERROR (*pfnCreateHandleBase)(HANDLE_IMPL_BASE **psHandleBase); -+ -+ /* Destroy handle base */ -+ PVRSRV_ERROR (*pfnDestroyHandleBase)(HANDLE_IMPL_BASE *psHandleBase); -+} HANDLE_IMPL_FUNCTAB; -+ -+PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs); -+ -+#endif /* !defined(HANDLE_IMPL_H) */ -diff --git a/drivers/gpu/drm/img-rogue/handle_types.h b/drivers/gpu/drm/img-rogue/handle_types.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/handle_types.h -@@ -0,0 +1,89 @@ -+/**************************************************************************/ /*! -+@File -+@Title Handle Manager handle types -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Provide handle management -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+/* NOTE: Do not add include guards to this file */ -+ -+HANDLETYPE(NONE) -+HANDLETYPE(SHARED_EVENT_OBJECT) -+HANDLETYPE(EVENT_OBJECT_CONNECT) -+HANDLETYPE(PMR_LOCAL_EXPORT_HANDLE) -+HANDLETYPE(PHYSMEM_PMR) -+HANDLETYPE(PHYSMEM_PMR_EXPORT) -+HANDLETYPE(PHYSMEM_PMR_SECURE_EXPORT) -+HANDLETYPE(DEVMEMINT_CTX) -+HANDLETYPE(DEVMEMINT_CTX_EXPORT) -+HANDLETYPE(DEVMEMINT_HEAP) -+HANDLETYPE(DEVMEMINT_RESERVATION) -+HANDLETYPE(DEVMEMXINT_RESERVATION) -+HANDLETYPE(DEVMEMINT_MAPPING) -+HANDLETYPE(RGX_FW_MEMDESC) -+HANDLETYPE(RGX_FREELIST) -+HANDLETYPE(RGX_MEMORY_BLOCK) -+HANDLETYPE(RGX_SERVER_RENDER_CONTEXT) -+HANDLETYPE(RGX_SERVER_TQ_CONTEXT) -+HANDLETYPE(RGX_SERVER_TQ_TDM_CONTEXT) -+HANDLETYPE(RGX_SERVER_COMPUTE_CONTEXT) -+HANDLETYPE(RGX_SERVER_RAY_CONTEXT) -+HANDLETYPE(RGX_SERVER_KICKSYNC_CONTEXT) -+#if defined(PVR_TESTING_UTILS) && defined(SUPPORT_VALIDATION) -+HANDLETYPE(RGX_SERVER_GPUMAP_CONTEXT) -+#endif -+HANDLETYPE(SYNC_PRIMITIVE_BLOCK) -+HANDLETYPE(SYNC_RECORD_HANDLE) -+HANDLETYPE(PVRSRV_TIMELINE_SERVER) -+HANDLETYPE(PVRSRV_FENCE_SERVER) -+HANDLETYPE(PVRSRV_FENCE_EXPORT) -+HANDLETYPE(RGX_KM_HW_RT_DATASET) -+HANDLETYPE(RGX_FWIF_ZSBUFFER) -+HANDLETYPE(RGX_POPULATION) -+HANDLETYPE(DC_DEVICE) -+HANDLETYPE(DC_DISPLAY_CONTEXT) -+HANDLETYPE(DC_BUFFER) -+HANDLETYPE(DC_PIN_HANDLE) -+HANDLETYPE(DEVMEM_MEM_IMPORT) -+HANDLETYPE(PHYSMEM_PMR_PAGELIST) -+HANDLETYPE(PVR_TL_SD) -+HANDLETYPE(RI_HANDLE) -+HANDLETYPE(DEV_PRIV_DATA) -+HANDLETYPE(MM_PLAT_CLEANUP) -+HANDLETYPE(WORKEST_RETURN_DATA) -+HANDLETYPE(DI_CONTEXT) -diff --git a/drivers/gpu/drm/img-rogue/hash.c b/drivers/gpu/drm/img-rogue/hash.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/hash.c -@@ -0,0 +1,734 @@ -+/*************************************************************************/ /*! -+@File -+@Title Self scaling hash tables. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description -+ Implements simple self scaling hash tables. Hash collisions are handled by -+ chaining entries together. Hash tables are increased in size when they -+ become more than (50%?) full and decreased in size when less than (25%?) -+ full. Hash tables are never decreased below their initial size. 
-+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+/* include/ */ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvr_debug.h" -+#include "pvrsrv_error.h" -+ -+/* services/shared/include/ */ -+#include "hash.h" -+ -+/* services/client/include/ or services/server/include/ */ -+#include "osfunc_common.h" -+#include "allocmem.h" -+ -+//#define PERF_DBG_RESIZE -+#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE) -+#include <sys/time.h> -+#endif -+ -+#if defined(__KERNEL__) -+#include "pvrsrv.h" -+#endif -+ -+#define KEY_TO_INDEX(pHash, key, uSize) \ -+ ((pHash)->pfnHashFunc((pHash)->uKeySize, (key), (uSize)) % (uSize)) -+ -+#define KEY_COMPARE(pHash, pKey1, pKey2) \ -+ ((pHash)->pfnKeyComp((pHash)->uKeySize, (pKey1), (pKey2))) -+ -+#if defined(__linux__) && defined(__KERNEL__) -+#define _AllocMem OSAllocMemNoStats -+#define _AllocZMem OSAllocZMemNoStats -+#define _FreeMem OSFreeMemNoStats -+#else -+#define _AllocMem OSAllocMem -+#define _AllocZMem OSAllocZMem -+#define _FreeMem OSFreeMem -+#endif -+ -+#define NO_SHRINK 0 -+ -+/* Each entry in a hash table is placed into a bucket */ -+typedef struct _BUCKET_ -+{ -+ struct _BUCKET_ *pNext; /*!< the next bucket on the same chain */ -+ uintptr_t v; /*!< entry value */ -+ uintptr_t k[]; /* PRQA S 0642 */ -+ /* override dynamic array declaration warning */ -+} BUCKET; -+ -+struct _HASH_TABLE_ -+{ -+ IMG_UINT32 uSize; /*!< current size of the hash table */ -+ IMG_UINT32 uCount; /*!< number of entries currently in the hash table */ -+ IMG_UINT32 uMinimumSize; /*!< the minimum size that the hash table should be re-sized to */ -+ IMG_UINT32 uKeySize; /*!< size of key in bytes */ -+ IMG_UINT32 uShrinkThreshold; /*!< The threshold at which to trigger a shrink */ -+ IMG_UINT32 uGrowThreshold; /*!< The threshold at which to trigger a grow */ -+ HASH_FUNC* pfnHashFunc; /*!< hash function */ -+ HASH_KEY_COMP* pfnKeyComp; /*!< key comparison function */ -+ BUCKET** ppBucketTable; /*!< the hash table array */ -+#if defined(DEBUG) -+ const char* pszFile; -+ unsigned int ui32LineNum; -+#endif -+}; -+ -+/*************************************************************************/ /*! -+@Function HASH_Func_Default -+@Description Hash function intended for hashing keys composed of uintptr_t -+ arrays. -+@Input uKeySize The size of the hash key, in bytes. -+@Input pKey A pointer to the key to hash. -+@Input uHashTabLen The length of the hash table. -+@Return The hash value. -+*/ /**************************************************************************/ -+IMG_INTERNAL IMG_UINT32 -+HASH_Func_Default(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen) -+{ -+ uintptr_t *p = (uintptr_t *)pKey; -+ IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t); -+ IMG_UINT32 ui; -+ IMG_UINT32 uHashKey = 0; -+ -+ PVR_UNREFERENCED_PARAMETER(uHashTabLen); -+ -+ PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0); -+ -+ for (ui = 0; ui < uKeyLen; ui++) -+ { -+ IMG_UINT32 uHashPart = (IMG_UINT32)*p++; -+ -+ uHashPart += (uHashPart << 12); -+ uHashPart ^= (uHashPart >> 22); -+ uHashPart += (uHashPart << 4); -+ uHashPart ^= (uHashPart >> 9); -+ uHashPart += (uHashPart << 10); -+ uHashPart ^= (uHashPart >> 2); -+ uHashPart += (uHashPart << 7); -+ uHashPart ^= (uHashPart >> 12); -+ -+ uHashKey += uHashPart; -+ } -+ -+ return uHashKey; -+} -+ -+/*************************************************************************/ /*! -+@Function HASH_Key_Comp_Default -+@Description Compares keys composed of uintptr_t arrays.
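HASH_Func_Default above folds each uintptr_t word of the key through the add/shift/xor mixer and sums the results; KEY_TO_INDEX then reduces that value modulo the table size to pick a bucket. The same arithmetic as a standalone sketch for a one-word key (the sample key and table size are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    /* Same mixing steps as HASH_Func_Default, applied to one key word. */
    static uint32_t mix_word(uintptr_t w)
    {
        uint32_t h = (uint32_t)w;
        h += h << 12; h ^= h >> 22;
        h += h << 4;  h ^= h >> 9;
        h += h << 10; h ^= h >> 2;
        h += h << 7;  h ^= h >> 12;
        return h;
    }

    int main(void)
    {
        uintptr_t key = 0x1234;          /* arbitrary single-word key */
        uint32_t table_len = 8;          /* arbitrary table size */
        uint32_t hash = mix_word(key);   /* uHashKey for a one-word key */
        printf("hash=0x%08x bucket=%u\n", hash, hash % table_len);
        return 0;
    }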
-+@Input uKeySize The size of the hash key, in bytes. -+@Input pKey1 Pointer to first hash key to compare. -+@Input pKey2 Pointer to second hash key to compare. -+@Return IMG_TRUE - The keys match. -+ IMG_FALSE - The keys don't match. -+*/ /**************************************************************************/ -+IMG_INTERNAL IMG_BOOL -+HASH_Key_Comp_Default(size_t uKeySize, void *pKey1, void *pKey2) -+{ -+ uintptr_t *p1 = (uintptr_t *)pKey1; -+ uintptr_t *p2 = (uintptr_t *)pKey2; -+ IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t); -+ IMG_UINT32 ui; -+ -+ PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0); -+ -+ for (ui = 0; ui < uKeyLen; ui++) -+ { -+ if (*p1++ != *p2++) -+ return IMG_FALSE; -+ } -+ -+ return IMG_TRUE; -+} -+ -+/*************************************************************************/ /*! -+@Function _ChainInsert -+@Description Insert a bucket into the appropriate hash table chain. -+@Input pBucket The bucket -+@Input ppBucketTable The hash table -+@Input uSize The size of the hash table -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+static void -+_ChainInsert(HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize) -+{ -+ IMG_UINT32 uIndex; -+ -+ /* We assume that all parameters passed by the caller are valid. */ -+ PVR_ASSERT(pBucket != NULL); -+ PVR_ASSERT(ppBucketTable != NULL); -+ PVR_ASSERT(uSize != 0); -+ -+ uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize); /* PRQA S 0432,0541 */ /* ignore dynamic array warning */ -+ pBucket->pNext = ppBucketTable[uIndex]; -+ ppBucketTable[uIndex] = pBucket; -+} -+ -+/*************************************************************************/ /*! -+@Function _Rehash -+@Description Iterate over every entry in an old hash table and rehash into -+ the new table. -+@Input ppOldTable The old hash table -+@Input uOldSize The size of the old hash table -+@Input ppNewTable The new hash table -+@Input uNewSize The size of the new hash table -+@Return None -+*/ /**************************************************************************/ -+static void -+_Rehash(HASH_TABLE *pHash, -+ BUCKET **ppOldTable, IMG_UINT32 uOldSize, -+ BUCKET **ppNewTable, IMG_UINT32 uNewSize) -+{ -+ IMG_UINT32 uIndex; -+ for (uIndex=0; uIndex< uOldSize; uIndex++) -+ { -+ BUCKET *pBucket; -+ pBucket = ppOldTable[uIndex]; -+ while (pBucket != NULL) -+ { -+ BUCKET *pNextBucket = pBucket->pNext; -+ _ChainInsert(pHash, pBucket, ppNewTable, uNewSize); -+ pBucket = pNextBucket; -+ } -+ } -+} -+ -+/*************************************************************************/ /*! -+@Function _Resize -+@Description Attempt to resize a hash table, failure to allocate a new -+ larger hash table is not considered a hard failure. We simply -+ continue and allow the table to fill up, the effect is to -+ allow hash chains to become longer. -+@Input pHash Hash table to resize. -+@Input uNewSize Required table size. 
-+@Return IMG_TRUE Success -+ IMG_FALSE Failed -+*/ /**************************************************************************/ -+static IMG_BOOL -+_Resize(HASH_TABLE *pHash, IMG_UINT32 uNewSize) -+{ -+ BUCKET **ppNewTable; -+ IMG_UINT32 uiThreshold = uNewSize >> 2; -+#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE) -+ struct timeval start, end; -+#endif -+ -+ if (uNewSize == pHash->uSize) -+ { -+ return IMG_TRUE; -+ } -+ -+#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE) -+ gettimeofday(&start, NULL); -+#endif -+ -+ ppNewTable = _AllocZMem(sizeof(BUCKET *) * uNewSize); -+ if (ppNewTable == NULL) -+ { -+ return IMG_FALSE; -+ } -+ -+ _Rehash(pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable, uNewSize); -+ -+ _FreeMem(pHash->ppBucketTable); -+ -+#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE) -+ gettimeofday(&end, NULL); -+ if (start.tv_usec > end.tv_usec) -+ { -+ end.tv_usec = 1000000 - start.tv_usec + end.tv_usec; -+ } -+ else -+ { -+ end.tv_usec -= start.tv_usec; -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, "%s: H:%p O:%d N:%d C:%d G:%d S:%d T:%06luus", __func__, pHash, pHash->uSize, uNewSize, pHash->uCount, pHash->uGrowThreshold, pHash->uShrinkThreshold, end.tv_usec)); -+#endif -+ -+ /*not nulling pointer, being reassigned just below*/ -+ pHash->ppBucketTable = ppNewTable; -+ pHash->uSize = uNewSize; -+ -+ pHash->uGrowThreshold = uiThreshold * 3; -+ pHash->uShrinkThreshold = (uNewSize <= pHash->uMinimumSize) ? NO_SHRINK : uiThreshold; -+ -+ return IMG_TRUE; -+} -+ -+ -+/*************************************************************************/ /*! -+@Function HASH_Create_Extended -+@Description Create a self scaling hash table, using the supplied key size, -+ and the supplied hash and key comparison functions. -+@Input uInitialLen Initial and minimum length of the hash table, -+ where the length refers to the number of entries -+ in the hash table, not its size in bytes. -+@Input uKeySize The size of the key, in bytes. -+@Input pfnHashFunc Pointer to hash function. -+@Input pfnKeyComp Pointer to key comparison function. -+@Return NULL or hash table handle. 
-+*/ /**************************************************************************/ -+IMG_INTERNAL -+HASH_TABLE * HASH_Create_Extended_Int (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp) -+{ -+ HASH_TABLE *pHash; -+ -+ if (uInitialLen == 0 || uKeySize == 0 || pfnHashFunc == NULL || pfnKeyComp == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid input parameters", __func__)); -+ return NULL; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: InitialSize=0x%x", __func__, uInitialLen)); -+ -+ pHash = _AllocMem(sizeof(HASH_TABLE)); -+ if (pHash == NULL) -+ { -+ return NULL; -+ } -+ -+ pHash->uCount = 0; -+ pHash->uSize = uInitialLen; -+ pHash->uMinimumSize = uInitialLen; -+ pHash->uKeySize = uKeySize; -+ pHash->uGrowThreshold = (uInitialLen >> 2) * 3; -+ pHash->uShrinkThreshold = NO_SHRINK; -+ pHash->pfnHashFunc = pfnHashFunc; -+ pHash->pfnKeyComp = pfnKeyComp; -+ -+ pHash->ppBucketTable = _AllocZMem(sizeof(BUCKET *) * pHash->uSize); -+ if (pHash->ppBucketTable == NULL) -+ { -+ _FreeMem(pHash); -+ /*not nulling pointer, out of scope*/ -+ return NULL; -+ } -+ -+ return pHash; -+} -+ -+#if defined(DEBUG) -+IMG_INTERNAL -+HASH_TABLE * HASH_Create_Extended_Debug (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp, -+ const char *file, const unsigned int line) -+{ -+ HASH_TABLE *hash; -+ hash = HASH_Create_Extended_Int(uInitialLen, uKeySize, -+ pfnHashFunc, pfnKeyComp); -+ if (hash) -+ { -+ hash->pszFile = file; -+ hash->ui32LineNum = line; -+ } -+ return hash; -+} -+#endif -+ -+/*************************************************************************/ /*! -+@Function HASH_Create -+@Description Create a self scaling hash table with a key consisting of a -+ single uintptr_t, and using the default hash and key -+ comparison functions. -+@Input uInitialLen Initial and minimum length of the hash table, -+ where the length refers to the number of entries -+ in the hash table, not its size in bytes. -+@Return NULL or hash table handle. -+*/ /**************************************************************************/ -+IMG_INTERNAL -+HASH_TABLE * HASH_Create_Int (IMG_UINT32 uInitialLen) -+{ -+ return HASH_Create_Extended_Int(uInitialLen, sizeof(uintptr_t), -+ &HASH_Func_Default, &HASH_Key_Comp_Default); -+} -+ -+#if defined(DEBUG) -+IMG_INTERNAL -+HASH_TABLE * HASH_Create_Debug(IMG_UINT32 uInitialLen, const char *file, const unsigned int line) -+{ -+ HASH_TABLE *hash; -+ hash = HASH_Create_Extended_Int(uInitialLen, sizeof(uintptr_t), -+ &HASH_Func_Default, &HASH_Key_Comp_Default); -+ if (hash) -+ { -+ hash->pszFile = file; -+ hash->ui32LineNum = line; -+ } -+ return hash; -+} -+#endif -+ -+/*************************************************************************/ /*! -+@Function HASH_Delete_Extended -+@Description Delete a hash table created by HASH_Create_Extended or -+ HASH_Create. All entries in the table should have been removed -+ before calling this function. -+@Input pHash Hash table -+@Input bWarn Set false to suppress warnings in the case of -+ deletion with active entries. 
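With the thresholds set as above, a table grows once it is more than three-quarters full (uGrowThreshold is 3/4 of the current size) and, after it has grown past its minimum size, shrinks again when occupancy falls below one quarter (uShrinkThreshold is 1/4, or NO_SHRINK at the minimum size). A standalone sketch of that arithmetic starting from an initial length of 8:

    #include <stdio.h>

    #define NO_SHRINK 0

    /* Mirrors the threshold updates in HASH_Create_Extended_Int and _Resize. */
    static void thresholds(unsigned size, unsigned min_size,
                           unsigned *grow, unsigned *shrink)
    {
        unsigned quarter = size >> 2;
        *grow   = quarter * 3;
        *shrink = (size <= min_size) ? NO_SHRINK : quarter;
    }

    int main(void)
    {
        unsigned sizes[] = { 8, 16, 32 };   /* initial length 8, then two grows */
        unsigned i;
        for (i = 0; i < 3; i++)
        {
            unsigned grow, shrink;
            thresholds(sizes[i], 8, &grow, &shrink);
            printf("size=%2u grow when count>%u shrink when count<%u%s\n",
                   sizes[i], grow, shrink,
                   shrink == NO_SHRINK ? " (never)" : "");
        }
        return 0;
    }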
-+*/ /**************************************************************************/ -+IMG_INTERNAL void -+HASH_Delete_Extended(HASH_TABLE *pHash, IMG_BOOL bWarn) -+{ -+ IMG_BOOL bDoCheck = IMG_TRUE; -+#if defined(__KERNEL__) && !defined(__QNXNTO__) -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ -+ if (psPVRSRVData != NULL) -+ { -+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) -+ { -+ bDoCheck = IMG_FALSE; -+ } -+ } -+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) -+ else -+ { -+ bDoCheck = IMG_FALSE; -+ } -+#endif -+#endif -+ if (pHash != NULL) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "HASH_Delete")); -+ -+ if (bDoCheck) -+ { -+ PVR_ASSERT(pHash->uCount==0); -+ } -+ if (pHash->uCount != 0) -+ { -+ IMG_UINT32 i; -+ if (bWarn) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Leak detected in hash table!", __func__)); -+ PVR_DPF((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmem context", __func__)); -+ PVR_DPF((PVR_DBG_ERROR, "%s: Removing remaining %u hash entries.", __func__, pHash->uCount)); -+#if defined(DEBUG) -+ PVR_DPF ((PVR_DBG_ERROR, "%s: Hash %p created at %s:%u.", __func__, (uintptr_t*)pHash, pHash->pszFile, pHash->ui32LineNum)); -+#endif -+ } -+ -+ for (i = 0; i < pHash->uSize; i++) -+ { -+ BUCKET *pBucket = pHash->ppBucketTable[i]; -+ while (pBucket != NULL) -+ { -+ BUCKET *pNextBucket = pBucket->pNext; -+ _FreeMem(pBucket); -+ pBucket = pNextBucket; -+ } -+ } -+ -+ } -+ _FreeMem(pHash->ppBucketTable); -+ pHash->ppBucketTable = NULL; -+ _FreeMem(pHash); -+ /*not nulling pointer, copy on stack*/ -+ } -+} -+ -+/*************************************************************************/ /*! -+@Function HASH_Delete -+@Description Delete a hash table created by HASH_Create_Extended or -+ HASH_Create. All entries in the table must have been removed -+ before calling this function. -+@Input pHash Hash table -+*/ /**************************************************************************/ -+IMG_INTERNAL void -+HASH_Delete(HASH_TABLE *pHash) -+{ -+ HASH_Delete_Extended(pHash, IMG_TRUE); -+} -+ -+/*************************************************************************/ /*! -+@Function HASH_Insert_Extended -+@Description Insert a key value pair into a hash table created with -+ HASH_Create_Extended. -+@Input pHash The hash table. -+@Input pKey Pointer to the key. -+@Input v The value associated with the key. -+@Return IMG_TRUE - success. -+ IMG_FALSE - failure. 
-+*/ /**************************************************************************/ -+IMG_INTERNAL IMG_BOOL -+HASH_Insert_Extended(HASH_TABLE *pHash, void *pKey, uintptr_t v) -+{ -+ BUCKET *pBucket; -+ -+ PVR_ASSERT(pHash != NULL); -+ -+ if (pHash == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter", __func__)); -+ return IMG_FALSE; -+ } -+ -+ pBucket = _AllocMem(sizeof(BUCKET) + pHash->uKeySize); -+ if (pBucket == NULL) -+ { -+ return IMG_FALSE; -+ } -+ -+ pBucket->v = v; -+ /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k (linux)*/ -+ OSCachedMemCopy(pBucket->k, pKey, pHash->uKeySize); -+ -+ _ChainInsert(pHash, pBucket, pHash->ppBucketTable, pHash->uSize); -+ -+ pHash->uCount++; -+ -+ /* check if we need to think about re-balancing */ -+ if (pHash->uCount > pHash->uGrowThreshold) -+ { -+ /* Ignore the return code from _Resize because the hash table is -+ still in a valid state and although not ideally sized, it is still -+ functional */ -+ _Resize(pHash, pHash->uSize << 1); -+ } -+ -+ return IMG_TRUE; -+} -+ -+/*************************************************************************/ /*! -+@Function HASH_Insert -+@Description Insert a key value pair into a hash table created with -+ HASH_Create. -+@Input pHash The hash table. -+@Input k The key value. -+@Input v The value associated with the key. -+@Return IMG_TRUE - success. -+ IMG_FALSE - failure. -+*/ /**************************************************************************/ -+IMG_INTERNAL IMG_BOOL -+HASH_Insert(HASH_TABLE *pHash, uintptr_t k, uintptr_t v) -+{ -+ return HASH_Insert_Extended(pHash, &k, v); -+} -+ -+/*************************************************************************/ /*! -+@Function HASH_Remove_Extended -+@Description Remove a key from a hash table created with -+ HASH_Create_Extended. -+@Input pHash The hash table. -+@Input pKey Pointer to key. -+@Return 0 if the key is missing, or the value associated with the key. -+*/ /**************************************************************************/ -+IMG_INTERNAL uintptr_t -+HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey) -+{ -+ BUCKET **ppBucket; -+ IMG_UINT32 uIndex; -+ -+ PVR_ASSERT(pHash != NULL); -+ -+ if (pHash == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Null hash table", __func__)); -+ return 0; -+ } -+ -+ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize); -+ -+ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext)) -+ { -+ /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */ -+ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey)) -+ { -+ BUCKET *pBucket = *ppBucket; -+ uintptr_t v = pBucket->v; -+ (*ppBucket) = pBucket->pNext; -+ -+ _FreeMem(pBucket); -+ /*not nulling original pointer, already overwritten*/ -+ -+ pHash->uCount--; -+ -+ /* check if we need to think about re-balancing, when the shrink -+ * threshold is 0 we are at the minimum size, no further shrink */ -+ if (pHash->uCount < pHash->uShrinkThreshold) -+ { -+ /* Ignore the return code from _Resize because the -+ hash table is still in a valid state and although -+ not ideally sized, it is still functional */ -+ _Resize(pHash, MAX(pHash->uSize >> 1, pHash->uMinimumSize)); -+ } -+ -+ return v; -+ } -+ } -+ return 0; -+} -+ -+/*************************************************************************/ /*! -+@Function HASH_Remove -+@Description Remove a key value pair from a hash table created with -+ HASH_Create. -+@Input pHash The hash table. -+@Input pKey Pointer to key. 
-+@Return 0 if the key is missing, or the value associated with the key. -+*/ /**************************************************************************/ -+IMG_INTERNAL uintptr_t -+HASH_Remove(HASH_TABLE *pHash, uintptr_t k) -+{ -+ return HASH_Remove_Extended(pHash, &k); -+} -+ -+/*************************************************************************/ /*! -+@Function HASH_Retrieve_Extended -+@Description Retrieve a value from a hash table created with -+ HASH_Create_Extended. -+@Input pHash The hash table. -+@Input pKey Pointer to key. -+@Return 0 if the key is missing, or the value associated with the key. -+*/ /**************************************************************************/ -+IMG_INTERNAL uintptr_t -+HASH_Retrieve_Extended(HASH_TABLE *pHash, void *pKey) -+{ -+ BUCKET **ppBucket; -+ IMG_UINT32 uIndex; -+ -+ PVR_ASSERT(pHash != NULL); -+ -+ if (pHash == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Null hash table", __func__)); -+ return 0; -+ } -+ -+ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize); -+ -+ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext)) -+ { -+ /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */ -+ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey)) -+ { -+ BUCKET *pBucket = *ppBucket; -+ uintptr_t v = pBucket->v; -+ -+ return v; -+ } -+ } -+ return 0; -+} -+ -+/*************************************************************************/ /*! -+@Function HASH_Retrieve -+@Description Retrieve a value from a hash table created with HASH_Create. -+@Input pHash The hash table. -+@Input pKey Pointer to key. -+@Return 0 if the key is missing, or the value associated with the key. -+*/ /**************************************************************************/ -+IMG_INTERNAL uintptr_t -+HASH_Retrieve(HASH_TABLE *pHash, uintptr_t k) -+{ -+ return HASH_Retrieve_Extended(pHash, &k); -+} -+ -+/*************************************************************************/ /*! -+@Function HASH_Iterate -+@Description Iterate over every entry in the hash table. -+@Input pHash Hash table to iterate. -+@Input pfnCallback Callback to call with the key and data for each -+. entry in the hash table -+@Return Callback error if any, otherwise PVRSRV_OK -+*/ /**************************************************************************/ -+IMG_INTERNAL PVRSRV_ERROR -+HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback, void* args) -+{ -+ IMG_UINT32 uIndex; -+ for (uIndex=0; uIndex < pHash->uSize; uIndex++) -+ { -+ BUCKET *pBucket; -+ pBucket = pHash->ppBucketTable[uIndex]; -+ while (pBucket != NULL) -+ { -+ PVRSRV_ERROR eError; -+ BUCKET *pNextBucket = pBucket->pNext; -+ -+ eError = pfnCallback((uintptr_t) ((void *) *(pBucket->k)), pBucket->v, args); -+ -+ /* The callback might want us to break out early */ -+ if (eError != PVRSRV_OK) -+ return eError; -+ -+ pBucket = pNextBucket; -+ } -+ } -+ return PVRSRV_OK; -+} -+ -+#ifdef HASH_TRACE -+/*************************************************************************/ /*! -+@Function HASH_Dump -+@Description Dump out some information about a hash table. -+@Input pHash The hash table. 
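Putting the pieces together, the rest of Services would drive this API roughly as follows. This is an illustrative sketch only, assuming the hash.h, img_defs.h and pvr_debug.h headers from this tree are available; the keys, values and callback are made up:

    #include "img_defs.h"
    #include "pvr_debug.h"
    #include "hash.h"        /* HASH_Create, HASH_Insert, ... (this patch) */

    /* HASH_pfnCallback: returning anything other than PVRSRV_OK stops iteration. */
    static PVRSRV_ERROR DumpEntry(uintptr_t k, uintptr_t v, void *pvPriv)
    {
        PVR_UNREFERENCED_PARAMETER(pvPriv);
        PVR_DPF((PVR_DBG_MESSAGE, "key=%lx value=%lx",
                 (unsigned long)k, (unsigned long)v));
        return PVRSRV_OK;
    }

    static void ExampleUsage(void)
    {
        HASH_TABLE *psTable = HASH_Create(8);        /* initial/minimum length 8 */
        if (psTable == NULL)
            return;

        (void) HASH_Insert(psTable, 0x1000, 0xAAAA); /* key -> value */
        (void) HASH_Retrieve(psTable, 0x1000);       /* 0xAAAA, or 0 if missing */
        (void) HASH_Iterate(psTable, DumpEntry, NULL);
        (void) HASH_Remove(psTable, 0x1000);         /* table must be emptied ... */
        HASH_Delete(psTable);                        /* ... before deletion */
    }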
-+*/ /**************************************************************************/ -+void -+HASH_Dump(HASH_TABLE *pHash) -+{ -+ IMG_UINT32 uIndex; -+ IMG_UINT32 uMaxLength=0; -+ IMG_UINT32 uEmptyCount=0; -+ -+ PVR_ASSERT(pHash != NULL); -+ for (uIndex=0; uIndex < pHash->uSize; uIndex++) -+ { -+ BUCKET *pBucket; -+ IMG_UINT32 uLength = 0; -+ if (pHash->ppBucketTable[uIndex] == NULL) -+ { -+ uEmptyCount++; -+ } -+ for (pBucket=pHash->ppBucketTable[uIndex]; -+ pBucket != NULL; -+ pBucket = pBucket->pNext) -+ { -+ uLength++; -+ } -+ uMaxLength = MAX(uMaxLength, uLength); -+ } -+ -+ PVR_TRACE(("hash table: uMinimumSize=%d size=%d count=%d", -+ pHash->uMinimumSize, pHash->uSize, pHash->uCount)); -+ PVR_TRACE((" empty=%d max=%d", uEmptyCount, uMaxLength)); -+} -+#endif -diff --git a/drivers/gpu/drm/img-rogue/hash.h b/drivers/gpu/drm/img-rogue/hash.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/hash.h -@@ -0,0 +1,247 @@ -+/*************************************************************************/ /*! -+@File -+@Title Self scaling hash tables -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements simple self scaling hash tables. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-+*/ /**************************************************************************/ -+ -+#ifndef HASH_H -+#define HASH_H -+ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+/* -+ * Keys passed to the comparison function are only guaranteed to be aligned on -+ * an uintptr_t boundary. -+ */ -+typedef IMG_UINT32 HASH_FUNC(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen); -+typedef IMG_BOOL HASH_KEY_COMP(size_t uKeySize, void *pKey1, void *pKey2); -+ -+typedef struct _HASH_TABLE_ HASH_TABLE; -+ -+typedef PVRSRV_ERROR (*HASH_pfnCallback) ( -+ uintptr_t k, -+ uintptr_t v, -+ void* pvPriv -+); -+ -+#if defined(DEBUG) -+#else -+#define HASH_CREATE(LEN) HASH_Create(LEN) -+#endif -+ -+/*************************************************************************/ /*! -+@Function HASH_Func_Default -+@Description Hash function intended for hashing keys composed of uintptr_t -+ arrays. -+@Input uKeySize The size of the hash key, in bytes. -+@Input pKey A pointer to the key to hash. -+@Input uHashTabLen The length of the hash table. -+@Return The hash value. -+*/ /**************************************************************************/ -+IMG_UINT32 HASH_Func_Default(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen); -+ -+/*************************************************************************/ /*! -+@Function HASH_Key_Comp_Default -+@Description Compares keys composed of uintptr_t arrays. -+@Input uKeySize The size of the hash key, in bytes. -+@Input pKey1 Pointer to first hash key to compare. -+@Input pKey2 Pointer to second hash key to compare. -+@Return IMG_TRUE - The keys match. -+ IMG_FALSE - The keys don't match. -+*/ /**************************************************************************/ -+IMG_BOOL HASH_Key_Comp_Default(size_t uKeySize, void *pKey1, void *pKey2); -+ -+/*************************************************************************/ /*! -+@Function HASH_Create_Extended -+@Description Create a self scaling hash table, using the supplied key size, -+ and the supplied hash and key comparison functions. -+@Input uInitialLen Initial and minimum length of the hash table, -+ where the length refers to the number of entries -+ in the hash table, not its size in bytes. -+@Input uKeySize The size of the key, in bytes. -+@Input pfnHashFunc Pointer to hash function. -+@Input pfnKeyComp Pointer to key comparison function. -+@Return NULL or hash table handle. -+*/ /**************************************************************************/ -+HASH_TABLE * HASH_Create_Extended_Int(IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp); -+#if defined(DEBUG) -+#define HASH_Create_Extended(LEN, KS, FUN, CMP) HASH_Create_Extended_Debug(LEN, KS, FUN, CMP, __FILE__, __LINE__) -+HASH_TABLE * HASH_Create_Extended_Debug (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp, -+ const char *file, const unsigned int line); -+#else -+#define HASH_Create_Extended HASH_Create_Extended_Int -+#endif -+ -+/*************************************************************************/ /*! -+@Function HASH_Create -+@Description Create a self scaling hash table with a key consisting of a -+ single uintptr_t, and using the default hash and key -+ comparison functions. -+@Input uInitialLen Initial and minimum length of the hash table, -+ where the length refers to the number of entries -+ in the hash table, not its size in bytes. -+@Return NULL or hash table handle. 
-+*/ /**************************************************************************/ -+HASH_TABLE * HASH_Create_Int(IMG_UINT32 uInitialLen); -+#if defined(DEBUG) -+#define HASH_Create(LEN) HASH_Create_Debug(LEN, __FILE__, __LINE__) -+HASH_TABLE * HASH_Create_Debug (IMG_UINT32 uInitialLen, const char *file, const unsigned int line); -+#else -+#define HASH_Create HASH_Create_Int -+#endif -+ -+/*************************************************************************/ /*! -+@Function HASH_Delete_Extended -+@Description Delete a hash table created by HASH_Create_Extended or -+ HASH_Create. All entries in the table should have been removed -+ before calling this function. -+@Input pHash Hash table -+@Input bWarn Set false to suppress warnings in the case of -+ deletion with active entries. -+@Return None -+*/ /**************************************************************************/ -+void HASH_Delete_Extended(HASH_TABLE *pHash, IMG_BOOL bWarn); -+ -+/*************************************************************************/ /*! -+@Function HASH_Delete -+@Description Delete a hash table created by HASH_Create_Extended or -+ HASH_Create. All entries in the table must have been removed -+ before calling this function. -+@Input pHash Hash table -+@Return None -+*/ /**************************************************************************/ -+void HASH_Delete(HASH_TABLE *pHash); -+ -+/*************************************************************************/ /*! -+@Function HASH_Insert_Extended -+@Description Insert a key value pair into a hash table created with -+ HASH_Create_Extended. -+@Input pHash The hash table. -+@Input pKey Pointer to the key. -+@Input v The value associated with the key. -+@Return IMG_TRUE - success. -+ IMG_FALSE - failure. -+*/ /**************************************************************************/ -+IMG_BOOL HASH_Insert_Extended(HASH_TABLE *pHash, void *pKey, uintptr_t v); -+ -+/*************************************************************************/ /*! -+@Function HASH_Insert -+@Description Insert a key value pair into a hash table created with -+ HASH_Create. -+@Input pHash The hash table. -+@Input k The key value. -+@Input v The value associated with the key. -+@Return IMG_TRUE - success. -+ IMG_FALSE - failure. -+*/ /**************************************************************************/ -+IMG_BOOL HASH_Insert(HASH_TABLE *pHash, uintptr_t k, uintptr_t v); -+ -+/*************************************************************************/ /*! -+@Function HASH_Remove_Extended -+@Description Remove a key from a hash table created with -+ HASH_Create_Extended. -+@Input pHash The hash table. -+@Input pKey Pointer to key. -+@Return 0 if the key is missing, or the value associated with the key. -+*/ /**************************************************************************/ -+uintptr_t HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey); -+ -+/*************************************************************************/ /*! -+@Function HASH_Remove -+@Description Remove a key value pair from a hash table created with -+ HASH_Create. -+@Input pHash The hash table. -+@Input k The key value. -+@Return 0 if the key is missing, or the value associated with the key. -+*/ /**************************************************************************/ -+uintptr_t HASH_Remove(HASH_TABLE *pHash, uintptr_t k); -+ -+/*************************************************************************/ /*! 
-+@Function HASH_Retrieve_Extended -+@Description Retrieve a value from a hash table created with -+ HASH_Create_Extended. -+@Input pHash The hash table. -+@Input pKey Pointer to key. -+@Return 0 if the key is missing, or the value associated with the key. -+*/ /**************************************************************************/ -+uintptr_t HASH_Retrieve_Extended(HASH_TABLE *pHash, void *pKey); -+ -+/*************************************************************************/ /*! -+@Function HASH_Retrieve -+@Description Retrieve a value from a hash table created with HASH_Create. -+@Input pHash The hash table. -+@Input k The key value. -+@Return 0 if the key is missing, or the value associated with the key. -+*/ /**************************************************************************/ -+uintptr_t HASH_Retrieve(HASH_TABLE *pHash, uintptr_t k); -+ -+/*************************************************************************/ /*! -+@Function HASH_Iterate -+@Description Iterate over every entry in the hash table. -+@Input pHash Hash table to iterate. -+@Input pfnCallback Callback to call with the key and data for each -+. entry in the hash table -+@Return Callback error if any, otherwise PVRSRV_OK -+*/ /**************************************************************************/ -+PVRSRV_ERROR HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback, void* args); -+ -+#ifdef HASH_TRACE -+/*************************************************************************/ /*! -+@Function HASH_Dump -+@Description Dump out some information about a hash table. -+@Input pHash The hash table. -+*/ /**************************************************************************/ -+void HASH_Dump(HASH_TABLE *pHash); -+#endif -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif /* HASH_H */ -+ -+/****************************************************************************** -+ End of file (hash.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/htb_debug.c b/drivers/gpu/drm/img-rogue/htb_debug.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/htb_debug.c -@@ -0,0 +1,1189 @@ -+/*************************************************************************/ /*! -+@File htb_debug.c -+@Title Debug Functionality -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Provides kernel side debugFS Functionality. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#include "rgxdevice.h" -+#include "htbserver.h" -+#include "htbuffer_types.h" -+#include "tlstream.h" -+#include "tlclient.h" -+#include "pvrsrv_tlcommon.h" -+#include "di_server.h" -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+#include "osfunc.h" -+#include "allocmem.h" -+#include "pvr_notifier.h" -+#include "pvrsrv.h" -+#include "htb_debug.h" -+ -+// Global data handles for buffer manipulation and processing -+ -+typedef struct { -+ IMG_PBYTE pBuf; /* Raw data buffer from TL stream */ -+ IMG_UINT32 uiBufLen; /* Amount of data to process from 'pBuf' */ -+ IMG_UINT32 uiTotal; /* Total bytes processed */ -+ IMG_UINT32 uiMsgLen; /* Length of HTB message to be processed */ -+ IMG_PBYTE pCurr; /* pointer to current message to be decoded */ -+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; /* Output string */ -+} HTB_Sentinel_t; -+ -+typedef struct -+{ -+ DI_ENTRY *psDumpHostDiEntry; /* debug info entry */ -+ HTB_Sentinel_t sSentinel; /* private control structure for HTB DI -+ operations */ -+ IMG_HANDLE hStream; /* stream handle for debugFS use */ -+} HTB_DBG_INFO; -+ -+static HTB_DBG_INFO g_sHTBData; -+ -+// Comment out for extra debug level -+// #define HTB_CHATTY_PRINT(x) PVR_DPF(x) -+#define HTB_CHATTY_PRINT(x) -+ -+typedef void (DI_PRINTF)(const OSDI_IMPL_ENTRY *, const IMG_CHAR *, ...); -+ -+/****************************************************************************** -+ * debugFS display routines -+ *****************************************************************************/ -+static int HTBDumpBuffer(DI_PRINTF, OSDI_IMPL_ENTRY *, void *); -+ -+static int _DebugHBTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ int retVal; -+ -+ PVR_ASSERT(psEntry != NULL); -+ -+ /* psEntry should never be NULL */ -+ if (psEntry == NULL) -+ { -+ return -1; -+ } -+ -+ /* Ensure that we have a valid address to use to dump info from. If NULL we -+ * return a failure code to terminate the DI read call. pvData is either -+ * DI_START_TOKEN (for the initial call) or an HTB buffer address for -+ * subsequent calls [returned from the NEXT function]. 
*/ -+ if (pvData == NULL) -+ { -+ return -1; -+ } -+ -+ retVal = HTBDumpBuffer(DIPrintf, psEntry, pvData); -+ -+ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Returning %d", __func__, retVal)); -+ -+ return retVal; -+} -+ -+static IMG_UINT32 idToLogIdx(IMG_UINT32); /* Forward declaration */ -+ -+/* -+ * HTB_GetNextMessage -+ * -+ * Get next non-empty message block from the buffer held in pSentinel->pBuf -+ * If we exhaust the data buffer we refill it (after releasing the previous -+ * message(s) [only one non-NULL message, but PAD messages will get released -+ * as we traverse them]. -+ * -+ * Input: -+ * pSentinel references the already acquired data buffer -+ * -+ * Output: -+ * pSentinel -+ * -> uiMsglen updated to the size of the non-NULL message -+ * -+ * Returns: -+ * Address of first non-NULL message in the buffer (if any) -+ * NULL if there is no further data available from the stream and the buffer -+ * contents have been drained. -+ */ -+static IMG_PBYTE HTB_GetNextMessage(HTB_Sentinel_t *pSentinel) -+{ -+ void *pNext, *pLast, *pStart, *pData = NULL; -+ void *pCurrent; /* Current processing point within buffer */ -+ PVRSRVTL_PPACKETHDR ppHdr; /* Current packet header */ -+ IMG_UINT32 uiHdrType; /* Packet header type */ -+ IMG_UINT32 uiMsgSize; /* Message size of current packet (bytes) */ -+ IMG_BOOL bUnrecognizedErrorPrinted = IMG_FALSE; -+ IMG_UINT32 ui32Data; -+ IMG_UINT32 ui32LogIdx; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(pSentinel != NULL); -+ -+ pLast = pSentinel->pBuf + pSentinel->uiBufLen; -+ -+ pStart = pSentinel->pBuf; -+ -+ pNext = pStart; -+ pSentinel->uiMsgLen = 0; // Reset count for this message -+ uiMsgSize = 0; // nothing processed so far -+ ui32LogIdx = HTB_SF_LAST; // Loop terminator condition -+ -+ do -+ { -+ /* -+ * If we've drained the buffer we must RELEASE and ACQUIRE some more. -+ */ -+ if (pNext >= pLast) -+ { -+ eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, -+ g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen); -+ -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'", __func__, -+ "TLClientAcquireData", PVRSRVGETERRORSTRING(eError))); -+ return NULL; -+ } -+ -+ // Reset our limits - if we've returned an empty buffer we're done. -+ pLast = pSentinel->pBuf + pSentinel->uiBufLen; -+ pStart = pSentinel->pBuf; -+ pNext = pStart; -+ -+ if (pStart == NULL || pLast == NULL) -+ { -+ return NULL; -+ } -+ } -+ -+ /* -+ * We should have a header followed by data block(s) in the stream. -+ */ -+ -+ pCurrent = pNext; -+ ppHdr = GET_PACKET_HDR(pCurrent); -+ -+ if (ppHdr == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Unexpected NULL packet in Host Trace buffer", -+ __func__)); -+ pSentinel->uiMsgLen += uiMsgSize; -+ return NULL; // This should never happen -+ } -+ -+ /* -+ * This should *NEVER* fire. If it does it means we have got some -+ * dubious packet header back from the HTB stream. In this case -+ * the sensible thing is to abort processing and return to -+ * the caller -+ */ -+ uiHdrType = GET_PACKET_TYPE(ppHdr); -+ -+ PVR_ASSERT(uiHdrType < PVRSRVTL_PACKETTYPE_LAST && -+ uiHdrType > PVRSRVTL_PACKETTYPE_UNDEF); -+ -+ if (uiHdrType < PVRSRVTL_PACKETTYPE_LAST && -+ uiHdrType > PVRSRVTL_PACKETTYPE_UNDEF) -+ { -+ /* -+ * We have a (potentially) valid data header. We should see if -+ * the associated packet header matches one of our expected -+ * types. 
-+ */ -+ pNext = GET_NEXT_PACKET_ADDR(ppHdr); -+ -+ PVR_ASSERT(pNext != NULL); -+ -+ uiMsgSize = (IMG_UINT32)((size_t)pNext - (size_t)ppHdr); -+ -+ pSentinel->uiMsgLen += uiMsgSize; -+ -+ pData = GET_PACKET_DATA_PTR(ppHdr); -+ -+ /* -+ * Handle non-DATA packet types. These include PAD fields which -+ * may have data associated and other types. We simply discard -+ * these as they have no decodable information within them. -+ */ -+ if (uiHdrType != PVRSRVTL_PACKETTYPE_DATA) -+ { -+ /* -+ * Now release the current non-data packet and proceed to the -+ * next entry (if any). -+ */ -+ eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, -+ g_sHTBData.hStream, uiMsgSize); -+ -+ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Packet Type %x " -+ "Length %u", __func__, uiHdrType, uiMsgSize)); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - '%s' message" -+ " size %u", __func__, "TLClientReleaseDataLess", -+ PVRSRVGETERRORSTRING(eError), uiMsgSize)); -+ } -+ -+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, -+ g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen); -+ -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - %s Giving up", -+ __func__, "TLClientAcquireData", -+ PVRSRVGETERRORSTRING(eError))); -+ -+ return NULL; -+ } -+ pSentinel->uiMsgLen = 0; -+ // Reset our limits - if we've returned an empty buffer we're done. -+ pLast = pSentinel->pBuf + pSentinel->uiBufLen; -+ pStart = pSentinel->pBuf; -+ pNext = pStart; -+ -+ if (pStart == NULL || pLast == NULL) -+ { -+ return NULL; -+ } -+ continue; -+ } -+ if (pData == NULL || pData >= pLast) -+ { -+ continue; -+ } -+ ui32Data = *(IMG_UINT32 *)pData; -+ ui32LogIdx = idToLogIdx(ui32Data); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, "Unexpected Header @%p value %x", -+ ppHdr, uiHdrType)); -+ -+ return NULL; -+ } -+ -+ /* -+ * Check if the unrecognized ID is valid and therefore, tracebuf -+ * needs updating. -+ */ -+ if (HTB_SF_LAST == ui32LogIdx && HTB_LOG_VALIDID(ui32Data) -+ && IMG_FALSE == bUnrecognizedErrorPrinted) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Unrecognised LOG value '%x' GID %x Params %d ID %x @ '%p'", -+ __func__, ui32Data, HTB_SF_GID(ui32Data), -+ HTB_SF_PARAMNUM(ui32Data), ui32Data & 0xfff, pData)); -+ bUnrecognizedErrorPrinted = IMG_TRUE; -+ } -+ -+ } while (HTB_SF_LAST == ui32LogIdx); -+ -+ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Returning data @ %p Log value '%x'", -+ __func__, pCurrent, ui32Data)); -+ -+ return pCurrent; -+} -+ -+/* -+ * HTB_GetFirstMessage -+ * -+ * Called from START to obtain the buffer address of the first message within -+ * pSentinel->pBuf. Will ACQUIRE data if the buffer is empty. -+ * -+ * Input: -+ * pSentinel -+ * pui64Pos Offset within the debugFS file -+ * -+ * Output: -+ * pSentinel->pCurr Set to reference the first valid non-NULL message within -+ * the buffer. If no valid message is found set to NULL. -+ * pSentinel -+ * ->pBuf if unset on entry -+ * ->uiBufLen if pBuf unset on entry -+ * -+ * Side-effects: -+ * HTB TL stream will be updated to bypass any zero-length PAD messages before -+ * the first non-NULL message (if any). 
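The acquire/walk/release loop above is easier to see in isolation from the TL client calls. Below is a deliberately simplified, standalone model of the same control flow; the two-field record used here is hypothetical and much simpler than the real PVRSRVTL packet layout, the point is only the skip-everything-that-is-not-DATA shape:

    #include <stdio.h>

    enum { PKT_PAD, PKT_DATA };                           /* simplified packet types */

    typedef struct { int type; unsigned payload; } Pkt;   /* hypothetical record */

    /* Shape of HTB_GetNextMessage: skip anything that is not a DATA packet and
     * return the first DATA packet, or NULL when the buffer is drained. */
    static const Pkt *next_data_packet(const Pkt *cur, const Pkt *end)
    {
        for (; cur < end; cur++)
        {
            if (cur->type == PKT_DATA)
                return cur;
            /* PAD and friends carry nothing decodable: release and move on. */
        }
        return NULL;   /* drained; the real code would re-acquire from the stream */
    }

    int main(void)
    {
        Pkt buf[] = { { PKT_PAD, 0 }, { PKT_PAD, 0 }, { PKT_DATA, 0xABCD } };
        const Pkt *p = next_data_packet(buf, buf + 3);
        if (p != NULL)
            printf("first data payload: 0x%X\n", p->payload);
        return 0;
    }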
-+ */ -+static void HTB_GetFirstMessage(HTB_Sentinel_t *pSentinel, IMG_UINT64 *pui64Pos) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_UNREFERENCED_PARAMETER(pui64Pos); -+ -+ if (pSentinel == NULL) -+ return; -+ -+ if (pSentinel->pBuf == NULL) -+ { -+ /* Acquire data */ -+ pSentinel->uiMsgLen = 0; -+ -+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, -+ g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen); -+ -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'", -+ __func__, "TLClientAcquireData", PVRSRVGETERRORSTRING(eError))); -+ -+ pSentinel->pBuf = NULL; -+ pSentinel->pCurr = NULL; -+ } -+ else -+ { -+ /* -+ * If there is no data available we set pSentinel->pCurr to NULL -+ * and return. This is expected behaviour if we've drained the -+ * data and nothing else has yet been produced. -+ */ -+ if (pSentinel->uiBufLen == 0 || pSentinel->pBuf == NULL) -+ { -+ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Empty Buffer @ %p", -+ __func__, pSentinel->pBuf)); -+ -+ pSentinel->pCurr = NULL; -+ return; -+ } -+ } -+ } -+ -+ /* Locate next message within buffer. NULL => no more data to process */ -+ pSentinel->pCurr = HTB_GetNextMessage(pSentinel); -+} -+ -+/* -+ * _DebugHBTraceDIStart: -+ * -+ * Returns the address to use for subsequent 'Show', 'Next', 'Stop' file ops. -+ * Return DI_START_TOKEN for the very first call and allocate a sentinel for -+ * use by the 'Show' routine and its helpers. -+ * This is stored in the psEntry's private hook field. -+ * -+ * We obtain access to the TLstream associated with the HTB. If this doesn't -+ * exist (because no pvrdebug capture trace has been set) we simply return with -+ * a NULL value which will stop the DI traversal. -+ */ -+static void *_DebugHBTraceDIStart(OSDI_IMPL_ENTRY *psEntry, -+ IMG_UINT64 *pui64Pos) -+{ -+ HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry); -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiTLMode; -+ void *retVal; -+ IMG_HANDLE hStream; -+ -+ /* The sentinel object should have been allocated during the creation -+ * of the DI entry. If it's not there it means that something went -+ * wrong. Return NULL in such case. */ -+ if (pSentinel == NULL) -+ { -+ return NULL; -+ } -+ -+ /* Check to see if the HTB stream has been configured yet. If not, there is -+ * nothing to display so we just return NULL to stop the stream access. -+ */ -+ if (!HTBIsConfigured()) -+ { -+ return NULL; -+ } -+ -+ /* Open the stream in non-blocking mode so that we can determine if there -+ * is no data to consume. Also disable the producer callback (if any) and -+ * the open callback so that we do not generate spurious trace data when -+ * accessing the stream. -+ */ -+ uiTLMode = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING| -+ PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK| -+ PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK; -+ -+ /* If two or more processes try to read from this file at the same time -+ * the TLClientOpenStream() function will handle this by allowing only -+ * one of them to actually open the stream. The other process will get -+ * an error stating that the stream is already open. The open function -+ * is threads safe. */ -+ eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, HTB_STREAM_NAME, uiTLMode, -+ &hStream); -+ -+ if (eError == PVRSRV_ERROR_ALREADY_OPEN) -+ { -+ /* Stream allows only one reader so return error if it's already -+ * opened. 
*/ -+ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Stream handle %p already " -+ "exists for %s", __func__, g_sHTBData.hStream, -+ HTB_STREAM_NAME)); -+ return NULL; -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ /* -+ * No stream available so nothing to report -+ */ -+ return NULL; -+ } -+ -+ /* There is a window where hStream can be NULL but the stream is already -+ * opened. This shouldn't matter since the TLClientOpenStream() will make -+ * sure that only one stream can be opened and only one process can reach -+ * this place at a time. Also the .stop function will be always called -+ * after this function returns so there should be no risk of stream -+ * not being closed. */ -+ PVR_ASSERT(g_sHTBData.hStream == NULL); -+ g_sHTBData.hStream = hStream; -+ -+ /* We're starting the read operation so ensure we properly zero the -+ * sentinel object. */ -+ memset(pSentinel, 0, sizeof(*pSentinel)); -+ -+ /* -+ * Find the first message location within pSentinel->pBuf -+ * => for DI_START_TOKEN we must issue our first ACQUIRE, also for the -+ * subsequent re-START calls (if any). -+ */ -+ -+ HTB_GetFirstMessage(pSentinel, pui64Pos); -+ -+ retVal = *pui64Pos == 0 ? DI_START_TOKEN : pSentinel->pCurr; -+ -+ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Returning %p, Stream %s @ %p", -+ __func__, retVal, HTB_STREAM_NAME, g_sHTBData.hStream)); -+ -+ return retVal; -+} -+ -+/* -+ * _DebugTBTraceDIStop: -+ * -+ * Stop processing data collection and release any previously allocated private -+ * data structure if we have exhausted the previously filled data buffers. -+ */ -+static void _DebugHBTraceDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry); -+ IMG_UINT32 uiMsgLen; -+ PVRSRV_ERROR eError; -+ -+ if (pSentinel == NULL) -+ { -+ return; -+ } -+ -+ uiMsgLen = pSentinel->uiMsgLen; -+ -+ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: MsgLen = %d", __func__, uiMsgLen)); -+ -+ /* If we get here the handle should never be NULL because -+ * _DebugHBTraceDIStart() shouldn't allow that. */ -+ if (g_sHTBData.hStream == NULL) -+ { -+ return; -+ } -+ -+ if (uiMsgLen != 0) -+ { -+ eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, -+ g_sHTBData.hStream, uiMsgLen); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - %s, nBytes %u", -+ __func__, "TLClientReleaseDataLess", -+ PVRSRVGETERRORSTRING(eError), uiMsgLen)); -+ } -+ } -+ -+ eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", -+ "TLClientCloseStream", PVRSRVGETERRORSTRING(eError), -+ __func__)); -+ } -+ -+ g_sHTBData.hStream = NULL; -+} -+ -+ -+/* -+ * _DebugHBTraceDINext: -+ * -+ * This is where we release any acquired data which has been processed by the -+ * DIShow routine. If we have encountered a DI entry overflow we stop -+ * processing and return NULL. Otherwise we release the message that we -+ * previously processed and simply update our position pointer to the next -+ * valid HTB message (if any) -+ */ -+static void *_DebugHBTraceDINext(OSDI_IMPL_ENTRY *psEntry, void *pvPriv, -+ IMG_UINT64 *pui64Pos) -+{ -+ HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry); -+ IMG_UINT64 ui64CurPos; -+ PVRSRV_ERROR eError; -+ -+ PVR_UNREFERENCED_PARAMETER(pvPriv); -+ -+ if (pui64Pos) -+ { -+ ui64CurPos = *pui64Pos; -+ *pui64Pos = ui64CurPos + 1; -+ } -+ -+ /* Determine if we've had an overflow on the previous 'Show' call. 
If so -+ * we leave the previously acquired data in the queue (by releasing 0 bytes) -+ * and return NULL to end this DI entry iteration. -+ * If we have not overflowed we simply get the next HTB message and use that -+ * for our display purposes. */ -+ -+ if (DIHasOverflowed(psEntry)) -+ { -+ (void) TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream, -+ 0); -+ -+ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: OVERFLOW - returning NULL", -+ __func__)); -+ -+ return NULL; -+ } -+ else -+ { -+ eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, -+ g_sHTBData.hStream, -+ pSentinel->uiMsgLen); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s' @ %p Length %d", -+ __func__, "TLClientReleaseDataLess", -+ PVRSRVGETERRORSTRING(eError), pSentinel->pCurr, -+ pSentinel->uiMsgLen)); -+ PVR_DPF((PVR_DBG_WARNING, "%s: Buffer @ %p..%p", __func__, -+ pSentinel->pBuf, -+ (IMG_PBYTE) (pSentinel->pBuf + pSentinel->uiBufLen))); -+ -+ } -+ -+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, -+ g_sHTBData.hStream, &pSentinel->pBuf, -+ &pSentinel->uiBufLen); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'\nPrev message len %d", -+ __func__, "TLClientAcquireData", -+ PVRSRVGETERRORSTRING(eError), pSentinel->uiMsgLen)); -+ pSentinel->pBuf = NULL; -+ } -+ -+ pSentinel->uiMsgLen = 0; /* We don't (yet) know the message size */ -+ } -+ -+ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Returning %p Msglen %d", __func__, -+ pSentinel->pBuf, pSentinel->uiMsgLen)); -+ -+ if (pSentinel->pBuf == NULL || pSentinel->uiBufLen == 0) -+ { -+ return NULL; -+ } -+ -+ pSentinel->pCurr = HTB_GetNextMessage(pSentinel); -+ -+ return pSentinel->pCurr; -+} -+ -+/****************************************************************************** -+ * HTB Dumping routines and definitions -+ *****************************************************************************/ -+#define IS_VALID_FMT_STRING(FMT) (strchr(FMT, '%') != NULL) -+#define MAX_STRING_SIZE (128) -+ -+typedef enum -+{ -+ TRACEBUF_ARG_TYPE_INT, -+ TRACEBUF_ARG_TYPE_ERR, -+ TRACEBUF_ARG_TYPE_NONE -+} TRACEBUF_ARG_TYPE; -+ -+/* -+ * Array of all Host Trace log IDs used to convert the tracebuf data -+ */ -+typedef struct _HTB_TRACEBUF_LOG_ { -+ HTB_LOG_SFids eSFId; -+ IMG_CHAR *pszName; -+ IMG_CHAR *pszFmt; -+ IMG_UINT32 ui32ArgNum; -+} HTB_TRACEBUF_LOG; -+ -+static const HTB_TRACEBUF_LOG aLogs[] = { -+#define X(a, b, c, d, e) {HTB_LOG_CREATESFID(a,b,e), #c, d, e}, -+ HTB_LOG_SFIDLIST -+#undef X -+}; -+ -+static const IMG_CHAR *aGroups[] = { -+#define X(A,B) #B, -+ HTB_LOG_SFGROUPLIST -+#undef X -+}; -+static const IMG_UINT32 uiMax_aGroups = ARRAY_SIZE(aGroups) - 1; -+ -+static TRACEBUF_ARG_TYPE ExtractOneArgFmt(IMG_CHAR **, IMG_CHAR *); -+/* -+ * ExtractOneArgFmt -+ * -+ * Scan the input 'printf-like' string *ppszFmt and return the next -+ * value string to be displayed. If there is no '%' format field in the -+ * string we return 'TRACEBUF_ARG_TYPE_NONE' and leave the input string -+ * untouched. 
-+ * -+ * Input -+ * ppszFmt reference to format string to be decoded -+ * pszOneArgFmt single field format from *ppszFmt -+ * -+ * Returns -+ * TRACEBUF_ARG_TYPE_ERR unrecognised argument -+ * TRACEBUF_ARG_TYPE_INT variable is of numeric type -+ * TRACEBUF_ARG_TYPE_NONE no variable reference in *ppszFmt -+ * -+ * Side-effect -+ * *ppszFmt is updated to reference the next part of the format string -+ * to be scanned -+ */ -+static TRACEBUF_ARG_TYPE ExtractOneArgFmt( -+ IMG_CHAR **ppszFmt, -+ IMG_CHAR *pszOneArgFmt) -+{ -+ IMG_CHAR *pszFmt; -+ IMG_CHAR *psT; -+ IMG_UINT32 ui32Count = MAX_STRING_SIZE; -+ IMG_UINT32 ui32OneArgSize; -+ TRACEBUF_ARG_TYPE eRet = TRACEBUF_ARG_TYPE_ERR; -+ -+ if (NULL == ppszFmt) -+ return TRACEBUF_ARG_TYPE_ERR; -+ -+ pszFmt = *ppszFmt; -+ if (NULL == pszFmt) -+ return TRACEBUF_ARG_TYPE_ERR; -+ -+ /* -+ * Find the first '%' -+ * NOTE: we can be passed a simple string to display which will have no -+ * parameters embedded within it. In this case we simply return -+ * TRACEBUF_ARG_TYPE_NONE and the string contents will be the full pszFmt -+ */ -+ psT = strchr(pszFmt, '%'); -+ if (psT == NULL) -+ { -+ return TRACEBUF_ARG_TYPE_NONE; -+ } -+ -+ /* Find next conversion identifier after the initial '%' */ -+ while ((*psT++) && (ui32Count-- > 0)) -+ { -+ switch (*psT) -+ { -+ case 'd': -+ case 'i': -+ case 'o': -+ case 'u': -+ case 'x': -+ case 'X': -+ { -+ eRet = TRACEBUF_ARG_TYPE_INT; -+ goto _found_arg; -+ } -+ case 's': -+ { -+ eRet = TRACEBUF_ARG_TYPE_ERR; -+ goto _found_arg; -+ } -+ } -+ } -+ -+ if ((psT == NULL) || (ui32Count == 0)) return TRACEBUF_ARG_TYPE_ERR; -+ -+_found_arg: -+ ui32OneArgSize = psT - pszFmt + 1; -+ OSCachedMemCopy(pszOneArgFmt, pszFmt, ui32OneArgSize); -+ pszOneArgFmt[ui32OneArgSize] = '\0'; -+ -+ *ppszFmt = psT + 1; -+ -+ return eRet; -+} -+ -+static IMG_UINT32 idToLogIdx(IMG_UINT32 ui32CheckData) -+{ -+ IMG_UINT32 i = 0; -+ for (i = 0; aLogs[i].eSFId != HTB_SF_LAST; i++) -+ { -+ if ( ui32CheckData == aLogs[i].eSFId ) -+ return i; -+ } -+ /* Nothing found, return max value */ -+ return HTB_SF_LAST; -+} -+ -+/* -+ * DecodeHTB -+ * -+ * Decode the data buffer message located at pBuf. This should be a valid -+ * HTB message as we are provided with the start of the buffer. If empty there -+ * is no message to process. We update the uiMsgLen field with the size of the -+ * HTB message that we have processed so that it can be returned to the system -+ * on successful logging of the message to the output file. 
-+ * -+ * Input -+ * pSentinel reference to newly read data and pending completion data -+ * from a previous invocation [handle DI entry buffer overflow] -+ * -> pBuf reference to raw data that we are to parse -+ * -> uiBufLen total number of bytes of data available -+ * -> pCurr start of message to decode -+ * -+ * pvDumpDebugFile output file -+ * pfnDumpDebugPrintf output generating routine -+ * -+ * Output -+ * pSentinel -+ * -> uiMsgLen length of the decoded message which will be freed to -+ * the system on successful completion of the DI entry -+ * update via _DebugHBTraceDINext(), -+ * Return Value -+ * 0 successful decode -+ * -1 unsuccessful decode -+ */ -+static int -+DecodeHTB(HTB_Sentinel_t *pSentinel, OSDI_IMPL_ENTRY *pvDumpDebugFile, -+ DI_PRINTF pfnDumpDebugPrintf) -+{ -+ IMG_UINT32 ui32Data, ui32LogIdx, ui32ArgsCur; -+ IMG_CHAR *pszFmt = NULL; -+ IMG_CHAR aszOneArgFmt[MAX_STRING_SIZE]; -+ IMG_BOOL bUnrecognizedErrorPrinted = IMG_FALSE; -+ -+ size_t nPrinted; -+ -+ void *pNext, *pLast, *pStart, *pData = NULL; -+ PVRSRVTL_PPACKETHDR ppHdr; /* Current packet header */ -+ IMG_UINT32 uiHdrType; /* Packet header type */ -+ IMG_UINT32 uiMsgSize; /* Message size of current packet (bytes) */ -+ IMG_BOOL bPacketsDropped; -+ -+ pLast = pSentinel->pBuf + pSentinel->uiBufLen; -+ pStart = pSentinel->pCurr; -+ -+ pSentinel->uiMsgLen = 0; // Reset count for this message -+ -+ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Buf @ %p..%p, Length = %d", -+ __func__, pStart, pLast, pSentinel->uiBufLen)); -+ -+ /* -+ * We should have a DATA header with the necessary information following -+ */ -+ ppHdr = GET_PACKET_HDR(pStart); -+ -+ if (ppHdr == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Unexpected NULL packet in Host Trace buffer", __func__)); -+ return -1; -+ } -+ -+ uiHdrType = GET_PACKET_TYPE(ppHdr); -+ PVR_ASSERT(uiHdrType == PVRSRVTL_PACKETTYPE_DATA); -+ -+ pNext = GET_NEXT_PACKET_ADDR(ppHdr); -+ -+ PVR_ASSERT(pNext != NULL); -+ -+ uiMsgSize = (IMG_UINT32)((size_t)pNext - (size_t)ppHdr); -+ -+ pSentinel->uiMsgLen += uiMsgSize; -+ -+ pData = GET_PACKET_DATA_PTR(ppHdr); -+ -+ if (pData == NULL || pData >= pLast) -+ { -+ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: pData = %p, pLast = %p " -+ "Returning 0", __func__, pData, pLast)); -+ return 0; -+ } -+ -+ ui32Data = *(IMG_UINT32 *)pData; -+ ui32LogIdx = idToLogIdx(ui32Data); -+ -+ /* -+ * Check if the unrecognised ID is valid and therefore, tracebuf -+ * needs updating. -+ */ -+ if (ui32LogIdx == HTB_SF_LAST) -+ { -+ if (HTB_LOG_VALIDID(ui32Data)) -+ { -+ if (!bUnrecognizedErrorPrinted) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Unrecognised LOG value '%x' GID %x Params %d ID %x @ '%p'", -+ __func__, ui32Data, HTB_SF_GID(ui32Data), -+ HTB_SF_PARAMNUM(ui32Data), ui32Data & 0xfff, pData)); -+ bUnrecognizedErrorPrinted = IMG_TRUE; -+ } -+ -+ return 0; -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Unrecognised and invalid LOG value detected '%x'", -+ __func__, ui32Data)); -+ -+ return -1; -+ } -+ -+ /* The string format we are going to display */ -+ /* -+ * The display will show the header (log-ID, group-ID, number of params) -+ * The maximum parameter list length = 15 (only 4bits used to encode) -+ * so we need HEADER + 15 * sizeof(UINT32) and the displayed string -+ * describing the event. We use a buffer in the per-process pSentinel -+ * structure to hold the data. 
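Put another way, the amount handed back to the Transport Layer for one DI step is simply the byte distance from the current packet header to the next one, accumulated into uiMsgLen. A trimmed sketch with standard C types (the real code uses the GET_*_PACKET macros):

    #include <stdint.h>

    /* Consumed bytes for one message: the header-to-next-header distance
     * covers the TL packet header plus its payload, so releasing exactly
     * this many bytes returns the whole packet to the stream. */
    static uint32_t message_bytes(const void *this_hdr, const void *next_hdr)
    {
        return (uint32_t)((const uint8_t *)next_hdr - (const uint8_t *)this_hdr);
    }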
-+ */ -+ pszFmt = aLogs[ui32LogIdx].pszFmt; -+ -+ /* add the message payload size to the running count */ -+ ui32ArgsCur = HTB_SF_PARAMNUM(ui32Data); -+ -+ /* Determine if we've over-filled the buffer and had to drop packets */ -+ bPacketsDropped = CHECK_PACKETS_DROPPED(ppHdr); -+ if (bPacketsDropped || -+ (uiHdrType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED)) -+ { -+ /* Flag this as it is useful to know ... */ -+ -+ PVR_DUMPDEBUG_LOG("\n<========================== *** PACKETS DROPPED *** ======================>\n"); -+ } -+ -+ { -+ IMG_UINT32 ui32Timestampns, ui32PID, ui32TID; -+ IMG_UINT64 ui64Timestamp, ui64TimestampSec; -+ IMG_CHAR *szBuffer = pSentinel->szBuffer; // Buffer start -+ IMG_CHAR *pszBuffer = pSentinel->szBuffer; // Current place in buf -+ size_t uBufBytesAvailable = sizeof(pSentinel->szBuffer); -+ IMG_UINT32 *pui32Data = (IMG_UINT32 *)pData; -+ IMG_UINT32 ui_aGroupIdx; -+ -+ // Get PID field from data stream -+ pui32Data++; -+ ui32PID = *pui32Data; -+ // Get TID field from data stream -+ pui32Data++; -+ ui32TID = *pui32Data; -+ // Get Timestamp part 1 from data stream -+ pui32Data++; -+ ui64Timestamp = (IMG_UINT64) *pui32Data << 32; -+ // Get Timestamp part 2 from data stream -+ pui32Data++; -+ ui64Timestamp |= (IMG_UINT64) *pui32Data; -+ // Move to start of message contents data -+ pui32Data++; -+ -+ /* -+ * We need to snprintf the data to a local in-kernel buffer -+ * and then PVR_DUMPDEBUG_LOG() that in one shot -+ */ -+ ui_aGroupIdx = MIN(HTB_SF_GID(ui32Data), uiMax_aGroups); -+ -+ /* Divide by 1B to get seconds & mod using output var (nanosecond resolution)*/ -+ ui64TimestampSec = OSDivide64r64(ui64Timestamp, 1000000000, &ui32Timestampns); -+ -+ nPrinted = OSSNPrintf(szBuffer, uBufBytesAvailable, "%010"IMG_UINT64_FMTSPEC".%09u:%-5u-%-5u-%s> ", -+ ui64TimestampSec, ui32Timestampns, ui32PID, ui32TID, aGroups[ui_aGroupIdx]); -+ if (nPrinted >= uBufBytesAvailable) -+ { -+ PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," -+ " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, -+ uBufBytesAvailable); -+ -+ nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ -+ } -+ -+ PVR_DUMPDEBUG_LOG("%s", pszBuffer); -+ /* Update where our next 'output' point in the buffer is */ -+ pszBuffer += nPrinted; -+ uBufBytesAvailable -= nPrinted; -+ -+ /* -+ * Print one argument at a time as this simplifies handling variable -+ * number of arguments. Special case handling for no arguments. -+ * This is the case for simple format strings such as -+ * HTB_SF_MAIN_KICK_UNCOUNTED. -+ */ -+ if (ui32ArgsCur == 0) -+ { -+ if (pszFmt) -+ { -+ nPrinted = OSStringLCopy(pszBuffer, pszFmt, uBufBytesAvailable); -+ if (nPrinted >= uBufBytesAvailable) -+ { -+ PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," -+ " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, -+ uBufBytesAvailable); -+ nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ -+ } -+ PVR_DUMPDEBUG_LOG("%s", pszBuffer); -+ pszBuffer += nPrinted; -+ /* Don't update the uBufBytesAvailable as we have finished this -+ * message decode. pszBuffer - szBuffer is the total amount of -+ * data we have decoded. 
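The timestamp handling above, recombining the two 32-bit words from the stream and splitting the result into seconds plus nanoseconds, can be sketched independently of the OS abstraction layer. The driver uses OSDivide64r64() and OSSNPrintf(); plain C division and snprintf() stand in here, and the "Divide by 1B" comments refer to this divide by 10^9:

    #include <stdint.h>
    #include <stdio.h>

    /* Rebuild the 64-bit nanosecond timestamp from the two words pulled out
     * of the stream and render it as the decoder does: seconds.nanoseconds. */
    static int format_timestamp(char *buf, size_t len,
                                uint32_t ts_hi, uint32_t ts_lo)
    {
        uint64_t ts_ns = ((uint64_t)ts_hi << 32) | ts_lo;
        uint64_t sec   = ts_ns / 1000000000u;
        uint32_t nsec  = (uint32_t)(ts_ns % 1000000000u);

        return snprintf(buf, len, "%010llu.%09u",
                        (unsigned long long)sec, (unsigned)nsec);
    }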
-+ */ -+ } -+ } -+ else -+ { -+ if (HTB_SF_GID(ui32Data) == HTB_GID_CTRL && HTB_SF_ID(ui32Data) == HTB_ID_MARK_SCALE) -+ { -+ IMG_UINT32 i; -+ IMG_UINT32 ui32ArgArray[HTB_MARK_SCALE_ARG_ARRAY_SIZE]; -+ IMG_UINT64 ui64OSTS = 0; -+ IMG_UINT32 ui32OSTSRem = 0; -+ IMG_UINT64 ui64CRTS = 0; -+ -+ /* Retrieve 6 args to an array */ -+ for (i = 0; i < ARRAY_SIZE(ui32ArgArray); i++) -+ { -+ ui32ArgArray[i] = *pui32Data; -+ pui32Data++; -+ --ui32ArgsCur; -+ } -+ -+ ui64OSTS = (IMG_UINT64) ui32ArgArray[HTB_ARG_OSTS_PT1] << 32 | ui32ArgArray[HTB_ARG_OSTS_PT2]; -+ ui64CRTS = (IMG_UINT64) ui32ArgArray[HTB_ARG_CRTS_PT1] << 32 | ui32ArgArray[HTB_ARG_CRTS_PT2]; -+ -+ /* Divide by 1B to get seconds, remainder in nano seconds*/ -+ ui64OSTS = OSDivide64r64(ui64OSTS, 1000000000, &ui32OSTSRem); -+ -+ nPrinted = OSSNPrintf(pszBuffer, -+ uBufBytesAvailable, -+ "HTBFWMkSync Mark=%u OSTS=%010" IMG_UINT64_FMTSPEC ".%09u CRTS=%" IMG_UINT64_FMTSPEC " CalcClkSpd=%u\n", -+ ui32ArgArray[HTB_ARG_SYNCMARK], -+ ui64OSTS, -+ ui32OSTSRem, -+ ui64CRTS, -+ ui32ArgArray[HTB_ARG_CLKSPD]); -+ -+ if (nPrinted >= uBufBytesAvailable) -+ { -+ PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," -+ " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, -+ uBufBytesAvailable); -+ nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ -+ } -+ -+ PVR_DUMPDEBUG_LOG("%s", pszBuffer); -+ pszBuffer += nPrinted; -+ uBufBytesAvailable -= nPrinted; -+ } -+ else -+ { -+ while (IS_VALID_FMT_STRING(pszFmt) && (uBufBytesAvailable > 0)) -+ { -+ IMG_UINT32 ui32TmpArg = *pui32Data; -+ TRACEBUF_ARG_TYPE eArgType; -+ -+ eArgType = ExtractOneArgFmt(&pszFmt, aszOneArgFmt); -+ -+ pui32Data++; -+ ui32ArgsCur--; -+ -+ switch (eArgType) -+ { -+ case TRACEBUF_ARG_TYPE_INT: -+ nPrinted = OSSNPrintf(pszBuffer, uBufBytesAvailable, -+ aszOneArgFmt, ui32TmpArg); -+ break; -+ -+ case TRACEBUF_ARG_TYPE_NONE: -+ nPrinted = OSStringLCopy(pszBuffer, pszFmt, -+ uBufBytesAvailable); -+ break; -+ -+ default: -+ nPrinted = OSSNPrintf(pszBuffer, uBufBytesAvailable, -+ "Error processing arguments, type not " -+ "recognized (fmt: %s)", aszOneArgFmt); -+ break; -+ } -+ if (nPrinted >= uBufBytesAvailable) -+ { -+ PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," -+ " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, -+ uBufBytesAvailable); -+ nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ -+ } -+ PVR_DUMPDEBUG_LOG("%s", pszBuffer); -+ pszBuffer += nPrinted; -+ uBufBytesAvailable -= nPrinted; -+ } -+ /* Display any remaining text in pszFmt string */ -+ if (pszFmt) -+ { -+ nPrinted = OSStringLCopy(pszBuffer, pszFmt, uBufBytesAvailable); -+ if (nPrinted >= uBufBytesAvailable) -+ { -+ PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," -+ " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, -+ uBufBytesAvailable); -+ nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ -+ } -+ PVR_DUMPDEBUG_LOG("%s", pszBuffer); -+ pszBuffer += nPrinted; -+ /* Don't update the uBufBytesAvailable as we have finished this -+ * message decode. pszBuffer - szBuffer is the total amount of -+ * data we have decoded. -+ */ -+ } -+ } -+ } -+ -+ /* Update total bytes processed */ -+ pSentinel->uiTotal += (pszBuffer - szBuffer); -+ } -+ return 0; -+} -+ -+/* -+ * HTBDumpBuffer: Dump the Host Trace Buffer using the TLClient API -+ * -+ * This routine just parses *one* message from the buffer. 
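Every print in the decode loop above follows the same guard: format into the remaining space, clamp the reported length if the output was truncated, then advance the cursor by the clamped amount. A compact standalone version of that idiom, with vsnprintf() in place of OSSNPrintf():

    #include <stdarg.h>
    #include <stdio.h>

    /* Append to a fixed buffer; on truncation report only what fits so the
     * caller's cursor and remaining-space bookkeeping never overrun. */
    static size_t append_clamped(char *cursor, size_t avail, const char *fmt, ...)
    {
        va_list ap;
        int n;

        va_start(ap, fmt);
        n = vsnprintf(cursor, avail, fmt, ap);
        va_end(ap);

        if (n < 0)
            return 0;                  /* encoding error: nothing written */
        if ((size_t)n >= avail)
            return avail;              /* truncated: clamp, as the loop does */
        return (size_t)n;
    }

Callers then advance with cursor += used; avail -= used; which is exactly the pszBuffer / uBufBytesAvailable bookkeeping used in DecodeHTB().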
-+ * The stream will be opened by the Start() routine, closed by the Stop() and -+ * updated for data consumed by this routine once we have DebugPrintf'd it. -+ * We use the new TLReleaseDataLess() routine which enables us to update the -+ * HTB contents with just the amount of data we have successfully processed. -+ * If we need to leave the data available we can call this with a 0 count. -+ * This will happen in the case of a buffer overflow so that we can reprocess -+ * any data which wasn't handled before. -+ * -+ * In case of overflow or an error we return -1 otherwise 0 -+ * -+ * Input: -+ * pfnPrintf output routine to display data -+ * psEntry handle to debug frontend -+ * pvData data address to start dumping from -+ * (set by Start() / Next()) -+ */ -+static int HTBDumpBuffer(DI_PRINTF pfnPrintf, OSDI_IMPL_ENTRY *psEntry, -+ void *pvData) -+{ -+ HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry); -+ -+ PVR_ASSERT(pvData != NULL); -+ -+ if (pvData == DI_START_TOKEN) -+ { -+ if (pSentinel->pCurr == NULL) -+ { -+ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: DI_START_TOKEN, " -+ "Empty buffer", __func__)); -+ return 0; -+ } -+ PVR_ASSERT(pSentinel->pCurr != NULL); -+ -+ /* Display a Header as we have data to process */ -+ pfnPrintf(psEntry, "%-20s:%-5s-%-5s-%s %s\n", "Timestamp", "PID", "TID", "Group>", -+ "Log Entry"); -+ } -+ else -+ { -+ if (pvData != NULL) -+ { -+ PVR_ASSERT(pSentinel->pCurr == pvData); -+ } -+ } -+ -+ return DecodeHTB(pSentinel, psEntry, pfnPrintf); -+} -+ -+ -+/****************************************************************************** -+ * External Entry Point routines ... -+ *****************************************************************************/ -+/*************************************************************************/ /*! -+ @Function HTB_CreateDIEntry -+ -+ @Description Create the debugFS entry-point for the host-trace-buffer -+ -+ @Returns eError internal error code, PVRSRV_OK on success -+ -+ */ /*************************************************************************/ -+PVRSRV_ERROR HTB_CreateDIEntry_Impl(void) -+{ -+ PVRSRV_ERROR eError; -+ -+ DI_ITERATOR_CB sIterator = { -+ .pfnStart = _DebugHBTraceDIStart, -+ .pfnStop = _DebugHBTraceDIStop, -+ .pfnNext = _DebugHBTraceDINext, -+ .pfnShow = _DebugHBTraceDIShow, -+ }; -+ -+ eError = DICreateEntry("host_trace", NULL, &sIterator, -+ &g_sHTBData.sSentinel, -+ DI_ENTRY_TYPE_GENERIC, -+ &g_sHTBData.psDumpHostDiEntry); -+ PVR_LOG_RETURN_IF_ERROR(eError, "DICreateEntry"); -+ -+ return PVRSRV_OK; -+} -+ -+ -+/*************************************************************************/ /*! -+ @Function HTB_DestroyDIEntry -+ -+ @Description Destroy the debugFS entry-point created by earlier -+ HTB_CreateDIEntry() call. -+*/ /**************************************************************************/ -+void HTB_DestroyDIEntry_Impl(void) -+{ -+ if (g_sHTBData.psDumpHostDiEntry != NULL) -+ { -+ DIDestroyEntry(g_sHTBData.psDumpHostDiEntry); -+ g_sHTBData.psDumpHostDiEntry = NULL; -+ } -+} -+ -+/* EOF */ -diff --git a/drivers/gpu/drm/img-rogue/htb_debug.h b/drivers/gpu/drm/img-rogue/htb_debug.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/htb_debug.h -@@ -0,0 +1,72 @@ -+/*************************************************************************/ /*! -+@File htb_debug.h -+@Title Linux debugFS routine setup header -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef HTB_DEBUG_H -+#define HTB_DEBUG_H -+ -+/**************************************************************************/ /*! -+ @Function HTB_CreateDIEntry -+ -+ @Description Create the debugFS entry-point for the host-trace-buffer -+ -+ @Returns eError internal error code, PVRSRV_OK on success -+ -+ */ /**************************************************************************/ -+PVRSRV_ERROR HTB_CreateDIEntry_Impl(void); -+ -+/**************************************************************************/ /*! -+ @Function HTB_DestroyFSEntry -+ -+ @Description Destroy the debugFS entry-point created by earlier -+ HTB_CreateDIEntry() call. 
-+*/ /**************************************************************************/ -+void HTB_DestroyDIEntry_Impl(void); -+ -+#if defined(PVRSRV_ENABLE_HTB) -+#define HTB_CreateDIEntry() HTB_CreateDIEntry_Impl() -+#define HTB_DestroyDIEntry() HTB_DestroyDIEntry_Impl() -+#else /* !PVRSRV_ENABLE_HTB */ -+#define HTB_CreateDIEntry() PVRSRV_OK -+#define HTB_DestroyDIEntry() -+#endif /* PVRSRV_ENABLE_HTB */ -+ -+#endif /* HTB_DEBUG_H */ -diff --git a/drivers/gpu/drm/img-rogue/htbserver.c b/drivers/gpu/drm/img-rogue/htbserver.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/htbserver.c -@@ -0,0 +1,936 @@ -+/*************************************************************************/ /*! -+@File htbserver.c -+@Title Host Trace Buffer server implementation. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Host Trace Buffer provides a mechanism to log Host events to a -+ buffer in a similar way to the Firmware Trace mechanism. -+ Host Trace Buffer logs data using a Transport Layer buffer. -+ The Transport Layer and pvrtld tool provides the mechanism to -+ retrieve the trace data. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#include "htbserver.h" -+#include "htbuffer_types.h" -+#include "tlstream.h" -+#include "pvrsrv_tlcommon.h" -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+#include "osfunc.h" -+#include "allocmem.h" -+#include "pvr_notifier.h" -+#include "pvrsrv.h" -+#include "pvrsrv_apphint.h" -+#include "os_apphint.h" -+ -+/* size of circular buffer controlling the maximum number of concurrent PIDs logged */ -+#define HTB_MAX_NUM_PID 8 -+ -+/* number of times to try rewriting a log entry */ -+#define HTB_LOG_RETRY_COUNT 5 -+ -+#if defined(__linux__) -+ #include -+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) -+ #include -+ #else -+ #include -+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ -+#else -+ #include -+#endif /* __linux__ */ -+ -+/*************************************************************************/ /*! -+ Host Trace Buffer control information structure -+*/ /**************************************************************************/ -+typedef struct -+{ -+ IMG_UINT32 ui32BufferSize; /*!< Requested buffer size in bytes -+ Once set this may not be changed */ -+ -+ HTB_OPMODE_CTRL eOpMode; /*!< Control what trace data is dropped if -+ the buffer is full. -+ Once set this may not be changed */ -+ -+/* IMG_UINT32 ui32GroupEnable; */ /*!< Flags word controlling groups to be -+ logged */ -+ -+ IMG_UINT32 ui32LogLevel; /*!< Log level to control messages logged */ -+ -+ IMG_UINT32 aui32EnablePID[HTB_MAX_NUM_PID]; /*!< PIDs to enable logging for -+ a specific set of processes */ -+ -+ IMG_UINT32 ui32PIDCount; /*!< Current number of PIDs being logged */ -+ -+ IMG_UINT32 ui32PIDHead; /*!< Head of the PID circular buffer */ -+ -+ HTB_LOGMODE_CTRL eLogMode; /*!< Logging mode control */ -+ -+ IMG_BOOL bLogDropSignalled; /*!< Flag indicating if a log message has -+ been signalled as dropped */ -+ -+ /* synchronisation parameters */ -+ IMG_UINT64 ui64SyncOSTS; -+ IMG_UINT64 ui64SyncCRTS; -+ IMG_UINT32 ui32SyncCalcClkSpd; -+ IMG_UINT32 ui32SyncMarker; -+ -+ IMG_BOOL bInitDone; /* Set by HTBInit, reset by HTBDeInit */ -+ -+ POS_SPINLOCK hRepeatMarkerLock; /*!< Spinlock used in HTBLogKM to protect global variables -+ (ByteCount, OSTS, CRTS ClkSpeed) -+ from becoming inconsistent due to calls from -+ both KM and UM */ -+ -+ IMG_UINT32 ui32ByteCount; /* Byte count used for triggering repeat sync point */ -+ /* static variables containing details of previous sync point */ -+ IMG_UINT64 ui64OSTS; -+ IMG_UINT64 ui64CRTS; -+ IMG_UINT32 ui32ClkSpeed; -+ -+} HTB_CTRL_INFO; -+ -+ -+/*************************************************************************/ /*! 
-+*/ /**************************************************************************/ -+static const IMG_UINT32 MapFlags[] = -+{ -+ 0, /* HTB_OPMODE_UNDEF = 0 */ -+ TL_OPMODE_DROP_NEWER, /* HTB_OPMODE_DROPLATEST */ -+ TL_OPMODE_DROP_OLDEST,/* HTB_OPMODE_DROPOLDEST */ -+ TL_OPMODE_BLOCK /* HTB_OPMODE_BLOCK */ -+}; -+ -+static_assert(0 == HTB_OPMODE_UNDEF, "Unexpected value for HTB_OPMODE_UNDEF"); -+static_assert(1 == HTB_OPMODE_DROPLATEST, "Unexpected value for HTB_OPMODE_DROPLATEST"); -+static_assert(2 == HTB_OPMODE_DROPOLDEST, "Unexpected value for HTB_OPMODE_DROPOLDEST"); -+static_assert(3 == HTB_OPMODE_BLOCK, "Unexpected value for HTB_OPMODE_BLOCK"); -+ -+static_assert(1 == TL_OPMODE_DROP_NEWER, "Unexpected value for TL_OPMODE_DROP_NEWER"); -+static_assert(2 == TL_OPMODE_DROP_OLDEST, "Unexpected value for TL_OPMODE_DROP_OLDEST"); -+static_assert(3 == TL_OPMODE_BLOCK, "Unexpected value for TL_OPMODE_BLOCK"); -+ -+static const IMG_UINT32 g_ui32TLBaseFlags; //TL_FLAG_NO_SIGNAL_ON_COMMIT -+ -+/* Minimum TL buffer size. -+ * Large enough for around 60 worst case messages or 200 average messages -+ */ -+#define HTB_TL_BUFFER_SIZE_MIN (0x10000) -+ -+/* Minimum concentration of HTB packets in a TL Stream is 60% -+ * If we just put the HTB header in the TL stream (12 bytes), the TL overhead -+ * is 8 bytes for its own header, so for the smallest possible (and most -+ * inefficient) packet we have 3/5 of the buffer used for actual HTB data. -+ * This shift is used as a guaranteed estimation on when to produce a repeat -+ * packet. By shifting the size of the buffer by 1 we effectively /2 this -+ * under the 60% boundary chance we may have overwritten the marker and thus -+ * guaranteed to always have a marker in the stream */ -+#define HTB_MARKER_PREDICTION_THRESHOLD(val) (val >> 1) -+ -+static HTB_CTRL_INFO g_sCtrl; -+static IMG_BOOL g_bConfigured = IMG_FALSE; -+static IMG_HANDLE g_hTLStream; -+ -+static IMG_HANDLE hHtbDbgReqNotify; -+ -+/************************************************************************/ /*! -+ @Function _LookupFlags -+ @Description Convert HTBuffer Operation mode to TLStream flags -+ -+ @Input eModeHTBuffer Operation Mode -+ -+ @Return IMG_UINT32 TLStream FLags -+*/ /**************************************************************************/ -+static IMG_UINT32 -+_LookupFlags( HTB_OPMODE_CTRL eMode ) -+{ -+ return (eMode < ARRAY_SIZE(MapFlags)) ? MapFlags[eMode] : 0; -+} -+ -+ -+/************************************************************************/ /*! -+ @Function _HTBLogDebugInfo -+ @Description Debug dump handler used to dump the state of the HTB module. -+ Called for each verbosity level during a debug dump. Function -+ only prints state when called for High verbosity. 
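_LookupFlags() above is a value-indexed table with a bounds check, and the static_assert block pins the enum encodings so the table cannot silently drift out of step with the enums. The same shape in a self-contained form; the DEMO_* names and flag values are invented, the real mapping being HTB_OPMODE_* to TL_OPMODE_*:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    enum demo_opmode { DEMO_OP_UNDEF = 0, DEMO_OP_DROP_NEWER,
                       DEMO_OP_DROP_OLDEST, DEMO_OP_BLOCK };

    /* Index i holds the flags for enum value i. */
    static const uint32_t demo_flags[] = { 0, 0x1u, 0x2u, 0x4u };

    static_assert(DEMO_OP_BLOCK == 3, "enum and table must stay in step");

    static uint32_t demo_lookup_flags(enum demo_opmode mode)
    {
        return ((size_t)mode < sizeof(demo_flags) / sizeof(demo_flags[0]))
                   ? demo_flags[mode]
                   : 0; /* unknown mode: no flags, as _LookupFlags() does */
    }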
-+ -+ @Input hDebugRequestHandle See PFN_DBGREQ_NOTIFY -+ -+ @Input ui32VerbLevel See PFN_DBGREQ_NOTIFY -+ -+ @Input pfnDumpDebugPrintf See PFN_DBGREQ_NOTIFY -+ -+ @Input pvDumpDebugFile See PFN_DBGREQ_NOTIFY -+ -+*/ /**************************************************************************/ -+static void _HTBLogDebugInfo( -+ PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, -+ IMG_UINT32 ui32VerbLevel, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile -+) -+{ -+ PVR_UNREFERENCED_PARAMETER(hDebugRequestHandle); -+ -+ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) -+ { -+ -+ if (g_bConfigured) -+ { -+ IMG_INT i; -+ -+ PVR_DUMPDEBUG_LOG("------[ HTB Log state: On ]------"); -+ -+ PVR_DUMPDEBUG_LOG("HTB Log mode: %d", g_sCtrl.eLogMode); -+ PVR_DUMPDEBUG_LOG("HTB Log level: %d", g_sCtrl.ui32LogLevel); -+ PVR_DUMPDEBUG_LOG("HTB Buffer Opmode: %d", g_sCtrl.eOpMode); -+ -+ for (i=0; i < HTB_FLAG_NUM_EL; i++) -+ { -+ PVR_DUMPDEBUG_LOG("HTB Log group %d: %x", i, g_auiHTBGroupEnable[i]); -+ } -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("------[ HTB Log state: Off ]------"); -+ } -+ } -+} -+ -+static IMG_UINT32 g_ui32HTBufferSize = HTB_TL_BUFFER_SIZE_MIN; -+ -+/* -+ * AppHint access routine forward definitions -+ */ -+static PVRSRV_ERROR _HTBSetLogGroup(const PVRSRV_DEVICE_NODE *, const void *, -+ IMG_UINT32); -+static PVRSRV_ERROR _HTBReadLogGroup(const PVRSRV_DEVICE_NODE *, const void *, -+ IMG_UINT32 *); -+ -+static PVRSRV_ERROR _HTBSetOpMode(const PVRSRV_DEVICE_NODE *, const void *, -+ IMG_UINT32); -+static PVRSRV_ERROR _HTBReadOpMode(const PVRSRV_DEVICE_NODE *, const void *, -+ IMG_UINT32 *); -+ -+static void _OnTLReaderOpenCallback(void *); -+ -+/************************************************************************/ /*! -+ @Function HTBInit -+ @Description Allocate and initialise the Host Trace Buffer -+ The buffer size may be changed by specifying -+ HTBufferSizeInKB=xxxx -+ -+ @Return eError Internal services call returned eError error -+ number -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+HTBInit_Impl(void) -+{ -+ void *pvAppHintState = NULL; -+ IMG_UINT32 ui32AppHintDefault; -+ IMG_UINT32 ui32BufBytes; -+ PVRSRV_ERROR eError; -+ -+ if (g_sCtrl.bInitDone) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "HTBInit: Driver already initialised")); -+ return PVRSRV_ERROR_ALREADY_EXISTS; -+ } -+ -+ /* -+ * Buffer Size can be configured by specifying a value in the AppHint -+ * This will only take effect at module load time so there is no query -+ * or setting mechanism available. -+ */ -+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HTBufferSizeInKB, -+ NULL, -+ NULL, -+ APPHINT_OF_DRIVER_NO_DEVICE, -+ NULL); -+ -+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableHTBLogGroup, -+ _HTBReadLogGroup, -+ _HTBSetLogGroup, -+ APPHINT_OF_DRIVER_NO_DEVICE, -+ NULL); -+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HTBOperationMode, -+ _HTBReadOpMode, -+ _HTBSetOpMode, -+ APPHINT_OF_DRIVER_NO_DEVICE, -+ NULL); -+ -+ /* -+ * Now get whatever values have been configured for our AppHints -+ */ -+ OSCreateAppHintState(&pvAppHintState); -+ ui32AppHintDefault = HTB_TL_BUFFER_SIZE_MIN / 1024; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HTBufferSizeInKB, -+ &ui32AppHintDefault, &g_ui32HTBufferSize); -+ OSFreeAppHintState(pvAppHintState); -+ -+ ui32BufBytes = g_ui32HTBufferSize * 1024; -+ -+ /* initialise rest of state */ -+ g_sCtrl.ui32BufferSize = -+ (ui32BufBytes < HTB_TL_BUFFER_SIZE_MIN) -+ ? 
HTB_TL_BUFFER_SIZE_MIN -+ : ui32BufBytes; -+ g_sCtrl.eOpMode = HTB_OPMODE_DROPOLDEST; -+ g_sCtrl.ui32LogLevel = 0; -+ g_sCtrl.ui32PIDCount = 0; -+ g_sCtrl.ui32PIDHead = 0; -+ g_sCtrl.eLogMode = HTB_LOGMODE_ALLPID; -+ g_sCtrl.bLogDropSignalled = IMG_FALSE; -+ -+ eError = OSSpinLockCreate(&g_sCtrl.hRepeatMarkerLock); -+ PVR_LOG_RETURN_IF_ERROR(eError, "OSSpinLockCreate"); -+ -+ eError = PVRSRVRegisterDriverDbgRequestNotify(&hHtbDbgReqNotify, -+ _HTBLogDebugInfo, DEBUG_REQUEST_HTB, NULL); -+ PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDeviceDbgRequestNotify"); -+ -+ g_sCtrl.bInitDone = IMG_TRUE; -+ -+ /* Log the current driver parameter setting for the HTBufferSizeInKB. -+ * We do this here as there is no other infrastructure for obtaining -+ * the value. -+ */ -+ if (g_ui32HTBufferSize != ui32AppHintDefault) -+ { -+ PVR_LOG(("Increasing HTBufferSize to %uKB", g_ui32HTBufferSize)); -+ } -+ -+ -+ return PVRSRV_OK; -+} -+ -+/************************************************************************/ /*! -+ @Function HTBDeInit -+ @Description Close the Host Trace Buffer and free all resources. Must -+ perform a no-op if already de-initialised. -+ -+ @Return eError Internal services call returned eError error -+ number -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+HTBDeInit_Impl( void ) -+{ -+ if (!g_sCtrl.bInitDone) -+ return PVRSRV_OK; -+ -+ if (hHtbDbgReqNotify) -+ { -+ /* Not much we can do if it fails, driver unloading */ -+ (void)PVRSRVUnregisterDriverDbgRequestNotify(hHtbDbgReqNotify); -+ hHtbDbgReqNotify = NULL; -+ } -+ -+ if (g_hTLStream) -+ { -+ TLStreamClose( g_hTLStream ); -+ g_hTLStream = NULL; -+ } -+ -+ if (g_sCtrl.hRepeatMarkerLock != NULL) -+ { -+ OSSpinLockDestroy(g_sCtrl.hRepeatMarkerLock); -+ g_sCtrl.hRepeatMarkerLock = NULL; -+ } -+ -+ g_sCtrl.bInitDone = IMG_FALSE; -+ return PVRSRV_OK; -+} -+ -+ -+/*************************************************************************/ /*! 
-+ AppHint interface functions -+*/ /**************************************************************************/ -+static -+PVRSRV_ERROR _HTBSetLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *psPrivate, -+ IMG_UINT32 ui32Value) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(psPrivate); -+ -+ return HTBControlKM(1, &ui32Value, 0, 0, -+ HTB_LOGMODE_UNDEF, HTB_OPMODE_UNDEF); -+} -+ -+static -+PVRSRV_ERROR _HTBReadLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *psPrivate, -+ IMG_UINT32 *pui32Value) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(psPrivate); -+ -+ *pui32Value = g_auiHTBGroupEnable[0]; -+ return PVRSRV_OK; -+} -+ -+static -+PVRSRV_ERROR _HTBSetOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *psPrivate, -+ IMG_UINT32 ui32Value) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(psPrivate); -+ -+ return HTBControlKM(0, NULL, 0, 0, HTB_LOGMODE_UNDEF, ui32Value); -+} -+ -+static -+PVRSRV_ERROR _HTBReadOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *psPrivate, -+ IMG_UINT32 *pui32Value) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(psPrivate); -+ -+ *pui32Value = (IMG_UINT32)g_sCtrl.eOpMode; -+ return PVRSRV_OK; -+} -+ -+#if defined(PVRSRV_ENABLE_HTB) -+static IMG_BOOL -+_ValidPID( IMG_UINT32 PID ) -+{ -+ IMG_UINT32 i; -+ -+ for (i = 0; i < g_sCtrl.ui32PIDCount; i++) -+ { -+ if ( g_sCtrl.aui32EnablePID[i] == PID ) -+ { -+ return IMG_TRUE; -+ } -+ } -+ return IMG_FALSE; -+} -+#endif /* PVRSRV_ENABLE_HTB */ -+ -+/*************************************************************************/ /*! -+ @Function HTBLogKM -+ @Description Record a Host Trace Buffer log event -+ -+ @Input PID The PID of the process the event is associated -+ with. This is provided as an argument rather -+ than querying internally so that events associated -+ with a particular process, but performed by -+ another can be logged correctly. -+ -+ @Input ui64TimeStamp The timestamp to be associated with this log event -+ -+ @Input SF The log event ID -+ -+ @Input ... Log parameters -+ -+ @Return PVRSRV_OK Success. 
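_ValidPID() above, together with the ui32PIDHead/ui32PIDCount updates in HTBControlKM() further down, implements a small circular filter: the newest enabled PIDs overwrite the oldest once the fixed-size table is full. A standalone sketch of that structure, with standard C types in place of IMG_*:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_PIDS 8   /* mirrors HTB_MAX_NUM_PID */

    struct pid_filter {
        uint32_t pids[MAX_PIDS];
        uint32_t head;    /* next slot to overwrite */
        uint32_t count;   /* valid entries, saturates at MAX_PIDS */
    };

    static void pid_filter_add(struct pid_filter *f, uint32_t pid)
    {
        f->pids[f->head] = pid;
        f->head = (f->head + 1) % MAX_PIDS;
        if (f->count < MAX_PIDS)
            f->count++;
    }

    static bool pid_filter_match(const struct pid_filter *f, uint32_t pid)
    {
        uint32_t i;
        for (i = 0; i < f->count; i++)
            if (f->pids[i] == pid)
                return true;
        return false;
    }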
-+ -+*/ /**************************************************************************/ -+static PVRSRV_ERROR -+HTBLogKM(IMG_UINT32 PID, -+ IMG_UINT32 TID, -+ IMG_UINT64 ui64TimeStamp, -+ HTB_LOG_SFids SF, -+ va_list args -+) -+{ -+#if defined(PVRSRV_ENABLE_HTB) -+ OS_SPINLOCK_FLAGS uiSpinLockFlags; -+ IMG_UINT32 ui32ReturnFlags = 0; -+ IMG_UINT32 i = 0; -+ -+ /* Local snapshot variables of global counters */ -+ IMG_UINT64 ui64OSTSSnap; -+ IMG_UINT64 ui64CRTSSnap; -+ IMG_UINT32 ui32ClkSpeedSnap; -+ -+ /* format of messages is: SF:PID:TID:TIMEPT1:TIMEPT2:[PARn]* -+ * Buffer is on the stack so we don't need a semaphore to guard it -+ */ -+ IMG_UINT32 aui32MessageBuffer[HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS]; -+ IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS + 1] = {0}; -+ -+ /* Min HTB size is HTB_TL_BUFFER_SIZE_MIN : 10000 bytes and Max message/ -+ * packet size is 4*(HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS) = 80 bytes, -+ * hence with these constraints this design is unlikely to get -+ * PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED error -+ */ -+ PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_ENABLED; -+ IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT; -+ IMG_UINT32 * pui32Message = aui32MessageBuffer; -+ IMG_UINT32 ui32NumArgs = HTB_SF_PARAMNUM(SF); -+ -+ IMG_UINT32 ui32MessageSize = 4 * (HTB_LOG_HEADER_SIZE+ui32NumArgs); -+ -+ PVR_ASSERT(ui32NumArgs <= HTB_LOG_MAX_PARAMS); -+ ui32NumArgs = (ui32NumArgs>HTB_LOG_MAX_PARAMS) ? -+ HTB_LOG_MAX_PARAMS : ui32NumArgs; -+ -+ PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs == HTB_SF_PARAMNUM(SF), eError, ReturnError); -+ PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs <= HTB_LOG_MAX_PARAMS, eError, ReturnError); -+ -+ /* Needs to be set up here because it's accessed from both `if` blocks below -+ * and it needs to be pre-populated for both of them (pui32Message case and -+ * HTB_SF_CTRL_FWSYNC_MARK_SCALE case). 
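The message layout described above (SF:PID:TID:TIMEPT1:TIMEPT2:[PARn]*) is five 32-bit header words followed by the parameters, which is where the 4 * (HTB_LOG_HEADER_SIZE + nargs) byte size comes from. A minimal sketch of that packing with standard C types:

    #include <stdint.h>

    /* Pack one HTB message into 32-bit words; returns its size in bytes. */
    static uint32_t pack_htb_message(uint32_t *out, uint32_t sf, uint32_t pid,
                                     uint32_t tid, uint64_t ts_ns,
                                     const uint32_t *args, uint32_t nargs)
    {
        uint32_t i, w = 0;

        out[w++] = sf;                              /* event ID word        */
        out[w++] = pid;
        out[w++] = tid;
        out[w++] = (uint32_t)(ts_ns >> 32);         /* TIMEPT1: high word   */
        out[w++] = (uint32_t)(ts_ns & 0xffffffffu); /* TIMEPT2: low word    */
        for (i = 0; i < nargs; i++)
            out[w++] = args[i];

        return w * (uint32_t)sizeof(uint32_t);
    }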
*/ -+ for (i = 0; i < ui32NumArgs; i++) -+ { -+ aui32Args[i] = va_arg(args, IMG_UINT32); -+ } -+ -+ if ( g_hTLStream -+ && ( 0 == PID || ~0 == PID || HTB_LOGMODE_ALLPID == g_sCtrl.eLogMode || _ValidPID(PID) ) -+/* && ( g_sCtrl.ui32GroupEnable & (0x1 << HTB_SF_GID(SF)) ) */ -+/* && ( g_sCtrl.ui32LogLevel >= HTB_SF_LVL(SF) ) */ -+ ) -+ { -+ *pui32Message++ = SF; -+ *pui32Message++ = PID; -+ *pui32Message++ = TID; -+ *pui32Message++ = ((IMG_UINT32)((ui64TimeStamp>>32)&0xffffffff)); -+ *pui32Message++ = ((IMG_UINT32)(ui64TimeStamp&0xffffffff)); -+ for (i = 0; i < ui32NumArgs; i++) -+ { -+ pui32Message[i] = aui32Args[i]; -+ } -+ -+ eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags ); -+ while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- ) -+ { -+ OSReleaseThreadQuanta(); -+ eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags ); -+ } -+ -+ if ( PVRSRV_OK == eError ) -+ { -+ g_sCtrl.bLogDropSignalled = IMG_FALSE; -+ } -+ else if ( PVRSRV_ERROR_STREAM_FULL != eError || !g_sCtrl.bLogDropSignalled ) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "TLStreamWrite", PVRSRVGETERRORSTRING(eError), __func__)); -+ } -+ if ( PVRSRV_ERROR_STREAM_FULL == eError ) -+ { -+ g_sCtrl.bLogDropSignalled = IMG_TRUE; -+ } -+ -+ } -+ -+ if (SF == HTB_SF_CTRL_FWSYNC_MARK_SCALE) -+ { -+ OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); -+ -+ /* If a marker is being placed reset byte count from last marker */ -+ g_sCtrl.ui32ByteCount = 0; -+ g_sCtrl.ui64OSTS = (IMG_UINT64)aui32Args[HTB_ARG_OSTS_PT1] << 32 | aui32Args[HTB_ARG_OSTS_PT2]; -+ g_sCtrl.ui64CRTS = (IMG_UINT64)aui32Args[HTB_ARG_CRTS_PT1] << 32 | aui32Args[HTB_ARG_CRTS_PT2]; -+ g_sCtrl.ui32ClkSpeed = aui32Args[HTB_ARG_CLKSPD]; -+ -+ OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); -+ } -+ else -+ { -+ OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); -+ /* Increase global count */ -+ g_sCtrl.ui32ByteCount += ui32MessageSize; -+ -+ /* Check if packet has overwritten last marker/rpt && -+ If the packet count is over half the size of the buffer */ -+ if (ui32ReturnFlags & TL_FLAG_OVERWRITE_DETECTED && -+ g_sCtrl.ui32ByteCount > HTB_MARKER_PREDICTION_THRESHOLD(g_sCtrl.ui32BufferSize)) -+ { -+ /* Take snapshot of global variables */ -+ ui64OSTSSnap = g_sCtrl.ui64OSTS; -+ ui64CRTSSnap = g_sCtrl.ui64CRTS; -+ ui32ClkSpeedSnap = g_sCtrl.ui32ClkSpeed; -+ /* Reset global variable counter */ -+ g_sCtrl.ui32ByteCount = 0; -+ OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); -+ -+ /* Produce a repeat marker */ -+ HTBSyncPartitionMarkerRepeat(g_sCtrl.ui32SyncMarker, ui64OSTSSnap, ui64CRTSSnap, ui32ClkSpeedSnap); -+ } -+ else -+ { -+ OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); -+ } -+ } -+ -+ReturnError: -+ return eError; -+#else -+ /* HTB support is disabled. Just return PVRSRV_OK and do nothing. */ -+ PVR_UNREFERENCED_PARAMETER(PID); -+ PVR_UNREFERENCED_PARAMETER(TID); -+ PVR_UNREFERENCED_PARAMETER(ui64TimeStamp); -+ PVR_UNREFERENCED_PARAMETER(SF); -+ PVR_UNREFERENCED_PARAMETER(args); -+ -+ return PVRSRV_OK; -+#endif -+} -+ -+/*************************************************************************/ /*! -+ @Function HTBLog -+ @Description Record a Host Trace Buffer log event -+ @Input PID The PID of the process the event is associated -+ with. 
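The repeat-marker logic above boils down to one decision: if the stream reported an overwrite and more than half the buffer (the HTB_MARKER_PREDICTION_THRESHOLD shift) has been written since the last sync marker, emit a repeat marker so a reader always finds at least one in the stream. As a sketch:

    #include <stdbool.h>
    #include <stdint.h>

    /* True when a repeat sync marker should be written: the buffer has
     * wrapped and the previous marker may have been overwritten. */
    static bool need_repeat_marker(uint32_t bytes_since_marker,
                                   uint32_t buffer_size,
                                   bool overwrite_detected)
    {
        return overwrite_detected &&
               bytes_since_marker > (buffer_size >> 1);
    }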
This is provided as an argument rather -+ than querying internally so that events -+ associated with a particular process, but -+ performed by another can be logged correctly. -+ @Input ui64TimeStamp The timestamp to be associated with this -+ log event -+ @Input SF The log event ID -+ @Input ... Log parameters -+ @Return PVRSRV_OK Success. -+*/ /**************************************************************************/ -+static PVRSRV_ERROR -+HTBLog(IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStamp, -+ IMG_UINT32 SF, ...) -+{ -+ PVRSRV_ERROR eError; -+ -+ va_list args; -+ va_start(args, SF); -+ eError = HTBLogKM(PID, TID, ui64TimeStamp, SF, args); -+ va_end(args); -+ return eError; -+} -+ -+static void -+_OnTLReaderOpenCallback( void *pvArg ) -+{ -+ if ( g_hTLStream ) -+ { -+ IMG_UINT64 ui64Time; -+ OSClockMonotonicns64(&ui64Time); -+ (void) HTBLog(0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, -+ g_sCtrl.ui32SyncMarker, -+ ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), -+ ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)), -+ ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), -+ ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)), -+ g_sCtrl.ui32SyncCalcClkSpd); -+ } -+ -+ PVR_UNREFERENCED_PARAMETER(pvArg); -+} -+ -+ -+/*************************************************************************/ /*! -+ @Function HTBControlKM -+ @Description Update the configuration of the Host Trace Buffer -+ -+ @Input ui32NumFlagGroups Number of group enable flags words -+ -+ @Input aui32GroupEnable Flags words controlling groups to be logged -+ -+ @Input ui32LogLevel Log level to record -+ -+ @Input ui32EnablePID PID to enable logging for a specific process -+ -+ @Input eLogMode Enable logging for all or specific processes, -+ -+ @Input eOpMode Control the behaviour of the data buffer -+ -+ @Return eError Internal services call returned eError error -+ number -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+HTBControlKM_Impl( -+ const IMG_UINT32 ui32NumFlagGroups, -+ const IMG_UINT32 * aui32GroupEnable, -+ const IMG_UINT32 ui32LogLevel, -+ const IMG_UINT32 ui32EnablePID, -+ const HTB_LOGMODE_CTRL eLogMode, -+ const HTB_OPMODE_CTRL eOpMode -+) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT; -+ IMG_UINT32 i; -+ IMG_UINT64 ui64Time; -+ OSClockMonotonicns64(&ui64Time); -+ -+ if ( !g_bConfigured && ui32NumFlagGroups ) -+ { -+ eError = TLStreamCreate( -+ &g_hTLStream, -+ HTB_STREAM_NAME, -+ g_sCtrl.ui32BufferSize, -+ _LookupFlags(HTB_OPMODE_DROPOLDEST) | g_ui32TLBaseFlags, -+ _OnTLReaderOpenCallback, NULL, NULL, NULL); -+ PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamCreate"); -+ g_bConfigured = IMG_TRUE; -+ } -+ -+ if (HTB_OPMODE_UNDEF != eOpMode && g_sCtrl.eOpMode != eOpMode) -+ { -+ g_sCtrl.eOpMode = eOpMode; -+ eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode | g_ui32TLBaseFlags)); -+ while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- ) -+ { -+ OSReleaseThreadQuanta(); -+ eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode | g_ui32TLBaseFlags)); -+ } -+ PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamReconfigure"); -+ } -+ -+ if ( ui32EnablePID ) -+ { -+ g_sCtrl.aui32EnablePID[g_sCtrl.ui32PIDHead] = ui32EnablePID; -+ g_sCtrl.ui32PIDHead++; -+ g_sCtrl.ui32PIDHead %= HTB_MAX_NUM_PID; -+ g_sCtrl.ui32PIDCount++; -+ if ( g_sCtrl.ui32PIDCount > HTB_MAX_NUM_PID ) -+ { -+ g_sCtrl.ui32PIDCount = HTB_MAX_NUM_PID; -+ } -+ } -+ -+ /* HTB_LOGMODE_ALLPID overrides 
ui32EnablePID */ -+ if ( HTB_LOGMODE_ALLPID == eLogMode ) -+ { -+ OSCachedMemSet(g_sCtrl.aui32EnablePID, 0, sizeof(g_sCtrl.aui32EnablePID)); -+ g_sCtrl.ui32PIDCount = 0; -+ g_sCtrl.ui32PIDHead = 0; -+ } -+ if ( HTB_LOGMODE_UNDEF != eLogMode ) -+ { -+ g_sCtrl.eLogMode = eLogMode; -+ } -+ -+ if ( ui32NumFlagGroups ) -+ { -+ for (i = 0; i < HTB_FLAG_NUM_EL && i < ui32NumFlagGroups; i++) -+ { -+ g_auiHTBGroupEnable[i] = aui32GroupEnable[i]; -+ } -+ for (; i < HTB_FLAG_NUM_EL; i++) -+ { -+ g_auiHTBGroupEnable[i] = 0; -+ } -+ } -+ -+ if ( ui32LogLevel ) -+ { -+ g_sCtrl.ui32LogLevel = ui32LogLevel; -+ } -+ -+ /* Dump the current configuration state */ -+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_OPMODE, g_sCtrl.eOpMode); -+ PVR_LOG_IF_ERROR(eError, "HTBLog"); -+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_ENABLE_GROUP, g_auiHTBGroupEnable[0]); -+ PVR_LOG_IF_ERROR(eError, "HTBLog"); -+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_LOG_LEVEL, g_sCtrl.ui32LogLevel); -+ PVR_LOG_IF_ERROR(eError, "HTBLog"); -+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_LOGMODE, g_sCtrl.eLogMode); -+ PVR_LOG_IF_ERROR(eError, "HTBLog"); -+ for (i = 0; i < g_sCtrl.ui32PIDCount; i++) -+ { -+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_ENABLE_PID, g_sCtrl.aui32EnablePID[i]); -+ PVR_LOG_IF_ERROR(eError, "HTBLog"); -+ } -+ /* Else should never be hit as we set the spd when the power state is updated */ -+ if (0 != g_sCtrl.ui32SyncMarker && 0 != g_sCtrl.ui32SyncCalcClkSpd) -+ { -+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, -+ g_sCtrl.ui32SyncMarker, -+ ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)), -+ ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)), -+ g_sCtrl.ui32SyncCalcClkSpd); -+ PVR_LOG_IF_ERROR(eError, "HTBLog"); -+ } -+ -+ return eError; -+} -+ -+ -+/*************************************************************************/ /*! -+ @Function HTBSyncPartitionMarker -+ @Description Write an HTB sync partition marker to the HTB log -+ -+ @Input ui33Marker Marker value -+ -+*/ /**************************************************************************/ -+void -+HTBSyncPartitionMarker_Impl( -+ const IMG_UINT32 ui32Marker -+) -+{ -+ g_sCtrl.ui32SyncMarker = ui32Marker; -+ if ( g_hTLStream ) -+ { -+ PVRSRV_ERROR eError; -+ IMG_UINT64 ui64Time; -+ OSClockMonotonicns64(&ui64Time); -+ -+ /* Else should never be hit as we set the spd when the power state is updated */ -+ if (0 != g_sCtrl.ui32SyncCalcClkSpd) -+ { -+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, -+ ui32Marker, -+ ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)), -+ ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)), -+ g_sCtrl.ui32SyncCalcClkSpd); -+ PVR_WARN_IF_ERROR(eError, "HTBLog"); -+ } -+ } -+} -+ -+/*************************************************************************/ /*! -+ @Function HTBSyncPartitionMarkerRepeat -+ @Description Write a HTB sync partition marker to the HTB log, given -+ the previous values to repeat. 
-+ -+ @Input ui33Marker Marker value -+ @Input ui64SyncOSTS previous OSTS -+ @Input ui64SyncCRTS previous CRTS -+ @Input ui32ClkSpeed previous Clock speed -+ -+*/ /**************************************************************************/ -+void -+HTBSyncPartitionMarkerRepeat_Impl( -+ const IMG_UINT32 ui32Marker, -+ const IMG_UINT64 ui64SyncOSTS, -+ const IMG_UINT64 ui64SyncCRTS, -+ const IMG_UINT32 ui32ClkSpeed -+) -+{ -+ if ( g_hTLStream ) -+ { -+ PVRSRV_ERROR eError; -+ IMG_UINT64 ui64Time; -+ OSClockMonotonicns64(&ui64Time); -+ -+ /* Else should never be hit as we set the spd when the power state is updated */ -+ if (0 != ui32ClkSpeed) -+ { -+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, -+ ui32Marker, -+ ((IMG_UINT32)((ui64SyncOSTS>>32)&0xffffffffU)), ((IMG_UINT32)(ui64SyncOSTS&0xffffffffU)), -+ ((IMG_UINT32)((ui64SyncCRTS>>32)&0xffffffffU)), ((IMG_UINT32)(ui64SyncCRTS&0xffffffffU)), -+ ui32ClkSpeed); -+ PVR_WARN_IF_ERROR(eError, "HTBLog"); -+ } -+ } -+} -+ -+/*************************************************************************/ /*! -+ @Function HTBSyncScale -+ @Description Write FW-Host synchronisation data to the HTB log when clocks -+ change or are re-calibrated -+ -+ @Input bLogValues IMG_TRUE if value should be immediately written -+ out to the log -+ -+ @Input ui32OSTS OS Timestamp -+ -+ @Input ui32CRTS Rogue timestamp -+ -+ @Input ui32CalcClkSpd Calculated clock speed -+ -+*/ /**************************************************************************/ -+void -+HTBSyncScale_Impl( -+ const IMG_BOOL bLogValues, -+ const IMG_UINT64 ui64OSTS, -+ const IMG_UINT64 ui64CRTS, -+ const IMG_UINT32 ui32CalcClkSpd -+) -+{ -+ g_sCtrl.ui64SyncOSTS = ui64OSTS; -+ g_sCtrl.ui64SyncCRTS = ui64CRTS; -+ g_sCtrl.ui32SyncCalcClkSpd = ui32CalcClkSpd; -+ if (g_hTLStream && bLogValues) -+ { -+ PVRSRV_ERROR eError; -+ IMG_UINT64 ui64Time; -+ OSClockMonotonicns64(&ui64Time); -+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, -+ g_sCtrl.ui32SyncMarker, -+ ((IMG_UINT32)((ui64OSTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64OSTS&0xffffffff)), -+ ((IMG_UINT32)((ui64CRTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64CRTS&0xffffffff)), -+ ui32CalcClkSpd); -+ /* -+ * Don't spam the log with non-failure cases -+ */ -+ PVR_WARN_IF_ERROR(eError, "HTBLog"); -+ } -+} -+ -+/*************************************************************************/ /*! -+ @Function HTBLogSimple -+ @Description Record a Host Trace Buffer log event with implicit PID and -+ Timestamp -+ @Input SF The log event ID -+ @Input ... Log parameters -+ @Return PVRSRV_OK Success. -+*/ /**************************************************************************/ -+IMG_INTERNAL PVRSRV_ERROR -+HTBLogSimple_Impl(IMG_UINT32 SF, ...) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT64 ui64TimeStamp; -+ va_list args; -+ va_start(args, SF); -+ OSClockMonotonicns64(&ui64TimeStamp); -+ eError = HTBLogKM(OSGetCurrentProcessID(), OSGetCurrentThreadID(), ui64TimeStamp, -+ SF, args); -+ va_end(args); -+ return eError; -+} -+ -+/*************************************************************************/ /*! 
-+ @Function HTBIsConfigured -+ @Description Determine if HTB stream has been configured -+ -+ @Input none -+ -+ @Return IMG_FALSE Stream has not been configured -+ IMG_TRUE Stream has been configured -+ -+*/ /**************************************************************************/ -+IMG_BOOL -+HTBIsConfigured(void) -+{ -+ return g_bConfigured; -+} -+/* EOF */ -diff --git a/drivers/gpu/drm/img-rogue/htbserver.h b/drivers/gpu/drm/img-rogue/htbserver.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/htbserver.h -@@ -0,0 +1,240 @@ -+/*************************************************************************/ /*! -+@File htbserver.h -+@Title Host Trace Buffer server implementation. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ -+@Description Host Trace Buffer provides a mechanism to log Host events to a -+ buffer in a similar way to the Firmware Trace mechanism. -+ Host Trace Buffer logs data using a Transport Layer buffer. -+ The Transport Layer and pvrtld tool provides the mechanism to -+ retrieve the trace data. -+ -+ A Host Trace can be merged with a corresponding Firmware Trace. -+ This is achieved by inserting synchronisation data into both -+ traces and post processing to merge them. -+ -+ The FW Trace will contain a "Sync Partition Marker". This is -+ updated every time the RGX is brought out of reset (RGX clock -+ timestamps reset at this point) and is repeated when the FW -+ Trace buffer wraps to ensure there is always at least 1 -+ partition marker in the Firmware Trace buffer whenever it is -+ read. -+ -+ The Host Trace will contain corresponding "Sync Partition -+ Markers" - #HTBSyncPartitionMarker(). Each partition is then -+ subdivided into "Sync Scale" sections - #HTBSyncScale(). The -+ "Sync Scale" data allows the timestamps from the two traces to -+ be correlated. The "Sync Scale" data is updated as part of the -+ standard RGX time correlation code (rgxtimecorr.c) and is -+ updated periodically including on power and clock changes. -+ -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. 
-+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef HTBSERVER_H -+#define HTBSERVER_H -+ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+#include "htbuffer_types.h" -+ -+#define HTBLOGK(SF, args...) do { if (HTB_GROUP_ENABLED(SF)) { (void)HTBLogSimple(SF, ## args); } } while (0) -+ -+/* macros to cast 64 or 32-bit pointers into 32-bit integer components for Host Trace */ -+#define HTBLOG_PTR_BITS_HIGH(p) ((IMG_UINT32)((((IMG_UINT64)((uintptr_t)p))>>32)&0xffffffff)) -+#define HTBLOG_PTR_BITS_LOW(p) ((IMG_UINT32)(((IMG_UINT64)((uintptr_t)p))&0xffffffff)) -+ -+/* macros to cast 64-bit integers into 32-bit integer components for Host Trace */ -+#define HTBLOG_U64_BITS_HIGH(u) ((IMG_UINT32)((u>>32)&0xffffffff)) -+#define HTBLOG_U64_BITS_LOW(u) ((IMG_UINT32)(u&0xffffffff)) -+ -+/* Host Trace Buffer name */ -+#define HTB_STREAM_NAME "PVRHTBuffer" -+ -+/************************************************************************/ /*! -+ @Function HTBInit -+ @Description Initialise the Host Trace Buffer and allocate all resources -+ -+ @Return eError Internal services call returned eError error -+ number -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+HTBInit_Impl(void); -+ -+/************************************************************************/ /*! -+ @Function HTBDeInit -+ @Description Close the Host Trace Buffer and free all resources -+ -+ @Return eError Internal services call returned eError error -+ number -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+HTBDeInit_Impl(void); -+ -+/*************************************************************************/ /*! -+ @Function HTBControlKM -+ @Description Update the configuration of the Host Trace Buffer -+ -+ @Input ui32NumFlagGroups Number of group enable flags words -+ -+ @Input aui32GroupEnable Flags words controlling groups to be logged -+ -+ @Input ui32LogLevel Log level to record -+ -+ @Input ui32EnablePID PID to enable logging for a specific process -+ -+ @Input eLogMode Enable logging for all or specific processes, -+ -+ @Input eOpMode Control the behaviour of the data buffer -+ -+ @Return eError Internal services call returned eError error -+ number -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+HTBControlKM_Impl(const IMG_UINT32 ui32NumFlagGroups, -+ const IMG_UINT32 *aui32GroupEnable, -+ const IMG_UINT32 ui32LogLevel, -+ const IMG_UINT32 ui32EnablePID, -+ const HTB_LOGMODE_CTRL eLogMode, -+ const HTB_OPMODE_CTRL eOpMode); -+ -+ -+/*************************************************************************/ /*! 
-+ @Function HTBSyncPartitionMarker -+ @Description Write an HTB sync partition marker to the HTB log -+ -+ @Input ui32Marker Marker value -+ -+*/ /**************************************************************************/ -+void -+HTBSyncPartitionMarker_Impl(const IMG_UINT32 ui32Marker); -+ -+/*************************************************************************/ /*! -+ @Function HTBSyncPartitionMarkerRpt -+ @Description Write a HTB sync partition marker to the HTB log, given -+ the previous values to repeat. -+ -+ @Input ui32Marker Marker value -+ @Input ui64SyncOSTS previous OSTS -+ @Input ui64SyncCRTS previous CRTS -+ @Input ui32ClkSpeed previous Clockspeed -+ -+*/ /**************************************************************************/ -+void -+HTBSyncPartitionMarkerRepeat_Impl(const IMG_UINT32 ui32Marker, -+ const IMG_UINT64 ui64SyncOSTS, -+ const IMG_UINT64 ui64SyncCRTS, -+ const IMG_UINT32 ui32ClkSpeed); -+ -+/*************************************************************************/ /*! -+ @Function HTBSyncScale -+ @Description Write FW-Host synchronisation data to the HTB log when clocks -+ change or are re-calibrated -+ -+ @Input bLogValues IMG_TRUE if value should be immediately written -+ out to the log -+ -+ @Input ui64OSTS OS Timestamp -+ -+ @Input ui64CRTS Rogue timestamp -+ -+ @Input ui32CalcClkSpd Calculated clock speed -+ -+*/ /**************************************************************************/ -+void -+HTBSyncScale_Impl(const IMG_BOOL bLogValues, const IMG_UINT64 ui64OSTS, -+ const IMG_UINT64 ui64CRTS, const IMG_UINT32 ui32CalcClkSpd); -+ -+/*************************************************************************/ /*! -+ @Function HTBLogSimple -+ @Description Record a Host Trace Buffer log event with implicit PID and Timestamp -+ -+ @Input SF The log event ID -+ -+ @Input ... Log parameters -+ -+ @Return PVRSRV_OK Success. -+ -+*/ /**************************************************************************/ -+IMG_INTERNAL PVRSRV_ERROR -+HTBLogSimple_Impl(IMG_UINT32 SF, ...); -+ -+/* DEBUG log group enable */ -+#if !defined(HTB_DEBUG_LOG_GROUP) -+#undef HTB_LOG_TYPE_DBG /* No trace statements in this log group should be checked in */ -+#define HTB_LOG_TYPE_DBG __BUILDERROR__ -+#endif -+ -+#if defined(PVRSRV_ENABLE_HTB) -+/*************************************************************************/ /*! 
-+ @Function HTBIsConfigured -+ @Description Determine if HTB stream has been configured -+ -+ @Input none -+ -+ @Return IMG_FALSE Stream has not been configured -+ IMG_TRUE Stream has been configured -+ -+*/ /**************************************************************************/ -+IMG_BOOL -+HTBIsConfigured_Impl(void); -+ -+#define HTBIsConfigured HTBIsConfigured_Impl -+#define HTBLogSimple HTBLogSimple_Impl -+#define HTBSyncScale(bLogValues, ui64OSTS, ui64CRTS, ui32CalcClkSpd) \ -+ HTBSyncScale_Impl((bLogValues), (ui64OSTS), (ui64CRTS), (ui32CalcClkSpd)) -+#define HTBSyncPartitionMarkerRepeat(ui32Marker, ui64SyncOSTS, ui64SyncCRTS, ui32ClkSpeed) \ -+ HTBSyncPartitionMarkerRepeat_Impl((ui32Marker), (ui64SyncOSTS), (ui64SyncCRTS), (ui32ClkSpeed)) -+#define HTBSyncPartitionMarker(a) HTBSyncPartitionMarker_Impl((a)) -+#define HTBControlKM(ui32NumFlagGroups, aui32GroupEnable, ui32LogLevel, ui32EnablePID, eLogMode, eOpMode) \ -+ HTBControlKM_Impl((ui32NumFlagGroups), (aui32GroupEnable), (ui32LogLevel), (ui32EnablePID), (eLogMode), (eOpMode)) -+#define HTBInit() HTBInit_Impl() -+#define HTBDeInit() HTBDeInit_Impl() -+#else /* !PVRSRV_ENABLE_HTB) */ -+#define HTBIsConfigured() IMG_FALSE -+#define HTBLogSimple(SF, args...) PVRSRV_OK -+#define HTBSyncScale(a, b, c, d) -+#define HTBSyncPartitionMarkerRepeat(a, b, c, d) -+#define HTBSyncPartitionMarker(a) -+#define HTBControlKM(a, b, c, d, e, f) PVRSRV_OK -+#define HTBDeInit() PVRSRV_OK -+#define HTBInit() PVRSRV_OK -+#endif /* PVRSRV_ENABLE_HTB */ -+#endif /* HTBSERVER_H */ -+ -+/* EOF */ -diff --git a/drivers/gpu/drm/img-rogue/htbuffer.c b/drivers/gpu/drm/img-rogue/htbuffer.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/htbuffer.c -@@ -0,0 +1,106 @@ -+/*************************************************************************/ /*! -+@File htbuffer.c -+@Title Host Trace Buffer shared API. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Host Trace Buffer provides a mechanism to log Host events to a -+ buffer in a similar way to the Firmware Trace mechanism. -+ Host Trace Buffer logs data using a Transport Layer buffer. -+ The Transport Layer and pvrtld tool provides the mechanism to -+ retrieve the trace data. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
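Note: the HTBLOG_PTR_BITS_* and HTBLOG_U64_BITS_* helpers in htbserver.h exist because every Host Trace Buffer log parameter is a 32-bit word, so pointers and 64-bit values are logged as high/low pairs and reassembled by the decoder. A minimal standalone sketch of the same split-and-reassemble arithmetic, using standard C types in place of the IMG_* typedefs:

#include <assert.h>
#include <stdint.h>

/* Same arithmetic as HTBLOG_U64_BITS_HIGH()/HTBLOG_U64_BITS_LOW(). */
static uint32_t u64_bits_high(uint64_t u) { return (uint32_t)((u >> 32) & 0xffffffffu); }
static uint32_t u64_bits_low(uint64_t u)  { return (uint32_t)(u & 0xffffffffu); }

int main(void)
{
	uint64_t timestamp = 0x0123456789abcdefULL;   /* e.g. an OS timestamp */
	uint32_t hi = u64_bits_high(timestamp);
	uint32_t lo = u64_bits_low(timestamp);

	assert(hi == 0x01234567u && lo == 0x89abcdefu);

	/* The consumer side rebuilds the original 64-bit value from the two words. */
	assert((((uint64_t)hi << 32) | lo) == timestamp);
	return 0;
}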
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if defined(__linux__) && defined(__KERNEL__) -+ #include -+ -+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) -+ #include -+ #else -+ #include -+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ -+#else -+ #include -+#endif /* __linux__ */ -+ -+#include "htbuffer.h" -+#include "osfunc.h" -+#include "client_htbuffer_bridge.h" -+ -+/* The group flags array of ints large enough to store all the group flags -+ * NB: This will only work while all logging is in the kernel -+ */ -+IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL] = {0}; -+ -+ -+/*************************************************************************/ /*! -+ @Function HTBControl -+ @Description Update the configuration of the Host Trace Buffer -+ @Input hSrvHandle Server Handle -+ @Input ui32NumFlagGroups Number of group enable flags words -+ @Input aui32GroupEnable Flags words controlling groups to be logged -+ @Input ui32LogLevel Log level to record -+ @Input ui32EnablePID PID to enable logging for a specific process -+ @Input eLogPidMode Enable logging for all or specific processes, -+ @Input eOpMode Control what trace data is dropped if the TL -+ buffer is full -+ @Return eError Internal services call returned eError error -+ number -+*/ /**************************************************************************/ -+IMG_INTERNAL PVRSRV_ERROR -+HTBControl( -+ IMG_HANDLE hSrvHandle, -+ IMG_UINT32 ui32NumFlagGroups, -+ IMG_UINT32 * aui32GroupEnable, -+ IMG_UINT32 ui32LogLevel, -+ IMG_UINT32 ui32EnablePID, -+ HTB_LOGMODE_CTRL eLogPidMode, -+ HTB_OPMODE_CTRL eOpMode -+) -+{ -+ return BridgeHTBControl( -+ hSrvHandle, -+ ui32NumFlagGroups, -+ aui32GroupEnable, -+ ui32LogLevel, -+ ui32EnablePID, -+ eLogPidMode, -+ eOpMode -+ ); -+} -+ -+/* EOF */ -diff --git a/drivers/gpu/drm/img-rogue/htbuffer.h b/drivers/gpu/drm/img-rogue/htbuffer.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/htbuffer.h -@@ -0,0 +1,92 @@ -+/*************************************************************************/ /*! -+@File htbuffer.h -+@Title Host Trace Buffer shared API. -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@Description Host Trace Buffer provides a mechanism to log Host events to a -+ buffer in a similar way to the Firmware Trace mechanism. -+ Host Trace Buffer logs data using a Transport Layer buffer. -+ The Transport Layer and pvrtld tool provides the mechanism to -+ retrieve the trace data. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef HTBUFFER_H -+#define HTBUFFER_H -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+#include "htbuffer_sf.h" -+#include "htbuffer_types.h" -+ -+/*************************************************************************/ /*! 
-+ @Function HTBControl -+ @Description Update the configuration of the Host Trace Buffer -+ @Input hSrvHandle Server Handle -+ @Input ui32NumFlagGroups Number of group enable flags words -+ @Input aui32GroupEnable Flags words controlling groups to be logged -+ @Input ui32LogLevel Log level to record -+ @Input ui32EnablePID PID to enable logging for a specific process -+ @Input eLogPidMode Enable logging for all or specific processes, -+ @Input eOpMode Control what trace data is dropped if the TL -+ buffer is full -+ @Return eError Internal services call returned eError error -+ number -+*/ /**************************************************************************/ -+IMG_INTERNAL PVRSRV_ERROR -+HTBControl( -+ IMG_HANDLE hSrvHandle, -+ IMG_UINT32 ui32NumFlagGroups, -+ IMG_UINT32 * aui32GroupEnable, -+ IMG_UINT32 ui32LogLevel, -+ IMG_UINT32 ui32EnablePID, -+ HTB_LOGMODE_CTRL eLogPidMode, -+ HTB_OPMODE_CTRL eOpMode); -+ -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif /* HTBUFFER_H */ -+/***************************************************************************** -+ End of file (htbuffer.h) -+*****************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/htbuffer_init.h b/drivers/gpu/drm/img-rogue/htbuffer_init.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/htbuffer_init.h -@@ -0,0 +1,92 @@ -+/*************************************************************************/ /*! -+@File htbuffer_init.h -+@Title Host Trace Buffer functions needed for Services initialisation -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
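Note: htbuffer.h declares the client-side HTBControl() wrapper around BridgeHTBControl(). A hedged usage sketch, assuming a PVRSRV_ENABLE_HTB build and an hSrvHandle obtained from the usual services connection setup (not part of these hunks); the group flag helpers come from htbuffer_sf.h and the mode enums from htbuffer_types.h:

#include "htbuffer.h"

/* Hypothetical caller: enable CTRL and MAIN group logging for every PID,
 * dropping the oldest data when the Transport Layer buffer fills up. */
static PVRSRV_ERROR EnableHostTrace(IMG_HANDLE hSrvHandle)
{
	IMG_UINT32 aui32GroupEnable[HTB_FLAG_NUM_EL] = {0};

	aui32GroupEnable[HTB_LOG_GROUP_FLAG_GROUP(HTB_GROUP_CTRL)] |=
		HTB_LOG_GROUP_FLAG(HTB_GROUP_CTRL);
	aui32GroupEnable[HTB_LOG_GROUP_FLAG_GROUP(HTB_GROUP_MAIN)] |=
		HTB_LOG_GROUP_FLAG(HTB_GROUP_MAIN);

	return HTBControl(hSrvHandle,
	                  HTB_FLAG_NUM_EL,       /* ui32NumFlagGroups */
	                  aui32GroupEnable,
	                  0,                     /* ui32LogLevel */
	                  0,                     /* ui32EnablePID: not used for ALLPID */
	                  HTB_LOGMODE_ALLPID,
	                  HTB_OPMODE_DROPOLDEST);
}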
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef HTBUFFER_INIT_H -+#define HTBUFFER_INIT_H -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+#include "img_types.h" -+#include "img_defs.h" -+ -+/*************************************************************************/ /*! -+ @Function HTBControl -+ @Description Update the configuration of the Host Trace Buffer -+ -+ @Input hSrvHandle Server Handle -+ -+ @Input ui32NumFlagGroups Number of group enable flags words -+ -+ @Input aui32GroupEnable Flags words controlling groups to be logged -+ -+ @Input ui32LogLevel Log level to record -+ -+ @Input ui32EnablePID PID to enable logging for a specific process -+ -+ @Input eLogMode Enable logging for all or specific processes, -+ -+ @Input eOpMode Control what trace data is dropped if the TL -+ buffer is full -+ -+ @Return eError Internal services call returned eError error -+ number -+*/ /**************************************************************************/ -+IMG_INTERNAL PVRSRV_ERROR -+HTBControl( -+ IMG_HANDLE hSrvHandle, -+ IMG_UINT32 ui32NumFlagGroups, -+ IMG_UINT32 * aui32GroupEnable, -+ IMG_UINT32 ui32LogLevel, -+ IMG_UINT32 ui32EnablePID, -+ HTB_LOGMODE_CTRL eLogMode, -+ HTB_OPMODE_CTRL eOpMode -+); -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif /* HTBUFFER_INIT_H */ -+/***************************************************************************** -+ End of file (htbuffer_init.h) -+*****************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/htbuffer_sf.h b/drivers/gpu/drm/img-rogue/htbuffer_sf.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/htbuffer_sf.h -@@ -0,0 +1,245 @@ -+/*************************************************************************/ /*! -+@File htbuffer_sf.h -+@Title Host Trace Buffer interface string format specifiers -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the Host Trace Buffer logging messages. The following -+ list are the messages the host driver prints. Changing anything -+ but the first column or spelling mistakes in the strings will -+ break compatibility with log files created with older/newer -+ driver versions. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef HTBUFFER_SF_H -+#define HTBUFFER_SF_H -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+ -+/****************************************************************************** -+ * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you -+ * WILL BREAK host tracing message compatibility with previous -+ * driver versions. Only add new ones, if so required. -+ *****************************************************************************/ -+ -+ -+/* String used in pvrdebug -h output */ -+#define HTB_LOG_GROUPS_STRING_LIST "ctrl,mmu,sync,main,brg" -+ -+/* Used in print statements to display log group state, one %s per group defined */ -+#define HTB_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s" -+ -+/* Available log groups - Master template -+ * -+ * Group usage is as follows: -+ * CTRL - Internal Host Trace information and synchronisation data -+ * MMU - MMU page mapping information -+ * SYNC - Synchronisation debug -+ * MAIN - Data master kicks, etc. tying in with the MAIN group in FWTrace -+ * DBG - Temporary debugging group, logs not to be left in the driver -+ * -+ */ -+#define HTB_LOG_SFGROUPLIST \ -+ X( HTB_GROUP_NONE, NONE ) \ -+/* gid, group flag / apphint name */ \ -+ X( HTB_GROUP_CTRL, CTRL ) \ -+ X( HTB_GROUP_MMU, MMU ) \ -+ X( HTB_GROUP_SYNC, SYNC ) \ -+ X( HTB_GROUP_MAIN, MAIN ) \ -+ X( HTB_GROUP_BRG, BRG ) \ -+/* Debug group HTB_GROUP_DBG must always be last */ \ -+ X( HTB_GROUP_DBG, DBG ) -+ -+ -+/* Table of String Format specifiers, the group they belong and the number of -+ * arguments each expects. Xmacro styled macros are used to generate what is -+ * needed without requiring hand editing. 
-+ * -+ * id : unique id within a group -+ * gid : group id as defined above -+ * sym name : symbolic name of enumerations used to identify message strings -+ * string : Actual string -+ * #args : number of arguments the string format requires -+ */ -+#define HTB_LOG_SFIDLIST \ -+/*id, gid, sym name, string, # arguments */ \ -+X( 0, HTB_GROUP_NONE, HTB_SF_FIRST, "You should not use this string", 0) \ -+\ -+X( 1, HTB_GROUP_CTRL, HTB_SF_CTRL_LOGMODE, "HTB log mode set to %d (1- all PID, 2 - restricted PID)\n", 1) \ -+X( 2, HTB_GROUP_CTRL, HTB_SF_CTRL_ENABLE_PID, "HTB enable logging for PID %d\n", 1) \ -+X( 3, HTB_GROUP_CTRL, HTB_SF_CTRL_ENABLE_GROUP, "HTB enable logging groups 0x%08x\n", 1) \ -+X( 4, HTB_GROUP_CTRL, HTB_SF_CTRL_LOG_LEVEL, "HTB log level set to %d\n", 1) \ -+X( 5, HTB_GROUP_CTRL, HTB_SF_CTRL_OPMODE, "HTB operating mode set to %d (1 - droplatest, 2 - drop oldest, 3 - block)\n", 1) \ -+X( 6, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_SCALE, "HTBFWSync OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \ -+X( 7, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_SCALE_RPT, "FW Sync scale info OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \ -+X( 8, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK, "FW Sync Partition marker: %d\n", 1) \ -+X( 9, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK_RPT, "FW Sync Partition repeat: %d\n", 1) \ -+X( 10, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK_SCALE, "Text not used", 6)\ -+\ -+X( 1, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_TABLE, "MMU page op table entry page_id=%08x%08x index=%d level=%d val=%08x%08x map=%d\n", 7) \ -+X( 2, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_ALLOC, "MMU allocating DevVAddr from %08x%08x to %08x%08x\n", 4) \ -+X( 3, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_FREE, "MMU freeing DevVAddr from %08x%08x to %08x%08x\n", 4) \ -+X( 4, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_MAP, "MMU mapping DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \ -+X( 5, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_PMRMAP, "MMU mapping PMR DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \ -+X( 6, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_UNMAP, "MMU unmapping DevVAddr %08x%08x\n", 2) \ -+\ -+X( 1, HTB_GROUP_SYNC, HTB_SF_SYNC_SERVER_ALLOC, "Server sync allocation [%08X]\n", 1) \ -+X( 2, HTB_GROUP_SYNC, HTB_SF_SYNC_SERVER_UNREF, "Server sync unreferenced [%08X]\n", 1) \ -+X( 3, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_CREATE, "Sync OP create 0x%08x, block count=%d, server syncs=%d, client syncs=%d\n", 4) \ -+X( 4, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_TAKE, "Sync OP take 0x%08x server syncs=%d, client syncs=%d\n", 3) \ -+X( 5, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_COMPLETE, "Sync OP complete 0x%08x\n", 1) \ -+X( 6, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_DESTROY, "Sync OP destroy 0x%08x\n", 1) \ -+\ -+X( 1, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx %08X @ %d\n", 2) \ -+X( 2, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx %08X @ %d\n", 2) \ -+X( 3, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_CDM_DEPRECATED,"Kick CDM: FWCtx %08X @ %d\n", 2) \ -+X( 4, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_RTU, "Kick RTU: FWCtx %08X @ %d\n", 2) \ -+X( 5, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_SHG, "Kick SHG: FWCtx %08X @ %d\n", 2) \ -+X( 6, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_2D_DEPRECATED, "Kick 2D: FWCtx %08X @ %d\n", 2) \ -+X( 7, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_UNCOUNTED, "Kick (uncounted) for all DMs\n", 0) \ -+X( 8, HTB_GROUP_MAIN, HTB_SF_MAIN_FWCCB_CMD, "FW CCB Cmd: %d\n", 1) \ -+X( 9, HTB_GROUP_MAIN, HTB_SF_MAIN_PRE_POWER, "Pre-power duration @ phase [%d] (0-shutdown,1-startup) RGX: %llu ns SYS: %llu ns\n", 3) \ -+X(10, HTB_GROUP_MAIN, 
HTB_SF_MAIN_POST_POWER, "Post-power duration @ phase [%d] (0-shutdown,1-startup) SYS: %llu ns RGX: %llu ns\n", 3) \ -+X(11, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_TA, "Kick TA: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \ -+X(12, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_3D, "Kick 3D: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \ -+X(13, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_CDM, "Kick CDM: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \ -+X(14, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_2D, "Kick 2D: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \ -+X(15, HTB_GROUP_MAIN, HTB_SF_MAIN_DBG_ERROR, "Error: (%u) @ line: %u\n", 2) \ -+X(16, HTB_GROUP_MAIN, HTB_SF_MAIN_DBG_COND_ERROR_T, "Error: (%u) Conditional is unexpectedly true @ line: %u\n", 2) \ -+X(17, HTB_GROUP_MAIN, HTB_SF_MAIN_DBG_COND_ERROR_F, "Error: (%u) Conditional is unexpectedly false @ line: %u\n", 2) \ -+X(18, HTB_GROUP_MAIN, HTB_SF_MAIN_DBG_WARNING, "Warning: (%u) in file: @ line: %u\n", 2) \ -+\ -+X( 1, HTB_GROUP_BRG, HTB_SF_BRG_BRIDGE_CALL, "Bridge call: start: %010u: bid %03d fid %d\n", 3) \ -+X( 2, HTB_GROUP_BRG, HTB_SF_BRG_BRIDGE_CALL_ERR, "Bridge call: start: %010u: bid %03d fid %d error %d\n", 4) \ -+\ -+X( 1, HTB_GROUP_DBG, HTB_SF_DBG_INTPAIR, "0x%8.8x 0x%8.8x\n", 2) \ -+\ -+X( 65535, HTB_GROUP_NONE, HTB_SF_LAST, "You should not use this string\n", 15) -+ -+ -+ -+/* gid - Group numbers */ -+typedef enum _HTB_LOG_SFGROUPS { -+#define X(A,B) A, -+ HTB_LOG_SFGROUPLIST -+#undef X -+} HTB_LOG_SFGROUPS; -+ -+ -+/* Group flags are stored in an array of elements. -+ * Each of which have a certain number of bits. -+ */ -+#define HTB_FLAG_EL_T IMG_UINT32 -+#define HTB_FLAG_NUM_BITS_IN_EL (sizeof(HTB_FLAG_EL_T) * 8) -+ -+#define HTB_LOG_GROUP_FLAG_GROUP(gid) ((gid-1) / HTB_FLAG_NUM_BITS_IN_EL) -+#define HTB_LOG_GROUP_FLAG(gid) (gid ? (0x1 << ((gid-1)%HTB_FLAG_NUM_BITS_IN_EL)) : 0) -+#define HTB_LOG_GROUP_FLAG_NAME(gid) HTB_LOG_TYPE_ ## gid -+ -+/* Group enable flags */ -+typedef enum _HTB_LOG_TYPE { -+#define X(a, b) HTB_LOG_GROUP_FLAG_NAME(b) = HTB_LOG_GROUP_FLAG(a), -+ HTB_LOG_SFGROUPLIST -+#undef X -+} HTB_LOG_TYPE; -+ -+ -+ -+/* The symbolic names found in the table above are assigned an ui32 value of -+ * the following format: -+ * 31 30 28 27 20 19 16 15 12 11 0 bits -+ * - --- ---- ---- ---- ---- ---- ---- ---- -+ * 0-11: id number -+ * 12-15: group id number -+ * 16-19: number of parameters -+ * 20-27: unused -+ * 28-30: active: identify SF packet, otherwise regular int32 -+ * 31: reserved for signed/unsigned compatibility -+ * -+ * The following macro assigns those values to the enum generated SF ids list. -+ */ -+#define HTB_LOG_IDMARKER (0x70000000) -+#define HTB_LOG_CREATESFID(a,b,e) (((a) | (b << 12) | (e << 16)) | HTB_LOG_IDMARKER) -+ -+#define HTB_LOG_IDMASK (0xFFF00000) -+#define HTB_LOG_VALIDID(I) ( ((I) & HTB_LOG_IDMASK) == HTB_LOG_IDMARKER ) -+ -+typedef enum HTB_LOG_SFids { -+#define X(a, b, c, d, e) c = HTB_LOG_CREATESFID(a,b,e), -+ HTB_LOG_SFIDLIST -+#undef X -+} HTB_LOG_SFids; -+ -+/* Return the group id that the given (enum generated) id belongs to */ -+#define HTB_SF_GID(x) (((x)>>12) & 0xf) -+/* Future improvement to support log levels */ -+#define HTB_SF_LVL(x) (0) -+/* Returns how many arguments the SF(string format) for the given -+ * (enum generated) id requires. 
-+ */ -+#define HTB_SF_PARAMNUM(x) (((x)>>16) & 0xf) -+/* Returns the id of given enum */ -+#define HTB_SF_ID(x) (x & 0xfff) -+ -+/* Format of messages is: SF:PID:TID:TIMEPT1:TIMEPT2:[PARn]* -+ */ -+#define HTB_LOG_HEADER_SIZE 5 -+#define HTB_LOG_MAX_PARAMS 15 -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+/* Defines for handling MARK_SCALE special case */ -+#define HTB_GID_CTRL 1 -+#define HTB_ID_MARK_SCALE 10 -+#define HTB_MARK_SCALE_ARG_ARRAY_SIZE 6 -+ -+/* Defines for extracting args from array for special case MARK_SCALE */ -+#define HTB_ARG_SYNCMARK 0 -+#define HTB_ARG_OSTS_PT1 1 -+#define HTB_ARG_OSTS_PT2 2 -+#define HTB_ARG_CRTS_PT1 3 -+#define HTB_ARG_CRTS_PT2 4 -+#define HTB_ARG_CLKSPD 5 -+ -+#endif /* HTBUFFER_SF_H */ -+/***************************************************************************** -+ End of file (htbuffer_sf.h) -+*****************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/htbuffer_types.h b/drivers/gpu/drm/img-rogue/htbuffer_types.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/htbuffer_types.h -@@ -0,0 +1,122 @@ -+/*************************************************************************/ /*! -+@File htbuffer_types.h -+@Title Host Trace Buffer types. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Host Trace Buffer provides a mechanism to log Host events to a -+ buffer in a similar way to the Firmware Trace mechanism. -+ Host Trace Buffer logs data using a Transport Layer buffer. -+ The Transport Layer and pvrtld tool provides the mechanism to -+ retrieve the trace data. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
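Note: the SF identifier layout documented in htbuffer_sf.h (message id in bits 0-11, group id in bits 12-15, parameter count in bits 16-19, the 0x7 "active" marker in bits 28-30) can be verified with plain C. The sketch below reproduces the packing macros and the X-macro expansion of the group list in simplified form (the real list also carries the apphint name column), then decodes the HTB_SF_CTRL_LOG_LEVEL entry (id 4, group CTRL, one argument):

#include <assert.h>
#include <stdint.h>

/* Reproductions of the packing/unpacking macros from htbuffer_sf.h. */
#define HTB_LOG_IDMARKER            (0x70000000)
#define HTB_LOG_CREATESFID(a, b, e) (((a) | ((b) << 12) | ((e) << 16)) | HTB_LOG_IDMARKER)
#define HTB_LOG_IDMASK              (0xFFF00000)
#define HTB_LOG_VALIDID(I)          (((I) & HTB_LOG_IDMASK) == HTB_LOG_IDMARKER)
#define HTB_SF_GID(x)               (((x) >> 12) & 0xf)
#define HTB_SF_PARAMNUM(x)          (((x) >> 16) & 0xf)
#define HTB_SF_ID(x)                ((x) & 0xfff)

/* Group ids are generated from the X-macro list exactly as in the header:
 * each X(...) row becomes one enumerator, so CTRL == 1, MMU == 2, and so on. */
#define GROUPLIST \
	X(HTB_GROUP_NONE) X(HTB_GROUP_CTRL) X(HTB_GROUP_MMU) \
	X(HTB_GROUP_SYNC) X(HTB_GROUP_MAIN) X(HTB_GROUP_BRG) X(HTB_GROUP_DBG)
typedef enum {
#define X(A) A,
	GROUPLIST
#undef X
} htb_group_t;

int main(void)
{
	uint32_t sf = HTB_LOG_CREATESFID(4, HTB_GROUP_CTRL, 1); /* HTB_SF_CTRL_LOG_LEVEL */

	assert(sf == 0x70011004u);    /* marker | params << 16 | gid << 12 | id */
	assert(HTB_LOG_VALIDID(sf));  /* distinguishes SF words from plain payload words */
	assert(HTB_SF_ID(sf) == 4);
	assert(HTB_SF_GID(sf) == HTB_GROUP_CTRL);
	assert(HTB_SF_PARAMNUM(sf) == 1);
	return 0;
}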
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef HTBUFFER_TYPES_H -+#define HTBUFFER_TYPES_H -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+#include "img_defs.h" -+#include "htbuffer_sf.h" -+ -+/* The group flags array of ints large enough to store all the group flags */ -+#if defined(PVRSRV_ENABLE_HTB) -+#define HTB_FLAG_NUM_EL (((HTB_GROUP_DBG-1) / HTB_FLAG_NUM_BITS_IN_EL) + 1) -+extern IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL]; -+ -+#define HTB_GROUP_ENABLED(SF) (g_auiHTBGroupEnable[HTB_LOG_GROUP_FLAG_GROUP(HTB_SF_GID(SF))] & HTB_LOG_GROUP_FLAG(HTB_SF_GID(SF))) -+#else -+#define HTB_GROUP_ENABLED(SF) IMG_FALSE -+#endif -+ -+/*************************************************************************/ /*! -+ Host Trace Buffer operation mode -+ Care must be taken if changing this enum to ensure the MapFlags[] array -+ in htbserver.c is kept in-step. -+*/ /**************************************************************************/ -+typedef enum -+{ -+ /*! Undefined operation mode */ -+ HTB_OPMODE_UNDEF = 0, -+ -+ /*! Drop latest, intended for continuous logging to a UM daemon. -+ * If the daemon does not keep up, the most recent log data -+ * will be dropped -+ */ -+ HTB_OPMODE_DROPLATEST, -+ -+ /*! Drop oldest, intended for crash logging. -+ * Data will be continuously written to a circular buffer. -+ * After a crash the buffer will contain events leading up to the crash -+ */ -+ HTB_OPMODE_DROPOLDEST, -+ -+ /*! Block write if buffer is full */ -+ HTB_OPMODE_BLOCK, -+ -+ HTB_OPMODE_LAST = HTB_OPMODE_BLOCK -+} HTB_OPMODE_CTRL; -+ -+ -+/*************************************************************************/ /*! -+ Host Trace Buffer log mode control -+*/ /**************************************************************************/ -+typedef enum -+{ -+ /*! Undefined log mode, used if update is not applied */ -+ HTB_LOGMODE_UNDEF = 0, -+ -+ /*! Log trace messages for all PIDs. */ -+ HTB_LOGMODE_ALLPID, -+ -+ /*! Log trace messages for specific PIDs only. */ -+ HTB_LOGMODE_RESTRICTEDPID, -+ -+ HTB_LOGMODE_LAST = HTB_LOGMODE_RESTRICTEDPID -+} HTB_LOGMODE_CTRL; -+ -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif /* HTBUFFER_TYPES_H */ -+ -+/****************************************************************************** -+ End of file (htbuffer_types.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/img_3dtypes.h b/drivers/gpu/drm/img-rogue/img_3dtypes.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/img_3dtypes.h -@@ -0,0 +1,248 @@ -+/*************************************************************************/ /*! -+@File -+@Title Global 3D types for use by IMG APIs -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@Description Defines 3D types for use by IMG APIs -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef IMG_3DTYPES_H -+#define IMG_3DTYPES_H -+ -+#include -+#include "img_types.h" -+#include "img_defs.h" -+ -+/** -+ * Comparison functions -+ * This comparison function is defined as: -+ * A {CmpFunc} B -+ * A is a reference value, e.g., incoming depth etc. -+ * B is the sample value, e.g., value in depth buffer. 
-+ */ -+typedef enum _IMG_COMPFUNC_ -+{ -+ IMG_COMPFUNC_NEVER, /**< The comparison never succeeds */ -+ IMG_COMPFUNC_LESS, /**< The comparison is a less-than operation */ -+ IMG_COMPFUNC_EQUAL, /**< The comparison is an equal-to operation */ -+ IMG_COMPFUNC_LESS_EQUAL, /**< The comparison is a less-than or equal-to -+ operation */ -+ IMG_COMPFUNC_GREATER, /**< The comparison is a greater-than operation -+ */ -+ IMG_COMPFUNC_NOT_EQUAL, /**< The comparison is a no-equal-to operation -+ */ -+ IMG_COMPFUNC_GREATER_EQUAL, /**< The comparison is a greater-than or -+ equal-to operation */ -+ IMG_COMPFUNC_ALWAYS, /**< The comparison always succeeds */ -+} IMG_COMPFUNC; -+ -+/** -+ * Stencil op functions -+ */ -+typedef enum _IMG_STENCILOP_ -+{ -+ IMG_STENCILOP_KEEP, /**< Keep original value */ -+ IMG_STENCILOP_ZERO, /**< Set stencil to 0 */ -+ IMG_STENCILOP_REPLACE, /**< Replace stencil entry */ -+ IMG_STENCILOP_INCR_SAT, /**< Increment stencil entry, clamping to max */ -+ IMG_STENCILOP_DECR_SAT, /**< Decrement stencil entry, clamping to zero */ -+ IMG_STENCILOP_INVERT, /**< Invert bits in stencil entry */ -+ IMG_STENCILOP_INCR, /**< Increment stencil entry, -+ wrapping if necessary */ -+ IMG_STENCILOP_DECR, /**< Decrement stencil entry, -+ wrapping if necessary */ -+} IMG_STENCILOP; -+ -+/** -+ * Alpha blending allows colours and textures on one surface -+ * to be blended with transparency onto another surface. -+ * These definitions apply to both source and destination blending -+ * states -+ */ -+typedef enum _IMG_BLEND_ -+{ -+ IMG_BLEND_ZERO = 0, /**< Blend factor is (0,0,0,0) */ -+ IMG_BLEND_ONE, /**< Blend factor is (1,1,1,1) */ -+ IMG_BLEND_SRC_COLOUR, /**< Blend factor is the source colour */ -+ IMG_BLEND_INV_SRC_COLOUR, /**< Blend factor is the inverted source colour -+ (i.e. 1-src_col) */ -+ IMG_BLEND_SRC_ALPHA, /**< Blend factor is the source alpha */ -+ IMG_BLEND_INV_SRC_ALPHA, /**< Blend factor is the inverted source alpha -+ (i.e. 
1-src_alpha) */ -+ IMG_BLEND_DEST_ALPHA, /**< Blend factor is the destination alpha */ -+ IMG_BLEND_INV_DEST_ALPHA, /**< Blend factor is the inverted destination -+ alpha */ -+ IMG_BLEND_DEST_COLOUR, /**< Blend factor is the destination colour */ -+ IMG_BLEND_INV_DEST_COLOUR, /**< Blend factor is the inverted destination -+ colour */ -+ IMG_BLEND_SRC_ALPHASAT, /**< Blend factor is the alpha saturation (the -+ minimum of (Src alpha, -+ 1 - destination alpha)) */ -+ IMG_BLEND_BLEND_FACTOR, /**< Blend factor is a constant */ -+ IMG_BLEND_INVBLEND_FACTOR, /**< Blend factor is a constant (inverted)*/ -+ IMG_BLEND_SRC1_COLOUR, /**< Blend factor is the colour outputted from -+ the pixel shader */ -+ IMG_BLEND_INV_SRC1_COLOUR, /**< Blend factor is the inverted colour -+ outputted from the pixel shader */ -+ IMG_BLEND_SRC1_ALPHA, /**< Blend factor is the alpha outputted from -+ the pixel shader */ -+ IMG_BLEND_INV_SRC1_ALPHA /**< Blend factor is the inverted alpha -+ outputted from the pixel shader */ -+} IMG_BLEND; -+ -+/** -+ * The arithmetic operation to perform when blending -+ */ -+typedef enum _IMG_BLENDOP_ -+{ -+ IMG_BLENDOP_ADD = 0, /**< Result = (Source + Destination) */ -+ IMG_BLENDOP_SUBTRACT, /**< Result = (Source - Destination) */ -+ IMG_BLENDOP_REV_SUBTRACT, /**< Result = (Destination - Source) */ -+ IMG_BLENDOP_MIN, /**< Result = min (Source, Destination) */ -+ IMG_BLENDOP_MAX /**< Result = max (Source, Destination) */ -+} IMG_BLENDOP; -+ -+/** -+ * Logical operation to perform when logic ops are enabled -+ */ -+typedef enum _IMG_LOGICOP_ -+{ -+ IMG_LOGICOP_CLEAR = 0, /**< Result = 0 */ -+ IMG_LOGICOP_SET, /**< Result = -1 */ -+ IMG_LOGICOP_COPY, /**< Result = Source */ -+ IMG_LOGICOP_COPY_INVERTED, /**< Result = ~Source */ -+ IMG_LOGICOP_NOOP, /**< Result = Destination */ -+ IMG_LOGICOP_INVERT, /**< Result = ~Destination */ -+ IMG_LOGICOP_AND, /**< Result = Source & Destination */ -+ IMG_LOGICOP_NAND, /**< Result = ~(Source & Destination) */ -+ IMG_LOGICOP_OR, /**< Result = Source | Destination */ -+ IMG_LOGICOP_NOR, /**< Result = ~(Source | Destination) */ -+ IMG_LOGICOP_XOR, /**< Result = Source ^ Destination */ -+ IMG_LOGICOP_EQUIV, /**< Result = ~(Source ^ Destination) */ -+ IMG_LOGICOP_AND_REVERSE, /**< Result = Source & ~Destination */ -+ IMG_LOGICOP_AND_INVERTED, /**< Result = ~Source & Destination */ -+ IMG_LOGICOP_OR_REVERSE, /**< Result = Source | ~Destination */ -+ IMG_LOGICOP_OR_INVERTED /**< Result = ~Source | Destination */ -+} IMG_LOGICOP; -+ -+/** -+ * Type of fog blending supported -+ */ -+typedef enum _IMG_FOGMODE_ -+{ -+ IMG_FOGMODE_NONE, /**< No fog blending - fog calculations are -+ * based on the value output from the vertex phase */ -+ IMG_FOGMODE_LINEAR, /**< Linear interpolation */ -+ IMG_FOGMODE_EXP, /**< Exponential */ -+ IMG_FOGMODE_EXP2, /**< Exponential squaring */ -+} IMG_FOGMODE; -+ -+/** -+ * Types of filtering -+ */ -+typedef enum _IMG_FILTER_ -+{ -+ IMG_FILTER_DONTCARE, /**< Any filtering mode is acceptable */ -+ IMG_FILTER_POINT, /**< Point filtering */ -+ IMG_FILTER_LINEAR, /**< Bi-linear filtering */ -+ IMG_FILTER_BICUBIC, /**< Bi-cubic filtering */ -+} IMG_FILTER; -+ -+/** -+ * Addressing modes for textures -+ */ -+typedef enum _IMG_ADDRESSMODE_ -+{ -+ IMG_ADDRESSMODE_DONTCARE, -+ IMG_ADDRESSMODE_REPEAT, /**< Texture repeats continuously */ -+ IMG_ADDRESSMODE_FLIP, /**< Texture flips on odd integer part */ -+ IMG_ADDRESSMODE_CLAMP, /**< Texture clamped at 0 or 1 */ -+ IMG_ADDRESSMODE_FLIPCLAMP, /**< Flipped once, then clamp */ -+ 
IMG_ADDRESSMODE_CLAMPBORDER, -+ IMG_ADDRESSMODE_OGL_CLAMP, -+ IMG_ADDRESSMODE_OVG_TILEFILL, -+} IMG_ADDRESSMODE; -+ -+/** -+ * Culling based on winding order of triangle. -+ */ -+typedef enum _IMG_CULLMODE_ -+{ -+ IMG_CULLMODE_NONE, /**< Don't cull */ -+ IMG_CULLMODE_FRONTFACING, /**< Front facing triangles */ -+ IMG_CULLMODE_BACKFACING, /**< Back facing triangles */ -+} IMG_CULLMODE; -+ -+/** -+ * Colour for clearing surfaces. -+ * The four elements of the 4 x 32 bit array will map to colour -+ * R,G,B,A components, in order. -+ * For YUV colour space the order is Y,U,V. -+ * For Depth and Stencil formats D maps to R and S maps to G. -+ */ -+typedef union IMG_CLEAR_COLOUR_TAG { -+ IMG_UINT32 aui32[4]; -+ IMG_INT32 ai32[4]; -+ IMG_FLOAT af32[4]; -+} IMG_CLEAR_COLOUR; -+ -+static_assert(sizeof(IMG_FLOAT) == sizeof(IMG_INT32), "Size of IMG_FLOAT is not 32 bits."); -+ -+/*! ************************************************************************//** -+@brief Specifies the MSAA resolve operation. -+*/ /**************************************************************************/ -+typedef enum _IMG_RESOLVE_OP_ -+{ -+ IMG_RESOLVE_BLEND = 0, /*!< box filter on the samples */ -+ IMG_RESOLVE_MIN = 1, /*!< minimum of the samples */ -+ IMG_RESOLVE_MAX = 2, /*!< maximum of the samples */ -+ IMG_RESOLVE_SAMPLE0 = 3, /*!< choose sample 0 */ -+ IMG_RESOLVE_SAMPLE1 = 4, /*!< choose sample 1 */ -+ IMG_RESOLVE_SAMPLE2 = 5, /*!< choose sample 2 */ -+ IMG_RESOLVE_SAMPLE3 = 6, /*!< choose sample 3 */ -+ IMG_RESOLVE_SAMPLE4 = 7, /*!< choose sample 4 */ -+ IMG_RESOLVE_SAMPLE5 = 8, /*!< choose sample 5 */ -+ IMG_RESOLVE_SAMPLE6 = 9, /*!< choose sample 6 */ -+ IMG_RESOLVE_SAMPLE7 = 10, /*!< choose sample 7 */ -+} IMG_RESOLVE_OP; -+ -+ -+#endif /* IMG_3DTYPES_H */ -+/****************************************************************************** -+ End of file (img_3dtypes.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/img_defs.h b/drivers/gpu/drm/img-rogue/img_defs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/img_defs.h -@@ -0,0 +1,599 @@ -+/*************************************************************************/ /*! -+@File -+@Title Common header containing type definitions for portability -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Contains variable and structure definitions. Any platform -+ specific types should be defined in this file. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
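Note: the IMG_CLEAR_COLOUR union in img_3dtypes.h keeps one 4 x 32-bit clear value and lets it be read as unsigned, signed or float channels depending on the surface format (hence the static_assert that IMG_FLOAT is 32 bits wide). A small illustrative sketch with standard types standing in for the IMG_* typedefs:

#include <assert.h>
#include <stdint.h>

/* Stand-in for IMG_CLEAR_COLOUR: one block of storage, three views of it. */
typedef union {
	uint32_t aui32[4];
	int32_t  ai32[4];
	float    af32[4];
} clear_colour_t;

int main(void)
{
	clear_colour_t clear = { .af32 = { 0.5f, 0.5f, 0.5f, 1.0f } }; /* opaque 50% grey RGBA */

	/* The same storage read through the integer view yields the IEEE-754 bit pattern. */
	assert(clear.aui32[3] == 0x3F800000u); /* 1.0f */

	/* Depth/stencil formats reuse the first two slots: D maps to R, S maps to G. */
	clear.af32[0]  = 1.0f; /* depth clear value */
	clear.aui32[1] = 0u;   /* stencil clear value */
	return 0;
}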
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef IMG_DEFS_H -+#define IMG_DEFS_H -+ -+#if defined(__linux__) && defined(__KERNEL__) -+#include -+#else -+#include -+#endif -+#if !(defined(__linux__) && defined(__KERNEL__)) -+#if defined(__riscv) -+#pragma GCC diagnostic push -+#pragma GCC diagnostic ignored "-Wundef" -+#endif -+#include -+#if defined(__riscv) -+#pragma GCC diagnostic pop -+#endif -+#endif -+ -+#include "img_types.h" -+ -+#if defined(NO_INLINE_FUNCS) -+ #define INLINE -+ #define FORCE_INLINE -+#else -+#if defined(__cplusplus) || defined(INTEGRITY_OS) -+ #if !defined(INLINE) -+ #define INLINE inline -+ #endif -+ #define FORCE_INLINE static inline -+#else -+#if !defined(INLINE) -+ #define INLINE __inline -+#endif -+#if (defined(UNDER_WDDM) || defined(WINDOWS_WDF)) && defined(_X86_) -+ #define FORCE_INLINE __forceinline -+#else -+ #define FORCE_INLINE static __inline -+#endif -+#endif -+#endif -+ -+/* True if the GCC version is at least the given version. False for older -+ * versions of GCC, or other compilers. -+ */ -+#if defined(__GNUC__) -+#define GCC_VERSION_AT_LEAST(major, minor) \ -+ (__GNUC__ > (major) || \ -+ (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) -+#else -+#define GCC_VERSION_AT_LEAST(major, minor) 0 -+#endif -+ -+#if defined(__clang__) -+#define CLANG_VERSION_AT_LEAST(major) \ -+ (__clang_major__ >= (major)) -+#else -+#define CLANG_VERSION_AT_LEAST(major) 0 -+#endif -+ -+/* Use Clang's __has_extension and __has_builtin macros if available. */ -+#if defined(__has_extension) -+#define has_clang_extension(e) __has_extension(e) -+#else -+#define has_clang_extension(e) 0 -+#endif -+ -+#if defined(__has_builtin) -+#define has_clang_builtin(e) __has_builtin(e) -+#else -+#define has_clang_builtin(e) 0 -+#endif -+ -+/* Use this in any file, or use attributes under GCC - see below */ -+#ifndef PVR_UNREFERENCED_PARAMETER -+#define PVR_UNREFERENCED_PARAMETER(param) ((void)(param)) -+#endif -+ -+/* static_assert(condition, "message to print if it fails"); -+ * -+ * Assert something at compile time. If the assertion fails, try to print -+ * the message, otherwise do nothing. static_assert is available if: -+ * -+ * - It's already defined as a macro (e.g. 
by in C11) -+ * - We're using MSVC which exposes static_assert unconditionally -+ * - We're using a C++ compiler that supports C++11 -+ * - We're using GCC 4.6 and up in C mode (in which case it's available as -+ * _Static_assert) -+ * -+ * In all other cases, fall back to an equivalent that makes an invalid -+ * declaration. -+ */ -+#if !defined(static_assert) && !defined(_MSC_VER) && \ -+ (!defined(__cplusplus) || __cplusplus < 201103L) || defined(__KLOCWORK__) -+ /* static_assert isn't already available */ -+ #if !defined(__cplusplus) && (GCC_VERSION_AT_LEAST(4, 6) || \ -+ (defined(__clang__) && has_clang_extension(c_static_assert))) -+ #define static_assert _Static_assert -+ #else -+ #define static_assert(expr, message) \ -+ extern int static_assert_failed[(expr) ? 1 : -1] __attribute__((unused)) -+ #endif -+#endif -+ -+/* -+ * unreachable("explanation") can be used to indicate to the compiler that -+ * some parts of the code can never be reached, like the default branch -+ * of a switch that covers all real-world possibilities, even though there -+ * are other ints that exist for instance. -+ * -+ * The message will be printed as an assert() when debugging. -+ * -+ * Note: there is no need to add a 'return' or any error handling after -+ * calling unreachable(), as this call will never return. -+ */ -+#if defined(__linux__) && defined(__KERNEL__) -+/* Kernel has its own unreachable(), which is a simple infinite loop */ -+#elif GCC_VERSION_AT_LEAST(4, 5) || has_clang_builtin(__builtin_unreachable) -+ #define unreachable(msg) \ -+ do { \ -+ assert(!(msg)); \ -+ __builtin_unreachable(); \ -+ } while (false) -+#elif defined(_MSC_VER) -+ #define unreachable(msg) \ -+ do { \ -+ assert(!(msg)); \ -+ __assume(0); \ -+ } while (false) -+#else -+ #define unreachable(msg) \ -+ do { \ -+ assert(!(msg)); \ -+ while (1); \ -+ } while (false) -+#endif -+ -+/* -+ * assume(x > 2 && x <= 7) works like an assert(), except it hints to the -+ * compiler what it can assume to optimise the code, like a limited range -+ * of parameter values. -+ */ -+#if has_clang_builtin(__builtin_assume) -+ #define assume(expr) \ -+ do { \ -+ assert(expr); \ -+ __builtin_assume(expr); \ -+ } while (false) -+#elif defined(_MSC_VER) -+ #define assume(expr) \ -+ do { \ -+ assert(expr); \ -+ __assume(expr); \ -+ } while (false) -+#elif defined(__linux__) && defined(__KERNEL__) -+ #define assume(expr) ((void)(expr)) -+#elif GCC_VERSION_AT_LEAST(4, 5) || has_clang_builtin(__builtin_unreachable) -+ #define assume(expr) \ -+ do { \ -+ if (unlikely(!(expr))) \ -+ unreachable("Assumption isn't true: " # expr); \ -+ } while (false) -+#else -+ #define assume(expr) assert(expr) -+#endif -+ -+/*! Macro to calculate the n-byte aligned value from that supplied rounding up. -+ * n must be a power of two. -+ * -+ * Both arguments should be of a type with the same size otherwise the macro may -+ * cut off digits, e.g. imagine a 64 bit address in _x and a 32 bit value in _n. -+ */ -+#define PVR_ALIGN(_x, _n) (((_x)+((_n)-1U)) & ~((_n)-1U)) -+ -+#if defined(_WIN32) -+ -+#if defined(WINDOWS_WDF) -+ -+ /* -+ * For WINDOWS_WDF drivers we don't want these defines to overwrite calling conventions propagated through the build system. -+ * This 'empty' choice helps to resolve all the calling conv issues. 
-+ * -+ */ -+ #define IMG_CALLCONV -+ #define C_CALLCONV -+ -+ #define IMG_INTERNAL -+ #define IMG_RESTRICT __restrict -+ -+ /* -+ * The proper way of dll linking under MS compilers is made of two things: -+ * - decorate implementation with __declspec(dllexport) -+ * this decoration helps compiler with making the so called -+ * 'export library' -+ * - decorate forward-declaration (in a source dependent on a dll) with -+ * __declspec(dllimport), this decoration helps the compiler to make -+ * faster and smaller code in terms of calling dll-imported functions -+ * -+ * Usually these decorations are performed by having a single macro define -+ * making that expands to a proper __declspec() depending on the -+ * translation unit, dllexport inside the dll source and dllimport outside -+ * the dll source. Having IMG_EXPORT and IMG_IMPORT resolving to the same -+ * __declspec() makes no sense, but at least works. -+ */ -+ #define IMG_IMPORT __declspec(dllexport) -+ #define IMG_EXPORT __declspec(dllexport) -+ -+#else -+ -+ #define IMG_CALLCONV __stdcall -+ #define IMG_INTERNAL -+ #define IMG_EXPORT __declspec(dllexport) -+ #define IMG_RESTRICT __restrict -+ #define C_CALLCONV __cdecl -+ -+ /* -+ * IMG_IMPORT is defined as IMG_EXPORT so that headers and implementations -+ * match. Some compilers require the header to be declared IMPORT, while -+ * the implementation is declared EXPORT. -+ */ -+ #define IMG_IMPORT IMG_EXPORT -+ -+#endif -+ -+#if defined(UNDER_WDDM) -+ #ifndef _INC_STDLIB -+ #if defined(__mips) -+ /* do nothing */ -+ #elif defined(UNDER_MSBUILD) -+ /* do nothing */ -+ #else -+ _CRTIMP void __cdecl abort(void); -+ #endif -+ #endif -+#endif /* UNDER_WDDM */ -+#else -+ #if (defined(__linux__) || defined(__QNXNTO__)) && defined(__KERNEL__) -+ #define IMG_INTERNAL -+ #define IMG_EXPORT -+ #define IMG_CALLCONV -+ #elif defined(__linux__) || defined(__METAG) || defined(__mips) || defined(__QNXNTO__) || defined(__riscv) || defined(__APPLE__) -+ #define IMG_CALLCONV -+ #define C_CALLCONV -+ -+ #if defined(__METAG) -+ #define IMG_INTERNAL -+ #else -+ #define IMG_INTERNAL __attribute__((visibility("hidden"))) -+ #endif -+ -+ #define IMG_EXPORT __attribute__((visibility("default"))) -+ #define IMG_RESTRICT __restrict__ -+ #elif defined(INTEGRITY_OS) -+ #define IMG_CALLCONV -+ #define IMG_INTERNAL -+ #define IMG_EXPORT -+ #define IMG_RESTRICT -+ #define C_CALLCONV -+ #define __cdecl -+ -+ #ifndef USE_CODE -+ #define IMG_ABORT() printf("IMG_ABORT was called.\n") -+ #endif -+ #else -+ #error("define an OS") -+ #endif -+ -+#endif -+ -+/* Use default definition if not overridden */ -+#ifndef IMG_ABORT -+ #if defined(EXIT_ON_ABORT) -+ #define IMG_ABORT() exit(1) -+ #else -+ #define IMG_ABORT() abort() -+ #endif -+#endif -+ -+/* The best way to suppress unused parameter warnings using GCC is to use a -+ * variable attribute. Place the __maybe_unused between the type and name of an -+ * unused parameter in a function parameter list e.g. 'int __maybe_unused var'. -+ * This should only be used in GCC build environments, for example, in files -+ * that compile only on Linux. 
-+ * Other files should use PVR_UNREFERENCED_PARAMETER -+ */ -+ -+/* Kernel macros for compiler attributes */ -+/* Note: param positions start at 1 */ -+#if defined(__linux__) && defined(__KERNEL__) -+ #include -+ -+ #if !defined(__fallthrough) -+ #if GCC_VERSION_AT_LEAST(7, 0) || CLANG_VERSION_AT_LEAST(10) -+ #define __fallthrough __attribute__((__fallthrough__)) -+ #else -+ #define __fallthrough -+ #endif -+ #endif -+#elif defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES) -+ #define __must_check __attribute__((warn_unused_result)) -+ #define __maybe_unused __attribute__((unused)) -+ #define __malloc __attribute__((malloc)) -+ -+ /* Bionic's might have defined these already */ -+ /* See https://android.googlesource.com/platform/bionic.git/+/master/libc/include/sys/cdefs.h */ -+ #if !defined(__packed) -+ #define __packed __attribute__((packed)) -+ #endif -+ #if !defined(__aligned) -+ #define __aligned(n) __attribute__((aligned(n))) -+ #endif -+ #if !defined(__noreturn) -+ #define __noreturn __attribute__((noreturn)) -+ #endif -+ -+ /* That one compiler that supports attributes but doesn't support -+ * the printf attribute... */ -+ #if defined(__GNUC__) -+ #if defined(__MINGW32__) -+ #define __printf(fmt, va) __attribute__((format(gnu_printf, (fmt), (va)))) -+ #else -+ #define __printf(fmt, va) __attribute__((format(printf, (fmt), (va)))) -+ #endif -+ #else -+ #define __printf(fmt, va) -+ #endif /* defined(__GNUC__) */ -+ -+ #if defined(__cplusplus) && (__cplusplus >= 201703L) -+ #define __fallthrough [[fallthrough]] -+ #elif GCC_VERSION_AT_LEAST(7, 0) || CLANG_VERSION_AT_LEAST(10) -+ #define __fallthrough __attribute__((__fallthrough__)) -+ #else -+ #define __fallthrough -+ #endif -+ -+ #define __user -+ #define __force -+ #define __iomem -+#else -+ /* Silently ignore those attributes */ -+ #define __printf(fmt, va) -+ #define __packed -+ #define __aligned(n) -+ #define __must_check -+ #define __maybe_unused -+ #define __malloc -+ -+ #if defined(_MSC_VER) || defined(CC_ARM) -+ #define __noreturn __declspec(noreturn) -+ #else -+ #define __noreturn -+ #endif -+ -+ /* This may already been defined, e.g. by SAL (Source Annotation Language) */ -+ #if !defined(__fallthrough) -+ #define __fallthrough -+ #endif -+ -+ #define __user -+ #define __force -+ #define __iomem -+#endif -+ -+ -+/* Other attributes, following the same style */ -+#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES) -+ #define __const_function __attribute__((const)) -+#else -+ #define __const_function -+#endif -+ -+ -+/* GCC builtins */ -+#if defined(__linux__) && defined(__KERNEL__) -+ #include -+#elif defined(__GNUC__) || defined(INTEGRITY_OS) -+ -+/* Klocwork does not support __builtin_expect, which makes the actual condition -+ * expressions hidden during analysis, affecting it negatively. */ -+#if !defined(__KLOCWORK__) && !defined(INTEGRITY_OS) && !defined(DEBUG) -+ #define likely(x) __builtin_expect(!!(x), 1) -+ #define unlikely(x) __builtin_expect(!!(x), 0) -+#endif -+ -+ /* Compiler memory barrier to prevent reordering */ -+ #define barrier() __asm__ __volatile__("": : :"memory") -+#else -+ #define barrier() static_assert(0, "barrier() isn't supported by your compiler"); -+#endif -+ -+/* That one OS that defines one but not the other... 
*/ -+#ifndef likely -+ #define likely(x) (x) -+#endif -+#ifndef unlikely -+ #define unlikely(x) (x) -+#endif -+ -+#if !defined(BITS_PER_BYTE) -+#define BITS_PER_BYTE (8) -+#endif /* BITS_PER_BYTE */ -+ -+/* These two macros are also provided by the kernel */ -+#ifndef BIT -+#define BIT(b) (1UL << (b)) -+#endif -+ -+#ifndef BIT_ULL -+#define BIT_ULL(b) (1ULL << (b)) -+#endif -+ -+#define BIT_SET(f, b) BITMASK_SET((f), BIT(b)) -+#define BIT_UNSET(f, b) BITMASK_UNSET((f), BIT(b)) -+#define BIT_TOGGLE(f, b) BITMASK_TOGGLE((f), BIT(b)) -+#define BIT_ISSET(f, b) BITMASK_HAS((f), BIT(b)) -+ -+#define BITMASK_SET(f, m) do { ((f) |= (m)); } while (false) -+#define BITMASK_UNSET(f, m) do { ((f) &= ~(m)); } while (false) -+#define BITMASK_TOGGLE(f, m) do { ((f) ^= (m)); } while (false) -+#define BITMASK_HAS(f, m) (((f) & (m)) == (m)) /* the bits from the mask are all set */ -+#define BITMASK_ANY(f, m) (((f) & (m)) != 0U) /* any bit from the mask is set */ -+ -+#ifndef MAX -+#define MAX(a ,b) (((a) > (b)) ? (a) : (b)) -+#endif -+ -+#ifndef MIN -+#define MIN(a, b) (((a) < (b)) ? (a) : (b)) -+#endif -+ -+#ifndef CLAMP -+#define CLAMP(min, max, n) ((n) < (min) ? (min) : ((n) > (max) ? (max) : (n))) -+#endif -+ -+#define SWAP(X, Y) (X) ^= (Y); (Y) ^= (X); (X) ^= (Y); -+ -+#if defined(__linux__) && defined(__KERNEL__) -+ #include -+ #include -+#endif -+ -+/* Get a structure's address from the address of a member */ -+#define IMG_CONTAINER_OF(ptr, type, member) \ -+ (type *) ((uintptr_t) (ptr) - offsetof(type, member)) -+ -+/* Get a new pointer with an offset (in bytes) from a base address, useful -+ * when traversing byte buffers and accessing data in buffers through struct -+ * pointers. -+ * Note, this macro is not equivalent to or replacing offsetof() */ -+#define IMG_OFFSET_ADDR(addr, offset_in_bytes) \ -+ (void*)&(((IMG_UINT8*)(void*)(addr))[offset_in_bytes]) -+ -+/* Get a new pointer with an offset (in bytes) from a base address, version -+ * for volatile memory. -+ */ -+#define IMG_OFFSET_ADDR_VOLATILE(addr, offset_in_bytes) \ -+ (volatile void*)&(((volatile IMG_UINT8*)(volatile void*)(addr))[offset_in_bytes]) -+ -+/* Get a new pointer with an offset (in dwords) from a base address, useful -+ * when traversing byte buffers and accessing data in buffers through struct -+ * pointers. -+ * Note, this macro is not equivalent to or replacing offsetof() */ -+#define IMG_OFFSET_ADDR_DW(addr, offset_in_dwords) \ -+ (void*)(((IMG_UINT32*)(void*)(addr)) + (offset_in_dwords)) -+ -+/* The number of elements in a fixed-sized array */ -+#ifndef ARRAY_SIZE -+#define ARRAY_SIZE(ARR) (sizeof(ARR) / sizeof((ARR)[0])) -+#endif -+ -+/* To guarantee that __func__ can be used, define it as a macro here if it -+ isn't already provided by the compiler. */ -+#if defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus < 201103L) -+#define __func__ __FUNCTION__ -+#endif -+ -+#if defined(__cplusplus) -+/* C++ Specific: -+ * Disallow use of copy and assignment operator within a class. -+ * Should be placed under private. 
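A usage sketch for the bit-field helpers defined above (standalone; the macros are repeated inline so the example builds outside the driver tree):

```c
#include <stdbool.h>
#include <stdio.h>

#define BIT(b)              (1UL << (b))
#define BITMASK_SET(f, m)   do { ((f) |= (m)); } while (false)
#define BITMASK_UNSET(f, m) do { ((f) &= ~(m)); } while (false)
#define BITMASK_HAS(f, m)   (((f) & (m)) == (m))   /* all bits of the mask set */

int main(void)
{
    unsigned long flags = 0;

    BITMASK_SET(flags, BIT(0) | BIT(3));   /* set bits 0 and 3 */
    BITMASK_UNSET(flags, BIT(0));          /* clear bit 0 again */

    printf("bit 3 set:    %d\n", BITMASK_HAS(flags, BIT(3)));          /* 1 */
    printf("bits 0+3 set: %d\n", BITMASK_HAS(flags, BIT(0) | BIT(3))); /* 0 */
    return 0;
}
```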
*/ -+#define IMG_DISALLOW_COPY_AND_ASSIGN(C) \ -+ C(const C&); \ -+ void operator=(const C&) -+#endif -+ -+#if defined(SUPPORT_PVR_VALGRIND) && !defined(__METAG) && !defined(__mips) && !defined(__riscv) -+ #include "/usr/include/valgrind/memcheck.h" -+ -+ #define VG_MARK_INITIALIZED(pvData,ui32Size) VALGRIND_MAKE_MEM_DEFINED(pvData,ui32Size) -+ #define VG_MARK_NOACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_NOACCESS(pvData,ui32Size) -+ #define VG_MARK_ACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_UNDEFINED(pvData,ui32Size) -+ #define VG_ASSERT_DEFINED(pvData,ui32Size) VALGRIND_CHECK_MEM_IS_DEFINED(pvData,ui32Size) -+#else -+ #if defined(_MSC_VER) -+ # define PVR_MSC_SUPPRESS_4127 __pragma(warning(suppress:4127)) -+ #else -+ # define PVR_MSC_SUPPRESS_4127 -+ #endif -+ -+ #define VG_MARK_INITIALIZED(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (false) -+ #define VG_MARK_NOACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (false) -+ #define VG_MARK_ACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (false) -+ #define VG_ASSERT_DEFINED(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (false) -+#endif -+ -+#define IMG_STRINGIFY_IMPL(x) # x -+#define IMG_STRINGIFY(x) IMG_STRINGIFY_IMPL(x) -+ -+#define IMG_CONCATENATE_IMPL(x,y) x ## y -+#define IMG_CONCATENATE(x,y) IMG_CONCATENATE_IMPL(x,y) -+ -+#if defined(INTEGRITY_OS) -+ /* Definitions not present in INTEGRITY. */ -+ #define PATH_MAX 200 -+#endif -+ -+#if defined(__clang__) || defined(__GNUC__) -+ /* __SIZEOF_POINTER__ is defined already by these compilers */ -+#elif defined(INTEGRITY_OS) -+ #if defined(__Ptr_Is_64) -+ #define __SIZEOF_POINTER__ 8 -+ #else -+ #define __SIZEOF_POINTER__ 4 -+ #endif -+#elif defined(_WIN32) -+ #define __SIZEOF_POINTER__ sizeof(char *) -+#else -+ #warning Unknown OS - using default method to determine whether CPU arch is 64-bit. -+ #define __SIZEOF_POINTER__ sizeof(char *) -+#endif -+ -+/* RDI8567: gcc/clang/llvm load/store optimisations may cause issues with -+ * uncached device memory allocations. Some pointers are made 'volatile' -+ * to prevent those optimisations being applied to writes through those -+ * pointers. -+ */ -+#if (GCC_VERSION_AT_LEAST(7, 0) || defined(__clang__)) && (defined(__arm64__) || defined(__aarch64__)) -+#define NOLDSTOPT volatile -+/* after applying 'volatile' to a pointer, we may need to cast it to 'void *' -+ * to keep it compatible with its existing uses. -+ */ -+#define NOLDSTOPT_VOID (void *) -+ -+#define NOLDSTOPT_REQUIRED 1 -+#else -+#define NOLDSTOPT -+#define NOLDSTOPT_VOID -+#endif -+ -+#define PVR_PRE_DPF (void) printf -+ -+#endif /* IMG_DEFS_H */ -+/***************************************************************************** -+ End of file (img_defs.h) -+*****************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/img_elf.h b/drivers/gpu/drm/img-rogue/img_elf.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/img_elf.h -@@ -0,0 +1,111 @@ -+/*************************************************************************/ /*! -+@File img_elf.h -+@Title IMG ELF file definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Platform RGX -+@Description Definitions for ELF file structures used in the DDK. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
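The two-level stringify/concatenate helpers in img_defs.h rely on the extra expansion step so that macro arguments are expanded before pasting or quoting. A minimal standalone sketch (macros repeated so it compiles on its own):

```c
#include <stdio.h>

#define IMG_STRINGIFY_IMPL(x)     # x
#define IMG_STRINGIFY(x)          IMG_STRINGIFY_IMPL(x)
#define IMG_CONCATENATE_IMPL(x,y) x ## y
#define IMG_CONCATENATE(x,y)      IMG_CONCATENATE_IMPL(x,y)

#define BUILD_STAGE 3

int main(void)
{
    int IMG_CONCATENATE(stage_, BUILD_STAGE) = 0;   /* declares 'stage_3' */
    printf("%s\n", IMG_STRINGIFY(BUILD_STAGE));     /* prints "3", not "BUILD_STAGE" */
    (void)stage_3;
    return 0;
}
```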
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#if !defined(IMG_ELF_H) -+#define IMG_ELF_H -+ -+#include "img_types.h" -+ -+/* ELF format defines */ -+#define ELF_PT_LOAD (0x1U) /* Program header identifier as Load */ -+#define ELF_SHT_SYMTAB (0x2U) /* Section identifier as Symbol Table */ -+#define ELF_SHT_STRTAB (0x3U) /* Section identifier as String Table */ -+#define MAX_STRTAB_NUM (0x8U) /* Maximum number of string table in the ELF file */ -+ -+/* Redefined structs of ELF format */ -+typedef struct -+{ -+ IMG_UINT8 ui32Eident[16]; -+ IMG_UINT16 ui32Etype; -+ IMG_UINT16 ui32Emachine; -+ IMG_UINT32 ui32Eversion; -+ IMG_UINT32 ui32Eentry; -+ IMG_UINT32 ui32Ephoff; -+ IMG_UINT32 ui32Eshoff; -+ IMG_UINT32 ui32Eflags; -+ IMG_UINT16 ui32Eehsize; -+ IMG_UINT16 ui32Ephentsize; -+ IMG_UINT16 ui32Ephnum; -+ IMG_UINT16 ui32Eshentsize; -+ IMG_UINT16 ui32Eshnum; -+ IMG_UINT16 ui32Eshtrndx; -+} IMG_ELF_HDR; -+ -+typedef struct -+{ -+ IMG_UINT32 ui32Stname; -+ IMG_UINT32 ui32Stvalue; -+ IMG_UINT32 ui32Stsize; -+ IMG_UINT8 ui32Stinfo; -+ IMG_UINT8 ui32Stother; -+ IMG_UINT16 ui32Stshndx; -+} IMG_ELF_SYM; -+ -+typedef struct -+{ -+ IMG_UINT32 ui32Shname; -+ IMG_UINT32 ui32Shtype; -+ IMG_UINT32 ui32Shflags; -+ IMG_UINT32 ui32Shaddr; -+ IMG_UINT32 ui32Shoffset; -+ IMG_UINT32 ui32Shsize; -+ IMG_UINT32 ui32Shlink; -+ IMG_UINT32 ui32Shinfo; -+ IMG_UINT32 ui32Shaddralign; -+ IMG_UINT32 ui32Shentsize; -+} IMG_ELF_SHDR; -+ -+typedef struct -+{ -+ IMG_UINT32 ui32Ptype; -+ IMG_UINT32 ui32Poffset; -+ IMG_UINT32 ui32Pvaddr; -+ IMG_UINT32 ui32Ppaddr; -+ IMG_UINT32 ui32Pfilesz; -+ IMG_UINT32 ui32Pmemsz; -+ IMG_UINT32 ui32Pflags; -+ IMG_UINT32 ui32Palign; -+} IMG_ELF_PROGRAM_HDR; -+ -+#endif /* IMG_ELF_H */ -diff --git a/drivers/gpu/drm/img-rogue/img_types.h b/drivers/gpu/drm/img-rogue/img_types.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/img_types.h -@@ -0,0 +1,331 @@ -+/*************************************************************************/ /*! -+@File -+@Title Global types for use by IMG APIs -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Defines type aliases for use by IMG APIs. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
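For orientation, the IMG_ELF_* structures above mirror the classic 32-bit ELF header layout. The sketch below walks the program headers of an in-memory image using an equivalent local layout; it assumes a naturally aligned, native-endian 32-bit ELF blob and uses hypothetical names, so it is an illustration rather than driver code:

```c
#include <stdint.h>
#include <stddef.h>

#define ELF_PT_LOAD 0x1U   /* same value as in img_elf.h */

typedef struct {
    uint8_t  e_ident[16];
    uint16_t e_type, e_machine;
    uint32_t e_version, e_entry, e_phoff, e_shoff, e_flags;
    uint16_t e_ehsize, e_phentsize, e_phnum, e_shentsize, e_shnum, e_shstrndx;
} elf32_hdr;

typedef struct {
    uint32_t p_type, p_offset, p_vaddr, p_paddr, p_filesz, p_memsz, p_flags, p_align;
} elf32_phdr;

/* Count loadable segments; returns 0 if the buffer is clearly not ELF.
 * Assumes 'image' is suitably aligned for the struct accesses below. */
static unsigned count_load_segments(const uint8_t *image)
{
    const elf32_hdr *hdr = (const elf32_hdr *)image;
    unsigned i, loads = 0;

    if (image[0] != 0x7f || image[1] != 'E' || image[2] != 'L' || image[3] != 'F')
        return 0;

    for (i = 0; i < hdr->e_phnum; i++) {
        const elf32_phdr *ph =
            (const elf32_phdr *)(image + hdr->e_phoff + (size_t)i * hdr->e_phentsize);
        if (ph->p_type == ELF_PT_LOAD)
            loads++;
    }
    return loads;
}
```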
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef IMG_TYPES_H -+#define IMG_TYPES_H -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+/* To use C99 types and definitions, there are two special cases we need to -+ * cater for: -+ * -+ * - Visual Studio: in VS2010 or later, some standard headers are available, -+ * and MSVC has its own built-in sized types. We can define the C99 types -+ * in terms of these. -+ * -+ * - Linux kernel code: C99 sized types are defined in , but -+ * some other features (like macros for constants or printf format -+ * strings) are missing, so we need to fill in the gaps ourselves. -+ * -+ * For other cases (userspace code under Linux, Android or Neutrino, or -+ * firmware code), we can include the standard headers. -+ */ -+#if defined(_MSC_VER) -+ #include /* bool */ -+ #include "msvc_types.h" -+#elif defined(__linux__) && defined(__KERNEL__) -+ #include -+ #include -+ #include "kernel_types.h" -+#elif defined(__linux__) || defined(__METAG) || defined(__MINGW32__) || \ -+ defined(__QNXNTO__) || defined(INTEGRITY_OS) || defined(__riscv) || defined(__APPLE__) -+ #include /* NULL */ -+ #include -+#if defined(__riscv) -+#pragma GCC diagnostic push -+#pragma GCC diagnostic ignored "-Wundef" -+#endif -+ #include /* intX_t/uintX_t, format specifiers */ -+ #include /* INT_MIN, etc */ -+#if defined(__riscv) -+#pragma GCC diagnostic pop -+#endif -+ #include /* bool */ -+#elif defined(__mips) -+ #include /* NULL */ -+ #include /* intX_t/uintX_t, format specifiers */ -+ #include /* bool */ -+#else -+ #error C99 support not set up for this build -+#endif -+ -+/* -+ * Due to a Klocwork bug, 'true'/'false' constants are not recognized to be of -+ * boolean type. This results in large number of false-positives being reported -+ * (MISRA.ETYPE.ASSIGN.2012: "An expression value of essential type 'signed char' -+ * is assigned to an object of essential type 'bool'"). Work around this by -+ * redefining those constants with cast to bool added. 
-+ */ -+#if defined(__KLOCWORK__) && !defined(__cplusplus) -+#undef true -+#undef false -+#define true ((bool) 1) -+#define false ((bool) 0) -+#endif -+ -+typedef unsigned int IMG_UINT; -+typedef int IMG_INT; -+ -+typedef uint8_t IMG_UINT8, *IMG_PUINT8; -+typedef uint8_t IMG_BYTE, *IMG_PBYTE; -+typedef int8_t IMG_INT8; -+typedef char IMG_CHAR, *IMG_PCHAR; -+ -+typedef uint16_t IMG_UINT16, *IMG_PUINT16; -+typedef int16_t IMG_INT16; -+typedef uint32_t IMG_UINT32, *IMG_PUINT32; -+typedef int32_t IMG_INT32, *IMG_PINT32; -+#if defined(INTEGRITY_OS) -+#if __INT_BIT >= 32U -+#define IMG_UINT32_C(n) ((IMG_UINT32)(n ## U)) -+#elif __LONG_BIT >= 32U -+#define IMG_UINT32_C(n) ((IMG_UINT32)(n ## UL)) -+#elif defined(__LLONG_BIT) && __LLONG_BIT >= 32U -+#define IMG_UINT32_C(n) ((IMG_UINT32)(n ## ULL)) -+#endif -+#else /* defined(INTEGRITY_OS) */ -+#define IMG_UINT32_C(c) ((IMG_UINT32)UINT32_C(c)) -+#endif /* defined(INTEGRITY_OS) */ -+ -+typedef uint64_t IMG_UINT64, *IMG_PUINT64; -+typedef int64_t IMG_INT64; -+#define IMG_INT64_C(c) INT64_C(c) -+#if defined(INTEGRITY_OS) -+#if __INT_BIT >= 64U -+#define IMG_UINT64_C(n) (n ## U) -+#elif defined(__LONG_BIT) && __LONG_BIT >= 64U -+#define IMG_UINT64_C(n) (n ## UL) -+#elif defined(__LLONG_BIT) && __LLONG_BIT >= 64U -+#define IMG_UINT64_C(n) (n ## ULL) -+#endif -+#else /* defined(INTEGRITY_OS) */ -+#define IMG_UINT64_C(c) UINT64_C(c) -+#endif /* defined(INTEGRITY_OS) */ -+#define IMG_UINT16_C(c) UINT16_C(c) -+#define IMG_UINT64_FMTSPEC PRIu64 -+#define IMG_UINT64_FMTSPECX PRIX64 -+#define IMG_UINT64_FMTSPECx PRIx64 -+#define IMG_UINT64_FMTSPECo PRIo64 -+#define IMG_INT64_FMTSPECd PRId64 -+ -+#define IMG_UINT8_MAX UINT8_MAX -+#define IMG_UINT16_MAX UINT16_MAX -+#define IMG_UINT32_MAX UINT32_MAX -+#define IMG_UINT64_MAX UINT64_MAX -+ -+#define IMG_INT8_MAX INT8_MAX -+#define IMG_INT16_MAX INT16_MAX -+#define IMG_INT32_MAX INT32_MAX -+#define IMG_INT64_MAX INT64_MAX -+ -+/* Linux kernel mode does not use floating point */ -+typedef float IMG_FLOAT, *IMG_PFLOAT; -+typedef double IMG_DOUBLE; -+ -+typedef union -+{ -+ IMG_UINT32 ui32; -+ IMG_FLOAT f; -+} IMG_UINT32_FLOAT; -+ -+typedef int IMG_SECURE_TYPE; -+ -+typedef bool IMG_BOOL; -+typedef bool* IMG_PBOOL; -+#define IMG_FALSE ((bool) 0) -+#define IMG_TRUE ((bool) 1) -+ -+#if defined(UNDER_WDDM) || defined(WINDOWS_WDF) -+typedef IMG_CHAR const* IMG_PCCHAR; -+#endif -+ -+/* Format specifiers for 'size_t' type */ -+#if defined(_MSC_VER) -+#define IMG_SIZE_FMTSPEC "%Iu" -+#define IMG_SIZE_FMTSPECX "%Ix" -+#else -+#define IMG_SIZE_FMTSPEC "%zu" -+#define IMG_SIZE_FMTSPECX "%zx" -+#endif -+ -+#if defined(__linux__) && defined(__KERNEL__) -+/* prints the function name when used with printk */ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) -+#define IMG_PFN_FMTSPEC "%ps" -+#else -+#define IMG_PFN_FMTSPEC "%pf" -+#endif -+#else -+#define IMG_PFN_FMTSPEC "%p" -+#endif -+ -+typedef void *IMG_HANDLE; -+ -+/* Process IDs */ -+typedef IMG_UINT32 IMG_PID; -+ -+/* OS connection type */ -+typedef int IMG_OS_CONNECTION; -+ -+ -+/* -+ * Address types. -+ * All types used to refer to a block of memory are wrapped in structures -+ * to enforce some degree of type safety, i.e. a IMG_DEV_VIRTADDR cannot -+ * be assigned to a variable of type IMG_DEV_PHYADDR because they are not the -+ * same thing. -+ * -+ * There is an assumption that the system contains at most one non-cpu mmu, -+ * and a memory block is only mapped by the MMU once. -+ * -+ * Different devices could have offset views of the physical address space. 
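The sized aliases and PRI*-based format specifiers above are meant to keep printf formatting portable between 32- and 64-bit builds. A minimal hosted-C sketch (types and one specifier repeated locally so it compiles on its own):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t IMG_UINT64;

#define IMG_UINT64_FMTSPECX PRIX64   /* as in img_types.h */

int main(void)
{
    IMG_UINT64 uiAddr = 0x40000000ULL;

    /* String concatenation builds the full format at compile time. */
    printf("addr=0x%010" IMG_UINT64_FMTSPECX "\n", uiAddr);
    return 0;
}
```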
-+ * -+ */ -+ -+ -+/* -+ * -+ * +------------+ +------------+ +------------+ +------------+ -+ * | CPU | | DEV | | DEV | | DEV | -+ * +------------+ +------------+ +------------+ +------------+ -+ * | | | | -+ * | void * |IMG_DEV_VIRTADDR |IMG_DEV_VIRTADDR | -+ * | \-------------------/ | -+ * | | | -+ * +------------+ +------------+ | -+ * | MMU | | MMU | | -+ * +------------+ +------------+ | -+ * | | | -+ * | | | -+ * | | | -+ * +--------+ +---------+ +--------+ -+ * | Offset | | (Offset)| | Offset | -+ * +--------+ +---------+ +--------+ -+ * | | IMG_DEV_PHYADDR | -+ * | | | -+ * | | IMG_DEV_PHYADDR | -+ * +---------------------------------------------------------------------+ -+ * | System Address bus | -+ * +---------------------------------------------------------------------+ -+ * -+ */ -+ -+#define IMG_DEV_VIRTADDR_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX -+#define IMG_DEVMEM_SIZE_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX -+#define IMG_DEVMEM_ALIGN_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX -+#define IMG_DEVMEM_OFFSET_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX -+ -+/* cpu physical address */ -+typedef struct -+{ -+#if defined(UNDER_WDDM) || defined(WINDOWS_WDF) -+ uintptr_t uiAddr; -+#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (uintptr_t)(var) -+#define CPUPHYADDR_FMTARG(var) (IMG_UINT64)(var) -+#define CPUPHYADDR_UINT_FMTSPEC "0x%016" IMG_UINT64_FMTSPECx -+#elif defined(__linux__) && defined(__KERNEL__) -+ phys_addr_t uiAddr; -+#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (phys_addr_t)(var) -+#define CPUPHYADDR_FMTARG(var) (&var) -+#define CPUPHYADDR_UINT_FMTSPEC "%pa" -+#else -+ IMG_UINT64 uiAddr; -+#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (IMG_UINT64)(var) -+#define CPUPHYADDR_FMTARG(var) (var) -+#define CPUPHYADDR_UINT_FMTSPEC "0x%016" IMG_UINT64_FMTSPECx -+#endif -+} IMG_CPU_PHYADDR; -+ -+/* device physical address */ -+typedef struct -+{ -+ IMG_UINT64 uiAddr; -+} IMG_DEV_PHYADDR; -+ -+/* dma address */ -+typedef struct -+{ -+ IMG_UINT64 uiAddr; -+} IMG_DMA_ADDR; -+ -+/* -+ rectangle structure -+*/ -+typedef struct -+{ -+ IMG_INT32 x0; -+ IMG_INT32 y0; -+ IMG_INT32 x1; -+ IMG_INT32 y1; -+} IMG_RECT; -+ -+typedef struct -+{ -+ IMG_INT16 x0; -+ IMG_INT16 y0; -+ IMG_INT16 x1; -+ IMG_INT16 y1; -+} IMG_RECT_16; -+ -+/* -+ * box structure -+ */ -+typedef struct -+{ -+ IMG_INT32 x0; -+ IMG_INT32 y0; -+ IMG_INT32 z0; -+ IMG_INT32 x1; -+ IMG_INT32 y1; -+ IMG_INT32 z1; -+} IMG_BOX; -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif /* IMG_TYPES_H */ -+/****************************************************************************** -+ End of file (img_types.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/img_types_check.h b/drivers/gpu/drm/img-rogue/img_types_check.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/img_types_check.h -@@ -0,0 +1,58 @@ -+/*************************************************************************/ /*! -+@File -+@Title Global types for use by IMG APIs -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Performs size checks on some of the IMG types. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
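The single-member structs above exist purely for type safety: two addresses with the same representation cannot be assigned to one another by accident. A standalone sketch (IMG_DEV_VIRTADDR is declared elsewhere in the driver; a same-shaped local stand-in is used here):

```c
#include <stdint.h>

typedef struct { uint64_t uiAddr; } IMG_DEV_PHYADDR;
typedef struct { uint64_t uiAddr; } IMG_DEV_VIRTADDR;   /* same shape, distinct type */

void demo_copy(void)
{
    IMG_DEV_PHYADDR  sPhys = { .uiAddr = 0x80000000ULL };
    IMG_DEV_VIRTADDR sVirt;

    /* sVirt = sPhys;            -- rejected by the compiler: incompatible types */
    sVirt.uiAddr = sPhys.uiAddr; /* an intentional copy must name the member */
    (void)sVirt;
}
```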
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef IMG_TYPES_CHECK_H -+#define IMG_TYPES_CHECK_H -+ -+#ifndef __KERNEL__ -+#include -+#endif /* __KERNEL__ */ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+static_assert(sizeof(IMG_BOOL) == 1, "invalid size of IMG_BOOL"); -+static_assert(sizeof(IMG_INT) == 4, "invalid size of IMG_INT"); -+static_assert(sizeof(IMG_UINT) == 4, "invalid size of IMG_UINT"); -+static_assert(sizeof(PVRSRV_ERROR) == 4, "invalid size of PVRSRV_ERROR"); -+ -+#endif /* IMG_TYPES_CHECK_H */ -diff --git a/drivers/gpu/drm/img-rogue/info_page.h b/drivers/gpu/drm/img-rogue/info_page.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/info_page.h -@@ -0,0 +1,99 @@ -+/*************************************************************************/ /*! -+@File -+@Title Kernel/User mode general purpose shared memory. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description General purpose memory shared between kernel driver and user -+ mode. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
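In the spirit of img_types_check.h above, the same compile-time size checks can be reproduced in a plain C11 translation unit (local aliases only; the real header checks the driver's own types):

```c
#include <assert.h>    /* C11: static_assert maps to _Static_assert */
#include <stdbool.h>
#include <stdint.h>

typedef bool    IMG_BOOL;
typedef int32_t IMG_INT32;

static_assert(sizeof(IMG_BOOL)  == 1, "invalid size of IMG_BOOL");
static_assert(sizeof(IMG_INT32) == 4, "invalid size of IMG_INT32");
```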
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef INFO_PAGE_KM_H -+#define INFO_PAGE_KM_H -+ -+#include "pvrsrv_error.h" -+ -+#include "pmr.h" -+#include "pvrsrv.h" -+#include "info_page_defs.h" -+ -+/** -+ * @Function InfoPageCreate -+ * @Description Allocates resources for global information page. -+ * @Input psData pointer to PVRSRV data -+ * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error. -+ */ -+PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData); -+ -+/** -+ * @Function InfoPageDestroy -+ * @Description Frees all of the resource of global information page. -+ * @Input psData pointer to PVRSRV data -+ * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error. -+ */ -+void InfoPageDestroy(PVRSRV_DATA *psData); -+ -+/** -+ * @Function PVRSRVAcquireInfoPageKM() -+ * @Description This interface is used for obtaining the global information page -+ * which acts as a general purpose shared memory between KM and UM. -+ * The use of this information page outside of services is _not_ -+ * recommended. -+ * @Output ppsPMR handle to exported PMR -+ * @Return -+ */ -+PVRSRV_ERROR PVRSRVAcquireInfoPageKM(PMR **ppsPMR); -+ -+/** -+ * @Function PVRSRVReleaseInfoPageKM() -+ * @Description This function matches PVRSRVAcquireInfoPageKM(). -+ * @Input psPMR handle to exported PMR -+ * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error. 
-+ */ -+PVRSRV_ERROR PVRSRVReleaseInfoPageKM(PMR *psPMR); -+ -+/** -+ * @Function GetInfoPageDebugFlagsKM() -+ * @Description Return info page debug flags -+ * @Return info page debug flags -+ */ -+static INLINE IMG_UINT32 GetInfoPageDebugFlagsKM(void) -+{ -+ return (PVRSRVGetPVRSRVData())->pui32InfoPage[DEBUG_FEATURE_FLAGS]; -+} -+ -+#endif /* INFO_PAGE_KM_H */ -diff --git a/drivers/gpu/drm/img-rogue/info_page_client.h b/drivers/gpu/drm/img-rogue/info_page_client.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/info_page_client.h -@@ -0,0 +1,89 @@ -+/*************************************************************************/ /*! -+@File -+@Title Kernel/User mode general purpose shared memory. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description General purpose shared memory (i.e. information page) mapped by -+ kernel space driver and user space clients. All info page -+ entries are sizeof(IMG_UINT32) on both 32/64-bit environments. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef INFO_PAGE_CLIENT_H -+#define INFO_PAGE_CLIENT_H -+ -+#include "device_connection.h" -+#include "info_page_defs.h" -+#if defined(__KERNEL__) -+#include "pvrsrv.h" -+#endif -+ -+/*************************************************************************/ /*! 
-+@Function GetInfoPage -+ -+@Description Return Info Page address -+ -+@Input hDevConnection - Services device connection -+ -+@Return Info Page address -+*/ -+/*****************************************************************************/ -+static INLINE IMG_PUINT32 GetInfoPage(SHARED_DEV_CONNECTION hDevConnection) -+{ -+#if defined(__KERNEL__) -+ return (PVRSRVGetPVRSRVData())->pui32InfoPage; -+#else -+ return hDevConnection->pui32InfoPage; -+#endif -+} -+ -+/*************************************************************************/ /*! -+@Function GetInfoPageDebugFlags -+ -+@Description Return Info Page debug flags -+ -+@Input hDevConnection - Services device connection -+ -+@Return Info Page debug flags -+*/ -+/*****************************************************************************/ -+static INLINE IMG_UINT32 GetInfoPageDebugFlags(SHARED_DEV_CONNECTION hDevConnection) -+{ -+ return GetInfoPage(hDevConnection)[DEBUG_FEATURE_FLAGS]; -+} -+ -+#endif /* INFO_PAGE_CLIENT_H */ -diff --git a/drivers/gpu/drm/img-rogue/info_page_defs.h b/drivers/gpu/drm/img-rogue/info_page_defs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/info_page_defs.h -@@ -0,0 +1,133 @@ -+/*************************************************************************/ /*! -+@File -+@Title Kernel/User mode general purpose shared memory. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description General purpose shared memory (i.e. information page) mapped by -+ kernel space driver and user space clients. All information page -+ entries are sizeof(IMG_UINT32) on both 32/64-bit environments. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
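For illustration, a stripped-down user-mode reader of one info-page entry looks like the following; `demo_connection` is a stand-in for the real services connection object, and the `DEBUG_FEATURE_FLAGS` index follows from the block layout defined in info_page_defs.h (shown next in this patch):

```c
#include <stdint.h>

#define DEBUG_FEATURE_FLAGS 40   /* first entry of the debug block per the layout macros */

struct demo_connection {
    uint32_t *pui32InfoPage;     /* user-mode mapping of the shared page */
};

/* Reads a single IMG_UINT32 entry, in the style of GetInfoPageDebugFlags(). */
static inline uint32_t demo_get_debug_flags(const struct demo_connection *conn)
{
    return conn->pui32InfoPage[DEBUG_FEATURE_FLAGS];
}
```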
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef INFO_PAGE_DEFS_H -+#define INFO_PAGE_DEFS_H -+ -+/* Info page is divided in "blocks" of size INFO_PAGE_CHUNK_SIZE. Each block -+ * should start with the INFO_PAGE_[NAME]_BLOCK_START macro which takes the -+ * value of previous block (except for the first block which starts from 0). -+ * -+ * Each last value of the block (INFO_PAGE_[NAME]_BLOCK_END) should be unused -+ * within that block since it's a first value of the next block. This value -+ * should be a multiple of INFO_PAGE_CHUNK_SIZE. -+ * -+ * Care must be taken to not go over allowed number of elements in each block -+ * which is marked with the INFO_PAGE_[NAME]_BLOCK_END macro. -+ * -+ * Blocks consist of entries that are defined with the INFO_PAGE_ENTRY() macro. -+ * Each entry must define a unique index within the block and as mentioned -+ * can't go over the INFO_PAGE_[NAME]_BLOCK_END limit. -+ * -+ * Always add blocks to the end of the existing list and update -+ * INFO_PAGE_TOTAL_SIZE after. -+ * -+ * See current usage of the Info Page below for examples. -+ */ -+ -+#define INFO_PAGE_CHUNK_SIZE 8 -+#define INFO_PAGE_BLOCK_END(start,size) ((start) + (size) * INFO_PAGE_CHUNK_SIZE) -+#define INFO_PAGE_ENTRY(start,index) ((start) + (index)) -+#define INFO_PAGE_SIZE_IN_BYTES(end) ((end) * sizeof(IMG_UINT32)) -+ -+#define INFO_PAGE_CACHEOP_BLOCK_START 0 -+#define INFO_PAGE_CACHEOP_BLOCK_END INFO_PAGE_BLOCK_END(INFO_PAGE_CACHEOP_BLOCK_START, 1) -+#define INFO_PAGE_HWPERF_BLOCK_START INFO_PAGE_CACHEOP_BLOCK_END -+#define INFO_PAGE_HWPERF_BLOCK_END INFO_PAGE_BLOCK_END(INFO_PAGE_HWPERF_BLOCK_START, 1) -+#define INFO_PAGE_TIMEOUT_BLOCK_START INFO_PAGE_HWPERF_BLOCK_END -+#define INFO_PAGE_TIMEOUT_BLOCK_END INFO_PAGE_BLOCK_END(INFO_PAGE_TIMEOUT_BLOCK_START, 2) -+#define INFO_PAGE_BRIDGE_BLOCK_START INFO_PAGE_TIMEOUT_BLOCK_END -+#define INFO_PAGE_BRIDGE_BLOCK_END INFO_PAGE_BLOCK_END(INFO_PAGE_BRIDGE_BLOCK_START, 1) -+#define INFO_PAGE_DEBUG_BLOCK_START INFO_PAGE_BRIDGE_BLOCK_END -+#define INFO_PAGE_DEBUG_BLOCK_END INFO_PAGE_BLOCK_END(INFO_PAGE_DEBUG_BLOCK_START, 1) -+#define INFO_PAGE_DEVMEM_BLOCK_START INFO_PAGE_DEBUG_BLOCK_END -+#define INFO_PAGE_DEVMEM_BLOCK_END INFO_PAGE_BLOCK_END(INFO_PAGE_DEVMEM_BLOCK_START, 1) -+ -+/* IMPORTANT: Make sure this always uses the last INFO_PAGE_[NAME]_BLOCK_END definition.*/ -+#define INFO_PAGE_TOTAL_SIZE INFO_PAGE_SIZE_IN_BYTES(INFO_PAGE_DEVMEM_BLOCK_END) -+ -+/* CacheOp information page entries */ -+ -+#define CACHEOP_INFO_UMKMTHRESHLD INFO_PAGE_ENTRY(INFO_PAGE_CACHEOP_BLOCK_START, 0) /*!< UM=>KM routing threshold in bytes */ -+#define CACHEOP_INFO_KMDFTHRESHLD INFO_PAGE_ENTRY(INFO_PAGE_CACHEOP_BLOCK_START, 1) /*!< KM/DF threshold in bytes */ -+#define CACHEOP_INFO_LINESIZE INFO_PAGE_ENTRY(INFO_PAGE_CACHEOP_BLOCK_START, 2) /*!< CPU data cache line size */ -+#define CACHEOP_INFO_PGSIZE INFO_PAGE_ENTRY(INFO_PAGE_CACHEOP_BLOCK_START, 3) /*!< CPU MMU page size */ -+ -+/* HWPerf 
information page entries */ -+ -+#define HWPERF_FILTER_SERVICES_IDX INFO_PAGE_ENTRY(INFO_PAGE_HWPERF_BLOCK_START, 0) -+#define HWPERF_FILTER_EGL_IDX INFO_PAGE_ENTRY(INFO_PAGE_HWPERF_BLOCK_START, 1) -+#define HWPERF_FILTER_OPENGLES_IDX INFO_PAGE_ENTRY(INFO_PAGE_HWPERF_BLOCK_START, 2) -+#define HWPERF_FILTER_OPENCL_IDX INFO_PAGE_ENTRY(INFO_PAGE_HWPERF_BLOCK_START, 3) -+#define HWPERF_FILTER_VULKAN_IDX INFO_PAGE_ENTRY(INFO_PAGE_HWPERF_BLOCK_START, 4) -+#define HWPERF_FILTER_OPENGL_IDX INFO_PAGE_ENTRY(INFO_PAGE_HWPERF_BLOCK_START, 5) -+ -+/* Timeout values */ -+ -+#define TIMEOUT_INFO_VALUE_RETRIES INFO_PAGE_ENTRY(INFO_PAGE_TIMEOUT_BLOCK_START, 0) -+#define TIMEOUT_INFO_VALUE_TIMEOUT_MS INFO_PAGE_ENTRY(INFO_PAGE_TIMEOUT_BLOCK_START, 1) -+#define TIMEOUT_INFO_CONDITION_RETRIES INFO_PAGE_ENTRY(INFO_PAGE_TIMEOUT_BLOCK_START, 2) -+#define TIMEOUT_INFO_CONDITION_TIMEOUT_MS INFO_PAGE_ENTRY(INFO_PAGE_TIMEOUT_BLOCK_START, 3) -+#define TIMEOUT_INFO_TASK_QUEUE_RETRIES INFO_PAGE_ENTRY(INFO_PAGE_TIMEOUT_BLOCK_START, 4) -+#define TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS INFO_PAGE_ENTRY(INFO_PAGE_TIMEOUT_BLOCK_START, 5) -+ -+/* Bridge Info */ -+ -+#define BRIDGE_INFO_RGX_BRIDGES INFO_PAGE_ENTRY(INFO_PAGE_BRIDGE_BLOCK_START, 0) -+#define BRIDGE_INFO_PVR_BRIDGES INFO_PAGE_ENTRY(INFO_PAGE_BRIDGE_BLOCK_START, 1) -+ -+/* Debug features */ -+ -+#define DEBUG_FEATURE_FLAGS INFO_PAGE_ENTRY(INFO_PAGE_DEBUG_BLOCK_START, 0) -+ -+#define DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED 0x1 /* flag - not part of info page */ -+#define DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED 0x2 /* flag - not part of info page */ -+ -+/* Device memory related information */ -+ -+/* This value is 64-bits wide, next value should have index larger by 2 */ -+#define DEVMEM_INFO_PHYS_BUF_MAX_SIZE INFO_PAGE_ENTRY(INFO_PAGE_DEVMEM_BLOCK_START, 0) -+ -+#endif /* INFO_PAGE_DEFS_H */ -diff --git a/drivers/gpu/drm/img-rogue/info_page_km.c b/drivers/gpu/drm/img-rogue/info_page_km.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/info_page_km.c -@@ -0,0 +1,142 @@ -+/*************************************************************************/ /*! -+@File info_page_km.c -+@Title Kernel/User space shared memory -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements general purpose shared memory between kernel driver -+ and user mode. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
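To make the index arithmetic above concrete, the following standalone sketch evaluates the first two blocks with plain integers; all values follow directly from the macros in info_page_defs.h:

```c
#include <stdio.h>

#define INFO_PAGE_CHUNK_SIZE            8
#define INFO_PAGE_BLOCK_END(start,size) ((start) + (size) * INFO_PAGE_CHUNK_SIZE)
#define INFO_PAGE_ENTRY(start,index)    ((start) + (index))

#define INFO_PAGE_CACHEOP_BLOCK_START   0
#define INFO_PAGE_CACHEOP_BLOCK_END     INFO_PAGE_BLOCK_END(INFO_PAGE_CACHEOP_BLOCK_START, 1)
#define INFO_PAGE_HWPERF_BLOCK_START    INFO_PAGE_CACHEOP_BLOCK_END

int main(void)
{
    /* The CacheOp block occupies entries 0..7, so HWPerf starts at entry 8. */
    printf("HWPERF block starts at index %d\n", INFO_PAGE_HWPERF_BLOCK_START);  /* 8 */
    printf("CACHEOP_INFO_LINESIZE index  %d\n",
           INFO_PAGE_ENTRY(INFO_PAGE_CACHEOP_BLOCK_START, 2));                  /* 2 */
    return 0;
}
```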
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "info_page_defs.h" -+#include "info_page.h" -+#include "pvrsrv.h" -+#include "devicemem.h" -+#include "pmr.h" -+ -+PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData) -+{ -+ const PVRSRV_MEMALLOCFLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT | -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL); -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psData != NULL); -+ -+ /* Allocate single page of memory for driver information page */ -+ eError = DevmemAllocateExportable(psData->psHostMemDeviceNode, -+ PVR_ALIGN(INFO_PAGE_TOTAL_SIZE, OSGetPageSize()), -+ OSGetPageSize(), -+ OSGetPageShift(), -+ uiMemFlags, -+ "PVRSRVInfoPage", -+ &psData->psInfoPageMemDesc); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAllocateExportable", e0); -+ -+ eError = DevmemAcquireCpuVirtAddr(psData->psInfoPageMemDesc, -+ (void **) &psData->pui32InfoPage); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAllocateExportable", e0); -+ -+ /* Look-up the memory descriptor PMR handle */ -+ eError = DevmemLocalGetImportHandle(psData->psInfoPageMemDesc, -+ (void **) &psData->psInfoPagePMR); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemLocalGetImportHandle", e0); -+ -+ eError = OSLockCreate(&psData->hInfoPageLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0); -+ -+ /* Because the memory is allocated read only we need to explicitly set it to -+ * 0. The reason for this is that if the memory is allocated with -+ * ZERO_ON_ALLOC a WRITEABLE attribute is implicitly added to the flags (see -+ * DevmemValidateParams()). 
*/ -+ OSCachedMemSet(psData->pui32InfoPage, 0, OSGetPageSize()); -+ -+ return PVRSRV_OK; -+ -+e0: -+ InfoPageDestroy(psData); -+ return eError; -+} -+ -+void InfoPageDestroy(PVRSRV_DATA *psData) -+{ -+ if (psData->psInfoPageMemDesc) -+ { -+ if (psData->pui32InfoPage != NULL) -+ { -+ DevmemReleaseCpuVirtAddr(psData->psInfoPageMemDesc); -+ psData->pui32InfoPage = NULL; -+ } -+ -+ DevmemFree(psData->psInfoPageMemDesc); -+ psData->psInfoPageMemDesc = NULL; -+ } -+ -+ if (psData->hInfoPageLock) -+ { -+ OSLockDestroy(psData->hInfoPageLock); -+ psData->hInfoPageLock = NULL; -+ } -+} -+ -+PVRSRV_ERROR PVRSRVAcquireInfoPageKM(PMR **ppsPMR) -+{ -+ PVRSRV_DATA *psData = PVRSRVGetPVRSRVData(); -+ -+ PVR_LOG_RETURN_IF_FALSE(psData->psInfoPageMemDesc != NULL, "invalid MEMDESC" -+ " handle", PVRSRV_ERROR_INVALID_PARAMS); -+ PVR_LOG_RETURN_IF_FALSE(psData->psInfoPagePMR != NULL, "invalid PMR handle", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ /* Copy the PMR import handle back */ -+ *ppsPMR = psData->psInfoPagePMR; -+ -+ /* Mark the PMR such that no layout changes can happen -+ * This is a fixed layout created during early stages of -+ * driver loading and shouldn't change later */ -+ PMR_SetLayoutFixed(psData->psInfoPagePMR, IMG_TRUE); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVReleaseInfoPageKM(PMR *ppsPMR) -+{ -+ /* Nothing to do here as PMR is singleton */ -+ PVR_UNREFERENCED_PARAMETER(ppsPMR); -+ return PVRSRV_OK; -+} -diff --git a/drivers/gpu/drm/img-rogue/interrupt_support.c b/drivers/gpu/drm/img-rogue/interrupt_support.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/interrupt_support.c -@@ -0,0 +1,151 @@ -+/*************************************************************************/ /*! -+@File -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
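InfoPageCreate() above funnels every failure path through a single cleanup label so that partially created state is torn down exactly once. A generic C sketch of that unwind pattern (hypothetical names, plain malloc/free standing in for the driver allocators):

```c
#include <stdlib.h>

struct demo_state {
    void *buf;
    void *lock;
};

static int demo_create(struct demo_state *s)
{
    s->buf  = NULL;
    s->lock = NULL;

    s->buf = malloc(4096);
    if (s->buf == NULL)
        goto err;

    s->lock = malloc(64);            /* stand-in for OSLockCreate() */
    if (s->lock == NULL)
        goto err;

    return 0;

err:
    /* Like InfoPageDestroy(), the unwind tolerates partially built state. */
    free(s->lock);
    free(s->buf);
    s->buf = s->lock = NULL;
    return -1;
}
```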
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include -+ -+#include "pvr_debug.h" -+#include "allocmem.h" -+#include "interrupt_support.h" -+ -+typedef struct LISR_DATA_TAG -+{ -+ IMG_UINT32 ui32IRQ; -+ PFN_SYS_LISR pfnLISR; -+ void *pvData; -+} LISR_DATA; -+ -+static irqreturn_t SystemISRWrapper(int irq, void *dev_id) -+{ -+ LISR_DATA *psLISRData = (LISR_DATA *)dev_id; -+ -+ PVR_UNREFERENCED_PARAMETER(irq); -+ -+ if (psLISRData) -+ { -+ if (psLISRData->pfnLISR(psLISRData->pvData)) -+ { -+ return IRQ_HANDLED; -+ } -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Missing interrupt data", __func__)); -+ } -+ -+ return IRQ_NONE; -+} -+ -+PVRSRV_ERROR OSInstallSystemLISR(IMG_HANDLE *phLISR, -+ IMG_UINT32 ui32IRQ, -+ const IMG_CHAR *pszDevName, -+ PFN_SYS_LISR pfnLISR, -+ void *pvData, -+ IMG_UINT32 ui32Flags) -+{ -+ LISR_DATA *psLISRData; -+ unsigned long ulIRQFlags = 0; -+ -+ if (pfnLISR == NULL || pvData == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (ui32Flags & ~SYS_IRQ_FLAG_MASK) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ switch (ui32Flags & SYS_IRQ_FLAG_TRIGGER_MASK) -+ { -+ case SYS_IRQ_FLAG_TRIGGER_DEFAULT: -+ break; -+ case SYS_IRQ_FLAG_TRIGGER_LOW: -+ ulIRQFlags |= IRQF_TRIGGER_LOW; -+ break; -+ case SYS_IRQ_FLAG_TRIGGER_HIGH: -+ ulIRQFlags |= IRQF_TRIGGER_HIGH; -+ break; -+ default: -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (ui32Flags & SYS_IRQ_FLAG_SHARED) -+ { -+ ulIRQFlags |= IRQF_SHARED; -+ } -+ -+ psLISRData = OSAllocMem(sizeof(*psLISRData)); -+ if (psLISRData == NULL) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ psLISRData->ui32IRQ = ui32IRQ; -+ psLISRData->pfnLISR = pfnLISR; -+ psLISRData->pvData = pvData; -+ -+ if (request_irq(ui32IRQ, SystemISRWrapper, ulIRQFlags, pszDevName, psLISRData)) -+ { -+ OSFreeMem(psLISRData); -+ -+ return PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER; -+ } -+ -+ *phLISR = (IMG_HANDLE)psLISRData; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSUninstallSystemLISR(IMG_HANDLE hLISR) -+{ -+ LISR_DATA *psLISRData = (LISR_DATA *)hLISR; -+ -+ if (psLISRData == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ free_irq(psLISRData->ui32IRQ, psLISRData); -+ -+ OSFreeMem(psLISRData); -+ -+ return PVRSRV_OK; -+} -diff --git a/drivers/gpu/drm/img-rogue/interrupt_support.h b/drivers/gpu/drm/img-rogue/interrupt_support.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/interrupt_support.h -@@ -0,0 +1,103 @@ -+/*************************************************************************/ /*! -+@File -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(INTERRUPT_SUPPORT_H) -+#define INTERRUPT_SUPPORT_H -+ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_device.h" -+ -+/*! Default trigger type for the interrupt line. */ -+#define SYS_IRQ_FLAG_TRIGGER_DEFAULT (0x0 << 0) -+/*! Interrupt triggered when interrupt line is low. */ -+#define SYS_IRQ_FLAG_TRIGGER_LOW (0x1 << 0) -+/*! Interrupt triggered when interrupt line is high. */ -+#define SYS_IRQ_FLAG_TRIGGER_HIGH (0x2 << 0) -+/*! Interrupt trigger mask. */ -+#define SYS_IRQ_FLAG_TRIGGER_MASK (SYS_IRQ_FLAG_TRIGGER_DEFAULT | \ -+ SYS_IRQ_FLAG_TRIGGER_LOW | \ -+ SYS_IRQ_FLAG_TRIGGER_HIGH) -+/*! The irq is allowed to be shared among several devices. */ -+#define SYS_IRQ_FLAG_SHARED (0x1 << 8) -+ -+/*! Interrupt flags mask. */ -+#define SYS_IRQ_FLAG_MASK (SYS_IRQ_FLAG_TRIGGER_MASK | \ -+ SYS_IRQ_FLAG_SHARED) -+ -+/*************************************************************************/ /*! -+@Description Pointer to a system Low-level Interrupt Service Routine (LISR). -+@Input pvData Private data provided to the LISR. -+@Return IMG_TRUE if interrupt handled, IMG_FALSE otherwise. -+*/ /**************************************************************************/ -+typedef IMG_BOOL (*PFN_SYS_LISR)(void *pvData); -+ -+/*************************************************************************/ /*! 
-+@Function OSInstallSystemLISR -+@Description Installs a system low-level interrupt handler -+@Output phLISR On return, contains a handle to the -+ installed LISR -+@Input ui32IRQ The IRQ number for which the -+ interrupt handler should be installed -+@Input pszDevName Name of the device for which the handler -+ is being installed -+@Input pfnLISR A pointer to an interrupt handler -+ function -+@Input pvData A pointer to data that should be passed -+ to pfnLISR when it is called -+@Input ui32Flags Interrupt flags -+@Return PVRSRV_OK on success, a failure code otherwise -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSInstallSystemLISR(IMG_HANDLE *phLISR, -+ IMG_UINT32 ui32IRQ, -+ const IMG_CHAR *pszDevName, -+ PFN_SYS_LISR pfnLISR, -+ void *pvData, -+ IMG_UINT32 ui32Flags); -+ -+/*************************************************************************/ /*! -+@Function OSUninstallSystemLISR -+@Description Uninstalls a system low-level interrupt handler -+@Input hLISRData The handle to the LISR to uninstall -+@Return PVRSRV_OK on success, a failure code otherwise -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSUninstallSystemLISR(IMG_HANDLE hLISRData); -+#endif /* !defined(INTERRUPT_SUPPORT_H) */ -diff --git a/drivers/gpu/drm/img-rogue/kernel_compatibility.h b/drivers/gpu/drm/img-rogue/kernel_compatibility.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/kernel_compatibility.h -@@ -0,0 +1,605 @@ -+/*************************************************************************/ /*! -+@Title Kernel versions compatibility macros -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Per-version macros to allow code to seamlessly use older kernel -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
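A usage sketch of the declarations above; the IRQ number and device name are illustrative placeholders, and the snippet only builds inside the driver tree where interrupt_support.h and the IMG types are available:

```c
#include "interrupt_support.h"

static IMG_BOOL DemoLISR(void *pvData)
{
    /* Acknowledge/clear the device interrupt here using pvData as the
     * per-device cookie passed at install time. */
    (void)pvData;
    return IMG_TRUE;     /* the interrupt was ours */
}

static PVRSRV_ERROR DemoInstall(IMG_HANDLE *phLISR, void *pvDevice)
{
    return OSInstallSystemLISR(phLISR,
                               42 /* illustrative IRQ */,
                               "demo-device",
                               DemoLISR,
                               pvDevice,
                               SYS_IRQ_FLAG_TRIGGER_DEFAULT | SYS_IRQ_FLAG_SHARED);
}

static void DemoRemove(IMG_HANDLE hLISR)
{
    (void)OSUninstallSystemLISR(hLISR);
}
```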
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef __KERNEL_COMPATIBILITY_H__ -+#define __KERNEL_COMPATIBILITY_H__ -+ -+#include -+#include -+ -+/* -+ * Stop supporting an old kernel? Remove the top block. -+ * New incompatible kernel? Append a new block at the bottom. -+ * -+ * Please write your version test as `VERSION < X.Y`, and use the earliest -+ * possible version :) -+ * -+ * If including this header file in other files, this should always be the -+ * last file included, as it can affect definitions/declarations in files -+ * included after it. -+ */ -+ -+/* Linux 3.6 introduced seq_vprintf(). Earlier versions don't have this -+ * so we work around the limitation by vsnprintf() + seq_puts(). -+ */ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) -+#define seq_vprintf(seq_file, fmt, args) \ -+do { \ -+ char aszBuffer[512]; /* maximum message buffer size */ \ -+ vsnprintf(aszBuffer, sizeof(aszBuffer), fmt, args); \ -+ seq_puts(seq_file, aszBuffer); \ -+} while (0) -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) -+ -+/* Linux 3.7 split VM_RESERVED into VM_DONTDUMP and VM_DONTEXPAND */ -+#define VM_DONTDUMP VM_RESERVED -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) */ -+ -+/* -+ * Note: this fix had to be written backwards because get_unused_fd_flags -+ * was already defined but not exported on kernels < 3.7 -+ * -+ * When removing support for kernels < 3.7, this block should be removed -+ * and all `get_unused_fd()` should be manually replaced with -+ * `get_unused_fd_flags(0)` -+ */ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) -+ -+/* Linux 3.19 removed get_unused_fd() */ -+/* get_unused_fd_flags was introduced in 3.7 */ -+#define get_unused_fd() get_unused_fd_flags(0) -+ -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) -+ -+/* -+ * Headers shouldn't normally be included by this file but this is a special -+ * case as it's not obvious from the name that devfreq_add_device needs this -+ * include. 
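 *
 * (<linux/devfreq.h> provides devfreq_add_device() and, on those older
 * kernels, the devfreq_simple_ondemand governor referenced below.)
 *
 * As a rough illustration of the wrapper below (df and sProfile are
 * hypothetical caller-side names), a call written for the >= 3.8 by-name
 * governor interface:
 *
 *   df = devfreq_add_device(dev, &sProfile, "simple_ondemand", NULL);
 *
 * resolves on the older kernels to the pre-3.8 by-pointer form:
 *
 *   df = devfreq_add_device(dev, &sProfile, &devfreq_simple_ondemand, NULL);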
-+ */
-+#include <linux/devfreq.h>
-+
-+#define devfreq_add_device(dev, profile, name, data) \
-+	({ \
-+		struct devfreq *__devfreq; \
-+		if (name && !strcmp(name, "simple_ondemand")) \
-+			__devfreq = devfreq_add_device(dev, profile, \
-+						       &devfreq_simple_ondemand, data); \
-+		else \
-+			__devfreq = ERR_PTR(-EINVAL); \
-+		__devfreq; \
-+	})
-+
-+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) */
-+
-+
-+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
-+
-+#define DRIVER_RENDER 0
-+#define DRM_RENDER_ALLOW 0
-+
-+/* Linux 3.12 introduced a new shrinker API */
-+#define SHRINK_STOP (~0UL)
-+
-+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) */
-+
-+
-+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
-+
-+#define dev_pm_opp_get_opp_count(dev) opp_get_opp_count(dev)
-+#define dev_pm_opp_get_freq(opp) opp_get_freq(opp)
-+#define dev_pm_opp_get_voltage(opp) opp_get_voltage(opp)
-+#define dev_pm_opp_add(dev, freq, u_volt) opp_add(dev, freq, u_volt)
-+#define dev_pm_opp_find_freq_ceil(dev, freq) opp_find_freq_ceil(dev, freq)
-+
-+#if defined(CONFIG_ARM)
-+/* Linux 3.13 renamed ioremap_cached to ioremap_cache */
-+#define ioremap_cache(cookie, size) ioremap_cached(cookie, size)
-+#endif /* defined(CONFIG_ARM) */
-+
-+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) */
-+
-+
-+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
-+
-+/* Linux 3.14 introduced a new set of sized min and max defines */
-+#ifndef U32_MAX
-+#define U32_MAX ((u32)UINT_MAX)
-+#endif
-+
-+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) */
-+
-+
-+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
-+
-+/* Linux 3.17 changed the 3rd argument from a `struct page ***pages` to
-+ * `struct page **pages` */
-+#define map_vm_area(area, prot, pages) map_vm_area(area, prot, &pages)
-+
-+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */
-+
-+
-+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
-+
-+/*
-+ * Linux 4.7 removed this function but its replacement was available since 3.19.
-+ */ -+#define drm_crtc_send_vblank_event(crtc, e) drm_send_vblank_event((crtc)->dev, drm_crtc_index(crtc), e) -+ -+/* seq_has_overflowed() was introduced in 3.19 but the structure elements -+ * have been available since 2.x -+ */ -+#include -+static inline bool seq_has_overflowed(struct seq_file *m) -+{ -+ return m->count == m->size; -+} -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) */ -+ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) -+ -+#define debugfs_create_file_size(name, mode, parent, data, fops, file_size) \ -+ ({ \ -+ struct dentry *de; \ -+ de = debugfs_create_file(name, mode, parent, data, fops); \ -+ if (de) \ -+ de->d_inode->i_size = file_size; \ -+ de; \ -+ }) -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) -+#define drm_fb_helper_unregister_fbi(fb_helper) \ -+ ({ \ -+ if ((fb_helper) && (fb_helper)->fbdev) \ -+ unregister_framebuffer((fb_helper)->fbdev); \ -+ }) -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) -+ -+/* Linux 4.4 renamed GFP_WAIT to GFP_RECLAIM */ -+#define __GFP_RECLAIM __GFP_WAIT -+ -+#if !defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) -+#define dev_pm_opp_of_add_table(dev) of_init_opp_table(dev) -+#define dev_pm_opp_of_remove_table(dev) of_free_opp_table(dev) -+#else -+#define sync_fence_create(data_name, sync_pt) sync_fence_create(data_name, &(sync_pt)->base) -+#endif -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) */ -+ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) && \ -+ (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) -+ -+/* Linux 4.5 added a new printf-style parameter for debug messages */ -+ -+#define drm_encoder_init(dev, encoder, funcs, encoder_type, name, ...) \ -+ drm_encoder_init(dev, encoder, funcs, encoder_type) -+ -+#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ...) \ -+ ({ (void) format_modifiers; drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type); }) -+ -+#define drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs, name, ...) \ -+ drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs) -+ -+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) -+ -+#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ...) \ -+ ({ (void) format_modifiers; drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type, name, ##__VA_ARGS__); }) -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) */ -+ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) -+ -+/* -+ * Linux 4.6 removed the first two parameters, the "struct task_struct" type -+ * pointer "current" is defined in asm/current.h, which makes it pointless -+ * to pass it on every function call. 
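 *
 * As a minimal sketch (uaddr, page and ret are hypothetical caller-side
 * names), code written against the modern five-argument form:
 *
 *   ret = get_user_pages(uaddr, 1, FOLL_WRITE, &page, NULL);
 *
 * is mapped by the macro below onto the old calling convention, where the
 * fifth and sixth arguments are the legacy write/force flags:
 *
 *   ret = get_user_pages(current, current->mm, uaddr, 1,
 *                        FOLL_WRITE & FOLL_WRITE, FOLL_WRITE & FOLL_FORCE,
 *                        &page, NULL);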
-+*/ -+#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \ -+ get_user_pages(current, current->mm, start, nr_pages, gup_flags & FOLL_WRITE, gup_flags & FOLL_FORCE, pages, vmas) -+ -+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) -+ -+/* Linux 4.9 replaced the write/force parameters with "gup_flags" */ -+#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \ -+ get_user_pages(start, nr_pages, gup_flags & FOLL_WRITE, gup_flags & FOLL_FORCE, pages, vmas) -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) */ -+ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \ -+ (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) -+ -+/* -+ * Linux 4.6 removed the start and end arguments as it now always maps -+ * the entire DMA-BUF. -+ * Additionally, dma_buf_end_cpu_access() now returns an int error. -+ */ -+#define dma_buf_begin_cpu_access(DMABUF, DIRECTION) dma_buf_begin_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION) -+#define dma_buf_end_cpu_access(DMABUF, DIRECTION) ({ dma_buf_end_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION); 0; }) -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \ -+ (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) -+ -+/* Linux 4.7 removed the first arguments as it's never been used */ -+#define drm_gem_object_lookup(filp, handle) drm_gem_object_lookup((filp)->minor->dev, filp, handle) -+ -+/* Linux 4.7 replaced nla_put_u64 with nla_put_u64_64bit */ -+#define nla_put_u64_64bit(skb, attrtype, value, padattr) nla_put_u64(skb, attrtype, value) -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) -+ -+/* Linux 4.9 changed the second argument to a drm_file pointer */ -+#define drm_vma_node_is_allowed(node, file_priv) drm_vma_node_is_allowed(node, (file_priv)->filp) -+#define drm_vma_node_allow(node, file_priv) drm_vma_node_allow(node, (file_priv)->filp) -+#define drm_vma_node_revoke(node, file_priv) drm_vma_node_revoke(node, (file_priv)->filp) -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) -+#define refcount_read(r) atomic_read(r) -+#define drm_mm_insert_node(mm, node, size) drm_mm_insert_node(mm, node, size, 0, DRM_MM_SEARCH_DEFAULT) -+ -+#define drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd) drm_helper_mode_fill_fb_struct(fb, mode_cmd) -+ -+/* -+ * In Linux Kernels >= 4.12 for x86 another level of page tables has been -+ * added. The added level (p4d) sits between pgd and pud, so when it -+ * doesn`t exist, pud_offset function takes pgd as a parameter instead -+ * of p4d. 
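 *
 * With the stubs below, a page-table walk written for the five-level
 * layout still compiles and behaves correctly on < 4.11 kernels, because
 * p4d_offset() simply passes the pgd through (sketch only, error checks
 * omitted; mm and addr are hypothetical):
 *
 *   pgd_t *pgd = pgd_offset(mm, addr);
 *   p4d_t *p4d = p4d_offset(pgd, addr);
 *   pud_t *pud = pud_offset(p4d, addr);
 *   pmd_t *pmd = pmd_offset(pud, addr);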
-+ */ -+#define p4d_t pgd_t -+#define p4d_offset(pgd, address) (pgd) -+#define p4d_none(p4d) (0) -+#define p4d_bad(p4d) (0) -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) */ -+ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ -+#define drm_mode_object_get(obj) drm_mode_object_reference(obj) -+#define drm_mode_object_put(obj) drm_mode_object_unreference(obj) -+#define drm_connector_get(obj) drm_connector_reference(obj) -+#define drm_connector_put(obj) drm_connector_unreference(obj) -+#define drm_framebuffer_get(obj) drm_framebuffer_reference(obj) -+#define drm_framebuffer_put(obj) drm_framebuffer_unreference(obj) -+#define drm_gem_object_get(obj) drm_gem_object_reference(obj) -+#define drm_gem_object_put_locked(obj) drm_gem_object_unreference(obj) -+#define __drm_gem_object_put(obj) __drm_gem_object_unreference(obj) -+#define drm_property_blob_get(obj) drm_property_reference_blob(obj) -+#define drm_property_blob_put(obj) drm_property_unreference_blob(obj) -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) -+ -+#define drm_dev_put(dev) drm_dev_unref(dev) -+ -+#define drm_mode_object_find(dev, file_priv, id, type) drm_mode_object_find(dev, id, type) -+#define drm_encoder_find(dev, file_priv, id) drm_encoder_find(dev, id) -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) -+ -+#define drm_atomic_helper_check_plane_state(plane_state, crtc_state, \ -+ min_scale, max_scale, \ -+ can_position, can_update_disabled) \ -+ ({ \ -+ const struct drm_rect __clip = { \ -+ .x2 = crtc_state->crtc->mode.hdisplay, \ -+ .y2 = crtc_state->crtc->mode.vdisplay, \ -+ }; \ -+ int __ret = drm_plane_helper_check_state(plane_state, \ -+ &__clip, \ -+ min_scale, max_scale, \ -+ can_position, \ -+ can_update_disabled); \ -+ __ret; \ -+ }) -+ -+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) -+ -+#define drm_atomic_helper_check_plane_state(plane_state, crtc_state, \ -+ min_scale, max_scale, \ -+ can_position, can_update_disabled) \ -+ ({ \ -+ const struct drm_rect __clip = { \ -+ .x2 = crtc_state->crtc->mode.hdisplay, \ -+ .y2 = crtc_state->crtc->mode.vdisplay, \ -+ }; \ -+ int __ret = drm_atomic_helper_check_plane_state(plane_state, \ -+ crtc_state, \ -+ &__clip, \ -+ min_scale, max_scale, \ -+ can_position, \ -+ can_update_disabled); \ -+ __ret; \ -+ }) -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) -+ -+#define drm_connector_attach_encoder(connector, encoder) \ -+ drm_mode_connector_attach_encoder(connector, encoder) -+ -+#define drm_connector_update_edid_property(connector, edid) \ -+ drm_mode_connector_update_edid_property(connector, edid) -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) */ -+ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)) -+ -+/* -+ * Work around architectures, e.g. MIPS, that define copy_from_user and -+ * copy_to_user as macros that call access_ok, as this gets redefined below. -+ * As of kernel 4.12, these functions are no longer defined per-architecture -+ * so this work around isn't needed. -+ */ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+#if defined(copy_from_user) -+ /* -+ * NOTE: This function should not be called directly as it exists simply to -+ * work around copy_from_user being defined as a macro that calls access_ok. 
-+ */ -+static inline int -+__pvr_copy_from_user(void *to, const void __user *from, unsigned long n) -+{ -+ return copy_from_user(to, from, n); -+} -+ -+#undef copy_from_user -+#define copy_from_user(to, from, n) __copy_from_user(to, from, n) -+#endif -+ -+#if defined(copy_to_user) -+ /* -+ * NOTE: This function should not be called directly as it exists simply to -+ * work around copy_to_user being defined as a macro that calls access_ok. -+ */ -+static inline int -+__pvr_copy_to_user(void __user *to, const void *from, unsigned long n) -+{ -+ return copy_to_user(to, from, n); -+} -+ -+#undef copy_to_user -+#define copy_to_user(to, from, n) __copy_to_user(to, from, n) -+#endif -+#endif -+ -+/* -+ * Linux 5.0 dropped the type argument. -+ * -+ * This is unused in at least Linux 3.4 and above for all architectures other -+ * than 'um' (User Mode Linux), which stopped using it in 4.2. -+ */ -+#if defined(access_ok) -+ /* -+ * NOTE: This function should not be called directly as it exists simply to -+ * work around access_ok being defined as a macro. -+ */ -+static inline int -+__pvr_access_ok_compat(int type, const void __user * addr, unsigned long size) -+{ -+ return access_ok(type, addr, size); -+} -+ -+#undef access_ok -+#define access_ok(addr, size) __pvr_access_ok_compat(0, addr, size) -+#else -+#define access_ok(addr, size) access_ok(0, addr, size) -+#endif -+ -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)) -+#define MODULE_IMPORT_NS(ns) -+#endif -+ -+/* -+ * Before v5.8, the "struct mm" has a semaphore named "mmap_sem" which is -+ * renamed to "mmap_lock" in v5.8. Moreover, new APIs are provided to -+ * access this lock starting from v5.8. -+ */ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)) -+ -+#define mmap_write_lock(mm) down_write(&mm->mmap_sem) -+#define mmap_write_unlock(mm) up_write(&mm->mmap_sem) -+ -+#define mmap_read_lock(mm) down_read(&mm->mmap_sem) -+#define mmap_read_unlock(mm) up_read(&mm->mmap_sem) -+ -+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+#define drm_gem_object_put(obj) drm_gem_object_unreference_unlocked(obj) -+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)) -+#define drm_gem_object_put(obj) drm_gem_object_put_unlocked(obj) -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) -+ -+#define drm_prime_pages_to_sg(dev, pages, nr_pages) \ -+ drm_prime_pages_to_sg(pages, nr_pages) -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) -+ -+struct dma_buf_map { -+ void *vaddr; -+}; -+ -+#define dma_buf_vmap(dmabuf, map) \ -+ ({ \ -+ (map)->vaddr = dma_buf_vmap(dmabuf); \ -+ (map)->vaddr ? 0 : ((dmabuf) && (dmabuf)->ops->vmap) ? 
-ENOMEM : -EINVAL; \ -+ }) -+ -+#define dma_buf_vunmap(dmabuf, map) \ -+ ({ \ -+ dma_buf_vunmap(dmabuf, (map)->vaddr); \ -+ (map)->vaddr = NULL; \ -+ }) -+ -+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 12, 0)) -+ -+#define drm_prime_sg_to_page_array(sgt, pages, npages) \ -+ drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, npages) -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 12, 0)) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)) -+ -+#define drm_gem_plane_helper_prepare_fb drm_gem_fb_prepare_fb -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)) */ -+ -+/* -+ * Linux 5.11 renames the privileged uaccess routines for arm64 and Android -+ * kernel v5.10 merges the change as well. These routines are only used for -+ * arm64 so CONFIG_ARM64 testing can be ignored. -+ */ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) || \ -+ ((LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) && !defined(ANDROID)) -+#define uaccess_enable_privileged() uaccess_enable() -+#define uaccess_disable_privileged() uaccess_disable() -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)) -+#define pde_data PDE_DATA -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)) -+#define kthread_complete_and_exit(comp, ret) complete_and_exit(comp, ret); -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0)) -+#define iosys_map dma_buf_map -+#define iosys_map_set_vaddr_iomem dma_buf_map_set_vaddr_iomem -+#define iosys_map_clear dma_buf_map_clear -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0)) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) -+ -+#define register_shrinker(shrinker, name) \ -+ register_shrinker(shrinker) -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)) -+#define DRM_PLANE_NO_SCALING DRM_PLANE_HELPER_NO_SCALING -+#define drm_plane_helper_destroy drm_primary_helper_destroy -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)) -+#define genl_split_ops genl_ops -+#define COMPAT_FB_INFO fbdev -+#define drm_fb_helper_alloc_info drm_fb_helper_alloc_fbi -+#define drm_fb_helper_unregister_info drm_fb_helper_unregister_fbi -+#else -+#define COMPAT_FB_INFO info -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)) */ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)) || \ -+ ((LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)) && !defined(ANDROID)) -+static inline void pvr_vm_flags_set(struct vm_area_struct *vma, -+ vm_flags_t flags) -+{ -+ vma->vm_flags |= flags; -+} -+static inline void pvr_vm_flags_init(struct vm_area_struct *vma, -+ vm_flags_t flags) -+{ -+ vma->vm_flags = flags; -+} -+#else -+#define pvr_vm_flags_set vm_flags_set -+#define pvr_vm_flags_init vm_flags_init -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)) */ -+ -+#if defined(__GNUC__) -+#define GCC_VERSION_AT_LEAST(major, minor) \ -+ (__GNUC__ > (major) || \ -+ (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) -+#else -+#define GCC_VERSION_AT_LEAST(major, minor) 0 -+#endif -+ -+#if defined(__clang__) -+#define CLANG_VERSION_AT_LEAST(major) \ -+ (__clang_major__ >= (major)) -+#else -+#define CLANG_VERSION_AT_LEAST(major) 0 -+#endif -+ -+#if !defined(__fallthrough) -+ #if GCC_VERSION_AT_LEAST(7, 0) || CLANG_VERSION_AT_LEAST(10) -+ #define 
__fallthrough __attribute__((__fallthrough__)) -+ #else -+ #define __fallthrough -+ #endif -+#endif -+ -+#endif /* __KERNEL_COMPATIBILITY_H__ */ -diff --git a/drivers/gpu/drm/img-rogue/kernel_config_compatibility.h b/drivers/gpu/drm/img-rogue/kernel_config_compatibility.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/kernel_config_compatibility.h -@@ -0,0 +1,54 @@ -+/*************************************************************************/ /*! -+@Title Kernel config compatibility define options -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description This file is exclusively for Linux config kernel options. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef __KERNEL_CONFIG_COMPATIBILITY_H__ -+#define __KERNEL_CONFIG_COMPATIBILITY_H__ -+ -+#include -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) -+#ifdef SUPPORT_DRM_FBDEV_EMULATION -+#define CONFIG_DRM_FBDEV_EMULATION -+#endif -+#endif -+ -+#endif /* __KERNEL_CONFIG_COMPATIBILITY_H__ */ -diff --git a/drivers/gpu/drm/img-rogue/kernel_nospec.h b/drivers/gpu/drm/img-rogue/kernel_nospec.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/kernel_nospec.h -@@ -0,0 +1,71 @@ -+/*************************************************************************/ /*! 
-+@Title Macro to limit CPU speculative execution in kernel code -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Per-version macros to allow code to seamlessly use older kernel -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef __KERNEL_NOSPEC_H__ -+#define __KERNEL_NOSPEC_H__ -+ -+#include -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 2) || \ -+ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) && \ -+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 18)) || \ -+ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && \ -+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 81)) || \ -+ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) && \ -+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 118))) -+#include -+#include -+#include -+#else -+#define array_index_nospec(index, size) (index) -+#endif -+ -+/* -+ * For Ubuntu kernels, the features available for a given Linux version code -+ * may not match those in upstream kernels. This is the case for the -+ * availability of the array_index_nospec macro. 
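 *
 * Typical use of the macro, sketched with hypothetical names: after the
 * bounds check, the index is clamped so it cannot be used speculatively
 * out of range (the fallback below is a no-op on kernels that lack the
 * macro):
 *
 *   if (ui32Idx >= ui32TableSize)
 *       return PVRSRV_ERROR_INVALID_PARAMS;
 *   ui32Idx = array_index_nospec(ui32Idx, ui32TableSize);
 *   psEntry = &psTable[ui32Idx];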
-+ */ -+#if !defined(array_index_nospec) -+#define array_index_nospec(index, size) (index) -+#endif -+ -+#endif /* __KERNEL_NOSPEC_H__ */ -diff --git a/drivers/gpu/drm/img-rogue/kernel_types.h b/drivers/gpu/drm/img-rogue/kernel_types.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/kernel_types.h -@@ -0,0 +1,137 @@ -+/*************************************************************************/ /*! -+@Title C99-compatible types and definitions for Linux kernel code -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include -+ -+/* Limits of specified-width integer types */ -+ -+/* S8_MIN, etc were added in kernel version 3.14. The other versions are for -+ * earlier kernels. They can be removed once older kernels don't need to be -+ * supported. 
-+ */ -+#ifdef S8_MIN -+ #define INT8_MIN S8_MIN -+#else -+ #define INT8_MIN (-128) -+#endif -+ -+#ifdef S8_MAX -+ #define INT8_MAX S8_MAX -+#else -+ #define INT8_MAX 127 -+#endif -+ -+#ifdef U8_MAX -+ #define UINT8_MAX U8_MAX -+#else -+ #define UINT8_MAX 0xFF -+#endif -+ -+#ifdef S16_MIN -+ #define INT16_MIN S16_MIN -+#else -+ #define INT16_MIN (-32768) -+#endif -+ -+#ifdef S16_MAX -+ #define INT16_MAX S16_MAX -+#else -+ #define INT16_MAX 32767 -+#endif -+ -+#ifdef U16_MAX -+ #define UINT16_MAX U16_MAX -+#else -+ #define UINT16_MAX 0xFFFF -+#endif -+ -+#ifdef S32_MIN -+ #define INT32_MIN S32_MIN -+#else -+ #define INT32_MIN (-2147483647 - 1) -+#endif -+ -+#ifdef S32_MAX -+ #define INT32_MAX S32_MAX -+#else -+ #define INT32_MAX 2147483647 -+#endif -+ -+#ifdef U32_MAX -+ #define UINT32_MAX U32_MAX -+#else -+ #define UINT32_MAX 0xFFFFFFFF -+#endif -+ -+#ifdef S64_MIN -+ #define INT64_MIN S64_MIN -+#else -+ #define INT64_MIN (-9223372036854775807LL) -+#endif -+ -+#ifdef S64_MAX -+ #define INT64_MAX S64_MAX -+#else -+ #define INT64_MAX 9223372036854775807LL -+#endif -+ -+#ifdef U64_MAX -+ #define UINT64_MAX U64_MAX -+#else -+ #define UINT64_MAX 0xFFFFFFFFFFFFFFFFULL -+#endif -+ -+/* Macros for integer constants */ -+#define INT8_C S8_C -+#define UINT8_C U8_C -+#define INT16_C S16_C -+#define UINT16_C U16_C -+#define INT32_C S32_C -+#define UINT32_C U32_C -+#define INT64_C S64_C -+#define UINT64_C U64_C -+ -+/* Format conversion of integer types */ -+ -+#define PRIX64 "llX" -+#define PRIx64 "llx" -+#define PRIu64 "llu" -+#define PRId64 "lld" -diff --git a/drivers/gpu/drm/img-rogue/km/rgx_bvnc_defs_km.h b/drivers/gpu/drm/img-rogue/km/rgx_bvnc_defs_km.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/km/rgx_bvnc_defs_km.h -@@ -0,0 +1,393 @@ -+/*************************************************************************/ /*! -+@Title Hardware definition file rgx_bvnc_defs_km.h -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+/****************************************************************************** -+ * Auto generated file by rgxbvnc_tablegen.py * -+ * This file should not be edited manually * -+ *****************************************************************************/ -+ -+#ifndef RGX_BVNC_DEFS_KM_H -+#define RGX_BVNC_DEFS_KM_H -+ -+#include "img_types.h" -+#include "img_defs.h" -+ -+#if defined(RGX_BVNC_DEFS_UM_H) -+#error "This file should not be included in conjunction with rgx_bvnc_defs_um.h" -+#endif -+ -+#define BVNC_FIELD_WIDTH (16U) -+ -+#define PVR_ARCH_NAME "rogue" -+ -+ -+/****************************************************************************** -+ * Mask and bit-position macros for features without values -+ *****************************************************************************/ -+ -+#define RGX_FEATURE_AXI_ACELITE_POS (0U) -+#define RGX_FEATURE_AXI_ACELITE_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) -+ -+#define RGX_FEATURE_CLUSTER_GROUPING_POS (1U) -+#define RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) -+ -+#define RGX_FEATURE_COMPUTE_POS (2U) -+#define RGX_FEATURE_COMPUTE_BIT_MASK (IMG_UINT64_C(0x0000000000000004)) -+ -+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_POS (3U) -+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_BIT_MASK (IMG_UINT64_C(0x0000000000000008)) -+ -+#define RGX_FEATURE_COMPUTE_ONLY_POS (4U) -+#define RGX_FEATURE_COMPUTE_ONLY_BIT_MASK (IMG_UINT64_C(0x0000000000000010)) -+ -+#define RGX_FEATURE_COMPUTE_OVERLAP_POS (5U) -+#define RGX_FEATURE_COMPUTE_OVERLAP_BIT_MASK (IMG_UINT64_C(0x0000000000000020)) -+ -+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_POS (6U) -+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_BIT_MASK (IMG_UINT64_C(0x0000000000000040)) -+ -+#define RGX_FEATURE_COREID_PER_OS_POS (7U) -+#define RGX_FEATURE_COREID_PER_OS_BIT_MASK (IMG_UINT64_C(0x0000000000000080)) -+ -+#define RGX_FEATURE_DUST_POWER_ISLAND_S7_POS (8U) -+#define RGX_FEATURE_DUST_POWER_ISLAND_S7_BIT_MASK (IMG_UINT64_C(0x0000000000000100)) -+ -+#define RGX_FEATURE_DYNAMIC_DUST_POWER_POS (9U) -+#define RGX_FEATURE_DYNAMIC_DUST_POWER_BIT_MASK (IMG_UINT64_C(0x0000000000000200)) -+ -+#define RGX_FEATURE_FASTRENDER_DM_POS (10U) -+#define RGX_FEATURE_FASTRENDER_DM_BIT_MASK (IMG_UINT64_C(0x0000000000000400)) -+ -+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_POS (11U) -+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000000800)) -+ -+#define RGX_FEATURE_GPU_VIRTUALISATION_POS (12U) -+#define RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK (IMG_UINT64_C(0x0000000000001000)) -+ -+#define RGX_FEATURE_GS_RTA_SUPPORT_POS (13U) -+#define RGX_FEATURE_GS_RTA_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000002000)) -+ -+#define RGX_FEATURE_IRQ_PER_OS_POS (14U) -+#define RGX_FEATURE_IRQ_PER_OS_BIT_MASK (IMG_UINT64_C(0x0000000000004000)) -+ -+#define RGX_FEATURE_META_DMA_POS (15U) -+#define RGX_FEATURE_META_DMA_BIT_MASK 
(IMG_UINT64_C(0x0000000000008000)) -+ -+#define RGX_FEATURE_MIPS_POS (16U) -+#define RGX_FEATURE_MIPS_BIT_MASK (IMG_UINT64_C(0x0000000000010000)) -+ -+#define RGX_FEATURE_PBE2_IN_XE_POS (17U) -+#define RGX_FEATURE_PBE2_IN_XE_BIT_MASK (IMG_UINT64_C(0x0000000000020000)) -+ -+#define RGX_FEATURE_PBE_CHECKSUM_2D_POS (18U) -+#define RGX_FEATURE_PBE_CHECKSUM_2D_BIT_MASK (IMG_UINT64_C(0x0000000000040000)) -+ -+#define RGX_FEATURE_PBVNC_COREID_REG_POS (19U) -+#define RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK (IMG_UINT64_C(0x0000000000080000)) -+ -+#define RGX_FEATURE_PDS_PER_DUST_POS (20U) -+#define RGX_FEATURE_PDS_PER_DUST_BIT_MASK (IMG_UINT64_C(0x0000000000100000)) -+ -+#define RGX_FEATURE_PDS_TEMPSIZE8_POS (21U) -+#define RGX_FEATURE_PDS_TEMPSIZE8_BIT_MASK (IMG_UINT64_C(0x0000000000200000)) -+ -+#define RGX_FEATURE_PERFBUS_POS (22U) -+#define RGX_FEATURE_PERFBUS_BIT_MASK (IMG_UINT64_C(0x0000000000400000)) -+ -+#define RGX_FEATURE_PERF_COUNTER_BATCH_POS (23U) -+#define RGX_FEATURE_PERF_COUNTER_BATCH_BIT_MASK (IMG_UINT64_C(0x0000000000800000)) -+ -+#define RGX_FEATURE_PM_MMU_VFP_POS (24U) -+#define RGX_FEATURE_PM_MMU_VFP_BIT_MASK (IMG_UINT64_C(0x0000000001000000)) -+ -+#define RGX_FEATURE_RISCV_FW_PROCESSOR_POS (25U) -+#define RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK (IMG_UINT64_C(0x0000000002000000)) -+ -+#define RGX_FEATURE_ROGUEXE_POS (26U) -+#define RGX_FEATURE_ROGUEXE_BIT_MASK (IMG_UINT64_C(0x0000000004000000)) -+ -+#define RGX_FEATURE_S7_CACHE_HIERARCHY_POS (27U) -+#define RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0000000008000000)) -+ -+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_POS (28U) -+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000010000000)) -+ -+#define RGX_FEATURE_SCALABLE_VDM_GPP_POS (29U) -+#define RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK (IMG_UINT64_C(0x0000000020000000)) -+ -+#define RGX_FEATURE_SIGNAL_SNOOPING_POS (30U) -+#define RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK (IMG_UINT64_C(0x0000000040000000)) -+ -+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_POS (31U) -+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_BIT_MASK (IMG_UINT64_C(0x0000000080000000)) -+ -+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_POS (32U) -+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_BIT_MASK (IMG_UINT64_C(0x0000000100000000)) -+ -+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_POS (33U) -+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_BIT_MASK (IMG_UINT64_C(0x0000000200000000)) -+ -+#define RGX_FEATURE_SINGLE_BIF_POS (34U) -+#define RGX_FEATURE_SINGLE_BIF_BIT_MASK (IMG_UINT64_C(0x0000000400000000)) -+ -+#define RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_POS (35U) -+#define RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_BIT_MASK (IMG_UINT64_C(0x0000000800000000)) -+ -+#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE_POS (36U) -+#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE_BIT_MASK (IMG_UINT64_C(0x0000001000000000)) -+ -+#define RGX_FEATURE_SLC_VIVT_POS (37U) -+#define RGX_FEATURE_SLC_VIVT_BIT_MASK (IMG_UINT64_C(0x0000002000000000)) -+ -+#define RGX_FEATURE_SOC_TIMER_POS (38U) -+#define RGX_FEATURE_SOC_TIMER_BIT_MASK (IMG_UINT64_C(0x0000004000000000)) -+ -+#define RGX_FEATURE_SYS_BUS_SECURE_RESET_POS (39U) -+#define RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK (IMG_UINT64_C(0x0000008000000000)) -+ -+#define RGX_FEATURE_TDM_PDS_CHECKSUM_POS (40U) -+#define RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK (IMG_UINT64_C(0x0000010000000000)) -+ -+#define RGX_FEATURE_TESSELLATION_POS (41U) -+#define RGX_FEATURE_TESSELLATION_BIT_MASK 
(IMG_UINT64_C(0x0000020000000000)) -+ -+#define RGX_FEATURE_TFBC_DELTA_CORRELATION_POS (42U) -+#define RGX_FEATURE_TFBC_DELTA_CORRELATION_BIT_MASK (IMG_UINT64_C(0x0000040000000000)) -+ -+#define RGX_FEATURE_TFBC_LOSSY_37_PERCENT_POS (43U) -+#define RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK (IMG_UINT64_C(0x0000080000000000)) -+ -+#define RGX_FEATURE_TFBC_NATIVE_YUV10_POS (44U) -+#define RGX_FEATURE_TFBC_NATIVE_YUV10_BIT_MASK (IMG_UINT64_C(0x0000100000000000)) -+ -+#define RGX_FEATURE_TILE_REGION_PROTECTION_POS (45U) -+#define RGX_FEATURE_TILE_REGION_PROTECTION_BIT_MASK (IMG_UINT64_C(0x0000200000000000)) -+ -+#define RGX_FEATURE_TLA_POS (46U) -+#define RGX_FEATURE_TLA_BIT_MASK (IMG_UINT64_C(0x0000400000000000)) -+ -+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_POS (47U) -+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0000800000000000)) -+ -+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_POS (48U) -+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0001000000000000)) -+ -+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_POS (49U) -+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK (IMG_UINT64_C(0x0002000000000000)) -+ -+#define RGX_FEATURE_VDM_DRAWINDIRECT_POS (50U) -+#define RGX_FEATURE_VDM_DRAWINDIRECT_BIT_MASK (IMG_UINT64_C(0x0004000000000000)) -+ -+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_POS (51U) -+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK (IMG_UINT64_C(0x0008000000000000)) -+ -+#define RGX_FEATURE_VOLCANIC_TB_POS (52U) -+#define RGX_FEATURE_VOLCANIC_TB_BIT_MASK (IMG_UINT64_C(0x0010000000000000)) -+ -+#define RGX_FEATURE_WATCHDOG_TIMER_POS (53U) -+#define RGX_FEATURE_WATCHDOG_TIMER_BIT_MASK (IMG_UINT64_C(0x0020000000000000)) -+ -+#define RGX_FEATURE_WORKGROUP_PROTECTION_POS (54U) -+#define RGX_FEATURE_WORKGROUP_PROTECTION_BIT_MASK (IMG_UINT64_C(0x0040000000000000)) -+ -+#define RGX_FEATURE_XE_MEMORY_HIERARCHY_POS (55U) -+#define RGX_FEATURE_XE_MEMORY_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0080000000000000)) -+ -+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_POS (56U) -+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0100000000000000)) -+ -+ -+/****************************************************************************** -+ * Defines for each feature with values used -+ * for handling the corresponding values -+ *****************************************************************************/ -+ -+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX (3U) -+#define RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX (3U) -+#define RGX_FEATURE_FBCDC_MAX_VALUE_IDX (4U) -+#define RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX (6U) -+#define RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX (5U) -+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX (2U) -+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX (2U) -+#define RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX (3U) -+#define RGX_FEATURE_META_MAX_VALUE_IDX (5U) -+#define RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX (2U) -+#define RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX (4U) -+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX (2U) -+#define RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX (5U) -+#define RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX (9U) -+#define RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX (2U) -+#define RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX (3U) -+#define RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX (4U) -+#define RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX (4U) -+#define RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX (2U) -+#define 
RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX (2U) -+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX (3U) -+#define RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX (4U) -+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX (2U) -+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX (7U) -+#define RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX (4U) -+#define RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX (3U) -+#define RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX (3U) -+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX (2U) -+#define RGX_FEATURE_XE_ARCHITECTURE_MAX_VALUE_IDX (2U) -+#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_MAX_VALUE_IDX (2U) -+#define RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX (3U) -+#define RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX (3U) -+ -+/****************************************************************************** -+ * Features with values indexes -+ *****************************************************************************/ -+ -+typedef enum _RGX_FEATURE_WITH_VALUE_INDEX_ { -+ RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_IDX, -+ RGX_FEATURE_ECC_RAMS_IDX, -+ RGX_FEATURE_FBCDC_IDX, -+ RGX_FEATURE_FBCDC_ALGORITHM_IDX, -+ RGX_FEATURE_FBCDC_ARCHITECTURE_IDX, -+ RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_IDX, -+ RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_IDX, -+ RGX_FEATURE_LAYOUT_MARS_IDX, -+ RGX_FEATURE_META_IDX, -+ RGX_FEATURE_META_COREMEM_BANKS_IDX, -+ RGX_FEATURE_META_COREMEM_SIZE_IDX, -+ RGX_FEATURE_META_DMA_CHANNEL_COUNT_IDX, -+ RGX_FEATURE_NUM_CLUSTERS_IDX, -+ RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX, -+ RGX_FEATURE_NUM_MEMBUS_IDX, -+ RGX_FEATURE_NUM_OSIDS_IDX, -+ RGX_FEATURE_NUM_RASTER_PIPES_IDX, -+ RGX_FEATURE_PHYS_BUS_WIDTH_IDX, -+ RGX_FEATURE_SCALABLE_TE_ARCH_IDX, -+ RGX_FEATURE_SCALABLE_VCE_IDX, -+ RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_IDX, -+ RGX_FEATURE_SLC_BANKS_IDX, -+ RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_IDX, -+ RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_IDX, -+ RGX_FEATURE_TFBC_VERSION_IDX, -+ RGX_FEATURE_TILE_SIZE_X_IDX, -+ RGX_FEATURE_TILE_SIZE_Y_IDX, -+ RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_IDX, -+ RGX_FEATURE_XE_ARCHITECTURE_IDX, -+ RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_IDX, -+ RGX_FEATURE_XPU_MAX_SLAVES_IDX, -+ RGX_FEATURE_XPU_REGISTER_BROADCAST_IDX, -+ RGX_FEATURE_WITH_VALUES_MAX_IDX, -+} RGX_FEATURE_WITH_VALUE_INDEX; -+ -+ -+/****************************************************************************** -+ * Mask and bit-position macros for ERNs and BRNs -+ *****************************************************************************/ -+ -+#define FIX_HW_BRN_38344_POS (0U) -+#define FIX_HW_BRN_38344_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) -+ -+#define HW_ERN_42290_POS (1U) -+#define HW_ERN_42290_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) -+ -+#define FIX_HW_BRN_42321_POS (2U) -+#define FIX_HW_BRN_42321_BIT_MASK (IMG_UINT64_C(0x0000000000000004)) -+ -+#define HW_ERN_42606_POS (3U) -+#define HW_ERN_42606_BIT_MASK (IMG_UINT64_C(0x0000000000000008)) -+ -+#define HW_ERN_46066_POS (4U) -+#define HW_ERN_46066_BIT_MASK (IMG_UINT64_C(0x0000000000000010)) -+ -+#define HW_ERN_47025_POS (5U) -+#define HW_ERN_47025_BIT_MASK (IMG_UINT64_C(0x0000000000000020)) -+ -+#define HW_ERN_50539_POS (6U) -+#define HW_ERN_50539_BIT_MASK (IMG_UINT64_C(0x0000000000000040)) -+ -+#define FIX_HW_BRN_50767_POS (7U) -+#define FIX_HW_BRN_50767_BIT_MASK (IMG_UINT64_C(0x0000000000000080)) -+ -+#define HW_ERN_57596_POS (8U) -+#define HW_ERN_57596_BIT_MASK (IMG_UINT64_C(0x0000000000000100)) -+ -+#define FIX_HW_BRN_60084_POS (9U) -+#define FIX_HW_BRN_60084_BIT_MASK 
(IMG_UINT64_C(0x0000000000000200)) -+ -+#define HW_ERN_61389_POS (10U) -+#define HW_ERN_61389_BIT_MASK (IMG_UINT64_C(0x0000000000000400)) -+ -+#define FIX_HW_BRN_61450_POS (11U) -+#define FIX_HW_BRN_61450_BIT_MASK (IMG_UINT64_C(0x0000000000000800)) -+ -+#define FIX_HW_BRN_63142_POS (12U) -+#define FIX_HW_BRN_63142_BIT_MASK (IMG_UINT64_C(0x0000000000001000)) -+ -+#define FIX_HW_BRN_63553_POS (13U) -+#define FIX_HW_BRN_63553_BIT_MASK (IMG_UINT64_C(0x0000000000002000)) -+ -+#define FIX_HW_BRN_64502_POS (14U) -+#define FIX_HW_BRN_64502_BIT_MASK (IMG_UINT64_C(0x0000000000004000)) -+ -+#define FIX_HW_BRN_65101_POS (15U) -+#define FIX_HW_BRN_65101_BIT_MASK (IMG_UINT64_C(0x0000000000008000)) -+ -+#define FIX_HW_BRN_65273_POS (16U) -+#define FIX_HW_BRN_65273_BIT_MASK (IMG_UINT64_C(0x0000000000010000)) -+ -+#define HW_ERN_66622_POS (17U) -+#define HW_ERN_66622_BIT_MASK (IMG_UINT64_C(0x0000000000020000)) -+ -+#define FIX_HW_BRN_66927_POS (18U) -+#define FIX_HW_BRN_66927_BIT_MASK (IMG_UINT64_C(0x0000000000040000)) -+ -+#define FIX_HW_BRN_68186_POS (19U) -+#define FIX_HW_BRN_68186_BIT_MASK (IMG_UINT64_C(0x0000000000080000)) -+ -+#define FIX_HW_BRN_71317_POS (20U) -+#define FIX_HW_BRN_71317_BIT_MASK (IMG_UINT64_C(0x0000000000100000)) -+ -+#define FIX_HW_BRN_73472_POS (21U) -+#define FIX_HW_BRN_73472_BIT_MASK (IMG_UINT64_C(0x0000000000200000)) -+ -+/* Macro used for padding the unavailable values for features with values */ -+#define RGX_FEATURE_VALUE_INVALID (0xFFFFFFFEU) -+ -+/* Macro used for marking a feature with value as disabled for a specific bvnc */ -+#define RGX_FEATURE_VALUE_DISABLED (0xFFFFFFFFU) -+ -+#endif /* RGX_BVNC_DEFS_KM_H */ -diff --git a/drivers/gpu/drm/img-rogue/km/rgx_bvnc_table_km.h b/drivers/gpu/drm/img-rogue/km/rgx_bvnc_table_km.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/km/rgx_bvnc_table_km.h -@@ -0,0 +1,487 @@ -+/*************************************************************************/ /*! -+@Title Hardware definition file rgx_bvnc_table_km.h -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+/****************************************************************************** -+ * Auto generated file by rgxbvnc_tablegen.py * -+ * This file should not be edited manually * -+ *****************************************************************************/ -+ -+#ifndef RGX_BVNC_TABLE_KM_H -+#define RGX_BVNC_TABLE_KM_H -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "rgxdefs_km.h" -+#include "rgx_bvnc_defs_km.h" -+ -+#ifndef RGXBVNC_C -+#error "This file should only be included from rgxbvnc.c" -+#endif -+ -+#if defined(RGX_BVNC_TABLE_UM_H) -+#error "This file should not be included in conjunction with rgx_bvnc_table_um.h" -+#endif -+ -+ -+/****************************************************************************** -+ * Arrays for each feature with values used -+ * for handling the corresponding values -+ *****************************************************************************/ -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values[RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_ECC_RAMS_values[RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 2, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_values[RGX_FEATURE_FBCDC_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, 50, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ALGORITHM_values[RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, 50, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values[RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 7, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_values[RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_values[RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_LAYOUT_MARS_values[RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_META_values[RGX_FEATURE_META_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, LTP217, LTP218, MTP218, MTP219, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_BANKS_values[RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 8, }; -+ -+static const IMG_UINT16 
aui16_RGX_FEATURE_META_COREMEM_SIZE_values[RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 32, 256, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values[RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 4, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_CLUSTERS_values[RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, 6, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values[RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, 6, 7, 8, 12, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_MEMBUS_values[RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_OSIDS_values[RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 8, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_RASTER_PIPES_values[RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, 2, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values[RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 32, 36, 40, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values[RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_VCE_values[RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_values[RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_BANKS_values[RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values[RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 512, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values[RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 8, 16, 64, 128, 512, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_TFBC_VERSION_values[RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 10, 11, 20, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_TILE_SIZE_X_values[RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 16, 32, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_TILE_SIZE_Y_values[RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 16, 32, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values[RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 40, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_XE_ARCHITECTURE_values[RGX_FEATURE_XE_ARCHITECTURE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_values[RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 19, }; -+ -+static const IMG_UINT16 aui16_RGX_FEATURE_XPU_MAX_SLAVES_values[RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 3, }; -+ 
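/* Illustrative sketch only (hypothetical local names, not part of the
 * generated tables): a feature value is recovered by turning the index
 * decoded from the packed per-BVNC feature bits into a bounds-checked
 * lookup in the matching values array, with index 0 reserved for
 * RGX_FEATURE_VALUE_DISABLED, e.g.
 *
 *   if (ui16Idx < RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX)
 *       ui32SLCBanks = aui16_RGX_FEATURE_SLC_BANKS_values[ui16Idx];
 */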
-+static const IMG_UINT16 aui16_RGX_FEATURE_XPU_REGISTER_BROADCAST_values[RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, }; -+ -+ -+/****************************************************************************** -+ * Table contains pointers to each feature value array for features that have -+ * values. -+ * Indexed using enum RGX_FEATURE_WITH_VALUE_INDEX from rgx_bvnc_defs_km.h -+ *****************************************************************************/ -+ -+static const void * const gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX] = { -+ aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values, -+ aui16_RGX_FEATURE_ECC_RAMS_values, -+ aui16_RGX_FEATURE_FBCDC_values, -+ aui16_RGX_FEATURE_FBCDC_ALGORITHM_values, -+ aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values, -+ aui16_RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_values, -+ aui16_RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_values, -+ aui16_RGX_FEATURE_LAYOUT_MARS_values, -+ aui16_RGX_FEATURE_META_values, -+ aui16_RGX_FEATURE_META_COREMEM_BANKS_values, -+ aui16_RGX_FEATURE_META_COREMEM_SIZE_values, -+ aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values, -+ aui16_RGX_FEATURE_NUM_CLUSTERS_values, -+ aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values, -+ aui16_RGX_FEATURE_NUM_MEMBUS_values, -+ aui16_RGX_FEATURE_NUM_OSIDS_values, -+ aui16_RGX_FEATURE_NUM_RASTER_PIPES_values, -+ aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values, -+ aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values, -+ aui16_RGX_FEATURE_SCALABLE_VCE_values, -+ aui16_RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_values, -+ aui16_RGX_FEATURE_SLC_BANKS_values, -+ aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values, -+ aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values, -+ aui16_RGX_FEATURE_TFBC_VERSION_values, -+ aui16_RGX_FEATURE_TILE_SIZE_X_values, -+ aui16_RGX_FEATURE_TILE_SIZE_Y_values, -+ aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values, -+ aui16_RGX_FEATURE_XE_ARCHITECTURE_values, -+ aui16_RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_values, -+ aui16_RGX_FEATURE_XPU_MAX_SLAVES_values, -+ aui16_RGX_FEATURE_XPU_REGISTER_BROADCAST_values, -+}; -+ -+ -+/****************************************************************************** -+ * Array containing the lengths of the arrays containing the values. 
-+ * Used for indexing the aui16__values defined upwards -+ *****************************************************************************/ -+ -+ -+static const IMG_UINT16 gaFeaturesValuesMaxIndexes[] = { -+ RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX, -+ RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX, -+ RGX_FEATURE_FBCDC_MAX_VALUE_IDX, -+ RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX, -+ RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX, -+ RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX, -+ RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX, -+ RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX, -+ RGX_FEATURE_META_MAX_VALUE_IDX, -+ RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX, -+ RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX, -+ RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX, -+ RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX, -+ RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX, -+ RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX, -+ RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX, -+ RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX, -+ RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX, -+ RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX, -+ RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX, -+ RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX, -+ RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX, -+ RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX, -+ RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX, -+ RGX_FEATURE_TFBC_VERSION_MAX_VALUE_IDX, -+ RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX, -+ RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX, -+ RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX, -+ RGX_FEATURE_XE_ARCHITECTURE_MAX_VALUE_IDX, -+ RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_MAX_VALUE_IDX, -+ RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX, -+ RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX, -+}; -+ -+#define RGX_FEATURE_VALUE_TYPE_UINT16 (0x0000U) -+#define RGX_FEATURE_VALUE_TYPE_UINT32 (0x8000U) -+#define RGX_FEATURE_TYPE_BIT_SHIFT 14 -+ -+/****************************************************************************** -+ * Bit-positions for features with values -+ *****************************************************************************/ -+ -+static const IMG_UINT16 aui16FeaturesWithValuesBitPositions[] = { -+ (0U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_POS */ -+ (2U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_ECC_RAMS_POS */ -+ (4U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBCDC_POS */ -+ (7U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBCDC_ALGORITHM_POS */ -+ (10U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBCDC_ARCHITECTURE_POS */ -+ (13U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_POS */ -+ (15U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_POS */ -+ (17U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_LAYOUT_MARS_POS */ -+ (19U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_POS */ -+ (22U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_COREMEM_BANKS_POS */ -+ (24U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_COREMEM_SIZE_POS */ -+ (27U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_POS */ -+ (29U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_CLUSTERS_POS */ -+ (32U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_ISP_IPP_PIPES_POS */ -+ (36U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_MEMBUS_POS */ -+ (38U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_OSIDS_POS */ -+ (40U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_RASTER_PIPES_POS */ -+ (43U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* 
RGX_FEATURE_PHYS_BUS_WIDTH_POS */ -+ (46U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SCALABLE_TE_ARCH_POS */ -+ (48U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SCALABLE_VCE_POS */ -+ (50U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_POS */ -+ (52U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SLC_BANKS_POS */ -+ (55U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_POS */ -+ (57U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_POS */ -+ (60U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_TFBC_VERSION_POS */ -+ (64U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_TILE_SIZE_X_POS */ -+ (66U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_TILE_SIZE_Y_POS */ -+ (68U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_POS */ -+ (70U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_XE_ARCHITECTURE_POS */ -+ (72U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_POS */ -+ (74U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_XPU_MAX_SLAVES_POS */ -+ (76U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_XPU_REGISTER_BROADCAST_POS */ -+}; -+ -+ -+/****************************************************************************** -+ * Bit-masks for features with values -+ *****************************************************************************/ -+ -+static const IMG_UINT64 aui64FeaturesWithValuesBitMasks[] = { -+ (IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_BIT_MASK */ -+ (IMG_UINT64_C(0x000000000000000C)), /* RGX_FEATURE_ECC_RAMS_BIT_MASK */ -+ (IMG_UINT64_C(0x0000000000000070)), /* RGX_FEATURE_FBCDC_BIT_MASK */ -+ (IMG_UINT64_C(0x0000000000000380)), /* RGX_FEATURE_FBCDC_ALGORITHM_BIT_MASK */ -+ (IMG_UINT64_C(0x0000000000001C00)), /* RGX_FEATURE_FBCDC_ARCHITECTURE_BIT_MASK */ -+ (IMG_UINT64_C(0x0000000000006000)), /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_BIT_MASK */ -+ (IMG_UINT64_C(0x0000000000018000)), /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_BIT_MASK */ -+ (IMG_UINT64_C(0x0000000000060000)), /* RGX_FEATURE_LAYOUT_MARS_BIT_MASK */ -+ (IMG_UINT64_C(0x0000000000380000)), /* RGX_FEATURE_META_BIT_MASK */ -+ (IMG_UINT64_C(0x0000000000C00000)), /* RGX_FEATURE_META_COREMEM_BANKS_BIT_MASK */ -+ (IMG_UINT64_C(0x0000000007000000)), /* RGX_FEATURE_META_COREMEM_SIZE_BIT_MASK */ -+ (IMG_UINT64_C(0x0000000018000000)), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_BIT_MASK */ -+ (IMG_UINT64_C(0x00000000E0000000)), /* RGX_FEATURE_NUM_CLUSTERS_BIT_MASK */ -+ (IMG_UINT64_C(0x0000000F00000000)), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_BIT_MASK */ -+ (IMG_UINT64_C(0x0000003000000000)), /* RGX_FEATURE_NUM_MEMBUS_BIT_MASK */ -+ (IMG_UINT64_C(0x000000C000000000)), /* RGX_FEATURE_NUM_OSIDS_BIT_MASK */ -+ (IMG_UINT64_C(0x0000070000000000)), /* RGX_FEATURE_NUM_RASTER_PIPES_BIT_MASK */ -+ (IMG_UINT64_C(0x0000380000000000)), /* RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK */ -+ (IMG_UINT64_C(0x0000C00000000000)), /* RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK */ -+ (IMG_UINT64_C(0x0003000000000000)), /* RGX_FEATURE_SCALABLE_VCE_BIT_MASK */ -+ (IMG_UINT64_C(0x000C000000000000)), /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_BIT_MASK */ -+ (IMG_UINT64_C(0x0070000000000000)), /* RGX_FEATURE_SLC_BANKS_BIT_MASK */ -+ (IMG_UINT64_C(0x0180000000000000)), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK */ -+ (IMG_UINT64_C(0x0E00000000000000)), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_BIT_MASK */ -+ (IMG_UINT64_C(0x7000000000000000)), /* RGX_FEATURE_TFBC_VERSION_BIT_MASK */ -+ 
(IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_TILE_SIZE_X_BIT_MASK */ -+ (IMG_UINT64_C(0x000000000000000C)), /* RGX_FEATURE_TILE_SIZE_Y_BIT_MASK */ -+ (IMG_UINT64_C(0x0000000000000030)), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_BIT_MASK */ -+ (IMG_UINT64_C(0x00000000000000C0)), /* RGX_FEATURE_XE_ARCHITECTURE_BIT_MASK */ -+ (IMG_UINT64_C(0x0000000000000300)), /* RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_BIT_MASK */ -+ (IMG_UINT64_C(0x0000000000000C00)), /* RGX_FEATURE_XPU_MAX_SLAVES_BIT_MASK */ -+ (IMG_UINT64_C(0x0000000000003000)), /* RGX_FEATURE_XPU_REGISTER_BROADCAST_BIT_MASK */ -+}; -+ -+ -+/****************************************************************************** -+ * Table mapping bitmasks for features and features with values -+ *****************************************************************************/ -+ -+ -+static const IMG_UINT64 gaFeatures[][4]= -+{ -+ { IMG_UINT64_C(0x000100000002001e), IMG_UINT64_C(0x0000400000402025), IMG_UINT64_C(0x0a801a03411aa481), IMG_UINT64_C(0x000000000000001a) }, /* 1.0.2.30 */ -+ { IMG_UINT64_C(0x0001000000040005), IMG_UINT64_C(0x0000400000402024), IMG_UINT64_C(0x0a801a03611aa481), IMG_UINT64_C(0x000000000000001a) }, /* 1.0.4.5 */ -+ { IMG_UINT64_C(0x0001000000040013), IMG_UINT64_C(0x0000400000402025), IMG_UINT64_C(0x0a801a03611aa481), IMG_UINT64_C(0x000000000000001a) }, /* 1.0.4.19 */ -+ { IMG_UINT64_C(0x0004000000020033), IMG_UINT64_C(0x0102c04000c0222f), IMG_UINT64_C(0x0a801a074212a901), IMG_UINT64_C(0x000000000000001a) }, /* 4.0.2.51 */ -+ { IMG_UINT64_C(0x000400000002003a), IMG_UINT64_C(0x0102c04000c0322f), IMG_UINT64_C(0x0a801a874212a901), IMG_UINT64_C(0x000000000000001a) }, /* 4.0.2.58 */ -+ { IMG_UINT64_C(0x0004000000040037), IMG_UINT64_C(0x0102c04000c0222e), IMG_UINT64_C(0x0a801a076212a901), IMG_UINT64_C(0x000000000000001a) }, /* 4.0.4.55 */ -+ { IMG_UINT64_C(0x000400000006003e), IMG_UINT64_C(0x0102c04000c0322f), IMG_UINT64_C(0x0ab01b878212a901), IMG_UINT64_C(0x000000000000001a) }, /* 4.0.6.62 */ -+ { IMG_UINT64_C(0x000500000001002e), IMG_UINT64_C(0x0000004004402205), IMG_UINT64_C(0x06901a01210aa501), IMG_UINT64_C(0x000000000000005a) }, /* 5.0.1.46 */ -+ { IMG_UINT64_C(0x0006000000040023), IMG_UINT64_C(0x0102c04000c0222f), IMG_UINT64_C(0x0a801a076212a901), IMG_UINT64_C(0x000000000000001a) }, /* 6.0.4.35 */ -+ { IMG_UINT64_C(0x000f000000010040), IMG_UINT64_C(0x0000004004403205), IMG_UINT64_C(0x08901a82210aa501), IMG_UINT64_C(0x000000000000005a) }, /* 15.0.1.64 */ -+ { IMG_UINT64_C(0x0016000000150010), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x04940a8220020001), IMG_UINT64_C(0x0000000000000055) }, /* 22.0.21.16 */ -+ { IMG_UINT64_C(0x0016000000360019), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x08940a8320020001), IMG_UINT64_C(0x0000000000000055) }, /* 22.0.54.25 */ -+ { IMG_UINT64_C(0x001600000036001e), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x08940a8420020001), IMG_UINT64_C(0x0000000000000055) }, /* 22.0.54.30 */ -+ { IMG_UINT64_C(0x0016000000360026), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0894128420020001), IMG_UINT64_C(0x0000000000000055) }, /* 22.0.54.38 */ -+ { IMG_UINT64_C(0x001600000036014a), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x08940a842002a591), IMG_UINT64_C(0x0000000000000055) }, /* 22.0.54.330 */ -+ { IMG_UINT64_C(0x0016000000680012), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0894128620020001), IMG_UINT64_C(0x0000000000000055) }, /* 22.0.104.18 */ -+ { IMG_UINT64_C(0x00160000006800da), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x089412862002a591), 
IMG_UINT64_C(0x0000000000000055) }, /* 22.0.104.218 */ -+ { IMG_UINT64_C(0x0016000000d0013e), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x08a413884002a591), IMG_UINT64_C(0x0000000000000055) }, /* 22.0.208.318 */ -+ { IMG_UINT64_C(0x00180000003600cc), IMG_UINT64_C(0x008001c2844f7425), IMG_UINT64_C(0x089812842002a591), IMG_UINT64_C(0x0000000000000055) }, /* 24.0.54.204 */ -+ { IMG_UINT64_C(0x00180000006801f8), IMG_UINT64_C(0x008001c2844f7425), IMG_UINT64_C(0x089812852002a591), IMG_UINT64_C(0x0000000000000055) }, /* 24.0.104.504 */ -+ { IMG_UINT64_C(0x0018000000d001f8), IMG_UINT64_C(0x008001c2844f7425), IMG_UINT64_C(0x0aa813884002a591), IMG_UINT64_C(0x0000000000000055) }, /* 24.0.208.504 */ -+ { IMG_UINT64_C(0x0018000000d001f9), IMG_UINT64_C(0x008001c2844f7425), IMG_UINT64_C(0x0aa813884002a591), IMG_UINT64_C(0x0000000000000055) }, /* 24.0.208.505 */ -+ { IMG_UINT64_C(0x001d0000003400ca), IMG_UINT64_C(0x008181c2844f74a5), IMG_UINT64_C(0x069812822002a621), IMG_UINT64_C(0x0000000000000055) }, /* 29.0.52.202 */ -+ { IMG_UINT64_C(0x001d0000006c00d0), IMG_UINT64_C(0x008181c2844f74a5), IMG_UINT64_C(0x0aa813874002a621), IMG_UINT64_C(0x0000000000000055) }, /* 29.0.108.208 */ -+ { IMG_UINT64_C(0x00210000000b0003), IMG_UINT64_C(0x00800092844b5085), IMG_UINT64_C(0x0298124120020001), IMG_UINT64_C(0x0000000000000055) }, /* 33.0.11.3 */ -+ { IMG_UINT64_C(0x0021000000160001), IMG_UINT64_C(0x008180c2854b70a5), IMG_UINT64_C(0x0698128220020001), IMG_UINT64_C(0x0000000000000055) }, /* 33.0.22.1 */ -+ { IMG_UINT64_C(0x00240000003400b6), IMG_UINT64_C(0x008000d2844b78a5), IMG_UINT64_C(0x169812822004b2b1), IMG_UINT64_C(0x0000000000002955) }, /* 36.0.52.182 */ -+ { IMG_UINT64_C(0x0024000000360067), IMG_UINT64_C(0x008180d2844b38a5), IMG_UINT64_C(0x169812842002b2b1), IMG_UINT64_C(0x0000000000000055) }, /* 36.0.54.103 */ -+ { IMG_UINT64_C(0x00240000003600b6), IMG_UINT64_C(0x008180d2844b78a5), IMG_UINT64_C(0x169812842004b2b1), IMG_UINT64_C(0x0000000000002955) }, /* 36.0.54.182 */ -+ { IMG_UINT64_C(0x00240000003600b7), IMG_UINT64_C(0x008180d2844b78a5), IMG_UINT64_C(0x169812842004b2b1), IMG_UINT64_C(0x0000000000002955) }, /* 36.0.54.183 */ -+ { IMG_UINT64_C(0x0024000000360118), IMG_UINT64_C(0x00819cd2844b78a5), IMG_UINT64_C(0x269812842004b2b1), IMG_UINT64_C(0x0000000000002955) }, /* 36.0.54.280 */ -+ { IMG_UINT64_C(0x00240000006800b6), IMG_UINT64_C(0x008180d2844b78a5), IMG_UINT64_C(0x169812852004b2b1), IMG_UINT64_C(0x0000000000002955) }, /* 36.0.104.182 */ -+ { IMG_UINT64_C(0x00240000006800b7), IMG_UINT64_C(0x008180d2844b78a5), IMG_UINT64_C(0x169812852004b2b1), IMG_UINT64_C(0x0000000000002955) }, /* 36.0.104.183 */ -+ { IMG_UINT64_C(0x002400000068031c), IMG_UINT64_C(0x00e1a0d2864a78a5), IMG_UINT64_C(0x169812852004b2b9), IMG_UINT64_C(0x0000000000002955) }, /* 36.0.104.796 */ -+ { IMG_UINT64_C(0x002e000000660185), IMG_UINT64_C(0x00901cd2844b78a5), IMG_UINT64_C(0x389812922004b2b5), IMG_UINT64_C(0x0000000000002955) }, /* 46.0.102.389 */ -+}; -+ -+/****************************************************************************** -+ * Table mapping bitmasks for ERNs/BRNs -+ *****************************************************************************/ -+ -+ -+static const IMG_UINT64 gaErnsBrns[][2]= -+{ -+ { IMG_UINT64_C(0x0001002700040013), IMG_UINT64_C(0x0000000000000005) }, /* 1.39.4.19 */ -+ { IMG_UINT64_C(0x0001004b0002001e), IMG_UINT64_C(0x0000000000000004) }, /* 1.75.2.30 */ -+ { IMG_UINT64_C(0x0001005200040005), IMG_UINT64_C(0x0000000000000000) }, /* 1.82.4.5 */ -+ { IMG_UINT64_C(0x0004001f00040037), 
IMG_UINT64_C(0x000000000000108a) }, /* 4.31.4.55 */ -+ { IMG_UINT64_C(0x0004002800020033), IMG_UINT64_C(0x000000000000108a) }, /* 4.40.2.51 */ -+ { IMG_UINT64_C(0x0004002b0006003e), IMG_UINT64_C(0x000000000000508a) }, /* 4.43.6.62 */ -+ { IMG_UINT64_C(0x0004002d0002003a), IMG_UINT64_C(0x000000000000500a) }, /* 4.45.2.58 */ -+ { IMG_UINT64_C(0x0004002e0006003e), IMG_UINT64_C(0x000000000000508a) }, /* 4.46.6.62 */ -+ { IMG_UINT64_C(0x000500090001002e), IMG_UINT64_C(0x0000000000000001) }, /* 5.9.1.46 */ -+ { IMG_UINT64_C(0x0006002200040023), IMG_UINT64_C(0x000000000000100a) }, /* 6.34.4.35 */ -+ { IMG_UINT64_C(0x000f000500010040), IMG_UINT64_C(0x0000000000004008) }, /* 15.5.1.64 */ -+ { IMG_UINT64_C(0x0016001e00360019), IMG_UINT64_C(0x0000000000116b08) }, /* 22.30.54.25 */ -+ { IMG_UINT64_C(0x001600280036001e), IMG_UINT64_C(0x0000000000116b08) }, /* 22.40.54.30 */ -+ { IMG_UINT64_C(0x0016002e0036014a), IMG_UINT64_C(0x000000000011ea0a) }, /* 22.46.54.330 */ -+ { IMG_UINT64_C(0x0016003100150010), IMG_UINT64_C(0x0000000000116b08) }, /* 22.49.21.16 */ -+ { IMG_UINT64_C(0x001600430036001e), IMG_UINT64_C(0x0000000000116708) }, /* 22.67.54.30 */ -+ { IMG_UINT64_C(0x001600440036001e), IMG_UINT64_C(0x0000000000116508) }, /* 22.68.54.30 */ -+ { IMG_UINT64_C(0x00160056006800da), IMG_UINT64_C(0x000000000010e408) }, /* 22.86.104.218 */ -+ { IMG_UINT64_C(0x0016005700680012), IMG_UINT64_C(0x0000000000106508) }, /* 22.87.104.18 */ -+ { IMG_UINT64_C(0x0016006600360026), IMG_UINT64_C(0x0000000000106508) }, /* 22.102.54.38 */ -+ { IMG_UINT64_C(0x0016006800d0013e), IMG_UINT64_C(0x000000000010e40a) }, /* 22.104.208.318 */ -+ { IMG_UINT64_C(0x0016006900d0013e), IMG_UINT64_C(0x000000000010e40a) }, /* 22.105.208.318 */ -+ { IMG_UINT64_C(0x0018003200d001f8), IMG_UINT64_C(0x000000000012210a) }, /* 24.50.208.504 */ -+ { IMG_UINT64_C(0x0018003800d001f9), IMG_UINT64_C(0x000000000012210a) }, /* 24.56.208.505 */ -+ { IMG_UINT64_C(0x00180042003600cc), IMG_UINT64_C(0x000000000012210a) }, /* 24.66.54.204 */ -+ { IMG_UINT64_C(0x00180043006801f8), IMG_UINT64_C(0x000000000012210a) }, /* 24.67.104.504 */ -+ { IMG_UINT64_C(0x001d000e006c00d0), IMG_UINT64_C(0x00000000001a212a) }, /* 29.14.108.208 */ -+ { IMG_UINT64_C(0x001d0013003400ca), IMG_UINT64_C(0x00000000001a212a) }, /* 29.19.52.202 */ -+ { IMG_UINT64_C(0x0021000800160001), IMG_UINT64_C(0x000000000010212a) }, /* 33.8.22.1 */ -+ { IMG_UINT64_C(0x0021000f000b0003), IMG_UINT64_C(0x000000000010212a) }, /* 33.15.11.3 */ -+ { IMG_UINT64_C(0x0024001d003400b6), IMG_UINT64_C(0x000000000010212a) }, /* 36.29.52.182 */ -+ { IMG_UINT64_C(0x00240032003600b6), IMG_UINT64_C(0x000000000010212a) }, /* 36.50.54.182 */ -+ { IMG_UINT64_C(0x00240034006800b6), IMG_UINT64_C(0x000000000010212a) }, /* 36.52.104.182 */ -+ { IMG_UINT64_C(0x002400350068031c), IMG_UINT64_C(0x000000000010012a) }, /* 36.53.104.796 */ -+ { IMG_UINT64_C(0x00240036003600b7), IMG_UINT64_C(0x000000000010212a) }, /* 36.54.54.183 */ -+ { IMG_UINT64_C(0x0024003700360067), IMG_UINT64_C(0x000000000010212a) }, /* 36.55.54.103 */ -+ { IMG_UINT64_C(0x00240038006800b7), IMG_UINT64_C(0x000000000010212a) }, /* 36.56.104.183 */ -+ { IMG_UINT64_C(0x0024003c00360118), IMG_UINT64_C(0x000000000010212a) }, /* 36.60.54.280 */ -+ { IMG_UINT64_C(0x002e004800660185), IMG_UINT64_C(0x000000000014212a) }, /* 46.72.102.389 */ -+}; -+ -+#if defined(DEBUG) -+ -+#define FEATURE_NO_VALUES_NAMES_MAX_IDX (57U) -+ -+static const IMG_CHAR * const gaszFeaturesNoValuesNames[FEATURE_NO_VALUES_NAMES_MAX_IDX] = -+{ -+ "AXI_ACELITE", -+ "CLUSTER_GROUPING", -+ 
"COMPUTE", -+ "COMPUTE_MORTON_CAPABLE", -+ "COMPUTE_ONLY", -+ "COMPUTE_OVERLAP", -+ "COMPUTE_OVERLAP_WITH_BARRIERS", -+ "COREID_PER_OS", -+ "DUST_POWER_ISLAND_S7", -+ "DYNAMIC_DUST_POWER", -+ "FASTRENDER_DM", -+ "GPU_MULTICORE_SUPPORT", -+ "GPU_VIRTUALISATION", -+ "GS_RTA_SUPPORT", -+ "IRQ_PER_OS", -+ "META_DMA", -+ "MIPS", -+ "PBE2_IN_XE", -+ "PBE_CHECKSUM_2D", -+ "PBVNC_COREID_REG", -+ "PDS_PER_DUST", -+ "PDS_TEMPSIZE8", -+ "PERFBUS", -+ "PERF_COUNTER_BATCH", -+ "PM_MMU_VFP", -+ "RISCV_FW_PROCESSOR", -+ "ROGUEXE", -+ "S7_CACHE_HIERARCHY", -+ "S7_TOP_INFRASTRUCTURE", -+ "SCALABLE_VDM_GPP", -+ "SIGNAL_SNOOPING", -+ "SIMPLE_INTERNAL_PARAMETER_FORMAT", -+ "SIMPLE_INTERNAL_PARAMETER_FORMAT_V1", -+ "SIMPLE_INTERNAL_PARAMETER_FORMAT_V2", -+ "SINGLE_BIF", -+ "SLC_HYBRID_CACHELINE_64_128", -+ "SLC_SIZE_CONFIGURABLE", -+ "SLC_VIVT", -+ "SOC_TIMER", -+ "SYS_BUS_SECURE_RESET", -+ "TDM_PDS_CHECKSUM", -+ "TESSELLATION", -+ "TFBC_DELTA_CORRELATION", -+ "TFBC_LOSSY_37_PERCENT", -+ "TFBC_NATIVE_YUV10", -+ "TILE_REGION_PROTECTION", -+ "TLA", -+ "TPU_CEM_DATAMASTER_GLOBAL_REGISTERS", -+ "TPU_DM_GLOBAL_REGISTERS", -+ "TPU_FILTERING_MODE_CONTROL", -+ "VDM_DRAWINDIRECT", -+ "VDM_OBJECT_LEVEL_LLS", -+ "VOLCANIC_TB", -+ "WATCHDOG_TIMER", -+ "WORKGROUP_PROTECTION", -+ "XE_MEMORY_HIERARCHY", -+ "XT_TOP_INFRASTRUCTURE", -+}; -+ -+#define ERNSBRNS_IDS_MAX_IDX (22U) -+ -+static const IMG_UINT32 gaui64ErnsBrnsIDs[ERNSBRNS_IDS_MAX_IDX] = -+{ -+ 38344, -+ 42290, -+ 42321, -+ 42606, -+ 46066, -+ 47025, -+ 50539, -+ 50767, -+ 57596, -+ 60084, -+ 61389, -+ 61450, -+ 63142, -+ 63553, -+ 64502, -+ 65101, -+ 65273, -+ 66622, -+ 66927, -+ 68186, -+ 71317, -+ 73472, -+}; -+ -+#endif /* defined(DEBUG) */ -+#endif /* RGX_BVNC_TABLE_KM_H */ -diff --git a/drivers/gpu/drm/img-rogue/km/rgx_cr_defs_km.h b/drivers/gpu/drm/img-rogue/km/rgx_cr_defs_km.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/km/rgx_cr_defs_km.h -@@ -0,0 +1,8472 @@ -+/*************************************************************************/ /*! -+@Title Hardware definition file rgx_cr_defs_km.h -+@Brief The file contains auto-generated hardware definitions without -+ BVNC-specific compile time conditionals. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+/* **** Autogenerated C -- do not edit **** */ -+ -+/* -+ */ -+ -+ -+#ifndef RGX_CR_DEFS_KM_H -+#define RGX_CR_DEFS_KM_H -+ -+#if !defined(IMG_EXPLICIT_INCLUDE_HWDEFS) -+#error This file may only be included if explicitly defined -+#endif -+ -+#include "img_types.h" -+#include "img_defs.h" -+ -+ -+#define RGX_CR_DEFS_KM_REVISION 1 -+ -+/* -+ Register RGX_CR_RASTERISATION_INDIRECT -+*/ -+#define RGX_CR_RASTERISATION_INDIRECT (0x8238U) -+#define RGX_CR_RASTERISATION_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -+#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_SHIFT (0U) -+#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) -+ -+ -+/* -+ Register RGX_CR_USC_INDIRECT -+*/ -+#define RGX_CR_USC_INDIRECT (0x8000U) -+#define RGX_CR_USC_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -+#define RGX_CR_USC_INDIRECT_ADDRESS_SHIFT (0U) -+#define RGX_CR_USC_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) -+ -+ -+/* -+ Register RGX_CR_PBE_INDIRECT -+*/ -+#define RGX_CR_PBE_INDIRECT (0x83E0U) -+#define RGX_CR_PBE_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -+#define RGX_CR_PBE_INDIRECT_ADDRESS_SHIFT (0U) -+#define RGX_CR_PBE_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) -+ -+ -+/* -+ Register RGX_CR_PBE_PERF_INDIRECT -+*/ -+#define RGX_CR_PBE_PERF_INDIRECT (0x83D8U) -+#define RGX_CR_PBE_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -+#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_SHIFT (0U) -+#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) -+ -+ -+/* -+ Register RGX_CR_TPU_PERF_INDIRECT -+*/ -+#define RGX_CR_TPU_PERF_INDIRECT (0x83F0U) -+#define RGX_CR_TPU_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007)) -+#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_SHIFT (0U) -+#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF8U) -+ -+ -+/* -+ Register RGX_CR_RASTERISATION_PERF_INDIRECT -+*/ -+#define RGX_CR_RASTERISATION_PERF_INDIRECT (0x8318U) -+#define RGX_CR_RASTERISATION_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -+#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_SHIFT (0U) -+#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) -+ -+ -+/* -+ Register RGX_CR_TPU_MCU_L0_PERF_INDIRECT -+*/ -+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT (0x8028U) -+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_MASKFULL 
(IMG_UINT64_C(0x0000000000000007)) -+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_SHIFT (0U) -+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF8U) -+ -+ -+/* -+ Register RGX_CR_USC_PERF_INDIRECT -+*/ -+#define RGX_CR_USC_PERF_INDIRECT (0x8030U) -+#define RGX_CR_USC_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -+#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_SHIFT (0U) -+#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U) -+ -+ -+/* -+ Register RGX_CR_BLACKPEARL_INDIRECT -+*/ -+#define RGX_CR_BLACKPEARL_INDIRECT (0x8388U) -+#define RGX_CR_BLACKPEARL_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -+#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_SHIFT (0U) -+#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFFCU) -+ -+ -+/* -+ Register RGX_CR_BLACKPEARL_PERF_INDIRECT -+*/ -+#define RGX_CR_BLACKPEARL_PERF_INDIRECT (0x83F8U) -+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_SHIFT (0U) -+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFFCU) -+ -+ -+/* -+ Register RGX_CR_TEXAS3_PERF_INDIRECT -+*/ -+#define RGX_CR_TEXAS3_PERF_INDIRECT (0x83D0U) -+#define RGX_CR_TEXAS3_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007)) -+#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_SHIFT (0U) -+#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF8U) -+ -+ -+/* -+ Register RGX_CR_TEXAS_PERF_INDIRECT -+*/ -+#define RGX_CR_TEXAS_PERF_INDIRECT (0x8288U) -+#define RGX_CR_TEXAS_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -+#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_SHIFT (0U) -+#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFFCU) -+ -+ -+/* -+ Register RGX_CR_BX_TU_PERF_INDIRECT -+*/ -+#define RGX_CR_BX_TU_PERF_INDIRECT (0xC900U) -+#define RGX_CR_BX_TU_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -+#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_SHIFT (0U) -+#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFFCU) -+ -+ -+/* -+ Register RGX_CR_CLK_CTRL -+*/ -+#define RGX_CR_CLK_CTRL (0x0000U) -+#define RGX_CR_CLK_CTRL__PBE2_XE__MASKFULL (IMG_UINT64_C(0xFFFFFF003F3FFFFF)) -+#define RGX_CR_CLK_CTRL__S7_INFRA__MASKFULL (IMG_UINT64_C(0xCFCF03000F3F3F0F)) -+#define RGX_CR_CLK_CTRL_MASKFULL (IMG_UINT64_C(0xFFFFFF003F3FFFFF)) -+#define RGX_CR_CLK_CTRL_BIF_TEXAS_SHIFT (62U) -+#define RGX_CR_CLK_CTRL_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) -+#define RGX_CR_CLK_CTRL_BIF_TEXAS_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_BIF_TEXAS_ON (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_CLK_CTRL_BIF_TEXAS_AUTO (IMG_UINT64_C(0x8000000000000000)) -+#define RGX_CR_CLK_CTRL_BIF_TEXAS__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_BIF_TEXAS__S7_INFRA__ON (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_CLK_CTRL_BIF_TEXAS__S7_INFRA__AUTO (IMG_UINT64_C(0x8000000000000000)) -+#define RGX_CR_CLK_CTRL_BIF_TEXAS__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_BIF_TEXAS__PBE2_XE__ON (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_CLK_CTRL_BIF_TEXAS__PBE2_XE__AUTO (IMG_UINT64_C(0x8000000000000000)) -+#define RGX_CR_CLK_CTRL_IPP_SHIFT (60U) -+#define RGX_CR_CLK_CTRL_IPP_CLRMSK (IMG_UINT64_C(0xCFFFFFFFFFFFFFFF)) -+#define RGX_CR_CLK_CTRL_IPP_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_IPP_ON (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_CLK_CTRL_IPP_AUTO (IMG_UINT64_C(0x2000000000000000)) -+#define 
RGX_CR_CLK_CTRL_IPP__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_IPP__S7_INFRA__ON (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_CLK_CTRL_IPP__S7_INFRA__AUTO (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_CLK_CTRL_IPP__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_IPP__PBE2_XE__ON (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_CLK_CTRL_IPP__PBE2_XE__AUTO (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_CLK_CTRL_FBC_SHIFT (58U) -+#define RGX_CR_CLK_CTRL_FBC_CLRMSK (IMG_UINT64_C(0xF3FFFFFFFFFFFFFF)) -+#define RGX_CR_CLK_CTRL_FBC_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_FBC_ON (IMG_UINT64_C(0x0400000000000000)) -+#define RGX_CR_CLK_CTRL_FBC_AUTO (IMG_UINT64_C(0x0800000000000000)) -+#define RGX_CR_CLK_CTRL_FBC__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_FBC__S7_INFRA__ON (IMG_UINT64_C(0x0400000000000000)) -+#define RGX_CR_CLK_CTRL_FBC__S7_INFRA__AUTO (IMG_UINT64_C(0x0800000000000000)) -+#define RGX_CR_CLK_CTRL_FBC__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_FBC__PBE2_XE__ON (IMG_UINT64_C(0x0400000000000000)) -+#define RGX_CR_CLK_CTRL_FBC__PBE2_XE__AUTO (IMG_UINT64_C(0x0800000000000000)) -+#define RGX_CR_CLK_CTRL_FBDC_SHIFT (56U) -+#define RGX_CR_CLK_CTRL_FBDC_CLRMSK (IMG_UINT64_C(0xFCFFFFFFFFFFFFFF)) -+#define RGX_CR_CLK_CTRL_FBDC_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_FBDC_ON (IMG_UINT64_C(0x0100000000000000)) -+#define RGX_CR_CLK_CTRL_FBDC_AUTO (IMG_UINT64_C(0x0200000000000000)) -+#define RGX_CR_CLK_CTRL_FBDC__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_FBDC__S7_INFRA__ON (IMG_UINT64_C(0x0100000000000000)) -+#define RGX_CR_CLK_CTRL_FBDC__S7_INFRA__AUTO (IMG_UINT64_C(0x0200000000000000)) -+#define RGX_CR_CLK_CTRL_FBDC__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_FBDC__PBE2_XE__ON (IMG_UINT64_C(0x0100000000000000)) -+#define RGX_CR_CLK_CTRL_FBDC__PBE2_XE__AUTO (IMG_UINT64_C(0x0200000000000000)) -+#define RGX_CR_CLK_CTRL_FB_TLCACHE_SHIFT (54U) -+#define RGX_CR_CLK_CTRL_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0xFF3FFFFFFFFFFFFF)) -+#define RGX_CR_CLK_CTRL_FB_TLCACHE_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_FB_TLCACHE_ON (IMG_UINT64_C(0x0040000000000000)) -+#define RGX_CR_CLK_CTRL_FB_TLCACHE_AUTO (IMG_UINT64_C(0x0080000000000000)) -+#define RGX_CR_CLK_CTRL_FB_TLCACHE__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_FB_TLCACHE__S7_INFRA__ON (IMG_UINT64_C(0x0040000000000000)) -+#define RGX_CR_CLK_CTRL_FB_TLCACHE__S7_INFRA__AUTO (IMG_UINT64_C(0x0080000000000000)) -+#define RGX_CR_CLK_CTRL_FB_TLCACHE__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_FB_TLCACHE__PBE2_XE__ON (IMG_UINT64_C(0x0040000000000000)) -+#define RGX_CR_CLK_CTRL_FB_TLCACHE__PBE2_XE__AUTO (IMG_UINT64_C(0x0080000000000000)) -+#define RGX_CR_CLK_CTRL_USCS_SHIFT (52U) -+#define RGX_CR_CLK_CTRL_USCS_CLRMSK (IMG_UINT64_C(0xFFCFFFFFFFFFFFFF)) -+#define RGX_CR_CLK_CTRL_USCS_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_USCS_ON (IMG_UINT64_C(0x0010000000000000)) -+#define RGX_CR_CLK_CTRL_USCS_AUTO (IMG_UINT64_C(0x0020000000000000)) -+#define RGX_CR_CLK_CTRL_USCS__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_USCS__S7_INFRA__ON (IMG_UINT64_C(0x0010000000000000)) -+#define RGX_CR_CLK_CTRL_USCS__S7_INFRA__AUTO (IMG_UINT64_C(0x0020000000000000)) -+#define 
RGX_CR_CLK_CTRL_USCS__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_USCS__PBE2_XE__ON (IMG_UINT64_C(0x0010000000000000)) -+#define RGX_CR_CLK_CTRL_USCS__PBE2_XE__AUTO (IMG_UINT64_C(0x0020000000000000)) -+#define RGX_CR_CLK_CTRL_PBE_SHIFT (50U) -+#define RGX_CR_CLK_CTRL_PBE_CLRMSK (IMG_UINT64_C(0xFFF3FFFFFFFFFFFF)) -+#define RGX_CR_CLK_CTRL_PBE_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_PBE_ON (IMG_UINT64_C(0x0004000000000000)) -+#define RGX_CR_CLK_CTRL_PBE_AUTO (IMG_UINT64_C(0x0008000000000000)) -+#define RGX_CR_CLK_CTRL_PBE__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_PBE__S7_INFRA__ON (IMG_UINT64_C(0x0004000000000000)) -+#define RGX_CR_CLK_CTRL_PBE__S7_INFRA__AUTO (IMG_UINT64_C(0x0008000000000000)) -+#define RGX_CR_CLK_CTRL_PBE__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_PBE__PBE2_XE__ON (IMG_UINT64_C(0x0004000000000000)) -+#define RGX_CR_CLK_CTRL_PBE__PBE2_XE__AUTO (IMG_UINT64_C(0x0008000000000000)) -+#define RGX_CR_CLK_CTRL_MCU_L1_SHIFT (48U) -+#define RGX_CR_CLK_CTRL_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF)) -+#define RGX_CR_CLK_CTRL_MCU_L1_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_MCU_L1_ON (IMG_UINT64_C(0x0001000000000000)) -+#define RGX_CR_CLK_CTRL_MCU_L1_AUTO (IMG_UINT64_C(0x0002000000000000)) -+#define RGX_CR_CLK_CTRL_MCU_L1__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_MCU_L1__S7_INFRA__ON (IMG_UINT64_C(0x0001000000000000)) -+#define RGX_CR_CLK_CTRL_MCU_L1__S7_INFRA__AUTO (IMG_UINT64_C(0x0002000000000000)) -+#define RGX_CR_CLK_CTRL_MCU_L1__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_MCU_L1__PBE2_XE__ON (IMG_UINT64_C(0x0001000000000000)) -+#define RGX_CR_CLK_CTRL_MCU_L1__PBE2_XE__AUTO (IMG_UINT64_C(0x0002000000000000)) -+#define RGX_CR_CLK_CTRL_CDM_SHIFT (46U) -+#define RGX_CR_CLK_CTRL_CDM_CLRMSK (IMG_UINT64_C(0xFFFF3FFFFFFFFFFF)) -+#define RGX_CR_CLK_CTRL_CDM_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_CDM_ON (IMG_UINT64_C(0x0000400000000000)) -+#define RGX_CR_CLK_CTRL_CDM_AUTO (IMG_UINT64_C(0x0000800000000000)) -+#define RGX_CR_CLK_CTRL_CDM__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_CDM__S7_INFRA__ON (IMG_UINT64_C(0x0000400000000000)) -+#define RGX_CR_CLK_CTRL_CDM__S7_INFRA__AUTO (IMG_UINT64_C(0x0000800000000000)) -+#define RGX_CR_CLK_CTRL_CDM__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_CDM__PBE2_XE__ON (IMG_UINT64_C(0x0000400000000000)) -+#define RGX_CR_CLK_CTRL_CDM__PBE2_XE__AUTO (IMG_UINT64_C(0x0000800000000000)) -+#define RGX_CR_CLK_CTRL_SIDEKICK_SHIFT (44U) -+#define RGX_CR_CLK_CTRL_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFCFFFFFFFFFFF)) -+#define RGX_CR_CLK_CTRL_SIDEKICK_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_SIDEKICK_ON (IMG_UINT64_C(0x0000100000000000)) -+#define RGX_CR_CLK_CTRL_SIDEKICK_AUTO (IMG_UINT64_C(0x0000200000000000)) -+#define RGX_CR_CLK_CTRL_SIDEKICK__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_SIDEKICK__S7_INFRA__ON (IMG_UINT64_C(0x0000100000000000)) -+#define RGX_CR_CLK_CTRL_SIDEKICK__S7_INFRA__AUTO (IMG_UINT64_C(0x0000200000000000)) -+#define RGX_CR_CLK_CTRL_SIDEKICK__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_SIDEKICK__PBE2_XE__ON (IMG_UINT64_C(0x0000100000000000)) -+#define RGX_CR_CLK_CTRL_SIDEKICK__PBE2_XE__AUTO (IMG_UINT64_C(0x0000200000000000)) -+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_SHIFT 
(42U) -+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFF3FFFFFFFFFF)) -+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_ON (IMG_UINT64_C(0x0000040000000000)) -+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_AUTO (IMG_UINT64_C(0x0000080000000000)) -+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK__S7_INFRA__ON (IMG_UINT64_C(0x0000040000000000)) -+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK__S7_INFRA__AUTO (IMG_UINT64_C(0x0000080000000000)) -+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK__PBE2_XE__ON (IMG_UINT64_C(0x0000040000000000)) -+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK__PBE2_XE__AUTO (IMG_UINT64_C(0x0000080000000000)) -+#define RGX_CR_CLK_CTRL_BIF_SHIFT (40U) -+#define RGX_CR_CLK_CTRL_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF)) -+#define RGX_CR_CLK_CTRL_BIF_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_BIF_ON (IMG_UINT64_C(0x0000010000000000)) -+#define RGX_CR_CLK_CTRL_BIF_AUTO (IMG_UINT64_C(0x0000020000000000)) -+#define RGX_CR_CLK_CTRL_BIF__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_BIF__S7_INFRA__ON (IMG_UINT64_C(0x0000010000000000)) -+#define RGX_CR_CLK_CTRL_BIF__S7_INFRA__AUTO (IMG_UINT64_C(0x0000020000000000)) -+#define RGX_CR_CLK_CTRL_BIF__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_BIF__PBE2_XE__ON (IMG_UINT64_C(0x0000010000000000)) -+#define RGX_CR_CLK_CTRL_BIF__PBE2_XE__AUTO (IMG_UINT64_C(0x0000020000000000)) -+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_SHIFT (28U) -+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF)) -+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_ON (IMG_UINT64_C(0x0000000010000000)) -+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_AUTO (IMG_UINT64_C(0x0000000020000000)) -+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX__S7_INFRA__ON (IMG_UINT64_C(0x0000000010000000)) -+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000020000000)) -+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX__PBE2_XE__ON (IMG_UINT64_C(0x0000000010000000)) -+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000020000000)) -+#define RGX_CR_CLK_CTRL_MCU_L0_SHIFT (26U) -+#define RGX_CR_CLK_CTRL_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) -+#define RGX_CR_CLK_CTRL_MCU_L0_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_MCU_L0_ON (IMG_UINT64_C(0x0000000004000000)) -+#define RGX_CR_CLK_CTRL_MCU_L0_AUTO (IMG_UINT64_C(0x0000000008000000)) -+#define RGX_CR_CLK_CTRL_MCU_L0__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_MCU_L0__S7_INFRA__ON (IMG_UINT64_C(0x0000000004000000)) -+#define RGX_CR_CLK_CTRL_MCU_L0__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000008000000)) -+#define RGX_CR_CLK_CTRL_MCU_L0__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_MCU_L0__PBE2_XE__ON (IMG_UINT64_C(0x0000000004000000)) -+#define RGX_CR_CLK_CTRL_MCU_L0__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000008000000)) -+#define RGX_CR_CLK_CTRL_TPU_SHIFT (24U) -+#define RGX_CR_CLK_CTRL_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) -+#define RGX_CR_CLK_CTRL_TPU_OFF 
(IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_TPU_ON (IMG_UINT64_C(0x0000000001000000)) -+#define RGX_CR_CLK_CTRL_TPU_AUTO (IMG_UINT64_C(0x0000000002000000)) -+#define RGX_CR_CLK_CTRL_TPU__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_TPU__S7_INFRA__ON (IMG_UINT64_C(0x0000000001000000)) -+#define RGX_CR_CLK_CTRL_TPU__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000002000000)) -+#define RGX_CR_CLK_CTRL_TPU__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_TPU__PBE2_XE__ON (IMG_UINT64_C(0x0000000001000000)) -+#define RGX_CR_CLK_CTRL_TPU__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000002000000)) -+#define RGX_CR_CLK_CTRL_USC_SHIFT (20U) -+#define RGX_CR_CLK_CTRL_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) -+#define RGX_CR_CLK_CTRL_USC_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_USC_ON (IMG_UINT64_C(0x0000000000100000)) -+#define RGX_CR_CLK_CTRL_USC_AUTO (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_CLK_CTRL_USC__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_USC__S7_INFRA__ON (IMG_UINT64_C(0x0000000000100000)) -+#define RGX_CR_CLK_CTRL_USC__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_CLK_CTRL_USC__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_USC__PBE2_XE__ON (IMG_UINT64_C(0x0000000000100000)) -+#define RGX_CR_CLK_CTRL_USC__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_CLK_CTRL_TLA_SHIFT (18U) -+#define RGX_CR_CLK_CTRL_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) -+#define RGX_CR_CLK_CTRL_TLA_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_TLA_ON (IMG_UINT64_C(0x0000000000040000)) -+#define RGX_CR_CLK_CTRL_TLA_AUTO (IMG_UINT64_C(0x0000000000080000)) -+#define RGX_CR_CLK_CTRL_TLA__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_TLA__S7_INFRA__ON (IMG_UINT64_C(0x0000000000040000)) -+#define RGX_CR_CLK_CTRL_TLA__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000080000)) -+#define RGX_CR_CLK_CTRL_TLA__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_TLA__PBE2_XE__ON (IMG_UINT64_C(0x0000000000040000)) -+#define RGX_CR_CLK_CTRL_TLA__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000080000)) -+#define RGX_CR_CLK_CTRL_SLC_SHIFT (16U) -+#define RGX_CR_CLK_CTRL_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) -+#define RGX_CR_CLK_CTRL_SLC_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_SLC_ON (IMG_UINT64_C(0x0000000000010000)) -+#define RGX_CR_CLK_CTRL_SLC_AUTO (IMG_UINT64_C(0x0000000000020000)) -+#define RGX_CR_CLK_CTRL_SLC__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_SLC__S7_INFRA__ON (IMG_UINT64_C(0x0000000000010000)) -+#define RGX_CR_CLK_CTRL_SLC__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000020000)) -+#define RGX_CR_CLK_CTRL_SLC__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_SLC__PBE2_XE__ON (IMG_UINT64_C(0x0000000000010000)) -+#define RGX_CR_CLK_CTRL_SLC__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000020000)) -+#define RGX_CR_CLK_CTRL_UVS_SHIFT (14U) -+#define RGX_CR_CLK_CTRL_UVS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF)) -+#define RGX_CR_CLK_CTRL_UVS_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_UVS_ON (IMG_UINT64_C(0x0000000000004000)) -+#define RGX_CR_CLK_CTRL_UVS_AUTO (IMG_UINT64_C(0x0000000000008000)) -+#define RGX_CR_CLK_CTRL_UVS__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_UVS__S7_INFRA__ON (IMG_UINT64_C(0x0000000000004000)) -+#define 
RGX_CR_CLK_CTRL_UVS__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000008000)) -+#define RGX_CR_CLK_CTRL_UVS__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_UVS__PBE2_XE__ON (IMG_UINT64_C(0x0000000000004000)) -+#define RGX_CR_CLK_CTRL_UVS__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000008000)) -+#define RGX_CR_CLK_CTRL_PDS_SHIFT (12U) -+#define RGX_CR_CLK_CTRL_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) -+#define RGX_CR_CLK_CTRL_PDS_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_PDS_ON (IMG_UINT64_C(0x0000000000001000)) -+#define RGX_CR_CLK_CTRL_PDS_AUTO (IMG_UINT64_C(0x0000000000002000)) -+#define RGX_CR_CLK_CTRL_PDS__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_PDS__S7_INFRA__ON (IMG_UINT64_C(0x0000000000001000)) -+#define RGX_CR_CLK_CTRL_PDS__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000002000)) -+#define RGX_CR_CLK_CTRL_PDS__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_PDS__PBE2_XE__ON (IMG_UINT64_C(0x0000000000001000)) -+#define RGX_CR_CLK_CTRL_PDS__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000002000)) -+#define RGX_CR_CLK_CTRL_VDM_SHIFT (10U) -+#define RGX_CR_CLK_CTRL_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) -+#define RGX_CR_CLK_CTRL_VDM_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_VDM_ON (IMG_UINT64_C(0x0000000000000400)) -+#define RGX_CR_CLK_CTRL_VDM_AUTO (IMG_UINT64_C(0x0000000000000800)) -+#define RGX_CR_CLK_CTRL_VDM__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_VDM__S7_INFRA__ON (IMG_UINT64_C(0x0000000000000400)) -+#define RGX_CR_CLK_CTRL_VDM__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000000800)) -+#define RGX_CR_CLK_CTRL_VDM__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_VDM__PBE2_XE__ON (IMG_UINT64_C(0x0000000000000400)) -+#define RGX_CR_CLK_CTRL_VDM__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000000800)) -+#define RGX_CR_CLK_CTRL_PM_SHIFT (8U) -+#define RGX_CR_CLK_CTRL_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) -+#define RGX_CR_CLK_CTRL_PM_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_PM_ON (IMG_UINT64_C(0x0000000000000100)) -+#define RGX_CR_CLK_CTRL_PM_AUTO (IMG_UINT64_C(0x0000000000000200)) -+#define RGX_CR_CLK_CTRL_PM__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_PM__S7_INFRA__ON (IMG_UINT64_C(0x0000000000000100)) -+#define RGX_CR_CLK_CTRL_PM__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000000200)) -+#define RGX_CR_CLK_CTRL_PM__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_PM__PBE2_XE__ON (IMG_UINT64_C(0x0000000000000100)) -+#define RGX_CR_CLK_CTRL_PM__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000000200)) -+#define RGX_CR_CLK_CTRL_GPP_SHIFT (6U) -+#define RGX_CR_CLK_CTRL_GPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF3F)) -+#define RGX_CR_CLK_CTRL_GPP_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_GPP_ON (IMG_UINT64_C(0x0000000000000040)) -+#define RGX_CR_CLK_CTRL_GPP_AUTO (IMG_UINT64_C(0x0000000000000080)) -+#define RGX_CR_CLK_CTRL_GPP__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_GPP__S7_INFRA__ON (IMG_UINT64_C(0x0000000000000040)) -+#define RGX_CR_CLK_CTRL_GPP__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000000080)) -+#define RGX_CR_CLK_CTRL_GPP__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_GPP__PBE2_XE__ON (IMG_UINT64_C(0x0000000000000040)) -+#define RGX_CR_CLK_CTRL_GPP__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000000080)) -+#define RGX_CR_CLK_CTRL_TE_SHIFT (4U) -+#define 
RGX_CR_CLK_CTRL_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) -+#define RGX_CR_CLK_CTRL_TE_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_TE_ON (IMG_UINT64_C(0x0000000000000010)) -+#define RGX_CR_CLK_CTRL_TE_AUTO (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_CR_CLK_CTRL_TE__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_TE__S7_INFRA__ON (IMG_UINT64_C(0x0000000000000010)) -+#define RGX_CR_CLK_CTRL_TE__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_CR_CLK_CTRL_TE__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_TE__PBE2_XE__ON (IMG_UINT64_C(0x0000000000000010)) -+#define RGX_CR_CLK_CTRL_TE__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_CR_CLK_CTRL_TSP_SHIFT (2U) -+#define RGX_CR_CLK_CTRL_TSP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF3)) -+#define RGX_CR_CLK_CTRL_TSP_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_TSP_ON (IMG_UINT64_C(0x0000000000000004)) -+#define RGX_CR_CLK_CTRL_TSP_AUTO (IMG_UINT64_C(0x0000000000000008)) -+#define RGX_CR_CLK_CTRL_TSP__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_TSP__S7_INFRA__ON (IMG_UINT64_C(0x0000000000000004)) -+#define RGX_CR_CLK_CTRL_TSP__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000000008)) -+#define RGX_CR_CLK_CTRL_TSP__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_TSP__PBE2_XE__ON (IMG_UINT64_C(0x0000000000000004)) -+#define RGX_CR_CLK_CTRL_TSP__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000000008)) -+#define RGX_CR_CLK_CTRL_ISP_SHIFT (0U) -+#define RGX_CR_CLK_CTRL_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) -+#define RGX_CR_CLK_CTRL_ISP_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_ISP_ON (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_CLK_CTRL_ISP_AUTO (IMG_UINT64_C(0x0000000000000002)) -+#define RGX_CR_CLK_CTRL_ISP__S7_INFRA__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_ISP__S7_INFRA__ON (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_CLK_CTRL_ISP__S7_INFRA__AUTO (IMG_UINT64_C(0x0000000000000002)) -+#define RGX_CR_CLK_CTRL_ISP__PBE2_XE__OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL_ISP__PBE2_XE__ON (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_CLK_CTRL_ISP__PBE2_XE__AUTO (IMG_UINT64_C(0x0000000000000002)) -+ -+ -+/* -+ Register RGX_CR_CLK_STATUS -+*/ -+#define RGX_CR_CLK_STATUS (0x0008U) -+#define RGX_CR_CLK_STATUS__PBE2_XE__MASKFULL (IMG_UINT64_C(0x00000001FFF077FF)) -+#define RGX_CR_CLK_STATUS__S7_INFRA__MASKFULL (IMG_UINT64_C(0x00000001B3101773)) -+#define RGX_CR_CLK_STATUS_MASKFULL (IMG_UINT64_C(0x00000001FFF077FF)) -+#define RGX_CR_CLK_STATUS_MCU_FBTC_SHIFT (32U) -+#define RGX_CR_CLK_STATUS_MCU_FBTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -+#define RGX_CR_CLK_STATUS_MCU_FBTC_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_MCU_FBTC_RUNNING (IMG_UINT64_C(0x0000000100000000)) -+#define RGX_CR_CLK_STATUS_MCU_FBTC__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_MCU_FBTC__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000100000000)) -+#define RGX_CR_CLK_STATUS_MCU_FBTC__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_MCU_FBTC__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000100000000)) -+#define RGX_CR_CLK_STATUS_BIF_TEXAS_SHIFT (31U) -+#define RGX_CR_CLK_STATUS_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -+#define RGX_CR_CLK_STATUS_BIF_TEXAS_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_BIF_TEXAS_RUNNING 
(IMG_UINT64_C(0x0000000080000000)) -+#define RGX_CR_CLK_STATUS_BIF_TEXAS__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_BIF_TEXAS__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000080000000)) -+#define RGX_CR_CLK_STATUS_BIF_TEXAS__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_BIF_TEXAS__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000080000000)) -+#define RGX_CR_CLK_STATUS_IPP_SHIFT (30U) -+#define RGX_CR_CLK_STATUS_IPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) -+#define RGX_CR_CLK_STATUS_IPP_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_IPP_RUNNING (IMG_UINT64_C(0x0000000040000000)) -+#define RGX_CR_CLK_STATUS_IPP__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_IPP__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000040000000)) -+#define RGX_CR_CLK_STATUS_IPP__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_IPP__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000040000000)) -+#define RGX_CR_CLK_STATUS_FBC_SHIFT (29U) -+#define RGX_CR_CLK_STATUS_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) -+#define RGX_CR_CLK_STATUS_FBC_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_FBC_RUNNING (IMG_UINT64_C(0x0000000020000000)) -+#define RGX_CR_CLK_STATUS_FBC__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_FBC__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000020000000)) -+#define RGX_CR_CLK_STATUS_FBC__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_FBC__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000020000000)) -+#define RGX_CR_CLK_STATUS_FBDC_SHIFT (28U) -+#define RGX_CR_CLK_STATUS_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) -+#define RGX_CR_CLK_STATUS_FBDC_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_FBDC_RUNNING (IMG_UINT64_C(0x0000000010000000)) -+#define RGX_CR_CLK_STATUS_FBDC__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_FBDC__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000010000000)) -+#define RGX_CR_CLK_STATUS_FBDC__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_FBDC__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000010000000)) -+#define RGX_CR_CLK_STATUS_FB_TLCACHE_SHIFT (27U) -+#define RGX_CR_CLK_STATUS_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) -+#define RGX_CR_CLK_STATUS_FB_TLCACHE_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_FB_TLCACHE_RUNNING (IMG_UINT64_C(0x0000000008000000)) -+#define RGX_CR_CLK_STATUS_FB_TLCACHE__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_FB_TLCACHE__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000008000000)) -+#define RGX_CR_CLK_STATUS_FB_TLCACHE__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_FB_TLCACHE__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000008000000)) -+#define RGX_CR_CLK_STATUS_USCS_SHIFT (26U) -+#define RGX_CR_CLK_STATUS_USCS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) -+#define RGX_CR_CLK_STATUS_USCS_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_USCS_RUNNING (IMG_UINT64_C(0x0000000004000000)) -+#define RGX_CR_CLK_STATUS_USCS__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_USCS__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000004000000)) -+#define RGX_CR_CLK_STATUS_USCS__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_USCS__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000004000000)) -+#define RGX_CR_CLK_STATUS_PBE_SHIFT (25U) -+#define 
RGX_CR_CLK_STATUS_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) -+#define RGX_CR_CLK_STATUS_PBE_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_PBE_RUNNING (IMG_UINT64_C(0x0000000002000000)) -+#define RGX_CR_CLK_STATUS_PBE__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_PBE__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000002000000)) -+#define RGX_CR_CLK_STATUS_PBE__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_PBE__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000002000000)) -+#define RGX_CR_CLK_STATUS_MCU_L1_SHIFT (24U) -+#define RGX_CR_CLK_STATUS_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) -+#define RGX_CR_CLK_STATUS_MCU_L1_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_MCU_L1_RUNNING (IMG_UINT64_C(0x0000000001000000)) -+#define RGX_CR_CLK_STATUS_MCU_L1__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_MCU_L1__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000001000000)) -+#define RGX_CR_CLK_STATUS_MCU_L1__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_MCU_L1__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000001000000)) -+#define RGX_CR_CLK_STATUS_CDM_SHIFT (23U) -+#define RGX_CR_CLK_STATUS_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) -+#define RGX_CR_CLK_STATUS_CDM_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_CDM_RUNNING (IMG_UINT64_C(0x0000000000800000)) -+#define RGX_CR_CLK_STATUS_CDM__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_CDM__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000800000)) -+#define RGX_CR_CLK_STATUS_CDM__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_CDM__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000800000)) -+#define RGX_CR_CLK_STATUS_SIDEKICK_SHIFT (22U) -+#define RGX_CR_CLK_STATUS_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) -+#define RGX_CR_CLK_STATUS_SIDEKICK_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_SIDEKICK_RUNNING (IMG_UINT64_C(0x0000000000400000)) -+#define RGX_CR_CLK_STATUS_SIDEKICK__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_SIDEKICK__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000400000)) -+#define RGX_CR_CLK_STATUS_SIDEKICK__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_SIDEKICK__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000400000)) -+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_SHIFT (21U) -+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_RUNNING (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_CLK_STATUS_BIF_SHIFT (20U) -+#define RGX_CR_CLK_STATUS_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) -+#define RGX_CR_CLK_STATUS_BIF_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_BIF_RUNNING (IMG_UINT64_C(0x0000000000100000)) -+#define RGX_CR_CLK_STATUS_BIF__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_BIF__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000100000)) -+#define 
RGX_CR_CLK_STATUS_BIF__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_BIF__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000100000)) -+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_SHIFT (14U) -+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_RUNNING (IMG_UINT64_C(0x0000000000004000)) -+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000004000)) -+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000004000)) -+#define RGX_CR_CLK_STATUS_MCU_L0_SHIFT (13U) -+#define RGX_CR_CLK_STATUS_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) -+#define RGX_CR_CLK_STATUS_MCU_L0_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_MCU_L0_RUNNING (IMG_UINT64_C(0x0000000000002000)) -+#define RGX_CR_CLK_STATUS_MCU_L0__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_MCU_L0__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000002000)) -+#define RGX_CR_CLK_STATUS_MCU_L0__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_MCU_L0__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000002000)) -+#define RGX_CR_CLK_STATUS_TPU_SHIFT (12U) -+#define RGX_CR_CLK_STATUS_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) -+#define RGX_CR_CLK_STATUS_TPU_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_TPU_RUNNING (IMG_UINT64_C(0x0000000000001000)) -+#define RGX_CR_CLK_STATUS_TPU__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_TPU__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000001000)) -+#define RGX_CR_CLK_STATUS_TPU__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_TPU__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000001000)) -+#define RGX_CR_CLK_STATUS_USC_SHIFT (10U) -+#define RGX_CR_CLK_STATUS_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -+#define RGX_CR_CLK_STATUS_USC_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_USC_RUNNING (IMG_UINT64_C(0x0000000000000400)) -+#define RGX_CR_CLK_STATUS_USC__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_USC__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000400)) -+#define RGX_CR_CLK_STATUS_USC__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_USC__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000400)) -+#define RGX_CR_CLK_STATUS_TLA_SHIFT (9U) -+#define RGX_CR_CLK_STATUS_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) -+#define RGX_CR_CLK_STATUS_TLA_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_TLA_RUNNING (IMG_UINT64_C(0x0000000000000200)) -+#define RGX_CR_CLK_STATUS_TLA__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_TLA__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000200)) -+#define RGX_CR_CLK_STATUS_TLA__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_TLA__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000200)) -+#define RGX_CR_CLK_STATUS_SLC_SHIFT (8U) -+#define RGX_CR_CLK_STATUS_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -+#define RGX_CR_CLK_STATUS_SLC_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_SLC_RUNNING (IMG_UINT64_C(0x0000000000000100)) 
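Every clock-domain field in RGX_CR_CLK_STATUS above follows the same three-macro pattern: _CLRMSK is the register value with the field's bits cleared, while _GATED and _RUNNING are the two encodings of the field itself. A reader therefore isolates a field by AND-ing the register with the inverse of its _CLRMSK and comparing against one of the named values. A minimal sketch of that idiom for the SLC bit, using plain stdint types instead of the driver's IMG_* typedefs and taking the already-read register value as a parameter (the MMIO accessor itself is not part of this header):

    #include <stdbool.h>
    #include <stdint.h>

    /* Values copied from the RGX_CR_CLK_STATUS definitions above. */
    #define RGX_CR_CLK_STATUS_SLC_CLRMSK   UINT64_C(0xFFFFFFFFFFFFFEFF)
    #define RGX_CR_CLK_STATUS_SLC_RUNNING  UINT64_C(0x0000000000000100)

    /* True when the SLC clock domain is reported as running rather than gated. */
    static inline bool rgx_slc_clock_running(uint64_t clk_status)
    {
        /* ~CLRMSK keeps only the SLC bit; compare with the RUNNING encoding. */
        return (clk_status & ~RGX_CR_CLK_STATUS_SLC_CLRMSK) ==
               RGX_CR_CLK_STATUS_SLC_RUNNING;
    }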
-+#define RGX_CR_CLK_STATUS_SLC__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_SLC__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000100)) -+#define RGX_CR_CLK_STATUS_SLC__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_SLC__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000100)) -+#define RGX_CR_CLK_STATUS_UVS_SHIFT (7U) -+#define RGX_CR_CLK_STATUS_UVS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) -+#define RGX_CR_CLK_STATUS_UVS_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_UVS_RUNNING (IMG_UINT64_C(0x0000000000000080)) -+#define RGX_CR_CLK_STATUS_UVS__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_UVS__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000080)) -+#define RGX_CR_CLK_STATUS_UVS__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_UVS__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000080)) -+#define RGX_CR_CLK_STATUS_PDS_SHIFT (6U) -+#define RGX_CR_CLK_STATUS_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) -+#define RGX_CR_CLK_STATUS_PDS_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_PDS_RUNNING (IMG_UINT64_C(0x0000000000000040)) -+#define RGX_CR_CLK_STATUS_PDS__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_PDS__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000040)) -+#define RGX_CR_CLK_STATUS_PDS__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_PDS__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000040)) -+#define RGX_CR_CLK_STATUS_VDM_SHIFT (5U) -+#define RGX_CR_CLK_STATUS_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -+#define RGX_CR_CLK_STATUS_VDM_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_VDM_RUNNING (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_CR_CLK_STATUS_VDM__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_VDM__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_CR_CLK_STATUS_VDM__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_VDM__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_CR_CLK_STATUS_PM_SHIFT (4U) -+#define RGX_CR_CLK_STATUS_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -+#define RGX_CR_CLK_STATUS_PM_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_PM_RUNNING (IMG_UINT64_C(0x0000000000000010)) -+#define RGX_CR_CLK_STATUS_PM__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_PM__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000010)) -+#define RGX_CR_CLK_STATUS_PM__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_PM__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000010)) -+#define RGX_CR_CLK_STATUS_GPP_SHIFT (3U) -+#define RGX_CR_CLK_STATUS_GPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -+#define RGX_CR_CLK_STATUS_GPP_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_GPP_RUNNING (IMG_UINT64_C(0x0000000000000008)) -+#define RGX_CR_CLK_STATUS_GPP__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_GPP__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000008)) -+#define RGX_CR_CLK_STATUS_GPP__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_GPP__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000008)) -+#define RGX_CR_CLK_STATUS_TE_SHIFT (2U) -+#define RGX_CR_CLK_STATUS_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -+#define RGX_CR_CLK_STATUS_TE_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define 
RGX_CR_CLK_STATUS_TE_RUNNING (IMG_UINT64_C(0x0000000000000004)) -+#define RGX_CR_CLK_STATUS_TE__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_TE__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000004)) -+#define RGX_CR_CLK_STATUS_TE__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_TE__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000004)) -+#define RGX_CR_CLK_STATUS_TSP_SHIFT (1U) -+#define RGX_CR_CLK_STATUS_TSP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -+#define RGX_CR_CLK_STATUS_TSP_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_TSP_RUNNING (IMG_UINT64_C(0x0000000000000002)) -+#define RGX_CR_CLK_STATUS_TSP__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_TSP__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000002)) -+#define RGX_CR_CLK_STATUS_TSP__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_TSP__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000002)) -+#define RGX_CR_CLK_STATUS_ISP_SHIFT (0U) -+#define RGX_CR_CLK_STATUS_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_CLK_STATUS_ISP_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_ISP_RUNNING (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_CLK_STATUS_ISP__S7_INFRA__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_ISP__S7_INFRA__RUNNING (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_CLK_STATUS_ISP__PBE2_XE__GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS_ISP__PBE2_XE__RUNNING (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_CORE_ID -+*/ -+#define RGX_CR_CORE_ID__PBVNC (0x0020U) -+#define RGX_CR_CORE_ID__PBVNC__MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT (48U) -+#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) -+#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT (32U) -+#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) -+#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT (16U) -+#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) -+#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT (0U) -+#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_CORE_ID -+*/ -+#define RGX_CR_CORE_ID (0x0018U) -+#define RGX_CR_CORE_ID_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_CORE_ID_ID_SHIFT (16U) -+#define RGX_CR_CORE_ID_ID_CLRMSK (0x0000FFFFU) -+#define RGX_CR_CORE_ID_CONFIG_SHIFT (0U) -+#define RGX_CR_CORE_ID_CONFIG_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_CORE_REVISION -+*/ -+#define RGX_CR_CORE_REVISION (0x0020U) -+#define RGX_CR_CORE_REVISION_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_CORE_REVISION_DESIGNER_SHIFT (24U) -+#define RGX_CR_CORE_REVISION_DESIGNER_CLRMSK (0x00FFFFFFU) -+#define RGX_CR_CORE_REVISION_MAJOR_SHIFT (16U) -+#define RGX_CR_CORE_REVISION_MAJOR_CLRMSK (0xFF00FFFFU) -+#define RGX_CR_CORE_REVISION_MINOR_SHIFT (8U) -+#define RGX_CR_CORE_REVISION_MINOR_CLRMSK (0xFFFF00FFU) -+#define RGX_CR_CORE_REVISION_MAINTENANCE_SHIFT (0U) -+#define RGX_CR_CORE_REVISION_MAINTENANCE_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_DESIGNER_REV_FIELD1 -+*/ -+#define RGX_CR_DESIGNER_REV_FIELD1 (0x0028U) -+#define RGX_CR_DESIGNER_REV_FIELD1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT (0U) 
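RGX_CR_CORE_ID and RGX_CR_CORE_REVISION pack several sub-fields into one 32-bit register, each described only by a _SHIFT and a _CLRMSK pair. The conventional way to pull a field out is mask-then-shift; the sketch below wraps that in a small token-pasting helper. RGX_FIELD_GET is illustrative only (it is not part of this header), and the field constants are copied from the definitions above:

    #include <stdint.h>

    /* Copied from the RGX_CR_CORE_REVISION definitions above. */
    #define RGX_CR_CORE_REVISION_MAJOR_SHIFT   (16U)
    #define RGX_CR_CORE_REVISION_MAJOR_CLRMSK  (0xFF00FFFFU)
    #define RGX_CR_CORE_REVISION_MINOR_SHIFT   (8U)
    #define RGX_CR_CORE_REVISION_MINOR_CLRMSK  (0xFFFF00FFU)

    /* Hypothetical helper: clear everything except the field, then shift it down. */
    #define RGX_FIELD_GET(reg, field) \
        (((reg) & ~(uint64_t)(field##_CLRMSK)) >> (field##_SHIFT))

    static inline void rgx_decode_revision(uint32_t core_revision,
                                           uint32_t *major, uint32_t *minor)
    {
        *major = (uint32_t)RGX_FIELD_GET(core_revision, RGX_CR_CORE_REVISION_MAJOR);
        *minor = (uint32_t)RGX_FIELD_GET(core_revision, RGX_CR_CORE_REVISION_MINOR);
    }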
-+#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_DESIGNER_REV_FIELD2 -+*/ -+#define RGX_CR_DESIGNER_REV_FIELD2 (0x0030U) -+#define RGX_CR_DESIGNER_REV_FIELD2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT (0U) -+#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_CHANGESET_NUMBER -+*/ -+#define RGX_CR_CHANGESET_NUMBER (0x0040U) -+#define RGX_CR_CHANGESET_NUMBER_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_SHIFT (0U) -+#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_CLRMSK (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_SOC_TIMER_GRAY -+*/ -+#define RGX_CR_SOC_TIMER_GRAY (0x00E0U) -+#define RGX_CR_SOC_TIMER_GRAY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_SOC_TIMER_GRAY_VALUE_SHIFT (0U) -+#define RGX_CR_SOC_TIMER_GRAY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_SOC_TIMER_BINARY -+*/ -+#define RGX_CR_SOC_TIMER_BINARY (0x00E8U) -+#define RGX_CR_SOC_TIMER_BINARY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_SOC_TIMER_BINARY_VALUE_SHIFT (0U) -+#define RGX_CR_SOC_TIMER_BINARY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_CLK_XTPLUS_CTRL -+*/ -+#define RGX_CR_CLK_XTPLUS_CTRL (0x0080U) -+#define RGX_CR_CLK_XTPLUS_CTRL_MASKFULL (IMG_UINT64_C(0x0000003FFFFF0000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_SHIFT (36U) -+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF)) -+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_ON (IMG_UINT64_C(0x0000001000000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_AUTO (IMG_UINT64_C(0x0000002000000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_SHIFT (34U) -+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFF3FFFFFFFF)) -+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_ON (IMG_UINT64_C(0x0000000400000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_AUTO (IMG_UINT64_C(0x0000000800000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_SHIFT (32U) -+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFCFFFFFFFF)) -+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_ON (IMG_UINT64_C(0x0000000100000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_AUTO (IMG_UINT64_C(0x0000000200000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_SHIFT (30U) -+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF3FFFFFFF)) -+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_ON (IMG_UINT64_C(0x0000000040000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_AUTO (IMG_UINT64_C(0x0000000080000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_SHIFT (28U) -+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF)) -+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_ON (IMG_UINT64_C(0x0000000010000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_AUTO (IMG_UINT64_C(0x0000000020000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_SHIFT (26U) -+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) -+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define 
RGX_CR_CLK_XTPLUS_CTRL_VERTEX_ON (IMG_UINT64_C(0x0000000004000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_AUTO (IMG_UINT64_C(0x0000000008000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_SHIFT (24U) -+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) -+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_ON (IMG_UINT64_C(0x0000000001000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_AUTO (IMG_UINT64_C(0x0000000002000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_SHIFT (22U) -+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF3FFFFF)) -+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_ON (IMG_UINT64_C(0x0000000000400000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_AUTO (IMG_UINT64_C(0x0000000000800000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_SHIFT (20U) -+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) -+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_ON (IMG_UINT64_C(0x0000000000100000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_AUTO (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_SHIFT (18U) -+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) -+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_ON (IMG_UINT64_C(0x0000000000040000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_AUTO (IMG_UINT64_C(0x0000000000080000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_SHIFT (16U) -+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) -+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_ON (IMG_UINT64_C(0x0000000000010000)) -+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_AUTO (IMG_UINT64_C(0x0000000000020000)) -+ -+ -+/* -+ Register RGX_CR_CLK_XTPLUS_STATUS -+*/ -+#define RGX_CR_CLK_XTPLUS_STATUS (0x0088U) -+#define RGX_CR_CLK_XTPLUS_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000007FF)) -+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_SHIFT (10U) -+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_RUNNING (IMG_UINT64_C(0x0000000000000400)) -+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_SHIFT (9U) -+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) -+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_RUNNING (IMG_UINT64_C(0x0000000000000200)) -+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_SHIFT (8U) -+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_RUNNING (IMG_UINT64_C(0x0000000000000100)) -+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_SHIFT (7U) -+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) -+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_RUNNING (IMG_UINT64_C(0x0000000000000080)) -+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_SHIFT (6U) -+#define 
RGX_CR_CLK_XTPLUS_STATUS_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) -+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_RUNNING (IMG_UINT64_C(0x0000000000000040)) -+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_SHIFT (5U) -+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_RUNNING (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_SHIFT (4U) -+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_RUNNING (IMG_UINT64_C(0x0000000000000010)) -+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_SHIFT (3U) -+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000008)) -+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_SHIFT (2U) -+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_RUNNING (IMG_UINT64_C(0x0000000000000004)) -+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_SHIFT (1U) -+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000002)) -+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_SHIFT (0U) -+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_RUNNING (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_SOFT_RESET -+*/ -+#define RGX_CR_SOFT_RESET (0x0100U) -+#define RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL (IMG_UINT64_C(0xFFEFFFFFFFFFFC3D)) -+#define RGX_CR_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00E7FFFFFFFFFC3D)) -+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM3_CORE_SHIFT (63U) -+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM3_CORE_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM3_CORE_EN (IMG_UINT64_C(0x8000000000000000)) -+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM2_CORE_SHIFT (62U) -+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM2_CORE_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM2_CORE_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_SOFT_RESET__PBE2_XE__BERNADO2_CORE_SHIFT (61U) -+#define RGX_CR_SOFT_RESET__PBE2_XE__BERNADO2_CORE_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET__PBE2_XE__BERNADO2_CORE_EN (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_SOFT_RESET__PBE2_XE__JONES_CORE_SHIFT (60U) -+#define RGX_CR_SOFT_RESET__PBE2_XE__JONES_CORE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET__PBE2_XE__JONES_CORE_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_SOFT_RESET__PBE2_XE__TILING_CORE_SHIFT (59U) -+#define RGX_CR_SOFT_RESET__PBE2_XE__TILING_CORE_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) -+#define 
RGX_CR_SOFT_RESET__PBE2_XE__TILING_CORE_EN (IMG_UINT64_C(0x0800000000000000)) -+#define RGX_CR_SOFT_RESET__PBE2_XE__TE3_SHIFT (58U) -+#define RGX_CR_SOFT_RESET__PBE2_XE__TE3_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET__PBE2_XE__TE3_EN (IMG_UINT64_C(0x0400000000000000)) -+#define RGX_CR_SOFT_RESET__PBE2_XE__VCE_SHIFT (57U) -+#define RGX_CR_SOFT_RESET__PBE2_XE__VCE_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET__PBE2_XE__VCE_EN (IMG_UINT64_C(0x0200000000000000)) -+#define RGX_CR_SOFT_RESET__PBE2_XE__VBS_SHIFT (56U) -+#define RGX_CR_SOFT_RESET__PBE2_XE__VBS_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET__PBE2_XE__VBS_EN (IMG_UINT64_C(0x0100000000000000)) -+#define RGX_CR_SOFT_RESET_DPX1_CORE_SHIFT (55U) -+#define RGX_CR_SOFT_RESET_DPX1_CORE_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_DPX1_CORE_EN (IMG_UINT64_C(0x0080000000000000)) -+#define RGX_CR_SOFT_RESET_DPX0_CORE_SHIFT (54U) -+#define RGX_CR_SOFT_RESET_DPX0_CORE_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_DPX0_CORE_EN (IMG_UINT64_C(0x0040000000000000)) -+#define RGX_CR_SOFT_RESET_FBA_SHIFT (53U) -+#define RGX_CR_SOFT_RESET_FBA_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_FBA_EN (IMG_UINT64_C(0x0020000000000000)) -+#define RGX_CR_SOFT_RESET__PBE2_XE__FB_CDC_SHIFT (51U) -+#define RGX_CR_SOFT_RESET__PBE2_XE__FB_CDC_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET__PBE2_XE__FB_CDC_EN (IMG_UINT64_C(0x0008000000000000)) -+#define RGX_CR_SOFT_RESET_SH_SHIFT (50U) -+#define RGX_CR_SOFT_RESET_SH_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_SH_EN (IMG_UINT64_C(0x0004000000000000)) -+#define RGX_CR_SOFT_RESET_VRDM_SHIFT (49U) -+#define RGX_CR_SOFT_RESET_VRDM_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_VRDM_EN (IMG_UINT64_C(0x0002000000000000)) -+#define RGX_CR_SOFT_RESET_MCU_FBTC_SHIFT (48U) -+#define RGX_CR_SOFT_RESET_MCU_FBTC_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_MCU_FBTC_EN (IMG_UINT64_C(0x0001000000000000)) -+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_SHIFT (47U) -+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_EN (IMG_UINT64_C(0x0000800000000000)) -+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_SHIFT (46U) -+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_EN (IMG_UINT64_C(0x0000400000000000)) -+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_SHIFT (45U) -+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_EN (IMG_UINT64_C(0x0000200000000000)) -+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_SHIFT (44U) -+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_EN (IMG_UINT64_C(0x0000100000000000)) -+#define RGX_CR_SOFT_RESET_IPP_SHIFT (43U) -+#define RGX_CR_SOFT_RESET_IPP_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_IPP_EN (IMG_UINT64_C(0x0000080000000000)) -+#define RGX_CR_SOFT_RESET_BIF_TEXAS_SHIFT (42U) -+#define RGX_CR_SOFT_RESET_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_BIF_TEXAS_EN (IMG_UINT64_C(0x0000040000000000)) -+#define RGX_CR_SOFT_RESET_TORNADO_CORE_SHIFT (41U) -+#define RGX_CR_SOFT_RESET_TORNADO_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) -+#define 
RGX_CR_SOFT_RESET_TORNADO_CORE_EN (IMG_UINT64_C(0x0000020000000000)) -+#define RGX_CR_SOFT_RESET_DUST_H_CORE_SHIFT (40U) -+#define RGX_CR_SOFT_RESET_DUST_H_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_DUST_H_CORE_EN (IMG_UINT64_C(0x0000010000000000)) -+#define RGX_CR_SOFT_RESET_DUST_G_CORE_SHIFT (39U) -+#define RGX_CR_SOFT_RESET_DUST_G_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_DUST_G_CORE_EN (IMG_UINT64_C(0x0000008000000000)) -+#define RGX_CR_SOFT_RESET_DUST_F_CORE_SHIFT (38U) -+#define RGX_CR_SOFT_RESET_DUST_F_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_DUST_F_CORE_EN (IMG_UINT64_C(0x0000004000000000)) -+#define RGX_CR_SOFT_RESET_DUST_E_CORE_SHIFT (37U) -+#define RGX_CR_SOFT_RESET_DUST_E_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_DUST_E_CORE_EN (IMG_UINT64_C(0x0000002000000000)) -+#define RGX_CR_SOFT_RESET_DUST_D_CORE_SHIFT (36U) -+#define RGX_CR_SOFT_RESET_DUST_D_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_DUST_D_CORE_EN (IMG_UINT64_C(0x0000001000000000)) -+#define RGX_CR_SOFT_RESET_DUST_C_CORE_SHIFT (35U) -+#define RGX_CR_SOFT_RESET_DUST_C_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) -+#define RGX_CR_SOFT_RESET_DUST_C_CORE_EN (IMG_UINT64_C(0x0000000800000000)) -+#define RGX_CR_SOFT_RESET_MMU_SHIFT (34U) -+#define RGX_CR_SOFT_RESET_MMU_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_MMU_EN (IMG_UINT64_C(0x0000000400000000)) -+#define RGX_CR_SOFT_RESET_BIF1_SHIFT (33U) -+#define RGX_CR_SOFT_RESET_BIF1_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_BIF1_EN (IMG_UINT64_C(0x0000000200000000)) -+#define RGX_CR_SOFT_RESET_GARTEN_SHIFT (32U) -+#define RGX_CR_SOFT_RESET_GARTEN_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_GARTEN_EN (IMG_UINT64_C(0x0000000100000000)) -+#define RGX_CR_SOFT_RESET_CPU_SHIFT (32U) -+#define RGX_CR_SOFT_RESET_CPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -+#define RGX_CR_SOFT_RESET_CPU_EN (IMG_UINT64_C(0x0000000100000000)) -+#define RGX_CR_SOFT_RESET_RASCAL_CORE_SHIFT (31U) -+#define RGX_CR_SOFT_RESET_RASCAL_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -+#define RGX_CR_SOFT_RESET_RASCAL_CORE_EN (IMG_UINT64_C(0x0000000080000000)) -+#define RGX_CR_SOFT_RESET_DUST_B_CORE_SHIFT (30U) -+#define RGX_CR_SOFT_RESET_DUST_B_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) -+#define RGX_CR_SOFT_RESET_DUST_B_CORE_EN (IMG_UINT64_C(0x0000000040000000)) -+#define RGX_CR_SOFT_RESET_DUST_A_CORE_SHIFT (29U) -+#define RGX_CR_SOFT_RESET_DUST_A_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) -+#define RGX_CR_SOFT_RESET_DUST_A_CORE_EN (IMG_UINT64_C(0x0000000020000000)) -+#define RGX_CR_SOFT_RESET_FB_TLCACHE_SHIFT (28U) -+#define RGX_CR_SOFT_RESET_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) -+#define RGX_CR_SOFT_RESET_FB_TLCACHE_EN (IMG_UINT64_C(0x0000000010000000)) -+#define RGX_CR_SOFT_RESET_SLC_SHIFT (27U) -+#define RGX_CR_SOFT_RESET_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) -+#define RGX_CR_SOFT_RESET_SLC_EN (IMG_UINT64_C(0x0000000008000000)) -+#define RGX_CR_SOFT_RESET_TLA_SHIFT (26U) -+#define RGX_CR_SOFT_RESET_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) -+#define RGX_CR_SOFT_RESET_TLA_EN (IMG_UINT64_C(0x0000000004000000)) -+#define RGX_CR_SOFT_RESET_UVS_SHIFT (25U) -+#define RGX_CR_SOFT_RESET_UVS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) -+#define RGX_CR_SOFT_RESET_UVS_EN (IMG_UINT64_C(0x0000000002000000)) -+#define RGX_CR_SOFT_RESET_TE_SHIFT 
(24U) -+#define RGX_CR_SOFT_RESET_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) -+#define RGX_CR_SOFT_RESET_TE_EN (IMG_UINT64_C(0x0000000001000000)) -+#define RGX_CR_SOFT_RESET_GPP_SHIFT (23U) -+#define RGX_CR_SOFT_RESET_GPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) -+#define RGX_CR_SOFT_RESET_GPP_EN (IMG_UINT64_C(0x0000000000800000)) -+#define RGX_CR_SOFT_RESET_FBDC_SHIFT (22U) -+#define RGX_CR_SOFT_RESET_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) -+#define RGX_CR_SOFT_RESET_FBDC_EN (IMG_UINT64_C(0x0000000000400000)) -+#define RGX_CR_SOFT_RESET_FBC_SHIFT (21U) -+#define RGX_CR_SOFT_RESET_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define RGX_CR_SOFT_RESET_FBC_EN (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_SOFT_RESET_PM_SHIFT (20U) -+#define RGX_CR_SOFT_RESET_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) -+#define RGX_CR_SOFT_RESET_PM_EN (IMG_UINT64_C(0x0000000000100000)) -+#define RGX_CR_SOFT_RESET_PBE_SHIFT (19U) -+#define RGX_CR_SOFT_RESET_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) -+#define RGX_CR_SOFT_RESET_PBE_EN (IMG_UINT64_C(0x0000000000080000)) -+#define RGX_CR_SOFT_RESET_USC_SHARED_SHIFT (18U) -+#define RGX_CR_SOFT_RESET_USC_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) -+#define RGX_CR_SOFT_RESET_USC_SHARED_EN (IMG_UINT64_C(0x0000000000040000)) -+#define RGX_CR_SOFT_RESET_MCU_L1_SHIFT (17U) -+#define RGX_CR_SOFT_RESET_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) -+#define RGX_CR_SOFT_RESET_MCU_L1_EN (IMG_UINT64_C(0x0000000000020000)) -+#define RGX_CR_SOFT_RESET_BIF_SHIFT (16U) -+#define RGX_CR_SOFT_RESET_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) -+#define RGX_CR_SOFT_RESET_BIF_EN (IMG_UINT64_C(0x0000000000010000)) -+#define RGX_CR_SOFT_RESET_CDM_SHIFT (15U) -+#define RGX_CR_SOFT_RESET_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) -+#define RGX_CR_SOFT_RESET_CDM_EN (IMG_UINT64_C(0x0000000000008000)) -+#define RGX_CR_SOFT_RESET_VDM_SHIFT (14U) -+#define RGX_CR_SOFT_RESET_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -+#define RGX_CR_SOFT_RESET_VDM_EN (IMG_UINT64_C(0x0000000000004000)) -+#define RGX_CR_SOFT_RESET_TESS_SHIFT (13U) -+#define RGX_CR_SOFT_RESET_TESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) -+#define RGX_CR_SOFT_RESET_TESS_EN (IMG_UINT64_C(0x0000000000002000)) -+#define RGX_CR_SOFT_RESET_PDS_SHIFT (12U) -+#define RGX_CR_SOFT_RESET_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) -+#define RGX_CR_SOFT_RESET_PDS_EN (IMG_UINT64_C(0x0000000000001000)) -+#define RGX_CR_SOFT_RESET_ISP_SHIFT (11U) -+#define RGX_CR_SOFT_RESET_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) -+#define RGX_CR_SOFT_RESET_ISP_EN (IMG_UINT64_C(0x0000000000000800)) -+#define RGX_CR_SOFT_RESET_TSP_SHIFT (10U) -+#define RGX_CR_SOFT_RESET_TSP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -+#define RGX_CR_SOFT_RESET_TSP_EN (IMG_UINT64_C(0x0000000000000400)) -+#define RGX_CR_SOFT_RESET_SYSARB_SHIFT (5U) -+#define RGX_CR_SOFT_RESET_SYSARB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -+#define RGX_CR_SOFT_RESET_SYSARB_EN (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_SHIFT (4U) -+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_EN (IMG_UINT64_C(0x0000000000000010)) -+#define RGX_CR_SOFT_RESET_MCU_L0_SHIFT (3U) -+#define RGX_CR_SOFT_RESET_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -+#define RGX_CR_SOFT_RESET_MCU_L0_EN (IMG_UINT64_C(0x0000000000000008)) -+#define RGX_CR_SOFT_RESET_TPU_SHIFT (2U) -+#define RGX_CR_SOFT_RESET_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -+#define 
RGX_CR_SOFT_RESET_TPU_EN (IMG_UINT64_C(0x0000000000000004)) -+#define RGX_CR_SOFT_RESET_USC_SHIFT (0U) -+#define RGX_CR_SOFT_RESET_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_SOFT_RESET_USC_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_SOFT_RESET2 -+*/ -+#define RGX_CR_SOFT_RESET2 (0x0108U) -+#define RGX_CR_SOFT_RESET2_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF)) -+#define RGX_CR_SOFT_RESET2_SPFILTER_SHIFT (12U) -+#define RGX_CR_SOFT_RESET2_SPFILTER_CLRMSK (0xFFE00FFFU) -+#define RGX_CR_SOFT_RESET2_TDM_SHIFT (11U) -+#define RGX_CR_SOFT_RESET2_TDM_CLRMSK (0xFFFFF7FFU) -+#define RGX_CR_SOFT_RESET2_TDM_EN (0x00000800U) -+#define RGX_CR_SOFT_RESET2_ASTC_SHIFT (10U) -+#define RGX_CR_SOFT_RESET2_ASTC_CLRMSK (0xFFFFFBFFU) -+#define RGX_CR_SOFT_RESET2_ASTC_EN (0x00000400U) -+#define RGX_CR_SOFT_RESET2_BLACKPEARL_SHIFT (9U) -+#define RGX_CR_SOFT_RESET2_BLACKPEARL_CLRMSK (0xFFFFFDFFU) -+#define RGX_CR_SOFT_RESET2_BLACKPEARL_EN (0x00000200U) -+#define RGX_CR_SOFT_RESET2_USCPS_SHIFT (8U) -+#define RGX_CR_SOFT_RESET2_USCPS_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_SOFT_RESET2_USCPS_EN (0x00000100U) -+#define RGX_CR_SOFT_RESET2_IPF_SHIFT (7U) -+#define RGX_CR_SOFT_RESET2_IPF_CLRMSK (0xFFFFFF7FU) -+#define RGX_CR_SOFT_RESET2_IPF_EN (0x00000080U) -+#define RGX_CR_SOFT_RESET2_GEOMETRY_SHIFT (6U) -+#define RGX_CR_SOFT_RESET2_GEOMETRY_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_SOFT_RESET2_GEOMETRY_EN (0x00000040U) -+#define RGX_CR_SOFT_RESET2_USC_SHARED_SHIFT (5U) -+#define RGX_CR_SOFT_RESET2_USC_SHARED_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_SOFT_RESET2_USC_SHARED_EN (0x00000020U) -+#define RGX_CR_SOFT_RESET2_PDS_SHARED_SHIFT (4U) -+#define RGX_CR_SOFT_RESET2_PDS_SHARED_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_SOFT_RESET2_PDS_SHARED_EN (0x00000010U) -+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_SHIFT (3U) -+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_EN (0x00000008U) -+#define RGX_CR_SOFT_RESET2_PIXEL_SHIFT (2U) -+#define RGX_CR_SOFT_RESET2_PIXEL_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_SOFT_RESET2_PIXEL_EN (0x00000004U) -+#define RGX_CR_SOFT_RESET2_CDM_SHIFT (1U) -+#define RGX_CR_SOFT_RESET2_CDM_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_SOFT_RESET2_CDM_EN (0x00000002U) -+#define RGX_CR_SOFT_RESET2_VERTEX_SHIFT (0U) -+#define RGX_CR_SOFT_RESET2_VERTEX_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_SOFT_RESET2_VERTEX_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_EVENT_ENABLE -+*/ -+#define RGX_CR_EVENT_ENABLE (0x0128U) -+#define RGX_CR_EVENT_ENABLE__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000E01DFFFF)) -+#define RGX_CR_EVENT_ENABLE__SIGNALS__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) -+#define RGX_CR_EVENT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_SHIFT (31U) -+#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) -+#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_EN (0x80000000U) -+#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_SHIFT (30U) -+#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) -+#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_EN (0x40000000U) -+#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) -+#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) -+#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) -+#define RGX_CR_EVENT_ENABLE_DPX_OUT_OF_MEMORY_SHIFT (28U) -+#define RGX_CR_EVENT_ENABLE_DPX_OUT_OF_MEMORY_CLRMSK (0xEFFFFFFFU) -+#define RGX_CR_EVENT_ENABLE_DPX_OUT_OF_MEMORY_EN (0x10000000U) 
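RGX_CR_SOFT_RESET and RGX_CR_SOFT_RESET2 are written rather than read-modify-queried: each _EN constant is the bit that holds the corresponding unit in reset, and writing zero releases it. A minimal sketch of the usual assert/read-back/deassert sequence, assuming hypothetical rgx_write64()/rgx_read64() MMIO helpers (the real driver uses its own accessors, and any required delays or ordering guarantees are its responsibility); the offset and bit values are taken from the header above:

    #include <stdint.h>

    /* Hypothetical MMIO helpers standing in for the driver's register accessors. */
    extern void     rgx_write64(uint32_t offset, uint64_t value);
    extern uint64_t rgx_read64(uint32_t offset);

    #define RGX_CR_SOFT_RESET         (0x0100U)
    #define RGX_CR_SOFT_RESET_SLC_EN  UINT64_C(0x0000000008000000)
    #define RGX_CR_SOFT_RESET_BIF_EN  UINT64_C(0x0000000000010000)

    /* Hold the selected units in reset, fence the write, then release them. */
    static void rgx_soft_reset_pulse(uint64_t units)
    {
        rgx_write64(RGX_CR_SOFT_RESET, units);
        (void)rgx_read64(RGX_CR_SOFT_RESET);   /* read back so the write has posted */
        rgx_write64(RGX_CR_SOFT_RESET, 0);
    }

    /* e.g. rgx_soft_reset_pulse(RGX_CR_SOFT_RESET_SLC_EN | RGX_CR_SOFT_RESET_BIF_EN); */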
-+#define RGX_CR_EVENT_ENABLE_DPX_MMU_PAGE_FAULT_SHIFT (27U) -+#define RGX_CR_EVENT_ENABLE_DPX_MMU_PAGE_FAULT_CLRMSK (0xF7FFFFFFU) -+#define RGX_CR_EVENT_ENABLE_DPX_MMU_PAGE_FAULT_EN (0x08000000U) -+#define RGX_CR_EVENT_ENABLE_RPM_OUT_OF_MEMORY_SHIFT (26U) -+#define RGX_CR_EVENT_ENABLE_RPM_OUT_OF_MEMORY_CLRMSK (0xFBFFFFFFU) -+#define RGX_CR_EVENT_ENABLE_RPM_OUT_OF_MEMORY_EN (0x04000000U) -+#define RGX_CR_EVENT_ENABLE_FBA_FC3_FINISHED_SHIFT (25U) -+#define RGX_CR_EVENT_ENABLE_FBA_FC3_FINISHED_CLRMSK (0xFDFFFFFFU) -+#define RGX_CR_EVENT_ENABLE_FBA_FC3_FINISHED_EN (0x02000000U) -+#define RGX_CR_EVENT_ENABLE_FBA_FC2_FINISHED_SHIFT (24U) -+#define RGX_CR_EVENT_ENABLE_FBA_FC2_FINISHED_CLRMSK (0xFEFFFFFFU) -+#define RGX_CR_EVENT_ENABLE_FBA_FC2_FINISHED_EN (0x01000000U) -+#define RGX_CR_EVENT_ENABLE_FBA_FC1_FINISHED_SHIFT (23U) -+#define RGX_CR_EVENT_ENABLE_FBA_FC1_FINISHED_CLRMSK (0xFF7FFFFFU) -+#define RGX_CR_EVENT_ENABLE_FBA_FC1_FINISHED_EN (0x00800000U) -+#define RGX_CR_EVENT_ENABLE_FBA_FC0_FINISHED_SHIFT (22U) -+#define RGX_CR_EVENT_ENABLE_FBA_FC0_FINISHED_CLRMSK (0xFFBFFFFFU) -+#define RGX_CR_EVENT_ENABLE_FBA_FC0_FINISHED_EN (0x00400000U) -+#define RGX_CR_EVENT_ENABLE_RDM_FC3_FINISHED_SHIFT (21U) -+#define RGX_CR_EVENT_ENABLE_RDM_FC3_FINISHED_CLRMSK (0xFFDFFFFFU) -+#define RGX_CR_EVENT_ENABLE_RDM_FC3_FINISHED_EN (0x00200000U) -+#define RGX_CR_EVENT_ENABLE_RDM_FC2_FINISHED_SHIFT (20U) -+#define RGX_CR_EVENT_ENABLE_RDM_FC2_FINISHED_CLRMSK (0xFFEFFFFFU) -+#define RGX_CR_EVENT_ENABLE_RDM_FC2_FINISHED_EN (0x00100000U) -+#define RGX_CR_EVENT_ENABLE__ROGUEXE__SAFETY_SHIFT (20U) -+#define RGX_CR_EVENT_ENABLE__ROGUEXE__SAFETY_CLRMSK (0xFFEFFFFFU) -+#define RGX_CR_EVENT_ENABLE__ROGUEXE__SAFETY_EN (0x00100000U) -+#define RGX_CR_EVENT_ENABLE_RDM_FC1_FINISHED_SHIFT (19U) -+#define RGX_CR_EVENT_ENABLE_RDM_FC1_FINISHED_CLRMSK (0xFFF7FFFFU) -+#define RGX_CR_EVENT_ENABLE_RDM_FC1_FINISHED_EN (0x00080000U) -+#define RGX_CR_EVENT_ENABLE__ROGUEXE__SLAVE_REQ_SHIFT (19U) -+#define RGX_CR_EVENT_ENABLE__ROGUEXE__SLAVE_REQ_CLRMSK (0xFFF7FFFFU) -+#define RGX_CR_EVENT_ENABLE__ROGUEXE__SLAVE_REQ_EN (0x00080000U) -+#define RGX_CR_EVENT_ENABLE_RDM_FC0_FINISHED_SHIFT (18U) -+#define RGX_CR_EVENT_ENABLE_RDM_FC0_FINISHED_CLRMSK (0xFFFBFFFFU) -+#define RGX_CR_EVENT_ENABLE_RDM_FC0_FINISHED_EN (0x00040000U) -+#define RGX_CR_EVENT_ENABLE__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) -+#define RGX_CR_EVENT_ENABLE__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) -+#define RGX_CR_EVENT_ENABLE__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) -+#define RGX_CR_EVENT_ENABLE__SIGNALS__TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) -+#define RGX_CR_EVENT_ENABLE__SIGNALS__TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) -+#define RGX_CR_EVENT_ENABLE__SIGNALS__TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) -+#define RGX_CR_EVENT_ENABLE_SHG_FINISHED_SHIFT (17U) -+#define RGX_CR_EVENT_ENABLE_SHG_FINISHED_CLRMSK (0xFFFDFFFFU) -+#define RGX_CR_EVENT_ENABLE_SHG_FINISHED_EN (0x00020000U) -+#define RGX_CR_EVENT_ENABLE__SIGNALS__SPFILTER_SIGNAL_UPDATE_SHIFT (17U) -+#define RGX_CR_EVENT_ENABLE__SIGNALS__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) -+#define RGX_CR_EVENT_ENABLE__SIGNALS__SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) -+#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_SHIFT (16U) -+#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) -+#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_EN (0x00010000U) -+#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_SHIFT (15U) -+#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) 
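The RGX_CR_EVENT_ENABLE bits are plain one-bit enables, so an interrupt mask is simply the OR of the _EN constants the caller cares about, bounded by the variant's MASKFULL value. A short sketch under the same assumptions as above (hypothetical rgx_write64() helper, constants copied from this header):

    #include <stdint.h>

    extern void rgx_write64(uint32_t offset, uint64_t value);  /* hypothetical */

    #define RGX_CR_EVENT_ENABLE                            (0x0128U)
    #define RGX_CR_EVENT_ENABLE_MASKFULL                   UINT64_C(0x00000000FFFFFFFF)
    #define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_EN  (0x20000000U)
    #define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_EN    (0x00010000U)

    /* Enable exactly the requested event sources; MASKFULL guards against stray bits. */
    static void rgx_enable_events(uint64_t events)
    {
        rgx_write64(RGX_CR_EVENT_ENABLE, events & RGX_CR_EVENT_ENABLE_MASKFULL);
    }

    /* e.g. rgx_enable_events(RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_EN |
                              RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_EN); */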
-+#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_EN (0x00008000U) -+#define RGX_CR_EVENT_ENABLE_ZLS_FINISHED_SHIFT (14U) -+#define RGX_CR_EVENT_ENABLE_ZLS_FINISHED_CLRMSK (0xFFFFBFFFU) -+#define RGX_CR_EVENT_ENABLE_ZLS_FINISHED_EN (0x00004000U) -+#define RGX_CR_EVENT_ENABLE_GPIO_ACK_SHIFT (13U) -+#define RGX_CR_EVENT_ENABLE_GPIO_ACK_CLRMSK (0xFFFFDFFFU) -+#define RGX_CR_EVENT_ENABLE_GPIO_ACK_EN (0x00002000U) -+#define RGX_CR_EVENT_ENABLE_GPIO_REQ_SHIFT (12U) -+#define RGX_CR_EVENT_ENABLE_GPIO_REQ_CLRMSK (0xFFFFEFFFU) -+#define RGX_CR_EVENT_ENABLE_GPIO_REQ_EN (0x00001000U) -+#define RGX_CR_EVENT_ENABLE_POWER_ABORT_SHIFT (11U) -+#define RGX_CR_EVENT_ENABLE_POWER_ABORT_CLRMSK (0xFFFFF7FFU) -+#define RGX_CR_EVENT_ENABLE_POWER_ABORT_EN (0x00000800U) -+#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_SHIFT (10U) -+#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) -+#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_EN (0x00000400U) -+#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_SHIFT (9U) -+#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) -+#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_EN (0x00000200U) -+#define RGX_CR_EVENT_ENABLE_PM_3D_MEM_FREE_SHIFT (8U) -+#define RGX_CR_EVENT_ENABLE_PM_3D_MEM_FREE_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_EVENT_ENABLE_PM_3D_MEM_FREE_EN (0x00000100U) -+#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_SHIFT (7U) -+#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) -+#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_EN (0x00000080U) -+#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_SHIFT (6U) -+#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_EN (0x00000040U) -+#define RGX_CR_EVENT_ENABLE_TA_FINISHED_SHIFT (5U) -+#define RGX_CR_EVENT_ENABLE_TA_FINISHED_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_EVENT_ENABLE_TA_FINISHED_EN (0x00000020U) -+#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_SHIFT (4U) -+#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_EN (0x00000010U) -+#define RGX_CR_EVENT_ENABLE_PIXELBE_END_RENDER_SHIFT (3U) -+#define RGX_CR_EVENT_ENABLE_PIXELBE_END_RENDER_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_EVENT_ENABLE_PIXELBE_END_RENDER_EN (0x00000008U) -+#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_SHIFT (2U) -+#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_EN (0x00000004U) -+#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_SHIFT (1U) -+#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_EN (0x00000002U) -+#define RGX_CR_EVENT_ENABLE_TLA_COMPLETE_SHIFT (0U) -+#define RGX_CR_EVENT_ENABLE_TLA_COMPLETE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_EVENT_ENABLE_TLA_COMPLETE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_EVENT_STATUS -+*/ -+#define RGX_CR_EVENT_STATUS (0x0130U) -+#define RGX_CR_EVENT_STATUS__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000E01DFFFF)) -+#define RGX_CR_EVENT_STATUS__SIGNALS__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) -+#define RGX_CR_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT (31U) -+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) -+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN (0x80000000U) -+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT (30U) -+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) -+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN (0x40000000U) -+#define 
RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) -+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) -+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) -+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_SHIFT (28U) -+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_CLRMSK (0xEFFFFFFFU) -+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_EN (0x10000000U) -+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_SHIFT (27U) -+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_CLRMSK (0xF7FFFFFFU) -+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_EN (0x08000000U) -+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_SHIFT (26U) -+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_CLRMSK (0xFBFFFFFFU) -+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_EN (0x04000000U) -+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_SHIFT (25U) -+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_CLRMSK (0xFDFFFFFFU) -+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_EN (0x02000000U) -+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_SHIFT (24U) -+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_CLRMSK (0xFEFFFFFFU) -+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_EN (0x01000000U) -+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_SHIFT (23U) -+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_CLRMSK (0xFF7FFFFFU) -+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_EN (0x00800000U) -+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_SHIFT (22U) -+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_CLRMSK (0xFFBFFFFFU) -+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_EN (0x00400000U) -+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_SHIFT (21U) -+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_CLRMSK (0xFFDFFFFFU) -+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_EN (0x00200000U) -+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_SHIFT (20U) -+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_CLRMSK (0xFFEFFFFFU) -+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_EN (0x00100000U) -+#define RGX_CR_EVENT_STATUS__ROGUEXE__SAFETY_SHIFT (20U) -+#define RGX_CR_EVENT_STATUS__ROGUEXE__SAFETY_CLRMSK (0xFFEFFFFFU) -+#define RGX_CR_EVENT_STATUS__ROGUEXE__SAFETY_EN (0x00100000U) -+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_SHIFT (19U) -+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_CLRMSK (0xFFF7FFFFU) -+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_EN (0x00080000U) -+#define RGX_CR_EVENT_STATUS__ROGUEXE__SLAVE_REQ_SHIFT (19U) -+#define RGX_CR_EVENT_STATUS__ROGUEXE__SLAVE_REQ_CLRMSK (0xFFF7FFFFU) -+#define RGX_CR_EVENT_STATUS__ROGUEXE__SLAVE_REQ_EN (0x00080000U) -+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_SHIFT (18U) -+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_CLRMSK (0xFFFBFFFFU) -+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_EN (0x00040000U) -+#define RGX_CR_EVENT_STATUS__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) -+#define RGX_CR_EVENT_STATUS__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) -+#define RGX_CR_EVENT_STATUS__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) -+#define RGX_CR_EVENT_STATUS__SIGNALS__TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) -+#define RGX_CR_EVENT_STATUS__SIGNALS__TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) -+#define RGX_CR_EVENT_STATUS__SIGNALS__TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) -+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_SHIFT (17U) -+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_CLRMSK (0xFFFDFFFFU) -+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_EN (0x00020000U) -+#define RGX_CR_EVENT_STATUS__SIGNALS__SPFILTER_SIGNAL_UPDATE_SHIFT (17U) -+#define RGX_CR_EVENT_STATUS__SIGNALS__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) -+#define 
RGX_CR_EVENT_STATUS__SIGNALS__SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) -+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT (16U) -+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) -+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN (0x00010000U) -+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_SHIFT (15U) -+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) -+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_EN (0x00008000U) -+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_SHIFT (14U) -+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_CLRMSK (0xFFFFBFFFU) -+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_EN (0x00004000U) -+#define RGX_CR_EVENT_STATUS_GPIO_ACK_SHIFT (13U) -+#define RGX_CR_EVENT_STATUS_GPIO_ACK_CLRMSK (0xFFFFDFFFU) -+#define RGX_CR_EVENT_STATUS_GPIO_ACK_EN (0x00002000U) -+#define RGX_CR_EVENT_STATUS_GPIO_REQ_SHIFT (12U) -+#define RGX_CR_EVENT_STATUS_GPIO_REQ_CLRMSK (0xFFFFEFFFU) -+#define RGX_CR_EVENT_STATUS_GPIO_REQ_EN (0x00001000U) -+#define RGX_CR_EVENT_STATUS_POWER_ABORT_SHIFT (11U) -+#define RGX_CR_EVENT_STATUS_POWER_ABORT_CLRMSK (0xFFFFF7FFU) -+#define RGX_CR_EVENT_STATUS_POWER_ABORT_EN (0x00000800U) -+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT (10U) -+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) -+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN (0x00000400U) -+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT (9U) -+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) -+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN (0x00000200U) -+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_SHIFT (8U) -+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_EN (0x00000100U) -+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT (7U) -+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) -+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN (0x00000080U) -+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_SHIFT (6U) -+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_EN (0x00000040U) -+#define RGX_CR_EVENT_STATUS_TA_FINISHED_SHIFT (5U) -+#define RGX_CR_EVENT_STATUS_TA_FINISHED_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_EVENT_STATUS_TA_FINISHED_EN (0x00000020U) -+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT (4U) -+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_EN (0x00000010U) -+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT (3U) -+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_EN (0x00000008U) -+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT (2U) -+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_EN (0x00000004U) -+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT (1U) -+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_EN (0x00000002U) -+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_SHIFT (0U) -+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_EVENT_CLEAR -+*/ -+#define RGX_CR_EVENT_CLEAR (0x0138U) -+#define RGX_CR_EVENT_CLEAR__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000E01DFFFF)) -+#define RGX_CR_EVENT_CLEAR__SIGNALS__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF)) -+#define RGX_CR_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define 
RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_SHIFT (31U) -+#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU) -+#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_EN (0x80000000U) -+#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_SHIFT (30U) -+#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU) -+#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_EN (0x40000000U) -+#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_SHIFT (29U) -+#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU) -+#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U) -+#define RGX_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_SHIFT (28U) -+#define RGX_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_CLRMSK (0xEFFFFFFFU) -+#define RGX_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_EN (0x10000000U) -+#define RGX_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_SHIFT (27U) -+#define RGX_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_CLRMSK (0xF7FFFFFFU) -+#define RGX_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_EN (0x08000000U) -+#define RGX_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_SHIFT (26U) -+#define RGX_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_CLRMSK (0xFBFFFFFFU) -+#define RGX_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_EN (0x04000000U) -+#define RGX_CR_EVENT_CLEAR_FBA_FC3_FINISHED_SHIFT (25U) -+#define RGX_CR_EVENT_CLEAR_FBA_FC3_FINISHED_CLRMSK (0xFDFFFFFFU) -+#define RGX_CR_EVENT_CLEAR_FBA_FC3_FINISHED_EN (0x02000000U) -+#define RGX_CR_EVENT_CLEAR_FBA_FC2_FINISHED_SHIFT (24U) -+#define RGX_CR_EVENT_CLEAR_FBA_FC2_FINISHED_CLRMSK (0xFEFFFFFFU) -+#define RGX_CR_EVENT_CLEAR_FBA_FC2_FINISHED_EN (0x01000000U) -+#define RGX_CR_EVENT_CLEAR_FBA_FC1_FINISHED_SHIFT (23U) -+#define RGX_CR_EVENT_CLEAR_FBA_FC1_FINISHED_CLRMSK (0xFF7FFFFFU) -+#define RGX_CR_EVENT_CLEAR_FBA_FC1_FINISHED_EN (0x00800000U) -+#define RGX_CR_EVENT_CLEAR_FBA_FC0_FINISHED_SHIFT (22U) -+#define RGX_CR_EVENT_CLEAR_FBA_FC0_FINISHED_CLRMSK (0xFFBFFFFFU) -+#define RGX_CR_EVENT_CLEAR_FBA_FC0_FINISHED_EN (0x00400000U) -+#define RGX_CR_EVENT_CLEAR_RDM_FC3_FINISHED_SHIFT (21U) -+#define RGX_CR_EVENT_CLEAR_RDM_FC3_FINISHED_CLRMSK (0xFFDFFFFFU) -+#define RGX_CR_EVENT_CLEAR_RDM_FC3_FINISHED_EN (0x00200000U) -+#define RGX_CR_EVENT_CLEAR_RDM_FC2_FINISHED_SHIFT (20U) -+#define RGX_CR_EVENT_CLEAR_RDM_FC2_FINISHED_CLRMSK (0xFFEFFFFFU) -+#define RGX_CR_EVENT_CLEAR_RDM_FC2_FINISHED_EN (0x00100000U) -+#define RGX_CR_EVENT_CLEAR__ROGUEXE__SAFETY_SHIFT (20U) -+#define RGX_CR_EVENT_CLEAR__ROGUEXE__SAFETY_CLRMSK (0xFFEFFFFFU) -+#define RGX_CR_EVENT_CLEAR__ROGUEXE__SAFETY_EN (0x00100000U) -+#define RGX_CR_EVENT_CLEAR_RDM_FC1_FINISHED_SHIFT (19U) -+#define RGX_CR_EVENT_CLEAR_RDM_FC1_FINISHED_CLRMSK (0xFFF7FFFFU) -+#define RGX_CR_EVENT_CLEAR_RDM_FC1_FINISHED_EN (0x00080000U) -+#define RGX_CR_EVENT_CLEAR__ROGUEXE__SLAVE_REQ_SHIFT (19U) -+#define RGX_CR_EVENT_CLEAR__ROGUEXE__SLAVE_REQ_CLRMSK (0xFFF7FFFFU) -+#define RGX_CR_EVENT_CLEAR__ROGUEXE__SLAVE_REQ_EN (0x00080000U) -+#define RGX_CR_EVENT_CLEAR_RDM_FC0_FINISHED_SHIFT (18U) -+#define RGX_CR_EVENT_CLEAR_RDM_FC0_FINISHED_CLRMSK (0xFFFBFFFFU) -+#define RGX_CR_EVENT_CLEAR_RDM_FC0_FINISHED_EN (0x00040000U) -+#define RGX_CR_EVENT_CLEAR__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) -+#define RGX_CR_EVENT_CLEAR__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) -+#define RGX_CR_EVENT_CLEAR__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) -+#define RGX_CR_EVENT_CLEAR__SIGNALS__TDM_CONTEXT_STORE_FINISHED_SHIFT (18U) -+#define RGX_CR_EVENT_CLEAR__SIGNALS__TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU) -+#define RGX_CR_EVENT_CLEAR__SIGNALS__TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U) -+#define 
RGX_CR_EVENT_CLEAR_SHG_FINISHED_SHIFT (17U) -+#define RGX_CR_EVENT_CLEAR_SHG_FINISHED_CLRMSK (0xFFFDFFFFU) -+#define RGX_CR_EVENT_CLEAR_SHG_FINISHED_EN (0x00020000U) -+#define RGX_CR_EVENT_CLEAR__SIGNALS__SPFILTER_SIGNAL_UPDATE_SHIFT (17U) -+#define RGX_CR_EVENT_CLEAR__SIGNALS__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU) -+#define RGX_CR_EVENT_CLEAR__SIGNALS__SPFILTER_SIGNAL_UPDATE_EN (0x00020000U) -+#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_SHIFT (16U) -+#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU) -+#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_EN (0x00010000U) -+#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_SHIFT (15U) -+#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_CLRMSK (0xFFFF7FFFU) -+#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_EN (0x00008000U) -+#define RGX_CR_EVENT_CLEAR_ZLS_FINISHED_SHIFT (14U) -+#define RGX_CR_EVENT_CLEAR_ZLS_FINISHED_CLRMSK (0xFFFFBFFFU) -+#define RGX_CR_EVENT_CLEAR_ZLS_FINISHED_EN (0x00004000U) -+#define RGX_CR_EVENT_CLEAR_GPIO_ACK_SHIFT (13U) -+#define RGX_CR_EVENT_CLEAR_GPIO_ACK_CLRMSK (0xFFFFDFFFU) -+#define RGX_CR_EVENT_CLEAR_GPIO_ACK_EN (0x00002000U) -+#define RGX_CR_EVENT_CLEAR_GPIO_REQ_SHIFT (12U) -+#define RGX_CR_EVENT_CLEAR_GPIO_REQ_CLRMSK (0xFFFFEFFFU) -+#define RGX_CR_EVENT_CLEAR_GPIO_REQ_EN (0x00001000U) -+#define RGX_CR_EVENT_CLEAR_POWER_ABORT_SHIFT (11U) -+#define RGX_CR_EVENT_CLEAR_POWER_ABORT_CLRMSK (0xFFFFF7FFU) -+#define RGX_CR_EVENT_CLEAR_POWER_ABORT_EN (0x00000800U) -+#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_SHIFT (10U) -+#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU) -+#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_EN (0x00000400U) -+#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_SHIFT (9U) -+#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU) -+#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_EN (0x00000200U) -+#define RGX_CR_EVENT_CLEAR_PM_3D_MEM_FREE_SHIFT (8U) -+#define RGX_CR_EVENT_CLEAR_PM_3D_MEM_FREE_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_EVENT_CLEAR_PM_3D_MEM_FREE_EN (0x00000100U) -+#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_SHIFT (7U) -+#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU) -+#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_EN (0x00000080U) -+#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_SHIFT (6U) -+#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_EN (0x00000040U) -+#define RGX_CR_EVENT_CLEAR_TA_FINISHED_SHIFT (5U) -+#define RGX_CR_EVENT_CLEAR_TA_FINISHED_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_EVENT_CLEAR_TA_FINISHED_EN (0x00000020U) -+#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_SHIFT (4U) -+#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_EN (0x00000010U) -+#define RGX_CR_EVENT_CLEAR_PIXELBE_END_RENDER_SHIFT (3U) -+#define RGX_CR_EVENT_CLEAR_PIXELBE_END_RENDER_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_EVENT_CLEAR_PIXELBE_END_RENDER_EN (0x00000008U) -+#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_SHIFT (2U) -+#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_EN (0x00000004U) -+#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_SHIFT (1U) -+#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_EN (0x00000002U) -+#define RGX_CR_EVENT_CLEAR_TLA_COMPLETE_SHIFT (0U) -+#define RGX_CR_EVENT_CLEAR_TLA_COMPLETE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_EVENT_CLEAR_TLA_COMPLETE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_TIMER -+*/ -+#define RGX_CR_TIMER (0x0160U) -+#define 
RGX_CR_TIMER_MASKFULL (IMG_UINT64_C(0x8000FFFFFFFFFFFF)) -+#define RGX_CR_TIMER_BIT31_SHIFT (63U) -+#define RGX_CR_TIMER_BIT31_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) -+#define RGX_CR_TIMER_BIT31_EN (IMG_UINT64_C(0x8000000000000000)) -+#define RGX_CR_TIMER_VALUE_SHIFT (0U) -+#define RGX_CR_TIMER_VALUE_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) -+ -+ -+/* -+ Register RGX_CR_TLA_STATUS -+*/ -+#define RGX_CR_TLA_STATUS (0x0178U) -+#define RGX_CR_TLA_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_TLA_STATUS_BLIT_COUNT_SHIFT (39U) -+#define RGX_CR_TLA_STATUS_BLIT_COUNT_CLRMSK (IMG_UINT64_C(0x0000007FFFFFFFFF)) -+#define RGX_CR_TLA_STATUS_REQUEST_SHIFT (7U) -+#define RGX_CR_TLA_STATUS_REQUEST_CLRMSK (IMG_UINT64_C(0xFFFFFF800000007F)) -+#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_SHIFT (1U) -+#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF81)) -+#define RGX_CR_TLA_STATUS_BUSY_SHIFT (0U) -+#define RGX_CR_TLA_STATUS_BUSY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_TLA_STATUS_BUSY_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_PM_PARTIAL_RENDER_ENABLE -+*/ -+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE (0x0338U) -+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT (0U) -+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_SIDEKICK_IDLE -+*/ -+#define RGX_CR_SIDEKICK_IDLE (0x03C8U) -+#define RGX_CR_SIDEKICK_IDLE_MASKFULL (IMG_UINT64_C(0x000000000000007F)) -+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_SHIFT (6U) -+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_EN (0x00000040U) -+#define RGX_CR_SIDEKICK_IDLE_MMU_SHIFT (5U) -+#define RGX_CR_SIDEKICK_IDLE_MMU_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_SIDEKICK_IDLE_MMU_EN (0x00000020U) -+#define RGX_CR_SIDEKICK_IDLE_BIF128_SHIFT (4U) -+#define RGX_CR_SIDEKICK_IDLE_BIF128_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_SIDEKICK_IDLE_BIF128_EN (0x00000010U) -+#define RGX_CR_SIDEKICK_IDLE_TLA_SHIFT (3U) -+#define RGX_CR_SIDEKICK_IDLE_TLA_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_SIDEKICK_IDLE_TLA_EN (0x00000008U) -+#define RGX_CR_SIDEKICK_IDLE_GARTEN_SHIFT (2U) -+#define RGX_CR_SIDEKICK_IDLE_GARTEN_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_SIDEKICK_IDLE_GARTEN_EN (0x00000004U) -+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_SHIFT (1U) -+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_EN (0x00000002U) -+#define RGX_CR_SIDEKICK_IDLE_SOCIF_SHIFT (0U) -+#define RGX_CR_SIDEKICK_IDLE_SOCIF_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_SIDEKICK_IDLE_SOCIF_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_MARS_IDLE -+*/ -+#define RGX_CR_MARS_IDLE (0x08F8U) -+#define RGX_CR_MARS_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000000007)) -+#define RGX_CR_MARS_IDLE_MH_SYSARB0_SHIFT (2U) -+#define RGX_CR_MARS_IDLE_MH_SYSARB0_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_MARS_IDLE_MH_SYSARB0_EN (0x00000004U) -+#define RGX_CR_MARS_IDLE_CPU_SHIFT (1U) -+#define RGX_CR_MARS_IDLE_CPU_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_MARS_IDLE_CPU_EN (0x00000002U) -+#define RGX_CR_MARS_IDLE_SOCIF_SHIFT (0U) -+#define RGX_CR_MARS_IDLE_SOCIF_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_MARS_IDLE_SOCIF_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_VDM_CONTEXT_STORE_STATUS -+*/ -+#define RGX_CR_VDM_CONTEXT_STORE_STATUS (0x0430U) -+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_MASKFULL 
(IMG_UINT64_C(0x00000000000000F3)) -+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_SHIFT (4U) -+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_CLRMSK (0xFFFFFF0FU) -+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U) -+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0x00000002U) -+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U) -+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_VDM_CONTEXT_STORE_TASK0 -+*/ -+#define RGX_CR_VDM_CONTEXT_STORE_TASK0 (0x0438U) -+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_SHIFT (32U) -+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_SHIFT (0U) -+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) -+ -+ -+/* -+ Register RGX_CR_VDM_CONTEXT_STORE_TASK1 -+*/ -+#define RGX_CR_VDM_CONTEXT_STORE_TASK1 (0x0440U) -+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_SHIFT (0U) -+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_VDM_CONTEXT_STORE_TASK2 -+*/ -+#define RGX_CR_VDM_CONTEXT_STORE_TASK2 (0x0448U) -+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_SHIFT (32U) -+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_SHIFT (0U) -+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) -+ -+ -+/* -+ Register RGX_CR_VDM_CONTEXT_RESUME_TASK0 -+*/ -+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0 (0x0450U) -+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_SHIFT (32U) -+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_SHIFT (0U) -+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) -+ -+ -+/* -+ Register RGX_CR_VDM_CONTEXT_RESUME_TASK1 -+*/ -+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1 (0x0458U) -+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_SHIFT (0U) -+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_VDM_CONTEXT_RESUME_TASK2 -+*/ -+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2 (0x0460U) -+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_SHIFT (32U) -+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_SHIFT (0U) -+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) -+ -+ -+/* -+ Register RGX_CR_CDM_CONTEXT_STORE_STATUS -+*/ -+#define RGX_CR_CDM_CONTEXT_STORE_STATUS (0x04A0U) -+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -+#define 
RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U) -+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0x00000002U) -+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U) -+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_CDM_CONTEXT_PDS0 -+*/ -+#define RGX_CR_CDM_CONTEXT_PDS0 (0x04A8U) -+#define RGX_CR_CDM_CONTEXT_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) -+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT (36U) -+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) -+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT (4U) -+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE (16U) -+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT (4U) -+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) -+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT (4U) -+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE (16U) -+ -+ -+/* -+ Register RGX_CR_CDM_CONTEXT_PDS1 -+*/ -+#define RGX_CR_CDM_CONTEXT_PDS1 (0x04B0U) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF)) -+#define RGX_CR_CDM_CONTEXT_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U) -+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT (29U) -+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK (0xDFFFFFFFU) -+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN (0x20000000U) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U) -+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT (28U) -+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK (0xEFFFFFFFU) -+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN (0x10000000U) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_SHIFT (28U) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_EN (0x10000000U) -+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT (27U) -+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK (0xF7FFFFFFU) -+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_EN (0x08000000U) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU) -+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT (21U) -+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK (0xF81FFFFFU) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U) -+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_SHIFT (20U) -+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_CLRMSK (0xFFEFFFFFU) -+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_EN (0x00100000U) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU) -+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_SHIFT (11U) -+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_CLRMSK (0xFFF007FFU) 
-+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT (7U) -+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK (0xFFFFF87FU) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U) -+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU) -+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT (1U) -+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK (0xFFFFFF81U) -+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_SHIFT (0U) -+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_CDM_TERMINATE_PDS -+*/ -+#define RGX_CR_CDM_TERMINATE_PDS (0x04B8U) -+#define RGX_CR_CDM_TERMINATE_PDS_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) -+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT (36U) -+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) -+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT (4U) -+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE (16U) -+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT (4U) -+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) -+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT (4U) -+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE (16U) -+ -+ -+/* -+ Register RGX_CR_CDM_TERMINATE_PDS1 -+*/ -+#define RGX_CR_CDM_TERMINATE_PDS1 (0x04C0U) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF)) -+#define RGX_CR_CDM_TERMINATE_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U) -+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT (29U) -+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK (0xDFFFFFFFU) -+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN (0x20000000U) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U) -+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT (28U) -+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK (0xEFFFFFFFU) -+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN (0x10000000U) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_SHIFT (28U) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_EN (0x10000000U) -+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_SHIFT (27U) -+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_CLRMSK (0xF7FFFFFFU) -+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_EN (0x08000000U) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU) -+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT (21U) -+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK (0xF81FFFFFU) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U) -+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_SHIFT (20U) -+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_CLRMSK (0xFFEFFFFFU) -+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_EN (0x00100000U) -+#define 
RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU) -+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_SHIFT (11U) -+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_CLRMSK (0xFFF007FFU) -+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT (7U) -+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK (0xFFFFF87FU) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U) -+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU) -+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT (1U) -+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK (0xFFFFFF81U) -+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_SHIFT (0U) -+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_CDM_CONTEXT_LOAD_PDS0 -+*/ -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0 (0x04D8U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0)) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT (36U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT (4U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE (16U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT (4U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F)) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT (4U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE (16U) -+ -+ -+/* -+ Register RGX_CR_CDM_CONTEXT_LOAD_PDS1 -+*/ -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1 (0x04E0U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF)) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT (29U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK (0xDFFFFFFFU) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN (0x20000000U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT (28U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK (0xEFFFFFFFU) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN (0x10000000U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_SHIFT (28U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_EN (0x10000000U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_SHIFT (27U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_CLRMSK (0xF7FFFFFFU) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_EN (0x08000000U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT (21U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK (0xF81FFFFFU) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U) 
-+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_SHIFT (20U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_CLRMSK (0xFFEFFFFFU) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_EN (0x00100000U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_SHIFT (11U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_CLRMSK (0xFFF007FFU) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT (7U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK (0xFFFFF87FU) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT (1U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK (0xFFFFFF81U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_SHIFT (0U) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_MIPS_WRAPPER_CONFIG -+*/ -+#define RGX_CR_MIPS_WRAPPER_CONFIG (0x0810U) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x000001030F01FFFF)) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_SHIFT (40U) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_EN (IMG_UINT64_C(0x0000010000000000)) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_SHIFT (33U) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_EN (IMG_UINT64_C(0x0000000200000000)) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_SHIFT (32U) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_EN (IMG_UINT64_C(0x0000000100000000)) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_SHIFT (25U) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF1FFFFFF)) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_SHIFT (24U) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_EN (IMG_UINT64_C(0x0000000001000000)) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_SHIFT (16U) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MIPS32 (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS (IMG_UINT64_C(0x0000000000010000)) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_SHIFT (0U) -+#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG1 -+*/ -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1 (0x0818U) -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_SHIFT (12U) -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_SHIFT (0U) -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG2 -+*/ -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2 (0x0820U) -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_SHIFT (12U) -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT (6U) -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_SHIFT (5U) -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) -+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) -+ -+ -+/* -+ Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG1 -+*/ -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1 (0x0828U) -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_SHIFT (12U) -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_SHIFT (0U) -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG2 -+*/ -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2 (0x0830U) -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_SHIFT (12U) -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_SHIFT (6U) -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_SHIFT (5U) -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) -+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) -+ -+ -+/* -+ Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG1 -+*/ -+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1 (0x0838U) -+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) -+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_SHIFT (12U) -+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) -+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_SHIFT (0U) -+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG2 -+*/ -+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2 (0x0840U) -+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) -+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_SHIFT (12U) -+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define 
RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_SHIFT (6U) -+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) -+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_SHIFT (5U) -+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) -+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) -+ -+ -+/* -+ Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG1 -+*/ -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1 (0x0848U) -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_SHIFT (12U) -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_SHIFT (0U) -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG2 -+*/ -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2 (0x0850U) -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_SHIFT (12U) -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_SHIFT (6U) -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_SHIFT (5U) -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) -+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) -+ -+ -+/* -+ Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG1 -+*/ -+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1 (0x0858U) -+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001)) -+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_SHIFT (12U) -+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) -+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_SHIFT (0U) -+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG2 -+*/ -+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2 (0x0860U) -+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF)) -+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_SHIFT (12U) -+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_SHIFT (6U) -+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F)) -+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_SHIFT (5U) -+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_SHIFT (0U) -+#define 
RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0)) -+ -+ -+/* -+ Register RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS -+*/ -+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS (0x0868U) -+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_MASKFULL (IMG_UINT64_C(0x00000001FFFFFFFF)) -+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_SHIFT (32U) -+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_EN (IMG_UINT64_C(0x0000000100000000)) -+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_SHIFT (0U) -+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) -+ -+ -+/* -+ Register RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR -+*/ -+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR (0x0870U) -+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_SHIFT (0U) -+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG -+*/ -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG (0x0878U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MASKFULL (IMG_UINT64_C(0xFFFFFFF7FFFFFFBF)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_SHIFT (36U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_SHIFT (32U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_SHIFT (12U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_SHIFT (11U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_EN (IMG_UINT64_C(0x0000000000000800)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_SHIFT (7U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF87F)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4KB (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16KB (IMG_UINT64_C(0x0000000000000080)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB (IMG_UINT64_C(0x0000000000000100)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB (IMG_UINT64_C(0x0000000000000180)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_1MB (IMG_UINT64_C(0x0000000000000200)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4MB (IMG_UINT64_C(0x0000000000000280)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16MB (IMG_UINT64_C(0x0000000000000300)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64MB (IMG_UINT64_C(0x0000000000000380)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256MB (IMG_UINT64_C(0x0000000000000400)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT (1U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC1)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_SHIFT (0U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define 
RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_MIPS_ADDR_REMAP_RANGE_READ -+*/ -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ (0x0880U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_MASKFULL (IMG_UINT64_C(0x000000000000003F)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_SHIFT (1U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_CLRMSK (0xFFFFFFC1U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_SHIFT (0U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA -+*/ -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA (0x0888U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MASKFULL (IMG_UINT64_C(0xFFFFFFF7FFFFFF81)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_SHIFT (36U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_SHIFT (32U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_SHIFT (12U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_SHIFT (11U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_EN (IMG_UINT64_C(0x0000000000000800)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_SHIFT (7U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF87F)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_SHIFT (0U) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_MIPS_WRAPPER_IRQ_ENABLE -+*/ -+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE (0x08A0U) -+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_SHIFT (0U) -+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_MIPS_WRAPPER_IRQ_STATUS -+*/ -+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS (0x08A8U) -+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_SHIFT (0U) -+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_MIPS_WRAPPER_IRQ_CLEAR -+*/ -+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR (0x08B0U) -+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_SHIFT (0U) -+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_MIPS_WRAPPER_NMI_ENABLE -+*/ -+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE (0x08B8U) -+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_SHIFT (0U) -+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN (0x00000001U) -+ -+ -+/* -+ Register 
RGX_CR_MIPS_WRAPPER_NMI_EVENT -+*/ -+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT (0x08C0U) -+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_SHIFT (0U) -+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_MIPS_DEBUG_CONFIG -+*/ -+#define RGX_CR_MIPS_DEBUG_CONFIG (0x08C8U) -+#define RGX_CR_MIPS_DEBUG_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_SHIFT (0U) -+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_MIPS_EXCEPTION_STATUS -+*/ -+#define RGX_CR_MIPS_EXCEPTION_STATUS (0x08D0U) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000003F)) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_SHIFT (5U) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_EN (0x00000020U) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_SHIFT (4U) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN (0x00000010U) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_SHIFT (3U) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_EN (0x00000008U) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_SHIFT (2U) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_EN (0x00000004U) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_SHIFT (1U) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_EN (0x00000002U) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_SHIFT (0U) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_MIPS_WRAPPER_STATUS -+*/ -+#define RGX_CR_MIPS_WRAPPER_STATUS (0x08E8U) -+#define RGX_CR_MIPS_WRAPPER_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_SHIFT (0U) -+#define RGX_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_XPU_BROADCAST -+*/ -+#define RGX_CR_XPU_BROADCAST (0x0890U) -+#define RGX_CR_XPU_BROADCAST_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -+#define RGX_CR_XPU_BROADCAST_MASK_SHIFT (0U) -+#define RGX_CR_XPU_BROADCAST_MASK_CLRMSK (0xFFFFFE00U) -+ -+ -+/* -+ Register RGX_CR_META_SP_MSLVDATAX -+*/ -+#define RGX_CR_META_SP_MSLVDATAX (0x0A00U) -+#define RGX_CR_META_SP_MSLVDATAX_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT (0U) -+#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_META_SP_MSLVDATAT -+*/ -+#define RGX_CR_META_SP_MSLVDATAT (0x0A08U) -+#define RGX_CR_META_SP_MSLVDATAT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT (0U) -+#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_META_SP_MSLVCTRL0 -+*/ -+#define RGX_CR_META_SP_MSLVCTRL0 (0x0A10U) -+#define RGX_CR_META_SP_MSLVCTRL0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_META_SP_MSLVCTRL0_ADDR_SHIFT (2U) -+#define 
RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK (0x00000003U) -+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT (1U) -+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN (0x00000002U) -+#define RGX_CR_META_SP_MSLVCTRL0_RD_SHIFT (0U) -+#define RGX_CR_META_SP_MSLVCTRL0_RD_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_META_SP_MSLVCTRL0_RD_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_META_SP_MSLVCTRL1 -+*/ -+#define RGX_CR_META_SP_MSLVCTRL1 (0x0A18U) -+#define RGX_CR_META_SP_MSLVCTRL1_MASKFULL (IMG_UINT64_C(0x00000000F7F4003F)) -+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT (30U) -+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK (0x3FFFFFFFU) -+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT (29U) -+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU) -+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN (0x20000000U) -+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT (28U) -+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU) -+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN (0x10000000U) -+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT (26U) -+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU) -+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN (0x04000000U) -+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT (25U) -+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK (0xFDFFFFFFU) -+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN (0x02000000U) -+#define RGX_CR_META_SP_MSLVCTRL1_READY_SHIFT (24U) -+#define RGX_CR_META_SP_MSLVCTRL1_READY_CLRMSK (0xFEFFFFFFU) -+#define RGX_CR_META_SP_MSLVCTRL1_READY_EN (0x01000000U) -+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT (21U) -+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK (0xFF1FFFFFU) -+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT (20U) -+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK (0xFFEFFFFFU) -+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_EN (0x00100000U) -+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT (18U) -+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK (0xFFFBFFFFU) -+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN (0x00040000U) -+#define RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT (4U) -+#define RGX_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK (0xFFFFFFCFU) -+#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT (2U) -+#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK (0xFFFFFFF3U) -+#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT (0U) -+#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK (0xFFFFFFFCU) -+ -+ -+/* -+ Register RGX_CR_META_SP_MSLVHANDSHKE -+*/ -+#define RGX_CR_META_SP_MSLVHANDSHKE (0x0A50U) -+#define RGX_CR_META_SP_MSLVHANDSHKE_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -+#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT (2U) -+#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK (0xFFFFFFF3U) -+#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT (0U) -+#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK (0xFFFFFFFCU) -+ -+ -+/* -+ Register RGX_CR_META_SP_MSLVT0KICK -+*/ -+#define RGX_CR_META_SP_MSLVT0KICK (0x0A80U) -+#define RGX_CR_META_SP_MSLVT0KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT (0U) -+#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_META_SP_MSLVT0KICKI -+*/ -+#define RGX_CR_META_SP_MSLVT0KICKI (0x0A88U) -+#define RGX_CR_META_SP_MSLVT0KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT (0U) -+#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK 
(0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_META_SP_MSLVT1KICK -+*/ -+#define RGX_CR_META_SP_MSLVT1KICK (0x0A90U) -+#define RGX_CR_META_SP_MSLVT1KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT (0U) -+#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_META_SP_MSLVT1KICKI -+*/ -+#define RGX_CR_META_SP_MSLVT1KICKI (0x0A98U) -+#define RGX_CR_META_SP_MSLVT1KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT (0U) -+#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_META_SP_MSLVT2KICK -+*/ -+#define RGX_CR_META_SP_MSLVT2KICK (0x0AA0U) -+#define RGX_CR_META_SP_MSLVT2KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT (0U) -+#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_META_SP_MSLVT2KICKI -+*/ -+#define RGX_CR_META_SP_MSLVT2KICKI (0x0AA8U) -+#define RGX_CR_META_SP_MSLVT2KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT (0U) -+#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_META_SP_MSLVT3KICK -+*/ -+#define RGX_CR_META_SP_MSLVT3KICK (0x0AB0U) -+#define RGX_CR_META_SP_MSLVT3KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT (0U) -+#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_META_SP_MSLVT3KICKI -+*/ -+#define RGX_CR_META_SP_MSLVT3KICKI (0x0AB8U) -+#define RGX_CR_META_SP_MSLVT3KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT (0U) -+#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_META_SP_MSLVRST -+*/ -+#define RGX_CR_META_SP_MSLVRST (0x0AC0U) -+#define RGX_CR_META_SP_MSLVRST_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_SHIFT (0U) -+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_META_SP_MSLVIRQSTATUS -+*/ -+#define RGX_CR_META_SP_MSLVIRQSTATUS (0x0AC8U) -+#define RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL (IMG_UINT64_C(0x000000000000000C)) -+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT (3U) -+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN (0x00000008U) -+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT (2U) -+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN (0x00000004U) -+ -+ -+/* -+ Register RGX_CR_META_SP_MSLVIRQENABLE -+*/ -+#define RGX_CR_META_SP_MSLVIRQENABLE (0x0AD0U) -+#define RGX_CR_META_SP_MSLVIRQENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000C)) -+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT (3U) -+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_EN (0x00000008U) -+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT (2U) -+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_EN (0x00000004U) -+ -+ -+/* -+ Register RGX_CR_META_SP_MSLVIRQLEVEL -+*/ -+#define RGX_CR_META_SP_MSLVIRQLEVEL (0x0AD8U) -+#define RGX_CR_META_SP_MSLVIRQLEVEL_MASKFULL (IMG_UINT64_C(0x0000000000000001)) 
-+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT (0U) -+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_MTS_SCHEDULE -+*/ -+#define RGX_CR_MTS_SCHEDULE (0x0B00U) -+#define RGX_CR_MTS_SCHEDULE_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -+#define RGX_CR_MTS_SCHEDULE_HOST_SHIFT (8U) -+#define RGX_CR_MTS_SCHEDULE_HOST_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_MTS_SCHEDULE_HOST_BG_TIMER (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE_HOST_HOST (0x00000100U) -+#define RGX_CR_MTS_SCHEDULE_PRIORITY_SHIFT (6U) -+#define RGX_CR_MTS_SCHEDULE_PRIORITY_CLRMSK (0xFFFFFF3FU) -+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0 (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT1 (0x00000040U) -+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT2 (0x00000080U) -+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT3 (0x000000C0U) -+#define RGX_CR_MTS_SCHEDULE_CONTEXT_SHIFT (5U) -+#define RGX_CR_MTS_SCHEDULE_CONTEXT_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_MTS_SCHEDULE_CONTEXT_BGCTX (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX (0x00000020U) -+#define RGX_CR_MTS_SCHEDULE_TASK_SHIFT (4U) -+#define RGX_CR_MTS_SCHEDULE_TASK_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE_TASK_COUNTED (0x00000010U) -+#define RGX_CR_MTS_SCHEDULE_DM_SHIFT (0U) -+#define RGX_CR_MTS_SCHEDULE_DM_CLRMSK (0xFFFFFFF0U) -+#define RGX_CR_MTS_SCHEDULE_DM_DM0 (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE_DM_DM1 (0x00000001U) -+#define RGX_CR_MTS_SCHEDULE_DM_DM2 (0x00000002U) -+#define RGX_CR_MTS_SCHEDULE_DM_DM3 (0x00000003U) -+#define RGX_CR_MTS_SCHEDULE_DM_DM4 (0x00000004U) -+#define RGX_CR_MTS_SCHEDULE_DM_DM5 (0x00000005U) -+#define RGX_CR_MTS_SCHEDULE_DM_DM6 (0x00000006U) -+#define RGX_CR_MTS_SCHEDULE_DM_DM7 (0x00000007U) -+#define RGX_CR_MTS_SCHEDULE_DM_DM_ALL (0x0000000FU) -+ -+ -+/* -+ Register RGX_CR_MTS_SCHEDULE1 -+*/ -+#define RGX_CR_MTS_SCHEDULE1 (0x10B00U) -+#define RGX_CR_MTS_SCHEDULE1_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -+#define RGX_CR_MTS_SCHEDULE1_HOST_SHIFT (8U) -+#define RGX_CR_MTS_SCHEDULE1_HOST_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_MTS_SCHEDULE1_HOST_BG_TIMER (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE1_HOST_HOST (0x00000100U) -+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_SHIFT (6U) -+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK (0xFFFFFF3FU) -+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT0 (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT1 (0x00000040U) -+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT2 (0x00000080U) -+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT3 (0x000000C0U) -+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_SHIFT (5U) -+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_BGCTX (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_INTCTX (0x00000020U) -+#define RGX_CR_MTS_SCHEDULE1_TASK_SHIFT (4U) -+#define RGX_CR_MTS_SCHEDULE1_TASK_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_MTS_SCHEDULE1_TASK_NON_COUNTED (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE1_TASK_COUNTED (0x00000010U) -+#define RGX_CR_MTS_SCHEDULE1_DM_SHIFT (0U) -+#define RGX_CR_MTS_SCHEDULE1_DM_CLRMSK (0xFFFFFFF0U) -+#define RGX_CR_MTS_SCHEDULE1_DM_DM0 (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE1_DM_DM1 (0x00000001U) -+#define RGX_CR_MTS_SCHEDULE1_DM_DM2 (0x00000002U) -+#define RGX_CR_MTS_SCHEDULE1_DM_DM3 (0x00000003U) -+#define RGX_CR_MTS_SCHEDULE1_DM_DM4 (0x00000004U) -+#define RGX_CR_MTS_SCHEDULE1_DM_DM5 (0x00000005U) -+#define RGX_CR_MTS_SCHEDULE1_DM_DM6 (0x00000006U) 
-+#define RGX_CR_MTS_SCHEDULE1_DM_DM7 (0x00000007U) -+#define RGX_CR_MTS_SCHEDULE1_DM_DM_ALL (0x0000000FU) -+ -+ -+/* -+ Register RGX_CR_MTS_SCHEDULE2 -+*/ -+#define RGX_CR_MTS_SCHEDULE2 (0x20B00U) -+#define RGX_CR_MTS_SCHEDULE2_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -+#define RGX_CR_MTS_SCHEDULE2_HOST_SHIFT (8U) -+#define RGX_CR_MTS_SCHEDULE2_HOST_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_MTS_SCHEDULE2_HOST_BG_TIMER (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE2_HOST_HOST (0x00000100U) -+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_SHIFT (6U) -+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK (0xFFFFFF3FU) -+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT0 (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT1 (0x00000040U) -+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT2 (0x00000080U) -+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT3 (0x000000C0U) -+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_SHIFT (5U) -+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_BGCTX (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_INTCTX (0x00000020U) -+#define RGX_CR_MTS_SCHEDULE2_TASK_SHIFT (4U) -+#define RGX_CR_MTS_SCHEDULE2_TASK_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_MTS_SCHEDULE2_TASK_NON_COUNTED (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE2_TASK_COUNTED (0x00000010U) -+#define RGX_CR_MTS_SCHEDULE2_DM_SHIFT (0U) -+#define RGX_CR_MTS_SCHEDULE2_DM_CLRMSK (0xFFFFFFF0U) -+#define RGX_CR_MTS_SCHEDULE2_DM_DM0 (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE2_DM_DM1 (0x00000001U) -+#define RGX_CR_MTS_SCHEDULE2_DM_DM2 (0x00000002U) -+#define RGX_CR_MTS_SCHEDULE2_DM_DM3 (0x00000003U) -+#define RGX_CR_MTS_SCHEDULE2_DM_DM4 (0x00000004U) -+#define RGX_CR_MTS_SCHEDULE2_DM_DM5 (0x00000005U) -+#define RGX_CR_MTS_SCHEDULE2_DM_DM6 (0x00000006U) -+#define RGX_CR_MTS_SCHEDULE2_DM_DM7 (0x00000007U) -+#define RGX_CR_MTS_SCHEDULE2_DM_DM_ALL (0x0000000FU) -+ -+ -+/* -+ Register RGX_CR_MTS_SCHEDULE3 -+*/ -+#define RGX_CR_MTS_SCHEDULE3 (0x30B00U) -+#define RGX_CR_MTS_SCHEDULE3_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -+#define RGX_CR_MTS_SCHEDULE3_HOST_SHIFT (8U) -+#define RGX_CR_MTS_SCHEDULE3_HOST_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_MTS_SCHEDULE3_HOST_BG_TIMER (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE3_HOST_HOST (0x00000100U) -+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_SHIFT (6U) -+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK (0xFFFFFF3FU) -+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT0 (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT1 (0x00000040U) -+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT2 (0x00000080U) -+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT3 (0x000000C0U) -+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_SHIFT (5U) -+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_BGCTX (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_INTCTX (0x00000020U) -+#define RGX_CR_MTS_SCHEDULE3_TASK_SHIFT (4U) -+#define RGX_CR_MTS_SCHEDULE3_TASK_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_MTS_SCHEDULE3_TASK_NON_COUNTED (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE3_TASK_COUNTED (0x00000010U) -+#define RGX_CR_MTS_SCHEDULE3_DM_SHIFT (0U) -+#define RGX_CR_MTS_SCHEDULE3_DM_CLRMSK (0xFFFFFFF0U) -+#define RGX_CR_MTS_SCHEDULE3_DM_DM0 (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE3_DM_DM1 (0x00000001U) -+#define RGX_CR_MTS_SCHEDULE3_DM_DM2 (0x00000002U) -+#define RGX_CR_MTS_SCHEDULE3_DM_DM3 (0x00000003U) -+#define RGX_CR_MTS_SCHEDULE3_DM_DM4 (0x00000004U) -+#define RGX_CR_MTS_SCHEDULE3_DM_DM5 (0x00000005U) -+#define RGX_CR_MTS_SCHEDULE3_DM_DM6 (0x00000006U) -+#define RGX_CR_MTS_SCHEDULE3_DM_DM7 
(0x00000007U) -+#define RGX_CR_MTS_SCHEDULE3_DM_DM_ALL (0x0000000FU) -+ -+ -+/* -+ Register RGX_CR_MTS_SCHEDULE4 -+*/ -+#define RGX_CR_MTS_SCHEDULE4 (0x40B00U) -+#define RGX_CR_MTS_SCHEDULE4_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -+#define RGX_CR_MTS_SCHEDULE4_HOST_SHIFT (8U) -+#define RGX_CR_MTS_SCHEDULE4_HOST_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_MTS_SCHEDULE4_HOST_BG_TIMER (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE4_HOST_HOST (0x00000100U) -+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_SHIFT (6U) -+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK (0xFFFFFF3FU) -+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT0 (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT1 (0x00000040U) -+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT2 (0x00000080U) -+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT3 (0x000000C0U) -+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_SHIFT (5U) -+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_BGCTX (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_INTCTX (0x00000020U) -+#define RGX_CR_MTS_SCHEDULE4_TASK_SHIFT (4U) -+#define RGX_CR_MTS_SCHEDULE4_TASK_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_MTS_SCHEDULE4_TASK_NON_COUNTED (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE4_TASK_COUNTED (0x00000010U) -+#define RGX_CR_MTS_SCHEDULE4_DM_SHIFT (0U) -+#define RGX_CR_MTS_SCHEDULE4_DM_CLRMSK (0xFFFFFFF0U) -+#define RGX_CR_MTS_SCHEDULE4_DM_DM0 (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE4_DM_DM1 (0x00000001U) -+#define RGX_CR_MTS_SCHEDULE4_DM_DM2 (0x00000002U) -+#define RGX_CR_MTS_SCHEDULE4_DM_DM3 (0x00000003U) -+#define RGX_CR_MTS_SCHEDULE4_DM_DM4 (0x00000004U) -+#define RGX_CR_MTS_SCHEDULE4_DM_DM5 (0x00000005U) -+#define RGX_CR_MTS_SCHEDULE4_DM_DM6 (0x00000006U) -+#define RGX_CR_MTS_SCHEDULE4_DM_DM7 (0x00000007U) -+#define RGX_CR_MTS_SCHEDULE4_DM_DM_ALL (0x0000000FU) -+ -+ -+/* -+ Register RGX_CR_MTS_SCHEDULE5 -+*/ -+#define RGX_CR_MTS_SCHEDULE5 (0x50B00U) -+#define RGX_CR_MTS_SCHEDULE5_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -+#define RGX_CR_MTS_SCHEDULE5_HOST_SHIFT (8U) -+#define RGX_CR_MTS_SCHEDULE5_HOST_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_MTS_SCHEDULE5_HOST_BG_TIMER (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE5_HOST_HOST (0x00000100U) -+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_SHIFT (6U) -+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK (0xFFFFFF3FU) -+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT0 (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT1 (0x00000040U) -+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT2 (0x00000080U) -+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT3 (0x000000C0U) -+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_SHIFT (5U) -+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_BGCTX (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_INTCTX (0x00000020U) -+#define RGX_CR_MTS_SCHEDULE5_TASK_SHIFT (4U) -+#define RGX_CR_MTS_SCHEDULE5_TASK_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_MTS_SCHEDULE5_TASK_NON_COUNTED (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE5_TASK_COUNTED (0x00000010U) -+#define RGX_CR_MTS_SCHEDULE5_DM_SHIFT (0U) -+#define RGX_CR_MTS_SCHEDULE5_DM_CLRMSK (0xFFFFFFF0U) -+#define RGX_CR_MTS_SCHEDULE5_DM_DM0 (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE5_DM_DM1 (0x00000001U) -+#define RGX_CR_MTS_SCHEDULE5_DM_DM2 (0x00000002U) -+#define RGX_CR_MTS_SCHEDULE5_DM_DM3 (0x00000003U) -+#define RGX_CR_MTS_SCHEDULE5_DM_DM4 (0x00000004U) -+#define RGX_CR_MTS_SCHEDULE5_DM_DM5 (0x00000005U) -+#define RGX_CR_MTS_SCHEDULE5_DM_DM6 (0x00000006U) -+#define RGX_CR_MTS_SCHEDULE5_DM_DM7 (0x00000007U) -+#define 
RGX_CR_MTS_SCHEDULE5_DM_DM_ALL (0x0000000FU) -+ -+ -+/* -+ Register RGX_CR_MTS_SCHEDULE6 -+*/ -+#define RGX_CR_MTS_SCHEDULE6 (0x60B00U) -+#define RGX_CR_MTS_SCHEDULE6_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -+#define RGX_CR_MTS_SCHEDULE6_HOST_SHIFT (8U) -+#define RGX_CR_MTS_SCHEDULE6_HOST_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_MTS_SCHEDULE6_HOST_BG_TIMER (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE6_HOST_HOST (0x00000100U) -+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_SHIFT (6U) -+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK (0xFFFFFF3FU) -+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT0 (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT1 (0x00000040U) -+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT2 (0x00000080U) -+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT3 (0x000000C0U) -+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_SHIFT (5U) -+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_BGCTX (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_INTCTX (0x00000020U) -+#define RGX_CR_MTS_SCHEDULE6_TASK_SHIFT (4U) -+#define RGX_CR_MTS_SCHEDULE6_TASK_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_MTS_SCHEDULE6_TASK_NON_COUNTED (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE6_TASK_COUNTED (0x00000010U) -+#define RGX_CR_MTS_SCHEDULE6_DM_SHIFT (0U) -+#define RGX_CR_MTS_SCHEDULE6_DM_CLRMSK (0xFFFFFFF0U) -+#define RGX_CR_MTS_SCHEDULE6_DM_DM0 (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE6_DM_DM1 (0x00000001U) -+#define RGX_CR_MTS_SCHEDULE6_DM_DM2 (0x00000002U) -+#define RGX_CR_MTS_SCHEDULE6_DM_DM3 (0x00000003U) -+#define RGX_CR_MTS_SCHEDULE6_DM_DM4 (0x00000004U) -+#define RGX_CR_MTS_SCHEDULE6_DM_DM5 (0x00000005U) -+#define RGX_CR_MTS_SCHEDULE6_DM_DM6 (0x00000006U) -+#define RGX_CR_MTS_SCHEDULE6_DM_DM7 (0x00000007U) -+#define RGX_CR_MTS_SCHEDULE6_DM_DM_ALL (0x0000000FU) -+ -+ -+/* -+ Register RGX_CR_MTS_SCHEDULE7 -+*/ -+#define RGX_CR_MTS_SCHEDULE7 (0x70B00U) -+#define RGX_CR_MTS_SCHEDULE7_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -+#define RGX_CR_MTS_SCHEDULE7_HOST_SHIFT (8U) -+#define RGX_CR_MTS_SCHEDULE7_HOST_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_MTS_SCHEDULE7_HOST_BG_TIMER (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE7_HOST_HOST (0x00000100U) -+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_SHIFT (6U) -+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK (0xFFFFFF3FU) -+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT0 (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT1 (0x00000040U) -+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT2 (0x00000080U) -+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT3 (0x000000C0U) -+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_SHIFT (5U) -+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_BGCTX (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_INTCTX (0x00000020U) -+#define RGX_CR_MTS_SCHEDULE7_TASK_SHIFT (4U) -+#define RGX_CR_MTS_SCHEDULE7_TASK_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_MTS_SCHEDULE7_TASK_NON_COUNTED (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE7_TASK_COUNTED (0x00000010U) -+#define RGX_CR_MTS_SCHEDULE7_DM_SHIFT (0U) -+#define RGX_CR_MTS_SCHEDULE7_DM_CLRMSK (0xFFFFFFF0U) -+#define RGX_CR_MTS_SCHEDULE7_DM_DM0 (0x00000000U) -+#define RGX_CR_MTS_SCHEDULE7_DM_DM1 (0x00000001U) -+#define RGX_CR_MTS_SCHEDULE7_DM_DM2 (0x00000002U) -+#define RGX_CR_MTS_SCHEDULE7_DM_DM3 (0x00000003U) -+#define RGX_CR_MTS_SCHEDULE7_DM_DM4 (0x00000004U) -+#define RGX_CR_MTS_SCHEDULE7_DM_DM5 (0x00000005U) -+#define RGX_CR_MTS_SCHEDULE7_DM_DM6 (0x00000006U) -+#define RGX_CR_MTS_SCHEDULE7_DM_DM7 (0x00000007U) -+#define RGX_CR_MTS_SCHEDULE7_DM_DM_ALL 
(0x0000000FU) -+ -+ -+/* -+ Register RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC -+*/ -+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC (0x0B30U) -+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) -+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC -+*/ -+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC (0x0B38U) -+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) -+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC -+*/ -+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC (0x0B40U) -+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) -+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC -+*/ -+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC (0x0B48U) -+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) -+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_MTS_GARTEN_WRAPPER_CONFIG -+*/ -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG (0x0B50U) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__MASKFULL (IMG_UINT64_C(0x000FF0FFFFFFF701)) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFF001)) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT (44U) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0xFFFF0FFFFFFFFFFF)) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_PC_BASE_SHIFT (44U) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0xFFF00FFFFFFFFFFF)) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT (40U) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_SHIFT (12U) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_PERSISTENCE_SHIFT (9U) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_PERSISTENCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF9FF)) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_SLC_COHERENT_SHIFT (8U) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_SLC_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_SLC_COHERENT_EN (IMG_UINT64_C(0x0000000000000100)) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT (0U) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL__S7_INFRA__META (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL__S7_INFRA__MTS (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_MTS_DM0_INTERRUPT_ENABLE -+*/ -+#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE (0x0B58U) -+#define 
RGX_CR_MTS_DM0_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -+#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_MTS_DM1_INTERRUPT_ENABLE -+*/ -+#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE (0x0B60U) -+#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -+#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_MTS_DM2_INTERRUPT_ENABLE -+*/ -+#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE (0x0B68U) -+#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -+#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_MTS_DM3_INTERRUPT_ENABLE -+*/ -+#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE (0x0B70U) -+#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -+#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_MTS_DM4_INTERRUPT_ENABLE -+*/ -+#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE (0x0B78U) -+#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -+#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_MTS_DM5_INTERRUPT_ENABLE -+*/ -+#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE (0x0B80U) -+#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) -+#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_MTS_INTCTX -+*/ -+#define RGX_CR_MTS_INTCTX (0x0B98U) -+#define RGX_CR_MTS_INTCTX_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) -+#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT (22U) -+#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK (0xC03FFFFFU) -+#define RGX_CR_MTS_INTCTX_DM_PTR_SHIFT (18U) -+#define RGX_CR_MTS_INTCTX_DM_PTR_CLRMSK (0xFFC3FFFFU) -+#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_SHIFT (16U) -+#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_CLRMSK (0xFFFCFFFFU) -+#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT (8U) -+#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK (0xFFFF00FFU) -+#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT (0U) -+#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_MTS_BGCTX -+*/ -+#define RGX_CR_MTS_BGCTX (0x0BA0U) -+#define RGX_CR_MTS_BGCTX_MASKFULL (IMG_UINT64_C(0x0000000000003FFF)) -+#define RGX_CR_MTS_BGCTX_DM_PTR_SHIFT (10U) -+#define RGX_CR_MTS_BGCTX_DM_PTR_CLRMSK (0xFFFFC3FFU) -+#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_SHIFT (8U) -+#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_CLRMSK (0xFFFFFCFFU) -+#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT (0U) -+#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE -+*/ -+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE (0x0BA8U) -+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT (56U) -+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK (IMG_UINT64_C(0x00FFFFFFFFFFFFFF)) -+#define 
RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT (48U) -+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF)) -+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT (40U) -+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) -+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT (32U) -+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) -+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT (24U) -+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) -+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT (16U) -+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) -+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT (8U) -+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) -+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT (0U) -+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) -+ -+ -+/* -+ Register RGX_CR_MTS_GPU_INT_STATUS -+*/ -+#define RGX_CR_MTS_GPU_INT_STATUS (0x0BB0U) -+#define RGX_CR_MTS_GPU_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT (0U) -+#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_MTS_SCHEDULE_ENABLE -+*/ -+#define RGX_CR_MTS_SCHEDULE_ENABLE (0x0BC8U) -+#define RGX_CR_MTS_SCHEDULE_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_SHIFT (0U) -+#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_IRQ_OS0_EVENT_STATUS -+*/ -+#define RGX_CR_IRQ_OS0_EVENT_STATUS (0x0BD8U) -+#define RGX_CR_IRQ_OS0_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_SHIFT (0U) -+#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_IRQ_OS0_EVENT_CLEAR -+*/ -+#define RGX_CR_IRQ_OS0_EVENT_CLEAR (0x0BE8U) -+#define RGX_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_SHIFT (0U) -+#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_IRQ_OS1_EVENT_STATUS -+*/ -+#define RGX_CR_IRQ_OS1_EVENT_STATUS (0x10BD8U) -+#define RGX_CR_IRQ_OS1_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_SHIFT (0U) -+#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_IRQ_OS1_EVENT_CLEAR -+*/ -+#define RGX_CR_IRQ_OS1_EVENT_CLEAR (0x10BE8U) -+#define RGX_CR_IRQ_OS1_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_SHIFT (0U) -+#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_IRQ_OS2_EVENT_STATUS -+*/ -+#define RGX_CR_IRQ_OS2_EVENT_STATUS (0x20BD8U) -+#define RGX_CR_IRQ_OS2_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_SHIFT (0U) -+#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_IRQ_OS2_EVENT_CLEAR -+*/ -+#define 
RGX_CR_IRQ_OS2_EVENT_CLEAR (0x20BE8U) -+#define RGX_CR_IRQ_OS2_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_SHIFT (0U) -+#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_IRQ_OS3_EVENT_STATUS -+*/ -+#define RGX_CR_IRQ_OS3_EVENT_STATUS (0x30BD8U) -+#define RGX_CR_IRQ_OS3_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_SHIFT (0U) -+#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_IRQ_OS3_EVENT_CLEAR -+*/ -+#define RGX_CR_IRQ_OS3_EVENT_CLEAR (0x30BE8U) -+#define RGX_CR_IRQ_OS3_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_SHIFT (0U) -+#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_IRQ_OS4_EVENT_STATUS -+*/ -+#define RGX_CR_IRQ_OS4_EVENT_STATUS (0x40BD8U) -+#define RGX_CR_IRQ_OS4_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_SHIFT (0U) -+#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_IRQ_OS4_EVENT_CLEAR -+*/ -+#define RGX_CR_IRQ_OS4_EVENT_CLEAR (0x40BE8U) -+#define RGX_CR_IRQ_OS4_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_SHIFT (0U) -+#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_IRQ_OS5_EVENT_STATUS -+*/ -+#define RGX_CR_IRQ_OS5_EVENT_STATUS (0x50BD8U) -+#define RGX_CR_IRQ_OS5_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_SHIFT (0U) -+#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_IRQ_OS5_EVENT_CLEAR -+*/ -+#define RGX_CR_IRQ_OS5_EVENT_CLEAR (0x50BE8U) -+#define RGX_CR_IRQ_OS5_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_SHIFT (0U) -+#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_IRQ_OS6_EVENT_STATUS -+*/ -+#define RGX_CR_IRQ_OS6_EVENT_STATUS (0x60BD8U) -+#define RGX_CR_IRQ_OS6_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_SHIFT (0U) -+#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_IRQ_OS6_EVENT_CLEAR -+*/ -+#define RGX_CR_IRQ_OS6_EVENT_CLEAR (0x60BE8U) -+#define RGX_CR_IRQ_OS6_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_SHIFT (0U) -+#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_IRQ_OS7_EVENT_STATUS -+*/ -+#define RGX_CR_IRQ_OS7_EVENT_STATUS (0x70BD8U) -+#define RGX_CR_IRQ_OS7_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_SHIFT (0U) -+#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) 
-+#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_IRQ_OS7_EVENT_CLEAR -+*/ -+#define RGX_CR_IRQ_OS7_EVENT_CLEAR (0x70BE8U) -+#define RGX_CR_IRQ_OS7_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_SHIFT (0U) -+#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_META_BOOT -+*/ -+#define RGX_CR_META_BOOT (0x0BF8U) -+#define RGX_CR_META_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_META_BOOT_MODE_SHIFT (0U) -+#define RGX_CR_META_BOOT_MODE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_META_BOOT_MODE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_GARTEN_SLC -+*/ -+#define RGX_CR_GARTEN_SLC (0x0BB8U) -+#define RGX_CR_GARTEN_SLC_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT (0U) -+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_PPP -+*/ -+#define RGX_CR_PPP (0x0CD0U) -+#define RGX_CR_PPP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PPP_CHECKSUM_SHIFT (0U) -+#define RGX_CR_PPP_CHECKSUM_CLRMSK (0x00000000U) -+ -+ -+#define RGX_CR_ISP_RENDER_DIR_TYPE_MASK (0x00000003U) -+/* -+Top-left to bottom-right */ -+#define RGX_CR_ISP_RENDER_DIR_TYPE_TL2BR (0x00000000U) -+/* -+Top-right to bottom-left */ -+#define RGX_CR_ISP_RENDER_DIR_TYPE_TR2BL (0x00000001U) -+/* -+Bottom-left to top-right */ -+#define RGX_CR_ISP_RENDER_DIR_TYPE_BL2TR (0x00000002U) -+/* -+Bottom-right to top-left */ -+#define RGX_CR_ISP_RENDER_DIR_TYPE_BR2TL (0x00000003U) -+ -+ -+#define RGX_CR_ISP_RENDER_MODE_TYPE_MASK (0x00000003U) -+/* -+Normal render */ -+#define RGX_CR_ISP_RENDER_MODE_TYPE_NORM (0x00000000U) -+/* -+Fast 2D render */ -+#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_2D (0x00000002U) -+/* -+Fast scale render */ -+#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_SCALE (0x00000003U) -+ -+ -+/* -+ Register RGX_CR_ISP_RENDER -+*/ -+#define RGX_CR_ISP_RENDER (0x0F08U) -+#define RGX_CR_ISP_RENDER_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -+#define RGX_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_SHIFT (8U) -+#define RGX_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_EN (0x00000100U) -+#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_SHIFT (7U) -+#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_CLRMSK (0xFFFFFF7FU) -+#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_EN (0x00000080U) -+#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_SHIFT (6U) -+#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_EN (0x00000040U) -+#define RGX_CR_ISP_RENDER_DISABLE_EOMT_SHIFT (5U) -+#define RGX_CR_ISP_RENDER_DISABLE_EOMT_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_ISP_RENDER_DISABLE_EOMT_EN (0x00000020U) -+#define RGX_CR_ISP_RENDER_RESUME_SHIFT (4U) -+#define RGX_CR_ISP_RENDER_RESUME_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_ISP_RENDER_RESUME_EN (0x00000010U) -+#define RGX_CR_ISP_RENDER_DIR_SHIFT (2U) -+#define RGX_CR_ISP_RENDER_DIR_CLRMSK (0xFFFFFFF3U) -+#define RGX_CR_ISP_RENDER_DIR_TL2BR (0x00000000U) -+#define RGX_CR_ISP_RENDER_DIR_TR2BL (0x00000004U) -+#define RGX_CR_ISP_RENDER_DIR_BL2TR (0x00000008U) -+#define RGX_CR_ISP_RENDER_DIR_BR2TL (0x0000000CU) -+#define RGX_CR_ISP_RENDER_MODE_SHIFT (0U) -+#define RGX_CR_ISP_RENDER_MODE_CLRMSK (0xFFFFFFFCU) 
-+#define RGX_CR_ISP_RENDER_MODE_NORM (0x00000000U) -+#define RGX_CR_ISP_RENDER_MODE_FAST_2D (0x00000002U) -+#define RGX_CR_ISP_RENDER_MODE_FAST_SCALE (0x00000003U) -+ -+ -+/* -+ Register RGX_CR_ISP_CTL -+*/ -+#define RGX_CR_ISP_CTL (0x0F38U) -+#define RGX_CR_ISP_CTL_MASKFULL (IMG_UINT64_C(0x00000000FFFFF3FF)) -+#define RGX_CR_ISP_CTL_SKIP_INIT_HDRS_SHIFT (31U) -+#define RGX_CR_ISP_CTL_SKIP_INIT_HDRS_CLRMSK (0x7FFFFFFFU) -+#define RGX_CR_ISP_CTL_SKIP_INIT_HDRS_EN (0x80000000U) -+#define RGX_CR_ISP_CTL_LINE_STYLE_SHIFT (30U) -+#define RGX_CR_ISP_CTL_LINE_STYLE_CLRMSK (0xBFFFFFFFU) -+#define RGX_CR_ISP_CTL_LINE_STYLE_EN (0x40000000U) -+#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_SHIFT (29U) -+#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_CLRMSK (0xDFFFFFFFU) -+#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_EN (0x20000000U) -+#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_SHIFT (28U) -+#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_CLRMSK (0xEFFFFFFFU) -+#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_EN (0x10000000U) -+#define RGX_CR_ISP_CTL_PAIR_TILES_SHIFT (27U) -+#define RGX_CR_ISP_CTL_PAIR_TILES_CLRMSK (0xF7FFFFFFU) -+#define RGX_CR_ISP_CTL_PAIR_TILES_EN (0x08000000U) -+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_SHIFT (26U) -+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_CLRMSK (0xFBFFFFFFU) -+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_EN (0x04000000U) -+#define RGX_CR_ISP_CTL_TILE_AGE_EN_SHIFT (25U) -+#define RGX_CR_ISP_CTL_TILE_AGE_EN_CLRMSK (0xFDFFFFFFU) -+#define RGX_CR_ISP_CTL_TILE_AGE_EN_EN (0x02000000U) -+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_SHIFT (23U) -+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_CLRMSK (0xFE7FFFFFU) -+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX9 (0x00000000U) -+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX10 (0x00800000U) -+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_OGL (0x01000000U) -+#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_SHIFT (21U) -+#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_CLRMSK (0xFF9FFFFFU) -+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_SHIFT (20U) -+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK (0xFFEFFFFFU) -+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_EN (0x00100000U) -+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT (19U) -+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK (0xFFF7FFFFU) -+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN (0x00080000U) -+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_SHIFT (18U) -+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_CLRMSK (0xFFFBFFFFU) -+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_EN (0x00040000U) -+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_SHIFT (17U) -+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_CLRMSK (0xFFFDFFFFU) -+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_EN (0x00020000U) -+#define RGX_CR_ISP_CTL_SAMPLE_POS_SHIFT (16U) -+#define RGX_CR_ISP_CTL_SAMPLE_POS_CLRMSK (0xFFFEFFFFU) -+#define RGX_CR_ISP_CTL_SAMPLE_POS_EN (0x00010000U) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_SHIFT (12U) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_CLRMSK (0xFFFF0FFFU) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ONE (0x00000000U) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWO (0x00001000U) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THREE (0x00002000U) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOUR (0x00003000U) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIVE (0x00004000U) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIX (0x00005000U) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SEVEN (0x00006000U) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_EIGHT (0x00007000U) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_NINE (0x00008000U) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TEN (0x00009000U) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ELEVEN 
(0x0000A000U) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWELVE (0x0000B000U) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THIRTEEN (0x0000C000U) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOURTEEN (0x0000D000U) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIFTEEN (0x0000E000U) -+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIXTEEN (0x0000F000U) -+#define RGX_CR_ISP_CTL_VALID_ID_SHIFT (4U) -+#define RGX_CR_ISP_CTL_VALID_ID_CLRMSK (0xFFFFFC0FU) -+#define RGX_CR_ISP_CTL_UPASS_START_SHIFT (0U) -+#define RGX_CR_ISP_CTL_UPASS_START_CLRMSK (0xFFFFFFF0U) -+ -+ -+/* -+ Register RGX_CR_ISP_STATUS -+*/ -+#define RGX_CR_ISP_STATUS (0x1038U) -+#define RGX_CR_ISP_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000007)) -+#define RGX_CR_ISP_STATUS_SPLIT_MAX_SHIFT (2U) -+#define RGX_CR_ISP_STATUS_SPLIT_MAX_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_ISP_STATUS_SPLIT_MAX_EN (0x00000004U) -+#define RGX_CR_ISP_STATUS_ACTIVE_SHIFT (1U) -+#define RGX_CR_ISP_STATUS_ACTIVE_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_ISP_STATUS_ACTIVE_EN (0x00000002U) -+#define RGX_CR_ISP_STATUS_EOR_SHIFT (0U) -+#define RGX_CR_ISP_STATUS_EOR_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_ISP_STATUS_EOR_EN (0x00000001U) -+ -+ -+/* -+ Register group: RGX_CR_ISP_XTP_RESUME, with 64 repeats -+*/ -+#define RGX_CR_ISP_XTP_RESUME_REPEATCOUNT (64U) -+/* -+ Register RGX_CR_ISP_XTP_RESUME0 -+*/ -+#define RGX_CR_ISP_XTP_RESUME0 (0x3A00U) -+#define RGX_CR_ISP_XTP_RESUME0_MASKFULL (IMG_UINT64_C(0x00000000003FF3FF)) -+#define RGX_CR_ISP_XTP_RESUME0_TILE_X_SHIFT (12U) -+#define RGX_CR_ISP_XTP_RESUME0_TILE_X_CLRMSK (0xFFC00FFFU) -+#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_SHIFT (0U) -+#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_CLRMSK (0xFFFFFC00U) -+ -+ -+/* -+ Register group: RGX_CR_ISP_XTP_STORE, with 32 repeats -+*/ -+#define RGX_CR_ISP_XTP_STORE_REPEATCOUNT (32U) -+/* -+ Register RGX_CR_ISP_XTP_STORE0 -+*/ -+#define RGX_CR_ISP_XTP_STORE0 (0x3C00U) -+#define RGX_CR_ISP_XTP_STORE0_MASKFULL (IMG_UINT64_C(0x000000007F3FF3FF)) -+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_SHIFT (30U) -+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_CLRMSK (0xBFFFFFFFU) -+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_EN (0x40000000U) -+#define RGX_CR_ISP_XTP_STORE0_EOR_SHIFT (29U) -+#define RGX_CR_ISP_XTP_STORE0_EOR_CLRMSK (0xDFFFFFFFU) -+#define RGX_CR_ISP_XTP_STORE0_EOR_EN (0x20000000U) -+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_SHIFT (28U) -+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_CLRMSK (0xEFFFFFFFU) -+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_EN (0x10000000U) -+#define RGX_CR_ISP_XTP_STORE0_MT_SHIFT (24U) -+#define RGX_CR_ISP_XTP_STORE0_MT_CLRMSK (0xF0FFFFFFU) -+#define RGX_CR_ISP_XTP_STORE0_TILE_X_SHIFT (12U) -+#define RGX_CR_ISP_XTP_STORE0_TILE_X_CLRMSK (0xFFC00FFFU) -+#define RGX_CR_ISP_XTP_STORE0_TILE_Y_SHIFT (0U) -+#define RGX_CR_ISP_XTP_STORE0_TILE_Y_CLRMSK (0xFFFFFC00U) -+ -+ -+/* -+ Register group: RGX_CR_BIF_CAT_BASE, with 8 repeats -+*/ -+#define RGX_CR_BIF_CAT_BASE_REPEATCOUNT (8U) -+/* -+ Register RGX_CR_BIF_CAT_BASE0 -+*/ -+#define RGX_CR_BIF_CAT_BASE0 (0x1200U) -+#define RGX_CR_BIF_CAT_BASE0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -+#define RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT (12U) -+#define RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_BIF_CAT_BASE1 -+*/ -+#define RGX_CR_BIF_CAT_BASE1 (0x1208U) -+#define RGX_CR_BIF_CAT_BASE1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -+#define RGX_CR_BIF_CAT_BASE1_ADDR_SHIFT (12U) -+#define RGX_CR_BIF_CAT_BASE1_ADDR_CLRMSK 
(IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_BIF_CAT_BASE2 -+*/ -+#define RGX_CR_BIF_CAT_BASE2 (0x1210U) -+#define RGX_CR_BIF_CAT_BASE2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -+#define RGX_CR_BIF_CAT_BASE2_ADDR_SHIFT (12U) -+#define RGX_CR_BIF_CAT_BASE2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_BIF_CAT_BASE3 -+*/ -+#define RGX_CR_BIF_CAT_BASE3 (0x1218U) -+#define RGX_CR_BIF_CAT_BASE3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -+#define RGX_CR_BIF_CAT_BASE3_ADDR_SHIFT (12U) -+#define RGX_CR_BIF_CAT_BASE3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_BIF_CAT_BASE4 -+*/ -+#define RGX_CR_BIF_CAT_BASE4 (0x1220U) -+#define RGX_CR_BIF_CAT_BASE4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -+#define RGX_CR_BIF_CAT_BASE4_ADDR_SHIFT (12U) -+#define RGX_CR_BIF_CAT_BASE4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_BIF_CAT_BASE5 -+*/ -+#define RGX_CR_BIF_CAT_BASE5 (0x1228U) -+#define RGX_CR_BIF_CAT_BASE5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -+#define RGX_CR_BIF_CAT_BASE5_ADDR_SHIFT (12U) -+#define RGX_CR_BIF_CAT_BASE5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_BIF_CAT_BASE6 -+*/ -+#define RGX_CR_BIF_CAT_BASE6 (0x1230U) -+#define RGX_CR_BIF_CAT_BASE6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -+#define RGX_CR_BIF_CAT_BASE6_ADDR_SHIFT (12U) -+#define RGX_CR_BIF_CAT_BASE6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_BIF_CAT_BASE7 -+*/ -+#define RGX_CR_BIF_CAT_BASE7 (0x1238U) -+#define RGX_CR_BIF_CAT_BASE7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -+#define RGX_CR_BIF_CAT_BASE7_ADDR_SHIFT (12U) -+#define RGX_CR_BIF_CAT_BASE7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_BIF_CAT_BASE_INDEX -+*/ -+#define RGX_CR_BIF_CAT_BASE_INDEX (0x1240U) -+#define RGX_CR_BIF_CAT_BASE_INDEX_MASKFULL (IMG_UINT64_C(0x00070707073F0707)) -+#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_SHIFT (48U) -+#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_CLRMSK (IMG_UINT64_C(0xFFF8FFFFFFFFFFFF)) -+#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_SHIFT (40U) -+#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_SHIFT (32U) -+#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_CLRMSK (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF)) -+#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_SHIFT (24U) -+#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF8FFFFFF)) -+#define RGX_CR_BIF_CAT_BASE_INDEX_TDM_SHIFT (19U) -+#define RGX_CR_BIF_CAT_BASE_INDEX_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC7FFFF)) -+#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_SHIFT (16U) -+#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF8FFFF)) -+#define 
RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_SHIFT (8U) -+#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF8FF)) -+#define RGX_CR_BIF_CAT_BASE_INDEX_TA_SHIFT (0U) -+#define RGX_CR_BIF_CAT_BASE_INDEX_TA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF8)) -+ -+ -+/* -+ Register RGX_CR_BIF_PM_CAT_BASE_VCE0 -+*/ -+#define RGX_CR_BIF_PM_CAT_BASE_VCE0 (0x1248U) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_SHIFT (40U) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_SHIFT (12U) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_SHIFT (1U) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_SHIFT (0U) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_BIF_PM_CAT_BASE_TE0 -+*/ -+#define RGX_CR_BIF_PM_CAT_BASE_TE0 (0x1250U) -+#define RGX_CR_BIF_PM_CAT_BASE_TE0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) -+#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_SHIFT (40U) -+#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) -+#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_SHIFT (12U) -+#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_SHIFT (1U) -+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) -+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_SHIFT (0U) -+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_BIF_PM_CAT_BASE_ALIST0 -+*/ -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0 (0x1260U) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT (40U) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_SHIFT (12U) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_SHIFT (1U) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_SHIFT (0U) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_BIF_PM_CAT_BASE_VCE1 -+*/ -+#define RGX_CR_BIF_PM_CAT_BASE_VCE1 (0x1268U) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_SHIFT (40U) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_SHIFT (12U) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_SHIFT 
(1U) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_SHIFT (0U) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_BIF_PM_CAT_BASE_TE1 -+*/ -+#define RGX_CR_BIF_PM_CAT_BASE_TE1 (0x1270U) -+#define RGX_CR_BIF_PM_CAT_BASE_TE1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) -+#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_SHIFT (40U) -+#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) -+#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_SHIFT (12U) -+#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_SHIFT (1U) -+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) -+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_SHIFT (0U) -+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_BIF_PM_CAT_BASE_ALIST1 -+*/ -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1 (0x1280U) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT (40U) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_SHIFT (12U) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_SHIFT (1U) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_SHIFT (0U) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_BIF_MMU_ENTRY_STATUS -+*/ -+#define RGX_CR_BIF_MMU_ENTRY_STATUS (0x1288U) -+#define RGX_CR_BIF_MMU_ENTRY_STATUS_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF0F3)) -+#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_SHIFT (12U) -+#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT (4U) -+#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF0F)) -+#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT (0U) -+#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) -+ -+ -+/* -+ Register RGX_CR_BIF_MMU_ENTRY -+*/ -+#define RGX_CR_BIF_MMU_ENTRY (0x1290U) -+#define RGX_CR_BIF_MMU_ENTRY_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_SHIFT (1U) -+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_EN (0x00000002U) -+#define RGX_CR_BIF_MMU_ENTRY_PENDING_SHIFT (0U) -+#define RGX_CR_BIF_MMU_ENTRY_PENDING_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_BIF_MMU_ENTRY_PENDING_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_BIF_CTRL_INVAL -+*/ -+#define RGX_CR_BIF_CTRL_INVAL (0x12A0U) -+#define RGX_CR_BIF_CTRL_INVAL_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -+#define 
RGX_CR_BIF_CTRL_INVAL_TLB1_SHIFT (3U) -+#define RGX_CR_BIF_CTRL_INVAL_TLB1_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_BIF_CTRL_INVAL_TLB1_EN (0x00000008U) -+#define RGX_CR_BIF_CTRL_INVAL_PC_SHIFT (2U) -+#define RGX_CR_BIF_CTRL_INVAL_PC_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_BIF_CTRL_INVAL_PC_EN (0x00000004U) -+#define RGX_CR_BIF_CTRL_INVAL_PD_SHIFT (1U) -+#define RGX_CR_BIF_CTRL_INVAL_PD_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_BIF_CTRL_INVAL_PD_EN (0x00000002U) -+#define RGX_CR_BIF_CTRL_INVAL_PT_SHIFT (0U) -+#define RGX_CR_BIF_CTRL_INVAL_PT_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_BIF_CTRL_INVAL_PT_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_BIF_CTRL -+*/ -+#define RGX_CR_BIF_CTRL (0x12A8U) -+#define RGX_CR_BIF_CTRL__XE_MEM__MASKFULL (IMG_UINT64_C(0x000000000000033F)) -+#define RGX_CR_BIF_CTRL_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_BIF_CTRL__XE_MEM__PAUSE_MMU_CPU_SHIFT (9U) -+#define RGX_CR_BIF_CTRL__XE_MEM__PAUSE_MMU_CPU_CLRMSK (0xFFFFFDFFU) -+#define RGX_CR_BIF_CTRL__XE_MEM__PAUSE_MMU_CPU_EN (0x00000200U) -+#define RGX_CR_BIF_CTRL__XE_MEM__PAUSE_MMU_BIF4_SHIFT (8U) -+#define RGX_CR_BIF_CTRL__XE_MEM__PAUSE_MMU_BIF4_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_BIF_CTRL__XE_MEM__PAUSE_MMU_BIF4_EN (0x00000100U) -+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_SHIFT (7U) -+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_CLRMSK (0xFFFFFF7FU) -+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_EN (0x00000080U) -+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_SHIFT (6U) -+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_EN (0x00000040U) -+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_SHIFT (5U) -+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_EN (0x00000020U) -+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_SHIFT (4U) -+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_EN (0x00000010U) -+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_SHIFT (3U) -+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_EN (0x00000008U) -+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_SHIFT (2U) -+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_EN (0x00000004U) -+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_SHIFT (1U) -+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_EN (0x00000002U) -+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_SHIFT (0U) -+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_BIF_FAULT_BANK0_MMU_STATUS -+*/ -+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS (0x12B0U) -+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) -+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U) -+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) -+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U) -+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) -+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U) -+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) -+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U) -+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0x00000010U) -+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) 
-+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U) -+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U) -+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_BIF_FAULT_BANK0_REQ_STATUS -+*/ -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS (0x12B8U) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__MASKFULL (IMG_UINT64_C(0x001FFFFFFFFFFFF0)) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0)) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_SHIFT (52U) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN (IMG_UINT64_C(0x0010000000000000)) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0004000000000000)) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT (46U) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK (IMG_UINT64_C(0xFFF03FFFFFFFFFFF)) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF)) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT (40U) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFC0FFFFFFFFFF)) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) -+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) -+ -+ -+/* -+ Register RGX_CR_BIF_FAULT_BANK1_MMU_STATUS -+*/ -+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS (0x12C0U) -+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) -+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_SHIFT (12U) -+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) -+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_SHIFT (8U) -+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) -+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_SHIFT (5U) -+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) -+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_SHIFT (4U) -+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_EN (0x00000010U) -+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) -+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U) -+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_SHIFT (0U) -+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_BIF_FAULT_BANK1_REQ_STATUS -+*/ -+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS (0x12C8U) -+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_MASKFULL 
(IMG_UINT64_C(0x0007FFFFFFFFFFF0)) -+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_SHIFT (50U) -+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) -+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0004000000000000)) -+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_SHIFT (44U) -+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF)) -+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_SHIFT (40U) -+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) -+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_SHIFT (4U) -+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) -+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) -+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) -+ -+ -+/* -+ Register RGX_CR_BIF_MMU_STATUS -+*/ -+#define RGX_CR_BIF_MMU_STATUS (0x12D0U) -+#define RGX_CR_BIF_MMU_STATUS__XE_MEM__MASKFULL (IMG_UINT64_C(0x000000001FFFFFF7)) -+#define RGX_CR_BIF_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000001FFFFFF7)) -+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_SHIFT (28U) -+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_CLRMSK (0xEFFFFFFFU) -+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_EN (0x10000000U) -+#define RGX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT (20U) -+#define RGX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK (0xF00FFFFFU) -+#define RGX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT (12U) -+#define RGX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK (0xFFF00FFFU) -+#define RGX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT (4U) -+#define RGX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK (0xFFFFF00FU) -+#define RGX_CR_BIF_MMU_STATUS_STALLED_SHIFT (2U) -+#define RGX_CR_BIF_MMU_STATUS_STALLED_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_BIF_MMU_STATUS_STALLED_EN (0x00000004U) -+#define RGX_CR_BIF_MMU_STATUS_PAUSED_SHIFT (1U) -+#define RGX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_BIF_MMU_STATUS_PAUSED_EN (0x00000002U) -+#define RGX_CR_BIF_MMU_STATUS_BUSY_SHIFT (0U) -+#define RGX_CR_BIF_MMU_STATUS_BUSY_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_BIF_MMU_STATUS_BUSY_EN (0x00000001U) -+ -+ -+/* -+ Register group: RGX_CR_BIF_TILING_CFG, with 8 repeats -+*/ -+#define RGX_CR_BIF_TILING_CFG_REPEATCOUNT (8U) -+/* -+ Register RGX_CR_BIF_TILING_CFG0 -+*/ -+#define RGX_CR_BIF_TILING_CFG0 (0x12D8U) -+#define RGX_CR_BIF_TILING_CFG0_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG0_XSTRIDE_SHIFT (61U) -+#define RGX_CR_BIF_TILING_CFG0_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG0_ENABLE_SHIFT (60U) -+#define RGX_CR_BIF_TILING_CFG0_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG0_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_SHIFT (32U) -+#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSIZE (4096U) -+#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_SHIFT (0U) -+#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -+#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_BIF_TILING_CFG1 -+*/ -+#define RGX_CR_BIF_TILING_CFG1 (0x12E0U) -+#define RGX_CR_BIF_TILING_CFG1_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG1_XSTRIDE_SHIFT (61U) -+#define 
RGX_CR_BIF_TILING_CFG1_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG1_ENABLE_SHIFT (60U) -+#define RGX_CR_BIF_TILING_CFG1_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG1_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_SHIFT (32U) -+#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSIZE (4096U) -+#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_SHIFT (0U) -+#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -+#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_BIF_TILING_CFG2 -+*/ -+#define RGX_CR_BIF_TILING_CFG2 (0x12E8U) -+#define RGX_CR_BIF_TILING_CFG2_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG2_XSTRIDE_SHIFT (61U) -+#define RGX_CR_BIF_TILING_CFG2_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG2_ENABLE_SHIFT (60U) -+#define RGX_CR_BIF_TILING_CFG2_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG2_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_SHIFT (32U) -+#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSIZE (4096U) -+#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_SHIFT (0U) -+#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -+#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_BIF_TILING_CFG3 -+*/ -+#define RGX_CR_BIF_TILING_CFG3 (0x12F0U) -+#define RGX_CR_BIF_TILING_CFG3_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG3_XSTRIDE_SHIFT (61U) -+#define RGX_CR_BIF_TILING_CFG3_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG3_ENABLE_SHIFT (60U) -+#define RGX_CR_BIF_TILING_CFG3_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG3_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_SHIFT (32U) -+#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSIZE (4096U) -+#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_SHIFT (0U) -+#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -+#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_BIF_TILING_CFG4 -+*/ -+#define RGX_CR_BIF_TILING_CFG4 (0x12F8U) -+#define RGX_CR_BIF_TILING_CFG4_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG4_XSTRIDE_SHIFT (61U) -+#define RGX_CR_BIF_TILING_CFG4_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG4_ENABLE_SHIFT (60U) -+#define RGX_CR_BIF_TILING_CFG4_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG4_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_SHIFT (32U) -+#define 
RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSIZE (4096U) -+#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_SHIFT (0U) -+#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -+#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_BIF_TILING_CFG5 -+*/ -+#define RGX_CR_BIF_TILING_CFG5 (0x1300U) -+#define RGX_CR_BIF_TILING_CFG5_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG5_XSTRIDE_SHIFT (61U) -+#define RGX_CR_BIF_TILING_CFG5_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG5_ENABLE_SHIFT (60U) -+#define RGX_CR_BIF_TILING_CFG5_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG5_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_SHIFT (32U) -+#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSIZE (4096U) -+#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_SHIFT (0U) -+#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -+#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_BIF_TILING_CFG6 -+*/ -+#define RGX_CR_BIF_TILING_CFG6 (0x1308U) -+#define RGX_CR_BIF_TILING_CFG6_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG6_XSTRIDE_SHIFT (61U) -+#define RGX_CR_BIF_TILING_CFG6_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG6_ENABLE_SHIFT (60U) -+#define RGX_CR_BIF_TILING_CFG6_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG6_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_SHIFT (32U) -+#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSIZE (4096U) -+#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_SHIFT (0U) -+#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -+#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_BIF_TILING_CFG7 -+*/ -+#define RGX_CR_BIF_TILING_CFG7 (0x1310U) -+#define RGX_CR_BIF_TILING_CFG7_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG7_XSTRIDE_SHIFT (61U) -+#define RGX_CR_BIF_TILING_CFG7_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG7_ENABLE_SHIFT (60U) -+#define RGX_CR_BIF_TILING_CFG7_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG7_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_SHIFT (32U) -+#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) -+#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSIZE (4096U) -+#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_SHIFT (0U) -+#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) -+#define 
RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSHIFT (12U) -+#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_BIF_READS_EXT_STATUS -+*/ -+#define RGX_CR_BIF_READS_EXT_STATUS (0x1320U) -+#define RGX_CR_BIF_READS_EXT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) -+#define RGX_CR_BIF_READS_EXT_STATUS_MMU_SHIFT (16U) -+#define RGX_CR_BIF_READS_EXT_STATUS_MMU_CLRMSK (0xF000FFFFU) -+#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_SHIFT (0U) -+#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_BIF_READS_INT_STATUS -+*/ -+#define RGX_CR_BIF_READS_INT_STATUS (0x1328U) -+#define RGX_CR_BIF_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000007FFFFFF)) -+#define RGX_CR_BIF_READS_INT_STATUS_MMU_SHIFT (16U) -+#define RGX_CR_BIF_READS_INT_STATUS_MMU_CLRMSK (0xF800FFFFU) -+#define RGX_CR_BIF_READS_INT_STATUS_BANK1_SHIFT (0U) -+#define RGX_CR_BIF_READS_INT_STATUS_BANK1_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_BIFPM_READS_INT_STATUS -+*/ -+#define RGX_CR_BIFPM_READS_INT_STATUS (0x1330U) -+#define RGX_CR_BIFPM_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_SHIFT (0U) -+#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_BIFPM_READS_EXT_STATUS -+*/ -+#define RGX_CR_BIFPM_READS_EXT_STATUS (0x1338U) -+#define RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_SHIFT (0U) -+#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_BIFPM_STATUS_MMU -+*/ -+#define RGX_CR_BIFPM_STATUS_MMU (0x1350U) -+#define RGX_CR_BIFPM_STATUS_MMU_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_SHIFT (0U) -+#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_BIF_STATUS_MMU -+*/ -+#define RGX_CR_BIF_STATUS_MMU (0x1358U) -+#define RGX_CR_BIF_STATUS_MMU_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_BIF_STATUS_MMU_REQUESTS_SHIFT (0U) -+#define RGX_CR_BIF_STATUS_MMU_REQUESTS_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_BIF_FAULT_READ -+*/ -+#define RGX_CR_BIF_FAULT_READ (0x13E0U) -+#define RGX_CR_BIF_FAULT_READ_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) -+#define RGX_CR_BIF_FAULT_READ_ADDRESS_SHIFT (4U) -+#define RGX_CR_BIF_FAULT_READ_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) -+#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSHIFT (4U) -+#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSIZE (16U) -+ -+ -+/* -+ Register RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS -+*/ -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS (0x1430U) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0x00000010U) -+#define 
RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS -+*/ -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS (0x1438U) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0)) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0004000000000000)) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF)) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) -+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) -+ -+ -+/* -+ Register RGX_CR_TFBC_COMPRESSION_CONTROL -+*/ -+#define RGX_CR_TFBC_COMPRESSION_CONTROL (0x14A0U) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_LOSSY_MIN_CHANNEL_OVERRIDE_SHIFT (8U) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_LOSSY_MIN_CHANNEL_OVERRIDE_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_LOSSY_MIN_CHANNEL_OVERRIDE_EN (0x00000100U) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_SHIFT (7U) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_CLRMSK (0xFFFFFF7FU) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN (0x00000080U) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_SHIFT_SHIFT (4U) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_SHIFT_CLRMSK (0xFFFFFF8FU) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_ENABLE_SHIFT (3U) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_ENABLE_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_ENABLE_EN (0x00000008U) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT (1U) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK (0xFFFFFFF9U) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_DEFAULT (0x00000000U) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_TFBC_DELTA_STANDARD_AND_CORRELATION (0x00000002U) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_TFBC_DELTA_STANDARD (0x00000004U) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_RESERVED (0x00000006U) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT (0U) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_GROUP_0 (0x00000000U) -+#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_GROUP_1 (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_MCU_FENCE -+*/ -+#define RGX_CR_MCU_FENCE (0x1740U) -+#define RGX_CR_MCU_FENCE_MASKFULL (IMG_UINT64_C(0x000007FFFFFFFFE0)) -+#define 
RGX_CR_MCU_FENCE_DM_SHIFT (40U) -+#define RGX_CR_MCU_FENCE_DM_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_MCU_FENCE_DM_VERTEX (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_MCU_FENCE_DM_PIXEL (IMG_UINT64_C(0x0000010000000000)) -+#define RGX_CR_MCU_FENCE_DM_COMPUTE (IMG_UINT64_C(0x0000020000000000)) -+#define RGX_CR_MCU_FENCE_DM_RAY_VERTEX (IMG_UINT64_C(0x0000030000000000)) -+#define RGX_CR_MCU_FENCE_DM_RAY (IMG_UINT64_C(0x0000040000000000)) -+#define RGX_CR_MCU_FENCE_DM_FASTRENDER (IMG_UINT64_C(0x0000050000000000)) -+#define RGX_CR_MCU_FENCE_ADDR_SHIFT (5U) -+#define RGX_CR_MCU_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) -+#define RGX_CR_MCU_FENCE_ADDR_ALIGNSHIFT (5U) -+#define RGX_CR_MCU_FENCE_ADDR_ALIGNSIZE (32U) -+ -+ -+/* -+ Register group: RGX_CR_SCRATCH, with 16 repeats -+*/ -+#define RGX_CR_SCRATCH_REPEATCOUNT (16U) -+/* -+ Register RGX_CR_SCRATCH0 -+*/ -+#define RGX_CR_SCRATCH0 (0x1A00U) -+#define RGX_CR_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SCRATCH0_DATA_SHIFT (0U) -+#define RGX_CR_SCRATCH0_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SCRATCH1 -+*/ -+#define RGX_CR_SCRATCH1 (0x1A08U) -+#define RGX_CR_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SCRATCH1_DATA_SHIFT (0U) -+#define RGX_CR_SCRATCH1_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SCRATCH2 -+*/ -+#define RGX_CR_SCRATCH2 (0x1A10U) -+#define RGX_CR_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SCRATCH2_DATA_SHIFT (0U) -+#define RGX_CR_SCRATCH2_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SCRATCH3 -+*/ -+#define RGX_CR_SCRATCH3 (0x1A18U) -+#define RGX_CR_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SCRATCH3_DATA_SHIFT (0U) -+#define RGX_CR_SCRATCH3_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SCRATCH4 -+*/ -+#define RGX_CR_SCRATCH4 (0x1A20U) -+#define RGX_CR_SCRATCH4_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SCRATCH4_DATA_SHIFT (0U) -+#define RGX_CR_SCRATCH4_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SCRATCH5 -+*/ -+#define RGX_CR_SCRATCH5 (0x1A28U) -+#define RGX_CR_SCRATCH5_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SCRATCH5_DATA_SHIFT (0U) -+#define RGX_CR_SCRATCH5_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SCRATCH6 -+*/ -+#define RGX_CR_SCRATCH6 (0x1A30U) -+#define RGX_CR_SCRATCH6_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SCRATCH6_DATA_SHIFT (0U) -+#define RGX_CR_SCRATCH6_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SCRATCH7 -+*/ -+#define RGX_CR_SCRATCH7 (0x1A38U) -+#define RGX_CR_SCRATCH7_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SCRATCH7_DATA_SHIFT (0U) -+#define RGX_CR_SCRATCH7_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SCRATCH8 -+*/ -+#define RGX_CR_SCRATCH8 (0x1A40U) -+#define RGX_CR_SCRATCH8_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SCRATCH8_DATA_SHIFT (0U) -+#define RGX_CR_SCRATCH8_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SCRATCH9 -+*/ -+#define RGX_CR_SCRATCH9 (0x1A48U) -+#define RGX_CR_SCRATCH9_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SCRATCH9_DATA_SHIFT (0U) -+#define RGX_CR_SCRATCH9_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SCRATCH10 -+*/ -+#define RGX_CR_SCRATCH10 (0x1A50U) -+#define RGX_CR_SCRATCH10_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SCRATCH10_DATA_SHIFT (0U) -+#define RGX_CR_SCRATCH10_DATA_CLRMSK (0x00000000U) -+ -+ 
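
The field macros in this hunk all follow one pattern: <REG>_MASKFULL is the set of implemented bits of a register, and each field gets a _SHIFT plus a _CLRMSK (the register value with that field's bits cleared); _EN and named-value constants are already pre-shifted. As a minimal sketch of how these are conventionally combined in a read-modify-write — assuming hypothetical read_reg64()/write_reg64() MMIO helpers, with only the RGX_CR_MCU_FENCE_* constants defined above taken from the patch — the usage looks like:

#include <stdint.h>

/* Placeholder MMIO accessors; stand-ins for whatever the driver actually uses. */
extern uint64_t read_reg64(uint32_t offset);
extern void write_reg64(uint32_t offset, uint64_t value);

/* Read-modify-write of RGX_CR_MCU_FENCE: each field is cleared with its
 * _CLRMSK, then the new value is OR-ed in at its _SHIFT position. */
static void set_mcu_fence_pixel(uint64_t fence_addr)
{
	uint64_t v = read_reg64(RGX_CR_MCU_FENCE);

	/* DM field (bits 42..40): select the pixel data master. */
	v = (v & RGX_CR_MCU_FENCE_DM_CLRMSK) | RGX_CR_MCU_FENCE_DM_PIXEL;

	/* ADDR field (bits 39..5): 32-byte-aligned fence address. */
	v = (v & RGX_CR_MCU_FENCE_ADDR_CLRMSK) |
	    ((fence_addr >> RGX_CR_MCU_FENCE_ADDR_ALIGNSHIFT)
	     << RGX_CR_MCU_FENCE_ADDR_SHIFT);

	/* MASKFULL keeps only the bits the register actually implements. */
	write_reg64(RGX_CR_MCU_FENCE, v & RGX_CR_MCU_FENCE_MASKFULL);
}
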
-+/* -+ Register RGX_CR_SCRATCH11 -+*/ -+#define RGX_CR_SCRATCH11 (0x1A58U) -+#define RGX_CR_SCRATCH11_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SCRATCH11_DATA_SHIFT (0U) -+#define RGX_CR_SCRATCH11_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SCRATCH12 -+*/ -+#define RGX_CR_SCRATCH12 (0x1A60U) -+#define RGX_CR_SCRATCH12_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SCRATCH12_DATA_SHIFT (0U) -+#define RGX_CR_SCRATCH12_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SCRATCH13 -+*/ -+#define RGX_CR_SCRATCH13 (0x1A68U) -+#define RGX_CR_SCRATCH13_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SCRATCH13_DATA_SHIFT (0U) -+#define RGX_CR_SCRATCH13_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SCRATCH14 -+*/ -+#define RGX_CR_SCRATCH14 (0x1A70U) -+#define RGX_CR_SCRATCH14_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SCRATCH14_DATA_SHIFT (0U) -+#define RGX_CR_SCRATCH14_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SCRATCH15 -+*/ -+#define RGX_CR_SCRATCH15 (0x1A78U) -+#define RGX_CR_SCRATCH15_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SCRATCH15_DATA_SHIFT (0U) -+#define RGX_CR_SCRATCH15_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register group: RGX_CR_OS0_SCRATCH, with 2 repeats -+*/ -+#define RGX_CR_OS0_SCRATCH_REPEATCOUNT (2U) -+/* -+ Register RGX_CR_OS0_SCRATCH0 -+*/ -+#define RGX_CR_OS0_SCRATCH0 (0x1A80U) -+#define RGX_CR_OS0_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_OS0_SCRATCH0_DATA_SHIFT (0U) -+#define RGX_CR_OS0_SCRATCH0_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_OS0_SCRATCH1 -+*/ -+#define RGX_CR_OS0_SCRATCH1 (0x1A88U) -+#define RGX_CR_OS0_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_OS0_SCRATCH1_DATA_SHIFT (0U) -+#define RGX_CR_OS0_SCRATCH1_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_OS0_SCRATCH2 -+*/ -+#define RGX_CR_OS0_SCRATCH2 (0x1A90U) -+#define RGX_CR_OS0_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_OS0_SCRATCH2_DATA_SHIFT (0U) -+#define RGX_CR_OS0_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_OS0_SCRATCH3 -+*/ -+#define RGX_CR_OS0_SCRATCH3 (0x1A98U) -+#define RGX_CR_OS0_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_OS0_SCRATCH3_DATA_SHIFT (0U) -+#define RGX_CR_OS0_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register group: RGX_CR_OS1_SCRATCH, with 2 repeats -+*/ -+#define RGX_CR_OS1_SCRATCH_REPEATCOUNT (2U) -+/* -+ Register RGX_CR_OS1_SCRATCH0 -+*/ -+#define RGX_CR_OS1_SCRATCH0 (0x11A80U) -+#define RGX_CR_OS1_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_OS1_SCRATCH0_DATA_SHIFT (0U) -+#define RGX_CR_OS1_SCRATCH0_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_OS1_SCRATCH1 -+*/ -+#define RGX_CR_OS1_SCRATCH1 (0x11A88U) -+#define RGX_CR_OS1_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_OS1_SCRATCH1_DATA_SHIFT (0U) -+#define RGX_CR_OS1_SCRATCH1_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_OS1_SCRATCH2 -+*/ -+#define RGX_CR_OS1_SCRATCH2 (0x11A90U) -+#define RGX_CR_OS1_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_OS1_SCRATCH2_DATA_SHIFT (0U) -+#define RGX_CR_OS1_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_OS1_SCRATCH3 -+*/ -+#define RGX_CR_OS1_SCRATCH3 (0x11A98U) -+#define RGX_CR_OS1_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_OS1_SCRATCH3_DATA_SHIFT (0U) -+#define 
RGX_CR_OS1_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register group: RGX_CR_OS2_SCRATCH, with 2 repeats -+*/ -+#define RGX_CR_OS2_SCRATCH_REPEATCOUNT (2U) -+/* -+ Register RGX_CR_OS2_SCRATCH0 -+*/ -+#define RGX_CR_OS2_SCRATCH0 (0x21A80U) -+#define RGX_CR_OS2_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_OS2_SCRATCH0_DATA_SHIFT (0U) -+#define RGX_CR_OS2_SCRATCH0_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_OS2_SCRATCH1 -+*/ -+#define RGX_CR_OS2_SCRATCH1 (0x21A88U) -+#define RGX_CR_OS2_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_OS2_SCRATCH1_DATA_SHIFT (0U) -+#define RGX_CR_OS2_SCRATCH1_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_OS2_SCRATCH2 -+*/ -+#define RGX_CR_OS2_SCRATCH2 (0x21A90U) -+#define RGX_CR_OS2_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_OS2_SCRATCH2_DATA_SHIFT (0U) -+#define RGX_CR_OS2_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_OS2_SCRATCH3 -+*/ -+#define RGX_CR_OS2_SCRATCH3 (0x21A98U) -+#define RGX_CR_OS2_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_OS2_SCRATCH3_DATA_SHIFT (0U) -+#define RGX_CR_OS2_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register group: RGX_CR_OS3_SCRATCH, with 2 repeats -+*/ -+#define RGX_CR_OS3_SCRATCH_REPEATCOUNT (2U) -+/* -+ Register RGX_CR_OS3_SCRATCH0 -+*/ -+#define RGX_CR_OS3_SCRATCH0 (0x31A80U) -+#define RGX_CR_OS3_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_OS3_SCRATCH0_DATA_SHIFT (0U) -+#define RGX_CR_OS3_SCRATCH0_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_OS3_SCRATCH1 -+*/ -+#define RGX_CR_OS3_SCRATCH1 (0x31A88U) -+#define RGX_CR_OS3_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_OS3_SCRATCH1_DATA_SHIFT (0U) -+#define RGX_CR_OS3_SCRATCH1_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_OS3_SCRATCH2 -+*/ -+#define RGX_CR_OS3_SCRATCH2 (0x31A90U) -+#define RGX_CR_OS3_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_OS3_SCRATCH2_DATA_SHIFT (0U) -+#define RGX_CR_OS3_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_OS3_SCRATCH3 -+*/ -+#define RGX_CR_OS3_SCRATCH3 (0x31A98U) -+#define RGX_CR_OS3_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_OS3_SCRATCH3_DATA_SHIFT (0U) -+#define RGX_CR_OS3_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register group: RGX_CR_OS4_SCRATCH, with 2 repeats -+*/ -+#define RGX_CR_OS4_SCRATCH_REPEATCOUNT (2U) -+/* -+ Register RGX_CR_OS4_SCRATCH0 -+*/ -+#define RGX_CR_OS4_SCRATCH0 (0x41A80U) -+#define RGX_CR_OS4_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_OS4_SCRATCH0_DATA_SHIFT (0U) -+#define RGX_CR_OS4_SCRATCH0_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_OS4_SCRATCH1 -+*/ -+#define RGX_CR_OS4_SCRATCH1 (0x41A88U) -+#define RGX_CR_OS4_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_OS4_SCRATCH1_DATA_SHIFT (0U) -+#define RGX_CR_OS4_SCRATCH1_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_OS4_SCRATCH2 -+*/ -+#define RGX_CR_OS4_SCRATCH2 (0x41A90U) -+#define RGX_CR_OS4_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_OS4_SCRATCH2_DATA_SHIFT (0U) -+#define RGX_CR_OS4_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_OS4_SCRATCH3 -+*/ -+#define RGX_CR_OS4_SCRATCH3 (0x41A98U) -+#define RGX_CR_OS4_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_OS4_SCRATCH3_DATA_SHIFT (0U) -+#define RGX_CR_OS4_SCRATCH3_DATA_CLRMSK 
(0xFFFFFF00U) -+ -+ -+/* -+ Register group: RGX_CR_OS5_SCRATCH, with 2 repeats -+*/ -+#define RGX_CR_OS5_SCRATCH_REPEATCOUNT (2U) -+/* -+ Register RGX_CR_OS5_SCRATCH0 -+*/ -+#define RGX_CR_OS5_SCRATCH0 (0x51A80U) -+#define RGX_CR_OS5_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_OS5_SCRATCH0_DATA_SHIFT (0U) -+#define RGX_CR_OS5_SCRATCH0_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_OS5_SCRATCH1 -+*/ -+#define RGX_CR_OS5_SCRATCH1 (0x51A88U) -+#define RGX_CR_OS5_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_OS5_SCRATCH1_DATA_SHIFT (0U) -+#define RGX_CR_OS5_SCRATCH1_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_OS5_SCRATCH2 -+*/ -+#define RGX_CR_OS5_SCRATCH2 (0x51A90U) -+#define RGX_CR_OS5_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_OS5_SCRATCH2_DATA_SHIFT (0U) -+#define RGX_CR_OS5_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_OS5_SCRATCH3 -+*/ -+#define RGX_CR_OS5_SCRATCH3 (0x51A98U) -+#define RGX_CR_OS5_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_OS5_SCRATCH3_DATA_SHIFT (0U) -+#define RGX_CR_OS5_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register group: RGX_CR_OS6_SCRATCH, with 2 repeats -+*/ -+#define RGX_CR_OS6_SCRATCH_REPEATCOUNT (2U) -+/* -+ Register RGX_CR_OS6_SCRATCH0 -+*/ -+#define RGX_CR_OS6_SCRATCH0 (0x61A80U) -+#define RGX_CR_OS6_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_OS6_SCRATCH0_DATA_SHIFT (0U) -+#define RGX_CR_OS6_SCRATCH0_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_OS6_SCRATCH1 -+*/ -+#define RGX_CR_OS6_SCRATCH1 (0x61A88U) -+#define RGX_CR_OS6_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_OS6_SCRATCH1_DATA_SHIFT (0U) -+#define RGX_CR_OS6_SCRATCH1_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_OS6_SCRATCH2 -+*/ -+#define RGX_CR_OS6_SCRATCH2 (0x61A90U) -+#define RGX_CR_OS6_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_OS6_SCRATCH2_DATA_SHIFT (0U) -+#define RGX_CR_OS6_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_OS6_SCRATCH3 -+*/ -+#define RGX_CR_OS6_SCRATCH3 (0x61A98U) -+#define RGX_CR_OS6_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_OS6_SCRATCH3_DATA_SHIFT (0U) -+#define RGX_CR_OS6_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register group: RGX_CR_OS7_SCRATCH, with 2 repeats -+*/ -+#define RGX_CR_OS7_SCRATCH_REPEATCOUNT (2U) -+/* -+ Register RGX_CR_OS7_SCRATCH0 -+*/ -+#define RGX_CR_OS7_SCRATCH0 (0x71A80U) -+#define RGX_CR_OS7_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_OS7_SCRATCH0_DATA_SHIFT (0U) -+#define RGX_CR_OS7_SCRATCH0_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_OS7_SCRATCH1 -+*/ -+#define RGX_CR_OS7_SCRATCH1 (0x71A88U) -+#define RGX_CR_OS7_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_OS7_SCRATCH1_DATA_SHIFT (0U) -+#define RGX_CR_OS7_SCRATCH1_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_OS7_SCRATCH2 -+*/ -+#define RGX_CR_OS7_SCRATCH2 (0x71A90U) -+#define RGX_CR_OS7_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_OS7_SCRATCH2_DATA_SHIFT (0U) -+#define RGX_CR_OS7_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_OS7_SCRATCH3 -+*/ -+#define RGX_CR_OS7_SCRATCH3 (0x71A98U) -+#define RGX_CR_OS7_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_OS7_SCRATCH3_DATA_SHIFT (0U) -+#define RGX_CR_OS7_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register 
RGX_CR_SPFILTER_SIGNAL_DESCR -+*/ -+#define RGX_CR_SPFILTER_SIGNAL_DESCR (0x2700U) -+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_SHIFT (0U) -+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_CLRMSK (0xFFFF0000U) -+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSHIFT (4U) -+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSIZE (16U) -+ -+ -+/* -+ Register RGX_CR_SPFILTER_SIGNAL_DESCR_MIN -+*/ -+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN (0x2708U) -+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0)) -+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_SHIFT (4U) -+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) -+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSHIFT (4U) -+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSIZE (16U) -+ -+ -+/* -+ Register group: RGX_CR_FWCORE_ADDR_REMAP_CONFIG, with 16 repeats -+*/ -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG_REPEATCOUNT (16U) -+/* -+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 -+*/ -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 (0x3000U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_SHIFT (62U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_SHIFT (61U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_SHIFT (60U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT (44U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_SHIFT (40U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG1 -+*/ -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1 (0x3008U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_SHIFT (62U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_SHIFT (61U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_SHIFT (60U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -+#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_SHIFT (44U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_SHIFT (40U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG2 -+*/ -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2 (0x3010U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_SHIFT (62U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_SHIFT (61U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_SHIFT (60U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_SHIFT (44U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_SHIFT (40U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG3 -+*/ -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3 (0x3018U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_SHIFT (62U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_SHIFT (61U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_SHIFT (60U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_SHIFT (44U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_SHIFT (40U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_CLRMSK 
(IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG4 -+*/ -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4 (0x3020U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_SHIFT (62U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_SHIFT (61U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_SHIFT (60U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_SHIFT (44U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_SHIFT (40U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG5 -+*/ -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5 (0x3028U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_SHIFT (62U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_SHIFT (61U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_SHIFT (60U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_SHIFT (44U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_SHIFT (40U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG6 -+*/ -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6 (0x3030U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_SHIFT (62U) 
-+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_SHIFT (61U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_SHIFT (60U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_SHIFT (44U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_SHIFT (40U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG7 -+*/ -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7 (0x3038U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_SHIFT (62U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_SHIFT (61U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_SHIFT (60U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_SHIFT (44U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_SHIFT (40U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG8 -+*/ -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8 (0x3040U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_SHIFT (62U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_SHIFT (61U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_EN 
(IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_SHIFT (60U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_SHIFT (44U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_SHIFT (40U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG9 -+*/ -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9 (0x3048U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_SHIFT (62U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_SHIFT (61U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_SHIFT (60U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_SHIFT (44U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_SHIFT (40U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG10 -+*/ -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10 (0x3050U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_SHIFT (62U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_SHIFT (61U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_SHIFT (60U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_SHIFT (44U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_CLRMSK 
(IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_SHIFT (40U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG11 -+*/ -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11 (0x3058U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_SHIFT (62U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_SHIFT (61U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_SHIFT (60U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_SHIFT (44U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_SHIFT (40U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG12 -+*/ -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12 (0x3060U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_SHIFT (62U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_SHIFT (61U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_SHIFT (60U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_SHIFT (44U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_SHIFT (40U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define 
RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG13 -+*/ -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13 (0x3068U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_SHIFT (62U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_SHIFT (61U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_SHIFT (60U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_SHIFT (44U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_SHIFT (40U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG14 -+*/ -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14 (0x3070U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_SHIFT (62U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_SHIFT (61U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_SHIFT (60U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_SHIFT (44U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_SHIFT (40U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG15 -+*/ -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15 (0x3078U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_SHIFT (62U) 
-+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_SHIFT (61U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_SHIFT (60U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_SHIFT (44U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_SHIFT (40U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_BOOT -+*/ -+#define RGX_CR_FWCORE_BOOT (0x3090U) -+#define RGX_CR_FWCORE_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_FWCORE_BOOT_ENABLE_SHIFT (0U) -+#define RGX_CR_FWCORE_BOOT_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_FWCORE_BOOT_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_RESET_ADDR -+*/ -+#define RGX_CR_FWCORE_RESET_ADDR (0x3098U) -+#define RGX_CR_FWCORE_RESET_ADDR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFE)) -+#define RGX_CR_FWCORE_RESET_ADDR_ADDR_SHIFT (1U) -+#define RGX_CR_FWCORE_RESET_ADDR_ADDR_CLRMSK (0x00000001U) -+#define RGX_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSHIFT (1U) -+#define RGX_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSIZE (2U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_WRAPPER_NMI_ADDR -+*/ -+#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR (0x30A0U) -+#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFE)) -+#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_SHIFT (1U) -+#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_CLRMSK (0x00000001U) -+#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSHIFT (1U) -+#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSIZE (2U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_WRAPPER_NMI_EVENT -+*/ -+#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT (0x30A8U) -+#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_SHIFT (0U) -+#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS -+*/ -+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS (0x30B0U) -+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F771)) -+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_SHIFT (12U) -+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) -+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_SHIFT (8U) -+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) -+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_SHIFT (5U) -+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) -+#define 
RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_SHIFT (4U) -+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_EN (0x00000010U) -+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_SHIFT (0U) -+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS -+*/ -+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS (0x30B8U) -+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x001FFFFFFFFFFFF0)) -+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_SHIFT (52U) -+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) -+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0010000000000000)) -+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT (46U) -+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFF03FFFFFFFFFFF)) -+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT (40U) -+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFC0FFFFFFFFFF)) -+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT (4U) -+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) -+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) -+#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_MEM_CTRL_INVAL -+*/ -+#define RGX_CR_FWCORE_MEM_CTRL_INVAL (0x30C0U) -+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_SHIFT (3U) -+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_EN (0x00000008U) -+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_SHIFT (2U) -+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_EN (0x00000004U) -+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_SHIFT (1U) -+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_EN (0x00000002U) -+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_SHIFT (0U) -+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_MEM_MMU_STATUS -+*/ -+#define RGX_CR_FWCORE_MEM_MMU_STATUS (0x30C8U) -+#define RGX_CR_FWCORE_MEM_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFF7)) -+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_SHIFT (20U) -+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_CLRMSK (0xF00FFFFFU) -+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_SHIFT (12U) -+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_CLRMSK (0xFFF00FFFU) -+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_SHIFT (4U) -+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_CLRMSK (0xFFFFF00FU) -+#define RGX_CR_FWCORE_MEM_MMU_STATUS_STALLED_SHIFT (2U) -+#define RGX_CR_FWCORE_MEM_MMU_STATUS_STALLED_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_FWCORE_MEM_MMU_STATUS_STALLED_EN (0x00000004U) -+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PAUSED_SHIFT (1U) -+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PAUSED_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_FWCORE_MEM_MMU_STATUS_PAUSED_EN (0x00000002U) -+#define RGX_CR_FWCORE_MEM_MMU_STATUS_BUSY_SHIFT (0U) -+#define RGX_CR_FWCORE_MEM_MMU_STATUS_BUSY_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_FWCORE_MEM_MMU_STATUS_BUSY_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_MEM_READS_EXT_STATUS -+*/ -+#define 
RGX_CR_FWCORE_MEM_READS_EXT_STATUS (0x30D8U) -+#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000FFF)) -+#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_SHIFT (0U) -+#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_CLRMSK (0xFFFFF000U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_MEM_READS_INT_STATUS -+*/ -+#define RGX_CR_FWCORE_MEM_READS_INT_STATUS (0x30E0U) -+#define RGX_CR_FWCORE_MEM_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000007FF)) -+#define RGX_CR_FWCORE_MEM_READS_INT_STATUS_MMU_SHIFT (0U) -+#define RGX_CR_FWCORE_MEM_READS_INT_STATUS_MMU_CLRMSK (0xFFFFF800U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_WRAPPER_FENCE -+*/ -+#define RGX_CR_FWCORE_WRAPPER_FENCE (0x30E8U) -+#define RGX_CR_FWCORE_WRAPPER_FENCE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_FWCORE_WRAPPER_FENCE_ID_SHIFT (0U) -+#define RGX_CR_FWCORE_WRAPPER_FENCE_ID_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_FWCORE_WRAPPER_FENCE_ID_EN (0x00000001U) -+ -+ -+/* -+ Register group: RGX_CR_FWCORE_MEM_CAT_BASE, with 8 repeats -+*/ -+#define RGX_CR_FWCORE_MEM_CAT_BASE_REPEATCOUNT (8U) -+/* -+ Register RGX_CR_FWCORE_MEM_CAT_BASE0 -+*/ -+#define RGX_CR_FWCORE_MEM_CAT_BASE0 (0x30F0U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -+#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_MEM_CAT_BASE1 -+*/ -+#define RGX_CR_FWCORE_MEM_CAT_BASE1 (0x30F8U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -+#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_MEM_CAT_BASE2 -+*/ -+#define RGX_CR_FWCORE_MEM_CAT_BASE2 (0x3100U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -+#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_MEM_CAT_BASE3 -+*/ -+#define RGX_CR_FWCORE_MEM_CAT_BASE3 (0x3108U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -+#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_MEM_CAT_BASE4 -+*/ -+#define RGX_CR_FWCORE_MEM_CAT_BASE4 (0x3110U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -+#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_MEM_CAT_BASE5 -+*/ -+#define RGX_CR_FWCORE_MEM_CAT_BASE5 (0x3118U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -+#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_SHIFT 
(12U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_MEM_CAT_BASE6 -+*/ -+#define RGX_CR_FWCORE_MEM_CAT_BASE6 (0x3120U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -+#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_MEM_CAT_BASE7 -+*/ -+#define RGX_CR_FWCORE_MEM_CAT_BASE7 (0x3128U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) -+#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_SHIFT (12U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSHIFT (12U) -+#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_WDT_RESET -+*/ -+#define RGX_CR_FWCORE_WDT_RESET (0x3130U) -+#define RGX_CR_FWCORE_WDT_RESET_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_FWCORE_WDT_RESET_EN_SHIFT (0U) -+#define RGX_CR_FWCORE_WDT_RESET_EN_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_FWCORE_WDT_RESET_EN_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_WDT_CTRL -+*/ -+#define RGX_CR_FWCORE_WDT_CTRL (0x3138U) -+#define RGX_CR_FWCORE_WDT_CTRL_MASKFULL (IMG_UINT64_C(0x00000000FFFF1F01)) -+#define RGX_CR_FWCORE_WDT_CTRL_PROT_SHIFT (16U) -+#define RGX_CR_FWCORE_WDT_CTRL_PROT_CLRMSK (0x0000FFFFU) -+#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_SHIFT (8U) -+#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_CLRMSK (0xFFFFE0FFU) -+#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_SHIFT (0U) -+#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_WDT_COUNT -+*/ -+#define RGX_CR_FWCORE_WDT_COUNT (0x3140U) -+#define RGX_CR_FWCORE_WDT_COUNT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_FWCORE_WDT_COUNT_VALUE_SHIFT (0U) -+#define RGX_CR_FWCORE_WDT_COUNT_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register group: RGX_CR_FWCORE_DMI_RESERVED0, with 4 repeats -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED0_REPEATCOUNT (4U) -+/* -+ Register RGX_CR_FWCORE_DMI_RESERVED00 -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED00 (0x3400U) -+#define RGX_CR_FWCORE_DMI_RESERVED00_MASKFULL (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_RESERVED01 -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED01 (0x3408U) -+#define RGX_CR_FWCORE_DMI_RESERVED01_MASKFULL (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_RESERVED02 -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED02 (0x3410U) -+#define RGX_CR_FWCORE_DMI_RESERVED02_MASKFULL (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_RESERVED03 -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED03 (0x3418U) -+#define RGX_CR_FWCORE_DMI_RESERVED03_MASKFULL (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_DATA0 -+*/ -+#define RGX_CR_FWCORE_DMI_DATA0 (0x3420U) -+#define RGX_CR_FWCORE_DMI_DATA0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_FWCORE_DMI_DATA0_VAL_SHIFT (0U) -+#define RGX_CR_FWCORE_DMI_DATA0_VAL_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_DATA1 -+*/ -+#define RGX_CR_FWCORE_DMI_DATA1 (0x3428U) 
-+#define RGX_CR_FWCORE_DMI_DATA1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_FWCORE_DMI_DATA1_VAL_SHIFT (0U) -+#define RGX_CR_FWCORE_DMI_DATA1_VAL_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register group: RGX_CR_FWCORE_DMI_RESERVED1, with 10 repeats -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED1_REPEATCOUNT (10U) -+/* -+ Register RGX_CR_FWCORE_DMI_RESERVED10 -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED10 (0x3430U) -+#define RGX_CR_FWCORE_DMI_RESERVED10_MASKFULL (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_RESERVED11 -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED11 (0x3438U) -+#define RGX_CR_FWCORE_DMI_RESERVED11_MASKFULL (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_RESERVED12 -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED12 (0x3440U) -+#define RGX_CR_FWCORE_DMI_RESERVED12_MASKFULL (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_RESERVED13 -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED13 (0x3448U) -+#define RGX_CR_FWCORE_DMI_RESERVED13_MASKFULL (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_RESERVED14 -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED14 (0x3450U) -+#define RGX_CR_FWCORE_DMI_RESERVED14_MASKFULL (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_DMCONTROL -+*/ -+#define RGX_CR_FWCORE_DMI_DMCONTROL (0x3480U) -+#define RGX_CR_FWCORE_DMI_DMCONTROL_MASKFULL (IMG_UINT64_C(0x00000000D0000003)) -+#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_SHIFT (31U) -+#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_CLRMSK (0x7FFFFFFFU) -+#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN (0x80000000U) -+#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_SHIFT (30U) -+#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_CLRMSK (0xBFFFFFFFU) -+#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN (0x40000000U) -+#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_SHIFT (28U) -+#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_CLRMSK (0xEFFFFFFFU) -+#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_EN (0x10000000U) -+#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_SHIFT (1U) -+#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_EN (0x00000002U) -+#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_SHIFT (0U) -+#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_DMSTATUS -+*/ -+#define RGX_CR_FWCORE_DMI_DMSTATUS (0x3488U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_MASKFULL (IMG_UINT64_C(0x00000000004FFFFF)) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_SHIFT (22U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_CLRMSK (0xFFBFFFFFU) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_EN (0x00400000U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_SHIFT (19U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_CLRMSK (0xFFF7FFFFU) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_EN (0x00080000U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_SHIFT (18U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_CLRMSK (0xFFFBFFFFU) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_EN (0x00040000U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_SHIFT (17U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_CLRMSK (0xFFFDFFFFU) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN (0x00020000U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_SHIFT (16U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_CLRMSK (0xFFFEFFFFU) -+#define 
RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_EN (0x00010000U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_SHIFT (15U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_CLRMSK (0xFFFF7FFFU) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_EN (0x00008000U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_SHIFT (14U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_CLRMSK (0xFFFFBFFFU) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_EN (0x00004000U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_SHIFT (13U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_CLRMSK (0xFFFFDFFFU) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_EN (0x00002000U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_SHIFT (12U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_CLRMSK (0xFFFFEFFFU) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_EN (0x00001000U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_SHIFT (11U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_CLRMSK (0xFFFFF7FFU) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_EN (0x00000800U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_SHIFT (10U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_CLRMSK (0xFFFFFBFFU) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_EN (0x00000400U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_SHIFT (9U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_CLRMSK (0xFFFFFDFFU) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN (0x00000200U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_SHIFT (8U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_EN (0x00000100U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_SHIFT (7U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_CLRMSK (0xFFFFFF7FU) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_EN (0x00000080U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_SHIFT (6U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_EN (0x00000040U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_SHIFT (5U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_EN (0x00000020U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_SHIFT (4U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_EN (0x00000010U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_SHIFT (0U) -+#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_CLRMSK (0xFFFFFFF0U) -+ -+ -+/* -+ Register group: RGX_CR_FWCORE_DMI_RESERVED2, with 4 repeats -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED2_REPEATCOUNT (4U) -+/* -+ Register RGX_CR_FWCORE_DMI_RESERVED20 -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED20 (0x3490U) -+#define RGX_CR_FWCORE_DMI_RESERVED20_MASKFULL (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_RESERVED21 -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED21 (0x3498U) -+#define RGX_CR_FWCORE_DMI_RESERVED21_MASKFULL (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_RESERVED22 -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED22 (0x34A0U) -+#define RGX_CR_FWCORE_DMI_RESERVED22_MASKFULL (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_RESERVED23 -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED23 (0x34A8U) -+#define RGX_CR_FWCORE_DMI_RESERVED23_MASKFULL (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_ABSTRACTCS -+*/ -+#define RGX_CR_FWCORE_DMI_ABSTRACTCS (0x34B0U) -+#define 
RGX_CR_FWCORE_DMI_ABSTRACTCS_MASKFULL (IMG_UINT64_C(0x000000001F00170F)) -+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_SHIFT (24U) -+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_CLRMSK (0xE0FFFFFFU) -+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_SHIFT (12U) -+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_CLRMSK (0xFFFFEFFFU) -+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN (0x00001000U) -+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT (8U) -+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK (0xFFFFF8FFU) -+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_SHIFT (0U) -+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_CLRMSK (0xFFFFFFF0U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_COMMAND -+*/ -+#define RGX_CR_FWCORE_DMI_COMMAND (0x34B8U) -+#define RGX_CR_FWCORE_DMI_COMMAND_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT (24U) -+#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_CLRMSK (0x00FFFFFFU) -+#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_SHIFT (0U) -+#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_CLRMSK (0xFF000000U) -+ -+ -+/* -+ Register group: RGX_CR_FWCORE_DMI_RESERVED3, with 32 repeats -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED3_REPEATCOUNT (32U) -+/* -+ Register RGX_CR_FWCORE_DMI_RESERVED30 -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED30 (0x34C0U) -+#define RGX_CR_FWCORE_DMI_RESERVED30_MASKFULL (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_RESERVED31 -+*/ -+#define RGX_CR_FWCORE_DMI_RESERVED31 (0x34C8U) -+#define RGX_CR_FWCORE_DMI_RESERVED31_MASKFULL (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_SBCS -+*/ -+#define RGX_CR_FWCORE_DMI_SBCS (0x35C0U) -+#define RGX_CR_FWCORE_DMI_SBCS_MASKFULL (IMG_UINT64_C(0x00000000E07FFFFF)) -+#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_SHIFT (29U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_CLRMSK (0x1FFFFFFFU) -+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_SHIFT (22U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_CLRMSK (0xFFBFFFFFU) -+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_EN (0x00400000U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_SHIFT (21U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_CLRMSK (0xFFDFFFFFU) -+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN (0x00200000U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_SHIFT (20U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_CLRMSK (0xFFEFFFFFU) -+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN (0x00100000U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT (17U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_CLRMSK (0xFFF1FFFFU) -+#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_SHIFT (16U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_CLRMSK (0xFFFEFFFFU) -+#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_EN (0x00010000U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_SHIFT (15U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_CLRMSK (0xFFFF7FFFU) -+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_EN (0x00008000U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT (12U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK (0xFFFF8FFFU) -+#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_SHIFT (5U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_CLRMSK (0xFFFFF01FU) -+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_SHIFT (4U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_EN (0x00000010U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_SHIFT (3U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_EN (0x00000008U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_SHIFT 
(2U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_EN (0x00000004U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_SHIFT (1U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_EN (0x00000002U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_SHIFT (0U) -+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_SBADDRESS0 -+*/ -+#define RGX_CR_FWCORE_DMI_SBADDRESS0 (0x35C8U) -+#define RGX_CR_FWCORE_DMI_SBADDRESS0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_SHIFT (0U) -+#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register group: RGX_CR_FWCORE_DMI_SBDATA, with 4 repeats -+*/ -+#define RGX_CR_FWCORE_DMI_SBDATA_REPEATCOUNT (4U) -+/* -+ Register RGX_CR_FWCORE_DMI_SBDATA0 -+*/ -+#define RGX_CR_FWCORE_DMI_SBDATA0 (0x35E0U) -+#define RGX_CR_FWCORE_DMI_SBDATA0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_FWCORE_DMI_SBDATA0_DATA_SHIFT (0U) -+#define RGX_CR_FWCORE_DMI_SBDATA0_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_SBDATA1 -+*/ -+#define RGX_CR_FWCORE_DMI_SBDATA1 (0x35E8U) -+#define RGX_CR_FWCORE_DMI_SBDATA1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_FWCORE_DMI_SBDATA1_DATA_SHIFT (0U) -+#define RGX_CR_FWCORE_DMI_SBDATA1_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_SBDATA2 -+*/ -+#define RGX_CR_FWCORE_DMI_SBDATA2 (0x35F0U) -+#define RGX_CR_FWCORE_DMI_SBDATA2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_FWCORE_DMI_SBDATA2_DATA_SHIFT (0U) -+#define RGX_CR_FWCORE_DMI_SBDATA2_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_SBDATA3 -+*/ -+#define RGX_CR_FWCORE_DMI_SBDATA3 (0x35F8U) -+#define RGX_CR_FWCORE_DMI_SBDATA3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_FWCORE_DMI_SBDATA3_DATA_SHIFT (0U) -+#define RGX_CR_FWCORE_DMI_SBDATA3_DATA_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_FWCORE_DMI_HALTSUM0 -+*/ -+#define RGX_CR_FWCORE_DMI_HALTSUM0 (0x3600U) -+#define RGX_CR_FWCORE_DMI_HALTSUM0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_SHIFT (0U) -+#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SLC_CTRL_MISC -+*/ -+#define RGX_CR_SLC_CTRL_MISC (0x3800U) -+#define RGX_CR_SLC_CTRL_MISC_MASKFULL (IMG_UINT64_C(0xFFFFFFFF03FF010F)) -+#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_SHIFT (32U) -+#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SLC_CTRL_MISC_TAG_ID_LIMIT_CONTROL_SHIFT (25U) -+#define RGX_CR_SLC_CTRL_MISC_TAG_ID_LIMIT_CONTROL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) -+#define RGX_CR_SLC_CTRL_MISC_TAG_ID_LIMIT_CONTROL_EN (IMG_UINT64_C(0x0000000002000000)) -+#define RGX_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_SHIFT (24U) -+#define RGX_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) -+#define RGX_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_EN (IMG_UINT64_C(0x0000000001000000)) -+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SHIFT (16U) -+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) -+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_64_BYTE (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_128_BYTE (IMG_UINT64_C(0x0000000000010000)) 
-+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH1 (IMG_UINT64_C(0x0000000000100000)) -+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH2 (IMG_UINT64_C(0x0000000000110000)) -+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1 (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH2_SCRAMBLE (IMG_UINT64_C(0x0000000000210000)) -+#define RGX_CR_SLC_CTRL_MISC_PAUSE_SHIFT (8U) -+#define RGX_CR_SLC_CTRL_MISC_PAUSE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -+#define RGX_CR_SLC_CTRL_MISC_PAUSE_EN (IMG_UINT64_C(0x0000000000000100)) -+#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_SHIFT (3U) -+#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -+#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_EN (IMG_UINT64_C(0x0000000000000008)) -+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_SHIFT (2U) -+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_EN (IMG_UINT64_C(0x0000000000000004)) -+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_SHIFT (1U) -+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN (IMG_UINT64_C(0x0000000000000002)) -+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_SHIFT (0U) -+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_SLC_CTRL_FLUSH_INVAL -+*/ -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL (0x3818U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_MASKFULL (IMG_UINT64_C(0x0000000080000FFF)) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_SHIFT (31U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_CLRMSK (0x7FFFFFFFU) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_EN (0x80000000U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_SHIFT (11U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_CLRMSK (0xFFFFF7FFU) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_EN (0x00000800U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_SHIFT (10U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_CLRMSK (0xFFFFFBFFU) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_EN (0x00000400U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_SHIFT (9U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_CLRMSK (0xFFFFFDFFU) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_EN (0x00000200U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_SHIFT (8U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_EN (0x00000100U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_SHIFT (7U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_CLRMSK (0xFFFFFF7FU) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_EN (0x00000080U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_SHIFT (6U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_EN (0x00000040U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_SHIFT (5U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_EN (0x00000020U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_SHIFT (4U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_EN (0x00000010U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_SHIFT (3U) -+#define 
RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_EN (0x00000008U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_SHIFT (2U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_EN (0x00000004U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_SHIFT (1U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_EN (0x00000002U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_SHIFT (0U) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_SLC_STATUS0 -+*/ -+#define RGX_CR_SLC_STATUS0 (0x3820U) -+#define RGX_CR_SLC_STATUS0_MASKFULL (IMG_UINT64_C(0x0000000000000007)) -+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_SHIFT (2U) -+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN (0x00000004U) -+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_SHIFT (1U) -+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_EN (0x00000002U) -+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_SHIFT (0U) -+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_SLC_CTRL_BYPASS -+*/ -+#define RGX_CR_SLC_CTRL_BYPASS (0x3828U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFF7FFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_COMP_ZLS_SHIFT (59U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_COMP_ZLS_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_COMP_ZLS_EN (IMG_UINT64_C(0x0800000000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_ZLS_HEADER_SHIFT (58U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_ZLS_HEADER_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_ZLS_HEADER_EN (IMG_UINT64_C(0x0400000000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_TCU_HEADER_SHIFT (57U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_TCU_HEADER_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_TCU_HEADER_EN (IMG_UINT64_C(0x0200000000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_ZLS_DATA_SHIFT (56U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_ZLS_DATA_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_ZLS_DATA_EN (IMG_UINT64_C(0x0100000000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_TCU_DATA_SHIFT (55U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_TCU_DATA_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_DECOMP_TCU_DATA_EN (IMG_UINT64_C(0x0080000000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_COMP_PBE_SHIFT (54U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_COMP_PBE_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TFBC_COMP_PBE_EN (IMG_UINT64_C(0x0040000000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TCU_DM_COMPUTE_SHIFT (53U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TCU_DM_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) -+#define 
RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TCU_DM_COMPUTE_EN (IMG_UINT64_C(0x0020000000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__PDSRW_NOLINEFILL_SHIFT (52U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__PDSRW_NOLINEFILL_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__PDSRW_NOLINEFILL_EN (IMG_UINT64_C(0x0010000000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__PBE_NOLINEFILL_SHIFT (51U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__PBE_NOLINEFILL_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__PBE_NOLINEFILL_EN (IMG_UINT64_C(0x0008000000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_FBC_SHIFT (50U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_FBC_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_FBC_EN (IMG_UINT64_C(0x0004000000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_RREQ_SHIFT (49U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_RREQ_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_RREQ_EN (IMG_UINT64_C(0x0002000000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_CREQ_SHIFT (48U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_CREQ_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_CREQ_EN (IMG_UINT64_C(0x0001000000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_PREQ_SHIFT (47U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_PREQ_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_PREQ_EN (IMG_UINT64_C(0x0000800000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_DBSC_SHIFT (46U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_DBSC_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_IPF_DBSC_EN (IMG_UINT64_C(0x0000400000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TCU_SHIFT (45U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TCU_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TCU_EN (IMG_UINT64_C(0x0000200000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_PBE_SHIFT (44U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_PBE_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_PBE_EN (IMG_UINT64_C(0x0000100000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_ISP_SHIFT (43U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_ISP_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_ISP_EN (IMG_UINT64_C(0x0000080000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_PM_SHIFT (42U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_PM_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_PM_EN (IMG_UINT64_C(0x0000040000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TDM_SHIFT (41U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TDM_EN (IMG_UINT64_C(0x0000020000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_CDM_SHIFT (40U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_CDM_EN (IMG_UINT64_C(0x0000010000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_PDS_STATE_SHIFT (39U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_PDS_STATE_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) -+#define 
RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_PDS_STATE_EN (IMG_UINT64_C(0x0000008000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_DB_SHIFT (38U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_DB_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_DB_EN (IMG_UINT64_C(0x0000004000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_VTX_VAR_SHIFT (37U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_VTX_VAR_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TSPF_VTX_VAR_EN (IMG_UINT64_C(0x0000002000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_VDM_SHIFT (36U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_VDM_EN (IMG_UINT64_C(0x0000001000000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PSG_STREAM_SHIFT (35U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PSG_STREAM_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PSG_STREAM_EN (IMG_UINT64_C(0x0000000800000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PSG_REGION_SHIFT (34U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PSG_REGION_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PSG_REGION_EN (IMG_UINT64_C(0x0000000400000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_VCE_SHIFT (33U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_VCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_VCE_EN (IMG_UINT64_C(0x0000000200000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PPP_SHIFT (32U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__REQ_TA_PPP_EN (IMG_UINT64_C(0x0000000100000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_FASTRENDER_SHIFT (31U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_FASTRENDER_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_FASTRENDER_EN (IMG_UINT64_C(0x0000000080000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PM_ALIST_SHIFT (30U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PM_ALIST_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PM_ALIST_EN (IMG_UINT64_C(0x0000000040000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PB_TE_SHIFT (29U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PB_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PB_TE_EN (IMG_UINT64_C(0x0000000020000000)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PB_VCE_SHIFT (28U) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PB_VCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__DM_PB_VCE_EN (IMG_UINT64_C(0x0000000010000000)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_SHIFT (27U) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_EN (IMG_UINT64_C(0x0000000008000000)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_SHIFT (26U) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_EN (IMG_UINT64_C(0x0000000004000000)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_SHIFT (25U) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN 
(IMG_UINT64_C(0x0000000002000000)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_SHIFT (24U) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_EN (IMG_UINT64_C(0x0000000001000000)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_SHIFT (23U) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_EN (IMG_UINT64_C(0x0000000000800000)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_SHIFT (22U) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_EN (IMG_UINT64_C(0x0000000000400000)) -+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_SHIFT (21U) -+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_EN (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_SHIFT (20U) -+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN (IMG_UINT64_C(0x0000000000100000)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_SHIFT (19U) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_EN (IMG_UINT64_C(0x0000000000080000)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_SHIFT (18U) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_EN (IMG_UINT64_C(0x0000000000040000)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_SHIFT (17U) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_EN (IMG_UINT64_C(0x0000000000020000)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_SHIFT (16U) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_EN (IMG_UINT64_C(0x0000000000010000)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_SHIFT (15U) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN (IMG_UINT64_C(0x0000000000008000)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_SHIFT (14U) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_EN (IMG_UINT64_C(0x0000000000004000)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_SHIFT (13U) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_EN (IMG_UINT64_C(0x0000000000002000)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_SHIFT (12U) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_EN (IMG_UINT64_C(0x0000000000001000)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_SHIFT (11U) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_EN (IMG_UINT64_C(0x0000000000000800)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_SHIFT (10U) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_EN (IMG_UINT64_C(0x0000000000000400)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_SHIFT (9U) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_EN (IMG_UINT64_C(0x0000000000000200)) -+#define 
RGX_CR_SLC_CTRL_BYPASS_DM_FRC_SHIFT (8U) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_EN (IMG_UINT64_C(0x0000000000000100)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_SHIFT (7U) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_EN (IMG_UINT64_C(0x0000000000000080)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_SHIFT (6U) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_EN (IMG_UINT64_C(0x0000000000000040)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_SHIFT (5U) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_EN (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_SHIFT (4U) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_EN (IMG_UINT64_C(0x0000000000000010)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_SHIFT (3U) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_EN (IMG_UINT64_C(0x0000000000000008)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_SHIFT (2U) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_EN (IMG_UINT64_C(0x0000000000000004)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_SHIFT (1U) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_EN (IMG_UINT64_C(0x0000000000000002)) -+#define RGX_CR_SLC_CTRL_BYPASS_ALL_SHIFT (0U) -+#define RGX_CR_SLC_CTRL_BYPASS_ALL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_SLC_CTRL_BYPASS_ALL_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_SLC_STATUS1 -+*/ -+#define RGX_CR_SLC_STATUS1 (0x3870U) -+#define RGX_CR_SLC_STATUS1_MASKFULL (IMG_UINT64_C(0x800003FF03FFFFFF)) -+#define RGX_CR_SLC_STATUS1_PAUSED_SHIFT (63U) -+#define RGX_CR_SLC_STATUS1_PAUSED_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) -+#define RGX_CR_SLC_STATUS1_PAUSED_EN (IMG_UINT64_C(0x8000000000000000)) -+#define RGX_CR_SLC_STATUS1_READS1_SHIFT (32U) -+#define RGX_CR_SLC_STATUS1_READS1_CLRMSK (IMG_UINT64_C(0xFFFFFC00FFFFFFFF)) -+#define RGX_CR_SLC_STATUS1_READS0_SHIFT (16U) -+#define RGX_CR_SLC_STATUS1_READS0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC00FFFF)) -+#define RGX_CR_SLC_STATUS1_READS1_EXT_SHIFT (8U) -+#define RGX_CR_SLC_STATUS1_READS1_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) -+#define RGX_CR_SLC_STATUS1_READS0_EXT_SHIFT (0U) -+#define RGX_CR_SLC_STATUS1_READS0_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) -+ -+ -+/* -+ Register RGX_CR_SLC_IDLE -+*/ -+#define RGX_CR_SLC_IDLE (0x3898U) -+#define RGX_CR_SLC_IDLE__XE_MEM__MASKFULL (IMG_UINT64_C(0x00000000000003FF)) -+#define RGX_CR_SLC_IDLE_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_SLC_IDLE__XE_MEM__MH_SYSARB1_SHIFT (9U) -+#define RGX_CR_SLC_IDLE__XE_MEM__MH_SYSARB1_CLRMSK (0xFFFFFDFFU) -+#define RGX_CR_SLC_IDLE__XE_MEM__MH_SYSARB1_EN (0x00000200U) -+#define RGX_CR_SLC_IDLE__XE_MEM__MH_SYSARB0_SHIFT (8U) -+#define RGX_CR_SLC_IDLE__XE_MEM__MH_SYSARB0_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_SLC_IDLE__XE_MEM__MH_SYSARB0_EN (0x00000100U) -+#define RGX_CR_SLC_IDLE_IMGBV4_SHIFT (7U) -+#define RGX_CR_SLC_IDLE_IMGBV4_CLRMSK (0xFFFFFF7FU) -+#define RGX_CR_SLC_IDLE_IMGBV4_EN (0x00000080U) -+#define 
RGX_CR_SLC_IDLE_CACHE_BANKS_SHIFT (6U) -+#define RGX_CR_SLC_IDLE_CACHE_BANKS_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_SLC_IDLE_CACHE_BANKS_EN (0x00000040U) -+#define RGX_CR_SLC_IDLE_RBOFIFO_SHIFT (5U) -+#define RGX_CR_SLC_IDLE_RBOFIFO_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_SLC_IDLE_RBOFIFO_EN (0x00000020U) -+#define RGX_CR_SLC_IDLE_FRC_CONV_SHIFT (4U) -+#define RGX_CR_SLC_IDLE_FRC_CONV_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_SLC_IDLE_FRC_CONV_EN (0x00000010U) -+#define RGX_CR_SLC_IDLE_VXE_CONV_SHIFT (3U) -+#define RGX_CR_SLC_IDLE_VXE_CONV_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_SLC_IDLE_VXE_CONV_EN (0x00000008U) -+#define RGX_CR_SLC_IDLE_VXD_CONV_SHIFT (2U) -+#define RGX_CR_SLC_IDLE_VXD_CONV_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_SLC_IDLE_VXD_CONV_EN (0x00000004U) -+#define RGX_CR_SLC_IDLE_BIF1_CONV_SHIFT (1U) -+#define RGX_CR_SLC_IDLE_BIF1_CONV_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_SLC_IDLE_BIF1_CONV_EN (0x00000002U) -+#define RGX_CR_SLC_IDLE_CBAR_SHIFT (0U) -+#define RGX_CR_SLC_IDLE_CBAR_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_SLC_IDLE_CBAR_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_SLC_STATUS2 -+*/ -+#define RGX_CR_SLC_STATUS2 (0x3908U) -+#define RGX_CR_SLC_STATUS2_MASKFULL (IMG_UINT64_C(0x000003FF03FFFFFF)) -+#define RGX_CR_SLC_STATUS2_READS3_SHIFT (32U) -+#define RGX_CR_SLC_STATUS2_READS3_CLRMSK (IMG_UINT64_C(0xFFFFFC00FFFFFFFF)) -+#define RGX_CR_SLC_STATUS2_READS2_SHIFT (16U) -+#define RGX_CR_SLC_STATUS2_READS2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC00FFFF)) -+#define RGX_CR_SLC_STATUS2_READS3_EXT_SHIFT (8U) -+#define RGX_CR_SLC_STATUS2_READS3_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) -+#define RGX_CR_SLC_STATUS2_READS2_EXT_SHIFT (0U) -+#define RGX_CR_SLC_STATUS2_READS2_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) -+ -+ -+/* -+ Register RGX_CR_SLC_CTRL_MISC2 -+*/ -+#define RGX_CR_SLC_CTRL_MISC2 (0x3930U) -+#define RGX_CR_SLC_CTRL_MISC2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_SHIFT (0U) -+#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SLC_CROSSBAR_LOAD_BALANCE -+*/ -+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE (0x3938U) -+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_SHIFT (0U) -+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_SLC_SIZE_IN_KB -+*/ -+#define RGX_CR_SLC_SIZE_IN_KB (0x3970U) -+#define RGX_CR_SLC_SIZE_IN_KB_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_SLC_SIZE_IN_KB_SIZE_SHIFT (0U) -+#define RGX_CR_SLC_SIZE_IN_KB_SIZE_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_USC_TIMER -+*/ -+#define RGX_CR_USC_TIMER (0x46C8U) -+#define RGX_CR_USC_TIMER_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_USC_TIMER_CNT_SHIFT (0U) -+#define RGX_CR_USC_TIMER_CNT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_USC_TIMER_CNT -+*/ -+#define RGX_CR_USC_TIMER_CNT (0x46D0U) -+#define RGX_CR_USC_TIMER_CNT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_USC_TIMER_CNT_RESET_SHIFT (0U) -+#define RGX_CR_USC_TIMER_CNT_RESET_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_USC_TIMER_CNT_RESET_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_USC_UVS0_CHECKSUM -+*/ -+#define RGX_CR_USC_UVS0_CHECKSUM (0x5000U) -+#define RGX_CR_USC_UVS0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_SHIFT (0U) -+#define 
RGX_CR_USC_UVS0_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_USC_UVS1_CHECKSUM -+*/ -+#define RGX_CR_USC_UVS1_CHECKSUM (0x5008U) -+#define RGX_CR_USC_UVS1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_USC_UVS2_CHECKSUM -+*/ -+#define RGX_CR_USC_UVS2_CHECKSUM (0x5010U) -+#define RGX_CR_USC_UVS2_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_USC_UVS3_CHECKSUM -+*/ -+#define RGX_CR_USC_UVS3_CHECKSUM (0x5018U) -+#define RGX_CR_USC_UVS3_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PPP_SIGNATURE -+*/ -+#define RGX_CR_PPP_SIGNATURE (0x5020U) -+#define RGX_CR_PPP_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PPP_SIGNATURE_VALUE_SHIFT (0U) -+#define RGX_CR_PPP_SIGNATURE_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_TE_SIGNATURE -+*/ -+#define RGX_CR_TE_SIGNATURE (0x5028U) -+#define RGX_CR_TE_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_TE_SIGNATURE_VALUE_SHIFT (0U) -+#define RGX_CR_TE_SIGNATURE_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_TE_CHECKSUM -+*/ -+#define RGX_CR_TE_CHECKSUM (0x5110U) -+#define RGX_CR_TE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_TE_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_TE_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_USC_UVB_CHECKSUM -+*/ -+#define RGX_CR_USC_UVB_CHECKSUM (0x5118U) -+#define RGX_CR_USC_UVB_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_USC_UVB_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_VCE_CHECKSUM -+*/ -+#define RGX_CR_VCE_CHECKSUM (0x5030U) -+#define RGX_CR_VCE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_VCE_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_VCE_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_ISP_PDS_CHECKSUM -+*/ -+#define RGX_CR_ISP_PDS_CHECKSUM (0x5038U) -+#define RGX_CR_ISP_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_ISP_TPF_CHECKSUM -+*/ -+#define RGX_CR_ISP_TPF_CHECKSUM (0x5040U) -+#define RGX_CR_ISP_TPF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_TFPU_PLANE0_CHECKSUM -+*/ -+#define RGX_CR_TFPU_PLANE0_CHECKSUM (0x5048U) -+#define RGX_CR_TFPU_PLANE0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_TFPU_PLANE1_CHECKSUM -+*/ -+#define RGX_CR_TFPU_PLANE1_CHECKSUM (0x5050U) -+#define RGX_CR_TFPU_PLANE1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PBE_CHECKSUM -+*/ -+#define RGX_CR_PBE_CHECKSUM (0x5058U) 
-+#define RGX_CR_PBE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PBE_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_PBE_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PDS_DOUTM_STM_SIGNATURE -+*/ -+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE (0x5060U) -+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_SHIFT (0U) -+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_IFPU_ISP_CHECKSUM -+*/ -+#define RGX_CR_IFPU_ISP_CHECKSUM (0x5068U) -+#define RGX_CR_IFPU_ISP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_USC_UVS4_CHECKSUM -+*/ -+#define RGX_CR_USC_UVS4_CHECKSUM (0x5100U) -+#define RGX_CR_USC_UVS4_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_USC_UVS5_CHECKSUM -+*/ -+#define RGX_CR_USC_UVS5_CHECKSUM (0x5108U) -+#define RGX_CR_USC_UVS5_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PPP_CLIP_CHECKSUM -+*/ -+#define RGX_CR_PPP_CLIP_CHECKSUM (0x5120U) -+#define RGX_CR_PPP_CLIP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PERF_TA_PHASE -+*/ -+#define RGX_CR_PERF_TA_PHASE (0x6008U) -+#define RGX_CR_PERF_TA_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PERF_TA_PHASE_COUNT_SHIFT (0U) -+#define RGX_CR_PERF_TA_PHASE_COUNT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PERF_3D_PHASE -+*/ -+#define RGX_CR_PERF_3D_PHASE (0x6010U) -+#define RGX_CR_PERF_3D_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PERF_3D_PHASE_COUNT_SHIFT (0U) -+#define RGX_CR_PERF_3D_PHASE_COUNT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PERF_COMPUTE_PHASE -+*/ -+#define RGX_CR_PERF_COMPUTE_PHASE (0x6018U) -+#define RGX_CR_PERF_COMPUTE_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_SHIFT (0U) -+#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PERF_TA_CYCLE -+*/ -+#define RGX_CR_PERF_TA_CYCLE (0x6020U) -+#define RGX_CR_PERF_TA_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PERF_TA_CYCLE_COUNT_SHIFT (0U) -+#define RGX_CR_PERF_TA_CYCLE_COUNT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PERF_3D_CYCLE -+*/ -+#define RGX_CR_PERF_3D_CYCLE (0x6028U) -+#define RGX_CR_PERF_3D_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PERF_3D_CYCLE_COUNT_SHIFT (0U) -+#define RGX_CR_PERF_3D_CYCLE_COUNT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PERF_COMPUTE_CYCLE -+*/ -+#define RGX_CR_PERF_COMPUTE_CYCLE (0x6030U) -+#define RGX_CR_PERF_COMPUTE_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_SHIFT (0U) -+#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PERF_TA_OR_3D_CYCLE -+*/ -+#define RGX_CR_PERF_TA_OR_3D_CYCLE (0x6038U) -+#define RGX_CR_PERF_TA_OR_3D_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define 
RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_SHIFT (0U) -+#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PERF_INITIAL_TA_CYCLE -+*/ -+#define RGX_CR_PERF_INITIAL_TA_CYCLE (0x6040U) -+#define RGX_CR_PERF_INITIAL_TA_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_SHIFT (0U) -+#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PERF_SLC0_READ_STALL -+*/ -+#define RGX_CR_PERF_SLC0_READ_STALL (0x60B8U) -+#define RGX_CR_PERF_SLC0_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT (0U) -+#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PERF_SLC0_WRITE_STALL -+*/ -+#define RGX_CR_PERF_SLC0_WRITE_STALL (0x60C0U) -+#define RGX_CR_PERF_SLC0_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT (0U) -+#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PERF_SLC1_READ_STALL -+*/ -+#define RGX_CR_PERF_SLC1_READ_STALL (0x60E0U) -+#define RGX_CR_PERF_SLC1_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT (0U) -+#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PERF_SLC1_WRITE_STALL -+*/ -+#define RGX_CR_PERF_SLC1_WRITE_STALL (0x60E8U) -+#define RGX_CR_PERF_SLC1_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT (0U) -+#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PERF_SLC2_READ_STALL -+*/ -+#define RGX_CR_PERF_SLC2_READ_STALL (0x6158U) -+#define RGX_CR_PERF_SLC2_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT (0U) -+#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PERF_SLC2_WRITE_STALL -+*/ -+#define RGX_CR_PERF_SLC2_WRITE_STALL (0x6160U) -+#define RGX_CR_PERF_SLC2_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT (0U) -+#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PERF_SLC3_READ_STALL -+*/ -+#define RGX_CR_PERF_SLC3_READ_STALL (0x6180U) -+#define RGX_CR_PERF_SLC3_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT (0U) -+#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PERF_SLC3_WRITE_STALL -+*/ -+#define RGX_CR_PERF_SLC3_WRITE_STALL (0x6188U) -+#define RGX_CR_PERF_SLC3_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT (0U) -+#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PERF_3D_SPINUP -+*/ -+#define RGX_CR_PERF_3D_SPINUP (0x6220U) -+#define RGX_CR_PERF_3D_SPINUP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PERF_3D_SPINUP_CYCLES_SHIFT (0U) -+#define RGX_CR_PERF_3D_SPINUP_CYCLES_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_AXI_ACE_LITE_CONFIGURATION -+*/ -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION (0x38C0U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_MASKFULL (IMG_UINT64_C(0x00003FFFFFFFFFFF)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_SHIFT (45U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) -+#define 
RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_EN (IMG_UINT64_C(0x0000200000000000)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT (37U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_CLRMSK (IMG_UINT64_C(0xFFFFE01FFFFFFFFF)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT (36U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_EN (IMG_UINT64_C(0x0000001000000000)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_SHIFT (35U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_EN (IMG_UINT64_C(0x0000000800000000)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_SHIFT (34U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_EN (IMG_UINT64_C(0x0000000400000000)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT (30U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFC3FFFFFFF)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT (26U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC3FFFFFF)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT (22U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC3FFFFF)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_SHIFT (20U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_SHIFT (18U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT (16U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT (14U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT (12U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT (10U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT (8U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_SHIFT (4U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF0F)) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_SHIFT (0U) -+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF0)) -+ -+ -+/* -+ Register RGX_CR_POWER_ESTIMATE_RESULT -+*/ -+#define RGX_CR_POWER_ESTIMATE_RESULT (0x6328U) -+#define RGX_CR_POWER_ESTIMATE_RESULT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define 
RGX_CR_POWER_ESTIMATE_RESULT_VALUE_SHIFT (0U) -+#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_TA_PERF -+*/ -+#define RGX_CR_TA_PERF (0x7600U) -+#define RGX_CR_TA_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -+#define RGX_CR_TA_PERF_CLR_3_SHIFT (4U) -+#define RGX_CR_TA_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_TA_PERF_CLR_3_EN (0x00000010U) -+#define RGX_CR_TA_PERF_CLR_2_SHIFT (3U) -+#define RGX_CR_TA_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_TA_PERF_CLR_2_EN (0x00000008U) -+#define RGX_CR_TA_PERF_CLR_1_SHIFT (2U) -+#define RGX_CR_TA_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_TA_PERF_CLR_1_EN (0x00000004U) -+#define RGX_CR_TA_PERF_CLR_0_SHIFT (1U) -+#define RGX_CR_TA_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_TA_PERF_CLR_0_EN (0x00000002U) -+#define RGX_CR_TA_PERF_CTRL_ENABLE_SHIFT (0U) -+#define RGX_CR_TA_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_TA_PERF_CTRL_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_TA_PERF_SELECT0 -+*/ -+#define RGX_CR_TA_PERF_SELECT0 (0x7608U) -+#define RGX_CR_TA_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -+#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -+#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -+#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define RGX_CR_TA_PERF_SELECT0_MODE_SHIFT (21U) -+#define RGX_CR_TA_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define RGX_CR_TA_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -+#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -+#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -+#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_TA_PERF_SELECT1 -+*/ -+#define RGX_CR_TA_PERF_SELECT1 (0x7610U) -+#define RGX_CR_TA_PERF_SELECT1_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -+#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_SHIFT (48U) -+#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_SHIFT (32U) -+#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define RGX_CR_TA_PERF_SELECT1_MODE_SHIFT (21U) -+#define RGX_CR_TA_PERF_SELECT1_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define RGX_CR_TA_PERF_SELECT1_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_SHIFT (16U) -+#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -+#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_SHIFT (0U) -+#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_TA_PERF_SELECT2 -+*/ -+#define RGX_CR_TA_PERF_SELECT2 (0x7618U) -+#define RGX_CR_TA_PERF_SELECT2_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -+#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_SHIFT (48U) -+#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_SHIFT (32U) -+#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define RGX_CR_TA_PERF_SELECT2_MODE_SHIFT (21U) -+#define RGX_CR_TA_PERF_SELECT2_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define RGX_CR_TA_PERF_SELECT2_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -+#define 
RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_SHIFT (16U) -+#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -+#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_SHIFT (0U) -+#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_TA_PERF_SELECT3 -+*/ -+#define RGX_CR_TA_PERF_SELECT3 (0x7620U) -+#define RGX_CR_TA_PERF_SELECT3_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -+#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_SHIFT (48U) -+#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_SHIFT (32U) -+#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define RGX_CR_TA_PERF_SELECT3_MODE_SHIFT (21U) -+#define RGX_CR_TA_PERF_SELECT3_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define RGX_CR_TA_PERF_SELECT3_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_SHIFT (16U) -+#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -+#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_SHIFT (0U) -+#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_TA_PERF_SELECTED_BITS -+*/ -+#define RGX_CR_TA_PERF_SELECTED_BITS (0x7648U) -+#define RGX_CR_TA_PERF_SELECTED_BITS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_SHIFT (48U) -+#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) -+#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_SHIFT (32U) -+#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) -+#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_SHIFT (16U) -+#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) -+#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_SHIFT (0U) -+#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_TA_PERF_COUNTER_0 -+*/ -+#define RGX_CR_TA_PERF_COUNTER_0 (0x7650U) -+#define RGX_CR_TA_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_TA_PERF_COUNTER_0_REG_SHIFT (0U) -+#define RGX_CR_TA_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_TA_PERF_COUNTER_1 -+*/ -+#define RGX_CR_TA_PERF_COUNTER_1 (0x7658U) -+#define RGX_CR_TA_PERF_COUNTER_1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_TA_PERF_COUNTER_1_REG_SHIFT (0U) -+#define RGX_CR_TA_PERF_COUNTER_1_REG_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_TA_PERF_COUNTER_2 -+*/ -+#define RGX_CR_TA_PERF_COUNTER_2 (0x7660U) -+#define RGX_CR_TA_PERF_COUNTER_2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_TA_PERF_COUNTER_2_REG_SHIFT (0U) -+#define RGX_CR_TA_PERF_COUNTER_2_REG_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_TA_PERF_COUNTER_3 -+*/ -+#define RGX_CR_TA_PERF_COUNTER_3 (0x7668U) -+#define RGX_CR_TA_PERF_COUNTER_3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_TA_PERF_COUNTER_3_REG_SHIFT (0U) -+#define RGX_CR_TA_PERF_COUNTER_3_REG_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_RASTERISATION_PERF -+*/ -+#define RGX_CR_RASTERISATION_PERF (0x7700U) -+#define RGX_CR_RASTERISATION_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -+#define RGX_CR_RASTERISATION_PERF_CLR_3_SHIFT (4U) -+#define RGX_CR_RASTERISATION_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_RASTERISATION_PERF_CLR_3_EN (0x00000010U) -+#define RGX_CR_RASTERISATION_PERF_CLR_2_SHIFT (3U) -+#define 
RGX_CR_RASTERISATION_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_RASTERISATION_PERF_CLR_2_EN (0x00000008U) -+#define RGX_CR_RASTERISATION_PERF_CLR_1_SHIFT (2U) -+#define RGX_CR_RASTERISATION_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_RASTERISATION_PERF_CLR_1_EN (0x00000004U) -+#define RGX_CR_RASTERISATION_PERF_CLR_0_SHIFT (1U) -+#define RGX_CR_RASTERISATION_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_RASTERISATION_PERF_CLR_0_EN (0x00000002U) -+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_SHIFT (0U) -+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_RASTERISATION_PERF_SELECT0 -+*/ -+#define RGX_CR_RASTERISATION_PERF_SELECT0 (0x7708U) -+#define RGX_CR_RASTERISATION_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_SHIFT (21U) -+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -+#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -+#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -+#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_RASTERISATION_PERF_COUNTER_0 -+*/ -+#define RGX_CR_RASTERISATION_PERF_COUNTER_0 (0x7750U) -+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_SHIFT (0U) -+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_HUB_BIFPMCACHE_PERF -+*/ -+#define RGX_CR_HUB_BIFPMCACHE_PERF (0x7800U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_SHIFT (4U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_EN (0x00000010U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_SHIFT (3U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_EN (0x00000008U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_SHIFT (2U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_EN (0x00000004U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_SHIFT (1U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_EN (0x00000002U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_SHIFT (0U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0 -+*/ -+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0 (0x7808U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_CLRMSK 
(IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_SHIFT (21U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 -+*/ -+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 (0x7850U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_SHIFT (0U) -+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_TPU_MCU_L0_PERF -+*/ -+#define RGX_CR_TPU_MCU_L0_PERF (0x7900U) -+#define RGX_CR_TPU_MCU_L0_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_SHIFT (4U) -+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_EN (0x00000010U) -+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_SHIFT (3U) -+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_EN (0x00000008U) -+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_SHIFT (2U) -+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_EN (0x00000004U) -+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_SHIFT (1U) -+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_EN (0x00000002U) -+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_SHIFT (0U) -+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_TPU_MCU_L0_PERF_SELECT0 -+*/ -+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0 (0x7908U) -+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_SHIFT (21U) -+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_TPU_MCU_L0_PERF_COUNTER_0 -+*/ -+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0 (0x7950U) -+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_SHIFT (0U) -+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register 
RGX_CR_USC_PERF -+*/ -+#define RGX_CR_USC_PERF (0x8100U) -+#define RGX_CR_USC_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -+#define RGX_CR_USC_PERF_CLR_3_SHIFT (4U) -+#define RGX_CR_USC_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_USC_PERF_CLR_3_EN (0x00000010U) -+#define RGX_CR_USC_PERF_CLR_2_SHIFT (3U) -+#define RGX_CR_USC_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_USC_PERF_CLR_2_EN (0x00000008U) -+#define RGX_CR_USC_PERF_CLR_1_SHIFT (2U) -+#define RGX_CR_USC_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_USC_PERF_CLR_1_EN (0x00000004U) -+#define RGX_CR_USC_PERF_CLR_0_SHIFT (1U) -+#define RGX_CR_USC_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_USC_PERF_CLR_0_EN (0x00000002U) -+#define RGX_CR_USC_PERF_CTRL_ENABLE_SHIFT (0U) -+#define RGX_CR_USC_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_USC_PERF_CTRL_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_USC_PERF_SELECT0 -+*/ -+#define RGX_CR_USC_PERF_SELECT0 (0x8108U) -+#define RGX_CR_USC_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -+#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -+#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -+#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define RGX_CR_USC_PERF_SELECT0_MODE_SHIFT (21U) -+#define RGX_CR_USC_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define RGX_CR_USC_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -+#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -+#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -+#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_USC_PERF_COUNTER_0 -+*/ -+#define RGX_CR_USC_PERF_COUNTER_0 (0x8150U) -+#define RGX_CR_USC_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_USC_PERF_COUNTER_0_REG_SHIFT (0U) -+#define RGX_CR_USC_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_JONES_IDLE -+*/ -+#define RGX_CR_JONES_IDLE (0x8328U) -+#define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000007FFF)) -+#define RGX_CR_JONES_IDLE_TDM_SHIFT (14U) -+#define RGX_CR_JONES_IDLE_TDM_CLRMSK (0xFFFFBFFFU) -+#define RGX_CR_JONES_IDLE_TDM_EN (0x00004000U) -+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_SHIFT (13U) -+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK (0xFFFFDFFFU) -+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_EN (0x00002000U) -+#define RGX_CR_JONES_IDLE_FB_CDC_SHIFT (12U) -+#define RGX_CR_JONES_IDLE_FB_CDC_CLRMSK (0xFFFFEFFFU) -+#define RGX_CR_JONES_IDLE_FB_CDC_EN (0x00001000U) -+#define RGX_CR_JONES_IDLE_MMU_SHIFT (11U) -+#define RGX_CR_JONES_IDLE_MMU_CLRMSK (0xFFFFF7FFU) -+#define RGX_CR_JONES_IDLE_MMU_EN (0x00000800U) -+#define RGX_CR_JONES_IDLE_TLA_SHIFT (10U) -+#define RGX_CR_JONES_IDLE_TLA_CLRMSK (0xFFFFFBFFU) -+#define RGX_CR_JONES_IDLE_TLA_EN (0x00000400U) -+#define RGX_CR_JONES_IDLE_GARTEN_SHIFT (9U) -+#define RGX_CR_JONES_IDLE_GARTEN_CLRMSK (0xFFFFFDFFU) -+#define RGX_CR_JONES_IDLE_GARTEN_EN (0x00000200U) -+#define RGX_CR_JONES_IDLE_HOSTIF_SHIFT (8U) -+#define RGX_CR_JONES_IDLE_HOSTIF_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_JONES_IDLE_HOSTIF_EN (0x00000100U) -+#define RGX_CR_JONES_IDLE_SOCIF_SHIFT (7U) -+#define RGX_CR_JONES_IDLE_SOCIF_CLRMSK (0xFFFFFF7FU) -+#define RGX_CR_JONES_IDLE_SOCIF_EN (0x00000080U) -+#define RGX_CR_JONES_IDLE_TILING_SHIFT (6U) -+#define 
RGX_CR_JONES_IDLE_TILING_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_JONES_IDLE_TILING_EN (0x00000040U) -+#define RGX_CR_JONES_IDLE_IPP_SHIFT (5U) -+#define RGX_CR_JONES_IDLE_IPP_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_JONES_IDLE_IPP_EN (0x00000020U) -+#define RGX_CR_JONES_IDLE_USCS_SHIFT (4U) -+#define RGX_CR_JONES_IDLE_USCS_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_JONES_IDLE_USCS_EN (0x00000010U) -+#define RGX_CR_JONES_IDLE_PM_SHIFT (3U) -+#define RGX_CR_JONES_IDLE_PM_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_JONES_IDLE_PM_EN (0x00000008U) -+#define RGX_CR_JONES_IDLE_CDM_SHIFT (2U) -+#define RGX_CR_JONES_IDLE_CDM_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_JONES_IDLE_CDM_EN (0x00000004U) -+#define RGX_CR_JONES_IDLE_VDM_SHIFT (1U) -+#define RGX_CR_JONES_IDLE_VDM_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_JONES_IDLE_VDM_EN (0x00000002U) -+#define RGX_CR_JONES_IDLE_BIF_SHIFT (0U) -+#define RGX_CR_JONES_IDLE_BIF_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_JONES_IDLE_BIF_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_TORNADO_PERF -+*/ -+#define RGX_CR_TORNADO_PERF (0x8228U) -+#define RGX_CR_TORNADO_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -+#define RGX_CR_TORNADO_PERF_CLR_3_SHIFT (4U) -+#define RGX_CR_TORNADO_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_TORNADO_PERF_CLR_3_EN (0x00000010U) -+#define RGX_CR_TORNADO_PERF_CLR_2_SHIFT (3U) -+#define RGX_CR_TORNADO_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_TORNADO_PERF_CLR_2_EN (0x00000008U) -+#define RGX_CR_TORNADO_PERF_CLR_1_SHIFT (2U) -+#define RGX_CR_TORNADO_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_TORNADO_PERF_CLR_1_EN (0x00000004U) -+#define RGX_CR_TORNADO_PERF_CLR_0_SHIFT (1U) -+#define RGX_CR_TORNADO_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_TORNADO_PERF_CLR_0_EN (0x00000002U) -+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_SHIFT (0U) -+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_TORNADO_PERF_SELECT0 -+*/ -+#define RGX_CR_TORNADO_PERF_SELECT0 (0x8230U) -+#define RGX_CR_TORNADO_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_SHIFT (21U) -+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -+#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -+#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -+#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_TORNADO_PERF_COUNTER_0 -+*/ -+#define RGX_CR_TORNADO_PERF_COUNTER_0 (0x8268U) -+#define RGX_CR_TORNADO_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_SHIFT (0U) -+#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_TEXAS_PERF -+*/ -+#define RGX_CR_TEXAS_PERF (0x8290U) -+#define RGX_CR_TEXAS_PERF_MASKFULL (IMG_UINT64_C(0x000000000000007F)) -+#define RGX_CR_TEXAS_PERF_CLR_5_SHIFT (6U) -+#define RGX_CR_TEXAS_PERF_CLR_5_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_TEXAS_PERF_CLR_5_EN 
(0x00000040U) -+#define RGX_CR_TEXAS_PERF_CLR_4_SHIFT (5U) -+#define RGX_CR_TEXAS_PERF_CLR_4_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_TEXAS_PERF_CLR_4_EN (0x00000020U) -+#define RGX_CR_TEXAS_PERF_CLR_3_SHIFT (4U) -+#define RGX_CR_TEXAS_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_TEXAS_PERF_CLR_3_EN (0x00000010U) -+#define RGX_CR_TEXAS_PERF_CLR_2_SHIFT (3U) -+#define RGX_CR_TEXAS_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_TEXAS_PERF_CLR_2_EN (0x00000008U) -+#define RGX_CR_TEXAS_PERF_CLR_1_SHIFT (2U) -+#define RGX_CR_TEXAS_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_TEXAS_PERF_CLR_1_EN (0x00000004U) -+#define RGX_CR_TEXAS_PERF_CLR_0_SHIFT (1U) -+#define RGX_CR_TEXAS_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_TEXAS_PERF_CLR_0_EN (0x00000002U) -+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_SHIFT (0U) -+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_TEXAS_PERF_SELECT0 -+*/ -+#define RGX_CR_TEXAS_PERF_SELECT0 (0x8298U) -+#define RGX_CR_TEXAS_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF803FFFFF)) -+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_SHIFT (31U) -+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000080000000)) -+#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -+#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC0FFFF)) -+#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -+#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_TEXAS_PERF_COUNTER_0 -+*/ -+#define RGX_CR_TEXAS_PERF_COUNTER_0 (0x82D8U) -+#define RGX_CR_TEXAS_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_SHIFT (0U) -+#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_JONES_PERF -+*/ -+#define RGX_CR_JONES_PERF (0x8330U) -+#define RGX_CR_JONES_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -+#define RGX_CR_JONES_PERF_CLR_3_SHIFT (4U) -+#define RGX_CR_JONES_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_JONES_PERF_CLR_3_EN (0x00000010U) -+#define RGX_CR_JONES_PERF_CLR_2_SHIFT (3U) -+#define RGX_CR_JONES_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_JONES_PERF_CLR_2_EN (0x00000008U) -+#define RGX_CR_JONES_PERF_CLR_1_SHIFT (2U) -+#define RGX_CR_JONES_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_JONES_PERF_CLR_1_EN (0x00000004U) -+#define RGX_CR_JONES_PERF_CLR_0_SHIFT (1U) -+#define RGX_CR_JONES_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_JONES_PERF_CLR_0_EN (0x00000002U) -+#define RGX_CR_JONES_PERF_CTRL_ENABLE_SHIFT (0U) -+#define RGX_CR_JONES_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_JONES_PERF_CTRL_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_JONES_PERF_SELECT0 -+*/ -+#define RGX_CR_JONES_PERF_SELECT0 (0x8338U) -+#define RGX_CR_JONES_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -+#define 
RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define RGX_CR_JONES_PERF_SELECT0_MODE_SHIFT (21U) -+#define RGX_CR_JONES_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define RGX_CR_JONES_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -+#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -+#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -+#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_JONES_PERF_COUNTER_0 -+*/ -+#define RGX_CR_JONES_PERF_COUNTER_0 (0x8368U) -+#define RGX_CR_JONES_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_JONES_PERF_COUNTER_0_REG_SHIFT (0U) -+#define RGX_CR_JONES_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_BLACKPEARL_PERF -+*/ -+#define RGX_CR_BLACKPEARL_PERF (0x8400U) -+#define RGX_CR_BLACKPEARL_PERF_MASKFULL (IMG_UINT64_C(0x000000000000007F)) -+#define RGX_CR_BLACKPEARL_PERF_CLR_5_SHIFT (6U) -+#define RGX_CR_BLACKPEARL_PERF_CLR_5_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_BLACKPEARL_PERF_CLR_5_EN (0x00000040U) -+#define RGX_CR_BLACKPEARL_PERF_CLR_4_SHIFT (5U) -+#define RGX_CR_BLACKPEARL_PERF_CLR_4_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_BLACKPEARL_PERF_CLR_4_EN (0x00000020U) -+#define RGX_CR_BLACKPEARL_PERF_CLR_3_SHIFT (4U) -+#define RGX_CR_BLACKPEARL_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_BLACKPEARL_PERF_CLR_3_EN (0x00000010U) -+#define RGX_CR_BLACKPEARL_PERF_CLR_2_SHIFT (3U) -+#define RGX_CR_BLACKPEARL_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_BLACKPEARL_PERF_CLR_2_EN (0x00000008U) -+#define RGX_CR_BLACKPEARL_PERF_CLR_1_SHIFT (2U) -+#define RGX_CR_BLACKPEARL_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_BLACKPEARL_PERF_CLR_1_EN (0x00000004U) -+#define RGX_CR_BLACKPEARL_PERF_CLR_0_SHIFT (1U) -+#define RGX_CR_BLACKPEARL_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_BLACKPEARL_PERF_CLR_0_EN (0x00000002U) -+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_SHIFT (0U) -+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_BLACKPEARL_PERF_SELECT0 -+*/ -+#define RGX_CR_BLACKPEARL_PERF_SELECT0 (0x8408U) -+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF803FFFFF)) -+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_SHIFT (31U) -+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000080000000)) -+#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -+#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC0FFFF)) -+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_BLACKPEARL_PERF_COUNTER_0 -+*/ -+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0 (0x8448U) -+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_SHIFT 
(0U) -+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_PBE_PERF -+*/ -+#define RGX_CR_PBE_PERF (0x8478U) -+#define RGX_CR_PBE_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -+#define RGX_CR_PBE_PERF_CLR_3_SHIFT (4U) -+#define RGX_CR_PBE_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_PBE_PERF_CLR_3_EN (0x00000010U) -+#define RGX_CR_PBE_PERF_CLR_2_SHIFT (3U) -+#define RGX_CR_PBE_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_PBE_PERF_CLR_2_EN (0x00000008U) -+#define RGX_CR_PBE_PERF_CLR_1_SHIFT (2U) -+#define RGX_CR_PBE_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_PBE_PERF_CLR_1_EN (0x00000004U) -+#define RGX_CR_PBE_PERF_CLR_0_SHIFT (1U) -+#define RGX_CR_PBE_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_PBE_PERF_CLR_0_EN (0x00000002U) -+#define RGX_CR_PBE_PERF_CTRL_ENABLE_SHIFT (0U) -+#define RGX_CR_PBE_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_PBE_PERF_CTRL_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_PBE_PERF_SELECT0 -+*/ -+#define RGX_CR_PBE_PERF_SELECT0 (0x8480U) -+#define RGX_CR_PBE_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define RGX_CR_PBE_PERF_SELECT0_MODE_SHIFT (21U) -+#define RGX_CR_PBE_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define RGX_CR_PBE_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -+#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -+#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -+#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_PBE_PERF_COUNTER_0 -+*/ -+#define RGX_CR_PBE_PERF_COUNTER_0 (0x84B0U) -+#define RGX_CR_PBE_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_PBE_PERF_COUNTER_0_REG_SHIFT (0U) -+#define RGX_CR_PBE_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_OCP_REVINFO -+*/ -+#define RGX_CR_OCP_REVINFO (0x9000U) -+#define RGX_CR_OCP_REVINFO_MASKFULL (IMG_UINT64_C(0x00000007FFFFFFFF)) -+#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_SHIFT (33U) -+#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_CLRMSK (IMG_UINT64_C(0xFFFFFFF9FFFFFFFF)) -+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_SHIFT (32U) -+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_EN (IMG_UINT64_C(0x0000000100000000)) -+#define RGX_CR_OCP_REVINFO_REVISION_SHIFT (0U) -+#define RGX_CR_OCP_REVINFO_REVISION_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) -+ -+ -+/* -+ Register RGX_CR_OCP_SYSCONFIG -+*/ -+#define RGX_CR_OCP_SYSCONFIG (0x9010U) -+#define RGX_CR_OCP_SYSCONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000FFF)) -+#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_SHIFT (10U) -+#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_CLRMSK (0xFFFFF3FFU) -+#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_SHIFT (8U) -+#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_CLRMSK (0xFFFFFCFFU) -+#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_SHIFT (6U) -+#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_CLRMSK (0xFFFFFF3FU) -+#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_SHIFT (4U) -+#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_CLRMSK 
(0xFFFFFFCFU) -+#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT (2U) -+#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_CLRMSK (0xFFFFFFF3U) -+#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT (0U) -+#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_CLRMSK (0xFFFFFFFCU) -+ -+ -+/* -+ Register RGX_CR_OCP_IRQSTATUS_RAW_0 -+*/ -+#define RGX_CR_OCP_IRQSTATUS_RAW_0 (0x9020U) -+#define RGX_CR_OCP_IRQSTATUS_RAW_0_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_SHIFT (0U) -+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_OCP_IRQSTATUS_RAW_1 -+*/ -+#define RGX_CR_OCP_IRQSTATUS_RAW_1 (0x9028U) -+#define RGX_CR_OCP_IRQSTATUS_RAW_1_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_SHIFT (0U) -+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_OCP_IRQSTATUS_RAW_2 -+*/ -+#define RGX_CR_OCP_IRQSTATUS_RAW_2 (0x9030U) -+#define RGX_CR_OCP_IRQSTATUS_RAW_2_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_SHIFT (0U) -+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_OCP_IRQSTATUS_0 -+*/ -+#define RGX_CR_OCP_IRQSTATUS_0 (0x9038U) -+#define RGX_CR_OCP_IRQSTATUS_0_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_SHIFT (0U) -+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_OCP_IRQSTATUS_1 -+*/ -+#define RGX_CR_OCP_IRQSTATUS_1 (0x9040U) -+#define RGX_CR_OCP_IRQSTATUS_1_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_SHIFT (0U) -+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_OCP_IRQSTATUS_2 -+*/ -+#define RGX_CR_OCP_IRQSTATUS_2 (0x9048U) -+#define RGX_CR_OCP_IRQSTATUS_2_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_SHIFT (0U) -+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_OCP_IRQENABLE_SET_0 -+*/ -+#define RGX_CR_OCP_IRQENABLE_SET_0 (0x9050U) -+#define RGX_CR_OCP_IRQENABLE_SET_0_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_SHIFT (0U) -+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_OCP_IRQENABLE_SET_1 -+*/ -+#define RGX_CR_OCP_IRQENABLE_SET_1 (0x9058U) -+#define RGX_CR_OCP_IRQENABLE_SET_1_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_SHIFT (0U) -+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_OCP_IRQENABLE_SET_2 -+*/ -+#define RGX_CR_OCP_IRQENABLE_SET_2 (0x9060U) -+#define 
RGX_CR_OCP_IRQENABLE_SET_2_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_SHIFT (0U) -+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_OCP_IRQENABLE_CLR_0 -+*/ -+#define RGX_CR_OCP_IRQENABLE_CLR_0 (0x9068U) -+#define RGX_CR_OCP_IRQENABLE_CLR_0_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_SHIFT (0U) -+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_OCP_IRQENABLE_CLR_1 -+*/ -+#define RGX_CR_OCP_IRQENABLE_CLR_1 (0x9070U) -+#define RGX_CR_OCP_IRQENABLE_CLR_1_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_SHIFT (0U) -+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_OCP_IRQENABLE_CLR_2 -+*/ -+#define RGX_CR_OCP_IRQENABLE_CLR_2 (0x9078U) -+#define RGX_CR_OCP_IRQENABLE_CLR_2_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_SHIFT (0U) -+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_OCP_IRQ_EVENT -+*/ -+#define RGX_CR_OCP_IRQ_EVENT (0x9080U) -+#define RGX_CR_OCP_IRQ_EVENT_MASKFULL (IMG_UINT64_C(0x00000000000FFFFF)) -+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_SHIFT (19U) -+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) -+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0x0000000000080000)) -+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_SHIFT (18U) -+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) -+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0x0000000000040000)) -+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_SHIFT (17U) -+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) -+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0x0000000000020000)) -+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_SHIFT (16U) -+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) -+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0x0000000000010000)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_SHIFT (15U) -+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000008000)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_SHIFT (14U) -+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000004000)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_SHIFT (13U) -+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_EN 
(IMG_UINT64_C(0x0000000000002000)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_SHIFT (12U) -+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000001000)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_SHIFT (11U) -+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000800)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_SHIFT (10U) -+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000000400)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_SHIFT (9U) -+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000000200)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_SHIFT (8U) -+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000100)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_SHIFT (7U) -+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000080)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_SHIFT (6U) -+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000000040)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_SHIFT (5U) -+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_SHIFT (4U) -+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000010)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_SHIFT (3U) -+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000008)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_SHIFT (2U) -+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000000004)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_SHIFT (1U) -+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000000002)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_SHIFT (0U) -+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_OCP_DEBUG_CONFIG -+*/ -+#define RGX_CR_OCP_DEBUG_CONFIG (0x9088U) 
-+#define RGX_CR_OCP_DEBUG_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_OCP_DEBUG_CONFIG_REG_SHIFT (0U) -+#define RGX_CR_OCP_DEBUG_CONFIG_REG_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_OCP_DEBUG_CONFIG_REG_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_OCP_DEBUG_STATUS -+*/ -+#define RGX_CR_OCP_DEBUG_STATUS (0x9090U) -+#define RGX_CR_OCP_DEBUG_STATUS_MASKFULL (IMG_UINT64_C(0x001F1F77FFFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_SHIFT (51U) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_CLRMSK (IMG_UINT64_C(0xFFE7FFFFFFFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_SHIFT (50U) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_EN (IMG_UINT64_C(0x0004000000000000)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_SHIFT (48U) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_SHIFT (43U) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFE7FFFFFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_SHIFT (42U) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_EN (IMG_UINT64_C(0x0000040000000000)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_SHIFT (40U) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_SHIFT (38U) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_EN (IMG_UINT64_C(0x0000004000000000)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_SHIFT (37U) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_EN (IMG_UINT64_C(0x0000002000000000)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_SHIFT (36U) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_EN (IMG_UINT64_C(0x0000001000000000)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_SHIFT (34U) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_EN (IMG_UINT64_C(0x0000000400000000)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_SHIFT (33U) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_EN (IMG_UINT64_C(0x0000000200000000)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_SHIFT (32U) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_EN (IMG_UINT64_C(0x0000000100000000)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_SHIFT (31U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_EN (IMG_UINT64_C(0x0000000080000000)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_SHIFT (30U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_EN (IMG_UINT64_C(0x0000000040000000)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_SHIFT (29U) 
-+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_EN (IMG_UINT64_C(0x0000000020000000)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_SHIFT (27U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFE7FFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_SHIFT (26U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_EN (IMG_UINT64_C(0x0000000004000000)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_SHIFT (24U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_SHIFT (23U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_EN (IMG_UINT64_C(0x0000000000800000)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_SHIFT (22U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_EN (IMG_UINT64_C(0x0000000000400000)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_SHIFT (21U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_EN (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_SHIFT (19U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE7FFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_SHIFT (18U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_EN (IMG_UINT64_C(0x0000000000040000)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_SHIFT (16U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_SHIFT (15U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_EN (IMG_UINT64_C(0x0000000000008000)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_SHIFT (14U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_EN (IMG_UINT64_C(0x0000000000004000)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_SHIFT (13U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_EN (IMG_UINT64_C(0x0000000000002000)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_SHIFT (11U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFE7FF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_SHIFT (10U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_EN (IMG_UINT64_C(0x0000000000000400)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_SHIFT (8U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_SHIFT (7U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_EN (IMG_UINT64_C(0x0000000000000080)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_SHIFT (6U) -+#define 
RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_EN (IMG_UINT64_C(0x0000000000000040)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_SHIFT (5U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_EN (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_SHIFT (3U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE7)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_SHIFT (2U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_EN (IMG_UINT64_C(0x0000000000000004)) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_SHIFT (0U) -+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) -+ -+ -+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_SHIFT (6U) -+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_EN (0x00000040U) -+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_SHIFT (5U) -+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_EN (0x00000020U) -+#define RGX_CR_BIF_TRUST_DM_TYPE_META_SHIFT (4U) -+#define RGX_CR_BIF_TRUST_DM_TYPE_META_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_BIF_TRUST_DM_TYPE_META_EN (0x00000010U) -+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_SHIFT (3U) -+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_EN (0x00000008U) -+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_SHIFT (2U) -+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_EN (0x00000004U) -+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_SHIFT (1U) -+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_EN (0x00000002U) -+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_SHIFT (0U) -+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_EN (0x00000001U) -+ -+ -+#define RGX_CR_BIF_TRUST_DM_MASK (0x0000007FU) -+ -+ -+/* -+ Register RGX_CR_BIF_TRUST -+*/ -+#define RGX_CR_BIF_TRUST (0xA000U) -+#define RGX_CR_BIF_TRUST_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF)) -+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_SHIFT (20U) -+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_CLRMSK (0xFFEFFFFFU) -+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_EN (0x00100000U) -+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_SHIFT (19U) -+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_CLRMSK (0xFFF7FFFFU) -+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_EN (0x00080000U) -+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_SHIFT (18U) -+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_CLRMSK (0xFFFBFFFFU) -+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_EN (0x00040000U) -+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_SHIFT (17U) -+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_CLRMSK (0xFFFDFFFFU) -+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_EN (0x00020000U) -+#define RGX_CR_BIF_TRUST_ENABLE_SHIFT (16U) -+#define RGX_CR_BIF_TRUST_ENABLE_CLRMSK (0xFFFEFFFFU) -+#define RGX_CR_BIF_TRUST_ENABLE_EN (0x00010000U) -+#define RGX_CR_BIF_TRUST_DM_TRUSTED_SHIFT (9U) -+#define RGX_CR_BIF_TRUST_DM_TRUSTED_CLRMSK (0xFFFF01FFU) -+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_SHIFT (8U) -+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_CLRMSK 
(0xFFFFFEFFU) -+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_EN (0x00000100U) -+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_SHIFT (7U) -+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_CLRMSK (0xFFFFFF7FU) -+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_EN (0x00000080U) -+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_SHIFT (6U) -+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_EN (0x00000040U) -+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_SHIFT (5U) -+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_EN (0x00000020U) -+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_SHIFT (4U) -+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_EN (0x00000010U) -+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_SHIFT (3U) -+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_EN (0x00000008U) -+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_SHIFT (2U) -+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_EN (0x00000004U) -+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_SHIFT (1U) -+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_EN (0x00000002U) -+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_SHIFT (0U) -+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_SYS_BUS_SECURE -+*/ -+#define RGX_CR_SYS_BUS_SECURE (0xA100U) -+#define RGX_CR_SYS_BUS_SECURE__SECR__MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_SYS_BUS_SECURE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_SYS_BUS_SECURE_ENABLE_SHIFT (0U) -+#define RGX_CR_SYS_BUS_SECURE_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_SYS_BUS_SECURE_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_FBA_FC0_CHECKSUM -+*/ -+#define RGX_CR_FBA_FC0_CHECKSUM (0xD170U) -+#define RGX_CR_FBA_FC0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_FBA_FC1_CHECKSUM -+*/ -+#define RGX_CR_FBA_FC1_CHECKSUM (0xD178U) -+#define RGX_CR_FBA_FC1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_FBA_FC2_CHECKSUM -+*/ -+#define RGX_CR_FBA_FC2_CHECKSUM (0xD180U) -+#define RGX_CR_FBA_FC2_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_FBA_FC3_CHECKSUM -+*/ -+#define RGX_CR_FBA_FC3_CHECKSUM (0xD188U) -+#define RGX_CR_FBA_FC3_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_CLK_CTRL2 -+*/ -+#define RGX_CR_CLK_CTRL2 (0xD200U) -+#define RGX_CR_CLK_CTRL2_MASKFULL (IMG_UINT64_C(0x0000000000000F33)) -+#define RGX_CR_CLK_CTRL2_MCU_FBTC_SHIFT (10U) -+#define RGX_CR_CLK_CTRL2_MCU_FBTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) -+#define RGX_CR_CLK_CTRL2_MCU_FBTC_OFF 
(IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL2_MCU_FBTC_ON (IMG_UINT64_C(0x0000000000000400)) -+#define RGX_CR_CLK_CTRL2_MCU_FBTC_AUTO (IMG_UINT64_C(0x0000000000000800)) -+#define RGX_CR_CLK_CTRL2_VRDM_SHIFT (8U) -+#define RGX_CR_CLK_CTRL2_VRDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) -+#define RGX_CR_CLK_CTRL2_VRDM_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL2_VRDM_ON (IMG_UINT64_C(0x0000000000000100)) -+#define RGX_CR_CLK_CTRL2_VRDM_AUTO (IMG_UINT64_C(0x0000000000000200)) -+#define RGX_CR_CLK_CTRL2_SH_SHIFT (4U) -+#define RGX_CR_CLK_CTRL2_SH_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) -+#define RGX_CR_CLK_CTRL2_SH_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL2_SH_ON (IMG_UINT64_C(0x0000000000000010)) -+#define RGX_CR_CLK_CTRL2_SH_AUTO (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_CR_CLK_CTRL2_FBA_SHIFT (0U) -+#define RGX_CR_CLK_CTRL2_FBA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) -+#define RGX_CR_CLK_CTRL2_FBA_OFF (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_CTRL2_FBA_ON (IMG_UINT64_C(0x0000000000000001)) -+#define RGX_CR_CLK_CTRL2_FBA_AUTO (IMG_UINT64_C(0x0000000000000002)) -+ -+ -+/* -+ Register RGX_CR_CLK_STATUS2 -+*/ -+#define RGX_CR_CLK_STATUS2 (0xD208U) -+#define RGX_CR_CLK_STATUS2_MASKFULL (IMG_UINT64_C(0x0000000000000015)) -+#define RGX_CR_CLK_STATUS2_VRDM_SHIFT (4U) -+#define RGX_CR_CLK_STATUS2_VRDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -+#define RGX_CR_CLK_STATUS2_VRDM_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS2_VRDM_RUNNING (IMG_UINT64_C(0x0000000000000010)) -+#define RGX_CR_CLK_STATUS2_SH_SHIFT (2U) -+#define RGX_CR_CLK_STATUS2_SH_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -+#define RGX_CR_CLK_STATUS2_SH_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS2_SH_RUNNING (IMG_UINT64_C(0x0000000000000004)) -+#define RGX_CR_CLK_STATUS2_FBA_SHIFT (0U) -+#define RGX_CR_CLK_STATUS2_FBA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_CLK_STATUS2_FBA_GATED (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_CR_CLK_STATUS2_FBA_RUNNING (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_RPM_SHF_FPL -+*/ -+#define RGX_CR_RPM_SHF_FPL (0xD520U) -+#define RGX_CR_RPM_SHF_FPL_MASKFULL (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC)) -+#define RGX_CR_RPM_SHF_FPL_SIZE_SHIFT (40U) -+#define RGX_CR_RPM_SHF_FPL_SIZE_CLRMSK (IMG_UINT64_C(0xC00000FFFFFFFFFF)) -+#define RGX_CR_RPM_SHF_FPL_BASE_SHIFT (2U) -+#define RGX_CR_RPM_SHF_FPL_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000003)) -+#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT (2U) -+#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSIZE (4U) -+ -+ -+/* -+ Register RGX_CR_RPM_SHF_FPL_READ -+*/ -+#define RGX_CR_RPM_SHF_FPL_READ (0xD528U) -+#define RGX_CR_RPM_SHF_FPL_READ_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF)) -+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_SHIFT (22U) -+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_CLRMSK (0xFFBFFFFFU) -+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_EN (0x00400000U) -+#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_SHIFT (0U) -+#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_CLRMSK (0xFFC00000U) -+ -+ -+/* -+ Register RGX_CR_RPM_SHF_FPL_WRITE -+*/ -+#define RGX_CR_RPM_SHF_FPL_WRITE (0xD530U) -+#define RGX_CR_RPM_SHF_FPL_WRITE_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF)) -+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_SHIFT (22U) -+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_CLRMSK (0xFFBFFFFFU) -+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_EN (0x00400000U) -+#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_SHIFT (0U) -+#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_CLRMSK 
(0xFFC00000U) -+ -+ -+/* -+ Register RGX_CR_RPM_SHG_FPL -+*/ -+#define RGX_CR_RPM_SHG_FPL (0xD538U) -+#define RGX_CR_RPM_SHG_FPL_MASKFULL (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC)) -+#define RGX_CR_RPM_SHG_FPL_SIZE_SHIFT (40U) -+#define RGX_CR_RPM_SHG_FPL_SIZE_CLRMSK (IMG_UINT64_C(0xC00000FFFFFFFFFF)) -+#define RGX_CR_RPM_SHG_FPL_BASE_SHIFT (2U) -+#define RGX_CR_RPM_SHG_FPL_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000003)) -+#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSHIFT (2U) -+#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSIZE (4U) -+ -+ -+/* -+ Register RGX_CR_RPM_SHG_FPL_READ -+*/ -+#define RGX_CR_RPM_SHG_FPL_READ (0xD540U) -+#define RGX_CR_RPM_SHG_FPL_READ_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF)) -+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_SHIFT (22U) -+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_CLRMSK (0xFFBFFFFFU) -+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_EN (0x00400000U) -+#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_SHIFT (0U) -+#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_CLRMSK (0xFFC00000U) -+ -+ -+/* -+ Register RGX_CR_RPM_SHG_FPL_WRITE -+*/ -+#define RGX_CR_RPM_SHG_FPL_WRITE (0xD548U) -+#define RGX_CR_RPM_SHG_FPL_WRITE_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF)) -+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_SHIFT (22U) -+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_CLRMSK (0xFFBFFFFFU) -+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_EN (0x00400000U) -+#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_SHIFT (0U) -+#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_CLRMSK (0xFFC00000U) -+ -+ -+/* -+ Register RGX_CR_SH_PERF -+*/ -+#define RGX_CR_SH_PERF (0xD5F8U) -+#define RGX_CR_SH_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -+#define RGX_CR_SH_PERF_CLR_3_SHIFT (4U) -+#define RGX_CR_SH_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_SH_PERF_CLR_3_EN (0x00000010U) -+#define RGX_CR_SH_PERF_CLR_2_SHIFT (3U) -+#define RGX_CR_SH_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_SH_PERF_CLR_2_EN (0x00000008U) -+#define RGX_CR_SH_PERF_CLR_1_SHIFT (2U) -+#define RGX_CR_SH_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_SH_PERF_CLR_1_EN (0x00000004U) -+#define RGX_CR_SH_PERF_CLR_0_SHIFT (1U) -+#define RGX_CR_SH_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_SH_PERF_CLR_0_EN (0x00000002U) -+#define RGX_CR_SH_PERF_CTRL_ENABLE_SHIFT (0U) -+#define RGX_CR_SH_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_SH_PERF_CTRL_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_SH_PERF_SELECT0 -+*/ -+#define RGX_CR_SH_PERF_SELECT0 (0xD600U) -+#define RGX_CR_SH_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -+#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -+#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -+#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define RGX_CR_SH_PERF_SELECT0_MODE_SHIFT (21U) -+#define RGX_CR_SH_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define RGX_CR_SH_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -+#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -+#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -+#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -+#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_SH_PERF_COUNTER_0 -+*/ -+#define RGX_CR_SH_PERF_COUNTER_0 (0xD628U) -+#define RGX_CR_SH_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SH_PERF_COUNTER_0_REG_SHIFT (0U) -+#define RGX_CR_SH_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) -+ 
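The *_SHIFT / *_CLRMSK / *_EN triplets defined throughout these register blocks follow the usual read-modify-write convention: the CLRMSK constant has zeros over the field, SHIFT positions a new value, and EN is the value of the field's single set bit. A minimal C sketch of how a field such as RGX_CR_SH_PERF_SELECT0_GROUP_SELECT would typically be programmed (the rgx_field_write64() helper and the bare volatile access are illustrative assumptions, not part of this patch, which only defines the constants):

#include <stdint.h>

/* Illustrative helper only; a real driver would go through its own
 * register-access layer rather than a raw volatile pointer. */
static inline void rgx_field_write64(volatile uint64_t *reg,
                                     uint64_t clrmsk,
                                     unsigned int shift,
                                     uint64_t value)
{
        uint64_t v = *reg;

        v &= clrmsk;            /* CLRMSK carries 0s across the field */
        v |= value << shift;    /* insert the new field value         */
        *reg = v;
}

/* Example (hypothetical usage): route performance-counter group 2 into
 * the SH block's SELECT0 register:
 *
 *   rgx_field_write64(sh_perf_select0,
 *                     RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_CLRMSK,
 *                     RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_SHIFT, 2);
 */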
-+ -+/* -+ Register RGX_CR_SHF_SHG_CHECKSUM -+*/ -+#define RGX_CR_SHF_SHG_CHECKSUM (0xD1C0U) -+#define RGX_CR_SHF_SHG_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SHF_VERTEX_BIF_CHECKSUM -+*/ -+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM (0xD1C8U) -+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SHF_VARY_BIF_CHECKSUM -+*/ -+#define RGX_CR_SHF_VARY_BIF_CHECKSUM (0xD1D0U) -+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_RPM_BIF_CHECKSUM -+*/ -+#define RGX_CR_RPM_BIF_CHECKSUM (0xD1D8U) -+#define RGX_CR_RPM_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SHG_BIF_CHECKSUM -+*/ -+#define RGX_CR_SHG_BIF_CHECKSUM (0xD1E0U) -+#define RGX_CR_SHG_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register RGX_CR_SHG_FE_BE_CHECKSUM -+*/ -+#define RGX_CR_SHG_FE_BE_CHECKSUM (0xD1E8U) -+#define RGX_CR_SHG_FE_BE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_SHIFT (0U) -+#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register DPX_CR_BF_PERF -+*/ -+#define DPX_CR_BF_PERF (0xC458U) -+#define DPX_CR_BF_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -+#define DPX_CR_BF_PERF_CLR_3_SHIFT (4U) -+#define DPX_CR_BF_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -+#define DPX_CR_BF_PERF_CLR_3_EN (0x00000010U) -+#define DPX_CR_BF_PERF_CLR_2_SHIFT (3U) -+#define DPX_CR_BF_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -+#define DPX_CR_BF_PERF_CLR_2_EN (0x00000008U) -+#define DPX_CR_BF_PERF_CLR_1_SHIFT (2U) -+#define DPX_CR_BF_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -+#define DPX_CR_BF_PERF_CLR_1_EN (0x00000004U) -+#define DPX_CR_BF_PERF_CLR_0_SHIFT (1U) -+#define DPX_CR_BF_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -+#define DPX_CR_BF_PERF_CLR_0_EN (0x00000002U) -+#define DPX_CR_BF_PERF_CTRL_ENABLE_SHIFT (0U) -+#define DPX_CR_BF_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define DPX_CR_BF_PERF_CTRL_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register DPX_CR_BF_PERF_SELECT0 -+*/ -+#define DPX_CR_BF_PERF_SELECT0 (0xC460U) -+#define DPX_CR_BF_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define DPX_CR_BF_PERF_SELECT0_MODE_SHIFT (21U) -+#define DPX_CR_BF_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define DPX_CR_BF_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -+#define 
DPX_CR_BF_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register DPX_CR_BF_PERF_COUNTER_0 -+*/ -+#define DPX_CR_BF_PERF_COUNTER_0 (0xC488U) -+#define DPX_CR_BF_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define DPX_CR_BF_PERF_COUNTER_0_REG_SHIFT (0U) -+#define DPX_CR_BF_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register DPX_CR_BT_PERF -+*/ -+#define DPX_CR_BT_PERF (0xC3D0U) -+#define DPX_CR_BT_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -+#define DPX_CR_BT_PERF_CLR_3_SHIFT (4U) -+#define DPX_CR_BT_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -+#define DPX_CR_BT_PERF_CLR_3_EN (0x00000010U) -+#define DPX_CR_BT_PERF_CLR_2_SHIFT (3U) -+#define DPX_CR_BT_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -+#define DPX_CR_BT_PERF_CLR_2_EN (0x00000008U) -+#define DPX_CR_BT_PERF_CLR_1_SHIFT (2U) -+#define DPX_CR_BT_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -+#define DPX_CR_BT_PERF_CLR_1_EN (0x00000004U) -+#define DPX_CR_BT_PERF_CLR_0_SHIFT (1U) -+#define DPX_CR_BT_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -+#define DPX_CR_BT_PERF_CLR_0_EN (0x00000002U) -+#define DPX_CR_BT_PERF_CTRL_ENABLE_SHIFT (0U) -+#define DPX_CR_BT_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define DPX_CR_BT_PERF_CTRL_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register DPX_CR_BT_PERF_SELECT0 -+*/ -+#define DPX_CR_BT_PERF_SELECT0 (0xC3D8U) -+#define DPX_CR_BT_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define DPX_CR_BT_PERF_SELECT0_MODE_SHIFT (21U) -+#define DPX_CR_BT_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define DPX_CR_BT_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register DPX_CR_BT_PERF_COUNTER_0 -+*/ -+#define DPX_CR_BT_PERF_COUNTER_0 (0xC420U) -+#define DPX_CR_BT_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define DPX_CR_BT_PERF_COUNTER_0_REG_SHIFT (0U) -+#define DPX_CR_BT_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register DPX_CR_RQ_USC_DEBUG -+*/ -+#define DPX_CR_RQ_USC_DEBUG (0xC110U) -+#define DPX_CR_RQ_USC_DEBUG_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_SHIFT (0U) -+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) -+ -+ -+/* -+ Register DPX_CR_BIF_FAULT_BANK_MMU_STATUS -+*/ -+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS (0xC5C8U) -+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) -+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_SHIFT (12U) -+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) -+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_SHIFT (8U) -+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) -+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_SHIFT (5U) -+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) -+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_SHIFT (4U) -+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) -+#define 
DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_EN (0x00000010U) -+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) -+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) -+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U) -+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_SHIFT (0U) -+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) -+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_EN (0x00000001U) -+ -+ -+/* -+ Register DPX_CR_BIF_FAULT_BANK_REQ_STATUS -+*/ -+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS (0xC5D0U) -+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x03FFFFFFFFFFFFF0)) -+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_SHIFT (57U) -+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) -+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0200000000000000)) -+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_SHIFT (44U) -+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFE000FFFFFFFFFFF)) -+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_SHIFT (40U) -+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) -+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_SHIFT (4U) -+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) -+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) -+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) -+ -+ -+/* -+ Register DPX_CR_BIF_MMU_STATUS -+*/ -+#define DPX_CR_BIF_MMU_STATUS (0xC5D8U) -+#define DPX_CR_BIF_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFF7)) -+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT (20U) -+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK (0xF00FFFFFU) -+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT (12U) -+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK (0xFFF00FFFU) -+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT (4U) -+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK (0xFFFFF00FU) -+#define DPX_CR_BIF_MMU_STATUS_STALLED_SHIFT (2U) -+#define DPX_CR_BIF_MMU_STATUS_STALLED_CLRMSK (0xFFFFFFFBU) -+#define DPX_CR_BIF_MMU_STATUS_STALLED_EN (0x00000004U) -+#define DPX_CR_BIF_MMU_STATUS_PAUSED_SHIFT (1U) -+#define DPX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK (0xFFFFFFFDU) -+#define DPX_CR_BIF_MMU_STATUS_PAUSED_EN (0x00000002U) -+#define DPX_CR_BIF_MMU_STATUS_BUSY_SHIFT (0U) -+#define DPX_CR_BIF_MMU_STATUS_BUSY_CLRMSK (0xFFFFFFFEU) -+#define DPX_CR_BIF_MMU_STATUS_BUSY_EN (0x00000001U) -+ -+ -+/* -+ Register DPX_CR_RT_PERF -+*/ -+#define DPX_CR_RT_PERF (0xC700U) -+#define DPX_CR_RT_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -+#define DPX_CR_RT_PERF_CLR_3_SHIFT (4U) -+#define DPX_CR_RT_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -+#define DPX_CR_RT_PERF_CLR_3_EN (0x00000010U) -+#define DPX_CR_RT_PERF_CLR_2_SHIFT (3U) -+#define DPX_CR_RT_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -+#define DPX_CR_RT_PERF_CLR_2_EN (0x00000008U) -+#define DPX_CR_RT_PERF_CLR_1_SHIFT (2U) -+#define DPX_CR_RT_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -+#define DPX_CR_RT_PERF_CLR_1_EN (0x00000004U) -+#define DPX_CR_RT_PERF_CLR_0_SHIFT (1U) -+#define DPX_CR_RT_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -+#define DPX_CR_RT_PERF_CLR_0_EN (0x00000002U) -+#define DPX_CR_RT_PERF_CTRL_ENABLE_SHIFT (0U) -+#define DPX_CR_RT_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define DPX_CR_RT_PERF_CTRL_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register DPX_CR_RT_PERF_SELECT0 -+*/ -+#define DPX_CR_RT_PERF_SELECT0 (0xC708U) -+#define DPX_CR_RT_PERF_SELECT0_MASKFULL 
(IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define DPX_CR_RT_PERF_SELECT0_MODE_SHIFT (21U) -+#define DPX_CR_RT_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define DPX_CR_RT_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register DPX_CR_RT_PERF_COUNTER_0 -+*/ -+#define DPX_CR_RT_PERF_COUNTER_0 (0xC730U) -+#define DPX_CR_RT_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define DPX_CR_RT_PERF_COUNTER_0_REG_SHIFT (0U) -+#define DPX_CR_RT_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register DPX_CR_BX_TU_PERF -+*/ -+#define DPX_CR_BX_TU_PERF (0xC908U) -+#define DPX_CR_BX_TU_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -+#define DPX_CR_BX_TU_PERF_CLR_3_SHIFT (4U) -+#define DPX_CR_BX_TU_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) -+#define DPX_CR_BX_TU_PERF_CLR_3_EN (0x00000010U) -+#define DPX_CR_BX_TU_PERF_CLR_2_SHIFT (3U) -+#define DPX_CR_BX_TU_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) -+#define DPX_CR_BX_TU_PERF_CLR_2_EN (0x00000008U) -+#define DPX_CR_BX_TU_PERF_CLR_1_SHIFT (2U) -+#define DPX_CR_BX_TU_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) -+#define DPX_CR_BX_TU_PERF_CLR_1_EN (0x00000004U) -+#define DPX_CR_BX_TU_PERF_CLR_0_SHIFT (1U) -+#define DPX_CR_BX_TU_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) -+#define DPX_CR_BX_TU_PERF_CLR_0_EN (0x00000002U) -+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_SHIFT (0U) -+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) -+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_EN (0x00000001U) -+ -+ -+/* -+ Register DPX_CR_BX_TU_PERF_SELECT0 -+*/ -+#define DPX_CR_BX_TU_PERF_SELECT0 (0xC910U) -+#define DPX_CR_BX_TU_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) -+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_SHIFT (48U) -+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) -+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_SHIFT (32U) -+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) -+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_SHIFT (21U) -+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) -+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) -+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) -+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) -+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_SHIFT (0U) -+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register DPX_CR_BX_TU_PERF_COUNTER_0 -+*/ -+#define DPX_CR_BX_TU_PERF_COUNTER_0 (0xC938U) -+#define DPX_CR_BX_TU_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_SHIFT (0U) -+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) -+ -+ -+/* -+ Register DPX_CR_RS_PDS_RR_CHECKSUM -+*/ -+#define DPX_CR_RS_PDS_RR_CHECKSUM (0xC0F0U) -+#define DPX_CR_RS_PDS_RR_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_SHIFT (0U) -+#define 
DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) -+ -+ -+/* -+ Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT -+*/ -+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT (0xE140U) -+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT (0U) -+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_MMU_CBASE_MAPPING -+*/ -+#define RGX_CR_MMU_CBASE_MAPPING (0xE148U) -+#define RGX_CR_MMU_CBASE_MAPPING_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) -+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT (0U) -+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK (0xF0000000U) -+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U) -+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE (4096U) -+ -+ -+/* -+ Register RGX_CR_MMU_FAULT_STATUS -+*/ -+#define RGX_CR_MMU_FAULT_STATUS (0xE150U) -+#define RGX_CR_MMU_FAULT_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT (28U) -+#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0x000000000FFFFFFF)) -+#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT (20U) -+#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF00FFFFF)) -+#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT (12U) -+#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF00FFF)) -+#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT (6U) -+#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) -+#define RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT (4U) -+#define RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) -+#define RGX_CR_MMU_FAULT_STATUS_RNW_SHIFT (3U) -+#define RGX_CR_MMU_FAULT_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -+#define RGX_CR_MMU_FAULT_STATUS_RNW_EN (IMG_UINT64_C(0x0000000000000008)) -+#define RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT (1U) -+#define RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) -+#define RGX_CR_MMU_FAULT_STATUS_FAULT_SHIFT (0U) -+#define RGX_CR_MMU_FAULT_STATUS_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_MMU_FAULT_STATUS_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_MMU_FAULT_STATUS_META -+*/ -+#define RGX_CR_MMU_FAULT_STATUS_META (0xE158U) -+#define RGX_CR_MMU_FAULT_STATUS_META_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT (28U) -+#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK (IMG_UINT64_C(0x000000000FFFFFFF)) -+#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT (20U) -+#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF00FFFFF)) -+#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT (12U) -+#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF00FFF)) -+#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT (6U) -+#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) -+#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT (4U) -+#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) -+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_SHIFT (3U) -+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_EN (IMG_UINT64_C(0x0000000000000008)) -+#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT (1U) -+#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) -+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT 
(0U) -+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+/* -+ Register RGX_CR_SLC3_CTRL_MISC -+*/ -+#define RGX_CR_SLC3_CTRL_MISC (0xE200U) -+#define RGX_CR_SLC3_CTRL_MISC_MASKFULL (IMG_UINT64_C(0x0000000000000107)) -+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_SHIFT (8U) -+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_CLRMSK (0xFFFFFEFFU) -+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN (0x00000100U) -+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SHIFT (0U) -+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK (0xFFFFFFF8U) -+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_LINEAR (0x00000000U) -+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_IN_PAGE_HASH (0x00000001U) -+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_FIXED_PVR_HASH (0x00000002U) -+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH (0x00000003U) -+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH (0x00000004U) -+ -+ -+/* -+ Register RGX_CR_SLC3_SCRAMBLE -+*/ -+#define RGX_CR_SLC3_SCRAMBLE (0xE208U) -+#define RGX_CR_SLC3_SCRAMBLE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_SLC3_SCRAMBLE_BITS_SHIFT (0U) -+#define RGX_CR_SLC3_SCRAMBLE_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_SLC3_SCRAMBLE2 -+*/ -+#define RGX_CR_SLC3_SCRAMBLE2 (0xE210U) -+#define RGX_CR_SLC3_SCRAMBLE2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_SLC3_SCRAMBLE2_BITS_SHIFT (0U) -+#define RGX_CR_SLC3_SCRAMBLE2_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_SLC3_SCRAMBLE3 -+*/ -+#define RGX_CR_SLC3_SCRAMBLE3 (0xE218U) -+#define RGX_CR_SLC3_SCRAMBLE3_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_SLC3_SCRAMBLE3_BITS_SHIFT (0U) -+#define RGX_CR_SLC3_SCRAMBLE3_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_SLC3_SCRAMBLE4 -+*/ -+#define RGX_CR_SLC3_SCRAMBLE4 (0xE260U) -+#define RGX_CR_SLC3_SCRAMBLE4_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_SLC3_SCRAMBLE4_BITS_SHIFT (0U) -+#define RGX_CR_SLC3_SCRAMBLE4_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) -+ -+ -+/* -+ Register RGX_CR_SLC3_STATUS -+*/ -+#define RGX_CR_SLC3_STATUS (0xE220U) -+#define RGX_CR_SLC3_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+#define RGX_CR_SLC3_STATUS_WRITES1_SHIFT (48U) -+#define RGX_CR_SLC3_STATUS_WRITES1_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) -+#define RGX_CR_SLC3_STATUS_WRITES0_SHIFT (32U) -+#define RGX_CR_SLC3_STATUS_WRITES0_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) -+#define RGX_CR_SLC3_STATUS_READS1_SHIFT (16U) -+#define RGX_CR_SLC3_STATUS_READS1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) -+#define RGX_CR_SLC3_STATUS_READS0_SHIFT (0U) -+#define RGX_CR_SLC3_STATUS_READS0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) -+ -+ -+/* -+ Register RGX_CR_SLC3_IDLE -+*/ -+#define RGX_CR_SLC3_IDLE (0xE228U) -+#define RGX_CR_SLC3_IDLE_MASKFULL (IMG_UINT64_C(0x00000000000FFFFF)) -+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_SHIFT (18U) -+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_CLRMSK (0xFFF3FFFFU) -+#define RGX_CR_SLC3_IDLE_MMU_SHIFT (17U) -+#define RGX_CR_SLC3_IDLE_MMU_CLRMSK (0xFFFDFFFFU) -+#define RGX_CR_SLC3_IDLE_MMU_EN (0x00020000U) -+#define RGX_CR_SLC3_IDLE_RDI_SHIFT (16U) -+#define RGX_CR_SLC3_IDLE_RDI_CLRMSK (0xFFFEFFFFU) -+#define RGX_CR_SLC3_IDLE_RDI_EN (0x00010000U) -+#define RGX_CR_SLC3_IDLE_IMGBV4_SHIFT (12U) -+#define RGX_CR_SLC3_IDLE_IMGBV4_CLRMSK (0xFFFF0FFFU) -+#define 
RGX_CR_SLC3_IDLE_CACHE_BANKS_SHIFT (4U) -+#define RGX_CR_SLC3_IDLE_CACHE_BANKS_CLRMSK (0xFFFFF00FU) -+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_SHIFT (2U) -+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_CLRMSK (0xFFFFFFF3U) -+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_SHIFT (1U) -+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_EN (0x00000002U) -+#define RGX_CR_SLC3_IDLE_XBAR_SHIFT (0U) -+#define RGX_CR_SLC3_IDLE_XBAR_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_SLC3_IDLE_XBAR_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_SLC3_FAULT_STOP_STATUS -+*/ -+#define RGX_CR_SLC3_FAULT_STOP_STATUS (0xE248U) -+#define RGX_CR_SLC3_FAULT_STOP_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000001FFF)) -+#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_SHIFT (0U) -+#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_CLRMSK (0xFFFFE000U) -+ -+ -+/* -+ Register RGX_CR_VDM_CONTEXT_STORE_MODE -+*/ -+#define RGX_CR_VDM_CONTEXT_STORE_MODE (0xF048U) -+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MASKFULL (IMG_UINT64_C(0x0000000000000003)) -+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_SHIFT (0U) -+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_CLRMSK (0xFFFFFFFCU) -+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX (0x00000000U) -+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE (0x00000001U) -+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST (0x00000002U) -+ -+ -+/* -+ Register RGX_CR_CONTEXT_MAPPING0 -+*/ -+#define RGX_CR_CONTEXT_MAPPING0 (0xF078U) -+#define RGX_CR_CONTEXT_MAPPING0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_CONTEXT_MAPPING0_2D_SHIFT (24U) -+#define RGX_CR_CONTEXT_MAPPING0_2D_CLRMSK (0x00FFFFFFU) -+#define RGX_CR_CONTEXT_MAPPING0_CDM_SHIFT (16U) -+#define RGX_CR_CONTEXT_MAPPING0_CDM_CLRMSK (0xFF00FFFFU) -+#define RGX_CR_CONTEXT_MAPPING0_3D_SHIFT (8U) -+#define RGX_CR_CONTEXT_MAPPING0_3D_CLRMSK (0xFFFF00FFU) -+#define RGX_CR_CONTEXT_MAPPING0_TA_SHIFT (0U) -+#define RGX_CR_CONTEXT_MAPPING0_TA_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_CONTEXT_MAPPING1 -+*/ -+#define RGX_CR_CONTEXT_MAPPING1 (0xF080U) -+#define RGX_CR_CONTEXT_MAPPING1_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_CONTEXT_MAPPING1_HOST_SHIFT (8U) -+#define RGX_CR_CONTEXT_MAPPING1_HOST_CLRMSK (0xFFFF00FFU) -+#define RGX_CR_CONTEXT_MAPPING1_TLA_SHIFT (0U) -+#define RGX_CR_CONTEXT_MAPPING1_TLA_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_CONTEXT_MAPPING2 -+*/ -+#define RGX_CR_CONTEXT_MAPPING2 (0xF088U) -+#define RGX_CR_CONTEXT_MAPPING2_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) -+#define RGX_CR_CONTEXT_MAPPING2_ALIST0_SHIFT (16U) -+#define RGX_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK (0xFF00FFFFU) -+#define RGX_CR_CONTEXT_MAPPING2_TE0_SHIFT (8U) -+#define RGX_CR_CONTEXT_MAPPING2_TE0_CLRMSK (0xFFFF00FFU) -+#define RGX_CR_CONTEXT_MAPPING2_VCE0_SHIFT (0U) -+#define RGX_CR_CONTEXT_MAPPING2_VCE0_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_CONTEXT_MAPPING3 -+*/ -+#define RGX_CR_CONTEXT_MAPPING3 (0xF090U) -+#define RGX_CR_CONTEXT_MAPPING3_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) -+#define RGX_CR_CONTEXT_MAPPING3_ALIST1_SHIFT (16U) -+#define RGX_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK (0xFF00FFFFU) -+#define RGX_CR_CONTEXT_MAPPING3_TE1_SHIFT (8U) -+#define RGX_CR_CONTEXT_MAPPING3_TE1_CLRMSK (0xFFFF00FFU) -+#define RGX_CR_CONTEXT_MAPPING3_VCE1_SHIFT (0U) -+#define RGX_CR_CONTEXT_MAPPING3_VCE1_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_BIF_JONES_OUTSTANDING_READ -+*/ -+#define RGX_CR_BIF_JONES_OUTSTANDING_READ (0xF098U) -+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_MASKFULL 
(IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_SHIFT (0U) -+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ -+*/ -+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ (0xF0A0U) -+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_SHIFT (0U) -+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_BIF_DUST_OUTSTANDING_READ -+*/ -+#define RGX_CR_BIF_DUST_OUTSTANDING_READ (0xF0A8U) -+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_SHIFT (0U) -+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_JONES_FIX -+*/ -+#define RGX_CR_JONES_FIX (0xF0C0U) -+#define RGX_CR_JONES_FIX__ROGUE3__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_JONES_FIX_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) -+#define RGX_CR_JONES_FIX_DISABLE_SHIFT (0U) -+#define RGX_CR_JONES_FIX_DISABLE_CLRMSK (0xFFFF0000U) -+ -+ -+/* -+ Register RGX_CR_CONTEXT_MAPPING4 -+*/ -+#define RGX_CR_CONTEXT_MAPPING4 (0xF210U) -+#define RGX_CR_CONTEXT_MAPPING4_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) -+#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT (40U) -+#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) -+#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT (32U) -+#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) -+#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT (24U) -+#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) -+#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT (16U) -+#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) -+#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT (8U) -+#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) -+#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT (0U) -+#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) -+ -+ -+/* -+ Register RGX_CR_MULTICORE_GPU -+*/ -+#define RGX_CR_MULTICORE_GPU (0xF300U) -+#define RGX_CR_MULTICORE_GPU_MASKFULL (IMG_UINT64_C(0x000000000000007F)) -+#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_SHIFT (6U) -+#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_EN (0x00000040U) -+#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_SHIFT (5U) -+#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_EN (0x00000020U) -+#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_SHIFT (4U) -+#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_EN (0x00000010U) -+#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_SHIFT (3U) -+#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN (0x00000008U) -+#define RGX_CR_MULTICORE_GPU_ID_SHIFT (0U) -+#define RGX_CR_MULTICORE_GPU_ID_CLRMSK (0xFFFFFFF8U) -+ -+ -+/* -+ Register RGX_CR_MULTICORE_SYSTEM -+*/ -+#define RGX_CR_MULTICORE_SYSTEM (0xF308U) -+#define RGX_CR_MULTICORE_SYSTEM_MASKFULL (IMG_UINT64_C(0x000000000000000F)) -+#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT (0U) -+#define 
RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK (0xFFFFFFF0U) -+ -+ -+/* -+ Register RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON -+*/ -+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON (0xF310U) -+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) -+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) -+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT (8U) -+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK (0xC00000FFU) -+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) -+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON -+*/ -+#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON (0xF320U) -+#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) -+#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) -+#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT (8U) -+#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK (0xC00000FFU) -+#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) -+#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON -+*/ -+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON (0xF330U) -+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) -+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) -+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) -+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT (8U) -+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK (0xC00000FFU) -+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) -+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) -+ -+ -+/* -+ Register RGX_CR_ECC_RAM_ERR_INJ -+*/ -+#define RGX_CR_ECC_RAM_ERR_INJ (0xF340U) -+#define RGX_CR_ECC_RAM_ERR_INJ_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -+#define RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_SHIFT (4U) -+#define RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN (0x00000010U) -+#define RGX_CR_ECC_RAM_ERR_INJ_USC_SHIFT (3U) -+#define RGX_CR_ECC_RAM_ERR_INJ_USC_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_ECC_RAM_ERR_INJ_USC_EN (0x00000008U) -+#define RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_SHIFT (2U) -+#define RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN (0x00000004U) -+#define RGX_CR_ECC_RAM_ERR_INJ_RASCAL_SHIFT (1U) -+#define RGX_CR_ECC_RAM_ERR_INJ_RASCAL_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_ECC_RAM_ERR_INJ_RASCAL_EN (0x00000002U) -+#define RGX_CR_ECC_RAM_ERR_INJ_MARS_SHIFT (0U) -+#define RGX_CR_ECC_RAM_ERR_INJ_MARS_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_ECC_RAM_ERR_INJ_MARS_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_ECC_RAM_INIT_KICK -+*/ -+#define RGX_CR_ECC_RAM_INIT_KICK (0xF348U) -+#define RGX_CR_ECC_RAM_INIT_KICK_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -+#define RGX_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_SHIFT (4U) -+#define RGX_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_EN (0x00000010U) -+#define 
RGX_CR_ECC_RAM_INIT_KICK_USC_SHIFT (3U) -+#define RGX_CR_ECC_RAM_INIT_KICK_USC_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_ECC_RAM_INIT_KICK_USC_EN (0x00000008U) -+#define RGX_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_SHIFT (2U) -+#define RGX_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_EN (0x00000004U) -+#define RGX_CR_ECC_RAM_INIT_KICK_RASCAL_SHIFT (1U) -+#define RGX_CR_ECC_RAM_INIT_KICK_RASCAL_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_ECC_RAM_INIT_KICK_RASCAL_EN (0x00000002U) -+#define RGX_CR_ECC_RAM_INIT_KICK_MARS_SHIFT (0U) -+#define RGX_CR_ECC_RAM_INIT_KICK_MARS_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_ECC_RAM_INIT_KICK_MARS_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_ECC_RAM_INIT_DONE -+*/ -+#define RGX_CR_ECC_RAM_INIT_DONE (0xF350U) -+#define RGX_CR_ECC_RAM_INIT_DONE_MASKFULL (IMG_UINT64_C(0x000000000000001F)) -+#define RGX_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_SHIFT (4U) -+#define RGX_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_EN (0x00000010U) -+#define RGX_CR_ECC_RAM_INIT_DONE_USC_SHIFT (3U) -+#define RGX_CR_ECC_RAM_INIT_DONE_USC_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_ECC_RAM_INIT_DONE_USC_EN (0x00000008U) -+#define RGX_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_SHIFT (2U) -+#define RGX_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_EN (0x00000004U) -+#define RGX_CR_ECC_RAM_INIT_DONE_RASCAL_SHIFT (1U) -+#define RGX_CR_ECC_RAM_INIT_DONE_RASCAL_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_ECC_RAM_INIT_DONE_RASCAL_EN (0x00000002U) -+#define RGX_CR_ECC_RAM_INIT_DONE_MARS_SHIFT (0U) -+#define RGX_CR_ECC_RAM_INIT_DONE_MARS_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_ECC_RAM_INIT_DONE_MARS_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_SAFETY_EVENT_ENABLE -+*/ -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE (0xF390U) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_SHIFT (7U) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_EN (0x00000080U) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT (3U) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN (0x00000008U) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT (2U) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN (0x00000004U) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT (1U) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN (0x00000002U) 
-+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_SAFETY_EVENT_STATUS -+*/ -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE (0xF398U) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_LOCKUP_SHIFT (7U) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_LOCKUP_EN (0x00000080U) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_SHIFT (3U) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_EN (0x00000008U) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_SHIFT (2U) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN (0x00000004U) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_SHIFT (1U) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_EN (0x00000002U) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_SAFETY_EVENT_CLEAR -+*/ -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE (0xF3A0U) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_LOCKUP_SHIFT (7U) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_LOCKUP_EN (0x00000080U) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_SHIFT (3U) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U) -+#define 
RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_EN (0x00000008U) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_SHIFT (2U) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_EN (0x00000004U) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_SHIFT (1U) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_EN (0x00000002U) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_FAULT_FW_STATUS -+*/ -+#define RGX_CR_FAULT_FW_STATUS (0xF3B0U) -+#define RGX_CR_FAULT_FW_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000010001)) -+#define RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_SHIFT (16U) -+#define RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_CLRMSK (0xFFFEFFFFU) -+#define RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_EN (0x00010000U) -+#define RGX_CR_FAULT_FW_STATUS_CPU_DETECT_SHIFT (0U) -+#define RGX_CR_FAULT_FW_STATUS_CPU_DETECT_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_FAULT_FW_STATUS_CPU_DETECT_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_FAULT_FW_CLEAR -+*/ -+#define RGX_CR_FAULT_FW_CLEAR (0xF3B8U) -+#define RGX_CR_FAULT_FW_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000010001)) -+#define RGX_CR_FAULT_FW_CLEAR_CPU_CORRECT_SHIFT (16U) -+#define RGX_CR_FAULT_FW_CLEAR_CPU_CORRECT_CLRMSK (0xFFFEFFFFU) -+#define RGX_CR_FAULT_FW_CLEAR_CPU_CORRECT_EN (0x00010000U) -+#define RGX_CR_FAULT_FW_CLEAR_CPU_DETECT_SHIFT (0U) -+#define RGX_CR_FAULT_FW_CLEAR_CPU_DETECT_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_FAULT_FW_CLEAR_CPU_DETECT_EN (0x00000001U) -+ -+ -+/* -+ Register RGX_CR_MTS_SAFETY_EVENT_ENABLE -+*/ -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE (0xF3D8U) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_SHIFT (7U) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_EN (0x00000080U) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT (3U) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN (0x00000008U) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT (2U) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN (0x00000004U) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT (1U) -+#define 
RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN (0x00000002U) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU) -+#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U) -+ -+ -+#endif /* RGX_CR_DEFS_KM_H */ -+/***************************************************************************** -+ End of file (rgx_cr_defs_km.h) -+*****************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/km/rgxdefs_km.h b/drivers/gpu/drm/img-rogue/km/rgxdefs_km.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/km/rgxdefs_km.h -@@ -0,0 +1,365 @@ -+/*************************************************************************/ /*! -+@Title Rogue hw definitions (kernel mode) -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#ifndef RGXDEFS_KM_H -+#define RGXDEFS_KM_H -+ -+#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) -+#include RGX_BVNC_CORE_KM_HEADER -+#include RGX_BNC_CONFIG_KM_HEADER -+#endif -+ -+#define IMG_EXPLICIT_INCLUDE_HWDEFS -+#if defined(__KERNEL__) -+#include "rgx_cr_defs_km.h" -+#endif -+#undef IMG_EXPLICIT_INCLUDE_HWDEFS -+ -+#include "rgx_heap_firmware.h" -+ -+/* The following Macros are picked up through BVNC headers for no hardware -+ * operations to be compatible with old build infrastructure. -+ */ -+#if defined(NO_HARDWARE) -+/****************************************************************************** -+ * Check for valid B.X.N.C -+ *****************************************************************************/ -+#if !defined(RGX_BVNC_KM_B) || !defined(RGX_BVNC_KM_V) || !defined(RGX_BVNC_KM_N) || !defined(RGX_BVNC_KM_C) -+#error "Need to specify BVNC (RGX_BVNC_KM_B, RGX_BVNC_KM_V, RGX_BVNC_KM_N and RGX_BVNC_C)" -+#endif -+ -+/* Check core/config compatibility */ -+#if (RGX_BVNC_KM_B != RGX_BNC_KM_B) || (RGX_BVNC_KM_N != RGX_BNC_KM_N) || (RGX_BVNC_KM_C != RGX_BNC_KM_C) -+#error "BVNC headers are mismatching (KM core/config)" -+#endif -+#endif -+ -+/****************************************************************************** -+ * RGX Version name -+ *****************************************************************************/ -+#define RGX_BVNC_KM_ST2(S) #S -+#define RGX_BVNC_KM_ST(S) RGX_BVNC_KM_ST2(S) -+#define RGX_BVNC_KM RGX_BVNC_KM_ST(RGX_BVNC_KM_B) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_V) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_N) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_C) -+#define RGX_BVNC_KM_V_ST RGX_BVNC_KM_ST(RGX_BVNC_KM_V) -+ -+/* Maximum string size is [bb.vvvp.nnnn.cccc\0], includes null char */ -+#define RGX_BVNC_STR_SIZE_MAX (2U+1U+4U+1U+4U+1U+4U+1U) -+#define RGX_BVNC_STR_FMTSPEC "%u.%u.%u.%u" -+#define RGX_BVNC_STRP_FMTSPEC "%u.%up.%u.%u" -+ -+ -+/****************************************************************************** -+ * RGX Defines -+ *****************************************************************************/ -+ -+#define BVNC_FIELD_MASK ((1UL << BVNC_FIELD_WIDTH) - 1U) -+#define C_POSITION (0U) -+#define N_POSITION ((C_POSITION) + (BVNC_FIELD_WIDTH)) -+#define V_POSITION ((N_POSITION) + (BVNC_FIELD_WIDTH)) -+#define B_POSITION ((V_POSITION) + (BVNC_FIELD_WIDTH)) -+ -+#define B_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (B_POSITION))) -+#define V_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (V_POSITION))) -+#define N_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (N_POSITION))) -+#define C_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (C_POSITION))) -+ -+#define GET_B(x) (((x) & (B_POSTION_MASK)) >> (B_POSITION)) -+#define GET_V(x) (((x) & (V_POSTION_MASK)) >> (V_POSITION)) -+#define GET_N(x) (((x) & (N_POSTION_MASK)) >> (N_POSITION)) -+#define GET_C(x) (((x) & (C_POSTION_MASK)) >> (C_POSITION)) -+ -+#define BVNC_PACK(B,V,N,C) (((((IMG_UINT64)(B))) << (B_POSITION)) | \ -+ ((((IMG_UINT64)(V))) << (V_POSITION)) | \ -+ ((((IMG_UINT64)(N))) << (N_POSITION)) | \ -+ ((((IMG_UINT64)(C))) << (C_POSITION)) \ -+ ) -+ -+#define RGX_CR_CORE_ID_CONFIG_N_SHIFT (8U) -+#define RGX_CR_CORE_ID_CONFIG_C_SHIFT (0U) -+ -+#define RGX_CR_CORE_ID_CONFIG_N_CLRMSK (0XFFFF00FFU) -+#define RGX_CR_CORE_ID_CONFIG_C_CLRMSK (0XFFFFFF00U) -+ -+#if defined(RGX_CR_CORE_ID__PBVNC) -+#define GET_PBVNC_B(X) ((X & ~RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK) >> 
RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT) -+#define GET_PBVNC_V(X) ((X & ~RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK) >> RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT) -+#define GET_PBVNC_N(X) ((X & ~RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK) >> RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT) -+#define GET_PBVNC_C(X) ((X & ~RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK) >> RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT) -+#endif -+ -+#if defined(RGX_FEATURE_NUM_OSIDS) -+#define RGXFW_MAX_NUM_OSIDS (RGX_FEATURE_NUM_OSIDS) -+#else -+#define RGXFW_MAX_NUM_OSIDS (8U) -+#endif -+ -+#define RGXFW_HOST_DRIVER_ID (0U) -+#define RGXFW_GUEST_DRIVER_ID_START (RGXFW_HOST_DRIVER_ID + 1U) -+ -+#define RGXFW_THREAD_0 (0U) -+#define RGXFW_THREAD_1 (1U) -+ -+/* META cores (required for the RGX_FEATURE_META) */ -+#define MTP218 (1U) -+#define MTP219 (2U) -+#define LTP218 (3U) -+#define LTP217 (4U) -+ -+/* META Core memory feature depending on META variants */ -+#define RGX_META_COREMEM_32K (32*1024) -+#define RGX_META_COREMEM_48K (48*1024) -+#define RGX_META_COREMEM_64K (64*1024) -+#define RGX_META_COREMEM_96K (96*1024) -+#define RGX_META_COREMEM_128K (128*1024) -+#define RGX_META_COREMEM_256K (256*1024) -+ -+#if !defined(__KERNEL__) -+#if (!defined(SUPPORT_TRUSTED_DEVICE) || defined(RGX_FEATURE_META_DMA)) && \ -+ (defined(RGX_FEATURE_META_COREMEM_SIZE) && RGX_FEATURE_META_COREMEM_SIZE != 0) -+#define RGX_META_COREMEM_SIZE (RGX_FEATURE_META_COREMEM_SIZE*1024U) -+#define RGX_META_COREMEM (1) -+#define RGX_META_COREMEM_CODE (1) -+#if !defined(FIX_HW_BRN_50767) && (!defined(RGX_NUM_DRIVERS_SUPPORTED) || (RGX_NUM_DRIVERS_SUPPORTED == 1)) -+#define RGX_META_COREMEM_DATA (1) -+#endif -+#else -+#undef SUPPORT_META_COREMEM -+#undef RGX_FEATURE_META_COREMEM_SIZE -+#undef RGX_FEATURE_META_DMA -+#define RGX_FEATURE_META_COREMEM_SIZE (0) -+#define RGX_META_COREMEM_SIZE (0) -+#endif -+#endif -+ -+#define GET_ROGUE_CACHE_LINE_SIZE(x) ((((IMG_UINT32)(x)) > 0U) ? 
((IMG_UINT32)(x)/8U) : (0U)) -+ -+ -+#if defined(SUPPORT_AGP) -+#define MAX_HW_TA3DCONTEXTS 3U -+#else -+#define MAX_HW_TA3DCONTEXTS 2U -+#endif -+ -+#define RGX_CR_CLK_CTRL_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL_MASKFULL) -+#define RGX_CR_CLK_CTRL_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL_MASKFULL) -+#define RGX_CR_CLK_CTRL2_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL2_MASKFULL) -+#define RGX_CR_CLK_CTRL2_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL2_MASKFULL) -+#define RGX_CR_CLK_XTPLUS_CTRL_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_XTPLUS_CTRL_MASKFULL) -+#define RGX_CR_CLK_XTPLUS_CTRL_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_XTPLUS_CTRL_MASKFULL) -+#define DPX_CR_DPX_CLK_CTRL_ALL_ON (IMG_UINT64_C(0x5555555555555555)&DPX_CR_DPX_CLK_CTRL_MASKFULL) -+#define DPX_CR_DPX_CLK_CTRL_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&DPX_CR_DPX_CLK_CTRL_MASKFULL) -+ -+#define RGX_CR_SOFT_RESET_DUST_n_CORE_EN (RGX_CR_SOFT_RESET_DUST_A_CORE_EN | \ -+ RGX_CR_SOFT_RESET_DUST_B_CORE_EN | \ -+ RGX_CR_SOFT_RESET_DUST_C_CORE_EN | \ -+ RGX_CR_SOFT_RESET_DUST_D_CORE_EN | \ -+ RGX_CR_SOFT_RESET_DUST_E_CORE_EN | \ -+ RGX_CR_SOFT_RESET_DUST_F_CORE_EN | \ -+ RGX_CR_SOFT_RESET_DUST_G_CORE_EN | \ -+ RGX_CR_SOFT_RESET_DUST_H_CORE_EN) -+ -+/* SOFT_RESET Rascal and DUSTs bits */ -+#define RGX_CR_SOFT_RESET_RASCALDUSTS_EN (RGX_CR_SOFT_RESET_RASCAL_CORE_EN | \ -+ RGX_CR_SOFT_RESET_DUST_n_CORE_EN) -+ -+ -+ -+ -+/* SOFT_RESET steps as defined in the TRM */ -+#define RGX_S7_SOFT_RESET_DUSTS (RGX_CR_SOFT_RESET_DUST_n_CORE_EN) -+ -+#define RGX_S7_SOFT_RESET_JONES (RGX_CR_SOFT_RESET_PM_EN | \ -+ RGX_CR_SOFT_RESET_VDM_EN | \ -+ RGX_CR_SOFT_RESET_ISP_EN) -+ -+#define RGX_S7_SOFT_RESET_JONES_ALL (RGX_S7_SOFT_RESET_JONES | \ -+ RGX_CR_SOFT_RESET_BIF_EN | \ -+ RGX_CR_SOFT_RESET_SLC_EN | \ -+ RGX_CR_SOFT_RESET_GARTEN_EN) -+ -+#define RGX_S7_SOFT_RESET2 (RGX_CR_SOFT_RESET2_BLACKPEARL_EN | \ -+ RGX_CR_SOFT_RESET2_PIXEL_EN | \ -+ RGX_CR_SOFT_RESET2_CDM_EN | \ -+ RGX_CR_SOFT_RESET2_VERTEX_EN) -+ -+ -+ -+#define RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT (12U) -+#define RGX_BIF_PM_PHYSICAL_PAGE_SIZE (1UL << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT) -+ -+#define RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT (14U) -+#define RGX_BIF_PM_VIRTUAL_PAGE_SIZE (1UL << RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT) -+ -+#define RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE (16U) -+ -+/* To get the number of required Dusts, divide the number of -+ * clusters by 2 and round up -+ */ -+#define RGX_REQ_NUM_DUSTS(CLUSTERS) (((CLUSTERS) + 1U) / 2U) -+ -+/* To get the number of required Bernado/Phantom(s), divide -+ * the number of clusters by 4 and round up -+ */ -+#define RGX_REQ_NUM_PHANTOMS(CLUSTERS) (((CLUSTERS) + 3U) / 4U) -+#define RGX_REQ_NUM_BERNADOS(CLUSTERS) (((CLUSTERS) + 3U) / 4U) -+#define RGX_REQ_NUM_BLACKPEARLS(CLUSTERS) (((CLUSTERS) + 3U) / 4U) -+ -+#if !defined(__KERNEL__) -+# define RGX_NUM_PHANTOMS (RGX_REQ_NUM_PHANTOMS(RGX_FEATURE_NUM_CLUSTERS)) -+#endif -+ -+ -+/* META second thread feature depending on META variants and -+ * available CoreMem -+ */ -+#if defined(RGX_FEATURE_META) && (RGX_FEATURE_META == MTP218 || RGX_FEATURE_META == MTP219) && defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && (RGX_FEATURE_META_COREMEM_SIZE == 256) -+#define RGXFW_META_SUPPORT_2ND_THREAD -+#endif -+ -+/* -+ * Utility macros to calculate CAT_BASE register addresses -+ */ -+#define BIF_CAT_BASEx(n) \ -+ (RGX_CR_BIF_CAT_BASE0 + ((n) * (RGX_CR_BIF_CAT_BASE1 - RGX_CR_BIF_CAT_BASE0))) -+ -+#define 
FWCORE_MEM_CAT_BASEx(n) \ -+ (RGX_CR_FWCORE_MEM_CAT_BASE0 + ((n) * (RGX_CR_FWCORE_MEM_CAT_BASE1 - RGX_CR_FWCORE_MEM_CAT_BASE0))) -+ -+/* -+ * FWCORE wrapper register defines -+ */ -+#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_SHIFT -+#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_CLRMSK RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK -+#define FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT (12U) -+ -+/****************************************************************************** -+ * WA HWBRNs -+ *****************************************************************************/ -+ -+#if defined(RGX_CR_JONES_IDLE_MASKFULL) -+/* Workaround for HW BRN 57289 */ -+#if (RGX_CR_JONES_IDLE_MASKFULL != 0x0000000000007FFF) -+#error This WA must be updated if RGX_CR_JONES_IDLE is expanded!!! -+#endif -+#undef RGX_CR_JONES_IDLE_MASKFULL -+#undef RGX_CR_JONES_IDLE_TDM_SHIFT -+#undef RGX_CR_JONES_IDLE_TDM_CLRMSK -+#undef RGX_CR_JONES_IDLE_TDM_EN -+#define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000003FFF)) -+#endif -+ -+#if !defined(__KERNEL__) -+ -+#if defined(RGX_FEATURE_ROGUEXE) -+#define RGX_NUM_RASTERISATION_MODULES RGX_FEATURE_NUM_CLUSTERS -+#else -+#define RGX_NUM_RASTERISATION_MODULES RGX_NUM_PHANTOMS -+#endif -+ -+#endif /* defined(__KERNEL__) */ -+ -+/* GPU CR timer tick in GPU cycles */ -+#define RGX_CRTIME_TICK_IN_CYCLES (256U) -+#define RGX_CRTIME_TICK_IN_CYCLES_SHIFT (8U) -+ -+/* for nohw multicore return max cores possible to client */ -+#define RGX_MULTICORE_MAX_NOHW_CORES (4U) -+ -+/* -+ If the size of the SLC is less than this value then the TPU bypasses the SLC. -+ */ -+#define RGX_TPU_CACHED_SLC_SIZE_THRESHOLD_KB (128U) -+ -+/* -+ * If the size of the SLC is bigger than this value then the TCU must not be bypassed in the SLC. -+ * In XE_MEMORY_HIERARCHY cores, the TCU is bypassed by default. -+ */ -+#define RGX_TCU_CACHED_SLC_SIZE_THRESHOLD_KB (32U) -+ -+/* -+ * Register used by the FW to track the current boot stage (not used in MIPS) -+ */ -+#define RGX_FW_BOOT_STAGE_REGISTER (RGX_CR_POWER_ESTIMATE_RESULT) -+ -+/* -+ * Virtualisation definitions -+ */ -+#define RGX_VIRTUALISATION_REG_SIZE_PER_OS (RGX_CR_MTS_SCHEDULE1 - RGX_CR_MTS_SCHEDULE) -+ -+/* -+ * Renaming MTS sideband bitfields to emphasize that the Register Bank number -+ * of the MTS register used identifies a specific Driver/VM rather than the OSID tag -+ * emitted on bus memory transactions. 
-+ */ -+#define RGX_MTS_SBDATA_DRIVERID_CLRMSK RGX_CR_MTS_BGCTX_SBDATA0_OS_ID_CLRMSK -+#define RGX_MTS_SBDATA_DRIVERID_SHIFT RGX_CR_MTS_BGCTX_SBDATA0_OS_ID_SHIFT -+ -+/* -+ * Register Bank containing registers secured against host access -+ */ -+#define RGX_HOST_SECURE_REGBANK_OFFSET (0xF0000U) -+#define RGX_HOST_SECURE_REGBANK_SIZE (0x10000U) -+ -+/* -+ * Macro used to indicate which version of HWPerf is active -+ */ -+#define RGX_FEATURE_HWPERF_ROGUE -+ -+/* -+ * Maximum number of cores supported by TRP -+ */ -+#define RGX_TRP_MAX_NUM_CORES (4U) -+ -+/* -+ * Maximum number of cores supported by WGP -+ */ -+#define RGX_WGP_MAX_NUM_CORES (8U) -+ -+#if defined(RGX_FEATURE_VOLCANIC_TB) -+#define SUPPORT_VOLCANIC_TB -+#endif -+#define RGX_FEATURE_SECURITY_ROGUE -+ -+/* Typically the PCI bus returns this value on error */ -+#define RGX_PCI_ERROR_VALUE_BYTE (0xFFU) -+#define RGX_PCI_ERROR_VALUE_DWORD (0xFFFFFFFFU) -+ -+#endif /* RGXDEFS_KM_H */ -diff --git a/drivers/gpu/drm/img-rogue/km/rgxmhdefs_km.h b/drivers/gpu/drm/img-rogue/km/rgxmhdefs_km.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/km/rgxmhdefs_km.h -@@ -0,0 +1,286 @@ -+/*************************************************************************/ /*! -+@Title Hardware definition file rgxmhdefs_km.h -+@Brief The file contains auto-generated hardware definitions without -+ BVNC-specific compile time conditionals. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+/* **** Autogenerated C -- do not edit **** */ -+ -+/* -+ * rogue_mh.def -+ */ -+ -+ -+#ifndef RGXMHDEFS_KM_H -+#define RGXMHDEFS_KM_H -+ -+#include "img_types.h" -+#include "img_defs.h" -+ -+ -+#define RGXMHDEFS_KM_REVISION 0 -+ -+#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_FENCE (0x00000000U) -+#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_CONTEXT (0x00000001U) -+#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_QUEUE (0x00000002U) -+ -+ -+#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTL_STREAM (0x00000000U) -+#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTX_BUFFER (0x00000001U) -+#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_QUEUE_CTL (0x00000002U) -+ -+ -+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAFSTACK (0x00000008U) -+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMLIST (0x00000009U) -+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DFSTACK (0x0000000aU) -+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMLIST (0x0000000bU) -+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX0 (0x0000000cU) -+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX1 (0x0000002dU) -+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_UFSTACK (0x0000000fU) -+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMMUSTACK (0x00000012U) -+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMMUSTACK (0x00000013U) -+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAUFSTACK (0x00000016U) -+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DUFSTACK (0x00000017U) -+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DVFP (0x00000019U) -+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAVFP (0x0000001aU) -+ -+ -+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAFSTACK (0x00000000U) -+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMLIST (0x00000001U) -+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DFSTACK (0x00000002U) -+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMLIST (0x00000003U) -+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX0 (0x00000004U) -+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX1 (0x00000025U) -+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_MAVP (0x00000006U) -+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_UFSTACK (0x00000007U) -+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMMUSTACK (0x00000008U) -+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMMUSTACK (0x00000009U) -+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAUFSTACK (0x00000014U) -+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DUFSTACK (0x00000015U) -+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAVFP (0x00000018U) -+ -+ -+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PPP (0x00000008U) -+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_VCERTC (0x00000007U) -+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_TEACRTC (0x00000006U) -+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGRTC (0x00000005U) -+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGR (0x00000004U) -+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGS (0x00000003U) 
-+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_TPC (0x00000002U) -+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_VCE (0x00000001U) -+ -+ -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CREQ00 (0x00000000U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CREQ01 (0x00000001U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_PREQ00 (0x00000002U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_PREQ01 (0x00000003U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_RREQ (0x00000004U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_DBSC (0x00000005U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CPF (0x00000006U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_DELTA (0x00000007U) -+ -+ -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ00 (0x00000000U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ01 (0x00000001U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ02 (0x00000002U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ03 (0x00000003U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ00 (0x00000004U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ01 (0x00000005U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ02 (0x00000006U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ03 (0x00000007U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_RREQ (0x00000008U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_DBSC (0x00000009U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CPF (0x0000000aU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_DELTA (0x0000000bU) -+ -+ -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ00 (0x00000000U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ01 (0x00000001U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ02 (0x00000002U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ03 (0x00000003U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ04 (0x00000004U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ05 (0x00000005U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ06 (0x00000006U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ00 (0x00000007U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ01 (0x00000008U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ02 (0x00000009U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ03 (0x0000000aU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ04 (0x0000000bU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ05 (0x0000000cU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ06 (0x0000000dU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_RREQ (0x0000000eU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_DBSC (0x0000000fU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CPF (0x00000010U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_DELTA (0x00000011U) -+ -+ -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ00 (0x00000000U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ01 (0x00000001U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ02 (0x00000002U) -+#define 
RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ03 (0x00000003U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ04 (0x00000004U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ05 (0x00000005U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ06 (0x00000006U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ07 (0x00000007U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ08 (0x00000008U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ09 (0x00000009U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ10 (0x0000000aU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ11 (0x0000000bU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ12 (0x0000000cU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ13 (0x0000000dU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ00 (0x0000000eU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ01 (0x0000000fU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ02 (0x00000010U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ03 (0x00000011U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ04 (0x00000012U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ05 (0x00000013U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ06 (0x00000014U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ07 (0x00000015U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ08 (0x00000016U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ09 (0x00000017U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ10 (0x00000018U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ11 (0x00000019U) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ12 (0x0000001aU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ13 (0x0000001bU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_RREQ (0x0000001cU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_DBSC (0x0000001dU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CPF (0x0000001eU) -+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_DELTA (0x0000001fU) -+ -+ -+#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_PDS_STATE (0x00000000U) -+#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DEPTH_BIAS (0x00000001U) -+#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_FLOOR_OFFSET_DATA (0x00000002U) -+#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DELTA_DATA (0x00000003U) -+ -+ -+#define RGX_MH_TAG_SB_ISP_ENCODING_ISP_TAG_ZLS (0x00000000U) -+#define RGX_MH_TAG_SB_ISP_ENCODING_ISP_TAG_DS (0x00000001U) -+ -+ -+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTROL (0x00000000U) -+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STATE (0x00000001U) -+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_INDEX (0x00000002U) -+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STACK (0x00000004U) -+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTEXT (0x00000008U) -+ -+ -+#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTROL_STREAM (0x00000000U) -+#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_INDIRECT_DATA (0x00000001U) -+#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_EVENT_DATA (0x00000002U) -+#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTEXT_STATE (0x00000003U) -+ -+ -+#define RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_OPCODE_FETCH (0x00000002U) -+#define 
RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_DATA_ACCESS (0x00000003U) -+ -+ -+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PT_REQUEST (0x00000000U) -+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PD_REQUEST (0x00000001U) -+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PC_REQUEST (0x00000002U) -+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PT_REQUEST (0x00000003U) -+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_REQUEST (0x00000004U) -+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_REQUEST (0x00000005U) -+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_WREQUEST (0x00000006U) -+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_WREQUEST (0x00000007U) -+ -+ -+#define RGX_MH_TAG_ENCODING_MH_TAG_MMU (0x00000000U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_MMU (0x00000001U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_IFU (0x00000002U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_LSU (0x00000003U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_MIPS (0x00000004U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0 (0x00000005U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1 (0x00000006U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2 (0x00000007U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3 (0x00000008U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0 (0x00000009U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1 (0x0000000aU) -+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2 (0x0000000bU) -+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3 (0x0000000cU) -+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4 (0x0000000dU) -+#define RGX_MH_TAG_ENCODING_MH_TAG_PDS_0 (0x0000000eU) -+#define RGX_MH_TAG_ENCODING_MH_TAG_PDS_1 (0x0000000fU) -+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCA (0x00000010U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCB (0x00000011U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCC (0x00000012U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCD (0x00000013U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCA (0x00000014U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCB (0x00000015U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCC (0x00000016U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCD (0x00000017U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDSRW (0x00000018U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_TCU_0 (0x00000019U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_TCU_1 (0x0000001aU) -+#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_0 (0x0000001bU) -+#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_1 (0x0000001cU) -+#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_2 (0x0000001dU) -+#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_3 (0x0000001eU) -+#define RGX_MH_TAG_ENCODING_MH_TAG_USC (0x0000001fU) -+#define RGX_MH_TAG_ENCODING_MH_TAG_ISP_ZLS (0x00000020U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_ISP_DS (0x00000021U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF (0x00000022U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS (0x00000023U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF (0x00000024U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ (0x00000025U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS (0x00000026U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG5 (0x00000027U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PPP (0x00000028U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPWRTC (0x00000029U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TEACRTC (0x0000002aU) -+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGRTC (0x0000002bU) -+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGREGION (0x0000002cU) -+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGSTREAM (0x0000002dU) -+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPW (0x0000002eU) -+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPC (0x0000002fU) -+#define 
RGX_MH_TAG_ENCODING_MH_TAG_PM_ALLOC (0x00000030U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_PM_DEALLOC (0x00000031U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_TDM_DMA (0x00000032U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_TDM_CTL (0x00000033U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_PBE0 (0x00000034U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_PBE1 (0x00000035U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_PBE2 (0x00000036U) -+#define RGX_MH_TAG_ENCODING_MH_TAG_PBE3 (0x00000037U) -+ -+ -+#endif /* RGXMHDEFS_KM_H */ -+/***************************************************************************** -+ End of file (rgxmhdefs_km.h) -+*****************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/km/rgxmmudefs_km.h b/drivers/gpu/drm/img-rogue/km/rgxmmudefs_km.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/km/rgxmmudefs_km.h -@@ -0,0 +1,216 @@ -+/*************************************************************************/ /*! -+@Title Hardware definition file rgxmmudefs_km.h -+@Brief The file contains auto-generated hardware definitions without -+ BVNC-specific compile time conditionals. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+/* **** Autogenerated C -- do not edit **** */ -+ -+/* -+ * rogue_bif.def -+ */ -+ -+ -+#ifndef RGXMMUDEFS_KM_H -+#define RGXMMUDEFS_KM_H -+ -+#include "img_types.h" -+#include "img_defs.h" -+ -+ -+#define RGXMMUDEFS_KM_REVISION 0 -+ -+#define RGX_BIF_DM_ENCODING_VERTEX (0x00000000U) -+#define RGX_BIF_DM_ENCODING_PIXEL (0x00000001U) -+#define RGX_BIF_DM_ENCODING_COMPUTE (0x00000002U) -+#define RGX_BIF_DM_ENCODING_TLA (0x00000003U) -+#define RGX_BIF_DM_ENCODING_PB_VCE (0x00000004U) -+#define RGX_BIF_DM_ENCODING_PB_TE (0x00000005U) -+#define RGX_BIF_DM_ENCODING_META (0x00000007U) -+#define RGX_BIF_DM_ENCODING_HOST (0x00000008U) -+#define RGX_BIF_DM_ENCODING_PM_ALIST (0x00000009U) -+ -+ -+#define RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT (30U) -+#define RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFF003FFFFFFF)) -+#define RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT (21U) -+#define RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC01FFFFF)) -+#define RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U) -+#define RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE00FFF)) -+ -+ -+#define RGX_MMUCTRL_ENTRIES_PC_VALUE (0x00000400U) -+ -+ -+#define RGX_MMUCTRL_ENTRIES_PD_VALUE (0x00000200U) -+ -+ -+#define RGX_MMUCTRL_ENTRIES_PT_VALUE (0x00000200U) -+ -+ -+#define RGX_MMUCTRL_ENTRY_SIZE_PC_VALUE (0x00000020U) -+ -+ -+#define RGX_MMUCTRL_ENTRY_SIZE_PD_VALUE (0x00000040U) -+ -+ -+#define RGX_MMUCTRL_ENTRY_SIZE_PT_VALUE (0x00000040U) -+ -+ -+#define RGX_MMUCTRL_PAGE_SIZE_MASK (0x00000007U) -+#define RGX_MMUCTRL_PAGE_SIZE_4KB (0x00000000U) -+#define RGX_MMUCTRL_PAGE_SIZE_16KB (0x00000001U) -+#define RGX_MMUCTRL_PAGE_SIZE_64KB (0x00000002U) -+#define RGX_MMUCTRL_PAGE_SIZE_256KB (0x00000003U) -+#define RGX_MMUCTRL_PAGE_SIZE_1MB (0x00000004U) -+#define RGX_MMUCTRL_PAGE_SIZE_2MB (0x00000005U) -+ -+ -+#define RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT (12U) -+#define RGX_MMUCTRL_PAGE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+ -+ -+#define RGX_MMUCTRL_PAGE_16KB_RANGE_SHIFT (14U) -+#define RGX_MMUCTRL_PAGE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000003FFF)) -+ -+ -+#define RGX_MMUCTRL_PAGE_64KB_RANGE_SHIFT (16U) -+#define RGX_MMUCTRL_PAGE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000FFFF)) -+ -+ -+#define RGX_MMUCTRL_PAGE_256KB_RANGE_SHIFT (18U) -+#define RGX_MMUCTRL_PAGE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000003FFFF)) -+ -+ -+#define RGX_MMUCTRL_PAGE_1MB_RANGE_SHIFT (20U) -+#define RGX_MMUCTRL_PAGE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000FFFFF)) -+ -+ -+#define RGX_MMUCTRL_PAGE_2MB_RANGE_SHIFT (21U) -+#define RGX_MMUCTRL_PAGE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00001FFFFF)) -+ -+ -+#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT (12U) -+#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+ -+ -+#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT (10U) -+#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000003FF)) -+ -+ -+#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT (8U) -+#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000000FF)) -+ -+ -+#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT (6U) -+#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000003F)) -+ -+ -+#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT (5U) -+#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) -+ -+ -+#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT (5U) -+#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) -+ -+ 
-+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT (62U) -+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) -+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN (IMG_UINT64_C(0x4000000000000000)) -+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT (40U) -+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK (IMG_UINT64_C(0xC00000FFFFFFFFFF)) -+#define RGX_MMUCTRL_PT_DATA_PAGE_SHIFT (12U) -+#define RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) -+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT (6U) -+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) -+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT (5U) -+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) -+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000000000000020)) -+#define RGX_MMUCTRL_PT_DATA_PM_SRC_SHIFT (4U) -+#define RGX_MMUCTRL_PT_DATA_PM_SRC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) -+#define RGX_MMUCTRL_PT_DATA_PM_SRC_EN (IMG_UINT64_C(0x0000000000000010)) -+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_SHIFT (3U) -+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) -+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN (IMG_UINT64_C(0x0000000000000008)) -+#define RGX_MMUCTRL_PT_DATA_CC_SHIFT (2U) -+#define RGX_MMUCTRL_PT_DATA_CC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) -+#define RGX_MMUCTRL_PT_DATA_CC_EN (IMG_UINT64_C(0x0000000000000004)) -+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_SHIFT (1U) -+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) -+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_EN (IMG_UINT64_C(0x0000000000000002)) -+#define RGX_MMUCTRL_PT_DATA_VALID_SHIFT (0U) -+#define RGX_MMUCTRL_PT_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_MMUCTRL_PT_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT (40U) -+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) -+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000010000000000)) -+#define RGX_MMUCTRL_PD_DATA_PT_BASE_SHIFT (5U) -+#define RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) -+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT (1U) -+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF1)) -+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB (IMG_UINT64_C(0x0000000000000002)) -+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB (IMG_UINT64_C(0x0000000000000004)) -+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB (IMG_UINT64_C(0x0000000000000006)) -+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB (IMG_UINT64_C(0x0000000000000008)) -+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB (IMG_UINT64_C(0x000000000000000a)) -+#define RGX_MMUCTRL_PD_DATA_VALID_SHIFT (0U) -+#define RGX_MMUCTRL_PD_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) -+#define RGX_MMUCTRL_PD_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) -+ -+ -+#define RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT (4U) -+#define RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK (0x0000000FU) -+#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT (12U) -+#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE (4096U) -+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT (1U) -+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFDU) -+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN (0x00000002U) -+#define RGX_MMUCTRL_PC_DATA_VALID_SHIFT (0U) -+#define RGX_MMUCTRL_PC_DATA_VALID_CLRMSK 
(0xFFFFFFFEU) -+#define RGX_MMUCTRL_PC_DATA_VALID_EN (0x00000001U) -+ -+ -+#endif /* RGXMMUDEFS_KM_H */ -+/***************************************************************************** -+ End of file (rgxmmudefs_km.h) -+*****************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/km_apphint.c b/drivers/gpu/drm/img-rogue/km_apphint.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/km_apphint.c -@@ -0,0 +1,1760 @@ -+/*************************************************************************/ /*! -+@File km_apphint.c -+@Title Apphint routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device specific functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#include "di_server.h" -+#include "pvr_uaccess.h" -+#include -+#include -+#include -+ -+/* Common and SO layer */ -+#include "img_defs.h" -+#include "sofunc_pvr.h" -+ -+/* for action device access */ -+#include "pvrsrv.h" -+#include "device.h" -+#include "rgxdevice.h" -+#include "rgxfwutils.h" -+#include "rgxhwperf.h" -+#include "htbserver.h" -+#include "rgxutils.h" -+#include "rgxapi_km.h" -+ -+ -+/* defines for default values */ -+#include "rgx_fwif_km.h" -+#include "htbuffer_types.h" -+ -+#include "pvr_notifier.h" -+ -+#include "km_apphint_defs.h" -+#include "km_apphint.h" -+ -+#if defined(PDUMP) -+#if defined(__linux__) -+ #include -+ -+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) -+ #include -+ #else -+ #include -+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ -+#else -+ #include -+#endif /* __linux__ */ -+#include "pdump_km.h" -+#endif -+ -+/* Size of temporary buffers used to read and write AppHint data. -+ * Must be large enough to contain any strings read or written but no larger -+ * than 4096: which is the buffer size for the kernel_param_ops .get -+ * function. And less than 1024 to keep the stack frame size within bounds. -+ */ -+#define APPHINT_BUFFER_SIZE 512 -+ -+/* Apphint Debug output level */ -+#define APPHINT_DPF_LEVEL PVR_DBG_VERBOSE -+ -+/* -+******************************************************************************* -+ * AppHint mnemonic data type helper tables -+******************************************************************************/ -+struct apphint_lookup { -+ const char *name; -+ int value; -+}; -+ -+static const struct apphint_lookup fwt_logtype_tbl[] = { -+ { "trace", 0}, -+ { "none", 0} -+#if defined(SUPPORT_TBI_INTERFACE) -+ , { "tbi", 1} -+#endif -+}; -+ -+static const struct apphint_lookup fwt_loggroup_tbl[] = { -+ RGXFWIF_LOG_GROUP_NAME_VALUE_MAP -+}; -+ -+static const struct apphint_lookup htb_loggroup_tbl[] = { -+#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) }, -+ HTB_LOG_SFGROUPLIST -+#undef X -+}; -+ -+static const struct apphint_lookup htb_opmode_tbl[] = { -+ { "droplatest", HTB_OPMODE_DROPLATEST}, -+ { "dropoldest", HTB_OPMODE_DROPOLDEST}, -+ { "block", HTB_OPMODE_BLOCK} -+}; -+ -+__maybe_unused -+static const struct apphint_lookup htb_logmode_tbl[] = { -+ { "all", HTB_LOGMODE_ALLPID}, -+ { "restricted", HTB_LOGMODE_RESTRICTEDPID} -+}; -+ -+__maybe_unused -+static const struct apphint_lookup timecorr_clk_tbl[] = { -+ { "mono", 0 }, -+ { "mono_raw", 1 }, -+ { "sched", 2 } -+}; -+ -+/* -+******************************************************************************* -+ Data types -+******************************************************************************/ -+union apphint_value { -+ IMG_UINT64 UINT64; -+ IMG_UINT32 UINT32; -+ IMG_BOOL BOOL; -+ IMG_CHAR *STRING; -+}; -+ -+union apphint_query_action { -+ PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device, -+ const void *private_data, IMG_UINT64 *value); -+ PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device, -+ const void *private_data, IMG_UINT32 *value); -+ PVRSRV_ERROR (*BOOL)(const PVRSRV_DEVICE_NODE *device, -+ const void *private_data, IMG_BOOL *value); -+ PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device, -+ const void *private_data, IMG_CHAR **value); -+}; -+ -+union apphint_set_action { -+ PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device, -+ const void *private_data, IMG_UINT64 value); -+ PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device, -+ const 
void *private_data, IMG_UINT32 value); -+ PVRSRV_ERROR (*BOOL)(const PVRSRV_DEVICE_NODE *device, -+ const void *private_data, IMG_BOOL value); -+ PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device, -+ const void *private_data, IMG_CHAR *value); -+}; -+ -+struct apphint_action { -+ union apphint_query_action query; /*!< Query callbacks. */ -+ union apphint_set_action set; /*!< Set callbacks. */ -+ const PVRSRV_DEVICE_NODE *device; /*!< Pointer to the device node.*/ -+ const void *private_data; /*!< Opaque data passed to `query` and -+ `set` callbacks. */ -+ union apphint_value stored; /*!< Value of the AppHint. */ -+ bool free; /*!< Flag indicating that memory has been -+ allocated for this AppHint and it -+ needs to be freed on deinit. */ -+ bool initialised; /*!< Flag indicating if the AppHint has -+ been already initialised. */ -+}; -+ -+struct apphint_param { -+ IMG_UINT32 id; -+ APPHINT_DATA_TYPE data_type; -+ const void *data_type_helper; -+ IMG_UINT32 helper_size; -+}; -+ -+struct apphint_init_data { -+ IMG_UINT32 id; /* index into AppHint Table */ -+ APPHINT_CLASS class; -+ const IMG_CHAR *name; -+ union apphint_value default_value; -+ APPHINT_RT_CLASS guest; /* ALWAYS => present on GUEST, -+ NEVER => not present on GUEST */ -+}; -+ -+struct apphint_init_data_mapping { -+ IMG_UINT32 device_apphint_id; -+ IMG_UINT32 modparam_apphint_id; -+}; -+ -+struct apphint_class_state { -+ APPHINT_CLASS class; -+ IMG_BOOL enabled; -+}; -+ -+struct apphint_work { -+ struct work_struct work; -+ union apphint_value new_value; -+ struct apphint_action *action; -+}; -+ -+/* -+******************************************************************************* -+ Initialization / configuration table data -+******************************************************************************/ -+#define UINT32Bitfield UINT32 -+#define UINT32List UINT32 -+ -+static const struct apphint_init_data init_data_buildvar[] = { -+#define X(a, b, c, d, e, f) \ -+ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d}, APPHINT_RT_CLASS_ ## f }, -+ APPHINT_LIST_BUILDVAR_COMMON -+ APPHINT_LIST_BUILDVAR -+#undef X -+}; -+ -+static const struct apphint_init_data init_data_modparam[] = { -+#define X(a, b, c, d, e, f) \ -+ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d}, APPHINT_RT_CLASS_ ## f }, -+ APPHINT_LIST_MODPARAM_COMMON -+ APPHINT_LIST_MODPARAM -+#undef X -+}; -+ -+static const struct apphint_init_data init_data_debuginfo[] = { -+#define X(a, b, c, d, e, f) \ -+ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d}, APPHINT_RT_CLASS_ ## f }, -+ APPHINT_LIST_DEBUGINFO_COMMON -+ APPHINT_LIST_DEBUGINFO -+#undef X -+}; -+ -+static const struct apphint_init_data init_data_debuginfo_device[] = { -+#define X(a, b, c, d, e, f) \ -+ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d}, APPHINT_RT_CLASS_ ## f }, -+ APPHINT_LIST_DEBUGINFO_DEVICE_COMMON -+ APPHINT_LIST_DEBUGINFO_DEVICE -+#undef X -+}; -+ -+static const struct apphint_init_data_mapping init_data_debuginfo_device_to_modparams[] = { -+#define X(a, b) \ -+ {APPHINT_ID_ ## a, APPHINT_ID_ ## b}, -+ APPHINT_LIST_DEBUIGINFO_DEVICE_X_MODPARAM_INIT_COMMON -+ APPHINT_LIST_DEBUIGINFO_DEVICE_X_MODPARAM_INIT -+#undef X -+}; -+ -+#undef UINT32Bitfield -+#undef UINT32List -+ -+__maybe_unused static const char NO_PARAM_TABLE[] = {}; -+ -+static const struct apphint_param param_lookup[] = { -+#define X(a, b, c, d, e, f) \ -+ {APPHINT_ID_ ## a, APPHINT_DATA_TYPE_ ## b, e, ARRAY_SIZE(e) }, -+ APPHINT_LIST_ALL -+#undef X -+}; -+ -+static const struct apphint_class_state class_state[] = { 
-+#define X(a) {APPHINT_CLASS_ ## a, APPHINT_ENABLED_CLASS_ ## a}, -+ APPHINT_CLASS_LIST -+#undef X -+}; -+ -+/* -+******************************************************************************* -+ Global state -+******************************************************************************/ -+/* If the union apphint_value becomes such that it is not possible to read -+ * and write atomically, a mutex may be desirable to prevent a read returning -+ * a partially written state. -+ * This would require a statically initialized mutex outside of the -+ * struct apphint_state to prevent use of an uninitialized mutex when -+ * module_params are provided on the command line. -+ * static DEFINE_MUTEX(apphint_mutex); -+ */ -+static struct apphint_state -+{ -+ struct workqueue_struct *workqueue; -+ DI_GROUP *debuginfo_device_rootdir[PVRSRV_MAX_DEVICES]; -+ DI_ENTRY *debuginfo_device_entry[PVRSRV_MAX_DEVICES][APPHINT_DEBUGINFO_DEVICE_ID_MAX]; -+ DI_GROUP *debuginfo_rootdir; -+ DI_ENTRY *debuginfo_entry[APPHINT_DEBUGINFO_ID_MAX]; -+ DI_GROUP *buildvar_rootdir; -+ DI_ENTRY *buildvar_entry[APPHINT_BUILDVAR_ID_MAX]; -+ -+ unsigned int num_devices; -+ PVRSRV_DEVICE_NODE *devices[PVRSRV_MAX_DEVICES]; -+ unsigned int initialized; -+ -+ /* Array contains value space for 1 copy of all apphint values defined -+ * (for device 1) and N copies of device specific apphint values for -+ * multi-device platforms. -+ */ -+ struct apphint_action val[APPHINT_ID_MAX + ((PVRSRV_MAX_DEVICES-1)*APPHINT_DEBUGINFO_DEVICE_ID_MAX)]; -+ -+} apphint = { -+/* statically initialise default values to ensure that any module_params -+ * provided on the command line are not overwritten by defaults. -+ */ -+ .val = { -+#define UINT32Bitfield UINT32 -+#define UINT32List UINT32 -+#define X(a, b, c, d, e, f) \ -+ { {NULL}, {NULL}, NULL, NULL, {.b=d}, NULL }, -+ APPHINT_LIST_ALL -+#undef X -+#undef UINT32Bitfield -+#undef UINT32List -+ }, -+ .initialized = 0, -+ .num_devices = 0 -+}; -+ -+#define APPHINT_DEBUGINFO_DEVICE_ID_OFFSET (APPHINT_ID_MAX-APPHINT_DEBUGINFO_DEVICE_ID_MAX) -+ -+static inline void -+get_apphint_id_from_action_addr(const struct apphint_action * const addr, -+ APPHINT_ID * const id) -+{ -+ *id = (APPHINT_ID)(addr - apphint.val); -+ if (*id >= APPHINT_ID_MAX) { -+ *id -= APPHINT_DEBUGINFO_DEVICE_ID_OFFSET; -+ *id %= APPHINT_DEBUGINFO_DEVICE_ID_MAX; -+ *id += APPHINT_DEBUGINFO_DEVICE_ID_OFFSET; -+ } -+} -+ -+static inline void -+get_value_offset_from_device(const PVRSRV_DEVICE_NODE * const device, -+ int * const offset, -+ APPHINT_ID id) -+{ -+ int i; -+ IMG_BOOL bFound = IMG_FALSE; -+ -+ /* No device offset if not a device specific apphint */ -+ if (APPHINT_OF_DRIVER_NO_DEVICE == device) { -+ *offset = 0; -+ return; -+ } -+ -+ /* Check that the specified ID is a device-specific one. If not we -+ * set the offset to 0 for the global MODPARAM / BUILDVAR etc. 
AppHint -+ */ -+ for (i = 0; i < ARRAY_SIZE(init_data_debuginfo_device); i++) -+ { -+ const struct apphint_init_data *device_init = &init_data_debuginfo_device[i]; -+ -+ if ((IMG_UINT32)id == device_init->id) { -+ bFound = IMG_TRUE; -+ break; -+ } -+ } -+ -+ if (!bFound) { -+ *offset = 0; -+ return; -+ } -+ -+ for (i = 0; device && i < PVRSRV_MAX_DEVICES; i++) { -+ if (apphint.devices[i] == device) -+ break; -+ } -+ if (PVRSRV_MAX_DEVICES == i) { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Unregistered device", __func__)); -+ i = 0; -+ } -+ *offset = i * APPHINT_DEBUGINFO_DEVICE_ID_MAX; -+} -+ -+/** -+ * apphint_action_worker - perform an action after an AppHint update has been -+ * requested by a UM process -+ * And update the record of the current active value -+ */ -+static void apphint_action_worker(struct work_struct *work) -+{ -+ struct apphint_work *work_pkt = container_of(work, -+ struct apphint_work, -+ work); -+ struct apphint_action *a = work_pkt->action; -+ union apphint_value value = work_pkt->new_value; -+ APPHINT_ID id; -+ PVRSRV_ERROR result = PVRSRV_OK; -+ -+ get_apphint_id_from_action_addr(a, &id); -+ -+ if (a->set.UINT64) { -+ switch (param_lookup[id].data_type) { -+ case APPHINT_DATA_TYPE_UINT64: -+ result = a->set.UINT64(a->device, -+ a->private_data, -+ value.UINT64); -+ break; -+ -+ case APPHINT_DATA_TYPE_UINT32: -+ case APPHINT_DATA_TYPE_UINT32Bitfield: -+ case APPHINT_DATA_TYPE_UINT32List: -+ result = a->set.UINT32(a->device, -+ a->private_data, -+ value.UINT32); -+ break; -+ -+ case APPHINT_DATA_TYPE_BOOL: -+ result = a->set.BOOL(a->device, -+ a->private_data, -+ value.BOOL); -+ break; -+ -+ case APPHINT_DATA_TYPE_STRING: -+ result = a->set.STRING(a->device, -+ a->private_data, -+ value.STRING); -+ kfree(value.STRING); -+ break; -+ -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: unrecognised data type (%d), index (%d)", -+ __func__, param_lookup[id].data_type, id)); -+ } -+ -+ if (PVRSRV_OK != result) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: failed (%s)", -+ __func__, PVRSRVGetErrorString(result))); -+ } -+ } else { -+ if (a->free) { -+ kfree(a->stored.STRING); -+ } -+ a->stored = value; -+ if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) { -+ a->free = true; -+ } -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: AppHint value updated before handler is registered, ID(%d)", -+ __func__, id)); -+ } -+ kfree((void *)work_pkt); -+} -+ -+static void apphint_action(union apphint_value new_value, -+ struct apphint_action *action) -+{ -+ struct apphint_work *work_pkt = kmalloc(sizeof(*work_pkt), GFP_KERNEL); -+ -+ /* queue apphint update on a serialized workqueue to avoid races */ -+ if (work_pkt) { -+ work_pkt->new_value = new_value; -+ work_pkt->action = action; -+ INIT_WORK(&work_pkt->work, apphint_action_worker); -+ if (0 == queue_work(apphint.workqueue, &work_pkt->work)) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: failed to queue apphint change request", -+ __func__)); -+ goto err_exit; -+ } -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: failed to alloc memory for apphint change request", -+ __func__)); -+ goto err_exit; -+ } -+ return; -+err_exit: -+ kfree(new_value.STRING); -+} -+ -+/** -+ * apphint_read - read the different AppHint data types -+ * return -errno or the buffer size -+ */ -+static int apphint_read(char *buffer, size_t count, APPHINT_ID ue, -+ union apphint_value *value) -+{ -+ APPHINT_DATA_TYPE data_type = param_lookup[ue].data_type; -+ int result = 0; -+ -+ switch (data_type) { -+ case APPHINT_DATA_TYPE_UINT64: -+ if (kstrtou64(buffer, 0, &value->UINT64) < 0) { -+ 
PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid UINT64 input data for id %d: %s", -+ __func__, ue, buffer)); -+ result = -EINVAL; -+ goto err_exit; -+ } -+ break; -+ case APPHINT_DATA_TYPE_UINT32: -+ if (kstrtou32(buffer, 0, &value->UINT32) < 0) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid UINT32 input data for id %d: %s", -+ __func__, ue, buffer)); -+ result = -EINVAL; -+ goto err_exit; -+ } -+ break; -+ case APPHINT_DATA_TYPE_BOOL: -+ switch (buffer[0]) { -+ case '0': -+ case 'n': -+ case 'N': -+ case 'f': -+ case 'F': -+ value->BOOL = IMG_FALSE; -+ break; -+ case '1': -+ case 'y': -+ case 'Y': -+ case 't': -+ case 'T': -+ value->BOOL = IMG_TRUE; -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid BOOL input data for id %d: %s", -+ __func__, ue, buffer)); -+ result = -EINVAL; -+ goto err_exit; -+ } -+ break; -+ case APPHINT_DATA_TYPE_UINT32List: -+ { -+ int i; -+ struct apphint_lookup *lookup = -+ (struct apphint_lookup *) -+ param_lookup[ue].data_type_helper; -+ int size = param_lookup[ue].helper_size; -+ /* buffer may include '\n', remove it */ -+ char *arg = strsep(&buffer, "\n"); -+ -+ if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) { -+ result = -EINVAL; -+ goto err_exit; -+ } -+ -+ for (i = 0; i < size; i++) { -+ if (strcasecmp(lookup[i].name, arg) == 0) { -+ value->UINT32 = lookup[i].value; -+ break; -+ } -+ } -+ if (i == size) { -+ if (OSStringLength(arg) == 0) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: No value set for AppHint", -+ __func__)); -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Unrecognised AppHint value (%s)", -+ __func__, arg)); -+ } -+ result = -EINVAL; -+ } -+ break; -+ } -+ case APPHINT_DATA_TYPE_UINT32Bitfield: -+ { -+ int i; -+ struct apphint_lookup *lookup = -+ (struct apphint_lookup *) -+ param_lookup[ue].data_type_helper; -+ int size = param_lookup[ue].helper_size; -+ /* buffer may include '\n', remove it */ -+ char *string = strsep(&buffer, "\n"); -+ char *token = strsep(&string, ","); -+ -+ if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) { -+ result = -EINVAL; -+ goto err_exit; -+ } -+ -+ value->UINT32 = 0; -+ /* empty string is valid to clear the bitfield */ -+ while (token && *token) { -+ for (i = 0; i < size; i++) { -+ if (strcasecmp(lookup[i].name, token) == 0) { -+ value->UINT32 |= lookup[i].value; -+ break; -+ } -+ } -+ if (i == size) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Unrecognised AppHint value (%s)", -+ __func__, token)); -+ result = -EINVAL; -+ goto err_exit; -+ } -+ token = strsep(&string, ","); -+ } -+ break; -+ } -+ case APPHINT_DATA_TYPE_STRING: -+ { -+ /* buffer may include '\n', remove it */ -+ char *string = strsep(&buffer, "\n"); -+ size_t len = OSStringLength(string); -+ -+ if (!len) { -+ result = -EINVAL; -+ goto err_exit; -+ } -+ -+ ++len; -+ -+ value->STRING = kmalloc(len , GFP_KERNEL); -+ if (!value->STRING) { -+ result = -ENOMEM; -+ goto err_exit; -+ } -+ -+ OSStringLCopy(value->STRING, string, len); -+ break; -+ } -+ default: -+ result = -EINVAL; -+ goto err_exit; -+ } -+ -+err_exit: -+ return (result < 0) ? 
result : count; -+} -+ -+static PVRSRV_ERROR get_apphint_value_from_action(const struct apphint_action * const action, -+ union apphint_value * const value, -+ const PVRSRV_DEVICE_NODE * const psDevNode) -+{ -+ APPHINT_ID id; -+ APPHINT_DATA_TYPE data_type; -+ PVRSRV_ERROR result = PVRSRV_OK; -+ const PVRSRV_DEVICE_NODE *psDevice; -+ -+ get_apphint_id_from_action_addr(action, &id); -+ data_type = param_lookup[id].data_type; -+ -+ /* If we've got an entry that is APPHINT_OF_DRIVER_NO_DEVICE we should use -+ * the higher-level psDevNode value instead. This is the device-node that is -+ * associated with the original debug_dump request. -+ * Note: if we're called with psDevNode == APPHINT_OF_DRIVER_NO_DEVICE -+ * we attempt to use the first registered apphint.devices[0] (if any -+ * devices have been presented). If we have no devices hooked into the -+ * apphint mechanism we just return the default value for the AppHint. -+ */ -+ if (psDevNode == APPHINT_OF_DRIVER_NO_DEVICE) { -+ if (action->device == APPHINT_OF_DRIVER_NO_DEVICE) { -+ if (apphint.num_devices > 0) { -+ psDevice = apphint.devices[0]; -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Uninitialised AppHint device for AppHint index (%d)", -+ id)); -+ return PVRSRV_ERROR_RETRY; -+ } -+ } else { -+ psDevice = action->device; -+ } -+ } else { -+ if (action->device == APPHINT_OF_DRIVER_NO_DEVICE) { -+ if (psDevNode != action->device) { -+ return PVRSRV_ERROR_INVALID_DEVICE; -+ } -+ psDevice = psDevNode; -+ } else { -+ psDevice = action->device; -+ } -+ } -+ -+ if (action->query.UINT64) { -+ switch (data_type) { -+ case APPHINT_DATA_TYPE_UINT64: -+ result = action->query.UINT64(psDevice, -+ action->private_data, -+ &value->UINT64); -+ break; -+ -+ case APPHINT_DATA_TYPE_UINT32: -+ case APPHINT_DATA_TYPE_UINT32Bitfield: -+ case APPHINT_DATA_TYPE_UINT32List: -+ result = action->query.UINT32(psDevice, -+ action->private_data, -+ &value->UINT32); -+ break; -+ -+ case APPHINT_DATA_TYPE_BOOL: -+ result = action->query.BOOL(psDevice, -+ action->private_data, -+ &value->BOOL); -+ break; -+ -+ case APPHINT_DATA_TYPE_STRING: -+ result = action->query.STRING(psDevice, -+ action->private_data, -+ &value->STRING); -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: unrecognised data type (%d), index (%d)", -+ __func__, data_type, id)); -+ } -+ } else { -+ *value = action->stored; -+ } -+ -+ if (PVRSRV_OK != result) { -+ PVR_DPF((PVR_DBG_ERROR, "%s: failed (%d), index (%d)", __func__, result, id)); -+ } -+ -+ return result; -+} -+ -+/* -+ * apphint_write - write the current AppHint data to a buffer -+ * -+ * Returns length written or -errno -+ */ -+static int apphint_write(char *buffer, const size_t size, -+ const struct apphint_action *a) -+{ -+ const struct apphint_param *hint; -+ int result = 0; -+ APPHINT_ID id; -+ union apphint_value value; -+ -+ get_apphint_id_from_action_addr(a, &id); -+ hint = ¶m_lookup[id]; -+ -+ result = get_apphint_value_from_action(a, &value, a->device); -+ -+ switch (hint->data_type) { -+ case APPHINT_DATA_TYPE_UINT64: -+ result += snprintf(buffer + result, size - result, -+ "0x%016llx", -+ value.UINT64); -+ break; -+ case APPHINT_DATA_TYPE_UINT32: -+ result += snprintf(buffer + result, size - result, -+ "0x%08x", -+ value.UINT32); -+ break; -+ case APPHINT_DATA_TYPE_BOOL: -+ result += snprintf(buffer + result, size - result, -+ "%s", -+ value.BOOL ? "Y" : "N"); -+ break; -+ case APPHINT_DATA_TYPE_STRING: -+ if (value.STRING) { -+ result += snprintf(buffer + result, size - result, -+ "%s", -+ *value.STRING ? 
value.STRING : "(none)"); -+ } else { -+ result += snprintf(buffer + result, size - result, -+ "(none)"); -+ } -+ break; -+ case APPHINT_DATA_TYPE_UINT32List: -+ { -+ struct apphint_lookup *lookup = -+ (struct apphint_lookup *) hint->data_type_helper; -+ IMG_UINT32 i; -+ -+ if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) { -+ result = -EINVAL; -+ goto err_exit; -+ } -+ -+ for (i = 0; i < hint->helper_size; i++) { -+ if (lookup[i].value == value.UINT32) { -+ result += snprintf(buffer + result, -+ size - result, -+ "%s", -+ lookup[i].name); -+ break; -+ } -+ } -+ break; -+ } -+ case APPHINT_DATA_TYPE_UINT32Bitfield: -+ { -+ struct apphint_lookup *lookup = -+ (struct apphint_lookup *) hint->data_type_helper; -+ IMG_UINT32 i; -+ -+ if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) { -+ result = -EINVAL; -+ goto err_exit; -+ } -+ -+ for (i = 0; i < hint->helper_size; i++) { -+ if (lookup[i].value & value.UINT32) { -+ result += snprintf(buffer + result, -+ size - result, -+ "%s,", -+ lookup[i].name); -+ } -+ } -+ if (result) { -+ /* remove any trailing ',' */ -+ --result; -+ *(buffer + result) = '\0'; -+ } else { -+ result += snprintf(buffer + result, -+ size - result, "none"); -+ } -+ break; -+ } -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: unrecognised data type (%d), index (%d)", -+ __func__, hint->data_type, id)); -+ result = -EINVAL; -+ } -+ -+err_exit: -+ return result; -+} -+ -+/* -+******************************************************************************* -+ Module parameters initialization - different from debuginfo -+******************************************************************************/ -+/* -+ * apphint_kparam_set - Handle an update of a module parameter -+ * -+ * Returns 0, or -errno. arg is in kp->arg. -+ */ -+static int apphint_kparam_set(const char *val, const struct kernel_param *kp) -+{ -+ char val_copy[APPHINT_BUFFER_SIZE]; -+ APPHINT_ID id; -+ union apphint_value value; -+ int result; -+ -+ /* need to discard const in case of string comparison */ -+ result = strlcpy(val_copy, val, APPHINT_BUFFER_SIZE); -+ -+ get_apphint_id_from_action_addr(kp->arg, &id); -+ if (result < APPHINT_BUFFER_SIZE) { -+ result = apphint_read(val_copy, result, id, &value); -+ if (result >= 0) { -+ ((struct apphint_action *)kp->arg)->stored = value; -+ ((struct apphint_action *)kp->arg)->initialised = true; -+ if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) { -+ ((struct apphint_action *)kp->arg)->free = true; -+ } -+ } -+ } else { -+ PVR_DPF((PVR_DBG_ERROR, "%s: String too long", __func__)); -+ } -+ return (result > 0) ? 0 : result; -+} -+ -+/* -+ * apphint_kparam_get - handle a read of a module parameter -+ * -+ * Returns length written or -errno. Buffer is 4k (ie. be short!) -+ */ -+static int apphint_kparam_get(char *buffer, const struct kernel_param *kp) -+{ -+ return apphint_write(buffer, PAGE_SIZE, kp->arg); -+} -+ -+__maybe_unused -+static const struct kernel_param_ops apphint_kparam_fops = { -+ .set = apphint_kparam_set, -+ .get = apphint_kparam_get, -+}; -+ -+/* -+ * call module_param_cb() for all AppHints listed in APPHINT_LIST_MODPARAM_COMMON + APPHINT_LIST_MODPARAM -+ * apphint_modparam_class_ ## resolves to apphint_modparam_enable() except for -+ * AppHint classes that have been disabled. 
-+ */ -+ -+#define apphint_modparam_enable(name, number, perm) \ -+ module_param_cb(name, &apphint_kparam_fops, &apphint.val[number], perm); -+ -+#define X(a, b, c, d, e, f) \ -+ apphint_modparam_class_ ##c(a, APPHINT_ID_ ## a, 0444) -+ APPHINT_LIST_MODPARAM_COMMON -+ APPHINT_LIST_MODPARAM -+#undef X -+ -+/* -+******************************************************************************* -+ Debug Info get (seq file) operations - supporting functions -+******************************************************************************/ -+static void *apphint_di_start(OSDI_IMPL_ENTRY *s, IMG_UINT64 *pos) -+{ -+ if (*pos == 0) { -+ /* We want only one entry in the sequence, one call to show() */ -+ return (void *) 1; -+ } -+ -+ PVR_UNREFERENCED_PARAMETER(s); -+ -+ return NULL; -+} -+ -+static void apphint_di_stop(OSDI_IMPL_ENTRY *s, void *v) -+{ -+ PVR_UNREFERENCED_PARAMETER(s); -+ PVR_UNREFERENCED_PARAMETER(v); -+} -+ -+static void *apphint_di_next(OSDI_IMPL_ENTRY *s, void *v, IMG_UINT64 *pos) -+{ -+ PVR_UNREFERENCED_PARAMETER(s); -+ PVR_UNREFERENCED_PARAMETER(v); -+ (*pos)++; -+ return NULL; -+} -+ -+static int apphint_di_show(OSDI_IMPL_ENTRY *s, void *v) -+{ -+ IMG_CHAR km_buffer[APPHINT_BUFFER_SIZE]; -+ int result; -+ void *private = DIGetPrivData(s); -+ -+ PVR_UNREFERENCED_PARAMETER(v); -+ -+ result = apphint_write(km_buffer, APPHINT_BUFFER_SIZE, private); -+ if (result < 0) { -+ PVR_DPF((PVR_DBG_ERROR, "%s: failure", __func__)); -+ } else { -+ /* debuginfo requires a trailing \n, module_params don't */ -+ result += snprintf(km_buffer + result, -+ APPHINT_BUFFER_SIZE - result, -+ "\n"); -+ DIPuts(s, km_buffer); -+ } -+ -+ /* have to return 0 to see output */ -+ return (result < 0) ? result : 0; -+} -+ -+/* -+******************************************************************************* -+ Debug Info supporting functions -+******************************************************************************/ -+ -+/* -+ * apphint_set - Handle a DI value update -+ */ -+static IMG_INT64 apphint_set(const IMG_CHAR *buffer, IMG_UINT64 count, -+ IMG_UINT64 *ppos, void *data) -+{ -+ APPHINT_ID id; -+ union apphint_value value; -+ struct apphint_action *action = data; -+ char km_buffer[APPHINT_BUFFER_SIZE]; -+ int result = 0; -+ -+ if (ppos == NULL) -+ return -EIO; -+ -+ if (count >= APPHINT_BUFFER_SIZE) { -+ PVR_DPF((PVR_DBG_ERROR, "%s: String too long (%" IMG_INT64_FMTSPECd ")", -+ __func__, count)); -+ result = -EINVAL; -+ goto err_exit; -+ } -+ -+ /* apphint_read() modifies the buffer so we need to copy it */ -+ memcpy(km_buffer, buffer, count); -+ /* count is larger than real buffer by 1 because DI framework appends -+ * a '\0' character at the end, but here we're ignoring this */ -+ count -= 1; -+ km_buffer[count] = '\0'; -+ -+ get_apphint_id_from_action_addr(action, &id); -+ result = apphint_read(km_buffer, count, id, &value); -+ if (result >= 0) -+ apphint_action(value, action); -+ -+ *ppos += count; -+err_exit: -+ return result; -+} -+ -+/* -+ * apphint_debuginfo_init - Create the specified debuginfo entries -+ */ -+static int apphint_debuginfo_init(const char *sub_dir, -+ unsigned int device_num, -+ unsigned int init_data_size, -+ const struct apphint_init_data *init_data, -+ DI_GROUP *parentdir, -+ DI_GROUP **rootdir, -+ DI_ENTRY *entry[]) -+{ -+ PVRSRV_ERROR result; -+ unsigned int i; -+ unsigned int device_value_offset = device_num * APPHINT_DEBUGINFO_DEVICE_ID_MAX; -+ const DI_ITERATOR_CB iterator = { -+ .pfnStart = apphint_di_start, .pfnStop = apphint_di_stop, -+ .pfnNext = apphint_di_next, 
.pfnShow = apphint_di_show, -+ .pfnWrite = apphint_set, .ui32WriteLenMax = APPHINT_BUFFER_SIZE -+ }; -+ /* Determine if we're booted as a GUEST VZ OS */ -+ IMG_BOOL bIsGUEST = PVRSRV_VZ_MODE_IS(GUEST); -+ -+ if (*rootdir) { -+ PVR_DPF((PVR_DBG_WARNING, -+ "AppHint DebugFS already created, skipping")); -+ result = -EEXIST; -+ goto err_exit; -+ } -+ -+ result = DICreateGroup(sub_dir, parentdir, rootdir); -+ if (result != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_WARNING, -+ "Failed to create \"%s\" DebugFS directory.", sub_dir)); -+ goto err_exit; -+ } -+ -+ for (i = 0; i < init_data_size; i++) { -+ if (!class_state[init_data[i].class].enabled) -+ continue; -+ -+ /* Check to see if this AppHint should appear in a GUEST OS. -+ * This will have a value in the init_data[i].guest field of ALWAYS -+ * and if we don't have this set (and we're in GUEST mode) we must -+ * not present this AppHint to the OS. -+ */ -+ if (bIsGUEST && (init_data[i].guest != APPHINT_RT_CLASS_ALWAYS)) -+ continue; -+ -+ result = DICreateEntry(init_data[i].name, -+ *rootdir, -+ &iterator, -+ (void *) &apphint.val[init_data[i].id + device_value_offset], -+ DI_ENTRY_TYPE_GENERIC, -+ &entry[i]); -+ if (result != PVRSRV_OK) { -+ PVR_DPF((PVR_DBG_WARNING, -+ "Failed to create \"%s/%s\" DebugFS entry.", -+ sub_dir, init_data[i].name)); -+ } -+ } -+ -+ return 0; -+ -+err_exit: -+ return result; -+} -+ -+/* -+ * apphint_debuginfo_deinit- destroy the debuginfo entries -+ */ -+static void apphint_debuginfo_deinit(unsigned int num_entries, -+ DI_GROUP **rootdir, -+ DI_ENTRY *entry[]) -+{ -+ unsigned int i; -+ -+ for (i = 0; i < num_entries; i++) { -+ if (entry[i]) { -+ DIDestroyEntry(entry[i]); -+ } -+ } -+ -+ if (*rootdir) { -+ DIDestroyGroup(*rootdir); -+ *rootdir = NULL; -+ } -+} -+ -+/* -+******************************************************************************* -+ AppHint status dump implementation -+******************************************************************************/ -+#if defined(PDUMP) -+static void apphint_pdump_values(void *pvDeviceNode, -+ const IMG_CHAR *format, ...) -+{ -+ char km_buffer[APPHINT_BUFFER_SIZE]; -+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS; -+ va_list ap; -+ -+ va_start(ap, format); -+ (void)vsnprintf(km_buffer, APPHINT_BUFFER_SIZE, format, ap); -+ va_end(ap); -+ -+ /* ui32CommentSize set to 0 here as function does not make use of the value. */ -+ PDumpCommentKM(NULL, (PVRSRV_DEVICE_NODE*)pvDeviceNode, 0, km_buffer, ui32Flags); -+} -+#endif -+ -+static IMG_BOOL is_apphint_value_equal(const APPHINT_DATA_TYPE data_type, -+ const union apphint_value * const left, -+ const union apphint_value * const right) -+{ -+ switch (data_type) { -+ case APPHINT_DATA_TYPE_UINT64: -+ return left->UINT64 == right->UINT64; -+ case APPHINT_DATA_TYPE_UINT32: -+ case APPHINT_DATA_TYPE_UINT32List: -+ case APPHINT_DATA_TYPE_UINT32Bitfield: -+ return left->UINT32 == right->UINT32; -+ case APPHINT_DATA_TYPE_BOOL: -+ return left->BOOL == right->BOOL; -+ case APPHINT_DATA_TYPE_STRING: -+ return (OSStringNCompare(left->STRING, right->STRING, OSStringLength(right->STRING) + 1) == 0 ? 
IMG_TRUE : IMG_FALSE); -+ default: -+ PVR_DPF((PVR_DBG_WARNING, "%s: unhandled data type (%d)", __func__, data_type)); -+ return IMG_FALSE; -+ } -+} -+ -+static void apphint_dump_values(const char *group_name, -+ int device_num, -+ const struct apphint_init_data *group_data, -+ int group_size, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ bool list_all, -+ PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ int i, result; -+ int device_value_offset = device_num * APPHINT_DEBUGINFO_DEVICE_ID_MAX; -+ char km_buffer[APPHINT_BUFFER_SIZE]; -+ char count = 0; -+ -+ PVR_DUMPDEBUG_LOG(" %s", group_name); -+ for (i = 0; i < group_size; i++) -+ { -+ IMG_UINT32 id = group_data[i].id; -+ APPHINT_DATA_TYPE data_type = param_lookup[id].data_type; -+ const struct apphint_action *action = &apphint.val[id + device_value_offset]; -+ union apphint_value value; -+ -+ result = get_apphint_value_from_action(action, &value, psDevNode); -+ -+ if (PVRSRV_OK != result) { -+ continue; -+ } -+ -+ /* List only apphints with non-default values */ -+ if (!list_all && -+ is_apphint_value_equal(data_type, &value, &group_data[i].default_value)) { -+ continue; -+ } -+ -+ result = apphint_write(km_buffer, APPHINT_BUFFER_SIZE, action); -+ count++; -+ -+ if (result <= 0) { -+ PVR_DUMPDEBUG_LOG(" %s: ", -+ group_data[i].name); -+ } else { -+ PVR_DUMPDEBUG_LOG(" %s: %s", -+ group_data[i].name, km_buffer); -+ } -+ } -+ -+ if (count == 0) { -+ PVR_DUMPDEBUG_LOG(" none"); -+ } -+} -+ -+/* -+ * Callback for debug dump -+ */ -+static void apphint_dump_state(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, -+ IMG_UINT32 ui32VerbLevel, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ int i, result; -+ char description_buffer[50]; -+ PVRSRV_DEVICE_NODE *device = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle; -+ -+ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) { -+ PVR_DUMPDEBUG_LOG("------[ AppHint Settings ]------"); -+ -+ apphint_dump_values("Build Vars", 0, -+ init_data_buildvar, ARRAY_SIZE(init_data_buildvar), -+ pfnDumpDebugPrintf, pvDumpDebugFile, true, device); -+ -+ apphint_dump_values("Module Params", 0, -+ init_data_modparam, ARRAY_SIZE(init_data_modparam), -+ pfnDumpDebugPrintf, pvDumpDebugFile, false, device); -+ -+ apphint_dump_values("Debug Info Params", 0, -+ init_data_debuginfo, ARRAY_SIZE(init_data_debuginfo), -+ pfnDumpDebugPrintf, pvDumpDebugFile, false, device); -+ -+ for (i = 0; i < PVRSRV_MAX_DEVICES; i++) { -+ if (!apphint.devices[i] -+ || (device && device != apphint.devices[i])) -+ continue; -+ -+ result = snprintf(description_buffer, -+ sizeof(description_buffer), -+ "Debug Info Params Device ID: %d", -+ i); -+ if (0 > result) -+ continue; -+ -+ apphint_dump_values(description_buffer, i, -+ init_data_debuginfo_device, -+ ARRAY_SIZE(init_data_debuginfo_device), -+ pfnDumpDebugPrintf, -+ pvDumpDebugFile, -+ false, device); -+ } -+ } -+} -+ -+/* -+******************************************************************************* -+ Public interface -+******************************************************************************/ -+int pvr_apphint_init(void) -+{ -+ int result, i; -+ -+ if (apphint.initialized) { -+ result = -EEXIST; -+ goto err_out; -+ } -+ -+ for (i = 0; i < PVRSRV_MAX_DEVICES; i++) -+ apphint.devices[i] = NULL; -+ -+ /* create workqueue with strict execution ordering to ensure no -+ * race conditions when setting/updating apphints from different -+ * contexts -+ */ -+ apphint.workqueue = alloc_workqueue("apphint_workqueue", -+ WQ_UNBOUND | 
WQ_FREEZABLE, 1); -+ if (!apphint.workqueue) { -+ result = -ENOMEM; -+ goto err_out; -+ } -+ -+ result = apphint_debuginfo_init("apphint", 0, -+ ARRAY_SIZE(init_data_debuginfo), init_data_debuginfo, -+ NULL, -+ &apphint.debuginfo_rootdir, apphint.debuginfo_entry); -+ if (0 != result) -+ goto err_out; -+ -+ result = apphint_debuginfo_init("buildvar", 0, -+ ARRAY_SIZE(init_data_buildvar), init_data_buildvar, -+ NULL, -+ &apphint.buildvar_rootdir, apphint.buildvar_entry); -+ -+ apphint.initialized = 1; -+ -+err_out: -+ return result; -+} -+ -+int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device) -+{ -+ int result, i; -+ unsigned int device_value_offset; -+ -+ if (!apphint.initialized) { -+ result = -EAGAIN; -+ goto err_out; -+ } -+ -+ if (apphint.num_devices+1 > PVRSRV_MAX_DEVICES) { -+ result = -EMFILE; -+ goto err_out; -+ } -+ -+ /* Set the default values for the new device */ -+ device_value_offset = apphint.num_devices * APPHINT_DEBUGINFO_DEVICE_ID_MAX; -+ for (i = 0; i < APPHINT_DEBUGINFO_DEVICE_ID_MAX; i++) { -+ apphint.val[init_data_debuginfo_device[i].id + device_value_offset].stored -+ = init_data_debuginfo_device[i].default_value; -+ } -+ -+ /* Set value of an apphint if mapping to module param exists for it -+ * and this module parameter has been initialised */ -+ for (i = 0; i < ARRAY_SIZE(init_data_debuginfo_device_to_modparams); i++) { -+ const struct apphint_init_data_mapping *mapping = -+ &init_data_debuginfo_device_to_modparams[i]; -+ const struct apphint_action *modparam_action = -+ &apphint.val[mapping->modparam_apphint_id]; -+ struct apphint_action *device_action = -+ &apphint.val[mapping->device_apphint_id + device_value_offset]; -+ -+ /* Set only if the module parameter was explicitly set during the module -+ * load. */ -+ if (modparam_action->initialised) { -+ device_action->stored = modparam_action->stored; -+ } -+ } -+ -+ result = apphint_debuginfo_init("apphint", device->sDevId.ui32InternalID, -+ ARRAY_SIZE(init_data_debuginfo_device), -+ init_data_debuginfo_device, -+ device->sDebugInfo.psGroup, -+ &apphint.debuginfo_device_rootdir[device->sDevId.ui32InternalID], -+ apphint.debuginfo_device_entry[device->sDevId.ui32InternalID]); -+ if (0 != result) -+ goto err_out; -+ -+ apphint.devices[device->sDevId.ui32InternalID] = device; -+ apphint.num_devices++; -+ -+ (void)SOPvrDbgRequestNotifyRegister( -+ &device->hAppHintDbgReqNotify, -+ device, -+ apphint_dump_state, -+ DEBUG_REQUEST_APPHINT, -+ device); -+ -+err_out: -+ return result; -+} -+ -+void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device) -+{ -+ int i; -+ -+ if (!apphint.initialized) -+ return; -+ -+ /* find the device */ -+ for (i = 0; i < PVRSRV_MAX_DEVICES; i++) { -+ if (apphint.devices[i] == device) -+ break; -+ } -+ -+ if (PVRSRV_MAX_DEVICES == i) -+ return; -+ -+ if (device->hAppHintDbgReqNotify) { -+ (void)SOPvrDbgRequestNotifyUnregister( -+ device->hAppHintDbgReqNotify); -+ device->hAppHintDbgReqNotify = NULL; -+ } -+ -+ apphint_debuginfo_deinit(APPHINT_DEBUGINFO_DEVICE_ID_MAX, -+ &apphint.debuginfo_device_rootdir[i], -+ apphint.debuginfo_device_entry[i]); -+ -+ apphint.devices[i] = NULL; -+ -+ WARN_ON(apphint.num_devices==0); -+ apphint.num_devices--; -+} -+ -+void pvr_apphint_deinit(void) -+{ -+ int i; -+ -+ if (!apphint.initialized) -+ return; -+ -+ /* remove any remaining device data */ -+ for (i = 0; apphint.num_devices && i < PVRSRV_MAX_DEVICES; i++) { -+ if (apphint.devices[i]) -+ pvr_apphint_device_unregister(apphint.devices[i]); -+ } -+ -+ /* free all alloc'd string apphints and 
set to NULL */ -+ for (i = 0; i < ARRAY_SIZE(apphint.val); i++) { -+ if (apphint.val[i].free && apphint.val[i].stored.STRING) { -+ kfree(apphint.val[i].stored.STRING); -+ apphint.val[i].stored.STRING = NULL; -+ apphint.val[i].free = false; -+ } -+ } -+ -+ apphint_debuginfo_deinit(APPHINT_DEBUGINFO_ID_MAX, -+ &apphint.debuginfo_rootdir, apphint.debuginfo_entry); -+ apphint_debuginfo_deinit(APPHINT_BUILDVAR_ID_MAX, -+ &apphint.buildvar_rootdir, apphint.buildvar_entry); -+ -+ destroy_workqueue(apphint.workqueue); -+ -+ apphint.initialized = 0; -+} -+ -+void pvr_apphint_dump_state(PVRSRV_DEVICE_NODE *device) -+{ -+#if defined(PDUMP) -+ /* NB. apphint_pdump_values() is the pfnDumpDebugPrintf -+ * function used when PDUMP is defined. -+ * apphintpdump_values() calls PDumpCommentKM(), which -+ * requires the device but as it is only called as a -+ * DUMPDEBUG_PRINTF_FUNC it is only passed pvDumpDebugFile -+ * (which happens to be the 4th parameter in the call to -+ * apphint_dump_state() below). -+ * Hence, we also need to pass device in the 4th parameter. -+ */ -+ apphint_dump_state(device, DEBUG_REQUEST_VERBOSITY_HIGH, -+ apphint_pdump_values, device); -+#endif -+ apphint_dump_state(device, DEBUG_REQUEST_VERBOSITY_HIGH, -+ NULL, NULL); -+} -+ -+ -+int pvr_apphint_get_uint64(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT64 *pVal) -+{ -+ int error = -ERANGE; -+ int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0; -+ -+ if (ue < APPHINT_ID_MAX) { -+ if ((int)ue > APPHINT_DEBUGINFO_DEVICE_ID_OFFSET) // From this point, we're in the device apphints -+ { -+ *pVal = apphint.val[ue + device_offset].stored.UINT64; -+ error = 0; -+ } -+ else -+ { -+ *pVal = apphint.val[ue].stored.UINT64; -+ error = 0; -+ } -+ } -+ return error; -+} -+ -+int pvr_apphint_get_uint32(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT32 *pVal) -+{ -+ int error = -ERANGE; -+ int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0; -+ -+ if (ue < APPHINT_ID_MAX) { -+ if ((int)ue > APPHINT_DEBUGINFO_DEVICE_ID_OFFSET) // From this point, we're in the device apphints -+ { -+ *pVal = apphint.val[ue + device_offset].stored.UINT32; -+ error = 0; -+ } -+ else -+ { -+ *pVal = apphint.val[ue].stored.UINT32; -+ error = 0; -+ } -+ } -+ return error; -+} -+ -+int pvr_apphint_get_bool(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_BOOL *pVal) -+{ -+ int error = -ERANGE; -+ int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0; -+ -+ if (ue < APPHINT_ID_MAX) { -+ if ((int)ue > APPHINT_DEBUGINFO_DEVICE_ID_OFFSET) // From this point, we're in the device apphints -+ { -+ *pVal = apphint.val[ue + device_offset].stored.BOOL; -+ error = 0; -+ } -+ else -+ { -+ *pVal = apphint.val[ue].stored.BOOL; -+ error = 0; -+ } -+ } -+ return error; -+} -+ -+int pvr_apphint_get_string(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size) -+{ -+ int error = -ERANGE; -+ int device_offset = (device != NULL) ? 
device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0; -+ -+ if (ue < APPHINT_ID_MAX && apphint.val[ue].stored.STRING) { -+ if ((int)ue > APPHINT_DEBUGINFO_DEVICE_ID_OFFSET) // From this point, we're in the device apphints -+ { -+ if (OSStringLCopy(pBuffer, apphint.val[ue + device_offset].stored.STRING, size) < size) { -+ error = 0; -+ } -+ } -+ else -+ { -+ if (OSStringLCopy(pBuffer, apphint.val[ue].stored.STRING, size) < size) { -+ error = 0; -+ } -+ } -+ } -+ return error; -+} -+ -+int pvr_apphint_set_uint64(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT64 Val) -+{ -+ int error = -ERANGE; -+ int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0; -+ -+ if ((ue < APPHINT_ID_MAX) && -+ (param_lookup[ue].data_type == APPHINT_DATA_TYPE_UINT64)) { -+ -+ if (apphint.val[ue + device_offset].set.UINT64) { -+ apphint.val[ue + device_offset].set.UINT64(apphint.val[ue + device_offset].device, -+ apphint.val[ue + device_offset].private_data, -+ Val); -+ } else { -+ apphint.val[ue + device_offset].stored.UINT64 = Val; -+ } -+ apphint.val[ue].device = device; -+ error = 0; -+ } -+ -+ return error; -+} -+ -+int pvr_apphint_set_uint32(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT32 Val) -+{ -+ int error = -ERANGE; -+ int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0; -+ -+ if ((ue < APPHINT_ID_MAX) && -+ (param_lookup[ue].data_type == APPHINT_DATA_TYPE_UINT32)) { -+ -+ if (apphint.val[ue + device_offset].set.UINT32) { -+ apphint.val[ue + device_offset].set.UINT32(apphint.val[ue + device_offset].device, -+ apphint.val[ue + device_offset].private_data, -+ Val); -+ } else { -+ apphint.val[ue + device_offset].stored.UINT32 = Val; -+ } -+ apphint.val[ue].device = device; -+ error = 0; -+ } -+ -+ return error; -+} -+ -+int pvr_apphint_set_bool(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_BOOL Val) -+{ -+ int error = -ERANGE; -+ int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0; -+ -+ if ((ue < APPHINT_ID_MAX) && -+ (param_lookup[ue].data_type == APPHINT_DATA_TYPE_BOOL)) { -+ -+ error = 0; -+ if (apphint.val[ue + device_offset].set.BOOL) { -+ apphint.val[ue + device_offset].set.BOOL(apphint.val[ue + device_offset].device, -+ apphint.val[ue + device_offset].private_data, -+ Val); -+ } else { -+ apphint.val[ue + device_offset].stored.BOOL = Val; -+ } -+ apphint.val[ue].device = device; -+ } -+ -+ return error; -+} -+ -+int pvr_apphint_set_string(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size) -+{ -+ int error = -ERANGE; -+ int device_offset = (device != NULL) ? 
device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0; -+ -+ if ((ue < APPHINT_ID_MAX) && -+ ((param_lookup[ue].data_type == APPHINT_DATA_TYPE_STRING) && -+ apphint.val[ue + device_offset].stored.STRING)) { -+ -+ if (apphint.val[ue + device_offset].set.STRING) { -+ error = apphint.val[ue + device_offset].set.STRING(apphint.val[ue + device_offset].device, -+ apphint.val[ue + device_offset].private_data, -+ pBuffer); -+ } else { -+ if (strlcpy(apphint.val[ue + device_offset].stored.STRING, pBuffer, size) < size) { -+ error = 0; -+ } -+ } -+ apphint.val[ue].device = device; -+ } -+ -+ return error; -+} -+ -+void pvr_apphint_register_handlers_uint64(APPHINT_ID id, -+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value), -+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value), -+ const PVRSRV_DEVICE_NODE *device, -+ const void *private_data) -+{ -+ int device_value_offset; -+ -+ PVR_DPF((APPHINT_DPF_LEVEL, "%s(%d, %p, %p, %p, %p)", -+ __func__, id, query, set, device, private_data)); -+ -+ if (id >= APPHINT_ID_MAX) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: AppHint ID (%d) is out of range, max (%d)", -+ __func__, id, APPHINT_ID_MAX-1)); -+ return; -+ } -+ -+ get_value_offset_from_device(device, &device_value_offset, id); -+ -+ switch (param_lookup[id].data_type) { -+ case APPHINT_DATA_TYPE_UINT64: -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Does not match AppHint data type for ID (%d)", -+ __func__, id)); -+ return; -+ } -+ -+ apphint.val[id + device_value_offset] = (struct apphint_action){ -+ .query.UINT64 = query, -+ .set.UINT64 = set, -+ .device = device, -+ .private_data = private_data, -+ .stored = apphint.val[id + device_value_offset].stored -+ }; -+} -+ -+void pvr_apphint_register_handlers_uint32(APPHINT_ID id, -+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value), -+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value), -+ const PVRSRV_DEVICE_NODE *device, -+ const void *private_data) -+{ -+ int device_value_offset; -+ -+ PVR_DPF((APPHINT_DPF_LEVEL, "%s(%d, %p, %p, %p, %p)", -+ __func__, id, query, set, device, private_data)); -+ -+ if (id >= APPHINT_ID_MAX) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: AppHint ID (%d) is out of range, max (%d)", -+ __func__, id, APPHINT_ID_MAX-1)); -+ return; -+ } -+ -+ get_value_offset_from_device(device, &device_value_offset, id); -+ -+ switch (param_lookup[id].data_type) { -+ case APPHINT_DATA_TYPE_UINT32: -+ case APPHINT_DATA_TYPE_UINT32Bitfield: -+ case APPHINT_DATA_TYPE_UINT32List: -+ break; -+ -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Does not match AppHint data type for ID (%d)", -+ __func__, id)); -+ return; -+ } -+ -+ apphint.val[id + device_value_offset] = (struct apphint_action){ -+ .query.UINT32 = query, -+ .set.UINT32 = set, -+ .device = device, -+ .private_data = private_data, -+ .stored = apphint.val[id + device_value_offset].stored -+ }; -+} -+ -+void pvr_apphint_register_handlers_bool(APPHINT_ID id, -+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value), -+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value), -+ const PVRSRV_DEVICE_NODE *device, -+ const void *private_data) -+{ -+ int device_value_offset; -+ -+ PVR_DPF((APPHINT_DPF_LEVEL, "%s(%d, %p, %p, %p, %p)", -+ __func__, id, query, set, device, private_data)); -+ -+ if (id >= 
APPHINT_ID_MAX) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: AppHint ID (%d) is out of range, max (%d)", -+ __func__, id, APPHINT_ID_MAX-1)); -+ return; -+ } -+ -+ get_value_offset_from_device(device, &device_value_offset, id); -+ -+ switch (param_lookup[id].data_type) { -+ case APPHINT_DATA_TYPE_BOOL: -+ break; -+ -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Does not match AppHint data type for ID (%d)", -+ __func__, id)); -+ return; -+ } -+ -+ apphint.val[id + device_value_offset] = (struct apphint_action){ -+ .query.BOOL = query, -+ .set.BOOL = set, -+ .device = device, -+ .private_data = private_data, -+ .stored = apphint.val[id + device_value_offset].stored -+ }; -+} -+ -+void pvr_apphint_register_handlers_string(APPHINT_ID id, -+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value), -+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value), -+ const PVRSRV_DEVICE_NODE *device, -+ const void *private_data) -+{ -+ int device_value_offset; -+ -+ PVR_DPF((APPHINT_DPF_LEVEL, "%s(%d, %p, %p, %p, %p)", -+ __func__, id, query, set, device, private_data)); -+ -+ if (id >= APPHINT_ID_MAX) { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: AppHint ID (%d) is out of range, max (%d)", -+ __func__, id, APPHINT_ID_MAX-1)); -+ return; -+ } -+ -+ get_value_offset_from_device(device, &device_value_offset, id); -+ -+ switch (param_lookup[id].data_type) { -+ case APPHINT_DATA_TYPE_STRING: -+ break; -+ -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Does not match AppHint data type for ID (%d)", -+ __func__, id)); -+ return; -+ } -+ -+ apphint.val[id + device_value_offset] = (struct apphint_action){ -+ .query.STRING = query, -+ .set.STRING = set, -+ .device = device, -+ .private_data = private_data, -+ .stored = apphint.val[id + device_value_offset].stored -+ }; -+} -+ -+/* EOF */ -diff --git a/drivers/gpu/drm/img-rogue/km_apphint.h b/drivers/gpu/drm/img-rogue/km_apphint.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/km_apphint.h -@@ -0,0 +1,99 @@ -+/*************************************************************************/ /*! -+@File km_apphint.h -+@Title Apphint internal header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Linux kernel AppHint control -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef KM_APPHINT_H -+#define KM_APPHINT_H -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+#include "pvrsrv_apphint.h" -+#include "km_apphint_defs.h" -+#include "device.h" -+ -+int pvr_apphint_init(void); -+void pvr_apphint_deinit(void); -+int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device); -+void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device); -+void pvr_apphint_dump_state(PVRSRV_DEVICE_NODE *device); -+ -+int pvr_apphint_get_uint64(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT64 *pVal); -+int pvr_apphint_get_uint32(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT32 *pVal); -+int pvr_apphint_get_bool(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_BOOL *pVal); -+int pvr_apphint_get_string(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size); -+ -+int pvr_apphint_set_uint64(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT64 Val); -+int pvr_apphint_set_uint32(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT32 Val); -+int pvr_apphint_set_bool(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_BOOL Val); -+int pvr_apphint_set_string(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size); -+ -+void pvr_apphint_register_handlers_uint64(APPHINT_ID id, -+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value), -+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value), -+ const PVRSRV_DEVICE_NODE *device, -+ const void * private_data); -+void pvr_apphint_register_handlers_uint32(APPHINT_ID id, -+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value), -+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value), -+ const PVRSRV_DEVICE_NODE *device, -+ const void *private_data); -+void pvr_apphint_register_handlers_bool(APPHINT_ID id, -+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value), -+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value), -+ const PVRSRV_DEVICE_NODE *device, -+ const void *private_data); -+void pvr_apphint_register_handlers_string(APPHINT_ID id, -+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE 
*device, const void *private_data, IMG_CHAR **value), -+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value), -+ const PVRSRV_DEVICE_NODE *device, -+ const void *private_data); -+ -+#if defined(__cplusplus) -+} -+#endif -+#endif /* KM_APPHINT_H */ -+ -+/****************************************************************************** -+ End of file (km_apphint.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/km_apphint_defs.h b/drivers/gpu/drm/img-rogue/km_apphint_defs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/km_apphint_defs.h -@@ -0,0 +1,162 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services AppHint definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device specific functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+#include "km_apphint_defs_common.h" -+ -+#ifndef KM_APPHINT_DEFS_H -+#define KM_APPHINT_DEFS_H -+ -+/* NB: The 'DEVICE' AppHints must be last in this list as they will be -+ * duplicated in the case of a driver supporting multiple devices -+ */ -+#define APPHINT_LIST_ALL \ -+ APPHINT_LIST_BUILDVAR_COMMON \ -+ APPHINT_LIST_BUILDVAR \ -+ APPHINT_LIST_MODPARAM_COMMON \ -+ APPHINT_LIST_MODPARAM \ -+ APPHINT_LIST_DEBUGINFO_COMMON \ -+ APPHINT_LIST_DEBUGINFO \ -+ APPHINT_LIST_DEBUGINFO_DEVICE_COMMON \ -+ APPHINT_LIST_DEBUGINFO_DEVICE -+ -+ -+/* -+******************************************************************************* -+ Build variables (rogue-specific) -+ All of these should be configurable only through the 'default' value -+******************************************************************************/ -+#define APPHINT_LIST_BUILDVAR -+ -+/* -+******************************************************************************* -+ Module parameters (rogue-specific) -+******************************************************************************/ -+#define APPHINT_LIST_MODPARAM \ -+/* name, type, class, default, helper, guest, */ \ -+X(EnableCDMKillingRandMode, BOOL, VALIDATION, PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(HWPerfDisableCustomCounterFilter, BOOL, VALIDATION, PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER, NO_PARAM_TABLE, ALWAYS ) \ -+X(ValidateSOCUSCTimer, BOOL, VALIDATION, PVRSRV_APPHINT_VALIDATESOCUSCTIMERS, NO_PARAM_TABLE, ALWAYS ) \ -+X(ECCRAMErrInj, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(TFBCCompressionControlGroup, UINT32, VALIDATION, PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP, NO_PARAM_TABLE, ALWAYS ) \ -+X(TFBCCompressionControlScheme, UINT32, VALIDATION, PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLSCHEME, NO_PARAM_TABLE, ALWAYS ) \ -+X(TFBCCompressionControlYUVFormat, BOOL, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \ -+X(TFBCCompressionControlLossyMinChannel, BOOL, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \ -+X(TFBCVersionDowngrade, UINT32, ALWAYS, PVRSRV_APPHINT_TFBCVERSION, NO_PARAM_TABLE, ALWAYS ) \ -+ -+/* -+******************************************************************************* -+ Debugfs parameters (rogue-specific) - driver configuration -+******************************************************************************/ -+#define APPHINT_LIST_DEBUGINFO \ -+/* name, type, class, default, helper, guest, */ \ -+ -+/* -+******************************************************************************* -+ Debugfs parameters (rogue-specific) - device configuration -+******************************************************************************/ -+#define APPHINT_LIST_DEBUGINFO_DEVICE \ -+/* name, type, class, default, helper, guest, */ \ -+ -+/* -+******************************************************************************* -+ Mapping between debugfs parameters and module parameters. -+ This mapping is used to initialise device specific apphints from module -+ parameters. Each entry in this table will provide a default value to all -+ devices (i.e. if there is more than one device each device's value will -+ be initialised). 
-+******************************************************************************/ -+#define APPHINT_LIST_DEBUIGINFO_DEVICE_X_MODPARAM_INIT \ -+/* debuginfo device apphint name modparam name */ -+ -+/* -+******************************************************************************* -+ -+ Table generated enums -+ -+******************************************************************************/ -+/* Unique ID for all AppHints */ -+typedef enum { -+#define X(a, b, c, d, e, f) APPHINT_ID_ ## a, -+ APPHINT_LIST_ALL -+#undef X -+ APPHINT_ID_MAX -+} APPHINT_ID; -+ -+/* ID for build variable Apphints - used for build variable only structures */ -+typedef enum { -+#define X(a, b, c, d, e, f) APPHINT_BUILDVAR_ID_ ## a, -+ APPHINT_LIST_BUILDVAR_COMMON -+ APPHINT_LIST_BUILDVAR -+#undef X -+ APPHINT_BUILDVAR_ID_MAX -+} APPHINT_BUILDVAR_ID; -+ -+/* ID for Modparam Apphints - used for modparam only structures */ -+typedef enum { -+#define X(a, b, c, d, e, f) APPHINT_MODPARAM_ID_ ## a, -+ APPHINT_LIST_MODPARAM_COMMON -+ APPHINT_LIST_MODPARAM -+#undef X -+ APPHINT_MODPARAM_ID_MAX -+} APPHINT_MODPARAM_ID; -+ -+/* ID for Debugfs Apphints - used for debugfs only structures */ -+typedef enum { -+#define X(a, b, c, d, e, f) APPHINT_DEBUGINFO_ID_ ## a, -+ APPHINT_LIST_DEBUGINFO_COMMON -+ APPHINT_LIST_DEBUGINFO -+#undef X -+ APPHINT_DEBUGINFO_ID_MAX -+} APPHINT_DEBUGINFO_ID; -+ -+/* ID for Debugfs Device Apphints - used for debugfs device only structures */ -+typedef enum { -+#define X(a, b, c, d, e, f) APPHINT_DEBUGINFO_DEVICE_ID_ ## a, -+ APPHINT_LIST_DEBUGINFO_DEVICE_COMMON -+ APPHINT_LIST_DEBUGINFO_DEVICE -+#undef X -+ APPHINT_DEBUGINFO_DEVICE_ID_MAX -+} APPHINT_DEBUGINFO_DEVICE_ID; -+ -+#endif /* KM_APPHINT_DEFS_H */ -diff --git a/drivers/gpu/drm/img-rogue/km_apphint_defs_common.h b/drivers/gpu/drm/img-rogue/km_apphint_defs_common.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/km_apphint_defs_common.h -@@ -0,0 +1,304 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services AppHint definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device specific functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+ -+#ifndef KM_APPHINT_DEFS_COMMON_H -+#define KM_APPHINT_DEFS_COMMON_H -+ -+/* -+******************************************************************************* -+ Build variables -+ All of these should be configurable only through the 'default' value -+******************************************************************************/ -+#define APPHINT_LIST_BUILDVAR_COMMON \ -+/* name, type, class, default, helper, guest, */ \ -+X(EnableTrustedDeviceAceConfig, BOOL, GPUVIRT_VAL, PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG, NO_PARAM_TABLE, ALWAYS ) \ -+X(CleanupThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_CLEANUPTHREADPRIORITY, NO_PARAM_TABLE, ALWAYS ) \ -+X(WatchdogThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY, NO_PARAM_TABLE, ALWAYS ) \ -+X(HWPerfClientBufferSize, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE, NO_PARAM_TABLE, ALWAYS ) \ -+X(DevmemHistoryBufSizeLog2, UINT32, ALWAYS, PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2, NO_PARAM_TABLE, ALWAYS ) \ -+X(DevmemHistoryMaxEntries, UINT32, ALWAYS, PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES, NO_PARAM_TABLE, ALWAYS ) -+ -+/* -+******************************************************************************* -+ Module parameters -+******************************************************************************/ -+#define APPHINT_LIST_MODPARAM_COMMON \ -+/* name, type, class, default, helper, guest, */ \ -+X(GeneralNon4KHeapPageSize, UINT32, ALWAYS, PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(EnableSignatureChecks, BOOL, PDUMP, PVRSRV_APPHINT_ENABLESIGNATURECHECKS, NO_PARAM_TABLE, ALWAYS ) \ -+X(SignatureChecksBufSize, UINT32, PDUMP, PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(DisableClockGating, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLECLOCKGATING, NO_PARAM_TABLE, ALWAYS ) \ -+X(DisableDMOverlap, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLEDMOVERLAP, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(EnableRandomContextSwitch, BOOL, VALIDATION, PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH, NO_PARAM_TABLE, ALWAYS ) \ -+X(EnableSoftResetContextSwitch, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLESOFTRESETCONTEXTSWITCH, NO_PARAM_TABLE, ALWAYS ) \ -+X(EnableFWContextSwitch, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH, NO_PARAM_TABLE, ALWAYS ) \ -+X(FWContextSwitchProfile, UINT32, VALIDATION, PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(EnableRDPowerIsland, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLERDPOWERISLAND, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(DriverMode, UINT32, ALWAYS, PVRSRV_APPHINT_DRIVERMODE, NO_PARAM_TABLE, ALWAYS ) \ -+X(AutoVzGPUPowerdown, BOOL, ALWAYS, 0, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(FirmwarePerf, UINT32, 
VALIDATION, PVRSRV_APPHINT_FIRMWAREPERF, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(HWPerfFWBufSizeInKB, UINT32, PDUMP, PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB, NO_PARAM_TABLE, ALWAYS ) \ -+X(HWPerfHostBufSizeInKB, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB, NO_PARAM_TABLE, ALWAYS ) \ -+X(HWPerfHostThreadTimeoutInMS, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(JonesDisableMask, UINT32, VALIDATION, PVRSRV_APPHINT_JONESDISABLEMASK, NO_PARAM_TABLE, ALWAYS ) \ -+X(NewFilteringMode, BOOL, VALIDATION, PVRSRV_APPHINT_NEWFILTERINGMODE, NO_PARAM_TABLE, ALWAYS ) \ -+X(TruncateMode, UINT32, VALIDATION, PVRSRV_APPHINT_TRUNCATEMODE, NO_PARAM_TABLE, ALWAYS ) \ -+X(EmuMaxFreq, UINT32, ALWAYS, PVRSRV_APPHINT_EMUMAXFREQ, NO_PARAM_TABLE, ALWAYS ) \ -+X(GPIOValidationMode, UINT32, VALIDATION, PVRSRV_APPHINT_GPIOVALIDATIONMODE, NO_PARAM_TABLE, ALWAYS ) \ -+X(RGXBVNC, STRING, ALWAYS, PVRSRV_APPHINT_RGXBVNC, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(FWContextSwitchCrossDM, UINT32, ALWAYS, 0, NO_PARAM_TABLE, ALWAYS ) \ -+X(ValidateIrq, BOOL, VALIDATION, PVRSRV_APPHINT_VALIDATEIRQ, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(TPUTrilinearFracMaskPDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE, ALWAYS ) \ -+X(TPUTrilinearFracMaskVDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE, ALWAYS ) \ -+X(TPUTrilinearFracMaskCDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE, ALWAYS ) \ -+X(TPUTrilinearFracMaskTDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE, ALWAYS ) \ -+X(TPUTrilinearFracMaskRDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE, ALWAYS ) \ -+X(HTBufferSizeInKB, UINT32, ALWAYS, PVRSRV_APPHINT_HTBUFFERSIZE, NO_PARAM_TABLE, ALWAYS ) \ -+X(FWTraceBufSizeInDWords, UINT32, ALWAYS, PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(EnablePageFaultDebug, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG, NO_PARAM_TABLE, ALWAYS ) \ -+X(EnableFullSyncTracking, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING, NO_PARAM_TABLE, ALWAYS ) \ -+X(IgnoreHWReportedBVNC, BOOL, ALWAYS, PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(PhysMemTestPasses, UINT32, ALWAYS, PVRSRV_APPHINT_PHYSMEMTESTPASSES, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(FBCDCVersionOverride, UINT32, VALIDATION, PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE, NO_PARAM_TABLE, ALWAYS ) \ -+X(TestSLRInterval, UINT32, VALIDATION, PVRSRV_APPHINT_TESTSLRINTERVAL, NO_PARAM_TABLE, ALWAYS ) \ -+X(EnablePollOnChecksumErrorStatus, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \ -+X(RiscvDmiTest, BOOL, VALIDATION, PVRSRV_APPHINT_RISCVDMITEST, NO_PARAM_TABLE, ALWAYS ) \ -+X(DevMemFWHeapPolicy, UINT32, ALWAYS, PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(EnableAPMAll, UINT32, VALIDATION, PVRSRV_APPHINT_ENABLEAPM, NO_PARAM_TABLE, ALWAYS ) \ -+X(KernelCCBSizeLog2, UINT32, VALIDATION, PVRSRV_APPHINT_KCCB_SIZE_LOG2, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(SyncCheckpointPoolMaxLog2, UINT32, ALWAYS, PVRSRV_APPHINT_CHECKPOINTPOOLMAXLOG2, NO_PARAM_TABLE, ALWAYS ) \ -+X(SyncCheckpointPoolInitLog2, UINT32, ALWAYS, PVRSRV_APPHINT_CHECKPOINTPOOLINITLOG2, NO_PARAM_TABLE, ALWAYS ) \ -+X(PhysHeapMinMemOnConnection, UINT32, ALWAYS, PVRSRV_APPHINT_PHYSHEAPMINMEMONCONNECTION, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(RestrictGpuLocalPhysHeapSizeMB, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \ -+X(PhysHeapHybridDefault2CpuLocal, BOOL, ALWAYS, 0, NO_PARAM_TABLE, ALWAYS ) \ -+\ -+X(DebugDumpFWTLogType, UINT32, ALWAYS, PVRSRV_APPHINT_DEBUGDUMPFWTLOGTYPE, NO_PARAM_TABLE, ALWAYS ) -+ -+ -+/* 
-+******************************************************************************* -+ Debugfs parameters - driver configuration -+******************************************************************************/ -+#define APPHINT_LIST_DEBUGINFO_COMMON \ -+/* name, type, class, default, helper, guest, */ \ -+X(EnableHTBLogGroup, UINT32Bitfield, ALWAYS, PVRSRV_APPHINT_ENABLEHTBLOGGROUP, htb_loggroup_tbl, ALWAYS ) \ -+X(HTBOperationMode, UINT32List, ALWAYS, PVRSRV_APPHINT_HTBOPERATIONMODE, htb_opmode_tbl, ALWAYS ) \ -+X(EnableFTraceGPU, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEFTRACEGPU, NO_PARAM_TABLE, NEVER ) \ -+X(HWPerfClientFilter_Services, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES, NO_PARAM_TABLE, ALWAYS ) \ -+X(HWPerfClientFilter_EGL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL, NO_PARAM_TABLE, ALWAYS ) \ -+X(HWPerfClientFilter_OpenGLES, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES, NO_PARAM_TABLE, ALWAYS ) \ -+X(HWPerfClientFilter_OpenCL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL, NO_PARAM_TABLE, ALWAYS ) \ -+X(HWPerfClientFilter_Vulkan, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN, NO_PARAM_TABLE, ALWAYS ) \ -+X(HWPerfClientFilter_OpenGL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGL, NO_PARAM_TABLE, ALWAYS ) \ -+X(CacheOpConfig, UINT32, ALWAYS, PVRSRV_APPHINT_CACHEOPCONFIG, NO_PARAM_TABLE, ALWAYS ) \ -+X(CacheOpUMKMThresholdSize, UINT32, ALWAYS, PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE, NO_PARAM_TABLE, ALWAYS ) \ -+ -+/* -+******************************************************************************* -+ Debugfs parameters - device configuration -+******************************************************************************/ -+#define APPHINT_LIST_DEBUGINFO_DEVICE_COMMON \ -+/* name, type, class, default, helper, guest, */ \ -+/* Device Firmware config */\ -+X(AssertOnHWRTrigger, BOOL, ALWAYS, APPHNT_BLDVAR_ASSERTONHWRTRIGGER, NO_PARAM_TABLE, ALWAYS ) \ -+X(AssertOutOfMemory, BOOL, ALWAYS, PVRSRV_APPHINT_ASSERTOUTOFMEMORY, NO_PARAM_TABLE, ALWAYS ) \ -+X(CheckMList, BOOL, ALWAYS, PVRSRV_APPHINT_CHECKMLIST, NO_PARAM_TABLE, ALWAYS ) \ -+X(EnableLogGroup, UINT32Bitfield, ALWAYS, PVRSRV_APPHINT_ENABLELOGGROUP, fwt_loggroup_tbl, ALWAYS ) \ -+X(FirmwareLogType, UINT32List, ALWAYS, PVRSRV_APPHINT_FIRMWARELOGTYPE, fwt_logtype_tbl, ALWAYS ) \ -+X(HWRDebugDumpLimit, UINT32, ALWAYS, PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT, NO_PARAM_TABLE, ALWAYS ) \ -+X(TimeCorrClock, UINT32List, ALWAYS, PVRSRV_APPHINT_TIMECORRCLOCK, timecorr_clk_tbl, ALWAYS ) \ -+X(HWPerfFWFilter, UINT64, ALWAYS, PVRSRV_APPHINT_HWPERFFWFILTER, NO_PARAM_TABLE, ALWAYS ) \ -+/* Device host config */ \ -+X(EnableAPM, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLEAPM, NO_PARAM_TABLE, ALWAYS ) \ -+X(DisableFEDLogging, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLEFEDLOGGING, NO_PARAM_TABLE, ALWAYS ) \ -+X(ZeroFreelist, BOOL, ALWAYS, PVRSRV_APPHINT_ZEROFREELIST, NO_PARAM_TABLE, ALWAYS ) \ -+X(DisablePDumpPanic, BOOL, PDUMP, PVRSRV_APPHINT_DISABLEPDUMPPANIC, NO_PARAM_TABLE, ALWAYS ) \ -+X(EnableFWPoisonOnFree, BOOL, DEBUG, PVRSRV_APPHINT_ENABLEFWPOISONONFREE, NO_PARAM_TABLE, ALWAYS ) \ -+X(GPUUnitsPowerChange, BOOL, VALIDATION, PVRSRV_APPHINT_GPUUNITSPOWERCHANGE, NO_PARAM_TABLE, ALWAYS ) \ -+X(HWPerfHostFilter, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFHOSTFILTER, NO_PARAM_TABLE, ALWAYS ) -+ -+/* -+******************************************************************************* -+ Mapping between debugfs parameters and module parameters. 
-+ This mapping is used to initialise device specific apphints from module -+ parameters. -+******************************************************************************/ -+#define APPHINT_LIST_DEBUIGINFO_DEVICE_X_MODPARAM_INIT_COMMON \ -+/* debuginfo device apphint name modparam name */ \ -+X(EnableAPM, EnableAPMAll) -+ -+/* -+******************************************************************************* -+ * Types used in the APPHINT_LIST_ lists must be defined here. -+ * New types require specific handling code to be added -+******************************************************************************/ -+#define APPHINT_DATA_TYPE_LIST \ -+X(BOOL) \ -+X(UINT64) \ -+X(UINT32) \ -+X(UINT32Bitfield) \ -+X(UINT32List) \ -+X(STRING) -+ -+#define APPHINT_CLASS_LIST \ -+X(ALWAYS) \ -+X(NEVER) \ -+X(DEBUG) \ -+X(PDUMP) \ -+X(VALIDATION) \ -+X(GPUVIRT_VAL) -+ -+#define APPHINT_RT_CLASS_LIST \ -+X(ALWAYS) \ -+X(NEVER) -+ -+/* -+******************************************************************************* -+ Visibility control for module parameters -+ These bind build variables to AppHint Visibility Groups. -+******************************************************************************/ -+#define APPHINT_ENABLED_CLASS_ALWAYS IMG_TRUE -+#define APPHINT_ENABLED_CLASS_NEVER IMG_FALSE -+#define apphint_modparam_class_ALWAYS(a, b, c) apphint_modparam_enable(a, b, c) -+#if defined(DEBUG) -+ #define APPHINT_ENABLED_CLASS_DEBUG IMG_TRUE -+ #define apphint_modparam_class_DEBUG(a, b, c) apphint_modparam_enable(a, b, c) -+#else -+ #define APPHINT_ENABLED_CLASS_DEBUG IMG_FALSE -+ #define apphint_modparam_class_DEBUG(a, b, c) -+#endif -+#if defined(PDUMP) -+ #define APPHINT_ENABLED_CLASS_PDUMP IMG_TRUE -+ #define apphint_modparam_class_PDUMP(a, b, c) apphint_modparam_enable(a, b, c) -+#else -+ #define APPHINT_ENABLED_CLASS_PDUMP IMG_FALSE -+ #define apphint_modparam_class_PDUMP(a, b, c) -+#endif -+#if defined(SUPPORT_VALIDATION) -+ #define APPHINT_ENABLED_CLASS_VALIDATION IMG_TRUE -+ #define apphint_modparam_class_VALIDATION(a, b, c) apphint_modparam_enable(a, b, c) -+#else -+ #define APPHINT_ENABLED_CLASS_VALIDATION IMG_FALSE -+ #define apphint_modparam_class_VALIDATION(a, b, c) -+#endif -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+ #define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_TRUE -+ #define apphint_modparam_class_GPUVIRT_VAL(a, b, c) apphint_modparam_enable(a, b, c) -+#else -+ #define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_FALSE -+ #define apphint_modparam_class_GPUVIRT_VAL(a, b, c) -+#endif -+ -+/* -+******************************************************************************* -+ AppHint defaults based on other build parameters -+******************************************************************************/ -+#if defined(ASSERTONHWRTRIGGER_DEFAULT_ENABLED) -+ #define APPHNT_BLDVAR_ASSERTONHWRTRIGGER 1 -+#else -+ #define APPHNT_BLDVAR_ASSERTONHWRTRIGGER 0 -+#endif -+#if defined(DEBUG) -+ #define APPHNT_BLDVAR_DEBUG 1 -+ #define APPHNT_BLDVAR_DBGDUMPLIMIT RGXFWIF_HWR_DEBUG_DUMP_ALL -+#else -+ #define APPHNT_BLDVAR_DEBUG 0 -+ #define APPHNT_BLDVAR_DBGDUMPLIMIT 1 -+#endif -+#if defined(PDUMP) -+#define APPHNT_BLDVAR_ENABLESIGNATURECHECKS IMG_TRUE -+#else -+#define APPHNT_BLDVAR_ENABLESIGNATURECHECKS IMG_FALSE -+#endif -+#if defined(DEBUG) || defined(SUPPORT_VALIDATION) -+#define APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG IMG_TRUE -+#else -+#define APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG IMG_FALSE -+#endif -+ -+#if defined(DEBUG) -+ #define APPHNT_PHYSMEMTEST_ENABLE 1 -+#else -+ #define APPHNT_PHYSMEMTEST_ENABLE 0 
-+#endif -+ -+/* Data types and actions */ -+typedef enum { -+ APPHINT_DATA_TYPE_INVALID = 0, -+#define X(a) APPHINT_DATA_TYPE_ ## a, -+ APPHINT_DATA_TYPE_LIST -+#undef X -+ APPHINT_DATA_TYPE_MAX -+} APPHINT_DATA_TYPE; -+ -+typedef enum { -+#define X(a) APPHINT_CLASS_ ## a, -+ APPHINT_CLASS_LIST -+#undef X -+ APPHINT_CLASS_MAX -+} APPHINT_CLASS; -+ -+typedef enum { -+#define X(a) APPHINT_RT_CLASS_ ## a, -+ APPHINT_RT_CLASS_LIST -+#undef X -+ APPHINT_RT_CLASS_MAX -+} APPHINT_RT_CLASS; -+#endif /* KM_APPHINT_DEFS_COMMON_H */ -diff --git a/drivers/gpu/drm/img-rogue/linkage.h b/drivers/gpu/drm/img-rogue/linkage.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/linkage.h -@@ -0,0 +1,52 @@ -+/*************************************************************************/ /*! -+@File -+@Title Linux specific Services code internal interfaces -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Interfaces between various parts of the Linux specific -+ Services code, that don't have any other obvious -+ header file to go into. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#if !defined(LINKAGE_H) -+#define LINKAGE_H -+ -+PVRSRV_ERROR PVROSFuncInit(void); -+void PVROSFuncDeInit(void); -+ -+#endif /* !defined(LINKAGE_H) */ -diff --git a/drivers/gpu/drm/img-rogue/linux_sw_sync.h b/drivers/gpu/drm/img-rogue/linux_sw_sync.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/linux_sw_sync.h -@@ -0,0 +1,52 @@ -+/*************************************************************************/ /*! -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef _UAPI_LINUX_PVR_SW_SYNC_H -+#define _UAPI_LINUX_PVR_SW_SYNC_H -+ -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) -+ -+#include -+ -+#include "pvrsrv_sync_km.h" -+#include "pvr_drm.h" -+ -+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */ -+#endif -diff --git a/drivers/gpu/drm/img-rogue/lists.c b/drivers/gpu/drm/img-rogue/lists.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/lists.c -@@ -0,0 +1,60 @@ -+/*************************************************************************/ /*! -+@File -+@Title Linked list shared functions implementation. -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@Description Implementation of the list iterators for types shared among -+ more than one file in the services code. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "lists.h" -+ -+/*=================================================================== -+ LIST ITERATOR FUNCTIONS USED IN MORE THAN ONE FILE (those used just -+ once are implemented locally). -+ ===================================================================*/ -+ -+IMPLEMENT_LIST_ANY(PVRSRV_DEVICE_NODE) -+IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE) -+IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK) -+IMPLEMENT_LIST_ANY_VA(PVRSRV_DEVICE_NODE) -+IMPLEMENT_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK) -+IMPLEMENT_LIST_FOR_EACH(PVRSRV_DEVICE_NODE) -+IMPLEMENT_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE) -+IMPLEMENT_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE) -+IMPLEMENT_LIST_REMOVE(PVRSRV_DEVICE_NODE) -diff --git a/drivers/gpu/drm/img-rogue/lists.h b/drivers/gpu/drm/img-rogue/lists.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/lists.h -@@ -0,0 +1,367 @@ -+/*************************************************************************/ /*! -+@File -+@Title Linked list shared functions templates. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Definition of the linked list function templates. 
-+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef LISTS_UTILS_H -+#define LISTS_UTILS_H -+ -+/* instruct QAC to ignore warnings about the following custom formatted macros */ -+/* PRQA S 0881,3410 ++ */ -+ -+#if defined(__linux__) -+ #include -+ -+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) -+ #include -+ #else -+ #include -+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ -+#else -+ #include -+#endif /* __linux__ */ -+ -+#include "img_types.h" -+#include "device.h" -+#include "power.h" -+ -+/* -+ - USAGE - -+ -+ The list functions work with any structure that provides the fields psNext and -+ ppsThis. In order to make a function available for a given type, it is required -+ to use the function template macro that creates the actual code. -+ -+ There are 5 main types of functions: -+ - INSERT : given a pointer to the head pointer of the list and a pointer -+ to the node, inserts it as the new head. -+ - INSERT TAIL : given a pointer to the head pointer of the list and a pointer -+ to the node, inserts the node at the tail of the list. -+ - REMOVE : given a pointer to a node, removes it from its list. -+ - FOR EACH : apply a function over all the elements of a list. -+ - ANY : apply a function over the elements of a list, until one of them -+ return a non null value, and then returns it. 
-+ -+ The two last functions can have a variable argument form, with allows to pass -+ additional parameters to the callback function. In order to do this, the -+ callback function must take two arguments, the first is the current node and -+ the second is a list of variable arguments (va_list). -+ -+ The ANY functions have also another for which specifies the return type of the -+ callback function and the default value returned by the callback function. -+ -+*/ -+ -+/*************************************************************************/ /*! -+@Function List_##TYPE##_ForEach -+@Description Apply a callback function to all the elements of a list. -+@Input psHead The head of the list to be processed. -+@Input pfnCallBack The function to be applied to each element of the list. -+*/ /**************************************************************************/ -+#define DECLARE_LIST_FOR_EACH(TYPE) \ -+void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode)) -+ -+#define IMPLEMENT_LIST_FOR_EACH(TYPE) \ -+void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\ -+{\ -+ while (psHead)\ -+ {\ -+ pfnCallBack(psHead);\ -+ psHead = psHead->psNext;\ -+ }\ -+} -+ -+/*************************************************************************/ /*! -+@Function List_##TYPE##_ForEachSafe -+@Description Apply a callback function to all the elements of a list. Do it -+ in a safe way that handles the fact that a node might remove -+ itself from the list during the iteration. -+@Input psHead The head of the list to be processed. -+@Input pfnCallBack The function to be applied to each element of the list. -+*/ /**************************************************************************/ -+#define DECLARE_LIST_FOR_EACH_SAFE(TYPE) \ -+void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode)) -+ -+#define IMPLEMENT_LIST_FOR_EACH_SAFE(TYPE) \ -+void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\ -+{\ -+ TYPE *psNext;\ -+\ -+ while (psHead)\ -+ {\ -+ psNext = psHead->psNext; \ -+ pfnCallBack(psHead);\ -+ psHead = psNext;\ -+ }\ -+} -+ -+ -+#define DECLARE_LIST_FOR_EACH_VA(TYPE) \ -+void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...) -+ -+#define IMPLEMENT_LIST_FOR_EACH_VA(TYPE) \ -+void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...) \ -+{\ -+ va_list ap;\ -+ while (psHead)\ -+ {\ -+ va_start(ap, pfnCallBack);\ -+ pfnCallBack(psHead, ap);\ -+ psHead = psHead->psNext;\ -+ va_end(ap);\ -+ }\ -+} -+ -+ -+/*************************************************************************/ /*! -+@Function List_##TYPE##_Any -+@Description Applies a callback function to the elements of a list until -+ the function returns a non null value, then returns it. -+@Input psHead The head of the list to be processed. -+@Input pfnCallBack The function to be applied to each element of the list. -+@Return The first non null value returned by the callback function. 
-+*/ /**************************************************************************/ -+#define DECLARE_LIST_ANY(TYPE) \ -+void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode)) -+ -+#define IMPLEMENT_LIST_ANY(TYPE) \ -+void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode))\ -+{ \ -+ void *pResult;\ -+ TYPE *psNextNode;\ -+ pResult = NULL;\ -+ psNextNode = psHead;\ -+ while (psHead && !pResult)\ -+ {\ -+ psNextNode = psNextNode->psNext;\ -+ pResult = pfnCallBack(psHead);\ -+ psHead = psNextNode;\ -+ }\ -+ return pResult;\ -+} -+ -+ -+/*with variable arguments, that will be passed as a va_list to the callback function*/ -+ -+#define DECLARE_LIST_ANY_VA(TYPE) \ -+void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...) -+ -+#define IMPLEMENT_LIST_ANY_VA(TYPE) \ -+void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...)\ -+{\ -+ va_list ap;\ -+ TYPE *psNextNode;\ -+ void* pResult = NULL;\ -+ while (psHead && !pResult)\ -+ {\ -+ psNextNode = psHead->psNext;\ -+ va_start(ap, pfnCallBack);\ -+ pResult = pfnCallBack(psHead, ap);\ -+ va_end(ap);\ -+ psHead = psNextNode;\ -+ }\ -+ return pResult;\ -+} -+ -+/*those ones are for extra type safety, so there's no need to use castings for the results*/ -+ -+#define DECLARE_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \ -+RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode)) -+ -+#define IMPLEMENT_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \ -+RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))\ -+{ \ -+ RTYPE result;\ -+ TYPE *psNextNode;\ -+ result = CONTINUE;\ -+ psNextNode = psHead;\ -+ while (psHead && result == CONTINUE)\ -+ {\ -+ psNextNode = psNextNode->psNext;\ -+ result = pfnCallBack(psHead);\ -+ psHead = psNextNode;\ -+ }\ -+ return result;\ -+} -+ -+ -+#define DECLARE_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \ -+RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...) -+ -+#define IMPLEMENT_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \ -+RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)\ -+{\ -+ va_list ap;\ -+ TYPE *psNextNode;\ -+ RTYPE result = CONTINUE;\ -+ while (psHead && result == CONTINUE)\ -+ {\ -+ psNextNode = psHead->psNext;\ -+ va_start(ap, pfnCallBack);\ -+ result = pfnCallBack(psHead, ap);\ -+ va_end(ap);\ -+ psHead = psNextNode;\ -+ }\ -+ return result;\ -+} -+ -+ -+/*************************************************************************/ /*! -+@Function List_##TYPE##_Remove -+@Description Removes a given node from the list. -+@Input psNode The pointer to the node to be removed. -+*/ /**************************************************************************/ -+#define DECLARE_LIST_REMOVE(TYPE) \ -+void List_##TYPE##_Remove(TYPE *psNode) -+ -+#define IMPLEMENT_LIST_REMOVE(TYPE) \ -+void List_##TYPE##_Remove(TYPE *psNode)\ -+{\ -+ (*psNode->ppsThis)=psNode->psNext;\ -+ if (psNode->psNext)\ -+ {\ -+ psNode->psNext->ppsThis = psNode->ppsThis;\ -+ }\ -+} -+ -+/*************************************************************************/ /*! -+@Function List_##TYPE##_Insert -+@Description Inserts a given node at the beginning of the list. -+@Input psHead The pointer to the pointer to the head node. -+@Input psNode The pointer to the node to be inserted. 
-+*/ /**************************************************************************/ -+#define DECLARE_LIST_INSERT(TYPE) \ -+void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode) -+ -+#define IMPLEMENT_LIST_INSERT(TYPE) \ -+void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)\ -+{\ -+ psNewNode->ppsThis = ppsHead;\ -+ psNewNode->psNext = *ppsHead;\ -+ *ppsHead = psNewNode;\ -+ if (psNewNode->psNext)\ -+ {\ -+ psNewNode->psNext->ppsThis = &(psNewNode->psNext);\ -+ }\ -+} -+ -+/*************************************************************************/ /*! -+@Function List_##TYPE##_InsertTail -+@Description Inserts a given node at the end of the list. -+@Input psHead The pointer to the pointer to the head node. -+@Input psNode The pointer to the node to be inserted. -+*/ /**************************************************************************/ -+#define DECLARE_LIST_INSERT_TAIL(TYPE) \ -+void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode) -+ -+#define IMPLEMENT_LIST_INSERT_TAIL(TYPE) \ -+void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode)\ -+{\ -+ TYPE *psTempNode = *ppsHead;\ -+ if (psTempNode != NULL)\ -+ {\ -+ while (psTempNode->psNext)\ -+ psTempNode = psTempNode->psNext;\ -+ ppsHead = &psTempNode->psNext;\ -+ }\ -+ psNewNode->ppsThis = ppsHead;\ -+ psNewNode->psNext = NULL;\ -+ *ppsHead = psNewNode;\ -+} -+ -+/*************************************************************************/ /*! -+@Function List_##TYPE##_Reverse -+@Description Reverse a list in place -+@Input ppsHead The pointer to the pointer to the head node. -+*/ /**************************************************************************/ -+#define DECLARE_LIST_REVERSE(TYPE) \ -+void List_##TYPE##_Reverse(TYPE **ppsHead) -+ -+#define IMPLEMENT_LIST_REVERSE(TYPE) \ -+void List_##TYPE##_Reverse(TYPE **ppsHead)\ -+{\ -+ TYPE *psTmpNode1; \ -+ TYPE *psTmpNode2; \ -+ TYPE *psCurNode; \ -+ psTmpNode1 = NULL; \ -+ psCurNode = *ppsHead; \ -+ while (psCurNode) { \ -+ psTmpNode2 = psCurNode->psNext; \ -+ psCurNode->psNext = psTmpNode1; \ -+ psTmpNode1 = psCurNode; \ -+ psCurNode = psTmpNode2; \ -+ if (psCurNode) \ -+ { \ -+ psTmpNode1->ppsThis = &(psCurNode->psNext); \ -+ } \ -+ else \ -+ { \ -+ psTmpNode1->ppsThis = ppsHead; \ -+ } \ -+ } \ -+ *ppsHead = psTmpNode1; \ -+} -+ -+#define IS_LAST_ELEMENT(x) ((x)->psNext == NULL) -+ -+ -+DECLARE_LIST_ANY(PVRSRV_DEVICE_NODE); -+DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE); -+DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK); -+DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE); -+DECLARE_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK); -+DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE); -+DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE); -+DECLARE_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE); -+DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE); -+ -+#undef DECLARE_LIST_ANY_2 -+#undef DECLARE_LIST_ANY_VA -+#undef DECLARE_LIST_ANY_VA_2 -+#undef DECLARE_LIST_FOR_EACH -+#undef DECLARE_LIST_FOR_EACH_VA -+#undef DECLARE_LIST_INSERT -+#undef DECLARE_LIST_REMOVE -+ -+#endif -+ -+/* re-enable warnings */ -+/* PRQA S 0881,3410 -- */ -diff --git a/drivers/gpu/drm/img-rogue/lock.h b/drivers/gpu/drm/img-rogue/lock.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/lock.h -@@ -0,0 +1,431 @@ -+/*************************************************************************/ /*! -+@File lock.h -+@Title Locking interface -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@Description Services internal locking interface -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef LOCK_H -+#define LOCK_H -+ -+/* In Linux kernel mode we are using the kernel mutex implementation directly -+ * with macros. This allows us to use the kernel lockdep feature for lock -+ * debugging. */ -+#include "lock_types.h" -+ -+#if defined(__linux__) && defined(__KERNEL__) -+ -+#include "allocmem.h" -+#include -+ -+#define OSLockCreateNoStats(phLock) ({ \ -+ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \ -+ *(phLock) = OSAllocMemNoStats(sizeof(struct mutex)); \ -+ if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \ -+ e;}) -+#define OSLockCreate(phLock) ({ \ -+ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \ -+ *(phLock) = OSAllocMem(sizeof(struct mutex)); \ -+ if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \ -+ e;}) -+#define OSLockDestroy(hLock) ({mutex_destroy((hLock)); OSFreeMem((hLock));}) -+#define OSLockDestroyNoStats(hLock) ({mutex_destroy((hLock)); OSFreeMemNoStats((hLock));}) -+ -+#define OSLockAcquire(hLock) ({mutex_lock((hLock));}) -+#define OSLockAcquireNested(hLock, subclass) ({mutex_lock_nested((hLock), (subclass));}) -+#define OSLockRelease(hLock) ({mutex_unlock((hLock));}) -+ -+#define OSLockIsLocked(hLock) ((mutex_is_locked((hLock)) == 1) ? IMG_TRUE : IMG_FALSE) -+#define OSTryLockAcquire(hLock) ((mutex_trylock(hLock) == 1) ? 
IMG_TRUE : IMG_FALSE) -+ -+#define OSSpinLockCreate(_ppsLock) ({ \ -+ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \ -+ *(_ppsLock) = OSAllocMem(sizeof(spinlock_t)); \ -+ if (*(_ppsLock)) {spin_lock_init(*(_ppsLock)); e = PVRSRV_OK;} \ -+ e;}) -+#define OSSpinLockDestroy(_psLock) ({OSFreeMem(_psLock);}) -+ -+typedef unsigned long OS_SPINLOCK_FLAGS; -+#define OSSpinLockAcquire(_pLock, _flags) spin_lock_irqsave(_pLock, _flags) -+#define OSSpinLockRelease(_pLock, _flags) spin_unlock_irqrestore(_pLock, _flags) -+ -+/* These _may_ be reordered or optimized away entirely by the compiler/hw */ -+#define OSAtomicRead(pCounter) atomic_read(pCounter) -+#define OSAtomicWrite(pCounter, i) atomic_set(pCounter, i) -+ -+/* The following atomic operations, in addition to being SMP-safe, also -+ imply a memory barrier around the operation */ -+#define OSAtomicIncrement(pCounter) atomic_inc_return(pCounter) -+#define OSAtomicDecrement(pCounter) atomic_dec_return(pCounter) -+#define OSAtomicCompareExchange(pCounter, oldv, newv) atomic_cmpxchg(pCounter,oldv,newv) -+#define OSAtomicExchange(pCounter, iNewVal) atomic_xchg(pCounter, iNewVal) -+ -+static inline IMG_INT OSAtomicOr(ATOMIC_T *pCounter, IMG_INT iVal) -+{ -+ IMG_INT iOldVal, iLastVal, iNewVal; -+ -+ iLastVal = OSAtomicRead(pCounter); -+ do -+ { -+ iOldVal = iLastVal; -+ iNewVal = iOldVal | iVal; -+ -+ iLastVal = OSAtomicCompareExchange(pCounter, iOldVal, iNewVal); -+ } -+ while (iOldVal != iLastVal); -+ -+ return iNewVal; -+} -+ -+#define OSAtomicAdd(pCounter, incr) atomic_add_return(incr,pCounter) -+#define OSAtomicAddUnless(pCounter, incr, test) atomic_add_unless(pCounter, (incr), (test)) -+ -+#define OSAtomicSubtract(pCounter, incr) atomic_add_return(-(incr),pCounter) -+#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), (test)) -+ -+#else /* defined(__linux__) && defined(__KERNEL__) */ -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+ -+/**************************************************************************/ /*! -+@Function OSLockCreate -+@Description Creates an operating system lock object. -+@Output phLock The created lock. -+@Return PVRSRV_OK on success. PVRSRV_ERROR_OUT_OF_MEMORY if the driver -+ cannot allocate CPU memory needed for the lock. -+ PVRSRV_ERROR_INIT_FAILURE if the Operating System fails to -+ allocate the lock. -+ */ /**************************************************************************/ -+IMG_INTERNAL -+PVRSRV_ERROR OSLockCreate(POS_LOCK *phLock); -+#if defined(INTEGRITY_OS) -+#define OSLockCreateNoStats OSLockCreate -+#endif -+ -+/**************************************************************************/ /*! -+@Function OSLockDestroy -+@Description Destroys an operating system lock object. -+@Input hLock The lock to be destroyed. -+@Return None. -+ */ /**************************************************************************/ -+IMG_INTERNAL -+void OSLockDestroy(POS_LOCK hLock); -+ -+#if defined(INTEGRITY_OS) -+#define OSLockDestroyNoStats OSLockDestroy -+#endif -+/**************************************************************************/ /*! -+@Function OSLockAcquire -+@Description Acquires an operating system lock. -+ NB. This function must not return until the lock is acquired -+ (meaning the implementation should not timeout or return with -+ an error, as the caller will assume they have the lock). -+@Input hLock The lock to be acquired. -+@Return None. 
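From the caller's side, the kernel-mode wrappers above behave like the primitives they alias: OSLock* may sleep (a mutex), while OSSpinLock* saves and disables local interrupts for the critical section (spin_lock_irqsave). A minimal usage sketch with hypothetical names, assuming lock.h is in scope:

static POS_LOCK     ghStateLock;   /* hypothetical: long-lived driver state   */
static POS_SPINLOCK ghIsrLock;     /* hypothetical: data shared with an ISR   */

static PVRSRV_ERROR ExampleLocksInit(void)
{
    PVRSRV_ERROR eError = OSLockCreate(&ghStateLock);
    if (eError != PVRSRV_OK)
    {
        return eError;
    }
    return OSSpinLockCreate(&ghIsrLock);
}

static void ExampleLocksUse(void)
{
    OS_SPINLOCK_FLAGS uiFlags;

    OSLockAcquire(ghStateLock);              /* may sleep; process context only */
    /* ... touch state never accessed from interrupt context ... */
    OSLockRelease(ghStateLock);

    OSSpinLockAcquire(ghIsrLock, uiFlags);   /* saves and disables local IRQs */
    /* ... touch data also accessed from an interrupt handler ... */
    OSSpinLockRelease(ghIsrLock, uiFlags);
}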
-+ */ /**************************************************************************/ -+IMG_INTERNAL -+void OSLockAcquire(POS_LOCK hLock); -+ -+/**************************************************************************/ /*! -+@Function OSTryLockAcquire -+@Description Try to acquire an operating system lock. -+ NB. If lock is acquired successfully in the first attempt, -+ then the function returns true and else it will return false. -+@Input hLock The lock to be acquired. -+@Return IMG_TRUE if lock acquired successfully, -+ IMG_FALSE otherwise. -+ */ /**************************************************************************/ -+IMG_INTERNAL -+IMG_BOOL OSTryLockAcquire(POS_LOCK hLock); -+ -+/* Nested notation isn't used in UM or other OS's */ -+/**************************************************************************/ /*! -+@Function OSLockAcquireNested -+@Description For operating systems other than Linux, this equates to an -+ OSLockAcquire() call. On Linux, this function wraps a call -+ to mutex_lock_nested(). This recognises the scenario where -+ there may be multiple subclasses within a particular class -+ of lock. In such cases, the order in which the locks belonging -+ these various subclasses are acquired is important and must be -+ validated. -+@Input hLock The lock to be acquired. -+@Input subclass The subclass of the lock. -+@Return None. -+ */ /**************************************************************************/ -+#define OSLockAcquireNested(hLock, subclass) OSLockAcquire((hLock)) -+ -+/**************************************************************************/ /*! -+@Function OSLockRelease -+@Description Releases an operating system lock. -+@Input hLock The lock to be released. -+@Return None. -+ */ /**************************************************************************/ -+IMG_INTERNAL -+void OSLockRelease(POS_LOCK hLock); -+ -+/**************************************************************************/ /*! -+@Function OSLockIsLocked -+@Description Tests whether or not an operating system lock is currently -+ locked. -+@Input hLock The lock to be tested. -+@Return IMG_TRUE if locked, IMG_FALSE if not locked. 
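The subclass argument described above only matters on Linux, where it is fed to mutex_lock_nested() so lockdep can validate that two locks of the same class always nest in one fixed order; on other platforms it collapses to a plain OSLockAcquire(). A minimal sketch with hypothetical subclass constants:

/* Hypothetical subclass values -- illustrative only. */
#define EXAMPLE_LOCK_SUBCLASS_PARENT 0
#define EXAMPLE_LOCK_SUBCLASS_CHILD  1

static void ExampleLockParentThenChild(POS_LOCK hParent, POS_LOCK hChild)
{
    /* Same lock class, fixed nesting order: the subclass tells lockdep
     * this parent->child nesting is intentional and valid. */
    OSLockAcquireNested(hParent, EXAMPLE_LOCK_SUBCLASS_PARENT);
    OSLockAcquireNested(hChild,  EXAMPLE_LOCK_SUBCLASS_CHILD);

    /* ... work that needs both locks ... */

    OSLockRelease(hChild);
    OSLockRelease(hParent);
}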
-+ */ /**************************************************************************/ -+IMG_INTERNAL -+IMG_BOOL OSLockIsLocked(POS_LOCK hLock); -+ -+#if defined(__linux__) -+ -+/* Use GCC intrinsics (read/write semantics consistent with kernel-side implementation) */ -+#define OSAtomicRead(pCounter) (*(volatile IMG_INT32 *)&(pCounter)->counter) -+#define OSAtomicWrite(pCounter, i) ((pCounter)->counter = (IMG_INT32) i) -+#define OSAtomicIncrement(pCounter) __sync_add_and_fetch((&(pCounter)->counter), 1) -+#define OSAtomicDecrement(pCounter) __sync_sub_and_fetch((&(pCounter)->counter), 1) -+#define OSAtomicCompareExchange(pCounter, oldv, newv) \ -+ __sync_val_compare_and_swap((&(pCounter)->counter), oldv, newv) -+#define OSAtomicOr(pCounter, iVal) __sync_or_and_fetch((&(pCounter)->counter), iVal) -+ -+static inline IMG_UINT32 OSAtomicExchange(ATOMIC_T *pCounter, IMG_UINT32 iNewVal) -+{ -+ IMG_UINT32 iOldVal; -+ IMG_UINT32 iLastVal; -+ -+ iLastVal = OSAtomicRead(pCounter); -+ do -+ { -+ iOldVal = iLastVal; -+ iLastVal = OSAtomicCompareExchange(pCounter, iOldVal, iNewVal); -+ } -+ while (iOldVal != iLastVal); -+ -+ return iOldVal; -+} -+ -+#define OSAtomicAdd(pCounter, incr) __sync_add_and_fetch((&(pCounter)->counter), incr) -+#define OSAtomicAddUnless(pCounter, incr, test) ({ \ -+ IMG_INT32 c; IMG_INT32 old; \ -+ c = OSAtomicRead(pCounter); \ -+ while (1) { \ -+ if (c == (test)) break; \ -+ old = OSAtomicCompareExchange(pCounter, c, c+(incr)); \ -+ if (old == c) break; \ -+ c = old; \ -+ } c; }) -+ -+#define OSAtomicSubtract(pCounter, incr) OSAtomicAdd(pCounter, -(incr)) -+#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), test) -+ -+#else -+ -+/*************************************************************************/ /*! -+@Function OSAtomicRead -+@Description Read the value of a variable atomically. -+ Atomic functions must be implemented in a manner that -+ is both symmetric multiprocessor (SMP) safe and has a memory -+ barrier around each operation. -+@Input pCounter The atomic variable to read -+@Return The value of the atomic variable -+*/ /**************************************************************************/ -+IMG_INTERNAL -+IMG_INT32 OSAtomicRead(const ATOMIC_T *pCounter); -+ -+/*************************************************************************/ /*! -+@Function OSAtomicWrite -+@Description Write the value of a variable atomically. -+ Atomic functions must be implemented in a manner that -+ is both symmetric multiprocessor (SMP) safe and has a memory -+ barrier around each operation. -+@Input pCounter The atomic variable to be written to -+@Input v The value to write -+@Return None -+*/ /**************************************************************************/ -+IMG_INTERNAL -+void OSAtomicWrite(ATOMIC_T *pCounter, IMG_INT32 v); -+ -+/* For the following atomic operations, in addition to being SMP-safe, -+ should also have a memory barrier around each operation */ -+/*************************************************************************/ /*! -+@Function OSAtomicIncrement -+@Description Increment the value of a variable atomically. -+ Atomic functions must be implemented in a manner that -+ is both symmetric multiprocessor (SMP) safe and has a memory -+ barrier around each operation. -+@Input pCounter The atomic variable to be incremented -+@Return The new value of *pCounter. 
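The compare-exchange retry loops above are the usual building blocks for lock-free reference counting. One common consumer-side pattern is "take a reference unless the object is already dead", which maps directly onto OSAtomicAddUnless; a minimal sketch with hypothetical names:

/* Returns IMG_TRUE if a reference was taken, IMG_FALSE if the refcount had
 * already reached zero (the object is being torn down and must not be used). */
static IMG_BOOL ExampleGetRefUnlessZero(ATOMIC_T *psRefCount)
{
    /* Add 1 unless the current value is 0; a zero result means no
     * reference was taken. */
    return (OSAtomicAddUnless(psRefCount, 1, 0) != 0) ? IMG_TRUE : IMG_FALSE;
}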
-+*/ /**************************************************************************/ -+IMG_INTERNAL -+IMG_INT32 OSAtomicIncrement(ATOMIC_T *pCounter); -+ -+/*************************************************************************/ /*! -+@Function OSAtomicDecrement -+@Description Decrement the value of a variable atomically. -+ Atomic functions must be implemented in a manner that -+ is both symmetric multiprocessor (SMP) safe and has a memory -+ barrier around each operation. -+@Input pCounter The atomic variable to be decremented -+@Return The new value of *pCounter. -+*/ /**************************************************************************/ -+IMG_INTERNAL -+IMG_INT32 OSAtomicDecrement(ATOMIC_T *pCounter); -+ -+/*************************************************************************/ /*! -+@Function OSAtomicAdd -+@Description Add a specified value to a variable atomically. -+ Atomic functions must be implemented in a manner that -+ is both symmetric multiprocessor (SMP) safe and has a memory -+ barrier around each operation. -+@Input pCounter The atomic variable to add the value to -+@Input v The value to be added -+@Return The new value of *pCounter. -+*/ /**************************************************************************/ -+IMG_INTERNAL -+IMG_INT32 OSAtomicAdd(ATOMIC_T *pCounter, IMG_INT32 v); -+ -+/*************************************************************************/ /*! -+@Function OSAtomicAddUnless -+@Description Add a specified value to a variable atomically unless it -+ already equals a particular value. -+ Atomic functions must be implemented in a manner that -+ is both symmetric multiprocessor (SMP) safe and has a memory -+ barrier around each operation. -+@Input pCounter The atomic variable to add the value to -+@Input v The value to be added to 'pCounter' -+@Input t The test value. If 'pCounter' equals this, -+ its value will not be adjusted -+@Return The old value of *pCounter. -+*/ /**************************************************************************/ -+IMG_INTERNAL -+IMG_INT32 OSAtomicAddUnless(ATOMIC_T *pCounter, IMG_INT32 v, IMG_INT32 t); -+ -+/*************************************************************************/ /*! -+@Function OSAtomicSubtract -+@Description Subtract a specified value to a variable atomically. -+ Atomic functions must be implemented in a manner that -+ is both symmetric multiprocessor (SMP) safe and has a memory -+ barrier around each operation. -+@Input pCounter The atomic variable to subtract the value from -+@Input v The value to be subtracted -+@Return The new value of *pCounter. -+*/ /**************************************************************************/ -+IMG_INTERNAL -+IMG_INT32 OSAtomicSubtract(ATOMIC_T *pCounter, IMG_INT32 v); -+ -+/*************************************************************************/ /*! -+@Function OSAtomicSubtractUnless -+@Description Subtract a specified value from a variable atomically unless -+ it already equals a particular value. -+ Atomic functions must be implemented in a manner that -+ is both symmetric multiprocessor (SMP) safe and has a memory -+ barrier around each operation. -+@Input pCounter The atomic variable to subtract the value from -+@Input v The value to be subtracted from 'pCounter' -+@Input t The test value. If 'pCounter' equals this, -+ its value will not be adjusted -+@Return The old value of *pCounter. 
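The matching release side usually keys off the value returned by OSAtomicDecrement, so that exactly one caller observes the transition to zero. A minimal sketch (names are hypothetical, and the destructor is assumed safe to call exactly once):

static void ExamplePutRef(ATOMIC_T *psRefCount,
                          void (*pfnDestroy)(void *pvObj), void *pvObj)
{
    /* OSAtomicDecrement returns the new value, so only the caller that
     * drops the count to zero runs the destructor. */
    if (OSAtomicDecrement(psRefCount) == 0)
    {
        pfnDestroy(pvObj);
    }
}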
-+*/ /**************************************************************************/ -+IMG_INTERNAL -+IMG_INT32 OSAtomicSubtractUnless(ATOMIC_T *pCounter, IMG_INT32 v, IMG_INT32 t); -+ -+/*************************************************************************/ /*! -+@Function OSAtomicCompareExchange -+@Description Set a variable to a given value only if it is currently -+ equal to a specified value. The whole operation must be atomic. -+ Atomic functions must be implemented in a manner that -+ is both symmetric multiprocessor (SMP) safe and has a memory -+ barrier around each operation. -+@Input pCounter The atomic variable to be checked and -+ possibly updated -+@Input oldv The value the atomic variable must have in -+ order to be modified -+@Input newv The value to write to the atomic variable if -+ it equals 'oldv' -+@Return The old value of *pCounter -+*/ /**************************************************************************/ -+IMG_INTERNAL -+IMG_INT32 OSAtomicCompareExchange(ATOMIC_T *pCounter, IMG_INT32 oldv, IMG_INT32 newv); -+ -+/*************************************************************************/ /*! -+@Function OSAtomicExchange -+@Description Set a variable to a given value and retrieve previous value. -+ The whole operation must be atomic. -+ Atomic functions must be implemented in a manner that -+ is both symmetric multiprocessor (SMP) safe and has a memory -+ barrier around each operation. -+@Input pCounter The atomic variable to be updated -+@Input iNewVal The value to write to the atomic variable -+@Return The previous value of *pCounter. -+*/ /**************************************************************************/ -+IMG_INTERNAL -+IMG_INT32 OSAtomicExchange(ATOMIC_T *pCounter, IMG_INT32 iNewVal); -+ -+/*************************************************************************/ /*! -+@Function OSAtomicOr -+@Description Set a variable to the bitwise or of its current value and the -+ specified value. Equivalent to *pCounter |= iVal. -+ The whole operation must be atomic. -+ Atomic functions must be implemented in a manner that -+ is both symmetric multiprocessor (SMP) safe and has a memory -+ barrier around each operation. -+@Input pCounter The atomic variable to be updated -+@Input iVal The value to bitwise or against -+@Return The new value of *pCounter. -+*/ /**************************************************************************/ -+IMG_INTERNAL -+IMG_INT32 OSAtomicOr(ATOMIC_T *pCounter, IMG_INT32 iVal); -+ -+/* For now, spin-locks are required on Linux only, so other platforms fake -+ * spinlocks with normal mutex locks */ -+/*! Type definitions for OS_SPINLOCK accessor and creation / deletion */ -+typedef unsigned long OS_SPINLOCK_FLAGS; -+/*! Pointer to an OS Spinlock */ -+#define POS_SPINLOCK POS_LOCK -+/*! Wrapper for OSLockCreate() */ -+#define OSSpinLockCreate(ppLock) OSLockCreate(ppLock) -+/*! Wrapper for OSLockDestroy() */ -+#define OSSpinLockDestroy(pLock) OSLockDestroy(pLock) -+/*! Wrapper for OSLockAcquire() */ -+#define OSSpinLockAcquire(pLock, flags) {flags = 0; OSLockAcquire(pLock);} -+/*! 
Wrapper for OSLockRelease() */ -+#define OSSpinLockRelease(pLock, flags) {flags = 0; OSLockRelease(pLock);} -+ -+#endif /* defined(__linux__) */ -+#endif /* defined(__linux__) && defined(__KERNEL__) */ -+ -+#endif /* LOCK_H */ -diff --git a/drivers/gpu/drm/img-rogue/lock_types.h b/drivers/gpu/drm/img-rogue/lock_types.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/lock_types.h -@@ -0,0 +1,92 @@ -+/*************************************************************************/ /*! -+@File lock_types.h -+@Title Locking types -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Locking specific enums, defines and structures -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef LOCK_TYPES_H -+#define LOCK_TYPES_H -+ -+/* In Linux kernel mode we are using the kernel mutex implementation directly -+ * with macros. This allows us to use the kernel lockdep feature for lock -+ * debugging. */ -+#if defined(__linux__) && defined(__KERNEL__) -+ -+#include -+#include -+/* The mutex is defined as a pointer to be compatible with the other code. This -+ * isn't ideal and usually you wouldn't do that in kernel code. 
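In kernel builds, the typedefs that follow alias the Services lock handles directly onto kernel primitives; the #include targets are not visible in this hunk, but the types used imply roughly the headers below (the exact list in the original file is an assumption):

#include <linux/mutex.h>      /* struct mutex        -> POS_LOCK     */
#include <linux/rwsem.h>      /* struct rw_semaphore -> POSWR_LOCK   */
#include <linux/spinlock.h>   /* spinlock_t          -> POS_SPINLOCK */
#include <linux/atomic.h>     /* atomic_t            -> ATOMIC_T     */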
*/ -+typedef struct mutex *POS_LOCK; -+typedef struct rw_semaphore *POSWR_LOCK; -+typedef spinlock_t *POS_SPINLOCK; -+typedef atomic_t ATOMIC_T; -+ -+#else /* defined(__linux__) && defined(__KERNEL__) */ -+#include "img_types.h" /* needed for IMG_INT */ -+typedef struct OS_LOCK_TAG *POS_LOCK; -+ -+#if defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) -+typedef struct OSWR_LOCK_TAG *POSWR_LOCK; -+#else /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ -+typedef struct OSWR_LOCK_TAG { -+ IMG_UINT32 ui32Unused; -+} *POSWR_LOCK; -+#endif /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ -+ -+#if defined(__linux__) || defined(__APPLE__) -+ typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T; -+#elif defined(__QNXNTO__) -+ typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T; -+#elif defined(_WIN32) -+ /* -+ * Placeholder definition. WDDM doesn't use Services, but some headers -+ * still have to be shared. This is one such case. -+ */ -+ typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T; -+#elif defined(INTEGRITY_OS) -+ /* Only lower 32bits are used in OS ATOMIC APIs to have consistent behaviour across all OS */ -+ typedef struct OS_ATOMIC_TAG {IMG_INT64 counter;} ATOMIC_T; -+#else -+ #error "Please type-define an atomic lock for this environment" -+#endif -+ -+#endif /* defined(__linux__) && defined(__KERNEL__) */ -+ -+#endif /* LOCK_TYPES_H */ -diff --git a/drivers/gpu/drm/img-rogue/log2.h b/drivers/gpu/drm/img-rogue/log2.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/log2.h -@@ -0,0 +1,417 @@ -+/*************************************************************************/ /*! -+@Title Integer log2 and related functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef LOG2_H -+#define LOG2_H -+ -+#include "img_defs.h" -+ -+/*************************************************************************/ /*! -+@Description Determine if a number is a power of two. -+@Input n -+@Return True if n is a power of 2, false otherwise. True if n == 0. -+*/ /**************************************************************************/ -+static INLINE IMG_BOOL __const_function IsPower2(uint32_t n) -+{ -+ /* C++ needs this cast. */ -+ return (IMG_BOOL)((n & (n - 1U)) == 0U); -+} -+ -+/*************************************************************************/ /*! -+@Description Determine if a number is a power of two. -+@Input n -+@Return True if n is a power of 2, false otherwise. True if n == 0. -+*/ /**************************************************************************/ -+static INLINE IMG_BOOL __const_function IsPower2_64(uint64_t n) -+{ -+ /* C++ needs this cast. */ -+ return (IMG_BOOL)((n & (n - 1U)) == 0U); -+} -+ -+/* Code using GNU GCC intrinsics */ -+#if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) -+ -+/* CHAR_BIT is typically found in . For all the platforms where -+ * CHAR_BIT is not available, defined it here with the assumption that there -+ * are 8 bits in a byte */ -+#ifndef CHAR_BIT -+#define CHAR_BIT 8U -+#endif -+ -+/*************************************************************************/ /*! -+@Description Compute floor(log2(n)) -+@Input n -+@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0 -+*/ /**************************************************************************/ -+static INLINE uint32_t __const_function FloorLog2(uint32_t n) -+{ -+ if (unlikely(n == 0U)) -+ { -+ return 0; -+ } -+ else -+ { -+ uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n); -+ return uNumBits - (uint32_t)__builtin_clz(n) - 1U; -+ } -+} -+ -+/*************************************************************************/ /*! -+@Description Compute floor(log2(n)) -+@Input n -+@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0 -+*/ /**************************************************************************/ -+static INLINE uint32_t __const_function FloorLog2_64(uint64_t n) -+{ -+ if (unlikely(n == 0U)) -+ { -+ return 0; -+ } -+ else -+ { -+ uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n); -+ return uNumBits - (uint32_t)__builtin_clzll(n) - 1U; -+ } -+} -+ -+/*************************************************************************/ /*! -+@Description Compute ceil(log2(n)) -+@Input n -+@Return log2(n) rounded up to the nearest integer. 
Returns 0 if n == 0 -+*/ /**************************************************************************/ -+static INLINE uint32_t __const_function CeilLog2(uint32_t n) -+{ -+ if (unlikely(n == 0U || n == 1U)) -+ { -+ return 0; -+ } -+ else -+ { -+ uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n); -+ -+ n--; /* Handle powers of 2 */ -+ return uNumBits - (uint32_t)__builtin_clz(n); -+ } -+} -+ -+/*************************************************************************/ /*! -+@Description Compute ceil(log2(n)) -+@Input n -+@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0 -+*/ /**************************************************************************/ -+static INLINE uint32_t __const_function CeilLog2_64(uint64_t n) -+{ -+ if (unlikely(n == 0U || n == 1U)) -+ { -+ return 0; -+ } -+ else -+ { -+ uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n); -+ -+ n--; /* Handle powers of 2 */ -+ return uNumBits - (uint32_t)__builtin_clzll(n); -+ } -+} -+ -+/*************************************************************************/ /*! -+@Description Compute log2(n) for exact powers of two only -+@Input n Must be a power of two -+@Return log2(n) -+*/ /**************************************************************************/ -+static INLINE uint32_t __const_function ExactLog2(uint32_t n) -+{ -+ return (uint32_t)CHAR_BIT * (uint32_t)sizeof(n) - (uint32_t)__builtin_clz(n) - 1U; -+} -+ -+/*************************************************************************/ /*! -+@Description Compute log2(n) for exact powers of two only -+@Input n Must be a power of two -+@Return log2(n) -+*/ /**************************************************************************/ -+static INLINE uint32_t __const_function ExactLog2_64(uint64_t n) -+{ -+ return (uint32_t)CHAR_BIT * (uint32_t)sizeof(n) - (uint32_t)__builtin_clzll(n) - 1U; -+} -+ -+/*************************************************************************/ /*! -+@Description Round a non-power-of-two number up to the next power of two. -+@Input n -+@Return n rounded up to the next power of two. If n is zero or -+ already a power of two, return n unmodified. -+*/ /**************************************************************************/ -+static INLINE uint32_t __const_function RoundUpToNextPowerOfTwo(uint32_t n) -+{ -+ /* Cases with n greater than 2^31 needs separate handling -+ * as result of (1<<32) is undefined. */ -+ if (unlikely( n == 0U || n > (uint32_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - 1U))) -+ { -+ return 0; -+ } -+ -+ /* Return n if it is already a power of 2 */ -+ if ((IMG_BOOL)((n & (n - 1U)) == 0U)) -+ { -+ return n; -+ } -+ -+ return (uint32_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - (uint32_t)__builtin_clz(n)); -+} -+ -+/*************************************************************************/ /*! -+@Description Round a non-power-of-two number up to the next power of two. -+@Input n -+@Return n rounded up to the next power of two. If n is zero or -+ already a power of two, return n unmodified. -+*/ /**************************************************************************/ -+static INLINE uint64_t __const_function RoundUpToNextPowerOfTwo_64(uint64_t n) -+{ -+ /* Cases with n greater than 2^63 needs separate handling -+ * as result of (1<<64) is undefined. 
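A few concrete values pin down how these helpers relate to one another; a minimal host-side self-check sketch (not part of the driver), assuming log2.h is included:

#include <assert.h>

static void ExampleLog2SelfCheck(void)
{
    assert(FloorLog2(5U) == 2U);                 /* 2^2 = 4 <= 5          */
    assert(CeilLog2(5U)  == 3U);                 /* 2^3 = 8 >= 5          */
    assert(FloorLog2(8U) == 3U);                 /* exact powers agree... */
    assert(CeilLog2(8U)  == 3U);
    assert(ExactLog2(8U) == 3U);                 /* ...powers of two only */
    assert(RoundUpToNextPowerOfTwo(5U) == 8U);
    assert(RoundUpToNextPowerOfTwo(8U) == 8U);   /* already a power of two */
}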
*/ -+ if (unlikely( n == 0U || n > (uint64_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - 1U))) -+ { -+ return 0; -+ } -+ -+ /* Return n if it is already a power of 2 */ -+ if ((IMG_BOOL)((n & (n - 1U)) == 0U)) -+ { -+ return n; -+ } -+ -+ return (uint64_t)1 << ((uint64_t)CHAR_BIT * sizeof(n) - (uint64_t)__builtin_clzll(n)); -+} -+ -+#else /* #if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) */ -+ -+/*************************************************************************/ /*! -+@Description Round a non-power-of-two number up to the next power of two. -+@Input n -+@Return n rounded up to the next power of two. If n is zero or -+ already a power of two, return n unmodified. -+*/ /**************************************************************************/ -+static INLINE uint32_t __const_function RoundUpToNextPowerOfTwo(uint32_t n) -+{ -+ n--; -+ n |= n >> 1; /* handle 2 bit numbers */ -+ n |= n >> 2; /* handle 4 bit numbers */ -+ n |= n >> 4; /* handle 8 bit numbers */ -+ n |= n >> 8; /* handle 16 bit numbers */ -+ n |= n >> 16; /* handle 32 bit numbers */ -+ n++; -+ -+ return n; -+} -+ -+/*************************************************************************/ /*! -+@Description Round a non-power-of-two number up to the next power of two. -+@Input n -+@Return n rounded up to the next power of two. If n is zero or -+ already a power of two, return n unmodified. -+*/ /**************************************************************************/ -+static INLINE uint64_t __const_function RoundUpToNextPowerOfTwo_64(uint64_t n) -+{ -+ n--; -+ n |= n >> 1; /* handle 2 bit numbers */ -+ n |= n >> 2; /* handle 4 bit numbers */ -+ n |= n >> 4; /* handle 8 bit numbers */ -+ n |= n >> 8; /* handle 16 bit numbers */ -+ n |= n >> 16; /* handle 32 bit numbers */ -+ n |= n >> 32; /* handle 64 bit numbers */ -+ n++; -+ -+ return n; -+} -+ -+/*************************************************************************/ /*! -+@Description Compute floor(log2(n)) -+@Input n -+@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0 -+*/ /**************************************************************************/ -+static INLINE uint32_t __const_function FloorLog2(uint32_t n) -+{ -+ uint32_t ui32log2 = 0; -+ -+ for (n >>= 1; n != 0U; n >>= 1) -+ { -+ ui32log2++; -+ } -+ -+ return ui32log2; -+} -+ -+/*************************************************************************/ /*! -+@Description Compute floor(log2(n)) -+@Input n -+@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0 -+*/ /**************************************************************************/ -+static INLINE uint32_t __const_function FloorLog2_64(uint64_t n) -+{ -+ uint32_t ui32log2 = 0; -+ -+ for (n >>= 1; n != 0U; n >>= 1) -+ { -+ ui32log2++; -+ } -+ -+ return ui32log2; -+} -+ -+/*************************************************************************/ /*! -+@Description Compute ceil(log2(n)) -+@Input n -+@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0 -+*/ /**************************************************************************/ -+static INLINE uint32_t __const_function CeilLog2(uint32_t n) -+{ -+ uint32_t ui32log2 = 0; -+ -+ if (n == 0U) -+ { -+ return 0; -+ } -+ -+ n--; /* Handle powers of 2 */ -+ -+ while (n != 0U) -+ { -+ ui32log2++; -+ n >>= 1; -+ } -+ -+ return ui32log2; -+} -+ -+/*************************************************************************/ /*! 
-+@Description Compute ceil(log2(n)) -+@Input n -+@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0 -+*/ /**************************************************************************/ -+static INLINE uint32_t __const_function CeilLog2_64(uint64_t n) -+{ -+ uint32_t ui32log2 = 0; -+ -+ if (n == 0U) -+ { -+ return 0; -+ } -+ -+ n--; /* Handle powers of 2 */ -+ -+ while (n != 0U) -+ { -+ ui32log2++; -+ n >>= 1; -+ } -+ -+ return ui32log2; -+} -+ -+/*************************************************************************/ /*! -+@Description Compute log2(n) for exact powers of two only -+@Input n Must be a power of two -+@Return log2(n) -+*/ /**************************************************************************/ -+static INLINE uint32_t __const_function ExactLog2(uint32_t n) -+{ -+ static const uint32_t b[] = -+ {0xAAAAAAAAU, 0xCCCCCCCCU, 0xF0F0F0F0U, 0xFF00FF00U, 0xFFFF0000U}; -+ uint32_t r = (((n & b[0]) != 0U) ? 1U : 0U); -+ -+ r |= (uint32_t) (((n & b[4]) != 0U) ? (1U << 4) : 0U); -+ r |= (uint32_t) (((n & b[3]) != 0U) ? (1U << 3) : 0U); -+ r |= (uint32_t) (((n & b[2]) != 0U) ? (1U << 2) : 0U); -+ r |= (uint32_t) (((n & b[1]) != 0U) ? (1U << 1) : 0U); -+ -+ return r; -+} -+ -+/*************************************************************************/ /*! -+@Description Compute log2(n) for exact powers of two only -+@Input n Must be a power of two -+@Return log2(n) -+*/ /**************************************************************************/ -+static INLINE uint32_t __const_function ExactLog2_64(uint64_t n) -+{ -+ static const uint64_t b[] = -+ {0xAAAAAAAAAAAAAAAAUL, 0xCCCCCCCCCCCCCCCCUL, -+ 0xF0F0F0F0F0F0F0F0UL, 0xFF00FF00FF00FF00UL, -+ 0xFFFF0000FFFF0000UL, 0xFFFFFFFF00000000UL}; -+ uint32_t r = (((n & b[0]) != 0U) ? 1U : 0U); -+ -+ r |= (uint32_t) (((n & b[5]) != 0U) ? (1U << 5) : 0U); -+ r |= (uint32_t) (((n & b[4]) != 0U) ? (1U << 4) : 0U); -+ r |= (uint32_t) (((n & b[3]) != 0U) ? (1U << 3) : 0U); -+ r |= (uint32_t) (((n & b[2]) != 0U) ? (1U << 2) : 0U); -+ r |= (uint32_t) (((n & b[1]) != 0U) ? (1U << 1) : 0U); -+ -+ return r; -+} -+ -+#endif /* #if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) */ -+ -+/*************************************************************************/ /*! -+@Description Compute floor(log2(size)) , where size is the max of 3 sizes -+ This is almost always the ONLY EVER valid use of FloorLog2. -+ Usually CeilLog2() should be used instead. -+ For a 5x5x1 texture, the 3 miplevels are: -+ 0: 5x5x1 -+ 1: 2x2x1 -+ 2: 1x1x1 -+ -+ For an 8x8x1 texture, the 4 miplevels are: -+ 0: 8x8x1 -+ 1: 4x4x1 -+ 2: 2x2x1 -+ 3: 1x1x1 -+ -+ -+@Input sizeX, sizeY, sizeZ -+@Return Count of mipmap levels for given dimensions -+*/ /**************************************************************************/ -+static INLINE uint32_t __const_function NumMipLevels(uint32_t sizeX, uint32_t sizeY, uint32_t sizeZ) -+{ -+ -+ uint32_t maxSize = MAX(MAX(sizeX, sizeY), sizeZ); -+ return FloorLog2(maxSize) + 1U; -+} -+ -+#endif /* LOG2_H */ -diff --git a/drivers/gpu/drm/img-rogue/mem_utils.c b/drivers/gpu/drm/img-rogue/mem_utils.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/mem_utils.c -@@ -0,0 +1,445 @@ -+/*************************************************************************/ /*! -+@File -+@Title Memory manipulation functions -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@Description Memory related functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "osfunc_common.h" -+#include "img_defs.h" -+ -+/* This workaround is only *required* on ARM64. Avoid building or including -+ * it by default on other architectures, unless the 'safe memcpy' test flag -+ * is enabled. (The code should work on other architectures.) -+ */ -+ -+ -+ -+/* NOTE: This C file is compiled with -ffreestanding to avoid pattern matching -+ * by the compiler to stdlib functions, and it must only use the below -+ * headers. Do not include any IMG or services headers in this file. -+ */ -+#if defined(__KERNEL__) && defined(__linux__) -+#include -+#else -+#include -+#endif -+ -+/* The attribute "vector_size" will generate floating point instructions -+ * and use FPU registers. In kernel OS, the FPU registers might be corrupted -+ * when CPU is doing context switch because FPU registers are not expected to -+ * be stored. -+ * GCC enables compiler option, -mgeneral-regs-only, by default. -+ * This option restricts the generated code to use general registers only -+ * so that we don't have issues on that. -+ */ -+#if defined(__KERNEL__) && defined(__clang__) -+ -+#define DEVICE_MEMSETCPY_NON_VECTOR_KM -+/* Loading or storing 16 or 32 bytes is only supported on 64-bit machines. 
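To make the choice between the vector and non-vector paths concrete: with DEVICE_MEMSETCPY_ALIGN_IN_BYTES set to 16 (an assumed value, for illustration), the copy unit reduces to one of the two shapes below, and the byte-by-byte preamble in DeviceMemCopy() runs for at most 15 bytes before the 16-byte main loop takes over:

/* Sketch only -- what the block type boils down to for a 16-byte alignment
 * setting; the real selection is done by the preprocessor logic that follows. */
#if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM)
typedef __uint128_t example_block_t;              /* integer registers only   */
#else
typedef unsigned int example_block_t
    __attribute__((vector_size(16)));             /* compiler may use SIMD/FP */
#endif

/* In either case sizeof(example_block_t) == 16. */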
*/ -+#if DEVICE_MEMSETCPY_ALIGN_IN_BYTES > 8 -+typedef __uint128_t uint128_t; -+ -+typedef struct -+{ -+ uint128_t ui128DataFields[2]; -+} -+uint256_t; -+#endif -+ -+#endif -+ -+/* This file is only intended to be used on platforms which use GCC or Clang, -+ * due to its requirement on __attribute__((vector_size(n))), typeof() and -+ * __SIZEOF__ macros. -+ */ -+ -+#if defined(__GNUC__) -+ -+#ifndef MIN -+#define MIN(a, b) \ -+ ({__typeof(a) _a = (a); __typeof(b) _b = (b); _a > _b ? _b : _a;}) -+#endif -+ -+#if !defined(DEVICE_MEMSETCPY_ALIGN_IN_BYTES) -+#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES __SIZEOF_LONG__ -+#endif -+#if (DEVICE_MEMSETCPY_ALIGN_IN_BYTES & (DEVICE_MEMSETCPY_ALIGN_IN_BYTES - 1)) != 0 -+#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be a power of 2" -+#endif -+#if DEVICE_MEMSETCPY_ALIGN_IN_BYTES < 4 -+#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be equal or greater than 4" -+#endif -+ -+#if __SIZEOF_POINTER__ != __SIZEOF_LONG__ -+#error No support for architectures where void* and long are sized differently -+#endif -+ -+#if __SIZEOF_LONG__ > DEVICE_MEMSETCPY_ALIGN_IN_BYTES -+/* Meaningless, and harder to do correctly */ -+# error Cannot handle DEVICE_MEMSETCPY_ALIGN_IN_BYTES < sizeof(long) -+typedef unsigned long block_t; -+#elif __SIZEOF_LONG__ <= DEVICE_MEMSETCPY_ALIGN_IN_BYTES -+# if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) -+# if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8 -+ typedef uint64_t block_t; -+# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16 -+ typedef uint128_t block_t; -+# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32 -+ typedef uint256_t block_t; -+# endif -+# else -+typedef unsigned int block_t -+ __attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES))); -+# endif -+# if defined(__arm64__) || defined(__aarch64__) -+# if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8 -+# define DEVICE_MEMSETCPY_ARM64 -+# define REGSZ "w" -+# define REGCL "w" -+# define BVCLB "r" -+# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16 -+# define DEVICE_MEMSETCPY_ARM64 -+# define REGSZ "x" -+# define REGCL "x" -+# define BVCLB "r" -+# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32 -+# if defined(__ARM_NEON_FP) -+# define DEVICE_MEMSETCPY_ARM64 -+# define REGSZ "q" -+# define REGCL "v" -+# define BVCLB "w" -+# endif -+# endif -+# if defined(DEVICE_MEMSETCPY_ARM64) -+# if defined(DEVICE_MEMSETCPY_ARM64_NON_TEMPORAL) -+# define NSHLD() __asm__ ("dmb nshld") -+# define NSHST() __asm__ ("dmb nshst") -+# define LDP "ldnp" -+# define STP "stnp" -+# else -+# define NSHLD() -+# define NSHST() -+# define LDP "ldp" -+# define STP "stp" -+# endif -+# if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) -+# if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8 -+typedef uint32_t block_half_t; -+# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16 -+typedef uint64_t block_half_t; -+# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32 -+typedef uint128_t block_half_t; -+# endif -+# else -+ typedef unsigned int block_half_t -+ __attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES / 2))); -+# endif -+# endif -+# endif -+#endif -+ -+__attribute__((visibility("hidden"))) -+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t uSize) -+{ -+ volatile const char *pcSrc = pvSrc; -+ volatile char *pcDst = pvDst; -+ size_t uPreambleBytes; -+ int bBlockCopy = 0; -+ -+ size_t uSrcUnaligned = (size_t)pcSrc % sizeof(block_t); -+ size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t); -+ -+ if (!uSrcUnaligned && !uDstUnaligned) -+ { -+ /* Neither pointer is unaligned. Optimal case. 
*/ -+ bBlockCopy = 1; -+ } -+ else -+ { -+ if (uSrcUnaligned == uDstUnaligned) -+ { -+ /* Neither pointer is usefully aligned, but they are misaligned in -+ * the same way, so we can copy a preamble in a slow way, then -+ * optimize the rest. -+ */ -+ uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize); -+ uSize -= uPreambleBytes; -+ while (uPreambleBytes) -+ { -+ *pcDst++ = *pcSrc++; -+ uPreambleBytes--; -+ } -+ -+ bBlockCopy = 1; -+ } -+ else if ((uSrcUnaligned | uDstUnaligned) % sizeof(int) == 0) -+ { -+ /* Both pointers are at least 32-bit aligned, and we assume that -+ * the processor must handle all kinds of 32-bit load-stores. -+ * NOTE: Could we optimize this with a non-temporal version? -+ */ -+ if (uSize >= sizeof(int)) -+ { -+ volatile int *piSrc = (int *)((void *)pcSrc); -+ volatile int *piDst = (int *)((void *)pcDst); -+ -+ while (uSize >= sizeof(int)) -+ { -+ *piDst++ = *piSrc++; -+ uSize -= sizeof(int); -+ } -+ -+ pcSrc = (char *)((void *)piSrc); -+ pcDst = (char *)((void *)piDst); -+ } -+ } -+ } -+ -+ if (bBlockCopy && uSize >= sizeof(block_t)) -+ { -+ volatile block_t *pSrc = (block_t *)((void *)pcSrc); -+ volatile block_t *pDst = (block_t *)((void *)pcDst); -+ -+#if defined(DEVICE_MEMSETCPY_ARM64) -+ NSHLD(); -+#endif -+ -+ while (uSize >= sizeof(block_t)) -+ { -+#if defined(DEVICE_MEMSETCPY_ARM64) -+ __asm__ (LDP " " REGSZ "0, " REGSZ "1, [%[pSrc]]\n\t" -+ STP " " REGSZ "0, " REGSZ "1, [%[pDst]]" -+ : -+ : [pSrc] "r" (pSrc), [pDst] "r" (pDst) -+ : "memory", REGCL "0", REGCL "1"); -+#else -+ *pDst = *pSrc; -+#endif -+ pDst++; -+ pSrc++; -+ uSize -= sizeof(block_t); -+ } -+ -+#if defined(DEVICE_MEMSETCPY_ARM64) -+ NSHST(); -+#endif -+ -+ pcSrc = (char *)((void *)pSrc); -+ pcDst = (char *)((void *)pDst); -+ } -+ -+ while (uSize) -+ { -+ *pcDst++ = *pcSrc++; -+ uSize--; -+ } -+} -+ -+__attribute__((visibility("hidden"))) -+void DeviceMemSet(void *pvDst, unsigned char ui8Value, size_t uSize) -+{ -+ volatile char *pcDst = pvDst; -+ size_t uPreambleBytes; -+ -+ size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t); -+ -+ if (uDstUnaligned) -+ { -+ uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize); -+ uSize -= uPreambleBytes; -+ while (uPreambleBytes) -+ { -+ *pcDst++ = ui8Value; -+ uPreambleBytes--; -+ } -+ } -+ -+ if (uSize >= sizeof(block_t)) -+ { -+ volatile block_t *pDst = (block_t *)((void *)pcDst); -+ size_t i, uBlockSize; -+#if defined(DEVICE_MEMSETCPY_ARM64) -+ typedef block_half_t BLK_t; -+#else -+ typedef block_t BLK_t; -+#endif /* defined(DEVICE_MEMSETCPY_ARM64) */ -+ -+#if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) -+ BLK_t bValue = 0; -+ -+ uBlockSize = sizeof(BLK_t) / sizeof(ui8Value); -+ -+ for (i = 0; i < uBlockSize; i++) -+ { -+ bValue |= (BLK_t)ui8Value << ((uBlockSize - i - 1) * BITS_PER_BYTE); -+ } -+#else -+ BLK_t bValue = {0}; -+ -+ uBlockSize = sizeof(bValue) / sizeof(unsigned int); -+ for (i = 0; i < uBlockSize; i++) -+ bValue[i] = ui8Value << 24U | -+ ui8Value << 16U | -+ ui8Value << 8U | -+ ui8Value; -+#endif /* defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) */ -+ -+#if defined(DEVICE_MEMSETCPY_ARM64) -+ NSHLD(); -+#endif -+ -+ while (uSize >= sizeof(block_t)) -+ { -+#if defined(DEVICE_MEMSETCPY_ARM64) -+ __asm__ (STP " %" REGSZ "[bValue], %" REGSZ "[bValue], [%[pDst]]" -+ : -+ : [bValue] BVCLB (bValue), [pDst] "r" (pDst) -+ : "memory"); -+#else -+ *pDst = bValue; -+#endif -+ pDst++; -+ uSize -= sizeof(block_t); -+ } -+ -+#if defined(DEVICE_MEMSETCPY_ARM64) -+ NSHST(); -+#endif -+ -+ pcDst = (char *)((void *)pDst); -+ } -+ -+ while (uSize) 
-+ { -+ *pcDst++ = ui8Value; -+ uSize--; -+ } -+} -+ -+#endif /* defined(__GNUC__) */ -+ -+/* Potentially very slow (but safe) fallbacks for non-GNU C compilers */ -+IMG_INTERNAL -+void DeviceMemCopyBytes(void *pvDst, const void *pvSrc, size_t uSize) -+{ -+ volatile const char *pcSrc = pvSrc; -+ volatile char *pcDst = pvDst; -+ -+ while (uSize) -+ { -+ *pcDst++ = *pcSrc++; -+ uSize--; -+ } -+} -+ -+IMG_INTERNAL -+void DeviceMemSetBytes(void *pvDst, unsigned char ui8Value, size_t uSize) -+{ -+ volatile char *pcDst = pvDst; -+ -+ while (uSize) -+ { -+ *pcDst++ = ui8Value; -+ uSize--; -+ } -+} -+ -+#if !defined(__QNXNTO__) /* Ignore Neutrino as it uses strlcpy */ -+ -+#if defined(__KERNEL__) && defined(__linux__) -+/* -+ * In case of Linux kernel-mode in a debug build, choose the variant -+ * of StringLCopy that uses strlcpy and logs truncation via a stack dump. -+ * For Linux kernel-mode in a release build, strlcpy alone is used. -+ */ -+#if defined(DEBUG) -+IMG_INTERNAL -+size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize) -+{ -+ /* -+ * Let strlcpy handle any truncation cases correctly. -+ * We will definitely get a NUL-terminated string set in pszDest -+ */ -+ size_t uSrcSize = strlcpy(pszDest, pszSrc, uDataSize); -+ -+#if defined(PVR_DEBUG_STRLCPY) -+ /* Handle truncation by dumping calling stack if debug allows */ -+ if (uSrcSize >= uDataSize) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: String truncated Src = '<%s>' %ld bytes, Dest = '%s'", -+ __func__, pszSrc, (long)uDataSize, pszDest)); -+ OSDumpStack(); -+ } -+#endif /* defined(PVR_DEBUG_STRLCPY) && defined(DEBUG) */ -+ -+ return uSrcSize; -+} -+#endif /* defined(DEBUG) */ -+ -+#else /* defined(__KERNEL__) && defined(__linux__) */ -+/* -+ * For every other platform, make use of the strnlen and strncpy -+ * implementation of StringLCopy. -+ * NOTE: It is crucial to avoid memcpy as this has a hidden side-effect of -+ * dragging in whatever the build-environment flavour of GLIBC is which can -+ * cause unexpected failures for host-side command execution. -+ */ -+IMG_INTERNAL -+size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize) -+{ -+ size_t uSrcSize = strnlen(pszSrc, uDataSize); -+ -+ (void)strncpy(pszDest, pszSrc, uSrcSize); -+ if (uSrcSize == uDataSize) -+ { -+ pszDest[uSrcSize-1] = '\0'; -+ } -+ else -+ { -+ pszDest[uSrcSize] = '\0'; -+ } -+ -+ return uSrcSize; -+} -+ -+#endif /* defined(__KERNEL__) && defined(__linux__) */ -+ -+#endif /* !defined(__QNXNTO__) */ -diff --git a/drivers/gpu/drm/img-rogue/mmu_common.c b/drivers/gpu/drm/img-rogue/mmu_common.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/mmu_common.c -@@ -0,0 +1,4800 @@ -+/*************************************************************************/ /*! -+@File -+@Title Common MMU Management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements basic low level control of MMU. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
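The truncation behaviour of StringLCopy above is easiest to see with concrete sizes; a minimal sketch with a hypothetical buffer (both the strlcpy and the strnlen/strncpy variants leave a NUL-terminated result and return a value >= the buffer size on truncation):

static void ExampleStringLCopy(void)
{
    IMG_CHAR aszDest[8];
    size_t uLen;

    /* Source longer than the buffer: copy is truncated to 7 characters
     * plus the NUL terminator; uLen >= sizeof(aszDest) flags truncation. */
    uLen = StringLCopy(aszDest, "0123456789", sizeof(aszDest));

    /* Source that fits: copied verbatim, NUL-terminated, uLen == 5. */
    uLen = StringLCopy(aszDest, "short", sizeof(aszDest));
    (void)uLen;
}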
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+ -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+#include "pvr_debug.h" -+#include "dllist.h" -+#include "osfunc.h" -+#include "allocmem.h" -+ -+#include "pvr_notifier.h" -+#include "pvrsrv.h" -+#include "htbserver.h" -+#include "pvr_ricommon.h" -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+# include "process_stats.h" -+# include "proc_stats.h" -+#endif -+ -+#if defined(PDUMP) -+#include "pdump_km.h" -+#include "pdump_physmem.h" -+#endif -+ -+#include "physmem.h" -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+#include "physmem_lma.h" -+#endif -+ -+/* -+Major Interfaces to other modules: -+ -+Let's keep this graph up-to-date: -+ -+ +-----------+ -+ | devicemem | -+ +-----------+ -+ | -+ +============+ -+ | mmu_common | -+ +============+ -+ | -+ +-----------------+ -+ | | -+ +---------+ +----------+ -+ | pmr | | device | -+ +---------+ +----------+ -+ */ -+ -+#include "mmu_common.h" -+#include "pmr.h" -+#include "devicemem_server_utils.h" -+ -+/* #define MMU_OBJECT_REFCOUNT_DEBUGING 1 */ -+#if defined(MMU_OBJECT_REFCOUNT_DEBUGING) -+#define MMU_OBJ_DBG(x) PVR_DPF(x) -+#else -+#define MMU_OBJ_DBG(x) -+#endif -+ -+#define SCRATCH_PAGE "SCRATCH_PAGE" -+#define DEV_ZERO_PAGE "DEV_ZERO_PAGE" -+#define PVR_SCRATCH_PAGE_INIT_VALUE 0 -+#define PVR_ZERO_PAGE_INIT_VALUE 0 -+ -+/*! -+ * Refcounted structure that is shared between the context and -+ * the cleanup thread items. 
-+ * It is used to keep track of all cleanup items and whether the creating -+ * MMU context has been destroyed and therefore is not allowed to be -+ * accessed any more. -+ * -+ * The cleanup thread is used to defer the freeing of the page tables -+ * because we have to make sure that the MMU cache has been invalidated. -+ * If we don't take care of this the MMU might partially access cached -+ * and uncached tables which might lead to inconsistencies and in the -+ * worst case to MMU pending faults on random memory. -+ */ -+typedef struct _MMU_CTX_CLEANUP_DATA_ -+{ -+ /*! Refcount to know when this structure can be destroyed */ -+ ATOMIC_T iRef; -+ /*! Protect items in this structure, especially the refcount */ -+ POS_LOCK hCleanupLock; -+ /*! List of all cleanup items currently in flight */ -+ DLLIST_NODE sMMUCtxCleanupItemsHead; -+ /*! Was the MMU context destroyed and should not be accessed any more? */ -+ IMG_BOOL bMMUContextExists; -+#if defined(SUPPORT_CUSTOM_OSID_EMISSION) -+ /*! Associated OSid for this context */ -+ IMG_UINT32 ui32OSid; -+#endif /* defined(SUPPORT_CUSTOM_OSID_EMISSION) */ -+} MMU_CTX_CLEANUP_DATA; -+ -+ -+/*! -+ * Structure holding one or more page tables that need to be -+ * freed after the MMU cache has been flushed which is signalled when -+ * the stored sync has a value that is <= the required value. -+ */ -+typedef struct _MMU_CLEANUP_ITEM_ -+{ -+ /*! Cleanup thread data */ -+ PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn; -+ /*! List to hold all the MMU_MEMORY_MAPPINGs, i.e. page tables */ -+ DLLIST_NODE sMMUMappingHead; -+ /*! Node of the cleanup item list for the context */ -+ DLLIST_NODE sMMUCtxCleanupItem; -+ /* Pointer to the cleanup meta data */ -+ MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData; -+ /* Sync to query if the MMU cache was flushed */ -+ PVRSRV_CLIENT_SYNC_PRIM *psSync; -+ /*! The update value of the sync to signal that the cache was flushed */ -+ IMG_UINT32 uiRequiredSyncVal; -+ /*! The update value of the power off counter */ -+ IMG_UINT32 uiRequiredPowerOffCounter; -+ /*! The device node needed to free the page tables */ -+ PVRSRV_DEVICE_NODE *psDevNode; -+} MMU_CLEANUP_ITEM; -+ -+/*! -+ All physical allocations and frees are relative to this context, so -+ we would get all the allocations of PCs, PDs, and PTs from the same -+ RA. -+ -+ We have one per MMU context in case we have mixed UMA/LMA devices -+ within the same system. -+ */ -+typedef struct _MMU_PHYSMEM_CONTEXT_ -+{ -+ /*! Associated MMU_CONTEXT */ -+ struct _MMU_CONTEXT_ *psMMUContext; -+ -+ /*! Parent device node */ -+ PVRSRV_DEVICE_NODE *psDevNode; -+ -+ /*! Refcount so we know when to free up the arena */ -+ IMG_UINT32 uiNumAllocations; -+ -+ /*! Arena from which physical memory is derived */ -+ RA_ARENA *psPhysMemRA; -+ /*! Arena name */ -+ IMG_CHAR *pszPhysMemRAName; -+ /*! Size of arena name string */ -+ size_t uiPhysMemRANameAllocSize; -+ -+ /*! Meta data for deferred cleanup */ -+ MMU_CTX_CLEANUP_DATA *psCleanupData; -+ /*! Temporary list of all deferred MMU_MEMORY_MAPPINGs. */ -+ DLLIST_NODE sTmpMMUMappingHead; -+ -+#if defined(SUPPORT_CUSTOM_OSID_EMISSION) -+ IMG_UINT32 ui32OSid; -+ IMG_UINT32 ui32OSidReg; -+ IMG_BOOL bOSidAxiProt; -+#endif -+ -+} MMU_PHYSMEM_CONTEXT; -+ -+/*! -+ Mapping structure for MMU memory allocation -+ */ -+typedef struct _MMU_MEMORY_MAPPING_ -+{ -+ /*! Physmem context to allocate from */ -+ MMU_PHYSMEM_CONTEXT *psContext; -+ /*! OS/system Handle for this allocation */ -+ PG_HANDLE sMemHandle; -+ /*! 
CPU virtual address of this allocation */ -+ void *pvCpuVAddr; -+ /*! Device physical address of this allocation */ -+ IMG_DEV_PHYADDR sDevPAddr; -+ /*! Size of this allocation */ -+ size_t uiSize; -+ /*! Number of current mappings of this allocation */ -+ IMG_UINT32 uiCpuVAddrRefCount; -+ /*! Node for the defer free list */ -+ DLLIST_NODE sMMUMappingItem; -+} MMU_MEMORY_MAPPING; -+ -+/*! -+ Memory descriptor for MMU objects. There can be more than one memory -+ descriptor per MMU memory allocation. -+ */ -+typedef struct _MMU_MEMORY_DESC_ -+{ -+ /* NB: bValid is set if this descriptor describes physical -+ memory. This allows "empty" descriptors to exist, such that we -+ can allocate them in batches. */ -+ /*! Does this MMU object have physical backing */ -+ IMG_BOOL bValid; -+ /*! Device Physical address of physical backing */ -+ IMG_DEV_PHYADDR sDevPAddr; -+ /*! CPU virtual address of physical backing */ -+ void *pvCpuVAddr; -+ /*! Mapping data for this MMU object */ -+ MMU_MEMORY_MAPPING *psMapping; -+ /*! Memdesc offset into the psMapping */ -+ IMG_UINT32 uiOffset; -+ /*! Size of the Memdesc */ -+ IMG_UINT32 uiSize; -+} MMU_MEMORY_DESC; -+ -+/*! -+ MMU levelx structure. This is generic and is used -+ for all levels (PC, PD, PT). -+ */ -+typedef struct _MMU_Levelx_INFO_ -+{ -+ /*! The Number of entries in this level */ -+ IMG_UINT32 ui32NumOfEntries; -+ -+ /*! Number of times this level has been reference. Note: For Level1 (PTE) -+ we still take/drop the reference when setting up the page tables rather -+ then at map/unmap time as this simplifies things */ -+ IMG_UINT32 ui32RefCount; -+ -+ /*! MemDesc for this level */ -+ MMU_MEMORY_DESC sMemDesc; -+ -+ /*! Array of infos for the next level. Must be last member in structure */ -+ struct _MMU_Levelx_INFO_ *apsNextLevel[1]; -+} MMU_Levelx_INFO; -+ -+/*! -+ MMU context structure -+ */ -+struct _MMU_CONTEXT_ -+{ -+ /*! Originating Connection */ -+ CONNECTION_DATA *psConnection; -+ -+ /*! Device MMU attribute descriptions */ -+ MMU_DEVICEATTRIBS *psDevAttrs; -+ -+ /*! For allocation and deallocation of the physical memory where -+ the pagetables live */ -+ struct _MMU_PHYSMEM_CONTEXT_ *psPhysMemCtx; -+ -+#if defined(PDUMP) -+ /*! PDump context ID (required for PDump commands with virtual addresses) */ -+ IMG_UINT32 uiPDumpContextID; -+ -+ /*! The refcount of the PDump context ID */ -+ IMG_UINT32 ui32PDumpContextIDRefCount; -+#endif -+ -+ /*! MMU cache invalidation flags. For Rogue, only one set of flags are -+ * maintained (in the KernelMMUCtx) and used for all context page table -+ * updates, while in Volcanic each context maintains its own flags -+ * for the PTs it contains. This is due to the different memory hierarchy -+ * designs in use between the architectures, See SLC_VIVT feature. */ -+ ATOMIC_T sCacheFlags; -+ -+ /*! Lock to ensure exclusive access when manipulating the MMU context or -+ * reading and using its content -+ */ -+ POS_LOCK hLock; -+ -+ /*! Base level info structure. Must be last member in structure */ -+ MMU_Levelx_INFO sBaseLevelInfo; -+ /* NO OTHER MEMBERS AFTER THIS STRUCTURE ! */ -+}; -+ -+/* -+ * Only the kernel created (direct bridge) firmware memory context will -+ * have a NULL connection and all other application memory context get a -+ * valid connection object. -+ */ -+#define _MMU_IS_FWKM_CTX(_ctx) ((_ctx)->psConnection == NULL) -+ -+/* -+ * Used to determine if the MMU Ctx provided is the FWKM memory context -+ * and if it belongs to the VZ Guest. 
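The "must be last member" comments above mark the usual trailing-array idiom: MMU_Levelx_INFO ends in a one-element apsNextLevel[] array (and MMU_CONTEXT ends in sBaseLevelInfo) precisely so the allocation can be over-sized to hold the real number of entries. A minimal sketch of the allocation arithmetic, using a hypothetical helper name:

/* Hypothetical helper: allocate a level-info node whose trailing array can
 * hold ui32NumEntries next-level pointers. */
static MMU_Levelx_INFO *ExampleAllocLevelInfo(IMG_UINT32 ui32NumEntries)
{
    /* sizeof(MMU_Levelx_INFO) already accounts for one array slot. */
    size_t uiSize = sizeof(MMU_Levelx_INFO) +
                    ((size_t)ui32NumEntries - 1U) * sizeof(MMU_Levelx_INFO *);
    MMU_Levelx_INFO *psLevel = OSAllocMem(uiSize);

    if (psLevel != NULL)
    {
        psLevel->ui32NumOfEntries = ui32NumEntries;
        psLevel->ui32RefCount = 0;
    }
    return psLevel;
}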
-+ */ -+#define _MMU_IS_FWKM_CTX_VZGUEST(_ctx) (PVRSRV_VZ_MODE_IS(GUEST) && _MMU_IS_FWKM_CTX(_ctx)) -+ -+static const IMG_DEV_PHYADDR gsBadDevPhyAddr = {MMU_BAD_PHYS_ADDR}; -+ -+#if defined(DEBUG) -+#include "log2.h" -+#endif -+ -+#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__) -+static IMG_UINT32 g_ui32MMULeakCounter = 0; -+static DEFINE_MUTEX(g_sMMULeakMutex); -+#endif -+ -+/***************************************************************************** -+ * Utility functions * -+ *****************************************************************************/ -+ -+/*************************************************************************/ /*! -+@Function _FreeMMUMapping -+ -+@Description Free a given dllist of MMU_MEMORY_MAPPINGs and the page tables -+ they represent. -+ -+@Input psDevNode Device node -+ -+@Input psTmpMMUMappingHead List of MMU_MEMORY_MAPPINGs to free -+ */ -+/*****************************************************************************/ -+static void -+_FreeMMUMapping(PVRSRV_DEVICE_NODE *psDevNode, -+ PDLLIST_NODE psTmpMMUMappingHead) -+{ -+ PDLLIST_NODE psNode, psNextNode; -+ -+ /* Free the current list unconditionally */ -+ dllist_foreach_node(psTmpMMUMappingHead, -+ psNode, -+ psNextNode) -+ { -+ MMU_MEMORY_MAPPING *psMapping = IMG_CONTAINER_OF(psNode, -+ MMU_MEMORY_MAPPING, -+ sMMUMappingItem); -+ -+ PhysHeapPagesFree(psDevNode->psMMUPhysHeap, &psMapping->sMemHandle); -+ dllist_remove_node(psNode); -+ OSFreeMem(psMapping); -+ } -+} -+ -+#if defined(SUPPORT_MMU_DEFERRED_FREE) -+/*************************************************************************/ /*! -+@Function _CleanupThread_FreeMMUMapping -+ -+@Description Function to be executed by the cleanup thread to free -+ MMU_MEMORY_MAPPINGs after the MMU cache has been invalidated. -+ -+ This function will request a MMU cache invalidate once and -+ retry to free the MMU_MEMORY_MAPPINGs until the invalidate -+ has been executed. -+ -+ If the memory context that created this cleanup item has been -+ destroyed in the meantime this function will directly free the -+ MMU_MEMORY_MAPPINGs without waiting for any MMU cache -+ invalidation. -+ -+@Input pvData Cleanup data in form of a MMU_CLEANUP_ITEM -+ -+@Return PVRSRV_OK if successful otherwise PVRSRV_ERROR_RETRY -+ */ -+/*****************************************************************************/ -+static PVRSRV_ERROR -+_CleanupThread_FreeMMUMapping(void* pvData) -+{ -+ PVRSRV_ERROR eError; -+ MMU_CLEANUP_ITEM *psCleanup = (MMU_CLEANUP_ITEM *)pvData; -+ MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData = psCleanup->psMMUCtxCleanupData; -+ PVRSRV_DEVICE_NODE *psDevNode = psCleanup->psDevNode; -+ IMG_BOOL bFreeNow; -+ -+ OSLockAcquire(psMMUCtxCleanupData->hCleanupLock); -+ -+ /* Don't attempt to free anything when the context has been destroyed. -+ * Especially don't access any device specific structures any more!*/ -+ if (!psMMUCtxCleanupData->bMMUContextExists) -+ { -+ OSFreeMem(psCleanup); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_OK, e0); -+ } -+ -+ if (psCleanup->psSync == NULL) -+ { -+ /* Kick to invalidate the MMU caches and get sync info */ -+ eError = psDevNode->pfnMMUCacheInvalidateKick(psDevNode, -+ &psCleanup->uiRequiredSyncVal); -+ if (eError != PVRSRV_OK) -+ { -+ OSLockRelease(psMMUCtxCleanupData->hCleanupLock); -+ return PVRSRV_ERROR_RETRY; -+ } -+ psCleanup->psSync = psDevNode->psMMUCacheSyncPrim; -+ /* If we have no sync reference we treat this as a simple FREE. -+ * We cannot retry as there will never be a sync-prim to kick / invalidate. 
-+ */ -+ if (psCleanup->psSync == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Device %u has no MMUCacheSyncPrim", -+ __func__, psDevNode->sDevId.ui32InternalID)); -+ bFreeNow = IMG_TRUE; -+ goto freeNow; -+ } -+ } -+ -+ /* Has the invalidate executed (sync is updated when the Firmware performs -+ * the invalidate)? */ -+ bFreeNow = PVRSRVHasCounter32Advanced(OSReadDeviceMem32(psCleanup->psSync->pui32LinAddr), -+ psCleanup->uiRequiredSyncVal); -+ -+ /* Has there been a power off? */ -+ bFreeNow = bFreeNow || PVRSRVHasCounter32Advanced(psDevNode->uiPowerOffCounter, -+ psCleanup->uiRequiredPowerOffCounter); -+ -+#if defined(NO_HARDWARE) -+ /* In NOHW the syncs will never be updated so just free the tables */ -+ bFreeNow = IMG_TRUE; -+#endif -+ /* If the Invalidate operation is not completed, check if the operation timed out */ -+ if (!bFreeNow) -+ { -+ IMG_UINT32 uiTimeStart = psCleanup->sCleanupThreadFn.ui32TimeStart; -+ IMG_UINT32 uiTimeEnd = psCleanup->sCleanupThreadFn.ui32TimeEnd; -+ -+ /* If the time left for the completion of invalidate operation is -+ * within 500ms of time-out, consider the operation as timed out */ -+ if ((uiTimeEnd - uiTimeStart - 500) <= (OSClockms() - uiTimeStart)) -+ { -+ /* Consider the operation is timed out */ -+ bFreeNow = IMG_TRUE; -+ } -+ } -+ -+freeNow: -+ /* Free if the invalidate operation completed or the operation itself timed out */ -+ if (bFreeNow) -+ { -+ _FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead); -+ -+ dllist_remove_node(&psCleanup->sMMUCtxCleanupItem); -+ -+ OSFreeMem(psCleanup); -+ -+ eError = PVRSRV_OK; -+ } -+ else -+ { -+ eError = PVRSRV_ERROR_RETRY; -+ } -+ -+e0: -+ -+ /* If this cleanup task has been successfully executed we can -+ * decrease the context cleanup data refcount. Successfully -+ * means here that the MMU_MEMORY_MAPPINGs have been freed by -+ * either this cleanup task of when the MMU context has been -+ * destroyed. */ -+ if (eError == PVRSRV_OK) -+ { -+ OSLockRelease(psMMUCtxCleanupData->hCleanupLock); -+ -+ if (OSAtomicDecrement(&psMMUCtxCleanupData->iRef) == 0) -+ { -+ OSLockDestroy(psMMUCtxCleanupData->hCleanupLock); -+ OSFreeMem(psMMUCtxCleanupData); -+ } -+ } -+ else -+ { -+ OSLockRelease(psMMUCtxCleanupData->hCleanupLock); -+ } -+ -+ -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function _SetupCleanup_FreeMMUMapping -+ -+@Description Setup a cleanup item for the cleanup thread that will -+ kick off a MMU invalidate request and free the associated -+ MMU_MEMORY_MAPPINGs when the invalidate was successful. 
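A minimal sketch of the wrap-safe counter test the checks above rely on (the HasCounter32Advanced and CanFreeNow helpers below are hypothetical stand-ins for the PVRSRVHasCounter32Advanced calls used in the patch, not the driver's implementation):

    #include <stdbool.h>
    #include <stdint.h>

    /* Wrap-safe "has the counter reached or passed the required value" test.
     * The subtraction is performed modulo 2^32, so the result stays correct
     * across counter wrap as long as the two values are less than 2^31 apart. */
    static bool HasCounter32Advanced(uint32_t uiCurrent, uint32_t uiRequired)
    {
        return (uint32_t)(uiCurrent - uiRequired) < 0x80000000U;
    }

    /* Usage mirroring the deferred free above: the page tables may be freed once
     * either the MMU cache invalidate sync or the power-off counter has advanced
     * to the value recorded when the cleanup item was queued. */
    static bool CanFreeNow(uint32_t uiSyncVal, uint32_t uiRequiredSyncVal,
                           uint32_t uiPowerOffCounter, uint32_t uiRequiredPowerOff)
    {
        return HasCounter32Advanced(uiSyncVal, uiRequiredSyncVal) ||
               HasCounter32Advanced(uiPowerOffCounter, uiRequiredPowerOff);
    }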
-+ -+@Input psPhysMemCtx The current MMU physmem context -+ */ -+/*****************************************************************************/ -+static void -+_SetupCleanup_FreeMMUMapping(MMU_PHYSMEM_CONTEXT *psPhysMemCtx) -+{ -+ -+ MMU_CLEANUP_ITEM *psCleanupItem; -+ MMU_CTX_CLEANUP_DATA *psCleanupData = psPhysMemCtx->psCleanupData; -+ PVRSRV_DEVICE_NODE *psDevNode = psPhysMemCtx->psDevNode; -+ -+ if (dllist_is_empty(&psPhysMemCtx->sTmpMMUMappingHead)) -+ { -+ goto e0; -+ } -+ -+#if defined(PDUMP) -+ /* Free the page tables immediately in case of pdump, which avoids -+ * changing script files due to the additional invalidation kick */ -+ goto e1; -+#endif -+ -+ /* Don't defer the freeing if we are currently unloading the driver -+ * or if the sync has been destroyed */ -+ if (PVRSRVGetPVRSRVData()->bUnload || -+ psDevNode->psMMUCacheSyncPrim == NULL) -+ { -+ goto e1; -+ } -+ -+ /* Allocate a cleanup item */ -+ psCleanupItem = OSAllocMem(sizeof(*psCleanupItem)); -+ if (!psCleanupItem) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to get memory for deferred page table cleanup. " -+ "Freeing tables immediately", -+ __func__)); -+ goto e1; -+ } -+ -+ /* Set sync to NULL to indicate we did not interact with -+ * the FW yet. Kicking off an MMU cache invalidate should -+ * be done in the cleanup thread to not waste time here. */ -+ psCleanupItem->psSync = NULL; -+ psCleanupItem->uiRequiredSyncVal = 0; -+ psCleanupItem->uiRequiredPowerOffCounter = psDevNode->uiPowerOffCounterNext; -+ psCleanupItem->psDevNode = psDevNode; -+ psCleanupItem->psMMUCtxCleanupData = psCleanupData; -+ -+ OSAtomicIncrement(&psCleanupData->iRef); -+ -+ /* Move the page tables to free to the cleanup item */ -+ dllist_replace_head(&psPhysMemCtx->sTmpMMUMappingHead, -+ &psCleanupItem->sMMUMappingHead); -+ -+ /* Add the cleanup item itself to the context list */ -+ dllist_add_to_tail(&psCleanupData->sMMUCtxCleanupItemsHead, -+ &psCleanupItem->sMMUCtxCleanupItem); -+ -+ /* Setup the cleanup thread data and add the work item */ -+ psCleanupItem->sCleanupThreadFn.pfnFree = _CleanupThread_FreeMMUMapping; -+ psCleanupItem->sCleanupThreadFn.pvData = psCleanupItem; -+ psCleanupItem->sCleanupThreadFn.bDependsOnHW = IMG_TRUE; -+ psCleanupItem->sCleanupThreadFn.eCleanupType = PVRSRV_CLEANUP_TYPE_MMU; -+ CLEANUP_THREAD_SET_RETRY_TIMEOUT(&psCleanupItem->sCleanupThreadFn, -+ CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT); -+ -+ PVRSRVCleanupThreadAddWork(psDevNode, &psCleanupItem->sCleanupThreadFn); -+ -+ return; -+ -+e1: -+ /* Free the page tables now */ -+ _FreeMMUMapping(psDevNode, &psPhysMemCtx->sTmpMMUMappingHead); -+e0: -+ return; -+} -+#endif -+ -+/*************************************************************************/ /*! 
-+@Function _CalcPCEIdx -+ -+@Description Calculate the page catalogue index -+ -+@Input sDevVAddr Device virtual address -+ -+@Input psDevVAddrConfig Configuration of the virtual address -+ -+@Input bRoundUp Round up the index -+ -+@Return The page catalogue index -+ */ -+/*****************************************************************************/ -+static IMG_UINT32 _CalcPCEIdx(IMG_DEV_VIRTADDR sDevVAddr, -+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, -+ IMG_BOOL bRoundUp) -+{ -+ IMG_DEV_VIRTADDR sTmpDevVAddr; -+ IMG_UINT32 ui32RetVal; -+ -+ sTmpDevVAddr = sDevVAddr; -+ -+ if (bRoundUp) -+ { -+ sTmpDevVAddr.uiAddr--; -+ } -+ ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPCIndexMask) -+ >> psDevVAddrConfig->uiPCIndexShift); -+ -+ if (bRoundUp) -+ { -+ ui32RetVal++; -+ } -+ -+ return ui32RetVal; -+} -+ -+ -+/*************************************************************************/ /*! -+@Function _CalcPDEIdx -+ -+@Description Calculate the page directory index -+ -+@Input sDevVAddr Device virtual address -+ -+@Input psDevVAddrConfig Configuration of the virtual address -+ -+@Input bRoundUp Round up the index -+ -+@Return The page directory index -+ */ -+/*****************************************************************************/ -+static IMG_UINT32 _CalcPDEIdx(IMG_DEV_VIRTADDR sDevVAddr, -+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, -+ IMG_BOOL bRoundUp) -+{ -+ IMG_DEV_VIRTADDR sTmpDevVAddr; -+ IMG_UINT32 ui32RetVal; -+ -+ sTmpDevVAddr = sDevVAddr; -+ -+ if (bRoundUp) -+ { -+ sTmpDevVAddr.uiAddr--; -+ } -+ ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPDIndexMask) -+ >> psDevVAddrConfig->uiPDIndexShift); -+ -+ if (bRoundUp) -+ { -+ ui32RetVal++; -+ } -+ -+ return ui32RetVal; -+} -+ -+ -+/*************************************************************************/ /*! -+@Function _CalcPTEIdx -+ -+@Description Calculate the page entry index -+ -+@Input sDevVAddr Device virtual address -+ -+@Input psDevVAddrConfig Configuration of the virtual address -+ -+@Input bRoundUp Round up the index -+ -+@Return The page entry index -+ */ -+/*****************************************************************************/ -+static IMG_UINT32 _CalcPTEIdx(IMG_DEV_VIRTADDR sDevVAddr, -+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, -+ IMG_BOOL bRoundUp) -+{ -+ IMG_DEV_VIRTADDR sTmpDevVAddr; -+ IMG_UINT32 ui32RetVal; -+ -+ sTmpDevVAddr = sDevVAddr; -+ sTmpDevVAddr.uiAddr -= psDevVAddrConfig->uiOffsetInBytes; -+ if (bRoundUp) -+ { -+ sTmpDevVAddr.uiAddr--; -+ } -+ ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPTIndexMask) -+ >> psDevVAddrConfig->uiPTIndexShift); -+ -+ if (bRoundUp) -+ { -+ ui32RetVal++; -+ } -+ -+ return ui32RetVal; -+} -+ -+ -+/***************************************************************************** -+ * MMU memory allocation/management functions (mem desc) * -+ *****************************************************************************/ -+ -+/*************************************************************************/ /*! -+@Function _MMU_PhysMem_RAImportAlloc -+ -+@Description Imports MMU Px memory into the RA. This is where the -+ actual allocation of physical memory happens. -+ -+@Input hArenaHandle Handle that was passed in during the -+ creation of the RA -+ -+@Input uiSize Size of the memory to import -+ -+@Input uiFlags Flags that where passed in the allocation. -+ -+@Input uBaseAlignment Alignment for the base returned, not used -+ in this context. 
-+ -+@Output puiBase The address of where to insert this import -+ -+@Output puiActualSize The actual size of the import -+ -+@Output phPriv Handle which will be passed back when -+ this import is freed -+ -+@Return PVRSRV_OK if import alloc was successful -+ */ -+/*****************************************************************************/ -+static PVRSRV_ERROR _MMU_PhysMem_RAImportAlloc(RA_PERARENA_HANDLE hArenaHandle, -+ RA_LENGTH_T uiSize, -+ RA_FLAGS_T uiFlags, -+ RA_LENGTH_T uBaseAlignment, -+ const IMG_CHAR *pszAnnotation, -+ RA_BASE_T *puiBase, -+ RA_LENGTH_T *puiActualSize, -+ RA_PERISPAN_HANDLE *phPriv) -+{ -+ MMU_PHYSMEM_CONTEXT *psPhysMemCtx = (MMU_PHYSMEM_CONTEXT *)hArenaHandle; -+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psPhysMemCtx->psDevNode; -+ MMU_MEMORY_MAPPING *psMapping; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiPid = 0; -+ -+ PVR_UNREFERENCED_PARAMETER(pszAnnotation); -+ PVR_UNREFERENCED_PARAMETER(uiFlags); -+ PVR_UNREFERENCED_PARAMETER(uBaseAlignment); -+ -+ PVR_ASSERT(psDevNode != NULL); -+ PVR_GOTO_IF_INVALID_PARAM(psDevNode, eError, e0); -+ -+ psMapping = OSAllocMem(sizeof(MMU_MEMORY_MAPPING)); -+ PVR_GOTO_IF_NOMEM(psMapping, eError, e0); -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ uiPid = psDevNode->eDevState < PVRSRV_DEVICE_STATE_ACTIVE ? -+ PVR_SYS_ALLOC_PID : OSGetCurrentClientProcessIDKM(); -+#endif -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+ /* -+ * Store the OSid in the PG_HANDLE.uiOSid field for use by the -+ * pfnDevPxFree() routine. -+ */ -+ psMapping->sMemHandle.uiOSid = psPhysMemCtx->ui32OSid; -+ eError = PhysHeapPagesAllocGPV(psDevNode->psMMUPhysHeap, -+ TRUNCATE_64BITS_TO_SIZE_T(uiSize), -+ &psMapping->sMemHandle, -+ &psMapping->sDevPAddr, -+ psPhysMemCtx->ui32OSid, -+ uiPid); -+#else -+ eError = PhysHeapPagesAlloc(psDevNode->psMMUPhysHeap, -+ TRUNCATE_64BITS_TO_SIZE_T(uiSize), -+ &psMapping->sMemHandle, -+ &psMapping->sDevPAddr, -+ uiPid); -+#endif -+ if (eError != PVRSRV_OK) -+ { -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ PVRSRVStatsUpdateOOMStat(NULL, psDevNode, PVRSRV_DEVICE_STAT_TYPE_OOM_PHYSMEM_COUNT, -+ OSGetCurrentClientProcessIDKM()); -+#endif -+ goto e1; -+ } -+ -+ psMapping->psContext = psPhysMemCtx; -+ psMapping->uiSize = TRUNCATE_64BITS_TO_SIZE_T(uiSize); -+ -+ psMapping->uiCpuVAddrRefCount = 0; -+ -+ *phPriv = (RA_PERISPAN_HANDLE) psMapping; -+ -+ /* Note: This assumes this memory never gets paged out */ -+ *puiBase = (RA_BASE_T)psMapping->sDevPAddr.uiAddr; -+ *puiActualSize = uiSize; -+ -+ return PVRSRV_OK; -+ -+e1: -+ OSFreeMem(psMapping); -+e0: -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function _MMU_PhysMem_RAImportFree -+ -+@Description Imports MMU Px memory into the RA. This is where the -+ actual free of physical memory happens. 
-+ -+@Input hArenaHandle Handle that was passed in during the -+ creation of the RA -+ -+@Input puiBase The address of where to insert this import -+ -+@Output phPriv Private data that the import alloc provided -+ -+@Return None -+ */ -+/*****************************************************************************/ -+static void _MMU_PhysMem_RAImportFree(RA_PERARENA_HANDLE hArenaHandle, -+ RA_BASE_T uiBase, -+ RA_PERISPAN_HANDLE hPriv) -+{ -+ MMU_MEMORY_MAPPING *psMapping = (MMU_MEMORY_MAPPING *)hPriv; -+ MMU_PHYSMEM_CONTEXT *psPhysMemCtx = (MMU_PHYSMEM_CONTEXT *)hArenaHandle; -+ -+ PVR_UNREFERENCED_PARAMETER(uiBase); -+ -+ /* Check we have dropped all CPU mappings */ -+ PVR_ASSERT(psMapping->uiCpuVAddrRefCount == 0); -+ -+ /* Add mapping to defer free list */ -+ psMapping->psContext = NULL; -+ dllist_add_to_tail(&psPhysMemCtx->sTmpMMUMappingHead, &psMapping->sMMUMappingItem); -+} -+ -+/*************************************************************************/ /*! -+@Function _MMU_PhysMemAlloc -+ -+@Description Allocates physical memory for MMU objects -+ -+@Input psPhysMemCtx Physmem context to do the allocation from -+ -+@Output psMemDesc Allocation description -+ -+@Input uiBytes Size of the allocation in bytes -+ -+@Input uiAlignment Alignment requirement of this allocation -+ -+@Return PVRSRV_OK if allocation was successful -+ */ -+/*****************************************************************************/ -+ -+static PVRSRV_ERROR _MMU_PhysMemAlloc(MMU_PHYSMEM_CONTEXT *psPhysMemCtx, -+ MMU_MEMORY_DESC *psMemDesc, -+ size_t uiBytes, -+ size_t uiAlignment) -+{ -+ PVRSRV_ERROR eError; -+ RA_BASE_T uiPhysAddr; -+ -+ PVR_RETURN_IF_INVALID_PARAM(psMemDesc); -+ PVR_RETURN_IF_INVALID_PARAM(!psMemDesc->bValid); -+ -+ eError = RA_Alloc(psPhysMemCtx->psPhysMemRA, -+ uiBytes, -+ RA_NO_IMPORT_MULTIPLIER, -+ 0, /* flags */ -+ uiAlignment, -+ "", -+ &uiPhysAddr, -+ NULL, -+ (RA_PERISPAN_HANDLE *)&psMemDesc->psMapping); -+ -+ PVR_LOG_RETURN_IF_ERROR(eError, "RA_Alloc"); -+ -+ psMemDesc->bValid = IMG_TRUE; -+ psMemDesc->pvCpuVAddr = NULL; -+ psMemDesc->sDevPAddr.uiAddr = (IMG_UINT64) uiPhysAddr; -+ -+ if (psMemDesc->psMapping->uiCpuVAddrRefCount == 0) -+ { -+ eError = PhysHeapPagesMap(psPhysMemCtx->psDevNode->psMMUPhysHeap, -+ &psMemDesc->psMapping->sMemHandle, -+ psMemDesc->psMapping->uiSize, -+ &psMemDesc->psMapping->sDevPAddr, -+ &psMemDesc->psMapping->pvCpuVAddr); -+ if (eError != PVRSRV_OK) -+ { -+ RA_Free(psPhysMemCtx->psPhysMemRA, psMemDesc->sDevPAddr.uiAddr); -+ return eError; -+ } -+ } -+ -+ psMemDesc->psMapping->uiCpuVAddrRefCount++; -+ psMemDesc->uiOffset = (psMemDesc->sDevPAddr.uiAddr - psMemDesc->psMapping->sDevPAddr.uiAddr); -+ psMemDesc->pvCpuVAddr = (IMG_UINT8 *)psMemDesc->psMapping->pvCpuVAddr + psMemDesc->uiOffset; -+ psMemDesc->uiSize = uiBytes; -+ PVR_ASSERT(psMemDesc->pvCpuVAddr != NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! 
-+@Function _MMU_PhysMemFree -+ -+@Description Allocates physical memory for MMU objects -+ -+@Input psPhysMemCtx Physmem context to do the free on -+ -+@Input psMemDesc Allocation description -+ -+@Return None -+ */ -+/*****************************************************************************/ -+static void _MMU_PhysMemFree(MMU_PHYSMEM_CONTEXT *psPhysMemCtx, -+ MMU_MEMORY_DESC *psMemDesc) -+{ -+ RA_BASE_T uiPhysAddr; -+ -+ PVR_ASSERT(psMemDesc->bValid); -+ -+ if (--psMemDesc->psMapping->uiCpuVAddrRefCount == 0) -+ { -+ PhysHeapPagesUnMap(psPhysMemCtx->psDevNode->psMMUPhysHeap, -+ &psMemDesc->psMapping->sMemHandle, -+ psMemDesc->psMapping->pvCpuVAddr); -+ } -+ -+ psMemDesc->pvCpuVAddr = NULL; -+ -+ uiPhysAddr = psMemDesc->sDevPAddr.uiAddr; -+ RA_Free(psPhysMemCtx->psPhysMemRA, uiPhysAddr); -+ -+ psMemDesc->bValid = IMG_FALSE; -+} -+ -+ -+/***************************************************************************** -+ * MMU object allocation/management functions * -+ *****************************************************************************/ -+ -+static INLINE PVRSRV_ERROR _MMU_ConvertDevMemFlags(IMG_BOOL bInvalidate, -+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, -+ MMU_PROTFLAGS_T *puiMMUProtFlags, -+ MMU_CONTEXT *psMMUContext) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 uiGPUCacheMode; -+ PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; -+ -+ /* Do flag conversion between devmem flags and MMU generic flags */ -+ if (bInvalidate == IMG_TRUE) -+ { -+ *puiMMUProtFlags |= MMU_PROTFLAGS_INVALID; -+ return eError; /* OK */ -+ } -+ -+ /* Convert to valid flags for valid mapping */ -+ *puiMMUProtFlags |= ((uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK) -+ >> PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) -+ << MMU_PROTFLAGS_DEVICE_OFFSET; -+ -+ if (PVRSRV_CHECK_GPU_READABLE(uiMappingFlags)) -+ { -+ *puiMMUProtFlags |= MMU_PROTFLAGS_READABLE; -+ } -+ if (PVRSRV_CHECK_GPU_WRITEABLE(uiMappingFlags)) -+ { -+ *puiMMUProtFlags |= MMU_PROTFLAGS_WRITEABLE; -+ } -+ -+ eError = DevmemDeviceCacheMode(psDevNode, uiMappingFlags, &uiGPUCacheMode); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ switch (uiGPUCacheMode) -+ { -+ case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED: -+ case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC: -+ break; -+ case PVRSRV_MEMALLOCFLAG_GPU_CACHED: -+ *puiMMUProtFlags |= MMU_PROTFLAGS_CACHED; -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Wrong parameters", -+ __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (DevmemDeviceCacheCoherency(psDevNode, uiMappingFlags)) -+ { -+ *puiMMUProtFlags |= MMU_PROTFLAGS_CACHE_COHERENT; -+ } -+ -+ if (psDevNode->pfnMMUTweakProtFlags) -+ { -+ psDevNode->pfnMMUTweakProtFlags(psDevNode, psMMUContext->psDevAttrs, uiMappingFlags, puiMMUProtFlags); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function _PxMemAlloc -+ -+@Description Allocates physical memory for MMU objects, initialises -+ and PDumps it. 
-+ -+@Input psMMUContext MMU context -+ -+@Input uiNumEntries Number of entries to allocate -+ -+@Input psConfig MMU Px config -+ -+@Input eMMULevel MMU level that allocation is for -+ -+@Output psMemDesc Description of allocation -+ -+@Return PVRSRV_OK if allocation was successful -+ */ -+/*****************************************************************************/ -+static PVRSRV_ERROR _PxMemAlloc(MMU_CONTEXT *psMMUContext, -+ IMG_UINT32 uiNumEntries, -+ const MMU_PxE_CONFIG *psConfig, -+ MMU_LEVEL eMMULevel, -+ MMU_MEMORY_DESC *psMemDesc, -+ IMG_UINT32 uiLog2Align) -+{ -+ PVRSRV_ERROR eError; -+ size_t uiBytes; -+ size_t uiAlign; -+ PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; -+ -+ PVR_ASSERT(psConfig->uiBytesPerEntry != 0); -+ -+ uiBytes = uiNumEntries * psConfig->uiBytesPerEntry; -+ /* We need here the alignment of the previous level because that is the entry for we generate here */ -+ uiAlign = 1 << uiLog2Align; -+ -+ /* -+ * If the hardware specifies an alignment requirement for a page table then -+ * it also requires that all memory up to the next aligned address is -+ * zeroed. -+ * -+ * Failing to do this can result in uninitialised data outside of the actual -+ * page table range being read by the MMU and treated as valid, e.g. the -+ * pending flag. -+ * -+ * Typically this will affect 1MiB, 2MiB PT pages which have a size of 16 -+ * and 8 bytes respectively but an alignment requirement of 64 bytes each. -+ */ -+ uiBytes = PVR_ALIGN(uiBytes, uiAlign); -+ -+ /* allocate the object */ -+ eError = _MMU_PhysMemAlloc(psMMUContext->psPhysMemCtx, -+ psMemDesc, uiBytes, uiAlign); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_GOTO_WITH_ERROR("_MMU_PhysMemAlloc", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0); -+ } -+ -+ /* -+ Clear the object -+ Note: if any MMUs are cleared with non-zero values then will need a -+ custom clear function -+ Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is -+ unlikely -+ */ -+ OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, uiBytes); -+ -+ eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, -+ &psMemDesc->psMapping->sMemHandle, -+ psMemDesc->uiOffset, -+ psMemDesc->uiSize); -+ PVR_GOTO_IF_ERROR(eError, e1); -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDevNode, "Alloc MMU object"); -+ -+ PDumpMMUMalloc(psDevNode, -+ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, -+ eMMULevel, -+ &psMemDesc->sDevPAddr, -+ uiBytes, -+ uiAlign, -+ psMMUContext->psDevAttrs->eMMUType); -+ -+ PDumpMMUDumpPxEntries(psDevNode, -+ eMMULevel, -+ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, -+ psMemDesc->pvCpuVAddr, -+ psMemDesc->sDevPAddr, -+ 0, -+ uiNumEntries, -+ NULL, NULL, 0, /* pdump symbolic info is irrelevant here */ -+ psConfig->uiBytesPerEntry, -+ uiLog2Align, -+ psConfig->uiAddrShift, -+ psConfig->uiAddrMask, -+ psConfig->uiProtMask, -+ psConfig->uiValidEnMask, -+ 0, -+ psMMUContext->psDevAttrs->eMMUType); -+#endif -+ -+ return PVRSRV_OK; -+e1: -+ _MMU_PhysMemFree(psMMUContext->psPhysMemCtx, -+ psMemDesc); -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function _PxMemFree -+ -+@Description Frees physical memory for MMU objects, de-initialises -+ and PDumps it. 
-+ -+@Input psMemDesc Description of allocation -+ -+@Return PVRSRV_OK if allocation was successful -+ */ -+/*****************************************************************************/ -+ -+static void _PxMemFree(MMU_CONTEXT *psMMUContext, -+ MMU_MEMORY_DESC *psMemDesc, MMU_LEVEL eMMULevel) -+{ -+#if defined(MMU_CLEARMEM_ON_FREE) -+ /* -+ Clear the MMU object -+ Note: if any MMUs are cleared with non-zero values then will need a -+ custom clear function -+ Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is -+ unlikely -+ */ -+ OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, psMemDesc->ui32Bytes); -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psMMUContext->psPhysMemCtx->psDevNode, "Clear MMU object before freeing it"); -+#endif -+#endif/* MMU_CLEARMEM_ON_FREE */ -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psMMUContext->psPhysMemCtx->psDevNode, "Free MMU object"); -+ PDumpMMUFree(psMMUContext->psPhysMemCtx->psDevNode, -+ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, -+ eMMULevel, -+ &psMemDesc->sDevPAddr, -+ psMMUContext->psDevAttrs->eMMUType); -+#else -+ PVR_UNREFERENCED_PARAMETER(eMMULevel); -+#endif -+ /* free the PC */ -+ _MMU_PhysMemFree(psMMUContext->psPhysMemCtx, psMemDesc); -+} -+ -+static INLINE PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext, -+ MMU_Levelx_INFO *psLevel, -+ IMG_UINT32 uiIndex, -+ const MMU_PxE_CONFIG *psConfig, -+ const IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_BOOL bUnmap, -+#if defined(PDUMP) -+ const IMG_CHAR *pszMemspaceName, -+ const IMG_CHAR *pszSymbolicAddr, -+ IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset, -+#endif -+ IMG_UINT64 uiProtFlags) -+{ -+ MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc; -+ IMG_UINT64 ui64PxE64; -+ IMG_UINT64 uiAddr = psDevPAddr->uiAddr; -+ PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; -+ -+ if (psDevNode->pfnValidateOrTweakPhysAddrs) -+ { -+ PVRSRV_ERROR eErr = psDevNode->pfnValidateOrTweakPhysAddrs(psDevNode, -+ psMMUContext->psDevAttrs, -+ &uiAddr); -+ /* return if error */ -+ PVR_LOG_RETURN_IF_ERROR(eErr, "_SetupPTE"); -+ } -+ -+ /* Calculate Entry */ -+ ui64PxE64 = uiAddr /* Calculate the offset to that base */ -+ >> psConfig->uiAddrLog2Align /* Shift away the useless bits, because the alignment is very coarse and we address by alignment */ -+ << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */ -+ & psConfig->uiAddrMask; /* Delete unused bits */ -+ ui64PxE64 |= uiProtFlags; -+ -+ /* Set the entry */ -+ if (psConfig->uiBytesPerEntry == 8) -+ { -+ IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */ -+ -+ pui64Px[uiIndex] = ui64PxE64; -+ } -+ else if (psConfig->uiBytesPerEntry == 4) -+ { -+ IMG_UINT32 *pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */ -+ -+ /* assert that the result fits into 32 bits before writing -+ it into the 32-bit array with a cast */ -+ PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU)); -+ -+ pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64; -+ } -+ else -+ { -+ return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG; -+ } -+ -+ -+ /* Log modification */ -+ HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE, -+ HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel), -+ uiIndex, MMU_LEVEL_1, -+ HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64), -+ !bUnmap); -+ -+#if defined(PDUMP) -+ PDumpMMUDumpPxEntries(psDevNode, -+ MMU_LEVEL_1, -+ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, -+ psMemDesc->pvCpuVAddr, -+ psMemDesc->sDevPAddr, -+ uiIndex, -+ 1, -+ pszMemspaceName, -+ pszSymbolicAddr, -+ uiSymbolicAddrOffset, -+ 
psConfig->uiBytesPerEntry, -+ psConfig->uiAddrLog2Align, -+ psConfig->uiAddrShift, -+ psConfig->uiAddrMask, -+ psConfig->uiProtMask, -+ psConfig->uiValidEnMask, -+ 0, -+ psMMUContext->psDevAttrs->eMMUType); -+#endif /*PDUMP*/ -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function _SetupPxE -+ -+@Description Sets up an entry of an MMU object to point to the -+ provided address -+ -+@Input psMMUContext MMU context to operate on -+ -+@Input psLevel Level info for MMU object -+ -+@Input uiIndex Index into the MMU object to setup -+ -+@Input psConfig MMU Px config -+ -+@Input eMMULevel Level of MMU object -+ -+@Input psDevPAddr Address to setup the MMU object to point to -+ -+@Input pszMemspaceName Name of the PDump memory space that the entry -+ will point to -+ -+@Input pszSymbolicAddr PDump symbolic address that the entry will -+ point to -+ -+@Input uiProtFlags MMU protection flags -+ -+@Return PVRSRV_OK if the setup was successful -+ */ -+/*****************************************************************************/ -+static PVRSRV_ERROR _SetupPxE(MMU_CONTEXT *psMMUContext, -+ MMU_Levelx_INFO *psLevel, -+ IMG_UINT32 uiIndex, -+ const MMU_PxE_CONFIG *psConfig, -+ MMU_LEVEL eMMULevel, -+ const IMG_DEV_PHYADDR *psDevPAddr, -+#if defined(PDUMP) -+ const IMG_CHAR *pszMemspaceName, -+ const IMG_CHAR *pszSymbolicAddr, -+ IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset, -+#endif -+ MMU_PROTFLAGS_T uiProtFlags, -+ IMG_UINT32 uiLog2DataPageSize) -+{ -+ PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; -+ MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc; -+ -+ IMG_UINT32 (*pfnDerivePxEProt4)(IMG_UINT32); -+ IMG_UINT64 (*pfnDerivePxEProt8)(IMG_UINT32, IMG_UINT32); -+ -+ if (!psDevPAddr) -+ { -+ /* Invalidate entry */ -+ if (~uiProtFlags & MMU_PROTFLAGS_INVALID) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Error, no physical address specified, but not invalidating entry")); -+ uiProtFlags |= MMU_PROTFLAGS_INVALID; -+ } -+ psDevPAddr = &gsBadDevPhyAddr; -+ } -+ else -+ { -+ if (uiProtFlags & MMU_PROTFLAGS_INVALID) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "A physical address was specified when requesting invalidation of entry")); -+ uiProtFlags |= MMU_PROTFLAGS_INVALID; -+ } -+ } -+ -+ switch (eMMULevel) -+ { -+ case MMU_LEVEL_3: -+ pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePCEProt4; -+ pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePCEProt8; -+ break; -+ -+ case MMU_LEVEL_2: -+ pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePDEProt4; -+ pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePDEProt8; -+ break; -+ -+ case MMU_LEVEL_1: -+ pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePTEProt4; -+ pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePTEProt8; -+ break; -+ -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid MMU level", __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* How big is a PxE in bytes? 
*/ -+ /* Filling the actual Px entry with an address */ -+ switch (psConfig->uiBytesPerEntry) -+ { -+ case 4: -+ { -+ IMG_UINT32 *pui32Px; -+ IMG_UINT64 ui64PxE64; -+ -+ pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */ -+ -+ ui64PxE64 = psDevPAddr->uiAddr /* Calculate the offset to that base */ -+ >> psConfig->uiAddrLog2Align /* Shift away the unnecessary bits of the address */ -+ << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */ -+ & psConfig->uiAddrMask; /* Delete unused higher bits */ -+ -+ ui64PxE64 |= (IMG_UINT64)pfnDerivePxEProt4(uiProtFlags); -+ /* assert that the result fits into 32 bits before writing -+ it into the 32-bit array with a cast */ -+ PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU)); -+ -+ /* We should never invalidate an invalid page */ -+ if (uiProtFlags & MMU_PROTFLAGS_INVALID) -+ { -+ PVR_ASSERT(pui32Px[uiIndex] != ui64PxE64); -+ } -+ pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64; -+ HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE, -+ HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel), -+ uiIndex, eMMULevel, -+ HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64), -+ (uiProtFlags & MMU_PROTFLAGS_INVALID)? 0: 1); -+ break; -+ } -+ case 8: -+ { -+ IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */ -+ IMG_UINT64 ui64PxE64; -+ -+ ui64PxE64 = psDevPAddr->uiAddr /* Calculate the offset to that base */ -+ >> psConfig->uiAddrLog2Align /* Shift away the unnecessary bits of the address */ -+ << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */ -+ & psConfig->uiAddrMask; /* Delete unused higher bits */ -+ ui64PxE64 |= pfnDerivePxEProt8(uiProtFlags, uiLog2DataPageSize); -+ -+ pui64Px[uiIndex] = ui64PxE64; -+ -+ HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE, -+ HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel), -+ uiIndex, eMMULevel, -+ HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64), -+ (uiProtFlags & MMU_PROTFLAGS_INVALID)? 0: 1); -+ break; -+ } -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "%s: PxE size not supported (%d) for level %d", -+ __func__, psConfig->uiBytesPerEntry, eMMULevel)); -+ -+ return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG; -+ } -+ -+#if defined(PDUMP) -+ PDumpMMUDumpPxEntries(psDevNode, -+ eMMULevel, -+ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, -+ psMemDesc->pvCpuVAddr, -+ psMemDesc->sDevPAddr, -+ uiIndex, -+ 1, -+ pszMemspaceName, -+ pszSymbolicAddr, -+ uiSymbolicAddrOffset, -+ psConfig->uiBytesPerEntry, -+ psConfig->uiAddrLog2Align, -+ psConfig->uiAddrShift, -+ psConfig->uiAddrMask, -+ psConfig->uiProtMask, -+ psConfig->uiValidEnMask, -+ 0, -+ psMMUContext->psDevAttrs->eMMUType); -+#endif -+ -+ psDevNode->pfnMMUCacheInvalidate(psDevNode, psMMUContext, -+ eMMULevel, -+ uiProtFlags & MMU_PROTFLAGS_INVALID); -+ -+ return PVRSRV_OK; -+} -+ -+/***************************************************************************** -+ * MMU host control functions (Level Info) * -+ *****************************************************************************/ -+ -+ -+/*************************************************************************/ /*! -+@Function _MMU_FreeLevel -+ -+@Description Recursively frees the specified range of Px entries. If any -+ level has its last reference dropped then the MMU object -+ memory and the MMU_Levelx_Info will be freed. -+ -+ At each level we might be crossing a boundary from one Px to -+ another. 
The values for auiStartArray should be by used for -+ the first call into each level and the values in auiEndArray -+ should only be used in the last call for each level. -+ In order to determine if this is the first/last call we pass -+ in bFirst and bLast. -+ When one level calls down to the next only if bFirst/bLast is set -+ and it's the first/last iteration of the loop at its level will -+ bFirst/bLast set for the next recursion. -+ This means that each iteration has the knowledge of the previous -+ level which is required. -+ -+@Input psMMUContext MMU context to operate on -+ -+@Input psLevel Level info on which to free the -+ specified range -+ -+@Input auiStartArray Array of start indexes (one for each level) -+ -+@Input auiEndArray Array of end indexes (one for each level) -+ -+@Input auiEntriesPerPxArray Array of number of entries for the Px -+ (one for each level) -+ -+@Input apsConfig Array of PxE configs (one for each level) -+ -+@Input aeMMULevel Array of MMU levels (one for each level) -+ -+@Input pui32CurrentLevel Pointer to a variable which is set to our -+ current level -+ -+@Input uiStartIndex Start index of the range to free -+ -+@Input uiEndIndex End index of the range to free -+ -+@Input bFirst This is the first call for this level -+ -+@Input bLast This is the last call for this level -+ -+@Return IMG_TRUE if the last reference to psLevel was dropped -+ */ -+/*****************************************************************************/ -+static IMG_BOOL _MMU_FreeLevel(MMU_CONTEXT *psMMUContext, -+ MMU_Levelx_INFO *psLevel, -+ IMG_UINT32 auiStartArray[], -+ IMG_UINT32 auiEndArray[], -+ IMG_UINT32 auiEntriesPerPxArray[], -+ const MMU_PxE_CONFIG *apsConfig[], -+ MMU_LEVEL aeMMULevel[], -+ IMG_UINT32 *pui32CurrentLevel, -+ IMG_UINT32 uiStartIndex, -+ IMG_UINT32 uiEndIndex, -+ IMG_BOOL bFirst, -+ IMG_BOOL bLast, -+ IMG_UINT32 uiLog2DataPageSize) -+{ -+ IMG_UINT32 uiThisLevel = *pui32CurrentLevel; -+ const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel]; -+ IMG_UINT32 i; -+ IMG_BOOL bFreed = IMG_FALSE; -+ PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; -+ -+ /* Parameter checks */ -+ PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL); -+ PVR_ASSERT(psLevel != NULL); -+ -+ MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel: level = %d, range %d - %d, refcount = %d", -+ aeMMULevel[uiThisLevel], uiStartIndex, -+ uiEndIndex, psLevel->ui32RefCount)); -+ -+ for (i = uiStartIndex;(i < uiEndIndex) && (psLevel != NULL);i++) -+ { -+ if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) -+ { -+ MMU_Levelx_INFO *psNextLevel = psLevel->apsNextLevel[i]; -+ IMG_UINT32 uiNextStartIndex; -+ IMG_UINT32 uiNextEndIndex; -+ IMG_BOOL bNextFirst; -+ IMG_BOOL bNextLast; -+ -+ /* If we're crossing a Px then the start index changes */ -+ if (bFirst && (i == uiStartIndex)) -+ { -+ uiNextStartIndex = auiStartArray[uiThisLevel + 1]; -+ bNextFirst = IMG_TRUE; -+ } -+ else -+ { -+ uiNextStartIndex = 0; -+ bNextFirst = IMG_FALSE; -+ } -+ -+ /* If we're crossing a Px then the end index changes */ -+ if (bLast && (i == (uiEndIndex - 1))) -+ { -+ uiNextEndIndex = auiEndArray[uiThisLevel + 1]; -+ bNextLast = IMG_TRUE; -+ } -+ else -+ { -+ uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1]; -+ bNextLast = IMG_FALSE; -+ } -+ -+ /* Recurse into the next level */ -+ (*pui32CurrentLevel)++; -+ if (_MMU_FreeLevel(psMMUContext, psNextLevel, auiStartArray, -+ auiEndArray, auiEntriesPerPxArray, -+ apsConfig, aeMMULevel, pui32CurrentLevel, -+ uiNextStartIndex, uiNextEndIndex, -+ bNextFirst, bNextLast, 
uiLog2DataPageSize)) -+ { -+ PVRSRV_ERROR eError; -+ -+ /* Un-wire the entry */ -+ eError = _SetupPxE(psMMUContext, -+ psLevel, -+ i, -+ psConfig, -+ aeMMULevel[uiThisLevel], -+ NULL, -+#if defined(PDUMP) -+ NULL, /* Only required for data page */ -+ NULL, /* Only required for data page */ -+ 0, /* Only required for data page */ -+#endif -+ MMU_PROTFLAGS_INVALID, -+ uiLog2DataPageSize); -+ -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ /* Free table of the level below, pointed to by this table entry. -+ * We don't destroy the table inside the above _MMU_FreeLevel call because we -+ * first have to set the table entry of the level above to invalid. */ -+ _PxMemFree(psMMUContext, &psNextLevel->sMemDesc, aeMMULevel[*pui32CurrentLevel]); -+ OSFreeMem(psNextLevel); -+ -+ /* The level below us is empty, drop the refcount and clear the pointer */ -+ psLevel->ui32RefCount--; -+ psLevel->apsNextLevel[i] = NULL; -+ -+ /* Check we haven't wrapped around */ -+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); -+ } -+ (*pui32CurrentLevel)--; -+ } -+ else -+ { -+ psLevel->ui32RefCount--; -+ } -+ -+ /* -+ Free this level if it is no longer referenced, unless it's the base -+ level in which case it's part of the MMU context and should be freed -+ when the MMU context is freed -+ */ -+ if ((psLevel->ui32RefCount == 0) && (psLevel != &psMMUContext->sBaseLevelInfo)) -+ { -+ bFreed = IMG_TRUE; -+ } -+ } -+ -+ /* Level one flushing is done when we actually write the table entries */ -+ if ((aeMMULevel[uiThisLevel] != MMU_LEVEL_1) && (psLevel != NULL)) -+ { -+ PhysHeapPagesClean(psDevNode->psMMUPhysHeap, -+ &psLevel->sMemDesc.psMapping->sMemHandle, -+ uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, -+ (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry); -+ } -+ -+ MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel end: level = %d, refcount = %d", -+ aeMMULevel[uiThisLevel], bFreed?0: (psLevel)?psLevel->ui32RefCount:-1)); -+ -+ return bFreed; -+} -+ -+/*************************************************************************/ /*! -+@Function _MMU_AllocLevel -+ -+@Description Recursively allocates the specified range of Px entries. If any -+ level has its last reference dropped then the MMU object -+ memory and the MMU_Levelx_Info will be freed. -+ -+ At each level we might be crossing a boundary from one Px to -+ another. The values for auiStartArray should be by used for -+ the first call into each level and the values in auiEndArray -+ should only be used in the last call for each level. -+ In order to determine if this is the first/last call we pass -+ in bFirst and bLast. -+ When one level calls down to the next only if bFirst/bLast is set -+ and it's the first/last iteration of the loop at its level will -+ bFirst/bLast set for the next recursion. -+ This means that each iteration has the knowledge of the previous -+ level which is required. 
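The first/last propagation described above can be condensed into a small, generic sketch (illustrative names only, not the driver's code): only the first iteration of a "first" call uses the caller-supplied start index, only the last iteration of a "last" call uses the caller-supplied end index, and every other child table is walked in full.

    #include <stdint.h>

    /* Illustrative recursive walk over a multi-level page-table index range. */
    static void WalkLevel(uint32_t uiLevel, uint32_t uiLeafLevel,
                          const uint32_t auiStart[], const uint32_t auiEnd[],
                          const uint32_t auiEntriesPerPx[],
                          uint32_t uiStartIndex, uint32_t uiEndIndex,
                          int bFirst, int bLast)
    {
        uint32_t i;

        if (uiLevel == uiLeafLevel)
        {
            return; /* leaf (PT) level: nothing below to recurse into */
        }

        for (i = uiStartIndex; i < uiEndIndex; i++)
        {
            int bNextFirst = bFirst && (i == uiStartIndex);
            int bNextLast  = bLast  && (i == uiEndIndex - 1);

            WalkLevel(uiLevel + 1, uiLeafLevel,
                      auiStart, auiEnd, auiEntriesPerPx,
                      bNextFirst ? auiStart[uiLevel + 1] : 0,
                      bNextLast  ? auiEnd[uiLevel + 1] : auiEntriesPerPx[uiLevel + 1],
                      bNextFirst, bNextLast);
        }
    }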
-+ -+@Input psMMUContext MMU context to operate on -+ -+@Input psLevel Level info on which to free the -+ specified range -+ -+@Input auiStartArray Array of start indexes (one for each level) -+ -+@Input auiEndArray Array of end indexes (one for each level) -+ -+@Input auiEntriesPerPxArray Array of number of entries for the Px -+ (one for each level) -+ -+@Input apsConfig Array of PxE configs (one for each level) -+ -+@Input aeMMULevel Array of MMU levels (one for each level) -+ -+@Input pui32CurrentLevel Pointer to a variable which is set to our -+ current level -+ -+@Input uiStartIndex Start index of the range to free -+ -+@Input uiEndIndex End index of the range to free -+ -+@Input bFirst This is the first call for this level -+ -+@Input bLast This is the last call for this level -+ -+@Return IMG_TRUE if the last reference to psLevel was dropped -+ */ -+/*****************************************************************************/ -+static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext, -+ MMU_Levelx_INFO *psLevel, -+ IMG_UINT32 auiStartArray[], -+ IMG_UINT32 auiEndArray[], -+ IMG_UINT32 auiEntriesPerPxArray[], -+ const MMU_PxE_CONFIG *apsConfig[], -+ MMU_LEVEL aeMMULevel[], -+ IMG_UINT32 *pui32CurrentLevel, -+ IMG_UINT32 uiStartIndex, -+ IMG_UINT32 uiEndIndex, -+ IMG_BOOL bFirst, -+ IMG_BOOL bLast, -+ IMG_UINT32 uiLog2DataPageSize) -+{ -+ IMG_UINT32 uiThisLevel = *pui32CurrentLevel; /* Starting with 0 */ -+ const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel]; /* The table config for the current level */ -+ PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ IMG_UINT32 uiAllocState = 99; /* Debug info to check what progress was made in the function. Updated during this function. */ -+ IMG_UINT32 i; -+ PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; -+ -+ /* Parameter check */ -+ PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL); -+ -+ MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel: level = %d, range %d - %d, refcount = %d", -+ aeMMULevel[uiThisLevel], uiStartIndex, -+ uiEndIndex, psLevel->ui32RefCount)); -+ -+ /* Go from uiStartIndex to uiEndIndex through the Px */ -+ for (i = uiStartIndex;i < uiEndIndex;i++) -+ { -+ /* Only try an allocation if this is not the last level */ -+ /*Because a PT allocation is already done while setting the entry in PD */ -+ if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) -+ { -+ IMG_UINT32 uiNextStartIndex; -+ IMG_UINT32 uiNextEndIndex; -+ IMG_BOOL bNextFirst; -+ IMG_BOOL bNextLast; -+ -+ /* If there is already a next Px level existing, do not allocate it */ -+ if (!psLevel->apsNextLevel[i]) -+ { -+ MMU_Levelx_INFO *psNextLevel; -+ IMG_UINT32 ui32AllocSize; -+ IMG_UINT32 uiNextEntries; -+ -+ /* Allocate and setup the next level */ -+ uiNextEntries = auiEntriesPerPxArray[uiThisLevel + 1]; -+ ui32AllocSize = sizeof(MMU_Levelx_INFO); -+ if (aeMMULevel[uiThisLevel + 1] != MMU_LEVEL_1) -+ { -+ ui32AllocSize += sizeof(MMU_Levelx_INFO *) * (uiNextEntries - 1); -+ } -+ psNextLevel = OSAllocZMem(ui32AllocSize); -+ if (psNextLevel == NULL) -+ { -+ uiAllocState = 0; -+ goto e0; -+ } -+ -+ /* Hook in this level for next time */ -+ psLevel->apsNextLevel[i] = psNextLevel; -+ -+ psNextLevel->ui32NumOfEntries = uiNextEntries; -+ psNextLevel->ui32RefCount = 0; -+ /* Allocate Px memory for a sub level*/ -+ eError = _PxMemAlloc(psMMUContext, uiNextEntries, apsConfig[uiThisLevel + 1], -+ aeMMULevel[uiThisLevel + 1], -+ &psNextLevel->sMemDesc, -+ psConfig->uiAddrLog2Align); -+ if (eError != PVRSRV_OK) -+ { -+ uiAllocState = 1; -+ goto e0; -+ } -+ -+ 
/* Wire up the entry */ -+ eError = _SetupPxE(psMMUContext, -+ psLevel, -+ i, -+ psConfig, -+ aeMMULevel[uiThisLevel], -+ &psNextLevel->sMemDesc.sDevPAddr, -+#if defined(PDUMP) -+ NULL, /* Only required for data page */ -+ NULL, /* Only required for data page */ -+ 0, /* Only required for data page */ -+#endif -+ 0, -+ uiLog2DataPageSize); -+ -+ if (eError != PVRSRV_OK) -+ { -+ uiAllocState = 2; -+ goto e0; -+ } -+ -+ psLevel->ui32RefCount++; -+ } -+ -+ /* If we're crossing a Px then the start index changes */ -+ if (bFirst && (i == uiStartIndex)) -+ { -+ uiNextStartIndex = auiStartArray[uiThisLevel + 1]; -+ bNextFirst = IMG_TRUE; -+ } -+ else -+ { -+ uiNextStartIndex = 0; -+ bNextFirst = IMG_FALSE; -+ } -+ -+ /* If we're crossing a Px then the end index changes */ -+ if (bLast && (i == (uiEndIndex - 1))) -+ { -+ uiNextEndIndex = auiEndArray[uiThisLevel + 1]; -+ bNextLast = IMG_TRUE; -+ } -+ else -+ { -+ uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1]; -+ bNextLast = IMG_FALSE; -+ } -+ -+ /* Recurse into the next level */ -+ (*pui32CurrentLevel)++; -+ eError = _MMU_AllocLevel(psMMUContext, psLevel->apsNextLevel[i], -+ auiStartArray, -+ auiEndArray, -+ auiEntriesPerPxArray, -+ apsConfig, -+ aeMMULevel, -+ pui32CurrentLevel, -+ uiNextStartIndex, -+ uiNextEndIndex, -+ bNextFirst, -+ bNextLast, -+ uiLog2DataPageSize); -+ (*pui32CurrentLevel)--; -+ if (eError != PVRSRV_OK) -+ { -+ uiAllocState = 2; -+ goto e0; -+ } -+ } -+ else -+ { -+ /* All we need to do for level 1 is bump the refcount */ -+ psLevel->ui32RefCount++; -+ } -+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); -+ } -+ -+ /* Level one flushing is done when we actually write the table entries */ -+ if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) -+ { -+ eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, -+ &psLevel->sMemDesc.psMapping->sMemHandle, -+ uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, -+ (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ } -+ -+ MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel end: level = %d, refcount = %d", -+ aeMMULevel[uiThisLevel], psLevel->ui32RefCount)); -+ return PVRSRV_OK; -+ -+e0: -+ /* Confirm that we've not come down this route unexpectedly */ -+ PVR_ASSERT(uiAllocState!=99); -+ PVR_DPF((PVR_DBG_ERROR, "_MMU_AllocLevel: Error %d allocating Px for level %d in stage %d" -+ ,eError, aeMMULevel[uiThisLevel], uiAllocState)); -+ -+ /* The start value of index variable i is not initialised on purpose. -+ * This clean-up loop deinitialises what was already initialised in -+ * reverse order, so the i index already has the correct value. 
-+ */ -+ for (/* i already set */; i>= uiStartIndex && i< uiEndIndex; i--) -+ { -+ switch (uiAllocState) -+ { -+ IMG_UINT32 uiNextStartIndex; -+ IMG_UINT32 uiNextEndIndex; -+ IMG_BOOL bNextFirst; -+ IMG_BOOL bNextLast; -+ -+ case 3: -+ /* If we're crossing a Px then the start index changes */ -+ if (bFirst && (i == uiStartIndex)) -+ { -+ uiNextStartIndex = auiStartArray[uiThisLevel + 1]; -+ bNextFirst = IMG_TRUE; -+ } -+ else -+ { -+ uiNextStartIndex = 0; -+ bNextFirst = IMG_FALSE; -+ } -+ -+ /* If we're crossing a Px then the end index changes */ -+ if (bLast && (i == (uiEndIndex - 1))) -+ { -+ uiNextEndIndex = auiEndArray[uiThisLevel + 1]; -+ bNextLast = IMG_TRUE; -+ } -+ else -+ { -+ uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1]; -+ bNextLast = IMG_FALSE; -+ } -+ -+ if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) -+ { -+ (*pui32CurrentLevel)++; -+ if (_MMU_FreeLevel(psMMUContext, psLevel->apsNextLevel[i], -+ auiStartArray, auiEndArray, -+ auiEntriesPerPxArray, apsConfig, -+ aeMMULevel, pui32CurrentLevel, -+ uiNextStartIndex, uiNextEndIndex, -+ bNextFirst, bNextLast, uiLog2DataPageSize)) -+ { -+ psLevel->ui32RefCount--; -+ psLevel->apsNextLevel[i] = NULL; -+ -+ /* Check we haven't wrapped around */ -+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); -+ } -+ (*pui32CurrentLevel)--; -+ } -+ else -+ { -+ /* We should never come down this path, but it's here -+ for completeness */ -+ psLevel->ui32RefCount--; -+ -+ /* Check we haven't wrapped around */ -+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); -+ } -+ -+ __fallthrough; -+ case 2: -+ if (psLevel->apsNextLevel[i] != NULL && -+ psLevel->apsNextLevel[i]->ui32RefCount == 0) -+ { -+ _PxMemFree(psMMUContext, &psLevel->sMemDesc, -+ aeMMULevel[uiThisLevel]); -+ } -+ -+ __fallthrough; -+ case 1: -+ if (psLevel->apsNextLevel[i] != NULL && -+ psLevel->apsNextLevel[i]->ui32RefCount == 0) -+ { -+ OSFreeMem(psLevel->apsNextLevel[i]); -+ psLevel->apsNextLevel[i] = NULL; -+ } -+ -+ __fallthrough; -+ case 0: -+ uiAllocState = 3; -+ break; -+ } -+ } -+ return eError; -+} -+ -+/***************************************************************************** -+ * MMU page table functions * -+ *****************************************************************************/ -+ -+/*************************************************************************/ /*! 
-+@Function _MMU_GetLevelData -+ -+@Description Get the all the level data and calculates the indexes for the -+ specified address range -+ -+@Input psMMUContext MMU context to operate on -+ -+@Input sDevVAddrStart Start device virtual address -+ -+@Input sDevVAddrEnd End device virtual address -+ -+@Input uiLog2DataPageSize Log2 of the page size to use -+ -+@Input auiStartArray Array of start indexes (one for each level) -+ -+@Input auiEndArray Array of end indexes (one for each level) -+ -+@Input uiEntriesPerPxArray Array of number of entries for the Px -+ (one for each level) -+ -+@Input apsConfig Array of PxE configs (one for each level) -+ -+@Input aeMMULevel Array of MMU levels (one for each level) -+ -+@Input ppsMMUDevVAddrConfig Device virtual address config -+ -+@Input phPriv Private data of page size config -+ -+@Return IMG_TRUE if the last reference to psLevel was dropped -+ */ -+/*****************************************************************************/ -+static void _MMU_GetLevelData(MMU_CONTEXT *psMMUContext, -+ IMG_DEV_VIRTADDR sDevVAddrStart, -+ IMG_DEV_VIRTADDR sDevVAddrEnd, -+ IMG_UINT32 uiLog2DataPageSize, -+ IMG_UINT32 auiStartArray[], -+ IMG_UINT32 auiEndArray[], -+ IMG_UINT32 auiEntriesPerPx[], -+ const MMU_PxE_CONFIG *apsConfig[], -+ MMU_LEVEL aeMMULevel[], -+ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, -+ IMG_HANDLE *phPriv) -+{ -+ const MMU_PxE_CONFIG *psMMUPDEConfig; -+ const MMU_PxE_CONFIG *psMMUPTEConfig; -+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; -+ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 i = 0; -+ -+ eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize, -+ &psMMUPDEConfig, -+ &psMMUPTEConfig, -+ ppsMMUDevVAddrConfig, -+ phPriv); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ psDevVAddrConfig = *ppsMMUDevVAddrConfig; -+ -+ if (psDevVAddrConfig->uiPCIndexMask != 0) -+ { -+ auiStartArray[i] = _CalcPCEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE); -+ auiEndArray[i] = _CalcPCEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE); -+ auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPC; -+ apsConfig[i] = psDevAttrs->psBaseConfig; -+ aeMMULevel[i] = MMU_LEVEL_3; -+ i++; -+ } -+ -+ if (psDevVAddrConfig->uiPDIndexMask != 0) -+ { -+ auiStartArray[i] = _CalcPDEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE); -+ auiEndArray[i] = _CalcPDEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE); -+ auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPD; -+ if (i == 0) -+ { -+ apsConfig[i] = psDevAttrs->psBaseConfig; -+ } -+ else -+ { -+ apsConfig[i] = psMMUPDEConfig; -+ } -+ aeMMULevel[i] = MMU_LEVEL_2; -+ i++; -+ } -+ -+ /* -+ There is always a PTE entry so we have a slightly different behaviour than above. -+ E.g. for 2 MB pages the uiPTIndexMask is 0x0000000000 but still there -+ is a PT with one entry. -+ -+ */ -+ auiStartArray[i] = _CalcPTEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE); -+ if (psDevVAddrConfig->uiPTIndexMask !=0) -+ { -+ auiEndArray[i] = _CalcPTEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE); -+ } -+ else -+ { -+ /* -+ If the PTE mask is zero it means there is only 1 PTE and thus, as an -+ an exclusive bound, the end array index is equal to the start index + 1. 
-+ */ -+ -+ auiEndArray[i] = auiStartArray[i] + 1; -+ } -+ -+ auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPT; -+ -+ if (i == 0) -+ { -+ apsConfig[i] = psDevAttrs->psBaseConfig; -+ } -+ else -+ { -+ apsConfig[i] = psMMUPTEConfig; -+ } -+ aeMMULevel[i] = MMU_LEVEL_1; -+} -+ -+static void _MMU_PutLevelData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hPriv) -+{ -+ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; -+ -+ psDevAttrs->pfnPutPageSizeConfiguration(hPriv); -+} -+ -+/*************************************************************************/ /*! -+@Function _AllocPageTables -+ -+@Description Allocate page tables and any higher level MMU objects required -+ for the specified virtual range -+ -+@Input psMMUContext MMU context to operate on -+ -+@Input sDevVAddrStart Start device virtual address -+ -+@Input sDevVAddrEnd End device virtual address -+ -+@Input uiLog2DataPageSize Page size of the data pages -+ -+@Return PVRSRV_OK if the allocation was successful -+ */ -+/*****************************************************************************/ -+static PVRSRV_ERROR -+_AllocPageTables(MMU_CONTEXT *psMMUContext, -+ IMG_DEV_VIRTADDR sDevVAddrStart, -+ IMG_DEV_VIRTADDR sDevVAddrEnd, -+ IMG_UINT32 uiLog2DataPageSize) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 auiStartArray[MMU_MAX_LEVEL]; -+ IMG_UINT32 auiEndArray[MMU_MAX_LEVEL]; -+ IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL]; -+ MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL]; -+ const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL]; -+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; -+ IMG_HANDLE hPriv; -+ IMG_UINT32 ui32CurrentLevel = 0; -+ -+ PVR_DPF((PVR_DBG_ALLOC, -+ "_AllocPageTables: vaddr range: "IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC, -+ sDevVAddrStart.uiAddr, -+ sDevVAddrEnd.uiAddr -+ )); -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psMMUContext->psPhysMemCtx->psDevNode, -+ "Allocating page tables for %"IMG_UINT64_FMTSPEC" bytes virtual range: " -+ IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC, -+ (IMG_UINT64)sDevVAddrEnd.uiAddr - (IMG_UINT64)sDevVAddrStart.uiAddr, -+ (IMG_UINT64)sDevVAddrStart.uiAddr, -+ (IMG_UINT64)sDevVAddrEnd.uiAddr); -+#endif -+ -+ _MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd, -+ (IMG_UINT32) uiLog2DataPageSize, auiStartArray, auiEndArray, -+ auiEntriesPerPx, apsConfig, aeMMULevel, -+ &psDevVAddrConfig, &hPriv); -+ -+ HTBLOGK(HTB_SF_MMU_PAGE_OP_ALLOC, -+ HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr), -+ HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr)); -+ -+ eError = _MMU_AllocLevel(psMMUContext, &psMMUContext->sBaseLevelInfo, -+ auiStartArray, auiEndArray, auiEntriesPerPx, -+ apsConfig, aeMMULevel, &ui32CurrentLevel, -+ auiStartArray[0], auiEndArray[0], -+ IMG_TRUE, IMG_TRUE, uiLog2DataPageSize); -+ -+ _MMU_PutLevelData(psMMUContext, hPriv); -+ -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function _FreePageTables -+ -+@Description Free page tables and any higher level MMU objects at are no -+ longer referenced for the specified virtual range. -+ This will fill the temporary free list of the MMU context which -+ needs cleanup after the call. 
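For illustration, the per-level start/end indices gathered above come from masking and shifting the device virtual address, as the _CalcPCEIdx/_CalcPDEIdx/_CalcPTEIdx helpers earlier in the patch do. A minimal sketch with made-up field widths (the masks and shifts below are illustrative and do not describe a real device layout):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical 40-bit VA layout: | 10-bit PC | 9-bit PD | 9-bit PT | 12-bit page offset | */
    #define PT_INDEX_SHIFT 12u
    #define PT_INDEX_MASK  0x00000000001FF000ULL
    #define PD_INDEX_SHIFT 21u
    #define PD_INDEX_MASK  0x000000003FE00000ULL
    #define PC_INDEX_SHIFT 30u
    #define PC_INDEX_MASK  0x000000FFC0000000ULL

    static uint32_t CalcIdx(uint64_t uiDevVAddr, uint64_t uiMask, uint32_t uiShift)
    {
        return (uint32_t)((uiDevVAddr & uiMask) >> uiShift);
    }

    int main(void)
    {
        uint64_t uiDevVAddr = 0x0000000012345678ULL;

        /* Start indices use the address as-is; an exclusive end index would
         * additionally round up, as the bRoundUp paths in the patch do. */
        printf("PC=%u PD=%u PT=%u\n",
               CalcIdx(uiDevVAddr, PC_INDEX_MASK, PC_INDEX_SHIFT),
               CalcIdx(uiDevVAddr, PD_INDEX_MASK, PD_INDEX_SHIFT),
               CalcIdx(uiDevVAddr, PT_INDEX_MASK, PT_INDEX_SHIFT));
        return 0;
    }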
-+ -+@Input psMMUContext MMU context to operate on -+ -+@Input sDevVAddrStart Start device virtual address -+ -+@Input sDevVAddrEnd End device virtual address -+ -+@Input uiLog2DataPageSize Page size of the data pages -+ -+@Return None -+ */ -+/*****************************************************************************/ -+static void _FreePageTables(MMU_CONTEXT *psMMUContext, -+ IMG_DEV_VIRTADDR sDevVAddrStart, -+ IMG_DEV_VIRTADDR sDevVAddrEnd, -+ IMG_UINT32 uiLog2DataPageSize) -+{ -+ IMG_UINT32 auiStartArray[MMU_MAX_LEVEL]; -+ IMG_UINT32 auiEndArray[MMU_MAX_LEVEL]; -+ IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL]; -+ MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL]; -+ const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL]; -+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; -+ IMG_UINT32 ui32CurrentLevel = 0; -+ IMG_HANDLE hPriv; -+ -+ PVR_DPF((PVR_DBG_ALLOC, -+ "_FreePageTables: vaddr range: "IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC, -+ sDevVAddrStart.uiAddr, -+ sDevVAddrEnd.uiAddr -+ )); -+ -+ _MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd, -+ uiLog2DataPageSize, auiStartArray, auiEndArray, -+ auiEntriesPerPx, apsConfig, aeMMULevel, -+ &psDevVAddrConfig, &hPriv); -+ -+ HTBLOGK(HTB_SF_MMU_PAGE_OP_FREE, -+ HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr), -+ HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr)); -+ -+ /* ignoring return code, in this case there should be no references -+ * to the level anymore, and at this stage there is nothing to do with -+ * the return status */ -+ (void) _MMU_FreeLevel(psMMUContext, &psMMUContext->sBaseLevelInfo, -+ auiStartArray, auiEndArray, auiEntriesPerPx, -+ apsConfig, aeMMULevel, &ui32CurrentLevel, -+ auiStartArray[0], auiEndArray[0], -+ IMG_TRUE, IMG_TRUE, uiLog2DataPageSize); -+ -+ _MMU_PutLevelData(psMMUContext, hPriv); -+} -+ -+ -+/*************************************************************************/ /*! -+@Function _MMU_GetPTInfo -+ -+@Description Get the PT level information and PT entry index for the specified -+ virtual address -+ -+@Input psMMUContext MMU context to operate on -+ -+@Input psDevVAddr Device virtual address to get the PTE info -+ from. -+ -+@Input psDevVAddrConfig The current virtual address config obtained -+ by another function call before. 
-+ -+@Output psLevel Level info of the PT -+ -+@Output pui32PTEIndex Index into the PT the address corresponds to -+ -+@Return None -+ */ -+/*****************************************************************************/ -+static INLINE void _MMU_GetPTInfo(MMU_CONTEXT *psMMUContext, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, -+ MMU_Levelx_INFO **ppsLevel, -+ IMG_UINT32 *pui32PTEIndex) -+{ -+ MMU_Levelx_INFO *psLocalLevel = NULL; -+ MMU_LEVEL eMMULevel = psMMUContext->psDevAttrs->psBaseConfig->ePxLevel; -+ IMG_UINT32 uiPCEIndex; -+ IMG_UINT32 uiPDEIndex; -+ -+ if ((eMMULevel <= MMU_LEVEL_0) || (eMMULevel >= MMU_LEVEL_LAST)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTEInfo: Invalid MMU level")); -+ PVR_ASSERT(0); -+ } -+ -+ for (; eMMULevel > MMU_LEVEL_0; eMMULevel--) -+ { -+ if (eMMULevel == MMU_LEVEL_3) -+ { -+ /* find the page directory containing the PCE */ -+ uiPCEIndex = _CalcPCEIdx (sDevVAddr, psDevVAddrConfig, -+ IMG_FALSE); -+ psLocalLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiPCEIndex]; -+ } -+ -+ if (eMMULevel == MMU_LEVEL_2) -+ { -+ /* find the page table containing the PDE */ -+ uiPDEIndex = _CalcPDEIdx (sDevVAddr, psDevVAddrConfig, -+ IMG_FALSE); -+ if (psLocalLevel != NULL) -+ { -+ psLocalLevel = psLocalLevel->apsNextLevel[uiPDEIndex]; -+ } -+ else -+ { -+ psLocalLevel = -+ psMMUContext->sBaseLevelInfo.apsNextLevel[uiPDEIndex]; -+ } -+ } -+ -+ if (eMMULevel == MMU_LEVEL_1) -+ { -+ /* find PTE index into page table */ -+ *pui32PTEIndex = _CalcPTEIdx (sDevVAddr, psDevVAddrConfig, -+ IMG_FALSE); -+ if (psLocalLevel == NULL) -+ { -+ psLocalLevel = &psMMUContext->sBaseLevelInfo; -+ } -+ } -+ } -+ *ppsLevel = psLocalLevel; -+} -+ -+/*************************************************************************/ /*! -+@Function _MMU_GetPTConfig -+ -+@Description Get the level config. Call _MMU_PutPTConfig after use! -+ -+@Input psMMUContext MMU context to operate on -+ -+@Input uiLog2DataPageSize Log 2 of the page size -+ -+@Output ppsConfig Config of the PTE -+ -+@Output phPriv Private data handle to be passed back -+ when the info is put -+ -+@Output ppsDevVAddrConfig Config of the device virtual addresses -+ -+@Return None -+ */ -+/*****************************************************************************/ -+static INLINE void _MMU_GetPTConfig(MMU_CONTEXT *psMMUContext, -+ IMG_UINT32 uiLog2DataPageSize, -+ const MMU_PxE_CONFIG **ppsConfig, -+ IMG_HANDLE *phPriv, -+ const MMU_DEVVADDR_CONFIG **ppsDevVAddrConfig) -+{ -+ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; -+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; -+ const MMU_PxE_CONFIG *psPDEConfig; -+ const MMU_PxE_CONFIG *psPTEConfig; -+ -+ if (psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize, -+ &psPDEConfig, -+ &psPTEConfig, -+ &psDevVAddrConfig, -+ phPriv) != PVRSRV_OK) -+ { -+ /* -+ There should be no way we got here unless uiLog2DataPageSize -+ has changed after the MMU_Alloc call (in which case it's a bug in -+ the MM code) -+ */ -+ PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTConfig: Could not get valid page size config")); -+ PVR_ASSERT(0); -+ } -+ -+ *ppsConfig = psPTEConfig; -+ *ppsDevVAddrConfig = psDevVAddrConfig; -+} -+ -+/*************************************************************************/ /*! -+@Function _MMU_PutPTConfig -+ -+@Description Put the level info. Has to be called after _MMU_GetPTConfig to -+ ensure correct refcounting. -+ -+@Input psMMUContext MMU context to operate on -+ -+@Input phPriv Private data handle created by -+ _MMU_GetPTConfig. 
-+ -+@Return None -+ */ -+/*****************************************************************************/ -+static INLINE void _MMU_PutPTConfig(MMU_CONTEXT *psMMUContext, -+ IMG_HANDLE hPriv) -+{ -+ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; -+ -+ if (psDevAttrs->pfnPutPageSizeConfiguration(hPriv) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Could not put page size config", -+ __func__)); -+ PVR_ASSERT(0); -+ } -+} -+ -+/* scratch / zero pages */ -+ -+static PVRSRV_ERROR _MMU_GetBackingPage(PVRSRV_DEVICE_NODE *psDevNode, -+ PVRSRV_DEF_PAGE *psDefPage, -+ IMG_INT uiInitValue, -+ IMG_CHAR *pcDefPageName, -+ IMG_BOOL bInitPage) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_DEV_PHYADDR sDevPAddr = {0}; -+ -+ OSLockAcquire(psDefPage->psPgLock); -+ -+ if (psDefPage->ui64PgPhysAddr != MMU_BAD_PHYS_ADDR) -+ { -+ goto UnlockAndReturn; -+ } -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDevNode, "Alloc %s page object", pcDefPageName); -+#endif -+ -+ /* Allocate the scratch / zero page required for physical backing -+ * of virtual ranges -+ */ -+ eError = DevPhysMemAlloc(psDevNode, -+ (1 << psDefPage->ui32Log2PgSize), -+ 0, -+ uiInitValue, -+ bInitPage, -+#if defined(PDUMP) -+ psDevNode->psMMUDevAttrs->pszMMUPxPDumpMemSpaceName, -+ pcDefPageName, -+ &psDefPage->hPdumpPg, -+#endif -+ PVR_SYS_ALLOC_PID, -+ &psDefPage->sPageHandle, -+ &sDevPAddr); -+ PVR_GOTO_IF_ERROR(eError, UnlockAndReturn); -+ -+ psDefPage->ui64PgPhysAddr = sDevPAddr.uiAddr; -+ -+UnlockAndReturn: -+ OSLockRelease(psDefPage->psPgLock); -+ -+ return eError; -+} -+ -+static void _MMU_FreeBackingPage(PVRSRV_DEVICE_NODE *psDevNode, -+ PVRSRV_DEF_PAGE *psDefPage, -+ IMG_CHAR *pcDefPageName) -+{ -+ OSLockAcquire(psDefPage->psPgLock); -+ -+ if (psDefPage->ui64PgPhysAddr == MMU_BAD_PHYS_ADDR) -+ { -+ goto UnlockAndReturn; -+ } -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDevNode, "Free %s page object", pcDefPageName); -+#endif -+ -+ DevPhysMemFree(psDevNode, -+#if defined(PDUMP) -+ psDefPage->hPdumpPg, -+#endif -+ &psDefPage->sPageHandle); -+ -+#if defined(PDUMP) -+ psDefPage->hPdumpPg = NULL; -+#endif -+ psDefPage->ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; -+ -+UnlockAndReturn: -+ OSLockRelease(psDefPage->psPgLock); -+} -+ -+ -+/***************************************************************************** -+ * Public interface functions * -+ *****************************************************************************/ -+ -+/* -+ MMU_InitDevice -+*/ -+PVRSRV_ERROR MMU_InitDevice(struct _PVRSRV_DEVICE_NODE_ *psDevNode) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Set the order to 0 */ -+ psDevNode->sScratchPage.sPageHandle.uiOrder = 0; -+ psDevNode->sDevZeroPage.sPageHandle.uiOrder = 0; -+ -+ /* Set the size of the Scratch and Zero pages to largest page size */ -+ if (psDevNode->ui32Non4KPageSizeLog2 != 0) -+ { -+ psDevNode->sScratchPage.ui32Log2PgSize = psDevNode->ui32Non4KPageSizeLog2; -+ psDevNode->sDevZeroPage.ui32Log2PgSize = psDevNode->ui32Non4KPageSizeLog2; -+ } -+ else -+ { -+ psDevNode->sScratchPage.ui32Log2PgSize = OSGetPageShift(); -+ psDevNode->sDevZeroPage.ui32Log2PgSize = OSGetPageShift(); -+ } -+ -+ /* Set the Scratch page phys addr */ -+ psDevNode->sScratchPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; -+ -+ /* Set the Zero page phys addr */ -+ psDevNode->sDevZeroPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; -+ -+ /* The lock can be acquired from MISR (Z-buffer) path */ -+ eError = OSLockCreate(&psDevNode->sScratchPage.psPgLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate.Scratch", ErrReturnError); -+ -+ /* Create the lock for 
zero page */ -+ eError = OSLockCreate(&psDevNode->sDevZeroPage.psPgLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate.Zero", ErrFreeScratchPageLock); -+ -+#ifdef PDUMP -+ psDevNode->sScratchPage.hPdumpPg = NULL; -+ psDevNode->sDevZeroPage.hPdumpPg = NULL; -+ -+ eError = _MMU_GetBackingPage(psDevNode, -+ &psDevNode->sScratchPage, -+ PVR_SCRATCH_PAGE_INIT_VALUE, -+ SCRATCH_PAGE, -+ IMG_TRUE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_MMU_GetBackingPage.Scratch", ErrFreeZeroPageLock); -+ -+ eError = _MMU_GetBackingPage(psDevNode, -+ &psDevNode->sDevZeroPage, -+ PVR_ZERO_PAGE_INIT_VALUE, -+ DEV_ZERO_PAGE, -+ IMG_TRUE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_MMU_GetBackingPage.Zero", ErrFreeScratchPage); -+#endif /* PDUMP */ -+ -+ return PVRSRV_OK; -+ -+#ifdef PDUMP -+ErrFreeScratchPage: -+ _MMU_FreeBackingPage(psDevNode, &psDevNode->sScratchPage, SCRATCH_PAGE); -+ErrFreeZeroPageLock: -+ OSLockDestroy(psDevNode->sDevZeroPage.psPgLock); -+ psDevNode->sDevZeroPage.psPgLock = NULL; -+#endif /* PDUMP */ -+ErrFreeScratchPageLock: -+ OSLockDestroy(psDevNode->sScratchPage.psPgLock); -+ psDevNode->sScratchPage.psPgLock = NULL; -+ErrReturnError: -+ return eError; -+} -+ -+/* -+ MMU_DeInitDevice -+*/ -+void MMU_DeInitDevice(struct _PVRSRV_DEVICE_NODE_ *psDevNode) -+{ -+ if (psDevNode->sScratchPage.psPgLock != NULL) -+ { -+ _MMU_FreeBackingPage(psDevNode, &psDevNode->sScratchPage, SCRATCH_PAGE); -+ -+ OSLockDestroy(psDevNode->sScratchPage.psPgLock); -+ psDevNode->sScratchPage.psPgLock = NULL; -+ } -+ -+ if (psDevNode->sDevZeroPage.psPgLock) -+ { -+ _MMU_FreeBackingPage(psDevNode, &psDevNode->sDevZeroPage, DEV_ZERO_PAGE); -+ -+ -+ OSLockDestroy(psDevNode->sDevZeroPage.psPgLock); -+ psDevNode->sDevZeroPage.psPgLock = NULL; -+ } -+} -+ -+/* -+ MMU_ContextCreate -+ */ -+PVRSRV_ERROR -+MMU_ContextCreate(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ MMU_CONTEXT **ppsMMUContext, -+ MMU_DEVICEATTRIBS *psDevAttrs) -+{ -+ MMU_CONTEXT *psMMUContext; -+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; -+ const MMU_PxE_CONFIG *psConfig; -+ MMU_PHYSMEM_CONTEXT *psPhysMemCtx; -+ IMG_UINT32 ui32BaseObjects; -+ IMG_UINT32 ui32Size; -+ IMG_CHAR sBuf[40]; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDevNode, "MMU context create"); -+#endif -+ -+ psConfig = psDevAttrs->psBaseConfig; -+ psDevVAddrConfig = psDevAttrs->psTopLevelDevVAddrConfig; -+ -+ switch (psDevAttrs->psBaseConfig->ePxLevel) -+ { -+ case MMU_LEVEL_3: -+ ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPC; -+ break; -+ -+ case MMU_LEVEL_2: -+ ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPD; -+ break; -+ -+ case MMU_LEVEL_1: -+ ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPT; -+ break; -+ -+ default: -+ PVR_LOG_GOTO_WITH_ERROR("psDevAttrs->psBaseConfig->ePxLevel", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); -+ } -+ -+ /* Allocate the MMU context with the Level 1 Px info's */ -+ ui32Size = sizeof(MMU_CONTEXT) + -+ ((ui32BaseObjects - 1) * sizeof(MMU_Levelx_INFO *)); -+ -+ psMMUContext = OSAllocZMem(ui32Size); -+ PVR_LOG_GOTO_IF_NOMEM(psMMUContext, eError, e0); -+ -+#if defined(PDUMP) -+ /* Clear the refcount */ -+ psMMUContext->ui32PDumpContextIDRefCount = 0; -+#endif -+ /* Record Device specific attributes in the context for subsequent use */ -+ psMMUContext->psDevAttrs = psDevAttrs; -+ -+ /* -+ Allocate physmem context and set it up -+ */ -+ psPhysMemCtx = OSAllocZMem(sizeof(MMU_PHYSMEM_CONTEXT)); -+ PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx, eError, e1); -+ -+ psMMUContext->psPhysMemCtx = psPhysMemCtx; -+ 
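-+	/* Record the owning connection and device node next: the connection feeds
-+	 * the per-OSid handling further down, and the device node is kept so that
-+	 * later page-table allocations and cache maintenance can reach the
-+	 * device's MMU physheap. */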
psMMUContext->psConnection = psConnection; -+ -+ psPhysMemCtx->psDevNode = psDevNode; /* Needed for Direct Bridge case */ -+ psPhysMemCtx->psMMUContext = psMMUContext; /* Back-link to self */ -+ -+#if defined(SUPPORT_CUSTOM_OSID_EMISSION) -+ /* Save the app-specific values for external reference via MMU_GetOSids. */ -+ if (!_MMU_IS_FWKM_CTX(psMMUContext)) -+ { -+ psPhysMemCtx->ui32OSid = psConnection->ui32OSid; -+ psPhysMemCtx->ui32OSidReg = psConnection->ui32OSidReg; -+ psPhysMemCtx->bOSidAxiProt = psConnection->bOSidAxiProtReg; -+ } -+ else -+ { -+ /* Direct Bridge calling sequence e.g. Firmware */ -+ psPhysMemCtx->ui32OSid = 0; -+ psPhysMemCtx->ui32OSidReg = 0; -+ psPhysMemCtx->bOSidAxiProt = IMG_FALSE; -+ } -+#endif -+ -+ OSSNPrintf(sBuf, sizeof(sBuf), "pgtables %p", psPhysMemCtx); -+ psPhysMemCtx->uiPhysMemRANameAllocSize = OSStringLength(sBuf)+1; -+ psPhysMemCtx->pszPhysMemRAName = OSAllocMem(psPhysMemCtx->uiPhysMemRANameAllocSize); -+ PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx->pszPhysMemRAName, eError, e2); -+ -+ OSStringLCopy(psPhysMemCtx->pszPhysMemRAName, sBuf, psPhysMemCtx->uiPhysMemRANameAllocSize); -+ -+ psPhysMemCtx->psPhysMemRA = RA_Create(psPhysMemCtx->pszPhysMemRAName, -+ /* subsequent import */ -+ PhysHeapGetPageShift(psDevNode->psMMUPhysHeap), -+ RA_LOCKCLASS_1, -+ _MMU_PhysMem_RAImportAlloc, -+ _MMU_PhysMem_RAImportFree, -+ psPhysMemCtx, /* priv */ -+ RA_POLICY_DEFAULT); -+ if (psPhysMemCtx->psPhysMemRA == NULL) -+ { -+ OSFreeMem(psPhysMemCtx->pszPhysMemRAName); -+ psPhysMemCtx->pszPhysMemRAName = NULL; -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, e3); -+ } -+ -+ /* Setup cleanup meta data to check if a MMU context -+ * has been destroyed and should not be accessed anymore */ -+ psPhysMemCtx->psCleanupData = OSAllocMem(sizeof(*(psPhysMemCtx->psCleanupData))); -+ PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx->psCleanupData, eError, e4); -+ -+#if defined(SUPPORT_CUSTOM_OSID_EMISSION) -+ /* Record the originating OSid for all allocation / free for this context */ -+ psPhysMemCtx->psCleanupData->ui32OSid = psPhysMemCtx->ui32OSid; -+#endif /* defined(SUPPORT_CUSTOM_OSID_EMISSION) */ -+ OSLockCreate(&psPhysMemCtx->psCleanupData->hCleanupLock); -+ psPhysMemCtx->psCleanupData->bMMUContextExists = IMG_TRUE; -+ dllist_init(&psPhysMemCtx->psCleanupData->sMMUCtxCleanupItemsHead); -+ OSAtomicWrite(&psPhysMemCtx->psCleanupData->iRef, 1); -+ -+ /* allocate the base level object */ -+ /* -+ Note: Although this is not required by the this file until -+ the 1st allocation is made, a device specific callback -+ might request the base object address so we allocate -+ it up front. -+ In VZ we only need to allocate the FW/KM ctx on the -+ Host machine as all Guest tables are alloc'd and pre-mapped into -+ the host FW Memory Context. 
-+ */ -+ if (!_MMU_IS_FWKM_CTX_VZGUEST(psMMUContext)) -+ { -+ if (_PxMemAlloc(psMMUContext, -+ ui32BaseObjects, -+ psConfig, -+ psDevAttrs->psBaseConfig->ePxLevel, -+ &psMMUContext->sBaseLevelInfo.sMemDesc, -+ psDevAttrs->ui32BaseAlign)) -+ { -+ PVR_LOG_GOTO_WITH_ERROR("_PxMemAlloc", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e5); -+ } -+ } -+ -+ dllist_init(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead); -+ -+ psMMUContext->sBaseLevelInfo.ui32NumOfEntries = ui32BaseObjects; -+ psMMUContext->sBaseLevelInfo.ui32RefCount = 0; -+ -+ /* Support cores that need to adjust the base level Px */ -+ if (psDevNode->pfnMMUTopLevelPxWorkarounds != NULL) -+ { -+ PVRSRV_ERROR eError; -+ MMU_MEMORY_DESC *psMemDesc = &psMMUContext->sBaseLevelInfo.sMemDesc; -+ -+ psDevNode->pfnMMUTopLevelPxWorkarounds(psConnection, -+ psDevNode, -+ psMemDesc->sDevPAddr, -+ psMemDesc->pvCpuVAddr); -+ -+ eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, -+ &psMemDesc->psMapping->sMemHandle, -+ psMemDesc->uiOffset, -+ psMemDesc->uiSize); -+ PVR_LOG_IF_ERROR(eError, "PhysHeapPagesClean"); -+ } -+ -+ eError = OSLockCreate(&psMMUContext->hLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e6); -+ -+ /* return context */ -+ *ppsMMUContext = psMMUContext; -+ -+ return PVRSRV_OK; -+ -+e6: -+ _PxMemFree(psMMUContext, &psMMUContext->sBaseLevelInfo.sMemDesc, psDevAttrs->psBaseConfig->ePxLevel); -+e5: -+ OSFreeMem(psPhysMemCtx->psCleanupData); -+e4: -+ RA_Delete(psPhysMemCtx->psPhysMemRA); -+e3: -+ OSFreeMem(psPhysMemCtx->pszPhysMemRAName); -+e2: -+ OSFreeMem(psPhysMemCtx); -+e1: -+ OSFreeMem(psMMUContext); -+e0: -+ return eError; -+} -+ -+/* -+ MMU_ContextDestroy -+ */ -+void -+MMU_ContextDestroy (MMU_CONTEXT *psMMUContext) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PDLLIST_NODE psNode, psNextNode; -+ -+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psMMUContext->psPhysMemCtx->psDevNode; -+ MMU_CTX_CLEANUP_DATA *psCleanupData = psMMUContext->psPhysMemCtx->psCleanupData; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Enter", __func__)); -+ -+ if (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) -+ { -+ /* There should be no way to get here with live pages unless -+ there is a bug in this module or the MM code */ -+ PVR_ASSERT(psMMUContext->sBaseLevelInfo.ui32RefCount == 0); -+ } -+ -+ /* Cleanup lock must be acquired before MMUContext lock. Reverse order -+ * may lead to a deadlock and is reported by lockdep. */ -+ OSLockAcquire(psCleanupData->hCleanupLock); -+ OSLockAcquire(psMMUContext->hLock); -+ -+ /* Free the top level MMU object - will be put on defer free list. -+ * This has to be done before the step below that will empty the -+ * defer-free list. 
*/ -+ if (!_MMU_IS_FWKM_CTX_VZGUEST(psMMUContext)) -+ { -+ _PxMemFree(psMMUContext, -+ &psMMUContext->sBaseLevelInfo.sMemDesc, -+ psMMUContext->psDevAttrs->psBaseConfig->ePxLevel); -+ } -+ -+ /* Empty the temporary defer-free list of Px */ -+ _FreeMMUMapping(psDevNode, &psMMUContext->psPhysMemCtx->sTmpMMUMappingHead); -+ PVR_ASSERT(dllist_is_empty(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead)); -+ -+ /* Empty the defer free list so the cleanup thread will -+ * not have to access any MMU context related structures anymore */ -+ dllist_foreach_node(&psCleanupData->sMMUCtxCleanupItemsHead, -+ psNode, -+ psNextNode) -+ { -+ MMU_CLEANUP_ITEM *psCleanup = IMG_CONTAINER_OF(psNode, -+ MMU_CLEANUP_ITEM, -+ sMMUCtxCleanupItem); -+ -+ _FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead); -+ -+ dllist_remove_node(psNode); -+ } -+ PVR_ASSERT(dllist_is_empty(&psCleanupData->sMMUCtxCleanupItemsHead)); -+ -+ psCleanupData->bMMUContextExists = IMG_FALSE; -+ -+ /* Free physmem context */ -+ RA_Delete(psMMUContext->psPhysMemCtx->psPhysMemRA); -+ psMMUContext->psPhysMemCtx->psPhysMemRA = NULL; -+ OSFreeMem(psMMUContext->psPhysMemCtx->pszPhysMemRAName); -+ psMMUContext->psPhysMemCtx->pszPhysMemRAName = NULL; -+ -+ OSFreeMem(psMMUContext->psPhysMemCtx); -+ -+ OSLockRelease(psMMUContext->hLock); -+ -+ OSLockRelease(psCleanupData->hCleanupLock); -+ -+ if (OSAtomicDecrement(&psCleanupData->iRef) == 0) -+ { -+ OSLockDestroy(psCleanupData->hCleanupLock); -+ OSFreeMem(psCleanupData); -+ } -+ -+ OSLockDestroy(psMMUContext->hLock); -+ -+ /* free the context itself. */ -+ OSFreeMem(psMMUContext); -+ /*not nulling pointer, copy on stack*/ -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Exit", __func__)); -+} -+ -+/* -+ MMU_Alloc -+ */ -+PVRSRV_ERROR -+MMU_Alloc (MMU_CONTEXT *psMMUContext, -+ IMG_DEVMEM_SIZE_T uSize, -+ IMG_DEVMEM_SIZE_T *puActualSize, -+ IMG_UINT32 uiProtFlags, -+ IMG_DEVMEM_SIZE_T uDevVAddrAlignment, -+ IMG_DEV_VIRTADDR *psDevVAddr, -+ IMG_UINT32 uiLog2PageSize) -+{ -+ PVRSRV_ERROR eError; -+ IMG_DEV_VIRTADDR sDevVAddrEnd; -+ -+ const MMU_PxE_CONFIG *psPDEConfig; -+ const MMU_PxE_CONFIG *psPTEConfig; -+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; -+ -+ MMU_DEVICEATTRIBS *psDevAttrs; -+ IMG_HANDLE hPriv; -+ -+#if !defined(DEBUG) -+ PVR_UNREFERENCED_PARAMETER(uDevVAddrAlignment); -+#endif -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: uSize=" IMG_DEVMEM_SIZE_FMTSPEC -+ ", uiProtFlags=0x%x, align="IMG_DEVMEM_ALIGN_FMTSPEC, -+ __func__, uSize, uiProtFlags, uDevVAddrAlignment)); -+ -+ /* check params */ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psMMUContext, "psMMUContext"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDevVAddr, "psDevVAddr"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(puActualSize, "puActualSize"); -+ -+ psDevAttrs = psMMUContext->psDevAttrs; -+ -+ eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2PageSize, -+ &psPDEConfig, -+ &psPTEConfig, -+ &psDevVAddrConfig, -+ &hPriv); -+ PVR_LOG_RETURN_IF_ERROR(eError, "pfnGetPageSizeConfiguration"); -+ -+ /* size and alignment must be datapage granular */ -+ if (((psDevVAddr->uiAddr & psDevVAddrConfig->uiPageOffsetMask) != 0) -+ || ((uSize & psDevVAddrConfig->uiPageOffsetMask) != 0)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: invalid address or size granularity", -+ __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ sDevVAddrEnd = *psDevVAddr; -+ sDevVAddrEnd.uiAddr += uSize; -+ -+ OSLockAcquire(psMMUContext->hLock); -+ -+ -+ /* This is to divert generation of firmware pre-mapped page -+ * tables to a stand alone MMU driver. 
*/ -+ if ((psDevAttrs->pfnTestPremapConfigureMMU != NULL) && -+ _MMU_IS_FWKM_CTX(psMMUContext)) -+ { -+ eError = psDevAttrs->pfnTestPremapConfigureMMU( -+ psMMUContext->psPhysMemCtx->psDevNode, psMMUContext, -+ *psDevVAddr, sDevVAddrEnd, -+ uiLog2PageSize); -+ } -+ else -+ { -+ eError = _AllocPageTables(psMMUContext, *psDevVAddr, sDevVAddrEnd, uiLog2PageSize); -+ } -+ OSLockRelease(psMMUContext->hLock); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "_AllocPageTables"); -+ return PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES; -+ } -+ -+ psDevAttrs->pfnPutPageSizeConfiguration(hPriv); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ MMU_Free -+ */ -+void -+MMU_Free (MMU_CONTEXT *psMMUContext, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 uiLog2DataPageSize) -+{ -+ IMG_DEV_VIRTADDR sDevVAddrEnd; -+ MMU_PHYSMEM_CONTEXT *psPhysMemCtx; -+#if defined(SUPPORT_MMU_DEFERRED_FREE) -+ PVRSRV_DEV_POWER_STATE ePowerState; -+ PVRSRV_ERROR eError; -+#endif -+ -+#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__) -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ IMG_UINT32 ui32MMULeakMax = psPVRSRVData->sMemLeakIntervals.ui32MMU; -+ -+ mutex_lock(&g_sMMULeakMutex); -+ -+ g_ui32MMULeakCounter++; -+ if (ui32MMULeakMax && g_ui32MMULeakCounter >= ui32MMULeakMax) -+ { -+ g_ui32MMULeakCounter = 0; -+ mutex_unlock(&g_sMMULeakMutex); -+ -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Skipped MMU free for address 0x%016" IMG_UINT64_FMTSPECx " to trigger memory leak.", -+ __func__, -+ sDevVAddr.uiAddr)); -+ return; -+ } -+ -+ mutex_unlock(&g_sMMULeakMutex); -+#endif -+ -+ PVR_ASSERT(psMMUContext != NULL); -+ PVR_LOG_RETURN_VOID_IF_FALSE(psMMUContext != NULL, "psMMUContext"); -+ -+ psPhysMemCtx = psMMUContext->psPhysMemCtx; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Freeing DevVAddr " IMG_DEV_VIRTADDR_FMTSPEC, -+ __func__, sDevVAddr.uiAddr)); -+ -+ /* ensure the address range to free is inside the heap */ -+ sDevVAddrEnd = sDevVAddr; -+ sDevVAddrEnd.uiAddr += uiSize; -+ -+ /* The Cleanup lock has to be taken before the MMUContext hLock to -+ * prevent deadlock scenarios. It is necessary only for parts of -+ * _SetupCleanup_FreeMMUMapping though.*/ -+ OSLockAcquire(psPhysMemCtx->psCleanupData->hCleanupLock); -+ -+ OSLockAcquire(psMMUContext->hLock); -+ -+ _FreePageTables(psMMUContext, -+ sDevVAddr, -+ sDevVAddrEnd, -+ uiLog2DataPageSize); -+ -+#if defined(SUPPORT_MMU_DEFERRED_FREE) -+ eError = PVRSRVGetDevicePowerState(psPhysMemCtx->psDevNode, &ePowerState); -+ if (eError != PVRSRV_OK) -+ { -+ /* Treat unknown power state as ON. 
*/ -+ ePowerState = PVRSRV_DEV_POWER_STATE_ON; -+ } -+ -+ if (ePowerState == PVRSRV_DEV_POWER_STATE_OFF) -+ { -+ _FreeMMUMapping(psPhysMemCtx->psDevNode, &psPhysMemCtx->sTmpMMUMappingHead); -+ } -+ else -+ { -+ _SetupCleanup_FreeMMUMapping(psMMUContext->psPhysMemCtx); -+ } -+#else -+ if (!dllist_is_empty(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead)) -+ { -+ _FreeMMUMapping(psPhysMemCtx->psDevNode, &psPhysMemCtx->sTmpMMUMappingHead); -+ } -+#endif -+ OSLockRelease(psMMUContext->hLock); -+ -+ OSLockRelease(psMMUContext->psPhysMemCtx->psCleanupData->hCleanupLock); -+} -+ -+PVRSRV_ERROR -+MMU_MapPages(MMU_CONTEXT *psMMUContext, -+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, -+ IMG_DEV_VIRTADDR sDevVAddrBase, -+ PMR *psPMR, -+ IMG_UINT32 ui32PhysPgOffset, -+ IMG_UINT32 ui32MapPageCount, -+ IMG_UINT32 *paui32MapIndices, -+ IMG_UINT32 uiLog2HeapPageSize) -+{ -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hPriv; -+ -+ MMU_Levelx_INFO *psLevel = NULL; -+ -+ MMU_Levelx_INFO *psPrevLevel = NULL; -+ -+ IMG_UINT32 uiPTEIndex = 0; -+ IMG_UINT32 uiPageSize = (1 << uiLog2HeapPageSize); -+ IMG_UINT32 uiLoop = 0; -+ IMG_UINT32 ui32MappedCount = 0; -+ IMG_DEVMEM_OFFSET_T uiPgOffset = 0; -+ IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0; -+ -+ IMG_UINT64 uiProtFlags = 0, uiProtFlagsReadOnly = 0, uiDefProtFlags=0; -+ MMU_PROTFLAGS_T uiMMUProtFlags = 0; -+ -+ const MMU_PxE_CONFIG *psConfig; -+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; -+ -+ IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; -+ -+ IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; -+ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; -+ IMG_DEV_PHYADDR *psDevPAddr; -+ IMG_DEV_PHYADDR sDevPAddr; -+ IMG_BOOL *pbValid; -+ IMG_BOOL bValid; -+ IMG_BOOL bScratchBacking = IMG_FALSE, bZeroBacking = IMG_FALSE; -+ IMG_BOOL bNeedBacking = IMG_FALSE; -+ PVRSRV_DEVICE_NODE *psDevNode; -+ -+#if defined(PDUMP) -+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; -+ IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; -+ IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset; -+ -+ PDUMPCOMMENT(psMMUContext->psPhysMemCtx->psDevNode, "Wire up Page Table entries to point to the Data Pages (%"IMG_INT64_FMTSPECd" bytes)", -+ (IMG_UINT64)(ui32MapPageCount * uiPageSize)); -+#endif /*PDUMP*/ -+ -+ /* Validate the most essential parameters */ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psMMUContext != NULL, "psMMUContext"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psPMR != NULL, "psPMR"); -+ -+ psDevNode = psMMUContext->psPhysMemCtx->psDevNode; -+ -+ /* Allocate memory for page-frame-numbers and validity states, -+ N.B. 
assert could be triggered by an illegal uiSizeBytes */ -+ if (ui32MapPageCount > PMR_MAX_TRANSLATION_STACK_ALLOC) -+ { -+ psDevPAddr = OSAllocMem(ui32MapPageCount * sizeof(IMG_DEV_PHYADDR)); -+ PVR_LOG_GOTO_IF_NOMEM(psDevPAddr, eError, ErrReturnError); -+ -+ pbValid = OSAllocMem(ui32MapPageCount * sizeof(IMG_BOOL)); -+ PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, ErrFreePAddrMappingArray); -+ } -+ else -+ { -+ psDevPAddr = asDevPAddr; -+ pbValid = abValid; -+ } -+ -+ /* Get the Device physical addresses of the pages we are trying to map -+ * In the case of non indexed mapping we can get all addresses at once */ -+ if (NULL == paui32MapIndices) -+ { -+ eError = PMR_DevPhysAddr(psPMR, -+ uiLog2HeapPageSize, -+ ui32MapPageCount, -+ ((IMG_DEVMEM_OFFSET_T) ui32PhysPgOffset << uiLog2HeapPageSize), -+ psDevPAddr, -+ pbValid, -+ DEVICE_USE); -+ PVR_GOTO_IF_ERROR(eError, ErrFreeValidArray); -+ } -+ -+ /*Get the Page table level configuration */ -+ _MMU_GetPTConfig(psMMUContext, -+ (IMG_UINT32) uiLog2HeapPageSize, -+ &psConfig, -+ &hPriv, -+ &psDevVAddrConfig); -+ -+ eError = _MMU_ConvertDevMemFlags(IMG_FALSE, -+ uiMappingFlags, -+ &uiMMUProtFlags, -+ psMMUContext); -+ PVR_GOTO_IF_ERROR(eError, ErrPutPTConfig); -+ -+ /* Callback to get device specific protection flags */ -+ if (psConfig->uiBytesPerEntry == 8) -+ { -+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize); -+ uiMMUProtFlags |= MMU_PROTFLAGS_READABLE; -+ uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt8((uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE), -+ uiLog2HeapPageSize); -+ } -+ else if (psConfig->uiBytesPerEntry == 4) -+ { -+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); -+ uiMMUProtFlags |= MMU_PROTFLAGS_READABLE; -+ uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt4((uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE)); -+ } -+ else -+ { -+ PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_INVALID_PARAMS, ErrPutPTConfig); -+ } -+ -+ if (PMR_IsSparse(psPMR)) -+ { -+ /* We know there will not be 4G number of PMR's */ -+ bScratchBacking = PVRSRV_IS_SPARSE_SCRATCH_BACKING_REQUIRED(PMR_Flags(psPMR)); -+ if (bScratchBacking) -+ { -+ bZeroBacking = PVRSRV_IS_ZERO_BACKING_REQUIRED(PMR_Flags(psPMR)); -+ } -+ -+ if (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiMappingFlags)) -+ { -+ /* Obtain non-coherent protection flags as we cannot have multiple coherent -+ virtual pages pointing to the same physical page so all scratch page -+ mappings have to be non-coherent even in a coherent allocation */ -+ eError = _MMU_ConvertDevMemFlags(IMG_FALSE, -+ uiMappingFlags & ~PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT, -+ &uiMMUProtFlags, -+ psMMUContext); -+ PVR_GOTO_IF_ERROR(eError, ErrPutPTConfig); -+ -+ /* We've already validated possible values of uiBytesPerEntry at the start of this function */ -+ PVR_ASSERT(psConfig->uiBytesPerEntry == 4 || psConfig->uiBytesPerEntry == 8); -+ } -+ } -+ -+ OSLockAcquire(psMMUContext->hLock); -+ -+ for (uiLoop = 0; uiLoop < ui32MapPageCount; uiLoop++) -+ { -+ -+#if defined(PDUMP) -+ IMG_DEVMEM_OFFSET_T uiNextSymName; -+#endif /*PDUMP*/ -+ -+ if (NULL != paui32MapIndices) -+ { -+ uiPgOffset = paui32MapIndices[uiLoop]; -+ -+ /*Calculate the Device Virtual Address of the page */ -+ sDevVAddr.uiAddr = sDevVAddrBase.uiAddr + (uiPgOffset * uiPageSize); -+ -+ /* Get the physical address to map */ -+ eError = PMR_DevPhysAddr(psPMR, -+ uiLog2HeapPageSize, -+ 1, -+ uiPgOffset * uiPageSize, -+ &sDevPAddr, -+ &bValid, -+ DEVICE_USE); -+ 
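-+			/* A failure from this point onwards is unwound via
-+			 * ErrUnlockAndUnmapPages, which drops the context lock and
-+			 * unmaps only the uiLoop entries wired up so far. */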
PVR_GOTO_IF_ERROR(eError, ErrUnlockAndUnmapPages); -+ } -+ else -+ { -+ uiPgOffset = uiLoop + ui32PhysPgOffset; -+ sDevPAddr = psDevPAddr[uiLoop]; -+ bValid = pbValid[uiLoop]; -+ } -+ -+ uiDefProtFlags = uiProtFlags; -+ /* -+ The default value of the entry is invalid so we don't need to mark -+ it as such if the page wasn't valid, we just advance pass that address -+ */ -+ if (bValid || bScratchBacking) -+ { -+ if (!bValid) -+ { -+ if (bZeroBacking) -+ { -+ eError = _MMU_GetBackingPage(psDevNode, -+ &psDevNode->sDevZeroPage, -+ PVR_ZERO_PAGE_INIT_VALUE, -+ DEV_ZERO_PAGE, -+ IMG_TRUE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_MMU_GetBackingPage", -+ ErrUnlockAndUnmapPages); -+ -+ sDevPAddr.uiAddr = psDevNode->sDevZeroPage.ui64PgPhysAddr; -+ /* Ensure the zero back page PTE is read only */ -+ uiDefProtFlags = uiProtFlagsReadOnly; -+ } -+ else -+ { -+ eError = _MMU_GetBackingPage(psDevNode, -+ &psDevNode->sScratchPage, -+ PVR_SCRATCH_PAGE_INIT_VALUE, -+ SCRATCH_PAGE, -+ IMG_TRUE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_MMU_GetBackingPage", -+ ErrUnlockAndUnmapPages); -+ -+ sDevPAddr.uiAddr = psDevNode->sScratchPage.ui64PgPhysAddr; -+ } -+ } -+ else -+ { -+ /* check the physical alignment of the memory to map */ -+ PVR_ASSERT((sDevPAddr.uiAddr & (uiPageSize-1)) == 0); -+ } -+ -+#if defined(DEBUG) -+ { -+ IMG_INT32 i32FeatureVal = 0; -+ IMG_UINT32 ui32BitLength = FloorLog2(sDevPAddr.uiAddr); -+ -+ i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, PHYS_BUS_WIDTH); -+ do { -+ /* i32FeatureVal can be negative for cases where this feature is undefined -+ * In that situation we need to bail out than go ahead with debug comparison */ -+ if (0 > i32FeatureVal) -+ break; -+ -+ if (ui32BitLength > i32FeatureVal) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s Failed. The physical address bitlength (%d)" -+ " is greater than the chip can handle (%d).", -+ __func__, ui32BitLength, i32FeatureVal)); -+ -+ PVR_ASSERT(ui32BitLength <= i32FeatureVal); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto ErrUnlockAndUnmapPages; -+ } -+ } while (0); -+ } -+#endif /*DEBUG*/ -+ -+#if defined(PDUMP) -+ if (bValid) -+ { -+ eError = PMR_PDumpSymbolicAddr(psPMR, uiPgOffset * uiPageSize, -+ sizeof(aszMemspaceName), &aszMemspaceName[0], -+ sizeof(aszSymbolicAddress), &aszSymbolicAddress[0], -+ &uiSymbolicAddrOffset, -+ &uiNextSymName); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ } -+#endif /*PDUMP*/ -+ -+ psPrevLevel = psLevel; -+ /* Calculate PT index and get new table descriptor */ -+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, -+ &psLevel, &uiPTEIndex); -+ -+ if (psPrevLevel == psLevel) -+ { -+ /* -+ * Sparse allocations may have page offsets which -+ * decrement as well as increment, so make sure we -+ * update the range we will flush correctly. -+ */ -+ if (uiPTEIndex > uiFlushEnd) -+ uiFlushEnd = uiPTEIndex; -+ else if (uiPTEIndex < uiFlushStart) -+ uiFlushStart = uiPTEIndex; -+ } -+ else -+ { -+ /* Flush if we moved to another psLevel, i.e. 
page table */ -+ if (psPrevLevel != NULL) -+ { -+ eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, -+ &psPrevLevel->sMemDesc.psMapping->sMemHandle, -+ uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset, -+ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); -+ PVR_GOTO_IF_ERROR(eError, ErrUnlockAndUnmapPages); -+ } -+ -+ uiFlushStart = uiPTEIndex; -+ uiFlushEnd = uiFlushStart; -+ } -+ -+ HTBLOGK(HTB_SF_MMU_PAGE_OP_MAP, -+ HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr), -+ HTBLOG_U64_BITS_HIGH(sDevPAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevPAddr.uiAddr)); -+ -+ /* Set the PT entry with the specified address and protection flags */ -+ eError = _SetupPTE(psMMUContext, -+ psLevel, -+ uiPTEIndex, -+ psConfig, -+ &sDevPAddr, -+ IMG_FALSE, -+#if defined(PDUMP) -+ (bValid)?aszMemspaceName:(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName), -+ ((bValid)?aszSymbolicAddress:((bZeroBacking)?DEV_ZERO_PAGE:SCRATCH_PAGE)), -+ (bValid)?uiSymbolicAddrOffset:0, -+#endif /*PDUMP*/ -+ uiDefProtFlags); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_SetupPTE", ErrUnlockAndUnmapPages); -+ -+ if (bValid) -+ { -+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); -+ PVR_DPF ((PVR_DBG_MESSAGE, -+ "%s: devVAddr=" IMG_DEV_VIRTADDR_FMTSPEC ", " -+ "size=" IMG_DEVMEM_OFFSET_FMTSPEC, -+ __func__, -+ sDevVAddr.uiAddr, -+ uiPgOffset * uiPageSize)); -+ -+ ui32MappedCount++; -+ } -+ } -+ -+ sDevVAddr.uiAddr += uiPageSize; -+ } -+ -+ /* Flush the last level we touched */ -+ if (psLevel != NULL) -+ { -+ eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, -+ &psLevel->sMemDesc.psMapping->sMemHandle, -+ uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, -+ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); -+ PVR_GOTO_IF_ERROR(eError, ErrUnlockAndUnmapPages); -+ } -+ -+ OSLockRelease(psMMUContext->hLock); -+ -+ _MMU_PutPTConfig(psMMUContext, hPriv); -+ -+ if (psDevPAddr != asDevPAddr) -+ { -+ OSFreeMem(pbValid); -+ OSFreeMem(psDevPAddr); -+ } -+ -+ /* Flush TLB for PTs*/ -+ psDevNode->pfnMMUCacheInvalidate(psDevNode, -+ psMMUContext, -+ MMU_LEVEL_1, -+ IMG_FALSE); -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDevNode, "Wired up %d Page Table entries (out of %d)", ui32MappedCount, ui32MapPageCount); -+#endif /*PDUMP*/ -+ -+ return PVRSRV_OK; -+ -+ErrUnlockAndUnmapPages: -+ OSLockRelease(psMMUContext->hLock); -+ -+ if (PMR_IsSparse(psPMR) && PVRSRV_IS_SPARSE_SCRATCH_BACKING_REQUIRED(uiMappingFlags)) -+ { -+ bNeedBacking = IMG_TRUE; -+ } -+ -+ MMU_UnmapPages(psMMUContext, -+ (bNeedBacking) ? 
uiMappingFlags : 0, -+ sDevVAddrBase, -+ uiLoop, -+ paui32MapIndices, -+ uiLog2HeapPageSize, -+ uiMappingFlags); -+ErrPutPTConfig: -+ _MMU_PutPTConfig(psMMUContext, hPriv); -+ErrFreeValidArray: -+ if (psDevPAddr != asDevPAddr) -+ { -+ OSFreeMem(pbValid); -+ } -+ErrFreePAddrMappingArray: -+ if (psDevPAddr != asDevPAddr) -+ { -+ OSFreeMem(psDevPAddr); -+ } -+ErrReturnError: -+ return eError; -+} -+ -+/* -+ MMU_UnmapPages -+ */ -+void -+MMU_UnmapPages(MMU_CONTEXT *psMMUContext, -+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, -+ IMG_DEV_VIRTADDR sDevVAddrBase, -+ IMG_UINT32 ui32PageCount, -+ IMG_UINT32 *pai32FreeIndices, -+ IMG_UINT32 uiLog2PageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags) -+{ -+ IMG_UINT32 uiPTEIndex = 0, ui32Loop=0; -+ IMG_UINT32 uiPageSize = 1 << uiLog2PageSize; -+ IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0; -+ MMU_Levelx_INFO *psLevel = NULL; -+ MMU_Levelx_INFO *psPrevLevel = NULL; -+ IMG_HANDLE hPriv; -+ const MMU_PxE_CONFIG *psConfig; -+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; -+ IMG_UINT64 uiProtFlags = 0, uiProtFlagsReadOnly = 0; -+ MMU_PROTFLAGS_T uiMMUProtFlags = 0, uiMMUReadOnlyProtFlags = 0; -+ IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; -+ IMG_DEV_PHYADDR sBackingPgDevPhysAddr; -+ IMG_BOOL bUnmap = IMG_TRUE, bScratchBacking = IMG_FALSE, bZeroBacking = IMG_FALSE; -+ PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; -+ -+#if defined(PDUMP) -+ const IMG_CHAR *pcBackingPageName = NULL; -+ -+ PDUMPCOMMENT(psDevNode, -+ "Invalidate %d entries in page tables for virtual range: 0x%010"IMG_UINT64_FMTSPECX" to 0x%010"IMG_UINT64_FMTSPECX, -+ ui32PageCount, -+ (IMG_UINT64)sDevVAddr.uiAddr, -+ ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1); -+#endif -+ bScratchBacking = PVRSRV_IS_SPARSE_SCRATCH_BACKING_REQUIRED(uiMemAllocFlags); -+ bZeroBacking = PVRSRV_IS_ZERO_BACKING_REQUIRED(uiMemAllocFlags); -+ -+ if (bZeroBacking) -+ { -+ sBackingPgDevPhysAddr.uiAddr = psDevNode->sDevZeroPage.ui64PgPhysAddr; -+ } -+ else -+ { -+ sBackingPgDevPhysAddr.uiAddr = psDevNode->sScratchPage.ui64PgPhysAddr; -+ } -+ -+#if defined(PDUMP) -+ if (bScratchBacking) -+ { -+ pcBackingPageName = bZeroBacking ? DEV_ZERO_PAGE : SCRATCH_PAGE; -+ } -+#endif -+ -+ bUnmap = (uiMappingFlags)? 
!bScratchBacking : IMG_TRUE; -+ /* Get PT and address configs */ -+ _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize, -+ &psConfig, &hPriv, &psDevVAddrConfig); -+ -+ if (_MMU_ConvertDevMemFlags(bUnmap, -+ uiMappingFlags, -+ &uiMMUProtFlags, -+ psMMUContext) != PVRSRV_OK) -+ { -+ return; -+ } -+ -+ uiMMUReadOnlyProtFlags = (uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE) | MMU_PROTFLAGS_READABLE; -+ -+ /* Callback to get device specific protection flags */ -+ if (psConfig->uiBytesPerEntry == 4) -+ { -+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); -+ uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUReadOnlyProtFlags); -+ } -+ else if (psConfig->uiBytesPerEntry == 8) -+ { -+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize); -+ uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUReadOnlyProtFlags, uiLog2PageSize); -+ } -+ -+ -+ OSLockAcquire(psMMUContext->hLock); -+ -+ /* Unmap page by page */ -+ while (ui32Loop < ui32PageCount) -+ { -+ if (NULL != pai32FreeIndices) -+ { -+ /*Calculate the Device Virtual Address of the page */ -+ sDevVAddr.uiAddr = sDevVAddrBase.uiAddr + -+ pai32FreeIndices[ui32Loop] * (IMG_UINT64) uiPageSize; -+ } -+ -+ psPrevLevel = psLevel; -+ /* Calculate PT index and get new table descriptor */ -+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, -+ &psLevel, &uiPTEIndex); -+ -+ if (psPrevLevel == psLevel) -+ { -+ /* -+ * Sparse allocations may have page offsets which -+ * decrement as well as increment, so make sure we -+ * update the range we will flush correctly. -+ */ -+ if (uiPTEIndex > uiFlushEnd) -+ uiFlushEnd = uiPTEIndex; -+ else if (uiPTEIndex < uiFlushStart) -+ uiFlushStart = uiPTEIndex; -+ } -+ else -+ { -+ /* Flush if we moved to another psLevel, i.e. page table */ -+ if (psPrevLevel != NULL) -+ { -+ PhysHeapPagesClean(psDevNode->psMMUPhysHeap, -+ &psPrevLevel->sMemDesc.psMapping->sMemHandle, -+ uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset, -+ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); -+ } -+ -+ uiFlushStart = uiPTEIndex; -+ uiFlushEnd = uiFlushStart; -+ } -+ -+ HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP, -+ HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr)); -+ -+ /* Set the PT entry to invalid and poison it with a bad address */ -+ if (_SetupPTE(psMMUContext, -+ psLevel, -+ uiPTEIndex, -+ psConfig, -+ (bScratchBacking)? &sBackingPgDevPhysAddr : &gsBadDevPhyAddr, -+ bUnmap, -+#if defined(PDUMP) -+ (bScratchBacking)? (psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName): NULL, -+ pcBackingPageName, -+ 0U, -+#endif -+ (bZeroBacking)? 
uiProtFlagsReadOnly: uiProtFlags) != PVRSRV_OK) -+ { -+ goto e0; -+ } -+ -+ /* Check we haven't wrapped around */ -+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); -+ ui32Loop++; -+ sDevVAddr.uiAddr += uiPageSize; -+ } -+ -+ /* Flush the last level we touched */ -+ if (psLevel != NULL) -+ { -+ PhysHeapPagesClean(psDevNode->psMMUPhysHeap, -+ &psLevel->sMemDesc.psMapping->sMemHandle, -+ uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, -+ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); -+ } -+ -+ OSLockRelease(psMMUContext->hLock); -+ -+ _MMU_PutPTConfig(psMMUContext, hPriv); -+ -+ /* Flush TLB for PTs*/ -+ psDevNode->pfnMMUCacheInvalidate(psDevNode, -+ psMMUContext, -+ MMU_LEVEL_1, -+ IMG_TRUE); -+ -+ return; -+ -+e0: -+ _MMU_PutPTConfig(psMMUContext, hPriv); -+ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Failed to map/unmap page table")); -+ PVR_ASSERT(0); -+ OSLockRelease(psMMUContext->hLock); -+ return; -+} -+ -+PVRSRV_ERROR -+MMUX_MapVRangeToBackingPage(MMU_CONTEXT *psMMUContext, -+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, -+ IMG_DEV_VIRTADDR sDevVAddrBase, -+ IMG_UINT32 ui32MapPageCount, -+ IMG_UINT32 uiLog2HeapPageSize) -+{ -+ PVRSRV_ERROR eError; -+ -+ IMG_UINT32 uiPageSize = (1 << uiLog2HeapPageSize); -+ IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; -+ IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0; -+ IMG_UINT32 uiPTEIndex = 0; -+ -+ MMU_Levelx_INFO *psLevel = NULL; -+ MMU_Levelx_INFO *psPrevLevel = NULL; -+ -+ IMG_UINT32 uiLoop = 0; -+ -+ IMG_UINT64 uiProtFlags = 0, uiProtFlagsReadOnly = 0; -+ MMU_PROTFLAGS_T uiMMUProtFlags = 0; -+ IMG_BOOL bZeroBacking = PVRSRV_IS_ZERO_BACKING_REQUIRED(uiMappingFlags); -+ -+ const MMU_PxE_CONFIG *psConfig; -+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; -+ IMG_HANDLE hPriv; -+ -+ IMG_DEV_PHYADDR sDevPAddr; -+ -+ PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; -+ -+ /*Get the Page table level configuration */ -+ _MMU_GetPTConfig(psMMUContext, -+ (IMG_UINT32) uiLog2HeapPageSize, -+ &psConfig, -+ &hPriv, -+ &psDevVAddrConfig); -+ -+ eError = _MMU_ConvertDevMemFlags(IMG_FALSE, -+ uiMappingFlags, -+ &uiMMUProtFlags, -+ psMMUContext); -+ PVR_GOTO_IF_ERROR(eError, ErrPutPTConfig); -+ -+ /* Callback to get device specific protection flags */ -+ if (psConfig->uiBytesPerEntry == 8) -+ { -+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize); -+ uiMMUProtFlags |= MMU_PROTFLAGS_READABLE; -+ uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt8((uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE), -+ uiLog2HeapPageSize); -+ } -+ else if (psConfig->uiBytesPerEntry == 4) -+ { -+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); -+ uiMMUProtFlags |= MMU_PROTFLAGS_READABLE; -+ uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt4((uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE)); -+ } -+ else -+ { -+ PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_INVALID_PARAMS, ErrPutPTConfig); -+ } -+ -+ if (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiMappingFlags)) -+ { -+ /* Obtain non-coherent protection flags as we cannot have multiple coherent -+ virtual pages pointing to the same physical page so all scratch page -+ mappings have to be non-coherent even in a coherent allocation */ -+ eError = _MMU_ConvertDevMemFlags(IMG_FALSE, -+ uiMappingFlags & ~PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT, -+ &uiMMUProtFlags, -+ psMMUContext); -+ PVR_GOTO_IF_ERROR(eError, ErrPutPTConfig); -+ -+ /* We've already validated possible values 
of uiBytesPerEntry at the start of this function */ -+ PVR_ASSERT(psConfig->uiBytesPerEntry == 4 || psConfig->uiBytesPerEntry == 8); -+ } -+ -+ if (bZeroBacking) -+ { -+ eError = _MMU_GetBackingPage(psDevNode, -+ &psDevNode->sDevZeroPage, -+ PVR_ZERO_PAGE_INIT_VALUE, -+ DEV_ZERO_PAGE, -+ IMG_TRUE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_MMU_GetBackingPage", -+ ErrPutPTConfig); -+ sDevPAddr.uiAddr = psDevNode->sDevZeroPage.ui64PgPhysAddr; -+ } -+ else -+ { -+ eError = _MMU_GetBackingPage(psDevNode, -+ &psDevNode->sScratchPage, -+ PVR_SCRATCH_PAGE_INIT_VALUE, -+ SCRATCH_PAGE, -+ IMG_TRUE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_MMU_GetBackingPage", -+ ErrPutPTConfig); -+ sDevPAddr.uiAddr = psDevNode->sScratchPage.ui64PgPhysAddr; -+ } -+ -+#if defined(DEBUG) -+ { -+ IMG_INT32 i32FeatureVal = 0; -+ IMG_UINT32 ui32BitLength = FloorLog2(sDevPAddr.uiAddr); -+ -+ i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, PHYS_BUS_WIDTH); -+ do { -+ /* i32FeatureVal can be negative for cases where this feature is undefined -+ * In that situation we need to bail out than go ahead with debug comparison */ -+ if (0 > i32FeatureVal) -+ break; -+ -+ if (ui32BitLength > i32FeatureVal) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s Failed. The physical address bitlength (%d)" -+ " is greater than the chip can handle (%d).", -+ __func__, ui32BitLength, i32FeatureVal)); -+ -+ PVR_ASSERT(ui32BitLength <= i32FeatureVal); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto ErrPutPTConfig; -+ } -+ } while (0); -+ } -+#endif /*DEBUG*/ -+ -+ OSLockAcquire(psMMUContext->hLock); -+ -+ for (uiLoop = 0; uiLoop < ui32MapPageCount; uiLoop++) -+ { -+ psPrevLevel = psLevel; -+ /* Calculate PT index and get new table descriptor */ -+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, -+ &psLevel, &uiPTEIndex); -+ -+ if (psPrevLevel == psLevel) -+ { -+ /* -+ * Sparse allocations may have page offsets which -+ * decrement as well as increment, so make sure we -+ * update the range we will flush correctly. -+ */ -+ if (uiPTEIndex > uiFlushEnd) -+ uiFlushEnd = uiPTEIndex; -+ else if (uiPTEIndex < uiFlushStart) -+ uiFlushStart = uiPTEIndex; -+ } -+ else -+ { -+ /* Flush if we moved to another psLevel, i.e. page table */ -+ if (psPrevLevel != NULL) -+ { -+ eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, -+ &psPrevLevel->sMemDesc.psMapping->sMemHandle, -+ uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset, -+ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); -+ PVR_GOTO_IF_ERROR(eError, ErrUnlockAndUnmapPages); -+ } -+ -+ uiFlushStart = uiPTEIndex; -+ uiFlushEnd = uiFlushStart; -+ } -+ -+ HTBLOGK(HTB_SF_MMU_PAGE_OP_MAP, -+ HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr), -+ HTBLOG_U64_BITS_HIGH(sDevPAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevPAddr.uiAddr)); -+ -+ /* Set the PT entry with the specified address and protection flags */ -+ eError = _SetupPTE(psMMUContext, -+ psLevel, -+ uiPTEIndex, -+ psConfig, -+ &sDevPAddr, -+ IMG_FALSE, -+#if defined(PDUMP) -+ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, -+ bZeroBacking ? DEV_ZERO_PAGE : SCRATCH_PAGE, -+ 0, -+#endif /*PDUMP*/ -+ bZeroBacking ? 
uiProtFlagsReadOnly : uiProtFlags); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_SetupPTE", ErrUnlockAndUnmapPages); -+ -+ sDevVAddr.uiAddr += uiPageSize; -+ } -+ -+ /* Flush the last level we touched */ -+ if (psLevel != NULL) -+ { -+ eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, -+ &psLevel->sMemDesc.psMapping->sMemHandle, -+ uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, -+ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); -+ PVR_GOTO_IF_ERROR(eError, ErrUnlockAndUnmapPages); -+ } -+ -+ OSLockRelease(psMMUContext->hLock); -+ -+ _MMU_PutPTConfig(psMMUContext, hPriv); -+ -+ /* Flush TLB for PTs*/ -+ psDevNode->pfnMMUCacheInvalidate(psDevNode, -+ psMMUContext, -+ MMU_LEVEL_1, -+ IMG_FALSE); -+ -+ return PVRSRV_OK; -+ -+ErrUnlockAndUnmapPages: -+ OSLockRelease(psMMUContext->hLock); -+ -+ MMU_UnmapPages(psMMUContext, -+ 0, -+ sDevVAddrBase, -+ uiLoop, -+ NULL, -+ uiLog2HeapPageSize, -+ uiMappingFlags); -+ErrPutPTConfig: -+ _MMU_PutPTConfig(psMMUContext, hPriv); -+ return eError; -+} -+ -+PVRSRV_ERROR -+MMU_MapPMRFast(MMU_CONTEXT *psMMUContext, -+ IMG_DEV_VIRTADDR sDevVAddrBase, -+ const PMR *psPMR, -+ IMG_DEVMEM_SIZE_T uiSizeBytes, -+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, -+ IMG_UINT32 uiLog2HeapPageSize) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; -+ const MMU_PxE_CONFIG *psConfig; -+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; -+ IMG_HANDLE hPriv; -+ -+ IMG_UINT32 i, uiChunkStart, uiLastPTEIndex, uiNumEntriesToWrite; -+ IMG_UINT32 ui32PagesDone=0, uiPTEIndex=0; -+ -+ IMG_UINT8 uiAddrLog2Align, uiAddrShift; -+ IMG_UINT64 uiAddrMask, uiProtFlags; -+ IMG_UINT32 uiBytesPerEntry; -+ -+ IMG_UINT64* pui64LevelBase; -+ IMG_UINT32* pui32LevelBase; -+ MMU_PROTFLAGS_T uiMMUProtFlags = 0; -+ MMU_Levelx_INFO *psLevel = NULL; -+ -+ IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; -+ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; -+ IMG_UINT32 uiNumPages = uiSizeBytes >> uiLog2HeapPageSize; -+ -+#if defined(PVRSRV_ENABLE_HTB) -+ IMG_BOOL bHTBLog = -+ HTB_GROUP_ENABLED(HTB_SF_MMU_PAGE_OP_PMRMAP) || -+ HTB_GROUP_ENABLED(HTB_SF_MMU_PAGE_OP_TABLE); -+#endif -+ -+ IMG_BOOL bValidateOrTweak = psDevNode->pfnValidateOrTweakPhysAddrs ? 
IMG_TRUE : IMG_FALSE;
-+
-+#if defined(PDUMP)
-+	IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
-+	IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
-+	IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
-+	IMG_UINT32 ui32MappedCount = 0;
-+	PDUMPCOMMENT(psDevNode, "Wire up Page Table entries to point to the Data Pages (%d bytes)", uiNumPages << uiLog2HeapPageSize);
-+#endif /*PDUMP*/
-+
-+	PVR_ASSERT (psMMUContext != NULL);
-+	PVR_ASSERT (psPMR != NULL);
-+	PVR_ASSERT((IMG_DEVMEM_OFFSET_T)uiNumPages << uiLog2HeapPageSize == uiSizeBytes);
-+
-+	/* Get general PT and address configs */
-+	_MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2HeapPageSize,
-+	                 &psConfig, &hPriv, &psDevVAddrConfig);
-+
-+	eError = _MMU_ConvertDevMemFlags(IMG_FALSE,
-+	                                 uiMappingFlags,
-+	                                 &uiMMUProtFlags,
-+	                                 psMMUContext);
-+	PVR_GOTO_IF_ERROR(eError, put_mmu_context);
-+
-+	uiAddrLog2Align = psConfig->uiAddrLog2Align;
-+	uiAddrShift = psConfig->uiAddrShift;
-+	uiAddrMask = psConfig->uiAddrMask;
-+	uiBytesPerEntry = psConfig->uiBytesPerEntry;
-+
-+	if (uiBytesPerEntry == 8)
-+	{
-+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags, uiLog2HeapPageSize);
-+	}
-+	else if (uiBytesPerEntry == 4)
-+	{
-+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
-+	}
-+	else
-+	{
-+		PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_MMU_CONFIG_IS_WRONG, put_mmu_context);
-+	}
-+
-+	OSLockAcquire(psMMUContext->hLock);
-+
-+	do
-+	{
-+		_MMU_GetPTInfo(psMMUContext, sDevVAddrBase, psDevVAddrConfig,
-+		               &psLevel, &uiPTEIndex);
-+
-+		pui64LevelBase = (IMG_UINT64*)psLevel->sMemDesc.pvCpuVAddr;
-+		pui32LevelBase = (IMG_UINT32*)psLevel->sMemDesc.pvCpuVAddr;
-+
-+		uiLastPTEIndex = MIN(uiPTEIndex + uiNumPages - ui32PagesDone, psDevVAddrConfig->uiNumEntriesPT);
-+		uiNumEntriesToWrite = uiLastPTEIndex - uiPTEIndex;
-+
-+		for (uiChunkStart = 0; uiChunkStart < uiNumEntriesToWrite; uiChunkStart += PMR_MAX_TRANSLATION_STACK_ALLOC)
-+		{
-+			IMG_UINT32 uiNumPagesInBlock = MIN(uiNumEntriesToWrite - uiChunkStart, PMR_MAX_TRANSLATION_STACK_ALLOC);
-+
-+			eError = PMR_DevPhysAddr(psPMR,
-+			                         uiLog2HeapPageSize,
-+			                         uiNumPagesInBlock,
-+			                         (IMG_UINT64) (ui32PagesDone + uiChunkStart) << uiLog2HeapPageSize,
-+			                         asDevPAddr,
-+			                         abValid,
-+			                         DEVICE_USE);
-+			PVR_GOTO_IF_ERROR(eError, unlock_mmu_context);
-+
-+			if (bValidateOrTweak)
-+			{
-+				for (i=0; i<uiNumPagesInBlock; i++)
-+				{
-+					eError = psDevNode->pfnValidateOrTweakPhysAddrs(psDevNode,
-+					                                                psMMUContext->psDevAttrs,
-+					                                                &asDevPAddr[i].uiAddr);
-+					PVR_GOTO_IF_ERROR(eError, unlock_mmu_context);
-+				}
-+			}
-+
-+#if defined(DEBUG)
-+			{
-+				IMG_INT32 i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, PHYS_BUS_WIDTH);
-+
-+				if (i32FeatureVal >= 0)
-+				{
-+					for (i=0; i<uiNumPagesInBlock; i++)
-+					{
-+						IMG_UINT32 ui32BitLength = FloorLog2(asDevPAddr[i].uiAddr);
-+
-+						if (ui32BitLength > i32FeatureVal)
-+						{
-+							PVR_DPF((PVR_DBG_ERROR,
-+							         "%s Failed. The physical address bitlength (%d)"
-+							         " is greater than the chip can handle (%d).",
-+							         __func__, ui32BitLength, i32FeatureVal));
-+
-+							PVR_ASSERT(ui32BitLength <= i32FeatureVal);
-+							PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, unlock_mmu_context);
-+						}
-+					}
-+				}
-+			}
-+#endif /*DEBUG*/
-+
-+			if (uiBytesPerEntry == 8)
-+			{
-+				for (i=0; i<uiNumPagesInBlock; i++)
-+				{
-+					pui64LevelBase[uiPTEIndex + uiChunkStart + i] =
-+					    (((asDevPAddr[i].uiAddr >> uiAddrLog2Align) << uiAddrShift) & uiAddrMask) | uiProtFlags;
-+				}
-+			}
-+			else if (uiBytesPerEntry == 4)
-+			{
-+				for (i=0; i<uiNumPagesInBlock; i++)
-+				{
-+#if defined(DEBUG)
-+					IMG_UINT64 ui64PxE64 = (((asDevPAddr[i].uiAddr >> uiAddrLog2Align) << uiAddrShift) & uiAddrMask) | uiProtFlags;
-+					PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU));
-+#endif
-+
-+					pui32LevelBase[uiPTEIndex + uiChunkStart + i] =
-+					    (((asDevPAddr[i].uiAddr >> uiAddrLog2Align) << uiAddrShift) & uiAddrMask) | uiProtFlags;
-+				}
-+			}
-+
-+#if defined(PDUMP)
-+			for (i=0; i<uiNumPagesInBlock; i++)
-+			{
-+				IMG_DEVMEM_OFFSET_T uiNextSymName;
-+
-+				eError = PMR_PDumpSymbolicAddr(psPMR,
-+				                               (IMG_DEVMEM_OFFSET_T) (ui32PagesDone + uiChunkStart + i) << uiLog2HeapPageSize,
-+				                               sizeof(aszMemspaceName), &aszMemspaceName[0],
-+				                               sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
-+				                               &uiSymbolicAddrOffset,
-+				                               &uiNextSymName);
-+				PVR_ASSERT(eError == PVRSRV_OK);
-+				ui32MappedCount++;
-+
-+				PDumpMMUDumpPxEntries(psDevNode,
-+				                      MMU_LEVEL_1,
-+				                      psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
-+				                      psLevel->sMemDesc.pvCpuVAddr,
-+				                      psLevel->sMemDesc.sDevPAddr,
-+				                      uiPTEIndex + uiChunkStart + i,
-+				                      1,
-+				                      aszMemspaceName,
-+				                      aszSymbolicAddress,
-+				                      uiSymbolicAddrOffset,
-+				                      uiBytesPerEntry,
-+				                      uiAddrLog2Align,
-+				                      uiAddrShift,
-+				                      uiAddrMask,
-+				                      psConfig->uiProtMask,
-+				                      psConfig->uiValidEnMask,
-+				                      0,
-+				                      psMMUContext->psDevAttrs->eMMUType);
-+			}
-+#endif /*PDUMP*/
-+
-+#if defined(PVRSRV_ENABLE_HTB)
-+			if (bHTBLog)
-+			{
-+				for (i=0; i<uiNumPagesInBlock; i++)
-+				{
-+					IMG_DEV_VIRTADDR sDevVAddr;
-+					IMG_UINT64 ui64PxE64 = (((asDevPAddr[i].uiAddr >> uiAddrLog2Align) << uiAddrShift) & uiAddrMask) | uiProtFlags;
-+					sDevVAddr.uiAddr = sDevVAddrBase.uiAddr + (ui32PagesDone + uiChunkStart + i) * (1 << uiLog2HeapPageSize);
-+
-+					HTBLOGK(HTB_SF_MMU_PAGE_OP_PMRMAP,
-+					        HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr),
-+					        HTBLOG_U64_BITS_HIGH(asDevPAddr[i].uiAddr), HTBLOG_U64_BITS_LOW(asDevPAddr[i].uiAddr));
-+
-+					HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
-+					        HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
-+					        uiPTEIndex + uiChunkStart + i, MMU_LEVEL_1,
-+					        HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64),
-+					        IMG_FALSE);
-+				}
-+			}
-+#endif
-+		}
-+
-+		eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
-+		                            &psLevel->sMemDesc.psMapping->sMemHandle,
-+		                            uiPTEIndex * uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
-+		                            (uiNumEntriesToWrite) * uiBytesPerEntry);
-+		PVR_GOTO_IF_ERROR(eError, unlock_mmu_context);
-+
-+		sDevVAddrBase.uiAddr += uiNumEntriesToWrite * (1 << uiLog2HeapPageSize);
-+		ui32PagesDone += uiNumEntriesToWrite;
-+
-+	} while (ui32PagesDone < uiNumPages);
-+
-+	OSLockRelease(psMMUContext->hLock);
-+
-+#if defined(PDUMP)
-+	PDUMPCOMMENT(psDevNode, "Wired up %d Page Table entries (out of %d)", ui32MappedCount, uiNumPages);
-+#endif /*PDUMP*/
-+
-+	/* Flush TLB for PTs*/
-+	psDevNode->pfnMMUCacheInvalidate(psDevNode,
-+	                                 psMMUContext,
-+	                                 MMU_LEVEL_1,
-+	                                 IMG_FALSE);
-+
-+	_MMU_PutPTConfig(psMMUContext, hPriv);
-+
-+	return PVRSRV_OK;
-+
-+unlock_mmu_context:
-+	OSLockRelease(psMMUContext->hLock);
-+	MMU_UnmapPMRFast(psMMUContext,
-+	                 sDevVAddrBase,
-+	                 uiNumPages,
-+	                 uiLog2HeapPageSize);
-+put_mmu_context:
-+	_MMU_PutPTConfig(psMMUContext, hPriv);
-+	PVR_ASSERT(eError == PVRSRV_OK);
-+	return eError;
-+}
-+
-+/*
-+	MMU_UnmapPMRFast
-+ */
-+void
-+MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext,
-+                 IMG_DEV_VIRTADDR sDevVAddrBase,
-+                 IMG_UINT32 ui32PageCount,
-+                 IMG_UINT32 uiLog2PageSize)
-+{
-+	IMG_UINT32 uiPTEIndex = 0, uiLastPTEIndex = 0, ui32PagesDone=0, i, uiNumEntriesToWrite;
-+	IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
-+	MMU_Levelx_INFO *psLevel = NULL;
-+	IMG_HANDLE hPriv;
-+	void* pvPTStart;
-+	const MMU_PxE_CONFIG *psConfig;
-+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
-+	IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
-+	IMG_UINT64 uiProtFlags = 0;
-+	MMU_PROTFLAGS_T uiMMUProtFlags = 0;
-+	IMG_UINT64 uiEntry = 0;
-+	IMG_BOOL bLog;
-+
-+	PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
-+
-+	/* Get PT and address configs */
-+	_MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
-+	                 &psConfig, &hPriv, &psDevVAddrConfig);
-+
-+	if (_MMU_ConvertDevMemFlags(IMG_TRUE,
-+	                            0,
-+	                            &uiMMUProtFlags,
-+	                            psMMUContext) != PVRSRV_OK)
-+	{
-+		return;
-+	}
-+
-+	/* Callback to get device specific protection flags */
-+
-+	if (psConfig->uiBytesPerEntry == 8)
-+	{
-+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags, uiLog2PageSize);
-+
-+		/* Fill the entry with a bad address but leave space for protection flags */
-+		uiEntry = (gsBadDevPhyAddr.uiAddr & ~psConfig->uiProtMask) | uiProtFlags;
-+	}
-+	else if (psConfig->uiBytesPerEntry == 4)
-+	{
-+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
-+
-+		/* Fill the entry with a bad address but leave space for protection flags */
-+		uiEntry = (((IMG_UINT32) gsBadDevPhyAddr.uiAddr) & ~psConfig->uiProtMask) | (IMG_UINT32) uiProtFlags;
-+	}
-+	else
-+	{
-+		PVR_DPF((PVR_DBG_ERROR,
-+		         "%s: The page table entry byte length is not supported",
-+		         __func__));
-+		goto e0;
-+	}
-+
-+	bLog = HTB_GROUP_ENABLED(HTB_SF_MMU_PAGE_OP_UNMAP) ||
-+	       HTB_GROUP_ENABLED(HTB_SF_MMU_PAGE_OP_TABLE);
-+
-+	OSLockAcquire(psMMUContext->hLock);
-+
-+	do
-+	{
-+		_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
-+		               &psLevel, &uiPTEIndex);
-+
-+		pvPTStart = psLevel->sMemDesc.pvCpuVAddr;
-+
-+		uiLastPTEIndex = MIN(uiPTEIndex + ui32PageCount - ui32PagesDone, psDevVAddrConfig->uiNumEntriesPT);
-+		uiNumEntriesToWrite = uiLastPTEIndex - uiPTEIndex;
-+
-+		if (psConfig->uiBytesPerEntry == 8)
-+		{
-+			for (i=uiPTEIndex; i<uiLastPTEIndex; i++)
-+			{
-+				((IMG_UINT64*)pvPTStart)[i] = uiEntry;
-+
-+				if (bLog)
-+				{
-+					IMG_UINT64 ui64VAddr = sDevVAddr.uiAddr + (IMG_UINT64)(i - uiPTEIndex) * uiPageSize;
-+
-+					HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP,
-+					        HTBLOG_U64_BITS_HIGH(ui64VAddr), HTBLOG_U64_BITS_LOW(ui64VAddr));
-+					HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
-+					        HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
-+					        i, MMU_LEVEL_1,
-+					        HTBLOG_U64_BITS_HIGH(uiEntry), HTBLOG_U64_BITS_LOW(uiEntry),
-+					        IMG_FALSE);
-+				}
-+			}
-+		}
-+		else
-+		{
-+#if defined(DEBUG)
-+			PVR_ASSERT(psConfig->uiBytesPerEntry == 4);
-+#endif
-+			for (i=uiPTEIndex; i<uiLastPTEIndex; i++)
-+			{
-+				((IMG_UINT32*)pvPTStart)[i] = (IMG_UINT32) uiEntry;
-+
-+				if (bLog)
-+				{
-+					IMG_UINT64 ui64VAddr = sDevVAddr.uiAddr + (IMG_UINT64)(i - uiPTEIndex) * uiPageSize;
-+
-+					HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP,
-+					        HTBLOG_U64_BITS_HIGH(ui64VAddr), HTBLOG_U64_BITS_LOW(ui64VAddr));
-+					HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
-+					        HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
-+					        i, MMU_LEVEL_1,
-+					        HTBLOG_U64_BITS_HIGH(uiEntry), HTBLOG_U64_BITS_LOW(uiEntry),
-+					        IMG_FALSE);
-+				}
-+			}
-+		}
-+
-+#if defined(PDUMP)
-+		PDumpMMUDumpPxEntries(psDevNode,
-+		                      MMU_LEVEL_1,
-+		                      psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
-+		                      psLevel->sMemDesc.pvCpuVAddr,
-+		                      psLevel->sMemDesc.sDevPAddr,
-+		                      uiPTEIndex,
-+		                      uiNumEntriesToWrite,
-+		                      NULL,
-+		                      NULL,
-+		                      0,
-+		                      psConfig->uiBytesPerEntry,
-+		                      psConfig->uiAddrLog2Align,
-+		                      psConfig->uiAddrShift,
-+		                      psConfig->uiAddrMask,
-+		                      psConfig->uiProtMask,
-+		                      psConfig->uiValidEnMask,
-+		                      0,
-+		                      psMMUContext->psDevAttrs->eMMUType);
-+#endif /*PDUMP*/
-+
-+		PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
-+		                   &psLevel->sMemDesc.psMapping->sMemHandle,
-+		                   uiPTEIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
-+		                   (uiNumEntriesToWrite) * psConfig->uiBytesPerEntry);
-+
-+		sDevVAddr.uiAddr += uiNumEntriesToWrite * uiPageSize;
-+		ui32PagesDone += uiNumEntriesToWrite;
-+
-+	} while (ui32PagesDone < ui32PageCount);
-+
-+	OSLockRelease(psMMUContext->hLock);
-+
-+	_MMU_PutPTConfig(psMMUContext, hPriv);
-+
-+	/* Flush TLB for PTs*/
-+	psDevNode->pfnMMUCacheInvalidate(psDevNode,
-+	                                 psMMUContext,
-+	                                 MMU_LEVEL_1,
-+	                                 IMG_TRUE);
-+
-+	return;
-+
-+e0:
-+	PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map/unmap page table", __func__));
-+	PVR_ASSERT(0);
-+	return;
-+}
-+
-+/*
-+	MMU_AcquireBaseAddr
-+ */
-+PVRSRV_ERROR
-+MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr)
-+{
-+	if (!psMMUContext)
-+	{
-+		psPhysAddr->uiAddr = 0;
-+		return PVRSRV_ERROR_INVALID_PARAMS;
-+	}
-+
-+	if (_MMU_IS_FWKM_CTX_VZGUEST(psMMUContext))
-+	{
-+		PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_NOT_SUPPORTED,
-+		                        "MMU_AcquireBaseAddr");
-+	}
-+
-+	*psPhysAddr = psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr;
-+
-+	return PVRSRV_OK;
-+}
-+
-+/*
-+	MMU_AcquireCPUBaseAddr
-+ */
-+PVRSRV_ERROR
-+MMU_AcquireCPUBaseAddr(MMU_CONTEXT *psMMUContext, void 
**ppvCPUVAddr) -+{ -+ if (!psMMUContext) -+ { -+ *ppvCPUVAddr = NULL; -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (_MMU_IS_FWKM_CTX_VZGUEST(psMMUContext)) -+ { -+ PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_NOT_SUPPORTED, -+ "MMU_AcquireCPUBaseAddr"); -+ } -+ -+ *ppvCPUVAddr = psMMUContext->sBaseLevelInfo.sMemDesc.pvCpuVAddr; -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ MMU_ReleaseBaseAddr -+ */ -+void -+MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext) -+{ -+ PVR_UNREFERENCED_PARAMETER(psMMUContext); -+} -+ -+/* -+ MMU_AppendCacheFlags -+*/ -+ -+void MMU_AppendCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32AppendFlags) -+{ -+ PVR_ASSERT(psMMUContext != NULL); -+ -+ if (psMMUContext == NULL) -+ { -+ return; -+ } -+ -+ OSAtomicOr(&psMMUContext->sCacheFlags, (IMG_INT)ui32AppendFlags); -+} -+ -+/* -+ MMU_GetAndResetCacheFlags -+*/ -+IMG_UINT32 MMU_GetAndResetCacheFlags(MMU_CONTEXT *psMMUContext) -+{ -+ IMG_UINT32 uiFlags; -+ -+ PVR_ASSERT(psMMUContext != NULL); -+ if (psMMUContext == NULL) -+ { -+ return 0; -+ } -+ -+ uiFlags = (IMG_UINT32) OSAtomicExchange(&psMMUContext->sCacheFlags, 0); -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ /* kick cleanup thread to free all zombie PMRs residing in the device's -+ * zombie list */ -+ if (PMRQueueZombiesForCleanup(psMMUContext->psPhysMemCtx->psDevNode)) -+ { -+ BITMASK_SET(uiFlags, RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL | RGXFWIF_MMUCACHEDATA_FLAGS_PT); -+ } -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+ return uiFlags; -+} -+ -+#if defined(SUPPORT_CUSTOM_OSID_EMISSION) -+/* -+ MMU_GetOSids -+ */ -+ -+void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt) -+{ -+ *pui32OSid = psMMUContext->psPhysMemCtx->ui32OSid; -+ *pui32OSidReg = psMMUContext->psPhysMemCtx->ui32OSidReg; -+ *pbOSidAxiProt = psMMUContext->psPhysMemCtx->bOSidAxiProt; -+} -+ -+#endif -+ -+static IMG_BOOL _MMUGetPxEFaultLevelData(const MMU_CONTEXT *psMMUContext, -+ IMG_DEV_VIRTADDR *psDevVAddr, -+ const MMU_PxE_CONFIG *psConfig, -+ void *pvCpuVAddr, -+ IMG_UINT32 ui32PxIndex, -+ MMU_LEVEL_DATA *psMMULevelDataOut, -+ IMG_UINT32 *ui32Log2PageSizeOut) -+{ -+ static const IMG_CHAR *apszMMUValidStr[4] = { -+ /*--*/ "not valid", -+ /*-V*/ "valid", -+ /*P-*/ "pending", -+ /*PV*/ "inconsistent (pending and valid)" }; -+ #define _MMU_VALID_STR(_cfg, _entry) \ -+ (apszMMUValidStr[((((_entry)&(_cfg->uiPendingEnMask))!=0) << 1)| \ -+ ((((_entry)&(_cfg->uiValidEnMask))!=0) << 0)]) -+ #define _MMU_MASK_VALID_FOR_32BITS(_cfg) \ -+ ((_cfg->uiPendingEnMask | _cfg->uiValidEnMask) <= 0xFFFFFFFF) -+ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; -+ -+ if (psConfig->uiBytesPerEntry == 4) -+ { -+ IMG_UINT32 *pui32Ptr = pvCpuVAddr; -+ -+ psMMULevelDataOut->ui64Address = pui32Ptr[ui32PxIndex]; -+ -+ /* Check if we are dealing with a PxE entry where these bits are in the first 32 bits */ -+ /* But if we know it is a 4 byte entry, why check this? 
*/ -+ if (_MMU_MASK_VALID_FOR_32BITS(psConfig)) -+ { -+ psMMULevelDataOut->psDebugStr = _MMU_VALID_STR(psConfig, pui32Ptr[ui32PxIndex] & psConfig->uiProtMask); -+ } -+ else -+ { -+ psMMULevelDataOut->psDebugStr = ""; -+ PVR_DPF((PVR_DBG_ERROR, "Invalid %sE masks for 32-bit entry", psConfig->pszPxLevelStr)); -+ return IMG_FALSE; -+ } -+ -+ if (ui32Log2PageSizeOut != NULL) -+ { -+ if (psDevAttrs->pfnGetPageSizeFromPDE4(pui32Ptr[ui32PxIndex], ui32Log2PageSizeOut) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to get the page size from the %sE", psConfig->pszPxLevelStr)); -+ return IMG_FALSE; -+ } -+ } -+ } -+ else -+ { -+ IMG_UINT64 *pui64Ptr = pvCpuVAddr; -+ -+ psMMULevelDataOut->ui64Address = pui64Ptr[ui32PxIndex]; -+ psMMULevelDataOut->psDebugStr = _MMU_VALID_STR(psConfig, pui64Ptr[ui32PxIndex] & psConfig->uiProtMask); -+ -+ if (ui32Log2PageSizeOut != NULL) -+ { -+ /* MMU_VERSION >= 4 */ -+ if (psDevAttrs->pfnGetPageSizeFromVirtAddr != NULL) -+ { -+ if (psDevAttrs->pfnGetPageSizeFromVirtAddr(psMMUContext->psPhysMemCtx->psDevNode, *psDevVAddr, ui32Log2PageSizeOut) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to get the page size from device VA")); -+ return IMG_FALSE; -+ } -+ } -+ /* MMU_VERSION < 4 */ -+ else if (psDevAttrs->pfnGetPageSizeFromPDE8(pui64Ptr[ui32PxIndex], ui32Log2PageSizeOut) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to get the page size from the %sE", psConfig->pszPxLevelStr)); -+ return IMG_FALSE; -+ } -+ } -+ } -+ -+ return IMG_TRUE; -+} -+ -+/* -+ MMU_CheckFaultAddress -+ */ -+void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext, -+ IMG_DEV_VIRTADDR *psDevVAddr, -+ MMU_FAULT_DATA *psOutFaultData) -+{ -+ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; -+ MMU_LEVEL eMMULevel = psDevAttrs->psBaseConfig->ePxLevel; -+ const MMU_PxE_CONFIG *psConfig; -+ const MMU_PxE_CONFIG *psMMUPDEConfig; -+ const MMU_PxE_CONFIG *psMMUPTEConfig; -+ const MMU_DEVVADDR_CONFIG *psMMUDevVAddrConfig; -+ IMG_HANDLE hPriv; -+ MMU_Levelx_INFO *psLevel = NULL; -+ PVRSRV_ERROR eError; -+ IMG_UINT64 uiIndex; -+ IMG_UINT32 ui32PCIndex = 0xFFFFFFFF; -+ IMG_UINT32 ui32PDIndex = 0xFFFFFFFF; -+ IMG_UINT32 ui32PTIndex = 0xFFFFFFFF; -+ IMG_UINT32 ui32Log2PageSize; -+ MMU_FAULT_DATA sMMUFaultData = {0}; -+ MMU_LEVEL_DATA *psMMULevelData; -+ -+ OSLockAcquire(psMMUContext->hLock); -+ -+ /* -+ At this point we don't know the page size so assume it's 4K. -+ When we get the PD level (MMU_LEVEL_2) we can check to see -+ if this assumption is correct. 
-+ */ -+ eError = psDevAttrs->pfnGetPageSizeConfiguration(12, -+ &psMMUPDEConfig, -+ &psMMUPTEConfig, -+ &psMMUDevVAddrConfig, -+ &hPriv); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("Failed to get the page size info for log2 page sizeof 12")); -+ } -+ -+ psLevel = &psMMUContext->sBaseLevelInfo; -+ psConfig = psDevAttrs->psBaseConfig; -+ -+ sMMUFaultData.eTopLevel = psDevAttrs->psBaseConfig->ePxLevel; -+ sMMUFaultData.eType = MMU_FAULT_TYPE_NON_PM; -+ -+ -+ for (; eMMULevel > MMU_LEVEL_0; eMMULevel--) -+ { -+ if (eMMULevel == MMU_LEVEL_3) -+ { -+ /* Determine the PC index */ -+ uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexMask; -+ uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexShift; -+ ui32PCIndex = (IMG_UINT32) uiIndex; -+ PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PCIndex)); -+ -+ psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_3]; -+ psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry; -+ psMMULevelData->ui32Index = ui32PCIndex; -+ -+ if (ui32PCIndex >= psLevel->ui32NumOfEntries) -+ { -+ psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries; -+ break; -+ } -+ -+ (void) _MMUGetPxEFaultLevelData(psMMUContext, psDevVAddr, psConfig, -+ psLevel->sMemDesc.pvCpuVAddr, -+ ui32PCIndex, -+ psMMULevelData, NULL); -+ -+ psLevel = psLevel->apsNextLevel[ui32PCIndex]; -+ if (!psLevel) -+ { -+ break; -+ } -+ psConfig = psMMUPDEConfig; -+ continue; /* continue to the next level */ -+ } -+ -+ -+ if (eMMULevel == MMU_LEVEL_2) -+ { -+ /* Determine the PD index */ -+ uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexMask; -+ uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexShift; -+ ui32PDIndex = (IMG_UINT32) uiIndex; -+ PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PDIndex)); -+ -+ psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_2]; -+ psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry; -+ psMMULevelData->ui32Index = ui32PDIndex; -+ -+ if (ui32PDIndex >= psLevel->ui32NumOfEntries) -+ { -+ psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries; -+ break; -+ } -+ -+ (void) _MMUGetPxEFaultLevelData(psMMUContext, psDevVAddr, psConfig, -+ psLevel->sMemDesc.pvCpuVAddr, -+ ui32PDIndex, -+ psMMULevelData, &ui32Log2PageSize); -+ -+ /* -+ We assumed the page size was 4K, now we have the actual size -+ from the PDE we can confirm if our assumption was correct. 
-+ Until now it hasn't mattered as the PC and PD are the same -+ regardless of the page size -+ */ -+ if (ui32Log2PageSize != 12) -+ { -+ /* Put the 4K page size data */ -+ psDevAttrs->pfnPutPageSizeConfiguration(hPriv); -+ -+ /* Get the correct size data */ -+ eError = psDevAttrs->pfnGetPageSizeConfiguration(ui32Log2PageSize, -+ &psMMUPDEConfig, -+ &psMMUPTEConfig, -+ &psMMUDevVAddrConfig, -+ &hPriv); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("Failed to get the page size info for log2 page sizeof %d", ui32Log2PageSize)); -+ break; -+ } -+ } -+ -+ psLevel = psLevel->apsNextLevel[ui32PDIndex]; -+ if (!psLevel) -+ { -+ break; -+ } -+ psConfig = psMMUPTEConfig; -+ continue; /* continue to the next level */ -+ } -+ -+ -+ if (eMMULevel == MMU_LEVEL_1) -+ { -+ /* Determine the PT index */ -+ uiIndex = psDevVAddr->uiAddr & psMMUDevVAddrConfig->uiPTIndexMask; -+ uiIndex = uiIndex >> psMMUDevVAddrConfig->uiPTIndexShift; -+ ui32PTIndex = (IMG_UINT32) uiIndex; -+ PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PTIndex)); -+ -+ psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_1]; -+ psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry; -+ psMMULevelData->ui32Index = ui32PTIndex; -+ -+ if (ui32PTIndex >= psLevel->ui32NumOfEntries) -+ { -+ psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries; -+ break; -+ } -+ -+ (void) _MMUGetPxEFaultLevelData(psMMUContext, psDevVAddr, psConfig, -+ psLevel->sMemDesc.pvCpuVAddr, -+ ui32PTIndex, -+ psMMULevelData, NULL); -+ break; -+ } -+ -+ PVR_LOG(("Unsupported MMU setup: %d", eMMULevel)); -+ break; -+ } -+ -+ /* Put the page size data back */ -+ psDevAttrs->pfnPutPageSizeConfiguration(hPriv); -+ OSLockRelease(psMMUContext->hLock); -+ -+ *psOutFaultData = sMMUFaultData; -+} -+ -+static IMG_UINT64 MMU_GetVDevAddrPTE(MMU_CONTEXT *psMMUContext, -+ const MMU_PxE_CONFIG *psConfig, -+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, -+ IMG_UINT32 uiLog2PageSize, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_BOOL *pbStatusOut) -+{ -+ MMU_Levelx_INFO *psLevel = NULL; -+ IMG_UINT32 uiIndex = 0; -+ IMG_BOOL bStatus = IMG_FALSE; -+ IMG_UINT64 ui64Entry = 0; -+ -+ OSLockAcquire(psMMUContext->hLock); -+ -+ switch (psMMUContext->psDevAttrs->psBaseConfig->ePxLevel) -+ { -+ case MMU_LEVEL_3: -+ uiIndex = _CalcPCEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE); -+ psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex]; -+ if (psLevel == NULL) -+ break; -+ -+ __fallthrough; -+ case MMU_LEVEL_2: -+ uiIndex = _CalcPDEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE); -+ -+ if (psLevel != NULL) -+ psLevel = psLevel->apsNextLevel[uiIndex]; -+ else -+ psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex]; -+ -+ if (psLevel == NULL) -+ break; -+ -+ __fallthrough; -+ case MMU_LEVEL_1: -+ uiIndex = _CalcPTEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE); -+ -+ if (psLevel == NULL) -+ psLevel = &psMMUContext->sBaseLevelInfo; -+ -+ ui64Entry = ((IMG_UINT64 *)psLevel->sMemDesc.pvCpuVAddr)[uiIndex]; -+ bStatus = ui64Entry & psConfig->uiValidEnMask; -+ -+ break; -+ default: -+ PVR_LOG(("MMU_IsVDevAddrValid: Unsupported MMU setup")); -+ break; -+ } -+ -+ OSLockRelease(psMMUContext->hLock); -+ -+ *pbStatusOut = bStatus; -+ -+ return ui64Entry; -+} -+ -+IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext, -+ IMG_UINT32 uiLog2PageSize, -+ IMG_DEV_VIRTADDR sDevVAddr) -+{ -+ IMG_BOOL bStatus; -+ const MMU_PxE_CONFIG *psConfig; -+ IMG_HANDLE hPriv; -+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; -+ -+ _MMU_GetPTConfig(psMMUContext, uiLog2PageSize, &psConfig, &hPriv, &psDevVAddrConfig); -+ -+ 
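/* The walk below goes from the base level through PD to PT for sDevVAddr
 * and reports, via bStatus, whether the final page table entry has its
 * valid bit set; the raw entry value returned by MMU_GetVDevAddrPTE() is
 * not used by this caller. */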
MMU_GetVDevAddrPTE(psMMUContext, -+ psConfig, -+ psDevVAddrConfig, -+ uiLog2PageSize, -+ sDevVAddr, -+ &bStatus); -+ -+ _MMU_PutPTConfig(psMMUContext, hPriv); -+ -+ return bStatus; -+} -+ -+#if defined(PDUMP) -+/* -+ MMU_ContextDerivePCPDumpSymAddr -+ */ -+PVRSRV_ERROR MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext, -+ IMG_CHAR *pszPDumpSymbolicNameBuffer, -+ size_t uiPDumpSymbolicNameBufferSize) -+{ -+ size_t uiCount; -+ IMG_UINT64 ui64PhysAddr; -+ PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psPhysMemCtx->psDevNode->sDevId; -+ -+ if (!psMMUContext->sBaseLevelInfo.sMemDesc.bValid) -+ { -+ /* We don't have any allocations. You're not allowed to ask -+ * for the page catalogue base address until you've made at -+ * least one allocation. -+ */ -+ return PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR; -+ } -+ -+ ui64PhysAddr = (IMG_UINT64)psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr.uiAddr; -+ -+ PVR_ASSERT(uiPDumpSymbolicNameBufferSize >= (IMG_UINT32)(21 + OSStringLength(psDevId->pszPDumpDevName))); -+ -+ /* Page table Symbolic Name is formed from page table phys addr -+ prefixed with MMUPT_. */ -+ uiCount = OSSNPrintf(pszPDumpSymbolicNameBuffer, -+ uiPDumpSymbolicNameBufferSize, -+ ":%s:%s%016"IMG_UINT64_FMTSPECX, -+ psDevId->pszPDumpDevName, -+ psMMUContext->sBaseLevelInfo.sMemDesc.bValid?"MMUPC_":"XXX", -+ ui64PhysAddr); -+ -+ if (uiCount + 1 > uiPDumpSymbolicNameBufferSize) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ MMU_PDumpWritePageCatBase -+ */ -+PVRSRV_ERROR -+MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext, -+ const IMG_CHAR *pszSpaceName, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT32 ui32WordSize, -+ IMG_UINT32 ui32AlignShift, -+ IMG_UINT32 ui32Shift, -+ PDUMP_FLAGS_T uiPdumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ IMG_CHAR aszPageCatBaseSymbolicAddr[100]; -+ const IMG_CHAR *pszPDumpDevName = psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName; -+ -+ eError = MMU_ContextDerivePCPDumpSymAddr(psMMUContext, -+ &aszPageCatBaseSymbolicAddr[0], -+ sizeof(aszPageCatBaseSymbolicAddr)); -+ if (eError == PVRSRV_OK) -+ { -+ eError = PDumpWriteSymbAddress(psMMUContext->psPhysMemCtx->psDevNode, -+ pszSpaceName, -+ uiOffset, -+ aszPageCatBaseSymbolicAddr, -+ 0, /* offset -- Could be non-zero for var. 
pgsz */ -+ pszPDumpDevName, -+ ui32WordSize, -+ ui32AlignShift, -+ ui32Shift, -+ uiPdumpFlags | PDUMP_FLAGS_CONTINUOUS); -+ } -+ -+ return eError; -+} -+ -+/* -+ MMU_AcquirePDumpMMUContext -+ */ -+PVRSRV_ERROR MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext, -+ IMG_UINT32 *pui32PDumpMMUContextID, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psPhysMemCtx->psDevNode->sDevId; -+ -+ if (!psMMUContext->ui32PDumpContextIDRefCount) -+ { -+ PDUMP_MMU_ALLOC_MMUCONTEXT(psMMUContext->psPhysMemCtx->psDevNode, -+ psDevId->pszPDumpDevName, -+ psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr, -+ psMMUContext->psDevAttrs->eMMUType, -+ &psMMUContext->uiPDumpContextID, -+ ui32PDumpFlags); -+ } -+ -+ psMMUContext->ui32PDumpContextIDRefCount++; -+ *pui32PDumpMMUContextID = psMMUContext->uiPDumpContextID; -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ MMU_ReleasePDumpMMUContext -+ */ -+PVRSRV_ERROR MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psPhysMemCtx->psDevNode->sDevId; -+ -+ PVR_ASSERT(psMMUContext->ui32PDumpContextIDRefCount != 0); -+ psMMUContext->ui32PDumpContextIDRefCount--; -+ -+ if (psMMUContext->ui32PDumpContextIDRefCount == 0) -+ { -+ PDUMP_MMU_FREE_MMUCONTEXT(psMMUContext->psPhysMemCtx->psDevNode, -+ psDevId->pszPDumpDevName, -+ psMMUContext->uiPDumpContextID, -+ ui32PDumpFlags); -+ } -+ -+ return PVRSRV_OK; -+} -+#endif -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+PVRSRV_ERROR MMU_CacheInvalidateKick(PPVRSRV_DEVICE_NODE psDeviceNode, -+ IMG_UINT32 *puiRequiredSyncValue) -+{ -+ IMG_UINT32 uiRequiredSyncValue; -+ PVRSRV_ERROR eError; -+ -+ eError = psDeviceNode->pfnMMUCacheInvalidateKick(psDeviceNode, &uiRequiredSyncValue); -+ -+ if (puiRequiredSyncValue != NULL) -+ { -+ *puiRequiredSyncValue = uiRequiredSyncValue; -+ } -+ -+ return eError; -+} -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+/****************************************************************************** -+ End of file (mmu_common.c) -+ ******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/mmu_common.h b/drivers/gpu/drm/img-rogue/mmu_common.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/mmu_common.h -@@ -0,0 +1,831 @@ -+/*************************************************************************/ /*! -+@File -+@Title Common MMU Management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements basic low level control of MMU. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef MMU_COMMON_H -+#define MMU_COMMON_H -+ -+/* -+ The Memory Management Unit (MMU) performs device virtual to physical -+ translation. -+ -+ Terminology: -+ - page catalogue, PC (optional, 3 tier MMU) -+ - page directory, PD -+ - page table, PT (can be variable sized) -+ - data page, DP (can be variable sized) -+ Note: PD and PC are fixed size and can't be larger than the native -+ physical (CPU) page size -+ Shifts and AlignShift variables: -+ - 'xxxShift' represent the number of bits a bitfield is shifted left from bit0 -+ - 'xxxAlignShift' is used to convert a bitfield (based at bit0) into byte units -+ by applying a bit shift left by 'xxxAlignShift' bits -+*/ -+ -+/* -+ Device Virtual Address Config: -+ -+ Incoming Device Virtual Address is deconstructed into up to 4 -+ fields, where the virtual address is up to 64bits: -+ MSB-----------------------------------------------LSB -+ | PC Index: | PD Index: | PT Index: | DP offset: | -+ | d bits | c bits | b-v bits | a+v bits | -+ ----------------------------------------------------- -+ where v is the variable page table modifier, e.g. -+ v == 0 -> 4KB DP -+ v == 2 -> 16KB DP -+ v == 4 -> 64KB DP -+ v == 6 -> 256KB DP -+ v == 8 -> 1MB DP -+ v == 10 -> 4MB DP -+*/ -+ -+/* services/server/include/ */ -+#include "pmr.h" -+ -+/* include/ */ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvr_notifier.h" -+#include "pvrsrv_error.h" -+#include "servicesext.h" -+#include "sync_prim_internal.h" -+ -+/*! -+ The level of the MMU -+*/ -+typedef enum -+{ -+ MMU_LEVEL_0 = 0, /* Level 0 = Page */ -+ -+ MMU_LEVEL_1, /* Level 1 = PT */ -+ MMU_LEVEL_2, /* Level 2 = PD */ -+ MMU_LEVEL_3, /* Level 3 = PC */ -+ MMU_LEVEL_LAST -+} MMU_LEVEL; -+ -+/* moved after declaration of MMU_LEVEL, as pdump_mmu.h references it */ -+#include "pdump_mmu.h" -+ -+#define MMU_MAX_LEVEL 3 -+ -+ -+typedef struct _MMU_CONTEXT_ MMU_CONTEXT; -+ -+ -+/* -+ P(C/D/T) Entry Config: -+ -+ MSB-----------------------------------------------LSB -+ | PT Addr: | variable PT ctrl | protection flags: | -+ | bits c+v | b bits | a bits | -+ ----------------------------------------------------- -+ where v is the variable page table modifier and is optional -+*/ -+/*! -+ Generic MMU entry description. This is used to describe PC, PD and PT entries. 
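As a concrete illustration of the two bit layouts described above, the standalone sketch below splits a device virtual address with hypothetical mask/shift values of the kind MMU_DEVVADDR_CONFIG carries, then packs a page table entry with the generic ((addr >> log2align) << shift) & mask | prot pattern used by the mapping code. The field widths and flag values are made-up examples, not the real hardware layout.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 40-bit VA split for 4KB data pages (v == 0):
	 * | 2-bit PC | 10-bit PD | 16-bit PT | 12-bit DP offset |
	 * For 16KB pages (v == 2) two bits would move from the PT index
	 * field into the DP offset field. */
	const uint64_t uiPCIndexMask    = 0xC000000000ULL;
	const uint64_t uiPDIndexMask    = 0x3FF0000000ULL;
	const uint64_t uiPTIndexMask    = 0x000FFFF000ULL;
	const uint64_t uiPageOffsetMask = 0x0000000FFFULL;
	const unsigned uiPCIndexShift = 38, uiPDIndexShift = 28, uiPTIndexShift = 12;

	uint64_t uiDevVAddr = 0x00ABCDE1234ULL;

	printf("PC=%llu PD=%llu PT=%llu offset=0x%llx\n",
	       (unsigned long long)((uiDevVAddr & uiPCIndexMask) >> uiPCIndexShift),
	       (unsigned long long)((uiDevVAddr & uiPDIndexMask) >> uiPDIndexShift),
	       (unsigned long long)((uiDevVAddr & uiPTIndexMask) >> uiPTIndexShift),
	       (unsigned long long)(uiDevVAddr & uiPageOffsetMask));

	/* Packing a PxE: physical address field plus protection bits, using
	 * the same shift/mask scheme the MMU_PxE_CONFIG fields describe. */
	const uint64_t uiAddrMask      = 0xFFFFFFF000ULL; /* hypothetical */
	const unsigned uiAddrShift     = 12;
	const unsigned uiAddrLog2Align = 12;
	const uint64_t uiProtFlags     = 0x5;             /* hypothetical */
	uint64_t uiPhysAddr = 0x123456000ULL;

	uint64_t uiPxE = (((uiPhysAddr >> uiAddrLog2Align) << uiAddrShift) & uiAddrMask)
	               | uiProtFlags;
	printf("PxE=0x%llx\n", (unsigned long long)uiPxE);
	return 0;
}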
-+*/ -+typedef struct _MMU_PxE_CONFIG_ -+{ -+ MMU_LEVEL ePxLevel; /*! MMU Level this config describes */ -+ const IMG_CHAR *pszPxLevelStr; /*! Px string for this level */ -+ IMG_UINT8 uiBytesPerEntry; /*! Size of an entry in bytes */ -+ -+ IMG_UINT64 uiAddrMask; /*! Physical address mask */ -+ IMG_UINT8 uiAddrShift; /*! Physical address shift */ -+ IMG_UINT8 uiAddrLog2Align; /*! Physical address Log 2 alignment */ -+ -+ IMG_UINT64 uiVarCtrlMask; /*! Variable control mask */ -+ IMG_UINT8 uiVarCtrlShift; /*! Variable control shift */ -+ -+ IMG_UINT64 uiProtMask; /*! Protection flags mask */ -+ IMG_UINT8 uiProtShift; /*! Protection flags shift */ -+ IMG_UINT64 uiPendingEnMask; /*! Entry pending bit mask */ -+ IMG_UINT64 uiValidEnMask; /*! Entry valid bit mask */ -+ IMG_UINT8 uiValidEnShift; /*! Entry valid bit shift */ -+} MMU_PxE_CONFIG; -+ -+/*! -+ MMU virtual address split -+*/ -+typedef struct _MMU_DEVVADDR_CONFIG_ -+{ -+ /*! Page catalogue index mask */ -+ IMG_UINT64 uiPCIndexMask; -+ /*! Page catalogue index shift */ -+ IMG_UINT8 uiPCIndexShift; -+ /*! Total number of PC entries */ -+ IMG_UINT32 uiNumEntriesPC; -+ -+ /*! Page directory mask */ -+ IMG_UINT64 uiPDIndexMask; -+ /*! Page directory shift */ -+ IMG_UINT8 uiPDIndexShift; -+ /*! Total number of PD entries */ -+ IMG_UINT32 uiNumEntriesPD; -+ -+ /*! Page table mask */ -+ IMG_UINT64 uiPTIndexMask; -+ /*! Page index shift */ -+ IMG_UINT8 uiPTIndexShift; -+ /*! Total number of PT entries */ -+ IMG_UINT32 uiNumEntriesPT; -+ -+ /*! Page offset mask */ -+ IMG_UINT64 uiPageOffsetMask; -+ /*! Page offset shift */ -+ IMG_UINT8 uiPageOffsetShift; -+ -+ /*! First virtual address mappable for this config */ -+ IMG_UINT64 uiOffsetInBytes; -+ -+} MMU_DEVVADDR_CONFIG; -+ -+/*! -+ MMU device attributes. This structure is the interface between the generic -+ MMU code and the device specific MMU code. -+*/ -+typedef struct _MMU_DEVICEATTRIBS_ -+{ -+ /*! Page and address type */ -+ PDUMP_MMU_TYPE eMMUType; -+ -+ /*! Name string of the PDUMP memory space to use */ -+ IMG_CHAR *pszMMUPxPDumpMemSpaceName; -+ -+ /*! Alignment requirement of the top/base object */ -+ IMG_UINT32 ui32BaseAlign; -+ -+ /*! HW config of the top/base object */ -+ struct _MMU_PxE_CONFIG_ *psBaseConfig; -+ -+ /*! Address split for the base object */ -+ const struct _MMU_DEVVADDR_CONFIG_ *psTopLevelDevVAddrConfig; -+ -+ /* Optional, test feature used to generate the pre-mapped page tables in a stand alone MMU driver.*/ -+ PVRSRV_ERROR (*pfnTestPremapConfigureMMU)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, -+ MMU_CONTEXT *psMMUContext, -+ IMG_DEV_VIRTADDR sDevVAddrStart, -+ IMG_DEV_VIRTADDR sDevVAddrEnd, -+ IMG_UINT32 ui32Log2PageSize); -+ -+ /*! Callback for creating protection bits for the page catalogue entry with 8 byte entry */ -+ IMG_UINT64 (*pfnDerivePCEProt8)(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); -+ /*! Callback for creating protection bits for the page catalogue entry with 4 byte entry */ -+ IMG_UINT32 (*pfnDerivePCEProt4)(IMG_UINT32 uiProtFlags); -+ /*! Callback for creating protection bits for the page directory entry with 8 byte entry */ -+ IMG_UINT64 (*pfnDerivePDEProt8)(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); -+ /*! Callback for creating protection bits for the page directory entry with 4 byte entry */ -+ IMG_UINT32 (*pfnDerivePDEProt4)(IMG_UINT32 uiProtFlags); -+ /*! 
Callback for creating protection bits for the page table entry with 8 byte entry */ -+ IMG_UINT64 (*pfnDerivePTEProt8)(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); -+ /*! Callback for creating protection bits for the page table entry with 4 byte entry */ -+ IMG_UINT32 (*pfnDerivePTEProt4)(IMG_UINT32 uiProtFlags); -+ -+ /*! Callback for getting the MMU configuration based on the specified page size */ -+ PVRSRV_ERROR (*pfnGetPageSizeConfiguration)(IMG_UINT32 ui32DataPageSize, -+ const struct _MMU_PxE_CONFIG_ **ppsMMUPDEConfig, -+ const struct _MMU_PxE_CONFIG_ **ppsMMUPTEConfig, -+ const struct _MMU_DEVVADDR_CONFIG_ **ppsMMUDevVAddrConfig, -+ IMG_HANDLE *phPriv2); -+ /*! Callback for putting the MMU configuration obtained from pfnGetPageSizeConfiguration */ -+ PVRSRV_ERROR (*pfnPutPageSizeConfiguration)(IMG_HANDLE hPriv); -+ -+ /*! Callback for getting the page size from the PDE for the page table entry with 4 byte entry */ -+ PVRSRV_ERROR (*pfnGetPageSizeFromPDE4)(IMG_UINT32, IMG_UINT32 *); -+ /*! Callback for getting the page size from the PDE for the page table entry with 8 byte entry */ -+ PVRSRV_ERROR (*pfnGetPageSizeFromPDE8)(IMG_UINT64, IMG_UINT32 *); -+ /*! Callback for getting the page size directly from the address. Supported on MMU4 */ -+ PVRSRV_ERROR (*pfnGetPageSizeFromVirtAddr)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_DEV_VIRTADDR, IMG_UINT32 *); -+ -+ /*! Private data handle */ -+ IMG_HANDLE hGetPageSizeFnPriv; -+} MMU_DEVICEATTRIBS; -+ -+ -+/* MMU Protection flags */ -+ -+/* These are specified generically and in a h/w independent way, and -+ are interpreted at each level (PC/PD/PT) separately. */ -+ -+/* The following flags are for internal use only, and should not -+ traverse the API */ -+#define MMU_PROTFLAGS_INVALID 0x80000000U -+ -+typedef IMG_UINT32 MMU_PROTFLAGS_T; -+ -+/* The following flags should be supplied by the caller: */ -+#define MMU_PROTFLAGS_READABLE (1U<<0) -+#define MMU_PROTFLAGS_WRITEABLE (1U<<1) -+#define MMU_PROTFLAGS_CACHE_COHERENT (1U<<2) -+#define MMU_PROTFLAGS_CACHED (1U<<3) -+ -+/* Device specific flags*/ -+#define MMU_PROTFLAGS_DEVICE_OFFSET 16 -+#define MMU_PROTFLAGS_DEVICE_MASK 0x000f0000UL -+#define MMU_PROTFLAGS_DEVICE(n) \ -+ (((n) << MMU_PROTFLAGS_DEVICE_OFFSET) & \ -+ MMU_PROTFLAGS_DEVICE_MASK) -+ -+ -+struct _PVRSRV_DEVICE_NODE_; -+ -+struct _CONNECTION_DATA_; -+ -+typedef struct _MMU_PAGESIZECONFIG_ -+{ -+ const MMU_PxE_CONFIG *psPDEConfig; -+ const MMU_PxE_CONFIG *psPTEConfig; -+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; -+ IMG_UINT32 uiRefCount; -+ IMG_UINT32 uiMaxRefCount; -+} MMU_PAGESIZECONFIG; -+ -+/*************************************************************************/ /*! -+@Function MMU_InitDevice -+ -+@Description Creates MMU device specific resources. -+ -+@Input psDevNode Device node of the device to create the -+ MMU context for -+ -+@Return PVRSRV_OK if the initialisation process was successful -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR MMU_InitDevice(struct _PVRSRV_DEVICE_NODE_ *psDevNode); -+ -+/*************************************************************************/ /*! -+@Function MMU_DeInitDevice -+ -+@Description Clean-up MMU device specific resources. 
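The generic MMU_PROTFLAGS_* values defined earlier in this header are what callers pass around; the pfnDerive*Prot4/8 callbacks in MMU_DEVICEATTRIBS translate them into hardware PTE bits. A standalone sketch of how the caller-visible flags combine, with the macro definitions copied from above (the device-specific value 0x3 is arbitrary):

#include <assert.h>
#include <stdint.h>

#define MMU_PROTFLAGS_READABLE      (1U << 0)
#define MMU_PROTFLAGS_WRITEABLE     (1U << 1)
#define MMU_PROTFLAGS_DEVICE_OFFSET 16
#define MMU_PROTFLAGS_DEVICE_MASK   0x000f0000UL
#define MMU_PROTFLAGS_DEVICE(n) \
	(((n) << MMU_PROTFLAGS_DEVICE_OFFSET) & MMU_PROTFLAGS_DEVICE_MASK)

int main(void)
{
	uint32_t uiProtFlags = MMU_PROTFLAGS_READABLE |
	                       MMU_PROTFLAGS_WRITEABLE |
	                       MMU_PROTFLAGS_DEVICE(0x3);

	/* the device-specific nibble lands in bits 19:16 */
	assert(((uiProtFlags & MMU_PROTFLAGS_DEVICE_MASK)
	        >> MMU_PROTFLAGS_DEVICE_OFFSET) == 0x3);
	return 0;
}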
-+ -+@Input psDevNode Device node of the device -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void MMU_DeInitDevice(struct _PVRSRV_DEVICE_NODE_ *psDevNode); -+ -+/*************************************************************************/ /*! -+@Function MMU_ContextCreate -+ -+@Description Create a new MMU context -+ -+@Input psConnection Connection requesting the MMU context -+ creation. Can be NULL for kernel/FW -+ memory context. -+@Input psDevNode Device node of the device to create the -+ MMU context for -+@Output ppsMMUContext The created MMU context -+ -+@Return PVRSRV_OK if the MMU context was successfully created -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+MMU_ContextCreate(struct _CONNECTION_DATA_ *psConnection, -+ struct _PVRSRV_DEVICE_NODE_ *psDevNode, -+ MMU_CONTEXT **ppsMMUContext, -+ MMU_DEVICEATTRIBS *psDevAttrs); -+ -+ -+/*************************************************************************/ /*! -+@Function MMU_ContextDestroy -+ -+@Description Destroy a MMU context -+ -+@Input psMMUContext MMU context to destroy -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void -+MMU_ContextDestroy(MMU_CONTEXT *psMMUContext); -+ -+/*************************************************************************/ /*! -+@Function MMU_Alloc -+ -+@Description Allocate the page tables required for the specified virtual range -+ -+@Input psMMUContext MMU context to operate on -+ -+@Input uSize The size of the allocation -+ -+@Output puActualSize Actual size of allocation -+ -+@Input uiProtFlags Generic MMU protection flags -+ -+@Input uDevVAddrAlignment Alignment requirement of the virtual -+ allocation -+ -+@Input psDevVAddr Virtual address to start the allocation -+ from -+ -+@Return PVRSRV_OK if the allocation of the page tables was successful -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+MMU_Alloc(MMU_CONTEXT *psMMUContext, -+ IMG_DEVMEM_SIZE_T uSize, -+ IMG_DEVMEM_SIZE_T *puActualSize, -+ IMG_UINT32 uiProtFlags, -+ IMG_DEVMEM_SIZE_T uDevVAddrAlignment, -+ IMG_DEV_VIRTADDR *psDevVAddr, -+ IMG_UINT32 uiLog2PageSize); -+ -+ -+/*************************************************************************/ /*! -+@Function MMU_Free -+ -+@Description Free the page tables of the specified virtual range -+ -+@Input psMMUContext MMU context to operate on -+ -+@Input sDevVAddr Virtual address to start the free -+ from -+ -+@Input uiSize The size of the allocation -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void -+MMU_Free(MMU_CONTEXT *psMMUContext, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 uiLog2DataPageSize); -+ -+ -+/*************************************************************************/ /*! -+@Function MMU_MapPages -+ -+@Description Map pages to the MMU. -+ Two modes of operation: One requires a list of physical page -+ indices that are going to be mapped, the other just takes -+ the PMR and a possible offset to map parts of it. 
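A sketch of how the two modes just described might be invoked, assuming an existing psMMUContext, psPMR, sDevVAddrBase and uiMappingFlags, and 4KiB pages (log2 page size 12); this is illustrative only and does not imply parameter combinations beyond what the description states:

PVRSRV_ERROR eError;
IMG_UINT32 aui32Indices[] = { 0, 3, 7 };   /* hypothetical page indices */

/* Mode 1: contiguous range selected by a physical page offset, no index list */
eError = MMU_MapPages(psMMUContext, uiMappingFlags, sDevVAddrBase,
                      psPMR, 0 /* phys page offset */, 16 /* pages */,
                      NULL, 12);

/* Mode 2: map only the listed physical pages of the PMR */
eError = MMU_MapPages(psMMUContext, uiMappingFlags, sDevVAddrBase,
                      psPMR, 0, 3 /* entries in the list */,
                      aui32Indices, 12);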
-+ -+@Input psMMUContext MMU context to operate on -+ -+@Input uiMappingFlags Memalloc flags for the mapping -+ -+@Input sDevVAddrBase Device virtual address of the 1st page -+ -+@Input psPMR PMR to map -+ -+@Input ui32PhysPgOffset Physical offset into the PMR -+ -+@Input ui32MapPageCount Number of pages to map -+ -+@Input paui32MapIndices List of page indices to map, -+ can be NULL -+ -+@Input uiLog2PageSize Log2 page size of the pages to map -+ -+@Return PVRSRV_OK if the mapping was successful -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+MMU_MapPages(MMU_CONTEXT *psMMUContext, -+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, -+ IMG_DEV_VIRTADDR sDevVAddrBase, -+ PMR *psPMR, -+ IMG_UINT32 ui32PhysPgOffset, -+ IMG_UINT32 ui32MapPageCount, -+ IMG_UINT32 *paui32MapIndices, -+ IMG_UINT32 uiLog2PageSize); -+ -+/*************************************************************************/ /*! -+@Function MMU_UnmapPages -+ -+@Description Unmap pages from the MMU. -+ -+@Input psMMUContext MMU context to operate on -+ -+@Input uiMappingFlags Memalloc flags for the mapping -+ -+@Input sDevVAddr Device virtual address of the 1st page -+ -+@Input ui32PageCount Number of pages to unmap -+ -+@Input pai32UnmapIndicies Array of page indices to be unmapped -+ -+@Input uiLog2PageSize log2 size of the page -+ -+ -+@Input uiMemAllocFlags Indicates if the unmapped regions need -+ to be backed by dummy or zero page -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void -+MMU_UnmapPages(MMU_CONTEXT *psMMUContext, -+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_UINT32 ui32PageCount, -+ IMG_UINT32 *pai32UnmapIndicies, -+ IMG_UINT32 uiLog2PageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags); -+ -+/*************************************************************************/ /*! -+@Function MMUX_MapVRangeToBackingPage -+ -+@Description Map virtual range to a backing page in the MMU. -+ Used in DevmemX calls which don't tie a PMR to a virtual -+ range implicitly. -+ -+@Input psMMUContext MMU context to operate on -+@Input uiMappingFlags Memalloc flags for the mapping -+@Input sDevVAddrBase Device virtual address of the 1st page -+@Input ui32MapPageCount Number of pages to map -+@Input uiLog2PageSize Log2 page size of the pages to map -+ -+@Return PVRSRV_OK if the mapping was successful -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+MMUX_MapVRangeToBackingPage(MMU_CONTEXT *psMMUContext, -+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, -+ IMG_DEV_VIRTADDR sDevVAddrBase, -+ IMG_UINT32 ui32MapPageCount, -+ IMG_UINT32 uiLog2HeapPageSize); -+ -+/*************************************************************************/ /*! -+@Function MMU_MapPMRFast -+ -+@Description Map a PMR into the MMU. Must be not sparse. -+ This is supposed to cover most mappings and, as the name suggests, -+ should be as fast as possible. 
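Where MMU_MapPages (above) copes with sparse PMRs and index lists, this fast path gives that generality up for a simple linear fill, which is what the implementation earlier in mmu_common.c does in page-table-sized chunks. A usage sketch, assuming an existing non-sparse psPMR plus psMMUContext, sDevVAddr and uiMappingFlags, with 4KiB pages:

PVRSRV_ERROR eError;
IMG_DEVMEM_SIZE_T uiSizeBytes = 16 * 4096;   /* 16 pages of 4KiB */

eError = MMU_MapPMRFast(psMMUContext, sDevVAddr, psPMR,
                        uiSizeBytes, uiMappingFlags, 12);
if (eError == PVRSRV_OK)
{
	/* ... mapping in use ... */
	MMU_UnmapPMRFast(psMMUContext, sDevVAddr, 16 /* pages */, 12);
}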
-+ -+@Input psMMUContext MMU context to operate on -+ -+@Input sDevVAddr Device virtual address to map the PMR -+ into -+ -+@Input psPMR PMR to map -+ -+@Input uiSizeBytes Size in bytes to map -+ -+@Input uiMappingFlags Memalloc flags for the mapping -+ -+@Return PVRSRV_OK if the PMR was successfully mapped -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+MMU_MapPMRFast(MMU_CONTEXT *psMMUContext, -+ IMG_DEV_VIRTADDR sDevVAddr, -+ const PMR *psPMR, -+ IMG_DEVMEM_SIZE_T uiSizeBytes, -+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, -+ IMG_UINT32 uiLog2PageSize); -+ -+/*************************************************************************/ /*! -+@Function MMU_UnmapPMRFast -+ -+@Description Unmap pages from the MMU as fast as possible. -+ PMR must be non-sparse! -+ -+@Input psMMUContext MMU context to operate on -+ -+@Input sDevVAddrBase Device virtual address of the 1st page -+ -+@Input ui32PageCount Number of pages to unmap -+ -+@Input uiLog2PageSize log2 size of the page -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void -+MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext, -+ IMG_DEV_VIRTADDR sDevVAddrBase, -+ IMG_UINT32 ui32PageCount, -+ IMG_UINT32 uiLog2PageSize); -+ -+/*************************************************************************/ /*! -+@Function MMU_AcquireBaseAddr -+ -+@Description Acquire the device physical address of the base level MMU object -+ -+@Input psMMUContext MMU context to operate on -+ -+@Output psPhysAddr Device physical address of the base level -+ MMU object -+ -+@Return PVRSRV_OK if successful -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr); -+ -+/*************************************************************************/ /*! -+@Function MMU_AcquireCPUBaseAddr -+ -+@Description Acquire the CPU Virtual Address of the base level MMU object -+ -+@Input psMMUContext MMU context to operate on -+ -+@Output ppvCPUVAddr CPU Virtual Address of the base level -+ MMU object -+ -+@Return PVRSRV_OK if successful -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+MMU_AcquireCPUBaseAddr(MMU_CONTEXT *psMMUContext, void **ppvCPUVAddr); -+ -+/*************************************************************************/ /*! -+@Function MMU_ReleaseBaseAddr -+ -+@Description Release the device physical address of the base level MMU object -+ -+@Input psMMUContext MMU context to operate on -+ -+@Return PVRSRV_OK if successful -+*/ -+/*****************************************************************************/ -+void -+MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext); -+ -+#if defined(SUPPORT_CUSTOM_OSID_EMISSION) -+/***********************************************************************************/ /*! -+@Function MMU_SetOSid -+ -+@Description Set the OSid associated with the application (and the MMU Context) -+ -+@Input psMMUContext MMU context to store the OSid on -+ -+@Input ui32OSid the OSid in question -+ -+@Input ui32OSidReg The value that the firmware will assign to the -+ registers. -+ -+@Input bOSidAxiProt Toggles whether the AXI prot bit will be set or -+ not. 
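Returning to the base-address helpers documented above, a sketch of fetching the page catalogue's device physical address; the implementation in mmu_common.c shows this returns PVRSRV_ERROR_NOT_SUPPORTED for a virtualised guest firmware context, and MMU_ReleaseBaseAddr is currently a no-op. The hardware programming step is only indicated:

IMG_DEV_PHYADDR sPCDevPAddr;

if (MMU_AcquireBaseAddr(psMMUContext, &sPCDevPAddr) == PVRSRV_OK)
{
	/* ... hand sPCDevPAddr.uiAddr to the firmware / MMU base register ... */
	MMU_ReleaseBaseAddr(psMMUContext);
}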
-+@Return None -+*/ -+/***********************************************************************************/ -+ -+void MMU_SetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32OSid, -+ IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt); -+ -+/***********************************************************************************/ /*! -+@Function MMU_GetOSid -+ -+@Description Retrieve the OSid associated with the MMU context. -+ -+@Input psMMUContext MMU context in which the OSid is stored -+ -+@Output pui32OSid The OSid in question -+ -+@Output pui32OSidReg The OSid that the firmware will assign to the -+ registers. -+ -+@Output pbOSidAxiProt Toggles whether the AXI prot bit will be set or -+ not. -+@Return None -+*/ -+/***********************************************************************************/ -+ -+void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 * pui32OSid, -+ IMG_UINT32 * pui32OSidReg, IMG_BOOL *pbOSidAxiProt); -+#endif -+ -+/*************************************************************************/ /*! -+@Function MMU_AppendCacheFlags -+ -+@Description Set the cache flags to the bitwise or of themselves and the -+ specified input flags, i.e. ui32CacheFlags |= ui32NewCacheFlags, -+ atomically. -+ -+@Input psMMUContext MMU context -+ -+@Input ui32NewCacheFlags Cache flags to append. -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void MMU_AppendCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32NewCacheFlags); -+ -+/*************************************************************************/ /*! -+@Function MMU_GetAndResetCacheFlags -+ -+@Description Clears MMU context flags, atomically. -+ -+@Input psMMUContext MMU context -+ -+@Return Previous MMU context cache flags. -+*/ -+/*****************************************************************************/ -+IMG_UINT32 MMU_GetAndResetCacheFlags(MMU_CONTEXT *psMMUContext); -+ -+typedef struct _MMU_LEVEL_DATA_ -+{ -+ IMG_UINT32 ui32Index; -+ IMG_UINT32 ui32NumOfEntries; -+ const IMG_CHAR *psDebugStr; -+ IMG_UINT8 uiBytesPerEntry; -+ IMG_UINT64 ui64Address; -+} MMU_LEVEL_DATA; -+ -+typedef enum _MMU_FAULT_TYPE_ -+{ -+ MMU_FAULT_TYPE_UNKNOWN = 0, /* If fault is not analysed by Host */ -+ MMU_FAULT_TYPE_PM, -+ MMU_FAULT_TYPE_NON_PM, -+} MMU_FAULT_TYPE; -+ -+typedef struct _MMU_FAULT_DATA_ -+{ -+ MMU_LEVEL eTopLevel; -+ MMU_FAULT_TYPE eType; -+ MMU_LEVEL_DATA sLevelData[MMU_LEVEL_LAST]; -+} MMU_FAULT_DATA; -+ -+/*************************************************************************/ /*! -+@Function MMU_CheckFaultAddress -+ -+@Description Check the specified MMU context to see if the provided address -+ should be valid -+ -+@Input psMMUContext MMU context to store the data on -+ -+@Input psDevVAddr Address to check -+ -+@Output psOutFaultData To store fault details after checking -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext, -+ IMG_DEV_VIRTADDR *psDevVAddr, -+ MMU_FAULT_DATA *psOutFaultData); -+ -+/*************************************************************************/ /*! -+@Function MMU_IsVDevAddrValid -+@Description Checks if given address is valid. 
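A minimal usage sketch, assuming an existing psMMUContext and 4KiB (log2 = 12) pages; the address value is made up:

IMG_DEV_VIRTADDR sDevVAddr;
sDevVAddr.uiAddr = 0x8000000ULL;   /* hypothetical heap address */

if (!MMU_IsVDevAddrValid(psMMUContext, 12, sDevVAddr))
{
	/* no valid page table entry behind this address */
}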
-+@Input psMMUContext MMU context to store the data on -+@Input uiLog2PageSize page size -+@Input sDevVAddr Address to check -+@Return IMG_TRUE of address is valid -+*/ /**************************************************************************/ -+IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext, -+ IMG_UINT32 uiLog2PageSize, -+ IMG_DEV_VIRTADDR sDevVAddr); -+ -+#if defined(PDUMP) -+ -+/*************************************************************************/ /*! -+@Function MMU_ContextDerivePCPDumpSymAddr -+ -+@Description Derives a PDump Symbolic address for the top level MMU object -+ -+@Input psMMUContext MMU context to operate on -+ -+@Input pszPDumpSymbolicNameBuffer Buffer to write the PDump symbolic -+ address to -+ -+@Input uiPDumpSymbolicNameBufferSize Size of the buffer -+ -+@Return PVRSRV_OK if successful -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext, -+ IMG_CHAR *pszPDumpSymbolicNameBuffer, -+ size_t uiPDumpSymbolicNameBufferSize); -+ -+/*************************************************************************/ /*! -+@Function MMU_PDumpWritePageCatBase -+ -+@Description PDump write of the top level MMU object to a device register -+ -+@Input psMMUContext MMU context to operate on -+ -+@Input pszSpaceName PDump name of the mem/reg space -+ -+@Input uiOffset Offset to write the address to -+ -+@Return PVRSRV_OK if successful -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext, -+ const IMG_CHAR *pszSpaceName, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT32 ui32WordSize, -+ IMG_UINT32 ui32AlignShift, -+ IMG_UINT32 ui32Shift, -+ PDUMP_FLAGS_T uiPdumpFlags); -+ -+/*************************************************************************/ /*! -+@Function MMU_AcquirePDumpMMUContext -+ -+@Description Acquire a reference to the PDump MMU context for this MMU -+ context -+ -+@Input psMMUContext MMU context to operate on -+ -+@Output pui32PDumpMMUContextID PDump MMU context ID -+ -+@Return PVRSRV_OK if successful -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext, -+ IMG_UINT32 *pui32PDumpMMUContextID, -+ IMG_UINT32 ui32PDumpFlags); -+ -+/*************************************************************************/ /*! 
-+@Function MMU_ReleasePDumpMMUContext -+ -+@Description Release a reference to the PDump MMU context for this MMU context -+ -+@Input psMMUContext MMU context to operate on -+ -+@Return PVRSRV_OK if successful -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext, -+ IMG_UINT32 ui32PDumpFlags); -+#else /* PDUMP */ -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(MMU_PDumpWritePageCatBase) -+#endif -+static INLINE void -+MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext, -+ const IMG_CHAR *pszSpaceName, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT32 ui32WordSize, -+ IMG_UINT32 ui32AlignShift, -+ IMG_UINT32 ui32Shift, -+ PDUMP_FLAGS_T uiPdumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psMMUContext); -+ PVR_UNREFERENCED_PARAMETER(pszSpaceName); -+ PVR_UNREFERENCED_PARAMETER(uiOffset); -+ PVR_UNREFERENCED_PARAMETER(ui32WordSize); -+ PVR_UNREFERENCED_PARAMETER(ui32AlignShift); -+ PVR_UNREFERENCED_PARAMETER(ui32Shift); -+ PVR_UNREFERENCED_PARAMETER(uiPdumpFlags); -+} -+#endif /* PDUMP */ -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+/*************************************************************************/ /*! -+@Function MMU_CacheInvalidateKick -+ -+@Description Kicks the Firmware to invalidate caches -+ -+@Input psDeviceNode Pointer to the device node -+@Output puiRequiredSyncValue Value the associated sync prim will be -+ updated to after the kick is finished -+ (parameter ignored if NULL) -+ -+@Return PVRSRV_OK if successful -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR MMU_CacheInvalidateKick(PPVRSRV_DEVICE_NODE psDeviceNode, -+ IMG_UINT32 *puiRequiredSyncValue); -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+#endif /* #ifdef MMU_COMMON_H */ -diff --git a/drivers/gpu/drm/img-rogue/module_common.c b/drivers/gpu/drm/img-rogue/module_common.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/module_common.c -@@ -0,0 +1,767 @@ -+/*************************************************************************/ /*! -+@File -+@Title Common Linux module setup -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include -+ -+#if defined(CONFIG_DEBUG_FS) -+#include "pvr_debugfs.h" -+#endif /* defined(CONFIG_DEBUG_FS) */ -+#if defined(CONFIG_PROC_FS) -+#include "pvr_procfs.h" -+#endif /* defined(CONFIG_PROC_FS) */ -+#include "di_server.h" -+#include "private_data.h" -+#include "linkage.h" -+#include "power.h" -+#include "env_connection.h" -+#include "process_stats.h" -+#include "module_common.h" -+#include "pvrsrv.h" -+#include "srvcore.h" -+#if defined(SUPPORT_RGX) -+#include "rgxdevice.h" -+#endif -+#include "pvrsrv_error.h" -+#include "pvr_drv.h" -+#include "pvr_bridge_k.h" -+ -+#include "pvr_fence.h" -+ -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) -+#include "pvr_sync.h" -+#if !defined(USE_PVRSYNC_DEVNODE) -+#include "pvr_sync_ioctl_drm.h" -+#endif -+#endif -+ -+#include "ospvr_gputrace.h" -+ -+#include "km_apphint.h" -+#include "srvinit.h" -+ -+#include "pvr_ion_stats.h" -+ -+#if defined(SUPPORT_DISPLAY_CLASS) -+/* Display class interface */ -+#include "kerneldisplay.h" -+EXPORT_SYMBOL(DCRegisterDevice); -+EXPORT_SYMBOL(DCUnregisterDevice); -+EXPORT_SYMBOL(DCDisplayConfigurationRetired); -+EXPORT_SYMBOL(DCDisplayHasPendingCommand); -+EXPORT_SYMBOL(DCImportBufferAcquire); -+EXPORT_SYMBOL(DCImportBufferRelease); -+ -+EXPORT_SYMBOL(PVRSRVGetDriverStatus); -+EXPORT_SYMBOL(PVRSRVSystemInstallDeviceLISR); -+EXPORT_SYMBOL(PVRSRVSystemUninstallDeviceLISR); -+ -+#include "pvr_notifier.h" -+EXPORT_SYMBOL(PVRSRVCheckStatus); -+#endif /* defined(SUPPORT_DISPLAY_CLASS) */ -+ -+#if defined(SUPPORT_EXTERNAL_PHYSHEAP_INTERFACE) -+/* -+ * Physmem interface. -+ * Required by LMA DC drivers, and some non-DC LMA display drivers. 
-+ */ -+#include "physheap.h" -+EXPORT_SYMBOL(PhysHeapAcquireByID); -+EXPORT_SYMBOL(PhysHeapRelease); -+EXPORT_SYMBOL(PhysHeapGetType); -+EXPORT_SYMBOL(PhysHeapGetCpuPAddr); -+EXPORT_SYMBOL(PhysHeapGetSize); -+EXPORT_SYMBOL(PhysHeapCpuPAddrToDevPAddr); -+ -+#include "pvr_debug.h" -+EXPORT_SYMBOL(PVRSRVGetErrorString); -+EXPORT_SYMBOL(PVRSRVGetDeviceInstance); -+#endif -+ -+#if defined(SUPPORT_RGX) -+#include "rgxapi_km.h" -+#if defined(SUPPORT_SHARED_SLC) -+EXPORT_SYMBOL(RGXInitSLC); -+#endif -+EXPORT_SYMBOL(RGXHWPerfConnect); -+EXPORT_SYMBOL(RGXHWPerfDisconnect); -+EXPORT_SYMBOL(RGXHWPerfControl); -+#if defined(RGX_FEATURE_HWPERF_VOLCANIC) -+EXPORT_SYMBOL(RGXHWPerfConfigureCounters); -+#else -+EXPORT_SYMBOL(RGXHWPerfConfigMuxCounters); -+EXPORT_SYMBOL(RGXHWPerfConfigureAndEnableCustomCounters); -+#endif -+EXPORT_SYMBOL(RGXHWPerfDisableCounters); -+EXPORT_SYMBOL(RGXHWPerfAcquireEvents); -+EXPORT_SYMBOL(RGXHWPerfReleaseEvents); -+EXPORT_SYMBOL(RGXHWPerfConvertCRTimeStamp); -+#if defined(SUPPORT_KERNEL_HWPERF_TEST) -+EXPORT_SYMBOL(OSAddTimer); -+EXPORT_SYMBOL(OSEnableTimer); -+EXPORT_SYMBOL(OSDisableTimer); -+EXPORT_SYMBOL(OSRemoveTimer); -+#endif -+#endif -+ -+static int PVRSRVDeviceSyncOpen(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, -+ struct drm_file *psDRMFile); -+ -+CONNECTION_DATA *LinuxServicesConnectionFromFile(struct file *pFile) -+{ -+ if (pFile) -+ { -+ struct drm_file *psDRMFile; -+ PVRSRV_CONNECTION_PRIV *psConnectionPriv; -+ -+ psDRMFile = pFile->private_data; -+ PVR_LOG_RETURN_IF_FALSE(psDRMFile != NULL, "psDRMFile is NULL", NULL); -+ -+ psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv; -+ PVR_LOG_RETURN_IF_FALSE(psConnectionPriv != NULL, "psConnectionPriv is NULL", NULL); -+ -+ return (CONNECTION_DATA*)psConnectionPriv->pvConnectionData; -+ } -+ -+ return NULL; -+} -+ -+CONNECTION_DATA *LinuxSyncConnectionFromFile(struct file *pFile) -+{ -+ if (pFile) -+ { -+ struct drm_file *psDRMFile = pFile->private_data; -+ PVRSRV_CONNECTION_PRIV *psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv; -+ -+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT) -+ return (CONNECTION_DATA*)psConnectionPriv->pvConnectionData; -+#else -+ return (CONNECTION_DATA*)psConnectionPriv->pvSyncConnectionData; -+#endif -+ } -+ -+ return NULL; -+} -+ -+/**************************************************************************/ /*! 
-+@Function PVRSRVDriverInit -+@Description Common one time driver initialisation -+@Return int 0 on success and a Linux error code otherwise -+*/ /***************************************************************************/ -+int PVRSRVDriverInit(void) -+{ -+ PVRSRV_ERROR error; -+ int os_err; -+ -+ error = PVROSFuncInit(); -+ if (error != PVRSRV_OK) -+ { -+ return -ENOMEM; -+ } -+ -+ error = PVRSRVCommonDriverInit(); -+ if (error != PVRSRV_OK) -+ { -+ return -ENODEV; -+ } -+ -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) -+ error = pvr_sync_register_functions(); -+ if (error != PVRSRV_OK) -+ { -+ return -EPERM; -+ } -+ -+ os_err = pvr_sync_init(); -+ if (os_err != 0) -+ { -+ return os_err; -+ } -+#endif -+ -+ os_err = pvr_apphint_init(); -+ if (os_err != 0) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: failed AppHint setup(%d)", __func__, -+ os_err)); -+ } -+ -+#if defined(SUPPORT_RGX) -+ error = PVRGpuTraceSupportInit(); -+ if (error != PVRSRV_OK) -+ { -+ return -ENOMEM; -+ } -+#endif -+ -+#if defined(ANDROID) -+#if defined(CONFIG_PROC_FS) -+ error = PVRProcFsRegister(); -+ if (error != PVRSRV_OK) -+ { -+ return -ENOMEM; -+ } -+#elif defined(CONFIG_DEBUG_FS) -+ error = PVRDebugFsRegister(); -+ if (error != PVRSRV_OK) -+ { -+ return -ENOMEM; -+ } -+#endif /* defined(CONFIG_PROC_FS) || defined(CONFIG_DEBUG_FS) */ -+#else -+#if defined(CONFIG_DEBUG_FS) -+ error = PVRDebugFsRegister(); -+ if (error != PVRSRV_OK) -+ { -+ return -ENOMEM; -+ } -+#elif defined(CONFIG_PROC_FS) -+ error = PVRProcFsRegister(); -+ if (error != PVRSRV_OK) -+ { -+ return -ENOMEM; -+ } -+#endif /* defined(CONFIG_DEBUG_FS) || defined(CONFIG_PROC_FS) */ -+#endif /* defined(ANDROID) */ -+ -+ error = PVRSRVIonStatsInitialise(); -+ if (error != PVRSRV_OK) -+ { -+ return -ENODEV; -+ } -+ -+#if defined(SUPPORT_RGX) -+ /* calling here because we need to handle input from the file even -+ * before the devices are initialised -+ * note: we're not passing a device node because apphint callbacks don't -+ * need it */ -+ PVRGpuTraceInitAppHintCallbacks(NULL); -+#endif -+ -+ return 0; -+} -+ -+/**************************************************************************/ /*! -+@Function PVRSRVDriverDeinit -+@Description Common one time driver de-initialisation -+@Return void -+*/ /***************************************************************************/ -+void PVRSRVDriverDeinit(void) -+{ -+ pvr_apphint_deinit(); -+ -+ PVRSRVIonStatsDestroy(); -+ -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) -+ pvr_sync_deinit(); -+#endif -+ -+#if defined(SUPPORT_RGX) -+ PVRGpuTraceSupportDeInit(); -+#endif -+ -+ PVRSRVCommonDriverDeInit(); -+ -+ PVROSFuncDeInit(); -+} -+ -+/**************************************************************************/ /*! -+@Function PVRSRVDeviceInit -+@Description Common device related initialisation. 
-+@Input psDeviceNode The device node for which initialisation should be -+ performed -+@Return int 0 on success and a Linux error code otherwise -+*/ /***************************************************************************/ -+int PVRSRVDeviceInit(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) -+ { -+ PVRSRV_ERROR eError = pvr_sync_device_init(psDeviceNode->psDevConfig->pvOSDevice); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: unable to create sync (%d)", -+ __func__, eError)); -+ return -EBUSY; -+ } -+ } -+#endif -+ -+#if defined(SUPPORT_RGX) -+ { -+ int error = PVRGpuTraceInitDevice(psDeviceNode); -+ if (error != 0) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: failed to initialise PVR GPU Tracing on device%d (%d)", -+ __func__, psDeviceNode->sDevId.i32KernelDeviceID, error)); -+ } -+ } -+#endif -+ -+ return 0; -+} -+ -+/**************************************************************************/ /*! -+@Function PVRSRVDeviceDeinit -+@Description Common device related de-initialisation. -+@Input psDeviceNode The device node for which de-initialisation should -+ be performed -+@Return void -+*/ /***************************************************************************/ -+void PVRSRVDeviceDeinit(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+#if defined(SUPPORT_RGX) -+ PVRGpuTraceDeInitDevice(psDeviceNode); -+#endif -+ -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) -+ pvr_sync_device_deinit(psDeviceNode->psDevConfig->pvOSDevice); -+#endif -+ -+#if defined(SUPPORT_DMA_TRANSFER) -+ PVRSRVDeInitialiseDMA(psDeviceNode); -+#endif -+ -+ pvr_fence_cleanup(); -+} -+ -+/**************************************************************************/ /*! -+@Function PVRSRVDeviceShutdown -+@Description Common device shutdown. -+@Input psDev The device node representing the device that should -+ be shutdown -+@Return void -+*/ /***************************************************************************/ -+ -+void PVRSRVDeviceShutdown(struct drm_device *psDev) -+{ -+ struct pvr_drm_private *psDevPriv = psDev->dev_private; -+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevPriv->dev_node; -+ -+ /* Since this is a shutdown request ignore the returned error and try to -+ * to power off the device. This is done because there is no way of -+ * signalling the OS that this call failed. */ -+ (void) LinuxBridgeBlockClientsAccess(psDevPriv, IMG_TRUE); -+ -+ /* Passing PVRSRV_POWER_FLAGS_NONE as there are no special actions required -+ * from the shutdown call beside the regular device power off. */ -+ (void) PVRSRVSetDeviceSystemPowerState(psDeviceNode, -+ PVRSRV_SYS_POWER_STATE_OFF, -+ PVRSRV_POWER_FLAGS_NONE); -+} -+ -+/**************************************************************************/ /*! -+@Function PVRSRVDeviceSuspend -+@Description Common device suspend. -+@Input psDev The device node representing the device that should -+ be suspended -+@Return int 0 on success and a Linux error code otherwise -+*/ /***************************************************************************/ -+int PVRSRVDeviceSuspend(struct drm_device *psDev) -+{ -+ struct pvr_drm_private *psDevPriv = psDev->dev_private; -+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevPriv->dev_node; -+ PVRSRV_ERROR eError; -+ -+ /* LinuxBridgeBlockClientsAccess prevents processes from using the driver -+ * while it's suspended (this is needed for Android). 
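 * The matching LinuxBridgeUnblockClientsAccess() call is made on the
 * error path just below and again in PVRSRVDeviceResume(), so a failed
 * power-off request does not leave client access blocked.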
*/ -+ eError = LinuxBridgeBlockClientsAccess(psDevPriv, IMG_FALSE); -+ PVR_LOG_RETURN_IF_FALSE_VA(eError == PVRSRV_OK, -EFAULT, -+ "LinuxBridgeBlockClientsAccess() failed with error %u", -+ eError); -+ -+#if defined(SUPPORT_AUTOVZ) -+ /* To allow the driver to power down the GPU under AutoVz, the firmware must -+ * declared as offline, otherwise all power requests will be ignored. */ -+ psDeviceNode->bAutoVzFwIsUp = IMG_FALSE; -+#endif -+ -+ if (PVRSRVSetDeviceSystemPowerState(psDeviceNode, -+ PVRSRV_SYS_POWER_STATE_OFF, -+ PVRSRV_POWER_FLAGS_OSPM_SUSPEND_REQ) != PVRSRV_OK) -+ { -+ /* Ignore return error as we're already returning an error here. */ -+ (void) LinuxBridgeUnblockClientsAccess(psDevPriv); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+/**************************************************************************/ /*! -+@Function PVRSRVDeviceResume -+@Description Common device resume. -+@Input psDev The device node representing the device that should -+ be resumed -+@Return int 0 on success and a Linux error code otherwise -+*/ /***************************************************************************/ -+int PVRSRVDeviceResume(struct drm_device *psDev) -+{ -+ struct pvr_drm_private *psDevPriv = psDev->dev_private; -+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevPriv->dev_node; -+ -+ if (PVRSRVSetDeviceSystemPowerState(psDeviceNode, -+ PVRSRV_SYS_POWER_STATE_ON, -+ PVRSRV_POWER_FLAGS_OSPM_RESUME_REQ) != PVRSRV_OK) -+ { -+ return -EINVAL; -+ } -+ -+ /* Ignore return error. We should proceed even if this fails. */ -+ (void) LinuxBridgeUnblockClientsAccess(psDevPriv); -+ -+ /* -+ * Reprocess the device queues in case commands were blocked during -+ * suspend. -+ */ -+ if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE) -+ { -+ PVRSRVCheckStatus(NULL); -+ } -+ -+ return 0; -+} -+ -+/**************************************************************************/ /*! -+@Function PVRSRVDeviceServicesOpen -+@Description Services device open. -+@Input psDeviceNode The device node representing the device being -+ opened by a user mode process -+@Input psDRMFile The DRM file data that backs the file handle -+ returned to the user mode process -+@Return int 0 on success and a Linux error code otherwise -+*/ /***************************************************************************/ -+int PVRSRVDeviceServicesOpen(PVRSRV_DEVICE_NODE *psDeviceNode, -+ struct drm_file *psDRMFile) -+{ -+ static DEFINE_MUTEX(sDeviceInitMutex); -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ ENV_CONNECTION_PRIVATE_DATA sPrivData; -+ PVRSRV_CONNECTION_PRIV *psConnectionPriv; -+ PVRSRV_ERROR eError; -+ int iErr = 0; -+ -+ if (!psPVRSRVData) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: No device data", __func__)); -+ iErr = -ENODEV; -+ goto out; -+ } -+ -+ mutex_lock(&sDeviceInitMutex); -+ /* -+ * If the first attempt already set the state to bad, -+ * there is no point in going the second time, so get out -+ */ -+ if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_BAD) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Driver already in bad state. 
Device open failed.", -+ __func__)); -+ iErr = -ENODEV; -+ mutex_unlock(&sDeviceInitMutex); -+ goto out; -+ } -+ -+ if (psDRMFile->driver_priv == NULL) -+ { -+ /* Allocate psConnectionPriv (stores private data and release pfn under driver_priv) */ -+ psConnectionPriv = kzalloc(sizeof(*psConnectionPriv), GFP_KERNEL); -+ if (!psConnectionPriv) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: No memory to allocate driver_priv data", __func__)); -+ iErr = -ENOMEM; -+ mutex_unlock(&sDeviceInitMutex); -+ goto fail_alloc_connection_priv; -+ } -+ } -+ else -+ { -+ psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv; -+ } -+ -+ if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_CREATED) -+ { -+ eError = PVRSRVCommonDeviceInitialise(psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise device (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ iErr = -ENODEV; -+ mutex_unlock(&sDeviceInitMutex); -+ goto fail_device_init; -+ } -+ -+#if defined(SUPPORT_RGX) -+ PVRGpuTraceInitIfEnabled(psDeviceNode); -+#endif -+ } -+ mutex_unlock(&sDeviceInitMutex); -+ -+ sPrivData.psDevNode = psDeviceNode; -+ -+ /* -+ * Here we pass the file pointer which will passed through to our -+ * OSConnectionPrivateDataInit function where we can save it so -+ * we can back reference the file structure from its connection -+ */ -+ eError = PVRSRVCommonConnectionConnect(&psConnectionPriv->pvConnectionData, -+ (void *)&sPrivData); -+ if (eError != PVRSRV_OK) -+ { -+ iErr = -ENOMEM; -+ goto fail_connect; -+ } -+ -+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT) -+ psConnectionPriv->pfDeviceRelease = PVRSRVCommonConnectionDisconnect; -+#endif -+ psDRMFile->driver_priv = (void*)psConnectionPriv; -+ -+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -+ eError = PVRSRVGpuTraceWorkPeriodEventStatsRegister( -+ &psConnectionPriv->pvGpuWorkPeriodEventStats); -+ if (eError != PVRSRV_OK) -+ { -+ iErr = -ENOMEM; -+ goto fail_connect; -+ } -+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ -+ goto out; -+ -+fail_connect: -+fail_device_init: -+ kfree(psConnectionPriv); -+fail_alloc_connection_priv: -+out: -+ return iErr; -+} -+ -+/**************************************************************************/ /*! -+@Function PVRSRVDeviceSyncOpen -+@Description Sync device open. 
-+@Input psDeviceNode The device node representing the device being -+ opened by a user mode process -+@Input psDRMFile The DRM file data that backs the file handle -+ returned to the user mode process -+@Return int 0 on success and a Linux error code otherwise -+*/ /***************************************************************************/ -+static int PVRSRVDeviceSyncOpen(PVRSRV_DEVICE_NODE *psDeviceNode, -+ struct drm_file *psDRMFile) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ CONNECTION_DATA *psConnection = NULL; -+ ENV_CONNECTION_PRIVATE_DATA sPrivData; -+ PVRSRV_CONNECTION_PRIV *psConnectionPriv; -+ PVRSRV_ERROR eError; -+ int iErr = 0; -+ -+ if (!psPVRSRVData) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: No device data", __func__)); -+ iErr = -ENODEV; -+ goto out; -+ } -+ -+ if (psDRMFile->driver_priv == NULL) -+ { -+ /* Allocate psConnectionPriv (stores private data and release pfn under driver_priv) */ -+ psConnectionPriv = kzalloc(sizeof(*psConnectionPriv), GFP_KERNEL); -+ if (!psConnectionPriv) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: No memory to allocate driver_priv data", __func__)); -+ iErr = -ENOMEM; -+ goto out; -+ } -+ } -+ else -+ { -+ psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv; -+ } -+ -+ /* Allocate connection data area, no stats since process not registered yet */ -+ psConnection = kzalloc(sizeof(*psConnection), GFP_KERNEL); -+ if (!psConnection) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: No memory to allocate connection data", __func__)); -+ iErr = -ENOMEM; -+ goto fail_alloc_connection; -+ } -+ psConnection->bSyncConnection = IMG_TRUE; -+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT) -+ psConnectionPriv->pvConnectionData = (void*)psConnection; -+#else -+ psConnectionPriv->pvSyncConnectionData = (void*)psConnection; -+#endif -+ -+ sPrivData.psDevNode = psDeviceNode; -+ -+ /* Call environment specific connection data init function */ -+ eError = OSConnectionPrivateDataInit(&psConnection->hOsPrivateData, &sPrivData); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: OSConnectionPrivateDataInit() failed (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ goto fail_private_data_init; -+ } -+ -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(USE_PVRSYNC_DEVNODE) -+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT) -+ iErr = pvr_sync_open(psConnectionPriv->pvConnectionData, psDRMFile); -+#else -+ iErr = pvr_sync_open(psConnectionPriv->pvSyncConnectionData, psDRMFile); -+#endif -+ if (iErr) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: pvr_sync_open() failed(%d)", -+ __func__, iErr)); -+ goto fail_pvr_sync_open; -+ } -+#endif -+ -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(USE_PVRSYNC_DEVNODE) -+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT) -+ psConnectionPriv->pfDeviceRelease = pvr_sync_close; -+#endif -+#endif -+ psDRMFile->driver_priv = psConnectionPriv; -+ goto out; -+ -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(USE_PVRSYNC_DEVNODE) -+fail_pvr_sync_open: -+ OSConnectionPrivateDataDeInit(psConnection->hOsPrivateData); -+#endif -+fail_private_data_init: -+ kfree(psConnection); -+fail_alloc_connection: -+ kfree(psConnectionPriv); -+out: -+ return iErr; -+} -+ -+/**************************************************************************/ /*! -+@Function PVRSRVDeviceRelease -+@Description Common device release. 
-+@Input psDeviceNode The device node for the device that the given file -+ represents -+@Input psDRMFile The DRM file data that's being released -+@Return void -+*/ /***************************************************************************/ -+void PVRSRVDeviceRelease(PVRSRV_DEVICE_NODE *psDeviceNode, -+ struct drm_file *psDRMFile) -+{ -+ CONNECTION_DATA *psConnection = NULL; -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ -+ if (psDRMFile->driver_priv) -+ { -+ PVRSRV_CONNECTION_PRIV *psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv; -+ -+ if (psConnectionPriv->pvConnectionData) -+ { -+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -+ if (psConnectionPriv->pvGpuWorkPeriodEventStats) -+ { -+ PVRSRVGpuTraceWorkPeriodEventStatsUnregister( -+ psConnectionPriv->pvGpuWorkPeriodEventStats); -+ psConnectionPriv->pvGpuWorkPeriodEventStats = NULL; -+ } -+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ -+ -+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT) -+ if (psConnectionPriv->pfDeviceRelease) -+ { -+ psConnectionPriv->pfDeviceRelease(psConnectionPriv->pvConnectionData); -+ } -+#else -+ if (psConnectionPriv->pvConnectionData) -+ PVRSRVCommonConnectionDisconnect(psConnectionPriv->pvConnectionData); -+ -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(USE_PVRSYNC_DEVNODE) -+ if (psConnectionPriv->pvSyncConnectionData) -+ pvr_sync_close(psConnectionPriv->pvSyncConnectionData); -+#endif -+#endif -+ psConnection = psConnectionPriv->pvConnectionData; -+ if (psConnection->bSyncConnection == IMG_TRUE) -+ { -+ if (psConnection->hOsPrivateData != NULL) -+ { -+ OSConnectionPrivateDataDeInit(psConnection->hOsPrivateData); -+ psConnection->hOsPrivateData = NULL; -+ } -+ kfree(psConnection); -+ psConnection = NULL; -+ } -+ } -+ kfree(psDRMFile->driver_priv); -+ psDRMFile->driver_priv = NULL; -+ } -+} -+ -+int -+drm_pvr_srvkm_init(struct drm_device *dev, void *arg, struct drm_file *psDRMFile) -+{ -+ struct drm_pvr_srvkm_init_data *data = arg; -+ struct pvr_drm_private *priv = dev->dev_private; -+ int iErr = 0; -+ -+ switch (data->init_module) -+ { -+ case PVR_SRVKM_SYNC_INIT: -+ { -+ iErr = PVRSRVDeviceSyncOpen(priv->dev_node, psDRMFile); -+ break; -+ } -+ case PVR_SRVKM_SERVICES_INIT: -+ { -+ iErr = PVRSRVDeviceServicesOpen(priv->dev_node, psDRMFile); -+ break; -+ } -+ default: -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid init_module (%d)", -+ __func__, data->init_module)); -+ iErr = -EINVAL; -+ } -+ } -+ -+ return iErr; -+} -diff --git a/drivers/gpu/drm/img-rogue/module_common.h b/drivers/gpu/drm/img-rogue/module_common.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/module_common.h -@@ -0,0 +1,109 @@ -+/*************************************************************************/ /*! -+@File module_common.h -+@Title Common linux module setup header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
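Note on the drm_pvr_srvkm_init() dispatcher above: its (dev, arg, file) signature is the standard DRM ioctl handler prototype, so it is meant to be reached through the driver's ioctl table rather than called directly. A minimal sketch of such a table entry follows; the ioctl name PVR_SRVKM_INIT and the pvr_drm.h header location are assumptions, since the table itself is not part of this hunk.

/* Sketch only -- the real table lives in the DRM glue that is not shown here.
 * PVR_SRVKM_INIT is an assumed ioctl name; DRM_IOCTL_DEF_DRV() expects the
 * matching DRM_IOCTL_* / DRM_* definitions to exist (assumed: pvr_drm.h). */
#include <drm/drm_ioctl.h>

#include "module_common.h"
#include "pvr_drm.h"

static const struct drm_ioctl_desc pvr_drm_ioctls[] = {
    /* Render nodes may issue this ioctl, hence DRM_RENDER_ALLOW */
    DRM_IOCTL_DEF_DRV(PVR_SRVKM_INIT, drm_pvr_srvkm_init, DRM_RENDER_ALLOW),
};

The array would then be referenced from the .ioctls and .num_ioctls fields of the driver's struct drm_driver.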
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef MODULE_COMMON_H -+#define MODULE_COMMON_H -+ -+#include "pvr_drm.h" -+ -+/* DRVNAME is the name we use to register our driver. */ -+#define DRVNAME PVR_LDM_DRIVER_REGISTRATION_NAME -+ -+struct _PVRSRV_DEVICE_NODE_; -+struct drm_file; -+struct drm_device; -+ -+/* psDRMFile->driver_priv will point to a PVRSV_CONNECTION_PRIV -+ * struct, which will contain a ptr to the CONNECTION_DATA and -+ * a pfn to the release function (which will differ depending -+ * on whether the connection is to Sync or Services). -+ */ -+typedef void (*PFN_PVRSRV_DEV_RELEASE)(void *pvData); -+typedef struct -+{ -+ /* pvConnectionData is used to hold Services connection data -+ * for all PVRSRV_DEVICE_INIT_MODE options. -+ */ -+ void *pvConnectionData; -+ -+ /* pfDeviceRelease is used to indicate the release function -+ * to be called when PVRSRV_DEVICE_INIT_MODE is PVRSRV_LINUX_DEV_INIT_ON_CONNECT, -+ * as we can then have one connections made (either for Services or Sync) per -+ * psDRMFile, and need to know which type of connection is being released -+ * (as the ioctl release call is common for both). -+ */ -+ PFN_PVRSRV_DEV_RELEASE pfDeviceRelease; -+ -+ /* pvSyncConnectionData is used to hold Sync connection data -+ * when PVRSRV_DEVICE_INIT_MODE is not PVRSRV_LINUX_DEV_INIT_ON_CONNECT, -+ * as we can then have two connections made (for Services and Sync) to -+ * the same psDRMFile. 
-+ */ -+ void *pvSyncConnectionData; -+ -+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -+ /* hGpuWorkPeriodEventStats is used to hold gpu work period event stats -+ * private data for each apps which have been working with GPU. -+ */ -+ void *pvGpuWorkPeriodEventStats; -+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ -+ -+} PVRSRV_CONNECTION_PRIV; -+ -+int PVRSRVDriverInit(void); -+void PVRSRVDriverDeinit(void); -+ -+int PVRSRVDeviceInit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); -+void PVRSRVDeviceDeinit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); -+ -+void PVRSRVDeviceShutdown(struct drm_device *psDev); -+int PVRSRVDeviceSuspend(struct drm_device *psDev); -+int PVRSRVDeviceResume(struct drm_device *psDev); -+ -+int PVRSRVDeviceServicesOpen(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, -+ struct drm_file *psDRMFile); -+void PVRSRVDeviceRelease(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, -+ struct drm_file *psDRMFile); -+int drm_pvr_srvkm_init(struct drm_device *dev, -+ void *arg, struct drm_file *psDRMFile); -+ -+#endif /* MODULE_COMMON_H */ -diff --git a/drivers/gpu/drm/img-rogue/multicore_defs.h b/drivers/gpu/drm/img-rogue/multicore_defs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/multicore_defs.h -@@ -0,0 +1,53 @@ -+/**************************************************************************/ /*! -+@File -+@Title RGX Multicore Information flags -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
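The PVRSRV_CONNECTION_PRIV layout and the PVRSRVDeviceRelease() declaration above imply that every DRM file close must funnel back into the common release helper so that driver_priv is torn down exactly once, whichever connection type it holds. A minimal sketch of that wiring, assuming a postclose hook named pvr_drm_postclose() (the real callback sits in DRM glue outside this hunk):

/* Sketch, not part of the patch: routing DRM file close to the common helper.
 * pvr_drm_postclose() and the header providing struct pvr_drm_private are
 * assumptions; PVRSRVDeviceRelease() is the function declared above. */
#include <drm/drm_device.h>
#include <drm/drm_file.h>

#include "module_common.h"
#include "pvr_drm.h"            /* assumed to declare struct pvr_drm_private */

static void pvr_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
    struct pvr_drm_private *priv = dev->dev_private;

    /* Releases whichever connection (Services or Sync) file->driver_priv holds */
    PVRSRVDeviceRelease(priv->dev_node, file);
}

Registered as the .postclose member of the driver's struct drm_driver, this keeps the Services and Sync release paths behind a single entry point.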
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RGX_MULTICORE_DEFS_H -+#define RGX_MULTICORE_DEFS_H -+ -+/* Capability bits returned to client in RGXGetMultiCoreInfo */ -+#define RGX_MULTICORE_CAPABILITY_FRAGMENT_EN (0x00000040U) -+#define RGX_MULTICORE_CAPABILITY_GEOMETRY_EN (0x00000020U) -+#define RGX_MULTICORE_CAPABILITY_COMPUTE_EN (0x00000010U) -+#define RGX_MULTICORE_CAPABILITY_PRIMARY_EN (0x00000008U) -+#define RGX_MULTICORE_ID_CLRMSK (0xFFFFFFF8U) -+ -+#endif /* RGX_MULTICORE_DEFS_H */ -diff --git a/drivers/gpu/drm/img-rogue/opaque_types.h b/drivers/gpu/drm/img-rogue/opaque_types.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/opaque_types.h -@@ -0,0 +1,56 @@ -+/*************************************************************************/ /*! -+@File -+@Title Opaque Types -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Defines opaque types for various services types -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
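The multicore_defs.h flags above encode, per core, a 3-bit core ID in the bottom bits (the field cleared by RGX_MULTICORE_ID_CLRMSK) plus primary/geometry/fragment/compute enable bits. An illustrative decoder for the capability words returned by RGXGetMultiCoreInfo; the function and parameter names are placeholders:

/* Illustrative decode of the capability words described above. */
#include "img_types.h"
#include "pvr_debug.h"
#include "multicore_defs.h"

static void DecodeMultiCoreCaps(const IMG_UINT32 *pui32Caps, IMG_UINT32 ui32NumCores)
{
    IMG_UINT32 i;

    for (i = 0; i < ui32NumCores; i++)
    {
        /* RGX_MULTICORE_ID_CLRMSK clears the ID field, so ~mask extracts it */
        IMG_UINT32 ui32CoreId = pui32Caps[i] & ~RGX_MULTICORE_ID_CLRMSK;
        IMG_BOOL bPrimary  = (pui32Caps[i] & RGX_MULTICORE_CAPABILITY_PRIMARY_EN)  ? IMG_TRUE : IMG_FALSE;
        IMG_BOOL bGeometry = (pui32Caps[i] & RGX_MULTICORE_CAPABILITY_GEOMETRY_EN) ? IMG_TRUE : IMG_FALSE;
        IMG_BOOL bFragment = (pui32Caps[i] & RGX_MULTICORE_CAPABILITY_FRAGMENT_EN) ? IMG_TRUE : IMG_FALSE;
        IMG_BOOL bCompute  = (pui32Caps[i] & RGX_MULTICORE_CAPABILITY_COMPUTE_EN)  ? IMG_TRUE : IMG_FALSE;

        PVR_DPF((PVR_DBG_MESSAGE, "core %u: primary=%d geom=%d frag=%d comp=%d",
                 ui32CoreId, bPrimary, bGeometry, bFragment, bCompute));
    }
}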
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef SERVICES_OPAQUE_TYPES_H -+#define SERVICES_OPAQUE_TYPES_H -+ -+#include "img_defs.h" -+#include "img_types.h" -+ -+typedef struct _PVRSRV_DEVICE_NODE_ *PPVRSRV_DEVICE_NODE; -+typedef const struct _PVRSRV_DEVICE_NODE_ *PCPVRSRV_DEVICE_NODE; -+ -+#endif /* SERVICES_OPAQUE_TYPES_H */ -+ -+/****************************************************************************** -+ End of file (opaque_types.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/os_apphint.h b/drivers/gpu/drm/img-rogue/os_apphint.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/os_apphint.h -@@ -0,0 +1,186 @@ -+/*************************************************************************/ /*! -+@File os_apphint.h -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description OS-independent interface for retrieving apphints -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#include "img_defs.h" -+#if defined(__linux__) -+#include "km_apphint.h" -+#include "device.h" -+#else -+#include "services_client_porting.h" -+#endif -+#if !defined(OS_APPHINT_H) -+#define OS_APPHINT_H -+ -+/*! Supplied to os_get_apphint_XXX() functions when the param/AppHint is -+ * applicable to all devices and not a specific device. Typically used -+ * for server-wide build and module AppHints. -+ */ -+#define APPHINT_NO_DEVICE (NULL) -+ -+#if defined(__linux__) && !defined(DOXYGEN) -+static INLINE IMG_UINT os_get_apphint_UINT32(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_UINT32 *pAppHintDefault, IMG_UINT32 *pVal) { -+ return !pvr_apphint_get_uint32(device, id, pVal); -+} -+static INLINE IMG_UINT os_get_apphint_UINT64(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_UINT64 *pAppHintDefault, IMG_UINT64 *pVal) { -+ return !pvr_apphint_get_uint64(device, id, pVal); -+} -+static INLINE IMG_UINT os_get_apphint_BOOL(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_BOOL *pAppHintDefault, IMG_BOOL *pVal) { -+ return !pvr_apphint_get_bool(device, id, pVal); -+} -+static INLINE IMG_UINT os_get_apphint_STRING(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_CHAR *pAppHintDefault, IMG_CHAR *buffer, size_t size) { -+ return !pvr_apphint_get_string(device, id, buffer, size); -+} -+ -+#define OSGetAppHintUINT32(device, state, name, appHintDefault, value) \ -+ os_get_apphint_UINT32(device, state, APPHINT_ID_ ## name, appHintDefault, value) -+ -+#define OSGetAppHintUINT64(device, state, name, appHintDefault, value) \ -+ os_get_apphint_UINT64(device, state, APPHINT_ID_ ## name, appHintDefault, value) -+ -+#define OSGetAppHintBOOL(device, state, name, appHintDefault, value) \ -+ os_get_apphint_BOOL(device, state, APPHINT_ID_ ## name, appHintDefault, value) -+ -+#define OSGetAppHintSTRING(device, state, name, appHintDefault, buffer, size) \ -+ os_get_apphint_STRING(device, state, APPHINT_ID_ ## name, appHintDefault, buffer, size) -+ -+ -+#define OSCreateAppHintState(state) \ -+ PVR_UNREFERENCED_PARAMETER(state) -+ -+#define OSFreeAppHintState(state) \ -+ PVR_UNREFERENCED_PARAMETER(state) -+ -+#else /* defined(__linux__) && !defined(DOXYGEN) */ -+ -+/**************************************************************************/ /*! -+@def OSGetAppHintUINT32(state, name, appHintDefault, value) -+@Description Interface for retrieval of uint32 server app hint. -+ For non-linux operating systems, this macro implements a call -+ from server code to PVRSRVGetAppHint() declared in -+ services_client_porting.h, effectively making it 'shared' code. -+@Input device Device node -+@Input state App hint state -+@Input name Name used to identify app hint -+@Input appHintDefault Default value to be returned if no -+ app hint is found. -+@Output value Pointer to returned app hint value. 
-+ */ /**************************************************************************/ -+#define OSGetAppHintUINT32(device, state, name, appHintDefault, value) \ -+ PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value) -+ -+/**************************************************************************/ /*! -+@def OSGetAppHintUINT64(state, name, appHintDefault, value) -+@Description Interface for retrieval of uint64 server app hint. -+ For non-linux operating systems, this macro implements a call -+ from server code to PVRSRVGetAppHint() declared in -+ services_client_porting.h, effectively making it 'shared' code. -+@Input device Device node -+@Input state App hint state -+@Input name Name used to identify app hint -+@Input appHintDefault Default value to be returned if no -+ app hint is found. -+@Output value Pointer to returned app hint value. -+ */ /**************************************************************************/ -+#define OSGetAppHintUINT64(device, state, name, appHintDefault, value) \ -+ PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value) -+ -+/**************************************************************************/ /*! -+@def OSGetAppHintBOOL(state, name, appHintDefault, value) -+@Description Interface for retrieval of IMG_BOOL server app hint. -+ For non-linux operating systems, this macro implements a call -+ from server code to PVRSRVGetAppHint() declared in -+ services_client_porting.h, effectively making it 'shared' code. -+@Input device Device node -+@Input state App hint state -+@Input name Name used to identify app hint -+@Input appHintDefault Default value to be returned if no -+ app hint is found. -+@Output value Pointer to returned app hint value. -+ */ /**************************************************************************/ -+#define OSGetAppHintBOOL(device, state, name, appHintDefault, value) \ -+ PVRSRVGetAppHint(state, # name, IMG_BOOL_TYPE, appHintDefault, value) -+ -+/**************************************************************************/ /*! -+@def OSGetAppHintSTRING(state, name, appHintDefault, buffer, size) -+@Description Interface for retrieval of string server app hint. -+ For non-linux operating systems, this macro implements a call -+ from server code to PVRSRVGetAppHint() declared in -+ services_client_porting.h, effectively making it 'shared' code. -+@Input device Device node -+@Input state App hint state -+@Input name Name used to identify app hint -+@Input appHintDefault Default value to be returned if no -+ app hint is found. -+@Output buffer Buffer used to return app hint string. -+@Input size Size of the buffer. -+ */ /**************************************************************************/ -+#define OSGetAppHintSTRING(device, state, name, appHintDefault, buffer, size) \ -+ (PVR_UNREFERENCED_PARAMETER(size), PVRSRVGetAppHint(state, # name, IMG_STRING_TYPE, appHintDefault, buffer)) -+ -+/**************************************************************************/ /*! -+@def OSCreateAppHintState(state) -+@Description Creates the app hint state. -+ For non-linux operating systems, this macro implements a call -+ from server code to PVRSRVCreateAppHintState() declared in -+ services_client_porting.h, effectively making it 'shared' code. 
-+@Output state App hint state -+ */ /**************************************************************************/ -+#define OSCreateAppHintState(state) \ -+ PVRSRVCreateAppHintState(IMG_SRV_UM, 0, state) -+ -+/**************************************************************************/ /*! -+@def OSFreeAppHintState -+@Description Free the app hint state. -+ For non-linux operating systems, this macro implements a call -+ from server code to PVRSRVCreateAppHintState() declared in -+ services_client_porting.h, effectively making it 'shared' code. -+@Output state App hint state -+ */ /**************************************************************************/ -+#define OSFreeAppHintState(state) \ -+ PVRSRVFreeAppHintState(IMG_SRV_UM, state) -+ -+#endif /* defined(__linux__) */ -+ -+#endif /* OS_APPHINT_H */ -+ -+/****************************************************************************** -+ End of file (os_apphint.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/os_cpu_cache.h b/drivers/gpu/drm/img-rogue/os_cpu_cache.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/os_cpu_cache.h -@@ -0,0 +1,69 @@ -+/*************************************************************************/ /*! -+@File -+@Title OS and CPU d-cache maintenance mechanisms -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Defines for cache management which are visible internally only -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
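The OSGetAppHint*/OSCreateAppHintState wrappers in os_apphint.h above give server code a single calling convention whether the value comes from the Linux km_apphint machinery or from PVRSRVGetAppHint() on other platforms. A usage sketch; MyFeatureThreshold is a hypothetical AppHint name (a real name needs a matching APPHINT_ID_* entry) and the default of 0 is arbitrary:

/* Sketch: querying a driver-wide (not per-device) UINT32 AppHint.
 * "MyFeatureThreshold" is hypothetical and would need an APPHINT_ID_ entry. */
#include "img_types.h"
#include "os_apphint.h"

static IMG_UINT32 QueryMyFeatureThreshold(void)
{
    void *pvHintState = NULL;
    const IMG_UINT32 ui32Default = 0;
    IMG_UINT32 ui32Value = ui32Default;

    OSCreateAppHintState(&pvHintState);

    /* APPHINT_NO_DEVICE: the hint applies to the whole driver, not one device */
    OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvHintState, MyFeatureThreshold,
                       &ui32Default, &ui32Value);

    OSFreeAppHintState(pvHintState);

    return ui32Value;
}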
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef OS_CPU_CACHE_H -+#define OS_CPU_CACHE_H -+ -+#include "info_page_defs.h" -+ -+#define PVRSRV_CACHE_OP_TIMELINE 0x8 /*!< Request SW_SYNC timeline notification when executed */ -+#define PVRSRV_CACHE_OP_FORCE_SYNCHRONOUS 0x10 /*!< Force all batch members to be executed synchronously */ -+ -+#define CACHEFLUSH_ISA_X86 0x1 /*!< x86/x64 specific UM range-based cache flush */ -+#define CACHEFLUSH_ISA_ARM64 0x2 /*!< Aarch64 specific UM range-based cache flush */ -+#define CACHEFLUSH_ISA_GENERIC 0x3 /*!< Other ISA's without UM range-based cache flush */ -+#ifndef CACHEFLUSH_ISA_TYPE -+ #if defined(__i386__) || defined(__x86_64__) -+ #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_X86 -+ #elif defined(__arm64__) || defined(__aarch64__) -+ #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_ARM64 -+ #else -+ #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_GENERIC -+ #endif -+#endif -+ -+#if (CACHEFLUSH_ISA_TYPE == CACHEFLUSH_ISA_X86) || (CACHEFLUSH_ISA_TYPE == CACHEFLUSH_ISA_ARM64) -+#define CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH /*!< x86/x86_64/ARM64 supports user-mode d-cache flush */ -+#endif -+ -+#endif /* OS_CPU_CACHE_H */ -diff --git a/drivers/gpu/drm/img-rogue/os_srvinit_param.h b/drivers/gpu/drm/img-rogue/os_srvinit_param.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/os_srvinit_param.h -@@ -0,0 +1,328 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services initialisation parameters header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Services initialisation parameter support for the Linux kernel. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
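os_cpu_cache.h above resolves CACHEFLUSH_ISA_TYPE purely from compiler-defined architecture macros, and only x86/x86_64 and AArch64 get CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH; a riscv64 build such as this one therefore lands on CACHEFLUSH_ISA_GENERIC. An illustrative guard (the helper itself is not from the driver) showing how code keys off that selection:

/* Illustrative only: whether a user-mode range-based d-cache flush can be
 * offered to clients. On riscv64 the generic branch is taken. */
#include "img_types.h"
#include "os_cpu_cache.h"

static IMG_BOOL CacheOpHasUMRangeFlush(void)
{
#if defined(CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH)
    return IMG_TRUE;   /* CACHEFLUSH_ISA_X86 or CACHEFLUSH_ISA_ARM64 */
#else
    return IMG_FALSE;  /* CACHEFLUSH_ISA_GENERIC: kernel-mode maintenance only */
#endif
}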
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef OS_SRVINIT_PARAM_H -+#define OS_SRVINIT_PARAM_H -+ -+#if defined(__linux__) && defined(__KERNEL__) -+#include "km_apphint.h" -+#include "km_apphint_defs.h" -+ -+/* Supplied to SrvInitParamGetXXX() functions when the param/AppHint is -+ * applicable to all devices and not a specific device. Typically used -+ * for server-wide build and module AppHints. -+ */ -+#define INITPARAM_NO_DEVICE (NULL) -+ -+#define SrvInitParamOpen() NULL -+#define SrvInitParamClose(pvState) ((void)(pvState)) -+ -+#define SrvInitParamGetBOOL(device, state, name, value) \ -+ ((void) pvr_apphint_get_bool(device, APPHINT_ID_ ## name, &value)) -+ -+#define SrvInitParamGetUINT32(device, state, name, value) \ -+ ((void) pvr_apphint_get_uint32(device, APPHINT_ID_ ## name, &value)) -+ -+#define SrvInitParamGetUINT64(device, state, name, value) \ -+ ((void) pvr_apphint_get_uint64(device, APPHINT_ID_ ## name, &value)) -+ -+#define SrvInitParamGetSTRING(device, state, name, buffer, size) \ -+ ((void) pvr_apphint_get_string(device, APPHINT_ID_ ## name, buffer, size)) -+ -+#define SrvInitParamGetUINT32BitField(device, state, name, value) \ -+ ((void) pvr_apphint_get_uint32(device, APPHINT_ID_ ## name, &value)) -+ -+#define SrvInitParamGetUINT32List(device, state, name, value) \ -+ ((void) pvr_apphint_get_uint32(device, APPHINT_ID_ ## name, &value)) -+ -+#else /* defined(__linux__) && defined(__KERNEL__) */ -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+#include "img_defs.h" -+#include "img_types.h" -+ -+/*! Lookup item. */ -+typedef struct -+{ -+ const IMG_CHAR *pszValue; /*!< looked up name */ -+ IMG_UINT32 ui32Value; /*!< looked up value */ -+} SRV_INIT_PARAM_UINT32_LOOKUP; -+ -+/*************************************************************************/ /*! -+@Brief SrvInitParamOpen -+ -+@Description Establish a connection to the Parameter resource store which is -+ used to hold configuration information associated with the -+ server instance. -+ -+@Return (void *) Handle to Parameter resource store to be used for -+ subsequent parameter value queries -+ -+*/ /**************************************************************************/ -+void *SrvInitParamOpen(void); -+ -+/*************************************************************************/ /*! 
-+@Brief SrvInitParamClose -+ -+@Description Remove a pre-existing connection to the Parameter resource store -+ given by 'pvState' and release any temporary storage associated -+ with the 'pvState' mapping handle -+ -+@Input pvState Handle to Parameter resource store -+ -+*/ /**************************************************************************/ -+void SrvInitParamClose(void *pvState); -+ -+/*************************************************************************/ /*! -+@Brief _SrvInitParamGetBOOL -+ -+@Description Get the current BOOL value for parameter 'pszName' from the -+ Parameter resource store attached to 'pvState' -+ -+@Input pvState Handle to Parameter resource store -+ -+@Input pszName Name of parameter to look-up -+ -+@Input pbDefault Value to return if parameter not found -+ -+@Output pbValue Value of parameter 'pszName' or 'pbDefault' -+ if not found -+ -+*/ /**************************************************************************/ -+void _SrvInitParamGetBOOL( -+ void *pvState, -+ const IMG_CHAR *pszName, -+ const IMG_BOOL *pbDefault, -+ IMG_BOOL *pbValue -+); -+ -+/*! Get the BOOL value for parameter 'name' from the parameter resource store -+ * attached to 'state'. */ -+#define SrvInitParamGetBOOL(device, state, name, value) \ -+ _SrvInitParamGetBOOL(state, # name, & __SrvInitParam_ ## name, &(value)) -+ -+/*! Initialise FLAG type parameter identified by 'name'. */ -+#define SrvInitParamInitFLAG(name, defval, unused) \ -+ static const IMG_BOOL __SrvInitParam_ ## name = defval; -+ -+/*! Initialise BOOL type parameter identified by 'name'. */ -+#define SrvInitParamInitBOOL(name, defval, unused) \ -+ static const IMG_BOOL __SrvInitParam_ ## name = defval; -+ -+/*************************************************************************/ /*! -+@Brief _SrvInitParamGetUINT32 -+ -+@Description Get the current IMG_UINT32 value for parameter 'pszName' -+ from the Parameter resource store attached to 'pvState' -+ -+@Input pvState Handle to Parameter resource store -+ -+@Input pszName Name of parameter to look-up -+ -+@Input pui32Default Value to return if parameter not found -+ -+@Output pui32Value Value of parameter 'pszName' or -+ 'pui32Default' if not found -+ -+*/ /**************************************************************************/ -+void _SrvInitParamGetUINT32( -+ void *pvState, -+ const IMG_CHAR *pszName, -+ const IMG_UINT32 *pui32Default, -+ IMG_UINT32 *pui32Value -+); -+ -+/*! Get the UINT32 value for parameter 'name' from the parameter resource store -+ * attached to 'state'. */ -+#define SrvInitParamGetUINT32(device, state, name, value) \ -+ _SrvInitParamGetUINT32(state, # name, & __SrvInitParam_ ## name, &(value)) -+ -+/*! Initialise UINT32 type parameter identified by 'name'. */ -+#define SrvInitParamInitUINT32(name, defval, unused) \ -+ static const IMG_UINT32 __SrvInitParam_ ## name = defval; -+ -+/*! Initialise UINT64 type parameter identified by 'name'. */ -+#define SrvInitParamInitUINT64(name, defval, unused) \ -+ static const IMG_UINT64 __SrvInitParam_ ## name = defval; -+ -+/*! @cond Doxygen_Suppress */ -+#define SrvInitParamUnreferenced(name) \ -+ PVR_UNREFERENCED_PARAMETER( __SrvInitParam_ ## name ) -+/*! @endcond */ -+ -+/*************************************************************************/ /*! 
-+@Brief _SrvInitParamGetUINT32BitField -+ -+@Description Get the current IMG_UINT32 bitfield value for parameter -+ 'pszBasename' from the Parameter resource store -+ attached to 'pvState' -+ -+@Input pvState Handle to Parameter resource store -+ -+@Input pszBaseName Bitfield parameter name to search for -+ -+@Input uiDefault Default return value if parameter not found -+ -+@Input psLookup Bitfield array to traverse -+ -+@Input uiSize number of elements in 'psLookup' -+ -+@Output puiValue Value of bitfield or 'uiDefault' if -+ parameter not found -+*/ /**************************************************************************/ -+void _SrvInitParamGetUINT32BitField( -+ void *pvState, -+ const IMG_CHAR *pszBaseName, -+ IMG_UINT32 uiDefault, -+ const SRV_INIT_PARAM_UINT32_LOOKUP *psLookup, -+ IMG_UINT32 uiSize, -+ IMG_UINT32 *puiValue -+); -+ -+/*! Initialise UINT32 bitfield type parameter identified by 'name' with -+ * 'inival' value and 'lookup' look up array. */ -+#define SrvInitParamInitUINT32Bitfield(name, inival, lookup) \ -+ static IMG_UINT32 __SrvInitParam_ ## name = inival; \ -+ static SRV_INIT_PARAM_UINT32_LOOKUP * \ -+ __SrvInitParamLookup_ ## name = &lookup[0]; \ -+ static const IMG_UINT32 __SrvInitParamSize_ ## name = \ -+ ARRAY_SIZE(lookup); -+ -+/*! Get the UINT32 bitfield value for parameter 'name' from the parameter -+ * resource store attached to 'state'. */ -+#define SrvInitParamGetUINT32BitField(device, state, name, value) \ -+ _SrvInitParamGetUINT32BitField(state, # name, __SrvInitParam_ ## name, __SrvInitParamLookup_ ## name, __SrvInitParamSize_ ## name, &(value)) -+ -+/*************************************************************************/ /*! -+@Brief _SrvInitParamGetUINT32List -+ -+@Description Get the current IMG_UINT32 list value for the specified -+ parameter 'pszName' from the Parameter resource store -+ attached to 'pvState' -+ -+@Input pvState Handle to Parameter resource store -+ -+@Input pszName Parameter list name to search for -+ -+@Input uiDefault Default value to return if 'pszName' is -+ not set within 'pvState' -+ -+@Input psLookup parameter list to traverse -+ -+@Input uiSize number of elements in 'psLookup' list -+ -+@Output puiValue value of located list element or -+ 'uiDefault' if parameter not found -+ -+*/ /**************************************************************************/ -+void _SrvInitParamGetUINT32List( -+ void *pvState, -+ const IMG_CHAR *pszName, -+ IMG_UINT32 uiDefault, -+ const SRV_INIT_PARAM_UINT32_LOOKUP *psLookup, -+ IMG_UINT32 uiSize, -+ IMG_UINT32 *puiValue -+); -+ -+/*! Get the UINT32 list value for parameter 'name' from the parameter -+ * resource store attached to 'state'. */ -+#define SrvInitParamGetUINT32List(device, state, name, value) \ -+ _SrvInitParamGetUINT32List(state, # name, __SrvInitParam_ ## name, __SrvInitParamLookup_ ## name, __SrvInitParamSize_ ## name, &(value)) -+ -+/*! Initialise UINT32 list type parameter identified by 'name' with -+ * 'defval' default value and 'lookup' look up list. */ -+#define SrvInitParamInitUINT32List(name, defval, lookup) \ -+ static IMG_UINT32 __SrvInitParam_ ## name = defval; \ -+ static SRV_INIT_PARAM_UINT32_LOOKUP * \ -+ __SrvInitParamLookup_ ## name = &lookup[0]; \ -+ static const IMG_UINT32 __SrvInitParamSize_ ## name = \ -+ ARRAY_SIZE(lookup); -+ -+/*************************************************************************/ /*! 
-+@Brief _SrvInitParamGetSTRING -+ -+@Description Get the contents of the specified parameter string 'pszName' -+ from the Parameter resource store attached to 'pvState' -+ -+@Input pvState Handle to Parameter resource store -+ -+@Input pszName Parameter string name to search for -+ -+@Input psDefault Default string to return if 'pszName' is -+ not set within 'pvState' -+ -+@Input size Size of output 'pBuffer' -+ -+@Output pBuffer Output copy of 'pszName' contents or -+ copy of 'psDefault' if 'pszName' is not -+ set within 'pvState' -+ -+*/ /**************************************************************************/ -+void _SrvInitParamGetSTRING( -+ void *pvState, -+ const IMG_CHAR *pszName, -+ const IMG_CHAR *psDefault, -+ IMG_CHAR *pBuffer, -+ size_t size -+); -+ -+/*! Initialise STRING type parameter identified by 'name' with 'defval' default -+ * value. */ -+#define SrvInitParamInitSTRING(name, defval, unused) \ -+ static const IMG_CHAR *__SrvInitParam_ ## name = defval; -+ -+/*! Get the STRING value for parameter 'name' from the parameter resource store -+ * attached to 'state'. */ -+#define SrvInitParamGetSTRING(device, state, name, buffer, size) \ -+ _SrvInitParamGetSTRING(state, # name, __SrvInitParam_ ## name, buffer, size) -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif /* defined(__linux__) && defined(__KERNEL__) */ -+ -+#endif /* OS_SRVINIT_PARAM_H */ -diff --git a/drivers/gpu/drm/img-rogue/osconnection_server.c b/drivers/gpu/drm/img-rogue/osconnection_server.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/osconnection_server.c -@@ -0,0 +1,157 @@ -+/*************************************************************************/ /*! -+@File -+@Title Linux specific per process data functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
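os_srvinit_param.h above hides two very different implementations behind one macro family: in the Linux kernel build the SrvInitParamGet* macros call straight into pvr_apphint_get_*(), while on other platforms the SrvInitParamInit* macros bake in static defaults that the _SrvInitParamGet* helpers fall back to when the parameter store has no entry. A sketch of the non-kernel usage pattern; MyInitValue and its default of 16 are made up for illustration:

/* Sketch of the non-kernel path. The SrvInitParamInit* invocation must appear
 * at file scope before the matching SrvInitParamGet* use. */
#include "img_types.h"
#include "os_srvinit_param.h"

SrvInitParamInitUINT32(MyInitValue, 16, unused)  /* emits __SrvInitParam_MyInitValue */

static IMG_UINT32 QueryMyInitValue(void)
{
    void *pvState = SrvInitParamOpen();          /* attach to the parameter store */
    IMG_UINT32 ui32Value;

    /* The device argument is ignored on this path, hence NULL */
    SrvInitParamGetUINT32(NULL, pvState, MyInitValue, ui32Value);

    SrvInitParamClose(pvState);

    return ui32Value;
}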
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include -+ -+#include "connection_server.h" -+#include "osconnection_server.h" -+ -+#include "env_connection.h" -+#include "allocmem.h" -+#include "pvr_debug.h" -+ -+#include -+ -+#if defined(SUPPORT_ION) -+#include -+#include PVR_ANDROID_ION_HEADER -+ -+/* -+ The ion device (the base object for all requests) -+ gets created by the system and we acquire it via -+ Linux specific functions provided by the system layer -+*/ -+#include "ion_sys.h" -+#endif -+ -+PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData) -+{ -+ ENV_CONNECTION_PRIVATE_DATA *psPrivData = pvOSData; -+ ENV_CONNECTION_DATA *psEnvConnection; -+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ ENV_ION_CONNECTION_DATA *psIonConnection; -+#endif -+ -+ *phOsPrivateData = OSAllocZMem(sizeof(ENV_CONNECTION_DATA)); -+ -+ if (*phOsPrivateData == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __func__)); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ psEnvConnection = (ENV_CONNECTION_DATA *)*phOsPrivateData; -+ -+ psEnvConnection->owner = current->tgid; -+ -+ psEnvConnection->psDevNode = psPrivData->psDevNode; -+ -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) -+ psEnvConnection->pvPvrSyncPrivateData = NULL; -+#endif -+ -+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ psIonConnection = (ENV_ION_CONNECTION_DATA *)OSAllocZMem(sizeof(ENV_ION_CONNECTION_DATA)); -+ if (psIonConnection == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __func__)); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ psEnvConnection->psIonData = psIonConnection; -+ /* -+ We can have more than one connection per process, so we need -+ more than the PID to have a unique name. 
-+ */ -+ psEnvConnection->psIonData->psIonDev = IonDevAcquire(); -+ OSSNPrintf(psEnvConnection->psIonData->azIonClientName, ION_CLIENT_NAME_SIZE, "pvr_ion_client-%p-%d", *phOsPrivateData, OSGetCurrentClientProcessIDKM()); -+ psEnvConnection->psIonData->psIonClient = -+ ion_client_create(psEnvConnection->psIonData->psIonDev, -+ psEnvConnection->psIonData->azIonClientName); -+ -+ if (IS_ERR_OR_NULL(psEnvConnection->psIonData->psIonClient)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "OSConnectionPrivateDataInit: Couldn't create " -+ "ion client for per connection data")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+#endif /* SUPPORT_ION && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData) -+{ -+ if (hOsPrivateData == NULL) -+ { -+ return PVRSRV_OK; -+ } -+ -+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) -+ { -+ ENV_CONNECTION_DATA *psEnvConnection = hOsPrivateData; -+ -+ PVR_ASSERT(psEnvConnection->psIonData != NULL); -+ -+ PVR_ASSERT(psEnvConnection->psIonData->psIonClient != NULL); -+ ion_client_destroy(psEnvConnection->psIonData->psIonClient); -+ -+ IonDevRelease(psEnvConnection->psIonData->psIonDev); -+ OSFreeMem(psEnvConnection->psIonData); -+ } -+#endif -+ -+ OSFreeMem(hOsPrivateData); -+ /*not nulling pointer, copy on stack*/ -+ -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_DEVICE_NODE *OSGetDevNode(CONNECTION_DATA *psConnection) -+{ -+ ENV_CONNECTION_DATA *psEnvConnection; -+ -+ psEnvConnection = PVRSRVConnectionPrivateData(psConnection); -+ PVR_ASSERT(psEnvConnection); -+ -+ return psEnvConnection->psDevNode; -+} -diff --git a/drivers/gpu/drm/img-rogue/osconnection_server.h b/drivers/gpu/drm/img-rogue/osconnection_server.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/osconnection_server.h -@@ -0,0 +1,133 @@ -+/**************************************************************************/ /*! -+@File -+@Title Server side connection management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description API for OS specific callbacks from server side connection -+ management -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+#ifndef OSCONNECTION_SERVER_H -+#define OSCONNECTION_SERVER_H -+ -+#include "handle.h" -+#include "osfunc.h" -+ -+/*! Function not implemented definition. */ -+#define OSCONNECTION_SERVER_NOT_IMPLEMENTED 0 -+/*! Assert used for OSCONNECTION_SERVER_NOT_IMPLEMENTED. */ -+#define OSCONNECTION_SERVER_NOT_IMPLEMENTED_ASSERT() PVR_ASSERT(OSCONNECTION_SERVER_NOT_IMPLEMENTED) -+ -+#if defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) -+PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData); -+PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData); -+ -+PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase); -+ -+PVRSRV_DEVICE_NODE* OSGetDevNode(CONNECTION_DATA *psConnection); -+ -+#else /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(OSConnectionPrivateDataInit) -+#endif -+/*************************************************************************/ /*! -+@Function OSConnectionPrivateDataInit -+@Description Allocates and initialises any OS-specific private data -+ relating to a connection. -+ Called from PVRSRVCommonConnectionConnect(). -+@Input pvOSData pointer to any OS private data -+@Output phOsPrivateData handle to the created connection -+ private data -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+static INLINE PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData) -+{ -+ PVR_UNREFERENCED_PARAMETER(phOsPrivateData); -+ PVR_UNREFERENCED_PARAMETER(pvOSData); -+ -+ OSCONNECTION_SERVER_NOT_IMPLEMENTED_ASSERT(); -+ -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(OSConnectionPrivateDataDeInit) -+#endif -+/*************************************************************************/ /*! -+@Function OSConnectionPrivateDataDeInit -+@Description Frees previously allocated OS-specific private data -+ relating to a connection. -+@Input hOsPrivateData handle to the connection private data -+ to be freed -+@Return PVRSRV_OK on success, a failure code otherwise. 
-+*/ /**************************************************************************/ -+static INLINE PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData) -+{ -+ PVR_UNREFERENCED_PARAMETER(hOsPrivateData); -+ -+ OSCONNECTION_SERVER_NOT_IMPLEMENTED_ASSERT(); -+ -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(OSConnectionSetHandleOptions) -+#endif -+static INLINE PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase) -+{ -+ PVR_UNREFERENCED_PARAMETER(psHandleBase); -+ -+ OSCONNECTION_SERVER_NOT_IMPLEMENTED_ASSERT(); -+ -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(OSGetDevNode) -+#endif -+static INLINE PVRSRV_DEVICE_NODE* OSGetDevNode(CONNECTION_DATA *psConnection) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ OSCONNECTION_SERVER_NOT_IMPLEMENTED_ASSERT(); -+ -+ return NULL; -+} -+#endif /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ -+ -+ -+#endif /* OSCONNECTION_SERVER_H */ -diff --git a/drivers/gpu/drm/img-rogue/osdi_impl.h b/drivers/gpu/drm/img-rogue/osdi_impl.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/osdi_impl.h -@@ -0,0 +1,211 @@ -+/*************************************************************************/ /*! -+@File -+@Title Functions and types for creating Debug Info implementations. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef OSDI_IMPL_H -+#define OSDI_IMPL_H -+ -+#if defined(__linux__) -+ #include <linux/version.h> -+ -+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) -+ #include <linux/stdarg.h> -+ #else -+ #include <stdarg.h> -+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ -+#else -+ #include <stdarg.h> -+#endif /* __linux__ */ -+ -+#include "di_common.h" -+#include "pvrsrv_error.h" -+ -+/*! Implementation callbacks. Those operations are performed on native -+ * implementation handles. */ -+typedef struct OSDI_IMPL_ENTRY_CB -+{ -+ /*! @Function pfnWrite -+ * -+ * @Description -+ * Writes the binary data of the DI entry to the output sync, whatever that -+ * may be for the DI implementation. -+ * -+ * @Input pvNativeHandle native implementation handle -+ * @Input pvData data -+ * @Input uiSize pvData length -+ */ -+ void (*pfnWrite)(void *pvNativeHandle, const void *pvData, -+ IMG_UINT32 uiSize); -+ -+ /*! @Function pfnVPrintf -+ * -+ * @Description -+ * Implementation of the 'vprintf' operation. -+ * -+ * @Input pvNativeHandle native implementation handle -+ * @Input pszFmt NUL-terminated format string -+ * @Input va_list variable length argument list -+ */ -+ void (*pfnVPrintf)(void *pvNativeHandle, const IMG_CHAR *pszFmt, va_list pArgs); -+ -+ /*! @Function pfnPuts -+ * -+ * @Description -+ * Implementation of the 'puts' operation. -+ * -+ * @Input pvNativeHandle native implementation handle -+ * @Input pszStr NUL-terminated string -+ */ -+ void (*pfnPuts)(void *pvNativeHandle, const IMG_CHAR *pszStr); -+ -+ /*! @Function pfnHasOverflowed -+ * -+ * @Description -+ * Checks if the native implementation's buffer has overflowed. -+ * -+ * @Input pvNativeHandle native implementation handle -+ */ -+ IMG_BOOL (*pfnHasOverflowed)(void *pvNativeHandle); -+} OSDI_IMPL_ENTRY_CB; -+ -+/*! Debug Info entry specialisation. */ -+struct OSDI_IMPL_ENTRY -+{ -+ /*! Pointer to the private data. The data originates from DICreateEntry() -+ * function. */ -+ void *pvPrivData; -+ /*! Pointer to the implementation native handle. */ -+ void *pvNative; -+ /*! Implementation entry callbacks. */ -+ OSDI_IMPL_ENTRY_CB *psCb; -+}; /* OSDI_IMPL_ENTRY is already typedef-ed in di_common.h */ -+ -+/*! Debug Info implementation callbacks. */ -+typedef struct OSDI_IMPL_CB -+{ -+ /*! Initialise implementation callback. -+ */ -+ PVRSRV_ERROR (*pfnInit)(void); -+ -+ /*! De-initialise implementation callback. -+ */ -+ void (*pfnDeInit)(void); -+ -+ /*! @Function pfnCreateEntry -+ * -+ * @Description -+ * Creates entry of eType type with pszName in the pvNativeGroup parent -+ * group. The entry is an abstract term which depends on the implementation, -+ * e.g.: a file in DebugFS.
-+ * -+ * @Input pszName: name of the entry -+ * @Input eType: type of the entry -+ * @Input psIterCb: iterator implementation for the entry -+ * @Input pvPrivData: data that will be passed to the iterator callbacks -+ * in OSDI_IMPL_ENTRY - it can be retrieved by calling -+ * DIGetPrivData() function -+ * @Input pvNativeGroup: implementation specific handle to the parent group -+ * -+ * @Output pvNativeEntry: implementation specific handle to the entry -+ * -+ * return PVRSRV_ERROR error code -+ */ -+ PVRSRV_ERROR (*pfnCreateEntry)(const IMG_CHAR *pszName, -+ DI_ENTRY_TYPE eType, -+ const DI_ITERATOR_CB *psIterCb, -+ void *pvPrivData, -+ void *pvNativeGroup, -+ void **pvNativeEntry); -+ -+ /*! @Function pfnDestroyEntry -+ * -+ * @Description -+ * Destroys native entry. -+ * -+ * @Input psNativeEntry: handle to the entry -+ */ -+ void (*pfnDestroyEntry)(void *psNativeEntry); -+ -+ /*! @Function pfnCreateGroup -+ * -+ * @Description -+ * Creates group with pszName in the psNativeParentGroup parent group. -+ * The group is an abstract term which depends on the implementation, -+ * e.g.: a directory in DebugFS. -+ * -+ * @Input pszName: name of the entry -+ * @Input psNativeParentGroup: implementation specific handle to the parent -+ * group -+ * -+ * @Output psNativeGroup: implementation specific handle to the group -+ * -+ * return PVRSRV_ERROR error code -+ */ -+ PVRSRV_ERROR (*pfnCreateGroup)(const IMG_CHAR *pszName, -+ void *psNativeParentGroup, -+ void **psNativeGroup); -+ -+ /*! @Function pfnDestroyGroup -+ * -+ * @Description -+ * Destroys native group. -+ * -+ * @Input psNativeGroup: handle to the group -+ */ -+ void (*pfnDestroyGroup)(void *psNativeGroup); -+} OSDI_IMPL_CB; -+ -+/*! @Function DIRegisterImplementation -+ * -+ * @Description -+ * Registers Debug Info implementations with the framework. The framework takes -+ * the ownership of the implementation and will clean up the resources when -+ * it's de-initialised. -+ * -+ * @Input pszName: name of the implementation -+ * @Input psImplCb: implementation callbacks -+ * -+ * @Return PVRSRV_ERROR error code -+ */ -+PVRSRV_ERROR DIRegisterImplementation(const IMG_CHAR *pszName, -+ const OSDI_IMPL_CB *psImplCb); -+ -+#endif /* OSDI_IMPL_H */ -diff --git a/drivers/gpu/drm/img-rogue/osfunc.c b/drivers/gpu/drm/img-rogue/osfunc.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/osfunc.c -@@ -0,0 +1,2878 @@ -+/*************************************************************************/ /*! -+@File -+@Title Environment related functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) -+#include -+#include -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) -+#include -+#include -+#else -+#include -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */ -+#if defined(SUPPORT_SECURE_ALLOC_KM) -+#if defined(PVR_ANDROID_HAS_DMA_HEAP_FIND) -+#include -+#include "physmem_dmabuf.h" -+#else -+#include "physmem.h" -+#endif -+#endif -+ -+#include "log2.h" -+#include "osfunc.h" -+#include "cache_km.h" -+#include "img_defs.h" -+#include "img_types.h" -+#include "allocmem.h" -+#include "devicemem_server_utils.h" -+#include "event.h" -+#include "linkage.h" -+#include "pvr_uaccess.h" -+#include "pvr_debug.h" -+#include "pvr_bridge_k.h" -+#include "pvrsrv_memallocflags.h" -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#include "process_stats.h" -+#endif -+#include "physmem_osmem_linux.h" -+#include "dma_support.h" -+ -+#include "pvrsrv_sync_server.h" -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+#include "ri_server.h" -+#include "pvr_ricommon.h" -+#endif -+ -+#include "kernel_compatibility.h" -+ -+#if defined(VIRTUAL_PLATFORM) -+#define EVENT_OBJECT_TIMEOUT_US (120000000ULL) -+#else -+#if defined(EMULATOR) || defined(TC_APOLLO_TCF5) -+#define EVENT_OBJECT_TIMEOUT_US (2000000ULL) -+#else -+#define EVENT_OBJECT_TIMEOUT_US (100000ULL) -+#endif /* EMULATOR */ -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) -+#define PVR_FOLL_LONGTERM (0x0U) -+#else -+#define PVR_FOLL_LONGTERM FOLL_LONGTERM -+#endif -+ -+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 6, 0)) -+ -+#define pvr_pin_user_pages_for_dma(puiAddress, num_pages, bWrite, pages) get_user_pages_fast( \ -+ (unsigned long)puiAddress, \ -+ (int)num_pages, \ -+ 
(int) (bWrite ? FOLL_WRITE : 0) | PVR_FOLL_LONGTERM, \ -+ pages) -+#define pvr_unpin_user_page_for_dma(p) put_page(p) -+ -+#else -+ -+#define pvr_pin_user_pages_for_dma(puiAddress, num_pages, bWrite, pages) pin_user_pages_fast( \ -+ (unsigned long)puiAddress, \ -+ (int) num_pages, \ -+ (int) (bWrite ? FOLL_WRITE : 0) | PVR_FOLL_LONGTERM, \ -+ pages) -+#define pvr_unpin_user_page_for_dma(p) unpin_user_page(p) -+ -+#endif -+ -+ -+typedef struct { -+ struct task_struct *kthread; -+ PFN_THREAD pfnThread; -+ void *hData; -+ IMG_CHAR *pszThreadName; -+ IMG_BOOL bIsThreadRunning; -+ IMG_BOOL bIsSupportingThread; -+ PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB; -+ DLLIST_NODE sNode; -+} OSThreadData; -+ -+void OSSuspendTaskInterruptible(void) -+{ -+ set_current_state(TASK_INTERRUPTIBLE); -+ schedule(); -+} -+ -+static DLLIST_NODE gsThreadListHead; -+ -+static void _ThreadListAddEntry(OSThreadData *psThreadListNode) -+{ -+ dllist_add_to_tail(&gsThreadListHead, &(psThreadListNode->sNode)); -+} -+ -+static void _ThreadListRemoveEntry(OSThreadData *psThreadListNode) -+{ -+ dllist_remove_node(&(psThreadListNode->sNode)); -+} -+ -+static void _ThreadSetStopped(OSThreadData *psOSThreadData) -+{ -+ psOSThreadData->bIsThreadRunning = IMG_FALSE; -+} -+ -+static void _OSInitThreadList(void) -+{ -+ dllist_init(&gsThreadListHead); -+} -+ -+void OSThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ PDLLIST_NODE psNodeCurr, psNodeNext; -+ -+ dllist_foreach_node(&gsThreadListHead, psNodeCurr, psNodeNext) -+ { -+ OSThreadData *psThreadListNode; -+ psThreadListNode = IMG_CONTAINER_OF(psNodeCurr, OSThreadData, sNode); -+ -+ PVR_DUMPDEBUG_LOG(" %s : %s", -+ psThreadListNode->pszThreadName, -+ (psThreadListNode->bIsThreadRunning) ? "Running" : "Stopped"); -+ -+ if (psThreadListNode->pfnDebugDumpCB) -+ { -+ psThreadListNode->pfnDebugDumpCB(pfnDumpDebugPrintf, pvDumpDebugFile); -+ } -+ } -+} -+ -+PVRSRV_ERROR OSPhyContigPagesAlloc(PHYS_HEAP *psPhysHeap, size_t uiSize, -+ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_PID uiPid) -+{ -+ PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap); -+ struct device *psDev = psDevNode->psDevConfig->pvOSDevice; -+ IMG_CPU_PHYADDR sCpuPAddr; -+ struct page *psPage; -+ IMG_UINT32 ui32Order=0; -+ gfp_t gfp_flags; -+ -+ PVR_ASSERT(uiSize != 0); -+ /*Align the size to the page granularity */ -+ uiSize = PAGE_ALIGN(uiSize); -+ -+ /*Get the order to be used with the allocation */ -+ ui32Order = get_order(uiSize); -+ -+ gfp_flags = GFP_KERNEL; -+ -+#if !defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY) -+ if (psDev) -+ { -+ if (*psDev->dma_mask == DMA_BIT_MASK(32)) -+ { -+ /* Limit to 32 bit. -+ * Achieved by setting __GFP_DMA32 for 64 bit systems */ -+ gfp_flags |= __GFP_DMA32; -+ } -+ else if (*psDev->dma_mask < DMA_BIT_MASK(32)) -+ { -+ /* Limit to whatever the size of DMA zone is. */ -+ gfp_flags |= __GFP_DMA; -+ } -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(psDev); -+#endif -+ -+ /*allocate the pages */ -+ psPage = alloc_pages(gfp_flags, ui32Order); -+ if (psPage == NULL) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ uiSize = (1 << ui32Order) * PAGE_SIZE; -+ -+ psMemHandle->u.pvHandle = psPage; -+ psMemHandle->uiOrder = ui32Order; -+ sCpuPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(page_to_phys(psPage)); -+ -+ /* -+ * Even when more pages are allocated as base MMU object we still need one single physical address because -+ * they are physically contiguous. 
-+ */ -+ PhysHeapCpuPAddrToDevPAddr(psPhysHeap, 1, psDevPAddr, &sCpuPAddr); -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, -+ uiSize, -+ (IMG_UINT64)(uintptr_t) psPage, -+ uiPid); -+#else -+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, -+ psPage, -+ sCpuPAddr, -+ uiSize, -+ uiPid -+ DEBUG_MEMSTATS_VALUES); -+#endif -+#else -+ PVR_UNREFERENCED_PARAMETER(uiPid); -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+void OSPhyContigPagesFree(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle) -+{ -+ struct page *psPage = (struct page*) psMemHandle->u.pvHandle; -+ IMG_UINT32 ui32Order; -+ -+ PVR_UNREFERENCED_PARAMETER(psPhysHeap); -+ -+ ui32Order = psMemHandle->uiOrder; -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, -+ (IMG_UINT64)(uintptr_t) psPage); -+#else -+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, -+ (IMG_UINT64)(uintptr_t) psPage, -+ OSGetCurrentClientProcessIDKM()); -+#endif -+#endif -+ -+ __free_pages(psPage, ui32Order); -+ psMemHandle->uiOrder = 0; -+} -+ -+PVRSRV_ERROR OSPhyContigPagesMap(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle, -+ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, -+ void **pvPtr) -+{ -+ size_t actualSize = 1 << (PAGE_SHIFT + psMemHandle->uiOrder); -+ *pvPtr = kmap((struct page*)psMemHandle->u.pvHandle); -+ -+ PVR_UNREFERENCED_PARAMETER(psDevPAddr); -+ -+ PVR_UNREFERENCED_PARAMETER(actualSize); /* If we don't take an #ifdef path */ -+ PVR_UNREFERENCED_PARAMETER(uiSize); -+ PVR_UNREFERENCED_PARAMETER(psPhysHeap); -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, actualSize, OSGetCurrentClientProcessIDKM()); -+#else -+ { -+ IMG_CPU_PHYADDR sCpuPAddr; -+ sCpuPAddr.uiAddr = 0; -+ -+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, -+ *pvPtr, -+ sCpuPAddr, -+ actualSize, -+ OSGetCurrentClientProcessIDKM() -+ DEBUG_MEMSTATS_VALUES); -+ } -+#endif -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+void OSPhyContigPagesUnmap(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle, void *pvPtr) -+{ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ /* Mapping is done a page at a time */ -+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, -+ (1 << (PAGE_SHIFT + psMemHandle->uiOrder)), -+ OSGetCurrentClientProcessIDKM()); -+#else -+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, -+ (IMG_UINT64)(uintptr_t)pvPtr, -+ OSGetCurrentClientProcessIDKM()); -+#endif -+#endif -+ -+ PVR_UNREFERENCED_PARAMETER(psPhysHeap); -+ PVR_UNREFERENCED_PARAMETER(pvPtr); -+ -+ kunmap((struct page*) psMemHandle->u.pvHandle); -+} -+ -+PVRSRV_ERROR OSPhyContigPagesClean(PHYS_HEAP *psPhysHeap, -+ PG_HANDLE *psMemHandle, -+ IMG_UINT32 uiOffset, -+ IMG_UINT32 uiLength) -+{ -+ PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap); -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ struct page* psPage = (struct page*) psMemHandle->u.pvHandle; -+ -+ void* pvVirtAddrStart = kmap(psPage) + uiOffset; -+ IMG_CPU_PHYADDR sPhysStart, sPhysEnd; -+ -+ IMG_UINT32 ui32Order; -+ -+ if (uiLength == 0) -+ { -+ goto e0; -+ } -+ -+ ui32Order = psMemHandle->uiOrder; -+ if ((uiOffset + uiLength) > ((1 << ui32Order) * PAGE_SIZE)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid 
size params, uiOffset %u, uiLength %u", -+ __func__, -+ uiOffset, -+ uiLength)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto e0; -+ } -+ -+ sPhysStart.uiAddr = page_to_phys(psPage) + uiOffset; -+ sPhysEnd.uiAddr = sPhysStart.uiAddr + uiLength; -+ -+ CacheOpExec(psDevNode, -+ pvVirtAddrStart, -+ pvVirtAddrStart + uiLength, -+ sPhysStart, -+ sPhysEnd, -+ PVRSRV_CACHE_OP_CLEAN); -+ -+e0: -+ kunmap(psPage); -+ -+ return eError; -+} -+ -+#if defined(__GNUC__) -+#define PVRSRV_MEM_ALIGN __attribute__ ((aligned (0x8))) -+#define PVRSRV_MEM_ALIGN_MASK (0x7) -+#else -+#error "PVRSRV Alignment macros need to be defined for this compiler" -+#endif -+ -+IMG_UINT32 OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE eCacheAttribute) -+{ -+ IMG_UINT32 uiSize = 0; -+ -+ switch (eCacheAttribute) -+ { -+ case OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE: -+ uiSize = cache_line_size(); -+ break; -+ -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache attribute type %d", -+ __func__, (IMG_UINT32)eCacheAttribute)); -+ PVR_ASSERT(0); -+ break; -+ } -+ -+ return uiSize; -+} -+ -+IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen) -+{ -+ return (IMG_INT)memcmp(pvBufA, pvBufB, uiLen); -+} -+ -+size_t OSStringLCat(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDstSize) -+{ -+ /* -+ * Let strlcat handle any truncation cases correctly. -+ * We will definitely get a NUL-terminated string set in pszDest -+ */ -+ size_t uSrcSize = strlcat(pszDest, pszSrc, uDstSize); -+ -+#if defined(PVR_DEBUG_STRLCPY) && defined(DEBUG) -+ /* Handle truncation by dumping calling stack if debug allows */ -+ if (uSrcSize >= uDstSize) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: String truncated Src = '<%s>' %ld bytes, Dest = '%s'", -+ __func__, pszSrc, (long)uDstSize, pszDest)); -+ OSDumpStack(); -+ } -+#endif /* defined(PVR_DEBUG_STRLCPY) && defined(DEBUG) */ -+ -+ return uSrcSize; -+} -+ -+IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...) -+{ -+ va_list argList; -+ IMG_INT32 iCount; -+ -+ va_start(argList, pszFormat); -+ iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList); -+ va_end(argList); -+ -+ return iCount; -+} -+ -+IMG_INT32 OSVSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR* pszFormat, va_list vaArgs) -+{ -+ return vsnprintf(pStr, ui32Size, pszFormat, vaArgs); -+} -+ -+size_t OSStringLength(const IMG_CHAR *pStr) -+{ -+ return strlen(pStr); -+} -+ -+size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount) -+{ -+ return strnlen(pStr, uiCount); -+} -+ -+IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2, -+ size_t uiSize) -+{ -+#if defined(DEBUG) -+ /* Double-check that we are not passing NULL parameters in. 
If we are we -+ * return -1 (for arg1 == NULL, arg2 != NULL) -+ * 0 (for arg1 == NULL, arg2 == NULL -+ * +1 (for arg1 != NULL, arg2 == NULL) -+ * strncmp(arg1, arg2, size) otherwise -+ */ -+ if (pStr1 == NULL) -+ { -+ if (pStr2 == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s(%p, %p, %d): Both args NULL", -+ __func__, pStr1, pStr2, (int)uiSize)); -+ OSDumpStack(); -+ return 0; /* Both NULL */ -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s(%p, %p, %d): arg1 NULL", -+ __func__, pStr1, pStr2, (int)uiSize)); -+ OSDumpStack(); -+ return -1; /* NULL < non-NULL */ -+ } -+ } -+ else -+ { -+ if (pStr2 == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s(%p, %p, %d): arg2 NULL", -+ __func__, pStr1, pStr2, (int)uiSize)); -+ OSDumpStack(); -+ return +1; /* non-NULL > NULL */ -+ } -+ else -+ { -+ return strncmp(pStr1, pStr2, uiSize); -+ } -+ } -+#else -+ return strncmp(pStr1, pStr2, uiSize); -+#endif -+} -+ -+PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base, -+ IMG_UINT32 *ui32Result) -+{ -+ if (kstrtou32(pStr, ui32Base, ui32Result) != 0) -+ return PVRSRV_ERROR_CONVERSION_FAILED; -+ -+ return PVRSRV_OK; -+} -+ -+IMG_UINT32 OSStringUINT32ToStr(IMG_CHAR *pszBuf, size_t uSize, -+ IMG_UINT32 ui32Num) -+{ -+ IMG_UINT32 ui32i, ui32Len = 0, ui32NumCopy = ui32Num; -+ -+ /* calculate string length required to hold the number string */ -+ do -+ { -+ ui32Len++; -+ ui32NumCopy /= 10; -+ } while (ui32NumCopy != 0); -+ -+ if (unlikely(ui32Len >= uSize)) -+ { -+ /* insufficient buffer */ -+ return 0; -+ } -+ -+ for (ui32i = 0; ui32i < ui32Len; ui32i++) -+ { -+ pszBuf[ui32Len - (ui32i + 1)] = '0' + ui32Num % 10; -+ ui32Num = ui32Num / 10; -+ } -+ -+ pszBuf[ui32Len] = '\0'; -+ return ui32Len; -+} -+ -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_BUFFER_SYNC) -+static struct workqueue_struct *gpFenceStatusWq; -+ -+static PVRSRV_ERROR _NativeSyncInit(void) -+{ -+ gpFenceStatusWq = create_freezable_workqueue("pvr_fence_status"); -+ if (!gpFenceStatusWq) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create foreign fence status workqueue", -+ __func__)); -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static void _NativeSyncDeinit(void) -+{ -+ destroy_workqueue(gpFenceStatusWq); -+} -+ -+struct workqueue_struct *NativeSyncGetFenceStatusWq(void) -+{ -+ if (!gpFenceStatusWq) -+ { -+#if defined(DEBUG) -+ PVR_ASSERT(gpFenceStatusWq); -+#endif -+ return NULL; -+ } -+ -+ return gpFenceStatusWq; -+} -+#endif -+ -+PVRSRV_ERROR OSInitEnvData(void) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ LinuxInitPhysmem(); -+ -+ _OSInitThreadList(); -+ -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_BUFFER_SYNC) -+ eError = _NativeSyncInit(); -+#endif -+ -+ return eError; -+} -+ -+void OSDeInitEnvData(void) -+{ -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_BUFFER_SYNC) -+ _NativeSyncDeinit(); -+#endif -+ -+ LinuxDeinitPhysmem(); -+} -+ -+void OSReleaseThreadQuanta(void) -+{ -+ schedule(); -+} -+ -+void OSMemoryBarrier(volatile void *hReadback) -+{ -+ mb(); -+ -+ if (hReadback) -+ { -+ /* Force a read-back to memory to avoid posted writes on certain buses -+ * e.g. PCI(E) -+ */ -+ (void) OSReadDeviceMem32(hReadback); -+ } -+} -+ -+void OSWriteMemoryBarrier(volatile void *hReadback) -+{ -+ wmb(); -+ -+ if (hReadback) -+ { -+ /* Force a read-back to memory to avoid posted writes on certain buses -+ * e.g. 
PCI(E) -+ */ -+ (void) OSReadDeviceMem32(hReadback); -+ } -+} -+ -+/* Not matching/aligning this API to the Clockus() API above to avoid necessary -+ * multiplication/division operations in calling code. -+ */ -+static inline IMG_UINT64 Clockns64(void) -+{ -+ IMG_UINT64 timenow; -+ -+ /* Kernel thread preempt protection. Some architecture implementations -+ * (ARM) of sched_clock are not preempt safe when the kernel is configured -+ * as such e.g. CONFIG_PREEMPT and others. -+ */ -+ preempt_disable(); -+ -+ /* Using sched_clock instead of ktime_get since we need a time stamp that -+ * correlates with that shown in kernel logs and trace data not one that -+ * is a bit behind. */ -+ timenow = sched_clock(); -+ -+ preempt_enable(); -+ -+ return timenow; -+} -+ -+IMG_UINT64 OSClockns64(void) -+{ -+ return Clockns64(); -+} -+ -+IMG_UINT64 OSClockus64(void) -+{ -+ IMG_UINT64 timenow = Clockns64(); -+ IMG_UINT32 remainder; -+ -+ return OSDivide64r64(timenow, 1000, &remainder); -+} -+ -+IMG_UINT32 OSClockus(void) -+{ -+ return (IMG_UINT32) OSClockus64(); -+} -+ -+IMG_UINT32 OSClockms(void) -+{ -+ IMG_UINT64 timenow = Clockns64(); -+ IMG_UINT32 remainder; -+ -+ return OSDivide64(timenow, 1000000, &remainder); -+} -+ -+static inline IMG_UINT64 KClockns64(void) -+{ -+ ktime_t sTime = ktime_get(); -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) -+ return sTime; -+#else -+ return sTime.tv64; -+#endif -+} -+ -+PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time) -+{ -+ *pui64Time = KClockns64(); -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time) -+{ -+ IMG_UINT64 timenow = KClockns64(); -+ IMG_UINT32 remainder; -+ -+ *pui64Time = OSDivide64r64(timenow, 1000, &remainder); -+ return PVRSRV_OK; -+} -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) -+IMG_UINT64 OSClockMonotonicRawns64(void) -+{ -+ struct timespec64 ts; -+ -+ ktime_get_raw_ts64(&ts); -+ return ts.tv_sec * 1000000000 + ts.tv_nsec; -+} -+#else -+IMG_UINT64 OSClockMonotonicRawns64(void) -+{ -+ struct timespec ts; -+ -+ getrawmonotonic(&ts); -+ return (IMG_UINT64) ts.tv_sec * 1000000000 + ts.tv_nsec; -+} -+#endif -+ -+IMG_UINT64 OSClockMonotonicRawus64(void) -+{ -+ IMG_UINT32 rem; -+ return OSDivide64r64(OSClockMonotonicRawns64(), 1000, &rem); -+} -+ -+/* -+ OSWaitus -+*/ -+void OSWaitus(IMG_UINT32 ui32Timeus) -+{ -+ udelay(ui32Timeus); -+} -+ -+ -+/* -+ OSSleepms -+*/ -+void OSSleepms(IMG_UINT32 ui32Timems) -+{ -+ msleep(ui32Timems); -+} -+ -+ -+INLINE IMG_UINT64 OSGetCurrentProcessVASpaceSize(void) -+{ -+ return (IMG_UINT64)TASK_SIZE; -+} -+ -+INLINE IMG_PID OSGetCurrentProcessID(void) -+{ -+ if (in_interrupt()) -+ { -+ return KERNEL_ID; -+ } -+ -+ return (IMG_PID)task_tgid_nr(current); -+} -+ -+INLINE IMG_PID OSGetCurrentVirtualProcessID(void) -+{ -+ if (in_interrupt()) -+ { -+ return KERNEL_ID; -+ } -+ -+ return (IMG_PID)task_tgid_vnr(current); -+} -+ -+INLINE IMG_CHAR *OSGetCurrentProcessName(void) -+{ -+ return current->comm; -+} -+ -+INLINE uintptr_t OSGetCurrentThreadID(void) -+{ -+ if (in_interrupt()) -+ { -+ return KERNEL_ID; -+ } -+ -+ return current->pid; -+} -+ -+IMG_PID OSGetCurrentClientProcessIDKM(void) -+{ -+ return OSGetCurrentProcessID(); -+} -+ -+IMG_CHAR *OSGetCurrentClientProcessNameKM(void) -+{ -+ return OSGetCurrentProcessName(); -+} -+ -+uintptr_t OSGetCurrentClientThreadIDKM(void) -+{ -+ return OSGetCurrentThreadID(); -+} -+ -+size_t OSGetPageSize(void) -+{ -+ return PAGE_SIZE; -+} -+ -+size_t OSGetPageShift(void) -+{ -+ return PAGE_SHIFT; -+} -+ -+size_t 
OSGetPageMask(void) -+{ -+ return (OSGetPageSize()-1); -+} -+ -+size_t OSGetOrder(size_t uSize) -+{ -+ return get_order(PAGE_ALIGN(uSize)); -+} -+ -+IMG_UINT64 OSGetRAMSize(void) -+{ -+ struct sysinfo SI; -+ si_meminfo(&SI); -+ -+ return (PAGE_SIZE * SI.totalram); -+} -+ -+typedef struct -+{ -+ int os_error; -+ PVRSRV_ERROR pvr_error; -+} error_map_t; -+ -+#define PVRSRV_ERROR_TO_OS_ERROR \ -+ X(PVRSRV_OK, 0) \ -+ X(PVRSRV_ERROR_BRIDGE_EFAULT, EFAULT) \ -+ X(PVRSRV_ERROR_BRIDGE_EINVAL, EINVAL) \ -+ X(PVRSRV_ERROR_BRIDGE_ENOMEM, ENOMEM) \ -+ X(PVRSRV_ERROR_BRIDGE_ERANGE, ERANGE) \ -+ X(PVRSRV_ERROR_BRIDGE_EPERM, EPERM) \ -+ X(PVRSRV_ERROR_BRIDGE_ENOTTY, ENOTTY) \ -+ X(PVRSRV_ERROR_BRIDGE_CALL_FAILED, ENOTTY) \ -+ X(PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL, ERANGE) \ -+ X(PVRSRV_ERROR_OUT_OF_MEMORY, ENOMEM) \ -+ X(PVRSRV_ERROR_PMR_NOT_PERMITTED, EACCES) \ -+ X(PVRSRV_ERROR_INVALID_PARAMS, EINVAL) \ -+ X(PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING, EPERM) \ -+ X(PVRSRV_ERROR_NOT_IMPLEMENTED, ENOSYS) -+ -+/* return -ve versions of POSIX errors as they are used in this form */ -+int PVRSRVToNativeError(PVRSRV_ERROR eError) -+{ -+ switch (eError) -+ { -+#define X(_PVRSRV_ERROR, _OS_ERROR) \ -+ case (_PVRSRV_ERROR): return -(_OS_ERROR); -+ -+ PVRSRV_ERROR_TO_OS_ERROR -+ -+#undef X -+ default: -+ return -EFAULT; -+ } -+} -+ -+typedef struct _MISR_DATA_ { -+ struct workqueue_struct *psWorkQueue; -+ struct work_struct sMISRWork; -+ const IMG_CHAR* pszName; -+ PFN_MISR pfnMISR; -+ void *hData; -+} MISR_DATA; -+ -+/* -+ MISRWrapper -+*/ -+static void MISRWrapper(struct work_struct *data) -+{ -+ MISR_DATA *psMISRData = container_of(data, MISR_DATA, sMISRWork); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "Waking up '%s' MISR %p", psMISRData->pszName, psMISRData)); -+ -+ psMISRData->pfnMISR(psMISRData->hData); -+} -+ -+/* -+ OSInstallMISR -+*/ -+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, PFN_MISR pfnMISR, -+ void *hData, const IMG_CHAR *pszMisrName) -+{ -+ MISR_DATA *psMISRData; -+ -+ psMISRData = OSAllocMem(sizeof(*psMISRData)); -+ PVR_LOG_RETURN_IF_NOMEM(psMISRData, "psMISRData"); -+ -+ psMISRData->hData = hData; -+ psMISRData->pfnMISR = pfnMISR; -+ psMISRData->pszName = pszMisrName; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "Installing MISR with cookie %p", psMISRData)); -+ -+ psMISRData->psWorkQueue = create_singlethread_workqueue("pvr_misr"); -+ -+ if (psMISRData->psWorkQueue == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: create_singlethreaded_workqueue failed")); -+ OSFreeMem(psMISRData); -+ return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD; -+ } -+ -+ INIT_WORK(&psMISRData->sMISRWork, MISRWrapper); -+ -+ *hMISRData = (IMG_HANDLE) psMISRData; -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ OSUninstallMISR -+*/ -+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData) -+{ -+ MISR_DATA *psMISRData = (MISR_DATA *) hMISRData; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "Uninstalling MISR with cookie %p", psMISRData)); -+ -+ destroy_workqueue(psMISRData->psWorkQueue); -+ OSFreeMem(psMISRData); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ OSScheduleMISR -+*/ -+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData) -+{ -+ MISR_DATA *psMISRData = (MISR_DATA *) hMISRData; -+ -+ /* -+ Note: -+ -+ In the case of NO_HARDWARE we want the driver to be synchronous so -+ that we don't have to worry about waiting for previous operations -+ to complete -+ */ -+#if defined(NO_HARDWARE) -+ psMISRData->pfnMISR(psMISRData->hData); -+ return PVRSRV_OK; -+#else -+ { -+ bool rc = queue_work(psMISRData->psWorkQueue, &psMISRData->sMISRWork); -+ return rc ? 
PVRSRV_OK : PVRSRV_ERROR_ALREADY_EXISTS; -+ } -+#endif -+} -+ -+/* OS specific values for thread priority */ -+static const IMG_INT32 ai32OSPriorityValues[OS_THREAD_LAST_PRIORITY] = -+{ -+ 0, /* OS_THREAD_NOSET_PRIORITY */ -+ -20, /* OS_THREAD_HIGHEST_PRIORITY */ -+ -10, /* OS_THREAD_HIGH_PRIORITY */ -+ 0, /* OS_THREAD_NORMAL_PRIORITY */ -+ 9, /* OS_THREAD_LOW_PRIORITY */ -+ 19, /* OS_THREAD_LOWEST_PRIORITY */ -+}; -+ -+static int OSThreadRun(void *data) -+{ -+ OSThreadData *psOSThreadData = data; -+ -+ /* count freezable threads */ -+ LinuxBridgeNumActiveKernelThreadsIncrement(); -+ -+ /* Returns true if the thread was frozen, should we do anything with this -+ * information? What do we return? Which one is the error case? */ -+ set_freezable(); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "Starting Thread '%s'...", psOSThreadData->pszThreadName)); -+ -+ /* Call the client's kernel thread with the client's data pointer */ -+ psOSThreadData->pfnThread(psOSThreadData->hData); -+ -+ if (psOSThreadData->bIsSupportingThread) -+ { -+ _ThreadSetStopped(psOSThreadData); -+ } -+ -+ /* Wait for OSThreadDestroy() to call kthread_stop() */ -+ while (!kthread_freezable_should_stop(NULL)) -+ { -+ schedule(); -+ } -+ -+ LinuxBridgeNumActiveKernelThreadsDecrement(); -+ -+ return 0; -+} -+ -+PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread, -+ IMG_CHAR *pszThreadName, -+ PFN_THREAD pfnThread, -+ PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB, -+ IMG_BOOL bIsSupportingThread, -+ void *hData) -+{ -+ return OSThreadCreatePriority(phThread, pszThreadName, pfnThread, -+ pfnDebugDumpCB, bIsSupportingThread, hData, -+ OS_THREAD_NOSET_PRIORITY); -+} -+ -+PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread, -+ IMG_CHAR *pszThreadName, -+ PFN_THREAD pfnThread, -+ PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB, -+ IMG_BOOL bIsSupportingThread, -+ void *hData, -+ OS_THREAD_LEVEL eThreadPriority) -+{ -+ OSThreadData *psOSThreadData; -+ PVRSRV_ERROR eError; -+ -+ psOSThreadData = OSAllocZMem(sizeof(*psOSThreadData)); -+ PVR_LOG_GOTO_IF_NOMEM(psOSThreadData, eError, fail_alloc); -+ -+ psOSThreadData->pfnThread = pfnThread; -+ psOSThreadData->hData = hData; -+ psOSThreadData->pszThreadName = pszThreadName; -+ psOSThreadData->kthread = kthread_run(OSThreadRun, psOSThreadData, "%s", pszThreadName); -+ -+ if (IS_ERR(psOSThreadData->kthread)) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto fail_kthread; -+ } -+ -+ if (bIsSupportingThread) -+ { -+ psOSThreadData->pfnDebugDumpCB = pfnDebugDumpCB; -+ psOSThreadData->bIsThreadRunning = IMG_TRUE; -+ psOSThreadData->bIsSupportingThread = IMG_TRUE; -+ -+ _ThreadListAddEntry(psOSThreadData); -+ } -+ -+ if (eThreadPriority != OS_THREAD_NOSET_PRIORITY && -+ eThreadPriority < OS_THREAD_LAST_PRIORITY) -+ { -+ set_user_nice(psOSThreadData->kthread, -+ ai32OSPriorityValues[eThreadPriority]); -+ } -+ -+ *phThread = psOSThreadData; -+ -+ return PVRSRV_OK; -+ -+fail_kthread: -+ OSFreeMem(psOSThreadData); -+fail_alloc: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread) -+{ -+ OSThreadData *psOSThreadData = hThread; -+ int ret; -+ -+ /* Let the thread know we are ready for it to end and wait for it. 
*/ -+ ret = kthread_stop(psOSThreadData->kthread); -+ if (0 != ret) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "kthread_stop failed(%d)", ret)); -+ return PVRSRV_ERROR_RETRY; -+ } -+ -+ if (psOSThreadData->bIsSupportingThread) -+ { -+ _ThreadListRemoveEntry(psOSThreadData); -+ } -+ -+ OSFreeMem(psOSThreadData); -+ -+ return PVRSRV_OK; -+} -+ -+void OSPanic(void) -+{ -+ BUG(); -+ -+#if defined(__KLOCWORK__) -+ /* Klocwork does not understand that BUG is terminal... */ -+ abort(); -+#endif -+} -+ -+IMG_BOOL OSIsMapPhysNonContigSupported(void) -+{ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) && !defined(CONFIG_VMAP_PFN) -+ return IMG_FALSE; -+#else -+ return IMG_TRUE; -+#endif -+} -+ -+void OSUnMapPhysArrayToLin(void *pvLinAddr, void *pvPrivData) -+{ -+ if (is_vmalloc_addr(pvLinAddr)) -+ { -+#if defined(CONFIG_VMAP_PFN) -+ PVR_UNREFERENCED_PARAMETER(pvPrivData); -+ vunmap(pvLinAddr); -+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)) -+ unmap_kernel_range((unsigned long) (uintptr_t) pvLinAddr, -+ get_vm_area_size(pvPrivData)); -+ free_vm_area(pvPrivData); -+#else -+ PVR_DPF((PVR_DBG_ERROR,"%s: Cannot map into kernel, no method supported.", __func__)); -+ PVR_ASSERT(0); -+#endif -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Given kernel address is not a vmalloc addr", __func__)); -+ } -+} -+ -+#define PagePAToPFN(PageAddr) (PageAddr >> PAGE_SHIFT) -+ -+PVRSRV_ERROR OSMapPhysArrayToLin(IMG_CPU_PHYADDR pPagePA[], -+ IMG_UINT32 uiPagesCount, -+ void **ppvLinAddr, -+ void **ppvPrivData) -+{ -+ if (ppvLinAddr == NULL || ppvPrivData == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+#if defined(CONFIG_VMAP_PFN) -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < uiPagesCount; i++) -+ { -+ pPagePA[i].uiAddr = PagePAToPFN(pPagePA[i].uiAddr); -+ } -+ -+ *ppvLinAddr = vmap_pfn((unsigned long *)pPagePA, -+ (unsigned int)uiPagesCount, -+ pgprot_device(PAGE_KERNEL)); -+ if (NULL == *ppvLinAddr) -+ { -+ return PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED; -+ } -+ *ppvPrivData = NULL; -+ return PVRSRV_OK; -+ } -+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)) -+ { -+ pte_t *pte[32], **pte_array; -+ struct vm_struct *psVMA; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 i = 0; -+ -+ pte_array = &pte[0]; -+ if (sizeof(pte) < (sizeof(pte[0]) * uiPagesCount)) -+ { -+ pte_array = kzalloc(uiPagesCount * sizeof(*pte_array), GFP_KERNEL); -+ if (NULL == pte_array) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ } -+ -+ psVMA = alloc_vm_area((size_t)(uiPagesCount << PAGE_SHIFT), pte_array); -+ if (NULL == psVMA) -+ { -+ eError = PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY; -+ goto FreePTEArray; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "Allocated VM: %s VMA: %p Addr: %p Size: 0x%lx count: %d", __func__, -+ psVMA, psVMA->addr, psVMA->size, psVMA->nr_pages)); -+ -+ for (i = 0; i < uiPagesCount; i++) -+ { -+ *(pte_array[i]) = pte_mkspecial(pfn_pte((unsigned long) PagePAToPFN(pPagePA[i].uiAddr), -+ pgprot_device(PAGE_KERNEL))); -+ } -+ -+ OSWriteMemoryBarrier(psVMA->addr); -+ -+ *ppvLinAddr = psVMA->addr; -+ *ppvPrivData = psVMA; -+ -+FreePTEArray: -+ if (pte_array != pte) -+ { -+ kfree(pte_array); -+ } -+ -+ return eError; -+ } -+#else -+ PVR_DPF((PVR_DBG_ERROR,"%s: Cannot map into kernel, no method supported.", __func__)); -+ PVR_ASSERT(0); -+ *ppvLinAddr = NULL; -+ return PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED; -+#endif -+} -+ -+void * -+OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, -+ size_t ui32Bytes, -+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags) -+{ -+ void __iomem *pvLinAddr; -+ -+ if (uiMappingFlags & 
~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)) -+ { -+ PVR_ASSERT(!"Found non-cpu cache mode flag when mapping to the cpu"); -+ return NULL; -+ } -+ -+ if (! PVRSRV_VZ_MODE_IS(NATIVE)) -+ { -+ /* -+ This is required to support DMA physheaps for GPU virtualization. -+ Unfortunately, if a region of kernel managed memory is turned into -+ a DMA buffer, conflicting mappings can come about easily on Linux -+ as the original memory is mapped by the kernel as normal cached -+ memory whilst DMA buffers are mapped mostly as uncached device or -+ cache-coherent device memory. In both cases the system will have -+ two conflicting mappings for the same memory region and will have -+ "undefined behaviour" for most processors notably ARMv6 onwards -+ and some x86 micro-architectures. As a result, perform ioremapping -+ manually for DMA physheap allocations by translating from CPU/VA -+ to BUS/PA thereby preventing the creation of conflicting mappings. -+ */ -+ pvLinAddr = (void __iomem *) SysDmaDevPAddrToCpuVAddr(BasePAddr.uiAddr, ui32Bytes); -+ if (pvLinAddr != NULL) -+ { -+ return (void __force *) pvLinAddr; -+ } -+ } -+ -+ switch (uiMappingFlags) -+ { -+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: -+ pvLinAddr = (void __iomem *)ioremap(BasePAddr.uiAddr, ui32Bytes); -+ break; -+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: -+#if defined(CONFIG_X86) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) -+ pvLinAddr = (void __iomem *)ioremap_wc(BasePAddr.uiAddr, ui32Bytes); -+#else -+ pvLinAddr = (void __iomem *)ioremap(BasePAddr.uiAddr, ui32Bytes); -+#endif -+ break; -+ case PVRSRV_MEMALLOCFLAG_CPU_CACHED: -+#if defined(CONFIG_X86) || defined(CONFIG_ARM) -+ pvLinAddr = (void __iomem *)ioremap_cache(BasePAddr.uiAddr, ui32Bytes); -+#else -+ pvLinAddr = (void __iomem *)ioremap(BasePAddr.uiAddr, ui32Bytes); -+#endif -+ break; -+ case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT: -+ case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT: -+ PVR_ASSERT(!"Unexpected cpu cache mode"); -+ pvLinAddr = NULL; -+ break; -+ default: -+ PVR_ASSERT(!"Unsupported cpu cache mode"); -+ pvLinAddr = NULL; -+ break; -+ } -+ -+ return (void __force *) pvLinAddr; -+} -+ -+ -+IMG_BOOL -+OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes) -+{ -+ PVR_UNREFERENCED_PARAMETER(ui32Bytes); -+ -+ if (!PVRSRV_VZ_MODE_IS(NATIVE)) -+ { -+ if (SysDmaCpuVAddrToDevPAddr(pvLinAddr)) -+ { -+ return IMG_TRUE; -+ } -+ } -+ -+ iounmap((void __iomem *) pvLinAddr); -+ -+ return IMG_TRUE; -+} -+ -+#define OS_MAX_TIMERS 8 -+ -+/* Timer callback structure used by OSAddTimer */ -+typedef struct TIMER_CALLBACK_DATA_TAG -+{ -+ IMG_BOOL bInUse; -+ PFN_TIMER_FUNC pfnTimerFunc; -+ void *pvData; -+ struct timer_list sTimer; -+ IMG_UINT32 ui32Delay; -+ IMG_BOOL bActive; -+ struct work_struct sWork; -+}TIMER_CALLBACK_DATA; -+ -+static struct workqueue_struct *psTimerWorkQueue; -+ -+static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS]; -+ -+static DEFINE_MUTEX(sTimerStructLock); -+ -+static void OSTimerCallbackBody(TIMER_CALLBACK_DATA *psTimerCBData) -+{ -+ if (!psTimerCBData->bActive) -+ return; -+ -+ /* call timer callback */ -+ psTimerCBData->pfnTimerFunc(psTimerCBData->pvData); -+ -+ /* reset timer */ -+ mod_timer(&psTimerCBData->sTimer, psTimerCBData->sTimer.expires + psTimerCBData->ui32Delay); -+} -+ -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) -+/*************************************************************************/ /*! 
-+@Function OSTimerCallbackWrapper -+@Description OS specific timer callback wrapper function -+@Input psTimer Timer list structure -+*/ /**************************************************************************/ -+static void OSTimerCallbackWrapper(struct timer_list *psTimer) -+{ -+ TIMER_CALLBACK_DATA *psTimerCBData = from_timer(psTimerCBData, psTimer, sTimer); -+#else -+/*************************************************************************/ /*! -+@Function OSTimerCallbackWrapper -+@Description OS specific timer callback wrapper function -+@Input uData Timer callback data -+*/ /**************************************************************************/ -+static void OSTimerCallbackWrapper(uintptr_t uData) -+{ -+ TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA*)uData; -+#endif -+ int res; -+ -+ res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork); -+ if (res == 0) -+ { -+ PVR_LOG(("OSTimerCallbackWrapper: work already queued")); -+ } -+} -+ -+ -+static void OSTimerWorkQueueCallBack(struct work_struct *psWork) -+{ -+ TIMER_CALLBACK_DATA *psTimerCBData = container_of(psWork, TIMER_CALLBACK_DATA, sWork); -+ -+ OSTimerCallbackBody(psTimerCBData); -+} -+ -+IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout) -+{ -+ TIMER_CALLBACK_DATA *psTimerCBData; -+ IMG_UINT32 ui32i; -+ -+ /* check callback */ -+ if (!pfnTimerFunc) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback")); -+ return NULL; -+ } -+ -+ /* Allocate timer callback data structure */ -+ mutex_lock(&sTimerStructLock); -+ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++) -+ { -+ psTimerCBData = &sTimers[ui32i]; -+ if (!psTimerCBData->bInUse) -+ { -+ psTimerCBData->bInUse = IMG_TRUE; -+ break; -+ } -+ } -+ mutex_unlock(&sTimerStructLock); -+ if (ui32i >= OS_MAX_TIMERS) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: all timers are in use")); -+ return NULL; -+ } -+ -+ psTimerCBData->pfnTimerFunc = pfnTimerFunc; -+ psTimerCBData->pvData = pvData; -+ psTimerCBData->bActive = IMG_FALSE; -+ -+ /* -+ HZ = ticks per second -+ ui32MsTimeout = required ms delay -+ ticks = (Hz * ui32MsTimeout) / 1000 -+ */ -+ psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000) -+ ? 
1 -+ : ((HZ * ui32MsTimeout) / 1000); -+ -+ /* initialise object */ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) -+ timer_setup(&psTimerCBData->sTimer, OSTimerCallbackWrapper, 0); -+#else -+ init_timer(&psTimerCBData->sTimer); -+ -+ /* setup timer object */ -+ psTimerCBData->sTimer.function = (void *)OSTimerCallbackWrapper; -+ psTimerCBData->sTimer.data = (uintptr_t)psTimerCBData; -+#endif -+ -+ return (IMG_HANDLE)(uintptr_t)(ui32i + 1); -+} -+ -+ -+static inline TIMER_CALLBACK_DATA *GetTimerStructure(IMG_HANDLE hTimer) -+{ -+ IMG_UINT32 ui32i = (IMG_UINT32)((uintptr_t)hTimer) - 1; -+ -+ PVR_ASSERT(ui32i < OS_MAX_TIMERS); -+ -+ return &sTimers[ui32i]; -+} -+ -+PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer) -+{ -+ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer); -+ -+ PVR_ASSERT(psTimerCBData->bInUse); -+ PVR_ASSERT(!psTimerCBData->bActive); -+ -+ /* free timer callback data struct */ -+ psTimerCBData->bInUse = IMG_FALSE; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer) -+{ -+ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer); -+ -+ PVR_ASSERT(psTimerCBData->bInUse); -+ PVR_ASSERT(!psTimerCBData->bActive); -+ -+ /* Start timer arming */ -+ psTimerCBData->bActive = IMG_TRUE; -+ -+ /* set the expire time */ -+ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies; -+ -+ /* Add the timer to the list */ -+ add_timer(&psTimerCBData->sTimer); -+ -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer) -+{ -+ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer); -+ -+ PVR_ASSERT(psTimerCBData->bInUse); -+ PVR_ASSERT(psTimerCBData->bActive); -+ -+ /* Stop timer from arming */ -+ psTimerCBData->bActive = IMG_FALSE; -+ smp_mb(); -+ -+ flush_workqueue(psTimerWorkQueue); -+ -+ /* remove timer */ -+ del_timer_sync(&psTimerCBData->sTimer); -+ -+ /* -+ * This second flush is to catch the case where the timer ran -+ * before we managed to delete it, in which case, it will have -+ * queued more work for the workqueue. Since the bActive flag -+ * has been cleared, this second flush won't result in the -+ * timer being rearmed. 
-+ */ -+ flush_workqueue(psTimerWorkQueue); -+ -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, IMG_HANDLE *hEventObject) -+{ -+ PVR_UNREFERENCED_PARAMETER(pszName); -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(hEventObject, "hEventObject"); -+ -+ return LinuxEventObjectListCreate(hEventObject); -+} -+ -+ -+PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject) -+{ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(hEventObject, "hEventObject"); -+ -+ return LinuxEventObjectListDestroy(hEventObject); -+} -+ -+#define _FREEZABLE IMG_TRUE -+#define _NON_FREEZABLE IMG_FALSE -+ -+/* -+ * EventObjectWaitTimeout() -+ */ -+static PVRSRV_ERROR EventObjectWaitTimeout(IMG_HANDLE hOSEventKM, -+ IMG_UINT64 uiTimeoutus) -+{ -+ PVRSRV_ERROR eError; -+ -+ if (hOSEventKM && uiTimeoutus > 0) -+ { -+ eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, _NON_FREEZABLE); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWait: invalid arguments %p, %lld", hOSEventKM, uiTimeoutus)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus) -+{ -+ return EventObjectWaitTimeout(hOSEventKM, uiTimeoutus); -+} -+ -+PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM) -+{ -+ return OSEventObjectWaitTimeout(hOSEventKM, EVENT_OBJECT_TIMEOUT_US); -+} -+ -+PVRSRV_ERROR OSEventObjectWaitKernel(IMG_HANDLE hOSEventKM, -+ IMG_UINT64 uiTimeoutus) -+{ -+ PVRSRV_ERROR eError; -+ -+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) -+ if (hOSEventKM) -+ { -+ if (uiTimeoutus > 0) -+ eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, -+ _FREEZABLE); -+ else -+ eError = LinuxEventObjectWaitUntilSignalled(hOSEventKM); -+ } -+#else /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ -+ if (hOSEventKM && uiTimeoutus > 0) -+ { -+ eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, -+ _FREEZABLE); -+ } -+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWaitKernel: invalid arguments %p", -+ hOSEventKM)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return eError; -+} -+ -+void OSEventObjectDumpDebugInfo(IMG_HANDLE hOSEventKM) -+{ -+ LinuxEventObjectDumpDebugInfo(hOSEventKM); -+} -+ -+PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject, IMG_HANDLE *phOSEvent) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(phOSEvent, "phOSEvent"); -+ PVR_LOG_GOTO_IF_INVALID_PARAM(hEventObject, eError, error); -+ -+ eError = LinuxEventObjectAdd(hEventObject, phOSEvent); -+ PVR_LOG_GOTO_IF_ERROR(eError, "LinuxEventObjectAdd", error); -+ -+ return PVRSRV_OK; -+ -+error: -+ *phOSEvent = NULL; -+ return eError; -+} -+ -+PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM) -+{ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(hOSEventKM, "hOSEventKM"); -+ -+ return LinuxEventObjectDelete(hOSEventKM); -+} -+ -+PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject) -+{ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(hEventObject, "hEventObject"); -+ -+ return LinuxEventObjectSignal(hEventObject); -+} -+ -+PVRSRV_ERROR OSCopyToUser(void *pvProcess, -+ void __user *pvDest, -+ const void *pvSrc, -+ size_t ui32Bytes) -+{ -+ PVR_UNREFERENCED_PARAMETER(pvProcess); -+ -+ if (pvr_copy_to_user(pvDest, pvSrc, ui32Bytes)==0) -+ return PVRSRV_OK; -+ else -+ return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY; -+} -+ -+PVRSRV_ERROR OSCopyFromUser(void *pvProcess, -+ void *pvDest, -+ const void __user *pvSrc, -+ size_t ui32Bytes) -+{ -+ 
PVR_UNREFERENCED_PARAMETER(pvProcess); -+ -+ if (likely(pvr_copy_from_user(pvDest, pvSrc, ui32Bytes)==0)) -+ return PVRSRV_OK; -+ else -+ return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY; -+} -+ -+IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder) -+{ -+ *pui32Remainder = do_div(ui64Divident, ui32Divisor); -+ -+ return ui64Divident; -+} -+ -+IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder) -+{ -+ *pui32Remainder = do_div(ui64Divident, ui32Divisor); -+ -+ return (IMG_UINT32) ui64Divident; -+} -+ -+/* One time osfunc initialisation */ -+PVRSRV_ERROR PVROSFuncInit(void) -+{ -+ { -+ PVR_ASSERT(!psTimerWorkQueue); -+ -+ psTimerWorkQueue = create_freezable_workqueue("pvr_timer"); -+ if (psTimerWorkQueue == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: couldn't create timer workqueue", -+ __func__)); -+ return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD; -+ } -+ } -+ -+ { -+ IMG_UINT32 ui32i; -+ -+ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++) -+ { -+ TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i]; -+ -+ INIT_WORK(&psTimerCBData->sWork, OSTimerWorkQueueCallBack); -+ } -+ } -+ return PVRSRV_OK; -+} -+ -+/* -+ * Osfunc deinitialisation. -+ * Note that PVROSFuncInit may not have been called -+ */ -+void PVROSFuncDeInit(void) -+{ -+ if (psTimerWorkQueue != NULL) -+ { -+ destroy_workqueue(psTimerWorkQueue); -+ psTimerWorkQueue = NULL; -+ } -+} -+ -+void OSDumpStack(void) -+{ -+ dump_stack(); -+} -+ -+PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray, -+ IMG_UINT64 sCpuVAddrBase, -+ IMG_CPU_PHYADDR sCpuPAHeapBase, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *pai32AllocIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pai32FreeIndices, -+ IMG_BOOL bIsLMA) -+{ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) -+ pfn_t sPFN; -+#else -+ IMG_UINT64 uiPFN; -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ -+ -+ PVRSRV_ERROR eError; -+ -+ struct mm_struct *psMM = current->mm; -+ struct vm_area_struct *psVMA = NULL; -+ struct address_space *psMapping = NULL; -+ struct page *psPage = NULL; -+ -+ IMG_UINT64 uiCPUVirtAddr = 0; -+ IMG_UINT32 ui32Loop = 0; -+ IMG_UINT32 ui32PageSize = OSGetPageSize(); -+ IMG_BOOL bMixedMap = IMG_FALSE; -+ -+ /* -+ * Acquire the lock before manipulating the VMA -+ * In this case only mmap_sem lock would suffice as the pages associated with this VMA -+ * are never meant to be swapped out. -+ * -+ * In the future, in case the pages are marked as swapped, page_table_lock needs -+ * to be acquired in conjunction with this to disable page swapping. 
-+ */ -+ -+ /* Find the Virtual Memory Area associated with the user base address */ -+ psVMA = find_vma(psMM, (uintptr_t)sCpuVAddrBase); -+ if (NULL == psVMA) -+ { -+ eError = PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND; -+ return eError; -+ } -+ -+ /* Acquire the memory sem */ -+ mmap_write_lock(psMM); -+ -+ psMapping = psVMA->vm_file->f_mapping; -+ -+ /* Set the page offset to the correct value as this is disturbed in MMAP_PMR func */ -+ psVMA->vm_pgoff = (psVMA->vm_start >> PAGE_SHIFT); -+ -+ /* Delete the entries for the pages that got freed */ -+ if (ui32FreePageCount && (pai32FreeIndices != NULL)) -+ { -+ for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++) -+ { -+ uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32FreeIndices[ui32Loop] * ui32PageSize)); -+ -+ unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1); -+ -+#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE -+ /* -+ * Still need to map pages in case remap flag is set. -+ * That is not done until the remap case succeeds -+ */ -+#endif -+ } -+ eError = PVRSRV_OK; -+ } -+ -+ if ((psVMA->vm_flags & VM_MIXEDMAP) || bIsLMA) -+ { -+ pvr_vm_flags_set(psVMA, VM_MIXEDMAP); -+ bMixedMap = IMG_TRUE; -+ } -+ else -+ { -+ if (ui32AllocPageCount && (NULL != pai32AllocIndices)) -+ { -+ for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++) -+ { -+ -+ psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]]; -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) -+ sPFN = page_to_pfn_t(psPage); -+ -+ if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0) -+#else -+ uiPFN = page_to_pfn(psPage); -+ -+ if (!pfn_valid(uiPFN) || (page_count(pfn_to_page(uiPFN)) == 0)) -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ -+ { -+ bMixedMap = IMG_TRUE; -+ pvr_vm_flags_set(psVMA, VM_MIXEDMAP); -+ break; -+ } -+ } -+ } -+ } -+ -+ /* Map the pages that got allocated */ -+ if (ui32AllocPageCount && (NULL != pai32AllocIndices)) -+ { -+ for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++) -+ { -+ int err; -+ -+ uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32AllocIndices[ui32Loop] * ui32PageSize)); -+ unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1); -+ -+ if (bIsLMA) -+ { -+ phys_addr_t uiAddr = sCpuPAHeapBase.uiAddr + -+ ((IMG_DEV_PHYADDR *)psPageArray)[pai32AllocIndices[ui32Loop]].uiAddr; -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) -+ sPFN = phys_to_pfn_t(uiAddr, 0); -+ psPage = pfn_t_to_page(sPFN); -+#else -+ uiPFN = uiAddr >> PAGE_SHIFT; -+ psPage = pfn_to_page(uiPFN); -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ -+ } -+ else -+ { -+ psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]]; -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) -+ sPFN = page_to_pfn_t(psPage); -+#else -+ uiPFN = page_to_pfn(psPage); -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ -+ } -+ -+ if (bMixedMap) -+ { -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) -+ vm_fault_t vmf; -+ -+ vmf = vmf_insert_mixed(psVMA, uiCPUVirtAddr, sPFN); -+ if (vmf & VM_FAULT_ERROR) -+ { -+ err = vm_fault_to_errno(vmf, 0); -+ } -+ else -+ { -+ err = 0; -+ } -+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) -+ err = vm_insert_mixed(psVMA, uiCPUVirtAddr, sPFN); -+#else -+ err = vm_insert_mixed(psVMA, uiCPUVirtAddr, uiPFN); -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) */ -+ } -+ else -+ { -+ err = vm_insert_page(psVMA, uiCPUVirtAddr, psPage); -+ } -+ -+ if (err) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "Remap failure error code: %d", err)); -+ eError = 
PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED; -+ goto eFailed; -+ } -+ } -+ } -+ -+ eError = PVRSRV_OK; -+eFailed: -+ mmap_write_unlock(psMM); -+ -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function OSDebugSignalPID -+@Description Sends a SIGTRAP signal to a specific PID in user mode for -+ debugging purposes. The user mode process can register a handler -+ against this signal. -+ This is necessary to support the Rogue debugger. If the Rogue -+ debugger is not used then this function may be implemented as -+ a stub. -+@Input ui32PID The PID for the signal. -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID) -+{ -+ int err; -+ struct pid *psPID; -+ -+ psPID = find_vpid(ui32PID); -+ if (psPID == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get PID struct.", __func__)); -+ return PVRSRV_ERROR_NOT_FOUND; -+ } -+ -+ err = kill_pid(psPID, SIGTRAP, 0); -+ if (err != 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Signal Failure %d", __func__, err)); -+ return PVRSRV_ERROR_SIGNAL_FAILED; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function OSIsKernelThread -+@Description This API determines if the current running thread is a kernel -+ thread (i.e. one not associated with any userland process, -+ typically an MISR handler.) -+@Return IMG_TRUE if it is a kernel thread, otherwise IMG_FALSE. -+*/ /**************************************************************************/ -+IMG_BOOL OSIsKernelThread(void) -+{ -+ /* -+ * Kernel threads have a NULL memory descriptor. -+ * -+ * See https://www.kernel.org/doc/Documentation/vm/active_mm.txt -+ */ -+ return current->mm == NULL; -+} -+ -+void OSDumpVersionInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ PVR_DUMPDEBUG_LOG("OS kernel info: %s %s %s %s", -+ utsname()->sysname, -+ utsname()->release, -+ utsname()->version, -+ utsname()->machine); -+} -+#if defined(SUPPORT_DMA_TRANSFER) -+ -+typedef struct _OS_CLEANUP_DATA_ -+{ -+ IMG_BOOL bSucceed; -+ IMG_BOOL bAdvanceTimeline; -+ IMG_UINT uiRefCount; -+ IMG_UINT uiNumDMA; -+ IMG_UINT uiCount; -+ -+ struct dma_async_tx_descriptor** ppsDescriptors; -+ -+ -+ PVRSRV_DEVICE_NODE *psDevNode; -+ PFN_SERVER_CLEANUP pfnServerCleanup; -+ void* pvServerCleanupData; -+ -+ enum dma_data_direction eDirection; -+ struct sg_table **ppsSg; -+ struct page ***pages; -+ IMG_UINT32* puiNumPages; -+ spinlock_t spinlock; -+ -+ struct completion start_cleanup; -+ struct completion *sync_completion; -+ -+ /* Sparse PMR transfer information */ -+ IMG_BOOL *pbIsSparse; -+ IMG_UINT *uiNumValidPages; -+ struct sg_table ***ppsSgSparse; -+ struct dma_async_tx_descriptor*** ppsDescriptorsSparse; -+ -+} OS_CLEANUP_DATA; -+ -+static int cleanup_thread(void *pvData) -+{ -+ IMG_UINT32 i, j; -+ struct completion *sync_completion = NULL; -+ OS_CLEANUP_DATA *psOSCleanup = (OS_CLEANUP_DATA*)pvData; -+ IMG_BOOL bSucceed = psOSCleanup->bSucceed; -+ -+ sync_completion = psOSCleanup->sync_completion; -+ -+#if defined(DMA_VERBOSE) -+ PVR_DPF((PVR_DBG_ERROR, "Cleanup thread waiting (%p) on completion", pvData)); -+#endif -+ -+ wait_for_completion(&psOSCleanup->start_cleanup); -+ -+#if defined(DMA_VERBOSE) -+ PVR_DPF((PVR_DBG_ERROR, "Cleanup thread notified (%p)", pvData)); -+#endif -+ /* Free resources */ -+ for (i=0; iuiCount; i++) -+ { -+ if 
(!psOSCleanup->pbIsSparse[i]) -+ { -+ dma_sync_sg_for_cpu(psOSCleanup->psDevNode->psDevConfig->pvOSDevice, -+ psOSCleanup->ppsSg[i]->sgl, -+ psOSCleanup->ppsSg[i]->nents, -+ psOSCleanup->eDirection); -+ -+ dma_unmap_sg(psOSCleanup->psDevNode->psDevConfig->pvOSDevice, -+ psOSCleanup->ppsSg[i]->sgl, -+ psOSCleanup->ppsSg[i]->nents, -+ psOSCleanup->eDirection); -+ -+ sg_free_table(psOSCleanup->ppsSg[i]); -+ -+ OSFreeMem(psOSCleanup->ppsSg[i]); -+ -+ /* Unpin pages */ -+ for (j=0; jpuiNumPages[i]; j++) -+ { -+ /* -+ * using DMA_FROM_DEVICE from enum dma_data_direction instead of DMA_DEV_TO_MEM -+ * from enum dma_transfer_direction to avoid casting explicitly -+ */ -+ if (psOSCleanup->eDirection == DMA_FROM_DEVICE) -+ { -+ set_page_dirty_lock(psOSCleanup->pages[i][j]); -+ } -+ -+ pvr_unpin_user_page_for_dma(psOSCleanup->pages[i][j]); -+ } -+ } -+ else -+ { -+ for (j = 0; j < psOSCleanup->puiNumPages[i]; j++) -+ { -+ if (psOSCleanup->ppsSgSparse[i][j]) { -+ dma_sync_sg_for_cpu(psOSCleanup->psDevNode->psDevConfig->pvOSDevice, -+ psOSCleanup->ppsSgSparse[i][j]->sgl, -+ psOSCleanup->ppsSgSparse[i][j]->nents, -+ psOSCleanup->eDirection); -+ -+ -+ dma_unmap_sg(psOSCleanup->psDevNode->psDevConfig->pvOSDevice, -+ psOSCleanup->ppsSgSparse[i][j]->sgl, -+ psOSCleanup->ppsSgSparse[i][j]->nents, -+ psOSCleanup->eDirection); -+ -+ sg_free_table(psOSCleanup->ppsSgSparse[i][j]); -+ -+ OSFreeMem(psOSCleanup->ppsSgSparse[i][j]); -+ -+ } -+ } -+ -+ OSFreeMem(psOSCleanup->ppsSgSparse[i]); -+ OSFreeMem(psOSCleanup->ppsDescriptorsSparse[i]); -+ -+ /* Unpin pages */ -+ for (j=0; jpuiNumPages[i]*2; j++) -+ { -+ /* -+ * Some pages might've been pinned twice -+ * Others may have not been pinned at all -+ */ -+ if (psOSCleanup->pages[i][j]) -+ { -+ /* -+ * using DMA_FROM_DEVICE from enum dma_data_direction instead of DMA_DEV_TO_MEM -+ * from enum dma_transfer_direction to avoid casting explicitly -+ */ -+ if (psOSCleanup->eDirection == DMA_FROM_DEVICE) -+ { -+ set_page_dirty_lock(psOSCleanup->pages[i][j]); -+ } -+ -+ pvr_unpin_user_page_for_dma(psOSCleanup->pages[i][j]); -+ } -+ } -+ } -+ -+ OSFreeMem(psOSCleanup->pages[i]); -+ } -+ -+ psOSCleanup->pfnServerCleanup(psOSCleanup->pvServerCleanupData, -+ psOSCleanup->bAdvanceTimeline); -+ -+ OSFreeMem(psOSCleanup->ppsSg); -+ OSFreeMem(psOSCleanup->pages); -+ OSFreeMem(psOSCleanup->puiNumPages); -+ OSFreeMem(psOSCleanup->ppsSgSparse); -+ OSFreeMem(psOSCleanup->ppsDescriptorsSparse); -+ OSFreeMem(psOSCleanup->ppsDescriptors); -+ OSFreeMem(psOSCleanup->pbIsSparse); -+ OSFreeMem(psOSCleanup->uiNumValidPages); -+ OSFreeMem(psOSCleanup); -+ -+ if (bSucceed) -+ { -+ kthread_complete_and_exit(sync_completion, 0); -+ } -+ else -+ { -+ kthread_complete_and_exit(NULL, 0); -+ } -+ return 0; -+} -+ -+static void dma_callback(void *pvOSCleanup) -+{ -+ OS_CLEANUP_DATA *psOSCleanup = (OS_CLEANUP_DATA*)pvOSCleanup; -+ unsigned long flags; -+ -+#if defined(DMA_VERBOSE) -+ PVR_DPF((PVR_DBG_ERROR, "dma_callback (%p) refcount decreased to %d", psOSCleanup, psOSCleanup->uiRefCount - 1)); -+#endif -+ spin_lock_irqsave(&psOSCleanup->spinlock, flags); -+ -+ psOSCleanup->uiRefCount--; -+ -+ if (psOSCleanup->uiRefCount==0) -+ { -+ /* Notify the cleanup thread */ -+ spin_unlock_irqrestore(&psOSCleanup->spinlock, flags); -+ complete(&psOSCleanup->start_cleanup); -+ return; -+ } -+ -+ spin_unlock_irqrestore(&psOSCleanup->spinlock, flags); -+} -+ -+#if defined(SUPPORT_VALIDATION) && defined(PVRSRV_DEBUG_DMA) -+static void -+DMADumpPhysicalAddresses(struct page **ppsHostMemPages, -+ IMG_UINT32 uiNumPages, 
-+                         IMG_DMA_ADDR *sDmaAddr,
-+                         IMG_UINT64 ui64Offset)
-+{
-+	IMG_CPU_PHYADDR sPagePhysAddr;
-+	IMG_UINT32 uiIdx;
-+
-+	PVR_DPF((PVR_DBG_MESSAGE, "DMA Transfer Address Dump"));
-+	PVR_DPF((PVR_DBG_MESSAGE, "Hostmem phys addresses:"));
-+
-+	for (uiIdx = 0; uiIdx < uiNumPages; uiIdx++)
-+	{
-+		sPagePhysAddr.uiAddr = page_to_phys(ppsHostMemPages[uiIdx]);
-+		if (uiIdx == 0)
-+		{
-+			sPagePhysAddr.uiAddr += ui64Offset;
-+			PVR_DPF((PVR_DBG_MESSAGE, "\tHost mem start at 0x%llX", sPagePhysAddr.uiAddr));
-+		}
-+		else
-+		{
-+			PVR_DPF((PVR_DBG_MESSAGE, "\tHost Mem Page %d at 0x%llX", uiIdx,
-+			         sPagePhysAddr.uiAddr));
-+		}
-+	}
-+	PVR_DPF((PVR_DBG_MESSAGE, "Devmem CPU phys address: 0x%llX",
-+	         sDmaAddr->uiAddr));
-+}
-+#endif
-+
-+PVRSRV_ERROR OSDmaSubmitTransfer(PVRSRV_DEVICE_NODE *psDevNode, void *pvOSData,
-+                                 void *pvChan, IMG_BOOL bSynchronous)
-+{
-+	OS_CLEANUP_DATA *psOSCleanup = (OS_CLEANUP_DATA*)pvOSData;
-+	struct completion* sync_completion = NULL;
-+
-+	psOSCleanup->bSucceed = IMG_TRUE;
-+	psOSCleanup->bAdvanceTimeline = IMG_TRUE;
-+
-+	if (bSynchronous)
-+	{
-+		sync_completion = OSAllocZMem(sizeof(struct completion));
-+		init_completion(sync_completion);
-+	}
-+
-+	PVR_UNREFERENCED_PARAMETER(psDevNode);
-+	/* Wait only on number of ops scheduled. This might be different to NumDMAs
-+	   in certain error conditions */
-+	psOSCleanup->uiRefCount = psOSCleanup->uiCount;
-+	psOSCleanup->sync_completion = sync_completion;
-+
-+	{
-+		IMG_UINT32 i,j;
-+		for (i=0; i<psOSCleanup->uiCount; i++)
-+		{
-+			if (psOSCleanup->pbIsSparse[i])
-+			{
-+				for (j=0; j<psOSCleanup->puiNumPages[i]; j++)
-+				{
-+					if (psOSCleanup->ppsDescriptorsSparse[i][j])
-+						dmaengine_submit(psOSCleanup->ppsDescriptorsSparse[i][j]);
-+				}
-+			}
-+			else
-+			{
-+				dmaengine_submit(psOSCleanup->ppsDescriptors[i]);
-+			}
-+		}
-+	}
-+
-+	dma_async_issue_pending(pvChan);
-+
-+	if (bSynchronous)
-+	{
-+		wait_for_completion(sync_completion);
-+		OSFreeMem(sync_completion);
-+	}
-+
-+	return PVRSRV_OK;
-+}
-+
-+void OSDmaForceCleanup(PVRSRV_DEVICE_NODE *psDevNode, void *pvChan,
-+                       void *pvOSData, void *pvServerCleanupParam,
-+                       PFN_SERVER_CLEANUP pfnServerCleanup)
-+{
-+	OS_CLEANUP_DATA *psOSCleanup = (OS_CLEANUP_DATA *)pvOSData;
-+	IMG_UINT ui32Retries;
-+
-+	PVR_UNREFERENCED_PARAMETER(psDevNode);
-+
-+	psOSCleanup->bSucceed = IMG_FALSE;
-+	psOSCleanup->bAdvanceTimeline = IMG_TRUE;
-+
-+	/* Need to wait for outstanding DMA Engine ops before advancing the
-+	   user-supplied timeline in case of error. dmaengine_terminate_sync
-+	   cannot be called from within atomic context, so cannot invoke it
-+	   from inside the cleanup kernel thread.
*/ -+ for (ui32Retries = 0; ui32Retries < DMA_ERROR_SYNC_RETRIES; ui32Retries++) -+ { -+ if (dmaengine_terminate_sync(pvChan) == 0) -+ { -+ break; -+ } -+ } -+ if (ui32Retries == DMA_ERROR_SYNC_RETRIES) -+ { -+ /* We cannot guarantee all outstanding DMAs were terminated -+ * so we let the UM fence time out as a fallback mechanism */ -+ psOSCleanup->bAdvanceTimeline = IMG_FALSE; -+ } -+ -+ if (psOSCleanup->uiCount > 0) -+ { -+ complete(&psOSCleanup->start_cleanup); -+ } -+ else -+ { -+ /* Cleanup kthread never run, need to manually wind down */ -+ pfnServerCleanup(pvServerCleanupParam, psOSCleanup->bAdvanceTimeline); -+ -+ OSFreeMem(psOSCleanup->ppsSg); -+ OSFreeMem(psOSCleanup->pages); -+ OSFreeMem(psOSCleanup->puiNumPages); -+ OSFreeMem(psOSCleanup->ppsSgSparse); -+ OSFreeMem(psOSCleanup->pbIsSparse); -+ OSFreeMem(psOSCleanup->uiNumValidPages); -+ OSFreeMem(psOSCleanup->ppsDescriptors); -+ OSFreeMem(psOSCleanup->ppsDescriptorsSparse); -+ -+ OSFreeMem(psOSCleanup); -+ } -+} -+ -+PVRSRV_ERROR OSDmaAllocData(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 uiNumDMA, void **pvOutData) -+{ -+ PVRSRV_ERROR eError; -+ OS_CLEANUP_DATA *psOSCleanup = OSAllocZMem(sizeof(OS_CLEANUP_DATA)); -+ PVR_LOG_GOTO_IF_NOMEM(psOSCleanup, eError, e0); -+ -+ psOSCleanup->uiNumDMA = uiNumDMA; -+ psOSCleanup->psDevNode = psDevNode; -+ -+ spin_lock_init(&psOSCleanup->spinlock); -+ -+ init_completion(&psOSCleanup->start_cleanup); -+ -+ psOSCleanup->ppsDescriptors = OSAllocZMem(uiNumDMA * sizeof(struct dma_async_tx_descriptor*)); -+ PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->ppsDescriptors, eError, e0); -+ -+ psOSCleanup->ppsDescriptorsSparse = OSAllocZMem(uiNumDMA * sizeof(struct dma_async_tx_descriptor*)); -+ PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->ppsDescriptorsSparse, eError, e11); -+ -+ psOSCleanup->ppsSg = OSAllocZMem(uiNumDMA * sizeof(struct sg_table*)); -+ PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->ppsSg, eError, e1); -+ -+ psOSCleanup->ppsSgSparse = OSAllocZMem(uiNumDMA * sizeof(struct sg_table*)); -+ PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->ppsSgSparse, eError, e12); -+ -+ psOSCleanup->pbIsSparse = OSAllocZMem(uiNumDMA * sizeof(IMG_BOOL)); -+ PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->pbIsSparse, eError, e13); -+ -+ psOSCleanup->uiNumValidPages = OSAllocZMem(uiNumDMA * sizeof(IMG_UINT)); -+ PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->uiNumValidPages, eError, e14); -+ -+ psOSCleanup->pages = OSAllocZMem(uiNumDMA * sizeof(struct page **)); -+ PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->pages, eError, e2); -+ -+ psOSCleanup->puiNumPages = OSAllocZMem(uiNumDMA * sizeof(IMG_UINT32)); -+ PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->puiNumPages, eError, e3); -+ -+ *pvOutData = psOSCleanup; -+ -+ return PVRSRV_OK; -+ -+e3: -+ OSFreeMem(psOSCleanup->pages); -+e2: -+ OSFreeMem(psOSCleanup->uiNumValidPages); -+e14: -+ OSFreeMem(psOSCleanup->pbIsSparse); -+e13: -+ OSFreeMem(psOSCleanup->ppsSgSparse); -+e12: -+ OSFreeMem(psOSCleanup->ppsSg); -+e1: -+ OSFreeMem(psOSCleanup->ppsDescriptorsSparse); -+e11: -+ OSFreeMem(psOSCleanup->ppsDescriptors); -+e0: -+ OSFreeMem(psOSCleanup); -+ return eError; -+} -+ -+/*************************************************************************/ /*! 
-+@Function OSDmaTransfer -+@Description This API is used to ask OS to perform a DMA transfer operation -+@Return -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSDmaPrepareTransfer(PVRSRV_DEVICE_NODE *psDevNode, -+ void* pvChan, -+ IMG_DMA_ADDR* psDmaAddr, IMG_UINT64* puiAddress, -+ IMG_UINT64 uiSize, IMG_BOOL bMemToDev, -+ void* pvOSData, -+ void* pvServerCleanupParam, PFN_SERVER_CLEANUP pfnServerCleanup, IMG_BOOL bFirst) -+{ -+ -+ IMG_INT iRet; -+ PVRSRV_ERROR eError; -+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; -+ OS_CLEANUP_DATA* psOSCleanupData = pvOSData; -+ -+ enum dma_data_direction eDataDirection = bMemToDev ? DMA_TO_DEVICE : DMA_FROM_DEVICE; -+ struct dma_slave_config sConfig = {0}; -+ struct dma_async_tx_descriptor *psDesc; -+ -+ unsigned long offset = (unsigned long)puiAddress & ((1 << PAGE_SHIFT) - 1); -+ unsigned int num_pages = (uiSize + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; -+ int num_pinned_pages = 0; -+ -+ struct sg_table *psSg = OSAllocZMem(sizeof(struct sg_table)); -+ PVR_LOG_GOTO_IF_NOMEM(psSg, eError, e0); -+ -+ psOSCleanupData->pages[psOSCleanupData->uiCount] = OSAllocZMem(num_pages * sizeof(struct page *)); -+ PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->pages[psOSCleanupData->uiCount], eError, e1); -+ -+ num_pinned_pages = pvr_pin_user_pages_for_dma( -+ puiAddress, -+ num_pages, -+ !bMemToDev, -+ psOSCleanupData->pages[psOSCleanupData->uiCount]); -+ -+ if (num_pinned_pages != num_pages) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "get_user_pages_fast failed: (%d - %u)", num_pinned_pages, num_pages)); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto e2; -+ } -+ -+#if defined(SUPPORT_VALIDATION) && defined(PVRSRV_DEBUG_DMA) -+ DMADumpPhysicalAddresses(psOSCleanupData->pages[psOSCleanupData->uiCount], -+ num_pages, psDmaAddr, offset); -+#endif -+ -+ psOSCleanupData->puiNumPages[psOSCleanupData->uiCount] = num_pinned_pages; -+ -+ if (sg_alloc_table_from_pages(psSg, psOSCleanupData->pages[psOSCleanupData->uiCount], num_pages, offset, uiSize, GFP_KERNEL) != 0) -+ { -+ eError = PVRSRV_ERROR_BAD_MAPPING; -+ PVR_DPF((PVR_DBG_ERROR, "sg_alloc_table_from_pages failed")); -+ goto e3; -+ } -+ -+ if (bMemToDev) -+ { -+ sConfig.direction = DMA_MEM_TO_DEV; -+ sConfig.src_addr = 0; -+ sConfig.dst_addr = psDmaAddr->uiAddr; -+ } -+ else -+ { -+ sConfig.direction = DMA_DEV_TO_MEM; -+ sConfig.src_addr = psDmaAddr->uiAddr; -+ sConfig.dst_addr = 0; -+ } -+ dmaengine_slave_config(pvChan, &sConfig); -+ -+ iRet = dma_map_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, eDataDirection); -+ if (!iRet) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Error mapping SG list", __func__)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto e4; -+ } -+ -+ dma_sync_sg_for_device(psDevConfig->pvOSDevice, psSg->sgl,(unsigned int)iRet, eDataDirection); -+ -+ psDesc = dmaengine_prep_slave_sg(pvChan, psSg->sgl, (unsigned int)iRet, sConfig.direction, 0); -+ if (!psDesc) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: dmaengine_prep_slave_sg failed", __func__)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto e5; -+ } -+ -+ psOSCleanupData->eDirection = eDataDirection; -+ psOSCleanupData->ppsSg[psOSCleanupData->uiCount] = psSg; -+ psOSCleanupData->pfnServerCleanup = pfnServerCleanup; -+ psOSCleanupData->pvServerCleanupData = pvServerCleanupParam; -+ -+ psDesc->callback_param = psOSCleanupData; -+ psDesc->callback = dma_callback; -+ -+ if (bFirst) -+ { -+ struct task_struct* t1; -+ t1 = kthread_run(cleanup_thread, psOSCleanupData, "dma-cleanup-thread"); -+ } -+ 
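Aside, not part of the patch: a sketch of how the OSDma* entry points above are expected to be sequenced by the caller. ExampleDmaSequence and all of its parameters are illustrative stand-ins for state owned by the DMA bridge code; error handling is omitted.

/* Illustrative sequencing only. pvChan is assumed to be a dmaengine channel
 * acquired elsewhere; the addresses and cleanup callback come from the caller. */
static void ExampleDmaSequence(PVRSRV_DEVICE_NODE *psDevNode, void *pvChan,
                               IMG_DMA_ADDR *psDevAddr, IMG_UINT64 *puiUserVA,
                               IMG_UINT64 uiSize, void *pvCleanupParam,
                               PFN_SERVER_CLEANUP pfnCleanup)
{
	void *pvOSData;

	/* One cleanup context, sized for a single transfer */
	OSDmaAllocData(psDevNode, 1, &pvOSData);

	/* bFirst == IMG_TRUE spawns the cleanup kthread that waits on start_cleanup */
	OSDmaPrepareTransfer(psDevNode, pvChan, psDevAddr, puiUserVA, uiSize,
	                     IMG_TRUE /* mem -> device */, pvOSData,
	                     pvCleanupParam, pfnCleanup, IMG_TRUE);

	/* Submits the prepared descriptors; dma_callback() drops the refcount and
	 * wakes the cleanup kthread once every descriptor has completed. */
	OSDmaSubmitTransfer(psDevNode, pvOSData, pvChan, IMG_FALSE /* asynchronous */);
}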
psOSCleanupData->ppsDescriptors[psOSCleanupData->uiCount] = psDesc;
-+
-+	psOSCleanupData->uiCount++;
-+
-+	return PVRSRV_OK;
-+
-+e5:
-+	dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, eDataDirection);
-+e4:
-+	sg_free_table(psSg);
-+e3:
-+	{
-+		IMG_UINT32 i;
-+		/* Unpin pages */
-+		for (i=0; i<psOSCleanupData->puiNumPages[psOSCleanupData->uiCount]; i++)
-+		{
-+			pvr_unpin_user_page_for_dma(psOSCleanupData->pages[psOSCleanupData->uiCount][i]);
-+		}
-+	}
-+e2:
-+	OSFreeMem(psOSCleanupData->pages[psOSCleanupData->uiCount]);
-+e1:
-+	OSFreeMem(psSg);
-+e0:
-+	return eError;
-+}
-+
-+static IMG_UINT32
-+CalculateValidPages(IMG_BOOL *pbValid,
-+                    IMG_UINT32 ui32SizeInPages)
-+{
-+	IMG_UINT32 ui32nValid;
-+	IMG_UINT32 ui32Idx;
-+
-+	for (ui32Idx = 0, ui32nValid = 0; ui32Idx < ui32SizeInPages; ui32Idx++)
-+	{
-+		ui32nValid += pbValid[ui32Idx] ? 1 : 0;
-+	}
-+
-+	return ui32nValid;
-+}
-+
-+PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode,
-+                                        void* pvChan,
-+                                        IMG_DMA_ADDR* psDmaAddr,
-+                                        IMG_BOOL *pbValid,
-+                                        IMG_UINT64* puiAddress,
-+                                        IMG_UINT64 uiSize,
-+                                        IMG_UINT32 uiOffsetInFirstPMRPage,
-+                                        IMG_UINT32 ui32SizeInPages,
-+                                        IMG_BOOL bMemToDev,
-+                                        void* pvOSData,
-+                                        void* pvServerCleanupParam,
-+                                        PFN_SERVER_CLEANUP pfnServerCleanup,
-+                                        IMG_BOOL bFirst)
-+{
-+
-+	IMG_INT iRet;
-+	PVRSRV_ERROR eError = PVRSRV_OK;
-+	PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
-+	OS_CLEANUP_DATA* psOSCleanupData = pvOSData;
-+	IMG_UINT32 ui32PageSize = OSGetPageSize();
-+	void *pvNextAddress = puiAddress;
-+	IMG_UINT32 ui32Idx;
-+	IMG_INT32 i32Rwd;
-+
-+	enum dma_data_direction eDataDirection = bMemToDev ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-+	struct dma_slave_config sConfig = {0};
-+	struct dma_async_tx_descriptor *psDesc;
-+
-+	unsigned long offset = (unsigned long)puiAddress & ((1 << PAGE_SHIFT) - 1);
-+	unsigned int num_pages = (uiSize + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
-+	unsigned int num_valid_pages = CalculateValidPages(pbValid, ui32SizeInPages);
-+	unsigned int num_pinned_pages = 0;
-+	unsigned int valid_idx;
-+	size_t transfer_size;
-+	struct page ** next_pages;
-+	struct sg_table *psSg;
-+
-+	psOSCleanupData->uiNumValidPages[psOSCleanupData->uiCount] = num_valid_pages;
-+	psOSCleanupData->pbIsSparse[psOSCleanupData->uiCount] = IMG_TRUE;
-+
-+	/*
-+	 * If an SG transfer from virtual memory to card memory goes over a page boundary in
-+	 * main memory, it'll span two different pages - therefore, total number of pages to
-+	 * keep track of should be twice as many as for a simple transfer. This twice-as-big
-+	 * allocation is also necessary because the same virtual memory page might be present
-+	 * in more than one SG DMA transfer, because of differences in first-page offset between
-+	 * the sparse device PMR and the virtual memory buffer.
-+ */ -+ psOSCleanupData->pages[psOSCleanupData->uiCount] = OSAllocZMem(2*num_valid_pages * sizeof(struct page *)); -+ PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->pages[psOSCleanupData->uiCount], eError, e0); -+ -+ psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount] = OSAllocZMem(num_valid_pages * sizeof(struct sg_table *)); -+ PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount], eError, e1); -+ -+ psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount] = OSAllocZMem(num_valid_pages * sizeof(struct dma_async_tx_descriptor *)); -+ PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount], eError, e11); -+ -+ for (ui32Idx = 0, valid_idx = 0; ui32Idx < ui32SizeInPages; ui32Idx++) -+ { -+ if (valid_idx == num_valid_pages) -+ { -+ break; -+ } -+ if (!pbValid[ui32Idx]) -+ { -+ pvNextAddress += (ui32Idx == 0) ? ui32PageSize - uiOffsetInFirstPMRPage : ui32PageSize; -+ continue; -+ } -+ -+ /* Pick transfer size */ -+ if (ui32Idx == 0) -+ { -+ if (uiOffsetInFirstPMRPage + uiSize <= ui32PageSize) -+ { -+ PVR_ASSERT(num_valid_pages == 1); -+ transfer_size = uiSize; -+ } -+ else -+ { -+ transfer_size = ui32PageSize - uiOffsetInFirstPMRPage; -+ } -+ } -+ else -+ { -+ /* Last valid LMA page */ -+ if (valid_idx == num_valid_pages - 1) -+ { -+ transfer_size = ((uiOffsetInFirstPMRPage + uiSize - 1) % ui32PageSize) + 1; -+ } -+ else -+ { -+ transfer_size = ui32PageSize; -+ } -+ } -+ -+ if (((unsigned long long)pvNextAddress & (ui32PageSize - 1)) + transfer_size > ui32PageSize) -+ { -+ num_pages = 2; -+ } -+ else -+ { -+ num_pages = 1; -+ } -+ -+ next_pages = psOSCleanupData->pages[psOSCleanupData->uiCount] + (valid_idx * 2); -+ -+ num_pinned_pages = pvr_pin_user_pages_for_dma( -+ pvNextAddress, -+ num_pages, -+ !bMemToDev, -+ next_pages); -+ -+ if (num_pinned_pages != num_pages) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "get_user_pages_fast for sparse failed: (%d - %u)", num_pinned_pages, num_pages)); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto e2; -+ } -+ -+#if defined(SUPPORT_VALIDATION) && defined(PVRSRV_DEBUG_DMA) -+ DMADumpPhysicalAddresses(next_pages, num_pages, -+ &psDmaAddr[ui32Idx], -+ (unsigned long)pvNextAddress & (ui32PageSize - 1)); -+#endif -+ -+ psSg = OSAllocZMem(sizeof(struct sg_table)); -+ PVR_LOG_GOTO_IF_NOMEM(psSg, eError, e3); -+ -+ if (sg_alloc_table_from_pages(psSg, next_pages, num_pages, -+ (unsigned long)pvNextAddress & (ui32PageSize - 1), -+ transfer_size, -+ GFP_KERNEL) != 0) -+ { -+ eError = PVRSRV_ERROR_BAD_MAPPING; -+ PVR_DPF((PVR_DBG_ERROR, "sg_alloc_table_from_pages failed")); -+ goto e4; -+ } -+ -+ pvNextAddress += transfer_size; -+ -+ if (bMemToDev) -+ { -+ sConfig.direction = DMA_MEM_TO_DEV; -+ sConfig.src_addr = 0; -+ sConfig.dst_addr = psDmaAddr[ui32Idx].uiAddr; -+ } -+ else -+ { -+ sConfig.direction = DMA_DEV_TO_MEM; -+ sConfig.src_addr = psDmaAddr[ui32Idx].uiAddr; -+ sConfig.dst_addr = 0; -+ } -+ dmaengine_slave_config(pvChan, &sConfig); -+ -+ iRet = dma_map_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, eDataDirection); -+ if (!iRet) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Error mapping SG list", __func__)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto e5; -+ } -+ dma_sync_sg_for_device(psDevConfig->pvOSDevice, psSg->sgl,(unsigned int)iRet, eDataDirection); -+ -+ psDesc = dmaengine_prep_slave_sg(pvChan, psSg->sgl, (unsigned int)iRet, sConfig.direction, 0); -+ if (!psDesc) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: dmaengine_prep_slave_sg failed", __func__)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto e6; -+ 
} -+ -+ psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount][valid_idx] = psSg; -+ psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount][valid_idx] = psDesc; -+ psOSCleanupData->puiNumPages[psOSCleanupData->uiCount] = ++valid_idx; -+ -+ if (valid_idx == num_valid_pages) -+ { -+ psDesc->callback_param = psOSCleanupData; -+ psDesc->callback = dma_callback; -+ -+ if (bFirst) -+ { -+ struct task_struct* t1; -+ -+ psOSCleanupData->eDirection = eDataDirection; -+ psOSCleanupData->pfnServerCleanup = pfnServerCleanup; -+ psOSCleanupData->pvServerCleanupData = pvServerCleanupParam; -+ -+ t1 = kthread_run(cleanup_thread, psOSCleanupData, "dma-cleanup-thread"); -+ } -+ -+ psOSCleanupData->uiCount++; -+ } -+ -+ } -+ -+ return PVRSRV_OK; -+ -+e6: -+ dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, eDataDirection); -+e5: -+ sg_free_table(psSg); -+e4: -+ OSFreeMem(psSg); -+e3: -+ /* Unpin last */ -+ pvr_unpin_user_page_for_dma(psOSCleanupData->pages[psOSCleanupData->uiCount][valid_idx]); -+ -+ if (psOSCleanupData->pages[psOSCleanupData->uiCount][valid_idx+1]) -+ { -+ pvr_unpin_user_page_for_dma(psOSCleanupData->pages[psOSCleanupData->uiCount][valid_idx+1]); -+ } -+e2: -+ /* rewind */ -+ for (i32Rwd=valid_idx-1; i32Rwd >= 0; i32Rwd--) -+ { -+ IMG_UINT32 i; -+ -+ psSg = psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount][i32Rwd]; -+ dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, eDataDirection); -+ sg_free_table(psSg); -+ -+ /* Unpin pages */ -+ for (i=0; i < psOSCleanupData->puiNumPages[psOSCleanupData->uiCount]*2; i++) -+ { -+ if (psOSCleanupData->pages[psOSCleanupData->uiCount][i]) -+ { -+ pvr_unpin_user_page_for_dma(psOSCleanupData->pages[psOSCleanupData->uiCount][i]); -+ } -+ } -+ } -+ OSFreeMem(psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount]); -+e11: -+ OSFreeMem(psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount]); -+e1: -+ OSFreeMem(psOSCleanupData->pages[psOSCleanupData->uiCount]); -+e0: -+ return eError; -+} -+ -+#endif /* SUPPORT_DMA_TRANSFER */ -+ -+#if defined(SUPPORT_SECURE_ALLOC_KM) -+#if defined(PVR_ANDROID_HAS_DMA_HEAP_FIND) -+IMG_INTERNAL PVRSRV_ERROR -+OSAllocateSecBuf(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR *pszName, -+ PMR **ppsPMR) -+{ -+ struct dma_heap *heap; -+ struct dma_buf *buf; -+ struct device *dev; -+ struct dma_buf_attachment *buf_attachment; -+ -+ IMG_UINT32 ui32MappingTable = 0; -+ PVRSRV_ERROR eError; -+ IMG_CHAR *pszHeapName; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode, "psDeviceNode"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode->psDevConfig->pszSecureDMAHeapName, "pszSecureDMAHeapName"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM((OSStringLength(psDeviceNode->psDevConfig->pszSecureDMAHeapName) > 0), "pszSecureDMAHeapName length"); -+ -+ pszHeapName = psDeviceNode->psDevConfig->pszSecureDMAHeapName; -+ dev = (struct device*)psDeviceNode->psDevConfig->pvOSDevice; -+ -+ heap = dma_heap_find(pszHeapName); -+ PVR_LOG_GOTO_IF_NOMEM(heap, eError, ErrorExit); -+ -+ buf = dma_heap_buffer_alloc(heap, uiSize, 0, 0); -+ PVR_LOG_GOTO_IF_NOMEM(buf, eError, ErrorBufPut); -+ -+ if (buf->size < uiSize) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: buffer size (%ld) is less than requested (%lld).", -+ __func__, buf->size, uiSize)); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto ErrorBufFree; -+ } -+ -+ buf_attachment = dma_buf_attach(buf, dev); -+ PVR_LOG_GOTO_IF_NOMEM(buf_attachment, eError, ErrorBufFree); -+ -+ eError = 
PhysmemCreateNewDmaBufBackedPMR(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_EXTERNAL], -+ buf_attachment, -+ NULL, -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE -+ | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE, -+ buf->size, -+ 1, -+ 1, -+ &ui32MappingTable, -+ OSStringLength(pszName), -+ pszName, -+ ppsPMR); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateNewDmaBufBackedPMR", ErrorBufDetach); -+ -+ return PVRSRV_OK; -+ -+ErrorBufDetach: -+ dma_buf_detach(buf, buf_attachment); -+ErrorBufFree: -+ dma_heap_buffer_free(buf); -+ErrorBufPut: -+ dma_buf_put(buf); -+ErrorExit: -+ -+ return eError; -+} -+ -+IMG_INTERNAL void -+OSFreeSecBuf(PMR *psPMR) -+{ -+ struct dma_buf *buf = PhysmemGetDmaBuf(psPMR); -+ dma_buf_put(buf); -+ dma_heap_buffer_free(buf); -+ -+ PMRUnrefPMR(psPMR); -+} -+#else /* PVR_ANDROID_HAS_DMA_HEAP_FIND */ -+IMG_INTERNAL PVRSRV_ERROR -+OSAllocateSecBuf(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR *pszName, -+ PMR **ppsPMR) -+{ -+ IMG_UINT32 ui32MappingTable = 0; -+ PVRSRV_ERROR eError; -+ -+ eError = PhysmemNewRamBackedPMR(NULL, -+ psDeviceNode, -+ uiSize, -+ 1, -+ 1, -+ &ui32MappingTable, -+ ExactLog2(OSGetPageSize()), -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(GPU_SECURE) -+ | PVRSRV_MEMALLOCFLAG_GPU_READABLE -+ | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE, -+ OSStringLength(pszName), -+ pszName, -+ OSGetCurrentClientProcessIDKM(), -+ ppsPMR, -+ PDUMP_NONE, -+ NULL); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemNewRamBackedPMR", ErrorExit); -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ eError = RIWritePMREntryWithOwnerKM(*ppsPMR, PVR_SYS_ALLOC_PID); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RIWritePMREntryWithOwnerKM", ErrorUnrefPMR); -+#endif -+ -+ return PVRSRV_OK; -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ErrorUnrefPMR: -+ PMRUnrefPMR(*ppsPMR); -+#endif -+ErrorExit: -+ return eError; -+} -+ -+IMG_INTERNAL void -+OSFreeSecBuf(PMR *psPMR) -+{ -+ PMRUnrefPMR(psPMR); -+} -+#endif -+#endif /* SUPPORT_SECURE_ALLOC_KM */ -diff --git a/drivers/gpu/drm/img-rogue/osfunc.h b/drivers/gpu/drm/img-rogue/osfunc.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/osfunc.h -@@ -0,0 +1,1882 @@ -+/*************************************************************************/ /*! -+@File -+@Title OS functions header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description OS specific API definitions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifdef DEBUG_RELEASE_BUILD -+#pragma optimize( "", off ) -+#define DEBUG 1 -+#endif -+ -+#ifndef OSFUNC_H -+/*! @cond Doxygen_Suppress */ -+#define OSFUNC_H -+/*! @endcond */ -+ -+#if defined(__linux__) && defined(__KERNEL__) -+#include "kernel_nospec.h" -+#if !defined(NO_HARDWARE) -+#include -+ -+#endif -+#endif -+ -+#if defined(__linux__) && defined(__KERNEL__) -+ #include -+ -+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) -+ #include -+ #else -+ #include -+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ -+#else -+ #include -+#endif /* __linux__ */ -+ -+#if defined(__QNXNTO__) -+#include -+#include -+#endif -+ -+#if defined(INTEGRITY_OS) -+#include -+#include -+#endif -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "device.h" -+#include "pvrsrv_device.h" -+#include "cache_ops.h" -+#include "osfunc_common.h" -+#if defined(SUPPORT_DMA_TRANSFER) -+#include "dma_km.h" -+#include "pmr.h" -+#endif -+ -+/****************************************************************************** -+ * Static defines -+ *****************************************************************************/ -+/*! -+ * Returned by OSGetCurrentProcessID() and OSGetCurrentThreadID() if the OS -+ * is currently operating in the interrupt context. -+ */ -+#define KERNEL_ID 0xffffffffL -+ -+#if defined(__linux__) && defined(__KERNEL__) -+#define OSConfineArrayIndexNoSpeculation(index, size) array_index_nospec((index), (size)) -+#elif defined(__QNXNTO__) -+#define OSConfineArrayIndexNoSpeculation(index, size) (index) -+#define PVRSRV_MISSING_NO_SPEC_IMPL -+#elif defined(INTEGRITY_OS) -+#define OSConfineArrayIndexNoSpeculation(index, size) (index) -+#define PVRSRV_MISSING_NO_SPEC_IMPL -+#else -+/*************************************************************************/ /*! -+@Function OSConfineArrayIndexNoSpeculation -+@Description This macro aims to avoid code exposure to Cache Timing -+ Side-Channel Mechanisms which rely on speculative code -+ execution (Variant 1). It does so by ensuring a value to be -+ used as an array index will be set to zero if outside of the -+ bounds of the array, meaning any speculative execution of code -+ which uses this suitably adjusted index value will not then -+ attempt to load data from memory outside of the array bounds. 
-+ Code calling this macro must still first verify that the -+ original unmodified index value is within the bounds of the -+ array, and should then only use the modified value returned -+ by this function when accessing the array itself. -+ NB. If no OS-specific implementation of this macro is -+ defined, the original index is returned unmodified and no -+ protection against the potential exploit is provided. -+@Input index The original array index value that would be used to -+ access the array. -+@Input size The number of elements in the array being accessed. -+@Return The value to use for the array index, modified so that it -+ remains within array bounds. -+*/ /**************************************************************************/ -+#define OSConfineArrayIndexNoSpeculation(index, size) (index) -+#if !defined(DOXYGEN) -+#define PVRSRV_MISSING_NO_SPEC_IMPL -+#endif -+#endif -+ -+/*************************************************************************/ /*! -+@Function OSClockns64 -+@Description This function returns the number of ticks since system boot -+ expressed in nanoseconds. Unlike OSClockns, OSClockns64 has -+ a near 64-bit range. -+@Return The 64-bit clock value, in nanoseconds. -+*/ /**************************************************************************/ -+IMG_UINT64 OSClockns64(void); -+ -+/*************************************************************************/ /*! -+@Function OSClockus64 -+@Description This function returns the number of ticks since system boot -+ expressed in microseconds. Unlike OSClockus, OSClockus64 has -+ a near 64-bit range. -+@Return The 64-bit clock value, in microseconds. -+*/ /**************************************************************************/ -+IMG_UINT64 OSClockus64(void); -+ -+/*************************************************************************/ /*! -+@Function OSClockus -+@Description This function returns the number of ticks since system boot -+ in microseconds. -+@Return The 32-bit clock value, in microseconds. -+*/ /**************************************************************************/ -+IMG_UINT32 OSClockus(void); -+ -+/*************************************************************************/ /*! -+@Function OSClockms -+@Description This function returns the number of ticks since system boot -+ in milliseconds. -+@Return The 32-bit clock value, in milliseconds. -+*/ /**************************************************************************/ -+IMG_UINT32 OSClockms(void); -+ -+/*************************************************************************/ /*! -+@Function OSClockMonotonicns64 -+@Description This function returns a clock value based on the system -+ monotonic clock. -+@Output pui64Time The 64-bit clock value, in nanoseconds. -+@Return Error Code. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time); -+ -+/*************************************************************************/ /*! -+@Function OSClockMonotonicus64 -+@Description This function returns a clock value based on the system -+ monotonic clock. -+@Output pui64Time The 64-bit clock value, in microseconds. -+@Return Error Code. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time); -+ -+/*************************************************************************/ /*! 
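A minimal sketch of the bounds-check-then-confine pattern this comment describes; LookupEntry and its parameters are illustrative, not part of the header.

/* Illustrative only: reject out-of-range indices first, then confine. */
static IMG_UINT32 LookupEntry(const IMG_UINT32 *pauiTable,
                              IMG_UINT32 uiCount, IMG_UINT32 uiIndex)
{
	if (uiIndex >= uiCount)
	{
		return 0;   /* 1. the normal bounds check still happens first */
	}

	/* 2. only the confined value is used to access the array, so a
	 *    speculative out-of-bounds load cannot leak adjacent memory */
	uiIndex = OSConfineArrayIndexNoSpeculation(uiIndex, uiCount);

	return pauiTable[uiIndex];
}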
-+@Function OSClockMonotonicRawns64 -+@Description This function returns a clock value based on the system -+ monotonic raw clock. -+@Return 64bit ns timestamp -+*/ /**************************************************************************/ -+IMG_UINT64 OSClockMonotonicRawns64(void); -+ -+/*************************************************************************/ /*! -+@Function OSClockMonotonicRawus64 -+@Description This function returns a clock value based on the system -+ monotonic raw clock. -+@Return 64bit us timestamp -+*/ /**************************************************************************/ -+IMG_UINT64 OSClockMonotonicRawus64(void); -+ -+/*************************************************************************/ /*! -+@Function OSGetPageSize -+@Description This function returns the page size. -+ If the OS is not using memory mappings it should return a -+ default value of 4096. -+@Return The size of a page, in bytes. -+*/ /**************************************************************************/ -+size_t OSGetPageSize(void); -+ -+/*************************************************************************/ /*! -+@Function OSGetPageShift -+@Description This function returns the page size expressed as a power of -+ two. A number of pages, left-shifted by this value, gives the -+ equivalent size in bytes. -+ If the OS is not using memory mappings it should return a -+ default value of 12. -+@Return The page size expressed as a power of two. -+*/ /**************************************************************************/ -+size_t OSGetPageShift(void); -+ -+/*************************************************************************/ /*! -+@Function OSGetPageMask -+@Description This function returns a bitmask that may be applied to an -+ address to mask off the least-significant bits so as to -+ leave the start address of the page containing that address. -+@Return The page mask. -+*/ /**************************************************************************/ -+size_t OSGetPageMask(void); -+ -+/*************************************************************************/ /*! -+@Function OSGetOrder -+@Description This function returns the order of power of two for a given -+ size. Eg. for a uSize of 4096 bytes the function would -+ return 12 (4096 = 2^12). -+@Input uSize The size in bytes. -+@Return The order of power of two. -+*/ /**************************************************************************/ -+size_t OSGetOrder(size_t uSize); -+ -+/*************************************************************************/ /*! -+@Function OSGetRAMSize -+@Description This function returns the total amount of GPU-addressable -+ memory provided by the system. In other words, after loading -+ the driver this would be the largest allocation an -+ application would reasonably expect to be able to make. -+ Note that this is function is not expected to return the -+ current available memory but the amount which would be -+ available on startup. -+@Return Total GPU-addressable memory size, in bytes. -+*/ /**************************************************************************/ -+IMG_UINT64 OSGetRAMSize(void); -+ -+/*************************************************************************/ /*! -+@Description Pointer to a Mid-level Interrupt Service Routine (MISR). -+@Input pvData Pointer to MISR specific data. 
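A small illustrative use of the page helpers above, assuming a 4 KiB page OS (OSGetPageSize() == 4096, OSGetPageShift() == 12); ExamplePageMaths is not part of the header.

/* Illustrative only: rounding a byte size up to pages and page-aligning an address. */
static size_t ExamplePageMaths(size_t uiBytes)
{
	size_t uiNumPages = (uiBytes + OSGetPageSize() - 1) >> OSGetPageShift();
	uintptr_t uiPageBase = (uintptr_t)0x12345678 & OSGetPageMask();

	/* For uiBytes == 10000 on a 4 KiB page OS: uiNumPages == 3, uiPageBase == 0x12345000 */
	PVR_UNREFERENCED_PARAMETER(uiPageBase);

	return uiNumPages;
}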
-+*/ /**************************************************************************/ -+typedef void (*PFN_MISR)(void *pvData); -+ -+/*************************************************************************/ /*! -+@Description Pointer to a thread entry point function. -+@Input pvData Pointer to thread specific data. -+*/ /**************************************************************************/ -+typedef void (*PFN_THREAD)(void *pvData); -+ -+/*************************************************************************/ /*! -+@Function OSChangeSparseMemCPUAddrMap -+@Description This function changes the CPU mapping of the underlying -+ sparse allocation. It is used by a PMR 'factory' -+ implementation if that factory supports sparse -+ allocations. -+@Input psPageArray array representing the pages in the -+ sparse allocation -+@Input sCpuVAddrBase the virtual base address of the sparse -+ allocation ('first' page) -+@Input sCpuPAHeapBase the physical address of the virtual -+ base address 'sCpuVAddrBase' -+@Input ui32AllocPageCount the number of pages referenced in -+ 'pai32AllocIndices' -+@Input pai32AllocIndices list of indices of pages within -+ 'psPageArray' that we now want to -+ allocate and map -+@Input ui32FreePageCount the number of pages referenced in -+ 'pai32FreeIndices' -+@Input pai32FreeIndices list of indices of pages within -+ 'psPageArray' we now want to -+ unmap and free -+@Input bIsLMA flag indicating if the sparse allocation -+ is from LMA or UMA memory -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray, -+ IMG_UINT64 sCpuVAddrBase, -+ IMG_CPU_PHYADDR sCpuPAHeapBase, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *pai32AllocIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pai32FreeIndices, -+ IMG_BOOL bIsLMA); -+ -+/*************************************************************************/ /*! -+@Function OSInstallMISR -+@Description Installs a Mid-level Interrupt Service Routine (MISR) -+ which handles higher-level processing of interrupts from -+ the device (GPU). -+ An MISR runs outside of interrupt context, and so may be -+ descheduled. This means it can contain code that would -+ not be permitted in the LISR. -+ An MISR is invoked when OSScheduleMISR() is called. This -+ call should be made by installed LISR once it has completed -+ its interrupt processing. -+ Multiple MISRs may be installed by the driver to handle -+ different causes of interrupt. -+@Input pfnMISR pointer to the function to be installed -+ as the MISR -+@Input hData private data provided to the MISR -+@Input pszMisrName Name describing purpose of MISR worker thread -+ (Must be a string literal). -+@Output hMISRData handle to the installed MISR (to be used -+ for a subsequent uninstall) -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, -+ PFN_MISR pfnMISR, -+ void *hData, -+ const IMG_CHAR *pszMisrName); -+ -+/*************************************************************************/ /*! -+@Function OSUninstallMISR -+@Description Uninstalls a Mid-level Interrupt Service Routine (MISR). -+@Input hMISRData handle to the installed MISR -+@Return PVRSRV_OK on success, a failure code otherwise. 
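A sketch of the intended LISR/MISR split, using only the calls declared here; ExampleMISR, ExampleMISRInit and psDeviceNode are illustrative and the LISR itself is assumed to be registered elsewhere.

/* Illustrative only, not part of the patch. */
static IMG_HANDLE ghExampleMISR;

static void ExampleMISR(void *pvData)
{
	/* Runs outside interrupt context, so it may sleep, take mutexes, etc. */
	PVR_UNREFERENCED_PARAMETER(pvData);
}

static PVRSRV_ERROR ExampleMISRInit(void *psDeviceNode)
{
	/* Install once at device initialisation */
	return OSInstallMISR(&ghExampleMISR, ExampleMISR, psDeviceNode, "example_misr");
}

/* From the LISR, after acknowledging the device interrupt, defer the heavy
 * lifting with OSScheduleMISR(ghExampleMISR); tear down at deinit with
 * OSUninstallMISR(ghExampleMISR). */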
-+*/ /**************************************************************************/ -+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData); -+ -+/*************************************************************************/ /*! -+@Function OSScheduleMISR -+@Description Schedules a Mid-level Interrupt Service Routine (MISR) to be -+ executed. An MISR should be executed outside of interrupt -+ context, for example in a work queue. -+@Input hMISRData handle to the installed MISR -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData); -+ -+/*************************************************************************/ /*! -+@Description Pointer to a function implementing debug dump of thread-specific -+ data. -+@Input pfnDumpDebugPrintf Used to specify the print function used -+ to dump any debug information. If this -+ argument is NULL then a default print -+ function will be used. -+@Input pvDumpDebugFile File identifier to be passed to the -+ print function if specified. -+*/ /**************************************************************************/ -+ -+typedef void (*PFN_THREAD_DEBUG_DUMP)(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf, -+ void *pvDumpDebugFile); -+ -+/*************************************************************************/ /*! -+@Function OSThreadCreate -+@Description Creates a kernel thread and starts it running. The caller -+ is responsible for informing the thread that it must finish -+ and return from the pfnThread function. It is not possible -+ to kill or terminate it. The new thread runs with the default -+ priority provided by the Operating System. -+ Note: Kernel threads are freezable which means that they -+ can be frozen by the kernel on for example driver suspend. -+ Because of that only OSEventObjectWaitKernel() function should -+ be used to put kernel threads in waiting state. -+@Output phThread Returned handle to the thread. -+@Input pszThreadName Name to assign to the thread. -+@Input pfnThread Thread entry point function. -+@Input pfnDebugDumpCB Used to dump info of the created thread -+@Input bIsSupportingThread Set, if summary of this thread needs to -+ be dumped in debug_dump -+@Input hData Thread specific data pointer for pfnThread(). -+@Return Standard PVRSRV_ERROR error code. -+*/ /**************************************************************************/ -+ -+PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread, -+ IMG_CHAR *pszThreadName, -+ PFN_THREAD pfnThread, -+ PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB, -+ IMG_BOOL bIsSupportingThread, -+ void *hData); -+ -+/*! Available priority levels for the creation of a new Kernel Thread. */ -+typedef enum priority_levels -+{ -+ OS_THREAD_NOSET_PRIORITY = 0, /* With this option the priority level is the default for the given OS */ -+ OS_THREAD_HIGHEST_PRIORITY, -+ OS_THREAD_HIGH_PRIORITY, -+ OS_THREAD_NORMAL_PRIORITY, -+ OS_THREAD_LOW_PRIORITY, -+ OS_THREAD_LOWEST_PRIORITY, -+ OS_THREAD_LAST_PRIORITY /* This must be always the last entry */ -+} OS_THREAD_LEVEL; -+ -+/*************************************************************************/ /*! -+@Function OSThreadCreatePriority -+@Description As OSThreadCreate, this function creates a kernel thread and -+ starts it running. The difference is that with this function -+ is possible to specify the priority used to schedule the new -+ thread. -+ -+@Output phThread Returned handle to the thread. 
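A minimal worker-thread lifecycle built on OSThreadCreate()/OSThreadDestroy(); the Example* names and the stop flag are illustrative stand-ins for whatever mechanism the caller uses to ask its thread to finish.

/* Illustrative only, not part of the patch. */
static volatile IMG_BOOL gbExampleStop = IMG_FALSE;

static void ExampleThread(void *hData)
{
	PVR_UNREFERENCED_PARAMETER(hData);

	while (!gbExampleStop)
	{
		/* Do work; sleep only via OSEventObjectWaitKernel() so the thread
		 * stays freezable across suspend, as the note above requires. */
	}
	/* Returning here is what allows OSThreadDestroy() to complete. */
}

static PVRSRV_ERROR ExampleThreadStartStop(void)
{
	IMG_HANDLE hThread;
	PVRSRV_ERROR eError = OSThreadCreate(&hThread, "example_worker", ExampleThread,
	                                     NULL, IMG_FALSE, NULL);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	gbExampleStop = IMG_TRUE;          /* ask the thread to finish            */
	return OSThreadDestroy(hThread);   /* block until ExampleThread returns   */
}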
-+@Input pszThreadName Name to assign to the thread. -+@Input pfnThread Thread entry point function. -+@Input pfnDebugDumpCB Used to dump info of the created thread -+@Input bIsSupportingThread Set, if summary of this thread needs to -+ be dumped in debug_dump -+@Input hData Thread specific data pointer for pfnThread(). -+@Input eThreadPriority Priority level to assign to the new thread. -+@Return Standard PVRSRV_ERROR error code. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread, -+ IMG_CHAR *pszThreadName, -+ PFN_THREAD pfnThread, -+ PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB, -+ IMG_BOOL bIsSupportingThread, -+ void *hData, -+ OS_THREAD_LEVEL eThreadPriority); -+ -+/*************************************************************************/ /*! -+@Function OSThreadDestroy -+@Description Waits for the thread to end and then destroys the thread -+ handle memory. This function will block and wait for the -+ thread to finish successfully, thereby providing a sync point -+ for the thread completing its work. No attempt is made to kill -+ or otherwise terminate the thread. -+@Input hThread The thread handle returned by OSThreadCreate(). -+@Return Standard PVRSRV_ERROR error code. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread); -+ -+/*************************************************************************/ /*! -+@Function OSIsMapPhysNonContigSupported -+@Description Determine if the kernel mapping of physically non-contiguous -+ pages is supported in the OS layer -+ -+ Note: For this function to return IMG_TRUE a full implementation -+ of the following functions is required: -+ OSMapPhysArrayToLin -+ OSUnMapPhysArrayToLin -+ -+@Return IMG_BOOL -+*/ /**************************************************************************/ -+IMG_BOOL OSIsMapPhysNonContigSupported(void); -+ -+/*************************************************************************/ /*! -+@Function OSUnMapPhysArrayToLin -+@Description UnMap a kernel virtual address that was produced by mapping -+ a number of Pages in OSMapPhysArrayToLin. -+ -+ Note: This function is only required if the Non contiguous -+ allocation feature is required, in this case -+ OSIsMapPhysNonContigSupported should return IMG_TRUE. -+ If not required this function should return -+ PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED -+ -+@Input pvLinAddr The linear mapping to be unmapped -+@Input pvPrivData Optional implementation specific data. -+ -+@Return None -+*/ /**************************************************************************/ -+void OSUnMapPhysArrayToLin(void *pvLinAddr, void *pvPrivData); -+ -+/*************************************************************************/ /*! -+@Function OSMapPhysArrayToLin -+@Description Given an array of OS page physical addresses and a count -+ of said Pages, this function will map those pages into a -+ virtually contiguous range, this allows for non physically -+ contiguous allocations to be mapped into the kernel. -+ Page size is assumed to be OS page size. -+ -+ Note: This function is only required if the Non contiguous -+ allocation feature is required, in this case -+ OSIsMapPhysNonContigSupported should return IMG_TRUE. 
-+ If not required this function should return -+ PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED -+ -+@Input pPagePA Array of Pages -+@Input uiPageCount Page count of pulPages -+@Output ppvLinAddr Pointer to a virtual kernel address of the -+ mapped Pages. -+@Output ppvPrivData Optional implementation specific data. -+@Return Standard PVRSRV_ERROR error code. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSMapPhysArrayToLin(IMG_CPU_PHYADDR pPagePA[], -+ IMG_UINT32 uiPageCount, -+ void **ppvLinAddr, -+ void **ppvPrivData); -+ -+/*************************************************************************/ /*! -+@Function OSMapPhysToLin -+@Description Maps physical memory into a linear address range. -+@Input BasePAddr physical CPU address -+@Input ui32Bytes number of bytes to be mapped -+@Input uiFlags flags denoting the caching mode to be employed -+ for the mapping (uncached/write-combined, -+ cached coherent or cached incoherent). -+ See pvrsrv_memallocflags.h for full flag bit -+ definitions. -+@Return Pointer to the new mapping if successful, NULL otherwise. -+*/ /**************************************************************************/ -+void *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, size_t ui32Bytes, PVRSRV_MEMALLOCFLAGS_T uiFlags); -+ -+/*************************************************************************/ /*! -+@Function OSUnMapPhysToLin -+@Description Unmaps physical memory previously mapped by OSMapPhysToLin(). -+@Input pvLinAddr the linear mapping to be unmapped -+@Input ui32Bytes number of bytes to be unmapped -+@Return IMG_TRUE if unmapping was successful, IMG_FALSE otherwise. -+*/ /**************************************************************************/ -+IMG_BOOL OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes); -+ -+/*************************************************************************/ /*! -+@Function OSCPUCacheFlushRangeKM -+@Description Clean and invalidate the CPU cache for the specified -+ address range. -+@Input psDevNode device on which the allocation was made -+@Input pvVirtStart virtual start address of the range to be -+ flushed -+@Input pvVirtEnd virtual end address of the range to be -+ flushed -+@Input sCPUPhysStart physical start address of the range to be -+ flushed -+@Input sCPUPhysEnd physical end address of the range to be -+ flushed -+@Return None -+*/ /**************************************************************************/ -+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd); -+ -+/*************************************************************************/ /*! -+@Function OSCPUCacheCleanRangeKM -+@Description Clean the CPU cache for the specified address range. -+ This writes out the contents of the cache and clears the -+ 'dirty' bit (which indicates the physical memory is -+ consistent with the cache contents). 
-+@Input psDevNode device on which the allocation was made -+@Input pvVirtStart virtual start address of the range to be -+ cleaned -+@Input pvVirtEnd virtual end address of the range to be -+ cleaned -+@Input sCPUPhysStart physical start address of the range to be -+ cleaned -+@Input sCPUPhysEnd physical end address of the range to be -+ cleaned -+@Return None -+*/ /**************************************************************************/ -+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd); -+ -+/*************************************************************************/ /*! -+@Function OSCPUCacheInvalidateRangeKM -+@Description Invalidate the CPU cache for the specified address range. -+ The cache must reload data from those addresses if they -+ are accessed. -+@Input psDevNode device on which the allocation was made -+@Input pvVirtStart virtual start address of the range to be -+ invalidated -+@Input pvVirtEnd virtual end address of the range to be -+ invalidated -+@Input sCPUPhysStart physical start address of the range to be -+ invalidated -+@Input sCPUPhysEnd physical end address of the range to be -+ invalidated -+@Return None -+*/ /**************************************************************************/ -+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd); -+ -+/*! CPU Cache operations address domain type */ -+typedef enum -+{ -+ OS_CACHE_OP_ADDR_TYPE_VIRTUAL, /*!< Operation requires CPU virtual address only */ -+ OS_CACHE_OP_ADDR_TYPE_PHYSICAL, /*!< Operation requires CPU physical address only */ -+ OS_CACHE_OP_ADDR_TYPE_BOTH /*!< Operation requires both CPU virtual & physical addresses */ -+} OS_CACHE_OP_ADDR_TYPE; -+ -+/*************************************************************************/ /*! -+@Function OSCPUCacheOpAddressType -+@Description Returns the address type (i.e. virtual/physical/both) the CPU -+ architecture performs cache maintenance operations under. -+ This is used to infer whether the virtual or physical address -+ supplied to the OSCPUCacheXXXRangeKM functions can be omitted -+ when called. -+@Return OS_CACHE_OP_ADDR_TYPE -+*/ /**************************************************************************/ -+OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(PVRSRV_DEVICE_NODE *psDevNode); -+ -+/*! CPU Cache attributes available for retrieval, DCache unless specified */ -+typedef enum _OS_CPU_CACHE_ATTRIBUTE_ -+{ -+ OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE, /*!< The cache line size */ -+ OS_CPU_CACHE_ATTRIBUTE_COUNT /*!< The number of attributes (must be last) */ -+} OS_CPU_CACHE_ATTRIBUTE; -+ -+/*************************************************************************/ /*! -+@Function OSCPUCacheAttributeSize -+@Description Returns the size of a given cache attribute. -+ Typically this function is used to return the cache line -+ size, but may be extended to return the size of other -+ cache attributes. -+@Input eCacheAttribute the cache attribute whose size should -+ be returned. -+@Return The size of the specified cache attribute, in bytes. -+*/ /**************************************************************************/ -+IMG_UINT32 OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE eCacheAttribute); -+ -+/*************************************************************************/ /*! 
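A sketch of typical cache maintenance around a shared, CPU-cached buffer; ExampleCacheMaintenance and its parameters are illustrative, and pvStart/uiLength with the matching physical addresses are assumed to describe one allocation obtained elsewhere in the driver.

/* Illustrative only, not part of the patch. */
static void ExampleCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode,
                                    void *pvStart, size_t uiLength,
                                    IMG_CPU_PHYADDR sPhysStart,
                                    IMG_CPU_PHYADDR sPhysEnd)
{
	IMG_UINT32 uiLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE);
	void *pvEnd = (IMG_UINT8 *)pvStart + uiLength;

	PVR_DPF((PVR_DBG_MESSAGE, "DCache line size: %u bytes", uiLineSize));

	/* CPU wrote the buffer and the device will read it: write back ("clean") */
	OSCPUCacheCleanRangeKM(psDevNode, pvStart, pvEnd, sPhysStart, sPhysEnd);

	/* Device wrote the buffer and the CPU will read it: discard stale lines */
	OSCPUCacheInvalidateRangeKM(psDevNode, pvStart, pvEnd, sPhysStart, sPhysEnd);
}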
-+@Function OSGetCurrentProcessID -+@Description Returns ID of current process (thread group) -+@Return ID of current process -+*****************************************************************************/ -+IMG_PID OSGetCurrentProcessID(void); -+ -+/*************************************************************************/ /*! -+@Function OSGetCurrentVirtualProcessID -+@Description Returns ID of current process (thread group of current -+ PID namespace) -+@Return ID of current process in PID namespace -+*****************************************************************************/ -+IMG_PID OSGetCurrentVirtualProcessID(void); -+ -+/*************************************************************************/ /*! -+@Function OSGetCurrentProcessName -+@Description Gets the name of current process -+@Return Process name -+*****************************************************************************/ -+IMG_CHAR *OSGetCurrentProcessName(void); -+ -+/*************************************************************************/ /*! -+@Function OSGetCurrentProcessVASpaceSize -+@Description Returns the CPU virtual address space size of current process -+@Return Process VA space size -+*/ /**************************************************************************/ -+IMG_UINT64 OSGetCurrentProcessVASpaceSize(void); -+ -+/*************************************************************************/ /*! -+@Function OSGetCurrentThreadID -+@Description Returns ID for current thread -+@Return ID of current thread -+*****************************************************************************/ -+uintptr_t OSGetCurrentThreadID(void); -+ -+/*************************************************************************/ /*! -+@Function OSGetCurrentClientProcessIDKM -+@Description Returns ID of current client process (thread group) which -+ has made a bridge call into the server. -+ For some operating systems, this may simply be the current -+ process id. For others, it may be that a dedicated thread -+ is used to handle the processing of bridge calls and that -+ some additional processing is required to obtain the ID of -+ the client process making the bridge call. -+@Return ID of current client process -+*****************************************************************************/ -+IMG_PID OSGetCurrentClientProcessIDKM(void); -+ -+/*************************************************************************/ /*! -+@Function OSGetCurrentClientProcessNameKM -+@Description Gets the name of current client process -+@Return Client process name -+*****************************************************************************/ -+IMG_CHAR *OSGetCurrentClientProcessNameKM(void); -+ -+/*************************************************************************/ /*! -+@Function OSGetCurrentClientThreadIDKM -+@Description Returns ID for current client thread -+ For some operating systems, this may simply be the current -+ thread id. For others, it may be that a dedicated thread -+ is used to handle the processing of bridge calls and that -+ some additional processing is require to obtain the ID of -+ the client thread making the bridge call. -+@Return ID of current client thread -+*****************************************************************************/ -+uintptr_t OSGetCurrentClientThreadIDKM(void); -+ -+/*************************************************************************/ /*! -+@Function OSMemCmp -+@Description Compares two blocks of memory for equality. 
-+@Input pvBufA Pointer to the first block of memory -+@Input pvBufB Pointer to the second block of memory -+@Input uiLen The number of bytes to be compared -+@Return Value < 0 if pvBufA is less than pvBufB. -+ Value > 0 if pvBufB is less than pvBufA. -+ Value = 0 if pvBufA is equal to pvBufB. -+*****************************************************************************/ -+IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen); -+ -+/*************************************************************************/ /*! -+@Function OSPhyContigPagesAlloc -+@Description Allocates a number of contiguous physical pages. -+ If allocations made by this function are CPU cached then -+ OSPhyContigPagesClean has to be implemented to write the -+ cached data to memory. -+@Input psPhysHeap the heap from which to allocate -+@Input uiSize the size of the required allocation (in bytes) -+@Output psMemHandle a returned handle to be used to refer to this -+ allocation -+@Output psDevPAddr the physical address of the allocation -+@Input uiPid the process ID that this allocation should -+ be associated with -+@Return PVRSRV_OK on success, a failure code otherwise. -+*****************************************************************************/ -+PVRSRV_ERROR OSPhyContigPagesAlloc(PHYS_HEAP *psPhysHeap, size_t uiSize, -+ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_PID uiPid); -+ -+/*************************************************************************/ /*! -+@Function OSPhyContigPagesFree -+@Description Frees a previous allocation of contiguous physical pages -+@Input psPhysHeap the heap from which to allocate -+@Input psMemHandle the handle of the allocation to be freed -+@Return None. -+*****************************************************************************/ -+void OSPhyContigPagesFree(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle); -+ -+/*************************************************************************/ /*! -+@Function OSPhyContigPagesMap -+@Description Maps the specified allocation of contiguous physical pages -+ to a kernel virtual address -+@Input psPhysHeap the heap from which to allocate -+@Input psMemHandle the handle of the allocation to be mapped -+@Input uiSize the size of the allocation (in bytes) -+@Input psDevPAddr the physical address of the allocation -+@Output pvPtr the virtual kernel address to which the -+ allocation is now mapped -+@Return PVRSRV_OK on success, a failure code otherwise. -+*****************************************************************************/ -+PVRSRV_ERROR OSPhyContigPagesMap(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle, -+ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, -+ void **pvPtr); -+ -+/*************************************************************************/ /*! -+@Function OSPhyContigPagesUnmap -+@Description Unmaps the kernel mapping for the specified allocation of -+ contiguous physical pages -+@Input psPhysHeap the heap from which to allocate -+@Input psMemHandle the handle of the allocation to be unmapped -+@Input pvPtr the virtual kernel address to which the -+ allocation is currently mapped -+@Return None. -+*****************************************************************************/ -+void OSPhyContigPagesUnmap(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle, void *pvPtr); -+ -+/*************************************************************************/ /*! 
-+@Function       OSPhyContigPagesClean
-+@Description    Write the content of the specified allocation from CPU cache to
-+                memory from (start + uiOffset) to (start + uiOffset + uiLength).
-+                It is expected to be implemented as a cache clean operation but
-+                it is allowed to fall back to a cache clean + invalidate
-+                (i.e. flush).
-+                If allocations returned by OSPhyContigPagesAlloc are always
-+                uncached this can be implemented as a no-op.
-+@Input          psPhysHeap      the heap the allocation was made from
-+@Input          psMemHandle     the handle of the allocation to be flushed
-+@Input          uiOffset        the offset in bytes from the start of the
-+                                allocation from where to start flushing
-+@Input          uiLength        the amount to flush from the offset in bytes
-+@Return         PVRSRV_OK on success, a failure code otherwise.
-+*****************************************************************************/
-+PVRSRV_ERROR OSPhyContigPagesClean(PHYS_HEAP *psPhysHeap,
-+                                   PG_HANDLE *psMemHandle,
-+                                   IMG_UINT32 uiOffset,
-+                                   IMG_UINT32 uiLength);
-+
-+
-+/*************************************************************************/ /*!
-+@Function       OSInitEnvData
-+@Description    Called to initialise any environment-specific data. This
-+                could include initialising the bridge calling infrastructure
-+                or device memory management infrastructure.
-+@Return         PVRSRV_OK on success, a failure code otherwise.
-+*/ /**************************************************************************/
-+PVRSRV_ERROR OSInitEnvData(void);
-+
-+/*************************************************************************/ /*!
-+@Function       OSDeInitEnvData
-+@Description    The counterpart to OSInitEnvData(). Called to free any
-+                resources which may have been allocated by OSInitEnvData().
-+@Return         None.
-+*/ /**************************************************************************/
-+void OSDeInitEnvData(void);
-+
-+/*************************************************************************/ /*!
-+@Function       OSStringLCat
-+@Description    OS function to support the BSD C strlcat() function.
-+*/ /**************************************************************************/
-+size_t OSStringLCat(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDstSize);
-+
-+/*************************************************************************/ /*!
-+@Function       OSSNPrintf
-+@Description    OS function to support the standard C snprintf() function.
-+@Output         pStr        char array to print into
-+@Input          ui32Size    maximum size of data to write (chars)
-+@Input          pszFormat   format string
-+*/ /**************************************************************************/
-+IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...) __printf(3, 4);
-+
-+/*************************************************************************/ /*!
-+@Function       OSVSNPrintf
-+@Description    Printf to IMG string using variable args (see stdarg.h).
-+                This is necessary because the '...' notation does not
-+                support nested function calls.
-+@Input          ui32Size    maximum size of data to write (chars)
-+@Input          pszFormat   format string
-+@Input          vaArgs      variable args structure (from stdarg.h)
-+@Output         pStr        char array to print into
-+@Return         Number of characters written to the buffer on success, otherwise -1 on error
-+*/ /**************************************************************************/
-+IMG_INT32 OSVSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR* pszFormat, va_list vaArgs) __printf(3, 0);
-+
-+/*************************************************************************/ /*!
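An illustrative lifecycle for the OSPhyContigPages* group documented above: allocate, map, write, clean, unmap, free. The helper name, the one-page size and the single-word write are assumptions for the sketch; error handling is kept minimal.

static PVRSRV_ERROR ZeroOnePage(PHYS_HEAP *psPhysHeap, IMG_PID uiPid)
{
    PG_HANDLE sMemHandle;
    IMG_DEV_PHYADDR sDevPAddr;
    void *pvKernAddr = NULL;
    const size_t uiSize = 4096; /* one 4K page, purely for illustration */
    PVRSRV_ERROR eError;

    eError = OSPhyContigPagesAlloc(psPhysHeap, uiSize, &sMemHandle, &sDevPAddr, uiPid);
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    eError = OSPhyContigPagesMap(psPhysHeap, &sMemHandle, uiSize, &sDevPAddr, &pvKernAddr);
    if (eError == PVRSRV_OK)
    {
        /* CPU write followed by a clean so the data reaches memory even
         * if the mapping is CPU cached. */
        ((IMG_UINT32 *)pvKernAddr)[0] = 0;
        eError = OSPhyContigPagesClean(psPhysHeap, &sMemHandle, 0, uiSize);
        OSPhyContigPagesUnmap(psPhysHeap, &sMemHandle, pvKernAddr);
    }

    OSPhyContigPagesFree(psPhysHeap, &sMemHandle);
    return eError;
}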
-+@Function OSStringLength -+@Description OS function to support the standard C strlen() function. -+*/ /**************************************************************************/ -+size_t OSStringLength(const IMG_CHAR *pStr); -+ -+/*************************************************************************/ /*! -+@Function OSStringNLength -+@Description Return the length of a string, excluding the terminating null -+ byte ('\0'), but return at most 'uiCount' bytes. Only the first -+ 'uiCount' bytes of 'pStr' are interrogated. -+@Input pStr pointer to the string -+@Input uiCount the maximum length to return -+@Return Length of the string if less than 'uiCount' bytes, otherwise -+ 'uiCount'. -+*/ /**************************************************************************/ -+size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount); -+ -+/*************************************************************************/ /*! -+@Function OSStringNCompare -+@Description OS function to support the standard C strncmp() function. -+*/ /**************************************************************************/ -+IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2, -+ size_t uiSize); -+ -+/*************************************************************************/ /*! -+@Function OSStringToUINT32 -+@Description Changes string to IMG_UINT32. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base, -+ IMG_UINT32 *ui32Result); -+ -+/*************************************************************************/ /*! -+@Function OSStringUINT32ToStr -+@Description Changes IMG_UINT32 to string -+@Input pszBuf Buffer to write output number string -+@Input uSize Size of buffer provided, i.e. size of pszBuf -+@Input ui32Num Number to convert to string -+@Return Returns 0 if buffer is not sufficient to hold the number string, -+ else returns length of number string -+*/ /**************************************************************************/ -+IMG_UINT32 OSStringUINT32ToStr(IMG_CHAR *pszBuf, size_t uSize, IMG_UINT32 ui32Num); -+ -+/*************************************************************************/ /*! -+@Function OSEventObjectCreate -+@Description Create an event object. -+@Input pszName name to assign to the new event object. -+@Output EventObject the created event object. -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, -+ IMG_HANDLE *EventObject); -+ -+/*************************************************************************/ /*! -+@Function OSEventObjectDestroy -+@Description Destroy an event object. -+@Input hEventObject the event object to destroy. -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject); -+ -+/*************************************************************************/ /*! -+@Function OSEventObjectSignal -+@Description Signal an event object. Any thread waiting on that event -+ object will be woken. -+@Input hEventObject the event object to signal. -+@Return PVRSRV_OK on success, a failure code otherwise. 
-+*/ /**************************************************************************/ -+PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject); -+ -+/*************************************************************************/ /*! -+@Function OSEventObjectWait -+@Description Wait for an event object to signal. The function is passed -+ an OS event object handle (which allows the OS to have the -+ calling thread wait on the associated event object). -+ The calling thread will be rescheduled when the associated -+ event object signals. -+ If the event object has not signalled after a default timeout -+ period (defined in EVENT_OBJECT_TIMEOUT_MS), the function -+ will return with the result code PVRSRV_ERROR_TIMEOUT. -+ -+ -+@Input hOSEventKM the OS event object handle associated with -+ the event object. -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM); -+ -+/*************************************************************************/ /*! -+@Function OSEventObjectWaitKernel -+@Description Wait for an event object to signal. The function is passed -+ an OS event object handle (which allows the OS to have the -+ calling thread wait on the associated event object). -+ The calling thread will be rescheduled when the associated -+ event object signals. -+ If the event object has not signalled after a default timeout -+ period (defined in EVENT_OBJECT_TIMEOUT_MS), the function -+ will return with the result code PVRSRV_ERROR_TIMEOUT. -+ -+ Note: This function should be used only by kernel thread. -+ This is because all kernel threads are freezable and -+ this function allows the kernel to freeze the threads -+ when waiting. -+ -+ See OSEventObjectWait() for more details. -+ -+@Input hOSEventKM the OS event object handle associated with -+ the event object. -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+#if defined(__linux__) && defined(__KERNEL__) -+PVRSRV_ERROR OSEventObjectWaitKernel(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus); -+#else -+#define OSEventObjectWaitKernel OSEventObjectWaitTimeout -+#endif -+ -+/*************************************************************************/ /*! -+@Function OSSuspendTaskInterruptible -+@Description Suspend the current task into interruptible state. -+@Return none. -+*/ /**************************************************************************/ -+#if defined(__linux__) && defined(__KERNEL__) -+void OSSuspendTaskInterruptible(void); -+#endif -+ -+/*************************************************************************/ /*! -+@Function OSEventObjectWaitTimeout -+@Description Wait for an event object to signal or timeout. The function -+ is passed an OS event object handle (which allows the OS to -+ have the calling thread wait on the associated event object). -+ The calling thread will be rescheduled when the associated -+ event object signals. -+ If the event object has not signalled after the specified -+ timeout period (passed in 'uiTimeoutus'), the function -+ will return with the result code PVRSRV_ERROR_TIMEOUT. -+@Input hOSEventKM the OS event object handle associated with -+ the event object. -+@Input uiTimeoutus the timeout period (in usecs) -+@Return PVRSRV_OK on success, a failure code otherwise. 
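A sketch of a waiter built on the event-object calls documented here (OSEventObjectOpen/Close are documented a little further below). The helper name and the decision to keep waiting through timeouts are assumptions for illustration.

static PVRSRV_ERROR WaitForWork(IMG_HANDLE hEventObject)
{
    IMG_HANDLE hOSEvent;
    PVRSRV_ERROR eError;

    /* Each waiter opens its own OS handle on the shared event object. */
    eError = OSEventObjectOpen(hEventObject, &hOSEvent);
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    do
    {
        /* Returns PVRSRV_ERROR_TIMEOUT if nothing signalled the object
         * within EVENT_OBJECT_TIMEOUT_MS. */
        eError = OSEventObjectWait(hOSEvent);
    } while (eError == PVRSRV_ERROR_TIMEOUT);

    (void) OSEventObjectClose(hOSEvent);
    return eError;
}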
-+*/ /**************************************************************************/ -+PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus); -+ -+/*************************************************************************/ /*! -+@Function OSEventObjectDumpDebugInfo -+@Description Emits debug counters/stats related to the event object passed -+@Input hOSEventKM the OS event object handle associated with -+ the event object. -+@Return None. -+*/ /**************************************************************************/ -+void OSEventObjectDumpDebugInfo(IMG_HANDLE hOSEventKM); -+ -+/*************************************************************************/ /*! -+@Function OSEventObjectOpen -+@Description Open an OS handle on the specified event object. -+ This OS handle may then be used to make a thread wait for -+ that event object to signal. -+@Input hEventObject Event object handle. -+@Output phOSEvent OS handle to the returned event object. -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject, -+ IMG_HANDLE *phOSEvent); -+ -+/*************************************************************************/ /*! -+@Function OSEventObjectClose -+@Description Close an OS handle previously opened for an event object. -+@Input hOSEventKM OS event object handle to close. -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM); -+ -+/*************************************************************************/ /*! -+@Function OSWaitus -+@Description Implements a busy wait of the specified number of microseconds. -+ This function does NOT release thread quanta. -+@Input ui32Timeus The duration of the wait period (in us) -+@Return None. -+*/ /**************************************************************************/ -+void OSWaitus(IMG_UINT32 ui32Timeus); -+ -+/*************************************************************************/ /*! -+@Function OSSleepms -+@Description Implements a sleep of the specified number of milliseconds. -+ This function may allow pre-emption, meaning the thread -+ may potentially not be rescheduled for a longer period. -+@Input ui32Timems The duration of the sleep (in ms) -+@Return None. -+*/ /**************************************************************************/ -+void OSSleepms(IMG_UINT32 ui32Timems); -+ -+/*************************************************************************/ /*! -+@Function OSReleaseThreadQuanta -+@Description Relinquishes the current thread's execution time-slice, -+ permitting the OS scheduler to schedule another thread. -+@Return None. -+*/ /**************************************************************************/ -+void OSReleaseThreadQuanta(void); -+ -+#if defined(__linux__) && defined(__KERNEL__) -+#define OSReadMemoryBarrier() rmb() -+#else -+/*************************************************************************/ /*! -+@Function OSReadMemoryBarrier -+@Description Insert a read memory barrier. -+ The read memory barrier guarantees that all load (read) -+ operations specified before the barrier will appear to happen -+ before all of the load operations specified after the barrier. 
-+*/ /**************************************************************************/ -+void OSReadMemoryBarrier(void); -+#endif -+/*************************************************************************/ /*! -+@Function OSMemoryBarrier -+@Description Insert a read/write memory barrier. -+ The read and write memory barrier guarantees that all load -+ (read) and all store (write) operations specified before the -+ barrier will appear to happen before all of the load/store -+ operations specified after the barrier. -+@Input hReadback Optional pointer to memory to read back, can be -+ useful for flushing queues in bus interconnects to RAM before -+ device (GPU) access the shared memory. -+@Return None. -+*/ /**************************************************************************/ -+void OSMemoryBarrier(volatile void *hReadback); -+/*************************************************************************/ /*! -+@Function OSWriteMemoryBarrier -+@Description Insert a write memory barrier. -+ The write memory barrier guarantees that all store operations -+ (writes) specified before the barrier will appear to happen -+ before all of the store operations specified after the barrier. -+@Input hReadback Optional pointer to memory to read back, can be -+ useful for flushing queues in bus interconnects to RAM before -+ device (GPU) access the shared memory. -+@Return None. -+*/ /**************************************************************************/ -+void OSWriteMemoryBarrier(volatile void *hReadback); -+ -+/*************************************************************************/ /*! -+*/ /**************************************************************************/ -+ -+/* The access method is dependent on the location of the physical memory that -+ * makes up the PhyHeaps defined for the system and the CPU architecture. These -+ * macros may change in future to accommodate different access requirements. -+ */ -+/*! Performs a 32 bit word read from the device memory. */ -+#define OSReadDeviceMem32(addr) (*((volatile IMG_UINT32 __force *)((void*)addr))) -+/*! Performs a 32 bit word write to the device memory. */ -+#define OSWriteDeviceMem32(addr, val) (*((volatile IMG_UINT32 __force *)((void*)addr)) = (IMG_UINT32)(val)) -+/*! 
Performs a 32 bit word write to the device memory and issues a write memory barrier */ -+#define OSWriteDeviceMem32WithWMB(addr, val) \ -+ do { \ -+ *((volatile IMG_UINT32 __force *)((void*)addr)) = (IMG_UINT32)(val); \ -+ OSWriteMemoryBarrier(addr); \ -+ } while (0) -+ -+#if defined(NO_HARDWARE) -+ /* OSReadHWReg operations skipped in no hardware builds */ -+ #define OSReadUncheckedHWReg8(addr, off) ((void)(addr), 0x4eU) -+ #define OSReadUncheckedHWReg16(addr, off) ((void)(addr), 0x3a4eU) -+ #define OSReadUncheckedHWReg32(addr, off) ((void)(addr), 0x30f73a4eU) -+#if defined(__QNXNTO__) && __SIZEOF_LONG__ == 8 -+ /* This is needed for 64-bit QNX builds where the size of a long is 64 bits */ -+ #define OSReadUncheckedHWReg64(addr, off) ((void)(addr), 0x5b376c9d30f73a4eUL) -+#else -+ #define OSReadUncheckedHWReg64(addr, off) ((void)(addr), 0x5b376c9d30f73a4eULL) -+#endif -+ -+ #define OSWriteUncheckedHWReg8(addr, off, val) -+ #define OSWriteUncheckedHWReg16(addr, off, val) -+ #define OSWriteUncheckedHWReg32(addr, off, val) -+ #define OSWriteUncheckedHWReg64(addr, off, val) ((void)(val)) -+ -+ #define OSReadHWReg8(addr, off) OSReadUncheckedHWReg8(addr, off) -+ #define OSReadHWReg16(addr, off) OSReadUncheckedHWReg16(addr, off) -+ #define OSReadHWReg32(addr, off) OSReadUncheckedHWReg32(addr, off) -+ #define OSReadHWReg64(addr, off) OSReadUncheckedHWReg64(addr, off) -+ -+ #define OSWriteHWReg8(addr, off, val) OSWriteUncheckedHWReg8(addr, off, val) -+ #define OSWriteHWReg16(addr, off, val) OSWriteUncheckedHWReg16(addr, off, val) -+ #define OSWriteHWReg32(addr, off, val) OSWriteUncheckedHWReg32(addr, off, val) -+ #define OSWriteHWReg64(addr, off, val) OSWriteUncheckedHWReg64(addr, off, val) -+ -+#else -+ -+#if defined(__linux__) && defined(__KERNEL__) -+ #define OSReadUncheckedHWReg8(addr, off) ((IMG_UINT8)readb((IMG_BYTE __iomem *)(addr) + (off))) -+ #define OSReadUncheckedHWReg16(addr, off) ((IMG_UINT16)readw((IMG_BYTE __iomem *)(addr) + (off))) -+ #define OSReadUncheckedHWReg32(addr, off) ((IMG_UINT32)readl((IMG_BYTE __iomem *)(addr) + (off))) -+ -+ /* Little endian support only */ -+ #define OSReadUncheckedHWReg64(addr, off) \ -+ ({ \ -+ __typeof__(addr) _addr = addr; \ -+ __typeof__(off) _off = off; \ -+ (IMG_UINT64) \ -+ ( \ -+ ( (IMG_UINT64)(readl((IMG_BYTE __iomem *)(_addr) + (_off) + 4)) << 32) \ -+ | readl((IMG_BYTE __iomem *)(_addr) + (_off)) \ -+ ); \ -+ }) -+ -+ #define OSWriteUncheckedHWReg8(addr, off, val) writeb((IMG_UINT8)(val), (IMG_BYTE __iomem *)(addr) + (off)) -+ #define OSWriteUncheckedHWReg16(addr, off, val) writew((IMG_UINT16)(val), (IMG_BYTE __iomem *)(addr) + (off)) -+ #define OSWriteUncheckedHWReg32(addr, off, val) writel((IMG_UINT32)(val), (IMG_BYTE __iomem *)(addr) + (off)) -+ /* Little endian support only */ -+ #define OSWriteUncheckedHWReg64(addr, off, val) do \ -+ { \ -+ __typeof__(addr) _addr = addr; \ -+ __typeof__(off) _off = off; \ -+ __typeof__(val) _val = val; \ -+ writel((IMG_UINT32)((_val) & 0xffffffff), (IMG_BYTE __iomem *)(_addr) + (_off)); \ -+ writel((IMG_UINT32)(((IMG_UINT64)(_val) >> 32) & 0xffffffff), (IMG_BYTE __iomem *)(_addr) + (_off) + 4); \ -+ } while (0) -+ -+#else /* defined(__linux__) && defined(__KERNEL__) */ -+/*************************************************************************/ /*! -+@Function OSReadUncheckedHWReg8 -+@Description Read from an 8-bit memory-mapped device register. -+ The implementation should not permit the compiler to -+ reorder the I/O sequence. 
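A brief sketch of the accessors above in use. The register base, the 0x80/0x84 offsets and the doorbell/status semantics are hypothetical; the point is the ordering of the shared-memory store, the barrier and the register write.

static void KickDevice(void __iomem *pvRegBase, void *pvSharedFlag)
{
    /* Publish a flag in device-visible memory and make sure the store is
     * pushed out before the doorbell write is issued. */
    OSWriteDeviceMem32WithWMB(pvSharedFlag, 1);

    /* Hypothetical doorbell register at byte offset 0x80. */
    OSWriteHWReg32(pvRegBase, 0x80, 0x1);

    /* Read back a (hypothetical) status register; in NO_HARDWARE builds
     * this returns a constant instead of touching the hardware. */
    (void) OSReadHWReg32(pvRegBase, 0x84);
}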
-+ The implementation should ensure that for a NO_HARDWARE -+ build the code does not attempt to read from a location -+ but instead returns a constant value. -+@Input pvLinRegBaseAddr The virtual base address of the register -+ block. -+@Input ui32Offset The byte offset from the base address of -+ the register to be read. -+@Return The byte read. -+*/ /**************************************************************************/ -+ IMG_UINT8 OSReadUncheckedHWReg8(volatile void *pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset); -+ -+/*************************************************************************/ /*! -+@Function OSReadUncheckedHWReg16 -+@Description Read from a 16-bit memory-mapped device register. -+ The implementation should not permit the compiler to -+ reorder the I/O sequence. -+ The implementation should ensure that for a NO_HARDWARE -+ build the code does not attempt to read from a location -+ but instead returns a constant value. -+@Input pvLinRegBaseAddr The virtual base address of the register -+ block. -+@Input ui32Offset The byte offset from the base address of -+ the register to be read. -+@Return The word read. -+*/ /**************************************************************************/ -+ IMG_UINT16 OSReadUncheckedHWReg16(volatile void *pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset); -+ -+/*************************************************************************/ /*! -+@Function OSReadUncheckedHWReg32 -+@Description Read from a 32-bit memory-mapped device register. -+ The implementation should not permit the compiler to -+ reorder the I/O sequence. -+ The implementation should ensure that for a NO_HARDWARE -+ build the code does not attempt to read from a location -+ but instead returns a constant value. -+@Input pvLinRegBaseAddr The virtual base address of the register -+ block. -+@Input ui32Offset The byte offset from the base address of -+ the register to be read. -+@Return The long word read. -+*/ /**************************************************************************/ -+ IMG_UINT32 OSReadUncheckedHWReg32(volatile void *pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset); -+ -+/*************************************************************************/ /*! -+@Function OSReadUncheckedHWReg64 -+@Description Read from a 64-bit memory-mapped device register. -+ The implementation should not permit the compiler to -+ reorder the I/O sequence. -+ The implementation should ensure that for a NO_HARDWARE -+ build the code does not attempt to read from a location -+ but instead returns a constant value. -+@Input pvLinRegBaseAddr The virtual base address of the register -+ block. -+@Input ui32Offset The byte offset from the base address of -+ the register to be read. -+@Return The long long word read. -+*/ /**************************************************************************/ -+ IMG_UINT64 OSReadUncheckedHWReg64(volatile void *pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset); -+ -+/*************************************************************************/ /*! -+@Function OSWriteUncheckedHWReg8 -+@Description Write to an 8-bit memory-mapped device register. -+ The implementation should not permit the compiler to -+ reorder the I/O sequence. -+ The implementation should ensure that for a NO_HARDWARE -+ build the code does not attempt to write to a location. -+@Input pvLinRegBaseAddr The virtual base address of the register -+ block. -+@Input ui32Offset The byte offset from the base address of -+ the register to be written to. 
-+@Input ui8Value The byte to be written to the register. -+@Return None. -+*/ /**************************************************************************/ -+ void OSWriteUncheckedHWReg8(volatile void *pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset, IMG_UINT8 ui8Value); -+ -+/*************************************************************************/ /*! -+@Function OSWriteUncheckedHWReg16 -+@Description Write to a 16-bit memory-mapped device register. -+ The implementation should not permit the compiler to -+ reorder the I/O sequence. -+ The implementation should ensure that for a NO_HARDWARE -+ build the code does not attempt to write to a location. -+@Input pvLinRegBaseAddr The virtual base address of the register -+ block. -+@Input ui32Offset The byte offset from the base address of -+ the register to be written to. -+@Input ui16Value The word to be written to the register. -+@Return None. -+*/ /**************************************************************************/ -+ void OSWriteUncheckedHWReg16(volatile void *pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT16 ui16Value); -+ -+/*************************************************************************/ /*! -+@Function OSWriteUncheckedHWReg32 -+@Description Write to a 32-bit memory-mapped device register. -+ The implementation should not permit the compiler to -+ reorder the I/O sequence. -+ The implementation should ensure that for a NO_HARDWARE -+ build the code does not attempt to write to a location. -+@Input pvLinRegBaseAddr The virtual base address of the register -+ block. -+@Input ui32Offset The byte offset from the base address of -+ the register to be written to. -+@Input ui32Value The long word to be written to the register. -+@Return None. -+*/ /**************************************************************************/ -+ void OSWriteUncheckedHWReg32(volatile void *pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Value); -+ -+/*************************************************************************/ /*! -+@Function OSWriteUncheckedHWReg64 -+@Description Write to a 64-bit memory-mapped device register. -+ The implementation should not permit the compiler to -+ reorder the I/O sequence. -+ The implementation should ensure that for a NO_HARDWARE -+ build the code does not attempt to write to a location. -+@Input pvLinRegBaseAddr The virtual base address of the register -+ block. -+@Input ui32Offset The byte offset from the base address of -+ the register to be written to. -+@Input ui64Value The long long word to be written to the -+ register. -+@Return None. -+*/ /**************************************************************************/ -+ void OSWriteUncheckedHWReg64(volatile void *pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT64 ui64Value); -+ -+#endif /* defined(__linux__) && defined(__KERNEL__) */ -+ -+#if !defined(DOXYGEN) -+ -+#if defined(RGX_HOST_SECURE_REGBANK_OFFSET) && defined(DEBUG) -+static INLINE bool _NonSecureRegister(IMG_UINT32 ui32Offset) -+{ -+ const IMG_UINT32 ui32PerCoreRegBankSize = RGX_HOST_SECURE_REGBANK_OFFSET + RGX_HOST_SECURE_REGBANK_SIZE; -+ const IMG_UINT32 ui32RegOffsetInCoreBank = ui32Offset % ui32PerCoreRegBankSize; -+ -+ if (ui32RegOffsetInCoreBank < RGX_HOST_SECURE_REGBANK_OFFSET) -+ { -+ return true; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Secure register (0x%X) accessed incorrectly. 
" -+ "Call OSUncheckedHWReg instead with " -+ "psDevInfo->pvSecureRegsBaseKM as a register base.", -+ ui32RegOffsetInCoreBank)); -+ return false; -+ } -+ -+} -+#else -+#define _NonSecureRegister(ui32Offset) (true) -+#endif -+ -+ /* systems using real hardware must check that regular register -+ * operations don't attempt to access secure registers */ -+ static INLINE IMG_UINT8 OSReadHWReg8(volatile void __iomem *pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset) -+ { -+ PVR_ASSERT(_NonSecureRegister(ui32Offset)); -+ return OSReadUncheckedHWReg8(pvLinRegBaseAddr, ui32Offset); -+ } -+ -+ static INLINE IMG_UINT16 OSReadHWReg16(volatile void __iomem *pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset) -+ { -+ PVR_ASSERT(_NonSecureRegister(ui32Offset)); -+ return OSReadUncheckedHWReg16(pvLinRegBaseAddr, ui32Offset); -+ } -+ -+ static INLINE IMG_UINT32 OSReadHWReg32(volatile void __iomem *pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset) -+ { -+ PVR_ASSERT(_NonSecureRegister(ui32Offset)); -+ return OSReadUncheckedHWReg32(pvLinRegBaseAddr, ui32Offset); -+ } -+ -+ static INLINE IMG_UINT64 OSReadHWReg64(volatile void __iomem *pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset) -+ { -+ PVR_ASSERT(_NonSecureRegister(ui32Offset)); -+ return OSReadUncheckedHWReg64(pvLinRegBaseAddr, ui32Offset); -+ } -+ -+ static INLINE void OSWriteHWReg8(volatile void __iomem *pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT8 ui8Value) -+ { -+ PVR_ASSERT(_NonSecureRegister(ui32Offset)); -+ OSWriteUncheckedHWReg8(pvLinRegBaseAddr, ui32Offset, ui8Value); -+ } -+ -+ static INLINE void OSWriteHWReg16(volatile void __iomem *pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT16 ui16Value) -+ { -+ PVR_ASSERT(_NonSecureRegister(ui32Offset)); -+ OSWriteUncheckedHWReg16(pvLinRegBaseAddr, ui32Offset, ui16Value); -+ } -+ -+ static INLINE void OSWriteHWReg32(volatile void __iomem *pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Value) -+ { -+ PVR_ASSERT(_NonSecureRegister(ui32Offset)); -+ OSWriteUncheckedHWReg32(pvLinRegBaseAddr, ui32Offset, ui32Value); -+ } -+ -+ static INLINE void OSWriteHWReg64(volatile void __iomem *pvLinRegBaseAddr, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT64 ui64Value) -+ { -+ PVR_ASSERT(_NonSecureRegister(ui32Offset)); -+ OSWriteUncheckedHWReg64(pvLinRegBaseAddr, ui32Offset, ui64Value); -+ } -+ -+#endif /* !defined(DOXYGEN) */ -+#endif /* defined(NO_HARDWARE) */ -+ -+/*************************************************************************/ /*! -+@Description Pointer to a timer callback function. -+@Input pvData Pointer to timer specific data. -+*/ /**************************************************************************/ -+typedef void (*PFN_TIMER_FUNC)(void* pvData); -+ -+/*************************************************************************/ /*! -+@Function OSAddTimer -+@Description OS specific function to install a timer callback. The -+ timer will then need to be enabled, as it is disabled by -+ default. -+ When enabled, the callback will be invoked once the specified -+ timeout has elapsed. -+@Input pfnTimerFunc Timer callback -+@Input *pvData Callback data -+@Input ui32MsTimeout Callback period -+@Return Valid handle on success, NULL if a failure -+*/ /**************************************************************************/ -+IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout); -+ -+/*************************************************************************/ /*! -+@Function OSRemoveTimer -+@Description Removes the specified timer. 
The handle becomes invalid and -+ should no longer be used. -+@Input hTimer handle of the timer to be removed -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSRemoveTimer(IMG_HANDLE hTimer); -+ -+/*************************************************************************/ /*! -+@Function OSEnableTimer -+@Description Enable the specified timer. after enabling, the timer will -+ invoke the associated callback at an interval determined by -+ the configured timeout period until disabled. -+@Input hTimer handle of the timer to be enabled -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSEnableTimer(IMG_HANDLE hTimer); -+ -+/*************************************************************************/ /*! -+@Function OSDisableTimer -+@Description Disable the specified timer -+@Input hTimer handle of the timer to be disabled -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSDisableTimer(IMG_HANDLE hTimer); -+ -+ -+/*************************************************************************/ /*! -+ @Function OSPanic -+ @Description Take action in response to an unrecoverable driver error -+ @Return None -+*/ /**************************************************************************/ -+void OSPanic(void); -+ -+/*************************************************************************/ /*! -+@Function OSCopyToUser -+@Description Copy data to user-addressable memory from kernel-addressable -+ memory. -+ Note that pvDest may be an invalid address or NULL and the -+ function should return an error in this case. -+ For operating systems that do not have a user/kernel space -+ distinction, this function should be implemented as a stub -+ which simply returns PVRSRV_ERROR_NOT_SUPPORTED. -+@Input pvProcess handle of the connection -+@Input pvDest pointer to the destination User memory -+@Input pvSrc pointer to the source Kernel memory -+@Input ui32Bytes size of the data to be copied -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSCopyToUser(void *pvProcess, void __user *pvDest, const void *pvSrc, size_t ui32Bytes); -+ -+/*************************************************************************/ /*! -+@Function OSCopyFromUser -+@Description Copy data from user-addressable memory to kernel-addressable -+ memory. -+ Note that pvSrc may be an invalid address or NULL and the -+ function should return an error in this case. -+ For operating systems that do not have a user/kernel space -+ distinction, this function should be implemented as a stub -+ which simply returns PVRSRV_ERROR_NOT_SUPPORTED. -+@Input pvProcess handle of the connection -+@Input pvDest pointer to the destination Kernel memory -+@Input pvSrc pointer to the source User memory -+@Input ui32Bytes size of the data to be copied -+@Return PVRSRV_OK on success, a failure code otherwise. 
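A sketch of the timer API documented above; the callback, the 100 ms period and the helper names are illustrative. Timers are created disabled, so OSEnableTimer must follow OSAddTimer.

static void DevicePollCb(void *pvData)
{
    /* Invoked each time the period elapses while the timer is enabled. */
    PVRSRV_DEVICE_NODE *psDevNode = pvData;
    (void) psDevNode; /* poll some device state here */
}

static IMG_HANDLE StartPollTimer(PVRSRV_DEVICE_NODE *psDevNode)
{
    IMG_HANDLE hTimer = OSAddTimer(DevicePollCb, psDevNode, 100 /* ms */);

    if (hTimer != NULL && OSEnableTimer(hTimer) != PVRSRV_OK)
    {
        (void) OSRemoveTimer(hTimer);
        hTimer = NULL;
    }
    return hTimer;
}

/* Tear-down would be OSDisableTimer(hTimer) followed by OSRemoveTimer(hTimer). */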
-+*/ /**************************************************************************/ -+PVRSRV_ERROR OSCopyFromUser(void *pvProcess, void *pvDest, const void __user *pvSrc, size_t ui32Bytes); -+ -+#if defined(__linux__) || defined(INTEGRITY_OS) -+#define OSBridgeCopyFromUser OSCopyFromUser -+#define OSBridgeCopyToUser OSCopyToUser -+#else -+/*************************************************************************/ /*! -+@Function OSBridgeCopyFromUser -+@Description Copy data from user-addressable memory into kernel-addressable -+ memory as part of a bridge call operation. -+ For operating systems that do not have a user/kernel space -+ distinction, this function will require whatever implementation -+ is needed to pass data for making the bridge function call. -+ For operating systems which do have a user/kernel space -+ distinction (such as Linux) this function may be defined so -+ as to equate to a call to OSCopyFromUser(). -+@Input pvProcess handle of the connection -+@Input pvDest pointer to the destination Kernel memory -+@Input pvSrc pointer to the source User memory -+@Input ui32Bytes size of the data to be copied -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSBridgeCopyFromUser (void *pvProcess, -+ void *pvDest, -+ const void *pvSrc, -+ size_t ui32Bytes); -+ -+/*************************************************************************/ /*! -+@Function OSBridgeCopyToUser -+@Description Copy data to user-addressable memory from kernel-addressable -+ memory as part of a bridge call operation. -+ For operating systems that do not have a user/kernel space -+ distinction, this function will require whatever implementation -+ is needed to pass data for making the bridge function call. -+ For operating systems which do have a user/kernel space -+ distinction (such as Linux) this function may be defined so -+ as to equate to a call to OSCopyToUser(). -+@Input pvProcess handle of the connection -+@Input pvDest pointer to the destination User memory -+@Input pvSrc pointer to the source Kernel memory -+@Input ui32Bytes size of the data to be copied -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSBridgeCopyToUser (void *pvProcess, -+ void *pvDest, -+ const void *pvSrc, -+ size_t ui32Bytes); -+#endif -+ -+/* To be increased if required in future */ -+#define PVRSRV_MAX_BRIDGE_IN_SIZE 0x2000 /*!< Size of the memory block used to hold data passed in to a bridge call */ -+#define PVRSRV_MAX_BRIDGE_OUT_SIZE 0x1000 /*!< Size of the memory block used to hold data returned from a bridge call */ -+ -+/*************************************************************************/ /*! -+@Function OSPlatformBridgeInit -+@Description Called during device creation to allow the OS port to register -+ other bridge modules and related resources that it requires. -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSPlatformBridgeInit(void); -+ -+/*************************************************************************/ /*! -+@Function OSPlatformBridgeDeInit -+@Description Called during device destruction to allow the OS port to -+ deregister its OS specific bridges and clean up other -+ related resources. 
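A sketch of the copy direction in a typical bridge handler. The IN/OUT structure types, the handler shape and the connection/user pointers are hypothetical; real bridge entry points are generated, this only illustrates which copy goes which way.

typedef struct { IMG_UINT32 ui32Value;  } BRIDGE_IN_EXAMPLE;  /* hypothetical */
typedef struct { IMG_UINT32 ui32Result; } BRIDGE_OUT_EXAMPLE; /* hypothetical */

static PVRSRV_ERROR ExampleBridgeHandler(void *pvConnection,
                                         void __user *pvUserIn,
                                         void __user *pvUserOut)
{
    BRIDGE_IN_EXAMPLE sIn;
    BRIDGE_OUT_EXAMPLE sOut;
    PVRSRV_ERROR eError;

    /* Pull the caller's parameters into kernel memory. */
    eError = OSBridgeCopyFromUser(pvConnection, &sIn, pvUserIn, sizeof(sIn));
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    sOut.ui32Result = sIn.ui32Value + 1; /* the real work would go here */

    /* Push the results back out to the caller. */
    return OSBridgeCopyToUser(pvConnection, pvUserOut, &sOut, sizeof(sOut));
}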
-+*/ /**************************************************************************/ -+void OSPlatformBridgeDeInit(void); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVToNativeError -+@Description Returns the OS-specific equivalent error number/code for -+ the specified PVRSRV_ERROR value. -+ If there is no equivalent, or the PVRSRV_ERROR value is -+ PVRSRV_OK (no error), 0 is returned. -+@Return The OS equivalent error code. -+*/ /**************************************************************************/ -+int PVRSRVToNativeError(PVRSRV_ERROR e); -+/** See PVRSRVToNativeError(). */ -+#define OSPVRSRVToNativeError(e) ( (PVRSRV_OK == e)? 0: PVRSRVToNativeError(e) ) -+ -+ -+#if defined(__linux__) && defined(__KERNEL__) -+ -+/* Provide LockDep friendly definitions for Services RW locks */ -+#include -+#include -+#include "allocmem.h" -+ -+#define OSWRLockCreate(ppsLock) ({ \ -+ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \ -+ *(ppsLock) = OSAllocMem(sizeof(struct rw_semaphore)); \ -+ if (*(ppsLock)) { init_rwsem(*(ppsLock)); e = PVRSRV_OK; }; \ -+ e;}) -+#define OSWRLockDestroy(psLock) ({OSFreeMem(psLock); PVRSRV_OK;}) -+ -+#define OSWRLockAcquireRead(psLock) ({down_read(psLock); PVRSRV_OK;}) -+#define OSWRLockAcquireReadNested(psLock, subclass) ({down_read_nested((psLock), (subclass)); PVRSRV_OK;}) -+#define OSWRLockReleaseRead(psLock) ({up_read(psLock); PVRSRV_OK;}) -+#define OSWRLockAcquireWrite(psLock) ({down_write(psLock); PVRSRV_OK;}) -+#define OSWRLockReleaseWrite(psLock) ({up_write(psLock); PVRSRV_OK;}) -+ -+#elif defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) -+/* User-mode unit tests use these definitions on Linux */ -+ -+PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock); -+void OSWRLockDestroy(POSWR_LOCK psLock); -+void OSWRLockAcquireRead(POSWR_LOCK psLock); -+#define OSWRLockAcquireReadNested(psLock, subclass) OSWRLockAcquireRead((psLock)) -+void OSWRLockReleaseRead(POSWR_LOCK psLock); -+void OSWRLockAcquireWrite(POSWR_LOCK psLock); -+void OSWRLockReleaseWrite(POSWR_LOCK psLock); -+ -+#else -+ -+#if !defined(DOXYGEN) -+#define OSFUNC_NOT_IMPLEMENTED 0 -+#define OSFUNC_NOT_IMPLEMENTED_ASSERT() PVR_ASSERT(OSFUNC_NOT_IMPLEMENTED) -+#endif -+ -+/*************************************************************************/ /*! -+@Function OSWRLockCreate -+@Description Create a writer/reader lock. -+ This type of lock allows multiple concurrent readers but -+ only a single writer, allowing for optimized performance. -+@Output ppsLock A handle to the created WR lock. -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+static INLINE PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock) -+{ -+ PVR_UNREFERENCED_PARAMETER(ppsLock); -+ -+ OSFUNC_NOT_IMPLEMENTED_ASSERT(); -+ -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+} -+ -+/*************************************************************************/ /*! -+@Function OSWRLockDestroy -+@Description Destroys a writer/reader lock. -+@Input psLock The handle of the WR lock to be destroyed. -+@Return None. -+*/ /**************************************************************************/ -+static INLINE void OSWRLockDestroy(POSWR_LOCK psLock) -+{ -+ PVR_UNREFERENCED_PARAMETER(psLock); -+ OSFUNC_NOT_IMPLEMENTED_ASSERT(); -+} -+ -+/*************************************************************************/ /*! -+@Function OSWRLockAcquireRead -+@Description Acquire a writer/reader read lock. 
-+ If the write lock is already acquired, the caller will -+ block until it is released. -+@Input psLock The handle of the WR lock to be acquired for -+ reading. -+@Return None. -+*/ /**************************************************************************/ -+static INLINE void OSWRLockAcquireRead(POSWR_LOCK psLock) -+{ -+ PVR_UNREFERENCED_PARAMETER(psLock); -+ OSFUNC_NOT_IMPLEMENTED_ASSERT(); -+} -+ -+/*************************************************************************/ /*! -+@Function OSWRLockAcquireReadNested -+@Description Acquire a nested writer/reader read lock. -+ If the write lock is already acquired, the caller will -+ block until it is released. -+ For operating systems other than Linux, this equates to an -+ OSWRLockAcquireRead() call. On Linux, this function wraps a call -+ to down_read_nested(). This recognises the scenario where -+ there may be multiple subclasses within a particular class -+ of lock. In such cases, the order in which the locks belonging -+ these various subclasses are acquired is important and must be -+ validated. -+@Input psLock The handle of the WR lock to be acquired for -+ reading. -+@Input iSubclass The subclass of the lock. -+@Return None. -+*/ /**************************************************************************/ -+static INLINE void OSWRLockAcquireReadNested(POSWR_LOCK psLock, IMG_INT iSubclass) -+{ -+ PVR_UNREFERENCED_PARAMETER(psLock); -+ PVR_UNREFERENCED_PARAMETER(iSubclass); -+ OSFUNC_NOT_IMPLEMENTED_ASSERT(); -+} -+ -+/*************************************************************************/ /*! -+@Function OSWRLockReleaseRead -+@Description Release a writer/reader read lock. -+@Input psLock The handle of the WR lock whose read lock is to -+ be released. -+@Return None. -+*/ /**************************************************************************/ -+static INLINE void OSWRLockReleaseRead(POSWR_LOCK psLock) -+{ -+ PVR_UNREFERENCED_PARAMETER(psLock); -+ OSFUNC_NOT_IMPLEMENTED_ASSERT(); -+} -+ -+/*************************************************************************/ /*! -+@Function OSWRLockAcquireWrite -+@Description Acquire a writer/reader write lock. -+ If the write lock or any read lock are already acquired, -+ the caller will block until all are released. -+@Input psLock The handle of the WR lock to be acquired for -+ writing. -+@Return None. -+*/ /**************************************************************************/ -+static INLINE void OSWRLockAcquireWrite(POSWR_LOCK psLock) -+{ -+ PVR_UNREFERENCED_PARAMETER(psLock); -+ OSFUNC_NOT_IMPLEMENTED_ASSERT(); -+} -+ -+/*************************************************************************/ /*! -+@Function OSWRLockReleaseWrite -+@Description Release a writer/reader write lock. -+@Input psLock The handle of the WR lock whose write lock is to -+ be released. -+@Return None -+*/ /**************************************************************************/ -+static INLINE void OSWRLockReleaseWrite(POSWR_LOCK psLock) -+{ -+ PVR_UNREFERENCED_PARAMETER(psLock); -+ OSFUNC_NOT_IMPLEMENTED_ASSERT(); -+} -+#endif -+ -+/*************************************************************************/ /*! -+@Function OSDivide64r64 -+@Description Divide a 64-bit value by a 32-bit value. Return the 64-bit -+ quotient. -+ The remainder is also returned in 'pui32Remainder'. -+@Input ui64Divident The number to be divided. -+@Input ui32Divisor The 32-bit value 'ui64Divident' is to -+ be divided by. -+@Output pui32Remainder The remainder of the division. 
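A sketch of the reader/writer lock calls documented above protecting some shared structure; the lock variable and helper functions are illustrative.

static POSWR_LOCK g_hListLock; /* created once at init time */

static PVRSRV_ERROR ListInit(void)
{
    return OSWRLockCreate(&g_hListLock);
}

static void ListReadSide(void)
{
    OSWRLockAcquireRead(g_hListLock);
    /* ... walk the shared structure; multiple readers may hold this ... */
    OSWRLockReleaseRead(g_hListLock);
}

static void ListWriteSide(void)
{
    OSWRLockAcquireWrite(g_hListLock);
    /* ... modify the shared structure; the writer is exclusive ... */
    OSWRLockReleaseWrite(g_hListLock);
}

static void ListDeInit(void)
{
    OSWRLockDestroy(g_hListLock);
}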
-+@Return The 64-bit quotient (result of the division). -+*/ /**************************************************************************/ -+IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder); -+ -+/*************************************************************************/ /*! -+@Function OSDivide64 -+@Description Divide a 64-bit value by a 32-bit value. Return a 32-bit -+ quotient. -+ The remainder is also returned in 'pui32Remainder'. -+ This function allows for a more optimal implementation -+ of a 64-bit division when the result is known to be -+ representable in 32-bits. -+@Input ui64Divident The number to be divided. -+@Input ui32Divisor The 32-bit value 'ui64Divident' is to -+ be divided by. -+@Output pui32Remainder The remainder of the division. -+@Return The 32-bit quotient (result of the division). -+*/ /**************************************************************************/ -+IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder); -+ -+/*************************************************************************/ /*! -+@Function OSDumpStack -+@Description Dump the current task information and its stack trace. -+@Return None -+*/ /**************************************************************************/ -+void OSDumpStack(void); -+ -+/*************************************************************************/ /*! -+@Function OSUserModeAccessToPerfCountersEn -+@Description Permit User-mode access to CPU performance counter -+ registers. -+ This function is called during device initialisation. -+ Certain CPU architectures may need to explicitly permit -+ User mode access to performance counters - if this is -+ required, the necessary code should be implemented inside -+ this function. -+@Return None. -+*/ /**************************************************************************/ -+void OSUserModeAccessToPerfCountersEn(void); -+ -+/*************************************************************************/ /*! -+@Function OSDebugSignalPID -+@Description Sends a SIGTRAP signal to a specific PID in user mode for -+ debugging purposes. The user mode process can register a handler -+ against this signal. -+ This is necessary to support the Rogue debugger. If the Rogue -+ debugger is not used then this function may be implemented as -+ a stub. -+@Input ui32PID The PID for the signal. -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID); -+ -+#if defined(__linux__) && defined(__KERNEL__) && !defined(DOXYGEN) -+#define OSWarnOn(a) WARN_ON(a) -+#else -+/*************************************************************************/ /*! -+@Function OSWarnOn -+@Description This API allows the driver to emit a special token and stack -+ dump to the server log when an issue is detected that needs the -+ OS to be notified. The token or call may be used to trigger -+ log collection by the OS environment. -+ PVR_DPF log messages will have been emitted prior to this call. -+@Input a Expression to evaluate, if true trigger Warn signal -+@Return None -+*/ /**************************************************************************/ -+#define OSWarnOn(a) do { if ((a)) { OSDumpStack(); } } while (0) -+#endif -+ -+/*************************************************************************/ /*! 
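A small worked use of the 64-by-32 divide helpers above, e.g. converting a nanosecond timestamp to milliseconds; the helper name is illustrative.

static IMG_UINT64 NsToMs(IMG_UINT64 ui64TimeNs, IMG_UINT32 *pui32RemNs)
{
    /* 1 ms = 1,000,000 ns. The 64-bit quotient variant is used because a
     * millisecond count can exceed 32 bits for long-running timestamps. */
    return OSDivide64r64(ui64TimeNs, 1000000, pui32RemNs);
}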
-+@Function       OSIsKernelThread
-+@Description    This API determines if the current running thread is a kernel
-+                thread (i.e. one not associated with any userland process,
-+                typically an MISR handler).
-+@Return         IMG_TRUE if it is a kernel thread, otherwise IMG_FALSE.
-+*/ /**************************************************************************/
-+IMG_BOOL OSIsKernelThread(void);
-+
-+/*************************************************************************/ /*!
-+@Function       OSThreadDumpInfo
-+@Description    Traverse the thread list and call each of the stored
-+                callbacks to dump the info in debug_dump.
-+@Input          pfnDumpDebugPrintf  The 'printf' function to be called to
-+                                    display the debug info
-+@Input          pvDumpDebugFile     Optional file identifier to be passed to
-+                                    the 'printf' function if required
-+*/ /**************************************************************************/
-+void OSThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
-+                      void *pvDumpDebugFile);
-+
-+/*************************************************************************/ /*!
-+@Function       OSDumpVersionInfo
-+@Description    Store OS version information in debug dump.
-+@Input          pfnDumpDebugPrintf  The 'printf' function to be called to
-+                                    display the debug info
-+@Input          pvDumpDebugFile     Optional file identifier to be passed to
-+                                    the 'printf' function if required
-+*/ /**************************************************************************/
-+void OSDumpVersionInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
-+                       void *pvDumpDebugFile);
-+
-+/*************************************************************************/ /*!
-+@Function       OSIsWriteCombineUnalignedSafe
-+@Description    Determine if unaligned accesses to write-combine memory are
-+                safe to perform, i.e. whether we are safe from a CPU fault
-+                occurring. This test is specifically aimed at ARM64 platforms
-+                which cannot provide this guarantee if the memory is 'device'
-+                memory rather than 'normal' under the ARM memory architecture.
-+@Return         IMG_TRUE if safe, IMG_FALSE otherwise.
-+*/ /**************************************************************************/
-+IMG_BOOL OSIsWriteCombineUnalignedSafe(void);
-+
-+/*************************************************************************/ /*!
-+@Function       OSDebugLevel
-+@Description    Returns the current value of the debug level.
-+@Return         Debug level.
-+*/ /**************************************************************************/
-+IMG_UINT32 OSDebugLevel(void);
-+
-+/*************************************************************************/ /*!
-+@Function       OSSetDebugLevel
-+@Description    Sets the current value of the debug level to ui32DebugLevel.
-+@Input          ui32DebugLevel  New debug level value.
-+*/ /**************************************************************************/
-+void OSSetDebugLevel(IMG_UINT32 ui32DebugLevel);
-+
-+/*************************************************************************/ /*!
-+@Function       OSIsDebugLevel
-+@Description    Tests whether a given debug level is enabled.
-+@Input          ui32DebugLevel  The debug level to test.
-+@Return         IMG_TRUE if the debug level is enabled
-+                and IMG_FALSE otherwise.
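A sketch of gating verbose output on the debug-level query documented above. The level value is a made-up placeholder and the log macro usage follows the PVR_DPF pattern seen elsewhere in the driver.

#define EXAMPLE_DEBUG_LEVEL_VERBOSE 4 /* hypothetical level value */

static void MaybeLogDetails(void)
{
    if (OSIsDebugLevel(EXAMPLE_DEBUG_LEVEL_VERBOSE))
    {
        PVR_DPF((PVR_DBG_MESSAGE, "current debug level: %u", OSDebugLevel()));
    }
}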
-+*/ /**************************************************************************/ -+IMG_BOOL OSIsDebugLevel(IMG_UINT32 ui32DebugLevel); -+ -+#if defined(SUPPORT_DMA_TRANSFER) -+ -+typedef void (*PFN_SERVER_CLEANUP)(void *pvData, IMG_BOOL bAdvanceTimeline); -+ -+#define DMA_COMPLETION_TIMEOUT_MS 60000 -+#define DMA_ERROR_SYNC_RETRIES 100 -+ -+PVRSRV_ERROR OSDmaPrepareTransfer(PVRSRV_DEVICE_NODE *psDevNode, void *psChan, -+ IMG_DMA_ADDR* psDmaAddr, IMG_UINT64* puiAddress, -+ IMG_UINT64 uiSize, IMG_BOOL bMemToDev, -+ IMG_HANDLE pvOSData, -+ IMG_HANDLE pvServerCleanupParam,PFN_SERVER_CLEANUP pfnServerCleanup, IMG_BOOL bFirst); -+ -+PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode, IMG_HANDLE pvChan, -+ IMG_DMA_ADDR* psDmaAddr, IMG_BOOL *pbValid, -+ IMG_UINT64* puiAddress, IMG_UINT64 uiSize, -+ IMG_UINT32 uiOffsetInPage, -+ IMG_UINT32 ui32SizeInPages, -+ IMG_BOOL bMemToDev, -+ IMG_HANDLE pvOSData, -+ IMG_HANDLE pvServerCleanupParam, PFN_SERVER_CLEANUP pfnServerCleanup, -+ IMG_BOOL bFirst); -+ -+PVRSRV_ERROR OSDmaAllocData(PVRSRV_DEVICE_NODE *psDevNode,IMG_UINT32 uiNumDMA, void **pvAllocedData); -+PVRSRV_ERROR OSDmaSubmitTransfer(PVRSRV_DEVICE_NODE *psDevNode, void *pvOSData, void *psChan, IMG_BOOL bSynchronous); -+void OSDmaForceCleanup(PVRSRV_DEVICE_NODE *psDevNode, void *pvChan, -+ void *pvOSData, IMG_HANDLE pvServerCleanupParam, -+ PFN_SERVER_CLEANUP pfnServerCleanup); -+#endif -+#if defined(SUPPORT_SECURE_ALLOC_KM) -+PVRSRV_ERROR -+OSAllocateSecBuf(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR *pszName, -+ PMR **ppsPMR); -+ -+void -+OSFreeSecBuf(PMR *psPMR); -+#endif -+#endif /* OSFUNC_H */ -+ -+/****************************************************************************** -+ End of file (osfunc.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/osfunc_arm.c b/drivers/gpu/drm/img-rogue/osfunc_arm.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/osfunc_arm.c -@@ -0,0 +1,181 @@ -+/*************************************************************************/ /*! -+@File -+@Title arm specific OS functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Processor specific OS functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
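A hypothetical outline of how a caller might drive the SUPPORT_DMA_TRANSFER interface declared above. The channel handle, addresses and the error-path choice of OSDmaForceCleanup() are assumptions for illustration; real callers obtain these from the platform DMA setup and the PMR being transferred.

/* Illustrative cleanup callback matching PFN_SERVER_CLEANUP. */
static void ExampleDmaDone(void *pvData, IMG_BOOL bAdvanceTimeline)
{
    /* Free per-transfer bookkeeping; advance the sync timeline if requested. */
    PVR_UNREFERENCED_PARAMETER(pvData);
    PVR_UNREFERENCED_PARAMETER(bAdvanceTimeline);
}

static PVRSRV_ERROR ExampleDmaCopyToDevice(PVRSRV_DEVICE_NODE *psDevNode,
                                           void *psChan,            /* assumed: DMA channel */
                                           IMG_DMA_ADDR *psDmaAddr, /* assumed: device address */
                                           IMG_UINT64 *puiCpuAddr,  /* assumed: CPU-side address */
                                           IMG_UINT64 uiSize)
{
    void *pvOSData = NULL;
    PVRSRV_ERROR eError;

    eError = OSDmaAllocData(psDevNode, 1, &pvOSData);
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    eError = OSDmaPrepareTransfer(psDevNode, psChan, psDmaAddr, puiCpuAddr,
                                  uiSize, IMG_TRUE /* mem -> device */,
                                  pvOSData, NULL, ExampleDmaDone, IMG_TRUE);
    if (eError != PVRSRV_OK)
    {
        /* Illustrative error path only. */
        OSDmaForceCleanup(psDevNode, psChan, pvOSData, NULL, ExampleDmaDone);
        return eError;
    }

    /* Kick the transfer and wait for completion. */
    return OSDmaSubmitTransfer(psDevNode, pvOSData, psChan, IMG_TRUE);
}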
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#include -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) -+ #include -+#else -+ #include -+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) */ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)) -+ #include -+#endif -+#include -+ -+#include "pvrsrv_error.h" -+#include "img_types.h" -+#include "img_defs.h" -+#include "osfunc.h" -+#include "pvr_debug.h" -+ -+ -+static inline size_t pvr_dmac_range_len(const void *pvStart, const void *pvEnd) -+{ -+ return (size_t)((char *)pvEnd - (char *)pvStart); -+} -+ -+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd) -+{ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)) -+ struct device *dev = psDevNode->psDevConfig->pvOSDevice; -+ if (dev) -+ { -+ dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr, -+ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, -+ DMA_TO_DEVICE); -+ dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, -+ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, -+ DMA_FROM_DEVICE); -+ } -+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) -+ arm_dma_ops.sync_single_for_device(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE); -+ arm_dma_ops.sync_single_for_cpu(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE); -+#else -+ PVR_UNREFERENCED_PARAMETER(psDevNode); -+ /* Inner cache */ -+ dmac_flush_range(pvVirtStart, pvVirtEnd); -+ -+ /* Outer cache */ -+ outer_flush_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr); -+#endif -+} -+ -+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd) -+{ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)) -+ struct device *dev = psDevNode->psDevConfig->pvOSDevice; -+ if (dev) -+ { -+ dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr, -+ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, -+ DMA_TO_DEVICE); -+ } -+ -+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) -+ arm_dma_ops.sync_single_for_device(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE); 
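On recent kernels the ARM path above leans on the streaming DMA API instead of architecture-private cache hooks. A self-contained sketch of the same idea, assuming the device pointer and the range are valid, physically contiguous and DMA-mapped:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static void example_flush_range_for_dma(struct device *dev, dma_addr_t uiBusAddr, size_t uiLen)
{
    /* Write back dirty CPU cache lines so the device observes them... */
    dma_sync_single_for_device(dev, uiBusAddr, uiLen, DMA_TO_DEVICE);

    /* ...then invalidate so the CPU re-reads anything the device wrote. */
    dma_sync_single_for_cpu(dev, uiBusAddr, uiLen, DMA_FROM_DEVICE);
}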
-+#else -+ PVR_UNREFERENCED_PARAMETER(psDevNode); -+ /* Inner cache */ -+ dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_TO_DEVICE); -+ -+ /* Outer cache */ -+ outer_clean_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr); -+#endif -+} -+ -+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd) -+{ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)) -+ struct device *dev = psDevNode->psDevConfig->pvOSDevice; -+ if (dev) -+ { -+ dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, -+ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, -+ DMA_FROM_DEVICE); -+ } -+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) -+ arm_dma_ops.sync_single_for_cpu(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE); -+#else -+ PVR_UNREFERENCED_PARAMETER(psDevNode); -+ /* Inner cache */ -+ dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_FROM_DEVICE); -+ -+ /* Outer cache */ -+ outer_inv_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr); -+#endif -+} -+ -+OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDevNode); -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) -+ return OS_CACHE_OP_ADDR_TYPE_PHYSICAL; -+#else -+ return OS_CACHE_OP_ADDR_TYPE_BOTH; -+#endif -+} -+ -+/* User Enable Register */ -+#define PMUSERENR_EN 0x00000001 /* enable user access to the counters */ -+ -+static void per_cpu_perf_counter_user_access_en(void *data) -+{ -+ PVR_UNREFERENCED_PARAMETER(data); -+ /* Enable user-mode access to counters. */ -+ asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(PMUSERENR_EN)); -+} -+ -+void OSUserModeAccessToPerfCountersEn(void) -+{ -+ on_each_cpu(per_cpu_perf_counter_user_access_en, NULL, 1); -+} -+ -+IMG_BOOL OSIsWriteCombineUnalignedSafe(void) -+{ -+ /* -+ * The kernel looks to have always used normal memory under ARM32. -+ * See osfunc_arm64.c implementation for more details. -+ */ -+ return IMG_TRUE; -+} -diff --git a/drivers/gpu/drm/img-rogue/osfunc_arm64.c b/drivers/gpu/drm/img-rogue/osfunc_arm64.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/osfunc_arm64.c -@@ -0,0 +1,272 @@ -+/*************************************************************************/ /*! -+@File -+@Title arm64 specific OS functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Processor specific OS functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
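Setting PMUSERENR.EN on every CPU, as per_cpu_perf_counter_user_access_en() does above, is what allows a user-space profiler to read the PMU directly. Purely as an illustration, and assuming the cycle counter has been enabled elsewhere (e.g. via PMCR/PMCNTENSET), an ARMv7 user-mode read of PMCCNTR looks like:

/* Illustrative user-space helper; only works once PMUSERENR.EN is set. */
static inline unsigned int example_read_ccnt(void)
{
    unsigned int ui32Cycles;

    /* PMCCNTR: performance monitors cycle count register (c9, c13, 0). */
    asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(ui32Cycles));
    return ui32Cycles;
}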
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#include -+#include -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) -+ #include -+#else -+ #include -+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) */ -+#include -+#include -+ -+#include "pvrsrv_error.h" -+#include "img_types.h" -+#include "img_defs.h" -+#include "osfunc.h" -+#include "pvr_debug.h" -+ -+#include "kernel_compatibility.h" -+ -+#if defined(CONFIG_OUTER_CACHE) -+ /* If you encounter a 64-bit ARM system with an outer cache, you'll need -+ * to add the necessary code to manage that cache. See osfunc_arm.c -+ * for an example of how to do so. -+ */ -+ #error "CONFIG_OUTER_CACHE not supported on arm64." -+#endif -+ -+static inline void begin_user_mode_access(void) -+{ -+#if defined(CONFIG_ARM64) && defined(CONFIG_ARM64_SW_TTBR0_PAN) -+ uaccess_enable_privileged(); -+#endif -+} -+ -+static inline void end_user_mode_access(void) -+{ -+#if defined(CONFIG_ARM64) && defined(CONFIG_ARM64_SW_TTBR0_PAN) -+ uaccess_disable_privileged(); -+#endif -+} -+ -+static inline void FlushRange(void *pvRangeAddrStart, -+ void *pvRangeAddrEnd, -+ PVRSRV_CACHE_OP eCacheOp) -+{ -+ IMG_UINT32 ui32CacheLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE); -+ IMG_BYTE *pbStart = pvRangeAddrStart; -+ IMG_BYTE *pbEnd = pvRangeAddrEnd; -+ IMG_BYTE *pbBase; -+ -+ /* -+ On arm64, the TRM states in D5.8.1 (data and unified caches) that if cache -+ maintenance is performed on a memory location using a VA, the effect of -+ that cache maintenance is visible to all VA aliases of the physical memory -+ location. So here it's quicker to issue the machine cache maintenance -+ instruction directly without going via the Linux kernel DMA framework as -+ this is sufficient to maintain the CPU d-caches on arm64. 
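The VA-based maintenance described above strides through the range one data-cache line at a time, with the stride obtained via OSCPUCacheAttributeSize(). As an illustration of where that number ultimately comes from on arm64 (a sketch, not the driver's code), the minimum D-cache line size can be read from CTR_EL0:

/* CTR_EL0.DminLine (bits [19:16]) holds log2 of the line size in words. */
static inline unsigned int example_dcache_line_size(void)
{
    unsigned long ulCtr;

    asm volatile("mrs %0, ctr_el0" : "=r"(ulCtr));
    return 4u << ((ulCtr >> 16) & 0xf);
}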
-+ */ -+ -+ begin_user_mode_access(); -+ -+ pbEnd = (IMG_BYTE *) PVR_ALIGN((uintptr_t)pbEnd, (uintptr_t)ui32CacheLineSize); -+ -+ /* Memory-barrier */ -+ asm volatile("dsb sy" : : : "memory"); -+ -+ switch (eCacheOp) -+ { -+ case PVRSRV_CACHE_OP_CLEAN: -+ for (pbBase = pbStart; pbBase < pbEnd; pbBase += ui32CacheLineSize) -+ { -+ asm volatile ("dc cvac, %0" :: "r" (pbBase)); -+ } -+ break; -+ case PVRSRV_CACHE_OP_INVALIDATE: -+ for (pbBase = pbStart; pbBase < pbEnd; pbBase += ui32CacheLineSize) -+ { -+ asm volatile ("dc ivac, %0" :: "r" (pbBase)); -+ } -+ break; -+ case PVRSRV_CACHE_OP_FLUSH: -+ for (pbBase = pbStart; pbBase < pbEnd; pbBase += ui32CacheLineSize) -+ { -+ asm volatile ("dc civac, %0" :: "r" (pbBase)); -+ } -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Cache maintenance operation type %d is invalid", -+ __func__, eCacheOp)); -+ break; -+ } -+ -+ /* Memory-barrier */ -+ asm volatile("dsb sy" : : : "memory"); -+ -+ end_user_mode_access(); -+} -+ -+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd) -+{ -+ struct device *dev; -+ -+ if (pvVirtStart) -+ { -+ FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_FLUSH); -+ return; -+ } -+ -+ dev = psDevNode->psDevConfig->pvOSDevice; -+ -+ if (dev) -+ { -+ dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr, -+ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, -+ DMA_TO_DEVICE); -+ dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, -+ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, -+ DMA_FROM_DEVICE); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Cache operation cannot be completed!")); -+ } -+ -+} -+ -+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd) -+{ -+ struct device *dev; -+ -+ if (pvVirtStart) -+ { -+ FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_CLEAN); -+ return; -+ } -+ -+ dev = psDevNode->psDevConfig->pvOSDevice; -+ -+ if (dev) -+ { -+ dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr, -+ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, -+ DMA_TO_DEVICE); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Cache operation cannot be completed!")); -+ } -+ -+} -+ -+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd) -+{ -+ struct device *dev; -+ -+ if (pvVirtStart) -+ { -+ FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_INVALIDATE); -+ return; -+ } -+ -+ dev = psDevNode->psDevConfig->pvOSDevice; -+ -+ if (dev) -+ { -+ dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, -+ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, -+ DMA_FROM_DEVICE); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Cache operation cannot be completed!")); -+ } -+} -+ -+ -+OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ if (!psDevNode->psDevConfig->pvOSDevice) -+ { -+ /* Host Mem device node doesn't have an associated Linux dev ptr. -+ Use virtual addr ops instead of asking kernel to do physical -+ maintenance */ -+ return OS_CACHE_OP_ADDR_TYPE_VIRTUAL; -+ } -+ -+ return OS_CACHE_OP_ADDR_TYPE_PHYSICAL; -+} -+ -+void OSUserModeAccessToPerfCountersEn(void) -+{ -+} -+ -+IMG_BOOL OSIsWriteCombineUnalignedSafe(void) -+{ -+ /* -+ * Under ARM64 there is the concept of 'device' [0] and 'normal' [1] memory. 
-+ * Unaligned access on device memory is explicitly disallowed [2]: -+ * -+ * 'Further, unaligned accesses are only allowed to regions marked as Normal -+ * memory type. -+ * ... -+ * Attempts to perform unaligned accesses when not allowed will cause an -+ * alignment fault (data abort).' -+ * -+ * Write-combine on ARM64 can be implemented as either normal non-cached -+ * memory (NORMAL_NC) or as device memory with gathering enabled -+ * (DEVICE_GRE.) Kernel 3.13 changed this from the latter to the former. -+ * -+ * [0]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/CHDBDIDF.html -+ * [1]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/ch13s01s01.html -+ * [2]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html -+ */ -+ -+ pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL); -+ -+ return (pgprot_val(pgprot) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_NC); -+} -diff --git a/drivers/gpu/drm/img-rogue/osfunc_common.h b/drivers/gpu/drm/img-rogue/osfunc_common.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/osfunc_common.h -@@ -0,0 +1,284 @@ -+/*************************************************************************/ /*! -+@File -+@Title OS functions header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description OS specific API definitions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#ifndef OSFUNC_COMMON_H -+/*! @cond Doxygen_Suppress */ -+#define OSFUNC_COMMON_H -+/*! @endcond */ -+ -+#if defined(__KERNEL__) && defined(__linux__) -+#include -+#else -+#include -+#endif -+ -+#include "img_types.h" -+ -+#ifdef __cplusplus -+extern "C" -+{ -+#endif -+ -+/**************************************************************************/ /*! -+@Function DeviceMemSet -+@Description Set memory, whose mapping may be uncached, to a given value. -+ Safe implementation for all architectures for uncached mapping, -+ optimised for speed where supported by tool chains. -+ In such cases, OSDeviceMemSet() is defined as a call to this -+ function. -+@Input pvDest void pointer to the memory to be set -+@Input ui8Value byte containing the value to be set -+@Input ui32Size the number of bytes to be set to the given value -+@Return None -+ */ /**************************************************************************/ -+void DeviceMemSet(void *pvDest, IMG_UINT8 ui8Value, size_t ui32Size); -+ -+/**************************************************************************/ /*! -+@Function DeviceMemCopy -+@Description Copy values from one area of memory. Safe implementation for -+ all architectures for uncached mapping, of either the source -+ or destination, optimised for speed where supported by tool -+ chains. In such cases, OSDeviceMemCopy() is defined as a call -+ to this function. -+@Input pvDst void pointer to the destination memory -+@Input pvSrc void pointer to the source memory -+@Input ui32Size the number of bytes to be copied -+@Return None -+ */ /**************************************************************************/ -+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t ui32Size); -+ -+/**************************************************************************/ /*! -+@Function DeviceMemSetBytes -+@Description Potentially very slow (but safe) memset fallback for non-GNU C -+ compilers for arm64/aarch64 -+@Input pvDest void pointer to the memory to be set -+@Input ui8Value byte containing the value to be set -+@Input ui32Size the number of bytes to be set to the given value -+@Return None -+ */ /**************************************************************************/ -+void DeviceMemSetBytes(void *pvDest, IMG_UINT8 ui8Value, size_t ui32Size); -+ -+/**************************************************************************/ /*! -+@Function DeviceMemCopyBytes -+@Description Potentially very slow (but safe) memcpy fallback for non-GNU C -+ compilers for arm64/aarch64 -+@Input pvDst void pointer to the destination memory -+@Input pvSrc void pointer to the source memory -+@Input ui32Size the number of bytes to be copied -+@Return None -+ */ /**************************************************************************/ -+void DeviceMemCopyBytes(void *pvDst, const void *pvSrc, size_t ui32Size); -+ -+/**************************************************************************/ /*! -+@Function StringLCopy -+@Description Copy at most uDataSize-1 bytes from pszSrc to pszDest. -+ If no null byte ('\0') is contained within the first uDataSize-1 -+ characters of the source string, the destination string will be -+ truncated. If the length of the source string is less than uDataSize -+ an additional NUL byte will be copied to the destination string -+ to ensure that the string is NUL-terminated. 
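DeviceMemSetBytes()/DeviceMemCopyBytes() exist because compiler-generated memset/memcpy may issue unaligned or multi-byte accesses that fault on uncached or device-type mappings. A minimal byte-by-byte sketch in that spirit (illustrative, not the driver's implementation):

#include <stddef.h>

static void example_devmem_set_bytes(void *pvDest, unsigned char ui8Value, size_t uiSize)
{
    /* Never issue an access wider than one byte, so the routine stays safe
     * for uncached mappings regardless of alignment. */
    volatile unsigned char *pbDest = pvDest;

    while (uiSize--)
    {
        *pbDest++ = ui8Value;
    }
}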
-+@Input pszDest char pointer to the destination string -+@Input pszSrc const char pointer to the source string -+@Input uDataSize the maximum number of bytes to be copied -+@Return Size of the source string -+ */ /**************************************************************************/ -+size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize); -+ -+#if defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY) -+#if defined(__GNUC__) -+/* Workarounds for assumptions made that memory will not be mapped uncached -+ * in kernel or user address spaces on arm64 platforms (or other testing). -+ */ -+ -+#define OSDeviceMemSet(a,b,c) DeviceMemSet((a), (b), (c)) -+#define OSDeviceMemCopy(a,b,c) DeviceMemCopy((a), (b), (c)) -+ -+#else /* defined __GNUC__ */ -+ -+#define OSDeviceMemSet(a,b,c) DeviceMemSetBytes((a), (b), (c)) -+#define OSDeviceMemCopy(a,b,c) DeviceMemCopyBytes((a), (b), (c)) -+ -+#endif /* defined __GNUC__ */ -+ -+#else /* (defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */ -+ -+/* Everything else */ -+ -+/**************************************************************************/ /*! -+@Function OSDeviceMemSet -+@Description Set memory, whose mapping may be uncached, to a given value. -+ On some architectures, additional processing may be needed -+ if the mapping is uncached. -+@Input a void pointer to the memory to be set -+@Input b byte containing the value to be set -+@Input c the number of bytes to be set to the given value -+@Return Pointer to the destination memory. -+ */ /**************************************************************************/ -+#define OSDeviceMemSet(a,b,c) \ -+ do { \ -+ if ((c) != 0U) \ -+ { \ -+ (void) memset((a), (b), (c)); \ -+ (void) *(volatile IMG_UINT32*)((void*)(a)); \ -+ } \ -+ } while (false) -+ -+/**************************************************************************/ /*! -+@Function OSDeviceMemCopy -+@Description Copy values from one area of memory, to another, when one -+ or both mappings may be uncached. -+ On some architectures, additional processing may be needed -+ if mappings are uncached. -+@Input a void pointer to the destination memory -+@Input b void pointer to the source memory -+@Input c the number of bytes to be copied -+@Return Pointer to the destination memory. -+ */ /**************************************************************************/ -+#define OSDeviceMemCopy(a,b,c) \ -+ do { \ -+ if ((c) != 0U) \ -+ { \ -+ memcpy((a), (b), (c)); \ -+ (void) *(volatile IMG_UINT32*)((void*)(a)); \ -+ } \ -+ } while (false) -+ -+#endif /* (defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */ -+ -+/**************************************************************************/ /*! -+@Function OSCachedMemSet -+@Description Set memory, where the mapping is known to be cached, to a -+ given value. This function exists to allow an optimal memset -+ to be performed when memory is known to be cached. -+@Input a void pointer to the memory to be set -+@Input b byte containing the value to be set -+@Input c the number of bytes to be set to the given value -+@Return Pointer to the destination memory. -+ */ /**************************************************************************/ -+#define OSCachedMemSet(a,b,c) (void) memset((a), (b), (c)) -+ -+/**************************************************************************/ /*! 
-+@Function OSCachedMemCopy -+@Description Copy values from one area of memory, to another, when both -+ mappings are known to be cached. -+ This function exists to allow an optimal memcpy to be -+ performed when memory is known to be cached. -+@Input a void pointer to the destination memory -+@Input b void pointer to the source memory -+@Input c the number of bytes to be copied -+@Return Pointer to the destination memory. -+ */ /**************************************************************************/ -+#define OSCachedMemCopy(a,b,c) memcpy((a), (b), (c)) -+ -+#if defined(__KERNEL__) -+ -+/**************************************************************************/ /*! -+@Function OSCachedMemSetWMB -+@Description Set memory, where the mapping is known to be cached or -+ write-combine, to a given value and issue a write memory barrier -+ after. This -+ function exists to allow an optimal memset to be performed when -+ memory is known to be cached or write-combine. -+@Input a void pointer to the memory to be set -+@Input b byte containing the value to be set -+@Input c the number of bytes to be set to the given value -+@Return Pointer to the destination memory. -+ */ /**************************************************************************/ -+#define OSCachedMemSetWMB(a,b,c) \ -+ do { \ -+ if ((c) != 0U) \ -+ { \ -+ (void) memset((a), (b), (c)); \ -+ OSWriteMemoryBarrier(a); \ -+ } \ -+ } while (false) -+/**************************************************************************/ /*! -+@Function OSCachedMemCopy -+@Description Copy values from one area of memory, to another, when both -+ mappings are known to be cached or write-combine and issue -+ a write memory barrier after. -+ This function exists to allow an optimal memcpy to be -+ performed when memory is known to be cached or write-combine. -+@Input a void pointer to the destination memory -+@Input b void pointer to the source memory -+@Input c the number of bytes to be copied -+@Return Pointer to the destination memory. -+ */ /**************************************************************************/ -+#define OSCachedMemCopyWMB(a,b,c) \ -+ do { \ -+ if ((c) != 0U) \ -+ { \ -+ (void) memcpy((a), (b), (c)); \ -+ OSWriteMemoryBarrier(a); \ -+ } \ -+ } while (false) -+#endif /* defined(__KERNEL__) */ -+ -+/**************************************************************************/ /*! -+@Function OSStringLCopy -+@Description Copy at most uDataSize-1 bytes from pszSrc to pszDest. -+ If no null byte ('\0') is contained within the first uDataSize-1 -+ characters of the source string, the destination string will be -+ truncated. If the length of the source string is less than uDataSize -+ an additional NUL byte will be copied to the destination string -+ to ensure that the string is NUL-terminated. -+@Input a char pointer to the destination string -+@Input b const char pointer to the source string -+@Input c the maximum number of bytes to be copied -+@Return Size of the source string -+ */ /**************************************************************************/ -+#if defined(__QNXNTO__) || (defined(__linux__) && defined(__KERNEL__) && !defined(DEBUG)) -+#define OSStringLCopy(a,b,c) strlcpy((a), (b), (c)) -+#else /* defined(__QNXNTO__) ... */ -+#define OSStringLCopy(a,b,c) StringLCopy((a), (b), (c)) -+#endif /* defined(__QNXNTO__) ... 
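OSStringLCopy() resolves either to the kernel's strlcpy() or to the driver's own StringLCopy(); both follow the BSD strlcpy contract documented above. A standalone sketch of that contract (illustrative, not the driver's code):

#include <stddef.h>
#include <string.h>

static size_t example_string_lcopy(char *pszDest, const char *pszSrc, size_t uDataSize)
{
    size_t uSrcLen = strlen(pszSrc);

    if (uDataSize != 0)
    {
        /* Copy at most uDataSize-1 bytes and always NUL-terminate. */
        size_t uCopyLen = (uSrcLen >= uDataSize) ? (uDataSize - 1) : uSrcLen;

        memcpy(pszDest, pszSrc, uCopyLen);
        pszDest[uCopyLen] = '\0';
    }

    /* strlcpy-style return: length of the source, so truncation is detectable. */
    return uSrcLen;
}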
*/ -+ -+#ifdef __cplusplus -+} -+#endif -+ -+#endif /* OSFUNC_COMMON_H */ -+ -+/****************************************************************************** -+ End of file (osfunc_common.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/osfunc_riscv.c b/drivers/gpu/drm/img-rogue/osfunc_riscv.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/osfunc_riscv.c -@@ -0,0 +1,231 @@ -+/*************************************************************************/ /*! -+@File -+@Title RISC-V specific OS functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Processor specific OS functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "img_defs.h" -+#include "osfunc.h" -+#include "pvr_debug.h" -+#include "cache_ops.h" -+#include -+#include "sysconfig.h" -+ -+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd) -+{ -+ struct device *dev; -+ /* -+ * RISC-V cache maintenance mechanism is not part of the core spec. -+ * This leaves the actual mechanism of action to an implementer. -+ * Here we let the system layer decide how maintenance is done. 
-+ */ -+ if (psDevNode->psDevConfig->pfnHostCacheMaintenance) -+ { -+ psDevNode->psDevConfig->pfnHostCacheMaintenance( -+ psDevNode->psDevConfig->hSysData, -+ PVRSRV_CACHE_OP_FLUSH, -+ pvVirtStart, -+ pvVirtEnd, -+ sCPUPhysStart, -+ sCPUPhysEnd); -+ -+ } -+#if !defined(NO_HARDWARE) -+ else -+ { -+ dev = psDevNode->psDevConfig->pvOSDevice; -+ if (dev) -+ { -+ if (sCPUPhysStart.uiAddr == IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL)) { -+ dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr, -+ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, -+ DMA_TO_DEVICE); -+ dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, -+ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, -+ DMA_FROM_DEVICE); -+ } else if (sCPUPhysStart.uiAddr != IMG_CAST_TO_CPUPHYADDR_UINT(0x0ULL)) { -+ dma_sync_single_for_device(dev, phys_cpu2gpu(sCPUPhysStart.uiAddr), -+ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, -+ DMA_TO_DEVICE); -+ dma_sync_single_for_cpu(dev, phys_cpu2gpu(sCPUPhysStart.uiAddr), -+ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, -+ DMA_FROM_DEVICE); -+ } -+ } -+ //PVR_DPF((PVR_DBG_WARNING, -+ // "%s: System doesn't implement cache maintenance. Skipping!", -+ // __func__)); -+ } -+#endif -+} -+ -+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd) -+{ -+ struct device *dev; -+ /* -+ * RISC-V cache maintenance mechanism is not part of the core spec. -+ * This leaves the actual mechanism of action to an implementer. -+ * Here we let the system layer decide how maintenance is done. -+ */ -+ if (psDevNode->psDevConfig->pfnHostCacheMaintenance) -+ { -+ psDevNode->psDevConfig->pfnHostCacheMaintenance( -+ psDevNode->psDevConfig->hSysData, -+ PVRSRV_CACHE_OP_CLEAN, -+ pvVirtStart, -+ pvVirtEnd, -+ sCPUPhysStart, -+ sCPUPhysEnd); -+ -+ } -+#if !defined(NO_HARDWARE) -+ else -+ { -+ dev = psDevNode->psDevConfig->pvOSDevice; -+ if (dev) -+ { -+ if (sCPUPhysStart.uiAddr == IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL)) { -+ dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr, -+ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, -+ DMA_TO_DEVICE); -+ } else if (sCPUPhysStart.uiAddr != IMG_CAST_TO_CPUPHYADDR_UINT(0x0ULL)) { -+ dma_sync_single_for_device(dev, phys_cpu2gpu(sCPUPhysStart.uiAddr), -+ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, -+ DMA_TO_DEVICE); -+ } -+ } -+ //PVR_DPF((PVR_DBG_WARNING, -+ // "%s: System doesn't implement cache maintenance. Skipping!", -+ // __func__)); -+ } -+#endif -+} -+ -+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd) -+{ -+ struct device *dev; -+ /* -+ * RISC-V cache maintenance mechanism is not part of the core spec. -+ * This leaves the actual mechanism of action to an implementer. -+ * Here we let the system layer decide how maintenance is done. 
-+ */ -+ if (psDevNode->psDevConfig->pfnHostCacheMaintenance) -+ { -+ psDevNode->psDevConfig->pfnHostCacheMaintenance( -+ psDevNode->psDevConfig->hSysData, -+ PVRSRV_CACHE_OP_INVALIDATE, -+ pvVirtStart, -+ pvVirtEnd, -+ sCPUPhysStart, -+ sCPUPhysEnd); -+ -+ } -+#if !defined(NO_HARDWARE) -+ else -+ { -+ dev = psDevNode->psDevConfig->pvOSDevice; -+ -+ if (dev) -+ { -+ if (sCPUPhysStart.uiAddr == IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL)) { -+ dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, -+ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, -+ DMA_FROM_DEVICE); -+ } else if (sCPUPhysStart.uiAddr != IMG_CAST_TO_CPUPHYADDR_UINT(0x0ULL)) { -+ dma_sync_single_for_cpu(dev, phys_cpu2gpu(sCPUPhysStart.uiAddr), -+ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, -+ DMA_FROM_DEVICE); -+ } -+ } -+ //PVR_DPF((PVR_DBG_WARNING, -+ // "%s: System doesn't implement cache maintenance. Skipping!", -+ // __func__)); -+ } -+#endif -+} -+ -+OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ /* -+ * Need to obtain psDevNode here and do the following: -+ * -+ * OS_CACHE_OP_ADDR_TYPE eOpAddrType = -+ * psDevNode->psDevConfig->bHasPhysicalCacheMaintenance ? -+ * OS_CACHE_OP_ADDR_TYPE_PHYSICAL : OS_CACHE_OP_ADDR_TYPE_VIRTUAL; -+ * -+ * Return BOTH for now on. -+ * -+ */ -+ return OS_CACHE_OP_ADDR_TYPE_BOTH; -+} -+ -+void OSUserModeAccessToPerfCountersEn(void) -+{ -+#if !defined(NO_HARDWARE) -+ PVR_DPF((PVR_DBG_WARNING, "%s: Not implemented!", __func__)); -+ //PVR_ASSERT(0); -+#endif -+} -+ -+IMG_BOOL OSIsWriteCombineUnalignedSafe(void) -+{ -+#if !defined(NO_HARDWARE) -+ //PVR_DPF((PVR_DBG_WARNING, -+ // "%s: Not implemented (assuming false)!", -+ // __func__)); -+ //PVR_ASSERT(0); -+ return IMG_TRUE; -+ //return IMG_FALSE; -+#else -+ return IMG_TRUE; -+#endif -+} -diff --git a/drivers/gpu/drm/img-rogue/osfunc_x86.c b/drivers/gpu/drm/img-rogue/osfunc_x86.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/osfunc_x86.c -@@ -0,0 +1,135 @@ -+/*************************************************************************/ /*! -+@File -+@Title x86 specific OS functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Processor specific OS functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
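Because RISC-V leaves cache maintenance to the platform, the RISC-V OSCPUCache*RangeKM functions above defer to a system-layer pfnHostCacheMaintenance hook when one is registered. A hypothetical system-layer implementation of that hook is sketched below; the callback signature is inferred from the call sites, and the assumption that hSysData carries a struct device pointer is purely illustrative.

#include <linux/device.h>
#include <linux/dma-mapping.h>

static void ExampleHostCacheMaintenance(IMG_HANDLE hSysData,
                                        PVRSRV_CACHE_OP eRequestType,
                                        void *pvVirtStart,
                                        void *pvVirtEnd,
                                        IMG_CPU_PHYADDR sCPUPhysStart,
                                        IMG_CPU_PHYADDR sCPUPhysEnd)
{
    struct device *dev = (struct device *)hSysData;   /* assumption for this sketch */
    size_t uiLength = sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr;

    PVR_UNREFERENCED_PARAMETER(pvVirtStart);
    PVR_UNREFERENCED_PARAMETER(pvVirtEnd);

    switch (eRequestType)
    {
        case PVRSRV_CACHE_OP_CLEAN:
            dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr, uiLength, DMA_TO_DEVICE);
            break;
        case PVRSRV_CACHE_OP_INVALIDATE:
            dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, uiLength, DMA_FROM_DEVICE);
            break;
        default: /* PVRSRV_CACHE_OP_FLUSH */
            dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr, uiLength, DMA_TO_DEVICE);
            dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, uiLength, DMA_FROM_DEVICE);
            break;
    }
}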
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include -+#include -+ -+#include "pvrsrv_error.h" -+#include "img_types.h" -+#include "img_defs.h" -+#include "osfunc.h" -+#include "pvr_debug.h" -+ -+static void x86_flush_cache_range(const void *pvStart, const void *pvEnd) -+{ -+ IMG_BYTE *pbStart = (IMG_BYTE *)pvStart; -+ IMG_BYTE *pbEnd = (IMG_BYTE *)pvEnd; -+ IMG_BYTE *pbBase; -+ -+ pbEnd = (IMG_BYTE *)PVR_ALIGN((uintptr_t)pbEnd, -+ (uintptr_t)boot_cpu_data.x86_clflush_size); -+ -+ mb(); -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,168)) -+ __uaccess_begin(); -+#endif -+ -+ for (pbBase = pbStart; pbBase < pbEnd; pbBase += boot_cpu_data.x86_clflush_size) -+ { -+ clflush(pbBase); -+ } -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,168)) -+ __uaccess_end(); -+#endif -+ -+ mb(); -+} -+ -+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDevNode); -+ PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); -+ PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); -+ -+ x86_flush_cache_range(pvVirtStart, pvVirtEnd); -+} -+ -+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDevNode); -+ PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); -+ PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); -+ -+ /* No clean feature on x86 */ -+ x86_flush_cache_range(pvVirtStart, pvVirtEnd); -+} -+ -+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDevNode); -+ PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); -+ PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); -+ -+ /* No invalidate-only support */ -+ x86_flush_cache_range(pvVirtStart, pvVirtEnd); -+} -+ -+OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDevNode); -+ return OS_CACHE_OP_ADDR_TYPE_VIRTUAL; -+} -+ -+void OSUserModeAccessToPerfCountersEn(void) -+{ -+ /* Not applicable to x86 architecture. 
*/ -+} -+ -+IMG_BOOL OSIsWriteCombineUnalignedSafe(void) -+{ -+ return IMG_TRUE; -+} -diff --git a/drivers/gpu/drm/img-rogue/oskm_apphint.h b/drivers/gpu/drm/img-rogue/oskm_apphint.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/oskm_apphint.h -@@ -0,0 +1,186 @@ -+/*************************************************************************/ /*! -+@File oskm_apphint.h -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description OS-independent interface for retrieving KM apphints -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#include "img_defs.h" -+#if defined(__linux__) -+#include "km_apphint.h" -+#include "device.h" -+#else -+#include "services_client_porting.h" -+#endif -+#if !defined(OSKM_APPHINT_H) -+#define OSKM_APPHINT_H -+ -+/*! Supplied to os_get_km_apphint_XXX() functions when the param/AppHint is -+ * applicable to all devices and not a specific device. Typically used -+ * for server-wide build and module AppHints. 
-+ */ -+#define APPHINT_NO_DEVICE (NULL) -+ -+#if defined(__linux__) && !defined(DOXYGEN) -+static INLINE IMG_UINT os_get_km_apphint_UINT32(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_UINT32 *pAppHintDefault, IMG_UINT32 *pVal) { -+ return !pvr_apphint_get_uint32(device, id, pVal); -+} -+static INLINE IMG_UINT os_get_km_apphint_UINT64(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_UINT64 *pAppHintDefault, IMG_UINT64 *pVal) { -+ return !pvr_apphint_get_uint64(device, id, pVal); -+} -+static INLINE IMG_UINT os_get_km_apphint_BOOL(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_BOOL *pAppHintDefault, IMG_BOOL *pVal) { -+ return !pvr_apphint_get_bool(device, id, pVal); -+} -+static INLINE IMG_UINT os_get_km_apphint_STRING(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_CHAR *pAppHintDefault, IMG_CHAR *buffer, size_t size) { -+ return !pvr_apphint_get_string(device, id, buffer, size); -+} -+ -+#define OSGetKMAppHintUINT32(device, state, name, appHintDefault, value) \ -+ os_get_km_apphint_UINT32(device, state, APPHINT_ID_ ## name, appHintDefault, value) -+ -+#define OSGetKMAppHintUINT64(device, state, name, appHintDefault, value) \ -+ os_get_km_apphint_UINT64(device, state, APPHINT_ID_ ## name, appHintDefault, value) -+ -+#define OSGetKMAppHintBOOL(device, state, name, appHintDefault, value) \ -+ os_get_km_apphint_BOOL(device, state, APPHINT_ID_ ## name, appHintDefault, value) -+ -+#define OSGetKMAppHintSTRING(device, state, name, appHintDefault, buffer, size) \ -+ os_get_km_apphint_STRING(device, state, APPHINT_ID_ ## name, appHintDefault, buffer, size) -+ -+ -+#define OSCreateKMAppHintState(state) \ -+ PVR_UNREFERENCED_PARAMETER(state) -+ -+#define OSFreeKMAppHintState(state) \ -+ PVR_UNREFERENCED_PARAMETER(state) -+ -+#else /* defined(__linux__) && !defined(DOXYGEN) */ -+ -+/**************************************************************************/ /*! -+@def OSGetKMAppHintUINT32(state, name, appHintDefault, value) -+@Description Interface for retrieval of uint32 km app hint. -+ For non-linux operating systems, this macro implements a call -+ from server code to PVRSRVGetAppHint() declared in -+ services_client_porting.h, effectively making it 'shared' code. -+@Input device Device node -+@Input state App hint state -+@Input name Name used to identify app hint -+@Input appHintDefault Default value to be returned if no -+ app hint is found. -+@Output value Pointer to returned app hint value. -+ */ /**************************************************************************/ -+#define OSGetKMAppHintUINT32(device, state, name, appHintDefault, value) \ -+ PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value) -+ -+/**************************************************************************/ /*! -+@def OSGetKMAppHintUINT64(state, name, appHintDefault, value) -+@Description Interface for retrieval of uint64 km app hint. -+ For non-linux operating systems, this macro implements a call -+ from server code to PVRSRVGetAppHint() declared in -+ services_client_porting.h, effectively making it 'shared' code. -+@Input device Device node -+@Input state App hint state -+@Input name Name used to identify app hint -+@Input appHintDefault Default value to be returned if no -+ app hint is found. -+@Output value Pointer to returned app hint value. 
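On Linux the macros above collapse to pvr_apphint_get_*() lookups, so server code only ever deals with the OS-neutral form. A hypothetical usage sketch follows; "ExampleHint" stands in for a real AppHint name and everything else is illustrative.

static IMG_UINT32 ExampleReadHint(void)
{
    void *pvAppHintState = NULL;
    IMG_UINT32 ui32Default = 0;
    IMG_UINT32 ui32Value = 0;

    OSCreateKMAppHintState(&pvAppHintState);

    /* Server-wide hint, so no device node is passed. */
    OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState,
                         ExampleHint, &ui32Default, &ui32Value);

    OSFreeKMAppHintState(pvAppHintState);

    return ui32Value;
}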
-+ */ /**************************************************************************/ -+#define OSGetKMAppHintUINT64(device, state, name, appHintDefault, value) \ -+ PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value) -+ -+/**************************************************************************/ /*! -+@def OSGetKMAppHintBOOL(state, name, appHintDefault, value) -+@Description Interface for retrieval of IMG_BOOL km app hint. -+ For non-linux operating systems, this macro implements a call -+ from server code to PVRSRVGetAppHint() declared in -+ services_client_porting.h, effectively making it 'shared' code. -+@Input device Device node -+@Input state App hint state -+@Input name Name used to identify app hint -+@Input appHintDefault Default value to be returned if no -+ app hint is found. -+@Output value Pointer to returned app hint value. -+ */ /**************************************************************************/ -+#define OSGetKMAppHintBOOL(device, state, name, appHintDefault, value) \ -+ PVRSRVGetAppHint(state, # name, IMG_BOOL_TYPE, appHintDefault, value) -+ -+/**************************************************************************/ /*! -+@def OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size) -+@Description Interface for retrieval of string km app hint. -+ For non-linux operating systems, this macro implements a call -+ from server code to PVRSRVGetAppHint() declared in -+ services_client_porting.h, effectively making it 'shared' code. -+@Input device Device node -+@Input state App hint state -+@Input name Name used to identify app hint -+@Input appHintDefault Default value to be returned if no -+ app hint is found. -+@Output buffer Buffer used to return app hint string. -+@Input size Size of the buffer. -+ */ /**************************************************************************/ -+#define OSGetKMAppHintSTRING(device, state, name, appHintDefault, buffer, size) \ -+ (PVR_UNREFERENCED_PARAMETER(size), PVRSRVGetAppHint(state, # name, IMG_STRING_TYPE, appHintDefault, buffer)) -+ -+/**************************************************************************/ /*! -+@def OSCreateKMAppHintState(state) -+@Description Creates the app hint state. -+ For non-linux operating systems, this macro implements a call -+ from server code to PVRSRVCreateAppHintState() declared in -+ services_client_porting.h, effectively making it 'shared' code. -+@Output state App hint state -+ */ /**************************************************************************/ -+#define OSCreateKMAppHintState(state) \ -+ PVRSRVCreateAppHintState(IMG_SRV_UM, 0, state) -+ -+/**************************************************************************/ /*! -+@def OSFreeKMAppHintState -+@Description Free the app hint state. -+ For non-linux operating systems, this macro implements a call -+ from server code to PVRSRVCreateAppHintState() declared in -+ services_client_porting.h, effectively making it 'shared' code. 
-+@Output state App hint state -+ */ /**************************************************************************/ -+#define OSFreeKMAppHintState(state) \ -+ PVRSRVFreeAppHintState(IMG_SRV_UM, state) -+ -+#endif /* defined(__linux__) */ -+ -+#endif /* OSKM_APPHINT_H */ -+ -+/****************************************************************************** -+ End of file (oskm_apphint.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/osmmap.h b/drivers/gpu/drm/img-rogue/osmmap.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/osmmap.h -@@ -0,0 +1,115 @@ -+/*************************************************************************/ /*! -+@File -+@Title OS Interface for mapping PMRs into CPU space. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description OS abstraction for the mmap2 interface for mapping PMRs into -+ User Mode memory -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef OSMMAP_H -+#define OSMMAP_H -+ -+#include -+ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_memallocflags.h" -+ -+/*************************************************************************/ /*! -+@Function OSMMapPMR -+@Description Maps the specified PMR into CPU memory so that it may be -+ accessed by the user process. 
-+ Whether the memory is mapped read only, read/write, or not at -+ all, is dependent on the PMR itself. -+ The PMR handle is opaque to the user, and lower levels of this -+ stack ensure that the handle is private to this process, such -+ that this API cannot be abused to gain access to other people's -+ PMRs. The OS implementation of this function should return the -+ virtual address and length for the User to use. The "PrivData" -+ is to be stored opaquely by the caller (N.B. he should make no -+ assumptions, in particular, NULL is a valid handle) and given -+ back to the call to OSMUnmapPMR. -+ The OS implementation is free to use the PrivData handle for -+ any purpose it sees fit. -+@Input hBridge The bridge handle. -+@Input hPMR The handle of the PMR to be mapped. -+@Input uiPMRLength The size of the PMR. -+@Input uiFlags Flags indicating how the mapping should -+ be done (read-only, etc). These may not -+ be honoured if the PMR does not permit -+ them. -+@Output phOSMMapPrivDataOut Returned private data. -+@Output ppvMappingAddressOut The returned mapping. -+@Output puiMappingLengthOut The size of the returned mapping. -+@Return PVRSRV_OK on success, failure code otherwise. -+ */ /*************************************************************************/ -+PVRSRV_ERROR -+OSMMapPMR(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, -+ IMG_DEVMEM_SIZE_T uiPMRLength, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_HANDLE *phOSMMapPrivDataOut, -+ void **ppvMappingAddressOut, -+ size_t *puiMappingLengthOut); -+ -+/*************************************************************************/ /*! -+@Function OSMUnmapPMR -+@Description Unmaps the specified PMR from CPU memory. -+ This function is the counterpart to OSMMapPMR. -+ The caller is required to pass the PMR handle back in along -+ with the same 3-tuple of information that was returned by the -+ call to OSMMapPMR in phOSMMapPrivDataOut. -+ It is possible to unmap only part of the original mapping -+ with this call, by specifying only the address range to be -+ unmapped in pvMappingAddress and uiMappingLength. -+@Input hBridge The bridge handle. -+@Input hPMR The handle of the PMR to be unmapped. -+@Input hOSMMapPrivData The OS private data of the mapping. -+@Input pvMappingAddress The address to be unmapped. -+@Input uiMappingLength The size to be unmapped. -+@Return PVRSRV_OK on success, failure code otherwise. -+ */ /*************************************************************************/ -+void -+OSMUnmapPMR(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, -+ IMG_HANDLE hOSMMapPrivData, -+ void *pvMappingAddress, -+ size_t uiMappingLength); -+ -+#endif /* OSMMAP_H */ -diff --git a/drivers/gpu/drm/img-rogue/osmmap_stub.c b/drivers/gpu/drm/img-rogue/osmmap_stub.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/osmmap_stub.c -@@ -0,0 +1,146 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device Memory Management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description OS abstraction for the mmap2 interface for mapping PMRs into -+ User Mode memory -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
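A hypothetical caller of the OSMMapPMR()/OSMUnmapPMR() pair declared above; the bridge and PMR handles, the size and the flags are assumed to come from the usual device-memory paths and are illustrative only.

static PVRSRV_ERROR ExampleMapPMRAndUse(IMG_HANDLE hBridge,
                                        IMG_HANDLE hPMR,
                                        IMG_DEVMEM_SIZE_T uiPMRSize,
                                        PVRSRV_MEMALLOCFLAGS_T uiFlags)
{
    IMG_HANDLE hPrivData = NULL;
    void *pvAddress = NULL;
    size_t uiLength = 0;
    PVRSRV_ERROR eError;

    eError = OSMMapPMR(hBridge, hPMR, uiPMRSize, uiFlags,
                       &hPrivData, &pvAddress, &uiLength);
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    /* ... access pvAddress for up to uiLength bytes ... */

    /* Hand back exactly the 3-tuple that OSMMapPMR returned. */
    OSMUnmapPMR(hBridge, hPMR, hPrivData, pvAddress, uiLength);

    return PVRSRV_OK;
}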
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+/* our exported API */ -+#include "osmmap.h" -+ -+/* include/ */ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include "pvrsrv_error.h" -+ -+/* services/include/ */ -+ -+/* services/include/srvhelper/ */ -+#include "ra.h" -+ -+#include "pmr.h" -+ -+PVRSRV_ERROR -+OSMMapPMR(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, -+ IMG_DEVMEM_SIZE_T uiPMRSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_HANDLE *phOSMMapPrivDataOut, -+ void **ppvMappingAddressOut, -+ size_t *puiMappingLengthOut) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psPMR; -+ void *pvKernelAddress; -+ size_t uiLength; -+ IMG_HANDLE hPriv; -+ -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ PVR_UNREFERENCED_PARAMETER(uiFlags); -+ -+ /* -+ Normally this function would mmap a PMR into the memory space of -+ user process, but in this case we're taking a PMR and mapping it -+ into kernel virtual space. 
We keep the same function name for -+ symmetry as this allows the higher layers of the software stack -+ to not care whether they are user mode or kernel -+ */ -+ -+ psPMR = hPMR; -+ -+ if (PMR_IsSparse(psPMR)) -+ { -+ eError = PMRAcquireSparseKernelMappingData(psPMR, -+ 0, -+ 0, -+ &pvKernelAddress, -+ &uiLength, -+ &hPriv); -+ } -+ else -+ { -+ eError = PMRAcquireKernelMappingData(psPMR, -+ 0, -+ 0, -+ &pvKernelAddress, -+ &uiLength, -+ &hPriv); -+ } -+ if (eError != PVRSRV_OK) -+ { -+ goto e0; -+ } -+ -+ *phOSMMapPrivDataOut = hPriv; -+ *ppvMappingAddressOut = pvKernelAddress; -+ *puiMappingLengthOut = uiLength; -+ -+ /* MappingLength might be rounded up to page size */ -+ PVR_ASSERT(*puiMappingLengthOut >= uiPMRSize); -+ -+ return PVRSRV_OK; -+ -+ /* -+ error exit paths follow -+ */ -+ -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+void -+OSMUnmapPMR(IMG_HANDLE hBridge, -+ IMG_HANDLE hPMR, -+ IMG_HANDLE hOSMMapPrivData, -+ void *pvMappingAddress, -+ size_t uiMappingLength) -+{ -+ PMR *psPMR; -+ -+ PVR_UNREFERENCED_PARAMETER(hBridge); -+ PVR_UNREFERENCED_PARAMETER(pvMappingAddress); -+ PVR_UNREFERENCED_PARAMETER(uiMappingLength); -+ -+ psPMR = hPMR; -+ PMRReleaseKernelMappingData(psPMR, -+ hOSMMapPrivData); -+} -diff --git a/drivers/gpu/drm/img-rogue/ospvr_gputrace.h b/drivers/gpu/drm/img-rogue/ospvr_gputrace.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/ospvr_gputrace.h -@@ -0,0 +1,183 @@ -+/*************************************************************************/ /*! -+@File ospvr_gputrace.h -+@Title PVR GPU Trace module common environment interface -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PVR_GPUTRACE_H_ -+#define PVR_GPUTRACE_H_ -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "rgx_hwperf.h" -+#include "rgxdevice.h" -+#include "device.h" -+ -+#if defined(__linux__) && defined(CONFIG_EVENT_TRACING) -+ -+/* Early initialisation of GPU Trace events logic. -+ * This function is called on *driver* initialisation. */ -+PVRSRV_ERROR PVRGpuTraceSupportInit(void); -+ -+/* GPU Trace resources final cleanup. -+ * This function is called on driver de-initialisation. */ -+void PVRGpuTraceSupportDeInit(void); -+ -+/* Initialisation for AppHints callbacks. -+ * This function is called during the late stage of driver initialisation but -+ * before the device initialisation but after the debugfs sub-system has been -+ * initialised. */ -+void PVRGpuTraceInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+/* Per-device initialisation of the GPU Trace resources */ -+PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+/* TL Initialisation of FTrace */ -+PVRSRV_ERROR PVRGpuTraceInitStream(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+/* Per-device cleanup for the GPU Trace resources. */ -+void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+/* Enables the gpu trace sub-system for a given device. */ -+PVRSRV_ERROR PVRGpuTraceSetEnabled( -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_BOOL bNewValue); -+ -+/* Performs some initialisation steps if the feature was enabled -+ * on driver startup. 
*/ -+void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+/* FTrace events callbacks interface */ -+ -+void PVRGpuTraceEnableUfoCallback(void); -+void PVRGpuTraceDisableUfoCallback(void); -+ -+void PVRGpuTraceEnableFirmwareActivityCallback(void); -+void PVRGpuTraceDisableFirmwareActivityCallback(void); -+ -+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -+PVRSRV_ERROR -+PVRSRVGpuTraceWorkPeriodEventStatsRegister(IMG_HANDLE* -+ phGpuWorkPeriodEventStats); -+void PVRSRVGpuTraceWorkPeriodEventStatsUnregister( -+ IMG_HANDLE hGpuWorkPeriodEventStats); -+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ -+ -+#else /* defined(__linux__) && defined(CONFIG_EVENT_TRACING) */ -+ -+static inline PVRSRV_ERROR PVRGpuTraceSupportInit(void) { -+ return PVRSRV_OK; -+} -+ -+static inline void PVRGpuTraceSupportDeInit(void) {} -+ -+static inline void PVRGpuTraceInitAppHintCallbacks( -+ const PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+} -+ -+static inline PVRSRV_ERROR PVRGpuTraceInitDevice( -+ PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ return PVRSRV_OK; -+} -+ -+static inline PVRSRV_ERROR PVRGpuTraceInitStream(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDevInfo); -+ return PVRSRV_OK; -+} -+ -+static inline void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+} -+ -+static inline PVRSRV_ERROR PVRGpuTraceSetEnabled( -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_BOOL bNewValue) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(bNewValue); -+ return PVRSRV_OK; -+} -+ -+static inline void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+} -+ -+static inline void PVRGpuTraceEnableUfoCallback(void) {} -+static inline void PVRGpuTraceDisableUfoCallback(void) {} -+ -+static inline void PVRGpuTraceEnableFirmwareActivityCallback(void) {} -+static inline void PVRGpuTraceDisableFirmwareActivityCallback(void) {} -+ -+#endif /* defined(__linux__) && defined(CONFIG_EVENT_TRACING) */ -+ -+#if defined(__linux__) && defined(CONFIG_EVENT_TRACING) && defined(PVRSRV_TRACE_ROGUE_EVENTS) -+ -+void PVRGpuTraceEnqueueEvent( -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT32 ui32FirmwareCtx, -+ IMG_UINT32 ui32ExternalJobRef, -+ IMG_UINT32 ui32InternalJobRef, -+ RGX_HWPERF_KICK_TYPE eKickType); -+ -+#else -+ -+static inline void PVRGpuTraceEnqueueEvent( -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT32 ui32FirmwareCtx, -+ IMG_UINT32 ui32ExternalJobRef, -+ IMG_UINT32 ui32InternalJobRef, -+ RGX_HWPERF_KICK_TYPE eKickType) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDevNode); -+ PVR_UNREFERENCED_PARAMETER(ui32FirmwareCtx); -+ PVR_UNREFERENCED_PARAMETER(ui32ExternalJobRef); -+ PVR_UNREFERENCED_PARAMETER(ui32InternalJobRef); -+ PVR_UNREFERENCED_PARAMETER(eKickType); -+} -+ -+#endif /* defined(__linux__) && defined(CONFIG_EVENT_TRACING) && defined(PVRSRV_TRACE_ROGUE_EVENTS) */ -+ -+#endif /* PVR_GPUTRACE_H_ */ -diff --git a/drivers/gpu/drm/img-rogue/pci_support.c b/drivers/gpu/drm/img-rogue/pci_support.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pci_support.c -@@ -0,0 +1,726 @@ -+/*************************************************************************/ /*! -+@File -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include -+#include -+ -+#if defined(CONFIG_MTRR) -+#include -+#endif -+ -+#include "pci_support.h" -+#include "allocmem.h" -+ -+typedef struct _PVR_PCI_DEV_TAG -+{ -+ struct pci_dev *psPCIDev; -+ HOST_PCI_INIT_FLAGS ePCIFlags; -+ IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE]; -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) -+ int iMTRR[DEVICE_COUNT_RESOURCE]; -+#endif -+} PVR_PCI_DEV; -+ -+/*************************************************************************/ /*! -+@Function OSPCISetDev -+@Description Set a PCI device for subsequent use. 
-+@Input pvPCICookie Pointer to OS specific PCI structure -+@Input eFlags Flags -+@Return PVRSRV_PCI_DEV_HANDLE Pointer to PCI device handle -+*/ /**************************************************************************/ -+PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags) -+{ -+ int err; -+ IMG_UINT32 i; -+ PVR_PCI_DEV *psPVRPCI; -+ -+ psPVRPCI = OSAllocMem(sizeof(*psPVRPCI)); -+ if (psPVRPCI == NULL) -+ { -+ printk(KERN_ERR "OSPCISetDev: Couldn't allocate PVR PCI structure\n"); -+ return NULL; -+ } -+ -+ psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie; -+ psPVRPCI->ePCIFlags = eFlags; -+ -+ err = pci_enable_device(psPVRPCI->psPCIDev); -+ if (err != 0) -+ { -+ printk(KERN_ERR "OSPCISetDev: Couldn't enable device (%d)\n", err); -+ OSFreeMem(psPVRPCI); -+ return NULL; -+ } -+ -+ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ -+ { -+ pci_set_master(psPVRPCI->psPCIDev); -+ } -+ -+ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) /* PRQA S 3358 */ /* misuse of enums */ -+ { -+#if defined(CONFIG_PCI_MSI) -+ err = pci_enable_msi(psPVRPCI->psPCIDev); -+ if (err != 0) -+ { -+ printk(KERN_ERR "OSPCISetDev: Couldn't enable MSI (%d)", err); -+ psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI; /* PRQA S 1474,3358,4130 */ /* misuse of enums */ -+ } -+#else -+ printk(KERN_ERR "OSPCISetDev: MSI support not enabled in the kernel"); -+#endif -+ } -+ -+ /* Initialise the PCI resource and MTRR tracking array */ -+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) -+ { -+ psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE; -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) -+ psPVRPCI->iMTRR[i] = -1; -+#endif -+ } -+ -+ return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI; -+} -+ -+/*************************************************************************/ /*! -+@Function OSPCIAcquireDev -+@Description Acquire a PCI device for subsequent use. -+@Input ui16VendorID Vendor PCI ID -+@Input ui16DeviceID Device PCI ID -+@Input eFlags Flags -+@Return PVRSRV_PCI_DEV_HANDLE Pointer to PCI device handle -+*/ /**************************************************************************/ -+PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, -+ IMG_UINT16 ui16DeviceID, -+ HOST_PCI_INIT_FLAGS eFlags) -+{ -+ struct pci_dev *psPCIDev; -+ -+ psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL); -+ if (psPCIDev == NULL) -+ { -+ return NULL; -+ } -+ -+ return OSPCISetDev((void *)psPCIDev, eFlags); -+} -+ -+/*************************************************************************/ /*! -+@Function OSPCIIRQ -+@Description Get the interrupt number for the device. -+@Input hPVRPCI PCI device handle -+@Output pui16DeviceID Pointer to where the interrupt number -+ should be returned -+@Return PVRSRV_ERROR Services error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ) -+{ -+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; -+ -+ if (pui32IRQ == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ *pui32IRQ = psPVRPCI->psPCIDev->irq; -+ -+ return PVRSRV_OK; -+} -+ -+/* Functions supported by OSPCIAddrRangeFunc */ -+enum HOST_PCI_ADDR_RANGE_FUNC -+{ -+ HOST_PCI_ADDR_RANGE_FUNC_LEN, -+ HOST_PCI_ADDR_RANGE_FUNC_START, -+ HOST_PCI_ADDR_RANGE_FUNC_END, -+ HOST_PCI_ADDR_RANGE_FUNC_REQUEST, -+ HOST_PCI_ADDR_RANGE_FUNC_RELEASE -+}; -+ -+/*************************************************************************/ /*! 
-+@Function OSPCIAddrRangeFunc -+@Description Internal support function for various address range related -+ functions -+@Input eFunc Function to perform -+@Input hPVRPCI PCI device handle -+@Input ui32Index Address range index -+@Return IMG_UINT32 Function dependent value -+*/ /**************************************************************************/ -+static IMG_UINT64 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc, -+ PVRSRV_PCI_DEV_HANDLE hPVRPCI, -+ IMG_UINT32 ui32Index) -+{ -+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; -+ -+ if (ui32Index >= DEVICE_COUNT_RESOURCE) -+ { -+ printk(KERN_ERR "OSPCIAddrRangeFunc: Index out of range"); -+ return 0; -+ } -+ -+ switch (eFunc) -+ { -+ case HOST_PCI_ADDR_RANGE_FUNC_LEN: -+ { -+ return pci_resource_len(psPVRPCI->psPCIDev, ui32Index); -+ } -+ case HOST_PCI_ADDR_RANGE_FUNC_START: -+ { -+ return pci_resource_start(psPVRPCI->psPCIDev, ui32Index); -+ } -+ case HOST_PCI_ADDR_RANGE_FUNC_END: -+ { -+ return pci_resource_end(psPVRPCI->psPCIDev, ui32Index); -+ } -+ case HOST_PCI_ADDR_RANGE_FUNC_REQUEST: -+ { -+ int err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, PVRSRV_MODNAME); -+ if (err != 0) -+ { -+ printk(KERN_ERR "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err); -+ return 0; -+ } -+ psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE; -+ return 1; -+ } -+ case HOST_PCI_ADDR_RANGE_FUNC_RELEASE: -+ { -+ if (psPVRPCI->abPCIResourceInUse[ui32Index]) -+ { -+ pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index); -+ psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE; -+ } -+ return 1; -+ } -+ default: -+ { -+ printk(KERN_ERR "OSPCIAddrRangeFunc: Unknown function"); -+ break; -+ } -+ } -+ -+ return 0; -+} -+ -+/*************************************************************************/ /*! -+@Function OSPCIAddrRangeLen -+@Description Returns length of a given address range -+@Input hPVRPCI PCI device handle -+@Input ui32Index Address range index -+@Return IMG_UINT32 Length of address range or 0 if no -+ such range -+*/ /**************************************************************************/ -+IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) -+{ -+ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index); -+} -+ -+/*************************************************************************/ /*! -+@Function OSPCIAddrRangeStart -+@Description Returns the start of a given address range -+@Input hPVRPCI PCI device handle -+@Input ui32Index Address range index -+@Return IMG_UINT32 Start of address range or 0 if no -+ such range -+*/ /**************************************************************************/ -+IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) -+{ -+ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index); -+} -+ -+/*************************************************************************/ /*! -+@Function OSPCIAddrRangeEnd -+@Description Returns the end of a given address range -+@Input hPVRPCI PCI device handle -+@Input ui32Index Address range index -+@Return IMG_UINT32 End of address range or 0 if no such -+ range -+*/ /**************************************************************************/ -+IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) -+{ -+ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index); -+} -+ -+/*************************************************************************/ /*! 
-+@Function OSPCIRequestAddrRange -+@Description Request a given address range index for subsequent use -+@Input hPVRPCI PCI device handle -+@Input ui32Index Address range index -+@Return PVRSRV_ERROR Services error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, -+ IMG_UINT32 ui32Index) -+{ -+ if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0) -+ { -+ return PVRSRV_ERROR_PCI_CALL_FAILED; -+ } -+ else -+ { -+ return PVRSRV_OK; -+ } -+} -+ -+/*************************************************************************/ /*! -+@Function OSPCIReleaseAddrRange -+@Description Release a given address range that is no longer being used -+@Input hPVRPCI PCI device handle -+@Input ui32Index Address range index -+@Return PVRSRV_ERROR Services error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) -+{ -+ if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0) -+ { -+ return PVRSRV_ERROR_PCI_CALL_FAILED; -+ } -+ else -+ { -+ return PVRSRV_OK; -+ } -+} -+ -+/*************************************************************************/ /*! -+@Function OSPCIRequestAddrRegion -+@Description Request a given region from an address range for subsequent use -+@Input hPVRPCI PCI device handle -+@Input ui32Index Address range index -+@Input uiOffset Offset into the address range that forms -+ the start of the region -+@Input uiLength Length of the region -+@Return PVRSRV_ERROR Services error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, -+ IMG_UINT32 ui32Index, -+ IMG_UINT64 uiOffset, -+ IMG_UINT64 uiLength) -+{ -+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; -+ resource_size_t start; -+ resource_size_t end; -+ -+ start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); -+ end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index); -+ -+ /* Check that the requested region is valid */ -+ if ((start + uiOffset + uiLength - 1) > end) -+ { -+ return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH; -+ } -+ -+ if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO) -+ { -+ if (request_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL) -+ { -+ return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE; -+ } -+ } -+ else -+ { -+ if (request_mem_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL) -+ { -+ return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE; -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! 
-+@Function OSPCIReleaseAddrRegion -+@Description Release a given region, from an address range, that is no -+ longer in use -+@Input hPVRPCI PCI device handle -+@Input ui32Index Address range index -+@Input ui32Offset Offset into the address range that forms -+ the start of the region -+@Input ui32Length Length of the region -+@Return PVRSRV_ERROR Services error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, -+ IMG_UINT32 ui32Index, -+ IMG_UINT64 uiOffset, -+ IMG_UINT64 uiLength) -+{ -+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; -+ resource_size_t start; -+ resource_size_t end; -+ -+ start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); -+ end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index); -+ -+ /* Check that the region is valid */ -+ if ((start + uiOffset + uiLength - 1) > end) -+ { -+ return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH; -+ } -+ -+ if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO) -+ { -+ release_region(start + uiOffset, uiLength); -+ } -+ else -+ { -+ release_mem_region(start + uiOffset, uiLength); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function OSPCIReleaseDev -+@Description Release a PCI device that is no longer being used -+@Input hPVRPCI PCI device handle -+@Return PVRSRV_ERROR Services error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) -+{ -+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; -+ int i; -+ -+ /* Release all PCI regions that are currently in use */ -+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) -+ { -+ if (psPVRPCI->abPCIResourceInUse[i]) -+ { -+ pci_release_region(psPVRPCI->psPCIDev, i); -+ psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE; -+ } -+ } -+ -+#if defined(CONFIG_PCI_MSI) -+ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) /* PRQA S 3358 */ /* misuse of enums */ -+ { -+ pci_disable_msi(psPVRPCI->psPCIDev); -+ } -+#endif -+ -+ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ -+ { -+ pci_clear_master(psPVRPCI->psPCIDev); -+ } -+ -+ pci_disable_device(psPVRPCI->psPCIDev); -+ -+ OSFreeMem(psPVRPCI); -+ /*not nulling pointer, copy on stack*/ -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! 
-+@Function OSPCISuspendDev -+@Description Prepare PCI device to be turned off by power management -+@Input hPVRPCI PCI device handle -+@Return PVRSRV_ERROR Services error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) -+{ -+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; -+ int i; -+ int err; -+ -+ /* Release all PCI regions that are currently in use */ -+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) -+ { -+ if (psPVRPCI->abPCIResourceInUse[i]) -+ { -+ pci_release_region(psPVRPCI->psPCIDev, i); -+ } -+ } -+ -+ err = pci_save_state(psPVRPCI->psPCIDev); -+ if (err != 0) -+ { -+ printk(KERN_ERR "OSPCISuspendDev: pci_save_state_failed (%d)", err); -+ return PVRSRV_ERROR_PCI_CALL_FAILED; -+ } -+ -+ pci_disable_device(psPVRPCI->psPCIDev); -+ -+ err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_SUSPEND)); -+ switch (err) -+ { -+ case 0: -+ break; -+ case -EIO: -+ printk(KERN_ERR "OSPCISuspendDev: device doesn't support PCI PM"); -+ break; -+ case -EINVAL: -+ printk(KERN_ERR "OSPCISuspendDev: can't enter requested power state"); -+ break; -+ default: -+ printk(KERN_ERR "OSPCISuspendDev: pci_set_power_state failed (%d)", err); -+ break; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function OSPCIResumeDev -+@Description Prepare a PCI device to be resumed by power management -+@Input hPVRPCI PCI device handle -+@Return PVRSRV_ERROR Services error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) -+{ -+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; -+ int err; -+ int i; -+ -+ err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON)); -+ switch (err) -+ { -+ case 0: -+ break; -+ case -EIO: -+ printk(KERN_ERR "OSPCIResumeDev: device doesn't support PCI PM"); -+ break; -+ case -EINVAL: -+ printk(KERN_ERR "OSPCIResumeDev: can't enter requested power state"); -+ return PVRSRV_ERROR_UNKNOWN_POWER_STATE; -+ default: -+ printk(KERN_ERR "OSPCIResumeDev: pci_set_power_state failed (%d)", err); -+ return PVRSRV_ERROR_UNKNOWN_POWER_STATE; -+ } -+ -+ pci_restore_state(psPVRPCI->psPCIDev); -+ -+ err = pci_enable_device(psPVRPCI->psPCIDev); -+ if (err != 0) -+ { -+ printk(KERN_ERR "OSPCIResumeDev: Couldn't enable device (%d)", err); -+ return PVRSRV_ERROR_PCI_CALL_FAILED; -+ } -+ -+ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ -+ pci_set_master(psPVRPCI->psPCIDev); -+ -+ /* Restore the PCI resource tracking array */ -+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) -+ { -+ if (psPVRPCI->abPCIResourceInUse[i]) -+ { -+ err = pci_request_region(psPVRPCI->psPCIDev, i, PVRSRV_MODNAME); -+ if (err != 0) -+ { -+ printk(KERN_ERR "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)", i, err); -+ } -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function OSPCIGetVendorDeviceIDs -+@Description Retrieve PCI vendor ID and device ID. 
-+@Input hPVRPCI PCI device handle -+@Output pui16VendorID Vendor ID -+@Output pui16DeviceID Device ID -+@Return PVRSRV_ERROR Services error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSPCIGetVendorDeviceIDs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, -+ IMG_UINT16 *pui16VendorID, -+ IMG_UINT16 *pui16DeviceID) -+{ -+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; -+ struct pci_dev *psPCIDev; -+ -+ if (psPVRPCI == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psPCIDev = psPVRPCI->psPCIDev; -+ if (psPCIDev == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ *pui16VendorID = psPCIDev->vendor; -+ *pui16DeviceID = psPCIDev->device; -+ -+ return PVRSRV_OK; -+} -+ -+#if defined(CONFIG_MTRR) -+ -+/*************************************************************************/ /*! -+@Function OSPCIClearResourceMTRRs -+@Description Clear any BIOS-configured MTRRs for a PCI memory region -+@Input hPVRPCI PCI device handle -+@Input ui32Index Address range index -+@Return PVRSRV_ERROR Services error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) -+{ -+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; -+ resource_size_t start, end; -+ int res; -+ -+ start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); -+ end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1; -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) -+ res = arch_io_reserve_memtype_wc(start, end - start); -+ if (res) -+ { -+ return PVRSRV_ERROR_PCI_CALL_FAILED; -+ } -+#endif -+ res = arch_phys_wc_add(start, end - start); -+ if (res < 0) -+ { -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) -+ arch_io_free_memtype_wc(start, end - start); -+#endif -+ -+ return PVRSRV_ERROR_PCI_CALL_FAILED; -+ } -+ psPVRPCI->iMTRR[ui32Index] = res; -+#else -+ -+ res = mtrr_add(start, end - start, MTRR_TYPE_UNCACHABLE, 0); -+ if (res < 0) -+ { -+ printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res); -+ return PVRSRV_ERROR_PCI_CALL_FAILED; -+ } -+ -+ res = mtrr_del(res, start, end - start); -+ if (res < 0) -+ { -+ printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res); -+ return PVRSRV_ERROR_PCI_CALL_FAILED; -+ } -+ -+ /* Workaround for overlapping MTRRs. */ -+ { -+ IMG_BOOL bGotMTRR0 = IMG_FALSE; -+ -+ /* Current mobo BIOSes will normally set up a WRBACK MTRR spanning -+ * 0->4GB, and then another 4GB->6GB. If the PCI card's automatic & -+ * overlapping UNCACHABLE MTRR is deleted, we see WRBACK behaviour. -+ * -+ * WRBACK is incompatible with some PCI devices, so try to split -+ * the UNCACHABLE regions up and insert a WRCOMB region instead. -+ */ -+ res = mtrr_add(start, end - start, MTRR_TYPE_WRBACK, 0); -+ if (res < 0) -+ { -+ /* If this fails, services has probably run before and created -+ * a write-combined MTRR for the test chip. Assume it has, and -+ * don't return an error here. 
-+ */ -+ return PVRSRV_OK; -+ } -+ -+ if (res == 0) -+ bGotMTRR0 = IMG_TRUE; -+ -+ res = mtrr_del(res, start, end - start); -+ if (res < 0) -+ { -+ printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res); -+ return PVRSRV_ERROR_PCI_CALL_FAILED; -+ } -+ -+ if (bGotMTRR0) -+ { -+ /* Replace 0 with a non-overlapping WRBACK MTRR */ -+ res = mtrr_add(0, start, MTRR_TYPE_WRBACK, 0); -+ if (res < 0) -+ { -+ printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res); -+ return PVRSRV_ERROR_PCI_CALL_FAILED; -+ } -+ -+ /* Add a WRCOMB MTRR for the PCI device memory bar */ -+ res = mtrr_add(start, end - start, MTRR_TYPE_WRCOMB, 0); -+ if (res < 0) -+ { -+ printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res); -+ return PVRSRV_ERROR_PCI_CALL_FAILED; -+ } -+ } -+ } -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function OSPCIReleaseResourceMTRRs -+@Description Release resources allocated by OSPCIClearResourceMTRRs -+@Input hPVRPCI PCI device handle -+@Input ui32Index Address range index -+*/ /**************************************************************************/ -+void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) -+{ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) -+ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; -+ -+ if (psPVRPCI->iMTRR[ui32Index] >= 0) -+ { -+ arch_phys_wc_del(psPVRPCI->iMTRR[ui32Index]); -+ psPVRPCI->iMTRR[ui32Index] = -1; -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) -+ { -+ resource_size_t start, end; -+ -+ start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); -+ end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1; -+ -+ arch_io_free_memtype_wc(start, end - start); -+ } -+#endif -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(hPVRPCI); -+ PVR_UNREFERENCED_PARAMETER(ui32Index); -+#endif -+} -+#endif /* defined(CONFIG_MTRR) */ -diff --git a/drivers/gpu/drm/img-rogue/pci_support.h b/drivers/gpu/drm/img-rogue/pci_support.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pci_support.h -@@ -0,0 +1,99 @@ -+/*************************************************************************/ /*! -+@File -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
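A sketch of one plausible bring-up/teardown ordering for the OSPCI helpers defined above; the vendor/device IDs, BAR index and the ExamplePCIProbe name are placeholders, the ioremap/IRQ hookup is elided, and the sequence is inferred from the per-function descriptions rather than taken from a real caller:

#include "pci_support.h"     /* OSPCI* helpers; pulls in img_types.h, pvrsrv_error.h */

#define EXAMPLE_VENDOR_ID  0x1010   /* placeholder */
#define EXAMPLE_DEVICE_ID  0x0001   /* placeholder */
#define EXAMPLE_REG_BAR    0        /* placeholder BAR index */

static PVRSRV_ERROR ExamplePCIProbe(void)
{
	PVRSRV_PCI_DEV_HANDLE hPCI;
	IMG_UINT32 ui32IRQ;
	IMG_UINT64 ui64RegsBase, ui64RegsLen;
	PVRSRV_ERROR eError;

	hPCI = OSPCIAcquireDev(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID,
	                       HOST_PCI_INIT_FLAG_BUS_MASTER | HOST_PCI_INIT_FLAG_MSI);
	if (hPCI == NULL)
	{
		return PVRSRV_ERROR_PCI_CALL_FAILED;
	}

	eError = OSPCIIRQ(hPCI, &ui32IRQ);
	if (eError != PVRSRV_OK)
	{
		(void) OSPCIReleaseDev(hPCI);
		return eError;
	}

	eError = OSPCIRequestAddrRange(hPCI, EXAMPLE_REG_BAR);
	if (eError != PVRSRV_OK)
	{
		(void) OSPCIReleaseDev(hPCI);
		return eError;
	}

	/* Clear any BIOS-configured MTRRs before mapping the BAR write-combined;
	 * this becomes a no-op stub when CONFIG_MTRR is not set. */
	(void) OSPCIClearResourceMTRRs(hPCI, EXAMPLE_REG_BAR);

	ui64RegsBase = OSPCIAddrRangeStart(hPCI, EXAMPLE_REG_BAR);
	ui64RegsLen  = OSPCIAddrRangeLen(hPCI, EXAMPLE_REG_BAR);

	/* ... map ui64RegsBase/ui64RegsLen and wire up ui32IRQ here ... */
	(void) ui64RegsBase;
	(void) ui64RegsLen;
	(void) ui32IRQ;

	/* Teardown mirrors the setup order. */
	OSPCIReleaseResourceMTRRs(hPCI, EXAMPLE_REG_BAR);
	(void) OSPCIReleaseAddrRange(hPCI, EXAMPLE_REG_BAR);
	return OSPCIReleaseDev(hPCI);
}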
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PCI_SUPPORT_H -+#define PCI_SUPPORT_H -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+ -+#if defined(__linux__) -+#include -+#define TO_PCI_COOKIE(dev) to_pci_dev((struct device *)(dev)) -+#else -+#define TO_PCI_COOKIE(dev) (dev) -+#endif -+ -+typedef enum _HOST_PCI_INIT_FLAGS_ -+{ -+ HOST_PCI_INIT_FLAG_BUS_MASTER = 0x00000001, -+ HOST_PCI_INIT_FLAG_MSI = 0x00000002, -+ HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff -+} HOST_PCI_INIT_FLAGS; -+ -+struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_; -+typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE; -+ -+PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags); -+PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags); -+PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); -+PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ); -+IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); -+IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); -+IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); -+PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); -+PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); -+PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength); -+PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength); -+PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); -+PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); -+PVRSRV_ERROR OSPCIGetVendorDeviceIDs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT16 *pui16VendorID, IMG_UINT16 *pui16DeviceID); -+ -+#if defined(CONFIG_MTRR) -+PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); -+void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); -+#else -+static inline PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) -+{ -+ PVR_UNREFERENCED_PARAMETER(hPVRPCI); -+ 
PVR_UNREFERENCED_PARAMETER(ui32Index); -+ return PVRSRV_OK; -+} -+ -+static inline void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) -+{ -+ PVR_UNREFERENCED_PARAMETER(hPVRPCI); -+ PVR_UNREFERENCED_PARAMETER(ui32Index); -+} -+#endif -+ -+#endif /* PCI_SUPPORT_H */ -diff --git a/drivers/gpu/drm/img-rogue/pdp/drm_pdp.mk b/drivers/gpu/drm/img-rogue/pdp/drm_pdp.mk -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pdp/drm_pdp.mk -@@ -0,0 +1,13 @@ -+drm_pdp-y += \ -+ ../apollo/drm_pdp_crtc.o \ -+ ../apollo/drm_pdp_debugfs.o \ -+ ../apollo/drm_pdp_drv.o \ -+ ../apollo/drm_pdp_dvi.o \ -+ ../apollo/drm_pdp_fb.o \ -+ ../apollo/drm_pdp_gem.o \ -+ ../apollo/drm_pdp_modeset.o \ -+ ../apollo/drm_pdp_plane.o \ -+ ../apollo/drm_pdp_tmds.o \ -+ ../apollo/pdp_apollo.o \ -+ ../apollo/pdp_odin.o \ -+ ../apollo/pdp_plato.o -diff --git a/drivers/gpu/drm/img-rogue/pdp2_mmu_regs.h b/drivers/gpu/drm/img-rogue/pdp2_mmu_regs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pdp2_mmu_regs.h -@@ -0,0 +1,764 @@ -+/*************************************************************************/ /*! -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+ -+#ifndef _PDP2_MMU_REGS_H -+#define _PDP2_MMU_REGS_H -+ -+/* Hardware register definitions */ -+ -+#define PDP_BIF_DIR_BASE_ADDR_OFFSET (0x0020) -+#define PDP_BIF_DIR_BASE_ADDR_STRIDE (4) -+#define PDP_BIF_DIR_BASE_ADDR_NO_ENTRIES (4) -+ -+/* PDP_BIF, DIR_BASE_ADDR, MMU_DIR_BASE_ADDR -+Base address in physical memory for MMU Directory n Entries. When MMU_ENABLE_EXT_ADDRESSING is '1', the bits 31:0 are assigned to the address 31+EXT_ADDR_RANGE:0+EXT_ADDR_RANGE, but then any address offset within a page is forced to 0. When MMU_ENABLE_EXT_ADDRESSING is '0', bits 31:12 are assigned to address 31:12 -+*/ -+#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_MASK (0xFFFFFFFF) -+#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_LSBMASK (0xFFFFFFFF) -+#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_SHIFT (0) -+#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_LENGTH (32) -+#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_TILE_CFG_OFFSET (0x0040) -+#define PDP_BIF_TILE_CFG_STRIDE (4) -+#define PDP_BIF_TILE_CFG_NO_ENTRIES (4) -+ -+/* PDP_BIF, TILE_CFG, TILE_128INTERLEAVE -+*/ -+#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_MASK (0x00000010) -+#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_LSBMASK (0x00000001) -+#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_SHIFT (4) -+#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_LENGTH (1) -+#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, TILE_CFG, TILE_ENABLE -+*/ -+#define PDP_BIF_TILE_CFG_TILE_ENABLE_MASK (0x00000008) -+#define PDP_BIF_TILE_CFG_TILE_ENABLE_LSBMASK (0x00000001) -+#define PDP_BIF_TILE_CFG_TILE_ENABLE_SHIFT (3) -+#define PDP_BIF_TILE_CFG_TILE_ENABLE_LENGTH (1) -+#define PDP_BIF_TILE_CFG_TILE_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, TILE_CFG, TILE_STRIDE -+*/ -+#define PDP_BIF_TILE_CFG_TILE_STRIDE_MASK (0x00000007) -+#define PDP_BIF_TILE_CFG_TILE_STRIDE_LSBMASK (0x00000007) -+#define PDP_BIF_TILE_CFG_TILE_STRIDE_SHIFT (0) -+#define PDP_BIF_TILE_CFG_TILE_STRIDE_LENGTH (3) -+#define PDP_BIF_TILE_CFG_TILE_STRIDE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_TILE_MIN_ADDR_OFFSET (0x0050) -+#define PDP_BIF_TILE_MIN_ADDR_STRIDE (4) -+#define PDP_BIF_TILE_MIN_ADDR_NO_ENTRIES (4) -+ -+/* PDP_BIF, TILE_MIN_ADDR, TILE_MIN_ADDR -+*/ -+#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_MASK (0xFFFFFFFF) -+#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_LSBMASK (0xFFFFFFFF) -+#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_SHIFT (0) -+#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_LENGTH (32) -+#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_TILE_MAX_ADDR_OFFSET (0x0060) -+#define PDP_BIF_TILE_MAX_ADDR_STRIDE (4) -+#define PDP_BIF_TILE_MAX_ADDR_NO_ENTRIES (4) -+ -+/* PDP_BIF, TILE_MAX_ADDR, TILE_MAX_ADDR -+*/ -+#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_MASK (0xFFFFFFFF) -+#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_LSBMASK (0xFFFFFFFF) -+#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_SHIFT (0) -+#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_LENGTH (32) -+#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_CONTROL0_OFFSET (0x0000) -+ -+/* PDP_BIF, CONTROL0, MMU_TILING_SCHEME -+*/ -+#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_MASK (0x00000001) -+#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_LSBMASK (0x00000001) -+#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_SHIFT (0) -+#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_LENGTH (1) -+#define 
PDP_BIF_CONTROL0_MMU_TILING_SCHEME_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONTROL0, MMU_CACHE_POLICY -+*/ -+#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_MASK (0x00000100) -+#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_LSBMASK (0x00000001) -+#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_SHIFT (8) -+#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_LENGTH (1) -+#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONTROL0, FORCE_CACHE_POLICY_BYPASS -+*/ -+#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_MASK (0x00000200) -+#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_LSBMASK (0x00000001) -+#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_SHIFT (9) -+#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_LENGTH (1) -+#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONTROL0, STALL_ON_PROTOCOL_FAULT -+*/ -+#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_MASK (0x00001000) -+#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_LSBMASK (0x00000001) -+#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_SHIFT (12) -+#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_LENGTH (1) -+#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_CONTROL1_OFFSET (0x0008) -+ -+/* PDP_BIF, CONTROL1, MMU_FLUSH0 -+*/ -+#define PDP_BIF_CONTROL1_MMU_FLUSH0_MASK (0x00000001) -+#define PDP_BIF_CONTROL1_MMU_FLUSH0_LSBMASK (0x00000001) -+#define PDP_BIF_CONTROL1_MMU_FLUSH0_SHIFT (0) -+#define PDP_BIF_CONTROL1_MMU_FLUSH0_LENGTH (1) -+#define PDP_BIF_CONTROL1_MMU_FLUSH0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONTROL1, MMU_FLUSH1 -+*/ -+#define PDP_BIF_CONTROL1_MMU_FLUSH1_MASK (0x00000002) -+#define PDP_BIF_CONTROL1_MMU_FLUSH1_LSBMASK (0x00000001) -+#define PDP_BIF_CONTROL1_MMU_FLUSH1_SHIFT (1) -+#define PDP_BIF_CONTROL1_MMU_FLUSH1_LENGTH (1) -+#define PDP_BIF_CONTROL1_MMU_FLUSH1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONTROL1, MMU_FLUSH2 -+*/ -+#define PDP_BIF_CONTROL1_MMU_FLUSH2_MASK (0x00000004) -+#define PDP_BIF_CONTROL1_MMU_FLUSH2_LSBMASK (0x00000001) -+#define PDP_BIF_CONTROL1_MMU_FLUSH2_SHIFT (2) -+#define PDP_BIF_CONTROL1_MMU_FLUSH2_LENGTH (1) -+#define PDP_BIF_CONTROL1_MMU_FLUSH2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONTROL1, MMU_FLUSH3 -+*/ -+#define PDP_BIF_CONTROL1_MMU_FLUSH3_MASK (0x00000008) -+#define PDP_BIF_CONTROL1_MMU_FLUSH3_LSBMASK (0x00000001) -+#define PDP_BIF_CONTROL1_MMU_FLUSH3_SHIFT (3) -+#define PDP_BIF_CONTROL1_MMU_FLUSH3_LENGTH (1) -+#define PDP_BIF_CONTROL1_MMU_FLUSH3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONTROL1, MMU_INVALDC0 -+*/ -+#define PDP_BIF_CONTROL1_MMU_INVALDC0_MASK (0x00000100) -+#define PDP_BIF_CONTROL1_MMU_INVALDC0_LSBMASK (0x00000001) -+#define PDP_BIF_CONTROL1_MMU_INVALDC0_SHIFT (8) -+#define PDP_BIF_CONTROL1_MMU_INVALDC0_LENGTH (1) -+#define PDP_BIF_CONTROL1_MMU_INVALDC0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONTROL1, MMU_INVALDC1 -+*/ -+#define PDP_BIF_CONTROL1_MMU_INVALDC1_MASK (0x00000200) -+#define PDP_BIF_CONTROL1_MMU_INVALDC1_LSBMASK (0x00000001) -+#define PDP_BIF_CONTROL1_MMU_INVALDC1_SHIFT (9) -+#define PDP_BIF_CONTROL1_MMU_INVALDC1_LENGTH (1) -+#define PDP_BIF_CONTROL1_MMU_INVALDC1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONTROL1, MMU_INVALDC2 -+*/ -+#define PDP_BIF_CONTROL1_MMU_INVALDC2_MASK (0x00000400) -+#define PDP_BIF_CONTROL1_MMU_INVALDC2_LSBMASK (0x00000001) -+#define PDP_BIF_CONTROL1_MMU_INVALDC2_SHIFT (10) -+#define PDP_BIF_CONTROL1_MMU_INVALDC2_LENGTH (1) -+#define PDP_BIF_CONTROL1_MMU_INVALDC2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONTROL1, MMU_INVALDC3 -+*/ 
-+#define PDP_BIF_CONTROL1_MMU_INVALDC3_MASK (0x00000800) -+#define PDP_BIF_CONTROL1_MMU_INVALDC3_LSBMASK (0x00000001) -+#define PDP_BIF_CONTROL1_MMU_INVALDC3_SHIFT (11) -+#define PDP_BIF_CONTROL1_MMU_INVALDC3_LENGTH (1) -+#define PDP_BIF_CONTROL1_MMU_INVALDC3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONTROL1, MMU_FAULT_CLEAR -+*/ -+#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_MASK (0x00010000) -+#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_LSBMASK (0x00000001) -+#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_SHIFT (16) -+#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_LENGTH (1) -+#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONTROL1, PROTOCOL_FAULT_CLEAR -+*/ -+#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_MASK (0x00100000) -+#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_LSBMASK (0x00000001) -+#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_SHIFT (20) -+#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_LENGTH (1) -+#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONTROL1, MMU_PAUSE_SET -+*/ -+#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_MASK (0x01000000) -+#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_LSBMASK (0x00000001) -+#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_SHIFT (24) -+#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_LENGTH (1) -+#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONTROL1, MMU_PAUSE_CLEAR -+*/ -+#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_MASK (0x02000000) -+#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_LSBMASK (0x00000001) -+#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_SHIFT (25) -+#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_LENGTH (1) -+#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONTROL1, MMU_SOFT_RESET -+*/ -+#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_MASK (0x10000000) -+#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_LSBMASK (0x00000001) -+#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_SHIFT (28) -+#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_LENGTH (1) -+#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_BANK_INDEX_OFFSET (0x0010) -+ -+/* PDP_BIF, BANK_INDEX, MMU_BANK_INDEX -+*/ -+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_MASK (0xC0000000) -+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_LSBMASK (0x00000003) -+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_SHIFT (30) -+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_LENGTH (2) -+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_SIGNED_FIELD IMG_FALSE -+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_NO_REPS (16) -+#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_SIZE (2) -+ -+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_OFFSET (0x0018) -+ -+/* PDP_BIF, REQUEST_PRIORITY_ENABLE, CMD_PRIORITY_ENABLE -+*/ -+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_MASK (0x00008000) -+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_LSBMASK (0x00000001) -+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_SHIFT (15) -+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_LENGTH (1) -+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_SIGNED_FIELD IMG_FALSE -+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_NO_REPS (16) -+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_SIZE (1) -+ -+/* PDP_BIF, REQUEST_PRIORITY_ENABLE, CMD_MMU_PRIORITY_ENABLE -+*/ -+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_MASK (0x00010000) -+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_LSBMASK (0x00000001) -+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_SHIFT (16) -+#define 
PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_LENGTH (1) -+#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_OFFSET (0x001C) -+ -+/* PDP_BIF, REQUEST_LIMITED_THROUGHPUT, LIMITED_WORDS -+*/ -+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_MASK (0x000003FF) -+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_LSBMASK (0x000003FF) -+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_SHIFT (0) -+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_LENGTH (10) -+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, REQUEST_LIMITED_THROUGHPUT, REQUEST_GAP -+*/ -+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_MASK (0x0FFF0000) -+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_LSBMASK (0x00000FFF) -+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_SHIFT (16) -+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_LENGTH (12) -+#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_ADDRESS_CONTROL_OFFSET (0x0070) -+ -+/* PDP_BIF, ADDRESS_CONTROL, MMU_BYPASS -+*/ -+#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_MASK (0x00000001) -+#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_LSBMASK (0x00000001) -+#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_SHIFT (0) -+#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_LENGTH (1) -+#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, ADDRESS_CONTROL, MMU_ENABLE_EXT_ADDRESSING -+*/ -+#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_MASK (0x00000010) -+#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_LSBMASK (0x00000001) -+#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_SHIFT (4) -+#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_LENGTH (1) -+#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, ADDRESS_CONTROL, UPPER_ADDRESS_FIXED -+*/ -+#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_MASK (0x00FF0000) -+#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_LSBMASK (0x000000FF) -+#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_SHIFT (16) -+#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_LENGTH (8) -+#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_CONFIG0_OFFSET (0x0080) -+ -+/* PDP_BIF, CONFIG0, NUM_REQUESTORS -+*/ -+#define PDP_BIF_CONFIG0_NUM_REQUESTORS_MASK (0x0000000F) -+#define PDP_BIF_CONFIG0_NUM_REQUESTORS_LSBMASK (0x0000000F) -+#define PDP_BIF_CONFIG0_NUM_REQUESTORS_SHIFT (0) -+#define PDP_BIF_CONFIG0_NUM_REQUESTORS_LENGTH (4) -+#define PDP_BIF_CONFIG0_NUM_REQUESTORS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONFIG0, EXTENDED_ADDR_RANGE -+*/ -+#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_MASK (0x000000F0) -+#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_LSBMASK (0x0000000F) -+#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_SHIFT (4) -+#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_LENGTH (4) -+#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONFIG0, GROUP_OVERRIDE_SIZE -+*/ -+#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_MASK (0x00000700) -+#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_LSBMASK (0x00000007) -+#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_SHIFT (8) -+#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_LENGTH (3) -+#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONFIG0, ADDR_COHERENCY_SUPPORTED -+*/ -+#define 
PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_MASK (0x00001000) -+#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_LSBMASK (0x00000001) -+#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_SHIFT (12) -+#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_LENGTH (1) -+#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONFIG0, MMU_SUPPORTED -+*/ -+#define PDP_BIF_CONFIG0_MMU_SUPPORTED_MASK (0x00002000) -+#define PDP_BIF_CONFIG0_MMU_SUPPORTED_LSBMASK (0x00000001) -+#define PDP_BIF_CONFIG0_MMU_SUPPORTED_SHIFT (13) -+#define PDP_BIF_CONFIG0_MMU_SUPPORTED_LENGTH (1) -+#define PDP_BIF_CONFIG0_MMU_SUPPORTED_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONFIG0, TILE_ADDR_GRANULARITY -+*/ -+#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_MASK (0x001F0000) -+#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_LSBMASK (0x0000001F) -+#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_SHIFT (16) -+#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_LENGTH (5) -+#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONFIG0, NO_READ_REORDER -+*/ -+#define PDP_BIF_CONFIG0_NO_READ_REORDER_MASK (0x00200000) -+#define PDP_BIF_CONFIG0_NO_READ_REORDER_LSBMASK (0x00000001) -+#define PDP_BIF_CONFIG0_NO_READ_REORDER_SHIFT (21) -+#define PDP_BIF_CONFIG0_NO_READ_REORDER_LENGTH (1) -+#define PDP_BIF_CONFIG0_NO_READ_REORDER_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONFIG0, TAGS_SUPPORTED -+*/ -+#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_MASK (0xFFC00000) -+#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_LSBMASK (0x000003FF) -+#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_SHIFT (22) -+#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_LENGTH (10) -+#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_CONFIG1_OFFSET (0x0084) -+ -+/* PDP_BIF, CONFIG1, PAGE_SIZE -+*/ -+#define PDP_BIF_CONFIG1_PAGE_SIZE_MASK (0x0000000F) -+#define PDP_BIF_CONFIG1_PAGE_SIZE_LSBMASK (0x0000000F) -+#define PDP_BIF_CONFIG1_PAGE_SIZE_SHIFT (0) -+#define PDP_BIF_CONFIG1_PAGE_SIZE_LENGTH (4) -+#define PDP_BIF_CONFIG1_PAGE_SIZE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONFIG1, PAGE_CACHE_ENTRIES -+*/ -+#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_MASK (0x0000FF00) -+#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_LSBMASK (0x000000FF) -+#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_SHIFT (8) -+#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_LENGTH (8) -+#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONFIG1, DIR_CACHE_ENTRIES -+*/ -+#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_MASK (0x001F0000) -+#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_LSBMASK (0x0000001F) -+#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_SHIFT (16) -+#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_LENGTH (5) -+#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONFIG1, BANDWIDTH_COUNT_SUPPORTED -+*/ -+#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_MASK (0x01000000) -+#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_LSBMASK (0x00000001) -+#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_SHIFT (24) -+#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_LENGTH (1) -+#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONFIG1, STALL_COUNT_SUPPORTED -+*/ -+#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_MASK (0x02000000) -+#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_LSBMASK (0x00000001) -+#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_SHIFT (25) -+#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_LENGTH (1) -+#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_SIGNED_FIELD IMG_FALSE -+ 
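[Editor's note, not part of the patch] Every field in these generated headers follows the same MASK/LSBMASK/SHIFT/LENGTH/SIGNED_FIELD pattern, so driver code reads or writes a field by masking and shifting the 32-bit register word. The short sketch below is illustrative only: the two macro values are copied from the CONFIG1 block quoted above, and pdp_get_field()/pdp_set_field() plus the sample register value are hypothetical helpers, not anything defined by this patch.

/* Standalone illustration of the MASK/SHIFT convention used throughout
 * pdp2_mmu_regs.h and pdp2_regs.h. Macro values copied from the quoted
 * CONFIG1 block; the helpers and the sample value are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_MASK  (0x001F0000)
#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_SHIFT (16)

static inline uint32_t pdp_get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	/* Isolate the field bits, then move them down to bit 0. */
	return (reg & mask) >> shift;
}

static inline uint32_t pdp_set_field(uint32_t reg, uint32_t mask,
				     uint32_t shift, uint32_t val)
{
	/* Clear the field, then place the new value inside the mask. */
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t config1 = 0x00081A04;	/* hypothetical register readback */

	printf("DIR_CACHE_ENTRIES = %u\n",
	       pdp_get_field(config1,
			     PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_MASK,
			     PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_SHIFT));

	config1 = pdp_set_field(config1,
				PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_MASK,
				PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_SHIFT, 16);
	return 0;
}

The LSBMASK and LENGTH defines encode the same field width redundantly (mask after shifting to bit 0, and width in bits); generated accessors can use either form.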
-+/* PDP_BIF, CONFIG1, LATENCY_COUNT_SUPPORTED -+*/ -+#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_MASK (0x04000000) -+#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_LSBMASK (0x00000001) -+#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_SHIFT (26) -+#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_LENGTH (1) -+#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, CONFIG1, SUPPORT_READ_INTERLEAVE -+*/ -+#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_MASK (0x10000000) -+#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_LSBMASK (0x00000001) -+#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_SHIFT (28) -+#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_LENGTH (1) -+#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_STATUS0_OFFSET (0x0088) -+ -+/* PDP_BIF, STATUS0, MMU_PF_N_RW -+*/ -+#define PDP_BIF_STATUS0_MMU_PF_N_RW_MASK (0x00000001) -+#define PDP_BIF_STATUS0_MMU_PF_N_RW_LSBMASK (0x00000001) -+#define PDP_BIF_STATUS0_MMU_PF_N_RW_SHIFT (0) -+#define PDP_BIF_STATUS0_MMU_PF_N_RW_LENGTH (1) -+#define PDP_BIF_STATUS0_MMU_PF_N_RW_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, STATUS0, MMU_FAULT_ADDR -+*/ -+#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_MASK (0xFFFFF000) -+#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_LSBMASK (0x000FFFFF) -+#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_SHIFT (12) -+#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_LENGTH (20) -+#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_STATUS1_OFFSET (0x008C) -+ -+/* PDP_BIF, STATUS1, MMU_FAULT_REQ_STAT -+*/ -+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_MASK (0x0000FFFF) -+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_LSBMASK (0x0000FFFF) -+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_SHIFT (0) -+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_LENGTH (16) -+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, STATUS1, MMU_FAULT_REQ_ID -+*/ -+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_MASK (0x000F0000) -+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_LSBMASK (0x0000000F) -+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_SHIFT (16) -+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_LENGTH (4) -+#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, STATUS1, MMU_FAULT_INDEX -+*/ -+#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_MASK (0x03000000) -+#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_LSBMASK (0x00000003) -+#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_SHIFT (24) -+#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_LENGTH (2) -+#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, STATUS1, MMU_FAULT_RNW -+*/ -+#define PDP_BIF_STATUS1_MMU_FAULT_RNW_MASK (0x10000000) -+#define PDP_BIF_STATUS1_MMU_FAULT_RNW_LSBMASK (0x00000001) -+#define PDP_BIF_STATUS1_MMU_FAULT_RNW_SHIFT (28) -+#define PDP_BIF_STATUS1_MMU_FAULT_RNW_LENGTH (1) -+#define PDP_BIF_STATUS1_MMU_FAULT_RNW_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_MEM_REQ_OFFSET (0x0090) -+ -+/* PDP_BIF, MEM_REQ, TAG_OUTSTANDING -+*/ -+#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_MASK (0x000003FF) -+#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_LSBMASK (0x000003FF) -+#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_SHIFT (0) -+#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_LENGTH (10) -+#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, MEM_REQ, EXT_WRRESP_FAULT -+*/ -+#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_MASK (0x00001000) -+#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_LSBMASK (0x00000001) -+#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_SHIFT (12) -+#define 
PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_LENGTH (1) -+#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, MEM_REQ, EXT_RDRESP_FAULT -+*/ -+#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_MASK (0x00002000) -+#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_LSBMASK (0x00000001) -+#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_SHIFT (13) -+#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_LENGTH (1) -+#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, MEM_REQ, EXT_READ_BURST_FAULT -+*/ -+#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_MASK (0x00004000) -+#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_LSBMASK (0x00000001) -+#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_SHIFT (14) -+#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_LENGTH (1) -+#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, MEM_REQ, INT_PROTOCOL_FAULT -+*/ -+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_MASK (0x80000000) -+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_LSBMASK (0x00000001) -+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_SHIFT (31) -+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_LENGTH (1) -+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_SIGNED_FIELD IMG_FALSE -+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_NO_REPS (16) -+#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_SIZE (1) -+ -+#define PDP_BIF_MEM_EXT_OUTSTANDING_OFFSET (0x0094) -+ -+/* PDP_BIF, MEM_EXT_OUTSTANDING, READ_WORDS_OUTSTANDING -+*/ -+#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_MASK (0x0000FFFF) -+#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_LSBMASK (0x0000FFFF) -+#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_SHIFT (0) -+#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_LENGTH (16) -+#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_FAULT_SELECT_OFFSET (0x00A0) -+ -+/* PDP_BIF, FAULT_SELECT, MMU_FAULT_SELECT -+*/ -+#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_MASK (0x0000000F) -+#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_LSBMASK (0x0000000F) -+#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_SHIFT (0) -+#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_LENGTH (4) -+#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_PROTOCOL_FAULT_OFFSET (0x00A8) -+ -+/* PDP_BIF, PROTOCOL_FAULT, FAULT_PAGE_BREAK -+*/ -+#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_MASK (0x00000001) -+#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_LSBMASK (0x00000001) -+#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_SHIFT (0) -+#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_LENGTH (1) -+#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, PROTOCOL_FAULT, FAULT_WRITE -+*/ -+#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_MASK (0x00000010) -+#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_LSBMASK (0x00000001) -+#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_SHIFT (4) -+#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_LENGTH (1) -+#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, PROTOCOL_FAULT, FAULT_READ -+*/ -+#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_MASK (0x00000020) -+#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_LSBMASK (0x00000001) -+#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_SHIFT (5) -+#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_LENGTH (1) -+#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_TOTAL_READ_REQ_OFFSET (0x0100) -+ -+/* PDP_BIF, TOTAL_READ_REQ, TOTAL_READ_REQ -+*/ -+#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_MASK 
(0xFFFFFFFF) -+#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_LSBMASK (0xFFFFFFFF) -+#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_SHIFT (0) -+#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_LENGTH (32) -+#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_TOTAL_WRITE_REQ_OFFSET (0x0104) -+ -+/* PDP_BIF, TOTAL_WRITE_REQ, TOTAL_WRITE_REQ -+*/ -+#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_MASK (0xFFFFFFFF) -+#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_LSBMASK (0xFFFFFFFF) -+#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_SHIFT (0) -+#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_LENGTH (32) -+#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_READS_LESS_64_REQ_OFFSET (0x0108) -+ -+/* PDP_BIF, READS_LESS_64_REQ, READS_LESS_64_REQ -+*/ -+#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_MASK (0xFFFFFFFF) -+#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_LSBMASK (0xFFFFFFFF) -+#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_SHIFT (0) -+#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_LENGTH (32) -+#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_WRITES_LESS_64_REQ_OFFSET (0x010C) -+ -+/* PDP_BIF, WRITES_LESS_64_REQ, WRITES_LESS_64_REQ -+*/ -+#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_MASK (0xFFFFFFFF) -+#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_LSBMASK (0xFFFFFFFF) -+#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_SHIFT (0) -+#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_LENGTH (32) -+#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_EXT_CMD_STALL_OFFSET (0x0120) -+ -+/* PDP_BIF, EXT_CMD_STALL, EXT_CMD_STALL -+*/ -+#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_MASK (0xFFFFFFFF) -+#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_LSBMASK (0xFFFFFFFF) -+#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_SHIFT (0) -+#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_LENGTH (32) -+#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_WRITE_REQ_STALL_OFFSET (0x0124) -+ -+/* PDP_BIF, WRITE_REQ_STALL, WRITE_REQ_STALL -+*/ -+#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_MASK (0xFFFFFFFF) -+#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_LSBMASK (0xFFFFFFFF) -+#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_SHIFT (0) -+#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_LENGTH (32) -+#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_MISS_STALL_OFFSET (0x0128) -+ -+/* PDP_BIF, MISS_STALL, MMU_MISS_STALL -+*/ -+#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_MASK (0xFFFFFFFF) -+#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_LSBMASK (0xFFFFFFFF) -+#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_SHIFT (0) -+#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_LENGTH (32) -+#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_ADDRESS_STALL_OFFSET (0x012C) -+ -+/* PDP_BIF, ADDRESS_STALL, ADDRESS_STALL -+*/ -+#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_MASK (0xFFFFFFFF) -+#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_LSBMASK (0xFFFFFFFF) -+#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_SHIFT (0) -+#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_LENGTH (32) -+#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_TAG_STALL_OFFSET (0x0130) -+ -+/* PDP_BIF, TAG_STALL, TAG_STALL -+*/ -+#define PDP_BIF_TAG_STALL_TAG_STALL_MASK (0xFFFFFFFF) -+#define PDP_BIF_TAG_STALL_TAG_STALL_LSBMASK (0xFFFFFFFF) -+#define 
PDP_BIF_TAG_STALL_TAG_STALL_SHIFT (0) -+#define PDP_BIF_TAG_STALL_TAG_STALL_LENGTH (32) -+#define PDP_BIF_TAG_STALL_TAG_STALL_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_PEAK_READ_OUTSTANDING_OFFSET (0x0140) -+ -+/* PDP_BIF, PEAK_READ_OUTSTANDING, PEAK_TAG_OUTSTANDING -+*/ -+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_MASK (0x000003FF) -+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_LSBMASK (0x000003FF) -+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_SHIFT (0) -+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_LENGTH (10) -+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, PEAK_READ_OUTSTANDING, PEAK_READ_LATENCY -+*/ -+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_MASK (0xFFFF0000) -+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_LSBMASK (0x0000FFFF) -+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_SHIFT (16) -+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_LENGTH (16) -+#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_AVERAGE_READ_LATENCY_OFFSET (0x0144) -+ -+/* PDP_BIF, AVERAGE_READ_LATENCY, AVERAGE_READ_LATENCY -+*/ -+#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_MASK (0xFFFFFFFF) -+#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_LSBMASK (0xFFFFFFFF) -+#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_SHIFT (0) -+#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_LENGTH (32) -+#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_STATISTICS_CONTROL_OFFSET (0x0160) -+ -+/* PDP_BIF, STATISTICS_CONTROL, BANDWIDTH_STATS_INIT -+*/ -+#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_MASK (0x00000001) -+#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_LSBMASK (0x00000001) -+#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_SHIFT (0) -+#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_LENGTH (1) -+#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, STATISTICS_CONTROL, STALL_STATS_INIT -+*/ -+#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_MASK (0x00000002) -+#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_LSBMASK (0x00000001) -+#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_SHIFT (1) -+#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_LENGTH (1) -+#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, STATISTICS_CONTROL, LATENCY_STATS_INIT -+*/ -+#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_MASK (0x00000004) -+#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_LSBMASK (0x00000001) -+#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_SHIFT (2) -+#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_LENGTH (1) -+#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BIF_VERSION_OFFSET (0x01D0) -+ -+/* PDP_BIF, VERSION, MMU_MAJOR_REV -+*/ -+#define PDP_BIF_VERSION_MMU_MAJOR_REV_MASK (0x00FF0000) -+#define PDP_BIF_VERSION_MMU_MAJOR_REV_LSBMASK (0x000000FF) -+#define PDP_BIF_VERSION_MMU_MAJOR_REV_SHIFT (16) -+#define PDP_BIF_VERSION_MMU_MAJOR_REV_LENGTH (8) -+#define PDP_BIF_VERSION_MMU_MAJOR_REV_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, VERSION, MMU_MINOR_REV -+*/ -+#define PDP_BIF_VERSION_MMU_MINOR_REV_MASK (0x0000FF00) -+#define PDP_BIF_VERSION_MMU_MINOR_REV_LSBMASK (0x000000FF) -+#define PDP_BIF_VERSION_MMU_MINOR_REV_SHIFT (8) -+#define 
PDP_BIF_VERSION_MMU_MINOR_REV_LENGTH (8) -+#define PDP_BIF_VERSION_MMU_MINOR_REV_SIGNED_FIELD IMG_FALSE -+ -+/* PDP_BIF, VERSION, MMU_MAINT_REV -+*/ -+#define PDP_BIF_VERSION_MMU_MAINT_REV_MASK (0x000000FF) -+#define PDP_BIF_VERSION_MMU_MAINT_REV_LSBMASK (0x000000FF) -+#define PDP_BIF_VERSION_MMU_MAINT_REV_SHIFT (0) -+#define PDP_BIF_VERSION_MMU_MAINT_REV_LENGTH (8) -+#define PDP_BIF_VERSION_MMU_MAINT_REV_SIGNED_FIELD IMG_FALSE -+ -+#endif /* _PDP2_MMU_REGS_H */ -diff --git a/drivers/gpu/drm/img-rogue/pdp2_regs.h b/drivers/gpu/drm/img-rogue/pdp2_regs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pdp2_regs.h -@@ -0,0 +1,8565 @@ -+/*************************************************************************/ /*! -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+ -+#ifndef _PDP2_REGS_H -+#define _PDP2_REGS_H -+ -+/* -+ * Bitfield operations -+ * For each argument field, the following preprocessor macros must exist -+ * field##_MASK - the number of bits in the bit field -+ * field##_SHIFT - offset from the first bit -+ */ -+#define PLACE_FIELD(field, val) \ -+ (((u32)(val) << (field##_SHIFT)) & (field##_MASK)) -+ -+#define ADJ_FIELD(x, field, val) \ -+ (((x) & ~(field##_MASK)) \ -+ | PLACE_FIELD(field, val)) -+ -+#define SET_FIELD(x, field, val) \ -+ (x) = ADJ_FIELD(x, field, val) -+ -+#define GET_FIELD(x, field) \ -+ (((x) & (field##_MASK)) >> (field##_SHIFT)) -+ -+/* Keeps most significant bits */ -+#define MOVE_FIELD(x, o1, l1, o2, l2) \ -+ (((x) >> ((o1) + (l1) - (l2))) << (o2)) -+ -+#define MAX_FIELD_VALUE(field) \ -+ ((field##_MASK) >> (field##_SHIFT)) -+ -+/* Hardware register definitions */ -+ -+#define PDP_GRPH1SURF_OFFSET (0x0000) -+ -+/* PDP, GRPH1SURF, GRPH1PIXFMT -+*/ -+#define PDP_GRPH1SURF_GRPH1PIXFMT_MASK (0xF8000000) -+#define PDP_GRPH1SURF_GRPH1PIXFMT_LSBMASK (0x0000001F) -+#define PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT (27) -+#define PDP_GRPH1SURF_GRPH1PIXFMT_LENGTH (5) -+#define PDP_GRPH1SURF_GRPH1PIXFMT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1SURF, GRPH1USEGAMMA -+*/ -+#define PDP_GRPH1SURF_GRPH1USEGAMMA_MASK (0x04000000) -+#define PDP_GRPH1SURF_GRPH1USEGAMMA_LSBMASK (0x00000001) -+#define PDP_GRPH1SURF_GRPH1USEGAMMA_SHIFT (26) -+#define PDP_GRPH1SURF_GRPH1USEGAMMA_LENGTH (1) -+#define PDP_GRPH1SURF_GRPH1USEGAMMA_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1SURF, GRPH1USECSC -+*/ -+#define PDP_GRPH1SURF_GRPH1USECSC_MASK (0x02000000) -+#define PDP_GRPH1SURF_GRPH1USECSC_LSBMASK (0x00000001) -+#define PDP_GRPH1SURF_GRPH1USECSC_SHIFT (25) -+#define PDP_GRPH1SURF_GRPH1USECSC_LENGTH (1) -+#define PDP_GRPH1SURF_GRPH1USECSC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1SURF, GRPH1LUTRWCHOICE -+*/ -+#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_MASK (0x01000000) -+#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LSBMASK (0x00000001) -+#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SHIFT (24) -+#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LENGTH (1) -+#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1SURF, GRPH1USELUT -+*/ -+#define PDP_GRPH1SURF_GRPH1USELUT_MASK (0x00800000) -+#define PDP_GRPH1SURF_GRPH1USELUT_LSBMASK (0x00000001) -+#define PDP_GRPH1SURF_GRPH1USELUT_SHIFT (23) -+#define PDP_GRPH1SURF_GRPH1USELUT_LENGTH (1) -+#define PDP_GRPH1SURF_GRPH1USELUT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH2SURF_OFFSET (0x0004) -+ -+/* PDP, GRPH2SURF, GRPH2PIXFMT -+*/ -+#define PDP_GRPH2SURF_GRPH2PIXFMT_MASK (0xF8000000) -+#define PDP_GRPH2SURF_GRPH2PIXFMT_LSBMASK (0x0000001F) -+#define PDP_GRPH2SURF_GRPH2PIXFMT_SHIFT (27) -+#define PDP_GRPH2SURF_GRPH2PIXFMT_LENGTH (5) -+#define PDP_GRPH2SURF_GRPH2PIXFMT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2SURF, GRPH2USEGAMMA -+*/ -+#define PDP_GRPH2SURF_GRPH2USEGAMMA_MASK (0x04000000) -+#define PDP_GRPH2SURF_GRPH2USEGAMMA_LSBMASK (0x00000001) -+#define PDP_GRPH2SURF_GRPH2USEGAMMA_SHIFT (26) -+#define PDP_GRPH2SURF_GRPH2USEGAMMA_LENGTH (1) -+#define PDP_GRPH2SURF_GRPH2USEGAMMA_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2SURF, GRPH2USECSC -+*/ -+#define PDP_GRPH2SURF_GRPH2USECSC_MASK (0x02000000) -+#define PDP_GRPH2SURF_GRPH2USECSC_LSBMASK (0x00000001) -+#define PDP_GRPH2SURF_GRPH2USECSC_SHIFT (25) -+#define PDP_GRPH2SURF_GRPH2USECSC_LENGTH (1) -+#define PDP_GRPH2SURF_GRPH2USECSC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2SURF, 
GRPH2LUTRWCHOICE -+*/ -+#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_MASK (0x01000000) -+#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LSBMASK (0x00000001) -+#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SHIFT (24) -+#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LENGTH (1) -+#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2SURF, GRPH2USELUT -+*/ -+#define PDP_GRPH2SURF_GRPH2USELUT_MASK (0x00800000) -+#define PDP_GRPH2SURF_GRPH2USELUT_LSBMASK (0x00000001) -+#define PDP_GRPH2SURF_GRPH2USELUT_SHIFT (23) -+#define PDP_GRPH2SURF_GRPH2USELUT_LENGTH (1) -+#define PDP_GRPH2SURF_GRPH2USELUT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH3SURF_OFFSET (0x0008) -+ -+/* PDP, GRPH3SURF, GRPH3PIXFMT -+*/ -+#define PDP_GRPH3SURF_GRPH3PIXFMT_MASK (0xF8000000) -+#define PDP_GRPH3SURF_GRPH3PIXFMT_LSBMASK (0x0000001F) -+#define PDP_GRPH3SURF_GRPH3PIXFMT_SHIFT (27) -+#define PDP_GRPH3SURF_GRPH3PIXFMT_LENGTH (5) -+#define PDP_GRPH3SURF_GRPH3PIXFMT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3SURF, GRPH3USEGAMMA -+*/ -+#define PDP_GRPH3SURF_GRPH3USEGAMMA_MASK (0x04000000) -+#define PDP_GRPH3SURF_GRPH3USEGAMMA_LSBMASK (0x00000001) -+#define PDP_GRPH3SURF_GRPH3USEGAMMA_SHIFT (26) -+#define PDP_GRPH3SURF_GRPH3USEGAMMA_LENGTH (1) -+#define PDP_GRPH3SURF_GRPH3USEGAMMA_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3SURF, GRPH3USECSC -+*/ -+#define PDP_GRPH3SURF_GRPH3USECSC_MASK (0x02000000) -+#define PDP_GRPH3SURF_GRPH3USECSC_LSBMASK (0x00000001) -+#define PDP_GRPH3SURF_GRPH3USECSC_SHIFT (25) -+#define PDP_GRPH3SURF_GRPH3USECSC_LENGTH (1) -+#define PDP_GRPH3SURF_GRPH3USECSC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3SURF, GRPH3LUTRWCHOICE -+*/ -+#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_MASK (0x01000000) -+#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LSBMASK (0x00000001) -+#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SHIFT (24) -+#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LENGTH (1) -+#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3SURF, GRPH3USELUT -+*/ -+#define PDP_GRPH3SURF_GRPH3USELUT_MASK (0x00800000) -+#define PDP_GRPH3SURF_GRPH3USELUT_LSBMASK (0x00000001) -+#define PDP_GRPH3SURF_GRPH3USELUT_SHIFT (23) -+#define PDP_GRPH3SURF_GRPH3USELUT_LENGTH (1) -+#define PDP_GRPH3SURF_GRPH3USELUT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH4SURF_OFFSET (0x000C) -+ -+/* PDP, GRPH4SURF, GRPH4PIXFMT -+*/ -+#define PDP_GRPH4SURF_GRPH4PIXFMT_MASK (0xF8000000) -+#define PDP_GRPH4SURF_GRPH4PIXFMT_LSBMASK (0x0000001F) -+#define PDP_GRPH4SURF_GRPH4PIXFMT_SHIFT (27) -+#define PDP_GRPH4SURF_GRPH4PIXFMT_LENGTH (5) -+#define PDP_GRPH4SURF_GRPH4PIXFMT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4SURF, GRPH4USEGAMMA -+*/ -+#define PDP_GRPH4SURF_GRPH4USEGAMMA_MASK (0x04000000) -+#define PDP_GRPH4SURF_GRPH4USEGAMMA_LSBMASK (0x00000001) -+#define PDP_GRPH4SURF_GRPH4USEGAMMA_SHIFT (26) -+#define PDP_GRPH4SURF_GRPH4USEGAMMA_LENGTH (1) -+#define PDP_GRPH4SURF_GRPH4USEGAMMA_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4SURF, GRPH4USECSC -+*/ -+#define PDP_GRPH4SURF_GRPH4USECSC_MASK (0x02000000) -+#define PDP_GRPH4SURF_GRPH4USECSC_LSBMASK (0x00000001) -+#define PDP_GRPH4SURF_GRPH4USECSC_SHIFT (25) -+#define PDP_GRPH4SURF_GRPH4USECSC_LENGTH (1) -+#define PDP_GRPH4SURF_GRPH4USECSC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4SURF, GRPH4LUTRWCHOICE -+*/ -+#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_MASK (0x01000000) -+#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LSBMASK (0x00000001) -+#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SHIFT (24) -+#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LENGTH (1) -+#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4SURF, 
GRPH4USELUT -+*/ -+#define PDP_GRPH4SURF_GRPH4USELUT_MASK (0x00800000) -+#define PDP_GRPH4SURF_GRPH4USELUT_LSBMASK (0x00000001) -+#define PDP_GRPH4SURF_GRPH4USELUT_SHIFT (23) -+#define PDP_GRPH4SURF_GRPH4USELUT_LENGTH (1) -+#define PDP_GRPH4SURF_GRPH4USELUT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1SURF_OFFSET (0x0010) -+ -+/* PDP, VID1SURF, VID1PIXFMT -+*/ -+#define PDP_VID1SURF_VID1PIXFMT_MASK (0xF8000000) -+#define PDP_VID1SURF_VID1PIXFMT_LSBMASK (0x0000001F) -+#define PDP_VID1SURF_VID1PIXFMT_SHIFT (27) -+#define PDP_VID1SURF_VID1PIXFMT_LENGTH (5) -+#define PDP_VID1SURF_VID1PIXFMT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SURF, VID1USEGAMMA -+*/ -+#define PDP_VID1SURF_VID1USEGAMMA_MASK (0x04000000) -+#define PDP_VID1SURF_VID1USEGAMMA_LSBMASK (0x00000001) -+#define PDP_VID1SURF_VID1USEGAMMA_SHIFT (26) -+#define PDP_VID1SURF_VID1USEGAMMA_LENGTH (1) -+#define PDP_VID1SURF_VID1USEGAMMA_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SURF, VID1USECSC -+*/ -+#define PDP_VID1SURF_VID1USECSC_MASK (0x02000000) -+#define PDP_VID1SURF_VID1USECSC_LSBMASK (0x00000001) -+#define PDP_VID1SURF_VID1USECSC_SHIFT (25) -+#define PDP_VID1SURF_VID1USECSC_LENGTH (1) -+#define PDP_VID1SURF_VID1USECSC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SURF, VID1USEI2P -+*/ -+#define PDP_VID1SURF_VID1USEI2P_MASK (0x01000000) -+#define PDP_VID1SURF_VID1USEI2P_LSBMASK (0x00000001) -+#define PDP_VID1SURF_VID1USEI2P_SHIFT (24) -+#define PDP_VID1SURF_VID1USEI2P_LENGTH (1) -+#define PDP_VID1SURF_VID1USEI2P_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SURF, VID1COSITED -+*/ -+#define PDP_VID1SURF_VID1COSITED_MASK (0x00800000) -+#define PDP_VID1SURF_VID1COSITED_LSBMASK (0x00000001) -+#define PDP_VID1SURF_VID1COSITED_SHIFT (23) -+#define PDP_VID1SURF_VID1COSITED_LENGTH (1) -+#define PDP_VID1SURF_VID1COSITED_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SURF, VID1USEHQCD -+*/ -+#define PDP_VID1SURF_VID1USEHQCD_MASK (0x00400000) -+#define PDP_VID1SURF_VID1USEHQCD_LSBMASK (0x00000001) -+#define PDP_VID1SURF_VID1USEHQCD_SHIFT (22) -+#define PDP_VID1SURF_VID1USEHQCD_LENGTH (1) -+#define PDP_VID1SURF_VID1USEHQCD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SURF, VID1USEINSTREAM -+*/ -+#define PDP_VID1SURF_VID1USEINSTREAM_MASK (0x00200000) -+#define PDP_VID1SURF_VID1USEINSTREAM_LSBMASK (0x00000001) -+#define PDP_VID1SURF_VID1USEINSTREAM_SHIFT (21) -+#define PDP_VID1SURF_VID1USEINSTREAM_LENGTH (1) -+#define PDP_VID1SURF_VID1USEINSTREAM_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2SURF_OFFSET (0x0014) -+ -+/* PDP, VID2SURF, VID2PIXFMT -+*/ -+#define PDP_VID2SURF_VID2PIXFMT_MASK (0xF8000000) -+#define PDP_VID2SURF_VID2PIXFMT_LSBMASK (0x0000001F) -+#define PDP_VID2SURF_VID2PIXFMT_SHIFT (27) -+#define PDP_VID2SURF_VID2PIXFMT_LENGTH (5) -+#define PDP_VID2SURF_VID2PIXFMT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SURF, VID2COSITED -+*/ -+#define PDP_VID2SURF_VID2COSITED_MASK (0x00800000) -+#define PDP_VID2SURF_VID2COSITED_LSBMASK (0x00000001) -+#define PDP_VID2SURF_VID2COSITED_SHIFT (23) -+#define PDP_VID2SURF_VID2COSITED_LENGTH (1) -+#define PDP_VID2SURF_VID2COSITED_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SURF, VID2USEGAMMA -+*/ -+#define PDP_VID2SURF_VID2USEGAMMA_MASK (0x04000000) -+#define PDP_VID2SURF_VID2USEGAMMA_LSBMASK (0x00000001) -+#define PDP_VID2SURF_VID2USEGAMMA_SHIFT (26) -+#define PDP_VID2SURF_VID2USEGAMMA_LENGTH (1) -+#define PDP_VID2SURF_VID2USEGAMMA_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SURF, VID2USECSC -+*/ -+#define PDP_VID2SURF_VID2USECSC_MASK (0x02000000) -+#define PDP_VID2SURF_VID2USECSC_LSBMASK (0x00000001) -+#define PDP_VID2SURF_VID2USECSC_SHIFT 
(25) -+#define PDP_VID2SURF_VID2USECSC_LENGTH (1) -+#define PDP_VID2SURF_VID2USECSC_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3SURF_OFFSET (0x0018) -+ -+/* PDP, VID3SURF, VID3PIXFMT -+*/ -+#define PDP_VID3SURF_VID3PIXFMT_MASK (0xF8000000) -+#define PDP_VID3SURF_VID3PIXFMT_LSBMASK (0x0000001F) -+#define PDP_VID3SURF_VID3PIXFMT_SHIFT (27) -+#define PDP_VID3SURF_VID3PIXFMT_LENGTH (5) -+#define PDP_VID3SURF_VID3PIXFMT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SURF, VID3COSITED -+*/ -+#define PDP_VID3SURF_VID3COSITED_MASK (0x00800000) -+#define PDP_VID3SURF_VID3COSITED_LSBMASK (0x00000001) -+#define PDP_VID3SURF_VID3COSITED_SHIFT (23) -+#define PDP_VID3SURF_VID3COSITED_LENGTH (1) -+#define PDP_VID3SURF_VID3COSITED_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SURF, VID3USEGAMMA -+*/ -+#define PDP_VID3SURF_VID3USEGAMMA_MASK (0x04000000) -+#define PDP_VID3SURF_VID3USEGAMMA_LSBMASK (0x00000001) -+#define PDP_VID3SURF_VID3USEGAMMA_SHIFT (26) -+#define PDP_VID3SURF_VID3USEGAMMA_LENGTH (1) -+#define PDP_VID3SURF_VID3USEGAMMA_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SURF, VID3USECSC -+*/ -+#define PDP_VID3SURF_VID3USECSC_MASK (0x02000000) -+#define PDP_VID3SURF_VID3USECSC_LSBMASK (0x00000001) -+#define PDP_VID3SURF_VID3USECSC_SHIFT (25) -+#define PDP_VID3SURF_VID3USECSC_LENGTH (1) -+#define PDP_VID3SURF_VID3USECSC_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4SURF_OFFSET (0x001C) -+ -+/* PDP, VID4SURF, VID4PIXFMT -+*/ -+#define PDP_VID4SURF_VID4PIXFMT_MASK (0xF8000000) -+#define PDP_VID4SURF_VID4PIXFMT_LSBMASK (0x0000001F) -+#define PDP_VID4SURF_VID4PIXFMT_SHIFT (27) -+#define PDP_VID4SURF_VID4PIXFMT_LENGTH (5) -+#define PDP_VID4SURF_VID4PIXFMT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SURF, VID4COSITED -+*/ -+#define PDP_VID4SURF_VID4COSITED_MASK (0x00800000) -+#define PDP_VID4SURF_VID4COSITED_LSBMASK (0x00000001) -+#define PDP_VID4SURF_VID4COSITED_SHIFT (23) -+#define PDP_VID4SURF_VID4COSITED_LENGTH (1) -+#define PDP_VID4SURF_VID4COSITED_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SURF, VID4USEGAMMA -+*/ -+#define PDP_VID4SURF_VID4USEGAMMA_MASK (0x04000000) -+#define PDP_VID4SURF_VID4USEGAMMA_LSBMASK (0x00000001) -+#define PDP_VID4SURF_VID4USEGAMMA_SHIFT (26) -+#define PDP_VID4SURF_VID4USEGAMMA_LENGTH (1) -+#define PDP_VID4SURF_VID4USEGAMMA_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SURF, VID4USECSC -+*/ -+#define PDP_VID4SURF_VID4USECSC_MASK (0x02000000) -+#define PDP_VID4SURF_VID4USECSC_LSBMASK (0x00000001) -+#define PDP_VID4SURF_VID4USECSC_SHIFT (25) -+#define PDP_VID4SURF_VID4USECSC_LENGTH (1) -+#define PDP_VID4SURF_VID4USECSC_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH1CTRL_OFFSET (0x0020) -+ -+/* PDP, GRPH1CTRL, GRPH1STREN -+*/ -+#define PDP_GRPH1CTRL_GRPH1STREN_MASK (0x80000000) -+#define PDP_GRPH1CTRL_GRPH1STREN_LSBMASK (0x00000001) -+#define PDP_GRPH1CTRL_GRPH1STREN_SHIFT (31) -+#define PDP_GRPH1CTRL_GRPH1STREN_LENGTH (1) -+#define PDP_GRPH1CTRL_GRPH1STREN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1CTRL, GRPH1CKEYEN -+*/ -+#define PDP_GRPH1CTRL_GRPH1CKEYEN_MASK (0x40000000) -+#define PDP_GRPH1CTRL_GRPH1CKEYEN_LSBMASK (0x00000001) -+#define PDP_GRPH1CTRL_GRPH1CKEYEN_SHIFT (30) -+#define PDP_GRPH1CTRL_GRPH1CKEYEN_LENGTH (1) -+#define PDP_GRPH1CTRL_GRPH1CKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1CTRL, GRPH1CKEYSRC -+*/ -+#define PDP_GRPH1CTRL_GRPH1CKEYSRC_MASK (0x20000000) -+#define PDP_GRPH1CTRL_GRPH1CKEYSRC_LSBMASK (0x00000001) -+#define PDP_GRPH1CTRL_GRPH1CKEYSRC_SHIFT (29) -+#define PDP_GRPH1CTRL_GRPH1CKEYSRC_LENGTH (1) -+#define PDP_GRPH1CTRL_GRPH1CKEYSRC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1CTRL, 
GRPH1BLEND -+*/ -+#define PDP_GRPH1CTRL_GRPH1BLEND_MASK (0x18000000) -+#define PDP_GRPH1CTRL_GRPH1BLEND_LSBMASK (0x00000003) -+#define PDP_GRPH1CTRL_GRPH1BLEND_SHIFT (27) -+#define PDP_GRPH1CTRL_GRPH1BLEND_LENGTH (2) -+#define PDP_GRPH1CTRL_GRPH1BLEND_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1CTRL, GRPH1BLENDPOS -+*/ -+#define PDP_GRPH1CTRL_GRPH1BLENDPOS_MASK (0x07000000) -+#define PDP_GRPH1CTRL_GRPH1BLENDPOS_LSBMASK (0x00000007) -+#define PDP_GRPH1CTRL_GRPH1BLENDPOS_SHIFT (24) -+#define PDP_GRPH1CTRL_GRPH1BLENDPOS_LENGTH (3) -+#define PDP_GRPH1CTRL_GRPH1BLENDPOS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1CTRL, GRPH1DITHEREN -+*/ -+#define PDP_GRPH1CTRL_GRPH1DITHEREN_MASK (0x00800000) -+#define PDP_GRPH1CTRL_GRPH1DITHEREN_LSBMASK (0x00000001) -+#define PDP_GRPH1CTRL_GRPH1DITHEREN_SHIFT (23) -+#define PDP_GRPH1CTRL_GRPH1DITHEREN_LENGTH (1) -+#define PDP_GRPH1CTRL_GRPH1DITHEREN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH2CTRL_OFFSET (0x0024) -+ -+/* PDP, GRPH2CTRL, GRPH2STREN -+*/ -+#define PDP_GRPH2CTRL_GRPH2STREN_MASK (0x80000000) -+#define PDP_GRPH2CTRL_GRPH2STREN_LSBMASK (0x00000001) -+#define PDP_GRPH2CTRL_GRPH2STREN_SHIFT (31) -+#define PDP_GRPH2CTRL_GRPH2STREN_LENGTH (1) -+#define PDP_GRPH2CTRL_GRPH2STREN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2CTRL, GRPH2CKEYEN -+*/ -+#define PDP_GRPH2CTRL_GRPH2CKEYEN_MASK (0x40000000) -+#define PDP_GRPH2CTRL_GRPH2CKEYEN_LSBMASK (0x00000001) -+#define PDP_GRPH2CTRL_GRPH2CKEYEN_SHIFT (30) -+#define PDP_GRPH2CTRL_GRPH2CKEYEN_LENGTH (1) -+#define PDP_GRPH2CTRL_GRPH2CKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2CTRL, GRPH2CKEYSRC -+*/ -+#define PDP_GRPH2CTRL_GRPH2CKEYSRC_MASK (0x20000000) -+#define PDP_GRPH2CTRL_GRPH2CKEYSRC_LSBMASK (0x00000001) -+#define PDP_GRPH2CTRL_GRPH2CKEYSRC_SHIFT (29) -+#define PDP_GRPH2CTRL_GRPH2CKEYSRC_LENGTH (1) -+#define PDP_GRPH2CTRL_GRPH2CKEYSRC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2CTRL, GRPH2BLEND -+*/ -+#define PDP_GRPH2CTRL_GRPH2BLEND_MASK (0x18000000) -+#define PDP_GRPH2CTRL_GRPH2BLEND_LSBMASK (0x00000003) -+#define PDP_GRPH2CTRL_GRPH2BLEND_SHIFT (27) -+#define PDP_GRPH2CTRL_GRPH2BLEND_LENGTH (2) -+#define PDP_GRPH2CTRL_GRPH2BLEND_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2CTRL, GRPH2BLENDPOS -+*/ -+#define PDP_GRPH2CTRL_GRPH2BLENDPOS_MASK (0x07000000) -+#define PDP_GRPH2CTRL_GRPH2BLENDPOS_LSBMASK (0x00000007) -+#define PDP_GRPH2CTRL_GRPH2BLENDPOS_SHIFT (24) -+#define PDP_GRPH2CTRL_GRPH2BLENDPOS_LENGTH (3) -+#define PDP_GRPH2CTRL_GRPH2BLENDPOS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2CTRL, GRPH2DITHEREN -+*/ -+#define PDP_GRPH2CTRL_GRPH2DITHEREN_MASK (0x00800000) -+#define PDP_GRPH2CTRL_GRPH2DITHEREN_LSBMASK (0x00000001) -+#define PDP_GRPH2CTRL_GRPH2DITHEREN_SHIFT (23) -+#define PDP_GRPH2CTRL_GRPH2DITHEREN_LENGTH (1) -+#define PDP_GRPH2CTRL_GRPH2DITHEREN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH3CTRL_OFFSET (0x0028) -+ -+/* PDP, GRPH3CTRL, GRPH3STREN -+*/ -+#define PDP_GRPH3CTRL_GRPH3STREN_MASK (0x80000000) -+#define PDP_GRPH3CTRL_GRPH3STREN_LSBMASK (0x00000001) -+#define PDP_GRPH3CTRL_GRPH3STREN_SHIFT (31) -+#define PDP_GRPH3CTRL_GRPH3STREN_LENGTH (1) -+#define PDP_GRPH3CTRL_GRPH3STREN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3CTRL, GRPH3CKEYEN -+*/ -+#define PDP_GRPH3CTRL_GRPH3CKEYEN_MASK (0x40000000) -+#define PDP_GRPH3CTRL_GRPH3CKEYEN_LSBMASK (0x00000001) -+#define PDP_GRPH3CTRL_GRPH3CKEYEN_SHIFT (30) -+#define PDP_GRPH3CTRL_GRPH3CKEYEN_LENGTH (1) -+#define PDP_GRPH3CTRL_GRPH3CKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3CTRL, GRPH3CKEYSRC -+*/ -+#define PDP_GRPH3CTRL_GRPH3CKEYSRC_MASK (0x20000000) -+#define 
PDP_GRPH3CTRL_GRPH3CKEYSRC_LSBMASK (0x00000001) -+#define PDP_GRPH3CTRL_GRPH3CKEYSRC_SHIFT (29) -+#define PDP_GRPH3CTRL_GRPH3CKEYSRC_LENGTH (1) -+#define PDP_GRPH3CTRL_GRPH3CKEYSRC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3CTRL, GRPH3BLEND -+*/ -+#define PDP_GRPH3CTRL_GRPH3BLEND_MASK (0x18000000) -+#define PDP_GRPH3CTRL_GRPH3BLEND_LSBMASK (0x00000003) -+#define PDP_GRPH3CTRL_GRPH3BLEND_SHIFT (27) -+#define PDP_GRPH3CTRL_GRPH3BLEND_LENGTH (2) -+#define PDP_GRPH3CTRL_GRPH3BLEND_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3CTRL, GRPH3BLENDPOS -+*/ -+#define PDP_GRPH3CTRL_GRPH3BLENDPOS_MASK (0x07000000) -+#define PDP_GRPH3CTRL_GRPH3BLENDPOS_LSBMASK (0x00000007) -+#define PDP_GRPH3CTRL_GRPH3BLENDPOS_SHIFT (24) -+#define PDP_GRPH3CTRL_GRPH3BLENDPOS_LENGTH (3) -+#define PDP_GRPH3CTRL_GRPH3BLENDPOS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3CTRL, GRPH3DITHEREN -+*/ -+#define PDP_GRPH3CTRL_GRPH3DITHEREN_MASK (0x00800000) -+#define PDP_GRPH3CTRL_GRPH3DITHEREN_LSBMASK (0x00000001) -+#define PDP_GRPH3CTRL_GRPH3DITHEREN_SHIFT (23) -+#define PDP_GRPH3CTRL_GRPH3DITHEREN_LENGTH (1) -+#define PDP_GRPH3CTRL_GRPH3DITHEREN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH4CTRL_OFFSET (0x002C) -+ -+/* PDP, GRPH4CTRL, GRPH4STREN -+*/ -+#define PDP_GRPH4CTRL_GRPH4STREN_MASK (0x80000000) -+#define PDP_GRPH4CTRL_GRPH4STREN_LSBMASK (0x00000001) -+#define PDP_GRPH4CTRL_GRPH4STREN_SHIFT (31) -+#define PDP_GRPH4CTRL_GRPH4STREN_LENGTH (1) -+#define PDP_GRPH4CTRL_GRPH4STREN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4CTRL, GRPH4CKEYEN -+*/ -+#define PDP_GRPH4CTRL_GRPH4CKEYEN_MASK (0x40000000) -+#define PDP_GRPH4CTRL_GRPH4CKEYEN_LSBMASK (0x00000001) -+#define PDP_GRPH4CTRL_GRPH4CKEYEN_SHIFT (30) -+#define PDP_GRPH4CTRL_GRPH4CKEYEN_LENGTH (1) -+#define PDP_GRPH4CTRL_GRPH4CKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4CTRL, GRPH4CKEYSRC -+*/ -+#define PDP_GRPH4CTRL_GRPH4CKEYSRC_MASK (0x20000000) -+#define PDP_GRPH4CTRL_GRPH4CKEYSRC_LSBMASK (0x00000001) -+#define PDP_GRPH4CTRL_GRPH4CKEYSRC_SHIFT (29) -+#define PDP_GRPH4CTRL_GRPH4CKEYSRC_LENGTH (1) -+#define PDP_GRPH4CTRL_GRPH4CKEYSRC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4CTRL, GRPH4BLEND -+*/ -+#define PDP_GRPH4CTRL_GRPH4BLEND_MASK (0x18000000) -+#define PDP_GRPH4CTRL_GRPH4BLEND_LSBMASK (0x00000003) -+#define PDP_GRPH4CTRL_GRPH4BLEND_SHIFT (27) -+#define PDP_GRPH4CTRL_GRPH4BLEND_LENGTH (2) -+#define PDP_GRPH4CTRL_GRPH4BLEND_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4CTRL, GRPH4BLENDPOS -+*/ -+#define PDP_GRPH4CTRL_GRPH4BLENDPOS_MASK (0x07000000) -+#define PDP_GRPH4CTRL_GRPH4BLENDPOS_LSBMASK (0x00000007) -+#define PDP_GRPH4CTRL_GRPH4BLENDPOS_SHIFT (24) -+#define PDP_GRPH4CTRL_GRPH4BLENDPOS_LENGTH (3) -+#define PDP_GRPH4CTRL_GRPH4BLENDPOS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4CTRL, GRPH4DITHEREN -+*/ -+#define PDP_GRPH4CTRL_GRPH4DITHEREN_MASK (0x00800000) -+#define PDP_GRPH4CTRL_GRPH4DITHEREN_LSBMASK (0x00000001) -+#define PDP_GRPH4CTRL_GRPH4DITHEREN_SHIFT (23) -+#define PDP_GRPH4CTRL_GRPH4DITHEREN_LENGTH (1) -+#define PDP_GRPH4CTRL_GRPH4DITHEREN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1CTRL_OFFSET (0x0030) -+ -+/* PDP, VID1CTRL, VID1STREN -+*/ -+#define PDP_VID1CTRL_VID1STREN_MASK (0x80000000) -+#define PDP_VID1CTRL_VID1STREN_LSBMASK (0x00000001) -+#define PDP_VID1CTRL_VID1STREN_SHIFT (31) -+#define PDP_VID1CTRL_VID1STREN_LENGTH (1) -+#define PDP_VID1CTRL_VID1STREN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1CTRL, VID1CKEYEN -+*/ -+#define PDP_VID1CTRL_VID1CKEYEN_MASK (0x40000000) -+#define PDP_VID1CTRL_VID1CKEYEN_LSBMASK (0x00000001) -+#define PDP_VID1CTRL_VID1CKEYEN_SHIFT (30) 
-+#define PDP_VID1CTRL_VID1CKEYEN_LENGTH (1) -+#define PDP_VID1CTRL_VID1CKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1CTRL, VID1CKEYSRC -+*/ -+#define PDP_VID1CTRL_VID1CKEYSRC_MASK (0x20000000) -+#define PDP_VID1CTRL_VID1CKEYSRC_LSBMASK (0x00000001) -+#define PDP_VID1CTRL_VID1CKEYSRC_SHIFT (29) -+#define PDP_VID1CTRL_VID1CKEYSRC_LENGTH (1) -+#define PDP_VID1CTRL_VID1CKEYSRC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1CTRL, VID1BLEND -+*/ -+#define PDP_VID1CTRL_VID1BLEND_MASK (0x18000000) -+#define PDP_VID1CTRL_VID1BLEND_LSBMASK (0x00000003) -+#define PDP_VID1CTRL_VID1BLEND_SHIFT (27) -+#define PDP_VID1CTRL_VID1BLEND_LENGTH (2) -+#define PDP_VID1CTRL_VID1BLEND_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1CTRL, VID1BLENDPOS -+*/ -+#define PDP_VID1CTRL_VID1BLENDPOS_MASK (0x07000000) -+#define PDP_VID1CTRL_VID1BLENDPOS_LSBMASK (0x00000007) -+#define PDP_VID1CTRL_VID1BLENDPOS_SHIFT (24) -+#define PDP_VID1CTRL_VID1BLENDPOS_LENGTH (3) -+#define PDP_VID1CTRL_VID1BLENDPOS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1CTRL, VID1DITHEREN -+*/ -+#define PDP_VID1CTRL_VID1DITHEREN_MASK (0x00800000) -+#define PDP_VID1CTRL_VID1DITHEREN_LSBMASK (0x00000001) -+#define PDP_VID1CTRL_VID1DITHEREN_SHIFT (23) -+#define PDP_VID1CTRL_VID1DITHEREN_LENGTH (1) -+#define PDP_VID1CTRL_VID1DITHEREN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2CTRL_OFFSET (0x0034) -+ -+/* PDP, VID2CTRL, VID2STREN -+*/ -+#define PDP_VID2CTRL_VID2STREN_MASK (0x80000000) -+#define PDP_VID2CTRL_VID2STREN_LSBMASK (0x00000001) -+#define PDP_VID2CTRL_VID2STREN_SHIFT (31) -+#define PDP_VID2CTRL_VID2STREN_LENGTH (1) -+#define PDP_VID2CTRL_VID2STREN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2CTRL, VID2CKEYEN -+*/ -+#define PDP_VID2CTRL_VID2CKEYEN_MASK (0x40000000) -+#define PDP_VID2CTRL_VID2CKEYEN_LSBMASK (0x00000001) -+#define PDP_VID2CTRL_VID2CKEYEN_SHIFT (30) -+#define PDP_VID2CTRL_VID2CKEYEN_LENGTH (1) -+#define PDP_VID2CTRL_VID2CKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2CTRL, VID2CKEYSRC -+*/ -+#define PDP_VID2CTRL_VID2CKEYSRC_MASK (0x20000000) -+#define PDP_VID2CTRL_VID2CKEYSRC_LSBMASK (0x00000001) -+#define PDP_VID2CTRL_VID2CKEYSRC_SHIFT (29) -+#define PDP_VID2CTRL_VID2CKEYSRC_LENGTH (1) -+#define PDP_VID2CTRL_VID2CKEYSRC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2CTRL, VID2BLEND -+*/ -+#define PDP_VID2CTRL_VID2BLEND_MASK (0x18000000) -+#define PDP_VID2CTRL_VID2BLEND_LSBMASK (0x00000003) -+#define PDP_VID2CTRL_VID2BLEND_SHIFT (27) -+#define PDP_VID2CTRL_VID2BLEND_LENGTH (2) -+#define PDP_VID2CTRL_VID2BLEND_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2CTRL, VID2BLENDPOS -+*/ -+#define PDP_VID2CTRL_VID2BLENDPOS_MASK (0x07000000) -+#define PDP_VID2CTRL_VID2BLENDPOS_LSBMASK (0x00000007) -+#define PDP_VID2CTRL_VID2BLENDPOS_SHIFT (24) -+#define PDP_VID2CTRL_VID2BLENDPOS_LENGTH (3) -+#define PDP_VID2CTRL_VID2BLENDPOS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2CTRL, VID2DITHEREN -+*/ -+#define PDP_VID2CTRL_VID2DITHEREN_MASK (0x00800000) -+#define PDP_VID2CTRL_VID2DITHEREN_LSBMASK (0x00000001) -+#define PDP_VID2CTRL_VID2DITHEREN_SHIFT (23) -+#define PDP_VID2CTRL_VID2DITHEREN_LENGTH (1) -+#define PDP_VID2CTRL_VID2DITHEREN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3CTRL_OFFSET (0x0038) -+ -+/* PDP, VID3CTRL, VID3STREN -+*/ -+#define PDP_VID3CTRL_VID3STREN_MASK (0x80000000) -+#define PDP_VID3CTRL_VID3STREN_LSBMASK (0x00000001) -+#define PDP_VID3CTRL_VID3STREN_SHIFT (31) -+#define PDP_VID3CTRL_VID3STREN_LENGTH (1) -+#define PDP_VID3CTRL_VID3STREN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3CTRL, VID3CKEYEN -+*/ -+#define PDP_VID3CTRL_VID3CKEYEN_MASK (0x40000000) -+#define 
PDP_VID3CTRL_VID3CKEYEN_LSBMASK (0x00000001) -+#define PDP_VID3CTRL_VID3CKEYEN_SHIFT (30) -+#define PDP_VID3CTRL_VID3CKEYEN_LENGTH (1) -+#define PDP_VID3CTRL_VID3CKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3CTRL, VID3CKEYSRC -+*/ -+#define PDP_VID3CTRL_VID3CKEYSRC_MASK (0x20000000) -+#define PDP_VID3CTRL_VID3CKEYSRC_LSBMASK (0x00000001) -+#define PDP_VID3CTRL_VID3CKEYSRC_SHIFT (29) -+#define PDP_VID3CTRL_VID3CKEYSRC_LENGTH (1) -+#define PDP_VID3CTRL_VID3CKEYSRC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3CTRL, VID3BLEND -+*/ -+#define PDP_VID3CTRL_VID3BLEND_MASK (0x18000000) -+#define PDP_VID3CTRL_VID3BLEND_LSBMASK (0x00000003) -+#define PDP_VID3CTRL_VID3BLEND_SHIFT (27) -+#define PDP_VID3CTRL_VID3BLEND_LENGTH (2) -+#define PDP_VID3CTRL_VID3BLEND_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3CTRL, VID3BLENDPOS -+*/ -+#define PDP_VID3CTRL_VID3BLENDPOS_MASK (0x07000000) -+#define PDP_VID3CTRL_VID3BLENDPOS_LSBMASK (0x00000007) -+#define PDP_VID3CTRL_VID3BLENDPOS_SHIFT (24) -+#define PDP_VID3CTRL_VID3BLENDPOS_LENGTH (3) -+#define PDP_VID3CTRL_VID3BLENDPOS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3CTRL, VID3DITHEREN -+*/ -+#define PDP_VID3CTRL_VID3DITHEREN_MASK (0x00800000) -+#define PDP_VID3CTRL_VID3DITHEREN_LSBMASK (0x00000001) -+#define PDP_VID3CTRL_VID3DITHEREN_SHIFT (23) -+#define PDP_VID3CTRL_VID3DITHEREN_LENGTH (1) -+#define PDP_VID3CTRL_VID3DITHEREN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4CTRL_OFFSET (0x003C) -+ -+/* PDP, VID4CTRL, VID4STREN -+*/ -+#define PDP_VID4CTRL_VID4STREN_MASK (0x80000000) -+#define PDP_VID4CTRL_VID4STREN_LSBMASK (0x00000001) -+#define PDP_VID4CTRL_VID4STREN_SHIFT (31) -+#define PDP_VID4CTRL_VID4STREN_LENGTH (1) -+#define PDP_VID4CTRL_VID4STREN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4CTRL, VID4CKEYEN -+*/ -+#define PDP_VID4CTRL_VID4CKEYEN_MASK (0x40000000) -+#define PDP_VID4CTRL_VID4CKEYEN_LSBMASK (0x00000001) -+#define PDP_VID4CTRL_VID4CKEYEN_SHIFT (30) -+#define PDP_VID4CTRL_VID4CKEYEN_LENGTH (1) -+#define PDP_VID4CTRL_VID4CKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4CTRL, VID4CKEYSRC -+*/ -+#define PDP_VID4CTRL_VID4CKEYSRC_MASK (0x20000000) -+#define PDP_VID4CTRL_VID4CKEYSRC_LSBMASK (0x00000001) -+#define PDP_VID4CTRL_VID4CKEYSRC_SHIFT (29) -+#define PDP_VID4CTRL_VID4CKEYSRC_LENGTH (1) -+#define PDP_VID4CTRL_VID4CKEYSRC_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4CTRL, VID4BLEND -+*/ -+#define PDP_VID4CTRL_VID4BLEND_MASK (0x18000000) -+#define PDP_VID4CTRL_VID4BLEND_LSBMASK (0x00000003) -+#define PDP_VID4CTRL_VID4BLEND_SHIFT (27) -+#define PDP_VID4CTRL_VID4BLEND_LENGTH (2) -+#define PDP_VID4CTRL_VID4BLEND_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4CTRL, VID4BLENDPOS -+*/ -+#define PDP_VID4CTRL_VID4BLENDPOS_MASK (0x07000000) -+#define PDP_VID4CTRL_VID4BLENDPOS_LSBMASK (0x00000007) -+#define PDP_VID4CTRL_VID4BLENDPOS_SHIFT (24) -+#define PDP_VID4CTRL_VID4BLENDPOS_LENGTH (3) -+#define PDP_VID4CTRL_VID4BLENDPOS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4CTRL, VID4DITHEREN -+*/ -+#define PDP_VID4CTRL_VID4DITHEREN_MASK (0x00800000) -+#define PDP_VID4CTRL_VID4DITHEREN_LSBMASK (0x00000001) -+#define PDP_VID4CTRL_VID4DITHEREN_SHIFT (23) -+#define PDP_VID4CTRL_VID4DITHEREN_LENGTH (1) -+#define PDP_VID4CTRL_VID4DITHEREN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1UCTRL_OFFSET (0x0050) -+ -+/* PDP, VID1UCTRL, VID1UVHALFSTR -+*/ -+#define PDP_VID1UCTRL_VID1UVHALFSTR_MASK (0xC0000000) -+#define PDP_VID1UCTRL_VID1UVHALFSTR_LSBMASK (0x00000003) -+#define PDP_VID1UCTRL_VID1UVHALFSTR_SHIFT (30) -+#define PDP_VID1UCTRL_VID1UVHALFSTR_LENGTH (2) -+#define 
PDP_VID1UCTRL_VID1UVHALFSTR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2UCTRL_OFFSET (0x0054) -+ -+/* PDP, VID2UCTRL, VID2UVHALFSTR -+*/ -+#define PDP_VID2UCTRL_VID2UVHALFSTR_MASK (0xC0000000) -+#define PDP_VID2UCTRL_VID2UVHALFSTR_LSBMASK (0x00000003) -+#define PDP_VID2UCTRL_VID2UVHALFSTR_SHIFT (30) -+#define PDP_VID2UCTRL_VID2UVHALFSTR_LENGTH (2) -+#define PDP_VID2UCTRL_VID2UVHALFSTR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3UCTRL_OFFSET (0x0058) -+ -+/* PDP, VID3UCTRL, VID3UVHALFSTR -+*/ -+#define PDP_VID3UCTRL_VID3UVHALFSTR_MASK (0xC0000000) -+#define PDP_VID3UCTRL_VID3UVHALFSTR_LSBMASK (0x00000003) -+#define PDP_VID3UCTRL_VID3UVHALFSTR_SHIFT (30) -+#define PDP_VID3UCTRL_VID3UVHALFSTR_LENGTH (2) -+#define PDP_VID3UCTRL_VID3UVHALFSTR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4UCTRL_OFFSET (0x005C) -+ -+/* PDP, VID4UCTRL, VID4UVHALFSTR -+*/ -+#define PDP_VID4UCTRL_VID4UVHALFSTR_MASK (0xC0000000) -+#define PDP_VID4UCTRL_VID4UVHALFSTR_LSBMASK (0x00000003) -+#define PDP_VID4UCTRL_VID4UVHALFSTR_SHIFT (30) -+#define PDP_VID4UCTRL_VID4UVHALFSTR_LENGTH (2) -+#define PDP_VID4UCTRL_VID4UVHALFSTR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH1STRIDE_OFFSET (0x0060) -+ -+/* PDP, GRPH1STRIDE, GRPH1STRIDE -+*/ -+#define PDP_GRPH1STRIDE_GRPH1STRIDE_MASK (0xFFC00000) -+#define PDP_GRPH1STRIDE_GRPH1STRIDE_LSBMASK (0x000003FF) -+#define PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT (22) -+#define PDP_GRPH1STRIDE_GRPH1STRIDE_LENGTH (10) -+#define PDP_GRPH1STRIDE_GRPH1STRIDE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH2STRIDE_OFFSET (0x0064) -+ -+/* PDP, GRPH2STRIDE, GRPH2STRIDE -+*/ -+#define PDP_GRPH2STRIDE_GRPH2STRIDE_MASK (0xFFC00000) -+#define PDP_GRPH2STRIDE_GRPH2STRIDE_LSBMASK (0x000003FF) -+#define PDP_GRPH2STRIDE_GRPH2STRIDE_SHIFT (22) -+#define PDP_GRPH2STRIDE_GRPH2STRIDE_LENGTH (10) -+#define PDP_GRPH2STRIDE_GRPH2STRIDE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH3STRIDE_OFFSET (0x0068) -+ -+/* PDP, GRPH3STRIDE, GRPH3STRIDE -+*/ -+#define PDP_GRPH3STRIDE_GRPH3STRIDE_MASK (0xFFC00000) -+#define PDP_GRPH3STRIDE_GRPH3STRIDE_LSBMASK (0x000003FF) -+#define PDP_GRPH3STRIDE_GRPH3STRIDE_SHIFT (22) -+#define PDP_GRPH3STRIDE_GRPH3STRIDE_LENGTH (10) -+#define PDP_GRPH3STRIDE_GRPH3STRIDE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH4STRIDE_OFFSET (0x006C) -+ -+/* PDP, GRPH4STRIDE, GRPH4STRIDE -+*/ -+#define PDP_GRPH4STRIDE_GRPH4STRIDE_MASK (0xFFC00000) -+#define PDP_GRPH4STRIDE_GRPH4STRIDE_LSBMASK (0x000003FF) -+#define PDP_GRPH4STRIDE_GRPH4STRIDE_SHIFT (22) -+#define PDP_GRPH4STRIDE_GRPH4STRIDE_LENGTH (10) -+#define PDP_GRPH4STRIDE_GRPH4STRIDE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1STRIDE_OFFSET (0x0070) -+ -+/* PDP, VID1STRIDE, VID1STRIDE -+*/ -+#define PDP_VID1STRIDE_VID1STRIDE_MASK (0xFFC00000) -+#define PDP_VID1STRIDE_VID1STRIDE_LSBMASK (0x000003FF) -+#define PDP_VID1STRIDE_VID1STRIDE_SHIFT (22) -+#define PDP_VID1STRIDE_VID1STRIDE_LENGTH (10) -+#define PDP_VID1STRIDE_VID1STRIDE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2STRIDE_OFFSET (0x0074) -+ -+/* PDP, VID2STRIDE, VID2STRIDE -+*/ -+#define PDP_VID2STRIDE_VID2STRIDE_MASK (0xFFC00000) -+#define PDP_VID2STRIDE_VID2STRIDE_LSBMASK (0x000003FF) -+#define PDP_VID2STRIDE_VID2STRIDE_SHIFT (22) -+#define PDP_VID2STRIDE_VID2STRIDE_LENGTH (10) -+#define PDP_VID2STRIDE_VID2STRIDE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3STRIDE_OFFSET (0x0078) -+ -+/* PDP, VID3STRIDE, VID3STRIDE -+*/ -+#define PDP_VID3STRIDE_VID3STRIDE_MASK (0xFFC00000) -+#define PDP_VID3STRIDE_VID3STRIDE_LSBMASK (0x000003FF) -+#define PDP_VID3STRIDE_VID3STRIDE_SHIFT (22) -+#define 
PDP_VID3STRIDE_VID3STRIDE_LENGTH (10) -+#define PDP_VID3STRIDE_VID3STRIDE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4STRIDE_OFFSET (0x007C) -+ -+/* PDP, VID4STRIDE, VID4STRIDE -+*/ -+#define PDP_VID4STRIDE_VID4STRIDE_MASK (0xFFC00000) -+#define PDP_VID4STRIDE_VID4STRIDE_LSBMASK (0x000003FF) -+#define PDP_VID4STRIDE_VID4STRIDE_SHIFT (22) -+#define PDP_VID4STRIDE_VID4STRIDE_LENGTH (10) -+#define PDP_VID4STRIDE_VID4STRIDE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH1SIZE_OFFSET (0x0080) -+ -+/* PDP, GRPH1SIZE, GRPH1WIDTH -+*/ -+#define PDP_GRPH1SIZE_GRPH1WIDTH_MASK (0x0FFF0000) -+#define PDP_GRPH1SIZE_GRPH1WIDTH_LSBMASK (0x00000FFF) -+#define PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT (16) -+#define PDP_GRPH1SIZE_GRPH1WIDTH_LENGTH (12) -+#define PDP_GRPH1SIZE_GRPH1WIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1SIZE, GRPH1HEIGHT -+*/ -+#define PDP_GRPH1SIZE_GRPH1HEIGHT_MASK (0x00000FFF) -+#define PDP_GRPH1SIZE_GRPH1HEIGHT_LSBMASK (0x00000FFF) -+#define PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT (0) -+#define PDP_GRPH1SIZE_GRPH1HEIGHT_LENGTH (12) -+#define PDP_GRPH1SIZE_GRPH1HEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH2SIZE_OFFSET (0x0084) -+ -+/* PDP, GRPH2SIZE, GRPH2WIDTH -+*/ -+#define PDP_GRPH2SIZE_GRPH2WIDTH_MASK (0x0FFF0000) -+#define PDP_GRPH2SIZE_GRPH2WIDTH_LSBMASK (0x00000FFF) -+#define PDP_GRPH2SIZE_GRPH2WIDTH_SHIFT (16) -+#define PDP_GRPH2SIZE_GRPH2WIDTH_LENGTH (12) -+#define PDP_GRPH2SIZE_GRPH2WIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2SIZE, GRPH2HEIGHT -+*/ -+#define PDP_GRPH2SIZE_GRPH2HEIGHT_MASK (0x00000FFF) -+#define PDP_GRPH2SIZE_GRPH2HEIGHT_LSBMASK (0x00000FFF) -+#define PDP_GRPH2SIZE_GRPH2HEIGHT_SHIFT (0) -+#define PDP_GRPH2SIZE_GRPH2HEIGHT_LENGTH (12) -+#define PDP_GRPH2SIZE_GRPH2HEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH3SIZE_OFFSET (0x0088) -+ -+/* PDP, GRPH3SIZE, GRPH3WIDTH -+*/ -+#define PDP_GRPH3SIZE_GRPH3WIDTH_MASK (0x0FFF0000) -+#define PDP_GRPH3SIZE_GRPH3WIDTH_LSBMASK (0x00000FFF) -+#define PDP_GRPH3SIZE_GRPH3WIDTH_SHIFT (16) -+#define PDP_GRPH3SIZE_GRPH3WIDTH_LENGTH (12) -+#define PDP_GRPH3SIZE_GRPH3WIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3SIZE, GRPH3HEIGHT -+*/ -+#define PDP_GRPH3SIZE_GRPH3HEIGHT_MASK (0x00000FFF) -+#define PDP_GRPH3SIZE_GRPH3HEIGHT_LSBMASK (0x00000FFF) -+#define PDP_GRPH3SIZE_GRPH3HEIGHT_SHIFT (0) -+#define PDP_GRPH3SIZE_GRPH3HEIGHT_LENGTH (12) -+#define PDP_GRPH3SIZE_GRPH3HEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH4SIZE_OFFSET (0x008C) -+ -+/* PDP, GRPH4SIZE, GRPH4WIDTH -+*/ -+#define PDP_GRPH4SIZE_GRPH4WIDTH_MASK (0x0FFF0000) -+#define PDP_GRPH4SIZE_GRPH4WIDTH_LSBMASK (0x00000FFF) -+#define PDP_GRPH4SIZE_GRPH4WIDTH_SHIFT (16) -+#define PDP_GRPH4SIZE_GRPH4WIDTH_LENGTH (12) -+#define PDP_GRPH4SIZE_GRPH4WIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4SIZE, GRPH4HEIGHT -+*/ -+#define PDP_GRPH4SIZE_GRPH4HEIGHT_MASK (0x00000FFF) -+#define PDP_GRPH4SIZE_GRPH4HEIGHT_LSBMASK (0x00000FFF) -+#define PDP_GRPH4SIZE_GRPH4HEIGHT_SHIFT (0) -+#define PDP_GRPH4SIZE_GRPH4HEIGHT_LENGTH (12) -+#define PDP_GRPH4SIZE_GRPH4HEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1SIZE_OFFSET (0x0090) -+ -+/* PDP, VID1SIZE, VID1WIDTH -+*/ -+#define PDP_VID1SIZE_VID1WIDTH_MASK (0x0FFF0000) -+#define PDP_VID1SIZE_VID1WIDTH_LSBMASK (0x00000FFF) -+#define PDP_VID1SIZE_VID1WIDTH_SHIFT (16) -+#define PDP_VID1SIZE_VID1WIDTH_LENGTH (12) -+#define PDP_VID1SIZE_VID1WIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SIZE, VID1HEIGHT -+*/ -+#define PDP_VID1SIZE_VID1HEIGHT_MASK (0x00000FFF) -+#define PDP_VID1SIZE_VID1HEIGHT_LSBMASK (0x00000FFF) -+#define PDP_VID1SIZE_VID1HEIGHT_SHIFT 
(0) -+#define PDP_VID1SIZE_VID1HEIGHT_LENGTH (12) -+#define PDP_VID1SIZE_VID1HEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2SIZE_OFFSET (0x0094) -+ -+/* PDP, VID2SIZE, VID2WIDTH -+*/ -+#define PDP_VID2SIZE_VID2WIDTH_MASK (0x0FFF0000) -+#define PDP_VID2SIZE_VID2WIDTH_LSBMASK (0x00000FFF) -+#define PDP_VID2SIZE_VID2WIDTH_SHIFT (16) -+#define PDP_VID2SIZE_VID2WIDTH_LENGTH (12) -+#define PDP_VID2SIZE_VID2WIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SIZE, VID2HEIGHT -+*/ -+#define PDP_VID2SIZE_VID2HEIGHT_MASK (0x00000FFF) -+#define PDP_VID2SIZE_VID2HEIGHT_LSBMASK (0x00000FFF) -+#define PDP_VID2SIZE_VID2HEIGHT_SHIFT (0) -+#define PDP_VID2SIZE_VID2HEIGHT_LENGTH (12) -+#define PDP_VID2SIZE_VID2HEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3SIZE_OFFSET (0x0098) -+ -+/* PDP, VID3SIZE, VID3WIDTH -+*/ -+#define PDP_VID3SIZE_VID3WIDTH_MASK (0x0FFF0000) -+#define PDP_VID3SIZE_VID3WIDTH_LSBMASK (0x00000FFF) -+#define PDP_VID3SIZE_VID3WIDTH_SHIFT (16) -+#define PDP_VID3SIZE_VID3WIDTH_LENGTH (12) -+#define PDP_VID3SIZE_VID3WIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SIZE, VID3HEIGHT -+*/ -+#define PDP_VID3SIZE_VID3HEIGHT_MASK (0x00000FFF) -+#define PDP_VID3SIZE_VID3HEIGHT_LSBMASK (0x00000FFF) -+#define PDP_VID3SIZE_VID3HEIGHT_SHIFT (0) -+#define PDP_VID3SIZE_VID3HEIGHT_LENGTH (12) -+#define PDP_VID3SIZE_VID3HEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4SIZE_OFFSET (0x009C) -+ -+/* PDP, VID4SIZE, VID4WIDTH -+*/ -+#define PDP_VID4SIZE_VID4WIDTH_MASK (0x0FFF0000) -+#define PDP_VID4SIZE_VID4WIDTH_LSBMASK (0x00000FFF) -+#define PDP_VID4SIZE_VID4WIDTH_SHIFT (16) -+#define PDP_VID4SIZE_VID4WIDTH_LENGTH (12) -+#define PDP_VID4SIZE_VID4WIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SIZE, VID4HEIGHT -+*/ -+#define PDP_VID4SIZE_VID4HEIGHT_MASK (0x00000FFF) -+#define PDP_VID4SIZE_VID4HEIGHT_LSBMASK (0x00000FFF) -+#define PDP_VID4SIZE_VID4HEIGHT_SHIFT (0) -+#define PDP_VID4SIZE_VID4HEIGHT_LENGTH (12) -+#define PDP_VID4SIZE_VID4HEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH1POSN_OFFSET (0x00A0) -+ -+/* PDP, GRPH1POSN, GRPH1XSTART -+*/ -+#define PDP_GRPH1POSN_GRPH1XSTART_MASK (0x0FFF0000) -+#define PDP_GRPH1POSN_GRPH1XSTART_LSBMASK (0x00000FFF) -+#define PDP_GRPH1POSN_GRPH1XSTART_SHIFT (16) -+#define PDP_GRPH1POSN_GRPH1XSTART_LENGTH (12) -+#define PDP_GRPH1POSN_GRPH1XSTART_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1POSN, GRPH1YSTART -+*/ -+#define PDP_GRPH1POSN_GRPH1YSTART_MASK (0x00000FFF) -+#define PDP_GRPH1POSN_GRPH1YSTART_LSBMASK (0x00000FFF) -+#define PDP_GRPH1POSN_GRPH1YSTART_SHIFT (0) -+#define PDP_GRPH1POSN_GRPH1YSTART_LENGTH (12) -+#define PDP_GRPH1POSN_GRPH1YSTART_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH2POSN_OFFSET (0x00A4) -+ -+/* PDP, GRPH2POSN, GRPH2XSTART -+*/ -+#define PDP_GRPH2POSN_GRPH2XSTART_MASK (0x0FFF0000) -+#define PDP_GRPH2POSN_GRPH2XSTART_LSBMASK (0x00000FFF) -+#define PDP_GRPH2POSN_GRPH2XSTART_SHIFT (16) -+#define PDP_GRPH2POSN_GRPH2XSTART_LENGTH (12) -+#define PDP_GRPH2POSN_GRPH2XSTART_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2POSN, GRPH2YSTART -+*/ -+#define PDP_GRPH2POSN_GRPH2YSTART_MASK (0x00000FFF) -+#define PDP_GRPH2POSN_GRPH2YSTART_LSBMASK (0x00000FFF) -+#define PDP_GRPH2POSN_GRPH2YSTART_SHIFT (0) -+#define PDP_GRPH2POSN_GRPH2YSTART_LENGTH (12) -+#define PDP_GRPH2POSN_GRPH2YSTART_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH3POSN_OFFSET (0x00A8) -+ -+/* PDP, GRPH3POSN, GRPH3XSTART -+*/ -+#define PDP_GRPH3POSN_GRPH3XSTART_MASK (0x0FFF0000) -+#define PDP_GRPH3POSN_GRPH3XSTART_LSBMASK (0x00000FFF) -+#define PDP_GRPH3POSN_GRPH3XSTART_SHIFT (16) -+#define 
PDP_GRPH3POSN_GRPH3XSTART_LENGTH (12) -+#define PDP_GRPH3POSN_GRPH3XSTART_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3POSN, GRPH3YSTART -+*/ -+#define PDP_GRPH3POSN_GRPH3YSTART_MASK (0x00000FFF) -+#define PDP_GRPH3POSN_GRPH3YSTART_LSBMASK (0x00000FFF) -+#define PDP_GRPH3POSN_GRPH3YSTART_SHIFT (0) -+#define PDP_GRPH3POSN_GRPH3YSTART_LENGTH (12) -+#define PDP_GRPH3POSN_GRPH3YSTART_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH4POSN_OFFSET (0x00AC) -+ -+/* PDP, GRPH4POSN, GRPH4XSTART -+*/ -+#define PDP_GRPH4POSN_GRPH4XSTART_MASK (0x0FFF0000) -+#define PDP_GRPH4POSN_GRPH4XSTART_LSBMASK (0x00000FFF) -+#define PDP_GRPH4POSN_GRPH4XSTART_SHIFT (16) -+#define PDP_GRPH4POSN_GRPH4XSTART_LENGTH (12) -+#define PDP_GRPH4POSN_GRPH4XSTART_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4POSN, GRPH4YSTART -+*/ -+#define PDP_GRPH4POSN_GRPH4YSTART_MASK (0x00000FFF) -+#define PDP_GRPH4POSN_GRPH4YSTART_LSBMASK (0x00000FFF) -+#define PDP_GRPH4POSN_GRPH4YSTART_SHIFT (0) -+#define PDP_GRPH4POSN_GRPH4YSTART_LENGTH (12) -+#define PDP_GRPH4POSN_GRPH4YSTART_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1POSN_OFFSET (0x00B0) -+ -+/* PDP, VID1POSN, VID1XSTART -+*/ -+#define PDP_VID1POSN_VID1XSTART_MASK (0x0FFF0000) -+#define PDP_VID1POSN_VID1XSTART_LSBMASK (0x00000FFF) -+#define PDP_VID1POSN_VID1XSTART_SHIFT (16) -+#define PDP_VID1POSN_VID1XSTART_LENGTH (12) -+#define PDP_VID1POSN_VID1XSTART_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1POSN, VID1YSTART -+*/ -+#define PDP_VID1POSN_VID1YSTART_MASK (0x00000FFF) -+#define PDP_VID1POSN_VID1YSTART_LSBMASK (0x00000FFF) -+#define PDP_VID1POSN_VID1YSTART_SHIFT (0) -+#define PDP_VID1POSN_VID1YSTART_LENGTH (12) -+#define PDP_VID1POSN_VID1YSTART_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2POSN_OFFSET (0x00B4) -+ -+/* PDP, VID2POSN, VID2XSTART -+*/ -+#define PDP_VID2POSN_VID2XSTART_MASK (0x0FFF0000) -+#define PDP_VID2POSN_VID2XSTART_LSBMASK (0x00000FFF) -+#define PDP_VID2POSN_VID2XSTART_SHIFT (16) -+#define PDP_VID2POSN_VID2XSTART_LENGTH (12) -+#define PDP_VID2POSN_VID2XSTART_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2POSN, VID2YSTART -+*/ -+#define PDP_VID2POSN_VID2YSTART_MASK (0x00000FFF) -+#define PDP_VID2POSN_VID2YSTART_LSBMASK (0x00000FFF) -+#define PDP_VID2POSN_VID2YSTART_SHIFT (0) -+#define PDP_VID2POSN_VID2YSTART_LENGTH (12) -+#define PDP_VID2POSN_VID2YSTART_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3POSN_OFFSET (0x00B8) -+ -+/* PDP, VID3POSN, VID3XSTART -+*/ -+#define PDP_VID3POSN_VID3XSTART_MASK (0x0FFF0000) -+#define PDP_VID3POSN_VID3XSTART_LSBMASK (0x00000FFF) -+#define PDP_VID3POSN_VID3XSTART_SHIFT (16) -+#define PDP_VID3POSN_VID3XSTART_LENGTH (12) -+#define PDP_VID3POSN_VID3XSTART_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3POSN, VID3YSTART -+*/ -+#define PDP_VID3POSN_VID3YSTART_MASK (0x00000FFF) -+#define PDP_VID3POSN_VID3YSTART_LSBMASK (0x00000FFF) -+#define PDP_VID3POSN_VID3YSTART_SHIFT (0) -+#define PDP_VID3POSN_VID3YSTART_LENGTH (12) -+#define PDP_VID3POSN_VID3YSTART_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4POSN_OFFSET (0x00BC) -+ -+/* PDP, VID4POSN, VID4XSTART -+*/ -+#define PDP_VID4POSN_VID4XSTART_MASK (0x0FFF0000) -+#define PDP_VID4POSN_VID4XSTART_LSBMASK (0x00000FFF) -+#define PDP_VID4POSN_VID4XSTART_SHIFT (16) -+#define PDP_VID4POSN_VID4XSTART_LENGTH (12) -+#define PDP_VID4POSN_VID4XSTART_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4POSN, VID4YSTART -+*/ -+#define PDP_VID4POSN_VID4YSTART_MASK (0x00000FFF) -+#define PDP_VID4POSN_VID4YSTART_LSBMASK (0x00000FFF) -+#define PDP_VID4POSN_VID4YSTART_SHIFT (0) -+#define PDP_VID4POSN_VID4YSTART_LENGTH (12) -+#define 
PDP_VID4POSN_VID4YSTART_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH1GALPHA_OFFSET (0x00C0) -+ -+/* PDP, GRPH1GALPHA, GRPH1GALPHA -+*/ -+#define PDP_GRPH1GALPHA_GRPH1GALPHA_MASK (0x000003FF) -+#define PDP_GRPH1GALPHA_GRPH1GALPHA_LSBMASK (0x000003FF) -+#define PDP_GRPH1GALPHA_GRPH1GALPHA_SHIFT (0) -+#define PDP_GRPH1GALPHA_GRPH1GALPHA_LENGTH (10) -+#define PDP_GRPH1GALPHA_GRPH1GALPHA_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH2GALPHA_OFFSET (0x00C4) -+ -+/* PDP, GRPH2GALPHA, GRPH2GALPHA -+*/ -+#define PDP_GRPH2GALPHA_GRPH2GALPHA_MASK (0x000003FF) -+#define PDP_GRPH2GALPHA_GRPH2GALPHA_LSBMASK (0x000003FF) -+#define PDP_GRPH2GALPHA_GRPH2GALPHA_SHIFT (0) -+#define PDP_GRPH2GALPHA_GRPH2GALPHA_LENGTH (10) -+#define PDP_GRPH2GALPHA_GRPH2GALPHA_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH3GALPHA_OFFSET (0x00C8) -+ -+/* PDP, GRPH3GALPHA, GRPH3GALPHA -+*/ -+#define PDP_GRPH3GALPHA_GRPH3GALPHA_MASK (0x000003FF) -+#define PDP_GRPH3GALPHA_GRPH3GALPHA_LSBMASK (0x000003FF) -+#define PDP_GRPH3GALPHA_GRPH3GALPHA_SHIFT (0) -+#define PDP_GRPH3GALPHA_GRPH3GALPHA_LENGTH (10) -+#define PDP_GRPH3GALPHA_GRPH3GALPHA_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH4GALPHA_OFFSET (0x00CC) -+ -+/* PDP, GRPH4GALPHA, GRPH4GALPHA -+*/ -+#define PDP_GRPH4GALPHA_GRPH4GALPHA_MASK (0x000003FF) -+#define PDP_GRPH4GALPHA_GRPH4GALPHA_LSBMASK (0x000003FF) -+#define PDP_GRPH4GALPHA_GRPH4GALPHA_SHIFT (0) -+#define PDP_GRPH4GALPHA_GRPH4GALPHA_LENGTH (10) -+#define PDP_GRPH4GALPHA_GRPH4GALPHA_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1GALPHA_OFFSET (0x00D0) -+ -+/* PDP, VID1GALPHA, VID1GALPHA -+*/ -+#define PDP_VID1GALPHA_VID1GALPHA_MASK (0x000003FF) -+#define PDP_VID1GALPHA_VID1GALPHA_LSBMASK (0x000003FF) -+#define PDP_VID1GALPHA_VID1GALPHA_SHIFT (0) -+#define PDP_VID1GALPHA_VID1GALPHA_LENGTH (10) -+#define PDP_VID1GALPHA_VID1GALPHA_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2GALPHA_OFFSET (0x00D4) -+ -+/* PDP, VID2GALPHA, VID2GALPHA -+*/ -+#define PDP_VID2GALPHA_VID2GALPHA_MASK (0x000003FF) -+#define PDP_VID2GALPHA_VID2GALPHA_LSBMASK (0x000003FF) -+#define PDP_VID2GALPHA_VID2GALPHA_SHIFT (0) -+#define PDP_VID2GALPHA_VID2GALPHA_LENGTH (10) -+#define PDP_VID2GALPHA_VID2GALPHA_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3GALPHA_OFFSET (0x00D8) -+ -+/* PDP, VID3GALPHA, VID3GALPHA -+*/ -+#define PDP_VID3GALPHA_VID3GALPHA_MASK (0x000003FF) -+#define PDP_VID3GALPHA_VID3GALPHA_LSBMASK (0x000003FF) -+#define PDP_VID3GALPHA_VID3GALPHA_SHIFT (0) -+#define PDP_VID3GALPHA_VID3GALPHA_LENGTH (10) -+#define PDP_VID3GALPHA_VID3GALPHA_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4GALPHA_OFFSET (0x00DC) -+ -+/* PDP, VID4GALPHA, VID4GALPHA -+*/ -+#define PDP_VID4GALPHA_VID4GALPHA_MASK (0x000003FF) -+#define PDP_VID4GALPHA_VID4GALPHA_LSBMASK (0x000003FF) -+#define PDP_VID4GALPHA_VID4GALPHA_SHIFT (0) -+#define PDP_VID4GALPHA_VID4GALPHA_LENGTH (10) -+#define PDP_VID4GALPHA_VID4GALPHA_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH1CKEY_R_OFFSET (0x00E0) -+ -+/* PDP, GRPH1CKEY_R, GRPH1CKEY_R -+*/ -+#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_MASK (0x000003FF) -+#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_LSBMASK (0x000003FF) -+#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_SHIFT (0) -+#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_LENGTH (10) -+#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH1CKEY_GB_OFFSET (0x00E4) -+ -+/* PDP, GRPH1CKEY_GB, GRPH1CKEY_G -+*/ -+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_MASK (0x03FF0000) -+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LSBMASK (0x000003FF) -+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SHIFT (16) -+#define 
PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LENGTH (10) -+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1CKEY_GB, GRPH1CKEY_B -+*/ -+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_MASK (0x000003FF) -+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LSBMASK (0x000003FF) -+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SHIFT (0) -+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LENGTH (10) -+#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH2CKEY_R_OFFSET (0x00E8) -+ -+/* PDP, GRPH2CKEY_R, GRPH2CKEY_R -+*/ -+#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_MASK (0x000003FF) -+#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_LSBMASK (0x000003FF) -+#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_SHIFT (0) -+#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_LENGTH (10) -+#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH2CKEY_GB_OFFSET (0x00EC) -+ -+/* PDP, GRPH2CKEY_GB, GRPH2CKEY_G -+*/ -+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_MASK (0x03FF0000) -+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LSBMASK (0x000003FF) -+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SHIFT (16) -+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LENGTH (10) -+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2CKEY_GB, GRPH2CKEY_B -+*/ -+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_MASK (0x000003FF) -+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LSBMASK (0x000003FF) -+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SHIFT (0) -+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LENGTH (10) -+#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH3CKEY_R_OFFSET (0x00F0) -+ -+/* PDP, GRPH3CKEY_R, GRPH3CKEY_R -+*/ -+#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_MASK (0x000003FF) -+#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_LSBMASK (0x000003FF) -+#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_SHIFT (0) -+#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_LENGTH (10) -+#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH3CKEY_GB_OFFSET (0x00F4) -+ -+/* PDP, GRPH3CKEY_GB, GRPH3CKEY_G -+*/ -+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_MASK (0x03FF0000) -+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LSBMASK (0x000003FF) -+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SHIFT (16) -+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LENGTH (10) -+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3CKEY_GB, GRPH3CKEY_B -+*/ -+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_MASK (0x000003FF) -+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LSBMASK (0x000003FF) -+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SHIFT (0) -+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LENGTH (10) -+#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH4CKEY_R_OFFSET (0x00F8) -+ -+/* PDP, GRPH4CKEY_R, GRPH4CKEY_R -+*/ -+#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_MASK (0x000003FF) -+#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_LSBMASK (0x000003FF) -+#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_SHIFT (0) -+#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_LENGTH (10) -+#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH4CKEY_GB_OFFSET (0x00FC) -+ -+/* PDP, GRPH4CKEY_GB, GRPH4CKEY_G -+*/ -+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_MASK (0x03FF0000) -+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LSBMASK (0x000003FF) -+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SHIFT (16) -+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LENGTH (10) -+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4CKEY_GB, GRPH4CKEY_B -+*/ -+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_MASK (0x000003FF) -+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LSBMASK (0x000003FF) -+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SHIFT (0) -+#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LENGTH (10) -+#define 
PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1CKEY_R_OFFSET (0x0100) -+ -+/* PDP, VID1CKEY_R, VID1CKEY_R -+*/ -+#define PDP_VID1CKEY_R_VID1CKEY_R_MASK (0x000003FF) -+#define PDP_VID1CKEY_R_VID1CKEY_R_LSBMASK (0x000003FF) -+#define PDP_VID1CKEY_R_VID1CKEY_R_SHIFT (0) -+#define PDP_VID1CKEY_R_VID1CKEY_R_LENGTH (10) -+#define PDP_VID1CKEY_R_VID1CKEY_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1CKEY_GB_OFFSET (0x0104) -+ -+/* PDP, VID1CKEY_GB, VID1CKEY_G -+*/ -+#define PDP_VID1CKEY_GB_VID1CKEY_G_MASK (0x03FF0000) -+#define PDP_VID1CKEY_GB_VID1CKEY_G_LSBMASK (0x000003FF) -+#define PDP_VID1CKEY_GB_VID1CKEY_G_SHIFT (16) -+#define PDP_VID1CKEY_GB_VID1CKEY_G_LENGTH (10) -+#define PDP_VID1CKEY_GB_VID1CKEY_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1CKEY_GB, VID1CKEY_B -+*/ -+#define PDP_VID1CKEY_GB_VID1CKEY_B_MASK (0x000003FF) -+#define PDP_VID1CKEY_GB_VID1CKEY_B_LSBMASK (0x000003FF) -+#define PDP_VID1CKEY_GB_VID1CKEY_B_SHIFT (0) -+#define PDP_VID1CKEY_GB_VID1CKEY_B_LENGTH (10) -+#define PDP_VID1CKEY_GB_VID1CKEY_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2CKEY_R_OFFSET (0x0108) -+ -+/* PDP, VID2CKEY_R, VID2CKEY_R -+*/ -+#define PDP_VID2CKEY_R_VID2CKEY_R_MASK (0x000003FF) -+#define PDP_VID2CKEY_R_VID2CKEY_R_LSBMASK (0x000003FF) -+#define PDP_VID2CKEY_R_VID2CKEY_R_SHIFT (0) -+#define PDP_VID2CKEY_R_VID2CKEY_R_LENGTH (10) -+#define PDP_VID2CKEY_R_VID2CKEY_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2CKEY_GB_OFFSET (0x010C) -+ -+/* PDP, VID2CKEY_GB, VID2CKEY_G -+*/ -+#define PDP_VID2CKEY_GB_VID2CKEY_G_MASK (0x03FF0000) -+#define PDP_VID2CKEY_GB_VID2CKEY_G_LSBMASK (0x000003FF) -+#define PDP_VID2CKEY_GB_VID2CKEY_G_SHIFT (16) -+#define PDP_VID2CKEY_GB_VID2CKEY_G_LENGTH (10) -+#define PDP_VID2CKEY_GB_VID2CKEY_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2CKEY_GB, VID2CKEY_B -+*/ -+#define PDP_VID2CKEY_GB_VID2CKEY_B_MASK (0x000003FF) -+#define PDP_VID2CKEY_GB_VID2CKEY_B_LSBMASK (0x000003FF) -+#define PDP_VID2CKEY_GB_VID2CKEY_B_SHIFT (0) -+#define PDP_VID2CKEY_GB_VID2CKEY_B_LENGTH (10) -+#define PDP_VID2CKEY_GB_VID2CKEY_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3CKEY_R_OFFSET (0x0110) -+ -+/* PDP, VID3CKEY_R, VID3CKEY_R -+*/ -+#define PDP_VID3CKEY_R_VID3CKEY_R_MASK (0x000003FF) -+#define PDP_VID3CKEY_R_VID3CKEY_R_LSBMASK (0x000003FF) -+#define PDP_VID3CKEY_R_VID3CKEY_R_SHIFT (0) -+#define PDP_VID3CKEY_R_VID3CKEY_R_LENGTH (10) -+#define PDP_VID3CKEY_R_VID3CKEY_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3CKEY_GB_OFFSET (0x0114) -+ -+/* PDP, VID3CKEY_GB, VID3CKEY_G -+*/ -+#define PDP_VID3CKEY_GB_VID3CKEY_G_MASK (0x03FF0000) -+#define PDP_VID3CKEY_GB_VID3CKEY_G_LSBMASK (0x000003FF) -+#define PDP_VID3CKEY_GB_VID3CKEY_G_SHIFT (16) -+#define PDP_VID3CKEY_GB_VID3CKEY_G_LENGTH (10) -+#define PDP_VID3CKEY_GB_VID3CKEY_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3CKEY_GB, VID3CKEY_B -+*/ -+#define PDP_VID3CKEY_GB_VID3CKEY_B_MASK (0x000003FF) -+#define PDP_VID3CKEY_GB_VID3CKEY_B_LSBMASK (0x000003FF) -+#define PDP_VID3CKEY_GB_VID3CKEY_B_SHIFT (0) -+#define PDP_VID3CKEY_GB_VID3CKEY_B_LENGTH (10) -+#define PDP_VID3CKEY_GB_VID3CKEY_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4CKEY_R_OFFSET (0x0118) -+ -+/* PDP, VID4CKEY_R, VID4CKEY_R -+*/ -+#define PDP_VID4CKEY_R_VID4CKEY_R_MASK (0x000003FF) -+#define PDP_VID4CKEY_R_VID4CKEY_R_LSBMASK (0x000003FF) -+#define PDP_VID4CKEY_R_VID4CKEY_R_SHIFT (0) -+#define PDP_VID4CKEY_R_VID4CKEY_R_LENGTH (10) -+#define PDP_VID4CKEY_R_VID4CKEY_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4CKEY_GB_OFFSET (0x011C) -+ -+/* PDP, VID4CKEY_GB, VID4CKEY_G -+*/ -+#define 
PDP_VID4CKEY_GB_VID4CKEY_G_MASK (0x03FF0000) -+#define PDP_VID4CKEY_GB_VID4CKEY_G_LSBMASK (0x000003FF) -+#define PDP_VID4CKEY_GB_VID4CKEY_G_SHIFT (16) -+#define PDP_VID4CKEY_GB_VID4CKEY_G_LENGTH (10) -+#define PDP_VID4CKEY_GB_VID4CKEY_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4CKEY_GB, VID4CKEY_B -+*/ -+#define PDP_VID4CKEY_GB_VID4CKEY_B_MASK (0x000003FF) -+#define PDP_VID4CKEY_GB_VID4CKEY_B_LSBMASK (0x000003FF) -+#define PDP_VID4CKEY_GB_VID4CKEY_B_SHIFT (0) -+#define PDP_VID4CKEY_GB_VID4CKEY_B_LENGTH (10) -+#define PDP_VID4CKEY_GB_VID4CKEY_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH1BLND2_R_OFFSET (0x0120) -+ -+/* PDP, GRPH1BLND2_R, GRPH1PIXDBL -+*/ -+#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_MASK (0x80000000) -+#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_LSBMASK (0x00000001) -+#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_SHIFT (31) -+#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_LENGTH (1) -+#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1BLND2_R, GRPH1LINDBL -+*/ -+#define PDP_GRPH1BLND2_R_GRPH1LINDBL_MASK (0x20000000) -+#define PDP_GRPH1BLND2_R_GRPH1LINDBL_LSBMASK (0x00000001) -+#define PDP_GRPH1BLND2_R_GRPH1LINDBL_SHIFT (29) -+#define PDP_GRPH1BLND2_R_GRPH1LINDBL_LENGTH (1) -+#define PDP_GRPH1BLND2_R_GRPH1LINDBL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1BLND2_R, GRPH1CKEYMASK_R -+*/ -+#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_MASK (0x000003FF) -+#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LSBMASK (0x000003FF) -+#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SHIFT (0) -+#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LENGTH (10) -+#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH1BLND2_GB_OFFSET (0x0124) -+ -+/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_G -+*/ -+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_MASK (0x03FF0000) -+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LSBMASK (0x000003FF) -+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SHIFT (16) -+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LENGTH (10) -+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_B -+*/ -+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_MASK (0x000003FF) -+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LSBMASK (0x000003FF) -+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SHIFT (0) -+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LENGTH (10) -+#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH2BLND2_R_OFFSET (0x0128) -+ -+/* PDP, GRPH2BLND2_R, GRPH2PIXDBL -+*/ -+#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_MASK (0x80000000) -+#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_LSBMASK (0x00000001) -+#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_SHIFT (31) -+#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_LENGTH (1) -+#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2BLND2_R, GRPH2LINDBL -+*/ -+#define PDP_GRPH2BLND2_R_GRPH2LINDBL_MASK (0x20000000) -+#define PDP_GRPH2BLND2_R_GRPH2LINDBL_LSBMASK (0x00000001) -+#define PDP_GRPH2BLND2_R_GRPH2LINDBL_SHIFT (29) -+#define PDP_GRPH2BLND2_R_GRPH2LINDBL_LENGTH (1) -+#define PDP_GRPH2BLND2_R_GRPH2LINDBL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2BLND2_R, GRPH2CKEYMASK_R -+*/ -+#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_MASK (0x000003FF) -+#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LSBMASK (0x000003FF) -+#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SHIFT (0) -+#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LENGTH (10) -+#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH2BLND2_GB_OFFSET (0x012C) -+ -+/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_G -+*/ -+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_MASK 
(0x03FF0000) -+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LSBMASK (0x000003FF) -+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SHIFT (16) -+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LENGTH (10) -+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_B -+*/ -+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_MASK (0x000003FF) -+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LSBMASK (0x000003FF) -+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SHIFT (0) -+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LENGTH (10) -+#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH3BLND2_R_OFFSET (0x0130) -+ -+/* PDP, GRPH3BLND2_R, GRPH3PIXDBL -+*/ -+#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_MASK (0x80000000) -+#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_LSBMASK (0x00000001) -+#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_SHIFT (31) -+#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_LENGTH (1) -+#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3BLND2_R, GRPH3LINDBL -+*/ -+#define PDP_GRPH3BLND2_R_GRPH3LINDBL_MASK (0x20000000) -+#define PDP_GRPH3BLND2_R_GRPH3LINDBL_LSBMASK (0x00000001) -+#define PDP_GRPH3BLND2_R_GRPH3LINDBL_SHIFT (29) -+#define PDP_GRPH3BLND2_R_GRPH3LINDBL_LENGTH (1) -+#define PDP_GRPH3BLND2_R_GRPH3LINDBL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3BLND2_R, GRPH3CKEYMASK_R -+*/ -+#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_MASK (0x000003FF) -+#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LSBMASK (0x000003FF) -+#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SHIFT (0) -+#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LENGTH (10) -+#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH3BLND2_GB_OFFSET (0x0134) -+ -+/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_G -+*/ -+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_MASK (0x03FF0000) -+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LSBMASK (0x000003FF) -+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SHIFT (16) -+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LENGTH (10) -+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_B -+*/ -+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_MASK (0x000003FF) -+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LSBMASK (0x000003FF) -+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SHIFT (0) -+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LENGTH (10) -+#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH4BLND2_R_OFFSET (0x0138) -+ -+/* PDP, GRPH4BLND2_R, GRPH4PIXDBL -+*/ -+#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_MASK (0x80000000) -+#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_LSBMASK (0x00000001) -+#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_SHIFT (31) -+#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_LENGTH (1) -+#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4BLND2_R, GRPH4LINDBL -+*/ -+#define PDP_GRPH4BLND2_R_GRPH4LINDBL_MASK (0x20000000) -+#define PDP_GRPH4BLND2_R_GRPH4LINDBL_LSBMASK (0x00000001) -+#define PDP_GRPH4BLND2_R_GRPH4LINDBL_SHIFT (29) -+#define PDP_GRPH4BLND2_R_GRPH4LINDBL_LENGTH (1) -+#define PDP_GRPH4BLND2_R_GRPH4LINDBL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4BLND2_R, GRPH4CKEYMASK_R -+*/ -+#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_MASK (0x000003FF) -+#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LSBMASK (0x000003FF) -+#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SHIFT (0) -+#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LENGTH (10) -+#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH4BLND2_GB_OFFSET (0x013C) -+ -+/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_G -+*/ -+#define 
PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_MASK (0x03FF0000) -+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LSBMASK (0x000003FF) -+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SHIFT (16) -+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LENGTH (10) -+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_B -+*/ -+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_MASK (0x000003FF) -+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LSBMASK (0x000003FF) -+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SHIFT (0) -+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LENGTH (10) -+#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1BLND2_R_OFFSET (0x0140) -+ -+/* PDP, VID1BLND2_R, VID1CKEYMASK_R -+*/ -+#define PDP_VID1BLND2_R_VID1CKEYMASK_R_MASK (0x000003FF) -+#define PDP_VID1BLND2_R_VID1CKEYMASK_R_LSBMASK (0x000003FF) -+#define PDP_VID1BLND2_R_VID1CKEYMASK_R_SHIFT (0) -+#define PDP_VID1BLND2_R_VID1CKEYMASK_R_LENGTH (10) -+#define PDP_VID1BLND2_R_VID1CKEYMASK_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1BLND2_GB_OFFSET (0x0144) -+ -+/* PDP, VID1BLND2_GB, VID1CKEYMASK_G -+*/ -+#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_MASK (0x03FF0000) -+#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_LSBMASK (0x000003FF) -+#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_SHIFT (16) -+#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_LENGTH (10) -+#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1BLND2_GB, VID1CKEYMASK_B -+*/ -+#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_MASK (0x000003FF) -+#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_LSBMASK (0x000003FF) -+#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_SHIFT (0) -+#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_LENGTH (10) -+#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2BLND2_R_OFFSET (0x0148) -+ -+/* PDP, VID2BLND2_R, VID2CKEYMASK_R -+*/ -+#define PDP_VID2BLND2_R_VID2CKEYMASK_R_MASK (0x000003FF) -+#define PDP_VID2BLND2_R_VID2CKEYMASK_R_LSBMASK (0x000003FF) -+#define PDP_VID2BLND2_R_VID2CKEYMASK_R_SHIFT (0) -+#define PDP_VID2BLND2_R_VID2CKEYMASK_R_LENGTH (10) -+#define PDP_VID2BLND2_R_VID2CKEYMASK_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2BLND2_GB_OFFSET (0x014C) -+ -+/* PDP, VID2BLND2_GB, VID2CKEYMASK_G -+*/ -+#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_MASK (0x03FF0000) -+#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_LSBMASK (0x000003FF) -+#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_SHIFT (16) -+#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_LENGTH (10) -+#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2BLND2_GB, VID2CKEYMASK_B -+*/ -+#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_MASK (0x000003FF) -+#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_LSBMASK (0x000003FF) -+#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_SHIFT (0) -+#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_LENGTH (10) -+#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3BLND2_R_OFFSET (0x0150) -+ -+/* PDP, VID3BLND2_R, VID3CKEYMASK_R -+*/ -+#define PDP_VID3BLND2_R_VID3CKEYMASK_R_MASK (0x000003FF) -+#define PDP_VID3BLND2_R_VID3CKEYMASK_R_LSBMASK (0x000003FF) -+#define PDP_VID3BLND2_R_VID3CKEYMASK_R_SHIFT (0) -+#define PDP_VID3BLND2_R_VID3CKEYMASK_R_LENGTH (10) -+#define PDP_VID3BLND2_R_VID3CKEYMASK_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3BLND2_GB_OFFSET (0x0154) -+ -+/* PDP, VID3BLND2_GB, VID3CKEYMASK_G -+*/ -+#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_MASK (0x03FF0000) -+#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_LSBMASK (0x000003FF) -+#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_SHIFT (16) -+#define 
PDP_VID3BLND2_GB_VID3CKEYMASK_G_LENGTH (10) -+#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3BLND2_GB, VID3CKEYMASK_B -+*/ -+#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_MASK (0x000003FF) -+#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_LSBMASK (0x000003FF) -+#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_SHIFT (0) -+#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_LENGTH (10) -+#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4BLND2_R_OFFSET (0x0158) -+ -+/* PDP, VID4BLND2_R, VID4CKEYMASK_R -+*/ -+#define PDP_VID4BLND2_R_VID4CKEYMASK_R_MASK (0x000003FF) -+#define PDP_VID4BLND2_R_VID4CKEYMASK_R_LSBMASK (0x000003FF) -+#define PDP_VID4BLND2_R_VID4CKEYMASK_R_SHIFT (0) -+#define PDP_VID4BLND2_R_VID4CKEYMASK_R_LENGTH (10) -+#define PDP_VID4BLND2_R_VID4CKEYMASK_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4BLND2_GB_OFFSET (0x015C) -+ -+/* PDP, VID4BLND2_GB, VID4CKEYMASK_G -+*/ -+#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_MASK (0x03FF0000) -+#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_LSBMASK (0x000003FF) -+#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_SHIFT (16) -+#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_LENGTH (10) -+#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4BLND2_GB, VID4CKEYMASK_B -+*/ -+#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_MASK (0x000003FF) -+#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_LSBMASK (0x000003FF) -+#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_SHIFT (0) -+#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_LENGTH (10) -+#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH1INTERLEAVE_CTRL_OFFSET (0x0160) -+ -+/* PDP, GRPH1INTERLEAVE_CTRL, GRPH1INTFIELD -+*/ -+#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_MASK (0x00000001) -+#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LSBMASK (0x00000001) -+#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SHIFT (0) -+#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LENGTH (1) -+#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH2INTERLEAVE_CTRL_OFFSET (0x0164) -+ -+/* PDP, GRPH2INTERLEAVE_CTRL, GRPH2INTFIELD -+*/ -+#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_MASK (0x00000001) -+#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LSBMASK (0x00000001) -+#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SHIFT (0) -+#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LENGTH (1) -+#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH3INTERLEAVE_CTRL_OFFSET (0x0168) -+ -+/* PDP, GRPH3INTERLEAVE_CTRL, GRPH3INTFIELD -+*/ -+#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_MASK (0x00000001) -+#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LSBMASK (0x00000001) -+#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SHIFT (0) -+#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LENGTH (1) -+#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH4INTERLEAVE_CTRL_OFFSET (0x016C) -+ -+/* PDP, GRPH4INTERLEAVE_CTRL, GRPH4INTFIELD -+*/ -+#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_MASK (0x00000001) -+#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LSBMASK (0x00000001) -+#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SHIFT (0) -+#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LENGTH (1) -+#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1INTERLEAVE_CTRL_OFFSET (0x0170) -+ -+/* PDP, VID1INTERLEAVE_CTRL, VID1INTFIELD -+*/ -+#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_MASK (0x00000001) -+#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LSBMASK (0x00000001) -+#define 
PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SHIFT (0) -+#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LENGTH (1) -+#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2INTERLEAVE_CTRL_OFFSET (0x0174) -+ -+/* PDP, VID2INTERLEAVE_CTRL, VID2INTFIELD -+*/ -+#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_MASK (0x00000001) -+#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LSBMASK (0x00000001) -+#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SHIFT (0) -+#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LENGTH (1) -+#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3INTERLEAVE_CTRL_OFFSET (0x0178) -+ -+/* PDP, VID3INTERLEAVE_CTRL, VID3INTFIELD -+*/ -+#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_MASK (0x00000001) -+#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LSBMASK (0x00000001) -+#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SHIFT (0) -+#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LENGTH (1) -+#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4INTERLEAVE_CTRL_OFFSET (0x017C) -+ -+/* PDP, VID4INTERLEAVE_CTRL, VID4INTFIELD -+*/ -+#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_MASK (0x00000001) -+#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LSBMASK (0x00000001) -+#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SHIFT (0) -+#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LENGTH (1) -+#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH1BASEADDR_OFFSET (0x0180) -+ -+/* PDP, GRPH1BASEADDR, GRPH1BASEADDR -+*/ -+#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK (0xFFFFFFE0) -+#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_LSBMASK (0x07FFFFFF) -+#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_SHIFT (5) -+#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_LENGTH (27) -+#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH2BASEADDR_OFFSET (0x0184) -+ -+/* PDP, GRPH2BASEADDR, GRPH2BASEADDR -+*/ -+#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_MASK (0xFFFFFFE0) -+#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_LSBMASK (0x07FFFFFF) -+#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_SHIFT (5) -+#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_LENGTH (27) -+#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH3BASEADDR_OFFSET (0x0188) -+ -+/* PDP, GRPH3BASEADDR, GRPH3BASEADDR -+*/ -+#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_MASK (0xFFFFFFE0) -+#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_LSBMASK (0x07FFFFFF) -+#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_SHIFT (5) -+#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_LENGTH (27) -+#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH4BASEADDR_OFFSET (0x018C) -+ -+/* PDP, GRPH4BASEADDR, GRPH4BASEADDR -+*/ -+#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_MASK (0xFFFFFFE0) -+#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_LSBMASK (0x07FFFFFF) -+#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_SHIFT (5) -+#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_LENGTH (27) -+#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1BASEADDR_OFFSET (0x0190) -+ -+/* PDP, VID1BASEADDR, VID1BASEADDR -+*/ -+#define PDP_VID1BASEADDR_VID1BASEADDR_MASK (0xFFFFFFE0) -+#define PDP_VID1BASEADDR_VID1BASEADDR_LSBMASK (0x07FFFFFF) -+#define PDP_VID1BASEADDR_VID1BASEADDR_SHIFT (5) -+#define PDP_VID1BASEADDR_VID1BASEADDR_LENGTH (27) -+#define PDP_VID1BASEADDR_VID1BASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2BASEADDR_OFFSET (0x0194) -+ -+/* PDP, VID2BASEADDR, VID2BASEADDR -+*/ -+#define PDP_VID2BASEADDR_VID2BASEADDR_MASK (0xFFFFFFE0) -+#define 
PDP_VID2BASEADDR_VID2BASEADDR_LSBMASK (0x07FFFFFF) -+#define PDP_VID2BASEADDR_VID2BASEADDR_SHIFT (5) -+#define PDP_VID2BASEADDR_VID2BASEADDR_LENGTH (27) -+#define PDP_VID2BASEADDR_VID2BASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3BASEADDR_OFFSET (0x0198) -+ -+/* PDP, VID3BASEADDR, VID3BASEADDR -+*/ -+#define PDP_VID3BASEADDR_VID3BASEADDR_MASK (0xFFFFFFE0) -+#define PDP_VID3BASEADDR_VID3BASEADDR_LSBMASK (0x07FFFFFF) -+#define PDP_VID3BASEADDR_VID3BASEADDR_SHIFT (5) -+#define PDP_VID3BASEADDR_VID3BASEADDR_LENGTH (27) -+#define PDP_VID3BASEADDR_VID3BASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4BASEADDR_OFFSET (0x019C) -+ -+/* PDP, VID4BASEADDR, VID4BASEADDR -+*/ -+#define PDP_VID4BASEADDR_VID4BASEADDR_MASK (0xFFFFFFE0) -+#define PDP_VID4BASEADDR_VID4BASEADDR_LSBMASK (0x07FFFFFF) -+#define PDP_VID4BASEADDR_VID4BASEADDR_SHIFT (5) -+#define PDP_VID4BASEADDR_VID4BASEADDR_LENGTH (27) -+#define PDP_VID4BASEADDR_VID4BASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1UBASEADDR_OFFSET (0x01B0) -+ -+/* PDP, VID1UBASEADDR, VID1UBASEADDR -+*/ -+#define PDP_VID1UBASEADDR_VID1UBASEADDR_MASK (0xFFFFFFE0) -+#define PDP_VID1UBASEADDR_VID1UBASEADDR_LSBMASK (0x07FFFFFF) -+#define PDP_VID1UBASEADDR_VID1UBASEADDR_SHIFT (5) -+#define PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH (27) -+#define PDP_VID1UBASEADDR_VID1UBASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2UBASEADDR_OFFSET (0x01B4) -+ -+/* PDP, VID2UBASEADDR, VID2UBASEADDR -+*/ -+#define PDP_VID2UBASEADDR_VID2UBASEADDR_MASK (0xFFFFFFE0) -+#define PDP_VID2UBASEADDR_VID2UBASEADDR_LSBMASK (0x07FFFFFF) -+#define PDP_VID2UBASEADDR_VID2UBASEADDR_SHIFT (5) -+#define PDP_VID2UBASEADDR_VID2UBASEADDR_LENGTH (27) -+#define PDP_VID2UBASEADDR_VID2UBASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3UBASEADDR_OFFSET (0x01B8) -+ -+/* PDP, VID3UBASEADDR, VID3UBASEADDR -+*/ -+#define PDP_VID3UBASEADDR_VID3UBASEADDR_MASK (0xFFFFFFE0) -+#define PDP_VID3UBASEADDR_VID3UBASEADDR_LSBMASK (0x07FFFFFF) -+#define PDP_VID3UBASEADDR_VID3UBASEADDR_SHIFT (5) -+#define PDP_VID3UBASEADDR_VID3UBASEADDR_LENGTH (27) -+#define PDP_VID3UBASEADDR_VID3UBASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4UBASEADDR_OFFSET (0x01BC) -+ -+/* PDP, VID4UBASEADDR, VID4UBASEADDR -+*/ -+#define PDP_VID4UBASEADDR_VID4UBASEADDR_MASK (0xFFFFFFE0) -+#define PDP_VID4UBASEADDR_VID4UBASEADDR_LSBMASK (0x07FFFFFF) -+#define PDP_VID4UBASEADDR_VID4UBASEADDR_SHIFT (5) -+#define PDP_VID4UBASEADDR_VID4UBASEADDR_LENGTH (27) -+#define PDP_VID4UBASEADDR_VID4UBASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1VBASEADDR_OFFSET (0x01D0) -+ -+/* PDP, VID1VBASEADDR, VID1VBASEADDR -+*/ -+#define PDP_VID1VBASEADDR_VID1VBASEADDR_MASK (0xFFFFFFE0) -+#define PDP_VID1VBASEADDR_VID1VBASEADDR_LSBMASK (0x07FFFFFF) -+#define PDP_VID1VBASEADDR_VID1VBASEADDR_SHIFT (5) -+#define PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH (27) -+#define PDP_VID1VBASEADDR_VID1VBASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2VBASEADDR_OFFSET (0x01D4) -+ -+/* PDP, VID2VBASEADDR, VID2VBASEADDR -+*/ -+#define PDP_VID2VBASEADDR_VID2VBASEADDR_MASK (0xFFFFFFE0) -+#define PDP_VID2VBASEADDR_VID2VBASEADDR_LSBMASK (0x07FFFFFF) -+#define PDP_VID2VBASEADDR_VID2VBASEADDR_SHIFT (5) -+#define PDP_VID2VBASEADDR_VID2VBASEADDR_LENGTH (27) -+#define PDP_VID2VBASEADDR_VID2VBASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3VBASEADDR_OFFSET (0x01D8) -+ -+/* PDP, VID3VBASEADDR, VID3VBASEADDR -+*/ -+#define PDP_VID3VBASEADDR_VID3VBASEADDR_MASK (0xFFFFFFE0) -+#define PDP_VID3VBASEADDR_VID3VBASEADDR_LSBMASK (0x07FFFFFF) -+#define 
PDP_VID3VBASEADDR_VID3VBASEADDR_SHIFT (5) -+#define PDP_VID3VBASEADDR_VID3VBASEADDR_LENGTH (27) -+#define PDP_VID3VBASEADDR_VID3VBASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4VBASEADDR_OFFSET (0x01DC) -+ -+/* PDP, VID4VBASEADDR, VID4VBASEADDR -+*/ -+#define PDP_VID4VBASEADDR_VID4VBASEADDR_MASK (0xFFFFFFE0) -+#define PDP_VID4VBASEADDR_VID4VBASEADDR_LSBMASK (0x07FFFFFF) -+#define PDP_VID4VBASEADDR_VID4VBASEADDR_SHIFT (5) -+#define PDP_VID4VBASEADDR_VID4VBASEADDR_LENGTH (27) -+#define PDP_VID4VBASEADDR_VID4VBASEADDR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1POSTSKIPCTRL_OFFSET (0x0230) -+ -+/* PDP, VID1POSTSKIPCTRL, VID1HPOSTCLIP -+*/ -+#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_MASK (0x007F0000) -+#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LSBMASK (0x0000007F) -+#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SHIFT (16) -+#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LENGTH (7) -+#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1POSTSKIPCTRL, VID1VPOSTCLIP -+*/ -+#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_MASK (0x0000003F) -+#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LSBMASK (0x0000003F) -+#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SHIFT (0) -+#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LENGTH (6) -+#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2POSTSKIPCTRL_OFFSET (0x0234) -+ -+/* PDP, VID2POSTSKIPCTRL, VID2HPOSTCLIP -+*/ -+#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_MASK (0x007F0000) -+#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LSBMASK (0x0000007F) -+#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SHIFT (16) -+#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LENGTH (7) -+#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2POSTSKIPCTRL, VID2VPOSTCLIP -+*/ -+#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_MASK (0x0000003F) -+#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LSBMASK (0x0000003F) -+#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SHIFT (0) -+#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LENGTH (6) -+#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3POSTSKIPCTRL_OFFSET (0x0238) -+ -+/* PDP, VID3POSTSKIPCTRL, VID3HPOSTCLIP -+*/ -+#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_MASK (0x007F0000) -+#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LSBMASK (0x0000007F) -+#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SHIFT (16) -+#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LENGTH (7) -+#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3POSTSKIPCTRL, VID3VPOSTCLIP -+*/ -+#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_MASK (0x0000003F) -+#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LSBMASK (0x0000003F) -+#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SHIFT (0) -+#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LENGTH (6) -+#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4POSTSKIPCTRL_OFFSET (0x023C) -+ -+/* PDP, VID4POSTSKIPCTRL, VID4HPOSTCLIP -+*/ -+#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_MASK (0x007F0000) -+#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LSBMASK (0x0000007F) -+#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SHIFT (16) -+#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LENGTH (7) -+#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4POSTSKIPCTRL, VID4VPOSTCLIP -+*/ -+#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_MASK (0x0000003F) -+#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LSBMASK (0x0000003F) -+#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SHIFT (0) -+#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LENGTH (6) 
-+#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH1DECIMATE_CTRL_OFFSET (0x0240) -+ -+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_COUNT -+*/ -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_MODE -+*/ -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SHIFT (3) -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LENGTH (1) -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_PIXEL_HALVE -+*/ -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_MASK (0x00000004) -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SHIFT (2) -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LENGTH (1) -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_EN -+*/ -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_MASK (0x00000001) -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LSBMASK (0x00000001) -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SHIFT (0) -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LENGTH (1) -+#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH2DECIMATE_CTRL_OFFSET (0x0244) -+ -+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_COUNT -+*/ -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_MODE -+*/ -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SHIFT (3) -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LENGTH (1) -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_PIXEL_HALVE -+*/ -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_MASK (0x00000004) -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SHIFT (2) -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LENGTH (1) -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_EN -+*/ -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_MASK (0x00000001) -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LSBMASK 
(0x00000001) -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SHIFT (0) -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LENGTH (1) -+#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH3DECIMATE_CTRL_OFFSET (0x0248) -+ -+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_COUNT -+*/ -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_MODE -+*/ -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SHIFT (3) -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LENGTH (1) -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_PIXEL_HALVE -+*/ -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_MASK (0x00000004) -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SHIFT (2) -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LENGTH (1) -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_EN -+*/ -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_MASK (0x00000001) -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LSBMASK (0x00000001) -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SHIFT (0) -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LENGTH (1) -+#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH4DECIMATE_CTRL_OFFSET (0x024C) -+ -+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_COUNT -+*/ -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_MODE -+*/ -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SHIFT (3) -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LENGTH (1) -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_PIXEL_HALVE -+*/ -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_MASK (0x00000004) -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SHIFT (2) -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LENGTH (1) -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4DECIMATE_CTRL, 
GRPH4DECIMATE_EN -+*/ -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_MASK (0x00000001) -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LSBMASK (0x00000001) -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SHIFT (0) -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LENGTH (1) -+#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1DECIMATE_CTRL_OFFSET (0x0250) -+ -+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_COUNT -+*/ -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_MODE -+*/ -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SHIFT (3) -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LENGTH (1) -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_PIXEL_HALVE -+*/ -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_MASK (0x00000004) -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SHIFT (2) -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LENGTH (1) -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_EN -+*/ -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_MASK (0x00000001) -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LSBMASK (0x00000001) -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SHIFT (0) -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LENGTH (1) -+#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2DECIMATE_CTRL_OFFSET (0x0254) -+ -+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_COUNT -+*/ -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_MODE -+*/ -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SHIFT (3) -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LENGTH (1) -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_PIXEL_HALVE -+*/ -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_MASK (0x00000004) -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SHIFT (2) -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LENGTH (1) -+#define 
PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_EN -+*/ -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_MASK (0x00000001) -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LSBMASK (0x00000001) -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SHIFT (0) -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LENGTH (1) -+#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3DECIMATE_CTRL_OFFSET (0x0258) -+ -+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_COUNT -+*/ -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_MODE -+*/ -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SHIFT (3) -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LENGTH (1) -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_PIXEL_HALVE -+*/ -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_MASK (0x00000004) -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SHIFT (2) -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LENGTH (1) -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_EN -+*/ -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_MASK (0x00000001) -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LSBMASK (0x00000001) -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SHIFT (0) -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LENGTH (1) -+#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4DECIMATE_CTRL_OFFSET (0x025C) -+ -+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_COUNT -+*/ -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_MODE -+*/ -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SHIFT (3) -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LENGTH (1) -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_PIXEL_HALVE -+*/ -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_MASK (0x00000004) -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SHIFT (2) -+#define 
PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LENGTH (1) -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_EN -+*/ -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_MASK (0x00000001) -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LSBMASK (0x00000001) -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SHIFT (0) -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LENGTH (1) -+#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1SKIPCTRL_OFFSET (0x0270) -+ -+/* PDP, VID1SKIPCTRL, VID1HSKIP -+*/ -+#define PDP_VID1SKIPCTRL_VID1HSKIP_MASK (0x0FFF0000) -+#define PDP_VID1SKIPCTRL_VID1HSKIP_LSBMASK (0x00000FFF) -+#define PDP_VID1SKIPCTRL_VID1HSKIP_SHIFT (16) -+#define PDP_VID1SKIPCTRL_VID1HSKIP_LENGTH (12) -+#define PDP_VID1SKIPCTRL_VID1HSKIP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SKIPCTRL, VID1VSKIP -+*/ -+#define PDP_VID1SKIPCTRL_VID1VSKIP_MASK (0x00000FFF) -+#define PDP_VID1SKIPCTRL_VID1VSKIP_LSBMASK (0x00000FFF) -+#define PDP_VID1SKIPCTRL_VID1VSKIP_SHIFT (0) -+#define PDP_VID1SKIPCTRL_VID1VSKIP_LENGTH (12) -+#define PDP_VID1SKIPCTRL_VID1VSKIP_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2SKIPCTRL_OFFSET (0x0274) -+ -+/* PDP, VID2SKIPCTRL, VID2HSKIP -+*/ -+#define PDP_VID2SKIPCTRL_VID2HSKIP_MASK (0x0FFF0000) -+#define PDP_VID2SKIPCTRL_VID2HSKIP_LSBMASK (0x00000FFF) -+#define PDP_VID2SKIPCTRL_VID2HSKIP_SHIFT (16) -+#define PDP_VID2SKIPCTRL_VID2HSKIP_LENGTH (12) -+#define PDP_VID2SKIPCTRL_VID2HSKIP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SKIPCTRL, VID2VSKIP -+*/ -+#define PDP_VID2SKIPCTRL_VID2VSKIP_MASK (0x00000FFF) -+#define PDP_VID2SKIPCTRL_VID2VSKIP_LSBMASK (0x00000FFF) -+#define PDP_VID2SKIPCTRL_VID2VSKIP_SHIFT (0) -+#define PDP_VID2SKIPCTRL_VID2VSKIP_LENGTH (12) -+#define PDP_VID2SKIPCTRL_VID2VSKIP_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3SKIPCTRL_OFFSET (0x0278) -+ -+/* PDP, VID3SKIPCTRL, VID3HSKIP -+*/ -+#define PDP_VID3SKIPCTRL_VID3HSKIP_MASK (0x0FFF0000) -+#define PDP_VID3SKIPCTRL_VID3HSKIP_LSBMASK (0x00000FFF) -+#define PDP_VID3SKIPCTRL_VID3HSKIP_SHIFT (16) -+#define PDP_VID3SKIPCTRL_VID3HSKIP_LENGTH (12) -+#define PDP_VID3SKIPCTRL_VID3HSKIP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SKIPCTRL, VID3VSKIP -+*/ -+#define PDP_VID3SKIPCTRL_VID3VSKIP_MASK (0x00000FFF) -+#define PDP_VID3SKIPCTRL_VID3VSKIP_LSBMASK (0x00000FFF) -+#define PDP_VID3SKIPCTRL_VID3VSKIP_SHIFT (0) -+#define PDP_VID3SKIPCTRL_VID3VSKIP_LENGTH (12) -+#define PDP_VID3SKIPCTRL_VID3VSKIP_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4SKIPCTRL_OFFSET (0x027C) -+ -+/* PDP, VID4SKIPCTRL, VID4HSKIP -+*/ -+#define PDP_VID4SKIPCTRL_VID4HSKIP_MASK (0x0FFF0000) -+#define PDP_VID4SKIPCTRL_VID4HSKIP_LSBMASK (0x00000FFF) -+#define PDP_VID4SKIPCTRL_VID4HSKIP_SHIFT (16) -+#define PDP_VID4SKIPCTRL_VID4HSKIP_LENGTH (12) -+#define PDP_VID4SKIPCTRL_VID4HSKIP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SKIPCTRL, VID4VSKIP -+*/ -+#define PDP_VID4SKIPCTRL_VID4VSKIP_MASK (0x00000FFF) -+#define PDP_VID4SKIPCTRL_VID4VSKIP_LSBMASK (0x00000FFF) -+#define PDP_VID4SKIPCTRL_VID4VSKIP_SHIFT (0) -+#define PDP_VID4SKIPCTRL_VID4VSKIP_LENGTH (12) -+#define PDP_VID4SKIPCTRL_VID4VSKIP_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1SCALECTRL_OFFSET (0x0460) -+ -+/* PDP, VID1SCALECTRL, VID1HSCALEBP -+*/ -+#define PDP_VID1SCALECTRL_VID1HSCALEBP_MASK (0x80000000) -+#define PDP_VID1SCALECTRL_VID1HSCALEBP_LSBMASK (0x00000001) -+#define PDP_VID1SCALECTRL_VID1HSCALEBP_SHIFT (31) -+#define PDP_VID1SCALECTRL_VID1HSCALEBP_LENGTH (1) -+#define 
PDP_VID1SCALECTRL_VID1HSCALEBP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SCALECTRL, VID1VSCALEBP -+*/ -+#define PDP_VID1SCALECTRL_VID1VSCALEBP_MASK (0x40000000) -+#define PDP_VID1SCALECTRL_VID1VSCALEBP_LSBMASK (0x00000001) -+#define PDP_VID1SCALECTRL_VID1VSCALEBP_SHIFT (30) -+#define PDP_VID1SCALECTRL_VID1VSCALEBP_LENGTH (1) -+#define PDP_VID1SCALECTRL_VID1VSCALEBP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SCALECTRL, VID1HSBEFOREVS -+*/ -+#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_MASK (0x20000000) -+#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_LSBMASK (0x00000001) -+#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_SHIFT (29) -+#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_LENGTH (1) -+#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SCALECTRL, VID1VSURUNCTRL -+*/ -+#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_MASK (0x08000000) -+#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_LSBMASK (0x00000001) -+#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_SHIFT (27) -+#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_LENGTH (1) -+#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SCALECTRL, VID1PAN_EN -+*/ -+#define PDP_VID1SCALECTRL_VID1PAN_EN_MASK (0x00040000) -+#define PDP_VID1SCALECTRL_VID1PAN_EN_LSBMASK (0x00000001) -+#define PDP_VID1SCALECTRL_VID1PAN_EN_SHIFT (18) -+#define PDP_VID1SCALECTRL_VID1PAN_EN_LENGTH (1) -+#define PDP_VID1SCALECTRL_VID1PAN_EN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SCALECTRL, VID1VORDER -+*/ -+#define PDP_VID1SCALECTRL_VID1VORDER_MASK (0x00030000) -+#define PDP_VID1SCALECTRL_VID1VORDER_LSBMASK (0x00000003) -+#define PDP_VID1SCALECTRL_VID1VORDER_SHIFT (16) -+#define PDP_VID1SCALECTRL_VID1VORDER_LENGTH (2) -+#define PDP_VID1SCALECTRL_VID1VORDER_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SCALECTRL, VID1VPITCH -+*/ -+#define PDP_VID1SCALECTRL_VID1VPITCH_MASK (0x0000FFFF) -+#define PDP_VID1SCALECTRL_VID1VPITCH_LSBMASK (0x0000FFFF) -+#define PDP_VID1SCALECTRL_VID1VPITCH_SHIFT (0) -+#define PDP_VID1SCALECTRL_VID1VPITCH_LENGTH (16) -+#define PDP_VID1SCALECTRL_VID1VPITCH_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1VSINIT_OFFSET (0x0464) -+ -+/* PDP, VID1VSINIT, VID1VINITIAL1 -+*/ -+#define PDP_VID1VSINIT_VID1VINITIAL1_MASK (0xFFFF0000) -+#define PDP_VID1VSINIT_VID1VINITIAL1_LSBMASK (0x0000FFFF) -+#define PDP_VID1VSINIT_VID1VINITIAL1_SHIFT (16) -+#define PDP_VID1VSINIT_VID1VINITIAL1_LENGTH (16) -+#define PDP_VID1VSINIT_VID1VINITIAL1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1VSINIT, VID1VINITIAL0 -+*/ -+#define PDP_VID1VSINIT_VID1VINITIAL0_MASK (0x0000FFFF) -+#define PDP_VID1VSINIT_VID1VINITIAL0_LSBMASK (0x0000FFFF) -+#define PDP_VID1VSINIT_VID1VINITIAL0_SHIFT (0) -+#define PDP_VID1VSINIT_VID1VINITIAL0_LENGTH (16) -+#define PDP_VID1VSINIT_VID1VINITIAL0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1VCOEFF0_OFFSET (0x0468) -+ -+/* PDP, VID1VCOEFF0, VID1VCOEFF0 -+*/ -+#define PDP_VID1VCOEFF0_VID1VCOEFF0_MASK (0xFFFFFFFF) -+#define PDP_VID1VCOEFF0_VID1VCOEFF0_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1VCOEFF0_VID1VCOEFF0_SHIFT (0) -+#define PDP_VID1VCOEFF0_VID1VCOEFF0_LENGTH (32) -+#define PDP_VID1VCOEFF0_VID1VCOEFF0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1VCOEFF1_OFFSET (0x046C) -+ -+/* PDP, VID1VCOEFF1, VID1VCOEFF1 -+*/ -+#define PDP_VID1VCOEFF1_VID1VCOEFF1_MASK (0xFFFFFFFF) -+#define PDP_VID1VCOEFF1_VID1VCOEFF1_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1VCOEFF1_VID1VCOEFF1_SHIFT (0) -+#define PDP_VID1VCOEFF1_VID1VCOEFF1_LENGTH (32) -+#define PDP_VID1VCOEFF1_VID1VCOEFF1_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1VCOEFF2_OFFSET (0x0470) -+ -+/* PDP, VID1VCOEFF2, VID1VCOEFF2 -+*/ 
-+#define PDP_VID1VCOEFF2_VID1VCOEFF2_MASK (0xFFFFFFFF) -+#define PDP_VID1VCOEFF2_VID1VCOEFF2_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1VCOEFF2_VID1VCOEFF2_SHIFT (0) -+#define PDP_VID1VCOEFF2_VID1VCOEFF2_LENGTH (32) -+#define PDP_VID1VCOEFF2_VID1VCOEFF2_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1VCOEFF3_OFFSET (0x0474) -+ -+/* PDP, VID1VCOEFF3, VID1VCOEFF3 -+*/ -+#define PDP_VID1VCOEFF3_VID1VCOEFF3_MASK (0xFFFFFFFF) -+#define PDP_VID1VCOEFF3_VID1VCOEFF3_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1VCOEFF3_VID1VCOEFF3_SHIFT (0) -+#define PDP_VID1VCOEFF3_VID1VCOEFF3_LENGTH (32) -+#define PDP_VID1VCOEFF3_VID1VCOEFF3_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1VCOEFF4_OFFSET (0x0478) -+ -+/* PDP, VID1VCOEFF4, VID1VCOEFF4 -+*/ -+#define PDP_VID1VCOEFF4_VID1VCOEFF4_MASK (0xFFFFFFFF) -+#define PDP_VID1VCOEFF4_VID1VCOEFF4_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1VCOEFF4_VID1VCOEFF4_SHIFT (0) -+#define PDP_VID1VCOEFF4_VID1VCOEFF4_LENGTH (32) -+#define PDP_VID1VCOEFF4_VID1VCOEFF4_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1VCOEFF5_OFFSET (0x047C) -+ -+/* PDP, VID1VCOEFF5, VID1VCOEFF5 -+*/ -+#define PDP_VID1VCOEFF5_VID1VCOEFF5_MASK (0xFFFFFFFF) -+#define PDP_VID1VCOEFF5_VID1VCOEFF5_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1VCOEFF5_VID1VCOEFF5_SHIFT (0) -+#define PDP_VID1VCOEFF5_VID1VCOEFF5_LENGTH (32) -+#define PDP_VID1VCOEFF5_VID1VCOEFF5_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1VCOEFF6_OFFSET (0x0480) -+ -+/* PDP, VID1VCOEFF6, VID1VCOEFF6 -+*/ -+#define PDP_VID1VCOEFF6_VID1VCOEFF6_MASK (0xFFFFFFFF) -+#define PDP_VID1VCOEFF6_VID1VCOEFF6_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1VCOEFF6_VID1VCOEFF6_SHIFT (0) -+#define PDP_VID1VCOEFF6_VID1VCOEFF6_LENGTH (32) -+#define PDP_VID1VCOEFF6_VID1VCOEFF6_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1VCOEFF7_OFFSET (0x0484) -+ -+/* PDP, VID1VCOEFF7, VID1VCOEFF7 -+*/ -+#define PDP_VID1VCOEFF7_VID1VCOEFF7_MASK (0xFFFFFFFF) -+#define PDP_VID1VCOEFF7_VID1VCOEFF7_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1VCOEFF7_VID1VCOEFF7_SHIFT (0) -+#define PDP_VID1VCOEFF7_VID1VCOEFF7_LENGTH (32) -+#define PDP_VID1VCOEFF7_VID1VCOEFF7_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1VCOEFF8_OFFSET (0x0488) -+ -+/* PDP, VID1VCOEFF8, VID1VCOEFF8 -+*/ -+#define PDP_VID1VCOEFF8_VID1VCOEFF8_MASK (0x000000FF) -+#define PDP_VID1VCOEFF8_VID1VCOEFF8_LSBMASK (0x000000FF) -+#define PDP_VID1VCOEFF8_VID1VCOEFF8_SHIFT (0) -+#define PDP_VID1VCOEFF8_VID1VCOEFF8_LENGTH (8) -+#define PDP_VID1VCOEFF8_VID1VCOEFF8_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1HSINIT_OFFSET (0x048C) -+ -+/* PDP, VID1HSINIT, VID1HINITIAL -+*/ -+#define PDP_VID1HSINIT_VID1HINITIAL_MASK (0xFFFF0000) -+#define PDP_VID1HSINIT_VID1HINITIAL_LSBMASK (0x0000FFFF) -+#define PDP_VID1HSINIT_VID1HINITIAL_SHIFT (16) -+#define PDP_VID1HSINIT_VID1HINITIAL_LENGTH (16) -+#define PDP_VID1HSINIT_VID1HINITIAL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1HSINIT, VID1HPITCH -+*/ -+#define PDP_VID1HSINIT_VID1HPITCH_MASK (0x0000FFFF) -+#define PDP_VID1HSINIT_VID1HPITCH_LSBMASK (0x0000FFFF) -+#define PDP_VID1HSINIT_VID1HPITCH_SHIFT (0) -+#define PDP_VID1HSINIT_VID1HPITCH_LENGTH (16) -+#define PDP_VID1HSINIT_VID1HPITCH_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1HCOEFF0_OFFSET (0x0490) -+ -+/* PDP, VID1HCOEFF0, VID1HCOEFF0 -+*/ -+#define PDP_VID1HCOEFF0_VID1HCOEFF0_MASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF0_VID1HCOEFF0_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF0_VID1HCOEFF0_SHIFT (0) -+#define PDP_VID1HCOEFF0_VID1HCOEFF0_LENGTH (32) -+#define PDP_VID1HCOEFF0_VID1HCOEFF0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1HCOEFF1_OFFSET (0x0494) -+ -+/* PDP, VID1HCOEFF1, 
VID1HCOEFF1 -+*/ -+#define PDP_VID1HCOEFF1_VID1HCOEFF1_MASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF1_VID1HCOEFF1_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF1_VID1HCOEFF1_SHIFT (0) -+#define PDP_VID1HCOEFF1_VID1HCOEFF1_LENGTH (32) -+#define PDP_VID1HCOEFF1_VID1HCOEFF1_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1HCOEFF2_OFFSET (0x0498) -+ -+/* PDP, VID1HCOEFF2, VID1HCOEFF2 -+*/ -+#define PDP_VID1HCOEFF2_VID1HCOEFF2_MASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF2_VID1HCOEFF2_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF2_VID1HCOEFF2_SHIFT (0) -+#define PDP_VID1HCOEFF2_VID1HCOEFF2_LENGTH (32) -+#define PDP_VID1HCOEFF2_VID1HCOEFF2_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1HCOEFF3_OFFSET (0x049C) -+ -+/* PDP, VID1HCOEFF3, VID1HCOEFF3 -+*/ -+#define PDP_VID1HCOEFF3_VID1HCOEFF3_MASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF3_VID1HCOEFF3_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF3_VID1HCOEFF3_SHIFT (0) -+#define PDP_VID1HCOEFF3_VID1HCOEFF3_LENGTH (32) -+#define PDP_VID1HCOEFF3_VID1HCOEFF3_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1HCOEFF4_OFFSET (0x04A0) -+ -+/* PDP, VID1HCOEFF4, VID1HCOEFF4 -+*/ -+#define PDP_VID1HCOEFF4_VID1HCOEFF4_MASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF4_VID1HCOEFF4_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF4_VID1HCOEFF4_SHIFT (0) -+#define PDP_VID1HCOEFF4_VID1HCOEFF4_LENGTH (32) -+#define PDP_VID1HCOEFF4_VID1HCOEFF4_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1HCOEFF5_OFFSET (0x04A4) -+ -+/* PDP, VID1HCOEFF5, VID1HCOEFF5 -+*/ -+#define PDP_VID1HCOEFF5_VID1HCOEFF5_MASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF5_VID1HCOEFF5_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF5_VID1HCOEFF5_SHIFT (0) -+#define PDP_VID1HCOEFF5_VID1HCOEFF5_LENGTH (32) -+#define PDP_VID1HCOEFF5_VID1HCOEFF5_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1HCOEFF6_OFFSET (0x04A8) -+ -+/* PDP, VID1HCOEFF6, VID1HCOEFF6 -+*/ -+#define PDP_VID1HCOEFF6_VID1HCOEFF6_MASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF6_VID1HCOEFF6_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF6_VID1HCOEFF6_SHIFT (0) -+#define PDP_VID1HCOEFF6_VID1HCOEFF6_LENGTH (32) -+#define PDP_VID1HCOEFF6_VID1HCOEFF6_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1HCOEFF7_OFFSET (0x04AC) -+ -+/* PDP, VID1HCOEFF7, VID1HCOEFF7 -+*/ -+#define PDP_VID1HCOEFF7_VID1HCOEFF7_MASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF7_VID1HCOEFF7_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF7_VID1HCOEFF7_SHIFT (0) -+#define PDP_VID1HCOEFF7_VID1HCOEFF7_LENGTH (32) -+#define PDP_VID1HCOEFF7_VID1HCOEFF7_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1HCOEFF8_OFFSET (0x04B0) -+ -+/* PDP, VID1HCOEFF8, VID1HCOEFF8 -+*/ -+#define PDP_VID1HCOEFF8_VID1HCOEFF8_MASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF8_VID1HCOEFF8_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF8_VID1HCOEFF8_SHIFT (0) -+#define PDP_VID1HCOEFF8_VID1HCOEFF8_LENGTH (32) -+#define PDP_VID1HCOEFF8_VID1HCOEFF8_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1HCOEFF9_OFFSET (0x04B4) -+ -+/* PDP, VID1HCOEFF9, VID1HCOEFF9 -+*/ -+#define PDP_VID1HCOEFF9_VID1HCOEFF9_MASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF9_VID1HCOEFF9_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF9_VID1HCOEFF9_SHIFT (0) -+#define PDP_VID1HCOEFF9_VID1HCOEFF9_LENGTH (32) -+#define PDP_VID1HCOEFF9_VID1HCOEFF9_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1HCOEFF10_OFFSET (0x04B8) -+ -+/* PDP, VID1HCOEFF10, VID1HCOEFF10 -+*/ -+#define PDP_VID1HCOEFF10_VID1HCOEFF10_MASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF10_VID1HCOEFF10_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF10_VID1HCOEFF10_SHIFT (0) -+#define PDP_VID1HCOEFF10_VID1HCOEFF10_LENGTH (32) -+#define PDP_VID1HCOEFF10_VID1HCOEFF10_SIGNED_FIELD 
IMG_FALSE -+ -+#define PDP_VID1HCOEFF11_OFFSET (0x04BC) -+ -+/* PDP, VID1HCOEFF11, VID1HCOEFF11 -+*/ -+#define PDP_VID1HCOEFF11_VID1HCOEFF11_MASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF11_VID1HCOEFF11_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF11_VID1HCOEFF11_SHIFT (0) -+#define PDP_VID1HCOEFF11_VID1HCOEFF11_LENGTH (32) -+#define PDP_VID1HCOEFF11_VID1HCOEFF11_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1HCOEFF12_OFFSET (0x04C0) -+ -+/* PDP, VID1HCOEFF12, VID1HCOEFF12 -+*/ -+#define PDP_VID1HCOEFF12_VID1HCOEFF12_MASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF12_VID1HCOEFF12_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF12_VID1HCOEFF12_SHIFT (0) -+#define PDP_VID1HCOEFF12_VID1HCOEFF12_LENGTH (32) -+#define PDP_VID1HCOEFF12_VID1HCOEFF12_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1HCOEFF13_OFFSET (0x04C4) -+ -+/* PDP, VID1HCOEFF13, VID1HCOEFF13 -+*/ -+#define PDP_VID1HCOEFF13_VID1HCOEFF13_MASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF13_VID1HCOEFF13_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF13_VID1HCOEFF13_SHIFT (0) -+#define PDP_VID1HCOEFF13_VID1HCOEFF13_LENGTH (32) -+#define PDP_VID1HCOEFF13_VID1HCOEFF13_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1HCOEFF14_OFFSET (0x04C8) -+ -+/* PDP, VID1HCOEFF14, VID1HCOEFF14 -+*/ -+#define PDP_VID1HCOEFF14_VID1HCOEFF14_MASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF14_VID1HCOEFF14_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF14_VID1HCOEFF14_SHIFT (0) -+#define PDP_VID1HCOEFF14_VID1HCOEFF14_LENGTH (32) -+#define PDP_VID1HCOEFF14_VID1HCOEFF14_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1HCOEFF15_OFFSET (0x04CC) -+ -+/* PDP, VID1HCOEFF15, VID1HCOEFF15 -+*/ -+#define PDP_VID1HCOEFF15_VID1HCOEFF15_MASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF15_VID1HCOEFF15_LSBMASK (0xFFFFFFFF) -+#define PDP_VID1HCOEFF15_VID1HCOEFF15_SHIFT (0) -+#define PDP_VID1HCOEFF15_VID1HCOEFF15_LENGTH (32) -+#define PDP_VID1HCOEFF15_VID1HCOEFF15_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1HCOEFF16_OFFSET (0x04D0) -+ -+/* PDP, VID1HCOEFF16, VID1HCOEFF16 -+*/ -+#define PDP_VID1HCOEFF16_VID1HCOEFF16_MASK (0x000000FF) -+#define PDP_VID1HCOEFF16_VID1HCOEFF16_LSBMASK (0x000000FF) -+#define PDP_VID1HCOEFF16_VID1HCOEFF16_SHIFT (0) -+#define PDP_VID1HCOEFF16_VID1HCOEFF16_LENGTH (8) -+#define PDP_VID1HCOEFF16_VID1HCOEFF16_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1SCALESIZE_OFFSET (0x04D4) -+ -+/* PDP, VID1SCALESIZE, VID1SCALEWIDTH -+*/ -+#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_MASK (0x0FFF0000) -+#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_LSBMASK (0x00000FFF) -+#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_SHIFT (16) -+#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_LENGTH (12) -+#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1SCALESIZE, VID1SCALEHEIGHT -+*/ -+#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_MASK (0x00000FFF) -+#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LSBMASK (0x00000FFF) -+#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SHIFT (0) -+#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LENGTH (12) -+#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_CORE_ID_OFFSET (0x04E0) -+ -+/* PDP, PVR_PDP_CORE_ID, GROUP_ID -+*/ -+#define PDP_CORE_ID_GROUP_ID_MASK (0xFF000000) -+#define PDP_CORE_ID_GROUP_ID_LSBMASK (0x000000FF) -+#define PDP_CORE_ID_GROUP_ID_SHIFT (24) -+#define PDP_CORE_ID_GROUP_ID_LENGTH (8) -+#define PDP_CORE_ID_GROUP_ID_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PVR_PDP_CORE_ID, CORE_ID -+*/ -+#define PDP_CORE_ID_CORE_ID_MASK (0x00FF0000) -+#define PDP_CORE_ID_CORE_ID_LSBMASK (0x000000FF) -+#define PDP_CORE_ID_CORE_ID_SHIFT (16) -+#define PDP_CORE_ID_CORE_ID_LENGTH (8) 
-+#define PDP_CORE_ID_CORE_ID_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PVR_PDP_CORE_ID, CONFIG_ID -+*/ -+#define PDP_CORE_ID_CONFIG_ID_MASK (0x0000FFFF) -+#define PDP_CORE_ID_CONFIG_ID_LSBMASK (0x0000FFFF) -+#define PDP_CORE_ID_CONFIG_ID_SHIFT (0) -+#define PDP_CORE_ID_CONFIG_ID_LENGTH (16) -+#define PDP_CORE_ID_CONFIG_ID_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_CORE_REV_OFFSET (0x04F0) -+ -+/* PDP, PVR_PDP_CORE_REV, MAJOR_REV -+*/ -+#define PDP_CORE_REV_MAJOR_REV_MASK (0x00FF0000) -+#define PDP_CORE_REV_MAJOR_REV_LSBMASK (0x000000FF) -+#define PDP_CORE_REV_MAJOR_REV_SHIFT (16) -+#define PDP_CORE_REV_MAJOR_REV_LENGTH (8) -+#define PDP_CORE_REV_MAJOR_REV_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PVR_PDP_CORE_REV, MINOR_REV -+*/ -+#define PDP_CORE_REV_MINOR_REV_MASK (0x0000FF00) -+#define PDP_CORE_REV_MINOR_REV_LSBMASK (0x000000FF) -+#define PDP_CORE_REV_MINOR_REV_SHIFT (8) -+#define PDP_CORE_REV_MINOR_REV_LENGTH (8) -+#define PDP_CORE_REV_MINOR_REV_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PVR_PDP_CORE_REV, MAINT_REV -+*/ -+#define PDP_CORE_REV_MAINT_REV_MASK (0x000000FF) -+#define PDP_CORE_REV_MAINT_REV_LSBMASK (0x000000FF) -+#define PDP_CORE_REV_MAINT_REV_SHIFT (0) -+#define PDP_CORE_REV_MAINT_REV_LENGTH (8) -+#define PDP_CORE_REV_MAINT_REV_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2SCALECTRL_OFFSET (0x0500) -+ -+/* PDP, VID2SCALECTRL, VID2HSCALEBP -+*/ -+#define PDP_VID2SCALECTRL_VID2HSCALEBP_MASK (0x80000000) -+#define PDP_VID2SCALECTRL_VID2HSCALEBP_LSBMASK (0x00000001) -+#define PDP_VID2SCALECTRL_VID2HSCALEBP_SHIFT (31) -+#define PDP_VID2SCALECTRL_VID2HSCALEBP_LENGTH (1) -+#define PDP_VID2SCALECTRL_VID2HSCALEBP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SCALECTRL, VID2VSCALEBP -+*/ -+#define PDP_VID2SCALECTRL_VID2VSCALEBP_MASK (0x40000000) -+#define PDP_VID2SCALECTRL_VID2VSCALEBP_LSBMASK (0x00000001) -+#define PDP_VID2SCALECTRL_VID2VSCALEBP_SHIFT (30) -+#define PDP_VID2SCALECTRL_VID2VSCALEBP_LENGTH (1) -+#define PDP_VID2SCALECTRL_VID2VSCALEBP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SCALECTRL, VID2HSBEFOREVS -+*/ -+#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_MASK (0x20000000) -+#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_LSBMASK (0x00000001) -+#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_SHIFT (29) -+#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_LENGTH (1) -+#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SCALECTRL, VID2VSURUNCTRL -+*/ -+#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_MASK (0x08000000) -+#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_LSBMASK (0x00000001) -+#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_SHIFT (27) -+#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_LENGTH (1) -+#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SCALECTRL, VID2PAN_EN -+*/ -+#define PDP_VID2SCALECTRL_VID2PAN_EN_MASK (0x00040000) -+#define PDP_VID2SCALECTRL_VID2PAN_EN_LSBMASK (0x00000001) -+#define PDP_VID2SCALECTRL_VID2PAN_EN_SHIFT (18) -+#define PDP_VID2SCALECTRL_VID2PAN_EN_LENGTH (1) -+#define PDP_VID2SCALECTRL_VID2PAN_EN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SCALECTRL, VID2VORDER -+*/ -+#define PDP_VID2SCALECTRL_VID2VORDER_MASK (0x00030000) -+#define PDP_VID2SCALECTRL_VID2VORDER_LSBMASK (0x00000003) -+#define PDP_VID2SCALECTRL_VID2VORDER_SHIFT (16) -+#define PDP_VID2SCALECTRL_VID2VORDER_LENGTH (2) -+#define PDP_VID2SCALECTRL_VID2VORDER_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SCALECTRL, VID2VPITCH -+*/ -+#define PDP_VID2SCALECTRL_VID2VPITCH_MASK (0x0000FFFF) -+#define PDP_VID2SCALECTRL_VID2VPITCH_LSBMASK (0x0000FFFF) -+#define PDP_VID2SCALECTRL_VID2VPITCH_SHIFT (0) -+#define 
PDP_VID2SCALECTRL_VID2VPITCH_LENGTH (16) -+#define PDP_VID2SCALECTRL_VID2VPITCH_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2VSINIT_OFFSET (0x0504) -+ -+/* PDP, VID2VSINIT, VID2VINITIAL1 -+*/ -+#define PDP_VID2VSINIT_VID2VINITIAL1_MASK (0xFFFF0000) -+#define PDP_VID2VSINIT_VID2VINITIAL1_LSBMASK (0x0000FFFF) -+#define PDP_VID2VSINIT_VID2VINITIAL1_SHIFT (16) -+#define PDP_VID2VSINIT_VID2VINITIAL1_LENGTH (16) -+#define PDP_VID2VSINIT_VID2VINITIAL1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2VSINIT, VID2VINITIAL0 -+*/ -+#define PDP_VID2VSINIT_VID2VINITIAL0_MASK (0x0000FFFF) -+#define PDP_VID2VSINIT_VID2VINITIAL0_LSBMASK (0x0000FFFF) -+#define PDP_VID2VSINIT_VID2VINITIAL0_SHIFT (0) -+#define PDP_VID2VSINIT_VID2VINITIAL0_LENGTH (16) -+#define PDP_VID2VSINIT_VID2VINITIAL0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2VCOEFF0_OFFSET (0x0508) -+ -+/* PDP, VID2VCOEFF0, VID2VCOEFF0 -+*/ -+#define PDP_VID2VCOEFF0_VID2VCOEFF0_MASK (0xFFFFFFFF) -+#define PDP_VID2VCOEFF0_VID2VCOEFF0_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2VCOEFF0_VID2VCOEFF0_SHIFT (0) -+#define PDP_VID2VCOEFF0_VID2VCOEFF0_LENGTH (32) -+#define PDP_VID2VCOEFF0_VID2VCOEFF0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2VCOEFF1_OFFSET (0x050C) -+ -+/* PDP, VID2VCOEFF1, VID2VCOEFF1 -+*/ -+#define PDP_VID2VCOEFF1_VID2VCOEFF1_MASK (0xFFFFFFFF) -+#define PDP_VID2VCOEFF1_VID2VCOEFF1_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2VCOEFF1_VID2VCOEFF1_SHIFT (0) -+#define PDP_VID2VCOEFF1_VID2VCOEFF1_LENGTH (32) -+#define PDP_VID2VCOEFF1_VID2VCOEFF1_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2VCOEFF2_OFFSET (0x0510) -+ -+/* PDP, VID2VCOEFF2, VID2VCOEFF2 -+*/ -+#define PDP_VID2VCOEFF2_VID2VCOEFF2_MASK (0xFFFFFFFF) -+#define PDP_VID2VCOEFF2_VID2VCOEFF2_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2VCOEFF2_VID2VCOEFF2_SHIFT (0) -+#define PDP_VID2VCOEFF2_VID2VCOEFF2_LENGTH (32) -+#define PDP_VID2VCOEFF2_VID2VCOEFF2_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2VCOEFF3_OFFSET (0x0514) -+ -+/* PDP, VID2VCOEFF3, VID2VCOEFF3 -+*/ -+#define PDP_VID2VCOEFF3_VID2VCOEFF3_MASK (0xFFFFFFFF) -+#define PDP_VID2VCOEFF3_VID2VCOEFF3_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2VCOEFF3_VID2VCOEFF3_SHIFT (0) -+#define PDP_VID2VCOEFF3_VID2VCOEFF3_LENGTH (32) -+#define PDP_VID2VCOEFF3_VID2VCOEFF3_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2VCOEFF4_OFFSET (0x0518) -+ -+/* PDP, VID2VCOEFF4, VID2VCOEFF4 -+*/ -+#define PDP_VID2VCOEFF4_VID2VCOEFF4_MASK (0xFFFFFFFF) -+#define PDP_VID2VCOEFF4_VID2VCOEFF4_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2VCOEFF4_VID2VCOEFF4_SHIFT (0) -+#define PDP_VID2VCOEFF4_VID2VCOEFF4_LENGTH (32) -+#define PDP_VID2VCOEFF4_VID2VCOEFF4_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2VCOEFF5_OFFSET (0x051C) -+ -+/* PDP, VID2VCOEFF5, VID2VCOEFF5 -+*/ -+#define PDP_VID2VCOEFF5_VID2VCOEFF5_MASK (0xFFFFFFFF) -+#define PDP_VID2VCOEFF5_VID2VCOEFF5_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2VCOEFF5_VID2VCOEFF5_SHIFT (0) -+#define PDP_VID2VCOEFF5_VID2VCOEFF5_LENGTH (32) -+#define PDP_VID2VCOEFF5_VID2VCOEFF5_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2VCOEFF6_OFFSET (0x0520) -+ -+/* PDP, VID2VCOEFF6, VID2VCOEFF6 -+*/ -+#define PDP_VID2VCOEFF6_VID2VCOEFF6_MASK (0xFFFFFFFF) -+#define PDP_VID2VCOEFF6_VID2VCOEFF6_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2VCOEFF6_VID2VCOEFF6_SHIFT (0) -+#define PDP_VID2VCOEFF6_VID2VCOEFF6_LENGTH (32) -+#define PDP_VID2VCOEFF6_VID2VCOEFF6_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2VCOEFF7_OFFSET (0x0524) -+ -+/* PDP, VID2VCOEFF7, VID2VCOEFF7 -+*/ -+#define PDP_VID2VCOEFF7_VID2VCOEFF7_MASK (0xFFFFFFFF) -+#define PDP_VID2VCOEFF7_VID2VCOEFF7_LSBMASK (0xFFFFFFFF) -+#define 
PDP_VID2VCOEFF7_VID2VCOEFF7_SHIFT (0) -+#define PDP_VID2VCOEFF7_VID2VCOEFF7_LENGTH (32) -+#define PDP_VID2VCOEFF7_VID2VCOEFF7_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2VCOEFF8_OFFSET (0x0528) -+ -+/* PDP, VID2VCOEFF8, VID2VCOEFF8 -+*/ -+#define PDP_VID2VCOEFF8_VID2VCOEFF8_MASK (0x000000FF) -+#define PDP_VID2VCOEFF8_VID2VCOEFF8_LSBMASK (0x000000FF) -+#define PDP_VID2VCOEFF8_VID2VCOEFF8_SHIFT (0) -+#define PDP_VID2VCOEFF8_VID2VCOEFF8_LENGTH (8) -+#define PDP_VID2VCOEFF8_VID2VCOEFF8_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HSINIT_OFFSET (0x052C) -+ -+/* PDP, VID2HSINIT, VID2HINITIAL -+*/ -+#define PDP_VID2HSINIT_VID2HINITIAL_MASK (0xFFFF0000) -+#define PDP_VID2HSINIT_VID2HINITIAL_LSBMASK (0x0000FFFF) -+#define PDP_VID2HSINIT_VID2HINITIAL_SHIFT (16) -+#define PDP_VID2HSINIT_VID2HINITIAL_LENGTH (16) -+#define PDP_VID2HSINIT_VID2HINITIAL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2HSINIT, VID2HPITCH -+*/ -+#define PDP_VID2HSINIT_VID2HPITCH_MASK (0x0000FFFF) -+#define PDP_VID2HSINIT_VID2HPITCH_LSBMASK (0x0000FFFF) -+#define PDP_VID2HSINIT_VID2HPITCH_SHIFT (0) -+#define PDP_VID2HSINIT_VID2HPITCH_LENGTH (16) -+#define PDP_VID2HSINIT_VID2HPITCH_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HCOEFF0_OFFSET (0x0530) -+ -+/* PDP, VID2HCOEFF0, VID2HCOEFF0 -+*/ -+#define PDP_VID2HCOEFF0_VID2HCOEFF0_MASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF0_VID2HCOEFF0_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF0_VID2HCOEFF0_SHIFT (0) -+#define PDP_VID2HCOEFF0_VID2HCOEFF0_LENGTH (32) -+#define PDP_VID2HCOEFF0_VID2HCOEFF0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HCOEFF1_OFFSET (0x0534) -+ -+/* PDP, VID2HCOEFF1, VID2HCOEFF1 -+*/ -+#define PDP_VID2HCOEFF1_VID2HCOEFF1_MASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF1_VID2HCOEFF1_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF1_VID2HCOEFF1_SHIFT (0) -+#define PDP_VID2HCOEFF1_VID2HCOEFF1_LENGTH (32) -+#define PDP_VID2HCOEFF1_VID2HCOEFF1_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HCOEFF2_OFFSET (0x0538) -+ -+/* PDP, VID2HCOEFF2, VID2HCOEFF2 -+*/ -+#define PDP_VID2HCOEFF2_VID2HCOEFF2_MASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF2_VID2HCOEFF2_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF2_VID2HCOEFF2_SHIFT (0) -+#define PDP_VID2HCOEFF2_VID2HCOEFF2_LENGTH (32) -+#define PDP_VID2HCOEFF2_VID2HCOEFF2_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HCOEFF3_OFFSET (0x053C) -+ -+/* PDP, VID2HCOEFF3, VID2HCOEFF3 -+*/ -+#define PDP_VID2HCOEFF3_VID2HCOEFF3_MASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF3_VID2HCOEFF3_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF3_VID2HCOEFF3_SHIFT (0) -+#define PDP_VID2HCOEFF3_VID2HCOEFF3_LENGTH (32) -+#define PDP_VID2HCOEFF3_VID2HCOEFF3_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HCOEFF4_OFFSET (0x0540) -+ -+/* PDP, VID2HCOEFF4, VID2HCOEFF4 -+*/ -+#define PDP_VID2HCOEFF4_VID2HCOEFF4_MASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF4_VID2HCOEFF4_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF4_VID2HCOEFF4_SHIFT (0) -+#define PDP_VID2HCOEFF4_VID2HCOEFF4_LENGTH (32) -+#define PDP_VID2HCOEFF4_VID2HCOEFF4_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HCOEFF5_OFFSET (0x0544) -+ -+/* PDP, VID2HCOEFF5, VID2HCOEFF5 -+*/ -+#define PDP_VID2HCOEFF5_VID2HCOEFF5_MASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF5_VID2HCOEFF5_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF5_VID2HCOEFF5_SHIFT (0) -+#define PDP_VID2HCOEFF5_VID2HCOEFF5_LENGTH (32) -+#define PDP_VID2HCOEFF5_VID2HCOEFF5_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HCOEFF6_OFFSET (0x0548) -+ -+/* PDP, VID2HCOEFF6, VID2HCOEFF6 -+*/ -+#define PDP_VID2HCOEFF6_VID2HCOEFF6_MASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF6_VID2HCOEFF6_LSBMASK (0xFFFFFFFF) 
-+#define PDP_VID2HCOEFF6_VID2HCOEFF6_SHIFT (0) -+#define PDP_VID2HCOEFF6_VID2HCOEFF6_LENGTH (32) -+#define PDP_VID2HCOEFF6_VID2HCOEFF6_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HCOEFF7_OFFSET (0x054C) -+ -+/* PDP, VID2HCOEFF7, VID2HCOEFF7 -+*/ -+#define PDP_VID2HCOEFF7_VID2HCOEFF7_MASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF7_VID2HCOEFF7_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF7_VID2HCOEFF7_SHIFT (0) -+#define PDP_VID2HCOEFF7_VID2HCOEFF7_LENGTH (32) -+#define PDP_VID2HCOEFF7_VID2HCOEFF7_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HCOEFF8_OFFSET (0x0550) -+ -+/* PDP, VID2HCOEFF8, VID2HCOEFF8 -+*/ -+#define PDP_VID2HCOEFF8_VID2HCOEFF8_MASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF8_VID2HCOEFF8_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF8_VID2HCOEFF8_SHIFT (0) -+#define PDP_VID2HCOEFF8_VID2HCOEFF8_LENGTH (32) -+#define PDP_VID2HCOEFF8_VID2HCOEFF8_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HCOEFF9_OFFSET (0x0554) -+ -+/* PDP, VID2HCOEFF9, VID2HCOEFF9 -+*/ -+#define PDP_VID2HCOEFF9_VID2HCOEFF9_MASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF9_VID2HCOEFF9_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF9_VID2HCOEFF9_SHIFT (0) -+#define PDP_VID2HCOEFF9_VID2HCOEFF9_LENGTH (32) -+#define PDP_VID2HCOEFF9_VID2HCOEFF9_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HCOEFF10_OFFSET (0x0558) -+ -+/* PDP, VID2HCOEFF10, VID2HCOEFF10 -+*/ -+#define PDP_VID2HCOEFF10_VID2HCOEFF10_MASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF10_VID2HCOEFF10_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF10_VID2HCOEFF10_SHIFT (0) -+#define PDP_VID2HCOEFF10_VID2HCOEFF10_LENGTH (32) -+#define PDP_VID2HCOEFF10_VID2HCOEFF10_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HCOEFF11_OFFSET (0x055C) -+ -+/* PDP, VID2HCOEFF11, VID2HCOEFF11 -+*/ -+#define PDP_VID2HCOEFF11_VID2HCOEFF11_MASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF11_VID2HCOEFF11_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF11_VID2HCOEFF11_SHIFT (0) -+#define PDP_VID2HCOEFF11_VID2HCOEFF11_LENGTH (32) -+#define PDP_VID2HCOEFF11_VID2HCOEFF11_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HCOEFF12_OFFSET (0x0560) -+ -+/* PDP, VID2HCOEFF12, VID2HCOEFF12 -+*/ -+#define PDP_VID2HCOEFF12_VID2HCOEFF12_MASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF12_VID2HCOEFF12_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF12_VID2HCOEFF12_SHIFT (0) -+#define PDP_VID2HCOEFF12_VID2HCOEFF12_LENGTH (32) -+#define PDP_VID2HCOEFF12_VID2HCOEFF12_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HCOEFF13_OFFSET (0x0564) -+ -+/* PDP, VID2HCOEFF13, VID2HCOEFF13 -+*/ -+#define PDP_VID2HCOEFF13_VID2HCOEFF13_MASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF13_VID2HCOEFF13_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF13_VID2HCOEFF13_SHIFT (0) -+#define PDP_VID2HCOEFF13_VID2HCOEFF13_LENGTH (32) -+#define PDP_VID2HCOEFF13_VID2HCOEFF13_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HCOEFF14_OFFSET (0x0568) -+ -+/* PDP, VID2HCOEFF14, VID2HCOEFF14 -+*/ -+#define PDP_VID2HCOEFF14_VID2HCOEFF14_MASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF14_VID2HCOEFF14_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF14_VID2HCOEFF14_SHIFT (0) -+#define PDP_VID2HCOEFF14_VID2HCOEFF14_LENGTH (32) -+#define PDP_VID2HCOEFF14_VID2HCOEFF14_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HCOEFF15_OFFSET (0x056C) -+ -+/* PDP, VID2HCOEFF15, VID2HCOEFF15 -+*/ -+#define PDP_VID2HCOEFF15_VID2HCOEFF15_MASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF15_VID2HCOEFF15_LSBMASK (0xFFFFFFFF) -+#define PDP_VID2HCOEFF15_VID2HCOEFF15_SHIFT (0) -+#define PDP_VID2HCOEFF15_VID2HCOEFF15_LENGTH (32) -+#define PDP_VID2HCOEFF15_VID2HCOEFF15_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2HCOEFF16_OFFSET (0x0570) -+ -+/* PDP, 
VID2HCOEFF16, VID2HCOEFF16 -+*/ -+#define PDP_VID2HCOEFF16_VID2HCOEFF16_MASK (0x000000FF) -+#define PDP_VID2HCOEFF16_VID2HCOEFF16_LSBMASK (0x000000FF) -+#define PDP_VID2HCOEFF16_VID2HCOEFF16_SHIFT (0) -+#define PDP_VID2HCOEFF16_VID2HCOEFF16_LENGTH (8) -+#define PDP_VID2HCOEFF16_VID2HCOEFF16_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2SCALESIZE_OFFSET (0x0574) -+ -+/* PDP, VID2SCALESIZE, VID2SCALEWIDTH -+*/ -+#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_MASK (0x0FFF0000) -+#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_LSBMASK (0x00000FFF) -+#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_SHIFT (16) -+#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_LENGTH (12) -+#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2SCALESIZE, VID2SCALEHEIGHT -+*/ -+#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_MASK (0x00000FFF) -+#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LSBMASK (0x00000FFF) -+#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SHIFT (0) -+#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LENGTH (12) -+#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3SCALECTRL_OFFSET (0x0578) -+ -+/* PDP, VID3SCALECTRL, VID3HSCALEBP -+*/ -+#define PDP_VID3SCALECTRL_VID3HSCALEBP_MASK (0x80000000) -+#define PDP_VID3SCALECTRL_VID3HSCALEBP_LSBMASK (0x00000001) -+#define PDP_VID3SCALECTRL_VID3HSCALEBP_SHIFT (31) -+#define PDP_VID3SCALECTRL_VID3HSCALEBP_LENGTH (1) -+#define PDP_VID3SCALECTRL_VID3HSCALEBP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SCALECTRL, VID3VSCALEBP -+*/ -+#define PDP_VID3SCALECTRL_VID3VSCALEBP_MASK (0x40000000) -+#define PDP_VID3SCALECTRL_VID3VSCALEBP_LSBMASK (0x00000001) -+#define PDP_VID3SCALECTRL_VID3VSCALEBP_SHIFT (30) -+#define PDP_VID3SCALECTRL_VID3VSCALEBP_LENGTH (1) -+#define PDP_VID3SCALECTRL_VID3VSCALEBP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SCALECTRL, VID3HSBEFOREVS -+*/ -+#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_MASK (0x20000000) -+#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_LSBMASK (0x00000001) -+#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_SHIFT (29) -+#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_LENGTH (1) -+#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SCALECTRL, VID3VSURUNCTRL -+*/ -+#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_MASK (0x08000000) -+#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_LSBMASK (0x00000001) -+#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_SHIFT (27) -+#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_LENGTH (1) -+#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SCALECTRL, VID3PAN_EN -+*/ -+#define PDP_VID3SCALECTRL_VID3PAN_EN_MASK (0x00040000) -+#define PDP_VID3SCALECTRL_VID3PAN_EN_LSBMASK (0x00000001) -+#define PDP_VID3SCALECTRL_VID3PAN_EN_SHIFT (18) -+#define PDP_VID3SCALECTRL_VID3PAN_EN_LENGTH (1) -+#define PDP_VID3SCALECTRL_VID3PAN_EN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SCALECTRL, VID3VORDER -+*/ -+#define PDP_VID3SCALECTRL_VID3VORDER_MASK (0x00030000) -+#define PDP_VID3SCALECTRL_VID3VORDER_LSBMASK (0x00000003) -+#define PDP_VID3SCALECTRL_VID3VORDER_SHIFT (16) -+#define PDP_VID3SCALECTRL_VID3VORDER_LENGTH (2) -+#define PDP_VID3SCALECTRL_VID3VORDER_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SCALECTRL, VID3VPITCH -+*/ -+#define PDP_VID3SCALECTRL_VID3VPITCH_MASK (0x0000FFFF) -+#define PDP_VID3SCALECTRL_VID3VPITCH_LSBMASK (0x0000FFFF) -+#define PDP_VID3SCALECTRL_VID3VPITCH_SHIFT (0) -+#define PDP_VID3SCALECTRL_VID3VPITCH_LENGTH (16) -+#define PDP_VID3SCALECTRL_VID3VPITCH_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3VSINIT_OFFSET (0x057C) -+ -+/* PDP, VID3VSINIT, VID3VINITIAL1 -+*/ -+#define 
PDP_VID3VSINIT_VID3VINITIAL1_MASK (0xFFFF0000) -+#define PDP_VID3VSINIT_VID3VINITIAL1_LSBMASK (0x0000FFFF) -+#define PDP_VID3VSINIT_VID3VINITIAL1_SHIFT (16) -+#define PDP_VID3VSINIT_VID3VINITIAL1_LENGTH (16) -+#define PDP_VID3VSINIT_VID3VINITIAL1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3VSINIT, VID3VINITIAL0 -+*/ -+#define PDP_VID3VSINIT_VID3VINITIAL0_MASK (0x0000FFFF) -+#define PDP_VID3VSINIT_VID3VINITIAL0_LSBMASK (0x0000FFFF) -+#define PDP_VID3VSINIT_VID3VINITIAL0_SHIFT (0) -+#define PDP_VID3VSINIT_VID3VINITIAL0_LENGTH (16) -+#define PDP_VID3VSINIT_VID3VINITIAL0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3VCOEFF0_OFFSET (0x0580) -+ -+/* PDP, VID3VCOEFF0, VID3VCOEFF0 -+*/ -+#define PDP_VID3VCOEFF0_VID3VCOEFF0_MASK (0xFFFFFFFF) -+#define PDP_VID3VCOEFF0_VID3VCOEFF0_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3VCOEFF0_VID3VCOEFF0_SHIFT (0) -+#define PDP_VID3VCOEFF0_VID3VCOEFF0_LENGTH (32) -+#define PDP_VID3VCOEFF0_VID3VCOEFF0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3VCOEFF1_OFFSET (0x0584) -+ -+/* PDP, VID3VCOEFF1, VID3VCOEFF1 -+*/ -+#define PDP_VID3VCOEFF1_VID3VCOEFF1_MASK (0xFFFFFFFF) -+#define PDP_VID3VCOEFF1_VID3VCOEFF1_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3VCOEFF1_VID3VCOEFF1_SHIFT (0) -+#define PDP_VID3VCOEFF1_VID3VCOEFF1_LENGTH (32) -+#define PDP_VID3VCOEFF1_VID3VCOEFF1_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3VCOEFF2_OFFSET (0x0588) -+ -+/* PDP, VID3VCOEFF2, VID3VCOEFF2 -+*/ -+#define PDP_VID3VCOEFF2_VID3VCOEFF2_MASK (0xFFFFFFFF) -+#define PDP_VID3VCOEFF2_VID3VCOEFF2_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3VCOEFF2_VID3VCOEFF2_SHIFT (0) -+#define PDP_VID3VCOEFF2_VID3VCOEFF2_LENGTH (32) -+#define PDP_VID3VCOEFF2_VID3VCOEFF2_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3VCOEFF3_OFFSET (0x058C) -+ -+/* PDP, VID3VCOEFF3, VID3VCOEFF3 -+*/ -+#define PDP_VID3VCOEFF3_VID3VCOEFF3_MASK (0xFFFFFFFF) -+#define PDP_VID3VCOEFF3_VID3VCOEFF3_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3VCOEFF3_VID3VCOEFF3_SHIFT (0) -+#define PDP_VID3VCOEFF3_VID3VCOEFF3_LENGTH (32) -+#define PDP_VID3VCOEFF3_VID3VCOEFF3_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3VCOEFF4_OFFSET (0x0590) -+ -+/* PDP, VID3VCOEFF4, VID3VCOEFF4 -+*/ -+#define PDP_VID3VCOEFF4_VID3VCOEFF4_MASK (0xFFFFFFFF) -+#define PDP_VID3VCOEFF4_VID3VCOEFF4_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3VCOEFF4_VID3VCOEFF4_SHIFT (0) -+#define PDP_VID3VCOEFF4_VID3VCOEFF4_LENGTH (32) -+#define PDP_VID3VCOEFF4_VID3VCOEFF4_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3VCOEFF5_OFFSET (0x0594) -+ -+/* PDP, VID3VCOEFF5, VID3VCOEFF5 -+*/ -+#define PDP_VID3VCOEFF5_VID3VCOEFF5_MASK (0xFFFFFFFF) -+#define PDP_VID3VCOEFF5_VID3VCOEFF5_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3VCOEFF5_VID3VCOEFF5_SHIFT (0) -+#define PDP_VID3VCOEFF5_VID3VCOEFF5_LENGTH (32) -+#define PDP_VID3VCOEFF5_VID3VCOEFF5_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3VCOEFF6_OFFSET (0x0598) -+ -+/* PDP, VID3VCOEFF6, VID3VCOEFF6 -+*/ -+#define PDP_VID3VCOEFF6_VID3VCOEFF6_MASK (0xFFFFFFFF) -+#define PDP_VID3VCOEFF6_VID3VCOEFF6_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3VCOEFF6_VID3VCOEFF6_SHIFT (0) -+#define PDP_VID3VCOEFF6_VID3VCOEFF6_LENGTH (32) -+#define PDP_VID3VCOEFF6_VID3VCOEFF6_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3VCOEFF7_OFFSET (0x059C) -+ -+/* PDP, VID3VCOEFF7, VID3VCOEFF7 -+*/ -+#define PDP_VID3VCOEFF7_VID3VCOEFF7_MASK (0xFFFFFFFF) -+#define PDP_VID3VCOEFF7_VID3VCOEFF7_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3VCOEFF7_VID3VCOEFF7_SHIFT (0) -+#define PDP_VID3VCOEFF7_VID3VCOEFF7_LENGTH (32) -+#define PDP_VID3VCOEFF7_VID3VCOEFF7_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3VCOEFF8_OFFSET (0x05A0) -+ -+/* PDP, 
VID3VCOEFF8, VID3VCOEFF8 -+*/ -+#define PDP_VID3VCOEFF8_VID3VCOEFF8_MASK (0x000000FF) -+#define PDP_VID3VCOEFF8_VID3VCOEFF8_LSBMASK (0x000000FF) -+#define PDP_VID3VCOEFF8_VID3VCOEFF8_SHIFT (0) -+#define PDP_VID3VCOEFF8_VID3VCOEFF8_LENGTH (8) -+#define PDP_VID3VCOEFF8_VID3VCOEFF8_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HSINIT_OFFSET (0x05A4) -+ -+/* PDP, VID3HSINIT, VID3HINITIAL -+*/ -+#define PDP_VID3HSINIT_VID3HINITIAL_MASK (0xFFFF0000) -+#define PDP_VID3HSINIT_VID3HINITIAL_LSBMASK (0x0000FFFF) -+#define PDP_VID3HSINIT_VID3HINITIAL_SHIFT (16) -+#define PDP_VID3HSINIT_VID3HINITIAL_LENGTH (16) -+#define PDP_VID3HSINIT_VID3HINITIAL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3HSINIT, VID3HPITCH -+*/ -+#define PDP_VID3HSINIT_VID3HPITCH_MASK (0x0000FFFF) -+#define PDP_VID3HSINIT_VID3HPITCH_LSBMASK (0x0000FFFF) -+#define PDP_VID3HSINIT_VID3HPITCH_SHIFT (0) -+#define PDP_VID3HSINIT_VID3HPITCH_LENGTH (16) -+#define PDP_VID3HSINIT_VID3HPITCH_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HCOEFF0_OFFSET (0x05A8) -+ -+/* PDP, VID3HCOEFF0, VID3HCOEFF0 -+*/ -+#define PDP_VID3HCOEFF0_VID3HCOEFF0_MASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF0_VID3HCOEFF0_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF0_VID3HCOEFF0_SHIFT (0) -+#define PDP_VID3HCOEFF0_VID3HCOEFF0_LENGTH (32) -+#define PDP_VID3HCOEFF0_VID3HCOEFF0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HCOEFF1_OFFSET (0x05AC) -+ -+/* PDP, VID3HCOEFF1, VID3HCOEFF1 -+*/ -+#define PDP_VID3HCOEFF1_VID3HCOEFF1_MASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF1_VID3HCOEFF1_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF1_VID3HCOEFF1_SHIFT (0) -+#define PDP_VID3HCOEFF1_VID3HCOEFF1_LENGTH (32) -+#define PDP_VID3HCOEFF1_VID3HCOEFF1_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HCOEFF2_OFFSET (0x05B0) -+ -+/* PDP, VID3HCOEFF2, VID3HCOEFF2 -+*/ -+#define PDP_VID3HCOEFF2_VID3HCOEFF2_MASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF2_VID3HCOEFF2_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF2_VID3HCOEFF2_SHIFT (0) -+#define PDP_VID3HCOEFF2_VID3HCOEFF2_LENGTH (32) -+#define PDP_VID3HCOEFF2_VID3HCOEFF2_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HCOEFF3_OFFSET (0x05B4) -+ -+/* PDP, VID3HCOEFF3, VID3HCOEFF3 -+*/ -+#define PDP_VID3HCOEFF3_VID3HCOEFF3_MASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF3_VID3HCOEFF3_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF3_VID3HCOEFF3_SHIFT (0) -+#define PDP_VID3HCOEFF3_VID3HCOEFF3_LENGTH (32) -+#define PDP_VID3HCOEFF3_VID3HCOEFF3_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HCOEFF4_OFFSET (0x05B8) -+ -+/* PDP, VID3HCOEFF4, VID3HCOEFF4 -+*/ -+#define PDP_VID3HCOEFF4_VID3HCOEFF4_MASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF4_VID3HCOEFF4_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF4_VID3HCOEFF4_SHIFT (0) -+#define PDP_VID3HCOEFF4_VID3HCOEFF4_LENGTH (32) -+#define PDP_VID3HCOEFF4_VID3HCOEFF4_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HCOEFF5_OFFSET (0x05BC) -+ -+/* PDP, VID3HCOEFF5, VID3HCOEFF5 -+*/ -+#define PDP_VID3HCOEFF5_VID3HCOEFF5_MASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF5_VID3HCOEFF5_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF5_VID3HCOEFF5_SHIFT (0) -+#define PDP_VID3HCOEFF5_VID3HCOEFF5_LENGTH (32) -+#define PDP_VID3HCOEFF5_VID3HCOEFF5_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HCOEFF6_OFFSET (0x05C0) -+ -+/* PDP, VID3HCOEFF6, VID3HCOEFF6 -+*/ -+#define PDP_VID3HCOEFF6_VID3HCOEFF6_MASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF6_VID3HCOEFF6_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF6_VID3HCOEFF6_SHIFT (0) -+#define PDP_VID3HCOEFF6_VID3HCOEFF6_LENGTH (32) -+#define PDP_VID3HCOEFF6_VID3HCOEFF6_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HCOEFF7_OFFSET (0x05C4) -+ 
-+/* PDP, VID3HCOEFF7, VID3HCOEFF7 -+*/ -+#define PDP_VID3HCOEFF7_VID3HCOEFF7_MASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF7_VID3HCOEFF7_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF7_VID3HCOEFF7_SHIFT (0) -+#define PDP_VID3HCOEFF7_VID3HCOEFF7_LENGTH (32) -+#define PDP_VID3HCOEFF7_VID3HCOEFF7_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HCOEFF8_OFFSET (0x05C8) -+ -+/* PDP, VID3HCOEFF8, VID3HCOEFF8 -+*/ -+#define PDP_VID3HCOEFF8_VID3HCOEFF8_MASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF8_VID3HCOEFF8_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF8_VID3HCOEFF8_SHIFT (0) -+#define PDP_VID3HCOEFF8_VID3HCOEFF8_LENGTH (32) -+#define PDP_VID3HCOEFF8_VID3HCOEFF8_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HCOEFF9_OFFSET (0x05CC) -+ -+/* PDP, VID3HCOEFF9, VID3HCOEFF9 -+*/ -+#define PDP_VID3HCOEFF9_VID3HCOEFF9_MASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF9_VID3HCOEFF9_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF9_VID3HCOEFF9_SHIFT (0) -+#define PDP_VID3HCOEFF9_VID3HCOEFF9_LENGTH (32) -+#define PDP_VID3HCOEFF9_VID3HCOEFF9_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HCOEFF10_OFFSET (0x05D0) -+ -+/* PDP, VID3HCOEFF10, VID3HCOEFF10 -+*/ -+#define PDP_VID3HCOEFF10_VID3HCOEFF10_MASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF10_VID3HCOEFF10_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF10_VID3HCOEFF10_SHIFT (0) -+#define PDP_VID3HCOEFF10_VID3HCOEFF10_LENGTH (32) -+#define PDP_VID3HCOEFF10_VID3HCOEFF10_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HCOEFF11_OFFSET (0x05D4) -+ -+/* PDP, VID3HCOEFF11, VID3HCOEFF11 -+*/ -+#define PDP_VID3HCOEFF11_VID3HCOEFF11_MASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF11_VID3HCOEFF11_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF11_VID3HCOEFF11_SHIFT (0) -+#define PDP_VID3HCOEFF11_VID3HCOEFF11_LENGTH (32) -+#define PDP_VID3HCOEFF11_VID3HCOEFF11_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HCOEFF12_OFFSET (0x05D8) -+ -+/* PDP, VID3HCOEFF12, VID3HCOEFF12 -+*/ -+#define PDP_VID3HCOEFF12_VID3HCOEFF12_MASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF12_VID3HCOEFF12_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF12_VID3HCOEFF12_SHIFT (0) -+#define PDP_VID3HCOEFF12_VID3HCOEFF12_LENGTH (32) -+#define PDP_VID3HCOEFF12_VID3HCOEFF12_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HCOEFF13_OFFSET (0x05DC) -+ -+/* PDP, VID3HCOEFF13, VID3HCOEFF13 -+*/ -+#define PDP_VID3HCOEFF13_VID3HCOEFF13_MASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF13_VID3HCOEFF13_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF13_VID3HCOEFF13_SHIFT (0) -+#define PDP_VID3HCOEFF13_VID3HCOEFF13_LENGTH (32) -+#define PDP_VID3HCOEFF13_VID3HCOEFF13_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HCOEFF14_OFFSET (0x05E0) -+ -+/* PDP, VID3HCOEFF14, VID3HCOEFF14 -+*/ -+#define PDP_VID3HCOEFF14_VID3HCOEFF14_MASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF14_VID3HCOEFF14_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF14_VID3HCOEFF14_SHIFT (0) -+#define PDP_VID3HCOEFF14_VID3HCOEFF14_LENGTH (32) -+#define PDP_VID3HCOEFF14_VID3HCOEFF14_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HCOEFF15_OFFSET (0x05E4) -+ -+/* PDP, VID3HCOEFF15, VID3HCOEFF15 -+*/ -+#define PDP_VID3HCOEFF15_VID3HCOEFF15_MASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF15_VID3HCOEFF15_LSBMASK (0xFFFFFFFF) -+#define PDP_VID3HCOEFF15_VID3HCOEFF15_SHIFT (0) -+#define PDP_VID3HCOEFF15_VID3HCOEFF15_LENGTH (32) -+#define PDP_VID3HCOEFF15_VID3HCOEFF15_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3HCOEFF16_OFFSET (0x05E8) -+ -+/* PDP, VID3HCOEFF16, VID3HCOEFF16 -+*/ -+#define PDP_VID3HCOEFF16_VID3HCOEFF16_MASK (0x000000FF) -+#define PDP_VID3HCOEFF16_VID3HCOEFF16_LSBMASK (0x000000FF) -+#define PDP_VID3HCOEFF16_VID3HCOEFF16_SHIFT (0) 
-+#define PDP_VID3HCOEFF16_VID3HCOEFF16_LENGTH (8) -+#define PDP_VID3HCOEFF16_VID3HCOEFF16_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3SCALESIZE_OFFSET (0x05EC) -+ -+/* PDP, VID3SCALESIZE, VID3SCALEWIDTH -+*/ -+#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_MASK (0x0FFF0000) -+#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_LSBMASK (0x00000FFF) -+#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_SHIFT (16) -+#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_LENGTH (12) -+#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3SCALESIZE, VID3SCALEHEIGHT -+*/ -+#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_MASK (0x00000FFF) -+#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LSBMASK (0x00000FFF) -+#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SHIFT (0) -+#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LENGTH (12) -+#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4SCALECTRL_OFFSET (0x05F0) -+ -+/* PDP, VID4SCALECTRL, VID4HSCALEBP -+*/ -+#define PDP_VID4SCALECTRL_VID4HSCALEBP_MASK (0x80000000) -+#define PDP_VID4SCALECTRL_VID4HSCALEBP_LSBMASK (0x00000001) -+#define PDP_VID4SCALECTRL_VID4HSCALEBP_SHIFT (31) -+#define PDP_VID4SCALECTRL_VID4HSCALEBP_LENGTH (1) -+#define PDP_VID4SCALECTRL_VID4HSCALEBP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SCALECTRL, VID4VSCALEBP -+*/ -+#define PDP_VID4SCALECTRL_VID4VSCALEBP_MASK (0x40000000) -+#define PDP_VID4SCALECTRL_VID4VSCALEBP_LSBMASK (0x00000001) -+#define PDP_VID4SCALECTRL_VID4VSCALEBP_SHIFT (30) -+#define PDP_VID4SCALECTRL_VID4VSCALEBP_LENGTH (1) -+#define PDP_VID4SCALECTRL_VID4VSCALEBP_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SCALECTRL, VID4HSBEFOREVS -+*/ -+#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_MASK (0x20000000) -+#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_LSBMASK (0x00000001) -+#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_SHIFT (29) -+#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_LENGTH (1) -+#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SCALECTRL, VID4VSURUNCTRL -+*/ -+#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_MASK (0x08000000) -+#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_LSBMASK (0x00000001) -+#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_SHIFT (27) -+#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_LENGTH (1) -+#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SCALECTRL, VID4PAN_EN -+*/ -+#define PDP_VID4SCALECTRL_VID4PAN_EN_MASK (0x00040000) -+#define PDP_VID4SCALECTRL_VID4PAN_EN_LSBMASK (0x00000001) -+#define PDP_VID4SCALECTRL_VID4PAN_EN_SHIFT (18) -+#define PDP_VID4SCALECTRL_VID4PAN_EN_LENGTH (1) -+#define PDP_VID4SCALECTRL_VID4PAN_EN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SCALECTRL, VID4VORDER -+*/ -+#define PDP_VID4SCALECTRL_VID4VORDER_MASK (0x00030000) -+#define PDP_VID4SCALECTRL_VID4VORDER_LSBMASK (0x00000003) -+#define PDP_VID4SCALECTRL_VID4VORDER_SHIFT (16) -+#define PDP_VID4SCALECTRL_VID4VORDER_LENGTH (2) -+#define PDP_VID4SCALECTRL_VID4VORDER_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SCALECTRL, VID4VPITCH -+*/ -+#define PDP_VID4SCALECTRL_VID4VPITCH_MASK (0x0000FFFF) -+#define PDP_VID4SCALECTRL_VID4VPITCH_LSBMASK (0x0000FFFF) -+#define PDP_VID4SCALECTRL_VID4VPITCH_SHIFT (0) -+#define PDP_VID4SCALECTRL_VID4VPITCH_LENGTH (16) -+#define PDP_VID4SCALECTRL_VID4VPITCH_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4VSINIT_OFFSET (0x05F4) -+ -+/* PDP, VID4VSINIT, VID4VINITIAL1 -+*/ -+#define PDP_VID4VSINIT_VID4VINITIAL1_MASK (0xFFFF0000) -+#define PDP_VID4VSINIT_VID4VINITIAL1_LSBMASK (0x0000FFFF) -+#define PDP_VID4VSINIT_VID4VINITIAL1_SHIFT (16) -+#define PDP_VID4VSINIT_VID4VINITIAL1_LENGTH (16) 
-+#define PDP_VID4VSINIT_VID4VINITIAL1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4VSINIT, VID4VINITIAL0 -+*/ -+#define PDP_VID4VSINIT_VID4VINITIAL0_MASK (0x0000FFFF) -+#define PDP_VID4VSINIT_VID4VINITIAL0_LSBMASK (0x0000FFFF) -+#define PDP_VID4VSINIT_VID4VINITIAL0_SHIFT (0) -+#define PDP_VID4VSINIT_VID4VINITIAL0_LENGTH (16) -+#define PDP_VID4VSINIT_VID4VINITIAL0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4VCOEFF0_OFFSET (0x05F8) -+ -+/* PDP, VID4VCOEFF0, VID4VCOEFF0 -+*/ -+#define PDP_VID4VCOEFF0_VID4VCOEFF0_MASK (0xFFFFFFFF) -+#define PDP_VID4VCOEFF0_VID4VCOEFF0_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4VCOEFF0_VID4VCOEFF0_SHIFT (0) -+#define PDP_VID4VCOEFF0_VID4VCOEFF0_LENGTH (32) -+#define PDP_VID4VCOEFF0_VID4VCOEFF0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4VCOEFF1_OFFSET (0x05FC) -+ -+/* PDP, VID4VCOEFF1, VID4VCOEFF1 -+*/ -+#define PDP_VID4VCOEFF1_VID4VCOEFF1_MASK (0xFFFFFFFF) -+#define PDP_VID4VCOEFF1_VID4VCOEFF1_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4VCOEFF1_VID4VCOEFF1_SHIFT (0) -+#define PDP_VID4VCOEFF1_VID4VCOEFF1_LENGTH (32) -+#define PDP_VID4VCOEFF1_VID4VCOEFF1_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4VCOEFF2_OFFSET (0x0600) -+ -+/* PDP, VID4VCOEFF2, VID4VCOEFF2 -+*/ -+#define PDP_VID4VCOEFF2_VID4VCOEFF2_MASK (0xFFFFFFFF) -+#define PDP_VID4VCOEFF2_VID4VCOEFF2_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4VCOEFF2_VID4VCOEFF2_SHIFT (0) -+#define PDP_VID4VCOEFF2_VID4VCOEFF2_LENGTH (32) -+#define PDP_VID4VCOEFF2_VID4VCOEFF2_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4VCOEFF3_OFFSET (0x0604) -+ -+/* PDP, VID4VCOEFF3, VID4VCOEFF3 -+*/ -+#define PDP_VID4VCOEFF3_VID4VCOEFF3_MASK (0xFFFFFFFF) -+#define PDP_VID4VCOEFF3_VID4VCOEFF3_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4VCOEFF3_VID4VCOEFF3_SHIFT (0) -+#define PDP_VID4VCOEFF3_VID4VCOEFF3_LENGTH (32) -+#define PDP_VID4VCOEFF3_VID4VCOEFF3_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4VCOEFF4_OFFSET (0x0608) -+ -+/* PDP, VID4VCOEFF4, VID4VCOEFF4 -+*/ -+#define PDP_VID4VCOEFF4_VID4VCOEFF4_MASK (0xFFFFFFFF) -+#define PDP_VID4VCOEFF4_VID4VCOEFF4_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4VCOEFF4_VID4VCOEFF4_SHIFT (0) -+#define PDP_VID4VCOEFF4_VID4VCOEFF4_LENGTH (32) -+#define PDP_VID4VCOEFF4_VID4VCOEFF4_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4VCOEFF5_OFFSET (0x060C) -+ -+/* PDP, VID4VCOEFF5, VID4VCOEFF5 -+*/ -+#define PDP_VID4VCOEFF5_VID4VCOEFF5_MASK (0xFFFFFFFF) -+#define PDP_VID4VCOEFF5_VID4VCOEFF5_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4VCOEFF5_VID4VCOEFF5_SHIFT (0) -+#define PDP_VID4VCOEFF5_VID4VCOEFF5_LENGTH (32) -+#define PDP_VID4VCOEFF5_VID4VCOEFF5_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4VCOEFF6_OFFSET (0x0610) -+ -+/* PDP, VID4VCOEFF6, VID4VCOEFF6 -+*/ -+#define PDP_VID4VCOEFF6_VID4VCOEFF6_MASK (0xFFFFFFFF) -+#define PDP_VID4VCOEFF6_VID4VCOEFF6_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4VCOEFF6_VID4VCOEFF6_SHIFT (0) -+#define PDP_VID4VCOEFF6_VID4VCOEFF6_LENGTH (32) -+#define PDP_VID4VCOEFF6_VID4VCOEFF6_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4VCOEFF7_OFFSET (0x0614) -+ -+/* PDP, VID4VCOEFF7, VID4VCOEFF7 -+*/ -+#define PDP_VID4VCOEFF7_VID4VCOEFF7_MASK (0xFFFFFFFF) -+#define PDP_VID4VCOEFF7_VID4VCOEFF7_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4VCOEFF7_VID4VCOEFF7_SHIFT (0) -+#define PDP_VID4VCOEFF7_VID4VCOEFF7_LENGTH (32) -+#define PDP_VID4VCOEFF7_VID4VCOEFF7_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4VCOEFF8_OFFSET (0x0618) -+ -+/* PDP, VID4VCOEFF8, VID4VCOEFF8 -+*/ -+#define PDP_VID4VCOEFF8_VID4VCOEFF8_MASK (0x000000FF) -+#define PDP_VID4VCOEFF8_VID4VCOEFF8_LSBMASK (0x000000FF) -+#define PDP_VID4VCOEFF8_VID4VCOEFF8_SHIFT (0) -+#define 
PDP_VID4VCOEFF8_VID4VCOEFF8_LENGTH (8) -+#define PDP_VID4VCOEFF8_VID4VCOEFF8_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HSINIT_OFFSET (0x061C) -+ -+/* PDP, VID4HSINIT, VID4HINITIAL -+*/ -+#define PDP_VID4HSINIT_VID4HINITIAL_MASK (0xFFFF0000) -+#define PDP_VID4HSINIT_VID4HINITIAL_LSBMASK (0x0000FFFF) -+#define PDP_VID4HSINIT_VID4HINITIAL_SHIFT (16) -+#define PDP_VID4HSINIT_VID4HINITIAL_LENGTH (16) -+#define PDP_VID4HSINIT_VID4HINITIAL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4HSINIT, VID4HPITCH -+*/ -+#define PDP_VID4HSINIT_VID4HPITCH_MASK (0x0000FFFF) -+#define PDP_VID4HSINIT_VID4HPITCH_LSBMASK (0x0000FFFF) -+#define PDP_VID4HSINIT_VID4HPITCH_SHIFT (0) -+#define PDP_VID4HSINIT_VID4HPITCH_LENGTH (16) -+#define PDP_VID4HSINIT_VID4HPITCH_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HCOEFF0_OFFSET (0x0620) -+ -+/* PDP, VID4HCOEFF0, VID4HCOEFF0 -+*/ -+#define PDP_VID4HCOEFF0_VID4HCOEFF0_MASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF0_VID4HCOEFF0_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF0_VID4HCOEFF0_SHIFT (0) -+#define PDP_VID4HCOEFF0_VID4HCOEFF0_LENGTH (32) -+#define PDP_VID4HCOEFF0_VID4HCOEFF0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HCOEFF1_OFFSET (0x0624) -+ -+/* PDP, VID4HCOEFF1, VID4HCOEFF1 -+*/ -+#define PDP_VID4HCOEFF1_VID4HCOEFF1_MASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF1_VID4HCOEFF1_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF1_VID4HCOEFF1_SHIFT (0) -+#define PDP_VID4HCOEFF1_VID4HCOEFF1_LENGTH (32) -+#define PDP_VID4HCOEFF1_VID4HCOEFF1_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HCOEFF2_OFFSET (0x0628) -+ -+/* PDP, VID4HCOEFF2, VID4HCOEFF2 -+*/ -+#define PDP_VID4HCOEFF2_VID4HCOEFF2_MASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF2_VID4HCOEFF2_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF2_VID4HCOEFF2_SHIFT (0) -+#define PDP_VID4HCOEFF2_VID4HCOEFF2_LENGTH (32) -+#define PDP_VID4HCOEFF2_VID4HCOEFF2_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HCOEFF3_OFFSET (0x062C) -+ -+/* PDP, VID4HCOEFF3, VID4HCOEFF3 -+*/ -+#define PDP_VID4HCOEFF3_VID4HCOEFF3_MASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF3_VID4HCOEFF3_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF3_VID4HCOEFF3_SHIFT (0) -+#define PDP_VID4HCOEFF3_VID4HCOEFF3_LENGTH (32) -+#define PDP_VID4HCOEFF3_VID4HCOEFF3_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HCOEFF4_OFFSET (0x0630) -+ -+/* PDP, VID4HCOEFF4, VID4HCOEFF4 -+*/ -+#define PDP_VID4HCOEFF4_VID4HCOEFF4_MASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF4_VID4HCOEFF4_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF4_VID4HCOEFF4_SHIFT (0) -+#define PDP_VID4HCOEFF4_VID4HCOEFF4_LENGTH (32) -+#define PDP_VID4HCOEFF4_VID4HCOEFF4_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HCOEFF5_OFFSET (0x0634) -+ -+/* PDP, VID4HCOEFF5, VID4HCOEFF5 -+*/ -+#define PDP_VID4HCOEFF5_VID4HCOEFF5_MASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF5_VID4HCOEFF5_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF5_VID4HCOEFF5_SHIFT (0) -+#define PDP_VID4HCOEFF5_VID4HCOEFF5_LENGTH (32) -+#define PDP_VID4HCOEFF5_VID4HCOEFF5_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HCOEFF6_OFFSET (0x0638) -+ -+/* PDP, VID4HCOEFF6, VID4HCOEFF6 -+*/ -+#define PDP_VID4HCOEFF6_VID4HCOEFF6_MASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF6_VID4HCOEFF6_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF6_VID4HCOEFF6_SHIFT (0) -+#define PDP_VID4HCOEFF6_VID4HCOEFF6_LENGTH (32) -+#define PDP_VID4HCOEFF6_VID4HCOEFF6_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HCOEFF7_OFFSET (0x063C) -+ -+/* PDP, VID4HCOEFF7, VID4HCOEFF7 -+*/ -+#define PDP_VID4HCOEFF7_VID4HCOEFF7_MASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF7_VID4HCOEFF7_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF7_VID4HCOEFF7_SHIFT (0) 
-+#define PDP_VID4HCOEFF7_VID4HCOEFF7_LENGTH (32) -+#define PDP_VID4HCOEFF7_VID4HCOEFF7_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HCOEFF8_OFFSET (0x0640) -+ -+/* PDP, VID4HCOEFF8, VID4HCOEFF8 -+*/ -+#define PDP_VID4HCOEFF8_VID4HCOEFF8_MASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF8_VID4HCOEFF8_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF8_VID4HCOEFF8_SHIFT (0) -+#define PDP_VID4HCOEFF8_VID4HCOEFF8_LENGTH (32) -+#define PDP_VID4HCOEFF8_VID4HCOEFF8_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HCOEFF9_OFFSET (0x0644) -+ -+/* PDP, VID4HCOEFF9, VID4HCOEFF9 -+*/ -+#define PDP_VID4HCOEFF9_VID4HCOEFF9_MASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF9_VID4HCOEFF9_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF9_VID4HCOEFF9_SHIFT (0) -+#define PDP_VID4HCOEFF9_VID4HCOEFF9_LENGTH (32) -+#define PDP_VID4HCOEFF9_VID4HCOEFF9_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HCOEFF10_OFFSET (0x0648) -+ -+/* PDP, VID4HCOEFF10, VID4HCOEFF10 -+*/ -+#define PDP_VID4HCOEFF10_VID4HCOEFF10_MASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF10_VID4HCOEFF10_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF10_VID4HCOEFF10_SHIFT (0) -+#define PDP_VID4HCOEFF10_VID4HCOEFF10_LENGTH (32) -+#define PDP_VID4HCOEFF10_VID4HCOEFF10_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HCOEFF11_OFFSET (0x064C) -+ -+/* PDP, VID4HCOEFF11, VID4HCOEFF11 -+*/ -+#define PDP_VID4HCOEFF11_VID4HCOEFF11_MASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF11_VID4HCOEFF11_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF11_VID4HCOEFF11_SHIFT (0) -+#define PDP_VID4HCOEFF11_VID4HCOEFF11_LENGTH (32) -+#define PDP_VID4HCOEFF11_VID4HCOEFF11_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HCOEFF12_OFFSET (0x0650) -+ -+/* PDP, VID4HCOEFF12, VID4HCOEFF12 -+*/ -+#define PDP_VID4HCOEFF12_VID4HCOEFF12_MASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF12_VID4HCOEFF12_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF12_VID4HCOEFF12_SHIFT (0) -+#define PDP_VID4HCOEFF12_VID4HCOEFF12_LENGTH (32) -+#define PDP_VID4HCOEFF12_VID4HCOEFF12_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HCOEFF13_OFFSET (0x0654) -+ -+/* PDP, VID4HCOEFF13, VID4HCOEFF13 -+*/ -+#define PDP_VID4HCOEFF13_VID4HCOEFF13_MASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF13_VID4HCOEFF13_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF13_VID4HCOEFF13_SHIFT (0) -+#define PDP_VID4HCOEFF13_VID4HCOEFF13_LENGTH (32) -+#define PDP_VID4HCOEFF13_VID4HCOEFF13_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HCOEFF14_OFFSET (0x0658) -+ -+/* PDP, VID4HCOEFF14, VID4HCOEFF14 -+*/ -+#define PDP_VID4HCOEFF14_VID4HCOEFF14_MASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF14_VID4HCOEFF14_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF14_VID4HCOEFF14_SHIFT (0) -+#define PDP_VID4HCOEFF14_VID4HCOEFF14_LENGTH (32) -+#define PDP_VID4HCOEFF14_VID4HCOEFF14_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HCOEFF15_OFFSET (0x065C) -+ -+/* PDP, VID4HCOEFF15, VID4HCOEFF15 -+*/ -+#define PDP_VID4HCOEFF15_VID4HCOEFF15_MASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF15_VID4HCOEFF15_LSBMASK (0xFFFFFFFF) -+#define PDP_VID4HCOEFF15_VID4HCOEFF15_SHIFT (0) -+#define PDP_VID4HCOEFF15_VID4HCOEFF15_LENGTH (32) -+#define PDP_VID4HCOEFF15_VID4HCOEFF15_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4HCOEFF16_OFFSET (0x0660) -+ -+/* PDP, VID4HCOEFF16, VID4HCOEFF16 -+*/ -+#define PDP_VID4HCOEFF16_VID4HCOEFF16_MASK (0x000000FF) -+#define PDP_VID4HCOEFF16_VID4HCOEFF16_LSBMASK (0x000000FF) -+#define PDP_VID4HCOEFF16_VID4HCOEFF16_SHIFT (0) -+#define PDP_VID4HCOEFF16_VID4HCOEFF16_LENGTH (8) -+#define PDP_VID4HCOEFF16_VID4HCOEFF16_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4SCALESIZE_OFFSET (0x0664) -+ -+/* PDP, VID4SCALESIZE, VID4SCALEWIDTH -+*/ 
-+#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_MASK (0x0FFF0000) -+#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_LSBMASK (0x00000FFF) -+#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_SHIFT (16) -+#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_LENGTH (12) -+#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4SCALESIZE, VID4SCALEHEIGHT -+*/ -+#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_MASK (0x00000FFF) -+#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LSBMASK (0x00000FFF) -+#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SHIFT (0) -+#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LENGTH (12) -+#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_PORTER_BLND0_OFFSET (0x0668) -+ -+/* PDP, PORTER_BLND0, BLND0BLENDTYPE -+*/ -+#define PDP_PORTER_BLND0_BLND0BLENDTYPE_MASK (0x00000010) -+#define PDP_PORTER_BLND0_BLND0BLENDTYPE_LSBMASK (0x00000001) -+#define PDP_PORTER_BLND0_BLND0BLENDTYPE_SHIFT (4) -+#define PDP_PORTER_BLND0_BLND0BLENDTYPE_LENGTH (1) -+#define PDP_PORTER_BLND0_BLND0BLENDTYPE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PORTER_BLND0, BLND0PORTERMODE -+*/ -+#define PDP_PORTER_BLND0_BLND0PORTERMODE_MASK (0x0000000F) -+#define PDP_PORTER_BLND0_BLND0PORTERMODE_LSBMASK (0x0000000F) -+#define PDP_PORTER_BLND0_BLND0PORTERMODE_SHIFT (0) -+#define PDP_PORTER_BLND0_BLND0PORTERMODE_LENGTH (4) -+#define PDP_PORTER_BLND0_BLND0PORTERMODE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_PORTER_BLND1_OFFSET (0x066C) -+ -+/* PDP, PORTER_BLND1, BLND1BLENDTYPE -+*/ -+#define PDP_PORTER_BLND1_BLND1BLENDTYPE_MASK (0x00000010) -+#define PDP_PORTER_BLND1_BLND1BLENDTYPE_LSBMASK (0x00000001) -+#define PDP_PORTER_BLND1_BLND1BLENDTYPE_SHIFT (4) -+#define PDP_PORTER_BLND1_BLND1BLENDTYPE_LENGTH (1) -+#define PDP_PORTER_BLND1_BLND1BLENDTYPE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PORTER_BLND1, BLND1PORTERMODE -+*/ -+#define PDP_PORTER_BLND1_BLND1PORTERMODE_MASK (0x0000000F) -+#define PDP_PORTER_BLND1_BLND1PORTERMODE_LSBMASK (0x0000000F) -+#define PDP_PORTER_BLND1_BLND1PORTERMODE_SHIFT (0) -+#define PDP_PORTER_BLND1_BLND1PORTERMODE_LENGTH (4) -+#define PDP_PORTER_BLND1_BLND1PORTERMODE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_PORTER_BLND2_OFFSET (0x0670) -+ -+/* PDP, PORTER_BLND2, BLND2BLENDTYPE -+*/ -+#define PDP_PORTER_BLND2_BLND2BLENDTYPE_MASK (0x00000010) -+#define PDP_PORTER_BLND2_BLND2BLENDTYPE_LSBMASK (0x00000001) -+#define PDP_PORTER_BLND2_BLND2BLENDTYPE_SHIFT (4) -+#define PDP_PORTER_BLND2_BLND2BLENDTYPE_LENGTH (1) -+#define PDP_PORTER_BLND2_BLND2BLENDTYPE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PORTER_BLND2, BLND2PORTERMODE -+*/ -+#define PDP_PORTER_BLND2_BLND2PORTERMODE_MASK (0x0000000F) -+#define PDP_PORTER_BLND2_BLND2PORTERMODE_LSBMASK (0x0000000F) -+#define PDP_PORTER_BLND2_BLND2PORTERMODE_SHIFT (0) -+#define PDP_PORTER_BLND2_BLND2PORTERMODE_LENGTH (4) -+#define PDP_PORTER_BLND2_BLND2PORTERMODE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_PORTER_BLND3_OFFSET (0x0674) -+ -+/* PDP, PORTER_BLND3, BLND3BLENDTYPE -+*/ -+#define PDP_PORTER_BLND3_BLND3BLENDTYPE_MASK (0x00000010) -+#define PDP_PORTER_BLND3_BLND3BLENDTYPE_LSBMASK (0x00000001) -+#define PDP_PORTER_BLND3_BLND3BLENDTYPE_SHIFT (4) -+#define PDP_PORTER_BLND3_BLND3BLENDTYPE_LENGTH (1) -+#define PDP_PORTER_BLND3_BLND3BLENDTYPE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PORTER_BLND3, BLND3PORTERMODE -+*/ -+#define PDP_PORTER_BLND3_BLND3PORTERMODE_MASK (0x0000000F) -+#define PDP_PORTER_BLND3_BLND3PORTERMODE_LSBMASK (0x0000000F) -+#define PDP_PORTER_BLND3_BLND3PORTERMODE_SHIFT (0) -+#define PDP_PORTER_BLND3_BLND3PORTERMODE_LENGTH (4) -+#define 
PDP_PORTER_BLND3_BLND3PORTERMODE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_PORTER_BLND4_OFFSET (0x0678) -+ -+/* PDP, PORTER_BLND4, BLND4BLENDTYPE -+*/ -+#define PDP_PORTER_BLND4_BLND4BLENDTYPE_MASK (0x00000010) -+#define PDP_PORTER_BLND4_BLND4BLENDTYPE_LSBMASK (0x00000001) -+#define PDP_PORTER_BLND4_BLND4BLENDTYPE_SHIFT (4) -+#define PDP_PORTER_BLND4_BLND4BLENDTYPE_LENGTH (1) -+#define PDP_PORTER_BLND4_BLND4BLENDTYPE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PORTER_BLND4, BLND4PORTERMODE -+*/ -+#define PDP_PORTER_BLND4_BLND4PORTERMODE_MASK (0x0000000F) -+#define PDP_PORTER_BLND4_BLND4PORTERMODE_LSBMASK (0x0000000F) -+#define PDP_PORTER_BLND4_BLND4PORTERMODE_SHIFT (0) -+#define PDP_PORTER_BLND4_BLND4PORTERMODE_LENGTH (4) -+#define PDP_PORTER_BLND4_BLND4PORTERMODE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_PORTER_BLND5_OFFSET (0x067C) -+ -+/* PDP, PORTER_BLND5, BLND5BLENDTYPE -+*/ -+#define PDP_PORTER_BLND5_BLND5BLENDTYPE_MASK (0x00000010) -+#define PDP_PORTER_BLND5_BLND5BLENDTYPE_LSBMASK (0x00000001) -+#define PDP_PORTER_BLND5_BLND5BLENDTYPE_SHIFT (4) -+#define PDP_PORTER_BLND5_BLND5BLENDTYPE_LENGTH (1) -+#define PDP_PORTER_BLND5_BLND5BLENDTYPE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PORTER_BLND5, BLND5PORTERMODE -+*/ -+#define PDP_PORTER_BLND5_BLND5PORTERMODE_MASK (0x0000000F) -+#define PDP_PORTER_BLND5_BLND5PORTERMODE_LSBMASK (0x0000000F) -+#define PDP_PORTER_BLND5_BLND5PORTERMODE_SHIFT (0) -+#define PDP_PORTER_BLND5_BLND5PORTERMODE_LENGTH (4) -+#define PDP_PORTER_BLND5_BLND5PORTERMODE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_PORTER_BLND6_OFFSET (0x0680) -+ -+/* PDP, PORTER_BLND6, BLND6BLENDTYPE -+*/ -+#define PDP_PORTER_BLND6_BLND6BLENDTYPE_MASK (0x00000010) -+#define PDP_PORTER_BLND6_BLND6BLENDTYPE_LSBMASK (0x00000001) -+#define PDP_PORTER_BLND6_BLND6BLENDTYPE_SHIFT (4) -+#define PDP_PORTER_BLND6_BLND6BLENDTYPE_LENGTH (1) -+#define PDP_PORTER_BLND6_BLND6BLENDTYPE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PORTER_BLND6, BLND6PORTERMODE -+*/ -+#define PDP_PORTER_BLND6_BLND6PORTERMODE_MASK (0x0000000F) -+#define PDP_PORTER_BLND6_BLND6PORTERMODE_LSBMASK (0x0000000F) -+#define PDP_PORTER_BLND6_BLND6PORTERMODE_SHIFT (0) -+#define PDP_PORTER_BLND6_BLND6PORTERMODE_LENGTH (4) -+#define PDP_PORTER_BLND6_BLND6PORTERMODE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_PORTER_BLND7_OFFSET (0x0684) -+ -+/* PDP, PORTER_BLND7, BLND7BLENDTYPE -+*/ -+#define PDP_PORTER_BLND7_BLND7BLENDTYPE_MASK (0x00000010) -+#define PDP_PORTER_BLND7_BLND7BLENDTYPE_LSBMASK (0x00000001) -+#define PDP_PORTER_BLND7_BLND7BLENDTYPE_SHIFT (4) -+#define PDP_PORTER_BLND7_BLND7BLENDTYPE_LENGTH (1) -+#define PDP_PORTER_BLND7_BLND7BLENDTYPE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PORTER_BLND7, BLND7PORTERMODE -+*/ -+#define PDP_PORTER_BLND7_BLND7PORTERMODE_MASK (0x0000000F) -+#define PDP_PORTER_BLND7_BLND7PORTERMODE_LSBMASK (0x0000000F) -+#define PDP_PORTER_BLND7_BLND7PORTERMODE_SHIFT (0) -+#define PDP_PORTER_BLND7_BLND7PORTERMODE_LENGTH (4) -+#define PDP_PORTER_BLND7_BLND7PORTERMODE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06C8) -+ -+/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_TRANS -+*/ -+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_MASK (0x03FF0000) -+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) -+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SHIFT (16) -+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LENGTH (10) -+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE -+ -+/* 
PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_OPAQUE -+*/ -+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) -+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) -+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SHIFT (0) -+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LENGTH (10) -+#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06CC) -+ -+/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMAX -+*/ -+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_MASK (0x03FF0000) -+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LSBMASK (0x000003FF) -+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SHIFT (16) -+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LENGTH (10) -+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMIN -+*/ -+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_MASK (0x000003FF) -+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LSBMASK (0x000003FF) -+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SHIFT (0) -+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LENGTH (10) -+#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1LUMAKEY_C_RG_OFFSET (0x06D0) -+ -+/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_R -+*/ -+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_MASK (0x0FFF0000) -+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LSBMASK (0x00000FFF) -+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SHIFT (16) -+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LENGTH (12) -+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_G -+*/ -+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_MASK (0x00000FFF) -+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LSBMASK (0x00000FFF) -+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SHIFT (0) -+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LENGTH (12) -+#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1LUMAKEY_C_B_OFFSET (0x06D4) -+ -+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYALPHAMULT -+*/ -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_MASK (0x20000000) -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LSBMASK (0x00000001) -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SHIFT (29) -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LENGTH (1) -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYEN -+*/ -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_MASK (0x10000000) -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LSBMASK (0x00000001) -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SHIFT (28) -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LENGTH (1) -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYOUTOFF -+*/ -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_MASK (0x03FF0000) -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LSBMASK (0x000003FF) -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SHIFT (16) -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LENGTH (10) -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYC_B -+*/ -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_MASK (0x00000FFF) -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LSBMASK (0x00000FFF) -+#define 
PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SHIFT (0) -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LENGTH (12) -+#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06D8) -+ -+/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_TRANS -+*/ -+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_MASK (0x03FF0000) -+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) -+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SHIFT (16) -+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LENGTH (10) -+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_OPAQUE -+*/ -+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) -+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) -+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SHIFT (0) -+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LENGTH (10) -+#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06DC) -+ -+/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMAX -+*/ -+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_MASK (0x03FF0000) -+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LSBMASK (0x000003FF) -+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SHIFT (16) -+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LENGTH (10) -+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMIN -+*/ -+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_MASK (0x000003FF) -+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LSBMASK (0x000003FF) -+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SHIFT (0) -+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LENGTH (10) -+#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2LUMAKEY_C_RG_OFFSET (0x06E0) -+ -+/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_R -+*/ -+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_MASK (0x0FFF0000) -+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LSBMASK (0x00000FFF) -+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SHIFT (16) -+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LENGTH (12) -+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_G -+*/ -+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_MASK (0x00000FFF) -+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LSBMASK (0x00000FFF) -+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SHIFT (0) -+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LENGTH (12) -+#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2LUMAKEY_C_B_OFFSET (0x06E4) -+ -+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYALPHAMULT -+*/ -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_MASK (0x20000000) -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LSBMASK (0x00000001) -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SHIFT (29) -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LENGTH (1) -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYEN -+*/ -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_MASK (0x10000000) -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LSBMASK (0x00000001) -+#define 
PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SHIFT (28) -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LENGTH (1) -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYOUTOFF -+*/ -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_MASK (0x03FF0000) -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LSBMASK (0x000003FF) -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SHIFT (16) -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LENGTH (10) -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYC_B -+*/ -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_MASK (0x00000FFF) -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LSBMASK (0x00000FFF) -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SHIFT (0) -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LENGTH (12) -+#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06E8) -+ -+/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_TRANS -+*/ -+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_MASK (0x03FF0000) -+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) -+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SHIFT (16) -+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LENGTH (10) -+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_OPAQUE -+*/ -+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) -+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) -+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SHIFT (0) -+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LENGTH (10) -+#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06EC) -+ -+/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMAX -+*/ -+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_MASK (0x03FF0000) -+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LSBMASK (0x000003FF) -+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SHIFT (16) -+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LENGTH (10) -+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMIN -+*/ -+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_MASK (0x000003FF) -+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LSBMASK (0x000003FF) -+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SHIFT (0) -+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LENGTH (10) -+#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3LUMAKEY_C_RG_OFFSET (0x06F0) -+ -+/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_R -+*/ -+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_MASK (0x0FFF0000) -+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LSBMASK (0x00000FFF) -+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SHIFT (16) -+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LENGTH (12) -+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_G -+*/ -+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_MASK (0x00000FFF) -+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LSBMASK (0x00000FFF) -+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SHIFT (0) -+#define 
PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LENGTH (12) -+#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3LUMAKEY_C_B_OFFSET (0x06F4) -+ -+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYALPHAMULT -+*/ -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_MASK (0x20000000) -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LSBMASK (0x00000001) -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SHIFT (29) -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LENGTH (1) -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYEN -+*/ -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_MASK (0x10000000) -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LSBMASK (0x00000001) -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SHIFT (28) -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LENGTH (1) -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYOUTOFF -+*/ -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_MASK (0x03FF0000) -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LSBMASK (0x000003FF) -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SHIFT (16) -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LENGTH (10) -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYC_B -+*/ -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_MASK (0x00000FFF) -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LSBMASK (0x00000FFF) -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SHIFT (0) -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LENGTH (12) -+#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06F8) -+ -+/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_TRANS -+*/ -+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_MASK (0x03FF0000) -+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) -+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SHIFT (16) -+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LENGTH (10) -+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_OPAQUE -+*/ -+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) -+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) -+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SHIFT (0) -+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LENGTH (10) -+#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06FC) -+ -+/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMAX -+*/ -+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_MASK (0x03FF0000) -+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LSBMASK (0x000003FF) -+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SHIFT (16) -+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LENGTH (10) -+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMIN -+*/ -+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_MASK (0x000003FF) -+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LSBMASK (0x000003FF) -+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SHIFT (0) -+#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LENGTH (10) -+#define 
PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4LUMAKEY_C_RG_OFFSET (0x0700) -+ -+/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_R -+*/ -+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_MASK (0x0FFF0000) -+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LSBMASK (0x00000FFF) -+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SHIFT (16) -+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LENGTH (12) -+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_G -+*/ -+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_MASK (0x00000FFF) -+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LSBMASK (0x00000FFF) -+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SHIFT (0) -+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LENGTH (12) -+#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4LUMAKEY_C_B_OFFSET (0x0704) -+ -+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYALPHAMULT -+*/ -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_MASK (0x20000000) -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LSBMASK (0x00000001) -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SHIFT (29) -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LENGTH (1) -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYEN -+*/ -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_MASK (0x10000000) -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LSBMASK (0x00000001) -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SHIFT (28) -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LENGTH (1) -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYOUTOFF -+*/ -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_MASK (0x03FF0000) -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LSBMASK (0x000003FF) -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SHIFT (16) -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LENGTH (10) -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYC_B -+*/ -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_MASK (0x00000FFF) -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LSBMASK (0x00000FFF) -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SHIFT (0) -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LENGTH (12) -+#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_CSCCOEFF0_OFFSET (0x0708) -+ -+/* PDP, CSCCOEFF0, CSCCOEFFRU -+*/ -+#define PDP_CSCCOEFF0_CSCCOEFFRU_MASK (0x003FF800) -+#define PDP_CSCCOEFF0_CSCCOEFFRU_LSBMASK (0x000007FF) -+#define PDP_CSCCOEFF0_CSCCOEFFRU_SHIFT (11) -+#define PDP_CSCCOEFF0_CSCCOEFFRU_LENGTH (11) -+#define PDP_CSCCOEFF0_CSCCOEFFRU_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CSCCOEFF0, CSCCOEFFRY -+*/ -+#define PDP_CSCCOEFF0_CSCCOEFFRY_MASK (0x000007FF) -+#define PDP_CSCCOEFF0_CSCCOEFFRY_LSBMASK (0x000007FF) -+#define PDP_CSCCOEFF0_CSCCOEFFRY_SHIFT (0) -+#define PDP_CSCCOEFF0_CSCCOEFFRY_LENGTH (11) -+#define PDP_CSCCOEFF0_CSCCOEFFRY_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_CSCCOEFF1_OFFSET (0x070C) -+ -+/* PDP, CSCCOEFF1, CSCCOEFFGY -+*/ -+#define PDP_CSCCOEFF1_CSCCOEFFGY_MASK (0x003FF800) -+#define PDP_CSCCOEFF1_CSCCOEFFGY_LSBMASK (0x000007FF) -+#define PDP_CSCCOEFF1_CSCCOEFFGY_SHIFT (11) -+#define PDP_CSCCOEFF1_CSCCOEFFGY_LENGTH (11) -+#define PDP_CSCCOEFF1_CSCCOEFFGY_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CSCCOEFF1, CSCCOEFFRV -+*/ -+#define PDP_CSCCOEFF1_CSCCOEFFRV_MASK (0x000007FF) -+#define PDP_CSCCOEFF1_CSCCOEFFRV_LSBMASK (0x000007FF) -+#define 
PDP_CSCCOEFF1_CSCCOEFFRV_SHIFT (0) -+#define PDP_CSCCOEFF1_CSCCOEFFRV_LENGTH (11) -+#define PDP_CSCCOEFF1_CSCCOEFFRV_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_CSCCOEFF2_OFFSET (0x0710) -+ -+/* PDP, CSCCOEFF2, CSCCOEFFGV -+*/ -+#define PDP_CSCCOEFF2_CSCCOEFFGV_MASK (0x003FF800) -+#define PDP_CSCCOEFF2_CSCCOEFFGV_LSBMASK (0x000007FF) -+#define PDP_CSCCOEFF2_CSCCOEFFGV_SHIFT (11) -+#define PDP_CSCCOEFF2_CSCCOEFFGV_LENGTH (11) -+#define PDP_CSCCOEFF2_CSCCOEFFGV_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CSCCOEFF2, CSCCOEFFGU -+*/ -+#define PDP_CSCCOEFF2_CSCCOEFFGU_MASK (0x000007FF) -+#define PDP_CSCCOEFF2_CSCCOEFFGU_LSBMASK (0x000007FF) -+#define PDP_CSCCOEFF2_CSCCOEFFGU_SHIFT (0) -+#define PDP_CSCCOEFF2_CSCCOEFFGU_LENGTH (11) -+#define PDP_CSCCOEFF2_CSCCOEFFGU_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_CSCCOEFF3_OFFSET (0x0714) -+ -+/* PDP, CSCCOEFF3, CSCCOEFFBU -+*/ -+#define PDP_CSCCOEFF3_CSCCOEFFBU_MASK (0x003FF800) -+#define PDP_CSCCOEFF3_CSCCOEFFBU_LSBMASK (0x000007FF) -+#define PDP_CSCCOEFF3_CSCCOEFFBU_SHIFT (11) -+#define PDP_CSCCOEFF3_CSCCOEFFBU_LENGTH (11) -+#define PDP_CSCCOEFF3_CSCCOEFFBU_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CSCCOEFF3, CSCCOEFFBY -+*/ -+#define PDP_CSCCOEFF3_CSCCOEFFBY_MASK (0x000007FF) -+#define PDP_CSCCOEFF3_CSCCOEFFBY_LSBMASK (0x000007FF) -+#define PDP_CSCCOEFF3_CSCCOEFFBY_SHIFT (0) -+#define PDP_CSCCOEFF3_CSCCOEFFBY_LENGTH (11) -+#define PDP_CSCCOEFF3_CSCCOEFFBY_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_CSCCOEFF4_OFFSET (0x0718) -+ -+/* PDP, CSCCOEFF4, CSCCOEFFBV -+*/ -+#define PDP_CSCCOEFF4_CSCCOEFFBV_MASK (0x000007FF) -+#define PDP_CSCCOEFF4_CSCCOEFFBV_LSBMASK (0x000007FF) -+#define PDP_CSCCOEFF4_CSCCOEFFBV_SHIFT (0) -+#define PDP_CSCCOEFF4_CSCCOEFFBV_LENGTH (11) -+#define PDP_CSCCOEFF4_CSCCOEFFBV_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BGNDCOL_AR_OFFSET (0x071C) -+ -+/* PDP, BGNDCOL_AR, BGNDCOL_A -+*/ -+#define PDP_BGNDCOL_AR_BGNDCOL_A_MASK (0x03FF0000) -+#define PDP_BGNDCOL_AR_BGNDCOL_A_LSBMASK (0x000003FF) -+#define PDP_BGNDCOL_AR_BGNDCOL_A_SHIFT (16) -+#define PDP_BGNDCOL_AR_BGNDCOL_A_LENGTH (10) -+#define PDP_BGNDCOL_AR_BGNDCOL_A_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, BGNDCOL_AR, BGNDCOL_R -+*/ -+#define PDP_BGNDCOL_AR_BGNDCOL_R_MASK (0x000003FF) -+#define PDP_BGNDCOL_AR_BGNDCOL_R_LSBMASK (0x000003FF) -+#define PDP_BGNDCOL_AR_BGNDCOL_R_SHIFT (0) -+#define PDP_BGNDCOL_AR_BGNDCOL_R_LENGTH (10) -+#define PDP_BGNDCOL_AR_BGNDCOL_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BGNDCOL_GB_OFFSET (0x0720) -+ -+/* PDP, BGNDCOL_GB, BGNDCOL_G -+*/ -+#define PDP_BGNDCOL_GB_BGNDCOL_G_MASK (0x03FF0000) -+#define PDP_BGNDCOL_GB_BGNDCOL_G_LSBMASK (0x000003FF) -+#define PDP_BGNDCOL_GB_BGNDCOL_G_SHIFT (16) -+#define PDP_BGNDCOL_GB_BGNDCOL_G_LENGTH (10) -+#define PDP_BGNDCOL_GB_BGNDCOL_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, BGNDCOL_GB, BGNDCOL_B -+*/ -+#define PDP_BGNDCOL_GB_BGNDCOL_B_MASK (0x000003FF) -+#define PDP_BGNDCOL_GB_BGNDCOL_B_LSBMASK (0x000003FF) -+#define PDP_BGNDCOL_GB_BGNDCOL_B_SHIFT (0) -+#define PDP_BGNDCOL_GB_BGNDCOL_B_LENGTH (10) -+#define PDP_BGNDCOL_GB_BGNDCOL_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BORDCOL_R_OFFSET (0x0724) -+ -+/* PDP, BORDCOL_R, BORDCOL_R -+*/ -+#define PDP_BORDCOL_R_BORDCOL_R_MASK (0x000003FF) -+#define PDP_BORDCOL_R_BORDCOL_R_LSBMASK (0x000003FF) -+#define PDP_BORDCOL_R_BORDCOL_R_SHIFT (0) -+#define PDP_BORDCOL_R_BORDCOL_R_LENGTH (10) -+#define PDP_BORDCOL_R_BORDCOL_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BORDCOL_GB_OFFSET (0x0728) -+ -+/* PDP, BORDCOL_GB, BORDCOL_G -+*/ -+#define PDP_BORDCOL_GB_BORDCOL_G_MASK (0x03FF0000) -+#define 
PDP_BORDCOL_GB_BORDCOL_G_LSBMASK (0x000003FF) -+#define PDP_BORDCOL_GB_BORDCOL_G_SHIFT (16) -+#define PDP_BORDCOL_GB_BORDCOL_G_LENGTH (10) -+#define PDP_BORDCOL_GB_BORDCOL_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, BORDCOL_GB, BORDCOL_B -+*/ -+#define PDP_BORDCOL_GB_BORDCOL_B_MASK (0x000003FF) -+#define PDP_BORDCOL_GB_BORDCOL_B_LSBMASK (0x000003FF) -+#define PDP_BORDCOL_GB_BORDCOL_B_SHIFT (0) -+#define PDP_BORDCOL_GB_BORDCOL_B_LENGTH (10) -+#define PDP_BORDCOL_GB_BORDCOL_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_LINESTAT_OFFSET (0x0734) -+ -+/* PDP, LINESTAT, LINENO -+*/ -+#define PDP_LINESTAT_LINENO_MASK (0x00001FFF) -+#define PDP_LINESTAT_LINENO_LSBMASK (0x00001FFF) -+#define PDP_LINESTAT_LINENO_SHIFT (0) -+#define PDP_LINESTAT_LINENO_LENGTH (13) -+#define PDP_LINESTAT_LINENO_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_CR_PDP_PROCAMP_C11C12_OFFSET (0x0738) -+ -+/* PDP, CR_PDP_PROCAMP_C11C12, CR_PROCAMP_C12 -+*/ -+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_MASK (0x3FFF0000) -+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LSBMASK (0x00003FFF) -+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SHIFT (16) -+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LENGTH (14) -+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CR_PDP_PROCAMP_C11C12, CR_PROCAMP_C11 -+*/ -+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_MASK (0x00003FFF) -+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LSBMASK (0x00003FFF) -+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SHIFT (0) -+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LENGTH (14) -+#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_CR_PDP_PROCAMP_C13C21_OFFSET (0x073C) -+ -+/* PDP, CR_PDP_PROCAMP_C13C21, CR_PROCAMP_C21 -+*/ -+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_MASK (0x3FFF0000) -+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LSBMASK (0x00003FFF) -+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SHIFT (16) -+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LENGTH (14) -+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CR_PDP_PROCAMP_C13C21, CR_PROCAMP_C13 -+*/ -+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_MASK (0x00003FFF) -+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LSBMASK (0x00003FFF) -+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SHIFT (0) -+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LENGTH (14) -+#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_CR_PDP_PROCAMP_C22C23_OFFSET (0x0740) -+ -+/* PDP, CR_PDP_PROCAMP_C22C23, CR_PROCAMP_C23 -+*/ -+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_MASK (0x3FFF0000) -+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LSBMASK (0x00003FFF) -+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SHIFT (16) -+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LENGTH (14) -+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CR_PDP_PROCAMP_C22C23, CR_PROCAMP_C22 -+*/ -+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_MASK (0x00003FFF) -+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LSBMASK (0x00003FFF) -+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SHIFT (0) -+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LENGTH (14) -+#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_CR_PDP_PROCAMP_C31C32_OFFSET (0x0744) -+ -+/* PDP, CR_PDP_PROCAMP_C31C32, CR_PROCAMP_C32 -+*/ -+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_MASK (0x3FFF0000) -+#define 
PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LSBMASK (0x00003FFF) -+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SHIFT (16) -+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LENGTH (14) -+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CR_PDP_PROCAMP_C31C32, CR_PROCAMP_C31 -+*/ -+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_MASK (0x00003FFF) -+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LSBMASK (0x00003FFF) -+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SHIFT (0) -+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LENGTH (14) -+#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_CR_PDP_PROCAMP_C33_OFFSET (0x0748) -+ -+/* PDP, CR_PDP_PROCAMP_C33, CR_PROCAMP_C33 -+*/ -+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_MASK (0x3FFF0000) -+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_LSBMASK (0x00003FFF) -+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_SHIFT (16) -+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_LENGTH (14) -+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CR_PDP_PROCAMP_C33, CR_PROCAMP_RANGE -+*/ -+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_MASK (0x00000030) -+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LSBMASK (0x00000003) -+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SHIFT (4) -+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LENGTH (2) -+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CR_PDP_PROCAMP_C33, CR_PROCAMP_EN -+*/ -+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_MASK (0x00000001) -+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_LSBMASK (0x00000001) -+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_SHIFT (0) -+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_LENGTH (1) -+#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_OFFSET (0x074C) -+ -+/* PDP, CR_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_G -+*/ -+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_MASK (0x0FFF0000) -+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LSBMASK (0x00000FFF) -+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SHIFT (16) -+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LENGTH (12) -+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CR_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_B -+*/ -+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_MASK (0x00000FFF) -+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LSBMASK (0x00000FFF) -+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SHIFT (0) -+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LENGTH (12) -+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_OFFSET (0x0750) -+ -+/* PDP, CR_PDP_PROCAMP_OUTOFFSET_R, CR_PROCAMP_OUTOFF_R -+*/ -+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_MASK (0x00000FFF) -+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LSBMASK (0x00000FFF) -+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SHIFT (0) -+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LENGTH (12) -+#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_OFFSET (0x0754) -+ -+/* PDP, CR_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_G -+*/ -+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_MASK (0x03FF0000) 
-+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LSBMASK (0x000003FF) -+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SHIFT (16) -+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LENGTH (10) -+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, CR_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_B -+*/ -+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_MASK (0x000003FF) -+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LSBMASK (0x000003FF) -+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SHIFT (0) -+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LENGTH (10) -+#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_OFFSET (0x0758) -+ -+/* PDP, CR_PDP_PROCAMP_INOFFSET_R, CR_PROCAMP_INOFF_R -+*/ -+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_MASK (0x000003FF) -+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LSBMASK (0x000003FF) -+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SHIFT (0) -+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LENGTH (10) -+#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_SIGNAT_R_OFFSET (0x075C) -+ -+/* PDP, SIGNAT_R, SIGNATURE_R -+*/ -+#define PDP_SIGNAT_R_SIGNATURE_R_MASK (0x000003FF) -+#define PDP_SIGNAT_R_SIGNATURE_R_LSBMASK (0x000003FF) -+#define PDP_SIGNAT_R_SIGNATURE_R_SHIFT (0) -+#define PDP_SIGNAT_R_SIGNATURE_R_LENGTH (10) -+#define PDP_SIGNAT_R_SIGNATURE_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_SIGNAT_GB_OFFSET (0x0760) -+ -+/* PDP, SIGNAT_GB, SIGNATURE_G -+*/ -+#define PDP_SIGNAT_GB_SIGNATURE_G_MASK (0x03FF0000) -+#define PDP_SIGNAT_GB_SIGNATURE_G_LSBMASK (0x000003FF) -+#define PDP_SIGNAT_GB_SIGNATURE_G_SHIFT (16) -+#define PDP_SIGNAT_GB_SIGNATURE_G_LENGTH (10) -+#define PDP_SIGNAT_GB_SIGNATURE_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SIGNAT_GB, SIGNATURE_B -+*/ -+#define PDP_SIGNAT_GB_SIGNATURE_B_MASK (0x000003FF) -+#define PDP_SIGNAT_GB_SIGNATURE_B_LSBMASK (0x000003FF) -+#define PDP_SIGNAT_GB_SIGNATURE_B_SHIFT (0) -+#define PDP_SIGNAT_GB_SIGNATURE_B_LENGTH (10) -+#define PDP_SIGNAT_GB_SIGNATURE_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_REGISTER_UPDATE_CTRL_OFFSET (0x0764) -+ -+/* PDP, REGISTER_UPDATE_CTRL, BYPASS_DOUBLE_BUFFERING -+*/ -+#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_MASK (0x00000004) -+#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LSBMASK (0x00000001) -+#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SHIFT (2) -+#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LENGTH (1) -+#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, REGISTER_UPDATE_CTRL, REGISTERS_VALID -+*/ -+#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK (0x00000002) -+#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LSBMASK (0x00000001) -+#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT (1) -+#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LENGTH (1) -+#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, REGISTER_UPDATE_CTRL, USE_VBLANK -+*/ -+#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_MASK (0x00000001) -+#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LSBMASK (0x00000001) -+#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SHIFT (0) -+#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LENGTH (1) -+#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_REGISTER_UPDATE_STATUS_OFFSET 
(0x0768) -+ -+/* PDP, REGISTER_UPDATE_STATUS, REGISTERS_UPDATED -+*/ -+#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_MASK (0x00000002) -+#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LSBMASK (0x00000001) -+#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SHIFT (1) -+#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LENGTH (1) -+#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DBGCTRL_OFFSET (0x076C) -+ -+/* PDP, DBGCTRL, DBG_READ -+*/ -+#define PDP_DBGCTRL_DBG_READ_MASK (0x00000002) -+#define PDP_DBGCTRL_DBG_READ_LSBMASK (0x00000001) -+#define PDP_DBGCTRL_DBG_READ_SHIFT (1) -+#define PDP_DBGCTRL_DBG_READ_LENGTH (1) -+#define PDP_DBGCTRL_DBG_READ_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DBGCTRL, DBG_ENAB -+*/ -+#define PDP_DBGCTRL_DBG_ENAB_MASK (0x00000001) -+#define PDP_DBGCTRL_DBG_ENAB_LSBMASK (0x00000001) -+#define PDP_DBGCTRL_DBG_ENAB_SHIFT (0) -+#define PDP_DBGCTRL_DBG_ENAB_LENGTH (1) -+#define PDP_DBGCTRL_DBG_ENAB_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DBGDATA_R_OFFSET (0x0770) -+ -+/* PDP, DBGDATA_R, DBG_DATA_R -+*/ -+#define PDP_DBGDATA_R_DBG_DATA_R_MASK (0x000003FF) -+#define PDP_DBGDATA_R_DBG_DATA_R_LSBMASK (0x000003FF) -+#define PDP_DBGDATA_R_DBG_DATA_R_SHIFT (0) -+#define PDP_DBGDATA_R_DBG_DATA_R_LENGTH (10) -+#define PDP_DBGDATA_R_DBG_DATA_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DBGDATA_GB_OFFSET (0x0774) -+ -+/* PDP, DBGDATA_GB, DBG_DATA_G -+*/ -+#define PDP_DBGDATA_GB_DBG_DATA_G_MASK (0x03FF0000) -+#define PDP_DBGDATA_GB_DBG_DATA_G_LSBMASK (0x000003FF) -+#define PDP_DBGDATA_GB_DBG_DATA_G_SHIFT (16) -+#define PDP_DBGDATA_GB_DBG_DATA_G_LENGTH (10) -+#define PDP_DBGDATA_GB_DBG_DATA_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DBGDATA_GB, DBG_DATA_B -+*/ -+#define PDP_DBGDATA_GB_DBG_DATA_B_MASK (0x000003FF) -+#define PDP_DBGDATA_GB_DBG_DATA_B_LSBMASK (0x000003FF) -+#define PDP_DBGDATA_GB_DBG_DATA_B_SHIFT (0) -+#define PDP_DBGDATA_GB_DBG_DATA_B_LENGTH (10) -+#define PDP_DBGDATA_GB_DBG_DATA_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DBGSIDE_OFFSET (0x0778) -+ -+/* PDP, DBGSIDE, DBG_VAL -+*/ -+#define PDP_DBGSIDE_DBG_VAL_MASK (0x00000008) -+#define PDP_DBGSIDE_DBG_VAL_LSBMASK (0x00000001) -+#define PDP_DBGSIDE_DBG_VAL_SHIFT (3) -+#define PDP_DBGSIDE_DBG_VAL_LENGTH (1) -+#define PDP_DBGSIDE_DBG_VAL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DBGSIDE, DBG_SIDE -+*/ -+#define PDP_DBGSIDE_DBG_SIDE_MASK (0x00000007) -+#define PDP_DBGSIDE_DBG_SIDE_LSBMASK (0x00000007) -+#define PDP_DBGSIDE_DBG_SIDE_SHIFT (0) -+#define PDP_DBGSIDE_DBG_SIDE_LENGTH (3) -+#define PDP_DBGSIDE_DBG_SIDE_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_OUTPUT_OFFSET (0x077C) -+ -+/* PDP, OUTPUT, EIGHT_BIT_OUTPUT -+*/ -+#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_MASK (0x00000002) -+#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_LSBMASK (0x00000001) -+#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_SHIFT (1) -+#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_LENGTH (1) -+#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, OUTPUT, OUTPUT_CONFIG -+*/ -+#define PDP_OUTPUT_OUTPUT_CONFIG_MASK (0x00000001) -+#define PDP_OUTPUT_OUTPUT_CONFIG_LSBMASK (0x00000001) -+#define PDP_OUTPUT_OUTPUT_CONFIG_SHIFT (0) -+#define PDP_OUTPUT_OUTPUT_CONFIG_LENGTH (1) -+#define PDP_OUTPUT_OUTPUT_CONFIG_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_SYNCCTRL_OFFSET (0x0780) -+ -+/* PDP, SYNCCTRL, SYNCACTIVE -+*/ -+#define PDP_SYNCCTRL_SYNCACTIVE_MASK (0x80000000) -+#define PDP_SYNCCTRL_SYNCACTIVE_LSBMASK (0x00000001) -+#define PDP_SYNCCTRL_SYNCACTIVE_SHIFT (31) -+#define PDP_SYNCCTRL_SYNCACTIVE_LENGTH (1) -+#define 
PDP_SYNCCTRL_SYNCACTIVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, PDP_RST -+*/ -+#define PDP_SYNCCTRL_PDP_RST_MASK (0x20000000) -+#define PDP_SYNCCTRL_PDP_RST_LSBMASK (0x00000001) -+#define PDP_SYNCCTRL_PDP_RST_SHIFT (29) -+#define PDP_SYNCCTRL_PDP_RST_LENGTH (1) -+#define PDP_SYNCCTRL_PDP_RST_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, POWERDN -+*/ -+#define PDP_SYNCCTRL_POWERDN_MASK (0x10000000) -+#define PDP_SYNCCTRL_POWERDN_LSBMASK (0x00000001) -+#define PDP_SYNCCTRL_POWERDN_SHIFT (28) -+#define PDP_SYNCCTRL_POWERDN_LENGTH (1) -+#define PDP_SYNCCTRL_POWERDN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, LOWPWRMODE -+*/ -+#define PDP_SYNCCTRL_LOWPWRMODE_MASK (0x08000000) -+#define PDP_SYNCCTRL_LOWPWRMODE_LSBMASK (0x00000001) -+#define PDP_SYNCCTRL_LOWPWRMODE_SHIFT (27) -+#define PDP_SYNCCTRL_LOWPWRMODE_LENGTH (1) -+#define PDP_SYNCCTRL_LOWPWRMODE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, UPDSYNCTRL -+*/ -+#define PDP_SYNCCTRL_UPDSYNCTRL_MASK (0x04000000) -+#define PDP_SYNCCTRL_UPDSYNCTRL_LSBMASK (0x00000001) -+#define PDP_SYNCCTRL_UPDSYNCTRL_SHIFT (26) -+#define PDP_SYNCCTRL_UPDSYNCTRL_LENGTH (1) -+#define PDP_SYNCCTRL_UPDSYNCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, UPDINTCTRL -+*/ -+#define PDP_SYNCCTRL_UPDINTCTRL_MASK (0x02000000) -+#define PDP_SYNCCTRL_UPDINTCTRL_LSBMASK (0x00000001) -+#define PDP_SYNCCTRL_UPDINTCTRL_SHIFT (25) -+#define PDP_SYNCCTRL_UPDINTCTRL_LENGTH (1) -+#define PDP_SYNCCTRL_UPDINTCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, UPDCTRL -+*/ -+#define PDP_SYNCCTRL_UPDCTRL_MASK (0x01000000) -+#define PDP_SYNCCTRL_UPDCTRL_LSBMASK (0x00000001) -+#define PDP_SYNCCTRL_UPDCTRL_SHIFT (24) -+#define PDP_SYNCCTRL_UPDCTRL_LENGTH (1) -+#define PDP_SYNCCTRL_UPDCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, UPDWAIT -+*/ -+#define PDP_SYNCCTRL_UPDWAIT_MASK (0x000F0000) -+#define PDP_SYNCCTRL_UPDWAIT_LSBMASK (0x0000000F) -+#define PDP_SYNCCTRL_UPDWAIT_SHIFT (16) -+#define PDP_SYNCCTRL_UPDWAIT_LENGTH (4) -+#define PDP_SYNCCTRL_UPDWAIT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, FIELD_EN -+*/ -+#define PDP_SYNCCTRL_FIELD_EN_MASK (0x00002000) -+#define PDP_SYNCCTRL_FIELD_EN_LSBMASK (0x00000001) -+#define PDP_SYNCCTRL_FIELD_EN_SHIFT (13) -+#define PDP_SYNCCTRL_FIELD_EN_LENGTH (1) -+#define PDP_SYNCCTRL_FIELD_EN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, CSYNC_EN -+*/ -+#define PDP_SYNCCTRL_CSYNC_EN_MASK (0x00001000) -+#define PDP_SYNCCTRL_CSYNC_EN_LSBMASK (0x00000001) -+#define PDP_SYNCCTRL_CSYNC_EN_SHIFT (12) -+#define PDP_SYNCCTRL_CSYNC_EN_LENGTH (1) -+#define PDP_SYNCCTRL_CSYNC_EN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, CLKPOL -+*/ -+#define PDP_SYNCCTRL_CLKPOL_MASK (0x00000800) -+#define PDP_SYNCCTRL_CLKPOL_LSBMASK (0x00000001) -+#define PDP_SYNCCTRL_CLKPOL_SHIFT (11) -+#define PDP_SYNCCTRL_CLKPOL_LENGTH (1) -+#define PDP_SYNCCTRL_CLKPOL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, VS_SLAVE -+*/ -+#define PDP_SYNCCTRL_VS_SLAVE_MASK (0x00000080) -+#define PDP_SYNCCTRL_VS_SLAVE_LSBMASK (0x00000001) -+#define PDP_SYNCCTRL_VS_SLAVE_SHIFT (7) -+#define PDP_SYNCCTRL_VS_SLAVE_LENGTH (1) -+#define PDP_SYNCCTRL_VS_SLAVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, HS_SLAVE -+*/ -+#define PDP_SYNCCTRL_HS_SLAVE_MASK (0x00000040) -+#define PDP_SYNCCTRL_HS_SLAVE_LSBMASK (0x00000001) -+#define PDP_SYNCCTRL_HS_SLAVE_SHIFT (6) -+#define PDP_SYNCCTRL_HS_SLAVE_LENGTH (1) -+#define PDP_SYNCCTRL_HS_SLAVE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, BLNKPOL -+*/ -+#define PDP_SYNCCTRL_BLNKPOL_MASK (0x00000020) -+#define PDP_SYNCCTRL_BLNKPOL_LSBMASK (0x00000001) 
-+#define PDP_SYNCCTRL_BLNKPOL_SHIFT (5) -+#define PDP_SYNCCTRL_BLNKPOL_LENGTH (1) -+#define PDP_SYNCCTRL_BLNKPOL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, BLNKDIS -+*/ -+#define PDP_SYNCCTRL_BLNKDIS_MASK (0x00000010) -+#define PDP_SYNCCTRL_BLNKDIS_LSBMASK (0x00000001) -+#define PDP_SYNCCTRL_BLNKDIS_SHIFT (4) -+#define PDP_SYNCCTRL_BLNKDIS_LENGTH (1) -+#define PDP_SYNCCTRL_BLNKDIS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, VSPOL -+*/ -+#define PDP_SYNCCTRL_VSPOL_MASK (0x00000008) -+#define PDP_SYNCCTRL_VSPOL_LSBMASK (0x00000001) -+#define PDP_SYNCCTRL_VSPOL_SHIFT (3) -+#define PDP_SYNCCTRL_VSPOL_LENGTH (1) -+#define PDP_SYNCCTRL_VSPOL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, VSDIS -+*/ -+#define PDP_SYNCCTRL_VSDIS_MASK (0x00000004) -+#define PDP_SYNCCTRL_VSDIS_LSBMASK (0x00000001) -+#define PDP_SYNCCTRL_VSDIS_SHIFT (2) -+#define PDP_SYNCCTRL_VSDIS_LENGTH (1) -+#define PDP_SYNCCTRL_VSDIS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, HSPOL -+*/ -+#define PDP_SYNCCTRL_HSPOL_MASK (0x00000002) -+#define PDP_SYNCCTRL_HSPOL_LSBMASK (0x00000001) -+#define PDP_SYNCCTRL_HSPOL_SHIFT (1) -+#define PDP_SYNCCTRL_HSPOL_LENGTH (1) -+#define PDP_SYNCCTRL_HSPOL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, SYNCCTRL, HSDIS -+*/ -+#define PDP_SYNCCTRL_HSDIS_MASK (0x00000001) -+#define PDP_SYNCCTRL_HSDIS_LSBMASK (0x00000001) -+#define PDP_SYNCCTRL_HSDIS_SHIFT (0) -+#define PDP_SYNCCTRL_HSDIS_LENGTH (1) -+#define PDP_SYNCCTRL_HSDIS_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_HSYNC1_OFFSET (0x0784) -+ -+/* PDP, HSYNC1, HBPS -+*/ -+#define PDP_HSYNC1_HBPS_MASK (0x1FFF0000) -+#define PDP_HSYNC1_HBPS_LSBMASK (0x00001FFF) -+#define PDP_HSYNC1_HBPS_SHIFT (16) -+#define PDP_HSYNC1_HBPS_LENGTH (13) -+#define PDP_HSYNC1_HBPS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, HSYNC1, HT -+*/ -+#define PDP_HSYNC1_HT_MASK (0x00001FFF) -+#define PDP_HSYNC1_HT_LSBMASK (0x00001FFF) -+#define PDP_HSYNC1_HT_SHIFT (0) -+#define PDP_HSYNC1_HT_LENGTH (13) -+#define PDP_HSYNC1_HT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_HSYNC2_OFFSET (0x0788) -+ -+/* PDP, HSYNC2, HAS -+*/ -+#define PDP_HSYNC2_HAS_MASK (0x1FFF0000) -+#define PDP_HSYNC2_HAS_LSBMASK (0x00001FFF) -+#define PDP_HSYNC2_HAS_SHIFT (16) -+#define PDP_HSYNC2_HAS_LENGTH (13) -+#define PDP_HSYNC2_HAS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, HSYNC2, HLBS -+*/ -+#define PDP_HSYNC2_HLBS_MASK (0x00001FFF) -+#define PDP_HSYNC2_HLBS_LSBMASK (0x00001FFF) -+#define PDP_HSYNC2_HLBS_SHIFT (0) -+#define PDP_HSYNC2_HLBS_LENGTH (13) -+#define PDP_HSYNC2_HLBS_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_HSYNC3_OFFSET (0x078C) -+ -+/* PDP, HSYNC3, HFPS -+*/ -+#define PDP_HSYNC3_HFPS_MASK (0x1FFF0000) -+#define PDP_HSYNC3_HFPS_LSBMASK (0x00001FFF) -+#define PDP_HSYNC3_HFPS_SHIFT (16) -+#define PDP_HSYNC3_HFPS_LENGTH (13) -+#define PDP_HSYNC3_HFPS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, HSYNC3, HRBS -+*/ -+#define PDP_HSYNC3_HRBS_MASK (0x00001FFF) -+#define PDP_HSYNC3_HRBS_LSBMASK (0x00001FFF) -+#define PDP_HSYNC3_HRBS_SHIFT (0) -+#define PDP_HSYNC3_HRBS_LENGTH (13) -+#define PDP_HSYNC3_HRBS_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VSYNC1_OFFSET (0x0790) -+ -+/* PDP, VSYNC1, VBPS -+*/ -+#define PDP_VSYNC1_VBPS_MASK (0x1FFF0000) -+#define PDP_VSYNC1_VBPS_LSBMASK (0x00001FFF) -+#define PDP_VSYNC1_VBPS_SHIFT (16) -+#define PDP_VSYNC1_VBPS_LENGTH (13) -+#define PDP_VSYNC1_VBPS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VSYNC1, VT -+*/ -+#define PDP_VSYNC1_VT_MASK (0x00001FFF) -+#define PDP_VSYNC1_VT_LSBMASK (0x00001FFF) -+#define PDP_VSYNC1_VT_SHIFT (0) -+#define PDP_VSYNC1_VT_LENGTH (13) -+#define PDP_VSYNC1_VT_SIGNED_FIELD IMG_FALSE 
-+ -+#define PDP_VSYNC2_OFFSET (0x0794) -+ -+/* PDP, VSYNC2, VAS -+*/ -+#define PDP_VSYNC2_VAS_MASK (0x1FFF0000) -+#define PDP_VSYNC2_VAS_LSBMASK (0x00001FFF) -+#define PDP_VSYNC2_VAS_SHIFT (16) -+#define PDP_VSYNC2_VAS_LENGTH (13) -+#define PDP_VSYNC2_VAS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VSYNC2, VTBS -+*/ -+#define PDP_VSYNC2_VTBS_MASK (0x00001FFF) -+#define PDP_VSYNC2_VTBS_LSBMASK (0x00001FFF) -+#define PDP_VSYNC2_VTBS_SHIFT (0) -+#define PDP_VSYNC2_VTBS_LENGTH (13) -+#define PDP_VSYNC2_VTBS_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VSYNC3_OFFSET (0x0798) -+ -+/* PDP, VSYNC3, VFPS -+*/ -+#define PDP_VSYNC3_VFPS_MASK (0x1FFF0000) -+#define PDP_VSYNC3_VFPS_LSBMASK (0x00001FFF) -+#define PDP_VSYNC3_VFPS_SHIFT (16) -+#define PDP_VSYNC3_VFPS_LENGTH (13) -+#define PDP_VSYNC3_VFPS_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VSYNC3, VBBS -+*/ -+#define PDP_VSYNC3_VBBS_MASK (0x00001FFF) -+#define PDP_VSYNC3_VBBS_LSBMASK (0x00001FFF) -+#define PDP_VSYNC3_VBBS_SHIFT (0) -+#define PDP_VSYNC3_VBBS_LENGTH (13) -+#define PDP_VSYNC3_VBBS_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_INTSTAT_OFFSET (0x079C) -+ -+/* PDP, INTSTAT, INTS_VID4ORUN -+*/ -+#define PDP_INTSTAT_INTS_VID4ORUN_MASK (0x00080000) -+#define PDP_INTSTAT_INTS_VID4ORUN_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_VID4ORUN_SHIFT (19) -+#define PDP_INTSTAT_INTS_VID4ORUN_LENGTH (1) -+#define PDP_INTSTAT_INTS_VID4ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VID3ORUN -+*/ -+#define PDP_INTSTAT_INTS_VID3ORUN_MASK (0x00040000) -+#define PDP_INTSTAT_INTS_VID3ORUN_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_VID3ORUN_SHIFT (18) -+#define PDP_INTSTAT_INTS_VID3ORUN_LENGTH (1) -+#define PDP_INTSTAT_INTS_VID3ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VID2ORUN -+*/ -+#define PDP_INTSTAT_INTS_VID2ORUN_MASK (0x00020000) -+#define PDP_INTSTAT_INTS_VID2ORUN_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_VID2ORUN_SHIFT (17) -+#define PDP_INTSTAT_INTS_VID2ORUN_LENGTH (1) -+#define PDP_INTSTAT_INTS_VID2ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VID1ORUN -+*/ -+#define PDP_INTSTAT_INTS_VID1ORUN_MASK (0x00010000) -+#define PDP_INTSTAT_INTS_VID1ORUN_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_VID1ORUN_SHIFT (16) -+#define PDP_INTSTAT_INTS_VID1ORUN_LENGTH (1) -+#define PDP_INTSTAT_INTS_VID1ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_GRPH4ORUN -+*/ -+#define PDP_INTSTAT_INTS_GRPH4ORUN_MASK (0x00008000) -+#define PDP_INTSTAT_INTS_GRPH4ORUN_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_GRPH4ORUN_SHIFT (15) -+#define PDP_INTSTAT_INTS_GRPH4ORUN_LENGTH (1) -+#define PDP_INTSTAT_INTS_GRPH4ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_GRPH3ORUN -+*/ -+#define PDP_INTSTAT_INTS_GRPH3ORUN_MASK (0x00004000) -+#define PDP_INTSTAT_INTS_GRPH3ORUN_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_GRPH3ORUN_SHIFT (14) -+#define PDP_INTSTAT_INTS_GRPH3ORUN_LENGTH (1) -+#define PDP_INTSTAT_INTS_GRPH3ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_GRPH2ORUN -+*/ -+#define PDP_INTSTAT_INTS_GRPH2ORUN_MASK (0x00002000) -+#define PDP_INTSTAT_INTS_GRPH2ORUN_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_GRPH2ORUN_SHIFT (13) -+#define PDP_INTSTAT_INTS_GRPH2ORUN_LENGTH (1) -+#define PDP_INTSTAT_INTS_GRPH2ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_GRPH1ORUN -+*/ -+#define PDP_INTSTAT_INTS_GRPH1ORUN_MASK (0x00001000) -+#define PDP_INTSTAT_INTS_GRPH1ORUN_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_GRPH1ORUN_SHIFT (12) -+#define PDP_INTSTAT_INTS_GRPH1ORUN_LENGTH (1) -+#define PDP_INTSTAT_INTS_GRPH1ORUN_SIGNED_FIELD 
IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VID4URUN -+*/ -+#define PDP_INTSTAT_INTS_VID4URUN_MASK (0x00000800) -+#define PDP_INTSTAT_INTS_VID4URUN_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_VID4URUN_SHIFT (11) -+#define PDP_INTSTAT_INTS_VID4URUN_LENGTH (1) -+#define PDP_INTSTAT_INTS_VID4URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VID3URUN -+*/ -+#define PDP_INTSTAT_INTS_VID3URUN_MASK (0x00000400) -+#define PDP_INTSTAT_INTS_VID3URUN_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_VID3URUN_SHIFT (10) -+#define PDP_INTSTAT_INTS_VID3URUN_LENGTH (1) -+#define PDP_INTSTAT_INTS_VID3URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VID2URUN -+*/ -+#define PDP_INTSTAT_INTS_VID2URUN_MASK (0x00000200) -+#define PDP_INTSTAT_INTS_VID2URUN_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_VID2URUN_SHIFT (9) -+#define PDP_INTSTAT_INTS_VID2URUN_LENGTH (1) -+#define PDP_INTSTAT_INTS_VID2URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VID1URUN -+*/ -+#define PDP_INTSTAT_INTS_VID1URUN_MASK (0x00000100) -+#define PDP_INTSTAT_INTS_VID1URUN_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_VID1URUN_SHIFT (8) -+#define PDP_INTSTAT_INTS_VID1URUN_LENGTH (1) -+#define PDP_INTSTAT_INTS_VID1URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_GRPH4URUN -+*/ -+#define PDP_INTSTAT_INTS_GRPH4URUN_MASK (0x00000080) -+#define PDP_INTSTAT_INTS_GRPH4URUN_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_GRPH4URUN_SHIFT (7) -+#define PDP_INTSTAT_INTS_GRPH4URUN_LENGTH (1) -+#define PDP_INTSTAT_INTS_GRPH4URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_GRPH3URUN -+*/ -+#define PDP_INTSTAT_INTS_GRPH3URUN_MASK (0x00000040) -+#define PDP_INTSTAT_INTS_GRPH3URUN_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_GRPH3URUN_SHIFT (6) -+#define PDP_INTSTAT_INTS_GRPH3URUN_LENGTH (1) -+#define PDP_INTSTAT_INTS_GRPH3URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_GRPH2URUN -+*/ -+#define PDP_INTSTAT_INTS_GRPH2URUN_MASK (0x00000020) -+#define PDP_INTSTAT_INTS_GRPH2URUN_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_GRPH2URUN_SHIFT (5) -+#define PDP_INTSTAT_INTS_GRPH2URUN_LENGTH (1) -+#define PDP_INTSTAT_INTS_GRPH2URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_GRPH1URUN -+*/ -+#define PDP_INTSTAT_INTS_GRPH1URUN_MASK (0x00000010) -+#define PDP_INTSTAT_INTS_GRPH1URUN_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_GRPH1URUN_SHIFT (4) -+#define PDP_INTSTAT_INTS_GRPH1URUN_LENGTH (1) -+#define PDP_INTSTAT_INTS_GRPH1URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VBLNK1 -+*/ -+#define PDP_INTSTAT_INTS_VBLNK1_MASK (0x00000008) -+#define PDP_INTSTAT_INTS_VBLNK1_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_VBLNK1_SHIFT (3) -+#define PDP_INTSTAT_INTS_VBLNK1_LENGTH (1) -+#define PDP_INTSTAT_INTS_VBLNK1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_VBLNK0 -+*/ -+#define PDP_INTSTAT_INTS_VBLNK0_MASK (0x00000004) -+#define PDP_INTSTAT_INTS_VBLNK0_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_VBLNK0_SHIFT (2) -+#define PDP_INTSTAT_INTS_VBLNK0_LENGTH (1) -+#define PDP_INTSTAT_INTS_VBLNK0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_HBLNK1 -+*/ -+#define PDP_INTSTAT_INTS_HBLNK1_MASK (0x00000002) -+#define PDP_INTSTAT_INTS_HBLNK1_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_HBLNK1_SHIFT (1) -+#define PDP_INTSTAT_INTS_HBLNK1_LENGTH (1) -+#define PDP_INTSTAT_INTS_HBLNK1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTSTAT, INTS_HBLNK0 -+*/ -+#define PDP_INTSTAT_INTS_HBLNK0_MASK (0x00000001) -+#define PDP_INTSTAT_INTS_HBLNK0_LSBMASK (0x00000001) -+#define PDP_INTSTAT_INTS_HBLNK0_SHIFT (0) -+#define 
PDP_INTSTAT_INTS_HBLNK0_LENGTH (1) -+#define PDP_INTSTAT_INTS_HBLNK0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_INTENAB_OFFSET (0x07A0) -+ -+/* PDP, INTENAB, INTEN_VID4ORUN -+*/ -+#define PDP_INTENAB_INTEN_VID4ORUN_MASK (0x00080000) -+#define PDP_INTENAB_INTEN_VID4ORUN_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_VID4ORUN_SHIFT (19) -+#define PDP_INTENAB_INTEN_VID4ORUN_LENGTH (1) -+#define PDP_INTENAB_INTEN_VID4ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_VID3ORUN -+*/ -+#define PDP_INTENAB_INTEN_VID3ORUN_MASK (0x00040000) -+#define PDP_INTENAB_INTEN_VID3ORUN_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_VID3ORUN_SHIFT (18) -+#define PDP_INTENAB_INTEN_VID3ORUN_LENGTH (1) -+#define PDP_INTENAB_INTEN_VID3ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_VID2ORUN -+*/ -+#define PDP_INTENAB_INTEN_VID2ORUN_MASK (0x00020000) -+#define PDP_INTENAB_INTEN_VID2ORUN_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_VID2ORUN_SHIFT (17) -+#define PDP_INTENAB_INTEN_VID2ORUN_LENGTH (1) -+#define PDP_INTENAB_INTEN_VID2ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_VID1ORUN -+*/ -+#define PDP_INTENAB_INTEN_VID1ORUN_MASK (0x00010000) -+#define PDP_INTENAB_INTEN_VID1ORUN_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_VID1ORUN_SHIFT (16) -+#define PDP_INTENAB_INTEN_VID1ORUN_LENGTH (1) -+#define PDP_INTENAB_INTEN_VID1ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_GRPH4ORUN -+*/ -+#define PDP_INTENAB_INTEN_GRPH4ORUN_MASK (0x00008000) -+#define PDP_INTENAB_INTEN_GRPH4ORUN_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_GRPH4ORUN_SHIFT (15) -+#define PDP_INTENAB_INTEN_GRPH4ORUN_LENGTH (1) -+#define PDP_INTENAB_INTEN_GRPH4ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_GRPH3ORUN -+*/ -+#define PDP_INTENAB_INTEN_GRPH3ORUN_MASK (0x00004000) -+#define PDP_INTENAB_INTEN_GRPH3ORUN_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_GRPH3ORUN_SHIFT (14) -+#define PDP_INTENAB_INTEN_GRPH3ORUN_LENGTH (1) -+#define PDP_INTENAB_INTEN_GRPH3ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_GRPH2ORUN -+*/ -+#define PDP_INTENAB_INTEN_GRPH2ORUN_MASK (0x00002000) -+#define PDP_INTENAB_INTEN_GRPH2ORUN_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_GRPH2ORUN_SHIFT (13) -+#define PDP_INTENAB_INTEN_GRPH2ORUN_LENGTH (1) -+#define PDP_INTENAB_INTEN_GRPH2ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_GRPH1ORUN -+*/ -+#define PDP_INTENAB_INTEN_GRPH1ORUN_MASK (0x00001000) -+#define PDP_INTENAB_INTEN_GRPH1ORUN_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_GRPH1ORUN_SHIFT (12) -+#define PDP_INTENAB_INTEN_GRPH1ORUN_LENGTH (1) -+#define PDP_INTENAB_INTEN_GRPH1ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_VID4URUN -+*/ -+#define PDP_INTENAB_INTEN_VID4URUN_MASK (0x00000800) -+#define PDP_INTENAB_INTEN_VID4URUN_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_VID4URUN_SHIFT (11) -+#define PDP_INTENAB_INTEN_VID4URUN_LENGTH (1) -+#define PDP_INTENAB_INTEN_VID4URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_VID3URUN -+*/ -+#define PDP_INTENAB_INTEN_VID3URUN_MASK (0x00000400) -+#define PDP_INTENAB_INTEN_VID3URUN_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_VID3URUN_SHIFT (10) -+#define PDP_INTENAB_INTEN_VID3URUN_LENGTH (1) -+#define PDP_INTENAB_INTEN_VID3URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_VID2URUN -+*/ -+#define PDP_INTENAB_INTEN_VID2URUN_MASK (0x00000200) -+#define PDP_INTENAB_INTEN_VID2URUN_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_VID2URUN_SHIFT (9) -+#define PDP_INTENAB_INTEN_VID2URUN_LENGTH (1) -+#define 
PDP_INTENAB_INTEN_VID2URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_VID1URUN -+*/ -+#define PDP_INTENAB_INTEN_VID1URUN_MASK (0x00000100) -+#define PDP_INTENAB_INTEN_VID1URUN_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_VID1URUN_SHIFT (8) -+#define PDP_INTENAB_INTEN_VID1URUN_LENGTH (1) -+#define PDP_INTENAB_INTEN_VID1URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_GRPH4URUN -+*/ -+#define PDP_INTENAB_INTEN_GRPH4URUN_MASK (0x00000080) -+#define PDP_INTENAB_INTEN_GRPH4URUN_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_GRPH4URUN_SHIFT (7) -+#define PDP_INTENAB_INTEN_GRPH4URUN_LENGTH (1) -+#define PDP_INTENAB_INTEN_GRPH4URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_GRPH3URUN -+*/ -+#define PDP_INTENAB_INTEN_GRPH3URUN_MASK (0x00000040) -+#define PDP_INTENAB_INTEN_GRPH3URUN_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_GRPH3URUN_SHIFT (6) -+#define PDP_INTENAB_INTEN_GRPH3URUN_LENGTH (1) -+#define PDP_INTENAB_INTEN_GRPH3URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_GRPH2URUN -+*/ -+#define PDP_INTENAB_INTEN_GRPH2URUN_MASK (0x00000020) -+#define PDP_INTENAB_INTEN_GRPH2URUN_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_GRPH2URUN_SHIFT (5) -+#define PDP_INTENAB_INTEN_GRPH2URUN_LENGTH (1) -+#define PDP_INTENAB_INTEN_GRPH2URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_GRPH1URUN -+*/ -+#define PDP_INTENAB_INTEN_GRPH1URUN_MASK (0x00000010) -+#define PDP_INTENAB_INTEN_GRPH1URUN_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_GRPH1URUN_SHIFT (4) -+#define PDP_INTENAB_INTEN_GRPH1URUN_LENGTH (1) -+#define PDP_INTENAB_INTEN_GRPH1URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_VBLNK1 -+*/ -+#define PDP_INTENAB_INTEN_VBLNK1_MASK (0x00000008) -+#define PDP_INTENAB_INTEN_VBLNK1_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_VBLNK1_SHIFT (3) -+#define PDP_INTENAB_INTEN_VBLNK1_LENGTH (1) -+#define PDP_INTENAB_INTEN_VBLNK1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_VBLNK0 -+*/ -+#define PDP_INTENAB_INTEN_VBLNK0_MASK (0x00000004) -+#define PDP_INTENAB_INTEN_VBLNK0_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_VBLNK0_SHIFT (2) -+#define PDP_INTENAB_INTEN_VBLNK0_LENGTH (1) -+#define PDP_INTENAB_INTEN_VBLNK0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_HBLNK1 -+*/ -+#define PDP_INTENAB_INTEN_HBLNK1_MASK (0x00000002) -+#define PDP_INTENAB_INTEN_HBLNK1_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_HBLNK1_SHIFT (1) -+#define PDP_INTENAB_INTEN_HBLNK1_LENGTH (1) -+#define PDP_INTENAB_INTEN_HBLNK1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTENAB, INTEN_HBLNK0 -+*/ -+#define PDP_INTENAB_INTEN_HBLNK0_MASK (0x00000001) -+#define PDP_INTENAB_INTEN_HBLNK0_LSBMASK (0x00000001) -+#define PDP_INTENAB_INTEN_HBLNK0_SHIFT (0) -+#define PDP_INTENAB_INTEN_HBLNK0_LENGTH (1) -+#define PDP_INTENAB_INTEN_HBLNK0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_INTCLR_OFFSET (0x07A4) -+ -+/* PDP, INTCLR, INTCLR_VID4ORUN -+*/ -+#define PDP_INTCLR_INTCLR_VID4ORUN_MASK (0x00080000) -+#define PDP_INTCLR_INTCLR_VID4ORUN_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_VID4ORUN_SHIFT (19) -+#define PDP_INTCLR_INTCLR_VID4ORUN_LENGTH (1) -+#define PDP_INTCLR_INTCLR_VID4ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VID3ORUN -+*/ -+#define PDP_INTCLR_INTCLR_VID3ORUN_MASK (0x00040000) -+#define PDP_INTCLR_INTCLR_VID3ORUN_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_VID3ORUN_SHIFT (18) -+#define PDP_INTCLR_INTCLR_VID3ORUN_LENGTH (1) -+#define PDP_INTCLR_INTCLR_VID3ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VID2ORUN -+*/ -+#define 
PDP_INTCLR_INTCLR_VID2ORUN_MASK (0x00020000) -+#define PDP_INTCLR_INTCLR_VID2ORUN_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_VID2ORUN_SHIFT (17) -+#define PDP_INTCLR_INTCLR_VID2ORUN_LENGTH (1) -+#define PDP_INTCLR_INTCLR_VID2ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VID1ORUN -+*/ -+#define PDP_INTCLR_INTCLR_VID1ORUN_MASK (0x00010000) -+#define PDP_INTCLR_INTCLR_VID1ORUN_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_VID1ORUN_SHIFT (16) -+#define PDP_INTCLR_INTCLR_VID1ORUN_LENGTH (1) -+#define PDP_INTCLR_INTCLR_VID1ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_GRPH4ORUN -+*/ -+#define PDP_INTCLR_INTCLR_GRPH4ORUN_MASK (0x00008000) -+#define PDP_INTCLR_INTCLR_GRPH4ORUN_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_GRPH4ORUN_SHIFT (15) -+#define PDP_INTCLR_INTCLR_GRPH4ORUN_LENGTH (1) -+#define PDP_INTCLR_INTCLR_GRPH4ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_GRPH3ORUN -+*/ -+#define PDP_INTCLR_INTCLR_GRPH3ORUN_MASK (0x00004000) -+#define PDP_INTCLR_INTCLR_GRPH3ORUN_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_GRPH3ORUN_SHIFT (14) -+#define PDP_INTCLR_INTCLR_GRPH3ORUN_LENGTH (1) -+#define PDP_INTCLR_INTCLR_GRPH3ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_GRPH2ORUN -+*/ -+#define PDP_INTCLR_INTCLR_GRPH2ORUN_MASK (0x00002000) -+#define PDP_INTCLR_INTCLR_GRPH2ORUN_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_GRPH2ORUN_SHIFT (13) -+#define PDP_INTCLR_INTCLR_GRPH2ORUN_LENGTH (1) -+#define PDP_INTCLR_INTCLR_GRPH2ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_GRPH1ORUN -+*/ -+#define PDP_INTCLR_INTCLR_GRPH1ORUN_MASK (0x00001000) -+#define PDP_INTCLR_INTCLR_GRPH1ORUN_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_GRPH1ORUN_SHIFT (12) -+#define PDP_INTCLR_INTCLR_GRPH1ORUN_LENGTH (1) -+#define PDP_INTCLR_INTCLR_GRPH1ORUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VID4URUN -+*/ -+#define PDP_INTCLR_INTCLR_VID4URUN_MASK (0x00000800) -+#define PDP_INTCLR_INTCLR_VID4URUN_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_VID4URUN_SHIFT (11) -+#define PDP_INTCLR_INTCLR_VID4URUN_LENGTH (1) -+#define PDP_INTCLR_INTCLR_VID4URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VID3URUN -+*/ -+#define PDP_INTCLR_INTCLR_VID3URUN_MASK (0x00000400) -+#define PDP_INTCLR_INTCLR_VID3URUN_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_VID3URUN_SHIFT (10) -+#define PDP_INTCLR_INTCLR_VID3URUN_LENGTH (1) -+#define PDP_INTCLR_INTCLR_VID3URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VID2URUN -+*/ -+#define PDP_INTCLR_INTCLR_VID2URUN_MASK (0x00000200) -+#define PDP_INTCLR_INTCLR_VID2URUN_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_VID2URUN_SHIFT (9) -+#define PDP_INTCLR_INTCLR_VID2URUN_LENGTH (1) -+#define PDP_INTCLR_INTCLR_VID2URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VID1URUN -+*/ -+#define PDP_INTCLR_INTCLR_VID1URUN_MASK (0x00000100) -+#define PDP_INTCLR_INTCLR_VID1URUN_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_VID1URUN_SHIFT (8) -+#define PDP_INTCLR_INTCLR_VID1URUN_LENGTH (1) -+#define PDP_INTCLR_INTCLR_VID1URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_GRPH4URUN -+*/ -+#define PDP_INTCLR_INTCLR_GRPH4URUN_MASK (0x00000080) -+#define PDP_INTCLR_INTCLR_GRPH4URUN_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_GRPH4URUN_SHIFT (7) -+#define PDP_INTCLR_INTCLR_GRPH4URUN_LENGTH (1) -+#define PDP_INTCLR_INTCLR_GRPH4URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_GRPH3URUN -+*/ -+#define PDP_INTCLR_INTCLR_GRPH3URUN_MASK (0x00000040) -+#define PDP_INTCLR_INTCLR_GRPH3URUN_LSBMASK (0x00000001) 
-+#define PDP_INTCLR_INTCLR_GRPH3URUN_SHIFT (6) -+#define PDP_INTCLR_INTCLR_GRPH3URUN_LENGTH (1) -+#define PDP_INTCLR_INTCLR_GRPH3URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_GRPH2URUN -+*/ -+#define PDP_INTCLR_INTCLR_GRPH2URUN_MASK (0x00000020) -+#define PDP_INTCLR_INTCLR_GRPH2URUN_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_GRPH2URUN_SHIFT (5) -+#define PDP_INTCLR_INTCLR_GRPH2URUN_LENGTH (1) -+#define PDP_INTCLR_INTCLR_GRPH2URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_GRPH1URUN -+*/ -+#define PDP_INTCLR_INTCLR_GRPH1URUN_MASK (0x00000010) -+#define PDP_INTCLR_INTCLR_GRPH1URUN_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_GRPH1URUN_SHIFT (4) -+#define PDP_INTCLR_INTCLR_GRPH1URUN_LENGTH (1) -+#define PDP_INTCLR_INTCLR_GRPH1URUN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VBLNK1 -+*/ -+#define PDP_INTCLR_INTCLR_VBLNK1_MASK (0x00000008) -+#define PDP_INTCLR_INTCLR_VBLNK1_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_VBLNK1_SHIFT (3) -+#define PDP_INTCLR_INTCLR_VBLNK1_LENGTH (1) -+#define PDP_INTCLR_INTCLR_VBLNK1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_VBLNK0 -+*/ -+#define PDP_INTCLR_INTCLR_VBLNK0_MASK (0x00000004) -+#define PDP_INTCLR_INTCLR_VBLNK0_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_VBLNK0_SHIFT (2) -+#define PDP_INTCLR_INTCLR_VBLNK0_LENGTH (1) -+#define PDP_INTCLR_INTCLR_VBLNK0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_HBLNK1 -+*/ -+#define PDP_INTCLR_INTCLR_HBLNK1_MASK (0x00000002) -+#define PDP_INTCLR_INTCLR_HBLNK1_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_HBLNK1_SHIFT (1) -+#define PDP_INTCLR_INTCLR_HBLNK1_LENGTH (1) -+#define PDP_INTCLR_INTCLR_HBLNK1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, INTCLR, INTCLR_HBLNK0 -+*/ -+#define PDP_INTCLR_INTCLR_HBLNK0_MASK (0x00000001) -+#define PDP_INTCLR_INTCLR_HBLNK0_LSBMASK (0x00000001) -+#define PDP_INTCLR_INTCLR_HBLNK0_SHIFT (0) -+#define PDP_INTCLR_INTCLR_HBLNK0_LENGTH (1) -+#define PDP_INTCLR_INTCLR_HBLNK0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_MEMCTRL_OFFSET (0x07A8) -+ -+/* PDP, MEMCTRL, MEMREFRESH -+*/ -+#define PDP_MEMCTRL_MEMREFRESH_MASK (0xC0000000) -+#define PDP_MEMCTRL_MEMREFRESH_LSBMASK (0x00000003) -+#define PDP_MEMCTRL_MEMREFRESH_SHIFT (30) -+#define PDP_MEMCTRL_MEMREFRESH_LENGTH (2) -+#define PDP_MEMCTRL_MEMREFRESH_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, MEMCTRL, BURSTLEN -+*/ -+#define PDP_MEMCTRL_BURSTLEN_MASK (0x000000FF) -+#define PDP_MEMCTRL_BURSTLEN_LSBMASK (0x000000FF) -+#define PDP_MEMCTRL_BURSTLEN_SHIFT (0) -+#define PDP_MEMCTRL_BURSTLEN_LENGTH (8) -+#define PDP_MEMCTRL_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_MEM_THRESH_OFFSET (0x07AC) -+ -+/* PDP, MEM_THRESH, UVTHRESHOLD -+*/ -+#define PDP_MEM_THRESH_UVTHRESHOLD_MASK (0xFF000000) -+#define PDP_MEM_THRESH_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define PDP_MEM_THRESH_UVTHRESHOLD_SHIFT (24) -+#define PDP_MEM_THRESH_UVTHRESHOLD_LENGTH (8) -+#define PDP_MEM_THRESH_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, MEM_THRESH, YTHRESHOLD -+*/ -+#define PDP_MEM_THRESH_YTHRESHOLD_MASK (0x001FF000) -+#define PDP_MEM_THRESH_YTHRESHOLD_LSBMASK (0x000001FF) -+#define PDP_MEM_THRESH_YTHRESHOLD_SHIFT (12) -+#define PDP_MEM_THRESH_YTHRESHOLD_LENGTH (9) -+#define PDP_MEM_THRESH_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, MEM_THRESH, THRESHOLD -+*/ -+#define PDP_MEM_THRESH_THRESHOLD_MASK (0x000001FF) -+#define PDP_MEM_THRESH_THRESHOLD_LSBMASK (0x000001FF) -+#define PDP_MEM_THRESH_THRESHOLD_SHIFT (0) -+#define PDP_MEM_THRESH_THRESHOLD_LENGTH (9) -+#define PDP_MEM_THRESH_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define 
PDP_ALTERNATE_3D_CTRL_OFFSET (0x07B0) -+ -+/* PDP, ALTERNATE_3D_CTRL, ALT3D_ON -+*/ -+#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_MASK (0x00000010) -+#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LSBMASK (0x00000001) -+#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SHIFT (4) -+#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LENGTH (1) -+#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, ALTERNATE_3D_CTRL, ALT3D_BLENDSEL -+*/ -+#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_MASK (0x00000007) -+#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LSBMASK (0x00000007) -+#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SHIFT (0) -+#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LENGTH (3) -+#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA0_R_OFFSET (0x07B4) -+ -+/* PDP, GAMMA0_R, GAMMA0_R -+*/ -+#define PDP_GAMMA0_R_GAMMA0_R_MASK (0x000003FF) -+#define PDP_GAMMA0_R_GAMMA0_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA0_R_GAMMA0_R_SHIFT (0) -+#define PDP_GAMMA0_R_GAMMA0_R_LENGTH (10) -+#define PDP_GAMMA0_R_GAMMA0_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA0_GB_OFFSET (0x07B8) -+ -+/* PDP, GAMMA0_GB, GAMMA0_G -+*/ -+#define PDP_GAMMA0_GB_GAMMA0_G_MASK (0x03FF0000) -+#define PDP_GAMMA0_GB_GAMMA0_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA0_GB_GAMMA0_G_SHIFT (16) -+#define PDP_GAMMA0_GB_GAMMA0_G_LENGTH (10) -+#define PDP_GAMMA0_GB_GAMMA0_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA0_GB, GAMMA0_B -+*/ -+#define PDP_GAMMA0_GB_GAMMA0_B_MASK (0x000003FF) -+#define PDP_GAMMA0_GB_GAMMA0_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA0_GB_GAMMA0_B_SHIFT (0) -+#define PDP_GAMMA0_GB_GAMMA0_B_LENGTH (10) -+#define PDP_GAMMA0_GB_GAMMA0_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA1_R_OFFSET (0x07BC) -+ -+/* PDP, GAMMA1_R, GAMMA1_R -+*/ -+#define PDP_GAMMA1_R_GAMMA1_R_MASK (0x000003FF) -+#define PDP_GAMMA1_R_GAMMA1_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA1_R_GAMMA1_R_SHIFT (0) -+#define PDP_GAMMA1_R_GAMMA1_R_LENGTH (10) -+#define PDP_GAMMA1_R_GAMMA1_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA1_GB_OFFSET (0x07C0) -+ -+/* PDP, GAMMA1_GB, GAMMA1_G -+*/ -+#define PDP_GAMMA1_GB_GAMMA1_G_MASK (0x03FF0000) -+#define PDP_GAMMA1_GB_GAMMA1_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA1_GB_GAMMA1_G_SHIFT (16) -+#define PDP_GAMMA1_GB_GAMMA1_G_LENGTH (10) -+#define PDP_GAMMA1_GB_GAMMA1_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA1_GB, GAMMA1_B -+*/ -+#define PDP_GAMMA1_GB_GAMMA1_B_MASK (0x000003FF) -+#define PDP_GAMMA1_GB_GAMMA1_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA1_GB_GAMMA1_B_SHIFT (0) -+#define PDP_GAMMA1_GB_GAMMA1_B_LENGTH (10) -+#define PDP_GAMMA1_GB_GAMMA1_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA2_R_OFFSET (0x07C4) -+ -+/* PDP, GAMMA2_R, GAMMA2_R -+*/ -+#define PDP_GAMMA2_R_GAMMA2_R_MASK (0x000003FF) -+#define PDP_GAMMA2_R_GAMMA2_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA2_R_GAMMA2_R_SHIFT (0) -+#define PDP_GAMMA2_R_GAMMA2_R_LENGTH (10) -+#define PDP_GAMMA2_R_GAMMA2_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA2_GB_OFFSET (0x07C8) -+ -+/* PDP, GAMMA2_GB, GAMMA2_G -+*/ -+#define PDP_GAMMA2_GB_GAMMA2_G_MASK (0x03FF0000) -+#define PDP_GAMMA2_GB_GAMMA2_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA2_GB_GAMMA2_G_SHIFT (16) -+#define PDP_GAMMA2_GB_GAMMA2_G_LENGTH (10) -+#define PDP_GAMMA2_GB_GAMMA2_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA2_GB, GAMMA2_B -+*/ -+#define PDP_GAMMA2_GB_GAMMA2_B_MASK (0x000003FF) -+#define PDP_GAMMA2_GB_GAMMA2_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA2_GB_GAMMA2_B_SHIFT (0) -+#define PDP_GAMMA2_GB_GAMMA2_B_LENGTH (10) -+#define PDP_GAMMA2_GB_GAMMA2_B_SIGNED_FIELD 
IMG_FALSE -+ -+#define PDP_GAMMA3_R_OFFSET (0x07CC) -+ -+/* PDP, GAMMA3_R, GAMMA3_R -+*/ -+#define PDP_GAMMA3_R_GAMMA3_R_MASK (0x000003FF) -+#define PDP_GAMMA3_R_GAMMA3_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA3_R_GAMMA3_R_SHIFT (0) -+#define PDP_GAMMA3_R_GAMMA3_R_LENGTH (10) -+#define PDP_GAMMA3_R_GAMMA3_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA3_GB_OFFSET (0x07D0) -+ -+/* PDP, GAMMA3_GB, GAMMA3_G -+*/ -+#define PDP_GAMMA3_GB_GAMMA3_G_MASK (0x03FF0000) -+#define PDP_GAMMA3_GB_GAMMA3_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA3_GB_GAMMA3_G_SHIFT (16) -+#define PDP_GAMMA3_GB_GAMMA3_G_LENGTH (10) -+#define PDP_GAMMA3_GB_GAMMA3_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA3_GB, GAMMA3_B -+*/ -+#define PDP_GAMMA3_GB_GAMMA3_B_MASK (0x000003FF) -+#define PDP_GAMMA3_GB_GAMMA3_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA3_GB_GAMMA3_B_SHIFT (0) -+#define PDP_GAMMA3_GB_GAMMA3_B_LENGTH (10) -+#define PDP_GAMMA3_GB_GAMMA3_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA4_R_OFFSET (0x07D4) -+ -+/* PDP, GAMMA4_R, GAMMA4_R -+*/ -+#define PDP_GAMMA4_R_GAMMA4_R_MASK (0x000003FF) -+#define PDP_GAMMA4_R_GAMMA4_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA4_R_GAMMA4_R_SHIFT (0) -+#define PDP_GAMMA4_R_GAMMA4_R_LENGTH (10) -+#define PDP_GAMMA4_R_GAMMA4_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA4_GB_OFFSET (0x07D8) -+ -+/* PDP, GAMMA4_GB, GAMMA4_G -+*/ -+#define PDP_GAMMA4_GB_GAMMA4_G_MASK (0x03FF0000) -+#define PDP_GAMMA4_GB_GAMMA4_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA4_GB_GAMMA4_G_SHIFT (16) -+#define PDP_GAMMA4_GB_GAMMA4_G_LENGTH (10) -+#define PDP_GAMMA4_GB_GAMMA4_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA4_GB, GAMMA4_B -+*/ -+#define PDP_GAMMA4_GB_GAMMA4_B_MASK (0x000003FF) -+#define PDP_GAMMA4_GB_GAMMA4_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA4_GB_GAMMA4_B_SHIFT (0) -+#define PDP_GAMMA4_GB_GAMMA4_B_LENGTH (10) -+#define PDP_GAMMA4_GB_GAMMA4_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA5_R_OFFSET (0x07DC) -+ -+/* PDP, GAMMA5_R, GAMMA5_R -+*/ -+#define PDP_GAMMA5_R_GAMMA5_R_MASK (0x000003FF) -+#define PDP_GAMMA5_R_GAMMA5_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA5_R_GAMMA5_R_SHIFT (0) -+#define PDP_GAMMA5_R_GAMMA5_R_LENGTH (10) -+#define PDP_GAMMA5_R_GAMMA5_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA5_GB_OFFSET (0x07E0) -+ -+/* PDP, GAMMA5_GB, GAMMA5_G -+*/ -+#define PDP_GAMMA5_GB_GAMMA5_G_MASK (0x03FF0000) -+#define PDP_GAMMA5_GB_GAMMA5_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA5_GB_GAMMA5_G_SHIFT (16) -+#define PDP_GAMMA5_GB_GAMMA5_G_LENGTH (10) -+#define PDP_GAMMA5_GB_GAMMA5_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA5_GB, GAMMA5_B -+*/ -+#define PDP_GAMMA5_GB_GAMMA5_B_MASK (0x000003FF) -+#define PDP_GAMMA5_GB_GAMMA5_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA5_GB_GAMMA5_B_SHIFT (0) -+#define PDP_GAMMA5_GB_GAMMA5_B_LENGTH (10) -+#define PDP_GAMMA5_GB_GAMMA5_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA6_R_OFFSET (0x07E4) -+ -+/* PDP, GAMMA6_R, GAMMA6_R -+*/ -+#define PDP_GAMMA6_R_GAMMA6_R_MASK (0x000003FF) -+#define PDP_GAMMA6_R_GAMMA6_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA6_R_GAMMA6_R_SHIFT (0) -+#define PDP_GAMMA6_R_GAMMA6_R_LENGTH (10) -+#define PDP_GAMMA6_R_GAMMA6_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA6_GB_OFFSET (0x07E8) -+ -+/* PDP, GAMMA6_GB, GAMMA6_G -+*/ -+#define PDP_GAMMA6_GB_GAMMA6_G_MASK (0x03FF0000) -+#define PDP_GAMMA6_GB_GAMMA6_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA6_GB_GAMMA6_G_SHIFT (16) -+#define PDP_GAMMA6_GB_GAMMA6_G_LENGTH (10) -+#define PDP_GAMMA6_GB_GAMMA6_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA6_GB, GAMMA6_B -+*/ -+#define 
PDP_GAMMA6_GB_GAMMA6_B_MASK (0x000003FF) -+#define PDP_GAMMA6_GB_GAMMA6_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA6_GB_GAMMA6_B_SHIFT (0) -+#define PDP_GAMMA6_GB_GAMMA6_B_LENGTH (10) -+#define PDP_GAMMA6_GB_GAMMA6_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA7_R_OFFSET (0x07EC) -+ -+/* PDP, GAMMA7_R, GAMMA7_R -+*/ -+#define PDP_GAMMA7_R_GAMMA7_R_MASK (0x000003FF) -+#define PDP_GAMMA7_R_GAMMA7_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA7_R_GAMMA7_R_SHIFT (0) -+#define PDP_GAMMA7_R_GAMMA7_R_LENGTH (10) -+#define PDP_GAMMA7_R_GAMMA7_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA7_GB_OFFSET (0x07F0) -+ -+/* PDP, GAMMA7_GB, GAMMA7_G -+*/ -+#define PDP_GAMMA7_GB_GAMMA7_G_MASK (0x03FF0000) -+#define PDP_GAMMA7_GB_GAMMA7_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA7_GB_GAMMA7_G_SHIFT (16) -+#define PDP_GAMMA7_GB_GAMMA7_G_LENGTH (10) -+#define PDP_GAMMA7_GB_GAMMA7_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA7_GB, GAMMA7_B -+*/ -+#define PDP_GAMMA7_GB_GAMMA7_B_MASK (0x000003FF) -+#define PDP_GAMMA7_GB_GAMMA7_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA7_GB_GAMMA7_B_SHIFT (0) -+#define PDP_GAMMA7_GB_GAMMA7_B_LENGTH (10) -+#define PDP_GAMMA7_GB_GAMMA7_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA8_R_OFFSET (0x07F4) -+ -+/* PDP, GAMMA8_R, GAMMA8_R -+*/ -+#define PDP_GAMMA8_R_GAMMA8_R_MASK (0x000003FF) -+#define PDP_GAMMA8_R_GAMMA8_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA8_R_GAMMA8_R_SHIFT (0) -+#define PDP_GAMMA8_R_GAMMA8_R_LENGTH (10) -+#define PDP_GAMMA8_R_GAMMA8_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA8_GB_OFFSET (0x07F8) -+ -+/* PDP, GAMMA8_GB, GAMMA8_G -+*/ -+#define PDP_GAMMA8_GB_GAMMA8_G_MASK (0x03FF0000) -+#define PDP_GAMMA8_GB_GAMMA8_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA8_GB_GAMMA8_G_SHIFT (16) -+#define PDP_GAMMA8_GB_GAMMA8_G_LENGTH (10) -+#define PDP_GAMMA8_GB_GAMMA8_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA8_GB, GAMMA8_B -+*/ -+#define PDP_GAMMA8_GB_GAMMA8_B_MASK (0x000003FF) -+#define PDP_GAMMA8_GB_GAMMA8_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA8_GB_GAMMA8_B_SHIFT (0) -+#define PDP_GAMMA8_GB_GAMMA8_B_LENGTH (10) -+#define PDP_GAMMA8_GB_GAMMA8_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA9_R_OFFSET (0x07FC) -+ -+/* PDP, GAMMA9_R, GAMMA9_R -+*/ -+#define PDP_GAMMA9_R_GAMMA9_R_MASK (0x000003FF) -+#define PDP_GAMMA9_R_GAMMA9_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA9_R_GAMMA9_R_SHIFT (0) -+#define PDP_GAMMA9_R_GAMMA9_R_LENGTH (10) -+#define PDP_GAMMA9_R_GAMMA9_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA9_GB_OFFSET (0x0800) -+ -+/* PDP, GAMMA9_GB, GAMMA9_G -+*/ -+#define PDP_GAMMA9_GB_GAMMA9_G_MASK (0x03FF0000) -+#define PDP_GAMMA9_GB_GAMMA9_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA9_GB_GAMMA9_G_SHIFT (16) -+#define PDP_GAMMA9_GB_GAMMA9_G_LENGTH (10) -+#define PDP_GAMMA9_GB_GAMMA9_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA9_GB, GAMMA9_B -+*/ -+#define PDP_GAMMA9_GB_GAMMA9_B_MASK (0x000003FF) -+#define PDP_GAMMA9_GB_GAMMA9_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA9_GB_GAMMA9_B_SHIFT (0) -+#define PDP_GAMMA9_GB_GAMMA9_B_LENGTH (10) -+#define PDP_GAMMA9_GB_GAMMA9_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA10_R_OFFSET (0x0804) -+ -+/* PDP, GAMMA10_R, GAMMA10_R -+*/ -+#define PDP_GAMMA10_R_GAMMA10_R_MASK (0x000003FF) -+#define PDP_GAMMA10_R_GAMMA10_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA10_R_GAMMA10_R_SHIFT (0) -+#define PDP_GAMMA10_R_GAMMA10_R_LENGTH (10) -+#define PDP_GAMMA10_R_GAMMA10_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA10_GB_OFFSET (0x0808) -+ -+/* PDP, GAMMA10_GB, GAMMA10_G -+*/ -+#define PDP_GAMMA10_GB_GAMMA10_G_MASK (0x03FF0000) -+#define 
PDP_GAMMA10_GB_GAMMA10_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA10_GB_GAMMA10_G_SHIFT (16) -+#define PDP_GAMMA10_GB_GAMMA10_G_LENGTH (10) -+#define PDP_GAMMA10_GB_GAMMA10_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA10_GB, GAMMA10_B -+*/ -+#define PDP_GAMMA10_GB_GAMMA10_B_MASK (0x000003FF) -+#define PDP_GAMMA10_GB_GAMMA10_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA10_GB_GAMMA10_B_SHIFT (0) -+#define PDP_GAMMA10_GB_GAMMA10_B_LENGTH (10) -+#define PDP_GAMMA10_GB_GAMMA10_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA11_R_OFFSET (0x080C) -+ -+/* PDP, GAMMA11_R, GAMMA11_R -+*/ -+#define PDP_GAMMA11_R_GAMMA11_R_MASK (0x000003FF) -+#define PDP_GAMMA11_R_GAMMA11_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA11_R_GAMMA11_R_SHIFT (0) -+#define PDP_GAMMA11_R_GAMMA11_R_LENGTH (10) -+#define PDP_GAMMA11_R_GAMMA11_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA11_GB_OFFSET (0x0810) -+ -+/* PDP, GAMMA11_GB, GAMMA11_G -+*/ -+#define PDP_GAMMA11_GB_GAMMA11_G_MASK (0x03FF0000) -+#define PDP_GAMMA11_GB_GAMMA11_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA11_GB_GAMMA11_G_SHIFT (16) -+#define PDP_GAMMA11_GB_GAMMA11_G_LENGTH (10) -+#define PDP_GAMMA11_GB_GAMMA11_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA11_GB, GAMMA11_B -+*/ -+#define PDP_GAMMA11_GB_GAMMA11_B_MASK (0x000003FF) -+#define PDP_GAMMA11_GB_GAMMA11_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA11_GB_GAMMA11_B_SHIFT (0) -+#define PDP_GAMMA11_GB_GAMMA11_B_LENGTH (10) -+#define PDP_GAMMA11_GB_GAMMA11_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA12_R_OFFSET (0x0814) -+ -+/* PDP, GAMMA12_R, GAMMA12_R -+*/ -+#define PDP_GAMMA12_R_GAMMA12_R_MASK (0x000003FF) -+#define PDP_GAMMA12_R_GAMMA12_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA12_R_GAMMA12_R_SHIFT (0) -+#define PDP_GAMMA12_R_GAMMA12_R_LENGTH (10) -+#define PDP_GAMMA12_R_GAMMA12_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA12_GB_OFFSET (0x0818) -+ -+/* PDP, GAMMA12_GB, GAMMA12_G -+*/ -+#define PDP_GAMMA12_GB_GAMMA12_G_MASK (0x03FF0000) -+#define PDP_GAMMA12_GB_GAMMA12_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA12_GB_GAMMA12_G_SHIFT (16) -+#define PDP_GAMMA12_GB_GAMMA12_G_LENGTH (10) -+#define PDP_GAMMA12_GB_GAMMA12_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA12_GB, GAMMA12_B -+*/ -+#define PDP_GAMMA12_GB_GAMMA12_B_MASK (0x000003FF) -+#define PDP_GAMMA12_GB_GAMMA12_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA12_GB_GAMMA12_B_SHIFT (0) -+#define PDP_GAMMA12_GB_GAMMA12_B_LENGTH (10) -+#define PDP_GAMMA12_GB_GAMMA12_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA13_R_OFFSET (0x081C) -+ -+/* PDP, GAMMA13_R, GAMMA13_R -+*/ -+#define PDP_GAMMA13_R_GAMMA13_R_MASK (0x000003FF) -+#define PDP_GAMMA13_R_GAMMA13_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA13_R_GAMMA13_R_SHIFT (0) -+#define PDP_GAMMA13_R_GAMMA13_R_LENGTH (10) -+#define PDP_GAMMA13_R_GAMMA13_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA13_GB_OFFSET (0x0820) -+ -+/* PDP, GAMMA13_GB, GAMMA13_G -+*/ -+#define PDP_GAMMA13_GB_GAMMA13_G_MASK (0x03FF0000) -+#define PDP_GAMMA13_GB_GAMMA13_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA13_GB_GAMMA13_G_SHIFT (16) -+#define PDP_GAMMA13_GB_GAMMA13_G_LENGTH (10) -+#define PDP_GAMMA13_GB_GAMMA13_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA13_GB, GAMMA13_B -+*/ -+#define PDP_GAMMA13_GB_GAMMA13_B_MASK (0x000003FF) -+#define PDP_GAMMA13_GB_GAMMA13_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA13_GB_GAMMA13_B_SHIFT (0) -+#define PDP_GAMMA13_GB_GAMMA13_B_LENGTH (10) -+#define PDP_GAMMA13_GB_GAMMA13_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA14_R_OFFSET (0x0824) -+ -+/* PDP, GAMMA14_R, GAMMA14_R -+*/ -+#define PDP_GAMMA14_R_GAMMA14_R_MASK 
(0x000003FF) -+#define PDP_GAMMA14_R_GAMMA14_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA14_R_GAMMA14_R_SHIFT (0) -+#define PDP_GAMMA14_R_GAMMA14_R_LENGTH (10) -+#define PDP_GAMMA14_R_GAMMA14_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA14_GB_OFFSET (0x0828) -+ -+/* PDP, GAMMA14_GB, GAMMA14_G -+*/ -+#define PDP_GAMMA14_GB_GAMMA14_G_MASK (0x03FF0000) -+#define PDP_GAMMA14_GB_GAMMA14_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA14_GB_GAMMA14_G_SHIFT (16) -+#define PDP_GAMMA14_GB_GAMMA14_G_LENGTH (10) -+#define PDP_GAMMA14_GB_GAMMA14_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA14_GB, GAMMA14_B -+*/ -+#define PDP_GAMMA14_GB_GAMMA14_B_MASK (0x000003FF) -+#define PDP_GAMMA14_GB_GAMMA14_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA14_GB_GAMMA14_B_SHIFT (0) -+#define PDP_GAMMA14_GB_GAMMA14_B_LENGTH (10) -+#define PDP_GAMMA14_GB_GAMMA14_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA15_R_OFFSET (0x082C) -+ -+/* PDP, GAMMA15_R, GAMMA15_R -+*/ -+#define PDP_GAMMA15_R_GAMMA15_R_MASK (0x000003FF) -+#define PDP_GAMMA15_R_GAMMA15_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA15_R_GAMMA15_R_SHIFT (0) -+#define PDP_GAMMA15_R_GAMMA15_R_LENGTH (10) -+#define PDP_GAMMA15_R_GAMMA15_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA15_GB_OFFSET (0x0830) -+ -+/* PDP, GAMMA15_GB, GAMMA15_G -+*/ -+#define PDP_GAMMA15_GB_GAMMA15_G_MASK (0x03FF0000) -+#define PDP_GAMMA15_GB_GAMMA15_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA15_GB_GAMMA15_G_SHIFT (16) -+#define PDP_GAMMA15_GB_GAMMA15_G_LENGTH (10) -+#define PDP_GAMMA15_GB_GAMMA15_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA15_GB, GAMMA15_B -+*/ -+#define PDP_GAMMA15_GB_GAMMA15_B_MASK (0x000003FF) -+#define PDP_GAMMA15_GB_GAMMA15_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA15_GB_GAMMA15_B_SHIFT (0) -+#define PDP_GAMMA15_GB_GAMMA15_B_LENGTH (10) -+#define PDP_GAMMA15_GB_GAMMA15_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA16_R_OFFSET (0x0834) -+ -+/* PDP, GAMMA16_R, GAMMA16_R -+*/ -+#define PDP_GAMMA16_R_GAMMA16_R_MASK (0x000003FF) -+#define PDP_GAMMA16_R_GAMMA16_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA16_R_GAMMA16_R_SHIFT (0) -+#define PDP_GAMMA16_R_GAMMA16_R_LENGTH (10) -+#define PDP_GAMMA16_R_GAMMA16_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA16_GB_OFFSET (0x0838) -+ -+/* PDP, GAMMA16_GB, GAMMA16_G -+*/ -+#define PDP_GAMMA16_GB_GAMMA16_G_MASK (0x03FF0000) -+#define PDP_GAMMA16_GB_GAMMA16_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA16_GB_GAMMA16_G_SHIFT (16) -+#define PDP_GAMMA16_GB_GAMMA16_G_LENGTH (10) -+#define PDP_GAMMA16_GB_GAMMA16_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA16_GB, GAMMA16_B -+*/ -+#define PDP_GAMMA16_GB_GAMMA16_B_MASK (0x000003FF) -+#define PDP_GAMMA16_GB_GAMMA16_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA16_GB_GAMMA16_B_SHIFT (0) -+#define PDP_GAMMA16_GB_GAMMA16_B_LENGTH (10) -+#define PDP_GAMMA16_GB_GAMMA16_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA17_R_OFFSET (0x083C) -+ -+/* PDP, GAMMA17_R, GAMMA17_R -+*/ -+#define PDP_GAMMA17_R_GAMMA17_R_MASK (0x000003FF) -+#define PDP_GAMMA17_R_GAMMA17_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA17_R_GAMMA17_R_SHIFT (0) -+#define PDP_GAMMA17_R_GAMMA17_R_LENGTH (10) -+#define PDP_GAMMA17_R_GAMMA17_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA17_GB_OFFSET (0x0840) -+ -+/* PDP, GAMMA17_GB, GAMMA17_G -+*/ -+#define PDP_GAMMA17_GB_GAMMA17_G_MASK (0x03FF0000) -+#define PDP_GAMMA17_GB_GAMMA17_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA17_GB_GAMMA17_G_SHIFT (16) -+#define PDP_GAMMA17_GB_GAMMA17_G_LENGTH (10) -+#define PDP_GAMMA17_GB_GAMMA17_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA17_GB, GAMMA17_B -+*/ -+#define 
PDP_GAMMA17_GB_GAMMA17_B_MASK (0x000003FF) -+#define PDP_GAMMA17_GB_GAMMA17_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA17_GB_GAMMA17_B_SHIFT (0) -+#define PDP_GAMMA17_GB_GAMMA17_B_LENGTH (10) -+#define PDP_GAMMA17_GB_GAMMA17_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA18_R_OFFSET (0x0844) -+ -+/* PDP, GAMMA18_R, GAMMA18_R -+*/ -+#define PDP_GAMMA18_R_GAMMA18_R_MASK (0x000003FF) -+#define PDP_GAMMA18_R_GAMMA18_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA18_R_GAMMA18_R_SHIFT (0) -+#define PDP_GAMMA18_R_GAMMA18_R_LENGTH (10) -+#define PDP_GAMMA18_R_GAMMA18_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA18_GB_OFFSET (0x0848) -+ -+/* PDP, GAMMA18_GB, GAMMA18_G -+*/ -+#define PDP_GAMMA18_GB_GAMMA18_G_MASK (0x03FF0000) -+#define PDP_GAMMA18_GB_GAMMA18_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA18_GB_GAMMA18_G_SHIFT (16) -+#define PDP_GAMMA18_GB_GAMMA18_G_LENGTH (10) -+#define PDP_GAMMA18_GB_GAMMA18_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA18_GB, GAMMA18_B -+*/ -+#define PDP_GAMMA18_GB_GAMMA18_B_MASK (0x000003FF) -+#define PDP_GAMMA18_GB_GAMMA18_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA18_GB_GAMMA18_B_SHIFT (0) -+#define PDP_GAMMA18_GB_GAMMA18_B_LENGTH (10) -+#define PDP_GAMMA18_GB_GAMMA18_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA19_R_OFFSET (0x084C) -+ -+/* PDP, GAMMA19_R, GAMMA19_R -+*/ -+#define PDP_GAMMA19_R_GAMMA19_R_MASK (0x000003FF) -+#define PDP_GAMMA19_R_GAMMA19_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA19_R_GAMMA19_R_SHIFT (0) -+#define PDP_GAMMA19_R_GAMMA19_R_LENGTH (10) -+#define PDP_GAMMA19_R_GAMMA19_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA19_GB_OFFSET (0x0850) -+ -+/* PDP, GAMMA19_GB, GAMMA19_G -+*/ -+#define PDP_GAMMA19_GB_GAMMA19_G_MASK (0x03FF0000) -+#define PDP_GAMMA19_GB_GAMMA19_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA19_GB_GAMMA19_G_SHIFT (16) -+#define PDP_GAMMA19_GB_GAMMA19_G_LENGTH (10) -+#define PDP_GAMMA19_GB_GAMMA19_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA19_GB, GAMMA19_B -+*/ -+#define PDP_GAMMA19_GB_GAMMA19_B_MASK (0x000003FF) -+#define PDP_GAMMA19_GB_GAMMA19_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA19_GB_GAMMA19_B_SHIFT (0) -+#define PDP_GAMMA19_GB_GAMMA19_B_LENGTH (10) -+#define PDP_GAMMA19_GB_GAMMA19_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA20_R_OFFSET (0x0854) -+ -+/* PDP, GAMMA20_R, GAMMA20_R -+*/ -+#define PDP_GAMMA20_R_GAMMA20_R_MASK (0x000003FF) -+#define PDP_GAMMA20_R_GAMMA20_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA20_R_GAMMA20_R_SHIFT (0) -+#define PDP_GAMMA20_R_GAMMA20_R_LENGTH (10) -+#define PDP_GAMMA20_R_GAMMA20_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA20_GB_OFFSET (0x0858) -+ -+/* PDP, GAMMA20_GB, GAMMA20_G -+*/ -+#define PDP_GAMMA20_GB_GAMMA20_G_MASK (0x03FF0000) -+#define PDP_GAMMA20_GB_GAMMA20_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA20_GB_GAMMA20_G_SHIFT (16) -+#define PDP_GAMMA20_GB_GAMMA20_G_LENGTH (10) -+#define PDP_GAMMA20_GB_GAMMA20_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA20_GB, GAMMA20_B -+*/ -+#define PDP_GAMMA20_GB_GAMMA20_B_MASK (0x000003FF) -+#define PDP_GAMMA20_GB_GAMMA20_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA20_GB_GAMMA20_B_SHIFT (0) -+#define PDP_GAMMA20_GB_GAMMA20_B_LENGTH (10) -+#define PDP_GAMMA20_GB_GAMMA20_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA21_R_OFFSET (0x085C) -+ -+/* PDP, GAMMA21_R, GAMMA21_R -+*/ -+#define PDP_GAMMA21_R_GAMMA21_R_MASK (0x000003FF) -+#define PDP_GAMMA21_R_GAMMA21_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA21_R_GAMMA21_R_SHIFT (0) -+#define PDP_GAMMA21_R_GAMMA21_R_LENGTH (10) -+#define PDP_GAMMA21_R_GAMMA21_R_SIGNED_FIELD IMG_FALSE -+ -+#define 
PDP_GAMMA21_GB_OFFSET (0x0860) -+ -+/* PDP, GAMMA21_GB, GAMMA21_G -+*/ -+#define PDP_GAMMA21_GB_GAMMA21_G_MASK (0x03FF0000) -+#define PDP_GAMMA21_GB_GAMMA21_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA21_GB_GAMMA21_G_SHIFT (16) -+#define PDP_GAMMA21_GB_GAMMA21_G_LENGTH (10) -+#define PDP_GAMMA21_GB_GAMMA21_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA21_GB, GAMMA21_B -+*/ -+#define PDP_GAMMA21_GB_GAMMA21_B_MASK (0x000003FF) -+#define PDP_GAMMA21_GB_GAMMA21_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA21_GB_GAMMA21_B_SHIFT (0) -+#define PDP_GAMMA21_GB_GAMMA21_B_LENGTH (10) -+#define PDP_GAMMA21_GB_GAMMA21_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA22_R_OFFSET (0x0864) -+ -+/* PDP, GAMMA22_R, GAMMA22_R -+*/ -+#define PDP_GAMMA22_R_GAMMA22_R_MASK (0x000003FF) -+#define PDP_GAMMA22_R_GAMMA22_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA22_R_GAMMA22_R_SHIFT (0) -+#define PDP_GAMMA22_R_GAMMA22_R_LENGTH (10) -+#define PDP_GAMMA22_R_GAMMA22_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA22_GB_OFFSET (0x0868) -+ -+/* PDP, GAMMA22_GB, GAMMA22_G -+*/ -+#define PDP_GAMMA22_GB_GAMMA22_G_MASK (0x03FF0000) -+#define PDP_GAMMA22_GB_GAMMA22_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA22_GB_GAMMA22_G_SHIFT (16) -+#define PDP_GAMMA22_GB_GAMMA22_G_LENGTH (10) -+#define PDP_GAMMA22_GB_GAMMA22_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA22_GB, GAMMA22_B -+*/ -+#define PDP_GAMMA22_GB_GAMMA22_B_MASK (0x000003FF) -+#define PDP_GAMMA22_GB_GAMMA22_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA22_GB_GAMMA22_B_SHIFT (0) -+#define PDP_GAMMA22_GB_GAMMA22_B_LENGTH (10) -+#define PDP_GAMMA22_GB_GAMMA22_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA23_R_OFFSET (0x086C) -+ -+/* PDP, GAMMA23_R, GAMMA23_R -+*/ -+#define PDP_GAMMA23_R_GAMMA23_R_MASK (0x000003FF) -+#define PDP_GAMMA23_R_GAMMA23_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA23_R_GAMMA23_R_SHIFT (0) -+#define PDP_GAMMA23_R_GAMMA23_R_LENGTH (10) -+#define PDP_GAMMA23_R_GAMMA23_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA23_GB_OFFSET (0x0870) -+ -+/* PDP, GAMMA23_GB, GAMMA23_G -+*/ -+#define PDP_GAMMA23_GB_GAMMA23_G_MASK (0x03FF0000) -+#define PDP_GAMMA23_GB_GAMMA23_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA23_GB_GAMMA23_G_SHIFT (16) -+#define PDP_GAMMA23_GB_GAMMA23_G_LENGTH (10) -+#define PDP_GAMMA23_GB_GAMMA23_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA23_GB, GAMMA23_B -+*/ -+#define PDP_GAMMA23_GB_GAMMA23_B_MASK (0x000003FF) -+#define PDP_GAMMA23_GB_GAMMA23_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA23_GB_GAMMA23_B_SHIFT (0) -+#define PDP_GAMMA23_GB_GAMMA23_B_LENGTH (10) -+#define PDP_GAMMA23_GB_GAMMA23_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA24_R_OFFSET (0x0874) -+ -+/* PDP, GAMMA24_R, GAMMA24_R -+*/ -+#define PDP_GAMMA24_R_GAMMA24_R_MASK (0x000003FF) -+#define PDP_GAMMA24_R_GAMMA24_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA24_R_GAMMA24_R_SHIFT (0) -+#define PDP_GAMMA24_R_GAMMA24_R_LENGTH (10) -+#define PDP_GAMMA24_R_GAMMA24_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA24_GB_OFFSET (0x0878) -+ -+/* PDP, GAMMA24_GB, GAMMA24_G -+*/ -+#define PDP_GAMMA24_GB_GAMMA24_G_MASK (0x03FF0000) -+#define PDP_GAMMA24_GB_GAMMA24_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA24_GB_GAMMA24_G_SHIFT (16) -+#define PDP_GAMMA24_GB_GAMMA24_G_LENGTH (10) -+#define PDP_GAMMA24_GB_GAMMA24_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA24_GB, GAMMA24_B -+*/ -+#define PDP_GAMMA24_GB_GAMMA24_B_MASK (0x000003FF) -+#define PDP_GAMMA24_GB_GAMMA24_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA24_GB_GAMMA24_B_SHIFT (0) -+#define PDP_GAMMA24_GB_GAMMA24_B_LENGTH (10) -+#define 
PDP_GAMMA24_GB_GAMMA24_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA25_R_OFFSET (0x087C) -+ -+/* PDP, GAMMA25_R, GAMMA25_R -+*/ -+#define PDP_GAMMA25_R_GAMMA25_R_MASK (0x000003FF) -+#define PDP_GAMMA25_R_GAMMA25_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA25_R_GAMMA25_R_SHIFT (0) -+#define PDP_GAMMA25_R_GAMMA25_R_LENGTH (10) -+#define PDP_GAMMA25_R_GAMMA25_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA25_GB_OFFSET (0x0880) -+ -+/* PDP, GAMMA25_GB, GAMMA25_G -+*/ -+#define PDP_GAMMA25_GB_GAMMA25_G_MASK (0x03FF0000) -+#define PDP_GAMMA25_GB_GAMMA25_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA25_GB_GAMMA25_G_SHIFT (16) -+#define PDP_GAMMA25_GB_GAMMA25_G_LENGTH (10) -+#define PDP_GAMMA25_GB_GAMMA25_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA25_GB, GAMMA25_B -+*/ -+#define PDP_GAMMA25_GB_GAMMA25_B_MASK (0x000003FF) -+#define PDP_GAMMA25_GB_GAMMA25_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA25_GB_GAMMA25_B_SHIFT (0) -+#define PDP_GAMMA25_GB_GAMMA25_B_LENGTH (10) -+#define PDP_GAMMA25_GB_GAMMA25_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA26_R_OFFSET (0x0884) -+ -+/* PDP, GAMMA26_R, GAMMA26_R -+*/ -+#define PDP_GAMMA26_R_GAMMA26_R_MASK (0x000003FF) -+#define PDP_GAMMA26_R_GAMMA26_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA26_R_GAMMA26_R_SHIFT (0) -+#define PDP_GAMMA26_R_GAMMA26_R_LENGTH (10) -+#define PDP_GAMMA26_R_GAMMA26_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA26_GB_OFFSET (0x0888) -+ -+/* PDP, GAMMA26_GB, GAMMA26_G -+*/ -+#define PDP_GAMMA26_GB_GAMMA26_G_MASK (0x03FF0000) -+#define PDP_GAMMA26_GB_GAMMA26_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA26_GB_GAMMA26_G_SHIFT (16) -+#define PDP_GAMMA26_GB_GAMMA26_G_LENGTH (10) -+#define PDP_GAMMA26_GB_GAMMA26_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA26_GB, GAMMA26_B -+*/ -+#define PDP_GAMMA26_GB_GAMMA26_B_MASK (0x000003FF) -+#define PDP_GAMMA26_GB_GAMMA26_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA26_GB_GAMMA26_B_SHIFT (0) -+#define PDP_GAMMA26_GB_GAMMA26_B_LENGTH (10) -+#define PDP_GAMMA26_GB_GAMMA26_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA27_R_OFFSET (0x088C) -+ -+/* PDP, GAMMA27_R, GAMMA27_R -+*/ -+#define PDP_GAMMA27_R_GAMMA27_R_MASK (0x000003FF) -+#define PDP_GAMMA27_R_GAMMA27_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA27_R_GAMMA27_R_SHIFT (0) -+#define PDP_GAMMA27_R_GAMMA27_R_LENGTH (10) -+#define PDP_GAMMA27_R_GAMMA27_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA27_GB_OFFSET (0x0890) -+ -+/* PDP, GAMMA27_GB, GAMMA27_G -+*/ -+#define PDP_GAMMA27_GB_GAMMA27_G_MASK (0x03FF0000) -+#define PDP_GAMMA27_GB_GAMMA27_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA27_GB_GAMMA27_G_SHIFT (16) -+#define PDP_GAMMA27_GB_GAMMA27_G_LENGTH (10) -+#define PDP_GAMMA27_GB_GAMMA27_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA27_GB, GAMMA27_B -+*/ -+#define PDP_GAMMA27_GB_GAMMA27_B_MASK (0x000003FF) -+#define PDP_GAMMA27_GB_GAMMA27_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA27_GB_GAMMA27_B_SHIFT (0) -+#define PDP_GAMMA27_GB_GAMMA27_B_LENGTH (10) -+#define PDP_GAMMA27_GB_GAMMA27_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA28_R_OFFSET (0x0894) -+ -+/* PDP, GAMMA28_R, GAMMA28_R -+*/ -+#define PDP_GAMMA28_R_GAMMA28_R_MASK (0x000003FF) -+#define PDP_GAMMA28_R_GAMMA28_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA28_R_GAMMA28_R_SHIFT (0) -+#define PDP_GAMMA28_R_GAMMA28_R_LENGTH (10) -+#define PDP_GAMMA28_R_GAMMA28_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA28_GB_OFFSET (0x0898) -+ -+/* PDP, GAMMA28_GB, GAMMA28_G -+*/ -+#define PDP_GAMMA28_GB_GAMMA28_G_MASK (0x03FF0000) -+#define PDP_GAMMA28_GB_GAMMA28_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA28_GB_GAMMA28_G_SHIFT 
(16) -+#define PDP_GAMMA28_GB_GAMMA28_G_LENGTH (10) -+#define PDP_GAMMA28_GB_GAMMA28_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA28_GB, GAMMA28_B -+*/ -+#define PDP_GAMMA28_GB_GAMMA28_B_MASK (0x000003FF) -+#define PDP_GAMMA28_GB_GAMMA28_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA28_GB_GAMMA28_B_SHIFT (0) -+#define PDP_GAMMA28_GB_GAMMA28_B_LENGTH (10) -+#define PDP_GAMMA28_GB_GAMMA28_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA29_R_OFFSET (0x089C) -+ -+/* PDP, GAMMA29_R, GAMMA29_R -+*/ -+#define PDP_GAMMA29_R_GAMMA29_R_MASK (0x000003FF) -+#define PDP_GAMMA29_R_GAMMA29_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA29_R_GAMMA29_R_SHIFT (0) -+#define PDP_GAMMA29_R_GAMMA29_R_LENGTH (10) -+#define PDP_GAMMA29_R_GAMMA29_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA29_GB_OFFSET (0x08A0) -+ -+/* PDP, GAMMA29_GB, GAMMA29_G -+*/ -+#define PDP_GAMMA29_GB_GAMMA29_G_MASK (0x03FF0000) -+#define PDP_GAMMA29_GB_GAMMA29_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA29_GB_GAMMA29_G_SHIFT (16) -+#define PDP_GAMMA29_GB_GAMMA29_G_LENGTH (10) -+#define PDP_GAMMA29_GB_GAMMA29_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA29_GB, GAMMA29_B -+*/ -+#define PDP_GAMMA29_GB_GAMMA29_B_MASK (0x000003FF) -+#define PDP_GAMMA29_GB_GAMMA29_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA29_GB_GAMMA29_B_SHIFT (0) -+#define PDP_GAMMA29_GB_GAMMA29_B_LENGTH (10) -+#define PDP_GAMMA29_GB_GAMMA29_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA30_R_OFFSET (0x08A4) -+ -+/* PDP, GAMMA30_R, GAMMA30_R -+*/ -+#define PDP_GAMMA30_R_GAMMA30_R_MASK (0x000003FF) -+#define PDP_GAMMA30_R_GAMMA30_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA30_R_GAMMA30_R_SHIFT (0) -+#define PDP_GAMMA30_R_GAMMA30_R_LENGTH (10) -+#define PDP_GAMMA30_R_GAMMA30_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA30_GB_OFFSET (0x08A8) -+ -+/* PDP, GAMMA30_GB, GAMMA30_G -+*/ -+#define PDP_GAMMA30_GB_GAMMA30_G_MASK (0x03FF0000) -+#define PDP_GAMMA30_GB_GAMMA30_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA30_GB_GAMMA30_G_SHIFT (16) -+#define PDP_GAMMA30_GB_GAMMA30_G_LENGTH (10) -+#define PDP_GAMMA30_GB_GAMMA30_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA30_GB, GAMMA30_B -+*/ -+#define PDP_GAMMA30_GB_GAMMA30_B_MASK (0x000003FF) -+#define PDP_GAMMA30_GB_GAMMA30_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA30_GB_GAMMA30_B_SHIFT (0) -+#define PDP_GAMMA30_GB_GAMMA30_B_LENGTH (10) -+#define PDP_GAMMA30_GB_GAMMA30_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA31_R_OFFSET (0x08AC) -+ -+/* PDP, GAMMA31_R, GAMMA31_R -+*/ -+#define PDP_GAMMA31_R_GAMMA31_R_MASK (0x000003FF) -+#define PDP_GAMMA31_R_GAMMA31_R_LSBMASK (0x000003FF) -+#define PDP_GAMMA31_R_GAMMA31_R_SHIFT (0) -+#define PDP_GAMMA31_R_GAMMA31_R_LENGTH (10) -+#define PDP_GAMMA31_R_GAMMA31_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA31_GB_OFFSET (0x08B0) -+ -+/* PDP, GAMMA31_GB, GAMMA31_G -+*/ -+#define PDP_GAMMA31_GB_GAMMA31_G_MASK (0x03FF0000) -+#define PDP_GAMMA31_GB_GAMMA31_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA31_GB_GAMMA31_G_SHIFT (16) -+#define PDP_GAMMA31_GB_GAMMA31_G_LENGTH (10) -+#define PDP_GAMMA31_GB_GAMMA31_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA31_GB, GAMMA31_B -+*/ -+#define PDP_GAMMA31_GB_GAMMA31_B_MASK (0x000003FF) -+#define PDP_GAMMA31_GB_GAMMA31_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA31_GB_GAMMA31_B_SHIFT (0) -+#define PDP_GAMMA31_GB_GAMMA31_B_LENGTH (10) -+#define PDP_GAMMA31_GB_GAMMA31_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA32_R_OFFSET (0x08B4) -+ -+/* PDP, GAMMA32_R, GAMMA32_R -+*/ -+#define PDP_GAMMA32_R_GAMMA32_R_MASK (0x000003FF) -+#define PDP_GAMMA32_R_GAMMA32_R_LSBMASK (0x000003FF) -+#define 
PDP_GAMMA32_R_GAMMA32_R_SHIFT (0) -+#define PDP_GAMMA32_R_GAMMA32_R_LENGTH (10) -+#define PDP_GAMMA32_R_GAMMA32_R_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GAMMA32_GB_OFFSET (0x08B8) -+ -+/* PDP, GAMMA32_GB, GAMMA32_G -+*/ -+#define PDP_GAMMA32_GB_GAMMA32_G_MASK (0x03FF0000) -+#define PDP_GAMMA32_GB_GAMMA32_G_LSBMASK (0x000003FF) -+#define PDP_GAMMA32_GB_GAMMA32_G_SHIFT (16) -+#define PDP_GAMMA32_GB_GAMMA32_G_LENGTH (10) -+#define PDP_GAMMA32_GB_GAMMA32_G_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GAMMA32_GB, GAMMA32_B -+*/ -+#define PDP_GAMMA32_GB_GAMMA32_B_MASK (0x000003FF) -+#define PDP_GAMMA32_GB_GAMMA32_B_LSBMASK (0x000003FF) -+#define PDP_GAMMA32_GB_GAMMA32_B_SHIFT (0) -+#define PDP_GAMMA32_GB_GAMMA32_B_LENGTH (10) -+#define PDP_GAMMA32_GB_GAMMA32_B_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VEVENT_OFFSET (0x08BC) -+ -+/* PDP, VEVENT, VEVENT -+*/ -+#define PDP_VEVENT_VEVENT_MASK (0x1FFF0000) -+#define PDP_VEVENT_VEVENT_LSBMASK (0x00001FFF) -+#define PDP_VEVENT_VEVENT_SHIFT (16) -+#define PDP_VEVENT_VEVENT_LENGTH (13) -+#define PDP_VEVENT_VEVENT_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VEVENT, VFETCH -+*/ -+#define PDP_VEVENT_VFETCH_MASK (0x00001FFF) -+#define PDP_VEVENT_VFETCH_LSBMASK (0x00001FFF) -+#define PDP_VEVENT_VFETCH_SHIFT (0) -+#define PDP_VEVENT_VFETCH_LENGTH (13) -+#define PDP_VEVENT_VFETCH_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_HDECTRL_OFFSET (0x08C0) -+ -+/* PDP, HDECTRL, HDES -+*/ -+#define PDP_HDECTRL_HDES_MASK (0x1FFF0000) -+#define PDP_HDECTRL_HDES_LSBMASK (0x00001FFF) -+#define PDP_HDECTRL_HDES_SHIFT (16) -+#define PDP_HDECTRL_HDES_LENGTH (13) -+#define PDP_HDECTRL_HDES_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, HDECTRL, HDEF -+*/ -+#define PDP_HDECTRL_HDEF_MASK (0x00001FFF) -+#define PDP_HDECTRL_HDEF_LSBMASK (0x00001FFF) -+#define PDP_HDECTRL_HDEF_SHIFT (0) -+#define PDP_HDECTRL_HDEF_LENGTH (13) -+#define PDP_HDECTRL_HDEF_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VDECTRL_OFFSET (0x08C4) -+ -+/* PDP, VDECTRL, VDES -+*/ -+#define PDP_VDECTRL_VDES_MASK (0x1FFF0000) -+#define PDP_VDECTRL_VDES_LSBMASK (0x00001FFF) -+#define PDP_VDECTRL_VDES_SHIFT (16) -+#define PDP_VDECTRL_VDES_LENGTH (13) -+#define PDP_VDECTRL_VDES_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VDECTRL, VDEF -+*/ -+#define PDP_VDECTRL_VDEF_MASK (0x00001FFF) -+#define PDP_VDECTRL_VDEF_LSBMASK (0x00001FFF) -+#define PDP_VDECTRL_VDEF_SHIFT (0) -+#define PDP_VDECTRL_VDEF_LENGTH (13) -+#define PDP_VDECTRL_VDEF_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_OPMASK_R_OFFSET (0x08C8) -+ -+/* PDP, OPMASK_R, MASKLEVEL -+*/ -+#define PDP_OPMASK_R_MASKLEVEL_MASK (0x80000000) -+#define PDP_OPMASK_R_MASKLEVEL_LSBMASK (0x00000001) -+#define PDP_OPMASK_R_MASKLEVEL_SHIFT (31) -+#define PDP_OPMASK_R_MASKLEVEL_LENGTH (1) -+#define PDP_OPMASK_R_MASKLEVEL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, OPMASK_R, BLANKLEVEL -+*/ -+#define PDP_OPMASK_R_BLANKLEVEL_MASK (0x40000000) -+#define PDP_OPMASK_R_BLANKLEVEL_LSBMASK (0x00000001) -+#define PDP_OPMASK_R_BLANKLEVEL_SHIFT (30) -+#define PDP_OPMASK_R_BLANKLEVEL_LENGTH (1) -+#define PDP_OPMASK_R_BLANKLEVEL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, OPMASK_R, MASKR -+*/ -+#define PDP_OPMASK_R_MASKR_MASK (0x000003FF) -+#define PDP_OPMASK_R_MASKR_LSBMASK (0x000003FF) -+#define PDP_OPMASK_R_MASKR_SHIFT (0) -+#define PDP_OPMASK_R_MASKR_LENGTH (10) -+#define PDP_OPMASK_R_MASKR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_OPMASK_GB_OFFSET (0x08CC) -+ -+/* PDP, OPMASK_GB, MASKG -+*/ -+#define PDP_OPMASK_GB_MASKG_MASK (0x03FF0000) -+#define PDP_OPMASK_GB_MASKG_LSBMASK (0x000003FF) -+#define PDP_OPMASK_GB_MASKG_SHIFT (16) -+#define 
PDP_OPMASK_GB_MASKG_LENGTH (10) -+#define PDP_OPMASK_GB_MASKG_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, OPMASK_GB, MASKB -+*/ -+#define PDP_OPMASK_GB_MASKB_MASK (0x000003FF) -+#define PDP_OPMASK_GB_MASKB_LSBMASK (0x000003FF) -+#define PDP_OPMASK_GB_MASKB_SHIFT (0) -+#define PDP_OPMASK_GB_MASKB_LENGTH (10) -+#define PDP_OPMASK_GB_MASKB_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_REGLD_ADDR_CTRL_OFFSET (0x08D0) -+ -+/* PDP, REGLD_ADDR_CTRL, REGLD_ADDRIN -+*/ -+#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_MASK (0xFFFFFFF0) -+#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LSBMASK (0x0FFFFFFF) -+#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SHIFT (4) -+#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LENGTH (28) -+#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_REGLD_ADDR_STAT_OFFSET (0x08D4) -+ -+/* PDP, REGLD_ADDR_STAT, REGLD_ADDROUT -+*/ -+#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_MASK (0xFFFFFFF0) -+#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LSBMASK (0x0FFFFFFF) -+#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SHIFT (4) -+#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LENGTH (28) -+#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_REGLD_STAT_OFFSET (0x08D8) -+ -+/* PDP, REGLD_STAT, REGLD_ADDREN -+*/ -+#define PDP_REGLD_STAT_REGLD_ADDREN_MASK (0x00800000) -+#define PDP_REGLD_STAT_REGLD_ADDREN_LSBMASK (0x00000001) -+#define PDP_REGLD_STAT_REGLD_ADDREN_SHIFT (23) -+#define PDP_REGLD_STAT_REGLD_ADDREN_LENGTH (1) -+#define PDP_REGLD_STAT_REGLD_ADDREN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_REGLD_CTRL_OFFSET (0x08DC) -+ -+/* PDP, REGLD_CTRL, REGLD_ADDRLEN -+*/ -+#define PDP_REGLD_CTRL_REGLD_ADDRLEN_MASK (0xFF000000) -+#define PDP_REGLD_CTRL_REGLD_ADDRLEN_LSBMASK (0x000000FF) -+#define PDP_REGLD_CTRL_REGLD_ADDRLEN_SHIFT (24) -+#define PDP_REGLD_CTRL_REGLD_ADDRLEN_LENGTH (8) -+#define PDP_REGLD_CTRL_REGLD_ADDRLEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, REGLD_CTRL, REGLD_VAL -+*/ -+#define PDP_REGLD_CTRL_REGLD_VAL_MASK (0x00800000) -+#define PDP_REGLD_CTRL_REGLD_VAL_LSBMASK (0x00000001) -+#define PDP_REGLD_CTRL_REGLD_VAL_SHIFT (23) -+#define PDP_REGLD_CTRL_REGLD_VAL_LENGTH (1) -+#define PDP_REGLD_CTRL_REGLD_VAL_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_UPDCTRL_OFFSET (0x08E0) -+ -+/* PDP, UPDCTRL, UPDFIELD -+*/ -+#define PDP_UPDCTRL_UPDFIELD_MASK (0x00000001) -+#define PDP_UPDCTRL_UPDFIELD_LSBMASK (0x00000001) -+#define PDP_UPDCTRL_UPDFIELD_SHIFT (0) -+#define PDP_UPDCTRL_UPDFIELD_LENGTH (1) -+#define PDP_UPDCTRL_UPDFIELD_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_INTCTRL_OFFSET (0x08E4) -+ -+/* PDP, PVR_PDP_INTCTRL, HBLNK_LINE -+*/ -+#define PDP_INTCTRL_HBLNK_LINE_MASK (0x00010000) -+#define PDP_INTCTRL_HBLNK_LINE_LSBMASK (0x00000001) -+#define PDP_INTCTRL_HBLNK_LINE_SHIFT (16) -+#define PDP_INTCTRL_HBLNK_LINE_LENGTH (1) -+#define PDP_INTCTRL_HBLNK_LINE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PVR_PDP_INTCTRL, HBLNK_LINENO -+*/ -+#define PDP_INTCTRL_HBLNK_LINENO_MASK (0x00001FFF) -+#define PDP_INTCTRL_HBLNK_LINENO_LSBMASK (0x00001FFF) -+#define PDP_INTCTRL_HBLNK_LINENO_SHIFT (0) -+#define PDP_INTCTRL_HBLNK_LINENO_LENGTH (13) -+#define PDP_INTCTRL_HBLNK_LINENO_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_PDISETUP_OFFSET (0x0900) -+ -+/* PDP, PDISETUP, PDI_BLNKLVL -+*/ -+#define PDP_PDISETUP_PDI_BLNKLVL_MASK (0x00000040) -+#define PDP_PDISETUP_PDI_BLNKLVL_LSBMASK (0x00000001) -+#define PDP_PDISETUP_PDI_BLNKLVL_SHIFT (6) -+#define PDP_PDISETUP_PDI_BLNKLVL_LENGTH (1) -+#define PDP_PDISETUP_PDI_BLNKLVL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDISETUP, PDI_BLNK -+*/ -+#define PDP_PDISETUP_PDI_BLNK_MASK 
(0x00000020) -+#define PDP_PDISETUP_PDI_BLNK_LSBMASK (0x00000001) -+#define PDP_PDISETUP_PDI_BLNK_SHIFT (5) -+#define PDP_PDISETUP_PDI_BLNK_LENGTH (1) -+#define PDP_PDISETUP_PDI_BLNK_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDISETUP, PDI_PWR -+*/ -+#define PDP_PDISETUP_PDI_PWR_MASK (0x00000010) -+#define PDP_PDISETUP_PDI_PWR_LSBMASK (0x00000001) -+#define PDP_PDISETUP_PDI_PWR_SHIFT (4) -+#define PDP_PDISETUP_PDI_PWR_LENGTH (1) -+#define PDP_PDISETUP_PDI_PWR_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDISETUP, PDI_EN -+*/ -+#define PDP_PDISETUP_PDI_EN_MASK (0x00000008) -+#define PDP_PDISETUP_PDI_EN_LSBMASK (0x00000001) -+#define PDP_PDISETUP_PDI_EN_SHIFT (3) -+#define PDP_PDISETUP_PDI_EN_LENGTH (1) -+#define PDP_PDISETUP_PDI_EN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDISETUP, PDI_GDEN -+*/ -+#define PDP_PDISETUP_PDI_GDEN_MASK (0x00000004) -+#define PDP_PDISETUP_PDI_GDEN_LSBMASK (0x00000001) -+#define PDP_PDISETUP_PDI_GDEN_SHIFT (2) -+#define PDP_PDISETUP_PDI_GDEN_LENGTH (1) -+#define PDP_PDISETUP_PDI_GDEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDISETUP, PDI_NFEN -+*/ -+#define PDP_PDISETUP_PDI_NFEN_MASK (0x00000002) -+#define PDP_PDISETUP_PDI_NFEN_LSBMASK (0x00000001) -+#define PDP_PDISETUP_PDI_NFEN_SHIFT (1) -+#define PDP_PDISETUP_PDI_NFEN_LENGTH (1) -+#define PDP_PDISETUP_PDI_NFEN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDISETUP, PDI_CR -+*/ -+#define PDP_PDISETUP_PDI_CR_MASK (0x00000001) -+#define PDP_PDISETUP_PDI_CR_LSBMASK (0x00000001) -+#define PDP_PDISETUP_PDI_CR_SHIFT (0) -+#define PDP_PDISETUP_PDI_CR_LENGTH (1) -+#define PDP_PDISETUP_PDI_CR_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_PDITIMING0_OFFSET (0x0904) -+ -+/* PDP, PDITIMING0, PDI_PWRSVGD -+*/ -+#define PDP_PDITIMING0_PDI_PWRSVGD_MASK (0x0F000000) -+#define PDP_PDITIMING0_PDI_PWRSVGD_LSBMASK (0x0000000F) -+#define PDP_PDITIMING0_PDI_PWRSVGD_SHIFT (24) -+#define PDP_PDITIMING0_PDI_PWRSVGD_LENGTH (4) -+#define PDP_PDITIMING0_PDI_PWRSVGD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDITIMING0, PDI_LSDEL -+*/ -+#define PDP_PDITIMING0_PDI_LSDEL_MASK (0x007F0000) -+#define PDP_PDITIMING0_PDI_LSDEL_LSBMASK (0x0000007F) -+#define PDP_PDITIMING0_PDI_LSDEL_SHIFT (16) -+#define PDP_PDITIMING0_PDI_LSDEL_LENGTH (7) -+#define PDP_PDITIMING0_PDI_LSDEL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDITIMING0, PDI_PWRSV2GD2 -+*/ -+#define PDP_PDITIMING0_PDI_PWRSV2GD2_MASK (0x000003FF) -+#define PDP_PDITIMING0_PDI_PWRSV2GD2_LSBMASK (0x000003FF) -+#define PDP_PDITIMING0_PDI_PWRSV2GD2_SHIFT (0) -+#define PDP_PDITIMING0_PDI_PWRSV2GD2_LENGTH (10) -+#define PDP_PDITIMING0_PDI_PWRSV2GD2_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_PDITIMING1_OFFSET (0x0908) -+ -+/* PDP, PDITIMING1, PDI_NLDEL -+*/ -+#define PDP_PDITIMING1_PDI_NLDEL_MASK (0x000F0000) -+#define PDP_PDITIMING1_PDI_NLDEL_LSBMASK (0x0000000F) -+#define PDP_PDITIMING1_PDI_NLDEL_SHIFT (16) -+#define PDP_PDITIMING1_PDI_NLDEL_LENGTH (4) -+#define PDP_PDITIMING1_PDI_NLDEL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDITIMING1, PDI_ACBDEL -+*/ -+#define PDP_PDITIMING1_PDI_ACBDEL_MASK (0x000003FF) -+#define PDP_PDITIMING1_PDI_ACBDEL_LSBMASK (0x000003FF) -+#define PDP_PDITIMING1_PDI_ACBDEL_SHIFT (0) -+#define PDP_PDITIMING1_PDI_ACBDEL_LENGTH (10) -+#define PDP_PDITIMING1_PDI_ACBDEL_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_PDICOREID_OFFSET (0x090C) -+ -+/* PDP, PDICOREID, PDI_GROUP_ID -+*/ -+#define PDP_PDICOREID_PDI_GROUP_ID_MASK (0xFF000000) -+#define PDP_PDICOREID_PDI_GROUP_ID_LSBMASK (0x000000FF) -+#define PDP_PDICOREID_PDI_GROUP_ID_SHIFT (24) -+#define PDP_PDICOREID_PDI_GROUP_ID_LENGTH (8) -+#define PDP_PDICOREID_PDI_GROUP_ID_SIGNED_FIELD IMG_FALSE 
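
For orientation, the MASK/LSBMASK/SHIFT constants added in this header follow the usual register-field convention: `<REG>_<FIELD>_MASK` selects the field in place, `<FIELD>_SHIFT` is its bit offset, and `<FIELD>_LSBMASK` is the same field right-aligned. A minimal sketch of how such fields are typically extracted and inserted is shown below; the helper names (pdp_field_get, pdp_field_set, pdp_pdi_group_id) are illustrative assumptions and are not part of this patch.

/* Illustrative helpers only -- not introduced by this patch. They assume
 * the convention used throughout this header: <REG>_<FIELD>_MASK selects
 * the field in place and <REG>_<FIELD>_SHIFT is its bit offset.
 */
#include <linux/types.h>

static inline u32 pdp_field_get(u32 reg_val, u32 mask, u32 shift)
{
	return (reg_val & mask) >> shift;
}

static inline u32 pdp_field_set(u32 reg_val, u32 mask, u32 shift, u32 field_val)
{
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

/* Example: decode the PDI group ID from a raw PDICOREID register value. */
static inline u32 pdp_pdi_group_id(u32 coreid_val)
{
	return pdp_field_get(coreid_val,
			     PDP_PDICOREID_PDI_GROUP_ID_MASK,
			     PDP_PDICOREID_PDI_GROUP_ID_SHIFT);
}
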
-+ -+/* PDP, PDICOREID, PDI_CORE_ID -+*/ -+#define PDP_PDICOREID_PDI_CORE_ID_MASK (0x00FF0000) -+#define PDP_PDICOREID_PDI_CORE_ID_LSBMASK (0x000000FF) -+#define PDP_PDICOREID_PDI_CORE_ID_SHIFT (16) -+#define PDP_PDICOREID_PDI_CORE_ID_LENGTH (8) -+#define PDP_PDICOREID_PDI_CORE_ID_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDICOREID, PDI_CONFIG_ID -+*/ -+#define PDP_PDICOREID_PDI_CONFIG_ID_MASK (0x0000FFFF) -+#define PDP_PDICOREID_PDI_CONFIG_ID_LSBMASK (0x0000FFFF) -+#define PDP_PDICOREID_PDI_CONFIG_ID_SHIFT (0) -+#define PDP_PDICOREID_PDI_CONFIG_ID_LENGTH (16) -+#define PDP_PDICOREID_PDI_CONFIG_ID_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_PDICOREREV_OFFSET (0x0910) -+ -+/* PDP, PDICOREREV, PDI_MAJOR_REV -+*/ -+#define PDP_PDICOREREV_PDI_MAJOR_REV_MASK (0x00FF0000) -+#define PDP_PDICOREREV_PDI_MAJOR_REV_LSBMASK (0x000000FF) -+#define PDP_PDICOREREV_PDI_MAJOR_REV_SHIFT (16) -+#define PDP_PDICOREREV_PDI_MAJOR_REV_LENGTH (8) -+#define PDP_PDICOREREV_PDI_MAJOR_REV_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDICOREREV, PDI_MINOR_REV -+*/ -+#define PDP_PDICOREREV_PDI_MINOR_REV_MASK (0x0000FF00) -+#define PDP_PDICOREREV_PDI_MINOR_REV_LSBMASK (0x000000FF) -+#define PDP_PDICOREREV_PDI_MINOR_REV_SHIFT (8) -+#define PDP_PDICOREREV_PDI_MINOR_REV_LENGTH (8) -+#define PDP_PDICOREREV_PDI_MINOR_REV_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, PDICOREREV, PDI_MAINT_REV -+*/ -+#define PDP_PDICOREREV_PDI_MAINT_REV_MASK (0x000000FF) -+#define PDP_PDICOREREV_PDI_MAINT_REV_LSBMASK (0x000000FF) -+#define PDP_PDICOREREV_PDI_MAINT_REV_SHIFT (0) -+#define PDP_PDICOREREV_PDI_MAINT_REV_LENGTH (8) -+#define PDP_PDICOREREV_PDI_MAINT_REV_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DITHERMATRIX2_OFFSET (0x0920) -+ -+/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y1 -+*/ -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_MASK (0x000000C0) -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LSBMASK (0x00000003) -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SHIFT (6) -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LENGTH (2) -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y1 -+*/ -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_MASK (0x00000030) -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LSBMASK (0x00000003) -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SHIFT (4) -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LENGTH (2) -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y0 -+*/ -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_MASK (0x0000000C) -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LSBMASK (0x00000003) -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SHIFT (2) -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LENGTH (2) -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y0 -+*/ -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_MASK (0x00000003) -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LSBMASK (0x00000003) -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SHIFT (0) -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LENGTH (2) -+#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DITHERMATRIX4_0_OFFSET (0x0924) -+ -+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y1 -+*/ -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_MASK (0xF0000000) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LSBMASK (0x0000000F) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SHIFT (28) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LENGTH (4) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SIGNED_FIELD 
IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y1 -+*/ -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_MASK (0x0F000000) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LSBMASK (0x0000000F) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SHIFT (24) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LENGTH (4) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y1 -+*/ -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_MASK (0x00F00000) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LSBMASK (0x0000000F) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SHIFT (20) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LENGTH (4) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y1 -+*/ -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_MASK (0x000F0000) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LSBMASK (0x0000000F) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SHIFT (16) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LENGTH (4) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y0 -+*/ -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_MASK (0x0000F000) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LSBMASK (0x0000000F) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SHIFT (12) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LENGTH (4) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y0 -+*/ -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_MASK (0x00000F00) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LSBMASK (0x0000000F) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SHIFT (8) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LENGTH (4) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y0 -+*/ -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_MASK (0x000000F0) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LSBMASK (0x0000000F) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SHIFT (4) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LENGTH (4) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y0 -+*/ -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_MASK (0x0000000F) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LSBMASK (0x0000000F) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SHIFT (0) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LENGTH (4) -+#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DITHERMATRIX4_1_OFFSET (0x0928) -+ -+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y3 -+*/ -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_MASK (0xF0000000) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LSBMASK (0x0000000F) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SHIFT (28) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LENGTH (4) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y3 -+*/ -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_MASK (0x0F000000) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LSBMASK (0x0000000F) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SHIFT (24) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LENGTH (4) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y3 -+*/ -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_MASK 
(0x00F00000) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LSBMASK (0x0000000F) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SHIFT (20) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LENGTH (4) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y3 -+*/ -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_MASK (0x000F0000) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LSBMASK (0x0000000F) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SHIFT (16) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LENGTH (4) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y2 -+*/ -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_MASK (0x0000F000) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LSBMASK (0x0000000F) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SHIFT (12) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LENGTH (4) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y2 -+*/ -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_MASK (0x00000F00) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LSBMASK (0x0000000F) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SHIFT (8) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LENGTH (4) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y2 -+*/ -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_MASK (0x000000F0) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LSBMASK (0x0000000F) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SHIFT (4) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LENGTH (4) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y2 -+*/ -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_MASK (0x0000000F) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LSBMASK (0x0000000F) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SHIFT (0) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LENGTH (4) -+#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DITHERMATRIX8_0_OFFSET (0x092C) -+ -+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X4Y0 -+*/ -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_MASK (0x3F000000) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SHIFT (24) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LENGTH (6) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X3Y0 -+*/ -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_MASK (0x00FC0000) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SHIFT (18) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LENGTH (6) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X2Y0 -+*/ -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_MASK (0x0003F000) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SHIFT (12) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LENGTH (6) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X1Y0 -+*/ -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_MASK (0x00000FC0) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LSBMASK (0x0000003F) -+#define 
PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SHIFT (6) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LENGTH (6) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X0Y0 -+*/ -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_MASK (0x0000003F) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SHIFT (0) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LENGTH (6) -+#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DITHERMATRIX8_1_OFFSET (0x0930) -+ -+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X1Y1 -+*/ -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_MASK (0x3F000000) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SHIFT (24) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LENGTH (6) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X0Y1 -+*/ -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_MASK (0x00FC0000) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SHIFT (18) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LENGTH (6) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X7Y0 -+*/ -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_MASK (0x0003F000) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SHIFT (12) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LENGTH (6) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X6Y0 -+*/ -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_MASK (0x00000FC0) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SHIFT (6) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LENGTH (6) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X5Y0 -+*/ -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_MASK (0x0000003F) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SHIFT (0) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LENGTH (6) -+#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DITHERMATRIX8_2_OFFSET (0x0934) -+ -+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X6Y1 -+*/ -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_MASK (0x3F000000) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SHIFT (24) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LENGTH (6) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X5Y1 -+*/ -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_MASK (0x00FC0000) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SHIFT (18) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LENGTH (6) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X4Y1 -+*/ -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_MASK (0x0003F000) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SHIFT (12) -+#define 
PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LENGTH (6) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X3Y1 -+*/ -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_MASK (0x00000FC0) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SHIFT (6) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LENGTH (6) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X2Y1 -+*/ -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_MASK (0x0000003F) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SHIFT (0) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LENGTH (6) -+#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DITHERMATRIX8_3_OFFSET (0x0938) -+ -+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X3Y2 -+*/ -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_MASK (0x3F000000) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SHIFT (24) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LENGTH (6) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X2Y2 -+*/ -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_MASK (0x00FC0000) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SHIFT (18) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LENGTH (6) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X1Y2 -+*/ -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_MASK (0x0003F000) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SHIFT (12) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LENGTH (6) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X0Y2 -+*/ -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_MASK (0x00000FC0) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SHIFT (6) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LENGTH (6) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X7Y1 -+*/ -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_MASK (0x0000003F) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SHIFT (0) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LENGTH (6) -+#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DITHERMATRIX8_4_OFFSET (0x093C) -+ -+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X0Y3 -+*/ -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_MASK (0x3F000000) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SHIFT (24) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LENGTH (6) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X7Y2 -+*/ -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_MASK (0x00FC0000) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SHIFT (18) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LENGTH (6) -+#define 
PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X6Y2 -+*/ -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_MASK (0x0003F000) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SHIFT (12) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LENGTH (6) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X5Y2 -+*/ -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_MASK (0x00000FC0) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SHIFT (6) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LENGTH (6) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X4Y2 -+*/ -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_MASK (0x0000003F) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SHIFT (0) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LENGTH (6) -+#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DITHERMATRIX8_5_OFFSET (0x0940) -+ -+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X5Y3 -+*/ -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_MASK (0x3F000000) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SHIFT (24) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LENGTH (6) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X4Y3 -+*/ -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_MASK (0x00FC0000) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SHIFT (18) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LENGTH (6) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X3Y3 -+*/ -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_MASK (0x0003F000) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SHIFT (12) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LENGTH (6) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X2Y3 -+*/ -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_MASK (0x00000FC0) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SHIFT (6) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LENGTH (6) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X1Y3 -+*/ -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_MASK (0x0000003F) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SHIFT (0) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LENGTH (6) -+#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DITHERMATRIX8_6_OFFSET (0x0944) -+ -+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X2Y4 -+*/ -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_MASK (0x3F000000) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SHIFT (24) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LENGTH (6) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SIGNED_FIELD IMG_FALSE -+ 
-+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X1Y4 -+*/ -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_MASK (0x00FC0000) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SHIFT (18) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LENGTH (6) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X0Y4 -+*/ -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_MASK (0x0003F000) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SHIFT (12) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LENGTH (6) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X7Y3 -+*/ -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_MASK (0x00000FC0) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SHIFT (6) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LENGTH (6) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X6Y3 -+*/ -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_MASK (0x0000003F) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SHIFT (0) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LENGTH (6) -+#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DITHERMATRIX8_7_OFFSET (0x0948) -+ -+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X7Y4 -+*/ -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_MASK (0x3F000000) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SHIFT (24) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LENGTH (6) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X6Y4 -+*/ -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_MASK (0x00FC0000) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SHIFT (18) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LENGTH (6) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X5Y4 -+*/ -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_MASK (0x0003F000) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SHIFT (12) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LENGTH (6) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X4Y4 -+*/ -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_MASK (0x00000FC0) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SHIFT (6) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LENGTH (6) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X3Y4 -+*/ -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_MASK (0x0000003F) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SHIFT (0) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LENGTH (6) -+#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DITHERMATRIX8_8_OFFSET (0x094C) -+ -+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X4Y5 -+*/ -+#define 
PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_MASK (0x3F000000) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SHIFT (24) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LENGTH (6) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X3Y5 -+*/ -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_MASK (0x00FC0000) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SHIFT (18) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LENGTH (6) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X2Y5 -+*/ -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_MASK (0x0003F000) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SHIFT (12) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LENGTH (6) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X1Y5 -+*/ -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_MASK (0x00000FC0) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SHIFT (6) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LENGTH (6) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X0Y5 -+*/ -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_MASK (0x0000003F) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SHIFT (0) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LENGTH (6) -+#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DITHERMATRIX8_9_OFFSET (0x0950) -+ -+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X1Y6 -+*/ -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_MASK (0x3F000000) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SHIFT (24) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LENGTH (6) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X0Y6 -+*/ -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_MASK (0x00FC0000) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SHIFT (18) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LENGTH (6) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X7Y5 -+*/ -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_MASK (0x0003F000) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SHIFT (12) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LENGTH (6) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X6Y5 -+*/ -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_MASK (0x00000FC0) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SHIFT (6) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LENGTH (6) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X5Y5 -+*/ -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_MASK (0x0000003F) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LSBMASK 
(0x0000003F) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SHIFT (0) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LENGTH (6) -+#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DITHERMATRIX8_10_OFFSET (0x0954) -+ -+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X6Y6 -+*/ -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_MASK (0x3F000000) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SHIFT (24) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LENGTH (6) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X5Y6 -+*/ -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_MASK (0x00FC0000) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SHIFT (18) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LENGTH (6) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X4Y6 -+*/ -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_MASK (0x0003F000) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SHIFT (12) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LENGTH (6) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X3Y6 -+*/ -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_MASK (0x00000FC0) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SHIFT (6) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LENGTH (6) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X2Y6 -+*/ -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_MASK (0x0000003F) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SHIFT (0) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LENGTH (6) -+#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DITHERMATRIX8_11_OFFSET (0x0958) -+ -+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X3Y7 -+*/ -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_MASK (0x3F000000) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SHIFT (24) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LENGTH (6) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X2Y7 -+*/ -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_MASK (0x00FC0000) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SHIFT (18) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LENGTH (6) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X1Y7 -+*/ -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_MASK (0x0003F000) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SHIFT (12) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LENGTH (6) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X0Y7 -+*/ -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_MASK (0x00000FC0) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LSBMASK (0x0000003F) 
-+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SHIFT (6) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LENGTH (6) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X7Y6 -+*/ -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_MASK (0x0000003F) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SHIFT (0) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LENGTH (6) -+#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_DITHERMATRIX8_12_OFFSET (0x095C) -+ -+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X7Y7 -+*/ -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_MASK (0x00FC0000) -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SHIFT (18) -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LENGTH (6) -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X6Y7 -+*/ -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_MASK (0x0003F000) -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SHIFT (12) -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LENGTH (6) -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X5Y7 -+*/ -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_MASK (0x00000FC0) -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SHIFT (6) -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LENGTH (6) -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X4Y7 -+*/ -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_MASK (0x0000003F) -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LSBMASK (0x0000003F) -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SHIFT (0) -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LENGTH (6) -+#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH1_MEMCTRL_OFFSET (0x0960) -+ -+/* PDP, GRPH1_MEMCTRL, GRPH1_LOCAL_GLOBAL_MEMCTRL -+*/ -+#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) -+#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) -+#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) -+#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) -+#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1_MEMCTRL, GRPH1_BURSTLEN -+*/ -+#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_MASK (0x000000FF) -+#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LSBMASK (0x000000FF) -+#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SHIFT (0) -+#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LENGTH (8) -+#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH1_MEM_THRESH_OFFSET (0x0964) -+ -+/* PDP, GRPH1_MEM_THRESH, GRPH1_UVTHRESHOLD -+*/ -+#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_MASK (0xFF000000) -+#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SHIFT (24) -+#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LENGTH (8) -+#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1_MEM_THRESH, GRPH1_YTHRESHOLD -+*/ -+#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_MASK (0x001FF000) -+#define 
PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LSBMASK (0x000001FF) -+#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SHIFT (12) -+#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LENGTH (9) -+#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1_MEM_THRESH, GRPH1_THRESHOLD -+*/ -+#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_MASK (0x000001FF) -+#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LSBMASK (0x000001FF) -+#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SHIFT (0) -+#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LENGTH (9) -+#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH2_MEMCTRL_OFFSET (0x0968) -+ -+/* PDP, GRPH2_MEMCTRL, GRPH2_LOCAL_GLOBAL_MEMCTRL -+*/ -+#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) -+#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) -+#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) -+#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) -+#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2_MEMCTRL, GRPH2_BURSTLEN -+*/ -+#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_MASK (0x000000FF) -+#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LSBMASK (0x000000FF) -+#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SHIFT (0) -+#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LENGTH (8) -+#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH2_MEM_THRESH_OFFSET (0x096C) -+ -+/* PDP, GRPH2_MEM_THRESH, GRPH2_UVTHRESHOLD -+*/ -+#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_MASK (0xFF000000) -+#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SHIFT (24) -+#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LENGTH (8) -+#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2_MEM_THRESH, GRPH2_YTHRESHOLD -+*/ -+#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_MASK (0x001FF000) -+#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LSBMASK (0x000001FF) -+#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SHIFT (12) -+#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LENGTH (9) -+#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2_MEM_THRESH, GRPH2_THRESHOLD -+*/ -+#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_MASK (0x000001FF) -+#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LSBMASK (0x000001FF) -+#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SHIFT (0) -+#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LENGTH (9) -+#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH3_MEMCTRL_OFFSET (0x0970) -+ -+/* PDP, GRPH3_MEMCTRL, GRPH3_LOCAL_GLOBAL_MEMCTRL -+*/ -+#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) -+#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) -+#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) -+#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) -+#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3_MEMCTRL, GRPH3_BURSTLEN -+*/ -+#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_MASK (0x000000FF) -+#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LSBMASK (0x000000FF) -+#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SHIFT (0) -+#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LENGTH (8) -+#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH3_MEM_THRESH_OFFSET (0x0974) -+ -+/* PDP, GRPH3_MEM_THRESH, GRPH3_UVTHRESHOLD -+*/ -+#define 
PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_MASK (0xFF000000) -+#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SHIFT (24) -+#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LENGTH (8) -+#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3_MEM_THRESH, GRPH3_YTHRESHOLD -+*/ -+#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_MASK (0x001FF000) -+#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LSBMASK (0x000001FF) -+#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SHIFT (12) -+#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LENGTH (9) -+#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3_MEM_THRESH, GRPH3_THRESHOLD -+*/ -+#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_MASK (0x000001FF) -+#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LSBMASK (0x000001FF) -+#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SHIFT (0) -+#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LENGTH (9) -+#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH4_MEMCTRL_OFFSET (0x0978) -+ -+/* PDP, GRPH4_MEMCTRL, GRPH4_LOCAL_GLOBAL_MEMCTRL -+*/ -+#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) -+#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) -+#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) -+#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) -+#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4_MEMCTRL, GRPH4_BURSTLEN -+*/ -+#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_MASK (0x000000FF) -+#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LSBMASK (0x000000FF) -+#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SHIFT (0) -+#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LENGTH (8) -+#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH4_MEM_THRESH_OFFSET (0x097C) -+ -+/* PDP, GRPH4_MEM_THRESH, GRPH4_UVTHRESHOLD -+*/ -+#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_MASK (0xFF000000) -+#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SHIFT (24) -+#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LENGTH (8) -+#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4_MEM_THRESH, GRPH4_YTHRESHOLD -+*/ -+#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_MASK (0x001FF000) -+#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LSBMASK (0x000001FF) -+#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SHIFT (12) -+#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LENGTH (9) -+#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4_MEM_THRESH, GRPH4_THRESHOLD -+*/ -+#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_MASK (0x000001FF) -+#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LSBMASK (0x000001FF) -+#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SHIFT (0) -+#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LENGTH (9) -+#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1_MEMCTRL_OFFSET (0x0980) -+ -+/* PDP, VID1_MEMCTRL, VID1_LOCAL_GLOBAL_MEMCTRL -+*/ -+#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) -+#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) -+#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) -+#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) -+#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1_MEMCTRL, VID1_BURSTLEN 
-+*/ -+#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_MASK (0x000000FF) -+#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_LSBMASK (0x000000FF) -+#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_SHIFT (0) -+#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_LENGTH (8) -+#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1_MEM_THRESH_OFFSET (0x0984) -+ -+/* PDP, VID1_MEM_THRESH, VID1_UVTHRESHOLD -+*/ -+#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_MASK (0xFF000000) -+#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SHIFT (24) -+#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LENGTH (8) -+#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1_MEM_THRESH, VID1_YTHRESHOLD -+*/ -+#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_MASK (0x001FF000) -+#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LSBMASK (0x000001FF) -+#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SHIFT (12) -+#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LENGTH (9) -+#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1_MEM_THRESH, VID1_THRESHOLD -+*/ -+#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_MASK (0x000001FF) -+#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LSBMASK (0x000001FF) -+#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SHIFT (0) -+#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LENGTH (9) -+#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2_MEMCTRL_OFFSET (0x0988) -+ -+/* PDP, VID2_MEMCTRL, VID2_LOCAL_GLOBAL_MEMCTRL -+*/ -+#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) -+#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) -+#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) -+#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) -+#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2_MEMCTRL, VID2_BURSTLEN -+*/ -+#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_MASK (0x000000FF) -+#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_LSBMASK (0x000000FF) -+#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_SHIFT (0) -+#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_LENGTH (8) -+#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2_MEM_THRESH_OFFSET (0x098C) -+ -+/* PDP, VID2_MEM_THRESH, VID2_UVTHRESHOLD -+*/ -+#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_MASK (0xFF000000) -+#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SHIFT (24) -+#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LENGTH (8) -+#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2_MEM_THRESH, VID2_YTHRESHOLD -+*/ -+#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_MASK (0x001FF000) -+#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LSBMASK (0x000001FF) -+#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SHIFT (12) -+#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LENGTH (9) -+#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2_MEM_THRESH, VID2_THRESHOLD -+*/ -+#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_MASK (0x000001FF) -+#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LSBMASK (0x000001FF) -+#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SHIFT (0) -+#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LENGTH (9) -+#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3_MEMCTRL_OFFSET (0x0990) -+ -+/* PDP, VID3_MEMCTRL, VID3_LOCAL_GLOBAL_MEMCTRL -+*/ -+#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) -+#define 
PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) -+#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) -+#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) -+#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3_MEMCTRL, VID3_BURSTLEN -+*/ -+#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_MASK (0x000000FF) -+#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_LSBMASK (0x000000FF) -+#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_SHIFT (0) -+#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_LENGTH (8) -+#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3_MEM_THRESH_OFFSET (0x0994) -+ -+/* PDP, VID3_MEM_THRESH, VID3_UVTHRESHOLD -+*/ -+#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_MASK (0xFF000000) -+#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SHIFT (24) -+#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LENGTH (8) -+#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3_MEM_THRESH, VID3_YTHRESHOLD -+*/ -+#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_MASK (0x001FF000) -+#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LSBMASK (0x000001FF) -+#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SHIFT (12) -+#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LENGTH (9) -+#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3_MEM_THRESH, VID3_THRESHOLD -+*/ -+#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_MASK (0x000001FF) -+#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LSBMASK (0x000001FF) -+#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SHIFT (0) -+#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LENGTH (9) -+#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4_MEMCTRL_OFFSET (0x0998) -+ -+/* PDP, VID4_MEMCTRL, VID4_LOCAL_GLOBAL_MEMCTRL -+*/ -+#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) -+#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) -+#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) -+#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) -+#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4_MEMCTRL, VID4_BURSTLEN -+*/ -+#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_MASK (0x000000FF) -+#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_LSBMASK (0x000000FF) -+#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_SHIFT (0) -+#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_LENGTH (8) -+#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4_MEM_THRESH_OFFSET (0x099C) -+ -+/* PDP, VID4_MEM_THRESH, VID4_UVTHRESHOLD -+*/ -+#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_MASK (0xFF000000) -+#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LSBMASK (0x000000FF) -+#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SHIFT (24) -+#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LENGTH (8) -+#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4_MEM_THRESH, VID4_YTHRESHOLD -+*/ -+#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_MASK (0x001FF000) -+#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LSBMASK (0x000001FF) -+#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SHIFT (12) -+#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LENGTH (9) -+#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4_MEM_THRESH, VID4_THRESHOLD -+*/ -+#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_MASK (0x000001FF) -+#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LSBMASK (0x000001FF) -+#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SHIFT (0) 
-+#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LENGTH (9) -+#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH1_PANIC_THRESH_OFFSET (0x09A0) -+ -+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_ENABLE -+*/ -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_MASK (0x80000000) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LSBMASK (0x00000001) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SHIFT (31) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LENGTH (1) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_ENABLE -+*/ -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_MASK (0x40000000) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LSBMASK (0x00000001) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SHIFT (30) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LENGTH (1) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MAX -+*/ -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SHIFT (23) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LENGTH (7) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MIN -+*/ -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SHIFT (16) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LENGTH (7) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MAX -+*/ -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SHIFT (8) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LENGTH (8) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MIN -+*/ -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SHIFT (0) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LENGTH (8) -+#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH2_PANIC_THRESH_OFFSET (0x09A4) -+ -+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_ENABLE -+*/ -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_MASK (0x80000000) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LSBMASK (0x00000001) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SHIFT (31) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LENGTH (1) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_ENABLE -+*/ -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_MASK (0x40000000) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LSBMASK (0x00000001) -+#define 
PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SHIFT (30) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LENGTH (1) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MAX -+*/ -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SHIFT (23) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LENGTH (7) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MIN -+*/ -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SHIFT (16) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LENGTH (7) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MAX -+*/ -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SHIFT (8) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LENGTH (8) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MIN -+*/ -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SHIFT (0) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LENGTH (8) -+#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH3_PANIC_THRESH_OFFSET (0x09A8) -+ -+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_ENABLE -+*/ -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_MASK (0x80000000) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LSBMASK (0x00000001) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SHIFT (31) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LENGTH (1) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_ENABLE -+*/ -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_MASK (0x40000000) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LSBMASK (0x00000001) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SHIFT (30) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LENGTH (1) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MAX -+*/ -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SHIFT (23) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LENGTH (7) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MIN -+*/ -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) -+#define 
PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SHIFT (16) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LENGTH (7) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MAX -+*/ -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SHIFT (8) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LENGTH (8) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MIN -+*/ -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SHIFT (0) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LENGTH (8) -+#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_GRPH4_PANIC_THRESH_OFFSET (0x09AC) -+ -+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_ENABLE -+*/ -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_MASK (0x80000000) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LSBMASK (0x00000001) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SHIFT (31) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LENGTH (1) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_ENABLE -+*/ -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_MASK (0x40000000) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LSBMASK (0x00000001) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SHIFT (30) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LENGTH (1) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MAX -+*/ -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SHIFT (23) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LENGTH (7) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MIN -+*/ -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SHIFT (16) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LENGTH (7) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MAX -+*/ -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SHIFT (8) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LENGTH (8) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MIN -+*/ -+#define 
PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SHIFT (0) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LENGTH (8) -+#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID1_PANIC_THRESH_OFFSET (0x09B0) -+ -+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_ENABLE -+*/ -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_MASK (0x80000000) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LSBMASK (0x00000001) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SHIFT (31) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LENGTH (1) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_ENABLE -+*/ -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_MASK (0x40000000) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LSBMASK (0x00000001) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SHIFT (30) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LENGTH (1) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MAX -+*/ -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SHIFT (23) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LENGTH (7) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MIN -+*/ -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SHIFT (16) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LENGTH (7) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MAX -+*/ -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SHIFT (8) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LENGTH (8) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MIN -+*/ -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SHIFT (0) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LENGTH (8) -+#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID2_PANIC_THRESH_OFFSET (0x09B4) -+ -+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_ENABLE -+*/ -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_MASK (0x80000000) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LSBMASK (0x00000001) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SHIFT (31) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LENGTH (1) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_ENABLE -+*/ -+#define 
PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_MASK (0x40000000) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LSBMASK (0x00000001) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SHIFT (30) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LENGTH (1) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MAX -+*/ -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SHIFT (23) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LENGTH (7) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MIN -+*/ -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SHIFT (16) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LENGTH (7) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MAX -+*/ -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SHIFT (8) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LENGTH (8) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MIN -+*/ -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SHIFT (0) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LENGTH (8) -+#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID3_PANIC_THRESH_OFFSET (0x09B8) -+ -+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_ENABLE -+*/ -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_MASK (0x80000000) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LSBMASK (0x00000001) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SHIFT (31) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LENGTH (1) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_ENABLE -+*/ -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_MASK (0x40000000) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LSBMASK (0x00000001) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SHIFT (30) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LENGTH (1) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MAX -+*/ -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SHIFT (23) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LENGTH (7) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MIN -+*/ -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_MASK 
(0x007F0000) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SHIFT (16) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LENGTH (7) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MAX -+*/ -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SHIFT (8) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LENGTH (8) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MIN -+*/ -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SHIFT (0) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LENGTH (8) -+#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_VID4_PANIC_THRESH_OFFSET (0x09BC) -+ -+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_ENABLE -+*/ -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_MASK (0x80000000) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LSBMASK (0x00000001) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SHIFT (31) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LENGTH (1) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_ENABLE -+*/ -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_MASK (0x40000000) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LSBMASK (0x00000001) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SHIFT (30) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LENGTH (1) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MAX -+*/ -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SHIFT (23) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LENGTH (7) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MIN -+*/ -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SHIFT (16) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LENGTH (7) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MAX -+*/ -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SHIFT (8) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LENGTH (8) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE -+ -+/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MIN -+*/ -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) -+#define 
PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SHIFT (0) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LENGTH (8) -+#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE -+ -+#define PDP_BURST_BOUNDARY_OFFSET (0x09C0) -+ -+/* PDP, BURST_BOUNDARY, BURST_BOUNDARY -+*/ -+#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_MASK (0x0000003F) -+#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_LSBMASK (0x0000003F) -+#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_SHIFT (0) -+#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_LENGTH (6) -+#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_SIGNED_FIELD IMG_FALSE -+ -+ -+/* ------------------------ End of register definitions ------------------------ */ -+ -+/* -+// NUMREG defines the extent of register address space. -+*/ -+ -+#define PDP_NUMREG ((0x09C0 >> 2)+1) -+ -+/* Info about video plane addresses */ -+#define PDP_YADDR_BITS (PDP_VID1BASEADDR_VID1BASEADDR_LENGTH) -+#define PDP_YADDR_ALIGN 5 -+#define PDP_UADDR_BITS (PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH) -+#define PDP_UADDR_ALIGN 5 -+#define PDP_VADDR_BITS (PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH) -+#define PDP_VADDR_ALIGN 5 -+ -+#define PDP_YSTRIDE_BITS (PDP_VID1STRIDE_VID1STRIDE_LENGTH) -+#define PDP_YSTRIDE_ALIGN 5 -+ -+#define PDP_MAX_INPUT_WIDTH (PDP_VID1SIZE_VID1WIDTH_LSBMASK + 1) -+#define PDP_MAX_INPUT_HEIGHT (PDP_VID1SIZE_VID1HEIGHT_LSBMASK + 1) -+ -+/* Maximum 6 bytes per pixel for RGB161616 */ -+#define PDP_MAX_IMAGE_BYTES (PDP_MAX_INPUT_WIDTH * PDP_MAX_INPUT_HEIGHT * 6) -+ -+/* Round up */ -+#define PDP_MAX_IMAGE_PAGES ((PDP_MAX_IMAGE_BYTES+PAGE_SIZE-1)/PAGE_SIZE) -+ -+#define PDP_YADDR_MAX (((1 << PDP_YADDR_BITS) - 1) << PDP_YADDR_ALIGN) -+#define PDP_UADDR_MAX (((1 << PDP_UADDR_BITS) - 1) << PDP_UADDR_ALIGN) -+#define PDP_VADDR_MAX (((1 << PDP_VADDR_BITS) - 1) << PDP_VADDR_ALIGN) -+#define PDP_YSTRIDE_MAX ((1 << PDP_YSTRIDE_BITS) << PDP_YSTRIDE_ALIGN) -+#define PDP_YADDR_ALIGNMASK ((1 << PDP_YADDR_ALIGN) - 1) -+#define PDP_UADDR_ALIGNMASK ((1 << PDP_UADDR_ALIGN) - 1) -+#define PDP_VADDR_ALIGNMASK ((1 << PDP_VADDR_ALIGN) - 1) -+#define PDP_YSTRIDE_ALIGNMASK ((1 << PDP_YSTRIDE_ALIGN) - 1) -+ -+/* Field Values */ -+#define PDP_SURF_PIXFMT_RGB332 0x3 -+#define PDP_SURF_PIXFMT_ARGB4444 0x4 -+#define PDP_SURF_PIXFMT_ARGB1555 0x5 -+#define PDP_SURF_PIXFMT_RGB888 0x6 -+#define PDP_SURF_PIXFMT_RGB565 0x7 -+#define PDP_SURF_PIXFMT_ARGB8888 0x8 -+#define PDP_SURF_PIXFMT_420_PL8 0x9 -+#define PDP_SURF_PIXFMT_420_PL8IVU 0xA -+#define PDP_SURF_PIXFMT_420_PL8IUV 0xB -+#define PDP_SURF_PIXFMT_422_UY0VY1_8888 0xC -+#define PDP_SURF_PIXFMT_422_VY0UY1_8888 0xD -+#define PDP_SURF_PIXFMT_422_Y0UY1V_8888 0xE -+#define PDP_SURF_PIXFMT_422_Y0VY1U_8888 0xF -+#define PDP_SURF_PIXFMT_AYUV8888 0x10 -+#define PDP_SURF_PIXFMT_YUV101010 0x15 -+#define PDP_SURF_PIXFMT_RGB101010 0x17 -+#define PDP_SURF_PIXFMT_420_PL10IUV 0x18 -+#define PDP_SURF_PIXFMT_420_PL10IVU 0x19 -+#define PDP_SURF_PIXFMT_422_PL10IUV 0x1A -+#define PDP_SURF_PIXFMT_422_PL10IVU 0x1B -+#define PDP_SURF_PIXFMT_RGB121212 0x1E -+#define PDP_SURF_PIXFMT_RGB161616 0x1F -+ -+#define PDP_CTRL_CKEYSRC_PREV 0x0 -+#define PDP_CTRL_CKEYSRC_CUR 0x1 -+ -+#define PDP_MEMCTRL_MEMREFRESH_ALWAYS 0x0 -+#define PDP_MEMCTRL_MEMREFRESH_HBLNK 0x1 -+#define PDP_MEMCTRL_MEMREFRESH_VBLNK 0x2 -+#define PDP_MEMCTRL_MEMREFRESH_BOTH 0x3 -+ -+#define PDP_3D_CTRL_BLENDSEL_BGND_WITH_POS0 0x0 -+#define PDP_3D_CTRL_BLENDSEL_POS0_WITH_POS1 0x1 -+#define PDP_3D_CTRL_BLENDSEL_POS1_WITH_POS2 0x2 
-+#define PDP_3D_CTRL_BLENDSEL_POS2_WITH_POS3 0x3 -+#define PDP_3D_CTRL_BLENDSEL_POS3_WITH_POS4 0x4 -+#define PDP_3D_CTRL_BLENDSEL_POS4_WITH_POS5 0x5 -+#define PDP_3D_CTRL_BLENDSEL_POS5_WITH_POS6 0x6 -+#define PDP_3D_CTRL_BLENDSEL_POS6_WITH_POS7 0x7 -+ -+#define PDP_UADDR_UV_STRIDE_EQUAL_TO_Y_STRIDE 0x0 -+#define PDP_UADDR_UV_STRIDE_EQUAL_TO_DOUBLE_Y_STRIDE 0x1 -+#define PDP_UADDR_UV_STRIDE_EQUAL_TO_HALF_Y_STRIDE 0x2 -+ -+#define PDP_PROCAMP_OUTPUT_OFFSET_FRACTIONAL_BITS 1 -+#define PDP_PROCAMP_COEFFICIENT_FRACTIONAL_BITS 10 -+ -+/*-------------------------------------------------------------------------------*/ -+ -+#endif /* _PDP2_REGS_H */ -diff --git a/drivers/gpu/drm/img-rogue/pdp_drm.h b/drivers/gpu/drm/img-rogue/pdp_drm.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pdp_drm.h -@@ -0,0 +1,105 @@ -+/* -+ * @File -+ * @Title PDP DRM definitions shared between kernel and user space. -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ */ -+ -+#if !defined(__PDP_DRM_H__) -+#define __PDP_DRM_H__ -+ -+#if defined(__KERNEL__) -+#include -+#else -+#include -+#endif -+ -+struct drm_pdp_gem_create { -+ __u64 size; /* in */ -+ __u32 flags; /* in */ -+ __u32 handle; /* out */ -+}; -+ -+struct drm_pdp_gem_mmap { -+ __u32 handle; /* in */ -+ __u32 pad; -+ __u64 offset; /* out */ -+}; -+ -+#define PDP_GEM_CPU_PREP_READ (1 << 0) -+#define PDP_GEM_CPU_PREP_WRITE (1 << 1) -+#define PDP_GEM_CPU_PREP_NOWAIT (1 << 2) -+ -+struct drm_pdp_gem_cpu_prep { -+ __u32 handle; /* in */ -+ __u32 flags; /* in */ -+}; -+ -+struct drm_pdp_gem_cpu_fini { -+ __u32 handle; /* in */ -+ __u32 pad; -+}; -+ -+/* -+ * DRM command numbers, relative to DRM_COMMAND_BASE. -+ * These defines must be prefixed with "DRM_". -+ */ -+#define DRM_PDP_GEM_CREATE 0x00 -+#define DRM_PDP_GEM_MMAP 0x01 -+#define DRM_PDP_GEM_CPU_PREP 0x02 -+#define DRM_PDP_GEM_CPU_FINI 0x03 -+ -+/* These defines must be prefixed with "DRM_IOCTL_". */ -+#define DRM_IOCTL_PDP_GEM_CREATE \ -+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PDP_GEM_CREATE, \ -+ struct drm_pdp_gem_create) -+ -+#define DRM_IOCTL_PDP_GEM_MMAP\ -+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PDP_GEM_MMAP, \ -+ struct drm_pdp_gem_mmap) -+ -+#define DRM_IOCTL_PDP_GEM_CPU_PREP \ -+ DRM_IOW(DRM_COMMAND_BASE + DRM_PDP_GEM_CPU_PREP, \ -+ struct drm_pdp_gem_cpu_prep) -+ -+#define DRM_IOCTL_PDP_GEM_CPU_FINI \ -+ DRM_IOW(DRM_COMMAND_BASE + DRM_PDP_GEM_CPU_FINI, \ -+ struct drm_pdp_gem_cpu_fini) -+ -+#endif /* defined(__PDP_DRM_H__) */ -diff --git a/drivers/gpu/drm/img-rogue/pdump.h b/drivers/gpu/drm/img-rogue/pdump.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pdump.h -@@ -0,0 +1,238 @@ -+/*************************************************************************/ /*! -+@File -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef SERVICES_PDUMP_H -+#define SERVICES_PDUMP_H -+ -+#include "img_types.h" -+#include "services_km.h" -+ -+ -+/* A PDump out2.txt script is made up of 3 sections from three buffers: -+ * * -+ * - Init phase buffer - holds PDump data written during driver -+ * initialisation, non-volatile. -+ * - Main phase buffer - holds PDump data written after driver init, -+ * volatile. -+ * - Deinit phase buffer - holds PDump data needed to shutdown HW/play back, -+ * written only during driver initialisation using -+ * the DEINIT flag. -+ * -+ * Volatile in this sense means that the buffer is drained and cleared when -+ * the pdump capture application connects and transfers the data to file. -+ * -+ * The PDump sub-system uses the driver state (init/post-init), whether -+ * the pdump capture application is connected or not (capture range set/unset) -+ * and, if pdump connected whether the frame is in the range set, to decide -+ * which of the 3 buffers to write the PDump data. Hence there are several -+ * key time periods in the lifetime of the kernel driver that is enabled -+ * with PDUMP=1 (flag XX labels below time line): -+ * -+ * Events:load init pdump enter exit pdump -+ * driver done connects range range disconnects -+ * |__________________|____________|__________|______________|____________|______ . . . -+ * State: | init phase | no capture | <- capture client connected -> | no capture -+ * | | | | -+ * |__________________|____________|______________________________________|_____ . . . -+ * Flag: | CT,DI | NONE,CT,PR | NONE,CT,PR | See no -+ * | Never NONE or PR | Never DI | Never DI | capture -+ * |__________________|____________|______________________________________|_____ . . . -+ * Write | NONE -undef | -No write | -No write | -Main buf | -No write | See no -+ * buffer | CT -Init buf | -Main buf | -Main buf | -Main buf | -Main buf | capture -+ * | PR -undef | -Init buf | -undef | -Init & Main | -undef | -+ * | DI -Deinit buf | -undef | -undef | -undef | -undef | -+ * |__________________|____________|___________|______________|___________|_____ . . . -+ * -+ * Note: The time line could repeat if the pdump capture application is -+ * disconnected and reconnected without unloading the driver module. -+ * -+ * The DEINIT (DI) | CONTINUOUS (CT) | PERSISTENT (PR) flags must never -+ * be OR'd together and given to a PDump call since undefined behaviour may -+ * result and produce an invalid PDump which does not play back cleanly. -+ * -+ * The decision on which flag to use comes down to which time period the -+ * client or server driver makes the PDump write call AND the nature/purpose -+ * of the data. -+ * -+ * Note: This is a simplified time line, not all conditions represented. 
-+ * -+ */ -+ -+typedef IMG_UINT32 PDUMP_FLAGS_T; -+ -+#define PDUMP_FLAGS_NONE PDUMP_NONE /* -+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) -+ #include -+ #else -+ #include -+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ -+ #else -+ #include -+ #endif /* __linux__ */ -+#endif /* PDUMP */ -+ -+/* services/srvkm/include/ */ -+#include "device.h" -+ -+/* include/ */ -+#include "pvrsrv_error.h" -+ -+ -+#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__) -+#define __pvrsrv_defined_struct_enum__ -+#include -+#endif -+ -+#include "connection_server.h" -+/* Pull in pdump flags from services include */ -+#include "pdump.h" -+#include "pdumpdefs.h" -+ -+/* Define this to enable the PDUMP_HERE trace in the server */ -+#undef PDUMP_TRACE -+ -+#if defined(PDUMP_TRACE) -+#define PDUMP_HERE_VAR __maybe_unused IMG_UINT32 here = 0; -+#define PDUMP_HERE(a) { here = (a); if (ui32Flags & PDUMP_FLAGS_DEBUG) PVR_DPF((PVR_DBG_WARNING, "HERE %d", (a))); } -+#define PDUMP_HEREA(a) { here = (a); PVR_DPF((PVR_DBG_WARNING, "HERE ALWAYS %d", (a))); } -+#else -+#define PDUMP_HERE_VAR __maybe_unused IMG_UINT32 here = 0; -+#define PDUMP_HERE(a) here = (a); -+#define PDUMP_HEREA(a) here = (a); -+#endif -+ -+#define PDUMP_PD_UNIQUETAG (IMG_HANDLE)0 -+#define PDUMP_PT_UNIQUETAG (IMG_HANDLE)0 -+ -+/* Invalid value for PDump block number */ -+#define PDUMP_BLOCKNUM_INVALID IMG_UINT32_MAX -+ -+typedef struct _PDUMP_CONNECTION_DATA_ PDUMP_CONNECTION_DATA; -+ -+/* PDump transition events */ -+typedef enum _PDUMP_TRANSITION_EVENT_ -+{ -+ PDUMP_TRANSITION_EVENT_NONE, /* No event */ -+ PDUMP_TRANSITION_EVENT_BLOCK_FINISHED, /* Block mode event, current PDump-block has finished */ -+ PDUMP_TRANSITION_EVENT_BLOCK_STARTED, /* Block mode event, new PDump-block has started */ -+ PDUMP_TRANSITION_EVENT_RANGE_ENTERED, /* Transition into capture range */ -+ PDUMP_TRANSITION_EVENT_RANGE_EXITED, /* Transition out of capture range */ -+} PDUMP_TRANSITION_EVENT; -+ -+typedef PVRSRV_ERROR (*PFN_PDUMP_TRANSITION)(void *pvData, void *pvDevice, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags); -+typedef void (*PFN_PDUMP_SYNCBLOCKS)(PVRSRV_DEVICE_NODE *psDevNode, void *pvData, PDUMP_TRANSITION_EVENT eEvent); -+ -+typedef PVRSRV_ERROR (*PFN_PDUMP_TRANSITION_FENCE_SYNC)(void *pvData, PDUMP_TRANSITION_EVENT eEvent); -+ -+#ifdef PDUMP -+ -+/*! Macro used to record a panic in the PDump script stream */ -+#define PDUMP_PANIC(_dev, _id, _msg) do \ -+ { PVRSRV_ERROR _eE;\ -+ _eE = PDumpPanic((_dev), ((RGX_PDUMP_PANIC_ ## _id) & 0xFFFF), _msg, __func__, __LINE__); \ -+ PVR_LOG_IF_ERROR(_eE, "PDumpPanic");\ -+ MSC_SUPPRESS_4127\ -+ } while (0) -+ -+/*! 
Macro used to record a driver error in the PDump script stream to invalidate the capture */ -+#define PDUMP_ERROR(_dev, _err, _msg) \ -+ (void)PDumpCaptureError((_dev), _err, _msg, __func__, __LINE__) -+ -+#define SZ_MSG_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE -+#define SZ_SCRIPT_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE -+#define SZ_FILENAME_SIZE_MAX (PVRSRV_PDUMP_MAX_FILENAME_SIZE+sizeof(PDUMP_PARAM_N_FILE_NAME)) -+ -+#define PDUMP_GET_SCRIPT_STRING() \ -+ IMG_HANDLE hScript; \ -+ void *pvScriptAlloc; \ -+ IMG_UINT32 ui32MaxLen = SZ_SCRIPT_SIZE_MAX-1; \ -+ pvScriptAlloc = OSAllocMem( SZ_SCRIPT_SIZE_MAX ); \ -+ if (!pvScriptAlloc) \ -+ { \ -+ PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_STRING() failed to allocate memory for script buffer")); \ -+ return PVRSRV_ERROR_OUT_OF_MEMORY; \ -+ } \ -+ \ -+ hScript = (IMG_HANDLE) pvScriptAlloc; -+ -+#define PDUMP_GET_MSG_STRING() \ -+ IMG_CHAR *pszMsg; \ -+ void *pvMsgAlloc; \ -+ IMG_UINT32 ui32MaxLen = SZ_MSG_SIZE_MAX-1; \ -+ pvMsgAlloc = OSAllocMem( SZ_MSG_SIZE_MAX ); \ -+ if (!pvMsgAlloc) \ -+ { \ -+ PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_MSG_STRING() failed to allocate memory for message buffer")); \ -+ return PVRSRV_ERROR_OUT_OF_MEMORY; \ -+ } \ -+ pszMsg = (IMG_CHAR *)pvMsgAlloc; -+ -+#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \ -+ IMG_HANDLE hScript; \ -+ IMG_CHAR *pszFileName; \ -+ IMG_UINT32 ui32MaxLenScript = SZ_SCRIPT_SIZE_MAX-1; \ -+ void *pvScriptAlloc; \ -+ void *pvFileAlloc; \ -+ pvScriptAlloc = OSAllocMem( SZ_SCRIPT_SIZE_MAX ); \ -+ if (!pvScriptAlloc) \ -+ { \ -+ PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_AND_FILE_STRING() failed to allocate memory for script buffer")); \ -+ return PVRSRV_ERROR_OUT_OF_MEMORY; \ -+ } \ -+ \ -+ hScript = (IMG_HANDLE) pvScriptAlloc; \ -+ pvFileAlloc = OSAllocMem( SZ_FILENAME_SIZE_MAX ); \ -+ if (!pvFileAlloc) \ -+ { \ -+ PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_AND_FILE_STRING() failed to allocate memory for filename buffer")); \ -+ OSFreeMem(pvScriptAlloc); \ -+ return PVRSRV_ERROR_OUT_OF_MEMORY; \ -+ } \ -+ pszFileName = (IMG_CHAR *)pvFileAlloc; -+ -+#define PDUMP_RELEASE_SCRIPT_STRING() \ -+ if (pvScriptAlloc) \ -+ { \ -+ OSFreeMem(pvScriptAlloc); \ -+ pvScriptAlloc = NULL; \ -+ } -+ -+#define PDUMP_RELEASE_MSG_STRING() \ -+ if (pvMsgAlloc) \ -+ { \ -+ OSFreeMem(pvMsgAlloc); \ -+ pvMsgAlloc = NULL; \ -+ } -+ -+#define PDUMP_RELEASE_FILE_STRING() \ -+ if (pvFileAlloc) \ -+ { \ -+ OSFreeMem(pvFileAlloc); \ -+ pvFileAlloc = NULL; \ -+ } -+ -+#define PDUMP_RELEASE_SCRIPT_AND_FILE_STRING() \ -+ if (pvScriptAlloc) \ -+ { \ -+ OSFreeMem(pvScriptAlloc); \ -+ pvScriptAlloc = NULL; \ -+ } \ -+ if (pvFileAlloc) \ -+ { \ -+ OSFreeMem(pvFileAlloc); \ -+ pvFileAlloc = NULL; \ -+ } -+ -+ -+/* Shared across pdump_x files */ -+PVRSRV_ERROR PDumpInitCommon(void); -+void PDumpDeInitCommon(void); -+PVRSRV_ERROR PDumpValidateUMFlags(PDUMP_FLAGS_T uiFlags); -+PVRSRV_ERROR PDumpReady(void); -+void PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset, -+ size_t *puiZeroPageSize, -+ const IMG_CHAR **ppszZeroPageFilename); -+ -+void PDumpConnectionNotify(PVRSRV_DEVICE_NODE *psDeviceNode); -+void PDumpDisconnectionNotify(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+void PDumpStopInitPhase(PVRSRV_DEVICE_NODE *psDeviceNode); -+PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32Frame); -+PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT32* pui32Frame); -+PVRSRV_ERROR PDumpCommentKM(CONNECTION_DATA 
*psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32CommentSize, -+ IMG_CHAR *pszComment, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32Mode, -+ IMG_UINT32 ui32Start, -+ IMG_UINT32 ui32End, -+ IMG_UINT32 ui32Interval, -+ IMG_UINT32 ui32MaxParamFileSize, -+ IMG_UINT32 ui32AutoTermTimeout); -+ -+ -+PVRSRV_ERROR PDumpReg32(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszPDumpRegName, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32RegValue, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpReg64(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszPDumpRegName, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT64 ui64RegValue, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpRegLabelToReg64(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszPDumpRegName, -+ IMG_UINT32 ui32RegDst, -+ IMG_UINT32 ui32RegSrc, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpPhysHandleToInternalVar64(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszInternalVar, -+ IMG_HANDLE hPdumpPages, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpMemLabelToInternalVar64(IMG_CHAR *pszInternalVar, -+ PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpInternalVarToMemLabel(PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_CHAR *pszInternalVar, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpWriteVarORValueOp(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszInternalVariable, -+ const IMG_UINT64 ui64Value, -+ const IMG_UINT32 ui32PDumpFlags); -+ -+PVRSRV_ERROR PDumpWriteVarANDValueOp(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszInternalVariable, -+ const IMG_UINT64 ui64Value, -+ const IMG_UINT32 ui32PDumpFlags); -+ -+PVRSRV_ERROR PDumpWriteVarSHRValueOp(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszInternalVariable, -+ const IMG_UINT64 ui64Value, -+ const IMG_UINT32 ui32PDumpFlags); -+ -+PVRSRV_ERROR PDumpWriteVarORVarOp(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszInternalVar, -+ const IMG_CHAR *pszInternalVar2, -+ const IMG_UINT32 ui32PDumpFlags); -+ -+PVRSRV_ERROR PDumpWriteVarANDVarOp(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszInternalVar, -+ const IMG_CHAR *pszInternalVar2, -+ const IMG_UINT32 ui32PDumpFlags); -+ -+PVRSRV_ERROR PDumpInternalVarToReg32(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszPDumpRegName, -+ IMG_UINT32 ui32Reg, -+ IMG_CHAR *pszInternalVar, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpInternalVarToReg64(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszPDumpRegName, -+ IMG_UINT32 ui32Reg, -+ IMG_CHAR *pszInternalVar, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpMemLabelToMem32(PMR *psPMRSource, -+ PMR *psPMRDest, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpMemLabelToMem64(PMR *psPMRSource, -+ PMR *psPMRDest, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpRegLabelToMem32(IMG_CHAR *pszPDumpRegName, -+ IMG_UINT32 ui32Reg, -+ PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpRegLabelToMem64(IMG_CHAR *pszPDumpRegName, -+ IMG_UINT32 ui32Reg, -+ PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpRegLabelToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszPDumpRegName, -+ IMG_UINT32 
ui32Reg, -+ IMG_CHAR *pszInternalVar, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpSAW(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszDevSpaceName, -+ IMG_UINT32 ui32HPOffsetBytes, -+ IMG_UINT32 ui32NumSaveBytes, -+ IMG_CHAR *pszOutfileName, -+ IMG_UINT32 ui32OutfileOffsetByte, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+PVRSRV_ERROR PDumpRegPolKM(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszPDumpRegName, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32RegValue, -+ IMG_UINT32 ui32Mask, -+ IMG_UINT32 ui32Flags, -+ PDUMP_POLL_OPERATOR eOperator); -+ -+ -+/**************************************************************************/ /*! -+@Function PDumpImageDescriptor -+@Description PDumps image data out as an IMGBv2 data section -+@Input psDeviceNode Pointer to device node. -+@Input ui32MMUContextID PDUMP MMU context ID. -+@Input pszSABFileName Pointer to string containing file name of -+ Image being SABed -+@Input sData GPU virtual address of this surface. -+@Input ui32DataSize Image data size -+@Input ui32LogicalWidth Image logical width -+@Input ui32LogicalHeight Image logical height -+@Input ui32PhysicalWidth Image physical width -+@Input ui32PhysicalHeight Image physical height -+@Input ePixFmt Image pixel format -+@Input eFBCompression FB compression mode -+@Input paui32FBCClearColour FB clear colour (Only applicable to FBC surfaces) -+@Input eFBCSwizzle FBC channel swizzle (Only applicable to FBC surfaces) -+@Input sHeader GPU virtual address of the headers of this -+ surface (Only applicable to FBC surfaces) -+@Input ui32HeaderSize Header size (Only applicable to FBC surfaces) -+@Input ui32PDumpFlags PDUMP flags -+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_ -+ error code -+*/ /***************************************************************************/ -+PVRSRV_ERROR PDumpImageDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32MMUContextID, -+ IMG_CHAR *pszSABFileName, -+ IMG_DEV_VIRTADDR sData, -+ IMG_UINT32 ui32DataSize, -+ IMG_UINT32 ui32LogicalWidth, -+ IMG_UINT32 ui32LogicalHeight, -+ IMG_UINT32 ui32PhysicalWidth, -+ IMG_UINT32 ui32PhysicalHeight, -+ PDUMP_PIXEL_FORMAT ePixFmt, -+ IMG_MEMLAYOUT eMemLayout, -+ IMG_FB_COMPRESSION eFBCompression, -+ const IMG_UINT32 *paui32FBCClearColour, -+ PDUMP_FBC_SWIZZLE eFBCSwizzle, -+ IMG_DEV_VIRTADDR sHeader, -+ IMG_UINT32 ui32HeaderSize, -+ IMG_UINT32 ui32PDumpFlags); -+ -+/**************************************************************************/ /*! -+@Function PDumpDataDescriptor -+@Description PDumps non-image data out as an IMGCv1 data section -+@Input psDeviceNode Pointer to device node. -+@Input ui32MMUContextID PDUMP MMU context ID. -+@Input pszSABFileName Pointer to string containing file name of -+ Data being SABed -+@Input sData GPU virtual address of this data. -+@Input ui32DataSize Data size -+@Input ui32HeaderType Header type -+@Input ui32ElementType Data element type -+@Input ui32ElementCount Number of data elements -+@Input ui32PDumpFlags PDUMP flags -+@Return PVRSRV_ERROR: PVRSRV_OK on success. 
Otherwise, a PVRSRV_ -+ error code -+*/ /***************************************************************************/ -+PVRSRV_ERROR PDumpDataDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32MMUContextID, -+ IMG_CHAR *pszSABFileName, -+ IMG_DEV_VIRTADDR sData, -+ IMG_UINT32 ui32DataSize, -+ IMG_UINT32 ui32HeaderType, -+ IMG_UINT32 ui32ElementType, -+ IMG_UINT32 ui32ElementCount, -+ IMG_UINT32 ui32PDumpFlags); -+ -+ -+PVRSRV_ERROR PDumpReadRegKM(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszPDumpRegName, -+ IMG_CHAR *pszFileName, -+ IMG_UINT32 ui32FileOffset, -+ IMG_UINT32 ui32Address, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32PDumpFlags); -+ -+__printf(3, 4) -+PVRSRV_ERROR PDumpCommentWithFlags(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32Flags, -+ IMG_CHAR* pszFormat, -+ ...); -+ -+PVRSRV_ERROR PDumpCommentWithFlagsVA(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32Flags, -+ const IMG_CHAR * pszFormat, -+ va_list args); -+ -+PVRSRV_ERROR PDumpPanic(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32PanicNo, -+ IMG_CHAR* pszPanicMsg, -+ const IMG_CHAR* pszPPFunc, -+ IMG_UINT32 ui32PPline); -+ -+PVRSRV_ERROR PDumpCaptureError(PVRSRV_DEVICE_NODE *psDeviceNode, -+ PVRSRV_ERROR ui32ErrorNo, -+ IMG_CHAR* pszErrorMsg, -+ const IMG_CHAR* pszPPFunc, -+ IMG_UINT32 ui32PPline); -+ -+PVRSRV_ERROR PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame); -+ -+PVRSRV_ERROR PDumpGetStateKM(IMG_UINT64 *ui64State); -+ -+PVRSRV_ERROR PDumpForceCaptureStopKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+PVRSRV_ERROR PDumpRegRead32ToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszPDumpRegName, -+ IMG_UINT32 ui32RegOffset, -+ IMG_CHAR *pszInternalVar, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpRegRead32(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszPDumpRegName, -+ const IMG_UINT32 dwRegOffset, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpRegRead64(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszPDumpRegName, -+ const IMG_UINT32 dwRegOffset, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpRegRead64ToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszPDumpRegName, -+ IMG_CHAR *pszInternalVar, -+ const IMG_UINT32 dwRegOffset, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpIDLWithFlags(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32Clocks, -+ IMG_UINT32 ui32Flags); -+PVRSRV_ERROR PDumpIDL(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32Clocks); -+ -+PVRSRV_ERROR PDumpRegBasedCBP(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszPDumpRegName, -+ IMG_UINT32 ui32RegOffset, -+ IMG_UINT32 ui32WPosVal, -+ IMG_UINT32 ui32PacketSize, -+ IMG_UINT32 ui32BufferSize, -+ IMG_UINT32 ui32Flags); -+ -+PVRSRV_ERROR PDumpTRG(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszMemSpace, -+ IMG_UINT32 ui32MMUCtxID, -+ IMG_UINT32 ui32RegionID, -+ IMG_BOOL bEnable, -+ IMG_UINT64 ui64VAddr, -+ IMG_UINT64 ui64LenBytes, -+ IMG_UINT32 ui32XStride, -+ IMG_UINT32 ui32Flags); -+ -+void PDumpLock(void); -+void PDumpUnlock(void); -+ -+PVRSRV_ERROR PDumpRegCondStr(IMG_CHAR **ppszPDumpCond, -+ IMG_CHAR *pszPDumpRegName, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32RegValue, -+ IMG_UINT32 ui32Mask, -+ IMG_UINT32 ui32Flags, -+ PDUMP_POLL_OPERATOR eOperator); -+ -+PVRSRV_ERROR PDumpInternalValCondStr(IMG_CHAR **ppszPDumpCond, -+ IMG_CHAR *pszInternalVar, -+ IMG_UINT32 ui32RegValue, -+ IMG_UINT32 ui32Mask, -+ IMG_UINT32 ui32Flags, -+ PDUMP_POLL_OPERATOR eOperator); -+ -+PVRSRV_ERROR PDumpIfKM(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR 
*pszPDumpCond, IMG_UINT32 ui32PDumpFlags); -+PVRSRV_ERROR PDumpElseKM(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags); -+PVRSRV_ERROR PDumpFiKM(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags); -+PVRSRV_ERROR PDumpStartDoLoopKM(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32PDumpFlags); -+PVRSRV_ERROR PDumpEndDoWhileLoopKM(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszPDumpWhileCond, -+ IMG_UINT32 ui32PDumpFlags); -+PVRSRV_ERROR PDumpCOMCommand(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32PDumpFlags, -+ const IMG_CHAR *pszPDump); -+ -+void PDumpPowerTransitionStart(PVRSRV_DEVICE_NODE *psDeviceNode); -+void PDumpPowerTransitionEnd(PVRSRV_DEVICE_NODE *psDeviceNode); -+IMG_BOOL PDumpCheckFlagsWrite(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32Flags); -+ -+/*! -+ * @name PDumpWriteParameter -+ * @brief General function for writing to PDump stream. Used -+ * mainly for memory dumps to parameter stream. -+ * Usually more convenient to use PDumpWriteScript below -+ * for the script stream. -+ * @param psDeviceNode - device PDump pertains to -+ * @param psui8Data - data to write -+ * @param ui32Size - size of write -+ * @param ui32Flags - PDump flags -+ * @param pui32FileOffset - on return contains the file offset to -+ * the start of the parameter data -+ * @param aszFilenameStr - pointer to at least a 20 char buffer to -+ * return the parameter filename -+ * @return error -+ */ -+PVRSRV_ERROR PDumpWriteParameter(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT8 *psui8Data, IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32Flags, IMG_UINT32* pui32FileOffset, -+ IMG_CHAR* aszFilenameStr); -+ -+/*! -+ * @name PDumpWriteScript -+ * @brief Write an PDumpOS created string to the "script" output stream -+ * @param psDeviceNode - device PDump pertains to -+ * @param hString - PDump OS layer handle of string buffer to write -+ * @param ui32Flags - PDump flags -+ * @return IMG_TRUE on success. -+ */ -+IMG_BOOL PDumpWriteScript(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_HANDLE hString, IMG_UINT32 ui32Flags); -+ -+/**************************************************************************/ /*! -+@Function PDumpSNPrintf -+@Description Printf to OS-specific PDump state buffer. This function is -+ only called if PDUMP is defined. -+@Input hBuf handle of buffer to write into -+@Input ui32ScriptSizeMax maximum size of data to write (chars) -+@Input pszFormat format string -+@Return None -+*/ /**************************************************************************/ -+__printf(3, 4) -+PVRSRV_ERROR PDumpSNPrintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...); -+ -+ -+/* -+ PDumpWriteShiftedMaskedValue(): -+ -+ loads the "reference" address into an internal PDump register, -+ optionally shifts it right, -+ optionally shifts it left, -+ optionally masks it -+ then finally writes the computed value to the given destination address -+ -+ i.e. 
it emits pdump language equivalent to this expression: -+ -+ dest = ((&ref) >> SHRamount << SHLamount) & MASK -+*/ -+PVRSRV_ERROR -+PDumpWriteShiftedMaskedValue(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszDestRegspaceName, -+ const IMG_CHAR *pszDestSymbolicName, -+ IMG_DEVMEM_OFFSET_T uiDestOffset, -+ const IMG_CHAR *pszRefRegspaceName, -+ const IMG_CHAR *pszRefSymbolicName, -+ IMG_DEVMEM_OFFSET_T uiRefOffset, -+ IMG_UINT32 uiSHRAmount, -+ IMG_UINT32 uiSHLAmount, -+ IMG_UINT32 uiMask, -+ IMG_DEVMEM_SIZE_T uiWordSize, -+ IMG_UINT32 uiPDumpFlags); -+ -+/* -+ PDumpWriteSymbAddress(): -+ writes the address of the "reference" to the offset given -+*/ -+PVRSRV_ERROR -+PDumpWriteSymbAddress(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszDestSpaceName, -+ IMG_DEVMEM_OFFSET_T uiDestOffset, -+ const IMG_CHAR *pszRefSymbolicName, -+ IMG_DEVMEM_OFFSET_T uiRefOffset, -+ const IMG_CHAR *pszPDumpDevName, -+ IMG_UINT32 ui32WordSize, -+ IMG_UINT32 ui32AlignShift, -+ IMG_UINT32 ui32Shift, -+ IMG_UINT32 uiPDumpFlags); -+ -+/* Register the connection with the PDump subsystem */ -+PVRSRV_ERROR -+PDumpRegisterConnection(PVRSRV_DEVICE_NODE *psDeviceNode, -+ void *hSyncPrivData, -+ PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks, -+ PDUMP_CONNECTION_DATA **ppsPDumpConnectionData); -+ -+/* Unregister the connection with the PDump subsystem */ -+void -+PDumpUnregisterConnection(PVRSRV_DEVICE_NODE *psDeviceNode, -+ PDUMP_CONNECTION_DATA *psPDumpConnectionData); -+ -+/* Register for notification of PDump Transition into/out of capture range */ -+PVRSRV_ERROR -+PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData, -+ PFN_PDUMP_TRANSITION pfnCallback, -+ void *hPrivData, -+ void *pvDevice, -+ void **ppvHandle); -+ -+/* Unregister notification of PDump Transition */ -+void -+PDumpUnregisterTransitionCallback(void *pvHandle); -+ -+PVRSRV_ERROR -+PDumpRegisterTransitionCallbackFenceSync(void *hPrivData, -+ PFN_PDUMP_TRANSITION_FENCE_SYNC pfnCallback, -+ void **ppvHandle); -+ -+void -+PDumpUnregisterTransitionCallbackFenceSync(void *pvHandle); -+ -+/* Notify PDump of a Transition into/out of capture range */ -+PVRSRV_ERROR -+PDumpTransition(PVRSRV_DEVICE_NODE *psDeviceNode, -+ PDUMP_CONNECTION_DATA *psPDumpConnectionData, -+ PDUMP_TRANSITION_EVENT eEvent, -+ IMG_UINT32 ui32PDumpFlags); -+ -+/* Check if writing to a PDump file is permitted for the given device */ -+IMG_BOOL PDumpIsDevicePermitted(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+/* _ui32PDumpFlags must be a variable in the local scope */ -+#define PDUMP_LOCK(_ui32PDumpFlags) do \ -+ { if ((_ui32PDumpFlags & PDUMP_FLAGS_PDUMP_LOCK_HELD) == 0)\ -+ {\ -+ PDumpLock();\ -+ }\ -+ MSC_SUPPRESS_4127\ -+ } while (0) -+ -+/* _ui32PDumpFlags must be a variable in the local scope */ -+#define PDUMP_UNLOCK(_ui32PDumpFlags) do \ -+ { if ((_ui32PDumpFlags & PDUMP_FLAGS_PDUMP_LOCK_HELD) == 0)\ -+ {\ -+ PDumpUnlock();\ -+ }\ -+ MSC_SUPPRESS_4127\ -+ } while (0) -+ -+#define PDUMPINIT PDumpInitCommon -+#define PDUMPDEINIT PDumpDeInitCommon -+#define PDUMPREG32 PDumpReg32 -+#define PDUMPREG64 PDumpReg64 -+#define PDUMPREGREAD32 PDumpRegRead32 -+#define PDUMPREGREAD64 PDumpRegRead64 -+#define PDUMPCOMMENT(d, ...) 
PDumpCommentWithFlags(d, PDUMP_FLAGS_CONTINUOUS, __VA_ARGS__) -+#define PDUMPCOMMENTWITHFLAGS PDumpCommentWithFlags -+#define PDUMPREGPOL PDumpRegPolKM -+#define PDUMPREGBASEDCBP PDumpRegBasedCBP -+#define PDUMPENDINITPHASE PDumpStopInitPhase -+#define PDUMPIDLWITHFLAGS PDumpIDLWithFlags -+#define PDUMPIDL PDumpIDL -+#define PDUMPPOWCMDSTART PDumpPowerTransitionStart -+#define PDUMPPOWCMDEND PDumpPowerTransitionEnd -+#define PDUMPCOM PDumpCOMCommand -+ -+/* _ui32PDumpFlags must be a variable in the local scope */ -+#define PDUMP_BLKSTART(_ui32PDumpFlags) do \ -+ { PDUMP_LOCK(_ui32PDumpFlags);\ -+ _ui32PDumpFlags |= PDUMP_FLAGS_PDUMP_LOCK_HELD;\ -+ MSC_SUPPRESS_4127\ -+ } while (0) -+ -+/* _ui32PDumpFlags must be a variable in the local scope */ -+#define PDUMP_BLKEND(_ui32PDumpFlags) do \ -+ { _ui32PDumpFlags &= ~PDUMP_FLAGS_PDUMP_LOCK_HELD;\ -+ PDUMP_UNLOCK(_ui32PDumpFlags);\ -+ MSC_SUPPRESS_4127\ -+ } while (0) -+ -+/* _ui32PDumpFlags must be a variable in the local scope */ -+#define PDUMPIF(_dev,_msg,_ui32PDumpFlags) do \ -+ {PDUMP_BLKSTART(_ui32PDumpFlags);\ -+ PDumpIfKM(_dev,_msg,_ui32PDumpFlags);\ -+ MSC_SUPPRESS_4127\ -+ } while (0) -+ -+#define PDUMPELSE PDumpElseKM -+ -+/* _ui32PDumpFlags must be a variable in the local scope */ -+#define PDUMPFI(_dev,_msg,_ui32PDumpFlags) do \ -+ { PDumpFiKM(_dev,_msg,_ui32PDumpFlags);\ -+ PDUMP_BLKEND(_ui32PDumpFlags);\ -+ MSC_SUPPRESS_4127\ -+ } while (0) -+ -+#else -+/* -+ We should be clearer about which functions can be called -+ across the bridge as this looks rather unbalanced -+*/ -+ -+/*! Macro used to record a panic in the PDump script stream */ -+#define PDUMP_PANIC(_dev, _id, _msg) ((void)0) -+ -+/*! Macro used to record a driver error in the PDump script stream to invalidate the capture */ -+#define PDUMP_ERROR(_dev, _err, _msg) ((void)0) -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpInitCommon) -+#endif -+static INLINE PVRSRV_ERROR -+PDumpInitCommon(void) -+{ -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpConnectionNotify) -+#endif -+static INLINE void -+PDumpConnectionNotify(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpDisconnectionNotify) -+#endif -+static INLINE void -+PDumpDisconnectionNotify(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpLock) -+#endif -+static INLINE void -+PDumpLock(void) -+{ -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpUnlock) -+#endif -+static INLINE void -+PDumpUnlock(void) -+{ -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpStopInitPhase) -+#endif -+static INLINE void -+PDumpStopInitPhase(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpSetFrameKM) -+#endif -+static INLINE PVRSRV_ERROR -+PDumpSetFrameKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT32 ui32Frame) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVR_UNREFERENCED_PARAMETER(ui32Frame); -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpGetFrameKM) -+#endif -+static INLINE PVRSRV_ERROR -+PDumpGetFrameKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32* pui32Frame) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVR_UNREFERENCED_PARAMETER(pui32Frame); -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma 
inline(PDumpCommentKM) -+#endif -+static INLINE PVRSRV_ERROR -+PDumpCommentKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32CommentSize, -+ IMG_CHAR *pszComment, -+ IMG_UINT32 ui32Flags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(ui32CommentSize); -+ PVR_UNREFERENCED_PARAMETER(pszComment); -+ PVR_UNREFERENCED_PARAMETER(ui32Flags); -+ return PVRSRV_OK; -+} -+ -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpSetDefaultCaptureParamsKM) -+#endif -+static INLINE PVRSRV_ERROR -+PDumpSetDefaultCaptureParamsKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32Mode, -+ IMG_UINT32 ui32Start, -+ IMG_UINT32 ui32End, -+ IMG_UINT32 ui32Interval, -+ IMG_UINT32 ui32MaxParamFileSize, -+ IMG_UINT32 ui32AutoTermTimeout) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(ui32Mode); -+ PVR_UNREFERENCED_PARAMETER(ui32Start); -+ PVR_UNREFERENCED_PARAMETER(ui32End); -+ PVR_UNREFERENCED_PARAMETER(ui32Interval); -+ PVR_UNREFERENCED_PARAMETER(ui32MaxParamFileSize); -+ PVR_UNREFERENCED_PARAMETER(ui32AutoTermTimeout); -+ -+ return PVRSRV_OK; -+} -+ -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpPanic) -+#endif -+static INLINE PVRSRV_ERROR -+PDumpPanic(IMG_UINT32 ui32PanicNo, -+ IMG_CHAR* pszPanicMsg, -+ const IMG_CHAR* pszPPFunc, -+ IMG_UINT32 ui32PPline) -+{ -+ PVR_UNREFERENCED_PARAMETER(ui32PanicNo); -+ PVR_UNREFERENCED_PARAMETER(pszPanicMsg); -+ PVR_UNREFERENCED_PARAMETER(pszPPFunc); -+ PVR_UNREFERENCED_PARAMETER(ui32PPline); -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpCaptureError) -+#endif -+static INLINE PVRSRV_ERROR -+PDumpCaptureError(PVRSRV_ERROR ui32ErrorNo, -+ IMG_CHAR* pszErrorMsg, -+ const IMG_CHAR* pszPPFunc, -+ IMG_UINT32 ui32PPline) -+{ -+ PVR_UNREFERENCED_PARAMETER(ui32ErrorNo); -+ PVR_UNREFERENCED_PARAMETER(pszErrorMsg); -+ PVR_UNREFERENCED_PARAMETER(pszPPFunc); -+ PVR_UNREFERENCED_PARAMETER(ui32PPline); -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpIsLastCaptureFrameKM) -+#endif -+static INLINE PVRSRV_ERROR -+PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame) -+{ -+ *pbIsLastCaptureFrame = IMG_FALSE; -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpGetStateKM) -+#endif -+static INLINE PVRSRV_ERROR -+PDumpGetStateKM(IMG_UINT64 *ui64State) -+{ -+ *ui64State = 0; -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpForceCaptureStopKM) -+#endif -+static INLINE PVRSRV_ERROR -+PDumpForceCaptureStopKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpImageDescriptor) -+#endif -+static INLINE PVRSRV_ERROR -+PDumpImageDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32MMUContextID, -+ IMG_CHAR *pszSABFileName, -+ IMG_DEV_VIRTADDR sData, -+ IMG_UINT32 ui32DataSize, -+ IMG_UINT32 ui32LogicalWidth, -+ IMG_UINT32 ui32LogicalHeight, -+ IMG_UINT32 ui32PhysicalWidth, -+ IMG_UINT32 ui32PhysicalHeight, -+ PDUMP_PIXEL_FORMAT ePixFmt, -+ IMG_MEMLAYOUT eMemLayout, -+ IMG_FB_COMPRESSION eFBCompression, -+ const IMG_UINT32 *paui32FBCClearColour, -+ PDUMP_FBC_SWIZZLE eFBCSwizzle, -+ IMG_DEV_VIRTADDR sHeader, -+ IMG_UINT32 ui32HeaderSize, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ 
PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(ui32MMUContextID); -+ PVR_UNREFERENCED_PARAMETER(pszSABFileName); -+ PVR_UNREFERENCED_PARAMETER(sData); -+ PVR_UNREFERENCED_PARAMETER(ui32DataSize); -+ PVR_UNREFERENCED_PARAMETER(ui32LogicalWidth); -+ PVR_UNREFERENCED_PARAMETER(ui32LogicalHeight); -+ PVR_UNREFERENCED_PARAMETER(ui32PhysicalWidth); -+ PVR_UNREFERENCED_PARAMETER(ui32PhysicalHeight); -+ PVR_UNREFERENCED_PARAMETER(ePixFmt); -+ PVR_UNREFERENCED_PARAMETER(eMemLayout); -+ PVR_UNREFERENCED_PARAMETER(eFBCompression); -+ PVR_UNREFERENCED_PARAMETER(paui32FBCClearColour); -+ PVR_UNREFERENCED_PARAMETER(eFBCSwizzle); -+ PVR_UNREFERENCED_PARAMETER(sHeader); -+ PVR_UNREFERENCED_PARAMETER(ui32HeaderSize); -+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpDataDescriptor) -+#endif -+static INLINE PVRSRV_ERROR -+PDumpDataDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32MMUContextID, -+ IMG_CHAR *pszSABFileName, -+ IMG_DEV_VIRTADDR sData, -+ IMG_UINT32 ui32DataSize, -+ IMG_UINT32 ui32ElementType, -+ IMG_UINT32 ui32ElementCount, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(ui32MMUContextID); -+ PVR_UNREFERENCED_PARAMETER(pszSABFileName); -+ PVR_UNREFERENCED_PARAMETER(sData); -+ PVR_UNREFERENCED_PARAMETER(ui32DataSize); -+ PVR_UNREFERENCED_PARAMETER(ui32ElementType); -+ PVR_UNREFERENCED_PARAMETER(ui32ElementCount); -+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpRegisterConnection) -+#endif -+static INLINE PVRSRV_ERROR -+PDumpRegisterConnection(PVRSRV_DEVICE_NODE *psDeviceNode, -+ void *hSyncPrivData, -+ PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks, -+ PDUMP_CONNECTION_DATA **ppsPDumpConnectionData) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(hSyncPrivData); -+ PVR_UNREFERENCED_PARAMETER(pfnPDumpSyncBlocks); -+ PVR_UNREFERENCED_PARAMETER(ppsPDumpConnectionData); -+ -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpUnregisterConnection) -+#endif -+static INLINE void -+PDumpUnregisterConnection(PVRSRV_DEVICE_NODE *psDeviceNode, -+ PDUMP_CONNECTION_DATA *psPDumpConnectionData) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData); -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpRegisterTransitionCallback) -+#endif -+static INLINE PVRSRV_ERROR -+PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData, -+ PFN_PDUMP_TRANSITION pfnCallback, -+ void *hPrivData, -+ void *pvDevice, -+ void **ppvHandle) -+{ -+ PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData); -+ PVR_UNREFERENCED_PARAMETER(pfnCallback); -+ PVR_UNREFERENCED_PARAMETER(hPrivData); -+ PVR_UNREFERENCED_PARAMETER(pvDevice); -+ PVR_UNREFERENCED_PARAMETER(ppvHandle); -+ -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpUnregisterTransitionCallback) -+#endif -+static INLINE void -+PDumpUnregisterTransitionCallback(void *pvHandle) -+{ -+ PVR_UNREFERENCED_PARAMETER(pvHandle); -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpRegisterTransitionCallback) -+#endif -+static INLINE PVRSRV_ERROR -+PDumpRegisterTransitionCallbackFenceSync(void *hPrivData, -+ PFN_PDUMP_TRANSITION_FENCE_SYNC pfnCallback, -+ void **ppvHandle) -+{ -+ PVR_UNREFERENCED_PARAMETER(pfnCallback); -+ PVR_UNREFERENCED_PARAMETER(hPrivData); -+ PVR_UNREFERENCED_PARAMETER(ppvHandle); -+ 
-+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpUnregisterTransitionCallbackFenceSync) -+#endif -+static INLINE void -+PDumpUnregisterTransitionCallbackFenceSync(void *pvHandle) -+{ -+ PVR_UNREFERENCED_PARAMETER(pvHandle); -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpTransition) -+#endif -+static INLINE PVRSRV_ERROR -+PDumpTransition(PVRSRV_DEVICE_NODE *psDeviceNode, -+ PDUMP_CONNECTION_DATA *psPDumpConnectionData, -+ PDUMP_TRANSITION_EVENT eEvent, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData); -+ PVR_UNREFERENCED_PARAMETER(eEvent); -+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); -+ return PVRSRV_OK; -+} -+ -+#if defined(__linux__) || defined(GCC_IA32) || defined(GCC_ARM) || defined(__QNXNTO__) || defined(INTEGRITY_OS) -+ #define PDUMPINIT PDumpInitCommon -+ #define PDUMPDEINIT(args...) -+ #define PDUMPREG32(args...) -+ #define PDUMPREG64(args...) -+ #define PDUMPREGREAD32(args...) -+ #define PDUMPREGREAD64(args...) -+ #define PDUMPCOMMENT(args...) -+ #define PDUMPREGPOL(args...) -+ #define PDUMPSYNC(args...) -+ #define PDUMPCOPYTOMEM(args...) -+ #define PDUMPWRITE(args...) -+ #define PDUMPREGBASEDCBP(args...) -+ #define PDUMPCOMMENTWITHFLAGS(args...) -+ #define PDUMPENDINITPHASE(args...) -+ #define PDUMPIDLWITHFLAGS(args...) -+ #define PDUMPIDL(args...) -+ #define PDUMPPOWCMDSTART(args...) -+ #define PDUMPPOWCMDEND(args...) -+ #define PDUMP_LOCK(args...) -+ #define PDUMP_UNLOCK(args...) -+ #define PDUMPIF(args...) -+ #define PDUMPFI(args...) -+ #define PDUMPCOM(args...) -+#else -+ #error Compiler not specified -+#endif -+ -+#endif /* PDUMP */ -+ -+#endif /* PDUMP_KM_H */ -+ -+/****************************************************************************** -+ End of file (pdump_km.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/pdump_mmu.h b/drivers/gpu/drm/img-rogue/pdump_mmu.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pdump_mmu.h -@@ -0,0 +1,147 @@ -+/**************************************************************************/ /*! -+@File -+@Title Common MMU Management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements basic low level control of MMU. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+#ifndef SRVKM_PDUMP_MMU_H -+#define SRVKM_PDUMP_MMU_H -+ -+/* services/server/include/ */ -+#include "pdump_symbolicaddr.h" -+/* include/ */ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#include "mmu_common.h" -+ -+#include "opaque_types.h" -+ -+#if defined(PDUMP) -+PVRSRV_ERROR -+PDumpMMUMalloc(PPVRSRV_DEVICE_NODE psDeviceNode, -+ const IMG_CHAR *pszPDumpDevName, -+ MMU_LEVEL eMMULevel, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32Align, -+ PDUMP_MMU_TYPE eMMUType); -+ -+PVRSRV_ERROR -+PDumpMMUFree(PPVRSRV_DEVICE_NODE psDeviceNode, -+ const IMG_CHAR *pszPDumpDevName, -+ MMU_LEVEL eMMULevel, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ PDUMP_MMU_TYPE eMMUType); -+ -+PVRSRV_ERROR -+PDumpPTBaseObjectToMem64(const IMG_CHAR *pszPDumpDevName, -+ PMR *psPMRDest, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest, -+ IMG_UINT32 ui32Flags, -+ MMU_LEVEL eMMULevel, -+ IMG_UINT64 ui64PxSymAddr, -+ IMG_UINT64 ui64PxOffset); -+ -+PVRSRV_ERROR -+PDumpMMUDumpPxEntries(PPVRSRV_DEVICE_NODE psDeviceNode, -+ MMU_LEVEL eMMULevel, -+ const IMG_CHAR *pszPDumpDevName, -+ void *pvPxMem, -+ IMG_DEV_PHYADDR sPxDevPAddr, -+ IMG_UINT32 uiFirstEntry, -+ IMG_UINT32 uiNumEntries, -+ const IMG_CHAR *pszMemspaceName, -+ const IMG_CHAR *pszSymbolicAddr, -+ IMG_UINT64 uiSymbolicAddrOffset, -+ IMG_UINT32 uiBytesPerEntry, -+ IMG_UINT32 uiLog2Align, -+ IMG_UINT32 uiAddrShift, -+ IMG_UINT64 uiAddrMask, -+ IMG_UINT64 uiPxEProtMask, -+ IMG_UINT64 uiDataValidEnable, -+ IMG_UINT32 ui32Flags, -+ PDUMP_MMU_TYPE eMMUType); -+ -+PVRSRV_ERROR -+PDumpMMUAllocMMUContext(PPVRSRV_DEVICE_NODE psDeviceNode, -+ const IMG_CHAR *pszPDumpMemSpaceName, -+ IMG_DEV_PHYADDR sPCDevPAddr, -+ PDUMP_MMU_TYPE eMMUType, -+ IMG_UINT32 *pui32MMUContextID, -+ IMG_UINT32 ui32PDumpFlags); -+ -+PVRSRV_ERROR -+PDumpMMUFreeMMUContext(PPVRSRV_DEVICE_NODE psDeviceNode, -+ const IMG_CHAR *pszPDumpMemSpaceName, -+ IMG_UINT32 ui32MMUContextID, -+ IMG_UINT32 ui32PDumpFlags); -+ -+PVRSRV_ERROR -+PDumpMMUSAB(PPVRSRV_DEVICE_NODE psDeviceNode, -+ const IMG_CHAR *pszPDumpMemNamespace, -+ IMG_UINT32 uiPDumpMMUCtx, -+ IMG_DEV_VIRTADDR sDevAddrStart, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 uiFileOffset, -+ IMG_UINT32 ui32PDumpFlags); -+ 
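Editor's aside (not part of the patch content above or below): the MMU-context helpers declared in pdump_mmu.h are meant to be used as a matched pair around the lifetime of a page catalogue. The following is a minimal sketch under stated assumptions - the helper name ExampleDumpMMUContext and the memspace string "SYSMEM" are placeholders, and the caller is assumed to already hold the device node, the page-catalogue physical address, the MMU type and the PDump flags it wants the commands emitted with.

    #include "pdump_mmu.h"  /* PDumpMMUAllocMMUContext(), PDumpMMUFreeMMUContext() */

    /* Placeholder helper: bracket the PDump script output for one page catalogue. */
    static PVRSRV_ERROR ExampleDumpMMUContext(PPVRSRV_DEVICE_NODE psDevNode,
                                              IMG_DEV_PHYADDR sPCDevPAddr,
                                              PDUMP_MMU_TYPE eMMUType,
                                              IMG_UINT32 ui32PDumpFlags)
    {
        IMG_UINT32 ui32CtxID;
        PVRSRV_ERROR eError;

        /* Reserve a PDump MMU context ID and emit the allocation for the
         * page catalogue ("SYSMEM" is an assumed memspace name). */
        eError = PDumpMMUAllocMMUContext(psDevNode, "SYSMEM", sPCDevPAddr,
                                         eMMUType, &ui32CtxID, ui32PDumpFlags);
        if (eError != PVRSRV_OK)
            return eError;

        /* Page-table dumps (PDumpMMUDumpPxEntries) and SAB captures
         * (PDumpMMUSAB) issued at this point would all quote ui32CtxID so
         * the generated script replays against the same context. */

        /* Release the context ID when the catalogue is torn down. */
        return PDumpMMUFreeMMUContext(psDevNode, "SYSMEM", ui32CtxID,
                                      ui32PDumpFlags);
    }

The PDUMP_MMU_ALLOC_MMUCONTEXT / PDUMP_MMU_FREE_MMUCONTEXT macros defined next are thin wrappers over the same two calls, so callers can keep a single spelling whether or not PDUMP is built in.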
-+#define PDUMP_MMU_ALLOC_MMUCONTEXT(psDevNode, pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID, ui32PDumpFlags) \ -+ PDumpMMUAllocMMUContext(psDevNode, \ -+ pszPDumpMemDevName, \ -+ sPCDevPAddr, \ -+ eMMUType, \ -+ puiPDumpCtxID, \ -+ ui32PDumpFlags) -+ -+#define PDUMP_MMU_FREE_MMUCONTEXT(psDevNode, pszPDumpMemDevName, uiPDumpCtxID, ui32PDumpFlags) \ -+ PDumpMMUFreeMMUContext(psDevNode, pszPDumpMemDevName, uiPDumpCtxID, ui32PDumpFlags) -+#else /* PDUMP */ -+ -+#define PDUMP_MMU_ALLOC_MMUCONTEXT(pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID, ui32PDumpFlags) \ -+ ((void)0) -+#define PDUMP_MMU_FREE_MMUCONTEXT(psDevNode, pszPDumpMemDevName, uiPDumpCtxID, ui32PDumpFlags) \ -+ ((void)0) -+ -+#endif /* PDUMP */ -+ -+#endif -diff --git a/drivers/gpu/drm/img-rogue/pdump_physmem.h b/drivers/gpu/drm/img-rogue/pdump_physmem.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pdump_physmem.h -@@ -0,0 +1,300 @@ -+/**************************************************************************/ /*! -+@File -+@Title pdump functions to assist with physmem allocations -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements basic low level control of MMU. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /***************************************************************************/ -+ -+#ifndef SRVSRV_PDUMP_PHYSMEM_H -+#define SRVSRV_PDUMP_PHYSMEM_H -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+#include "pmr.h" -+#include "device.h" /* For device node */ -+ -+#define PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH 40 -+#define PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH 60 -+#define PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH (PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH) -+ -+typedef struct _PDUMP_PHYSMEM_INFO_T_ PDUMP_PHYSMEM_INFO_T; -+ -+#if defined(PDUMP) -+PVRSRV_ERROR -+PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle, -+ IMG_CHAR **ppszSymbolicAddress); -+ -+PVRSRV_ERROR -+PDumpMalloc(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszDevSpace, -+ const IMG_CHAR *pszSymbolicAddress, -+ IMG_UINT64 ui64Size, -+ /* alignment is alignment of start of buffer _and_ -+ minimum contiguity - i.e. smallest allowable -+ page-size. */ -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ IMG_BOOL bInitialise, -+ IMG_UINT8 ui8InitValue, -+ IMG_HANDLE *phHandlePtr, -+ IMG_UINT32 ui32PDumpFlags); -+ -+PVRSRV_ERROR -+PDumpMallocUnlocked(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszDevSpace, -+ const IMG_CHAR *pszSymbolicAddress, -+ IMG_UINT64 ui64Size, -+ /* alignment is alignment of start of buffer _and_ -+ minimum contiguity - i.e. smallest allowable -+ page-size. */ -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ IMG_BOOL bInitialise, -+ IMG_UINT8 ui8InitValue, -+ IMG_HANDLE *phHandlePtr, -+ IMG_UINT32 ui32PDumpFlags); -+ -+PVRSRV_ERROR -+PDumpFree(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_HANDLE hPDumpAllocationInfoHandle); -+ -+PVRSRV_ERROR -+PDumpFreeUnlocked(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_HANDLE hPDumpAllocationInfoHandle); -+ -+void -+PDumpMakeStringValid(IMG_CHAR *pszString, -+ IMG_UINT32 ui32StrLen); -+#else /* PDUMP */ -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PDumpGetSymbolicAddr) -+#endif -+static INLINE PVRSRV_ERROR -+PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle, -+ IMG_CHAR **ppszSymbolicAddress) -+{ -+ PVR_UNREFERENCED_PARAMETER(hPhysmemPDumpHandle); -+ PVR_UNREFERENCED_PARAMETER(ppszSymbolicAddress); -+ return PVRSRV_OK; -+} -+ -+static INLINE PVRSRV_ERROR -+PDumpMalloc(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszDevSpace, -+ const IMG_CHAR *pszSymbolicAddress, -+ IMG_UINT64 ui64Size, -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ IMG_BOOL bInitialise, -+ IMG_UINT8 ui8InitValue, -+ IMG_HANDLE *phHandlePtr, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(pszDevSpace); -+ PVR_UNREFERENCED_PARAMETER(pszSymbolicAddress); -+ PVR_UNREFERENCED_PARAMETER(ui64Size); -+ PVR_UNREFERENCED_PARAMETER(uiAlign); -+ PVR_UNREFERENCED_PARAMETER(bInitialise); -+ PVR_UNREFERENCED_PARAMETER(ui8InitValue); -+ PVR_UNREFERENCED_PARAMETER(phHandlePtr); -+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); -+ return PVRSRV_OK; -+} -+ -+static INLINE PVRSRV_ERROR -+PDumpMallocUnlocked(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszDevSpace, -+ const IMG_CHAR *pszSymbolicAddress, -+ IMG_UINT64 ui64Size, -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ IMG_BOOL bInitialise, -+ IMG_UINT8 ui8InitValue, -+ IMG_HANDLE *phHandlePtr, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(pszDevSpace); -+ PVR_UNREFERENCED_PARAMETER(pszSymbolicAddress); -+ PVR_UNREFERENCED_PARAMETER(ui64Size); -+ PVR_UNREFERENCED_PARAMETER(uiAlign); -+ 
PVR_UNREFERENCED_PARAMETER(bInitialise); -+ PVR_UNREFERENCED_PARAMETER(ui8InitValue); -+ PVR_UNREFERENCED_PARAMETER(phHandlePtr); -+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); -+ return PVRSRV_OK; -+} -+ -+static INLINE PVRSRV_ERROR -+PDumpFree(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_HANDLE hPDumpAllocationInfoHandle) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle); -+ return PVRSRV_OK; -+} -+ -+static INLINE PVRSRV_ERROR -+PDumpFreeUnlocked(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_HANDLE hPDumpAllocationInfoHandle) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle); -+ return PVRSRV_OK; -+} -+#endif /* PDUMP */ -+ -+#define PMR_DEFAULT_PREFIX "PMR" -+#define PMR_SPARSE_PREFIX "SPMR" -+#define PMR_SYMBOLICADDR_FMTSPEC "%s%"IMG_UINT64_FMTSPEC"_%"IMG_UINT64_FMTSPEC"_%s" -+#define PMR_MEMSPACE_FMTSPEC "%s" -+#define PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC "CC_%s" -+ -+PVRSRV_ERROR -+PDumpPMRWRW32(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszDevSpace, -+ const IMG_CHAR *pszSymbolicName, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT32 ui32Value, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+PVRSRV_ERROR -+PDumpPMRWRW32InternalVarToMem(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszDevSpace, -+ const IMG_CHAR *pszSymbolicName, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ const IMG_CHAR *pszInternalVar, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+PVRSRV_ERROR -+PDumpPMRRDW32MemToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszInternalVar, -+ const IMG_CHAR *pszDevSpace, -+ const IMG_CHAR *pszSymbolicName, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+PVRSRV_ERROR -+PDumpPMRWRW64(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszDevSpace, -+ const IMG_CHAR *pszSymbolicName, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT64 ui64Value, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+PVRSRV_ERROR -+PDumpPMRWRW64InternalVarToMem(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszDevSpace, -+ const IMG_CHAR *pszSymbolicName, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ const IMG_CHAR *pszInternalVar, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+PVRSRV_ERROR -+PDumpPMRRDW64MemToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszInternalVar, -+ const IMG_CHAR *pszDevSpace, -+ const IMG_CHAR *pszSymbolicName, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+PVRSRV_ERROR -+PDumpPMRLDB(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszDevSpace, -+ const IMG_CHAR *pszSymbolicName, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 uiFileOffset, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+PVRSRV_ERROR -+PDumpPMRSAB(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszDevSpace, -+ const IMG_CHAR *pszSymbolicName, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ const IMG_CHAR *pszFileName, -+ IMG_UINT32 uiFileOffset); -+ -+/* -+ PDumpPMRPOL() -+ -+ Emits a POL to the PDUMP. 
-+*/ -+PVRSRV_ERROR -+PDumpPMRPOL(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszMempaceName, -+ const IMG_CHAR *pszSymbolicName, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ IMG_UINT32 uiCount, -+ IMG_UINT32 uiDelay, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+PVRSRV_ERROR -+PDumpPMRCBP(PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_CHAR *pszMemspaceName, -+ const IMG_CHAR *pszSymbolicName, -+ IMG_DEVMEM_OFFSET_T uiReadOffset, -+ IMG_DEVMEM_OFFSET_T uiWriteOffset, -+ IMG_DEVMEM_SIZE_T uiPacketSize, -+ IMG_DEVMEM_SIZE_T uiBufferSize); -+ -+/* -+ * PDumpWriteParameterBlob() -+ * -+ * Writes a binary blob to the pdump param stream containing the current -+ * contents of the memory, and returns the filename and offset of where -+ * that blob is located (for use in a subsequent LDB, for example). -+ * -+ * Caller to provide buffer to receive filename, and declare the size of -+ * that buffer. -+ */ -+PVRSRV_ERROR -+PDumpWriteParameterBlob(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT8 *pcBuffer, -+ size_t uiNumBytes, -+ PDUMP_FLAGS_T uiPDumpFlags, -+ IMG_CHAR *pszFilenameOut, -+ size_t uiFilenameBufSz, -+ PDUMP_FILEOFFSET_T *puiOffsetOut); -+ -+#endif /* #ifndef SRVSRV_PDUMP_PHYSMEM_H */ -diff --git a/drivers/gpu/drm/img-rogue/pdump_symbolicaddr.h b/drivers/gpu/drm/img-rogue/pdump_symbolicaddr.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pdump_symbolicaddr.h -@@ -0,0 +1,55 @@ -+/**************************************************************************/ /*! -+@File -+@Title Abstraction of PDUMP symbolic address derivation -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Allows pdump functions to derive symbolic addresses on-the-fly -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
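The comment on PDumpWriteParameterBlob above spells out the caller contract: pass the data plus a filename buffer and its size, and receive back the parameter-stream filename and offset for use in a later LDB. A standalone sketch of that calling pattern follows; ExampleWriteParameterBlob is a stand-in for the driver call, and the filename and offset it reports are made up.

#include <stdio.h>
#include <stddef.h>

/* Stand-in for the driver call: copies a capture filename into the
 * caller-provided buffer and reports the offset of the written blob. */
static int ExampleWriteParameterBlob(const unsigned char *pcBuffer,
                                     size_t uiNumBytes,
                                     char *pszFilenameOut,
                                     size_t uiFilenameBufSz,
                                     unsigned long *puiOffsetOut)
{
	(void)pcBuffer;
	(void)uiNumBytes;
	if (snprintf(pszFilenameOut, uiFilenameBufSz, "%s", "out.prm") >= (int)uiFilenameBufSz)
		return -1;             /* filename did not fit the caller's buffer */
	*puiOffsetOut = 0;         /* first blob lands at offset 0 in this sketch */
	return 0;
}

int main(void)
{
	unsigned char aucPayload[16] = {0};
	char aszFilename[64];          /* caller declares the buffer and its size */
	unsigned long uiOffset;

	if (ExampleWriteParameterBlob(aucPayload, sizeof(aucPayload),
	                              aszFilename, sizeof(aszFilename), &uiOffset) == 0)
	{
		/* The returned filename/offset would feed a subsequent LDB command. */
		printf("LDB %s offset=%lu size=%zu\n", aszFilename, uiOffset, sizeof(aucPayload));
	}
	return 0;
}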
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+#ifndef SRVKM_PDUMP_SYMBOLICADDR_H -+#define SRVKM_PDUMP_SYMBOLICADDR_H -+ -+#include "img_types.h" -+ -+#include "pvrsrv_error.h" -+ -+/* pdump symbolic addresses are generated on-the-fly with a callback */ -+ -+typedef PVRSRV_ERROR (*PVRSRV_SYMADDRFUNCPTR)(IMG_HANDLE hPriv, IMG_UINT32 uiOffset, IMG_CHAR *pszSymbolicAddr, IMG_UINT32 ui32SymbolicAddrLen, IMG_UINT32 *pui32NewOffset); -+ -+#endif /* #ifndef SRVKM_PDUMP_SYMBOLICADDR_H */ -diff --git a/drivers/gpu/drm/img-rogue/pdumpdefs.h b/drivers/gpu/drm/img-rogue/pdumpdefs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pdumpdefs.h -@@ -0,0 +1,268 @@ -+/*************************************************************************/ /*! -+@File -+@Title PDUMP definitions header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description PDUMP definitions header -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PDUMPDEFS_H -+#define PDUMPDEFS_H -+ -+/*! PDump Pixel Format Enumeration */ -+typedef enum _PDUMP_PIXEL_FORMAT_ -+{ -+ PVRSRV_PDUMP_PIXEL_FORMAT_UNSUPPORTED = 0, -+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1, -+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2, -+ PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3, -+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4, -+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5, -+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6, -+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7, -+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8, -+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9, -+/* PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10, */ -+ PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11, -+ PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12, -+ PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13, -+ PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14, -+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15, -+ PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16, -+ PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17, -+ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18, -+ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19, -+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20, -+ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21, -+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22, -+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23, -+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24, -+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25, -+ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26, -+ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27, -+ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28, -+ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29, -+ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30, -+ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31, -+ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32, -+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33, -+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34, -+ PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35, -+ PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36, -+ PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37, -+ PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38, -+ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA8888 = 39, -+ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR4444 = 40, -+ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA4444 = 41, -+ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA4444 = 42, -+ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR1555 = 43, -+ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA5551 = 44, -+ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA5551 = 45, -+ PVRSRV_PDUMP_PIXEL_FORMAT_BGR565 = 46, -+ PVRSRV_PDUMP_PIXEL_FORMAT_A8 = 47, -+ PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16F16 = 49, -+ PVRSRV_PDUMP_PIXEL_FORMAT_A4 = 50, -+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB2101010 = 51, -+ PVRSRV_PDUMP_PIXEL_FORMAT_RSGSBS888 = 52, -+ PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32F32 = 53, -+ PVRSRV_PDUMP_PIXEL_FORMAT_F16F16 = 54, -+ PVRSRV_PDUMP_PIXEL_FORMAT_F32F32 = 55, -+ PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16 = 56, -+ PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32 = 57, -+ PVRSRV_PDUMP_PIXEL_FORMAT_U8 = 58, -+ PVRSRV_PDUMP_PIXEL_FORMAT_U8U8 = 59, -+ PVRSRV_PDUMP_PIXEL_FORMAT_U16 = 60, -+ PVRSRV_PDUMP_PIXEL_FORMAT_U16U16 = 61, -+ PVRSRV_PDUMP_PIXEL_FORMAT_U16U16U16U16 = 62, -+ PVRSRV_PDUMP_PIXEL_FORMAT_U32 = 63, -+ PVRSRV_PDUMP_PIXEL_FORMAT_U32U32 = 64, -+ 
PVRSRV_PDUMP_PIXEL_FORMAT_U32U32U32U32 = 65, -+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV32 = 66, -+ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR10_PACK16 = 67, -+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB10_PACK16 = 68, -+ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA10_PACK16 = 69, -+ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA10_PACK16 = 70, -+ -+ PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff -+ -+} PDUMP_PIXEL_FORMAT; -+ -+typedef enum _PDUMP_FBC_SWIZZLE_ -+{ -+ PVRSRV_PDUMP_FBC_SWIZZLE_ARGB = 0x0, -+ PVRSRV_PDUMP_FBC_SWIZZLE_ARBG = 0x1, -+ PVRSRV_PDUMP_FBC_SWIZZLE_AGRB = 0x2, -+ PVRSRV_PDUMP_FBC_SWIZZLE_AGBR = 0x3, -+ PVRSRV_PDUMP_FBC_SWIZZLE_ABGR = 0x4, -+ PVRSRV_PDUMP_FBC_SWIZZLE_ABRG = 0x5, -+ PVRSRV_PDUMP_FBC_SWIZZLE_RGBA = 0x8, -+ PVRSRV_PDUMP_FBC_SWIZZLE_RBGA = 0x9, -+ PVRSRV_PDUMP_FBC_SWIZZLE_GRBA = 0xA, -+ PVRSRV_PDUMP_FBC_SWIZZLE_GBRA = 0xB, -+ PVRSRV_PDUMP_FBC_SWIZZLE_BGRA = 0xC, -+ PVRSRV_PDUMP_FBC_SWIZZLE_BRGA = 0xD, -+} PDUMP_FBC_SWIZZLE; -+ -+/*! PDump addrmode */ -+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT 0 -+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_MASK 0x000000FF -+ -+#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT 8 -+#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_NEGATIVE (1U << PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT) -+ -+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_CONTROL_SHIFT 11 -+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_CONTROL_MASK 0x00000800 -+ -+#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_SHIFT 12 -+#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_MASK 0x000FF000 -+ -+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT 20 -+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_MASK 0x00F00000 -+ -+#define PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT 24 -+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT 25 -+ -+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT 26 -+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_MASK 0x0C000000 -+ -+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT 28 -+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_MASK 0xF0000000 -+ -+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_STRIDE (0U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE1 (1U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE2 (2U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE3 (3U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE4 (4U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE5 (5U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE6 (6U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE7 (7U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_TWIDDLED (9U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_PAGETILED (11U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_ZTWIDDLED (12U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) -+ -+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_25_50_75 (0U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_CONTROL_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_25_37_50 (1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_CONTROL_SHIFT) -+ -+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_NONE (0U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT (1U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_DIRECT (2U << 
PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_32X2_DIRECT (3U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT (4U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT (5U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT_4TILE (6U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT_4TILE (7U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) -+ -+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_NONE (0U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_75 (1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_37 (1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_50 (2U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_25 (3U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT) -+ -+#define PVRSRV_PDUMP_ADDRMODE_FBC_DECOR (1U << PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT) -+ -+#define PVRSRV_PDUMP_ADDRMODE_FBC_LOSSY (1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT) -+ -+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_BASE (1U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_ENHANCED (2U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V2 (3U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_SURFACE (4U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_RESOURCE (5U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_1_SURFACE (6U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_1_RESOURCE (7U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V4 (8U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V4PLUS (9U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) -+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_TFBCDC (10U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) -+ -+/*! PDump Poll Operator */ -+typedef enum _PDUMP_POLL_OPERATOR -+{ -+ PDUMP_POLL_OPERATOR_EQUAL = 0, -+ PDUMP_POLL_OPERATOR_LESS = 1, -+ PDUMP_POLL_OPERATOR_LESSEQUAL = 2, -+ PDUMP_POLL_OPERATOR_GREATER = 3, -+ PDUMP_POLL_OPERATOR_GREATEREQUAL = 4, -+ PDUMP_POLL_OPERATOR_NOTEQUAL = 5, -+} PDUMP_POLL_OPERATOR; -+ -+ -+#define PVRSRV_PDUMP_MAX_FILENAME_SIZE 75 /*!< Max length of a pdump log file name */ -+#define PVRSRV_PDUMP_MAX_COMMENT_SIZE 350 /*!< Max length of a pdump comment */ -+ -+/*! -+ PDump MMU type -+*/ -+typedef enum -+{ -+ PDUMP_MMU_TYPE_4KPAGE_32BIT_STDTILE = 1, -+ PDUMP_MMU_TYPE_VARPAGE_32BIT_STDTILE = 2, -+ PDUMP_MMU_TYPE_4KPAGE_36BIT_EXTTILE = 3, -+ PDUMP_MMU_TYPE_4KPAGE_32BIT_EXTTILE = 4, -+ PDUMP_MMU_TYPE_4KPAGE_36BIT_STDTILE = 5, -+ PDUMP_MMU_TYPE_VARPAGE_40BIT = 6, -+ PDUMP_MMU_TYPE_VIDEO_40BIT_STDTILE = 7, -+ PDUMP_MMU_TYPE_VIDEO_40BIT_EXTTILE = 8, -+ PDUMP_MMU_TYPE_MIPS_MICROAPTIV = 9, -+ PDUMP_MMU_TYPE_LAST -+} PDUMP_MMU_TYPE; -+ -+/*! 
-+ PDump states -+ These values are used by the bridge call PVRSRVPDumpGetState -+*/ -+#define PDUMP_STATE_CAPTURE_FRAME (1U) /*!< Flag represents the PDump being in capture range or not*/ -+#define PDUMP_STATE_CONNECTED (2U) /*!< Flag represents the PDump Client App being connected on not */ -+#define PDUMP_STATE_SUSPENDED (4U) /*!< Flag represents the PDump being suspended or not */ -+#define PDUMP_STATE_CAPTURE_IN_INTERVAL (8U) /*!< Flag represents the PDump being in a capture range interval */ -+#define PDUMP_STATE_APP_TERMINATED (16U) /*!< Flag represents the PDump captured app has been terminated */ -+ -+/*! -+ PDump Capture modes -+ Values used with calls to PVRSRVPDumpSetDefaultCaptureParams -+*/ -+#define PDUMP_CAPMODE_UNSET 0x00000000UL -+#define PDUMP_CAPMODE_FRAMED 0x00000001UL -+#define PDUMP_CAPMODE_CONTINUOUS 0x00000002UL -+#define PDUMP_CAPMODE_BLOCKED 0x00000003UL -+ -+#define PDUMP_CAPMODE_MAX PDUMP_CAPMODE_BLOCKED -+ -+#endif /* PDUMPDEFS_H */ -+ -+/***************************************************************************** -+ End of file (pdumpdefs.h) -+*****************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/pdumpdesc.h b/drivers/gpu/drm/img-rogue/pdumpdesc.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pdumpdesc.h -@@ -0,0 +1,230 @@ -+/*************************************************************************/ /*! -+@File pdumpdesc.h -+@Title PDump Descriptor format -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Describes PDump descriptors that may be passed to the -+ extraction routines (SAB). -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
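The PVRSRV_PDUMP_ADDRMODE_* definitions above describe a single 32-bit word carrying several packed fields (memory format, FBC mode, FBC compatibility version, and so on), each with its own shift and mask. The sketch below packs and unpacks three of those fields; the shift/mask values are repeated locally purely so the example compiles on its own.

#include <stdio.h>
#include <stdint.h>

/* Field positions mirroring the PVRSRV_PDUMP_ADDRMODE_* layout above. */
#define EX_MEMFORMAT_SHIFT 0
#define EX_MEMFORMAT_MASK  0x000000FFu
#define EX_FBCMODE_SHIFT   20
#define EX_FBCMODE_MASK    0x00F00000u
#define EX_FBCCOMPAT_SHIFT 28
#define EX_FBCCOMPAT_MASK  0xF0000000u

int main(void)
{
	/* Pack: twiddled memory format (9), 8x8 direct FBC (1), TFBCDC compat (10). */
	uint32_t ui32AddrMode = (9u  << EX_MEMFORMAT_SHIFT) |
	                        (1u  << EX_FBCMODE_SHIFT)   |
	                        (10u << EX_FBCCOMPAT_SHIFT);

	/* Unpack each field with its mask and shift. */
	printf("memformat=%u fbcmode=%u fbccompat=%u\n",
	       (unsigned)((ui32AddrMode & EX_MEMFORMAT_MASK) >> EX_MEMFORMAT_SHIFT),
	       (unsigned)((ui32AddrMode & EX_FBCMODE_MASK)   >> EX_FBCMODE_SHIFT),
	       (unsigned)((ui32AddrMode & EX_FBCCOMPAT_MASK) >> EX_FBCCOMPAT_SHIFT));
	return 0;
}

The other fields (stride sense, BIF tile mode, lossy TFBC mode) combine the same way: the value is shifted into position, OR'd into the word, and recovered with mask-then-shift.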
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(PDUMPDESC_H) -+#define PDUMPDESC_H -+ -+#include "pdumpdefs.h" -+ -+/* -+ * Common fields -+ */ -+#define HEADER_WORD0_TYPE_SHIFT (0) -+#define HEADER_WORD0_TYPE_CLRMSK (0xFFFFFFFFU) -+ -+#define HEADER_WORD1_SIZE_SHIFT (0) -+#define HEADER_WORD1_SIZE_CLRMSK (0x0000FFFFU) -+#define HEADER_WORD1_VERSION_SHIFT (16) -+#define HEADER_WORD1_VERSION_CLRMSK (0xFFFF0000U) -+ -+#define HEADER_WORD2_DATA_SIZE_SHIFT (0) -+#define HEADER_WORD2_DATA_SIZE_CLRMSK (0xFFFFFFFFU) -+ -+ -+/* -+ * The image type descriptor -+ */ -+ -+/* -+ * Header type (IMGBv3) - 'IMGB' in hex + VERSION 3 -+ * Header size - 64 bytes -+ */ -+#define IMAGE_HEADER_TYPE (0x42474D49) -+#define IMAGE_HEADER_SIZE (64) -+#define IMAGE_HEADER_VERSION (3) -+ -+/* -+ * Image type-specific fields -+ */ -+#define IMAGE_HEADER_WORD3_LOGICAL_WIDTH_SHIFT (0) -+#define IMAGE_HEADER_WORD3_LOGICAL_WIDTH_CLRMSK (0xFFFFFFFFU) -+ -+#define IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_SHIFT (0) -+#define IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_CLRMSK (0xFFFFFFFFU) -+ -+#define IMAGE_HEADER_WORD5_FORMAT_SHIFT (0) -+#define IMAGE_HEADER_WORD5_FORMAT_CLRMSK (0xFFFFFFFFU) -+ -+#define IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_SHIFT (0) -+#define IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_CLRMSK (0xFFFFFFFFU) -+ -+#define IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_SHIFT (0) -+#define IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_CLRMSK (0xFFFFFFFFU) -+ -+#define IMAGE_HEADER_WORD8_TWIDDLING_SHIFT (0) -+#define IMAGE_HEADER_WORD8_TWIDDLING_CLRMSK (0x000000FFU) -+#define IMAGE_HEADER_WORD8_TWIDDLING_STRIDED (0 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT) -+#define IMAGE_HEADER_WORD8_TWIDDLING_NTWIDDLE (9 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT) -+#define IMAGE_HEADER_WORD8_TWIDDLING_ZTWIDDLE (12 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT) -+ -+ -+#define IMAGE_HEADER_WORD8_STRIDE_SHIFT (8) -+#define IMAGE_HEADER_WORD8_STRIDE_CLRMSK (0x0000FF00U) -+#define IMAGE_HEADER_WORD8_STRIDE_POSITIVE (0 << IMAGE_HEADER_WORD8_STRIDE_SHIFT) -+#define IMAGE_HEADER_WORD8_STRIDE_NEGATIVE (1 << IMAGE_HEADER_WORD8_STRIDE_SHIFT) -+ -+#define IMAGE_HEADER_WORD8_BIFTYPE_SHIFT (16) -+#define IMAGE_HEADER_WORD8_BIFTYPE_CLRMSK (0x00FF0000U) -+#define IMAGE_HEADER_WORD8_BIFTYPE_NONE (0 << IMAGE_HEADER_WORD8_BIFTYPE_SHIFT) -+ -+#define IMAGE_HEADER_WORD8_FBCTYPE_SHIFT (24) -+#define IMAGE_HEADER_WORD8_FBCTYPE_CLRMSK (0xFF000000U) -+#define IMAGE_HEADER_WORD8_FBCTYPE_8X8 (1 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT) -+#define IMAGE_HEADER_WORD8_FBCTYPE_16x4 (2 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT) -+#define IMAGE_HEADER_WORD8_FBCTYPE_32x2 (3 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT) -+ -+#define IMAGE_HEADER_WORD9_FBCDECOR_SHIFT (0) -+#define IMAGE_HEADER_WORD9_FBCDECOR_CLRMSK (0x000000FFU) -+#define IMAGE_HEADER_WORD9_FBCDECOR_ENABLE (1 << IMAGE_HEADER_WORD9_FBCDECOR_SHIFT) -+ -+/* Align with fbcomp_export_c.h in pdump_tools branch */ -+#define IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT (8) -+#define 
IMAGE_HEADER_WORD9_FBCCOMPAT_CLRMSK (0x0000FF00U) -+#define IMAGE_HEADER_WORD9_FBCCOMPAT_SAME_AS_GPU (0 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) -+#define IMAGE_HEADER_WORD9_FBCCOMPAT_BASE (1 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) -+#define IMAGE_HEADER_WORD9_FBCCOMPAT_TWIDDLED_EN (2 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* TWIDDLED_ENHANCED */ -+#define IMAGE_HEADER_WORD9_FBCCOMPAT_V2 (3 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) -+#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_0_LAYOUT1 (4 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) -+#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_0_LAYOUT2 (5 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* V30_WITH_HEADER_REMAP */ -+#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_1_LAYOUT1 (6 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) -+#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_1_LAYOUT2 (7 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* V31_WITH_HEADER_REMAP */ -+#define IMAGE_HEADER_WORD9_FBCCOMPAT_V4 (8 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) -+#define IMAGE_HEADER_WORD9_FBCCOMPAT_V4_PLUS (9 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) -+#define IMAGE_HEADER_WORD9_FBCCOMPAT_TFBC (10 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) -+ -+#define IMAGE_HEADER_WORD9_LOSSY_SHIFT (16) -+#define IMAGE_HEADER_WORD9_LOSSY_CLRMSK (0x00FF0000U) -+/* Non-TFBC */ -+#define IMAGE_HEADER_WORD9_LOSSY_ON (1 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) -+ -+/* TFBC */ -+#define IMAGE_HEADER_WORD9_LOSSY_75 (1 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) -+#define IMAGE_HEADER_WORD9_LOSSY_37 (1 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) -+#define IMAGE_HEADER_WORD9_LOSSY_50 (2 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) -+#define IMAGE_HEADER_WORD9_LOSSY_25 (3 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) -+#define IMAGE_HEADER_WORD9_LOSSY_OFF (0 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) -+ -+#define IMAGE_HEADER_WORD9_SWIZZLE_SHIFT (24) -+#define IMAGE_HEADER_WORD9_SWIZZLE_CLRMSK (0xFF000000U) -+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ARGB (0x0 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) -+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ARBG (0x1 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) -+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_AGRB (0x2 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) -+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_AGBR (0x3 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) -+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ABGR (0x4 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) -+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ABRG (0x5 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) -+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_RGBA (0x8 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) -+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_RBGA (0x9 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) -+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_GRBA (0xA << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) -+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_GBRA (0xB << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) -+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_BGRA (0xC << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) -+#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_BRGA (0xD << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) -+ -+#define IMAGE_HEADER_WORD10_FBCCLEAR_CH0_SHIFT (0) -+#define IMAGE_HEADER_WORD10_FBCCLEAR_CH0_CLRMSK (0xFFFFFFFFU) -+ -+#define IMAGE_HEADER_WORD11_FBCCLEAR_CH1_SHIFT (0) -+#define IMAGE_HEADER_WORD11_FBCCLEAR_CH1_CLRMSK (0xFFFFFFFFU) -+ -+#define IMAGE_HEADER_WORD12_FBCCLEAR_CH2_SHIFT (0) -+#define IMAGE_HEADER_WORD12_FBCCLEAR_CH2_CLRMSK (0xFFFFFFFFU) -+ -+#define IMAGE_HEADER_WORD13_FBCCLEAR_CH3_SHIFT (0) -+#define IMAGE_HEADER_WORD13_FBCCLEAR_CH3_CLRMSK (0xFFFFFFFFU) -+ -+#define IMAGE_HEADER_WORD14_TFBC_GROUP_SHIFT (0) -+#define IMAGE_HEADER_WORD14_TFBC_GROUP_CLRMSK (0x000000FFU) -+#define IMAGE_HEADER_WORD14_TFBC_GROUP_25_50_75 (0 << 
IMAGE_HEADER_WORD14_TFBC_GROUP_SHIFT) -+#define IMAGE_HEADER_WORD14_TFBC_GROUP_25_37_50 (1 << IMAGE_HEADER_WORD14_TFBC_GROUP_SHIFT) -+ -+#define IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT (8) -+#define IMAGE_HEADER_WORD14_COMP_SCHEME_CLRMSK (0x0000FF00U) -+#define IMAGE_HEADER_WORD14_COMP_SCHEME_ALL (0 << IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT) -+#define IMAGE_HEADER_WORD14_COMP_SCHEME_D_STD_CORR (1 << IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT) -+#define IMAGE_HEADER_WORD14_COMP_SCHEME_D_STD_ONLY (2 << IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT) -+#define IMAGE_HEADER_WORD14_COMP_SCHEME_PTC_ONLY (3 << IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT) -+ -+#define IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_SHIFT (16) -+#define IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_CLRMSK (0x00FF0000U) -+#define IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_EN (1 << IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_SHIFT) /* Treat YUV10 optimal formats as 8 bits */ -+ -+#define IMAGE_HEADER_WORD14_LOSSY_MIN_CHANNEL_SHIFT (24) -+#define IMAGE_HEADER_WORD14_LOSSY_MIN_CHANNEL_CLRMSK (0xFF000000U) -+#define IMAGE_HEADER_WORD14_LOSSY_MIN_CHANNEL_EN (1 << IMAGE_HEADER_WORD14_LOSSY_MIN_CHANNEL_SHIFT) /* Override lossy min channel setting */ -+ -+/* IMAGE_HEADER_WORD15_RESERVED2 */ -+ -+/* -+ * The data type descriptor -+ */ -+ -+/* -+ * Header type (IMGCv1) - 'IMGC' in hex + VERSION 0 -+ * Header size - 20 bytes (5 x 32 bit WORDS) -+ */ -+#define DATA_HEADER_TYPE (0x43474D49) -+#define DATA_HEADER_SIZE (20) -+#define DATA_HEADER_VERSION (0) -+ -+/* -+ * The IBIN type descriptor -+ */ -+ -+/* -+ * Header type (IBIN) - 'IBIN' in hex + VERSION 0 -+ * Header size - 12 bytes (3 x 32 bit WORDS) -+ */ -+#define IBIN_HEADER_TYPE (0x4e494249) -+#define IBIN_HEADER_SIZE (12) -+#define IBIN_HEADER_VERSION (0) -+ -+/* -+ * Data type-specific fields -+ */ -+#define DATA_HEADER_WORD3_ELEMENT_TYPE_SHIFT (0) -+#define DATA_HEADER_WORD3_ELEMENT_TYPE_CLRMSK (0xFFFFFFFFU) -+ -+#define DATA_HEADER_WORD4_ELEMENT_COUNT_SHIFT (0) -+#define DATA_HEADER_WORD4_ELEMENT_COUNT_CLRMSK (0xFFFFFFFFU) -+ -+#endif /* PDUMPDESC_H */ -diff --git a/drivers/gpu/drm/img-rogue/physheap.c b/drivers/gpu/drm/img-rogue/physheap.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physheap.c -@@ -0,0 +1,1735 @@ -+/*************************************************************************/ /*! -+@File physheap.c -+@Title Physical heap management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Management functions for the physical heap(s). A heap contains -+ all the information required by services when using memory from -+ that heap (such as CPU <> Device physical address translation). -+ A system must register one heap but can have more then one which -+ is why a heap must register with a (system) unique ID. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
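The descriptor header types in pdumpdesc.h above (IMAGE_HEADER_TYPE, DATA_HEADER_TYPE, IBIN_HEADER_TYPE) are ASCII tags stored as 32-bit words: 0x42474D49, 0x43474D49 and 0x4E494249 read back as "IMGB", "IMGC" and "IBIN" when the bytes are viewed in memory order on a little-endian host. A small sketch demonstrating that, under the stated little-endian assumption:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* Header type words from pdumpdesc.h above. */
	const uint32_t aui32Types[] = { 0x42474D49u, 0x43474D49u, 0x4E494249u };
	size_t i;

	for (i = 0; i < sizeof(aui32Types) / sizeof(aui32Types[0]); i++)
	{
		char acTag[5] = {0};
		memcpy(acTag, &aui32Types[i], 4);   /* assumes a little-endian host */
		printf("0x%08X -> \"%s\"\n", aui32Types[i], acTag);
	}
	return 0;
}

The descriptor version is carried separately in the HEADER_WORD1 version field, so the tag word only identifies the descriptor family.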
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+#include "img_types.h" -+#include "img_defs.h" -+#include "physheap.h" -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "osfunc.h" -+#include "pvrsrv.h" -+#include "physmem.h" -+#include "physmem_hostmem.h" -+#include "physmem_lma.h" -+#include "physmem_osmem.h" -+#include "debug_common.h" -+ -+struct _PHYS_HEAP_ -+{ -+ /*! The type of this heap */ -+ PHYS_HEAP_TYPE eType; -+ -+ /*! The allocation policy for this heap */ -+ PHYS_HEAP_POLICY uiPolicy; -+ -+ /* Config flags */ -+ PHYS_HEAP_USAGE_FLAGS ui32UsageFlags; -+ -+ /* OOM Detection state */ -+#if !defined(PVRSRV_PHYSHEAP_DISABLE_OOM_DEMOTION) -+ ATOMIC_T sOOMDetected; -+#endif -+ -+ /*! Pointer to device node struct */ -+ PPVRSRV_DEVICE_NODE psDevNode; -+ /*! PDump name of this physical memory heap */ -+ IMG_CHAR *pszPDumpMemspaceName; -+ /*! Physheap name of this physical memory heap */ -+ IMG_CHAR aszName[PHYS_HEAP_NAME_SIZE]; -+ /*! Private data for the translate routines */ -+ IMG_HANDLE hPrivData; -+ /*! Function callbacks */ -+ PHYS_HEAP_FUNCTIONS *psMemFuncs; -+ -+ /*! Refcount */ -+ IMG_UINT32 ui32RefCount; -+ -+ /*! Implementation specific */ -+ PHEAP_IMPL_DATA pvImplData; -+ PHEAP_IMPL_FUNCS *psImplFuncs; -+ -+ /*! Pointer to next physical heap */ -+ struct _PHYS_HEAP_ *psNext; -+ -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ /*! IPA Policy value from Heap Config */ -+ IMG_UINT32 ui32IPAPolicyValue; -+ -+ /*! IPA Clear Mask value from Heap Config */ -+ IMG_UINT32 ui32IPAClearMask; -+ -+ /*! IPA Bit Shift value from Heap Config */ -+ IMG_UINT32 ui32IPAShift; -+#endif /* defined(PVRSRV_SUPPORT_IPA_FEATURE) */ -+}; -+ -+#if defined(REFCOUNT_DEBUG) -+#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...) \ -+ PVRSRVDebugPrintf(PVR_DBG_WARNING, \ -+ __FILE__, \ -+ __LINE__, \ -+ fmt, \ -+ __VA_ARGS__) -+#else -+#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...) 
-+#endif -+ -+#define IsOOMError(err) ((err == PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES) | \ -+ (err == PVRSRV_ERROR_OUT_OF_MEMORY) | \ -+ (err == PVRSRV_ERROR_PMR_TOO_LARGE)) -+ -+typedef enum _PVR_LAYER_HEAP_ACTION_ -+{ -+ PVR_LAYER_HEAP_ACTION_IGNORE, /* skip heap during heap init */ -+ PVR_LAYER_HEAP_ACTION_INSTANTIATE, /* instantiate heap but don't acquire */ -+ PVR_LAYER_HEAP_ACTION_INITIALISE /* instantiate and acquire */ -+ -+} PVR_LAYER_HEAP_ACTION; -+ -+typedef struct PHYS_HEAP_PROPERTIES_TAG -+{ -+ PVRSRV_PHYS_HEAP eFallbackHeap; -+ PVR_LAYER_HEAP_ACTION ePVRLayerAction; -+ IMG_BOOL bUserModeAlloc; -+} PHYS_HEAP_PROPERTIES; -+ -+/* NOTE: Table entries and order must match enum PVRSRV_PHYS_HEAP to ensure -+ * correct operation of PhysHeapCreatePMR(). -+ */ -+static PHYS_HEAP_PROPERTIES gasHeapProperties[PVRSRV_PHYS_HEAP_LAST] = -+{ -+ /* eFallbackHeap, ePVRLayerAction, bUserModeAlloc */ -+ { PVRSRV_PHYS_HEAP_DEFAULT, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_TRUE }, /* DEFAULT */ -+ { PVRSRV_PHYS_HEAP_DEFAULT, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_TRUE }, /* CPU_LOCAL */ -+ { PVRSRV_PHYS_HEAP_DEFAULT, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_TRUE }, /* GPU_LOCAL */ -+ { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_TRUE }, /* GPU_PRIVATE */ -+ { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_MAIN */ -+ { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_FALSE }, /* EXTERNAL */ -+ { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_FALSE }, /* GPU_COHERENT */ -+ { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_TRUE }, /* GPU_SECURE */ -+ { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_CONFIG */ -+ { PVRSRV_PHYS_HEAP_FW_MAIN, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_CODE */ -+ { PVRSRV_PHYS_HEAP_FW_MAIN, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PRIV_DATA */ -+ { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP_PT */ -+ { PVRSRV_PHYS_HEAP_FW_PREMAP0, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP0 */ -+ { PVRSRV_PHYS_HEAP_FW_PREMAP1, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP1 */ -+ { PVRSRV_PHYS_HEAP_FW_PREMAP2, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP2 */ -+ { PVRSRV_PHYS_HEAP_FW_PREMAP3, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP3 */ -+ { PVRSRV_PHYS_HEAP_FW_PREMAP4, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP4 */ -+ { PVRSRV_PHYS_HEAP_FW_PREMAP5, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP5 */ -+ { PVRSRV_PHYS_HEAP_FW_PREMAP6, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP6 */ -+ { PVRSRV_PHYS_HEAP_FW_PREMAP7, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP7 */ -+ { PVRSRV_PHYS_HEAP_WRAP, PVR_LAYER_HEAP_ACTION_INSTANTIATE, IMG_FALSE }, /* WRAP */ -+ { PVRSRV_PHYS_HEAP_DISPLAY, PVR_LAYER_HEAP_ACTION_INSTANTIATE, IMG_FALSE }, /* DISPLAY */ -+}; -+ -+static_assert((ARRAY_SIZE(gasHeapProperties) == PVRSRV_PHYS_HEAP_LAST), -+ "Size or order of gasHeapProperties entries incorrect for PVRSRV_PHYS_HEAP enum"); -+ -+static IMG_BOOL PhysHeapCreatedByPVRLayer(PVRSRV_PHYS_HEAP ePhysHeap); -+static IMG_BOOL PhysHeapAcquiredByPVRLayer(PVRSRV_PHYS_HEAP ePhysHeap); -+ -+/** -+ * ! IMPORTANT ! -+ * Do not change this string array unless the usage flag definitions in -+ * physheap_config.h have changed. -+ * -+ * NOTE: Use DebugCommonFlagStrings or GetPhysHeapUsageString to get -+ * usage flags string. 
-+ */ -+static const IMG_FLAGS2DESC g_asPhysHeapUsageFlagStrings[] = -+{ -+ {PHYS_HEAP_USAGE_CPU_LOCAL, "CPU_LOCAL"}, -+ {PHYS_HEAP_USAGE_GPU_LOCAL, "GPU_LOCAL"}, -+ {PHYS_HEAP_USAGE_GPU_PRIVATE, "GPU_PRIVATE"}, -+ {PHYS_HEAP_USAGE_EXTERNAL, "EXTERNAL"}, -+ {PHYS_HEAP_USAGE_GPU_COHERENT, "GPU_COHERENT"}, -+ {PHYS_HEAP_USAGE_GPU_SECURE, "GPU_SECURE"}, -+ {PHYS_HEAP_USAGE_FW_SHARED, "FW_SHARED"}, -+ {PHYS_HEAP_USAGE_FW_PRIVATE, "FW_PRIVATE"}, -+ {PHYS_HEAP_USAGE_FW_CODE, "FW_CODE"}, -+ {PHYS_HEAP_USAGE_FW_PRIV_DATA, "FW_PRIV_DATA"}, -+ {PHYS_HEAP_USAGE_FW_PREMAP_PT, "FW_PREMAP_PT"}, -+ {PHYS_HEAP_USAGE_FW_PREMAP, "FW_PREMAP"}, -+ {PHYS_HEAP_USAGE_WRAP, "WRAP"}, -+ {PHYS_HEAP_USAGE_DISPLAY, "DISPLAY"} -+}; -+ -+/*************************************************************************/ /*! -+@Function PhysHeapCheckValidUsageFlags -+@Description Checks if any bits were set outside of the valid ones within -+ PHYS_HEAP_USAGE_FLAGS. -+ -+@Input ui32PhysHeapUsage The value of the usage flag. -+ -+@Return True or False depending on whether there were only valid bits set. -+*/ /**************************************************************************/ -+static inline IMG_BOOL PhysHeapCheckValidUsageFlags(PHYS_HEAP_USAGE_FLAGS ui32PhysHeapUsage) -+{ -+ return !(ui32PhysHeapUsage & ~PHYS_HEAP_USAGE_MASK); -+} -+ -+/*************************************************************************/ /*! -+@Function GetPhysHeapUsageString -+@Description This function is used to create a comma separated string of all -+ usage flags passed in as a bitfield. -+ -+@Input ui32UsageFlags The bitfield of usage flags. -+@Input ui32Size The size of the memory pointed to by -+ pszUsageString. -+@Output pszUsageString A pointer to memory where the created string -+ will be stored. -+ -+@Return If successful PVRSRV_OK, else a PVRSRV_ERROR. -+*/ /**************************************************************************/ -+static PVRSRV_ERROR GetPhysHeapUsageString(PHYS_HEAP_USAGE_FLAGS ui32UsageFlags, -+ IMG_UINT32 ui32Size, -+ IMG_CHAR *const pszUsageString) -+{ -+ IMG_UINT32 i; -+ IMG_BOOL bFirst = IMG_TRUE; -+ size_t uiSize = 0; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pszUsageString != NULL, "pszUsageString"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(ui32Size > 0, "ui32Size"); -+ -+ /* Initialise the string to be null terminated at the beginning */ -+ uiSize = OSStringLCopy(pszUsageString, "\0", sizeof(IMG_CHAR)); -+ -+ if (ui32UsageFlags == 0) -+ { -+ uiSize = OSStringLCopy(pszUsageString, "NONE", (size_t)ui32Size); -+ PVR_LOG_RETURN_IF_FALSE((uiSize < ui32Size), "OSStringLCopy", PVRSRV_ERROR_OUT_OF_MEMORY); -+ -+ return PVRSRV_OK; -+ } -+ -+ /* Process from left to right. 
*/ -+ for (i = (sizeof(PHYS_HEAP_USAGE_FLAGS) * BITS_PER_BYTE - 1); i > 0; i--) -+ { -+ IMG_UINT32 ui32Flag = BIT(i); -+ -+ if (BITMASK_HAS(ui32UsageFlags, ui32Flag)) -+ { -+ IMG_CHAR pszString[32] = "\0"; -+ -+ if (PhysHeapCheckValidUsageFlags(ui32Flag)) -+ { -+ DebugCommonFlagStrings(pszString, -+ sizeof(pszString), -+ g_asPhysHeapUsageFlagStrings, -+ ARRAY_SIZE(g_asPhysHeapUsageFlagStrings), -+ ui32Flag); -+ } -+ else -+ { -+ uiSize = OSStringLCat(pszString, -+ "INVALID", -+ sizeof(pszString)); -+ PVR_LOG_RETURN_IF_FALSE((uiSize < sizeof(pszString)), "OSStringLCat", PVRSRV_ERROR_OUT_OF_MEMORY); -+ } -+ -+ if (!bFirst) -+ { -+ uiSize = OSStringLCat(pszUsageString, -+ ", ", -+ (size_t)ui32Size); -+ PVR_LOG_RETURN_IF_FALSE((uiSize < ui32Size), "OSStringLCat", PVRSRV_ERROR_OUT_OF_MEMORY); -+ } -+ else -+ { -+ bFirst = IMG_FALSE; -+ } -+ -+ uiSize = OSStringLCat(pszUsageString, -+ pszString, -+ (size_t)ui32Size); -+ PVR_LOG_RETURN_IF_FALSE((uiSize < ui32Size), "OSStringLCat", PVRSRV_ERROR_OUT_OF_MEMORY); -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function PhysHeapCreatePropertiesString -+@Description This function is used to create a string containing properties -+ of the specified physheap. -+ -+@Input psPhysHeap The physheap to create the string from. -+@Input ui32Size The size of the memory pointed to by -+ pszPhysHeapString. -+@Output pszPhysHeapString A pointer to memory where the created string -+ will be stored. -+ -+@Return If successful PVRSRV_OK, else a PVRSRV_ERROR. -+*/ /**************************************************************************/ -+static PVRSRV_ERROR PhysHeapCreatePropertiesString(PHYS_HEAP *psPhysHeap, -+ IMG_UINT32 ui32Size, -+ IMG_CHAR *pszPhysHeapString) -+{ -+ static const IMG_CHAR *const pszTypeStrings[] = { -+ "UNKNOWN", -+ "UMA", -+ "LMA", -+ "DMA", -+#if defined(SUPPORT_WRAP_EXTMEMOBJECT) -+ "WRAP" -+#endif -+ }; -+ -+ IMG_UINT64 ui64TotalSize; -+ IMG_UINT64 ui64FreeSize; -+ IMG_CHAR pszUsageString[127] = "\0"; -+ IMG_INT32 iCount; -+ PVRSRV_ERROR eError; -+ -+ if (psPhysHeap->eType >= ARRAY_SIZE(pszTypeStrings)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PhysHeap at address %p eType is not a PHYS_HEAP_TYPE", -+ psPhysHeap)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_HEAPINFO, failure); -+ } -+ -+ psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData, -+ &ui64TotalSize, -+ &ui64FreeSize); -+ -+ eError = GetPhysHeapUsageString(psPhysHeap->ui32UsageFlags, -+ sizeof(pszUsageString), -+ pszUsageString); -+ PVR_LOG_GOTO_IF_ERROR(eError, "GetPhysHeapUsageString", failure); -+ -+ if (psPhysHeap->eType == PHYS_HEAP_TYPE_LMA) -+ { -+ IMG_CPU_PHYADDR sCPUPAddr; -+ IMG_DEV_PHYADDR sGPUPAddr; -+ -+ PVR_ASSERT(psPhysHeap->psImplFuncs->pfnGetCPUPAddr != NULL); -+ PVR_ASSERT(psPhysHeap->psImplFuncs->pfnGetDevPAddr != NULL); -+ -+ eError = psPhysHeap->psImplFuncs->pfnGetCPUPAddr(psPhysHeap->pvImplData, -+ &sCPUPAddr); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "pfnGetCPUPAddr"); -+ sCPUPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(IMG_UINT64_MAX); -+ } -+ -+ eError = psPhysHeap->psImplFuncs->pfnGetDevPAddr(psPhysHeap->pvImplData, -+ &sGPUPAddr); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "pfnGetDevPAddr"); -+ sGPUPAddr.uiAddr = IMG_UINT64_MAX; -+ } -+ -+ iCount = OSSNPrintf(pszPhysHeapString, -+ ui32Size, -+ "0x%p -> PdMs: %s, Type: %s, " -+ "CPU PA Base: " CPUPHYADDR_UINT_FMTSPEC", " -+ "GPU PA Base: 0x%08"IMG_UINT64_FMTSPECx", " -+ "Usage 
Flags: 0x%08x (%s), Refs: %d, " -+ "Free Size: %"IMG_UINT64_FMTSPEC"B, " -+ "Total Size: %"IMG_UINT64_FMTSPEC"B", -+ psPhysHeap, -+ psPhysHeap->pszPDumpMemspaceName, -+ pszTypeStrings[psPhysHeap->eType], -+ CPUPHYADDR_FMTARG(sCPUPAddr.uiAddr), -+ sGPUPAddr.uiAddr, -+ psPhysHeap->ui32UsageFlags, -+ pszUsageString, -+ psPhysHeap->ui32RefCount, -+ ui64FreeSize, -+ ui64TotalSize); -+ } -+ else -+ { -+ iCount = OSSNPrintf(pszPhysHeapString, -+ ui32Size, -+ "0x%p -> PdMs: %s, Type: %s, " -+ "Usage Flags: 0x%08x (%s), Refs: %d, " -+ "Free Size: %"IMG_UINT64_FMTSPEC"B, " -+ "Total Size: %"IMG_UINT64_FMTSPEC"B", -+ psPhysHeap, -+ psPhysHeap->pszPDumpMemspaceName, -+ pszTypeStrings[psPhysHeap->eType], -+ psPhysHeap->ui32UsageFlags, -+ pszUsageString, -+ psPhysHeap->ui32RefCount, -+ ui64FreeSize, -+ ui64TotalSize); -+ } -+ -+ if (0 < iCount && iCount < (IMG_INT32)ui32Size) -+ { -+ return PVRSRV_OK; -+ } -+ -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+failure: -+ OSStringLCopy(pszPhysHeapString, "\0", ui32Size); -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function PhysHeapDebugRequest -+@Description This function is used to output debug information for a given -+ device's PhysHeaps. -+@Input pfnDbgRequestHandle Data required by this function that is -+ passed through the RegisterDeviceDbgRequestNotify -+ function. -+@Input ui32VerbLevel The maximum verbosity of the debug request. -+@Input pfnDumpDebugPrintf The specified print function that should be -+ used to dump any debug information -+ (see PVRSRVDebugRequest). -+@Input pvDumpDebugFile Optional file identifier to be passed to -+ the print function if required. -+@Return void -+*/ /**************************************************************************/ -+static void PhysHeapDebugRequest(PVRSRV_DBGREQ_HANDLE pfnDbgRequestHandle, -+ IMG_UINT32 ui32VerbLevel, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ PPVRSRV_DEVICE_NODE psDeviceNode = (PPVRSRV_DEVICE_NODE)pfnDbgRequestHandle; -+ PHYS_HEAP *psPhysHeap; -+ -+ PVR_LOG_RETURN_VOID_IF_FALSE(psDeviceNode != NULL, -+ "Phys Heap debug request failed. psDeviceNode was NULL"); -+ -+ PVR_DUMPDEBUG_LOG("------[ Device ID: %d - Phys Heaps ]------", -+ psDeviceNode->sDevId.i32KernelDeviceID); -+ -+ for (psPhysHeap = psDeviceNode->psPhysHeapList; psPhysHeap != NULL; psPhysHeap = psPhysHeap->psNext) -+ { -+ IMG_CHAR pszPhysHeapString[256] = "\0"; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ eError = PhysHeapCreatePropertiesString(psPhysHeap, -+ sizeof(pszPhysHeapString), -+ pszPhysHeapString); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "PhysHeapCreatePropertiesString"); -+ continue; -+ } -+ -+ PVR_DUMPDEBUG_LOG("%s", pszPhysHeapString); -+ } -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ OSLockAcquire(psDeviceNode->hPMRZombieListLock); -+ PVR_DUMPDEBUG_LOG("PMR Zombie Count: %u, PMR Zombie Count In Cleanup: %u", -+ psDeviceNode->uiPMRZombieCount, -+ psDeviceNode->uiPMRZombieCountInCleanup); -+ OSLockRelease(psDeviceNode->hPMRZombieListLock); -+#endif -+ PVR_DUMPDEBUG_LOG("PMR Live Count: %d", PMRGetLiveCount()); -+} -+ -+/*************************************************************************/ /*! 
-+@Function HeapCfgUsedByPVRLayer -+@Description Checks if a physheap config must be handled by the PVR Layer -+@Input psConfig PhysHeapConfig -+@Return IMG_BOOL -+*/ /**************************************************************************/ -+static IMG_BOOL HeapCfgUsedByPVRLayer(PHYS_HEAP_CONFIG *psConfig) -+{ -+ PVRSRV_PHYS_HEAP eHeap; -+ IMG_BOOL bPVRHeap = IMG_FALSE; -+ -+ /* Heaps are triaged for initialisation by either -+ * the PVR Layer or the device-specific heap handler. */ -+ for (eHeap = PVRSRV_PHYS_HEAP_DEFAULT; -+ eHeap < PVRSRV_PHYS_HEAP_LAST; -+ eHeap++) -+ { -+ if (BIT_ISSET(psConfig->ui32UsageFlags, eHeap) && -+ PhysHeapCreatedByPVRLayer(eHeap)) -+ { -+ bPVRHeap = IMG_TRUE; -+ break; -+ } -+ } -+ -+ return bPVRHeap; -+} -+ -+/*************************************************************************/ /*! -+@Function PhysHeapCreateDeviceHeapsFromConfigs -+@Description Create new heaps for a device from configs. -+@Input psDevNode Pointer to device node struct -+@Input pasConfigs Pointer to array of Heap configurations. -+@Input ui32NumConfigs Number of configurations in array. -+@Return PVRSRV_ERROR PVRSRV_OK or error code -+*/ /**************************************************************************/ -+static PVRSRV_ERROR -+PhysHeapCreateDeviceHeapsFromConfigs(PPVRSRV_DEVICE_NODE psDevNode, -+ PHYS_HEAP_CONFIG *pasConfigs, -+ IMG_UINT32 ui32NumConfigs) -+{ -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError; -+ -+ psDevNode->psPhysHeapList = NULL; -+ -+ for (i = 0; i < ui32NumConfigs; i++) -+ { -+ /* A PhysHeapConfig can have multiple usage flags. If any flag in a -+ * heap's set points to a heap type that is handled by the PVR Layer -+ * then we assume that a single heap is shared between multiple -+ * allocators and it is safe to instantiate it here. If the heap -+ * is not marked to be initialised by the PVR Layer, leave it -+ * to the device specific handler. */ -+ if (HeapCfgUsedByPVRLayer(&pasConfigs[i])) -+ { -+ eError = PhysHeapCreateHeapFromConfig(psDevNode, &pasConfigs[i], NULL); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap"); -+ } -+ } -+ -+#if defined(SUPPORT_PHYSMEM_TEST) -+ /* For a temporary device node there will never be a debug dump -+ * request targeting it */ -+ if (psDevNode->hDebugTable != NULL) -+#endif -+ { -+ eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDevNode->hPhysHeapDbgReqNotify, -+ psDevNode, -+ PhysHeapDebugRequest, -+ DEBUG_REQUEST_SYS, -+ psDevNode); -+ -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRegisterDeviceDbgRequestNotify"); -+ } -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+PhysHeapCreateHeapFromConfig(PVRSRV_DEVICE_NODE *psDevNode, -+ PHYS_HEAP_CONFIG *psConfig, -+ PHYS_HEAP **ppsPhysHeap) -+{ -+ PVRSRV_ERROR eResult; -+ -+ if (psConfig->eType == PHYS_HEAP_TYPE_UMA -+#if defined(SUPPORT_WRAP_EXTMEMOBJECT) -+ || psConfig->eType == PHYS_HEAP_TYPE_WRAP -+#endif -+ ) -+ { -+ eResult = PhysmemCreateHeapOSMEM(psDevNode, -+ PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG, -+ psConfig, -+ ppsPhysHeap); -+ } -+ else if ((psConfig->eType == PHYS_HEAP_TYPE_LMA) || -+ (psConfig->eType == PHYS_HEAP_TYPE_DMA)) -+ { -+ PHYS_HEAP_POLICY uiHeapPolicy; -+ -+ if (psDevNode->pfnPhysHeapGetLMAPolicy != NULL) -+ { -+ uiHeapPolicy = psDevNode->pfnPhysHeapGetLMAPolicy(psConfig->ui32UsageFlags); -+ } -+ else -+ { -+ uiHeapPolicy = OSIsMapPhysNonContigSupported() ? -+ PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG : -+ PHYS_HEAP_POLICY_DEFAULT; -+ } -+ -+ eResult = PhysmemCreateHeapLMA(psDevNode, -+ uiHeapPolicy, -+ psConfig, -+ (psConfig->eType == PHYS_HEAP_TYPE_LMA) ? 
-+ "GPU LMA (Sys)" : -+ "GPU LMA DMA (Sys)", -+ ppsPhysHeap); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s Invalid phys heap type: %d", -+ __func__, psConfig->eType)); -+ eResult = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return eResult; -+} -+ -+#define PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE (0x100000ULL * 32ULL) /* 32MB */ -+ -+static PVRSRV_ERROR PVRSRVValidatePhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig) -+{ -+ IMG_UINT32 ui32FlagsAccumulate = 0; -+ IMG_UINT32 i; -+ -+ PVR_LOG_RETURN_IF_FALSE(psDevConfig->ui32PhysHeapCount > 0, -+ "Device config must specify at least one phys heap config.", -+ PVRSRV_ERROR_PHYSHEAP_CONFIG); -+ -+ for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++) -+ { -+ PHYS_HEAP_CONFIG *psHeapConf = &psDevConfig->pasPhysHeaps[i]; -+ -+ PVR_LOG_RETURN_IF_FALSE_VA(psHeapConf->ui32UsageFlags != 0, -+ PVRSRV_ERROR_PHYSHEAP_CONFIG, -+ "Phys heap config %d: must specify usage flags.", i); -+ -+ PVR_LOG_RETURN_IF_FALSE_VA((ui32FlagsAccumulate & psHeapConf->ui32UsageFlags) == 0, -+ PVRSRV_ERROR_PHYSHEAP_CONFIG, -+ "Phys heap config %d: duplicate usage flags.", i); -+ -+ ui32FlagsAccumulate |= psHeapConf->ui32UsageFlags; -+ -+ /* Output message if default heap is LMA and smaller than recommended minimum */ -+ if (BITMASK_ANY((1U << psDevConfig->eDefaultHeap), PHYS_HEAP_USAGE_MASK) && -+ BITMASK_ANY((1U << psDevConfig->eDefaultHeap), psHeapConf->ui32UsageFlags) && -+#if defined(__KERNEL__) -+ ((psHeapConf->eType == PHYS_HEAP_TYPE_LMA) || -+ (psHeapConf->eType == PHYS_HEAP_TYPE_DMA)) && -+#else -+ (psHeapConf->eType == PHYS_HEAP_TYPE_LMA) && -+#endif -+ (psHeapConf->uiSize < PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Size of default heap is 0x%" IMG_UINT64_FMTSPECX -+ " (recommended minimum heap size is 0x%llx)", -+ __func__, psHeapConf->uiSize, -+ PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE)); -+ } -+ } -+ -+ if (psDevConfig->eDefaultHeap == PVRSRV_PHYS_HEAP_GPU_LOCAL) -+ { -+ PVR_LOG_RETURN_IF_FALSE(((ui32FlagsAccumulate & PHYS_HEAP_USAGE_GPU_LOCAL) != 0) , -+ "Device config must specify GPU local phys heap config.", -+ PVRSRV_ERROR_PHYSHEAP_CONFIG); -+ } -+ else if (psDevConfig->eDefaultHeap == PVRSRV_PHYS_HEAP_CPU_LOCAL) -+ { -+ PVR_LOG_RETURN_IF_FALSE(((ui32FlagsAccumulate & PHYS_HEAP_USAGE_CPU_LOCAL) != 0) , -+ "Device config must specify CPU local phys heap config.", -+ PVRSRV_ERROR_PHYSHEAP_CONFIG); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+/*************************************************************************/ /*! -+@Function CreateGpuVirtValArenas -+@Description Create virtualization validation arenas -+@Input psDeviceNode The device node -+@Return PVRSRV_ERROR PVRSRV_OK on success -+*/ /**************************************************************************/ -+static PVRSRV_ERROR CreateGpuVirtValArenas(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ /* aui64OSidMin and aui64OSidMax are what we program into HW registers. -+ The values are different from base/size of arenas. 
*/ -+ IMG_UINT64 aui64OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]; -+ IMG_UINT64 aui64OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]; -+ PHYS_HEAP_CONFIG *psGPULocalHeap = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, PHYS_HEAP_USAGE_GPU_LOCAL); -+ PHYS_HEAP_CONFIG *psDisplayHeap = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, PHYS_HEAP_USAGE_DISPLAY); -+ IMG_UINT64 uBase; -+ IMG_UINT64 uSize; -+ IMG_UINT64 uBaseShared; -+ IMG_UINT64 uSizeShared; -+ IMG_UINT64 uSizeSharedReg; -+ IMG_UINT32 i; -+ -+ /* Shared region is fixed size, the remaining space is divided amongst OSes */ -+ uSizeShared = PVR_ALIGN(GPUVIRT_SIZEOF_SHARED, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); -+ uSize = psGPULocalHeap->uiSize - uSizeShared; -+ uSize /= GPUVIRT_VALIDATION_NUM_OS; -+ uSize = uSize & ~((IMG_UINT64)OSGetPageSize() - 1ULL); /* Align, round down */ -+ -+ uBase = psGPULocalHeap->sCardBase.uiAddr; -+ uBaseShared = uBase + uSize * GPUVIRT_VALIDATION_NUM_OS; -+ uSizeShared = psGPULocalHeap->uiSize - (uBaseShared - uBase); -+ -+ PVR_LOG(("GPUVIRT_VALIDATION split GPU_LOCAL base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", -+ psGPULocalHeap->sCardBase.uiAddr, -+ psGPULocalHeap->uiSize)); -+ -+ /* If a display heap config exists, include the display heap in the non-secure regions */ -+ if (psDisplayHeap) -+ { -+ /* Only works when DISPLAY heap follows GPU_LOCAL heap. */ -+ PVR_LOG(("GPUVIRT_VALIDATION include DISPLAY in shared, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", -+ psDisplayHeap->sCardBase.uiAddr, -+ psDisplayHeap->uiSize)); -+ -+ uSizeSharedReg = uSizeShared + psDisplayHeap->uiSize; -+ } -+ else -+ { -+ uSizeSharedReg = uSizeShared; -+ } -+ -+ PVR_ASSERT(uSize >= GPUVIRT_MIN_SIZE); -+ PVR_ASSERT(uSizeSharedReg >= GPUVIRT_SIZEOF_SHARED); -+ -+ FOREACH_VALIDATION_OSID(i) -+ { -+ IMG_CHAR aszOSRAName[RA_MAX_NAME_LENGTH]; -+ -+ PVR_LOG(("GPUVIRT_VALIDATION create arena OS: %d, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", i, uBase, uSize)); -+ -+ OSSNPrintf(aszOSRAName, RA_MAX_NAME_LENGTH, "GPUVIRT_OS%d", i); -+ -+ psDeviceNode->psOSidSubArena[i] = RA_Create_With_Span(aszOSRAName, -+ OSGetPageShift(), -+ 0, -+ uBase, -+ uSize, -+ RA_POLICY_DEFAULT); -+ PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->psOSidSubArena[i], "RA_Create_With_Span"); -+ -+ aui64OSidMin[GPUVIRT_VAL_REGION_SECURE][i] = uBase; -+ -+ if (i == 0) -+ { -+ /* OSid0 has access to all regions */ -+ aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i] = psGPULocalHeap->uiSize - 1ULL; -+ } -+ else -+ { -+ aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i] = uBase + uSize - 1ULL; -+ } -+ -+ /* uSizeSharedReg includes display heap */ -+ aui64OSidMin[GPUVIRT_VAL_REGION_SHARED][i] = uBaseShared; -+ aui64OSidMax[GPUVIRT_VAL_REGION_SHARED][i] = uBaseShared + uSizeSharedReg - 1ULL; -+ -+ PVR_LOG(("GPUVIRT_VALIDATION HW reg regions %d: min[0]: 0x%" IMG_UINT64_FMTSPECX ", max[0]: 0x%" IMG_UINT64_FMTSPECX ", min[1]: 0x%" IMG_UINT64_FMTSPECX ", max[1]: 0x%" IMG_UINT64_FMTSPECX ",", -+ i, -+ aui64OSidMin[GPUVIRT_VAL_REGION_SECURE][i], -+ aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i], -+ aui64OSidMin[GPUVIRT_VAL_REGION_SHARED][i], -+ aui64OSidMax[GPUVIRT_VAL_REGION_SHARED][i])); -+ uBase += uSize; -+ } -+ -+ PVR_LOG(("GPUVIRT_VALIDATION create arena Shared, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", uBaseShared, uSizeShared)); -+ -+ PVR_ASSERT(uSizeShared >= GPUVIRT_SIZEOF_SHARED); -+ -+ /* uSizeShared does not include display heap */ -+ 
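CreateGpuVirtValArenas above carves the GPU_LOCAL heap into a fixed-size, page-aligned shared region plus equal per-OS regions, rounding the per-OS size down to a page boundary and recomputing the shared size from whatever space remains. That arithmetic is easy to check in isolation; the sketch below uses made-up sizes and a hard-coded OS count in place of the real heap config and GPUVIRT_VALIDATION_NUM_OS.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative values only; the real base/size come from the GPU_LOCAL
	 * heap config and the OS count from GPUVIRT_VALIDATION_NUM_OS. */
	const uint64_t uiPageSize  = 4096;
	const uint64_t uiHeapBase  = 0x80000000ULL;
	const uint64_t uiHeapSize  = 256ULL << 20;   /* 256MB GPU_LOCAL heap */
	const uint64_t uiSharedMin = 16ULL << 20;    /* fixed shared region  */
	const unsigned uiNumOS     = 8;
	uint64_t uiPerOS, uiSharedBase, uiSharedSize;
	unsigned i;

	/* Space left after the shared region is split evenly across OSes and
	 * rounded down to a page boundary, as in CreateGpuVirtValArenas. */
	uiPerOS = (uiHeapSize - uiSharedMin) / uiNumOS;
	uiPerOS &= ~(uiPageSize - 1);

	uiSharedBase = uiHeapBase + uiPerOS * uiNumOS;
	uiSharedSize = uiHeapSize - (uiSharedBase - uiHeapBase);

	for (i = 0; i < uiNumOS; i++)
	{
		printf("OS%u arena: base 0x%llx size 0x%llx\n", i,
		       (unsigned long long)(uiHeapBase + (uint64_t)i * uiPerOS),
		       (unsigned long long)uiPerOS);
	}
	printf("shared arena: base 0x%llx size 0x%llx\n",
	       (unsigned long long)uiSharedBase,
	       (unsigned long long)uiSharedSize);
	return 0;
}

With these example numbers each OS gets a 30MB arena and the shared arena keeps the remaining 16MB at the top of the heap.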
psDeviceNode->psOSSharedArena = RA_Create_With_Span("GPUVIRT_SHARED", -+ OSGetPageShift(), -+ 0, -+ uBaseShared, -+ uSizeShared, -+ RA_POLICY_DEFAULT); -+ PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->psOSSharedArena, "RA_Create_With_Span"); -+ -+ if (psDeviceNode->psDevConfig->pfnSysDevVirtInit != NULL) -+ { -+ psDeviceNode->psDevConfig->pfnSysDevVirtInit(psDeviceNode->psDevConfig->hSysData, aui64OSidMin, aui64OSidMax); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Counter-part to CreateGpuVirtValArenas. -+ */ -+static void DestroyGpuVirtValArenas(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ IMG_UINT32 uiCounter = 0; -+ -+ FOREACH_VALIDATION_OSID(uiCounter) -+ { -+ if (uiCounter == RGXFW_HOST_DRIVER_ID) -+ { -+ /* -+ * NOTE: We overload psOSidSubArena[0] into the psLocalMemArena so we must -+ * not free it here as it gets cleared later. -+ */ -+ continue; -+ } -+ -+ if (psDeviceNode->psOSidSubArena[uiCounter] == NULL) -+ { -+ continue; -+ } -+ RA_Delete(psDeviceNode->psOSidSubArena[uiCounter]); -+ } -+ -+ if (psDeviceNode->psOSSharedArena != NULL) -+ { -+ RA_Delete(psDeviceNode->psOSSharedArena); -+ } -+} -+#endif -+ -+/*************************************************************************/ /*! -+@Function PhysHeapMMUPxSetup -+@Description Setup MMU Px allocation function pointers. -+@Input psDeviceNode Pointer to device node struct -+@Return PVRSRV_ERROR PVRSRV_OK on success. -+*/ /**************************************************************************/ -+static PVRSRV_ERROR PhysHeapMMUPxSetup(PPVRSRV_DEVICE_NODE psDeviceNode) -+{ -+ PHYS_HEAP_TYPE eHeapType; -+ PVRSRV_ERROR eError; -+ -+ eError = PhysHeapAcquireByID(psDeviceNode->psDevConfig->eDefaultHeap, -+ psDeviceNode, &psDeviceNode->psMMUPhysHeap); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByID", ErrorDeinit); -+ -+ eHeapType = PhysHeapGetType(psDeviceNode->psMMUPhysHeap); -+ -+ if (eHeapType == PHYS_HEAP_TYPE_UMA) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: GPU physical heap uses OS System memory (UMA)", __func__)); -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+ PVR_DPF((PVR_DBG_ERROR, "%s: Virtualisation Validation builds are currently only" -+ " supported on systems with local memory (LMA).", __func__)); -+ eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ goto ErrorDeinit; -+#endif -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: GPU physical heap uses local memory managed by the driver (LMA)", __func__)); -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+ eError = CreateGpuVirtValArenas(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "CreateGpuVirtValArenas", ErrorDeinit); -+#endif -+ } -+ -+ return PVRSRV_OK; -+ErrorDeinit: -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function PhysHeapMMUPxDeInit -+@Description Deinit after PhysHeapMMUPxSetup. 
-+@Input psDeviceNode Pointer to device node struct -+*/ /**************************************************************************/ -+static void PhysHeapMMUPxDeInit(PPVRSRV_DEVICE_NODE psDeviceNode) -+{ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+ /* Remove local LMA subarenas */ -+ DestroyGpuVirtValArenas(psDeviceNode); -+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ -+ -+ if (psDeviceNode->psMMUPhysHeap != NULL) -+ { -+ PhysHeapRelease(psDeviceNode->psMMUPhysHeap); -+ psDeviceNode->psMMUPhysHeap = NULL; -+ } -+} -+ -+PVRSRV_ERROR PhysHeapInitDeviceHeaps(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_PHYS_HEAP ePhysHeap; -+ -+ eError = OSLockCreate(&psDeviceNode->hPhysHeapLock); -+ PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); -+ -+ eError = PVRSRVValidatePhysHeapConfig(psDevConfig); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVValidatePhysHeapConfig"); -+ -+ eError = PhysHeapCreateDeviceHeapsFromConfigs(psDeviceNode, -+ psDevConfig->pasPhysHeaps, -+ psDevConfig->ui32PhysHeapCount); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapCreateDeviceHeapsFromConfigs", ErrorDeinit); -+ -+ /* Must loop from the 2nd heap to the last */ -+ PVR_ASSERT(PVRSRV_PHYS_HEAP_DEFAULT == 0); -+ for (ePhysHeap = (PVRSRV_PHYS_HEAP)(PVRSRV_PHYS_HEAP_DEFAULT+1); ePhysHeap < PVRSRV_PHYS_HEAP_LAST; ePhysHeap++) -+ { -+ if (PhysHeapAcquiredByPVRLayer(ePhysHeap)) -+ { -+ eError = PhysHeapAcquireByID(ePhysHeap, psDeviceNode, &psDeviceNode->apsPhysHeap[ePhysHeap]); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByID", ErrorDeinit); -+ } -+ } -+ -+ if (PhysHeapValidateDefaultHeapExists(psDeviceNode)) -+ { -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapValidateDefaultHeapExists", ErrorDeinit); -+ } -+ -+ eError = PhysHeapMMUPxSetup(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapMMUPxSetup", ErrorDeinit); -+ -+ return PVRSRV_OK; -+ -+ErrorDeinit: -+ PVR_ASSERT(IMG_FALSE); -+ PhysHeapDeInitDeviceHeaps(psDeviceNode); -+ -+ return eError; -+} -+ -+void PhysHeapDeInitDeviceHeaps(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_PHYS_HEAP ePhysHeapIdx; -+ IMG_UINT32 i; -+ -+ PhysHeapMMUPxDeInit(psDeviceNode); -+ -+ /* Release heaps */ -+ for (ePhysHeapIdx = PVRSRV_PHYS_HEAP_DEFAULT; -+ ePhysHeapIdx < ARRAY_SIZE(psDeviceNode->apsPhysHeap); -+ ePhysHeapIdx++) -+ { -+ if (psDeviceNode->apsPhysHeap[ePhysHeapIdx]) -+ { -+ PhysHeapRelease(psDeviceNode->apsPhysHeap[ePhysHeapIdx]); -+ } -+ } -+ -+ FOREACH_SUPPORTED_DRIVER(i) -+ { -+ if (psDeviceNode->apsFWPremapPhysHeap[i]) -+ { -+ PhysHeapDestroy(psDeviceNode->apsFWPremapPhysHeap[i]); -+ psDeviceNode->apsFWPremapPhysHeap[i] = NULL; -+ } -+ } -+ -+ PhysHeapDestroyDeviceHeaps(psDeviceNode); -+ -+ OSLockDestroy(psDeviceNode->hPhysHeapLock); -+} -+ -+PVRSRV_ERROR PhysHeapCreate(PPVRSRV_DEVICE_NODE psDevNode, -+ PHYS_HEAP_CONFIG *psConfig, -+ PHYS_HEAP_POLICY uiPolicy, -+ PHEAP_IMPL_DATA pvImplData, -+ PHEAP_IMPL_FUNCS *psImplFuncs, -+ PHYS_HEAP **ppsPhysHeap) -+{ -+ PHYS_HEAP *psNew; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode"); -+ -+ if (psConfig->eType == PHYS_HEAP_TYPE_UNKNOWN) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psImplFuncs != NULL, "psImplFuncs"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psImplFuncs->pfnCreatePMR != NULL, "psImplFuncs->pfnCreatePMR"); -+ -+ psNew = OSAllocMem(sizeof(PHYS_HEAP)); -+ PVR_RETURN_IF_NOMEM(psNew); -+ psNew->psDevNode = psDevNode; -+ psNew->eType = psConfig->eType; -+ psNew->uiPolicy = uiPolicy; -+ 
psNew->psMemFuncs = psConfig->psMemFuncs; -+ psNew->hPrivData = psConfig->hPrivData; -+ psNew->ui32RefCount = 0; -+ psNew->pszPDumpMemspaceName = psConfig->pszPDumpMemspaceName; -+ psNew->ui32UsageFlags = psConfig->ui32UsageFlags; -+#if !defined(PVRSRV_PHYSHEAP_DISABLE_OOM_DEMOTION) -+ OSAtomicWrite(&psNew->sOOMDetected, IMG_FALSE); -+#endif -+ -+ psNew->pvImplData = pvImplData; -+ psNew->psImplFuncs = psImplFuncs; -+ -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ { -+ IMG_UINT8 ui8Val; -+ -+ /* Ensure we do not cause an address fault by accessing beyond -+ * the end of the psConfig->sIPAConfig structure. -+ */ -+ ui8Val = psConfig->sIPAConfig.ui8IPAPolicyDefault; -+ psNew->ui32IPAPolicyValue = (IMG_UINT32)ui8Val; -+ -+ ui8Val = psConfig->sIPAConfig.ui8IPAPolicyMask; -+ psNew->ui32IPAClearMask = (IMG_UINT32)ui8Val; -+ -+ ui8Val = psConfig->sIPAConfig.ui8IPAPolicyShift; -+ psNew->ui32IPAShift = (IMG_UINT32)ui8Val; -+ PVR_LOG_VA(PVR_DBG_MESSAGE, "%s: Physheap <%p> ['%s'] Config @ <%p> IPA = [0x%x, 0x%x, 0x%x]", -+ __func__, psNew, psNew->aszName, -+ psConfig, psNew->ui32IPAPolicyValue, -+ psNew->ui32IPAClearMask, psNew->ui32IPAShift); -+ } -+#endif -+ -+ OSStringLCopy(psNew->aszName, -+ (psConfig->pszHeapName) ? psConfig->pszHeapName : "Unknown PhysHeap", -+ PHYS_HEAP_NAME_SIZE); -+ -+ if (ppsPhysHeap != NULL) -+ { -+ *ppsPhysHeap = psNew; -+ } -+ -+ psNew->psNext = psDevNode->psPhysHeapList; -+ psDevNode->psPhysHeapList = psNew; -+ -+ PVR_DPF_RETURN_RC1(PVRSRV_OK, psNew); -+} -+ -+void PhysHeapDestroyDeviceHeaps(PPVRSRV_DEVICE_NODE psDevNode) -+{ -+ PHYS_HEAP *psNode = psDevNode->psPhysHeapList; -+ -+ if (psDevNode->hPhysHeapDbgReqNotify) -+ { -+ PVRSRVUnregisterDeviceDbgRequestNotify(psDevNode->hPhysHeapDbgReqNotify); -+ } -+ -+ while (psNode) -+ { -+ PHYS_HEAP *psTmp = psNode; -+ -+ psNode = psNode->psNext; -+ PhysHeapDestroy(psTmp); -+ } -+} -+ -+void PhysHeapDestroy(PHYS_HEAP *psPhysHeap) -+{ -+ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; -+ PPVRSRV_DEVICE_NODE psDevNode = psPhysHeap->psDevNode; -+ -+ PVR_DPF_ENTERED1(psPhysHeap); -+ -+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) -+ if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK) -+#endif -+ { -+ PVR_ASSERT(psPhysHeap->ui32RefCount == 0); -+ } -+ -+ if (psDevNode->psPhysHeapList == psPhysHeap) -+ { -+ psDevNode->psPhysHeapList = psPhysHeap->psNext; -+ } -+ else -+ { -+ PHYS_HEAP *psTmp = psDevNode->psPhysHeapList; -+ -+ while (psTmp->psNext != psPhysHeap) -+ { -+ psTmp = psTmp->psNext; -+ } -+ psTmp->psNext = psPhysHeap->psNext; -+ } -+ -+ if (psImplFuncs->pfnDestroyData != NULL) -+ { -+ psImplFuncs->pfnDestroyData(psPhysHeap->pvImplData); -+ } -+ -+ OSFreeMem(psPhysHeap); -+ -+ PVR_DPF_RETURN; -+} -+ -+static void _PhysHeapCountUserModeHeaps(PPVRSRV_DEVICE_NODE psDevNode, -+ PHYS_HEAP_USAGE_FLAGS ui32UsageFlags) -+{ -+ PVRSRV_PHYS_HEAP eHeap; -+ -+ for (eHeap = PVRSRV_PHYS_HEAP_DEFAULT; -+ eHeap <= PVRSRV_PHYS_HEAP_LAST; -+ eHeap++) -+ { -+ if (BIT_ISSET(ui32UsageFlags, eHeap) && -+ PhysHeapUserModeAlloc(eHeap)) -+ { -+ psDevNode->ui32UserAllocHeapCount++; -+ break; -+ } -+ } -+} -+ -+PVRSRV_ERROR PhysHeapAcquire(PHYS_HEAP *psPhysHeap) -+{ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psPhysHeap != NULL, "psPhysHeap"); -+ -+ psPhysHeap->ui32RefCount++; -+ -+ /* When acquiring a heap for the 1st time, perform a check and -+ * calculate the total number of user accessible physical heaps */ -+ if (psPhysHeap->ui32RefCount == 1) -+ { -+ _PhysHeapCountUserModeHeaps(psPhysHeap->psDevNode, -+ 
psPhysHeap->ui32UsageFlags); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PHYS_HEAP * _PhysHeapFindHeapOrFallback(PVRSRV_PHYS_HEAP ePhysHeap, -+ PPVRSRV_DEVICE_NODE psDevNode) -+{ -+ PHYS_HEAP *psPhysHeapNode = psDevNode->psPhysHeapList; -+ PVRSRV_PHYS_HEAP eFallback; -+ -+ /* Swap the default heap alias for the system's real default PhysHeap */ -+ if (ePhysHeap == PVRSRV_PHYS_HEAP_DEFAULT) -+ { -+ ePhysHeap = psDevNode->psDevConfig->eDefaultHeap; -+ } -+ -+ /* Check cache of PhysHeaps to see if it has been resolved before */ -+ if (psDevNode->apsPhysHeap[ePhysHeap] != NULL) -+ { -+ return psDevNode->apsPhysHeap[ePhysHeap]; -+ } -+ -+ /* Cache not ready, carry out search with fallback */ -+ while (psPhysHeapNode) -+ { -+ if (BIT_ISSET(psPhysHeapNode->ui32UsageFlags, ePhysHeap)) -+ { -+ return psPhysHeapNode; -+ } -+ -+ psPhysHeapNode = psPhysHeapNode->psNext; -+ } -+ -+ /* Find fallback PhysHeap */ -+ eFallback = gasHeapProperties[ePhysHeap].eFallbackHeap; -+ if (ePhysHeap == eFallback) -+ { -+ return NULL; -+ } -+ else -+ { -+ return _PhysHeapFindHeapOrFallback(eFallback, psDevNode); -+ } -+} -+ -+/* -+ * Acquire heap, no fallback, no recursion: single loop acquisition -+ */ -+static PHYS_HEAP* _PhysHeapFindRealHeapNoFallback(PVRSRV_PHYS_HEAP ePhysHeap, -+ PPVRSRV_DEVICE_NODE psDevNode) -+{ -+ PHYS_HEAP *psPhysHeapNode = psDevNode->psPhysHeapList; -+ -+ /* Swap the default heap alias for the system's real default PhysHeap */ -+ if (ePhysHeap == PVRSRV_PHYS_HEAP_DEFAULT) -+ { -+ ePhysHeap = psDevNode->psDevConfig->eDefaultHeap; -+ } -+ -+ /* Check cache of PhysHeaps to see if it has been resolved before */ -+ if (BIT_ISSET(psDevNode->apsPhysHeap[ePhysHeap]->ui32UsageFlags, ePhysHeap)) -+ { -+ return psDevNode->apsPhysHeap[ePhysHeap]; -+ } -+ -+ /* Cache not ready, carry out search for real PhysHeap, no fallback */ -+ while (psPhysHeapNode) -+ { -+ if (BIT_ISSET(psPhysHeapNode->ui32UsageFlags, ePhysHeap)) -+ { -+ return psPhysHeapNode; -+ } -+ -+ psPhysHeapNode = psPhysHeapNode->psNext; -+ } -+ return NULL; -+} -+ -+PVRSRV_ERROR PhysHeapAcquireByID(PVRSRV_PHYS_HEAP eDevPhysHeap, -+ PPVRSRV_DEVICE_NODE psDevNode, -+ PHYS_HEAP **ppsPhysHeap) -+{ -+ PHYS_HEAP *psPhysHeap; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(eDevPhysHeap < PVRSRV_PHYS_HEAP_LAST, "eDevPhysHeap"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode"); -+ -+ PVR_DPF_ENTERED1(ui32Flags); -+ -+ OSLockAcquire(psDevNode->hPhysHeapLock); -+ -+ psPhysHeap = _PhysHeapFindHeapOrFallback(eDevPhysHeap, psDevNode); -+ -+ if (psPhysHeap != NULL) -+ { -+ psPhysHeap->ui32RefCount++; -+ PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", -+ __func__, psPhysHeap, psPhysHeap->ui32RefCount); -+ -+ /* When acquiring a heap for the 1st time, perform a check and -+ * calculate the total number of user accessible physical heaps */ -+ if (psPhysHeap->ui32RefCount == 1) -+ { -+ _PhysHeapCountUserModeHeaps(psDevNode, BIT(eDevPhysHeap)); -+ } -+ } -+ else -+ { -+ eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID; -+ } -+ -+ OSLockRelease(psDevNode->hPhysHeapLock); -+ -+ *ppsPhysHeap = psPhysHeap; -+ PVR_DPF_RETURN_RC1(eError, *ppsPhysHeap); -+} -+ -+void PhysHeapRelease(PHYS_HEAP *psPhysHeap) -+{ -+ PVR_DPF_ENTERED1(psPhysHeap); -+ -+ OSLockAcquire(psPhysHeap->psDevNode->hPhysHeapLock); -+ psPhysHeap->ui32RefCount--; -+ PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", -+ __func__, psPhysHeap, psPhysHeap->ui32RefCount); -+ OSLockRelease(psPhysHeap->psDevNode->hPhysHeapLock); -+ -+ PVR_DPF_RETURN; -+} -+ 
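For reference, a minimal usage sketch of the acquire/release API above, assuming a device node that has already completed PhysHeapInitDeviceHeaps() and that physheap.h is included; the wrapper function name is hypothetical and only illustrates the intended call pattern (acquire by heap ID with fallback resolution, query through the implementation callbacks, then drop the reference):

/* Hypothetical caller, not part of the driver sources: resolve a heap by its
 * PVRSRV_PHYS_HEAP ID, query its size, then release it. Note that
 * PhysHeapAcquireByID() resolves the PVRSRV_PHYS_HEAP_DEFAULT alias and walks
 * gasHeapProperties[].eFallbackHeap, so the heap returned may differ from the
 * one requested if that heap was not configured by the system layer.
 */
static PVRSRV_ERROR ExampleQueryHeapSize(PPVRSRV_DEVICE_NODE psDevNode,
                                         PVRSRV_PHYS_HEAP eHeapID,
                                         IMG_UINT64 *puiSize)
{
	PHYS_HEAP *psHeap = NULL;
	PVRSRV_ERROR eError;

	/* Takes hPhysHeapLock, resolves the heap (with fallback) and bumps its
	 * refcount on success. */
	eError = PhysHeapAcquireByID(eHeapID, psDevNode, &psHeap);
	PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquireByID");

	/* Delegates to the heap's pfnGetSize implementation callback; returns
	 * PVRSRV_ERROR_NOT_IMPLEMENTED if the backend does not provide one. */
	eError = PhysHeapGetSize(psHeap, puiSize);

	/* Drop the reference taken by the acquire. */
	PhysHeapRelease(psHeap);

	return eError;
}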
-+PHEAP_IMPL_DATA PhysHeapGetImplData(PHYS_HEAP *psPhysHeap) -+{ -+ return psPhysHeap->pvImplData; -+} -+ -+PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap) -+{ -+ PVR_ASSERT(psPhysHeap->eType != PHYS_HEAP_TYPE_UNKNOWN); -+ return psPhysHeap->eType; -+} -+ -+PHYS_HEAP_POLICY PhysHeapGetPolicy(PHYS_HEAP *psPhysHeap) -+{ -+ return psPhysHeap->uiPolicy; -+} -+ -+PHYS_HEAP_USAGE_FLAGS PhysHeapGetFlags(PHYS_HEAP *psPhysHeap) -+{ -+ return psPhysHeap->ui32UsageFlags; -+} -+ -+const IMG_CHAR *PhysHeapName(PHYS_HEAP *psPhysHeap) -+{ -+ return psPhysHeap->aszName; -+} -+ -+IMG_BOOL PhysHeapValidateDefaultHeapExists(PPVRSRV_DEVICE_NODE psDevNode) -+{ -+ PVRSRV_PHYS_HEAP eDefaultHeap = psDevNode->psDevConfig->eDefaultHeap; -+ -+ return ((psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_DEFAULT] != NULL) && -+ ((psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_DEFAULT] == -+ psDevNode->apsPhysHeap[eDefaultHeap]))); -+} -+ -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+IMG_UINT32 PhysHeapGetIPAValue(PHYS_HEAP *psPhysHeap) -+{ -+ return psPhysHeap->ui32IPAPolicyValue; -+} -+ -+IMG_UINT32 PhysHeapGetIPAMask(PHYS_HEAP *psPhysHeap) -+{ -+ return psPhysHeap->ui32IPAClearMask; -+} -+ -+IMG_UINT32 PhysHeapGetIPAShift(PHYS_HEAP *psPhysHeap) -+{ -+ return psPhysHeap->ui32IPAShift; -+} -+#endif -+ -+/* -+ * This function will set the psDevPAddr to whatever the system layer -+ * has set it for the referenced region. -+ * It will not fail if the psDevPAddr is invalid. -+ */ -+PVRSRV_ERROR PhysHeapGetDevPAddr(PHYS_HEAP *psPhysHeap, -+ IMG_DEV_PHYADDR *psDevPAddr) -+{ -+ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; -+ PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED; -+ -+ if (psImplFuncs->pfnGetDevPAddr != NULL) -+ { -+ eResult = psImplFuncs->pfnGetDevPAddr(psPhysHeap->pvImplData, -+ psDevPAddr); -+ } -+ -+ return eResult; -+} -+ -+/* -+ * This function will set the psCpuPAddr to whatever the system layer -+ * has set it for the referenced region. -+ * It will not fail if the psCpuPAddr is invalid. 
-+ */ -+PVRSRV_ERROR PhysHeapGetCpuPAddr(PHYS_HEAP *psPhysHeap, -+ IMG_CPU_PHYADDR *psCpuPAddr) -+{ -+ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; -+ PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED; -+ -+ if (psImplFuncs->pfnGetCPUPAddr != NULL) -+ { -+ eResult = psImplFuncs->pfnGetCPUPAddr(psPhysHeap->pvImplData, -+ psCpuPAddr); -+ } -+ -+ return eResult; -+} -+ -+PVRSRV_ERROR PhysHeapGetSize(PHYS_HEAP *psPhysHeap, -+ IMG_UINT64 *puiSize) -+{ -+ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; -+ PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED; -+ -+ if (psImplFuncs->pfnGetSize != NULL) -+ { -+ eResult = psImplFuncs->pfnGetSize(psPhysHeap->pvImplData, -+ puiSize); -+ } -+ -+ return eResult; -+} -+ -+PVRSRV_ERROR -+PhysHeapGetMemInfo(PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT32 ui32PhysHeapCount, -+ PVRSRV_PHYS_HEAP *paePhysHeapID, -+ PHYS_HEAP_MEM_STATS_PTR paPhysHeapMemStats) -+{ -+ IMG_UINT32 i = 0; -+ PHYS_HEAP *psPhysHeap; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode invalid"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(ui32PhysHeapCount <= MAX_USER_MODE_ALLOC_PHYS_HEAPS, "ui32PhysHeapCount invalid"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(paePhysHeapID != NULL, "paePhysHeapID invalid"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(paPhysHeapMemStats != NULL, "paPhysHeapMemStats invalid"); -+ -+ for (i = 0; i < ui32PhysHeapCount; i++) -+ { -+ if (paePhysHeapID[i] >= PVRSRV_PHYS_HEAP_LAST) -+ { -+ return PVRSRV_ERROR_PHYSHEAP_ID_INVALID; -+ } -+ -+ psPhysHeap = _PhysHeapFindRealHeapNoFallback(paePhysHeapID[i], psDevNode); -+ -+ paPhysHeapMemStats[i].ui32PhysHeapFlags = 0; -+ -+ if (psPhysHeap && PhysHeapUserModeAlloc(paePhysHeapID[i]) -+ && psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats) -+ { -+ psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData, -+ &paPhysHeapMemStats[i].ui64TotalSize, -+ &paPhysHeapMemStats[i].ui64FreeSize); -+ paPhysHeapMemStats[i].ui32PhysHeapFlags |= PhysHeapGetType(psPhysHeap); -+ -+ if (paePhysHeapID[i] == psDevNode->psDevConfig->eDefaultHeap) -+ { -+ paPhysHeapMemStats[i].ui32PhysHeapFlags |= PVRSRV_PHYS_HEAP_FLAGS_IS_DEFAULT; -+ } -+ } -+ else -+ { -+ paPhysHeapMemStats[i].ui64TotalSize = 0; -+ paPhysHeapMemStats[i].ui64FreeSize = 0; -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr) -+{ -+ psPhysHeap->psMemFuncs->pfnCpuPAddrToDevPAddr(psPhysHeap->hPrivData, -+ ui32NumOfAddr, -+ psDevPAddr, -+ psCpuPAddr); -+} -+ -+void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr, -+ IMG_DEV_PHYADDR *psDevPAddr) -+{ -+ psPhysHeap->psMemFuncs->pfnDevPAddrToCpuPAddr(psPhysHeap->hPrivData, -+ ui32NumOfAddr, -+ psCpuPAddr, -+ psDevPAddr); -+} -+ -+IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap) -+{ -+ return psPhysHeap->pszPDumpMemspaceName; -+} -+ -+#if !defined(PVRSRV_PHYSHEAP_DISABLE_OOM_DEMOTION) -+static inline void _LogOOMDetection(IMG_BOOL isOOMDetected, PHYS_HEAP *psPhysHeap, PVRSRV_MEMALLOCFLAGS_T uiFlags) -+{ -+ IMG_BOOL bExistingVal = OSAtomicExchange(&psPhysHeap->sOOMDetected, isOOMDetected); -+ PVRSRV_PHYS_HEAP ePhysIdx = PVRSRV_GET_PHYS_HEAP_HINT(uiFlags); -+ -+ if (bExistingVal != isOOMDetected) -+ { -+ PVR_LOG(("Device: %d Physheap: %s OOM: %s", -+ (psPhysHeap->psDevNode->sDevId.ui32InternalID), -+ g_asPhysHeapUsageFlagStrings[ePhysIdx-1].pszLabel, -+ (isOOMDetected) ? 
"Detected" : "Resolved")); -+ } -+} -+#endif -+ -+PVRSRV_ERROR PhysHeapCreatePMR(PHYS_HEAP *psPhysHeap, -+ struct _CONNECTION_DATA_ *psConnection, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_UINT32 uiLog2PageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszAnnotation, -+ IMG_PID uiPid, -+ PMR **ppsPMRPtr, -+ IMG_UINT32 ui32PDumpFlags, -+ PVRSRV_MEMALLOCFLAGS_T *puiOutFlags) -+{ -+ PVRSRV_ERROR eError; -+ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; -+#if !defined(PVRSRV_PHYSHEAP_DISABLE_OOM_DEMOTION) -+ IMG_UINT64 uiFreeBytes; -+ PVRSRV_PHYS_HEAP eDemotionPhysIdx; -+ PVRSRV_MEMALLOCFLAGS_T uiDemotionFlags = uiFlags; -+ PVRSRV_PHYS_HEAP ePhysIdx = PVRSRV_GET_PHYS_HEAP_HINT(uiFlags); -+ PHYS_HEAP *psDemotionHeap = NULL; -+#endif -+ eError = psImplFuncs->pfnCreatePMR(psPhysHeap, -+ psConnection, -+ uiSize, -+ ui32NumPhysChunks, -+ ui32NumVirtChunks, -+ pui32MappingTable, -+ uiLog2PageSize, -+ uiFlags, -+ pszAnnotation, -+ uiPid, -+ ppsPMRPtr, -+ ui32PDumpFlags); -+ -+#if !defined(PVRSRV_PHYSHEAP_DISABLE_OOM_DEMOTION) -+ /* Check for OOM error, return if otherwise */ -+ _LogOOMDetection(((IsOOMError(eError)) ? IMG_TRUE : IMG_FALSE), psPhysHeap, uiFlags); -+ if (eError == PVRSRV_OK) -+ { -+ if (puiOutFlags) -+ { -+ *puiOutFlags = uiFlags; -+ } -+ return eError; -+ } -+ PVR_LOG_RETURN_IF_FALSE((IsOOMError(eError)), "Failed to allocate PMR", eError); -+ -+ /* Skip logic and return if mandate flag is set */ -+ if (PVRSRV_CHECK_MANDATED_PHYSHEAP(uiFlags)) -+ { -+ return eError; -+ } -+ -+ /* Demotion only occurs on CPU_LOCAL,GPU_LOCAL,GPU_PRIVATE */ -+ if (ePhysIdx > PVRSRV_PHYS_HEAP_GPU_PRIVATE) -+ { -+ return eError; -+ } -+ -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ for (eDemotionPhysIdx = (PVRSRV_PHYS_HEAP)(ePhysIdx-1); eDemotionPhysIdx != PVRSRV_PHYS_HEAP_DEFAULT; eDemotionPhysIdx--) -+ { -+ PVRSRV_CHANGE_PHYS_HEAP_HINT(eDemotionPhysIdx, uiDemotionFlags); -+ PVR_LOG_IF_FALSE_VA(PVR_DBG_MESSAGE, (ePhysIdx-eDemotionPhysIdx < 2), "Demoted from %s to CPU_LOCAL. " -+ "Expect Performance to be affected!", g_asPhysHeapUsageFlagStrings[ePhysIdx-1].pszLabel); -+ psDemotionHeap = _PhysHeapFindRealHeapNoFallback(eDemotionPhysIdx, psPhysHeap->psDevNode); -+ -+ /* Either no alternative available, or allocation already failed on selected heap */ -+ if (psDemotionHeap == NULL || psPhysHeap == psDemotionHeap) -+ { -+ continue; -+ } -+ -+ if (PhysHeapFreeMemCheck(psDemotionHeap, uiSize, &uiFreeBytes) != PVRSRV_OK) -+ { -+ _LogOOMDetection(IMG_TRUE, psDemotionHeap, uiDemotionFlags); -+ continue; -+ } -+ -+ psImplFuncs = psDemotionHeap->psImplFuncs; -+ eError = psImplFuncs->pfnCreatePMR(psDemotionHeap, -+ psConnection, -+ uiSize, -+ ui32NumPhysChunks, -+ ui32NumVirtChunks, -+ pui32MappingTable, -+ uiLog2PageSize, -+ uiDemotionFlags, -+ pszAnnotation, -+ uiPid, -+ ppsPMRPtr, -+ ui32PDumpFlags); -+ _LogOOMDetection(((IsOOMError(eError)) ? IMG_TRUE : IMG_FALSE), psDemotionHeap, uiDemotionFlags); -+ -+ if (eError == PVRSRV_OK) -+ { -+ if (puiOutFlags) -+ { -+ *puiOutFlags = uiDemotionFlags; -+ } -+ break; -+ } -+ } -+ if (eError == PVRSRV_OK) -+ { -+ /* Success demotion worked error Ok - emit warning. */ -+ PVR_LOG_VA(PVR_DBG_WARNING, "PhysHeap(%s) failed to allocate PMR. 
Demoted to %s" , -+ g_asPhysHeapUsageFlagStrings[ePhysIdx-1].pszLabel, -+ g_asPhysHeapUsageFlagStrings[eDemotionPhysIdx-1].pszLabel); -+ } -+ else -+ { -+ /* Unable to create PMR (Heap not found or CreatePMR failed) - emit Error */ -+ PVR_LOG_VA(PVR_DBG_ERROR, "Error raised %s : Unable to %s." , -+ PVRSRVGETERRORSTRING(eError), -+ (psDemotionHeap == NULL) ? "find heaps for demotion" : -+ "allocate PMR via Demotion heap"); -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ { -+ PPVRSRV_DEVICE_NODE psDevNode = PhysHeapDeviceNode(psPhysHeap); -+ OSLockAcquire(psDevNode->hPMRZombieListLock); -+ PVR_LOG_VA(PVR_DBG_ERROR, "PMR Zombie Count: %u, PMR Zombie Count In Cleanup: %u", -+ psDevNode->uiPMRZombieCount, -+ psDevNode->uiPMRZombieCountInCleanup); -+ OSLockRelease(psDevNode->hPMRZombieListLock); -+ } -+#endif -+ } -+#endif -+ return eError; -+} -+ -+PPVRSRV_DEVICE_NODE PhysHeapDeviceNode(PHYS_HEAP *psPhysHeap) -+{ -+ PVR_ASSERT(psPhysHeap != NULL); -+ -+ return psPhysHeap->psDevNode; -+} -+ -+static IMG_BOOL PhysHeapCreatedByPVRLayer(PVRSRV_PHYS_HEAP ePhysHeap) -+{ -+ PVR_ASSERT(ePhysHeap < PVRSRV_PHYS_HEAP_LAST); -+ -+ return (gasHeapProperties[ePhysHeap].ePVRLayerAction != PVR_LAYER_HEAP_ACTION_IGNORE); -+} -+ -+static IMG_BOOL PhysHeapAcquiredByPVRLayer(PVRSRV_PHYS_HEAP ePhysHeap) -+{ -+ PVR_ASSERT(ePhysHeap < PVRSRV_PHYS_HEAP_LAST); -+ -+ return (gasHeapProperties[ePhysHeap].ePVRLayerAction == PVR_LAYER_HEAP_ACTION_INITIALISE); -+} -+ -+IMG_BOOL PhysHeapUserModeAlloc(PVRSRV_PHYS_HEAP ePhysHeap) -+{ -+ PVR_ASSERT(ePhysHeap < PVRSRV_PHYS_HEAP_LAST); -+ -+ return gasHeapProperties[ePhysHeap].bUserModeAlloc; -+} -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+PVRSRV_ERROR PhysHeapPagesAllocGPV(PHYS_HEAP *psPhysHeap, size_t uiSize, -+ PG_HANDLE *psMemHandle, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_UINT32 ui32OSid, IMG_PID uiPid) -+{ -+ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; -+ PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED; -+ -+ if (psImplFuncs->pfnPagesAllocGPV != NULL) -+ { -+ eResult = psImplFuncs->pfnPagesAllocGPV(psPhysHeap, -+ uiSize, psMemHandle, psDevPAddr, ui32OSid, uiPid); -+ } -+ -+ return eResult; -+} -+#endif -+ -+PVRSRV_ERROR PhysHeapPagesAlloc(PHYS_HEAP *psPhysHeap, size_t uiSize, -+ PG_HANDLE *psMemHandle, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_PID uiPid) -+{ -+ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; -+ PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED; -+ -+ if (psImplFuncs->pfnPagesAlloc != NULL) -+ { -+ eResult = psImplFuncs->pfnPagesAlloc(psPhysHeap, -+ uiSize, psMemHandle, psDevPAddr, uiPid); -+ } -+ -+ return eResult; -+} -+ -+void PhysHeapPagesFree(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle) -+{ -+ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; -+ -+ PVR_ASSERT(psImplFuncs->pfnPagesFree != NULL); -+ -+ if (psImplFuncs->pfnPagesFree != NULL) -+ { -+ psImplFuncs->pfnPagesFree(psPhysHeap, -+ psMemHandle); -+ } -+} -+ -+PVRSRV_ERROR PhysHeapPagesMap(PHYS_HEAP *psPhysHeap, PG_HANDLE *pshMemHandle, size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, -+ void **pvPtr) -+{ -+ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; -+ PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED; -+ -+ if (psImplFuncs->pfnPagesMap != NULL) -+ { -+ eResult = psImplFuncs->pfnPagesMap(psPhysHeap, -+ pshMemHandle, uiSize, psDevPAddr, pvPtr); -+ } -+ -+ return eResult; -+} -+ -+void PhysHeapPagesUnMap(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle, void *pvPtr) -+{ -+ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; -+ -+ 
PVR_ASSERT(psImplFuncs->pfnPagesUnMap != NULL); -+ -+ if (psImplFuncs->pfnPagesUnMap != NULL) -+ { -+ psImplFuncs->pfnPagesUnMap(psPhysHeap, -+ psMemHandle, pvPtr); -+ } -+} -+ -+PVRSRV_ERROR PhysHeapPagesClean(PHYS_HEAP *psPhysHeap, PG_HANDLE *pshMemHandle, -+ IMG_UINT32 uiOffset, -+ IMG_UINT32 uiLength) -+{ -+ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; -+ PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED; -+ -+ if (psImplFuncs->pfnPagesClean != NULL) -+ { -+ eResult = psImplFuncs->pfnPagesClean(psPhysHeap, -+ pshMemHandle, uiOffset, uiLength); -+ } -+ -+ return eResult; -+} -+ -+IMG_UINT32 PhysHeapGetPageShift(PHYS_HEAP *psPhysHeap) -+{ -+ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; -+ IMG_UINT32 ui32PageShift = 0; -+ -+ PVR_ASSERT(psImplFuncs->pfnGetPageShift != NULL); -+ -+ if (psImplFuncs->pfnGetPageShift != NULL) -+ { -+ ui32PageShift = psImplFuncs->pfnGetPageShift(); -+ } -+ -+ return ui32PageShift; -+} -+ -+PVRSRV_ERROR PhysHeapFreeMemCheck(PHYS_HEAP *psPhysHeap, -+ IMG_UINT64 ui64MinRequiredMem, -+ IMG_UINT64 *pui64FreeMem) -+{ -+ IMG_UINT64 ui64TotalSize; -+ IMG_UINT64 ui64FreeSize; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psPhysHeap != NULL, "psPhysHeap"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pui64FreeMem != NULL, "pui64FreeMem"); -+ -+ psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData, -+ &ui64TotalSize, -+ &ui64FreeSize); -+ -+ *pui64FreeMem = ui64FreeSize; -+ if (ui64MinRequiredMem >= *pui64FreeMem) -+ { -+ eError = PVRSRV_ERROR_INSUFFICIENT_PHYS_HEAP_MEMORY; -+ } -+ -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/physheap.h b/drivers/gpu/drm/img-rogue/physheap.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physheap.h -@@ -0,0 +1,486 @@ -+/*************************************************************************/ /*! -+@File -+@Title Physical heap management header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Defines the interface for the physical heap management -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. 
-+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_memallocflags.h" -+#include "devicemem_typedefs.h" -+#include "opaque_types.h" -+#include "pmr_impl.h" -+#include "physheap_config.h" -+#include "pvrsrv_device.h" -+ -+#ifndef PHYSHEAP_H -+#define PHYSHEAP_H -+ -+#define B2KB(x) ((x) >> 10) -+#define B2MB(x) ((x) >> 20) -+ -+static inline IMG_UINT64 KB2B(IMG_UINT64 ui64Kilobytes) { return ui64Kilobytes << 10; } -+static inline IMG_UINT64 MB2B(IMG_UINT64 ui64Megabytes) { return ui64Megabytes << 20; } -+ -+typedef struct _PHYS_HEAP_ PHYS_HEAP; -+#define INVALID_PHYS_HEAP 0xDEADDEAD -+ -+typedef IMG_UINT32 PHYS_HEAP_POLICY; -+ -+/* Heap has default allocation policy and does not require -+ * any additional OS Functionality. Physically contiguous -+ * allocations are required for this physheap. -+ */ -+#define PHYS_HEAP_POLICY_DEFAULT (0U) -+ -+/* -+ * Heap has allocation strategy that may produce non -+ * physically contiguous allocations, additional OS functionality -+ * is required to map these allocations into the kernel. -+ */ -+#define PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG (1U) -+#define PHYS_HEAP_POLOCY_ALLOC_ALLOW_NONCONTIG_MASK (1U) -+ -+struct _CONNECTION_DATA_; -+ -+typedef struct _PG_HANDLE_ -+{ -+ union -+ { -+ void *pvHandle; -+ IMG_UINT64 ui64Handle; -+ }u; -+ /* The allocation order is log2 value of the number of pages to allocate. -+ * As such this is a correspondingly small value. E.g, for order 4 we -+ * are talking 2^4 * PAGE_SIZE contiguous allocation. -+ * DevPxAlloc API does not need to support orders higher than 4. -+ */ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+ IMG_BYTE uiOrder; /* Order of the corresponding allocation */ -+ IMG_BYTE uiOSid; /* OSid to use for allocation arena. -+ * Connection-specific. */ -+ IMG_BYTE uiPad1, -+ uiPad2; /* Spare */ -+#else -+ IMG_BYTE uiOrder; /* Order of the corresponding allocation */ -+ IMG_BYTE uiPad1, -+ uiPad2, -+ uiPad3; /* Spare */ -+#endif -+} PG_HANDLE; -+ -+/*! Pointer to private implementation specific data */ -+typedef void *PHEAP_IMPL_DATA; -+ -+/*************************************************************************/ /*! -+@Function Callback function PFN_DESTROY_DATA -+@Description Destroy private implementation specific data. -+@Input PHEAP_IMPL_DATA Pointer to implementation data. -+*/ /**************************************************************************/ -+typedef void (*PFN_DESTROY_DATA)(PHEAP_IMPL_DATA); -+/*************************************************************************/ /*! -+@Function Callback function PFN_GET_DEV_PADDR -+@Description Get heap device physical address. -+@Input PHEAP_IMPL_DATA Pointer to implementation data. -+@Output IMG_DEV_PHYADDR Device physical address. 
-+@Return PVRSRV_ERROR PVRSRV_OK or error code -+*/ /**************************************************************************/ -+typedef PVRSRV_ERROR (*PFN_GET_DEV_PADDR)(PHEAP_IMPL_DATA, IMG_DEV_PHYADDR*); -+/*************************************************************************/ /*! -+@Function Callback function PFN_GET_CPU_PADDR -+@Description Get heap CPU physical address. -+@Input PHEAP_IMPL_DATA Pointer to implementation data. -+@Output IMG_CPU_PHYADDR CPU physical address. -+@Return PVRSRV_ERROR PVRSRV_OK or error code -+*/ /**************************************************************************/ -+typedef PVRSRV_ERROR (*PFN_GET_CPU_PADDR)(PHEAP_IMPL_DATA, IMG_CPU_PHYADDR*); -+/*************************************************************************/ /*! -+@Function Callback function PFN_GET_SIZE -+@Description Get size of heap. -+@Input PHEAP_IMPL_DATA Pointer to implementation data. -+@Output IMG_UINT64 Size of heap. -+@Return PVRSRV_ERROR PVRSRV_OK or error code -+*/ /**************************************************************************/ -+typedef PVRSRV_ERROR (*PFN_GET_SIZE)(PHEAP_IMPL_DATA, IMG_UINT64*); -+/*************************************************************************/ /*! -+@Function Callback function PFN_GET_PAGE_SHIFT -+@Description Get heap log2 page shift. -+@Return IMG_UINT32 Log2 page shift -+*/ /**************************************************************************/ -+typedef IMG_UINT32 (*PFN_GET_PAGE_SHIFT)(void); -+ -+/*************************************************************************/ /*! -+@Function Callback function PFN_GET_MEM_STATS -+@Description Get total and free memory size of the physical heap managed by -+ the PMR Factory. -+@Input PHEAP_IMPL_DATA Pointer to implementation data. -+@Output IMG_UINT64 total Size of heap. -+@Output IMG_UINT64 free Size available in a heap. -+@Return none -+*/ /**************************************************************************/ -+typedef void (*PFN_GET_MEM_STATS)(PHEAP_IMPL_DATA, IMG_UINT64 *, IMG_UINT64 *); -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+typedef PVRSRV_ERROR (*PFN_PAGES_ALLOC_GPV)(PHYS_HEAP *psPhysHeap, size_t uiSize, -+ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_UINT32 ui32OSid, IMG_PID uiPid); -+#endif -+typedef PVRSRV_ERROR (*PFN_PAGES_ALLOC)(PHYS_HEAP *psPhysHeap, size_t uiSize, -+ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_PID uiPid); -+ -+typedef void (*PFN_PAGES_FREE)(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle); -+ -+typedef PVRSRV_ERROR (*PFN_PAGES_MAP)(PHYS_HEAP *psPhysHeap, PG_HANDLE *pshMemHandle, -+ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, -+ void **pvPtr); -+ -+typedef void (*PFN_PAGES_UNMAP)(PHYS_HEAP *psPhysHeap, -+ PG_HANDLE *psMemHandle, void *pvPtr); -+ -+typedef PVRSRV_ERROR (*PFN_PAGES_CLEAN)(PHYS_HEAP *psPhysHeap, -+ PG_HANDLE *pshMemHandle, -+ IMG_UINT32 uiOffset, -+ IMG_UINT32 uiLength); -+ -+/*************************************************************************/ /*! -+@Function Callback function PFN_CREATE_PMR -+@Description Create a PMR physical allocation and back with RAM on creation, -+ if required. The RAM page comes either directly from -+ the Phys Heap's associated pool of memory or from an OS API. -+@Input psPhysHeap Pointer to Phys Heap. -+@Input psConnection Pointer to device connection. -+@Input uiSize Allocation size. -+@Input ui32NumPhysChunks Physical chunk count. -+@Input ui32NumVirtChunks Virtual chunk count. -+@Input pui32MappingTable Mapping Table. -+@Input uiLog2PageSize Page size. 
-+@Input uiFlags Memalloc flags. -+@Input pszAnnotation Annotation. -+@Input uiPid Process ID. -+@Output ppsPMRPtr Pointer to PMR. -+@Input ui32PDumpFlag PDump flags. -+@Return PVRSRV_ERROR PVRSRV_OK or error code -+*/ /**************************************************************************/ -+typedef PVRSRV_ERROR (*PFN_CREATE_PMR)(PHYS_HEAP *psPhysHeap, -+ struct _CONNECTION_DATA_ *psConnection, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_UINT32 uiLog2PageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszAnnotation, -+ IMG_PID uiPid, -+ PMR **ppsPMRPtr, -+ IMG_UINT32 ui32PDumpFlags); -+ -+/*! Implementation specific function table */ -+typedef struct PHEAP_IMPL_FUNCS_TAG -+{ -+ PFN_DESTROY_DATA pfnDestroyData; -+ PFN_GET_DEV_PADDR pfnGetDevPAddr; -+ PFN_GET_CPU_PADDR pfnGetCPUPAddr; -+ PFN_GET_SIZE pfnGetSize; -+ PFN_GET_PAGE_SHIFT pfnGetPageShift; -+ PFN_GET_MEM_STATS pfnGetPMRFactoryMemStats; -+ PFN_CREATE_PMR pfnCreatePMR; -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+ PFN_PAGES_ALLOC_GPV pfnPagesAllocGPV; -+#endif -+ PFN_PAGES_ALLOC pfnPagesAlloc; -+ PFN_PAGES_FREE pfnPagesFree; -+ PFN_PAGES_MAP pfnPagesMap; -+ PFN_PAGES_UNMAP pfnPagesUnMap; -+ PFN_PAGES_CLEAN pfnPagesClean; -+} PHEAP_IMPL_FUNCS; -+ -+/*************************************************************************/ /*! -+@Function PhysHeapInitDeviceHeaps -+@Description Registers and acquires physical memory heaps -+@Input psDeviceNode pointer to device node -+@Input psDevConfig pointer to device config -+@Return PVRSRV_ERROR PVRSRV_OK on success, or a PVRSRV_ error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR PhysHeapInitDeviceHeaps(PPVRSRV_DEVICE_NODE psDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig); -+ -+/*************************************************************************/ /*! -+@Function PhysHeapDeInitDeviceHeaps -+@Description Releases and unregisters physical memory heaps -+@Input psDeviceNode pointer to device node -+@Return PVRSRV_ERROR PVRSRV_OK on success, or a PVRSRV_ error code -+*/ /**************************************************************************/ -+void PhysHeapDeInitDeviceHeaps(PPVRSRV_DEVICE_NODE psDeviceNode); -+ -+/*************************************************************************/ /*! -+@Function PhysHeapCreateHeapFromConfig -+@Description Create a new heap. Calls specific heap API depending -+ on heap type. -+@Input psDevNode Pointer to device node struct. -+@Input psConfig Heap configuration. -+@Output ppsPhysHeap Optional pointer to the created heap. Can be NULL -+@Return PVRSRV_ERROR PVRSRV_OK or error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PhysHeapCreateHeapFromConfig(PPVRSRV_DEVICE_NODE psDevNode, -+ PHYS_HEAP_CONFIG *psConfig, -+ PHYS_HEAP **ppsPhysHeap); -+ -+/*************************************************************************/ /*! -+@Function PhysHeapCreate -+@Description Create a new heap. Allocated and stored internally. -+ Destroy with PhysHeapDestroy when no longer required. -+@Input psDevNode Pointer to device node struct -+@Input psConfig Heap configuration. -+@Input uiPolicy Phys heap allocation policy. -+@Input pvImplData Implementation specific data. Can be NULL. -+@Input psImplFuncs Implementation specific function table. Must be -+ a valid pointer. -+@Output ppsPhysHeap Optional pointer to the created heap. 
Can be NULL -+@Return PVRSRV_ERROR PVRSRV_OK or error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR PhysHeapCreate(PPVRSRV_DEVICE_NODE psDevNode, -+ PHYS_HEAP_CONFIG *psConfig, -+ PHYS_HEAP_POLICY uiPolicy, -+ PHEAP_IMPL_DATA pvImplData, -+ PHEAP_IMPL_FUNCS *psImplFuncs, -+ PHYS_HEAP **ppsPhysHeap); -+ -+/*************************************************************************/ /*! -+@Function PhysHeapDestroyDeviceHeaps -+@Description Destroys all heaps referenced by a device. -+@Input psDevNode Pointer to a device node struct. -+@Return void -+*/ /**************************************************************************/ -+void PhysHeapDestroyDeviceHeaps(PPVRSRV_DEVICE_NODE psDevNode); -+ -+void PhysHeapDestroy(PHYS_HEAP *psPhysHeap); -+ -+PVRSRV_ERROR PhysHeapAcquire(PHYS_HEAP *psPhysHeap); -+ -+/*************************************************************************/ /*! -+@Function PhysHeapAcquireByID -+@Description Acquire PhysHeap by DevPhysHeap. -+@Input eDevPhysHeap Device Phys Heap. -+@Input psDevNode Pointer to device node struct -+@Output ppsPhysHeap PhysHeap if found. -+@Return PVRSRV_ERROR PVRSRV_OK or error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR PhysHeapAcquireByID(PVRSRV_PHYS_HEAP eDevPhysHeap, -+ PPVRSRV_DEVICE_NODE psDevNode, -+ PHYS_HEAP **ppsPhysHeap); -+ -+void PhysHeapRelease(PHYS_HEAP *psPhysHeap); -+ -+/*************************************************************************/ /*! -+@Function PhysHeapGetImplData -+@Description Get physical heap implementation specific data. -+@Input psPhysHeap Pointer to physical heap. -+@Input psConfig Heap configuration. -+@Return pvImplData Implementation specific data. Can be NULL. -+*/ /**************************************************************************/ -+PHEAP_IMPL_DATA PhysHeapGetImplData(PHYS_HEAP *psPhysHeap); -+ -+PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap); -+ -+/*************************************************************************/ /*! -+@Function PhysHeapGetPolicy -+@Description Get phys heap allocation policy flags. -+@Input psPhysHeap Pointer to physical heap. -+@Return PHYS_HEAP_POLICY Phys heap policy flags. -+*/ /**************************************************************************/ -+PHYS_HEAP_POLICY PhysHeapGetPolicy(PHYS_HEAP *psPhysHeap); -+ -+/*************************************************************************/ /*! -+@Function PhysHeapGetFlags -+@Description Get phys heap usage flags. -+@Input psPhysHeap Pointer to physical heap. -+@Return PHYS_HEAP_USAGE_FLAGS Phys heap usage flags. -+*/ /**************************************************************************/ -+PHYS_HEAP_USAGE_FLAGS PhysHeapGetFlags(PHYS_HEAP *psPhysHeap); -+ -+IMG_BOOL PhysHeapValidateDefaultHeapExists(PPVRSRV_DEVICE_NODE psDevNode); -+ -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+IMG_UINT32 PhysHeapGetIPAValue(PHYS_HEAP *psPhysHeap); -+ -+IMG_UINT32 PhysHeapGetIPAMask(PHYS_HEAP *psPhysHeap); -+ -+IMG_UINT32 PhysHeapGetIPAShift(PHYS_HEAP *psPhysHeap); -+#endif -+ -+PVRSRV_ERROR PhysHeapGetCpuPAddr(PHYS_HEAP *psPhysHeap, -+ IMG_CPU_PHYADDR *psCpuPAddr); -+ -+ -+PVRSRV_ERROR PhysHeapGetSize(PHYS_HEAP *psPhysHeap, -+ IMG_UINT64 *puiSize); -+ -+/*************************************************************************/ /*! -+@Function PhysHeapGetMemInfo -+@Description Get phys heap memory statistics for a given physical heap ID. 
-+@Input psDevNode Pointer to device node struct -+@Input ui32PhysHeapCount Physical heap count -+@Input paePhysHeapID Physical heap ID -+@Output paPhysHeapMemStats Buffer that holds the memory statistics -+@Return PVRSRV_ERROR PVRSRV_OK or error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PhysHeapGetMemInfo(PPVRSRV_DEVICE_NODE psDevNode, -+ IMG_UINT32 ui32PhysHeapCount, -+ PVRSRV_PHYS_HEAP *paePhysHeapID, -+ PHYS_HEAP_MEM_STATS_PTR paPhysHeapMemStats); -+ -+PVRSRV_ERROR PhysHeapGetDevPAddr(PHYS_HEAP *psPhysHeap, -+ IMG_DEV_PHYADDR *psDevPAddr); -+ -+void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr); -+ -+void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr, -+ IMG_DEV_PHYADDR *psDevPAddr); -+ -+IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap); -+ -+const IMG_CHAR *PhysHeapName(PHYS_HEAP *psPhysHeap); -+ -+/*************************************************************************/ /*! -+@Function PhysHeapCreatePMR -+@Description Function calls an implementation-specific function pointer. -+ See function pointer for details. -+@Return PVRSRV_ERROR PVRSRV_OK or error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR PhysHeapCreatePMR(PHYS_HEAP *psPhysHeap, -+ struct _CONNECTION_DATA_ *psConnection, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_UINT32 uiLog2PageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszAnnotation, -+ IMG_PID uiPid, -+ PMR **ppsPMRPtr, -+ IMG_UINT32 ui32PDumpFlags, -+ PVRSRV_MEMALLOCFLAGS_T *uiOutFlags); -+ -+/*************************************************************************/ /*! -+@Function PhysHeapDeviceNode -+@Description Get pointer to the device node this heap belongs to. -+@Input psPhysHeap Pointer to physical heap. -+@Return PPVRSRV_DEVICE_NODE Pointer to device node. -+*/ /**************************************************************************/ -+PPVRSRV_DEVICE_NODE PhysHeapDeviceNode(PHYS_HEAP *psPhysHeap); -+ -+/*************************************************************************/ /*! -+@Function PhysHeapInitByPVRLayer -+@Description Is phys heap to be initialised in PVR layer? -+@Input ePhysHeap phys heap -+@Return IMG_BOOL return IMG_TRUE if yes -+*/ /**************************************************************************/ -+IMG_BOOL PhysHeapInitByPVRLayer(PVRSRV_PHYS_HEAP ePhysHeap); -+ -+/*************************************************************************/ /*! -+@Function PhysHeapUserModeAlloc -+@Description Is allocation from UM allowed? 
-+@Input ePhysHeap phys heap -+@Return IMG_BOOL return IMG_TRUE if yes -+*/ /**************************************************************************/ -+IMG_BOOL PhysHeapUserModeAlloc(PVRSRV_PHYS_HEAP ePhysHeap); -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+PVRSRV_ERROR PhysHeapPagesAllocGPV(PHYS_HEAP *psPhysHeap, -+ size_t uiSize, -+ PG_HANDLE *psMemHandle, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_UINT32 ui32OSid, IMG_PID uiPid); -+#endif -+ -+PVRSRV_ERROR PhysHeapPagesAlloc(PHYS_HEAP *psPhysHeap, -+ size_t uiSize, -+ PG_HANDLE *psMemHandle, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_PID uiPid); -+ -+void PhysHeapPagesFree(PHYS_HEAP *psPhysHeap, -+ PG_HANDLE *psMemHandle); -+ -+PVRSRV_ERROR PhysHeapPagesMap(PHYS_HEAP *psPhysHeap, -+ PG_HANDLE *pshMemHandle, -+ size_t uiSize, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ void **pvPtr); -+ -+void PhysHeapPagesUnMap(PHYS_HEAP *psPhysHeap, -+ PG_HANDLE *psMemHandle, -+ void *pvPtr); -+ -+PVRSRV_ERROR PhysHeapPagesClean(PHYS_HEAP *psPhysHeap, -+ PG_HANDLE *pshMemHandle, -+ IMG_UINT32 uiOffset, -+ IMG_UINT32 uiLength); -+ -+/*************************************************************************/ /*! -+@Function PhysHeapGetPageShift -+@Description Get phys heap page shift. -+@Input psPhysHeap Pointer to physical heap. -+@Return IMG_UINT32 Log2 page shift -+*/ /**************************************************************************/ -+IMG_UINT32 PhysHeapGetPageShift(PHYS_HEAP *psPhysHeap); -+ -+/*************************************************************************/ /*! -+@Function PhysHeapFreeMemCheck -+@Description Check a physheap has the required amount of free memory. -+ -+@Input psPhysHeap Pointer to physical heap. -+@Input ui64MinRequiredMem The minimum free memory for success (bytes). -+@Output pui64FreeMem The free memory in the physical heap (bytes). -+ -+@Return PVRSRV_ERROR If successful PVRSRV_OK else a PVRSRV_ERROR code. -+*/ /**************************************************************************/ -+PVRSRV_ERROR PhysHeapFreeMemCheck(PHYS_HEAP *psPhysHeap, -+ IMG_UINT64 ui64MinRequiredMem, -+ IMG_UINT64 *pui64FreeMem); -+ -+#endif /* PHYSHEAP_H */ -diff --git a/drivers/gpu/drm/img-rogue/physheap_config.h b/drivers/gpu/drm/img-rogue/physheap_config.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physheap_config.h -@@ -0,0 +1,164 @@ -+/*************************************************************************/ /*! -+@File physheap_config.h -+@Title Physical heap Config API -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Physical heap configs are created in the system layer and -+ stored against each device node for use in the Services Server -+ common layer. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PHYSHEAP_CONFIG_H -+#define PHYSHEAP_CONFIG_H -+ -+#include "img_types.h" -+#include "pvrsrv_memallocflags.h" -+#include "pvrsrv_memalloc_physheap.h" -+ -+typedef IMG_UINT32 PHYS_HEAP_USAGE_FLAGS; -+ -+/** -+ * ! IMPORTANT ! -+ * If you update the PHYS_HEAP_USAGE_FLAGS definitions, you must update the -+ * g_asPhysHeapUsageFlagStrings structure within physheap.c and the -+ * PHYS_HEAP_USAGE_MASK. 
-+ */ -+#define PHYS_HEAP_USAGE_CPU_LOCAL (1U < -+ -+module_param(PMRAllocFail, uint, 0644); -+MODULE_PARM_DESC(PMRAllocFail, "When number of PMR allocs reaches " -+ "this value, it will fail (default value is 0 which " -+ "means that alloc function will behave normally)."); -+#endif /* defined(__linux__) */ -+#endif /* defined(DEBUG) */ -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#include "process_stats.h" -+#include "proc_stats.h" -+#endif -+ -+PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT32 ui32MemSize, -+ IMG_UINT32 ui32Log2Align, -+ const IMG_UINT8 u8Value, -+ IMG_BOOL bInitPage, -+#if defined(PDUMP) -+ const IMG_CHAR *pszDevSpace, -+ const IMG_CHAR *pszSymbolicAddress, -+ IMG_HANDLE *phHandlePtr, -+#endif -+ IMG_PID uiPid, -+ IMG_HANDLE hMemHandle, -+ IMG_DEV_PHYADDR *psDevPhysAddr) -+{ -+ void *pvCpuVAddr; -+ PVRSRV_ERROR eError; -+#if defined(PDUMP) -+ IMG_CHAR szFilenameOut[PDUMP_PARAM_MAX_FILE_NAME]; -+ PDUMP_FILEOFFSET_T uiOffsetOut; -+ IMG_UINT32 ui32PageSize; -+ IMG_UINT32 ui32PDumpMemSize = ui32MemSize; -+ PVRSRV_ERROR ePDumpError; -+#endif -+ PG_HANDLE *psMemHandle; -+ IMG_UINT64 uiMask; -+ IMG_DEV_PHYADDR sDevPhysAddr_int; -+ -+ psMemHandle = hMemHandle; -+ -+ /* Allocate the pages */ -+ eError = PhysHeapPagesAlloc(psDevNode->psMMUPhysHeap, -+ TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize), -+ psMemHandle, -+ &sDevPhysAddr_int, -+ uiPid); -+ PVR_LOG_RETURN_IF_ERROR(eError, "pfnDevPxAlloc:1"); -+ -+ /* Check to see if the page allocator returned pages with our desired -+ * alignment, which is not unlikely -+ */ -+ uiMask = (1 << ui32Log2Align) - 1; -+ if (ui32Log2Align && (sDevPhysAddr_int.uiAddr & uiMask)) -+ { -+ /* use over allocation instead */ -+ PhysHeapPagesFree(psDevNode->psMMUPhysHeap, psMemHandle); -+ -+ ui32MemSize += (IMG_UINT32) uiMask; -+ eError = PhysHeapPagesAlloc(psDevNode->psMMUPhysHeap, -+ TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize), -+ psMemHandle, -+ &sDevPhysAddr_int, -+ uiPid); -+ PVR_LOG_RETURN_IF_ERROR(eError, "pfnDevPxAlloc:2"); -+ -+ sDevPhysAddr_int.uiAddr += uiMask; -+ sDevPhysAddr_int.uiAddr &= ~uiMask; -+ } -+ *psDevPhysAddr = sDevPhysAddr_int; -+ -+#if defined(PDUMP) -+ ui32PageSize = ui32Log2Align? (1 << ui32Log2Align) : OSGetPageSize(); -+ eError = PDumpMalloc(psDevNode, -+ pszDevSpace, -+ pszSymbolicAddress, -+ ui32PDumpMemSize, -+ ui32PageSize, -+ IMG_FALSE, -+ 0, -+ phHandlePtr, -+ PDUMP_NONE); -+ if (PVRSRV_OK != eError) -+ { -+ PDUMPCOMMENT(psDevNode, "Allocating pages failed"); -+ *phHandlePtr = NULL; -+ } -+ ePDumpError = eError; -+#endif -+ -+ if (bInitPage) -+ { -+ /*Map the page to the CPU VA space */ -+ eError = PhysHeapPagesMap(psDevNode->psMMUPhysHeap, -+ psMemHandle, -+ ui32MemSize, -+ &sDevPhysAddr_int, -+ &pvCpuVAddr); -+ if (PVRSRV_OK != eError) -+ { -+ PVR_LOG_ERROR(eError, "DevPxMap"); -+ PhysHeapPagesFree(psDevNode->psMMUPhysHeap, psMemHandle); -+ return eError; -+ } -+ -+ /*Fill the memory with given content */ -+ OSDeviceMemSet(pvCpuVAddr, u8Value, ui32MemSize); -+ -+ /*Map the page to the CPU VA space */ -+ eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, -+ psMemHandle, -+ 0, -+ ui32MemSize); -+ if (PVRSRV_OK != eError) -+ { -+ PVR_LOG_ERROR(eError, "DevPxClean"); -+ PhysHeapPagesUnMap(psDevNode->psMMUPhysHeap, psMemHandle, pvCpuVAddr); -+ PhysHeapPagesFree(psDevNode->psMMUPhysHeap, psMemHandle); -+ return eError; -+ } -+ -+#if defined(PDUMP) -+ if (ePDumpError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE) -+ { -+ /* PDumping of the page contents can be done in two ways -+ * 1. 
Store the single byte init value to the .prm file -+ * and load the same value to the entire dummy page buffer -+ * This method requires lot of LDB's inserted into the out2.txt -+ * -+ * 2. Store the entire contents of the buffer to the .prm file -+ * and load them back. -+ * This only needs a single LDB instruction in the .prm file -+ * and chosen this method -+ * size of .prm file might go up but that's not huge at least -+ * for this allocation -+ */ -+ /* Write the buffer contents to the prm file */ -+ eError = PDumpWriteParameterBlob(psDevNode, -+ pvCpuVAddr, -+ ui32PDumpMemSize, -+ PDUMP_FLAGS_CONTINUOUS, -+ szFilenameOut, -+ sizeof(szFilenameOut), -+ &uiOffsetOut); -+ if (PVRSRV_OK == eError) -+ { -+ /* Load the buffer back to the allocated memory when playing the pdump */ -+ eError = PDumpPMRLDB(psDevNode, -+ pszDevSpace, -+ pszSymbolicAddress, -+ 0, -+ ui32PDumpMemSize, -+ szFilenameOut, -+ uiOffsetOut, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (PVRSRV_OK != eError) -+ { -+ PDUMP_ERROR(psDevNode, eError, "Failed to write LDB statement to script file"); -+ PVR_LOG_ERROR(eError, "PDumpPMRLDB"); -+ } -+ } -+ else if (eError != PVRSRV_ERROR_PDUMP_NOT_ALLOWED) -+ { -+ PDUMP_ERROR(psDevNode, eError, "Failed to write device allocation to parameter file"); -+ PVR_LOG_ERROR(eError, "PDumpWriteParameterBlob"); -+ } -+ else -+ { -+ /* Else write to parameter file prevented under the flags and -+ * current state of the driver so skip write to script and error IF. -+ * This is expected e.g., if not in the capture range. -+ */ -+ eError = PVRSRV_OK; -+ } -+ } -+#endif -+ -+ /* Unmap the page */ -+ PhysHeapPagesUnMap(psDevNode->psMMUPhysHeap, -+ psMemHandle, -+ pvCpuVAddr); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+void DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode, -+#if defined(PDUMP) -+ IMG_HANDLE hPDUMPMemHandle, -+#endif -+ IMG_HANDLE hMemHandle) -+{ -+ PG_HANDLE *psMemHandle; -+ -+ psMemHandle = hMemHandle; -+ PhysHeapPagesFree(psDevNode->psMMUPhysHeap, psMemHandle); -+#if defined(PDUMP) -+ if (NULL != hPDUMPMemHandle) -+ { -+ PDumpFree(psDevNode, hPDUMPMemHandle); -+ } -+#endif -+ -+} -+ -+ -+/* Checks the input parameters and adjusts them if possible and necessary */ -+static inline PVRSRV_ERROR _ValidateParams(IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 *puiLog2AllocPageSize, -+ IMG_DEVMEM_SIZE_T *puiSize) -+{ -+ IMG_UINT32 uiLog2AllocPageSize = *puiLog2AllocPageSize; -+ IMG_DEVMEM_SIZE_T uiSize = *puiSize; -+ /* Sparse if we have different number of virtual and physical chunks plus -+ * in general all allocations with more than one virtual chunk */ -+ IMG_BOOL bIsSparse = (ui32NumVirtChunks != ui32NumPhysChunks || -+ ui32NumVirtChunks > 1) ? 
IMG_TRUE : IMG_FALSE; -+ -+ if (PVRSRV_CHECK_ON_DEMAND(uiFlags) && -+ PVRSRV_CHECK_PHYS_ALLOC_NOW(uiFlags)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid to specify both ON_DEMAND and NOW phys alloc flags: 0x%" IMG_UINT64_FMTSPECX, __func__, uiFlags)); -+ return PVRSRV_ERROR_INVALID_FLAGS; -+ } -+ -+ if (ui32NumPhysChunks == 0 && ui32NumVirtChunks == 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Number of physical chunks and number of virtual chunks " -+ "cannot be both 0", -+ __func__)); -+ -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* Protect against ridiculous page sizes */ -+ if (uiLog2AllocPageSize > RGX_HEAP_2MB_PAGE_SHIFT || uiLog2AllocPageSize < RGX_HEAP_4KB_PAGE_SHIFT) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Page size is out of range: 2^%u.", uiLog2AllocPageSize)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* Range check of the alloc size */ -+ if (!PMRValidateSize(uiSize)) -+ { -+ PVR_LOG_VA(PVR_DBG_ERROR, -+ "PMR size exceeds limit #Chunks: %u ChunkSz %"IMG_UINT64_FMTSPECX"", -+ ui32NumVirtChunks, -+ (IMG_UINT64) 1ULL << uiLog2AllocPageSize); -+ return PVRSRV_ERROR_PMR_TOO_LARGE; -+ } -+ -+ /* Fail if requesting coherency on one side but uncached on the other */ -+ if (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) && -+ (PVRSRV_CHECK_GPU_UNCACHED(uiFlags) || PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Request for CPU coherency but specifying GPU uncached " -+ "Please use GPU cached flags for coherency.")); -+ return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE; -+ } -+ -+ if (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) && -+ (PVRSRV_CHECK_CPU_UNCACHED(uiFlags) || PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Request for GPU coherency but specifying CPU uncached " -+ "Please use CPU cached flags for coherency.")); -+ return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE; -+ } -+ -+ if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) && PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Zero on Alloc and Poison on Alloc are mutually exclusive.", -+ __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (bIsSparse) -+ { -+ /* For sparse we need correct parameters like a suitable page size.... */ -+ if (OSGetPageShift() > uiLog2AllocPageSize) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid log2-contiguity for sparse allocation. " -+ "Requested %u, required minimum %zd", -+ __func__, -+ uiLog2AllocPageSize, -+ OSGetPageShift() )); -+ -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (ui32NumVirtChunks * (1 << uiLog2AllocPageSize) != uiSize) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Total alloc size (%#" IMG_UINT64_FMTSPECx ") " -+ "is not equal to virtual chunks * chunk size " -+ "(%#" IMG_UINT64_FMTSPECx ")", -+ __func__, uiSize, (IMG_UINT64) (ui32NumVirtChunks * (1ULL << uiLog2AllocPageSize)))); -+ -+ return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE; -+ } -+ -+ if (ui32NumPhysChunks > ui32NumVirtChunks) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Number of physical chunks (%u) must not be greater " -+ "than number of virtual chunks (%u)", -+ __func__, -+ ui32NumPhysChunks, -+ ui32NumVirtChunks)); -+ -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ } -+ else -+ { -+ /* -+ * Silently round up alignment/pagesize if request was less that PAGE_SHIFT -+ * because it would never be harmful for memory to be _more_ contiguous that -+ * was desired. -+ */ -+ uiLog2AllocPageSize = OSGetPageShift() > uiLog2AllocPageSize ? 
-+ OSGetPageShift() : uiLog2AllocPageSize; -+ -+ /* Same for total size */ -+ uiSize = PVR_ALIGN(uiSize, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); -+ } -+ -+ if ((uiSize & ((1ULL << uiLog2AllocPageSize) - 1)) != 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Total size (%#" IMG_UINT64_FMTSPECx ") " -+ "must be a multiple of the requested contiguity (%" -+ IMG_UINT64_FMTSPEC ")", __func__, uiSize, -+ (IMG_UINT64) (1ULL << uiLog2AllocPageSize))); -+ return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE; -+ } -+ -+ *puiLog2AllocPageSize = uiLog2AllocPageSize; -+ *puiSize = uiSize; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR _DevPhysHeapFromFlags(PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ PVRSRV_PHYS_HEAP *peDevPhysHeap) -+{ -+ PVRSRV_PHYS_HEAP eHeap = PVRSRV_GET_PHYS_HEAP_HINT(uiFlags); -+ -+ switch (eHeap) -+ { -+ case PVRSRV_PHYS_HEAP_FW_PREMAP0: -+ case PVRSRV_PHYS_HEAP_FW_PREMAP1: -+ case PVRSRV_PHYS_HEAP_FW_PREMAP2: -+ case PVRSRV_PHYS_HEAP_FW_PREMAP3: -+ case PVRSRV_PHYS_HEAP_FW_PREMAP4: -+ case PVRSRV_PHYS_HEAP_FW_PREMAP5: -+ case PVRSRV_PHYS_HEAP_FW_PREMAP6: -+ case PVRSRV_PHYS_HEAP_FW_PREMAP7: -+ { -+ /* keep heap (with check) */ -+ PVR_RETURN_IF_INVALID_PARAM(!PVRSRV_VZ_MODE_IS(GUEST)); -+ break; -+ } -+ case PVRSRV_PHYS_HEAP_LAST: -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ default: -+ { -+ break; -+ } -+ } -+ -+ *peDevPhysHeap = eHeap; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+PhysmemNewRamBackedPMR_direct(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_UINT32 uiLog2AllocPageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 uiAnnotationLength, -+ const IMG_CHAR *pszAnnotation, -+ IMG_PID uiPid, -+ PMR **ppsPMRPtr, -+ IMG_UINT32 ui32PDumpFlags, -+ PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 i; -+ PVRSRV_PHYS_HEAP ePhysHeapIdx; -+ PVRSRV_MEMALLOCFLAGS_T uiPMRFlags = uiFlags; -+ uiPid = (psConnection != NULL) ? OSGetCurrentClientProcessIDKM() : uiPid; -+ -+ /* This is where we would expect to validate the uiAnnotationLength parameter -+ (to confirm it is sufficient to store the string in pszAnnotation plus a -+ terminating NULL). However, we do not make reference to this value when -+ we copy the string in PMRCreatePMR() - instead there we use strlcpy() -+ to copy at most chars and ensure whatever is copied is null-terminated. -+ The parameter is only used by the generated bridge code. -+ */ -+ PVR_UNREFERENCED_PARAMETER(uiAnnotationLength); -+ -+ eError = _ValidateParams(ui32NumPhysChunks, -+ ui32NumVirtChunks, -+ uiFlags, -+ &uiLog2AllocPageSize, -+ &uiSize); -+ PVR_RETURN_IF_ERROR(eError); -+ -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+#if !defined(PVRSRV_INTERNAL_IPA_FEATURE_TESTING) -+ /* Do not permit IPA PMR allocation flags to be passed through to the -+ * new PMR. 
-+ */ -+ uiPMRFlags &= ~PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK; -+#endif -+#endif -+ -+#if defined(PDUMP) -+ eError = PDumpValidateUMFlags(ui32PDumpFlags); -+ PVR_RETURN_IF_ERROR(eError); -+#endif -+ -+ for (i = 0; i < ui32NumPhysChunks; i++) -+ { -+ PVR_LOG_RETURN_IF_FALSE(pui32MappingTable[i] < ui32NumVirtChunks, -+ "Mapping table value exceeds ui32NumVirtChunks", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ eError = _DevPhysHeapFromFlags(uiFlags, &ePhysHeapIdx); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ if (ePhysHeapIdx == PVRSRV_PHYS_HEAP_DEFAULT) -+ { -+ ePhysHeapIdx = psDevNode->psDevConfig->eDefaultHeap; -+ PVRSRV_CHANGE_PHYS_HEAP_HINT(ePhysHeapIdx, uiPMRFlags); -+ } -+ -+ if (ePhysHeapIdx == PVRSRV_PHYS_HEAP_GPU_LOCAL) -+ { -+ if ((uiFlags & PVRSRV_MEMALLOCFLAGS_CPU_MAPPABLE_MASK) == 0) -+ { -+ ePhysHeapIdx = PVRSRV_PHYS_HEAP_GPU_PRIVATE; -+ PVRSRV_SET_PHYS_HEAP_HINT(GPU_PRIVATE, uiPMRFlags); -+ PVR_DPF((PVR_DBG_VERBOSE, "%s: Consider explicit use of GPU_PRIVATE for PMR %s." -+ " Implicit conversion to GPU PRIVATE performed", -+ __func__, pszAnnotation)); -+ } -+ else if (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) && -+ PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig)) -+ { -+ ePhysHeapIdx = PVRSRV_PHYS_HEAP_GPU_COHERENT; -+ PVRSRV_SET_PHYS_HEAP_HINT(GPU_COHERENT, uiPMRFlags); -+ } -+ } -+ else if (ePhysHeapIdx == PVRSRV_PHYS_HEAP_GPU_PRIVATE) -+ { -+ if (uiFlags & PVRSRV_MEMALLOCFLAGS_CPU_MAPPABLE_MASK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid flags for PMR %s!" -+ " Client requested GPU_PRIVATE physical heap with CPU access flags.", -+ __func__, pszAnnotation)); -+ return PVRSRV_ERROR_INVALID_HEAP; -+ } -+ } -+ -+ if (NULL == psDevNode->apsPhysHeap[ePhysHeapIdx]) -+ { -+ /* In case a heap hasn't been acquired for this type, return invalid heap error */ -+ PVR_DPF((PVR_DBG_ERROR, "%s: Requested allocation on device node (%p) from " -+ "an invalid heap (HeapIndex=%d)", -+ __func__, psDevNode, ePhysHeapIdx)); -+ return PVRSRV_ERROR_INVALID_HEAP; -+ } -+ -+#if defined(DEBUG) -+ if (PMRAllocFail > 0) -+ { -+ static IMG_UINT32 ui32AllocCount = 1; -+ -+ if (ui32AllocCount < PMRAllocFail) -+ { -+ ui32AllocCount++; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s failed on %d allocation.", -+ __func__, ui32AllocCount)); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ } -+#endif /* defined(DEBUG) */ -+ -+ /* If the driver is in an 'init' state all of the allocated memory -+ * should be attributed to the driver (PID 1) rather than to the -+ * process those allocations are made under. Same applies to the memory -+ * allocated for the Firmware. */ -+ if (psDevNode->eDevState < PVRSRV_DEVICE_STATE_ACTIVE || -+ PVRSRV_CHECK_FW_MAIN(uiFlags)) -+ { -+ uiPid = PVR_SYS_ALLOC_PID; -+ } -+ -+ /* ePhysHeapIdx and PhysHeap hint in uiPMRFlags match and provide the -+ * intended PhysHeap to use at this point, but systems vary so the next -+ * call may fallback (apsPhysHeap[]) or demote (OutOfMem) and not be the -+ * heap that was intended, e.g. GPU_PRIVATE index may fallback to GPU_LOCAL, -+ * GPU_LOCAL may demote to CPU_LOCAL. -+ * On output uiPMRFlags show the PhysHeap finally used. 
-+ */ -+ eError = PhysHeapCreatePMR(psDevNode->apsPhysHeap[ePhysHeapIdx], -+ psConnection, -+ uiSize, -+ ui32NumPhysChunks, -+ ui32NumVirtChunks, -+ pui32MappingTable, -+ uiLog2AllocPageSize, -+ uiPMRFlags, -+ pszAnnotation, -+ uiPid, -+ ppsPMRPtr, -+ ui32PDumpFlags, -+ &uiPMRFlags); -+ -+ if (puiPMRFlags != NULL) -+ { -+ *puiPMRFlags = uiPMRFlags; -+ } -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ if (eError != PVRSRV_OK) -+ { -+ PVRSRVStatsUpdateOOMStat(psConnection, -+ psDevNode, -+ PVRSRV_DEVICE_STAT_TYPE_OOM_PHYSMEM_COUNT, -+ OSGetCurrentClientProcessIDKM()); -+ } -+#endif -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+PhysmemNewRamBackedPMR(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_UINT32 uiLog2AllocPageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 uiAnnotationLength, -+ const IMG_CHAR *pszAnnotation, -+ IMG_PID uiPid, -+ PMR **ppsPMRPtr, -+ IMG_UINT32 ui32PDumpFlags, -+ PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags) -+{ -+ PVRSRV_PHYS_HEAP ePhysHeap = PVRSRV_GET_PHYS_HEAP_HINT(uiFlags); -+ PVRSRV_ERROR eError; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(uiAnnotationLength != 0, "uiAnnotationLength"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pszAnnotation != NULL, "pszAnnotation"); -+ -+ if (ePhysHeap == PVRSRV_PHYS_HEAP_DEFAULT) -+ { -+ ePhysHeap = psDevNode->psDevConfig->eDefaultHeap; -+ } -+ -+ if (!PhysHeapUserModeAlloc(ePhysHeap)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid phys heap hint: %d.", __func__, ePhysHeap)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ eError = PhysmemNewRamBackedPMR_direct(psConnection, -+ psDevNode, -+ uiSize, -+ ui32NumPhysChunks, -+ ui32NumVirtChunks, -+ pui32MappingTable, -+ uiLog2AllocPageSize, -+ uiFlags, -+ uiAnnotationLength, -+ pszAnnotation, -+ uiPid, -+ ppsPMRPtr, -+ ui32PDumpFlags, -+ puiPMRFlags); -+ if (eError == PVRSRV_OK) -+ { -+ /* Lock phys addresses if backing was allocated */ -+ if (PVRSRV_CHECK_PHYS_ALLOC_NOW(uiFlags)) -+ { -+ eError = PMRLockSysPhysAddresses(*ppsPMRPtr); -+ } -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+PVRSRVGetDefaultPhysicalHeapKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ PVRSRV_PHYS_HEAP *peHeap) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ *peHeap = psDevNode->psDevConfig->eDefaultHeap; -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+PVRSRVPhysHeapGetMemInfoKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT32 ui32PhysHeapCount, -+ PVRSRV_PHYS_HEAP *paePhysHeapID, -+ PHYS_HEAP_MEM_STATS *paPhysHeapMemStats) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ return PhysHeapGetMemInfo(psDevNode, -+ ui32PhysHeapCount, -+ paePhysHeapID, -+ paPhysHeapMemStats); -+} -+ -+/* 'Wrapper' function to call PMRImportPMR(), which first checks the PMR is -+ * for the current device. This avoids the need to do this in pmr.c, which -+ * would then need PVRSRV_DEVICE_NODE (defining this type in pmr.h causes a -+ * typedef redefinition issue). 
-+ */ -+#if defined(SUPPORT_INSECURE_EXPORT) -+PVRSRV_ERROR -+PhysmemImportPMR(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ PMR_EXPORT *psPMRExport, -+ PMR_PASSWORD_T uiPassword, -+ PMR_SIZE_T uiSize, -+ PMR_LOG2ALIGN_T uiLog2Contig, -+ PMR **ppsPMR) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ if (PMRGetExportDeviceNode(psPMRExport) != psDevNode) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device", __func__)); -+ return PVRSRV_ERROR_PMR_NOT_PERMITTED; -+ } -+ -+ return PMRImportPMR(psPMRExport, -+ uiPassword, -+ uiSize, -+ uiLog2Contig, -+ ppsPMR); -+} -+#endif /* if defined(SUPPORT_INSECURE_EXPORT) */ -diff --git a/drivers/gpu/drm/img-rogue/physmem.h b/drivers/gpu/drm/img-rogue/physmem.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physmem.h -@@ -0,0 +1,237 @@ -+/*************************************************************************/ /*! -+@File -+@Title Physmem header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for common entry point for creation of RAM backed PMR's -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#ifndef SRVSRV_PHYSMEM_H -+#define SRVSRV_PHYSMEM_H -+ -+/* include/ */ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_memallocflags.h" -+#include "connection_server.h" -+ -+/* services/server/include/ */ -+#include "pmr.h" -+#include "pmr_impl.h" -+ -+/* Valid values for TC_MEMORY_CONFIG configuration option */ -+#define TC_MEMORY_LOCAL (1) -+#define TC_MEMORY_HOST (2) -+#define TC_MEMORY_HYBRID (3) -+ -+/* Valid values for the PLATO_MEMORY_CONFIG configuration option */ -+#define PLATO_MEMORY_LOCAL (1) -+#define PLATO_MEMORY_HOST (2) -+#define PLATO_MEMORY_HYBRID (3) -+ -+/*************************************************************************/ /*! -+@Function DevPhysMemAlloc -+@Description Allocate memory from device specific heaps directly. -+@Input psDevNode device node to operate on -+@Input ui32MemSize Size of the memory to be allocated -+@Input u8Value Value to be initialised to. -+@Input bInitPage Flag to control initialisation -+@Input pszDevSpace PDUMP memory space in which the -+ allocation is to be done -+@Input pszSymbolicAddress Symbolic name of the allocation -+@Input phHandlePtr PDUMP handle to the allocation -+@Input uiPid PID of the process owning the allocation -+ (or PVR_SYS_ALLOC_PID if the allocation -+ belongs to the driver) -+@Output hMemHandle Handle to the allocated memory -+@Output psDevPhysAddr Device Physical address of allocated -+ page -+@Return PVRSRV_OK if the allocation is successful -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT32 ui32MemSize, -+ IMG_UINT32 ui32Log2Align, -+ const IMG_UINT8 u8Value, -+ IMG_BOOL bInitPage, -+#if defined(PDUMP) -+ const IMG_CHAR *pszDevSpace, -+ const IMG_CHAR *pszSymbolicAddress, -+ IMG_HANDLE *phHandlePtr, -+#endif -+ IMG_PID uiPid, -+ IMG_HANDLE hMemHandle, -+ IMG_DEV_PHYADDR *psDevPhysAddr); -+ -+/*************************************************************************/ /*! -+@Function DevPhysMemFree -+@Description Free memory to device specific heaps directly. -+@Input psDevNode device node to operate on -+@Input hPDUMPMemHandle Pdump handle to allocated memory -+@Input hMemHandle Devmem handle to allocated memory -+@Return None -+*/ /**************************************************************************/ -+void -+DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode, -+#if defined(PDUMP) -+ IMG_HANDLE hPDUMPMemHandle, -+#endif -+ IMG_HANDLE hMemHandle); -+ -+/* -+ * PhysmemNewRamBackedPMR -+ * -+ * This function will create a RAM backed PMR using the device specific -+ * callback, this allows control at a per-devicenode level to select the -+ * memory source thus supporting mixed UMA/LMA systems. -+ * -+ * The size must be a multiple of page size. The page size is specified in -+ * log2. It should be regarded as a minimum contiguity of which the -+ * resulting memory must be a multiple. It may be that this should be a fixed -+ * number. It may be that the allocation size needs to be a multiple of some -+ * coarser "page size" than that specified in the page size argument. -+ * For example, take an OS whose page granularity is a fixed 16kB, but the -+ * caller requests memory in page sizes of 4kB. The request can be satisfied -+ * if and only if the SIZE requested is a multiple of 16kB. 
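The comment above spells out the contiguity contract for PhysmemNewRamBackedPMR: the log2 page-size argument is a minimum contiguity, and the effective granularity is whichever is larger of that request and the OS page size. A minimal standalone sketch of the resulting size check, using illustrative names (contiguity_ok, os_page_shift) rather than the driver's own helpers:

    /* Illustrative check only: the effective contiguity is the larger of the
     * requested log2 page size and the OS page shift; the total size must be
     * a whole multiple of it. */
    static int contiguity_ok(unsigned long long size,
                             unsigned int log2_contig,
                             unsigned int os_page_shift)
    {
        unsigned int eff_shift = (log2_contig > os_page_shift) ? log2_contig
                                                               : os_page_shift;
        return (size & ((1ULL << eff_shift) - 1)) == 0;
    }

For the 16kB-page example above, contiguity_ok(0x4000, 12, 14) holds while contiguity_ok(0x1000, 12, 14) does not, which is exactly the case described as returning PVRSRV_ERROR_INVALID_PARAMS.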
If the arguments -+ * supplied are such that this OS cannot grant the request, -+ * PVRSRV_ERROR_INVALID_PARAMS will be returned. -+ * -+ * The caller should supply storage of a pointer. Upon successful return a -+ * PMR object will have been created and a pointer to it returned in the -+ * PMROut argument. -+ * -+ * A PMR successfully created should be destroyed with PhysmemUnrefPMR. -+ * -+ * Note that this function may cause memory allocations and on some operating -+ * systems this may cause scheduling events, so it is important that this -+ * function be called with interrupts enabled and in a context where -+ * scheduling events and memory allocations are permitted. -+ * -+ * The flags may be used by the implementation to change its behaviour if -+ * required. The flags will also be stored in the PMR as immutable metadata -+ * and returned to mmu_common when it asks for it. -+ * -+ * The PID specified is used to tie this allocation to the process context -+ * that the allocation is made on behalf of. -+ */ -+PVRSRV_ERROR -+PhysmemNewRamBackedPMR(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_UINT32 uiLog2PageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 uiAnnotationLength, -+ const IMG_CHAR *pszAnnotation, -+ IMG_PID uiPid, -+ PMR **ppsPMROut, -+ IMG_UINT32 ui32PDumpFlags, -+ PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags); -+ -+PVRSRV_ERROR -+PhysmemNewRamBackedPMR_direct(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_UINT32 uiLog2PageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 uiAnnotationLength, -+ const IMG_CHAR *pszAnnotation, -+ IMG_PID uiPid, -+ PMR **ppsPMROut, -+ IMG_UINT32 ui32PDumpFlags, -+ PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags); -+ -+/*************************************************************************/ /*! -+@Function PhysmemImportPMR -+@Description Import PMR a previously exported PMR -+@Input psPMRExport The exported PMR token -+@Input uiPassword Authorisation password -+ for the PMR being imported -+@Input uiSize Size of the PMR being imported -+ (for verification) -+@Input uiLog2Contig Log2 continuity of the PMR being -+ imported (for verification) -+@Output ppsPMR The imported PMR -+@Return PVRSRV_ERROR_PMR_NOT_PERMITTED if not for the same device -+ PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR if password incorrect -+ PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES if size or contiguity incorrect -+ PVRSRV_OK if successful -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PhysmemImportPMR(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ PMR_EXPORT *psPMRExport, -+ PMR_PASSWORD_T uiPassword, -+ PMR_SIZE_T uiSize, -+ PMR_LOG2ALIGN_T uiLog2Contig, -+ PMR **ppsPMR); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVGetDefaultPhysicalHeapKM -+@Description For the specified device, get the physical heap used for -+ allocations when the PVRSRV_PHYS_HEAP_DEFAULT -+ physical heap hint is set in memalloc flags. 
-+@Output peHeap Default Heap return value -+@Return PVRSRV_OK if successful -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVGetDefaultPhysicalHeapKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ PVRSRV_PHYS_HEAP *peHeap); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVPhysHeapGetMemInfoKM -+@Description Get the memory usage statistics for a given physical heap ID -+@Input ui32PhysHeapCount Physical Heap count -+@Input paePhysHeapID Array of Physical Heap ID's -+@Output paPhysHeapMemStats Buffer to hold the memory statistics -+@Return PVRSRV_OK if successful -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVPhysHeapGetMemInfoKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT32 ui32PhysHeapCount, -+ PVRSRV_PHYS_HEAP *paePhysHeapID, -+ PHYS_HEAP_MEM_STATS *paPhysHeapMemStats); -+ -+#endif /* SRVSRV_PHYSMEM_H */ -diff --git a/drivers/gpu/drm/img-rogue/physmem_dmabuf.c b/drivers/gpu/drm/img-rogue/physmem_dmabuf.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physmem_dmabuf.c -@@ -0,0 +1,1297 @@ -+/*************************************************************************/ /*! -+@File physmem_dmabuf.c -+@Title dmabuf memory allocator -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Part of the memory management. This module is responsible for -+ implementing the function callbacks for dmabuf memory. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include -+ -+#include "physmem_dmabuf.h" -+#include "pvrsrv.h" -+#include "pmr.h" -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP) -+ -+#include -+#include -+#include -+#include -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_memallocflags.h" -+ -+#include "allocmem.h" -+#include "osfunc.h" -+#include "pmr_impl.h" -+#include "hash.h" -+#include "private_data.h" -+#include "module_common.h" -+#include "pvr_ion_stats.h" -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+#include "ri_server.h" -+#endif -+ -+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) -+#include "mmap_stats.h" -+#endif -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#include "process_stats.h" -+#endif -+ -+#include "kernel_compatibility.h" -+ -+/* -+ * dma_buf_ops -+ * -+ * These are all returning errors if used. -+ * The point is to prevent anyone outside of our driver from importing -+ * and using our dmabuf. -+ */ -+ -+static int PVRDmaBufOpsAttach(struct dma_buf *psDmaBuf, -+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) && \ -+ !((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (defined(CHROMIUMOS_KERNEL)))) -+ struct device *psDev, -+#endif -+ struct dma_buf_attachment *psAttachment) -+{ -+ return -ENOSYS; -+} -+ -+static struct sg_table *PVRDmaBufOpsMap(struct dma_buf_attachment *psAttachment, -+ enum dma_data_direction eDirection) -+{ -+ /* Attach hasn't been called yet */ -+ return ERR_PTR(-EINVAL); -+} -+ -+static void PVRDmaBufOpsUnmap(struct dma_buf_attachment *psAttachment, -+ struct sg_table *psTable, -+ enum dma_data_direction eDirection) -+{ -+} -+ -+static void PVRDmaBufOpsRelease(struct dma_buf *psDmaBuf) -+{ -+ PMR *psPMR = (PMR *) psDmaBuf->priv; -+ -+ PMRUnrefPMR(psPMR); -+} -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) -+static void *PVRDmaBufOpsKMap(struct dma_buf *psDmaBuf, unsigned long uiPageNum) -+{ -+ return ERR_PTR(-ENOSYS); -+} -+#endif -+ -+static int PVRDmaBufOpsMMap(struct dma_buf *psDmaBuf, struct vm_area_struct *psVMA) -+{ -+ return -ENOSYS; -+} -+ -+static const struct dma_buf_ops sPVRDmaBufOps = -+{ -+ .attach = PVRDmaBufOpsAttach, -+ .map_dma_buf = PVRDmaBufOpsMap, -+ .unmap_dma_buf = PVRDmaBufOpsUnmap, -+ .release = PVRDmaBufOpsRelease, -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) -+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) && \ -+ !((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (defined(CHROMIUMOS_KERNEL)))) -+ .map_atomic = PVRDmaBufOpsKMap, -+#endif -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) -+ .map = PVRDmaBufOpsKMap, -+#endif -+#else -+ .kmap_atomic = PVRDmaBufOpsKMap, -+ .kmap = PVRDmaBufOpsKMap, -+#endif -+ .mmap = PVRDmaBufOpsMMap, -+}; -+ -+/* end of dma_buf_ops */ -+ -+ -+typedef struct _PMR_DMA_BUF_DATA_ -+{ -+ /* Filled in at PMR create time */ -+ PHYS_HEAP 
*psPhysHeap; -+ struct dma_buf_attachment *psAttachment; -+ PFN_DESTROY_DMABUF_PMR pfnDestroy; -+ IMG_BOOL bPoisonOnFree; -+ -+ /* Mapping information. */ -+ struct iosys_map sMap; -+ -+ /* Modified by PMR lock/unlock */ -+ struct sg_table *psSgTable; -+ IMG_DEV_PHYADDR *pasDevPhysAddr; -+ IMG_UINT32 ui32PhysPageCount; -+ IMG_UINT32 ui32VirtPageCount; -+ -+ IMG_BOOL bZombie; -+} PMR_DMA_BUF_DATA; -+ -+/* Start size of the g_psDmaBufHash hash table */ -+#define DMA_BUF_HASH_SIZE 20 -+ -+static DEFINE_MUTEX(g_FactoryLock); -+ -+static HASH_TABLE *g_psDmaBufHash; -+static IMG_UINT32 g_ui32HashRefCount; -+ -+#if defined(PVR_ANDROID_ION_USE_SG_LENGTH) -+#define pvr_sg_length(sg) ((sg)->length) -+#else -+#define pvr_sg_length(sg) sg_dma_len(sg) -+#endif -+ -+static int -+DmaBufSetValue(struct dma_buf *psDmaBuf, int iValue, const char *szFunc) -+{ -+ struct iosys_map sMap; -+ int err, err_end_access; -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) -+ int i; -+#endif -+ -+ err = dma_buf_begin_cpu_access(psDmaBuf, DMA_FROM_DEVICE); -+ if (err) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to begin cpu access (err=%d)", -+ szFunc, err)); -+ goto err_out; -+ } -+ -+ err = dma_buf_vmap(psDmaBuf, &sMap); -+ if (err) -+ { -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map page (err=%d)", -+ szFunc, err)); -+ goto exit_end_access; -+#else -+ for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++) -+ { -+ void *pvKernAddr; -+ -+ pvKernAddr = dma_buf_kmap(psDmaBuf, i); -+ if (IS_ERR_OR_NULL(pvKernAddr)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map page (err=%ld)", -+ szFunc, -+ pvKernAddr ? PTR_ERR(pvKernAddr) : -ENOMEM)); -+ err = !pvKernAddr ? -ENOMEM : -EINVAL; -+ -+ goto exit_end_access; -+ } -+ -+ memset(pvKernAddr, iValue, PAGE_SIZE); -+ -+ dma_buf_kunmap(psDmaBuf, i, pvKernAddr); -+ } -+#endif -+ } -+ else -+ { -+ memset(sMap.vaddr, iValue, psDmaBuf->size); -+ -+ dma_buf_vunmap(psDmaBuf, &sMap); -+ } -+ -+ err = 0; -+ -+exit_end_access: -+ do { -+ err_end_access = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE); -+ } while (err_end_access == -EAGAIN || err_end_access == -EINTR); -+ -+ if (err_end_access) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to end cpu access (err=%d)", -+ szFunc, err_end_access)); -+ if (!err) -+ { -+ err = err_end_access; -+ } -+ } -+ -+err_out: -+ return err; -+} -+ -+/***************************************************************************** -+ * PMR callback functions * -+ *****************************************************************************/ -+ -+/* This function is protected by the pfn(Get/Release)PMRFactoryLock() lock -+ * acquired/released in _UnrefAndMaybeDestroy() in pmr.c. 
*/ -+static void PMRFinalizeDmaBuf(PMR_IMPL_PRIVDATA pvPriv) -+{ -+ PMR_DMA_BUF_DATA *psPrivData = pvPriv; -+ struct dma_buf_attachment *psAttachment = psPrivData->psAttachment; -+ struct dma_buf *psDmaBuf = psAttachment->dmabuf; -+ struct sg_table *psSgTable = psPrivData->psSgTable; -+ -+ if (psDmaBuf->ops != &sPVRDmaBufOps) -+ { -+ if (g_psDmaBufHash) -+ { -+ /* We have a hash table so check if we've seen this dmabuf before */ -+ if (HASH_Remove(g_psDmaBufHash, (uintptr_t) psDmaBuf) != 0U) -+ { -+ g_ui32HashRefCount--; -+ -+ if (g_ui32HashRefCount == 0) -+ { -+ HASH_Delete(g_psDmaBufHash); -+ g_psDmaBufHash = NULL; -+ } -+ } -+ -+ PVRSRVIonRemoveMemAllocRecord(psDmaBuf); -+ } -+ } -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ if (psPrivData->bZombie) -+ { -+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE, -+ psPrivData->ui32PhysPageCount << PAGE_SHIFT, -+ OSGetCurrentClientProcessIDKM()); -+ } -+ else -+#endif -+ { -+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, -+ psPrivData->ui32PhysPageCount << PAGE_SHIFT, -+ OSGetCurrentClientProcessIDKM()); -+ } -+#endif -+ -+ psPrivData->ui32PhysPageCount = 0; -+ -+ dma_buf_unmap_attachment(psAttachment, psSgTable, DMA_BIDIRECTIONAL); -+ -+ if (psPrivData->bPoisonOnFree) -+ { -+ int err = DmaBufSetValue(psDmaBuf, PVRSRV_POISON_ON_FREE_VALUE, -+ __func__); -+ PVR_LOG_IF_FALSE(err != 0, "Failed to poison allocation before free"); -+ -+ PVR_ASSERT(err != 0); -+ } -+ -+ if (psPrivData->pfnDestroy) -+ { -+ psPrivData->pfnDestroy(psPrivData->psPhysHeap, psPrivData->psAttachment); -+ } -+ -+ OSFreeMem(psPrivData->pasDevPhysAddr); -+ OSFreeMem(psPrivData); -+} -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+static PVRSRV_ERROR PMRZombifyDmaBufMem(PMR_IMPL_PRIVDATA pvPriv, PMR *psPMR) -+{ -+ PMR_DMA_BUF_DATA *psPrivData = pvPriv; -+ struct dma_buf_attachment *psAttachment = psPrivData->psAttachment; -+ struct dma_buf *psDmaBuf = psAttachment->dmabuf; -+ -+ PVR_UNREFERENCED_PARAMETER(psPMR); -+ -+ psPrivData->bZombie = IMG_TRUE; -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, -+ psPrivData->ui32PhysPageCount << PAGE_SHIFT, -+ OSGetCurrentClientProcessIDKM()); -+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE, -+ psPrivData->ui32PhysPageCount << PAGE_SHIFT, -+ OSGetCurrentClientProcessIDKM()); -+#else -+ PVR_UNREFERENCED_PARAMETER(pvPriv); -+#endif -+ -+ PVRSRVIonZombifyMemAllocRecord(psDmaBuf); -+ -+ return PVRSRV_OK; -+} -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+static PVRSRV_ERROR PMRLockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv) -+{ -+ PVR_UNREFERENCED_PARAMETER(pvPriv); -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR PMRUnlockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv) -+{ -+ PVR_UNREFERENCED_PARAMETER(pvPriv); -+ return PVRSRV_OK; -+} -+ -+static void PMRFactoryLock(void) -+{ -+ mutex_lock(&g_FactoryLock); -+} -+ -+static void PMRFactoryUnlock(void) -+{ -+ mutex_unlock(&g_FactoryLock); -+} -+ -+static PVRSRV_ERROR PMRDevPhysAddrDmaBuf(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32NumOfPages, -+ IMG_DEVMEM_OFFSET_T *puiOffset, -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ IMG_UINT64 ui64IPAPolicyValue, -+ IMG_UINT64 ui64IPAClearMask, -+#endif -+ IMG_BOOL *pbValid, -+ IMG_DEV_PHYADDR *psDevPAddr) -+{ -+ PMR_DMA_BUF_DATA *psPrivData = pvPriv; -+ IMG_UINT32 ui32PageIndex; -+ IMG_UINT32 idx; -+ -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ 
PVR_UNREFERENCED_PARAMETER(ui64IPAPolicyValue); -+ PVR_UNREFERENCED_PARAMETER(ui64IPAClearMask); -+#endif -+ -+ if (ui32Log2PageSize != PAGE_SHIFT) -+ { -+ return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY; -+ } -+ -+ for (idx=0; idx < ui32NumOfPages; idx++) -+ { -+ if (pbValid[idx]) -+ { -+ IMG_UINT32 ui32InPageOffset; -+ -+ ui32PageIndex = puiOffset[idx] >> PAGE_SHIFT; -+ ui32InPageOffset = puiOffset[idx] - ((IMG_DEVMEM_OFFSET_T)ui32PageIndex << PAGE_SHIFT); -+ -+ PVR_LOG_RETURN_IF_FALSE(ui32PageIndex < psPrivData->ui32VirtPageCount, -+ "puiOffset out of range", PVRSRV_ERROR_OUT_OF_RANGE); -+ -+ PVR_ASSERT(ui32InPageOffset < PAGE_SIZE); -+ psDevPAddr[idx].uiAddr = psPrivData->pasDevPhysAddr[ui32PageIndex].uiAddr + ui32InPageOffset; -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ /* Modify the physical address with the associated IPA values */ -+ psDevPAddr[idx].uiAddr &= ~ui64IPAClearMask; -+ psDevPAddr[idx].uiAddr |= ui64IPAPolicyValue; -+#endif -+ } -+ } -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR -+PMRAcquireKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv, -+ size_t uiOffset, -+ size_t uiSize, -+ void **ppvKernelAddressOut, -+ IMG_HANDLE *phHandleOut, -+ PMR_FLAGS_T ulFlags) -+{ -+ PMR_DMA_BUF_DATA *psPrivData = pvPriv; -+ struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf; -+ PVRSRV_ERROR eError; -+ int err; -+ -+ if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Kernel mappings for sparse DMABufs " -+ "are not allowed!", __func__)); -+ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; -+ goto fail; -+ } -+ -+ err = dma_buf_begin_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL); -+ if (err) -+ { -+ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; -+ goto fail; -+ } -+ -+ err = dma_buf_vmap(psDmaBuf, &psPrivData->sMap); -+ if (err != 0 || psPrivData->sMap.vaddr == NULL) -+ { -+ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; -+ goto fail_kmap; -+ } -+ -+ *ppvKernelAddressOut = psPrivData->sMap.vaddr + uiOffset; -+ *phHandleOut = psPrivData->sMap.vaddr; -+ -+ return PVRSRV_OK; -+ -+fail_kmap: -+ do { -+ err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL); -+ } while (err == -EAGAIN || err == -EINTR); -+ -+fail: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+static void PMRReleaseKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_HANDLE hHandle) -+{ -+ PMR_DMA_BUF_DATA *psPrivData = pvPriv; -+ struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf; -+ int err; -+ -+ dma_buf_vunmap(psDmaBuf, &psPrivData->sMap); -+ -+ do { -+ err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL); -+ } while (err == -EAGAIN || err == -EINTR); -+} -+ -+static PVRSRV_ERROR PMRMMapDmaBuf(PMR_IMPL_PRIVDATA pvPriv, -+ PMR *psPMR, -+ PMR_MMAP_DATA pOSMMapData) -+{ -+ PMR_DMA_BUF_DATA *psPrivData = pvPriv; -+ struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf; -+ struct vm_area_struct *psVma = pOSMMapData; -+ int err; -+ -+ if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Not possible to MMAP sparse DMABufs", -+ __func__)); -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+ } -+ -+ err = dma_buf_mmap(psDmaBuf, psVma, 0); -+ if (err) -+ { -+ return (err == -EINVAL) ? 
PVRSRV_ERROR_NOT_SUPPORTED : PVRSRV_ERROR_BAD_MAPPING; -+ } -+ -+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) -+ MMapStatsAddOrUpdatePMR(psPMR, psVma->vm_end - psVma->vm_start); -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+static PMR_IMPL_FUNCTAB _sPMRDmaBufFuncTab = -+{ -+ .pfnLockPhysAddresses = PMRLockPhysAddressesDmaBuf, -+ .pfnUnlockPhysAddresses = PMRUnlockPhysAddressesDmaBuf, -+ .pfnDevPhysAddr = PMRDevPhysAddrDmaBuf, -+ .pfnAcquireKernelMappingData = PMRAcquireKernelMappingDataDmaBuf, -+ .pfnReleaseKernelMappingData = PMRReleaseKernelMappingDataDmaBuf, -+ .pfnMMap = PMRMMapDmaBuf, -+ .pfnFinalize = PMRFinalizeDmaBuf, -+ .pfnGetPMRFactoryLock = PMRFactoryLock, -+ .pfnReleasePMRFactoryLock = PMRFactoryUnlock, -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ .pfnZombify = PMRZombifyDmaBufMem, -+#endif -+}; -+ -+/***************************************************************************** -+ * Public facing interface * -+ *****************************************************************************/ -+ -+PVRSRV_ERROR -+PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, -+ struct dma_buf_attachment *psAttachment, -+ PFN_DESTROY_DMABUF_PMR pfnDestroy, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_DEVMEM_SIZE_T uiChunkSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_UINT32 ui32NameSize, -+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], -+ PMR **ppsPMRPtr) -+{ -+ struct dma_buf *psDmaBuf = psAttachment->dmabuf; -+ PMR_DMA_BUF_DATA *psPrivData; -+ PMR_FLAGS_T uiPMRFlags; -+ IMG_BOOL bZeroOnAlloc; -+ IMG_BOOL bPoisonOnAlloc; -+ IMG_BOOL bPoisonOnFree; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 i, j; -+ IMG_UINT32 uiPagesPerChunk = uiChunkSize >> PAGE_SHIFT; -+ IMG_UINT32 ui32PageCount = 0; -+ struct scatterlist *sg; -+ struct sg_table *table; -+ IMG_UINT32 uiSglOffset; -+ IMG_CHAR pszAnnotation[DEVMEM_ANNOTATION_MAX_LEN]; -+ -+ bZeroOnAlloc = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags); -+ bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags); -+#if defined(DEBUG) -+ bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags); -+#else -+ bPoisonOnFree = IMG_FALSE; -+#endif -+ if (bZeroOnAlloc && bPoisonOnFree) -+ { -+ /* Zero on Alloc and Poison on Alloc are mutually exclusive */ -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto errReturn; -+ } -+ -+ if (!PMRValidateSize((IMG_UINT64) ui32NumVirtChunks * uiChunkSize)) -+ { -+ PVR_LOG_VA(PVR_DBG_ERROR, -+ "PMR size exceeds limit #Chunks: %u ChunkSz %"IMG_UINT64_FMTSPECX"", -+ ui32NumVirtChunks, -+ uiChunkSize); -+ eError = PVRSRV_ERROR_PMR_TOO_LARGE; -+ goto errReturn; -+ } -+ -+ psPrivData = OSAllocZMem(sizeof(*psPrivData)); -+ if (psPrivData == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto errReturn; -+ } -+ -+ psPrivData->psPhysHeap = psHeap; -+ psPrivData->psAttachment = psAttachment; -+ psPrivData->pfnDestroy = pfnDestroy; -+ psPrivData->bPoisonOnFree = bPoisonOnFree; -+ psPrivData->ui32VirtPageCount = -+ (ui32NumVirtChunks * uiChunkSize) >> PAGE_SHIFT; -+ -+ psPrivData->pasDevPhysAddr = -+ OSAllocZMem(sizeof(*(psPrivData->pasDevPhysAddr)) * -+ psPrivData->ui32VirtPageCount); -+ if (!psPrivData->pasDevPhysAddr) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate buffer for physical addresses (oom)", -+ __func__)); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto errFreePrivData; -+ } -+ -+ if (bZeroOnAlloc || bPoisonOnAlloc) -+ { -+ int iValue = bZeroOnAlloc ? 
0 : PVRSRV_POISON_ON_ALLOC_VALUE; -+ int err; -+ -+ err = DmaBufSetValue(psDmaBuf, iValue, __func__); -+ if (err) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map buffer for %s", -+ __func__, -+ bZeroOnAlloc ? "zeroing" : "poisoning")); -+ -+ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; -+ goto errFreePhysAddr; -+ } -+ } -+ -+ table = dma_buf_map_attachment(psAttachment, DMA_BIDIRECTIONAL); -+ if (IS_ERR_OR_NULL(table)) -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto errFreePhysAddr; -+ } -+ -+ /* -+ * We do a two pass process: first work out how many pages there -+ * are and second, fill in the data. -+ */ -+ for_each_sg(table->sgl, sg, table->nents, i) -+ { -+ ui32PageCount += PAGE_ALIGN(pvr_sg_length(sg)) / PAGE_SIZE; -+ } -+ -+ if (WARN_ON(!ui32PageCount)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Number of phys. pages must not be zero", -+ __func__)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto errUnmap; -+ } -+ -+ if (WARN_ON(ui32PageCount != ui32NumPhysChunks * uiPagesPerChunk)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Requested physical chunks and actual " -+ "number of physical dma buf pages don't match", -+ __func__)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto errUnmap; -+ } -+ -+ psPrivData->ui32PhysPageCount = ui32PageCount; -+ psPrivData->psSgTable = table; -+ ui32PageCount = 0; -+ sg = table->sgl; -+ uiSglOffset = 0; -+ -+ -+ /* Fill physical address array */ -+ for (i = 0; i < ui32NumPhysChunks; i++) -+ { -+ for (j = 0; j < uiPagesPerChunk; j++) -+ { -+ IMG_UINT32 uiIdx = pui32MappingTable[i] * uiPagesPerChunk + j; -+ -+ psPrivData->pasDevPhysAddr[uiIdx].uiAddr = -+ sg_dma_address(sg) + uiSglOffset; -+ -+ /* Get the next offset for the current sgl or the next sgl */ -+ uiSglOffset += PAGE_SIZE; -+ if (uiSglOffset >= pvr_sg_length(sg)) -+ { -+ sg = sg_next(sg); -+ uiSglOffset = 0; -+ -+ /* Check that we haven't looped */ -+ if (WARN_ON(sg == table->sgl)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to fill phys. 
address " -+ "array", -+ __func__)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto errUnmap; -+ } -+ } -+ } -+ } -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, -+ psPrivData->ui32PhysPageCount << PAGE_SHIFT, -+ OSGetCurrentClientProcessIDKM()); -+#endif -+ -+ uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); -+ -+ /* -+ * Check no significant bits were lost in cast due to different -+ * bit widths for flags -+ */ -+ PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK)); -+ -+ if (OSSNPrintf((IMG_CHAR *)pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN, "ImpDmaBuf:%s", (IMG_CHAR *)pszName) < 0) -+ { -+ pszAnnotation[0] = '\0'; -+ } -+ else -+ { -+ pszAnnotation[DEVMEM_ANNOTATION_MAX_LEN-1] = '\0'; -+ } -+ -+ eError = PMRCreatePMR(psHeap, -+ ui32NumVirtChunks * uiChunkSize, -+ ui32NumPhysChunks, -+ ui32NumVirtChunks, -+ pui32MappingTable, -+ PAGE_SHIFT, -+ uiPMRFlags, -+ pszAnnotation, -+ &_sPMRDmaBufFuncTab, -+ psPrivData, -+ PMR_TYPE_DMABUF, -+ ppsPMRPtr, -+ PDUMP_NONE); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create PMR (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ goto errFreePhysAddr; -+ } -+ -+ return PVRSRV_OK; -+ -+errUnmap: -+ dma_buf_unmap_attachment(psAttachment, table, DMA_BIDIRECTIONAL); -+errFreePhysAddr: -+ OSFreeMem(psPrivData->pasDevPhysAddr); -+errFreePrivData: -+ OSFreeMem(psPrivData); -+errReturn: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+static void PhysmemDestroyDmaBuf(PHYS_HEAP *psHeap, -+ struct dma_buf_attachment *psAttachment) -+{ -+ struct dma_buf *psDmaBuf = psAttachment->dmabuf; -+ -+ PVR_UNREFERENCED_PARAMETER(psHeap); -+ -+ dma_buf_detach(psDmaBuf, psAttachment); -+ dma_buf_put(psDmaBuf); -+} -+ -+struct dma_buf * -+PhysmemGetDmaBuf(PMR *psPMR) -+{ -+ PMR_DMA_BUF_DATA *psPrivData; -+ -+ psPrivData = PMRGetPrivateData(psPMR, &_sPMRDmaBufFuncTab); -+ if (psPrivData) -+ { -+ return psPrivData->psAttachment->dmabuf; -+ } -+ -+ return NULL; -+} -+ -+PVRSRV_ERROR -+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ PMR *psPMR, -+ IMG_INT *piFd) -+{ -+ struct dma_buf *psDmaBuf; -+ IMG_DEVMEM_SIZE_T uiPMRSize; -+ PVRSRV_ERROR eError; -+ IMG_INT iFd; -+ -+ PMRFactoryLock(); -+ -+ PMRRefPMR(psPMR); -+ -+ PMR_LogicalSize(psPMR, &uiPMRSize); -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) -+ { -+ DEFINE_DMA_BUF_EXPORT_INFO(sDmaBufExportInfo); -+ -+ sDmaBufExportInfo.priv = psPMR; -+ sDmaBufExportInfo.ops = &sPVRDmaBufOps; -+ sDmaBufExportInfo.size = uiPMRSize; -+ sDmaBufExportInfo.flags = O_RDWR; -+ -+ psDmaBuf = dma_buf_export(&sDmaBufExportInfo); -+ } -+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) -+ psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps, -+ uiPMRSize, O_RDWR, NULL); -+#else -+ psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps, -+ uiPMRSize, O_RDWR); -+#endif -+ -+ if (IS_ERR_OR_NULL(psDmaBuf)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to export buffer (err=%ld)", -+ __func__, psDmaBuf ? 
PTR_ERR(psDmaBuf) : -ENOMEM)); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto fail_pmr_ref; -+ } -+ -+ iFd = dma_buf_fd(psDmaBuf, O_RDWR); -+ if (iFd < 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf fd (err=%d)", -+ __func__, iFd)); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto fail_dma_buf; -+ } -+ -+ PMRFactoryUnlock(); -+ *piFd = iFd; -+ -+ /* A PMR memory lay out can't change once exported -+ * This makes sure the exported and imported parties see -+ * the same layout of the memory */ -+ PMR_SetLayoutFixed(psPMR, IMG_TRUE); -+ -+ return PVRSRV_OK; -+ -+fail_dma_buf: -+ dma_buf_put(psDmaBuf); -+ -+fail_pmr_ref: -+ PMRUnrefPMR(psPMR); -+ PMRFactoryUnlock(); -+ -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+PVRSRV_ERROR -+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_INT fd, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 ui32NameSize, -+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], -+ PMR **ppsPMRPtr, -+ IMG_DEVMEM_SIZE_T *puiSize, -+ IMG_DEVMEM_ALIGN_T *puiAlign) -+{ -+ IMG_DEVMEM_SIZE_T uiSize; -+ IMG_UINT32 ui32MappingTable = 0; -+ struct dma_buf *psDmaBuf; -+ PVRSRV_ERROR eError; -+ -+ /* Get the buffer handle */ -+ psDmaBuf = dma_buf_get(fd); -+ if (IS_ERR_OR_NULL(psDmaBuf)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)", -+ __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM)); -+ return PVRSRV_ERROR_BAD_MAPPING; -+ -+ } -+ -+ uiSize = psDmaBuf->size; -+ -+ eError = PhysmemImportSparseDmaBuf(psConnection, -+ psDevNode, -+ fd, -+ uiFlags, -+ uiSize, -+ 1, -+ 1, -+ &ui32MappingTable, -+ ui32NameSize, -+ pszName, -+ ppsPMRPtr, -+ puiSize, -+ puiAlign); -+ -+ dma_buf_put(psDmaBuf); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+PhysmemImportDmaBufLocked(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_INT fd, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 ui32NameSize, -+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], -+ PMR **ppsPMRPtr, -+ IMG_DEVMEM_SIZE_T *puiSize, -+ IMG_DEVMEM_ALIGN_T *puiAlign) -+{ -+ PMR *psPMRPtr; -+ PVRSRV_ERROR eError; -+ -+ eError = PhysmemImportDmaBuf(psConnection, -+ psDevNode, -+ fd, -+ uiFlags, -+ ui32NameSize, -+ pszName, -+ &psPMRPtr, -+ puiSize, -+ puiAlign); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemImportDmaBuf"); -+ -+ if (!PVRSRV_CHECK_PHYS_ALLOC_NOW(PMR_Flags(psPMRPtr))) -+ { -+ eError = PMRLockSysPhysAddresses(psPMRPtr); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddresses", error_unref); -+ } -+ -+ *ppsPMRPtr = psPMRPtr; -+ return eError; -+ -+error_unref: -+ /* Undo the reference taken on the PMR in PhysmemImportDmaBuf */ -+ PMRUnrefPMR(psPMRPtr); -+ return eError; -+} -+ -+PVRSRV_ERROR -+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_INT fd, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_DEVMEM_SIZE_T uiChunkSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_UINT32 ui32NameSize, -+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], -+ PMR **ppsPMRPtr, -+ IMG_DEVMEM_SIZE_T *puiSize, -+ IMG_DEVMEM_ALIGN_T *puiAlign) -+{ -+ PMR *psPMR = NULL; -+ struct dma_buf_attachment *psAttachment; -+ struct dma_buf *psDmaBuf; -+ PVRSRV_ERROR eError; -+ IMG_BOOL bHashTableCreated = IMG_FALSE; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVR_GOTO_IF_INVALID_PARAM(psDevNode != NULL, eError, errReturn); -+ -+ /* Terminate string from bridge to prevent corrupt annotations in RI */ -+ if (pszName != 
NULL) -+ { -+ IMG_CHAR* pszName0 = (IMG_CHAR*) pszName; -+ pszName0[ui32NameSize-1] = '\0'; -+ } -+ -+ PMRFactoryLock(); -+ -+ /* Get the buffer handle */ -+ psDmaBuf = dma_buf_get(fd); -+ if (IS_ERR_OR_NULL(psDmaBuf)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)", -+ __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BAD_MAPPING, errUnlockReturn); -+ } -+ -+ if (psDmaBuf->ops == &sPVRDmaBufOps) -+ { -+ /* We exported this dma_buf, so we can just get its PMR. */ -+ psPMR = psDmaBuf->priv; -+ -+ /* However, we can't import it if it belongs to a different device. */ -+ if (PMR_DeviceNode(psPMR) != psDevNode) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device", -+ __func__)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PERMITTED, -+ errUnlockAndDMAPut); -+ } -+ } -+ else if (g_psDmaBufHash != NULL) -+ { -+ /* We have a hash table so check if we've seen this dmabuf -+ * before. */ -+ psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf); -+ } -+ else -+ { -+ /* As different processes may import the same dmabuf we need to -+ * create a hash table so we don't generate a duplicate PMR but -+ * rather just take a reference on an existing one. */ -+ g_psDmaBufHash = HASH_Create(DMA_BUF_HASH_SIZE); -+ PVR_GOTO_IF_NOMEM(g_psDmaBufHash, eError, errUnlockAndDMAPut); -+ -+ bHashTableCreated = IMG_TRUE; -+ } -+ -+ if (psPMR != NULL) -+ { -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ if (PMR_IsZombie(psPMR)) -+ { -+ PMRDequeueZombieAndRef(psPMR); -+ } -+ else -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+ { -+ /* Reuse the PMR we already created */ -+ PMRRefPMR(psPMR); -+ } -+ -+ *ppsPMRPtr = psPMR; -+ PMR_LogicalSize(psPMR, puiSize); -+ *puiAlign = PAGE_SIZE; -+ -+ PMRFactoryUnlock(); -+ dma_buf_put(psDmaBuf); -+ -+ /* We expect a PMR to be immutable at this point. -+ * But its explicitly set here to cover a corner case -+ * where a PMR created through non-DMA interface could be -+ * imported back again through DMA interface. */ -+ PMR_SetLayoutFixed(psPMR, IMG_TRUE); -+ -+ return PVRSRV_OK; -+ } -+ -+ /* Do we want this to be a sparse PMR? */ -+ if (ui32NumVirtChunks > 1) -+ { -+ IMG_UINT32 i; -+ -+ /* Parameter validation */ -+ if (psDmaBuf->size != (uiChunkSize * ui32NumPhysChunks) || -+ uiChunkSize != PAGE_SIZE || -+ ui32NumPhysChunks > ui32NumVirtChunks) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Requesting sparse buffer: " -+ "uiChunkSize ("IMG_DEVMEM_SIZE_FMTSPEC") must be equal to " -+ "OS page size (%lu). uiChunkSize * ui32NumPhysChunks " -+ "("IMG_DEVMEM_SIZE_FMTSPEC") must" -+ " be equal to the buffer size ("IMG_SIZE_FMTSPEC"). 
" -+ "ui32NumPhysChunks (%u) must be lesser or equal to " -+ "ui32NumVirtChunks (%u)", -+ __func__, -+ uiChunkSize, -+ PAGE_SIZE, -+ uiChunkSize * ui32NumPhysChunks, -+ psDmaBuf->size, -+ ui32NumPhysChunks, -+ ui32NumVirtChunks)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, -+ errUnlockAndDMAPut); -+ } -+ -+ /* Parameter validation - Mapping table entries*/ -+ for (i = 0; i < ui32NumPhysChunks; i++) -+ { -+ if (pui32MappingTable[i] > ui32NumVirtChunks) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Requesting sparse buffer: " -+ "Entry in mapping table (%u) is out of allocation " -+ "bounds (%u)", __func__, -+ (IMG_UINT32) pui32MappingTable[i], -+ (IMG_UINT32) ui32NumVirtChunks)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, -+ errUnlockAndDMAPut); -+ } -+ } -+ } -+ else -+ { -+ /* if ui32NumPhysChunks == 0 pui32MappingTable is NULL and because -+ * is ui32NumPhysChunks is set to 1 below we don't allow NULL array */ -+ if (pui32MappingTable == NULL) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, -+ errUnlockAndDMAPut); -+ } -+ -+ /* Make sure parameters are valid for non-sparse allocations as well */ -+ uiChunkSize = psDmaBuf->size; -+ ui32NumPhysChunks = 1; -+ ui32NumVirtChunks = 1; -+ } -+ -+ -+ psAttachment = dma_buf_attach(psDmaBuf, psDevNode->psDevConfig->pvOSDevice); -+ if (IS_ERR_OR_NULL(psAttachment)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to attach to dma-buf (err=%ld)", -+ __func__, psAttachment? PTR_ERR(psAttachment) : -ENOMEM)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BAD_MAPPING, -+ errUnlockAndDMAPut); -+ } -+ -+ /* -+ * Note: -+ * While we have no way to determine the type of the buffer we just -+ * assume that all dmabufs are from the same physical heap. -+ */ -+ eError = PhysmemCreateNewDmaBufBackedPMR(psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_EXTERNAL], -+ psAttachment, -+ PhysmemDestroyDmaBuf, -+ uiFlags, -+ uiChunkSize, -+ ui32NumPhysChunks, -+ ui32NumVirtChunks, -+ pui32MappingTable, -+ ui32NameSize, -+ pszName, -+ &psPMR); -+ PVR_GOTO_IF_ERROR(eError, errDMADetach); -+ -+ /* First time we've seen this dmabuf so store it in the hash table */ -+ HASH_Insert(g_psDmaBufHash, (uintptr_t) psDmaBuf, (uintptr_t) psPMR); -+ g_ui32HashRefCount++; -+ -+ PMRFactoryUnlock(); -+ -+ PVRSRVIonAddMemAllocRecord(psDmaBuf); -+ -+ *ppsPMRPtr = psPMR; -+ *puiSize = ui32NumVirtChunks * uiChunkSize; -+ *puiAlign = PAGE_SIZE; -+ -+ /* The memory that's just imported is owned by some other entity. 
-+ * Hence the memory layout cannot be changed through our API */ -+ PMR_SetLayoutFixed(psPMR, IMG_TRUE); -+ -+ return PVRSRV_OK; -+ -+errDMADetach: -+ dma_buf_detach(psDmaBuf, psAttachment); -+ -+errUnlockAndDMAPut: -+ if (bHashTableCreated) -+ { -+ HASH_Delete(g_psDmaBufHash); -+ g_psDmaBufHash = NULL; -+ } -+ dma_buf_put(psDmaBuf); -+ -+errUnlockReturn: -+ PMRFactoryUnlock(); -+ -+errReturn: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) */ -+ -+PVRSRV_ERROR -+PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, -+ struct dma_buf_attachment *psAttachment, -+ PFN_DESTROY_DMABUF_PMR pfnDestroy, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_DEVMEM_SIZE_T uiChunkSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_UINT32 ui32NameSize, -+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], -+ PMR **ppsPMRPtr) -+{ -+ PVR_UNREFERENCED_PARAMETER(psHeap); -+ PVR_UNREFERENCED_PARAMETER(psAttachment); -+ PVR_UNREFERENCED_PARAMETER(pfnDestroy); -+ PVR_UNREFERENCED_PARAMETER(uiFlags); -+ PVR_UNREFERENCED_PARAMETER(uiChunkSize); -+ PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); -+ PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks); -+ PVR_UNREFERENCED_PARAMETER(pui32MappingTable); -+ PVR_UNREFERENCED_PARAMETER(ui32NameSize); -+ PVR_UNREFERENCED_PARAMETER(pszName); -+ PVR_UNREFERENCED_PARAMETER(ppsPMRPtr); -+ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+} -+ -+struct dma_buf * -+PhysmemGetDmaBuf(PMR *psPMR) -+{ -+ PVR_UNREFERENCED_PARAMETER(psPMR); -+ -+ return NULL; -+} -+ -+PVRSRV_ERROR -+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ PMR *psPMR, -+ IMG_INT *piFd) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVR_UNREFERENCED_PARAMETER(psDevNode); -+ PVR_UNREFERENCED_PARAMETER(psPMR); -+ PVR_UNREFERENCED_PARAMETER(piFd); -+ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+} -+ -+PVRSRV_ERROR -+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_INT fd, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 ui32NameSize, -+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], -+ PMR **ppsPMRPtr, -+ IMG_DEVMEM_SIZE_T *puiSize, -+ IMG_DEVMEM_ALIGN_T *puiAlign) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVR_UNREFERENCED_PARAMETER(psDevNode); -+ PVR_UNREFERENCED_PARAMETER(fd); -+ PVR_UNREFERENCED_PARAMETER(uiFlags); -+ PVR_UNREFERENCED_PARAMETER(ui32NameSize); -+ PVR_UNREFERENCED_PARAMETER(pszName); -+ PVR_UNREFERENCED_PARAMETER(ppsPMRPtr); -+ PVR_UNREFERENCED_PARAMETER(puiSize); -+ PVR_UNREFERENCED_PARAMETER(puiAlign); -+ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+} -+ -+PVRSRV_ERROR -+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_INT fd, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_DEVMEM_SIZE_T uiChunkSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_UINT32 ui32NameSize, -+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], -+ PMR **ppsPMRPtr, -+ IMG_DEVMEM_SIZE_T *puiSize, -+ IMG_DEVMEM_ALIGN_T *puiAlign) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVR_UNREFERENCED_PARAMETER(psDevNode); -+ PVR_UNREFERENCED_PARAMETER(fd); -+ PVR_UNREFERENCED_PARAMETER(uiFlags); -+ PVR_UNREFERENCED_PARAMETER(ppsPMRPtr); -+ PVR_UNREFERENCED_PARAMETER(puiSize); -+ PVR_UNREFERENCED_PARAMETER(puiAlign); -+ PVR_UNREFERENCED_PARAMETER(uiChunkSize); -+ PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); 
-+ PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks); -+ PVR_UNREFERENCED_PARAMETER(pui32MappingTable); -+ PVR_UNREFERENCED_PARAMETER(ui32NameSize); -+ PVR_UNREFERENCED_PARAMETER(pszName); -+ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+} -+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP) */ -diff --git a/drivers/gpu/drm/img-rogue/physmem_dmabuf.h b/drivers/gpu/drm/img-rogue/physmem_dmabuf.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physmem_dmabuf.h -@@ -0,0 +1,124 @@ -+/**************************************************************************/ /*! -+@File physmem_dmabuf.h -+@Title Header for dmabuf PMR factory -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Part of the memory management. This module is responsible for -+ implementing the function callbacks importing Ion allocations -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /***************************************************************************/ -+ -+#if !defined(PHYSMEM_DMABUF_H) -+#define PHYSMEM_DMABUF_H -+ -+#include -+ -+#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__) -+#define __pvrsrv_defined_struct_enum__ -+#include -+#endif -+ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_memallocflags.h" -+#include "connection_server.h" -+ -+#include "pmr.h" -+ -+typedef void (*PFN_DESTROY_DMABUF_PMR)(PHYS_HEAP *psHeap, -+ struct dma_buf_attachment *psAttachment); -+ -+PVRSRV_ERROR -+PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, -+ struct dma_buf_attachment *psAttachment, -+ PFN_DESTROY_DMABUF_PMR pfnDestroy, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_DEVMEM_SIZE_T uiChunkSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_UINT32 ui32NameSize, -+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], -+ PMR **ppsPMRPtr); -+ -+struct dma_buf * -+PhysmemGetDmaBuf(PMR *psPMR); -+ -+PVRSRV_ERROR -+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ PMR *psPMR, -+ IMG_INT *piFd); -+ -+PVRSRV_ERROR -+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_INT fd, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 ui32NameSize, -+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], -+ PMR **ppsPMRPtr, -+ IMG_DEVMEM_SIZE_T *puiSize, -+ IMG_DEVMEM_ALIGN_T *puiAlign); -+ -+PVRSRV_ERROR -+PhysmemImportDmaBufLocked(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_INT fd, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32 ui32NameSize, -+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], -+ PMR **ppsPMRPtr, -+ IMG_DEVMEM_SIZE_T *puiSize, -+ IMG_DEVMEM_ALIGN_T *puiAlign); -+ -+PVRSRV_ERROR -+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_INT fd, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_DEVMEM_SIZE_T uiChunkSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_UINT32 ui32NameSize, -+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], -+ PMR **ppsPMRPtr, -+ IMG_DEVMEM_SIZE_T *puiSize, -+ IMG_DEVMEM_ALIGN_T *puiAlign); -+ -+#endif /* !defined(PHYSMEM_DMABUF_H) */ -diff --git a/drivers/gpu/drm/img-rogue/physmem_extmem.c b/drivers/gpu/drm/img-rogue/physmem_extmem.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physmem_extmem.c -@@ -0,0 +1,71 @@ -+/*************************************************************************/ /*! -+@File physmem_extmem.c -+@Title Physmem - External Memory Import Support -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Common entry point for Importing Externally allocated Memory -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_memallocflags.h" -+#include "pvrsrv.h" -+#include "physmem_extmem.h" -+ -+#if 0 -+#if defined(LMA) -+#pragma message "WARNING! You are compiling SUPPORT_WRAP_EXTMEM on a platform with LMA. " \ -+ "Make sure the memory you wrap is actually accessible by the GPU!" -+#endif -+#endif -+ -+PVRSRV_ERROR -+PhysmemWrapExtMem(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT64 pvCpuVAddr, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ PMR **ppsPMRPtr) -+{ -+ return PhysmemWrapExtMemOS(psConnection, -+ psDevNode, -+ uiSize, -+ (IMG_CPU_VIRTADDR)(uintptr_t)pvCpuVAddr, -+ uiFlags, -+ ppsPMRPtr); -+ -+} -diff --git a/drivers/gpu/drm/img-rogue/physmem_extmem.h b/drivers/gpu/drm/img-rogue/physmem_extmem.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physmem_extmem.h -@@ -0,0 +1,76 @@ -+/*************************************************************************/ /*! -+@File -+@Title Physmem_extmem header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for common entry point for creation of wrapped memory PMR's -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef SRVSRV_PHYSMEM_EXTMEM_H -+#define SRVSRV_PHYSMEM_EXTMEM_H -+ -+/* include/ */ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_memallocflags.h" -+#include "connection_server.h" -+ -+/* services/server/include/ */ -+#include "pmr.h" -+#include "pmr_impl.h" -+ -+/* -+ * Based on PhysmemNewRamBackedPMR but uses a passed in virtual address. -+ */ -+PVRSRV_ERROR -+PhysmemWrapExtMemOS(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_CPU_VIRTADDR pvCpuVAddr, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ PMR **ppsPMRPtr); -+ -+PVRSRV_ERROR -+PhysmemWrapExtMem(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT64 pvCpuVAddr, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ PMR **ppsPMROut); -+ -+#endif /* SRVSRV_PHYSMEM_EXTMEM_H */ -diff --git a/drivers/gpu/drm/img-rogue/physmem_extmem_linux.c b/drivers/gpu/drm/img-rogue/physmem_extmem_linux.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physmem_extmem_linux.c -@@ -0,0 +1,1026 @@ -+/*************************************************************************/ /*! -+@File -+@Title Implementation of PMR functions to wrap non-services allocated -+ memory. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Part of the memory management. This module is responsible for -+ implementing the function callbacks for physical memory. 
-+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#include -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+#include "pvr_debug.h" -+#include "devicemem_server_utils.h" -+ -+#include "physmem_extmem.h" -+#include "physmem_extmem_wrap.h" -+#include "pdump_physmem.h" -+#include "pmr.h" -+#include "pmr_impl.h" -+#include "physmem.h" -+#include "cache_km.h" -+ -+#include "kernel_compatibility.h" -+ -+typedef struct _WRAP_KERNEL_MAP_DATA_ -+{ -+ void *pvKernLinAddr; -+ IMG_BOOL bVMAP; -+} WRAP_KERNEL_MAP_DATA; -+ -+/* Free the PMR private data */ -+static void _FreeWrapData(PMR_WRAP_DATA *psPrivData) -+{ -+ OSFreeMem(psPrivData->ppsPageArray); -+ OSFreeMem(psPrivData->ppvPhysAddr); -+ OSFreeMem(psPrivData); -+} -+ -+ -+/* Allocate the PMR private data */ -+static PVRSRV_ERROR _AllocWrapData(PMR_WRAP_DATA **ppsPrivData, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_CPU_VIRTADDR pvCpuVAddr, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags) -+{ -+ PVRSRV_ERROR eError; -+ PMR_WRAP_DATA *psPrivData; -+ struct vm_area_struct *psVMArea; -+ IMG_UINT32 ui32CPUCacheMode; -+ -+ /* Find the VMA */ -+ psVMArea = find_vma(current->mm, (uintptr_t)pvCpuVAddr); -+ if (psVMArea == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Couldn't find memory region containing start address %p", -+ __func__, -+ (void*) pvCpuVAddr)); -+ return PVRSRV_ERROR_INVALID_CPU_ADDR; -+ } -+ -+ /* If requested size is larger than actual allocation -+ * return error. Can never request more memory to be imported -+ * than its original allocation */ -+ /* Now check the end address is in range */ -+ if (((uintptr_t)pvCpuVAddr + uiSize) > psVMArea->vm_end) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: End address %p is outside of the region returned by find_vma", -+ __func__, -+ (void*) (uintptr_t)((uintptr_t)pvCpuVAddr + uiSize))); -+ return PVRSRV_ERROR_BAD_PARAM_SIZE; -+ } -+ -+ /* Find_vma locates a region with an end point past a given -+ * virtual address. So check the address is actually in the region. 
*/ -+ if ((uintptr_t)pvCpuVAddr < psVMArea->vm_start) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Start address %p is outside of the region returned by find_vma", -+ __func__, -+ (void*) pvCpuVAddr)); -+ return PVRSRV_ERROR_INVALID_CPU_ADDR; -+ } -+ -+ eError = DevmemCPUCacheMode(psDevNode, -+ uiFlags, -+ &ui32CPUCacheMode); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+ /* Allocate and initialise private factory data */ -+ psPrivData = OSAllocZMem(sizeof(*psPrivData)); -+ if (psPrivData == NULL) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ psPrivData->ui32CPUCacheFlags = ui32CPUCacheMode; -+ -+ /* Assign the VMA area structure if needed later */ -+ psPrivData->psVMArea = psVMArea; -+ -+ psPrivData->psDevNode = psDevNode; -+ psPrivData->uiTotalNumPages = uiSize >> PAGE_SHIFT; -+ -+ /* Allocate page and phys address arrays */ -+ psPrivData->ppsPageArray = OSAllocZMem(sizeof(*(psPrivData->ppsPageArray)) * psPrivData->uiTotalNumPages); -+ if (psPrivData == NULL) -+ { -+ OSFreeMem(psPrivData); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ -+ } -+ -+ psPrivData->ppvPhysAddr = OSAllocZMem(sizeof(*(psPrivData->ppvPhysAddr)) * psPrivData->uiTotalNumPages); -+ if (psPrivData->ppvPhysAddr == NULL) -+ { -+ OSFreeMem(psPrivData->ppsPageArray); -+ OSFreeMem(psPrivData); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ if (uiFlags & (PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED)) -+ { -+ psPrivData->bWrite = IMG_TRUE; -+ } -+ -+ *ppsPrivData = psPrivData; -+ -+ return PVRSRV_OK; -+} -+ -+#if defined(SUPPORT_LINUX_WRAP_EXTMEM_PAGE_TABLE_WALK) -+/* Free the pages we got via _TryFindVMA() */ -+static void _FreeFindVMAPages(PMR_WRAP_DATA *psPrivData) -+{ -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psPrivData->uiTotalNumPages; i++) -+ { -+ if (psPrivData->ppsPageArray[i] != NULL) -+ { -+ /* Release the page */ -+ put_page(psPrivData->ppsPageArray[i]); -+ } -+ } -+} -+#endif -+ -+/* Release pages got via get_user_pages(). -+ * As the function comment for GUP states we have -+ * to set the page dirty if it has been written to and -+ * we have to put the page to decrease its refcount. 
*/ -+static void _FreeGetUserPages(PMR_WRAP_DATA *psPrivData) -+{ -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psPrivData->uiTotalNumPages; i++) -+ { -+ if (psPrivData->ppsPageArray[i]) -+ { -+ if (IMG_TRUE == psPrivData->bWrite) -+ { -+ /* Write data back to fs if necessary */ -+ set_page_dirty_lock(psPrivData->ppsPageArray[i]); -+ } -+ -+ /* Release the page */ -+ put_page(psPrivData->ppsPageArray[i]); -+ } -+ } -+} -+ -+/* Get the page structures and physical addresses mapped to -+ * a CPU virtual range via get_user_pages() */ -+static PVRSRV_ERROR _TryGetUserPages(PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_CPU_VIRTADDR pvCpuVAddr, -+ PMR_WRAP_DATA *psPrivData) -+{ -+ IMG_INT32 iMappedPages, i; -+ IMG_UINT64 ui64DmaMask = dma_get_mask(psDevNode->psDevConfig->pvOSDevice); -+ -+ /* Do the actual call */ -+ iMappedPages = get_user_pages_fast((uintptr_t) pvCpuVAddr, uiSize >> PAGE_SHIFT, -+ psPrivData->bWrite, psPrivData->ppsPageArray); -+ if (iMappedPages < 0) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "get_user_pages_fast() failed, got back %d, expected num pages %d", -+ iMappedPages, -+ psPrivData->uiTotalNumPages)); -+ -+ return PVRSRV_ERROR_FAILED_TO_ACQUIRE_PAGES; -+ } -+ -+ /* Fill the physical address array */ -+ for (i = 0; i < psPrivData->uiTotalNumPages; i++) -+ { -+ if (psPrivData->ppsPageArray[i]) -+ { -+ psPrivData->ppvPhysAddr[i].uiAddr = page_to_phys(psPrivData->ppsPageArray[i]); -+ psPrivData->uiNumBackedPages += 1; -+ } -+ else -+ { -+ psPrivData->ppvPhysAddr[i].uiAddr = 0; -+ } -+ -+ /* APOLLO test chips TCF5 or ES2 can only access 4G maximum memory from the card. -+ * This is due to the 32 bit PCI card interface to the host -+ * Hence pages with physical address beyond 4G range cannot be accessed by the device -+ * An error is reported in such a case -+ * -+ * The same restriction may apply on to platforms as well*/ -+ if (psPrivData->ppvPhysAddr[i].uiAddr & ~ui64DmaMask) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Backed page @pos:%d Physical Address: %pa exceeds GPU " -+ "accessible range(mask): 0x%0llx", -+ i, &psPrivData->ppvPhysAddr[i].uiAddr, ui64DmaMask)); -+ -+ psPrivData->bWrite = IMG_FALSE; -+ -+ /* Free the acquired pages */ -+ _FreeGetUserPages(psPrivData); -+ -+ return PVRSRV_ERROR_INVALID_PHYS_ADDR; -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/* Release all the pages we got via _TryGetUserPages or _TryFindVMA */ -+static PVRSRV_ERROR _WrapExtMemReleasePages(PMR_WRAP_DATA *psPrivData) -+{ -+ switch (psPrivData->eWrapType) -+ { -+ case WRAP_TYPE_GET_USER_PAGES: -+ _FreeGetUserPages(psPrivData); -+ break; -+#if defined(SUPPORT_LINUX_WRAP_EXTMEM_PAGE_TABLE_WALK) -+ case WRAP_TYPE_FIND_VMA: -+ _FreeFindVMAPages(psPrivData); -+ break; -+#endif -+ case WRAP_TYPE_NULL: -+ /* fall through */ -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Wrong wrap type, cannot release pages", -+ __func__)); -+ return PVRSRV_ERROR_INVALID_WRAP_TYPE; -+ } -+ -+ _FreeWrapData(psPrivData); -+ -+ return PVRSRV_OK; -+} -+ -+/* Try to get pages and physical addresses for a CPU virtual address. 
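/*
 * A minimal sketch of the pin/dirty/release contract used by
 * _TryGetUserPages()/_FreeGetUserPages() above: pin the user range with
 * get_user_pages_fast(), and on release mark pages dirty (if they may have
 * been written through the alternate mapping) before dropping the reference.
 * The modern gup_flags-based signature is assumed here, whereas the driver
 * code above still passes a write flag; names are illustrative only.
 */
#include <linux/mm.h>

static int sketch_pin_user_range(unsigned long start, int nr_pages,
				 bool write, struct page **pages)
{
	/* Returns the number of pages actually pinned, or a negative errno. */
	return get_user_pages_fast(start, nr_pages,
				   write ? FOLL_WRITE : 0, pages);
}

static void sketch_release_user_range(struct page **pages, int nr_pinned,
				      bool write)
{
	int i;

	for (i = 0; i < nr_pinned; i++) {
		if (!pages[i])
			continue;
		if (write)
			set_page_dirty_lock(pages[i]);	/* data may have been modified */
		put_page(pages[i]);			/* drop the GUP reference */
	}
}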
-+ * Will try both methods: -+ * get_user_pages or find_vma + page table walk if the first one fails */ -+static PVRSRV_ERROR _WrapExtMemAcquirePages(PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_CPU_VIRTADDR pvCpuVAddr, -+ PMR_WRAP_DATA *psPrivData) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = _TryGetUserPages(psDevNode, -+ uiSize, -+ pvCpuVAddr, -+ psPrivData); -+ if (eError == PVRSRV_OK) -+ { -+ psPrivData->eWrapType = WRAP_TYPE_GET_USER_PAGES; -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Used GetUserPages", -+ __func__)); -+ return PVRSRV_OK; -+ } -+ -+#if defined(SUPPORT_LINUX_WRAP_EXTMEM_PAGE_TABLE_WALK) -+ if (PVRSRV_ERROR_INVALID_PHYS_ADDR != eError) -+ { -+ PVRSRV_ERROR eError2; -+ eError2 = _TryFindVMA(uiSize, -+ (uintptr_t) pvCpuVAddr, -+ psPrivData); -+ if (eError2 == PVRSRV_OK) -+ { -+ psPrivData->eWrapType = WRAP_TYPE_FIND_VMA; -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Used FindVMA", -+ __func__)); -+ return PVRSRV_OK; -+ } -+ -+ PVR_WARN_IF_ERROR(eError, "_TryGetUserPages"); -+ PVR_LOG_ERROR(eError2, "_TryFindVMA"); -+ } -+ else -+ { -+ PVR_LOG_ERROR(eError, "_TryGetUserPages"); -+ } -+#else -+ PVR_LOG_ERROR(eError, "_TryGetUserPages"); -+#endif -+ -+ psPrivData->eWrapType = WRAP_TYPE_NULL; -+ return eError; -+} -+ -+static PVRSRV_ERROR -+PMRSysPhysAddrExtMem(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32NumOfPages, -+ IMG_DEVMEM_OFFSET_T *puiOffset, -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ IMG_UINT64 ui64IPAPolicyValue, -+ IMG_UINT64 ui64IPAClearMask, -+#endif -+ IMG_BOOL *pbValid, -+ IMG_DEV_PHYADDR *psDevPAddr) -+{ -+ const PMR_WRAP_DATA *psWrapData = pvPriv; -+ IMG_UINT32 uiPageSize = 1U << PAGE_SHIFT; -+ IMG_UINT32 uiInPageOffset; -+ IMG_UINT32 uiPageIndex; -+ IMG_UINT32 uiIdx; -+ -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ PVR_UNREFERENCED_PARAMETER(ui64IPAPolicyValue); -+ PVR_UNREFERENCED_PARAMETER(ui64IPAClearMask); -+#endif -+ -+ -+ if (PAGE_SHIFT != ui32Log2PageSize) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Requested ui32Log2PageSize %u is different from " -+ "OS page shift %u. Not supported.", -+ __func__, -+ ui32Log2PageSize, -+ PAGE_SHIFT)); -+ return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY; -+ } -+ -+ for (uiIdx=0; uiIdx < ui32NumOfPages; uiIdx++) -+ { -+ uiPageIndex = puiOffset[uiIdx] >> PAGE_SHIFT; -+ uiInPageOffset = puiOffset[uiIdx] - ((IMG_DEVMEM_OFFSET_T)uiPageIndex << PAGE_SHIFT); -+ -+ PVR_LOG_RETURN_IF_FALSE(uiPageIndex < psWrapData->uiTotalNumPages, -+ "puiOffset out of range", PVRSRV_ERROR_OUT_OF_RANGE); -+ -+ PVR_ASSERT(uiInPageOffset < uiPageSize); -+ -+ /* We always handle CPU physical addresses in this PMR factory -+ * but this callback expects device physical addresses so we have to translate. */ -+ PhysHeapCpuPAddrToDevPAddr(psWrapData->psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL], -+ 1, -+ &psDevPAddr[uiIdx], -+ &psWrapData->ppvPhysAddr[uiPageIndex]); -+ -+ pbValid[uiIdx] = (psDevPAddr[uiIdx].uiAddr)? 
IMG_TRUE:IMG_FALSE; -+ -+ psDevPAddr[uiIdx].uiAddr += uiInPageOffset; -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ psDevPAddr[uiIdx].uiAddr &= ~ui64IPAClearMask; -+ psDevPAddr[uiIdx].uiAddr |= ui64IPAPolicyValue; -+#endif /* PVRSRV_SUPPORT_IPA_FEATURE */ -+ } -+ -+ return PVRSRV_OK; -+} -+ -+ -+static void -+PMRFinalizeExtMem(PMR_IMPL_PRIVDATA pvPriv) -+{ -+ PMR_WRAP_DATA *psWrapData = pvPriv; -+ -+ PVRSRV_ERROR eError = _WrapExtMemReleasePages(psWrapData); -+ PVR_LOG_IF_ERROR(eError, "_WrapExtMemReleasePages"); -+} -+ -+ -+static void _UnmapPage(PMR_WRAP_DATA *psWrapData, -+ WRAP_KERNEL_MAP_DATA *psMapData) -+{ -+ IMG_BOOL bSuccess; -+ -+ if (psMapData->bVMAP) -+ { -+ vunmap(psMapData->pvKernLinAddr); -+ } -+ else -+ { -+ if (psWrapData->ui32CPUCacheFlags & ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Unable to unmap wrapped extmem " -+ "page - wrong cached mode flags passed. This may leak " -+ "memory.", __func__)); -+ PVR_ASSERT(!"Found non-cpu cache mode flag when unmapping from " -+ "the cpu"); -+ } -+ else -+ { -+ bSuccess = OSUnMapPhysToLin(psMapData->pvKernLinAddr, PAGE_SIZE); -+ if (!bSuccess) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Unable to unmap wrapped extmem " -+ "page. This may leak memory.", __func__)); -+ } -+ } -+ } -+} -+ -+static void _MapPage(PMR_WRAP_DATA *psWrapData, -+ IMG_UINT32 uiPageIdx, -+ WRAP_KERNEL_MAP_DATA *psMapData) -+{ -+ IMG_UINT8 *puiLinCPUAddr; -+ -+ if (psWrapData->ppsPageArray[uiPageIdx]) -+ { -+ pgprot_t prot = PAGE_KERNEL; -+ -+ switch (PVRSRV_CPU_CACHE_MODE(psWrapData->ui32CPUCacheFlags)) -+ { -+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: -+ prot = pgprot_noncached(prot); -+ break; -+ -+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: -+ prot = pgprot_writecombine(prot); -+ break; -+ -+ case PVRSRV_MEMALLOCFLAG_CPU_CACHED: -+ break; -+ -+ default: -+ break; -+ } -+ -+ puiLinCPUAddr = vmap(&psWrapData->ppsPageArray[uiPageIdx], 1, VM_MAP, prot); -+ -+ psMapData->bVMAP = IMG_TRUE; -+ } -+ else -+ { -+ puiLinCPUAddr = (IMG_UINT8*) OSMapPhysToLin(psWrapData->ppvPhysAddr[uiPageIdx], -+ PAGE_SIZE, -+ psWrapData->ui32CPUCacheFlags); -+ -+ psMapData->bVMAP = IMG_FALSE; -+ } -+ -+ psMapData->pvKernLinAddr = puiLinCPUAddr; -+ -+} -+ -+static PVRSRV_ERROR _CopyBytesExtMem(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT8 *pcBuffer, -+ size_t uiBufSz, -+ size_t *puiNumBytes, -+ IMG_BOOL bWrite) -+{ -+ PMR_WRAP_DATA *psWrapData = (PMR_WRAP_DATA*) pvPriv; -+ size_t uiBytesToCopy = uiBufSz; -+ IMG_UINT32 uiPageIdx = uiOffset >> PAGE_SHIFT; -+ IMG_BOOL bFirst = IMG_TRUE; -+ WRAP_KERNEL_MAP_DATA sKernMapData; -+ -+ -+ if ((uiBufSz + uiOffset) > (psWrapData->uiTotalNumPages << PAGE_SHIFT)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Trying to read out of bounds of PMR.", -+ __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* Copy the pages */ -+ while (uiBytesToCopy != 0) -+ { -+ size_t uiBytesPerLoop; -+ IMG_DEVMEM_OFFSET_T uiCopyOffset; -+ -+ if (bFirst) -+ { -+ bFirst = IMG_FALSE; -+ uiCopyOffset = (uiOffset & ~PAGE_MASK); -+ uiBytesPerLoop = (uiBufSz >= (PAGE_SIZE - uiCopyOffset)) ? -+ PAGE_SIZE - uiCopyOffset : uiBufSz; -+ } -+ else -+ { -+ uiCopyOffset = 0; -+ uiBytesPerLoop = (uiBytesToCopy > PAGE_SIZE) ? 
PAGE_SIZE : uiBytesToCopy; -+ } -+ -+ _MapPage(psWrapData, -+ uiPageIdx, -+ &sKernMapData); -+ -+ if (sKernMapData.pvKernLinAddr == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Unable to map wrapped extmem page.", -+ __func__)); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ /* Use 'DeviceMemCopy' because we need to be conservative. We can't -+ * know whether the wrapped memory was originally imported as cached. -+ */ -+ -+ if (bWrite) -+ { -+ OSDeviceMemCopy(sKernMapData.pvKernLinAddr + uiCopyOffset, -+ pcBuffer, -+ uiBytesPerLoop); -+ } -+ else -+ { -+ OSDeviceMemCopy(pcBuffer, -+ sKernMapData.pvKernLinAddr + uiCopyOffset, -+ uiBytesPerLoop); -+ } -+ -+ _UnmapPage(psWrapData, -+ &sKernMapData); -+ -+ uiBytesToCopy -= uiBytesPerLoop; -+ uiPageIdx++; -+ } -+ -+ *puiNumBytes = uiBufSz; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR -+PMRReadBytesExtMem(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT8 *pcBuffer, -+ size_t uiBufSz, -+ size_t *puiNumBytes) -+{ -+ -+ return _CopyBytesExtMem(pvPriv, -+ uiOffset, -+ pcBuffer, -+ uiBufSz, -+ puiNumBytes, -+ IMG_FALSE); -+} -+ -+static PVRSRV_ERROR -+PMRWriteBytesExtMem(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT8 *pcBuffer, -+ size_t uiBufSz, -+ size_t *puiNumBytes) -+{ -+ return _CopyBytesExtMem(pvPriv, -+ uiOffset, -+ pcBuffer, -+ uiBufSz, -+ puiNumBytes, -+ IMG_TRUE); -+} -+ -+ -+static PVRSRV_ERROR -+PMRAcquireKernelMappingDataExtMem(PMR_IMPL_PRIVDATA pvPriv, -+ size_t uiOffset, -+ size_t uiSize, -+ void **ppvKernelAddressOut, -+ IMG_HANDLE *phHandleOut, -+ PMR_FLAGS_T ulFlags) -+{ -+ PVRSRV_ERROR eError; -+ PMR_WRAP_DATA *psWrapData = (PMR_WRAP_DATA*) pvPriv; -+ WRAP_KERNEL_MAP_DATA *psKernMapData; -+ IMG_UINT32 ui32PageIndex = uiOffset >> PAGE_SHIFT; -+ IMG_UINT32 ui32PageOffset = (IMG_UINT32)(uiOffset & ~PAGE_MASK); -+ -+ /* Offset was out of bounds */ -+ if (ui32PageIndex > psWrapData->uiTotalNumPages) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Error, offset out of PMR bounds.", -+ __func__)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto e0; -+ } -+ -+ /* We can not map in more than one page with ioremap. 
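/*
 * A minimal sketch of the single-page kernel-mapping pattern behind the
 * _MapPage()/_UnmapPage() pair used above: vmap() one struct page with a
 * pgprot chosen to match the requested CPU cache mode, then vunmap() when
 * done. The three-way cache-mode enum is an assumption for illustration.
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>

enum sketch_cache_mode { SKETCH_CACHED, SKETCH_WRITE_COMBINE, SKETCH_UNCACHED };

static void *sketch_map_one_page(struct page *page, enum sketch_cache_mode mode)
{
	pgprot_t prot = PAGE_KERNEL;

	if (mode == SKETCH_UNCACHED)
		prot = pgprot_noncached(prot);
	else if (mode == SKETCH_WRITE_COMBINE)
		prot = pgprot_writecombine(prot);

	/* VM_MAP marks the area as an ordinary vmap()ed range. */
	return vmap(&page, 1, VM_MAP, prot);
}

static void sketch_unmap_one_page(void *kaddr)
{
	vunmap(kaddr);
}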
-+ * Only possible with physically contiguous pages */ -+ if ((ui32PageOffset + uiSize) > PAGE_SIZE) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Error, cannot map more than one page for wrapped extmem.", -+ __func__)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto e0; -+ } -+ -+ psKernMapData = OSAllocMem(sizeof(*psKernMapData)); -+ if (psKernMapData == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Error, unable to allocate memory for kernel mapping data.", -+ __func__)); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto e0; -+ } -+ -+ _MapPage(psWrapData, -+ ui32PageIndex, -+ psKernMapData); -+ -+ if (psKernMapData->pvKernLinAddr == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Unable to map wrapped extmem page.", -+ __func__)); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto e1; -+ } -+ -+ *ppvKernelAddressOut = ((IMG_CHAR *) psKernMapData->pvKernLinAddr) + ui32PageOffset; -+ *phHandleOut = psKernMapData; -+ -+ return PVRSRV_OK; -+ -+ /* error exit paths follow */ -+e1: -+ OSFreeMem(psKernMapData); -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+static void PMRReleaseKernelMappingDataExtMem(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_HANDLE hHandle) -+{ -+ PMR_WRAP_DATA *psWrapData = (PMR_WRAP_DATA*) pvPriv; -+ WRAP_KERNEL_MAP_DATA *psKernMapData = (void*) hHandle; -+ -+ _UnmapPage(psWrapData, -+ psKernMapData); -+ -+ OSFreeMem(psKernMapData); -+} -+ -+static inline void begin_user_mode_access(IMG_UINT *uiState) -+{ -+#if defined(CONFIG_ARM) && defined(CONFIG_CPU_SW_DOMAIN_PAN) -+ *uiState = uaccess_save_and_enable(); -+#elif defined(CONFIG_ARM64) && defined(CONFIG_ARM64_SW_TTBR0_PAN) -+ PVR_UNREFERENCED_PARAMETER(uiState); -+ uaccess_enable_privileged(); -+#elif defined(CONFIG_X86) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,168)) -+ PVR_UNREFERENCED_PARAMETER(uiState); -+ __uaccess_begin(); -+#else -+ PVR_UNREFERENCED_PARAMETER(uiState); -+#endif -+ -+} -+ -+static inline void end_user_mode_access(IMG_UINT uiState) -+{ -+#if defined(CONFIG_ARM) && defined(CONFIG_CPU_SW_DOMAIN_PAN) -+ uaccess_restore(uiState); -+#elif defined(CONFIG_ARM64) && defined(CONFIG_ARM64_SW_TTBR0_PAN) -+ PVR_UNREFERENCED_PARAMETER(uiState); -+ uaccess_disable_privileged(); -+#elif defined(CONFIG_X86) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,168)) -+ PVR_UNREFERENCED_PARAMETER(uiState); -+ __uaccess_end(); -+#else -+ PVR_UNREFERENCED_PARAMETER(uiState); -+#endif -+} -+ -+static PVRSRV_ERROR _FlushUMVirtualRange(PVRSRV_DEVICE_NODE *psDevNode, -+ PMR_WRAP_DATA *psPrivData, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_CPU_VIRTADDR pvCpuVAddr) -+{ -+ struct vm_area_struct *psVMArea; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT uiUserAccessState=0; -+ -+ mmap_read_lock(current->mm); -+ -+ /* Check the addr space is not torn down in the mean time */ -+ psVMArea = psPrivData->psVMArea; -+ -+ /* -+ * Latest kernels enable "Privileged access never" feature in the kernel -+ * This features leads to a privilege access failure fault (oops) whenever -+ * the kernel tries to access any user mode address. -+ * . -+ * This is an additional security feature that allows kernel -+ * to be protected against possible software attacks. -+ * In this case, as we need to flush the cache lines associated -+ * with the user mode virtual address, we need to explicitly disable -+ * the feature for the duration of access and re-enable it once done. 
-+ * */ -+ begin_user_mode_access(&uiUserAccessState); -+ { -+#if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_METAG) -+ -+ IMG_CPU_PHYADDR sCPUPhysStart = {0}; -+ -+ eError = CacheOpExec (psDevNode, -+ pvCpuVAddr, -+ ((IMG_UINT8 *)pvCpuVAddr + uiSize), -+ sCPUPhysStart, -+ sCPUPhysStart, -+ PVRSRV_CACHE_OP_CLEAN); -+ -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to clean the virtual region cache %p", -+ __func__, -+ pvCpuVAddr)); -+ goto UMFlushFailed; -+ } -+#else -+ IMG_CPU_PHYADDR sCPUPhysStart, sCPUPhysEnd; -+ void *pvVirtStart, *pvVirtEnd; -+ IMG_UINT i = 0; -+ -+ for (i = 0; i < psPrivData->uiTotalNumPages; i++) -+ { -+ if (NULL != psPrivData->ppsPageArray[i]) -+ { -+ pvVirtStart = pvCpuVAddr + (i * PAGE_SIZE); -+ pvVirtEnd = pvVirtStart + PAGE_SIZE; -+ -+ sCPUPhysStart.uiAddr = psPrivData->ppvPhysAddr[i].uiAddr; -+ sCPUPhysEnd.uiAddr = sCPUPhysStart.uiAddr + PAGE_SIZE; -+ -+ eError = CacheOpExec (psDevNode, -+ pvVirtStart, -+ pvVirtEnd, -+ sCPUPhysStart, -+ sCPUPhysEnd, -+ PVRSRV_CACHE_OP_CLEAN); -+ -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to clean the virtual region cache %p", -+ __func__, -+ pvCpuVAddr)); -+ goto UMFlushFailed; -+ } -+ } -+ } -+#endif -+ } -+ -+UMFlushFailed: -+ end_user_mode_access(uiUserAccessState); -+ -+ mmap_read_unlock(current->mm); -+ return eError; -+} -+ -+static PMR_IMPL_FUNCTAB _sPMRWrapPFuncTab = { -+ .pfnLockPhysAddresses = NULL, -+ .pfnUnlockPhysAddresses = NULL, -+ .pfnDevPhysAddr = &PMRSysPhysAddrExtMem, -+ .pfnAcquireKernelMappingData = PMRAcquireKernelMappingDataExtMem, -+ .pfnReleaseKernelMappingData = PMRReleaseKernelMappingDataExtMem, -+ .pfnReadBytes = PMRReadBytesExtMem, -+ .pfnWriteBytes = PMRWriteBytesExtMem, -+ .pfnChangeSparseMem = NULL, -+ .pfnChangeSparseMemCPUMap = NULL, -+ .pfnFinalize = &PMRFinalizeExtMem, -+}; -+ -+static inline PVRSRV_ERROR PhysmemValidateParam( IMG_DEVMEM_SIZE_T uiSize, -+ IMG_CPU_VIRTADDR pvCpuVAddr, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags) -+{ -+ if (!access_ok(pvCpuVAddr, uiSize)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Invalid User mode CPU virtual address")); -+ return PVRSRV_ERROR_INVALID_CPU_ADDR; -+ } -+ -+ /* Fail if requesting coherency on one side but uncached on the other */ -+ if ((PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) && -+ (PVRSRV_CHECK_GPU_UNCACHED(uiFlags) || PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags)))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Request for CPU coherency but specifying GPU uncached " -+ "Please use GPU cached flags for coherency.")); -+ return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE; -+ } -+ -+ if ((PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) && -+ (PVRSRV_CHECK_CPU_UNCACHED(uiFlags) || PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags)))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Request for GPU coherency but specifying CPU uncached " -+ "Please use CPU cached flags for coherency.")); -+ return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE; -+ } -+ -+#if !defined(PVRSRV_WRAP_EXTMEM_WRITE_ATTRIB_ENABLE) -+ if (uiFlags & (PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED | -+ PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED | -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | -+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC -+#if defined(DEBUG) -+ | PVRSRV_MEMALLOCFLAG_POISON_ON_FREE -+#endif -+ )) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Write Attribute not supported. 
Passed Flags: 0x%"PVRSRV_MEMALLOCFLAGS_FMTSPEC, -+ __func__, uiFlags)); -+ return PVRSRV_ERROR_INVALID_FLAGS; -+ } -+#endif -+ -+ /* Check address and size alignment */ -+ if (uiSize & (PAGE_SIZE - 1)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Given size %llu is not multiple of OS page size (%lu)", -+ __func__, -+ uiSize, -+ PAGE_SIZE)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (((uintptr_t) pvCpuVAddr) & (PAGE_SIZE - 1)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Given address %p is not aligned to OS page size (%lu)", -+ __func__, -+ pvCpuVAddr, -+ PAGE_SIZE)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+PhysmemWrapExtMemOS(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_CPU_VIRTADDR pvCpuVAddr, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ PMR **ppsPMRPtr) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 *pui32MappingTable = NULL; -+ PMR_WRAP_DATA *psPrivData; -+ PMR *psPMR; -+ IMG_UINT uiTotalNumPages = (uiSize >> PAGE_SHIFT); -+ IMG_UINT i = 0; -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) -+ /* Ignore the most significant byte. */ -+ pvCpuVAddr = (IMG_CPU_VIRTADDR)untagged_addr((uintptr_t)pvCpuVAddr); -+#endif -+ -+ eError = PhysmemValidateParam(uiSize, -+ pvCpuVAddr, -+ uiFlags); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+ /* Allocate private factory data */ -+ eError = _AllocWrapData(&psPrivData, -+ psDevNode, -+ uiSize, -+ pvCpuVAddr, -+ uiFlags); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+ /* Actually find and acquire the pages and physical addresses */ -+ eError = _WrapExtMemAcquirePages(psDevNode, -+ uiSize, -+ pvCpuVAddr, -+ psPrivData); -+ if (eError != PVRSRV_OK) -+ { -+ _FreeWrapData(psPrivData); -+ goto e0; -+ } -+ -+ -+ pui32MappingTable = OSAllocMem(sizeof(*pui32MappingTable) * uiTotalNumPages); -+ if (pui32MappingTable == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto e1; -+ } -+ -+ for (i = 0; i < uiTotalNumPages; i++) -+ { -+ pui32MappingTable[i] = i; -+ } -+ -+ /* Avoid creating scratch page or zero page when the entire -+ * allocation is backed and pinned */ -+ if (psPrivData->uiNumBackedPages == psPrivData->uiTotalNumPages) -+ { -+ uiFlags |= PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING; -+ } -+ -+ /* Create a suitable PMR */ -+ eError = PMRCreatePMR(psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL], -+ uiSize, /* PMR_SIZE_T uiLogicalSize */ -+ uiTotalNumPages, /* IMG_UINT32 ui32NumPhysChunks */ -+ uiTotalNumPages, /* IMG_UINT32 ui32NumVirtChunks */ -+ pui32MappingTable, -+ PAGE_SHIFT, /* PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee */ -+ (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK), /* PMR_FLAGS_T uiFlags */ -+ "WrappedExtMem", /* const IMG_CHAR *pszAnnotation */ -+ &_sPMRWrapPFuncTab, /* const PMR_IMPL_FUNCTAB *psFuncTab */ -+ psPrivData, /* PMR_IMPL_PRIVDATA pvPrivData */ -+ PMR_TYPE_EXTMEM, -+ &psPMR, /* PMR **ppsPMRPtr */ -+ PDUMP_NONE); /* IMG_UINT32 ui32PDumpFlags */ -+ if (eError != PVRSRV_OK) -+ { -+ goto e2; -+ } -+ -+ if (PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags)) -+ { -+ eError = _FlushUMVirtualRange(psDevNode, -+ psPrivData, -+ uiSize, -+ pvCpuVAddr); -+ if (eError != PVRSRV_OK) -+ { -+ goto e3; -+ } -+ } -+ -+ /* Mark the PMR such that no layout changes can happen. 
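/*
 * A minimal sketch of the pre-wrap validation performed above before any
 * pages are pinned: strip pointer tag bits, require a page-aligned address
 * and size, and check the range with access_ok(). The single-argument
 * untagged_addr() form and the errno values are assumptions; this is
 * illustrative only, not the driver's PhysmemValidateParam().
 */
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

static int sketch_validate_user_range(unsigned long uaddr, size_t size)
{
	uaddr = untagged_addr(uaddr);		/* ignore tag bits (e.g. ARM64 TBI) */

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(size))
		return -EINVAL;

	if (!access_ok((void __user *)uaddr, size))
		return -EFAULT;

	return 0;
}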
-+ * The memory is allocated in the CPU domain and hence -+ * no changes can be made through any of our API */ -+ PMR_SetLayoutFixed(psPMR, IMG_TRUE); -+ -+ *ppsPMRPtr = psPMR; -+ -+ OSFreeMem(pui32MappingTable); -+ -+ return PVRSRV_OK; -+e3: -+ PMRUnrefPMR(psPMR); -+e2: -+ OSFreeMem(pui32MappingTable); -+e1: -+ _WrapExtMemReleasePages(psPrivData); -+e0: -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/physmem_extmem_wrap.h b/drivers/gpu/drm/img-rogue/physmem_extmem_wrap.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physmem_extmem_wrap.h -@@ -0,0 +1,115 @@ -+/*************************************************************************/ /*! -+@File physmem_extmem_wrap.h -+@Title Header for wrapping non-services allocated memory PMR factory. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Part of the memory management. This module is responsible for -+ implementing the support for wrapping non services allocated -+ memory into GPU space. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#ifndef SRVSRV_PHYSMEM_EXTEMEM_WRAP_H -+#define SRVSRV_PHYSMEM_EXTEMEM_WRAP_H -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "img_types.h" -+#include "device.h" -+ -+typedef enum _WRAP_EXT_MEM_TYPE_ -+{ -+ WRAP_TYPE_NULL = 0, -+ WRAP_TYPE_GET_USER_PAGES, -+#if defined(SUPPORT_LINUX_WRAP_EXTMEM_PAGE_TABLE_WALK) -+ WRAP_TYPE_FIND_VMA -+#endif -+} WRAP_EXT_MEM_TYPE; -+ -+typedef struct _PMR_WRAP_DATA_ -+{ -+ /* Device for which this allocation has been made */ -+ PVRSRV_DEVICE_NODE *psDevNode; -+ -+ /* Total Number of pages in the allocation */ -+ IMG_UINT32 uiTotalNumPages; -+ -+ /* Total number of actual physically backed pages */ -+ IMG_UINT32 uiNumBackedPages; -+ -+ /* This is only filled if we have page mappings, -+ * for pfn mappings this stays empty */ -+ struct page **ppsPageArray; -+ -+ /* VM Area structure */ -+ struct vm_area_struct *psVMArea; -+ -+ /* This should always be filled and hold the physical addresses */ -+ IMG_CPU_PHYADDR *ppvPhysAddr; -+ -+ /* Did we find the pages via get_user_pages() -+ * or via FindVMA and a page table walk? */ -+ WRAP_EXT_MEM_TYPE eWrapType; -+ -+ /* CPU caching modes for the PMR mapping */ -+ IMG_UINT32 ui32CPUCacheFlags; -+ -+ /* Write permitted ?*/ -+ IMG_BOOL bWrite; -+ -+} PMR_WRAP_DATA; -+ -+#if defined(SUPPORT_LINUX_WRAP_EXTMEM_PAGE_TABLE_WALK) -+/* Find the VMA to a given CPU virtual address and do a -+ * page table walk to find the corresponding pfns */ -+PVRSRV_ERROR _TryFindVMA(IMG_DEVMEM_SIZE_T uiSize, -+ uintptr_t pvCpuVAddr, -+ PMR_WRAP_DATA *psPrivData); -+#endif -+ -+#endif /* SRVSRV_PHYSMEM_EXTEMEM_WRAP_H */ -diff --git a/drivers/gpu/drm/img-rogue/physmem_hostmem.c b/drivers/gpu/drm/img-rogue/physmem_hostmem.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physmem_hostmem.c -@@ -0,0 +1,207 @@ -+/*************************************************************************/ /*! -+@File physmem_hostmem.c -+@Title Host memory device node functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Functions relevant to device memory allocations made from host -+ mem device node. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "physmem_hostmem.h" -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "allocmem.h" -+#include "physheap.h" -+#include "pvrsrv_device.h" -+#include "physheap.h" -+#include "physmem_osmem.h" -+#include "sysconfig.h" -+ -+static void HostMemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr); -+ -+static void HostMemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr, -+ IMG_DEV_PHYADDR *psDevPAddr); -+ -+/* heap callbacks for host driver's device's heap */ -+static PHYS_HEAP_FUNCTIONS gsHostMemDevPhysHeapFuncs = -+{ -+ /* pfnCpuPAddrToDevPAddr */ -+ HostMemCpuPAddrToDevPAddr, -+ /* pfnDevPAddrToCpuPAddr */ -+ HostMemDevPAddrToCpuPAddr, -+}; -+ -+static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[]; -+ -+/* heap configuration for host driver's device */ -+static PHYS_HEAP_CONFIG gsPhysHeapConfigHostMemDevice[] = -+{ -+ { -+ PHYS_HEAP_TYPE_UMA, -+ "SYSMEM", -+ &gsHostMemDevPhysHeapFuncs, -+ {0}, -+ {0}, -+ 0, -+ "uma_cpu_local", -+ (IMG_HANDLE)&gsHostMemDevConfig[0], -+ PHYS_HEAP_USAGE_CPU_LOCAL, -+ } -+}; -+ -+/* device configuration for host driver's device */ -+static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[] = -+{ -+ { -+ .pszName = "HostMemDevice", -+ .eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE, -+ .pasPhysHeaps = &gsPhysHeapConfigHostMemDevice[0], -+ .ui32PhysHeapCount = ARRAY_SIZE(gsPhysHeapConfigHostMemDevice), -+ } -+}; -+ -+static void HostMemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr) -+{ -+ PVR_UNREFERENCED_PARAMETER(hPrivData); -+ -+ /* Optimise common case */ -+ psDevPAddr[0].uiAddr = phys_cpu2gpu(psCpuPAddr[0].uiAddr); -+ if (ui32NumOfAddr > 1) -+ { -+ IMG_UINT32 ui32Idx; -+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) -+ { -+ psDevPAddr[ui32Idx].uiAddr = phys_cpu2gpu(psCpuPAddr[ui32Idx].uiAddr); -+ } -+ } -+} -+ -+static void HostMemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr, -+ IMG_DEV_PHYADDR *psDevPAddr) -+{ -+ PVR_UNREFERENCED_PARAMETER(hPrivData); -+ -+ /* Optimise common case */ -+ psCpuPAddr[0].uiAddr = 
phys_gpu2cpu(psDevPAddr[0].uiAddr); -+ if (ui32NumOfAddr > 1) -+ { -+ IMG_UINT32 ui32Idx; -+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) -+ { -+ psCpuPAddr[ui32Idx].uiAddr = phys_gpu2cpu(psDevPAddr[ui32Idx].uiAddr); -+ } -+ } -+} -+ -+PVRSRV_ERROR HostMemDeviceCreate(PVRSRV_DEVICE_NODE **ppsDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ PVRSRV_DEVICE_CONFIG *psDevConfig = &gsHostMemDevConfig[0]; -+ -+ /* Assert ensures HostMemory device isn't already created and -+ * that data is initialised */ -+ PVR_ASSERT(*ppsDeviceNode == NULL); -+ -+ /* for now, we only know a single heap (UMA) config for host device */ -+ PVR_ASSERT(psDevConfig->ui32PhysHeapCount == 1 && -+ psDevConfig->pasPhysHeaps[0].eType == PHYS_HEAP_TYPE_UMA); -+ -+ /* N.B.- In case of any failures in this function, we just return error to -+ the caller, as clean-up is taken care by _HostMemDeviceDestroy function */ -+ -+ psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode)); -+ PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "OSAllocZMem"); -+ -+ /* early save return pointer to aid clean-up */ -+ *ppsDeviceNode = psDeviceNode; -+ -+ psDeviceNode->sDevId.ui32InternalID = PVRSRV_HOST_DEVICE_ID; -+ psDeviceNode->psDevConfig = psDevConfig; -+ psDeviceNode->psPhysHeapList = NULL; -+ -+ eError = OSLockCreate(&psDeviceNode->hPhysHeapLock); -+ PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); -+ -+ eError = PhysHeapCreateHeapFromConfig(psDeviceNode, -+ &psDevConfig->pasPhysHeaps[0], -+ NULL); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig"); -+ -+ /* Only CPU local heap is valid on host-mem DevNode, so enable minimal callbacks */ -+ eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_CPU_LOCAL, -+ psDeviceNode, -+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquire"); -+ -+ dllist_init(&psDeviceNode->sCleanupThreadWorkList); -+ -+ return PVRSRV_OK; -+} -+ -+void HostMemDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ if (!psDeviceNode) -+ { -+ return; -+ } -+ else -+ { -+ if (psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]) -+ { -+ PhysHeapRelease(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]); -+ } -+ -+ PhysHeapDestroyDeviceHeaps(psDeviceNode); -+ } -+ -+ OSLockDestroy(psDeviceNode->hPhysHeapLock); -+ -+ OSFreeMem(psDeviceNode); -+} -diff --git a/drivers/gpu/drm/img-rogue/physmem_hostmem.h b/drivers/gpu/drm/img-rogue/physmem_hostmem.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physmem_hostmem.h -@@ -0,0 +1,65 @@ -+/*************************************************************************/ /*! -+@File physmem_hostmem.h -+@Title Host memory device node header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(PHYSMEM_HOSTMEM_H) -+#define PHYSMEM_HOSTMEM_H -+ -+#include "pvrsrv_device.h" -+#include "device.h" -+ -+/*************************************************************************/ /*! -+@Function HostMemDeviceCreate -+@Description Allocate memory for and create host memory device node. -+@Output ppsDeviceNode Pointer to device node pointer. -+@Return PVRSRV_ERROR PVRSRV_OK or error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR HostMemDeviceCreate(PVRSRV_DEVICE_NODE **ppsDeviceNode); -+ -+/*************************************************************************/ /*! -+@Function HostMemDeviceDestroy -+@Description Destroy host memory device node. -+@Input psDeviceNode Pointer to device node. -+*/ /**************************************************************************/ -+void HostMemDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+#endif /* !defined(PHYSMEM_HOSTMEM_H) */ -diff --git a/drivers/gpu/drm/img-rogue/physmem_lma.c b/drivers/gpu/drm/img-rogue/physmem_lma.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physmem_lma.c -@@ -0,0 +1,2934 @@ -+/*************************************************************************/ /*! -+@File physmem_lma.c -+@Title Local card memory allocator -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Part of the memory management. This module is responsible for -+ implementing the function callbacks for local card memory. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_memallocflags.h" -+#include "rgx_pdump_panics.h" -+#include "allocmem.h" -+#include "osfunc.h" -+#include "pvrsrv.h" -+#include "devicemem_server_utils.h" -+#include "physmem_lma.h" -+#include "pdump_km.h" -+#include "pmr.h" -+#include "pmr_impl.h" -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#include "process_stats.h" -+#endif -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+#include "rgxutils.h" -+#endif -+ -+#if defined(INTEGRITY_OS) -+#include "mm.h" -+#include "integrity_memobject.h" -+#endif -+ -+/* Assert that the conversions between the RA base type and the device -+ * physical address are safe. -+ */ -+static_assert(sizeof(IMG_DEV_PHYADDR) == sizeof(RA_BASE_T), -+ "Size IMG_DEV_PHYADDR != RA_BASE_T"); -+ -+/* Since 0x0 is a valid DevPAddr, we rely on max 64-bit value to be an invalid -+ * page address */ -+#define INVALID_PAGE_ADDR ~((IMG_UINT64)0x0) -+#define ZERO_PAGE_VALUE 0 -+ -+typedef struct _PMR_KERNEL_MAP_HANDLE_ { -+ void *vma; -+ void *pvKernelAddress; -+ /* uiSize has 2 uses: -+ * In Physically contiguous case it is used to track size of the mapping -+ * for free. 
-+ * In Physically sparse case it is used to determine free path to use, single page -+ * sparse mapping or multi page -+ */ -+ size_t uiSize; -+} PMR_KERNEL_MAPPING; -+ -+typedef struct _PMR_LMALLOCARRAY_DATA_ { -+ -+#define FLAG_ZERO (0U) -+#define FLAG_POISON_ON_FREE (1U) -+#define FLAG_POISON_ON_ALLOC (2U) -+#define FLAG_ONDEMAND (3U) -+#define FLAG_SPARSE (4U) -+#define FLAG_PHYS_CONTIG (5U) -+#define FLAG_ZOMBIE (6U) -+ -+ IMG_PID uiPid; -+ -+ /* -+ * N.B Chunks referenced in this struct commonly are -+ * to OS page sized. But in reality it is dependent on -+ * the uiLog2ChunkSize. -+ * Chunks will always be one 1 << uiLog2ChunkSize in size. -+ * */ -+ -+ /* -+ * The number of chunks currently allocated in the PMR. -+ */ -+ IMG_INT32 iNumChunksAllocated; -+ -+ /* -+ * Total number of (Virtual) chunks supported by this PMR. -+ */ -+ IMG_UINT32 uiTotalNumChunks; -+ -+ /* The number of chunks to next be allocated for the PMR. -+ * This will initially be the number allocated at first alloc -+ * but may be changed in later calls to change sparse. -+ * It represents the number of chunks to next be allocated. -+ * This is used to store this value because we have the ability to -+ * defer allocation. -+ */ -+ IMG_UINT32 uiChunksToAlloc; -+ -+ /* -+ * Log2 representation of the chunksize. -+ */ -+ IMG_UINT32 uiLog2ChunkSize; -+ -+ /* Physical heap and arena pointers for this allocation */ -+ PHYS_HEAP* psPhysHeap; -+ RA_ARENA* psArena; -+ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags; -+ -+ /* -+ Connection data for this requests' originating process. NULL for -+ direct-bridge originating calls -+ */ -+ CONNECTION_DATA *psConnection; -+ -+ /* -+ * Allocation flags related to the pages: -+ * Zero - Should we Zero memory on alloc -+ * Poison on free - Should we Poison the memory on free. -+ * Poison on alloc - Should we Poison the memory on alloc. -+ * On demand - Is the allocation on Demand i.e Do we defer allocation to time of use. -+ * Sparse - Is the PMR sparse. -+ * Phys Contig - Is the alloc Physically contiguous -+ * Zombie - Is zombie -+ * */ -+ IMG_UINT32 ui32Flags; -+ -+ RA_BASE_ARRAY_T aBaseArray; /* Array of RA Bases */ -+ -+} PMR_LMALLOCARRAY_DATA; -+ -+#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__) -+/* Global structure to manage GPU memory leak */ -+static DEFINE_MUTEX(g_sLMALeakMutex); -+static IMG_UINT32 g_ui32LMALeakCounter = 0; -+#endif -+ -+#if defined(PVRSRV_PHYSHEAP_DISABLE_OOM_DEMOTION) -+#define PHYSHEAP_DPF_LVL PVR_DBG_ERROR -+#else -+#define PHYSHEAP_DPF_LVL PVR_DBG_WARNING -+#endif -+ -+ -+typedef struct PHYSMEM_LMA_DATA_TAG { -+ RA_ARENA *psRA; -+ IMG_CPU_PHYADDR sStartAddr; -+ IMG_DEV_PHYADDR sCardBase; -+ IMG_UINT64 uiSize; -+} PHYSMEM_LMA_DATA; -+ -+/* -+ * This function will set the psDevPAddr to whatever the system layer -+ * has set it for the referenced heap. -+ * It will not fail if the psDevPAddr is invalid. -+ */ -+static PVRSRV_ERROR -+_GetDevPAddr(PHEAP_IMPL_DATA pvImplData, -+ IMG_DEV_PHYADDR *psDevPAddr) -+{ -+ PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData; -+ -+ *psDevPAddr = psLMAData->sCardBase; -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * This function will set the psCpuPAddr to whatever the system layer -+ * has set it for the referenced heap. -+ * It will not fail if the psCpuPAddr is invalid. 
-+ */ -+static PVRSRV_ERROR -+_GetCPUPAddr(PHEAP_IMPL_DATA pvImplData, -+ IMG_CPU_PHYADDR *psCpuPAddr) -+{ -+ PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData; -+ -+ *psCpuPAddr = psLMAData->sStartAddr; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR -+_GetSize(PHEAP_IMPL_DATA pvImplData, -+ IMG_UINT64 *puiSize) -+{ -+ PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData; -+ -+ *puiSize = psLMAData->uiSize; -+ -+ return PVRSRV_OK; -+} -+ -+static IMG_UINT32 -+_GetPageShift(void) -+{ -+ return PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT; -+} -+ -+static void PhysmemGetLocalRamMemStats(PHEAP_IMPL_DATA pvImplData, -+ IMG_UINT64 *pui64TotalSize, -+ IMG_UINT64 *pui64FreeSize) -+{ -+ PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData; -+ RA_USAGE_STATS sRAUsageStats; -+ -+ RA_Get_Usage_Stats(psLMAData->psRA, &sRAUsageStats); -+ -+ *pui64TotalSize = sRAUsageStats.ui64TotalArenaSize; -+ *pui64FreeSize = sRAUsageStats.ui64FreeArenaSize; -+} -+ -+#if !defined(SUPPORT_GPUVIRT_VALIDATION) -+static PVRSRV_ERROR -+PhysmemGetArenaLMA(PHYS_HEAP *psPhysHeap, -+ RA_ARENA **ppsArena) -+{ -+ PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)PhysHeapGetImplData(psPhysHeap); -+ -+ PVR_LOG_RETURN_IF_FALSE(psLMAData != NULL, "psLMAData", PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ *ppsArena = psLMAData->psRA; -+ -+ return PVRSRV_OK; -+} -+#endif -+ -+static PVRSRV_ERROR -+_CreateArenas(PHEAP_IMPL_DATA pvImplData, IMG_CHAR *pszLabel, PHYS_HEAP_POLICY uiPolicy) -+{ -+ PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData; -+ -+ IMG_UINT32 ui32RAPolicy = -+ ((uiPolicy & PHYS_HEAP_POLOCY_ALLOC_ALLOW_NONCONTIG_MASK) == PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG) -+ ? RA_POLICY_ALLOC_ALLOW_NONCONTIG : RA_POLICY_DEFAULT; -+ -+ psLMAData->psRA = RA_Create_With_Span(pszLabel, -+ OSGetPageShift(), -+ psLMAData->sStartAddr.uiAddr, -+ psLMAData->sCardBase.uiAddr, -+ psLMAData->uiSize, -+ ui32RAPolicy); -+ PVR_LOG_RETURN_IF_NOMEM(psLMAData->psRA, "RA_Create_With_Span"); -+ -+ return PVRSRV_OK; -+} -+ -+static void -+_DestroyArenas(PHEAP_IMPL_DATA pvImplData) -+{ -+ PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData; -+ -+ /* Remove RAs and RA names for local card memory */ -+ if (psLMAData->psRA) -+ { -+ RA_Delete(psLMAData->psRA); -+ psLMAData->psRA = NULL; -+ } -+} -+ -+static void -+_DestroyImplData(PHEAP_IMPL_DATA pvImplData) -+{ -+ PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData; -+ -+ _DestroyArenas(pvImplData); -+ -+ OSFreeMem(psLMAData); -+} -+ -+struct _PHYS_HEAP_ITERATOR_ { -+ PHYS_HEAP *psPhysHeap; -+ RA_ARENA_ITERATOR *psRAIter; -+ -+ IMG_UINT64 uiTotalSize; -+ IMG_UINT64 uiInUseSize; -+}; -+ -+PVRSRV_ERROR LMA_HeapIteratorCreate(PVRSRV_DEVICE_NODE *psDevNode, -+ PVRSRV_PHYS_HEAP ePhysHeap, -+ PHYS_HEAP_ITERATOR **ppsIter) -+{ -+ PVRSRV_ERROR eError; -+ PHYSMEM_LMA_DATA *psLMAData; -+ PHYS_HEAP_ITERATOR *psHeapIter; -+ PHYS_HEAP *psPhysHeap = NULL; -+ RA_USAGE_STATS sStats; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(ppsIter != NULL, "ppsIter"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode"); -+ -+ eError = PhysHeapAcquireByID(ePhysHeap, psDevNode, &psPhysHeap); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquireByID"); -+ -+ PVR_LOG_GOTO_IF_FALSE(PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA, -+ "PhysHeap must be of LMA type", release_heap); -+ -+ psLMAData = (PHYSMEM_LMA_DATA *) PhysHeapGetImplData(psPhysHeap); -+ -+ psHeapIter = OSAllocMem(sizeof(*psHeapIter)); -+ PVR_LOG_GOTO_IF_NOMEM(psHeapIter, eError, release_heap); -+ -+ psHeapIter->psPhysHeap = 
psPhysHeap; -+ psHeapIter->psRAIter = RA_IteratorAcquire(psLMAData->psRA, IMG_FALSE); -+ PVR_LOG_GOTO_IF_NOMEM(psHeapIter->psRAIter, eError, free_heap_iter); -+ -+ /* get heap usage */ -+ RA_Get_Usage_Stats(psLMAData->psRA, &sStats); -+ -+ psHeapIter->uiTotalSize = sStats.ui64TotalArenaSize; -+ psHeapIter->uiInUseSize = sStats.ui64TotalArenaSize - sStats.ui64FreeArenaSize; -+ -+ *ppsIter = psHeapIter; -+ -+ return PVRSRV_OK; -+ -+free_heap_iter: -+ OSFreeMem(psHeapIter); -+ -+release_heap: -+ PhysHeapRelease(psPhysHeap); -+ -+ return eError; -+} -+ -+void LMA_HeapIteratorDestroy(PHYS_HEAP_ITERATOR *psIter) -+{ -+ PHYS_HEAP_ITERATOR *psHeapIter = psIter; -+ -+ PVR_LOG_RETURN_VOID_IF_FALSE(psHeapIter != NULL, "psHeapIter is NULL"); -+ -+ PhysHeapRelease(psHeapIter->psPhysHeap); -+ RA_IteratorRelease(psHeapIter->psRAIter); -+ OSFreeMem(psHeapIter); -+} -+ -+PVRSRV_ERROR LMA_HeapIteratorReset(PHYS_HEAP_ITERATOR *psIter) -+{ -+ PHYS_HEAP_ITERATOR *psHeapIter = psIter; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psHeapIter != NULL, "ppsIter"); -+ -+ RA_IteratorReset(psHeapIter->psRAIter); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_BOOL LMA_HeapIteratorNext(PHYS_HEAP_ITERATOR *psIter, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_UINT64 *puiSize) -+{ -+ PHYS_HEAP_ITERATOR *psHeapIter = psIter; -+ RA_ITERATOR_DATA sData = {0}; -+ -+ if (psHeapIter == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "psHeapIter in %s() is NULL", __func__)); -+ return IMG_FALSE; -+ } -+ -+ if (!RA_IteratorNext(psHeapIter->psRAIter, &sData)) -+ { -+ return IMG_FALSE; -+ } -+ -+ PVR_ASSERT(sData.uiSize != 0); -+ -+ psDevPAddr->uiAddr = sData.uiAddr; -+ *puiSize = sData.uiSize; -+ -+ return IMG_TRUE; -+} -+ -+PVRSRV_ERROR LMA_HeapIteratorGetHeapStats(PHYS_HEAP_ITERATOR *psIter, -+ IMG_UINT64 *puiTotalSize, -+ IMG_UINT64 *puiInUseSize) -+{ -+ PHYS_HEAP_ITERATOR *psHeapIter = psIter; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psHeapIter != NULL, "psHeapIter"); -+ -+ *puiTotalSize = psHeapIter->uiTotalSize; -+ *puiInUseSize = psHeapIter->uiInUseSize; -+ -+ return PVRSRV_OK; -+} -+ -+ -+static PVRSRV_ERROR -+_LMA_DoPhyContigPagesAlloc(RA_ARENA *pArena, -+ size_t uiSize, -+ PG_HANDLE *psMemHandle, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_PID uiPid) -+{ -+ RA_BASE_T uiCardAddr = 0; -+ RA_LENGTH_T uiActualSize; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32Log2NumPages; -+ -+#if defined(DEBUG) -+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) -+ static IMG_UINT32 ui32MaxLog2NumPages = 7; /* 128 pages => 512KB */ -+#else -+ static IMG_UINT32 ui32MaxLog2NumPages = 4; /* 16 pages => 64KB */ -+#endif -+#endif /* defined(DEBUG) */ -+ -+ PVR_ASSERT(uiSize != 0); -+ ui32Log2NumPages = OSGetOrder(uiSize); -+ uiSize = (1 << ui32Log2NumPages) * OSGetPageSize(); -+ -+ eError = RA_Alloc(pArena, -+ uiSize, -+ RA_NO_IMPORT_MULTIPLIER, -+ 0, /* No flags */ -+ uiSize, -+ "LMA_PhyContigPagesAlloc", -+ &uiCardAddr, -+ &uiActualSize, -+ NULL); /* No private handle */ -+ -+ if (eError != PVRSRV_OK) -+ { -+ RA_USAGE_STATS sRAStats; -+ RA_Get_Usage_Stats(pArena, &sRAStats); -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "Failed to Allocate size = 0x"IMG_SIZE_FMTSPECX", align = 0x" -+ IMG_SIZE_FMTSPECX" Arena Free Space 0x%"IMG_UINT64_FMTSPECX, -+ uiSize, uiSize, sRAStats.ui64FreeArenaSize)); -+ return eError; -+ } -+ -+ PVR_ASSERT(uiSize == uiActualSize); -+ -+ psMemHandle->u.ui64Handle = uiCardAddr; -+ psDevPAddr->uiAddr = (IMG_UINT64) uiCardAddr; -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ 
PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, -+ uiSize, -+ uiCardAddr, -+ uiPid); -+#else -+ { -+ IMG_CPU_PHYADDR sCpuPAddr; -+ sCpuPAddr.uiAddr = psDevPAddr->uiAddr; -+ -+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, -+ NULL, -+ sCpuPAddr, -+ uiSize, -+ uiPid -+ DEBUG_MEMSTATS_VALUES); -+ } -+#endif -+#endif -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: (GPU Virtualisation) Allocated 0x" IMG_SIZE_FMTSPECX " at 0x%" -+ IMG_UINT64_FMTSPECX ", Arena ID %u", -+ __func__, uiSize, psDevPAddr->uiAddr, psMemHandle->uiOSid)); -+#endif -+ -+#if defined(DEBUG) -+ PVR_ASSERT((ui32Log2NumPages <= ui32MaxLog2NumPages)); -+ if (ui32Log2NumPages > ui32MaxLog2NumPages) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: ui32MaxLog2NumPages = %u, increasing to %u", __func__, -+ ui32MaxLog2NumPages, ui32Log2NumPages )); -+ ui32MaxLog2NumPages = ui32Log2NumPages; -+ } -+#endif /* defined(DEBUG) */ -+ psMemHandle->uiOrder = ui32Log2NumPages; -+ -+ return eError; -+} -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+static PVRSRV_ERROR -+LMA_PhyContigPagesAllocGPV(PHYS_HEAP *psPhysHeap, -+ size_t uiSize, -+ PG_HANDLE *psMemHandle, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_UINT32 ui32OSid, -+ IMG_PID uiPid) -+{ -+ PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap); -+ RA_ARENA *pArena; -+ IMG_UINT32 ui32Log2NumPages = 0; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(uiSize != 0); -+ ui32Log2NumPages = OSGetOrder(uiSize); -+ uiSize = (1 << ui32Log2NumPages) * OSGetPageSize(); -+ -+ PVR_ASSERT(ui32OSid < GPUVIRT_VALIDATION_NUM_OS); -+ if (ui32OSid >= GPUVIRT_VALIDATION_NUM_OS) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Arena index %u defaulting to 0", -+ __func__, ui32OSid)); -+ ui32OSid = 0; -+ } -+ -+ pArena = psDevNode->psOSidSubArena[ui32OSid]; -+ -+ if (psMemHandle->uiOSid != ui32OSid) -+ { -+ PVR_LOG(("%s: Unexpected OSid value %u - expecting %u", __func__, -+ psMemHandle->uiOSid, ui32OSid)); -+ } -+ -+ psMemHandle->uiOSid = ui32OSid; /* For Free() use */ -+ -+ eError = _LMA_DoPhyContigPagesAlloc(pArena, uiSize, psMemHandle, -+ psDevPAddr, uiPid); -+ PVR_LOG_IF_ERROR(eError, "_LMA_DoPhyContigPagesAlloc"); -+ -+ return eError; -+} -+#endif -+ -+static PVRSRV_ERROR -+LMA_PhyContigPagesAlloc(PHYS_HEAP *psPhysHeap, -+ size_t uiSize, -+ PG_HANDLE *psMemHandle, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_PID uiPid) -+{ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+ IMG_UINT32 ui32OSid = 0; -+ return LMA_PhyContigPagesAllocGPV(psPhysHeap, uiSize, psMemHandle, psDevPAddr, -+ ui32OSid, uiPid); -+#else -+ PVRSRV_ERROR eError; -+ -+ RA_ARENA *pArena; -+ IMG_UINT32 ui32Log2NumPages = 0; -+ -+ eError = PhysmemGetArenaLMA(psPhysHeap, &pArena); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemGetArenaLMA"); -+ -+ PVR_ASSERT(uiSize != 0); -+ ui32Log2NumPages = OSGetOrder(uiSize); -+ uiSize = (1 << ui32Log2NumPages) * OSGetPageSize(); -+ -+ eError = _LMA_DoPhyContigPagesAlloc(pArena, uiSize, psMemHandle, -+ psDevPAddr, uiPid); -+ PVR_LOG_IF_ERROR(eError, "_LMA_DoPhyContigPagesAlloc"); -+ -+ return eError; -+#endif -+} -+ -+static void -+LMA_PhyContigPagesFree(PHYS_HEAP *psPhysHeap, -+ PG_HANDLE *psMemHandle) -+{ -+ RA_BASE_T uiCardAddr = (RA_BASE_T) psMemHandle->u.ui64Handle; -+ RA_ARENA *pArena; -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+ PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap); -+ IMG_UINT32 ui32OSid = psMemHandle->uiOSid; -+ -+ /* -+ * The Arena ID is set by the originating allocation, and maintained via -+ * the call stacks 
into this function. We have a limited range of IDs -+ * and if the passed value falls outside this we simply treat it as a -+ * 'global' arena ID of 0. This is where all default OS-specific allocations -+ * are created. -+ */ -+ PVR_ASSERT(ui32OSid < GPUVIRT_VALIDATION_NUM_OS); -+ if (ui32OSid >= GPUVIRT_VALIDATION_NUM_OS) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Arena index %u PhysAddr 0x%" -+ IMG_UINT64_FMTSPECx " Reverting to Arena 0", __func__, -+ ui32OSid, uiCardAddr)); -+ /* -+ * No way of determining what we're trying to free so default to the -+ * global default arena index 0. -+ */ -+ ui32OSid = 0; -+ } -+ -+ pArena = psDevNode->psOSidSubArena[ui32OSid]; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: (GPU Virtualisation) Freeing 0x%" -+ IMG_UINT64_FMTSPECx ", Arena %u", __func__, -+ uiCardAddr, ui32OSid)); -+ -+#else -+ PhysmemGetArenaLMA(psPhysHeap, &pArena); -+#endif -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, -+ (IMG_UINT64)uiCardAddr); -+#else -+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, -+ (IMG_UINT64)uiCardAddr, -+ OSGetCurrentClientProcessIDKM()); -+#endif -+#endif -+ -+ RA_Free(pArena, uiCardAddr); -+ psMemHandle->uiOrder = 0; -+} -+ -+static PVRSRV_ERROR -+LMA_PhyContigPagesMap(PHYS_HEAP *psPhysHeap, -+ PG_HANDLE *psMemHandle, -+ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, -+ void **pvPtr) -+{ -+ IMG_CPU_PHYADDR sCpuPAddr; -+ IMG_UINT32 ui32NumPages = (1 << psMemHandle->uiOrder); -+ PVR_UNREFERENCED_PARAMETER(uiSize); -+ -+ PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPAddr, psDevPAddr); -+ *pvPtr = OSMapPhysToLin(sCpuPAddr, -+ ui32NumPages * OSGetPageSize(), -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC); -+ PVR_RETURN_IF_NOMEM(*pvPtr); -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, -+ ui32NumPages * OSGetPageSize(), -+ OSGetCurrentClientProcessIDKM()); -+#else -+ { -+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, -+ *pvPtr, -+ sCpuPAddr, -+ ui32NumPages * OSGetPageSize(), -+ OSGetCurrentClientProcessIDKM() -+ DEBUG_MEMSTATS_VALUES); -+ } -+#endif -+#endif -+ return PVRSRV_OK; -+} -+ -+static void -+LMA_PhyContigPagesUnmap(PHYS_HEAP *psPhysHeap, -+ PG_HANDLE *psMemHandle, -+ void *pvPtr) -+{ -+ IMG_UINT32 ui32NumPages = (1 << psMemHandle->uiOrder); -+ PVR_UNREFERENCED_PARAMETER(psPhysHeap); -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, -+ ui32NumPages * OSGetPageSize(), -+ OSGetCurrentClientProcessIDKM()); -+#else -+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, -+ (IMG_UINT64)(uintptr_t)pvPtr, -+ OSGetCurrentClientProcessIDKM()); -+#endif -+#endif -+ -+ OSUnMapPhysToLin(pvPtr, ui32NumPages * OSGetPageSize()); -+} -+ -+static PVRSRV_ERROR -+LMA_PhyContigPagesClean(PHYS_HEAP *psPhysHeap, -+ PG_HANDLE *psMemHandle, -+ IMG_UINT32 uiOffset, -+ IMG_UINT32 uiLength) -+{ -+ /* No need to flush because we map as uncached */ -+ PVR_UNREFERENCED_PARAMETER(psPhysHeap); -+ PVR_UNREFERENCED_PARAMETER(psMemHandle); -+ PVR_UNREFERENCED_PARAMETER(uiOffset); -+ PVR_UNREFERENCED_PARAMETER(uiLength); -+ -+ return PVRSRV_OK; -+} -+ -+static PHEAP_IMPL_FUNCS _sPHEAPImplFuncs = -+{ -+ .pfnDestroyData = &_DestroyImplData, -+ .pfnGetDevPAddr = &_GetDevPAddr, -+ .pfnGetCPUPAddr = 
&_GetCPUPAddr, -+ .pfnGetSize = &_GetSize, -+ .pfnGetPageShift = &_GetPageShift, -+ .pfnGetPMRFactoryMemStats = &PhysmemGetLocalRamMemStats, -+ .pfnCreatePMR = &PhysmemNewLocalRamBackedPMR, -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+ .pfnPagesAllocGPV = &LMA_PhyContigPagesAllocGPV, -+#endif -+ .pfnPagesAlloc = &LMA_PhyContigPagesAlloc, -+ .pfnPagesFree = &LMA_PhyContigPagesFree, -+ .pfnPagesMap = &LMA_PhyContigPagesMap, -+ .pfnPagesUnMap = &LMA_PhyContigPagesUnmap, -+ .pfnPagesClean = &LMA_PhyContigPagesClean, -+}; -+ -+PVRSRV_ERROR -+PhysmemCreateHeapLMA(PVRSRV_DEVICE_NODE *psDevNode, -+ PHYS_HEAP_POLICY uiPolicy, -+ PHYS_HEAP_CONFIG *psConfig, -+ IMG_CHAR *pszLabel, -+ PHYS_HEAP **ppsPhysHeap) -+{ -+ PHYSMEM_LMA_DATA *psLMAData; -+ PVRSRV_ERROR eError; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pszLabel != NULL, "pszLabel"); -+ -+ psLMAData = OSAllocMem(sizeof(*psLMAData)); -+ PVR_LOG_RETURN_IF_NOMEM(psLMAData, "OSAllocMem"); -+ -+ psLMAData->sStartAddr = psConfig->sStartAddr; -+ psLMAData->sCardBase = psConfig->sCardBase; -+ psLMAData->uiSize = psConfig->uiSize; -+ -+ eError = PhysHeapCreate(psDevNode, -+ psConfig, -+ uiPolicy, -+ (PHEAP_IMPL_DATA)psLMAData, -+ &_sPHEAPImplFuncs, -+ ppsPhysHeap); -+ if (eError != PVRSRV_OK) -+ { -+ OSFreeMem(psLMAData); -+ return eError; -+ } -+ -+ eError = _CreateArenas(psLMAData, pszLabel, uiPolicy); -+ PVR_LOG_RETURN_IF_ERROR(eError, "_CreateArenas"); -+ -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR _MapPhysicalContigAlloc(PHYS_HEAP *psPhysHeap, -+ RA_BASE_ARRAY_T paBaseArray, -+ size_t uiSize, -+ PMR_FLAGS_T ulFlags, -+ PMR_KERNEL_MAPPING *psMapping) -+{ -+ IMG_UINT32 ui32CPUCacheFlags; -+ PVRSRV_ERROR eError; -+ IMG_CPU_PHYADDR sCpuPAddr; -+ IMG_DEV_PHYADDR sDevPAddr; -+ sDevPAddr.uiAddr = RA_BASE_STRIP_GHOST_BIT(*paBaseArray); -+ -+ eError = DevmemCPUCacheMode(PhysHeapDeviceNode(psPhysHeap), ulFlags, &ui32CPUCacheFlags); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ PhysHeapDevPAddrToCpuPAddr(psPhysHeap, -+ 1, -+ &sCpuPAddr, -+ &sDevPAddr); -+ -+ psMapping->pvKernelAddress = OSMapPhysToLin(sCpuPAddr, uiSize, ui32CPUCacheFlags); -+ PVR_LOG_RETURN_IF_FALSE(psMapping->pvKernelAddress, -+ "OSMapPhyToLin: out of VM Mem", -+ PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING); -+ psMapping->vma = NULL; -+ psMapping->uiSize = uiSize; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR _MapPhysicalSparseAlloc(PMR_LMALLOCARRAY_DATA *psLMAllocArrayData, -+ RA_BASE_ARRAY_T paBaseArray, -+ size_t uiSize, -+ PMR_FLAGS_T ulFlags, -+ PMR_KERNEL_MAPPING *psMapping) -+{ -+ IMG_UINT32 uiChunkCount = uiSize >> psLMAllocArrayData->uiLog2ChunkSize; -+ IMG_CPU_PHYADDR uiPages[PMR_MAX_TRANSLATION_STACK_ALLOC], *puiPages; -+ PVRSRV_ERROR eError; -+ size_t uiPageShift = OSGetPageShift(); -+ IMG_UINT32 uiOSPageCnt = psLMAllocArrayData->uiLog2ChunkSize - uiPageShift; -+ -+ if ((uiChunkCount << uiOSPageCnt) > PMR_MAX_TRANSLATION_STACK_ALLOC) -+ { -+ puiPages = OSAllocZMem(sizeof(IMG_CPU_PHYADDR) * (uiChunkCount << uiOSPageCnt)); -+ PVR_RETURN_IF_NOMEM(puiPages); -+ } -+ else -+ { -+ puiPages = &uiPages[0]; -+ } -+ -+ if (uiOSPageCnt == 0) -+ { -+ IMG_UINT32 i; -+ PhysHeapDevPAddrToCpuPAddr(psLMAllocArrayData->psPhysHeap, -+ uiChunkCount, -+ puiPages, -+ (IMG_DEV_PHYADDR *)paBaseArray); -+ -+ /* If the ghost bit is present then the addrs returned will be off by 1 -+ * Strip the ghost bit to correct to real page aligned addresses. 
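The comment above explains that bases handed back for a physically contiguous run may carry a "ghost" tag in their low bits, and that the tag must be masked off before the value can be used as a page-aligned address. A small self-contained sketch of that idea, assuming the tag lives in an otherwise-zero bit below the chunk alignment; GHOST_BIT, mark_ghost() and strip_ghost() are made-up stand-ins here, not the driver's RA_BASE_* macros:

    #include <stdint.h>
    #include <stdio.h>
    #include <assert.h>

    #define CHUNK_SHIFT 12u                    /* assume 4 KiB chunks */
    #define GHOST_BIT   ((uint64_t)1u << 0)    /* hypothetical tag in an always-zero low bit */

    /* Tag a chunk-aligned base so iteration code can tell "ghost" entries
     * (continuation of a contiguous run) apart from real ones. */
    static uint64_t mark_ghost(uint64_t base)  { return base | GHOST_BIT; }
    static int      is_ghost(uint64_t base)    { return (base & GHOST_BIT) != 0; }
    static uint64_t strip_ghost(uint64_t base) { return base & ~GHOST_BIT; }

    int main(void)
    {
        uint64_t real  = (uint64_t)0x80000 << CHUNK_SHIFT;        /* chunk-aligned address */
        uint64_t ghost = mark_ghost(real + (1u << CHUNK_SHIFT));  /* next chunk of the run */

        assert(is_ghost(ghost));
        /* Any address handed onwards must have the tag removed again. */
        printf("real 0x%llx, ghost entry resolves to 0x%llx\n",
               (unsigned long long)real,
               (unsigned long long)strip_ghost(ghost));
        return 0;
    }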
-+ * */ -+ for (i = 0; i < uiChunkCount; i++) -+ { -+ puiPages[i].uiAddr = RA_BASE_STRIP_GHOST_BIT(puiPages[i].uiAddr); -+ } -+ } -+ else -+ { -+ IMG_UINT32 i = 0, j = 0, index = 0; -+ for (i = 0; i < uiChunkCount; i++) -+ { -+ IMG_UINT32 ui32OSPagesPerDeviceChunk = (1 << uiOSPageCnt); -+ IMG_DEV_PHYADDR uiDevAddr; -+ uiDevAddr.uiAddr = RA_BASE_STRIP_GHOST_BIT(paBaseArray[i]); -+ for (j = 0; j < ui32OSPagesPerDeviceChunk; j++) -+ { -+ uiDevAddr.uiAddr += (1ULL << uiPageShift); -+ PhysHeapDevPAddrToCpuPAddr(psLMAllocArrayData->psPhysHeap, -+ 1, -+ &puiPages[index], -+ &uiDevAddr); -+ index++; -+ } -+ } -+ } -+ -+ eError = OSMapPhysArrayToLin(puiPages, -+ uiChunkCount, -+ &psMapping->pvKernelAddress, -+ &psMapping->vma); -+ if (eError == PVRSRV_OK) -+ { -+ psMapping->uiSize = uiSize; -+ } -+ -+ if (puiPages != &uiPages[0]) -+ { -+ OSFreeMem(puiPages); -+ } -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR _MapPMRKernel(PMR_LMALLOCARRAY_DATA *psLMAllocArrayData, -+ RA_BASE_ARRAY_T paBaseArray, -+ size_t uiSize, -+ PMR_FLAGS_T ulFlags, -+ PMR_KERNEL_MAPPING *psMapping) -+{ -+ PVRSRV_ERROR eError; -+ PHYS_HEAP *psPhysHeap = psLMAllocArrayData->psPhysHeap; -+ if (!BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_SPARSE)) -+ { -+ /* Physically Contig */ -+ if (BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_PHYS_CONTIG)) -+ { -+ eError = _MapPhysicalContigAlloc(psPhysHeap, -+ paBaseArray, -+ uiSize, -+ ulFlags, -+ psMapping); -+ } -+ /* Physically Sparse */ -+ else -+ { -+ eError = _MapPhysicalSparseAlloc(psLMAllocArrayData, -+ paBaseArray, -+ uiSize, -+ ulFlags, -+ psMapping); -+ } -+ } -+ else -+ { -+ /* Sparse Alloc Single Chunk */ -+ if (uiSize == (1 << psLMAllocArrayData->uiLog2ChunkSize)) -+ { -+ eError = _MapPhysicalContigAlloc(psPhysHeap, -+ paBaseArray, -+ uiSize, -+ ulFlags, -+ psMapping); -+ } -+ /* Sparse Alloc Multi Chunk */ -+ else -+ { -+ eError = _MapPhysicalSparseAlloc(psLMAllocArrayData, -+ paBaseArray, -+ uiSize, -+ ulFlags, -+ psMapping); -+ } -+ } -+ -+ return eError; -+} -+ -+static void _UnMapPhysicalContigAlloc(PMR_KERNEL_MAPPING *psKernelMapping) -+{ -+ OSUnMapPhysToLin(psKernelMapping->pvKernelAddress, psKernelMapping->uiSize); -+} -+ -+static void _UnMapPhysicalSparseAlloc(PMR_KERNEL_MAPPING *psKernelMapping) -+{ -+ OSUnMapPhysArrayToLin(psKernelMapping->pvKernelAddress, -+ psKernelMapping->vma); -+} -+ -+static void _UnMapPMRKernel(PMR_LMALLOCARRAY_DATA *psLMAllocArrayData, -+ PMR_KERNEL_MAPPING *psKernelMapping) -+{ -+ if (!BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_SPARSE)) -+ { -+ /* Physically Contig */ -+ if (BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_PHYS_CONTIG)) -+ { -+ _UnMapPhysicalContigAlloc(psKernelMapping); -+ } -+ /* Physically Sparse */ -+ else -+ { -+ _UnMapPhysicalSparseAlloc(psKernelMapping); -+ } -+ } -+ else -+ { -+ /* Sparse Alloc Single Chunk */ -+ if (psKernelMapping->uiSize == (1 << psLMAllocArrayData->uiLog2ChunkSize)) -+ { -+ _UnMapPhysicalContigAlloc(psKernelMapping); -+ } -+ /* Sparse Alloc Multi Chunk */ -+ else -+ { -+ _UnMapPhysicalSparseAlloc(psKernelMapping); -+ } -+ } -+} -+ -+static PVRSRV_ERROR -+_PhysPgMemSet(PMR_LMALLOCARRAY_DATA *psLMAllocArrayData, -+ RA_BASE_ARRAY_T paBaseArray, -+ size_t uiSize, -+ IMG_BYTE ui8SetValue) -+{ -+ PVRSRV_ERROR eError; -+ PMR_KERNEL_MAPPING sKernelMapping; -+ -+ eError = _MapPMRKernel(psLMAllocArrayData, -+ paBaseArray, -+ uiSize, -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC, -+ &sKernelMapping); -+ PVR_GOTO_IF_ERROR(eError, map_failed); -+ -+ OSCachedMemSetWMB(sKernelMapping.pvKernelAddress, 
ui8SetValue, uiSize); -+ -+ _UnMapPMRKernel(psLMAllocArrayData, &sKernelMapping); -+ -+ return PVRSRV_OK; -+ -+map_failed: -+ PVR_DPF((PVR_DBG_ERROR, "Failed to poison/zero allocation")); -+ return eError; -+} -+ -+static PVRSRV_ERROR -+_AllocLMPageArray(PMR_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 uiLog2AllocPageSize, -+ IMG_UINT32 ui32Flags, -+ PHYS_HEAP* psPhysHeap, -+ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags, -+ IMG_PID uiPid, -+ PMR_LMALLOCARRAY_DATA **ppsPageArrayDataPtr, -+ CONNECTION_DATA *psConnection) -+{ -+ PMR_LMALLOCARRAY_DATA *psPageArrayData = NULL; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiNumPages; -+ -+ PVR_ASSERT(!BIT_ISSET(ui32Flags, FLAG_ZERO) || !BIT_ISSET(ui32Flags, FLAG_POISON_ON_ALLOC)); -+ PVR_ASSERT(OSGetPageShift() <= uiLog2AllocPageSize); -+ -+ /* Use of cast below is justified by the assertion that follows to -+ prove that no significant bits have been truncated */ -+ uiNumPages = (IMG_UINT32)(((uiSize - 1) >> uiLog2AllocPageSize) + 1); -+ PVR_ASSERT(((PMR_SIZE_T)uiNumPages << uiLog2AllocPageSize) == uiSize); -+ -+ psPageArrayData = OSAllocMem(sizeof(PMR_LMALLOCARRAY_DATA) + (sizeof(RA_BASE_T) * uiNumPages)); -+ PVR_GOTO_IF_NOMEM(psPageArrayData, eError, errorOnAllocArray); -+ -+ if (BIT_ISSET(ui32Flags, FLAG_SPARSE)) -+ { -+ /* Since no pages are allocated yet, initialise page addresses to INVALID_PAGE_ADDR */ -+ OSCachedMemSet(psPageArrayData->aBaseArray, -+ 0xFF, -+ sizeof(RA_BASE_T) * -+ uiNumPages); -+ } -+ else -+ { -+ /* Base pointers have been allocated for the full PMR in case we require a non -+ * physically contiguous backing for the virtually contiguous allocation but the most -+ * common case will be contiguous and so only require the first Base to be present -+ */ -+ psPageArrayData->aBaseArray[0] = INVALID_BASE_ADDR; -+ } -+ -+ psPageArrayData->uiTotalNumChunks = uiNumPages; -+ psPageArrayData->uiChunksToAlloc = BIT_ISSET(ui32Flags, FLAG_SPARSE) ? 
ui32NumPhysChunks : uiNumPages; -+ psPageArrayData->uiLog2ChunkSize = uiLog2AllocPageSize; -+ -+ psPageArrayData->psConnection = psConnection; -+ psPageArrayData->uiPid = uiPid; -+ psPageArrayData->iNumChunksAllocated = 0; -+ psPageArrayData->ui32Flags = ui32Flags; -+ psPageArrayData->psPhysHeap = psPhysHeap; -+ psPageArrayData->uiAllocFlags = uiAllocFlags; -+ -+ *ppsPageArrayDataPtr = psPageArrayData; -+ -+ return PVRSRV_OK; -+ -+/* -+ error exit path follows: -+*/ -+ -+errorOnAllocArray: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+static PVRSRV_ERROR -+_AllocLMPagesContig(PMR_LMALLOCARRAY_DATA *psPageArrayData) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize; -+ IMG_UINT64 uiPhysSize = (IMG_UINT64) psPageArrayData->uiChunksToAlloc << uiLog2ChunkSize; -+ IMG_BOOL bPhysContig; -+ IMG_UINT32 ui32Flags = psPageArrayData->ui32Flags; -+ -+ -+ eError = RA_AllocMulti(psPageArrayData->psArena, -+ uiPhysSize, -+ uiLog2ChunkSize, -+ RA_NO_IMPORT_MULTIPLIER, -+ 0, /* No flags */ -+ "LMA_Page_Alloc", -+ psPageArrayData->aBaseArray, -+ psPageArrayData->uiTotalNumChunks, -+ &bPhysContig); -+ -+ if (PVRSRV_OK != eError) -+ { -+ RA_USAGE_STATS sRAStats; -+ IMG_CHAR *pszArenaName; -+ RA_Get_Usage_Stats(psPageArrayData->psArena, &sRAStats); -+ pszArenaName = RA_GetArenaName(psPageArrayData->psArena); -+ -+ PVR_DPF((PHYSHEAP_DPF_LVL, -+ "Contig: Failed to Allocate size = 0x%llx, align = 0x%llx" -+ " Arena Free Space 0x%"IMG_UINT64_FMTSPECX"" -+ " Arena Name: '%s'", -+ (unsigned long long)uiPhysSize, -+ 1ULL << uiLog2ChunkSize, -+ sRAStats.ui64FreeArenaSize, -+ pszArenaName)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES, errorOnRAAlloc); -+ } -+ -+ if (bPhysContig) -+ { -+ BIT_SET(psPageArrayData->ui32Flags, FLAG_PHYS_CONTIG); -+ } -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+{ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "(GPU Virtualization Validation): First RealBase: %"IMG_UINT64_FMTSPECX, -+ psPageArrayData->aBaseArray[0])); -+} -+#endif -+ -+ if (BIT_ISSET(ui32Flags, FLAG_POISON_ON_ALLOC)) -+ { -+ eError = _PhysPgMemSet(psPageArrayData, -+ psPageArrayData->aBaseArray, -+ uiPhysSize, -+ PVRSRV_POISON_ON_ALLOC_VALUE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnPoison); -+ } -+ -+ if (BIT_ISSET(ui32Flags, FLAG_ZERO)) -+ { -+ eError = _PhysPgMemSet(psPageArrayData, -+ psPageArrayData->aBaseArray, -+ uiPhysSize, -+ ZERO_PAGE_VALUE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnZero); -+ } -+ -+ psPageArrayData->iNumChunksAllocated += psPageArrayData->uiChunksToAlloc; -+ -+ /* We have alloc'd the previous request, set 0 for book keeping */ -+ psPageArrayData->uiChunksToAlloc = 0; -+ -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiPhysSize, psPageArrayData->uiPid); -+#else -+ if (bPhysContig) -+ { -+ IMG_CPU_PHYADDR sLocalCpuPAddr; -+ sLocalCpuPAddr.uiAddr = (IMG_UINT64) psPageArrayData->aBaseArray[0]; -+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, -+ NULL, -+ sLocalCpuPAddr, -+ psPageArrayData->uiTotalNumChunks << uiLog2ChunkSize, -+ psPageArrayData->uiPid -+ DEBUG_MEMSTATS_VALUES); -+ } -+ else -+ { -+ IMG_UINT32 i, j; -+ IMG_CPU_PHYADDR sLocalCpuPAddr; -+ -+ for (i = 0; i < psPageArrayData->uiTotalNumChunks;) -+ { -+ IMG_UINT32 ui32AllocSizeInChunks = 1; -+ -+ for (j = i; -+ j + 1 != psPageArrayData->uiTotalNumChunks && -+ RA_BASE_IS_GHOST(psPageArrayData->aBaseArray[j 
+ 1]); -+ j++) -+ { -+ ui32AllocSizeInChunks++; -+ } -+ -+ sLocalCpuPAddr.uiAddr = (IMG_UINT64) psPageArrayData->aBaseArray[i]; -+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, -+ NULL, -+ sLocalCpuPAddr, -+ ui32AllocSizeInChunks << uiLog2ChunkSize, -+ psPageArrayData->uiPid -+ DEBUG_MEMSTATS_VALUES); -+ -+ i += ui32AllocSizeInChunks; -+ } -+ } -+#endif -+#endif -+ -+ return PVRSRV_OK; -+ -+ /* -+ error exit paths follow: -+ */ -+errorOnZero: -+errorOnPoison: -+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; -+ -+ RA_FreeMulti(psPageArrayData->psArena, -+ psPageArrayData->aBaseArray, -+ psPageArrayData->uiTotalNumChunks); -+ -+errorOnRAAlloc: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+/* -+ * Fully allocated variant of sparse allocation does not take in as argument an -+ * array of indices. It is used in cases where the amount of chunks to allocate is -+ * the same as the total the PMR can represent. I.E when we want to fully populate -+ * a sparse PMR. -+ */ -+static PVRSRV_ERROR -+_AllocLMPagesSparseFull(PMR_LMALLOCARRAY_DATA *psPageArrayData) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize; -+ IMG_UINT64 uiPhysSize = (IMG_UINT64) psPageArrayData->uiChunksToAlloc << uiLog2ChunkSize; -+ IMG_UINT32 ui32Flags = psPageArrayData->ui32Flags; -+ -+ -+ eError = RA_AllocMultiSparse(psPageArrayData->psArena, -+ uiLog2ChunkSize, -+ RA_NO_IMPORT_MULTIPLIER, -+ 0, /* No flags */ -+ "LMA_Page_Alloc", -+ psPageArrayData->aBaseArray, -+ psPageArrayData->uiTotalNumChunks, -+ NULL, /* No indices given meaning allocate full base array using chunk count below */ -+ psPageArrayData->uiChunksToAlloc); -+ if (PVRSRV_OK != eError) -+ { -+ RA_USAGE_STATS sRAStats; -+ IMG_CHAR *pszArenaName; -+ RA_Get_Usage_Stats(psPageArrayData->psArena, &sRAStats); -+ pszArenaName = RA_GetArenaName(psPageArrayData->psArena); -+ -+ PVR_DPF((PHYSHEAP_DPF_LVL, -+ "SparseFull: Failed to Allocate size = 0x%llx, align = 0x%llx" -+ " Arena Free Space 0x%"IMG_UINT64_FMTSPECX"" -+ " Arena Name: '%s'", -+ (unsigned long long)uiPhysSize, -+ 1ULL << uiLog2ChunkSize, -+ sRAStats.ui64FreeArenaSize, -+ pszArenaName)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES, errorOnRAAlloc); -+ } -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+{ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "(GPU Virtualization Validation): First RealBase: %"IMG_UINT64_FMTSPECX, -+ psPageArrayData->aBaseArray[0])); -+} -+#endif -+ -+ if (BIT_ISSET(ui32Flags, FLAG_POISON_ON_ALLOC)) -+ { -+ eError = _PhysPgMemSet(psPageArrayData, -+ psPageArrayData->aBaseArray, -+ uiPhysSize, -+ PVRSRV_POISON_ON_ALLOC_VALUE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnPoison); -+ } -+ -+ if (BIT_ISSET(ui32Flags, FLAG_ZERO)) -+ { -+ eError = _PhysPgMemSet(psPageArrayData, -+ psPageArrayData->aBaseArray, -+ uiPhysSize, -+ ZERO_PAGE_VALUE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnZero); -+ } -+ -+ psPageArrayData->iNumChunksAllocated += psPageArrayData->uiChunksToAlloc; -+ -+ /* We have alloc'd the previous request, set 0 for book keeping */ -+ psPageArrayData->uiChunksToAlloc = 0; -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiPhysSize, psPageArrayData->uiPid); -+#else -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psPageArrayData->uiTotalNumChunks; i++) -+ { -+ IMG_CPU_PHYADDR sLocalCpuPAddr; -+ sLocalCpuPAddr.uiAddr = -+ (IMG_UINT64) 
RA_BASE_STRIP_GHOST_BIT(psPageArrayData->aBaseArray[i]); -+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, -+ NULL, -+ sLocalCpuPAddr, -+ 1 << uiLog2ChunkSize, -+ psPageArrayData->uiPid -+ DEBUG_MEMSTATS_VALUES); -+ } -+ } -+#endif -+#endif -+ -+ return PVRSRV_OK; -+ -+ /* -+ error exit paths follow: -+ */ -+errorOnZero: -+errorOnPoison: -+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; -+ -+ RA_FreeMulti(psPageArrayData->psArena, -+ psPageArrayData->aBaseArray, -+ psPageArrayData->uiTotalNumChunks); -+ -+errorOnRAAlloc: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+static PVRSRV_ERROR -+_AllocLMPagesSparse(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize; -+ IMG_UINT32 uiChunkSize = 1ULL << uiLog2ChunkSize; -+ IMG_UINT32 uiChunksToAlloc = psPageArrayData->uiChunksToAlloc; -+ IMG_UINT32 ui32Flags = psPageArrayData->ui32Flags; -+ -+ if (!pui32MapTable) -+ { -+ PVR_LOG_GOTO_WITH_ERROR("pui32MapTable", eError, PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY, errorOnRAAlloc); -+ } -+ -+#if defined(DEBUG) -+ /* -+ * This block performs validation of the mapping table input in the following ways: -+ * Check that each index in the mapping table does not exceed the number of the chunks -+ * the whole PMR supports. -+ * Check that each index given by the mapping table is not already allocated. -+ * Check that there are no duplicated indices given in the mapping table. -+ */ -+ { -+ IMG_UINT32 i; -+ IMG_BOOL bIssueDetected = IMG_FALSE; -+ PVRSRV_ERROR eMapCheckError; -+ -+ for (i = 0; i < uiChunksToAlloc; i++) -+ { -+ if (pui32MapTable[i] >= psPageArrayData->uiTotalNumChunks) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Page alloc request Index out of bounds for PMR @0x%p", -+ __func__, -+ psPageArrayData)); -+ eMapCheckError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE; -+ bIssueDetected = IMG_TRUE; -+ break; -+ } -+ -+ if (!RA_BASE_IS_INVALID(psPageArrayData->aBaseArray[pui32MapTable[i]])) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Mapping already exists Index %u Mapping index %u", -+ __func__, -+ i, -+ pui32MapTable[i])); -+ eMapCheckError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS; -+ bIssueDetected = IMG_TRUE; -+ break; -+ } -+ -+ if (RA_BASE_IS_SPARSE_PREP(psPageArrayData->aBaseArray[pui32MapTable[i]])) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Mapping already exists in mapping table given Index %u Mapping index %u", -+ __func__, -+ i, -+ pui32MapTable[i])); -+ eMapCheckError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS; -+ bIssueDetected = IMG_TRUE; -+ break; -+ } -+ else -+ { -+ /* Set the To Prep value so we can detect duplicated map indices */ -+ psPageArrayData->aBaseArray[pui32MapTable[i]] = RA_BASE_SPARSE_PREP_ALLOC_ADDR; -+ } -+ } -+ /* Unwind the Alloc Prep Values */ -+ if (bIssueDetected) -+ { -+ /* We don't want to affect the index of the issue seen -+ * as it could be a valid mapping. If it is a duplicated -+ * mapping in the given table then we will clean-up the -+ * previous instance anyway. 
-+ */ -+ IMG_UINT32 uiUnwind = i; -+ -+ for (i = 0; i < uiUnwind; i++) -+ { -+ psPageArrayData->aBaseArray[pui32MapTable[i]] = INVALID_BASE_ADDR; -+ } -+ -+ PVR_GOTO_WITH_ERROR(eError, eMapCheckError, errorOnRAAlloc); -+ } -+ } -+#endif -+ -+ eError = RA_AllocMultiSparse(psPageArrayData->psArena, -+ psPageArrayData->uiLog2ChunkSize, -+ RA_NO_IMPORT_MULTIPLIER, -+ 0, -+ "LMA_Page_Alloc", -+ psPageArrayData->aBaseArray, -+ psPageArrayData->uiTotalNumChunks, -+ pui32MapTable, -+ uiChunksToAlloc); -+ if (PVRSRV_OK != eError) -+ { -+ RA_USAGE_STATS sRAStats; -+ IMG_CHAR *pszArenaName; -+ RA_Get_Usage_Stats(psPageArrayData->psArena, &sRAStats); -+ pszArenaName = RA_GetArenaName(psPageArrayData->psArena); -+ -+ PVR_DPF((PHYSHEAP_DPF_LVL, -+ "Sparse: Failed to Allocate size = 0x%llx, align = 0x%llx" -+ " Arena Free Space 0x%"IMG_UINT64_FMTSPECX"" -+ " Arena Name: '%s'", -+ (unsigned long long) uiChunksToAlloc << uiLog2ChunkSize, -+ 1ULL << uiLog2ChunkSize, -+ sRAStats.ui64FreeArenaSize, -+ pszArenaName)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES, errorOnRAAlloc); -+ } -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+{ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "(GPU Virtualization Validation): First RealBase: %"IMG_UINT64_FMTSPECX, -+ psPageArrayData->aBaseArray[pui32MapTable[0]])); -+} -+#endif -+ -+ if (BIT_ISSET(ui32Flags, FLAG_POISON_ON_ALLOC) || BIT_ISSET(ui32Flags, FLAG_ZERO)) -+ { -+ IMG_UINT32 i, ui32Index = 0; -+ for (i = 0; i < uiChunksToAlloc; i++) -+ { -+ ui32Index = pui32MapTable[i]; -+ -+ eError = _PhysPgMemSet(psPageArrayData, -+ &psPageArrayData->aBaseArray[ui32Index], -+ uiChunkSize, -+ BIT_ISSET(ui32Flags, FLAG_POISON_ON_ALLOC) ? PVRSRV_POISON_ON_ALLOC_VALUE : -+ ZERO_PAGE_VALUE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnPoisonZero); -+ } -+ } -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, -+ uiChunksToAlloc << uiLog2ChunkSize, -+ psPageArrayData->uiPid); -+#else -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psPageArrayData->uiChunksToAlloc; i++) -+ { -+ IMG_UINT32 ui32Index = pui32MapTable[i]; -+ IMG_CPU_PHYADDR sLocalCpuPAddr; -+ sLocalCpuPAddr.uiAddr = -+ (IMG_UINT64) RA_BASE_STRIP_GHOST_BIT(psPageArrayData->aBaseArray[ui32Index]); -+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, -+ NULL, -+ sLocalCpuPAddr, -+ uiChunkSize, -+ psPageArrayData->uiPid -+ DEBUG_MEMSTATS_VALUES); -+ } -+ } -+#endif -+#endif -+ -+ psPageArrayData->iNumChunksAllocated += uiChunksToAlloc; -+ -+ /* We have alloc'd the previous request, set 0 for book keeping */ -+ psPageArrayData->uiChunksToAlloc = 0; -+ -+ return PVRSRV_OK; -+ -+ /* -+ error exit paths follow: -+ */ -+errorOnPoisonZero: -+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; -+ -+ RA_FreeMultiSparse(psPageArrayData->psArena, -+ psPageArrayData->aBaseArray, -+ psPageArrayData->uiTotalNumChunks, -+ psPageArrayData->uiLog2ChunkSize, -+ pui32MapTable, -+ &uiChunksToAlloc); -+ -+errorOnRAAlloc: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+ -+} -+ -+static PVRSRV_ERROR -+_AllocLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable) -+{ -+ PVRSRV_ERROR eError; -+ RA_ARENA *pArena; -+ -+ PVR_ASSERT(NULL != psPageArrayData); -+ PVR_ASSERT(0 <= psPageArrayData->iNumChunksAllocated); -+ -+ if (psPageArrayData->uiTotalNumChunks < -+ (psPageArrayData->iNumChunksAllocated + psPageArrayData->uiChunksToAlloc)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Pages requested to 
allocate don't fit PMR alloc Size. " -+ "Allocated: %u + Requested: %u > Total Allowed: %u", -+ psPageArrayData->iNumChunksAllocated, -+ psPageArrayData->uiChunksToAlloc, -+ psPageArrayData->uiTotalNumChunks)); -+ return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE; -+ } -+ -+ /* If we have a non-backed sparse PMR then we can just return */ -+ if (psPageArrayData->uiChunksToAlloc == 0) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Non-Backed Sparse PMR Created: %p.", -+ __func__, -+ psPageArrayData)); -+ return PVRSRV_OK; -+ } -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+ { -+ IMG_UINT32 ui32OSid=0; -+ PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPageArrayData->psPhysHeap); -+ -+ /* Obtain the OSid specific data from our connection handle */ -+ if (psPageArrayData->psConnection != NULL) -+ { -+ ui32OSid = psPageArrayData->psConnection->ui32OSid; -+ } -+ -+ if (PVRSRV_CHECK_SHARED_BUFFER(psPageArrayData->uiAllocFlags)) -+ { -+ pArena=psDevNode->psOSSharedArena; -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "(GPU Virtualization Validation): Giving from shared mem")); -+ } -+ else -+ { -+ pArena=psDevNode->psOSidSubArena[ui32OSid]; -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "(GPU Virtualization Validation): Giving from OS slot %d", -+ ui32OSid)); -+ } -+ } -+#else -+ /* Get suitable local memory region for this GPU physheap allocation */ -+ eError = PhysmemGetArenaLMA(psPageArrayData->psPhysHeap, &pArena); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemGetArenaLMA"); -+#endif -+ -+ psPageArrayData->psArena = pArena; -+ -+ /* -+ * 3 cases: -+ * Sparse allocation populating the whole PMR. -+ * [**********] -+ * Sparse allocation partially populating the PMR at given indices. -+ * [*** *** **] -+ * Contiguous allocation. -+ * [**********] -+ * -+ * Note: Separate cases are required for 1 and 3 due to memstats tracking. -+ * In Contiguous case we can track the block as a single memstat record as we know -+ * we will also free in that size record. -+ * Sparse allocations require a memstat record per chunk as they can be arbitrarily -+ * free'd. 
-+ */ -+ if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_SPARSE)) -+ { -+ if (psPageArrayData->uiTotalNumChunks == psPageArrayData->uiChunksToAlloc && -+ !pui32MapTable) -+ { -+ eError = _AllocLMPagesSparseFull(psPageArrayData); -+ } -+ else -+ { -+ eError = _AllocLMPagesSparse(psPageArrayData, pui32MapTable); -+ } -+ } -+ else -+ { -+ eError = _AllocLMPagesContig(psPageArrayData); -+ } -+ -+ return eError; -+} -+ -+static void -+_FreeLMPageArray(PMR_LMALLOCARRAY_DATA *psPageArrayData) -+{ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "physmem_lma.c: freed local memory array structure for PMR @0x%p", -+ psPageArrayData)); -+ -+ OSFreeMem(psPageArrayData); -+} -+ -+static PVRSRV_ERROR -+_FreeLMPagesContig(PMR_LMALLOCARRAY_DATA *psPageArrayData) -+{ -+ RA_ARENA *pArena = psPageArrayData->psArena; -+ IMG_UINT64 uiPhysSize = -+ (IMG_UINT64) psPageArrayData->uiTotalNumChunks << psPageArrayData->uiLog2ChunkSize; -+ PVRSRV_ERROR eError; -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ IMG_UINT32 uiStat = PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES; -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_ZOMBIE)) -+ { -+ uiStat = PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES; -+ } -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ -+ -+ PVR_ASSERT(psPageArrayData->iNumChunksAllocated != 0); -+ PVR_ASSERT(psPageArrayData->iNumChunksAllocated == -+ psPageArrayData->uiTotalNumChunks); -+ -+ if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_POISON_ON_FREE)) -+ { -+ eError = _PhysPgMemSet(psPageArrayData, -+ psPageArrayData->aBaseArray, -+ uiPhysSize, -+ PVRSRV_POISON_ON_FREE_VALUE); -+ PVR_LOG_IF_ERROR(eError, "_PhysPgMemSet"); -+ } -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ PVRSRVStatsDecrMemAllocStat(uiStat, -+ uiPhysSize, -+ psPageArrayData->uiPid); -+#else -+ if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_PHYS_CONTIG)) -+ { -+ PVRSRVStatsRemoveMemAllocRecord(uiStat, -+ (IMG_UINT64) psPageArrayData->aBaseArray[0], -+ psPageArrayData->uiPid); -+ } -+ else -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psPageArrayData->uiTotalNumChunks; i++) -+ { -+ if (RA_BASE_IS_REAL(psPageArrayData->aBaseArray[i])) -+ { -+ PVRSRVStatsRemoveMemAllocRecord(uiStat, -+ (IMG_UINT64) psPageArrayData->aBaseArray[i], -+ psPageArrayData->uiPid); -+ } -+ } -+ } -+#endif -+#endif -+ -+ if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_PHYS_CONTIG)) -+ { -+ eError = RA_FreeMulti(pArena, -+ psPageArrayData->aBaseArray, -+ 1); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RA_FreeMulti"); -+ } -+ else -+ { -+ eError = RA_FreeMulti(pArena, -+ psPageArrayData->aBaseArray, -+ psPageArrayData->iNumChunksAllocated); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RA_FreeMulti"); -+ } -+ -+ psPageArrayData->iNumChunksAllocated = 0; -+ -+ PVR_ASSERT(0 <= psPageArrayData->iNumChunksAllocated); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: freed %"IMG_UINT64_FMTSPEC" local memory for PMR @0x%p", -+ __func__, -+ uiPhysSize, -+ psPageArrayData)); -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR -+_FreeLMPagesRemainingSparse(PMR_LMALLOCARRAY_DATA *psPageArrayData) -+{ -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiChunkSize = 1ULL << psPageArrayData->uiLog2ChunkSize; -+ IMG_UINT32 ui32Flags = psPageArrayData->ui32Flags; -+ IMG_BOOL bPoisonOnFree = (BIT_ISSET(ui32Flags, FLAG_POISON_ON_FREE)); -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ IMG_UINT32 uiStat = PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES; -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ if 
(BIT_ISSET(psPageArrayData->ui32Flags, FLAG_ZOMBIE)) -+ { -+ uiStat = PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES; -+ } -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ PVRSRVStatsDecrMemAllocStat(uiStat, -+ psPageArrayData->iNumChunksAllocated << psPageArrayData->uiLog2ChunkSize, -+ psPageArrayData->uiPid); -+#endif -+ -+ for (i = 0; i < psPageArrayData->uiTotalNumChunks;) -+ { -+ if (RA_BASE_IS_REAL(psPageArrayData->aBaseArray[i])) -+ { -+ IMG_UINT32 j; -+ IMG_UINT32 ui32AccumulatedChunks = 1; -+ -+ for (j = i; -+ j + 1 != psPageArrayData->uiTotalNumChunks && -+ RA_BASE_IS_GHOST(psPageArrayData->aBaseArray[j + 1]); -+ j++) -+ { -+ ui32AccumulatedChunks++; -+ } -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) -+ for (j = i; j < (i + ui32AccumulatedChunks); j++) -+ { -+ PVRSRVStatsRemoveMemAllocRecord(uiStat, -+ RA_BASE_STRIP_GHOST_BIT(psPageArrayData->aBaseArray[j]), -+ psPageArrayData->uiPid); -+ if (bPoisonOnFree) -+#else -+ for (j = i; j < (i + ui32AccumulatedChunks) && bPoisonOnFree; j++) -+ { -+#endif -+ { -+ eError = _PhysPgMemSet(psPageArrayData, -+ &psPageArrayData->aBaseArray[j], -+ uiChunkSize, -+ PVRSRV_POISON_ON_FREE_VALUE); -+ PVR_LOG_IF_ERROR(eError, "_PhysPgMemSet"); -+ } -+ } -+ -+ eError = RA_FreeMulti(psPageArrayData->psArena, -+ &psPageArrayData->aBaseArray[i], -+ ui32AccumulatedChunks); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RA_FreeMulti"); -+ -+ psPageArrayData->iNumChunksAllocated -= ui32AccumulatedChunks; -+ i += ui32AccumulatedChunks; -+ } -+ else if (RA_BASE_IS_INVALID(psPageArrayData->aBaseArray[i])) -+ { -+ i++; -+ } -+ } -+ -+ /* We have freed all allocations in the previous loop */ -+ PVR_ASSERT(0 <= psPageArrayData->iNumChunksAllocated); -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR -+_FreeLMPagesSparse(PMR_LMALLOCARRAY_DATA *psPageArrayData, -+ IMG_UINT32 *pui32FreeIndices, -+ IMG_UINT32 ui32FreeChunkCount) -+{ -+ RA_ARENA *pArena = psPageArrayData->psArena; -+ IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize; -+ IMG_UINT32 uiChunkSize = 1ULL << uiLog2ChunkSize; -+ IMG_UINT32 ui32Flags = psPageArrayData->ui32Flags; -+ IMG_UINT32 uiActualFreeCount = ui32FreeChunkCount; -+ PVRSRV_ERROR eError; -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ IMG_UINT32 uiStat = PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES; -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_ZOMBIE)) -+ { -+ uiStat = PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES; -+ } -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ -+ -+ PVR_ASSERT(psPageArrayData->iNumChunksAllocated != 0); -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < ui32FreeChunkCount; i++) -+ { -+ IMG_UINT32 ui32Index = pui32FreeIndices[i]; -+ -+ PVRSRVStatsRemoveMemAllocRecord(uiStat, -+ (IMG_UINT64) RA_BASE_STRIP_GHOST_BIT( -+ psPageArrayData->aBaseArray[ui32Index]), -+ psPageArrayData->uiPid); -+ } -+ } -+#endif -+ -+ if (BIT_ISSET(ui32Flags, FLAG_POISON_ON_FREE)) -+ { -+ IMG_UINT32 i, ui32Index = 0; -+ for (i = 0; i < ui32FreeChunkCount; i++) -+ { -+ ui32Index = pui32FreeIndices[i]; -+ -+ eError = _PhysPgMemSet(psPageArrayData, -+ &psPageArrayData->aBaseArray[ui32Index], -+ uiChunkSize, -+ PVRSRV_POISON_ON_FREE_VALUE); -+ PVR_LOG_IF_ERROR(eError, "_PhysPgMemSet"); -+ } -+ } -+ -+ eError = 
RA_FreeMultiSparse(pArena, -+ psPageArrayData->aBaseArray, -+ psPageArrayData->uiTotalNumChunks, -+ uiLog2ChunkSize, -+ pui32FreeIndices, -+ &uiActualFreeCount); -+ psPageArrayData->iNumChunksAllocated -= uiActualFreeCount; -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ PVRSRVStatsDecrMemAllocStat(uiStat, -+ uiActualFreeCount << psPageArrayData->uiLog2ChunkSize, -+ psPageArrayData->uiPid); -+#endif -+ if (eError == PVRSRV_ERROR_RA_FREE_INVALID_CHUNK) -+ { -+ /* Log the RA error but convert it to PMR level to match the interface, -+ * this is important because other PMR factories may not use the RA but -+ * still return error, returning a PMR based error -+ * keeps the interface agnostic to implementation behaviour. -+ */ -+ PVR_LOG_IF_ERROR(eError, "RA_FreeMultiSparse"); -+ return PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK; -+ } -+ PVR_LOG_RETURN_IF_ERROR(eError, "RA_FreeMultiSparse"); -+ -+ PVR_ASSERT(0 <= psPageArrayData->iNumChunksAllocated); -+ -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: freed %d local memory for PMR @0x%p", -+ __func__, -+ (uiActualFreeCount * uiChunkSize), -+ psPageArrayData)); -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR -+_FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, -+ IMG_UINT32 *pui32FreeIndices, -+ IMG_UINT32 ui32FreeChunkCount) -+{ -+ PVRSRV_ERROR eError; -+ -+ if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_SPARSE)) -+ { -+ if (!pui32FreeIndices) -+ { -+ eError = _FreeLMPagesRemainingSparse(psPageArrayData); -+ } -+ else -+ { -+ eError = _FreeLMPagesSparse(psPageArrayData, pui32FreeIndices, ui32FreeChunkCount); -+ } -+ } -+ else -+ { -+ eError = _FreeLMPagesContig(psPageArrayData); -+ } -+ -+ return eError; -+} -+ -+/* -+ * -+ * Implementation of callback functions -+ * -+ */ -+ -+/* destructor func is called after last reference disappears, but -+ before PMR itself is freed. */ -+static void -+PMRFinalizeLocalMem(PMR_IMPL_PRIVDATA pvPriv) -+{ -+ PVRSRV_ERROR eError; -+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv; -+ -+ /* We can't free pages until now. */ -+ if (psLMAllocArrayData->iNumChunksAllocated != 0) -+ { -+#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__) -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ IMG_UINT32 ui32LMALeakMax = psPVRSRVData->sMemLeakIntervals.ui32GPU; -+ -+ mutex_lock(&g_sLMALeakMutex); -+ -+ g_ui32LMALeakCounter++; -+ if (ui32LMALeakMax && g_ui32LMALeakCounter >= ui32LMALeakMax) -+ { -+ g_ui32LMALeakCounter = 0; -+ mutex_unlock(&g_sLMALeakMutex); -+ -+ PVR_DPF((PVR_DBG_WARNING, "%s: Skipped freeing of PMR 0x%p to trigger memory leak.", __func__, pvPriv)); -+ return; -+ } -+ -+ mutex_unlock(&g_sLMALeakMutex); -+#endif -+ eError = _FreeLMPages(psLMAllocArrayData, NULL, 0); -+ PVR_LOG_IF_ERROR(eError, "_FreeLMPages"); -+ PVR_ASSERT (eError == PVRSRV_OK); -+ } -+ -+ _FreeLMPageArray(psLMAllocArrayData); -+} -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+static PVRSRV_ERROR PMRZombifyLocalMem(PMR_IMPL_PRIVDATA pvPriv, PMR *psPMR) -+{ -+ PMR_LMALLOCARRAY_DATA *psPageArrayData = pvPriv; -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize; -+ IMG_PID uiPid = psPageArrayData->uiPid; -+#endif -+ -+ BIT_SET(psPageArrayData->ui32Flags, FLAG_ZOMBIE); -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ { -+ IMG_UINT64 uiSize = BIT_ISSET(psPageArrayData->ui32Flags, FLAG_SPARSE) ? 
-+ (IMG_UINT64) psPageArrayData->iNumChunksAllocated << uiLog2ChunkSize : -+ (IMG_UINT64) psPageArrayData->uiTotalNumChunks << uiLog2ChunkSize; -+ -+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiSize, uiPid); -+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES, uiSize, uiPid); -+ } -+#else /* !defined(PVRSRV_ENABLE_MEMORY_STATS) */ -+ if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_SPARSE)) -+ { -+ /* _FreeLMPagesRemainingSparse path */ -+ -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psPageArrayData->uiTotalNumChunks; i++) -+ { -+ if (RA_BASE_IS_REAL(psPageArrayData->aBaseArray[i])) -+ { -+ IMG_UINT32 j; -+ IMG_UINT32 ui32AccumulatedChunks = 1; -+ -+ for (j = i; -+ j + 1 != psPageArrayData->uiTotalNumChunks && -+ RA_BASE_IS_GHOST(psPageArrayData->aBaseArray[j + 1]); -+ j++) -+ { -+ ui32AccumulatedChunks++; -+ } -+ -+ for (j = i; j < (i + ui32AccumulatedChunks); j++) -+ { -+ IMG_CPU_PHYADDR sCpuPAddr = { -+ .uiAddr = RA_BASE_STRIP_GHOST_BIT(psPageArrayData->aBaseArray[j]), -+ }; -+ -+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, -+ sCpuPAddr.uiAddr, -+ uiPid); -+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES, -+ NULL, -+ sCpuPAddr, -+ 1ULL << uiLog2ChunkSize, -+ uiPid -+ DEBUG_MEMSTATS_VALUES); -+ } -+ } -+ } -+ } -+ else -+ { -+ /* _FreeLMPagesContig path */ -+ -+ if (BIT_ISSET(psPageArrayData->ui32Flags, FLAG_PHYS_CONTIG)) -+ { -+ IMG_CPU_PHYADDR sCpuPAddr = { -+ .uiAddr = psPageArrayData->aBaseArray[0] -+ }; -+ -+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, -+ psPageArrayData->aBaseArray[0], -+ uiPid); -+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES, -+ NULL, -+ sCpuPAddr, -+ psPageArrayData->uiTotalNumChunks << uiLog2ChunkSize, -+ uiPid -+ DEBUG_MEMSTATS_VALUES); -+ } -+ else -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psPageArrayData->uiTotalNumChunks; i++) -+ { -+ if (RA_BASE_IS_REAL(psPageArrayData->aBaseArray[i])) -+ { -+ IMG_CPU_PHYADDR sCpuPAddr = { -+ .uiAddr = RA_BASE_STRIP_GHOST_BIT(psPageArrayData->aBaseArray[i]), -+ }; -+ -+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, -+ psPageArrayData->aBaseArray[i], -+ uiPid); -+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES, -+ NULL, -+ sCpuPAddr, -+ 1ULL << psPageArrayData->uiLog2ChunkSize, -+ uiPid -+ DEBUG_MEMSTATS_VALUES); -+ } -+ } -+ -+ } -+ } -+#endif /* !defined(PVRSRV_ENABLE_MEMORY_STATS) */ -+#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ -+ -+ PVR_UNREFERENCED_PARAMETER(psPMR); -+ -+ return PVRSRV_OK; -+} -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+/* callback function for locking the system physical page addresses. -+ As we are LMA there is nothing to do as we control physical memory. 
*/ -+static PVRSRV_ERROR -+PMRLockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv) -+{ -+ -+ PVRSRV_ERROR eError; -+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData; -+ -+ psLMAllocArrayData = pvPriv; -+ -+ if (BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_ONDEMAND)) -+ { -+ /* Allocate Memory for deferred allocation */ -+ eError = _AllocLMPages(psLMAllocArrayData, NULL); -+ PVR_RETURN_IF_ERROR(eError); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR -+PMRUnlockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData; -+ -+ psLMAllocArrayData = pvPriv; -+ -+ if (BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_ONDEMAND)) -+ { -+ /* Free Memory for deferred allocation */ -+ eError = _FreeLMPages(psLMAllocArrayData, NULL, 0); -+ PVR_RETURN_IF_ERROR(eError); -+ } -+ -+ PVR_ASSERT(eError == PVRSRV_OK); -+ return eError; -+} -+ -+/* N.B. It is assumed that PMRLockSysPhysAddressesLocalMem() is called _before_ this function! */ -+static PVRSRV_ERROR -+PMRSysPhysAddrLocalMem(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32NumOfPages, -+ IMG_DEVMEM_OFFSET_T *puiOffset, -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ IMG_UINT64 ui64IPAPolicyValue, -+ IMG_UINT64 ui64IPAClearMask, -+#endif -+ IMG_BOOL *pbValid, -+ IMG_DEV_PHYADDR *psDevPAddr) -+{ -+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv; -+ IMG_UINT32 idx; -+ IMG_UINT32 uiLog2AllocSize; -+ IMG_UINT64 uiAllocIndex; -+ IMG_DEVMEM_OFFSET_T uiInAllocOffset; -+ IMG_UINT32 uiNumAllocs = psLMAllocArrayData->uiTotalNumChunks; -+ -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ PVR_UNREFERENCED_PARAMETER(ui64IPAPolicyValue); -+ PVR_UNREFERENCED_PARAMETER(ui64IPAClearMask); -+#endif -+ -+ if (psLMAllocArrayData->uiLog2ChunkSize < ui32Log2PageSize) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Requested physical addresses from PMR " -+ "for incompatible contiguity %u!", -+ __func__, -+ ui32Log2PageSize)); -+ return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY; -+ } -+ -+ PVR_ASSERT(psLMAllocArrayData->uiLog2ChunkSize != 0); -+ PVR_ASSERT(ui32Log2PageSize >= RA_BASE_FLAGS_LOG2); -+ -+ if (BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_PHYS_CONTIG)) -+ { -+ for (idx=0; idx < ui32NumOfPages; idx++) -+ { -+ if (pbValid[idx]) -+ { -+ psDevPAddr[idx].uiAddr = psLMAllocArrayData->aBaseArray[0] + puiOffset[idx]; -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ /* Modify the physical address with the associated IPA values */ -+ psDevPAddr[idx].uiAddr &= ~ui64IPAClearMask; -+ psDevPAddr[idx].uiAddr |= ui64IPAPolicyValue; -+#endif -+ } -+ } -+ } -+ else -+ { -+ uiLog2AllocSize = psLMAllocArrayData->uiLog2ChunkSize; -+ -+ for (idx=0; idx < ui32NumOfPages; idx++) -+ { -+ if (pbValid[idx]) -+ { -+ uiAllocIndex = puiOffset[idx] >> uiLog2AllocSize; -+ uiInAllocOffset = puiOffset[idx] - (uiAllocIndex << uiLog2AllocSize); -+ -+ PVR_LOG_RETURN_IF_FALSE(uiAllocIndex < uiNumAllocs, -+ "puiOffset out of range", PVRSRV_ERROR_OUT_OF_RANGE); -+ -+ PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2AllocSize)); -+ -+ /* The base may or may not be a ghost base, but we don't care, -+ * we just need the real representation of the base. 
-+ */ -+ psDevPAddr[idx].uiAddr = RA_BASE_STRIP_GHOST_BIT( -+ psLMAllocArrayData->aBaseArray[uiAllocIndex]) + uiInAllocOffset; -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ /* Modify the physical address with the associated IPA values */ -+ psDevPAddr[idx].uiAddr &= ~ui64IPAClearMask; -+ psDevPAddr[idx].uiAddr |= ui64IPAPolicyValue; -+#endif -+ } -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR -+PMRAcquireKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv, -+ size_t uiOffset, -+ size_t uiSize, -+ void **ppvKernelAddressOut, -+ IMG_HANDLE *phHandleOut, -+ PMR_FLAGS_T ulFlags) -+{ -+ PVRSRV_ERROR eError; -+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv; -+ PMR_KERNEL_MAPPING *psKernelMapping; -+ RA_BASE_T *paBaseArray; -+ IMG_UINT32 ui32ChunkIndex = 0; -+ size_t uiOffsetMask = uiOffset; -+ -+ IMG_UINT32 uiLog2ChunkSize = psLMAllocArrayData->uiLog2ChunkSize; -+ IMG_UINT64 uiChunkSize = 1ULL << uiLog2ChunkSize; -+ IMG_UINT64 uiPhysSize; -+ -+ PVR_ASSERT(psLMAllocArrayData); -+ PVR_ASSERT(ppvKernelAddressOut); -+ PVR_ASSERT(phHandleOut); -+ -+ if (BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_SPARSE)) -+ { -+ IMG_UINT32 i; -+ /* Locate the desired physical chunk to map in */ -+ ui32ChunkIndex = uiOffset >> psLMAllocArrayData->uiLog2ChunkSize; -+ -+ if (OSIsMapPhysNonContigSupported()) -+ { -+ /* If a size hasn't been supplied assume we are mapping a single page */ -+ IMG_UINT32 uiNumChunksToMap; -+ -+ /* This is to support OSMapPMR originated parameters */ -+ if (uiOffset == 0 && uiSize == 0) -+ { -+ uiNumChunksToMap = psLMAllocArrayData->iNumChunksAllocated; -+ } -+ else -+ { -+ uiNumChunksToMap = uiSize >> psLMAllocArrayData->uiLog2ChunkSize; -+ } -+ -+ /* Check we are attempting to map at least a chunk in size */ -+ if (uiNumChunksToMap < 1) -+ { -+ PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_INVALID_PARAMS, "uiNumChunksToMap < 1"); -+ } -+ -+ /* Check contiguous region doesn't exceed size of PMR */ -+ if (ui32ChunkIndex + (uiNumChunksToMap - 1) > psLMAllocArrayData->uiTotalNumChunks) -+ { -+ PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_INVALID_PARAMS, -+ "Mapping range exceeds total num chunks in PMR"); -+ } -+ -+ /* Check the virtually contiguous region given is physically backed */ -+ for (i = ui32ChunkIndex; i < ui32ChunkIndex + uiNumChunksToMap; i++) -+ { -+ if (RA_BASE_IS_INVALID(psLMAllocArrayData->aBaseArray[i])) -+ { -+ PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY, "Sparse contiguity check"); -+ } -+ } -+ /* Size of virtually contiguous sparse alloc */ -+ uiPhysSize = (IMG_UINT64) uiNumChunksToMap << psLMAllocArrayData->uiLog2ChunkSize; -+ } -+ else -+ { -+ size_t uiStart = uiOffset; -+ size_t uiEnd = uiOffset + uiSize - 1; -+ size_t uiChunkMask = ~((1 << psLMAllocArrayData->uiLog2ChunkSize) - 1); -+ -+ /* We can still map if only one chunk is required */ -+ if ((uiStart & uiChunkMask) != (uiEnd & uiChunkMask)) -+ { -+ PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY, "Sparse contiguity check"); -+ } -+ /* Map a single chunk */ -+ uiPhysSize = uiChunkSize; -+ } -+ -+ paBaseArray = &psLMAllocArrayData->aBaseArray[ui32ChunkIndex]; -+ -+ /* Offset mask to be used for address offsets within a chunk */ -+ uiOffsetMask = (1U << psLMAllocArrayData->uiLog2ChunkSize) - 1; -+ } -+ else -+ { -+ paBaseArray = psLMAllocArrayData->aBaseArray; -+ uiPhysSize = (IMG_UINT64) psLMAllocArrayData->uiTotalNumChunks << uiLog2ChunkSize; -+ } -+ -+ PVR_ASSERT(ui32ChunkIndex < psLMAllocArrayData->uiTotalNumChunks); -+ -+ psKernelMapping = 
OSAllocMem(sizeof(*psKernelMapping)); -+ PVR_RETURN_IF_NOMEM(psKernelMapping); -+ -+ eError = _MapPMRKernel(psLMAllocArrayData, -+ paBaseArray, -+ uiPhysSize, -+ ulFlags, -+ psKernelMapping); -+ if (eError == PVRSRV_OK) -+ { -+ /* uiOffset & uiOffsetMask is used to get the kernel addr within the page */ -+ *ppvKernelAddressOut = ((IMG_CHAR *) psKernelMapping->pvKernelAddress) + (uiOffset & uiOffsetMask); -+ *phHandleOut = psKernelMapping; -+ } -+ else -+ { -+ OSFreeMem(psKernelMapping); -+ PVR_LOG_ERROR(eError, "_MapPMRKernel"); -+ } -+ -+ return eError; -+} -+ -+static void PMRReleaseKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_HANDLE hHandle) -+{ -+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = (PMR_LMALLOCARRAY_DATA *) pvPriv; -+ PMR_KERNEL_MAPPING *psKernelMapping = (PMR_KERNEL_MAPPING *) hHandle; -+ -+ PVR_ASSERT(psLMAllocArrayData); -+ PVR_ASSERT(psKernelMapping); -+ -+ _UnMapPMRKernel(psLMAllocArrayData, -+ psKernelMapping); -+ -+ OSFreeMem(psKernelMapping); -+} -+ -+static PVRSRV_ERROR -+CopyBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT8 *pcBuffer, -+ size_t uiBufSz, -+ size_t *puiNumBytes, -+ void (*pfnCopyBytes)(IMG_UINT8 *pcBuffer, -+ IMG_UINT8 *pcPMR, -+ size_t uiSize)) -+{ -+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv; -+ size_t uiBytesCopied; -+ size_t uiBytesToCopy; -+ size_t uiBytesCopyableFromAlloc; -+ PMR_KERNEL_MAPPING sMapping; -+ IMG_UINT8 *pcKernelPointer = NULL; -+ size_t uiBufferOffset; -+ IMG_UINT64 uiAllocIndex; -+ IMG_DEVMEM_OFFSET_T uiInAllocOffset; -+ IMG_UINT32 uiLog2ChunkSize = psLMAllocArrayData->uiLog2ChunkSize; -+ IMG_UINT64 uiChunkSize = 1ULL << uiLog2ChunkSize; -+ IMG_UINT64 uiPhysSize; -+ PVRSRV_ERROR eError; -+ -+ uiBytesCopied = 0; -+ uiBytesToCopy = uiBufSz; -+ uiBufferOffset = 0; -+ -+ if (BIT_ISSET(psLMAllocArrayData->ui32Flags, FLAG_SPARSE)) -+ { -+ while (uiBytesToCopy > 0) -+ { -+ /* we have to map one alloc in at a time */ -+ PVR_ASSERT(psLMAllocArrayData->uiLog2ChunkSize != 0); -+ uiAllocIndex = uiOffset >> psLMAllocArrayData->uiLog2ChunkSize; -+ uiInAllocOffset = uiOffset - (uiAllocIndex << psLMAllocArrayData->uiLog2ChunkSize); -+ uiBytesCopyableFromAlloc = uiBytesToCopy; -+ if (uiBytesCopyableFromAlloc + uiInAllocOffset > (1ULL << psLMAllocArrayData->uiLog2ChunkSize)) -+ { -+ uiBytesCopyableFromAlloc = TRUNCATE_64BITS_TO_SIZE_T((1ULL << psLMAllocArrayData->uiLog2ChunkSize)-uiInAllocOffset); -+ } -+ /* Mapping a single chunk at a time */ -+ uiPhysSize = uiChunkSize; -+ -+ PVR_ASSERT(uiBytesCopyableFromAlloc != 0); -+ PVR_ASSERT(uiAllocIndex < psLMAllocArrayData->uiTotalNumChunks); -+ PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2ChunkSize)); -+ -+ eError = _MapPMRKernel(psLMAllocArrayData, -+ &psLMAllocArrayData->aBaseArray[uiAllocIndex], -+ uiPhysSize, -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC, -+ &sMapping); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ pcKernelPointer = sMapping.pvKernelAddress; -+ pfnCopyBytes(&pcBuffer[uiBufferOffset], &pcKernelPointer[uiInAllocOffset], uiBytesCopyableFromAlloc); -+ -+ _UnMapPMRKernel(psLMAllocArrayData, -+ &sMapping); -+ -+ uiBufferOffset += uiBytesCopyableFromAlloc; -+ uiBytesToCopy -= uiBytesCopyableFromAlloc; -+ uiOffset += uiBytesCopyableFromAlloc; -+ uiBytesCopied += uiBytesCopyableFromAlloc; -+ } -+ } -+ else -+ { -+ uiPhysSize = (IMG_UINT64) psLMAllocArrayData->uiTotalNumChunks << uiLog2ChunkSize; -+ PVR_ASSERT((uiOffset + uiBufSz) <= uiPhysSize); -+ PVR_ASSERT(uiChunkSize != 0); -+ eError = _MapPMRKernel(psLMAllocArrayData, -+ 
psLMAllocArrayData->aBaseArray, -+ uiPhysSize, -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC, -+ &sMapping); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ pcKernelPointer = sMapping.pvKernelAddress; -+ pfnCopyBytes(pcBuffer, &pcKernelPointer[uiOffset], uiBufSz); -+ -+ _UnMapPMRKernel(psLMAllocArrayData, -+ &sMapping); -+ -+ uiBytesCopied = uiBufSz; -+ } -+ *puiNumBytes = uiBytesCopied; -+ return PVRSRV_OK; -+e0: -+ *puiNumBytes = uiBytesCopied; -+ return eError; -+} -+ -+static void ReadLocalMem(IMG_UINT8 *pcBuffer, -+ IMG_UINT8 *pcPMR, -+ size_t uiSize) -+{ -+ /* the memory is mapped as WC (and also aligned to page size) so we can -+ * safely call "Cached" memcpy */ -+ OSCachedMemCopy(pcBuffer, pcPMR, uiSize); -+} -+ -+static PVRSRV_ERROR -+PMRReadBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT8 *pcBuffer, -+ size_t uiBufSz, -+ size_t *puiNumBytes) -+{ -+ return CopyBytesLocalMem(pvPriv, -+ uiOffset, -+ pcBuffer, -+ uiBufSz, -+ puiNumBytes, -+ ReadLocalMem); -+} -+ -+static void WriteLocalMem(IMG_UINT8 *pcBuffer, -+ IMG_UINT8 *pcPMR, -+ size_t uiSize) -+{ -+ /* the memory is mapped as WC (and also aligned to page size) so we can -+ * safely call "Cached" memcpy but need to issue a write memory barrier -+ * to flush the write buffers after */ -+ OSCachedMemCopyWMB(pcPMR, pcBuffer, uiSize); -+} -+ -+static PVRSRV_ERROR -+PMRWriteBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT8 *pcBuffer, -+ size_t uiBufSz, -+ size_t *puiNumBytes) -+{ -+ return CopyBytesLocalMem(pvPriv, -+ uiOffset, -+ pcBuffer, -+ uiBufSz, -+ puiNumBytes, -+ WriteLocalMem); -+} -+ -+/*************************************************************************/ /*! -+@Function PMRChangeSparseMemLocalMem -+@Description This function Changes the sparse mapping by allocating and -+ freeing of pages. It also changes the GPU maps accordingly. -+@Return PVRSRV_ERROR failure code -+*/ /**************************************************************************/ -+static PVRSRV_ERROR -+PMRChangeSparseMemLocalMem(PMR_IMPL_PRIVDATA pPriv, -+ const PMR *psPMR, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *pai32AllocIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pai32FreeIndices, -+ IMG_UINT32 uiFlags) -+{ -+ PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ IMG_UINT32 ui32AdtnlAllocPages = 0; -+ IMG_UINT32 ui32AdtnlFreePages = 0; -+ IMG_UINT32 ui32CommonRequstCount = 0; -+ IMG_UINT32 ui32Loop = 0; -+ IMG_UINT32 ui32Index = 0; -+ IMG_UINT32 uiAllocpgidx; -+ IMG_UINT32 uiFreepgidx; -+ -+ PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv; -+ IMG_UINT32 uiLog2ChunkSize = psPMRPageArrayData->uiLog2ChunkSize; -+ IMG_UINT32 uiChunkSize = 1ULL << uiLog2ChunkSize; -+ -+#if defined(DEBUG) -+ IMG_BOOL bPoisonFail = IMG_FALSE; -+ IMG_BOOL bZeroFail = IMG_FALSE; -+#endif -+ -+ /* Fetch the Page table array represented by the PMR */ -+ RA_BASE_T *paBaseArray = psPMRPageArrayData->aBaseArray; -+ PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappingTable(psPMR); -+ -+ /* The incoming request is classified into two operations independent of -+ * each other: alloc & free chunks. -+ * These operations can be combined with two mapping operations as well -+ * which are GPU & CPU space mappings. -+ * -+ * From the alloc and free chunk requests, the net amount of chunks to be -+ * allocated or freed is computed. Chunks that were requested to be freed -+ * will be reused to fulfil alloc requests. -+ * -+ * The order of operations is: -+ * 1. Allocate new Chunks. 
-+ * 2. Move the free chunks from free request to alloc positions. -+ * 3. Free the rest of the chunks not used for alloc -+ * -+ * Alloc parameters are validated at the time of allocation -+ * and any error will be handled then. */ -+ -+ if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH)) -+ { -+ ui32CommonRequstCount = (ui32AllocPageCount > ui32FreePageCount) ? -+ ui32FreePageCount : ui32AllocPageCount; -+ -+ PDUMP_PANIC(PMR_DeviceNode(psPMR), SPARSEMEM_SWAP, "Request to swap alloc & free chunks not supported"); -+ } -+ -+ if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC)) -+ { -+ ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequstCount; -+ } -+ else -+ { -+ ui32AllocPageCount = 0; -+ } -+ -+ if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE)) -+ { -+ ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequstCount; -+ } -+ else -+ { -+ ui32FreePageCount = 0; -+ } -+ -+ PVR_LOG_RETURN_IF_FALSE( -+ (ui32CommonRequstCount | ui32AdtnlAllocPages | ui32AdtnlFreePages) != 0, -+ "Invalid combination of parameters: ui32CommonRequstCount," -+ " ui32AdtnlAllocPages and ui32AdtnlFreePages.", -+ PVRSRV_ERROR_INVALID_PARAMS -+ ); -+ -+ { -+ /* Validate the free page indices */ -+ if (ui32FreePageCount) -+ { -+ if (pai32FreeIndices != NULL) -+ { -+ for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++) -+ { -+ uiFreepgidx = pai32FreeIndices[ui32Loop]; -+ -+ if (uiFreepgidx >= psPMRPageArrayData->uiTotalNumChunks) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0); -+ } -+ -+ if (RA_BASE_IS_INVALID(paBaseArray[uiFreepgidx])) -+ { -+ PVR_LOG_GOTO_WITH_ERROR("paBaseArray[uiFreepgidx]", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); -+ } -+ } -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Given non-zero free count but missing indices array", -+ __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ } -+ -+ /* The following block of code verifies any issues with common alloc chunk indices */ -+ for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++) -+ { -+ uiAllocpgidx = pai32AllocIndices[ui32Loop]; -+ if (uiAllocpgidx >= psPMRPageArrayData->uiTotalNumChunks) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0); -+ } -+ -+ if ((!RA_BASE_IS_INVALID(paBaseArray[uiAllocpgidx])) || -+ (psPMRMapTable->aui32Translation[uiAllocpgidx] != TRANSLATION_INVALID)) -+ { -+ PVR_LOG_GOTO_WITH_ERROR("Trying to allocate already allocated page again", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); -+ } -+ } -+ -+ ui32Loop = 0; -+ -+ /* Allocate new chunks */ -+ if (0 != ui32AdtnlAllocPages) -+ { -+ /* Say how many chunks to allocate */ -+ psPMRPageArrayData->uiChunksToAlloc = ui32AdtnlAllocPages; -+ -+ eError = _AllocLMPages(psPMRPageArrayData, pai32AllocIndices); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_AllocLMPages", e0); -+ -+ /* Mark the corresponding chunks of translation table as valid */ -+ for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++) -+ { -+ psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop]; -+ } -+ -+ psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages; -+ } -+ -+ ui32Index = ui32Loop; -+ ui32Loop = 0; -+ -+ /* Move the corresponding free chunks to alloc request */ -+ eError = RA_SwapSparseMem(psPMRPageArrayData->psArena, -+ paBaseArray, -+ psPMRPageArrayData->uiTotalNumChunks, -+ psPMRPageArrayData->uiLog2ChunkSize, -+ &pai32AllocIndices[ui32Index], -+ &pai32FreeIndices[ui32Loop], -+ ui32CommonRequstCount); -+ PVR_LOG_GOTO_IF_ERROR(eError, 
"RA_SwapSparseMem", unwind_alloc); -+ -+ for (ui32Loop = 0; ui32Loop < ui32CommonRequstCount; ui32Loop++, ui32Index++) -+ { -+ uiAllocpgidx = pai32AllocIndices[ui32Index]; -+ uiFreepgidx = pai32FreeIndices[ui32Loop]; -+ -+ psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID; -+ psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; -+ -+ /* Be sure to honour the attributes associated with the allocation -+ * such as zeroing, poisoning etc. */ -+ if (BIT_ISSET(psPMRPageArrayData->ui32Flags, FLAG_POISON_ON_ALLOC)) -+ { -+ eError = _PhysPgMemSet(psPMRPageArrayData, -+ &psPMRPageArrayData->aBaseArray[uiAllocpgidx], -+ uiChunkSize, -+ PVRSRV_POISON_ON_ALLOC_VALUE); -+ -+ /* Consider this as a soft failure and go ahead but log error to kernel log */ -+ if (eError != PVRSRV_OK) -+ { -+#if defined(DEBUG) -+ bPoisonFail = IMG_TRUE; -+#endif -+ } -+ } -+ -+ if (BIT_ISSET(psPMRPageArrayData->ui32Flags, FLAG_ZERO)) -+ { -+ eError = _PhysPgMemSet(psPMRPageArrayData, -+ &psPMRPageArrayData->aBaseArray[uiAllocpgidx], -+ uiChunkSize, -+ ZERO_PAGE_VALUE); -+ /* Consider this as a soft failure and go ahead but log error to kernel log */ -+ if (eError != PVRSRV_OK) -+ { -+#if defined(DEBUG) -+ /* Don't think we need to zero any chunks further */ -+ bZeroFail = IMG_TRUE; -+#endif -+ } -+ } -+ } -+ -+ /* Free the additional free chunks */ -+ if (0 != ui32AdtnlFreePages) -+ { -+ ui32Index = ui32Loop; -+ eError = _FreeLMPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_FreeLMPages", e0); -+ -+ ui32Loop = 0; -+ -+ while (ui32Loop++ < ui32AdtnlFreePages) -+ { -+ /* Set the corresponding mapping table entry to invalid address */ -+ psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Index++]] = TRANSLATION_INVALID; -+ } -+ -+ psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages; -+ } -+ } -+ -+#if defined(DEBUG) -+ if (IMG_TRUE == bPoisonFail) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Error in poisoning the chunk", __func__)); -+ } -+ -+ if (IMG_TRUE == bZeroFail) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Error in zeroing the chunk", __func__)); -+ } -+#endif -+ -+ return PVRSRV_OK; -+ -+unwind_alloc: -+ _FreeLMPages(psPMRPageArrayData, pai32AllocIndices, ui32Index); -+ -+ for (ui32Loop = 0; ui32Loop < ui32Index; ui32Loop++) -+ { -+ psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = TRANSLATION_INVALID; -+ } -+ -+e0: -+ return eError; -+} -+ -+/*************************************************************************/ /*! 
-+@Function PMRChangeSparseMemCPUMapLocalMem -+@Description This function Changes CPU maps accordingly -+@Return PVRSRV_ERROR failure code -+*/ /**************************************************************************/ -+static -+PVRSRV_ERROR PMRChangeSparseMemCPUMapLocalMem(PMR_IMPL_PRIVDATA pPriv, -+ const PMR *psPMR, -+ IMG_UINT64 sCpuVAddrBase, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *pai32AllocIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pai32FreeIndices) -+{ -+ PVRSRV_ERROR eError; -+ IMG_DEV_PHYADDR *psPageArray; -+ PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv; -+ uintptr_t sCpuVABase = sCpuVAddrBase; -+ IMG_CPU_PHYADDR sCpuAddrPtr; -+ IMG_BOOL bValid = IMG_FALSE; -+ IMG_UINT32 i; -+ -+ /* Get the base address of the heap */ -+ eError = PMR_CpuPhysAddr(psPMR, -+ psPMRPageArrayData->uiLog2ChunkSize, -+ 1, -+ 0, /* offset zero here mean first page in the PMR */ -+ &sCpuAddrPtr, -+ &bValid); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PMR_CpuPhysAddr"); -+ -+ /* Phys address of heap is computed here by subtracting the offset of this page -+ * basically phys address of any page = Base address of heap + offset of the page */ -+ sCpuAddrPtr.uiAddr -= RA_BASE_STRIP_GHOST_BIT(psPMRPageArrayData->aBaseArray[0]); -+ -+ /* We still have ghost bits in the base array, this interface expects true page -+ * addresses so we need to pre mask / translate the base array -+ */ -+ psPageArray = OSAllocMem(sizeof(IMG_DEV_PHYADDR)* -+ psPMRPageArrayData->uiTotalNumChunks); -+ PVR_LOG_RETURN_IF_NOMEM(psPageArray, "Page translation array"); -+ -+ for (i = 0; i < psPMRPageArrayData->uiTotalNumChunks; i++) -+ { -+ psPageArray[i].uiAddr = RA_BASE_STRIP_GHOST_BIT(psPMRPageArrayData->aBaseArray[i]); -+ } -+ -+ eError = OSChangeSparseMemCPUAddrMap((void**) psPageArray, -+ sCpuVABase, -+ sCpuAddrPtr, -+ ui32AllocPageCount, -+ pai32AllocIndices, -+ ui32FreePageCount, -+ pai32FreeIndices, -+ IMG_TRUE); -+ -+ OSFreeMem(psPageArray); -+ -+ return eError; -+} -+ -+static PMR_IMPL_FUNCTAB _sPMRLMAFuncTab = { -+ .pfnLockPhysAddresses = &PMRLockSysPhysAddressesLocalMem, -+ .pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesLocalMem, -+ .pfnDevPhysAddr = &PMRSysPhysAddrLocalMem, -+ .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataLocalMem, -+ .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataLocalMem, -+ .pfnReadBytes = &PMRReadBytesLocalMem, -+ .pfnWriteBytes = &PMRWriteBytesLocalMem, -+ .pfnChangeSparseMem = &PMRChangeSparseMemLocalMem, -+ .pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapLocalMem, -+ .pfnMMap = NULL, -+ .pfnFinalize = &PMRFinalizeLocalMem, -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ .pfnZombify = &PMRZombifyLocalMem, -+#endif -+}; -+ -+PVRSRV_ERROR -+PhysmemNewLocalRamBackedPMR(PHYS_HEAP *psPhysHeap, -+ CONNECTION_DATA *psConnection, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_UINT32 uiLog2AllocPageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszAnnotation, -+ IMG_PID uiPid, -+ PMR **ppsPMRPtr, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_ERROR eError2; -+ PMR *psPMR = NULL; -+ PMR_LMALLOCARRAY_DATA *psPrivData = NULL; -+ PMR_FLAGS_T uiPMRFlags; -+ IMG_UINT32 ui32LMAllocFlags = 0; -+ -+ /* This path is checking for the type of PMR to create, if sparse we -+ * have to perform additional validation as we can only map sparse ranges -+ * if the os functionality to do so is present. 
We can also only map virtually -+ * contiguous sparse regions. Non backed gaps in a range cannot be mapped. -+ */ -+ if (ui32NumPhysChunks != ui32NumVirtChunks || ui32NumVirtChunks > 1) -+ { -+ if (PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags) && -+ !OSIsMapPhysNonContigSupported()) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: LMA kernel mapping functions not available " -+ "for physically discontiguous memory.", -+ __func__)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, errorOnParam); -+ } -+ BIT_SET(ui32LMAllocFlags, FLAG_SPARSE); -+ } -+ -+ if (PVRSRV_CHECK_ON_DEMAND(uiFlags)) -+ { -+ BIT_SET(ui32LMAllocFlags, FLAG_ONDEMAND); -+ } -+ -+ if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags)) -+ { -+ BIT_SET(ui32LMAllocFlags, FLAG_ZERO); -+ } -+ -+ if (PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags)) -+ { -+ BIT_SET(ui32LMAllocFlags, FLAG_POISON_ON_ALLOC); -+ } -+ -+#if defined(DEBUG) -+ if (PVRSRV_CHECK_POISON_ON_FREE(uiFlags)) -+ { -+ BIT_SET(ui32LMAllocFlags, FLAG_POISON_ON_FREE); -+ } -+#endif -+ -+ /* Create Array structure that holds the physical pages */ -+ eError = _AllocLMPageArray(uiSize, -+ ui32NumPhysChunks, -+ ui32NumVirtChunks, -+ uiLog2AllocPageSize, -+ ui32LMAllocFlags, -+ psPhysHeap, -+ uiFlags, -+ uiPid, -+ &psPrivData, -+ psConnection); -+ PVR_GOTO_IF_ERROR(eError, errorOnAllocPageArray); -+ -+ if (!BIT_ISSET(ui32LMAllocFlags, FLAG_ONDEMAND)) -+ { -+ /* Allocate the physical pages */ -+ eError = _AllocLMPages(psPrivData, pui32MappingTable); -+ PVR_GOTO_IF_ERROR(eError, errorOnAllocPages); -+ } -+ -+ /* In this instance, we simply pass flags straight through. -+ -+ Generically, uiFlags can include things that control the PMR -+ factory, but we don't need any such thing (at the time of -+ writing!), and our caller specifies all PMR flags so we don't -+ need to meddle with what was given to us. -+ */ -+ uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); -+ /* check no significant bits were lost in cast due to different -+ bit widths for flags */ -+ PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK)); -+ -+ if (BIT_ISSET(ui32LMAllocFlags, FLAG_ONDEMAND)) -+ { -+ PDUMPCOMMENT(PhysHeapDeviceNode(psPhysHeap), "Deferred Allocation PMR (LMA)"); -+ } -+ -+ eError = PMRCreatePMR(psPhysHeap, -+ uiSize, -+ ui32NumPhysChunks, -+ ui32NumVirtChunks, -+ pui32MappingTable, -+ uiLog2AllocPageSize, -+ uiPMRFlags, -+ pszAnnotation, -+ &_sPMRLMAFuncTab, -+ psPrivData, -+ PMR_TYPE_LMA, -+ &psPMR, -+ ui32PDumpFlags); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PMRCreatePMR", errorOnCreate); -+ -+ *ppsPMRPtr = psPMR; -+ return PVRSRV_OK; -+ -+errorOnCreate: -+ if (!BIT_ISSET(ui32LMAllocFlags, FLAG_ONDEMAND) && psPrivData->iNumChunksAllocated) -+ { -+ eError2 = _FreeLMPages(psPrivData, NULL, 0); -+ PVR_ASSERT(eError2 == PVRSRV_OK); -+ } -+ -+errorOnAllocPages: -+ _FreeLMPageArray(psPrivData); -+ -+errorOnAllocPageArray: -+errorOnParam: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/physmem_lma.h b/drivers/gpu/drm/img-rogue/physmem_lma.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physmem_lma.h -@@ -0,0 +1,94 @@ -+/**************************************************************************/ /*! -+@File -+@Title Header for local card memory allocator -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Part of the memory management. This module is responsible for -+ implementing the function callbacks for local card memory. 
-+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+#ifndef SRVSRV_PHYSMEM_LMA_H -+#define SRVSRV_PHYSMEM_LMA_H -+ -+/* include/ */ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_memallocflags.h" -+ -+/* services/server/include/ */ -+#include "pmr.h" -+#include "pmr_impl.h" -+ -+/*************************************************************************/ /*! -+@Function PhysmemCreateHeapLMA -+@Description Create and register new LMA heap with LMA specific details. -+@Input psDevNode Pointer to device node struct. -+@Input uiPolicy Heap allocation policy flags -+@Input psConfig Heap configuration. -+@Input pszLabel Debug identifier label -+@Output ppsPhysHeap Pointer to the created heap. -+@Return PVRSRV_ERROR PVRSRV_OK or error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PhysmemCreateHeapLMA(PVRSRV_DEVICE_NODE *psDevNode, -+ PHYS_HEAP_POLICY uiPolicy, -+ PHYS_HEAP_CONFIG *psConfig, -+ IMG_CHAR *pszLabel, -+ PHYS_HEAP **ppsPhysHeap); -+ -+/* -+ * PhysmemNewLocalRamBackedPMR -+ * -+ * This function will create a PMR using the local card memory and is OS -+ * agnostic. 
-+ */ -+PVRSRV_ERROR -+PhysmemNewLocalRamBackedPMR(PHYS_HEAP *psPhysHeap, -+ CONNECTION_DATA *psConnection, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_UINT32 uiLog2PageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszAnnotation, -+ IMG_PID uiPid, -+ PMR **ppsPMRPtr, -+ IMG_UINT32 ui32PDumpFlags); -+ -+#endif /* #ifndef SRVSRV_PHYSMEM_LMA_H */ -diff --git a/drivers/gpu/drm/img-rogue/physmem_osmem.c b/drivers/gpu/drm/img-rogue/physmem_osmem.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physmem_osmem.c -@@ -0,0 +1,91 @@ -+/*************************************************************************/ /*! -+@File physmem_osmem.c -+@Title OS Memory PMR Factory common definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Part of Services memory management. This file defines the -+ OS memory PMR factory API that must be defined so that the -+ common & device layer code in the Services Server can allocate -+ new PMRs back with pages from the OS page allocator. Applicable -+ for UMA based platforms, such platforms must implement this API -+ in the OS Porting layer, in the "env" directory for that -+ system. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+/* include/ */ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+/* services/server/include/ */ -+#include "physheap.h" -+#include "osfunc.h" -+#include "physmem_osmem.h" -+ -+ -+static IMG_UINT32 PhysHeapOSGetPageShift(void) -+{ -+ return (IMG_UINT32)OSGetPageShift(); -+} -+ -+static PHEAP_IMPL_FUNCS _sPHEAPImplFuncs = -+{ -+ .pfnDestroyData = NULL, -+ .pfnGetPMRFactoryMemStats = PhysmemGetOSRamMemStats, -+ .pfnCreatePMR = PhysmemNewOSRamBackedPMR, -+ .pfnPagesAlloc = &OSPhyContigPagesAlloc, -+ .pfnPagesFree = &OSPhyContigPagesFree, -+ .pfnPagesMap = &OSPhyContigPagesMap, -+ .pfnPagesUnMap = &OSPhyContigPagesUnmap, -+ .pfnPagesClean = &OSPhyContigPagesClean, -+ .pfnGetPageShift = &PhysHeapOSGetPageShift, -+}; -+ -+ -+PVRSRV_ERROR -+PhysmemCreateHeapOSMEM(PVRSRV_DEVICE_NODE *psDevNode, -+ PHYS_HEAP_POLICY uiPolicy, -+ PHYS_HEAP_CONFIG *psConfig, -+ PHYS_HEAP **ppsPhysHeap) -+{ -+ return PhysHeapCreate(psDevNode, -+ psConfig, -+ uiPolicy, -+ NULL, -+ &_sPHEAPImplFuncs, -+ ppsPhysHeap); -+} -diff --git a/drivers/gpu/drm/img-rogue/physmem_osmem.h b/drivers/gpu/drm/img-rogue/physmem_osmem.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physmem_osmem.h -@@ -0,0 +1,151 @@ -+/*************************************************************************/ /*! -+@File physmem_osmem.h -+@Title OS memory PMR factory API -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Part of Services memory management. This file defines the -+ OS memory PMR factory API that must be defined so that the -+ common & device layer code in the Services Server can allocate -+ new PMRs back with pages from the OS page allocator. Applicable -+ for UMA based platforms, such platforms must implement this API -+ in the OS Porting layer, in the "env" directory for that -+ system. -+ -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PHYSMEM_OSMEM_H -+#define PHYSMEM_OSMEM_H -+ -+/* include/ */ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_memallocflags.h" -+ -+/* services/server/include/ */ -+#include "pmr.h" -+#include "pmr_impl.h" -+#include "connection_server.h" -+#include "physheap.h" -+ -+/*************************************************************************/ /*! -+@Function PhysmemCreateHeapLMA -+@Description Create and register new OSMEM heap with OSMEM specific details. -+@Input psDevNode Pointer to device node struct. -+@Input uiPolicy Heap allocation policy flags -+@Input psConfig Heap configuration. -+@Output ppsPhysHeap Pointer to the created heap. -+@Return PVRSRV_ERROR PVRSRV_OK or error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PhysmemCreateHeapOSMEM(PVRSRV_DEVICE_NODE *psDevNode, -+ PHYS_HEAP_POLICY uiPolicy, -+ PHYS_HEAP_CONFIG *psConfig, -+ PHYS_HEAP **ppsPhysHeap); -+ -+/*************************************************************************/ /*! -+@Function PhysmemNewOSRamBackedPMR -+@Description Rogue Services will call this function to allocate GPU device -+ memory from the PMR factory supported by the OS DDK port. This -+ factory typically obtains physical memory from the kernel/OS -+ API that allocates memory from the default heap of shared -+ system memory available on the platform. The allocated memory -+ must be page-aligned and be a whole number of pages. -+ After allocating the required memory, the implementation must -+ then call PMRCreatePMR() to obtain the PMR structure that -+ describes this allocation to the upper layers of the Services. -+ memory management sub-system. -+ NB. Implementation of this function is mandatory. If shared -+ system memory is not to be used in the OS port then the -+ implementation must return PVRSRV_ERROR_NOT_SUPPORTED. -+ -+@Input psPhysHeap the phys heap -+@Input psConnection the connection to the originator process -+@Input uiSize the size of the allocation -+ (must be a multiple of page size) -+@Input ui32NumPhysChunks when sparse allocations are requested, -+ this is the number of physical chunks -+ to be allocated. -+ For regular allocations, this will be 1. -+@Input ui32NumVirtChunks when sparse allocations are requested, -+ this is the number of virtual chunks -+ covering the sparse allocation. -+ For regular allocations, this will be 1. -+@Input pui32MappingTable when sparse allocations are requested, -+ this is the list of the indices of -+ each physically-backed virtual chunk -+ For regular allocations, this will -+ be NULL. -+@Input uiLog2PageSize the physical pagesize in log2(bytes). -+@Input uiFlags the allocation flags. -+@Input pszAnnotation string describing the PMR (for debug). -+ This should be passed into the function -+ PMRCreatePMR(). -+@Input uiPid The process ID that this allocation should -+ be associated with. 
-+@Output ppsPMROut pointer to the PMR created for the -+ new allocation -+@Input ui32PDumpFlags the pdump flags. -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap, -+ CONNECTION_DATA *psConnection, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ IMG_UINT32 uiLog2PageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszAnnotation, -+ IMG_PID uiPid, -+ PMR **ppsPMROut, -+ IMG_UINT32 ui32PDumpFlags); -+ -+/*************************************************************************/ /*! -+@Function PhysmemGetOSRamMemStats -+@Description Function that gets the OS memory usage statistics -+@Input pvImplData Physical heap private data. -+@Output pui64TotalSize Buffer that holds the total OS memory size -+@Output pui64FreeSize Buffer that holds the free OS memory size -+@Return None. -+*/ /**************************************************************************/ -+void PhysmemGetOSRamMemStats(PHEAP_IMPL_DATA pvImplData, -+ IMG_UINT64 *pui64TotalSize, -+ IMG_UINT64 *pui64FreeSize); -+ -+#endif /* PHYSMEM_OSMEM_H */ -diff --git a/drivers/gpu/drm/img-rogue/physmem_osmem_linux.c b/drivers/gpu/drm/img-rogue/physmem_osmem_linux.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physmem_osmem_linux.c -@@ -0,0 +1,3940 @@ -+/*************************************************************************/ /*! -+@File -+@Title Implementation of PMR functions for OS managed memory -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Part of the memory management. This module is responsible for -+ implementing the function callbacks for physical memory borrowed -+ from that normally managed by the operating system. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#if defined(CONFIG_X86) -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) -+#include -+#else -+#include -+#endif -+#endif -+ -+/* include/ */ -+#include "rgx_heaps.h" -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_memallocflags.h" -+#include "rgx_pdump_panics.h" -+/* services/server/include/ */ -+#include "allocmem.h" -+#include "osfunc.h" -+#include "pdump_km.h" -+#include "pmr.h" -+#include "pmr_impl.h" -+#include "cache_km.h" -+#include "devicemem_server_utils.h" -+#include "pvr_vmap.h" -+#include "physheap.h" -+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY) -+#include "physmem_cpumap_history.h" -+#endif -+ -+/* ourselves */ -+#include "physmem_osmem.h" -+#include "physmem_osmem_linux.h" -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#include "process_stats.h" -+#if !defined(PVRSRV_ENABLE_MEMORY_STATS) -+#include "hash.h" -+#endif -+#endif -+ -+#include "kernel_compatibility.h" -+#include "sysconfig.h" -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) -+static IMG_UINT32 g_uiMaxOrder = PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM; -+#else -+/* split_page not available on older kernels */ -+#undef PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM -+#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 0 -+static IMG_UINT32 g_uiMaxOrder; -+#endif -+ -+/* -+ These corresponds to the MMU min/max page sizes and associated PTE -+ alignment that can be used on the device for an allocation. It is -+ 4KB (min) and 2MB (max) respectively. -+*/ -+#define PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ RGX_HEAP_4KB_PAGE_SHIFT -+#define PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ RGX_HEAP_2MB_PAGE_SHIFT -+ -+/* Defines how many pages should be mapped at once to the kernel */ -+#define PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES 1024 /* 4 MB */ -+ -+/* -+ These are used to get/set/mask lower-order bits in a dma_addr_t -+ to provide side-band information associated with that address. -+ These includes whether the address was obtained via alloc_page -+ or dma_alloc and if address came allocated pre-aligned or an -+ adjustment was made manually to aligned it. 
-+*/ -+#define DMA_SET_ADJUSTED_ADDR(x) ((x) | ((dma_addr_t)0x02)) -+#define DMA_IS_ADDR_ADJUSTED(x) ((x) & ((dma_addr_t)0x02)) -+#define DMA_SET_ALLOCPG_ADDR(x) ((x) | ((dma_addr_t)0x01)) -+#define DMA_IS_ALLOCPG_ADDR(x) ((x) & ((dma_addr_t)0x01)) -+#define DMA_GET_ALIGN_ADJUSTMENT(x) ((x>>2) & ((dma_addr_t)0x3ff)) -+#define DMA_SET_ALIGN_ADJUSTMENT(x,y) ((x) | (((dma_addr_t)y)<<0x02)) -+#define DMA_GET_ADDR(x) (((dma_addr_t)x) & ((dma_addr_t)~0xfff)) -+#define DMA_VADDR_NOT_IN_USE 0xCAFEF00DDEADBEEFULL -+ -+#define PVRSRV_ZERO_VALUE 0 -+ -+typedef struct _PMR_OSPAGEARRAY_DATA_ { -+ /* Device for which this allocation has been made */ -+ PVRSRV_DEVICE_NODE *psDevNode; -+ /* The pid that made this allocation */ -+ IMG_PID uiPid; -+ -+ /* -+ * iNumOSPagesAllocated: -+ * Number of pages allocated in this PMR so far. -+ * This allows for up to (2^31 - 1) pages. With 4KB pages, that's 8TB of memory for each PMR. -+ */ -+ IMG_INT32 iNumOSPagesAllocated; -+ -+ /* -+ * uiTotalNumOSPages: -+ * Total number of pages supported by this PMR. (Fixed as of now due the fixed Page table array size) -+ * number of "pages" (a.k.a. macro pages, compound pages, higher order pages, etc...) -+ */ -+ IMG_UINT32 uiTotalNumOSPages; -+ -+ /* -+ uiLog2AllocPageSize; -+ -+ size of each "page" -- this would normally be the same as -+ PAGE_SHIFT, but we support the idea that we may allocate pages -+ in larger chunks for better contiguity, using order>0 in the -+ call to alloc_pages() -+ */ -+ IMG_UINT32 uiLog2AllocPageSize; -+ -+ /* -+ ui64DmaMask; -+ */ -+ IMG_UINT64 ui64DmaMask; -+ -+ /* -+ For non DMA/CMA allocation, pagearray references the pages -+ thus allocated; one entry per compound page when compound -+ pages are used. In addition, for DMA/CMA allocations, we -+ track the returned cpu virtual and device bus address. -+ */ -+ struct page **pagearray; -+ dma_addr_t *dmaphysarray; -+ void **dmavirtarray; -+ -+ -+#define FLAG_ZERO (0U) -+#define FLAG_POISON_ON_FREE (1U) -+#define FLAG_POISON_ON_ALLOC (2U) -+#define FLAG_ONDEMAND (3U) -+ -+#define FLAG_IS_CMA (5U) -+#define FLAG_UNSET_MEMORY_TYPE (6U) -+ -+#define FLAG_IS_ZOMBIE (7U) -+ -+ /* -+ * Allocation flags related to the pages: -+ * Zero - Should we Zero memory on alloc -+ * Poison on free - Should we Poison the memory on free. -+ * Poison on alloc - Should we Poison the memory on alloc. -+ * On demand - Is the allocation on Demand i.e Do we defer allocation to time of use. -+ * CMA - Is CMA memory allocated via DMA framework -+ * Unset Memory Type - Upon free do we need to revert the cache type before return to OS -+ * Zombie - Pages are part of a zombie PMR -+ * */ -+ IMG_UINT32 ui32AllocFlags; -+ -+ /* -+ The cache mode of the PMR. Additionally carrying the CPU-Cache-Clean -+ flag, advising us to do cache maintenance on behalf of the caller. -+ Boolean used to track if we need to revert the cache attributes -+ of the pages used in this allocation. Depends on OS/architecture. -+ */ -+ IMG_UINT32 ui32CPUCacheFlags; -+ /* -+ * In CMA allocation path, algorithm can allocate double the size of -+ * requested allocation size to satisfy the alignment. 
In this case -+ * the additional pages allocated are tracked through this additional -+ * variable and are accounted for in the memory statistics */ -+ IMG_UINT32 ui32CMAAdjustedPageCount; -+ -+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY) -+ /* -+ Handle on the parent PMR -+ */ -+ void *hPMR; -+#endif -+ -+} PMR_OSPAGEARRAY_DATA; -+ -+/*********************************** -+ * Page pooling for uncached pages * -+ ***********************************/ -+ -+static INLINE void -+_FreeOSPage_CMA(struct device *dev, -+ size_t alloc_size, -+ IMG_UINT32 uiOrder, -+ void *virt_addr, -+ dma_addr_t dev_addr, -+ struct page *psPage); -+ -+static void -+_FreeOSPage(IMG_UINT32 uiOrder, -+ IMG_BOOL bUnsetMemoryType, -+ struct page *psPage); -+ -+static PVRSRV_ERROR -+_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, -+ IMG_UINT32 *pai32FreeIndices, -+ IMG_UINT32 ui32FreePageCount); -+ -+static PVRSRV_ERROR -+_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree, -+ IMG_UINT32 *puiPagesFreed); -+ -+/* A struct for our page pool holding an array of zeroed (!) pages. -+ * We always put units of page arrays to the pool but are -+ * able to take individual pages */ -+typedef struct -+{ -+ /* Linkage for page pool LRU list */ -+ struct list_head sPagePoolItem; -+ -+ /* How many items are still in the page array */ -+ IMG_UINT32 uiItemsRemaining; -+ /* Array of the actual pages */ -+ struct page **ppsPageArray; -+ -+} LinuxPagePoolEntry; -+ -+/* CleanupThread structure to put allocation in page pool */ -+typedef struct -+{ -+ PVRSRV_CLEANUP_THREAD_WORK sCleanupWork; -+ IMG_UINT32 ui32CPUCacheMode; -+ LinuxPagePoolEntry *psPoolEntry; -+} LinuxCleanupData; -+ -+ -+/* Caches to hold page pool and page array structures */ -+static struct kmem_cache *g_psLinuxPagePoolCache; -+static struct kmem_cache *g_psLinuxPageArray; -+ -+/* Track what is live, all protected by pool lock. -+ * x86 needs two page pools because we have to change the memory attributes -+ * of the pages which is expensive due to an implicit flush. -+ * See set_pages_array_uc/wc/wb. */ -+static IMG_UINT32 g_ui32PagePoolUCCount; -+#if defined(CONFIG_X86) -+static IMG_UINT32 g_ui32PagePoolWCCount; -+#endif -+/* Tracks asynchronous tasks currently accessing the page pool. -+ * It is incremented if a defer free task -+ * is created. Both will decrement the value when they finished the work. -+ * The atomic prevents piling up of deferred work in case the deferred thread -+ * cannot keep up with the application.*/ -+static ATOMIC_T g_iPoolCleanTasks; -+/* We don't want too many asynchronous threads trying to access the page pool -+ * at the same time */ -+#define PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS 128 -+ -+/* Defines how many pages the page cache should hold. */ -+#if defined(PVR_LINUX_PHYSMEM_MAX_POOL_PAGES) -+static const IMG_UINT32 g_ui32PagePoolMaxEntries = PVR_LINUX_PHYSMEM_MAX_POOL_PAGES; -+#else -+static const IMG_UINT32 g_ui32PagePoolMaxEntries; -+#endif -+ -+/* We double check if we would exceed this limit if we are below MAX_POOL_PAGES -+ and want to add an allocation to the pool. -+ This prevents big allocations being given back to the OS just because they -+ exceed the MAX_POOL_PAGES limit even though the pool is currently empty. 
*/ -+#if defined(PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES) -+static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries = PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES; -+#else -+static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries; -+#endif -+ -+#if defined(CONFIG_X86) -+#define PHYSMEM_OSMEM_NUM_OF_POOLS 2 -+static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = { -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED, -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC -+}; -+#else -+#define PHYSMEM_OSMEM_NUM_OF_POOLS 1 -+static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = { -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED -+}; -+#endif -+ -+/* Global structures we use to manage the page pool */ -+static DEFINE_MUTEX(g_sPagePoolMutex); -+ -+/* List holding the page array pointers: */ -+static LIST_HEAD(g_sPagePoolList_WC); -+static LIST_HEAD(g_sPagePoolList_UC); -+ -+#if defined(DEBUG) && defined(SUPPORT_VALIDATION) -+/* Global structure to manage GPU memory leak */ -+static DEFINE_MUTEX(g_sUMALeakMutex); -+static IMG_UINT32 g_ui32UMALeakCounter = 0; -+#endif -+ -+static IMG_BOOL g_bInitialisedOnAlloc = IMG_FALSE; -+ -+static inline IMG_BOOL -+_ShouldInitMem(IMG_UINT32 ui32AllocFlags) -+{ -+ return BIT_ISSET(ui32AllocFlags, FLAG_ZERO) && !g_bInitialisedOnAlloc; -+} -+ -+static inline IMG_UINT32 -+_PagesInPoolUnlocked(void) -+{ -+ IMG_UINT32 uiCnt = g_ui32PagePoolUCCount; -+#if defined(CONFIG_X86) -+ uiCnt += g_ui32PagePoolWCCount; -+#endif -+ return uiCnt; -+} -+ -+static inline void -+_PagePoolLock(void) -+{ -+ mutex_lock(&g_sPagePoolMutex); -+} -+ -+static inline int -+_PagePoolTrylock(void) -+{ -+ return mutex_trylock(&g_sPagePoolMutex); -+} -+ -+static inline void -+_PagePoolUnlock(void) -+{ -+ mutex_unlock(&g_sPagePoolMutex); -+} -+ -+static inline IMG_BOOL -+_GetPoolListHead(IMG_UINT32 ui32CPUCacheFlags, -+ struct list_head **ppsPoolHead, -+ IMG_UINT32 **ppuiCounter) -+{ -+ switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags)) -+ { -+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: -+#if defined(CONFIG_X86) -+ /* -+ For x86 we need to keep different lists for uncached -+ and write-combined as we must always honour the PAT -+ setting which cares about this difference. -+ */ -+ -+ *ppsPoolHead = &g_sPagePoolList_WC; -+ *ppuiCounter = &g_ui32PagePoolWCCount; -+ break; -+#endif -+ -+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: -+ *ppsPoolHead = &g_sPagePoolList_UC; -+ *ppuiCounter = &g_ui32PagePoolUCCount; -+ break; -+ -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Unknown CPU caching mode. " -+ "Using default UC pool.", -+ __func__)); -+ *ppsPoolHead = &g_sPagePoolList_UC; -+ *ppuiCounter = &g_ui32PagePoolUCCount; -+ PVR_ASSERT(0); -+ return IMG_FALSE; -+ } -+ return IMG_TRUE; -+} -+ -+static struct shrinker g_sShrinker; -+ -+/* Returning the number of pages that still reside in the page pool. */ -+static unsigned long -+_GetNumberOfPagesInPoolUnlocked(void) -+{ -+ return _PagesInPoolUnlocked(); -+} -+ -+/* Linux shrinker function that informs the OS about how many pages we are caching and -+ * it is able to reclaim. 
*/ -+static unsigned long -+_CountObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl) -+{ -+ int remain; -+ -+ PVR_ASSERT(psShrinker == &g_sShrinker); -+ (void)psShrinker; -+ (void)psShrinkControl; -+ -+ /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */ -+ if (_PagePoolTrylock() == 0) -+ return 0; -+ remain = _GetNumberOfPagesInPoolUnlocked(); -+ _PagePoolUnlock(); -+ -+ return remain; -+} -+ -+/* Linux shrinker function to reclaim the pages from our page pool */ -+static unsigned long -+_ScanObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl) -+{ -+ unsigned long uNumToScan = psShrinkControl->nr_to_scan; -+ IMG_UINT32 uiPagesFreed; -+ -+ PVR_ASSERT(psShrinker == &g_sShrinker); -+ (void)psShrinker; -+ -+ /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */ -+ if (_PagePoolTrylock() == 0) -+ return SHRINK_STOP; -+ -+ _FreePagesFromPoolUnlocked(uNumToScan, -+ &uiPagesFreed); -+ uNumToScan -= uiPagesFreed; -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)) -+ { -+ int remain; -+ remain = _GetNumberOfPagesInPoolUnlocked(); -+ _PagePoolUnlock(); -+ return remain; -+ } -+#else -+ /* Returning the number of pages freed during the scan */ -+ _PagePoolUnlock(); -+ return psShrinkControl->nr_to_scan - uNumToScan; -+#endif -+} -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)) -+static int -+_ShrinkPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl) -+{ -+ if (psShrinkControl->nr_to_scan != 0) -+ { -+ return _ScanObjectsInPagePool(psShrinker, psShrinkControl); -+ } -+ else -+ { -+ /* No pages are being reclaimed so just return the page count */ -+ return _CountObjectsInPagePool(psShrinker, psShrinkControl); -+ } -+} -+ -+static struct shrinker g_sShrinker = -+{ -+ .shrink = _ShrinkPagePool, -+ .seeks = DEFAULT_SEEKS -+}; -+#else -+static struct shrinker g_sShrinker = -+{ -+ .count_objects = _CountObjectsInPagePool, -+ .scan_objects = _ScanObjectsInPagePool, -+ .seeks = DEFAULT_SEEKS -+}; -+#endif -+ -+/* Register the shrinker so Linux can reclaim cached pages */ -+void LinuxInitPhysmem(void) -+{ -+ g_psLinuxPageArray = kmem_cache_create("pvr-pa", sizeof(PMR_OSPAGEARRAY_DATA), 0, 0, NULL); -+ -+ g_psLinuxPagePoolCache = kmem_cache_create("pvr-pp", sizeof(LinuxPagePoolEntry), 0, 0, NULL); -+ if (g_psLinuxPagePoolCache) -+ { -+ /* Only create the shrinker if we created the cache OK */ -+ register_shrinker(&g_sShrinker, "pvr-pp"); -+ } -+ -+ OSAtomicWrite(&g_iPoolCleanTasks, 0); -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,0)) -+/* Check both config and modparam setting */ -+#if PVRSRV_USE_LINUX_CONFIG_INIT_ON_ALLOC == 1 -+ g_bInitialisedOnAlloc = want_init_on_alloc(0x0); -+ -+/* Assume modparam setting not in use on system */ -+#elif PVRSRV_USE_LINUX_CONFIG_INIT_ON_ALLOC == 2 -+# if defined(CONFIG_INIT_ON_ALLOC_DEFAULT_ON) -+ g_bInitialisedOnAlloc = IMG_TRUE; -+# else -+ g_bInitialisedOnAlloc = IMG_FALSE; -+# endif -+ -+/* Ignore both config and modparam settings */ -+#else -+ g_bInitialisedOnAlloc = IMG_FALSE; -+#endif -+#endif -+} -+ -+/* Unregister the shrinker and remove all pages from the pool that are still left */ -+void LinuxDeinitPhysmem(void) -+{ -+ IMG_UINT32 uiPagesFreed; -+ -+ if (OSAtomicRead(&g_iPoolCleanTasks) > 0) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "Still deferred cleanup tasks running " -+ "while deinitialising memory subsystem.")); -+ } -+ -+ _PagePoolLock(); -+ if (_FreePagesFromPoolUnlocked(IMG_UINT32_MAX, 
&uiPagesFreed) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Unable to free all pages from page pool when " -+ "deinitialising memory subsystem.")); -+ PVR_ASSERT(0); -+ } -+ -+ PVR_ASSERT(_PagesInPoolUnlocked() == 0); -+ -+ /* Free the page cache */ -+ kmem_cache_destroy(g_psLinuxPagePoolCache); -+ -+ unregister_shrinker(&g_sShrinker); -+ _PagePoolUnlock(); -+ -+ kmem_cache_destroy(g_psLinuxPageArray); -+} -+ -+static void EnableOOMKiller(void) -+{ -+ current->flags &= ~PF_DUMPCORE; -+} -+ -+static void DisableOOMKiller(void) -+{ -+ /* PF_DUMPCORE is treated by the VM as if the OOM killer was disabled. -+ * -+ * As oom_killer_disable() is an inline, non-exported function, we -+ * can't use it from a modular driver. Furthermore, the OOM killer -+ * API doesn't look thread safe, which 'current' is. -+ */ -+ WARN_ON(current->flags & PF_DUMPCORE); -+ current->flags |= PF_DUMPCORE; -+} -+ -+/* Prints out the addresses in a page array for debugging purposes -+ * Define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY locally to activate: */ -+/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY 1 */ -+static inline void -+_DumpPageArray(struct page **pagearray, IMG_UINT32 uiPagesToPrint) -+{ -+#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY) -+ IMG_UINT32 i; -+ if (pagearray) -+ { -+ printk("Array %p:\n", pagearray); -+ for (i = 0; i < uiPagesToPrint; i++) -+ { -+ printk("%p | ", (pagearray)[i]); -+ } -+ printk("\n"); -+ } -+ else -+ { -+ printk("Array is NULL:\n"); -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(pagearray); -+ PVR_UNREFERENCED_PARAMETER(uiPagesToPrint); -+#endif -+} -+ -+/* Debugging function that dumps out the number of pages for every -+ * page array that is currently in the page pool. -+ * Not defined by default. Define locally to activate feature: */ -+/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL 1 */ -+static void -+_DumpPoolStructure(void) -+{ -+#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL) -+ LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; -+ struct list_head *psPoolHead = NULL; -+ IMG_UINT32 j; -+ IMG_UINT32 *puiCounter; -+ -+ printk("\n"); -+ /* Empty all pools */ -+ for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++) -+ { -+ -+ printk("pool = %u\n", j); -+ -+ /* Get the correct list for this caching mode */ -+ if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter)) -+ { -+ break; -+ } -+ -+ list_for_each_entry_safe(psPagePoolEntry, -+ psTempPoolEntry, -+ psPoolHead, -+ sPagePoolItem) -+ { -+ printk("%u | ", psPagePoolEntry->uiItemsRemaining); -+ } -+ printk("\n"); -+ } -+#endif -+} -+ -+/* Free a certain number of pages from the page pool. -+ * Mainly used in error paths or at deinitialisation to -+ * empty the whole pool. 
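/*
 * Editor's sketch of the PF_DUMPCORE pattern used by Disable/EnableOOMKiller()
 * above: the allocating task opts itself out of the OOM killer only around an
 * allocation that is allowed to fail. Helper name and flags are illustrative.
 */
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/bug.h>

static struct page *alloc_page_no_oom(gfp_t gfp, unsigned int order)
{
	struct page *page;

	WARN_ON(current->flags & PF_DUMPCORE);
	current->flags |= PF_DUMPCORE;          /* treated as "OOM killer off" */
	page = alloc_pages(gfp | __GFP_NORETRY | __GFP_NOWARN, order);
	current->flags &= ~PF_DUMPCORE;         /* restore normal behaviour    */

	return page;                            /* may be NULL; caller handles */
}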
*/ -+static PVRSRV_ERROR -+_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree, -+ IMG_UINT32 *puiPagesFreed) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; -+ struct list_head *psPoolHead = NULL; -+ IMG_UINT32 i, j; -+ IMG_UINT32 *puiCounter; -+ -+ *puiPagesFreed = uiMaxPagesToFree; -+ -+ /* Empty all pools */ -+ for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++) -+ { -+ -+ /* Get the correct list for this caching mode */ -+ if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter)) -+ { -+ break; -+ } -+ -+ /* Free the pages and remove page arrays from the pool if they are exhausted */ -+ list_for_each_entry_safe(psPagePoolEntry, -+ psTempPoolEntry, -+ psPoolHead, -+ sPagePoolItem) -+ { -+ IMG_UINT32 uiItemsToFree; -+ struct page **ppsPageArray; -+ -+ /* Check if we are going to free the whole page array or just parts */ -+ if (psPagePoolEntry->uiItemsRemaining <= uiMaxPagesToFree) -+ { -+ uiItemsToFree = psPagePoolEntry->uiItemsRemaining; -+ ppsPageArray = psPagePoolEntry->ppsPageArray; -+ } -+ else -+ { -+ uiItemsToFree = uiMaxPagesToFree; -+ ppsPageArray = &(psPagePoolEntry->ppsPageArray[psPagePoolEntry->uiItemsRemaining - uiItemsToFree]); -+ } -+ -+#if defined(CONFIG_X86) -+ /* Set the correct page caching attributes on x86 */ -+ if (!PVRSRV_CHECK_CPU_CACHED(g_aui32CPUCacheFlags[j])) -+ { -+ int ret; -+ ret = set_pages_array_wb(ppsPageArray, uiItemsToFree); -+ if (ret) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to reset page attributes", -+ __func__)); -+ eError = PVRSRV_ERROR_FAILED_TO_FREE_PAGES; -+ goto e_exit; -+ } -+ } -+#endif -+ -+ /* Free the actual pages */ -+ for (i = 0; i < uiItemsToFree; i++) -+ { -+ __free_pages(ppsPageArray[i], 0); -+ ppsPageArray[i] = NULL; -+ } -+ -+ /* Reduce counters */ -+ uiMaxPagesToFree -= uiItemsToFree; -+ *puiCounter -= uiItemsToFree; -+ psPagePoolEntry->uiItemsRemaining -= uiItemsToFree; -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ /* -+ * MemStats usually relies on having the bridge lock held, however -+ * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and -+ * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so -+ * the page pool lock is used to ensure these calls are mutually -+ * exclusive -+ */ -+ PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * uiItemsToFree); -+#endif -+ -+ /* Is this pool entry exhausted, delete it */ -+ if (psPagePoolEntry->uiItemsRemaining == 0) -+ { -+ OSFreeMemNoStats(psPagePoolEntry->ppsPageArray); -+ list_del(&psPagePoolEntry->sPagePoolItem); -+ kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry); -+ } -+ -+ /* Return if we have all our pages */ -+ if (uiMaxPagesToFree == 0) -+ { -+ goto e_exit; -+ } -+ } -+ } -+ -+e_exit: -+ *puiPagesFreed -= uiMaxPagesToFree; -+ _DumpPoolStructure(); -+ return eError; -+} -+ -+/* Get a certain number of pages from the page pool and -+ * copy them directly into a given page array. 
*/ -+static void -+_GetPagesFromPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags, -+ IMG_UINT32 uiMaxNumPages, -+ struct page **ppsPageArray, -+ IMG_UINT32 *puiNumReceivedPages) -+{ -+ LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; -+ struct list_head *psPoolHead = NULL; -+ IMG_UINT32 i; -+ IMG_UINT32 *puiCounter; -+ -+ *puiNumReceivedPages = 0; -+ -+ /* Get the correct list for this caching mode */ -+ if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter)) -+ { -+ return; -+ } -+ -+ /* Check if there are actually items in the list */ -+ if (list_empty(psPoolHead)) -+ { -+ return; -+ } -+ -+ PVR_ASSERT(*puiCounter > 0); -+ -+ /* Receive pages from the pool */ -+ list_for_each_entry_safe(psPagePoolEntry, -+ psTempPoolEntry, -+ psPoolHead, -+ sPagePoolItem) -+ { -+ /* Get the pages from this pool entry */ -+ for (i = psPagePoolEntry->uiItemsRemaining; i != 0 && *puiNumReceivedPages < uiMaxNumPages; i--) -+ { -+ ppsPageArray[*puiNumReceivedPages] = psPagePoolEntry->ppsPageArray[i-1]; -+ (*puiNumReceivedPages)++; -+ psPagePoolEntry->uiItemsRemaining--; -+ } -+ -+ /* Is this pool entry exhausted, delete it */ -+ if (psPagePoolEntry->uiItemsRemaining == 0) -+ { -+ OSFreeMemNoStats(psPagePoolEntry->ppsPageArray); -+ list_del(&psPagePoolEntry->sPagePoolItem); -+ kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry); -+ } -+ -+ /* Return if we have all our pages */ -+ if (*puiNumReceivedPages == uiMaxNumPages) -+ { -+ goto exit_ok; -+ } -+ } -+ -+exit_ok: -+ -+ /* Update counters */ -+ *puiCounter -= *puiNumReceivedPages; -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ /* MemStats usually relies on having the bridge lock held, however -+ * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and -+ * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so -+ * the page pool lock is used to ensure these calls are mutually -+ * exclusive -+ */ -+ PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * (*puiNumReceivedPages)); -+#endif -+ -+ _DumpPoolStructure(); -+} -+ -+/* Same as _GetPagesFromPoolUnlocked but handles locking and -+ * checks first whether pages from the pool are a valid option. */ -+static inline void -+_GetPagesFromPoolLocked(PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT32 ui32CPUCacheFlags, -+ IMG_UINT32 uiPagesToAlloc, -+ IMG_UINT32 uiOrder, -+ IMG_BOOL bZero, -+ struct page **ppsPageArray, -+ IMG_UINT32 *puiPagesFromPool) -+{ -+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) -+ PVR_UNREFERENCED_PARAMETER(bZero); -+#else -+ /* Don't get pages from pool if it doesn't provide zeroed pages */ -+ if (bZero) -+ { -+ return; -+ } -+#endif -+ -+ /* The page pool stores only order 0 pages. If we need zeroed memory we -+ * directly allocate from the OS because it is faster than -+ * doing it within the driver. */ -+ if (uiOrder == 0 && -+ !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags)) -+ { -+ -+ _PagePoolLock(); -+ _GetPagesFromPoolUnlocked(ui32CPUCacheFlags, -+ uiPagesToAlloc, -+ ppsPageArray, -+ puiPagesFromPool); -+ _PagePoolUnlock(); -+ } -+} -+ -+/* Takes a page array and maps it into the kernel to write zeros */ -+static PVRSRV_ERROR -+_MemsetPageArray(IMG_UINT32 uiNumToClean, -+ struct page **ppsCleanArray, -+ pgprot_t pgprot, -+ IMG_UINT8 ui8Pattern) -+{ -+ IMG_CPU_VIRTADDR pvAddr; -+ IMG_UINT32 uiMaxPagesToMap = MIN(PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES, -+ uiNumToClean); -+ -+ /* Map and fill the pages with zeros. -+ * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_SIZE -+ * at a time. 
*/ -+ while (uiNumToClean != 0) -+ { -+ IMG_UINT32 uiToClean = MIN(uiNumToClean, uiMaxPagesToMap); -+ -+ pvAddr = pvr_vmap(ppsCleanArray, uiToClean, VM_MAP, pgprot); -+ if (!pvAddr) -+ { -+ if (uiMaxPagesToMap <= 1) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Out of vmalloc memory, unable to map pages for %s.", -+ __func__, -+ ui8Pattern == PVRSRV_ZERO_VALUE ? "zeroing" : "poisoning")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ else -+ { -+ /* Halve the pages to map at once and try again. */ -+ uiMaxPagesToMap = uiMaxPagesToMap >> 1; -+ continue; -+ } -+ } -+ -+ if (pgprot_val(pgprot) == pgprot_val(pgprot_noncached(PAGE_KERNEL))) -+ { -+ /* this is most likely unnecessary as all pages must be 8-bytes -+ * aligned so there unaligned access is impossible */ -+ OSDeviceMemSet(pvAddr, ui8Pattern, PAGE_SIZE * uiToClean); -+ } -+ else if (pgprot_val(pgprot) == pgprot_val(pgprot_writecombine(PAGE_KERNEL))) -+ { -+ OSCachedMemSetWMB(pvAddr, ui8Pattern, PAGE_SIZE * uiToClean); -+ } -+ else -+ { -+ OSCachedMemSet(pvAddr, ui8Pattern, PAGE_SIZE * uiToClean); -+ } -+ pvr_vunmap(pvAddr, uiToClean, pgprot); -+ ppsCleanArray = &(ppsCleanArray[uiToClean]); -+ uiNumToClean -= uiToClean; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR -+_CleanupThread_CleanPages(void *pvData) -+{ -+ LinuxCleanupData *psCleanupData = (LinuxCleanupData*) pvData; -+ LinuxPagePoolEntry *psPagePoolEntry = psCleanupData->psPoolEntry; -+ struct list_head *psPoolHead = NULL; -+ IMG_UINT32 *puiCounter = NULL; -+ -+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) -+ PVRSRV_ERROR eError; -+ pgprot_t pgprot; -+ IMG_UINT32 i; -+#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */ -+ -+ /* Get the correct pool for this caching mode. */ -+ _GetPoolListHead(psCleanupData->ui32CPUCacheMode , &psPoolHead, &puiCounter); -+ -+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) -+ switch (PVRSRV_CPU_CACHE_MODE(psCleanupData->ui32CPUCacheMode)) -+ { -+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: -+#if defined(CONFIG_X86) -+ /* For x86 we can only map with the same attributes -+ * as in the PAT settings*/ -+ pgprot = pgprot_noncached(PAGE_KERNEL); -+ break; -+#endif -+ -+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: -+ pgprot = pgprot_writecombine(PAGE_KERNEL); -+ break; -+ -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Unknown caching mode to set page protection flags.", -+ __func__)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto eExit; -+ } -+ -+ /* Map and fill the pages with zeros. -+ * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_SIZE -+ * at a time. */ -+ eError = _MemsetPageArray(psPagePoolEntry->uiItemsRemaining, -+ psPagePoolEntry->ppsPageArray, -+ pgprot, PVRSRV_ZERO_VALUE); -+ if (eError != PVRSRV_OK) -+ { -+ goto eExit; -+ } -+#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */ -+ -+ /* Lock down pool and add item */ -+ _PagePoolLock(); -+ -+ /* Pool counters were already updated so don't do it here again*/ -+ -+ /* The pages are all zeroed so return them to the pool. */ -+ list_add_tail(&psPagePoolEntry->sPagePoolItem, psPoolHead); -+ -+ _DumpPoolStructure(); -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ /* Calling PVRSRVStatsIncrMemAllocPoolStat and PVRSRVStatsDecrMemAllocPoolStat -+ * inside page pool lock ensures that the stat reflects the state of the pool. 
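/*
 * Editor's standalone sketch of the batched map-and-memset idea in
 * _MemsetPageArray(), written against plain vmap()/vunmap() rather than the
 * driver's pvr_vmap() wrapper; the 1024-page chunk size is an assumption.
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/string.h>

static int memset_page_array(struct page **pages, unsigned int count, u8 pattern)
{
	unsigned int chunk = count < 1024 ? count : 1024;

	while (count) {
		unsigned int n = count < chunk ? count : chunk;
		void *va = vmap(pages, n, VM_MAP, PAGE_KERNEL);

		if (!va) {
			if (chunk <= 1)
				return -ENOMEM;  /* genuinely out of vmalloc space */
			chunk >>= 1;             /* halve the mapping and retry    */
			continue;
		}
		memset(va, pattern, (size_t)n * PAGE_SIZE);
		vunmap(va);
		pages += n;
		count -= n;
	}
	return 0;
}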
*/ -+ PVRSRVStatsIncrMemAllocPoolStat(PAGE_SIZE * psPagePoolEntry->uiItemsRemaining); -+#endif -+ -+ _PagePoolUnlock(); -+ -+ OSFreeMem(pvData); -+ OSAtomicDecrement(&g_iPoolCleanTasks); -+ -+ return PVRSRV_OK; -+ -+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) -+eExit: -+ /* we failed to zero the pages so return the error so we can -+ * retry during the next spin */ -+ if ((psCleanupData->sCleanupWork.ui32RetryCount - 1) > 0) -+ { -+ return eError; -+ } -+ -+ /* this was the last retry, give up and free pages to OS */ -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Deferred task error, freeing pages to OS.", -+ __func__)); -+ _PagePoolLock(); -+ -+ *puiCounter -= psPagePoolEntry->uiItemsRemaining; -+ -+ _PagePoolUnlock(); -+ -+ for (i = 0; i < psCleanupData->psPoolEntry->uiItemsRemaining; i++) -+ { -+ _FreeOSPage(0, IMG_TRUE, psPagePoolEntry->ppsPageArray[i]); -+ } -+ OSFreeMemNoStats(psPagePoolEntry->ppsPageArray); -+ kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry); -+ OSFreeMem(psCleanupData); -+ -+ OSAtomicDecrement(&g_iPoolCleanTasks); -+ -+ return PVRSRV_OK; -+#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */ -+} -+ -+ -+/* Put page array to the page pool. -+ * Handles locking and checks whether the pages are -+ * suitable to be stored in the pool. */ -+static inline IMG_BOOL -+_PutPagesToPoolLocked(PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT32 ui32CPUCacheFlags, -+ struct page **ppsPageArray, -+ IMG_UINT32 uiOrder, -+ IMG_UINT32 uiNumPages) -+{ -+ LinuxCleanupData *psCleanupData; -+ PVRSRV_CLEANUP_THREAD_WORK *psCleanupThreadFn; -+#if defined(SUPPORT_PHYSMEM_TEST) -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+#endif -+ -+ if (uiOrder == 0 && -+ !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags)) -+ { -+ IMG_UINT32 uiEntries; -+ IMG_UINT32 *puiCounter; -+ struct list_head *psPoolHead; -+ -+ -+ _PagePoolLock(); -+ -+ uiEntries = _PagesInPoolUnlocked(); -+ -+ /* Check for number of current page pool entries and whether -+ * we have other asynchronous tasks in-flight */ -+ if ( (uiEntries < g_ui32PagePoolMaxEntries) && -+ ((uiEntries + uiNumPages) < -+ (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxExcessEntries) )) -+ { -+ if (OSAtomicIncrement(&g_iPoolCleanTasks) <= -+ PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS) -+ { -+#if defined(SUPPORT_PHYSMEM_TEST) -+ if (!psPVRSRVData->hCleanupThread) -+ { -+ goto eDecrement; -+ } -+#endif -+ -+ psCleanupData = OSAllocMem(sizeof(*psCleanupData)); -+ -+ if (!psCleanupData) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to get memory for deferred page pool cleanup. " -+ "Trying to free pages immediately", -+ __func__)); -+ goto eDecrement; -+ } -+ -+ psCleanupThreadFn = &psCleanupData->sCleanupWork; -+ psCleanupData->ui32CPUCacheMode = ui32CPUCacheFlags; -+ psCleanupData->psPoolEntry = kmem_cache_alloc(g_psLinuxPagePoolCache, GFP_KERNEL); -+ -+ if (!psCleanupData->psPoolEntry) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to get memory for deferred page pool cleanup. 
" -+ "Trying to free pages immediately", -+ __func__)); -+ goto eFreeCleanupData; -+ } -+ -+ if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to get correct page pool", -+ __func__)); -+ goto eFreePoolEntry; -+ } -+ -+ /* Increase counter here to avoid deferred cleanup tasks piling up */ -+ *puiCounter = *puiCounter + uiNumPages; -+ -+ psCleanupData->psPoolEntry->ppsPageArray = ppsPageArray; -+ psCleanupData->psPoolEntry->uiItemsRemaining = uiNumPages; -+ -+ psCleanupThreadFn->pfnFree = _CleanupThread_CleanPages; -+ psCleanupThreadFn->pvData = psCleanupData; -+ psCleanupThreadFn->bDependsOnHW = IMG_FALSE; -+ psCleanupThreadFn->eCleanupType = PVRSRV_CLEANUP_TYPE_OSMEM; -+ CLEANUP_THREAD_SET_RETRY_COUNT(psCleanupThreadFn, -+ CLEANUP_THREAD_RETRY_COUNT_DEFAULT); -+ -+ /* We must not hold the pool lock when calling AddWork because it might call us back to -+ * free pooled pages directly when unloading the driver */ -+ _PagePoolUnlock(); -+ -+ PVRSRVCleanupThreadAddWork(psDevNode, psCleanupThreadFn); -+ } -+ else -+ { -+ goto eDecrement; -+ } -+ } -+ else -+ { -+ goto eUnlock; -+ } -+ } -+ else -+ { -+ goto eExitFalse; -+ } -+ -+ return IMG_TRUE; -+ -+eFreePoolEntry: -+ OSFreeMem(psCleanupData->psPoolEntry); -+eFreeCleanupData: -+ OSFreeMem(psCleanupData); -+eDecrement: -+ OSAtomicDecrement(&g_iPoolCleanTasks); -+eUnlock: -+ _PagePoolUnlock(); -+eExitFalse: -+ return IMG_FALSE; -+} -+ -+/* Get the GFP flags that we pass to the page allocator */ -+static inline gfp_t -+_GetGFPFlags(IMG_BOOL bZero, -+ PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ struct device *psDev = psDevNode->psDevConfig->pvOSDevice; -+ gfp_t gfp_flags = GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC; -+ -+#if defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY) -+ /* Force use of HIGHMEM */ -+ gfp_flags |= __GFP_HIGHMEM; -+ -+ PVR_UNREFERENCED_PARAMETER(psDev); -+#else -+ if (psDev) -+ { -+#if defined(CONFIG_64BIT) || defined(CONFIG_ARM_LPAE) || defined(CONFIG_X86_PAE) -+ if (*psDev->dma_mask > DMA_BIT_MASK(32)) -+ { -+ /* If our system is able to handle large addresses use highmem */ -+ gfp_flags |= __GFP_HIGHMEM; -+ } -+ else if (*psDev->dma_mask == DMA_BIT_MASK(32)) -+ { -+ /* Limit to 32 bit. -+ * Achieved by setting __GFP_DMA32 for 64 bit systems */ -+ gfp_flags |= __GFP_DMA32; -+ } -+ else -+ { -+ /* Limit to size of DMA zone. */ -+ gfp_flags |= __GFP_DMA; -+ } -+#else -+ if (*psDev->dma_mask < DMA_BIT_MASK(32)) -+ { -+ gfp_flags |= __GFP_DMA; -+ } -+ else -+ { -+ gfp_flags |= __GFP_HIGHMEM; -+ } -+#endif /* if defined(CONFIG_64BIT) || defined(CONFIG_ARM_LPAE) || defined(CONFIG_X86_PAE) */ -+ } -+ -+#endif /* if defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY) */ -+ -+ if (bZero) -+ { -+ gfp_flags |= __GFP_ZERO; -+ } -+ -+ return gfp_flags; -+} -+ -+/* -+ * @Function _PoisonDevicePage -+ * -+ * @Description Poisons a device page. In normal case the device page has the -+ * same size as the OS page and so the ui32DevPageOrder will be -+ * equal to 0 and page argument will point to one OS page -+ * structure. In case of Non4K pages the order will be greater -+ * than 0 and page argument will point to an array of OS -+ * allocated pages. 
-+ * -+ * @Input psDevNode pointer to the device object -+ * @Input page array of the pages allocated by from the OS -+ * @Input ui32DevPageOrder order of the page (same as the one used to allocate -+ * the page array by alloc_pages()) -+ * @Input ui32CPUCacheFlags CPU cache flags applied to the page -+ * @Input ui8PoisonValue value used to poison the page -+ */ -+static void -+_PoisonDevicePage(PVRSRV_DEVICE_NODE *psDevNode, -+ struct page *page, -+ IMG_UINT32 ui32DevPageOrder, -+ IMG_UINT32 ui32CPUCacheFlags, -+ IMG_BYTE ui8PoisonValue) -+{ -+ IMG_UINT32 ui32OsPageIdx; -+ -+ for (ui32OsPageIdx = 0; -+ ui32OsPageIdx < (1U << ui32DevPageOrder); -+ ui32OsPageIdx++) -+ { -+ struct page *current_page = page + ui32OsPageIdx; -+ IMG_CPU_PHYADDR sCPUPhysAddrStart = {page_to_phys(current_page)}; -+ IMG_CPU_PHYADDR sCPUPhysAddrEnd = {sCPUPhysAddrStart.uiAddr + PAGE_SIZE}; -+ -+ void *kvaddr = kmap_atomic(current_page); -+ -+ /* kmap_atomic maps pages as cached so it's safe to use OSCachedMemSet -+ * here (also pages are always 8 bytes aligned anyway) */ -+ OSCachedMemSet(kvaddr, ui8PoisonValue, PAGE_SIZE); -+ -+ OSCPUCacheFlushRangeKM(psDevNode, kvaddr, kvaddr + PAGE_SIZE, -+ sCPUPhysAddrStart, sCPUPhysAddrEnd); -+ -+ kunmap_atomic(kvaddr); -+ } -+} -+ -+/* Allocate and initialise the structure to hold the metadata of the allocation */ -+static PVRSRV_ERROR -+_AllocOSPageArray(PVRSRV_DEVICE_NODE *psDevNode, -+ PMR_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 uiLog2AllocPageSize, -+ IMG_UINT32 ui32AllocFlags, -+ IMG_UINT32 ui32CPUCacheFlags, -+ IMG_PID uiPid, -+ PMR_OSPAGEARRAY_DATA **ppsPageArrayDataPtr) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiNumOSPageSizeVirtPages; -+ IMG_UINT32 uiNumDevPageSizeVirtPages; -+ PMR_OSPAGEARRAY_DATA *psPageArrayData; -+ IMG_UINT64 ui64DmaMask = 0; -+ PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); -+ -+ /* Use of cast below is justified by the assertion that follows to -+ * prove that no significant bits have been truncated */ -+ uiNumOSPageSizeVirtPages = (IMG_UINT32) (((uiSize - 1) >> PAGE_SHIFT) + 1); -+ PVR_ASSERT(((PMR_SIZE_T) uiNumOSPageSizeVirtPages << PAGE_SHIFT) == uiSize); -+ -+ uiNumDevPageSizeVirtPages = uiNumOSPageSizeVirtPages >> (uiLog2AllocPageSize - PAGE_SHIFT); -+ -+ /* Allocate the struct to hold the metadata */ -+ psPageArrayData = kmem_cache_alloc(g_psLinuxPageArray, GFP_KERNEL); -+ if (psPageArrayData == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: OS refused the memory allocation for the private data.", -+ __func__)); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto e_freed_none; -+ } -+ -+ /* -+ * Allocate the page array -+ * -+ * We avoid tracking this memory because this structure might go into the page pool. -+ * The OS can drain the pool asynchronously and when doing that we have to avoid -+ * any potential deadlocks. -+ * -+ * In one scenario the process stats vmalloc hash table lock is held and then -+ * the oom-killer softirq is trying to call _ScanObjectsInPagePool(), it must not -+ * try to acquire the vmalloc hash table lock again. 
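/*
 * Editor's sketch only: kernels >= 5.11 prefer kmap_local_page() over the
 * kmap_atomic() used in _PoisonDevicePage(). The helper below shows the
 * per-OS-page poisoning step with that API; cache maintenance is omitted.
 */
#include <linux/highmem.h>
#include <linux/string.h>

static void poison_one_os_page(struct page *page, u8 pattern)
{
	void *va = kmap_local_page(page);   /* short-lived, CPU-local mapping */

	memset(va, pattern, PAGE_SIZE);
	kunmap_local(va);
}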
-+ */ -+ psPageArrayData->pagearray = OSAllocZMemNoStats(sizeof(struct page *) * uiNumDevPageSizeVirtPages); -+ if (psPageArrayData->pagearray == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto e_free_kmem_cache; -+ } -+ else -+ { -+ if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA)) -+ { -+ /* Allocate additional DMA/CMA cpu kernel virtual address & device bus address array state */ -+ psPageArrayData->dmavirtarray = OSAllocZMemNoStats(sizeof(void*) * uiNumDevPageSizeVirtPages); -+ if (psPageArrayData->dmavirtarray == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto e_free_pagearray; -+ } -+ -+ psPageArrayData->dmaphysarray = OSAllocZMemNoStats(sizeof(dma_addr_t) * uiNumDevPageSizeVirtPages); -+ if (psPageArrayData->dmaphysarray == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto e_free_cpuvirtaddrarray; -+ } -+ } -+ else -+ { -+ psPageArrayData->dmavirtarray = NULL; -+ psPageArrayData->dmaphysarray = NULL; -+ } -+ } -+ -+ if (psDevNode->psDevConfig && psDevNode->psDevConfig->pvOSDevice) -+ { -+ struct device *psDev = psDevNode->psDevConfig->pvOSDevice; -+ ui64DmaMask = *psDev->dma_mask; -+ } -+ -+ /* Init metadata */ -+ psPageArrayData->psDevNode = psDevNode; -+ psPageArrayData->uiPid = uiPid; -+ psPageArrayData->iNumOSPagesAllocated = 0; -+ psPageArrayData->uiTotalNumOSPages = uiNumOSPageSizeVirtPages; -+ psPageArrayData->uiLog2AllocPageSize = uiLog2AllocPageSize; -+ psPageArrayData->ui64DmaMask = ui64DmaMask; -+ psPageArrayData->ui32AllocFlags = ui32AllocFlags; -+ psPageArrayData->ui32CPUCacheFlags = ui32CPUCacheFlags; -+ psPageArrayData->ui32CMAAdjustedPageCount = 0; -+ -+ *ppsPageArrayDataPtr = psPageArrayData; -+ return PVRSRV_OK; -+ -+/* Error path */ -+e_free_cpuvirtaddrarray: -+ OSFreeMemNoStats(psPageArrayData->dmavirtarray); -+ -+e_free_pagearray: -+ OSFreeMemNoStats(psPageArrayData->pagearray); -+ -+e_free_kmem_cache: -+ kmem_cache_free(g_psLinuxPageArray, psPageArrayData); -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: OS refused the memory allocation for the page pointer table. " -+ "Did you ask for too much?", -+ __func__)); -+ -+e_freed_none: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+static inline void -+_ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode, -+ struct page **ppsPage, -+ IMG_UINT32 uiNumPages) -+{ -+ void * pvAddr; -+ -+ if (OSCPUCacheOpAddressType(psDevNode) == OS_CACHE_OP_ADDR_TYPE_VIRTUAL) -+ { -+ pgprot_t pgprot = PAGE_KERNEL; -+ -+ IMG_UINT32 uiNumToClean = uiNumPages; -+ struct page **ppsCleanArray = ppsPage; -+ -+ /* Map and flush page. -+ * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_SIZE -+ * at a time. 
*/ -+ while (uiNumToClean != 0) -+ { -+ IMG_UINT32 uiToClean = MIN(PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES, -+ uiNumToClean); -+ IMG_CPU_PHYADDR sUnused = -+ { IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL) }; -+ -+ pvAddr = pvr_vmap(ppsCleanArray, uiToClean, VM_MAP, pgprot); -+ if (!pvAddr) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Unable to flush page cache for new allocation, skipping flush.")); -+ return; -+ } -+ -+ CacheOpExec(psDevNode, -+ pvAddr, -+ pvAddr + PAGE_SIZE, -+ sUnused, -+ sUnused, -+ PVRSRV_CACHE_OP_FLUSH); -+ -+ pvr_vunmap(pvAddr, uiToClean, pgprot); -+ ppsCleanArray = &(ppsCleanArray[uiToClean]); -+ uiNumToClean -= uiToClean; -+ } -+ } -+ else -+ { -+ IMG_UINT32 ui32Idx; -+ -+ for (ui32Idx = 0; ui32Idx < uiNumPages; ++ui32Idx) -+ { -+ IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd; -+ -+ pvAddr = kmap(ppsPage[ui32Idx]); -+ sCPUPhysAddrStart.uiAddr = page_to_phys(ppsPage[ui32Idx]); -+ sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE; -+ -+ /* If we're zeroing, we need to make sure the cleared memory is pushed out -+ * of the cache before the cache lines are invalidated */ -+ CacheOpExec(psDevNode, -+ pvAddr, -+ pvAddr + PAGE_SIZE, -+ sCPUPhysAddrStart, -+ sCPUPhysAddrEnd, -+ PVRSRV_CACHE_OP_FLUSH); -+ -+ kunmap(ppsPage[ui32Idx]); -+ } -+ } -+} -+ -+/* Change the caching attribute of pages on x86 systems and takes care of -+ * cache maintenance. This function is supposed to be called once for pages that -+ * came from alloc_pages(). It expects an array of OS page sized pages! -+ * -+ * Flush/Invalidate pages in case the allocation is not cached. Necessary to -+ * remove pages from the cache that might be flushed later and corrupt memory. */ -+static inline PVRSRV_ERROR -+_ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode, -+ struct page **ppsPage, -+ IMG_UINT32 uiNumPages, -+ IMG_BOOL bFlush, -+ IMG_UINT32 ui32CPUCacheFlags) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_BOOL bCPUCached = PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags); -+ IMG_BOOL bCPUUncached = PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags); -+ IMG_BOOL bCPUWriteCombine = PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags); -+ -+ if (ppsPage != NULL && uiNumPages != 0) -+ { -+#if defined(CONFIG_X86) -+ /* On x86 we have to set page cache attributes for non-cached pages. -+ * The call is implicitly taking care of all flushing/invalidating -+ * and therefore we can skip the usual cache maintenance after this. */ -+ if (bCPUUncached || bCPUWriteCombine) -+ { -+ /* On x86 if we already have a mapping (e.g. 
low memory) we need to change the mode of -+ current mapping before we map it ourselves */ -+ int ret = IMG_FALSE; -+ -+ switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags)) -+ { -+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: -+ ret = set_pages_array_uc(ppsPage, uiNumPages); -+ if (ret) -+ { -+ eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE; -+ PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to UC failed, returned %d", ret)); -+ } -+ break; -+ -+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: -+ ret = set_pages_array_wc(ppsPage, uiNumPages); -+ if (ret) -+ { -+ eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE; -+ PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to WC failed, returned %d", ret)); -+ } -+ break; -+ -+ case PVRSRV_MEMALLOCFLAG_CPU_CACHED: -+ break; -+ -+ default: -+ break; -+ } -+ } -+ else -+#endif -+ { -+ if ( bFlush || -+ bCPUUncached || bCPUWriteCombine || -+ (bCPUCached && PVRSRV_CHECK_CPU_CACHE_CLEAN(ui32CPUCacheFlags)) ) -+ { -+ /* We can be given pages which still remain in the cache. -+ In order to make sure that the data we write through our mappings -+ doesn't get overwritten by later cache evictions we invalidate the -+ pages that are given to us. -+ -+ Note: -+ This still seems to be true if we request cold pages, it's just less -+ likely to be in the cache. */ -+ _ApplyCacheMaintenance(psDevNode, -+ ppsPage, -+ uiNumPages); -+ } -+ } -+ } -+ -+ return eError; -+} -+ -+/* Same as _AllocOSPage except it uses DMA framework to perform allocation. -+ * uiPageIndex is expected to be the pagearray index where to store the higher order page. */ -+static PVRSRV_ERROR -+_AllocOSPage_CMA(PMR_OSPAGEARRAY_DATA *psPageArrayData, -+ gfp_t gfp_flags, -+ IMG_UINT32 ui32AllocOrder, -+ IMG_UINT32 ui32MinOrder, -+ IMG_UINT32 uiPageIndex) -+{ -+ void *virt_addr; -+ struct page *page; -+ dma_addr_t bus_addr; -+ IMG_UINT32 uiAllocIsMisaligned; -+ size_t alloc_size = PAGE_SIZE << ui32AllocOrder; -+ struct device *dev = psPageArrayData->psDevNode->psDevConfig->pvOSDevice; -+ PVR_ASSERT(ui32AllocOrder == ui32MinOrder); -+ -+ do -+ { -+ DisableOOMKiller(); -+#if defined(PVR_LINUX_PHYSMEM_SUPPRESS_DMA_AC) -+ virt_addr = NULL; -+#else -+ virt_addr = dma_alloc_coherent(dev, alloc_size, &bus_addr, gfp_flags); -+#endif -+ if (virt_addr == NULL) -+ { -+ /* The idea here is primarily to support some older kernels with -+ broken or non-functioning DMA/CMA implementations (< Linux-3.4) -+ and to also handle DMA/CMA allocation failures by attempting a -+ normal page allocation though we expect dma_alloc_coherent() -+ already attempts this internally also before failing but -+ nonetheless it does no harm to retry the allocation ourselves */ -+ page = alloc_pages(gfp_flags, ui32AllocOrder); -+ if (page) -+ { -+ /* Taint bus_addr as alloc_page, needed when freeing; -+ also acquire the low memory page address only, this -+ prevents mapping possible high memory pages into -+ kernel virtual address space which might exhaust -+ the VMALLOC address space */ -+ bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page)); -+ virt_addr = (void*)(uintptr_t) DMA_VADDR_NOT_IN_USE; -+ } -+ else -+ { -+ EnableOOMKiller(); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ } -+ else -+ { -+#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64) -+ page = pfn_to_page(bus_addr >> PAGE_SHIFT); -+#else -+ /* Assumes bus address space is identical to physical address space */ -+ page = phys_to_page(bus_addr); -+#endif -+ } -+ EnableOOMKiller(); -+ -+ /* Physical allocation alignment works/hidden behind the scene transparently, -+ we do 
this here if the allocated buffer address does not meet its alignment -+ requirement by over-allocating using the next power-2 order and reporting -+ aligned-adjusted values back to meet the requested alignment constraint. -+ Evidently we waste memory by doing this so should only do so if we do not -+ initially meet the alignment constraint. */ -+ uiAllocIsMisaligned = DMA_GET_ADDR(bus_addr) & ((PAGE_SIZE< ui32MinOrder) -+ { -+ IMG_BOOL bUsedAllocPages = DMA_IS_ALLOCPG_ADDR(bus_addr); -+ if (ui32AllocOrder == ui32MinOrder) -+ { -+ if (bUsedAllocPages) -+ { -+ __free_pages(page, ui32AllocOrder); -+ } -+ else -+ { -+ dma_free_coherent(dev, alloc_size, virt_addr, bus_addr); -+ } -+ -+ ui32AllocOrder = ui32AllocOrder + 1; -+ alloc_size = PAGE_SIZE << ui32AllocOrder; -+ -+ PVR_ASSERT(uiAllocIsMisaligned != 0); -+ } -+ else -+ { -+ size_t align_adjust = PAGE_SIZE << ui32MinOrder; -+ -+ /* Adjust virtual/bus addresses to meet alignment */ -+ bus_addr = bUsedAllocPages ? page_to_phys(page) : bus_addr; -+ align_adjust = PVR_ALIGN((size_t)bus_addr, align_adjust); -+ align_adjust -= (size_t)bus_addr; -+ -+ if (align_adjust) -+ { -+ if (bUsedAllocPages) -+ { -+ page += align_adjust >> PAGE_SHIFT; -+ bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page)); -+ virt_addr = (void*)(uintptr_t) DMA_VADDR_NOT_IN_USE; -+ } -+ else -+ { -+ bus_addr += align_adjust; -+ virt_addr += align_adjust; -+#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64) -+ page = pfn_to_page(bus_addr >> PAGE_SHIFT); -+#else -+ /* Assumes bus address space is identical to physical address space */ -+ page = phys_to_page(bus_addr); -+#endif -+ } -+ -+ /* Store adjustments in PAGE_SIZE counts */ -+ align_adjust = align_adjust >> PAGE_SHIFT; -+ bus_addr = DMA_SET_ALIGN_ADJUSTMENT(bus_addr, align_adjust); -+ } -+ -+ /* Taint bus_addr due to over-allocation, allows us to free -+ * memory correctly */ -+ bus_addr = DMA_SET_ADJUSTED_ADDR(bus_addr); -+ uiAllocIsMisaligned = 0; -+ } -+ } -+ } while (uiAllocIsMisaligned); -+ -+ /* Convert OSPageSize-based index into DevicePageSize-based index */ -+ psPageArrayData->ui32CMAAdjustedPageCount += (alloc_size - (PAGE_SIZE << ui32AllocOrder )); -+ -+ psPageArrayData->dmavirtarray[uiPageIndex] = virt_addr; -+ psPageArrayData->dmaphysarray[uiPageIndex] = bus_addr; -+ psPageArrayData->pagearray[uiPageIndex] = page; -+ -+ return PVRSRV_OK; -+} -+ -+/* Allocate a page of order uiAllocOrder and stores it in the page array ppsPage at -+ * position uiPageIndex. 
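/*
 * Editor's worked example of the alignment adjustment described above, with
 * illustrative numbers and 4 KiB OS pages assumed: a 16 KiB (order-2) device
 * page must be 16 KiB aligned. If the doubled, order-3 allocation comes back
 * at bus address 0x80005000, the next 16 KiB boundary is 0x80008000, so the
 * allocation is advanced by 0x3000 bytes = 3 OS pages, and that 3-page
 * adjustment is recorded so the free path can reconstruct the original
 * address.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/types.h>

static unsigned int align_adjust_pages(dma_addr_t bus_addr, unsigned int min_order)
{
	size_t align = (size_t)PAGE_SIZE << min_order;

	return (ALIGN((size_t)bus_addr, align) - (size_t)bus_addr) >> PAGE_SHIFT;
}
/* align_adjust_pages(0x80005000, 2) == 3 */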
-+ * -+ * If the order is higher than 0, it splits the page into multiples and -+ * stores them at position uiPageIndex to uiPageIndex+(1<= KERNEL_VERSION(3,10,0)) -+ /* In case we need to, split the higher order page; -+ this should only be used for order-0 allocations -+ as higher order allocations should use DMA/CMA */ -+ if (uiAllocOrder != 0) -+ { -+ split_page(psPage, uiAllocOrder); -+ } -+#endif -+ -+ /* Store the page (or multiple split pages) in the page array */ -+ for (ui32Count = 0; ui32Count < (1 << uiAllocOrder); ui32Count++) -+ { -+ psPageArrayData->pagearray[uiPageIndex + ui32Count] = &(psPage[ui32Count]); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ -+static inline void _AddMemAllocRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, -+ const struct page *psPage) -+{ -+ IMG_CPU_PHYADDR sCPUPhysAddr = { page_to_phys(psPage) }; -+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, -+ NULL, sCPUPhysAddr, -+ 1 << psPageArrayData->uiLog2AllocPageSize, -+ psPageArrayData->uiPid -+ DEBUG_MEMSTATS_VALUES); -+} -+ -+static inline void _RemoveMemAllocRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, -+ const struct page *psPage) -+{ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ IMG_UINT32 uiStat = BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_ZOMBIE) -+ ? PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES -+ : PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES; -+#else -+ IMG_UINT32 uiStat = PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES; -+#endif -+ -+ PVRSRVStatsRemoveMemAllocRecord(uiStat, (IMG_UINT64) page_to_phys(psPage), -+ psPageArrayData->uiPid); -+} -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+static inline void _AddMemZombieRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, -+ const struct page *psPage) -+{ -+ IMG_CPU_PHYADDR sCPUPhysAddr = { page_to_phys(psPage) }; -+ -+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, -+ (IMG_UINT64) sCPUPhysAddr.uiAddr, -+ psPageArrayData->uiPid); -+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES, -+ NULL, sCPUPhysAddr, -+ 1 << psPageArrayData->uiLog2AllocPageSize, -+ psPageArrayData->uiPid -+ DEBUG_MEMSTATS_VALUES); -+} -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+#else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ -+ -+static inline void _IncrMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid) -+{ -+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, -+ uiSize, uiPid); -+} -+ -+static inline void _DecrMemAllocStat_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, -+ size_t uiSize, IMG_PID uiPid) -+{ -+ IMG_UINT32 uiStat = PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES; -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_ZOMBIE)) -+ { -+ uiStat = PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES; -+ } -+#endif -+ -+ PVRSRVStatsDecrMemAllocStat(uiStat, uiSize, uiPid); -+} -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+static inline void _ZombifyMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid) -+{ -+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, -+ uiSize, uiPid); -+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES, -+ uiSize, uiPid); -+} -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ -+#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ -+ -+/* Allocation of OS pages: We may allocate 2^N order pages at a time for two reasons. 
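/*
 * Editor's minimal sketch of the split_page() technique referred to above:
 * allocate one higher-order block, then split it so every order-0 page can be
 * freed individually later. Function name and GFP flags are illustrative.
 */
#include <linux/gfp.h>
#include <linux/mm.h>

static int grab_split_block(struct page **out, unsigned int order)
{
	unsigned int i;
	struct page *head = alloc_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, order);

	if (!head)
		return -ENOMEM;

	split_page(head, order);            /* now (1 << order) order-0 pages     */
	for (i = 0; i < (1U << order); i++)
		out[i] = head + i;          /* each can go back via __free_page() */

	return 0;
}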
-+ * -+ * Firstly to support device pages which are larger than OS. By asking the OS for 2^N -+ * order OS pages at a time we guarantee the device page is contiguous. -+ * -+ * Secondly for performance where we may ask for 2^N order pages to reduce the number -+ * of calls to alloc_pages, and thus reduce time for huge allocations. -+ * -+ * Regardless of page order requested, we need to break them down to track _OS pages. -+ * The maximum order requested is increased if all max order allocations were successful. -+ * If any request fails we reduce the max order. -+ */ -+static PVRSRV_ERROR -+_AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiArrayIndex = 0; -+ IMG_UINT32 ui32Order; -+ IMG_UINT32 ui32MinOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; -+ IMG_BOOL bIncreaseMaxOrder = IMG_TRUE; -+ -+ IMG_UINT32 ui32NumPageReq; -+ IMG_UINT32 uiOSPagesToAlloc; -+ IMG_UINT32 uiDevPagesFromPool = 0; -+ -+ gfp_t gfp_flags = _GetGFPFlags(ui32MinOrder ? _ShouldInitMem(psPageArrayData->ui32AllocFlags) : IMG_FALSE, /* Zero all pages later as batch */ -+ psPageArrayData->psDevNode); -+ gfp_t ui32GfpFlags; -+ gfp_t ui32HighOrderGfpFlags = ((gfp_flags & ~__GFP_RECLAIM) | __GFP_NORETRY); -+ -+ struct page **ppsPageArray = psPageArrayData->pagearray; -+ struct page **ppsPageAttributeArray = NULL; -+ -+ uiOSPagesToAlloc = psPageArrayData->uiTotalNumOSPages; -+ -+ /* Try to get pages from the pool since it is faster; -+ the page pool currently only supports zero-order pages -+ thus currently excludes all DMA/CMA allocated memory. -+ _ShouldInitMem() must not be used for bZero argument since it only -+ applies to new pages allocated from the kernel. */ -+ _GetPagesFromPoolLocked(psPageArrayData->psDevNode, -+ psPageArrayData->ui32CPUCacheFlags, -+ uiOSPagesToAlloc, -+ ui32MinOrder, -+ BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO), -+ ppsPageArray, -+ &uiDevPagesFromPool); -+ -+ uiArrayIndex = uiDevPagesFromPool; -+ -+ if ((uiOSPagesToAlloc - uiDevPagesFromPool) < PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD) -+ { /* Small allocations: ask for one device page at a time */ -+ ui32Order = ui32MinOrder; -+ bIncreaseMaxOrder = IMG_FALSE; -+ } -+ else -+ { -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) -+ /* Large zero-order or none zero-order allocations, ask for -+ MAX(max-order, min-order) order pages at a time; alloc -+ failures throttles this down to ZeroOrder allocations */ -+ ui32Order = MAX(g_uiMaxOrder, ui32MinOrder); -+#else -+ /* Because split_page() is not available on older kernels -+ we cannot mix-and-match any-order pages in the PMR; -+ only same-order pages must be present in page array. -+ So we unconditionally force it to use ui32MinOrder on -+ these older kernels */ -+ ui32Order = ui32MinOrder; -+#if defined(DEBUG) -+ if (! BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) -+ { -+ /* Check that this is zero */ -+ PVR_ASSERT(! ui32Order); -+ } -+#endif -+#endif -+ } -+ -+ /* Only if asking for more contiguity than we actually need, let it fail */ -+ ui32GfpFlags = (ui32Order > ui32MinOrder) ? 
ui32HighOrderGfpFlags : gfp_flags; -+ ui32NumPageReq = (1 << ui32Order); -+ -+ while (uiArrayIndex < uiOSPagesToAlloc) -+ { -+ IMG_UINT32 ui32PageRemain = uiOSPagesToAlloc - uiArrayIndex; -+ -+ while (ui32NumPageReq > ui32PageRemain) -+ { -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) -+ /* Pages to request is larger than that remaining -+ so ask for less so never over allocate */ -+ ui32Order = MAX(ui32Order >> 1, ui32MinOrder); -+#else -+ /* Pages to request is larger than that remaining so -+ do nothing thus over allocate as we do not support -+ mix/match of any-order pages in PMR page-array in -+ older kernels (simplifies page free logic) */ -+ PVR_ASSERT(ui32Order == ui32MinOrder); -+#endif -+ ui32NumPageReq = (1 << ui32Order); -+ ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags; -+ } -+ -+ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) -+ { -+ /* As the DMA/CMA framework rounds-up request to the -+ next power-of-two, we request multiple uiMinOrder -+ pages to satisfy allocation request in order to -+ minimise wasting memory */ -+ eError = _AllocOSPage_CMA(psPageArrayData, -+ ui32GfpFlags, -+ ui32Order, -+ ui32MinOrder, -+ uiArrayIndex >> ui32MinOrder); -+ } -+ else -+ { -+ /* Allocate uiOrder pages at uiArrayIndex */ -+ eError = _AllocOSPage(psPageArrayData, -+ ui32GfpFlags, -+ ui32Order, -+ ui32MinOrder, -+ uiArrayIndex); -+ } -+ -+ if (eError == PVRSRV_OK) -+ { -+ /* Successful request. Move onto next. */ -+ uiArrayIndex += ui32NumPageReq; -+ } -+ else -+ { -+ if (ui32Order > ui32MinOrder) -+ { -+ /* Last request failed. Let's ask for less next time */ -+ ui32Order = MAX(ui32Order >> 1, ui32MinOrder); -+ bIncreaseMaxOrder = IMG_FALSE; -+ ui32NumPageReq = (1 << ui32Order); -+ ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags; -+ g_uiMaxOrder = ui32Order; -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)) -+ /* We should not trigger this code path in older kernels, -+ this is enforced by ensuring ui32Order == ui32MinOrder */ -+ PVR_ASSERT(ui32Order == ui32MinOrder); -+#endif -+ } -+ else -+ { -+ /* Failed to alloc pages at required contiguity. Failed allocation */ -+ PVR_DPF((PVR_DBG_ERROR, "%s: %s failed to honour request at %u of %u, flags = %x, order = %u (%s)", -+ __func__, -+ BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA) ? "dma_alloc_coherent" : "alloc_pages", -+ uiArrayIndex, -+ uiOSPagesToAlloc, -+ ui32GfpFlags, -+ ui32Order, -+ PVRSRVGetErrorString(eError))); -+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; -+ goto e_free_pages; -+ } -+ } -+ } -+ -+ if (bIncreaseMaxOrder && (g_uiMaxOrder < PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM)) -+ { /* All successful allocations on max order. 
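/*
 * Editor's sketch of the adaptive-order fallback used in the loop above: try
 * a large order with opportunistic flags (__GFP_NORETRY), halve the order on
 * failure, and only treat a failure at the minimum order as fatal. The global
 * max-order bookkeeping is omitted; names are illustrative.
 */
#include <linux/gfp.h>

static struct page *alloc_adaptive(unsigned int *order, unsigned int min_order)
{
	for (;;) {
		gfp_t gfp = GFP_USER | __GFP_NOWARN;
		struct page *page;

		if (*order > min_order)
			gfp |= __GFP_NORETRY;   /* opportunistic: let it fail fast */

		page = alloc_pages(gfp, *order);
		if (page || *order == min_order)
			return page;            /* NULL here is a real failure     */

		*order = (*order >> 1) > min_order ? (*order >> 1) : min_order;
	}
}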
Let's ask for more next time */ -+ g_uiMaxOrder++; -+ } -+ -+ /* Construct table of page pointers to apply attributes */ -+ ppsPageAttributeArray = &ppsPageArray[uiDevPagesFromPool]; -+ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) -+ { -+ IMG_UINT32 uiIdx, uiIdy, uiIdz; -+ -+ ppsPageAttributeArray = OSAllocMem(sizeof(struct page *) * uiOSPagesToAlloc); -+ PVR_LOG_GOTO_IF_NOMEM(ppsPageAttributeArray, eError, e_free_pages); -+ -+ for (uiIdx = 0; uiIdx < uiOSPagesToAlloc; uiIdx += ui32NumPageReq) -+ { -+ uiIdy = uiIdx >> ui32Order; -+ for (uiIdz = 0; uiIdz < ui32NumPageReq; uiIdz++) -+ { -+ ppsPageAttributeArray[uiIdx+uiIdz] = ppsPageArray[uiIdy]; -+ ppsPageAttributeArray[uiIdx+uiIdz] += uiIdz; -+ } -+ } -+ } -+ -+ if (_ShouldInitMem(psPageArrayData->ui32AllocFlags) && ui32MinOrder == 0) -+ { -+ eError = _MemsetPageArray(uiOSPagesToAlloc - uiDevPagesFromPool, -+ ppsPageAttributeArray, PAGE_KERNEL, -+ PVRSRV_ZERO_VALUE); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to zero pages (fast)")); -+ goto e_free_pages; -+ } -+ } -+ else if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_POISON_ON_ALLOC)) -+ { -+ /* need to call twice because ppsPageArray and ppsPageAttributeArray -+ * can point to different allocations: first for pages obtained from -+ * the pool and then the remaining pages */ -+ eError = _MemsetPageArray(uiDevPagesFromPool, ppsPageArray, PAGE_KERNEL, -+ PVRSRV_POISON_ON_ALLOC_VALUE); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to poison pages (fast)")); -+ } -+ eError = _MemsetPageArray(uiOSPagesToAlloc - uiDevPagesFromPool, -+ ppsPageAttributeArray, PAGE_KERNEL, -+ PVRSRV_POISON_ON_ALLOC_VALUE); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to poison pages (fast)")); -+ } -+ -+ /* for poisoning need to also flush the pool pages as the 0s have -+ * been overwritten */ -+ _ApplyCacheMaintenance(psPageArrayData->psDevNode, ppsPageArray, -+ uiDevPagesFromPool); -+ } -+ -+ /* Do the cache management as required */ -+ eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode, -+ ppsPageAttributeArray, -+ uiOSPagesToAlloc - uiDevPagesFromPool, -+ BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO) || -+ BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_POISON_ON_ALLOC), -+ psPageArrayData->ui32CPUCacheFlags); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes")); -+ goto e_free_pages; -+ } -+ else -+ { -+ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) -+ { -+ OSFreeMem(ppsPageAttributeArray); -+ } -+ } -+ -+ /* Update metadata */ -+ psPageArrayData->iNumOSPagesAllocated = psPageArrayData->uiTotalNumOSPages; -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ { -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ IMG_UINT32 ui32NumPages = -+ psPageArrayData->iNumOSPagesAllocated >> ui32MinOrder; -+ IMG_UINT32 i; -+ -+ for (i = 0; i < ui32NumPages; i++) -+ { -+ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) -+ { -+ _AddMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]); -+ } -+ else -+ { -+ _AddMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i << ui32MinOrder]); -+ } -+ } -+#else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ -+ _IncrMemAllocStat_UmaPages(((uiOSPagesToAlloc * PAGE_SIZE)+(psPageArrayData->ui32CMAAdjustedPageCount)), -+ psPageArrayData->uiPid); -+#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ -+ } -+#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ -+ -+ return PVRSRV_OK; -+ -+/* Error path */ -+e_free_pages: -+ { -+ 
IMG_UINT32 ui32PageToFree; -+ -+ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) -+ { -+ IMG_UINT32 uiDevArrayIndex = uiArrayIndex >> ui32Order; -+ IMG_UINT32 uiDevPageSize = PAGE_SIZE << ui32Order; -+ PVR_ASSERT(ui32Order == ui32MinOrder); -+ -+ if (ppsPageAttributeArray) -+ { -+ OSFreeMem(ppsPageAttributeArray); -+ } -+ -+ for (ui32PageToFree = 0; ui32PageToFree < uiDevArrayIndex; ui32PageToFree++) -+ { -+ _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, -+ uiDevPageSize, -+ ui32MinOrder, -+ psPageArrayData->dmavirtarray[ui32PageToFree], -+ psPageArrayData->dmaphysarray[ui32PageToFree], -+ ppsPageArray[ui32PageToFree]); -+ psPageArrayData->dmaphysarray[ui32PageToFree]= (dma_addr_t)0; -+ psPageArrayData->dmavirtarray[ui32PageToFree] = NULL; -+ ppsPageArray[ui32PageToFree] = NULL; -+ } -+ } -+ else -+ { -+ /* Free the pages we got from the pool */ -+ for (ui32PageToFree = 0; ui32PageToFree < uiDevPagesFromPool; ui32PageToFree++) -+ { -+ _FreeOSPage(ui32MinOrder, -+ BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE), -+ ppsPageArray[ui32PageToFree]); -+ ppsPageArray[ui32PageToFree] = NULL; -+ } -+ -+ for (ui32PageToFree = uiDevPagesFromPool; ui32PageToFree < uiArrayIndex; ui32PageToFree++) -+ { -+ _FreeOSPage(ui32MinOrder, IMG_FALSE, ppsPageArray[ui32PageToFree]); -+ ppsPageArray[ui32PageToFree] = NULL; -+ } -+ } -+ -+ return eError; -+ } -+} -+ -+static INLINE PVRSRV_ERROR -+_CheckIfIndexInRange(IMG_UINT32 ui32Index, IMG_UINT32 *pui32Indices, IMG_UINT32 ui32Limit) -+{ -+ if (pui32Indices[ui32Index] >= ui32Limit) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Given alloc index %u at %u is larger than page array %u.", -+ __func__, pui32Indices[ui32Index], ui32Index, ui32Limit)); -+ return PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static INLINE PVRSRV_ERROR -+_CheckIfPageNotAllocated(IMG_UINT32 ui32Index, IMG_UINT32 *pui32Indices, struct page **ppsPageArray) -+{ -+ if (ppsPageArray[pui32Indices[ui32Index]] != NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Mapping number %u at page array index %u already exists. " -+ "Page struct %p", __func__, pui32Indices[ui32Index], ui32Index, -+ ppsPageArray[pui32Indices[ui32Index]])); -+ return PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/* Allocation of OS pages: This function is used for sparse allocations. -+ * -+ * Sparse allocations provide only a proportion of sparse physical backing within the total -+ * virtual range. */ -+static PVRSRV_ERROR -+_AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, -+ IMG_UINT32 *puiAllocIndices, -+ IMG_UINT32 uiDevPagesToAlloc) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 i; -+ struct page **ppsPageArray = psPageArrayData->pagearray; -+ IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; -+ IMG_UINT32 uiDevPagesFromPool = 0; -+ IMG_UINT32 uiOSPagesToAlloc = uiDevPagesToAlloc * (1 << uiOrder); -+ IMG_UINT32 uiDevPagesAllocated = psPageArrayData->uiTotalNumOSPages >> uiOrder; -+ const IMG_UINT32 ui32AllocFlags = psPageArrayData->ui32AllocFlags; -+ gfp_t ui32GfpFlags = _GetGFPFlags(uiOrder ? 
_ShouldInitMem(ui32AllocFlags) : IMG_FALSE, /* Zero pages later as batch */ -+ psPageArrayData->psDevNode); -+ -+ /* We use this page array to receive pages from the pool and then reuse it afterwards to -+ * store pages that need their cache attribute changed on x86 */ -+ struct page **ppsTempPageArray; -+ IMG_UINT32 uiTempPageArrayIndex = 0; -+ -+ /* Allocate the temporary page array that we need here to receive pages -+ * from the pool and to store pages that need their caching attributes changed. -+ * Allocate number of OS pages to be able to use the attribute function later. */ -+ ppsTempPageArray = OSAllocMem(sizeof(struct page*) * uiOSPagesToAlloc); -+ PVR_LOG_GOTO_IF_NOMEM(ppsTempPageArray, eError, e_exit); -+ -+ /* Check the requested number of pages if they fit in the page array */ -+ if (uiDevPagesAllocated < -+ ((psPageArrayData->iNumOSPagesAllocated >> uiOrder) + uiDevPagesToAlloc)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Trying to allocate more pages (Order %u) than this buffer can handle, " -+ "Request + Allocated < Max! Request %u, Allocated %u, Max %u.", -+ __func__, -+ uiOrder, -+ uiDevPagesToAlloc, -+ psPageArrayData->iNumOSPagesAllocated >> uiOrder, -+ uiDevPagesAllocated)); -+ eError = PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE; -+ goto e_free_temp_array; -+ } -+ -+ /* Try to get pages from the pool since it is faster. The pages from pool are going to be -+ * allocated only if: -+ * - PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES == 1 && uiOrder == 0 -+ * - PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES == 0 && uiOrder == 0 && -+ * !(BIT_ISSET(ui32AllocFlags, FLAG_ZERO)) -+ * _ShouldInitMem() must not be used for bZero argument since it only -+ * applies to new pages allocated from the kernel. */ -+ _GetPagesFromPoolLocked(psPageArrayData->psDevNode, -+ psPageArrayData->ui32CPUCacheFlags, -+ uiDevPagesToAlloc, -+ uiOrder, -+ BIT_ISSET(ui32AllocFlags, FLAG_ZERO), -+ ppsTempPageArray, -+ &uiDevPagesFromPool); -+ -+ /* In general device pages can have higher order than 0 but page pool always provides only 0 -+ * order pages so they can be assigned to the OS pages values (in other words if we're -+ * allocating non-4k pages uiDevPagesFromPool will always be 0) */ -+ uiTempPageArrayIndex = uiDevPagesFromPool; -+ -+ /* Move pages we got from the pool to the array. */ -+ for (i = 0; i < uiDevPagesFromPool; i++) -+ { -+ eError = _CheckIfIndexInRange(i, puiAllocIndices, uiDevPagesAllocated); -+ PVR_GOTO_IF_ERROR(eError, e_free_pool_pages); -+ eError = _CheckIfPageNotAllocated(i, puiAllocIndices, ppsPageArray); -+ PVR_GOTO_IF_ERROR(eError, e_free_pool_pages); -+ -+ ppsPageArray[puiAllocIndices[i]] = ppsTempPageArray[i]; -+ } -+ -+ /* Allocate pages from the OS */ -+ for (i = uiDevPagesFromPool; i < uiDevPagesToAlloc; i++) -+ { -+ eError = _CheckIfIndexInRange(i, puiAllocIndices, uiDevPagesAllocated); -+ PVR_GOTO_IF_ERROR(eError, e_free_pages); -+ eError = _CheckIfPageNotAllocated(i, puiAllocIndices, ppsPageArray); -+ PVR_GOTO_IF_ERROR(eError, e_free_pages); -+ -+ /* Allocated pages and assign them the array. 
*/ -+ if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA)) -+ { -+ /* As the DMA/CMA framework rounds-up request to the -+ next power-of-two, we request multiple uiMinOrder -+ pages to satisfy allocation request in order to -+ minimise wasting memory */ -+ eError = _AllocOSPage_CMA(psPageArrayData, -+ ui32GfpFlags, -+ uiOrder, -+ uiOrder, -+ puiAllocIndices[i]); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to alloc CMA pages")); -+ goto e_free_pages; -+ } -+ } -+ else -+ { -+ DisableOOMKiller(); -+ ppsPageArray[puiAllocIndices[i]] = alloc_pages(ui32GfpFlags, uiOrder); -+ EnableOOMKiller(); -+ } -+ -+ if (ppsPageArray[puiAllocIndices[i]] != NULL) -+ { -+ /* Append pages to the temporary array so it's easier to process -+ * them later on. */ -+ -+ if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA)) -+ { -+ IMG_UINT32 idx; -+ struct page *psPageAddr; -+ -+ psPageAddr = ppsPageArray[puiAllocIndices[i]]; -+ -+ /* "divide" CMA pages into OS pages if they have higher order */ -+ for (idx = 0; idx < (1 << uiOrder); idx++) -+ { -+ ppsTempPageArray[uiTempPageArrayIndex + idx] = psPageAddr; -+ psPageAddr++; -+ } -+ uiTempPageArrayIndex += (1 << uiOrder); -+ } -+ else -+ { -+ ppsTempPageArray[uiTempPageArrayIndex] = ppsPageArray[puiAllocIndices[i]]; -+ uiTempPageArrayIndex++; -+ } -+ } -+ else -+ { -+ /* Failed to alloc pages at required contiguity. Failed allocation */ -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: alloc_pages failed to honour request at %u of %u, flags = %x, order = %u", -+ __func__, i, uiDevPagesToAlloc, ui32GfpFlags, uiOrder)); -+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; -+ goto e_free_pages; -+ } -+ } -+ -+ if (_ShouldInitMem(ui32AllocFlags) && uiOrder == 0) -+ { -+ /* At this point this array contains pages allocated from the page pool at its start -+ * and pages allocated from the OS after that. -+ * If there are pages from the pool here they must be zeroed already hence we don't have -+ * to do it again. This is because if PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES is enabled pool pages -+ * are zeroed in the cleanup thread. If it's disabled they aren't, and in that case we never -+ * allocate pages with FLAG_ZERO from the pool. This is why those pages need to be zeroed -+ * here. -+ * All of the above is true for the 0 order pages. For higher order we never allocated from -+ * the pool and those pages are allocated already zeroed from the OS. -+ * Long story short we can always skip pages allocated from the pool because they are either -+ * zeroed or we didn't allocate any of them. */ -+ eError = _MemsetPageArray(uiTempPageArrayIndex - uiDevPagesFromPool, -+ &ppsTempPageArray[uiDevPagesFromPool], -+ PAGE_KERNEL, PVRSRV_ZERO_VALUE); -+ PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "failed to zero pages (sparse)", e_free_pages); -+ } -+ else if (BIT_ISSET(ui32AllocFlags, FLAG_POISON_ON_ALLOC)) -+ { -+ /* Here we need to poison all of the pages regardless if they were -+ * allocated from the pool or from the system. */ -+ eError = _MemsetPageArray(uiTempPageArrayIndex, ppsTempPageArray, -+ PAGE_KERNEL, PVRSRV_POISON_ON_ALLOC_VALUE); -+ PVR_LOG_IF_FALSE(eError == PVRSRV_OK, "failed to poison pages (sparse)"); -+ -+ /* We need to flush the cache for the poisoned pool pages here. The flush for the pages -+ * allocated from the system is done below because we also need to add appropriate cache -+ * attributes to them. Pages allocated from the pool already come with correct caching -+ * mode. 
*/ -+ _ApplyCacheMaintenance(psPageArrayData->psDevNode, ppsTempPageArray, uiDevPagesFromPool); -+ } -+ -+ /* Do the cache management as required */ -+ eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode, -+ &ppsTempPageArray[uiDevPagesFromPool], -+ uiTempPageArrayIndex - uiDevPagesFromPool, -+ BIT_ISSET(ui32AllocFlags, FLAG_ZERO) || -+ BIT_ISSET(ui32AllocFlags, FLAG_POISON_ON_ALLOC), -+ psPageArrayData->ui32CPUCacheFlags); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes")); -+ goto e_free_pages; -+ } -+ -+ /* Update metadata */ -+ psPageArrayData->iNumOSPagesAllocated += uiOSPagesToAlloc; -+ -+ /* Free temporary page array */ -+ OSFreeMem(ppsTempPageArray); -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ for (i = 0; i < uiDevPagesToAlloc; i++) -+ { -+ _AddMemAllocRecord_UmaPages(psPageArrayData, -+ ppsPageArray[puiAllocIndices[i]]); -+ } -+#else -+ _IncrMemAllocStat_UmaPages(((uiOSPagesToAlloc * PAGE_SIZE)+(psPageArrayData->ui32CMAAdjustedPageCount)), -+ psPageArrayData->uiPid); -+#endif -+#endif -+ -+ return PVRSRV_OK; -+ -+e_free_pages: -+ if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA)) -+ { -+ IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder; -+ -+ /* Free the pages we just allocated from the CMA */ -+ for (; i > uiDevPagesFromPool; i--) -+ { -+ _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, -+ uiDevPageSize, -+ uiOrder, -+ psPageArrayData->dmavirtarray[puiAllocIndices[i-1]], -+ psPageArrayData->dmaphysarray[puiAllocIndices[i-1]], -+ ppsPageArray[puiAllocIndices[i-1]]); -+ psPageArrayData->dmaphysarray[puiAllocIndices[i-1]]= (dma_addr_t) 0; -+ psPageArrayData->dmavirtarray[puiAllocIndices[i-1]] = NULL; -+ ppsPageArray[puiAllocIndices[i-1]] = NULL; -+ } -+ } -+ else -+ { -+ /* Free the pages we just allocated from the OS */ -+ for (; i > uiDevPagesFromPool; i--) -+ { -+ _FreeOSPage(0, IMG_FALSE, ppsPageArray[puiAllocIndices[i-1]]); -+ ppsPageArray[puiAllocIndices[i-1]] = NULL; -+ } -+ } -+ -+e_free_pool_pages: -+ /* And now free all of the pages we allocated from the pool. */ -+ for (i = 0; i < uiDevPagesFromPool; i++) -+ { -+ _FreeOSPage(0, BIT_ISSET(ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE), -+ ppsTempPageArray[i]); -+ -+ /* not using _CheckIfIndexInRange() to not print error message */ -+ if (puiAllocIndices[i] < uiDevPagesAllocated) -+ { -+ ppsPageArray[puiAllocIndices[i]] = NULL; -+ } -+ } -+ -+e_free_temp_array: -+ OSFreeMem(ppsTempPageArray); -+ -+e_exit: -+ return eError; -+} -+ -+/* Allocate pages for a given page array. 
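/*
 * Editor's sketch of what "sparse" backing means in _AllocOSPages_Sparse():
 * only the listed indices of the PMR's page array receive physical pages,
 * everything else stays NULL. Helper name and error handling are illustrative.
 */
#include <linux/gfp.h>
#include <linux/mm.h>

static int back_sparse_indices(struct page **page_array, const u32 *indices,
			       u32 num_to_alloc, u32 array_size)
{
	u32 i;

	for (i = 0; i < num_to_alloc; i++) {
		if (indices[i] >= array_size || page_array[indices[i]])
			return -EINVAL;         /* out of range or already backed */

		page_array[indices[i]] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page_array[indices[i]])
			return -ENOMEM;         /* caller unwinds earlier indices */
	}
	return 0;
}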
-+ * -+ * The executed allocation path depends whether an array with allocation -+ * indices has been passed or not */ -+static PVRSRV_ERROR -+_AllocOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, -+ IMG_UINT32 *puiAllocIndices, -+ IMG_UINT32 uiPagesToAlloc) -+{ -+ PVRSRV_ERROR eError; -+ struct page **ppsPageArray; -+ -+ /* Parameter checks */ -+ PVR_ASSERT(NULL != psPageArrayData); -+ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) -+ { -+ PVR_ASSERT(psPageArrayData->dmaphysarray != NULL); -+ PVR_ASSERT(psPageArrayData->dmavirtarray != NULL); -+ } -+ PVR_ASSERT(psPageArrayData->pagearray != NULL); -+ PVR_ASSERT(0 <= psPageArrayData->iNumOSPagesAllocated); -+ -+ ppsPageArray = psPageArrayData->pagearray; -+ -+ /* Go the sparse alloc path if we have an array with alloc indices.*/ -+ if (puiAllocIndices != NULL) -+ { -+ eError = _AllocOSPages_Sparse(psPageArrayData, -+ puiAllocIndices, -+ uiPagesToAlloc); -+ } -+ else -+ { -+ eError = _AllocOSPages_Fast(psPageArrayData); -+ } -+ -+ if (eError != PVRSRV_OK) -+ { -+ goto e_exit; -+ } -+ -+ _DumpPageArray(ppsPageArray, -+ psPageArrayData->uiTotalNumOSPages >> -+ (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) ); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: allocated OS memory for PMR @0x%p", psPageArrayData)); -+ return PVRSRV_OK; -+ -+e_exit: -+ return eError; -+} -+ -+/* Same as _FreeOSPage except free memory using DMA framework */ -+static INLINE void -+_FreeOSPage_CMA(struct device *dev, -+ size_t alloc_size, -+ IMG_UINT32 uiOrder, -+ void *virt_addr, -+ dma_addr_t dev_addr, -+ struct page *psPage) -+{ -+ if (DMA_IS_ALLOCPG_ADDR(dev_addr)) -+ { -+#if defined(CONFIG_X86) -+ void *pvPageVAddr = page_address(psPage); -+ if (pvPageVAddr) -+ { -+ int ret = set_memory_wb((unsigned long)pvPageVAddr, 1); -+ if (ret) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to reset page attribute", -+ __func__)); -+ } -+ } -+#endif -+ -+ if (DMA_IS_ADDR_ADJUSTED(dev_addr)) -+ { -+ psPage -= DMA_GET_ALIGN_ADJUSTMENT(dev_addr); -+ uiOrder += 1; -+ } -+ -+ __free_pages(psPage, uiOrder); -+ } -+ else -+ { -+ if (DMA_IS_ADDR_ADJUSTED(dev_addr)) -+ { -+ size_t align_adjust; -+ -+ align_adjust = DMA_GET_ALIGN_ADJUSTMENT(dev_addr); -+ alloc_size = alloc_size << 1; -+ -+ dev_addr = DMA_GET_ADDR(dev_addr); -+ dev_addr -= align_adjust << PAGE_SHIFT; -+ virt_addr -= align_adjust << PAGE_SHIFT; -+ } -+ -+ dma_free_coherent(dev, alloc_size, virt_addr, DMA_GET_ADDR(dev_addr)); -+ } -+} -+ -+/* Free a single page back to the OS. -+ * Make sure the cache type is set back to the default value. -+ * -+ * Note: -+ * We must _only_ check bUnsetMemoryType in the case where we need to free -+ * the page back to the OS since we may have to revert the cache properties -+ * of the page to the default as given by the OS when it was allocated. 
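Separately, the e_free_pages and e_free_pool_pages labels in the sparse allocation path above follow a two-stage unwind: entries populated by the failing call are walked backwards and released first, while pool-sourced entries are cleaned up by the next label. A generic, self-contained sketch of that reverse walk (the names and the callback shape are illustrative, not the driver's API):

    #include <stddef.h>

    /*
     * Release only the entries populated by the failing call, i.e. slots
     * [uiFromPool, uiNext), walking backwards so the array is unwound in
     * the reverse order it was filled. Earlier slots, which came from a
     * pool, are left for a separate cleanup stage.
     */
    static void unwind_partial_alloc(void **apvEntries,
                                     size_t uiFromPool,
                                     size_t uiNext,
                                     void (*pfnRelease)(void *))
    {
        while (uiNext > uiFromPool)
        {
            uiNext--;
            pfnRelease(apvEntries[uiNext]);
            apvEntries[uiNext] = NULL;
        }
    }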
*/ -+static void -+_FreeOSPage(IMG_UINT32 uiOrder, -+ IMG_BOOL bUnsetMemoryType, -+ struct page *psPage) -+{ -+ -+#if defined(CONFIG_X86) -+ void *pvPageVAddr; -+ pvPageVAddr = page_address(psPage); -+ -+ if (pvPageVAddr && bUnsetMemoryType) -+ { -+ int ret; -+ -+ ret = set_memory_wb((unsigned long)pvPageVAddr, 1); -+ if (ret) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attribute", -+ __func__)); -+ } -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(bUnsetMemoryType); -+#endif -+ __free_pages(psPage, uiOrder); -+} -+ -+/* Free the struct holding the metadata */ -+static void -+_FreeOSPagesArray(PMR_OSPAGEARRAY_DATA *psPageArrayData) -+{ -+ PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: freed OS memory for PMR @0x%p", psPageArrayData)); -+ -+ /* Check if the page array actually still exists. -+ * It might be the case that has been moved to the page pool */ -+ if (psPageArrayData->pagearray != NULL) -+ { -+ OSFreeMemNoStats(psPageArrayData->pagearray); -+ } -+ -+ /* Check if we need to free additional DMA/CMA cpu kernel virtual address & device bus address arrays */ -+ if (psPageArrayData->dmaphysarray != NULL) -+ { -+ OSFreeMemNoStats(psPageArrayData->dmaphysarray); -+ } -+ if (psPageArrayData->dmavirtarray != NULL) -+ { -+ OSFreeMemNoStats(psPageArrayData->dmavirtarray); -+ } -+ -+ kmem_cache_free(g_psLinuxPageArray, psPageArrayData); -+} -+ -+/* Free all or some pages from a sparse page array */ -+static PVRSRV_ERROR -+_FreeOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, -+ IMG_UINT32 *pai32FreeIndices, -+ IMG_UINT32 ui32FreePageCount) -+{ -+ IMG_BOOL bSuccess; -+ IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; -+ IMG_UINT32 uiPageIndex, i, j, uiTempIdx = 0; -+ struct page **ppsPageArray = psPageArrayData->pagearray; -+ IMG_UINT32 uiNumPages; -+ -+ struct page **ppsTempPageArray; -+ IMG_UINT32 uiTempArraySize; -+ -+ /* We really should have something to free before we call this */ -+ PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0); -+ -+ if (pai32FreeIndices == NULL) -+ { -+ uiNumPages = psPageArrayData->uiTotalNumOSPages >> uiOrder; -+ uiTempArraySize = psPageArrayData->iNumOSPagesAllocated; -+ } -+ else -+ { -+ uiNumPages = ui32FreePageCount; -+ uiTempArraySize = ui32FreePageCount << uiOrder; -+ } -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) -+ for (i = 0; i < uiNumPages; i++) -+ { -+ IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i; -+ -+ if (NULL != ppsPageArray[idx]) -+ { -+ _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[idx]); -+ } -+ } -+#endif -+ -+ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_POISON_ON_FREE)) -+ { -+ for (i = 0; i < uiNumPages; i++) -+ { -+ IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i; -+ -+ if (NULL != ppsPageArray[idx]) -+ { -+ _PoisonDevicePage(psPageArrayData->psDevNode, -+ ppsPageArray[idx], -+ uiOrder, -+ psPageArrayData->ui32CPUCacheFlags, -+ PVRSRV_POISON_ON_FREE_VALUE); -+ } -+ else if (pai32FreeIndices != NULL) -+ { -+ /* Attempt to poison an index not containing a valid page */ -+ return PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK; -+ } -+ } -+ } -+ -+ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) -+ { -+ IMG_UINT32 uiDevNumPages = uiNumPages; -+ IMG_UINT32 uiDevPageSize = 1<uiLog2AllocPageSize; -+ -+ for (i = 0; i < uiDevNumPages; i++) -+ { -+ IMG_UINT32 idx = pai32FreeIndices ? 
pai32FreeIndices[i] : i; -+ if (NULL != ppsPageArray[idx]) -+ { -+ _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, -+ uiDevPageSize, -+ uiOrder, -+ psPageArrayData->dmavirtarray[idx], -+ psPageArrayData->dmaphysarray[idx], -+ ppsPageArray[idx]); -+ psPageArrayData->dmaphysarray[idx] = (dma_addr_t)0; -+ psPageArrayData->dmavirtarray[idx] = NULL; -+ ppsPageArray[idx] = NULL; -+ uiTempIdx++; -+ } -+ else if (pai32FreeIndices != NULL) -+ { -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ /* Attempt to keep memstats consistent in event of fail as we have -+ * freed some pages -+ */ -+ uiTempIdx <<= uiOrder; -+ _DecrMemAllocStat_UmaPages(psPageArrayData, uiTempIdx * PAGE_SIZE, psPageArrayData->uiPid); -+#endif -+ /* Attempt to free an already free index, could be duplicated free indices */ -+ return PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK; -+ } -+ } -+ uiTempIdx <<= uiOrder; -+ } -+ else -+ { -+ -+ /* OSAllocMemNoStats required because this code may be run without the bridge lock held */ -+ ppsTempPageArray = OSAllocMemNoStats(sizeof(struct page*) * uiTempArraySize); -+ if (ppsTempPageArray == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed free_pages metadata allocation", __func__)); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ /* Put pages in a contiguous array so further processing is easier */ -+ for (i = 0; i < uiNumPages; i++) -+ { -+ uiPageIndex = pai32FreeIndices ? pai32FreeIndices[i] : i; -+ if (NULL != ppsPageArray[uiPageIndex]) -+ { -+ struct page *psPage = ppsPageArray[uiPageIndex]; -+ -+ for (j = 0; j < (1<uiPid); -+#endif -+ -+ OSFreeMemNoStats(ppsTempPageArray); -+ return PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK; -+ } -+ } -+ -+ /* Try to move the temp page array to the pool */ -+ bSuccess = _PutPagesToPoolLocked(psPageArrayData->psDevNode, -+ psPageArrayData->ui32CPUCacheFlags, -+ ppsTempPageArray, -+ 0, -+ uiTempIdx); -+ if (bSuccess) -+ { -+ goto exit_ok; -+ } -+ -+ /* Free pages and reset page caching attributes on x86 */ -+#if defined(CONFIG_X86) -+ if (uiTempIdx != 0 && BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE)) -+ { -+ int iError; -+ iError = set_pages_array_wb(ppsTempPageArray, uiTempIdx); -+ -+ if (iError) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __func__)); -+ } -+ } -+#endif -+ -+ /* Free the pages */ -+ for (i = 0; i < uiTempIdx; i++) -+ { -+ __free_pages(ppsTempPageArray[i], 0); -+ } -+ -+ /* Free the temp page array here if it did not move to the pool */ -+ OSFreeMemNoStats(ppsTempPageArray); -+ } -+ -+exit_ok: -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS) -+ _DecrMemAllocStat_UmaPages(psPageArrayData, -+ ((uiTempIdx * PAGE_SIZE)-(psPageArrayData->ui32CMAAdjustedPageCount)), -+ psPageArrayData->uiPid); -+#endif -+ -+ if (pai32FreeIndices && ((uiTempIdx >> uiOrder) != ui32FreePageCount)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Probable sparse duplicate indices: ReqFreeCount: %d " -+ "ActualFreedCount: %d", __func__, ui32FreePageCount, (uiTempIdx >> uiOrder))); -+ } -+ /* Update metadata */ -+ psPageArrayData->iNumOSPagesAllocated -= uiTempIdx; -+ PVR_ASSERT(0 <= psPageArrayData->iNumOSPagesAllocated); -+ return PVRSRV_OK; -+} -+ -+/* Free all the pages in a page array */ -+static PVRSRV_ERROR -+_FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData) -+{ -+ IMG_BOOL bSuccess; -+ IMG_UINT32 i; -+ IMG_UINT32 uiNumPages = psPageArrayData->uiTotalNumOSPages; -+ IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - 
PAGE_SHIFT; -+ IMG_UINT32 uiDevNumPages = uiNumPages >> uiOrder; -+ IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder; -+ struct page **ppsPageArray = psPageArrayData->pagearray; -+ -+ /* We really should have something to free before we call this */ -+ PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0); -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ for (i = 0; i < uiDevNumPages; i++) -+ { -+ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) -+ { -+ _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]); -+ } -+ else -+ { -+ _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i << uiOrder]); -+ } -+ } -+#else -+ _DecrMemAllocStat_UmaPages(psPageArrayData, uiNumPages * PAGE_SIZE - psPageArrayData->ui32CMAAdjustedPageCount, -+ psPageArrayData->uiPid); -+#endif -+#endif -+ -+ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_POISON_ON_FREE)) -+ { -+ for (i = 0; i < uiDevNumPages; i++) -+ { -+ _PoisonDevicePage(psPageArrayData->psDevNode, -+ ppsPageArray[i], -+ uiOrder, -+ psPageArrayData->ui32CPUCacheFlags, -+ PVRSRV_POISON_ON_FREE_VALUE); -+ } -+ } -+ -+ /* Try to move the page array to the pool */ -+ bSuccess = _PutPagesToPoolLocked(psPageArrayData->psDevNode, -+ psPageArrayData->ui32CPUCacheFlags, -+ ppsPageArray, -+ uiOrder, -+ uiNumPages); -+ if (bSuccess) -+ { -+ psPageArrayData->pagearray = NULL; -+ goto exit_ok; -+ } -+ -+ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) -+ { -+ for (i = 0; i < uiDevNumPages; i++) -+ { -+ _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, -+ uiDevPageSize, -+ uiOrder, -+ psPageArrayData->dmavirtarray[i], -+ psPageArrayData->dmaphysarray[i], -+ ppsPageArray[i]); -+ psPageArrayData->dmaphysarray[i] = (dma_addr_t)0; -+ psPageArrayData->dmavirtarray[i] = NULL; -+ ppsPageArray[i] = NULL; -+ } -+ } -+ else -+ { -+#if defined(CONFIG_X86) -+ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE)) -+ { -+ int ret; -+ -+ ret = set_pages_array_wb(ppsPageArray, uiNumPages); -+ if (ret) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", -+ __func__)); -+ } -+ } -+#endif -+ -+ for (i = 0; i < uiNumPages; i++) -+ { -+ _FreeOSPage(uiOrder, IMG_FALSE, ppsPageArray[i]); -+ ppsPageArray[i] = NULL; -+ } -+ } -+ -+exit_ok: -+ /* Update metadata */ -+ psPageArrayData->iNumOSPagesAllocated = 0; -+ return PVRSRV_OK; -+} -+ -+/* Free pages from a page array. -+ * Takes care of mem stats and chooses correct free path depending on parameters. */ -+static PVRSRV_ERROR -+_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, -+ IMG_UINT32 *pai32FreeIndices, -+ IMG_UINT32 ui32FreePageCount) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Go the sparse or non-sparse path */ -+ if (psPageArrayData->iNumOSPagesAllocated != psPageArrayData->uiTotalNumOSPages -+ || pai32FreeIndices != NULL) -+ { -+ eError = _FreeOSPages_Sparse(psPageArrayData, -+ pai32FreeIndices, -+ ui32FreePageCount); -+ } -+ else -+ { -+ eError = _FreeOSPages_Fast(psPageArrayData); -+ } -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "_FreeOSPages_FreePages failed")); -+ } -+ -+ _DumpPageArray(psPageArrayData->pagearray, -+ psPageArrayData->uiTotalNumOSPages >> -+ (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) ); -+ -+ return eError; -+} -+ -+/* -+ * -+ * Implementation of callback functions -+ * -+ */ -+ -+/* Destruction function is called after last reference disappears, -+ * but before PMR itself is freed. 
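Both _FreeOSPages_Fast and _FreeOSPages_Sparse above offer the batch to _PutPagesToPoolLocked() first and only fall back to freeing page by page when the pool declines it; that is also why the fast path can hand its whole page array to the pool and simply set the pointer to NULL. The shape of that fallback, reduced to a self-contained sketch with placeholder callbacks:

    #include <stdbool.h>
    #include <stddef.h>

    /*
     * Offer a whole batch of pages to a pool first; only when the pool
     * refuses ownership does each page go back to the system allocator.
     */
    static void free_batch_pool_first(void **apvPages, size_t uiCount,
                                      bool (*pfnPoolPut)(void **, size_t),
                                      void (*pfnFreeOne)(void *))
    {
        size_t i;

        if (pfnPoolPut(apvPages, uiCount))
        {
            return; /* the pool now owns every page in the batch */
        }

        for (i = 0; i < uiCount; i++)
        {
            pfnFreeOne(apvPages[i]);
        }
    }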
-+ */ -+static void -+PMRFinalizeOSMem(PMR_IMPL_PRIVDATA pvPriv) -+{ -+ PVRSRV_ERROR eError; -+ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; -+ -+ /* We can't free pages until now. */ -+ if (psOSPageArrayData->iNumOSPagesAllocated != 0) -+ { -+#if defined(DEBUG) && defined(SUPPORT_VALIDATION) -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ IMG_UINT32 ui32UMALeakMax = psPVRSRVData->sMemLeakIntervals.ui32GPU; -+ -+ mutex_lock(&g_sUMALeakMutex); -+ -+ g_ui32UMALeakCounter++; -+ if (ui32UMALeakMax && g_ui32UMALeakCounter >= ui32UMALeakMax) -+ { -+ g_ui32UMALeakCounter = 0; -+ mutex_unlock(&g_sUMALeakMutex); -+ -+ PVR_DPF((PVR_DBG_WARNING, "%s: Skipped freeing of PMR 0x%p to trigger memory leak.", __func__, pvPriv)); -+ return; -+ } -+ -+ mutex_unlock(&g_sUMALeakMutex); -+#endif -+ -+ eError = _FreeOSPages(psOSPageArrayData, NULL, 0); -+ PVR_LOG_IF_ERROR(eError, "_FreeOSPages"); -+ PVR_ASSERT(eError == PVRSRV_OK); /* can we do better? */ -+ } -+ -+ _FreeOSPagesArray(psOSPageArrayData); -+} -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+static PVRSRV_ERROR PMRZombifyOSMem(PMR_IMPL_PRIVDATA pvPriv, PMR *psPMR) -+{ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ PMR_OSPAGEARRAY_DATA *psPageArrayData = pvPriv; -+ IMG_UINT32 uiNumPages = psPageArrayData->uiTotalNumOSPages; -+ const IMG_UINT32 uiFlags = psPageArrayData->ui32AllocFlags; -+ -+ BIT_SET(psPageArrayData->ui32AllocFlags, FLAG_IS_ZOMBIE); -+ -+ /* no need to check for free indices as it's always all the memory we're -+ * freeing */ -+ if (psPageArrayData->iNumOSPagesAllocated != psPageArrayData->uiTotalNumOSPages) -+ { -+ /* _FreeOSPages_Sparse() path */ -+ -+ struct page *const *const ppsPageArray = psPageArrayData->pagearray; -+ -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ IMG_UINT32 i; -+ -+ for (i = 0; i < uiNumPages; i++) -+ { -+ if (ppsPageArray[i] != NULL) -+ { -+ _AddMemZombieRecord_UmaPages(psPageArrayData, ppsPageArray[i]); -+ } -+ } -+#else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ -+ const IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; -+ IMG_UINT32 i, j, uiAllocatedNumPages = 0; -+ -+ uiNumPages >>= uiOrder; -+ -+ if (BIT_ISSET(uiFlags, FLAG_IS_CMA)) -+ { -+ for (i = 0; i < uiNumPages; i++) -+ { -+ if (ppsPageArray[i] != NULL) -+ { -+ uiAllocatedNumPages++; -+ } -+ } -+ uiAllocatedNumPages <<= uiOrder; -+ } -+ else -+ { -+ for (i = 0; i < uiNumPages; i++) -+ { -+ if (ppsPageArray[i] != NULL) -+ { -+ for (j = 0; j < (1<ui32CMAAdjustedPageCount, -+ psPageArrayData->uiPid -+ ); -+#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ -+ } -+ else -+ { -+ /* _FreeOSPages_Fast() path */ -+ -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ struct page *const *const ppsPageArray = psPageArrayData->pagearray; -+ const IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; -+ IMG_UINT32 i; -+ -+ for (i = 0; i < uiNumPages; i++) -+ { -+ IMG_UINT32 uiIdx = BIT_ISSET(uiFlags, FLAG_IS_CMA) ? 
i : i << uiOrder; -+ _AddMemZombieRecord_UmaPages(psPageArrayData, ppsPageArray[uiIdx]); -+ } -+#else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ -+ _ZombifyMemAllocStat_UmaPages( -+ uiNumPages * PAGE_SIZE - psPageArrayData->ui32CMAAdjustedPageCount, -+ psPageArrayData->uiPid -+ ); -+#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ -+ } -+#else /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ -+#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ -+ -+ PVR_UNREFERENCED_PARAMETER(pvPriv); -+ PVR_UNREFERENCED_PARAMETER(psPMR); -+ -+ return PVRSRV_OK; -+} -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+/* Callback function for locking the system physical page addresses. -+ * This function must be called before the lookup address func. */ -+static PVRSRV_ERROR -+PMRLockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv) -+{ -+ PVRSRV_ERROR eError; -+ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; -+ -+ if (BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_ONDEMAND)) -+ { -+ /* Allocate Memory for deferred allocation */ -+ eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumOSPages); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ } -+ -+ eError = PVRSRV_OK; -+ return eError; -+} -+ -+static PVRSRV_ERROR -+PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv) -+{ -+ /* Just drops the refcount. */ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; -+ -+ if (BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_ONDEMAND)) -+ { -+ /* Free Memory for deferred allocation */ -+ eError = _FreeOSPages(psOSPageArrayData, -+ NULL, -+ 0); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ } -+ -+ PVR_ASSERT(eError == PVRSRV_OK); -+ return eError; -+} -+ -+static INLINE IMG_BOOL IsOffsetValid(const PMR_OSPAGEARRAY_DATA *psOSPageArrayData, -+ IMG_UINT32 ui32Offset) -+{ -+ return (ui32Offset >> psOSPageArrayData->uiLog2AllocPageSize) < -+ psOSPageArrayData->uiTotalNumOSPages; -+} -+ -+/* Determine PA for specified offset into page array. */ -+static IMG_DEV_PHYADDR GetOffsetPA(const PMR_OSPAGEARRAY_DATA *psOSPageArrayData, -+ IMG_UINT32 ui32Offset) -+{ -+ IMG_UINT32 ui32Log2AllocPageSize = psOSPageArrayData->uiLog2AllocPageSize; -+ IMG_UINT32 ui32PageIndex = ui32Offset >> ui32Log2AllocPageSize; -+ IMG_UINT32 ui32InPageOffset = ui32Offset - (ui32PageIndex << ui32Log2AllocPageSize); -+ IMG_DEV_PHYADDR sPA; -+ -+ PVR_ASSERT(ui32InPageOffset < (1U << ui32Log2AllocPageSize)); -+ -+ sPA.uiAddr = phys_cpu2gpu(page_to_phys(psOSPageArrayData->pagearray[ui32PageIndex])); -+ sPA.uiAddr += ui32InPageOffset; -+ -+ return sPA; -+} -+ -+/* N.B. It is assumed that PMRLockSysPhysAddressesOSMem() is called _before_ this function! 
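GetOffsetPA() above splits a byte offset into an allocation-page index and an in-page remainder using the allocation page size, which may be larger than the OS page size. The arithmetic in isolation, as a self-contained stand-in (the table of base addresses is hypothetical):

    #include <stdint.h>

    /*
     * Split a byte offset into (page index, in-page offset) for a given
     * log2 allocation page size, then resolve it against a table of page
     * base addresses. Illustrative stand-in for GetOffsetPA().
     */
    static uint64_t offset_to_phys(const uint64_t *pui64PageBases,
                                   uint32_t ui32Log2PageSize,
                                   uint64_t ui64Offset)
    {
        uint64_t ui64Index  = ui64Offset >> ui32Log2PageSize;
        uint64_t ui64InPage = ui64Offset - (ui64Index << ui32Log2PageSize);

        return pui64PageBases[ui64Index] + ui64InPage;
    }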
*/ -+static PVRSRV_ERROR -+PMRSysPhysAddrOSMem(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32NumOfPages, -+ IMG_DEVMEM_OFFSET_T *puiOffset, -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ IMG_UINT64 ui64IPAPolicyValue, -+ IMG_UINT64 ui64IPAClearMask, -+#endif -+ IMG_BOOL *pbValid, -+ IMG_DEV_PHYADDR *psDevPAddr) -+{ -+ const PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; -+ IMG_UINT32 uiIdx; -+ -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ PVR_UNREFERENCED_PARAMETER(ui64IPAPolicyValue); -+ PVR_UNREFERENCED_PARAMETER(ui64IPAClearMask); -+#endif -+ -+ if (psOSPageArrayData->uiLog2AllocPageSize < ui32Log2PageSize) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Requested physical addresses from PMR " -+ "for incompatible contiguity %u!", -+ __func__, -+ ui32Log2PageSize)); -+ return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY; -+ } -+ -+ for (uiIdx=0; uiIdx < ui32NumOfPages; uiIdx++) -+ { -+ if (pbValid[uiIdx]) -+ { -+ PVR_LOG_RETURN_IF_FALSE(IsOffsetValid(psOSPageArrayData, puiOffset[uiIdx]), -+ "puiOffset out of range", PVRSRV_ERROR_OUT_OF_RANGE); -+ -+ psDevPAddr[uiIdx] = GetOffsetPA(psOSPageArrayData, puiOffset[uiIdx]); -+ -+#if !defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY) -+ /* this is just a precaution, normally this should be always -+ * available */ -+ if (psOSPageArrayData->ui64DmaMask) -+ { -+ if (psDevPAddr[uiIdx].uiAddr > psOSPageArrayData->ui64DmaMask) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: physical address" -+ " (%" IMG_UINT64_FMTSPECX ") out of allowable range" -+ " [0; %" IMG_UINT64_FMTSPECX "]", __func__, -+ psDevPAddr[uiIdx].uiAddr, -+ psOSPageArrayData->ui64DmaMask)); -+ BUG(); -+ } -+ } -+#endif -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ /* Modify the physical address with the associated IPA values */ -+ psDevPAddr[uiIdx].uiAddr &= ~ui64IPAClearMask; -+ psDevPAddr[uiIdx].uiAddr |= ui64IPAPolicyValue; -+#endif -+ -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+typedef struct _PMR_OSPAGEARRAY_KERNMAP_DATA_ { -+ void *pvBase; -+ IMG_UINT32 ui32PageCount; -+ pgprot_t PageProps; -+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY) -+ IMG_UINT32 ui32CpuCacheFlags; -+#endif -+} PMR_OSPAGEARRAY_KERNMAP_DATA; -+ -+static PVRSRV_ERROR -+PMRAcquireKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv, -+ size_t uiOffset, -+ size_t uiSize, -+ void **ppvKernelAddressOut, -+ IMG_HANDLE *phHandleOut, -+ PMR_FLAGS_T ulFlags) -+{ -+ PVRSRV_ERROR eError; -+ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; -+ void *pvAddress; -+ pgprot_t prot = PAGE_KERNEL; -+ IMG_UINT32 ui32PageOffset=0; -+ size_t uiMapOffset=0; -+ IMG_UINT32 ui32PageCount = 0; -+ IMG_UINT32 uiLog2AllocPageSize = psOSPageArrayData->uiLog2AllocPageSize; -+ IMG_UINT32 uiOSPageShift = OSGetPageShift(); -+ IMG_UINT32 uiPageSizeDiff = 0; -+ struct page **pagearray; -+ PMR_OSPAGEARRAY_KERNMAP_DATA *psData; -+ -+ /* For cases device page size greater than the OS page size, -+ * multiple physically contiguous OS pages constitute one device page. -+ * However only the first page address of such an ensemble is stored -+ * as part of the mapping table in the driver. Hence when mapping the PMR -+ * in part/full, all OS pages that constitute the device page -+ * must also be mapped to kernel. 
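That reconstruction is what the uiPageSizeDiff handling below performs before the range is handed to pvr_vmap() with the pgprot derived from the CPU cache flags. A simplified sketch of the same idea against the stock kernel vmap() API (the function name and parameters here are illustrative, and error handling is kept minimal):

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /*
     * Map ui32DevPages device pages, each made of (1 << uiSubShift) OS
     * pages, into one contiguous kernel virtual range. Only the first OS
     * page of every device page is recorded in ppsFirstPages.
     */
    static void *map_device_pages(struct page **ppsFirstPages,
                                  unsigned int ui32DevPages,
                                  unsigned int uiSubShift,
                                  pgprot_t prot)
    {
        unsigned int uiSub = 1u << uiSubShift;
        unsigned int uiTotal = ui32DevPages * uiSub;
        struct page **ppsFlat;
        unsigned int i;
        void *pvAddr;

        ppsFlat = kcalloc(uiTotal, sizeof(*ppsFlat), GFP_KERNEL);
        if (!ppsFlat)
            return NULL;

        for (i = 0; i < uiTotal; i++)
        {
            /* expand each device page into its consecutive OS pages */
            ppsFlat[i] = ppsFirstPages[i / uiSub] + (i % uiSub);
        }

        pvAddr = vmap(ppsFlat, uiTotal, VM_MAP, prot);
        kfree(ppsFlat); /* the page array is not needed once mapped */
        return pvAddr;
    }

pvr_vmap()/pvr_vunmap() add bookkeeping on top of this, but the page-pointer expansion and the pgprot selection are the essential steps.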
-+ * -+ * For the case where device page size less than OS page size, -+ * treat it the same way as the page sizes are equal */ -+ if (uiLog2AllocPageSize > uiOSPageShift) -+ { -+ uiPageSizeDiff = uiLog2AllocPageSize - uiOSPageShift; -+ } -+ -+ /* -+ Zero offset and size as a special meaning which means map in the -+ whole of the PMR, this is due to fact that the places that call -+ this callback might not have access to be able to determine the -+ physical size -+ */ -+ if ((uiOffset == 0) && (uiSize == 0)) -+ { -+ ui32PageOffset = 0; -+ uiMapOffset = 0; -+ /* Page count = amount of OS pages */ -+ ui32PageCount = psOSPageArrayData->iNumOSPagesAllocated; -+ } -+ else -+ { -+ size_t uiEndoffset; -+ -+ ui32PageOffset = uiOffset >> uiLog2AllocPageSize; -+ uiMapOffset = uiOffset - (ui32PageOffset << uiLog2AllocPageSize); -+ uiEndoffset = uiOffset + uiSize - 1; -+ /* Add one as we want the count, not the offset */ -+ /* Page count = amount of device pages (note uiLog2AllocPageSize being used) */ -+ ui32PageCount = (uiEndoffset >> uiLog2AllocPageSize) + 1; -+ ui32PageCount -= ui32PageOffset; -+ -+ /* The OS page count to be mapped might be different if the -+ * OS page size is lesser than the device page size */ -+ ui32PageCount <<= uiPageSizeDiff; -+ } -+ -+ switch (PVRSRV_CPU_CACHE_MODE(psOSPageArrayData->ui32CPUCacheFlags)) -+ { -+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: -+ prot = pgprot_noncached(prot); -+ break; -+ -+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: -+ prot = pgprot_writecombine(prot); -+ break; -+ -+ case PVRSRV_MEMALLOCFLAG_CPU_CACHED: -+ break; -+ -+ default: -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto e0; -+ } -+ -+ if (uiPageSizeDiff) -+ { -+ /* Each device page can be broken down into ui32SubPageCount OS pages */ -+ IMG_UINT32 ui32SubPageCount = 1 << uiPageSizeDiff; -+ IMG_UINT32 i; -+ struct page **psPage = &psOSPageArrayData->pagearray[ui32PageOffset]; -+ -+ /* Allocate enough memory for the OS page pointers for this mapping */ -+ pagearray = OSAllocMem(ui32PageCount * sizeof(pagearray[0])); -+ -+ if (pagearray == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto e0; -+ } -+ -+ /* construct array that holds the page pointers that constitute the requested -+ * mapping */ -+ for (i = 0; i < ui32PageCount; i++) -+ { -+ IMG_UINT32 ui32OSPageArrayIndex = i / ui32SubPageCount; -+ IMG_UINT32 ui32OSPageArrayOffset = i % ui32SubPageCount; -+ -+ /* -+ * The driver only stores OS page pointers for the first OS page -+ * within each device page (psPage[ui32OSPageArrayIndex]). -+ * Get the next OS page structure at device page granularity, -+ * then calculate OS page pointers for all the other pages. 
-+ */ -+ pagearray[i] = psPage[ui32OSPageArrayIndex] + ui32OSPageArrayOffset; -+ } -+ } -+ else -+ { -+ pagearray = &psOSPageArrayData->pagearray[ui32PageOffset]; -+ } -+ -+ psData = OSAllocMem(sizeof(*psData)); -+ if (psData == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto e1; -+ } -+ -+ pvAddress = pvr_vmap(pagearray, ui32PageCount, VM_MAP, prot); -+ if (pvAddress == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto e2; -+ } -+ -+ *ppvKernelAddressOut = pvAddress + uiMapOffset; -+ psData->pvBase = pvAddress; -+ psData->ui32PageCount = ui32PageCount; -+ psData->PageProps = prot; -+ *phHandleOut = psData; -+ -+ if (uiPageSizeDiff) -+ { -+ OSFreeMem(pagearray); -+ } -+ -+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY) -+ { -+ IMG_CPU_PHYADDR pvAddrPhy; -+ pvAddrPhy.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(page_to_phys(*pagearray)); -+ InsertMappingRecord(PMR_GetAnnotation(psOSPageArrayData->hPMR), -+ psOSPageArrayData->uiPid, -+ pvAddress, -+ pvAddrPhy, -+ psOSPageArrayData->ui32CPUCacheFlags, -+ uiMapOffset, -+ ui32PageCount); -+ -+ psData->ui32CpuCacheFlags = psOSPageArrayData->ui32CPUCacheFlags; -+ } -+#endif -+ -+ return PVRSRV_OK; -+ /* -+ error exit paths follow -+ */ -+e2: -+ OSFreeMem(psData); -+e1: -+ if (uiPageSizeDiff) -+ { -+ OSFreeMem(pagearray); -+ } -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+static void PMRReleaseKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_HANDLE hHandle) -+{ -+ PMR_OSPAGEARRAY_KERNMAP_DATA *psData = hHandle; -+ PVR_UNREFERENCED_PARAMETER(pvPriv); -+ -+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY) -+ { -+ IMG_CPU_PHYADDR pvAddrPhy; -+ pvAddrPhy.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(page_to_phys(vmalloc_to_page(psData->pvBase))); -+ InsertUnMappingRecord(psData->pvBase, -+ pvAddrPhy, -+ psData->ui32CpuCacheFlags, -+ psData->ui32PageCount); -+ } -+#endif -+ -+ pvr_vunmap(psData->pvBase, psData->ui32PageCount, psData->PageProps); -+ OSFreeMem(psData); -+} -+ -+/*************************************************************************/ /*! -+@Function PMRChangeSparseMemOSMem -+@Description This function Changes the sparse mapping by allocating and -+ freeing of pages. It changes the GPU and CPU maps accordingly. -+@Return PVRSRV_ERROR failure code -+*/ /**************************************************************************/ -+static PVRSRV_ERROR -+PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv, -+ const PMR *psPMR, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *pai32AllocIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pai32FreeIndices, -+ IMG_UINT32 uiFlags) -+{ -+ PVRSRV_ERROR eError; -+ -+ PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappingTable(psPMR); -+ PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv; -+ struct page **psPageArray = psPMRPageArrayData->pagearray; -+ void **psDMAVirtArray = psPMRPageArrayData->dmavirtarray; -+ dma_addr_t *psDMAPhysArray = psPMRPageArrayData->dmaphysarray; -+ -+ struct page *psPage; -+ dma_addr_t psDMAPAddr; -+ void *pvDMAVAddr; -+ -+ IMG_UINT32 ui32AdtnlAllocPages = 0; /*uiLog2AllocPageSize - PAGE_SHIFT; -+ IMG_BOOL bCMA = BIT_ISSET(psPMRPageArrayData->ui32AllocFlags, FLAG_IS_CMA); -+ -+ -+ /* Check SPARSE flags and calculate pages to allocate and free */ -+ if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH)) -+ { -+ ui32CommonRequestCount = (ui32AllocPageCount > ui32FreePageCount) ? 
-+ ui32FreePageCount : ui32AllocPageCount; -+ -+ PDUMP_PANIC(PMR_DeviceNode(psPMR), SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported"); -+ } -+ -+ if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC)) -+ { -+ ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequestCount; -+ } -+ else -+ { -+ ui32AllocPageCount = 0; -+ } -+ -+ if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE)) -+ { -+ ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequestCount; -+ } -+ else -+ { -+ ui32FreePageCount = 0; -+ } -+ -+ if (0 == (ui32CommonRequestCount || ui32AdtnlAllocPages || ui32AdtnlFreePages)) -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Missing parameters for number of pages to alloc/free", -+ __func__)); -+ return eError; -+ } -+ -+ /* The incoming request is classified into two operations independent of -+ * each other: alloc & free pages. -+ * These operations can be combined with two mapping operations as well -+ * which are GPU & CPU space mappings. -+ * -+ * From the alloc and free page requests, the net amount of pages to be -+ * allocated or freed is computed. Pages that were requested to be freed -+ * will be reused to fulfil alloc requests. -+ * -+ * The order of operations is: -+ * 1. Allocate new pages from the OS -+ * 2. Move the free pages from free request to alloc positions. -+ * 3. Free the rest of the pages not used for alloc -+ * -+ * Alloc parameters are validated at the time of allocation -+ * and any error will be handled then. */ -+ -+ /* Validate the free indices */ -+ if (ui32FreePageCount) -+ { -+ if (NULL != pai32FreeIndices){ -+ -+ for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++) -+ { -+ uiFreepgidx = pai32FreeIndices[ui32Loop]; -+ -+ if (uiFreepgidx >= (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder)) -+ { -+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE; -+ goto e0; -+ } -+ -+ if (NULL == psPageArray[uiFreepgidx]) -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Trying to free non-allocated page", -+ __func__)); -+ goto e0; -+ } -+ } -+ } -+ else -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Given non-zero free count but missing indices array", -+ __func__)); -+ return eError; -+ } -+ } -+ -+ /* Validate the alloc indices */ -+ for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++) -+ { -+ uiAllocpgidx = pai32AllocIndices[ui32Loop]; -+ -+ if (uiAllocpgidx >= (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder)) -+ { -+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE; -+ goto e0; -+ } -+ -+ if ((NULL != psPageArray[uiAllocpgidx]) || -+ (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx])) -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Trying to allocate already allocated page again", -+ __func__)); -+ goto e0; -+ } -+ } -+ -+ ui32Loop = 0; -+ -+ /* Allocate new pages from the OS */ -+ if (0 != ui32AdtnlAllocPages) -+ { -+ eError = _AllocOSPages(psPMRPageArrayData, pai32AllocIndices, ui32AdtnlAllocPages); -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: New Addtl Allocation of pages failed", -+ __func__)); -+ goto e0; -+ } -+ -+ psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages; -+ /*Mark the corresponding pages of translation table as valid */ -+ for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++) -+ { -+ psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop]; -+ } -+ } -+ -+ -+ 
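The bookkeeping above implements the order of operations described in the earlier comment; the amount of work in each stage comes from a small classification of the two requests. A sketch of that arithmetic for the case where both an alloc and a free list are supplied (types and names are illustrative):

    #include <stdint.h>

    /*
     * Net result of combining an alloc request and a free request:
     * 'common' pages are recycled by moving them from freed indices to
     * newly allocated indices, the remainder is genuinely allocated from
     * or returned to the OS.
     */
    struct sparse_resize_plan
    {
        uint32_t ui32Common;     /* pages recycled by moving them    */
        uint32_t ui32ExtraAlloc; /* pages that must come from the OS */
        uint32_t ui32ExtraFree;  /* pages that really go back to it  */
    };

    static struct sparse_resize_plan
    classify_sparse_resize(uint32_t ui32AllocCount, uint32_t ui32FreeCount)
    {
        struct sparse_resize_plan sPlan;

        sPlan.ui32Common = (ui32AllocCount < ui32FreeCount) ?
                           ui32AllocCount : ui32FreeCount;
        sPlan.ui32ExtraAlloc = ui32AllocCount - sPlan.ui32Common;
        sPlan.ui32ExtraFree  = ui32FreeCount  - sPlan.ui32Common;

        return sPlan;
    }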
ui32Index = ui32Loop; -+ -+ /* Move the corresponding free pages to alloc request */ -+ for (ui32Loop = 0; ui32Loop < ui32CommonRequestCount; ui32Loop++, ui32Index++) -+ { -+ uiAllocpgidx = pai32AllocIndices[ui32Index]; -+ uiFreepgidx = pai32FreeIndices[ui32Loop]; -+ -+ psPage = psPageArray[uiAllocpgidx]; -+ psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx]; -+ -+ if (bCMA) -+ { -+ pvDMAVAddr = psDMAVirtArray[uiAllocpgidx]; -+ psDMAPAddr = psDMAPhysArray[uiAllocpgidx]; -+ psDMAVirtArray[uiAllocpgidx] = psDMAVirtArray[uiFreepgidx]; -+ psDMAPhysArray[uiAllocpgidx] = psDMAPhysArray[uiFreepgidx]; -+ } -+ -+ psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID; -+ psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; -+ psPageArray[uiFreepgidx] = NULL; -+ if (bCMA) -+ { -+ psDMAVirtArray[uiFreepgidx] = NULL; -+ psDMAPhysArray[uiFreepgidx] = (dma_addr_t)0; -+ } -+ } -+ -+ /* Free the additional free pages */ -+ if (0 != ui32AdtnlFreePages) -+ { -+ eError = _FreeOSPages(psPMRPageArrayData, -+ &pai32FreeIndices[ui32Loop], -+ ui32AdtnlFreePages); -+ if (eError != PVRSRV_OK) -+ { -+ goto e0; -+ } -+ psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages; -+ while (ui32Loop < ui32FreePageCount) -+ { -+ psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Loop]] = TRANSLATION_INVALID; -+ ui32Loop++; -+ } -+ } -+ -+ eError = PVRSRV_OK; -+ -+e0: -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function PMRChangeSparseMemCPUMapOSMem -+@Description This function Changes CPU maps accordingly -+@Return PVRSRV_ERROR failure code -+*/ /**************************************************************************/ -+static -+PVRSRV_ERROR PMRChangeSparseMemCPUMapOSMem(PMR_IMPL_PRIVDATA pPriv, -+ const PMR *psPMR, -+ IMG_UINT64 sCpuVAddrBase, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *pai32AllocIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pai32FreeIndices) -+{ -+ struct page **psPageArray; -+ PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv; -+ IMG_CPU_PHYADDR sCPUPAddr; -+ -+ sCPUPAddr.uiAddr = 0; -+ psPageArray = psPMRPageArrayData->pagearray; -+ -+ return OSChangeSparseMemCPUAddrMap((void **)psPageArray, -+ sCpuVAddrBase, -+ sCPUPAddr, -+ ui32AllocPageCount, -+ pai32AllocIndices, -+ ui32FreePageCount, -+ pai32FreeIndices, -+ IMG_FALSE); -+} -+ -+static PMR_IMPL_FUNCTAB _sPMROSPFuncTab = { -+ .pfnLockPhysAddresses = &PMRLockSysPhysAddressesOSMem, -+ .pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesOSMem, -+ .pfnDevPhysAddr = &PMRSysPhysAddrOSMem, -+ .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataOSMem, -+ .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataOSMem, -+ .pfnReadBytes = NULL, -+ .pfnWriteBytes = NULL, -+ .pfnChangeSparseMem = &PMRChangeSparseMemOSMem, -+ .pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapOSMem, -+ .pfnFinalize = &PMRFinalizeOSMem, -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ .pfnZombify = &PMRZombifyOSMem, -+#endif -+}; -+ -+/* Wrapper around OS page allocation. */ -+static PVRSRV_ERROR -+DoPageAlloc(PMR_OSPAGEARRAY_DATA *psPrivData, -+ IMG_UINT32 *puiAllocIndices, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 ui32Log2AllocPageSize) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ /* Do we fill the whole page array or just parts (sparse)? 
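The answer in the body that follows depends only on whether every virtual chunk is physically backed (ui32NumPhysChunks equal to ui32NumVirtChunks) or just a subset. Worth noting on the way past: _sPMROSPFuncTab above is the contract between this factory and the generic PMR layer, a table of callbacks in which the hooks this factory does not implement (pfnReadBytes, pfnWriteBytes) are left NULL. The same pattern in miniature, with toy types rather than the driver's:

    #include <stddef.h>
    #include <stdint.h>

    /* A reduced callback table in the style of PMR_IMPL_FUNCTAB. */
    struct mini_pmr_funcs
    {
        int  (*pfnLock)(void *pvPriv);
        int  (*pfnUnlock)(void *pvPriv);
        int  (*pfnDevPhysAddr)(void *pvPriv, uint64_t uiOffset, uint64_t *puiAddr);
        void (*pfnFinalize)(void *pvPriv);
    };

    static int  mini_lock(void *pvPriv)     { (void)pvPriv; return 0; }
    static int  mini_unlock(void *pvPriv)   { (void)pvPriv; return 0; }
    static void mini_finalize(void *pvPriv) { (void)pvPriv; }

    static const struct mini_pmr_funcs gsMiniFuncs =
    {
        .pfnLock        = mini_lock,
        .pfnUnlock      = mini_unlock,
        .pfnDevPhysAddr = NULL,        /* not provided by this sketch */
        .pfnFinalize    = mini_finalize,
    };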
*/ -+ if (ui32NumPhysChunks == ui32NumVirtChunks) -+ { -+ /* Allocate the physical pages */ -+ eError = _AllocOSPages(psPrivData, -+ NULL, -+ psPrivData->uiTotalNumOSPages >> -+ (ui32Log2AllocPageSize - PAGE_SHIFT)); -+ } -+ else if (ui32NumPhysChunks != 0) -+ { -+ /* Allocate the physical pages */ -+ eError = _AllocOSPages(psPrivData, puiAllocIndices, -+ ui32NumPhysChunks); -+ } -+ -+ return eError; -+} -+ -+static void _EncodeAllocationFlags(IMG_UINT32 uiLog2AllocPageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ IMG_UINT32* ui32AllocFlags) -+{ -+ -+ /* -+ * Use CMA framework if order is greater than OS page size; please note -+ * that OSMMapPMRGeneric() has the same expectation as well. -+ */ -+ /* IsCMA? */ -+ if (uiLog2AllocPageSize > PAGE_SHIFT) -+ { -+ BIT_SET(*ui32AllocFlags, FLAG_IS_CMA); -+ } -+ -+ /* OnDemand? */ -+ if (PVRSRV_CHECK_ON_DEMAND(uiFlags)) -+ { -+ BIT_SET(*ui32AllocFlags, FLAG_ONDEMAND); -+ } -+ -+ /* Zero? */ -+ if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags)) -+ { -+ BIT_SET(*ui32AllocFlags, FLAG_ZERO); -+ } -+ -+ /* Poison on alloc? */ -+ if (PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags)) -+ { -+ BIT_SET(*ui32AllocFlags, FLAG_POISON_ON_ALLOC); -+ } -+ -+#if defined(DEBUG) -+ /* Poison on free? */ -+ if (PVRSRV_CHECK_POISON_ON_FREE(uiFlags)) -+ { -+ BIT_SET(*ui32AllocFlags, FLAG_POISON_ON_FREE); -+ } -+#endif -+ -+ /* Indicate whether this is an allocation with default caching attribute (i.e cached) or not */ -+ if (PVRSRV_CHECK_CPU_UNCACHED(uiFlags) || -+ PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags)) -+ { -+ BIT_SET(*ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE); -+ } -+ -+} -+ -+void PhysmemGetOSRamMemStats(PHEAP_IMPL_DATA pvImplData, -+ IMG_UINT64 *pui64TotalSize, -+ IMG_UINT64 *pui64FreeSize) -+{ -+ struct sysinfo sMeminfo; -+ si_meminfo(&sMeminfo); -+ -+ PVR_UNREFERENCED_PARAMETER(pvImplData); -+ -+ *pui64TotalSize = sMeminfo.totalram * sMeminfo.mem_unit; -+ *pui64FreeSize = sMeminfo.freeram * sMeminfo.mem_unit; -+ -+} -+ -+PVRSRV_ERROR -+PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap, -+ CONNECTION_DATA *psConnection, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *puiAllocIndices, -+ IMG_UINT32 uiLog2AllocPageSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszAnnotation, -+ IMG_PID uiPid, -+ PMR **ppsPMRPtr, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_ERROR eError2; -+ PMR *psPMR; -+ struct _PMR_OSPAGEARRAY_DATA_ *psPrivData; -+ PMR_FLAGS_T uiPMRFlags; -+ IMG_UINT32 ui32CPUCacheFlags; -+ IMG_UINT32 ui32AllocFlags = 0; -+ PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap); -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ /* -+ * The host driver (but not guest) can still use this factory for firmware -+ * allocations -+ */ -+ if (PVRSRV_VZ_MODE_IS(GUEST) && PVRSRV_CHECK_FW_MAIN(uiFlags)) -+ { -+ PVR_ASSERT(0); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto errorOnParam; -+ } -+ -+ /* Select correct caching mode */ -+ eError = DevmemCPUCacheMode(psDevNode, uiFlags, &ui32CPUCacheFlags); -+ if (eError != PVRSRV_OK) -+ { -+ goto errorOnParam; -+ } -+ -+ if (PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags)) -+ { -+ ui32CPUCacheFlags |= PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN; -+ } -+ -+ _EncodeAllocationFlags(uiLog2AllocPageSize, uiFlags, &ui32AllocFlags); -+ -+ -+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) -+ /* Overwrite flags and always zero pages that could go back to UM */ -+ BIT_SET(ui32AllocFlags, FLAG_ZERO); -+ BIT_UNSET(ui32AllocFlags, FLAG_POISON_ON_ALLOC); -+#endif -+ -+ /* 
Physical allocation alignment is generally not supported except under -+ very restrictive conditions, also there is a maximum alignment value -+ which must not exceed the largest device page-size. If these are not -+ met then fail the aligned-requested allocation */ -+ if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA)) -+ { -+ IMG_UINT32 uiAlign = 1 << uiLog2AllocPageSize; -+ if (uiAlign > uiSize || uiAlign > (1 << PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid PA alignment: size 0x%llx, align 0x%x", -+ __func__, uiSize, uiAlign)); -+ eError = PVRSRV_ERROR_INVALID_ALIGNMENT; -+ goto errorOnParam; -+ } -+ PVR_ASSERT(uiLog2AllocPageSize > PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ); -+ } -+ -+ /* Create Array structure that hold the physical pages */ -+ eError = _AllocOSPageArray(psDevNode, -+ uiSize, -+ ui32NumPhysChunks, -+ ui32NumVirtChunks, -+ uiLog2AllocPageSize, -+ ui32AllocFlags, -+ ui32CPUCacheFlags, -+ uiPid, -+ &psPrivData); -+ if (eError != PVRSRV_OK) -+ { -+ goto errorOnAllocPageArray; -+ } -+ -+ if (!BIT_ISSET(ui32AllocFlags, FLAG_ONDEMAND)) -+ { -+ eError = DoPageAlloc(psPrivData, puiAllocIndices, ui32NumPhysChunks, -+ ui32NumVirtChunks, uiLog2AllocPageSize); -+ if (eError != PVRSRV_OK) -+ { -+ goto errorOnAllocPages; -+ } -+ } -+ -+ /* -+ * In this instance, we simply pass flags straight through. -+ * -+ * Generically, uiFlags can include things that control the PMR factory, but -+ * we don't need any such thing (at the time of writing!), and our caller -+ * specifies all PMR flags so we don't need to meddle with what was given to -+ * us. -+ */ -+ uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); -+ -+ /* -+ * Check no significant bits were lost in cast due to different bit widths -+ * for flags -+ */ -+ PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK)); -+ -+ if (BIT_ISSET(ui32AllocFlags, FLAG_ONDEMAND)) -+ { -+ PDUMPCOMMENT(PhysHeapDeviceNode(psPhysHeap), "Deferred Allocation PMR (UMA)"); -+ } -+ -+ eError = PMRCreatePMR(psPhysHeap, -+ uiSize, -+ ui32NumPhysChunks, -+ ui32NumVirtChunks, -+ puiAllocIndices, -+ uiLog2AllocPageSize, -+ uiPMRFlags, -+ pszAnnotation, -+ &_sPMROSPFuncTab, -+ psPrivData, -+ PMR_TYPE_OSMEM, -+ &psPMR, -+ ui32PDumpFlags); -+ if (eError != PVRSRV_OK) -+ { -+ goto errorOnCreate; -+ } -+ -+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY) -+ psPrivData->hPMR = psPMR; -+#endif -+ -+ *ppsPMRPtr = psPMR; -+ -+ return PVRSRV_OK; -+ -+errorOnCreate: -+ if (!BIT_ISSET(ui32AllocFlags, FLAG_ONDEMAND)) -+ { -+ eError2 = _FreeOSPages(psPrivData, NULL, 0); -+ PVR_ASSERT(eError2 == PVRSRV_OK); -+ } -+ -+errorOnAllocPages: -+ _FreeOSPagesArray(psPrivData); -+ -+errorOnAllocPageArray: -+errorOnParam: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/physmem_osmem_linux.h b/drivers/gpu/drm/img-rogue/physmem_osmem_linux.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physmem_osmem_linux.h -@@ -0,0 +1,49 @@ -+/*************************************************************************/ /*! -+@File -+@Title Linux OS physmem implementation -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PHYSMEM_OSMEM_LINUX_H -+#define PHYSMEM_OSMEM_LINUX_H -+ -+void LinuxInitPhysmem(void); -+void LinuxDeinitPhysmem(void); -+ -+#endif /* PHYSMEM_OSMEM_LINUX_H */ -diff --git a/drivers/gpu/drm/img-rogue/physmem_test.c b/drivers/gpu/drm/img-rogue/physmem_test.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physmem_test.c -@@ -0,0 +1,1037 @@ -+/*************************************************************************/ /*! -+@Title Physmem_test -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Single entry point for testing of page factories -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+#include "physmem_test.h" -+#include "device.h" -+#include "syscommon.h" -+#include "pmr.h" -+#include "osfunc.h" -+#include "physmem.h" -+#include "physmem_osmem.h" -+#include "physmem_lma.h" -+#include "pvrsrv.h" -+ -+#define PHYSMEM_TEST_PAGES 2 /* Mem test pages */ -+#define PHYSMEM_TEST_PASSES_MAX 1000 /* Limit number of passes to some reasonable value */ -+ -+ -+/* Test patterns for mem test */ -+ -+static const IMG_UINT64 gui64Patterns[] = { -+ 0, -+ 0xffffffffffffffffULL, -+ 0x5555555555555555ULL, -+ 0xaaaaaaaaaaaaaaaaULL, -+ 0x1111111111111111ULL, -+ 0x2222222222222222ULL, -+ 0x4444444444444444ULL, -+ 0x8888888888888888ULL, -+ 0x3333333333333333ULL, -+ 0x6666666666666666ULL, -+ 0x9999999999999999ULL, -+ 0xccccccccccccccccULL, -+ 0x7777777777777777ULL, -+ 0xbbbbbbbbbbbbbbbbULL, -+ 0xddddddddddddddddULL, -+ 0xeeeeeeeeeeeeeeeeULL, -+ 0x7a6c7258554e494cULL, -+}; -+ -+static const IMG_UINT32 gui32Patterns[] = { -+ 0, -+ 0xffffffffU, -+ 0x55555555U, -+ 0xaaaaaaaaU, -+ 0x11111111U, -+ 0x22222222U, -+ 0x44444444U, -+ 0x88888888U, -+ 0x33333333U, -+ 0x66666666U, -+ 0x99999999U, -+ 0xccccccccU, -+ 0x77777777U, -+ 0xbbbbbbbbU, -+ 0xddddddddU, -+ 0xeeeeeeeeU, -+ 0x7a6c725cU, -+}; -+ -+static const IMG_UINT16 gui16Patterns[] = { -+ 0, -+ 0xffffU, -+ 0x5555U, -+ 0xaaaaU, -+ 0x1111U, -+ 0x2222U, -+ 0x4444U, -+ 0x8888U, -+ 0x3333U, -+ 0x6666U, -+ 0x9999U, -+ 0xccccU, -+ 0x7777U, -+ 0xbbbbU, -+ 0xddddU, -+ 0xeeeeU, -+ 0x7a6cU, -+}; -+ -+static const IMG_UINT8 gui8Patterns[] = { -+ 0, -+ 0xffU, -+ 0x55U, -+ 0xaaU, -+ 0x11U, -+ 0x22U, -+ 0x44U, -+ 0x88U, -+ 0x33U, -+ 0x66U, -+ 0x99U, -+ 0xccU, -+ 0x77U, -+ 0xbbU, -+ 0xddU, -+ 0xeeU, -+ 0x6cU, -+}; -+ -+ -+/* Following function does minimal required initialisation for mem test using dummy device node */ -+static PVRSRV_ERROR -+PhysMemTestInit(PVRSRV_DEVICE_NODE **ppsDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ PVRSRV_ERROR eError; -+ -+ /* Dummy device node */ -+ psDeviceNode = 
OSAllocZMem(sizeof(*psDeviceNode)); -+ PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "OSAllocZMem"); -+ -+ PVRSRVDeviceSetState(psDeviceNode, PVRSRV_DEVICE_STATE_CREATED); -+ psDeviceNode->psDevConfig = psDevConfig; -+ psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON; -+ -+ /* Initialise Phys mem heaps */ -+ eError = PhysHeapInitDeviceHeaps(psDeviceNode, psDevConfig); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapInitDeviceHeaps", ErrorSysDevDeInit); -+ -+ *ppsDeviceNode = psDeviceNode; -+ -+ return PVRSRV_OK; -+ -+ErrorSysDevDeInit: -+ psDevConfig->psDevNode = NULL; -+ OSFreeMem(psDeviceNode); -+ return eError; -+} -+ -+/* Undo initialisation done for mem test */ -+static void -+PhysMemTestDeInit(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ /* Deinitialise Phys mem heaps */ -+ PhysHeapDeInitDeviceHeaps(psDeviceNode); -+ -+ OSFreeMem(psDeviceNode); -+} -+ -+static PVRSRV_ERROR -+PMRContiguousSparseMappingTest(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags) -+{ -+ PVRSRV_ERROR eError, eError1; -+ PHYS_HEAP *psHeap; -+ PHYS_HEAP_POLICY psHeapPolicy; -+ -+ PMR *psPMR = NULL; -+ PMR *psSpacingPMR = NULL, *psSecondSpacingPMR = NULL; -+ IMG_UINT32 aui32MappingTableFirstAlloc[4] = {0,1,2,3}; -+ IMG_UINT32 aui32MappingTableSecondAlloc[8] = {4,5,6,7,8,9,10,11}; -+ IMG_UINT32 aui32MappingTableThirdAlloc[4] = {12,13,14,15}; -+ IMG_UINT32 ui32NoMappingTable = 0; -+ IMG_UINT8 *pcWriteBuffer, *pcReadBuffer; -+ IMG_BOOL *pbValid; -+ IMG_DEV_PHYADDR *apsDevPAddr; -+ IMG_UINT32 ui32NumOfPages = 16; -+ size_t uiMappedSize, uiPageSize; -+ IMG_UINT32 i, uiAttempts; -+ IMG_HANDLE hPrivData = NULL; -+ void *pvKernAddr = NULL; -+ -+ eError = PhysHeapAcquireByID(PVRSRV_GET_PHYS_HEAP_HINT(uiFlags), -+ psDeviceNode, -+ &psHeap); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByID", ErrorReturn); -+ -+ psHeapPolicy = PhysHeapGetPolicy(psHeap); -+ -+ PhysHeapRelease(psHeap); -+ -+ /* If this is the case then it's not supported and so don't attempt the test */ -+ if (psHeapPolicy != PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG) -+ { -+ return PVRSRV_OK; -+ } -+ -+ uiPageSize = OSGetPageSize(); -+ -+ /* Allocate OS memory for PMR page list */ -+ apsDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR)); -+ PVR_LOG_RETURN_IF_NOMEM(apsDevPAddr, "OSAllocMem"); -+ -+ /* Allocate OS memory for PMR page state */ -+ pbValid = OSAllocZMem(ui32NumOfPages * sizeof(IMG_BOOL)); -+ PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, ErrorFreePMRPageListMem); -+ -+ /* Allocate OS memory for write buffer */ -+ pcWriteBuffer = OSAllocMem(uiPageSize * ui32NumOfPages); -+ PVR_LOG_GOTO_IF_NOMEM(pcWriteBuffer, eError, ErrorFreePMRPageStateMem); -+ OSCachedMemSet(pcWriteBuffer, 0xF, uiPageSize); -+ -+ /* Allocate OS memory for read buffer */ -+ pcReadBuffer = OSAllocMem(uiPageSize * ui32NumOfPages); -+ PVR_LOG_GOTO_IF_NOMEM(pcReadBuffer, eError, ErrorFreeWriteBuffer); -+ -+ /* Allocate Sparse PMR with SPARSE | READ | WRITE | UNCACHED_WC attributes */ -+ uiFlags |= PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING | -+ PVRSRV_MEMALLOCFLAG_CPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; -+ -+ /* -+ * Construct a sparse PMR attempting to ensure the allocations -+ * are physically non contiguous but sequentially placed in the mapping -+ * table. 
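Whether the interleaved "spacing" allocations achieved that is decided further down by a plain adjacency scan over the device physical addresses returned by PMR_DevPhysAddr(). That scan in isolation, as a self-contained helper with illustrative names:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /*
     * Return true when every page follows its predecessor by exactly one
     * page size, i.e. the whole range is physically contiguous.
     */
    static bool is_physically_contiguous(const uint64_t *paui64Addrs,
                                         size_t uiNumPages,
                                         uint64_t ui64PageSize)
    {
        size_t i;

        for (i = 1; i < uiNumPages; i++)
        {
            if (paui64Addrs[i] != paui64Addrs[i - 1] + ui64PageSize)
            {
                return false;
            }
        }

        return true;
    }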
-+ */ -+ for (uiAttempts = 3; uiAttempts > 0; uiAttempts--) -+ { -+ /* Allocate a sparse PMR from given physical heap - CPU/GPU/FW */ -+ eError = PhysmemNewRamBackedPMR(NULL, -+ psDeviceNode, -+ ui32NumOfPages * uiPageSize, -+ 4, -+ ui32NumOfPages, -+ aui32MappingTableFirstAlloc, -+ OSGetPageShift(), -+ uiFlags, -+ sizeof("PMRContiguousSparseMappingTest"), -+ "PMRContiguousSparseMappingTest", -+ OSGetCurrentClientProcessIDKM(), -+ &psPMR, -+ PDUMP_NONE, -+ NULL); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate a PMR")); -+ goto ErrorFreeReadBuffer; -+ } -+ -+ /* Allocate some memory from the same physheap so that we can ensure -+ * the allocations aren't linear -+ */ -+ eError = PhysmemNewRamBackedPMR(NULL, -+ psDeviceNode, -+ ui32NumOfPages * uiPageSize, -+ 1, -+ 1, -+ &ui32NoMappingTable, -+ OSGetPageShift(), -+ uiFlags, -+ sizeof("PMRContiguousSparseMappingTest"), -+ "PMRContiguousSparseMappingTest", -+ OSGetCurrentClientProcessIDKM(), -+ &psSpacingPMR, -+ PDUMP_NONE, -+ NULL); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate a PMR")); -+ goto ErrorUnrefPMR; -+ } -+ -+ /* Allocate 8 more physical pages on the Sparse PMR */ -+ eError = PMR_ChangeSparseMem(psPMR, -+ 8, -+ aui32MappingTableSecondAlloc, -+ 0, -+ NULL, -+ uiFlags | SPARSE_RESIZE_ALLOC); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PMR_ChangeSparseMem", ErrorUnrefSpacingPMR); -+ -+ /* Allocate some more memory from the same physheap so that we can ensure -+ * the allocations aren't linear -+ */ -+ eError = PhysmemNewRamBackedPMR(NULL, -+ psDeviceNode, -+ ui32NumOfPages * uiPageSize, -+ 1, -+ 1, -+ &ui32NoMappingTable, -+ OSGetPageShift(), -+ uiFlags, -+ sizeof("PMRContiguousSparseMappingTest"), -+ "PMRContiguousSparseMappingTest", -+ OSGetCurrentClientProcessIDKM(), -+ &psSecondSpacingPMR, -+ PDUMP_NONE, -+ NULL); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate a PMR")); -+ goto ErrorUnrefSpacingPMR; -+ } -+ -+ /* Allocate final 4 physical pages on the Sparse PMR */ -+ eError = PMR_ChangeSparseMem(psPMR, -+ 4, -+ aui32MappingTableThirdAlloc, -+ 0, -+ NULL, -+ uiFlags | SPARSE_RESIZE_ALLOC); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PMR_ChangeSparseMem", ErrorUnrefSecondSpacingPMR); -+ -+ /* -+ * Check we have in fact managed to obtain a PMR with non contiguous -+ * physical pages. -+ */ -+ eError = PMRLockSysPhysAddresses(psPMR); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to lock PMR")); -+ goto ErrorUnrefSecondSpacingPMR; -+ } -+ -+ /* Get the Device physical addresses of the pages */ -+ eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), ui32NumOfPages, 0, apsDevPAddr, pbValid, CPU_USE); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to map PMR pages into device physical addresses")); -+ goto ErrorUnlockPhysAddresses; -+ } -+ -+ { -+ IMG_BOOL bPhysicallyContiguous = IMG_TRUE; -+ IMG_DEV_PHYADDR sPrevDevPAddr = apsDevPAddr[0]; -+ for (i = 1; i < ui32NumOfPages && bPhysicallyContiguous; i++) -+ { -+ if (apsDevPAddr[i].uiAddr != sPrevDevPAddr.uiAddr + uiPageSize) -+ { -+ bPhysicallyContiguous = IMG_FALSE; -+ } -+ sPrevDevPAddr = apsDevPAddr[i]; -+ } -+ -+ if (bPhysicallyContiguous) -+ { -+ /* We haven't yet managed to create the mapping scenario we -+ * require: unwind and attempt again. -+ */ -+ eError1 = PMRUnlockSysPhysAddresses(psPMR); -+ if (eError1 != PVRSRV_OK) -+ { -+ eError = (eError == PVRSRV_OK)? 
eError1 : eError; -+ PVR_DPF((PVR_DBG_ERROR, "Failed to unlock PMR")); -+ } -+ eError1 = PMRUnrefPMR(psPMR); -+ if (eError1 != PVRSRV_OK) -+ { -+ eError = (eError == PVRSRV_OK)? eError1 : eError; -+ PVR_DPF((PVR_DBG_ERROR, "Failed to free PMR")); -+ } -+ eError1 = PMRUnrefPMR(psSpacingPMR); -+ if (eError1 != PVRSRV_OK) -+ { -+ eError = (eError == PVRSRV_OK)? eError1 : eError; -+ PVR_DPF((PVR_DBG_ERROR, "Failed to free Spacing PMR")); -+ } -+ eError1 = PMRUnrefPMR(psSecondSpacingPMR); -+ if (eError1 != PVRSRV_OK) -+ { -+ eError = (eError == PVRSRV_OK)? eError1 : eError; -+ PVR_DPF((PVR_DBG_ERROR, "Failed to free Second Spacing PMR")); -+ } -+ } else { -+ /* We have the scenario, break out of the attempt loop */ -+ break; -+ } -+ } -+ } -+ -+ if (uiAttempts == 0) -+ { -+ /* We can't create the scenario, very unlikely this would happen */ -+ PVR_LOG_GOTO_IF_ERROR(PVRSRV_ERROR_MEMORY_TEST_FAILED, -+ "Unable to create Non Contiguous PMR scenario", -+ ErrorFreeReadBuffer); -+ } -+ -+ /* We have the PMR scenario to test, now attempt to map the whole PMR, -+ * write and then read from it -+ */ -+ eError = PMRAcquireSparseKernelMappingData(psPMR, 0, ui32NumOfPages * uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR")); -+ goto ErrorUnlockPhysAddresses; -+ } -+ -+ OSCachedMemCopyWMB(pvKernAddr, pcWriteBuffer, ui32NumOfPages * uiPageSize); -+ -+ eError = PMRReleaseKernelMappingData(psPMR, hPrivData); -+ PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); -+ -+ /* -+ * Release and reacquire the mapping to exercise the mapping paths -+ */ -+ eError = PMRAcquireSparseKernelMappingData(psPMR, 0, ui32NumOfPages * uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR")); -+ goto ErrorUnlockPhysAddresses; -+ } -+ -+ OSCachedMemSetWMB(pcReadBuffer, 0x0, ui32NumOfPages * uiPageSize); -+ OSCachedMemCopyWMB(pcReadBuffer, pvKernAddr, ui32NumOfPages * uiPageSize); -+ -+ eError = PMRReleaseKernelMappingData(psPMR, hPrivData); -+ PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); -+ -+ for (i = 0; i < ui32NumOfPages * uiPageSize; i++) -+ { -+ if (pcReadBuffer[i] != pcWriteBuffer[i]) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Test failed. Got (0x%hhx), expected (0x%hhx)! @ %u", -+ __func__, pcReadBuffer[i], pcWriteBuffer[i], i)); -+ eError = PVRSRV_ERROR_MEMORY_TEST_FAILED; -+ goto ErrorUnlockPhysAddresses; -+ } -+ } -+ -+ErrorUnlockPhysAddresses: -+ /* Unlock and Unref the PMR to destroy it */ -+ eError1 = PMRUnlockSysPhysAddresses(psPMR); -+ if (eError1 != PVRSRV_OK) -+ { -+ eError = (eError == PVRSRV_OK)? eError1 : eError; -+ PVR_DPF((PVR_DBG_ERROR, "Failed to unlock PMR")); -+ } -+ -+ErrorUnrefSecondSpacingPMR: -+ eError1 = PMRUnrefPMR(psSecondSpacingPMR); -+ if (eError1 != PVRSRV_OK) -+ { -+ eError = (eError == PVRSRV_OK)? eError1 : eError; -+ PVR_DPF((PVR_DBG_ERROR, "Failed to free Second Spacing PMR")); -+ } -+ErrorUnrefSpacingPMR: -+ eError1 = PMRUnrefPMR(psSpacingPMR); -+ if (eError1 != PVRSRV_OK) -+ { -+ eError = (eError == PVRSRV_OK)? eError1 : eError; -+ PVR_DPF((PVR_DBG_ERROR, "Failed to free Spacing PMR")); -+ } -+ErrorUnrefPMR: -+ eError1 = PMRUnrefPMR(psPMR); -+ if (eError1 != PVRSRV_OK) -+ { -+ eError = (eError == PVRSRV_OK)? 
eError1 : eError; -+ PVR_DPF((PVR_DBG_ERROR, "Failed to free PMR")); -+ } -+ -+ErrorFreeReadBuffer: -+ OSFreeMem(pcReadBuffer); -+ErrorFreeWriteBuffer: -+ OSFreeMem(pcWriteBuffer); -+ErrorFreePMRPageStateMem: -+ OSFreeMem(pbValid); -+ErrorFreePMRPageListMem: -+ OSFreeMem(apsDevPAddr); -+ErrorReturn: -+ return eError; -+} -+ -+/* Test for PMR factory validation */ -+static PVRSRV_ERROR -+PMRValidationTest(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags) -+{ -+ PVRSRV_ERROR eError, eError1; -+ IMG_UINT32 i = 0, j = 0, ui32Index = 0; -+ IMG_UINT32 *pui32MappingTable = NULL; -+ PMR *psPMR = NULL; -+ IMG_BOOL *pbValid; -+ IMG_DEV_PHYADDR *apsDevPAddr; -+ IMG_UINT32 ui32NumOfPages = 10, ui32NumOfPhysPages = 5; -+ size_t uiMappedSize, uiPageSize; -+ IMG_UINT8 *pcWriteBuffer, *pcReadBuffer; -+ IMG_HANDLE hPrivData = NULL; -+ void *pvKernAddr = NULL; -+ -+ uiPageSize = OSGetPageSize(); -+ -+ /* Allocate OS memory for PMR page list */ -+ apsDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR)); -+ PVR_LOG_RETURN_IF_NOMEM(apsDevPAddr, "OSAllocMem"); -+ -+ /* Allocate OS memory for PMR page state */ -+ pbValid = OSAllocZMem(ui32NumOfPages * sizeof(IMG_BOOL)); -+ PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, ErrorFreePMRPageListMem); -+ -+ /* Allocate OS memory for write buffer */ -+ pcWriteBuffer = OSAllocMem(uiPageSize); -+ PVR_LOG_GOTO_IF_NOMEM(pcWriteBuffer, eError, ErrorFreePMRPageStateMem); -+ OSCachedMemSet(pcWriteBuffer, 0xF, uiPageSize); -+ -+ /* Allocate OS memory for read buffer */ -+ pcReadBuffer = OSAllocMem(uiPageSize); -+ PVR_LOG_GOTO_IF_NOMEM(pcReadBuffer, eError, ErrorFreeWriteBuffer); -+ -+ /* Allocate OS memory for mapping table */ -+ pui32MappingTable = (IMG_UINT32 *)OSAllocMem(ui32NumOfPhysPages * sizeof(*pui32MappingTable)); -+ PVR_LOG_GOTO_IF_NOMEM(pui32MappingTable, eError, ErrorFreeReadBuffer); -+ -+ /* Pages having even index will have physical backing in PMR */ -+ for (ui32Index=0; ui32Index < ui32NumOfPages; ui32Index+=2) -+ { -+ pui32MappingTable[i++] = ui32Index; -+ } -+ -+ /* Allocate Sparse PMR with SPARSE | READ | WRITE | UNCACHED_WC attributes */ -+ uiFlags |= PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING | -+ PVRSRV_MEMALLOCFLAG_CPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; -+ -+ /* Allocate a sparse PMR from given physical heap - CPU/GPU/FW */ -+ eError = PhysmemNewRamBackedPMR(NULL, -+ psDeviceNode, -+ ui32NumOfPages * uiPageSize, -+ ui32NumOfPhysPages, -+ ui32NumOfPages, -+ pui32MappingTable, -+ OSGetPageShift(), -+ uiFlags, -+ sizeof("PMR ValidationTest"), -+ "PMR ValidationTest", -+ OSGetCurrentClientProcessIDKM(), -+ &psPMR, -+ PDUMP_NONE, -+ NULL); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate a PMR")); -+ goto ErrorFreeMappingTable; -+ } -+ -+ /* Check whether allocated PMR can be locked and obtain physical addresses -+ * of underlying memory pages. 
-+ */ -+ eError = PMRLockSysPhysAddresses(psPMR); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to lock PMR")); -+ goto ErrorUnrefPMR; -+ } -+ -+ /* Get the Device physical addresses of the pages */ -+ eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), ui32NumOfPages, 0, apsDevPAddr, pbValid, CPU_USE); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to map PMR pages into device physical addresses")); -+ goto ErrorUnlockPhysAddresses; -+ } -+ -+ /* Check whether device address of each physical page is OS PAGE_SIZE aligned */ -+ for (i = 0; i < ui32NumOfPages; i++) -+ { -+ if (pbValid[i]) -+ { -+ if ((apsDevPAddr[i].uiAddr & OSGetPageMask()) != 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Physical memory of PMR is not page aligned")); -+ eError = PVRSRV_ERROR_MEMORY_TEST_FAILED; -+ goto ErrorUnlockPhysAddresses; -+ } -+ } -+ } -+ -+ /* Acquire kernel virtual address of each physical page and write to it -+ * and then release it. -+ */ -+ for (i = 0; i < ui32NumOfPages; i++) -+ { -+ if (pbValid[i]) -+ { -+ eError = PMRAcquireSparseKernelMappingData(psPMR, (i * uiPageSize), uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR")); -+ goto ErrorUnlockPhysAddresses; -+ } -+ OSCachedMemCopyWMB(pvKernAddr, pcWriteBuffer, OSGetPageSize()); -+ -+ eError = PMRReleaseKernelMappingData(psPMR, hPrivData); -+ PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); -+ } -+ } -+ -+ /* Acquire kernel virtual address of each physical page and read -+ * from it and check where contents are intact. -+ */ -+ for (i = 0; i < ui32NumOfPages; i++) -+ { -+ if (pbValid[i]) -+ { -+ eError = PMRAcquireSparseKernelMappingData(psPMR, (i * uiPageSize), uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR")); -+ goto ErrorUnlockPhysAddresses; -+ } -+ OSCachedMemSetWMB(pcReadBuffer, 0x0, uiPageSize); -+ OSCachedMemCopyWMB(pcReadBuffer, pvKernAddr, uiMappedSize); -+ -+ eError = PMRReleaseKernelMappingData(psPMR, hPrivData); -+ PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); -+ -+ for (j = 0; j < uiPageSize; j++) -+ { -+ if (pcReadBuffer[j] != pcWriteBuffer[j]) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Test failed. Got (0x%hhx), expected (0x%hhx)!", -+ __func__, pcReadBuffer[j], pcWriteBuffer[j])); -+ eError = PVRSRV_ERROR_MEMORY_TEST_FAILED; -+ goto ErrorUnlockPhysAddresses; -+ } -+ } -+ } -+ } -+ -+ErrorUnlockPhysAddresses: -+ /* Unlock and Unref the PMR to destroy it */ -+ eError1 = PMRUnlockSysPhysAddresses(psPMR); -+ if (eError1 != PVRSRV_OK) -+ { -+ eError = (eError == PVRSRV_OK)? eError1 : eError; -+ PVR_DPF((PVR_DBG_ERROR, "Failed to unlock PMR")); -+ } -+ -+ErrorUnrefPMR: -+ eError1 = PMRUnrefPMR(psPMR); -+ if (eError1 != PVRSRV_OK) -+ { -+ eError = (eError == PVRSRV_OK)? 
eError1 : eError; -+ PVR_DPF((PVR_DBG_ERROR, "Failed to free PMR")); -+ } -+ErrorFreeMappingTable: -+ OSFreeMem(pui32MappingTable); -+ErrorFreeReadBuffer: -+ OSFreeMem(pcReadBuffer); -+ErrorFreeWriteBuffer: -+ OSFreeMem(pcWriteBuffer); -+ErrorFreePMRPageStateMem: -+ OSFreeMem(pbValid); -+ErrorFreePMRPageListMem: -+ OSFreeMem(apsDevPAddr); -+ -+ return eError; -+} -+ -+#define DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, Patterns, NumOfPatterns, Error, ptr, i) \ -+ for (i = 0; i < NumOfPatterns; i++) \ -+ { \ -+ /* Write pattern */ \ -+ for (ptr = StartAddr; ptr < EndAddr; ptr++) \ -+ { \ -+ *ptr = Patterns[i]; \ -+ } \ -+ \ -+ /* Read back and validate pattern */ \ -+ for (ptr = StartAddr; ptr < EndAddr ; ptr++) \ -+ { \ -+ if (*ptr != Patterns[i]) \ -+ { \ -+ Error = PVRSRV_ERROR_MEMORY_TEST_FAILED; \ -+ break; \ -+ } \ -+ } \ -+ \ -+ if (Error != PVRSRV_OK) \ -+ { \ -+ break; \ -+ } \ -+ } -+ -+static PVRSRV_ERROR -+TestPatternU8(void *pvKernAddr, size_t uiMappedSize) -+{ -+ IMG_UINT8 *StartAddr = (IMG_UINT8 *) pvKernAddr; -+ IMG_UINT8 *EndAddr = ((IMG_UINT8 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT8)); -+ IMG_UINT8 *p; -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT8)) == 0); -+ -+ DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui8Patterns, sizeof(gui8Patterns)/sizeof(IMG_UINT8), eError, p, i); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Test failed. Got (0x%hhx), expected (0x%hhx)!", -+ __func__, *p, gui8Patterns[i])); -+ } -+ -+ return eError; -+} -+ -+ -+static PVRSRV_ERROR -+TestPatternU16(void *pvKernAddr, size_t uiMappedSize) -+{ -+ IMG_UINT16 *StartAddr = (IMG_UINT16 *) pvKernAddr; -+ IMG_UINT16 *EndAddr = ((IMG_UINT16 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT16)); -+ IMG_UINT16 *p; -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT16)) == 0); -+ -+ DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui16Patterns, sizeof(gui16Patterns)/sizeof(IMG_UINT16), eError, p, i); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Test failed. Got (0x%hx), expected (0x%hx)!", -+ __func__, *p, gui16Patterns[i])); -+ } -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR -+TestPatternU32(void *pvKernAddr, size_t uiMappedSize) -+{ -+ IMG_UINT32 *StartAddr = (IMG_UINT32 *) pvKernAddr; -+ IMG_UINT32 *EndAddr = ((IMG_UINT32 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT32)); -+ IMG_UINT32 *p; -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT32)) == 0); -+ -+ DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui32Patterns, sizeof(gui32Patterns)/sizeof(IMG_UINT32), eError, p, i); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Test failed. Got (0x%x), expected (0x%x)!", -+ __func__, *p, gui32Patterns[i])); -+ } -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR -+TestPatternU64(void *pvKernAddr, size_t uiMappedSize) -+{ -+ IMG_UINT64 *StartAddr = (IMG_UINT64 *) pvKernAddr; -+ IMG_UINT64 *EndAddr = ((IMG_UINT64 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT64)); -+ IMG_UINT64 *p; -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT64)) == 0); -+ -+ DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui64Patterns, sizeof(gui64Patterns)/sizeof(IMG_UINT64), eError, p, i); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Test failed. 
Got (0x%llx), expected (0x%llx)!", -+ __func__, *p, gui64Patterns[i])); -+ } -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR -+TestSplitCacheline(void *pvKernAddr, size_t uiMappedSize) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ size_t uiCacheLineSize; -+ size_t uiBlockSize; -+ size_t j; -+ IMG_UINT8 *pcWriteBuffer, *pcReadBuffer; -+ IMG_UINT8 *StartAddr = (IMG_UINT8 *) pvKernAddr; -+ IMG_UINT8 *EndAddr, *p; -+ -+ uiCacheLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE); -+ -+ if (uiCacheLineSize > 0) -+ { -+ uiBlockSize = (uiCacheLineSize * 2)/3; /* split cacheline */ -+ -+ pcWriteBuffer = OSAllocMem(uiBlockSize); -+ PVR_LOG_RETURN_IF_NOMEM(pcWriteBuffer, "OSAllocMem"); -+ -+ /* Fill the write buffer with test data, 0xAB*/ -+ OSCachedMemSet(pcWriteBuffer, 0xAB, uiBlockSize); -+ -+ pcReadBuffer = OSAllocMem(uiBlockSize); -+ PVR_LOG_GOTO_IF_NOMEM(pcReadBuffer, eError, ErrorFreeWriteBuffer); -+ -+ /* Fit only complete blocks in uiMappedSize, ignore leftover bytes */ -+ EndAddr = StartAddr + (uiBlockSize * (uiMappedSize / uiBlockSize)); -+ -+ /* Write blocks into the memory */ -+ for (p = StartAddr; p < EndAddr; p += uiBlockSize) -+ { -+ OSCachedMemCopy(p, pcWriteBuffer, uiBlockSize); -+ } -+ -+ /* Read back blocks and check */ -+ for (p = StartAddr; p < EndAddr; p += uiBlockSize) -+ { -+ OSCachedMemCopy(pcReadBuffer, p, uiBlockSize); -+ -+ for (j = 0; j < uiBlockSize; j++) -+ { -+ if (pcReadBuffer[j] != pcWriteBuffer[j]) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Test failed. Got (0x%hhx), expected (0x%hhx)!", __func__, pcReadBuffer[j], pcWriteBuffer[j])); -+ eError = PVRSRV_ERROR_MEMORY_TEST_FAILED; -+ goto ErrorMemTestFailed; -+ } -+ } -+ } -+ -+ErrorMemTestFailed: -+ OSFreeMem(pcReadBuffer); -+ErrorFreeWriteBuffer: -+ OSFreeMem(pcWriteBuffer); -+ } -+ -+ return eError; -+} -+ -+/* Memory test - writes and reads back different patterns to memory and validate the same */ -+static PVRSRV_ERROR -+MemTestPatterns(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32MappingTable = 0; -+ PMR *psPMR = NULL; -+ size_t uiMappedSize, uiPageSize; -+ IMG_HANDLE hPrivData = NULL; -+ void *pvKernAddr = NULL; -+ -+ uiPageSize = OSGetPageSize(); -+ -+ /* Allocate PMR with READ | WRITE | WRITE_COMBINE attributes */ -+ uiFlags |= PVRSRV_MEMALLOCFLAG_CPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; -+ -+ /*Allocate a PMR from given physical heap */ -+ eError = PhysmemNewRamBackedPMR(NULL, -+ psDeviceNode, -+ uiPageSize * PHYSMEM_TEST_PAGES, -+ 1, -+ 1, -+ &ui32MappingTable, -+ OSGetPageShift(), -+ uiFlags, -+ sizeof("PMR PhysMemTest"), -+ "PMR PhysMemTest", -+ OSGetCurrentClientProcessIDKM(), -+ &psPMR, -+ PDUMP_NONE, -+ NULL); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemNewRamBackedPMR"); -+ -+ /* Check whether allocated PMR can be locked and obtain physical -+ * addresses of underlying memory pages. -+ */ -+ eError = PMRLockSysPhysAddresses(psPMR); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddresses", ErrorUnrefPMR); -+ -+ /* Map the physical page(s) into kernel space, acquire kernel mapping -+ * for PMR. 
-+ */ -+ eError = PMRAcquireKernelMappingData(psPMR, 0, uiPageSize * PHYSMEM_TEST_PAGES, &pvKernAddr, &uiMappedSize, &hPrivData); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", ErrorUnlockPhysAddresses); -+ -+ PVR_ASSERT((uiPageSize * PHYSMEM_TEST_PAGES) == uiMappedSize); -+ -+ /* Test various patterns */ -+ eError = TestPatternU64(pvKernAddr, uiMappedSize); -+ if (eError != PVRSRV_OK) -+ { -+ goto ErrorReleaseKernelMappingData; -+ } -+ -+ eError = TestPatternU32(pvKernAddr, uiMappedSize); -+ if (eError != PVRSRV_OK) -+ { -+ goto ErrorReleaseKernelMappingData; -+ } -+ -+ eError = TestPatternU16(pvKernAddr, uiMappedSize); -+ if (eError != PVRSRV_OK) -+ { -+ goto ErrorReleaseKernelMappingData; -+ } -+ -+ eError = TestPatternU8(pvKernAddr, uiMappedSize); -+ if (eError != PVRSRV_OK) -+ { -+ goto ErrorReleaseKernelMappingData; -+ } -+ -+ /* Test split cachelines */ -+ eError = TestSplitCacheline(pvKernAddr, uiMappedSize); -+ -+ErrorReleaseKernelMappingData: -+ (void) PMRReleaseKernelMappingData(psPMR, hPrivData); -+ -+ErrorUnlockPhysAddresses: -+ /* Unlock and Unref the PMR to destroy it, ignore returned value */ -+ (void) PMRUnlockSysPhysAddresses(psPMR); -+ErrorUnrefPMR: -+ (void) PMRUnrefPMR(psPMR); -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR -+PhysMemTestRun(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags, IMG_UINT32 ui32Passes) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 i; -+ -+ /* PMR validation test */ -+ eError = PMRValidationTest(psDeviceNode, uiFlags); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: PMR Contiguous PhysHeap self test failed! %"PVRSRV_MEMALLOCFLAGS_FMTSPEC, -+ __func__, -+ uiFlags)); -+ return eError; -+ } -+ -+ eError = PMRContiguousSparseMappingTest(psDeviceNode, uiFlags); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: PMR Non-contiguous PhysHeap self test failed! %"PVRSRV_MEMALLOCFLAGS_FMTSPEC, -+ __func__, -+ uiFlags)); -+ return eError; -+ } -+ -+ -+ for (i = 0; i < ui32Passes; i++) -+ { -+ /* Mem test */ -+ eError = MemTestPatterns(psDeviceNode, uiFlags); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: [Pass#%u] MemTestPatterns failed!", -+ __func__, i)); -+ break; -+ } -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+PhysMemTest(void *pvDevConfig, IMG_UINT32 ui32MemTestPasses) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ PVRSRV_DEVICE_CONFIG *psDevConfig = pvDevConfig; -+ PHYS_HEAP_CONFIG *psHeapConfig; -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError; -+ -+ /* validate memtest passes requested */ -+ ui32MemTestPasses = (ui32MemTestPasses > PHYSMEM_TEST_PASSES_MAX)? 
PHYSMEM_TEST_PASSES_MAX : ui32MemTestPasses; -+ -+ /* Do minimal initialisation before test */ -+ eError = PhysMemTestInit(&psDeviceNode, psDevConfig); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Test failed to initialize", __func__)); -+ return eError; -+ } -+ -+ for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++) -+ { -+ psHeapConfig = &psDevConfig->pasPhysHeaps[i]; -+ if (psHeapConfig->ui32UsageFlags & PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(GPU_LOCAL)) -+ { -+ /* GPU local mem (should be only up to 1 heap) */ -+ eError = PhysMemTestRun(psDeviceNode, PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(GPU_LOCAL), ui32MemTestPasses); -+ PVR_LOG_GOTO_IF_ERROR(eError, "GPU local memory test failed!", ErrorPhysMemTestDeinit); -+ } -+ if (psHeapConfig->ui32UsageFlags & PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL)) -+ { -+ /* CPU local mem (should be only up to 1 heap) */ -+ eError = PhysMemTestRun(psDeviceNode, PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL), ui32MemTestPasses); -+ PVR_LOG_GOTO_IF_ERROR(eError, "CPU local memory test failed!", ErrorPhysMemTestDeinit); -+ } -+ } -+ -+ PVR_LOG(("PhysMemTest: Passed.")); -+ goto PhysMemTestPassed; -+ -+ErrorPhysMemTestDeinit: -+ PVR_DPF((PVR_DBG_ERROR, "PhysMemTest: Failed.")); -+PhysMemTestPassed: -+ PhysMemTestDeInit(psDeviceNode); -+ -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/physmem_test.h b/drivers/gpu/drm/img-rogue/physmem_test.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/physmem_test.h -@@ -0,0 +1,51 @@ -+/*************************************************************************/ /*! -+@Title Physmem test header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for single entry point for testing of page factories -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef SRVSRV_PHYSMEM_TEST_H -+#define SRVSRV_PHYSMEM_TEST_H -+/* -+ * PhysMemTest -+ */ -+PVRSRV_ERROR -+PhysMemTest(void *pvDevConfig, IMG_UINT32 ui32MemTestPasses); -+ -+#endif /* SRVSRV_PHYSMEM_TEST_H */ -diff --git a/drivers/gpu/drm/img-rogue/plato_drv.h b/drivers/gpu/drm/img-rogue/plato_drv.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/plato_drv.h -@@ -0,0 +1,464 @@ -+/* -+ * @File plato_drv.h -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ */ -+ -+#ifndef _PLATO_DRV_H -+#define _PLATO_DRV_H -+ -+/* -+ * This contains the hooks for the plato pci driver, as used by the -+ * Rogue and PDP sub-devices, and the platform data passed to each of their -+ * drivers -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) -+#include -+ -+#define PLATO_MULTI_DEVICE -+#endif -+ -+// Debug output: -+// Sometimes will want to always output info or error even in release mode. -+// In that case use dev_info, dev_err directly. -+#if defined(PLATO_DRM_DEBUG) -+ #define plato_dev_info(dev, fmt, ...) \ -+ dev_info(dev, fmt, ##__VA_ARGS__) -+ #define plato_dev_warn(dev, fmt, ...) \ -+ dev_warn(dev, fmt, ##__VA_ARGS__) -+ #define plato_dev_error(dev, fmt, ...) \ -+ dev_err(dev, fmt, ##__VA_ARGS__) -+ #define PLATO_DRM_CHECKPOINT pr_info("line %d\n", __LINE__) -+#else -+ #define plato_dev_info(dev, fmt, ...) -+ #define plato_dev_warn(dev, fmt, ...) -+ #define plato_dev_error(dev, fmt, ...) -+ #define PLATO_DRM_CHECKPOINT -+#endif -+ -+#define PLATO_INIT_SUCCESS 0 -+#define PLATO_INIT_FAILURE 1 -+#define PLATO_INIT_RETRY 2 -+ -+#define PCI_VENDOR_ID_PLATO (0x1AEE) -+#define PCI_DEVICE_ID_PLATO (0x0003) -+ -+#define PLATO_SYSTEM_NAME "Plato" -+ -+#define PLATO_MAX_CARDS 4 -+ -+#define PLATO_MAX_DEVICE_NAME_LEN 32 -+ -+#if defined(PLATO_MULTI_DEVICE) -+#define PLATO_MAKE_DEVICE_TEMPLATE(p) (p "_%u") -+#define PLATO_MAKE_DEVICE_NAME(p, i) (p "_" __stringify(i)) -+#endif -+ -+/* Interrupt defines */ -+enum PLATO_INTERRUPT { -+ PLATO_INTERRUPT_GPU = 0, -+ PLATO_INTERRUPT_PDP, -+ PLATO_INTERRUPT_HDMI, -+ PLATO_INTERRUPT_MAX, -+}; -+ -+#define PLATO_INT_SHIFT_GPU (0) -+#define PLATO_INT_SHIFT_PDP (8) -+#define PLATO_INT_SHIFT_HDMI (9) -+#define PLATO_INT_SHIFT_HDMI_WAKEUP (11) -+#define PLATO_INT_SHIFT_TEMP_A (12) -+ -+ -+struct plato_region { -+ resource_size_t base; -+ resource_size_t size; -+}; -+ -+struct plato_io_region { -+ struct plato_region region; -+ void __iomem *registers; -+}; -+ -+/* The following structs are initialised and passed down by the parent plato -+ * driver to the respective sub-drivers -+ */ -+ -+#define PLATO_DEVICE_NAME_PDP_PREFIX "plato_pdp" -+ -+#if defined(PLATO_MULTI_DEVICE) -+#define PLATO_DEVICE_NAME_PDP_TEMPLATE PLATO_MAKE_DEVICE_TEMPLATE( \ -+ PLATO_DEVICE_NAME_PDP_PREFIX) -+ -+#define PLATO_DEVICE_NAME_PDP_PRINTF_ARGS(i) PLATO_DEVICE_NAME_PDP_TEMPLATE, i -+ -+#define PLATO_MAKE_DEVICE_NAME_PDP(i) PLATO_MAKE_DEVICE_NAME( \ -+ PLATO_DEVICE_NAME_PDP_PREFIX, \ -+ i) -+#else -+#define PLATO_DEVICE_NAME_PDP_TEMPLATE PLATO_DEVICE_NAME_PDP_PREFIX -+#define PLATO_DEVICE_NAME_PDP_PRINTF_ARGS(i) PLATO_DEVICE_NAME_PDP_TEMPLATE -+#define PLATO_DEVICE_NAME_PDP PLATO_DEVICE_NAME_PDP_PREFIX -+#endif -+ -+#define PLATO_PDP_RESOURCE_REGS "pdp-regs" -+#define PLATO_PDP_RESOURCE_BIF_REGS "pdp-bif-regs" -+ -+#define PLATO_DEVICE_NAME_HDMI "plato_hdmi" -+#define PLATO_HDMI_RESOURCE_REGS "hdmi-regs" -+ -+struct plato_pdp_platform_data { -+ resource_size_t memory_base; -+ -+ /* The following is used by the drm_pdp driver as it manages the -+ * pdp memory -+ */ -+ resource_size_t pdp_heap_memory_base; -+ resource_size_t pdp_heap_memory_size; -+ -+ /* Used to export host address instead of pdp address, -+ * defaults to false. 
-+ */ -+ bool dma_map_export_host_addr; -+}; -+ -+struct plato_hdmi_platform_data { -+ resource_size_t plato_memory_base; -+}; -+ -+ -+#define PLATO_DEVICE_NAME_ROGUE_PREFIX "plato_rogue" -+ -+#if defined(PLATO_MULTI_DEVICE) -+#define PLATO_DEVICE_NAME_ROGUE_TEMPLATE PLATO_MAKE_DEVICE_TEMPLATE( \ -+ PLATO_DEVICE_NAME_ROGUE_PREFIX) -+ -+#define PLATO_DEVICE_NAME_ROGUE_PRINTF_ARGS(i) PLATO_DEVICE_NAME_ROGUE_TEMPLATE, i -+ -+#define PLATO_MAKE_DEVICE_NAME_ROGUE(i) PLATO_MAKE_DEVICE_NAME( \ -+ PLATO_DEVICE_NAME_ROGUE_PREFIX, \ -+ i) -+#else -+#define PLATO_DEVICE_NAME_ROGUE_TEMPLATE PLATO_DEVICE_NAME_ROGUE_PREFIX -+#define PLATO_DEVICE_NAME_ROGUE_PRINTF_ARGS(i) PLATO_DEVICE_NAME_ROGUE_TEMPLATE -+#define PLATO_DEVICE_NAME_ROGUE PLATO_DEVICE_NAME_ROGUE_PREFIX -+#endif -+ -+#define PLATO_ROGUE_RESOURCE_REGS "rogue-regs" -+ -+struct plato_rogue_platform_data { -+ -+ /* The base address of the plato memory (CPU physical address) - -+ * used to convert from CPU-Physical to device-physical addresses -+ */ -+ resource_size_t plato_memory_base; -+ -+ /* The following is used to setup the services heaps */ -+ int has_nonmappable; -+ struct plato_region rogue_heap_mappable; -+ resource_size_t rogue_heap_dev_addr; -+ struct plato_region rogue_heap_nonmappable; -+#if defined(SUPPORT_PLATO_DISPLAY) -+ struct plato_region pdp_heap; -+#endif -+}; -+ -+struct plato_interrupt_handler { -+ bool enabled; -+ void (*handler_function)(void *data); -+ void *handler_data; -+}; -+ -+struct plato_device { -+ struct pci_dev *pdev; -+ -+ struct plato_io_region sys_io; -+ struct plato_io_region aon_regs; -+ -+ spinlock_t interrupt_handler_lock; -+ spinlock_t interrupt_enable_lock; -+ -+ struct plato_interrupt_handler interrupt_handlers[PLATO_INTERRUPT_MAX]; -+ -+ struct plato_region rogue_mem; -+ struct plato_region rogue_heap_mappable; -+ struct plato_region rogue_heap_nonmappable; -+ int has_nonmappable; -+ -+ resource_size_t dev_mem_base; /* Pointer to device memory base */ -+ -+ struct platform_device *rogue_dev; -+ -+#if defined(SUPPORT_PLATO_DISPLAY) -+ struct platform_device *pdp_dev; -+ struct plato_region pdp_heap; -+ -+ struct platform_device *hdmi_dev; -+#endif -+ -+#if defined(CONFIG_MTRR) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) -+ int mtrr; -+#endif -+ int instance; -+}; -+ -+#if defined(PLATO_LOG_CHECKPOINTS) -+#define PLATO_CHECKPOINT(p) dev_info(&p->pdev->dev, \ -+ "- %s: %d", __func__, __LINE__) -+#else -+#define PLATO_CHECKPOINT(p) -+#endif -+ -+#define plato_write_reg32(base, offset, value) \ -+ iowrite32(value, (base) + (offset)) -+#define plato_read_reg32(base, offset) ioread32(base + offset) -+#define plato_sleep_ms(x) msleep(x) -+#define plato_sleep_us(x) msleep(x/1000) -+ -+/* Valid values for the PLATO_MEMORY_CONFIG configuration option */ -+#define PLATO_MEMORY_LOCAL (1) -+#define PLATO_MEMORY_HOST (2) -+#define PLATO_MEMORY_HYBRID (3) -+ -+#if defined(PLATO_MEMORY_CONFIG) -+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) -+#define PVRSRV_PHYS_HEAP_CONFIG_PDP_LOCAL_ID 3 -+#elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL) -+#define PVRSRV_PHYS_HEAP_CONFIG_PDP_LOCAL_ID 2 -+#endif -+#endif /* PLATO_MEMORY_CONFIG */ -+ -+#define DCPDP_PHYS_HEAP_ID PVRSRV_PHYS_HEAP_CONFIG_PDP_LOCAL_ID -+ -+#define PLATO_PDP_MEM_SIZE (384 * 1024 * 1024) -+ -+#define SYS_PLATO_REG_PCI_BASENUM (1) -+#define SYS_PLATO_REG_REGION_SIZE (4 * 1024 * 1024) -+ -+/* -+ * Give system region a whole span of the reg space including -+ * RGX registers. 
That's because there are sys register segments -+ * both before and after the RGX segment. -+ */ -+#define SYS_PLATO_REG_SYS_OFFSET (0x0) -+#define SYS_PLATO_REG_SYS_SIZE (4 * 1024 * 1024) -+ -+/* Entire Peripheral region */ -+#define SYS_PLATO_REG_PERIP_OFFSET (0x20000) -+#define SYS_PLATO_REG_PERIP_SIZE (164 * 1024) -+ -+/* Chip level registers */ -+#define SYS_PLATO_REG_CHIP_LEVEL_OFFSET (SYS_PLATO_REG_PERIP_OFFSET) -+#define SYS_PLATO_REG_CHIP_LEVEL_SIZE (64 * 1024) -+ -+#define SYS_PLATO_REG_TEMPA_OFFSET (0x80000) -+#define SYS_PLATO_REG_TEMPA_SIZE (64 * 1024) -+ -+/* USB, DMA not included */ -+ -+#define SYS_PLATO_REG_DDR_A_CTRL_OFFSET (0x120000) -+#define SYS_PLATO_REG_DDR_A_CTRL_SIZE (64 * 1024) -+ -+#define SYS_PLATO_REG_DDR_B_CTRL_OFFSET (0x130000) -+#define SYS_PLATO_REG_DDR_B_CTRL_SIZE (64 * 1024) -+ -+#define SYS_PLATO_REG_DDR_A_PUBL_OFFSET (0x140000) -+#define SYS_PLATO_REG_DDR_A_PUBL_SIZE (64 * 1024) -+ -+#define SYS_PLATO_REG_DDR_B_PUBL_OFFSET (0x150000) -+#define SYS_PLATO_REG_DDR_B_PUBL_SIZE (64 * 1024) -+ -+#define SYS_PLATO_REG_NOC_OFFSET (0x160000) -+#define SYS_PLATO_REG_NOC_SIZE (64 * 1024) -+ -+/* Debug NOC registers */ -+#define SYS_PLATO_REG_NOC_DBG_DDR_A_CTRL_OFFSET (0x1500) -+#define SYS_PLATO_REG_NOC_DBG_DDR_A_DATA_OFFSET (0x1580) -+#define SYS_PLATO_REG_NOC_DBG_DDR_A_PUBL_OFFSET (0x1600) -+#define SYS_PLATO_REG_NOC_DBG_DDR_B_CTRL_OFFSET (0x1680) -+#define SYS_PLATO_REG_NOC_DBG_DDR_B_DATA_OFFSET (0x1700) -+#define SYS_PLATO_REG_NOC_DBG_DDR_B_PUBL_OFFSET (0x1780) -+#define SYS_PLATO_REG_NOC_DBG_DISPLAY_S_OFFSET (0x1800) -+#define SYS_PLATO_REG_NOC_DBG_GPIO_0_S_OFFSET (0x1900) -+#define SYS_PLATO_REG_NOC_DBG_GPIO_1_S_OFFSET (0x1980) -+#define SYS_PLATO_REG_NOC_DBG_GPU_S_OFFSET (0x1A00) -+#define SYS_PLATO_REG_NOC_DBG_PCI_PHY_OFFSET (0x1A80) -+#define SYS_PLATO_REG_NOC_DBG_PCI_REG_OFFSET (0x1B00) -+#define SYS_PLATO_REG_NOC_DBG_PCI_S_OFFSET (0x1B80) -+#define SYS_PLATO_REG_NOC_DBG_PERIPH_S_OFFSET (0x1c00) -+#define SYS_PLATO_REG_NOC_DBG_RET_REG_OFFSET (0x1D00) -+#define SYS_PLATO_REG_NOC_DBG_SERVICE_OFFSET (0x1E00) -+ -+#define SYS_PLATO_REG_RGX_OFFSET (0x170000) -+#define SYS_PLATO_REG_RGX_SIZE (64 * 1024) -+ -+#define SYS_PLATO_REG_AON_OFFSET (0x180000) -+#define SYS_PLATO_REG_AON_SIZE (64 * 1024) -+ -+#define SYS_PLATO_REG_PDP_OFFSET (0x200000) -+#define SYS_PLATO_REG_PDP_SIZE (0x1000) -+ -+#define SYS_PLATO_REG_PDP_BIF_OFFSET \ -+ (SYS_PLATO_REG_PDP_OFFSET + SYS_PLATO_REG_PDP_SIZE) -+#define SYS_PLATO_REG_PDP_BIF_SIZE (0x200) -+ -+#define SYS_PLATO_REG_HDMI_OFFSET \ -+ (SYS_PLATO_REG_PDP_OFFSET + 0x20000) -+#define SYS_PLATO_REG_HDMI_SIZE (128 * 1024) -+ -+/* Device memory (including HP mapping) on base register 4 */ -+#define SYS_DEV_MEM_PCI_BASENUM (4) -+ -+/* Device memory size */ -+#define ONE_GB_IN_BYTES (0x40000000ULL) -+#define SYS_DEV_MEM_REGION_SIZE \ -+ (PLATO_MEMORY_SIZE_GIGABYTES * ONE_GB_IN_BYTES) -+ -+/* Plato DDR offset in device memory map at 32GB */ -+#define PLATO_DDR_DEV_PHYSICAL_BASE (0x800000000) -+ -+/* DRAM is split at 48GB */ -+#define PLATO_DRAM_SPLIT_ADDR (0xc00000000) -+ -+/* -+ * Plato DDR region is aliased if less than 32GB memory is present. -+ * This defines memory base closest to the DRAM split point. 
-+ * If 32GB is present this is equal to PLATO_DDR_DEV_PHYSICAL_BASE -+ */ -+#define PLATO_DDR_ALIASED_DEV_PHYSICAL_BASE \ -+ (PLATO_DRAM_SPLIT_ADDR - (SYS_DEV_MEM_REGION_SIZE >> 1)) -+ -+#define PLATO_DDR_ALIASED_DEV_PHYSICAL_END \ -+ (PLATO_DRAM_SPLIT_ADDR + (SYS_DEV_MEM_REGION_SIZE >> 1)) -+ -+#define PLATO_DDR_ALIASED_DEV_SEGMENT_SIZE \ -+ ((32ULL / PLATO_MEMORY_SIZE_GIGABYTES) * ONE_GB_IN_BYTES) -+ -+/* Plato Host memory offset in device memory map at 512GB */ -+#define PLATO_HOSTRAM_DEV_PHYSICAL_BASE (0x8000000000) -+ -+/* Plato PLL, DDR/GPU, PDP and HDMI-SFR/CEC clocks */ -+#define PLATO_PLL_REF_CLOCK_SPEED (19200000) -+ -+/* 600 MHz */ -+#define PLATO_MEM_CLOCK_SPEED (600000000) -+#define PLATO_MIN_MEM_CLOCK_SPEED (600000000) -+#define PLATO_MAX_MEM_CLOCK_SPEED (800000000) -+ -+/* 396 MHz (~400 MHz) on HW, around 1MHz on the emulator */ -+#if defined(EMULATOR) || defined(VIRTUAL_PLATFORM) -+#define PLATO_RGX_CORE_CLOCK_SPEED (1000000) -+#define PLATO_RGX_MIN_CORE_CLOCK_SPEED (1000000) -+#define PLATO_RGX_MAX_CORE_CLOCK_SPEED (1000000) -+#else -+ -+#define PLATO_RGX_CORE_CLOCK_SPEED (396000000) -+#define PLATO_RGX_MIN_CORE_CLOCK_SPEED (396000000) -+#define PLATO_RGX_MAX_CORE_CLOCK_SPEED (742500000) -+#endif -+ -+#define PLATO_MIN_PDP_CLOCK_SPEED (165000000) -+#define PLATO_TARGET_HDMI_SFR_CLOCK_SPEED (27000000) -+#define PLATO_TARGET_HDMI_CEC_CLOCK_SPEED (32768) -+ -+#define REG_TO_CELSIUS(reg) (((reg) * 352/4096) - 109) -+#define CELSIUS_TO_REG(temp) ((((temp) + 109) * 4096) / 352) -+#define PLATO_MAX_TEMP_CELSIUS (100) -+ -+#define PLATO_LMA_HEAP_REGION_MAPPABLE 0 -+#define PLATO_LMA_HEAP_REGION_NONMAPPABLE 1 -+ -+struct plato_debug_register { -+ char *description; -+ unsigned int offset; -+ unsigned int value; -+}; -+ -+#if defined(ENABLE_PLATO_HDMI) -+ -+#if defined(HDMI_PDUMP) -+/* Hard coded video formats for pdump type run only */ -+#define VIDEO_FORMAT_1280_720p 0 -+#define VIDEO_FORMAT_1920_1080p 1 -+#define DC_DEFAULT_VIDEO_FORMAT (VIDEO_FORMAT_1920_1080p) -+#endif -+ -+#endif /* ENABLE_PLATO_HDMI */ -+ -+/* Exposed APIs */ -+int plato_enable(struct device *dev); -+void plato_disable(struct device *dev); -+ -+int plato_enable_interrupt(struct device *dev, -+ enum PLATO_INTERRUPT interrupt_id); -+int plato_disable_interrupt(struct device *dev, -+ enum PLATO_INTERRUPT interrupt_id); -+ -+int plato_set_interrupt_handler(struct device *dev, -+ enum PLATO_INTERRUPT interrupt_id, -+ void (*handler_function)(void *), -+ void *handler_data); -+unsigned int plato_core_clock_speed(struct device *dev); -+unsigned int plato_mem_clock_speed(struct device *dev); -+unsigned int plato_pll_clock_speed(struct device *dev, -+ unsigned int clock_speed); -+void plato_enable_pdp_clock(struct device *dev); -+void plato_enable_pixel_clock(struct device *dev, u32 pixel_clock); -+ -+int plato_debug_info(struct device *dev, -+ struct plato_debug_register *noc_dbg_regs, -+ struct plato_debug_register *aon_dbg_regs); -+ -+/* Internal */ -+int plato_memory_init(struct plato_device *plato); -+void plato_memory_deinit(struct plato_device *plato); -+int plato_cfg_init(struct plato_device *plato); -+int request_pci_io_addr(struct pci_dev *pdev, u32 index, -+ resource_size_t offset, resource_size_t length); -+void release_pci_io_addr(struct pci_dev *pdev, u32 index, -+ resource_size_t start, resource_size_t length); -+ -+#endif /* _PLATO_DRV_H */ -diff --git a/drivers/gpu/drm/img-rogue/pmr.c b/drivers/gpu/drm/img-rogue/pmr.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ 
b/drivers/gpu/drm/img-rogue/pmr.c -@@ -0,0 +1,4189 @@ -+/*************************************************************************/ /*! -+@File -+@Title Physmem (PMR) abstraction -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Part of the memory management. This module is responsible for -+ the "PMR" abstraction. A PMR (Physical Memory Resource) -+ represents some unit of physical memory which is -+ allocated/freed/mapped/unmapped as an indivisible unit -+ (higher software levels provide an abstraction above that -+ to deal with dividing this down into smaller manageable units). -+ Importantly, this module knows nothing of virtual memory, or -+ of MMUs etc., with one excusable exception. We have the -+ concept of a "page size", which really means nothing in -+ physical memory, but represents a "contiguity quantum" such -+ that the higher level modules which map this memory are able -+ to verify that it matches the needs of the page size for the -+ virtual realm into which it is being mapped. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ */ /**************************************************************************/ -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include "pvrsrv_error.h" -+ -+#include "pdump.h" -+#include "devicemem_server_utils.h" -+ -+#include "osfunc.h" -+#include "pdump_km.h" -+#include "pdump_physmem.h" -+#include "pmr_impl.h" -+#include "pmr_os.h" -+#include "pvrsrv.h" -+ -+#include "allocmem.h" -+#include "lock.h" -+#include "uniq_key_splay_tree.h" -+ -+#if defined(SUPPORT_SECURE_EXPORT) -+#include "secure_export.h" -+#include "ossecure_export.h" -+#endif -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+#include "ri_server.h" -+#endif -+ -+/* ourselves */ -+#include "pmr.h" -+ -+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) -+#include "mmap_stats.h" -+#endif -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#include "process_stats.h" -+#include "proc_stats.h" -+#endif -+ -+#include "pdump_km.h" -+ -+#define PMR_FLAG_INTERNAL_SPARSE_ALLOC (1 << 0) -+#define PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE (1 << 1) -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+#define PMR_FLAG_INTERNAL_DEFER_FREE (1 << 2) -+#define PMR_FLAG_INTERNAL_IS_ZOMBIE (1 << 3) -+ -+/* Indicates PMR should be destroyed immediately and not deferred. */ -+#define PMR_NO_ZOMBIE_FENCE IMG_UINT64_MAX -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+/* Memalloc flags can be converted into pmr, ra or psplay flags. -+ * Ensure flags types are same size. -+ */ -+static_assert(sizeof(PVRSRV_MEMALLOCFLAGS_T) == sizeof(PMR_FLAGS_T), -+ "Mismatch memalloc and pmr flags type size."); -+static_assert(sizeof(PVRSRV_MEMALLOCFLAGS_T) == sizeof(RA_FLAGS_T), -+ "Mismatch memalloc and ra flags type size."); -+static_assert(sizeof(PVRSRV_MEMALLOCFLAGS_T) == sizeof(IMG_PSPLAY_FLAGS_T), -+ "Mismatch memalloc and psplay flags type size."); -+ -+/* A "context" for the physical memory block resource allocator. -+ * -+ * Context is probably the wrong word. -+ * -+ * There is almost certainly only one of these, ever, in the system. -+ * But, let's keep the notion of a context anyway, "just-in-case". -+ */ -+static struct _PMR_CTX_ -+{ -+ /* For debugging, and PDump, etc., let's issue a forever incrementing -+ * serial number to each allocation. -+ */ -+ IMG_UINT64 uiNextSerialNum; -+ -+ /* For security, we only allow a PMR to be mapped if the caller knows -+ * its key. We can pseudo-randomly generate keys -+ */ -+ IMG_UINT64 uiNextKey; -+ -+ /* For debugging only, I guess: Number of live PMRs */ -+ ATOMIC_T uiNumLivePMRs; -+ -+ /* Lock for this structure */ -+ POS_LOCK hLock; -+ -+ /* In order to seed the uiNextKey, we enforce initialisation at driver -+ * load time. Also, we can debug check at driver unload that the PMR -+ * count is zero. -+ */ -+ IMG_BOOL bModuleInitialised; -+} _gsSingletonPMRContext = { 1, 0, {0}, NULL, IMG_FALSE }; -+ -+/* A PMR. One per physical allocation. May be "shared". -+ * -+ * "shared" is ambiguous. We need to be careful with terminology. -+ * There are two ways in which a PMR may be "shared" and we need to be sure -+ * that we are clear which we mean. -+ * -+ * i) multiple small allocations living together inside one PMR. -+ * -+ * ii) one single allocation filling a PMR but mapped into multiple memory -+ * contexts. -+ * -+ * This is more important further up the stack - at this level, all we care is -+ * that the PMR is being referenced multiple times. 
-+ */ -+struct _PMR_ -+{ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ /* List node used to put the PMR on the zombie list -+ * (psDevNode->sPMRZombieList). */ -+ DLLIST_NODE sZombieNode; -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+ /* This object is strictly refcounted. References include: -+ * - mapping -+ * - live handles (to this object) -+ * - live export handles -+ * (thus it is normal for allocated and exported memory to have a refcount of 3) -+ * The object is destroyed when and only when the refcount reaches 0 -+ */ -+ -+ /* Physical address translation (device <> cpu) is done on a per device -+ * basis which means we need the physical heap info -+ */ -+ PHYS_HEAP *psPhysHeap; -+ -+ ATOMIC_T iRefCount; -+ -+ /* CPU mapping count - this is the number of times the PMR has been -+ * mapped to the CPU. It is used to determine when it is safe to permit -+ * modification of a sparse allocation's layout. -+ * Note that the process of mapping also increments iRefCount -+ * independently (as that is used to determine when a PMR may safely -+ * be destroyed). -+ */ -+ ATOMIC_T iCpuMapCount; -+ -+ /* Lock count - this is the number of times PMRLockSysPhysAddresses() -+ * has been called, less the number of PMRUnlockSysPhysAddresses() -+ * calls. This is arguably here for debug reasons only, as the refcount -+ * is already incremented as a matter of course. -+ * Really, this just allows us to trap protocol errors: i.e. calling -+ * PMRSysPhysAddr(), without a lock, or calling -+ * PMRUnlockSysPhysAddresses() too many or too few times. -+ */ -+ ATOMIC_T iLockCount; -+ -+ /* Lock for this structure */ -+ POS_LOCK hLock; -+ -+ /* Incrementing serial number to each allocation. */ -+ IMG_UINT64 uiSerialNum; -+ -+ /* For security, we only allow a PMR to be mapped if the caller knows -+ * its key. We can pseudo-randomly generate keys -+ */ -+ PMR_PASSWORD_T uiKey; -+ -+ /* Callbacks for per-flavour functions */ -+ const PMR_IMPL_FUNCTAB *psFuncTab; -+ -+ /* Data associated with the "subtype" */ -+ PMR_IMPL_PRIVDATA pvFlavourData; -+ -+ /* What kind of PMR do we have? */ -+ PMR_IMPL_TYPE eFlavour; -+ -+ /* And for pdump */ -+ const IMG_CHAR *pszPDumpDefaultMemspaceName; -+ -+ /* Allocation annotation */ -+ IMG_CHAR szAnnotation[DEVMEM_ANNOTATION_MAX_LEN]; -+ -+#if defined(PDUMP) -+ -+ IMG_HANDLE hPDumpAllocHandle; -+ -+ IMG_UINT32 uiNumPDumpBlocks; -+#endif -+ -+ /* Logical size of allocation. "logical", because a PMR can represent -+ * memory that will never physically exist. This is the amount of -+ * virtual space that the PMR would consume when it's mapped into a -+ * virtual allocation. -+ */ -+ PMR_SIZE_T uiLogicalSize; -+ -+ /* Mapping table for the allocation. -+ * PMR's can be sparse in which case not all the "logic" addresses in -+ * it are valid. We need to know which addresses are and aren't valid -+ * when mapping or reading the PMR. -+ * The mapping table translates "logical" offsets into physical offsets -+ * which is what we always pass to the PMR factory (so it doesn't have -+ * to be concerned about sparseness issues) -+ */ -+ PMR_MAPPING_TABLE *psMappingTable; -+ -+ /* Minimum Physical Contiguity Guarantee. Might be called "page size", -+ * but that would be incorrect, as page size is something meaningful -+ * only in virtual realm. This contiguity guarantee provides an -+ * inequality that can be verified/asserted/whatever to ensure that -+ * this PMR conforms to the page size requirement of the place the PMR -+ * gets mapped. 
(May be used to select an appropriate heap in variable -+ * page size systems) -+ * -+ * The absolutely necessary condition is this: -+ * -+ * device MMU page size <= actual physical contiguity. -+ * -+ * We go one step further in order to be able to provide an early -+ * warning / early compatibility check and say this: -+ * -+ * device MMU page size <= -+ * 2**(uiLog2ContiguityGuarantee) <= -+ * actual physical contiguity. -+ * -+ * In this way, it is possible to make the page table reservation -+ * in the device MMU without even knowing the granularity of the -+ * physical memory (i.e. useful for being able to allocate virtual -+ * before physical) -+ */ -+ PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee; -+ -+ /* Flags. We store a copy of the "PMR flags" (usually a subset of the -+ * flags given at allocation time) and return them to any caller of -+ * PMR_Flags(). The intention of these flags is that the ones stored -+ * here are used to represent permissions, such that no one is able -+ * to map a PMR in a mode in which they are not allowed, e.g., -+ * writeable for a read-only PMR, etc. -+ */ -+ PMR_FLAGS_T uiFlags; -+ -+ /* Various flags informing about PMR's state: -+ * -+ * SPARSE_ALLOC: -+ * indicates whether this PMR has been allocated as sparse. -+ * The condition for this variable to be set at allocation time is: -+ * (numVirtChunks != numPhysChunks) || (numVirtChunks > 1) -+ * -+ * NO_LAYOUT_CHANGE: -+ * Flag that conveys mutability of the PMR: -+ * - set: indicates the PMR is immutable (no more memory changes) -+ * - unset: means the memory layout associated with the PMR is mutable -+ * -+ * A PMR is always mutable by default but is marked immutable on the -+ * first export for the rest of its life. -+ * -+ * Also, any PMRs that track the same memory through imports are -+ * marked immutable as well. -+ * -+ * DEFER_FREE: -+ * If present the PMR is marked to be freed by the CleanupThread. -+ * -+ * IS_ZOMBIE: -+ * Indicates if the PMR is in the zombie state (marked for free in the -+ * CleanupThread). */ -+ IMG_UINT32 uiInternalFlags; -+ -+ /* Do we really need this? -+ * For now we'll keep it, until we know we don't. -+ * NB: this is not the "memory context" in client terms - this is -+ * _purely_ the "PMR" context, of which there is almost certainly only -+ * ever one per system as a whole, but we'll keep the concept anyway, -+ * just-in-case. -+ */ -+ struct _PMR_CTX_ *psContext; -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ /* Stored handle to PMR RI entry */ -+ void *hRIHandle; -+#endif -+}; -+ -+/* Do we need a struct for the export handle? 
-+ * I'll use one for now, but if nothing goes in it, we'll lose it -+ */ -+struct _PMR_EXPORT_ -+{ -+ struct _PMR_ *psPMR; -+}; -+ -+struct _PMR_PAGELIST_ -+{ -+ struct _PMR_ *psReferencePMR; -+}; -+ -+#if defined(PDUMP) -+static INLINE IMG_BOOL _IsHostDevicePMR(const PMR *const psPMR) -+{ -+ const PVRSRV_DEVICE_NODE *psDevNode = PVRSRVGetPVRSRVData()->psHostMemDeviceNode; -+ return psPMR->psPhysHeap == psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]; -+} -+ -+static void -+PDumpPMRFreePMR(PMR *psPMR, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_DEVMEM_ALIGN_T uiBlockSize, -+ IMG_UINT32 uiLog2Contiguity, -+ IMG_HANDLE hPDumpAllocationInfoHandle); -+ -+static void -+PDumpPMRMallocPMR(PMR *psPMR, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32ChunkSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *puiMappingTable, -+ IMG_UINT32 uiLog2Contiguity, -+ IMG_BOOL bInitialise, -+ IMG_UINT8 ui8InitValue, -+ IMG_HANDLE *phPDumpAllocInfoOut, -+ IMG_UINT32 ui32PDumpFlags); -+ -+static void -+PDumpPMRChangeSparsePMR(PMR *psPMR, -+ IMG_UINT32 uiBlockSize, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *pai32AllocIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pai32FreeIndices, -+ IMG_BOOL bInitialise, -+ IMG_UINT8 ui8InitValue, -+ IMG_HANDLE *phPDumpAllocInfoOut); -+#endif /* defined PDUMP */ -+ -+IMG_INT32 PMRGetLiveCount(void) -+{ -+ return OSAtomicRead(&_gsSingletonPMRContext.uiNumLivePMRs); -+} -+ -+PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR) -+{ -+ PPVRSRV_DEVICE_NODE psReturnedDeviceNode = NULL; -+ -+ PVR_ASSERT(psExportPMR != NULL); -+ if (psExportPMR) -+ { -+ PVR_ASSERT(psExportPMR->psPMR != NULL); -+ if (psExportPMR->psPMR) -+ { -+ PVR_ASSERT(OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0); -+ if (OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0) -+ { -+ psReturnedDeviceNode = PMR_DeviceNode(psExportPMR->psPMR); -+ } -+ } -+ } -+ -+ return psReturnedDeviceNode; -+} -+ -+static PVRSRV_ERROR -+_PMRCreate(PMR_SIZE_T uiLogicalSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee, -+ PMR_FLAGS_T uiFlags, -+ PMR **ppsPMR) -+{ -+ void *pvPMRLinAddr; -+ PMR *psPMR; -+ PMR_MAPPING_TABLE *psMappingTable; -+ struct _PMR_CTX_ *psContext; -+ IMG_UINT32 i, ui32Temp = 0; -+ PVRSRV_ERROR eError; -+ IMG_BOOL bSparse = IMG_FALSE; -+ PMR_SIZE_T uiChunkSize; -+ -+ psContext = &_gsSingletonPMRContext; -+ -+ /* Do we have a sparse allocation? 
*/ -+ if ( (ui32NumVirtChunks != ui32NumPhysChunks) || -+ (ui32NumVirtChunks > 1) ) -+ { -+ bSparse = IMG_TRUE; -+ uiChunkSize = 1ULL << uiLog2ContiguityGuarantee; -+ } -+ else -+ { -+ uiChunkSize = uiLogicalSize; -+ } -+ -+ /* Extra checks required for sparse PMRs */ -+ if (bSparse) -+ { -+ /* Check the logical size and chunk information agree with each other */ -+ if (uiLogicalSize != (uiChunkSize * ui32NumVirtChunks)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Bad mapping size (uiLogicalSize = 0x%llx, uiChunkSize = 0x%llx, ui32NumVirtChunks = %d)", -+ __func__, (unsigned long long)uiLogicalSize, (unsigned long long)uiChunkSize, ui32NumVirtChunks)); -+ return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE; -+ } -+ } -+ -+ pvPMRLinAddr = OSAllocMem(sizeof(*psPMR) + sizeof(*psMappingTable) + sizeof(IMG_UINT32) * ui32NumVirtChunks); -+ PVR_RETURN_IF_NOMEM(pvPMRLinAddr); -+ -+ psPMR = (PMR *) pvPMRLinAddr; -+ psMappingTable = IMG_OFFSET_ADDR(pvPMRLinAddr, sizeof(*psPMR)); -+ -+ /* Setup the mapping table */ -+ psMappingTable->uiChunkSize = uiChunkSize; -+ psMappingTable->ui32NumVirtChunks = ui32NumVirtChunks; -+ psMappingTable->ui32NumPhysChunks = ui32NumPhysChunks; -+ OSCachedMemSet(&psMappingTable->aui32Translation[0], 0xFF, sizeof(psMappingTable->aui32Translation[0])* -+ ui32NumVirtChunks); -+ for (i=0; iaui32Translation[ui32Temp] = ui32Temp; -+ } -+ else -+ { -+ OSFreeMem(psPMR); -+ return PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY; -+ } -+ } -+ -+ eError = OSLockCreate(&psPMR->hLock); -+ if (eError != PVRSRV_OK) -+ { -+ OSFreeMem(psPMR); -+ return eError; -+ } -+ -+ /* Setup the PMR */ -+ OSAtomicWrite(&psPMR->iRefCount, 0); -+ OSAtomicWrite(&psPMR->iCpuMapCount, 0); -+ -+ /* If allocation is not made on demand, it will be backed now and -+ * backing will not be removed until the PMR is destroyed, therefore -+ * we can initialise the iLockCount to 1 rather than 0. -+ */ -+ OSAtomicWrite(&psPMR->iLockCount, (PVRSRV_CHECK_ON_DEMAND(uiFlags) ? 0 : 1)); -+ -+ psPMR->psContext = psContext; -+ psPMR->uiLogicalSize = uiLogicalSize; -+ psPMR->uiLog2ContiguityGuarantee = uiLog2ContiguityGuarantee; -+ psPMR->uiFlags = uiFlags; -+ psPMR->psMappingTable = psMappingTable; -+ psPMR->uiInternalFlags = bSparse ? 
PMR_FLAG_INTERNAL_SPARSE_ALLOC : 0; -+ psPMR->szAnnotation[0] = '\0'; -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ dllist_init(&psPMR->sZombieNode); -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ psPMR->hRIHandle = NULL; -+#endif -+ OSLockAcquire(psContext->hLock); -+ psPMR->uiKey = psContext->uiNextKey; -+ psPMR->uiSerialNum = psContext->uiNextSerialNum; -+ psContext->uiNextKey = (0x80200003 * psContext->uiNextKey) -+ ^ (0xf00f0081 * (uintptr_t)pvPMRLinAddr); -+ psContext->uiNextSerialNum++; -+ *ppsPMR = psPMR; -+ OSLockRelease(psContext->hLock); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: 0x%p, key:0x%016" IMG_UINT64_FMTSPECX ", numLive:%d", -+ __func__, psPMR, psPMR->uiKey, OSAtomicRead(&psPMR->psContext->uiNumLivePMRs))); -+ -+ /* Increment live PMR count */ -+ OSAtomicIncrement(&psContext->uiNumLivePMRs); -+ -+ return PVRSRV_OK; -+} -+ -+static IMG_UINT32 -+_Ref(PMR *psPMR) -+{ -+ if (OSAtomicRead(&psPMR->iRefCount) == 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "pmr.c: Ref Count == 0 PMR: @0x%p Annot: %s", -+ psPMR, -+ psPMR->szAnnotation)); -+ OSWarnOn(1); -+ } -+ return OSAtomicIncrement(&psPMR->iRefCount); -+} -+ -+static IMG_UINT32 -+_Unref(PMR *psPMR) -+{ -+ if (OSAtomicRead(&psPMR->iRefCount) <= 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "pmr.c: Unref Count <= 0 PMR: @0x%p Annot: %s RefCount: %d", -+ psPMR, -+ psPMR->szAnnotation, -+ (IMG_INT32) OSAtomicRead(&psPMR->iRefCount))); -+ OSWarnOn(1); -+ } -+ return OSAtomicDecrement(&psPMR->iRefCount); -+} -+ -+static INLINE void -+_FactoryLock(const PMR_IMPL_FUNCTAB *psFuncTable) -+{ -+ if (psFuncTable->pfnGetPMRFactoryLock != NULL) -+ { -+ psFuncTable->pfnGetPMRFactoryLock(); -+ } -+} -+ -+static INLINE void -+_FactoryUnlock(const PMR_IMPL_FUNCTAB *psFuncTable) -+{ -+ if (psFuncTable->pfnReleasePMRFactoryLock != NULL) -+ { -+ psFuncTable->pfnReleasePMRFactoryLock(); -+ } -+} -+ -+static void -+_PMRDestroy(PMR *psPMR) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ -+ if (psPMR->psFuncTab->pfnFinalize != NULL) -+ { -+ psPMR->psFuncTab->pfnFinalize(psPMR->pvFlavourData); -+ } -+ -+#if defined(PDUMP) -+ /* if allocation is done on the host node don't include it in the PDUMP */ -+ if (!_IsHostDevicePMR(psPMR)) -+ { -+ PDumpPMRFreePMR(psPMR, -+ psPMR->uiLogicalSize, -+ (1 << psPMR->uiLog2ContiguityGuarantee), -+ psPMR->uiLog2ContiguityGuarantee, -+ psPMR->hPDumpAllocHandle); -+ } -+#endif -+ -+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) -+ /* This PMR is about to be destroyed, update its mmap stats record (if present) -+ * to avoid dangling pointer. Additionally, this is required because mmap stats -+ * are identified by PMRs and a new PMR down the line "might" get the same address -+ * as the one we're about to free and we'd like 2 different entries in mmaps -+ * stats for such cases */ -+ MMapStatsRemovePMR(psPMR); -+#endif -+ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ /* If not backed on demand, iLockCount should be 1 otherwise it should be 0 */ -+ PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)); -+#endif -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ /* Delete RI entry */ -+ if (psPMR->hRIHandle) -+ { -+ PVRSRV_ERROR eError = RIDeletePMREntryKM(psPMR->hRIHandle); -+ PVR_LOG_IF_ERROR(eError, "RIDeletePMREntryKM"); -+ /* continue destroying the PMR */ -+ } -+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ -+ -+ /* Decrement live PMR count. 
Probably only of interest for debugging */ -+ PVR_ASSERT(OSAtomicRead(&psPMR->psContext->uiNumLivePMRs) > 0); -+ OSAtomicDecrement(&psPMR->psContext->uiNumLivePMRs); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: 0x%p, key:0x%016" IMG_UINT64_FMTSPECX ", numLive:%d", -+ __func__, psPMR, psPMR->uiKey, OSAtomicRead(&psPMR->psContext->uiNumLivePMRs))); -+ -+ OSLockDestroy(psPMR->hLock); -+ OSFreeMem(psPMR); -+} -+ -+static void -+_UnrefAndMaybeDestroy(PMR *psPMR) -+{ -+ const PMR_IMPL_FUNCTAB *psFuncTable; -+ IMG_INT iRefCount; -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ PVRSRV_DEV_POWER_STATE ePowerState; -+ PVRSRV_DEVICE_NODE *psDevNode; -+ PVRSRV_ERROR eError; -+#endif -+ -+ PVR_ASSERT(psPMR != NULL); -+ -+ psFuncTable = psPMR->psFuncTab; -+ -+ _FactoryLock(psFuncTable); -+ -+ iRefCount = _Unref(psPMR); -+ -+ if (iRefCount > 0) -+ { -+ _FactoryUnlock(psFuncTable); -+ return; -+ } -+ -+#if !defined(SUPPORT_PMR_DEFERRED_FREE) -+ /* Don't defer PMR destruction in NoHW and PDUMP drivers. */ -+ _PMRDestroy(psPMR); -+#else /* !defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ psDevNode = PhysHeapDeviceNode(psPMR->psPhysHeap); -+ -+ eError = PVRSRVGetDevicePowerState(psDevNode, &ePowerState); -+ if (eError != PVRSRV_OK) -+ { -+ /* Treat unknown power state as ON. */ -+ ePowerState = PVRSRV_DEV_POWER_STATE_ON; -+ } -+ -+ /* PMRs that are not marked for deferred free can be freed right away. -+ * Those are the PMRs that are not device mappable (so only CPU -+ * readable/writeable). -+ * All PMRs that are device mappable need to go through the defer free -+ * path unless the power is OFF. If power is OFF the cache invalidation -+ * comes as a given. */ -+ if (!BITMASK_HAS(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_DEFER_FREE) || -+ ePowerState == PVRSRV_DEV_POWER_STATE_OFF || -+ psDevNode->eDevState == PVRSRV_DEVICE_STATE_PCI_ERROR) -+ { -+ _PMRDestroy(psPMR); -+ } -+ else -+ { -+ /* Defer freeing the PMR until the Firmware invalidates the caches. */ -+ OSLockAcquire(psDevNode->hPMRZombieListLock); -+ -+ BITMASK_SET(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_IS_ZOMBIE); -+ dllist_add_to_tail(&psDevNode->sPMRZombieList, &psPMR->sZombieNode); -+ psDevNode->uiPMRZombieCount++; -+ -+ /* PMR pages are accounted by the driver/process stats. Those stats -+ * are available on page level hence they need to be adjusted by -+ * the factories. This is done by the pfnZombify callback. -+ * Operation needs to be done while holding hPMRZombieListLock -+ * to prevent CleanupThread from freeing pages while memory stats -+ * accounting is ongoing. 
*/ -+ if (psPMR->psFuncTab->pfnZombify != NULL) -+ { -+ PVRSRV_ERROR eError = psPMR->psFuncTab->pfnZombify(psPMR->pvFlavourData, psPMR); -+ PVR_LOG_IF_ERROR(eError, "pfnZombify"); -+ } -+ -+ OSLockRelease(psDevNode->hPMRZombieListLock); -+ } -+#endif /* !defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+ _FactoryUnlock(psFuncTable); -+} -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+typedef struct _PMR_ZOMBIE_CLEANUP_ITEM_ -+{ -+ PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn; -+ DLLIST_NODE sZombieList; -+ PPVRSRV_DEVICE_NODE psDevNode; -+ PVRSRV_CLIENT_SYNC_PRIM *psSync; -+ IMG_UINT32 uiRequiredSyncValue; -+ IMG_UINT32 uiRequiredPowerOffCounter; -+} PMR_ZOMBIE_CLEANUP_ITEM; -+ -+static INLINE void -+_ZombieListLock(PMR_ZOMBIE_CLEANUP_ITEM *psCleanupItem) -+{ -+ OSLockAcquire(psCleanupItem->psDevNode->hPMRZombieListLock); -+} -+ -+static INLINE void -+_ZombieListUnlock(PMR_ZOMBIE_CLEANUP_ITEM *psCleanupItem) -+{ -+ OSLockRelease(psCleanupItem->psDevNode->hPMRZombieListLock); -+} -+ -+static INLINE IMG_BOOL -+_CanNotFreeZombies(const PMR_ZOMBIE_CLEANUP_ITEM *psCleanupItem) -+{ -+ const PVRSRV_DEVICE_NODE *psDevNode = psCleanupItem->psDevNode; -+ -+ /* For a zombie PMR to be eligible to be freed either the GPU MMU caches -+ * need to be flushed (the Firmware updates the sync) or the GPU power needs -+ * to be off. */ -+ return !PVRSRVHasCounter32Advanced(OSReadDeviceMem32(psCleanupItem->psSync->pui32LinAddr), -+ psCleanupItem->uiRequiredSyncValue) && -+ !PVRSRVHasCounter32Advanced(psDevNode->uiPowerOffCounter, -+ psCleanupItem->uiRequiredPowerOffCounter); -+} -+ -+static PVRSRV_ERROR _PmrZombieCleanup(void *pvData) -+{ -+ PMR_ZOMBIE_CLEANUP_ITEM *psCleanupItem = pvData; -+ DLLIST_NODE *psNode; -+ -+ if (_CanNotFreeZombies(psCleanupItem)) -+ { -+ return PVRSRV_ERROR_RETRY; -+ } -+ -+ do -+ { -+ PMR *psPMR; -+ const PMR_IMPL_FUNCTAB *psFuncTable; -+ -+ /* hPMRZombieListLock will prevent removing a node while the list is -+ * processed. If the lock is already acquired by other process which -+ * intends to remove an item from the list it'll assure the list -+ * consistency. -+ * If this thread acquires the lock first it's possible that another -+ * thread might be holding PMR factory lock. */ -+ -+ _ZombieListLock(psCleanupItem); -+ psNode = dllist_get_next_node(&psCleanupItem->sZombieList); -+ _ZombieListUnlock(psCleanupItem); -+ -+ if (psNode != NULL) -+ { -+ psPMR = IMG_CONTAINER_OF(psNode, PMR, sZombieNode); -+ psFuncTable = psPMR->psFuncTab; -+ -+ _FactoryLock(psFuncTable); -+ _ZombieListLock(psCleanupItem); -+ -+ /* It is possible that the element might have been removed so -+ * we have to check if the PMR is still a zombie. */ -+ -+ if (PMR_IsZombie(psPMR)) -+ { -+ dllist_remove_node(psNode); -+ psCleanupItem->psDevNode->uiPMRZombieCountInCleanup--; -+ /* Unlock here to avoid locking dependency with the power lock. -+ * It's okay to do it here since the factory lock is the one -+ * that needs to be held during PMR destruction. 
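_CanNotFreeZombies above keeps the cleanup item queued until either the firmware's MMU-invalidate sync or the device power-off counter has moved past the value captured when the zombies were queued. Comparisons like that have to tolerate 32-bit wrap-around; the usual trick is a signed difference, sketched below (the function name is illustrative, not the services API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* True once 'current' has reached or passed 'required', even if the
 * counter wrapped around 2^32 in between (assumes the gap is < 2^31). */
static bool counter_has_advanced(uint32_t current, uint32_t required)
{
    return (int32_t)(current - required) >= 0;
}

int main(void)
{
    assert( counter_has_advanced(10, 10));          /* exactly reached     */
    assert( counter_has_advanced(11, 10));          /* passed              */
    assert(!counter_has_advanced( 9, 10));          /* not yet             */
    assert( counter_has_advanced(5, 0xFFFFFFF0u));  /* passed across wrap  */
    puts("wrap-safe comparisons ok");
    return 0;
}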
*/ -+ _ZombieListUnlock(psCleanupItem); -+ -+ _PMRDestroy(psPMR); -+ } -+ else -+ { -+ _ZombieListUnlock(psCleanupItem); -+ } -+ _FactoryUnlock(psFuncTable); -+ } -+ } while (psNode != NULL); -+ -+ OSFreeMem(psCleanupItem); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_BOOL PMRQueueZombiesForCleanup(PPVRSRV_DEVICE_NODE psDevNode) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PMR_ZOMBIE_CLEANUP_ITEM *psCleanupItem; -+ -+ /* Don't defer the freeing if we are currently unloading the driver -+ * or if the sync has been destroyed */ -+ if (psPVRSRVData->bUnload || psDevNode->psMMUCacheSyncPrim == NULL) -+ { -+ return IMG_FALSE; -+ } -+ -+ OSLockAcquire(psDevNode->hPMRZombieListLock); -+ -+ if (dllist_is_empty(&psDevNode->sPMRZombieList)) -+ { -+ OSLockRelease(psDevNode->hPMRZombieListLock); -+ return IMG_FALSE; -+ } -+ -+ psCleanupItem = OSAllocMem(sizeof(*psCleanupItem)); -+ if (psCleanupItem == NULL) -+ { -+ OSLockRelease(psDevNode->hPMRZombieListLock); -+ return IMG_FALSE; -+ } -+ -+ psCleanupItem->sCleanupThreadFn.pfnFree = _PmrZombieCleanup; -+ psCleanupItem->sCleanupThreadFn.pvData = psCleanupItem; -+ psCleanupItem->sCleanupThreadFn.bDependsOnHW = IMG_TRUE; -+ psCleanupItem->sCleanupThreadFn.eCleanupType = PVRSRV_CLEANUP_TYPE_PMR; -+ CLEANUP_THREAD_SET_RETRY_TIMEOUT(&psCleanupItem->sCleanupThreadFn, -+ CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT); -+ -+ psCleanupItem->psDevNode = psDevNode; -+ psCleanupItem->psSync = psDevNode->psMMUCacheSyncPrim; -+ psCleanupItem->uiRequiredSyncValue = psDevNode->ui32NextMMUInvalidateUpdate; -+ psCleanupItem->uiRequiredPowerOffCounter = psDevNode->uiPowerOffCounterNext; -+ -+ /* This moves the zombie list to the cleanup item. */ -+ dllist_replace_head(&psDevNode->sPMRZombieList, &psCleanupItem->sZombieList); -+ psDevNode->uiPMRZombieCountInCleanup += psDevNode->uiPMRZombieCount; -+ psDevNode->uiPMRZombieCount = 0; -+ -+ OSLockRelease(psDevNode->hPMRZombieListLock); -+ -+ PVRSRVCleanupThreadAddWork(psDevNode, &psCleanupItem->sCleanupThreadFn); -+ -+ return IMG_TRUE; -+} -+ -+void -+PMRDequeueZombieAndRef(PMR *psPMR) -+{ -+ /* If this was on a list then it's brought back to life. */ -+ OSLockAcquire(PhysHeapDeviceNode(psPMR->psPhysHeap)->hPMRZombieListLock); -+ -+ /* Need to reference this PMR since it was about to be destroyed and its -+ * reference count must be 0 (can't use _Ref() due to the warning). */ -+ OSAtomicIncrement(&psPMR->iRefCount); -+ -+#if defined(SUPPORT_VALIDATION) || defined(DEBUG) || defined(PVR_TESTING_UTILS) -+ PVR_LOG(("%s: 0x%p, key:0x%016" IMG_UINT64_FMTSPECX ", numLive:%d", -+ __func__, psPMR, psPMR->uiKey, OSAtomicRead(&psPMR->psContext->uiNumLivePMRs))); -+#endif -+ -+ /* If we got to this point the PMR must be on a list. If it's not -+ * it should mean a race of some sort. */ -+ PVR_ASSERT(!dllist_is_empty(&psPMR->sZombieNode)); -+ -+ /* Revive the PMR (remove it from the zombie list) and therefore -+ * prevent it's destruction. 
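PMRQueueZombiesForCleanup above hands the whole per-device zombie list to a cleanup work item in one step, so the device list is immediately empty and can start accumulating new zombies while the old batch is processed. With a circular doubly-linked list that handover is a constant-time pointer splice; a minimal sketch with invented names (not the driver's dllist implementation):

#include <stdio.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *h) { h->prev = h->next = h; }
static int  list_empty(const struct node *h) { return h->next == h; }

static void list_add_tail(struct node *h, struct node *n)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}

/* Move every element from 'src' onto 'dst' (treated as uninitialised here)
 * and leave 'src' empty: an O(1) pointer splice, independent of length. */
static void list_replace_head(struct node *src, struct node *dst)
{
    if (list_empty(src)) { list_init(dst); return; }
    dst->next = src->next;  dst->prev = src->prev;
    dst->next->prev = dst;  dst->prev->next = dst;
    list_init(src);
}

int main(void)
{
    struct node dev_list, cleanup_list, a, b;
    list_init(&dev_list);
    list_add_tail(&dev_list, &a);
    list_add_tail(&dev_list, &b);

    list_replace_head(&dev_list, &cleanup_list);
    printf("device list empty: %s, cleanup list empty: %s\n",
           list_empty(&dev_list) ? "yes" : "no",
           list_empty(&cleanup_list) ? "yes" : "no");
    return 0;
}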
*/ -+ dllist_remove_node(&psPMR->sZombieNode); -+ BITMASK_UNSET(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_IS_ZOMBIE); -+ PhysHeapDeviceNode(psPMR->psPhysHeap)->uiPMRZombieCountInCleanup--; -+ -+ OSLockRelease(PhysHeapDeviceNode(psPMR->psPhysHeap)->hPMRZombieListLock); -+} -+ -+void -+PMRMarkForDeferFree(PMR *psPMR) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ -+ BITMASK_SET(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_DEFER_FREE); -+} -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+static INLINE IMG_BOOL _PMRIsSparse(const PMR *psPMR) -+{ -+ return BITMASK_HAS(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_SPARSE_ALLOC); -+} -+ -+PVRSRV_ERROR -+PMRCreatePMR(PHYS_HEAP *psPhysHeap, -+ PMR_SIZE_T uiLogicalSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee, -+ PMR_FLAGS_T uiFlags, -+ const IMG_CHAR *pszAnnotation, -+ const PMR_IMPL_FUNCTAB *psFuncTab, -+ PMR_IMPL_PRIVDATA pvPrivData, -+ PMR_IMPL_TYPE eType, -+ PMR **ppsPMRPtr, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PMR *psPMR = NULL; -+ PVRSRV_ERROR eError; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pszAnnotation != NULL, "pszAnnotation"); -+ -+ eError = _PMRCreate(uiLogicalSize, -+ ui32NumPhysChunks, -+ ui32NumVirtChunks, -+ pui32MappingTable, -+ uiLog2ContiguityGuarantee, -+ uiFlags, -+ &psPMR); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ psPMR->psPhysHeap = psPhysHeap; -+ psPMR->psFuncTab = psFuncTab; -+ psPMR->pszPDumpDefaultMemspaceName = PhysHeapPDumpMemspaceName(psPhysHeap); -+ psPMR->pvFlavourData = pvPrivData; -+ psPMR->eFlavour = eType; -+ OSAtomicWrite(&psPMR->iRefCount, 1); -+ -+ OSStringLCopy(psPMR->szAnnotation, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN); -+ -+#if defined(PDUMP) -+ /* if allocation was done on the host node don't include it in the PDUMP */ -+ if (!_IsHostDevicePMR(psPMR)) -+ { -+ PMR_FLAGS_T uiFlags = psPMR->uiFlags; -+ IMG_BOOL bInitialise = IMG_FALSE; -+ IMG_UINT8 ui8InitValue = 0; -+ -+ if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags)) -+ { -+ bInitialise = IMG_TRUE; -+ } -+ else if (PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags)) -+ { -+ ui8InitValue = (IMG_UINT8)PVRSRV_POISON_ON_ALLOC_VALUE; -+ bInitialise = IMG_TRUE; -+ } -+ -+ PDumpPMRMallocPMR(psPMR, -+ uiLogicalSize, -+ 1ULL<hPDumpAllocHandle, -+ ui32PDumpFlags); -+ } -+#endif -+ -+ *ppsPMRPtr = psPMR; -+ -+ return PVRSRV_OK; -+ -+ /* Error exit paths follow */ -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+PVRSRV_ERROR PMRLockSysPhysAddressesNested(PMR *psPMR, -+ IMG_UINT32 ui32NestingLevel) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psPMR != NULL); -+ -+ /* Note: taking this lock is not required to protect the PMR reference -+ * count, because the PMR reference count is atomic. Rather, taking -+ * the lock here guarantees that no caller will exit this function -+ * without the underlying physical addresses being locked. -+ */ -+ OSLockAcquireNested(psPMR->hLock, ui32NestingLevel); -+ /* We also count the locks as references, so that the PMR is not freed -+ * while someone is using a physical address. -+ * "lock" here simply means incrementing the refcount. It means the -+ * refcount is multipurpose, but that's okay. 
We only have to promise -+ * that physical addresses are valid after this point, and remain valid -+ * until the corresponding PMRUnlockSysPhysAddressesOSMem() -+ */ -+ _Ref(psPMR); -+ -+ /* Also count locks separately from other types of references, to -+ * allow for debug assertions -+ */ -+ -+ /* Only call callback if lockcount transitions from 0 to 1 (or 1 to 2 if not backed on demand) */ -+ if (OSAtomicIncrement(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 1 : 2)) -+ { -+ if (psPMR->psFuncTab->pfnLockPhysAddresses != NULL) -+ { -+ /* must always have lock and unlock in pairs! */ -+ PVR_ASSERT(psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL); -+ -+ eError = psPMR->psFuncTab->pfnLockPhysAddresses(psPMR->pvFlavourData); -+ -+ PVR_GOTO_IF_ERROR(eError, e1); -+ } -+ } -+ OSLockRelease(psPMR->hLock); -+ -+ return PVRSRV_OK; -+ -+e1: -+ OSAtomicDecrement(&psPMR->iLockCount); -+ _Unref(psPMR); -+ PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) != 0); -+ OSLockRelease(psPMR->hLock); -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+PVRSRV_ERROR -+PMRLockSysPhysAddresses(PMR *psPMR) -+{ -+ return PMRLockSysPhysAddressesNested(psPMR, 0); -+} -+ -+PVRSRV_ERROR -+PMRUnlockSysPhysAddresses(PMR *psPMR) -+{ -+ return PMRUnlockSysPhysAddressesNested(psPMR, 2); -+} -+ -+PVRSRV_ERROR -+PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psPMR != NULL); -+ -+ /* Acquiring the lock here, as well as during the Lock operation ensures -+ * the lock count hitting zero and the unlocking of the phys addresses is -+ * an atomic operation -+ */ -+ OSLockAcquireNested(psPMR->hLock, ui32NestingLevel); -+ PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)); -+ -+ if (OSAtomicDecrement(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)) -+ { -+ if (psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL) -+ { -+ PVR_ASSERT(psPMR->psFuncTab->pfnLockPhysAddresses != NULL); -+ -+ eError = psPMR->psFuncTab->pfnUnlockPhysAddresses(psPMR->pvFlavourData); -+ /* must never fail */ -+ PVR_ASSERT(eError == PVRSRV_OK); -+ } -+ } -+ -+ OSLockRelease(psPMR->hLock); -+ -+ /* We also count the locks as references, so that the PMR is not -+ * freed while someone is using a physical address. -+ */ -+ _UnrefAndMaybeDestroy(psPMR); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+PMRMakeLocalImportHandle(PMR *psPMR, -+ PMR **ppsPMR) -+{ -+ PMRRefPMR(psPMR); -+ *ppsPMR = psPMR; -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+PMRUnmakeLocalImportHandle(PMR *psPMR) -+{ -+ PMRUnrefPMR(psPMR); -+ return PVRSRV_OK; -+} -+ -+/* -+ Note: -+ We pass back the PMR as it was passed in as a different handle type -+ (DEVMEM_MEM_IMPORT) and it allows us to change the import structure -+ type if we should need to embed any meta data in it. 
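The lock/unlock pair above only invokes the factory's physical-address callbacks when the lock count crosses a threshold that depends on whether the backing was allocated up front (one implicit lock already held) or on demand. A standalone sketch of that "call the backend only on the first lock and the last unlock" pattern; the callback and struct names are hypothetical:

#include <stdbool.h>
#include <stdio.h>

struct pinned_mem {
    int  lock_count;      /* protected by an external lock in real code  */
    bool backed_up_front; /* physical pages allocated at creation time   */
};

static void backend_pin(void)   { puts("backend: pin physical pages"); }
static void backend_unpin(void) { puts("backend: unpin physical pages"); }

static void mem_lock(struct pinned_mem *m)
{
    /* First-lock threshold: 1 when pages appear on demand, 2 when the
     * creation path already holds one implicit lock. */
    int first = m->backed_up_front ? 2 : 1;
    if (++m->lock_count == first)
        backend_pin();
}

static void mem_unlock(struct pinned_mem *m)
{
    int last = m->backed_up_front ? 1 : 0;
    if (--m->lock_count == last)
        backend_unpin();
}

int main(void)
{
    struct pinned_mem on_demand = { .lock_count = 0, .backed_up_front = false };
    mem_lock(&on_demand);   /* 0 -> 1: backend pin   */
    mem_lock(&on_demand);   /* 1 -> 2: no callback   */
    mem_unlock(&on_demand); /* 2 -> 1: no callback   */
    mem_unlock(&on_demand); /* 1 -> 0: backend unpin */
    return 0;
}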
-+ */ -+PVRSRV_ERROR -+PMRLocalImportPMR(PMR *psPMR, -+ PMR **ppsPMR, -+ IMG_DEVMEM_SIZE_T *puiSize, -+ IMG_DEVMEM_ALIGN_T *puiAlign) -+{ -+ _Ref(psPMR); -+ -+ /* Return the PMR */ -+ *ppsPMR = psPMR; -+ *puiSize = psPMR->uiLogicalSize; -+ *puiAlign = 1ULL << psPMR->uiLog2ContiguityGuarantee; -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+PMRGetUID(PMR *psPMR, -+ IMG_UINT64 *pui64UID) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ -+ *pui64UID = psPMR->uiSerialNum; -+ -+ return PVRSRV_OK; -+} -+ -+#if defined(SUPPORT_INSECURE_EXPORT) -+PVRSRV_ERROR -+PMRExportPMR(PMR *psPMR, -+ PMR_EXPORT **ppsPMRExportPtr, -+ PMR_SIZE_T *puiSize, -+ PMR_LOG2ALIGN_T *puiLog2Contig, -+ PMR_PASSWORD_T *puiPassword) -+{ -+ IMG_UINT64 uiPassword; -+ PMR_EXPORT *psPMRExport; -+ -+ uiPassword = psPMR->uiKey; -+ -+ psPMRExport = OSAllocMem(sizeof(*psPMRExport)); -+ PVR_RETURN_IF_NOMEM(psPMRExport); -+ -+ psPMRExport->psPMR = psPMR; -+ _Ref(psPMR); -+ /* The layout of a PMR can't change once exported -+ * to make sure the importers view of the memory is -+ * the same as exporter. */ -+ PMR_SetLayoutFixed(psPMR, IMG_TRUE); -+ -+ *ppsPMRExportPtr = psPMRExport; -+ *puiSize = psPMR->uiLogicalSize; -+ *puiLog2Contig = psPMR->uiLog2ContiguityGuarantee; -+ *puiPassword = uiPassword; -+ -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR -+PMRUnexportPMR(PMR_EXPORT *psPMRExport) -+{ -+ PVR_ASSERT(psPMRExport != NULL); -+ PVR_ASSERT(psPMRExport->psPMR != NULL); -+ PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0); -+ -+ _UnrefAndMaybeDestroy(psPMRExport->psPMR); -+ -+ OSFreeMem(psPMRExport); -+ -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR -+PMRImportPMR(PMR_EXPORT *psPMRExport, -+ PMR_PASSWORD_T uiPassword, -+ PMR_SIZE_T uiSize, -+ PMR_LOG2ALIGN_T uiLog2Contig, -+ PMR **ppsPMR) -+{ -+ PMR *psPMR; -+ -+ PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0); -+ -+ psPMR = psPMRExport->psPMR; -+ -+ PVR_ASSERT(PMR_IsMemLayoutFixed(psPMR)); -+ -+ if (psPMR->uiKey != uiPassword) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PMRImport: Import failed, password specified does not match the export")); -+ return PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR; -+ } -+ -+ if (psPMR->uiLogicalSize != uiSize || psPMR->uiLog2ContiguityGuarantee != uiLog2Contig) -+ { -+ return PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES; -+ } -+ -+ _Ref(psPMR); -+ -+ *ppsPMR = psPMR; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+PMRUnimportPMR(PMR *psPMR) -+{ -+ _UnrefAndMaybeDestroy(psPMR); -+ -+ return PVRSRV_OK; -+} -+ -+#endif /* if defined(SUPPORT_INSECURE_EXPORT) */ -+ -+#if defined(SUPPORT_SECURE_EXPORT) -+PVRSRV_ERROR PMRSecureUnexportPMR(PMR *psPMR) -+{ -+ _UnrefAndMaybeDestroy(psPMR); -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR _ReleaseSecurePMR(void *psExport) -+{ -+ return PMRSecureUnexportPMR(psExport); -+} -+ -+PVRSRV_ERROR PMRSecureExportPMR(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE * psDevNode, -+ PMR *psPMR, -+ IMG_SECURE_TYPE *phSecure, -+ PMR **ppsPMR, -+ CONNECTION_DATA **ppsSecureConnection) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_UNREFERENCED_PARAMETER(psDevNode); -+ PVR_UNREFERENCED_PARAMETER(ppsSecureConnection); -+ -+ /* We are acquiring reference to PMR here because OSSecureExport -+ * releases bridge lock and PMR lock for a moment and we don't want PMR -+ * to be removed by other thread in the meantime. 
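The insecure export path that follows wraps the PMR in an export structure, records the PMR's randomly generated key as a "password", and re-checks that key plus the logical size and contiguity guarantee on import. A toy standalone version of that handshake; the struct and function names are invented for illustration:

#include <stdint.h>
#include <stdio.h>

struct buffer {
    uint64_t key;        /* per-allocation secret, set at creation      */
    uint64_t size;
    uint32_t log2_align;
    int      refs;
};

struct buffer_export {
    struct buffer *buf;
    uint64_t password;   /* copy of the key handed to the importer      */
};

static void export_buffer(struct buffer *b, struct buffer_export *ex)
{
    b->refs++;
    ex->buf = b;
    ex->password = b->key;
}

/* Returns the buffer on success, NULL if the password or geometry differs. */
static struct buffer *import_buffer(struct buffer_export *ex, uint64_t password,
                                    uint64_t size, uint32_t log2_align)
{
    struct buffer *b = ex->buf;
    if (b->key != password)
        return NULL;
    if (b->size != size || b->log2_align != log2_align)
        return NULL;
    b->refs++;
    return b;
}

int main(void)
{
    struct buffer b = { .key = 0xDEADBEEFCAFEF00Dull, .size = 4096, .log2_align = 12, .refs = 1 };
    struct buffer_export ex;
    export_buffer(&b, &ex);

    printf("import ok: %s\n", import_buffer(&ex, ex.password, 4096, 12) ? "yes" : "no");
    printf("bad key:   %s\n", import_buffer(&ex, 42, 4096, 12)          ? "yes" : "no");
    return 0;
}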
*/ -+ _Ref(psPMR); -+ -+ eError = OSSecureExport("secure_pmr", -+ _ReleaseSecurePMR, -+ (void *) psPMR, -+ phSecure); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ *ppsPMR = psPMR; -+ -+ /* Mark the PMR immutable once exported -+ * This allows the importers and exporter to have -+ * the same view of the memory */ -+ PMR_SetLayoutFixed(psPMR, IMG_TRUE); -+ -+ return PVRSRV_OK; -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ _UnrefAndMaybeDestroy(psPMR); -+ return eError; -+} -+ -+PVRSRV_ERROR PMRSecureImportPMR(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_SECURE_TYPE hSecure, -+ PMR **ppsPMR, -+ IMG_DEVMEM_SIZE_T *puiSize, -+ IMG_DEVMEM_ALIGN_T *puiAlign) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psPMR; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ eError = OSSecureImport(hSecure, (void **) &psPMR); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ PVR_LOG_RETURN_IF_FALSE(PhysHeapDeviceNode(psPMR->psPhysHeap) == psDevNode, -+ "PMR invalid for this device", -+ PVRSRV_ERROR_PMR_NOT_PERMITTED); -+ -+ _Ref(psPMR); -+ /* The PMR should be immutable once exported -+ * This allows the importers and exporter to have -+ * the same view of the memory */ -+ PVR_ASSERT(PMR_IsMemLayoutFixed(psPMR)); -+ -+ /* Return the PMR */ -+ *ppsPMR = psPMR; -+ *puiSize = psPMR->uiLogicalSize; -+ *puiAlign = 1ull << psPMR->uiLog2ContiguityGuarantee; -+ return PVRSRV_OK; -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+PVRSRV_ERROR PMRSecureUnimportPMR(PMR *psPMR) -+{ -+ _UnrefAndMaybeDestroy(psPMR); -+ return PVRSRV_OK; -+} -+#endif -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+PVRSRV_ERROR -+PMRStoreRIHandle(PMR *psPMR, -+ void *hRIHandle) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ -+ psPMR->hRIHandle = hRIHandle; -+ return PVRSRV_OK; -+} -+#endif -+ -+static PVRSRV_ERROR -+_PMRAcquireKernelMappingData(PMR *psPMR, -+ size_t uiLogicalOffset, -+ size_t uiSize, -+ void **ppvKernelAddressOut, -+ size_t *puiLengthOut, -+ IMG_HANDLE *phPrivOut, -+ IMG_BOOL bMapSparse) -+{ -+ PVRSRV_ERROR eError; -+ void *pvKernelAddress; -+ IMG_HANDLE hPriv; -+ -+ PVR_ASSERT(psPMR != NULL); -+ -+ if (_PMRIsSparse(psPMR) && !bMapSparse) -+ { -+ /* Mapping of sparse allocations must be signalled. */ -+ return PVRSRV_ERROR_PMR_NOT_PERMITTED; -+ } -+ -+ /* Acquire/Release functions must be overridden in pairs */ -+ if (psPMR->psFuncTab->pfnAcquireKernelMappingData == NULL) -+ { -+ PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData == NULL); -+ -+ /* If PMR implementation does not supply this pair of -+ * functions, it means they do not permit the PMR to be mapped -+ * into kernel memory at all -+ */ -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PERMITTED, e0); -+ } -+ PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL); -+ -+ eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData, -+ uiLogicalOffset, -+ uiSize, -+ &pvKernelAddress, -+ &hPriv, -+ psPMR->uiFlags); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ *ppvKernelAddressOut = pvKernelAddress; -+ if (uiSize == 0) -+ { -+ /* Zero size means map in the whole PMR ... */ -+ *puiLengthOut = (size_t)psPMR->uiLogicalSize; -+ } -+ else if (uiSize > (1 << psPMR->uiLog2ContiguityGuarantee)) -+ { -+ /* ... map in the requested pages ... */ -+ *puiLengthOut = uiSize; -+ } -+ else -+ { -+ /* ... 
otherwise we just map in one page */ -+ *puiLengthOut = 1 << psPMR->uiLog2ContiguityGuarantee; -+ } -+ *phPrivOut = hPriv; -+ -+ return PVRSRV_OK; -+ -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+PVRSRV_ERROR -+PMRAcquireKernelMappingData(PMR *psPMR, -+ size_t uiLogicalOffset, -+ size_t uiSize, -+ void **ppvKernelAddressOut, -+ size_t *puiLengthOut, -+ IMG_HANDLE *phPrivOut) -+{ -+ return _PMRAcquireKernelMappingData(psPMR, -+ uiLogicalOffset, -+ uiSize, -+ ppvKernelAddressOut, -+ puiLengthOut, -+ phPrivOut, -+ IMG_FALSE); -+} -+ -+PVRSRV_ERROR -+PMRAcquireSparseKernelMappingData(PMR *psPMR, -+ size_t uiLogicalOffset, -+ size_t uiSize, -+ void **ppvKernelAddressOut, -+ size_t *puiLengthOut, -+ IMG_HANDLE *phPrivOut) -+{ -+ return _PMRAcquireKernelMappingData(psPMR, -+ uiLogicalOffset, -+ uiSize, -+ ppvKernelAddressOut, -+ puiLengthOut, -+ phPrivOut, -+ IMG_TRUE); -+} -+ -+PVRSRV_ERROR -+PMRReleaseKernelMappingData(PMR *psPMR, -+ IMG_HANDLE hPriv) -+{ -+ PVR_ASSERT (psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL); -+ PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL); -+ -+ psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData, -+ hPriv); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ _PMRLogicalOffsetToPhysicalOffset -+ -+ Translate between the "logical" offset which the upper levels -+ provide and the physical offset which is what the PMR -+ factories works on. -+ -+ As well as returning the physical offset we return the number of -+ bytes remaining till the next chunk and if this chunk is valid. -+ -+ For multi-page operations, upper layers communicate their -+ Log2PageSize else argument is redundant (set to zero). -+ */ -+ -+static void -+_PMRLogicalOffsetToPhysicalOffset(const PMR *psPMR, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32NumOfPages, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_DEVMEM_OFFSET_T *puiPhysicalOffset, -+ IMG_UINT32 *pui32BytesRemain, -+ IMG_BOOL *bValid) -+{ -+ PMR_MAPPING_TABLE *psMappingTable = psPMR->psMappingTable; -+ IMG_DEVMEM_OFFSET_T uiPageSize = 1ULL << ui32Log2PageSize; -+ IMG_DEVMEM_OFFSET_T uiOffset = uiLogicalOffset; -+ IMG_UINT64 ui64ChunkIndex; -+ IMG_UINT32 ui32Remain; -+ IMG_UINT32 idx; -+ -+ /* Must be translating at least a page */ -+ PVR_ASSERT(ui32NumOfPages); -+ -+ if (psMappingTable->ui32NumPhysChunks == psMappingTable->ui32NumVirtChunks) -+ { -+ /* Fast path the common case, as logical and physical offsets are -+ equal we assume the ui32NumOfPages span is also valid */ -+ *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiOffset); -+ puiPhysicalOffset[0] = uiOffset; -+ bValid[0] = IMG_TRUE; -+ -+ if (ui32NumOfPages > 1) -+ { -+ /* initial offset may not be page aligned, round down */ -+ uiOffset &= ~(uiPageSize-1); -+ for (idx=1; idx < ui32NumOfPages; idx++) -+ { -+ uiOffset += uiPageSize; -+ puiPhysicalOffset[idx] = uiOffset; -+ bValid[idx] = IMG_TRUE; -+ } -+ } -+ } -+ else -+ { -+ for (idx=0; idx < ui32NumOfPages; idx++) -+ { -+ ui64ChunkIndex = OSDivide64r64( -+ uiOffset, -+ TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize), -+ &ui32Remain); -+ -+ if (psMappingTable->aui32Translation[ui64ChunkIndex] == TRANSLATION_INVALID) -+ { -+ bValid[idx] = IMG_FALSE; -+ } -+ else -+ { -+ bValid[idx] = IMG_TRUE; -+ } -+ -+ if (idx == 0) -+ { -+ if (ui32Remain == 0) -+ { -+ /* Start of chunk so return the chunk size */ -+ *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize); -+ } -+ else -+ { -+ *pui32BytesRemain = 
TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize - ui32Remain); -+ } -+ -+ puiPhysicalOffset[idx] = (psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize) + ui32Remain; -+ -+ /* initial offset may not be page aligned, round down */ -+ uiOffset &= ~(uiPageSize-1); -+ } -+ else -+ { -+ puiPhysicalOffset[idx] = psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize + ui32Remain; -+ } -+ uiOffset += uiPageSize; -+ } -+ } -+} -+ -+static PVRSRV_ERROR -+_PMR_ReadBytesPhysical(PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset, -+ IMG_UINT8 *pcBuffer, -+ size_t uiBufSz, -+ size_t *puiNumBytes) -+{ -+ PVRSRV_ERROR eError; -+ -+ if (psPMR->psFuncTab->pfnReadBytes != NULL) -+ { -+ /* defer to callback if present */ -+ -+ eError = PMRLockSysPhysAddresses(psPMR); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ eError = psPMR->psFuncTab->pfnReadBytes(psPMR->pvFlavourData, -+ uiPhysicalOffset, -+ pcBuffer, -+ uiBufSz, -+ puiNumBytes); -+ PMRUnlockSysPhysAddresses(psPMR); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ } -+ else if (psPMR->psFuncTab->pfnAcquireKernelMappingData) -+ { -+ /* "default" handler for reading bytes */ -+ -+ IMG_HANDLE hKernelMappingHandle; -+ IMG_UINT8 *pcKernelAddress; -+ -+ eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData, -+ (size_t) uiPhysicalOffset, -+ uiBufSz, -+ (void **)&pcKernelAddress, -+ &hKernelMappingHandle, -+ psPMR->uiFlags); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ /* Use the conservative 'DeviceMemCopy' here because we can't -+ * know if this PMR will be mapped cached. -+ */ -+ -+ OSDeviceMemCopy(&pcBuffer[0], pcKernelAddress, uiBufSz); -+ *puiNumBytes = uiBufSz; -+ -+ psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData, -+ hKernelMappingHandle); -+ } -+ else -+ { -+ OSPanic(); -+ PVR_LOG_GOTO_WITH_ERROR("psPMR->psFuncTab", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); -+ } -+ -+ return PVRSRV_OK; -+ -+ /* Error exit paths follow */ -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ *puiNumBytes = 0; -+ return eError; -+} -+ -+PVRSRV_ERROR -+PMR_ReadBytes(PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT8 *pcBuffer, -+ size_t uiBufSz, -+ size_t *puiNumBytes) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset; -+ size_t uiBytesCopied = 0; -+ -+ /* Check for integer overflow as uiLogicalOffset might come from the client */ -+ if (uiLogicalOffset + uiBufSz < uiLogicalOffset) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize) -+ { -+ uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset); -+ } -+ PVR_ASSERT(uiBufSz > 0); -+ PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize); -+ -+ /* PMR implementations can override this. If they don't, a "default" -+ * handler uses kernel virtual mappings. If the kernel can't -+ * provide a kernel virtual mapping, this function fails. 
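_PMRLogicalOffsetToPhysicalOffset above fast-paths fully backed PMRs (logical equals physical offset) and, for sparse ones, splits the logical offset into a chunk index plus remainder, then looks the chunk up in a translation table that may report "invalid" for unbacked chunks. A simplified standalone version of the sparse case; the constant and parameter names are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK_INVALID 0xFFFFFFFFu

/* Translate a logical offset into a physical offset through a chunk table.
 * Returns false (no physical offset) when the chunk has no backing.
 * 'bytes_to_chunk_end' tells the caller how far it may copy contiguously. */
static bool logical_to_physical(uint64_t logical_offset,
                                uint64_t chunk_size,
                                const uint32_t *translation, uint32_t num_virt_chunks,
                                uint64_t *physical_offset,
                                uint64_t *bytes_to_chunk_end)
{
    uint64_t chunk  = logical_offset / chunk_size;
    uint64_t remain = logical_offset % chunk_size;

    if (chunk >= num_virt_chunks || translation[chunk] == CHUNK_INVALID)
        return false;

    *physical_offset    = (uint64_t)translation[chunk] * chunk_size + remain;
    *bytes_to_chunk_end = chunk_size - remain;
    return true;
}

int main(void)
{
    /* 4 virtual chunks of 64 KiB; only chunks 0 and 2 are backed. */
    const uint32_t table[4] = { 0, CHUNK_INVALID, 1, CHUNK_INVALID };
    uint64_t phys, left;

    if (logical_to_physical(2 * 65536 + 100, 65536, table, 4, &phys, &left))
        printf("phys offset %llu, %llu bytes to chunk end\n",
               (unsigned long long)phys, (unsigned long long)left);

    if (!logical_to_physical(1 * 65536, 65536, table, 4, &phys, &left))
        puts("chunk 1 has no physical backing");
    return 0;
}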
-+ */ -+ PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL || -+ psPMR->psFuncTab->pfnReadBytes != NULL); -+ -+ while (uiBytesCopied != uiBufSz) -+ { -+ IMG_UINT32 ui32Remain; -+ size_t uiBytesToCopy; -+ size_t uiRead; -+ IMG_BOOL bValid; -+ -+ _PMRLogicalOffsetToPhysicalOffset(psPMR, -+ 0, -+ 1, -+ uiLogicalOffset, -+ &uiPhysicalOffset, -+ &ui32Remain, -+ &bValid); -+ /* Copy till either then end of the chunk or end -+ * of the buffer -+ */ -+ uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain); -+ -+ if (bValid) -+ { -+ /* Read the data from the PMR */ -+ eError = _PMR_ReadBytesPhysical(psPMR, -+ uiPhysicalOffset, -+ &pcBuffer[uiBytesCopied], -+ uiBytesToCopy, -+ &uiRead); -+ if ((eError != PVRSRV_OK) || (uiRead != uiBytesToCopy)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to read chunk (eError = %s, uiRead = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")", -+ __func__, -+ PVRSRVGetErrorString(eError), -+ uiRead, -+ uiBytesToCopy)); -+ /* Bail out as soon as we hit an error */ -+ break; -+ } -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Invalid phys offset at logical offset (" IMG_DEVMEM_OFFSET_FMTSPEC ") logical size (" IMG_DEVMEM_OFFSET_FMTSPEC ")", -+ __func__, -+ uiLogicalOffset, -+ psPMR->uiLogicalSize)); -+ /* Fill invalid chunks with 0 */ -+ OSCachedMemSet(&pcBuffer[uiBytesCopied], 0, uiBytesToCopy); -+ uiRead = uiBytesToCopy; -+ eError = PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR; -+ } -+ uiLogicalOffset += uiRead; -+ uiBytesCopied += uiRead; -+ } -+ -+ *puiNumBytes = uiBytesCopied; -+ return eError; -+} -+ -+static PVRSRV_ERROR -+_PMR_WriteBytesPhysical(PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset, -+ IMG_UINT8 *pcBuffer, -+ size_t uiBufSz, -+ size_t *puiNumBytes) -+{ -+ PVRSRV_ERROR eError; -+ -+ if (psPMR->psFuncTab->pfnWriteBytes != NULL) -+ { -+ /* defer to callback if present */ -+ -+ eError = PMRLockSysPhysAddresses(psPMR); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ eError = psPMR->psFuncTab->pfnWriteBytes(psPMR->pvFlavourData, -+ uiPhysicalOffset, -+ pcBuffer, -+ uiBufSz, -+ puiNumBytes); -+ PMRUnlockSysPhysAddresses(psPMR); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ } -+ else if (psPMR->psFuncTab->pfnAcquireKernelMappingData) -+ { -+ /* "default" handler for reading bytes */ -+ -+ IMG_HANDLE hKernelMappingHandle; -+ IMG_UINT8 *pcKernelAddress; -+ -+ eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData, -+ (size_t) uiPhysicalOffset, -+ uiBufSz, -+ (void **)&pcKernelAddress, -+ &hKernelMappingHandle, -+ psPMR->uiFlags); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ /* Use the conservative 'DeviceMemCopy' here because we can't know -+ * if this PMR will be mapped cached. 
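PMR_ReadBytes below copies in chunk-sized steps: each iteration translates the current logical offset, copies at most the smaller of "bytes left in the buffer" and "bytes left in this chunk", and zero-fills the destination when the chunk is unbacked. A compact standalone model of that loop over a made-up three-chunk backing store with one hole:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHUNK_SIZE 8u

/* Backing store: chunk 1 is a hole (no physical pages). */
static const uint8_t backing[3][CHUNK_SIZE] = {
    { 'A','A','A','A','A','A','A','A' }, { 0 }, { 'C','C','C','C','C','C','C','C' }
};
static const bool chunk_valid[3] = { true, false, true };

/* Read 'len' bytes starting at 'offset', zero-filling unbacked chunks. */
static void read_bytes(uint64_t offset, uint8_t *dst, size_t len)
{
    size_t copied = 0;
    while (copied < len) {
        uint64_t chunk   = offset / CHUNK_SIZE;
        uint64_t remain  = CHUNK_SIZE - (offset % CHUNK_SIZE);  /* bytes left in chunk */
        size_t   to_copy = len - copied < remain ? len - copied : (size_t)remain;

        if (chunk_valid[chunk])
            memcpy(dst + copied, &backing[chunk][offset % CHUNK_SIZE], to_copy);
        else
            memset(dst + copied, 0, to_copy);   /* hole: report zeroes */

        offset += to_copy;
        copied += to_copy;
    }
}

int main(void)
{
    uint8_t buf[20];
    read_bytes(4, buf, sizeof(buf));  /* spans chunk 0 tail, the hole, chunk 2 */
    for (size_t i = 0; i < sizeof(buf); i++)
        printf("%c", buf[i] ? buf[i] : '.');
    printf("\n");                     /* prints: AAAA........CCCCCCCC */
    return 0;
}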
-+ */ -+ -+ OSDeviceMemCopy(pcKernelAddress, &pcBuffer[0], uiBufSz); -+ *puiNumBytes = uiBufSz; -+ -+ psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData, -+ hKernelMappingHandle); -+ } -+ else -+ { -+ /* The write callback is optional as it's only required by the -+ * debug tools -+ */ -+ OSPanic(); -+ PVR_LOG_GOTO_WITH_ERROR("psPMR->psFuncTab", eError, PVRSRV_ERROR_PMR_NOT_PERMITTED, e0); -+ } -+ -+ return PVRSRV_OK; -+ -+ /* Error exit paths follow */ -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ *puiNumBytes = 0; -+ return eError; -+} -+ -+PVRSRV_ERROR -+PMR_WriteBytes(PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT8 *pcBuffer, -+ size_t uiBufSz, -+ size_t *puiNumBytes) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset; -+ size_t uiBytesCopied = 0; -+ -+ /* Check for integer overflow as uiLogicalOffset might come from the client */ -+ if (uiLogicalOffset + uiBufSz < uiLogicalOffset) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize) -+ { -+ uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset); -+ } -+ PVR_ASSERT(uiBufSz > 0); -+ PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize); -+ -+ /* PMR implementations can override this. If they don't, a "default" -+ * handler uses kernel virtual mappings. If the kernel can't provide -+ * a kernel virtual mapping, this function fails. -+ */ -+ PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL || -+ psPMR->psFuncTab->pfnWriteBytes != NULL); -+ -+ while (uiBytesCopied != uiBufSz) -+ { -+ IMG_UINT32 ui32Remain; -+ size_t uiBytesToCopy; -+ size_t uiWrite; -+ IMG_BOOL bValid; -+ -+ _PMRLogicalOffsetToPhysicalOffset(psPMR, -+ 0, -+ 1, -+ uiLogicalOffset, -+ &uiPhysicalOffset, -+ &ui32Remain, -+ &bValid); -+ -+ /* Copy till either then end of the chunk or end of the buffer -+ */ -+ uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain); -+ -+ if (bValid) -+ { -+ /* Write the data to the PMR */ -+ eError = _PMR_WriteBytesPhysical(psPMR, -+ uiPhysicalOffset, -+ &pcBuffer[uiBytesCopied], -+ uiBytesToCopy, -+ &uiWrite); -+ if ((eError != PVRSRV_OK) || (uiWrite != uiBytesToCopy)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to read chunk (eError = %s, uiWrite = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")", -+ __func__, -+ PVRSRVGetErrorString(eError), -+ uiWrite, -+ uiBytesToCopy)); -+ /* Bail out as soon as we hit an error */ -+ break; -+ } -+ } -+ else -+ { -+ /* Ignore writes to invalid pages */ -+ uiWrite = uiBytesToCopy; -+ } -+ uiLogicalOffset += uiWrite; -+ uiBytesCopied += uiWrite; -+ } -+ -+ *puiNumBytes = uiBytesCopied; -+ return eError; -+} -+ -+PVRSRV_ERROR -+PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData, PVRSRV_MEMALLOCFLAGS_T uiFlags) -+{ -+ /* if writeable mapping is requested on non-writeable PMR then fail */ -+ PVR_RETURN_IF_FALSE(PVRSRV_CHECK_CPU_WRITEABLE(psPMR->uiFlags) || -+ !PVRSRV_CHECK_CPU_WRITEABLE(uiFlags), -+ PVRSRV_ERROR_PMR_NOT_PERMITTED); -+ -+ if (psPMR->psFuncTab->pfnMMap) -+ { -+ return psPMR->psFuncTab->pfnMMap(psPMR->pvFlavourData, psPMR, pOSMMapData); -+ } -+ -+ return OSMMapPMRGeneric(psPMR, pOSMMapData); -+} -+ -+void -+PMRRefPMR(PMR *psPMR) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ _Ref(psPMR); -+ -+ /* Lock phys addresses if PMR backing was allocated immediately */ -+ if (PVRSRV_CHECK_PHYS_ALLOC_NOW(psPMR->uiFlags)) -+ { -+ PMRLockSysPhysAddresses(psPMR); -+ } -+} -+ -+PVRSRV_ERROR -+PMRUnrefPMR(PMR *psPMR) -+{ -+ /* Unlock phys addresses if PMR backing 
was allocated immediately */ -+ if (PVRSRV_CHECK_PHYS_ALLOC_NOW(psPMR->uiFlags)) -+ { -+ PMRUnlockSysPhysAddresses(psPMR); -+ } -+ -+ _UnrefAndMaybeDestroy(psPMR); -+ return PVRSRV_OK; -+} -+ -+void -+PMRRefPMR2(PMR *psPMR) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ _Ref(psPMR); -+} -+ -+void -+PMRUnrefPMR2(PMR *psPMR) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ _UnrefAndMaybeDestroy(psPMR); -+} -+ -+PVRSRV_ERROR -+PMRUnrefUnlockPMR(PMR *psPMR) -+{ -+ PMRUnlockSysPhysAddresses(psPMR); -+ -+ _UnrefAndMaybeDestroy(psPMR); -+ -+ return PVRSRV_OK; -+} -+ -+#define PMR_CPUMAPCOUNT_MIN 0 -+#define PMR_CPUMAPCOUNT_MAX IMG_INT32_MAX -+void -+PMRCpuMapCountIncr(PMR *psPMR) -+{ -+ IMG_BOOL bSuccess; -+ -+ bSuccess = OSAtomicAddUnless(&psPMR->iCpuMapCount, 1, -+ PMR_CPUMAPCOUNT_MAX); -+ if (!bSuccess) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: iCpuMapCount for PMR: @0x%p (%s) has overflowed.", -+ __func__, -+ psPMR, -+ psPMR->szAnnotation)); -+ OSWarnOn(1); -+ } -+} -+ -+void -+PMRCpuMapCountDecr(PMR *psPMR) -+{ -+ IMG_BOOL bSuccess; -+ -+ bSuccess = OSAtomicSubtractUnless(&psPMR->iCpuMapCount, 1, -+ PMR_CPUMAPCOUNT_MIN); -+ if (!bSuccess) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: iCpuMapCount (now %d) for PMR: @0x%p (%s) has underflowed.", -+ __func__, -+ (IMG_INT32) OSAtomicRead(&psPMR->iCpuMapCount), -+ psPMR, -+ psPMR->szAnnotation)); -+ OSWarnOn(1); -+ } -+} -+ -+static IMG_BOOL -+_PMR_IsMapped(PMR *psPMR) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ -+ return (OSAtomicRead(&psPMR->iCpuMapCount) > 0); -+} -+ -+PVRSRV_DEVICE_NODE * -+PMR_DeviceNode(const PMR *psPMR) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ -+ return PhysHeapDeviceNode(psPMR->psPhysHeap); -+} -+ -+PMR_FLAGS_T -+PMR_Flags(const PMR *psPMR) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ -+ return psPMR->uiFlags; -+} -+ -+IMG_BOOL -+PMR_IsSparse(const PMR *psPMR) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ -+ return _PMRIsSparse(psPMR); -+} -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+IMG_BOOL -+PMR_IsZombie(const PMR *psPMR) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ -+ return BITMASK_HAS(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_IS_ZOMBIE); -+} -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+/* Function that alters the mutability property -+ * of the PMR -+ * Setting it to TRUE makes sure the PMR memory layout -+ * can't be changed through future calls */ -+void -+PMR_SetLayoutFixed(PMR *psPMR, IMG_BOOL bFlag) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ -+ if (bFlag) -+ { -+ BITMASK_SET(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE); -+ } -+ else -+ { -+ BITMASK_UNSET(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE); -+ } -+} -+ -+IMG_BOOL PMR_IsMemLayoutFixed(PMR *psPMR) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ -+ return BITMASK_HAS(psPMR->uiInternalFlags, PMR_FLAG_INTERNAL_NO_LAYOUT_CHANGE); -+} -+ -+void -+PMR_LogicalSize(const PMR *psPMR, -+ IMG_DEVMEM_SIZE_T *puiLogicalSize) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ -+ *puiLogicalSize = psPMR->uiLogicalSize; -+} -+ -+PVRSRV_ERROR -+PMR_PhysicalSize(const PMR *psPMR, -+ IMG_DEVMEM_SIZE_T *puiPhysicalSize) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ -+ /* iLockCount will be > 0 for any backed PMR (backed on demand or not) */ -+ if (OSAtomicRead(&psPMR->iLockCount) > 0) -+ { -+ if (_PMRIsSparse(psPMR)) -+ { -+ *puiPhysicalSize = psPMR->psMappingTable->uiChunkSize * psPMR->psMappingTable->ui32NumPhysChunks; -+ } -+ else -+ { -+ *puiPhysicalSize = psPMR->uiLogicalSize; -+ } -+ } -+ else -+ { -+ *puiPhysicalSize = 0; -+ } -+ return PVRSRV_OK; -+} -+ -+PHYS_HEAP * -+PMR_PhysHeap(const PMR *psPMR) -+{ -+ return psPMR->psPhysHeap; -+} 
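PMRCpuMapCountIncr/Decr above use "add unless at maximum / subtract unless at minimum" atomics so the CPU-mapping count can never silently overflow or underflow; the helpers only warn when saturation is hit. A standalone compare-and-swap sketch of such a saturating counter (the helper name is illustrative, not the OS abstraction used by the driver):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <limits.h>

/* Atomically add 'delta' unless the counter already equals 'limit'.
 * Returns false (and leaves the counter untouched) when saturated. */
static bool atomic_add_unless(atomic_int *v, int delta, int limit)
{
    int cur = atomic_load(v);
    do {
        if (cur == limit)
            return false;
    } while (!atomic_compare_exchange_weak(v, &cur, cur + delta));
    return true;
}

int main(void)
{
    atomic_int map_count = 0;

    if (!atomic_add_unless(&map_count, -1, 0))   /* would underflow */
        puts("warning: map count underflow prevented");

    atomic_add_unless(&map_count, 1, INT_MAX);   /* 0 -> 1 */
    atomic_add_unless(&map_count, -1, 0);        /* 1 -> 0 */
    printf("final map count: %d\n", atomic_load(&map_count));
    return 0;
}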
-+ -+PVRSRV_ERROR -+PMR_IsOffsetValid(const PMR *psPMR, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32NumOfPages, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_BOOL *pbValid) -+{ -+ IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC]; -+ IMG_UINT32 aui32BytesRemain[PMR_MAX_TRANSLATION_STACK_ALLOC]; -+ IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset; -+ IMG_UINT32 *pui32BytesRemain = aui32BytesRemain; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_ASSERT(psPMR != NULL); -+ PVR_ASSERT(psPMR->uiLogicalSize >= uiLogicalOffset); -+ -+ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) -+ { -+ puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T)); -+ PVR_GOTO_IF_NOMEM(puiPhysicalOffset, eError, e0); -+ -+ pui32BytesRemain = OSAllocMem(ui32NumOfPages * sizeof(IMG_UINT32)); -+ PVR_GOTO_IF_NOMEM(pui32BytesRemain, eError, e0); -+ } -+ -+ _PMRLogicalOffsetToPhysicalOffset(psPMR, -+ ui32Log2PageSize, -+ ui32NumOfPages, -+ uiLogicalOffset, -+ puiPhysicalOffset, -+ pui32BytesRemain, -+ pbValid); -+ -+e0: -+ if (puiPhysicalOffset != auiPhysicalOffset && puiPhysicalOffset != NULL) -+ { -+ OSFreeMem(puiPhysicalOffset); -+ } -+ -+ if (pui32BytesRemain != aui32BytesRemain && pui32BytesRemain != NULL) -+ { -+ OSFreeMem(pui32BytesRemain); -+ } -+ -+ return eError; -+} -+ -+PMR_MAPPING_TABLE * -+PMR_GetMappingTable(const PMR *psPMR) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ return psPMR->psMappingTable; -+ -+} -+ -+IMG_UINT32 -+PMR_GetLog2Contiguity(const PMR *psPMR) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ return psPMR->uiLog2ContiguityGuarantee; -+} -+ -+IMG_UINT32 PMRGetMaxChunkCount(PMR *psPMR) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ return (PMR_MAX_SUPPORTED_SIZE >> psPMR->uiLog2ContiguityGuarantee); -+} -+ -+const IMG_CHAR * -+PMR_GetAnnotation(const PMR *psPMR) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ return psPMR->szAnnotation; -+} -+ -+PMR_IMPL_TYPE -+PMR_GetType(const PMR *psPMR) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ return psPMR->eFlavour; -+} -+ -+IMG_CHAR * -+PMR_GetTypeStr(const PMR *psPMR) -+{ -+ static IMG_CHAR *pszFlavour[] = { -+#define X(type) #type -+ PMR_IMPL_TYPES -+#undef X -+ }; -+ -+ if (psPMR->eFlavour >= PMR_TYPE_LAST) -+ { -+ return "INVALID"; -+ } -+ -+ return pszFlavour[psPMR->eFlavour]; -+} -+ -+IMG_INT32 -+PMR_GetRefCount(const PMR *psPMR) -+{ -+ PVR_ASSERT(psPMR != NULL); -+ return OSAtomicRead(&psPMR->iRefCount); -+} -+ -+#if defined(PVRSRV_INTERNAL_IPA_FEATURE_TESTING) -+PVRSRV_ERROR -+PMRGetIPAPolicy(PMR *psPMR, -+ IMG_UINT8 *pui8IPAPolicy) -+{ -+ IMG_UINT32 ui32FlagsIPAPolicy; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pui8IPAPolicy != NULL, "pui8IPAPolicy"); -+ ui32FlagsIPAPolicy = PVRSRV_MEMALLOCFLAG_IPA_POLICY(psPMR->uiFlags); -+ -+ *pui8IPAPolicy = (IMG_UINT8)ui32FlagsIPAPolicy; -+ return PVRSRV_OK; -+} -+#endif /* defined(PVRSRV_INTERNAL_IPA_FEATURE_TESTING) */ -+ -+#if defined(PVRSRV_INTERNAL_IPA_FEATURE_TESTING) -+PVRSRV_ERROR -+PMRGetIPAInfo(PMR *psPMR, IMG_UINT32 *pui32IPAPolicy, IMG_UINT32 *pui32IPAShift, -+ IMG_UINT32 *pui32IPAMask, IMG_UINT32 *pui32IPAFlagsValue) -+{ -+ IMG_UINT32 ui32IPAFlagsPolicyValue; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pui32IPAPolicy != NULL, "pui32IPAPolicy"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pui32IPAShift != NULL, "pui32IPAShift"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pui32IPAMask != NULL, "pui32IPAMask"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pui32IPAFlagsValue != NULL, "pui32IPAFlagsValue"); -+ -+ /* Get the underlying heap-provided default IPA settings (if any) */ -+ *pui32IPAShift = 
PhysHeapGetIPAShift(psPMR->psPhysHeap); -+ *pui32IPAPolicy = PhysHeapGetIPAValue(psPMR->psPhysHeap); -+ *pui32IPAMask = PhysHeapGetIPAMask(psPMR->psPhysHeap); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Shift, Policy, Mask for Heap %p = %d, %d, %d", __func__, -+ psPMR->psPhysHeap, *pui32IPAShift, *pui32IPAPolicy, *pui32IPAMask)); -+ -+ /* Query the current PMR flags settings for current IPA policy */ -+ ui32IPAFlagsPolicyValue = PVRSRV_MEMALLOCFLAG_IPA_POLICY(psPMR->uiFlags); -+ *pui32IPAFlagsValue = ui32IPAFlagsPolicyValue; -+ return PVRSRV_OK; -+} -+#endif /* PVRSRV_INTERNAL_IPA_FEATURE_TESTING */ -+ -+#if defined(PVRSRV_INTERNAL_IPA_FEATURE_TESTING) -+PVRSRV_ERROR -+PMRModifyIPAPolicy(PMR *psPMR, -+ IMG_UINT8 ui8NewIPAPolicy) -+{ -+ IMG_UINT64 ui64IPAFlagsPolicyValue; -+ IMG_UINT32 ui32IPAPolicyMaskValue; -+ -+ ui32IPAPolicyMaskValue = (IMG_UINT32)(PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK >> PVRSRV_MEMALLOCFLAG_IPA_POLICY_OFFSET); -+ -+ /* ui8NewIPAPolicy must be between 0 .. uiMaskValue inclusive */ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(ui8NewIPAPolicy <= ui32IPAPolicyMaskValue, -+ "ui8NewIPAPolicy"); -+ -+ /* Set the appropriate policy bits in the PMR */ -+ ui64IPAFlagsPolicyValue = ((IMG_UINT64)(ui8NewIPAPolicy) << PVRSRV_MEMALLOCFLAG_IPA_POLICY_OFFSET) & -+ PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK; -+ -+ psPMR->uiFlags &= ~PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK; -+ psPMR->uiFlags |= ui64IPAFlagsPolicyValue; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: ui32IPAPolicy Mask = 0x%x, Value = 0x%x", -+ __func__, ui32IPAPolicyMaskValue, (IMG_UINT32)ui8NewIPAPolicy)); -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: uiFlags = 0x%016" IMG_UINT64_FMTSPECx ", ui64IPAFlags = 0x%016" IMG_UINT64_FMTSPECx, __func__, psPMR->uiFlags, ui64IPAFlagsPolicyValue)); -+ -+ return PVRSRV_OK; -+} -+#endif /* defined(PVRSRV_INTERNAL_IPA_FEATURE_TESTING) */ -+ -+/* must have called PMRLockSysPhysAddresses() before calling this! */ -+PVRSRV_ERROR -+PMR_DevPhysAddr(const PMR *psPMR, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32NumOfPages, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_DEV_PHYADDR *psDevAddrPtr, -+ IMG_BOOL *pbValid, -+ PMR_USAGE_TYPE ePMRUsage) -+{ -+ IMG_UINT32 ui32Remain; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC]; -+ IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset; -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ IMG_UINT32 ui32FlagsIPAPolicy; /* Local value for the IPA policy */ -+ IMG_UINT32 ui32IPAHeapShift; /* Phys-heap bit-shift value */ -+ IMG_UINT32 ui32IPAHeapPolicyValue; /* Phys-heap default policy value */ -+ IMG_UINT32 ui32IPAHeapClearMask; /* Phys-heap ClearMask bitmask */ -+ IMG_UINT64 ui64IPAPolicy; /* IPAPolicy value to be applied to physical address(es) */ -+ IMG_UINT64 ui64IPAClearMask; /* IPAClearMask to be applied to physical address(es) */ -+#else -+ PVR_UNREFERENCED_PARAMETER(ePMRUsage); -+#endif -+ -+ PVR_ASSERT(psPMR != NULL); -+ PVR_ASSERT(ui32NumOfPages > 0); -+ PVR_ASSERT(psPMR->psFuncTab->pfnDevPhysAddr != NULL); -+ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 
0 : 1)); -+#endif -+ -+ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) -+ { -+ puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T)); -+ PVR_RETURN_IF_NOMEM(puiPhysicalOffset); -+ } -+ -+ _PMRLogicalOffsetToPhysicalOffset(psPMR, -+ ui32Log2PageSize, -+ ui32NumOfPages, -+ uiLogicalOffset, -+ puiPhysicalOffset, -+ &ui32Remain, -+ pbValid); -+ -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ /* Need to determine the values to pass into the pfnDevPhysAddr -+ * for Intermediate Physical Address settings associated with -+ * this PMR. -+ * If the ui32FlagsIPAPolicy value is non-zero, the value will be used in -+ * preference to the default value specified in the physheap config. -+ * Whichever value is used the associated physheap configuration bit shift and -+ * mask values are passed to the pfnDevPhysAddr PMR factory function to modify -+ * the returned address(es). -+ */ -+ -+ PVR_ASSERT(psPMR->psPhysHeap != NULL); -+ -+ ui32IPAHeapShift = PhysHeapGetIPAShift(psPMR->psPhysHeap); -+ ui32IPAHeapPolicyValue = PhysHeapGetIPAValue(psPMR->psPhysHeap); -+ ui32IPAHeapClearMask = PhysHeapGetIPAMask(psPMR->psPhysHeap); -+#if defined(PVRSRV_INTERNAL_IPA_FEATURE_TESTING) -+ ui32FlagsIPAPolicy = PVRSRV_MEMALLOCFLAG_IPA_POLICY(psPMR->uiFlags); -+ if (ui32FlagsIPAPolicy == 0U) -+ { -+ ui32FlagsIPAPolicy = ui32IPAHeapPolicyValue; -+ } -+#else -+ ui32FlagsIPAPolicy = ui32IPAHeapPolicyValue; /* Use heap default values*/ -+#endif /* PVRSRV_INTERNAL_IPA_FEATURE_TESTING */ -+ /* To handle the 'disabling' of IPAPolicy setting for some callers we -+ * check to see if the ePMRUsage is set to DEVICE_USE. -+ * If so, we simply use the calculated shifts and policy values determined -+ * above. If disabled (ePMRUsage == CPU_USE) we pass 0 values to the PMR -+ * factory which will result in no IPA modification being made to the -+ * phys_heap physical addresses. -+ */ -+ if (unlikely(ePMRUsage == CPU_USE)) -+ { -+ ui32IPAHeapClearMask = 0U; -+ ui32FlagsIPAPolicy = 0U; -+ } -+ ui64IPAPolicy = (IMG_UINT64)ui32FlagsIPAPolicy << ui32IPAHeapShift; -+ ui64IPAClearMask = (IMG_UINT64)ui32IPAHeapClearMask << ui32IPAHeapShift; -+#endif /* PVRSRV_SUPPORT_IPA_FEATURE */ -+ -+ /* Sparse PMR may not always have the first page valid */ -+ eError = psPMR->psFuncTab->pfnDevPhysAddr(psPMR->pvFlavourData, -+ ui32Log2PageSize, -+ ui32NumOfPages, -+ puiPhysicalOffset, -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ ui64IPAPolicy, -+ ui64IPAClearMask, -+#endif -+ pbValid, -+ psDevAddrPtr); -+ PVR_GOTO_IF_ERROR(eError, FreeOffsetArray); -+ -+#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES) -+ /* Currently excluded from the default build because of performance -+ * concerns. -+ * We do not need this part in all systems because the GPU has the same -+ * address view of system RAM as the CPU. 
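In the IPA-enabled build described above, the per-heap policy value and clear mask are pre-shifted into 64-bit operands before being handed to the factory's address callback, and both are zeroed for CPU-use lookups so no modification is applied. The patch does not show how the factory applies them; one plausible clear-then-set application is sketched below, with made-up bit positions (bits 56..59):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Apply an "intermediate physical address" policy to a device address:
 * clear the policy field, then OR in the desired value. Shift and mask
 * values are illustrative only. */
static uint64_t apply_ipa_policy(uint64_t dev_addr, uint32_t policy,
                                 uint32_t clear_mask, uint32_t shift, bool cpu_use)
{
    if (cpu_use)                 /* CPU lookups get the raw address */
        return dev_addr;

    uint64_t clear = (uint64_t)clear_mask << shift;
    uint64_t value = (uint64_t)policy     << shift;
    return (dev_addr & ~clear) | value;
}

int main(void)
{
    uint64_t addr = 0x0000000080001000ull;
    uint64_t gpu  = apply_ipa_policy(addr, 0x3, 0xF, 56, false);
    uint64_t cpu  = apply_ipa_policy(addr, 0x3, 0xF, 56, true);

    printf("device view: 0x%016llx\n", (unsigned long long)gpu);
    printf("cpu view:    0x%016llx\n", (unsigned long long)cpu);
    return 0;
}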
-+ * Alternatively this could be implemented as part of the PMR-factories -+ * directly */ -+ if (PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_UMA || -+ PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_DMA) -+ { -+ IMG_UINT32 i; -+ IMG_DEV_PHYADDR sDevPAddrCorrected; -+ -+ /* Copy the translated addresses to the correct array */ -+ for (i = 0; i < ui32NumOfPages; i++) -+ { -+ PhysHeapCpuPAddrToDevPAddr(psPMR->psPhysHeap, -+ 1, -+ &sDevPAddrCorrected, -+ (IMG_CPU_PHYADDR *) &psDevAddrPtr[i]); -+ psDevAddrPtr[i].uiAddr = sDevPAddrCorrected.uiAddr; -+ } -+ } -+#endif -+ -+ -+FreeOffsetArray: -+ if (puiPhysicalOffset != auiPhysicalOffset) -+ { -+ OSFreeMem(puiPhysicalOffset); -+ } -+ -+ return eError; -+} -+ -+/* must have called PMRLockSysPhysAddresses() before calling this! */ -+PVRSRV_ERROR -+PMR_CpuPhysAddr(const PMR *psPMR, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32NumOfPages, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_CPU_PHYADDR *psCpuAddrPtr, -+ IMG_BOOL *pbValid) -+{ -+ IMG_UINT32 idx; -+ PVRSRV_ERROR eError; -+ IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; -+ IMG_DEV_PHYADDR *psDevPAddr = asDevPAddr; -+ -+ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) -+ { -+ psDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR)); -+ PVR_GOTO_IF_NOMEM(psDevPAddr, eError, e0); -+ } -+ -+ eError = PMR_DevPhysAddr(psPMR, ui32Log2PageSize, ui32NumOfPages, -+ uiLogicalOffset, psDevPAddr, pbValid, CPU_USE); -+ PVR_GOTO_IF_ERROR(eError, e1); -+ -+ if (_PMRIsSparse(psPMR)) -+ { -+ /* Loop over each page. -+ * If Dev addr valid, populate the CPU addr from the Dev addr -+ */ -+ for (idx = 0; idx < ui32NumOfPages; idx++) -+ { -+ if (pbValid[idx]) -+ { -+ PhysHeapDevPAddrToCpuPAddr(psPMR->psPhysHeap, 1, &psCpuAddrPtr[idx], &psDevPAddr[idx]); -+ } -+ } -+ } -+ else -+ { -+ /* In this case all addrs will be valid, so we can block translate */ -+ PhysHeapDevPAddrToCpuPAddr(psPMR->psPhysHeap, ui32NumOfPages, psCpuAddrPtr, psDevPAddr); -+ } -+ -+ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) -+ { -+ OSFreeMem(psDevPAddr); -+ } -+ -+ return PVRSRV_OK; -+e1: -+ if (psDevPAddr != asDevPAddr) -+ { -+ OSFreeMem(psDevPAddr); -+ } -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *pai32AllocIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pai32FreeIndices, -+ IMG_UINT32 uiSparseFlags) -+{ -+ PVRSRV_ERROR eError; -+ -+ if (PMR_IsMemLayoutFixed(psPMR) || _PMR_IsMapped(psPMR)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: This PMR layout cannot be changed - PMR_IsMemLayoutFixed()=%c, _PMR_IsMapped()=%c", -+ __func__, -+ PMR_IsMemLayoutFixed(psPMR) ? 'Y' : 'n', -+ _PMR_IsMapped(psPMR) ? 
'Y' : 'n')); -+ return PVRSRV_ERROR_PMR_NOT_PERMITTED; -+ } -+ -+ if (NULL == psPMR->psFuncTab->pfnChangeSparseMem) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: This type of sparse PMR cannot be changed.", -+ __func__)); -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+ } -+ -+ eError = psPMR->psFuncTab->pfnChangeSparseMem(psPMR->pvFlavourData, -+ psPMR, -+ ui32AllocPageCount, -+ pai32AllocIndices, -+ ui32FreePageCount, -+ pai32FreeIndices, -+ uiSparseFlags); -+ if (eError != PVRSRV_OK) -+ { -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ if (eError == PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES) -+ { -+ PVRSRVStatsUpdateOOMStat(NULL, -+ PMR_DeviceNode(psPMR), -+ PVRSRV_DEVICE_STAT_TYPE_OOM_PHYSMEM_COUNT, -+ OSGetCurrentClientProcessIDKM()); -+ } -+#endif -+ goto e0; -+ } -+ -+#if defined(PDUMP) -+ { -+ IMG_BOOL bInitialise = IMG_FALSE; -+ IMG_UINT8 ui8InitValue = 0; -+ -+ if (PVRSRV_CHECK_ZERO_ON_ALLOC(PMR_Flags(psPMR))) -+ { -+ bInitialise = IMG_TRUE; -+ } -+ else if (PVRSRV_CHECK_POISON_ON_ALLOC(PMR_Flags(psPMR))) -+ { -+ ui8InitValue = (IMG_UINT8)PVRSRV_POISON_ON_ALLOC_VALUE; -+ bInitialise = IMG_TRUE; -+ } -+ -+ PDumpPMRChangeSparsePMR(psPMR, -+ 1 << psPMR->uiLog2ContiguityGuarantee, -+ ui32AllocPageCount, -+ pai32AllocIndices, -+ ui32FreePageCount, -+ pai32FreeIndices, -+ bInitialise, -+ ui8InitValue, -+ &psPMR->hPDumpAllocHandle); -+ } -+ -+#endif -+ -+e0: -+ return eError; -+} -+ -+ -+PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR, -+ IMG_UINT64 sCpuVAddrBase, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *pai32AllocIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pai32FreeIndices) -+{ -+ PVRSRV_ERROR eError; -+ -+ if ((NULL == psPMR->psFuncTab) || -+ (NULL == psPMR->psFuncTab->pfnChangeSparseMemCPUMap)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: This type of sparse PMR cannot be changed.", -+ __func__)); -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+ } -+ -+ if (PMR_IsMemLayoutFixed(psPMR)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: This PMR layout cannot be changed", -+ __func__)); -+ return PVRSRV_ERROR_PMR_NOT_PERMITTED; -+ } -+ -+ eError = psPMR->psFuncTab->pfnChangeSparseMemCPUMap(psPMR->pvFlavourData, -+ psPMR, -+ sCpuVAddrBase, -+ ui32AllocPageCount, -+ pai32AllocIndices, -+ ui32FreePageCount, -+ pai32FreeIndices); -+ -+ return eError; -+} -+ -+ -+#if defined(PDUMP) -+ -+static PVRSRV_ERROR -+_PMR_PDumpSymbolicAddrPhysical(const PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset, -+ IMG_UINT32 ui32MemspaceNameLen, -+ IMG_CHAR *pszMemspaceName, -+ IMG_UINT32 ui32SymbolicAddrLen, -+ IMG_CHAR *pszSymbolicAddr, -+ IMG_DEVMEM_OFFSET_T *puiNewOffset, -+ IMG_DEVMEM_OFFSET_T *puiNextSymName) -+{ -+ PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPMR->psPhysHeap); -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+#if defined(SUPPORT_SECURITY_VALIDATION) -+ if (PVRSRV_CHECK_PHYS_HEAP(FW_CODE, psPMR->uiFlags) || -+ PVRSRV_CHECK_PHYS_HEAP(FW_PRIV_DATA, psPMR->uiFlags) || -+ PVRSRV_CHECK_PHYS_HEAP(GPU_SECURE, psPMR->uiFlags)) -+ { -+ OSSNPrintf(pszMemspaceName, ui32MemspaceNameLen, PMR_MEMSPACE_FMTSPEC, -+ psPMR->pszPDumpDefaultMemspaceName); -+ } -+ else -+#endif -+ if (DevmemCPUCacheCoherency(psDevNode, psPMR->uiFlags) || -+ DevmemDeviceCacheCoherency(psDevNode, psPMR->uiFlags)) -+ { -+ OSSNPrintf(pszMemspaceName, -+ ui32MemspaceNameLen, -+ PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC, -+ psPMR->pszPDumpDefaultMemspaceName); -+ } -+ else -+ { -+ OSSNPrintf(pszMemspaceName, ui32MemspaceNameLen, PMR_MEMSPACE_FMTSPEC, -+ psPMR->pszPDumpDefaultMemspaceName); -+ } -+ -+ OSSNPrintf(pszSymbolicAddr, -+ 
ui32SymbolicAddrLen, -+ PMR_SYMBOLICADDR_FMTSPEC, -+ PMR_IsSparse(psPMR) ? PMR_SPARSE_PREFIX : PMR_DEFAULT_PREFIX, -+ psPMR->uiSerialNum, -+ uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR), -+ psPMR->szAnnotation); -+ -+ if (pszSymbolicAddr) -+ { -+ PDumpMakeStringValid(pszSymbolicAddr, OSStringLength(pszSymbolicAddr)); -+ } -+ -+ -+ *puiNewOffset = uiPhysicalOffset & ((1 << PMR_GetLog2Contiguity(psPMR))-1); -+ *puiNextSymName = (IMG_DEVMEM_OFFSET_T) (((uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR))+1) -+ << PMR_GetLog2Contiguity(psPMR)); -+ -+ return eError; -+} -+ -+ -+PVRSRV_ERROR -+PMR_PDumpSymbolicAddr(const PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT32 ui32MemspaceNameLen, -+ IMG_CHAR *pszMemspaceName, -+ IMG_UINT32 ui32SymbolicAddrLen, -+ IMG_CHAR *pszSymbolicAddr, -+ IMG_DEVMEM_OFFSET_T *puiNewOffset, -+ IMG_DEVMEM_OFFSET_T *puiNextSymName -+) -+{ -+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset; -+ IMG_UINT32 ui32Remain; -+ IMG_BOOL bValid; -+ -+ PVR_ASSERT(uiLogicalOffset < psPMR->uiLogicalSize); -+ -+ /* Confirm that the device node's ui32InternalID matches the bound -+ * PDump device stored* in PVRSRV_DATA. -+ */ -+ if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR))) -+ { -+ return PVRSRV_OK; -+ } -+ -+ _PMRLogicalOffsetToPhysicalOffset(psPMR, -+ 0, -+ 1, -+ uiLogicalOffset, -+ &uiPhysicalOffset, -+ &ui32Remain, -+ &bValid); -+ -+ if (!bValid) -+ { -+ /* For sparse allocations, for a given logical address, there -+ * may not be a physical memory backing, the virtual range can -+ * still be valid. -+ */ -+ uiPhysicalOffset = uiLogicalOffset; -+ } -+ -+ return _PMR_PDumpSymbolicAddrPhysical(psPMR, -+ uiPhysicalOffset, -+ ui32MemspaceNameLen, -+ pszMemspaceName, -+ ui32SymbolicAddrLen, -+ pszSymbolicAddr, -+ puiNewOffset, -+ puiNextSymName); -+} -+ -+/*! -+ * @brief Writes a WRW command to the script2 buffer, representing a -+ * dword write to a physical allocation. Size is always -+ * sizeof(IMG_UINT32). -+ * @param psPMR - PMR object representing allocation -+ * @param uiLogicalOffset - offset -+ * @param ui32Value - value to write -+ * @param uiPDumpFlags - pdump flags -+ * @return PVRSRV_ERROR -+ */ -+PVRSRV_ERROR -+PMRPDumpLoadMemValue32(PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT32 ui32Value, -+ PDUMP_FLAGS_T uiPDumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; -+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; -+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; -+ IMG_DEVMEM_OFFSET_T uiNextSymName; -+ IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee; -+ -+ /* Confirm that the device node's ui32InternalID matches the bound -+ * PDump device stored* in PVRSRV_DATA. 
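The symbolic-address helper above decomposes a physical offset into "block + offset within block" form: the block index is the offset shifted down by the contiguity guarantee, the new offset is the remainder within that block, and the next symbolic name starts at the following block boundary. A small standalone sketch of that arithmetic; the printed name format is simplified and not the real PDump one:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t log2_contig = 12;                 /* 4 KiB blocks      */
    const uint64_t phys_offset = 0x6789;             /* offset into PMR   */
    const uint64_t serial      = 42;                 /* PMR serial number */

    uint64_t block         = phys_offset >> log2_contig;
    uint64_t offset_in_blk = phys_offset & ((1ull << log2_contig) - 1);
    uint64_t next_block_at = (block + 1) << log2_contig;

    /* e.g. "PMR42_BLK6" + offset 0x789, next name begins at 0x7000 */
    printf("symbol PMR%llu_BLK%llu, offset 0x%llx, next symbol at 0x%llx\n",
           (unsigned long long)serial, (unsigned long long)block,
           (unsigned long long)offset_in_blk, (unsigned long long)next_block_at);
    return 0;
}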
-+ */ -+ if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR))) -+ { -+ return PVRSRV_OK; -+ } -+ -+ PVR_ASSERT(uiLogicalOffset + sizeof(ui32Value) <= psPMR->uiLogicalSize); -+ /* Especially make sure to not cross a block boundary */ -+ PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value)) -+ <= uiPMRPageSize)); -+ -+ eError = PMRLockSysPhysAddresses(psPMR); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ /* Get the symbolic address of the PMR */ -+ eError = PMR_PDumpSymbolicAddr(psPMR, -+ uiLogicalOffset, -+ sizeof(aszMemspaceName), -+ &aszMemspaceName[0], -+ sizeof(aszSymbolicName), -+ &aszSymbolicName[0], -+ &uiPDumpSymbolicOffset, -+ &uiNextSymName); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ /* Write the WRW script command */ -+ eError = PDumpPMRWRW32(PMR_DeviceNode(psPMR), -+ aszMemspaceName, -+ aszSymbolicName, -+ uiPDumpSymbolicOffset, -+ ui32Value, -+ uiPDumpFlags); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ eError = PMRUnlockSysPhysAddresses(psPMR); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+ * @brief Writes a RDW followed by a WRW command to the pdump script to perform -+ * an effective copy from memory to memory. Memory copied is of size -+ * sizeof(IMG_UINT32) -+ * -+ * @param psDstPMR - PMR object representing allocation of destination -+ * @param uiDstLogicalOffset - destination offset -+ * @param psSrcPMR - PMR object representing allocation of source -+ * @param uiSrcLogicalOffset - source offset -+ * @param pszTmpVar - pdump temporary variable used during the copy -+ * @param uiPDumpFlags - pdump flags -+ * @return PVRSRV_ERROR -+ */ -+PVRSRV_ERROR -+PMRPDumpCopyMem32(PMR *psDstPMR, -+ IMG_DEVMEM_OFFSET_T uiDstLogicalOffset, -+ PMR *psSrcPMR, -+ IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset, -+ const IMG_CHAR *pszTmpVar, -+ PDUMP_FLAGS_T uiPDumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; -+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; -+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; -+ IMG_DEVMEM_OFFSET_T uiNextSymName; -+ const IMG_UINT32 uiDstPMRPageSize = 1 << psDstPMR->uiLog2ContiguityGuarantee; -+ const IMG_UINT32 uiSrcPMRPageSize = 1 << psSrcPMR->uiLog2ContiguityGuarantee; -+ -+ PVR_ASSERT(uiSrcLogicalOffset + sizeof(IMG_UINT32) <= psSrcPMR->uiLogicalSize); -+ /* Especially make sure to not cross a block boundary */ -+ PVR_ASSERT(( ((uiSrcLogicalOffset & (uiSrcPMRPageSize-1)) + sizeof(IMG_UINT32)) -+ <= uiSrcPMRPageSize)); -+ -+ PVR_ASSERT(uiDstLogicalOffset + sizeof(IMG_UINT32) <= psDstPMR->uiLogicalSize); -+ /* Especially make sure to not cross a block boundary */ -+ PVR_ASSERT(( ((uiDstLogicalOffset & (uiDstPMRPageSize-1)) + sizeof(IMG_UINT32)) -+ <= uiDstPMRPageSize)); -+ -+ eError = PMRLockSysPhysAddresses(psSrcPMR); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ /* Get the symbolic address of the source PMR */ -+ eError = PMR_PDumpSymbolicAddr(psSrcPMR, -+ uiSrcLogicalOffset, -+ sizeof(aszMemspaceName), -+ &aszMemspaceName[0], -+ sizeof(aszSymbolicName), -+ &aszSymbolicName[0], -+ &uiPDumpSymbolicOffset, -+ &uiNextSymName); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ /* Issue PDump read command */ -+ eError = PDumpPMRRDW32MemToInternalVar(PMR_DeviceNode(psSrcPMR), -+ pszTmpVar, -+ aszMemspaceName, -+ aszSymbolicName, -+ uiPDumpSymbolicOffset, -+ uiPDumpFlags); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ eError = PMRUnlockSysPhysAddresses(psSrcPMR); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ -+ -+ eError = PMRLockSysPhysAddresses(psDstPMR); -+ PVR_ASSERT(eError == 
PVRSRV_OK); -+ -+ -+ /* Get the symbolic address of the destination PMR */ -+ eError = PMR_PDumpSymbolicAddr(psDstPMR, -+ uiDstLogicalOffset, -+ sizeof(aszMemspaceName), -+ &aszMemspaceName[0], -+ sizeof(aszSymbolicName), -+ &aszSymbolicName[0], -+ &uiPDumpSymbolicOffset, -+ &uiNextSymName); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ -+ /* Write the WRW script command */ -+ eError = PDumpPMRWRW32InternalVarToMem(PMR_DeviceNode(psDstPMR), -+ aszMemspaceName, -+ aszSymbolicName, -+ uiPDumpSymbolicOffset, -+ pszTmpVar, -+ uiPDumpFlags); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ -+ eError = PMRUnlockSysPhysAddresses(psDstPMR); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+ * @brief Writes a WRW64 command to the script2 buffer, representing a -+ * dword write to a physical allocation. Size is always -+ * sizeof(IMG_UINT64). -+ * @param psPMR - PMR object representing allocation -+ * @param uiLogicalOffset - offset -+ * @param ui64Value - value to write -+ * @param uiPDumpFlags - pdump flags -+ * @return PVRSRV_ERROR -+ */ -+PVRSRV_ERROR -+PMRPDumpLoadMemValue64(PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT64 ui64Value, -+ PDUMP_FLAGS_T uiPDumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; -+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; -+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; -+ IMG_DEVMEM_OFFSET_T uiNextSymName; -+ IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee; -+ -+ /* Confirm that the device node's ui32InternalID matches the bound -+ * PDump device stored in PVRSRV_DATA. -+ */ -+ if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR))) -+ { -+ return PVRSRV_OK; -+ } -+ -+ PVR_ASSERT(uiLogicalOffset + sizeof(ui64Value) <= psPMR->uiLogicalSize); -+ /* Especially make sure to not cross a block boundary */ -+ PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui64Value)) -+ <= uiPMRPageSize)); -+ -+ eError = PMRLockSysPhysAddresses(psPMR); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ /* Get the symbolic address of the PMR */ -+ eError = PMR_PDumpSymbolicAddr(psPMR, -+ uiLogicalOffset, -+ sizeof(aszMemspaceName), -+ &aszMemspaceName[0], -+ sizeof(aszSymbolicName), -+ &aszSymbolicName[0], -+ &uiPDumpSymbolicOffset, -+ &uiNextSymName); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ /* Write the WRW script command */ -+ eError = PDumpPMRWRW64(PMR_DeviceNode(psPMR), -+ aszMemspaceName, -+ aszSymbolicName, -+ uiPDumpSymbolicOffset, -+ ui64Value, -+ uiPDumpFlags); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ eError = PMRUnlockSysPhysAddresses(psPMR); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+ * @brief Writes a RDW64 followed by a WRW64 command to the pdump script to -+ * perform an effective copy from memory to memory. 
Memory copied is of -+ * size sizeof(IMG_UINT32) -+ * -+ * @param psDstPMR - PMR object representing allocation of destination -+ * @param uiDstLogicalOffset - destination offset -+ * @param psSrcPMR - PMR object representing allocation of source -+ * @param uiSrcLogicalOffset - source offset -+ * @param pszTmpVar - pdump temporary variable used during the copy -+ * @param uiPDumpFlags - pdump flags -+ * @return PVRSRV_ERROR -+ */ -+PVRSRV_ERROR -+PMRPDumpCopyMem64(PMR *psDstPMR, -+ IMG_DEVMEM_OFFSET_T uiDstLogicalOffset, -+ PMR *psSrcPMR, -+ IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset, -+ const IMG_CHAR *pszTmpVar, -+ PDUMP_FLAGS_T uiPDumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; -+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; -+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; -+ IMG_DEVMEM_OFFSET_T uiNextSymName; -+ const IMG_UINT32 uiDstPMRPageSize = 1 << psDstPMR->uiLog2ContiguityGuarantee; -+ const IMG_UINT32 uiSrcPMRPageSize = 1 << psSrcPMR->uiLog2ContiguityGuarantee; -+ -+ PVR_ASSERT(uiSrcLogicalOffset + sizeof(IMG_UINT32) <= psSrcPMR->uiLogicalSize); -+ /* Especially make sure to not cross a block boundary */ -+ PVR_ASSERT(( ((uiSrcLogicalOffset & (uiSrcPMRPageSize-1)) + sizeof(IMG_UINT32)) -+ <= uiSrcPMRPageSize)); -+ -+ PVR_ASSERT(uiDstLogicalOffset + sizeof(IMG_UINT32) <= psDstPMR->uiLogicalSize); -+ /* Especially make sure to not cross a block boundary */ -+ PVR_ASSERT(( ((uiDstLogicalOffset & (uiDstPMRPageSize-1)) + sizeof(IMG_UINT32)) -+ <= uiDstPMRPageSize)); -+ -+ eError = PMRLockSysPhysAddresses(psSrcPMR); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ /* Get the symbolic address of the source PMR */ -+ eError = PMR_PDumpSymbolicAddr(psSrcPMR, -+ uiSrcLogicalOffset, -+ sizeof(aszMemspaceName), -+ &aszMemspaceName[0], -+ sizeof(aszSymbolicName), -+ &aszSymbolicName[0], -+ &uiPDumpSymbolicOffset, -+ &uiNextSymName); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ /* Issue PDump read command */ -+ eError = PDumpPMRRDW64MemToInternalVar(PMR_DeviceNode(psSrcPMR), -+ pszTmpVar, -+ aszMemspaceName, -+ aszSymbolicName, -+ uiPDumpSymbolicOffset, -+ uiPDumpFlags); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ eError = PMRUnlockSysPhysAddresses(psSrcPMR); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ -+ -+ eError = PMRLockSysPhysAddresses(psDstPMR); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ -+ /* Get the symbolic address of the destination PMR */ -+ eError = PMR_PDumpSymbolicAddr(psDstPMR, -+ uiDstLogicalOffset, -+ sizeof(aszMemspaceName), -+ &aszMemspaceName[0], -+ sizeof(aszSymbolicName), -+ &aszSymbolicName[0], -+ &uiPDumpSymbolicOffset, -+ &uiNextSymName); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ -+ /* Write the WRW script command */ -+ eError = PDumpPMRWRW64InternalVarToMem(PMR_DeviceNode(psDstPMR), -+ aszMemspaceName, -+ aszSymbolicName, -+ uiPDumpSymbolicOffset, -+ pszTmpVar, -+ uiPDumpFlags); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ -+ eError = PMRUnlockSysPhysAddresses(psDstPMR); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+ * @brief PDumps the contents of the given allocation. -+ * If bZero is IMG_TRUE then the zero page in the parameter stream is used -+ * as the source of data, rather than the allocation's actual backing. 
-+ * @param psPMR - PMR object representing allocation -+ * @param uiLogicalOffset - Offset to write at -+ * @param uiSize - Number of bytes to write -+ * @param uiPDumpFlags - PDump flags -+ * @param bZero - Use the PDump zero page as the source -+ * @return PVRSRV_ERROR -+ */ -+PVRSRV_ERROR -+PMRPDumpLoadMem(PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ PDUMP_FLAGS_T uiPDumpFlags, -+ IMG_BOOL bZero) -+{ -+ PVRSRV_ERROR eError; -+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; -+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; -+ IMG_DEVMEM_OFFSET_T uiOutOffset; -+ IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset; -+ IMG_DEVMEM_OFFSET_T uiNextSymName = 0; -+ const IMG_CHAR *pszParamStreamFileName; -+ PDUMP_FILEOFFSET_T uiParamStreamFileOffset; -+ -+ /* required when !bZero */ -+#define PMR_MAX_PDUMP_BUFSZ (1<<21) -+ IMG_CHAR aszParamStreamFilename[PDUMP_PARAM_MAX_FILE_NAME]; -+ IMG_UINT8 *pcBuffer = NULL; -+ size_t uiBufSz; -+ IMG_BOOL bValid; -+ IMG_DEVMEM_SIZE_T uiSizeRemain = uiSize; -+ PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR); -+ -+ /* Confirm that the device node's ui32InternalID matches the bound -+ * PDump device stored* in PVRSRV_DATA. -+ */ -+ if (!PDumpIsDevicePermitted(psDevNode)) -+ { -+ return PVRSRV_OK; -+ } -+ -+ /* Forcibly initialise the name to a 'NULL' 0-length string */ -+ aszParamStreamFilename[0] = '\0'; -+ -+ PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize); -+ -+ /* Check if pdump client is connected */ -+ if (!PDumpCheckFlagsWrite(psDevNode, -+ PDUMP_FLAGS_CONTINUOUS)) -+ { -+ /* Dumping of memory in Pdump buffer will be rejected for no client connected case. -+ * So return early and save reading of data from PMR. */ -+ return PVRSRV_OK; -+ } -+ -+ /* Get the correct PDump stream file name */ -+ if (bZero) -+ { -+ PDumpCommentWithFlags(psDevNode, -+ uiPDumpFlags, -+ "Zeroing allocation (" IMG_DEVMEM_SIZE_FMTSPEC " bytes)", -+ uiSize); -+ -+ /* get the zero page information. it is constant for this function */ -+ PDumpGetParameterZeroPageInfo(&uiParamStreamFileOffset, -+ &uiBufSz, -+ &pszParamStreamFileName); -+ } -+ else -+ { -+ -+ uiBufSz = 1 << PMR_GetLog2Contiguity(psPMR); -+ PVR_ASSERT((1 << PMR_GetLog2Contiguity(psPMR)) <= PMR_MAX_PDUMP_BUFSZ); -+ -+ pcBuffer = OSAllocMem(uiBufSz); -+ -+ PVR_LOG_RETURN_IF_NOMEM(pcBuffer, "OSAllocMem"); -+ -+ eError = PMRLockSysPhysAddresses(psPMR); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ pszParamStreamFileName = aszParamStreamFilename; -+ } -+ -+ /* Loop over all touched symbolic addresses of the PMR and -+ * emit LDBs to load the contents. */ -+ while (uiCurrentOffset < (uiLogicalOffset + uiSize)) -+ { -+ /* Get the correct symbolic name for the current offset */ -+ eError = PMR_PDumpSymbolicAddr(psPMR, -+ uiCurrentOffset, -+ sizeof(aszMemspaceName), -+ &aszMemspaceName[0], -+ sizeof(aszSymbolicName), -+ &aszSymbolicName[0], -+ &uiOutOffset, -+ &uiNextSymName); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ PVR_ASSERT((uiNextSymName - uiCurrentOffset) <= uiBufSz); -+ -+ PMR_IsOffsetValid(psPMR, -+ 0, -+ 1, -+ uiCurrentOffset, -+ &bValid); -+ -+ /* Either just LDB the zeros or read from the PMR and store that -+ * in the pdump stream */ -+ if (bValid) -+ { -+ size_t uiNumBytes; -+ IMG_BOOL bOk2Write = IMG_TRUE; -+ -+ if (bZero) -+ { -+ uiNumBytes = MIN(uiSizeRemain, uiNextSymName - uiCurrentOffset); -+ } -+ else -+ { -+ IMG_DEVMEM_OFFSET_T uiReadOffset; -+ uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ? 
-+ uiLogicalOffset + uiSize - uiCurrentOffset : -+ uiNextSymName - uiCurrentOffset); -+ -+ eError = PMR_ReadBytes(psPMR, -+ uiCurrentOffset, -+ pcBuffer, -+ uiReadOffset, -+ &uiNumBytes); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ eError = PDumpWriteParameterBlob(psDevNode, -+ pcBuffer, -+ uiNumBytes, -+ uiPDumpFlags, -+ &aszParamStreamFilename[0], -+ sizeof(aszParamStreamFilename), -+ &uiParamStreamFileOffset); -+ -+ if (eError == PVRSRV_ERROR_PDUMP_NOT_ALLOWED) -+ { -+ /* Write to parameter file prevented under the flags and -+ * current state of the driver so skip further writes. -+ */ -+ eError = PVRSRV_OK; -+ bOk2Write = IMG_FALSE; /* Do *NOT* write anything */ -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PDUMP_ERROR(psDevNode, -+ eError, "Failed to write PMR memory to parameter file"); -+ bOk2Write = IMG_FALSE; /* Do *NOT* write anything */ -+ } -+ } -+ -+ if (bOk2Write) -+ { -+ /* Emit the LDB command to the current symbolic address */ -+ eError = PDumpPMRLDB(psDevNode, -+ aszMemspaceName, -+ aszSymbolicName, -+ uiOutOffset, -+ uiNumBytes, -+ pszParamStreamFileName, -+ uiParamStreamFileOffset, -+ uiPDumpFlags); -+ } -+ uiSizeRemain = uiSizeRemain - uiNumBytes; -+ } -+ uiCurrentOffset = uiNextSymName; -+ } -+ -+ if (!bZero) -+ { -+ eError = PMRUnlockSysPhysAddresses(psPMR); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ OSFreeMem(pcBuffer); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+ -+ -+PVRSRV_ERROR -+PMRPDumpSaveToFile(const PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 uiArraySize, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 uiFileOffset) -+{ -+ PVRSRV_ERROR eError; -+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; -+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; -+ IMG_DEVMEM_OFFSET_T uiOutOffset; -+ IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset; -+ IMG_DEVMEM_OFFSET_T uiNextSymName = 0; -+ IMG_UINT32 uiCurrentFileOffset = uiFileOffset; -+ -+ PVR_UNREFERENCED_PARAMETER(uiArraySize); -+ -+ /* Confirm that the device node's ui32InternalID matches the bound -+ * PDump device stored* in PVRSRV_DATA. -+ */ -+ if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR))) -+ { -+ return PVRSRV_OK; -+ } -+ -+ PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize); -+ -+ while (uiCurrentOffset < (uiLogicalOffset + uiSize)) -+ { -+ IMG_DEVMEM_OFFSET_T uiReadOffset; -+ -+ eError = PMR_PDumpSymbolicAddr(psPMR, -+ uiCurrentOffset, -+ sizeof(aszMemspaceName), -+ &aszMemspaceName[0], -+ sizeof(aszSymbolicName), -+ &aszSymbolicName[0], -+ &uiOutOffset, -+ &uiNextSymName); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ PVR_ASSERT(uiNextSymName <= psPMR->uiLogicalSize); -+ -+ uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ? 
-+ uiLogicalOffset + uiSize - uiCurrentOffset : -+ uiNextSymName - uiCurrentOffset); -+ -+ eError = PDumpPMRSAB(PMR_DeviceNode(psPMR), -+ aszMemspaceName, -+ aszSymbolicName, -+ uiOutOffset, -+ uiReadOffset, -+ pszFilename, -+ uiCurrentFileOffset); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ uiCurrentFileOffset += uiNextSymName - uiCurrentOffset; -+ uiCurrentOffset = uiNextSymName; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+PMRPDumpPol32(const PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ PDUMP_FLAGS_T uiPDumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; -+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; -+ IMG_DEVMEM_OFFSET_T uiPDumpOffset; -+ IMG_DEVMEM_OFFSET_T uiNextSymName; -+ IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee; -+ -+ /* Confirm that the device node's ui32InternalID matches the bound -+ * PDump device stored* in PVRSRV_DATA. -+ */ -+ if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR))) -+ { -+ return PVRSRV_OK; -+ } -+ -+ /* Make sure to not cross a block boundary */ -+ PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value)) -+ <= uiPMRPageSize)); -+ -+ eError = PMR_PDumpSymbolicAddr(psPMR, -+ uiLogicalOffset, -+ sizeof(aszMemspaceName), -+ &aszMemspaceName[0], -+ sizeof(aszSymbolicName), -+ &aszSymbolicName[0], -+ &uiPDumpOffset, -+ &uiNextSymName); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+#define _MEMPOLL_DELAY (1000) -+#define _MEMPOLL_COUNT (2000000000 / _MEMPOLL_DELAY) -+ -+ eError = PDumpPMRPOL(PMR_DeviceNode(psPMR), -+ aszMemspaceName, -+ aszSymbolicName, -+ uiPDumpOffset, -+ ui32Value, -+ ui32Mask, -+ eOperator, -+ _MEMPOLL_COUNT, -+ _MEMPOLL_DELAY, -+ uiPDumpFlags); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ return PVRSRV_OK; -+ -+ /* Error exit paths follow */ -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+PVRSRV_ERROR -+PMRPDumpCheck32(const PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ PDUMP_FLAGS_T uiPDumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; -+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; -+ IMG_DEVMEM_OFFSET_T uiPDumpOffset; -+ IMG_DEVMEM_OFFSET_T uiNextSymName; -+ IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee; -+ -+ /* Confirm that the device node's ui32InternalID matches the bound -+ * PDump device stored* in PVRSRV_DATA. 
-+ */ -+ if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR))) -+ { -+ return PVRSRV_OK; -+ } -+ -+ /* Make sure to not cross a block boundary */ -+ PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value)) -+ < uiPMRPageSize)); -+ -+ eError = PMR_PDumpSymbolicAddr(psPMR, -+ uiLogicalOffset, -+ sizeof(aszMemspaceName), -+ &aszMemspaceName[0], -+ sizeof(aszSymbolicName), -+ &aszSymbolicName[0], -+ &uiPDumpOffset, -+ &uiNextSymName); -+ if (eError != PVRSRV_OK) -+ { -+ goto e0; -+ } -+ -+ eError = PDumpPMRPOL(PMR_DeviceNode(psPMR), -+ aszMemspaceName, -+ aszSymbolicName, -+ uiPDumpOffset, -+ ui32Value, -+ ui32Mask, -+ eOperator, -+ 1, -+ 1, -+ uiPDumpFlags); -+ if (eError != PVRSRV_OK) -+ { -+ goto e0; -+ } -+ -+ return PVRSRV_OK; -+ -+ /* Error exit paths follow */ -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+PVRSRV_ERROR -+PMRPDumpCBP(const PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiReadOffset, -+ IMG_DEVMEM_OFFSET_T uiWriteOffset, -+ IMG_DEVMEM_SIZE_T uiPacketSize, -+ IMG_DEVMEM_SIZE_T uiBufferSize) -+{ -+ PVRSRV_ERROR eError; -+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; -+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; -+ IMG_DEVMEM_OFFSET_T uiPDumpOffset; -+ IMG_DEVMEM_OFFSET_T uiNextSymName; -+ -+ /* Confirm that the device node's ui32InternalID matches the bound -+ * PDump device stored* in PVRSRV_DATA. -+ */ -+ if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR))) -+ { -+ return PVRSRV_OK; -+ } -+ -+ eError = PMR_PDumpSymbolicAddr(psPMR, -+ uiReadOffset, -+ sizeof(aszMemspaceName), -+ &aszMemspaceName[0], -+ sizeof(aszSymbolicName), -+ &aszSymbolicName[0], -+ &uiPDumpOffset, -+ &uiNextSymName); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ eError = PDumpPMRCBP(PMR_DeviceNode(psPMR), -+ aszMemspaceName, -+ aszSymbolicName, -+ uiPDumpOffset, -+ uiWriteOffset, -+ uiPacketSize, -+ uiBufferSize); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ return PVRSRV_OK; -+ -+ /* Error exit paths follow */ -+e0: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+static void -+PDumpPMRChangeSparsePMR(PMR *psPMR, -+ IMG_UINT32 uiBlockSize, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *pai32AllocIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pai32FreeIndices, -+ IMG_BOOL bInitialise, -+ IMG_UINT8 ui8InitValue, -+ IMG_HANDLE *phPDumpAllocInfoOut) -+{ -+ PVRSRV_ERROR eError; -+ IMG_HANDLE *phPDumpAllocInfo = (IMG_HANDLE*) psPMR->hPDumpAllocHandle; -+ -+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; -+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; -+ IMG_DEVMEM_OFFSET_T uiOffset; -+ IMG_DEVMEM_OFFSET_T uiNextSymName; -+ IMG_UINT32 i, uiIndex; -+ PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR); -+ -+ /* Remove pages from the PMR */ -+ for (i = 0; i < ui32FreePageCount; i++) -+ { -+ uiIndex = pai32FreeIndices[i]; -+ -+ eError = PDumpFree(psDevNode, -+ phPDumpAllocInfo[uiIndex]); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ phPDumpAllocInfo[uiIndex] = NULL; -+ } -+ -+ /* Add new pages to the PMR */ -+ for (i = 0; i < ui32AllocPageCount; i++) -+ { -+ uiIndex = pai32AllocIndices[i]; -+ -+ PVR_ASSERT(phPDumpAllocInfo[uiIndex] == NULL); -+ -+ eError = PMR_PDumpSymbolicAddr(psPMR, -+ uiIndex * uiBlockSize, -+ sizeof(aszMemspaceName), -+ &aszMemspaceName[0], -+ sizeof(aszSymbolicName), -+ &aszSymbolicName[0], -+ &uiOffset, -+ &uiNextSymName); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ eError = PDumpMalloc(psDevNode, -+ aszMemspaceName, -+ aszSymbolicName, -+ uiBlockSize, -+ uiBlockSize, -+ bInitialise, -+ 
ui8InitValue, -+ &phPDumpAllocInfo[uiIndex], -+ PDUMP_NONE); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ } -+ -+ /* (IMG_HANDLE) <- (IMG_HANDLE*) */ -+ *phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo; -+} -+ -+static void -+PDumpPMRFreePMR(PMR *psPMR, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_DEVMEM_ALIGN_T uiBlockSize, -+ IMG_UINT32 uiLog2Contiguity, -+ IMG_HANDLE hPDumpAllocationInfoHandle) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 i; -+ -+ /* (IMG_HANDLE*) <- (IMG_HANDLE) */ -+ IMG_HANDLE *ahPDumpAllocHandleArray = (IMG_HANDLE*) hPDumpAllocationInfoHandle; -+ -+ PDUMP_LOCK(PDUMP_FLAGS_NONE); -+ -+ for (i = 0; i < psPMR->uiNumPDumpBlocks; i++) -+ { -+ if (ahPDumpAllocHandleArray[i] != NULL) -+ { -+ eError = PDumpFreeUnlocked(PMR_DeviceNode(psPMR), -+ ahPDumpAllocHandleArray[i]); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ ahPDumpAllocHandleArray[i] = NULL; -+ } -+ } -+ -+ PDUMP_UNLOCK(PDUMP_FLAGS_NONE); -+ OSFreeMem(ahPDumpAllocHandleArray); -+} -+ -+static void -+PDumpPMRMallocPMR(PMR *psPMR, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32ChunkSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *puiMappingTable, -+ IMG_UINT32 uiLog2Contiguity, -+ IMG_BOOL bInitialise, -+ IMG_UINT8 ui8InitValue, -+ IMG_HANDLE *phPDumpAllocInfoOut, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ IMG_HANDLE *phPDumpAllocInfo; -+ -+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; -+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; -+ IMG_DEVMEM_OFFSET_T uiOffset; -+ IMG_DEVMEM_OFFSET_T uiNextSymName; -+ IMG_UINT32 uiNumPhysBlocks; -+ IMG_UINT32 uiNumVirtBlocks; -+ IMG_UINT32 i, uiIndex; -+ -+ if (PMR_IsSparse(psPMR)) -+ { -+ uiNumPhysBlocks = (ui32ChunkSize * ui32NumPhysChunks) >> uiLog2Contiguity; -+ /* Make sure we did not cut off anything */ -+ PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == (ui32ChunkSize * ui32NumPhysChunks)); -+ } -+ else -+ { -+ uiNumPhysBlocks = uiSize >> uiLog2Contiguity; -+ /* Make sure we did not cut off anything */ -+ PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == uiSize); -+ } -+ -+ uiNumVirtBlocks = uiSize >> uiLog2Contiguity; -+ PVR_ASSERT(uiNumVirtBlocks << uiLog2Contiguity == uiSize); -+ -+ psPMR->uiNumPDumpBlocks = uiNumVirtBlocks; -+ -+ phPDumpAllocInfo = (IMG_HANDLE*) OSAllocZMem(uiNumVirtBlocks * sizeof(IMG_HANDLE)); -+ -+ PDUMP_LOCK(ui32PDumpFlags); -+ -+ for (i = 0; i < uiNumPhysBlocks; i++) -+ { -+ uiIndex = PMR_IsSparse(psPMR) ? puiMappingTable[i] : i; -+ -+ eError = PMR_PDumpSymbolicAddr(psPMR, -+ uiIndex * ui32ChunkSize, -+ sizeof(aszMemspaceName), -+ &aszMemspaceName[0], -+ sizeof(aszSymbolicName), -+ &aszSymbolicName[0], -+ &uiOffset, -+ &uiNextSymName); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ eError = PDumpMallocUnlocked(PMR_DeviceNode(psPMR), -+ aszMemspaceName, -+ aszSymbolicName, -+ ui32ChunkSize, -+ ui32ChunkSize, -+ bInitialise, -+ ui8InitValue, -+ &phPDumpAllocInfo[uiIndex], -+ ui32PDumpFlags); -+ PVR_LOG_RETURN_VOID_IF_FALSE((eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE), -+ "PDumpPMRMalloc PDump capture bound to other device"); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ } -+ -+ PDUMP_UNLOCK(ui32PDumpFlags); -+ -+ /* (IMG_HANDLE) <- (IMG_HANDLE*) */ -+ *phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo; -+ -+} -+#endif /* PDUMP */ -+ -+ -+void *PMRGetPrivateData(const PMR *psPMR, -+ const PMR_IMPL_FUNCTAB *psFuncTab) -+{ -+ return (psFuncTab == psPMR->psFuncTab) ? 
psPMR->pvFlavourData : NULL; -+} -+ -+#define PMR_PM_WORD_SIZE 4 -+ -+PVRSRV_ERROR -+PMRWritePMPageList(/* Target PMR, offset, and length */ -+ PMR *psPageListPMR, -+ IMG_DEVMEM_OFFSET_T uiTableOffset, -+ IMG_DEVMEM_SIZE_T uiTableLength, -+ /* Referenced PMR, and "page" granularity */ -+ PMR *psReferencePMR, -+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize, -+ PMR_PAGELIST **ppsPageList) -+{ -+ PVRSRV_ERROR eError; -+ IMG_DEVMEM_SIZE_T uiWordSize; -+ IMG_UINT32 uiNumPages; -+ IMG_UINT32 uiPageIndex; -+ PMR_FLAGS_T uiFlags = psPageListPMR->uiFlags; -+ PMR_PAGELIST *psPageList; -+#if defined(PDUMP) -+ IMG_CHAR aszTableEntryMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; -+ IMG_CHAR aszTableEntrySymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; -+ IMG_DEVMEM_OFFSET_T uiTableEntryPDumpOffset; -+ IMG_CHAR aszPageMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; -+ IMG_CHAR aszPageSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; -+ IMG_DEVMEM_OFFSET_T uiPagePDumpOffset; -+ IMG_DEVMEM_OFFSET_T uiNextSymName; -+#endif -+#if !defined(NO_HARDWARE) -+ IMG_UINT32 uiPageListPageSize = 1 << psPageListPMR->uiLog2ContiguityGuarantee; -+ IMG_UINT64 uiPageListPMRPage = 0; -+ IMG_UINT64 uiPrevPageListPMRPage = 0; -+ IMG_HANDLE hPrivData = NULL; -+ void *pvKernAddr = NULL; -+ IMG_UINT32 *pui32DataPtr = NULL; -+ IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; -+ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; -+ IMG_DEV_PHYADDR *pasDevAddrPtr; -+ IMG_BOOL *pbPageIsValid; -+#endif -+ -+ uiWordSize = PMR_PM_WORD_SIZE; -+ -+ /* check we're being asked to write the same number of 4-byte units as there are pages */ -+ uiNumPages = (IMG_UINT32)(psReferencePMR->uiLogicalSize >> uiLog2PageSize); -+ -+ if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psReferencePMR->uiLogicalSize) -+ { -+ /* Strictly speaking, it's possible to provoke this error in two ways: -+ (i) if it's not a whole multiple of the page size; or -+ (ii) if there are more than 4 billion pages. -+ The latter is unlikely. :) but the check is required in order to justify the cast. 
-+ */ -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, return_error); -+ } -+ uiWordSize = (IMG_UINT32)uiTableLength / uiNumPages; -+ if (uiNumPages * uiWordSize != uiTableLength) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, return_error); -+ } -+ -+ /* Check for integer overflow */ -+ PVR_GOTO_IF_INVALID_PARAM(uiTableOffset + uiTableLength > uiTableOffset, eError, return_error); -+ /* Check we're not being asked to write off the end of the PMR */ -+ PVR_GOTO_IF_INVALID_PARAM(uiTableOffset + uiTableLength <= psPageListPMR->uiLogicalSize, eError, return_error); -+ -+ /* the PMR into which we are writing must not be user CPU mappable: */ -+ if (PVRSRV_CHECK_CPU_READABLE(uiFlags) || PVRSRV_CHECK_CPU_WRITEABLE(uiFlags)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Masked flags = 0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC, -+ (PMR_FLAGS_T)(uiFlags & (PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)))); -+ PVR_DPF((PVR_DBG_ERROR, -+ "Page list PMR allows CPU mapping (0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC ")", -+ uiFlags)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS, return_error); -+ } -+ -+ if (_PMRIsSparse(psPageListPMR)) -+ { -+ PVR_LOG_GOTO_WITH_ERROR("psPageListPMR", eError, PVRSRV_ERROR_INVALID_PARAMS, return_error); -+ } -+ -+ if (_PMRIsSparse(psReferencePMR)) -+ { -+ PVR_LOG_GOTO_WITH_ERROR("psReferencePMR", eError, PVRSRV_ERROR_INVALID_PARAMS, return_error); -+ } -+ -+ psPageList = OSAllocMem(sizeof(PMR_PAGELIST)); -+ PVR_LOG_GOTO_IF_NOMEM(psPageList, eError, return_error); -+ -+ psPageList->psReferencePMR = psReferencePMR; -+ -+ /* Need to lock down the physical addresses of the reference PMR */ -+ /* N.B. This also checks that the requested "contiguity" is achievable */ -+ eError = PMRLockSysPhysAddresses(psReferencePMR); -+ PVR_GOTO_IF_ERROR(eError, free_page_list); -+ -+#if !defined(NO_HARDWARE) -+ if (uiNumPages > PMR_MAX_TRANSLATION_STACK_ALLOC) -+ { -+ pasDevAddrPtr = OSAllocMem(uiNumPages * sizeof(IMG_DEV_PHYADDR)); -+ PVR_LOG_GOTO_IF_NOMEM(pasDevAddrPtr, eError, unlock_phys_addrs); -+ -+ pbPageIsValid = OSAllocMem(uiNumPages * sizeof(IMG_BOOL)); -+ if (pbPageIsValid == NULL) -+ { -+ /* Clean-up before exit */ -+ OSFreeMem(pasDevAddrPtr); -+ -+ PVR_LOG_GOTO_WITH_ERROR("pbPageIsValid", eError, PVRSRV_ERROR_OUT_OF_MEMORY, free_devaddr_array); -+ } -+ } -+ else -+ { -+ pasDevAddrPtr = asDevPAddr; -+ pbPageIsValid = abValid; -+ } -+ -+ eError = PMR_DevPhysAddr(psReferencePMR, uiLog2PageSize, uiNumPages, 0, -+ pasDevAddrPtr, pbPageIsValid, DEVICE_USE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PMR_DevPhysAddr", free_valid_array); -+#endif -+ -+ for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++) -+ { -+ IMG_DEVMEM_OFFSET_T uiPMROffset = uiTableOffset + (uiWordSize * uiPageIndex); -+ -+#if defined(PDUMP) -+ eError = PMR_PDumpSymbolicAddr(psPageListPMR, -+ uiPMROffset, -+ sizeof(aszTableEntryMemspaceName), -+ &aszTableEntryMemspaceName[0], -+ sizeof(aszTableEntrySymbolicName), -+ &aszTableEntrySymbolicName[0], -+ &uiTableEntryPDumpOffset, -+ &uiNextSymName); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ eError = PMR_PDumpSymbolicAddr(psReferencePMR, -+ (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize, -+ sizeof(aszPageMemspaceName), -+ &aszPageMemspaceName[0], -+ sizeof(aszPageSymbolicName), -+ &aszPageSymbolicName[0], -+ &uiPagePDumpOffset, -+ &uiNextSymName); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ eError = PDumpWriteShiftedMaskedValue(PMR_DeviceNode(psReferencePMR), -+ /* destination */ -+ 
aszTableEntryMemspaceName, -+ aszTableEntrySymbolicName, -+ uiTableEntryPDumpOffset, -+ /* source */ -+ aszPageMemspaceName, -+ aszPageSymbolicName, -+ uiPagePDumpOffset, -+ /* shift right */ -+ uiLog2PageSize, -+ /* shift left */ -+ 0, -+ /* mask */ -+ 0xffffffff, -+ /* word size */ -+ uiWordSize, -+ /* flags */ -+ PDUMP_FLAGS_CONTINUOUS); -+ PVR_ASSERT(eError == PVRSRV_OK); -+#else -+ PVR_UNREFERENCED_PARAMETER(uiPMROffset); -+#endif -+ -+#if !defined(NO_HARDWARE) -+ -+ /* -+ We check for sparse PMR's at function entry, but as we can, -+ check that every page is valid -+ */ -+ PVR_ASSERT(pbPageIsValid[uiPageIndex]); -+ PVR_ASSERT(pasDevAddrPtr[uiPageIndex].uiAddr != 0); -+ PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0); -+ -+ uiPageListPMRPage = uiPMROffset >> psReferencePMR->uiLog2ContiguityGuarantee; -+ -+ if ((pui32DataPtr == NULL) || (uiPageListPMRPage != uiPrevPageListPMRPage)) -+ { -+ size_t uiMappingOffset = uiPMROffset & (~(uiPageListPageSize - 1)); -+ size_t uiMappedSize; -+ -+ /* If we already had a page list mapped, we need to unmap it... */ -+ if (pui32DataPtr != NULL) -+ { -+ PMRReleaseKernelMappingData(psPageListPMR, hPrivData); -+ } -+ -+ eError = PMRAcquireKernelMappingData(psPageListPMR, -+ uiMappingOffset, -+ uiPageListPageSize, -+ &pvKernAddr, -+ &uiMappedSize, -+ &hPrivData); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Error mapping page list PMR page (%" IMG_UINT64_FMTSPEC ") into kernel (%d)", -+ uiPageListPMRPage, eError)); -+ goto free_valid_array; -+ } -+ -+ uiPrevPageListPMRPage = uiPageListPMRPage; -+ PVR_ASSERT(uiMappedSize >= uiPageListPageSize); -+ PVR_ASSERT(pvKernAddr != NULL); -+ -+ pui32DataPtr = IMG_OFFSET_ADDR(pvKernAddr, (uiPMROffset & (uiPageListPageSize - 1))); -+ } -+ -+ PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0); -+ -+ /* Write the physical page index into the page list PMR */ -+ *pui32DataPtr++ = TRUNCATE_64BITS_TO_32BITS(pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize); -+ -+ /* Last page so unmap */ -+ if (uiPageIndex == (uiNumPages - 1)) -+ { -+ PMRReleaseKernelMappingData(psPageListPMR, hPrivData); -+ } -+#endif -+ } -+ -+ OSWriteMemoryBarrier(NULL); -+ -+#if !defined(NO_HARDWARE) -+ if (pasDevAddrPtr != asDevPAddr) -+ { -+ OSFreeMem(pbPageIsValid); -+ OSFreeMem(pasDevAddrPtr); -+ } -+#endif -+ *ppsPageList = psPageList; -+ return PVRSRV_OK; -+ -+ /* Error exit paths follow */ -+#if !defined(NO_HARDWARE) -+ -+free_valid_array: -+ if (pbPageIsValid != abValid) -+ { -+ OSFreeMem(pbPageIsValid); -+ } -+ -+free_devaddr_array: -+ if (pasDevAddrPtr != asDevPAddr) -+ { -+ OSFreeMem(pasDevAddrPtr); -+ } -+ -+unlock_phys_addrs: -+ PMRUnlockSysPhysAddresses(psReferencePMR); -+#endif -+ -+free_page_list: -+ OSFreeMem(psPageList); -+ -+return_error: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+ -+PVRSRV_ERROR -+PMRUnwritePMPageList(PMR_PAGELIST *psPageList) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = PMRUnlockSysPhysAddresses(psPageList->psReferencePMR); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ OSFreeMem(psPageList); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+PMRZeroingPMR(PMR *psPMR, -+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize) -+{ -+ IMG_UINT32 uiNumPages; -+ IMG_UINT32 uiPageIndex; -+ IMG_UINT32 ui32PageSize = 1 << uiLog2PageSize; -+ IMG_HANDLE hPrivData = NULL; -+ void *pvKernAddr = NULL; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ size_t uiMappedSize; -+ -+ PVR_ASSERT(psPMR); -+ -+ /* Calculate number of pages in 
this PMR */ -+ uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize); -+ -+ /* Verify the logical Size is a multiple or the physical page size */ -+ if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: PMR is not a multiple of %u", -+ __func__, -+ ui32PageSize)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, MultiPage_Error); -+ } -+ -+ if (_PMRIsSparse(psPMR)) -+ { -+ PVR_LOG_GOTO_WITH_ERROR("psPMR", eError, PVRSRV_ERROR_INVALID_PARAMS, Sparse_Error); -+ } -+ -+ /* Scan through all pages of the PMR */ -+ for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++) -+ { -+ /* map the physical page (for a given PMR offset) into kernel space */ -+ eError = PMRAcquireKernelMappingData(psPMR, -+ (size_t)uiPageIndex << uiLog2PageSize, -+ ui32PageSize, -+ &pvKernAddr, -+ &uiMappedSize, -+ &hPrivData); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", AcquireKernelMapping_Error); -+ -+ /* ensure the mapped page size is the same as the physical page size */ -+ if (uiMappedSize != ui32PageSize) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Physical Page size = 0x%08x, Size of Mapping = 0x%016" IMG_UINT64_FMTSPECx, -+ __func__, -+ ui32PageSize, -+ (IMG_UINT64)uiMappedSize)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, MappingSize_Error); -+ } -+ -+ /* Use the conservative 'DeviceMemSet' here because we can't know -+ * if this PMR will be mapped cached. -+ */ -+ OSDeviceMemSet(pvKernAddr, 0, ui32PageSize); -+ -+ /* release mapping */ -+ PMRReleaseKernelMappingData(psPMR, hPrivData); -+ -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Zeroing PMR %p done (num pages %u, page size %u)", -+ __func__, -+ psPMR, -+ uiNumPages, -+ ui32PageSize)); -+ -+ return PVRSRV_OK; -+ -+ -+ /* Error handling */ -+ -+MappingSize_Error: -+ PMRReleaseKernelMappingData(psPMR, hPrivData); -+ -+AcquireKernelMapping_Error: -+Sparse_Error: -+MultiPage_Error: -+ -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+PVRSRV_ERROR -+PMRDumpPageList(PMR *psPMR, -+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize) -+{ -+ IMG_DEV_PHYADDR sDevAddrPtr; -+ IMG_UINT32 uiNumPages; -+ IMG_UINT32 uiPageIndex; -+ IMG_BOOL bPageIsValid; -+ IMG_UINT32 ui32Col = 16; -+ IMG_UINT32 ui32SizePerCol = 11; -+ IMG_UINT32 ui32ByteCount = 0; -+ IMG_CHAR pszBuffer[16 /* ui32Col */ * 11 /* ui32SizePerCol */ + 1]; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ /* Get number of pages */ -+ uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize); -+ -+ /* Verify the logical Size is a multiple or the physical page size */ -+ if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: PMR is not a multiple of %" IMG_UINT64_FMTSPEC, -+ __func__, (IMG_UINT64) (1ULL << uiLog2PageSize))); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, MultiPage_Error); -+ } -+ -+ if (_PMRIsSparse(psPMR)) -+ { -+ PVR_LOG_GOTO_WITH_ERROR("psPMR", eError, PVRSRV_ERROR_INVALID_PARAMS, Sparse_Error); -+ } -+ -+ PVR_LOG((" PMR %p, Number of pages %u, Log2PageSize %d", psPMR, uiNumPages, uiLog2PageSize)); -+ -+ /* Print the address of the physical pages */ -+ for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++) -+ { -+ /* Get Device physical Address */ -+ eError = PMR_DevPhysAddr(psPMR, -+ uiLog2PageSize, -+ 1, -+ (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize, -+ &sDevAddrPtr, -+ &bPageIsValid, -+ DEVICE_USE); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: PMR %p failed to get 
DevPhysAddr with error %u", -+ __func__, -+ psPMR, -+ eError)); -+ goto DevPhysAddr_Error; -+ } -+ -+ ui32ByteCount += OSSNPrintf(pszBuffer + ui32ByteCount, ui32SizePerCol + 1, "%08x ", (IMG_UINT32)(sDevAddrPtr.uiAddr >> uiLog2PageSize)); -+ PVR_ASSERT(ui32ByteCount < ui32Col * ui32SizePerCol); -+ -+ if (uiPageIndex % ui32Col == ui32Col-1) -+ { -+ PVR_LOG((" Phys Page: %s", pszBuffer)); -+ ui32ByteCount = 0; -+ } -+ } -+ if (ui32ByteCount > 0) -+ { -+ PVR_LOG((" Phys Page: %s", pszBuffer)); -+ } -+ -+ return PVRSRV_OK; -+ -+ /* Error handling */ -+DevPhysAddr_Error: -+Sparse_Error: -+MultiPage_Error: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+PVRSRV_ERROR -+PMRInit(void) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Singleton PMR context already initialised */ -+ if (_gsSingletonPMRContext.bModuleInitialised) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR, out); -+ } -+ -+ eError = OSLockCreate(&_gsSingletonPMRContext.hLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", out); -+ -+ _gsSingletonPMRContext.uiNextSerialNum = 1; -+ -+ _gsSingletonPMRContext.uiNextKey = 0x8300f001 * (uintptr_t)&_gsSingletonPMRContext; -+ -+ _gsSingletonPMRContext.bModuleInitialised = IMG_TRUE; -+ -+ OSAtomicWrite(&_gsSingletonPMRContext.uiNumLivePMRs, 0); -+ -+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) -+ eError = MMapStatsInit(); -+ PVR_LOG_GOTO_IF_ERROR(eError, "MMapStatsInit", destroy_context_lock); -+#endif -+ -+ return PVRSRV_OK; -+ -+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) -+destroy_context_lock: -+ OSLockDestroy(_gsSingletonPMRContext.hLock); -+ _gsSingletonPMRContext.hLock = NULL; -+#endif -+out: -+ return eError; -+} -+ -+PVRSRV_ERROR -+PMRDeInit(void) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) -+ { -+ goto out; -+ } -+ -+ /* Singleton PMR context is not initialised */ -+ if (!_gsSingletonPMRContext.bModuleInitialised) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR, out); -+ } -+ -+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) -+ MMapStatsDeInit(); -+#endif -+ -+ if (OSAtomicRead(&_gsSingletonPMRContext.uiNumLivePMRs) != 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Error: %d live PMRs remain", -+ __func__, -+ OSAtomicRead(&_gsSingletonPMRContext.uiNumLivePMRs))); -+ PVR_DPF((PVR_DBG_ERROR, "%s: This is an unrecoverable error; a subsequent crash is inevitable", -+ __func__)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR, out); -+ } -+ -+ if (_gsSingletonPMRContext.hLock != NULL) -+ { -+ OSLockDestroy(_gsSingletonPMRContext.hLock); -+ } -+ -+ _gsSingletonPMRContext.bModuleInitialised = IMG_FALSE; -+ -+out: -+ PVR_ASSERT(eError == PVRSRV_OK); -+ return eError; -+} -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+PVRSRV_ERROR -+PMRInitDevice(PPVRSRV_DEVICE_NODE psDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = OSLockCreate(&psDeviceNode->hPMRZombieListLock); -+ PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); -+ -+ dllist_init(&psDeviceNode->sPMRZombieList); -+ psDeviceNode->uiPMRZombieCount = 0; -+ psDeviceNode->uiPMRZombieCountInCleanup = 0; -+ -+ return PVRSRV_OK; -+} -+ -+void -+PMRFreeZombies(PPVRSRV_DEVICE_NODE psDeviceNode) -+{ -+ DECLARE_DLLIST(sZombieList); -+ DLLIST_NODE *psThis, *psNext; -+ IMG_INT32 uiZombieCount; -+ -+ OSLockAcquire(psDeviceNode->hPMRZombieListLock); -+ /* Move the zombie list to a local copy. The original list will become -+ * an empty list. 
This will allow us to process the list without holding -+ * the list lock. */ -+ dllist_replace_head(&psDeviceNode->sPMRZombieList, &sZombieList); -+ uiZombieCount = psDeviceNode->uiPMRZombieCount; -+ psDeviceNode->uiPMRZombieCount = 0; -+ OSLockRelease(psDeviceNode->hPMRZombieListLock); -+ -+ dllist_foreach_node(&sZombieList, psThis, psNext) -+ { -+ PMR *psPMR = IMG_CONTAINER_OF(psThis, PMR, sZombieNode); -+ -+ dllist_remove_node(&psPMR->sZombieNode); -+ -+ _PMRDestroy(psPMR); -+ -+ uiZombieCount--; -+ } -+ -+ PVR_ASSERT(uiZombieCount == 0); -+} -+ -+void -+PMRDumpZombies(PPVRSRV_DEVICE_NODE psDeviceNode) -+{ -+ DLLIST_NODE *psThis, *psNext; -+ -+ OSLockAcquire(psDeviceNode->hPMRZombieListLock); -+ -+ PVR_DPF((PVR_DBG_ERROR, "Items in zombie list: %u", -+ psDeviceNode->uiPMRZombieCount)); -+ -+ dllist_foreach_node(&psDeviceNode->sPMRZombieList, psThis, psNext) -+ { -+ PMR *psPMR = IMG_CONTAINER_OF(psThis, PMR, sZombieNode); -+ -+ PVR_DPF((PVR_DBG_ERROR, "PMR = %px, Flavour = %s, Annotation: %s", -+ psPMR, PMR_GetTypeStr(psPMR), PMR_GetAnnotation(psPMR))); -+ } -+ -+ OSLockRelease(psDeviceNode->hPMRZombieListLock); -+} -+ -+void -+PMRDeInitDevice(PPVRSRV_DEVICE_NODE psDeviceNode) -+{ -+ PMRFreeZombies(psDeviceNode); -+ -+ OSLockDestroy(psDeviceNode->hPMRZombieListLock); -+} -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -diff --git a/drivers/gpu/drm/img-rogue/pmr.h b/drivers/gpu/drm/img-rogue/pmr.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pmr.h -@@ -0,0 +1,1137 @@ -+/*************************************************************************/ /*! -+@File -+@Title Physmem (PMR) abstraction -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Part of the memory management. This module is responsible for -+ the "PMR" abstraction. A PMR (Physical Memory Resource) -+ represents some unit of physical memory which is -+ allocated/freed/mapped/unmapped as an indivisible unit -+ (higher software levels provide an abstraction above that -+ to deal with dividing this down into smaller manageable units). -+ Importantly, this module knows nothing of virtual memory, or -+ of MMUs etc., with one excusable exception. We have the -+ concept of a "page size", which really means nothing in -+ physical memory, but represents a "contiguity quantum" such -+ that the higher level modules which map this memory are able -+ to verify that it matches the needs of the page size for the -+ virtual realm into which it is being mapped. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef SRVSRV_PMR_H -+#define SRVSRV_PMR_H -+ -+/* include/ */ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pdumpdefs.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_memallocflags.h" -+#include "devicemem_typedefs.h" /* Required for export DEVMEM_EXPORTCOOKIE */ -+ -+/* services/include */ -+#include "pdump.h" -+#include "physheap.h" -+ -+/* services/server/include/ */ -+#include "pmr_impl.h" -+#include "opaque_types.h" -+ -+#define PMR_MAX_TRANSLATION_STACK_ALLOC (32) -+ -+/* Maximum size PMR can have is 8G of memory */ -+#define PMR_MAX_SUPPORTED_SIZE (0x200000000ULL) -+/* Max number of pages in a PMR at 4k page size */ -+#define PMR_MAX_SUPPORTED_4K_PAGE_COUNT (PMR_MAX_SUPPORTED_SIZE >> 12ULL) -+ -+typedef IMG_UINT64 PMR_BASE_T; -+typedef IMG_UINT64 PMR_SIZE_T; -+#define PMR_SIZE_FMTSPEC "0x%010"IMG_UINT64_FMTSPECX -+#define PMR_VALUE32_FMTSPEC "0x%08X" -+#define PMR_VALUE64_FMTSPEC "0x%016"IMG_UINT64_FMTSPECX -+typedef IMG_UINT32 PMR_LOG2ALIGN_T; -+typedef IMG_UINT64 PMR_PASSWORD_T; -+ -+struct _PMR_MAPPING_TABLE_ -+{ -+ PMR_SIZE_T uiChunkSize; /*!< Size of a "chunk" */ -+ IMG_UINT32 ui32NumPhysChunks; /*!< Number of physical chunks that are valid */ -+ IMG_UINT32 ui32NumVirtChunks; /*!< Number of virtual chunks in the mapping */ -+ /* Must be last */ -+ IMG_UINT32 aui32Translation[1]; /*!< Translation mapping for "logical" to physical */ -+}; -+ -+#define TRANSLATION_INVALID 0xFFFFFFFFUL -+ -+typedef struct _PMR_EXPORT_ PMR_EXPORT; -+ -+typedef struct _PMR_PAGELIST_ PMR_PAGELIST; -+ -+IMG_INT32 PMRGetLiveCount(void); -+ -+/* -+ * PMRValidateSize -+ * -+ * Given a size value, check the value against the max supported -+ * PMR size of 1GB. Return IMG_FALSE if size exceeds max, IMG_TRUE -+ * otherwise. -+ */ -+static inline IMG_BOOL PMRValidateSize(IMG_UINT64 uiSize) -+{ -+ return (uiSize > PMR_MAX_SUPPORTED_SIZE) ? IMG_FALSE : IMG_TRUE; -+} -+ -+/* -+ * PMRCreatePMR -+ * -+ * Not to be called directly, only via implementations of PMR -+ * factories, e.g. in physmem_osmem.c, deviceclass.c, etc. -+ * -+ * Creates a PMR object, with callbacks and private data as per the -+ * FuncTab/PrivData args. 
-+ * -+ * Note that at creation time the PMR must set in stone the "logical -+ * size" and the "contiguity guarantee" -+ * -+ * Flags are also set at this time. (T.B.D. flags also immutable for -+ * the life of the PMR?) -+ * -+ * Logical size is the amount of Virtual space this allocation would -+ * take up when mapped. Note that this does not have to be the same -+ * as the actual physical size of the memory. For example, consider -+ * the sparsely allocated non-power-of-2 texture case. In this -+ * instance, the "logical size" would be the virtual size of the -+ * rounded-up power-of-2 texture. That some pages of physical memory -+ * may not exist does not affect the logical size calculation. -+ * -+ * The PMR must also supply the "contiguity guarantee" which is the -+ * finest granularity of alignment and size of physical pages that the -+ * PMR will provide after LockSysPhysAddresses is called. Note that -+ * the calling code may choose to call PMRSysPhysAddr with a finer -+ * granularity than this, for example if it were to map into a device -+ * MMU with a smaller page size, and it's also OK for the PMR to -+ * supply physical memory in larger chunks than this. But -+ * importantly, never the other way around. -+ * -+ * More precisely, the following inequality must be maintained -+ * whenever mappings and/or physical addresses exist: -+ * -+ * (device MMU page size) <= 2**(uiLog2ContiguityGuarantee) <= (actual contiguity of physical memory) -+ * -+ * The function table will contain the following callbacks which may -+ * be overridden by the PMR implementation: -+ * -+ * pfnLockPhysAddresses -+ * -+ * Called when someone locks requests that Physical pages are to -+ * be locked down via the PMRLockSysPhysAddresses() API. Note -+ * that if physical pages are prefaulted at PMR creation time and -+ * therefore static, it would not be necessary to override this -+ * function, in which case NULL may be supplied. -+ * -+ * pfnUnlockPhysAddresses -+ * -+ * The reverse of pfnLockPhysAddresses. Note that this should be -+ * NULL if and only if pfnLockPhysAddresses is NULL -+ * -+ * pfnSysPhysAddr -+ * -+ * This function is mandatory. This is the one which returns the -+ * system physical address for a given offset into this PMR. The -+ * "lock" function will have been called, if overridden, before -+ * this function, thus the implementation should not increase any -+ * refcount when answering this call. Refcounting, if necessary, -+ * should be done in the lock/unlock calls. Refcounting would -+ * not be necessary in the prefaulted/static scenario, as the -+ * pmr.c abstraction will handle the refcounting for the whole -+ * PMR. -+ * -+ * pfnFinalize -+ * -+ * Called when the PMR's refcount reaches zero and it gets -+ * destroyed. This allows the implementation to free up any -+ * resource acquired during creation time. -+ * -+ */ -+PVRSRV_ERROR -+PMRCreatePMR(PHYS_HEAP *psPhysHeap, -+ PMR_SIZE_T uiLogicalSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee, -+ PMR_FLAGS_T uiFlags, -+ const IMG_CHAR *pszAnnotation, -+ const PMR_IMPL_FUNCTAB *psFuncTab, -+ PMR_IMPL_PRIVDATA pvPrivData, -+ PMR_IMPL_TYPE eType, -+ PMR **ppsPMRPtr, -+ IMG_UINT32 ui32PDumpFlags); -+ -+/* -+ * PMRLockSysPhysAddresses() -+ * -+ * Calls the relevant callback to lock down the system physical addresses of -+ * the memory that makes up the whole PMR. 
-+ * -+ * Before this call, it is not valid to use any of the information -+ * getting APIs: PMR_Flags(), PMR_SysPhysAddr(), -+ * [ see note below about lock/unlock semantics ] -+ * -+ * The caller of this function does not have to care about how the PMR -+ * is implemented. He only has to know that he is allowed access to -+ * the physical addresses _after_ calling this function and _until_ -+ * calling PMRUnlockSysPhysAddresses(). -+ * -+ * -+ * Notes to callback implementers (authors of PMR Factories): -+ * -+ * Some PMR implementations will be such that the physical memory exists for -+ * the lifetime of the PMR, with a static address, (and normally flags and -+ * symbolic address are static too) and so it is legal for a PMR -+ * implementation to not provide an implementation for the lock callback. -+ * -+ * Some PMR implementation may wish to page memory in from secondary storage -+ * on demand. The lock/unlock callbacks _may_ be the place to do this. -+ * (More likely, there would be a separate API for doing this, but this API -+ * provides a useful place to assert that it has been done) -+ */ -+ -+PVRSRV_ERROR -+PMRLockSysPhysAddresses(PMR *psPMR); -+ -+PVRSRV_ERROR -+PMRLockSysPhysAddressesNested(PMR *psPMR, -+ IMG_UINT32 ui32NestingLevel); -+ -+/* -+ * PMRUnlockSysPhysAddresses() -+ * -+ * the reverse of PMRLockSysPhysAddresses() -+ */ -+PVRSRV_ERROR -+PMRUnlockSysPhysAddresses(PMR *psPMR); -+ -+PVRSRV_ERROR -+PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel); -+ -+/* -+ * PhysmemPMRExport() -+ * -+ * Given a PMR, creates a PMR "Export", which is a handle that -+ * provides sufficient data to be able to "import" this PMR elsewhere. -+ * The PMR Export is an object in its own right, whose existence -+ * implies a reference on the PMR, thus the PMR cannot be destroyed -+ * while the PMR Export exists. The intention is that the PMR Export -+ * will be wrapped in the devicemem layer by a cross process handle, -+ * and some IPC by which to communicate the handle value and password -+ * to other processes. The receiving process is able to unwrap this -+ * to gain access to the same PMR Export in this layer, and, via -+ * PhysmemPMRImport(), obtain a reference to the original PMR. -+ * -+ * The caller receives, along with the PMR Export object, information -+ * about the size and contiguity guarantee for the PMR, and also the -+ * PMRs secret password, in order to authenticate the subsequent -+ * import. -+ * -+ * N.B. If you call PMRExportPMR() (and it succeeds), you are -+ * promising to later call PMRUnexportPMR() -+ */ -+PVRSRV_ERROR -+PMRExportPMR(PMR *psPMR, -+ PMR_EXPORT **ppsPMRExport, -+ PMR_SIZE_T *puiSize, -+ PMR_LOG2ALIGN_T *puiLog2Contig, -+ PMR_PASSWORD_T *puiPassword); -+ -+/*! -+******************************************************************************* -+ -+ @Function PMRMakeLocalImportHandle -+ -+ @Description -+ -+ Transform a general handle type into one that we are able to import. -+ Takes a PMR reference. -+ -+ @Input psPMR The input PMR. -+ @Output ppsPMR The output PMR that is going to be transformed to the -+ correct handle type. -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR -+PMRMakeLocalImportHandle(PMR *psPMR, -+ PMR **ppsPMR); -+ -+/*! -+******************************************************************************* -+ -+ @Function PMRUnmakeLocalImportHandle -+ -+ @Description -+ -+ Take a PMR, destroy the handle and release a reference. 
-+ Counterpart to PMRMakeServerExportClientExport(). -+ -+ @Input psPMR PMR to destroy. -+ Created by PMRMakeLocalImportHandle(). -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR -+PMRUnmakeLocalImportHandle(PMR *psPMR); -+ -+/* -+ * PMRUnexportPMR() -+ * -+ * The reverse of PMRExportPMR(). This causes the PMR to no longer be -+ * exported. If the PMR has already been imported, the imported PMR -+ * reference will still be valid, but no further imports will be possible. -+ */ -+PVRSRV_ERROR -+PMRUnexportPMR(PMR_EXPORT *psPMRExport); -+ -+/* -+ * PMRImportPMR() -+ * -+ * Takes a PMR Export object, as obtained by PMRExportPMR(), and -+ * obtains a reference to the original PMR. -+ * -+ * The password must match, and is assumed to have been (by whatever -+ * means, IPC etc.) preserved intact from the former call to -+ * PMRExportPMR() -+ * -+ * The size and contiguity arguments are entirely irrelevant for the -+ * import, however they are verified in order to trap bugs. -+ * -+ * N.B. If you call PhysmemPMRImport() (and it succeeds), you are -+ * promising to later call PhysmemPMRUnimport() -+ */ -+PVRSRV_ERROR -+PMRImportPMR(PMR_EXPORT *psPMRExport, -+ PMR_PASSWORD_T uiPassword, -+ PMR_SIZE_T uiSize, -+ PMR_LOG2ALIGN_T uiLog2Contig, -+ PMR **ppsPMR); -+ -+/* Function that alters the mutability property -+ * of the PMR -+ * Setting it to TRUE makes sure the PMR memory layout -+ * can't be changed through future calls */ -+void -+PMR_SetLayoutFixed(PMR *psPMR, IMG_BOOL bFlag); -+ -+IMG_BOOL PMR_IsMemLayoutFixed(PMR *psPMR); -+ -+/* -+ * PMRUnimportPMR() -+ * -+ * releases the reference on the PMR as obtained by PMRImportPMR() -+ */ -+PVRSRV_ERROR -+PMRUnimportPMR(PMR *psPMR); -+ -+PVRSRV_ERROR -+PMRLocalImportPMR(PMR *psPMR, -+ PMR **ppsPMR, -+ IMG_DEVMEM_SIZE_T *puiSize, -+ IMG_DEVMEM_ALIGN_T *puiAlign); -+ -+/* -+ * Equivalent mapping functions when in kernel mode. 
-+ */ -+PVRSRV_ERROR -+PMRAcquireKernelMappingData(PMR *psPMR, -+ size_t uiLogicalOffset, -+ size_t uiSize, -+ void **ppvKernelAddressOut, -+ size_t *puiLengthOut, -+ IMG_HANDLE *phPrivOut); -+ -+PVRSRV_ERROR -+PMRAcquireSparseKernelMappingData(PMR *psPMR, -+ size_t uiLogicalOffset, -+ size_t uiSize, -+ void **ppvKernelAddressOut, -+ size_t *puiLengthOut, -+ IMG_HANDLE *phPrivOut); -+ -+PVRSRV_ERROR -+PMRReleaseKernelMappingData(PMR *psPMR, -+ IMG_HANDLE hPriv); -+ -+/* -+ * PMR_ReadBytes() -+ * -+ * calls into the PMR implementation to read up to uiBufSz bytes, -+ * returning the actual number read in *puiNumBytes -+ * -+ * this will read up to the end of the PMR, or the next symbolic name -+ * boundary, or until the requested number of bytes is read, whichever -+ * comes first -+ * -+ * In the case of sparse PMR's the caller doesn't know what offsets are -+ * valid and which ones aren't so we will just write 0 to invalid offsets -+ */ -+PVRSRV_ERROR -+PMR_ReadBytes(PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT8 *pcBuffer, -+ size_t uiBufSz, -+ size_t *puiNumBytes); -+ -+/* -+ * PMR_WriteBytes() -+ * -+ * calls into the PMR implementation to write up to uiBufSz bytes, -+ * returning the actual number read in *puiNumBytes -+ * -+ * this will write up to the end of the PMR, or the next symbolic name -+ * boundary, or until the requested number of bytes is written, whichever -+ * comes first -+ * -+ * In the case of sparse PMR's the caller doesn't know what offsets are -+ * valid and which ones aren't so we will just ignore data at invalid offsets -+ */ -+PVRSRV_ERROR -+PMR_WriteBytes(PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT8 *pcBuffer, -+ size_t uiBufSz, -+ size_t *puiNumBytes); -+ -+/*************************************************************************/ /*! -+@Function PMRMMapPMR -+@Description Performs the necessary steps to map the PMR into a user process -+ address space. The caller does not need to call -+ PMRLockSysPhysAddresses before calling this function. -+ -+@Input psPMR PMR to map. -+ -+@Input pOSMMapData OS specific data needed to create a mapping. -+ -+@Input uiCpuAccessFlags Flags to indicate if the mapping request -+ requires read, write or both access. -+ -+@Return PVRSRV_ERROR: PVRSRV_OK on success or an error otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PMRMMapPMR(PMR *psPMR, -+ PMR_MMAP_DATA pOSMMapData, -+ PVRSRV_MEMALLOCFLAGS_T uiCpuAccessFlags); -+ -+/* -+ * PMRRefPMR() -+ * -+ * Take a reference on the passed in PMR -+ */ -+void -+PMRRefPMR(PMR *psPMR); -+ -+/* -+ * PMRUnrefPMR() -+ * -+ * This undoes a call to any of the PhysmemNew* family of APIs -+ * (i.e. any PMR factory "constructor") -+ * -+ * This relinquishes a reference to the PMR, and, where the refcount -+ * reaches 0, causes the PMR to be destroyed (calling the finalizer -+ * callback on the PMR, if there is one) -+ */ -+PVRSRV_ERROR -+PMRUnrefPMR(PMR *psPMR); -+ -+/* -+ * PMRRefPMR2() -+ * -+ * Take a reference on the passed in PMR. -+ * -+ * This function does not perform address locking as opposed to PMRRefPMR(). -+ */ -+void -+PMRRefPMR2(PMR *psPMR); -+ -+/* -+ * PMRUnrefPMR2() -+ * -+ * This undoes a call to any of the PhysmemNew* family of APIs -+ * (i.e. any PMR factory "constructor"). 
-+ * -+ * This relinquishes a reference to the PMR, and, where the refcount -+ * reaches 0, causes the PMR to be destroyed (calling the finalizer -+ * callback on the PMR, if there is one) -+ */ -+void -+PMRUnrefPMR2(PMR *psPMR); -+ -+/* -+ * PMRUnrefUnlockPMR() -+ * -+ * Same as above but also unlocks the PMR. -+ */ -+PVRSRV_ERROR -+PMRUnrefUnlockPMR(PMR *psPMR); -+ -+/* -+ * PMRCpuMapCountIncr() -+ * -+ * Increment count of the number of current CPU mappings of the PMR. -+ * -+ */ -+void -+PMRCpuMapCountIncr(PMR *psPMR); -+ -+/* -+ * PMRCpuMapCountDecr() -+ * -+ * Decrement count of the number of current CPU mappings of the PMR. -+ * -+ */ -+void -+PMRCpuMapCountDecr(PMR *psPMR); -+ -+PPVRSRV_DEVICE_NODE -+PMR_DeviceNode(const PMR *psPMR); -+ -+/* -+ * PMR_Flags() -+ * -+ * Flags are static and guaranteed for the life of the PMR. Thus this -+ * function is idempotent and acquire/release semantics is not required. -+ * -+ * Returns the flags as specified on the PMR. The flags are to be -+ * interpreted as mapping permissions -+ */ -+PMR_FLAGS_T -+PMR_Flags(const PMR *psPMR); -+ -+IMG_BOOL -+PMR_IsSparse(const PMR *psPMR); -+ -+void -+PMR_LogicalSize(const PMR *psPMR, -+ IMG_DEVMEM_SIZE_T *puiLogicalSize); -+ -+PVRSRV_ERROR -+PMR_PhysicalSize(const PMR *psPMR, -+ IMG_DEVMEM_SIZE_T *puiPhysicalSize); -+ -+PHYS_HEAP * -+PMR_PhysHeap(const PMR *psPMR); -+ -+PMR_MAPPING_TABLE * -+PMR_GetMappingTable(const PMR *psPMR); -+ -+IMG_UINT32 -+PMR_GetLog2Contiguity(const PMR *psPMR); -+ -+/* -+ * PMRGetMaxChunkCount -+ * -+ * Given a PMR, calculate the maximum number of chunks supported by -+ * the PMR from the contiguity and return it. -+ */ -+IMG_UINT32 PMRGetMaxChunkCount(PMR *psPMR); -+ -+const IMG_CHAR * -+PMR_GetAnnotation(const PMR *psPMR); -+ -+/* -+ * PMR_IsOffsetValid() -+ * -+ * Returns if an address offset inside a PMR has a valid -+ * physical backing. -+ */ -+PVRSRV_ERROR -+PMR_IsOffsetValid(const PMR *psPMR, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32NumOfPages, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_BOOL *pbValid); -+ -+PMR_IMPL_TYPE -+PMR_GetType(const PMR *psPMR); -+ -+IMG_CHAR * -+PMR_GetTypeStr(const PMR *psPMR); -+ -+IMG_INT32 -+PMR_GetRefCount(const PMR *psPMR); -+ -+/* PMR usage type for callers of PMR_DevPhysAddr() */ -+typedef enum _PMR_USAGE_TYPE_ -+{ -+ CPU_USE = 0, -+ DEVICE_USE -+} PMR_USAGE_TYPE; -+ -+/* -+ * PMR_DevPhysAddr() -+ * -+ * A note regarding Lock/Unlock semantics -+ * ====================================== -+ * -+ * PMR_DevPhysAddr may only be called after PMRLockSysPhysAddresses() -+ * has been called. The data returned may be used only until -+ * PMRUnlockSysPhysAddresses() is called after which time the licence -+ * to use the data is revoked and the information may be invalid. -+ * -+ * Given an offset, this function returns the device physical address of the -+ * corresponding page in the PMR. It may be called multiple times -+ * until the address of all relevant pages has been determined. -+ * -+ * If caller only wants one physical address it is sufficient to pass in: -+ * ui32Log2PageSize==0 and ui32NumOfPages==1 -+ */ -+PVRSRV_ERROR -+PMR_DevPhysAddr(const PMR *psPMR, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32NumOfPages, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_DEV_PHYADDR *psDevAddr, -+ IMG_BOOL *pbValid, -+ PMR_USAGE_TYPE ePMRUsage); -+ -+/* -+ * PMR_CpuPhysAddr() -+ * -+ * See note above about Lock/Unlock semantics. 
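Putting the lock/unlock rule above into practice, a caller that wants a single device-physical address brackets the query with PMRLockSysPhysAddresses()/PMRUnlockSysPhysAddresses() (declared earlier in pmr.h) and, as the comment notes, passes ui32Log2PageSize of 0 and ui32NumOfPages of 1. A sketch; the helper name and the error chosen for an unbacked offset are illustrative:

/* Sketch: look up the device-physical address backing offset 0 of a PMR.
 * The returned address is only meaningful while the system physical
 * addresses remain locked. */
static PVRSRV_ERROR ExampleQueryDevPhysAddr(PMR *psPMR, IMG_DEV_PHYADDR *psAddrOut)
{
    IMG_BOOL bValid = IMG_FALSE;
    PVRSRV_ERROR eError;

    eError = PMRLockSysPhysAddresses(psPMR);
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    eError = PMR_DevPhysAddr(psPMR, 0, 1, 0, psAddrOut, &bValid, DEVICE_USE);

    /* For a sparse PMR the queried offset may have no physical backing */
    if (eError == PVRSRV_OK && !bValid)
    {
        eError = PVRSRV_ERROR_INVALID_PARAMS;
    }

    PMRUnlockSysPhysAddresses(psPMR);
    return eError;
}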
-+ * -+ * Given an offset, this function returns the CPU physical address of the -+ * corresponding page in the PMR. It may be called multiple times -+ * until the address of all relevant pages has been determined. -+ * -+ */ -+PVRSRV_ERROR -+PMR_CpuPhysAddr(const PMR *psPMR, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32NumOfPages, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_CPU_PHYADDR *psCpuAddrPtr, -+ IMG_BOOL *pbValid); -+ -+PVRSRV_ERROR -+PMRGetUID(PMR *psPMR, -+ IMG_UINT64 *pui64UID); -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+/* -+ * PMR_IsZombie() -+ * -+ * Indicates if a PMR is a "zombie" PMR. This function **must** be called -+ * inside a PMR factory lock. -+ */ -+IMG_BOOL -+PMR_IsZombie(const PMR *psPMR); -+ -+/* -+ * PMRMarkForDeferFree -+ * -+ * Sets sync value required for this PMR to be freed. -+ */ -+void -+PMRMarkForDeferFree(PMR *psPMR); -+ -+/* -+ * PMRQueueZombiesForCleanup -+ * -+ * Defers cleanup of all zombie PMRs to the CleanupThread. -+ * -+ * Returns IMG_TRUE if any PMRs were queued for free and IMG_FALSE if no PMRs -+ * were queued. -+ */ -+IMG_BOOL -+PMRQueueZombiesForCleanup(PPVRSRV_DEVICE_NODE psDevNode); -+ -+/* -+ * PMRDequeueZombieAndRef -+ * -+ * Removed the PMR either form zombie list or cleanup item's list -+ * and references it. -+ */ -+void -+PMRDequeueZombieAndRef(PMR *psPMR); -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+/* -+ * PMR_ChangeSparseMem() -+ * -+ * See note above about Lock/Unlock semantics. -+ * -+ * This function alters the memory map of the given PMR in device space by -+ * adding/deleting the pages as requested. -+ * -+ */ -+PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *pai32AllocIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pai32FreeIndices, -+ IMG_UINT32 uiSparseFlags); -+ -+/* -+ * PMR_ChangeSparseMemCPUMap() -+ * -+ * See note above about Lock/Unlock semantics. -+ * -+ * This function alters the memory map of the given PMR in CPU space by -+ * adding/deleting the pages as requested. -+ */ -+PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR, -+ IMG_UINT64 sCpuVAddrBase, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *pai32AllocIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pai32FreeIndices); -+ -+#if defined(PDUMP) -+ -+/* -+ * PMR_PDumpSymbolicAddr() -+ * -+ * Given an offset, returns the pdump memspace name and symbolic -+ * address of the corresponding page in the PMR. -+ * -+ * Note that PDump memspace names and symbolic addresses are static -+ * and valid for the lifetime of the PMR, therefore we don't require -+ * acquire/release semantics here. -+ * -+ * Note that it is expected that the pdump "mapping" code will call -+ * this function multiple times as each page is mapped in turn -+ * -+ * Note that NextSymName is the offset from the base of the PMR to the -+ * next pdump symbolic address (or the end of the PMR if the PMR only -+ * had one PDUMPMALLOC -+ */ -+PVRSRV_ERROR -+PMR_PDumpSymbolicAddr(const PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT32 ui32NamespaceNameLen, -+ IMG_CHAR *pszNamespaceName, -+ IMG_UINT32 ui32SymbolicAddrLen, -+ IMG_CHAR *pszSymbolicAddr, -+ IMG_DEVMEM_OFFSET_T *puiNewOffset, -+ IMG_DEVMEM_OFFSET_T *puiNextSymName -+ ); -+ -+/* -+ * PMRPDumpLoadMemValue32() -+ * -+ * writes the current contents of a dword in PMR memory to the pdump -+ * script stream. Useful for patching a buffer by simply editing the -+ * script output file in ASCII plain text. 
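PMR_ChangeSparseMem() above takes two index lists: pages that should gain physical backing and pages that should lose it. A sketch of a call that backs logical pages 0 and 2 and releases page 5, assuming the caller already holds suitable sparse-change flags (their values are defined elsewhere in the driver and are not shown here); the wrapper name is illustrative:

/* Sketch: adjust the backing of a sparse PMR. Indices are page numbers
 * within the PMR, counted at the PMR's own page granularity. */
static PVRSRV_ERROR ExampleRemapSparse(PMR *psPMR, IMG_UINT32 uiSparseFlags)
{
    IMG_UINT32 aui32Alloc[] = { 0, 2 };
    IMG_UINT32 aui32Free[]  = { 5 };

    if (!PMR_IsSparse(psPMR))
    {
        return PVRSRV_ERROR_INVALID_PARAMS;
    }

    return PMR_ChangeSparseMem(psPMR,
                               2, aui32Alloc,
                               1, aui32Free,
                               uiSparseFlags);
}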
-+ * -+ */ -+PVRSRV_ERROR -+PMRPDumpLoadMemValue32(PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT32 ui32Value, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+/* -+ * PMRPDumpCopyMem32 -+ * -+ * Adds in the pdump script stream a copy of a dword in one PMR memory -+ * location to another PMR memory location. -+ * -+ */ -+PVRSRV_ERROR -+PMRPDumpCopyMem32(PMR *psDstPMR, -+ IMG_DEVMEM_OFFSET_T uiDstLogicalOffset, -+ PMR *psSrcPMR, -+ IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset, -+ const IMG_CHAR *pszTmpVar, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+/* -+ * PMRPDumpLoadMemValue64() -+ * -+ * writes the current contents of a dword in PMR memory to the pdump -+ * script stream. Useful for patching a buffer by simply editing the -+ * script output file in ASCII plain text. -+ * -+ */ -+PVRSRV_ERROR -+PMRPDumpLoadMemValue64(PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT64 ui64Value, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+/* -+ * PMRPDumpCopyMem64 -+ * -+ * Adds in the pdump script stream a copy of a quadword in one PMR memory -+ * location to another PMR memory location. -+ */ -+PVRSRV_ERROR -+PMRPDumpCopyMem64(PMR *psDstPMR, -+ IMG_DEVMEM_OFFSET_T uiDstLogicalOffset, -+ PMR *psSrcPMR, -+ IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset, -+ const IMG_CHAR *pszTmpVar, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+/* -+ * PMRPDumpLoadMem() -+ * -+ * Writes the current contents of the PMR memory to the pdump PRM stream, -+ * and emits some PDump code to the script stream to LDB said bytes from -+ * said file. If bZero is IMG_TRUE then the PDump zero page is used as the -+ * source for the LDB. -+ */ -+PVRSRV_ERROR -+PMRPDumpLoadMem(PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ PDUMP_FLAGS_T uiPDumpFlags, -+ IMG_BOOL bZero); -+ -+/* -+ * PMRPDumpSaveToFile() -+ * -+ * Emits some PDump that does an SAB (save bytes) using the PDump symbolic -+ * address of the PMR. Note that this is generally not the preferred way to -+ * dump the buffer contents. There is an equivalent function in -+ * devicemem_server.h which also emits SAB but using the virtual address, -+ * which is the "right" way to dump the buffer contents to a file. -+ * This function exists just to aid testing by providing a means to dump -+ * the PMR directly by symbolic address also. 
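A typical use of the PDump helpers above is to poke a known value into a buffer during capture and then dump the surrounding bytes so the patched contents can be inspected offline. A sketch under the same PDUMP guard as the declarations; the file name, the 256-byte window and the uiArraySize of 1 are illustrative assumptions, as is the helper name:

#if defined(PDUMP)
/* Sketch: patch a dword in the PDump script, then save a window of the PMR
 * to a file by its symbolic address. The flags are passed straight through
 * to the PDump layer. */
static PVRSRV_ERROR ExamplePDumpPatchAndSave(PMR *psPMR,
                                             IMG_DEVMEM_OFFSET_T uiOffset,
                                             IMG_UINT32 ui32Value,
                                             PDUMP_FLAGS_T uiPDumpFlags)
{
    PVRSRV_ERROR eError;

    eError = PMRPDumpLoadMemValue32(psPMR, uiOffset, ui32Value, uiPDumpFlags);
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    /* Dump 256 bytes starting at the patched location */
    return PMRPDumpSaveToFile(psPMR, uiOffset, 256, 1, "patched_pmr.bin", 0);
}
#endif /* PDUMP */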
-+ */ -+PVRSRV_ERROR -+PMRPDumpSaveToFile(const PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 uiArraySize, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 uiFileOffset); -+#else /* PDUMP */ -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PMR_PDumpSymbolicAddr) -+#endif -+static INLINE PVRSRV_ERROR -+PMR_PDumpSymbolicAddr(const PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT32 ui32NamespaceNameLen, -+ IMG_CHAR *pszNamespaceName, -+ IMG_UINT32 ui32SymbolicAddrLen, -+ IMG_CHAR *pszSymbolicAddr, -+ IMG_DEVMEM_OFFSET_T *puiNewOffset, -+ IMG_DEVMEM_OFFSET_T *puiNextSymName) -+{ -+ PVR_UNREFERENCED_PARAMETER(psPMR); -+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); -+ PVR_UNREFERENCED_PARAMETER(ui32NamespaceNameLen); -+ PVR_UNREFERENCED_PARAMETER(pszNamespaceName); -+ PVR_UNREFERENCED_PARAMETER(ui32SymbolicAddrLen); -+ PVR_UNREFERENCED_PARAMETER(pszSymbolicAddr); -+ PVR_UNREFERENCED_PARAMETER(puiNewOffset); -+ PVR_UNREFERENCED_PARAMETER(puiNextSymName); -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PMRPDumpLoadMemValue32) -+#endif -+static INLINE PVRSRV_ERROR -+PMRPDumpLoadMemValue32(PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT32 ui32Value, -+ PDUMP_FLAGS_T uiPDumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psPMR); -+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PMRPDumpLoadMemValue64) -+#endif -+static INLINE PVRSRV_ERROR -+PMRPDumpLoadMemValue64(PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT64 ui64Value, -+ PDUMP_FLAGS_T uiPDumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psPMR); -+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); -+ PVR_UNREFERENCED_PARAMETER(ui64Value); -+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PMRPDumpLoadMem) -+#endif -+static INLINE PVRSRV_ERROR -+PMRPDumpLoadMem(PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ PDUMP_FLAGS_T uiPDumpFlags, -+ IMG_BOOL bZero) -+{ -+ PVR_UNREFERENCED_PARAMETER(psPMR); -+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); -+ PVR_UNREFERENCED_PARAMETER(uiSize); -+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); -+ PVR_UNREFERENCED_PARAMETER(bZero); -+ return PVRSRV_OK; -+} -+ -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PMRPDumpSaveToFile) -+#endif -+static INLINE PVRSRV_ERROR -+PMRPDumpSaveToFile(const PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 uiArraySize, -+ const IMG_CHAR *pszFilename, -+ IMG_UINT32 uiFileOffset) -+{ -+ PVR_UNREFERENCED_PARAMETER(psPMR); -+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); -+ PVR_UNREFERENCED_PARAMETER(uiSize); -+ PVR_UNREFERENCED_PARAMETER(uiArraySize); -+ PVR_UNREFERENCED_PARAMETER(pszFilename); -+ PVR_UNREFERENCED_PARAMETER(uiFileOffset); -+ return PVRSRV_OK; -+} -+ -+#endif /* PDUMP */ -+ -+/* This function returns the private data that a pmr subtype embedded in -+ * here. We use the function table pointer as "authorisation" that this -+ * function is being called by the pmr subtype implementation. We can -+ * assume (assert) that. It would be a bug in the implementation of the -+ * pmr subtype if this assertion ever fails. 
-+ */ -+void * -+PMRGetPrivateData(const PMR *psPMR, -+ const PMR_IMPL_FUNCTAB *psFuncTab); -+ -+PVRSRV_ERROR -+PMRZeroingPMR(PMR *psPMR, -+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize); -+ -+PVRSRV_ERROR -+PMRDumpPageList(PMR *psReferencePMR, -+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize); -+ -+PVRSRV_ERROR -+PMRWritePMPageList(/* Target PMR, offset, and length */ -+ PMR *psPageListPMR, -+ IMG_DEVMEM_OFFSET_T uiTableOffset, -+ IMG_DEVMEM_SIZE_T uiTableLength, -+ /* Referenced PMR, and "page" granularity */ -+ PMR *psReferencePMR, -+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize, -+ PMR_PAGELIST **ppsPageList); -+ -+/* Doesn't actually erase the page list - just releases -+ * the appropriate refcounts -+ */ -+PVRSRV_ERROR // should be void, surely -+PMRUnwritePMPageList(PMR_PAGELIST *psPageList); -+ -+#if defined(PDUMP) -+PVRSRV_ERROR -+PMRPDumpPol32(const PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ PDUMP_FLAGS_T uiFlags); -+ -+PVRSRV_ERROR -+PMRPDumpCheck32(const PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ PDUMP_FLAGS_T uiPDumpFlags); -+ -+PVRSRV_ERROR -+PMRPDumpCBP(const PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiReadOffset, -+ IMG_DEVMEM_OFFSET_T uiWriteOffset, -+ IMG_DEVMEM_SIZE_T uiPacketSize, -+ IMG_DEVMEM_SIZE_T uiBufferSize); -+#else -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PMRPDumpPol32) -+#endif -+static INLINE PVRSRV_ERROR -+PMRPDumpPol32(const PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ PDUMP_FLAGS_T uiFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psPMR); -+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+ PVR_UNREFERENCED_PARAMETER(ui32Mask); -+ PVR_UNREFERENCED_PARAMETER(eOperator); -+ PVR_UNREFERENCED_PARAMETER(uiFlags); -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PMRPDumpCheck32) -+#endif -+static INLINE PVRSRV_ERROR -+PMRPDumpCheck32(const PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ PDUMP_FLAGS_T uiFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psPMR); -+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+ PVR_UNREFERENCED_PARAMETER(ui32Mask); -+ PVR_UNREFERENCED_PARAMETER(eOperator); -+ PVR_UNREFERENCED_PARAMETER(uiFlags); -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PMRPDumpCBP) -+#endif -+static INLINE PVRSRV_ERROR -+PMRPDumpCBP(const PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiReadOffset, -+ IMG_DEVMEM_OFFSET_T uiWriteOffset, -+ IMG_DEVMEM_SIZE_T uiPacketSize, -+ IMG_DEVMEM_SIZE_T uiBufferSize) -+{ -+ PVR_UNREFERENCED_PARAMETER(psPMR); -+ PVR_UNREFERENCED_PARAMETER(uiReadOffset); -+ PVR_UNREFERENCED_PARAMETER(uiWriteOffset); -+ PVR_UNREFERENCED_PARAMETER(uiPacketSize); -+ PVR_UNREFERENCED_PARAMETER(uiBufferSize); -+ return PVRSRV_OK; -+} -+#endif -+ -+PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR); -+ -+/* -+ * PMRInit() -+ * -+ * To be called once and only once to initialise the internal data in -+ * the PMR module (mutexes and such) -+ * -+ * Not for general use. Only PVRSRVInit(); should be calling this. 
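PMRWritePMPageList() and PMRUnwritePMPageList() above form a pair: the write records the reference PMR's physical pages into the page-list PMR and takes the necessary refcounts, while the unwrite releases those refcounts without erasing the table. A sketch of the pairing, with the wrapper name being illustrative:

/* Sketch: populate a page-list PMR with the page addresses of a reference
 * PMR, hand it off, then release the bookkeeping with the paired unwrite. */
static PVRSRV_ERROR ExampleWriteThenUnwritePageList(PMR *psPageListPMR,
                                                    IMG_DEVMEM_OFFSET_T uiTableOffset,
                                                    IMG_DEVMEM_SIZE_T uiTableLength,
                                                    PMR *psReferencePMR,
                                                    IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize)
{
    PMR_PAGELIST *psPageList = NULL;
    PVRSRV_ERROR eError;

    eError = PMRWritePMPageList(psPageListPMR, uiTableOffset, uiTableLength,
                                psReferencePMR, uiLog2PageSize, &psPageList);
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    /* ... the written table is now consumed by whoever needed it ... */

    /* Releases the refcounts taken by the write; the table itself is kept */
    return PMRUnwritePMPageList(psPageList);
}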
-+ */ -+PVRSRV_ERROR -+PMRInit(void); -+ -+/* -+ * PMRDeInit() -+ * -+ * To be called once and only once to deinitialise the internal data in -+ * the PMR module (mutexes and such) and for debug checks -+ * -+ * Not for general use. Only PVRSRVDeInit(); should be calling this. -+ */ -+PVRSRV_ERROR -+PMRDeInit(void); -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+/* -+ * PMRInitDevice() -+ * -+ * Initialised device specific PMR data. -+ */ -+PVRSRV_ERROR -+PMRInitDevice(PPVRSRV_DEVICE_NODE psDeviceNode); -+ -+/* -+ * PMRFreeZombies() -+ * -+ * Free deferred PMRs. -+ */ -+void -+PMRFreeZombies(PPVRSRV_DEVICE_NODE psDeviceNode); -+ -+/* -+ * PMRFreeZombies() -+ * -+ * Print all zombies to the log. -+ */ -+void -+PMRDumpZombies(PPVRSRV_DEVICE_NODE psDeviceNode); -+ -+/* -+ * PMRDeInitDevice() -+ * -+ * Cleans up device specific PMR data. -+ */ -+void -+PMRDeInitDevice(PPVRSRV_DEVICE_NODE psDeviceNode); -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+PVRSRV_ERROR -+PMRStoreRIHandle(PMR *psPMR, void *hRIHandle); -+#endif -+ -+#if defined(PVRSRV_INTERNAL_IPA_FEATURE_TESTING) -+PVRSRV_ERROR -+PMRModifyIPAPolicy(PMR *psPMR, IMG_UINT8 ui8NewIPAPolicy); -+ -+PVRSRV_ERROR -+PMRGetIPAPolicy(PMR *psPMR, IMG_UINT8 *pui8IPAPolicy); -+ -+PVRSRV_ERROR -+PMRGetIPAInfo(PMR *psPMR, IMG_UINT32 *pui32IPAPolicy, IMG_UINT32 *pui32IPAShift, -+ IMG_UINT32 *pui32IPAMask, IMG_UINT32 *pui32IPAFlagsValue); -+#endif -+ -+#endif /* #ifdef SRVSRV_PMR_H */ -diff --git a/drivers/gpu/drm/img-rogue/pmr_impl.h b/drivers/gpu/drm/img-rogue/pmr_impl.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pmr_impl.h -@@ -0,0 +1,558 @@ -+/**************************************************************************/ /*! -+@File -+@Title Implementation Callbacks for Physmem (PMR) abstraction -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Part of the memory management. This file is for definitions -+ that are private to the world of PMRs, but that need to be -+ shared between pmr.c itself and the modules that implement the -+ callbacks for the PMR. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+#ifndef SRVSRV_PMR_IMPL_H -+#define SRVSRV_PMR_IMPL_H -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+ -+/*! Physical Memory Resource type. -+ */ -+typedef struct _PMR_ PMR; -+ -+/*! Per-flavour callbacks need to be shared with generic implementation -+ * (pmr.c). -+ */ -+typedef void *PMR_IMPL_PRIVDATA; -+ -+/*! Type for holding flags passed to the PMR factory. -+ */ -+typedef PVRSRV_MEMALLOCFLAGS_T PMR_FLAGS_T; -+ -+/*! Mapping table for the allocation. -+ * -+ * PMR's can be sparse in which case not all the logical addresses in it are -+ * valid. The mapping table translates logical offsets into physical offsets. -+ * -+ * This table is always passed to the PMR factory regardless if the memory is -+ * sparse or not. In case of non-sparse memory all virtual offsets are mapped -+ * to physical offsets. -+ */ -+typedef struct _PMR_MAPPING_TABLE_ PMR_MAPPING_TABLE; -+ -+/*! Private data passed to the ::PFN_MMAP_FN function. -+ */ -+typedef void *PMR_MMAP_DATA; -+ -+#define PMR_IMPL_TYPES \ -+ X(NONE), \ -+ X(OSMEM), \ -+ X(LMA), \ -+ X(DMABUF), \ -+ X(EXTMEM), \ -+ X(DC), \ -+ X(TDFWMEM), \ -+ X(TDSECBUF), \ -+ X(LAST) -+ -+/*! PMR factory type. -+ */ -+typedef enum _PMR_IMPL_TYPE_ -+{ -+#define X(type) PMR_TYPE_##type -+ PMR_IMPL_TYPES -+#undef X -+} PMR_IMPL_TYPE; -+ -+/*************************************************************************/ /*! -+@Brief Callback function type PFN_LOCK_PHYS_ADDRESSES_FN -+ -+@Description Called to lock down the physical addresses for all pages -+ allocated for a PMR. -+ The default implementation is to simply increment a -+ lock-count for debugging purposes. -+ If overridden, the PFN_LOCK_PHYS_ADDRESSES_FN function will -+ be called when someone first requires a physical address, -+ and the PFN_UNLOCK_PHYS_ADDRESSES_FN counterpart will be -+ called when the last such reference is released. -+ The PMR implementation may assume that physical addresses -+ will have been "locked" in this manner before any call is -+ made to the pfnDevPhysAddr() callback -+ -+@Input pvPriv Private data (which was generated by the -+ PMR factory when PMR was created) -+ -+@Return PVRSRV_OK if the operation was successful, an error code -+ otherwise. -+*/ /**************************************************************************/ -+typedef PVRSRV_ERROR (*PFN_LOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv); -+ -+/*************************************************************************/ /*! -+@Brief Callback function type PFN_UNLOCK_PHYS_ADDRESSES_FN -+ -+@Description Called to release the lock taken on the physical addresses -+ for all pages allocated for a PMR. 
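The PMR_IMPL_TYPES X-macro above is expanded once with X(type) mapped to PMR_TYPE_##type to build the enum; expanding the same list again keeps any parallel table in sync automatically. A sketch of that pattern (the string table is an illustration of the technique, not necessarily how PMR_GetTypeStr() is actually implemented):

/* Expand the same X-macro a second time to get printable names that always
 * stay in step with the PMR_IMPL_TYPE enum. */
static const IMG_CHAR *const gapszPMRTypeNames[] =
{
#define X(type) #type
    PMR_IMPL_TYPES
#undef X
};

/* PMR_TYPE_OSMEM is 1, so gapszPMRTypeNames[PMR_TYPE_OSMEM] is "OSMEM";
 * any PMR_IMPL_TYPE value up to and including PMR_TYPE_LAST is in range. */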
-+ The default implementation is to simply decrement a -+ lock-count for debugging purposes. -+ If overridden, the PFN_UNLOCK_PHYS_ADDRESSES_FN will be -+ called when the last reference taken on the PMR is -+ released. -+ -+@Input pvPriv Private data (which was generated by the -+ PMR factory when PMR was created) -+ -+@Return PVRSRV_OK if the operation was successful, an error code -+ otherwise. -+*/ /**************************************************************************/ -+typedef PVRSRV_ERROR (*PFN_UNLOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv); -+ -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+/*************************************************************************/ /*! -+@Brief Callback function type PFN_DEV_PHYS_ADDR_FN -+ -+@Description Called to obtain one or more physical addresses for given -+ offsets within a PMR. -+ -+ The PFN_LOCK_PHYS_ADDRESSES_FN callback (if overridden) is -+ guaranteed to have been called prior to calling the -+ PFN_DEV_PHYS_ADDR_FN callback and the caller promises not to -+ rely on the physical address thus obtained after the -+ PFN_UNLOCK_PHYS_ADDRESSES_FN callback is called. -+ -+ Implementation of this callback is mandatory. -+ -+@Input pvPriv Private data (which was generated by the -+ PMR factory when PMR was created) -+@Input ui32Log2PageSize The log2 page size. -+@Input ui32NumOfAddr The number of addresses to be returned -+@Input puiOffset The offset from the start of the PMR -+ (in bytes) for which the physical -+ address is required. Where multiple -+ addresses are requested, this will -+ contain a list of offsets. -+@Input ui64IPAPolicyValue The Intermediate Physical Address (IPA) -+ Policy value to be applied to -+ the physical address -+@Input ui64IPAClearMask The IPA Clear mask to be applied to -+ the physical address when setting policy. -+@Output pbValid List of boolean flags indicating which -+ addresses in the returned list -+ (psDevAddrPtr) are valid (for sparse -+ allocations, not all pages may have a -+ physical backing) -+@Output psDevAddrPtr Returned list of physical addresses -+ -+@Return PVRSRV_OK if the operation was successful, an error code -+ otherwise. -+*/ /**************************************************************************/ -+#else -+/*************************************************************************/ /*! -+@Brief Callback function type PFN_DEV_PHYS_ADDR_FN -+ -+@Description Called to obtain one or more physical addresses for given -+ offsets within a PMR. -+ -+ The PFN_LOCK_PHYS_ADDRESSES_FN callback (if overridden) is -+ guaranteed to have been called prior to calling the -+ PFN_DEV_PHYS_ADDR_FN callback and the caller promises not to -+ rely on the physical address thus obtained after the -+ PFN_UNLOCK_PHYS_ADDRESSES_FN callback is called. -+ -+ Implementation of this callback is mandatory. -+ -+@Input pvPriv Private data (which was generated by the -+ PMR factory when PMR was created) -+@Input ui32Log2PageSize The log2 page size. -+@Input ui32NumOfAddr The number of addresses to be returned -+@Input puiOffset The offset from the start of the PMR -+ (in bytes) for which the physical -+ address is required. Where multiple -+ addresses are requested, this will -+ contain a list of offsets. 
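As described above, a factory whose pages are always resident can satisfy the lock/unlock callbacks with nothing more than a debug counter. A sketch, where EXAMPLE_PMR_DATA and all of its fields are made up for illustration (the later sketches reuse the same struct):

/* Sketch of a factory's private data and its lock/unlock callbacks. The
 * pages never move, so locking only maintains a count for debugging. */
typedef struct _EXAMPLE_PMR_DATA_
{
    IMG_UINT32       ui32LockCount;   /* debug lock count */
    IMG_DEV_PHYADDR  sBaseDevPAddr;   /* device address of byte 0 */
    void            *pvKernelBase;    /* kernel-virtual view of the block */
    size_t           uiSize;          /* size of the block in bytes */
} EXAMPLE_PMR_DATA;

static PVRSRV_ERROR ExampleLockPhysAddresses(PMR_IMPL_PRIVDATA pvPriv)
{
    EXAMPLE_PMR_DATA *psData = pvPriv;

    psData->ui32LockCount++;
    return PVRSRV_OK;
}

static PVRSRV_ERROR ExampleUnlockPhysAddresses(PMR_IMPL_PRIVDATA pvPriv)
{
    EXAMPLE_PMR_DATA *psData = pvPriv;

    PVR_ASSERT(psData->ui32LockCount > 0);
    psData->ui32LockCount--;
    return PVRSRV_OK;
}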
-+@Output pbValid List of boolean flags indicating which -+ addresses in the returned list -+ (psDevAddrPtr) are valid (for sparse -+ allocations, not all pages may have a -+ physical backing) -+@Output psDevAddrPtr Returned list of physical addresses -+ -+@Return PVRSRV_OK if the operation was successful, an error code -+ otherwise. -+*/ /**************************************************************************/ -+#endif -+typedef PVRSRV_ERROR (*PFN_DEV_PHYS_ADDR_FN)(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_DEVMEM_OFFSET_T *puiOffset, -+#if defined(PVRSRV_SUPPORT_IPA_FEATURE) -+ IMG_UINT64 ui64IPAPolicyValue, -+ IMG_UINT64 ui64IPAClearMask, -+#endif -+ IMG_BOOL *pbValid, -+ IMG_DEV_PHYADDR *psDevAddrPtr); -+ -+/*************************************************************************/ /*! -+@Brief Callback function type PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN -+ -+@Description Called to obtain a kernel-accessible address (mapped to a -+ virtual address if required) for the PMR for use internally -+ in Services. -+ -+ Implementation of this function for the (default) PMR factory providing -+ OS-allocations is mandatory (the driver will expect to be able to call -+ this function for OS-provided allocations). -+ For other PMR factories, implementation of this function is only necessary -+ where an MMU mapping is required for the Kernel to be able to access the -+ allocated memory. -+ If no mapping is needed, this function can remain unimplemented and the -+ pfn may be set to NULL. -+@Input pvPriv Private data (which was generated by -+ the PMR factory when PMR was created) -+@Input uiOffset Offset from the beginning of the PMR -+ at which mapping is to start -+@Input uiSize Size of mapping (in bytes) -+@Output ppvKernelAddressOut Mapped kernel address -+@Output phHandleOut Returned handle of the new mapping -+@Input ulFlags Mapping flags -+ -+@Return PVRSRV_OK if the mapping was successful, an error code -+ otherwise. -+*/ /**************************************************************************/ -+typedef PVRSRV_ERROR (*PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv, -+ size_t uiOffset, -+ size_t uiSize, -+ void **ppvKernelAddressOut, -+ IMG_HANDLE *phHandleOut, -+ PMR_FLAGS_T ulFlags); -+ -+/*************************************************************************/ /*! -+@Brief Callback function type PFN_RELEASE_KERNEL_MAPPING_DATA_FN -+ -+@Description Called to release a mapped kernel virtual address -+ -+ Implementation of this callback is mandatory if -+ PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN is provided for the PMR factory, -+ otherwise this function can remain unimplemented and the pfn may be set -+ to NULL. -+ -+@Input pvPriv Private data (which was generated by the -+ PMR factory when PMR was created) -+@Input hHandle Handle of the mapping to be released -+ -+@Return None -+*/ /**************************************************************************/ -+typedef void (*PFN_RELEASE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_HANDLE hHandle); -+ -+/*************************************************************************/ /*! -+@Brief Callback function type PFN_READ_BYTES_FN -+ -+@Description Called to read bytes from an unmapped allocation -+ -+ Implementation of this callback is optional - where it is not provided, -+ the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN to map the entire -+ PMR (if an MMU mapping is required for the Kernel to be able to access the -+ allocated memory). 
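For a factory that backs each PMR with one physically contiguous block, the non-IPA form of pfnDevPhysAddr above reduces to adding each requested offset onto a stored base address. A sketch reusing the EXAMPLE_PMR_DATA fields assumed earlier; the IPA-enabled variant of the callback is not handled here:

/* Sketch of pfnDevPhysAddr for a physically contiguous allocation: every
 * offset is valid and simply adds onto the base device address. */
static PVRSRV_ERROR ExampleDevPhysAddr(PMR_IMPL_PRIVDATA pvPriv,
                                       IMG_UINT32 ui32Log2PageSize,
                                       IMG_UINT32 ui32NumOfAddr,
                                       IMG_DEVMEM_OFFSET_T *puiOffset,
                                       IMG_BOOL *pbValid,
                                       IMG_DEV_PHYADDR *psDevAddrPtr)
{
    EXAMPLE_PMR_DATA *psData = pvPriv;
    IMG_UINT32 i;

    PVR_UNREFERENCED_PARAMETER(ui32Log2PageSize);

    for (i = 0; i < ui32NumOfAddr; i++)
    {
        psDevAddrPtr[i].uiAddr = psData->sBaseDevPAddr.uiAddr + puiOffset[i];
        pbValid[i] = IMG_TRUE;
    }

    return PVRSRV_OK;
}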
-+ -+@Input pvPriv Private data (which was generated by the -+ PMR factory when PMR was created) -+@Input uiOffset Offset from the beginning of the PMR at -+ which to begin reading -+@Output pcBuffer Buffer in which to return the read data -+@Input uiBufSz Number of bytes to be read -+@Output puiNumBytes Number of bytes actually read (may be -+ less than uiBufSz) -+ -+@Return PVRSRV_OK if the read was successful, an error code -+ otherwise. -+*/ /**************************************************************************/ -+typedef PVRSRV_ERROR (*PFN_READ_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT8 *pcBuffer, -+ size_t uiBufSz, -+ size_t *puiNumBytes); -+ -+/*************************************************************************/ /*! -+@Brief Callback function type PFN_WRITE_BYTES_FN -+ -+@Description Called to write bytes into an unmapped allocation -+ -+ Implementation of this callback is optional - where it is not provided, -+ the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN to map the entire -+ PMR (if an MMU mapping is required for the Kernel to be able to access the -+ allocated memory). -+ -+@Input pvPriv Private data (which was generated by the -+ PMR factory when PMR was created) -+@Input uiOffset Offset from the beginning of the PMR at -+ which to begin writing -+@Input pcBuffer Buffer containing the data to be written -+@Input uiBufSz Number of bytes to be written -+@Output puiNumBytes Number of bytes actually written (may be -+ less than uiBufSz) -+ -+@Return PVRSRV_OK if the write was successful, an error code -+ otherwise. -+*/ /**************************************************************************/ -+typedef PVRSRV_ERROR (*PFN_WRITE_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_UINT8 *pcBuffer, -+ size_t uiBufSz, -+ size_t *puiNumBytes); -+ -+/*************************************************************************/ /*! -+@Brief Callback function type PFN_CHANGE_SPARSE_MEM_FN -+ -+@Description Called to modify the physical backing for a given sparse -+ allocation. -+ The caller provides a list of the pages within the sparse -+ allocation which should be backed with a physical allocation -+ and a list of the pages which do not require backing. -+ -+ Implementation of this callback is mandatory. -+ -+@Input pvPriv Private data (which was generated by the -+ PMR factory when PMR was created) -+@Input psPMR The PMR of the sparse allocation to be -+ modified -+@Input ui32AllocPageCount The number of pages specified in -+ pai32AllocIndices -+@Input pai32AllocIndices The list of pages in the sparse -+ allocation that should be backed with a -+ physical allocation. Pages are -+ referenced by their index within the -+ sparse allocation (e.g. in a 10 page -+ allocation, pages are denoted by -+ indices 0 to 9) -+@Input ui32FreePageCount The number of pages specified in -+ pai32FreeIndices -+@Input pai32FreeIndices The list of pages in the sparse -+ allocation that do not require -+ a physical allocation. -+@Input ui32Flags Allocation flags -+ -+@Return PVRSRV_OK if the sparse allocation physical backing was updated -+ successfully, an error code otherwise. 
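Where a factory already keeps a kernel-virtual view of the whole allocation, pfnReadBytes can be a bounded copy that clamps at the end of the PMR and reports how much was actually read, as the description above requires. A sketch, again using the assumed EXAMPLE_PMR_DATA fields; plain memcpy stands in for whatever copy helper the driver prefers:

/* Sketch of pfnReadBytes for a factory with a resident kernel-virtual
 * buffer: clamp the request to the end of the allocation and copy. */
static PVRSRV_ERROR ExampleReadBytes(PMR_IMPL_PRIVDATA pvPriv,
                                     IMG_DEVMEM_OFFSET_T uiOffset,
                                     IMG_UINT8 *pcBuffer,
                                     size_t uiBufSz,
                                     size_t *puiNumBytes)
{
    EXAMPLE_PMR_DATA *psData = pvPriv;
    size_t uiToCopy;

    if (uiOffset >= psData->uiSize)
    {
        *puiNumBytes = 0;
        return PVRSRV_ERROR_INVALID_PARAMS;
    }

    uiToCopy = psData->uiSize - (size_t)uiOffset;
    if (uiToCopy > uiBufSz)
    {
        uiToCopy = uiBufSz;
    }

    memcpy(pcBuffer, (IMG_UINT8 *)psData->pvKernelBase + uiOffset, uiToCopy);
    *puiNumBytes = uiToCopy;

    return PVRSRV_OK;
}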
-+*/ /**************************************************************************/ -+typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_FN)(PMR_IMPL_PRIVDATA pPriv, -+ const PMR *psPMR, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *pai32AllocIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pai32FreeIndices, -+ IMG_UINT32 uiFlags); -+ -+/*************************************************************************/ /*! -+@Brief Callback function type PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN -+ -+@Description Called to modify which pages are mapped for a given sparse -+ allocation. -+ The caller provides a list of the pages within the sparse -+ allocation which should be given a CPU mapping and a list -+ of the pages which do not require a CPU mapping. -+ -+ Implementation of this callback is mandatory. -+ -+@Input pvPriv Private data (which was generated by the -+ PMR factory when PMR was created) -+@Input psPMR The PMR of the sparse allocation to be -+ modified -+@Input sCpuVAddrBase The virtual base address of the sparse -+ allocation -+@Input ui32AllocPageCount The number of pages specified in -+ pai32AllocIndices -+@Input pai32AllocIndices The list of pages in the sparse -+ allocation that should be given a CPU -+ mapping. Pages are referenced by their -+ index within the sparse allocation (e.g. -+ in a 10 page allocation, pages are -+ denoted by indices 0 to 9) -+@Input ui32FreePageCount The number of pages specified in -+ pai32FreeIndices -+@Input pai32FreeIndices The list of pages in the sparse -+ allocation that do not require a CPU -+ mapping. -+ -+@Return PVRSRV_OK if the page mappings were updated successfully, an -+ error code otherwise. -+*/ /**************************************************************************/ -+typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN)(PMR_IMPL_PRIVDATA pPriv, -+ const PMR *psPMR, -+ IMG_UINT64 sCpuVAddrBase, -+ IMG_UINT32 ui32AllocPageCount, -+ IMG_UINT32 *pai32AllocIndices, -+ IMG_UINT32 ui32FreePageCount, -+ IMG_UINT32 *pai32FreeIndices); -+ -+/*************************************************************************/ /*! -+@Brief Callback function type PFN_MMAP_FN -+ -+@Description Called to map pages in the specified PMR. -+ -+ Implementation of this callback is optional. -+ Where it is provided, it will be used in place of OSMMapPMRGeneric(). -+ -+@Input pvPriv Private data (which was generated by the -+ PMR factory when PMR was created) -+@Input psPMR The PMR of the allocation to be mapped -+@Input pMMapData OS-specific data to describe how mapping -+ should be performed -+ -+@Return PVRSRV_OK if the mapping was successful, an error code -+ otherwise. -+*/ /**************************************************************************/ -+typedef PVRSRV_ERROR (*PFN_MMAP_FN)(PMR_IMPL_PRIVDATA pPriv, -+ PMR *psPMR, -+ PMR_MMAP_DATA pMMapData); -+ -+/*************************************************************************/ /*! -+@Brief Callback function type PFN_FINALIZE_FN -+ -+@Description Called to destroy the PMR. -+ This callback will be called only when all references to -+ the PMR have been dropped. -+ The PMR was created via a call to PhysmemNewRamBackedPMR() -+ and is destroyed via this callback. -+ -+ Implementation of this callback is mandatory. 
-+ -+@Input pvPriv Private data (which was generated by the -+ PMR factory when PMR was created) -+ -+@Return None -+*/ /**************************************************************************/ -+typedef void (*PFN_FINALIZE_FN)(PMR_IMPL_PRIVDATA pvPriv); -+ -+/*************************************************************************/ /*! -+@Brief Callback function type PFN_ACQUIRE_PMR_FACTORY_LOCK_FN -+ -+@Description Called to acquire the PMR factory's global lock, if it has one, -+ hence callback optional. Factories which support entry points -+ in addition to the normal bridge calls, for example, from the -+ native OS that manipulate the PMR reference count should -+ create a factory lock and implementations for these call backs. -+ -+ Implementation of this callback is optional. -+ -+@Return None -+*/ -+/*****************************************************************************/ -+typedef void (*PFN_ACQUIRE_PMR_FACTORY_LOCK_FN)(void); -+ -+/*************************************************************************/ /*! -+@Brief Callback function type PFN_RELEASE_PMR_FACTORY_LOCK_FN -+ -+@Description Called to release the PMR factory's global lock acquired by calling -+ pfn_acquire_pmr_factory_lock callback. -+ -+ Implementation of this callback is optional. -+ -+@Return None -+*/ /**************************************************************************/ -+typedef void (*PFN_RELEASE_PMR_FACTORY_LOCK_FN)(void); -+ -+#ifdef SUPPORT_PMR_DEFERRED_FREE -+/*************************************************************************/ /*! -+@Brief Callback function type PFN_ZOMBIFY_FN -+ -+@Description Called to perform factory actions necessary when PMR becomes -+ a zombie PMR. -+ -+ This function should at least adjust the driver/process memory -+ stats to reflect the amount of memory is occupied by the zombie -+ PMRs and at the same time subtract the memory from the main -+ memory stat the pages are accounted under. -+ -+ Implementation of this callback is required when SUPPORT_PMR_DEFERRED_FREE=1. -+ -+@Return PVRSRV_OK if the operation was successful, an error code -+ otherwise. -+*/ /**************************************************************************/ -+typedef PVRSRV_ERROR (*PFN_ZOMBIFY_FN)(PMR_IMPL_PRIVDATA pvPriv, -+ PMR *psPMR); -+#endif -+ -+/*! PMR factory callback table. -+ */ -+struct _PMR_IMPL_FUNCTAB_ { -+ /*! Callback function pointer, see ::PFN_LOCK_PHYS_ADDRESSES_FN */ -+ PFN_LOCK_PHYS_ADDRESSES_FN pfnLockPhysAddresses; -+ /*! Callback function pointer, see ::PFN_UNLOCK_PHYS_ADDRESSES_FN */ -+ PFN_UNLOCK_PHYS_ADDRESSES_FN pfnUnlockPhysAddresses; -+ -+ /*! Callback function pointer, see ::PFN_DEV_PHYS_ADDR_FN */ -+ PFN_DEV_PHYS_ADDR_FN pfnDevPhysAddr; -+ -+ /*! Callback function pointer, see ::PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN */ -+ PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN pfnAcquireKernelMappingData; -+ /*! Callback function pointer, see ::PFN_RELEASE_KERNEL_MAPPING_DATA_FN */ -+ PFN_RELEASE_KERNEL_MAPPING_DATA_FN pfnReleaseKernelMappingData; -+ -+ /*! Callback function pointer, see ::PFN_READ_BYTES_FN */ -+ PFN_READ_BYTES_FN pfnReadBytes; -+ /*! Callback function pointer, see ::PFN_WRITE_BYTES_FN */ -+ PFN_WRITE_BYTES_FN pfnWriteBytes; -+ -+ /*! Callback function pointer, see ::PFN_CHANGE_SPARSE_MEM_FN */ -+ PFN_CHANGE_SPARSE_MEM_FN pfnChangeSparseMem; -+ /*! Callback function pointer, see ::PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN */ -+ PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN pfnChangeSparseMemCPUMap; -+ -+ /*! 
Callback function pointer, see ::PFN_MMAP_FN */ -+ PFN_MMAP_FN pfnMMap; -+ -+ /*! Callback function pointer, see ::PFN_FINALIZE_FN */ -+ PFN_FINALIZE_FN pfnFinalize; -+ -+ /*! Callback function pointer, see ::PFN_ACQUIRE_PMR_FACTORY_LOCK_FN */ -+ PFN_ACQUIRE_PMR_FACTORY_LOCK_FN pfnGetPMRFactoryLock; -+ -+ /*! Callback function pointer, see ::PFN_RELEASE_PMR_FACTORY_LOCK_FN */ -+ PFN_RELEASE_PMR_FACTORY_LOCK_FN pfnReleasePMRFactoryLock; -+ -+#ifdef SUPPORT_PMR_DEFERRED_FREE -+ /*! Callback function pointer, see ::PFN_ZOMBIFY_FN */ -+ PFN_ZOMBIFY_FN pfnZombify; -+#endif -+}; -+ -+/*! PMR factory callback table. -+ */ -+typedef struct _PMR_IMPL_FUNCTAB_ PMR_IMPL_FUNCTAB; -+ -+#endif /* SRVSRV_PMR_IMPL_H */ -diff --git a/drivers/gpu/drm/img-rogue/pmr_os.c b/drivers/gpu/drm/img-rogue/pmr_os.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pmr_os.c -@@ -0,0 +1,611 @@ -+/*************************************************************************/ /*! -+@File -+@Title Linux OS PMR functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#include -+#include -+#include -+#include -+#include -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) -+#include -+#include -+#endif -+ -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include "allocmem.h" -+#include "devicemem_server_utils.h" -+#include "pmr.h" -+#include "pmr_os.h" -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#include "process_stats.h" -+#endif -+ -+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) -+#include "mmap_stats.h" -+#endif -+ -+#include "kernel_compatibility.h" -+ -+/* -+ * x86_32: -+ * Use vm_insert_page because remap_pfn_range has issues when mapping HIGHMEM -+ * pages with default memory attributes; these HIGHMEM pages are skipped in -+ * set_pages_array_[uc,wc] during allocation; see reserve_pfn_range(). -+ * Also vm_insert_page is faster. -+ * -+ * x86_64: -+ * Use vm_insert_page because it is faster. -+ * -+ * Other platforms: -+ * Use remap_pfn_range by default because it does not issue a cache flush. -+ * It is known that ARM32 benefits from this. When other platforms become -+ * available it has to be investigated if this assumption holds for them as well. -+ * -+ * Since vm_insert_page does more precise memory accounting we have the build -+ * flag PVR_MMAP_USE_VM_INSERT that forces its use. This is useful as a debug -+ * feature. -+ * -+ */ -+#if defined(CONFIG_X86) || defined(PVR_MMAP_USE_VM_INSERT) -+#define PMR_OS_USE_VM_INSERT_PAGE 1 -+#endif -+ -+static void MMapPMROpen(struct vm_area_struct *ps_vma) -+{ -+ PMR *psPMR = ps_vma->vm_private_data; -+ -+ /* Our VM flags should ensure this function never gets called */ -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Unexpected mmap open call, this is probably an application bug.", -+ __func__)); -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: vma struct: 0x%p, vAddr: %#lX, length: %#lX, PMR pointer: 0x%p", -+ __func__, -+ ps_vma, -+ ps_vma->vm_start, -+ ps_vma->vm_end - ps_vma->vm_start, -+ psPMR)); -+ -+ /* In case we get called anyway let's do things right by increasing the refcount and -+ * locking down the physical addresses. */ -+ PMRRefPMR(psPMR); -+ -+ if (PMRLockSysPhysAddresses(psPMR) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Could not lock down physical addresses, aborting.", __func__)); -+ PMRUnrefPMR(psPMR); -+ } -+ else -+ { -+ /* MMapPMROpen() is call when a process is forked, but only if -+ * mappings are to be inherited so increment mapping count of the -+ * PMR to prevent its layout cannot be changed (if sparse). -+ */ -+ PMRCpuMapCountIncr(psPMR); -+ } -+} -+ -+static void MMapPMRClose(struct vm_area_struct *ps_vma) -+{ -+ PMR *psPMR = ps_vma->vm_private_data; -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ { -+ uintptr_t vAddr = ps_vma->vm_start; -+ -+ while (vAddr < ps_vma->vm_end) -+ { -+ /* USER MAPPING */ -+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, -+ (IMG_UINT64)vAddr, -+ OSGetCurrentClientProcessIDKM()); -+ vAddr += PAGE_SIZE; -+ } -+ } -+#else -+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, -+ ps_vma->vm_end - ps_vma->vm_start, -+ OSGetCurrentClientProcessIDKM()); -+#endif -+#endif -+ -+ PMRUnlockSysPhysAddresses(psPMR); -+ /* Decrement the mapping count before Unref of PMR (as Unref could destroy the PMR) */ -+ PMRCpuMapCountDecr(psPMR); -+ PMRUnrefPMR(psPMR); -+} -+ -+/* -+ * This vma operation is used to read data from mmap regions. 
It is called -+ * by access_process_vm, which is called to handle PTRACE_PEEKDATA ptrace -+ * requests and reads from /proc//mem. -+ */ -+static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr, -+ void *buf, int len, int write) -+{ -+ PMR *psPMR = ps_vma->vm_private_data; -+ unsigned long ulOffset = addr - ps_vma->vm_start; -+ size_t uiBytesCopied; -+ PVRSRV_ERROR eError; -+ int iRetVal = -EINVAL; -+ -+ if (write) -+ { -+ eError = PMR_WriteBytes(psPMR, -+ (IMG_DEVMEM_OFFSET_T) ulOffset, -+ buf, -+ len, -+ &uiBytesCopied); -+ } -+ else -+ { -+ eError = PMR_ReadBytes(psPMR, -+ (IMG_DEVMEM_OFFSET_T) ulOffset, -+ buf, -+ len, -+ &uiBytesCopied); -+ } -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Error from %s (%d)", -+ __func__, -+ write ? "PMR_WriteBytes" : "PMR_ReadBytes", -+ eError)); -+ } -+ else -+ { -+ iRetVal = uiBytesCopied; -+ } -+ -+ return iRetVal; -+} -+ -+static const struct vm_operations_struct gsMMapOps = -+{ -+ .open = &MMapPMROpen, -+ .close = &MMapPMRClose, -+ .access = MMapVAccess, -+}; -+ -+static INLINE int _OSMMapPMR(PVRSRV_DEVICE_NODE *psDevNode, -+ struct vm_area_struct *ps_vma, -+ IMG_DEVMEM_OFFSET_T uiOffset, -+ IMG_CPU_PHYADDR *psCpuPAddr, -+ IMG_UINT32 uiLog2PageSize, -+ IMG_BOOL bUseVMInsertPage, -+ IMG_BOOL bUseMixedMap) -+{ -+ IMG_INT32 iStatus; -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) -+ pfn_t sPFN; -+#else -+ unsigned long uiPFN; -+#endif -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) -+ sPFN = phys_to_pfn_t(psCpuPAddr->uiAddr, 0); -+#else -+ uiPFN = psCpuPAddr->uiAddr >> PAGE_SHIFT; -+ PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr->uiAddr); -+#endif -+ -+ /* -+ * vm_insert_page() allows insertion of individual pages into user -+ * VMA space _only_ if page is a order-zero allocated page -+ */ -+ if (bUseVMInsertPage) -+ { -+ if (bUseMixedMap) -+ { -+ /* -+ * This path is just for debugging. It should be -+ * equivalent to the remap_pfn_range() path. -+ */ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) -+ vm_fault_t vmf; -+ -+ vmf = vmf_insert_mixed(ps_vma, -+ ps_vma->vm_start + uiOffset, -+ sPFN); -+ if (vmf & VM_FAULT_ERROR) -+ { -+ iStatus = vm_fault_to_errno(vmf, 0); -+ } -+ else -+ { -+ iStatus = 0; -+ } -+#else -+ iStatus = vm_insert_mixed(ps_vma, -+ ps_vma->vm_start + uiOffset, -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) -+ sPFN); -+#else -+ uiPFN); -+#endif -+#endif -+ } -+ else -+ { -+ /* Since kernel 3.7 this sets VM_MIXEDMAP internally */ -+ iStatus = vm_insert_page(ps_vma, -+ ps_vma->vm_start + uiOffset, -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) -+ pfn_t_to_page(sPFN)); -+#else -+ pfn_to_page(uiPFN)); -+#endif -+ } -+ } -+ else -+ { -+ /* -+ NOTE: Regarding absence of dma_mmap_coherent() in _OSMMapPMR() -+ -+ The current services mmap model maps in a PMR's full-length size -+ into the user VMA & applies any user specified offset to the kernel -+ returned zero-offset based VA in services client; this essentially -+ means services server ignores ps_vma->vm_pgoff (this houses hPMR) -+ during a mmap call. -+ -+ Furthermore, during a DMA/CMA memory allocation, multiple order-n -+ pages are used to satisfy an allocation request due to DMA/CMA -+ framework rounding-up allocation size to next power-of-two which -+ can lead to wasted memory (so we don't allocate using single call). 
-+ -+ The combination of the above two issues mean that we cannot use the -+ dma_mmap_coherent() for a number of reasons outlined below: -+ -+ - Services mmap semantics does not fit with dma_mmap_coherent() -+ which requires proper ps_vma->vm_pgoff; seeing this houses a -+ hPMR handle value, calls into dma_mmap_coherent() fails. This -+ could be avoided by forcing ps_vma->vm_pgoff to zero but the -+ ps_vma->vm_pgoff is applied to DMA bus address PFN and not -+ user VMA which is always mapped at ps_vma->vm_start. -+ -+ - As multiple order-n pages are used for DMA/CMA allocations, a -+ single dma_mmap_coherent() call with a vma->vm_pgoff set to -+ zero cannot (maybe) be used because there is no guarantee that -+ all of the multiple order-n pages in the PMR are physically -+ contiguous from the first entry to the last. Whilst this is -+ highly likely to be the case, there is no guarantee that it -+ will be so we cannot depend on this being the case. -+ -+ The solution is to manually mmap DMA/CMA pages into user VMA -+ using remap_pfn_range() directly. Furthermore, accounting is -+ always compromised for DMA/CMA allocations. -+ */ -+ size_t uiNumContiguousBytes = 1ULL << uiLog2PageSize; -+ -+ iStatus = remap_pfn_range(ps_vma, -+ ps_vma->vm_start + uiOffset, -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) -+ pfn_t_to_pfn(sPFN), -+#else -+ uiPFN, -+#endif -+ uiNumContiguousBytes, -+ ps_vma->vm_page_prot); -+ } -+ -+ return iStatus; -+} -+ -+PVRSRV_ERROR -+OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData) -+{ -+ struct vm_area_struct *ps_vma = pOSMMapData; -+ PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR); -+ PVRSRV_ERROR eError; -+ size_t uiLength; -+ IMG_INT32 iStatus; -+ IMG_DEVMEM_OFFSET_T uiOffset; -+ IMG_UINT32 ui32CPUCacheFlags; -+ pgprot_t sPageProt; -+ IMG_CPU_PHYADDR asCpuPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; -+ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; -+ IMG_UINT32 uiOffsetIdx; -+ IMG_UINT32 uiNumOfPFNs; -+ IMG_UINT32 uiLog2PageSize; -+ IMG_CPU_PHYADDR *psCpuPAddr; -+ IMG_BOOL *pbValid; -+ IMG_BOOL bUseMixedMap = IMG_FALSE; -+ IMG_BOOL bUseVMInsertPage = IMG_FALSE; -+ vm_flags_t uVMFlags = ps_vma->vm_flags; -+ -+ /* if writeable but not shared mapping is requested then fail */ -+ PVR_RETURN_IF_INVALID_PARAM(((uVMFlags & VM_WRITE) == 0) || -+ ((uVMFlags & VM_SHARED) != 0)); -+ -+ eError = PMRLockSysPhysAddresses(psPMR); -+ if (eError != PVRSRV_OK) -+ { -+ goto e0; -+ } -+ -+ sPageProt = vm_get_page_prot(uVMFlags); -+ -+ eError = DevmemCPUCacheMode(psDevNode, -+ PMR_Flags(psPMR), -+ &ui32CPUCacheFlags); -+ if (eError != PVRSRV_OK) -+ { -+ goto e0; -+ } -+ -+ switch (ui32CPUCacheFlags) -+ { -+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: -+ sPageProt = pgprot_noncached(sPageProt); -+ break; -+ -+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: -+ sPageProt = pgprot_writecombine(sPageProt); -+ break; -+ -+ case PVRSRV_MEMALLOCFLAG_CPU_CACHED: -+ { -+/* Do not set to write-combine for plato */ -+#if !defined(PLATO_MEMORY_CONFIG) -+ PHYS_HEAP *psPhysHeap = PMR_PhysHeap(psPMR); -+ -+ if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA) -+ sPageProt = pgprot_writecombine(sPageProt); -+#endif -+ break; -+ } -+ -+ default: -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto e1; -+ } -+ ps_vma->vm_page_prot = sPageProt; -+ -+ uVMFlags |= VM_IO; -+ -+ /* Don't include the mapping in core dumps */ -+ uVMFlags |= VM_DONTDUMP; -+ -+ /* -+ * Disable mremap because our nopage handler assumes all -+ * page requests have already been validated. 
-+ */ -+ uVMFlags |= VM_DONTEXPAND; -+ -+ /* Don't allow mapping to be inherited across a process fork */ -+ uVMFlags |= VM_DONTCOPY; -+ -+ uiLength = ps_vma->vm_end - ps_vma->vm_start; -+ -+ /* Is this mmap targeting non order-zero pages or does it use pfn mappings? -+ * If yes, don't use vm_insert_page */ -+ uiLog2PageSize = PMR_GetLog2Contiguity(psPMR); -+ -+#if defined(PMR_OS_USE_VM_INSERT_PAGE) -+ bUseVMInsertPage = (uiLog2PageSize == PAGE_SHIFT) && (PMR_GetType(psPMR) != PMR_TYPE_EXTMEM); -+#endif -+ -+ /* Can we use stack allocations */ -+ uiNumOfPFNs = uiLength >> uiLog2PageSize; -+ if (uiNumOfPFNs > PMR_MAX_TRANSLATION_STACK_ALLOC) -+ { -+ psCpuPAddr = OSAllocMem(uiNumOfPFNs * sizeof(*psCpuPAddr)); -+ if (psCpuPAddr == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto e1; -+ } -+ -+ /* Should allocation fail, clean-up here before exiting */ -+ pbValid = OSAllocMem(uiNumOfPFNs * sizeof(*pbValid)); -+ if (pbValid == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ OSFreeMem(psCpuPAddr); -+ goto e2; -+ } -+ } -+ else -+ { -+ psCpuPAddr = asCpuPAddr; -+ pbValid = abValid; -+ } -+ -+ /* Obtain map range pfns */ -+ eError = PMR_CpuPhysAddr(psPMR, -+ uiLog2PageSize, -+ uiNumOfPFNs, -+ 0, -+ psCpuPAddr, -+ pbValid); -+ if (eError != PVRSRV_OK) -+ { -+ goto e3; -+ } -+ -+ /* -+ * Scan the map range for pfns without struct page* handling. If -+ * we find one, this is a mixed map, and we can't use vm_insert_page() -+ * NOTE: vm_insert_page() allows insertion of individual pages into user -+ * VMA space _only_ if said page is an order-zero allocated page. -+ */ -+ if (bUseVMInsertPage) -+ { -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) -+ pfn_t sPFN; -+#else -+ unsigned long uiPFN; -+#endif -+ -+ for (uiOffsetIdx = 0; uiOffsetIdx < uiNumOfPFNs; ++uiOffsetIdx) -+ { -+ if (pbValid[uiOffsetIdx]) -+ { -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) -+ sPFN = phys_to_pfn_t(psCpuPAddr[uiOffsetIdx].uiAddr, 0); -+ -+ if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0) -+#else -+ uiPFN = psCpuPAddr[uiOffsetIdx].uiAddr >> PAGE_SHIFT; -+ PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr[uiOffsetIdx].uiAddr); -+ -+ if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0) -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ -+ { -+ bUseMixedMap = IMG_TRUE; -+ break; -+ } -+ } -+ } -+ -+ if (bUseMixedMap) -+ { -+ uVMFlags |= VM_MIXEDMAP; -+ } -+ } -+ else -+ { -+ uVMFlags |= VM_PFNMAP; -+ } -+ -+ /* Actually initialise the flags */ -+ pvr_vm_flags_init(ps_vma, uVMFlags); -+ -+ /* For each PMR page-size contiguous bytes, map page(s) into user VMA */ -+ for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<> uiLog2PageSize; -+ /* -+ * Only map in pages that are valid, any that aren't will be -+ * picked up by the nopage handler which will return a zeroed -+ * page for us. -+ */ -+ if (pbValid[uiOffsetIdx]) -+ { -+ iStatus = _OSMMapPMR(psDevNode, -+ ps_vma, -+ uiOffset, -+ &psCpuPAddr[uiOffsetIdx], -+ uiLog2PageSize, -+ bUseVMInsertPage, -+ bUseMixedMap); -+ if (iStatus) -+ { -+ /* Failure error code doesn't get propagated */ -+ eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED; -+ PVR_ASSERT(0); -+ goto e3; -+ } -+ } -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) -+#define PMR_OS_BAD_CPUADDR 0x0BAD0BAD -+ { -+ IMG_CPU_PHYADDR sPAddr; -+ sPAddr.uiAddr = pbValid[uiOffsetIdx] ? 
-+ psCpuPAddr[uiOffsetIdx].uiAddr : -+ IMG_CAST_TO_CPUPHYADDR_UINT(PMR_OS_BAD_CPUADDR); -+ -+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, -+ (void*)(uintptr_t)(ps_vma->vm_start + uiOffset), -+ sPAddr, -+ 1<vm_private_data = psPMR; -+ -+ /* Install open and close handlers for ref-counting */ -+ ps_vma->vm_ops = &gsMMapOps; -+ -+ /* -+ * Take a reference on the PMR so that it can't be freed while mapped -+ * into the user process. -+ */ -+ PMRRefPMR(psPMR); -+ -+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) -+ /* record the stats */ -+ MMapStatsAddOrUpdatePMR(psPMR, uiLength); -+#endif -+ -+ /* Increment mapping count of the PMR so that its layout cannot be -+ * changed (if sparse). -+ */ -+ PMRCpuMapCountIncr(psPMR); -+ -+ return PVRSRV_OK; -+ -+ /* Error exit paths follow */ -+e3: -+ if (pbValid != abValid) -+ { -+ OSFreeMem(pbValid); -+ } -+e2: -+ if (psCpuPAddr != asCpuPAddr) -+ { -+ OSFreeMem(psCpuPAddr); -+ } -+e1: -+ PMRUnlockSysPhysAddresses(psPMR); -+e0: -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/pmr_os.h b/drivers/gpu/drm/img-rogue/pmr_os.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pmr_os.h -@@ -0,0 +1,62 @@ -+/*************************************************************************/ /*! -+@File -+@Title OS PMR functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description OS specific PMR functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#if !defined(PMR_OS_H) -+#define PMR_OS_H -+ -+#include "pmr_impl.h" -+ -+/*************************************************************************/ /*! -+@Function OSMMapPMRGeneric -+@Description Implements a generic PMR mapping function, which is used -+ to CPU map a PMR where the PMR does not have a mapping -+ function defined by the creating PMR factory. -+@Input psPMR the PMR to be mapped -+@Output pOSMMapData pointer to any private data -+ needed by the generic mapping function -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData); -+ -+#endif /* !defined(PMR_OS_H) */ -diff --git a/drivers/gpu/drm/img-rogue/power.c b/drivers/gpu/drm/img-rogue/power.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/power.c -@@ -0,0 +1,1301 @@ -+/*************************************************************************/ /*! -+@File power.c -+@Title Power management functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Main APIs for power management functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#include "pdump_km.h" -+#include "allocmem.h" -+#include "osfunc.h" -+ -+#include "lock.h" -+#include "pvrsrv.h" -+#include "pvr_debug.h" -+#include "htbserver.h" -+#include "di_server.h" -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+typedef struct _EXTRA_POWER_STATS_ -+{ -+ IMG_UINT64 ui64PreClockSpeedChangeDuration; -+ IMG_UINT64 ui64BetweenPreEndingAndPostStartingDuration; -+ IMG_UINT64 ui64PostClockSpeedChangeDuration; -+} EXTRA_POWER_STATS; -+ -+/* For the power timing stats we need 16 variables to store all the -+ * combinations of forced/not forced, power-on/power-off, pre-power/post-power -+ * and device/system statistics -+ */ -+#define NUM_POWER_STATS (16) -+#define NUM_EXTRA_POWER_STATS 10 -+ -+typedef struct PVRSRV_POWER_STATS_TAG -+{ -+ EXTRA_POWER_STATS asClockSpeedChanges[NUM_EXTRA_POWER_STATS]; -+ IMG_UINT64 ui64PreClockSpeedChangeMark; -+ IMG_UINT64 ui64FirmwareIdleDuration; -+ IMG_UINT32 aui32PowerTimingStats[NUM_POWER_STATS]; -+ IMG_UINT32 ui32ClockSpeedIndexStart; -+ IMG_UINT32 ui32ClockSpeedIndexEnd; -+ IMG_UINT32 ui32FirmwareStartTimestamp; -+} PVRSRV_POWER_STATS; -+#endif -+ -+struct _PVRSRV_POWER_DEV_TAG_ -+{ -+ PFN_PRE_POWER pfnDevicePrePower; -+ PFN_POST_POWER pfnDevicePostPower; -+ PFN_SYS_PRE_POWER pfnSystemPrePower; -+ PFN_SYS_POST_POWER pfnSystemPostPower; -+ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange; -+ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange; -+ PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest; -+ PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest; -+ PFN_GPU_UNITS_POWER_CHANGE pfnGPUUnitsPowerChange; -+ IMG_HANDLE hSysData; -+ IMG_HANDLE hDevCookie; -+ PVRSRV_DEV_POWER_STATE eDefaultPowerState; -+ ATOMIC_T eCurrentPowerState; -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ PVRSRV_POWER_STATS sPowerStats; -+#endif -+}; -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+/* -+ * Power statistics related definitions -+ */ -+ -+/* For the mean time, use an exponentially weighted moving average with a -+ * 1/4 weighting for the new measurement. -+ */ -+#define MEAN_TIME(A, B) ( ((3*(A))/4) + ((1 * (B))/4) ) -+ -+#define UPDATE_TIME(time, newtime) \ -+ ((time) > 0 ? 
MEAN_TIME((time), (newtime)) : (newtime)) -+ -+/* Enum to be used as input to GET_POWER_STAT_INDEX */ -+typedef enum -+{ -+ DEVICE = 0, -+ SYSTEM = 1, -+ POST_POWER = 0, -+ PRE_POWER = 2, -+ POWER_OFF = 0, -+ POWER_ON = 4, -+ NOT_FORCED = 0, -+ FORCED = 8, -+} PVRSRV_POWER_STAT_TYPE; -+ -+/* Macro used to access one of the power timing statistics inside an array */ -+#define GET_POWER_STAT_INDEX(forced,powon,prepow,system) \ -+ ((forced) + (powon) + (prepow) + (system)) -+ -+void PVRSRVSetFirmwareStartTime(PVRSRV_POWER_DEV *psPowerDevice, -+ IMG_UINT32 ui32Time) -+{ -+ PVRSRV_POWER_STATS *psPowerStats = &psPowerDevice->sPowerStats; -+ -+ psPowerStats->ui32FirmwareStartTimestamp = -+ UPDATE_TIME(psPowerStats->ui32FirmwareStartTimestamp, -+ ui32Time); -+} -+ -+void PVRSRVSetFirmwareHandshakeIdleTime(PVRSRV_POWER_DEV *psPowerDevice, -+ IMG_UINT64 ui64Duration) -+{ -+ PVRSRV_POWER_STATS *psPowerStats = &psPowerDevice->sPowerStats; -+ -+ psPowerStats->ui64FirmwareIdleDuration = -+ UPDATE_TIME(psPowerStats->ui64FirmwareIdleDuration, -+ ui64Duration); -+} -+ -+static void _InsertPowerTimeStatistic(PVRSRV_POWER_DEV *psPowerDevice, -+ IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime, -+ IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime, -+ IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower) -+{ -+ PVRSRV_POWER_STATS *psPowerStats = &psPowerDevice->sPowerStats; -+ IMG_UINT32 *pui32Stat; -+ IMG_UINT64 ui64DeviceDiff = ui64DevEndTime - ui64DevStartTime; -+ IMG_UINT64 ui64SystemDiff = ui64SysEndTime - ui64SysStartTime; -+ IMG_UINT32 ui32Index; -+ -+ if (bPrePower) -+ { -+ HTBLOGK(HTB_SF_MAIN_PRE_POWER, bPowerOn, ui64DeviceDiff, ui64SystemDiff); -+ } -+ else -+ { -+ HTBLOGK(HTB_SF_MAIN_POST_POWER, bPowerOn, ui64SystemDiff, ui64DeviceDiff); -+ } -+ -+ ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED, -+ bPowerOn ? POWER_ON : POWER_OFF, -+ bPrePower ? PRE_POWER : POST_POWER, -+ DEVICE); -+ pui32Stat = &psPowerStats->aui32PowerTimingStats[ui32Index]; -+ *pui32Stat = UPDATE_TIME(*pui32Stat, ui64DeviceDiff); -+ -+ ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED, -+ bPowerOn ? POWER_ON : POWER_OFF, -+ bPrePower ? 
PRE_POWER : POST_POWER, -+ SYSTEM); -+ pui32Stat = &psPowerStats->aui32PowerTimingStats[ui32Index]; -+ *pui32Stat = UPDATE_TIME(*pui32Stat, ui64SystemDiff); -+} -+ -+static void _InsertPowerTimeStatisticExtraPre(PVRSRV_POWER_DEV *psPowerDevice, -+ IMG_UINT64 ui64StartTimer, -+ IMG_UINT64 ui64Stoptimer) -+{ -+ PVRSRV_POWER_STATS *psPowerStats = &psPowerDevice->sPowerStats; -+ -+ psPowerStats->asClockSpeedChanges[psPowerStats->ui32ClockSpeedIndexEnd].ui64PreClockSpeedChangeDuration = -+ ui64Stoptimer - ui64StartTimer; -+ -+ psPowerStats->ui64PreClockSpeedChangeMark = OSClockus(); -+} -+ -+static void _InsertPowerTimeStatisticExtraPost(PVRSRV_POWER_DEV *psPowerDevice, -+ IMG_UINT64 ui64StartTimer, -+ IMG_UINT64 ui64StopTimer) -+{ -+ PVRSRV_POWER_STATS *psPowerStats = &psPowerDevice->sPowerStats; -+ IMG_UINT64 ui64Duration = ui64StartTimer - psPowerStats->ui64PreClockSpeedChangeMark; -+ -+ PVR_ASSERT(psPowerStats->ui64PreClockSpeedChangeMark > 0); -+ -+ psPowerStats->asClockSpeedChanges[psPowerStats->ui32ClockSpeedIndexEnd].ui64BetweenPreEndingAndPostStartingDuration = ui64Duration; -+ psPowerStats->asClockSpeedChanges[psPowerStats->ui32ClockSpeedIndexEnd].ui64PostClockSpeedChangeDuration = ui64StopTimer - ui64StartTimer; -+ -+ psPowerStats->ui32ClockSpeedIndexEnd = (psPowerStats->ui32ClockSpeedIndexEnd + 1) % NUM_EXTRA_POWER_STATS; -+ -+ if (psPowerStats->ui32ClockSpeedIndexEnd == psPowerStats->ui32ClockSpeedIndexStart) -+ { -+ psPowerStats->ui32ClockSpeedIndexStart = (psPowerStats->ui32ClockSpeedIndexStart + 1) % NUM_EXTRA_POWER_STATS; -+ } -+ -+ psPowerStats->ui64PreClockSpeedChangeMark = 0; -+} -+ -+static INLINE void _PowerStatsPrintGroup(IMG_UINT32 *pui32Stats, -+ OSDI_IMPL_ENTRY *psEntry, -+ PVRSRV_POWER_STAT_TYPE eForced, -+ PVRSRV_POWER_STAT_TYPE ePowerOn) -+{ -+ IMG_UINT32 ui32Index; -+ -+ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, DEVICE); -+ DIPrintf(psEntry, " Pre-Device: %9u\n", pui32Stats[ui32Index]); -+ -+ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, SYSTEM); -+ DIPrintf(psEntry, " Pre-System: %9u\n", pui32Stats[ui32Index]); -+ -+ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, SYSTEM); -+ DIPrintf(psEntry, " Post-System: %9u\n", pui32Stats[ui32Index]); -+ -+ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, DEVICE); -+ DIPrintf(psEntry, " Post-Device: %9u\n", pui32Stats[ui32Index]); -+} -+ -+int PVRSRVPowerStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry); -+ PVRSRV_POWER_DEV *psPowerDevice = psDeviceNode->psPowerDev; -+ PVRSRV_POWER_STATS *psPowerStats; -+ IMG_UINT32 *pui32Stats; -+ IMG_UINT32 ui32Idx; -+ -+ PVR_UNREFERENCED_PARAMETER(pvData); -+ -+ if (psPowerDevice == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Device not initialised when " -+ "reading power timing stats!")); -+ return -EIO; -+ } -+ -+ psPowerStats = &psPowerDevice->sPowerStats; -+ -+ pui32Stats = &psPowerStats->aui32PowerTimingStats[0]; -+ -+ DIPrintf(psEntry, "Forced Power-on Transition (nanoseconds):\n"); -+ _PowerStatsPrintGroup(pui32Stats, psEntry, FORCED, POWER_ON); -+ DIPrintf(psEntry, "\n"); -+ -+ DIPrintf(psEntry, "Forced Power-off Transition (nanoseconds):\n"); -+ _PowerStatsPrintGroup(pui32Stats, psEntry, FORCED, POWER_OFF); -+ DIPrintf(psEntry, "\n"); -+ -+ DIPrintf(psEntry, "Not Forced Power-on Transition (nanoseconds):\n"); -+ _PowerStatsPrintGroup(pui32Stats, psEntry, NOT_FORCED, POWER_ON); -+ DIPrintf(psEntry, "\n"); -+ -+ DIPrintf(psEntry, "Not Forced 
Power-off Transition (nanoseconds):\n"); -+ _PowerStatsPrintGroup(pui32Stats, psEntry, NOT_FORCED, POWER_OFF); -+ DIPrintf(psEntry, "\n"); -+ -+ -+ DIPrintf(psEntry, "FW bootup time (timer ticks): %u\n", psPowerStats->ui32FirmwareStartTimestamp); -+ DIPrintf(psEntry, "Host Acknowledge Time for FW Idle Signal (timer ticks): %u\n", (IMG_UINT32)(psPowerStats->ui64FirmwareIdleDuration)); -+ DIPrintf(psEntry, "\n"); -+ -+ DIPrintf(psEntry, "Last %d Clock Speed Change Timers (nanoseconds):\n", NUM_EXTRA_POWER_STATS); -+ DIPrintf(psEntry, "Prepare DVFS\tDVFS Change\tPost DVFS\n"); -+ -+ for (ui32Idx = psPowerStats->ui32ClockSpeedIndexStart; -+ ui32Idx != psPowerStats->ui32ClockSpeedIndexEnd; -+ ui32Idx = (ui32Idx + 1) % NUM_EXTRA_POWER_STATS) -+ { -+ DIPrintf(psEntry, "%12llu\t%11llu\t%9llu\n", -+ psPowerStats->asClockSpeedChanges[ui32Idx].ui64PreClockSpeedChangeDuration, -+ psPowerStats->asClockSpeedChanges[ui32Idx].ui64BetweenPreEndingAndPostStartingDuration, -+ psPowerStats->asClockSpeedChanges[ui32Idx].ui64PostClockSpeedChangeDuration); -+ } -+ -+ return 0; -+} -+ -+#else /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ -+ -+static void _InsertPowerTimeStatistic(PVRSRV_POWER_DEV *psPowerDevice, -+ IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime, -+ IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime, -+ IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower) -+{ -+ PVR_UNREFERENCED_PARAMETER(psPowerDevice); -+ PVR_UNREFERENCED_PARAMETER(ui64SysStartTime); -+ PVR_UNREFERENCED_PARAMETER(ui64SysEndTime); -+ PVR_UNREFERENCED_PARAMETER(ui64DevStartTime); -+ PVR_UNREFERENCED_PARAMETER(ui64DevEndTime); -+ PVR_UNREFERENCED_PARAMETER(bForced); -+ PVR_UNREFERENCED_PARAMETER(bPowerOn); -+ PVR_UNREFERENCED_PARAMETER(bPrePower); -+} -+ -+static void _InsertPowerTimeStatisticExtraPre(PVRSRV_POWER_DEV *psPowerDevice, -+ IMG_UINT64 ui64StartTimer, -+ IMG_UINT64 ui64Stoptimer) -+{ -+ PVR_UNREFERENCED_PARAMETER(psPowerDevice); -+ PVR_UNREFERENCED_PARAMETER(ui64StartTimer); -+ PVR_UNREFERENCED_PARAMETER(ui64Stoptimer); -+} -+ -+static void _InsertPowerTimeStatisticExtraPost(PVRSRV_POWER_DEV *psPowerDevice, -+ IMG_UINT64 ui64StartTimer, -+ IMG_UINT64 ui64StopTimer) -+{ -+ PVR_UNREFERENCED_PARAMETER(psPowerDevice); -+ PVR_UNREFERENCED_PARAMETER(ui64StartTimer); -+ PVR_UNREFERENCED_PARAMETER(ui64StopTimer); -+} -+#endif -+ -+const char *PVRSRVSysPowerStateToString(PVRSRV_SYS_POWER_STATE eState) -+{ -+ switch (eState) { -+#define X(name, _) \ -+ case PVRSRV_SYS_POWER_STATE_##name: \ -+ return #name; -+ _PVRSRV_SYS_POWER_STATES -+#undef X -+ default: -+ return "unknown"; -+ } -+} -+ -+const char *PVRSRVDevPowerStateToString(PVRSRV_DEV_POWER_STATE eState) -+{ -+ switch (eState) { -+ case PVRSRV_DEV_POWER_STATE_DEFAULT: -+ return "DEFAULT"; -+ case PVRSRV_DEV_POWER_STATE_OFF: -+ return "OFF"; -+ case PVRSRV_DEV_POWER_STATE_ON: -+ return "ON"; -+ default: -+ return "unknown"; -+ } -+} -+ -+/*! -+ Typedef for a pointer to a function that will be called for re-acquiring -+ device powerlock after releasing it temporarily for some timeout period -+ in function PVRSRVDeviceIdleRequestKM -+ */ -+typedef PVRSRV_ERROR (*PFN_POWER_LOCK_ACQUIRE) (PPVRSRV_DEVICE_NODE psDevNode); -+ -+static inline IMG_UINT64 PVRSRVProcessStatsGetTimeNs(void) -+{ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ return OSClockns64(); -+#else -+ return 0; -+#endif -+} -+ -+static inline IMG_UINT64 PVRSRVProcessStatsGetTimeUs(void) -+{ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ return OSClockus(); -+#else -+ return 0; -+#endif -+} -+ -+/*! 
-+****************************************************************************** -+ -+ @Function _IsSystemStatePowered -+ -+ @Description Tests whether a given system state represents powered-up. -+ -+ @Input eSystemPowerState : a system power state -+ -+ @Return IMG_BOOL -+ -+******************************************************************************/ -+static IMG_BOOL _IsSystemStatePowered(PVRSRV_SYS_POWER_STATE eSystemPowerState) -+{ -+ return (eSystemPowerState == PVRSRV_SYS_POWER_STATE_ON); -+} -+ -+/* We don't expect PID=0 to acquire device power-lock */ -+#define PWR_LOCK_OWNER_PID_CLR_VAL 0 -+ -+PVRSRV_ERROR PVRSRVPowerLockInit(PPVRSRV_DEVICE_NODE psDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = OSLockCreate(&psDeviceNode->hPowerLock); -+ PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); -+ -+ psDeviceNode->uiPwrLockOwnerPID = PWR_LOCK_OWNER_PID_CLR_VAL; -+ return PVRSRV_OK; -+} -+ -+void PVRSRVPowerLockDeInit(PPVRSRV_DEVICE_NODE psDeviceNode) -+{ -+ psDeviceNode->uiPwrLockOwnerPID = PWR_LOCK_OWNER_PID_CLR_VAL; -+ OSLockDestroy(psDeviceNode->hPowerLock); -+} -+ -+IMG_BOOL PVRSRVPwrLockIsLockedByMe(PCPVRSRV_DEVICE_NODE psDeviceNode) -+{ -+ return OSLockIsLocked(psDeviceNode->hPowerLock) && -+ OSGetCurrentClientProcessIDKM() == psDeviceNode->uiPwrLockOwnerPID; -+} -+ -+PVRSRV_ERROR PVRSRVPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode) -+{ -+ OSLockAcquire(psDeviceNode->hPowerLock); -+ -+ /* Only allow to take powerlock when the system power is on */ -+ if (_IsSystemStatePowered(psDeviceNode->eCurrentSysPowerState)) -+ { -+ psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM(); -+ return PVRSRV_OK; -+ } -+ -+ OSLockRelease(psDeviceNode->hPowerLock); -+ -+ return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF; -+} -+ -+PVRSRV_ERROR PVRSRVPowerTryLock(PPVRSRV_DEVICE_NODE psDeviceNode) -+{ -+ if (!(OSTryLockAcquire(psDeviceNode->hPowerLock))) -+ { -+ return PVRSRV_ERROR_RETRY; -+ } -+ -+ /* Only allow to take powerlock when the system power is on */ -+ if (_IsSystemStatePowered(psDeviceNode->eCurrentSysPowerState)) -+ { -+ psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM(); -+ -+ /* System is powered ON, return OK */ -+ return PVRSRV_OK; -+ } -+ else -+ { -+ /* System is powered OFF, release the lock and return error */ -+ OSLockRelease(psDeviceNode->hPowerLock); -+ return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF; -+ } -+} -+ -+/*! -+****************************************************************************** -+ -+ @Function _PVRSRVForcedPowerLock -+ -+ @Description Obtain the mutex for power transitions regardless of system -+ power state -+ -+ @Return Always returns PVRSRV_OK. 
Function prototype required same as -+ PFN_POWER_LOCK_ACQUIRE -+ -+******************************************************************************/ -+static PVRSRV_ERROR _PVRSRVForcedPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode) -+{ -+ OSLockAcquire(psDeviceNode->hPowerLock); -+ psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM(); -+ -+ return PVRSRV_OK; -+} -+ -+void PVRSRVPowerUnlock(PPVRSRV_DEVICE_NODE psDeviceNode) -+{ -+ PVR_ASSERT(PVRSRVPwrLockIsLockedByMe(psDeviceNode)); -+ -+ /* Reset uiPwrLockOwnerPID before releasing lock */ -+ psDeviceNode->uiPwrLockOwnerPID = PWR_LOCK_OWNER_PID_CLR_VAL; -+ OSLockRelease(psDeviceNode->hPowerLock); -+} -+ -+IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice) -+{ -+ return (psPowerDevice->eDefaultPowerState == PVRSRV_DEV_POWER_STATE_OFF); -+} -+ -+PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode, -+ PVRSRV_DEV_POWER_STATE eNewPowerState) -+{ -+ PVRSRV_POWER_DEV *psPowerDevice; -+ -+ psPowerDevice = psDeviceNode->psPowerDev; -+ if (psPowerDevice == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_DEVICE; -+ } -+ -+ psPowerDevice->eDefaultPowerState = eNewPowerState; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVSetDeviceCurrentPowerState(PVRSRV_POWER_DEV *psPowerDevice, -+ PVRSRV_DEV_POWER_STATE eNewPowerState) -+{ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ PVRSRV_DEV_POWER_STATE eOldPowerState; -+#endif -+ -+ if (psPowerDevice == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_DEVICE; -+ } -+ -+#if !defined(SUPPORT_PMR_DEFERRED_FREE) -+ OSAtomicWrite(&psPowerDevice->eCurrentPowerState, eNewPowerState); -+#else -+ eOldPowerState = OSAtomicExchange(&psPowerDevice->eCurrentPowerState, -+ eNewPowerState); -+ -+ psDeviceNode = psPowerDevice->hDevCookie; -+ PVR_ASSERT(psDeviceNode); -+ -+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF && -+ eNewPowerState != eOldPowerState) -+ { -+ psDeviceNode->uiPowerOffCounter = psDeviceNode->uiPowerOffCounterNext; -+ -+ /* It's not really important to know if any zombies were queued. */ -+ (void) PMRQueueZombiesForCleanup(psDeviceNode); -+ -+ psDeviceNode->uiPowerOffCounterNext++; -+ } -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ @Input pfnPowerLockAcquire : Function to re-acquire power-lock in-case -+ it was necessary to release it. -+*/ -+static PVRSRV_ERROR _PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode, -+ PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff, -+ IMG_BOOL bDeviceOffPermitted, -+ PFN_POWER_LOCK_ACQUIRE pfnPowerLockAcquire) -+{ -+ PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev; -+ PVRSRV_ERROR eError; -+ -+ /* if pfnIsDefaultStateOff not provided or pfnIsDefaultStateOff(psPowerDev) -+ * is true (which means that the default state is OFF) then force idle. */ -+ if ((psPowerDev && psPowerDev->pfnForcedIdleRequest) && -+ (pfnIsDefaultStateOff == NULL || pfnIsDefaultStateOff(psPowerDev))) -+ { -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = psPowerDev->pfnForcedIdleRequest(psPowerDev->hDevCookie, -+ bDeviceOffPermitted); -+ if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED) -+ { -+ PVRSRV_ERROR eErrPwrLockAcq; -+ /* FW denied idle request */ -+ PVRSRVPowerUnlock(psDeviceNode); -+ -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ -+ eErrPwrLockAcq = pfnPowerLockAcquire(psDeviceNode); -+ if (eErrPwrLockAcq != PVRSRV_OK) -+ { -+ /* We only understand PVRSRV_ERROR_RETRY, so assert on others. 
-+ * Moreover, we've ended-up releasing the power-lock which was -+ * originally "held" by caller before calling this function - -+ * since this needs vigilant handling at call-site, we pass -+ * back an explicit error, for caller(s) to "avoid" calling -+ * PVRSRVPowerUnlock */ -+ PVR_ASSERT(eErrPwrLockAcq == PVRSRV_ERROR_RETRY); -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to re-acquire power-lock " -+ "(%s) after releasing it for a time-out", -+ __func__, PVRSRVGetErrorString(eErrPwrLockAcq))); -+ return PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED; -+ } -+ } -+ else -+ { -+ /* idle request successful or some other error occurred, return */ -+ break; -+ } -+ } END_LOOP_UNTIL_TIMEOUT(); -+ } -+ else -+ { -+ return PVRSRV_OK; -+ } -+ -+ return eError; -+} -+ -+/* -+ * Wrapper function helps limiting calling complexity of supplying additional -+ * PFN_POWER_LOCK_ACQUIRE argument (required by _PVRSRVDeviceIdleRequestKM) -+ */ -+inline PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode, -+ PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff, -+ IMG_BOOL bDeviceOffPermitted) -+{ -+ return _PVRSRVDeviceIdleRequestKM(psDeviceNode, -+ pfnIsDefaultStateOff, -+ bDeviceOffPermitted, -+ PVRSRVPowerLock); -+} -+ -+PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode) -+{ -+ PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev; -+ -+ if (psPowerDev && psPowerDev->pfnForcedIdleCancelRequest) -+ { -+ return psPowerDev->pfnForcedIdleCancelRequest(psPowerDev->hDevCookie); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+****************************************************************************** -+ -+ @Function PVRSRVDevicePrePowerStateKM -+ -+ @Description -+ -+ Perform device-specific processing required before a power transition -+ -+ @Input psPowerDevice : Power device -+ @Input eNewPowerState : New power state -+ @Input ePwrFlags : Power state change flags -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+static -+PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, -+ PVRSRV_DEV_POWER_STATE eNewPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags) -+{ -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState; -+ IMG_UINT64 ui64SysTimer1 = 0; -+ IMG_UINT64 ui64SysTimer2 = 0; -+ IMG_UINT64 ui64DevTimer1 = 0; -+ IMG_UINT64 ui64DevTimer2 = 0; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT); -+ -+ eCurrentPowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); -+ -+ if (psPowerDevice->pfnDevicePrePower != NULL) -+ { -+ ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs(); -+ -+ /* Call the device's power callback. */ -+ eError = psPowerDevice->pfnDevicePrePower(psPowerDevice->hDevCookie, -+ eNewPowerState, -+ eCurrentPowerState, -+ ePwrFlags); -+ -+ ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs(); -+ -+ PVR_RETURN_IF_ERROR(eError); -+ } -+ -+ /* Do any required system-layer processing. */ -+ if (psPowerDevice->pfnSystemPrePower != NULL) -+ { -+ ui64SysTimer1 = PVRSRVProcessStatsGetTimeNs(); -+ -+ eError = psPowerDevice->pfnSystemPrePower(psPowerDevice->hSysData, -+ (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON) ? -+ PVRSRV_SYS_POWER_STATE_ON : -+ PVRSRV_SYS_POWER_STATE_OFF, -+ (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) ? 
-+ PVRSRV_SYS_POWER_STATE_ON : -+ PVRSRV_SYS_POWER_STATE_OFF, -+ ePwrFlags); -+ -+ ui64SysTimer2 = PVRSRVProcessStatsGetTimeNs(); -+ -+ PVR_GOTO_IF_ERROR(eError, ErrRestorePowerState); -+ } -+ -+ _InsertPowerTimeStatistic(psPowerDevice, ui64SysTimer1, ui64SysTimer2, -+ ui64DevTimer1, ui64DevTimer2, -+ BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED), -+ eNewPowerState == PVRSRV_DEV_POWER_STATE_ON, -+ IMG_TRUE); -+ -+ return PVRSRV_OK; -+ -+ErrRestorePowerState: -+ /* In a situation where pfnDevicePrePower() succeeded but pfnSystemPrePower() -+ * failed we need to restore the device's power state from before the current -+ * request. Otherwise it will result in an inconsistency between the device's -+ * actual state and what the driver thinks the state is. */ -+ { -+ PVRSRV_ERROR eError2 = PVRSRV_OK; -+ -+ if (psPowerDevice->pfnDevicePrePower != NULL) -+ { -+ /* Call the device's power callback. */ -+ eError2 = psPowerDevice->pfnDevicePrePower(psPowerDevice->hDevCookie, -+ eCurrentPowerState, -+ eNewPowerState, -+ ePwrFlags); -+ PVR_LOG_IF_ERROR(eError2, "pfnDevicePrePower"); -+ } -+ if (eError2 == PVRSRV_OK && psPowerDevice->pfnDevicePostPower != NULL) -+ { -+ /* Call the device's power callback. */ -+ eError2 = psPowerDevice->pfnDevicePostPower(psPowerDevice->hDevCookie, -+ eCurrentPowerState, -+ eNewPowerState, -+ ePwrFlags); -+ PVR_LOG_IF_ERROR(eError2, "pfnDevicePostPower"); -+ } -+ } -+ -+ return eError; -+} -+ -+/*! -+****************************************************************************** -+ -+ @Function PVRSRVDevicePostPowerStateKM -+ -+ @Description -+ -+ Perform device-specific processing required after a power transition -+ -+ @Input psPowerDevice : Power device -+ @Input eNewPowerState : New power state -+ @Input ePwrFlags : Power state change flags -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+static -+PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, -+ PVRSRV_DEV_POWER_STATE eNewPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags) -+{ -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState; -+ IMG_UINT64 ui64SysTimer1 = 0; -+ IMG_UINT64 ui64SysTimer2 = 0; -+ IMG_UINT64 ui64DevTimer1 = 0; -+ IMG_UINT64 ui64DevTimer2 = 0; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT); -+ -+ eCurrentPowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); -+ -+ /* Do any required system-layer processing. */ -+ if (psPowerDevice->pfnSystemPostPower != NULL) -+ { -+ ui64SysTimer1 = PVRSRVProcessStatsGetTimeNs(); -+ -+ eError = psPowerDevice->pfnSystemPostPower(psPowerDevice->hSysData, -+ (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON) ? -+ PVRSRV_SYS_POWER_STATE_ON : -+ PVRSRV_SYS_POWER_STATE_OFF, -+ (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) ? -+ PVRSRV_SYS_POWER_STATE_ON : -+ PVRSRV_SYS_POWER_STATE_OFF, -+ ePwrFlags); -+ -+ ui64SysTimer2 = PVRSRVProcessStatsGetTimeNs(); -+ -+ PVR_RETURN_IF_ERROR(eError); -+ } -+ -+ if (psPowerDevice->pfnDevicePostPower != NULL) -+ { -+ ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs(); -+ -+ /* Call the device's power callback. 
*/ -+ eError = psPowerDevice->pfnDevicePostPower(psPowerDevice->hDevCookie, -+ eNewPowerState, -+ eCurrentPowerState, -+ ePwrFlags); -+ -+ ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs(); -+ -+ PVR_RETURN_IF_ERROR(eError); -+ } -+ -+ _InsertPowerTimeStatistic(psPowerDevice, ui64SysTimer1, ui64SysTimer2, -+ ui64DevTimer1, ui64DevTimer2, -+ BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED), -+ eNewPowerState == PVRSRV_DEV_POWER_STATE_ON, -+ IMG_FALSE); -+ -+ PVRSRVSetDeviceCurrentPowerState(psPowerDevice, eNewPowerState); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PPVRSRV_DEVICE_NODE psDeviceNode, -+ PVRSRV_DEV_POWER_STATE eNewPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_POWER_DEV *psPowerDevice; -+ -+ psPowerDevice = psDeviceNode->psPowerDev; -+ if (!psPowerDevice) -+ { -+ return PVRSRV_OK; -+ } -+ -+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) -+ { -+ eNewPowerState = psPowerDevice->eDefaultPowerState; -+ } -+ -+ /* Call power function if the state change or if this is an OS request. */ -+ if (OSAtomicRead(&psPowerDevice->eCurrentPowerState) != eNewPowerState || -+ BITMASK_ANY(ePwrFlags, PVRSRV_POWER_FLAGS_OSPM_SUSPEND_REQ | PVRSRV_POWER_FLAGS_OSPM_RESUME_REQ)) -+ { -+ eError = PVRSRVDevicePrePowerStateKM(psPowerDevice, -+ eNewPowerState, -+ ePwrFlags); -+ PVR_GOTO_IF_ERROR(eError, ErrorExit); -+ -+ eError = PVRSRVDevicePostPowerStateKM(psPowerDevice, -+ eNewPowerState, -+ ePwrFlags); -+ PVR_GOTO_IF_ERROR(eError, ErrorExit); -+ -+ /* Signal Device Watchdog Thread about power mode change. */ -+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON) -+ { -+ psPVRSRVData->ui32DevicesWatchdogPwrTrans++; -+#if !defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) -+ if (psPVRSRVData->ui32DevicesWatchdogTimeout == DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT) -+#endif -+ { -+ eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); -+ } -+ } -+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) -+ else if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) -+ { -+ /* signal watchdog thread and give it a chance to switch to -+ * longer / infinite wait time */ -+ eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); -+ } -+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ -+ } -+ -+ return PVRSRV_OK; -+ -+ErrorExit: -+ -+ if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Transition to %d was denied, Flags=0x%08x", -+ __func__, eNewPowerState, ePwrFlags)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Transition to %d FAILED (%s)", -+ __func__, eNewPowerState, PVRSRVGetErrorString(eError))); -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode, -+ PVRSRV_SYS_POWER_STATE eNewSysPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT uiStage = 0; -+ -+ PVRSRV_DEV_POWER_STATE eNewDevicePowerState = _IsSystemStatePowered(eNewSysPowerState) -+ ? PVRSRV_DEV_POWER_STATE_DEFAULT : PVRSRV_DEV_POWER_STATE_OFF; -+ -+ /* If setting devices to default state, force idle all devices whose default state is off */ -+ PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff = -+ (eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ? 
PVRSRVDeviceIsDefaultStateOFF : NULL; -+ -+ /* Require a proper power state */ -+ if (eNewSysPowerState == PVRSRV_SYS_POWER_STATE_Unspecified) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* Prevent simultaneous SetPowerStateKM calls */ -+ _PVRSRVForcedPowerLock(psDeviceNode); -+ -+ /* No power transition requested, so do nothing */ -+ if (eNewSysPowerState == psDeviceNode->eCurrentSysPowerState) -+ { -+ PVRSRVPowerUnlock(psDeviceNode); -+ return PVRSRV_OK; -+ } -+ -+ /* If the device is already off don't send the idle request. */ -+ if (psDeviceNode->eCurrentSysPowerState != PVRSRV_SYS_POWER_STATE_OFF) -+ { -+ eError = _PVRSRVDeviceIdleRequestKM(psDeviceNode, pfnIsDefaultStateOff, -+ IMG_TRUE, _PVRSRVForcedPowerLock); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "_PVRSRVDeviceIdleRequestKM"); -+ uiStage = 1; -+ goto ErrorExit; -+ } -+ } -+ -+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, eNewDevicePowerState, -+ ePwrFlags | PVRSRV_POWER_FLAGS_FORCED); -+ if (eError != PVRSRV_OK) -+ { -+ uiStage = 2; -+ goto ErrorExit; -+ } -+ -+ psDeviceNode->eCurrentSysPowerState = eNewSysPowerState; -+ -+ PVRSRVPowerUnlock(psDeviceNode); -+ -+ return PVRSRV_OK; -+ -+ErrorExit: -+ PVRSRVPowerUnlock(psDeviceNode); -+ -+ PVR_DPF((PVR_DBG_ERROR, "%s: Transition from %s to %s FAILED (%s) at stage " -+ "%u. Dumping debug info.", __func__, -+ PVRSRVSysPowerStateToString(psDeviceNode->eCurrentSysPowerState), -+ PVRSRVSysPowerStateToString(eNewSysPowerState), -+ PVRSRVGetErrorString(eError), uiStage)); -+ -+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVSetSystemPowerState(PVRSRV_DEVICE_CONFIG *psDevConfig, -+ PVRSRV_SYS_POWER_STATE eNewSysPowerState) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DEVICE_NODE *psDevNode = psDevConfig->psDevNode; -+ PVRSRV_SYS_POWER_STATE eCurrentSysPowerState; -+ -+ if (psDevNode != NULL) -+ { -+ eCurrentSysPowerState = psDevNode->eCurrentSysPowerState; -+ } -+ else -+ { -+ /* assume power is off if no device node */ -+ eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_OFF; -+ } -+ -+ /* no power transition requested, so do nothing */ -+ if (eNewSysPowerState == eCurrentSysPowerState) -+ { -+ return PVRSRV_OK; -+ } -+ -+ if (psDevConfig->pfnPrePowerState != NULL) -+ { -+ eError = psDevConfig->pfnPrePowerState(psDevConfig->hSysData, -+ eNewSysPowerState, -+ eCurrentSysPowerState, -+ PVRSRV_POWER_FLAGS_FORCED); -+ -+ PVR_RETURN_IF_ERROR(eError); -+ } -+ -+ if (psDevConfig->pfnPostPowerState != NULL) -+ { -+ eError = psDevConfig->pfnPostPowerState(psDevConfig->hSysData, -+ eNewSysPowerState, -+ eCurrentSysPowerState, -+ PVRSRV_POWER_FLAGS_FORCED); -+ -+ PVR_RETURN_IF_ERROR(eError); -+ } -+ -+ if (psDevNode != NULL) -+ { -+ psDevNode->eCurrentSysPowerState = eNewSysPowerState; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+void PVRSRVSetPowerCallbacks(PPVRSRV_DEVICE_NODE psDeviceNode, -+ PVRSRV_POWER_DEV *psPowerDevice, -+ PFN_PRE_POWER pfnDevicePrePower, -+ PFN_POST_POWER pfnDevicePostPower, -+ PFN_SYS_PRE_POWER pfnSystemPrePower, -+ PFN_SYS_POST_POWER pfnSystemPostPower, -+ PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest, -+ PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest) -+{ -+ if (psPowerDevice != NULL) -+ { -+ if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)) -+ { -+ psPowerDevice->pfnSystemPrePower = NULL; -+ psPowerDevice->pfnSystemPostPower = NULL; -+ } -+ else -+ { -+ psPowerDevice->pfnSystemPrePower = pfnSystemPrePower; -+ psPowerDevice->pfnSystemPostPower = 
pfnSystemPostPower; -+ } -+ -+ psPowerDevice->pfnDevicePrePower = pfnDevicePrePower; -+ psPowerDevice->pfnDevicePostPower = pfnDevicePostPower; -+ psPowerDevice->pfnForcedIdleRequest = pfnForcedIdleRequest; -+ psPowerDevice->pfnForcedIdleCancelRequest = pfnForcedIdleCancelRequest; -+ } -+} -+ -+PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode, -+ PFN_PRE_POWER pfnDevicePrePower, -+ PFN_POST_POWER pfnDevicePostPower, -+ PFN_SYS_PRE_POWER pfnSystemPrePower, -+ PFN_SYS_POST_POWER pfnSystemPostPower, -+ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange, -+ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange, -+ PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest, -+ PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest, -+ PFN_GPU_UNITS_POWER_CHANGE pfnGPUUnitsPowerChange, -+ IMG_HANDLE hDevCookie, -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState, -+ PVRSRV_DEV_POWER_STATE eDefaultPowerState) -+{ -+ PVRSRV_POWER_DEV *psPowerDevice; -+ -+ PVR_ASSERT(!psDeviceNode->psPowerDev); -+ -+ PVR_ASSERT(eCurrentPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT); -+ PVR_ASSERT(eDefaultPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT); -+ -+ psPowerDevice = OSAllocMem(sizeof(PVRSRV_POWER_DEV)); -+ PVR_LOG_RETURN_IF_NOMEM(psPowerDevice, "psPowerDevice"); -+ -+ /* setup device for power manager */ -+ PVRSRVSetPowerCallbacks(psDeviceNode, -+ psPowerDevice, -+ pfnDevicePrePower, -+ pfnDevicePostPower, -+ pfnSystemPrePower, -+ pfnSystemPostPower, -+ pfnForcedIdleRequest, -+ pfnForcedIdleCancelRequest); -+ -+ psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange; -+ psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange; -+ psPowerDevice->pfnGPUUnitsPowerChange = pfnGPUUnitsPowerChange; -+ psPowerDevice->hSysData = psDeviceNode->psDevConfig->hSysData; -+ psPowerDevice->hDevCookie = hDevCookie; -+ PVRSRVSetDeviceCurrentPowerState(psPowerDevice, eCurrentPowerState); -+ psPowerDevice->eDefaultPowerState = eDefaultPowerState; -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ OSCachedMemSet(&psPowerDevice->sPowerStats, 0, sizeof(psPowerDevice->sPowerStats)); -+#endif -+ -+ psDeviceNode->psPowerDev = psPowerDevice; -+ -+ return PVRSRV_OK; -+} -+ -+void PVRSRVRemovePowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode) -+{ -+ if (psDeviceNode->psPowerDev) -+ { -+ OSFreeMem(psDeviceNode->psPowerDev); -+ psDeviceNode->psPowerDev = NULL; -+ } -+} -+ -+PVRSRV_ERROR PVRSRVGetDevicePowerState(PCPVRSRV_DEVICE_NODE psDeviceNode, -+ PPVRSRV_DEV_POWER_STATE pePowerState) -+{ -+ PVRSRV_POWER_DEV *psPowerDevice; -+ -+ psPowerDevice = psDeviceNode->psPowerDev; -+ if (psPowerDevice == NULL) -+ { -+ return PVRSRV_ERROR_UNKNOWN_POWER_STATE; -+ } -+ -+ *pePowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode) -+{ -+ PVRSRV_DEV_POWER_STATE ePowerState; -+ -+ if (PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState) != PVRSRV_OK) -+ { -+ return IMG_FALSE; -+ } -+ -+ return (ePowerState == PVRSRV_DEV_POWER_STATE_ON); -+} -+ -+PVRSRV_ERROR -+PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, -+ IMG_BOOL bIdleDevice, -+ void* pvInfo) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_POWER_DEV *psPowerDevice = psDeviceNode->psPowerDev; -+ IMG_UINT64 ui64StartTimer, ui64StopTimer; -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState; -+ -+ PVR_UNREFERENCED_PARAMETER(pvInfo); -+ -+ if (psPowerDevice == NULL) -+ { -+ return PVRSRV_OK; -+ } -+ -+ ui64StartTimer = PVRSRVProcessStatsGetTimeUs(); -+ -+ /* This mutex is 
released in PVRSRVDevicePostClockSpeedChange. */ -+ eError = PVRSRVPowerLock(psDeviceNode); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock"); -+ -+ eCurrentPowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); -+ -+ if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice) -+ { -+ /* We can change the clock speed if the device is either IDLE or OFF */ -+ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE); -+ -+ if (eError != PVRSRV_OK) -+ { -+ /* FW Can signal denied when busy with SPM or other work it can not idle */ -+ if (eError != PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Error (%s) from %s()", __func__, -+ PVRSRVGETERRORSTRING(eError), "PVRSRVDeviceIdleRequestKM")); -+ } -+ if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) -+ { -+ PVRSRVPowerUnlock(psDeviceNode); -+ } -+ return eError; -+ } -+ } -+ -+ eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie, -+ eCurrentPowerState); -+ -+ ui64StopTimer = PVRSRVProcessStatsGetTimeUs(); -+ -+ _InsertPowerTimeStatisticExtraPre(psPowerDevice, ui64StartTimer, ui64StopTimer); -+ -+ return eError; -+} -+ -+void -+PVRSRVDevicePostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, -+ IMG_BOOL bIdleDevice, -+ void* pvInfo) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_POWER_DEV *psPowerDevice = psDeviceNode->psPowerDev; -+ IMG_UINT64 ui64StartTimer, ui64StopTimer; -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState; -+ -+ PVR_UNREFERENCED_PARAMETER(pvInfo); -+ -+ if (psPowerDevice == NULL) -+ { -+ return; -+ } -+ -+ ui64StartTimer = PVRSRVProcessStatsGetTimeUs(); -+ -+ eCurrentPowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); -+ -+ eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie, -+ eCurrentPowerState); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)", -+ __func__, psDeviceNode, PVRSRVGetErrorString(eError))); -+ } -+ -+ if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice) -+ { -+ eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode); -+ PVR_LOG_IF_ERROR(eError, "PVRSRVDeviceIdleCancelRequestKM"); -+ } -+ -+ /* This mutex was acquired in PVRSRVDevicePreClockSpeedChange. 
*/ -+ PVRSRVPowerUnlock(psDeviceNode); -+ -+ OSAtomicIncrement(&psDeviceNode->iNumClockSpeedChanges); -+ -+ ui64StopTimer = PVRSRVProcessStatsGetTimeUs(); -+ -+ _InsertPowerTimeStatisticExtraPost(psPowerDevice, ui64StartTimer, ui64StopTimer); -+} -+ -+PVRSRV_ERROR PVRSRVDeviceGPUUnitsPowerChange(PPVRSRV_DEVICE_NODE psDeviceNode, -+ IMG_UINT32 ui32NewValue) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_POWER_DEV *psPowerDevice; -+ -+ psPowerDevice = psDeviceNode->psPowerDev; -+ if (psPowerDevice) -+ { -+ PVRSRV_DEV_POWER_STATE eDevicePowerState; -+ -+ eError = PVRSRVPowerLock(psDeviceNode); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock"); -+ -+ eDevicePowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); -+ if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON) -+ { -+ /* Device must be idle to change GPU unit(s) power state */ -+ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_FALSE); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "PVRSRVDeviceIdleRequestKM"); -+ if (eError == PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) -+ { -+ goto ErrorExit; -+ } -+ goto ErrorUnlockAndExit; -+ } -+ } -+ -+ if (psPowerDevice->pfnGPUUnitsPowerChange != NULL) -+ { -+ PVRSRV_ERROR eError2 = psPowerDevice->pfnGPUUnitsPowerChange(psPowerDevice->hDevCookie, ui32NewValue); -+ -+ if (eError2 != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)", -+ __func__, psDeviceNode, -+ PVRSRVGetErrorString(eError2))); -+ } -+ } -+ -+ if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON) -+ { -+ eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVDeviceIdleCancelRequestKM", ErrorUnlockAndExit); -+ } -+ -+ PVRSRVPowerUnlock(psDeviceNode); -+ } -+ -+ return eError; -+ -+ErrorUnlockAndExit: -+ PVRSRVPowerUnlock(psDeviceNode); -+ErrorExit: -+ return eError; -+} -+ -+/****************************************************************************** -+ End of file (power.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/power.h b/drivers/gpu/drm/img-rogue/power.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/power.h -@@ -0,0 +1,457 @@ -+/*************************************************************************/ /*! -+@File -+@Title Power Management Functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Main APIs for power management functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef POWER_H -+#define POWER_H -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_device.h" -+#include "pvrsrv_error.h" -+#include "servicesext.h" -+#include "opaque_types.h" -+#include "di_common.h" -+ -+/*! -+ ***************************************************************************** -+ * Power management -+ *****************************************************************************/ -+ -+typedef struct _PVRSRV_POWER_DEV_TAG_ PVRSRV_POWER_DEV; -+ -+typedef IMG_BOOL (*PFN_SYS_DEV_IS_DEFAULT_STATE_OFF)(PVRSRV_POWER_DEV *psPowerDevice); -+ -+/* Power transition handler prototypes */ -+ -+/*! -+ Typedef for a pointer to a Function that will be called before a transition -+ from one power state to another. See also PFN_POST_POWER. -+ */ -+typedef PVRSRV_ERROR (*PFN_PRE_POWER) (IMG_HANDLE hDevHandle, -+ PVRSRV_DEV_POWER_STATE eNewPowerState, -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags); -+/*! -+ Typedef for a pointer to a Function that will be called after a transition -+ from one power state to another. See also PFN_PRE_POWER. -+ */ -+typedef PVRSRV_ERROR (*PFN_POST_POWER) (IMG_HANDLE hDevHandle, -+ PVRSRV_DEV_POWER_STATE eNewPowerState, -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags); -+ -+const char *PVRSRVSysPowerStateToString(PVRSRV_SYS_POWER_STATE eState); -+const char *PVRSRVDevPowerStateToString(PVRSRV_DEV_POWER_STATE eState); -+ -+PVRSRV_ERROR PVRSRVPowerLockInit(PPVRSRV_DEVICE_NODE psDeviceNode); -+void PVRSRVPowerLockDeInit(PPVRSRV_DEVICE_NODE psDeviceNode); -+ -+/*! -+****************************************************************************** -+ -+ @Function PVRSRVPowerLock -+ -+ @Description Obtain the mutex for power transitions. Only allowed when -+ system power is on. -+ -+ @Return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF or PVRSRV_OK -+ -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode); -+ -+/*! 
-+****************************************************************************** -+ -+ @Function PVRSRVPowerUnlock -+ -+ @Description Release the mutex for power transitions -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+void PVRSRVPowerUnlock(PPVRSRV_DEVICE_NODE psDeviceNode); -+ -+/*! -+****************************************************************************** -+ -+ @Function PVRSRVPowerTryLock -+ -+ @Description Try to obtain the mutex for power transitions. Only allowed when -+ system power is on. -+ -+ @Return PVRSRV_ERROR_RETRY or PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF or -+ PVRSRV_OK -+ -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVPowerTryLock(PPVRSRV_DEVICE_NODE psDeviceNode); -+ -+/*! -+****************************************************************************** -+ -+ @Function PVRSRVPwrLockIsLockedByMe -+ -+ @Description Determine if the calling context is holding the device power-lock -+ -+ @Return IMG_BOOL -+ -+******************************************************************************/ -+IMG_BOOL PVRSRVPwrLockIsLockedByMe(PCPVRSRV_DEVICE_NODE psDeviceNode); -+IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice); -+ -+/*! -+****************************************************************************** -+ -+ @Function PVRSRVSetDevicePowerStateKM -+ -+ @Description Set the Device into a new state -+ -+ @Input psDeviceNode : Device node -+ @Input eNewPowerState : New power state -+ @Input ePwrFlags : Power state change flags -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PPVRSRV_DEVICE_NODE psDeviceNode, -+ PVRSRV_DEV_POWER_STATE eNewPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVSetDeviceSystemPowerState -+@Description Set the device into a new power state based on the systems power -+ state -+@Input psDeviceNode Device node -+@Input eNewSysPowerState New system power state -+@Input ePwrFlags Power state change flags -+@Return PVRSRV_ERROR PVRSRV_OK on success or an error otherwise -+*/ /**************************************************************************/ -+PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode, -+ PVRSRV_SYS_POWER_STATE eNewSysPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags); -+ -+/*! -+****************************************************************************** -+ -+ @Function PVRSRVSetDeviceDefaultPowerState -+ -+ @Description Set the default device power state to eNewPowerState -+ -+ @Input psDeviceNode : Device node -+ @Input eNewPowerState : New power state -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode, -+ PVRSRV_DEV_POWER_STATE eNewPowerState); -+ -+/*! 
-+****************************************************************************** -+ -+ @Function PVRSRVSetDeviceCurrentPowerState -+ -+ @Description Set the current device power state to eNewPowerState -+ -+ @Input psPowerDevice : Power device -+ @Input eNewPowerState : New power state -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVSetDeviceCurrentPowerState(PVRSRV_POWER_DEV *psPowerDevice, -+ PVRSRV_DEV_POWER_STATE eNewPowerState); -+ -+/*! -+****************************************************************************** -+ -+ @Function PVRSRVSetSystemPowerState -+ -+ @Description Set the system power state to eNewPowerState -+ -+ @Input psDeviceConfig : Device config -+ @Input eNewPowerState : New power state -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVSetSystemPowerState(PVRSRV_DEVICE_CONFIG * psDeviceConfig, -+ PVRSRV_SYS_POWER_STATE eNewSysPowerState); -+ -+/*! -+****************************************************************************** -+ -+ @Function PVRSRVSetPowerCallbacks -+ -+ @Description Initialise the Power Device's function pointers -+ to the appropriate callbacks depending on driver mode and -+ system setup. -+ -+ @Input psDeviceNode : Device node -+ @Input psPowerDevice : Power device -+ @Input pfnDevicePrePower : regular device pre power callback -+ @Input pfnDevicePostPower : regular device post power callback -+ @Input pfnSystemPrePower : regular system pre power callback -+ @Input pfnDevicePostPower : regular system post power callback -+ @Input pfnSystemPrePower : regular device pre power callback -+ @Input pfnSystemPostPower : regular device pre power callback -+ @Input pfnForcedIdleRequest : forced idle request callback -+ @Input pfnForcedIdleCancelRequest : forced idle request cancel callback -+ -+******************************************************************************/ -+void PVRSRVSetPowerCallbacks(PPVRSRV_DEVICE_NODE psDeviceNode, -+ PVRSRV_POWER_DEV *psPowerDevice, -+ PFN_PRE_POWER pfnDevicePrePower, -+ PFN_POST_POWER pfnDevicePostPower, -+ PFN_SYS_PRE_POWER pfnSystemPrePower, -+ PFN_SYS_POST_POWER pfnSystemPostPower, -+ PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest, -+ PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest); -+ -+/* Type PFN_DC_REGISTER_POWER */ -+PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode, -+ PFN_PRE_POWER pfnDevicePrePower, -+ PFN_POST_POWER pfnDevicePostPower, -+ PFN_SYS_PRE_POWER pfnSystemPrePower, -+ PFN_SYS_POST_POWER pfnSystemPostPower, -+ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange, -+ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange, -+ PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest, -+ PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest, -+ PFN_GPU_UNITS_POWER_CHANGE pfnGPUUnitsPowerChange, -+ IMG_HANDLE hDevCookie, -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState, -+ PVRSRV_DEV_POWER_STATE eDefaultPowerState); -+ -+/*! -+****************************************************************************** -+ -+ @Function PVRSRVRemovePowerDevice -+ -+ @Description -+ -+ Removes device from power management register. Device is located by Device Index -+ -+ @Input psDeviceNode : Device node -+ -+******************************************************************************/ -+void PVRSRVRemovePowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode); -+ -+/*! 
-+****************************************************************************** -+ -+ @Function PVRSRVGetDevicePowerState -+ -+ @Description -+ -+ Return the device power state -+ -+ @Input psDeviceNode : Device node -+ @Output pePowerState : Current power state -+ -+ @Return PVRSRV_ERROR_UNKNOWN_POWER_STATE if device could not be found. -+ PVRSRV_OK otherwise. -+ -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVGetDevicePowerState(PCPVRSRV_DEVICE_NODE psDeviceNode, -+ PPVRSRV_DEV_POWER_STATE pePowerState); -+ -+/*! -+****************************************************************************** -+ -+ @Function PVRSRVIsDevicePowered -+ -+ @Description -+ -+ Whether the device is powered, for the purposes of lockup detection. -+ -+ @Input psDeviceNode : Device node -+ -+ @Return IMG_BOOL -+ -+******************************************************************************/ -+IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode); -+ -+/**************************************************************************/ /*! -+@Function PVRSRVDevicePreClockSpeedChange -+ -+@Description This function is called before a voltage/frequency change is -+ made to the GPU HW. It informs the host driver of the intention -+ to make a DVFS change. If allows the host driver to idle -+ the GPU and begin a hold off period from starting new work -+ on the GPU. -+ When this call succeeds the caller *must* call -+ PVRSRVDevicePostClockSpeedChange() to end the hold off period -+ to allow new work to be submitted to the GPU. -+ -+ Called from system layer or OS layer implementation that -+ is responsible for triggering a GPU DVFS transition. -+ -+@Input psDeviceNode pointer to the device affected by DVFS transition. -+@Input bIdleDevice when True, the driver will wait for the GPU to -+ reach an idle state before the call returns. -+@Input pvInfo unused -+ -+@Return PVRSRV_OK on success, power lock acquired and held on exit, -+ GPU idle. -+ PVRSRV_ERROR on failure, power lock not held on exit, do not -+ call PVRSRVDevicePostClockSpeedChange(). -+*/ /**************************************************************************/ -+PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, -+ IMG_BOOL bIdleDevice, -+ void *pvInfo); -+ -+/**************************************************************************/ /*! -+@Function PVRSRVDevicePostClockSpeedChange -+ -+@Description This function is called after a voltage/frequency change has -+ been made to the GPU HW following a call to -+ PVRSRVDevicePreClockSpeedChange(). -+ Before calling this function the caller must ensure the system -+ data RGX_DATA->RGX_TIMING_INFORMATION->ui32CoreClockSpeed has -+ been updated with the new frequency set, measured in Hz. -+ The function informs the host driver that the DVFS change has -+ completed. The driver will end the work hold off period, cancel -+ the device idle period and update its time data records. -+ When this call returns work submissions are unblocked and -+ are submitted to the GPU as normal. -+ This function *must* not be called if the preceding call to -+ PVRSRVDevicePreClockSpeedChange() failed. -+ -+ Called from system layer or OS layer implementation that -+ is responsible for triggering a GPU DVFS transition. -+ -+@Input psDeviceNode pointer to the device affected by DVFS transition. -+@Input bIdleDevice when True, the driver will cancel the GPU -+ device idle state before the call returns. 
Value -+ given must match that used in the call to -+ PVRSRVDevicePreClockSpeedChange() otherwise -+ undefined behaviour will result. -+@Input pvInfo unused -+ -+@Return void power lock released, no longer held on exit. -+*/ /**************************************************************************/ -+void PVRSRVDevicePostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, -+ IMG_BOOL bIdleDevice, -+ void *pvInfo); -+ -+/*! -+****************************************************************************** -+ -+ @Function PVRSRVDeviceIdleRequestKM -+ -+ @Description Perform device-specific processing required to force the device -+ idle. The device power-lock might be temporarily released (and -+ again re-acquired) during the course of this call, hence to -+ maintain lock-ordering power-lock should be the last acquired -+ lock before calling this function -+ -+ @Input psDeviceNode : Device node -+ -+ @Input pfnIsDefaultStateOff : When specified, the idle request is only -+ processed if this function passes. -+ -+ @Input bDeviceOffPermitted : IMG_TRUE if the transition should not fail -+ if device off -+ IMG_FALSE if the transition should fail if -+ device off -+ -+ @Return PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED -+ When re-acquisition of power-lock failed. -+ This error NEEDS EXPLICIT HANDLING at call -+ site as it signifies the caller needs to -+ AVOID calling PVRSRVPowerUnlock, since -+ power-lock is no longer "possessed" by -+ this context. -+ -+ PVRSRV_OK When idle request succeeded. -+ PVRSRV_ERROR Other system errors. -+ -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode, -+ PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff, -+ IMG_BOOL bDeviceOffPermitted); -+ -+/*! -+****************************************************************************** -+ -+ @Function PVRSRVDeviceIdleCancelRequestKM -+ -+ @Description Perform device-specific processing required to cancel the forced idle state -+ on the device, returning to normal operation. -+ -+ @Input psDeviceNode : Device node -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode); -+ -+/*! -+****************************************************************************** -+ -+@Function PVRSRVDeviceGPUUnitsPowerChange -+@Description Request from system layer for changing power state of GPU -+ units -+@Input psDeviceNode RGX Device Node. -+@Input ui32NewValue Value indicating the new power state -+ of GPU units. how this is interpreted -+ depends upon the device-specific -+ function subsequently called by the -+ server via a pfn. -+@Return PVRSRV_ERROR. 
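The pre/post clock-speed-change contract described above is easiest to read as a call sequence: PVRSRVDevicePreClockSpeedChange() must succeed before the frequency is touched, the timing data must be updated to the new frequency, and PVRSRVDevicePostClockSpeedChange() must be called if and only if the pre call returned PVRSRV_OK, with a matching bIdleDevice value. A minimal sketch of a system-layer DVFS helper honouring that contract; SystemLayerSetGpuFreq and PlatformWriteGpuPll are hypothetical names, and only the two PVRSRVDevice*ClockSpeedChange calls and the ui32CoreClockSpeed field come from the header above:

    extern void PlatformWriteGpuPll(IMG_UINT32 ui32ClockHz); /* hypothetical platform hook */

    static PVRSRV_ERROR SystemLayerSetGpuFreq(PPVRSRV_DEVICE_NODE psDeviceNode,
                                              RGX_TIMING_INFORMATION *psTimingInfo,
                                              IMG_UINT32 ui32NewClockHz)
    {
        PVRSRV_ERROR eError;

        /* Idle the GPU and start the work hold-off period. On success the
         * power lock is held and the post call below becomes mandatory. */
        eError = PVRSRVDevicePreClockSpeedChange(psDeviceNode, IMG_TRUE, NULL);
        if (eError != PVRSRV_OK)
        {
            /* Pre call failed: power lock is not held, do NOT call the post function. */
            return eError;
        }

        /* Platform-specific PLL/regulator programming goes here (placeholder). */
        PlatformWriteGpuPll(ui32NewClockHz);

        /* The driver expects the timing information to reflect the new
         * frequency, in Hz, before the post call is made. */
        psTimingInfo->ui32CoreClockSpeed = ui32NewClockHz;

        /* End the hold-off period, cancel the idle state and release the lock.
         * bIdleDevice must match the value passed to the pre call. */
        PVRSRVDevicePostClockSpeedChange(psDeviceNode, IMG_TRUE, NULL);

        return PVRSRV_OK;
    }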
-+*/ /**************************************************************************/ -+PVRSRV_ERROR PVRSRVDeviceGPUUnitsPowerChange(PPVRSRV_DEVICE_NODE psDeviceNode, -+ IMG_UINT32 ui32NewValue); -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+void PVRSRVSetFirmwareStartTime(PVRSRV_POWER_DEV *psPowerDevice, IMG_UINT32 ui32TimeStamp); -+ -+void PVRSRVSetFirmwareHandshakeIdleTime(PVRSRV_POWER_DEV *psPowerDevice, IMG_UINT64 ui64Duration); -+ -+int PVRSRVPowerStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData); -+#endif -+ -+#endif /* POWER_H */ -+ -+/****************************************************************************** -+ End of file (power.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/powervr/buffer_attribs.h b/drivers/gpu/drm/img-rogue/powervr/buffer_attribs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/powervr/buffer_attribs.h -@@ -0,0 +1,193 @@ -+/*************************************************************************/ /*! -+@File -+@Title 3D types for use by IMG APIs -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License MIT -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -+THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef POWERVR_BUFFER_ATTRIBS_H -+#define POWERVR_BUFFER_ATTRIBS_H -+ -+/*! -+ * Memory layouts -+ * Defines how pixels are laid out within a surface. -+ */ -+typedef enum -+{ -+ IMG_MEMLAYOUT_STRIDED, /**< Resource is strided, one row at a time */ -+ IMG_MEMLAYOUT_TWIDDLED, /**< Resource is 2D twiddled to match HW */ -+ IMG_MEMLAYOUT_3DTWIDDLED, /**< Resource is 3D twiddled, classic style */ -+ IMG_MEMLAYOUT_TILED, /**< Resource is tiled, tiling config specified elsewhere. */ -+ IMG_MEMLAYOUT_PAGETILED, /**< Resource is pagetiled */ -+ IMG_MEMLAYOUT_INVNTWIDDLED, /**< Resource is 2D twiddled !N style */ -+} IMG_MEMLAYOUT; -+ -+/*! -+ * Rotation types -+ */ -+typedef enum -+{ -+ IMG_ROTATION_0DEG = 0, -+ IMG_ROTATION_90DEG = 1, -+ IMG_ROTATION_180DEG = 2, -+ IMG_ROTATION_270DEG = 3, -+ IMG_ROTATION_FLIP_Y = 4, -+ -+ IMG_ROTATION_BAD = 255, -+} IMG_ROTATION; -+ -+/*! -+ * Alpha types. 
-+ */ -+typedef enum -+{ -+ IMG_COLOURSPACE_FORMAT_UNKNOWN = 0x0UL << 16, -+ IMG_COLOURSPACE_FORMAT_LINEAR = 0x1UL << 16, -+ IMG_COLOURSPACE_FORMAT_SRGB = 0x2UL << 16, -+ IMG_COLOURSPACE_FORMAT_SCRGB = 0x3UL << 16, -+ IMG_COLOURSPACE_FORMAT_SCRGB_LINEAR = 0x4UL << 16, -+ IMG_COLOURSPACE_FORMAT_DISPLAY_P3_LINEAR = 0x5UL << 16, -+ IMG_COLOURSPACE_FORMAT_DISPLAY_P3 = 0x6UL << 16, -+ IMG_COLOURSPACE_FORMAT_BT2020_PQ = 0x7UL << 16, -+ IMG_COLOURSPACE_FORMAT_BT2020_LINEAR = 0x8UL << 16, -+ IMG_COLOURSPACE_FORMAT_DISPLAY_P3_PASSTHROUGH = 0x9UL << 16, -+ IMG_COLOURSPACE_FORMAT_MASK = 0xFUL << 16, -+} IMG_COLOURSPACE_FORMAT; -+ -+/*! -+ * Determines if FB Compression is Lossy -+ */ -+#define IS_FBCDC_LOSSY(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8) ? IMG_TRUE : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_TRUE : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_TRUE : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8) ? IMG_TRUE : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_TRUE : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? IMG_TRUE : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8) ? IMG_TRUE : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4) ? IMG_TRUE : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2) ? IMG_TRUE : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8) ? IMG_TRUE : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_TRUE : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? IMG_TRUE : IMG_FALSE) -+ -+/*! -+ * Determines if FB Compression is Packed -+ */ -+#define IS_FBCDC_PACKED(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8) ? IMG_TRUE : IMG_FALSE) -+ -+/*! -+ * Returns type of FB Compression -+ */ -+#define GET_FBCDC_BLOCK_TYPE(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : mode) -+ -+/*! -+ * Adds Packing compression setting to mode if viable -+ */ -+#define FBCDC_MODE_ADD_PACKING(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_PACKED_8x8 : mode) -+ -+/*! -+ * Removes Packing compression setting from mode -+ */ -+#define FBCDC_MODE_REMOVE_PACKING(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : mode) -+ -+/*! -+ * Adds Lossy25 compression setting to mode if viable -+ */ -+#define FBCDC_MODE_ADD_LOSSY25(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? 
IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2 : mode) -+ -+/*! -+ * Adds Lossy37 compression setting to mode if viable -+ */ -+#define FBCDC_MODE_ADD_LOSSY37(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2 : mode) -+ -+/*! -+ * Adds Lossy50 compression setting to mode if viable -+ */ -+#define FBCDC_MODE_ADD_LOSSY50(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2 : mode) -+ -+/*! -+ * Adds Lossy75 compression setting to mode if viable -+ */ -+#define FBCDC_MODE_ADD_LOSSY75(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2 : mode) -+ -+/*! -+ * Removes Lossy compression setting from mode -+ */ -+#define FBCDC_MODE_REMOVE_LOSSY(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ -+ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : mode) -+ -+/*! 
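Taken together, these helpers let callers derive, classify and strip compression variants without open-coding the mapping between enum values. A short illustrative sequence (ExampleFbcdcModeComposition is a hypothetical helper; it uses the IMG_FB_COMPRESSION values declared immediately below):

    static void ExampleFbcdcModeComposition(void)
    {
        /* Start from a plain direct 8x8 mode and request 50% lossy compression. */
        IMG_FB_COMPRESSION eMode = FBCDC_MODE_ADD_LOSSY50(IMG_FB_COMPRESSION_DIRECT_8x8);
        /* eMode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8 */

        /* Classification helpers. */
        IMG_BOOL bLossy  = IS_FBCDC_LOSSY(eMode);   /* IMG_TRUE  */
        IMG_BOOL bPacked = IS_FBCDC_PACKED(eMode);  /* IMG_FALSE */

        /* Recover the underlying block layout, or strip the lossy setting again. */
        IMG_FB_COMPRESSION eBlock = GET_FBCDC_BLOCK_TYPE(eMode);    /* ..._DIRECT_8x8 */
        IMG_FB_COMPRESSION ePlain = FBCDC_MODE_REMOVE_LOSSY(eMode); /* ..._DIRECT_8x8 */

        (void)bLossy; (void)bPacked; (void)eBlock; (void)ePlain;
    }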
-+ * Types of framebuffer compression -+ */ -+typedef enum -+{ -+ IMG_FB_COMPRESSION_NONE, -+ IMG_FB_COMPRESSION_DIRECT_8x8, -+ IMG_FB_COMPRESSION_DIRECT_16x4, -+ IMG_FB_COMPRESSION_DIRECT_32x2, -+ IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8, -+ IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4, -+ IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2, -+ IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8, -+ IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8, -+ IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4, -+ IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2, -+ IMG_FB_COMPRESSION_DIRECT_PACKED_8x8, -+ IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4, -+ IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2, -+ IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8, -+ IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4, -+ IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2, -+} IMG_FB_COMPRESSION; -+ -+ -+#endif /* POWERVR_BUFFER_ATTRIBS_H */ -diff --git a/drivers/gpu/drm/img-rogue/powervr/img_drm_fourcc.h b/drivers/gpu/drm/img-rogue/powervr/img_drm_fourcc.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/powervr/img_drm_fourcc.h -@@ -0,0 +1,143 @@ -+/*************************************************************************/ /*! -+@File -+@Title Wrapper around drm_fourcc.h -+@Description FourCCs and DRM framebuffer modifiers that are not in the -+ Kernel's and libdrm's drm_fourcc.h can be added here. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License MIT -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -+THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef IMG_DRM_FOURCC_H -+#define IMG_DRM_FOURCC_H -+ -+#if defined(__KERNEL__) -+#include <drm/drm_fourcc.h> -+#else -+/* -+ * Include types.h to workaround versions of libdrm older than 2.4.68 -+ * not including the correct headers. -+ */ -+#include <linux/types.h> -+ -+#include <drm_fourcc.h> -+#endif -+ -+/* -+ * Don't get too inspired by this example :) -+ * ADF doesn't support DRM modifiers, so the memory layout had to be -+ * included in the fourcc name, but the proper way to specify information -+ * additional to pixel formats is to use DRM modifiers. -+ * -+ * See upstream drm_fourcc.h for the proper naming convention. -+ */ -+#ifndef DRM_FORMAT_BGRA8888_DIRECT_16x4 -+#define DRM_FORMAT_BGRA8888_DIRECT_16x4 fourcc_code('I', 'M', 'G', '0') -+#endif -+ -+#if !defined(__KERNEL__) -+/* -+ * A definition for the same format was added in Linux kernel 5.2 in commit -+ * 88ab9c76d191ad8645b483f31e2b394b0f3e280e.
As such, this definition has been -+ * deprecated and the DRM_FORMAT_ABGR16161616F kernel define should be used -+ * instead of this one. -+ */ -+#define DRM_FORMAT_ABGR16_IMG_DEPRECATED fourcc_code('I', 'M', 'G', '1') -+#endif -+ -+/* -+ * Upstream does not have a packed 10 Bits Per Channel YVU format yet, -+ * so let`s make one up. -+ * Note: at the moment this format is not intended to be used with -+ * a framebuffer, so the kernels core DRM doesn`t need to know -+ * about this format. This means that the kernel doesn`t need -+ * to be patched. -+ */ -+#if !defined(__KERNEL__) -+#define DRM_FORMAT_YVU444_PACK10_IMG fourcc_code('I', 'M', 'G', '2') -+#define DRM_FORMAT_YUV422_2PLANE_PACK10_IMG fourcc_code('I', 'M', 'G', '3') -+#define DRM_FORMAT_YUV420_2PLANE_PACK10_IMG fourcc_code('I', 'M', 'G', '4') -+#endif -+ -+/* -+ * Value chosen in the middle of 255 pool to minimise the chance of hitting -+ * the same value potentially defined by other vendors in the drm_fourcc.h -+ */ -+#define DRM_FORMAT_MOD_VENDOR_PVR 0x92 -+ -+#ifndef DRM_FORMAT_MOD_VENDOR_NONE -+#define DRM_FORMAT_MOD_VENDOR_NONE 0 -+#endif -+ -+#ifndef DRM_FORMAT_RESERVED -+#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1) -+#endif -+ -+#define img_fourcc_mod_combine(uiModHi, uiModLo) \ -+ ((__u64) ((__u32) (uiModHi)) << 32 | (__u64) ((__u32) (uiModLo))) -+ -+#define img_fourcc_mod_hi(ui64Mod) \ -+ ((__u32) ((__u64) (ui64Mod) >> 32)) -+ -+#define img_fourcc_mod_lo(ui64Mod) \ -+ ((__u32) ((__u64) (ui64Mod)) & 0xffffffff) -+ -+#ifndef fourcc_mod_code -+#define fourcc_mod_code(vendor, val) \ -+ ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL)) -+#endif -+ -+#ifndef DRM_FORMAT_MOD_INVALID -+#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED) -+#endif -+ -+#ifndef DRM_FORMAT_MOD_LINEAR -+#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0) -+#endif -+ -+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V1 fourcc_mod_code(PVR, 3) -+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V1 fourcc_mod_code(PVR, 9) -+ -+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V7 fourcc_mod_code(PVR, 6) -+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V7 fourcc_mod_code(PVR, 12) -+ -+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V10 fourcc_mod_code(PVR, 21) -+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V10 fourcc_mod_code(PVR, 22) -+#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V10 fourcc_mod_code(PVR, 23) -+ -+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V12 fourcc_mod_code(PVR, 15) -+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V12 fourcc_mod_code(PVR, 16) -+ -+#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V13 fourcc_mod_code(PVR, 24) -+#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY25_8x8_V13 fourcc_mod_code(PVR, 25) -+#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_8x8_V13 fourcc_mod_code(PVR, 26) -+#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_8x8_V13 fourcc_mod_code(PVR, 27) -+ -+#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V13 fourcc_mod_code(PVR, 28) -+#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY25_16x4_V13 fourcc_mod_code(PVR, 29) -+#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_16x4_V13 fourcc_mod_code(PVR, 30) -+#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_16x4_V13 fourcc_mod_code(PVR, 31) -+ -+#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY37_8x8_V13 fourcc_mod_code(PVR, 32) -+#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY37_16x4_V13 fourcc_mod_code(PVR, 33) -+ -+#endif /* IMG_DRM_FOURCC_H */ -diff --git a/drivers/gpu/drm/img-rogue/powervr/mem_types.h b/drivers/gpu/drm/img-rogue/powervr/mem_types.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/powervr/mem_types.h -@@ -0,0 +1,64 
@@ -+/*************************************************************************/ /*! -+@File -+@Title Public types -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License MIT -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -+THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef POWERVR_TYPES_H -+#define POWERVR_TYPES_H -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+#if defined(_MSC_VER) -+ #include "msvc_types.h" -+#elif defined(__linux__) && defined(__KERNEL__) -+ #include <linux/types.h> -+ #include <linux/compiler.h> -+#else -+ #include <stdint.h> -+ #define __iomem -+#endif -+ -+typedef void *IMG_CPU_VIRTADDR; -+ -+/* device virtual address */ -+typedef struct -+{ -+ uint64_t uiAddr; -+#define IMG_CAST_TO_DEVVADDR_UINT(var) (uint64_t)(var) -+ -+} IMG_DEV_VIRTADDR; -+ -+typedef uint64_t IMG_DEVMEM_SIZE_T; -+typedef uint64_t IMG_DEVMEM_ALIGN_T; -+typedef uint64_t IMG_DEVMEM_OFFSET_T; -+typedef uint32_t IMG_DEVMEM_LOG2ALIGN_T; -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif -diff --git a/drivers/gpu/drm/img-rogue/powervr/pvrsrv_sync_ext.h b/drivers/gpu/drm/img-rogue/powervr/pvrsrv_sync_ext.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/powervr/pvrsrv_sync_ext.h -@@ -0,0 +1,72 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services external synchronisation interface header -+@Description Defines synchronisation structures that are visible internally -+ and externally -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License MIT -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software.
-+ -+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -+THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef POWERVR_SYNC_EXT_H -+#define POWERVR_SYNC_EXT_H -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+/*! -+ * Number of sync prims still used internally in operations -+ */ -+#define PVRSRV_MAX_SYNC_PRIMS 4U -+ -+/*! -+ * Maximum number of dev var updates passed in a kick call -+ */ -+#define PVRSRV_MAX_DEV_VARS 13U -+ -+/*! -+ * Number of UFOs in operations -+ */ -+#define PVRSRV_MAX_SYNCS (PVRSRV_MAX_SYNC_PRIMS + PVRSRV_MAX_DEV_VARS) -+ -+/*! Implementation independent types for passing fence/timeline to Services. -+ */ -+typedef int32_t PVRSRV_FENCE; -+typedef int32_t PVRSRV_TIMELINE; -+ -+/*! Maximum length for an annotation name string for fence sync model objects. -+ */ -+#define PVRSRV_SYNC_NAME_LENGTH 32U -+ -+/* Macros for API callers using the fence sync model -+ */ -+#define PVRSRV_NO_TIMELINE ((PVRSRV_TIMELINE) -1) -+#define PVRSRV_NO_FENCE ((PVRSRV_FENCE) -1) -+#define PVRSRV_NO_FENCE_PTR NULL -+#define PVRSRV_NO_TIMELINE_PTR NULL -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif -diff --git a/drivers/gpu/drm/img-rogue/private_data.h b/drivers/gpu/drm/img-rogue/private_data.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/private_data.h -@@ -0,0 +1,59 @@ -+/*************************************************************************/ /*! -+@File -+@Title Linux private data structure -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(INCLUDED_PRIVATE_DATA_H) -+#define INCLUDED_PRIVATE_DATA_H -+ -+#include -+ -+#include "connection_server.h" -+#include "pvr_drm.h" -+ -+#define PVR_SRVKM_PRIV_DATA_IDX 0 -+#define PVR_SYNC_PRIV_DATA_IDX 1 -+ -+#define PVR_NUM_PRIV_DATA_IDXS 2 -+ -+CONNECTION_DATA *LinuxServicesConnectionFromFile(struct file *pFile); -+CONNECTION_DATA *LinuxSyncConnectionFromFile(struct file *pFile); -+ -+#endif /* !defined(INCLUDED_PRIVATE_DATA_H) */ -diff --git a/drivers/gpu/drm/img-rogue/proc_stats.h b/drivers/gpu/drm/img-rogue/proc_stats.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/proc_stats.h -@@ -0,0 +1,153 @@ -+/*************************************************************************/ /*! -+@File -+@Title Process and driver statistic definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PROC_STATS_H -+#define PROC_STATS_H -+ -+/* X-Macro for Process stat keys */ -+#define PVRSRV_PROCESS_STAT_KEY \ -+ X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, "MemoryUsageAllocPTMemoryUMA") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, "MemoryUsageAllocPTMemoryLMA") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, "MemoryUsageAllocGPUMemLMA") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES_MAX, "MemoryUsageAllocGPUMemLMAMax") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_LMA_PAGES, "MemoryUsageZombieGPUMemLMA") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_LMA_PAGES_MAX, "MemoryUsageZombieGPUMemLMAMax") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, "MemoryUsageAllocGPUMemUMA") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES_MAX, "MemoryUsageAllocGPUMemUMAMax") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_UMA_PAGES, "MemoryUsageZombieGPUMemUMA") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_UMA_PAGES_MAX, "MemoryUsageZombieGPUMemUMAMax") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, "MemoryUsageMappedGPUMemUMA/LMA") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES_MAX, "MemoryUsageMappedGPUMemUMA/LMAMax") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, "MemoryUsageDmaBufImport") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT_MAX, "MemoryUsageDmaBufImportMax") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_ZOMBIE, "MemoryUsageDmaBufZombie") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_ZOMBIE_MAX, "MemoryUsageDmaBufZombieMax") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_TOTAL, "MemoryUsageTotal") \ -+ X(PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX, "MemoryUsageTotalMax") -+ -+/* X-Macro for Device stat keys */ -+#define PVRSRV_DEVICE_STAT_KEY \ -+ X(PVRSRV_DEVICE_STAT_TYPE_CONNECTIONS, "Connections") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_MAX_CONNECTIONS, "ConnectionsMax") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_RC_OOMS, "RenderContextOutOfMemoryEvents") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_RC_PRS, "RenderContextPartialRenders") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_RC_GROWS, "RenderContextGrows") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_RC_PUSH_GROWS, "RenderContextPushGrows") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_RC_TA_STORES, "RenderContextTAStores") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_RC_3D_STORES, 
"RenderContext3DStores") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_RC_CDM_STORES, "RenderContextCDMStores") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_RC_TDM_STORES, "RenderContextTDMStores") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_RC_RAY_STORES, "RenderContextRayStores") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_ZSBUFFER_REQS_BY_APP, "ZSBufferRequestsByApp") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_ZSBUFFER_REQS_BY_FW, "ZSBufferRequestsByFirmware") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_FREELIST_GROW_REQS_BY_APP, "FreeListGrowRequestsByApp") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_FREELIST_GROW_REQS_BY_FW, "FreeListGrowRequestsByFirmware") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_FREELIST_PAGES_INIT, "FreeListInitialPages") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_FREELIST_MAX_PAGES, "FreeListMaxPages") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_OOM_VIRTMEM_COUNT, "MemoryOOMCountDeviceVirtual") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_OOM_PHYSMEM_COUNT, "MemoryOOMCountPhysicalHeap") \ -+ X(PVRSRV_DEVICE_STAT_TYPE_INVALID_VIRTMEM, "MemoryOOMCountDeviceVirtualAtAddr") -+ -+/* X-Macro for Driver stat keys */ -+#define PVRSRV_DRIVER_STAT_KEY \ -+ X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, "MemoryUsageAllocPTMemoryUMA") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, "MemoryUsageAllocPTMemoryLMA") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, "MemoryUsageAllocGPUMemLMA") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA_MAX, "MemoryUsageAllocGPUMemLMAMax") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_ZOMBIE_GPUMEM_LMA, "MemoryUsageZombieGPUMemLMA") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_ZOMBIE_GPUMEM_LMA_MAX, "MemoryUsageZombieGPUMemLMAMax") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, "MemoryUsageAllocGPUMemUMA") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_MAX, "MemoryUsageAllocGPUMemUMAMax") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_ZOMBIE_GPUMEM_UMA, "MemoryUsageZombieGPUMemUMA") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_ZOMBIE_GPUMEM_UMA_MAX, "MemoryUsageZombieGPUMemUMAMax") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, "MemoryUsageAllocGPUMemUMAPool") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL_MAX, "MemoryUsageAllocGPUMemUMAPoolMax") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, "MemoryUsageMappedGPUMemUMA/LMA") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA_MAX, "MemoryUsageMappedGPUMemUMA/LMAMax") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, "MemoryUsageDmaBufImport") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT_MAX, "MemoryUsageDmaBufImportMax") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_ZOMBIE, "MemoryUsageDmaBufZombie") \ -+ X(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_ZOMBIE_MAX, "MemoryUsageDmaBufZombieMax") -+ -+ -+typedef enum { -+#define X(stat_type, stat_str) stat_type, -+ PVRSRV_PROCESS_STAT_KEY -+#undef X -+ PVRSRV_PROCESS_STAT_TYPE_COUNT -+}PVRSRV_PROCESS_STAT_TYPE; -+ -+typedef enum { -+#define X(stat_type, stat_str) stat_type, -+ PVRSRV_DEVICE_STAT_KEY -+#undef X 
-+ PVRSRV_DEVICE_STAT_TYPE_COUNT -+}PVRSRV_DEVICE_STAT_TYPE; -+ -+typedef enum { -+#define X(stat_type, stat_str) stat_type, -+ PVRSRV_DRIVER_STAT_KEY -+#undef X -+ PVRSRV_DRIVER_STAT_TYPE_COUNT -+}PVRSRV_DRIVER_STAT_TYPE; -+ -+#endif // PROC_STATS_H -diff --git a/drivers/gpu/drm/img-rogue/process_stats.c b/drivers/gpu/drm/img-rogue/process_stats.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/process_stats.c -@@ -0,0 +1,3312 @@ -+/*************************************************************************/ /*! -+@File -+@Title Process based statistics -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Manages a collection of statistics based around a process -+ and referenced via OS agnostic methods. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
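The PVRSRV_*_STAT_KEY lists defined in proc_stats.h above are X-macro tables: the header expands each list with X() mapped to its first argument to build an index enum ending in a *_TYPE_COUNT sentinel, and process_stats.c below expands the same lists with X() mapped to the second argument to build parallel name tables, so indices and display strings cannot drift apart. A reduced sketch of the pattern using a hypothetical two-entry list:

    /* Hypothetical key list in the style of PVRSRV_PROCESS_STAT_KEY. */
    #define EXAMPLE_STAT_KEY \
        X(EXAMPLE_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \
        X(EXAMPLE_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc")

    /* Expansion 1: enum of stat indices (as in proc_stats.h). */
    typedef enum {
    #define X(stat_type, stat_str) stat_type,
        EXAMPLE_STAT_KEY
    #undef X
        EXAMPLE_STAT_TYPE_COUNT
    } EXAMPLE_STAT_TYPE;

    /* Expansion 2: parallel table of display names (as in process_stats.c). */
    static const char *const apszExampleStatName[EXAMPLE_STAT_TYPE_COUNT] = {
    #define X(stat_type, stat_str) stat_str,
        EXAMPLE_STAT_KEY
    #undef X
    };

    /* apszExampleStatName[EXAMPLE_STAT_TYPE_VMALLOC] is "MemoryUsageVMalloc". */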
-+*/ /**************************************************************************/ -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvr_debug.h" -+#include "lock.h" -+#include "allocmem.h" -+#include "osfunc.h" -+#include "process_stats.h" -+#include "ri_server.h" -+#include "hash.h" -+#include "connection_server.h" -+#include "pvrsrv.h" -+#include "proc_stats.h" -+#include "pvr_ricommon.h" -+#include "di_server.h" -+#include "dllist.h" -+#if defined(__linux__) -+#include "trace_events.h" -+#endif -+ -+/* Enabled OS Statistics entries: DEBUGFS on Linux, undefined for other OSs */ -+#if defined(__linux__) && ( \ -+ defined(PVRSRV_ENABLE_PERPID_STATS) || \ -+ defined(PVRSRV_ENABLE_CACHEOP_STATS) || \ -+ defined(PVRSRV_ENABLE_MEMORY_STATS) || \ -+ defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ) -+#define ENABLE_DEBUGFS_PIDS -+#endif -+ -+/* Enable GPU memory accounting tracepoint */ -+#if defined(__linux__) && ( \ -+ defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) ) -+#define ENABLE_GPU_MEM_TRACEPOINT -+#endif -+ -+/* -+ * Maximum history of process statistics that will be kept. -+ */ -+#define MAX_DEAD_LIST_PROCESSES (10) -+ -+/* -+ * Definition of all the strings used to format process based statistics. -+ */ -+ -+#if defined(PVRSRV_ENABLE_PERPID_STATS) -+/* Array of Process stat type defined using the X-Macro */ -+#define X(stat_type, stat_str) stat_str, -+static const IMG_CHAR *const pszProcessStatType[PVRSRV_PROCESS_STAT_TYPE_COUNT] = { PVRSRV_PROCESS_STAT_KEY }; -+static const IMG_CHAR *const pszDeviceStatType[PVRSRV_DEVICE_STAT_TYPE_COUNT] = { PVRSRV_DEVICE_STAT_KEY }; -+#undef X -+#endif -+ -+/* Array of Driver stat type defined using the X-Macro */ -+#define X(stat_type, stat_str) stat_str, -+static const IMG_CHAR *const pszDriverStatType[PVRSRV_DRIVER_STAT_TYPE_COUNT] = { PVRSRV_DRIVER_STAT_KEY }; -+#undef X -+ -+/* structure used in hash table to track statistic entries */ -+typedef struct { -+ size_t uiSizeInBytes; -+ IMG_PID uiPid; -+} _PVR_STATS_TRACKING_HASH_ENTRY; -+ -+/* Function used internally to decrement tracked per-process statistic entries */ -+static void _StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry, -+ PVRSRV_MEM_ALLOC_TYPE eAllocType); -+ -+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE) -+int RawProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData); -+#endif -+int GlobalStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData); -+ -+/* Note: all of the accesses to the global stats should be protected -+ * by the gsGlobalStats.hGlobalStatsLock lock. This means all of the -+ * invocations of macros *_GLOBAL_STAT_VALUE. 
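The locking rule in the note above applies to every use of the *_GLOBAL_STAT_VALUE macros defined just below, and INCREASE_GLOBAL_STAT_VALUE additionally maintains the matching *_MAX high-water mark. A minimal sketch of that pattern (ExampleAccountKMalloc is a hypothetical helper; the real accounting paths appear later in this file):

    static void ExampleAccountKMalloc(size_t uiBytes)
    {
        OSLockAcquire(gsGlobalStats.hGlobalStatsLock);

        /* Bumps PVRSRV_DRIVER_STAT_TYPE_KMALLOC and, when the new value exceeds
         * it, PVRSRV_DRIVER_STAT_TYPE_KMALLOC_MAX as well. */
        INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_KMALLOC, uiBytes);

        OSLockRelease(gsGlobalStats.hGlobalStatsLock);
    }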
*/ -+ -+/* Macros for fetching stat values */ -+#define GET_STAT_VALUE(ptr,var) (ptr)->i64StatValue[(var)] -+#define GET_GLOBAL_STAT_VALUE(idx) gsGlobalStats.ui64StatValue[idx] -+ -+#define GET_GPUMEM_GLOBAL_STAT_VALUE() \ -+ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA) + \ -+ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA) + \ -+ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA) + \ -+ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA) + \ -+ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT) -+ -+#define GET_GPUMEM_PERPID_STAT_VALUE(ptr) \ -+ GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA) + \ -+ GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA) + \ -+ GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) + \ -+ GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES) + \ -+ GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT) -+/* -+ * Macros for updating stat values. -+ */ -+#define UPDATE_MAX_VALUE(a,b) do { if ((b) > (a)) {(a) = (b);} } while (0) -+#define INCREASE_STAT_VALUE(ptr,var,val) do { (ptr)->i64StatValue[(var)] += (IMG_INT64)(val); if ((ptr)->i64StatValue[(var)] > (ptr)->i64StatValue[(var##_MAX)]) {(ptr)->i64StatValue[(var##_MAX)] = (ptr)->i64StatValue[(var)];} } while (0) -+#define INCREASE_GLOBAL_STAT_VALUE(var,idx,val) do { (var).ui64StatValue[(idx)] += (IMG_UINT64)(val); if ((var).ui64StatValue[(idx)] > (var).ui64StatValue[(idx##_MAX)]) {(var).ui64StatValue[(idx##_MAX)] = (var).ui64StatValue[(idx)];} } while (0) -+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) -+/* Allow stats to go negative */ -+#define DECREASE_STAT_VALUE(ptr,var,val) do { (ptr)->i64StatValue[(var)] -= (val); } while (0) -+#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val) do { (var).ui64StatValue[(idx)] -= (val); } while (0) -+#else -+#define DECREASE_STAT_VALUE(ptr,var,val) do { if ((ptr)->i64StatValue[(var)] >= (val)) { (ptr)->i64StatValue[(var)] -= (IMG_INT64)(val); } else { (ptr)->i64StatValue[(var)] = 0; } } while (0) -+#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val) do { if ((var).ui64StatValue[(idx)] >= (val)) { (var).ui64StatValue[(idx)] -= (IMG_UINT64)(val); } else { (var).ui64StatValue[(idx)] = 0; } } while (0) -+#endif -+#define MAX_CACHEOP_STAT 16 -+#define INCREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x+1) >= MAX_CACHEOP_STAT ? 0 : (x+1)) -+#define DECREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x-1) < 0 ? (MAX_CACHEOP_STAT-1) : (x-1)) -+ -+/* -+ * Track the search of one process when PVRSRV_DEBUG_LINUX_MEMORY_STATS -+ * is enabled. -+ */ -+typedef enum _PVRSRV_PROC_SEARCH_STATE_ -+{ -+ PVRSRV_PROC_NOTFOUND, -+ PVRSRV_PROC_FOUND, -+ PVRSRV_PROC_RESURRECTED, -+} PVRSRV_PROC_SEARCH_STATE; -+ -+/* -+ * Structures for holding statistics... 
-+ */ -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+typedef struct _PVRSRV_MEM_ALLOC_REC_ -+{ -+ PVRSRV_MEM_ALLOC_TYPE eAllocType; -+ void* pvCpuVAddr; -+ IMG_CPU_PHYADDR sCpuPAddr; -+ size_t uiBytes; -+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) -+ void* pvAllocdFromFile; -+ IMG_UINT32 ui32AllocdFromLine; -+#endif -+} PVRSRV_MEM_ALLOC_REC; -+ -+typedef struct PVRSRV_MEM_ALLOC_PRINT_DATA_TAG -+{ -+ OSDI_IMPL_ENTRY *psEntry; -+ IMG_PID pid; -+ IMG_UINT32 ui32NumEntries; -+} PVRSRV_MEM_ALLOC_PRINT_DATA; -+#endif -+ -+typedef struct _PVRSRV_PROCESS_STATS_ { -+ -+ /* Linked list pointers */ -+ DLLIST_NODE sNode; -+ -+ /* Create per process lock that need to be held -+ * to edit of its members */ -+ POS_LOCK hLock; -+ -+ /* OS level process ID */ -+ IMG_PID pid; -+ IMG_UINT32 ui32RefCount; -+ -+ /* Process memory stats */ -+ IMG_INT64 i64StatValue[PVRSRV_PROCESS_STAT_TYPE_COUNT]; -+ IMG_UINT32 ui32StatAllocFlags; -+ -+#if defined(PVRSRV_ENABLE_CACHEOP_STATS) -+ struct _CACHEOP_STRUCT_ { -+ PVRSRV_CACHE_OP uiCacheOp; -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+ IMG_DEV_VIRTADDR sDevVAddr; -+ IMG_DEV_PHYADDR sDevPAddr; -+#endif -+ IMG_DEVMEM_SIZE_T uiOffset; -+ IMG_DEVMEM_SIZE_T uiSize; -+ IMG_UINT64 ui64ExecuteTime; -+ IMG_BOOL bUserModeFlush; -+ IMG_BOOL bIsFence; -+ IMG_PID ownerPid; -+ } asCacheOp[MAX_CACHEOP_STAT]; -+ IMG_INT32 uiCacheOpWriteIndex; -+#endif -+ -+ /* Other statistics structures */ -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ HASH_TABLE* psMemoryRecords; -+#endif -+ /* Device stats */ -+ IMG_UINT32 ui32DevCount; -+ IMG_INT32 ai32DevStats[][PVRSRV_DEVICE_STAT_TYPE_COUNT]; -+} PVRSRV_PROCESS_STATS; -+ -+#if defined(ENABLE_DEBUGFS_PIDS) -+ -+typedef struct _PVRSRV_OS_STAT_ENTRY_ -+{ -+ DI_GROUP *psStatsDIGroup; -+ DI_ENTRY *psProcessStatsDIEntry; -+ DI_ENTRY *psMemStatsDIEntry; -+ DI_ENTRY *psRIMemStatsDIEntry; -+ DI_ENTRY *psCacheOpStatsDIEntry; -+} PVRSRV_OS_STAT_ENTRY; -+ -+static PVRSRV_OS_STAT_ENTRY gsLiveStatEntries; -+static PVRSRV_OS_STAT_ENTRY gsRetiredStatEntries; -+ -+int GenericStatsPrintElementsLive(OSDI_IMPL_ENTRY *psEntry, void *pvData); -+int GenericStatsPrintElementsRetired(OSDI_IMPL_ENTRY *psEntry, void *pvData); -+ -+/* -+ * Functions for printing the information stored... 
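PVRSRV_PROCESS_STATS above ends in a two-dimensional flexible array member, ai32DevStats[][PVRSRV_DEVICE_STAT_TYPE_COUNT], so the per-device counters live in the same allocation as the fixed part of the structure; _AllocateProcessStats later over-allocates by ui32DevCount rows. An illustrative accessor (ExampleBumpDeviceStat is a hypothetical helper, not part of the patch):

    static void ExampleBumpDeviceStat(PVRSRV_PROCESS_STATS *psProcessStats,
                                      IMG_UINT32 ui32DevIdx,
                                      PVRSRV_DEVICE_STAT_TYPE eStat)
    {
        /* Rows 0..ui32DevCount-1 are valid because the allocation was sized as
         * sizeof(PVRSRV_PROCESS_STATS) +
         * ui32DevCount * PVRSRV_DEVICE_STAT_TYPE_COUNT * sizeof(IMG_INT32). */
        if (ui32DevIdx < psProcessStats->ui32DevCount)
        {
            psProcessStats->ai32DevStats[ui32DevIdx][eStat]++;
        }
    }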
-+ */ -+#if defined(PVRSRV_ENABLE_PERPID_STATS) -+void ProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, -+ PVRSRV_PROCESS_STATS *psProcessStats); -+#endif -+ -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+void MemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, -+ PVRSRV_PROCESS_STATS *psProcessStats); -+#endif -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+void RIMemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, -+ PVRSRV_PROCESS_STATS *psProcessStats); -+#endif -+ -+#if defined(PVRSRV_ENABLE_CACHEOP_STATS) -+void CacheOpStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, -+ PVRSRV_PROCESS_STATS *psProcessStats); -+#endif -+ -+typedef void (PVRSRV_STATS_PRINT_ELEMENTS)(OSDI_IMPL_ENTRY *psEntry, -+ PVRSRV_PROCESS_STATS *psProcessStats); -+ -+typedef enum -+{ -+ PVRSRV_STAT_TYPE_PROCESS, -+ PVRSRV_STAT_TYPE_MEMORY, -+ PVRSRV_STAT_TYPE_RIMEMORY, -+ PVRSRV_STAT_TYPE_CACHEOP, -+ PVRSRV_STAT_TYPE_LAST -+} PVRSRV_STAT_TYPE; -+ -+#define SEPARATOR_STR_LEN 166 -+ -+typedef struct _PVRSRV_STAT_PV_DATA_ { -+ -+ PVRSRV_STAT_TYPE eStatType; -+ PVRSRV_STATS_PRINT_ELEMENTS* pfnStatsPrintElements; -+ IMG_CHAR szLiveStatsHeaderStr[SEPARATOR_STR_LEN + 1]; -+ IMG_CHAR szRetiredStatsHeaderStr[SEPARATOR_STR_LEN + 1]; -+ -+} PVRSRV_STAT_PV_DATA; -+ -+static PVRSRV_STAT_PV_DATA g_StatPvDataArr[] = { -+ { PVRSRV_STAT_TYPE_PROCESS, NULL, " Process" , " Process" }, -+ { PVRSRV_STAT_TYPE_MEMORY, NULL, " Memory Allocation" , " Memory Allocation" }, -+ { PVRSRV_STAT_TYPE_RIMEMORY, NULL, " Resource Allocation" , " Resource Allocation" }, -+ { PVRSRV_STAT_TYPE_CACHEOP, NULL, " Cache Maintenance Ops" , " Cache Maintenance Ops" } -+ }; -+ -+#define GET_STAT_ENTRY_ID(STAT_TYPE) &g_StatPvDataArr[(STAT_TYPE)] -+ -+/* Generic header strings */ -+static const IMG_CHAR g_szLiveHeaderStr[] = " Statistics for LIVE Processes "; -+static const IMG_CHAR g_szRetiredHeaderStr[] = " Statistics for RETIRED Processes "; -+ -+/* Separator string used for separating stats for different PIDs */ -+static IMG_CHAR g_szSeparatorStr[SEPARATOR_STR_LEN + 1] = ""; -+ -+static inline void -+_prepareStatsHeaderString(IMG_CHAR *pszStatsSpecificStr, const IMG_CHAR* pszGenericHeaderStr) -+{ -+ IMG_UINT32 ui32NumSeparators; -+ IMG_CHAR szStatsHeaderFooterStr[75]; -+ -+ /* Prepare text content of the header in a local string */ -+ OSStringLCopy(szStatsHeaderFooterStr, pszStatsSpecificStr, ARRAY_SIZE(szStatsHeaderFooterStr)); -+ OSStringLCat(szStatsHeaderFooterStr, pszGenericHeaderStr, ARRAY_SIZE(szStatsHeaderFooterStr)); -+ -+ /* Write all '-' characters to the header string */ -+ memset(pszStatsSpecificStr, '-', SEPARATOR_STR_LEN); -+ pszStatsSpecificStr[SEPARATOR_STR_LEN] = '\0'; -+ -+ /* Find the spot for text content in the header string */ -+ ui32NumSeparators = (SEPARATOR_STR_LEN - OSStringLength(szStatsHeaderFooterStr)) >> 1; -+ -+ /* Finally write the text content */ -+ OSSNPrintf(pszStatsSpecificStr + ui32NumSeparators, -+ OSStringLength(szStatsHeaderFooterStr), -+ "%s", szStatsHeaderFooterStr); -+ -+ /* Overwrite the '\0' character added by OSSNPrintf() */ -+ if (OSStringLength(szStatsHeaderFooterStr) > 0) -+ { -+ pszStatsSpecificStr[ui32NumSeparators + OSStringLength(szStatsHeaderFooterStr) - 1] = ' '; -+ } -+} -+ -+static inline void -+_prepareSeparatorStrings(void) -+{ -+ IMG_UINT32 i; -+ -+ /* Prepare header strings for each stat type */ -+ for (i = 0; i < PVRSRV_STAT_TYPE_LAST; ++i) -+ { -+ _prepareStatsHeaderString(g_StatPvDataArr[i].szLiveStatsHeaderStr, g_szLiveHeaderStr); -+ _prepareStatsHeaderString(g_StatPvDataArr[i].szRetiredStatsHeaderStr, 
g_szRetiredHeaderStr); -+ } -+ -+ /* Prepare separator string to separate stats for different PIDs */ -+ memset(g_szSeparatorStr, '-', SEPARATOR_STR_LEN); -+ g_szSeparatorStr[SEPARATOR_STR_LEN] = '\0'; -+} -+ -+static inline void -+_prepareStatsPrivateData(void) -+{ -+#if defined(PVRSRV_ENABLE_PERPID_STATS) -+ g_StatPvDataArr[PVRSRV_STAT_TYPE_PROCESS].pfnStatsPrintElements = ProcessStatsPrintElements; -+#endif -+ -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ g_StatPvDataArr[PVRSRV_STAT_TYPE_MEMORY].pfnStatsPrintElements = MemStatsPrintElements; -+#endif -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ g_StatPvDataArr[PVRSRV_STAT_TYPE_RIMEMORY].pfnStatsPrintElements = RIMemStatsPrintElements; -+#endif -+ -+#if defined(PVRSRV_ENABLE_CACHEOP_STATS) -+ g_StatPvDataArr[PVRSRV_STAT_TYPE_CACHEOP].pfnStatsPrintElements = CacheOpStatsPrintElements; -+#endif -+ -+ _prepareSeparatorStrings(); -+} -+ -+#endif -+ -+/* -+ * Global Boolean to flag when the statistics are ready to monitor -+ * memory allocations. -+ */ -+static IMG_BOOL bProcessStatsInitialised = IMG_FALSE; -+ -+/* -+ * Linked lists for process stats. Live stats are for processes which are still running -+ * and the dead list holds those that have exited. -+ */ -+static DLLIST_NODE gsLiveList; -+static DLLIST_NODE gsDeadList; -+ -+static POS_LOCK g_psLinkedListLock; -+/* Lockdep feature in the kernel cannot differentiate between different instances of same lock type. -+ * This allows it to group all such instances of the same lock type under one class -+ * The consequence of this is that, if lock acquisition is nested on different instances, it generates -+ * a false warning message about the possible occurrence of deadlock due to recursive lock acquisition. -+ * Hence we create the following sub classes to explicitly appraise Lockdep of such safe lock nesting */ -+#define PROCESS_LOCK_SUBCLASS_CURRENT 1 -+#if defined(ENABLE_DEBUGFS_PIDS) -+/* -+ * Pointer to OS folder to hold PID folders. -+ */ -+static DI_GROUP *psProcStatsDIGroup; -+#endif -+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE) -+static DI_ENTRY *psProcStatsDIEntry; -+#endif -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+/* Global driver PID stats registration handle */ -+static IMG_HANDLE g_hDriverProcessStats; -+#endif -+ -+/* Global driver-data folders */ -+typedef struct _GLOBAL_STATS_ -+{ -+ IMG_UINT64 ui64StatValue[PVRSRV_DRIVER_STAT_TYPE_COUNT]; -+ POS_LOCK hGlobalStatsLock; -+} GLOBAL_STATS; -+ -+static DI_ENTRY *psGlobalMemDIEntry; -+static GLOBAL_STATS gsGlobalStats; -+ -+#define HASH_INITIAL_SIZE 5 -+/* A hash table used to store the size of any vmalloc'd allocation -+ * against its address (not needed for kmallocs as we can use ksize()) */ -+static HASH_TABLE* gpsSizeTrackingHashTable; -+static POS_LOCK gpsSizeTrackingHashTableLock; -+ -+static PVRSRV_ERROR _RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid); -+ -+static void _DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats); -+ -+static void _DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType, -+ PVRSRV_PROCESS_STATS* psProcessStats, -+ IMG_UINT64 uiBytes); -+ -+/*************************************************************************/ /*! -+@Function _FindProcessStatsInLiveList -+@Description Searches the Live Process List for a statistics structure that -+ matches the PID given. -+@Input pid Process to search for. -+@Return Pointer to stats structure for the process. 
-+*/ /**************************************************************************/ -+static PVRSRV_PROCESS_STATS* -+_FindProcessStatsInLiveList(IMG_PID pid) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ -+ dllist_foreach_node(&gsLiveList, psNode, psNext) -+ { -+ PVRSRV_PROCESS_STATS* psProcessStats; -+ psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); -+ -+ if (psProcessStats->pid == pid) -+ { -+ return psProcessStats; -+ } -+ } -+ return NULL; -+} /* _FindProcessStatsInLiveList */ -+ -+/*************************************************************************/ /*! -+@Function _FindProcessStatsInDeadList -+@Description Searches the Dead Process List for a statistics structure that -+ matches the PID given. -+@Input pid Process to search for. -+@Return Pointer to stats structure for the process. -+*/ /**************************************************************************/ -+static PVRSRV_PROCESS_STATS* -+_FindProcessStatsInDeadList(IMG_PID pid) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ -+ dllist_foreach_node(&gsDeadList, psNode, psNext) -+ { -+ PVRSRV_PROCESS_STATS* psProcessStats; -+ psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); -+ -+ if (psProcessStats->pid == pid) -+ { -+ return psProcessStats; -+ } -+ } -+ return NULL; -+} /* _FindProcessStatsInDeadList */ -+ -+/*************************************************************************/ /*! -+@Function _FindProcessStats -+@Description Searches the Live and Dead Process Lists for a statistics -+ structure that matches the PID given. -+@Input pid Process to search for. -+@Return Pointer to stats structure for the process. -+*/ /**************************************************************************/ -+static PVRSRV_PROCESS_STATS* -+_FindProcessStats(IMG_PID pid) -+{ -+ PVRSRV_PROCESS_STATS* psProcessStats = _FindProcessStatsInLiveList(pid); -+ -+ if (psProcessStats == NULL) -+ { -+ psProcessStats = _FindProcessStatsInDeadList(pid); -+ } -+ -+ return psProcessStats; -+} /* _FindProcessStats */ -+ -+/*************************************************************************/ /*! -+@Function _CompressMemoryUsage -+@Description Reduces memory usage by deleting old statistics data. -+ This function requires that the list lock is not held! -+*/ /**************************************************************************/ -+static void -+_CompressMemoryUsage(void) -+{ -+ IMG_INT32 i32ItemsRemaining; -+ DLLIST_NODE *psNode, *psNext; -+ DLLIST_NODE sToBeFreedHead; -+ -+ /* -+ * We hold the lock whilst checking the list, but we'll release it -+ * before freeing memory (as that will require the lock too)! -+ */ -+ OSLockAcquire(g_psLinkedListLock); -+ -+ /* Check that the dead list is not bigger than the max size... */ -+ i32ItemsRemaining = MAX_DEAD_LIST_PROCESSES; -+ -+ dllist_init(&sToBeFreedHead); -+ -+ dllist_foreach_node(&gsDeadList, psNode, psNext) -+ { -+ i32ItemsRemaining--; -+ if (i32ItemsRemaining < 0) -+ { -+ /* This is the last allowed process, cut the linked list here! */ -+ dllist_remove_node(psNode); -+ dllist_add_to_tail(&sToBeFreedHead, psNode); -+ } -+ } -+ -+ OSLockRelease(g_psLinkedListLock); -+ -+ dllist_foreach_node(&sToBeFreedHead, psNode, psNext) -+ { -+ PVRSRV_PROCESS_STATS *psProcessStatsToBeFreed; -+ psProcessStatsToBeFreed = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); -+ _DestroyProcessStat(psProcessStatsToBeFreed); -+ } -+} /* _CompressMemoryUsage */ -+ -+/* These functions move the process stats from the live to the dead list. 
-+ * _MoveProcessToDeadList moves the entry in the global lists and -+ * it needs to be protected by g_psLinkedListLock. -+ * _MoveProcessToDeadList performs the OS calls and it -+ * shouldn't be used under g_psLinkedListLock because this could generate a -+ * lockdep warning. */ -+static void -+_MoveProcessToDeadList(PVRSRV_PROCESS_STATS* psProcessStats) -+{ -+ /* Take the element out of the live list and append to the dead list... */ -+ PVR_ASSERT(psProcessStats != NULL); -+ dllist_remove_node(&psProcessStats->sNode); -+ dllist_add_to_head(&gsDeadList, &psProcessStats->sNode); -+} /* _MoveProcessToDeadList */ -+ -+/* These functions move the process stats from the dead to the live list. -+ * _MoveProcessToLiveList moves the entry in the global lists and -+ * it needs to be protected by g_psLinkedListLock. -+ * _MoveProcessToLiveList performs the OS calls and it -+ * shouldn't be used under g_psLinkedListLock because this could generate a -+ * lockdep warning. */ -+static void -+_MoveProcessToLiveList(PVRSRV_PROCESS_STATS* psProcessStats) -+{ -+ /* Take the element out of the live list and append to the dead list... */ -+ PVR_ASSERT(psProcessStats != NULL); -+ dllist_remove_node(&psProcessStats->sNode); -+ dllist_add_to_head(&gsLiveList, &psProcessStats->sNode); -+} /* _MoveProcessToLiveList */ -+ -+static PVRSRV_ERROR -+_AllocateProcessStats(PVRSRV_PROCESS_STATS **ppsProcessStats, IMG_PID ownerPid) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_PROCESS_STATS *psProcessStats; -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ IMG_UINT32 ui32DevCount = 0; -+ -+ if (psPVRSRVData != NULL) -+ { -+ ui32DevCount = psPVRSRVData->ui32RegisteredDevices; -+ } -+ -+ psProcessStats = OSAllocZMemNoStats(sizeof(PVRSRV_PROCESS_STATS) + -+ ui32DevCount * PVRSRV_DEVICE_STAT_TYPE_COUNT * sizeof(IMG_INT32)); -+ PVR_RETURN_IF_NOMEM(psProcessStats); -+ -+ psProcessStats->pid = ownerPid; -+ psProcessStats->ui32RefCount = 1; -+ psProcessStats->ui32DevCount = ui32DevCount; -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ psProcessStats->psMemoryRecords = HASH_Create(HASH_INITIAL_SIZE); -+ PVR_GOTO_IF_NOMEM(psProcessStats->psMemoryRecords, eError, free_process_stats); -+#endif -+ -+ eError = OSLockCreateNoStats(&psProcessStats->hLock); -+ PVR_GOTO_IF_ERROR(eError, destroy_mem_recs); -+ -+ *ppsProcessStats = psProcessStats; -+ return PVRSRV_OK; -+ -+destroy_mem_recs: -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ HASH_Delete(psProcessStats->psMemoryRecords); -+free_process_stats: -+#endif -+ OSFreeMemNoStats(psProcessStats); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+} -+ -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+static PVRSRV_ERROR _FreeMemStatsEntry(uintptr_t k, uintptr_t v, void* pvPriv) -+{ -+ PVRSRV_MEM_ALLOC_REC *psRecord = (PVRSRV_MEM_ALLOC_REC *)(uintptr_t)v; -+ -+ PVR_UNREFERENCED_PARAMETER(pvPriv); -+ -+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) -+ PVR_DPF((PVR_DBG_WARNING, "Mem Stats Record not freed: 0x%" IMG_UINT64_FMTSPECx " %p, size="IMG_SIZE_FMTSPEC", %s:%d", -+ (IMG_UINT64)(k), psRecord, psRecord->uiBytes, -+ (IMG_CHAR*)psRecord->pvAllocdFromFile, psRecord->ui32AllocdFromLine)); -+#else -+ PVR_UNREFERENCED_PARAMETER(k); -+#endif -+ OSFreeMemNoStats(psRecord); -+ -+ return PVRSRV_OK; -+} -+#endif -+ -+/*************************************************************************/ /*! -+@Function _DestroyProcessStat -+@Description Frees memory and resources held by a process statistic. -+@Input psProcessStats Process stats to destroy. 
-+*/ /**************************************************************************/ -+static void -+_DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats) -+{ -+ PVR_ASSERT(psProcessStats != NULL); -+ -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ -+ /* Free the memory statistics... */ -+ HASH_Iterate(psProcessStats->psMemoryRecords, (HASH_pfnCallback)_FreeMemStatsEntry, NULL); -+ HASH_Delete(psProcessStats->psMemoryRecords); -+ -+ OSLockRelease(psProcessStats->hLock); -+#endif -+ -+ /*Destroy the lock */ -+ OSLockDestroyNoStats(psProcessStats->hLock); -+ -+ /* Free the memory... */ -+ OSFreeMemNoStats(psProcessStats); -+} /* _DestroyProcessStat */ -+ -+#if defined(ENABLE_DEBUGFS_PIDS) -+static inline void -+_createStatsFiles(PVRSRV_OS_STAT_ENTRY* psStatsEntries, -+ DI_PFN_SHOW pfnStatsShow) -+{ -+ PVRSRV_ERROR eError; -+ DI_ITERATOR_CB sIterator = {.pfnShow = pfnStatsShow}; -+ -+#if defined(PVRSRV_ENABLE_PERPID_STATS) -+ eError = DICreateEntry("process_stats", psStatsEntries->psStatsDIGroup, -+ &sIterator, -+ GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_PROCESS), -+ DI_ENTRY_TYPE_GENERIC, -+ &psStatsEntries->psProcessStatsDIEntry); -+ PVR_LOG_IF_ERROR(eError, "DICreateEntry (1)"); -+#endif -+ -+#if defined(PVRSRV_ENABLE_CACHEOP_STATS) -+ eError = DICreateEntry("cache_ops_exec", psStatsEntries->psStatsDIGroup, -+ &sIterator, -+ GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_CACHEOP), -+ DI_ENTRY_TYPE_GENERIC, -+ &psStatsEntries->psCacheOpStatsDIEntry); -+ PVR_LOG_IF_ERROR(eError, "DICreateEntry (2)"); -+#endif -+ -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ eError = DICreateEntry("mem_area", psStatsEntries->psStatsDIGroup, -+ &sIterator, -+ GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_MEMORY), -+ DI_ENTRY_TYPE_GENERIC, -+ &psStatsEntries->psMemStatsDIEntry); -+ PVR_LOG_IF_ERROR(eError, "DICreateEntry (3)"); -+#endif -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ eError = DICreateEntry("gpu_mem_area", psStatsEntries->psStatsDIGroup, -+ &sIterator, -+ GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_RIMEMORY), -+ DI_ENTRY_TYPE_GENERIC, -+ &psStatsEntries->psRIMemStatsDIEntry); -+ PVR_LOG_IF_ERROR(eError, "DICreateEntry (4)"); -+#endif -+} -+ -+static inline void -+_createStatisticsEntries(void) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = DICreateGroup("proc_stats", NULL, &psProcStatsDIGroup); -+ PVR_LOG_IF_ERROR(eError, "DICreateGroup (1)"); -+ eError = DICreateGroup("live_pids_stats", psProcStatsDIGroup, -+ &gsLiveStatEntries.psStatsDIGroup); -+ PVR_LOG_IF_ERROR(eError, "DICreateGroup (2)"); -+ eError = DICreateGroup("retired_pids_stats", psProcStatsDIGroup, -+ &gsRetiredStatEntries.psStatsDIGroup); -+ PVR_LOG_IF_ERROR(eError, "DICreateGroup (3)"); -+ -+ _createStatsFiles(&gsLiveStatEntries, GenericStatsPrintElementsLive); -+ _createStatsFiles(&gsRetiredStatEntries, GenericStatsPrintElementsRetired); -+ -+ _prepareStatsPrivateData(); -+} -+ -+static inline void -+_removeStatsFiles(PVRSRV_OS_STAT_ENTRY* psStatsEntries) -+{ -+#if defined(PVRSRV_ENABLE_PERPID_STATS) -+ DIDestroyEntry(psStatsEntries->psProcessStatsDIEntry); -+ psStatsEntries->psProcessStatsDIEntry = NULL; -+#endif -+ -+#if defined(PVRSRV_ENABLE_CACHEOP_STATS) -+ DIDestroyEntry(psStatsEntries->psCacheOpStatsDIEntry); -+ psStatsEntries->psCacheOpStatsDIEntry = NULL; -+#endif -+ -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ DIDestroyEntry(psStatsEntries->psMemStatsDIEntry); -+ psStatsEntries->psMemStatsDIEntry = NULL; -+#endif -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ 
DIDestroyEntry(psStatsEntries->psRIMemStatsDIEntry); -+ psStatsEntries->psRIMemStatsDIEntry = NULL; -+#endif -+} -+ -+static inline void -+_removeStatisticsEntries(void) -+{ -+ _removeStatsFiles(&gsLiveStatEntries); -+ _removeStatsFiles(&gsRetiredStatEntries); -+ -+ DIDestroyGroup(gsLiveStatEntries.psStatsDIGroup); -+ gsLiveStatEntries.psStatsDIGroup = NULL; -+ DIDestroyGroup(gsRetiredStatEntries.psStatsDIGroup); -+ gsRetiredStatEntries.psStatsDIGroup = NULL; -+ DIDestroyGroup(psProcStatsDIGroup); -+ psProcStatsDIGroup = NULL; -+} -+#endif -+ -+/*************************************************************************/ /*! -+@Function PVRSRVStatsInitialise -+@Description Entry point for initialising the statistics module. -+@Return Standard PVRSRV_ERROR error code. -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVStatsInitialise(void) -+{ -+ PVRSRV_ERROR error; -+ -+ PVR_ASSERT(g_psLinkedListLock == NULL); -+ PVR_ASSERT(gpsSizeTrackingHashTable == NULL); -+ PVR_ASSERT(bProcessStatsInitialised == IMG_FALSE); -+ -+ /* We need a lock to protect the linked lists... */ -+#if defined(__linux__) && defined(__KERNEL__) -+ error = OSLockCreateNoStats(&g_psLinkedListLock); -+#else -+ error = OSLockCreate(&g_psLinkedListLock); -+#endif -+ PVR_GOTO_IF_ERROR(error, return_); -+ -+ /* We also need a lock to protect the hash table used for size tracking. */ -+#if defined(__linux__) && defined(__KERNEL__) -+ error = OSLockCreateNoStats(&gpsSizeTrackingHashTableLock); -+#else -+ error = OSLockCreate(&gpsSizeTrackingHashTableLock); -+#endif -+ PVR_GOTO_IF_ERROR(error, destroy_linked_list_lock_); -+ -+ /* We also need a lock to protect the GlobalStat counters */ -+#if defined(__linux__) && defined(__KERNEL__) -+ error = OSLockCreateNoStats(&gsGlobalStats.hGlobalStatsLock); -+#else -+ error = OSLockCreate(&gsGlobalStats.hGlobalStatsLock); -+#endif -+ PVR_GOTO_IF_ERROR(error, destroy_hashtable_lock_); -+ -+ /* Flag that we are ready to start monitoring memory allocations. 
*/ -+ -+ gpsSizeTrackingHashTable = HASH_Create(HASH_INITIAL_SIZE); -+ PVR_GOTO_IF_NOMEM(gpsSizeTrackingHashTable, error, destroy_stats_lock_); -+ -+ dllist_init(&gsLiveList); -+ dllist_init(&gsDeadList); -+ -+ bProcessStatsInitialised = IMG_TRUE; -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ /* Register our 'system' PID to hold driver-wide alloc stats */ -+ _RegisterProcess(&g_hDriverProcessStats, PVR_SYS_ALLOC_PID); -+#endif -+ -+#if defined(ENABLE_DEBUGFS_PIDS) -+ _createStatisticsEntries(); -+#endif -+ -+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE) -+ { -+ DI_ITERATOR_CB sIterator = {.pfnShow = RawProcessStatsPrintElements}; -+ error = DICreateEntry("memtrack_stats", NULL, &sIterator, NULL, -+ DI_ENTRY_TYPE_GENERIC, &psProcStatsDIEntry); -+ PVR_LOG_IF_ERROR(error, "DICreateEntry (1)"); -+ } -+#endif -+ -+ { -+ DI_ITERATOR_CB sIterator = {.pfnShow = GlobalStatsPrintElements}; -+ error = DICreateEntry("driver_stats", NULL, &sIterator, NULL, -+ DI_ENTRY_TYPE_GENERIC, &psGlobalMemDIEntry); -+ PVR_LOG_IF_ERROR(error, "DICreateEntry (3)"); -+ } -+ -+ return PVRSRV_OK; -+ -+destroy_stats_lock_: -+#if defined(__linux__) && defined(__KERNEL__) -+ OSLockDestroyNoStats(gsGlobalStats.hGlobalStatsLock); -+#else -+ OSLockDestroy(gsGlobalStats.hGlobalStatsLock); -+#endif -+ gsGlobalStats.hGlobalStatsLock = NULL; -+destroy_hashtable_lock_: -+#if defined(__linux__) && defined(__KERNEL__) -+ OSLockDestroyNoStats(gpsSizeTrackingHashTableLock); -+#else -+ OSLockDestroy(gpsSizeTrackingHashTableLock); -+#endif -+ gpsSizeTrackingHashTableLock = NULL; -+destroy_linked_list_lock_: -+#if defined(__linux__) && defined(__KERNEL__) -+ OSLockDestroyNoStats(g_psLinkedListLock); -+#else -+ OSLockDestroy(g_psLinkedListLock); -+#endif -+ g_psLinkedListLock = NULL; -+return_: -+ return error; -+ -+} -+ -+static PVRSRV_ERROR _DumpAllVMallocEntries (uintptr_t k, uintptr_t v, void* pvPriv) -+{ -+#if defined(PVRSRV_NEED_PVR_DPF) || defined(DOXYGEN) -+ _PVR_STATS_TRACKING_HASH_ENTRY *psNewTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)(uintptr_t)v; -+ IMG_UINT64 uiCpuVAddr = (IMG_UINT64)k; -+ -+ PVR_DPF((PVR_DBG_ERROR, "%s: " IMG_SIZE_FMTSPEC " bytes @ 0x%" IMG_UINT64_FMTSPECx " (PID %u)", __func__, -+ psNewTrackingHashEntry->uiSizeInBytes, -+ uiCpuVAddr, -+ psNewTrackingHashEntry->uiPid)); -+ -+ PVR_UNREFERENCED_PARAMETER(pvPriv); -+#endif -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function PVRSRVStatsDestroy -+@Description Method for destroying the statistics module data. -+*/ /**************************************************************************/ -+void -+PVRSRVStatsDestroy(void) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ -+ PVR_ASSERT(bProcessStatsInitialised); -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ /* Deregister our 'system' PID which holds driver-wide alloc stats */ -+ PVRSRVStatsDeregisterProcess(g_hDriverProcessStats); -+#endif -+ -+ /* Stop monitoring memory allocations... */ -+ bProcessStatsInitialised = IMG_FALSE; -+ -+ /* Destroy the locks... */ -+ if (g_psLinkedListLock != NULL) -+ { -+#if defined(__linux__) && defined(__KERNEL__) -+ OSLockDestroyNoStats(g_psLinkedListLock); -+#else -+ OSLockDestroy(g_psLinkedListLock); -+#endif -+ g_psLinkedListLock = NULL; -+ } -+ -+ /* Free the live and dead lists... 
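/*
 * PVRSRVStatsInitialise above unwinds partial initialisation with cascading
 * goto labels (destroy_stats_lock_ -> destroy_hashtable_lock_ ->
 * destroy_linked_list_lock_), releasing resources in the reverse order of
 * their creation.  A stripped-down model of that idiom, under the assumption
 * that plain malloc/free stand in for OSLockCreate/HASH_Create (hypothetical
 * names, not the driver's code):
 */
#include <stdlib.h>

struct stats_ctx { void *list_lock, *hash_lock, *hash_table; };

static int stats_init(struct stats_ctx *ctx)
{
    ctx->list_lock = malloc(1);             /* stand-in for OSLockCreate */
    if (!ctx->list_lock)
        goto err_out;

    ctx->hash_lock = malloc(1);
    if (!ctx->hash_lock)
        goto err_free_list_lock;

    ctx->hash_table = malloc(64);           /* stand-in for HASH_Create */
    if (!ctx->hash_table)
        goto err_free_hash_lock;

    return 0;                               /* fully initialised */

err_free_hash_lock:
    free(ctx->hash_lock);  ctx->hash_lock = NULL;
err_free_list_lock:
    free(ctx->list_lock);  ctx->list_lock = NULL;
err_out:
    return -1;
}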
*/ -+ dllist_foreach_node(&gsLiveList, psNode, psNext) -+ { -+ PVRSRV_PROCESS_STATS* psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); -+ dllist_remove_node(&psProcessStats->sNode); -+ _DestroyProcessStat(psProcessStats); -+ } -+ -+ dllist_foreach_node(&gsDeadList, psNode, psNext) -+ { -+ PVRSRV_PROCESS_STATS* psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); -+ dllist_remove_node(&psProcessStats->sNode); -+ _DestroyProcessStat(psProcessStats); -+ } -+ -+ if (gpsSizeTrackingHashTable != NULL) -+ { -+ /* Dump all remaining entries in HASH table (list any remaining vmallocs) */ -+ HASH_Iterate(gpsSizeTrackingHashTable, (HASH_pfnCallback)_DumpAllVMallocEntries, NULL); -+ HASH_Delete(gpsSizeTrackingHashTable); -+ } -+ if (gpsSizeTrackingHashTableLock != NULL) -+ { -+#if defined(__linux__) && defined(__KERNEL__) -+ OSLockDestroyNoStats(gpsSizeTrackingHashTableLock); -+#else -+ OSLockDestroy(gpsSizeTrackingHashTableLock); -+#endif -+ gpsSizeTrackingHashTableLock = NULL; -+ } -+ -+ if (NULL != gsGlobalStats.hGlobalStatsLock) -+ { -+#if defined(__linux__) && defined(__KERNEL__) -+ OSLockDestroyNoStats(gsGlobalStats.hGlobalStatsLock); -+#else -+ OSLockDestroy(gsGlobalStats.hGlobalStatsLock); -+#endif -+ gsGlobalStats.hGlobalStatsLock = NULL; -+ } -+ -+} -+ -+void -+PVRSRVStatsDestroyDI(void) -+{ -+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE) -+ if (psProcStatsDIEntry != NULL) -+ { -+ DIDestroyEntry(psProcStatsDIEntry); -+ psProcStatsDIEntry = NULL; -+ } -+#endif -+ -+ /* Destroy the global data entry */ -+ if (psGlobalMemDIEntry != NULL) -+ { -+ DIDestroyEntry(psGlobalMemDIEntry); -+ psGlobalMemDIEntry = NULL; -+ } -+ -+#if defined(ENABLE_DEBUGFS_PIDS) -+ _removeStatisticsEntries(); -+#endif -+} -+ -+static void _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType, -+ size_t uiBytes) -+{ -+#if defined(ENABLE_GPU_MEM_TRACEPOINT) -+ IMG_UINT64 ui64InitialSize; -+#endif -+ -+ OSLockAcquire(gsGlobalStats.hGlobalStatsLock); -+ -+#if defined(ENABLE_GPU_MEM_TRACEPOINT) -+ ui64InitialSize = GET_GPUMEM_GLOBAL_STAT_VALUE(); -+#endif -+ -+ switch (eAllocType) -+ { -+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: -+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_KMALLOC, uiBytes); -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: -+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMALLOC, uiBytes); -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: -+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, uiBytes); -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: -+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, uiBytes); -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: -+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, uiBytes); -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: -+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, uiBytes); -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: -+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes); -+ break; -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES: -+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ZOMBIE_GPUMEM_LMA, uiBytes); -+ break; -+#endif -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: -+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes); -+ 
break; -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES: -+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ZOMBIE_GPUMEM_UMA, uiBytes); -+ break; -+#endif -+ -+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: -+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes); -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES: -+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, uiBytes); -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: -+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, uiBytes); -+ break; -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE: -+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_ZOMBIE, uiBytes); -+ break; -+#endif -+ -+ default: -+ PVR_ASSERT(0); -+ break; -+ } -+ -+#if defined(ENABLE_GPU_MEM_TRACEPOINT) -+ { -+ IMG_UINT64 ui64Size = GET_GPUMEM_GLOBAL_STAT_VALUE(); -+ if (ui64Size != ui64InitialSize) -+ { -+ TracepointUpdateGPUMemGlobal(0, ui64Size); -+ } -+ } -+#endif -+ -+ OSLockRelease(gsGlobalStats.hGlobalStatsLock); -+} -+ -+static void _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType, -+ size_t uiBytes) -+{ -+#if defined(ENABLE_GPU_MEM_TRACEPOINT) -+ IMG_UINT64 ui64InitialSize; -+#endif -+ -+ OSLockAcquire(gsGlobalStats.hGlobalStatsLock); -+ -+#if defined(ENABLE_GPU_MEM_TRACEPOINT) -+ ui64InitialSize = GET_GPUMEM_GLOBAL_STAT_VALUE(); -+#endif -+ -+ switch (eAllocType) -+ { -+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: -+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_KMALLOC, uiBytes); -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: -+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMALLOC, uiBytes); -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: -+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, uiBytes); -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: -+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, uiBytes); -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: -+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, uiBytes); -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: -+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, uiBytes); -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: -+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes); -+ break; -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES: -+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ZOMBIE_GPUMEM_LMA, uiBytes); -+ break; -+#endif -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: -+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes); -+ break; -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES: -+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ZOMBIE_GPUMEM_UMA, uiBytes); -+ break; -+#endif -+ -+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: -+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes); -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES: -+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, uiBytes); -+ break; -+ -+ 
case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: -+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, uiBytes); -+ break; -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE: -+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_ZOMBIE, uiBytes); -+ break; -+#endif -+ -+ default: -+ PVR_ASSERT(0); -+ break; -+ } -+ -+#if defined(ENABLE_GPU_MEM_TRACEPOINT) -+ { -+ IMG_UINT64 ui64Size = GET_GPUMEM_GLOBAL_STAT_VALUE(); -+ if (ui64Size != ui64InitialSize) -+ { -+ TracepointUpdateGPUMemGlobal(0, ui64Size); -+ } -+ } -+#endif -+ -+ OSLockRelease(gsGlobalStats.hGlobalStatsLock); -+} -+ -+static PVRSRV_ERROR -+_RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid) -+{ -+ PVRSRV_PROCESS_STATS* psProcessStats=NULL; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(phProcessStats != NULL); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Register process PID %d [%s]", -+ __func__, ownerPid, (ownerPid == PVR_SYS_ALLOC_PID) -+ ? "system" : OSGetCurrentClientProcessNameKM())); -+ -+ /* Check the PID has not already moved to the dead list... */ -+ OSLockAcquire(g_psLinkedListLock); -+ psProcessStats = _FindProcessStatsInDeadList(ownerPid); -+ if (psProcessStats != NULL) -+ { -+ /* Move it back onto the live list! */ -+ _MoveProcessToLiveList(psProcessStats); -+ } -+ else -+ { -+ /* Check the PID is not already registered in the live list... */ -+ psProcessStats = _FindProcessStatsInLiveList(ownerPid); -+ } -+ -+ /* If the PID is on the live list then just increment the ref count and return... */ -+ if (psProcessStats != NULL) -+ { -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ -+ psProcessStats->ui32RefCount++; -+ -+ OSLockRelease(psProcessStats->hLock); -+ OSLockRelease(g_psLinkedListLock); -+ -+ *phProcessStats = psProcessStats; -+ -+ return PVRSRV_OK; -+ } -+ OSLockRelease(g_psLinkedListLock); -+ -+ /* Allocate a new node structure and initialise it... */ -+ eError = _AllocateProcessStats(&psProcessStats, ownerPid); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ /* Add it to the live list... */ -+ OSLockAcquire(g_psLinkedListLock); -+ dllist_add_to_head(&gsLiveList, &psProcessStats->sNode); -+ OSLockRelease(g_psLinkedListLock); -+ -+ /* Done */ -+ *phProcessStats = (IMG_HANDLE) psProcessStats; -+ -+ return PVRSRV_OK; -+ -+e0: -+ *phProcessStats = (IMG_HANDLE) NULL; -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+} /* _RegisterProcess */ -+ -+/*************************************************************************/ /*! -+@Function PVRSRVStatsRegisterProcess -+@Description Register a process into the list statistics list. -+@Output phProcessStats Handle to the process to be used to deregister. -+@Return Standard PVRSRV_ERROR error code. -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats) -+{ -+ return _RegisterProcess(phProcessStats, OSGetCurrentClientProcessIDKM()); -+} -+ -+/*************************************************************************/ /*! -+@Function PVRSRVStatsDeregisterProcess -+@Input hProcessStats Handle to the process returned when registered. -+@Description Method for destroying the statistics module data. 
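/*
 * _RegisterProcess and PVRSRVStatsDeregisterProcess (below) keep one stats
 * entry per PID, reference-counted: a second registration for the same PID
 * only bumps ui32RefCount, and an entry whose count reaches zero is moved to
 * the dead list rather than freed, so its statistics remain readable.  A
 * minimal model of that lifetime rule (hypothetical names, not the driver's
 * code):
 */
typedef struct pid_entry {
    int      pid;
    unsigned refcount;
    int      on_dead_list;
} pid_entry;

/* Called for every new connection from a process. */
static void pid_entry_get(pid_entry *e)
{
    if (e->on_dead_list)
        e->on_dead_list = 0;    /* resurrect: move back to the live list */
    e->refcount++;
}

/* Called when a connection goes away; the entry is retired, not freed. */
static void pid_entry_put(pid_entry *e)
{
    if (e->refcount > 0 && --e->refcount == 0)
        e->on_dead_list = 1;    /* keep the stats around for later inspection */
}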
-+*/ /**************************************************************************/ -+void -+PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats) -+{ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Deregister process entered PID %d [%s]", -+ __func__, OSGetCurrentClientProcessIDKM(), -+ OSGetCurrentProcessName())); -+ -+ if (hProcessStats != (IMG_HANDLE) NULL) -+ { -+ PVRSRV_PROCESS_STATS* psProcessStats = (PVRSRV_PROCESS_STATS*) hProcessStats; -+ -+ /* Lower the reference count, if zero then move it to the dead list */ -+ OSLockAcquire(g_psLinkedListLock); -+ if (psProcessStats->ui32RefCount > 0) -+ { -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ psProcessStats->ui32RefCount--; -+ -+#if !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) -+ if (psProcessStats->ui32RefCount == 0) -+ { -+ OSLockRelease(psProcessStats->hLock); -+ _MoveProcessToDeadList(psProcessStats); -+ }else -+#endif -+ { -+ OSLockRelease(psProcessStats->hLock); -+ } -+ } -+ OSLockRelease(g_psLinkedListLock); -+ -+ /* Check if the dead list needs to be reduced */ -+ _CompressMemoryUsage(); -+ } -+} /* PVRSRVStatsDeregisterProcess */ -+ -+PVRSRV_ERROR PVRSRVStatsDeviceConnect(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ IMG_UINT32 ui32DevID = psDeviceNode->sDevId.ui32InternalID; -+ IMG_PID ownerPid = OSGetCurrentClientProcessIDKM(); -+ PVRSRV_PROCESS_STATS* psProcessStats; -+ -+ OSLockAcquire(g_psLinkedListLock); -+ -+ psProcessStats = _FindProcessStatsInLiveList(ownerPid); -+ -+ if (psProcessStats != NULL) -+ { -+ if ((ui32DevID < psProcessStats->ui32DevCount) || -+ (ui32DevID == 0 && psProcessStats->ui32DevCount == 0)) -+ { -+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_CONNECTIONS]++; -+ UPDATE_MAX_VALUE(psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_MAX_CONNECTIONS], -+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_CONNECTIONS]); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Device index %d is greater than device count %d for PID %d.", -+ __func__, ui32DevID, psProcessStats->ui32DevCount, ownerPid)); -+ } -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Process %d not found.", -+ __func__, ownerPid)); -+ } -+ -+ OSLockRelease(g_psLinkedListLock); -+ -+ return PVRSRV_OK; -+} -+ -+void PVRSRVStatsDeviceDisconnect(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ IMG_UINT32 ui32DevID = psDeviceNode->sDevId.ui32InternalID; -+ IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); -+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); -+ IMG_PID currentPid = OSGetCurrentClientProcessIDKM(); -+ PVRSRV_PROCESS_STATS* psProcessStats; -+ -+ OSLockAcquire(g_psLinkedListLock); -+ -+ if (psPVRSRVData) -+ { -+ if ((currentPid == psPVRSRVData->cleanupThreadPid) && -+ (currentCleanupPid != 0)) -+ { -+ psProcessStats = _FindProcessStats(currentCleanupPid); -+ } -+ else -+ { -+ psProcessStats = _FindProcessStatsInLiveList(currentPid); -+ } -+ } -+ else -+ { -+ psProcessStats = _FindProcessStatsInLiveList(currentPid); -+ } -+ -+ if (psProcessStats != NULL) -+ { -+ if ((ui32DevID < psProcessStats->ui32DevCount) || -+ (ui32DevID == 0 && psProcessStats->ui32DevCount == 0)) -+ { -+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_CONNECTIONS]--; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Device index %d is greater than device count %d for PID %d.", -+ __func__, ui32DevID, psProcessStats->ui32DevCount, currentPid)); -+ } -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Process %d not found.", -+ __func__, currentPid)); -+ } -+ -+ 
OSLockRelease(g_psLinkedListLock); -+} -+ -+void -+PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, -+ void *pvCpuVAddr, -+ IMG_CPU_PHYADDR sCpuPAddr, -+ size_t uiBytes, -+ IMG_PID currentPid -+ DEBUG_MEMSTATS_PARAMS) -+{ -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); -+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_MEM_ALLOC_REC* psRecord = NULL; -+ PVRSRV_PROCESS_STATS* psProcessStats; -+ __maybe_unused PVRSRV_PROC_SEARCH_STATE eProcSearch = PVRSRV_PROC_FOUND; -+#if defined(ENABLE_GPU_MEM_TRACEPOINT) -+ IMG_UINT64 ui64InitialSize; -+#endif -+ -+ /* Don't do anything if we are not initialised or we are shutting down! */ -+ if (!bProcessStatsInitialised) -+ { -+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Called when process statistics module is not initialised", -+ __func__)); -+#endif -+ return; -+ } -+ -+ /* -+ * To prevent a recursive loop, we make the memory allocations for our -+ * memstat records via OSAllocMemNoStats(), which does not try to -+ * create a memstat record entry. -+ */ -+ -+ /* Allocate the memory record... */ -+ psRecord = OSAllocZMemNoStats(sizeof(PVRSRV_MEM_ALLOC_REC)); -+ if (psRecord == NULL) -+ { -+ return; -+ } -+ -+ psRecord->eAllocType = eAllocType; -+ psRecord->pvCpuVAddr = pvCpuVAddr; -+ psRecord->sCpuPAddr.uiAddr = sCpuPAddr.uiAddr; -+ psRecord->uiBytes = uiBytes; -+ -+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) -+ psRecord->pvAllocdFromFile = pvAllocFromFile; -+ psRecord->ui32AllocdFromLine = ui32AllocFromLine; -+#endif -+ -+ _increase_global_stat(eAllocType, uiBytes); -+ /* Lock while we find the correct process... */ -+ OSLockAcquire(g_psLinkedListLock); -+ -+ if (psPVRSRVData) -+ { -+ if ((currentPid == psPVRSRVData->cleanupThreadPid) && -+ (currentCleanupPid != 0)) -+ { -+ psProcessStats = _FindProcessStats(currentCleanupPid); -+ } -+ else -+ { -+ psProcessStats = _FindProcessStatsInLiveList(currentPid); -+ if (!psProcessStats) -+ { -+ psProcessStats = _FindProcessStatsInDeadList(currentPid); -+ eProcSearch = PVRSRV_PROC_RESURRECTED; -+ } -+ } -+ } -+ else -+ { -+ psProcessStats = _FindProcessStatsInLiveList(currentPid); -+ if (!psProcessStats) -+ { -+ psProcessStats = _FindProcessStatsInDeadList(currentPid); -+ eProcSearch = PVRSRV_PROC_RESURRECTED; -+ } -+ } -+ -+ if (psProcessStats == NULL) -+ { -+ eProcSearch = PVRSRV_PROC_NOTFOUND; -+ -+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Process stat increment called for 'unknown' process PID(%d)", -+ __func__, currentPid)); -+ -+ if (_AllocateProcessStats(&psProcessStats, currentPid) != PVRSRV_OK) -+ { -+ OSLockRelease(g_psLinkedListLock); -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s UNABLE TO CREATE process_stats entry for pid %d [%s] (" IMG_SIZE_FMTSPEC " bytes)", -+ __func__, currentPid, OSGetCurrentProcessName(), uiBytes)); -+ goto free_record; -+ } -+ -+ /* Add it to the live list... 
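/*
 * PVRSRVStatsAddMemAllocRecord above allocates its own bookkeeping through
 * OSAllocZMemNoStats, i.e. through a path that bypasses the statistics hooks;
 * otherwise recording an allocation would itself be recorded and recurse.  A
 * minimal model of that split between tracked and untracked allocation paths
 * (hypothetical names, not the driver's code):
 */
#include <stdlib.h>

static size_t g_tracked_bytes;              /* driver-wide running total */

static void *alloc_untracked(size_t n)      /* used inside the stats module */
{
    return calloc(1, n);                    /* no accounting hook here */
}

static void *alloc_tracked(size_t n)        /* used by the rest of the driver */
{
    void *p = calloc(1, n);
    if (p)
        g_tracked_bytes += n;               /* accounting hook: must never be
                                             * reached from alloc_untracked() */
    return p;
}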
*/ -+ dllist_add_to_head(&gsLiveList, &psProcessStats->sNode); -+ -+ OSLockRelease(g_psLinkedListLock); -+ -+#else /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */ -+ OSLockRelease(g_psLinkedListLock); -+ goto free_record; -+#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */ -+ } -+ else -+ { -+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) -+ if (eProcSearch == PVRSRV_PROC_RESURRECTED) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Process stat incremented on 'dead' process PID(%d)", -+ __func__, currentPid)); -+ /* Move process from dead list to live list */ -+ _MoveProcessToLiveList(psProcessStats); -+ } -+#endif -+ OSLockRelease(g_psLinkedListLock); -+ } -+ -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ { -+ IMG_UINT64 ui64Key; -+ -+ if (eAllocType == PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA -+ || eAllocType == PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES -+ || eAllocType == PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ || eAllocType == PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES -+ || eAllocType == PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES -+#endif -+ ) -+ { -+ ui64Key = psRecord->sCpuPAddr.uiAddr; -+ } -+ else -+ { -+ ui64Key = (IMG_UINT64)psRecord->pvCpuVAddr; -+ } -+ -+ /* Insert the memory record... */ -+ if (!HASH_Insert(psProcessStats->psMemoryRecords, ui64Key, (uintptr_t)psRecord)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s UNABLE TO CREATE mem stats record for pid %d [%s] (" IMG_SIZE_FMTSPEC " bytes)", -+ __func__, currentPid, OSGetCurrentProcessName(), uiBytes)); -+ } -+ } -+#endif -+ -+#if defined(ENABLE_GPU_MEM_TRACEPOINT) -+ ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); -+#endif -+ -+ /* Update the memory watermarks... */ -+ switch (eAllocType) -+ { -+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes); -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, uiBytes); -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, uiBytes); -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, uiBytes); -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << 
(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, uiBytes); -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_LMA_PAGES, uiBytes); -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+#endif -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, uiBytes); -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_UMA_PAGES, uiBytes); -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+#endif -+ -+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+ default: -+ { -+ PVR_ASSERT(0); -+ } -+ break; -+ } -+ -+#if defined(ENABLE_GPU_MEM_TRACEPOINT) -+ if (psProcessStats->pid != PVR_SYS_ALLOC_PID) -+ { -+ IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); -+ if (ui64Size != ui64InitialSize) -+ { -+ TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, ui64Size); -+ } -+ } -+#endif -+ -+ OSLockRelease(psProcessStats->hLock); -+ -+ return; -+ -+free_record: -+ _decrease_global_stat(eAllocType, uiBytes); -+ if (psRecord != NULL) -+ { -+ OSFreeMemNoStats(psRecord); -+ } -+#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ -+} /* PVRSRVStatsAddMemAllocRecord */ -+ -+void -+PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, -+ IMG_UINT64 ui64Key, -+ IMG_PID currentPid) -+{ -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+ IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); -+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_PROCESS_STATS* psProcessStats = NULL; -+ PVRSRV_MEM_ALLOC_REC* psRecord = NULL; -+ IMG_BOOL bFound = IMG_FALSE; -+ -+ /* Don't do anything if we are not initialised or we are shutting down! 
*/ -+ if (!bProcessStatsInitialised) -+ { -+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Called when process statistics module is not initialised", -+ __func__)); -+#endif -+ return; -+ } -+ -+ /* Lock while we find the correct process and remove this record... */ -+ OSLockAcquire(g_psLinkedListLock); -+ -+ if (psPVRSRVData) -+ { -+ if ((currentPid == psPVRSRVData->cleanupThreadPid) && -+ (currentCleanupPid != 0)) -+ { -+ psProcessStats = _FindProcessStats(currentCleanupPid); -+ } -+ else -+ { -+ psProcessStats = _FindProcessStats(currentPid); -+ } -+ } -+ else -+ { -+ psProcessStats = _FindProcessStats(currentPid); -+ } -+ if (psProcessStats != NULL) -+ { -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ psRecord = (PVRSRV_MEM_ALLOC_REC*)HASH_Remove(psProcessStats->psMemoryRecords, ui64Key); -+ OSLockRelease(psProcessStats->hLock); -+ bFound = psRecord != NULL; -+ } -+ -+ /* If not found, we need to do a full search in case it was allocated to a different PID... */ -+ if (!bFound) -+ { -+ PVRSRV_PROCESS_STATS* psProcessStatsAlreadyChecked = psProcessStats; -+ DLLIST_NODE *psNode, *psNext; -+ -+ /* Search all live lists first... */ -+ dllist_foreach_node(&gsLiveList, psNode, psNext) -+ { -+ psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); -+ if (psProcessStats != psProcessStatsAlreadyChecked) -+ { -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ psRecord = (PVRSRV_MEM_ALLOC_REC*)HASH_Remove(psProcessStats->psMemoryRecords, ui64Key); -+ OSLockRelease(psProcessStats->hLock); -+ bFound = psRecord != NULL; -+ } -+ -+ if (bFound) -+ { -+ break; -+ } -+ } -+ -+ /* If not found, then search all dead lists next... */ -+ if (!bFound) -+ { -+ dllist_foreach_node(&gsDeadList, psNode, psNext) -+ { -+ psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); -+ if (psProcessStats != psProcessStatsAlreadyChecked) -+ { -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ psRecord = (PVRSRV_MEM_ALLOC_REC*)HASH_Remove(psProcessStats->psMemoryRecords, ui64Key); -+ OSLockRelease(psProcessStats->hLock); -+ bFound = psRecord != NULL; -+ } -+ -+ if (bFound) -+ { -+ break; -+ } -+ } -+ } -+ } -+ -+ /* Update the watermark and remove this record...*/ -+ if (bFound) -+ { -+ _decrease_global_stat(eAllocType, psRecord->uiBytes); -+ -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ -+ _DecreaseProcStatValue(eAllocType, -+ psProcessStats, -+ psRecord->uiBytes); -+ -+ OSLockRelease(psProcessStats->hLock); -+ OSLockRelease(g_psLinkedListLock); -+ -+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) -+ /* If all stats are now zero, remove the entry for this thread */ -+ if (psProcessStats->ui32StatAllocFlags == 0) -+ { -+ OSLockAcquire(g_psLinkedListLock); -+ _MoveProcessToDeadList(psProcessStats); -+ OSLockRelease(g_psLinkedListLock); -+ -+ /* Check if the dead list needs to be reduced */ -+ _CompressMemoryUsage(); -+ } -+#endif -+ /* -+ * Free the record outside the lock so we don't deadlock and so we -+ * reduce the time the lock is held. 
-+ */ -+ OSFreeMemNoStats(psRecord); -+ } -+ else -+ { -+ OSLockRelease(g_psLinkedListLock); -+ } -+ -+#else -+PVR_UNREFERENCED_PARAMETER(eAllocType); -+PVR_UNREFERENCED_PARAMETER(ui64Key); -+#endif -+} /* PVRSRVStatsRemoveMemAllocRecord */ -+ -+void -+PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType, -+ size_t uiBytes, -+ IMG_UINT64 uiCpuVAddr, -+ IMG_PID uiPid) -+{ -+ IMG_BOOL bRes = IMG_FALSE; -+ _PVR_STATS_TRACKING_HASH_ENTRY *psNewTrackingHashEntry = NULL; -+ -+ if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL)) -+ { -+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Called when process statistics module is not initialised", -+ __func__)); -+#endif -+ return; -+ } -+ -+ /* Alloc untracked memory for the new hash table entry */ -+ psNewTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)OSAllocMemNoStats(sizeof(*psNewTrackingHashEntry)); -+ if (psNewTrackingHashEntry == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "*** %s : @ line %d Failed to alloc memory for psNewTrackingHashEntry!", -+ __func__, __LINE__)); -+ return; -+ } -+ -+ /* Fill-in the size of the allocation and PID of the allocating process */ -+ psNewTrackingHashEntry->uiSizeInBytes = uiBytes; -+ psNewTrackingHashEntry->uiPid = uiPid; -+ OSLockAcquire(gpsSizeTrackingHashTableLock); -+ /* Insert address of the new struct into the hash table */ -+ bRes = HASH_Insert(gpsSizeTrackingHashTable, uiCpuVAddr, (uintptr_t)psNewTrackingHashEntry); -+ OSLockRelease(gpsSizeTrackingHashTableLock); -+ if (bRes) -+ { -+ PVRSRVStatsIncrMemAllocStat(eAllocType, uiBytes, uiPid); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "*** %s : @ line %d HASH_Insert() failed!", -+ __func__, __LINE__)); -+ /* Free the memory allocated for psNewTrackingHashEntry, as we -+ * failed to insert it into the Hash table. -+ */ -+ OSFreeMemNoStats(psNewTrackingHashEntry); -+ } -+} -+ -+void -+PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, -+ size_t uiBytes, -+ IMG_PID currentPid) -+ -+{ -+ IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); -+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_PROCESS_STATS* psProcessStats = NULL; -+ __maybe_unused PVRSRV_PROC_SEARCH_STATE eProcSearch = PVRSRV_PROC_FOUND; -+#if defined(ENABLE_GPU_MEM_TRACEPOINT) -+ IMG_UINT64 ui64InitialSize; -+#endif -+ -+ /* Don't do anything if we are not initialised or we are shutting down! 
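/*
 * PVRSRVStatsIncrMemAllocStatAndTrack / PVRSRVStatsDecrMemAllocStatAndUntrack
 * above record the size of each allocation in a hash table keyed by its
 * address, because the free path only receives the pointer and, unlike
 * kmalloc/ksize(), vmalloc gives no way to recover the size.  A tiny
 * self-contained model of the same bookkeeping, using a fixed-size
 * open-addressing table (hypothetical names, not the driver's code):
 */
#include <stdint.h>
#include <stddef.h>

#define TRACK_SLOTS 64

static struct { uintptr_t addr; size_t size; } track[TRACK_SLOTS];

static int track_insert(uintptr_t addr, size_t size)
{
    for (unsigned i = 0; i < TRACK_SLOTS; i++) {
        unsigned slot = (unsigned)((addr >> 4) + i) % TRACK_SLOTS;
        if (track[slot].addr == 0) {        /* 0 used as the empty sentinel */
            track[slot].addr = addr;
            track[slot].size = size;
            return 0;
        }
    }
    return -1;                              /* table full: caller drops the record */
}

static size_t track_remove(uintptr_t addr)
{
    for (unsigned i = 0; i < TRACK_SLOTS; i++) {
        unsigned slot = (unsigned)((addr >> 4) + i) % TRACK_SLOTS;
        if (track[slot].addr == addr) {
            track[slot].addr = 0;
            return track[slot].size;        /* size to subtract from the stats */
        }
    }
    return 0;                               /* unknown address: nothing tracked */
}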
*/ -+ if (!bProcessStatsInitialised) -+ { -+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Called when process statistics module is not initialised", -+ __func__)); -+#endif -+ return; -+ } -+ -+ _increase_global_stat(eAllocType, uiBytes); -+ OSLockAcquire(g_psLinkedListLock); -+ if (psPVRSRVData) -+ { -+ if ((currentPid == psPVRSRVData->cleanupThreadPid) && -+ (currentCleanupPid != 0)) -+ { -+ psProcessStats = _FindProcessStats(currentCleanupPid); -+ } -+ else -+ { -+ psProcessStats = _FindProcessStatsInLiveList(currentPid); -+ if (!psProcessStats) -+ { -+ psProcessStats = _FindProcessStatsInDeadList(currentPid); -+ eProcSearch = PVRSRV_PROC_RESURRECTED; -+ } -+ } -+ } -+ else -+ { -+ psProcessStats = _FindProcessStatsInLiveList(currentPid); -+ if (!psProcessStats) -+ { -+ psProcessStats = _FindProcessStatsInDeadList(currentPid); -+ eProcSearch = PVRSRV_PROC_RESURRECTED; -+ } -+ } -+ -+ if (psProcessStats == NULL) -+ { -+ eProcSearch = PVRSRV_PROC_NOTFOUND; -+ -+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Process stat increment called for 'unknown' process PID(%d)", -+ __func__, currentPid)); -+ -+ if (bProcessStatsInitialised) -+ { -+ if (_AllocateProcessStats(&psProcessStats, currentPid) != PVRSRV_OK) -+ { -+ OSLockRelease(g_psLinkedListLock); -+ return; -+ } -+ /* Add it to the live list... */ -+ dllist_add_to_head(&gsLiveList, &psProcessStats->sNode); -+ } -+#else -+ OSLockRelease(g_psLinkedListLock); -+#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */ -+ -+ } -+ -+ if (psProcessStats != NULL) -+ { -+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) -+ if (eProcSearch == PVRSRV_PROC_RESURRECTED) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Process stat incremented on 'dead' process PID(%d)", -+ __func__, currentPid)); -+ -+ /* Move process from dead list to live list */ -+ _MoveProcessToLiveList(psProcessStats); -+ } -+#endif -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ /* Release the list lock as soon as we acquire the process lock, -+ * this ensures if the process is in deadlist the entry cannot be -+ * deleted or modified -+ */ -+ OSLockRelease(g_psLinkedListLock); -+ -+#if defined(ENABLE_GPU_MEM_TRACEPOINT) -+ ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); -+#endif -+ -+ /* Update the memory watermarks... 
*/ -+ switch (eAllocType) -+ { -+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes); -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, uiBytes); -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, uiBytes); -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, uiBytes); -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, uiBytes); -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_LMA_PAGES, uiBytes); -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+#endif -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, uiBytes); -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_UMA_PAGES, uiBytes); -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); 
-+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+#endif -+ -+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE: -+ { -+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_ZOMBIE, uiBytes); -+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_ZOMBIE-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ break; -+#endif -+ -+ default: -+ { -+ PVR_ASSERT(0); -+ } -+ break; -+ } -+ -+#if defined(ENABLE_GPU_MEM_TRACEPOINT) -+ if (psProcessStats->pid != PVR_SYS_ALLOC_PID) -+ { -+ IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); -+ if (ui64Size != ui64InitialSize) -+ { -+ TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, -+ ui64Size); -+ } -+ } -+#endif -+ -+ OSLockRelease(psProcessStats->hLock); -+ } -+ -+} -+ -+static void -+_DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType, -+ PVRSRV_PROCESS_STATS* psProcessStats, -+ IMG_UINT64 uiBytes) -+{ -+#if defined(ENABLE_GPU_MEM_TRACEPOINT) -+ IMG_UINT64 ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); -+#endif -+ -+ switch (eAllocType) -+ { -+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: -+ { -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes); -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] == 0) -+ { -+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: -+ { -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, uiBytes); -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC] == 0) -+ { -+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: -+ { -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, uiBytes); -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] == 0) -+ { -+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: -+ { -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, uiBytes); -+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA] == 0) -+ { -+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } 
-+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: -+ { -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, uiBytes); -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] == 0) -+ { -+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: -+ { -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, uiBytes); -+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA] == 0) -+ { -+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: -+ { -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, uiBytes); -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] == 0) -+ { -+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ } -+ break; -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES: -+ { -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_LMA_PAGES, uiBytes); -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_LMA_PAGES] == 0) -+ { -+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ } -+ break; -+#endif -+ -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: -+ { -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, uiBytes); -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES] == 0) -+ { -+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ } -+ break; -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES: -+ { -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_UMA_PAGES, uiBytes); -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_UMA_PAGES] == 0) -+ { -+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ZOMBIE_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ } -+ break; -+#endif -+ -+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: -+ { -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, uiBytes); -+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES] == 0) -+ { -+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ } -+ break; -+ -+ case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: -+ { -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, uiBytes); -+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT] == 0) -+ { -+ 
psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ } -+ break; -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE: -+ { -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_ZOMBIE, uiBytes); -+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_ZOMBIE] == 0) -+ { -+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_ZOMBIE-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); -+ } -+ } -+ break; -+#endif -+ -+ default: -+ { -+ PVR_ASSERT(0); -+ } -+ break; -+ } -+ -+#if defined(ENABLE_GPU_MEM_TRACEPOINT) -+ if (psProcessStats->pid != PVR_SYS_ALLOC_PID) -+ { -+ IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); -+ if (ui64Size != ui64InitialSize) -+ { -+ TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, ui64Size); -+ } -+ } -+#endif -+} -+ -+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE) -+int RawProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ PVRSRV_PROCESS_STATS *psProcessStats; -+ DLLIST_NODE *psNode, *psNext; -+ -+ DIPrintf(psEntry, -+ "%s,%s,%s,%s,%s,%s,%s\n", -+ "PID", -+ "MemoryUsageKMalloc", // PVRSRV_PROCESS_STAT_TYPE_KMALLOC -+ "MemoryUsageAllocPTMemoryUMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA -+ "MemoryUsageAllocPTMemoryLMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA -+ "MemoryUsageAllocGPUMemLMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES -+ "MemoryUsageAllocGPUMemUMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES -+ "MemoryUsageDmaBufImport"); // PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT -+ -+ OSLockAcquire(g_psLinkedListLock); -+ -+ dllist_foreach_node(&gsLiveList, psNode, psNext) -+ { -+ psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); -+ if (psProcessStats->pid != PVR_SYS_ALLOC_PID) -+ { -+ DIPrintf(psEntry, -+ "%d,%"IMG_INT64_FMTSPECd",%"IMG_INT64_FMTSPECd"," -+ "%"IMG_INT64_FMTSPECd",%"IMG_INT64_FMTSPECd"," -+ "%"IMG_INT64_FMTSPECd",%"IMG_INT64_FMTSPECd"\n", -+ psProcessStats->pid, -+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC], -+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA], -+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA], -+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES], -+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES], -+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT]); -+ } -+ } -+ -+ OSLockRelease(g_psLinkedListLock); -+ -+ return 0; -+} /* RawProcessStatsPrintElements */ -+#endif -+ -+void -+PVRSRVStatsDecrMemKAllocStat(size_t uiBytes, -+ IMG_PID decrPID) -+{ -+ PVRSRV_PROCESS_STATS* psProcessStats; -+ -+ /* Don't do anything if we are not initialised or we are shutting down! */ -+ if (!bProcessStatsInitialised) -+ { -+ return; -+ } -+ -+ _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, uiBytes); -+ -+ OSLockAcquire(g_psLinkedListLock); -+ -+ psProcessStats = _FindProcessStats(decrPID); -+ -+ if (psProcessStats != NULL) -+ { -+ /* Decrement the kmalloc memory stat... 
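/*
 * _DecreaseProcStatValue above clears one bit of ui32StatAllocFlags whenever
 * the corresponding counter drops back to zero, so callers can retire the
 * whole process entry with a single "flags == 0" test instead of checking
 * every counter; the bit index is the stat type relative to the first tracked
 * type.  A compact model of that flag maintenance (hypothetical names, not
 * the driver's code):
 */
#include <stdint.h>

enum stat_type { STAT_KMALLOC, STAT_VMALLOC, STAT_GPU_PAGES, STAT_TYPE_COUNT };

struct proc_stats {
    int64_t  value[STAT_TYPE_COUNT];
    uint32_t alloc_flags;               /* bit n set while value[n] != 0 */
};

static void stat_add(struct proc_stats *p, enum stat_type t, int64_t bytes)
{
    p->value[t] += bytes;
    p->alloc_flags |= (uint32_t)1u << t;
}

static void stat_sub(struct proc_stats *p, enum stat_type t, int64_t bytes)
{
    p->value[t] -= bytes;
    if (p->value[t] == 0)
        p->alloc_flags &= ~((uint32_t)1u << t);
}

/* The entry can be moved to the dead list once no stat holds any memory. */
static int proc_stats_drained(const struct proc_stats *p)
{
    return p->alloc_flags == 0;
}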
*/ -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes); -+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); -+ } -+ -+ OSLockRelease(g_psLinkedListLock); -+} -+ -+static void -+_StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry, -+ PVRSRV_MEM_ALLOC_TYPE eAllocType) -+{ -+ PVRSRV_PROCESS_STATS* psProcessStats; -+ -+ /* Don't do anything if we are not initialised or we are shutting down! */ -+ if (!bProcessStatsInitialised) -+ { -+ return; -+ } -+ -+ _decrease_global_stat(eAllocType, psTrackingHashEntry->uiSizeInBytes); -+ -+ OSLockAcquire(g_psLinkedListLock); -+ -+ psProcessStats = _FindProcessStats(psTrackingHashEntry->uiPid); -+ -+ if (psProcessStats != NULL) -+ { -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ /* Decrement the memory stat... */ -+ _DecreaseProcStatValue(eAllocType, -+ psProcessStats, -+ psTrackingHashEntry->uiSizeInBytes); -+ OSLockRelease(psProcessStats->hLock); -+ } -+ -+ OSLockRelease(g_psLinkedListLock); -+} -+ -+void -+PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType, -+ IMG_UINT64 uiCpuVAddr) -+{ -+ _PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry = NULL; -+ -+ if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL)) -+ { -+ return; -+ } -+ -+ OSLockAcquire(gpsSizeTrackingHashTableLock); -+ psTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)HASH_Remove(gpsSizeTrackingHashTable, uiCpuVAddr); -+ OSLockRelease(gpsSizeTrackingHashTableLock); -+ if (psTrackingHashEntry) -+ { -+ _StatsDecrMemTrackedStat(psTrackingHashEntry, eAllocType); -+ OSFreeMemNoStats(psTrackingHashEntry); -+ } -+} -+ -+void -+PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, -+ size_t uiBytes, -+ IMG_PID currentPid) -+{ -+ IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); -+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_PROCESS_STATS* psProcessStats = NULL; -+ -+ /* Don't do anything if we are not initialised or we are shutting down! */ -+ if (!bProcessStatsInitialised) -+ { -+ return; -+ } -+ -+ _decrease_global_stat(eAllocType, uiBytes); -+ -+ OSLockAcquire(g_psLinkedListLock); -+ if (psPVRSRVData) -+ { -+ if ((currentPid == psPVRSRVData->cleanupThreadPid) && -+ (currentCleanupPid != 0)) -+ { -+ psProcessStats = _FindProcessStats(currentCleanupPid); -+ } -+ else -+ { -+ psProcessStats = _FindProcessStats(currentPid); -+ } -+ } -+ else -+ { -+ psProcessStats = _FindProcessStats(currentPid); -+ } -+ -+ -+ if (psProcessStats != NULL) -+ { -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ /* Release the list lock as soon as we acquire the process lock, -+ * this ensures if the process is in deadlist the entry cannot be -+ * deleted or modified -+ */ -+ OSLockRelease(g_psLinkedListLock); -+ /* Update the memory watermarks... 
*/ -+ _DecreaseProcStatValue(eAllocType, -+ psProcessStats, -+ uiBytes); -+ OSLockRelease(psProcessStats->hLock); -+ -+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) -+ /* If all stats are now zero, remove the entry for this thread */ -+ if (psProcessStats->ui32StatAllocFlags == 0) -+ { -+ OSLockAcquire(g_psLinkedListLock); -+ _MoveProcessToDeadList(psProcessStats); -+ OSLockRelease(g_psLinkedListLock); -+ -+ /* Check if the dead list needs to be reduced */ -+ _CompressMemoryUsage(); -+ } -+#endif -+ }else{ -+ OSLockRelease(g_psLinkedListLock); -+ } -+} -+ -+/* For now we do not want to expose the global stats API -+ * so we wrap it into this specific function for pooled pages. -+ * As soon as we need to modify the global stats directly somewhere else -+ * we want to replace these functions with more general ones. -+ */ -+void -+PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes) -+{ -+ _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes); -+} -+ -+void -+PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes) -+{ -+ _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes); -+} -+ -+PVRSRV_ERROR -+PVRSRVStatsUpdateOOMStat(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32OOMStatType, -+ IMG_PID pidOwner) -+{ -+ PVRSRV_DEVICE_STAT_TYPE eOOMStatType = (PVRSRV_DEVICE_STAT_TYPE) ui32OOMStatType; -+ IMG_PID pidCurrent = pidOwner; -+ PVRSRV_PROCESS_STATS* psProcessStats; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ /* Don't do anything if we are not initialised or we are shutting down! */ -+ if (!bProcessStatsInitialised) -+ { -+ return PVRSRV_ERROR_NOT_INITIALISED; -+ } -+ -+ if (ui32OOMStatType >= PVRSRV_DEVICE_STAT_TYPE_COUNT) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* Lock while we find the correct process and update the record... */ -+ OSLockAcquire(g_psLinkedListLock); -+ -+ psProcessStats = _FindProcessStats(pidCurrent); -+ if (psProcessStats != NULL) -+ { -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ psProcessStats->ai32DevStats[psDeviceNode->sDevId.ui32InternalID][eOOMStatType]++; -+ OSLockRelease(psProcessStats->hLock); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, "PVRSRVStatsUpdateOOMStat: Process not found for Pid=%d", pidCurrent)); -+ } -+ -+ OSLockRelease(g_psLinkedListLock); -+ -+ return PVRSRV_OK; -+} /* PVRSRVStatsUpdateOOMStat */ -+ -+void -+PVRSRVStatsUpdateRenderContextStats(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32TotalNumPartialRenders, -+ IMG_UINT32 ui32TotalNumOutOfMemory, -+ IMG_UINT32 ui32NumTAStores, -+ IMG_UINT32 ui32Num3DStores, -+ IMG_UINT32 ui32NumCDMStores, -+ IMG_UINT32 ui32NumTDMStores, -+ IMG_UINT32 ui32NumRayStores, -+ IMG_PID pidOwner) -+{ -+ IMG_PID pidCurrent = pidOwner; -+ -+ PVRSRV_PROCESS_STATS* psProcessStats; -+ -+ /* Don't do anything if we are not initialised or we are shutting down! */ -+ if (!bProcessStatsInitialised) -+ { -+ return; -+ } -+ -+ /* Lock while we find the correct process and update the record... 
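
PVRSRVStatsDecrMemAllocStat above uses a hand-over-hand locking order: take the global list lock, look the process record up, take the per-process lock nested inside it, and only then drop the list lock, so the record cannot be deleted or moved while its stats are being updated. A compilable sketch of that ordering in plain pthreads (not the OSLock API, and with invented names) is shown here.

/* Hand-over-hand locking sketch; plain pthreads stand in for OSLock, and
 * find_record()/stats_decr() are invented names, not driver functions. */
#include <pthread.h>
#include <stddef.h>

struct proc_stats {
    int             pid;
    pthread_mutex_t lock;            /* plays the role of psProcessStats->hLock */
    long long       bytes;
};

static pthread_mutex_t   list_lock = PTHREAD_MUTEX_INITIALIZER;  /* g_psLinkedListLock */
static struct proc_stats table[1]  = { { 42, PTHREAD_MUTEX_INITIALIZER, 0 } };

/* Stand-in for _FindProcessStats(); call with list_lock held. */
static struct proc_stats *find_record(int pid)
{
    return (table[0].pid == pid) ? &table[0] : NULL;
}

void stats_decr(int pid, long long bytes)
{
    struct proc_stats *ps;

    pthread_mutex_lock(&list_lock);
    ps = find_record(pid);
    if (ps == NULL) {
        pthread_mutex_unlock(&list_lock);
        return;
    }
    pthread_mutex_lock(&ps->lock);      /* record is now pinned ...          */
    pthread_mutex_unlock(&list_lock);   /* ... so the list lock can go early */

    ps->bytes -= bytes;                 /* the actual stat update            */

    pthread_mutex_unlock(&ps->lock);
}
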
*/ -+ OSLockAcquire(g_psLinkedListLock); -+ -+ psProcessStats = _FindProcessStats(pidCurrent); -+ if (psProcessStats != NULL) -+ { -+ IMG_UINT32 ui32DevID = psDeviceNode->sDevId.ui32InternalID; -+ -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_RC_PRS] += ui32TotalNumPartialRenders; -+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_RC_OOMS] += ui32TotalNumOutOfMemory; -+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_RC_TA_STORES] += ui32NumTAStores; -+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_RC_3D_STORES] += ui32Num3DStores; -+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_RC_CDM_STORES]+= ui32NumCDMStores; -+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_RC_TDM_STORES]+= ui32NumTDMStores; -+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_RC_RAY_STORES]+= ui32NumRayStores; -+ OSLockRelease(psProcessStats->hLock); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, "PVRSRVStatsUpdateRenderContextStats: Process not found for Pid=%d", pidCurrent)); -+ } -+ -+ OSLockRelease(g_psLinkedListLock); -+} /* PVRSRVStatsUpdateRenderContextStats */ -+ -+void -+PVRSRVStatsUpdateZSBufferStats(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32NumReqByApp, -+ IMG_UINT32 ui32NumReqByFW, -+ IMG_PID owner) -+{ -+ IMG_PID currentPid = (owner==0)?OSGetCurrentClientProcessIDKM():owner; -+ PVRSRV_PROCESS_STATS* psProcessStats; -+ -+ -+ /* Don't do anything if we are not initialised or we are shutting down! */ -+ if (!bProcessStatsInitialised) -+ { -+ return; -+ } -+ -+ /* Lock while we find the correct process and update the record... */ -+ OSLockAcquire(g_psLinkedListLock); -+ -+ psProcessStats = _FindProcessStats(currentPid); -+ if (psProcessStats != NULL) -+ { -+ IMG_UINT32 ui32DevID = psDeviceNode->sDevId.ui32InternalID; -+ -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_ZSBUFFER_REQS_BY_APP] += ui32NumReqByApp; -+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_ZSBUFFER_REQS_BY_FW] += ui32NumReqByFW; -+ OSLockRelease(psProcessStats->hLock); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Process not found for Pid=%d", __func__, currentPid)); -+ } -+ -+ OSLockRelease(g_psLinkedListLock); -+} /* PVRSRVStatsUpdateZSBufferStats */ -+ -+void -+PVRSRVStatsUpdateFreelistStats(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32NumGrowReqByApp, -+ IMG_UINT32 ui32NumGrowReqByFW, -+ IMG_UINT32 ui32InitFLPages, -+ IMG_UINT32 ui32NumHighPages, -+ IMG_PID ownerPid) -+{ -+ IMG_PID currentPid = (ownerPid!=0)?ownerPid:OSGetCurrentClientProcessIDKM(); -+ PVRSRV_PROCESS_STATS* psProcessStats; -+ -+ /* Don't do anything if we are not initialised or we are shutting down! */ -+ if (!bProcessStatsInitialised) -+ { -+ return; -+ } -+ -+ /* Lock while we find the correct process and update the record... 
*/ -+ OSLockAcquire(g_psLinkedListLock); -+ -+ psProcessStats = _FindProcessStats(currentPid); -+ -+ if (psProcessStats != NULL) -+ { -+ IMG_UINT32 ui32DevID = psDeviceNode->sDevId.ui32InternalID; -+ -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_FREELIST_GROW_REQS_BY_APP] += ui32NumGrowReqByApp; -+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_FREELIST_GROW_REQS_BY_FW] += ui32NumGrowReqByFW; -+ -+ UPDATE_MAX_VALUE(psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_FREELIST_PAGES_INIT], -+ (IMG_INT32) ui32InitFLPages); -+ -+ UPDATE_MAX_VALUE(psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_FREELIST_MAX_PAGES], -+ (IMG_INT32) ui32NumHighPages); -+ OSLockRelease(psProcessStats->hLock); -+ -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Process not found for Pid=%d", __func__, currentPid)); -+ } -+ -+ OSLockRelease(g_psLinkedListLock); -+} /* PVRSRVStatsUpdateFreelistStats */ -+ -+ -+#if defined(ENABLE_DEBUGFS_PIDS) -+ -+int -+GenericStatsPrintElementsLive(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ PVRSRV_STAT_PV_DATA *psStatType = DIGetPrivData(psEntry); -+ PVRSRV_PROCESS_STATS* psProcessStats; -+ DLLIST_NODE *psNode, *psNext; -+ -+ PVR_UNREFERENCED_PARAMETER(pvData); -+ -+ PVR_ASSERT(psStatType->pfnStatsPrintElements != NULL); -+ -+ DIPrintf(psEntry, "%s\n", psStatType->szLiveStatsHeaderStr); -+ -+ OSLockAcquire(g_psLinkedListLock); -+ -+ if (dllist_is_empty(&gsLiveList)) -+ { -+ DIPrintf(psEntry, "No Stats to display\n%s\n", g_szSeparatorStr); -+ } -+ else -+ { -+ dllist_foreach_node(&gsLiveList, psNode, psNext) -+ { -+ psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); -+ psStatType->pfnStatsPrintElements(psEntry, psProcessStats); -+ DIPrintf(psEntry, "%s\n", g_szSeparatorStr); -+ } -+ } -+ OSLockRelease(g_psLinkedListLock); -+ -+ return 0; -+} -+ -+int -+GenericStatsPrintElementsRetired(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ PVRSRV_STAT_PV_DATA *psStatType = DIGetPrivData(psEntry); -+ PVRSRV_PROCESS_STATS* psProcessStats; -+ DLLIST_NODE *psNode, *psNext; -+ -+ PVR_UNREFERENCED_PARAMETER(pvData); -+ -+ PVR_ASSERT(psStatType->pfnStatsPrintElements != NULL); -+ -+ DIPrintf(psEntry, "%s\n", psStatType->szRetiredStatsHeaderStr); -+ -+ OSLockAcquire(g_psLinkedListLock); -+ -+ if (dllist_is_empty(&gsDeadList)) -+ { -+ DIPrintf(psEntry, "No Stats to display\n%s\n", g_szSeparatorStr); -+ } -+ else -+ { -+ dllist_foreach_node(&gsDeadList, psNode, psNext) -+ { -+ psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); -+ psStatType->pfnStatsPrintElements(psEntry, psProcessStats); -+ DIPrintf(psEntry, "%s\n", g_szSeparatorStr); -+ } -+ } -+ OSLockRelease(g_psLinkedListLock); -+ -+ return 0; -+} -+ -+#if defined(PVRSRV_ENABLE_PERPID_STATS) -+/*************************************************************************/ /*! -+@Function ProcessStatsPrintElements -+@Description Prints all elements for this process statistic record. -+@Input pvStatPtr Pointer to statistics structure. -+@Input pfnOSStatsPrintf Printf function to use for output. 
-+*/ /**************************************************************************/ -+void -+ProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, -+ PVRSRV_PROCESS_STATS *psProcessStats) -+{ -+ IMG_UINT32 ui32StatNumber; -+ -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ -+ DIPrintf(psEntry, "PID %u\n", psProcessStats->pid); -+ -+ /* Print device stats table PVRSRV_DEVICE_STAT_TYPE */ -+ if (psProcessStats->ui32DevCount > 0) -+ { -+ IMG_UINT32 i; -+ -+ for (ui32StatNumber = 0; -+ ui32StatNumber < ARRAY_SIZE(pszDeviceStatType); -+ ui32StatNumber++) -+ { -+ if (OSStringNCompare(pszDeviceStatType[ui32StatNumber], "", 1) != 0) -+ { -+ DIPrintf(psEntry, "%-34s", -+ pszDeviceStatType[ui32StatNumber]); -+ -+ for (i = 0; i < psProcessStats->ui32DevCount; i++) -+ { -+ if (i == 0) -+ { -+ DIPrintf(psEntry, "%10d", -+ psProcessStats->ai32DevStats[i][ui32StatNumber]); -+ } -+ else -+ { -+ DIPrintf(psEntry, ",%d", -+ psProcessStats->ai32DevStats[i][ui32StatNumber]); -+ } -+ } -+ } -+ -+ DIPrintf(psEntry, "\n"); -+ } -+ } -+ -+ /* Print process memory stats table PVRSRV_PROCESS_STAT_TYPE */ -+ for (ui32StatNumber = 0; -+ ui32StatNumber < ARRAY_SIZE(pszProcessStatType); -+ ui32StatNumber++) -+ { -+ if (OSStringNCompare(pszProcessStatType[ui32StatNumber], "", 1) != 0) -+ { -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ if ((ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) || -+ (ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES)) -+ { -+ /* get the stat from RI */ -+ IMG_INT32 ui32Total = RITotalAllocProcessKM(psProcessStats->pid, -+ (ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) -+ ? PHYS_HEAP_TYPE_LMA : PHYS_HEAP_TYPE_UMA); -+ -+ DIPrintf(psEntry, "%-34s%10d %8dK\n", -+ pszProcessStatType[ui32StatNumber], ui32Total, ui32Total>>10); -+ } -+ else -+#endif -+ { -+ if (ui32StatNumber >= PVRSRV_PROCESS_STAT_TYPE_KMALLOC && -+ ui32StatNumber <= PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX) -+ { -+ DIPrintf(psEntry, "%-34s%10"IMG_INT64_FMTSPECd" %8"IMG_INT64_FMTSPECd"K\n", -+ pszProcessStatType[ui32StatNumber], -+ psProcessStats->i64StatValue[ui32StatNumber], -+ psProcessStats->i64StatValue[ui32StatNumber] >> 10); -+ } -+ else -+ { -+ DIPrintf(psEntry, "%-34s%10"IMG_INT64_FMTSPECd"\n", -+ pszProcessStatType[ui32StatNumber], -+ psProcessStats->i64StatValue[ui32StatNumber]); -+ } -+ } -+ } -+ } -+ -+ OSLockRelease(psProcessStats->hLock); -+} /* ProcessStatsPrintElements */ -+#endif -+ -+#if defined(PVRSRV_ENABLE_CACHEOP_STATS) -+void -+PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp, -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEV_PHYADDR sDevPAddr, -+#endif -+ IMG_DEVMEM_SIZE_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT64 ui64ExecuteTime, -+ IMG_BOOL bUserModeFlush, -+ IMG_PID ownerPid) -+{ -+ IMG_PID currentPid = (ownerPid!=0)?ownerPid:OSGetCurrentClientProcessIDKM(); -+ PVRSRV_PROCESS_STATS* psProcessStats; -+ -+ /* Don't do anything if we are not initialised or we are shutting down! */ -+ if (!bProcessStatsInitialised) -+ { -+ return; -+ } -+ -+ /* Lock while we find the correct process and update the record... 
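
PVRSRVStatsUpdateCacheOpStats, continued below, records each cache maintenance operation in a small per-process ring buffer: the current write index is claimed for the new entry and then advanced with a wrap-around macro, and CacheOpStatsPrintElements later snapshots that index and walks backwards so the newest entries print first. A standalone sketch of just that index arithmetic, with an invented buffer depth, is given here.

/* Sketch of the wrap-around index bookkeeping used by the CacheOp history
 * (INCREMENT/DECREMENT_CACHEOP_STAT_IDX_WRAP in the driver). The depth of
 * 16 is illustrative; the driver defines its own buffer size elsewhere. */
#include <stdio.h>

#define MAX_CACHEOP_STAT 16

static unsigned int idx_inc_wrap(unsigned int i)
{
    return (i + 1) % MAX_CACHEOP_STAT;
}

static unsigned int idx_dec_wrap(unsigned int i)
{
    return (i + MAX_CACHEOP_STAT - 1) % MAX_CACHEOP_STAT;
}

int main(void)
{
    unsigned int       write_idx = 0, read_idx;
    unsigned long long history[MAX_CACHEOP_STAT] = { 0 };

    /* Writer: store in the current slot, then advance the write index. */
    for (unsigned long long op = 1; op <= 20; op++) {
        history[write_idx] = op;
        write_idx = idx_inc_wrap(write_idx);
    }

    /* Reader: walk backwards from a snapshot of the write index until it is
     * reached again, i.e. newest entry first, the same traversal as
     * CacheOpStatsPrintElements(). */
    for (read_idx = idx_dec_wrap(write_idx);
         read_idx != write_idx;
         read_idx = idx_dec_wrap(read_idx))
    {
        printf("%llu\n", history[read_idx]);
    }
    return 0;
}
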
*/ -+ OSLockAcquire(g_psLinkedListLock); -+ -+ psProcessStats = _FindProcessStats(currentPid); -+ -+ if (psProcessStats != NULL) -+ { -+ IMG_INT32 Idx; -+ -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ -+ /* Look-up next buffer write index */ -+ Idx = psProcessStats->uiCacheOpWriteIndex; -+ psProcessStats->uiCacheOpWriteIndex = INCREMENT_CACHEOP_STAT_IDX_WRAP(Idx); -+ -+ /* Store all CacheOp meta-data */ -+ psProcessStats->asCacheOp[Idx].uiCacheOp = uiCacheOp; -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+ psProcessStats->asCacheOp[Idx].sDevVAddr = sDevVAddr; -+ psProcessStats->asCacheOp[Idx].sDevPAddr = sDevPAddr; -+#endif -+ psProcessStats->asCacheOp[Idx].uiOffset = uiOffset; -+ psProcessStats->asCacheOp[Idx].uiSize = uiSize; -+ psProcessStats->asCacheOp[Idx].bUserModeFlush = bUserModeFlush; -+ psProcessStats->asCacheOp[Idx].ui64ExecuteTime = ui64ExecuteTime; -+ -+ OSLockRelease(psProcessStats->hLock); -+ } -+ -+ OSLockRelease(g_psLinkedListLock); -+} /* PVRSRVStatsUpdateCacheOpStats */ -+ -+/*************************************************************************/ /*! -+@Function CacheOpStatsPrintElements -+@Description Prints all elements for this process statistic CacheOp record. -+@Input pvStatPtr Pointer to statistics structure. -+@Input pfnOSStatsPrintf Printf function to use for output. -+*/ /**************************************************************************/ -+void -+CacheOpStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, -+ PVRSRV_PROCESS_STATS *psProcessStats) -+{ -+ IMG_CHAR *pszCacheOpType, *pszFlushType, *pszFlushMode; -+ IMG_INT32 i32WriteIdx, i32ReadIdx; -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+ #define CACHEOP_RI_PRINTF_HEADER \ -+ "%-10s %-10s %-5s %-16s %-16s %-10s %-10s %-12s\n" -+ #define CACHEOP_RI_PRINTF \ -+ "%-10s %-10s %-5s 0x%-14llx 0x%-14llx 0x%-8llx 0x%-8llx %-12llu\n" -+#else -+ #define CACHEOP_PRINTF_HEADER \ -+ "%-10s %-10s %-5s %-10s %-10s %-12s\n" -+ #define CACHEOP_PRINTF \ -+ "%-10s %-10s %-5s 0x%-8llx 0x%-8llx %-12llu\n" -+#endif -+ -+ DIPrintf(psEntry, "PID %u\n", psProcessStats->pid); -+ -+ /* File header info */ -+ DIPrintf(psEntry, -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+ CACHEOP_RI_PRINTF_HEADER, -+#else -+ CACHEOP_PRINTF_HEADER, -+#endif -+ "CacheOp", -+ "Type", -+ "Mode", -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+ "DevVAddr", -+ "DevPAddr", -+#endif -+ "Offset", -+ "Size", -+ "Time (us)"); -+ -+ /* Take a snapshot of write index, read backwards in buffer -+ and wrap round at boundary */ -+ i32WriteIdx = psProcessStats->uiCacheOpWriteIndex; -+ for (i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32WriteIdx); -+ i32ReadIdx != i32WriteIdx; -+ i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32ReadIdx)) -+ { -+ IMG_UINT64 ui64ExecuteTime = psProcessStats->asCacheOp[i32ReadIdx].ui64ExecuteTime; -+ IMG_DEVMEM_SIZE_T ui64NumOfPages = psProcessStats->asCacheOp[i32ReadIdx].uiSize >> OSGetPageShift(); -+ -+ if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC) -+ { -+ pszFlushType = "RBF.Fast"; -+ } -+ else -+ { -+ pszFlushType = "RBF.Slow"; -+ } -+ -+ if (psProcessStats->asCacheOp[i32ReadIdx].bUserModeFlush) -+ { -+ pszFlushMode = "UM"; -+ } -+ else -+ { -+ pszFlushMode = "KM"; -+ } -+ -+ switch (psProcessStats->asCacheOp[i32ReadIdx].uiCacheOp) -+ { -+ case PVRSRV_CACHE_OP_NONE: -+ pszCacheOpType = "None"; -+ break; -+ case PVRSRV_CACHE_OP_CLEAN: -+ pszCacheOpType = "Clean"; -+ break; -+ case PVRSRV_CACHE_OP_INVALIDATE: -+ 
pszCacheOpType = "Invalidate"; -+ break; -+ case PVRSRV_CACHE_OP_FLUSH: -+ pszCacheOpType = "Flush"; -+ break; -+ default: -+ pszCacheOpType = "Unknown"; -+ break; -+ } -+ -+ DIPrintf(psEntry, -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+ CACHEOP_RI_PRINTF, -+#else -+ CACHEOP_PRINTF, -+#endif -+ pszCacheOpType, -+ pszFlushType, -+ pszFlushMode, -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+ psProcessStats->asCacheOp[i32ReadIdx].sDevVAddr.uiAddr, -+ psProcessStats->asCacheOp[i32ReadIdx].sDevPAddr.uiAddr, -+#endif -+ psProcessStats->asCacheOp[i32ReadIdx].uiOffset, -+ psProcessStats->asCacheOp[i32ReadIdx].uiSize, -+ ui64ExecuteTime); -+ } -+ -+} /* CacheOpStatsPrintElements */ -+#endif -+ -+#if defined(PVRSRV_ENABLE_MEMORY_STATS) -+static PVRSRV_ERROR _PrintMemStatsEntry(uintptr_t k, uintptr_t v, void* pvPriv) -+{ -+ IMG_UINT32 ui32VAddrFields = sizeof(void*)/sizeof(IMG_UINT32); -+ IMG_UINT32 ui32PAddrFields = sizeof(IMG_CPU_PHYADDR)/sizeof(IMG_UINT32); -+ IMG_UINT32 ui32ItemNumber; -+ PVRSRV_MEM_ALLOC_REC *psRecord = (PVRSRV_MEM_ALLOC_REC *)(uintptr_t)v; -+ PVRSRV_MEM_ALLOC_PRINT_DATA *psPrintData = (PVRSRV_MEM_ALLOC_PRINT_DATA *)pvPriv; -+ OSDI_IMPL_ENTRY *psEntry = psPrintData->psEntry; -+ -+ if (psRecord != NULL) -+ { -+ IMG_BOOL bPrintStat = IMG_TRUE; -+ -+ DIPrintf(psEntry, "%-5d ", psPrintData->pid); -+ -+ switch (psRecord->eAllocType) -+ { -+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: DIPrintf(psEntry, "KMALLOC "); break; -+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: DIPrintf(psEntry, "VMALLOC "); break; -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: DIPrintf(psEntry, "ALLOC_PAGES_PT_LMA "); break; -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: DIPrintf(psEntry, "ALLOC_PAGES_PT_UMA "); break; -+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: DIPrintf(psEntry, "IOREMAP_PT_LMA "); break; -+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: DIPrintf(psEntry, "VMAP_PT_UMA "); break; -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: DIPrintf(psEntry, "ALLOC_LMA_PAGES "); break; -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES: DIPrintf(psEntry, "ZOMBIE_LMA_PAGES "); break; -+#endif -+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: DIPrintf(psEntry, "ALLOC_UMA_PAGES "); break; -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ case PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES: DIPrintf(psEntry, "ZOMBIE_UMA_PAGES "); break; -+#endif -+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: DIPrintf(psEntry, "MAP_UMA_LMA_PAGES "); break; -+ case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: DIPrintf(psEntry, "DMA_BUF_IMPORT "); break; -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE: DIPrintf(psEntry, "DMA_BUF_ZOMBIE "); break; -+#endif -+ default: DIPrintf(psEntry, "INVALID "); break; -+ } -+ -+ if (bPrintStat) -+ { -+ for (ui32ItemNumber = 0; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++) -+ { -+ DIPrintf(psEntry, "%08x", *(((IMG_UINT32*) &psRecord->pvCpuVAddr) + ui32VAddrFields - ui32ItemNumber - 1)); -+ } -+ DIPrintf(psEntry, " "); -+ -+ for (ui32ItemNumber = 0; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++) -+ { -+ DIPrintf(psEntry, "%08x", *(((IMG_UINT32*) &psRecord->sCpuPAddr.uiAddr) + ui32PAddrFields - ui32ItemNumber - 1)); -+ } -+ -+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) -+ DIPrintf(psEntry, " " IMG_SIZE_FMTSPEC, psRecord->uiBytes); -+ -+ DIPrintf(psEntry, " %s", (IMG_CHAR*) psRecord->pvAllocdFromFile); -+ -+ DIPrintf(psEntry, " %d\n", psRecord->ui32AllocdFromLine); -+#else -+ DIPrintf(psEntry, " " 
IMG_SIZE_FMTSPEC "\n", psRecord->uiBytes); -+#endif -+ } -+ -+ psPrintData->ui32NumEntries++; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function MemStatsPrintElements -+@Description Prints all elements for the memory statistic record. -+@Input pvStatPtr Pointer to statistics structure. -+@Input pfnOSStatsPrintf Printf function to use for output. -+*/ /**************************************************************************/ -+void -+MemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, -+ PVRSRV_PROCESS_STATS *psProcessStats) -+{ -+ IMG_UINT32 ui32VAddrFields = sizeof(void*)/sizeof(IMG_UINT32); -+ IMG_UINT32 ui32PAddrFields = sizeof(IMG_CPU_PHYADDR)/sizeof(IMG_UINT32); -+ IMG_UINT32 ui32ItemNumber; -+ PVRSRV_MEM_ALLOC_PRINT_DATA sPrintData; -+ -+ sPrintData.psEntry = psEntry; -+ sPrintData.pid = psProcessStats->pid; -+ sPrintData.ui32NumEntries = 0; -+ -+ /* Write the header... */ -+ DIPrintf(psEntry, "PID "); -+ -+ DIPrintf(psEntry, "Type VAddress"); -+ for (ui32ItemNumber = 1; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++) -+ { -+ DIPrintf(psEntry, " "); -+ } -+ -+ DIPrintf(psEntry, " PAddress"); -+ for (ui32ItemNumber = 1; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++) -+ { -+ DIPrintf(psEntry, " "); -+ } -+ -+ DIPrintf(psEntry, " Size(bytes)\n"); -+ -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ HASH_Iterate(psProcessStats->psMemoryRecords, (HASH_pfnCallback)_PrintMemStatsEntry, &sPrintData); -+ OSLockRelease(psProcessStats->hLock); -+ -+ if (sPrintData.ui32NumEntries == 0) -+ { -+ DIPrintf(psEntry, "%-5d\n", psProcessStats->pid); -+ } -+} /* MemStatsPrintElements */ -+#endif -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+/*************************************************************************/ /*! -+@Function RIMemStatsPrintElements -+@Description Prints all elements for the RI Memory record. -+@Input pvStatPtr Pointer to statistics structure. -+@Input pfnOSStatsPrintf Printf function to use for output. -+*/ /**************************************************************************/ -+void RIMemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, -+ PVRSRV_PROCESS_STATS *psProcessStats) -+{ -+ IMG_CHAR *pszStatFmtText = NULL; -+ IMG_HANDLE *pRIHandle = NULL; -+ -+ /* Acquire RI lock */ -+ RILockAcquireKM(); -+ -+ /* -+ * Loop through the RI system to get each line of text. -+ */ -+ while (RIGetListEntryKM(psProcessStats->pid, -+ &pRIHandle, -+ &pszStatFmtText)) -+ { -+ DIPrintf(psEntry, "%s", pszStatFmtText); -+ } -+ -+ /* Release RI lock */ -+ RILockReleaseKM(); -+ -+} /* RIMemStatsPrintElements */ -+#endif -+ -+#endif -+ -+int GlobalStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ IMG_UINT32 ui32StatNumber; -+ PVR_UNREFERENCED_PARAMETER(pvData); -+ -+ OSLockAcquire(gsGlobalStats.hGlobalStatsLock); -+ -+ for (ui32StatNumber = 0; -+ ui32StatNumber < ARRAY_SIZE(pszDriverStatType); -+ ui32StatNumber++) -+ { -+ if (OSStringNCompare(pszDriverStatType[ui32StatNumber], "", 1) != 0) -+ { -+ DIPrintf(psEntry, "%-34s%12llu\n", -+ pszDriverStatType[ui32StatNumber], -+ GET_GLOBAL_STAT_VALUE(ui32StatNumber)); -+ } -+ } -+ -+ OSLockRelease(gsGlobalStats.hGlobalStatsLock); -+ -+ return 0; -+} -+ -+/*************************************************************************/ /*! -+@Function PVRSRVFindProcessMemStats -+@Description Using the provided PID find memory stats for that process. -+ Memstats will be provided for live/connected processes only. 
-+ Memstat values provided by this API relate only to the physical -+ memory allocated by the process and does not relate to any of -+ the mapped or imported memory. -+@Input pid Process to search for. -+@Input ArraySize Size of the array where memstat -+ records will be stored -+@Input bAllProcessStats Flag to denote if stats for -+ individual process are requested -+ stats for all processes are -+ requested -+@Input MemoryStats Handle to the memory where memstats -+ are stored. -+@Output Memory statistics records for the requested pid. -+*/ /**************************************************************************/ -+PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, -+ IMG_UINT32 ui32ArrSize, -+ IMG_BOOL bAllProcessStats, -+ IMG_UINT64 *pui64MemoryStats) -+{ -+ IMG_INT i; -+ PVRSRV_PROCESS_STATS* psProcessStats; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pui64MemoryStats, "pui64MemoryStats"); -+ -+ if (bAllProcessStats) -+ { -+ PVR_LOG_RETURN_IF_FALSE(ui32ArrSize == PVRSRV_DRIVER_STAT_TYPE_COUNT, -+ "MemStats array size is incorrect", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ OSLockAcquire(gsGlobalStats.hGlobalStatsLock); -+ -+ for (i = 0; i < ui32ArrSize; i++) -+ { -+ pui64MemoryStats[i] = GET_GLOBAL_STAT_VALUE(i); -+ } -+ -+ OSLockRelease(gsGlobalStats.hGlobalStatsLock); -+ -+ return PVRSRV_OK; -+ } -+ -+ PVR_LOG_RETURN_IF_FALSE(ui32ArrSize == PVRSRV_PROCESS_STAT_TYPE_COUNT, -+ "MemStats array size is incorrect", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ OSLockAcquire(g_psLinkedListLock); -+ -+ /* Search for the given PID in the Live List */ -+ psProcessStats = _FindProcessStatsInLiveList(pid); -+ -+ if (psProcessStats == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Process %d not found. This process may not be live anymore.", (IMG_INT)pid)); -+ OSLockRelease(g_psLinkedListLock); -+ -+ return PVRSRV_ERROR_PROCESS_NOT_FOUND; -+ } -+ -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ for (i = 0; i < ui32ArrSize; i++) -+ { -+ pui64MemoryStats[i] = psProcessStats->i64StatValue[i]; -+ } -+ OSLockRelease(psProcessStats->hLock); -+ -+ OSLockRelease(g_psLinkedListLock); -+ -+ return PVRSRV_OK; -+ -+} /* PVRSRVFindProcessMemStats */ -+ -+/*************************************************************************/ /*! -+@Function PVRSRVGetProcessMemUsage -+@Description Calculate allocated kernel and graphics memory for all live or -+ connected processes. Memstat values provided by this API relate -+ only to the physical memory allocated by the process and does -+ not relate to any of the mapped or imported memory. -+@Output pui64TotalMem Total memory usage for all live -+ PIDs connected to the driver. -+@Output pui32NumberOfLivePids Number of live pids currently -+ connected to the server. -+@Output ppsPerProcessMemUsageData Handle to an array of -+ PVRSRV_PER_PROCESS_MEM_USAGE, -+ number of elements defined by -+ pui32NumberOfLivePids. -+@Return PVRSRV_OK Success -+ PVRSRV_ERROR_PROCESS_NOT_FOUND No live processes. -+ PVRSRV_ERROR_OUT_OF_MEMORY Failed to allocate memory for -+ ppsPerProcessMemUsageData. 
-+*/ /**************************************************************************/ -+PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT64 *pui64TotalMem, -+ IMG_UINT32 *pui32NumberOfLivePids, -+ PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData) -+{ -+ IMG_UINT32 ui32NumberOfLivePids = 0; -+ PVRSRV_ERROR eError = PVRSRV_ERROR_PROCESS_NOT_FOUND; -+ PVRSRV_PER_PROCESS_MEM_USAGE* psPerProcessMemUsageData = NULL; -+ DLLIST_NODE *psNode, *psNext; -+ -+ OSLockAcquire(gsGlobalStats.hGlobalStatsLock); -+ -+ *pui64TotalMem = GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_KMALLOC) + -+ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_VMALLOC) + -+ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA) + -+ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA) + -+ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA) + -+ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA); -+ -+ OSLockRelease(gsGlobalStats.hGlobalStatsLock); -+ -+ OSLockAcquire(g_psLinkedListLock); -+ -+ dllist_foreach_node(&gsLiveList, psNode, psNext) -+ { -+ ui32NumberOfLivePids++; -+ } -+ -+ if (ui32NumberOfLivePids > 0) -+ { -+ /* Use OSAllocZMemNoStats to prevent deadlock. */ -+ psPerProcessMemUsageData = OSAllocZMemNoStats(ui32NumberOfLivePids * sizeof(*psPerProcessMemUsageData)); -+ -+ if (psPerProcessMemUsageData) -+ { -+ PVRSRV_PROCESS_STATS* psProcessStats = NULL; -+ IMG_UINT32 ui32Counter = 0; -+ -+ dllist_foreach_node(&gsLiveList, psNode, psNext) -+ { -+ psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode); -+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); -+ -+ psPerProcessMemUsageData[ui32Counter].ui32Pid = (IMG_UINT32)psProcessStats->pid; -+ -+ psPerProcessMemUsageData[ui32Counter].ui64KernelMemUsage = -+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] + -+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC]; -+ -+ psPerProcessMemUsageData[ui32Counter].ui64GraphicsMemUsage = -+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] + -+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] + -+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] + -+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES]; -+ -+ OSLockRelease(psProcessStats->hLock); -+ ui32Counter++; -+ } -+ eError = PVRSRV_OK; -+ } -+ else -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ } -+ -+ OSLockRelease(g_psLinkedListLock); -+ *pui32NumberOfLivePids = ui32NumberOfLivePids; -+ *ppsPerProcessMemUsageData = psPerProcessMemUsageData; -+ -+ return eError; -+ -+} /* PVRSRVGetProcessMemUsage */ -diff --git a/drivers/gpu/drm/img-rogue/process_stats.h b/drivers/gpu/drm/img-rogue/process_stats.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/process_stats.h -@@ -0,0 +1,214 @@ -+/*************************************************************************/ /*! -+@File -+@Title Functions for creating and reading proc filesystem entries. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PROCESS_STATS_H -+#define PROCESS_STATS_H -+ -+#include -+ -+#include "pvrsrv_error.h" -+#include "allocmem.h" -+#include "cache_ops.h" -+#include "device.h" -+#include "connection_server.h" -+ -+/* -+ * The publishing of Process Stats is controlled by the -+ * PVRSRV_ENABLE_PROCESS_STATS build option. The recording of all Memory -+ * allocations is controlled by the PVRSRV_ENABLE_MEMORY_STATS build option. -+ * -+ * Note: There will be a performance degradation with memory allocation -+ * recording enabled! -+ */ -+ -+ -+/* -+ * Memory types which can be tracked... 
-+ */ -+typedef enum { -+ PVRSRV_MEM_ALLOC_TYPE_KMALLOC, /* memory allocated by kmalloc() */ -+ PVRSRV_MEM_ALLOC_TYPE_VMALLOC, /* memory allocated by vmalloc() */ -+ PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, /* pages allocated from UMA to hold page table information */ -+ PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, /* ALLOC_PAGES_PT_UMA mapped to kernel address space */ -+ PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, /* pages allocated from LMA to hold page table information */ -+ PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, /* ALLOC_PAGES_PT_LMA mapped to kernel address space */ -+ PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, /* pages allocated from LMA */ -+ PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, /* pages allocated from UMA */ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_LMA_PAGES, /* zombie pages allocated from LMA */ -+ PVRSRV_MEM_ALLOC_TYPE_ZOMBIE_UMA_PAGES, /* zombie pages allocated from UMA */ -+#endif -+ PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, /* mapped UMA/LMA pages */ -+ PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, /* pages in the page pool */ -+ PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, /* dma-buf imports */ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_ZOMBIE, /* dma-buf zombie */ -+#endif -+ -+ /* Must be the last enum...*/ -+ PVRSRV_MEM_ALLOC_TYPE_COUNT -+} PVRSRV_MEM_ALLOC_TYPE; -+ -+/* -+ * Functions for managing the processes recorded... -+ */ -+PVRSRV_ERROR PVRSRVStatsInitialise(void); -+void PVRSRVStatsDestroy(void); -+void PVRSRVStatsDestroyDI(void); -+ -+PVRSRV_ERROR PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats); -+ -+void PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats); -+ -+PVRSRV_ERROR PVRSRVStatsDeviceConnect(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+void PVRSRVStatsDeviceDisconnect(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+#define MAX_POWER_STAT_ENTRIES 51 -+ -+/* -+ * Functions for recording the statistics... -+ */ -+ -+void PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, -+ void *pvCpuVAddr, -+ IMG_CPU_PHYADDR sCpuPAddr, -+ size_t uiBytes, -+ IMG_PID uiPid -+ DEBUG_MEMSTATS_PARAMS); -+ -+void PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, -+ IMG_UINT64 ui64Key, -+ IMG_PID uiPid); -+ -+void PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, -+ size_t uiBytes, -+ IMG_PID uiPid); -+ -+/* -+ * Increases the memory stat for eAllocType. Tracks the allocation size value -+ * by inserting a value into a hash table with uiCpuVAddr as key. -+ * Pair with PVRSRVStatsDecrMemAllocStatAndUntrack(). -+ */ -+void PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType, -+ size_t uiBytes, -+ IMG_UINT64 uiCpuVAddr, -+ IMG_PID uiPid); -+ -+void PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, -+ size_t uiBytes, -+ IMG_PID uiPid); -+ -+void PVRSRVStatsDecrMemKAllocStat(size_t uiBytes, -+ IMG_PID decrPID); -+ -+/* -+ * Decrease the memory stat for eAllocType. Takes the allocation size value -+ * from the hash table with uiCpuVAddr as key. -+ * Pair with PVRSRVStatsIncrMemAllocStatAndTrack(). 
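
The Track/Untrack pair declared here keys the size-tracking hash table on the allocation's CPU virtual address, which is why the free path only needs the address and the allocation type. A hypothetical caller might pair them as below; my_alloc_tracked()/my_free_tracked() and the vmalloc()/vfree() choice are illustrative only and not part of this patch.

/* Hypothetical usage of the Track/Untrack pair; wrapper names are invented
 * and the include list is abbreviated to what this sketch needs. */
#include <linux/types.h>
#include <linux/vmalloc.h>
#include "process_stats.h"

static void *my_alloc_tracked(size_t uiBytes)
{
    void *pvAddr = vmalloc(uiBytes);

    if (pvAddr != NULL)
    {
        /* Adds uiBytes to the VMALLOC stat for this PID and records the size
         * in the tracking hash table, keyed by the CPU virtual address. */
        PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
                                            uiBytes,
                                            (IMG_UINT64)(uintptr_t)pvAddr,
                                            OSGetCurrentClientProcessIDKM());
    }
    return pvAddr;
}

static void my_free_tracked(void *pvAddr)
{
    /* Looks the size up again via the same key and removes the entry. */
    PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
                                          (IMG_UINT64)(uintptr_t)pvAddr);
    vfree(pvAddr);
}
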
-+ */ -+void PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType, -+ IMG_UINT64 uiCpuVAddr); -+ -+void -+PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes); -+ -+void -+PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes); -+ -+PVRSRV_ERROR -+PVRSRVStatsUpdateOOMStat(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32OOMStatType, -+ IMG_PID pidOwner); -+ -+void PVRSRVStatsUpdateRenderContextStats(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32TotalNumPartialRenders, -+ IMG_UINT32 ui32TotalNumOutOfMemory, -+ IMG_UINT32 ui32TotalTAStores, -+ IMG_UINT32 ui32Total3DStores, -+ IMG_UINT32 ui32TotalCDMStores, -+ IMG_UINT32 ui32TotalTDMStores, -+ IMG_UINT32 ui32NumRayStores, -+ IMG_PID owner); -+ -+void PVRSRVStatsUpdateZSBufferStats(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32NumReqByApp, -+ IMG_UINT32 ui32NumReqByFW, -+ IMG_PID owner); -+ -+void PVRSRVStatsUpdateFreelistStats(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32NumGrowReqByApp, -+ IMG_UINT32 ui32NumGrowReqByFW, -+ IMG_UINT32 ui32InitFLPages, -+ IMG_UINT32 ui32NumHighPages, -+ IMG_PID ownerPid); -+#if defined(PVRSRV_ENABLE_CACHEOP_STATS) -+void PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp, -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) -+ IMG_DEV_VIRTADDR sDevVAddr, -+ IMG_DEV_PHYADDR sDevPAddr, -+#endif -+ IMG_DEVMEM_SIZE_T uiOffset, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT64 ui64ExecuteTimeMs, -+ IMG_BOOL bUserModeFlush, -+ IMG_PID ownerPid); -+#endif -+ -+/* Functions used for calculating the memory usage statistics of a process */ -+PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, -+ IMG_UINT32 ui32ArrSize, -+ IMG_BOOL bAllProcessStats, -+ IMG_UINT64 *pui64MemoryStats); -+ -+typedef struct { -+ IMG_UINT32 ui32Pid; -+ IMG_UINT64 ui64KernelMemUsage; -+ IMG_UINT64 ui64GraphicsMemUsage; -+} PVRSRV_PER_PROCESS_MEM_USAGE; -+ -+PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT64 *pui64TotalMem, -+ IMG_UINT32 *pui32NumberOfLivePids, -+ PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData); -+ -+#endif /* PROCESS_STATS_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_bridge.h b/drivers/gpu/drm/img-rogue/pvr_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_bridge.h -@@ -0,0 +1,457 @@ -+/*************************************************************************/ /*! -+@File -+@Title PVR Bridge Functionality -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the PVR Bridge code -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PVR_BRIDGE_H -+#define PVR_BRIDGE_H -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+#include "pvrsrv_error.h" -+#if defined(SUPPORT_DISPLAY_CLASS) -+#include "common_dc_bridge.h" -+#if defined(SUPPORT_DCPLAT_BRIDGE) -+#include "common_dcplat_bridge.h" -+#endif -+#endif -+#include "common_mm_bridge.h" -+#if defined(SUPPORT_MMPLAT_BRIDGE) -+#include "common_mmplat_bridge.h" -+#endif -+#if defined(SUPPORT_WRAP_EXTMEM) -+#include "common_mmextmem_bridge.h" -+#endif -+#if !defined(EXCLUDE_CMM_BRIDGE) -+#include "common_cmm_bridge.h" -+#endif -+#if defined(__linux__) -+#include "common_dmabuf_bridge.h" -+#endif -+#if defined(PDUMP) -+#include "common_pdump_bridge.h" -+#include "common_pdumpctrl_bridge.h" -+#include "common_pdumpmm_bridge.h" -+#endif -+#include "common_cache_bridge.h" -+#if defined(SUPPORT_DMA_TRANSFER) -+#include "common_dma_bridge.h" -+#endif -+#include "common_srvcore_bridge.h" -+#include "common_sync_bridge.h" -+#if defined(SUPPORT_SECURE_EXPORT) -+#include "common_smm_bridge.h" -+#endif -+#if defined(PVRSRV_ENABLE_HTB) -+#include "common_htbuffer_bridge.h" -+#endif -+#include "common_pvrtl_bridge.h" -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+#include "common_ri_bridge.h" -+#endif -+ -+#if defined(SUPPORT_VALIDATION_BRIDGE) -+#include "common_validation_bridge.h" -+#endif -+ -+#if defined(PVR_TESTING_UTILS) -+#include "common_tutils_bridge.h" -+#endif -+ -+#include "common_devicememhistory_bridge.h" -+#include "common_synctracking_bridge.h" -+ -+#if defined(SUPPORT_FALLBACK_FENCE_SYNC) -+#include "common_syncfallback_bridge.h" -+#endif -+ -+#if defined(SUPPORT_DI_BRG_IMPL) -+#include "common_di_bridge.h" -+#endif -+ -+/* -+ * Bridge Cmd Ids -+ */ -+ -+ -+/* Note: The pattern -+ * #define PVRSRV_BRIDGE_FEATURE (PVRSRV_BRIDGE_PREVFEATURE + 1) -+ * #if defined(SUPPORT_FEATURE) -+ * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST (PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST + 1) -+ * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST (PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST + PVRSRV_BRIDGE_FEATURE_CMD_LAST) -+ * #else -+ * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST 0 -+ * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST (PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST) -+ * #endif -+ * is used in the macro definitions below to make 
PVRSRV_BRIDGE_FEATURE_* -+ * take up no space in the dispatch table if SUPPORT_FEATURE is disabled. -+ * -+ * Note however that a bridge always defines PVRSRV_BRIDGE_FEATURE, even where -+ * the feature is not enabled (each bridge group retains its own ioctl number). -+ */ -+ -+#define PVRSRV_BRIDGE_FIRST 0UL -+ -+/* 0: Default handler */ -+#define PVRSRV_BRIDGE_DEFAULT 0UL -+#define PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST 0UL -+#define PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST (PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST) -+/* 1: CORE functions */ -+#define PVRSRV_BRIDGE_SRVCORE 1UL -+#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST (PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST+1) -+#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST (PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST + PVRSRV_BRIDGE_SRVCORE_CMD_LAST) -+ -+/* 2: SYNC functions */ -+#define PVRSRV_BRIDGE_SYNC 2UL -+#define PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST (PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_SYNC_DISPATCH_LAST (PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNC_CMD_LAST) -+ -+/* 3,4: Reserved */ -+#define PVRSRV_BRIDGE_RESERVED1 3UL -+#define PVRSRV_BRIDGE_RESERVED1_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_RESERVED1_DISPATCH_LAST (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST) -+ -+#define PVRSRV_BRIDGE_RESERVED2 4UL -+#define PVRSRV_BRIDGE_RESERVED2_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_RESERVED2_DISPATCH_LAST (PVRSRV_BRIDGE_RESERVED1_DISPATCH_LAST) -+ -+/* 5: PDUMP CTRL layer functions */ -+#define PVRSRV_BRIDGE_PDUMPCTRL 5UL -+#if defined(PDUMP) -+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPCTRL_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST) -+#endif -+ -+/* 6: Memory Management functions */ -+#define PVRSRV_BRIDGE_MM 6UL -+#define PVRSRV_BRIDGE_MM_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_MM_DISPATCH_LAST (PVRSRV_BRIDGE_MM_DISPATCH_FIRST + PVRSRV_BRIDGE_MM_CMD_LAST) -+ -+/* 7: Non-Linux Memory Management functions */ -+#define PVRSRV_BRIDGE_MMPLAT 7UL -+#if defined(SUPPORT_MMPLAT_BRIDGE) -+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST (PVRSRV_BRIDGE_MM_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST + PVRSRV_BRIDGE_MMPLAT_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_MM_DISPATCH_LAST) -+#endif -+ -+/* 8: Context Memory Management functions */ -+#define PVRSRV_BRIDGE_CMM 8UL -+#if !defined(EXCLUDE_CMM_BRIDGE) -+#define PVRSRV_BRIDGE_CMM_DISPATCH_FIRST (PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_CMM_DISPATCH_LAST (PVRSRV_BRIDGE_CMM_DISPATCH_FIRST + PVRSRV_BRIDGE_CMM_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_CMM_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_CMM_DISPATCH_LAST (PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST) -+#endif -+ -+/* 9: PDUMP Memory Management functions */ -+#define PVRSRV_BRIDGE_PDUMPMM 9UL -+#if defined(PDUMP) -+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST (PVRSRV_BRIDGE_CMM_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPMM_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST (PVRSRV_BRIDGE_CMM_DISPATCH_LAST) -+#endif -+ -+/* 10: PDUMP functions */ -+#define 
PVRSRV_BRIDGE_PDUMP 10UL -+#if defined(PDUMP) -+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMP_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST) -+#endif -+ -+/* 11: DMABUF functions */ -+#define PVRSRV_BRIDGE_DMABUF 11UL -+#if defined(__linux__) -+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST (PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST + PVRSRV_BRIDGE_DMABUF_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST) -+#endif -+ -+/* 12: Display Class functions */ -+#define PVRSRV_BRIDGE_DC 12UL -+#if defined(SUPPORT_DISPLAY_CLASS) -+#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST (PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_DC_DISPATCH_LAST (PVRSRV_BRIDGE_DC_DISPATCH_FIRST + PVRSRV_BRIDGE_DC_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_DC_DISPATCH_LAST (PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST) -+#endif -+ -+/* 13: Cache interface functions */ -+#define PVRSRV_BRIDGE_CACHE 13UL -+#define PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST (PVRSRV_BRIDGE_DC_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_CACHE_DISPATCH_LAST (PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST + PVRSRV_BRIDGE_CACHE_CMD_LAST) -+ -+/* 14: Secure Memory Management functions */ -+#define PVRSRV_BRIDGE_SMM 14UL -+#if defined(SUPPORT_SECURE_EXPORT) -+#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST (PVRSRV_BRIDGE_CACHE_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST (PVRSRV_BRIDGE_SMM_DISPATCH_FIRST + PVRSRV_BRIDGE_SMM_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST (PVRSRV_BRIDGE_CACHE_DISPATCH_LAST) -+#endif -+ -+/* 15: Transport Layer interface functions */ -+#define PVRSRV_BRIDGE_PVRTL 15UL -+#define PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST (PVRSRV_BRIDGE_SMM_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST (PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST + PVRSRV_BRIDGE_PVRTL_CMD_LAST) -+ -+/* 16: Resource Information (RI) interface functions */ -+#define PVRSRV_BRIDGE_RI 16UL -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST (PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_RI_DISPATCH_LAST (PVRSRV_BRIDGE_RI_DISPATCH_FIRST + PVRSRV_BRIDGE_RI_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_RI_DISPATCH_LAST (PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST) -+#endif -+ -+/* 17: Validation interface functions */ -+#define PVRSRV_BRIDGE_VALIDATION 17UL -+#if defined(SUPPORT_VALIDATION_BRIDGE) -+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST (PVRSRV_BRIDGE_RI_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST + PVRSRV_BRIDGE_VALIDATION_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST (PVRSRV_BRIDGE_RI_DISPATCH_LAST) -+#endif -+ -+/* 18: TUTILS interface functions */ -+#define PVRSRV_BRIDGE_TUTILS 18UL -+#if defined(PVR_TESTING_UTILS) -+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST (PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST + PVRSRV_BRIDGE_TUTILS_CMD_LAST) -+#else -+#define 
PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST) -+#endif -+ -+/* 19: DevMem history interface functions */ -+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY 19UL -+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST (PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST + PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST) -+ -+/* 20: Host Trace Buffer interface functions */ -+#define PVRSRV_BRIDGE_HTBUFFER 20UL -+#if defined(PVRSRV_ENABLE_HTB) -+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST + PVRSRV_BRIDGE_HTBUFFER_CMD_LAST) -+#else /* !PVRSRV_ENABLE_HTB */ -+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST) -+#endif /* PVRSRV_ENABLE_HTB */ -+ -+/* 21: Non-Linux Display functions */ -+#define PVRSRV_BRIDGE_DCPLAT 21UL -+#if defined(SUPPORT_DISPLAY_CLASS) && defined(SUPPORT_DCPLAT_BRIDGE) -+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST + PVRSRV_BRIDGE_DCPLAT_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST) -+#endif -+ -+/* 22: Extmem functions */ -+#define PVRSRV_BRIDGE_MMEXTMEM 22UL -+#if defined(SUPPORT_WRAP_EXTMEM) -+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST + PVRSRV_BRIDGE_MMEXTMEM_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST) -+#endif -+ -+/* 23: Sync tracking functions */ -+#define PVRSRV_BRIDGE_SYNCTRACKING 23UL -+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST) -+ -+/* 24: Sync fallback functions */ -+#define PVRSRV_BRIDGE_SYNCFALLBACK 24UL -+#if defined(SUPPORT_FALLBACK_FENCE_SYNC) -+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCFALLBACK_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST) -+#endif -+ -+/* 25: Debug Information (DI) interface functions */ -+#define PVRSRV_BRIDGE_DI 25UL -+#if defined(SUPPORT_DI_BRG_IMPL) -+#define PVRSRV_BRIDGE_DI_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_DI_DISPATCH_LAST (PVRSRV_BRIDGE_DI_DISPATCH_FIRST + PVRSRV_BRIDGE_DI_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_DI_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_DI_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST) -+#endif -+ -+/* 26: DMA transfer functions */ -+ -+#define PVRSRV_BRIDGE_DMA 26UL -+#if defined(SUPPORT_DMA_TRANSFER) -+#define PVRSRV_BRIDGE_DMA_DISPATCH_FIRST (PVRSRV_BRIDGE_DI_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_DMA_DISPATCH_LAST (PVRSRV_BRIDGE_DMA_DISPATCH_FIRST + 
PVRSRV_BRIDGE_DMA_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_DMA_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_DMA_DISPATCH_LAST (PVRSRV_BRIDGE_DI_DISPATCH_LAST) -+#endif -+ -+/* NB PVRSRV_BRIDGE_LAST below must be the last bridge group defined above (PVRSRV_BRIDGE_FEATURE) */ -+#define PVRSRV_BRIDGE_LAST (PVRSRV_BRIDGE_DMA) -+/* NB PVRSRV_BRIDGE_DISPATCH LAST below must be the last dispatch entry defined above (PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST) */ -+#define PVRSRV_BRIDGE_DISPATCH_LAST (PVRSRV_BRIDGE_DMA_DISPATCH_LAST) -+ -+/* bit mask representing the enabled PVR bridges */ -+ -+static const IMG_UINT32 gui32PVRBridges = -+ (1U << (PVRSRV_BRIDGE_DEFAULT - PVRSRV_BRIDGE_FIRST)) -+ | (1U << (PVRSRV_BRIDGE_SRVCORE - PVRSRV_BRIDGE_FIRST)) -+ | (1U << (PVRSRV_BRIDGE_SYNC - PVRSRV_BRIDGE_FIRST)) -+ -+#if defined(PDUMP) -+ | (1U << (PVRSRV_BRIDGE_PDUMPCTRL - PVRSRV_BRIDGE_FIRST)) -+#endif -+ | (1U << (PVRSRV_BRIDGE_MM - PVRSRV_BRIDGE_FIRST)) -+#if defined(SUPPORT_MMPLAT_BRIDGE) -+ | (1U << (PVRSRV_BRIDGE_MMPLAT - PVRSRV_BRIDGE_FIRST)) -+#endif -+#if defined(SUPPORT_CMM) -+ | (1U << (PVRSRV_BRIDGE_CMM - PVRSRV_BRIDGE_FIRST)) -+#endif -+#if defined(PDUMP) -+ | (1U << (PVRSRV_BRIDGE_PDUMPMM - PVRSRV_BRIDGE_FIRST)) -+ | (1U << (PVRSRV_BRIDGE_PDUMP - PVRSRV_BRIDGE_FIRST)) -+#endif -+#if defined(__linux__) -+ | (1U << (PVRSRV_BRIDGE_DMABUF - PVRSRV_BRIDGE_FIRST)) -+#endif -+#if defined(SUPPORT_DISPLAY_CLASS) -+ | (1U << (PVRSRV_BRIDGE_DC - PVRSRV_BRIDGE_FIRST)) -+#endif -+ | (1U << (PVRSRV_BRIDGE_CACHE - PVRSRV_BRIDGE_FIRST)) -+#if defined(SUPPORT_SECURE_EXPORT) -+ | (1U << (PVRSRV_BRIDGE_SMM - PVRSRV_BRIDGE_FIRST)) -+#endif -+ | (1U << (PVRSRV_BRIDGE_PVRTL - PVRSRV_BRIDGE_FIRST)) -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ | (1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST)) -+#endif -+#if defined(SUPPORT_VALIDATION) -+ | (1U << (PVRSRV_BRIDGE_VALIDATION - PVRSRV_BRIDGE_FIRST)) -+#endif -+#if defined(PVR_TESTING_UTILS) -+ | (1U << (PVRSRV_BRIDGE_TUTILS - PVRSRV_BRIDGE_FIRST)) -+#endif -+ | (1U << (PVRSRV_BRIDGE_DEVICEMEMHISTORY - PVRSRV_BRIDGE_FIRST)) -+#if defined(PVRSRV_ENABLE_HTB) -+ | (1U << (PVRSRV_BRIDGE_HTBUFFER - PVRSRV_BRIDGE_FIRST)) -+#endif -+#if defined(SUPPORT_DISPLAY_CLASS) && defined(SUPPORT_DCPLAT_BRIDGE) -+ | (1U << (PVRSRV_BRIDGE_DCPLAT - PVRSRV_BRIDGE_FIRST)) -+#endif -+#if defined(SUPPORT_WRAP_EXTMEM) -+ | (1U << (PVRSRV_BRIDGE_MMEXTMEM - PVRSRV_BRIDGE_FIRST)) -+#endif -+ | (1U << (PVRSRV_BRIDGE_SYNCTRACKING - PVRSRV_BRIDGE_FIRST)) -+#if defined(SUPPORT_FALLBACK_FENCE_SYNC) -+ | (1U << (PVRSRV_BRIDGE_SYNCFALLBACK - PVRSRV_BRIDGE_FIRST)) -+#endif -+#if defined(SUPPORT_DI_BRG_IMPL) -+ | (1U << (PVRSRV_BRIDGE_DI - PVRSRV_BRIDGE_FIRST)) -+#endif -+#if defined(SUPPORT_DMA_TRANSFER) -+ | (1U << (PVRSRV_BRIDGE_DMA - PVRSRV_BRIDGE_FIRST)) -+#endif -+ ; -+ -+/* bit field representing which PVR bridge groups may optionally not -+ * be present in the server -+ */ -+#define PVR_BRIDGES_OPTIONAL \ -+ ( \ -+ (1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST)) \ -+ ) -+ -+/****************************************************************************** -+ * Generic bridge structures -+ *****************************************************************************/ -+ -+ -+/****************************************************************************** -+ * bridge packaging structure -+ *****************************************************************************/ -+typedef struct PVRSRV_BRIDGE_PACKAGE_TAG -+{ -+ IMG_UINT32 ui32BridgeID; /*!< ioctl bridge group */ -+ IMG_UINT32 ui32FunctionID; /*!< ioctl 
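
The group-numbering comment near the top of pvr_bridge.h describes how each bridge keeps its ioctl group number while contributing zero dispatch-table slots when its feature is compiled out. Expanded for a hypothetical group (SUPPORT_FOO and every FOO name here are invented for illustration; DMA is the last real group above), the pattern looks like this.

/* Illustrative expansion only, not part of the patch. PVRSRV_BRIDGE_FOO_CMD_LAST
 * would come from a generated common_foo_bridge.h, and PVRSRV_BRIDGE_LAST /
 * PVRSRV_BRIDGE_DISPATCH_LAST above would have to be bumped to FOO as well. */
#define PVRSRV_BRIDGE_FOO                  27UL
#if defined(SUPPORT_FOO)
#define PVRSRV_BRIDGE_FOO_DISPATCH_FIRST   (PVRSRV_BRIDGE_DMA_DISPATCH_LAST + 1)
#define PVRSRV_BRIDGE_FOO_DISPATCH_LAST    (PVRSRV_BRIDGE_FOO_DISPATCH_FIRST + PVRSRV_BRIDGE_FOO_CMD_LAST)
#else
/* Feature compiled out: the group keeps its number but spans no entries. */
#define PVRSRV_BRIDGE_FOO_DISPATCH_FIRST   0
#define PVRSRV_BRIDGE_FOO_DISPATCH_LAST    (PVRSRV_BRIDGE_DMA_DISPATCH_LAST)
#endif
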
function index */ -+ IMG_UINT32 ui32Size; /*!< size of structure */ -+ void __user *pvParamIn; /*!< input data buffer */ -+ IMG_UINT32 ui32InBufferSize; /*!< size of input data buffer */ -+ void __user *pvParamOut; /*!< output data buffer */ -+ IMG_UINT32 ui32OutBufferSize; /*!< size of output data buffer */ -+}PVRSRV_BRIDGE_PACKAGE; -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif /* PVR_BRIDGE_H */ -+ -+/****************************************************************************** -+ End of file (pvr_bridge.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/pvr_bridge_k.c b/drivers/gpu/drm/img-rogue/pvr_bridge_k.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_bridge_k.c -@@ -0,0 +1,635 @@ -+/*************************************************************************/ /*! -+@File -+@Title PVR Bridge Module (kernel side) -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Receives calls from the user portion of services and -+ despatches them to functions in the kernel portion. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
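Before moving on to pvr_bridge_k.c: the gui32PVRBridges mask and the PVRSRV_BRIDGE_PACKAGE structure defined at the end of pvr_bridge.h above are what the ioctl path keys off. Every bridge group owns one bit, offset from PVRSRV_BRIDGE_FIRST, and each package names the group, the function index inside it, and the user-space in/out buffers. A minimal sketch of how such a mask can be consulted before dispatch, assuming only the macros and types already declared in the driver headers; the helper name below is illustrative and not part of the driver:

/* Sketch only: reject calls to bridge groups that were compiled out.
 * Mirrors the bit layout of gui32PVRBridges; not a driver function. */
static inline IMG_BOOL PVRBridgeGroupIsEnabled(IMG_UINT32 ui32BridgeGroup)
{
	if (ui32BridgeGroup < PVRSRV_BRIDGE_FIRST ||
	    ui32BridgeGroup > PVRSRV_BRIDGE_LAST)
	{
		return IMG_FALSE;
	}

	return (gui32PVRBridges &
	        (1U << (ui32BridgeGroup - PVRSRV_BRIDGE_FIRST))) != 0 ?
	       IMG_TRUE : IMG_FALSE;
}

Groups listed in PVR_BRIDGES_OPTIONAL may legitimately be absent from a given server build, so a dispatcher treating a clear bit as "not supported" rather than as an error is the intended behaviour.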
-+*/ /**************************************************************************/ -+ -+#include -+ -+#include -+ -+#include "img_defs.h" -+#include "pvr_bridge.h" -+#include "pvr_bridge_k.h" -+#include "connection_server.h" -+#include "syscommon.h" -+#include "pvr_debug.h" -+#include "di_server.h" -+#include "private_data.h" -+#include "linkage.h" -+#include "pmr.h" -+#include "rgx_bvnc_defs_km.h" -+#include "pvrsrv_bridge_init.h" -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) -+#include -+#else -+#include -+#endif -+ -+#include "pvr_drm.h" -+#include "pvr_drv.h" -+ -+#include "env_connection.h" -+#include -+#include -+ -+/* RGX: */ -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+ -+#include "srvcore.h" -+#include "common_srvcore_bridge.h" -+ -+PVRSRV_ERROR InitDMABUFBridge(void); -+void DeinitDMABUFBridge(void); -+ -+#if defined(MODULE_TEST) -+/************************************************************************/ -+// additional includes for services testing -+/************************************************************************/ -+#include "pvr_test_bridge.h" -+#include "kern_test.h" -+/************************************************************************/ -+// end of additional includes -+/************************************************************************/ -+#endif -+ -+/* The mmap code has its own mutex, to prevent possible re-entrant issues -+ * when the same PMR is mapped from two different connections/processes. -+ */ -+static DEFINE_MUTEX(g_sMMapMutex); -+ -+#define _SUSPENDED 1 -+#define _NOT_SUSPENDED 0 -+static ATOMIC_T g_iDriverSuspendCount; -+static ATOMIC_T g_iNumActiveDriverThreads; -+static ATOMIC_T g_iNumActiveKernelThreads; -+static IMG_HANDLE g_hDriverThreadEventObject; -+ -+#if defined(PVR_TESTING_UTILS) -+#include "pvrsrv.h" -+#endif -+ -+#if defined(DEBUG_BRIDGE_KM) -+static DI_ENTRY *gpsDIBridgeStatsEntry; -+ -+static void *BridgeStatsDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) -+{ -+ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = DIGetPrivData(psEntry); -+ -+ if (psDispatchTable == NULL || *pui64Pos > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) -+ { -+ return NULL; -+ } -+ -+ if (*pui64Pos == 0) -+ { -+ return DI_START_TOKEN; -+ } -+ -+ return &(psDispatchTable[*pui64Pos - 1]); -+} -+ -+static void BridgeStatsDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ PVR_UNREFERENCED_PARAMETER(psEntry); -+ PVR_UNREFERENCED_PARAMETER(pvData); -+} -+ -+static void *BridgeStatsDINext(OSDI_IMPL_ENTRY *psEntry, void *pvData, -+ IMG_UINT64 *pui64Pos) -+{ -+ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = DIGetPrivData(psEntry); -+ IMG_UINT64 uiItemAskedFor = *pui64Pos; /* pui64Pos on entry is the index to return */ -+ -+ PVR_UNREFERENCED_PARAMETER(pvData); -+ -+ /* Is the item asked for (starts at 0) a valid table index? 
*/ -+ if (uiItemAskedFor < BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) -+ { -+ (*pui64Pos)++; /* on exit it is the next DI index to ask for */ -+ return &(psDispatchTable[uiItemAskedFor]); -+ } -+ -+ /* Now passed the end of the table to indicate stop */ -+ return NULL; -+} -+ -+static int BridgeStatsDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) -+{ -+ if (pvData == DI_START_TOKEN) -+ { -+ BridgeGlobalStatsLock(); -+ DIPrintf(psEntry, -+ "Total ioctl call count = %u\n" -+ "Total number of bytes copied via copy_from_user = %u\n" -+ "Total number of bytes copied via copy_to_user = %u\n" -+ "Total number of bytes copied via copy_*_user = %u\n\n" -+ "%3s: %-60s | %-48s | %10s | %20s | %20s | %20s | %20s\n", -+ g_BridgeGlobalStats.ui32IOCTLCount, -+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes, -+ g_BridgeGlobalStats.ui32TotalCopyToUserBytes, -+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes + -+ g_BridgeGlobalStats.ui32TotalCopyToUserBytes, -+ "#", -+ "Bridge Name", -+ "Wrapper Function", -+ "Call Count", -+ "copy_from_user (B)", -+ "copy_to_user (B)", -+ "Total Time (us)", -+ "Max Time (us)"); -+ BridgeGlobalStatsUnlock(); -+ } -+ else if (pvData != NULL) -+ { -+ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psTableEntry = pvData; -+ IMG_UINT32 ui32Remainder; -+ -+ BridgeGlobalStatsLock(); -+ DIPrintf(psEntry, -+ "%3d: %-60s %-48s %-10u %-20u %-20u %-20" IMG_UINT64_FMTSPEC " %-20" IMG_UINT64_FMTSPEC "\n", -+ (IMG_UINT32)(((size_t)psTableEntry-(size_t)g_BridgeDispatchTable)/sizeof(*g_BridgeDispatchTable)), -+ psTableEntry->pszIOCName, -+ (psTableEntry->pfFunction != NULL) ? psTableEntry->pszFunctionName : "(null)", -+ psTableEntry->ui32CallCount, -+ psTableEntry->ui32CopyFromUserTotalBytes, -+ psTableEntry->ui32CopyToUserTotalBytes, -+ OSDivide64r64(psTableEntry->ui64TotalTimeNS, 1000, &ui32Remainder), -+ OSDivide64r64(psTableEntry->ui64MaxTimeNS, 1000, &ui32Remainder)); -+ BridgeGlobalStatsUnlock(); -+ } -+ -+ return 0; -+} -+ -+static IMG_INT64 BridgeStatsWrite(const IMG_CHAR *pcBuffer, -+ IMG_UINT64 ui64Count, IMG_UINT64 *pui64Pos, -+ void *pvData) -+{ -+ IMG_UINT32 i; -+ -+ PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); -+ PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); -+ PVR_RETURN_IF_FALSE(ui64Count >= 1, -EINVAL); -+ PVR_RETURN_IF_FALSE(pcBuffer[0] == '0', -EINVAL); -+ PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); -+ -+ /* Reset stats. 
*/ -+ -+ BridgeGlobalStatsLock(); -+ -+ g_BridgeGlobalStats.ui32IOCTLCount = 0; -+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes = 0; -+ g_BridgeGlobalStats.ui32TotalCopyToUserBytes = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(g_BridgeDispatchTable); i++) -+ { -+ g_BridgeDispatchTable[i].ui32CallCount = 0; -+ g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0; -+ g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0; -+ g_BridgeDispatchTable[i].ui64TotalTimeNS = 0; -+ g_BridgeDispatchTable[i].ui64MaxTimeNS = 0; -+ } -+ -+ BridgeGlobalStatsUnlock(); -+ -+ return ui64Count; -+} -+ -+#endif /* defined(DEBUG_BRIDGE_KM) */ -+ -+PVRSRV_ERROR OSPlatformBridgeInit(void) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = InitDMABUFBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitDMABUFBridge"); -+ -+ OSAtomicWrite(&g_iDriverSuspendCount, 0); -+ OSAtomicWrite(&g_iNumActiveDriverThreads, 0); -+ OSAtomicWrite(&g_iNumActiveKernelThreads, 0); -+ -+ eError = OSEventObjectCreate("Global driver thread event object", -+ &g_hDriverThreadEventObject); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", error_); -+ -+#if defined(DEBUG_BRIDGE_KM) -+ { -+ DI_ITERATOR_CB sIter = { -+ .pfnStart = BridgeStatsDIStart, -+ .pfnStop = BridgeStatsDIStop, -+ .pfnNext = BridgeStatsDINext, -+ .pfnShow = BridgeStatsDIShow, -+ .pfnWrite = BridgeStatsWrite, -+ -+ //Expects '0' + Null terminator -+ .ui32WriteLenMax = ((1U)+1U) -+ }; -+ -+ eError = DICreateEntry("bridge_stats", NULL, &sIter, -+ &g_BridgeDispatchTable[0], -+ DI_ENTRY_TYPE_GENERIC, -+ &gpsDIBridgeStatsEntry); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", error_); -+ } -+#endif -+ -+ return PVRSRV_OK; -+ -+error_: -+ if (g_hDriverThreadEventObject) { -+ OSEventObjectDestroy(g_hDriverThreadEventObject); -+ g_hDriverThreadEventObject = NULL; -+ } -+ -+ return eError; -+} -+ -+void OSPlatformBridgeDeInit(void) -+{ -+#if defined(DEBUG_BRIDGE_KM) -+ if (gpsDIBridgeStatsEntry != NULL) -+ { -+ DIDestroyEntry(gpsDIBridgeStatsEntry); -+ } -+#endif -+ -+ DeinitDMABUFBridge(); -+ -+ if (g_hDriverThreadEventObject != NULL) { -+ OSEventObjectDestroy(g_hDriverThreadEventObject); -+ g_hDriverThreadEventObject = NULL; -+ } -+} -+ -+PVRSRV_ERROR LinuxBridgeBlockClientsAccess(struct pvr_drm_private *psDevPriv, -+ IMG_BOOL bShutdown) -+{ -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hEvent; -+ IMG_INT iSuspendCount; -+ -+ eError = OSEventObjectOpen(g_hDriverThreadEventObject, &hEvent); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__)); -+ return eError; -+ } -+ -+ iSuspendCount = OSAtomicIncrement(&g_iDriverSuspendCount); -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Driver suspended %d times.", __func__, -+ iSuspendCount)); -+ -+ if (OSAtomicCompareExchange(&psDevPriv->suspended, _NOT_SUSPENDED, -+ _SUSPENDED) == _SUSPENDED) -+ { -+ OSAtomicDecrement(&g_iDriverSuspendCount); -+ PVR_DPF((PVR_DBG_ERROR, "%s: Device %p already suspended", __func__, -+ psDevPriv->dev_node)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto CloseEventObject; -+ } -+ -+ /* now wait for any threads currently in the server to exit */ -+ while (OSAtomicRead(&g_iNumActiveDriverThreads) != 0 || -+ (OSAtomicRead(&g_iNumActiveKernelThreads) != 0 && !bShutdown)) -+ { -+ if (OSAtomicRead(&g_iNumActiveDriverThreads) != 0) -+ { -+ PVR_LOG(("%s: waiting for user threads (%d)", __func__, -+ OSAtomicRead(&g_iNumActiveDriverThreads))); -+ } -+ if (OSAtomicRead(&g_iNumActiveKernelThreads) != 0) -+ { -+ PVR_LOG(("%s: waiting for kernel threads (%d)", __func__, -+ 
OSAtomicRead(&g_iNumActiveKernelThreads))); -+ } -+ /* Regular wait is called here (and not OSEventObjectWaitKernel) because -+ * this code is executed by the caller of .suspend/.shutdown callbacks -+ * which is most likely PM (or other actor responsible for suspend -+ * process). Because of that this thread shouldn't and most likely -+ * event cannot be frozen. */ -+ OSEventObjectWait(hEvent); -+ } -+ -+CloseEventObject: -+ OSEventObjectClose(hEvent); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(struct pvr_drm_private *psDevPriv) -+{ -+ PVRSRV_ERROR eError; -+ IMG_INT iSuspendCount; -+ -+ /* resume the driver and then signal so any waiting threads wake up */ -+ if (OSAtomicCompareExchange(&psDevPriv->suspended, _SUSPENDED, -+ _NOT_SUSPENDED) == _NOT_SUSPENDED) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Device is not suspended", __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ iSuspendCount = OSAtomicDecrement(&g_iDriverSuspendCount); -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Driver suspended %d times.", __func__, -+ iSuspendCount)); -+ -+ eError = OSEventObjectSignal(g_hDriverThreadEventObject); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: OSEventObjectSignal failed: %s", -+ __func__, PVRSRVGetErrorString(eError))); -+ } -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR LinuxBridgeSignalIfSuspended(void) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (OSAtomicRead(&g_iDriverSuspendCount) > 0) -+ { -+ PVRSRV_ERROR eError = OSEventObjectSignal(g_hDriverThreadEventObject); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to signal driver thread event" -+ " object: %s", __func__, PVRSRVGetErrorString(eError))); -+ } -+ } -+ -+ return eError; -+} -+ -+void LinuxBridgeNumActiveKernelThreadsIncrement(void) -+{ -+ OSAtomicIncrement(&g_iNumActiveKernelThreads); -+} -+ -+void LinuxBridgeNumActiveKernelThreadsDecrement(void) -+{ -+ OSAtomicDecrement(&g_iNumActiveKernelThreads); -+ PVR_ASSERT(OSAtomicRead(&g_iNumActiveKernelThreads) >= 0); -+ -+ /* Signal on every decrement in case LinuxBridgeBlockClientsAccess() is -+ * waiting for the threads to freeze. -+ * (error is logged in called function so ignore, we can't do much with -+ * it anyway) */ -+ (void) LinuxBridgeSignalIfSuspended(); -+} -+ -+static PVRSRV_ERROR _WaitForDriverUnsuspend(void) -+{ -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hEvent; -+ -+ eError = OSEventObjectOpen(g_hDriverThreadEventObject, &hEvent); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__)); -+ return eError; -+ } -+ -+ while (OSAtomicRead(&g_iDriverSuspendCount) == 0) -+ { -+ /* we should be able to use normal (not kernel) wait here since -+ * we were just unfrozen and most likely we're not going to -+ * be frozen again (?) */ -+ OSEventObjectWait(hEvent); -+ } -+ -+ OSEventObjectClose(hEvent); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVDriverThreadEnter(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ CONNECTION_DATA *psConnection = (CONNECTION_DATA *)pvData; -+ -+ /* Block if the associated device has been placed into a FROZEN state. -+ * In this case we must await a PVRSRVDeviceThaw() completion request. -+ * Device is obtained from the incoming psConnection if pvData is non-NULL. 
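The LinuxBridgeBlockClientsAccess()/LinuxBridgeUnblockClientsAccess() pair above implements a simple suspend handshake: the blocking side raises g_iDriverSuspendCount, marks the device suspended, then sleeps on the shared event object until the active-thread counters drain, while the unblocking side clears the flag and signals the same object. A hedged sketch of the intended caller, modelled on a PM suspend/resume pair; the example_pm_* names are assumptions for illustration (the real callers live in the driver's PM callbacks, which are not part of this hunk), while the drm_device/dev_private lookup matches what pvr_buffer_sync.c does later in this patch:

/* Sketch of the expected PM usage; names outside this hunk are illustrative. */
static int example_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct pvr_drm_private *priv = ddev->dev_private;

	/* Refuse new bridge calls and wait for in-flight ones to leave. */
	if (LinuxBridgeBlockClientsAccess(priv, IMG_FALSE) != PVRSRV_OK)
		return -EBUSY;

	/* ... power the device down here ... */
	return 0;
}

static int example_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct pvr_drm_private *priv = ddev->dev_private;

	/* ... power the device up here ... */

	/* Wake any thread parked in PVRSRVDriverThreadEnter(). */
	return (LinuxBridgeUnblockClientsAccess(priv) == PVRSRV_OK) ? 0 : -EIO;
}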
-+ */ -+ if (likely(pvData != NULL)) -+ { -+ PVRSRV_DEVICE_NODE *psDevNode = OSGetDevNode(psConnection); -+ PVRSRVBlockIfFrozen(psDevNode); -+ OSAtomicIncrement(&psDevNode->iThreadsActive); -+ } -+#if defined(PVR_TESTING_UTILS) -+ else -+ { -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ OSAtomicIncrement(&psPVRSRVData->iNumDriverTasksActive); -+ } -+#endif /* defined(PVR_TESTING_UTILS) */ -+ -+ /* increment first so there is no race between this value and -+ * g_iDriverSuspendCount in LinuxBridgeBlockClientsAccess() */ -+ OSAtomicIncrement(&g_iNumActiveDriverThreads); -+ -+ if (OSAtomicRead(&g_iDriverSuspendCount) > 0) -+ { -+ /* decrement here because the driver is going to be suspended and -+ * this thread is going to be frozen so we don't want to wait for -+ * it in LinuxBridgeBlockClientsAccess() */ -+ OSAtomicDecrement(&g_iNumActiveDriverThreads); -+ -+ /* during suspend procedure this will put the current thread to -+ * the freezer but during shutdown this will just return */ -+ try_to_freeze(); -+ -+ /* if the thread was unfrozen but the number of suspends is non-0 wait -+ * for it -+ * in case this is a shutdown the thread was not frozen so we'll -+ * wait here indefinitely but this is ok (and this is in fact what -+ * we want) because no thread should be entering the driver in such -+ * case */ -+ eError = _WaitForDriverUnsuspend(); -+ -+ /* increment here because that means that the thread entered the -+ * driver */ -+ OSAtomicIncrement(&g_iNumActiveDriverThreads); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to wait for driver" -+ " unsuspend: %s", __func__, -+ PVRSRVGetErrorString(eError))); -+ return eError; -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+void PVRSRVDriverThreadExit(void *pvData) -+{ -+ CONNECTION_DATA *psConnection = (CONNECTION_DATA *)pvData; -+ OSAtomicDecrement(&g_iNumActiveDriverThreads); -+ -+ /* Decrement the number of threads active on this device if the -+ * connection is known. 
-+ */ -+ if (psConnection != NULL) -+ { -+ PVRSRV_DEVICE_NODE *psDevNode = OSGetDevNode(psConnection); -+ OSAtomicDecrement(&psDevNode->iThreadsActive); -+ } -+ /* if the driver is being suspended then we need to signal the -+ * event object as the thread suspending the driver is waiting -+ * for active threads to exit -+ * error is logged in called function so ignore returned error -+ */ -+ (void) LinuxBridgeSignalIfSuspended(); -+} -+ -+int -+PVRSRV_BridgeDispatchKM(struct drm_device __maybe_unused *dev, void *arg, struct drm_file *pDRMFile) -+{ -+ struct drm_pvr_srvkm_cmd *psSrvkmCmd = (struct drm_pvr_srvkm_cmd *) arg; -+ PVRSRV_BRIDGE_PACKAGE sBridgePackageKM = { 0 }; -+ CONNECTION_DATA *psConnection = LinuxServicesConnectionFromFile(pDRMFile->filp); -+ PVRSRV_ERROR error; -+ -+ if (psConnection == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Invalid connection data")); -+ return -EFAULT; -+ } -+ -+ PVR_ASSERT(psSrvkmCmd != NULL); -+ -+ DRM_DEBUG("tgid=%d, tgid_connection=%d, bridge_id=%d, func_id=%d", -+ task_tgid_nr(current), -+ ((ENV_CONNECTION_DATA *)PVRSRVConnectionPrivateData(psConnection))->owner, -+ psSrvkmCmd->bridge_id, -+ psSrvkmCmd->bridge_func_id); -+ -+ error = PVRSRVDriverThreadEnter(psConnection); -+ PVR_LOG_GOTO_IF_ERROR(error, "PVRSRVDriverThreadEnter", e0); -+ -+ sBridgePackageKM.ui32BridgeID = psSrvkmCmd->bridge_id; -+ sBridgePackageKM.ui32FunctionID = psSrvkmCmd->bridge_func_id; -+ sBridgePackageKM.ui32Size = sizeof(sBridgePackageKM); -+ sBridgePackageKM.pvParamIn = (void __user *)(uintptr_t)psSrvkmCmd->in_data_ptr; -+ sBridgePackageKM.ui32InBufferSize = psSrvkmCmd->in_data_size; -+ sBridgePackageKM.pvParamOut = (void __user *)(uintptr_t)psSrvkmCmd->out_data_ptr; -+ sBridgePackageKM.ui32OutBufferSize = psSrvkmCmd->out_data_size; -+ -+ error = BridgedDispatchKM(psConnection, &sBridgePackageKM); -+ -+e0: -+ PVRSRVDriverThreadExit(psConnection); -+ -+ return OSPVRSRVToNativeError(error); -+} -+ -+int -+PVRSRV_MMap(struct file *pFile, struct vm_area_struct *ps_vma) -+{ -+ CONNECTION_DATA *psConnection = LinuxServicesConnectionFromFile(pFile); -+ IMG_HANDLE hSecurePMRHandle = (IMG_HANDLE)((uintptr_t)ps_vma->vm_pgoff); -+ PMR *psPMR; -+ PVRSRV_ERROR eError; -+ PVRSRV_MEMALLOCFLAGS_T uiProtFlags = -+ (BITMASK_HAS(ps_vma->vm_flags, VM_READ) ? PVRSRV_MEMALLOCFLAG_CPU_READABLE : 0) | -+ (BITMASK_HAS(ps_vma->vm_flags, VM_WRITE) ? PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE : 0); -+ -+ if (psConnection == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Invalid connection data")); -+ return -ENOENT; -+ } -+ -+ eError = PVRSRVDriverThreadEnter(psConnection); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVDriverThreadEnter", e0); -+ -+ /* -+ * The bridge lock used here to protect PVRSRVLookupHandle is replaced -+ * by a specific lock considering that the handle functions have now -+ * their own lock. This change was necessary to solve the lockdep issues -+ * related with the PVRSRV_MMap. -+ */ -+ -+ eError = PVRSRVLookupHandle(psConnection->psHandleBase, -+ (void **)&psPMR, -+ hSecurePMRHandle, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, -+ IMG_TRUE); -+ if (eError != PVRSRV_OK) -+ { -+ goto e0; -+ } -+ -+ mutex_lock(&g_sMMapMutex); -+ /* Note: PMRMMapPMR will take a reference on the PMR. 
-+ * Unref the handle immediately, because we have now done -+ * the required operation on the PMR (whether it succeeded or not) -+ */ -+ eError = PMRMMapPMR(psPMR, ps_vma, uiProtFlags); -+ mutex_unlock(&g_sMMapMutex); -+ PVRSRVReleaseHandle(psConnection->psHandleBase, hSecurePMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: PMRMMapPMR failed (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ goto e0; -+ } -+ -+ PVRSRVDriverThreadExit(psConnection); -+ -+ return 0; -+ -+e0: -+ PVRSRVDriverThreadExit(psConnection); -+ -+ PVR_DPF((PVR_DBG_ERROR, "Failed with error: %s", PVRSRVGetErrorString(eError))); -+ PVR_ASSERT(eError != PVRSRV_OK); -+ -+ return OSPVRSRVToNativeError(eError); -+} -diff --git a/drivers/gpu/drm/img-rogue/pvr_bridge_k.h b/drivers/gpu/drm/img-rogue/pvr_bridge_k.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_bridge_k.h -@@ -0,0 +1,111 @@ -+/*************************************************************************/ /*! -+@File -+@Title PVR Bridge Module (kernel side) -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Receives calls from the user portion of services and -+ despatches them to functions in the kernel portion. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
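PVRSRV_MMap(), completed just above before the pvr_bridge_k.h boilerplate, recovers the secure PMR handle straight from vm_pgoff, so user space selects which allocation to map purely through the mmap offset (vm_pgoff is the byte offset shifted down by PAGE_SHIFT). A hedged user-space sketch of that convention; fd and handle are placeholders, and the driver's real client library wraps this rather than calling mmap directly:

/* Sketch: map a PMR by encoding its secure handle in the mmap offset. */
#include <sys/mman.h>
#include <unistd.h>

static void *map_pmr(int fd, unsigned long handle, size_t length, int prot)
{
	/* vm_pgoff == offset >> PAGE_SHIFT, so pass handle * page size. */
	off_t offset = (off_t)handle * (off_t)sysconf(_SC_PAGESIZE);

	return mmap(NULL, length, prot, MAP_SHARED, fd, offset);
	/* Caller checks for MAP_FAILED; prot maps onto the
	 * CPU_READABLE/CPU_WRITEABLE flags derived from VM_READ/VM_WRITE. */
}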
-+*/ /**************************************************************************/ -+ -+#ifndef PVR_BRIDGE_K_H -+#define PVR_BRIDGE_K_H -+ -+#include "pvrsrv_error.h" -+#include "pvr_drv.h" -+ -+/*! -+****************************************************************************** -+ @Function LinuxBridgeBlockClientsAccess -+ @Description This function will wait for any existing threads in the Server -+ to exit and then disable access to the driver. New threads will -+ not be allowed to enter the Server until the driver is -+ unsuspended (see LinuxBridgeUnblockClientsAccess). -+ @Input psDevPriv pointer to devices OS specific data -+ @Input bShutdown this flag indicates that the function was called -+ from a shutdown callback and therefore it will -+ not wait for the kernel threads to get frozen -+ (because this doesn't happen during shutdown -+ procedure) -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR LinuxBridgeBlockClientsAccess(struct pvr_drm_private *psDevPriv, -+ IMG_BOOL bShutdown); -+ -+/*! -+****************************************************************************** -+ @Function LinuxBridgeUnblockClientsAccess -+ @Description This function will re-enable the bridge and allow any threads -+ waiting to enter the Server to continue. -+ @Input psDevPriv pointer to devices OS specific data -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(struct pvr_drm_private *psDevPriv); -+ -+void LinuxBridgeNumActiveKernelThreadsIncrement(void); -+void LinuxBridgeNumActiveKernelThreadsDecrement(void); -+ -+/*! -+****************************************************************************** -+ @Function PVRSRVDriverThreadEnter -+ @Description Increments number of client threads currently operating -+ in the driver's context. -+ If the driver is currently being suspended this function -+ will call try_to_freeze() on behalf of the client thread. -+ When the driver is resumed the function will exit and allow -+ the thread into the driver. -+ @Input Reference to Connection data. NULL if no associated -+ connection / device. -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVDriverThreadEnter(void *pvData); -+ -+/*! -+****************************************************************************** -+ @Function PVRSRVDriverThreadExit -+ @Description Decrements the number of client threads currently operating -+ in the driver's context to match the call to -+ PVRSRVDriverThreadEnter(). -+ The function also signals the driver that a thread left the -+ driver context so if it's waiting to suspend it knows that -+ the number of threads decreased. -+ @Input Reference to Connection data. NULL if no associated -+ connection / device. -+******************************************************************************/ -+void PVRSRVDriverThreadExit(void *pvData); -+ -+#endif /* PVR_BRIDGE_K_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_buffer_sync.c b/drivers/gpu/drm/img-rogue/pvr_buffer_sync.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_buffer_sync.c -@@ -0,0 +1,739 @@ -+/* -+ * @File -+ * @Title Linux buffer sync interface -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
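pvr_buffer_sync_pmrs_lock(), a little further down in this file, acquires every buffer's reservation lock under a single ww_acquire_ctx so that a -EDEADLK "wound" can be resolved by dropping everything, slow-locking the contended object first, and retrying. A generic sketch of that wound/wait pattern, assuming nothing beyond the kernel's <linux/ww_mutex.h> API; the demo_* names are not from the driver, and a production version would loop on repeated -EDEADLK the way the goto-retry in the real function does:

/* Generic wound/wait sketch for two locks; illustrative only. */
static DEFINE_WW_CLASS(demo_ww_class);

static int demo_lock_pair_and_work(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &demo_ww_class);

	err = ww_mutex_lock_interruptible(a, &ctx);
	if (err)
		goto fini;

	err = ww_mutex_lock_interruptible(b, &ctx);
	if (err == -EDEADLK) {
		/* Wounded: back off, take the contended lock first, retry. */
		ww_mutex_unlock(a);
		err = ww_mutex_lock_slow_interruptible(b, &ctx);
		if (err)
			goto fini;
		err = ww_mutex_lock_interruptible(a, &ctx);
		if (err) {
			ww_mutex_unlock(b);
			goto fini;
		}
	} else if (err) {
		ww_mutex_unlock(a);
		goto fini;
	}

	ww_acquire_done(&ctx);

	/* ... both reservation objects are now held; do the work here ... */

	ww_mutex_unlock(b);
	ww_mutex_unlock(a);

fini:
	ww_acquire_fini(&ctx);
	return err;
}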
-+ */ -+ -+#include -+ -+#include "services_kernel_client.h" -+#include "pvr_dma_resv.h" -+#include "pvr_buffer_sync.h" -+#include "pvr_buffer_sync_shared.h" -+#include "pvr_drv.h" -+#include "pvr_fence.h" -+ -+struct pvr_buffer_sync_context { -+ struct mutex ctx_lock; -+ struct pvr_fence_context *fence_ctx; -+ struct ww_acquire_ctx acquire_ctx; -+}; -+ -+struct pvr_buffer_sync_check_data { -+ struct dma_fence_cb base; -+ -+ u32 nr_fences; -+ struct pvr_fence **fences; -+}; -+ -+struct pvr_buffer_sync_append_data { -+ struct pvr_buffer_sync_context *ctx; -+ -+ u32 nr_pmrs; -+ struct _PMR_ **pmrs; -+ u32 *pmr_flags; -+ -+ struct pvr_fence *update_fence; -+ struct pvr_buffer_sync_check_data *check_data; -+}; -+ -+static struct dma_resv * -+pmr_reservation_object_get(struct _PMR_ *pmr) -+{ -+ struct dma_buf *dmabuf; -+ -+ dmabuf = PhysmemGetDmaBuf(pmr); -+ if (dmabuf) -+ return dmabuf->resv; -+ -+ return NULL; -+} -+ -+static int -+pvr_buffer_sync_pmrs_lock(struct pvr_buffer_sync_context *ctx, -+ u32 nr_pmrs, -+ struct _PMR_ **pmrs) -+{ -+ struct dma_resv *resv, *cresv = NULL, *lresv = NULL; -+ int i, err; -+ struct ww_acquire_ctx *acquire_ctx = &ctx->acquire_ctx; -+ -+ mutex_lock(&ctx->ctx_lock); -+ -+ ww_acquire_init(acquire_ctx, &reservation_ww_class); -+retry: -+ for (i = 0; i < nr_pmrs; i++) { -+ resv = pmr_reservation_object_get(pmrs[i]); -+ if (!resv) { -+ pr_err("%s: Failed to get reservation object from pmr %p\n", -+ __func__, pmrs[i]); -+ err = -EINVAL; -+ goto fail; -+ } -+ -+ if (resv != lresv) { -+ err = ww_mutex_lock_interruptible(&resv->lock, -+ acquire_ctx); -+ if (err) { -+ cresv = (err == -EDEADLK) ? resv : NULL; -+ goto fail; -+ } -+ } else { -+ lresv = NULL; -+ } -+ } -+ -+ ww_acquire_done(acquire_ctx); -+ -+ return 0; -+ -+fail: -+ while (i--) { -+ resv = pmr_reservation_object_get(pmrs[i]); -+ if (WARN_ON_ONCE(!resv)) -+ continue; -+ ww_mutex_unlock(&resv->lock); -+ } -+ -+ if (lresv) -+ ww_mutex_unlock(&lresv->lock); -+ -+ if (cresv) { -+ err = ww_mutex_lock_slow_interruptible(&cresv->lock, -+ acquire_ctx); -+ if (!err) { -+ lresv = cresv; -+ cresv = NULL; -+ goto retry; -+ } -+ } -+ -+ ww_acquire_fini(acquire_ctx); -+ -+ mutex_unlock(&ctx->ctx_lock); -+ return err; -+} -+ -+static void -+pvr_buffer_sync_pmrs_unlock(struct pvr_buffer_sync_context *ctx, -+ u32 nr_pmrs, -+ struct _PMR_ **pmrs) -+{ -+ struct dma_resv *resv; -+ int i; -+ struct ww_acquire_ctx *acquire_ctx = &ctx->acquire_ctx; -+ -+ for (i = 0; i < nr_pmrs; i++) { -+ resv = pmr_reservation_object_get(pmrs[i]); -+ if (WARN_ON_ONCE(!resv)) -+ continue; -+ ww_mutex_unlock(&resv->lock); -+ } -+ -+ ww_acquire_fini(acquire_ctx); -+ -+ mutex_unlock(&ctx->ctx_lock); -+} -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) -+ -+static void -+dma_resv_count_fences(struct dma_resv *resv, u32 *read_fence_count_out, u32 *write_fence_count_out) -+{ -+ struct dma_resv_iter cursor; -+ u32 write_fence_count = 0; -+ u32 read_fence_count = 0; -+ struct dma_fence *fence; -+ -+ dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ); -+ dma_resv_for_each_fence_unlocked(&cursor, fence) { -+ if (dma_resv_iter_is_restarted(&cursor)) { -+ read_fence_count = 0; -+ write_fence_count = 0; -+ } -+ if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_READ) -+ read_fence_count++; -+ else if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_WRITE) -+ write_fence_count++; -+ } -+ -+ *read_fence_count_out = read_fence_count; -+ *write_fence_count_out = write_fence_count; -+} -+ -+static u32 -+pvr_buffer_sync_pmrs_fence_count(u32 nr_pmrs, 
struct _PMR_ **pmrs, -+ u32 *pmr_flags) -+{ -+ struct dma_resv *resv; -+ u32 fence_count = 0; -+ bool exclusive; -+ int i; -+ -+ for (i = 0; i < nr_pmrs; i++) { -+ u32 write_fence_count = 0; -+ u32 read_fence_count = 0; -+ -+ exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE); -+ -+ resv = pmr_reservation_object_get(pmrs[i]); -+ if (WARN_ON_ONCE(!resv)) -+ continue; -+ -+ dma_resv_count_fences(resv, &read_fence_count, &write_fence_count); -+ -+ if (!exclusive || !read_fence_count) -+ fence_count += write_fence_count; -+ if (exclusive) -+ fence_count += read_fence_count; -+ } -+ -+ return fence_count; -+} -+ -+static struct pvr_buffer_sync_check_data * -+pvr_buffer_sync_check_fences_create(struct pvr_fence_context *fence_ctx, -+ PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx, -+ u32 nr_pmrs, -+ struct _PMR_ **pmrs, -+ u32 *pmr_flags) -+{ -+ struct pvr_buffer_sync_check_data *data; -+ struct dma_resv *resv; -+ struct dma_fence *fence; -+ u32 fence_count; -+ bool exclusive; -+ int i; -+ -+ data = kzalloc(sizeof(*data), GFP_KERNEL); -+ if (!data) -+ return NULL; -+ -+ fence_count = pvr_buffer_sync_pmrs_fence_count(nr_pmrs, pmrs, -+ pmr_flags); -+ if (fence_count) { -+ data->fences = kcalloc(fence_count, sizeof(*data->fences), -+ GFP_KERNEL); -+ if (!data->fences) -+ goto err_check_data_free; -+ } -+ -+ for (i = 0; i < nr_pmrs; i++) { -+ struct dma_resv_iter cursor; -+ bool include_write_fences; -+ bool include_read_fences; -+ u32 write_fence_count = 0; -+ u32 read_fence_count = 0; -+ -+ resv = pmr_reservation_object_get(pmrs[i]); -+ if (WARN_ON_ONCE(!resv)) -+ continue; -+ -+ exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE); -+ -+ dma_resv_count_fences(resv, &read_fence_count, &write_fence_count); -+ -+ include_write_fences = (!exclusive || !read_fence_count); -+ include_read_fences = exclusive; -+ -+ dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ); -+ dma_resv_for_each_fence_unlocked(&cursor, fence) { -+ enum dma_resv_usage usage = dma_resv_iter_usage(&cursor); -+ -+ if ((!include_write_fences && usage == DMA_RESV_USAGE_WRITE) || -+ (!include_read_fences && usage == DMA_RESV_USAGE_READ)) -+ continue; -+ -+ data->fences[data->nr_fences++] = -+ pvr_fence_create_from_fence(fence_ctx, -+ sync_checkpoint_ctx, -+ fence, -+ PVRSRV_NO_FENCE, -+ (usage == DMA_RESV_USAGE_WRITE) ? -+ "write check fence" : -+ "read check fence"); -+ if (!data->fences[data->nr_fences - 1]) { -+ data->nr_fences--; -+ PVR_FENCE_TRACE(fence, -+ (usage == DMA_RESV_USAGE_WRITE) ? 
-+ "waiting on write fence" : -+ "waiting on read fence\n"); -+ WARN_ON(dma_fence_wait(fence, true) <= 0); -+ } -+ } -+ } -+ -+ WARN_ON((i != nr_pmrs) || (data->nr_fences != fence_count)); -+ -+ return data; -+ -+err_check_data_free: -+ kfree(data); -+ return NULL; -+} -+ -+#else -+ -+static u32 -+pvr_buffer_sync_pmrs_fence_count(u32 nr_pmrs, struct _PMR_ **pmrs, -+ u32 *pmr_flags) -+{ -+ struct dma_resv *resv; -+ struct dma_resv_list *resv_list; -+ struct dma_fence *fence; -+ u32 fence_count = 0; -+ bool exclusive; -+ int i; -+ -+ for (i = 0; i < nr_pmrs; i++) { -+ exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE); -+ -+ resv = pmr_reservation_object_get(pmrs[i]); -+ if (WARN_ON_ONCE(!resv)) -+ continue; -+ -+ resv_list = dma_resv_shared_list(resv); -+ fence = dma_resv_excl_fence(resv); -+ -+ if (fence && -+ (!exclusive || !resv_list || !resv_list->shared_count)) -+ fence_count++; -+ -+ if (exclusive && resv_list) -+ fence_count += resv_list->shared_count; -+ } -+ -+ return fence_count; -+} -+ -+static struct pvr_buffer_sync_check_data * -+pvr_buffer_sync_check_fences_create(struct pvr_fence_context *fence_ctx, -+ PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx, -+ u32 nr_pmrs, -+ struct _PMR_ **pmrs, -+ u32 *pmr_flags) -+{ -+ struct pvr_buffer_sync_check_data *data; -+ struct dma_resv *resv; -+ struct dma_resv_list *resv_list; -+ struct dma_fence *fence; -+ u32 fence_count; -+ bool exclusive; -+ int i, j; -+ int err; -+ -+ data = kzalloc(sizeof(*data), GFP_KERNEL); -+ if (!data) -+ return NULL; -+ -+ fence_count = pvr_buffer_sync_pmrs_fence_count(nr_pmrs, pmrs, -+ pmr_flags); -+ if (fence_count) { -+ data->fences = kcalloc(fence_count, sizeof(*data->fences), -+ GFP_KERNEL); -+ if (!data->fences) -+ goto err_check_data_free; -+ } -+ -+ for (i = 0; i < nr_pmrs; i++) { -+ resv = pmr_reservation_object_get(pmrs[i]); -+ if (WARN_ON_ONCE(!resv)) -+ continue; -+ -+ exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE); -+ if (!exclusive) { -+ err = dma_resv_reserve_shared(resv -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) -+ , 1 -+#endif -+ ); -+ if (err) -+ goto err_destroy_fences; -+ } -+ -+ resv_list = dma_resv_shared_list(resv); -+ fence = dma_resv_excl_fence(resv); -+ -+ if (fence && -+ (!exclusive || !resv_list || !resv_list->shared_count)) { -+ data->fences[data->nr_fences++] = -+ pvr_fence_create_from_fence(fence_ctx, -+ sync_checkpoint_ctx, -+ fence, -+ PVRSRV_NO_FENCE, -+ "exclusive check fence"); -+ if (!data->fences[data->nr_fences - 1]) { -+ data->nr_fences--; -+ PVR_FENCE_TRACE(fence, -+ "waiting on exclusive fence\n"); -+ WARN_ON(dma_fence_wait(fence, true) <= 0); -+ } -+ } -+ -+ if (exclusive && resv_list) { -+ for (j = 0; j < resv_list->shared_count; j++) { -+ fence = rcu_dereference_protected(resv_list->shared[j], -+ dma_resv_held(resv)); -+ data->fences[data->nr_fences++] = -+ pvr_fence_create_from_fence(fence_ctx, -+ sync_checkpoint_ctx, -+ fence, -+ PVRSRV_NO_FENCE, -+ "check fence"); -+ if (!data->fences[data->nr_fences - 1]) { -+ data->nr_fences--; -+ PVR_FENCE_TRACE(fence, -+ "waiting on non-exclusive fence\n"); -+ WARN_ON(dma_fence_wait(fence, true) <= 0); -+ } -+ } -+ } -+ } -+ -+ WARN_ON((i != nr_pmrs) || (data->nr_fences != fence_count)); -+ -+ return data; -+ -+err_destroy_fences: -+ for (i = 0; i < data->nr_fences; i++) -+ pvr_fence_destroy(data->fences[i]); -+ kfree(data->fences); -+err_check_data_free: -+ kfree(data); -+ return NULL; -+} -+ -+#endif -+ -+static void -+pvr_buffer_sync_check_fences_destroy(struct pvr_buffer_sync_check_data *data) -+{ -+ int 
i; -+ -+ for (i = 0; i < data->nr_fences; i++) -+ pvr_fence_destroy(data->fences[i]); -+ -+ kfree(data->fences); -+ kfree(data); -+} -+ -+struct pvr_buffer_sync_context * -+pvr_buffer_sync_context_create(struct device *dev, const char *name) -+{ -+ struct drm_device *ddev = dev_get_drvdata(dev); -+ struct pvr_drm_private *priv = ddev->dev_private; -+ struct pvr_buffer_sync_context *ctx; -+ int err; -+ -+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); -+ if (!ctx) { -+ err = -ENOMEM; -+ goto err_exit; -+ } -+ -+ ctx->fence_ctx = pvr_fence_context_create(priv->dev_node, -+ NativeSyncGetFenceStatusWq(), -+ name); -+ if (!ctx->fence_ctx) { -+ err = -ENOMEM; -+ goto err_free_ctx; -+ } -+ -+ mutex_init(&ctx->ctx_lock); -+ -+ return ctx; -+ -+err_free_ctx: -+ kfree(ctx); -+err_exit: -+ return ERR_PTR(err); -+} -+ -+void -+pvr_buffer_sync_context_destroy(struct pvr_buffer_sync_context *ctx) -+{ -+ pvr_fence_context_destroy(ctx->fence_ctx); -+ kfree(ctx); -+} -+ -+int -+pvr_buffer_sync_resolve_and_create_fences(struct pvr_buffer_sync_context *ctx, -+ PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx, -+ u32 nr_pmrs, -+ struct _PMR_ **pmrs, -+ u32 *pmr_flags, -+ u32 *nr_fence_checkpoints_out, -+ PSYNC_CHECKPOINT **fence_checkpoints_out, -+ PSYNC_CHECKPOINT *update_checkpoints_out, -+ struct pvr_buffer_sync_append_data **data_out) -+{ -+ struct pvr_buffer_sync_append_data *data; -+ PSYNC_CHECKPOINT *fence_checkpoints; -+ const size_t data_size = sizeof(*data); -+ const size_t pmrs_size = sizeof(*pmrs) * nr_pmrs; -+ const size_t pmr_flags_size = sizeof(*pmr_flags) * nr_pmrs; -+ int i; -+ int j; -+ int err; -+ -+ if (unlikely((nr_pmrs && !(pmrs && pmr_flags)) || -+ !nr_fence_checkpoints_out || !fence_checkpoints_out || -+ !update_checkpoints_out)) -+ return -EINVAL; -+ -+ for (i = 0; i < nr_pmrs; i++) { -+ if (unlikely(!(pmr_flags[i] & PVR_BUFFER_FLAG_MASK))) { -+ pr_err("%s: Invalid flags %#08x for pmr %p\n", -+ __func__, pmr_flags[i], pmrs[i]); -+ return -EINVAL; -+ } -+ } -+ -+#if defined(NO_HARDWARE) -+ /* -+ * For NO_HARDWARE there's no checking or updating of sync checkpoints -+ * which means SW waits on our fences will cause a deadlock (since they -+ * will never be signalled). Avoid this by not creating any fences. -+ */ -+ nr_pmrs = 0; -+#endif -+ -+ if (!nr_pmrs) { -+ *nr_fence_checkpoints_out = 0; -+ *fence_checkpoints_out = NULL; -+ *update_checkpoints_out = NULL; -+ *data_out = NULL; -+ -+ return 0; -+ } -+ -+ data = kzalloc(data_size + pmrs_size + pmr_flags_size, GFP_KERNEL); -+ if (unlikely(!data)) -+ return -ENOMEM; -+ -+ data->ctx = ctx; -+ data->pmrs = (struct _PMR_ **)(void *)(data + 1); -+ data->pmr_flags = (u32 *)(void *)(data->pmrs + nr_pmrs); -+ -+ /* -+ * It's expected that user space will provide a set of unique PMRs -+ * but, as a PMR can have multiple handles, it's still possible to -+ * end up here with duplicates. Take this opportunity to filter out -+ * any remaining duplicates (updating flags when necessary) before -+ * trying to process them further. -+ */ -+ for (i = 0; i < nr_pmrs; i++) { -+ for (j = 0; j < data->nr_pmrs; j++) { -+ if (data->pmrs[j] == pmrs[i]) { -+ data->pmr_flags[j] |= pmr_flags[i]; -+ break; -+ } -+ } -+ -+ if (j == data->nr_pmrs) { -+ data->pmrs[j] = pmrs[i]; -+ data->pmr_flags[j] = pmr_flags[i]; -+ data->nr_pmrs++; -+ } -+ } -+ -+ err = pvr_buffer_sync_pmrs_lock(ctx, data->nr_pmrs, data->pmrs); -+ if (unlikely(err)) { -+ /* -+ * -EINTR is returned if a signal arrives while trying to acquire a PMR -+ * lock. 
In this case the operation should be retried after the signal -+ * has been serviced. As this is expected behaviour, don't print an -+ * error in this case. -+ */ -+ if (err != -EINTR) { -+ pr_err("%s: failed to lock pmrs (errno=%d)\n", -+ __func__, err); -+ } -+ goto err_free_data; -+ } -+ -+ /* create the check data */ -+ data->check_data = pvr_buffer_sync_check_fences_create(ctx->fence_ctx, -+ sync_checkpoint_ctx, -+ data->nr_pmrs, -+ data->pmrs, -+ data->pmr_flags); -+ if (unlikely(!data->check_data)) { -+ err = -ENOMEM; -+ goto err_pmrs_unlock; -+ } -+ -+ fence_checkpoints = kcalloc(data->check_data->nr_fences, -+ sizeof(*fence_checkpoints), -+ GFP_KERNEL); -+ if (fence_checkpoints) { -+ pvr_fence_get_checkpoints(data->check_data->fences, -+ data->check_data->nr_fences, -+ fence_checkpoints); -+ } else { -+ if (unlikely(data->check_data->nr_fences)) { -+ err = -ENOMEM; -+ goto err_free_check_data; -+ } -+ } -+ -+ /* create the update fence */ -+ data->update_fence = pvr_fence_create(ctx->fence_ctx, -+ sync_checkpoint_ctx, -+ SYNC_CHECKPOINT_FOREIGN_CHECKPOINT, "update fence"); -+ if (unlikely(!data->update_fence)) { -+ err = -ENOMEM; -+ goto err_free_fence_checkpoints; -+ } -+ -+ /* -+ * We need to clean up the fences once the HW has finished with them. -+ * We can do this using fence callbacks. However, instead of adding a -+ * callback to every fence, which would result in more work, we can -+ * simply add one to the update fence since this will be the last fence -+ * to be signalled. This callback can do all the necessary clean up. -+ * -+ * Note: we take an additional reference on the update fence in case -+ * it signals before we can add it to a reservation object. -+ */ -+ PVR_FENCE_TRACE(&data->update_fence->base, -+ "create fence calling dma_fence_get\n"); -+ dma_fence_get(&data->update_fence->base); -+ -+ *nr_fence_checkpoints_out = data->check_data->nr_fences; -+ *fence_checkpoints_out = fence_checkpoints; -+ *update_checkpoints_out = pvr_fence_get_checkpoint(data->update_fence); -+ *data_out = data; -+ -+ return 0; -+ -+err_free_fence_checkpoints: -+ kfree(fence_checkpoints); -+err_free_check_data: -+ pvr_buffer_sync_check_fences_destroy(data->check_data); -+err_pmrs_unlock: -+ pvr_buffer_sync_pmrs_unlock(ctx, data->nr_pmrs, data->pmrs); -+err_free_data: -+ kfree(data); -+ return err; -+} -+ -+void -+pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data) -+{ -+ struct dma_resv *resv; -+ int i; -+ -+ dma_fence_enable_sw_signaling(&data->update_fence->base); -+ -+ for (i = 0; i < data->nr_pmrs; i++) { -+ resv = pmr_reservation_object_get(data->pmrs[i]); -+ if (WARN_ON_ONCE(!resv)) -+ continue; -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) -+ dma_resv_reserve_fences(resv, 1); -+#endif -+ if (data->pmr_flags[i] & PVR_BUFFER_FLAG_WRITE) { -+ PVR_FENCE_TRACE(&data->update_fence->base, -+ "added exclusive fence (%s) to resv %p\n", -+ data->update_fence->name, resv); -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) -+ dma_resv_add_fence(resv, -+ &data->update_fence->base, -+ DMA_RESV_USAGE_WRITE); -+#else -+ dma_resv_add_excl_fence(resv, -+ &data->update_fence->base); -+#endif -+ } else if (data->pmr_flags[i] & PVR_BUFFER_FLAG_READ) { -+ PVR_FENCE_TRACE(&data->update_fence->base, -+ "added non-exclusive fence (%s) to resv %p\n", -+ data->update_fence->name, resv); -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) -+ dma_resv_add_fence(resv, -+ &data->update_fence->base, -+ DMA_RESV_USAGE_READ); -+#else -+ dma_resv_add_shared_fence(resv, -+ 
&data->update_fence->base); -+#endif -+ } -+ } -+ -+ /* -+ * Now that the fence has been added to the necessary -+ * reservation objects we can safely drop the extra reference -+ * we took in pvr_buffer_sync_resolve_and_create_fences(). -+ */ -+ dma_fence_put(&data->update_fence->base); -+ pvr_buffer_sync_pmrs_unlock(data->ctx, data->nr_pmrs, -+ data->pmrs); -+ -+ /* destroy the check fences */ -+ pvr_buffer_sync_check_fences_destroy(data->check_data); -+ /* destroy the update fence */ -+ pvr_fence_destroy(data->update_fence); -+ -+ /* free the append data */ -+ kfree(data); -+} -+ -+void -+pvr_buffer_sync_kick_failed(struct pvr_buffer_sync_append_data *data) -+{ -+ -+ /* drop the extra reference we took on the update fence in -+ * pvr_buffer_sync_resolve_and_create_fences(). -+ */ -+ dma_fence_put(&data->update_fence->base); -+ -+ if (data->nr_pmrs > 0) -+ pvr_buffer_sync_pmrs_unlock(data->ctx, data->nr_pmrs, -+ data->pmrs); -+ -+ /* destroy the check fences */ -+ pvr_buffer_sync_check_fences_destroy(data->check_data); -+ /* destroy the update fence */ -+ pvr_fence_destroy(data->update_fence); -+ -+ /* free the append data */ -+ kfree(data); -+} -diff --git a/drivers/gpu/drm/img-rogue/pvr_buffer_sync.h b/drivers/gpu/drm/img-rogue/pvr_buffer_sync.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_buffer_sync.h -@@ -0,0 +1,125 @@ -+/* -+ * @File pvr_buffer_sync.h -+ * @Title PowerVR Linux buffer sync interface -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". 
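Stepping back to pvr_buffer_sync.c for a moment: the per-buffer arithmetic in pvr_buffer_sync_pmrs_fence_count() encodes the usual implicit-sync rule. A read-only use only waits on the existing write fences, while a write must wait on every reader and only falls back to the write fences when no readers are queued (those readers already waited on the writer themselves). A worked restatement of that rule, kept separate from the driver code; demo_fences_to_wait_on() is not a driver function and assumes <linux/types.h> for the types:

/* Worked example of the counting rule above. */
static u32 demo_fences_to_wait_on(bool is_write, u32 read_fences, u32 write_fences)
{
	u32 count = 0;

	if (!is_write || read_fences == 0)
		count += write_fences;	/* readers wait on writers; a writer does
					 * too, but only when no readers exist */
	if (is_write)
		count += read_fences;	/* a writer always waits on all readers */

	return count;
}

/* e.g. is_write=true,  3 readers + 1 writer pending -> wait on 3 fences;
 *      is_write=false, 3 readers + 1 writer pending -> wait on 1 fence. */

The same rule holds for the pre-5.19 branch, where the "write fence" is the exclusive fence and the "read fences" are the shared list.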
-+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#ifndef PVR_BUFFER_SYNC_H -+#define PVR_BUFFER_SYNC_H -+ -+#include -+#include -+#include -+ -+struct _PMR_; -+struct pvr_buffer_sync_context; -+struct pvr_buffer_sync_append_data; -+ -+/** -+ * pvr_buffer_sync_context_create - creates a buffer sync context -+ * @dev: Linux device -+ * @name: context name (used for debugging) -+ * -+ * pvr_buffer_sync_context_destroy() should be used to clean up the buffer -+ * sync context. -+ * -+ * Return: A buffer sync context or NULL if it fails for any reason. -+ */ -+struct pvr_buffer_sync_context * -+pvr_buffer_sync_context_create(struct device *dev, const char *name); -+ -+/** -+ * pvr_buffer_sync_context_destroy() - frees a buffer sync context -+ * @ctx: buffer sync context -+ */ -+void -+pvr_buffer_sync_context_destroy(struct pvr_buffer_sync_context *ctx); -+ -+/** -+ * pvr_buffer_sync_resolve_and_create_fences() - create checkpoints from -+ * buffers -+ * @ctx: buffer sync context -+ * @sync_checkpoint_ctx: context in which to create sync checkpoints -+ * @nr_pmrs: number of buffer objects (PMRs) -+ * @pmrs: buffer array -+ * @pmr_flags: internal flags -+ * @nr_fence_checkpoints_out: returned number of fence sync checkpoints -+ * @fence_checkpoints_out: returned array of fence sync checkpoints -+ * @update_checkpoint_out: returned update sync checkpoint -+ * @data_out: returned buffer sync data -+ * -+ * After this call, either pvr_buffer_sync_kick_succeeded() or -+ * pvr_buffer_sync_kick_failed() must be called. -+ * -+ * Return: 0 on success or an error code otherwise. -+ */ -+int -+pvr_buffer_sync_resolve_and_create_fences(struct pvr_buffer_sync_context *ctx, -+ PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx, -+ u32 nr_pmrs, -+ struct _PMR_ **pmrs, -+ u32 *pmr_flags, -+ u32 *nr_fence_checkpoints_out, -+ PSYNC_CHECKPOINT **fence_checkpoints_out, -+ PSYNC_CHECKPOINT *update_checkpoint_out, -+ struct pvr_buffer_sync_append_data **data_out); -+ -+/** -+ * pvr_buffer_sync_kick_succeeded() - cleans up after a successful kick -+ * operation -+ * @data: buffer sync data returned by -+ * pvr_buffer_sync_resolve_and_create_fences() -+ * -+ * Should only be called following pvr_buffer_sync_resolve_and_create_fences(). -+ */ -+void -+pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data); -+ -+/** -+ * pvr_buffer_sync_kick_failed() - cleans up after a failed kick operation -+ * @data: buffer sync data returned by -+ * pvr_buffer_sync_resolve_and_create_fences() -+ * -+ * Should only be called following pvr_buffer_sync_resolve_and_create_fences(). 
-+ */ -+void -+pvr_buffer_sync_kick_failed(struct pvr_buffer_sync_append_data *data); -+ -+#endif /* PVR_BUFFER_SYNC_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_buffer_sync_shared.h b/drivers/gpu/drm/img-rogue/pvr_buffer_sync_shared.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_buffer_sync_shared.h -@@ -0,0 +1,57 @@ -+/*************************************************************************/ /*! -+@File -+@Title PVR buffer sync shared -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Shared definitions between client and server -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
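The buffer-sync entry points declared in pvr_buffer_sync.h above are used as a fixed sequence around a kick: resolve the buffers into check/update sync checkpoints, submit the job, then report the outcome so the append data, check fences and PMR locks are released on exactly one path. A hedged sketch of that sequence; submit_job() and the surrounding job-submission plumbing are invented for illustration, and the assumption that the caller frees the returned checkpoint array with kfree() is mine, not stated in this hunk:

/* Sketch of the call sequence around pvr_buffer_sync_*; placeholders marked. */
static int submit_job(u32 nr_checks, PSYNC_CHECKPOINT *checks,
		      PSYNC_CHECKPOINT update);	/* placeholder */

static int demo_submit_with_buffer_sync(struct pvr_buffer_sync_context *ctx,
					PSYNC_CHECKPOINT_CONTEXT checkpoint_ctx,
					u32 nr_pmrs, struct _PMR_ **pmrs,
					u32 *pmr_flags)
{
	struct pvr_buffer_sync_append_data *append_data = NULL;
	PSYNC_CHECKPOINT *check_points = NULL;
	PSYNC_CHECKPOINT update_point = NULL;
	u32 nr_check_points = 0;
	int err;

	err = pvr_buffer_sync_resolve_and_create_fences(ctx, checkpoint_ctx,
							nr_pmrs, pmrs, pmr_flags,
							&nr_check_points,
							&check_points,
							&update_point,
							&append_data);
	if (err)
		return err;	/* -EINTR here just means "retry after the signal" */

	/* Hand the checkpoints to the job (placeholder call). */
	err = submit_job(nr_check_points, check_points, update_point);

	/* Exactly one of the two clean-up calls must follow the resolve. */
	if (append_data) {
		if (err)
			pvr_buffer_sync_kick_failed(append_data);
		else
			pvr_buffer_sync_kick_succeeded(append_data);
	}

	kfree(check_points);	/* assumed caller-owned; NULL-safe */
	return err;
}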
-+*/ /**************************************************************************/ -+ -+#ifndef PVR_BUFFER_SYNC_SHARED_H -+#define PVR_BUFFER_SYNC_SHARED_H -+ -+#define PVR_BUFFER_FLAG_READ (1U << 0) -+#define PVR_BUFFER_FLAG_WRITE (1U << 1) -+#define PVR_BUFFER_FLAG_MASK (PVR_BUFFER_FLAG_READ | \ -+ PVR_BUFFER_FLAG_WRITE) -+ -+/* Maximum number of PMRs passed -+ * in a kick when using buffer sync -+ */ -+#define PVRSRV_MAX_BUFFERSYNC_PMRS 32 -+ -+#endif /* PVR_BUFFER_SYNC_SHARED_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_counting_timeline.c b/drivers/gpu/drm/img-rogue/pvr_counting_timeline.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_counting_timeline.c -@@ -0,0 +1,308 @@ -+/* -+ * @File -+ * @Title PowerVR Linux software "counting" timeline fence implementation -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include -+#include -+#include -+ -+#include "services_kernel_client.h" -+#include "pvr_counting_timeline.h" -+#include "pvr_sw_fence.h" -+ -+#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) 
\ -+ do { \ -+ if (pfnDumpDebugPrintf) \ -+ pfnDumpDebugPrintf(pvDumpDebugFile, fmt, \ -+ ## __VA_ARGS__); \ -+ else \ -+ pr_err(fmt "\n", ## __VA_ARGS__); \ -+ } while (0) -+ -+struct pvr_counting_fence_timeline { -+ struct pvr_sw_fence_context *context; -+ -+ void *dbg_request_handle; -+ -+ spinlock_t active_fences_lock; -+ u64 current_value; /* guarded by active_fences_lock */ -+ u64 next_value; /* guarded by active_fences_lock */ -+ struct list_head active_fences; -+ -+ struct kref kref; -+}; -+ -+struct pvr_counting_fence { -+ u64 value; -+ struct dma_fence *fence; -+ struct list_head active_list_entry; -+}; -+ -+void pvr_counting_fence_timeline_dump_timeline( -+ void *data, -+ DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, -+ void *dump_debug_file) -+{ -+ -+ struct pvr_counting_fence_timeline *timeline = -+ (struct pvr_counting_fence_timeline *) data; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&timeline->active_fences_lock, flags); -+ -+ PVR_DUMPDEBUG_LOG(dump_debug_printf, -+ dump_debug_file, -+ "TL:%s SeqNum: %llu/%llu", -+ pvr_sw_fence_context_name( -+ timeline->context), -+ timeline->current_value, -+ timeline->next_value); -+ -+ spin_unlock_irqrestore(&timeline->active_fences_lock, flags); -+} -+ -+static void -+pvr_counting_fence_timeline_debug_request(void *data, u32 verbosity, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ struct pvr_counting_fence_timeline *timeline = -+ (struct pvr_counting_fence_timeline *)data; -+ struct pvr_counting_fence *obj; -+ unsigned long flags; -+ char value[128]; -+ -+ if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM)) { -+ spin_lock_irqsave(&timeline->active_fences_lock, flags); -+ pvr_sw_fence_context_value_str(timeline->context, value, -+ sizeof(value)); -+ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, -+ "sw: %s @%s cur=%llu", -+ pvr_sw_fence_context_name(timeline->context), -+ value, timeline->current_value); -+ list_for_each_entry(obj, &timeline->active_fences, -+ active_list_entry) { -+ obj->fence->ops->fence_value_str(obj->fence, -+ value, sizeof(value)); -+ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, -+ " @%s: val=%llu", value, obj->value); -+ } -+ spin_unlock_irqrestore(&timeline->active_fences_lock, flags); -+ } -+} -+ -+struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_create( -+ const char *name) -+{ -+ PVRSRV_ERROR srv_err; -+ struct pvr_counting_fence_timeline *timeline = -+ kzalloc(sizeof(*timeline), GFP_KERNEL); -+ -+ if (!timeline) -+ goto err_out; -+ -+ timeline->context = pvr_sw_fence_context_create(name, -+ "pvr_sw_sync"); -+ if (!timeline->context) -+ goto err_free_timeline; -+ -+ srv_err = PVRSRVRegisterDriverDbgRequestNotify( -+ &timeline->dbg_request_handle, -+ pvr_counting_fence_timeline_debug_request, -+ DEBUG_REQUEST_LINUXFENCE, -+ timeline); -+ if (srv_err != PVRSRV_OK) { -+ pr_err("%s: failed to register debug request callback (%s)\n", -+ __func__, PVRSRVGetErrorString(srv_err)); -+ goto err_free_timeline_ctx; -+ } -+ -+ timeline->current_value = 0; -+ timeline->next_value = 1; -+ kref_init(&timeline->kref); -+ spin_lock_init(&timeline->active_fences_lock); -+ INIT_LIST_HEAD(&timeline->active_fences); -+ -+err_out: -+ return timeline; -+ -+err_free_timeline_ctx: -+ pvr_sw_fence_context_destroy(timeline->context); -+ -+err_free_timeline: -+ kfree(timeline); -+ timeline = NULL; -+ goto err_out; -+} -+ -+void pvr_counting_fence_timeline_force_complete( -+ struct pvr_counting_fence_timeline *timeline) -+{ -+ struct list_head *entry, *tmp; 
-+ unsigned long flags; -+ -+ spin_lock_irqsave(&timeline->active_fences_lock, flags); -+ -+#if defined(DEBUG) && !defined(SUPPORT_AUTOVZ) -+ /* This is just a safety measure. Normally we should never see any -+ * unsignaled sw fences when we come here. Warn if we still do! -+ */ -+ WARN_ON(!list_empty(&timeline->active_fences)); -+#endif -+ -+ list_for_each_safe(entry, tmp, &timeline->active_fences) { -+ struct pvr_counting_fence *fence = -+ list_entry(entry, struct pvr_counting_fence, -+ active_list_entry); -+ dma_fence_signal(fence->fence); -+ dma_fence_put(fence->fence); -+ fence->fence = NULL; -+ list_del(&fence->active_list_entry); -+ kfree(fence); -+ } -+ spin_unlock_irqrestore(&timeline->active_fences_lock, flags); -+} -+ -+static void pvr_counting_fence_timeline_destroy( -+ struct kref *kref) -+{ -+ struct pvr_counting_fence_timeline *timeline = -+ container_of(kref, struct pvr_counting_fence_timeline, kref); -+ -+ WARN_ON(!list_empty(&timeline->active_fences)); -+ -+ PVRSRVUnregisterDriverDbgRequestNotify(timeline->dbg_request_handle); -+ -+ pvr_sw_fence_context_destroy(timeline->context); -+ kfree(timeline); -+} -+ -+void pvr_counting_fence_timeline_put( -+ struct pvr_counting_fence_timeline *timeline) -+{ -+ kref_put(&timeline->kref, pvr_counting_fence_timeline_destroy); -+} -+ -+struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_get( -+ struct pvr_counting_fence_timeline *timeline) -+{ -+ if (!timeline) -+ return NULL; -+ kref_get(&timeline->kref); -+ return timeline; -+} -+ -+struct dma_fence *pvr_counting_fence_create( -+ struct pvr_counting_fence_timeline *timeline, u64 *sync_pt_idx) -+{ -+ unsigned long flags; -+ struct dma_fence *sw_fence; -+ struct pvr_counting_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL); -+ -+ if (!fence) -+ return NULL; -+ -+ sw_fence = pvr_sw_fence_create(timeline->context); -+ if (!sw_fence) -+ goto err_free_fence; -+ -+ fence->fence = dma_fence_get(sw_fence); -+ -+ spin_lock_irqsave(&timeline->active_fences_lock, flags); -+ -+ fence->value = timeline->next_value++; -+ if (sync_pt_idx) -+ *sync_pt_idx = fence->value; -+ -+ list_add_tail(&fence->active_list_entry, &timeline->active_fences); -+ -+ spin_unlock_irqrestore(&timeline->active_fences_lock, flags); -+ -+ /* Counting fences can be signalled any time after creation */ -+ dma_fence_enable_sw_signaling(sw_fence); -+ -+ return sw_fence; -+ -+err_free_fence: -+ kfree(fence); -+ return NULL; -+} -+ -+bool pvr_counting_fence_timeline_inc( -+ struct pvr_counting_fence_timeline *timeline, u64 *sync_pt_idx) -+{ -+ struct list_head *entry, *tmp; -+ unsigned long flags; -+ bool res; -+ -+ spin_lock_irqsave(&timeline->active_fences_lock, flags); -+ -+ if (timeline->current_value == timeline->next_value-1) { -+ res = false; -+ goto exit_unlock; -+ } -+ -+ timeline->current_value++; -+ -+ if (sync_pt_idx) -+ *sync_pt_idx = timeline->current_value; -+ -+ list_for_each_safe(entry, tmp, &timeline->active_fences) { -+ struct pvr_counting_fence *fence = -+ list_entry(entry, struct pvr_counting_fence, -+ active_list_entry); -+ if (fence->value <= timeline->current_value) { -+ dma_fence_signal(fence->fence); -+ dma_fence_put(fence->fence); -+ fence->fence = NULL; -+ list_del(&fence->active_list_entry); -+ kfree(fence); -+ } -+ } -+ -+ res = true; -+ -+exit_unlock: -+ spin_unlock_irqrestore(&timeline->active_fences_lock, flags); -+ -+ return res; -+} -diff --git a/drivers/gpu/drm/img-rogue/pvr_counting_timeline.h b/drivers/gpu/drm/img-rogue/pvr_counting_timeline.h -new file mode 100644 -index 
000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_counting_timeline.h -@@ -0,0 +1,68 @@ -+/* -+ * @File -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ */ -+ -+#if !defined(__PVR_COUNTING_TIMELINE_H__) -+#define __PVR_COUNTING_TIMELINE_H__ -+ -+#include "pvr_linux_fence.h" -+ -+struct pvr_counting_fence_timeline; -+ -+void pvr_counting_fence_timeline_dump_timeline( -+ void *data, -+ DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, -+ void *dump_debug_file); -+ -+struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_create( -+ const char *name); -+void pvr_counting_fence_timeline_put( -+ struct pvr_counting_fence_timeline *fence_timeline); -+struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_get( -+ struct pvr_counting_fence_timeline *fence_timeline); -+struct dma_fence *pvr_counting_fence_create( -+ struct pvr_counting_fence_timeline *fence_timeline, u64 *sync_pt_idx); -+bool pvr_counting_fence_timeline_inc( -+ struct pvr_counting_fence_timeline *fence_timeline, u64 *sync_pt_idx); -+void pvr_counting_fence_timeline_force_complete( -+ struct pvr_counting_fence_timeline *fence_timeline); -+ -+#endif /* !defined(__PVR_COUNTING_TIMELINE_H__) */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_debug.c b/drivers/gpu/drm/img-rogue/pvr_debug.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_debug.c -@@ -0,0 +1,486 @@ -+/*************************************************************************/ /*! -+@File -+@Title Debug Functionality -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Provides kernel side Debug Functionality. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
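For readers unfamiliar with software timelines: the pvr_counting_timeline code added above hands every new fence the timeline's next_value and, on each increment, signals every pending fence whose value has now been reached. The stand-alone sketch below is illustrative only and not part of the patch; every name in it is made up, a singly linked list stands in for the kernel list_head, and printf() stands in for dma_fence_signal().

/*
 * Stand-alone model of a "counting" timeline: each fence records the
 * timeline value it waits for and is signalled once the timeline has
 * advanced that far.  Illustrative user-space code only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct sw_fence {
    uint64_t value;              /* timeline value this fence waits for */
    struct sw_fence *next;       /* next pending fence */
};

struct sw_timeline {
    uint64_t current_value;      /* last completed point on the timeline */
    uint64_t next_value;         /* value handed to the next fence */
    struct sw_fence *pending;    /* fences not yet signalled */
};

/* Mirrors pvr_counting_fence_create(): take the next value and queue it. */
static struct sw_fence *sw_fence_create(struct sw_timeline *tl)
{
    struct sw_fence *f = calloc(1, sizeof(*f));

    if (!f)
        return NULL;
    f->value = tl->next_value++;
    f->next = tl->pending;
    tl->pending = f;
    return f;
}

/*
 * Mirrors pvr_counting_fence_timeline_inc(): refuse to advance when no
 * sync point is outstanding, otherwise bump the timeline and complete
 * every fence whose value has now been reached.
 */
static bool sw_timeline_inc(struct sw_timeline *tl)
{
    struct sw_fence **pp = &tl->pending;

    if (tl->current_value == tl->next_value - 1)
        return false;

    tl->current_value++;
    while (*pp) {
        if ((*pp)->value <= tl->current_value) {
            struct sw_fence *done = *pp;

            *pp = done->next;
            printf("signalled fence %llu\n",
                   (unsigned long long)done->value);
            free(done);
        } else {
            pp = &(*pp)->next;
        }
    }
    return true;
}

int main(void)
{
    struct sw_timeline tl = { .current_value = 0, .next_value = 1 };

    sw_fence_create(&tl);
    sw_fence_create(&tl);
    sw_timeline_inc(&tl);        /* signals the fence waiting for 1 */
    sw_timeline_inc(&tl);        /* signals the fence waiting for 2 */
    return 0;
}

As in the driver code above, an increment is refused once current_value has caught up with next_value - 1, i.e. when no sync point is outstanding.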
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include -+#include -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include "linkage.h" -+#include "pvrsrv.h" -+#include "osfunc.h" -+#include "di_server.h" -+ -+#if defined(PVRSRV_NEED_PVR_DPF) -+ -+/******** BUFFERED LOG MESSAGES ********/ -+ -+/* Because we don't want to have to handle CCB wrapping, each buffered -+ * message is rounded up to PVRSRV_DEBUG_CCB_MESG_MAX bytes. This means -+ * there is the same fixed number of messages that can be stored, -+ * regardless of message length. -+ */ -+ -+#if defined(PVRSRV_DEBUG_CCB_MAX) -+ -+#define PVRSRV_DEBUG_CCB_MESG_MAX PVR_MAX_DEBUG_MESSAGE_LEN -+ -+typedef struct -+{ -+ const IMG_CHAR *pszFile; -+ IMG_INT iLine; -+ IMG_UINT32 ui32TID; -+ IMG_UINT32 ui32PID; -+ IMG_CHAR pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX]; -+ struct timeval sTimeVal; -+} -+PVRSRV_DEBUG_CCB; -+ -+static PVRSRV_DEBUG_CCB gsDebugCCB[PVRSRV_DEBUG_CCB_MAX]; -+ -+static IMG_UINT giOffset; -+ -+/* protects access to gsDebugCCB */ -+static DEFINE_SPINLOCK(gsDebugCCBLock); -+ -+static void -+AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line, -+ const IMG_CHAR *szBuffer) -+{ -+ unsigned long uiFlags; -+ -+ spin_lock_irqsave(&gsDebugCCBLock, uiFlags); -+ -+ gsDebugCCB[giOffset].pszFile = pszFileName; -+ gsDebugCCB[giOffset].iLine = ui32Line; -+ gsDebugCCB[giOffset].ui32TID = current->pid; -+ gsDebugCCB[giOffset].ui32PID = current->tgid; -+ -+ do_gettimeofday(&gsDebugCCB[giOffset].sTimeVal); -+ -+ OSStringLCopy(gsDebugCCB[giOffset].pcMesg, szBuffer, -+ PVRSRV_DEBUG_CCB_MESG_MAX); -+ -+ giOffset = (giOffset + 1) % PVRSRV_DEBUG_CCB_MAX; -+ -+ spin_unlock_irqrestore(&gsDebugCCBLock, uiFlags); -+} -+ -+void PVRSRVDebugPrintfDumpCCB(void) -+{ -+ int i; -+ unsigned long uiFlags; -+ -+ spin_lock_irqsave(&gsDebugCCBLock, uiFlags); -+ -+ for (i = 0; i < PVRSRV_DEBUG_CCB_MAX; i++) -+ { -+ PVRSRV_DEBUG_CCB *psDebugCCBEntry = -+ &gsDebugCCB[(giOffset + i) % PVRSRV_DEBUG_CCB_MAX]; -+ -+ /* Early on, we won't have PVRSRV_DEBUG_CCB_MAX messages */ -+ if (!psDebugCCBEntry->pszFile) -+ { -+ continue; -+ } -+ -+ printk(KERN_ERR "%s:%d: (%ld.%ld, tid=%u, pid=%u) %s\n", -+ psDebugCCBEntry->pszFile, -+ psDebugCCBEntry->iLine, -+ (long)psDebugCCBEntry->sTimeVal.tv_sec, -+ (long)psDebugCCBEntry->sTimeVal.tv_usec, -+ psDebugCCBEntry->ui32TID, -+ psDebugCCBEntry->ui32PID, -+ psDebugCCBEntry->pcMesg); -+ -+ /* Clear this entry so it doesn't get printed the next time again. 
*/ -+ psDebugCCBEntry->pszFile = NULL; -+ } -+ -+ spin_unlock_irqrestore(&gsDebugCCBLock, uiFlags); -+} -+ -+#else /* defined(PVRSRV_DEBUG_CCB_MAX) */ -+ -+static INLINE void -+AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line, -+ const IMG_CHAR *szBuffer) -+{ -+ (void)pszFileName; -+ (void)szBuffer; -+ (void)ui32Line; -+} -+ -+void PVRSRVDebugPrintfDumpCCB(void) -+{ -+ /* Not available */ -+} -+ -+#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */ -+ -+static IMG_UINT32 PVRDebugLevel = -+ ( -+ DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING | DBGPRIV_DEBUG -+#if defined(PVRSRV_DEBUG_CCB_MAX) -+ | DBGPRIV_BUFFERED -+#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */ -+#if defined(PVR_DPF_ADHOC_DEBUG_ON) -+ | DBGPRIV_DEBUG -+#endif /* defined(PVR_DPF_ADHOC_DEBUG_ON) */ -+ ); -+ -+module_param(PVRDebugLevel, uint, 0644); -+MODULE_PARM_DESC(PVRDebugLevel, -+ "Sets the level of debug output (default 0x7)"); -+ -+IMG_UINT32 OSDebugLevel(void) -+{ -+ return PVRDebugLevel; -+} -+ -+void OSSetDebugLevel(IMG_UINT32 ui32DebugLevel) -+{ -+ PVRDebugLevel = ui32DebugLevel; -+} -+ -+IMG_BOOL OSIsDebugLevel(IMG_UINT32 ui32DebugLevel) -+{ -+ return (PVRDebugLevel & ui32DebugLevel) != 0; -+} -+ -+#else /* defined(PVRSRV_NEED_PVR_DPF) */ -+ -+IMG_UINT32 OSDebugLevel(void) -+{ -+ return 0; -+} -+ -+void OSSetDebugLevel(IMG_UINT32 ui32DebugLevel) -+{ -+ PVR_UNREFERENCED_PARAMETER(ui32DebugLevel); -+} -+ -+IMG_BOOL OSIsDebugLevel(IMG_UINT32 ui32DebugLevel) -+{ -+ PVR_UNREFERENCED_PARAMETER(ui32DebugLevel); -+ return IMG_FALSE; -+} -+ -+#endif /* defined(PVRSRV_NEED_PVR_DPF) */ -+ -+#define PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN -+ -+/* Message buffer for messages */ -+static IMG_CHAR gszBuffer[PVR_MAX_MSG_LEN + 1]; -+ -+/* The lock is used to control access to gszBuffer */ -+static DEFINE_SPINLOCK(gsDebugLock); -+ -+/* -+ * Append a string to a buffer using formatted conversion. -+ * The function takes a variable number of arguments, pointed -+ * to by the var args list. -+ */ -+__printf(3, 0) -+static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, va_list VArgs) -+{ -+ IMG_UINT32 ui32Used; -+ IMG_UINT32 ui32Space; -+ IMG_INT32 i32Len; -+ -+ ui32Used = OSStringLength(pszBuf); -+ BUG_ON(ui32Used >= ui32BufSiz); -+ ui32Space = ui32BufSiz - ui32Used; -+ -+ i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs); -+ pszBuf[ui32BufSiz - 1] = 0; -+ -+ /* Return true if string was truncated */ -+ return i32Len < 0 || i32Len >= (IMG_INT32)ui32Space; -+} -+ -+void PVRSRVReleasePrintfVArgs(const IMG_CHAR *pszFormat, va_list vaArgs) -+{ -+ unsigned long ulLockFlags = 0; -+ IMG_CHAR *pszBuf = gszBuffer; -+ IMG_UINT32 ui32BufSiz = sizeof(gszBuffer); -+ IMG_INT32 result; -+ -+ spin_lock_irqsave(&gsDebugLock, ulLockFlags); -+ -+ result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR_K: %u: ", current->pid); -+ PVR_ASSERT(result>0); -+ ui32BufSiz -= result; -+ -+ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs)) -+ { -+ printk(KERN_INFO "%s (truncated)\n", pszBuf); -+ } -+ else -+ { -+ printk(KERN_INFO "%s\n", pszBuf); -+ } -+ -+ spin_unlock_irqrestore(&gsDebugLock, ulLockFlags); -+} -+ -+/*************************************************************************/ /*! -+@Function PVRSRVReleasePrintf -+@Description To output an important message to the user in release builds -+@Input pszFormat The message format string -+@Input ... 
Zero or more arguments for use by the format string -+*/ /**************************************************************************/ -+void PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) -+{ -+ va_list vaArgs; -+ -+ va_start(vaArgs, pszFormat); -+ PVRSRVReleasePrintfVArgs(pszFormat, vaArgs); -+ va_end(vaArgs); -+} -+ -+#if defined(PVRSRV_NEED_PVR_TRACE) -+ -+/*************************************************************************/ /*! -+@Function PVRTrace -+@Description To output a debug message to the user -+@Input pszFormat The message format string -+@Input ... Zero or more arguments for use by the format string -+*/ /**************************************************************************/ -+void PVRSRVTrace(const IMG_CHAR *pszFormat, ...) -+{ -+ va_list VArgs; -+ unsigned long ulLockFlags = 0; -+ IMG_CHAR *pszBuf = gszBuffer; -+ IMG_UINT32 ui32BufSiz = sizeof(gszBuffer); -+ IMG_INT32 result; -+ -+ va_start(VArgs, pszFormat); -+ -+ spin_lock_irqsave(&gsDebugLock, ulLockFlags); -+ -+ result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR: %u: ", current->pid); -+ PVR_ASSERT(result>0); -+ ui32BufSiz -= result; -+ -+ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs)) -+ { -+ printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf); -+ } -+ else -+ { -+ printk(KERN_ERR "%s\n", pszBuf); -+ } -+ -+ spin_unlock_irqrestore(&gsDebugLock, ulLockFlags); -+ -+ va_end(VArgs); -+} -+ -+#endif /* defined(PVRSRV_NEED_PVR_TRACE) */ -+ -+#if defined(PVRSRV_NEED_PVR_DPF) -+ -+/* -+ * Append a string to a buffer using formatted conversion. -+ * The function takes a variable number of arguments, calling -+ * VBAppend to do the actual work. -+ */ -+__printf(3, 4) -+static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, ...) -+{ -+ va_list VArgs; -+ IMG_BOOL bTrunc; -+ -+ va_start (VArgs, pszFormat); -+ -+ bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs); -+ -+ va_end (VArgs); -+ -+ return bTrunc; -+} -+ -+/*************************************************************************/ /*! -+@Function PVRSRVDebugPrintf -+@Description To output a debug message to the user -+@Input uDebugLevel The current debug level -+@Input pszFile The source file generating the message -+@Input uLine The line of the source file -+@Input pszFormat The message format string -+@Input ... Zero or more arguments for use by the format string -+*/ /**************************************************************************/ -+void PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel, -+ const IMG_CHAR *pszFullFileName, -+ IMG_UINT32 ui32Line, -+ const IMG_CHAR *pszFormat, -+ ...) 
-+{ -+ const IMG_CHAR *pszFileName = pszFullFileName; -+ IMG_CHAR *pszLeafName; -+ va_list vaArgs; -+ unsigned long ulLockFlags = 0; -+ IMG_CHAR *pszBuf = gszBuffer; -+ IMG_UINT32 ui32BufSiz = sizeof(gszBuffer); -+ -+ if (!(PVRDebugLevel & ui32DebugLevel)) -+ { -+ return; -+ } -+ -+ va_start(vaArgs, pszFormat); -+ -+ spin_lock_irqsave(&gsDebugLock, ulLockFlags); -+ -+ switch (ui32DebugLevel) -+ { -+ case DBGPRIV_FATAL: -+ { -+ OSStringLCopy(pszBuf, "PVR_K:(Fatal): ", ui32BufSiz); -+ PVRSRV_REPORT_ERROR(); -+ break; -+ } -+ case DBGPRIV_ERROR: -+ { -+ OSStringLCopy(pszBuf, "PVR_K:(Error): ", ui32BufSiz); -+ PVRSRV_REPORT_ERROR(); -+ break; -+ } -+ case DBGPRIV_WARNING: -+ { -+ OSStringLCopy(pszBuf, "PVR_K:(Warn): ", ui32BufSiz); -+ break; -+ } -+ case DBGPRIV_MESSAGE: -+ { -+ OSStringLCopy(pszBuf, "PVR_K:(Mesg): ", ui32BufSiz); -+ break; -+ } -+ case DBGPRIV_VERBOSE: -+ { -+ OSStringLCopy(pszBuf, "PVR_K:(Verb): ", ui32BufSiz); -+ break; -+ } -+ case DBGPRIV_DEBUG: -+ { -+ OSStringLCopy(pszBuf, "PVR_K:(Debug): ", ui32BufSiz); -+ break; -+ } -+ case DBGPRIV_CALLTRACE: -+ case DBGPRIV_ALLOC: -+ case DBGPRIV_BUFFERED: -+ default: -+ { -+ OSStringLCopy(pszBuf, "PVR_K: ", ui32BufSiz); -+ break; -+ } -+ } -+ -+ if (current->pid == task_tgid_nr(current)) -+ { -+ (void) BAppend(pszBuf, ui32BufSiz, "%5u: ", current->pid); -+ } -+ else -+ { -+ (void) BAppend(pszBuf, ui32BufSiz, "%5u-%5u: ", task_tgid_nr(current) /* pid id of group*/, current->pid /* task id */); -+ } -+ -+ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs)) -+ { -+ printk(KERN_ERR "%s (truncated)\n", pszBuf); -+ } -+ else -+ { -+ IMG_BOOL bTruncated = IMG_FALSE; -+ -+#if !defined(__sh__) -+ pszLeafName = (IMG_CHAR *)strrchr (pszFileName, '/'); -+ -+ if (pszLeafName) -+ { -+ pszFileName = pszLeafName+1; -+ } -+#endif /* __sh__ */ -+ -+#if defined(DEBUG) -+ { -+ static const IMG_CHAR *lastFile; -+ -+ if (lastFile == pszFileName) -+ { -+ bTruncated = BAppend(pszBuf, ui32BufSiz, " [%u]", ui32Line); -+ } -+ else -+ { -+ bTruncated = BAppend(pszBuf, ui32BufSiz, " [%s:%u]", pszFileName, ui32Line); -+ lastFile = pszFileName; -+ } -+ } -+#else -+ bTruncated = BAppend(pszBuf, ui32BufSiz, " [%u]", ui32Line); -+#endif -+ -+ if (bTruncated) -+ { -+ printk(KERN_ERR "%s (truncated)\n", pszBuf); -+ } -+ else -+ { -+ if (ui32DebugLevel & DBGPRIV_BUFFERED) -+ { -+ AddToBufferCCB(pszFileName, ui32Line, pszBuf); -+ } -+ else -+ { -+ printk(KERN_ERR "%s\n", pszBuf); -+ } -+ } -+ } -+ -+ spin_unlock_irqrestore(&gsDebugLock, ulLockFlags); -+ -+ va_end (vaArgs); -+} -+ -+#endif /* PVRSRV_NEED_PVR_DPF */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_debug.h b/drivers/gpu/drm/img-rogue/pvr_debug.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_debug.h -@@ -0,0 +1,1071 @@ -+/*************************************************************************/ /*! -+@File -+@Title PVR Debug Declarations -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Provides debug functionality -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
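The buffered-logging path in pvr_debug.c above (AddToBufferCCB() / PVRSRVDebugPrintfDumpCCB()) relies on a simple trick spelled out in its comment: every message slot has the same fixed size, so the circular buffer never has to handle a message wrapping around the end, and overwriting the oldest entry is just an index modulo the slot count. A minimal user-space sketch of that scheme follows; the names, sizes and locking-free structure are made up for illustration and are not the driver's.

/*
 * Fixed-slot circular log: every entry occupies the same number of bytes,
 * so "wrapping" is an index modulo the slot count and the oldest entries
 * are silently overwritten.  Illustrative user-space code only.
 */
#include <stdio.h>
#include <string.h>

#define LOG_CCB_SLOTS    4
#define LOG_CCB_MESG_MAX 64

struct log_entry {
    int  line;                       /* source line that logged the message */
    char mesg[LOG_CCB_MESG_MAX];     /* fixed-size copy of the message      */
};

static struct log_entry ccb[LOG_CCB_SLOTS];
static unsigned int ccb_offset;      /* next slot to overwrite */

static void ccb_add(int line, const char *msg)
{
    struct log_entry *e = &ccb[ccb_offset];

    e->line = line;
    strncpy(e->mesg, msg, LOG_CCB_MESG_MAX - 1);
    e->mesg[LOG_CCB_MESG_MAX - 1] = '\0';
    ccb_offset = (ccb_offset + 1) % LOG_CCB_SLOTS;
}

/*
 * Dump oldest-to-newest, starting at the next slot to be overwritten;
 * skip slots that were never written and clear each slot after printing
 * so nothing is dumped twice (as the driver does with pszFile).
 */
static void ccb_dump(void)
{
    for (unsigned int i = 0; i < LOG_CCB_SLOTS; i++) {
        struct log_entry *e = &ccb[(ccb_offset + i) % LOG_CCB_SLOTS];

        if (e->mesg[0] == '\0')
            continue;
        printf("line %d: %s\n", e->line, e->mesg);
        e->mesg[0] = '\0';
    }
}

int main(void)
{
    for (int i = 0; i < 6; i++) {
        char buf[32];

        snprintf(buf, sizeof(buf), "message %d", i);
        ccb_add(__LINE__, buf);      /* messages 0 and 1 get overwritten */
    }
    ccb_dump();                      /* prints messages 2..5, oldest first */
    return 0;
}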
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PVR_DEBUG_H -+#define PVR_DEBUG_H -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+/* If the kernel pre defined macro is present, we will use the definition of HTBLOGK -+ * in htbserver.h with the format strings specified in ht_buffer_sf.h. Otherwise, -+ * HTBLOGK needs to be defined as empty since it is not used by user-mode code. -+ */ -+#if defined(__KERNEL__) -+# include "htbserver.h" -+# include "htbuffer_sf.h" -+#else -+# define HTBLOGK(...) ((void) 0) -+#endif -+ -+/*! @cond Doxygen_Suppress */ -+#if defined(_MSC_VER) -+# define MSC_SUPPRESS_4127 __pragma(warning(suppress:4127)) -+#else -+# define MSC_SUPPRESS_4127 -+#endif -+/*! @endcond */ -+ -+#if defined(__linux__) && defined(__KERNEL__) -+ #include -+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) -+ #include -+ #else -+ #include -+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ -+#else -+ #include -+#endif /* __linux__ && __KERNEL__*/ -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+#define PVR_MAX_DEBUG_MESSAGE_LEN (512) /*!< Max length of a Debug Message */ -+ -+/* These are privately used by pvr_debug, use the PVR_DBG_ defines instead */ -+#define DBGPRIV_FATAL 0x001U /*!< Debug-Fatal. Privately used by pvr_debug. */ -+#define DBGPRIV_ERROR 0x002U /*!< Debug-Error. Privately used by pvr_debug. */ -+#define DBGPRIV_WARNING 0x004U /*!< Debug-Warning. Privately used by pvr_debug. */ -+#define DBGPRIV_MESSAGE 0x008U /*!< Debug-Message. 
Privately used by pvr_debug. */ -+#define DBGPRIV_VERBOSE 0x010U /*!< Debug-Verbose. Privately used by pvr_debug. */ -+#define DBGPRIV_CALLTRACE 0x020U /*!< Debug-CallTrace. Privately used by pvr_debug. */ -+#define DBGPRIV_ALLOC 0x040U /*!< Debug-Alloc. Privately used by pvr_debug. */ -+#define DBGPRIV_BUFFERED 0x080U /*!< Debug-Buffered. Privately used by pvr_debug. */ -+#define DBGPRIV_DEBUG 0x100U /*!< Debug-AdHoc-Debug. Never submitted. Privately used by pvr_debug. */ -+#define DBGPRIV_LAST 0x100U /*!< Always set to highest mask value. Privately used by pvr_debug. */ -+ -+/* Enable DPF logging for locally from some make targets */ -+#if defined(PVRSRV_NEED_PVR_DPF_LOCAL) -+#undef PVRSRV_NEED_PVR_DPF -+#define PVRSRV_NEED_PVR_DPF -+#endif -+ -+#if !defined(PVRSRV_NEED_PVR_ASSERT) && defined(DEBUG) -+#define PVRSRV_NEED_PVR_ASSERT -+#endif -+ -+#if defined(PVRSRV_NEED_PVR_ASSERT) && !defined(PVRSRV_NEED_PVR_DPF) -+#define PVRSRV_NEED_PVR_DPF -+#endif -+ -+#if !defined(PVRSRV_NEED_PVR_TRACE) && (defined(DEBUG) || defined(TIMING)) -+#define PVRSRV_NEED_PVR_TRACE -+#endif -+ -+#if !defined(DOXYGEN) -+/*************************************************************************/ /* -+PVRSRVGetErrorString -+Returns a string describing the provided PVRSRV_ERROR code -+NB No doxygen comments provided as this function does not require porting -+ for other operating systems -+*/ /**************************************************************************/ -+const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError); -+#define PVRSRVGETERRORSTRING PVRSRVGetErrorString -+#endif -+ -+/* PVR_ASSERT() and PVR_DBG_BREAK handling */ -+ -+#if defined(__KLOCWORK__) -+/* A dummy no-return function to be used under Klocwork to mark unreachable -+ paths instead of abort() in order to avoid MISRA.STDLIB.ABORT issues. */ -+__noreturn void klocwork_abort(void); -+#endif -+ -+#if defined(PVRSRV_NEED_PVR_ASSERT) || defined(DOXYGEN) -+ -+/* Unfortunately the Klocwork static analysis checker doesn't understand our -+ * ASSERT macros. Thus it reports lots of false positive. Defining our Assert -+ * macros in a special way when the code is analysed by Klocwork avoids -+ * them. -+ */ -+#if defined(__KLOCWORK__) -+#define PVR_ASSERT(x) do { if (!(x)) {klocwork_abort();} } while (false) -+#else /* ! __KLOCWORKS__ */ -+ -+#if defined(_WIN32) -+#define PVR_ASSERT(expr) do \ -+ { \ -+ MSC_SUPPRESS_4127 \ -+ if (unlikely(!(expr))) \ -+ { \ -+ PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__,\ -+ "*** Debug assertion failed!"); \ -+ __debugbreak(); \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+#else -+ -+#if defined(__linux__) && defined(__KERNEL__) -+#include -+#include -+ -+/* In Linux kernel mode, use WARN_ON() directly. This produces the -+ * correct filename and line number in the warning message. -+ */ -+#define PVR_ASSERT(EXPR) do \ -+ { \ -+ if (unlikely(!(EXPR))) \ -+ { \ -+ PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__, \ -+ "Debug assertion failed!"); \ -+ WARN_ON(1); \ -+ } \ -+ } while (false) -+ -+#else /* defined(__linux__) && defined(__KERNEL__) */ -+ -+/*************************************************************************/ /*! -+@Function PVRSRVDebugAssertFail -+@Description Indicate to the user that a debug assertion has failed and -+ prevent the program from continuing. -+ Invoked from the macro PVR_ASSERT(). 
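The PVR_ASSERT() variants above share one shape: when assertions are enabled, log the file, line and expression and then trap (via __debugbreak(), WARN_ON(1) or PVRSRVDebugAssertFail()); when they are disabled, reduce to a plain evaluation of the expression so any side effects are preserved. The small user-space sketch below shows that pattern with a hypothetical MY_ASSERT() and abort() as the trap; it is not the driver macro, just the idea.

/*
 * Sketch of a compile-time switchable assert: build with -DNEED_ASSERT to
 * get the checking variant, otherwise the expression is still evaluated
 * but never checked.  Illustrative user-space code only.
 */
#include <stdio.h>
#include <stdlib.h>

#if defined(NEED_ASSERT)
#define MY_ASSERT(expr) do {                                      \
        if (!(expr)) {                                            \
            fprintf(stderr, "%s:%d: assertion failed: %s\n",      \
                    __FILE__, __LINE__, #expr);                   \
            abort();                                              \
        }                                                         \
    } while (0)
#else
#define MY_ASSERT(expr) (void)(expr)  /* keep side effects, drop the check */
#endif

int main(void)
{
    int fd = 3;

    MY_ASSERT(fd >= 0);   /* passes in either build                     */
    MY_ASSERT(fd < 0);    /* aborts only when NEED_ASSERT is defined    */
    return 0;
}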
-+@Input pszFile The name of the source file where the assertion failed -+@Input ui32Line The line number of the failed assertion -+@Input pszAssertion String describing the assertion -+@Return NEVER! -+*/ /**************************************************************************/ -+IMG_EXPORT void IMG_CALLCONV __noreturn -+PVRSRVDebugAssertFail(const IMG_CHAR *pszFile, -+ IMG_UINT32 ui32Line, -+ const IMG_CHAR *pszAssertion); -+ -+#define PVR_ASSERT(EXPR) do \ -+ { \ -+ if (unlikely(!(EXPR))) \ -+ { \ -+ PVRSRVDebugAssertFail(__FILE__, __LINE__, #EXPR); \ -+ } \ -+ } while (false) -+ -+#endif /* defined(__linux__) && defined(__KERNEL__) */ -+#endif /* defined(_WIN32) */ -+#endif /* defined(__KLOCWORK__) */ -+ -+#if defined(__KLOCWORK__) -+ #define PVR_DBG_BREAK do { klocwork_abort(); } while (false) -+#else -+ #if defined(WIN32) -+ #define PVR_DBG_BREAK __debugbreak() /*!< Implementation of PVR_DBG_BREAK for (non-WinCE) Win32 */ -+ #else -+ #if defined(PVR_DBG_BREAK_ASSERT_FAIL) -+ /*!< Implementation of PVR_DBG_BREAK that maps onto PVRSRVDebugAssertFail */ -+ #if defined(_WIN32) -+ #define PVR_DBG_BREAK DBG_BREAK -+ #else -+ #if defined(__linux__) && defined(__KERNEL__) -+ #define PVR_DBG_BREAK BUG() -+ #else -+ #define PVR_DBG_BREAK PVRSRVDebugAssertFail(__FILE__, __LINE__, "PVR_DBG_BREAK") -+ #endif -+ #endif -+ #else -+ /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */ -+ #define PVR_DBG_BREAK -+ #endif -+ #endif -+#endif -+ -+ -+#else /* defined(PVRSRV_NEED_PVR_ASSERT) */ -+ /* Unfortunately the Klocwork static analysis checker doesn't understand our -+ * ASSERT macros. Thus it reports lots of false positive. Defining our Assert -+ * macros in a special way when the code is analysed by Klocwork avoids -+ * them. -+ */ -+ #if defined(__KLOCWORK__) -+ #define PVR_ASSERT(EXPR) do { if (!(EXPR)) {klocwork_abort();} } while (false) -+ #else -+ #define PVR_ASSERT(EXPR) (void)(EXPR) /*!< Null Implementation of PVR_ASSERT (does nothing) */ -+ #endif -+ -+ #define PVR_DBG_BREAK /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */ -+ -+#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */ -+ -+ -+/* PVR_DPF() handling */ -+ -+#if defined(PVRSRV_NEED_PVR_DPF) || defined(DOXYGEN) -+ -+ /* New logging mechanism */ -+ #define PVR_DBG_FATAL DBGPRIV_FATAL /*!< Debug level passed to PVRSRVDebugPrintf() for fatal errors. */ -+ #define PVR_DBG_ERROR DBGPRIV_ERROR /*!< Debug level passed to PVRSRVDebugPrintf() for non-fatal errors. */ -+ #define PVR_DBG_WARNING DBGPRIV_WARNING /*!< Debug level passed to PVRSRVDebugPrintf() for warnings. */ -+ #define PVR_DBG_MESSAGE DBGPRIV_MESSAGE /*!< Debug level passed to PVRSRVDebugPrintf() for information only. */ -+ #define PVR_DBG_VERBOSE DBGPRIV_VERBOSE /*!< Debug level passed to PVRSRVDebugPrintf() for very low-priority debug. */ -+ #define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE /*!< Debug level passed to PVRSRVDebugPrintf() for function tracing purposes. */ -+ #define PVR_DBG_ALLOC DBGPRIV_ALLOC /*!< Debug level passed to PVRSRVDebugPrintf() for tracking some of drivers memory operations. */ -+ #define PVR_DBG_BUFFERED DBGPRIV_BUFFERED /*!< Debug level passed to PVRSRVDebugPrintf() when debug should be written to the debug circular buffer. */ -+ #define PVR_DBG_DEBUG DBGPRIV_DEBUG /*!< Debug level passed to PVRSRVDebugPrintf() for debug messages. */ -+ -+ /* These levels are always on with PVRSRV_NEED_PVR_DPF */ -+ /*! @cond Doxygen_Suppress */ -+ #define PVR_DPF_0x001U(...) PVRSRVDebugPrintf(DBGPRIV_FATAL, __VA_ARGS__) -+ #define PVR_DPF_0x002U(...) 
PVRSRVDebugPrintf(DBGPRIV_ERROR, __VA_ARGS__) -+ #define PVR_DPF_0x080U(...) PVRSRVDebugPrintf(DBGPRIV_BUFFERED, __VA_ARGS__) -+ -+ /* -+ * The AdHoc-Debug level is only supported when enabled in the local -+ * build environment and may need to be used in both debug and release -+ * builds. An error is generated in the formal build if it is checked in. -+ */ -+#if defined(PVR_DPF_ADHOC_DEBUG_ON) -+ #define PVR_DPF_0x100U(...) PVRSRVDebugPrintf(DBGPRIV_DEBUG, __VA_ARGS__) -+#else -+ /* Use an undefined token here to stop compilation dead in the offending module */ -+ #define PVR_DPF_0x100U(...) __ERROR__PVR_DBG_DEBUG_is_in_use_but_has_not_been_enabled__Note_Debug_DPF_must_not_be_checked_in__Define_PVR_DPF_ADHOC_DEBUG_ON_for_testing -+#endif -+ -+ /* Some are compiled out completely in release builds */ -+#if defined(DEBUG) || defined(DOXYGEN) -+ #define PVR_DPF_0x004U(...) PVRSRVDebugPrintf(DBGPRIV_WARNING, __VA_ARGS__) -+ #define PVR_DPF_0x008U(...) PVRSRVDebugPrintf(DBGPRIV_MESSAGE, __VA_ARGS__) -+ #define PVR_DPF_0x010U(...) PVRSRVDebugPrintf(DBGPRIV_VERBOSE, __VA_ARGS__) -+ #define PVR_DPF_0x020U(...) PVRSRVDebugPrintf(DBGPRIV_CALLTRACE, __VA_ARGS__) -+ #define PVR_DPF_0x040U(...) PVRSRVDebugPrintf(DBGPRIV_ALLOC, __VA_ARGS__) -+#else -+ #define PVR_DPF_0x004U(...) -+ #define PVR_DPF_0x008U(...) -+ #define PVR_DPF_0x010U(...) -+ #define PVR_DPF_0x020U(...) -+ #define PVR_DPF_0x040U(...) -+#endif -+ -+ /* Translate the different log levels to separate macros -+ * so they can each be compiled out. -+ */ -+#if defined(DEBUG) -+ #define PVR_DPF_EX(lvl, ...) PVR_DPF_ ## lvl (__FILE__, __LINE__, __VA_ARGS__) -+#else -+ #define PVR_DPF_EX(lvl, ...) PVR_DPF_ ## lvl ("", __LINE__, __VA_ARGS__) -+#endif -+ /*! @endcond */ -+ -+ /* Get rid of the double bracketing */ -+ #define PVR_DPF(x) PVR_DPF_EX x -+ -+ #define PVR_LOG_ERROR(_rc, _call) do \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ -+ } while (false) -+ -+ #define PVR_LOG_IF_ERROR(_rc, _call) do \ -+ { \ -+ if (unlikely(_rc != PVRSRV_OK)) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_WARN_IF_ERROR(_rc, _call) do \ -+ { \ -+ if (unlikely(_rc != PVRSRV_OK)) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_WARNING, _rc, __LINE__); \ -+ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_RETURN_IF_NOMEM(_expr, _call) do \ -+ { \ -+ if (unlikely(_expr == NULL)) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, PVRSRV_ERROR_OUT_OF_MEMORY, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", _call, __func__)); \ -+ return PVRSRV_ERROR_OUT_OF_MEMORY; \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_GOTO_IF_NOMEM(_expr, _err, _go) do \ -+ { \ -+ if (unlikely(_expr == NULL)) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, PVRSRV_ERROR_OUT_OF_MEMORY, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", #_expr, __func__)); \ -+ _err = PVRSRV_ERROR_OUT_OF_MEMORY; \ -+ goto _go; \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_RETURN_IF_ERROR(_rc, _call) do \ -+ { \ -+ if (unlikely(_rc != PVRSRV_OK)) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, 
_rc, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ -+ return _rc; \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_RETURN_VOID_IF_ERROR(_rc, _call) do \ -+ { \ -+ if (unlikely(_rc != PVRSRV_OK)) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ -+ return; \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_GOTO_IF_ERROR(_rc, _call, _go) do \ -+ { \ -+ if (unlikely(_rc != PVRSRV_OK)) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ -+ goto _go; \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_GOTO_WITH_ERROR(_call, _err, _rc, _go) do \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ -+ _err = _rc; \ -+ goto _go; \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_IF_FALSE(_expr, _msg) do \ -+ { \ -+ if (unlikely(!(_expr))) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_COND_ERROR_F, PVRSRV_ERROR_UNEXPECTED_FALSE_EXPR, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_RETURN_IF_FALSE(_expr, _msg, _rc) do \ -+ { \ -+ if (unlikely(!(_expr))) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_COND_ERROR_F, PVRSRV_ERROR_UNEXPECTED_FALSE_EXPR, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ -+ return _rc; \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_RETURN_VOID_IF_FALSE(_expr, _msg) do \ -+ { \ -+ if (unlikely(!(_expr))) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_COND_ERROR_F, PVRSRV_ERROR_UNEXPECTED_FALSE_EXPR, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ -+ return; \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_GOTO_IF_FALSE(_expr, _msg, _go) do \ -+ { \ -+ if (unlikely(!(_expr))) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_COND_ERROR_F, PVRSRV_ERROR_UNEXPECTED_FALSE_EXPR, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ -+ goto _go; \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_RETURN_IF_TRUE(_expr, _msg, _rc) do \ -+ { \ -+ if (unlikely((_expr))) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_COND_ERROR_T, PVRSRV_ERROR_UNEXPECTED_TRUE_EXPR, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ -+ return _rc; \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_RETURN_IF_INVALID_PARAM(_expr, _param) do \ -+ { \ -+ if (unlikely(!(_expr))) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, PVRSRV_ERROR_INVALID_PARAMS, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, "%s invalid in %s()", _param, __func__)); \ -+ return PVRSRV_ERROR_INVALID_PARAMS; \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do \ -+ { if (unlikely(!(_expr))) { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, PVRSRV_ERROR_INVALID_PARAMS, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, "%s invalid in %s()", #_expr, __func__)); \ -+ _err = PVRSRV_ERROR_INVALID_PARAMS; \ -+ goto _go; } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_MSG(_lvl, _msg) do \ -+ { \ -+ if (_lvl == PVR_DBG_ERROR) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, PVRSRV_ERROR_NOT_SUPPORTED, 
__LINE__); \ -+ } \ -+ else if (_lvl == PVR_DBG_WARNING) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_WARNING, PVRSRV_ERROR_NOT_SUPPORTED, __LINE__); \ -+ } \ -+ PVR_DPF((_lvl, ("In %s() "_msg), __func__)); \ -+ } while (false) -+ -+ #define PVR_LOG_VA(_lvl, _msg, ...) do \ -+ { \ -+ if (_lvl == PVR_DBG_ERROR) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, PVRSRV_ERROR_NOT_SUPPORTED, __LINE__); \ -+ } \ -+ else if (_lvl == PVR_DBG_WARNING) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_WARNING, PVRSRV_ERROR_NOT_SUPPORTED, __LINE__); \ -+ } \ -+ PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)); \ -+ } while (false) -+ -+ #define PVR_LOG_IF_ERROR_VA(_lvl, _rc, _msg, ...) do \ -+ { \ -+ if (unlikely(_rc != PVRSRV_OK)) \ -+ { \ -+ if (_lvl == PVR_DBG_ERROR) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, __LINE__); \ -+ } \ -+ else if (_lvl == PVR_DBG_WARNING) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_WARNING, _rc, __LINE__); \ -+ } \ -+ PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)); \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_IF_FALSE_VA(_lvl, _expr, _msg, ...) do \ -+ { \ -+ if (unlikely(!(_expr))) \ -+ { \ -+ if (_lvl == PVR_DBG_ERROR) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, PVRSRV_ERROR_NOT_SUPPORTED, __LINE__); \ -+ } \ -+ else if (_lvl == PVR_DBG_WARNING) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_WARNING, PVRSRV_ERROR_NOT_SUPPORTED, __LINE__); \ -+ } \ -+ PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)); \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_RETURN_IF_ERROR_VA(_rc, _msg, ...) do \ -+ { \ -+ if (unlikely(_rc != PVRSRV_OK)) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ -+ return _rc; \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_GOTO_IF_ERROR_VA(_rc, _go, _msg, ...) do \ -+ { \ -+ if (unlikely(_rc != PVRSRV_OK)) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ -+ goto _go; \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_RETURN_IF_FALSE_VA(_expr, _rc, _msg, ...) do \ -+ { \ -+ if (unlikely(!(_expr))) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, ("At %s: "_msg), __func__, __VA_ARGS__)); \ -+ return _rc; \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_GOTO_IF_FALSE_VA(_expr, _go, _msg, ...) do \ -+ { \ -+ if (unlikely(!(_expr))) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, PVRSRV_ERROR_UNEXPECTED_FALSE_EXPR, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ -+ goto _go; \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+ #define PVR_LOG_RETURN_IF_TRUE_VA(_expr, _rc, _msg, ...) do \ -+ { \ -+ if (unlikely((_expr))) \ -+ { \ -+ HTBLOGK(HTB_SF_MAIN_DBG_ERROR, _rc, __LINE__); \ -+ PVR_DPF((PVR_DBG_ERROR, ("At %s: "_msg), __func__, __VA_ARGS__)); \ -+ return _rc; \ -+ } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+#else /* defined(PVRSRV_NEED_PVR_DPF) */ -+ -+ #define PVR_DPF(X) /*!< Null Implementation of PowerVR Debug Printf (does nothing) */ -+ -+ #define PVR_LOG_MSG(_lvl, _msg) -+ #define PVR_LOG_VA(_lvl, _msg, ...) -+ #define PVR_LOG_ERROR(_rc, _call) (void)(_rc) -+ #define PVR_LOG_IF_ERROR(_rc, _call) (void)(_rc) -+ #define PVR_WARN_IF_ERROR(_rc, _call) (void)(_rc) -+ -+ #define PVR_LOG_IF_ERROR_VA(_lvl, _rc, _msg, ...) (void)(_rc) -+ #define PVR_LOG_IF_FALSE_VA(_lvl, _expr, _msg, ...) 
(void)(_expr) -+ -+ #define PVR_LOG_RETURN_IF_NOMEM(_expr, _call) do { if (unlikely(_expr == NULL)) { return PVRSRV_ERROR_OUT_OF_MEMORY; } MSC_SUPPRESS_4127 } while (false) -+ #define PVR_LOG_GOTO_IF_NOMEM(_expr, _err, _go) do { if (unlikely(_expr == NULL)) { _err = PVRSRV_ERROR_OUT_OF_MEMORY; goto _go; } MSC_SUPPRESS_4127 } while (false) -+ -+ #define PVR_LOG_RETURN_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return (_rc); } MSC_SUPPRESS_4127 } while (false) -+ #define PVR_LOG_RETURN_IF_ERROR_VA(_rc, _msg, ...) do { if (unlikely(_rc != PVRSRV_OK)) { return (_rc); } MSC_SUPPRESS_4127 } while (false) -+ #define PVR_LOG_RETURN_VOID_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return; } MSC_SUPPRESS_4127 } while (false) -+ -+ #define PVR_LOG_GOTO_IF_ERROR(_rc, _call, _go) do { if (unlikely(_rc != PVRSRV_OK)) { goto _go; } MSC_SUPPRESS_4127 } while (false) -+ #define PVR_LOG_GOTO_IF_ERROR_VA(_rc, _go, _msg, ...) do { if (unlikely(_rc != PVRSRV_OK)) { goto _go; } MSC_SUPPRESS_4127 } while (false) -+ #define PVR_LOG_GOTO_WITH_ERROR(_call, _err, _rc, _go) do { _err = _rc; goto _go; MSC_SUPPRESS_4127 } while (false) -+ -+ #define PVR_LOG_IF_FALSE(_expr, _msg) (void)(_expr) -+ #define PVR_LOG_RETURN_IF_FALSE(_expr, _msg, _rc) do { if (unlikely(!(_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while (false) -+ #define PVR_LOG_RETURN_IF_FALSE_VA(_expr, _rc, _msg, ...) do { if (unlikely(!(_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while (false) -+ -+ #define PVR_LOG_RETURN_IF_TRUE(_expr, _msg, _rc) do { if (unlikely((_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while (false) -+ #define PVR_LOG_RETURN_IF_TRUE_VA(_expr, _rc, _msg, ...) do { if (unlikely((_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while (false) -+ -+ #define PVR_LOG_RETURN_VOID_IF_FALSE(_expr, _msg) do { if (unlikely(!(_expr))) { return; } MSC_SUPPRESS_4127 } while (false) -+ #define PVR_LOG_GOTO_IF_FALSE(_expr, _msg, _go) do { if (unlikely(!(_expr))) { goto _go; } MSC_SUPPRESS_4127 } while (false) -+ #define PVR_LOG_GOTO_IF_FALSE_VA(_expr, _go, _msg, ...) do { if (unlikely(!(_expr))) { goto _go; } MSC_SUPPRESS_4127 } while (false) -+ -+ #define PVR_LOG_RETURN_IF_INVALID_PARAM(_expr, _param) do { if (unlikely(!(_expr))) { return PVRSRV_ERROR_INVALID_PARAMS; } MSC_SUPPRESS_4127 } while (false) -+ #define PVR_LOG_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do { if (unlikely(!(_expr))) { _err = PVRSRV_ERROR_INVALID_PARAMS; goto _go; } MSC_SUPPRESS_4127 } while (false) -+ -+ #undef PVR_DPF_FUNCTION_TRACE_ON -+ -+#endif /* defined(PVRSRV_NEED_PVR_DPF) */ -+ -+/*************************************************************************/ /*! -+@Function PVRSRVDebugPrintf -+@Description Output a debug message to the user, using an OS-specific -+ method, to a log or console which can be read by developers -+ Invoked from the macro PVR_DPF(). -+@Input ui32DebugLevel The debug level of the message. This can -+ be used to restrict the output of debug -+ messages based on their severity. -+ If this is PVR_DBG_BUFFERED, the message -+ should be written into a debug circular -+ buffer instead of being output immediately -+ (useful when performance would otherwise -+ be adversely affected). -+ The debug circular buffer shall only be -+ output when PVRSRVDebugPrintfDumpCCB() is -+ called. -+@Input pszFileName The source file containing the code that is -+ generating the message -+@Input ui32Line The line number in the source file -+@Input pszFormat The formatted message string -+@Input ... 
Zero or more arguments for use by the -+ formatted string -+@Return None -+*/ /**************************************************************************/ -+IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel, -+ const IMG_CHAR *pszFileName, -+ IMG_UINT32 ui32Line, -+ const IMG_CHAR *pszFormat, -+ ...) __printf(4, 5); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVDebugPrintfDumpCCB -+@Description When PVRSRVDebugPrintf() is called with the ui32DebugLevel -+ specified as DBGPRIV_BUFFERED, the debug shall be written to -+ the debug circular buffer instead of being output immediately. -+ (This could be used to obtain debug without incurring a -+ performance hit by printing it at that moment). -+ This function shall dump the contents of that debug circular -+ buffer to be output in an OS-specific method to a log or -+ console which can be read by developers. -+@Return None -+*/ /**************************************************************************/ -+IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintfDumpCCB(void); -+ -+#if !defined(DOXYGEN) -+#define PVR_DPF_FUNC__(lvl, message, ...) PVR_DPF((lvl, "%s: " message, __func__, ##__VA_ARGS__)) -+#define PVR_DPF_FUNC(x) PVR_DPF_FUNC__ x -+#endif -+ -+/* Note: Use only when a log message due to the error absolutely should not -+ * be printed. Otherwise use PVR_LOG_RETURN_IF_ERROR macro. -+ */ -+#define PVR_RETURN_IF_ERROR(_rc) do \ -+ { if (unlikely(_rc != PVRSRV_OK)) { \ -+ return _rc; } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+/* Note: Use only when a log message due to the error absolutely should not -+ * be printed. Otherwise use PVR_LOG_RETURN_IF_FALSE macro. -+ */ -+#define PVR_RETURN_IF_FALSE(_expr, _rc) do \ -+ { if (unlikely(!(_expr))) { \ -+ return _rc; } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+/* Note: Use only when a log message due to the error absolutely should not -+ * be printed. Otherwise use PVR_LOG_RETURN_IF_INVALID_PARAM macro. -+ */ -+#define PVR_RETURN_IF_INVALID_PARAM(_expr) do \ -+ { if (unlikely(!(_expr))) { \ -+ return PVRSRV_ERROR_INVALID_PARAMS; } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+/* Note: Use only when a log message due to the error absolutely should not -+ * be printed. Otherwise use PVR_LOG_RETURN_IF_NOMEM macro. -+ */ -+#define PVR_RETURN_IF_NOMEM(_expr) do \ -+ { if (unlikely(!(_expr))) { \ -+ return PVRSRV_ERROR_OUT_OF_MEMORY; } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+/* Note: Use only when a log message due to the error absolutely should not -+ * be printed. Otherwise use PVR_LOG_GOTO_IF_NOMEM macro. -+ */ -+#define PVR_GOTO_IF_NOMEM(_expr, _err, _go) do \ -+ { if (unlikely(_expr == NULL)) { \ -+ _err = PVRSRV_ERROR_OUT_OF_MEMORY; \ -+ goto _go; } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+/* Note: Use only when a log message due to the error absolutely should not -+ * be printed. Otherwise use PVR_LOG_GOTO_IF_INVALID_PARAM macro. -+ */ -+#define PVR_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do \ -+ { if (unlikely(!(_expr))) { \ -+ _err = PVRSRV_ERROR_INVALID_PARAMS; \ -+ goto _go; } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+/* Note: Use only when a log message due to the error absolutely should not -+ * be printed. Otherwise use PVR_LOG_GOTO_IF_FALSE macro. 
-+ */ -+#define PVR_GOTO_IF_FALSE(_expr, _go) do \ -+ { if (unlikely(!(_expr))) { \ -+ goto _go; } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+/* Note: Use only when a log message due to the error absolutely should not -+ * be printed. Otherwise use PVR_LOG_GOTO_IF_ERROR macro. -+ */ -+#define PVR_GOTO_IF_ERROR(_rc, _go) do \ -+ { if (unlikely(_rc != PVRSRV_OK)) { \ -+ goto _go; } \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+/* Note: Use only when a log message due to the error absolutely should not -+ * be printed. Otherwise use PVR_LOG_GOTO_WITH_ERROR macro. -+ */ -+#define PVR_GOTO_WITH_ERROR(_err, _rc, _go) do \ -+ { _err = _rc; goto _go; \ -+ MSC_SUPPRESS_4127 \ -+ } while (false) -+ -+/*! @cond Doxygen_Suppress */ -+#if defined(PVR_DPF_FUNCTION_TRACE_ON) -+ -+ #define PVR_DPF_ENTERED \ -+ PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered", __func__, __LINE__)) -+ -+ #define PVR_DPF_ENTERED1(p1) \ -+ PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered (0x%lx)", __func__, __LINE__, ((unsigned long)p1))) -+ -+ #define PVR_DPF_RETURN_RC(a) \ -+ do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d", __func__, __LINE__, (_r))); return (_r); MSC_SUPPRESS_4127 } while (false) -+ -+ #define PVR_DPF_RETURN_RC1(a,p1) \ -+ do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d (0x%lx)", __func__, __LINE__, (_r), ((unsigned long)p1))); return (_r); MSC_SUPPRESS_4127 } while (false) -+ -+ #define PVR_DPF_RETURN_VAL(a) \ -+ do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned with value", __func__, __LINE__)); return (a); MSC_SUPPRESS_4127 } while (false) -+ -+ #define PVR_DPF_RETURN_OK \ -+ do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned ok", __func__, __LINE__)); return PVRSRV_OK; MSC_SUPPRESS_4127 } while (false) -+ -+ #define PVR_DPF_RETURN \ -+ do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned", __func__, __LINE__)); return; MSC_SUPPRESS_4127 } while (false) -+ -+ #if !defined(DEBUG) -+ #error PVR DPF Function trace enabled in release build, rectify -+ #endif -+ -+#else /* defined(PVR_DPF_FUNCTION_TRACE_ON) */ -+ -+ #define PVR_DPF_ENTERED -+ #define PVR_DPF_ENTERED1(p1) -+ #define PVR_DPF_RETURN_RC(a) return (a) -+ #define PVR_DPF_RETURN_RC1(a,p1) return (a) -+ #define PVR_DPF_RETURN_VAL(a) return (a) -+ #define PVR_DPF_RETURN_OK return PVRSRV_OK -+ #define PVR_DPF_RETURN return -+ -+#endif /* defined(PVR_DPF_FUNCTION_TRACE_ON) */ -+/*! @endcond */ -+ -+#if defined(__KERNEL__) || defined(DOXYGEN) || defined(__QNXNTO__) -+/*Use PVR_DPF() unless message is necessary in release build */ -+#define PVR_LOG(X) PVRSRVReleasePrintf X -+ -+/*************************************************************************/ /*! -+@Function PVRSRVReleasePrintf -+@Description Output an important message, using an OS-specific method, -+ to the Server log or console which will always be output in -+ both release and debug builds. -+ Invoked from the macro PVR_LOG(). Used in Services Server only. -+@Input pszFormat The message format string -+@Input ... Zero or more arguments for use by the format string -+@Return None -+*/ /**************************************************************************/ -+void IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) __printf(1, 2); -+ -+ /*************************************************************************/ /*! -+ @Function PVRSRVReleasePrintfVArgs -+ @Description Output an important message, using an OS-specific method, -+ to the Server log or console which will always be output in -+ both release and debug builds. 
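The PVR_DPF((level, fmt, ...)) call style defined above works because the outer macro strips the extra parentheses only after the level constant has been macro-expanded, so the numeric level can then be token-pasted into a per-level helper that is individually compiled out. The miniature below reproduces just that preprocessor trick with hypothetical LOG()/LOG_EX() names and fprintf() instead of PVRSRVDebugPrintf(); build with -DDEBUG to keep the verbose level.

/*
 * Double-bracket logging macro: LOG((LOG_ERROR, "fmt", ...)) passes the
 * whole parenthesised list as one argument, which is expanded and then
 * re-applied to LOG_EX(), whose token paste selects LOG_0x1()/LOG_0x2().
 * Illustrative user-space code only.
 */
#include <stdio.h>

#define LOG_ERROR   0x1
#define LOG_VERBOSE 0x2

/* One helper per level; the verbose one compiles out without -DDEBUG. */
#define LOG_0x1(fmt, ...) fprintf(stderr, "error: " fmt "\n", ##__VA_ARGS__)
#if defined(DEBUG)
#define LOG_0x2(fmt, ...) fprintf(stderr, "verbose: " fmt "\n", ##__VA_ARGS__)
#else
#define LOG_0x2(fmt, ...)
#endif

#define LOG_EX(lvl, ...) LOG_ ## lvl(__VA_ARGS__)
#define LOG(x) LOG_EX x              /* x is "(level, fmt, args...)" */

int main(void)
{
    LOG((LOG_ERROR, "device %d failed", 3));   /* always printed            */
    LOG((LOG_VERBOSE, "probing device %d", 3)); /* printed only with -DDEBUG */
    return 0;
}

The double bracket at the call site is what lets a variadic message travel through the single-argument LOG() macro, mirroring how PVR_DPF() forwards to PVR_DPF_EX.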
-+ Calls to va_start and va_end should wrap this function when -+ passing in va_list args. -+ @Input pszFormat The message format string -+ @Input vaArgs va_list arguments to print using pszFormat. -+ @Return None -+ */ /**************************************************************************/ -+void IMG_CALLCONV PVRSRVReleasePrintfVArgs(const IMG_CHAR *pszFormat, va_list vaArgs); -+#endif -+ -+/* PVR_TRACE() handling */ -+ -+#if defined(PVRSRV_NEED_PVR_TRACE) || defined(DOXYGEN) -+ -+ #define PVR_TRACE(X) PVRSRVTrace X /*!< PowerVR Debug Trace Macro */ -+ /* Empty string implementation that is -O0 build friendly */ -+ #define PVR_TRACE_EMPTY_LINE() PVR_TRACE(("%s", "")) -+ -+/*************************************************************************/ /*! -+@Function PVRTrace -+@Description Output a debug message to the user -+ Invoked from the macro PVR_TRACE(). -+@Input pszFormat The message format string -+@Input ... Zero or more arguments for use by the format string -+*/ /**************************************************************************/ -+IMG_EXPORT void IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... ) -+ __printf(1, 2); -+ -+#else /* defined(PVRSRV_NEED_PVR_TRACE) */ -+ /*! Null Implementation of PowerVR Debug Trace Macro (does nothing) */ -+ #define PVR_TRACE(X) -+ -+#endif /* defined(PVRSRV_NEED_PVR_TRACE) */ -+ -+ -+#if defined(PVRSRV_NEED_PVR_ASSERT) -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(TRUNCATE_64BITS_TO_32BITS) -+#endif -+ INLINE static IMG_UINT32 TRUNCATE_64BITS_TO_32BITS(IMG_UINT64 uiInput) -+ { -+ IMG_UINT32 uiTruncated; -+ -+ uiTruncated = (IMG_UINT32)uiInput; -+ PVR_ASSERT(uiInput == uiTruncated); -+ return uiTruncated; -+ } -+ -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(TRUNCATE_64BITS_TO_SIZE_T) -+#endif -+ INLINE static size_t TRUNCATE_64BITS_TO_SIZE_T(IMG_UINT64 uiInput) -+ { -+ size_t uiTruncated; -+ -+ uiTruncated = (size_t)uiInput; -+ PVR_ASSERT(uiInput == uiTruncated); -+ return uiTruncated; -+ } -+ -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(TRUNCATE_SIZE_T_TO_32BITS) -+#endif -+ INLINE static IMG_UINT32 TRUNCATE_SIZE_T_TO_32BITS(size_t uiInput) -+ { -+ IMG_UINT32 uiTruncated; -+ -+ uiTruncated = (IMG_UINT32)uiInput; -+ PVR_ASSERT(uiInput == uiTruncated); -+ return uiTruncated; -+ } -+ -+ -+#else /* defined(PVRSRV_NEED_PVR_ASSERT) */ -+ #define TRUNCATE_64BITS_TO_32BITS(expr) ((IMG_UINT32)(expr)) -+ #define TRUNCATE_64BITS_TO_SIZE_T(expr) ((size_t)(expr)) -+ #define TRUNCATE_SIZE_T_TO_32BITS(expr) ((IMG_UINT32)(expr)) -+#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */ -+ -+/*! @cond Doxygen_Suppress */ -+/* Macros used to trace calls */ -+#if defined(DEBUG) -+ #define PVR_DBG_FILELINE , (__FILE__), (__LINE__) -+ #define PVR_DBG_FILELINE_PARAM , const IMG_CHAR *pszaFile, IMG_UINT32 ui32Line -+ #define PVR_DBG_FILELINE_ARG , pszaFile, ui32Line -+ #define PVR_DBG_FILELINE_FMT " %s:%u" -+ #define PVR_DBG_FILELINE_UNREF() do { PVR_UNREFERENCED_PARAMETER(pszaFile); \ -+ PVR_UNREFERENCED_PARAMETER(ui32Line); } while (false) -+#else -+ #define PVR_DBG_FILELINE -+ #define PVR_DBG_FILELINE_PARAM -+ #define PVR_DBG_FILELINE_ARG -+ #define PVR_DBG_FILELINE_FMT -+ #define PVR_DBG_FILELINE_UNREF() -+#endif -+/*! @endcond */ -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+/*! -+ @def PVR_ASSERT -+ @brief Aborts the program if assertion fails. -+ -+ The macro will be defined only when PVRSRV_NEED_PVR_ASSERT macro is -+ enabled. It's ignored otherwise. -+ -+ @def PVR_DPF -+ @brief PowerVR Debug Printf logging macro used throughout the driver. 
-+ -+ The macro allows to print logging messages to appropriate log. The -+ destination log is based on the component (user space / kernel space) and -+ operating system (Linux, Android, etc.). -+ -+ The macro also supports severity levels that allow to turn on/off messages -+ based on their importance. -+ -+ This macro will print messages with severity level higher that error only -+ if PVRSRV_NEED_PVR_DPF macro is defined. -+ -+ @def PVR_LOG_ERROR -+ @brief Logs error. -+ -+ @def PVR_LOG_IF_ERROR -+ @brief Logs error if not PVRSRV_OK. -+ -+ @def PVR_WARN_IF_ERROR -+ @brief Logs warning if not PVRSRV_OK. -+ -+ @def PVR_LOG_RETURN_IF_NOMEM -+ @brief Logs error if expression is NULL and returns PVRSRV_ERROR_OUT_OF_MEMORY. -+ -+ @def PVR_LOG_GOTO_IF_NOMEM -+ @brief Logs error if expression is NULL and jumps to given label. -+ -+ @def PVR_LOG_RETURN_IF_ERROR -+ @brief Logs error if not PVRSRV_OK and returns the error. -+ -+ @def PVR_LOG_RETURN_VOID_IF_ERROR -+ @brief Logs error if not PVRSRV_OK and returns (used in function that return void). -+ -+ @def PVR_LOG_GOTO_IF_ERROR -+ @brief Logs error if not PVRSRV_OK and jumps to label. -+ -+ @def PVR_LOG_GOTO_WITH_ERROR -+ @brief Logs error, goes to a label and sets the error code. -+ -+ @def PVR_LOG_IF_FALSE -+ @brief Prints error message if expression is false. -+ -+ @def PVR_LOG_RETURN_IF_FALSE -+ @brief Prints error message if expression is false and returns given error. -+ -+ @def PVR_LOG_RETURN_VOID_IF_FALSE -+ @brief Prints error message if expression is false and returns (used in function that return void). -+ -+ @def PVR_LOG_GOTO_IF_FALSE -+ @brief Prints error message if expression is false and jumps to label. -+ -+ @def PVR_LOG_RETURN_IF_TRUE -+ @brief Prints error message if expression is true and returns given error. -+ -+ @def PVR_LOG_RETURN_IF_INVALID_PARAM -+ @brief Prints error message if expression is false and returns PVRSRV_ERROR_INVALID_PARAMS. -+ -+ @def PVR_LOG_GOTO_IF_INVALID_PARAM -+ @brief Prints error message if expression is false and jumps to label. -+ -+ @def PVR_RETURN_IF_ERROR -+ @brief Returns passed error code if it's different than PVRSRV_OK; -+ -+ @def PVR_RETURN_IF_FALSE -+ @brief Returns passed error code if expression is false. -+ -+ @def PVR_RETURN_IF_INVALID_PARAM -+ @brief Returns PVRSRV_ERROR_INVALID_PARAMS if expression is false. -+ -+ @def PVR_RETURN_IF_NOMEM -+ @brief Returns PVRSRV_ERROR_OUT_OF_MEMORY if expression is NULL. -+ -+ @def PVR_GOTO_IF_NOMEM -+ @brief Goes to a label if expression is NULL. -+ -+ @def PVR_GOTO_IF_INVALID_PARAM -+ @brief Goes to a label if expression is false. -+ -+ @def PVR_GOTO_IF_FALSE -+ @brief Goes to a label if expression is false. -+ -+ @def PVR_GOTO_IF_ERROR -+ @brief Goes to a label if the error code is different than PVRSRV_OK; -+ -+ @def PVR_GOTO_WITH_ERROR -+ @brief Goes to a label and sets the error code. -+ -+ @def PVR_LOG -+ @brief Prints message to a log unconditionally. -+ -+ This macro will print messages only if PVRSRV_NEED_PVR_LOG macro is defined. -+ @def PVR_LOG_MSG -+ @brief Prints message to a log with the given log-level. -+ -+ @def PVR_LOG_VA -+ @brief Prints message with var-args to a log with the given log-level. -+ -+ @def PVR_LOG_IF_ERROR_VA -+ @brief Prints message with var-args to a log if the error code is different than PVRSRV_OK. -+ -+ @def PVR_LOG_IF_FALSE_VA -+ @brief Prints message with var-args if expression is false. -+ -+ @def PVR_LOG_RETURN_IF_ERROR_VA -+ @brief Prints message with var-args to a log and returns the error code. 
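 A minimal sketch of the intended error-handling pattern using the macros
 above (the function, helper, size and label names are hypothetical; OSAllocMem
 and OSFreeMem are the allocation helpers used elsewhere in this series):

     static PVRSRV_ERROR ExampleCreateObject(void *pvConfig, void **ppvOut)
     {
         PVRSRV_ERROR eError;
         void *pvObj;

         PVR_LOG_RETURN_IF_INVALID_PARAM(ppvOut != NULL, "ppvOut");

         pvObj = OSAllocMem(EXAMPLE_OBJ_SIZE);          // hypothetical size
         PVR_LOG_GOTO_IF_NOMEM(pvObj, eError, return_); // logs, sets eError, jumps on NULL

         eError = ExampleInitObject(pvObj, pvConfig);   // hypothetical helper
         PVR_GOTO_IF_ERROR(eError, free_obj_);          // jump without logging

         *ppvOut = pvObj;
         return PVRSRV_OK;

     free_obj_:
         OSFreeMem(pvObj);
     return_:
         return eError;
     }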
-+ -+ @def PVR_LOG_GOTO_IF_ERROR_VA -+ @brief Prints message with var-args to a log and goes to a label if the error code is different than PVRSRV_OK. -+ -+ @def PVR_LOG_RETURN_IF_FALSE_VA -+ @brief Logs the error message with var-args if the expression is false and returns the error code. -+ -+ @def PVR_LOG_GOTO_IF_FALSE_VA -+ @brief Logs the error message with var-args and goes to a label if the expression is false. -+ -+ @def PVR_LOG_RETURN_IF_TRUE_VA -+ @brief Logs the error message with var-args if the expression is true and returns the error code. -+ -+ @def PVR_TRACE_EMPTY_LINE -+ @brief Prints empty line to a log (PVRSRV_NEED_PVR_LOG must be defined). -+ -+ @def TRUNCATE_64BITS_TO_32BITS -+ @brief Truncates 64 bit value to 32 bit value (with possible precision loss). -+ -+ @def TRUNCATE_64BITS_TO_SIZE_T -+ @brief Truncates 64 bit value to size_t value (with possible precision loss). -+ -+ @def TRUNCATE_SIZE_T_TO_32BITS -+ @brief Truncates size_t value to 32 bit value (with possible precision loss). -+ */ -+ -+#endif /* PVR_DEBUG_H */ -+ -+/****************************************************************************** -+ End of file (pvr_debug.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/pvr_debugfs.c b/drivers/gpu/drm/img-rogue/pvr_debugfs.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_debugfs.c -@@ -0,0 +1,622 @@ -+/*************************************************************************/ /*! -+@File -+@Title DebugFS implementation of Debug Info interface. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements osdi_impl.h API to provide access to driver's -+ debug data via DebugFS. -+ -+ Note about locking in DebugFS module. -+ -+ Access to DebugFS is protected against the race where any -+ file could be removed while being accessed or accessed while -+ being removed. Any calls to debugfs_remove() will block -+ until all operations are finished. -+ -+ See implementation of proxy file operations (FULL_PROXY_FUNC) -+ and implementation of debugfs_file_[get|put]() in -+ fs/debugfs/file.c in Linux kernel sources for more details. -+ -+ Not about locking for sequential files. -+ -+ The seq_file objects have a mutex that protects access -+ to all of the file operations hence all of the sequential -+ *read* operations are protected. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include -+#include -+#include -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include "pvr_debugfs.h" -+#include "osfunc.h" -+#include "allocmem.h" -+#include "pvr_bridge_k.h" -+#include "pvr_uaccess.h" -+#include "osdi_impl.h" -+ -+#define _DRIVER_THREAD_ENTER() \ -+ do { \ -+ PVRSRV_ERROR eLocalError = PVRSRVDriverThreadEnter(NULL); \ -+ if (eLocalError != PVRSRV_OK) \ -+ { \ -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVDriverThreadEnter failed: %s", \ -+ __func__, PVRSRVGetErrorString(eLocalError))); \ -+ return OSPVRSRVToNativeError(eLocalError); \ -+ } \ -+ } while (0) -+ -+#define _DRIVER_THREAD_EXIT() \ -+ PVRSRVDriverThreadExit(NULL) -+ -+#define PVR_DEBUGFS_PVR_DPF_LEVEL PVR_DBG_ERROR -+ -+typedef struct DFS_DIR -+{ -+ struct dentry *psDirEntry; -+ struct DFS_DIR *psParentDir; -+} DFS_DIR; -+ -+typedef struct DFS_ENTRY -+{ -+ OSDI_IMPL_ENTRY sImplEntry; -+ DI_ITERATOR_CB sIterCb; -+} DFS_ENTRY; -+ -+typedef struct DFS_FILE -+{ -+ struct dentry *psFileEntry; -+ struct DFS_DIR *psParentDir; -+ const struct seq_operations *psSeqOps; -+ struct DFS_ENTRY sEntry; -+ DI_ENTRY_TYPE eType; -+} DFS_FILE; -+ -+/* ----- native callbacks interface ----------------------------------------- */ -+ -+static void _WriteData(void *pvNativeHandle, const void *pvData, -+ IMG_UINT32 uiSize) -+{ -+ seq_write(pvNativeHandle, pvData, uiSize); -+} -+ -+static void _VPrintf(void *pvNativeHandle, const IMG_CHAR *pszFmt, -+ va_list pArgs) -+{ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) -+ seq_vprintf(pvNativeHandle, pszFmt, pArgs); -+#else -+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; -+ -+ vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFmt, pArgs); -+ seq_printf(pvNativeHandle, "%s", szBuffer); -+#endif -+} -+ -+static void _Puts(void *pvNativeHandle, const IMG_CHAR *pszStr) -+{ -+ seq_puts(pvNativeHandle, pszStr); -+} -+ -+static IMG_BOOL _HasOverflowed(void *pvNativeHandle) -+{ -+ struct seq_file *psSeqFile = pvNativeHandle; -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) -+ return seq_has_overflowed(psSeqFile); -+#else -+ return psSeqFile->count == psSeqFile->size; -+#endif -+} -+ -+static OSDI_IMPL_ENTRY_CB _g_sEntryCallbacks = { -+ .pfnWrite = _WriteData, -+ .pfnVPrintf = _VPrintf, -+ .pfnPuts = _Puts, -+ .pfnHasOverflowed = 
_HasOverflowed, -+}; -+ -+/* ----- sequential file operations ----------------------------------------- */ -+ -+static void *_Start(struct seq_file *psSeqFile, loff_t *puiPos) -+{ -+ DFS_ENTRY *psEntry = psSeqFile->private; -+ -+ void *pvRet = psEntry->sIterCb.pfnStart(&psEntry->sImplEntry, puiPos); -+ -+ if (pvRet == DI_START_TOKEN) -+ { -+ return SEQ_START_TOKEN; -+ } -+ -+ return pvRet; -+} -+ -+static void _Stop(struct seq_file *psSeqFile, void *pvPriv) -+{ -+ DFS_ENTRY *psEntry = psSeqFile->private; -+ -+ psEntry->sIterCb.pfnStop(&psEntry->sImplEntry, pvPriv); -+} -+ -+static void *_Next(struct seq_file *psSeqFile, void *pvPriv, loff_t *puiPos) -+{ -+ DFS_ENTRY *psEntry = psSeqFile->private; -+ -+ return psEntry->sIterCb.pfnNext(&psEntry->sImplEntry, pvPriv, puiPos); -+} -+ -+static int _Show(struct seq_file *psSeqFile, void *pvPriv) -+{ -+ DFS_ENTRY *psEntry = psSeqFile->private; -+ -+ if (pvPriv == SEQ_START_TOKEN) -+ { -+ pvPriv = DI_START_TOKEN; -+ } -+ -+ return psEntry->sIterCb.pfnShow(&psEntry->sImplEntry, pvPriv); -+} -+ -+static struct seq_operations _g_sSeqOps = { -+ .start = _Start, -+ .stop = _Stop, -+ .next = _Next, -+ .show = _Show -+}; -+ -+/* ----- file operations ---------------------------------------------------- */ -+ -+static int _Open(struct inode *psINode, struct file *psFile) -+{ -+ DFS_FILE *psDFSFile; -+ int iRes; -+ -+ PVR_LOG_RETURN_IF_FALSE(psINode != NULL && psINode->i_private != NULL, -+ "psDFSFile is NULL", -EIO); -+ -+ _DRIVER_THREAD_ENTER(); -+ -+ psDFSFile = psINode->i_private; -+ -+ if (psDFSFile->sEntry.sIterCb.pfnStart != NULL) -+ { -+ iRes = seq_open(psFile, psDFSFile->psSeqOps); -+ } -+ else -+ { -+ /* private data is NULL as it's going to be set below */ -+ iRes = single_open(psFile, _Show, NULL); -+ } -+ -+ if (iRes == 0) -+ { -+ struct seq_file *psSeqFile = psFile->private_data; -+ -+ DFS_ENTRY *psEntry = OSAllocMem(sizeof(*psEntry)); -+ if (psEntry == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem() failed", __func__)); -+ iRes = -ENOMEM; -+ goto return_; -+ } -+ -+ *psEntry = psDFSFile->sEntry; -+ psSeqFile->private = psEntry; -+ psEntry->sImplEntry.pvNative = psSeqFile; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to seq_open psFile, returning %d", -+ __func__, iRes)); -+ } -+ -+return_: -+ _DRIVER_THREAD_EXIT(); -+ -+ return iRes; -+} -+ -+static int _Close(struct inode *psINode, struct file *psFile) -+{ -+ DFS_FILE *psDFSFile = psINode->i_private; -+ DFS_ENTRY *psEntry; -+ int iRes; -+ -+ PVR_LOG_RETURN_IF_FALSE(psDFSFile != NULL, "psDFSFile is NULL", -+ -EIO); -+ -+ _DRIVER_THREAD_ENTER(); -+ -+ /* save pointer to DFS_ENTRY */ -+ psEntry = ((struct seq_file *) psFile->private_data)->private; -+ -+ if (psDFSFile->sEntry.sIterCb.pfnStart != NULL) -+ { -+ iRes = seq_release(psINode, psFile); -+ } -+ else -+ { -+ iRes = single_release(psINode, psFile); -+ } -+ -+ /* free DFS_ENTRY allocated in _Open */ -+ OSFreeMem(psEntry); -+ -+ /* Validation check as seq_release (and single_release which calls it) -+ * never fail */ -+ if (iRes != 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release psFile, returning %d", -+ __func__, iRes)); -+ } -+ -+ _DRIVER_THREAD_EXIT(); -+ -+ return iRes; -+} -+ -+static ssize_t _Read(struct file *psFile, char __user *pcBuffer, -+ size_t uiCount, loff_t *puiPos) -+{ -+ DFS_FILE *psDFSFile = psFile->f_path.dentry->d_inode->i_private; -+ ssize_t iRes = -1; -+ -+ _DRIVER_THREAD_ENTER(); -+ -+ if (psDFSFile->eType == DI_ENTRY_TYPE_GENERIC) -+ { -+ iRes = seq_read(psFile, pcBuffer, uiCount, 
puiPos); -+ if (iRes < 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to read from file, pfnRead() " -+ "returned %zd", __func__, iRes)); -+ goto return_; -+ } -+ } -+ else if (psDFSFile->eType == DI_ENTRY_TYPE_RANDOM_ACCESS) -+ { -+ DFS_ENTRY *psEntry = &psDFSFile->sEntry; -+ IMG_UINT64 ui64Count = uiCount; -+ -+ IMG_CHAR *pcLocalBuffer = OSAllocMem(uiCount); -+ PVR_GOTO_IF_FALSE(pcLocalBuffer != NULL, return_); -+ -+ iRes = psEntry->sIterCb.pfnRead(pcLocalBuffer, ui64Count, puiPos, -+ psEntry->sImplEntry.pvPrivData); -+ if (iRes < 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to read from file, pfnRead() " -+ "returned %zd", __func__, iRes)); -+ OSFreeMem(pcLocalBuffer); -+ goto return_; -+ } -+ -+ if (pvr_copy_to_user(pcBuffer, pcLocalBuffer, iRes) != 0) -+ { -+ iRes = -1; -+ } -+ -+ OSFreeMem(pcLocalBuffer); -+ } -+ -+return_: -+ _DRIVER_THREAD_EXIT(); -+ -+ return iRes; -+} -+ -+static loff_t _LSeek(struct file *psFile, loff_t iOffset, int iOrigin) -+{ -+ DFS_FILE *psDFSFile = psFile->f_path.dentry->d_inode->i_private; -+ loff_t iRes = -1; -+ -+ _DRIVER_THREAD_ENTER(); -+ -+ if (psDFSFile->eType == DI_ENTRY_TYPE_GENERIC) -+ { -+ iRes = seq_lseek(psFile, iOffset, iOrigin); -+ if (iRes < 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to set file position in psFile<%p> to offset " -+ "%lld, iOrigin %d, seq_lseek() returned %lld (dentry='%s')", __func__, -+ psFile, iOffset, iOrigin, iRes, psFile->f_path.dentry->d_name.name)); -+ goto return_; -+ } -+ } -+ else if (psDFSFile->eType == DI_ENTRY_TYPE_RANDOM_ACCESS) -+ { -+ DFS_ENTRY *psEntry = &psDFSFile->sEntry; -+ IMG_UINT64 ui64Pos; -+ -+ switch (iOrigin) -+ { -+ case SEEK_SET: -+ ui64Pos = psFile->f_pos + iOffset; -+ break; -+ case SEEK_CUR: -+ ui64Pos = iOffset; -+ break; -+ case SEEK_END: -+ /* not supported as we don't know the file size here */ -+ /* fall through */ -+ default: -+ return -1; -+ } -+ -+ /* only pass the absolute position to the callback, it's up to the -+ * implementer to determine if the position is valid */ -+ -+ iRes = psEntry->sIterCb.pfnSeek(ui64Pos, -+ psEntry->sImplEntry.pvPrivData); -+ if (iRes < 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to set file position to offset " -+ "%lld, pfnSeek() returned %lld", __func__, -+ iOffset, iRes)); -+ goto return_; -+ } -+ -+ psFile->f_pos = ui64Pos; -+ } -+ -+return_: -+ _DRIVER_THREAD_EXIT(); -+ -+ return iRes; -+} -+ -+static ssize_t _Write(struct file *psFile, const char __user *pszBuffer, -+ size_t uiCount, loff_t *puiPos) -+{ -+ struct inode *psINode = psFile->f_path.dentry->d_inode; -+ DFS_FILE *psDFSFile = psINode->i_private; -+ DI_ITERATOR_CB *psIter = &psDFSFile->sEntry.sIterCb; -+ IMG_CHAR *pcLocalBuffer; -+ IMG_UINT64 ui64Count; -+ IMG_INT64 i64Res = -EIO; -+ IMG_UINT64 ui64Pos = *puiPos; -+ -+ PVR_LOG_RETURN_IF_FALSE(psDFSFile != NULL, "psDFSFile is NULL", -+ -EIO); -+ PVR_LOG_RETURN_IF_FALSE(psIter->pfnWrite != NULL, "pfnWrite is NULL", -+ -EIO); -+ -+ _DRIVER_THREAD_ENTER(); -+ -+ /* Make sure we allocate the smallest amount of needed memory*/ -+ ui64Count = psIter->ui32WriteLenMax; -+ PVR_LOG_GOTO_IF_FALSE(uiCount <= ui64Count, "uiCount too long", return_); -+ ui64Count = MIN(uiCount + 1, ui64Count); -+ -+ /* allocate buffer with one additional byte for NUL character */ -+ pcLocalBuffer = OSAllocMem(ui64Count); -+ PVR_LOG_GOTO_IF_FALSE(pcLocalBuffer != NULL, "OSAllocMem() failed", -+ return_); -+ -+ i64Res = pvr_copy_from_user(pcLocalBuffer, pszBuffer, ui64Count); -+ PVR_LOG_GOTO_IF_FALSE(i64Res == 0, "pvr_copy_from_user() failed", -+ 
free_local_buffer_); -+ -+ /* ensure that the framework user gets a NUL terminated buffer */ -+ pcLocalBuffer[ui64Count - 1] = '\0'; -+ -+ i64Res = psIter->pfnWrite(pcLocalBuffer, ui64Count, &ui64Pos, -+ psDFSFile->sEntry.sImplEntry.pvPrivData); -+ PVR_LOG_GOTO_IF_FALSE(i64Res >= 0, "pfnWrite failed", free_local_buffer_); -+ -+ *puiPos = ui64Pos; -+ -+free_local_buffer_: -+ OSFreeMem(pcLocalBuffer); -+ -+return_: -+ _DRIVER_THREAD_EXIT(); -+ -+ return i64Res; -+} -+ -+static const struct file_operations _g_psFileOpsGen = { -+ .owner = THIS_MODULE, -+ .open = _Open, -+ .release = _Close, -+ .read = _Read, -+ .llseek = _LSeek, -+ .write = _Write, -+}; -+ -+static const struct file_operations _g_psFileOpsRndAcc = { -+ .owner = THIS_MODULE, -+ .read = _Read, -+ .llseek = _LSeek, -+ .write = _Write, -+}; -+ -+/* ----- DI implementation interface ---------------------------------------- */ -+ -+static PVRSRV_ERROR _Init(void) -+{ -+ return PVRSRV_OK; -+} -+ -+static void _DeInit(void) -+{ -+} -+ -+static PVRSRV_ERROR _CreateFile(const IMG_CHAR *pszName, -+ DI_ENTRY_TYPE eType, -+ const DI_ITERATOR_CB *psIterCb, -+ void *pvPrivData, -+ void *pvParentDir, -+ void **pvFile) -+{ -+ DFS_DIR *psParentDir = pvParentDir; -+ DFS_FILE *psFile; -+ umode_t uiMode = S_IFREG; -+ struct dentry *psEntry; -+ const struct file_operations *psFileOps = NULL; -+ PVRSRV_ERROR eError; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pvFile != NULL, "pvFile"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pvParentDir != NULL, "pvParentDir"); -+ -+ switch (eType) -+ { -+ case DI_ENTRY_TYPE_GENERIC: -+ psFileOps = &_g_psFileOpsGen; -+ break; -+ case DI_ENTRY_TYPE_RANDOM_ACCESS: -+ psFileOps = &_g_psFileOpsRndAcc; -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "eType invalid in %s()", __func__)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto return_; -+ } -+ -+ psFile = OSAllocMem(sizeof(*psFile)); -+ PVR_LOG_GOTO_IF_NOMEM(psFile, eError, return_); -+ -+ uiMode |= psIterCb->pfnShow != NULL || psIterCb->pfnRead != NULL ? -+ S_IRUGO : 0; -+ uiMode |= psIterCb->pfnWrite != NULL ? S_IWUSR : 0; -+ -+ psEntry = debugfs_create_file(pszName, uiMode, psParentDir->psDirEntry, -+ psFile, psFileOps); -+ if (IS_ERR_OR_NULL(psEntry)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Cannot create debugfs '%s' file", -+ __func__, pszName)); -+ -+ eError = psEntry == NULL ? 
-+ PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_DEVICE; -+ goto free_file_; -+ } -+ -+ psFile->eType = eType; -+ psFile->psSeqOps = &_g_sSeqOps; -+ psFile->sEntry.sIterCb = *psIterCb; -+ psFile->sEntry.sImplEntry.pvPrivData = pvPrivData; -+ psFile->sEntry.sImplEntry.pvNative = NULL; -+ psFile->sEntry.sImplEntry.psCb = &_g_sEntryCallbacks; -+ psFile->psParentDir = psParentDir; -+ psFile->psFileEntry = psEntry; -+ -+ *pvFile = psFile; -+ -+ return PVRSRV_OK; -+ -+free_file_: -+ OSFreeMem(psFile); -+ -+return_: -+ return eError; -+} -+ -+static void _DestroyFile(void *pvFile) -+{ -+ DFS_FILE *psFile = pvFile; -+ -+ PVR_ASSERT(psFile != NULL); -+ -+ psFile->psFileEntry->d_inode->i_private = NULL; -+ -+ debugfs_remove(psFile->psFileEntry); -+ OSFreeMem(psFile); -+} -+ -+static PVRSRV_ERROR _CreateDir(const IMG_CHAR *pszName, -+ void *pvParentDir, -+ void **ppvDir) -+{ -+ DFS_DIR *psNewDir; -+ struct dentry *psDirEntry, *psParentDir = NULL; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(ppvDir != NULL, "ppvDir"); -+ -+ psNewDir = OSAllocMem(sizeof(*psNewDir)); -+ PVR_LOG_RETURN_IF_NOMEM(psNewDir, "OSAllocMem"); -+ -+ psNewDir->psParentDir = pvParentDir; -+ -+ if (pvParentDir != NULL) -+ { -+ psParentDir = psNewDir->psParentDir->psDirEntry; -+ } -+ -+ psDirEntry = debugfs_create_dir(pszName, psParentDir); -+ if (IS_ERR_OR_NULL(psDirEntry)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Cannot create '%s' debugfs directory", -+ __func__, pszName)); -+ OSFreeMem(psNewDir); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ psNewDir->psDirEntry = psDirEntry; -+ *ppvDir = psNewDir; -+ -+ return PVRSRV_OK; -+} -+ -+static void _DestroyDir(void *pvDir) -+{ -+ DFS_DIR *psDir = pvDir; -+ -+ PVR_ASSERT(psDir != NULL); -+ -+ debugfs_remove(psDir->psDirEntry); -+ OSFreeMem(psDir); -+} -+ -+PVRSRV_ERROR PVRDebugFsRegister(void) -+{ -+ OSDI_IMPL_CB sImplCb = { -+ .pfnInit = _Init, -+ .pfnDeInit = _DeInit, -+ .pfnCreateEntry = _CreateFile, -+ .pfnDestroyEntry = _DestroyFile, -+ .pfnCreateGroup = _CreateDir, -+ .pfnDestroyGroup = _DestroyDir -+ }; -+ -+ return DIRegisterImplementation("debugfs", &sImplCb); -+} -diff --git a/drivers/gpu/drm/img-rogue/pvr_debugfs.h b/drivers/gpu/drm/img-rogue/pvr_debugfs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_debugfs.h -@@ -0,0 +1,50 @@ -+/*************************************************************************/ /*! -+@File -+@Title DebugFS implementation of Debug Info interface. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PVR_DEBUGFS_H -+#define PVR_DEBUGFS_H -+ -+#include "pvrsrv_error.h" -+ -+PVRSRV_ERROR PVRDebugFsRegister(void); -+ -+#endif /* PVR_DEBUGFS_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_dicommon.h b/drivers/gpu/drm/img-rogue/pvr_dicommon.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_dicommon.h -@@ -0,0 +1,59 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services Debug Information (DI) common types and definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Debug Information (DI) common types and definitions included -+ in both user mode and kernel mode source. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PVR_DICOMMON_H -+#define PVR_DICOMMON_H -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+/*! Maximum DI entry path length including the null byte. */ -+#define DI_IMPL_BRG_PATH_LEN 64 -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif /* PVR_DICOMMON_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_dma_resv.h b/drivers/gpu/drm/img-rogue/pvr_dma_resv.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_dma_resv.h -@@ -0,0 +1,80 @@ -+/*************************************************************************/ /*! -+@Title Kernel reservation object compatibility header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Per-version macros to allow code to seamlessly use older kernel -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#ifndef __PVR_DMA_RESV_H__ -+#define __PVR_DMA_RESV_H__ -+ -+#include -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) -+#include -+#else -+#include -+ -+/* Reservation object types */ -+#define dma_resv reservation_object -+#define dma_resv_list reservation_object_list -+ -+/* Reservation object functions */ -+#define dma_resv_add_excl_fence reservation_object_add_excl_fence -+#define dma_resv_add_shared_fence reservation_object_add_shared_fence -+#define dma_resv_fini reservation_object_fini -+#define dma_resv_get_excl reservation_object_get_excl -+#define dma_resv_get_list reservation_object_get_list -+#define dma_resv_held reservation_object_held -+#define dma_resv_init reservation_object_init -+#define dma_resv_reserve_shared reservation_object_reserve_shared -+#define dma_resv_test_signaled_rcu reservation_object_test_signaled_rcu -+#define dma_resv_wait_timeout_rcu reservation_object_wait_timeout_rcu -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)) -+ -+#define dma_resv_shared_list dma_resv_get_list -+#define dma_resv_excl_fence dma_resv_get_excl -+#define dma_resv_wait_timeout dma_resv_wait_timeout_rcu -+#define dma_resv_test_signaled dma_resv_test_signaled_rcu -+#define dma_resv_get_fences dma_resv_get_fences_rcu -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)) */ -+ -+#endif /* __PVR_DMA_RESV_H__ */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_drm.c b/drivers/gpu/drm/img-rogue/pvr_drm.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_drm.c -@@ -0,0 +1,452 @@ -+/* -+ * @File -+ * @Title PowerVR DRM driver -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". 
-+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include -+ -+#include -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) -+#include -+#include -+#include -+#include -+#else -+#include /* include before drm_crtc.h for kernels older than 3.9 */ -+#endif -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "module_common.h" -+#include "pvr_drm.h" -+#include "pvr_drv.h" -+#include "pvrversion.h" -+#include "services_kernel_client.h" -+#include "pvr_sync_ioctl_drm.h" -+ -+#include "kernel_compatibility.h" -+ -+#define PVR_DRM_DRIVER_NAME PVR_DRM_NAME -+#define PVR_DRM_DRIVER_DESC "Imagination Technologies PVR DRM" -+#define PVR_DRM_DRIVER_DATE "20170530" -+ -+/* -+ * Protects global PVRSRV_DATA on a multi device system. i.e. this is used to -+ * protect the PVRSRVCommonDeviceXXXX() APIs in the Server common layer which -+ * are not re-entrant for device creation and initialisation. -+ */ -+static DEFINE_MUTEX(g_device_mutex); -+ -+/* Executed before sleep/suspend-to-RAM/S3. During this phase the content -+ * of the video memory is preserved (copied to system RAM). This step is -+ * necessary because the device can be powered off and the content of the -+ * video memory lost. -+ */ -+static int pvr_pm_suspend(struct device *dev) -+{ -+ struct drm_device *ddev = dev_get_drvdata(dev); -+ -+ DRM_DEBUG_DRIVER("device %p\n", dev); -+ -+ return PVRSRVDeviceSuspend(ddev); -+} -+ -+/* Executed after the system is woken up from sleep/suspend-to-RAM/S3. This -+ * phase restores the content of the video memory from the system RAM. -+ */ -+static int pvr_pm_resume(struct device *dev) -+{ -+ struct drm_device *ddev = dev_get_drvdata(dev); -+ -+ DRM_DEBUG_DRIVER("device %p\n", dev); -+ -+ return PVRSRVDeviceResume(ddev); -+} -+ -+/* Executed before the hibernation image is created. This callback allows to -+ * preserve the content of the video RAM into the system RAM which in turn -+ * is then stored into a disk. -+ */ -+static int pvr_pm_freeze(struct device *dev) -+{ -+ struct drm_device *ddev = dev_get_drvdata(dev); -+ -+ DRM_DEBUG_DRIVER("%s(): device %p\n", __func__, dev); -+ -+ return PVRSRVDeviceSuspend(ddev); -+} -+ -+/* Executed after the hibernation image is created or if the creation of the -+ * image has failed. This callback should undo whatever was done in -+ * pvr_pm_freeze to allow the device to operate in the same way as before the -+ * call to pvr_pm_freeze. -+ */ -+static int pvr_pm_thaw(struct device *dev) -+{ -+ struct drm_device *ddev = dev_get_drvdata(dev); -+ -+ DRM_DEBUG_DRIVER("%s(): device %p\n", __func__, dev); -+ -+ return PVRSRVDeviceResume(ddev); -+} -+ -+/* Executed after the hibernation image is created. This callback should not -+ * preserve the content of the video memory since this was already done -+ * in pvr_pm_freeze. -+ * -+ * Note: from the tests performed on a TestChip this callback is not executed -+ * and driver's pvr_shutdown() is executed instead. 
-+ */ -+static int pvr_pm_poweroff(struct device *dev) -+{ -+ struct drm_device *ddev = dev_get_drvdata(dev); -+ -+ DRM_DEBUG_DRIVER("%s(): device %p\n", __func__, dev); -+ -+ PVRSRVDeviceShutdown(ddev); -+ -+ return 0; -+} -+ -+/* Executed after the content of the system memory is restored from the -+ * hibernation image. This callback restored video RAM from the system RAM -+ * and performs any necessary device setup required for the device to operate -+ * properly. -+ */ -+static int pvr_pm_restore(struct device *dev) -+{ -+ struct drm_device *ddev = dev_get_drvdata(dev); -+ -+ DRM_DEBUG_DRIVER("%s(): device %p\n", __func__, dev); -+ -+ return PVRSRVDeviceResume(ddev); -+} -+ -+const struct dev_pm_ops pvr_pm_ops = { -+ /* Sleep (suspend-to-RAM/S3) callbacks. -+ * This mode saves the content of the video RAM to the system RAM and -+ * powers off the device to reduce the power consumption. Because the -+ * video RAM can be powered off, it needs to be preserved beforehand. -+ */ -+ .suspend = pvr_pm_suspend, -+ .resume = pvr_pm_resume, -+ -+ /* Hibernation (suspend-to-disk/S4) callbacks. -+ * This mode saves the content of the video RAM to the system RAM and then -+ * dumps the system RAM to disk (swap partition or swap file). The system -+ * then powers off. After power on the system RAM content is loaded from -+ * the disk and then video RAM is restored from the system RAM. -+ * -+ * The procedure is executed in following order -+ * -+ * - Suspend-to-disk is triggered -+ * At this point the OS goes through the list of all registered devices and -+ * calls provided callbacks. -+ * -- pvr_pm_freeze() is called -+ * The GPU is powered of and submitting new work is blocked. -+ * The content of the video RAM is saved to the system RAM, and -+ * other actions required to suspend the device are performed. -+ * -- system RAM image is created and saved on the disk -+ * The disk now contains a snapshot for the DDK Driver for the -+ * moment when pvr_pm_freeze() was called. -+ * -- pvr_pm_thaw() is called -+ * All actions taken in pvr_pm_freeze() are undone. The memory -+ * allocated for the video RAM is freed and all actions necessary -+ * to bring the device to operational state are taken. -+ * This makes sure that regardless if image was created successfully -+ * or not the device remains operational. -+ * -+ * - System is powered off -+ * -- pvr_shutdown() is called -+ * No actions are required beside powering off the GPU. -+ * -+ * - System is powered up -+ * -- system RAM image is read from the disk -+ * This restores the snapshot of the DDK driver along with the saved -+ * video RAM buffer. -+ * -- pvr_pm_restore() is called -+ * Video RAM is restored from the buffer located in the system RAM. -+ * Actions to reset the device and bring it back to working state -+ * are taken. Video RAM buffer is freed. -+ * In summary the same procedure as in the case of pvr_pm_thaw() is -+ * performed. 
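 * As a sketch of how this table is typically consumed, a Linux bus driver
 * points its PM hooks at it (the platform_driver below is hypothetical; the
 * actual binding is done elsewhere in this series):
 *
 *     static struct platform_driver example_pvr_driver = {
 *         .probe  = example_probe,
 *         .remove = example_remove,
 *         .driver = {
 *             .name = "example-pvr-gpu",
 *             .pm   = &pvr_pm_ops,
 *         },
 *     };
 *
 * The PM core then invokes .freeze, .thaw, .poweroff and .restore in the
 * order described above when the system hibernates and resumes.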
-+ */ -+ .freeze = pvr_pm_freeze, -+ .thaw = pvr_pm_thaw, -+ .poweroff = pvr_pm_poweroff, -+ .restore = pvr_pm_restore, -+}; -+ -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) -+static -+#endif -+int pvr_drm_load(struct drm_device *ddev, unsigned long flags) -+{ -+ struct pvr_drm_private *priv; -+ enum PVRSRV_ERROR_TAG srv_err; -+ int err, deviceId; -+ -+ DRM_DEBUG_DRIVER("device %p\n", ddev->dev); -+ -+ dev_set_drvdata(ddev->dev, ddev); -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) -+ /* -+ * Older kernels do not have render drm_minor member in drm_device, -+ * so we fallback to primary node for device identification -+ */ -+ deviceId = ddev->primary->index; -+#else -+ if (ddev->render) -+ deviceId = ddev->render->index; -+ else /* when render node is NULL, fallback to primary node */ -+ deviceId = ddev->primary->index; -+#endif -+ -+ priv = kzalloc(sizeof(*priv), GFP_KERNEL); -+ if (!priv) { -+ err = -ENOMEM; -+ goto err_exit; -+ } -+ ddev->dev_private = priv; -+ -+ if (!ddev->dev->dma_parms) -+ ddev->dev->dma_parms = &priv->dma_parms; -+ dma_set_max_seg_size(ddev->dev, DMA_BIT_MASK(32)); -+ dma_set_mask(ddev->dev, DMA_BIT_MASK(40)); -+ -+ mutex_lock(&g_device_mutex); -+ -+ srv_err = PVRSRVCommonDeviceCreate(ddev->dev, deviceId, &priv->dev_node); -+ if (srv_err != PVRSRV_OK) { -+ DRM_ERROR("failed to create device node for device %p (%s)\n", -+ ddev->dev, PVRSRVGetErrorString(srv_err)); -+ if (srv_err == PVRSRV_ERROR_PROBE_DEFER) -+ err = -EPROBE_DEFER; -+ else -+ err = -ENODEV; -+ goto err_unset_dma_parms; -+ } -+ -+ err = PVRSRVDeviceInit(priv->dev_node); -+ if (err) { -+ DRM_ERROR("device %p initialisation failed (err=%d)\n", -+ ddev->dev, err); -+ goto err_device_destroy; -+ } -+ -+ drm_mode_config_init(ddev); -+ -+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_PROBE) -+ srv_err = PVRSRVCommonDeviceInitialise(priv->dev_node); -+ if (srv_err != PVRSRV_OK) { -+ err = -ENODEV; -+ DRM_ERROR("device %p initialisation failed (err=%d)\n", -+ ddev->dev, err); -+ goto err_device_deinit; -+ } -+#endif -+ -+ mutex_unlock(&g_device_mutex); -+ -+ return 0; -+ -+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_PROBE) -+err_device_deinit: -+ drm_mode_config_cleanup(ddev); -+ PVRSRVDeviceDeinit(priv->dev_node); -+#endif -+err_device_destroy: -+ PVRSRVCommonDeviceDestroy(priv->dev_node); -+err_unset_dma_parms: -+ mutex_unlock(&g_device_mutex); -+ if (ddev->dev->dma_parms == &priv->dma_parms) -+ ddev->dev->dma_parms = NULL; -+ kfree(priv); -+err_exit: -+ return err; -+} -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) -+static -+#endif -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) -+int pvr_drm_unload(struct drm_device *ddev) -+#else -+void pvr_drm_unload(struct drm_device *ddev) -+#endif -+{ -+ struct pvr_drm_private *priv = ddev->dev_private; -+ -+ DRM_DEBUG_DRIVER("device %p\n", ddev->dev); -+ -+ drm_mode_config_cleanup(ddev); -+ -+ PVRSRVDeviceDeinit(priv->dev_node); -+ -+ mutex_lock(&g_device_mutex); -+ PVRSRVCommonDeviceDestroy(priv->dev_node); -+ mutex_unlock(&g_device_mutex); -+ -+ if (ddev->dev->dma_parms == &priv->dma_parms) -+ ddev->dev->dma_parms = NULL; -+ -+ kfree(priv); -+ ddev->dev_private = NULL; -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) -+ return 0; -+#endif -+} -+ -+static int pvr_drm_open(struct drm_device *ddev, struct drm_file *dfile) -+{ -+#if (PVRSRV_DEVICE_INIT_MODE != PVRSRV_LINUX_DEV_INIT_ON_CONNECT) -+ struct pvr_drm_private *priv = ddev->dev_private; -+ int err; -+#endif -+ -+ if 
(!try_module_get(THIS_MODULE)) { -+ DRM_ERROR("failed to get module reference\n"); -+ return -ENOENT; -+ } -+ -+#if (PVRSRV_DEVICE_INIT_MODE != PVRSRV_LINUX_DEV_INIT_ON_CONNECT) -+ err = PVRSRVDeviceServicesOpen(priv->dev_node, dfile); -+ if (err) -+ module_put(THIS_MODULE); -+ -+ return err; -+#else -+ return 0; -+#endif -+} -+ -+static void pvr_drm_release(struct drm_device *ddev, struct drm_file *dfile) -+{ -+ struct pvr_drm_private *priv = ddev->dev_private; -+ -+ PVRSRVDeviceRelease(priv->dev_node, dfile); -+ -+ module_put(THIS_MODULE); -+} -+ -+/* -+ * The DRM global lock is taken for ioctls unless the DRM_UNLOCKED flag is set. -+ */ -+static struct drm_ioctl_desc pvr_drm_ioctls[] = { -+ DRM_IOCTL_DEF_DRV(PVR_SRVKM_CMD, PVRSRV_BridgeDispatchKM, -+ DRM_RENDER_ALLOW | DRM_UNLOCKED), -+ DRM_IOCTL_DEF_DRV(PVR_SRVKM_INIT, drm_pvr_srvkm_init, -+ DRM_RENDER_ALLOW | DRM_UNLOCKED), -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(USE_PVRSYNC_DEVNODE) -+ DRM_IOCTL_DEF_DRV(PVR_SYNC_RENAME_CMD, pvr_sync_rename_ioctl, -+ DRM_RENDER_ALLOW | DRM_UNLOCKED), -+ DRM_IOCTL_DEF_DRV(PVR_SYNC_FORCE_SW_ONLY_CMD, pvr_sync_force_sw_only_ioctl, -+ DRM_RENDER_ALLOW | DRM_UNLOCKED), -+ DRM_IOCTL_DEF_DRV(PVR_SW_SYNC_CREATE_FENCE_CMD, pvr_sw_sync_create_fence_ioctl, -+ DRM_RENDER_ALLOW | DRM_UNLOCKED), -+ DRM_IOCTL_DEF_DRV(PVR_SW_SYNC_INC_CMD, pvr_sw_sync_inc_ioctl, -+ DRM_RENDER_ALLOW | DRM_UNLOCKED), -+#endif -+}; -+ -+#if defined(CONFIG_COMPAT) -+static long pvr_compat_ioctl(struct file *file, unsigned int cmd, -+ unsigned long arg) -+{ -+ unsigned int nr = DRM_IOCTL_NR(cmd); -+ -+ if (nr < DRM_COMMAND_BASE) -+ return drm_compat_ioctl(file, cmd, arg); -+ -+ return drm_ioctl(file, cmd, arg); -+} -+#endif /* defined(CONFIG_COMPAT) */ -+ -+const struct file_operations pvr_drm_fops = { -+ .owner = THIS_MODULE, -+ .open = drm_open, -+ .release = drm_release, -+ .unlocked_ioctl = drm_ioctl, -+#if defined(CONFIG_COMPAT) -+ .compat_ioctl = pvr_compat_ioctl, -+#endif -+ .mmap = PVRSRV_MMap, -+ .poll = drm_poll, -+ .read = drm_read, -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) -+ .fasync = drm_fasync, -+#endif -+}; -+ -+const struct drm_driver pvr_drm_generic_driver = { -+ .driver_features = DRIVER_MODESET | DRIVER_RENDER, -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) -+ .load = NULL, -+ .unload = NULL, -+#else -+ .load = pvr_drm_load, -+ .unload = pvr_drm_unload, -+#endif -+ .open = pvr_drm_open, -+ .postclose = pvr_drm_release, -+ -+ .ioctls = pvr_drm_ioctls, -+ .num_ioctls = ARRAY_SIZE(pvr_drm_ioctls), -+ .fops = &pvr_drm_fops, -+ -+ .name = PVR_DRM_DRIVER_NAME, -+ .desc = PVR_DRM_DRIVER_DESC, -+ .date = PVR_DRM_DRIVER_DATE, -+ .major = PVRVERSION_MAJ, -+ .minor = PVRVERSION_MIN, -+ .patchlevel = PVRVERSION_BUILD, -+}; -diff --git a/drivers/gpu/drm/img-rogue/pvr_drm.h b/drivers/gpu/drm/img-rogue/pvr_drm.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_drm.h -@@ -0,0 +1,146 @@ -+/* -+ * @File -+ * @Title PVR DRM definitions shared between kernel and user space. -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#if !defined(__PVR_DRM_H__) -+#define __PVR_DRM_H__ -+ -+#include -+ -+#if defined(__KERNEL__) -+#include -+#else -+#include -+#endif -+ -+/* -+ * IMPORTANT: -+ * All structures below are designed to be the same size when compiled for 32 -+ * and/or 64 bit architectures, i.e. there should be no compiler inserted -+ * padding. This is achieved by sticking to the following rules: -+ * 1) only use fixed width types -+ * 2) always naturally align fields by arranging them appropriately and by using -+ * padding fields when necessary -+ * -+ * These rules should _always_ be followed when modifying or adding new -+ * structures to this file. -+ */ -+ -+struct drm_pvr_srvkm_cmd { -+ __u32 bridge_id; -+ __u32 bridge_func_id; -+ __u64 in_data_ptr; -+ __u64 out_data_ptr; -+ __u32 in_data_size; -+ __u32 out_data_size; -+}; -+ -+struct pvr_sync_rename_ioctl_data { -+ char szName[32]; -+}; -+ -+struct pvr_sw_sync_create_fence_data { -+ char name[32]; -+ __s32 fence; -+ __u32 pad; -+ __u64 sync_pt_idx; -+}; -+ -+struct pvr_sw_timeline_advance_data { -+ __u64 sync_pt_idx; -+}; -+ -+#define PVR_SRVKM_SERVICES_INIT 1 -+#define PVR_SRVKM_SYNC_INIT 2 -+struct drm_pvr_srvkm_init_data { -+ __u32 init_module; -+}; -+ -+/* Values used to configure the PVRSRV_DEVICE_INIT_MODE tunable (Linux-only) */ -+#define PVRSRV_LINUX_DEV_INIT_ON_PROBE 1 -+#define PVRSRV_LINUX_DEV_INIT_ON_OPEN 2 -+#define PVRSRV_LINUX_DEV_INIT_ON_CONNECT 3 -+ -+/* -+ * DRM command numbers, relative to DRM_COMMAND_BASE. 
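 * As a rough user-space sketch of how these commands are reached, Services is
 * typically initialised through the render node before other ioctls are used
 * (the device path and wrapper function are assumptions; error handling is
 * omitted):
 *
 *     #include <fcntl.h>
 *     #include <sys/ioctl.h>
 *     #include "pvr_drm.h"
 *
 *     static int example_init_services(void)
 *     {
 *         struct drm_pvr_srvkm_init_data init_data = {
 *             .init_module = PVR_SRVKM_SERVICES_INIT,
 *         };
 *         int fd = open("/dev/dri/renderD128", O_RDWR);  // hypothetical node
 *
 *         return ioctl(fd, DRM_IOCTL_PVR_SRVKM_INIT, &init_data);
 *     }
 *
 * Per the layout rule stated above, struct drm_pvr_srvkm_cmd packs to 32 bytes
 * with no implicit padding on both 32-bit and 64-bit builds (two __u32, two
 * __u64 at 8-byte-aligned offsets, then two __u32).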
-+ * These defines must be prefixed with "DRM_". -+ */ -+ -+/* PVR Services command */ -+#define DRM_PVR_SRVKM_CMD 0 -+ -+/* PVR Sync commands */ -+#define DRM_PVR_SYNC_RENAME_CMD 1 -+#define DRM_PVR_SYNC_FORCE_SW_ONLY_CMD 2 -+ -+/* PVR Software Sync commands */ -+#define DRM_PVR_SW_SYNC_CREATE_FENCE_CMD 3 -+#define DRM_PVR_SW_SYNC_INC_CMD 4 -+ -+/* PVR Services Render Device Init command */ -+#define DRM_PVR_SRVKM_INIT 5 -+ -+/* These defines must be prefixed with "DRM_IOCTL_". */ -+#define DRM_IOCTL_PVR_SRVKM_CMD \ -+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_SRVKM_CMD, \ -+ struct drm_pvr_srvkm_cmd) -+ -+#define DRM_IOCTL_PVR_SYNC_RENAME_CMD \ -+ DRM_IOW(DRM_COMMAND_BASE + DRM_PVR_SYNC_RENAME_CMD, \ -+ struct pvr_sync_rename_ioctl_data) -+ -+#define DRM_IOCTL_PVR_SYNC_FORCE_SW_ONLY_CMD \ -+ DRM_IO(DRM_COMMAND_BASE + DRM_PVR_SYNC_FORCE_SW_ONLY_CMD) -+ -+#define DRM_IOCTL_PVR_SW_SYNC_CREATE_FENCE_CMD \ -+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_SW_SYNC_CREATE_FENCE_CMD, \ -+ struct pvr_sw_sync_create_fence_data) -+ -+#define DRM_IOCTL_PVR_SW_SYNC_INC_CMD \ -+ DRM_IOR(DRM_COMMAND_BASE + DRM_PVR_SW_SYNC_INC_CMD, \ -+ struct pvr_sw_timeline_advance_data) -+ -+#define DRM_IOCTL_PVR_SRVKM_INIT \ -+ DRM_IOW(DRM_COMMAND_BASE + DRM_PVR_SRVKM_INIT, \ -+ struct drm_pvr_srvkm_init_data) -+ -+#endif /* defined(__PVR_DRM_H__) */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_drv.h b/drivers/gpu/drm/img-rogue/pvr_drv.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_drv.h -@@ -0,0 +1,112 @@ -+/* -+ * @File -+ * @Title PowerVR DRM driver -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". 
-+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#if !defined(__PVR_DRV_H__) -+#define __PVR_DRV_H__ -+ -+#include -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) -+#include -+#include -+#include -+#else -+#include -+#endif -+ -+#include -+ -+struct file; -+struct _PVRSRV_DEVICE_NODE_; -+struct workqueue_struct; -+struct vm_area_struct; -+ -+/* This structure is used to store Linux specific per-device information. */ -+struct pvr_drm_private { -+ struct _PVRSRV_DEVICE_NODE_ *dev_node; -+ -+ /* -+ * This is needed for devices that don't already have their own dma -+ * parameters structure, e.g. platform devices, and, if necessary, will -+ * be assigned to the 'struct device' during device initialisation. It -+ * should therefore never be accessed directly via this structure as -+ * this may not be the version of dma parameters in use. -+ */ -+ struct device_dma_parameters dma_parms; -+ -+ /* PVR Sync debug notify handle */ -+ void *sync_debug_notify_handle; -+ -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) -+ /* Only used in fence sync as sync_debug_notify_handle is used -+ * to print a header only. Content is registered separately. -+ * Used to print foreign sync debug -+ */ -+ void *sync_foreign_debug_notify_handle; -+#endif -+ -+ /* Flag stating if the device was suspended/resumed. If this is 0 then -+ * the device was either resumed or no suspend was called but if 1 then -+ * the OS called suspend on this device. -+ */ -+ atomic_t suspended; -+}; -+ -+extern const struct dev_pm_ops pvr_pm_ops; -+extern const struct drm_driver pvr_drm_generic_driver; -+extern const struct file_operations pvr_drm_fops; -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) -+int pvr_drm_load(struct drm_device *ddev, unsigned long flags); -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) -+int pvr_drm_unload(struct drm_device *ddev); -+#else -+void pvr_drm_unload(struct drm_device *ddev); -+#endif -+#endif -+ -+int PVRSRV_BridgeDispatchKM(struct drm_device *dev, void *arg, -+ struct drm_file *file); -+int PVRSRV_MMap(struct file *file, struct vm_area_struct *ps_vma); -+ -+#endif /* !defined(__PVR_DRV_H__) */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_fd_sync_kernel.h b/drivers/gpu/drm/img-rogue/pvr_fd_sync_kernel.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_fd_sync_kernel.h -@@ -0,0 +1,64 @@ -+/*************************************************************************/ /*! -+@File pvr_fd_sync_kernel.h -+@Title Kernel/userspace interface definitions to use the kernel sync -+ driver -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+ -+#ifndef _PVR_FD_SYNC_KERNEL_H_ -+#define _PVR_FD_SYNC_KERNEL_H_ -+ -+#include -+#include -+ -+#include "pvr_drm.h" -+ -+#define PVR_SYNC_MAX_QUERY_FENCE_POINTS 14 -+ -+struct pvr_sync_pt_info { -+ /* Output */ -+ __u32 id; -+ __u32 ui32FWAddr; -+ __u32 ui32CurrOp; -+ __u32 ui32NextOp; -+ __u32 ui32TlTaken; -+} __attribute__((packed, aligned(8))); -+ -+#endif /* _PVR_FD_SYNC_KERNEL_H_ */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_fence.c b/drivers/gpu/drm/img-rogue/pvr_fence.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_fence.c -@@ -0,0 +1,1153 @@ -+/* -+ * @File -+ * @Title PowerVR Linux fence interface -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include "pvr_fence.h" -+#include "services_kernel_client.h" -+#include "sync_checkpoint_external.h" -+ -+#define CREATE_TRACE_POINTS -+#include "pvr_fence_trace.h" -+ -+/* This header must always be included last */ -+#include "kernel_compatibility.h" -+ -+/* Global kmem_cache for pvr_fence object allocations */ -+static struct kmem_cache *pvr_fence_cache; -+static DEFINE_MUTEX(pvr_fence_cache_mutex); -+static u32 pvr_fence_cache_refcount; -+ -+#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) 
\ -+ do { \ -+ if (pfnDumpDebugPrintf) \ -+ pfnDumpDebugPrintf(pvDumpDebugFile, fmt, \ -+ ## __VA_ARGS__); \ -+ else \ -+ pr_err(fmt "\n", ## __VA_ARGS__); \ -+ } while (0) -+ -+static inline void -+pvr_fence_sync_signal(struct pvr_fence *pvr_fence, u32 fence_sync_flags) -+{ -+ SyncCheckpointSignal(pvr_fence->sync_checkpoint, fence_sync_flags); -+} -+ -+static inline bool -+pvr_fence_sync_is_signaled(struct pvr_fence *pvr_fence, u32 fence_sync_flags) -+{ -+ return SyncCheckpointIsSignalled(pvr_fence->sync_checkpoint, -+ fence_sync_flags); -+} -+ -+static inline u32 -+pvr_fence_sync_value(struct pvr_fence *pvr_fence) -+{ -+ if (SyncCheckpointIsErrored(pvr_fence->sync_checkpoint, -+ PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) -+ return PVRSRV_SYNC_CHECKPOINT_ERRORED; -+ else if (SyncCheckpointIsSignalled(pvr_fence->sync_checkpoint, -+ PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) -+ return PVRSRV_SYNC_CHECKPOINT_SIGNALLED; -+ else -+ return PVRSRV_SYNC_CHECKPOINT_ACTIVE; -+} -+ -+static void -+pvr_fence_context_check_status(struct work_struct *data) -+{ -+ PVRSRVCheckStatus(NULL); -+} -+ -+void -+pvr_context_value_str(struct pvr_fence_context *fctx, char *str, int size) -+{ -+ snprintf(str, size, -+ "%u ctx=%llu refs=%u", -+ atomic_read(&fctx->fence_seqno), -+ fctx->fence_context, -+ refcount_read(&fctx->kref.refcount)); -+} -+ -+static void -+pvr_fence_context_fences_dump(struct pvr_fence_context *fctx, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ struct pvr_fence *pvr_fence; -+ unsigned long flags; -+ char value[128]; -+ -+ spin_lock_irqsave(&fctx->list_lock, flags); -+ pvr_context_value_str(fctx, value, sizeof(value)); -+ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, -+ "%s: @%s", fctx->name, value); -+ list_for_each_entry(pvr_fence, &fctx->fence_list, fence_head) { -+ struct dma_fence *fence = pvr_fence->fence; -+ const char *timeline_value_str = "unknown timeline value"; -+ const char *fence_value_str = "unknown fence value"; -+ -+ pvr_fence->base.ops->fence_value_str(&pvr_fence->base, value, -+ sizeof(value)); -+ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, -+ " @%s", value); -+ -+ if (is_pvr_fence(fence)) -+ continue; -+ -+ if (fence->ops->timeline_value_str) { -+ fence->ops->timeline_value_str(fence, value, -+ sizeof(value)); -+ timeline_value_str = value; -+ } -+ -+ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, -+ " | %s: %s (driver: %s)", -+ fence->ops->get_timeline_name(fence), -+ timeline_value_str, -+ fence->ops->get_driver_name(fence)); -+ -+ if (fence->ops->fence_value_str) { -+ fence->ops->fence_value_str(fence, value, -+ sizeof(value)); -+ fence_value_str = value; -+ } -+ -+ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, -+ " | @%s (foreign)", fence_value_str); -+ } -+ spin_unlock_irqrestore(&fctx->list_lock, flags); -+} -+ -+static inline unsigned int -+pvr_fence_context_seqno_next(struct pvr_fence_context *fctx) -+{ -+ return atomic_inc_return(&fctx->fence_seqno) - 1; -+} -+ -+/* This function prepends seqno to fence name */ -+static inline void -+pvr_fence_prepare_name(char *fence_name, size_t fence_name_size, -+ const char *name, unsigned int seqno) -+{ -+ unsigned int len; -+ -+ len = OSStringUINT32ToStr(fence_name, fence_name_size, seqno); -+ if (likely((len > 0) && (fence_name_size >= (len + 1)))) { -+ fence_name[len] = '-'; -+ fence_name[len + 1] = '\0'; -+ } -+ strlcat(fence_name, name, fence_name_size); -+} -+ -+static void -+pvr_fence_sched_free(struct rcu_head *rcu) -+{ -+ struct pvr_fence *pvr_fence = 
container_of(rcu, struct pvr_fence, rcu); -+ -+ kmem_cache_free(pvr_fence_cache, pvr_fence); -+} -+ -+static inline void -+pvr_fence_context_free_deferred(struct pvr_fence_context *fctx) -+{ -+ struct pvr_fence *pvr_fence, *tmp; -+ LIST_HEAD(deferred_free_list); -+ unsigned long flags; -+ -+ spin_lock_irqsave(&fctx->list_lock, flags); -+ list_for_each_entry_safe(pvr_fence, tmp, -+ &fctx->deferred_free_list, -+ fence_head) -+ list_move(&pvr_fence->fence_head, &deferred_free_list); -+ spin_unlock_irqrestore(&fctx->list_lock, flags); -+ -+ list_for_each_entry_safe(pvr_fence, tmp, -+ &deferred_free_list, -+ fence_head) { -+ list_del(&pvr_fence->fence_head); -+ SyncCheckpointFree(pvr_fence->sync_checkpoint); -+ call_rcu(&pvr_fence->rcu, pvr_fence_sched_free); -+ module_put(THIS_MODULE); -+ } -+} -+ -+void -+pvr_fence_context_free_deferred_callback(void *data) -+{ -+ struct pvr_fence_context *fctx = (struct pvr_fence_context *)data; -+ -+ /* -+ * Free up any fence objects we have deferred freeing. -+ */ -+ pvr_fence_context_free_deferred(fctx); -+} -+ -+static void -+pvr_fence_context_signal_fences(void *data) -+{ -+ struct pvr_fence_context *fctx = (struct pvr_fence_context *)data; -+ struct pvr_fence *pvr_fence, *tmp; -+ unsigned long flags1; -+ -+ LIST_HEAD(signal_list); -+ -+ /* -+ * We can't call fence_signal while holding the lock as we can end up -+ * in a situation whereby pvr_fence_foreign_signal_sync, which also -+ * takes the list lock, ends up being called as a result of the -+ * fence_signal below, i.e. fence_signal(fence) -> fence->callback() -+ * -> fence_signal(foreign_fence) -> foreign_fence->callback() where -+ * the foreign_fence callback is pvr_fence_foreign_signal_sync. -+ * -+ * So extract the items we intend to signal and add them to their own -+ * queue. -+ */ -+ spin_lock_irqsave(&fctx->list_lock, flags1); -+ list_for_each_entry_safe(pvr_fence, tmp, &fctx->signal_list, signal_head) { -+ if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) -+ list_move_tail(&pvr_fence->signal_head, &signal_list); -+ } -+ spin_unlock_irqrestore(&fctx->list_lock, flags1); -+ -+ list_for_each_entry_safe(pvr_fence, tmp, &signal_list, signal_head) { -+ -+ PVR_FENCE_TRACE(&pvr_fence->base, "signalled fence (%s)\n", -+ pvr_fence->name); -+ trace_pvr_fence_signal_fence(pvr_fence); -+ spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags1); -+ list_del(&pvr_fence->signal_head); -+ spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags1); -+ dma_fence_signal(pvr_fence->fence); -+ dma_fence_put(pvr_fence->fence); -+ } -+ -+ /* -+ * Take this opportunity to free up any fence objects we -+ * have deferred freeing. 
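The long comment in pvr_fence_context_signal_fences() above explains why ready fences are first detached onto a private list while the list lock is held and only signalled after it is dropped: a callback fired by dma_fence_signal() may re-enter and take the same lock. A minimal userspace sketch of that detach-then-process idiom follows, with a pthread mutex standing in for the spinlock; all names are illustrative and not taken from the driver.

/* Sketch only: models the "detach under the lock, process outside it"
 * idiom from pvr_fence_context_signal_fences(); not driver code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
    int ready;               /* stands in for "sync checkpoint signalled" */
    struct item *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *pending;      /* protected by list_lock */

static void signal_ready_items(void)
{
    struct item *batch = NULL, **pprev, *it;

    /* Phase 1: under the lock, move ready items onto a private batch list. */
    pthread_mutex_lock(&list_lock);
    pprev = &pending;
    while ((it = *pprev) != NULL) {
        if (it->ready) {
            *pprev = it->next;    /* unlink from the shared list */
            it->next = batch;
            batch = it;
        } else {
            pprev = &it->next;
        }
    }
    pthread_mutex_unlock(&list_lock);

    /* Phase 2: outside the lock, do the "signal" work; a callback fired
     * here can safely take list_lock again without deadlocking. */
    while ((it = batch) != NULL) {
        batch = it->next;
        printf("signalled item %p\n", (void *)it);
        free(it);
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct item *it = calloc(1, sizeof(*it));
        if (!it)
            return 1;
        it->ready = (i != 1);     /* leave one item pending */
        it->next = pending;
        pending = it;
    }
    signal_ready_items();
    return 0;
}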
-+ */ -+ pvr_fence_context_free_deferred(fctx); -+} -+ -+void -+pvr_fence_context_signal_fences_nohw(void *data) -+{ -+ pvr_fence_context_signal_fences(data); -+} -+ -+static void -+pvr_fence_context_destroy_internal(struct pvr_fence_context *fctx) -+{ -+ pvr_fence_context_free_deferred(fctx); -+ -+ if (WARN_ON(!list_empty_careful(&fctx->fence_list))) -+ pvr_fence_context_fences_dump(fctx, NULL, NULL); -+ -+ PVRSRVUnregisterCmdCompleteNotify(fctx->cmd_complete_handle); -+ -+ // wait for all fences to be freed before kmem_cache_destroy() is called -+ rcu_barrier(); -+ -+ /* Destroy pvr_fence object cache, if no one is using it */ -+ WARN_ON(pvr_fence_cache == NULL); -+ mutex_lock(&pvr_fence_cache_mutex); -+ if (--pvr_fence_cache_refcount == 0) -+ kmem_cache_destroy(pvr_fence_cache); -+ mutex_unlock(&pvr_fence_cache_mutex); -+ -+ kfree(fctx); -+} -+ -+static void -+pvr_fence_context_unregister_dbg(void *dbg_request_handle) -+{ -+ PVRSRVUnregisterDeviceDbgRequestNotify(dbg_request_handle); -+} -+ -+static void -+pvr_fence_foreign_context_destroy_work(struct work_struct *data) -+{ -+ struct pvr_fence_context *fctx = -+ container_of(data, struct pvr_fence_context, destroy_work); -+ -+ pvr_fence_context_destroy_internal(fctx); -+} -+ -+static void -+pvr_fence_context_destroy_work(struct work_struct *data) -+{ -+ struct pvr_fence_context *fctx = -+ container_of(data, struct pvr_fence_context, destroy_work); -+ -+ pvr_fence_context_unregister_dbg(fctx->dbg_request_handle); -+ pvr_fence_context_destroy_internal(fctx); -+} -+ -+static void -+pvr_fence_context_debug_request(void *data, u32 verbosity, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ struct pvr_fence_context *fctx = (struct pvr_fence_context *)data; -+ -+ if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM)) -+ pvr_fence_context_fences_dump(fctx, pfnDumpDebugPrintf, -+ pvDumpDebugFile); -+} -+ -+static struct pvr_fence_context * -+pvr_fence_context_create_internal(struct workqueue_struct *fence_status_wq, -+ const char *name, -+ work_func_t destroy_callback) -+{ -+ struct pvr_fence_context *fctx; -+ PVRSRV_ERROR srv_err; -+ -+ fctx = kzalloc(sizeof(*fctx), GFP_KERNEL); -+ if (!fctx) -+ return NULL; -+ -+ spin_lock_init(&fctx->lock); -+ atomic_set(&fctx->fence_seqno, 0); -+ INIT_WORK(&fctx->check_status_work, pvr_fence_context_check_status); -+ INIT_WORK(&fctx->destroy_work, destroy_callback); -+ spin_lock_init(&fctx->list_lock); -+ INIT_LIST_HEAD(&fctx->signal_list); -+ INIT_LIST_HEAD(&fctx->fence_list); -+ INIT_LIST_HEAD(&fctx->deferred_free_list); -+ -+ fctx->fence_wq = fence_status_wq; -+ -+ fctx->fence_context = dma_fence_context_alloc(1); -+ strlcpy(fctx->name, name, sizeof(fctx->name)); -+ -+ srv_err = PVRSRVRegisterCmdCompleteNotify(&fctx->cmd_complete_handle, -+ pvr_fence_context_signal_fences, -+ fctx); -+ if (srv_err != PVRSRV_OK) { -+ pr_err("%s: failed to register command complete callback (%s)\n", -+ __func__, PVRSRVGetErrorString(srv_err)); -+ goto err_free_fctx; -+ } -+ -+ /* Create pvr_fence object cache, if not already created */ -+ mutex_lock(&pvr_fence_cache_mutex); -+ if (pvr_fence_cache_refcount == 0) { -+ pvr_fence_cache = KMEM_CACHE(pvr_fence, 0); -+ if (!pvr_fence_cache) { -+ pr_err("%s: failed to allocate pvr_fence cache\n", -+ __func__); -+ mutex_unlock(&pvr_fence_cache_mutex); -+ goto err_unregister_cmd_complete_notify; -+ } -+ } -+ pvr_fence_cache_refcount++; -+ mutex_unlock(&pvr_fence_cache_mutex); -+ -+ kref_init(&fctx->kref); -+ -+ PVR_FENCE_CTX_TRACE(fctx, 
"created fence context (%s)\n", name); -+ trace_pvr_fence_context_create(fctx); -+ -+ return fctx; -+ -+err_unregister_cmd_complete_notify: -+ PVRSRVUnregisterCmdCompleteNotify(fctx->cmd_complete_handle); -+err_free_fctx: -+ kfree(fctx); -+ return NULL; -+} -+ -+/** -+ * pvr_fence_context_register_dbg - registers the debug handler for a -+ * fence context -+ * -+ * @dbg_request_handle: handle used to keep a reference for deregister -+ * @dev: device to attach the debug notifier. -+ * @pvr_fence_context: context used as data to the callback for debug -+ * -+ * Registers a debug notifier for a given context for a given device. -+ * -+ * Returns PVRSRV_OK if successful. -+ */ -+PVRSRV_ERROR pvr_fence_context_register_dbg(void *dbg_request_handle, -+ void *dev, -+ struct pvr_fence_context *fctx) -+{ -+ PVRSRV_ERROR srv_err; -+ -+ srv_err = PVRSRVRegisterDeviceDbgRequestNotify(dbg_request_handle, -+ dev, -+ pvr_fence_context_debug_request, -+ DEBUG_REQUEST_LINUXFENCE, -+ fctx); -+ if (srv_err != PVRSRV_OK) { -+ pr_err("%s: failed to register debug request callback (%s)\n", -+ __func__, PVRSRVGetErrorString(srv_err)); -+ } -+ -+ return srv_err; -+} -+ -+/** -+ * pvr_fence_foreign_context_create - creates a PVR fence context -+ * @fence_status_wq: linux workqueue used to signal foreign fences -+ * @name: context name (used for debugging) -+ * -+ * Creates a PVR foreign fence context that can be used to create PVR fences -+ * or to create PVR fences from an existing fence. -+ * -+ * pvr_fence_context_destroy should be called to clean up the fence context. -+ * -+ * Returns NULL if a context cannot be created. -+ */ -+struct pvr_fence_context * -+pvr_fence_foreign_context_create(struct workqueue_struct *fence_status_wq, -+ const char *name) -+{ -+ return pvr_fence_context_create_internal(fence_status_wq, name, -+ pvr_fence_foreign_context_destroy_work); -+} -+ -+/** -+ * pvr_fence_context_create - creates a PVR fence context -+ * @dev_cookie: services device cookie -+ * @fence_status_wq: Status workqueue to queue fence update CBs. -+ * @name: context name (used for debugging) -+ * -+ * Creates a PVR fence context that can be used to create PVR fences or to -+ * create PVR fences from an existing fence. -+ * -+ * pvr_fence_context_destroy should be called to clean up the fence context. -+ * -+ * Returns NULL if a context cannot be created. 
-+ */ -+struct pvr_fence_context * -+pvr_fence_context_create(void *dev_cookie, -+ struct workqueue_struct *fence_status_wq, -+ const char *name) -+{ -+ struct pvr_fence_context *fctx; -+ PVRSRV_ERROR eError; -+ -+ fctx = pvr_fence_context_create_internal(fence_status_wq, name, -+ pvr_fence_context_destroy_work); -+ if (fctx == NULL) { -+ pr_err("%s: failed to create fence context", __func__); -+ goto err_out; -+ } -+ -+ fctx->dev_cookie = dev_cookie; -+ -+ eError = pvr_fence_context_register_dbg(&fctx->dbg_request_handle, -+ dev_cookie, -+ fctx); -+ if (eError != PVRSRV_OK) { -+ pr_err("%s: failed to register fence context debug (%s)\n", -+ __func__, PVRSRVGetErrorString(eError)); -+ goto err_destroy_ctx; -+ } -+ -+ return fctx; -+ -+err_destroy_ctx: -+ pvr_fence_context_destroy(fctx); -+err_out: -+ return NULL; -+} -+ -+static void pvr_fence_context_destroy_kref(struct kref *kref) -+{ -+ struct pvr_fence_context *fctx = -+ container_of(kref, struct pvr_fence_context, kref); -+ -+ PVR_FENCE_CTX_TRACE(fctx, "destroyed fence context (%s)\n", fctx->name); -+ -+ trace_pvr_fence_context_destroy_kref(fctx); -+ -+ schedule_work(&fctx->destroy_work); -+} -+ -+/** -+ * pvr_fence_context_destroy - destroys a context -+ * @fctx: PVR fence context to destroy -+ * -+ * Destroys a PVR fence context with the expectation that all fences have been -+ * destroyed. -+ */ -+void -+pvr_fence_context_destroy(struct pvr_fence_context *fctx) -+{ -+ trace_pvr_fence_context_destroy(fctx); -+ -+ kref_put(&fctx->kref, pvr_fence_context_destroy_kref); -+} -+ -+static const char * -+pvr_fence_get_driver_name(struct dma_fence *fence) -+{ -+ return PVR_LDM_DRIVER_REGISTRATION_NAME; -+} -+ -+static const char * -+pvr_fence_get_timeline_name(struct dma_fence *fence) -+{ -+ struct pvr_fence *pvr_fence = to_pvr_fence(fence); -+ -+ if (pvr_fence) -+ return pvr_fence->fctx->name; -+ return NULL; -+} -+ -+static -+void pvr_fence_fence_value_str(struct dma_fence *fence, char *str, int size) -+{ -+ struct pvr_fence *pvr_fence = to_pvr_fence(fence); -+ -+ if (!pvr_fence) -+ return; -+ -+ snprintf(str, size, -+ "%llu: (%s%s) refs=%u fwaddr=%#08x enqueue=%u status=%-9s %s%s", -+ (u64) pvr_fence->fence->seqno, -+ test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, -+ &pvr_fence->fence->flags) ? "+" : "-", -+ test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, -+ &pvr_fence->fence->flags) ? "+" : "-", -+ refcount_read(&pvr_fence->fence->refcount.refcount), -+ SyncCheckpointGetFirmwareAddr( -+ pvr_fence->sync_checkpoint), -+ SyncCheckpointGetEnqueuedCount(pvr_fence->sync_checkpoint), -+ SyncCheckpointGetStateString(pvr_fence->sync_checkpoint), -+ pvr_fence->name, -+ (&pvr_fence->base != pvr_fence->fence) ? 
-+ "(foreign)" : ""); -+} -+ -+static -+void pvr_fence_timeline_value_str(struct dma_fence *fence, char *str, int size) -+{ -+ struct pvr_fence *pvr_fence = to_pvr_fence(fence); -+ -+ if (pvr_fence) -+ pvr_context_value_str(pvr_fence->fctx, str, size); -+} -+ -+static bool -+pvr_fence_enable_signaling(struct dma_fence *fence) -+{ -+ struct pvr_fence *pvr_fence = to_pvr_fence(fence); -+ unsigned long flags; -+ -+ if (!pvr_fence) -+ return false; -+ -+ WARN_ON_SMP(!spin_is_locked(&pvr_fence->fctx->lock)); -+ -+ if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) -+ return false; -+ -+ dma_fence_get(&pvr_fence->base); -+ -+ spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags); -+ list_add_tail(&pvr_fence->signal_head, &pvr_fence->fctx->signal_list); -+ spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags); -+ -+ PVR_FENCE_TRACE(&pvr_fence->base, "signalling enabled (%s)\n", -+ pvr_fence->name); -+ trace_pvr_fence_enable_signaling(pvr_fence); -+ -+ return true; -+} -+ -+static bool -+pvr_fence_is_signaled(struct dma_fence *fence) -+{ -+ struct pvr_fence *pvr_fence = to_pvr_fence(fence); -+ -+ if (pvr_fence) -+ return pvr_fence_sync_is_signaled(pvr_fence, -+ PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT); -+ return false; -+} -+ -+static void -+pvr_fence_release(struct dma_fence *fence) -+{ -+ struct pvr_fence *pvr_fence = to_pvr_fence(fence); -+ unsigned long flags; -+ -+ if (pvr_fence) { -+ struct pvr_fence_context *fctx = pvr_fence->fctx; -+ -+ PVR_FENCE_TRACE(&pvr_fence->base, "released fence (%s)\n", -+ pvr_fence->name); -+ trace_pvr_fence_release(pvr_fence); -+ -+ spin_lock_irqsave(&fctx->list_lock, flags); -+ list_move(&pvr_fence->fence_head, -+ &fctx->deferred_free_list); -+ spin_unlock_irqrestore(&fctx->list_lock, flags); -+ -+ kref_put(&fctx->kref, pvr_fence_context_destroy_kref); -+ } -+} -+ -+const struct dma_fence_ops pvr_fence_ops = { -+ .get_driver_name = pvr_fence_get_driver_name, -+ .get_timeline_name = pvr_fence_get_timeline_name, -+ .fence_value_str = pvr_fence_fence_value_str, -+ .timeline_value_str = pvr_fence_timeline_value_str, -+ .enable_signaling = pvr_fence_enable_signaling, -+ .signaled = pvr_fence_is_signaled, -+ .wait = dma_fence_default_wait, -+ .release = pvr_fence_release, -+}; -+ -+/** -+ * pvr_fence_create - creates a PVR fence -+ * @fctx: PVR fence context on which the PVR fence should be created -+ * @sync_checkpoint_ctx: context in which to create sync checkpoints -+ * @timeline_fd: timeline on which the PVR fence should be created -+ * @name: PVR fence name (used for debugging) -+ * -+ * Creates a PVR fence. -+ * -+ * Once the fence is finished with, pvr_fence_destroy should be called. -+ * -+ * Returns NULL if a PVR fence cannot be created. 
-+ */ -+struct pvr_fence * -+pvr_fence_create(struct pvr_fence_context *fctx, -+ struct SYNC_CHECKPOINT_CONTEXT_TAG *sync_checkpoint_ctx, -+ int timeline_fd, const char *name) -+{ -+ struct pvr_fence *pvr_fence; -+ unsigned int seqno; -+ unsigned long flags; -+ PVRSRV_ERROR srv_err; -+ -+ if (!try_module_get(THIS_MODULE)) -+ goto err_exit; -+ -+ /* Note: As kmem_cache is used to allocate pvr_fence objects, -+ * make sure that all members of pvr_fence struct are initialized -+ * here -+ */ -+ pvr_fence = kmem_cache_alloc(pvr_fence_cache, GFP_KERNEL); -+ if (unlikely(!pvr_fence)) -+ goto err_module_put; -+ -+ srv_err = SyncCheckpointAlloc(sync_checkpoint_ctx, -+ (PVRSRV_TIMELINE) timeline_fd, PVRSRV_NO_FENCE, -+ name, &pvr_fence->sync_checkpoint); -+ if (unlikely(srv_err != PVRSRV_OK)) -+ goto err_free_fence; -+ -+ INIT_LIST_HEAD(&pvr_fence->fence_head); -+ INIT_LIST_HEAD(&pvr_fence->signal_head); -+ pvr_fence->fctx = fctx; -+ seqno = pvr_fence_context_seqno_next(fctx); -+ /* Add the seqno to the fence name for easier debugging */ -+ pvr_fence_prepare_name(pvr_fence->name, sizeof(pvr_fence->name), -+ name, seqno); -+ -+ /* Reset cb to zero */ -+ memset(&pvr_fence->cb, 0, sizeof(pvr_fence->cb)); -+ pvr_fence->fence = &pvr_fence->base; -+ -+ dma_fence_init(&pvr_fence->base, &pvr_fence_ops, &fctx->lock, -+ fctx->fence_context, seqno); -+ -+ spin_lock_irqsave(&fctx->list_lock, flags); -+ list_add_tail(&pvr_fence->fence_head, &fctx->fence_list); -+ spin_unlock_irqrestore(&fctx->list_lock, flags); -+ -+ kref_get(&fctx->kref); -+ -+ PVR_FENCE_TRACE(&pvr_fence->base, "created fence (%s)\n", name); -+ trace_pvr_fence_create(pvr_fence); -+ -+ return pvr_fence; -+ -+err_free_fence: -+ kmem_cache_free(pvr_fence_cache, pvr_fence); -+err_module_put: -+ module_put(THIS_MODULE); -+err_exit: -+ return NULL; -+} -+ -+static const char * -+pvr_fence_foreign_get_driver_name(struct dma_fence *fence) -+{ -+ return PVR_LDM_DRIVER_REGISTRATION_NAME; -+} -+ -+static const char * -+pvr_fence_foreign_get_timeline_name(struct dma_fence *fence) -+{ -+ return "foreign"; -+} -+ -+static -+void pvr_fence_foreign_fence_value_str(struct dma_fence *fence, char *str, -+ int size) -+{ -+ struct pvr_fence *pvr_fence = to_pvr_fence(fence); -+ u32 sync_addr = 0; -+ u32 sync_value_next; -+ -+ if (WARN_ON(!pvr_fence)) -+ return; -+ -+ sync_addr = SyncCheckpointGetFirmwareAddr(pvr_fence->sync_checkpoint); -+ sync_value_next = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; -+ -+ /* -+ * Include the fence flag bits from the foreign fence instead of our -+ * shadow copy. This is done as the shadow fence flag bits aren't used. -+ */ -+ snprintf(str, size, -+ "%llu: (%s%s) refs=%u fwaddr=%#08x cur=%#08x nxt=%#08x %s", -+ (u64) fence->seqno, -+ test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, -+ &pvr_fence->fence->flags) ? "+" : "-", -+ test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, -+ &pvr_fence->fence->flags) ? 
"+" : "-", -+ refcount_read(&fence->refcount.refcount), -+ sync_addr, -+ pvr_fence_sync_value(pvr_fence), -+ sync_value_next, -+ pvr_fence->name); -+} -+ -+static -+void pvr_fence_foreign_timeline_value_str(struct dma_fence *fence, char *str, -+ int size) -+{ -+ struct pvr_fence *pvr_fence = to_pvr_fence(fence); -+ -+ if (pvr_fence) -+ pvr_context_value_str(pvr_fence->fctx, str, size); -+} -+ -+static bool -+pvr_fence_foreign_enable_signaling(struct dma_fence *fence) -+{ -+ WARN_ON("cannot enable signalling on foreign fence"); -+ return false; -+} -+ -+static signed long -+pvr_fence_foreign_wait(struct dma_fence *fence, bool intr, signed long timeout) -+{ -+ WARN_ON("cannot wait on foreign fence"); -+ return 0; -+} -+ -+static void -+pvr_fence_foreign_release(struct dma_fence *fence) -+{ -+ struct pvr_fence *pvr_fence = to_pvr_fence(fence); -+ unsigned long flags; -+ -+ if (pvr_fence) { -+ struct pvr_fence_context *fctx = pvr_fence->fctx; -+ struct dma_fence *foreign_fence = pvr_fence->fence; -+ -+ PVR_FENCE_TRACE(&pvr_fence->base, -+ "released fence for foreign fence %llu#%d (%s)\n", -+ (u64) pvr_fence->fence->context, -+ pvr_fence->fence->seqno, pvr_fence->name); -+ trace_pvr_fence_foreign_release(pvr_fence); -+ -+ spin_lock_irqsave(&fctx->list_lock, flags); -+ list_move(&pvr_fence->fence_head, -+ &fctx->deferred_free_list); -+ spin_unlock_irqrestore(&fctx->list_lock, flags); -+ -+ dma_fence_put(foreign_fence); -+ -+ kref_put(&fctx->kref, -+ pvr_fence_context_destroy_kref); -+ } -+} -+ -+const struct dma_fence_ops pvr_fence_foreign_ops = { -+ .get_driver_name = pvr_fence_foreign_get_driver_name, -+ .get_timeline_name = pvr_fence_foreign_get_timeline_name, -+ .fence_value_str = pvr_fence_foreign_fence_value_str, -+ .timeline_value_str = pvr_fence_foreign_timeline_value_str, -+ .enable_signaling = pvr_fence_foreign_enable_signaling, -+ .wait = pvr_fence_foreign_wait, -+ .release = pvr_fence_foreign_release, -+}; -+ -+static void -+pvr_fence_foreign_signal_sync(struct dma_fence *fence, struct dma_fence_cb *cb) -+{ -+ struct pvr_fence *pvr_fence = container_of(cb, struct pvr_fence, cb); -+ struct pvr_fence_context *fctx = pvr_fence->fctx; -+ -+ /* Callback registered by dma_fence_add_callback can be called from an atomic ctx */ -+ pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_CTX_ATOMIC); -+ -+ trace_pvr_fence_foreign_signal(pvr_fence); -+ -+ queue_work(fctx->fence_wq, &fctx->check_status_work); -+ -+ PVR_FENCE_TRACE(&pvr_fence->base, -+ "foreign fence %llu#%d signalled (%s)\n", -+ (u64) pvr_fence->fence->context, -+ pvr_fence->fence->seqno, pvr_fence->name); -+ -+ /* Drop the reference on the base fence */ -+ dma_fence_put(&pvr_fence->base); -+} -+ -+/** -+ * pvr_fence_create_from_fence - creates a PVR fence from a fence -+ * @fctx: PVR fence context on which the PVR fence should be created -+ * @sync_checkpoint_ctx: context in which to create sync checkpoints -+ * @fence: fence from which the PVR fence should be created -+ * @fence_fd: fd for the sync file to which the fence belongs. If it doesn't -+ * belong to a sync file then PVRSRV_NO_FENCE should be given -+ * instead. -+ * @name: PVR fence name (used for debugging) -+ * -+ * Creates a PVR fence from an existing fence. If the fence is a foreign fence, -+ * i.e. one that doesn't originate from a PVR fence context, then a new PVR -+ * fence will be created using the specified sync_checkpoint_context. -+ * Otherwise, a reference will be taken on the underlying fence and the PVR -+ * fence will be returned. 
-+ * -+ * Once the fence is finished with, pvr_fence_destroy should be called. -+ * -+ * Returns NULL if a PVR fence cannot be created. -+ */ -+ -+struct pvr_fence * -+pvr_fence_create_from_fence(struct pvr_fence_context *fctx, -+ struct SYNC_CHECKPOINT_CONTEXT_TAG *sync_checkpoint_ctx, -+ struct dma_fence *fence, -+ PVRSRV_FENCE fence_fd, -+ const char *name) -+{ -+ struct pvr_fence *pvr_fence = to_pvr_fence(fence); -+ unsigned int seqno; -+ unsigned long flags; -+ PVRSRV_ERROR srv_err; -+ int err; -+ bool mirror_other_dev_fence = false; -+ char tempString[40] = {0}; -+ -+ if (pvr_fence) { -+ if ((fctx->dev_cookie == NULL) || -+ (fctx->dev_cookie == pvr_fence->fctx->dev_cookie)) { -+ if (WARN_ON(fence->ops == &pvr_fence_foreign_ops)) -+ return NULL; -+ dma_fence_get(fence); -+ -+ PVR_FENCE_TRACE(fence, "created fence from PVR fence (%s)\n", -+ name); -+ return pvr_fence; -+ } else { -+ snprintf(tempString, sizeof(tempString), "Mirror(FWAddr0x%x)", -+ SyncCheckpointGetFirmwareAddr(pvr_fence->sync_checkpoint)); -+ mirror_other_dev_fence = true; -+ } -+ } -+ -+ if (!try_module_get(THIS_MODULE)) -+ goto err_exit; -+ -+ /* Note: As kmem_cache is used to allocate pvr_fence objects, -+ * make sure that all members of pvr_fence struct are initialized -+ * here -+ */ -+ pvr_fence = kmem_cache_alloc(pvr_fence_cache, GFP_KERNEL); -+ if (!pvr_fence) { -+ pr_err("%s: kmem_cache_alloc() failed", -+ __func__); -+ goto err_module_put; -+ } -+ -+ srv_err = SyncCheckpointAlloc(sync_checkpoint_ctx, -+ SYNC_CHECKPOINT_FOREIGN_CHECKPOINT, -+ fence_fd, -+ mirror_other_dev_fence ? tempString : name, -+ &pvr_fence->sync_checkpoint); -+ if (srv_err != PVRSRV_OK) { -+ pr_err("%s: SyncCheckpointAlloc() failed (srv_err=%d)", -+ __func__, srv_err); -+ goto err_free_pvr_fence; -+ } -+ -+ INIT_LIST_HEAD(&pvr_fence->fence_head); -+ INIT_LIST_HEAD(&pvr_fence->signal_head); -+ pvr_fence->fctx = fctx; -+ pvr_fence->fence = dma_fence_get(fence); -+ seqno = pvr_fence_context_seqno_next(fctx); -+ /* Add the seqno to the fence name for easier debugging */ -+ pvr_fence_prepare_name(pvr_fence->name, sizeof(pvr_fence->name), -+ name, seqno); -+ -+ /* -+ * We use the base fence to refcount the PVR fence and to do the -+ * necessary clean up once the refcount drops to 0. -+ */ -+ dma_fence_init(&pvr_fence->base, &pvr_fence_foreign_ops, &fctx->lock, -+ fctx->fence_context, seqno); -+ -+ /* -+ * Take an extra reference on the base fence that gets dropped when the -+ * foreign fence is signalled. -+ */ -+ dma_fence_get(&pvr_fence->base); -+ -+ spin_lock_irqsave(&fctx->list_lock, flags); -+ list_add_tail(&pvr_fence->fence_head, &fctx->fence_list); -+ spin_unlock_irqrestore(&fctx->list_lock, flags); -+ kref_get(&fctx->kref); -+ -+ PVR_FENCE_TRACE(&pvr_fence->base, -+ "created fence from foreign fence %llu#%d (%s)\n", -+ (u64) pvr_fence->fence->context, -+ pvr_fence->fence->seqno, name); -+ -+ err = dma_fence_add_callback(fence, &pvr_fence->cb, -+ pvr_fence_foreign_signal_sync); -+ if (err) { -+ if (err != -ENOENT) { -+ pr_err("%s: failed to add fence callback (err=%d)", -+ __func__, err); -+ goto err_put_ref; -+ } -+ -+ -+ /* -+ * The fence has already signalled so set the sync as signalled. -+ * The "signalled" hwperf packet should be emitted because the -+ * callback won't be called for already signalled fence hence, -+ * PVRSRV_FENCE_FLAG_NONE flag. 
-+ */ -+ pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_NONE); -+ PVR_FENCE_TRACE(&pvr_fence->base, -+ "foreign fence %llu#%d already signaled (%s)\n", -+ (u64) pvr_fence->fence->context, -+ pvr_fence->fence->seqno, -+ name); -+ dma_fence_put(&pvr_fence->base); -+ } -+ -+ trace_pvr_fence_foreign_create(pvr_fence); -+ -+ return pvr_fence; -+ -+err_put_ref: -+ kref_put(&fctx->kref, pvr_fence_context_destroy_kref); -+ spin_lock_irqsave(&fctx->list_lock, flags); -+ list_del(&pvr_fence->fence_head); -+ spin_unlock_irqrestore(&fctx->list_lock, flags); -+ SyncCheckpointFree(pvr_fence->sync_checkpoint); -+err_free_pvr_fence: -+ kmem_cache_free(pvr_fence_cache, pvr_fence); -+err_module_put: -+ module_put(THIS_MODULE); -+err_exit: -+ return NULL; -+} -+ -+/** -+ * pvr_fence_destroy - destroys a PVR fence -+ * @pvr_fence: PVR fence to destroy -+ * -+ * Destroys a PVR fence. Upon return, the PVR fence may still exist if something -+ * else still references the underlying fence, e.g. a reservation object, or if -+ * software signalling has been enabled and the fence hasn't yet been signalled. -+ */ -+void -+pvr_fence_destroy(struct pvr_fence *pvr_fence) -+{ -+ PVR_FENCE_TRACE(&pvr_fence->base, "destroyed fence (%s)\n", -+ pvr_fence->name); -+ -+ dma_fence_put(&pvr_fence->base); -+} -+ -+/** -+ * pvr_fence_sw_signal - signals a PVR fence sync -+ * @pvr_fence: PVR fence to signal -+ * -+ * Sets the PVR fence sync value to signalled. -+ * -+ * Returns -EINVAL if the PVR fence represents a foreign fence. -+ */ -+int -+pvr_fence_sw_signal(struct pvr_fence *pvr_fence) -+{ -+ if (!is_our_fence(pvr_fence->fctx, &pvr_fence->base)) -+ return -EINVAL; -+ -+ pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_NONE); -+ -+ queue_work(pvr_fence->fctx->fence_wq, -+ &pvr_fence->fctx->check_status_work); -+ -+ PVR_FENCE_TRACE(&pvr_fence->base, "sw set fence sync signalled (%s)\n", -+ pvr_fence->name); -+ -+ return 0; -+} -+ -+/** -+ * pvr_fence_sw_error - errors the sync checkpoint backing a PVR fence -+ * @pvr_fence: PVR fence to error -+ * -+ * Sets the PVR fence sync checkpoint value to errored. -+ * -+ * Returns -EINVAL if the PVR fence represents a foreign fence. -+ */ -+int -+pvr_fence_sw_error(struct pvr_fence *pvr_fence) -+{ -+ if (!is_our_fence(pvr_fence->fctx, &pvr_fence->base)) -+ return -EINVAL; -+ -+ SyncCheckpointError(pvr_fence->sync_checkpoint, PVRSRV_FENCE_FLAG_NONE); -+ PVR_FENCE_TRACE(&pvr_fence->base, "sw set fence sync errored (%s)\n", -+ pvr_fence->name); -+ -+ return 0; -+} -+ -+int -+pvr_fence_get_checkpoints(struct pvr_fence **pvr_fences, u32 nr_fences, -+ struct SYNC_CHECKPOINT_TAG **fence_checkpoints) -+{ -+ struct SYNC_CHECKPOINT_TAG **next_fence_checkpoint = fence_checkpoints; -+ struct pvr_fence **next_pvr_fence = pvr_fences; -+ int fence_checkpoint_idx; -+ -+ if (nr_fences > 0) { -+ -+ for (fence_checkpoint_idx = 0; fence_checkpoint_idx < nr_fences; -+ fence_checkpoint_idx++) { -+ struct pvr_fence *next_fence = *next_pvr_fence++; -+ *next_fence_checkpoint++ = next_fence->sync_checkpoint; -+ /* Take reference on sync checkpoint (will be dropped -+ * later by kick code) -+ */ -+ SyncCheckpointTakeRef(next_fence->sync_checkpoint); -+ } -+ } -+ -+ return 0; -+} -+ -+struct SYNC_CHECKPOINT_TAG * -+pvr_fence_get_checkpoint(struct pvr_fence *update_fence) -+{ -+ return update_fence->sync_checkpoint; -+} -+ -+/** -+ * pvr_fence_dump_info_on_stalled_ufos - displays debug -+ * information on a native fence associated with any of -+ * the ufos provided. 
This function will be called from -+ * pvr_sync_file.c if the driver determines any GPU work -+ * is stuck waiting for a sync checkpoint representing a -+ * foreign sync to be signalled. -+ * @fctx: fence context -+ * @nr_ufos: number of ufos in vaddrs -+ * @vaddrs: array of FW addresses of UFOs which the -+ * driver is waiting on. -+ * -+ * Output debug information to kernel log on linux fences -+ * which would be responsible for signalling the sync -+ * checkpoints indicated by the ufo vaddresses. -+ * -+ * Returns the number of ufos in the array which were found -+ * to be associated with foreign syncs. -+ */ -+u32 pvr_fence_dump_info_on_stalled_ufos(struct pvr_fence_context *fctx, -+ u32 nr_ufos, u32 *vaddrs) -+{ -+ int our_ufo_ct = 0; -+ struct pvr_fence *pvr_fence; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&fctx->list_lock, flags); -+ /* dump info on any ufos in our active list */ -+ list_for_each_entry(pvr_fence, &fctx->fence_list, fence_head) { -+ u32 *this_ufo_vaddr = vaddrs; -+ int ufo_num; -+ DUMPDEBUG_PRINTF_FUNC *pfnDummy = NULL; -+ -+ for (ufo_num = 0; ufo_num < nr_ufos; ufo_num++) { -+ struct SYNC_CHECKPOINT_TAG *checkpoint = -+ pvr_fence->sync_checkpoint; -+ const u32 fence_ufo_addr = -+ SyncCheckpointGetFirmwareAddr(checkpoint); -+ -+ if (fence_ufo_addr != this_ufo_vaddr[ufo_num]) -+ continue; -+ -+ /* Dump sync info */ -+ PVR_DUMPDEBUG_LOG(pfnDummy, NULL, -+ "\tSyncID = %d, FWAddr = 0x%08x: TLID = %d (Foreign Fence - [%p] %s)", -+ SyncCheckpointGetId(checkpoint), -+ fence_ufo_addr, -+ SyncCheckpointGetTimeline(checkpoint), -+ pvr_fence->fence, -+ pvr_fence->name); -+ our_ufo_ct++; -+ } -+ } -+ spin_unlock_irqrestore(&fctx->list_lock, flags); -+ return our_ufo_ct; -+} -diff --git a/drivers/gpu/drm/img-rogue/pvr_fence.h b/drivers/gpu/drm/img-rogue/pvr_fence.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_fence.h -@@ -0,0 +1,240 @@ -+/* -+ * @File -+ * @Title PowerVR Linux fence interface -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. 
If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#if !defined(__PVR_FENCE_H__) -+#define __PVR_FENCE_H__ -+ -+#include -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) -+static inline void pvr_fence_cleanup(void) -+{ -+} -+#else -+#include "services_kernel_client.h" -+#include "pvr_linux_fence.h" -+#include -+#include -+#include -+ -+struct SYNC_CHECKPOINT_CONTEXT_TAG; -+struct SYNC_CHECKPOINT_TAG; -+ -+/** -+ * pvr_fence_context - PVR fence context used to create and manage PVR fences -+ * @lock: protects the context and fences created on the context -+ * @name: fence context name (used for debugging) -+ * @dbg_request_handle: handle for callback used to dump debug data -+ * @fence_context: fence context with which to associate fences -+ * @fence_seqno: sequence number to use for the next fence -+ * @fence_wq: work queue for signalled fence work -+ * @check_status_work: work item used to inform services when a foreign fence -+ * has signalled -+ * @cmd_complete_handle: handle for callback used to signal fences when fence -+ * syncs are met -+ * @list_lock: protects the active and active foreign lists -+ * @signal_list: list of fences waiting to be signalled -+ * @fence_list: list of fences (used for debugging) -+ * @deferred_free_list: list of fences that we will free when we are no longer -+ * holding spinlocks. The frees get implemented when an update fence is -+ * signalled or the context is freed. 
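The context also carries a kref and a destroy work item (see the struct just below): pvr_fence_create() takes a context reference for every fence, pvr_fence_release() drops it, and pvr_fence_context_destroy() only triggers the real teardown once the last reference is gone. A compact userspace model of that reference-counted, deferred lifetime is sketched here; the names are illustrative, and the driver defers the destroy to a workqueue rather than freeing inline.

/* Sketch only: models the kref-style lifetime rule visible in pvr_fence.c;
 * not the driver's API. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
    atomic_int refs;
};

static void ctx_destroy(struct ctx *c)
{
    /* In the driver this runs from destroy_work on a workqueue;
     * here we just free immediately. */
    printf("last reference dropped, destroying context\n");
    free(c);
}

static struct ctx *ctx_create(void)
{
    struct ctx *c = calloc(1, sizeof(*c));
    if (c)
        atomic_store(&c->refs, 1);    /* creator's reference */
    return c;
}

static void ctx_get(struct ctx *c) { atomic_fetch_add(&c->refs, 1); }

static void ctx_put(struct ctx *c)
{
    if (atomic_fetch_sub(&c->refs, 1) == 1)
        ctx_destroy(c);
}

int main(void)
{
    struct ctx *c = ctx_create();

    if (!c)
        return 1;
    ctx_get(c);   /* a fence created on the context takes a reference */
    ctx_put(c);   /* "destroy" the context: drop the creator's reference */
    ctx_put(c);   /* last fence released: context actually goes away */
    return 0;
}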
-+ */ -+struct pvr_fence_context { -+ spinlock_t lock; -+ char name[32]; -+ void *dbg_request_handle; -+ u64 fence_context; -+ atomic_t fence_seqno; -+ -+ struct workqueue_struct *fence_wq; -+ struct work_struct check_status_work; -+ -+ void *cmd_complete_handle; -+ -+ spinlock_t list_lock; -+ struct list_head signal_list; -+ struct list_head fence_list; -+ struct list_head deferred_free_list; -+ -+ struct kref kref; -+ struct work_struct destroy_work; -+ void *dev_cookie; -+}; -+ -+/** -+ * pvr_fence - PVR fence that represents both native and foreign fences -+ * @base: fence structure -+ * @fctx: fence context on which this fence was created -+ * @name: fence name (used for debugging) -+ * @fence: pointer to base fence structure or foreign fence -+ * @sync_checkpoint: services sync checkpoint used by hardware -+ * @fence_head: entry on the context fence and deferred free list -+ * @signal_head: entry on the context signal list -+ * @cb: foreign fence callback to set the sync to signalled -+ */ -+struct pvr_fence { -+ struct dma_fence base; -+ struct pvr_fence_context *fctx; -+ char name[32]; -+ -+ struct dma_fence *fence; -+ struct SYNC_CHECKPOINT_TAG *sync_checkpoint; -+ -+ struct list_head fence_head; -+ struct list_head signal_head; -+ struct dma_fence_cb cb; -+ struct rcu_head rcu; -+}; -+ -+extern const struct dma_fence_ops pvr_fence_ops; -+extern const struct dma_fence_ops pvr_fence_foreign_ops; -+ -+static inline bool is_our_fence(struct pvr_fence_context *fctx, -+ struct dma_fence *fence) -+{ -+ return (fence->context == fctx->fence_context); -+} -+ -+static inline bool is_pvr_fence(struct dma_fence *fence) -+{ -+ return ((fence->ops == &pvr_fence_ops) || -+ (fence->ops == &pvr_fence_foreign_ops)); -+} -+ -+static inline struct pvr_fence *to_pvr_fence(struct dma_fence *fence) -+{ -+ if (is_pvr_fence(fence)) -+ return container_of(fence, struct pvr_fence, base); -+ -+ return NULL; -+} -+ -+PVRSRV_ERROR pvr_fence_context_register_dbg(void *dbg_request_handle, -+ void *dev, -+ struct pvr_fence_context *fctx); -+struct pvr_fence_context * -+pvr_fence_foreign_context_create(struct workqueue_struct *fence_status_wq, -+ const char *name); -+struct pvr_fence_context * -+pvr_fence_context_create(void *dev_cookie, -+ struct workqueue_struct *fence_status_wq, -+ const char *name); -+void pvr_fence_context_destroy(struct pvr_fence_context *fctx); -+void pvr_context_value_str(struct pvr_fence_context *fctx, char *str, int size); -+ -+struct pvr_fence * -+pvr_fence_create(struct pvr_fence_context *fctx, -+ struct SYNC_CHECKPOINT_CONTEXT_TAG *sync_checkpoint_ctx, -+ int timeline_fd, const char *name); -+struct pvr_fence * -+pvr_fence_create_from_fence(struct pvr_fence_context *fctx, -+ struct SYNC_CHECKPOINT_CONTEXT_TAG *sync_checkpoint_ctx, -+ struct dma_fence *fence, -+ PVRSRV_FENCE fence_fd, -+ const char *name); -+void pvr_fence_destroy(struct pvr_fence *pvr_fence); -+int pvr_fence_sw_signal(struct pvr_fence *pvr_fence); -+int pvr_fence_sw_error(struct pvr_fence *pvr_fence); -+ -+int pvr_fence_get_checkpoints(struct pvr_fence **pvr_fences, u32 nr_fences, -+ struct SYNC_CHECKPOINT_TAG **fence_checkpoints); -+struct SYNC_CHECKPOINT_TAG * -+pvr_fence_get_checkpoint(struct pvr_fence *update_fence); -+ -+void pvr_fence_context_signal_fences_nohw(void *data); -+ -+void pvr_fence_context_free_deferred_callback(void *data); -+ -+u32 pvr_fence_dump_info_on_stalled_ufos(struct pvr_fence_context *fctx, -+ u32 nr_ufos, -+ u32 *vaddrs); -+ -+static inline void pvr_fence_cleanup(void) -+{ -+ /* -+ * 
Ensure all PVR fence contexts have been destroyed, by flushing -+ * the global workqueue. -+ */ -+ flush_scheduled_work(); -+} -+ -+#if defined(PVR_FENCE_DEBUG) -+#define PVR_FENCE_CTX_TRACE(c, fmt, ...) \ -+ do { \ -+ struct pvr_fence_context *__fctx = (c); \ -+ pr_err("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \ -+ ## __VA_ARGS__); \ -+ } while (0) -+#else -+#define PVR_FENCE_CTX_TRACE(c, fmt, ...) -+#endif -+ -+#define PVR_FENCE_CTX_WARN(c, fmt, ...) \ -+ do { \ -+ struct pvr_fence_context *__fctx = (c); \ -+ pr_warn("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \ -+ ## __VA_ARGS__); \ -+ } while (0) -+ -+#define PVR_FENCE_CTX_ERR(c, fmt, ...) \ -+ do { \ -+ struct pvr_fence_context *__fctx = (c); \ -+ pr_err("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \ -+ ## __VA_ARGS__); \ -+ } while (0) -+ -+#if defined(PVR_FENCE_DEBUG) -+#define PVR_FENCE_TRACE(f, fmt, ...) \ -+ DMA_FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__) -+#else -+#define PVR_FENCE_TRACE(f, fmt, ...) -+#endif -+ -+#define PVR_FENCE_WARN(f, fmt, ...) \ -+ DMA_FENCE_WARN(f, "(PVR) " fmt, ## __VA_ARGS__) -+ -+#define PVR_FENCE_ERR(f, fmt, ...) \ -+ DMA_FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__) -+ -+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */ -+#endif /* !defined(__PVR_FENCE_H__) */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_fence_trace.h b/drivers/gpu/drm/img-rogue/pvr_fence_trace.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_fence_trace.h -@@ -0,0 +1,225 @@ -+/* -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". 
-+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#undef TRACE_SYSTEM -+#define TRACE_SYSTEM pvr_fence -+ -+#if !defined(_TRACE_PVR_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) -+#define _TRACE_PVR_FENCE_H -+ -+#include -+ -+struct pvr_fence; -+struct pvr_fence_context; -+ -+DECLARE_EVENT_CLASS(pvr_fence_context, -+ -+ TP_PROTO(struct pvr_fence_context *fctx), -+ TP_ARGS(fctx), -+ -+ TP_STRUCT__entry( -+ __string(name, fctx->name) -+ __array(char, val, 128) -+ ), -+ -+ TP_fast_assign( -+ __assign_str(name, fctx->name) -+ pvr_context_value_str(fctx, __entry->val, -+ sizeof(__entry->val)); -+ ), -+ -+ TP_printk("name=%s val=%s", -+ __get_str(name), -+ __entry->val -+ ) -+); -+ -+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_create, -+ TP_PROTO(struct pvr_fence_context *fctx), -+ TP_ARGS(fctx) -+); -+ -+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_destroy, -+ TP_PROTO(struct pvr_fence_context *fctx), -+ TP_ARGS(fctx) -+); -+ -+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_destroy_kref, -+ TP_PROTO(struct pvr_fence_context *fctx), -+ TP_ARGS(fctx) -+); -+ -+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_signal_fences, -+ TP_PROTO(struct pvr_fence_context *fctx), -+ TP_ARGS(fctx) -+); -+ -+DECLARE_EVENT_CLASS(pvr_fence, -+ TP_PROTO(struct pvr_fence *fence), -+ TP_ARGS(fence), -+ -+ TP_STRUCT__entry( -+ __string(driver, -+ fence->base.ops->get_driver_name(&fence->base)) -+ __string(timeline, -+ fence->base.ops->get_timeline_name(&fence->base)) -+ __array(char, val, 128) -+ __field(u64, context) -+ ), -+ -+ TP_fast_assign( -+ __assign_str(driver, -+ fence->base.ops->get_driver_name(&fence->base)) -+ __assign_str(timeline, -+ fence->base.ops->get_timeline_name(&fence->base)) -+ fence->base.ops->fence_value_str(&fence->base, -+ __entry->val, sizeof(__entry->val)); -+ __entry->context = fence->base.context; -+ ), -+ -+ TP_printk("driver=%s timeline=%s ctx=%llu val=%s", -+ __get_str(driver), __get_str(timeline), -+ __entry->context, __entry->val -+ ) -+); -+ -+DEFINE_EVENT(pvr_fence, pvr_fence_create, -+ TP_PROTO(struct pvr_fence *fence), -+ TP_ARGS(fence) -+); -+ -+DEFINE_EVENT(pvr_fence, pvr_fence_release, -+ TP_PROTO(struct pvr_fence *fence), -+ TP_ARGS(fence) -+); -+ -+DEFINE_EVENT(pvr_fence, pvr_fence_enable_signaling, -+ TP_PROTO(struct pvr_fence *fence), -+ TP_ARGS(fence) -+); -+ -+DEFINE_EVENT(pvr_fence, pvr_fence_signal_fence, -+ TP_PROTO(struct pvr_fence *fence), -+ TP_ARGS(fence) -+); -+ -+DECLARE_EVENT_CLASS(pvr_fence_foreign, -+ TP_PROTO(struct pvr_fence *fence), -+ TP_ARGS(fence), -+ -+ TP_STRUCT__entry( -+ __string(driver, -+ fence->base.ops->get_driver_name(&fence->base)) -+ __string(timeline, -+ fence->base.ops->get_timeline_name(&fence->base)) -+ __array(char, val, 128) -+ __field(u64, context) -+ __string(foreign_driver, -+ fence->fence->ops->get_driver_name ? -+ fence->fence->ops->get_driver_name(fence->fence) : -+ "unknown") -+ __string(foreign_timeline, -+ fence->fence->ops->get_timeline_name ? 
-+ fence->fence->ops->get_timeline_name(fence->fence) : -+ "unknown") -+ __array(char, foreign_val, 128) -+ __field(u64, foreign_context) -+ ), -+ -+ TP_fast_assign( -+ __assign_str(driver, -+ fence->base.ops->get_driver_name(&fence->base)) -+ __assign_str(timeline, -+ fence->base.ops->get_timeline_name(&fence->base)) -+ fence->base.ops->fence_value_str(&fence->base, __entry->val, -+ sizeof(__entry->val)); -+ __entry->context = fence->base.context; -+ __assign_str(foreign_driver, -+ fence->fence->ops->get_driver_name ? -+ fence->fence->ops->get_driver_name(fence->fence) : -+ "unknown") -+ __assign_str(foreign_timeline, -+ fence->fence->ops->get_timeline_name ? -+ fence->fence->ops->get_timeline_name(fence->fence) : -+ "unknown") -+ fence->fence->ops->fence_value_str ? -+ fence->fence->ops->fence_value_str( -+ fence->fence, __entry->foreign_val, -+ sizeof(__entry->foreign_val)) : -+ (void) strlcpy(__entry->foreign_val, -+ "unknown", sizeof(__entry->foreign_val)); -+ __entry->foreign_context = fence->fence->context; -+ ), -+ -+ TP_printk("driver=%s timeline=%s ctx=%llu val=%s foreign: driver=%s timeline=%s ctx=%llu val=%s", -+ __get_str(driver), __get_str(timeline), __entry->context, -+ __entry->val, __get_str(foreign_driver), -+ __get_str(foreign_timeline), __entry->foreign_context, -+ __entry->foreign_val -+ ) -+); -+ -+DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_create, -+ TP_PROTO(struct pvr_fence *fence), -+ TP_ARGS(fence) -+); -+ -+DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_release, -+ TP_PROTO(struct pvr_fence *fence), -+ TP_ARGS(fence) -+); -+ -+DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_signal, -+ TP_PROTO(struct pvr_fence *fence), -+ TP_ARGS(fence) -+); -+ -+#endif /* _TRACE_PVR_FENCE_H */ -+ -+#undef TRACE_INCLUDE_PATH -+#undef TRACE_INCLUDE_FILE -+#define TRACE_INCLUDE_PATH . -+ -+/* This is needed because the name of this file doesn't match TRACE_SYSTEM. */ -+#define TRACE_INCLUDE_FILE pvr_fence_trace -+ -+/* This part must be outside protection */ -+#include -diff --git a/drivers/gpu/drm/img-rogue/pvr_gputrace.c b/drivers/gpu/drm/img-rogue/pvr_gputrace.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_gputrace.c -@@ -0,0 +1,1685 @@ -+/*************************************************************************/ /*! -+@File pvr_gputrace.c -+@Title PVR GPU Trace module Linux implementation -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include -+#include -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) -+#include -+#else -+#include -+#endif -+ -+#include "pvrsrv_error.h" -+#include "pvrsrv_apphint.h" -+#include "pvr_debug.h" -+#include "ospvr_gputrace.h" -+#include "rgxhwperf.h" -+#include "rgxtimecorr.h" -+#include "device.h" -+#include "trace_events.h" -+#include "pvrsrv.h" -+#include "pvrsrv_tlstreams.h" -+#include "tlclient.h" -+#include "tlstream.h" -+#include "pvr_debug.h" -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) -+#define CREATE_TRACE_POINTS -+#include "rogue_trace_events.h" -+#endif -+ -+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -+#include "pvr_gpuwork.h" -+#define TRACE_FS_CLK "/sys/kernel/tracing/trace_clock" -+#else -+#define TRACE_FS_CLK "/sys/kernel/debug/tracing/trace_clock" -+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ -+ -+#if defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) -+#include "pvr_gpufreq.h" -+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) */ -+ -+#if defined(SUPPORT_RGX) -+#if defined(PVRSRV_FORCE_HWPERF_TO_SCHED_CLK) -+#define TRACE_CLK RGXTIMECORR_CLOCK_SCHED -+#define TRACE_CLK_STR "local\n" -+#else -+#define TRACE_CLK RGXTIMECORR_CLOCK_MONO -+#define TRACE_CLK_STR "mono\n" -+#endif -+#endif -+ -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) || \ -+ defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) || \ -+ defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) -+#define TRACE_EVENTS_DEFINED -+#endif -+ -+/****************************************************************************** -+ Module internal implementation -+******************************************************************************/ -+ -+typedef enum { -+ PVR_GPUTRACE_SWITCH_TYPE_UNDEF = 0, -+ -+ PVR_GPUTRACE_SWITCH_TYPE_BEGIN = 1, -+ PVR_GPUTRACE_SWITCH_TYPE_END = 2, -+ PVR_GPUTRACE_SWITCH_TYPE_SINGLE = 3 -+} PVR_GPUTRACE_SWITCH_TYPE; -+ -+typedef struct RGX_HWPERF_FTRACE_DATA { -+ /* This lock ensures the HWPerf TL stream reading resources are not destroyed -+ * by one thread disabling it while another is reading from it. Keeps the -+ * state and resource create/destroy atomic and consistent. 
*/ -+ POS_LOCK hFTraceResourceLock; -+ -+ IMG_HANDLE hGPUTraceCmdCompleteHandle; -+ IMG_HANDLE hGPUFTraceTLStream; -+ IMG_UINT64 ui64LastSampledTimeCorrOSTimeStamp; -+ IMG_UINT32 ui32FTraceLastOrdinal; -+ IMG_BOOL bTrackOrdinals; -+} RGX_HWPERF_FTRACE_DATA; -+ -+/* This lock ensures state change of GPU_TRACING on/off is done atomically */ -+static POS_LOCK ghGPUTraceStateLock; -+static IMG_BOOL gbFTraceGPUEventsEnabled = PVRSRV_APPHINT_ENABLEFTRACEGPU; -+ -+#if !defined(PVRSRV_FORCE_HWPERF_TO_SCHED_CLK) -+/* This is the FTrace clock source prior to driver initialisation */ -+static IMG_CHAR gszLastClockSource[32] = {0}; -+#endif -+ -+/* This lock ensures that the reference counting operation on the FTrace UFO -+ * events and enable/disable operation on firmware event are performed as -+ * one atomic operation. This should ensure that there are no race conditions -+ * between reference counting and firmware event state change. -+ * See below comment for guiUfoEventRef. -+ */ -+static POS_LOCK ghLockFTraceEventLock; -+ -+/* Multiple FTrace UFO events are reflected in the firmware as only one event. When -+ * we enable FTrace UFO event we want to also at the same time enable it in -+ * the firmware. Since there is a multiple-to-one relation between those events -+ * we count how many FTrace UFO events is enabled. If at least one event is -+ * enabled we enabled the firmware event. When all FTrace UFO events are disabled -+ * we disable firmware event. */ -+static IMG_UINT guiUfoEventRef; -+ -+/****************************************************************************** -+ Module In-bound API -+******************************************************************************/ -+ -+static PVRSRV_ERROR _GpuTraceDisable( -+ PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ IMG_BOOL bDeInit); -+ -+#if defined(TRACE_EVENTS_DEFINED) -+static void _GpuTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE); -+static void _GpuTraceProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo, void *pBuffer, -+ IMG_UINT32 ui32ReadLen); -+#endif -+ -+static void _FTrace_FWOnReaderOpenCB(void *pvArg) -+{ -+ PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*) pvArg; -+ psRgxDevInfo->bSuspendHWPerfL2DataCopy[RGX_HWPERF_L2_STREAM_FTRACE] = IMG_FALSE; -+} -+ -+/* Currently supported by default */ -+#if defined(SUPPORT_TL_PRODUCER_CALLBACK) -+static PVRSRV_ERROR GPUTraceTLCB(IMG_HANDLE hStream, -+ IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser) -+{ -+ /* IN DEV: Not required as the streams goal is to be a copy of HWPerf */ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_UNREFERENCED_PARAMETER(hStream); -+ PVR_UNREFERENCED_PARAMETER(ui32ReqOp); -+ PVR_UNREFERENCED_PARAMETER(ui32Resp); -+ PVR_UNREFERENCED_PARAMETER(pvUser); -+ -+ return eError; -+} -+#endif /* defined(SUPPORT_TL_PRODUCER_CALLBACK) */ -+ -+#if defined(SUPPORT_RGX) -+#if !defined(PVRSRV_FORCE_HWPERF_TO_SCHED_CLK) -+/* Configure the FTrace clock source to use the DDK apphint clock source */ -+static void PVRGpuTraceInitFTraceClockSource(void) -+{ -+ int ret, i, j; -+ bool bFound = false; -+ loff_t pos = 0; -+ char str[64]; -+ -+ /* Just force the value to be what the DDK says it is -+ Note for filp_open, the mode is only used for O_CREAT -+ Hence its value doesn't matter in this context -+ */ -+ struct file *filep = filp_open(TRACE_FS_CLK, O_RDWR, 0); -+ PVR_LOG_RETURN_VOID_IF_FALSE(!IS_ERR(filep), "TraceFS not found"); -+ -+ PVR_LOG_VA(PVR_DBG_MESSAGE, -+ "Writing %s to %s to enable parallel HWPerf and FTrace support", -+ TRACE_CLK_STR, TRACE_FS_CLK); -+#if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) -+ ret = kernel_read(filep, str, sizeof(str)-1, &pos); -+#else -+ ret = kernel_read(filep, pos, str, sizeof(str)-1); -+#endif -+ PVR_LOG_RETURN_VOID_IF_FALSE((ret > 0), "TraceFS Read failed"); -+ str[ret] = 0; -+ pos = 0; -+ -+ /* Determine clock value. trace_clock value is stored like [] -+ File example: [local] global counter mono mono_raw x86-tsc -+ */ -+ for (i = 0, j = 0; i < sizeof(str) && j < sizeof(gszLastClockSource); i++) -+ { -+ if (str[i] == ']') -+ { -+ break; -+ } -+ else if (str[i] == '[') -+ { -+ bFound = true; -+ } -+ else if (bFound) -+ { -+ gszLastClockSource[j] = str[i]; -+ j++; -+ } -+ } -+ PVR_LOG_VA(PVR_DBG_MESSAGE, "Got %s from FTraceFS", gszLastClockSource); -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) -+ ret = kernel_write(filep, TRACE_CLK_STR, sizeof(TRACE_CLK_STR), &pos); -+#else -+ ret = kernel_write(filep, TRACE_CLK_STR, sizeof(TRACE_CLK_STR), pos); -+#endif -+ PVR_LOG_IF_FALSE((ret > 0), "Setting FTrace clock source failed"); -+ -+ filp_close(filep, NULL); -+} -+ -+/* Reset the FTrace clock source to the original clock source */ -+static void PVRGpuTraceDeinitFTraceClockSource(void) -+{ -+ int ret; -+ loff_t pos = 0; -+ -+ /* Return the original value of the file */ -+ struct file *filep = filp_open(TRACE_FS_CLK, O_WRONLY, 0); -+ PVR_LOG_RETURN_VOID_IF_FALSE(!IS_ERR(filep), "TraceFS not found"); -+ -+ /* FTraceFS write will ignore any writes to it that don't match a clock source */ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) -+ ret = kernel_write(filep, gszLastClockSource, sizeof(gszLastClockSource), &pos); -+#else -+ ret = kernel_write(filep, gszLastClockSource, sizeof(gszLastClockSource), pos); -+#endif -+ PVR_LOG_IF_FALSE((ret >= 0), "Setting FTrace clock source failed"); -+ -+ filp_close(filep, NULL); -+} -+#endif /* !defined(SUPPORT_ANDROID_PLATFORM) */ -+#endif /* defined(SUPPORT_RGX) */ -+ -+PVRSRV_ERROR PVRGpuTraceSupportInit(void) -+{ -+ PVRSRV_ERROR eError; -+ -+ if (ghLockFTraceEventLock != NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "FTrace Support is already initialized")); -+ return PVRSRV_OK; -+ } -+ -+ /* common module params initialization */ -+ eError = OSLockCreate(&ghLockFTraceEventLock); -+ PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); -+ -+ eError = OSLockCreate(&ghGPUTraceStateLock); -+ PVR_LOG_RETURN_IF_ERROR (eError, "OSLockCreate"); -+ -+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -+ eError = GpuTraceWorkPeriodInitialize(); -+ PVR_LOG_RETURN_IF_ERROR (eError, "GpuTraceWorkPeriodInitialize"); -+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ -+ -+#if defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) -+ eError = GpuTraceFreqInitialize(); -+ PVR_LOG_RETURN_IF_ERROR (eError, "GpuTraceFreqInitialize"); -+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) */ -+ -+#if defined(SUPPORT_RGX) -+#if !defined(PVRSRV_FORCE_HWPERF_TO_SCHED_CLK) -+ PVRGpuTraceInitFTraceClockSource(); -+#endif /* !defined(PVRSRV_FORCE_HWPERF_TO_SCHED_CLK) */ -+#endif /* defined(SUPPORT_RGX) */ -+ -+ return PVRSRV_OK; -+} -+ -+void PVRGpuTraceSupportDeInit(void) -+{ -+#if defined(SUPPORT_RGX) -+#if !defined(PVRSRV_FORCE_HWPERF_TO_SCHED_CLK) -+ PVRGpuTraceDeinitFTraceClockSource(); -+#endif /* !defined(PVRSRV_FORCE_HWPERF_TO_SCHED_CLK) */ -+#endif /* defined(SUPPORT_RGX) */ -+ -+#if defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) -+ GpuTraceFreqDeInitialize(); -+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) */ -+ -+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -+ GpuTraceSupportDeInitialize(); 
-+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ -+ if (ghGPUTraceStateLock) -+ { -+ OSLockDestroy(ghGPUTraceStateLock); -+ } -+ -+ if (ghLockFTraceEventLock) -+ { -+ OSLockDestroy(ghLockFTraceEventLock); -+ ghLockFTraceEventLock = NULL; -+ } -+} -+ -+/* Called from RGXHWPerfInitOnDemandL2Stream() which is alway called from -+ * a critical section protected by hHWPerfLock. */ -+PVRSRV_ERROR PVRGpuTraceInitStream(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ PVRSRV_ERROR eError; -+ IMG_CHAR pszFTraceStreamName[sizeof(PVRSRV_TL_FTRACE_RGX_FW_STREAM) + 4]; -+ /* + 4 is used to allow names up to "ftrace_999", which is enough */ -+ -+ IMG_HANDLE hStream = NULL; -+ -+ /* form the FTrace stream name, corresponding to this DevNode; which can make sense in the UM */ -+ if (OSSNPrintf(pszFTraceStreamName, sizeof(pszFTraceStreamName), "%s%d", -+ PVRSRV_TL_FTRACE_RGX_FW_STREAM, -+ psDevInfo->psDeviceNode->sDevId.i32KernelDeviceID) < 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to form FTrace stream name for device %d", -+ __func__, -+ psDevInfo->psDeviceNode->sDevId.i32KernelDeviceID)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ eError = TLStreamCreate(&hStream, -+ pszFTraceStreamName, -+ psDevInfo->ui32RGXL2HWPerfBufSize, -+ TL_OPMODE_DROP_NEWER | TL_FLAG_NO_SIGNAL_ON_COMMIT, -+ _FTrace_FWOnReaderOpenCB, psDevInfo, -+#if !defined(SUPPORT_TL_PRODUCER_CALLBACK) -+ NULL, NULL -+#else -+ /* Not enabled by default */ -+ GPUTraceTLCB, psDevInfo -+#endif -+ ); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ psDevInfo->hHWPerfStream[RGX_HWPERF_L2_STREAM_FTRACE] = hStream; -+ psDevInfo->uiHWPerfStreamCount++; -+ PVR_ASSERT(psDevInfo->uiHWPerfStreamCount <= RGX_HWPERF_L2_STREAM_LAST); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ RGX_HWPERF_FTRACE_DATA *psData; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); -+ -+ psData = OSAllocZMem(sizeof(RGX_HWPERF_FTRACE_DATA)); -+ psDevInfo->pvGpuFtraceData = psData; -+ PVR_LOG_GOTO_IF_NOMEM(psData, eError, e0); -+ -+ /* We initialise it only once because we want to track if any -+ * packets were dropped. 
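PVRGpuTraceInitFTraceClockSource above saves whatever trace clock is currently selected before forcing the DDK's choice; the active clock is the entry wrapped in square brackets in the trace_clock file (e.g. "[local] global counter mono mono_raw x86-tsc"). A minimal sketch of that bracket scan, with illustrative names not taken from the patch:

    #include <stddef.h>

    /* Copy the bracketed (active) clock name out of a trace_clock listing,
     * e.g. "[local] global counter mono mono_raw x86-tsc" yields "local".
     */
    static void get_active_trace_clock(const char *buf, char *out, size_t out_len)
    {
            size_t i, j = 0;
            int in_brackets = 0;

            for (i = 0; buf[i] != '\0' && j + 1 < out_len; i++) {
                    if (buf[i] == ']')
                            break;
                    if (buf[i] == '[')
                            in_brackets = 1;
                    else if (in_brackets)
                            out[j++] = buf[i];
            }
            out[j] = '\0';
    }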
*/ -+ psData->ui32FTraceLastOrdinal = IMG_UINT32_MAX - 1; -+ -+ eError = OSLockCreate(&psData->hFTraceResourceLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0); -+ -+ return PVRSRV_OK; -+ -+e0: -+ PVRGpuTraceDeInitDevice(psDeviceNode); -+ return eError; -+} -+ -+void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGX_HWPERF_FTRACE_DATA *psData = psDevInfo->pvGpuFtraceData; -+ -+ PVRSRV_VZ_RETN_IF_MODE(GUEST); -+ if (psData) -+ { -+ /* first disable the tracing, to free up TL resources */ -+ if (psData->hFTraceResourceLock) -+ { -+ OSLockAcquire(psData->hFTraceResourceLock); -+ _GpuTraceDisable(psDeviceNode->pvDevice, IMG_TRUE); -+ OSLockRelease(psData->hFTraceResourceLock); -+ -+ /* now free all the FTrace resources */ -+ OSLockDestroy(psData->hFTraceResourceLock); -+ } -+ OSFreeMem(psData); -+ psDevInfo->pvGpuFtraceData = NULL; -+ } -+} -+ -+static INLINE IMG_BOOL PVRGpuTraceIsEnabled(void) -+{ -+ return gbFTraceGPUEventsEnabled; -+} -+ -+void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ if (PVRGpuTraceIsEnabled()) -+ { -+ IMG_BOOL bEnable = IMG_FALSE; -+ -+ PVRSRV_ERROR eError = PVRGpuTraceSetEnabled(psDeviceNode, IMG_TRUE); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to initialise GPU event tracing" -+ " (%s)", PVRSRVGetErrorString(eError))); -+ } -+ -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) -+ /* below functions will enable FTrace events which in turn will -+ * execute HWPerf callbacks that set appropriate filter values -+ * note: unfortunately the functions don't allow to pass private -+ * data so they enable events for all of the devices -+ * at once, which means that this can happen more than once -+ * if there is more than one device */ -+ -+ /* single events can be enabled by calling trace_set_clr_event() -+ * with the event name, e.g.: -+ * trace_set_clr_event("rogue", "rogue_ufo_update", 1) */ -+ if (trace_set_clr_event("rogue", NULL, 1) != 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"rogue\" event" -+ " group")); -+ } -+ else -+ { -+ PVR_LOG(("FTrace events from \"rogue\" group enabled")); -+ bEnable = IMG_TRUE; -+ } -+#endif -+ -+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -+ if (trace_set_clr_event("power", "gpu_work_period", 1) != 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"gpu_work_period\" event")); -+ } -+ else -+ { -+ PVR_LOG(("FTrace event from \"gpu_work_period\" enabled")); -+ bEnable = IMG_TRUE; -+ } -+#endif -+ -+#if defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) -+ if (trace_set_clr_event("power", "gpu_frequency", 1) != 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"gpu_frequency\" event")); -+ } -+ else -+ { -+ PVR_LOG(("FTrace event from \"gpu_frequency\" enabled")); -+ bEnable = IMG_TRUE; -+ } -+#endif -+ -+ if (bEnable) -+ { -+ /* this enables FTrace globally (if not enabled nothing will appear -+ * in the FTrace buffer) */ -+ tracing_on(); -+ } -+ } -+} -+ -+/* Caller must now hold hFTraceResourceLock before calling this method. 
-+ */ -+static PVRSRV_ERROR _GpuTraceEnable(PVRSRV_RGXDEV_INFO *psRgxDevInfo) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGX_HWPERF_FTRACE_DATA *psFtraceData; -+ PVRSRV_DEVICE_NODE *psRgxDevNode = psRgxDevInfo->psDeviceNode; -+ IMG_CHAR pszFTraceStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 4]; -+ IMG_UINT64 uiFilter; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psRgxDevInfo); -+ -+ psFtraceData = psRgxDevInfo->pvGpuFtraceData; -+ -+ PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock)); -+ -+ /* return if already enabled */ -+ if (psFtraceData->hGPUFTraceTLStream != NULL) -+ { -+ return PVRSRV_OK; -+ } -+ -+ uiFilter = -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) || defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -+ RGX_HWPERF_EVENT_MASK_HW_KICKFINISH | -+#endif -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) -+ RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO | -+#endif -+#if defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) -+ RGX_HWPERF_CLKS_CHG | -+#endif -+ 0; -+ -+#if defined(SUPPORT_RGX) -+ /* Signal FW to enable event generation */ -+ if (psRgxDevInfo->bFirmwareInitialised) -+ { -+ eError = PVRSRVRGXCtrlHWPerfFW(psRgxDevNode, -+ RGX_HWPERF_L2_STREAM_FTRACE, -+ uiFilter, HWPERF_FILTER_OPERATION_SET); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfFW", err_out); -+ } -+ else -+#endif -+ { -+ /* only set filter and exit */ -+ (void) RGXHWPerfFwSetEventFilter(psRgxDevInfo, RGX_HWPERF_L2_STREAM_FTRACE, uiFilter); -+ -+ return PVRSRV_OK; -+ } -+ -+ /* form the FTrace stream name, corresponding to this DevNode; which can make sense in the UM */ -+ if (OSSNPrintf(pszFTraceStreamName, sizeof(pszFTraceStreamName), "%s%d", -+ PVRSRV_TL_FTRACE_RGX_FW_STREAM, psRgxDevNode->sDevId.i32KernelDeviceID) < 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to form FTrace stream name for device %d", -+ __func__, -+ psRgxDevNode->sDevId.i32KernelDeviceID)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* Open the TL Stream for FTrace data consumption */ -+ eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, -+ pszFTraceStreamName, -+ PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING, -+ &psFtraceData->hGPUFTraceTLStream); -+ PVR_LOG_GOTO_IF_ERROR(eError, "TLClientOpenStream", err_out); -+ -+#if defined(SUPPORT_RGX) -+ if (RGXTimeCorrGetClockSource(psRgxDevNode) != TRACE_CLK) -+ { -+ /* Set clock source for timer correlation data to hwperf clock */ -+ eError = RGXTimeCorrSetClockSource(psRgxDevNode, TRACE_CLK); -+ PVR_LOG_IF_ERROR(eError, "RGXTimeCorrSetClockSource"); -+ } -+#endif -+ -+ /* Reset the ordinal tracking flag. We should skip checking for gaps in -+ * ordinal on the first run after FTrace is enabled in case another HWPerf -+ * consumer was connected while FTrace was disabled. */ -+ psFtraceData->bTrackOrdinals = IMG_FALSE; -+ -+ /* Reset the OS timestamp coming from the timer correlation data -+ * associated with the latest HWPerf event we processed. -+ */ -+ psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = 0; -+ -+#if defined(TRACE_EVENTS_DEFINED) -+ /* Register a notifier to collect HWPerf data whenever the HW completes -+ * an operation. 
-+ */ -+ eError = PVRSRVRegisterCmdCompleteNotify( -+ &psFtraceData->hGPUTraceCmdCompleteHandle, -+ &_GpuTraceCmdCompleteNotify, -+ psRgxDevInfo); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterCmdCompleteNotify", err_close_stream); -+#endif /* defined(TRACE_EVENTS_DEFINED) */ -+ -+err_out: -+ PVR_DPF_RETURN_RC(eError); -+#if defined(TRACE_EVENTS_DEFINED) -+err_close_stream: -+#endif /* defined(TRACE_EVENTS_DEFINED) */ -+ TLClientCloseStream(DIRECT_BRIDGE_HANDLE, -+ psFtraceData->hGPUFTraceTLStream); -+ psFtraceData->hGPUFTraceTLStream = NULL; -+ goto err_out; -+} -+ -+/* Caller must now hold hFTraceResourceLock before calling this method. -+ */ -+static PVRSRV_ERROR _GpuTraceDisable(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_BOOL bDeInit) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGX_HWPERF_FTRACE_DATA *psFtraceData; -+ IMG_PBYTE pBuffer; -+ IMG_UINT32 ui32ReadLen; -+#if defined(SUPPORT_RGX) -+ PVRSRV_DEVICE_NODE *psRgxDevNode = psRgxDevInfo->psDeviceNode; -+#endif -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psRgxDevInfo); -+ -+ psFtraceData = psRgxDevInfo->pvGpuFtraceData; -+ -+ PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock)); -+ -+ /* if FW is not yet initialised, just set filter and exit */ -+ if (!psRgxDevInfo->bFirmwareInitialised) -+ { -+ (void) RGXHWPerfFwSetEventFilter(psRgxDevInfo, RGX_HWPERF_L2_STREAM_FTRACE, -+ RGX_HWPERF_EVENT_MASK_NONE); -+ return PVRSRV_OK; -+ } -+ -+ if (psFtraceData->hGPUFTraceTLStream == NULL) -+ { -+ /* Tracing already disabled, just return */ -+ return PVRSRV_OK; -+ } -+ -+#if defined(SUPPORT_RGX) -+ if (!bDeInit) -+ { -+ eError = PVRSRVRGXCtrlHWPerfFW(psRgxDevNode, -+ RGX_HWPERF_L2_STREAM_FTRACE, -+ RGX_HWPERF_EVENT_MASK_NONE, -+ HWPERF_FILTER_OPERATION_SET); -+ PVR_LOG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfFW"); -+ } -+#endif -+ -+#if defined(TRACE_EVENTS_DEFINED) -+ if (psFtraceData->hGPUTraceCmdCompleteHandle) -+ { -+ /* Tracing is being turned off. Unregister the notifier. */ -+ eError = PVRSRVUnregisterCmdCompleteNotify( -+ psFtraceData->hGPUTraceCmdCompleteHandle); -+ PVR_LOG_IF_ERROR(eError, "PVRSRVUnregisterCmdCompleteNotify"); -+ psFtraceData->hGPUTraceCmdCompleteHandle = NULL; -+ } -+#endif /* defined(TRACE_EVENTS_DEFINED) */ -+ -+ /* We have to flush both the L1 (FW) and L2 (Host) buffers in case there -+ * are some events left unprocessed in this FTrace/systrace "session" -+ * (note that even if we have just disabled HWPerf on the FW some packets -+ * could have been generated and already copied to L2 by the MISR handler). -+ * -+ * With the following calls we will both copy new data to the Host buffer -+ * (done by the producer callback in TLClientAcquireData) and advance -+ * the read offset in the buffer to catch up with the latest events. -+ */ -+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, -+ psFtraceData->hGPUFTraceTLStream, -+ &pBuffer, &ui32ReadLen); -+ PVR_LOG_IF_ERROR(eError, "TLClientCloseStream"); -+ -+#if defined(TRACE_EVENTS_DEFINED) -+ /* We still need to process packets if there were any so that there is -+ * no gap in the ordinal value. 
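Both the disable path above and ValidAndEmitFTraceEvent further down rely on HWPerf packets carrying a monotonically increasing ordinal: after the first packet of a session, any jump between consecutive ordinals means events were dropped between the firmware buffer and this consumer. A minimal sketch of that check, with illustrative names not taken from the patch:

    #include <stdbool.h>
    #include <stdint.h>

    struct ordinal_tracker {
            bool     track;          /* false right after tracing is (re)enabled */
            uint32_t last_ordinal;
    };

    /* Returns true when a gap (i.e. lost packets) is detected. */
    static bool ordinal_gap(struct ordinal_tracker *t, uint32_t ordinal)
    {
            bool lost = t->track && (uint32_t)(t->last_ordinal + 1u) != ordinal;

            t->track = true;
            t->last_ordinal = ordinal;
            return lost;
    }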
-+ */ -+ if (ui32ReadLen > 0) -+ { -+ _GpuTraceProcessPackets(psRgxDevInfo, pBuffer, ui32ReadLen); -+ } -+#endif /* defined(TRACE_EVENTS_DEFINED) */ -+ -+ psRgxDevInfo->bSuspendHWPerfL2DataCopy[RGX_HWPERF_L2_STREAM_FTRACE] = IMG_TRUE; -+ -+ /* Let close stream perform the release data on the outstanding acquired -+ * data */ -+ eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE, -+ psFtraceData->hGPUFTraceTLStream); -+ PVR_LOG_IF_ERROR(eError, "TLClientCloseStream"); -+ -+ psFtraceData->hGPUFTraceTLStream = NULL; -+ -+#if defined(SUPPORT_RGX) -+ if (psRgxDevInfo->ui32LastClockSource != TRACE_CLK) -+ { -+ RGXTimeCorrSetClockSource(psRgxDevNode, psRgxDevInfo->ui32LastClockSource); -+ } -+#endif -+ -+ PVR_DPF_RETURN_RC(eError); -+} -+ -+static PVRSRV_ERROR _GpuTraceSetEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ IMG_BOOL bNewValue) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGX_HWPERF_FTRACE_DATA *psFtraceData; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psRgxDevInfo); -+ psFtraceData = psRgxDevInfo->pvGpuFtraceData; -+ -+ /* About to create/destroy FTrace resources, lock critical section -+ * to avoid HWPerf MISR thread contention. -+ */ -+ OSLockAcquire(psFtraceData->hFTraceResourceLock); -+ -+ eError = (bNewValue ? _GpuTraceEnable(psRgxDevInfo) -+ : _GpuTraceDisable(psRgxDevInfo, IMG_FALSE)); -+ -+ OSLockRelease(psFtraceData->hFTraceResourceLock); -+ -+ PVR_DPF_RETURN_RC(eError); -+} -+ -+static PVRSRV_ERROR _GpuTraceSetEnabledForAllDevices(IMG_BOOL bNewValue) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); -+ psDeviceNode = psPVRSRVData->psDeviceNodeList; -+ -+ /* enable/disable GPU trace on all devices */ -+ while (psDeviceNode) -+ { -+ eError = _GpuTraceSetEnabled(psDeviceNode->pvDevice, bNewValue); -+ if (eError != PVRSRV_OK) -+ { -+ break; -+ } -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ -+ PVR_DPF_RETURN_RC(eError); -+} -+ -+PVRSRV_ERROR PVRGpuTraceSetEnabled(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_BOOL bNewValue) -+{ -+ return _GpuTraceSetEnabled(psDeviceNode->pvDevice, bNewValue); -+} -+ -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) -+ -+/* ----- HWPerf to FTrace packet processing and events injection ------------ */ -+ -+static const IMG_CHAR *_HWPerfKickTypeToStr(RGX_HWPERF_KICK_TYPE eKickType) -+{ -+ static const IMG_CHAR *aszKickType[RGX_HWPERF_KICK_TYPE2_LAST+1] = { -+ "TA3D", /* Deprecated */ -+#if defined(RGX_FEATURE_HWPERF_VOLCANIC) -+ /* Volcanic deprecated kick types */ -+ "CDM", "RS", "SHG", "TQTDM", "SYNC", "TA", "3D", "LAST", -+ -+ "", "", "", "", "", -+ "", "", "", -+#else -+ /* Rogue deprecated kick types */ -+ "TQ2D", "TQ3D", "CDM", "RS", "VRDM", "TQTDM", "SYNC", "TA", "3D", "LAST", -+ -+ "", "", "", "", "", -+ "", -+#endif -+ "TQ2D", "TQ3D", "TQTDM", "CDM", "GEOM", "3D", "SYNC", "RS", "LAST" -+ }; -+ -+ /* cast in case of negative value */ -+ if (((IMG_UINT32) eKickType) >= RGX_HWPERF_KICK_TYPE2_LAST) -+ { -+ return ""; -+ } -+ -+ return aszKickType[eKickType]; -+} -+void PVRGpuTraceEnqueueEvent( -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT32 ui32FirmwareCtx, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ RGX_HWPERF_KICK_TYPE eKickType) -+{ -+ const IMG_CHAR *pszKickType = _HWPerfKickTypeToStr(eKickType); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "PVRGpuTraceEnqueueEvent(%s): contextId %u, " -+ 
"jobId %u", pszKickType, ui32FirmwareCtx, ui32IntJobRef)); -+ -+ if (PVRGpuTraceIsEnabled()) -+ { -+ trace_rogue_job_enqueue(psDevNode->sDevId.ui32InternalID, ui32FirmwareCtx, -+ ui32IntJobRef, ui32ExtJobRef, pszKickType); -+ } -+} -+ -+static void _GpuTraceWorkSwitch( -+ IMG_UINT64 ui64HWTimestampInOSTime, -+ IMG_UINT32 ui32GpuId, -+ IMG_UINT32 ui32CtxId, -+ IMG_UINT32 ui32CtxPriority, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ const IMG_CHAR* pszWorkType, -+ PVR_GPUTRACE_SWITCH_TYPE eSwType) -+{ -+ PVR_ASSERT(pszWorkType); -+ trace_rogue_sched_switch(pszWorkType, eSwType, ui64HWTimestampInOSTime, -+ ui32GpuId, ui32CtxId, 2-ui32CtxPriority, ui32IntJobRef, -+ ui32ExtJobRef); -+} -+ -+static void _GpuTraceUfo( -+ IMG_UINT64 ui64OSTimestamp, -+ const RGX_HWPERF_UFO_EV eEvType, -+ const IMG_UINT32 ui32GpuId, -+ const IMG_UINT32 ui32CtxId, -+ const IMG_UINT32 ui32ExtJobRef, -+ const IMG_UINT32 ui32IntJobRef, -+ const IMG_UINT32 ui32UFOCount, -+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData) -+{ -+ switch (eEvType) { -+ case RGX_HWPERF_UFO_EV_UPDATE: -+ trace_rogue_ufo_updates(ui64OSTimestamp, ui32GpuId, ui32CtxId, -+ ui32ExtJobRef, ui32IntJobRef, ui32UFOCount, puData); -+ break; -+ case RGX_HWPERF_UFO_EV_CHECK_SUCCESS: -+ trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32GpuId, ui32CtxId, -+ ui32ExtJobRef, ui32IntJobRef, IMG_FALSE, ui32UFOCount, -+ puData); -+ break; -+ case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS: -+ trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32GpuId, ui32CtxId, -+ ui32ExtJobRef, ui32IntJobRef, IMG_TRUE, ui32UFOCount, -+ puData); -+ break; -+ case RGX_HWPERF_UFO_EV_CHECK_FAIL: -+ trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32GpuId, ui32CtxId, -+ ui32ExtJobRef, ui32IntJobRef, IMG_FALSE, ui32UFOCount, -+ puData); -+ break; -+ case RGX_HWPERF_UFO_EV_PRCHECK_FAIL: -+ trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32GpuId, ui32CtxId, -+ ui32ExtJobRef, ui32IntJobRef, IMG_TRUE, ui32UFOCount, -+ puData); -+ break; -+ default: -+ break; -+ } -+} -+ -+static void _GpuTraceFirmware( -+ IMG_UINT64 ui64HWTimestampInOSTime, -+ IMG_UINT32 ui32GpuId, -+ const IMG_CHAR* pszWorkType, -+ PVR_GPUTRACE_SWITCH_TYPE eSwType) -+{ -+ trace_rogue_firmware_activity(ui64HWTimestampInOSTime, ui32GpuId, pszWorkType, eSwType); -+} -+ -+static void _GpuTraceEventsLost( -+ const RGX_HWPERF_STREAM_ID eStreamId, -+ IMG_UINT32 ui32GpuId, -+ const IMG_UINT32 ui32LastOrdinal, -+ const IMG_UINT32 ui32CurrOrdinal) -+{ -+ trace_rogue_events_lost(eStreamId, ui32GpuId, ui32LastOrdinal, ui32CurrOrdinal); -+} -+#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) */ -+ -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) || defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -+/* Calculate the OS timestamp given an RGX timestamp in the HWPerf event. 
*/ -+static uint64_t CalculateEventTimestamp( -+ PVRSRV_RGXDEV_INFO *psDevInfo, -+ uint32_t ui32TimeCorrIndex, -+ uint64_t ui64EventTimestamp) -+{ -+ RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; -+ RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData; -+ RGXFWIF_TIME_CORR *psTimeCorr; -+ uint64_t ui64CRTimeStamp; -+ uint64_t ui64OSTimeStamp; -+ uint64_t ui64CRDeltaToOSDeltaKNs; -+ uint64_t ui64EventOSTimestamp, deltaRgxTimer, delta_ns; -+ -+ RGXFwSharedMemCacheOpValue(psGpuUtilFWCB->sTimeCorr[ui32TimeCorrIndex], INVALIDATE); -+ psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32TimeCorrIndex]; -+ ui64CRTimeStamp = psTimeCorr->ui64CRTimeStamp; -+ ui64OSTimeStamp = psTimeCorr->ui64OSTimeStamp; -+ ui64CRDeltaToOSDeltaKNs = psTimeCorr->ui64CRDeltaToOSDeltaKNs; -+ -+ if (psFtraceData->ui64LastSampledTimeCorrOSTimeStamp > ui64OSTimeStamp) -+ { -+ /* The previous packet had a time reference (time correlation data) more -+ * recent than the one in the current packet, it means the timer -+ * correlation array wrapped too quickly (buffer too small) and in the -+ * previous call to _GpuTraceUfoEvent we read one of the -+ * newest timer correlations rather than one of the oldest ones. -+ */ -+ PVR_DPF((PVR_DBG_ERROR, "%s: The timestamps computed so far could be " -+ "wrong! The time correlation array size should be increased " -+ "to avoid this.", __func__)); -+ } -+ -+ psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = ui64OSTimeStamp; -+ -+ /* RGX CR timer ticks delta */ -+ deltaRgxTimer = ui64EventTimestamp - ui64CRTimeStamp; -+ /* RGX time delta in nanoseconds */ -+ delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui64CRDeltaToOSDeltaKNs); -+ /* Calculate OS time of HWPerf event */ -+ ui64EventOSTimestamp = ui64OSTimeStamp + delta_ns; -+ -+ PVR_DPF((PVR_DBG_VERBOSE, "%s: psCurrentDvfs RGX %llu, OS %llu, DVFSCLK %u", -+ __func__, ui64CRTimeStamp, ui64OSTimeStamp, -+ psTimeCorr->ui32CoreClockSpeed)); -+ -+ return ui64EventOSTimestamp; -+} -+ -+static void _GpuTraceSwitchEvent(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName, -+ PVR_GPUTRACE_SWITCH_TYPE eSwType) -+{ -+ IMG_UINT64 ui64Timestamp; -+ RGX_HWPERF_HW_DATA* psHWPerfPktData; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psHWPerfPkt); -+ PVR_ASSERT(pszWorkName); -+ -+ psHWPerfPktData = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt); -+ -+ ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex, -+ psHWPerfPkt->ui64Timestamp); -+ -+ PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceSwitchEvent: %s ui32ExtJobRef=%d, ui32IntJobRef=%d, eSwType=%d", -+ pszWorkName != NULL ? pszWorkName : "?", psHWPerfPktData->ui32DMContext, -+ psHWPerfPktData->ui32IntJobRef, eSwType)); -+ -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) -+ _GpuTraceWorkSwitch(ui64Timestamp, -+ psDevInfo->psDeviceNode->sDevId.ui32InternalID, -+ psHWPerfPktData->ui32DMContext, -+ psHWPerfPktData->ui32CtxPriority, -+ psHWPerfPktData->ui32ExtJobRef, -+ psHWPerfPktData->ui32IntJobRef, -+ pszWorkName, -+ eSwType); -+#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) */ -+ -+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -+ GpuTraceWorkPeriod(psHWPerfPktData->ui32PID, -+ psDevInfo->psDeviceNode->sDevId.ui32InternalID, -+ ui64Timestamp, -+ psHWPerfPktData->ui32IntJobRef, -+ (eSwType == PVR_GPUTRACE_SWITCH_TYPE_BEGIN) ? 
-+ PVR_GPU_WORK_EVENT_START : PVR_GPU_WORK_EVENT_END); -+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ -+ -+ PVR_DPF_RETURN; -+} -+#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) || defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ -+ -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) -+static void _GpuTraceUfoEvent(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt) -+{ -+ IMG_UINT64 ui64Timestamp; -+ RGX_HWPERF_UFO_DATA *psHWPerfPktData; -+ IMG_UINT32 ui32UFOCount; -+ RGX_HWPERF_UFO_DATA_ELEMENT *puData; -+ -+ psHWPerfPktData = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt); -+ -+ ui32UFOCount = RGX_HWPERF_GET_UFO_STREAMSIZE(psHWPerfPktData->ui32StreamInfo); -+ puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) IMG_OFFSET_ADDR(psHWPerfPktData, RGX_HWPERF_GET_UFO_STREAMOFFSET(psHWPerfPktData->ui32StreamInfo)); -+ -+ ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex, -+ psHWPerfPkt->ui64Timestamp); -+ -+ PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceUfoEvent: ui32ExtJobRef=%d, " -+ "ui32IntJobRef=%d", psHWPerfPktData->ui32ExtJobRef, -+ psHWPerfPktData->ui32IntJobRef)); -+ -+ _GpuTraceUfo(ui64Timestamp, psHWPerfPktData->eEvType, -+ psDevInfo->psDeviceNode->sDevId.ui32InternalID, -+ psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32ExtJobRef, -+ psHWPerfPktData->ui32IntJobRef, ui32UFOCount, puData); -+} -+ -+static void _GpuTraceFirmwareEvent(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName, -+ PVR_GPUTRACE_SWITCH_TYPE eSwType) -+ -+{ -+ uint64_t ui64Timestamp; -+ RGX_HWPERF_FW_DATA *psHWPerfPktData = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt); -+ -+ ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex, -+ psHWPerfPkt->ui64Timestamp); -+ -+ _GpuTraceFirmware(ui64Timestamp, psDevInfo->psDeviceNode->sDevId.ui32InternalID, pszWorkName, -+ eSwType); -+} -+#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) */ -+ -+#if defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) -+static void _GpuTraceCLKsEvent(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGX_HWPERF_V2_PACKET_HDR *psHWPerfPkt) -+ -+{ -+ RGX_HWPERF_V2_PACKET_DATA *psHWPerfPktData = -+ RGX_HWPERF_GET_PACKET_DATA(psHWPerfPkt); -+ -+ switch (psHWPerfPktData->sCLKSCHG.eClockName) -+ { -+ case RGX_HWPERF_CLKS_CHG_NAME_CORE: -+ { -+ GpuTraceFrequency(psDevInfo->psDeviceNode->sDevId.ui32InternalID, -+ psHWPerfPktData->sCLKSCHG.ui64NewClockSpeed); -+ break; -+ } -+ default: -+ break; -+ } -+} -+#endif -+ -+#if defined(TRACE_EVENTS_DEFINED) -+static IMG_BOOL ValidAndEmitFTraceEvent(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt) -+{ -+ RGX_HWPERF_EVENT_TYPE eType; -+ RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData; -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) || defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -+ IMG_UINT32 ui32HwEventTypeIndex; -+ static const struct { -+ IMG_CHAR* pszName; -+ PVR_GPUTRACE_SWITCH_TYPE eSwType; -+ } aszHwEventTypeMap[] = { -+#define _T(T) PVR_GPUTRACE_SWITCH_TYPE_##T -+ { "BG", _T(BEGIN) }, /* RGX_HWPERF_FW_BGSTART */ -+ { "BG", _T(END) }, /* RGX_HWPERF_FW_BGEND */ -+ { "IRQ", _T(BEGIN) }, /* RGX_HWPERF_FW_IRQSTART */ -+ { "IRQ", _T(END) }, /* RGX_HWPERF_FW_IRQEND */ -+ { "DBG", _T(BEGIN) }, /* RGX_HWPERF_FW_DBGSTART */ -+ { "DBG", _T(END) }, /* RGX_HWPERF_FW_DBGEND */ -+ { "PMOOM_TAPAUSE", _T(END) }, /* RGX_HWPERF_HW_PMOOM_TAPAUSE */ -+ { "TA", _T(BEGIN) }, /* RGX_HWPERF_HW_TAKICK */ -+ { "TA", _T(END) }, /* RGX_HWPERF_HW_TAFINISHED */ -+ { "TQ3D", _T(BEGIN) }, /* 
RGX_HWPERF_HW_3DTQKICK */ -+ { "3D", _T(BEGIN) }, /* RGX_HWPERF_HW_3DKICK */ -+ { "3D", _T(END) }, /* RGX_HWPERF_HW_3DFINISHED */ -+ { "CDM", _T(BEGIN) }, /* RGX_HWPERF_HW_CDMKICK */ -+ { "CDM", _T(END) }, /* RGX_HWPERF_HW_CDMFINISHED */ -+ { "TQ2D", _T(BEGIN) }, /* RGX_HWPERF_HW_TLAKICK */ -+ { "TQ2D", _T(END) }, /* RGX_HWPERF_HW_TLAFINISHED */ -+ { "3DSPM", _T(BEGIN) }, /* RGX_HWPERF_HW_3DSPMKICK */ -+ { NULL, 0 }, /* RGX_HWPERF_HW_PERIODIC (unsupported) */ -+ { "RTU", _T(BEGIN) }, /* RGX_HWPERF_HW_RTUKICK */ -+ { "RTU", _T(END) }, /* RGX_HWPERF_HW_RTUFINISHED */ -+ { "SHG", _T(BEGIN) }, /* RGX_HWPERF_HW_SHGKICK */ -+ { "SHG", _T(END) }, /* RGX_HWPERF_HW_SHGFINISHED */ -+ { "TQ3D", _T(END) }, /* RGX_HWPERF_HW_3DTQFINISHED */ -+ { "3DSPM", _T(END) }, /* RGX_HWPERF_HW_3DSPMFINISHED */ -+ { "PMOOM_TARESUME", _T(BEGIN) }, /* RGX_HWPERF_HW_PMOOM_TARESUME */ -+ { "TDM", _T(BEGIN) }, /* RGX_HWPERF_HW_TDMKICK */ -+ { "TDM", _T(END) }, /* RGX_HWPERF_HW_TDMFINISHED */ -+ { "NULL", _T(SINGLE) }, /* RGX_HWPERF_HW_NULLKICK */ -+#undef _T -+ }; -+ static_assert(RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE == RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE + 1, -+ "FW and HW events are not contiguous in RGX_HWPERF_EVENT_TYPE"); -+#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) || defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ -+ -+ PVR_ASSERT(psHWPerfPkt); -+ eType = RGX_HWPERF_GET_TYPE(psHWPerfPkt); -+ -+ if (psFtraceData->bTrackOrdinals) -+ { -+ if (psFtraceData->ui32FTraceLastOrdinal != psHWPerfPkt->ui32Ordinal - 1) -+ { -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) -+ _GpuTraceEventsLost(RGX_HWPERF_GET_STREAM_ID(psHWPerfPkt), -+ psDevInfo->psDeviceNode->sDevId.ui32InternalID, -+ psFtraceData->ui32FTraceLastOrdinal, -+ psHWPerfPkt->ui32Ordinal); -+#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) */ -+ PVR_DPF((PVR_DBG_WARNING, "FTrace events lost (stream_id = %u, ordinal: last = %u, current = %u)", -+ RGX_HWPERF_GET_STREAM_ID(psHWPerfPkt), psFtraceData->ui32FTraceLastOrdinal, -+ psHWPerfPkt->ui32Ordinal)); -+ } -+ } -+ else -+ { -+ psFtraceData->bTrackOrdinals = IMG_TRUE; -+ } -+ -+ psFtraceData->ui32FTraceLastOrdinal = psHWPerfPkt->ui32Ordinal; -+ -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) -+ /* Process UFO packets */ -+ if (eType == RGX_HWPERF_UFO) -+ { -+ _GpuTraceUfoEvent(psDevInfo, psHWPerfPkt); -+ return IMG_TRUE; -+ } -+#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) */ -+ -+#if defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) -+ if (eType == RGX_HWPERF_CLKS_CHG) -+ { -+ _GpuTraceCLKsEvent(psDevInfo, psHWPerfPkt); -+ return IMG_TRUE; -+ } -+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) */ -+ -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) || defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -+ if (eType <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE) -+ { -+ /* this ID belongs to range 0, so index directly in range 0 */ -+ ui32HwEventTypeIndex = eType - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE; -+ } -+ else -+ { -+ /* this ID belongs to range 1, so first index in range 1 and skip number of slots used up for range 0 */ -+ ui32HwEventTypeIndex = (eType - RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE) + -+ (RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE + 1); -+ } -+ -+ if (ui32HwEventTypeIndex >= ARRAY_SIZE(aszHwEventTypeMap)) -+ { -+ goto err_unsupported; -+ } -+ -+ if (aszHwEventTypeMap[ui32HwEventTypeIndex].pszName == NULL) -+ { -+ /* Not supported map entry, ignore event */ -+ goto err_unsupported; -+ } -+ -+ if (HWPERF_PACKET_IS_HW_TYPE(eType)) -+ { -+ if (aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType == 
PVR_GPUTRACE_SWITCH_TYPE_SINGLE) -+ { -+ _GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt, -+ aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, -+ PVR_GPUTRACE_SWITCH_TYPE_BEGIN); -+ _GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt, -+ aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, -+ PVR_GPUTRACE_SWITCH_TYPE_END); -+ } -+ else -+ { -+ _GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt, -+ aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, -+ aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType); -+ } -+ -+ return IMG_TRUE; -+ } -+#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) || defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) -+ if (HWPERF_PACKET_IS_FW_TYPE(eType)) -+ { -+ _GpuTraceFirmwareEvent(psDevInfo, psHWPerfPkt, -+ aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, -+ aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType); -+ -+ return IMG_TRUE; -+ } -+#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) */ -+ -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) || defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -+err_unsupported: -+#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) || defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ -+ PVR_DPF((PVR_DBG_VERBOSE, "%s: Unsupported event type %d", __func__, eType)); -+ return IMG_FALSE; -+} -+ -+static void _GpuTraceProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo, void *pBuffer, -+ IMG_UINT32 ui32ReadLen) -+{ -+ IMG_UINT32 ui32TlPackets = 0; -+ IMG_UINT32 ui32HWPerfPackets = 0; -+ IMG_UINT32 ui32HWPerfPacketsSent = 0; -+ void *pBufferEnd; -+ PVRSRVTL_PPACKETHDR psHDRptr; -+ PVRSRVTL_PACKETTYPE ui16TlType; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psDevInfo); -+ PVR_ASSERT(pBuffer); -+ PVR_ASSERT(ui32ReadLen); -+ -+ /* Process the TL Packets -+ */ -+ pBufferEnd = IMG_OFFSET_ADDR(pBuffer, ui32ReadLen); -+ psHDRptr = GET_PACKET_HDR(pBuffer); -+ while ( psHDRptr < (PVRSRVTL_PPACKETHDR)pBufferEnd ) -+ { -+ ui16TlType = GET_PACKET_TYPE(psHDRptr); -+ if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA) -+ { -+ IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr); -+ if (0 == ui16DataLen) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "_GpuTraceProcessPackets: ZERO Data in TL data packet: %p", psHDRptr)); -+ } -+ else -+ { -+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt; -+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfEnd; -+ -+ /* Check for lost hwperf data packets */ -+ psHWPerfEnd = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr)+ui16DataLen); -+ psHWPerfPkt = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr)); -+ do -+ { -+ if (ValidAndEmitFTraceEvent(psDevInfo, psHWPerfPkt)) -+ { -+ ui32HWPerfPacketsSent++; -+ } -+ ui32HWPerfPackets++; -+ psHWPerfPkt = RGX_HWPERF_GET_NEXT_PACKET(psHWPerfPkt); -+ } -+ while (psHWPerfPkt < psHWPerfEnd); -+ } -+ } -+ else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "_GpuTraceProcessPackets: Indication that the transport buffer was full")); -+ } -+ else -+ { -+ /* else Ignore padding packet type and others */ -+ PVR_DPF((PVR_DBG_MESSAGE, "_GpuTraceProcessPackets: Ignoring TL packet, type %d", ui16TlType )); -+ } -+ -+ psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr); -+ ui32TlPackets++; -+ } -+ -+ PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceProcessPackets: TL " -+ "Packets processed %03d, HWPerf packets %03d, sent %03d", -+ ui32TlPackets, ui32HWPerfPackets, ui32HWPerfPacketsSent)); -+ -+ PVR_DPF_RETURN; -+} -+ -+static void _GpuTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle) -+{ -+ PVRSRV_RGXDEV_INFO *psDeviceInfo = hCmdCompHandle; -+ RGX_HWPERF_FTRACE_DATA *psFtraceData; -+ PVRSRV_ERROR eError; -+ IMG_PBYTE 
pBuffer; -+ IMG_UINT32 ui32ReadLen; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psDeviceInfo != NULL); -+ -+ psFtraceData = psDeviceInfo->pvGpuFtraceData; -+ -+ /* Command-complete notifiers can run concurrently. If this is happening, -+ * just bail out and let the previous call finish. -+ * This is ok because we can process the queued packets on the next call. -+ */ -+ if (!OSTryLockAcquire(psFtraceData->hFTraceResourceLock)) -+ { -+ PVR_DPF_RETURN; -+ } -+ -+ /* If this notifier is called, it means the TL resources will be valid -+ * at-least until the end of this call, since the DeInit function will wait -+ * on the hFTraceResourceLock to clean-up the TL resources and un-register -+ * the notifier, so just assert here. -+ */ -+ PVR_ASSERT(psFtraceData->hGPUFTraceTLStream != NULL); -+ -+ /* If we have a valid stream attempt to acquire some data */ -+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, -+ psFtraceData->hGPUFTraceTLStream, -+ &pBuffer, &ui32ReadLen); -+ if (eError != PVRSRV_OK) -+ { -+ if (eError != PVRSRV_ERROR_TIMEOUT) -+ { -+ PVR_LOG_ERROR(eError, "TLClientAcquireData"); -+ } -+ -+ goto unlock; -+ } -+ -+ /* Process the HWPerf packets and release the data */ -+ if (ui32ReadLen > 0) -+ { -+ PVR_DPF((PVR_DBG_VERBOSE, "%s: DATA AVAILABLE offset=%p, length=%d", -+ __func__, pBuffer, ui32ReadLen)); -+ -+ /* Process the transport layer data for HWPerf packets... */ -+ _GpuTraceProcessPackets(psDeviceInfo, pBuffer, ui32ReadLen); -+ -+ eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, -+ psFtraceData->hGPUFTraceTLStream); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "TLClientReleaseData"); -+ -+ /* Serious error, disable FTrace GPU events */ -+ _GpuTraceDisable(psDeviceInfo, IMG_FALSE); -+ } -+ } -+ -+unlock: -+ OSLockRelease(psFtraceData->hFTraceResourceLock); -+ -+ PVR_DPF_RETURN; -+} -+#endif /* defined(TRACE_EVENTS_DEFINED) */ -+ -+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -+PVRSRV_ERROR -+PVRSRVGpuTraceWorkPeriodEventStatsRegister(IMG_HANDLE -+ *phGpuWorkPeriodEventStats) -+{ -+ return GpuTraceWorkPeriodEventStatsRegister(phGpuWorkPeriodEventStats); -+} -+ -+void -+PVRSRVGpuTraceWorkPeriodEventStatsUnregister( -+ IMG_HANDLE hGpuWorkPeriodEventStats) -+{ -+ GpuTraceWorkPeriodEventStatsUnregister(hGpuWorkPeriodEventStats); -+} -+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */ -+ -+/* ----- AppHint interface -------------------------------------------------- */ -+ -+static PVRSRV_ERROR _GpuTraceIsEnabledCallback( -+ const PVRSRV_DEVICE_NODE *device, -+ const void *private_data, -+ IMG_BOOL *value) -+{ -+ PVR_UNREFERENCED_PARAMETER(device); -+ PVR_UNREFERENCED_PARAMETER(private_data); -+ -+ *value = gbFTraceGPUEventsEnabled; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR _GpuTraceSetEnabledCallback( -+ const PVRSRV_DEVICE_NODE *device, -+ const void *private_data, -+ IMG_BOOL value) -+{ -+ PVR_UNREFERENCED_PARAMETER(device); -+ -+ /* Lock down the state to avoid concurrent writes */ -+ OSLockAcquire(ghGPUTraceStateLock); -+ -+ if (value != gbFTraceGPUEventsEnabled) -+ { -+#if (defined(PVRSRV_NEED_PVR_TRACE) || defined(PVRSRV_NEED_PVR_DPF)) && \ -+ defined(TRACE_EVENTS_DEFINED) -+ const IMG_CHAR *pszOperation = value ? 
"enable" : "disable"; -+ /* in case MESSAGE level is compiled out */ -+ PVR_UNREFERENCED_PARAMETER(pszOperation); -+#endif -+ -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) -+ if (trace_set_clr_event("rogue", NULL, (int) value) != 0) -+ { -+ PVR_TRACE(("FAILED to %s GPU FTrace event group", pszOperation)); -+ goto err_restore_state; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "FTrace events from \"rogue\" group %sd", -+ pszOperation)); -+ } -+#endif -+ -+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -+ if (trace_set_clr_event("power", "gpu_work_period", (int) value) != 0) -+ { -+ PVR_TRACE(("FAILED to %s gpu_work_period GPU FTrace event", -+ pszOperation)); -+ goto err_restore_state; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "FTrace event from \"gpu_work_period\" %sd", -+ pszOperation)); -+ } -+#endif -+ -+#if defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) -+ if (trace_set_clr_event("power", "gpu_frequency", (int) value) != 0) -+ { -+ PVR_TRACE(("FAILED to %s gpu_frequency GPU FTrace event", -+ pszOperation)); -+ goto err_restore_state; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "FTrace event from \"gpu_frequency\" %sd", -+ pszOperation)); -+ } -+#endif -+ -+ if (value) -+ { -+ /* this enables FTrace globally (if not enabled nothing will appear -+ * in the FTrace buffer) */ -+ tracing_on(); -+ } -+ -+ /* The HWPerf supplier is activated here, -+ The FTrace consumer is activated above, -+ The consumer should be active before the supplier */ -+ if (_GpuTraceSetEnabledForAllDevices(value) != PVRSRV_OK) -+ { -+ PVR_TRACE(("FAILED to %s GPU FTrace for all devices", pszOperation)); -+ goto err_restore_state; -+ } -+ -+ PVR_TRACE(("%s GPU FTrace", value ? "ENABLED" : "DISABLED")); -+ gbFTraceGPUEventsEnabled = value; -+ } -+ else -+ { -+ PVR_TRACE(("GPU FTrace already %s!", value ? "enabled" : "disabled")); -+ } -+ -+ OSLockRelease(ghGPUTraceStateLock); -+ -+ return PVRSRV_OK; -+ -+err_restore_state: -+ /* On failure, partial enable/disable might have resulted. Try best to -+ * restore to previous state. 
Ignore error */ -+ (void) _GpuTraceSetEnabledForAllDevices(gbFTraceGPUEventsEnabled); -+ -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) -+ (void) trace_set_clr_event("rogue", NULL, -+ (int) gbFTraceGPUEventsEnabled); -+#endif -+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) -+ (void) trace_set_clr_event("power", "gpu_work_period", -+ (int) gbFTraceGPUEventsEnabled); -+#endif -+#if defined(PVRSRV_ANDROID_TRACE_GPU_FREQ) -+ (void) trace_set_clr_event("power", "gpu_frequency", -+ (int) gbFTraceGPUEventsEnabled); -+#endif -+ -+ OSLockRelease(ghGPUTraceStateLock); -+ -+ return PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT; -+} -+ -+void PVRGpuTraceInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ /* Do not register callback handlers if we are in GUEST mode */ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ return; -+ } -+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFTraceGPU, -+ _GpuTraceIsEnabledCallback, -+ _GpuTraceSetEnabledCallback, -+ psDeviceNode, NULL); -+} -+ -+/* ----- FTrace event callbacks -------------------------------------------- */ -+ -+void PVRGpuTraceEnableUfoCallback(void) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ /* Lock down events state, for consistent value of guiUfoEventRef */ -+ OSLockAcquire(ghLockFTraceEventLock); -+ if (guiUfoEventRef++ == 0) -+ { -+ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); -+ psDeviceNode = psPVRSRVData->psDeviceNodeList; -+ -+ /* make sure UFO events are enabled on all rogue devices */ -+ while (psDeviceNode) -+ { -+#if defined(SUPPORT_RGX) -+ PVRSRV_ERROR eError; -+ -+ /* Small chance exists that ui64HWPerfFilter can be changed here and -+ * the newest filter value will be changed to the old one + UFO event. -+ * This is not a critical problem. */ -+ eError = PVRSRVRGXCtrlHWPerfFW(psDeviceNode, RGX_HWPERF_L2_STREAM_FTRACE, -+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO), -+ HWPERF_FILTER_OPERATION_BIT_OR); -+ if (eError == PVRSRV_ERROR_NOT_INITIALISED) -+ { -+ /* If we land here that means that the FW is not initialised yet. -+ * We stored the filter and it will be passed to the firmware -+ * during its initialisation phase. So ignore. */ -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Could not enable UFO HWPerf events on device %d", psDeviceNode->sDevId.i32KernelDeviceID)); -+ } -+#endif -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ } -+ OSLockRelease(ghLockFTraceEventLock); -+} -+ -+void PVRGpuTraceDisableUfoCallback(void) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ /* We have to check if lock is valid because on driver unload -+ * PVRGpuTraceSupportDeInit is called before kernel disables the ftrace -+ * events. This means that the lock will be destroyed before this callback -+ * is called. -+ * We can safely return if that situation happens because driver will be -+ * unloaded so we don't care about HWPerf state anymore. 
*/ -+ if (ghLockFTraceEventLock == NULL) -+ return; -+ -+ /* Lock down events state, for consistent value of guiUfoEventRef */ -+ OSLockAcquire(ghLockFTraceEventLock); -+ if (--guiUfoEventRef == 0) -+ { -+ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); -+ psDeviceNode = psPVRSRVData->psDeviceNodeList; -+ -+ /* make sure UFO events are disabled on all rogue devices */ -+ while (psDeviceNode) -+ { -+#if defined(SUPPORT_RGX) -+ PVRSRV_ERROR eError; -+ -+ /* Small chance exists that ui64HWPerfFilter can be changed here and -+ * the newest filter value will be changed to the old one + UFO event. -+ * This is not a critical problem. */ -+ eError = PVRSRVRGXCtrlHWPerfFW(psDeviceNode, RGX_HWPERF_L2_STREAM_FTRACE, -+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO), -+ HWPERF_FILTER_OPERATION_BIT_CLR); -+ if (eError == PVRSRV_ERROR_NOT_INITIALISED) -+ { -+ /* If we land here that means that the FW is not initialised yet. -+ * We stored the filter and it will be passed to the firmware -+ * during its initialisation phase. So ignore. */ -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Could not disable UFO HWPerf events on device %d", -+ psDeviceNode->sDevId.i32KernelDeviceID)); -+ } -+#endif -+ -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ } -+ OSLockRelease(ghLockFTraceEventLock); -+} -+ -+void PVRGpuTraceEnableFirmwareActivityCallback(void) -+{ -+#if defined(SUPPORT_RGX) -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ IMG_UINT64 uiFilter = 0; -+ int i; -+ -+ for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE; -+ i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++) -+ { -+ uiFilter |= RGX_HWPERF_EVENT_MASK_VALUE(i); -+ } -+ -+ OSLockAcquire(ghLockFTraceEventLock); -+ -+ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); -+ psDeviceNode = psPVRSRVData->psDeviceNodeList; -+ /* Enable all FW events on all the devices */ -+ while (psDeviceNode) -+ { -+ PVRSRV_ERROR eError; -+ -+ eError = PVRSRVRGXCtrlHWPerfFW(psDeviceNode, RGX_HWPERF_L2_STREAM_FTRACE, -+ uiFilter, HWPERF_FILTER_OPERATION_BIT_OR); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Could not enable HWPerf event for firmware" -+ " task timings (%s).", PVRSRVGetErrorString(eError))); -+ } -+ -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ -+ OSLockRelease(ghLockFTraceEventLock); -+#endif /* defined(SUPPORT_RGX) */ -+} -+ -+void PVRGpuTraceDisableFirmwareActivityCallback(void) -+{ -+#if defined(SUPPORT_RGX) -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ IMG_UINT64 uiFilter = 0; -+ int i; -+ -+ /* We have to check if lock is valid because on driver unload -+ * PVRGpuTraceSupportDeInit is called before kernel disables the ftrace -+ * events. This means that the lock will be destroyed before this callback -+ * is called. -+ * We can safely return if that situation happens because driver will be -+ * unloaded so we don't care about HWPerf state anymore. 
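PVRGpuTraceEnableUfoCallback and PVRGpuTraceDisableUfoCallback above implement the many-to-one mapping described earlier for guiUfoEventRef: several ftrace UFO events share a single firmware event, so the firmware filter bit is only set on the 0 -> 1 reference transition and only cleared on the 1 -> 0 transition, under the event lock. Reduced to its core, with illustrative names not taken from the patch:

    #include <stdint.h>

    static uint32_t ufo_event_ref;   /* protected by the caller's event lock */

    static void ufo_event_get(void (*fw_enable)(void))
    {
            if (ufo_event_ref++ == 0)
                    fw_enable();     /* first consumer: enable the FW event */
    }

    static void ufo_event_put(void (*fw_disable)(void))
    {
            if (--ufo_event_ref == 0)
                    fw_disable();    /* last consumer: disable the FW event */
    }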
*/ -+ if (ghLockFTraceEventLock == NULL) -+ return; -+ -+ OSLockAcquire(ghLockFTraceEventLock); -+ -+ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); -+ psDeviceNode = psPVRSRVData->psDeviceNodeList; -+ -+ for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE; -+ i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++) -+ { -+ uiFilter |= RGX_HWPERF_EVENT_MASK_VALUE(i); -+ } -+ -+ /* Disable all FW events on all the devices */ -+ while (psDeviceNode) -+ { -+ if (PVRSRVRGXCtrlHWPerfFW(psDeviceNode, RGX_HWPERF_L2_STREAM_FTRACE, -+ uiFilter, HWPERF_FILTER_OPERATION_BIT_CLR) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Could not disable HWPerf event for firmware task timings.")); -+ } -+ psDeviceNode = psDeviceNode->psNext; -+ } -+ -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ -+ OSLockRelease(ghLockFTraceEventLock); -+#endif /* defined(SUPPORT_RGX) */ -+} -+ -+/****************************************************************************** -+ End of file (pvr_gputrace.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/pvr_intrinsics.h b/drivers/gpu/drm/img-rogue/pvr_intrinsics.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_intrinsics.h -@@ -0,0 +1,70 @@ -+/*************************************************************************/ /*! -+@File -+@Title Intrinsics definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PVR_INTRINSICS_H -+#define PVR_INTRINSICS_H -+ -+/* PVR_CTZLL: -+ * Count the number of trailing zeroes in a long long integer -+ */ -+ -+#if defined(__GNUC__) -+#if defined(__x86_64__) -+ -+ #define PVR_CTZLL __builtin_ctzll -+#endif -+#endif -+ -+/* PVR_CLZLL: -+ * Count the number of leading zeroes in a long long integer -+ */ -+ -+#if defined(__GNUC__) -+#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || \ -+ defined(__arm__) || defined(__mips) -+ -+#define PVR_CLZLL __builtin_clzll -+ -+#endif -+#endif -+ -+#endif /* PVR_INTRINSICS_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_ion_stats.h b/drivers/gpu/drm/img-rogue/pvr_ion_stats.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_ion_stats.h -@@ -0,0 +1,91 @@ -+/*************************************************************************/ /*! -+@File -+@Title Functions for recording ION memory stats. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
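pvr_intrinsics.h above only maps PVR_CTZLL and PVR_CLZLL onto the GCC builtins on selected architectures, so callers must cope with the macros being undefined elsewhere. A typical guarded use, finding the index of the lowest set bit in a non-zero 64-bit mask (illustrative only, not from the patch; assumes pvr_intrinsics.h has been included):

    #include <stdint.h>

    static inline unsigned int lowest_set_bit(uint64_t mask)
    {
    #if defined(PVR_CTZLL)
            return (unsigned int)PVR_CTZLL(mask);   /* count trailing zeroes */
    #else
            unsigned int i = 0;

            while ((mask & 1u) == 0) {              /* portable fallback */
                    mask >>= 1;
                    i++;
            }
            return i;
    #endif
    }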
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PVR_ION_STATS_H -+#define PVR_ION_STATS_H -+ -+#include "pvrsrv_error.h" -+#include "img_defs.h" -+ -+struct dma_buf; -+ -+#if defined(PVRSRV_ENABLE_PVR_ION_STATS) -+PVRSRV_ERROR PVRSRVIonStatsInitialise(void); -+ -+void PVRSRVIonStatsDestroy(void); -+ -+void PVRSRVIonAddMemAllocRecord(struct dma_buf *psDmaBuf); -+ -+void PVRSRVIonRemoveMemAllocRecord(struct dma_buf *psDmaBuf); -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+void PVRSRVIonZombifyMemAllocRecord(const struct dma_buf *psDmaBuf); -+#endif -+#else -+static INLINE PVRSRV_ERROR PVRSRVIonStatsInitialise(void) -+{ -+ return PVRSRV_OK; -+} -+ -+static INLINE void PVRSRVIonStatsDestroy(void) -+{ -+} -+ -+static INLINE void PVRSRVIonAddMemAllocRecord(struct dma_buf *psDmaBuf) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDmaBuf); -+} -+ -+static INLINE void PVRSRVIonRemoveMemAllocRecord(struct dma_buf *psDmaBuf) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDmaBuf); -+} -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+static INLINE void PVRSRVIonZombifyMemAllocRecord(const struct dma_buf *psDmaBuf) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDmaBuf); -+} -+#endif -+#endif /* defined(PVRSRV_ENABLE_PVR_ION_STATS) */ -+ -+#endif /* PVR_ION_STATS_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_linux_fence.h b/drivers/gpu/drm/img-rogue/pvr_linux_fence.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_linux_fence.h -@@ -0,0 +1,103 @@ -+/* -+ * @File -+ * @Title PowerVR Linux fence compatibility header -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. 
-+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#if !defined(__PVR_LINUX_FENCE_H__) -+#define __PVR_LINUX_FENCE_H__ -+ -+#include -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) && \ -+ !defined(CHROMIUMOS_KERNEL_HAS_DMA_FENCE) -+#include -+#else -+#include -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) && \ -+ !defined(CHROMIUMOS_KERNEL_HAS_DMA_FENCE) -+/* Structures */ -+#define dma_fence fence -+#define dma_fence_array fence_array -+#define dma_fence_cb fence_cb -+#define dma_fence_ops fence_ops -+ -+/* Defines and Enums */ -+#define DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT FENCE_FLAG_ENABLE_SIGNAL_BIT -+#define DMA_FENCE_FLAG_SIGNALED_BIT FENCE_FLAG_SIGNALED_BIT -+#define DMA_FENCE_FLAG_USER_BITS FENCE_FLAG_USER_BITS -+ -+#define DMA_FENCE_ERR FENCE_ERR -+#define DMA_FENCE_TRACE FENCE_TRACE -+#define DMA_FENCE_WARN FENCE_WARN -+ -+/* Functions */ -+#define dma_fence_add_callback fence_add_callback -+#define dma_fence_context_alloc fence_context_alloc -+#define dma_fence_default_wait fence_default_wait -+#define dma_fence_is_signaled fence_is_signaled -+#define dma_fence_enable_sw_signaling fence_enable_sw_signaling -+#define dma_fence_free fence_free -+#define dma_fence_get fence_get -+#define dma_fence_get_rcu fence_get_rcu -+#define dma_fence_init fence_init -+#define dma_fence_is_array fence_is_array -+#define dma_fence_put fence_put -+#define dma_fence_signal fence_signal -+#define dma_fence_wait fence_wait -+#define to_dma_fence_array to_fence_array -+ -+static inline signed long -+dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout) -+{ -+ signed long lret; -+ -+ lret = fence_wait_timeout(fence, intr, timeout); -+ if (lret || timeout) -+ return lret; -+ -+ return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ? 1 : 0; -+} -+ -+#endif -+ -+#endif /* !defined(__PVR_LINUX_FENCE_H__) */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_notifier.c b/drivers/gpu/drm/img-rogue/pvr_notifier.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_notifier.c -@@ -0,0 +1,657 @@ -+/*************************************************************************/ /*! -+@File -+@Title PowerVR notifier interface -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "img_defs.h" -+#include "allocmem.h" -+#include "dllist.h" -+ -+#include "device.h" -+#include "pvr_notifier.h" -+#include "pvrsrv.h" -+#include "pvrversion.h" -+#include "connection_server.h" -+ -+#include "osfunc.h" -+#include "sofunc_pvr.h" -+ -+#define PVR_DUMP_DRIVER_INFO(x, y) \ -+ PVR_DUMPDEBUG_LOG("%s info: %d.%d @ %8d (%s) build options: 0x%08x", \ -+ (x), \ -+ PVRVERSION_UNPACK_MAJ((y).ui32BuildVersion), \ -+ PVRVERSION_UNPACK_MIN((y).ui32BuildVersion), \ -+ (y).ui32BuildRevision, \ -+ (BUILD_TYPE_DEBUG == (y).ui32BuildType) ? "debug":"release", \ -+ (y).ui32BuildOptions); -+ -+#if !defined(WINDOW_SYSTEM) -+#define WINDOW_SYSTEM "Unknown" -+#endif -+ -+#define IS_DECLARED(x) (x[0] != '\0') -+ -+/*************************************************************************/ /*! 
-+Command Complete Notifier Interface -+*/ /**************************************************************************/ -+ -+typedef struct PVRSRV_CMDCOMP_NOTIFY_TAG -+{ -+ PVRSRV_CMDCOMP_HANDLE hCmdCompHandle; -+ PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify; -+ DLLIST_NODE sListNode; -+} PVRSRV_CMDCOMP_NOTIFY; -+ -+/* Head of the list of callbacks called when command complete happens */ -+static DLLIST_NODE g_sCmdCompNotifyHead; -+static POSWR_LOCK g_hCmdCompNotifyLock; -+ -+PVRSRV_ERROR -+PVRSRVCmdCompleteInit(void) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = OSWRLockCreate(&g_hCmdCompNotifyLock); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ dllist_init(&g_sCmdCompNotifyHead); -+ -+ return PVRSRV_OK; -+} -+ -+void -+PVRSRVCmdCompleteDeinit(void) -+{ -+ /* Check that all notify function have been unregistered */ -+ if (!dllist_is_empty(&g_sCmdCompNotifyHead)) -+ { -+ PDLLIST_NODE psNode; -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Command complete notify list is not empty!", __func__)); -+ -+ /* Clean up any stragglers */ -+ psNode = dllist_get_next_node(&g_sCmdCompNotifyHead); -+ while (psNode) -+ { -+ PVRSRV_CMDCOMP_NOTIFY *psNotify; -+ -+ dllist_remove_node(psNode); -+ -+ psNotify = IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode); -+ OSFreeMem(psNotify); -+ -+ psNode = dllist_get_next_node(&g_sCmdCompNotifyHead); -+ } -+ } -+ -+ if (g_hCmdCompNotifyLock) -+ { -+ OSWRLockDestroy(g_hCmdCompNotifyLock); -+ } -+} -+ -+PVRSRV_ERROR -+PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify, -+ PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify, -+ PVRSRV_CMDCOMP_HANDLE hCmdCompHandle) -+{ -+ PVRSRV_CMDCOMP_NOTIFY *psNotify; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(phNotify, "phNotify"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pfnCmdCompleteNotify, "pfnCmdCompleteNotify"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(hCmdCompHandle, "hCmdCompHandle"); -+ -+ psNotify = OSAllocMem(sizeof(*psNotify)); -+ PVR_LOG_RETURN_IF_NOMEM(psNotify, "psNotify"); -+ -+ /* Set-up the notify data */ -+ psNotify->hCmdCompHandle = hCmdCompHandle; -+ psNotify->pfnCmdCompleteNotify = pfnCmdCompleteNotify; -+ -+ /* Add it to the list of Notify functions */ -+ OSWRLockAcquireWrite(g_hCmdCompNotifyLock); -+ dllist_add_to_tail(&g_sCmdCompNotifyHead, &psNotify->sListNode); -+ OSWRLockReleaseWrite(g_hCmdCompNotifyLock); -+ -+ *phNotify = psNotify; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify) -+{ -+ PVRSRV_CMDCOMP_NOTIFY *psNotify; -+ -+ psNotify = (PVRSRV_CMDCOMP_NOTIFY *) hNotify; -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psNotify, "hNotify"); -+ -+ OSWRLockAcquireWrite(g_hCmdCompNotifyLock); -+ dllist_remove_node(&psNotify->sListNode); -+ OSWRLockReleaseWrite(g_hCmdCompNotifyLock); -+ -+ OSFreeMem(psNotify); -+ -+ return PVRSRV_OK; -+} -+ -+void -+PVRSRVNotifyCommandCompletion(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle) -+{ -+#if !defined(NO_HARDWARE) -+ DLLIST_NODE *psNode, *psNext; -+ -+ /* Call notify callbacks to check if blocked work items can now proceed */ -+ OSWRLockAcquireRead(g_hCmdCompNotifyLock); -+ dllist_foreach_node(&g_sCmdCompNotifyHead, psNode, psNext) -+ { -+ PVRSRV_CMDCOMP_NOTIFY *psNotify = -+ IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode); -+ -+ if (hCmdCompCallerHandle != psNotify->hCmdCompHandle) -+ { -+ psNotify->pfnCmdCompleteNotify(psNotify->hCmdCompHandle); -+ } -+ } -+ OSWRLockReleaseRead(g_hCmdCompNotifyLock); -+#endif -+} -+ -+inline void -+PVRSRVSignalDriverWideEO(void) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ -+ if 
(psPVRSRVData->hGlobalEventObject) -+ { -+ OSEventObjectSignal(psPVRSRVData->hGlobalEventObject); -+ } -+ /* Cleanup Thread could be waiting on Cleanup event object, -+ * signal it as well to ensure work is processed -+ */ -+ if (psPVRSRVData->hCleanupEventObject) -+ { -+ OSEventObjectSignal(psPVRSRVData->hCleanupEventObject); -+ } -+} -+ -+inline void -+PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle) -+{ -+ PVRSRVNotifyCommandCompletion(hCmdCompCallerHandle); -+ PVRSRVSignalDriverWideEO(); -+} -+ -+/*************************************************************************/ /*! -+Debug Notifier Interface -+*/ /**************************************************************************/ -+ -+/* Lockdep sees both locks as the same class due to same struct used thus warns -+ * about a possible deadlock (false positive), -+ * using nested api we can supply separate Class' -+ * */ -+#define DN_LOCKCLASS_DRIVER 0 -+#define DN_LOCKCLASS_DEVICE 1 -+ -+typedef struct DEBUG_REQUEST_ENTRY_TAG -+{ -+ IMG_UINT32 ui32RequesterID; -+ DLLIST_NODE sListHead; -+} DEBUG_REQUEST_ENTRY; -+ -+typedef struct DEBUG_REQUEST_TABLE_TAG -+{ -+ POSWR_LOCK hLock; -+ DEBUG_REQUEST_ENTRY asEntry[1]; -+} DEBUG_REQUEST_TABLE; -+ -+typedef struct DEBUG_REQUEST_NOTIFY_TAG -+{ -+ IMG_HANDLE hDebugTable; -+ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle; -+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify; -+ IMG_UINT32 ui32RequesterID; -+ DLLIST_NODE sListNode; -+} DEBUG_REQUEST_NOTIFY; -+ -+static DEBUG_REQUEST_TABLE *g_psDriverDebugTable; -+ -+static const IMG_UINT32 g_aui32DebugOrderTable[] = { -+ DEBUG_REQUEST_SRV, -+ DEBUG_REQUEST_RGX, -+ DEBUG_REQUEST_SYS, -+ DEBUG_REQUEST_APPHINT, -+ DEBUG_REQUEST_HTB, -+ DEBUG_REQUEST_DC, -+ DEBUG_REQUEST_SYNCCHECKPOINT, -+ DEBUG_REQUEST_SYNCTRACKING, -+ DEBUG_REQUEST_ANDROIDSYNC, -+ DEBUG_REQUEST_FALLBACKSYNC, -+ DEBUG_REQUEST_LINUXFENCE -+}; -+static const IMG_UINT32 g_ui32DebugOrderTableReqCount = ARRAY_SIZE(g_aui32DebugOrderTable); -+ -+static PVRSRV_ERROR -+_RegisterDebugTableI(DEBUG_REQUEST_TABLE **ppsDebugTable) -+{ -+ DEBUG_REQUEST_TABLE *psDebugTable; -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError; -+ -+ if (*ppsDebugTable) -+ { -+ return PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED; -+ } -+ -+ psDebugTable = OSAllocMem(sizeof(DEBUG_REQUEST_TABLE) + -+ (sizeof(DEBUG_REQUEST_ENTRY) * (g_ui32DebugOrderTableReqCount-1))); -+ PVR_RETURN_IF_NOMEM(psDebugTable); -+ -+ eError = OSWRLockCreate(&psDebugTable->hLock); -+ PVR_GOTO_IF_ERROR(eError, ErrorFreeDebugTable); -+ -+ /* Init the list heads */ -+ for (i = 0; i < g_ui32DebugOrderTableReqCount; i++) -+ { -+ psDebugTable->asEntry[i].ui32RequesterID = g_aui32DebugOrderTable[i]; -+ dllist_init(&psDebugTable->asEntry[i].sListHead); -+ } -+ -+ *ppsDebugTable = psDebugTable; -+ -+ return PVRSRV_OK; -+ -+ErrorFreeDebugTable: -+ OSFreeMem(psDebugTable); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+PVRSRVRegisterDeviceDbgTable(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ return _RegisterDebugTableI((DEBUG_REQUEST_TABLE**)&psDevNode->hDebugTable); -+} -+ -+PVRSRV_ERROR -+PVRSRVRegisterDriverDbgTable(void) -+{ -+ return _RegisterDebugTableI(&g_psDriverDebugTable); -+} -+ -+static void _UnregisterDbgTableI(DEBUG_REQUEST_TABLE **ppsDebugTable) -+{ -+ DEBUG_REQUEST_TABLE *psDebugTable; -+ IMG_UINT32 i; -+ -+ PVR_ASSERT(*ppsDebugTable); -+ psDebugTable = *ppsDebugTable; -+ *ppsDebugTable = NULL; -+ -+ for (i = 0; i < g_ui32DebugOrderTableReqCount; i++) -+ { -+ if (!dllist_is_empty(&psDebugTable->asEntry[i].sListHead)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Found 
registered callback(s) on %d", -+ __func__, i)); -+ } -+ } -+ -+ OSWRLockDestroy(psDebugTable->hLock); -+ psDebugTable->hLock = NULL; -+ -+ OSFreeMem(psDebugTable); -+} -+ -+void -+PVRSRVUnregisterDeviceDbgTable(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ _UnregisterDbgTableI((DEBUG_REQUEST_TABLE**)&psDevNode->hDebugTable); -+ PVR_ASSERT(!psDevNode->hDebugTable); -+} -+ -+void -+PVRSRVUnregisterDriverDbgTable(void) -+{ -+ _UnregisterDbgTableI(&g_psDriverDebugTable); -+ PVR_ASSERT(!g_psDriverDebugTable); -+} -+ -+static PVRSRV_ERROR -+_RegisterDbgRequestNotifyI(IMG_HANDLE *phNotify, -+ DEBUG_REQUEST_TABLE *psDebugTable, -+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, -+ IMG_UINT32 ui32RequesterID, -+ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle) -+{ -+ DEBUG_REQUEST_NOTIFY *psNotify; -+ PDLLIST_NODE psHead = NULL; -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(phNotify, "phNotify"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDebugTable, "psDebugTable"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(pfnDbgRequestNotify, "pfnDbRequestNotify"); -+ -+ /* NoStats used since this may be called outside of the register/de-register -+ * process calls which track memory use. */ -+ psNotify = OSAllocMemNoStats(sizeof(*psNotify)); -+ PVR_LOG_RETURN_IF_NOMEM(psNotify, "psNotify"); -+ -+ /* Set-up the notify data */ -+ psNotify->hDebugTable = psDebugTable; -+ psNotify->hDbgRequestHandle = hDbgRequestHandle; -+ psNotify->pfnDbgRequestNotify = pfnDbgRequestNotify; -+ psNotify->ui32RequesterID = ui32RequesterID; -+ -+ /* Lock down all the lists */ -+ OSWRLockAcquireWrite(psDebugTable->hLock); -+ -+ /* Find which list to add it to */ -+ for (i = 0; i < g_ui32DebugOrderTableReqCount; i++) -+ { -+ if (psDebugTable->asEntry[i].ui32RequesterID == ui32RequesterID) -+ { -+ psHead = &psDebugTable->asEntry[i].sListHead; -+ } -+ } -+ -+ /* Failed to find debug requester */ -+ PVR_LOG_GOTO_IF_INVALID_PARAM(psHead, eError, ErrorReleaseLock); -+ -+ /* Add it to the list of Notify functions */ -+ dllist_add_to_tail(psHead, &psNotify->sListNode); -+ -+ /* Unlock the lists */ -+ OSWRLockReleaseWrite(psDebugTable->hLock); -+ -+ *phNotify = psNotify; -+ -+ return PVRSRV_OK; -+ -+ErrorReleaseLock: -+ OSWRLockReleaseWrite(psDebugTable->hLock); -+ OSFreeMem(psNotify); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+PVRSRVRegisterDeviceDbgRequestNotify(IMG_HANDLE *phNotify, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, -+ IMG_UINT32 ui32RequesterID, -+ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle) -+{ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode, "psDevNode"); -+ if (!psDevNode->hDebugTable) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: psDevNode->hDebugTable not yet initialised!", -+ __func__)); -+ return PVRSRV_ERROR_NOT_INITIALISED; -+ } -+ -+ return _RegisterDbgRequestNotifyI(phNotify, -+ (DEBUG_REQUEST_TABLE *)psDevNode->hDebugTable, -+ pfnDbgRequestNotify, -+ ui32RequesterID, -+ hDbgRequestHandle); -+} -+ -+PVRSRV_ERROR -+PVRSRVRegisterDriverDbgRequestNotify(IMG_HANDLE *phNotify, -+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, -+ IMG_UINT32 ui32RequesterID, -+ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle) -+{ -+ if (!g_psDriverDebugTable) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: g_psDriverDebugTable not yet initialised!", -+ __func__)); -+ return PVRSRV_ERROR_NOT_INITIALISED; -+ } -+ -+ return _RegisterDbgRequestNotifyI(phNotify, -+ g_psDriverDebugTable, -+ pfnDbgRequestNotify, -+ ui32RequesterID, -+ hDbgRequestHandle); -+} -+ -+PVRSRV_ERROR -+SOPvrDbgRequestNotifyRegister(IMG_HANDLE *phNotify, -+ PVRSRV_DEVICE_NODE *psDevNode, 
-+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, -+ IMG_UINT32 ui32RequesterID, -+ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle) -+{ -+ return PVRSRVRegisterDeviceDbgRequestNotify(phNotify, -+ psDevNode, -+ pfnDbgRequestNotify, -+ ui32RequesterID, -+ hDbgRequestHandle); -+} -+ -+static PVRSRV_ERROR -+_UnregisterDbgRequestNotify(IMG_HANDLE hNotify) -+{ -+ DEBUG_REQUEST_NOTIFY *psNotify = (DEBUG_REQUEST_NOTIFY *) hNotify; -+ DEBUG_REQUEST_TABLE *psDebugTable; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psNotify, "psNotify"); -+ -+ psDebugTable = (DEBUG_REQUEST_TABLE *) psNotify->hDebugTable; -+ -+ OSWRLockAcquireWrite(psDebugTable->hLock); -+ dllist_remove_node(&psNotify->sListNode); -+ OSWRLockReleaseWrite(psDebugTable->hLock); -+ -+ OSFreeMemNoStats(psNotify); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+PVRSRVUnregisterDeviceDbgRequestNotify(IMG_HANDLE hNotify) -+{ -+ return _UnregisterDbgRequestNotify(hNotify); -+} -+ -+PVRSRV_ERROR -+PVRSRVUnregisterDriverDbgRequestNotify(IMG_HANDLE hNotify) -+{ -+ return _UnregisterDbgRequestNotify(hNotify); -+} -+ -+PVRSRV_ERROR -+SOPvrDbgRequestNotifyUnregister(IMG_HANDLE hNotify) -+{ -+ return _UnregisterDbgRequestNotify(hNotify); -+} -+ -+void -+PVRSRVDebugRequest(PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT32 ui32VerbLevel, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ DEBUG_REQUEST_TABLE *psDebugTable = -+ (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable; -+ DEBUG_REQUEST_TABLE *psDriverDebugTable = -+ (DEBUG_REQUEST_TABLE *) g_psDriverDebugTable; -+ static const IMG_CHAR *apszVerbosityTable[] = { "Low", "Medium", "High" }; -+ const IMG_CHAR *szVerbosityLevel; -+ const IMG_CHAR *Bit32 = "32 Bit", *Bit64 = "64 Bit"; -+ IMG_UINT32 i; -+ -+ static_assert(ARRAY_SIZE(apszVerbosityTable) == DEBUG_REQUEST_VERBOSITY_MAX+1, -+ "Incorrect number of verbosity levels"); -+ -+ PVR_ASSERT(psDebugTable); -+ PVR_ASSERT(psDriverDebugTable); -+ -+ if (ui32VerbLevel < ARRAY_SIZE(apszVerbosityTable)) -+ { -+ szVerbosityLevel = apszVerbosityTable[ui32VerbLevel]; -+ } -+ else -+ { -+ szVerbosityLevel = "unknown"; -+ PVR_ASSERT(!"Invalid verbosity level received"); -+ } -+ -+ PVR_DUMPDEBUG_LOG("------------[ PVR DBG: START (%s) ]------------", -+ szVerbosityLevel); -+ -+#if defined(RGX_IRQ_HYPERV_HANDLER) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+#endif -+ { -+ OSDumpVersionInfo(pfnDumpDebugPrintf, pvDumpDebugFile); -+ } -+ -+ PVR_DUMPDEBUG_LOG("DDK info: %s (%s) %s", -+ PVRVERSION_STRING, PVR_BUILD_TYPE, PVR_BUILD_DIR); -+ -+ PVR_DUMPDEBUG_LOG("Time now: %" IMG_UINT64_FMTSPEC "us", -+ OSClockus64()); -+ -+ switch (psPVRSRVData->eServicesState) -+ { -+ case PVRSRV_SERVICES_STATE_OK: -+ PVR_DUMPDEBUG_LOG("Services State: OK"); -+ break; -+ case PVRSRV_SERVICES_STATE_BAD: -+ PVR_DUMPDEBUG_LOG("Services State: BAD"); -+ break; -+ case PVRSRV_SERVICES_STATE_UNDEFINED: -+ PVR_DUMPDEBUG_LOG("Services State: UNDEFINED"); -+ break; -+ default: -+ PVR_DUMPDEBUG_LOG("Services State: UNKNOWN (%d)", -+ psPVRSRVData->eServicesState); -+ break; -+ } -+ -+ PVR_DUMPDEBUG_LOG("Server Errors: %d", -+ PVRSRV_KM_ERRORS); -+ -+ PVRSRVConnectionDebugNotify(psDevNode, pfnDumpDebugPrintf, pvDumpDebugFile); -+ -+ PVR_DUMPDEBUG_LOG("------[ Driver Info ]------"); -+ -+ PVR_DUMPDEBUG_LOG("Comparison of UM/KM components: %s", -+ (psPVRSRVData->sDriverInfo.bIsNoMatch) ? 
"MISMATCH" : "MATCHING"); -+ -+ if (psPVRSRVData->sDriverInfo.ui8KMBitArch) -+ { -+ PVR_DUMPDEBUG_LOG("KM Arch: %s",(psPVRSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT) ? Bit64 : Bit32); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("KM Arch is undefined"); -+ } -+ -+ PVR_DUMPDEBUG_LOG("Driver Mode: %s", -+ PVRSRV_VZ_MODE_IS(NATIVE) ? "Native" : (PVRSRV_VZ_MODE_IS(HOST)) ? "Host":"Guest"); -+ -+ if (psPVRSRVData->sDriverInfo.ui8UMSupportedArch) -+ { -+ if ((psPVRSRVData->sDriverInfo.ui8UMSupportedArch & BUILD_ARCH_BOTH) == -+ BUILD_ARCH_BOTH) -+ { -+ PVR_DUMPDEBUG_LOG("UM Connected Clients Arch: %s and %s", Bit64, Bit32); -+ -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("UM Connected Clients: %s", -+ (psPVRSRVData->sDriverInfo.ui8UMSupportedArch & BUILD_ARCH_64BIT) ? Bit64 : Bit32); -+ } -+ } -+ -+ PVR_DUMP_DRIVER_INFO("UM", psPVRSRVData->sDriverInfo.sUMBuildInfo); -+ PVR_DUMP_DRIVER_INFO("KM", psPVRSRVData->sDriverInfo.sKMBuildInfo); -+ -+ PVR_DUMPDEBUG_LOG("Window system: %s", (IS_DECLARED(WINDOW_SYSTEM)) ? (WINDOW_SYSTEM) : "Not declared"); -+ -+ /* Driver debug table */ -+ OSWRLockAcquireReadNested(psDriverDebugTable->hLock, DN_LOCKCLASS_DRIVER); -+ /* Device debug table*/ -+ OSWRLockAcquireReadNested(psDebugTable->hLock, DN_LOCKCLASS_DEVICE); -+ -+ /* For each requester in Driver and Device table */ -+ for (i = 0; i < g_ui32DebugOrderTableReqCount; i++) -+ { -+ DLLIST_NODE *psNode; -+ DLLIST_NODE *psNext; -+ -+ /* For each notifier on this requestor */ -+ dllist_foreach_node(&psDriverDebugTable->asEntry[i].sListHead, psNode, psNext) -+ { -+ DEBUG_REQUEST_NOTIFY *psNotify = -+ IMG_CONTAINER_OF(psNode, DEBUG_REQUEST_NOTIFY, sListNode); -+ psNotify->pfnDbgRequestNotify(psNotify->hDbgRequestHandle, ui32VerbLevel, -+ pfnDumpDebugPrintf, pvDumpDebugFile); -+ } -+ -+ /* For each notifier on this requestor */ -+ dllist_foreach_node(&psDebugTable->asEntry[i].sListHead, psNode, psNext) -+ { -+ DEBUG_REQUEST_NOTIFY *psNotify = -+ IMG_CONTAINER_OF(psNode, DEBUG_REQUEST_NOTIFY, sListNode); -+ psNotify->pfnDbgRequestNotify(psNotify->hDbgRequestHandle, ui32VerbLevel, -+ pfnDumpDebugPrintf, pvDumpDebugFile); -+ } -+ } -+ -+ OSWRLockReleaseRead(psDebugTable->hLock); -+ OSWRLockReleaseRead(psDriverDebugTable->hLock); -+ -+ PVR_DUMPDEBUG_LOG("------------[ PVR DBG: END ]------------"); -+ -+ if (!pfnDumpDebugPrintf) -+ { -+ /* Only notify OS of an issue if the debug dump has gone there */ -+ OSWarnOn(IMG_TRUE); -+ } -+} -diff --git a/drivers/gpu/drm/img-rogue/pvr_notifier.h b/drivers/gpu/drm/img-rogue/pvr_notifier.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_notifier.h -@@ -0,0 +1,326 @@ -+/*************************************************************************/ /*! -+@File -+@Title PowerVR notifier interface -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(PVR_NOTIFIER_H) -+#define PVR_NOTIFIER_H -+ -+#include "img_types.h" -+#include "pvr_debug.h" -+ -+ -+/*************************************************************************/ /*! -+Command Complete Notifier Interface -+*/ /**************************************************************************/ -+ -+typedef IMG_HANDLE PVRSRV_CMDCOMP_HANDLE; -+#ifndef CMDCOMPNOTIFY_PFN -+typedef void (*PFN_CMDCOMP_NOTIFY)(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle); -+#define CMDCOMPNOTIFY_PFN -+#endif -+ -+/*************************************************************************/ /*! -+@Function PVRSRVCmdCompleteInit -+@Description Performs initialisation of the command complete notifier -+ interface. -+@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVCmdCompleteInit(void); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVCmdCompleteDeinit -+@Description Performs cleanup for the command complete notifier interface. -+@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error -+*/ /**************************************************************************/ -+void -+PVRSRVCmdCompleteDeinit(void); -+ -+/*************************************************************************/ /*! 
-+@Function PVRSRVRegisterCmdCompleteNotify -+@Description Register a callback function that is called when some device -+ finishes some work, which is signalled via a call to -+ PVRSRVCheckStatus. -+@Output phNotify On success, points to command complete -+ notifier handle -+@Input pfnCmdCompleteNotify Function callback -+@Input hPrivData Data to be passed back to the caller via -+ the callback function -+@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify, -+ PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify, -+ PVRSRV_CMDCOMP_HANDLE hPrivData); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVUnregisterCmdCompleteNotify -+@Description Unregister a previously registered callback function. -+@Input hNotify Command complete notifier handle -+@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVCheckStatus -+@Description Calls PVRSRVNotifyCommandCompletion() to notify registered -+ command complete handlers of work completion and then calls -+ PVRSRVSignalGlobalEO() to signal the driver wide event objects. -+@Input hCmdCompCallerHandle Used to prevent a handler from being -+ notified. A NULL value results in all -+ handlers being notified. -+*/ /**************************************************************************/ -+void -+PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVNotifyCommandCompletion -+@Description Notify any registered command complete handlers that some work -+ has been finished (unless hCmdCompCallerHandle matches a -+ handler's hPrivData). -+@Input hCmdCompCallerHandle Used to prevent a handler from being -+ notified. A NULL value results in all -+ handlers being notified. -+*/ /**************************************************************************/ -+void -+PVRSRVNotifyCommandCompletion(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVSignalDriverWideEO -+@Description Signals the driver wide event objects. -+*/ /**************************************************************************/ -+void -+PVRSRVSignalDriverWideEO(void); -+ -+ -+/*************************************************************************/ /*! 
-+Debug Notifier Interface -+*/ /**************************************************************************/ -+ -+#define DEBUG_REQUEST_DC 0 -+#define DEBUG_REQUEST_SYNCTRACKING 1 -+#define DEBUG_REQUEST_SRV 2 -+#define DEBUG_REQUEST_SYS 3 -+#define DEBUG_REQUEST_RGX 4 -+#define DEBUG_REQUEST_ANDROIDSYNC 5 -+#define DEBUG_REQUEST_LINUXFENCE 6 -+#define DEBUG_REQUEST_SYNCCHECKPOINT 7 -+#define DEBUG_REQUEST_HTB 8 -+#define DEBUG_REQUEST_APPHINT 9 -+#define DEBUG_REQUEST_FALLBACKSYNC 10 -+ -+#define DEBUG_REQUEST_VERBOSITY_LOW 0 -+#define DEBUG_REQUEST_VERBOSITY_MEDIUM 1 -+#define DEBUG_REQUEST_VERBOSITY_HIGH 2 -+#define DEBUG_REQUEST_VERBOSITY_MAX DEBUG_REQUEST_VERBOSITY_HIGH -+ -+#define DD_VERB_LVL_ENABLED(_verbLvl, _verbLvlChk) ((_verbLvl) >= (_verbLvlChk)) -+ -+/* -+ * Macro used within debug dump functions to send output either to PVR_LOG or -+ * a custom function. The custom function should be stored as a function -+ * pointer in a local variable called 'pfnDumpDebugPrintf'. 'pvDumpDebugFile' -+ * is also required as a local variable to serve as a file identifier for the -+ * printf function if required. -+ */ -+#define PVR_DUMPDEBUG_LOG(...) \ -+ do \ -+ { \ -+ if (pfnDumpDebugPrintf) \ -+ pfnDumpDebugPrintf(pvDumpDebugFile, __VA_ARGS__); \ -+ else \ -+ PVR_LOG((__VA_ARGS__)); \ -+ } while (0) -+ -+struct _PVRSRV_DEVICE_NODE_; -+ -+typedef IMG_HANDLE PVRSRV_DBGREQ_HANDLE; -+#ifndef DBGNOTIFY_PFNS -+typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile, -+ const IMG_CHAR *pszFormat, ...); -+typedef void (*PFN_DBGREQ_NOTIFY)(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, -+ IMG_UINT32 ui32VerbLevel, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile); -+#define DBGNOTIFY_PFNS -+#endif -+ -+/*************************************************************************/ /*! -+@Function PVRSRVRegisterDeviceDbgTable -+@Description Registers a debug requester table for the given device. The -+ order in which the debug requester IDs appear in the -+ table determine the order in which a set of notifier callbacks -+ will be called. In other words, the requester ID that appears -+ first will have all of its associated debug notifier callbacks -+ called first. This will then be followed by all the callbacks -+ associated with the next requester ID in the table and so on. -+ The order table is handled internally. -+@Input psDevNode Device node to register requester table with -+@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVRegisterDeviceDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVRegisterDriverDbgTable -+@Description Registers a debug requester table for the driver. The -+ order in which the debug requester IDs appear in the -+ table determine the order in which a set of notifier callbacks -+ will be called. In other words, the requester ID that appears -+ first will have all of its associated debug notifier callbacks -+ called first. This will then be followed by all the callbacks -+ associated with the next requester ID in the table and so on. -+ The order table is handled internally. 
-+@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVRegisterDriverDbgTable(void); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVUnregisterDeviceDbgTable -+@Description Unregisters a debug requester table. -+@Input psDevNode Device node for which the requester table should -+ be unregistered -+@Return void -+*/ /**************************************************************************/ -+void -+PVRSRVUnregisterDeviceDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVUnregisterDriverDbgTable -+@Description Unregisters the driver debug requester table. -+@Return void -+*/ /**************************************************************************/ -+void -+PVRSRVUnregisterDriverDbgTable(void); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVRegisterDeviceDbgRequestNotify -+@Description Register a callback function on a given device that is called -+ when a debug request is made via a call PVRSRVDebugRequest. -+ There are a number of verbosity levels ranging from -+ DEBUG_REQUEST_VERBOSITY_LOW up to -+ DEBUG_REQUEST_VERBOSITY_MAX. The callback will be called once -+ for each level up to the highest level specified to -+ PVRSRVDebugRequest. -+@Output phNotify Points to debug notifier handle on success -+@Input psDevNode Device node for which the debug callback -+ should be registered -+@Input pfnDbgRequestNotify Function callback -+@Input ui32RequesterID Requester ID. This is used to determine -+ the order in which callbacks are called -+@Input hDbgReqeustHandle Data to be passed back to the caller via -+ the callback function -+@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVRegisterDeviceDbgRequestNotify(IMG_HANDLE *phNotify, -+ struct _PVRSRV_DEVICE_NODE_ *psDevNode, -+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, -+ IMG_UINT32 ui32RequesterID, -+ PVRSRV_DBGREQ_HANDLE hDbgReqeustHandle); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVRegisterDriverDbgRequestNotify -+@Description Register a callback function that is called when a debug request -+ is made via a call PVRSRVDebugRequest. There are a number of -+ verbosity levels ranging from DEBUG_REQUEST_VERBOSITY_LOW up to -+ DEBUG_REQUEST_VERBOSITY_MAX. The callback will be called once -+ for each level up to the highest level specified to -+ PVRSRVDebugRequest. -+@Output phNotify Points to debug notifier handle on success -+@Input pfnDbgRequestNotify Function callback -+@Input ui32RequesterID Requester ID. This is used to determine -+ the order in which callbacks are called -+@Input hDbgReqeustHandle Data to be passed back to the caller via -+ the callback function -+@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVRegisterDriverDbgRequestNotify(IMG_HANDLE *phNotify, -+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, -+ IMG_UINT32 ui32RequesterID, -+ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle); -+ -+/*************************************************************************/ /*! 
-+@Function PVRSRVUnregisterDeviceDbgRequestNotify -+@Description Unregister a previously registered (device context) callback -+ function. -+@Input hNotify Debug notifier handle. -+@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVUnregisterDeviceDbgRequestNotify(IMG_HANDLE hNotify); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVUnregisterDriverDbgRequestNotify -+@Description Unregister a previously registered (driver context) callback -+ function. -+@Input hNotify Debug notifier handle. -+@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVUnregisterDriverDbgRequestNotify(IMG_HANDLE hNotify); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVDebugRequest -+@Description Notify any registered debug request handlers that a debug -+ request has been made and at what level. -+@Input psDevNode Device node for which the debug request -+ has been made -+@Input ui32VerbLevel The maximum verbosity level to dump -+@Input pfnDumpDebugPrintf Used to specify the print function that -+ should be used to dump any debug -+ information. If this argument is NULL then -+ PVR_LOG() will be used as the default -+ print function. -+@Input pvDumpDebugFile Optional file identifier to be passed to -+ the print function if required. -+@Return void -+*/ /**************************************************************************/ -+void -+PVRSRVDebugRequest(struct _PVRSRV_DEVICE_NODE_ *psDevNode, -+ IMG_UINT32 ui32VerbLevel, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile); -+ -+#endif /* !defined(PVR_NOTIFIER_H) */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_platform_drv.c b/drivers/gpu/drm/img-rogue/pvr_platform_drv.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_platform_drv.c -@@ -0,0 +1,337 @@ -+/* -+ * @File -+ * @Title PowerVR DRM platform driver -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. 
-+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) -+#include -+#include -+#include -+#include -+#include -+#include -+#else -+#include -+#endif -+ -+#include -+#include -+ -+#include "module_common.h" -+#include "pvr_drv.h" -+#include "pvrmodule.h" -+#include "sysinfo.h" -+ -+ -+/* This header must always be included last */ -+#include "kernel_compatibility.h" -+ -+MODULE_IMPORT_NS(DMA_BUF); -+ -+static struct drm_driver pvr_drm_platform_driver; -+ -+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) -+static unsigned int pvr_num_devices = 1; -+static struct platform_device **pvr_devices; -+ -+#if defined(NO_HARDWARE) -+static int pvr_num_devices_set(const char *val, -+ const struct kernel_param *param) -+{ -+ int err; -+ -+ err = param_set_uint(val, param); -+ if (err) -+ return err; -+ -+ if (pvr_num_devices == 0 || pvr_num_devices > PVRSRV_MAX_DEVICES) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static const struct kernel_param_ops pvr_num_devices_ops = { -+ .set = pvr_num_devices_set, -+ .get = param_get_uint, -+}; -+ -+#define STR(s) #s -+#define STRINGIFY(s) STR(s) -+ -+module_param_cb(num_devices, &pvr_num_devices_ops, &pvr_num_devices, 0444); -+MODULE_PARM_DESC(num_devices, -+ "Number of platform devices to register (default: 1 - max: " -+ STRINGIFY(PVRSRV_MAX_DEVICES) ")"); -+#endif /* defined(NO_HARDWARE) */ -+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */ -+ -+static int pvr_devices_register(void) -+{ -+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) -+ struct platform_device_info pvr_dev_info = { -+ .name = SYS_RGX_DEV_NAME, -+ .id = -2, -+#if defined(NO_HARDWARE) -+ /* Not all cores have 40 bit physical support, but this -+ * will work unless > 32 bit address is returned on those cores. -+ * In the future this will be fixed more correctly. 
-+ */ -+ .dma_mask = DMA_BIT_MASK(40), -+#else -+ .dma_mask = DMA_BIT_MASK(40), -+#endif -+ }; -+ unsigned int i; -+ -+ BUG_ON(pvr_num_devices == 0 || pvr_num_devices > PVRSRV_MAX_DEVICES); -+ -+ pvr_devices = kmalloc_array(pvr_num_devices, sizeof(*pvr_devices), -+ GFP_KERNEL); -+ if (!pvr_devices) -+ return -ENOMEM; -+ -+ for (i = 0; i < pvr_num_devices; i++) { -+ pvr_devices[i] = platform_device_register_full(&pvr_dev_info); -+ if (IS_ERR(pvr_devices[i])) { -+ DRM_ERROR("unable to register device %u (err=%ld)\n", -+ i, PTR_ERR(pvr_devices[i])); -+ pvr_devices[i] = NULL; -+ return -ENODEV; -+ } -+ } -+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */ -+ -+ return 0; -+} -+ -+static void pvr_devices_unregister(void) -+{ -+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) -+ unsigned int i; -+ -+ BUG_ON(!pvr_devices); -+ -+ for (i = 0; i < pvr_num_devices && pvr_devices[i]; i++) -+ platform_device_unregister(pvr_devices[i]); -+ -+ kfree(pvr_devices); -+ pvr_devices = NULL; -+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */ -+} -+ -+static int pvr_probe(struct platform_device *pdev) -+{ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) -+ struct drm_device *ddev; -+ int ret; -+ -+ DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); -+ -+ ddev = drm_dev_alloc(&pvr_drm_platform_driver, &pdev->dev); -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) -+ if (IS_ERR(ddev)) -+ return PTR_ERR(ddev); -+#else -+ if (!ddev) -+ return -ENOMEM; -+#endif -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) -+ /* Needed by drm_platform_set_busid */ -+ ddev->platformdev = pdev; -+#endif -+ -+ /* -+ * The load callback, called from drm_dev_register, is deprecated, -+ * because of potential race conditions. Calling the function here, -+ * before calling drm_dev_register, avoids those potential races. -+ */ -+ BUG_ON(pvr_drm_platform_driver.load != NULL); -+ ret = pvr_drm_load(ddev, 0); -+ if (ret) -+ goto err_drm_dev_put; -+ -+ ret = drm_dev_register(ddev, 0); -+ if (ret) -+ goto err_drm_dev_unload; -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) -+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", -+ pvr_drm_platform_driver.name, -+ pvr_drm_platform_driver.major, -+ pvr_drm_platform_driver.minor, -+ pvr_drm_platform_driver.patchlevel, -+ pvr_drm_platform_driver.date, -+ ddev->primary->index); -+#endif -+ return 0; -+ -+err_drm_dev_unload: -+ pvr_drm_unload(ddev); -+err_drm_dev_put: -+ drm_dev_put(ddev); -+ return ret; -+#else -+ DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); -+ -+ return drm_platform_init(&pvr_drm_platform_driver, pdev); -+#endif -+} -+ -+static int pvr_remove(struct platform_device *pdev) -+{ -+ struct drm_device *ddev = platform_get_drvdata(pdev); -+ -+ DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) -+ drm_dev_unregister(ddev); -+ -+ /* The unload callback, called from drm_dev_unregister, is -+ * deprecated. Call the unload function directly. 
-+ */ -+ BUG_ON(pvr_drm_platform_driver.unload != NULL); -+ pvr_drm_unload(ddev); -+ -+ drm_dev_put(ddev); -+#else -+ drm_put_dev(ddev); -+#endif -+ return 0; -+} -+ -+static void pvr_shutdown(struct platform_device *pdev) -+{ -+ struct drm_device *ddev = platform_get_drvdata(pdev); -+ -+ DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); -+ -+ PVRSRVDeviceShutdown(ddev); -+} -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) -+static const struct of_device_id pvr_of_ids[] = { -+#if defined(SYS_RGX_OF_COMPATIBLE) -+ { .compatible = SYS_RGX_OF_COMPATIBLE, }, -+#endif -+ {}, -+}; -+ -+#if !defined(CHROMIUMOS_KERNEL) || !defined(MODULE) -+MODULE_DEVICE_TABLE(of, pvr_of_ids); -+#endif -+#endif -+ -+static struct platform_device_id pvr_platform_ids[] = { -+#if defined(SYS_RGX_DEV_NAME) -+ { SYS_RGX_DEV_NAME, 0 }, -+#endif -+#if defined(SYS_RGX_DEV_NAME_0) -+ { SYS_RGX_DEV_NAME_0, 0 }, -+#endif -+#if defined(SYS_RGX_DEV_NAME_1) -+ { SYS_RGX_DEV_NAME_1, 0 }, -+#endif -+#if defined(SYS_RGX_DEV_NAME_2) -+ { SYS_RGX_DEV_NAME_2, 0 }, -+#endif -+#if defined(SYS_RGX_DEV_NAME_3) -+ { SYS_RGX_DEV_NAME_3, 0 }, -+#endif -+ { } -+}; -+ -+#if !defined(CHROMIUMOS_KERNEL) || !defined(MODULE) -+MODULE_DEVICE_TABLE(platform, pvr_platform_ids); -+#endif -+ -+static struct platform_driver pvr_platform_driver = { -+ .driver = { -+ .name = DRVNAME, -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) -+ .of_match_table = of_match_ptr(pvr_of_ids), -+#endif -+ .pm = &pvr_pm_ops, -+ }, -+ .id_table = pvr_platform_ids, -+ .probe = pvr_probe, -+ .remove = pvr_remove, -+ .shutdown = pvr_shutdown, -+}; -+ -+static int __init pvr_init(void) -+{ -+ int err; -+ -+ DRM_DEBUG_DRIVER("\n"); -+ -+ pvr_drm_platform_driver = pvr_drm_generic_driver; -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && \ -+ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) -+ pvr_drm_platform_driver.set_busid = drm_platform_set_busid; -+#endif -+ -+ err = PVRSRVDriverInit(); -+ if (err) -+ return err; -+ -+ err = platform_driver_register(&pvr_platform_driver); -+ if (err) -+ return err; -+ -+ return pvr_devices_register(); -+} -+ -+static void __exit pvr_exit(void) -+{ -+ DRM_DEBUG_DRIVER("\n"); -+ -+ pvr_devices_unregister(); -+ platform_driver_unregister(&pvr_platform_driver); -+ PVRSRVDriverDeinit(); -+ -+ DRM_DEBUG_DRIVER("done\n"); -+} -+ -+device_initcall(pvr_init); -+module_exit(pvr_exit); -diff --git a/drivers/gpu/drm/img-rogue/pvr_procfs.h b/drivers/gpu/drm/img-rogue/pvr_procfs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_procfs.h -@@ -0,0 +1,50 @@ -+/*************************************************************************/ /*! -+@File -+@Title ProcFS implementation of Debug Info interface. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PVR_PROCFS_H -+#define PVR_PROCFS_H -+ -+#include "pvrsrv_error.h" -+ -+PVRSRV_ERROR PVRProcFsRegister(void); -+ -+#endif /* PVR_PROCFS_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_ricommon.h b/drivers/gpu/drm/img-rogue/pvr_ricommon.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_ricommon.h -@@ -0,0 +1,68 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services Resource Information (RI) common types and definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Resource Information (RI) common types and definitions included -+ in both user mode and kernel mode source. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef PVR_RICOMMON_H -+#define PVR_RICOMMON_H -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+#include "img_defs.h" -+ -+/*! Maximum text string length including the null byte */ -+#define PRVSRVRI_MAX_TEXT_LENGTH 20U -+ -+/* PID used to hold PMR allocations which are driver-wide (i.e. have a lifetime -+ * longer than an application process) -+ */ -+#define PVR_SYS_ALLOC_PID 1 -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif /* PVR_RICOMMON_H */ -+/****************************************************************************** -+ End of file (pvr_ricommon.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/pvr_sw_fence.c b/drivers/gpu/drm/img-rogue/pvr_sw_fence.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_sw_fence.c -@@ -0,0 +1,199 @@ -+/* -+ * @File -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". 
-+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "pvr_sw_fence.h" -+ -+struct pvr_sw_fence_context { -+ struct kref kref; -+ unsigned int context; -+ char context_name[32]; -+ char driver_name[32]; -+ atomic_t seqno; -+ atomic_t fence_count; -+}; -+ -+struct pvr_sw_fence { -+ struct dma_fence base; -+ struct pvr_sw_fence_context *fence_context; -+ spinlock_t lock; -+}; -+ -+#define to_pvr_sw_fence(fence) container_of(fence, struct pvr_sw_fence, base) -+ -+const char *pvr_sw_fence_context_name(struct pvr_sw_fence_context *fctx) -+{ -+ return fctx->context_name; -+} -+ -+void pvr_sw_fence_context_value_str(struct pvr_sw_fence_context *fctx, -+ char *str, int size) -+{ -+ snprintf(str, size, "%d", atomic_read(&fctx->seqno)); -+} -+ -+static inline unsigned -+pvr_sw_fence_context_seqno_next(struct pvr_sw_fence_context *fence_context) -+{ -+ return atomic_inc_return(&fence_context->seqno) - 1; -+} -+ -+static const char *pvr_sw_fence_get_driver_name(struct dma_fence *fence) -+{ -+ struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence); -+ -+ return pvr_sw_fence->fence_context->driver_name; -+} -+ -+static const char *pvr_sw_fence_get_timeline_name(struct dma_fence *fence) -+{ -+ struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence); -+ -+ return pvr_sw_fence_context_name(pvr_sw_fence->fence_context); -+} -+ -+static void pvr_sw_fence_value_str(struct dma_fence *fence, char *str, int size) -+{ -+ snprintf(str, size, "%llu", (u64) fence->seqno); -+} -+ -+static void pvr_sw_fence_timeline_value_str(struct dma_fence *fence, -+ char *str, int size) -+{ -+ struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence); -+ -+ pvr_sw_fence_context_value_str(pvr_sw_fence->fence_context, str, size); -+} -+ -+static bool pvr_sw_fence_enable_signaling(struct dma_fence *fence) -+{ -+ return true; -+} -+ -+static void pvr_sw_fence_context_destroy_kref(struct kref *kref) -+{ -+ struct pvr_sw_fence_context *fence_context = -+ container_of(kref, struct pvr_sw_fence_context, kref); -+ unsigned int fence_count; -+ -+ fence_count = atomic_read(&fence_context->fence_count); -+ if (WARN_ON(fence_count)) -+ pr_debug("%s context has %u fence(s) remaining\n", -+ fence_context->context_name, fence_count); -+ -+ kfree(fence_context); -+} -+ -+static void pvr_sw_fence_release(struct dma_fence *fence) -+{ -+ struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence); -+ -+ atomic_dec(&pvr_sw_fence->fence_context->fence_count); -+ kref_put(&pvr_sw_fence->fence_context->kref, -+ pvr_sw_fence_context_destroy_kref); -+ kfree(pvr_sw_fence); -+} -+ -+static const struct dma_fence_ops pvr_sw_fence_ops = { -+ .get_driver_name = pvr_sw_fence_get_driver_name, -+ .get_timeline_name = pvr_sw_fence_get_timeline_name, -+ .fence_value_str = pvr_sw_fence_value_str, -+ .timeline_value_str = pvr_sw_fence_timeline_value_str, -+ .enable_signaling = pvr_sw_fence_enable_signaling, -+ .wait = dma_fence_default_wait, -+ .release = 
pvr_sw_fence_release, -+}; -+ -+struct pvr_sw_fence_context * -+pvr_sw_fence_context_create(const char *context_name, const char *driver_name) -+{ -+ struct pvr_sw_fence_context *fence_context; -+ -+ fence_context = kmalloc(sizeof(*fence_context), GFP_KERNEL); -+ if (!fence_context) -+ return NULL; -+ -+ fence_context->context = dma_fence_context_alloc(1); -+ strlcpy(fence_context->context_name, context_name, -+ sizeof(fence_context->context_name)); -+ strlcpy(fence_context->driver_name, driver_name, -+ sizeof(fence_context->driver_name)); -+ atomic_set(&fence_context->seqno, 0); -+ atomic_set(&fence_context->fence_count, 0); -+ kref_init(&fence_context->kref); -+ -+ return fence_context; -+} -+ -+void pvr_sw_fence_context_destroy(struct pvr_sw_fence_context *fence_context) -+{ -+ kref_put(&fence_context->kref, pvr_sw_fence_context_destroy_kref); -+} -+ -+struct dma_fence * -+pvr_sw_fence_create(struct pvr_sw_fence_context *fence_context) -+{ -+ struct pvr_sw_fence *pvr_sw_fence; -+ unsigned int seqno; -+ -+ pvr_sw_fence = kmalloc(sizeof(*pvr_sw_fence), GFP_KERNEL); -+ if (!pvr_sw_fence) -+ return NULL; -+ -+ spin_lock_init(&pvr_sw_fence->lock); -+ pvr_sw_fence->fence_context = fence_context; -+ -+ seqno = pvr_sw_fence_context_seqno_next(fence_context); -+ dma_fence_init(&pvr_sw_fence->base, &pvr_sw_fence_ops, -+ &pvr_sw_fence->lock, fence_context->context, seqno); -+ -+ atomic_inc(&fence_context->fence_count); -+ kref_get(&fence_context->kref); -+ -+ return &pvr_sw_fence->base; -+} -diff --git a/drivers/gpu/drm/img-rogue/pvr_sw_fence.h b/drivers/gpu/drm/img-rogue/pvr_sw_fence.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_sw_fence.h -@@ -0,0 +1,60 @@ -+/* -+ * @File -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". 
-+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#if !defined(__PVR_SW_FENCES_H__) -+#define __PVR_SW_FENCES_H__ -+ -+#include "pvr_linux_fence.h" -+ -+struct pvr_sw_fence_context; -+ -+struct pvr_sw_fence_context *pvr_sw_fence_context_create(const char *name, -+ const char *driver_name); -+void pvr_sw_fence_context_destroy(struct pvr_sw_fence_context *fence_context); -+struct dma_fence *pvr_sw_fence_create(struct pvr_sw_fence_context * -+ fence_context); -+ -+const char *pvr_sw_fence_context_name(struct pvr_sw_fence_context *fctx); -+void pvr_sw_fence_context_value_str(struct pvr_sw_fence_context *fctx, -+ char *str, int size); -+ -+#endif /* !defined(__PVR_SW_FENCES_H__) */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_sync.h b/drivers/gpu/drm/img-rogue/pvr_sync.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_sync.h -@@ -0,0 +1,120 @@ -+/* -+ * @File pvr_sync.h -+ * @Title Kernel driver for Android's sync mechanism -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". 
-+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#ifndef _PVR_SYNC_H -+#define _PVR_SYNC_H -+ -+#include -+ -+#include "pvr_fd_sync_kernel.h" -+#include "services_kernel_client.h" -+ -+ -+/* Services internal interface */ -+ -+/** -+ * pvr_sync_register_functions() -+ * -+ * Return: PVRSRV_OK on success. -+ */ -+enum PVRSRV_ERROR_TAG pvr_sync_register_functions(void); -+ -+/** -+ * pvr_sync_init - register the pvr_sync misc device -+ * -+ * Return: error code, 0 on success. -+ */ -+int pvr_sync_init(void); -+ -+/** -+ * pvr_sync_deinit - unregister the pvr_sync misc device -+ */ -+void pvr_sync_deinit(void); -+ -+/** -+ * pvr_sync_device_init() - create an internal sync context -+ * @dev: Linux device -+ * -+ * Return: PVRSRV_OK on success. -+ */ -+enum PVRSRV_ERROR_TAG pvr_sync_device_init(struct device *dev); -+ -+/** -+ * pvr_sync_device_deinit() - destroy an internal sync context -+ * -+ * Drains any work items with outstanding sync fence updates/dependencies. -+ */ -+void pvr_sync_device_deinit(struct device *dev); -+ -+enum PVRSRV_ERROR_TAG pvr_sync_fence_wait(void *fence, u32 timeout_in_ms); -+ -+enum PVRSRV_ERROR_TAG pvr_sync_fence_release(void *fence); -+ -+enum PVRSRV_ERROR_TAG pvr_sync_fence_get(int fence_fd, void **fence_out); -+ -+enum PVRSRV_ERROR_TAG -+pvr_sync_sw_timeline_fence_create(struct _PVRSRV_DEVICE_NODE_ *pvrsrv_dev_node, -+ int timeline_fd, -+ const char *fence_name, -+ int *fence_fd_out, -+ u64 *sync_pt_idx); -+ -+enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_advance(void *timeline, -+ u64 *sync_pt_idx); -+ -+enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_release(void *timeline); -+ -+enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_get(int timeline_fd, -+ void **timeline_out); -+ -+enum PVRSRV_ERROR_TAG -+sync_dump_fence(void *sw_fence_obj, -+ DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, -+ void *dump_debug_file); -+ -+enum PVRSRV_ERROR_TAG -+sync_sw_dump_timeline(void *sw_timeline_obj, -+ DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, -+ void *dump_debug_file); -+ -+#endif /* _PVR_SYNC_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_sync_api.h b/drivers/gpu/drm/img-rogue/pvr_sync_api.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_sync_api.h -@@ -0,0 +1,63 @@ -+/* -+ * @File pvr_sync_api.h -+ * @Title Kernel driver for Android's sync mechanism -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#ifndef _PVR_SYNC_API_H -+#define _PVR_SYNC_API_H -+ -+#include "img_types.h" -+ -+int pvr_sync_api_init(void *file_handle, void **api_priv); -+int pvr_sync_api_deinit(void *api_priv, bool is_sw); -+int pvr_sync_api_rename(void *api_priv, void *user_data); -+int pvr_sync_api_force_sw_only(void *api_priv, void **api_priv_new); -+int pvr_sync_api_sw_create_fence(void *api_priv, void *user_data); -+int pvr_sync_api_sw_inc(void *api_priv, void *user_data); -+ -+struct file; -+ -+int pvr_sync_ioctl_init(void); -+void pvr_sync_ioctl_deinit(void); -+void *pvr_sync_get_api_priv(struct file *file); -+struct file *pvr_sync_get_file_struct(void *file_handle); -+ -+#endif /* _PVR_SYNC_API_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_sync_file.c b/drivers/gpu/drm/img-rogue/pvr_sync_file.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_sync_file.c -@@ -0,0 +1,1094 @@ -+/* -+ * @File pvr_sync_file.c -+ * @Title Kernel driver for Android's sync mechanism -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include "services_kernel_client.h" -+#include "pvr_drv.h" -+#include "pvr_sync.h" -+#include "pvr_fence.h" -+#include "pvr_counting_timeline.h" -+ -+#include "linux_sw_sync.h" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "pvr_sync_api.h" -+ -+/* This header must always be included last */ -+#include "kernel_compatibility.h" -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) && !defined(CHROMIUMOS_KERNEL) -+#define sync_file_user_name(s) ((s)->name) -+#else -+#define sync_file_user_name(s) ((s)->user_name) -+#endif -+ -+#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) 
\ -+ do { \ -+ if (pfnDumpDebugPrintf) \ -+ pfnDumpDebugPrintf(pvDumpDebugFile, fmt, \ -+ ## __VA_ARGS__); \ -+ else \ -+ pr_err(fmt "\n", ## __VA_ARGS__); \ -+ } while (0) -+ -+#define FILE_NAME "pvr_sync_file" -+ -+struct sw_sync_create_fence_data { -+ __u32 value; -+ char name[32]; -+ __s32 fence; -+}; -+#define SW_SYNC_IOC_MAGIC 'W' -+#define SW_SYNC_IOC_CREATE_FENCE \ -+ (_IOWR(SW_SYNC_IOC_MAGIC, 0, struct sw_sync_create_fence_data)) -+#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) -+ -+/* Global data for the sync driver */ -+static struct { -+ struct pvr_fence_context *foreign_fence_context; -+ PFN_SYNC_CHECKPOINT_STRUCT sync_checkpoint_ops; -+} pvr_sync_data; -+ -+#if defined(NO_HARDWARE) -+static DEFINE_MUTEX(pvr_timeline_active_list_lock); -+static struct list_head pvr_timeline_active_list; -+#endif -+ -+/* This is the actual timeline metadata. We might keep this around after the -+ * base sync driver has destroyed the pvr_sync_timeline_wrapper object. -+ */ -+struct pvr_sync_timeline { -+ char name[32]; -+ void *file_handle; -+ bool is_sw; -+ /* Fence context used for hw fences */ -+ struct pvr_fence_context *hw_fence_context; -+ /* Timeline and context for sw fences */ -+ struct pvr_counting_fence_timeline *sw_fence_timeline; -+#if defined(NO_HARDWARE) -+ /* List of all timelines (used to advance all timelines in nohw builds) */ -+ struct list_head list; -+#endif -+}; -+ -+static -+void pvr_sync_free_checkpoint_list_mem(void *mem_ptr) -+{ -+ kfree(mem_ptr); -+} -+ -+#if defined(NO_HARDWARE) -+/* function used to signal pvr fence in nohw builds */ -+static -+void pvr_sync_nohw_signal_fence(void *fence_data_to_signal) -+{ -+ struct pvr_sync_timeline *this_timeline; -+ -+ mutex_lock(&pvr_timeline_active_list_lock); -+ list_for_each_entry(this_timeline, &pvr_timeline_active_list, list) { -+ pvr_fence_context_signal_fences_nohw(this_timeline->hw_fence_context); -+ } -+ mutex_unlock(&pvr_timeline_active_list_lock); -+} -+#endif -+ -+static struct pvr_sync_timeline *pvr_sync_timeline_fget(int fd) -+{ -+ struct file *file = fget(fd); -+ struct pvr_sync_timeline *timeline; -+ -+ if (!file) -+ return NULL; -+ -+ timeline = pvr_sync_get_api_priv(file); -+ if (!timeline) -+ fput(file); -+ -+ return timeline; -+} -+ -+static void pvr_sync_timeline_fput(struct pvr_sync_timeline *timeline) -+{ -+ struct file *file = pvr_sync_get_file_struct(timeline->file_handle); -+ -+ if (file) -+ fput(file); -+ else -+ pr_err(FILE_NAME ": %s: Timeline incomplete\n", __func__); -+} -+ -+/* ioctl and fops handling */ -+ -+int pvr_sync_api_init(void *file_handle, void **api_priv) -+{ -+ struct pvr_sync_timeline *timeline; -+ char task_comm[TASK_COMM_LEN]; -+ -+ get_task_comm(task_comm, current); -+ -+ timeline = kzalloc(sizeof(*timeline), GFP_KERNEL); -+ if (!timeline) -+ return -ENOMEM; -+ -+ strlcpy(timeline->name, task_comm, sizeof(timeline->name)); -+ timeline->file_handle = file_handle; -+ timeline->is_sw = false; -+ -+ *api_priv = (void *)timeline; -+ -+ return 0; -+} -+ -+int pvr_sync_api_deinit(void *api_priv, bool is_sw) -+{ -+ struct pvr_sync_timeline *timeline = api_priv; -+ -+ if (!timeline) -+ return 0; -+ -+ if (timeline->sw_fence_timeline) { -+ /* This makes sure any outstanding SW syncs are marked as -+ * complete at timeline close time. 
Otherwise it'll leak the -+ * timeline (as outstanding fences hold a ref) and possibly -+ * wedge the system if something is waiting on one of those -+ * fences -+ */ -+ pvr_counting_fence_timeline_force_complete( -+ timeline->sw_fence_timeline); -+ pvr_counting_fence_timeline_put(timeline->sw_fence_timeline); -+ } -+ -+ if (timeline->hw_fence_context) { -+#if defined(NO_HARDWARE) -+ mutex_lock(&pvr_timeline_active_list_lock); -+ list_del(&timeline->list); -+ mutex_unlock(&pvr_timeline_active_list_lock); -+#endif -+ pvr_fence_context_destroy(timeline->hw_fence_context); -+ } -+ -+ kfree(timeline); -+ -+ return 0; -+} -+ -+/* -+ * This is the function that kick code will call in order to 'finalise' a -+ * created output fence just prior to returning from the kick function. -+ * The OS native sync code needs to implement a function meeting this -+ * specification - the implementation may be a nop if the OS does not need -+ * to perform any actions at this point. -+ * -+ * Input: fence_fd The PVRSRV_FENCE to be 'finalised'. This value -+ * will have been returned by an earlier call to -+ * pvr_sync_create_fence(). -+ * Input: finalise_data The finalise data returned by an earlier call -+ * to pvr_sync_create_fence(). -+ */ -+static enum PVRSRV_ERROR_TAG -+pvr_sync_finalise_fence(PVRSRV_FENCE fence_fd, void *finalise_data) -+{ -+ struct sync_file *sync_file = finalise_data; -+ struct pvr_fence *pvr_fence; -+ -+ if (!sync_file || (fence_fd < 0)) { -+ pr_err(FILE_NAME ": %s: Invalid input fence\n", __func__); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ pvr_fence = to_pvr_fence(sync_file->fence); -+ -+ if (!pvr_fence) { -+ pr_err(FILE_NAME ": %s: Fence not a pvr fence\n", __func__); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* pvr fences can be signalled any time after creation */ -+ dma_fence_enable_sw_signaling(&pvr_fence->base); -+ -+ fd_install(fence_fd, sync_file->file); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * This is the function that kick code will call in order to obtain a new -+ * PVRSRV_FENCE from the OS native sync code and the PSYNC_CHECKPOINT used -+ * in that fence. The OS native sync code needs to implement a function -+ * meeting this specification. -+ * -+ * Input: device Device node to use in creating a hw_fence_ctx -+ * Input: fence_name A string to annotate the fence with (for -+ * debug). -+ * Input: timeline The timeline on which the new fence is to be -+ * created. -+ * Output: new_fence The new PVRSRV_FENCE to be returned by the -+ * kick call. -+ * Output: fence_uid Unique ID of the update fence. -+ * Output: fence_finalise_data Pointer to data needed to finalise the fence. -+ * Output: new_checkpoint_handle The PSYNC_CHECKPOINT used by the new fence. 
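-+ * Output: timeline_update_sync Not used by this implementation; set to NULL.
-+ * Output: timeline_update_value Not used by this implementation; set to 0.
-+ *
-+ * Note: the returned fence FD is only reserved here. It is installed by a
-+ * later call to pvr_sync_finalise_fence(), or released again by
-+ * pvr_sync_rollback_fence_data() if the kick is not submitted.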
-+ */ -+static enum PVRSRV_ERROR_TAG -+pvr_sync_create_fence( -+ struct _PVRSRV_DEVICE_NODE_ *device, -+ const char *fence_name, -+ PVRSRV_TIMELINE new_fence_timeline, -+ PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, -+ PVRSRV_FENCE *new_fence, u64 *fence_uid, -+ void **fence_finalise_data, -+ PSYNC_CHECKPOINT *new_checkpoint_handle, -+ void **timeline_update_sync, -+ __u32 *timeline_update_value) -+{ -+ PVRSRV_ERROR err = PVRSRV_OK; -+ PVRSRV_FENCE new_fence_fd = -1; -+ struct pvr_sync_timeline *timeline; -+ struct pvr_fence *pvr_fence; -+ PSYNC_CHECKPOINT checkpoint; -+ struct sync_file *sync_file; -+ -+ if (new_fence_timeline < 0 || !new_fence || !new_checkpoint_handle -+ || !fence_finalise_data) { -+ pr_err(FILE_NAME ": %s: Invalid input params\n", __func__); -+ err = PVRSRV_ERROR_INVALID_PARAMS; -+ goto err_out; -+ } -+ -+ /* We reserve the new fence FD before taking any operations -+ * as we do not want to fail (e.g. run out of FDs) -+ */ -+ new_fence_fd = get_unused_fd_flags(O_CLOEXEC); -+ if (new_fence_fd < 0) { -+ pr_err(FILE_NAME ": %s: Failed to get fd\n", __func__); -+ err = PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; -+ goto err_out; -+ } -+ -+ timeline = pvr_sync_timeline_fget(new_fence_timeline); -+ if (!timeline) { -+ pr_err(FILE_NAME ": %s: Failed to open supplied timeline fd (%d)\n", -+ __func__, new_fence_timeline); -+ err = PVRSRV_ERROR_INVALID_PARAMS; -+ goto err_put_fd; -+ } -+ -+ if (timeline->is_sw) { -+ /* This should never happen! */ -+ pr_err(FILE_NAME ": %s: Request to create a pvr fence on sw timeline (%d)\n", -+ __func__, new_fence_timeline); -+ err = PVRSRV_ERROR_INVALID_PARAMS; -+ goto err_put_timeline; -+ } -+ -+ if (!timeline->hw_fence_context) { -+ /* First time we use this timeline, so create a context. */ -+ timeline->hw_fence_context = -+ pvr_fence_context_create( -+ device, -+ NativeSyncGetFenceStatusWq(), -+ timeline->name); -+ if (!timeline->hw_fence_context) { -+ pr_err(FILE_NAME ": %s: Failed to create fence context (%d)\n", -+ __func__, new_fence_timeline); -+ err = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto err_put_timeline; -+ } -+#if defined(NO_HARDWARE) -+ /* Add timeline to active list */ -+ INIT_LIST_HEAD(&timeline->list); -+ mutex_lock(&pvr_timeline_active_list_lock); -+ list_add_tail(&timeline->list, &pvr_timeline_active_list); -+ mutex_unlock(&pvr_timeline_active_list_lock); -+#endif -+ } -+ -+ pvr_fence = pvr_fence_create(timeline->hw_fence_context, -+ psSyncCheckpointContext, -+ new_fence_timeline, -+ fence_name); -+ if (!pvr_fence) { -+ pr_err(FILE_NAME ": %s: Failed to create new pvr_fence\n", -+ __func__); -+ err = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto err_put_timeline; -+ } -+ -+ checkpoint = pvr_fence_get_checkpoint(pvr_fence); -+ if (!checkpoint) { -+ pr_err(FILE_NAME ": %s: Failed to get fence checkpoint\n", -+ __func__); -+ err = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto err_destroy_fence; -+ } -+ -+ sync_file = sync_file_create(&pvr_fence->base); -+ if (!sync_file) { -+ pr_err(FILE_NAME ": %s: Failed to create sync_file\n", -+ __func__); -+ err = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto err_destroy_fence; -+ } -+ strlcpy(sync_file_user_name(sync_file), -+ pvr_fence->name, -+ sizeof(sync_file_user_name(sync_file))); -+ dma_fence_put(&pvr_fence->base); -+ -+ *new_fence = new_fence_fd; -+ *fence_finalise_data = sync_file; -+ *new_checkpoint_handle = checkpoint; -+ *fence_uid = OSGetCurrentClientProcessIDKM(); -+ *fence_uid = (*fence_uid << 32) | (new_fence_fd & U32_MAX); -+ /* not used but don't want to return dangling pointers */ -+ 
*timeline_update_sync = NULL; -+ *timeline_update_value = 0; -+ -+ pvr_sync_timeline_fput(timeline); -+err_out: -+ return err; -+ -+err_destroy_fence: -+ pvr_fence_destroy(pvr_fence); -+err_put_timeline: -+ pvr_sync_timeline_fput(timeline); -+err_put_fd: -+ put_unused_fd(new_fence_fd); -+ *fence_uid = PVRSRV_NO_FENCE; -+ goto err_out; -+} -+ -+/* -+ * This is the function that kick code will call in order to 'rollback' a -+ * created output fence should an error occur when submitting the kick. -+ * The OS native sync code needs to implement a function meeting this -+ * specification. -+ * -+ * Input: fence_to_rollback The PVRSRV_FENCE to be 'rolled back'. The fence -+ * should be destroyed and any actions taken due to -+ * its creation that need to be undone should be -+ * reverted. -+ * Input: finalise_data The finalise data for the fence to be 'rolled back'. -+ */ -+static enum PVRSRV_ERROR_TAG -+pvr_sync_rollback_fence_data(PVRSRV_FENCE fence_to_rollback, -+ void *fence_data_to_rollback) -+{ -+ struct sync_file *sync_file = fence_data_to_rollback; -+ struct pvr_fence *pvr_fence; -+ -+ if (!sync_file || fence_to_rollback < 0) { -+ pr_err(FILE_NAME ": %s: Invalid fence (%d)\n", __func__, -+ fence_to_rollback); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ pvr_fence = to_pvr_fence(sync_file->fence); -+ if (!pvr_fence) { -+ pr_err(FILE_NAME -+ ": %s: Non-PVR fence (%p)\n", -+ __func__, sync_file->fence); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ fput(sync_file->file); -+ -+ put_unused_fd(fence_to_rollback); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * This is the function that kick code will call in order to obtain a list of -+ * the PSYNC_CHECKPOINTs for a given PVRSRV_FENCE passed to a kick function. -+ * The OS native sync code will allocate the memory to hold the returned list -+ * of PSYNC_CHECKPOINT ptrs. The caller will free this memory once it has -+ * finished referencing it. -+ * -+ * Input: fence The input (check) fence -+ * Output: nr_checkpoints The number of PVRSRV_SYNC_CHECKPOINT ptrs -+ * returned in the checkpoint_handles -+ * parameter. -+ * Output: fence_uid Unique ID of the check fence -+ * Input/Output: checkpoint_handles The returned list of PVRSRV_SYNC_CHECKPOINTs. 
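-+ *
-+ * Note: fences that are already signalled are skipped (except on PDUMP
-+ * builds), each returned checkpoint carries an extra reference taken on the
-+ * caller's behalf, and the list is freed here if no checkpoints remain.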
-+ */ -+static enum PVRSRV_ERROR_TAG -+pvr_sync_resolve_fence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, -+ PVRSRV_FENCE fence_to_resolve, u32 *nr_checkpoints, -+ PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid) -+{ -+ PSYNC_CHECKPOINT *checkpoints = NULL; -+ unsigned int i, num_fences = 0, num_used_fences = 0; -+ struct dma_fence **fences = NULL; -+ struct dma_fence *fence; -+ PVRSRV_ERROR err = PVRSRV_OK; -+ -+ if (!nr_checkpoints || !checkpoint_handles || !fence_uid) { -+ pr_err(FILE_NAME ": %s: Invalid input checkpoint pointer\n", -+ __func__); -+ err = PVRSRV_ERROR_INVALID_PARAMS; -+ goto err_out; -+ } -+ -+ *nr_checkpoints = 0; -+ *checkpoint_handles = NULL; -+ *fence_uid = 0; -+ -+ if (fence_to_resolve < 0) -+ goto err_out; -+ -+ fence = sync_file_get_fence(fence_to_resolve); -+ if (!fence) { -+ pr_err(FILE_NAME ": %s: Failed to read sync private data for fd %d\n", -+ __func__, fence_to_resolve); -+ err = PVRSRV_ERROR_HANDLE_NOT_FOUND; -+ goto err_out; -+ } -+ -+ if (dma_fence_is_array(fence)) { -+ struct dma_fence_array *array = to_dma_fence_array(fence); -+ -+ if (array) { -+ fences = array->fences; -+ num_fences = array->num_fences; -+ } -+ } else { -+ fences = &fence; -+ num_fences = 1; -+ } -+ -+ checkpoints = kmalloc_array(num_fences, sizeof(PSYNC_CHECKPOINT), -+ GFP_KERNEL); -+ if (!checkpoints) { -+ err = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto err_put_fence; -+ } -+ for (i = 0; i < num_fences; i++) { -+ /* -+ * Only return the checkpoint if the fence is still active. -+ * Don't checked for signalled on PDUMP drivers as we need -+ * to make sure that all fences make it to the pdump. -+ */ -+#if !defined(PDUMP) -+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, -+ &fences[i]->flags)) -+#endif -+ { -+ struct pvr_fence *pvr_fence = -+ pvr_fence_create_from_fence( -+ pvr_sync_data.foreign_fence_context, -+ psSyncCheckpointContext, -+ fences[i], -+ fence_to_resolve, -+ "foreign"); -+ if (!pvr_fence) { -+ pr_err(FILE_NAME ": %s: Failed to create fence\n", -+ __func__); -+ err = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto err_free_checkpoints; -+ } -+ checkpoints[num_used_fences] = -+ pvr_fence_get_checkpoint(pvr_fence); -+ SyncCheckpointTakeRef(checkpoints[num_used_fences]); -+ ++num_used_fences; -+ dma_fence_put(&pvr_fence->base); -+ } -+ } -+ /* If we don't return any checkpoints, delete the array because -+ * the caller will not. -+ */ -+ if (num_used_fences == 0) { -+ kfree(checkpoints); -+ checkpoints = NULL; -+ } -+ -+ *checkpoint_handles = checkpoints; -+ *nr_checkpoints = num_used_fences; -+ *fence_uid = OSGetCurrentClientProcessIDKM(); -+ *fence_uid = (*fence_uid << 32) | (fence_to_resolve & U32_MAX); -+ -+err_put_fence: -+ dma_fence_put(fence); -+err_out: -+ return err; -+ -+err_free_checkpoints: -+ for (i = 0; i < num_used_fences; i++) { -+ if (checkpoints[i]) -+ SyncCheckpointDropRef(checkpoints[i]); -+ } -+ kfree(checkpoints); -+ goto err_put_fence; -+} -+ -+/* -+ * This is the function that driver code will call in order to request the -+ * sync implementation to output debug information relating to any sync -+ * checkpoints it may have created which appear in the provided array of -+ * FW addresses of Unified Fence Objects (UFOs). -+ * -+ * Input: nr_ufos The number of FW addresses provided in the -+ * vaddrs parameter. -+ * Input: vaddrs The array of FW addresses of UFOs. 
The sync -+ * implementation should check each of these to -+ * see if any relate to sync checkpoints it has -+ * created and where they do output debug information -+ * pertaining to the native/fallback sync with -+ * which it is associated. -+ */ -+static u32 -+pvr_sync_dump_info_on_stalled_ufos(u32 nr_ufos, u32 *vaddrs) -+{ -+ return pvr_fence_dump_info_on_stalled_ufos(pvr_sync_data.foreign_fence_context, -+ nr_ufos, -+ vaddrs); -+} -+ -+#if defined(PDUMP) -+static enum PVRSRV_ERROR_TAG -+pvr_sync_fence_get_checkpoints(PVRSRV_FENCE fence_to_pdump, u32 *nr_checkpoints, -+ struct SYNC_CHECKPOINT_TAG ***checkpoint_handles) -+{ -+ struct dma_fence **fences = NULL; -+ struct dma_fence *fence; -+ struct pvr_fence *pvr_fence; -+ struct SYNC_CHECKPOINT_TAG **checkpoints = NULL; -+ unsigned int i, num_fences, num_used_fences = 0; -+ enum PVRSRV_ERROR_TAG err; -+ -+ if (fence_to_pdump < 0) { -+ err = PVRSRV_ERROR_INVALID_PARAMS; -+ goto err_out; -+ } -+ -+ if (!nr_checkpoints || !checkpoint_handles) { -+ pr_err(FILE_NAME ": %s: Invalid input checkpoint pointer\n", -+ __func__); -+ err = PVRSRV_ERROR_INVALID_PARAMS; -+ goto err_out; -+ } -+ -+ fence = sync_file_get_fence(fence_to_pdump); -+ if (!fence) { -+ pr_err(FILE_NAME ": %s: Failed to read sync private data for fd %d\n", -+ __func__, fence_to_pdump); -+ err = PVRSRV_ERROR_HANDLE_NOT_FOUND; -+ goto err_out; -+ } -+ -+ if (dma_fence_is_array(fence)) { -+ struct dma_fence_array *array = to_dma_fence_array(fence); -+ -+ fences = array->fences; -+ num_fences = array->num_fences; -+ } else { -+ fences = &fence; -+ num_fences = 1; -+ } -+ -+ checkpoints = kmalloc_array(num_fences, sizeof(*checkpoints), -+ GFP_KERNEL); -+ if (!checkpoints) { -+ pr_err("pvr_sync_file: %s: Failed to alloc memory for returned list of sync checkpoints\n", -+ __func__); -+ err = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto err_put_fence; -+ } -+ -+ for (i = 0; i < num_fences; i++) { -+ pvr_fence = to_pvr_fence(fences[i]); -+ if (!pvr_fence) -+ continue; -+ checkpoints[num_used_fences] = pvr_fence_get_checkpoint(pvr_fence); -+ ++num_used_fences; -+ } -+ -+ *checkpoint_handles = checkpoints; -+ *nr_checkpoints = num_used_fences; -+ err = PVRSRV_OK; -+ -+err_put_fence: -+ dma_fence_put(fence); -+err_out: -+ return err; -+} -+#endif -+ -+int pvr_sync_api_rename(void *api_priv, void *user_data) -+{ -+ struct pvr_sync_timeline *timeline = api_priv; -+ struct pvr_sync_rename_ioctl_data *data = user_data; -+ -+ data->szName[sizeof(data->szName) - 1] = '\0'; -+ strlcpy(timeline->name, data->szName, sizeof(timeline->name)); -+ if (timeline->hw_fence_context) -+ strlcpy(timeline->hw_fence_context->name, data->szName, -+ sizeof(timeline->hw_fence_context->name)); -+ -+ return 0; -+} -+ -+int pvr_sync_api_force_sw_only(void *api_priv, void **api_priv_new) -+{ -+ struct pvr_sync_timeline *timeline = api_priv; -+ -+ /* Already in SW mode? 
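-+ * If so, the call is a no-op and the existing sw timeline is kept.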
*/ -+ if (timeline->sw_fence_timeline) -+ return 0; -+ -+ /* Create a sw_sync timeline with the old GPU timeline's name */ -+ timeline->sw_fence_timeline = pvr_counting_fence_timeline_create( -+ timeline->name); -+ if (!timeline->sw_fence_timeline) -+ return -ENOMEM; -+ -+ timeline->is_sw = true; -+ -+ return 0; -+} -+ -+int pvr_sync_api_sw_create_fence(void *api_priv, void *user_data) -+{ -+ struct pvr_sync_timeline *timeline = api_priv; -+ struct pvr_sw_sync_create_fence_data *data = user_data; -+ struct sync_file *sync_file; -+ int fd = get_unused_fd_flags(O_CLOEXEC); -+ struct dma_fence *fence; -+ int err; -+ -+ if (fd < 0) { -+ pr_err(FILE_NAME ": %s: Failed to find unused fd (%d)\n", -+ __func__, fd); -+ err = -EMFILE; -+ goto err_out; -+ } -+ -+ fence = pvr_counting_fence_create(timeline->sw_fence_timeline, &data->sync_pt_idx); -+ if (!fence) { -+ pr_err(FILE_NAME ": %s: Failed to create a sync point (%d)\n", -+ __func__, fd); -+ err = -ENOMEM; -+ goto err_put_fd; -+ } -+ -+ sync_file = sync_file_create(fence); -+ dma_fence_put(fence); -+ if (!sync_file) { -+ pr_err(FILE_NAME ": %s: Failed to create a sync point (%d)\n", -+ __func__, fd); -+ err = -ENOMEM; -+ goto err_put_fd; -+ } -+ -+ data->fence = fd; -+ -+ fd_install(fd, sync_file->file); -+ -+ return 0; -+ -+err_put_fd: -+ put_unused_fd(fd); -+err_out: -+ return err; -+} -+ -+int pvr_sync_api_sw_inc(void *api_priv, void *user_data) -+{ -+ struct pvr_sync_timeline *timeline = api_priv; -+ struct pvr_sw_timeline_advance_data *data = user_data; -+ bool res; -+ -+ res = pvr_counting_fence_timeline_inc(timeline->sw_fence_timeline, &data->sync_pt_idx); -+ -+ /* pvr_counting_fence_timeline_inc won't allow sw timeline to be -+ * advanced beyond the last defined point -+ */ -+ if (!res) { -+ pr_err("pvr_sync_file: attempt to advance SW timeline beyond last defined point\n"); -+ return -EPERM; -+ } -+ -+ return 0; -+} -+ -+static void -+pvr_sync_debug_request_heading(void *data, u32 verbosity, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM)) -+ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, -+ "------[ Native Fence Sync: timelines ]------"); -+} -+ -+enum PVRSRV_ERROR_TAG pvr_sync_register_functions(void) -+{ -+ /* Register the resolve fence and create fence functions with -+ * sync_checkpoint.c -+ * The pvr_fence context registers its own EventObject callback to -+ * update sync status -+ */ -+ /* Initialise struct and register with sync_checkpoint.c */ -+ pvr_sync_data.sync_checkpoint_ops.pfnFenceResolve = pvr_sync_resolve_fence; -+ pvr_sync_data.sync_checkpoint_ops.pfnFenceCreate = pvr_sync_create_fence; -+ pvr_sync_data.sync_checkpoint_ops.pfnFenceDataRollback = pvr_sync_rollback_fence_data; -+ pvr_sync_data.sync_checkpoint_ops.pfnFenceFinalise = pvr_sync_finalise_fence; -+#if defined(NO_HARDWARE) -+ pvr_sync_data.sync_checkpoint_ops.pfnNoHWUpdateTimelines = pvr_sync_nohw_signal_fence; -+#else -+ pvr_sync_data.sync_checkpoint_ops.pfnNoHWUpdateTimelines = NULL; -+#endif -+ pvr_sync_data.sync_checkpoint_ops.pfnFreeCheckpointListMem = -+ pvr_sync_free_checkpoint_list_mem; -+ pvr_sync_data.sync_checkpoint_ops.pfnDumpInfoOnStalledUFOs = -+ pvr_sync_dump_info_on_stalled_ufos; -+ strlcpy(pvr_sync_data.sync_checkpoint_ops.pszImplName, "pvr_sync_file", -+ SYNC_CHECKPOINT_IMPL_MAX_STRLEN); -+#if defined(PDUMP) -+ pvr_sync_data.sync_checkpoint_ops.pfnSyncFenceGetCheckpoints = -+ pvr_sync_fence_get_checkpoints; -+#endif -+ -+ return 
SyncCheckpointRegisterFunctions(&pvr_sync_data.sync_checkpoint_ops); -+} -+ -+int pvr_sync_init(void) -+{ -+ int err; -+ -+ pvr_sync_data.foreign_fence_context = -+ pvr_fence_foreign_context_create( -+ NativeSyncGetFenceStatusWq(), -+ "foreign_sync"); -+ if (!pvr_sync_data.foreign_fence_context) { -+ pr_err(FILE_NAME ": %s: Failed to create foreign sync context\n", -+ __func__); -+ err = -ENOMEM; -+ goto err_out; -+ } -+ -+#if defined(NO_HARDWARE) -+ INIT_LIST_HEAD(&pvr_timeline_active_list); -+#endif -+ -+ err = pvr_sync_ioctl_init(); -+ if (err) { -+ pr_err(FILE_NAME ": %s: Failed to register pvr_sync device (%d)\n", -+ __func__, err); -+ goto err_ioctl_init; -+ } -+ -+ return 0; -+ -+err_ioctl_init: -+ pvr_fence_context_destroy(pvr_sync_data.foreign_fence_context); -+ pvr_fence_cleanup(); -+err_out: -+ return err; -+} -+ -+void pvr_sync_deinit(void) -+{ -+ pvr_sync_ioctl_deinit(); -+ pvr_fence_context_destroy(pvr_sync_data.foreign_fence_context); -+ pvr_fence_cleanup(); -+} -+ -+enum PVRSRV_ERROR_TAG pvr_sync_device_init(struct device *dev) -+{ -+ struct drm_device *ddev = dev_get_drvdata(dev); -+ struct pvr_drm_private *priv = ddev->dev_private; -+ enum PVRSRV_ERROR_TAG error; -+ -+ error = PVRSRVRegisterDeviceDbgRequestNotify( -+ &priv->sync_debug_notify_handle, -+ priv->dev_node, -+ pvr_sync_debug_request_heading, -+ DEBUG_REQUEST_LINUXFENCE, -+ NULL); -+ if (error != PVRSRV_OK) { -+ pr_err("%s: failed to register debug request callback (%s)\n", -+ __func__, PVRSRVGetErrorString(error)); -+ goto err_out; -+ } -+ -+ /* Register the foreign sync context debug notifier on each device */ -+ error = pvr_fence_context_register_dbg( -+ &priv->sync_foreign_debug_notify_handle, -+ priv->dev_node, -+ pvr_sync_data.foreign_fence_context); -+ if (error != PVRSRV_OK) { -+ pr_err("%s: failed to register fence debug request callback (%s)\n", -+ __func__, PVRSRVGetErrorString(error)); -+ goto err_context_regdbg; -+ } -+ -+#if defined(NO_HARDWARE) -+ INIT_LIST_HEAD(&pvr_timeline_active_list); -+#endif -+ -+ return PVRSRV_OK; -+ -+err_context_regdbg: -+ PVRSRVUnregisterDeviceDbgRequestNotify(priv->sync_debug_notify_handle); -+err_out: -+ return error; -+} -+ -+void pvr_sync_device_deinit(struct device *dev) -+{ -+ struct drm_device *ddev = dev_get_drvdata(dev); -+ struct pvr_drm_private *priv = ddev->dev_private; -+ -+ PVRSRVUnregisterDeviceDbgRequestNotify(priv->sync_foreign_debug_notify_handle); -+ PVRSRVUnregisterDeviceDbgRequestNotify(priv->sync_debug_notify_handle); -+} -+ -+enum PVRSRV_ERROR_TAG pvr_sync_fence_wait(void *fence, u32 timeout_in_ms) -+{ -+ long timeout = msecs_to_jiffies(timeout_in_ms); -+ int err; -+ -+ err = dma_fence_wait_timeout(fence, true, timeout); -+ /* -+ * dma_fence_wait_timeout returns: -+ * - the remaining timeout on success -+ * - 0 on timeout -+ * - -ERESTARTSYS if interrupted -+ */ -+ if (err > 0) -+ return PVRSRV_OK; -+ else if (err == 0) -+ return PVRSRV_ERROR_TIMEOUT; -+ -+ return PVRSRV_ERROR_FAILED_DEPENDENCIES; -+} -+ -+enum PVRSRV_ERROR_TAG pvr_sync_fence_release(void *fence) -+{ -+ dma_fence_put(fence); -+ -+ return PVRSRV_OK; -+} -+ -+enum PVRSRV_ERROR_TAG pvr_sync_fence_get(int fence_fd, void **fence_out) -+{ -+ struct dma_fence *fence; -+ -+ fence = sync_file_get_fence(fence_fd); -+ if (fence == NULL) -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ *fence_out = fence; -+ -+ return PVRSRV_OK; -+} -+ -+enum PVRSRV_ERROR_TAG -+pvr_sync_sw_timeline_fence_create(struct _PVRSRV_DEVICE_NODE_ *pvrsrv_dev_node, -+ int timeline_fd, -+ const char *fence_name, -+ int 
*fence_fd_out, -+ u64 *sync_pt_idx) -+{ -+ enum PVRSRV_ERROR_TAG srv_err; -+ struct pvr_sync_timeline *timeline; -+ struct dma_fence *fence = NULL; -+ struct sync_file *sync_file = NULL; -+ int fd; -+ -+ (void)(pvrsrv_dev_node); -+ -+ fd = get_unused_fd_flags(O_CLOEXEC); -+ if (fd < 0) -+ return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; -+ -+ timeline = pvr_sync_timeline_fget(timeline_fd); -+ if (!timeline) { -+ /* unrecognised timeline */ -+ srv_err = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; -+ goto err_put_fd; -+ } -+ if (!timeline->is_sw) { -+ pvr_sync_timeline_fput(timeline); -+ srv_err = PVRSRV_ERROR_INVALID_PARAMS; -+ goto err_put_fd; -+ } -+ -+ fence = pvr_counting_fence_create(timeline->sw_fence_timeline, sync_pt_idx); -+ pvr_sync_timeline_fput(timeline); -+ if (!fence) { -+ srv_err = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto err_put_fd; -+ } -+ -+ sync_file = sync_file_create(fence); -+ dma_fence_put(fence); -+ if (!sync_file) { -+ srv_err = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto err_put_fd; -+ } -+ -+ fd_install(fd, sync_file->file); -+ -+ *fence_fd_out = fd; -+ -+ return PVRSRV_OK; -+ -+err_put_fd: -+ put_unused_fd(fd); -+ return srv_err; -+} -+ -+enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_advance(void *timeline, u64 *sync_pt_idx) -+{ -+ if (timeline == NULL) -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ pvr_counting_fence_timeline_inc(timeline, sync_pt_idx); -+ -+ return PVRSRV_OK; -+} -+ -+enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_release(void *timeline) -+{ -+ if (timeline == NULL) -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ pvr_counting_fence_timeline_put(timeline); -+ -+ return PVRSRV_OK; -+} -+ -+enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_get(int timeline_fd, -+ void **timeline_out) -+{ -+ struct pvr_counting_fence_timeline *sw_timeline; -+ struct pvr_sync_timeline *timeline; -+ -+ timeline = pvr_sync_timeline_fget(timeline_fd); -+ if (!timeline) -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ sw_timeline = -+ pvr_counting_fence_timeline_get(timeline->sw_fence_timeline); -+ pvr_sync_timeline_fput(timeline); -+ if (!sw_timeline) -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ *timeline_out = sw_timeline; -+ -+ return PVRSRV_OK; -+} -+static void _dump_sync_point(struct dma_fence *fence, -+ DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, -+ void *dump_debug_file) -+{ -+ const struct dma_fence_ops *fence_ops = fence->ops; -+ bool signaled = dma_fence_is_signaled(fence); -+ char time[16] = { '\0' }; -+ -+ fence_ops->timeline_value_str(fence, time, sizeof(time)); -+ -+ PVR_DUMPDEBUG_LOG(dump_debug_printf, -+ dump_debug_file, -+ "<%p> Seq#=%llu TS=%s State=%s TLN=%s", -+ fence, -+ (u64) fence->seqno, -+ time, -+ (signaled) ? 
"Signalled" : "Active", -+ fence_ops->get_timeline_name(fence)); -+} -+ -+static void _dump_fence(struct dma_fence *fence, -+ DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, -+ void *dump_debug_file) -+{ -+ if (dma_fence_is_array(fence)) { -+ struct dma_fence_array *fence_array = to_dma_fence_array(fence); -+ int i; -+ -+ if (fence_array) { -+ PVR_DUMPDEBUG_LOG(dump_debug_printf, -+ dump_debug_file, -+ "Fence: [%p] Sync Points:\n", -+ fence_array); -+ -+ for (i = 0; i < fence_array->num_fences; i++) -+ _dump_sync_point(fence_array->fences[i], -+ dump_debug_printf, -+ dump_debug_file); -+ } -+ -+ } else { -+ _dump_sync_point(fence, dump_debug_printf, dump_debug_file); -+ } -+} -+ -+enum PVRSRV_ERROR_TAG -+sync_dump_fence(void *sw_fence_obj, -+ DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, -+ void *dump_debug_file) -+{ -+ struct dma_fence *fence = (struct dma_fence *) sw_fence_obj; -+ -+ _dump_fence(fence, dump_debug_printf, dump_debug_file); -+ -+ return PVRSRV_OK; -+} -+ -+enum PVRSRV_ERROR_TAG -+sync_sw_dump_timeline(void *sw_timeline_obj, -+ DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, -+ void *dump_debug_file) -+{ -+ pvr_counting_fence_timeline_dump_timeline(sw_timeline_obj, -+ dump_debug_printf, -+ dump_debug_file); -+ -+ return PVRSRV_OK; -+} -diff --git a/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_common.c b/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_common.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_common.c -@@ -0,0 +1,277 @@ -+/* -+ * @File pvr_sync_ioctl_common.c -+ * @Title Kernel driver for Android's sync mechanism -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". 
-+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include -+ -+#include "pvr_drm.h" -+#include "pvr_sync_api.h" -+#include "pvr_sync_ioctl_common.h" -+ -+/* -+ * The PVR Sync API is unusual in that some operations configure the -+ * timeline for use, and are no longer allowed once the timeline is -+ * in use. A locking mechanism, such as a read/write semaphore, would -+ * be one method of helping to ensure the API rules are followed, but -+ * this would add unnecessary overhead once the timeline has been -+ * configured, as read locks would continue to have to be taken after -+ * the timeline is in use. To avoid locks, two atomic variables are used, -+ * together with memory barriers. The in_setup variable indicates a "rename" -+ * or "force software only" ioctl is in progress. At most one of these two -+ * configuration ioctls can be in progress at any one time, and they can't -+ * overlap with any other Sync ioctl. The in_use variable indicates one -+ * of the other Sync ioctls has started. Once set, in_use stays set, and -+ * prevents any further configuration ioctls. Non-configuration ioctls -+ * are allowed to overlap. -+ * It is possible for a configuration and non-configuration ioctl to race, -+ * but at most one will be allowed to proceed, and perhaps neither. -+ * Given the intended usage of the API in user space, where the timeline -+ * is fully configured before being used, the race behaviour won't be -+ * an issue. -+ */ -+ -+struct pvr_sync_file_data { -+ atomic_t in_setup; -+ atomic_t in_use; -+ void *api_private; -+ bool is_sw; -+}; -+ -+static bool pvr_sync_set_in_use(struct pvr_sync_file_data *fdata) -+{ -+ if (atomic_read(&fdata->in_use) < 2) { -+ atomic_set(&fdata->in_use, 1); -+ /* Ensure in_use change is visible before in_setup is read */ -+ smp_mb(); -+ if (atomic_read(&fdata->in_setup) != 0) -+ return false; -+ -+ atomic_set(&fdata->in_use, 2); -+ } else { -+ /* Ensure stale private data isn't read */ -+ smp_rmb(); -+ } -+ -+ return true; -+} -+ -+static bool pvr_sync_set_in_setup(struct pvr_sync_file_data *fdata) -+{ -+ int in_setup; -+ -+ in_setup = atomic_inc_return(&fdata->in_setup); -+ if (in_setup > 1 || atomic_read(&fdata->in_use) != 0) { -+ atomic_dec(&fdata->in_setup); -+ return false; -+ } -+ -+ return true; -+} -+ -+static inline void pvr_sync_reset_in_setup(struct pvr_sync_file_data *fdata) -+{ -+ /* -+ * Ensure setup changes are visible before allowing other -+ * operations to proceed. 
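-+ * The barrier orders the preceding configuration updates against the
-+ * in_setup decrement, which allows pvr_sync_set_in_use() to succeed
-+ * again for the non-configuration ioctls.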
-+ */ -+ smp_mb__before_atomic(); -+ atomic_dec(&fdata->in_setup); -+} -+ -+void *pvr_sync_get_api_priv_common(struct file *file) -+{ -+ if (file != NULL && pvr_sync_is_timeline(file)) { -+ struct pvr_sync_file_data *fdata = pvr_sync_get_private_data(file); -+ -+ if (fdata != NULL && pvr_sync_set_in_use(fdata)) -+ return fdata->api_private; -+ } -+ -+ return NULL; -+} -+ -+int pvr_sync_open_common(void *connection_data, void *file_handle) -+{ -+ void *data = NULL; -+ struct pvr_sync_file_data *fdata; -+ int err; -+ -+ fdata = kzalloc(sizeof(*fdata), GFP_KERNEL); -+ if (!fdata) -+ return -ENOMEM; -+ -+ atomic_set(&fdata->in_setup, 0); -+ atomic_set(&fdata->in_use, 0); -+ -+ if (!pvr_sync_set_private_data(connection_data, fdata)) { -+ kfree(fdata); -+ return -EINVAL; -+ } -+ -+ err = pvr_sync_api_init(file_handle, &data); -+ if (err) -+ kfree(fdata); -+ else -+ fdata->api_private = data; -+ -+ return err; -+} -+ -+int pvr_sync_close_common(void *connection_data) -+{ -+ struct pvr_sync_file_data *fdata; -+ -+ fdata = pvr_sync_connection_private_data(connection_data); -+ if (fdata) { -+ int err; -+ -+ err = pvr_sync_api_deinit(fdata->api_private, fdata->is_sw); -+ -+ kfree(fdata); -+ -+ return err; -+ } -+ -+ return 0; -+} -+ -+static inline int pvr_sync_ioctl_rename(void *api_priv, void *arg) -+{ -+ struct pvr_sync_rename_ioctl_data *data = arg; -+ -+ return pvr_sync_api_rename(api_priv, data); -+} -+ -+static inline int pvr_sync_ioctl_force_sw_only(struct pvr_sync_file_data *fdata) -+{ -+ void *data = fdata->api_private; -+ int err; -+ -+ err = pvr_sync_api_force_sw_only(fdata->api_private, &data); -+ if (!err) { -+ if (data != fdata->api_private) -+ fdata->api_private = data; -+ -+ fdata->is_sw = true; -+ } -+ -+ return err; -+} -+ -+static inline int pvr_sync_ioctl_sw_create_fence(void *api_priv, void *arg) -+{ -+ struct pvr_sw_sync_create_fence_data *data = arg; -+ -+ return pvr_sync_api_sw_create_fence(api_priv, data); -+} -+ -+static inline int pvr_sync_ioctl_sw_inc(void *api_priv, void *arg) -+{ -+ struct pvr_sw_timeline_advance_data *data = arg; -+ -+ return pvr_sync_api_sw_inc(api_priv, data); -+} -+ -+int pvr_sync_ioctl_common(struct file *file, unsigned int cmd, void *arg) -+{ -+ int err = -ENOTTY; -+ struct pvr_sync_file_data *fdata; -+ bool in_setup; -+ -+ fdata = pvr_sync_get_private_data(file); -+ if (!fdata) -+ return -EINVAL; -+ -+ switch (cmd) { -+ case DRM_PVR_SYNC_RENAME_CMD: -+ case DRM_PVR_SYNC_FORCE_SW_ONLY_CMD: -+ if (!pvr_sync_set_in_setup(fdata)) -+ return -EBUSY; -+ -+ in_setup = true; -+ break; -+ default: -+ if (!pvr_sync_set_in_use(fdata)) -+ return -EBUSY; -+ -+ in_setup = false; -+ break; -+ } -+ -+ if (in_setup) { -+ if (fdata->is_sw) -+ err = -ENOTTY; -+ else -+ switch (cmd) { -+ case DRM_PVR_SYNC_RENAME_CMD: -+ err = pvr_sync_ioctl_rename(fdata->api_private, -+ arg); -+ break; -+ case DRM_PVR_SYNC_FORCE_SW_ONLY_CMD: -+ err = pvr_sync_ioctl_force_sw_only(fdata); -+ break; -+ default: -+ break; -+ } -+ } else { -+ if (!fdata->is_sw) -+ err = -ENOTTY; -+ else -+ switch (cmd) { -+ case DRM_PVR_SW_SYNC_CREATE_FENCE_CMD: -+ err = pvr_sync_ioctl_sw_create_fence(fdata->api_private, -+ arg); -+ break; -+ case DRM_PVR_SW_SYNC_INC_CMD: -+ err = pvr_sync_ioctl_sw_inc(fdata->api_private, -+ arg); -+ break; -+ default: -+ break; -+ } -+ } -+ -+ if (in_setup) -+ pvr_sync_reset_in_setup(fdata); -+ -+ return err; -+} -diff --git a/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_common.h b/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_common.h -new file mode 100644 -index 
000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_common.h -@@ -0,0 +1,71 @@ -+/* -+ * @File pvr_sync_ioctl_common.h -+ * @Title Kernel driver for Android's sync mechanism -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ */ -+ -+#ifndef _PVR_SYNC_IOCTL_COMMON_H -+#define _PVR_SYNC_IOCTL_COMMON_H -+ -+struct file; -+ -+/* Functions provided by pvr_sync_ioctl_common */ -+ -+int pvr_sync_open_common(void *connection_data, void *file_handle); -+int pvr_sync_close_common(void *connection_data); -+int pvr_sync_ioctl_common(struct file *file, unsigned int cmd, void *arg); -+void *pvr_sync_get_api_priv_common(struct file *file); -+ -+struct pvr_sync_file_data; -+ -+/* Functions required by pvr_sync_ioctl_common */ -+ -+bool pvr_sync_set_private_data(void *connection_data, -+ struct pvr_sync_file_data *fdata); -+ -+struct pvr_sync_file_data * -+pvr_sync_connection_private_data(void *connection_data); -+ -+struct pvr_sync_file_data * -+pvr_sync_get_private_data(struct file *file); -+ -+bool pvr_sync_is_timeline(struct file *file); -+ -+#endif /* _PVR_SYNC_IOCTL_COMMON_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_drm.c b/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_drm.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_drm.c -@@ -0,0 +1,168 @@ -+/* -+ * @File pvr_sync_ioctl_drm.c -+ * @Title Kernel driver for Android's sync mechanism -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ */ -+ -+#include "pvr_drv.h" -+#include "pvr_drm.h" -+#include "private_data.h" -+#include "env_connection.h" -+#include "pvr_sync_api.h" -+#include "pvr_sync_ioctl_common.h" -+#include "pvr_sync_ioctl_drm.h" -+ -+bool pvr_sync_set_private_data(void *connection_data, -+ struct pvr_sync_file_data *fdata) -+{ -+ if (connection_data) { -+ ENV_CONNECTION_DATA *env_data; -+ -+ env_data = PVRSRVConnectionPrivateData(connection_data); -+ if (env_data) { -+ env_data->pvPvrSyncPrivateData = fdata; -+ -+ return true; -+ } -+ } -+ -+ return false; -+} -+ -+struct pvr_sync_file_data * -+pvr_sync_connection_private_data(void *connection_data) -+{ -+ if (connection_data) { -+ ENV_CONNECTION_DATA *env_data; -+ -+ env_data = PVRSRVConnectionPrivateData(connection_data); -+ -+ if (env_data) -+ return env_data->pvPvrSyncPrivateData; -+ } -+ -+ return NULL; -+} -+ -+struct pvr_sync_file_data * -+pvr_sync_get_private_data(struct file *file) -+{ -+ CONNECTION_DATA *connection_data = LinuxSyncConnectionFromFile(file); -+ -+ return pvr_sync_connection_private_data(connection_data); -+} -+ -+bool pvr_sync_is_timeline(struct file *file) -+{ -+ return file->f_op == &pvr_drm_fops; -+} -+ -+void *pvr_sync_get_api_priv(struct file *file) -+{ -+ return pvr_sync_get_api_priv_common(file); -+} -+ -+struct file *pvr_sync_get_file_struct(void *file_handle) -+{ -+ if (file_handle) { -+ struct drm_file *file = file_handle; -+ -+ return file->filp; -+ } -+ -+ return NULL; -+} -+ -+int pvr_sync_open(void *connection_data, struct drm_file *file) -+{ -+ /* -+ * The file structure pointer (file->filp) may not have been -+ * initialised at this point, so pass down a pointer to the -+ * drm_file structure instead. -+ */ -+ return pvr_sync_open_common(connection_data, file); -+} -+ -+void pvr_sync_close(void *connection_data) -+{ -+ int iErr = pvr_sync_close_common(connection_data); -+ -+ if (iErr < 0) -+ pr_err("%s: ERROR (%d) returned by pvr_sync_close_common()\n", -+ __func__, iErr); -+} -+ -+ -+int pvr_sync_rename_ioctl(struct drm_device __maybe_unused *dev, -+ void *arg, struct drm_file *file) -+{ -+ return pvr_sync_ioctl_common(file->filp, -+ DRM_PVR_SYNC_RENAME_CMD, arg); -+} -+ -+int pvr_sync_force_sw_only_ioctl(struct drm_device __maybe_unused *dev, -+ void *arg, struct drm_file *file) -+{ -+ return pvr_sync_ioctl_common(file->filp, -+ DRM_PVR_SYNC_FORCE_SW_ONLY_CMD, arg); -+} -+ -+int pvr_sw_sync_create_fence_ioctl(struct drm_device __maybe_unused *dev, -+ void *arg, struct drm_file *file) -+{ -+ return pvr_sync_ioctl_common(file->filp, -+ DRM_PVR_SW_SYNC_CREATE_FENCE_CMD, arg); -+} -+ -+int pvr_sw_sync_inc_ioctl(struct drm_device __maybe_unused *dev, -+ void *arg, struct drm_file *file) -+{ -+ return pvr_sync_ioctl_common(file->filp, -+ DRM_PVR_SW_SYNC_INC_CMD, arg); -+} -+ -+int pvr_sync_ioctl_init(void) -+{ -+ return 0; -+} -+ -+void pvr_sync_ioctl_deinit(void) -+{ -+} -diff --git a/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_drm.h b/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_drm.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_drm.h -@@ -0,0 +1,62 @@ -+/* -+ * @File pvr_sync_ioctl_drm.h -+ * @Title Kernel driver for Android's sync mechanism -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#ifndef _PVR_SYNC_IOCTL_DRM_H -+#define _PVR_SYNC_IOCTL_DRM_H -+ -+struct drm_device; -+struct drm_file; -+ -+int pvr_sync_open(void *connection_data, struct drm_file *file); -+void pvr_sync_close(void *connection_data); -+ -+int pvr_sync_rename_ioctl(struct drm_device *dev, void *arg, -+ struct drm_file *file); -+int pvr_sync_force_sw_only_ioctl(struct drm_device *dev, void *arg, -+ struct drm_file *file); -+int pvr_sw_sync_create_fence_ioctl(struct drm_device *dev, void *arg, -+ struct drm_file *file); -+int pvr_sw_sync_inc_ioctl(struct drm_device *dev, void *arg, -+ struct drm_file *file); -+ -+#endif /* _PVR_SYNC_IOCTL_DRM_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_uaccess.h b/drivers/gpu/drm/img-rogue/pvr_uaccess.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_uaccess.h -@@ -0,0 +1,99 @@ -+/*************************************************************************/ /*! -+@File -+@Title Utility functions for user space access -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+
-+Permission is hereby granted, free of charge, to any person obtaining a copy
-+of this software and associated documentation files (the "Software"), to deal
-+in the Software without restriction, including without limitation the rights
-+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+copies of the Software, and to permit persons to whom the Software is
-+furnished to do so, subject to the following conditions:
-+
-+The above copyright notice and this permission notice shall be included in
-+all copies or substantial portions of the Software.
-+
-+Alternatively, the contents of this file may be used under the terms of
-+the GNU General Public License Version 2 ("GPL") in which case the provisions
-+of GPL are applicable instead of those above.
-+
-+If you wish to allow use of your version of this file only under the terms of
-+GPL, and not to allow others to use your version of this file under the terms
-+of the MIT license, indicate your decision by deleting the provisions above
-+and replace them with the notice and other provisions required by GPL as set
-+out in the file called "GPL-COPYING" included in this distribution. If you do
-+not delete the provisions above, a recipient may use your version of this file
-+under the terms of either the MIT license or GPL.
-+
-+This License is also included in this distribution in the file called
-+"MIT-COPYING".
-+
-+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
-+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
-+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
-+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
-+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-+*/ /**************************************************************************/
-+#ifndef PVR_UACCESS_H
-+#define PVR_UACCESS_H
-+
-+#include <linux/version.h>
-+#include <linux/uaccess.h>
-+
-+static inline unsigned long pvr_copy_to_user(void __user *pvTo, const void *pvFrom, unsigned long ulBytes)
-+{
-+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
-+	if (access_ok(VERIFY_WRITE, pvTo, ulBytes))
-+#else
-+	if (access_ok(pvTo, ulBytes))
-+#endif
-+	{
-+		return __copy_to_user(pvTo, pvFrom, ulBytes);
-+	}
-+
-+	return ulBytes;
-+}
-+
-+
-+#if defined(__KLOCWORK__)
-+	/* this part is only to tell Klocwork not to report false positive because
-+	   it doesn't understand that pvr_copy_from_user will initialise the memory
-+	   pointed to by pvTo */
-+#include <linux/string.h>	/* get the memset prototype */
-+static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
-+{
-+	if (pvTo != NULL)
-+	{
-+		memset(pvTo, 0xAA, ulBytes);
-+		return 0;
-+	}
-+	return 1;
-+}
-+
-+#else /* real implementation */
-+
-+static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
-+{
-+	/*
-+	 * The compile time correctness checking introduced for copy_from_user in
-+	 * Linux 2.6.33 isn't fully compatible with our usage of the function.
-+ */ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)) -+ if (access_ok(VERIFY_READ, pvFrom, ulBytes)) -+#else -+ if (access_ok(pvFrom, ulBytes)) -+#endif -+ { -+ return __copy_from_user(pvTo, pvFrom, ulBytes); -+ } -+ -+ return ulBytes; -+} -+#endif /* klocworks */ -+ -+#endif /* PVR_UACCESS_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvr_vmap.h b/drivers/gpu/drm/img-rogue/pvr_vmap.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvr_vmap.h -@@ -0,0 +1,83 @@ -+/* -+ * @File pvr_vmap.h -+ * @Title Utility functions for virtual memory mapping -+ * @Codingstyle LinuxKernel -+ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+ * @License Dual MIT/GPLv2 -+ * -+ * The contents of this file are subject to the MIT license as set out below. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a copy -+ * of this software and associated documentation files (the "Software"), to deal -+ * in the Software without restriction, including without limitation the rights -+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+ * copies of the Software, and to permit persons to whom the Software is -+ * furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * Alternatively, the contents of this file may be used under the terms of -+ * the GNU General Public License Version 2 ("GPL") in which case the provisions -+ * of GPL are applicable instead of those above. -+ * -+ * If you wish to allow use of your version of this file only under the terms of -+ * GPL, and not to allow others to use your version of this file under the terms -+ * of the MIT license, indicate your decision by deleting the provisions above -+ * and replace them with the notice and other provisions required by GPL as set -+ * out in the file called "GPL-COPYING" included in this distribution. If you do -+ * not delete the provisions above, a recipient may use your version of this file -+ * under the terms of either the MIT license or GPL. -+ * -+ * This License is also included in this distribution in the file called -+ * "MIT-COPYING". -+ * -+ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ */
-+
-+#ifndef PVR_VMAP_H
-+#define PVR_VMAP_H
-+
-+#include <linux/version.h>
-+#include <linux/vmalloc.h>
-+
-+static inline void *pvr_vmap(struct page **pages,
-+			     unsigned int count,
-+			     __maybe_unused unsigned long flags,
-+			     pgprot_t prot)
-+{
-+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
-+	return vmap(pages, count, flags, prot);
-+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
-+	return vm_map_ram(pages, count, -1, prot);
-+#else
-+	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
-+		return vm_map_ram(pages, count, -1);
-+	else
-+		return vmap(pages, count, flags, prot);
-+#endif /* !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS) */
-+}
-+
-+static inline void pvr_vunmap(const void *vaddr,
-+			      __maybe_unused unsigned int count,
-+			      __maybe_unused pgprot_t prot)
-+{
-+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
-+	vunmap(vaddr);
-+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
-+	vm_unmap_ram(vaddr, count);
-+#else
-+	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
-+		vm_unmap_ram(vaddr, count);
-+	else
-+		vunmap(vaddr);
-+#endif /* !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS) */
-+}
-+
-+#endif /* PVR_VMAP_H */
-diff --git a/drivers/gpu/drm/img-rogue/pvrmodule.h b/drivers/gpu/drm/img-rogue/pvrmodule.h
-new file mode 100644
-index 000000000000..111111111111
---- /dev/null
-+++ b/drivers/gpu/drm/img-rogue/pvrmodule.h
-@@ -0,0 +1,48 @@
-+/*************************************************************************/ /*!
-+@Title Module Author and License.
-+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
-+@License Dual MIT/GPLv2
-+
-+The contents of this file are subject to the MIT license as set out below.
-+
-+Permission is hereby granted, free of charge, to any person obtaining a copy
-+of this software and associated documentation files (the "Software"), to deal
-+in the Software without restriction, including without limitation the rights
-+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+copies of the Software, and to permit persons to whom the Software is
-+furnished to do so, subject to the following conditions:
-+
-+The above copyright notice and this permission notice shall be included in
-+all copies or substantial portions of the Software.
-+
-+Alternatively, the contents of this file may be used under the terms of
-+the GNU General Public License Version 2 ("GPL") in which case the provisions
-+of GPL are applicable instead of those above.
-+
-+If you wish to allow use of your version of this file only under the terms of
-+GPL, and not to allow others to use your version of this file under the terms
-+of the MIT license, indicate your decision by deleting the provisions above
-+and replace them with the notice and other provisions required by GPL as set
-+out in the file called "GPL-COPYING" included in this distribution. If you do
-+not delete the provisions above, a recipient may use your version of this file
-+under the terms of either the MIT license or GPL.
-+
-+This License is also included in this distribution in the file called
-+"MIT-COPYING".
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef _PVRMODULE_H_ -+#define _PVRMODULE_H_ -+ -+MODULE_AUTHOR("Imagination Technologies Ltd. "); -+MODULE_LICENSE("Dual MIT/GPL"); -+ -+#endif /* _PVRMODULE_H_ */ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv.c b/drivers/gpu/drm/img-rogue/pvrsrv.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv.c -@@ -0,0 +1,3748 @@ -+/*************************************************************************/ /*! -+@File -+@Title core services functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Main APIs for core services functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#define CLEANUP_TYPE_STRINGS -+ -+#include "img_defs.h" -+#include "rgxdebug_common.h" -+#include "handle.h" -+ -+#include "pmr.h" -+#include "connection_server.h" -+#include "osconnection_server.h" -+#include "pdump_km.h" -+#include "ra.h" -+#include "allocmem.h" -+#include "pvrsrv.h" -+#include "srvcore.h" -+#include "services_km.h" -+#include "pvrsrv_device.h" -+#include "pvr_debug.h" -+#include "debug_common.h" -+#include "pvr_notifier.h" -+#include "sync.h" -+#include "sync_server.h" -+#include "sync_checkpoint.h" -+#include "sync_fallback_server.h" -+#include "sync_checkpoint_init.h" -+#include "devicemem.h" -+#include "cache_km.h" -+#include "info_page.h" -+#include "info_page_defs.h" -+#include "pvrsrv_bridge_init.h" -+#include "devicemem_server.h" -+#include "km_apphint_defs.h" -+#include "di_server.h" -+#include "di_impl_brg.h" -+#include "htb_debug.h" -+#include "dma_km.h" -+#include "pmr.h" -+ -+#include "log2.h" -+ -+#include "lists.h" -+#include "dllist.h" -+#include "syscommon.h" -+#include "sysvalidation.h" -+ -+#include "physmem_lma.h" -+#include "physmem_osmem.h" -+#include "physmem_hostmem.h" -+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY) -+#include "physmem_cpumap_history.h" -+#endif -+ -+#include "tlintern.h" -+#include "htbserver.h" -+ -+#define MULTI_DEVICE_BRINGUP -+ -+#if defined(MULTI_DEVICE_BRINGUP) -+#define MULTI_DEVICE_BRINGUP_DPF(msg, ...) PVR_DPF((PVR_DBG_MESSAGE, msg, __VA_ARGS__)) -+#else -+#define MULTI_DEVICE_BRINGUP_DPF(msg, ...) -+#endif -+ -+#if defined(SUPPORT_RGX) -+#include "rgxinit.h" -+#include "rgxhwperf.h" -+#include "rgxfwutils.h" -+#endif -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+#include "ri_server.h" -+#endif -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#include "process_stats.h" -+#endif -+ -+#include "vz_vmm_pvz.h" -+ -+#include "devicemem_history_server.h" -+ -+#if defined(SUPPORT_LINUX_DVFS) -+#include "pvr_dvfs_device.h" -+#endif -+ -+#if defined(SUPPORT_DISPLAY_CLASS) -+#include "dc_server.h" -+#endif -+ -+#include "rgx_options.h" -+#include "srvinit.h" -+#include "rgxutils.h" -+ -+#include "os_apphint.h" -+#include "pvrsrv_apphint.h" -+ -+#include "pvrsrv_tlstreams.h" -+#include "tlstream.h" -+ -+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) -+#include "physmem_test.h" -+#endif -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+#include "virt_validation_defs.h" -+#endif -+ -+#if defined(__linux__) -+#include "km_apphint.h" -+#endif /* defined(__linux__) */ -+ -+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) -+#define INFINITE_SLEEP_TIMEOUT 0ULL -+#endif -+ -+/*! Wait 100ms before retrying deferred clean-up again */ -+#define CLEANUP_THREAD_WAIT_RETRY_TIMEOUT 100000ULL -+ -+/*! Wait 8hrs when no deferred clean-up required. Allows a poll several times -+ * a day to check for any missed clean-up. */ -+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) -+#define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT INFINITE_SLEEP_TIMEOUT -+#else -+#define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT 28800000000ULL -+#endif -+ -+/*! 
When unloading try a few times to free everything remaining on the list */ -+#define CLEANUP_THREAD_UNLOAD_RETRY 4 -+ -+#define PVRSRV_TL_CTLR_STREAM_SIZE 4096 -+ -+static PVRSRV_DATA *gpsPVRSRVData; -+static IMG_UINT32 g_ui32InitFlags; -+ -+/* mark which parts of Services were initialised */ -+#define INIT_DATA_ENABLE_PDUMPINIT 0x1U -+ -+#define CLEANUP_QUEUE_TOTAL_DPF "QUEUED: %1.5d " -+#define CLEANUP_QUEUE_TOTAL_DPF_MAX_SIZE sizeof(CLEANUP_QUEUE_TOTAL_DPF)+1 -+ -+#define CLEANUP_STRING_SUMMARY_MAX_LEN (CLEANUP_QUEUE_TOTAL_DPF_MAX_SIZE+(PVRSRV_CLEANUP_TYPE_LAST * CLEANUP_TYPE_ITEM_DPF_MAX_SIZE)) -+ -+static IMG_CHAR *_ConcatCleanupString(IMG_CHAR *cleanupString) -+{ -+ IMG_UINT32 uiLoop; -+ IMG_UINT32 strSize = CLEANUP_STRING_SUMMARY_MAX_LEN; -+ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ -+ OSSNPrintf(cleanupString, CLEANUP_QUEUE_TOTAL_DPF_MAX_SIZE, CLEANUP_QUEUE_TOTAL_DPF, -+ OSAtomicRead(&psPVRSRVData->i32NumCleanupItemsQueued)); -+ -+ PVR_ASSERT(PVRSRV_CLEANUP_TYPE_CONNECTION == 1); -+ -+ for (uiLoop=PVRSRV_CLEANUP_TYPE_CONNECTION; (strSize > CLEANUP_TYPE_ITEM_DPF_MAX_SIZE) && (uiLoop < PVRSRV_CLEANUP_TYPE_LAST); uiLoop++) -+ { -+ IMG_CHAR acTempQueued[CLEANUP_TYPE_ITEM_DPF_MAX_SIZE]; -+ OSSNPrintf(acTempQueued, CLEANUP_TYPE_ITEM_DPF_MAX_SIZE, CLEANUP_TYPE_ITEM_DPF, -+ PVRSRVGetCleanupName(uiLoop), -+ OSAtomicRead(&psPVRSRVData->i32CleanupItemTypes[uiLoop])); -+ OSStringLCat(cleanupString, acTempQueued, strSize); -+ } -+ -+ return cleanupString; -+} -+ -+/* Callback to dump info of cleanup thread in debug_dump */ -+static void CleanupThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ IMG_CHAR acCleanupString[CLEANUP_STRING_SUMMARY_MAX_LEN]; -+ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ -+ PVR_DUMPDEBUG_LOG(" Number of deferred cleanup items: %s", _ConcatCleanupString(acCleanupString)); -+ -+ PVR_DUMPDEBUG_LOG(" Number of deferred cleanup items dropped after " -+ "retry limit reached : %d", -+ OSAtomicRead(&psPVRSRVData->i32NumCleanupItemsNotCompleted)); -+} -+ -+static void _CleanupThreadDecrementStats(PVRSRV_DATA *psPVRSRVData, -+ PVRSRV_CLEANUP_TYPE eCleanupType) -+{ -+#if defined(DEBUG) -+ IMG_CHAR acCleanupString[CLEANUP_STRING_SUMMARY_MAX_LEN]; -+#endif -+ -+ PVR_ASSERT(OSAtomicRead(&psPVRSRVData->i32NumCleanupItemsQueued) >= 0); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "BEFORE REMOVING ----- %s", _ConcatCleanupString(acCleanupString))); -+ -+ OSAtomicDecrement(&psPVRSRVData->i32NumCleanupItemsQueued); -+ -+ if ((eCleanupType <= PVRSRV_CLEANUP_TYPE_UNDEF) || ((eCleanupType >= PVRSRV_CLEANUP_TYPE_LAST))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Incorrect cleanup item passed: %d", eCleanupType)); -+ } -+ else -+ { -+ PVR_ASSERT(OSAtomicRead(&psPVRSRVData->i32CleanupItemTypes[eCleanupType]) >= 0); -+ OSAtomicDecrement(&psPVRSRVData->i32CleanupItemTypes[eCleanupType]); -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "AFTER REMOVING ----- %s", _ConcatCleanupString(acCleanupString))); -+} -+ -+#if defined(DEBUG) -+static void _CleanupThreadWorkListDump(PVRSRV_DATA *psPVRSRVData) -+{ -+ DLLIST_NODE *psNode; -+ DLLIST_NODE *psNextNode; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ OS_SPINLOCK_FLAGS uiFlags; -+ char pszCleanupLog[128]; -+ -+ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); -+ OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); -+ -+ /* Iterate over all devices. 
*/ -+ for (psDeviceNode = psPVRSRVData->psDeviceNodeList; -+ psDeviceNode != NULL; -+ psDeviceNode = psDeviceNode->psNext) -+ { -+ if (dllist_is_empty(&psDeviceNode->sCleanupThreadWorkList)) -+ { -+ OSSNPrintf(pszCleanupLog, 128, "Dev_%p: CLEAN", psDeviceNode); -+ PVR_LOG(("%s", pszCleanupLog)); -+ continue; -+ } -+ -+ OSSNPrintf(pszCleanupLog, 128, "Dev_%p: TASKS", psDeviceNode); -+ PVR_LOG(("%s", pszCleanupLog)); -+ -+ /* Iterate over all cleanup items. */ -+ dllist_foreach_node(&psDeviceNode->sCleanupThreadWorkList, psNode, psNextNode) -+ { -+ PVRSRV_CLEANUP_THREAD_WORK *psData; -+ -+ psData = IMG_CONTAINER_OF(psNode, PVRSRV_CLEANUP_THREAD_WORK, sNode); -+ -+ PVR_ASSERT(psData != NULL); -+ -+ if ((psData->eCleanupType <= PVRSRV_CLEANUP_TYPE_UNDEF) || -+ ((psData->eCleanupType >= PVRSRV_CLEANUP_TYPE_LAST))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Incorrect cleanup item found: %d.", psData->eCleanupType)); -+ continue; -+ } -+ -+ OSSNPrintf(pszCleanupLog, 128, "+ %p: type %u, depends-HW %s (%s:%d)", -+ psData, -+ psData->eCleanupType, -+ (psData->bDependsOnHW) ? "Yes" : " No", -+ psData->pszFun, psData->ui32LineNum); -+ PVR_LOG(("%s", pszCleanupLog)); -+ } -+ } -+ -+ OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+} -+#endif -+ -+/* Add work to the cleanup thread work list. -+ * The work item will be executed by the cleanup thread -+ */ -+#if defined(DEBUG) -+void PVRSRVCleanupThreadAddWork_Debug(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_CLEANUP_THREAD_WORK *psData, -+ const char *pszFun, const unsigned int ui32LineNum) -+#else -+void PVRSRVCleanupThreadAddWork_Int(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_CLEANUP_THREAD_WORK *psData) -+#endif -+{ -+#if defined(DEBUG) -+ IMG_CHAR acCleanupString[CLEANUP_STRING_SUMMARY_MAX_LEN]; -+#endif -+ -+ PVRSRV_DATA *psPVRSRVData; -+ PVRSRV_ERROR eError; -+ OS_SPINLOCK_FLAGS uiFlags; -+ -+ PVR_ASSERT(psData != NULL); -+ -+ if ((psData->eCleanupType <= PVRSRV_CLEANUP_TYPE_UNDEF) || -+ ((psData->eCleanupType >= PVRSRV_CLEANUP_TYPE_LAST))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Incorrect cleanup item passed: %d.", psData->eCleanupType)); -+ } -+ -+ psPVRSRVData = PVRSRVGetPVRSRVData(); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "AFTER QUEUEING ----- %s", _ConcatCleanupString(acCleanupString))); -+ -+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) -+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK || psPVRSRVData->bUnload) -+#else -+ if (psPVRSRVData->bUnload) -+#endif -+ { -+ CLEANUP_THREAD_FN pfnFree = psData->pfnFree; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "Cleanup thread has already quit: doing work immediately")); -+ -+ eError = pfnFree(psData->pvData); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "BEFORE ADDING ----- %s", _ConcatCleanupString(acCleanupString))); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to free resource " -+ "(callback " IMG_PFN_FMTSPEC "). " -+ "Immediate free will not be retried.", -+ pfnFree)); -+ } -+ } -+ else -+ { -+ /* -+ * Access psData before putting it in the work list. -+ * Cleanup thread will free psData after it is done with the cleanup. -+ * We should consider psData potentially freed after it was put into the worklist. 
-+ */ -+ -+ OSAtomicIncrement(&psPVRSRVData->i32NumCleanupItemsQueued); -+ OSAtomicIncrement(&psPVRSRVData->i32CleanupItemTypes[psData->eCleanupType]); -+#if defined(DEBUG) -+ psData->pszFun = pszFun; -+ psData->ui32LineNum = ui32LineNum; -+#endif -+ -+ /* add this work item to the list */ -+ OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); -+ dllist_add_to_tail(&psDeviceNode->sCleanupThreadWorkList, &psData->sNode); -+ OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); -+ -+ /* signal the cleanup thread to ensure this item gets processed */ -+ eError = OSEventObjectSignal(psPVRSRVData->hCleanupEventObject); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "AFTER ADDING ----- %s", _ConcatCleanupString(acCleanupString))); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); -+ } -+} -+ -+void PVRSRVCleanupThreadWaitForDevice(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ IMG_BOOL bIsEmpty; -+ -+ PVR_ASSERT(psDeviceNode != NULL); -+ -+ if (gpsPVRSRVData->hCleanupThread == NULL) -+ { -+ return; -+ } -+ -+ LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) -+ { -+ OS_SPINLOCK_FLAGS uiFlags; -+ PVRSRV_ERROR eError; -+ -+ if (gpsPVRSRVData->hCleanupEventObject) -+ { -+ eError = OSEventObjectSignal(gpsPVRSRVData->hCleanupEventObject); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); -+ } -+ -+ OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); -+ bIsEmpty = dllist_is_empty(&psDeviceNode->sCleanupThreadWorkList); -+ OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); -+ -+ if (bIsEmpty) -+ { -+ break; -+ } -+ -+ OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US / OS_THREAD_DESTROY_RETRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ PVR_LOG_IF_FALSE(bIsEmpty, "Failed to flush device cleanup queue"); -+#if defined(DEBUG) -+ _CleanupThreadWorkListDump(psPVRSRVData); -+#endif -+} -+ -+static INLINE DLLIST_NODE *_CleanupThreadWorkListLast(PVRSRV_DATA *psPVRSRVData) -+{ -+ DLLIST_NODE *psNode = NULL; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ OS_SPINLOCK_FLAGS uiFlags; -+ -+ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); -+ OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); -+ -+ /* We treat the Host device node as the last node in the list. */ -+ psNode = dllist_get_prev_node(&psPVRSRVData->psHostMemDeviceNode->sCleanupThreadWorkList); -+ if (psNode != NULL) -+ { -+ OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ return psNode; -+ } -+ -+ /* Iterate over all devices and find the last node of the last device. */ -+ for (psDeviceNode = psPVRSRVData->psDeviceNodeList; -+ psDeviceNode != NULL; -+ psDeviceNode = psDeviceNode->psNext) -+ { -+ DLLIST_NODE *psCurrNode = dllist_get_prev_node(&psDeviceNode->sCleanupThreadWorkList); -+ if (psCurrNode != NULL) -+ { -+ psNode = psCurrNode; -+ } -+ } -+ -+ OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ -+ return psNode; -+} -+ -+/* Pop an item from the head of the cleanup thread work lists. -+ * -+ * This pops an item in a round robin manner: -+ * -+ * 1. It starts from the first device and moves to next device if no cleanup -+ * item found. -+ * 2. If no items were found in the regular devices bound lists it moves to the -+ * Host device list. -+ * 3. 
In case there was nothing in the Host device items list but there was -+ * something in the devices lists, it returns first found device bound item -+ * -+ * If `*ppsDeviceNode` is not NULL the lookup of the next cleanup item will -+ * start from the next devices after `*ppsDeviceNode`. This makes sure that -+ * we always pop a first item from each device and move to the next device. -+ * We prevent "starvation" of some devices this way. -+ */ -+static INLINE DLLIST_NODE *_CleanupThreadWorkListPop(PVRSRV_DATA *psPVRSRVData, -+ PVRSRV_DEVICE_NODE **ppsDeviceNode) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = NULL, *psFirstDevice = NULL; -+ DLLIST_NODE *psNode = NULL, *psFirstNode = NULL; -+ OS_SPINLOCK_FLAGS uiFlags; -+ -+ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); -+ OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); -+ -+ /* always need to start from the beginning of the list in case some of the -+ * devices were already destroyed */ -+ for (psDeviceNode = psPVRSRVData->psDeviceNodeList; -+ psDeviceNode != NULL; -+ psDeviceNode = psDeviceNode->psNext) -+ { -+ psNode = dllist_get_next_node(&psDeviceNode->sCleanupThreadWorkList); -+ -+ /* remember fist non-NULL node and device in case no items are found -+ * on later devices */ -+ if (psNode != NULL && psFirstNode == NULL) -+ { -+ psFirstNode = psNode; -+ psFirstDevice = psDeviceNode; -+ } -+ -+ /* to ensure that further device don't get starved skip the last device -+ * until all devices are processed and then move to non-device bound -+ * items */ -+ if (*ppsDeviceNode != NULL) { -+ if (psDeviceNode == *ppsDeviceNode) -+ { -+ *ppsDeviceNode = NULL; -+ } -+ -+ /* in case this is the last device and next iteration exits the -+ * loop */ -+ psNode = NULL; -+ -+ continue; -+ } -+ -+ if (psNode != NULL) -+ { -+ *ppsDeviceNode = psDeviceNode; -+ dllist_remove_node(psNode); -+ -+ break; -+ } -+ } -+ -+ /* if no item found in the regular devices check also the Host device */ -+ if (psNode == NULL) -+ { -+ psNode = dllist_get_next_node(&psPVRSRVData->psHostMemDeviceNode->sCleanupThreadWorkList); -+ if (psNode != NULL) -+ { -+ dllist_remove_node(psNode); -+ } -+ *ppsDeviceNode = psPVRSRVData->psHostMemDeviceNode; -+ } -+ -+ /* if no item found for the Host device check if there was a cleanup item -+ * in one of the previous devices, this starts processing items from the -+ * beginning without one extra empty call to "wrap around" */ -+ if (psNode == NULL && psFirstNode != NULL) -+ { -+ psNode = psFirstNode; -+ *ppsDeviceNode = psFirstDevice; -+ dllist_remove_node(psNode); -+ } -+ -+ OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ -+ return psNode; -+} -+ -+/* Process the cleanup thread work list */ -+static IMG_BOOL _CleanupThreadProcessWorkList(PVRSRV_DATA *psPVRSRVData, -+ IMG_BOOL *pbUseHWTimeout) -+{ -+ DLLIST_NODE *psNodeIter, *psNodeLast; -+ PVRSRV_ERROR eError; -+ IMG_BOOL bNeedRetry = IMG_FALSE; -+ OS_SPINLOCK_FLAGS uiFlags; -+ PVRSRV_CLEANUP_TYPE eCleanupType; -+ PVRSRV_DEVICE_NODE *psDeviceNode = NULL; -+ -+ /* Reset HWTimeout Flag */ -+ *pbUseHWTimeout = IMG_FALSE; -+ -+ psNodeLast = _CleanupThreadWorkListLast(psPVRSRVData); -+ if (psNodeLast == NULL) -+ { -+ /* no elements to clean up */ -+ return IMG_FALSE; -+ } -+ -+ /* any callback functions which return error will be -+ * moved to the back of the list, and additional items can be added -+ * to the list at any time so we ensure we only iterate from the -+ * head of the list 
to the current tail (since the tail may always -+ * be changing) -+ */ -+ do -+ { -+ PVRSRV_CLEANUP_THREAD_WORK *psData; -+ CLEANUP_THREAD_FN pfnFree; -+ IMG_BOOL bRetry = IMG_FALSE; -+ -+ psNodeIter = _CleanupThreadWorkListPop(psPVRSRVData, &psDeviceNode); -+ if (psNodeIter == NULL) -+ { -+ return IMG_FALSE; -+ } -+ -+ psData = IMG_CONTAINER_OF(psNodeIter, PVRSRV_CLEANUP_THREAD_WORK, sNode); -+ -+ /* get the function pointer address here so we have access to it -+ * in order to report the error in case of failure, without having -+ * to depend on psData not having been freed -+ */ -+ pfnFree = psData->pfnFree; -+ eCleanupType = psData->eCleanupType; -+ eError = pfnFree(psData->pvData); -+ -+ if (eError != PVRSRV_OK) -+ { -+ /* Move to back of the list, if this item's -+ * retry count hasn't hit zero. -+ */ -+ if (CLEANUP_THREAD_IS_RETRY_TIMEOUT(psData)) -+ { -+ if (CLEANUP_THREAD_RETRY_TIMEOUT_NOT_REACHED(psData)) -+ { -+ bNeedRetry = IMG_TRUE; -+ bRetry = IMG_TRUE; -+ /* If any items require retry and are HW dependent -+ * use the HW timeout -+ */ -+ if (psData->bDependsOnHW) -+ { -+ *pbUseHWTimeout = psData->bDependsOnHW; -+ } -+ } -+ } -+ else -+ { -+ if (psData->ui32RetryCount > 0) -+ { -+ psData->ui32RetryCount--; -+ bNeedRetry = IMG_TRUE; -+ bRetry = IMG_TRUE; -+ /* If any items require retry and are HW dependent -+ * use the HW timeout -+ */ -+ if (psData->bDependsOnHW) -+ { -+ *pbUseHWTimeout = psData->bDependsOnHW; -+ } -+ } -+ } -+ -+ /* If the work depends on HW then we should add it to the back of the list, -+ * the cleanup thread will sleep for longer if required and the next MISR -+ * from the device will wake the task again in which it might be ready. -+ */ -+ if (bRetry || psData->bDependsOnHW) -+ { -+ /* If any items on the work list depend on HW -+ * and didn't get cleaned up. -+ */ -+ OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); -+ dllist_add_to_tail(&psDeviceNode->sCleanupThreadWorkList, psNodeIter); -+ OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to free resource (callback " IMG_PFN_FMTSPEC "). 
" -+ "Retry limit reached", pfnFree)); -+ OSAtomicIncrement(&psPVRSRVData->i32NumCleanupItemsNotCompleted); -+ /* Dropping item */ -+ _CleanupThreadDecrementStats(psPVRSRVData, eCleanupType); -+ } -+ } -+ else -+ { -+ /* Ok returned */ -+ _CleanupThreadDecrementStats(psPVRSRVData, eCleanupType); -+ } -+ } while (psNodeIter != NULL && psNodeIter != psNodeLast); -+ -+ return bNeedRetry; -+} -+ -+// #define CLEANUP_DPFL PVR_DBG_WARNING -+#define CLEANUP_DPFL PVR_DBG_MESSAGE -+ -+/* Create/initialise data required by the cleanup thread, -+ * before the cleanup thread is started -+ */ -+static PVRSRV_ERROR _CleanupThreadPrepare(PVRSRV_DATA *psPVRSRVData) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Create the clean up event object */ -+ -+ eError = OSEventObjectCreate("PVRSRV_CLEANUP_EVENTOBJECT", &gpsPVRSRVData->hCleanupEventObject); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", Exit); -+ -+ /* initialise the mutex and linked list required for the cleanup thread work list */ -+ -+ eError = OSSpinLockCreate(&psPVRSRVData->hCleanupThreadWorkListLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", Exit); -+ -+Exit: -+ return eError; -+} -+ -+static void CleanupThread(void *pvData) -+{ -+ PVRSRV_DATA *psPVRSRVData = pvData; -+ IMG_BOOL bRetryWorkList = IMG_FALSE; -+ IMG_BOOL bUseHWTimeout = IMG_FALSE; -+ IMG_HANDLE hOSEvent; -+ PVRSRV_ERROR eRc; -+ IMG_UINT32 uiUnloadRetry = 0; -+ DLLIST_NODE *psNodeIter, *psNodeLast; -+ -+ /* Store the process id (pid) of the clean-up thread */ -+ psPVRSRVData->cleanupThreadPid = OSGetCurrentProcessID(); -+ psPVRSRVData->cleanupThreadTid = OSGetCurrentThreadID(); -+ -+ PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread starting... ")); -+ -+ /* Open an event on the clean up event object so we can listen on it, -+ * abort the clean up thread and driver if this fails. -+ */ -+ eRc = OSEventObjectOpen(psPVRSRVData->hCleanupEventObject, &hOSEvent); -+ PVR_ASSERT(eRc == PVRSRV_OK); -+ -+ /* While the driver is in a good state and is not being unloaded -+ * try to free any deferred items when signalled -+ */ -+ while (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) -+ { -+ IMG_UINT64 ui64Timeoutus; -+ if (psPVRSRVData->bUnload) -+ { -+ if (dllist_is_empty(&psPVRSRVData->psHostMemDeviceNode->sCleanupThreadWorkList) || -+ uiUnloadRetry > CLEANUP_THREAD_UNLOAD_RETRY) -+ { -+ break; -+ } -+ uiUnloadRetry++; -+ } -+ -+ /* Wait until signalled for deferred clean up OR wait for a -+ * short period if the previous deferred clean up was not able -+ * to release all the resources before trying again. -+ * Bridge lock re-acquired on our behalf before the wait call returns. -+ */ -+ -+ if (bRetryWorkList && bUseHWTimeout) -+ { -+ /* If item depends on HW we are -+ * waiting for GPU work to finish, so -+ * use MAX_HW_TIME_US as timeout (this -+ * will be set appropriately when -+ * running on systems with emulated -+ * hardware, etc). -+ */ -+ ui64Timeoutus = MAX_HW_TIME_US; -+ } -+ else -+ { -+ /* Use the default retry timeout. */ -+ ui64Timeoutus = CLEANUP_THREAD_WAIT_RETRY_TIMEOUT; -+ } -+ -+ eRc = OSEventObjectWaitKernel(hOSEvent, -+ bRetryWorkList ? 
-+ ui64Timeoutus : -+ CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT); -+ if (eRc == PVRSRV_ERROR_TIMEOUT) -+ { -+ PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait timeout")); -+ } -+ else if (eRc == PVRSRV_OK) -+ { -+ PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait OK, signal received")); -+ } -+ else -+ { -+ PVR_LOG_ERROR(eRc, "OSEventObjectWaitKernel"); -+ } -+ -+ bRetryWorkList = _CleanupThreadProcessWorkList(psPVRSRVData, -+ &bUseHWTimeout); -+ } -+ -+ psNodeLast = _CleanupThreadWorkListLast(psPVRSRVData); -+ if (psNodeLast != NULL) -+ { -+ do -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = NULL; -+ PVRSRV_CLEANUP_THREAD_WORK *psData; -+ psNodeIter = _CleanupThreadWorkListPop(psPVRSRVData, &psDeviceNode); -+ if (psNodeIter == NULL) -+ { -+ break; -+ } -+ -+ psData = IMG_CONTAINER_OF(psNodeIter, PVRSRV_CLEANUP_THREAD_WORK, sNode); -+ OSAtomicIncrement(&psPVRSRVData->i32NumCleanupItemsNotCompleted); -+ /* Dropping item */ -+ _CleanupThreadDecrementStats(psPVRSRVData, psData->eCleanupType); -+ } -+ while (psNodeIter != NULL && psNodeIter != psNodeLast); -+ -+ PVR_DPF((PVR_DBG_ERROR, "Cleanup Thread Failed to free %d resources", OSAtomicRead(&psPVRSRVData->i32NumCleanupItemsNotCompleted))); -+ } -+ -+ OSSpinLockDestroy(psPVRSRVData->hCleanupThreadWorkListLock); -+ -+ eRc = OSEventObjectClose(hOSEvent); -+ PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose"); -+ -+ PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread ending... ")); -+} -+ -+IMG_PID PVRSRVCleanupThreadGetPid(void) -+{ -+ return gpsPVRSRVData->cleanupThreadPid; -+} -+ -+uintptr_t PVRSRVCleanupThreadGetTid(void) -+{ -+ return gpsPVRSRVData->cleanupThreadTid; -+} -+ -+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) -+/* -+ * Firmware is unresponsive. -+ * The Host will initiate a recovery process during which the -+ * Firmware and GPU are reset and returned to a working state. -+ */ -+static PVRSRV_ERROR HandleFwHostSideRecovery(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; -+ DLLIST_NODE *psNode, *psNext; -+ IMG_UINT32 ui32CtxIdx = 0U; -+ IMG_UINT32 ui32Nodes = 0U; -+ -+ OSWRLockAcquireRead(psDevInfo->hCommonCtxtListLock); -+ /* Get the number of nodes in a linked list */ -+ dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) -+ { -+ ++ui32Nodes; -+ } -+ -+ /* Any client contexts active at the moment? 
*/ -+ if (ui32Nodes > 0U) -+ { -+ /* Free the active context buffer previously allocated */ -+ if (psDevInfo->psRGXFWIfActiveContextBufDesc) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfActiveContextBufDesc); -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfActiveContextBufDesc); -+ psDevInfo->psRGXFWIfActiveContextBufDesc = NULL; -+ } -+ -+ /* Setup allocations to store the active contexts */ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, -+ (ui32Nodes + 1) * sizeof(RGXFWIF_ACTIVE_CONTEXT_BUF_DATA), -+ "FwSysActiveContextBufData", -+ &psDevInfo->psRGXFWIfActiveContextBufDesc, -+ (void *) &psDevInfo->psRGXFWIfSysInit->sActiveContextBufBase.ui32Addr, -+ (void **) &psDevInfo->psRGXFWIfActiveContextBuf, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation",Error); -+ -+ /* List of contexts to be rekicked by FW powering up the device */ -+ dllist_foreach_node_backwards(&psDevInfo->sCommonCtxtListHead, psNode, psNext) -+ { -+ psDevInfo->psRGXFWIfActiveContextBuf[ui32CtxIdx].psContext = -+ RGXGetFWCommonContextAddrFromServerCommonCtx(psDevInfo, psNode); -+ ++ui32CtxIdx; -+ } -+ /* Null context as the terminator marker */ -+ psDevInfo->psRGXFWIfActiveContextBuf[ui32CtxIdx].psContext.ui32Addr = 0; -+ } -+ OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock); -+ -+ /* Host can't expect a response on power-down request as FW is in BAD state */ -+ eError = PVRSRVSetDeviceCurrentPowerState(psDeviceNode->psPowerDev, PVRSRV_DEV_POWER_STATE_OFF); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVSetDeviceCurrentPowerState OFF", Error); -+ -+ /* Flag to be set to notify FW while recovering from crash */ -+ psDevInfo->psRGXFWIfSysInit->bFwHostRecoveryMode = IMG_TRUE; -+ -+ /* Flush here because we have setup a fw alloc addr in the structure earlier */ -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfSysInit, FLUSH); -+ -+ /* Power-on the device resetting GPU & FW */ -+ OSLockAcquire(psDeviceNode->hPowerLock); -+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, -+ PVRSRV_DEV_POWER_STATE_ON, -+ PVRSRV_POWER_FLAGS_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVSetDevicePowerStateKM ON", Error); -+ OSLockRelease(psDeviceNode->hPowerLock); -+ -+Error: -+ return eError; -+} -+#endif -+ -+void PVRSRVDeviceSetState(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_STATE eNewDevState) -+{ -+ if (eNewDevState == psDeviceNode->eDevState) -+ { -+ return; -+ } -+ -+ switch (eNewDevState) -+ { -+ case PVRSRV_DEVICE_STATE_PCI_ERROR: -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetDebugDevStateString(eNewDevState))); -+ psDeviceNode->eDevState = eNewDevState; -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ PMRFreeZombies(psDeviceNode); -+#endif -+ break; -+ } -+ case PVRSRV_DEVICE_STATE_CREATING: -+ case PVRSRV_DEVICE_STATE_CREATED: -+ case PVRSRV_DEVICE_STATE_ACTIVE: -+ case PVRSRV_DEVICE_STATE_FROZEN: -+ case PVRSRV_DEVICE_STATE_DEINIT: -+ case PVRSRV_DEVICE_STATE_BAD: -+ case PVRSRV_DEVICE_STATE_DEINIT_POWERED_OFF: -+ { -+ /* PCI_ERROR is a terminal state. Reload driver to recover. 
*/ -+ if (eNewDevState != PVRSRV_DEVICE_STATE_PCI_ERROR) -+ { -+ psDeviceNode->eDevState = eNewDevState; -+ } -+ break; -+ } -+ case PVRSRV_DEVICE_STATE_UNDEFINED: -+ default: -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown state (%d)", __func__, eNewDevState)); -+ break; -+ } -+ } -+} -+ -+static void DevicesWatchdogThread_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, -+ va_list va) -+{ -+#if defined(SUPPORT_RGX) -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; -+#endif -+ PVRSRV_DEVICE_HEALTH_STATUS *pePreviousHealthStatus, eHealthStatus; -+ PVRSRV_ERROR eError; -+ PVRSRV_DEVICE_DEBUG_DUMP_STATUS eDebugDumpState; -+ IMG_BOOL bCheckAfterTimePassed; -+ -+ pePreviousHealthStatus = va_arg(va, PVRSRV_DEVICE_HEALTH_STATUS *); -+ /* IMG_BOOL (i.e. bool) is converted to int during default argument promotion -+ * in variadic argument list. Thus, fetch an int to get IMG_BOOL */ -+ bCheckAfterTimePassed = (IMG_BOOL) va_arg(va, int); -+ -+ if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) -+ { -+ return; -+ } -+ -+ /* Block here if needed */ -+ PVRSRVBlockIfFrozen(psDeviceNode); -+ -+ if (psDeviceNode->pfnUpdateHealthStatus != NULL) -+ { -+ eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, bCheckAfterTimePassed); -+ PVR_WARN_IF_ERROR(eError, "pfnUpdateHealthStatus"); -+ } -+ eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus); -+ -+ if (eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_OK) -+ { -+ if (eHealthStatus != *pePreviousHealthStatus) -+ { -+#if defined(SUPPORT_RGX) -+ if (!(psDevInfo->ui32DeviceFlags & -+ RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN)) -+#else -+ /* In this case we don't have an RGX device */ -+ if (eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED) -+#endif -+ { -+ PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: " -+ "Device status not OK!!!")); -+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, -+ NULL, NULL); -+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) -+ HandleFwHostSideRecovery(psDeviceNode); -+#endif -+ } -+ } -+ } -+ -+ *pePreviousHealthStatus = eHealthStatus; -+ -+ /* Have we received request from FW to capture debug dump(could be due to HWR) */ -+ eDebugDumpState = (PVRSRV_DEVICE_DEBUG_DUMP_STATUS)OSAtomicCompareExchange( -+ &psDeviceNode->eDebugDumpRequested, -+ PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE, -+ PVRSRV_DEVICE_DEBUG_DUMP_NONE); -+ if (PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE == eDebugDumpState) -+ { -+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); -+ } -+ -+} -+ -+#if defined(SUPPORT_RGX) -+static void HWPerfPeriodicHostEventsThread(void *pvData) -+{ -+ PVRSRV_DATA *psPVRSRVData = pvData; -+ IMG_HANDLE hOSEvent; -+ PVRSRV_ERROR eError; -+ -+ eError = OSEventObjectOpen(psPVRSRVData->hHWPerfHostPeriodicEvObj, &hOSEvent); -+ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen"); -+ -+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) -+ while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && -+ !psPVRSRVData->bUnload && !psPVRSRVData->bHWPerfHostThreadStop) -+#else -+ while (!psPVRSRVData->bUnload && !psPVRSRVData->bHWPerfHostThreadStop) -+#endif -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ IMG_BOOL bInfiniteSleep = IMG_TRUE; -+ -+ eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)psPVRSRVData->ui32HWPerfHostThreadTimeout * 1000); -+ if (eError == PVRSRV_OK && (psPVRSRVData->bUnload || psPVRSRVData->bHWPerfHostThreadStop)) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "HWPerfPeriodicHostEventsThread: Shutdown event received.")); -+ break; -+ } -+ -+ for (psDeviceNode = 
psPVRSRVData->psDeviceNodeList; -+ psDeviceNode != NULL; -+ psDeviceNode = psDeviceNode->psNext) -+ { -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ /* If the psDevInfo or hHWPerfHostStream are NULL it most -+ * likely means that this device or stream has not been -+ * initialised yet, so just skip */ -+ if (psDevInfo == NULL || psDevInfo->hHWPerfHostStream == NULL) -+ { -+ continue; -+ } -+ -+ /* Check if the HWPerf host stream is open for reading before writing -+ * a packet, this covers cases where the event filter is not zeroed -+ * before a reader disconnects. */ -+ if (TLStreamIsOpenForReading(psDevInfo->hHWPerfHostStream)) -+ { -+ /* As long as any of the streams is opened don't go into -+ * indefinite sleep. */ -+ bInfiniteSleep = IMG_FALSE; -+#if defined(SUPPORT_RGX) -+ RGXSRV_HWPERF_HOST_INFO(psDevInfo, RGX_HWPERF_INFO_EV_MEM64_USAGE); -+#endif -+ } -+ } -+ -+ if (bInfiniteSleep) -+ { -+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) -+ psPVRSRVData->ui32HWPerfHostThreadTimeout = INFINITE_SLEEP_TIMEOUT; -+#else -+ /* Use an 8 hour timeout if indefinite sleep is not supported. */ -+ psPVRSRVData->ui32HWPerfHostThreadTimeout = 60 * 60 * 8 * 1000; -+#endif -+ } -+ } -+ -+ eError = OSEventObjectClose(hOSEvent); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectClose"); -+} -+#endif -+ -+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) -+ -+typedef enum -+{ -+ DWT_ST_INIT, -+ DWT_ST_SLEEP_POWERON, -+ DWT_ST_SLEEP_POWEROFF, -+ DWT_ST_SLEEP_DEFERRED, -+ DWT_ST_FINAL -+} DWT_STATE; -+ -+typedef enum -+{ -+ DWT_SIG_POWERON, -+ DWT_SIG_POWEROFF, -+ DWT_SIG_TIMEOUT, -+ DWT_SIG_UNLOAD, -+ DWT_SIG_ERROR -+} DWT_SIGNAL; -+ -+static inline IMG_BOOL _DwtIsPowerOn(PVRSRV_DATA *psPVRSRVData) -+{ -+ return List_PVRSRV_DEVICE_NODE_IMG_BOOL_Any(psPVRSRVData->psDeviceNodeList, -+ PVRSRVIsDevicePowered); -+} -+ -+static inline void _DwtCheckHealthStatus(PVRSRV_DATA *psPVRSRVData, -+ PVRSRV_DEVICE_HEALTH_STATUS *peStatus, -+ IMG_BOOL bTimeOut) -+{ -+ List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList, -+ DevicesWatchdogThread_ForEachVaCb, -+ peStatus, -+ bTimeOut); -+} -+ -+static DWT_SIGNAL _DwtWait(PVRSRV_DATA *psPVRSRVData, IMG_HANDLE hOSEvent, -+ IMG_UINT32 ui32Timeout) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64) ui32Timeout * 1000); -+ -+#ifdef PVR_TESTING_UTILS -+ psPVRSRVData->ui32DevicesWdWakeupCounter++; -+#endif -+ -+ if (eError == PVRSRV_OK) -+ { -+ if (psPVRSRVData->bUnload) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Shutdown event" -+ " received.")); -+ return DWT_SIG_UNLOAD; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power state " -+ "change event received.")); -+ -+ if (_DwtIsPowerOn(psPVRSRVData)) -+ { -+ return DWT_SIG_POWERON; -+ } -+ else -+ { -+ return DWT_SIG_POWEROFF; -+ } -+ } -+ } -+ else if (eError == PVRSRV_ERROR_TIMEOUT) -+ { -+ return DWT_SIG_TIMEOUT; -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: Error (%d) when" -+ " waiting for event!", eError)); -+ return DWT_SIG_ERROR; -+} -+ -+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ -+ -+static void DevicesWatchdogThread(void *pvData) -+{ -+ PVRSRV_DATA *psPVRSRVData = pvData; -+ PVRSRV_DEVICE_HEALTH_STATUS ePreviousHealthStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK; -+ IMG_HANDLE hOSEvent; -+ PVRSRV_ERROR eError; -+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) -+ DWT_STATE eState = DWT_ST_INIT; -+ const IMG_UINT32 ui32OnTimeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT; -+ const 
IMG_UINT32 ui32OffTimeout = INFINITE_SLEEP_TIMEOUT; -+#else -+ IMG_UINT32 ui32Timeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT; -+ /* Flag used to defer the sleep timeout change by 1 loop iteration. -+ * This helps to ensure at least two health checks are performed before a long sleep. -+ */ -+ IMG_BOOL bDoDeferredTimeoutChange = IMG_FALSE; -+#endif -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power off sleep time: %d.", -+ DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT)); -+ -+ /* Open an event on the devices watchdog event object so we can listen on it -+ and abort the devices watchdog thread. */ -+ eError = OSEventObjectOpen(psPVRSRVData->hDevicesWatchdogEvObj, &hOSEvent); -+ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen"); -+ -+ /* Loop continuously checking the device status every few seconds. */ -+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) -+ while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && -+ !psPVRSRVData->bUnload) -+#else -+ while (!psPVRSRVData->bUnload) -+#endif -+ { -+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) -+ -+ switch (eState) -+ { -+ case DWT_ST_INIT: -+ { -+ if (_DwtIsPowerOn(psPVRSRVData)) -+ { -+ eState = DWT_ST_SLEEP_POWERON; -+ } -+ else -+ { -+ eState = DWT_ST_SLEEP_POWEROFF; -+ } -+ -+ break; -+ } -+ case DWT_ST_SLEEP_POWERON: -+ { -+ DWT_SIGNAL eSignal = _DwtWait(psPVRSRVData, hOSEvent, -+ ui32OnTimeout); -+ -+ switch (eSignal) { -+ case DWT_SIG_POWERON: -+ /* self-transition, nothing to do */ -+ break; -+ case DWT_SIG_POWEROFF: -+ eState = DWT_ST_SLEEP_DEFERRED; -+ break; -+ case DWT_SIG_TIMEOUT: -+ _DwtCheckHealthStatus(psPVRSRVData, -+ &ePreviousHealthStatus, -+ IMG_TRUE); -+ /* self-transition */ -+ break; -+ case DWT_SIG_UNLOAD: -+ eState = DWT_ST_FINAL; -+ break; -+ case DWT_SIG_ERROR: -+ /* deliberately ignored */ -+ break; -+ } -+ -+ break; -+ } -+ case DWT_ST_SLEEP_POWEROFF: -+ { -+ DWT_SIGNAL eSignal = _DwtWait(psPVRSRVData, hOSEvent, -+ ui32OffTimeout); -+ -+ switch (eSignal) { -+ case DWT_SIG_POWERON: -+ eState = DWT_ST_SLEEP_POWERON; -+ _DwtCheckHealthStatus(psPVRSRVData, -+ &ePreviousHealthStatus, -+ IMG_FALSE); -+ break; -+ case DWT_SIG_POWEROFF: -+ /* self-transition, nothing to do */ -+ break; -+ case DWT_SIG_TIMEOUT: -+ /* self-transition */ -+ _DwtCheckHealthStatus(psPVRSRVData, -+ &ePreviousHealthStatus, -+ IMG_TRUE); -+ break; -+ case DWT_SIG_UNLOAD: -+ eState = DWT_ST_FINAL; -+ break; -+ case DWT_SIG_ERROR: -+ /* deliberately ignored */ -+ break; -+ } -+ -+ break; -+ } -+ case DWT_ST_SLEEP_DEFERRED: -+ { -+ DWT_SIGNAL eSignal =_DwtWait(psPVRSRVData, hOSEvent, -+ ui32OnTimeout); -+ -+ switch (eSignal) { -+ case DWT_SIG_POWERON: -+ eState = DWT_ST_SLEEP_POWERON; -+ _DwtCheckHealthStatus(psPVRSRVData, -+ &ePreviousHealthStatus, -+ IMG_FALSE); -+ break; -+ case DWT_SIG_POWEROFF: -+ /* self-transition, nothing to do */ -+ break; -+ case DWT_SIG_TIMEOUT: -+ eState = DWT_ST_SLEEP_POWEROFF; -+ _DwtCheckHealthStatus(psPVRSRVData, -+ &ePreviousHealthStatus, -+ IMG_FALSE); -+ break; -+ case DWT_SIG_UNLOAD: -+ eState = DWT_ST_FINAL; -+ break; -+ case DWT_SIG_ERROR: -+ /* deliberately ignored */ -+ break; -+ } -+ -+ break; -+ } -+ case DWT_ST_FINAL: -+ /* the loop should terminate on next spin if this state is -+ * reached so nothing to do here. 
*/ -+ break; -+ } -+ -+#else /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ -+ IMG_BOOL bPwrIsOn = IMG_FALSE; -+ IMG_BOOL bTimeOut = IMG_FALSE; -+ -+ /* Wait time between polls (done at the start of the loop to allow devices -+ to initialise) or for the event signal (shutdown or power on). */ -+ eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)ui32Timeout * 1000); -+ -+#ifdef PVR_TESTING_UTILS -+ psPVRSRVData->ui32DevicesWdWakeupCounter++; -+#endif -+ if (eError == PVRSRV_OK) -+ { -+ if (psPVRSRVData->bUnload) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Shutdown event received.")); -+ break; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power state change event received.")); -+ } -+ } -+ else if (eError != PVRSRV_ERROR_TIMEOUT) -+ { -+ /* If timeout do nothing otherwise print warning message. */ -+ PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: " -+ "Error (%d) when waiting for event!", eError)); -+ } -+ else -+ { -+ bTimeOut = IMG_TRUE; -+ } -+ -+ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); -+ List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList, -+ DevicesWatchdogThread_ForEachVaCb, -+ &ePreviousHealthStatus, -+ bTimeOut); -+ bPwrIsOn = List_PVRSRV_DEVICE_NODE_IMG_BOOL_Any(psPVRSRVData->psDeviceNodeList, -+ PVRSRVIsDevicePowered); -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ -+ if (bPwrIsOn || psPVRSRVData->ui32DevicesWatchdogPwrTrans) -+ { -+ psPVRSRVData->ui32DevicesWatchdogPwrTrans = 0; -+ ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT; -+ bDoDeferredTimeoutChange = IMG_FALSE; -+ } -+ else -+ { -+ /* First, check if the previous loop iteration signalled a need to change the timeout period */ -+ if (bDoDeferredTimeoutChange) -+ { -+ ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT; -+ bDoDeferredTimeoutChange = IMG_FALSE; -+ } -+ else -+ { -+ /* Signal that we need to change the sleep timeout in the next loop iteration -+ * to allow the device health check code a further iteration at the current -+ * sleep timeout in order to determine bad health (e.g. stalled cCCB) by -+ * comparing past and current state snapshots */ -+ bDoDeferredTimeoutChange = IMG_TRUE; -+ } -+ } -+ -+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ -+ } -+ -+ eError = OSEventObjectClose(hOSEvent); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectClose"); -+} -+ -+#if defined(SUPPORT_AUTOVZ) -+static void AutoVzWatchdogThread_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) -+ { -+ return; -+ } -+ else if (psDeviceNode->pfnUpdateAutoVzWatchdog != NULL) -+ { -+ psDeviceNode->pfnUpdateAutoVzWatchdog(psDeviceNode); -+ } -+} -+ -+static void AutoVzWatchdogThread(void *pvData) -+{ -+ PVRSRV_DATA *psPVRSRVData = pvData; -+ IMG_HANDLE hOSEvent; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32Timeout = PVR_AUTOVZ_WDG_PERIOD_MS / 3; -+ -+ /* Open an event on the devices watchdog event object so we can listen on it -+ and abort the devices watchdog thread. 
*/ -+ eError = OSEventObjectOpen(psPVRSRVData->hAutoVzWatchdogEvObj, &hOSEvent); -+ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen"); -+ -+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) -+ while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && -+ !psPVRSRVData->bUnload) -+#else -+ while (!psPVRSRVData->bUnload) -+#endif -+ { -+ /* Wait time between polls (done at the start of the loop to allow devices -+ to initialise) or for the event signal (shutdown or power on). */ -+ eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)ui32Timeout * 1000); -+ -+ List_PVRSRV_DEVICE_NODE_ForEach(psPVRSRVData->psDeviceNodeList, -+ AutoVzWatchdogThread_ForEachCb); -+ } -+ -+ eError = OSEventObjectClose(hOSEvent); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectClose"); -+} -+#endif /* SUPPORT_AUTOVZ */ -+ -+PVRSRV_DATA *PVRSRVGetPVRSRVData(void) -+{ -+ return gpsPVRSRVData; -+} -+ -+static PVRSRV_ERROR InitialiseInfoPageTimeouts(PVRSRV_DATA *psPVRSRVData) -+{ -+ if (NULL == psPVRSRVData) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_VALUE_RETRIES] = WAIT_TRY_COUNT; -+ psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_VALUE_TIMEOUT_MS] = -+ ((MAX_HW_TIME_US / 10000) + 1000); -+ /* TIMEOUT_INFO_VALUE_TIMEOUT_MS resolves to... -+ vp : 2000 + 1000 -+ emu : 2000 + 1000 -+ rgx_nohw : 50 + 1000 -+ plato : 30000 + 1000 (VIRTUAL_PLATFORM or EMULATOR) -+ 50 + 1000 (otherwise) -+ */ -+ -+ psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_CONDITION_RETRIES] = 5; -+ psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_CONDITION_TIMEOUT_MS] = -+ ((MAX_HW_TIME_US / 10000) + 100); -+ /* TIMEOUT_INFO_CONDITION_TIMEOUT_MS resolves to... -+ vp : 2000 + 100 -+ emu : 2000 + 100 -+ rgx_nohw : 50 + 100 -+ plato : 30000 + 100 (VIRTUAL_PLATFORM or EMULATOR) -+ 50 + 100 (otherwise) -+ */ -+ -+ psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_RETRIES] = 10; -+ -+ psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] = -+ MAX_HW_TIME_US / 1000U; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR PopulateInfoPageBridges(PVRSRV_DATA *psPVRSRVData) -+{ -+ PVR_RETURN_IF_INVALID_PARAM(psPVRSRVData); -+ -+ psPVRSRVData->pui32InfoPage[BRIDGE_INFO_PVR_BRIDGES] = gui32PVRBridges; -+ -+#if defined(SUPPORT_RGX) -+ psPVRSRVData->pui32InfoPage[BRIDGE_INFO_RGX_BRIDGES] = gui32RGXBridges; -+#else -+ psPVRSRVData->pui32InfoPage[BRIDGE_INFO_RGX_BRIDGES] = 0; -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+static void _ThreadsDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle, -+ IMG_UINT32 ui32VerbLevel, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ PVR_UNREFERENCED_PARAMETER(hDbgRequestHandle); -+ -+ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) -+ { -+ PVR_DUMPDEBUG_LOG("------[ Server Thread Summary ]------"); -+ OSThreadDumpInfo(pfnDumpDebugPrintf, pvDumpDebugFile); -+ } -+} -+ -+PVRSRV_ERROR -+PVRSRVCommonDriverInit(void) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVRSRV_DATA *psPVRSRVData = NULL; -+ -+ IMG_UINT32 ui32AppHintCleanupThreadPriority; -+ IMG_UINT32 ui32AppHintWatchdogThreadPriority; -+ IMG_BOOL bEnablePageFaultDebug; -+ IMG_BOOL bEnableFullSyncTracking; -+ -+ void *pvAppHintState = NULL; -+ IMG_UINT32 ui32AppHintDefault; -+ IMG_BOOL bAppHintDefault; -+ -+ /* -+ * As this function performs one time driver initialisation, use the -+ * Services global device-independent data to determine whether or not -+ * this function has already been called. 
-+ */ -+ if (gpsPVRSRVData) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Driver already initialised", __func__)); -+ return PVRSRV_ERROR_ALREADY_EXISTS; -+ } -+ -+ eError = DIInit(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ eError = PVRSRVStatsInitialise(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+#endif /* PVRSRV_ENABLE_PROCESS_STATS */ -+ -+#if defined(SUPPORT_DI_BRG_IMPL) -+ eError = PVRDIImplBrgRegister(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+#endif -+ -+ eError = HTB_CreateDIEntry(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+ /* -+ * Allocate the device-independent data. Do NOT track this allocation. -+ */ -+ psPVRSRVData = OSAllocZMemNoStats(sizeof(*gpsPVRSRVData)); -+ PVR_GOTO_IF_NOMEM(psPVRSRVData, eError, Error); -+ -+ /* Now it is set up, point gpsPVRSRVData to the actual data */ -+ gpsPVRSRVData = psPVRSRVData; -+ -+ eError = OSWRLockCreate(&gpsPVRSRVData->hDeviceNodeListLock); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+ /* Register the driver context debug table */ -+ eError = PVRSRVRegisterDriverDbgTable(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+ /* Register the Server Thread Debug notifier */ -+ eError = PVRSRVRegisterDriverDbgRequestNotify(&gpsPVRSRVData->hThreadsDbgReqNotify, -+ _ThreadsDebugRequestNotify, -+ DEBUG_REQUEST_SRV, -+ NULL); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+ /* -+ * Initialise the server bridges -+ */ -+ eError = ServerBridgeInit(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+ eError = DevmemIntInit(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+ eError = DebugCommonInitDriver(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+ eError = BridgeDispatcherInit(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+ /* Init any OS specific's */ -+ eError = OSInitEnvData(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+ /* Early init. 
server cache maintenance */ -+ eError = CacheOpInit(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ RIInitKM(); -+#endif -+ -+ bAppHintDefault = PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG; -+ OSCreateAppHintState(&pvAppHintState); -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnablePageFaultDebug, -+ &bAppHintDefault, &bEnablePageFaultDebug); -+ OSFreeAppHintState(pvAppHintState); -+ -+ if (bEnablePageFaultDebug) -+ { -+ eError = DevicememHistoryInitKM(); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevicememHistoryInitKM", Error); -+ } -+ -+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY) -+ eError = CPUMappingHistoryInit(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+#endif -+ -+ eError = PMRInit(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+#if defined(SUPPORT_DISPLAY_CLASS) -+ eError = DCInit(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+#endif -+ -+ /* Initialise overall system state */ -+ gpsPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_OK; -+ -+ /* Create an event object */ -+ eError = OSEventObjectCreate("PVRSRV_GLOBAL_EVENTOBJECT", &gpsPVRSRVData->hGlobalEventObject); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ gpsPVRSRVData->ui32GEOConsecutiveTimeouts = 0; -+ -+ eError = PVRSRVCmdCompleteInit(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+ eError = PVRSRVHandleInit(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+ OSCreateAppHintState(&pvAppHintState); -+ ui32AppHintDefault = PVRSRV_APPHINT_CLEANUPTHREADPRIORITY; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, CleanupThreadPriority, -+ &ui32AppHintDefault, &ui32AppHintCleanupThreadPriority); -+ -+ ui32AppHintDefault = PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, WatchdogThreadPriority, -+ &ui32AppHintDefault, &ui32AppHintWatchdogThreadPriority); -+ -+ bAppHintDefault = PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING; -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableFullSyncTracking, -+ &bAppHintDefault, &bEnableFullSyncTracking); -+ OSFreeAppHintState(pvAppHintState); -+ pvAppHintState = NULL; -+ -+ eError = HostMemDeviceCreate(&gpsPVRSRVData->psHostMemDeviceNode); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+ eError = _CleanupThreadPrepare(gpsPVRSRVData); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_CleanupThreadPrepare", Error); -+ -+ /* Create a thread which is used to do the deferred cleanup */ -+ eError = OSThreadCreatePriority(&gpsPVRSRVData->hCleanupThread, -+ "pvr_defer_free", -+ CleanupThread, -+ CleanupThreadDumpInfo, -+ IMG_TRUE, -+ gpsPVRSRVData, -+ ui32AppHintCleanupThreadPriority); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreatePriority:1", Error); -+ -+ /* Create the devices watchdog event object */ -+ eError = OSEventObjectCreate("PVRSRV_DEVICESWATCHDOG_EVENTOBJECT", &gpsPVRSRVData->hDevicesWatchdogEvObj); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", Error); -+ -+ /* Create a thread which is used to detect fatal errors */ -+ eError = OSThreadCreatePriority(&gpsPVRSRVData->hDevicesWatchdogThread, -+ "pvr_device_wdg", -+ DevicesWatchdogThread, -+ NULL, -+ IMG_TRUE, -+ gpsPVRSRVData, -+ ui32AppHintWatchdogThreadPriority); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreatePriority:2", Error); -+ -+#if defined(SUPPORT_AUTOVZ) -+ /* Create the devices watchdog event object */ -+ eError = OSEventObjectCreate("PVRSRV_AUTOVZ_WATCHDOG_EVENTOBJECT", &gpsPVRSRVData->hAutoVzWatchdogEvObj); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", Error); -+ -+ /* Create a thread that maintains the FW-KM connection by regularly updating the virtualization 
watchdog */ -+ eError = OSThreadCreatePriority(&gpsPVRSRVData->hAutoVzWatchdogThread, -+ "pvr_autovz_wdg", -+ AutoVzWatchdogThread, -+ NULL, -+ IMG_TRUE, -+ gpsPVRSRVData, -+ OS_THREAD_HIGHEST_PRIORITY); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreatePriority:3", Error); -+#endif /* SUPPORT_AUTOVZ */ -+ -+#if defined(SUPPORT_RGX) -+ eError = OSLockCreate(&gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", Error); -+#endif -+ -+ /* Initialise the Transport Layer */ -+ eError = TLInit(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+ /* Initialise pdump */ -+ eError = PDUMPINIT(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+ g_ui32InitFlags |= INIT_DATA_ENABLE_PDUMPINIT; -+ -+ /* Initialise TL control stream */ -+ eError = TLStreamCreate(&psPVRSRVData->hTLCtrlStream, -+ PVRSRV_TL_CTLR_STREAM, PVRSRV_TL_CTLR_STREAM_SIZE, -+ TL_OPMODE_DROP_OLDEST, NULL, NULL, NULL, -+ NULL); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "TLStreamCreate"); -+ psPVRSRVData->hTLCtrlStream = NULL; -+ } -+ -+ eError = InfoPageCreate(psPVRSRVData); -+ PVR_LOG_GOTO_IF_ERROR(eError, "InfoPageCreate", Error); -+ -+ -+ /* Initialise the Timeout Info */ -+ eError = InitialiseInfoPageTimeouts(psPVRSRVData); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+ eError = PopulateInfoPageBridges(psPVRSRVData); -+ -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+ if (bEnableFullSyncTracking) -+ { -+ psPVRSRVData->pui32InfoPage[DEBUG_FEATURE_FLAGS] |= DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED; -+ } -+ if (bEnablePageFaultDebug) -+ { -+ psPVRSRVData->pui32InfoPage[DEBUG_FEATURE_FLAGS] |= DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED; -+ } -+ -+ { -+ IMG_UINT64 *pui64InfoPage; -+ -+ pui64InfoPage = IMG_OFFSET_ADDR_DW(psPVRSRVData->pui32InfoPage, -+ DEVMEM_INFO_PHYS_BUF_MAX_SIZE); -+ *pui64InfoPage = PMR_MAX_SUPPORTED_SIZE; -+ } -+ -+ /* Initialise the Host Trace Buffer */ -+ eError = HTBInit(); -+ PVR_GOTO_IF_ERROR(eError, Error); -+ -+#if defined(SUPPORT_RGX) -+ RGXHWPerfClientInitAppHintCallbacks(); -+#endif -+ -+ /* Late init. client cache maintenance via info. page */ -+ eError = CacheOpInit2(); -+ PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpInit2", Error); -+ -+#if defined(SUPPORT_FALLBACK_FENCE_SYNC) -+ eError = SyncFbRegisterSyncFunctions(); -+ PVR_LOG_GOTO_IF_ERROR(eError, "SyncFbRegisterSyncFunctions", Error); -+#endif -+ -+#if defined(PDUMP) -+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT) -+ /* If initialising the device on first connection, we will -+ * bind PDump capture to the first device we connect to later. -+ */ -+ psPVRSRVData->ui32PDumpBoundDevice = PVRSRV_MAX_DEVICES; -+#else -+ /* If not initialising the device on first connection, bind PDump -+ * capture to device 0. This is because we need to capture PDump -+ * during device initialisation but only want to capture PDump for -+ * a single device (by default, device 0). 
-+ */ -+ psPVRSRVData->ui32PDumpBoundDevice = 0; -+#endif -+#endif -+ -+ return 0; -+ -+Error: -+ PVRSRVCommonDriverDeInit(); -+ return eError; -+} -+ -+void -+PVRSRVCommonDriverDeInit(void) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_BOOL bEnablePageFaultDebug = IMG_FALSE; -+ -+ if (gpsPVRSRVData == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: missing device-independent data", -+ __func__)); -+ return; -+ } -+ -+ if (gpsPVRSRVData->pui32InfoPage != NULL) -+ { -+ bEnablePageFaultDebug = GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED; -+ } -+ -+ gpsPVRSRVData->bUnload = IMG_TRUE; -+ -+#if defined(SUPPORT_RGX) -+ PVRSRVDestroyHWPerfHostThread(); -+ if (gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock) -+ { -+ OSLockDestroy(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); -+ gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock = NULL; -+ } -+#endif -+ -+ if (gpsPVRSRVData->hGlobalEventObject) -+ { -+ OSEventObjectSignal(gpsPVRSRVData->hGlobalEventObject); -+ } -+ -+#if defined(SUPPORT_AUTOVZ) -+ /* Stop and cleanup the devices watchdog thread */ -+ if (gpsPVRSRVData->hAutoVzWatchdogThread) -+ { -+ LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) -+ { -+ if (gpsPVRSRVData->hAutoVzWatchdogEvObj) -+ { -+ eError = OSEventObjectSignal(gpsPVRSRVData->hAutoVzWatchdogEvObj); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); -+ } -+ -+ eError = OSThreadDestroy(gpsPVRSRVData->hAutoVzWatchdogThread); -+ if (PVRSRV_OK == eError) -+ { -+ gpsPVRSRVData->hAutoVzWatchdogThread = NULL; -+ break; -+ } -+ OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); -+ } -+ -+ if (gpsPVRSRVData->hAutoVzWatchdogEvObj) -+ { -+ eError = OSEventObjectDestroy(gpsPVRSRVData->hAutoVzWatchdogEvObj); -+ gpsPVRSRVData->hAutoVzWatchdogEvObj = NULL; -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); -+ } -+#endif /* SUPPORT_AUTOVZ */ -+ -+ /* Stop and cleanup the devices watchdog thread */ -+ if (gpsPVRSRVData->hDevicesWatchdogThread) -+ { -+ LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) -+ { -+ if (gpsPVRSRVData->hDevicesWatchdogEvObj) -+ { -+ eError = OSEventObjectSignal(gpsPVRSRVData->hDevicesWatchdogEvObj); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); -+ } -+ -+ eError = OSThreadDestroy(gpsPVRSRVData->hDevicesWatchdogThread); -+ if (PVRSRV_OK == eError) -+ { -+ gpsPVRSRVData->hDevicesWatchdogThread = NULL; -+ break; -+ } -+ OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); -+ } -+ -+ if (gpsPVRSRVData->hDevicesWatchdogEvObj) -+ { -+ eError = OSEventObjectDestroy(gpsPVRSRVData->hDevicesWatchdogEvObj); -+ gpsPVRSRVData->hDevicesWatchdogEvObj = NULL; -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); -+ } -+ -+ /* Stop and cleanup the deferred clean up thread, event object and -+ * deferred context list. 
-+ */ -+ if (gpsPVRSRVData->hCleanupThread) -+ { -+ LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) -+ { -+ if (gpsPVRSRVData->hCleanupEventObject) -+ { -+ eError = OSEventObjectSignal(gpsPVRSRVData->hCleanupEventObject); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); -+ } -+ -+ eError = OSThreadDestroy(gpsPVRSRVData->hCleanupThread); -+ if (PVRSRV_OK == eError) -+ { -+ gpsPVRSRVData->hCleanupThread = NULL; -+ break; -+ } -+ OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); -+ } -+ -+ if (gpsPVRSRVData->hCleanupEventObject) -+ { -+ eError = OSEventObjectDestroy(gpsPVRSRVData->hCleanupEventObject); -+ gpsPVRSRVData->hCleanupEventObject = NULL; -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); -+ } -+ -+ /* Tear down the HTB before PVRSRVHandleDeInit() removes its TL handle */ -+ /* HTB De-init happens in device de-registration currently */ -+ eError = HTBDeInit(); -+ PVR_LOG_IF_ERROR(eError, "HTBDeInit"); -+ -+ /* Tear down CacheOp framework information page first */ -+ CacheOpDeInit2(); -+ -+ /* Clean up information page */ -+ InfoPageDestroy(gpsPVRSRVData); -+ -+ /* Close the TL control plane stream. */ -+ if (gpsPVRSRVData->hTLCtrlStream != NULL) -+ { -+ TLStreamClose(gpsPVRSRVData->hTLCtrlStream); -+ } -+ -+ /* deinitialise pdump */ -+ if ((g_ui32InitFlags & INIT_DATA_ENABLE_PDUMPINIT) > 0) -+ { -+ PDUMPDEINIT(); -+ } -+ -+ /* Clean up Transport Layer resources that remain */ -+ TLDeInit(); -+ -+ HostMemDeviceDestroy(gpsPVRSRVData->psHostMemDeviceNode); -+ gpsPVRSRVData->psHostMemDeviceNode = NULL; -+ -+ eError = PVRSRVHandleDeInit(); -+ PVR_LOG_IF_ERROR(eError, "PVRSRVHandleDeInit"); -+ -+ /* destroy event object */ -+ if (gpsPVRSRVData->hGlobalEventObject) -+ { -+ OSEventObjectDestroy(gpsPVRSRVData->hGlobalEventObject); -+ gpsPVRSRVData->hGlobalEventObject = NULL; -+ } -+ -+ PVRSRVCmdCompleteDeinit(); -+ -+#if defined(SUPPORT_DISPLAY_CLASS) -+ eError = DCDeInit(); -+ PVR_LOG_IF_ERROR(eError, "DCDeInit"); -+#endif -+ -+ eError = PMRDeInit(); -+ PVR_LOG_IF_ERROR(eError, "PMRDeInit"); -+ -+ BridgeDispatcherDeinit(); -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ RIDeInitKM(); -+#endif -+ -+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY) -+ CPUMappingHistoryDeInit(); -+#endif -+ -+ if (bEnablePageFaultDebug) -+ { -+ /* Clear all allocated history tracking data */ -+ DevicememHistoryDeInitKM(); -+ } -+ -+ CacheOpDeInit(); -+ -+ OSDeInitEnvData(); -+ -+ (void) DevmemIntDeInit(); -+ -+ ServerBridgeDeInit(); -+ -+ HTB_DestroyDIEntry(); -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ PVRSRVStatsDestroyDI(); /* Stage 1 freeing */ -+#endif /* PVRSRV_ENABLE_PROCESS_STATS */ -+ -+ DebugCommonDeInitDriver(); -+ -+ DIDeInit(); -+ -+ if (gpsPVRSRVData->hThreadsDbgReqNotify) -+ { -+ PVRSRVUnregisterDriverDbgRequestNotify(gpsPVRSRVData->hThreadsDbgReqNotify); -+ } -+ -+ PVRSRVUnregisterDriverDbgTable(); -+ -+ OSWRLockDestroy(gpsPVRSRVData->hDeviceNodeListLock); -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ PVRSRVStatsDestroy(); /* Stage 2 freeing */ -+#endif /* PVRSRV_ENABLE_PROCESS_STATS */ -+ -+ OSFreeMemNoStats(gpsPVRSRVData); /* Not trackable */ -+ -+ gpsPVRSRVData = NULL; -+} -+ -+static void _SysDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, -+ IMG_UINT32 ui32VerbLevel, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ /* Only dump info once */ -+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hDebugRequestHandle; -+ -+ 
PVR_DUMPDEBUG_LOG("------[ System Summary Device ID:%d ]------", psDeviceNode->sDevId.ui32InternalID); -+ -+ switch (psDeviceNode->eCurrentSysPowerState) -+ { -+ case PVRSRV_SYS_POWER_STATE_OFF: -+ PVR_DUMPDEBUG_LOG("Device System Power State: OFF"); -+ break; -+ case PVRSRV_SYS_POWER_STATE_ON: -+ PVR_DUMPDEBUG_LOG("Device System Power State: ON"); -+ break; -+ default: -+ PVR_DUMPDEBUG_LOG("Device System Power State: UNKNOWN (%d)", -+ psDeviceNode->eCurrentSysPowerState); -+ break; -+ } -+ -+ PVR_DUMPDEBUG_LOG("MaxHWTOut: %dus, WtTryCt: %d, WDGTOut(on,off): (%dms,%dms)", -+ MAX_HW_TIME_US, WAIT_TRY_COUNT, DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT, DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT); -+ -+ SysDebugInfo(psDeviceNode->psDevConfig, pfnDumpDebugPrintf, pvDumpDebugFile); -+} -+ -+PHYS_HEAP_CONFIG* PVRSRVFindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig, -+ PHYS_HEAP_USAGE_FLAGS ui32Flags) -+{ -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++) -+ { -+ if (BITMASK_HAS(psDevConfig->pasPhysHeaps[i].ui32UsageFlags, ui32Flags)) -+ { -+ return &psDevConfig->pasPhysHeaps[i]; -+ } -+ } -+ -+ return NULL; -+} -+ -+/*************************************************************************/ /*! -+@Function PVRSRVAcquireInternalID -+@Description Returns the lowest free device ID. -+@Output pui32InternalID The device ID -+@Return PVRSRV_ERROR PVRSRV_OK or an error code -+*/ /**************************************************************************/ -+static PVRSRV_ERROR PVRSRVAcquireInternalID(IMG_UINT32 *pui32InternalID) -+{ -+ IMG_UINT32 ui32InternalID = 0; -+ IMG_BOOL bFound = IMG_FALSE; -+ -+ for (ui32InternalID = 0; -+ ui32InternalID < PVRSRV_MAX_DEVICES; -+ ui32InternalID++) -+ { -+ if (PVRSRVGetDeviceInstance(ui32InternalID) == NULL) -+ { -+ bFound = IMG_TRUE; -+ break; -+ } -+ } -+ -+ if (bFound) -+ { -+ *pui32InternalID = ui32InternalID; -+ return PVRSRV_OK; -+ } -+ else -+ { -+ return PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVAILABLE; -+ } -+} -+ -+PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, -+ IMG_INT32 i32KernelDeviceID, -+ PVRSRV_DEVICE_NODE **ppsDeviceNode) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_ERROR eError; -+ PVRSRV_DEVICE_CONFIG *psDevConfig; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ IMG_UINT32 ui32AppHintDefault; -+ IMG_UINT32 ui32AppHintDriverMode; -+ -+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) -+ IMG_UINT32 ui32AppHintPhysMemTestPasses; -+#endif -+ void *pvAppHintState = NULL; -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ IMG_HANDLE hProcessStats; -+#endif -+ IMG_BOOL bAppHintDefault; -+ IMG_BOOL bEnablePageFaultDebug = IMG_FALSE; -+#if defined(SUPPORT_AUTOVZ) -+ IMG_BOOL bAutoVzGPUPowerdown = IMG_FALSE; -+#endif -+ -+ MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceCreate: DevId %d", i32KernelDeviceID); -+ -+ /* Read driver mode (i.e. 
native, host or guest) AppHint early as it is -+ required by SysDevInit */ -+ ui32AppHintDefault = PVRSRV_APPHINT_DRIVERMODE; -+ OSCreateAppHintState(&pvAppHintState); -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, DriverMode, -+ &ui32AppHintDefault, &ui32AppHintDriverMode); -+ psPVRSRVData->eDriverMode = PVRSRV_VZ_APPHINT_MODE(ui32AppHintDriverMode); -+ psPVRSRVData->bForceApphintDriverMode = PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(ui32AppHintDriverMode); -+ -+ /* Determine if we've got EnablePageFaultDebug set or not */ -+ bAppHintDefault = PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG; -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnablePageFaultDebug, -+ &bAppHintDefault, &bEnablePageFaultDebug); -+ -+#if defined(SUPPORT_AUTOVZ) -+ bAppHintDefault = IMG_FALSE; -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, AutoVzGPUPowerdown, -+ &bAppHintDefault, &bAutoVzGPUPowerdown); -+#endif -+ OSFreeAppHintState(pvAppHintState); -+ pvAppHintState = NULL; -+ -+ psDeviceNode = OSAllocZMemNoStats(sizeof(*psDeviceNode)); -+ PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "psDeviceNode"); -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ /* Allocate process statistics */ -+ eError = PVRSRVStatsRegisterProcess(&hProcessStats); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVStatsRegisterProcess", ErrorFreeDeviceNode); -+#endif -+ -+ dllist_init(&psDeviceNode->sCleanupThreadWorkList); -+ -+ /* Record setting of EnablePageFaultDebug in device-node */ -+ psDeviceNode->bEnablePFDebug = bEnablePageFaultDebug; -+#if defined(SUPPORT_AUTOVZ) -+ psDeviceNode->bAutoVzAllowGPUPowerdown = bAutoVzGPUPowerdown; -+#endif -+ psDeviceNode->sDevId.i32KernelDeviceID = i32KernelDeviceID; -+ eError = PVRSRVAcquireInternalID(&psDeviceNode->sDevId.ui32InternalID); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAcquireInternalID", ErrorDeregisterStats); -+ -+ eError = SysDevInit(pvOSDevice, &psDevConfig); -+ PVR_LOG_GOTO_IF_ERROR(eError, "SysDevInit", ErrorDeregisterStats); -+ -+ PVR_ASSERT(psDevConfig); -+ PVR_ASSERT(psDevConfig->pvOSDevice == pvOSDevice); -+ PVR_ASSERT(!psDevConfig->psDevNode); -+ -+ if ((psDevConfig->eDefaultHeap != PVRSRV_PHYS_HEAP_GPU_LOCAL) && -+ (psDevConfig->eDefaultHeap != PVRSRV_PHYS_HEAP_CPU_LOCAL)) -+ { -+ PVR_LOG_MSG(PVR_DBG_ERROR, "DEFAULT Heap is invalid, " -+ "it must be GPU_LOCAL or CPU_LOCAL"); -+ PVR_LOG_GOTO_IF_ERROR(eError, "SysDevInit", ErrorDeregisterStats); -+ } -+ PVR_DPF((PVR_DBG_MESSAGE, "Device PhysHeap Default: %s", -+ (psDevConfig->eDefaultHeap == PVRSRV_PHYS_HEAP_CPU_LOCAL) -+ ? 
"CPU_LOCAL" : "GPU_LOCAL")); -+ -+ PVRSRVDeviceSetState(psDeviceNode, PVRSRV_DEVICE_STATE_CREATING); -+ -+ if (psDevConfig->pfnGpuDomainPower) -+ { -+ psDeviceNode->eCurrentSysPowerState = psDevConfig->pfnGpuDomainPower(psDeviceNode); -+ } -+ else -+ { -+ /* If the System Layer doesn't provide a function to query the power state -+ * of the system hardware, use a default implementation that keeps track of -+ * the power state locally and assumes the system starting state */ -+ psDevConfig->pfnGpuDomainPower = PVRSRVDefaultDomainPower; -+ -+#if defined(SUPPORT_AUTOVZ) -+ psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON; -+#else -+ psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_OFF; -+#endif -+ } -+ -+ psDeviceNode->psDevConfig = psDevConfig; -+ psDevConfig->psDevNode = psDeviceNode; -+ -+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) -+ if (PVRSRV_VZ_MODE_IS(NATIVE)) -+ { -+ /* Read AppHint - Configurable memory test pass count */ -+ ui32AppHintDefault = 0; -+ OSCreateAppHintState(&pvAppHintState); -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, PhysMemTestPasses, -+ &ui32AppHintDefault, &ui32AppHintPhysMemTestPasses); -+ OSFreeAppHintState(pvAppHintState); -+ pvAppHintState = NULL; -+ -+ if (ui32AppHintPhysMemTestPasses > 0) -+ { -+ eError = PhysMemTest(psDevConfig, ui32AppHintPhysMemTestPasses); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysMemTest", ErrorSysDevDeInit); -+ } -+ } -+#endif -+ -+ /* Initialise the paravirtualised connection */ -+ if (!PVRSRV_VZ_MODE_IS(NATIVE)) -+ { -+ PvzConnectionInit(); -+ PVR_GOTO_IF_ERROR(eError, ErrorSysDevDeInit); -+ } -+ -+ BIT_SET(psDevConfig->psDevNode->ui32VmState, RGXFW_HOST_DRIVER_ID); -+ -+ /* Next update value will be 0xFFFFFFF7 since sync prim starts with 0xFFFFFFF6. -+ * Has to be set before call to PMRInitDevice(). */ -+ psDeviceNode->ui32NextMMUInvalidateUpdate = 0xFFFFFFF7U; -+#if defined(SUPPORT_PMR_DEFERRED_FREE) || defined(SUPPORT_MMU_DEFERRED_FREE) -+ psDeviceNode->uiPowerOffCounter = 0; -+ psDeviceNode->uiPowerOffCounterNext = 1; -+#endif -+ -+ eError = PVRSRVRegisterDeviceDbgTable(psDeviceNode); -+ PVR_GOTO_IF_ERROR(eError, ErrorPvzConnectionDeInit); -+ -+ eError = PVRSRVPowerLockInit(psDeviceNode); -+ PVR_GOTO_IF_ERROR(eError, ErrorUnregisterDbgTable); -+ -+ eError = PhysHeapInitDeviceHeaps(psDeviceNode, psDevConfig); -+ PVR_GOTO_IF_ERROR(eError, ErrorPowerLockDeInit); -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ eError = PMRInitDevice(psDeviceNode); -+ PVR_GOTO_IF_ERROR(eError, ErrorPhysHeapDeInitDeviceHeaps); -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+#if defined(SUPPORT_RGX) -+ /* Requirements: -+ * registered GPU and FW local heaps */ -+ /* debug table */ -+ eError = RGXRegisterDevice(psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "RGXRegisterDevice"); -+ eError = PVRSRV_ERROR_DEVICE_REGISTER_FAILED; -+ goto ErrorPMRDeInitDevice; -+ } -+#endif -+ -+ /* Inform the device layer PhysHeaps are now initialised so that device -+ * specific heaps can be obtained along with carrying out any Vz setup. */ -+ if (psDeviceNode->pfnPhysMemDeviceHeapsInit != NULL) -+ { -+ eError = psDeviceNode->pfnPhysMemDeviceHeapsInit(psDeviceNode); -+ PVR_GOTO_IF_ERROR(eError, ErrorPMRDeInitDevice); -+ } -+ -+ /* Carry out initialisation of a dedicated FW MMU data, if the FW CPU has -+ * an MMU separate to the GPU MMU e.g. MIPS based FW. 
*/ -+ if (psDeviceNode->pfnFwMMUInit != NULL) -+ { -+ eError = psDeviceNode->pfnFwMMUInit(psDeviceNode); -+ PVR_GOTO_IF_ERROR(eError, ErrorFwMMUDeinit); -+ } -+ -+ eError = SyncServerInit(psDeviceNode); -+ PVR_GOTO_IF_ERROR(eError, ErrorDeInitRgx); -+ -+ eError = SyncCheckpointInit(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "SyncCheckpointInit", ErrorSyncCheckpointInit); -+ -+ /* -+ * This is registered before doing device specific initialisation to ensure -+ * generic device information is dumped first during a debug request. -+ */ -+ eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDeviceNode->hDbgReqNotify, -+ psDeviceNode, -+ _SysDebugRequestNotify, -+ DEBUG_REQUEST_SYS, -+ psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterDeviceDbgRequestNotify", ErrorRegDbgReqNotify); -+ -+#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) -+ eError = InitDVFS(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "InitDVFS", ErrorDVFSInitFail); -+#endif -+ -+ OSAtomicWrite(&psDeviceNode->iNumClockSpeedChanges, 0); -+ -+ OSWRLockCreate(&psDeviceNode->hMemoryContextPageFaultNotifyListLock); -+ if (psDeviceNode->hMemoryContextPageFaultNotifyListLock == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for PF notify list", -+ __func__)); -+ goto ErrorPageFaultLockFailCreate; -+ } -+ -+ dllist_init(&psDeviceNode->sMemoryContextPageFaultNotifyListHead); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "Registered device %p", psDeviceNode)); -+ PVR_DPF((PVR_DBG_MESSAGE, "Register bank address = 0x%08lx", -+ (unsigned long)psDevConfig->sRegsCpuPBase.uiAddr)); -+ PVR_DPF((PVR_DBG_MESSAGE, "IRQ = %d", psDevConfig->ui32IRQ)); -+ -+/* SUPPORT_ALT_REGBASE is defined for rogue cores only */ -+#if defined(SUPPORT_RGX) && defined(SUPPORT_ALT_REGBASE) -+ { -+ IMG_DEV_PHYADDR sRegsGpuPBase; -+ -+ PhysHeapCpuPAddrToDevPAddr(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL], -+ 1, -+ &sRegsGpuPBase, -+ &(psDeviceNode->psDevConfig->sRegsCpuPBase)); -+ -+ PVR_LOG(("%s: Using alternate Register bank GPU address: 0x%08lx (orig: 0x%08lx)", __func__, -+ (unsigned long)psDevConfig->sAltRegsGpuPBase.uiAddr, -+ (unsigned long)sRegsGpuPBase.uiAddr)); -+ } -+#endif -+ -+ eError = DebugCommonInitDevice(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DebugCommonInitDevice", -+ ErrorDestroyMemoryContextPageFaultNotifyListLock); -+ -+#if defined(PVR_TESTING_UTILS) -+ TUtilsInit(psDeviceNode); -+#endif -+ /* Create the devicemem_history hook for the device. We need to -+ * have the debug-info instantiated before calling this. -+ */ -+ if (psDeviceNode->bEnablePFDebug) -+ { -+ eError = DevicememHistoryDeviceCreate(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevicememHistoryDeviceCreate", ErrorDebugCommonDeInitDevice); -+ } -+ -+#if defined(__linux__) -+ /* Register the device specific AppHints so individual AppHints can be -+ * configured before the FW is initialised. This must be called after -+ * DebugCommonInitDevice() above as it depends on the created gpuXX/apphint -+ * DI Group. 
-+ */ -+ { -+ int iError = pvr_apphint_device_register(psDeviceNode); -+ PVR_LOG_IF_FALSE(iError == 0, "pvr_apphint_device_register() failed"); -+ } -+#endif /* defined(__linux__) */ -+ -+#if defined(SUPPORT_RGX) -+ RGXHWPerfInitAppHintCallbacks(psDeviceNode); -+#endif -+ -+ /* Finally insert the device into the dev-list and set it as active */ -+ OSWRLockAcquireWrite(psPVRSRVData->hDeviceNodeListLock); -+ List_PVRSRV_DEVICE_NODE_InsertTail(&psPVRSRVData->psDeviceNodeList, -+ psDeviceNode); -+ psPVRSRVData->ui32RegisteredDevices++; -+ OSWRLockReleaseWrite(psPVRSRVData->hDeviceNodeListLock); -+ -+ *ppsDeviceNode = psDeviceNode; -+ -+#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) -+ /* Register the DVFS device now the device node is present in the dev-list */ -+ eError = RegisterDVFSDevice(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RegisterDVFSDevice", ErrorRegisterDVFSDeviceFail); -+#endif -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ /* Close the process statistics */ -+ PVRSRVStatsDeregisterProcess(hProcessStats); -+#endif -+ -+#if defined(SUPPORT_VALIDATION) -+ OSLockCreateNoStats(&psDeviceNode->hValidationLock); -+#endif -+ -+ PVRSRVDeviceSetState(psDeviceNode, PVRSRV_DEVICE_STATE_CREATED); -+ -+ return PVRSRV_OK; -+ -+#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) -+ErrorRegisterDVFSDeviceFail: -+ /* Remove the device from the list */ -+ OSWRLockAcquireWrite(psPVRSRVData->hDeviceNodeListLock); -+ List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode); -+ psPVRSRVData->ui32RegisteredDevices--; -+ OSWRLockReleaseWrite(psPVRSRVData->hDeviceNodeListLock); -+ -+#if defined(__linux__) -+ pvr_apphint_device_unregister(psDeviceNode); -+#endif /* defined(__linux__) */ -+ -+ /* Remove the devicemem_history hook if we created it */ -+ if (psDeviceNode->bEnablePFDebug) -+ { -+ DevicememHistoryDeviceDestroy(psDeviceNode); -+ } -+#endif -+ -+ErrorDebugCommonDeInitDevice: -+#if defined(PVR_TESTING_UTILS) -+ TUtilsDeinit(psDeviceNode); -+#endif -+ DebugCommonDeInitDevice(psDeviceNode); -+ -+ErrorDestroyMemoryContextPageFaultNotifyListLock: -+ OSWRLockDestroy(psDeviceNode->hMemoryContextPageFaultNotifyListLock); -+ psDeviceNode->hMemoryContextPageFaultNotifyListLock = NULL; -+ -+ErrorPageFaultLockFailCreate: -+ -+#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) -+ErrorDVFSInitFail: -+#endif -+ -+ if (psDeviceNode->hDbgReqNotify) -+ { -+ PVRSRVUnregisterDeviceDbgRequestNotify(psDeviceNode->hDbgReqNotify); -+ } -+ -+ErrorRegDbgReqNotify: -+ SyncCheckpointDeinit(psDeviceNode); -+ -+ErrorSyncCheckpointInit: -+ SyncServerDeinit(psDeviceNode); -+ -+ErrorDeInitRgx: -+#if defined(SUPPORT_RGX) -+ DevDeInitRGX(psDeviceNode); -+#endif -+ErrorFwMMUDeinit: -+ErrorPMRDeInitDevice: -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ PMRDeInitDevice(psDeviceNode); -+ErrorPhysHeapDeInitDeviceHeaps: -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ PhysHeapDeInitDeviceHeaps(psDeviceNode); -+ErrorPowerLockDeInit: -+ PVRSRVPowerLockDeInit(psDeviceNode); -+ErrorUnregisterDbgTable: -+ PVRSRVUnregisterDeviceDbgTable(psDeviceNode); -+ErrorPvzConnectionDeInit: -+ psDevConfig->psDevNode = NULL; -+ if (!PVRSRV_VZ_MODE_IS(NATIVE)) -+ { -+ PvzConnectionDeInit(); -+ } -+ErrorSysDevDeInit: -+ SysDevDeInit(psDevConfig); -+ErrorDeregisterStats: -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ /* Close the process statistics */ -+ PVRSRVStatsDeregisterProcess(hProcessStats); -+ErrorFreeDeviceNode: -+#endif -+ OSFreeMemNoStats(psDeviceNode); -+ -+ return eError; -+} -+ -+#if defined(SUPPORT_RGX) -+static PVRSRV_ERROR 
_SetDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice, -+ const void *psPrivate, IMG_BOOL bValue) -+{ -+ PVRSRV_ERROR eResult = PVRSRV_OK; -+ IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate); -+ -+ PVR_RETURN_IF_INVALID_PARAM(ui32Flag); -+ PVR_RETURN_IF_FALSE(psDevice != APPHINT_OF_DRIVER_NO_DEVICE, -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ eResult = RGXSetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice, -+ ui32Flag, bValue); -+ -+ return eResult; -+} -+ -+static PVRSRV_ERROR _ReadDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice, -+ const void *psPrivate, IMG_BOOL *pbValue) -+{ -+ PVRSRV_ERROR eResult = PVRSRV_OK; -+ IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate); -+ IMG_UINT32 ui32State; -+ -+ PVR_RETURN_IF_INVALID_PARAM(ui32Flag); -+ PVR_RETURN_IF_FALSE(psDevice != APPHINT_OF_DRIVER_NO_DEVICE, -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ eResult = RGXGetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice, -+ &ui32State); -+ -+ if (PVRSRV_OK == eResult) -+ { -+ *pbValue = (ui32State & ui32Flag)? IMG_TRUE: IMG_FALSE; -+ } -+ -+ return eResult; -+} -+static PVRSRV_ERROR _SetStateFlag(const PVRSRV_DEVICE_NODE *psDevice, -+ const void *psPrivate, IMG_BOOL bValue) -+{ -+ PVRSRV_ERROR eResult = PVRSRV_OK; -+ IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate); -+ -+ PVR_RETURN_IF_INVALID_PARAM(ui32Flag); -+ PVR_RETURN_IF_FALSE(psDevice != APPHINT_OF_DRIVER_NO_DEVICE, -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ eResult = RGXStateFlagCtrl((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice, -+ ui32Flag, NULL, bValue); -+ -+ return eResult; -+} -+ -+static PVRSRV_ERROR _ReadStateFlag(const PVRSRV_DEVICE_NODE *psDevice, -+ const void *psPrivate, IMG_BOOL *pbValue) -+{ -+ IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate); -+ IMG_UINT32 ui32State; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_RETURN_IF_INVALID_PARAM(ui32Flag); -+ PVR_RETURN_IF_FALSE(psDevice != APPHINT_OF_DRIVER_NO_DEVICE, -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevice->pvDevice; -+ -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags, -+ INVALIDATE); -+ ui32State = psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags; -+ -+ if (pbValue) -+ { -+ *pbValue = (ui32State & ui32Flag)? IMG_TRUE: IMG_FALSE; -+ } -+ -+ return PVRSRV_OK; -+} -+#endif -+ -+PVRSRV_ERROR PVRSRVCommonDeviceInitialise(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ IMG_BOOL bInitSuccesful = IMG_FALSE; -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ IMG_HANDLE hProcessStats; -+#endif -+ PVRSRV_ERROR eError; -+ -+ PDUMPCOMMENT(psDeviceNode, "Common Device Initialisation"); -+ -+ MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceInitialise: DevId %d", psDeviceNode->sDevId.i32KernelDeviceID); -+ -+ if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_CREATED) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Device already initialised", __func__)); -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ -+ /* Allocate an OSEventObject to use in the Freeze / Thaw transitioning to -+ * allow LBIST requests to be serviced. This object is held for the life of -+ * this device-node. 
-+ */ -+ eError = OSEventObjectCreate("PVRSRV_DEVICE_THREAD_EVENT_OBJECT", -+ &psDeviceNode->hDeviceThreadEvObj); -+ PVR_LOG_RETURN_IF_ERROR(eError, "OSEventObjectCreate"); -+ -+ eError = OSEventObjectOpen(psDeviceNode->hDeviceThreadEvObj, -+ &psDeviceNode->hDeviceFreezeThaw); -+ -+ if (PVRSRV_OK != eError) -+ { -+ OSEventObjectDestroy(psDeviceNode->hDeviceThreadEvObj); -+ -+ PVR_LOG_RETURN_IF_ERROR(eError, "OSEventObjectOpen"); -+ } -+ -+ /* Initial zero-set for the number of Frozen threads */ -+ OSAtomicWrite(&psDeviceNode->iFreezeCount, 0); -+ OSAtomicWrite(&psDeviceNode->iTotalFreezes, 0); -+ -+ /* Initial zero-set for number of active threads on this device */ -+ OSAtomicWrite(&psDeviceNode->iThreadsActive, 0); -+ -+ /* Allocate devmem_history backing store for the device if we have -+ * EnablePageFaultDebug set -+ */ -+ if (psDeviceNode->bEnablePFDebug) -+ { -+ eError = DevicememHistoryDeviceInit(psDeviceNode); -+ PVR_LOG_RETURN_IF_ERROR(eError, "DevicememHistoryDeviceInit"); -+ } -+ -+#if defined(PDUMP) -+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT) -+ { -+ PVRSRV_DATA *psSRVData = PVRSRVGetPVRSRVData(); -+ -+ /* If first connection, bind this and future PDump clients to use this device */ -+ if (psSRVData->ui32PDumpBoundDevice == PVRSRV_MAX_DEVICES) -+ { -+ psSRVData->ui32PDumpBoundDevice = psDeviceNode->sDevId.ui32InternalID; -+ } -+ } -+#endif -+#endif -+ -+ /* Initialise Connection_Data access mechanism */ -+ dllist_init(&psDeviceNode->sConnections); -+ eError = OSLockCreate(&psDeviceNode->hConnectionsLock); -+ PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); -+ -+ /* Allocate process statistics */ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ eError = PVRSRVStatsRegisterProcess(&hProcessStats); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVStatsRegisterProcess"); -+#endif -+ -+ eError = MMU_InitDevice(psDeviceNode); -+ PVR_LOG_RETURN_IF_ERROR(eError, "MMU_InitDevice"); -+ -+#if defined(SUPPORT_RGX) -+ eError = RGXInit(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXInit", Exit); -+#endif -+ -+#if defined(SUPPORT_DMA_TRANSFER) -+ PVRSRVInitialiseDMA(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVInitialiseDMA", Exit); -+#endif -+ -+ bInitSuccesful = IMG_TRUE; -+ -+#if defined(SUPPORT_RGX) -+Exit: -+#endif -+ eError = PVRSRVDeviceFinalise(psDeviceNode, bInitSuccesful); -+ PVR_LOG_IF_ERROR(eError, "PVRSRVDeviceFinalise"); -+ -+#if defined(SUPPORT_RGX) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableClockGating, -+ _ReadStateFlag, _SetStateFlag, -+ APPHINT_OF_DRIVER_NO_DEVICE, -+ (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_CLKGATING_EN)); -+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableDMOverlap, -+ _ReadStateFlag, _SetStateFlag, -+ APPHINT_OF_DRIVER_NO_DEVICE, -+ (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_DM_OVERLAP)); -+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOnHWRTrigger, -+ _ReadStateFlag, _SetStateFlag, -+ psDeviceNode, -+ (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER)); -+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOutOfMemory, -+ _ReadStateFlag, _SetStateFlag, -+ psDeviceNode, -+ (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY)); -+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_CheckMList, -+ _ReadStateFlag, _SetStateFlag, -+ psDeviceNode, -+ (void*)((uintptr_t)RGXFWIF_INICFG_CHECK_MLIST_EN)); -+ } -+ -+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableFEDLogging, -+ _ReadDeviceFlag, _SetDeviceFlag, -+ psDeviceNode, -+ 
(void*)((uintptr_t)RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN)); -+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_ZeroFreelist, -+ _ReadDeviceFlag, _SetDeviceFlag, -+ psDeviceNode, -+ (void*)((uintptr_t)RGXKM_DEVICE_STATE_ZERO_FREELIST)); -+#if defined(SUPPORT_VALIDATION) -+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_GPUUnitsPowerChange, -+ _ReadDeviceFlag, _SetDeviceFlag, -+ psDeviceNode, -+ (void*)((uintptr_t)RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN)); -+#endif -+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisablePDumpPanic, -+ RGXQueryPdumpPanicDisable, RGXSetPdumpPanicDisable, -+ psDeviceNode, -+ NULL); -+#endif -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ /* Close the process statistics */ -+ PVRSRVStatsDeregisterProcess(hProcessStats); -+#endif -+ -+ return eError; -+} -+ -+void PVRSRVCommonDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_ERROR eError; -+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) -+ IMG_BOOL bForceUnload = IMG_FALSE; -+ -+ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) -+ { -+ bForceUnload = IMG_TRUE; -+ } -+#endif -+ -+ /* Remove DI hook for the devicemem_history for this device (if any). -+ * The associated devicemem_history buffers are freed by the final -+ * call to DevicememHistoryDeInitKM() as they are used asynchronously -+ * by other parts of the DDK. -+ */ -+ if (psDeviceNode->bEnablePFDebug) -+ { -+ DevicememHistoryDeviceDestroy(psDeviceNode); -+ } -+ -+ MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceDestroy: DevId %d", psDeviceNode->sDevId.i32KernelDeviceID); -+ -+ PVRSRVDeviceSetState(psDeviceNode, PVRSRV_DEVICE_STATE_DEINIT); -+ -+#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) -+ UnregisterDVFSDevice(psDeviceNode); -+#endif -+ -+#if defined(__linux__) -+ pvr_apphint_device_unregister(psDeviceNode); -+#endif /* defined(__linux__) */ -+ -+#if defined(PVR_TESTING_UTILS) -+ TUtilsDeinit(psDeviceNode); -+#endif -+ DebugCommonDeInitDevice(psDeviceNode); -+ -+ if (psDeviceNode->hMemoryContextPageFaultNotifyListLock != NULL) -+ { -+ OSWRLockDestroy(psDeviceNode->hMemoryContextPageFaultNotifyListLock); -+ } -+ -+#if defined(SUPPORT_VALIDATION) -+ OSLockDestroyNoStats(psDeviceNode->hValidationLock); -+ psDeviceNode->hValidationLock = NULL; -+#endif -+ -+#if defined(SUPPORT_FALLBACK_FENCE_SYNC) -+ SyncFbDeregisterDevice(psDeviceNode); -+#endif -+ /* Counter part to what gets done in PVRSRVDeviceFinalise */ -+ if (psDeviceNode->hSyncCheckpointContext) -+ { -+ SyncCheckpointContextDestroy(psDeviceNode->hSyncCheckpointContext); -+ psDeviceNode->hSyncCheckpointContext = NULL; -+ } -+ if (psDeviceNode->hSyncPrimContext) -+ { -+ if (psDeviceNode->psMMUCacheSyncPrim) -+ { -+ PVRSRV_CLIENT_SYNC_PRIM *psSync = psDeviceNode->psMMUCacheSyncPrim; -+ -+ /* Ensure there are no pending MMU Cache Ops in progress before freeing this sync. */ -+ eError = PVRSRVPollForValueKM(psDeviceNode, -+ psSync->pui32LinAddr, -+ psDeviceNode->ui32NextMMUInvalidateUpdate-1, -+ 0xFFFFFFFF, -+ POLL_FLAG_LOG_ERROR, -+ NULL); -+ PVR_LOG_IF_ERROR(eError, "PVRSRVPollForValueKM"); -+ -+ /* Important to set the device node pointer to NULL -+ * before we free the sync-prim to make sure we don't -+ * defer the freeing of the sync-prim's page tables itself. -+ * The sync is used to defer the MMU page table -+ * freeing. 
*/ -+ psDeviceNode->psMMUCacheSyncPrim = NULL; -+ -+ /* Free general purpose sync primitive */ -+ SyncPrimFree(psSync); -+ } -+ -+ SyncPrimContextDestroy(psDeviceNode->hSyncPrimContext); -+ psDeviceNode->hSyncPrimContext = NULL; -+ } -+ -+ eError = PVRSRVPowerLock(psDeviceNode); -+ if (eError == PVRSRV_OK) -+ { -+ IMG_BOOL bHasPowerLock = IMG_TRUE; -+ -+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) -+ /* -+ * Firmware probably not responding if bForceUnload is set, but we still want to unload the -+ * driver. -+ */ -+ if (!bForceUnload) -+#endif -+ { -+ /* Force idle device */ -+ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE); -+ PVR_LOG_IF_ERROR(eError, "PVRSRVDeviceIdleRequestKM"); -+ if (eError == PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) -+ { -+ bHasPowerLock = IMG_FALSE; -+ } -+ } -+ -+ if (bHasPowerLock) -+ { -+ /* Power down the device if necessary */ -+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, -+ PVRSRV_DEV_POWER_STATE_OFF, -+ PVRSRV_POWER_FLAGS_FORCED); -+ PVRSRVPowerUnlock(psDeviceNode); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "PVRSRVSetDevicePowerStateKM"); -+ -+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); -+ } -+ } -+ } -+ -+ PVRSRVDeviceSetState(psDeviceNode, PVRSRV_DEVICE_STATE_DEINIT_POWERED_OFF); -+ -+#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) -+ DeinitDVFS(psDeviceNode); -+#endif -+ -+ if (psDeviceNode->hDbgReqNotify) -+ { -+ PVRSRVUnregisterDeviceDbgRequestNotify(psDeviceNode->hDbgReqNotify); -+ } -+ -+ SyncCheckpointDeinit(psDeviceNode); -+ -+ SyncServerDeinit(psDeviceNode); -+ -+ MMU_DeInitDevice(psDeviceNode); -+ -+#if defined(SUPPORT_RGX) -+ DevDeInitRGX(psDeviceNode); -+#endif -+ -+#if defined(SUPPORT_PMR_DEFERRED_FREE) -+ /* must be called before PhysHeapDeInitDeviceHeaps() */ -+ PMRDeInitDevice(psDeviceNode); -+#endif /* defined(SUPPORT_PMR_DEFERRED_FREE) */ -+ -+ PhysHeapDeInitDeviceHeaps(psDeviceNode); -+ PVRSRVPowerLockDeInit(psDeviceNode); -+ -+ PVRSRVUnregisterDeviceDbgTable(psDeviceNode); -+ -+ /* Release the Connection-Data lock as late as possible. */ -+ if (psDeviceNode->hConnectionsLock) -+ { -+ OSLockDestroy(psDeviceNode->hConnectionsLock); -+ } -+ -+ /* Release the hDeviceThreadEvObj as late as possible. */ -+ if (psDeviceNode->hDeviceThreadEvObj) -+ { -+ OSEventObjectClose(psDeviceNode->hDeviceFreezeThaw); -+ -+ OSEventObjectDestroy(psDeviceNode->hDeviceThreadEvObj); -+ } -+ -+ PVR_ASSERT(OSAtomicRead(&psDeviceNode->iFreezeCount) == 0); -+ -+ psDeviceNode->psDevConfig->psDevNode = NULL; -+ -+ if (!PVRSRV_VZ_MODE_IS(NATIVE)) -+ { -+ PvzConnectionDeInit(); -+ } -+ SysDevDeInit(psDeviceNode->psDevConfig); -+ -+ PVRSRVCleanupThreadWaitForDevice(psDeviceNode); -+ -+ OSWRLockAcquireWrite(psPVRSRVData->hDeviceNodeListLock); -+ List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode); -+ psPVRSRVData->ui32RegisteredDevices--; -+ OSWRLockReleaseWrite(psPVRSRVData->hDeviceNodeListLock); -+ -+ OSFreeMemNoStats(psDeviceNode); -+} -+ -+/**************************************************************************/ /*! -+@Function PVRSRVDeviceFinalise -+@Description Performs the final parts of device initialisation. 
-+@Input psDeviceNode Device node of the device to finish -+ initialising -+@Input bInitSuccessful Whether or not device specific -+ initialisation was successful -+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise -+*/ /***************************************************************************/ -+PVRSRV_ERROR PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_BOOL bInitSuccessful) -+{ -+ PVRSRV_ERROR eError; -+ __maybe_unused PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice); -+ -+ if (bInitSuccessful) -+ { -+ eError = SyncCheckpointContextCreate(psDeviceNode, -+ &psDeviceNode->hSyncCheckpointContext); -+ PVR_LOG_GOTO_IF_ERROR(eError, "SyncCheckpointContextCreate", ErrorExit); -+#if defined(SUPPORT_FALLBACK_FENCE_SYNC) -+ eError = SyncFbRegisterDevice(psDeviceNode); -+ PVR_GOTO_IF_ERROR(eError, ErrorExit); -+#endif -+ eError = SyncPrimContextCreate(psDeviceNode, -+ &psDeviceNode->hSyncPrimContext); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "SyncPrimContextCreate"); -+ SyncCheckpointContextDestroy(psDeviceNode->hSyncCheckpointContext); -+ goto ErrorExit; -+ } -+ -+ /* Allocate MMU cache invalidate sync */ -+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext, -+ &psDeviceNode->psMMUCacheSyncPrim, -+ "pvrsrv dev MMU cache"); -+ PVR_LOG_GOTO_IF_ERROR(eError, "SyncPrimAlloc", ErrorExit); -+ -+ /* Set the sync prim value to a much higher value near the -+ * wrapping range. This is so any wrapping bugs would be -+ * seen early in the driver start-up. -+ */ -+ SyncPrimSet(psDeviceNode->psMMUCacheSyncPrim, 0xFFFFFFF6UL); -+ -+ eError = PVRSRVPowerLock(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPowerLock", ErrorExit); -+ -+ /* -+ * Always ensure a single power on command appears in the pdump. This -+ * should be the only power related call outside of PDUMPPOWCMDSTART -+ * and PDUMPPOWCMDEND. -+ */ -+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, -+ PVRSRV_DEV_POWER_STATE_ON, -+ PVRSRV_POWER_FLAGS_FORCED); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to set device %p power state to 'on' (%s)", -+ __func__, psDeviceNode, PVRSRVGetErrorString(eError))); -+ PVRSRVPowerUnlock(psDeviceNode); -+ goto ErrorExit; -+ } -+ -+#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) -+ eError = ValidateFWOnLoad(psDeviceNode->pvDevice); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "ValidateFWOnLoad"); -+ PVRSRVPowerUnlock(psDeviceNode); -+ return eError; -+ } -+#endif -+ -+ /* -+ * Guest driver must do a runtime compatibility check against the -+ * data provided by the Firmware. -+ */ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ eError = PVRSRVDevInitCompatCheck(psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed compatibility check for device %p (%s)", -+ __func__, psDeviceNode, PVRSRVGetErrorString(eError))); -+ PVRSRVPowerUnlock(psDeviceNode); -+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); -+ goto ErrorExit; -+ } -+ } -+ -+ PDUMPPOWCMDSTART(psDeviceNode); -+ -+ /* Force the device to idle if its default power state is off */ -+ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, -+ &PVRSRVDeviceIsDefaultStateOFF, -+ IMG_TRUE); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "PVRSRVDeviceIdleRequestKM"); -+ if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) -+ { -+ PVRSRVPowerUnlock(psDeviceNode); -+ } -+ goto ErrorExit; -+ } -+ -+ /* Place device into its default power state. 
*/ -+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, -+ PVRSRV_DEV_POWER_STATE_DEFAULT, -+ PVRSRV_POWER_FLAGS_FORCED); -+ PDUMPPOWCMDEND(psDeviceNode); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to set device %p into its default power state (%s)", -+ __func__, psDeviceNode, PVRSRVGetErrorString(eError))); -+ -+ PVRSRVPowerUnlock(psDeviceNode); -+ goto ErrorExit; -+ } -+ -+ PVRSRVPowerUnlock(psDeviceNode); -+ -+ /* -+ * If PDUMP is enabled and RGX device is supported, then initialise the -+ * performance counters that can be further modified in PDUMP. Then, -+ * before ending the init phase of the pdump, drain the commands put in -+ * the kCCB during the init phase. -+ */ -+#if defined(SUPPORT_RGX) -+#if defined(PDUMP) -+ { -+ eError = RGXInitHWPerfCounters(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitHWPerfCounters", ErrorExit); -+ -+ eError = RGXPdumpDrainKCCB(psDevInfo, -+ psDevInfo->psKernelCCBCtlLocal->ui32WriteOffset); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXPdumpDrainKCCB", ErrorExit); -+ } -+#endif -+#endif /* defined(SUPPORT_RGX) */ -+ /* Now that the device(s) are fully initialised set them as active */ -+ PVRSRVDeviceSetState(psDeviceNode, PVRSRV_DEVICE_STATE_ACTIVE); -+ eError = PVRSRV_OK; -+ } -+ else -+ { -+ /* Initialisation failed so set the device(s) into a bad state */ -+ PVRSRVDeviceSetState(psDeviceNode, PVRSRV_DEVICE_STATE_BAD); -+ eError = PVRSRV_ERROR_NOT_INITIALISED; -+ } -+ -+ /* Give PDump control a chance to end the init phase, depends on OS */ -+ PDUMPENDINITPHASE(psDeviceNode); -+ return eError; -+ -+ErrorExit: -+ /* Initialisation failed so set the device(s) into a bad state */ -+ PVRSRVDeviceSetState(psDeviceNode, PVRSRV_DEVICE_STATE_BAD); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ /* Only check devices which specify a compatibility check callback */ -+ if (psDeviceNode->pfnInitDeviceCompatCheck) -+ return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode); -+ else -+ return PVRSRV_OK; -+} -+ -+/* -+ PollForValueKM -+*/ -+static -+PVRSRV_ERROR PollForValueKM (volatile IMG_UINT32 __iomem *pui32LinMemAddr, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ IMG_UINT32 ui32Timeoutus, -+ IMG_UINT32 ui32PollPeriodus, -+ POLL_FLAGS ePollFlags, -+ PFN_INVALIDATE_CACHEFUNC pfnFwInvalidate) -+{ -+#if defined(NO_HARDWARE) -+ PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr); -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+ PVR_UNREFERENCED_PARAMETER(ui32Mask); -+ PVR_UNREFERENCED_PARAMETER(ui32Timeoutus); -+ PVR_UNREFERENCED_PARAMETER(ui32PollPeriodus); -+ PVR_UNREFERENCED_PARAMETER(ePollFlags); -+ return PVRSRV_OK; -+#else -+ IMG_UINT32 ui32ActualValue = 0xFFFFFFFFU; /* Initialiser only required to prevent incorrect warning */ -+ -+ LOOP_UNTIL_TIMEOUT(ui32Timeoutus) -+ { -+ if (pfnFwInvalidate) -+ { -+ pfnFwInvalidate((const volatile void __force *)pui32LinMemAddr, -+ sizeof(*pui32LinMemAddr), -+ PVRSRV_CACHE_OP_INVALIDATE); -+ } -+ -+ ui32ActualValue = OSReadHWReg32((void __iomem *)pui32LinMemAddr, 0) & ui32Mask; -+ -+ if (ui32ActualValue == ui32Value) -+ { -+ return PVRSRV_OK; -+ } -+ -+ if (gpsPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) -+ { -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ OSWaitus(ui32PollPeriodus); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ if (BITMASK_HAS(ePollFlags, POLL_FLAG_LOG_ERROR)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PollForValueKM: Timeout. 
Expected 0x%x but found 0x%x (mask 0x%x).", -+ ui32Value, ui32ActualValue, ui32Mask)); -+ } -+ -+ return PVRSRV_ERROR_TIMEOUT; -+#endif /* NO_HARDWARE */ -+} -+ -+ -+/* -+ PVRSRVPollForValueKM -+*/ -+PVRSRV_ERROR PVRSRVPollForValueKM (PVRSRV_DEVICE_NODE *psDevNode, -+ volatile IMG_UINT32 __iomem *pui32LinMemAddr, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ POLL_FLAGS ePollFlags, -+ PFN_INVALIDATE_CACHEFUNC pfnFwInvalidate) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = PollForValueKM(pui32LinMemAddr, ui32Value, ui32Mask, -+ MAX_HW_TIME_US, -+ MAX_HW_TIME_US/WAIT_TRY_COUNT, -+ ePollFlags, -+ pfnFwInvalidate); -+ if (eError != PVRSRV_OK && BITMASK_HAS(ePollFlags, POLL_FLAG_DEBUG_DUMP)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed! Error(%s) CPU linear address(%p) Expected value(%u)", -+ __func__, PVRSRVGetErrorString(eError), -+ pui32LinMemAddr, ui32Value)); -+ PVRSRVDebugRequest(psDevNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+PVRSRVWaitForValueKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PFN_INVALIDATE_CACHEFUNC pfnFwInvalidate) -+{ -+#if defined(NO_HARDWARE) -+ PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr); -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+ PVR_UNREFERENCED_PARAMETER(ui32Mask); -+ return PVRSRV_OK; -+#else -+ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ IMG_HANDLE hOSEvent; -+ PVRSRV_ERROR eError; -+ PVRSRV_ERROR eErrorWait; -+ IMG_UINT32 ui32ActualValue; -+ -+ eError = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hOSEvent); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectOpen", EventObjectOpenError); -+ -+ eError = PVRSRV_ERROR_TIMEOUT; /* Initialiser for following loop */ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ if (pfnFwInvalidate) -+ { -+ pfnFwInvalidate((const volatile void __force *)pui32LinMemAddr, -+ sizeof(*pui32LinMemAddr), -+ PVRSRV_CACHE_OP_INVALIDATE); -+ } -+ -+ ui32ActualValue = (OSReadDeviceMem32(pui32LinMemAddr) & ui32Mask); -+ -+ if (ui32ActualValue == ui32Value) -+ { -+ /* Expected value has been found */ -+ eError = PVRSRV_OK; -+ break; -+ } -+ else if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) -+ { -+ /* Services in bad state, don't wait any more */ -+ eError = PVRSRV_ERROR_NOT_READY; -+ break; -+ } -+ else -+ { -+ /* wait for event and retry */ -+ eErrorWait = OSEventObjectWait(hOSEvent); -+ if (eErrorWait != PVRSRV_OK && eErrorWait != PVRSRV_ERROR_TIMEOUT) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Failed with error %d. Found value 0x%x but was expected " -+ "to be 0x%x (Mask 0x%08x). Retrying", -+ __func__, -+ eErrorWait, -+ ui32ActualValue, -+ ui32Value, -+ ui32Mask)); -+ } -+ } -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ OSEventObjectClose(hOSEvent); -+ -+ /* One last check in case the object wait ended after the loop timeout... */ -+ if (eError != PVRSRV_OK && -+ (OSReadDeviceMem32(pui32LinMemAddr) & ui32Mask) == ui32Value) -+ { -+ eError = PVRSRV_OK; -+ } -+ -+ /* Provide event timeout information to aid the Device Watchdog Thread... 
*/ -+ if (eError == PVRSRV_OK) -+ { -+ psPVRSRVData->ui32GEOConsecutiveTimeouts = 0; -+ } -+ else if (eError == PVRSRV_ERROR_TIMEOUT) -+ { -+ psPVRSRVData->ui32GEOConsecutiveTimeouts++; -+ } -+ -+EventObjectOpenError: -+ -+ return eError; -+ -+#endif /* NO_HARDWARE */ -+} -+ -+int PVRSRVGetDriverStatus(void) -+{ -+ return PVRSRVGetPVRSRVData()->eServicesState; -+} -+ -+/* -+ PVRSRVSystemHasCacheSnooping -+*/ -+IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig) -+{ -+ if ((psDevConfig->eCacheSnoopingMode != PVRSRV_DEVICE_SNOOP_NONE) && -+ (psDevConfig->eCacheSnoopingMode != PVRSRV_DEVICE_SNOOP_EMULATED)) -+ { -+ return IMG_TRUE; -+ } -+ return IMG_FALSE; -+} -+ -+IMG_BOOL PVRSRVSystemSnoopingIsEmulated(PVRSRV_DEVICE_CONFIG *psDevConfig) -+{ -+ if (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_EMULATED) -+ { -+ return IMG_TRUE; -+ } -+ return IMG_FALSE; -+} -+ -+IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig) -+{ -+ if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CPU_ONLY) || -+ (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS)) -+ { -+ return IMG_TRUE; -+ } -+ return IMG_FALSE; -+} -+ -+IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig) -+{ -+ if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_DEVICE_ONLY) || -+ (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS)) -+ { -+ return IMG_TRUE; -+ } -+ return IMG_FALSE; -+} -+ -+IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig) -+{ -+ return psDevConfig->bHasNonMappableLocalMemory; -+} -+ -+/* -+ PVRSRVSystemWaitCycles -+*/ -+void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles) -+{ -+ /* Delay in us */ -+ IMG_UINT32 ui32Delayus = 1; -+ -+ /* obtain the device freq */ -+ if (psDevConfig->pfnClockFreqGet != NULL) -+ { -+ IMG_UINT32 ui32DeviceFreq; -+ -+ ui32DeviceFreq = psDevConfig->pfnClockFreqGet(psDevConfig->hSysData); -+ -+ ui32Delayus = (ui32Cycles*1000000)/ui32DeviceFreq; -+ -+ if (ui32Delayus == 0) -+ { -+ ui32Delayus = 1; -+ } -+ } -+ -+ OSWaitus(ui32Delayus); -+} -+ -+static void * -+PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, -+ va_list va) -+{ -+ void *pvOSDevice = va_arg(va, void *); -+ -+ if (psDeviceNode->psDevConfig->pvOSDevice == pvOSDevice) -+ { -+ return psDeviceNode; -+ } -+ -+ return NULL; -+} -+ -+PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice, -+ IMG_UINT32 ui32IRQ, -+ const IMG_CHAR *pszName, -+ PFN_LISR pfnLISR, -+ void *pvData, -+ IMG_HANDLE *phLISRData) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); -+ psDeviceNode = -+ List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, -+ &PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb, -+ pvOSDevice); -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ -+ if (!psDeviceNode) -+ { -+ /* Device can't be found in the list so it isn't in the system */ -+ PVR_DPF((PVR_DBG_ERROR, "%s: device %p with irq %d is not present", -+ __func__, pvOSDevice, ui32IRQ)); -+ return PVRSRV_ERROR_INVALID_DEVICE; -+ } -+ -+ return SysInstallDeviceLISR(psDeviceNode->psDevConfig->hSysData, ui32IRQ, -+ pszName, pfnLISR, pvData, phLISRData); -+} -+ -+PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData) -+{ -+ return SysUninstallDeviceLISR(hLISRData); -+} -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR) -+/* functions 
only used on rogue, but header defining them is common */ -+void SetAxiProtOSid(IMG_HANDLE hSysData, IMG_UINT32 ui32OSid, IMG_BOOL bState) -+{ -+ SysSetAxiProtOSid(hSysData, ui32OSid, bState); -+} -+ -+void SetTrustedDeviceAceEnabled(IMG_HANDLE hSysData) -+{ -+ SysSetTrustedDeviceAceEnabled(hSysData); -+} -+#endif -+ -+#if defined(SUPPORT_RGX) -+PVRSRV_ERROR PVRSRVCreateHWPerfHostThread(IMG_UINT32 ui32Timeout) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (!ui32Timeout) -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ OSLockAcquire(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); -+ -+ /* Create only once */ -+ if (gpsPVRSRVData->hHWPerfHostPeriodicThread == NULL) -+ { -+ /* Create the HWPerf event object */ -+ eError = OSEventObjectCreate("PVRSRV_HWPERFHOSTPERIODIC_EVENTOBJECT", &gpsPVRSRVData->hHWPerfHostPeriodicEvObj); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectCreate"); -+ -+ if (eError == PVRSRV_OK) -+ { -+ gpsPVRSRVData->bHWPerfHostThreadStop = IMG_FALSE; -+ gpsPVRSRVData->ui32HWPerfHostThreadTimeout = ui32Timeout; -+ /* Create a thread which is used to periodically emit host stream packets */ -+ eError = OSThreadCreate(&gpsPVRSRVData->hHWPerfHostPeriodicThread, -+ "pvr_hwperf_host", -+ HWPerfPeriodicHostEventsThread, -+ NULL, IMG_TRUE, gpsPVRSRVData); -+ PVR_LOG_IF_ERROR(eError, "OSThreadCreate"); -+ } -+ } -+ /* If the thread has already been created then just update the timeout and wake up thread */ -+ else -+ { -+ gpsPVRSRVData->ui32HWPerfHostThreadTimeout = ui32Timeout; -+ eError = OSEventObjectSignal(gpsPVRSRVData->hHWPerfHostPeriodicEvObj); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); -+ } -+ -+ OSLockRelease(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVDestroyHWPerfHostThread(void) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ OSLockAcquire(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); -+ -+ /* Stop and cleanup the HWPerf periodic thread */ -+ if (gpsPVRSRVData->hHWPerfHostPeriodicThread) -+ { -+ if (gpsPVRSRVData->hHWPerfHostPeriodicEvObj) -+ { -+ gpsPVRSRVData->bHWPerfHostThreadStop = IMG_TRUE; -+ eError = OSEventObjectSignal(gpsPVRSRVData->hHWPerfHostPeriodicEvObj); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); -+ } -+ LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) -+ { -+ eError = OSThreadDestroy(gpsPVRSRVData->hHWPerfHostPeriodicThread); -+ if (PVRSRV_OK == eError) -+ { -+ gpsPVRSRVData->hHWPerfHostPeriodicThread = NULL; -+ break; -+ } -+ OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); -+ -+ if (gpsPVRSRVData->hHWPerfHostPeriodicEvObj) -+ { -+ eError = OSEventObjectDestroy(gpsPVRSRVData->hHWPerfHostPeriodicEvObj); -+ gpsPVRSRVData->hHWPerfHostPeriodicEvObj = NULL; -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); -+ } -+ } -+ -+ OSLockRelease(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); -+ return eError; -+} -+#endif -+ -+/* -+ * Scan the list of known devices until we find the specific instance or -+ * exhaust the list -+ */ -+PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstance(IMG_UINT32 uiInstance) -+{ -+ PVRSRV_DEVICE_NODE *psDevNode; -+ -+ if (uiInstance >= gpsPVRSRVData->ui32RegisteredDevices) -+ { -+ return NULL; -+ } -+ OSWRLockAcquireRead(gpsPVRSRVData->hDeviceNodeListLock); -+ for (psDevNode = gpsPVRSRVData->psDeviceNodeList; -+ psDevNode != NULL; psDevNode = psDevNode->psNext) -+ { -+ if (uiInstance == psDevNode->sDevId.ui32InternalID) -+ { -+ break; -+ } -+ } -+ 
OSWRLockReleaseRead(gpsPVRSRVData->hDeviceNodeListLock); -+ -+ return psDevNode; -+} -+ -+PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstanceByKernelDevID(IMG_INT32 i32OSInstance) -+{ -+ PVRSRV_DEVICE_NODE *psDevNode; -+ -+ OSWRLockAcquireRead(gpsPVRSRVData->hDeviceNodeListLock); -+ for (psDevNode = gpsPVRSRVData->psDeviceNodeList; -+ psDevNode != NULL; psDevNode = psDevNode->psNext) -+ { -+ if (i32OSInstance == psDevNode->sDevId.i32KernelDeviceID) -+ { -+ MULTI_DEVICE_BRINGUP_DPF("%s: Found DevId %d. Retrieving node.", __func__, i32OSInstance); -+ break; -+ } -+ else -+ { -+ MULTI_DEVICE_BRINGUP_DPF("%s: Searching for DevId %d: Id %d not matching", __func__, i32OSInstance, psDevNode->sDevId.i32KernelDeviceID); -+ } -+ } -+ OSWRLockReleaseRead(gpsPVRSRVData->hDeviceNodeListLock); -+ -+ if (psDevNode == NULL) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: DevId %d not found.", __func__, i32OSInstance)); -+ } -+ return psDevNode; -+} -+ -+/* Default function for querying the power state of the system */ -+PVRSRV_SYS_POWER_STATE PVRSRVDefaultDomainPower(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ return psDevNode->eCurrentSysPowerState; -+} -+ -+#define _FROZEN 1 /* Device is already frozen */ -+#define _NOT_FROZEN 0 /* Device is not frozen */ -+ -+/* Freeze the specified device if not already frozen */ -+PVRSRV_ERROR PVRSRVDeviceFreeze(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ PVRSRV_ERROR eError; -+ IMG_BOOL bHasPowerLock = IMG_FALSE; -+ -+ /* Verify that given argument *IS* a recognised device node */ -+ PVRSRV_DEVICE_NODE *lpsDevNode; -+ -+ OSWRLockAcquireRead(gpsPVRSRVData->hDeviceNodeListLock); -+ for (lpsDevNode = gpsPVRSRVData->psDeviceNodeList; -+ lpsDevNode != NULL && lpsDevNode != psDevNode; -+ lpsDevNode = lpsDevNode->psNext) -+ ; -+ OSWRLockReleaseRead(gpsPVRSRVData->hDeviceNodeListLock); -+ -+ PVR_LOG_RETURN_IF_FALSE((lpsDevNode == psDevNode), "Device node not known", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ /* Mark the device as 'frozen' if not already marked */ -+ if (OSAtomicCompareExchange(&psDevNode->eFrozen, _NOT_FROZEN, -+ _FROZEN) == _FROZEN) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Device %p already frozen", __func__, -+ psDevNode)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* Ensure there are no other threads active within this device. */ -+ while (OSAtomicRead(&psDevNode->iThreadsActive) > 0) -+ { -+ OSReleaseThreadQuanta(); /* Let other threads execute */ -+ } -+ -+ /* Attempt to idle the GPU without power-down. If this fails, we try with -+ * a potential power-down. -+ */ -+ eError = PVRSRVPowerLock(psDevNode); -+ if (eError != PVRSRV_OK) -+ { -+ /* Device is powered down, we can continue -+ * as there are no in-flight requests in the GPU. -+ */ -+ PVR_DPF((PVR_DBG_WARNING, "%s: Unable to Idle Device (%p) [%u/%d]" -+ " Device powered down", -+ __func__, psDevNode, psDevNode->sDevId.ui32InternalID, -+ psDevNode->sDevId.i32KernelDeviceID)); -+ eError = PVRSRV_OK; -+ } -+ else -+ { -+ bHasPowerLock = IMG_TRUE; -+ -+ eError = PVRSRVDeviceIdleRequestKM(psDevNode, -+ &PVRSRVDeviceIsDefaultStateOFF, -+ IMG_TRUE); -+ } -+ -+ if (eError != PVRSRV_OK) -+ { -+ /* Failed to Idle. Need to remove the pending _FROZEN state. 
*/ -+ (void) OSAtomicExchange(&psDevNode->eFrozen, _NOT_FROZEN); -+ PVR_DPF((PVR_DBG_ERROR, "%s: Unable to Idle Device (%p) [%u/%d]", -+ __func__, psDevNode, psDevNode->sDevId.ui32InternalID, -+ psDevNode->sDevId.i32KernelDeviceID)); -+ -+ if (bHasPowerLock) -+ { -+ PVRSRVPowerUnlock(psDevNode); -+ } -+ return eError; -+ } -+ -+ /* Now change the device-state to not be ACTIVE until we unfreeze */ -+ if (psDevNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Unexpected Device State %u for device %p", -+ __func__, psDevNode->eDevState, psDevNode)); -+ -+ /* Reset device to UNFROZEN */ -+ (void) OSAtomicExchange(&psDevNode->eFrozen, _NOT_FROZEN); -+ -+ if (bHasPowerLock) -+ { -+ (void) PVRSRVDeviceIdleCancelRequestKM(psDevNode); -+ PVRSRVPowerUnlock(psDevNode); -+ } -+ else -+ { -+ (void) PVRSRVSetDeviceSystemPowerState(psDevNode, -+ PVRSRV_SYS_POWER_STATE_ON, -+ PVRSRV_POWER_FLAGS_NONE); -+ } -+ -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ PVRSRVDeviceSetState(psDevNode, PVRSRV_DEVICE_STATE_FROZEN); -+ -+ PVR_DPF((PVR_DBG_VERBOSE, "%s: Device (%p) [%u/%d] FROZEN", __func__, -+ psDevNode, -+ psDevNode->sDevId.ui32InternalID, -+ psDevNode->sDevId.i32KernelDeviceID)); -+ -+ OSAtomicIncrement(&psDevNode->iTotalFreezes); -+ -+ if (bHasPowerLock) -+ { -+ PVRSRVPowerUnlock(psDevNode); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/* Unfreeze / Thaw the specified device if frozen */ -+PVRSRV_ERROR PVRSRVDeviceThaw(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DEVICE_NODE *lpsDevNode; -+ IMG_BOOL bHasPowerLock = IMG_FALSE; -+ -+ /* Verify that the given argument *IS* a recognised device node */ -+ OSWRLockAcquireRead(gpsPVRSRVData->hDeviceNodeListLock); -+ for (lpsDevNode = gpsPVRSRVData->psDeviceNodeList; -+ lpsDevNode != NULL && lpsDevNode != psDevNode; -+ lpsDevNode = lpsDevNode->psNext) -+ ; -+ OSWRLockReleaseRead(gpsPVRSRVData->hDeviceNodeListLock); -+ -+ PVR_LOG_RETURN_IF_FALSE((lpsDevNode == psDevNode), "Device node not known", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ /* Unfreeze the device */ -+ if (OSAtomicCompareExchange(&psDevNode->eFrozen, _FROZEN, -+ _NOT_FROZEN) == _NOT_FROZEN) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Device (%p) [%u/%d] already unfrozen", -+ __func__, -+ psDevNode, psDevNode->sDevId.ui32InternalID, -+ psDevNode->sDevId.i32KernelDeviceID)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (psDevNode->eDevState != PVRSRV_DEVICE_STATE_FROZEN) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Unexpected Device state %u for device (%p) [%u/%d]", -+ __func__, psDevNode->eDevState, psDevNode, -+ psDevNode->sDevId.ui32InternalID, -+ psDevNode->sDevId.i32KernelDeviceID)); -+ } -+ else -+ { -+ PVRSRVDeviceSetState(psDevNode, PVRSRV_DEVICE_STATE_ACTIVE); -+ -+ PVR_DPF((PVR_DBG_VERBOSE, "%s: Device (%p) [%u/%d] UNFROZEN", __func__, -+ psDevNode, -+ psDevNode->sDevId.ui32InternalID, -+ psDevNode->sDevId.i32KernelDeviceID)); -+ } -+ -+ /* Now unblock the device by clearing any ForcedIdle state and/or -+ * powering-on the device if needed. -+ */ -+ eError = PVRSRVPowerLock(psDevNode); -+ -+ bHasPowerLock = (eError == PVRSRV_OK) ? 
IMG_TRUE : IMG_FALSE; -+ -+ if (bHasPowerLock) -+ { -+ eError = PVRSRVDeviceIdleCancelRequestKM(psDevNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Could not cancel Idle state: Device (%p) [%u/%d] '%s'", -+ __func__, -+ psDevNode, -+ psDevNode->sDevId.ui32InternalID, -+ psDevNode->sDevId.i32KernelDeviceID, -+ PVRSRVGetErrorString(eError))); -+ } -+ -+ PVRSRVPowerUnlock(psDevNode); -+ } -+ else -+ { -+ /* Force device back into POWER_STATE_ON as it must already be OFF. */ -+ (void) PVRSRVSetDeviceSystemPowerState(psDevNode, -+ PVRSRV_SYS_POWER_STATE_ON, -+ PVRSRV_POWER_FLAGS_NONE); -+ } -+ -+ /* Unblock any waiting threads */ -+ while (OSAtomicRead(&psDevNode->iFreezeCount) > 0) -+ { -+ eError = OSEventObjectSignal(psDevNode->hDeviceThreadEvObj); -+ if (OSAtomicRead(&psDevNode->iFreezeCount) > 0) -+ { -+ /* Sleep for 1ms to allow waiter to receive signal */ -+ OSSleepms(1U); -+ } -+ } -+ -+ /* Ensure that any blocked queues get rescheduled if we've woken up a -+ * waiter. -+ */ -+ PVRSRVCheckStatus(NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/***************************************************************************** -+ End of file (pvrsrv.c) -+*****************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv.h b/drivers/gpu/drm/img-rogue/pvrsrv.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv.h -@@ -0,0 +1,553 @@ -+/*************************************************************************/ /*! -+@File -+@Title PowerVR services server header file -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
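The freeze/thaw pair implemented just above is meant to bracket operations that must run with the GPU idled and no new user-mode work admitted. A minimal usage sketch, not part of the patch, assuming psDevNode came from PVRSRVGetDeviceInstance() and pfnWork is a caller-supplied callback:

/* Illustrative wrapper only: relies solely on PVRSRVDeviceFreeze()/Thaw()
 * implemented above in pvrsrv.c. */
static PVRSRV_ERROR RunWithDeviceFrozen(PVRSRV_DEVICE_NODE *psDevNode,
                                        PVRSRV_ERROR (*pfnWork)(PVRSRV_DEVICE_NODE *))
{
	PVRSRV_ERROR eError = PVRSRVDeviceFreeze(psDevNode);
	if (eError != PVRSRV_OK)
	{
		/* Unknown node or already frozen: nothing to undo. */
		return eError;
	}

	/* Device is idle and marked FROZEN: no new user-mode work is admitted. */
	eError = pfnWork(psDevNode);

	/* Thaw unconditionally so that threads blocked while the device was
	 * frozen are released even if the work itself failed. */
	(void) PVRSRVDeviceThaw(psDevNode);

	return eError;
}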
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PVRSRV_H -+#define PVRSRV_H -+ -+#include "connection_server.h" -+#include "pvrsrv_pool.h" -+#include "device.h" -+#include "power.h" -+#include "syscommon.h" -+#include "sysinfo.h" -+#include "physheap.h" -+#include "cache_ops.h" -+#include "pvr_notifier.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__) -+#define __pvrsrv_defined_struct_enum__ -+#include -+#endif -+ -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+#include "virt_validation_defs.h" -+#endif -+ -+#include "dma_support.h" -+#include "vz_vmm_pvz.h" -+ -+/*! -+ * For OSThreadDestroy(), which may require a retry -+ * Try for 100 ms to destroy an OS thread before failing -+ */ -+#define OS_THREAD_DESTROY_TIMEOUT_US 100000ULL -+#define OS_THREAD_DESTROY_RETRY_COUNT 10 -+ -+typedef enum _POLL_FLAGS_ -+{ -+ POLL_FLAG_NONE = 0, /* No message or dump is printed on poll timeout */ -+ POLL_FLAG_LOG_ERROR = 1, /* Log error on poll timeout */ -+ POLL_FLAG_DEBUG_DUMP = 2 /* Print debug dump on poll timeout */ -+} POLL_FLAGS; -+ -+typedef struct _BUILD_INFO_ -+{ -+ IMG_UINT32 ui32BuildOptions; -+ IMG_UINT32 ui32BuildVersion; -+ IMG_UINT32 ui32BuildRevision; -+ IMG_UINT32 ui32BuildType; -+#define BUILD_TYPE_DEBUG 0 -+#define BUILD_TYPE_RELEASE 1 -+ /* The above fields are self explanatory */ -+ /* B.V.N.C can be added later if required */ -+} BUILD_INFO; -+ -+typedef struct _DRIVER_INFO_ -+{ -+ BUILD_INFO sUMBuildInfo; -+ BUILD_INFO sKMBuildInfo; -+ IMG_UINT8 ui8UMSupportedArch; -+ IMG_UINT8 ui8KMBitArch; -+ -+#define BUILD_ARCH_64BIT (1 << 0) -+#define BUILD_ARCH_32BIT (1 << 1) -+#define BUILD_ARCH_BOTH (BUILD_ARCH_32BIT | BUILD_ARCH_64BIT) -+ IMG_BOOL bIsNoMatch; -+}DRIVER_INFO; -+ -+#if defined(SUPPORT_VALIDATION) && defined(__linux__) -+typedef struct MEM_LEAK_INTERVALS_TAG -+{ -+ IMG_UINT32 ui32OSAlloc; -+ IMG_UINT32 ui32GPU; -+ IMG_UINT32 ui32MMU; -+} MEM_LEAK_INTERVALS; -+#endif -+ -+typedef struct PVRSRV_DATA_TAG -+{ -+ PVRSRV_DRIVER_MODE eDriverMode; /*!< Driver mode (i.e. native, host or guest) */ -+ IMG_BOOL bForceApphintDriverMode; /*!< Indicate if driver mode is forced via apphint */ -+ DRIVER_INFO sDriverInfo; -+ IMG_UINT32 ui32DPFErrorCount; /*!< Number of Fatal/Error DPFs */ -+ -+ POSWR_LOCK hDeviceNodeListLock; /*!< Read-Write lock to protect the list of devices */ -+ PVRSRV_DEVICE_NODE *psDeviceNodeList; /*!< List head of device nodes */ -+ IMG_UINT32 ui32RegisteredDevices; -+ PVRSRV_DEVICE_NODE *psHostMemDeviceNode; /*!< DeviceNode to be used for device independent -+ host based memory allocations where the DevMem -+ framework is to be used e.g. 
TL */ -+ PVRSRV_SERVICES_STATE eServicesState; /*!< global driver state */ -+ -+ IMG_HANDLE hGlobalEventObject; /*!< OS Global Event Object */ -+ IMG_UINT32 ui32GEOConsecutiveTimeouts; /*!< OS Global Event Object Timeouts */ -+ -+ IMG_HANDLE hCleanupThread; /*!< Cleanup thread */ -+ IMG_HANDLE hCleanupEventObject; /*!< Event object to drive cleanup thread */ -+ POS_SPINLOCK hCleanupThreadWorkListLock; /*!< Lock protecting the cleanup thread work list */ -+ IMG_PID cleanupThreadPid; /*!< Cleanup thread process id */ -+ uintptr_t cleanupThreadTid; /*!< Cleanup thread id */ -+ ATOMIC_T i32NumCleanupItemsQueued; /*!< Number of items in cleanup thread work list */ -+ ATOMIC_T i32NumCleanupItemsNotCompleted; /*!< Number of items dropped from cleanup thread work list -+ after retry limit reached */ -+ ATOMIC_T i32CleanupItemTypes[PVRSRV_CLEANUP_TYPE_LAST]; /*!< Array containing the counts for different cleanup item types. */ -+ -+ IMG_HANDLE hDevicesWatchdogThread; /*!< Devices watchdog thread */ -+ IMG_HANDLE hDevicesWatchdogEvObj; /*! Event object to drive devices watchdog thread */ -+ volatile IMG_UINT32 ui32DevicesWatchdogPwrTrans; /*! Number of off -> on power state transitions */ -+#if !defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) -+ volatile IMG_UINT32 ui32DevicesWatchdogTimeout; /*! Timeout for the Devices watchdog Thread */ -+#endif -+#ifdef PVR_TESTING_UTILS -+ volatile IMG_UINT32 ui32DevicesWdWakeupCounter; /* Need this for the unit tests. */ -+#endif -+ -+#if defined(SUPPORT_AUTOVZ) -+ IMG_HANDLE hAutoVzWatchdogThread; /*!< Devices watchdog thread */ -+ IMG_HANDLE hAutoVzWatchdogEvObj; /*! Event object to drive devices watchdog thread */ -+#endif -+ -+ POS_LOCK hHWPerfHostPeriodicThread_Lock; /*!< Lock for the HWPerf Host periodic thread */ -+ IMG_HANDLE hHWPerfHostPeriodicThread; /*!< HWPerf Host periodic thread */ -+ IMG_HANDLE hHWPerfHostPeriodicEvObj; /*! Event object to drive HWPerf thread */ -+ volatile IMG_BOOL bHWPerfHostThreadStop; -+ IMG_UINT32 ui32HWPerfHostThreadTimeout; -+ -+ IMG_HANDLE hPvzConnection; /*!< PVZ connection used for cross-VM hyper-calls */ -+ POS_LOCK hPvzConnectionLock; /*!< Lock protecting PVZ connection */ -+ -+ IMG_BOOL bUnload; /*!< Driver unload is in progress */ -+ -+ IMG_HANDLE hTLCtrlStream; /*! Control plane for TL streams */ -+ -+ IMG_HANDLE hDriverThreadEventObject; /*! Event object relating to multi-threading in the Server */ -+ IMG_BOOL bDriverSuspended; /*! if TRUE, the driver is suspended and new threads should not enter */ -+ ATOMIC_T iNumActiveDriverThreads; /*! Number of threads active in the Server */ -+ -+ PMR *psInfoPagePMR; /*! Handle to exportable PMR of the information page. */ -+ IMG_UINT32 *pui32InfoPage; /*! CPU memory mapping for information page. */ -+ DEVMEM_MEMDESC *psInfoPageMemDesc; /*! Memory descriptor of the information page. */ -+ POS_LOCK hInfoPageLock; /*! Lock guarding access to information page. */ -+ -+#if defined(SUPPORT_VALIDATION) && defined(__linux__) -+ MEM_LEAK_INTERVALS sMemLeakIntervals; /*!< How often certain memory leak types will trigger */ -+#endif -+ IMG_HANDLE hThreadsDbgReqNotify; -+ -+ IMG_UINT32 ui32PDumpBoundDevice; /*!< PDump is bound to the device first connected to */ -+ ATOMIC_T iNumDriverTasksActive; /*!< Number of device-agnostic tasks active in the server */ -+} PVRSRV_DATA; -+ -+/* Function pointer used to invalidate cache between loops in wait/poll for value functions */ -+typedef PVRSRV_ERROR (*PFN_INVALIDATE_CACHEFUNC)(const volatile void*, IMG_UINT64, PVRSRV_CACHE_OP); -+ -+/*! 
-+****************************************************************************** -+ @Function PVRSRVGetPVRSRVData -+ -+ @Description Get a pointer to the global data -+ -+ @Return PVRSRV_DATA * -+******************************************************************************/ -+PVRSRV_DATA *PVRSRVGetPVRSRVData(void); -+ -+#define PVRSRV_KM_ERRORS ( PVRSRVGetPVRSRVData() ? PVRSRVGetPVRSRVData()->ui32DPFErrorCount : IMG_UINT32_MAX) -+#define PVRSRV_ERROR_LIMIT_REACHED (PVRSRV_KM_ERRORS == IMG_UINT32_MAX) -+#define PVRSRV_REPORT_ERROR() do { if (!(PVRSRV_ERROR_LIMIT_REACHED)) { PVRSRVGetPVRSRVData()->ui32DPFErrorCount++; } } while (0) -+ -+#define PVRSRV_VZ_MODE_IS(_expr) (DRIVER_MODE_##_expr == PVRSRVGetPVRSRVData()->eDriverMode) -+#define PVRSRV_VZ_RETN_IF_MODE(_expr) do { if ( PVRSRV_VZ_MODE_IS(_expr)) { return; } } while (0) -+#define PVRSRV_VZ_RETN_IF_NOT_MODE(_expr) do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return; } } while (0) -+#define PVRSRV_VZ_RET_IF_MODE(_expr, _rc) do { if ( PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while (0) -+#define PVRSRV_VZ_RET_IF_NOT_MODE(_expr, _rc) do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while (0) -+ -+/*! -+****************************************************************************** -+@Note The driver execution mode AppHint (i.e. PVRSRV_APPHINT_DRIVERMODE) -+ can be an override or non-override 32-bit value. An override value -+ has the MSB bit set & a non-override value has this MSB bit cleared. -+ Excluding this MSB bit & interpreting the remaining 31-bit as a -+ signed 31-bit integer, the mode values are: -+ [-1 native : 0 host : +1 guest ]. -+******************************************************************************/ -+#define PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(_expr) ((IMG_UINT32)(_expr)&(IMG_UINT32)(1<<31)) -+#define PVRSRV_VZ_APPHINT_MODE(_expr) \ -+ ((((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) == (IMG_UINT32)0x7FFFFFFF) ? DRIVER_MODE_NATIVE : \ -+ !((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) ? DRIVER_MODE_HOST : \ -+ ((IMG_UINT32)((IMG_UINT32)(_expr)&(IMG_UINT)0x7FFFFFFF)==(IMG_UINT32)0x1) ? DRIVER_MODE_GUEST : \ -+ ((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF)) -+ -+#define PVRSRV_VZ_TIME_SLICE_MAX (100ULL) -+ -+typedef struct _PHYS_HEAP_ITERATOR_ PHYS_HEAP_ITERATOR; -+ -+/*! -+****************************************************************************** -+ @Function LMA_HeapIteratorCreate -+ -+ @Description -+ Creates iterator for traversing physical heap requested by ePhysHeap. The -+ iterator will go through all of the segments (a segment is physically -+ contiguous) of the physical heap and return their CPU physical address and -+ size. -+ -+ @Input psDevNode: Pointer to device node struct. -+ @Input ePhysHeap: Find the matching heap. -+ @Output ppsIter: Pointer to the iterator object. -+ -+ @Return PVRSRV_OK upon success and PVRSRV_ERROR otherwise. -+******************************************************************************/ -+PVRSRV_ERROR LMA_HeapIteratorCreate(PVRSRV_DEVICE_NODE *psDevNode, -+ PVRSRV_PHYS_HEAP ePhysHeap, -+ PHYS_HEAP_ITERATOR **ppsIter); -+ -+/*! -+****************************************************************************** -+ @Function LMA_HeapIteratorDestroy -+ -+ @Description -+ Frees the iterator object created with LMA_HeapIteratorCreate. -+ -+ @Input psIter: Pointer to the iterator object. -+******************************************************************************/ -+void LMA_HeapIteratorDestroy(PHYS_HEAP_ITERATOR *psIter); -+ -+/*! 
-+****************************************************************************** -+ @Function LMA_HeapIteratorReset -+ -+ @Description -+ Resets the iterator to the first segment of the physical heap. -+ -+ @Input psIter: Pointer to the iterator object. -+******************************************************************************/ -+PVRSRV_ERROR LMA_HeapIteratorReset(PHYS_HEAP_ITERATOR *psIter); -+ -+/*! -+****************************************************************************** -+ @Function LMA_HeapIteratorNext -+ -+ @Description -+ Retrieves current segment's physical device address and size and moves the -+ iterator to the next element (if it exists). If the iterator reached the end of -+ the heap and no segment was retrieved, this function returns IMG_FALSE. -+ -+ @Input psIter: Pointer to the iterator object. -+ @Output psDevPAddr: Device physical address of the current segment. -+ @Output puiSize: Size of the current segment. -+ -+ @Return IMG_TRUE if a segment was found and retrieved, IMG_FALSE otherwise. -+******************************************************************************/ -+IMG_BOOL LMA_HeapIteratorNext(PHYS_HEAP_ITERATOR *psIter, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_UINT64 *puiSize); -+ -+/*! -+****************************************************************************** -+ @Function LMA_HeapIteratorGetHeapStats -+ -+ @Description -+ Retrieves the phys heap's usage statistics. -+ -+ @Input psIter: Pointer to the iterator object. -+ @Output puiTotalSize: Total size of the physical heap. -+ @Output puiInUseSize: Used space in the physical heap. -+ -+ @Return PVRSRV_OK upon success and PVRSRV_ERROR otherwise. -+******************************************************************************/ -+PVRSRV_ERROR LMA_HeapIteratorGetHeapStats(PHYS_HEAP_ITERATOR *psIter, -+ IMG_UINT64 *puiTotalSize, -+ IMG_UINT64 *puiInUseSize); -+ -+/*! -+****************************************************************************** -+ @Function PVRSRVPollForValueKM -+ -+ @Description -+ Polls for a value to match a masked read -+ -+ @Input psDevNode : Pointer to device node struct -+ @Input pui32LinMemAddr : CPU linear address to poll -+ @Input ui32Value : Required value -+ @Input ui32Mask : Mask -+ @Input ePollFlags : Flags controlling whether a poll failure is logged and -+ whether it should result in a debug dump. CAUTION: When calling this -+ function from code paths which are also used by debug-dumping code, -+ POLL_FLAG_DEBUG_DUMP MUST NOT be set, otherwise we might end up -+ requesting a debug dump in recursion and eventually blow up the call stack. -+ @Input pfnFwInvalidate : Function pointer to invalidation function used -+ each loop / poll. This is only used for FWmemctx allocations. -+ -+ @Return PVRSRV_ERROR : -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVPollForValueKM(PVRSRV_DEVICE_NODE *psDevNode, -+ volatile IMG_UINT32 __iomem *pui32LinMemAddr, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ POLL_FLAGS ePollFlags, -+ PFN_INVALIDATE_CACHEFUNC pfnFwInvalidate); -+ -+/*! -+****************************************************************************** -+ @Function PVRSRVWaitForValueKM -+ -+ @Description -+ Waits (using EventObjects) for a value to match a masked read -+ -+ @Input pui32LinMemAddr : CPU linear address to poll -+ @Input ui32Value : Required value -+ @Input ui32Mask : Mask to be applied before checking against -+ ui32Value -+ @Input pfnFwInvalidate : Function pointer to invalidation function used -+ each loop / wait.
This is only used for -+ FWmemctx allocations. -+ @Return PVRSRV_ERROR : -+******************************************************************************/ -+PVRSRV_ERROR -+PVRSRVWaitForValueKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PFN_INVALIDATE_CACHEFUNC pfnFwInvalidate); -+ -+/*! -+****************************************************************************** -+ @Function : PVRSRVSystemHasCacheSnooping -+ -+ @Description : Returns whether the system has cache snooping -+ -+ @Return : IMG_TRUE if the system has cache snooping -+******************************************************************************/ -+IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig); -+ -+/*! -+****************************************************************************** -+ @Function : PVRSRVSystemSnoopingIsEmulated -+ -+ @Description : Returns whether system cache snooping support is emulated -+ -+ @Return : IMG_TRUE if the system cache snooping is emulated in software -+******************************************************************************/ -+IMG_BOOL PVRSRVSystemSnoopingIsEmulated(PVRSRV_DEVICE_CONFIG *psDevConfig); -+ -+/*! -+****************************************************************************** -+ @Function : PVRSRVSystemSnoopingOfCPUCache -+ -+ @Description : Returns whether the system supports snooping of the CPU cache -+ -+ @Return : IMG_TRUE if the system has CPU cache snooping -+******************************************************************************/ -+IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig); -+ -+/*! -+****************************************************************************** -+ @Function : PVRSRVSystemSnoopingOfDeviceCache -+ -+ @Description : Returns whether the system supports snooping of the device cache -+ -+ @Return : IMG_TRUE if the system has device cache snooping -+******************************************************************************/ -+IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig); -+ -+/*! -+****************************************************************************** -+ @Function : PVRSRVSystemHasNonMappableLocalMemory -+ -+ @Description : Returns whether the device has non-mappable part of local memory -+ -+ @Return : IMG_TRUE if the device has non-mappable part of local memory -+******************************************************************************/ -+IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig); -+ -+/*! -+****************************************************************************** -+ @Function : PVRSRVSystemWaitCycles -+ -+ @Description : Waits for at least ui32Cycles of the Device clk. -+******************************************************************************/ -+void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles); -+ -+PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice, -+ IMG_UINT32 ui32IRQ, -+ const IMG_CHAR *pszName, -+ PFN_LISR pfnLISR, -+ void *pvData, -+ IMG_HANDLE *phLISRData); -+ -+PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData); -+ -+int PVRSRVGetDriverStatus(void); -+ -+/*! 
-+****************************************************************************** -+ @Function : PVRSRVIsBridgeEnabled -+ -+ @Description : Returns whether the given bridge group is enabled -+ -+ @Return : IMG_TRUE if the given bridge group is enabled -+******************************************************************************/ -+static inline IMG_BOOL PVRSRVIsBridgeEnabled(IMG_HANDLE hServices, IMG_UINT32 ui32BridgeGroup) -+{ -+ IMG_UINT32 ui32Bridges; -+ IMG_UINT32 ui32Offset; -+ -+ PVR_UNREFERENCED_PARAMETER(hServices); -+ -+#if defined(SUPPORT_RGX) -+ if (ui32BridgeGroup >= PVRSRV_BRIDGE_RGX_FIRST) -+ { -+ ui32Bridges = gui32RGXBridges; -+ ui32Offset = PVRSRV_BRIDGE_RGX_FIRST; -+ } -+ else -+#endif /* SUPPORT_RGX */ -+ { -+ ui32Bridges = gui32PVRBridges; -+ ui32Offset = PVRSRV_BRIDGE_FIRST; -+ } -+ -+ return (IMG_BOOL)(((1U << (ui32BridgeGroup - ui32Offset)) & ui32Bridges) != 0); -+} -+ -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+#if defined(EMULATOR) -+ void SetAxiProtOSid(IMG_HANDLE hSysData, IMG_UINT32 ui32OSid, IMG_BOOL bState); -+ void SetTrustedDeviceAceEnabled(IMG_HANDLE hSysData); -+#endif -+#endif -+ -+/*! -+****************************************************************************** -+ @Function : PVRSRVCreateHWPerfHostThread -+ -+ @Description : Creates HWPerf event object and thread unless already created -+ -+ @Input ui32Timeout : Initial timeout (ms) between updates on the HWPerf thread -+ -+ @Return : PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ -+ error code -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVCreateHWPerfHostThread(IMG_UINT32 ui32Timeout); -+ -+/*! -+****************************************************************************** -+ @Function : PVRSRVDestroyHWPerfHostThread -+ -+ @Description : Destroys HWPerf event object and thread if created -+ -+ @Return : PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ -+ error code -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVDestroyHWPerfHostThread(void); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVFindPhysHeapConfig -+@Description Find Phys Heap Config from Device Config. -+@Input psDevConfig Pointer to device config. -+@Input ui32Flags Find heap that matches flags. -+@Return PHYS_HEAP_CONFIG* Return a config, or NULL if not found. -+*/ /**************************************************************************/ -+PHYS_HEAP_CONFIG* PVRSRVFindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig, -+ PHYS_HEAP_USAGE_FLAGS ui32Flags); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVGetDeviceInstance -+@Description Return the specified device instance from Device node list. -+@Input ui32Instance Device instance to find -+@Return PVRSRV_DEVICE_NODE* Return a device node, or NULL if not found. -+*/ /**************************************************************************/ -+PVRSRV_DEVICE_NODE* PVRSRVGetDeviceInstance(IMG_UINT32 ui32Instance); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVGetDeviceInstanceByKernelDevID -+@Description Return the specified device instance by OS Id. -+@Input i32OSInstance OS device Id to find -+@Return PVRSRV_DEVICE_NODE* Return a device node, or NULL if not found. 
-+*/ /**************************************************************************/ -+PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstanceByKernelDevID(IMG_INT32 i32OSInstance); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVDefaultDomainPower -+@Description Returns psDevNode->eCurrentSysPowerState -+@Input PVRSRV_DEVICE_NODE* Device node -+@Return PVRSRV_SYS_POWER_STATE System power state tracked internally -+*/ /**************************************************************************/ -+PVRSRV_SYS_POWER_STATE PVRSRVDefaultDomainPower(PVRSRV_DEVICE_NODE *psDevNode); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVDeviceFreeze -+@Description Stops further processing from occurring on the device after the -+ current work ends and blocks user-mode tasks on the specified -+ device. Device node is put into POWER-DOWN state. -+@Input PVRSRV_DEVICE_NODE* Device node -+@Return PVRSRV_ERROR PVRSRV_OK on success otherwise a -+ PVRSRV_ERR_ code on failure -+*/ /**************************************************************************/ -+PVRSRV_ERROR PVRSRVDeviceFreeze(PVRSRV_DEVICE_NODE *psDevNode); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVDeviceThaw -+@Description Unfreezes a previously frozen device. Restarts work on the -+ device, if work queues are non-empty, and unblocks any -+ user-mode tasks on the specified device. -+@Input PVRSRV_DEVICE_NODE* Device node -+@Return PVRSRV_ERROR PVRSRV_OK on success otherwise a -+ PVRSRV_ERR_ code on failure -+*/ /**************************************************************************/ -+PVRSRV_ERROR PVRSRVDeviceThaw(PVRSRV_DEVICE_NODE *psDevNode); -+ -+#endif /* PVRSRV_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_apphint.h b/drivers/gpu/drm/img-rogue/pvrsrv_apphint.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_apphint.h -@@ -0,0 +1,71 @@ -+/**************************************************************************/ /*! -+@File -+@Title PowerVR AppHint generic interface -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
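The instance-lookup helpers declared above can be used to walk every registered device. A small sketch, not part of the patch, assuming instance indices are assigned contiguously from zero (which is how PVRSRVGetDeviceInstance() in pvrsrv.c treats them):

/* Lists each registered device; PVRSRVGetDeviceInstance() returns NULL once
 * the index passes the number of registered devices, ending the loop. */
static void ListRegisteredDevices(void)
{
	IMG_UINT32 uiInstance;

	for (uiInstance = 0; ; uiInstance++)
	{
		PVRSRV_DEVICE_NODE *psDevNode = PVRSRVGetDeviceInstance(uiInstance);

		if (psDevNode == NULL)
		{
			break;
		}

		PVR_DPF((PVR_DBG_VERBOSE, "instance %u -> kernel DevID %d",
		         uiInstance, psDevNode->sDevId.i32KernelDeviceID));
	}
}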
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+#if !defined(PVRSRV_APPHINT_H) -+#define PVRSRV_APPHINT_H -+ -+/* Supplied to PVRSRVAppHintRegisterHandlers*() functions when the apphint -+ * is a global driver apphint, i.e. apphints not present in -+ * APPHINT_DEBUGFS_DEVICE_ID, i.e. not per device. -+ */ -+#define APPHINT_OF_DRIVER_NO_DEVICE ((void*)-1U) -+ -+#if defined(__linux__) -+ -+#include "km_apphint.h" -+#define PVRSRVAppHintDumpState(d) pvr_apphint_dump_state(d) -+#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p) pvr_apphint_register_handlers_uint64(i,q,s,d,p) -+#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p) pvr_apphint_register_handlers_uint32(i,q,s,d,p) -+#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p) pvr_apphint_register_handlers_bool(i,q,s,d,p) -+#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p) pvr_apphint_register_handlers_string(i,q,s,d,p) -+ -+#else -+ -+#define PVRSRVAppHintDumpState(d) -+#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p) -+#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p) -+#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p) -+#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p) -+ -+#endif -+ -+#endif /* PVRSRV_APPHINT_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.c b/drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.c -@@ -0,0 +1,390 @@ -+/*************************************************************************/ /*! -+@File -+@Title PVR Common Bridge Init/Deinit Module (kernel side) -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements common PVR Bridge init/deinit code -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
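pvrsrv.h above notes that the driver execution mode is carried by an AppHint (PVRSRV_APPHINT_DRIVERMODE) and decoded with PVRSRV_VZ_APPHINT_MODE() and PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(). A worked decode of that 32-bit layout, with a small helper built only from those macros (the helper itself is illustrative, not from the patch):

#include "pvrsrv.h"

/* Example decodes of the DriverMode AppHint layout (values chosen purely for
 * illustration):
 *   0x7FFFFFFF : lower 31 bits = 0x7FFFFFFF (-1) -> DRIVER_MODE_NATIVE, MSB clear -> not an override
 *   0x80000000 : lower 31 bits = 0               -> DRIVER_MODE_HOST,   MSB set   -> override
 *   0x00000001 : lower 31 bits = 1               -> DRIVER_MODE_GUEST,  MSB clear -> not an override
 */
static IMG_BOOL IsGuestModeRequested(IMG_UINT32 ui32DriverModeAppHint)
{
	return (PVRSRV_VZ_APPHINT_MODE(ui32DriverModeAppHint) == DRIVER_MODE_GUEST)
	        ? IMG_TRUE : IMG_FALSE;
}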
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "pvrsrv_bridge_init.h" -+#include "srvcore.h" -+ -+/* These will go when full bridge gen comes in */ -+#if defined(PDUMP) -+PVRSRV_ERROR InitPDUMPCTRLBridge(void); -+void DeinitPDUMPCTRLBridge(void); -+PVRSRV_ERROR InitPDUMPBridge(void); -+void DeinitPDUMPBridge(void); -+PVRSRV_ERROR InitRGXPDUMPBridge(void); -+void DeinitRGXPDUMPBridge(void); -+#endif -+#if defined(SUPPORT_DISPLAY_CLASS) -+PVRSRV_ERROR InitDCBridge(void); -+void DeinitDCBridge(void); -+#endif -+PVRSRV_ERROR InitMMBridge(void); -+void DeinitMMBridge(void); -+#if !defined(EXCLUDE_CMM_BRIDGE) -+PVRSRV_ERROR InitCMMBridge(void); -+void DeinitCMMBridge(void); -+#endif -+PVRSRV_ERROR InitPDUMPMMBridge(void); -+void DeinitPDUMPMMBridge(void); -+PVRSRV_ERROR InitSRVCOREBridge(void); -+void DeinitSRVCOREBridge(void); -+PVRSRV_ERROR InitSYNCBridge(void); -+void DeinitSYNCBridge(void); -+#if defined(SUPPORT_DMA_TRANSFER) -+PVRSRV_ERROR InitDMABridge(void); -+void DeinitDMABridge(void); -+#endif -+ -+#if defined(SUPPORT_RGX) -+PVRSRV_ERROR InitRGXTA3DBridge(void); -+void DeinitRGXTA3DBridge(void); -+#if defined(SUPPORT_RGXTQ_BRIDGE) -+PVRSRV_ERROR InitRGXTQBridge(void); -+void DeinitRGXTQBridge(void); -+#endif /* defined(SUPPORT_RGXTQ_BRIDGE) */ -+ -+#if defined(SUPPORT_USC_BREAKPOINT) -+PVRSRV_ERROR InitRGXBREAKPOINTBridge(void); -+void DeinitRGXBREAKPOINTBridge(void); -+#endif -+PVRSRV_ERROR InitRGXFWDBGBridge(void); -+void DeinitRGXFWDBGBridge(void); -+PVRSRV_ERROR InitRGXHWPERFBridge(void); -+void DeinitRGXHWPERFBridge(void); -+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) -+PVRSRV_ERROR InitRGXREGCONFIGBridge(void); -+void DeinitRGXREGCONFIGBridge(void); -+#endif -+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) -+PVRSRV_ERROR InitRGXKICKSYNCBridge(void); -+void DeinitRGXKICKSYNCBridge(void); -+#endif -+#endif /* SUPPORT_RGX */ -+PVRSRV_ERROR InitCACHEBridge(void); -+void DeinitCACHEBridge(void); -+#if defined(SUPPORT_SECURE_EXPORT) -+PVRSRV_ERROR InitSMMBridge(void); -+void DeinitSMMBridge(void); -+#endif -+#if defined(PVRSRV_ENABLE_HTB) -+PVRSRV_ERROR InitHTBUFFERBridge(void); -+void DeinitHTBUFFERBridge(void); -+#endif -+PVRSRV_ERROR InitPVRTLBridge(void); -+void DeinitPVRTLBridge(void); -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+PVRSRV_ERROR InitRIBridge(void); -+void DeinitRIBridge(void); 
-+#endif -+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void); -+void DeinitDEVICEMEMHISTORYBridge(void); -+#if defined(SUPPORT_VALIDATION_BRIDGE) -+PVRSRV_ERROR InitVALIDATIONBridge(void); -+void DeinitVALIDATIONBridge(void); -+#endif -+#if defined(PVR_TESTING_UTILS) -+PVRSRV_ERROR InitTUTILSBridge(void); -+void DeinitTUTILSBridge(void); -+#endif -+PVRSRV_ERROR InitSYNCTRACKINGBridge(void); -+void DeinitSYNCTRACKINGBridge(void); -+#if defined(SUPPORT_WRAP_EXTMEM) -+PVRSRV_ERROR InitMMEXTMEMBridge(void); -+void DeinitMMEXTMEMBridge(void); -+#endif -+#if defined(SUPPORT_FALLBACK_FENCE_SYNC) -+PVRSRV_ERROR InitSYNCFALLBACKBridge(void); -+void DeinitSYNCFALLBACKBridge(void); -+#endif -+PVRSRV_ERROR InitRGXTIMERQUERYBridge(void); -+void DeinitRGXTIMERQUERYBridge(void); -+#if defined(SUPPORT_DI_BRG_IMPL) -+PVRSRV_ERROR InitDIBridge(void); -+void DeinitDIBridge(void); -+#endif -+ -+PVRSRV_ERROR -+ServerBridgeInit(void) -+{ -+ PVRSRV_ERROR eError; -+ -+ BridgeDispatchTableStartOffsetsInit(); -+ -+ eError = InitSRVCOREBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitSRVCOREBridge"); -+ -+ eError = InitSYNCBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitSYNCBridge"); -+ -+#if defined(PDUMP) -+ eError = InitPDUMPCTRLBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitPDUMPCTRLBridge"); -+#endif -+ -+ eError = InitMMBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitMMBridge"); -+ -+#if !defined(EXCLUDE_CMM_BRIDGE) -+ eError = InitCMMBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitCMMBridge"); -+#endif -+ -+#if defined(PDUMP) -+ eError = InitPDUMPMMBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitPDUMPMMBridge"); -+ -+ eError = InitPDUMPBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitPDUMPBridge"); -+#endif -+ -+#if defined(SUPPORT_DISPLAY_CLASS) -+ eError = InitDCBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitDCBridge"); -+#endif -+ -+ eError = InitCACHEBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitCACHEBridge"); -+ -+#if defined(SUPPORT_SECURE_EXPORT) -+ eError = InitSMMBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitSMMBridge"); -+#endif -+ -+#if defined(PVRSRV_ENABLE_HTB) -+ eError = InitHTBUFFERBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitHTBUFFERBridge"); -+#endif -+ -+ eError = InitPVRTLBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitPVRTLBridge"); -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ eError = InitRIBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitRIBridge"); -+#endif -+ -+#if defined(SUPPORT_VALIDATION_BRIDGE) -+ eError = InitVALIDATIONBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitVALIDATIONBridge"); -+#endif -+ -+#if defined(PVR_TESTING_UTILS) -+ eError = InitTUTILSBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitTUTILSBridge"); -+#endif -+ -+ eError = InitDEVICEMEMHISTORYBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitDEVICEMEMHISTORYBridge"); -+ -+ eError = InitSYNCTRACKINGBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitSYNCTRACKINGBridge"); -+ -+#if defined(SUPPORT_DMA_TRANSFER) -+ eError = InitDMABridge(); -+ PVR_LOG_IF_ERROR(eError, "InitDMABridge"); -+#endif -+ -+#if defined(SUPPORT_RGX) -+ -+#if defined(SUPPORT_RGXTQ_BRIDGE) -+ eError = InitRGXTQBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitRGXTQBridge"); -+#endif /* defined(SUPPORT_RGXTQ_BRIDGE) */ -+ -+ eError = InitRGXTA3DBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitRGXTA3DBridge"); -+ -+ #if defined(SUPPORT_USC_BREAKPOINT) -+ eError = InitRGXBREAKPOINTBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitRGXBREAKPOINTBridge"); -+#endif -+ -+ eError = InitRGXFWDBGBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitRGXFWDBGBridge"); -+ -+#if defined(PDUMP) -+ eError = InitRGXPDUMPBridge(); -+ PVR_LOG_IF_ERROR(eError, 
"InitRGXPDUMPBridge"); -+#endif -+ -+ eError = InitRGXHWPERFBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitRGXHWPERFBridge"); -+ -+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) -+ eError = InitRGXREGCONFIGBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitRGXREGCONFIGBridge"); -+#endif -+ -+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) -+ eError = InitRGXKICKSYNCBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitRGXKICKSYNCBridge"); -+#endif -+ -+ eError = InitRGXTIMERQUERYBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitRGXTIMERQUERYBridge"); -+ -+#endif /* SUPPORT_RGX */ -+ -+#if defined(SUPPORT_WRAP_EXTMEM) -+ eError = InitMMEXTMEMBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitMMEXTMEMBridge"); -+#endif -+ -+#if defined(SUPPORT_FALLBACK_FENCE_SYNC) -+ eError = InitSYNCFALLBACKBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitSYNCFALLBACKBridge"); -+#endif -+ -+#if defined(SUPPORT_DI_BRG_IMPL) -+ eError = InitDIBridge(); -+ PVR_LOG_IF_ERROR(eError, "InitDIBridge"); -+#endif -+ -+ eError = OSPlatformBridgeInit(); -+ PVR_LOG_IF_ERROR(eError, "OSPlatformBridgeInit"); -+ -+ return eError; -+} -+ -+void ServerBridgeDeInit(void) -+{ -+ OSPlatformBridgeDeInit(); -+ -+#if defined(SUPPORT_DI_BRG_IMPL) -+ DeinitDIBridge(); -+#endif -+ -+#if defined(SUPPORT_FALLBACK_FENCE_SYNC) -+ DeinitSYNCFALLBACKBridge(); -+#endif -+ -+#if defined(SUPPORT_WRAP_EXTMEM) -+ DeinitMMEXTMEMBridge(); -+#endif -+ -+ DeinitSRVCOREBridge(); -+ -+ DeinitSYNCBridge(); -+ -+#if defined(PDUMP) -+ DeinitPDUMPCTRLBridge(); -+#endif -+ -+ DeinitMMBridge(); -+ -+#if !defined(EXCLUDE_CMM_BRIDGE) -+ DeinitCMMBridge(); -+#endif -+ -+#if defined(PDUMP) -+ DeinitPDUMPMMBridge(); -+ -+ DeinitPDUMPBridge(); -+#endif -+ -+#if defined(PVR_TESTING_UTILS) -+ DeinitTUTILSBridge(); -+#endif -+ -+#if defined(SUPPORT_DISPLAY_CLASS) -+ DeinitDCBridge(); -+#endif -+ -+ DeinitCACHEBridge(); -+ -+#if defined(SUPPORT_SECURE_EXPORT) -+ DeinitSMMBridge(); -+#endif -+ -+#if defined(PVRSRV_ENABLE_HTB) -+ DeinitHTBUFFERBridge(); -+#endif -+ -+ DeinitPVRTLBridge(); -+ -+#if defined(SUPPORT_VALIDATION_BRIDGE) -+ DeinitVALIDATIONBridge(); -+#endif -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ DeinitRIBridge(); -+#endif -+ -+ DeinitDEVICEMEMHISTORYBridge(); -+ -+ DeinitSYNCTRACKINGBridge(); -+ -+#if defined(SUPPORT_DMA_TRANSFER) -+ DeinitDMABridge(); -+#endif -+ -+#if defined(SUPPORT_RGX) -+ -+#if defined(SUPPORT_RGXTQ_BRIDGE) -+ DeinitRGXTQBridge(); -+#endif /* defined(SUPPORT_RGXTQ_BRIDGE) */ -+ -+ DeinitRGXTA3DBridge(); -+ -+#if defined(SUPPORT_USC_BREAKPOINT) -+ DeinitRGXBREAKPOINTBridge(); -+#endif -+ -+ DeinitRGXFWDBGBridge(); -+ -+#if defined(PDUMP) -+ DeinitRGXPDUMPBridge(); -+#endif -+ -+ DeinitRGXHWPERFBridge(); -+ -+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) -+ DeinitRGXREGCONFIGBridge(); -+#endif -+ -+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) -+ DeinitRGXKICKSYNCBridge(); -+#endif -+ -+ DeinitRGXTIMERQUERYBridge(); -+#endif /* SUPPORT_RGX */ -+} -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.h b/drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.h -@@ -0,0 +1,53 @@ -+/**************************************************************************/ /*! -+@File -+@Title PVR Common Bridge Init/Deinit Module (kernel side) -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the common PVR Bridge init/deinit code -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+#ifndef PVRSRV_BRIDGE_INIT_H -+#define PVRSRV_BRIDGE_INIT_H -+ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+PVRSRV_ERROR ServerBridgeInit(void); -+void ServerBridgeDeInit(void); -+ -+#endif /* PVRSRV_BRIDGE_INIT_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_cleanup.h b/drivers/gpu/drm/img-rogue/pvrsrv_cleanup.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_cleanup.h -@@ -0,0 +1,256 @@ -+/**************************************************************************/ /*! -+@File -+@Title PowerVR SrvKM cleanup thread deferred work interface -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
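The header above exposes just these two entry points; their call sites are elsewhere in the driver and not part of this hunk. A rough sketch of the expected pairing at driver load and unload, illustrative only:

#include "pvrsrv_bridge_init.h"

static PVRSRV_ERROR ExampleDriverBringUp(void)
{
	/* Register all bridge dispatch tables before any client can connect.
	 * ServerBridgeInit() logs each per-bridge failure but returns the status
	 * of its final call (OSPlatformBridgeInit()), so only that is seen here. */
	PVRSRV_ERROR eError = ServerBridgeInit();
	PVR_LOG_IF_ERROR(eError, "ServerBridgeInit");
	return eError;
}

static void ExampleDriverTearDown(void)
{
	/* Mirror of bring-up: remove the dispatch tables on unload. */
	ServerBridgeDeInit();
}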
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+#ifndef PVRSRV_CLEANUP_H -+#define PVRSRV_CLEANUP_H -+ -+#include "dllist.h" -+#include "device.h" -+ -+/**************************************************************************/ /*! -+@Brief CLEANUP_THREAD_FN -+ -+@Description This is the function prototype for the pfnFree member found in -+ the structure PVRSRV_CLEANUP_THREAD_WORK. The function is -+ responsible for carrying out the clean up work and if successful -+ freeing the memory originally supplied to the call -+ PVRSRVCleanupThreadAddWork(). -+ -+@Input pvParam This is private data originally supplied by the caller -+ to PVRSRVCleanupThreadAddWork() when registering the -+ clean up work item, psData->pvData. It can be cast -+ to a relevant type within the using module. -+ -+@Return PVRSRV_OK if the cleanup operation was successful and the -+ callback has freed the PVRSRV_CLEANUP_THREAD_WORK* work item -+ memory originally supplied to PVRSRVCleanupThreadAddWork() -+ Any other error code will lead to the work item -+ being re-queued and hence the original -+ PVRSRV_CLEANUP_THREAD_WORK* must not be freed. -+*/ /***************************************************************************/ -+ -+typedef PVRSRV_ERROR (*CLEANUP_THREAD_FN)(void *pvParam); -+ -+ -+/* Typical number of times a caller should want the work to be retried in case -+ * of the callback function (pfnFree) returning an error. -+ * Callers to PVRSRVCleanupThreadAddWork should provide this value as the retry -+ * count (ui32RetryCount) unless there are special requirements. -+ * A value of 200 corresponds to around ~20s (200 * 100ms). If it is not -+ * successful by then give up as an unrecoverable problem has occurred. -+ */ -+#define CLEANUP_THREAD_RETRY_COUNT_DEFAULT 200u -+/* Like for CLEANUP_THREAD_RETRY_COUNT_DEFAULT but call will wait for -+ * a specified amount of time rather than number of retries. -+ */ -+#define CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT 20000u /* 20s */ -+ -+/* Use to set retry count on a cleanup item.
-+ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK -+ * _count - retry count -+ */ -+#define CLEANUP_THREAD_SET_RETRY_COUNT(_item,_count) \ -+ do { \ -+ (_item)->ui32RetryCount = (_count); \ -+ (_item)->ui32TimeStart = 0; \ -+ (_item)->ui32TimeEnd = 0; \ -+ } while (0) -+ -+/* Use to set timeout deadline on a cleanup item. -+ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK -+ * _timeout - timeout in milliseconds, if 0 -+ * CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT is used -+ */ -+#define CLEANUP_THREAD_SET_RETRY_TIMEOUT(_item,_timeout) \ -+ do { \ -+ (_item)->ui32RetryCount = 0; \ -+ (_item)->ui32TimeStart = OSClockms(); \ -+ (_item)->ui32TimeEnd = (_item)->ui32TimeStart + ((_timeout) > 0 ? \ -+ (_timeout) : CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT); \ -+ } while (0) -+ -+/* Indicates if the timeout on a given item has been reached. -+ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK -+ */ -+#define CLEANUP_THREAD_RETRY_TIMEOUT_NOT_REACHED(_item) \ -+ ((_item)->ui32TimeEnd - (_item)->ui32TimeStart >= \ -+ OSClockms() - (_item)->ui32TimeStart) -+ -+/* Indicates if the current item is waiting on timeout or retry count. -+ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK -+ * */ -+#define CLEANUP_THREAD_IS_RETRY_TIMEOUT(_item) \ -+ ((_item)->ui32TimeStart != (_item->ui32TimeEnd)) -+ -+#define CLEANUP_TYPE_LIST \ -+ X(UNDEF) /**/ \ -+ X(CONNECTION) /**/ \ -+ X(MMU) /**/ \ -+ X(OSMEM) /**/ \ -+ X(PMR) /**/ \ -+ X(LAST) /**/ \ -+ -+#define CLEANUP_TYPE_ITEM_LABEL_MAX_SIZE 11 /* CONNECTION\0 */ -+#define CLEANUP_TYPE_ITEM_DPF " %1.11s : %1.5d" -+#define CLEANUP_TYPE_ITEM_DPF_MAX_SIZE CLEANUP_TYPE_ITEM_LABEL_MAX_SIZE+sizeof(" : ")+5+1 -+ -+typedef enum _PVRSRV_CLEANUP_TYPE_ -+{ -+#define X(_name) PVRSRV_CLEANUP_TYPE_ ## _name, -+ CLEANUP_TYPE_LIST -+#undef X -+ -+} PVRSRV_CLEANUP_TYPE; -+ -+#if defined(CLEANUP_TYPE_STRINGS) -+ -+static const char *const _pszCleanupStrings[] = { -+#define X(_name) #_name, -+ CLEANUP_TYPE_LIST -+#undef X -+}; -+ -+/*************************************************************************/ /*! -+@Function PVRSRVGetCleanupName -+@Description Returns the name of a Cleanup Type. -+ -+@Input eCleanupType The enum value of the cleanup type. -+ -+@Return const IMG_CHAR pointer. -+*/ /**************************************************************************/ -+static inline const IMG_CHAR *PVRSRVGetCleanupName(PVRSRV_CLEANUP_TYPE eCleanupType) -+{ -+ if (eCleanupType < 0 || eCleanupType > PVRSRV_CLEANUP_TYPE_LAST) -+ { -+ return "Undefined"; -+ } -+ -+ PVR_ASSERT(sizeof(_pszCleanupStrings[eCleanupType]) < CLEANUP_TYPE_ITEM_LABEL_MAX_SIZE); -+ -+ return _pszCleanupStrings[eCleanupType]; -+} -+ -+#endif /* CLEANUP_TYPE_STRINGS */ -+ -+/* Clean up work item specifics so that the task can be managed by the -+* pvr_defer_free cleanup thread in the Server. -+*/ -+typedef struct _PVRSRV_CLEANUP_THREAD_WORK_ -+{ -+ DLLIST_NODE sNode; /*!< List node used internally by the cleanup -+ thread */ -+ CLEANUP_THREAD_FN pfnFree; /*!< Pointer to the function to be called to -+ carry out the deferred cleanup */ -+ void *pvData; /*!< private data for pfnFree, usually a way back -+ to the original PVRSRV_CLEANUP_THREAD_WORK* -+ pointer supplied in the call to -+ PVRSRVCleanupThreadAddWork(). */ -+ IMG_UINT32 ui32TimeStart; /*!< Timestamp in ms of the moment when -+ cleanup item has been created. */ -+ IMG_UINT32 ui32TimeEnd; /*!< Time in ms after which no further retry -+ attempts will be made, item discard and -+ error logged when this is reached. 
*/ -+ IMG_UINT32 ui32RetryCount; /*!< Number of times the callback should be -+ re-tried when it returns error. */ -+ IMG_BOOL bDependsOnHW; /*!< Don't drop the cleanup task if retry limit -+ is reached, we could depend on event from -+ device to continue. */ -+ PVRSRV_CLEANUP_TYPE eCleanupType;/*!< Type of work item added to queue */ -+#if defined(DEBUG) -+ const char *pszFun; -+ unsigned int ui32LineNum; -+#endif -+} PVRSRV_CLEANUP_THREAD_WORK; -+ -+ -+/**************************************************************************/ /*! -+@Function PVRSRVCleanupThreadAddWork -+ -+@Description Add a work item to be called from the cleanup thread -+ -+@Input psDevNode : Pointer to the device node -+@Input psData : The function pointer and private data for the -+ callback -+ -+@Return None -+*/ /***************************************************************************/ -+#if defined(DEBUG) -+#define PVRSRVCleanupThreadAddWork(DEV, DATA) PVRSRVCleanupThreadAddWork_Debug(DEV, DATA, __FILE__, __LINE__) -+void PVRSRVCleanupThreadAddWork_Debug(PVRSRV_DEVICE_NODE *psDevNode, -+ PVRSRV_CLEANUP_THREAD_WORK *psData, -+ const char *pszFun, const unsigned int ui32LineNum); -+#else -+#define PVRSRVCleanupThreadAddWork PVRSRVCleanupThreadAddWork_Int -+void PVRSRVCleanupThreadAddWork_Int(PVRSRV_DEVICE_NODE *psDevNode, -+ PVRSRV_CLEANUP_THREAD_WORK *psData); -+#endif -+ -+/**************************************************************************/ /*! -+@Function PVRSRVCleanupThreadWaitForDevice -+ -+@Description Blocking wait for all of the device's items to be cleaned. -+ -+@Input psDevNode : Pointer to the device node -+ -+@Return None -+*/ /***************************************************************************/ -+void PVRSRVCleanupThreadWaitForDevice(PVRSRV_DEVICE_NODE *psDevNode); -+ -+/**************************************************************************/ /*! -+@Function PVRSRVCleanupThreadGetPid -+ -+@Description Returns Cleanup Thread's PID. -+ -+@Return PID of the Cleanup Thread -+*/ /***************************************************************************/ -+IMG_PID PVRSRVCleanupThreadGetPid(void); -+ -+/**************************************************************************/ /*! -+@Function PVRSRVCleanupThreadGetTid -+ -+@Description Returns Cleanup Thread's TID. -+ -+@Return TID of the Cleanup Thread -+*/ /***************************************************************************/ -+uintptr_t PVRSRVCleanupThreadGetTid(void); -+ -+#endif /* PVRSRV_CLEANUP_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_device.h b/drivers/gpu/drm/img-rogue/pvrsrv_device.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_device.h -@@ -0,0 +1,403 @@ -+/**************************************************************************/ /*! -+@File -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
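The cleanup-thread interface in pvrsrv_cleanup.h above is easiest to read with a concrete caller in mind. The following is a minimal sketch (not part of the patch) of how a driver module might queue a deferred free; MY_DEFERRED_FREE, MyResourceCanBeFreed(), MyResourceDestroy() and the use of OSAllocZMem()/OSFreeMem() as the allocator are assumptions for illustration only.

#include "pvrsrv_cleanup.h"

/* Hypothetical wrapper carrying the work item plus the resource it frees. */
typedef struct MY_DEFERRED_FREE_TAG
{
	PVRSRV_CLEANUP_THREAD_WORK sCleanupWork;
	void *pvResource;
} MY_DEFERRED_FREE;

static PVRSRV_ERROR _MyDeferredFreeCb(void *pvParam)
{
	/* pvParam is the pvData supplied below, i.e. the wrapper itself. */
	MY_DEFERRED_FREE *psWork = pvParam;

	if (!MyResourceCanBeFreed(psWork->pvResource))
	{
		/* Returning an error re-queues the item, so it must not be freed here. */
		return PVRSRV_ERROR_RETRY;
	}

	MyResourceDestroy(psWork->pvResource);
	OSFreeMem(psWork); /* on success the callback frees the work item memory */
	return PVRSRV_OK;
}

static void MyDeferResourceFree(PVRSRV_DEVICE_NODE *psDevNode, void *pvResource)
{
	MY_DEFERRED_FREE *psWork = OSAllocZMem(sizeof(*psWork));

	if (psWork == NULL)
	{
		return; /* a real caller would fall back to synchronous cleanup */
	}

	psWork->pvResource = pvResource;
	psWork->sCleanupWork.pfnFree = _MyDeferredFreeCb;
	psWork->sCleanupWork.pvData = psWork;
	psWork->sCleanupWork.bDependsOnHW = IMG_FALSE;
	psWork->sCleanupWork.eCleanupType = PVRSRV_CLEANUP_TYPE_OSMEM;
	CLEANUP_THREAD_SET_RETRY_COUNT(&psWork->sCleanupWork,
	                               CLEANUP_THREAD_RETRY_COUNT_DEFAULT);

	PVRSRVCleanupThreadAddWork(psDevNode, &psWork->sCleanupWork);
}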
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /***************************************************************************/ -+ -+#ifndef PVRSRV_DEVICE_H -+#define PVRSRV_DEVICE_H -+ -+#include "img_types.h" -+#include "physheap_config.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_memalloc_physheap.h" -+#include "pvrsrv_firmware_boot.h" -+#include "rgx_fwif_km.h" -+#include "servicesext.h" -+#include "cache_ops.h" -+#include "opaque_types.h" -+ -+#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) -+#include "pvr_dvfs.h" -+#endif -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+#include "virt_validation_defs.h" -+#endif -+ -+typedef struct _PVRSRV_DEVICE_CONFIG_ PVRSRV_DEVICE_CONFIG; -+typedef enum _DRIVER_MODE_ -+{ -+/* Do not use these enumerations directly, to query the -+ current driver mode, use the PVRSRV_VZ_MODE_IS() -+ macro */ -+ DRIVER_MODE_NATIVE = -1, -+ DRIVER_MODE_HOST = 0, -+ DRIVER_MODE_GUEST -+} PVRSRV_DRIVER_MODE; -+ -+typedef enum -+{ -+ PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_MAPPABLE = 0, -+ PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_NON_MAPPABLE = 1, -+ PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_LAST -+} PVRSRV_DEVICE_LOCAL_MEMORY_ARENA; -+ -+typedef enum _PVRSRV_DEVICE_SNOOP_MODE_ -+{ -+ PVRSRV_DEVICE_SNOOP_NONE = 0, -+ PVRSRV_DEVICE_SNOOP_CPU_ONLY, -+ PVRSRV_DEVICE_SNOOP_DEVICE_ONLY, -+ PVRSRV_DEVICE_SNOOP_CROSS, -+ PVRSRV_DEVICE_SNOOP_EMULATED, -+} PVRSRV_DEVICE_SNOOP_MODE; -+ -+#if defined(SUPPORT_SOC_TIMER) -+typedef IMG_UINT64 -+(*PFN_SYS_DEV_SOC_TIMER_READ)(IMG_HANDLE hSysData); -+#endif -+ -+typedef enum _PVRSRV_DEVICE_FABRIC_TYPE_ -+{ -+ PVRSRV_DEVICE_FABRIC_NONE = 0, -+ PVRSRV_DEVICE_FABRIC_ACELITE, -+ PVRSRV_DEVICE_FABRIC_FULLACE, -+} PVRSRV_DEVICE_FABRIC_TYPE; -+ -+typedef IMG_UINT32 -+(*PFN_SYS_DEV_CLK_FREQ_GET)(IMG_HANDLE hSysData); -+ -+typedef PVRSRV_ERROR -+(*PFN_SYS_PRE_POWER)(IMG_HANDLE hSysData, -+ PVRSRV_SYS_POWER_STATE eNewPowerState, -+ PVRSRV_SYS_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags); -+ -+typedef PVRSRV_ERROR -+(*PFN_SYS_POST_POWER)(IMG_HANDLE hSysData, -+ PVRSRV_SYS_POWER_STATE eNewPowerState, -+ PVRSRV_SYS_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags); -+ -+/*************************************************************************/ /*! -+@Brief Callback function type PFN_SYS_GET_POWER -+ -+@Description This function queries the SoC power registers to determine -+ if the power domain on which the GPU resides is powered on. -+ -+ Implementation of this callback is optional - where it is not provided, -+ the driver will assume the domain power state depending on driver type: -+ regular drivers assume it is unpowered at startup, while drivers with -+ AutoVz support expect the GPU domain to be powered on initially. The power -+ state will be then tracked internally according to the pfnPrePowerState -+ and pfnPostPowerState calls using a fallback function. 
-+ -+@Input psDevNode Pointer to node struct of the -+ device being initialised -+ -+@Return PVRSRV_SYS_POWER_STATE_ON if the respective device's hardware -+ domain is powered on -+ PVRSRV_SYS_POWER_STATE_OFF if the domain is powered off -+*/ /**************************************************************************/ -+typedef PVRSRV_SYS_POWER_STATE -+(*PFN_SYS_GET_POWER)(PPVRSRV_DEVICE_NODE psDevNode); -+ -+typedef void -+(*PFN_SYS_DEV_INTERRUPT_HANDLED)(PVRSRV_DEVICE_CONFIG *psDevConfig); -+ -+typedef PVRSRV_ERROR -+(*PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE)(IMG_HANDLE hSysData, -+ IMG_UINT64 ui64MemSize); -+ -+typedef void (*PFN_SYS_DEV_FEAT_DEP_INIT)(PVRSRV_DEVICE_CONFIG *, IMG_UINT64); -+ -+typedef void -+(*PFN_SYS_DEV_HOST_CACHE_MAINTENANCE)(IMG_HANDLE hSysData, -+ PVRSRV_CACHE_OP eRequestType, -+ void *pvVirtStart, -+ void *pvVirtEnd, -+ IMG_CPU_PHYADDR sCPUPhysStart, -+ IMG_CPU_PHYADDR sCPUPhysEnd); -+ -+typedef void* -+(*PFN_SLAVE_DMA_CHAN)(PVRSRV_DEVICE_CONFIG*, char*); -+ -+typedef void -+(*PFN_SLAVE_DMA_FREE)(PVRSRV_DEVICE_CONFIG*, -+ void*); -+ -+typedef void -+(*PFN_DEV_PHY_ADDR_2_DMA_ADDR)(PVRSRV_DEVICE_CONFIG *, -+ IMG_DMA_ADDR *, -+ IMG_DEV_PHYADDR *, -+ IMG_BOOL *, -+ IMG_UINT32, -+ IMG_BOOL); -+ -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) -+ -+typedef struct _PVRSRV_TD_FW_PARAMS_ -+{ -+ const void *pvFirmware; -+ IMG_UINT32 ui32FirmwareSize; -+ PVRSRV_FW_BOOT_PARAMS uFWP; -+} PVRSRV_TD_FW_PARAMS; -+ -+typedef PVRSRV_ERROR -+(*PFN_TD_SEND_FW_IMAGE)(IMG_HANDLE hSysData, -+ PVRSRV_TD_FW_PARAMS *psTDFWParams); -+ -+typedef struct _PVRSRV_TD_POWER_PARAMS_ -+{ -+ IMG_DEV_PHYADDR sPCAddr; -+ -+ /* MIPS-only fields */ -+ IMG_DEV_PHYADDR sGPURegAddr; -+ IMG_DEV_PHYADDR sBootRemapAddr; -+ IMG_DEV_PHYADDR sCodeRemapAddr; -+ IMG_DEV_PHYADDR sDataRemapAddr; -+} PVRSRV_TD_POWER_PARAMS; -+ -+typedef PVRSRV_ERROR -+(*PFN_TD_SET_POWER_PARAMS)(IMG_HANDLE hSysData, -+ PVRSRV_TD_POWER_PARAMS *psTDPowerParams); -+ -+typedef PVRSRV_ERROR -+(*PFN_TD_RGXSTART)(IMG_HANDLE hSysData); -+ -+typedef PVRSRV_ERROR -+(*PFN_TD_RGXSTOP)(IMG_HANDLE hSysData); -+ -+#endif /* defined(SUPPORT_TRUSTED_DEVICE) */ -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+typedef void (*PFN_SYS_DEV_VIRT_INIT)(IMG_HANDLE hSysData, -+ IMG_UINT64[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], -+ IMG_UINT64[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]); -+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ -+ -+typedef struct _PVRSRV_ROBUSTNESS_ERR_DATA_HOST_WDG_ -+{ -+ IMG_UINT32 ui32Status; /*!< FW status */ -+ IMG_UINT32 ui32Reason; /*!< Reason for FW status */ -+} PVRSRV_ROBUSTNESS_ERR_DATA_HOST_WDG; -+ -+typedef struct _PVRSRV_ROBUSTNESS_ERR_DATA_FW_PF_ -+{ -+ IMG_DEV_VIRTADDR sFWFaultAddr; /*!< FW page fault address */ -+} PVRSRV_ROBUSTNESS_ERR_DATA_FW_PF; -+ -+typedef struct _PVRSRV_ROBUSTNESS_ERR_DATA_CHECKSUM_ -+{ -+ IMG_UINT32 ui32ExtJobRef; /*!< External Job Reference of any affected GPU work */ -+ RGXFWIF_DM eDM; /*!< Data Master which was running any affected GPU work */ -+} PVRSRV_ROBUSTNESS_ERR_DATA_CHECKSUM; -+ -+typedef struct _PVRSRV_ROBUSTNESS_NOTIFY_DATA_ -+{ -+ RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reason for error/reset */ -+ IMG_PID pid; /*!< Pid of process which created the errored context */ -+ union -+ { -+ PVRSRV_ROBUSTNESS_ERR_DATA_CHECKSUM sChecksumErrData; /*!< Data returned for checksum errors */ -+ PVRSRV_ROBUSTNESS_ERR_DATA_FW_PF sFwPFErrData; /*!< Data returned for FW page faults */ -+ PVRSRV_ROBUSTNESS_ERR_DATA_HOST_WDG sHostWdgData; /*!< Data returned for Host Wdg FW 
faults */ -+ } uErrData; -+} PVRSRV_ROBUSTNESS_NOTIFY_DATA; -+ -+typedef void -+(*PFN_SYS_DEV_ERROR_NOTIFY)(IMG_HANDLE hSysData, -+ PVRSRV_ROBUSTNESS_NOTIFY_DATA *psRobustnessErrorData); -+ -+struct _PVRSRV_DEVICE_CONFIG_ -+{ -+ /*! OS device passed to SysDevInit (linux: 'struct device') */ -+ void *pvOSDevice; -+ -+ /*! -+ *! Service representation of pvOSDevice. Should be set to NULL when the -+ *! config is created in SysDevInit. Set by Services once a device node has -+ *! been created for this config and unset before SysDevDeInit is called. -+ */ -+ struct _PVRSRV_DEVICE_NODE_ *psDevNode; -+ -+ /*! Name of the device */ -+ IMG_CHAR *pszName; -+ -+ /*! Version of the device (optional) */ -+ IMG_CHAR *pszVersion; -+ -+ /*! Register bank address */ -+ IMG_CPU_PHYADDR sRegsCpuPBase; -+ /*! Register bank size */ -+ IMG_UINT32 ui32RegsSize; -+ /*! Device interrupt number */ -+ IMG_UINT32 ui32IRQ; -+ -+ PVRSRV_DEVICE_SNOOP_MODE eCacheSnoopingMode; -+ -+ /*! Device specific data handle */ -+ IMG_HANDLE hDevData; -+ -+ /*! System specific data that gets passed into system callback functions. */ -+ IMG_HANDLE hSysData; -+ -+ IMG_BOOL bHasNonMappableLocalMemory; -+ -+ /*! Indicates if system supports FBCDC v3.1 */ -+ IMG_BOOL bHasFBCDCVersion31; -+ -+ /*! Physical Heap definitions for this device. -+ * eDefaultHeap must be set to GPU_LOCAL or CPU_LOCAL. Specifying any other value -+ * (e.g. DEFAULT) will lead to an error at device discovery. -+ * pasPhysHeap array must contain at least one PhysHeap, the declared default heap. -+ */ -+ PVRSRV_PHYS_HEAP eDefaultHeap; -+ PHYS_HEAP_CONFIG *pasPhysHeaps; -+ IMG_UINT32 ui32PhysHeapCount; -+ -+ /*! -+ *! Callbacks to change system device power state at the beginning and end -+ *! of a power state change (optional). -+ */ -+ PFN_SYS_PRE_POWER pfnPrePowerState; -+ PFN_SYS_POST_POWER pfnPostPowerState; -+ PFN_SYS_GET_POWER pfnGpuDomainPower; -+ -+ /*! Callback to obtain the clock frequency from the device (optional). */ -+ PFN_SYS_DEV_CLK_FREQ_GET pfnClockFreqGet; -+ -+#if defined(SUPPORT_SOC_TIMER) -+ /*! Callback to read SoC timer register value (mandatory). */ -+ PFN_SYS_DEV_SOC_TIMER_READ pfnSoCTimerRead; -+#endif -+ -+ /*! -+ *! Callback to perform host CPU cache maintenance. Might be needed for -+ *! architectures which allow extensions such as RISC-V (optional). -+ */ -+ PFN_SYS_DEV_HOST_CACHE_MAINTENANCE pfnHostCacheMaintenance; -+ IMG_BOOL bHasPhysicalCacheMaintenance; -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) -+ /*! -+ *! Callback to send FW image and FW boot time parameters to the trusted -+ *! device. -+ */ -+ PFN_TD_SEND_FW_IMAGE pfnTDSendFWImage; -+ -+ /*! -+ *! Callback to send parameters needed in a power transition to the trusted -+ *! device. -+ */ -+ PFN_TD_SET_POWER_PARAMS pfnTDSetPowerParams; -+ -+ /*! Callbacks to ping the trusted device to securely run RGXStart/Stop() */ -+ PFN_TD_RGXSTART pfnTDRGXStart; -+ PFN_TD_RGXSTOP pfnTDRGXStop; -+ -+#if defined(PVR_ANDROID_HAS_DMA_HEAP_FIND) -+ /*! Name of DMA heap to allocate secure memory from. Used with dma_heap_find. */ -+ IMG_CHAR *pszSecureDMAHeapName; -+#endif -+#endif /* defined(SUPPORT_TRUSTED_DEVICE) */ -+ -+ /*! Function that does device feature specific system layer initialisation */ -+ PFN_SYS_DEV_FEAT_DEP_INIT pfnSysDevFeatureDepInit; -+ -+#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) -+ PVRSRV_DVFS sDVFS; -+#endif -+ -+#if defined(SUPPORT_ALT_REGBASE) -+ IMG_DEV_PHYADDR sAltRegsGpuPBase; -+#endif -+ -+ /*! -+ *! 
Indicates if device physical address 0x0 might be used as GPU memory -+ *! (e.g. LMA system or UMA system with CPU PA 0x0 reserved by the OS, -+ *! but CPU PA != device PA and device PA 0x0 available for the GPU) -+ */ -+ IMG_BOOL bDevicePA0IsValid; -+ -+ /*! -+ *! Function to initialize System-specific virtualization. If not supported -+ *! this should be a NULL reference. Only present if -+ *! SUPPORT_GPUVIRT_VALIDATION is defined. -+ */ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+ PFN_SYS_DEV_VIRT_INIT pfnSysDevVirtInit; -+#endif -+ -+ /*! -+ *! Callback to notify system layer of device errors. -+ *! NB. implementers should ensure that the minimal amount of work is -+ *! done in the callback function, as it will be executed in the main -+ *! RGX MISR. (e.g. any blocking or lengthy work should be performed by -+ *! a worker queue/thread instead.) -+ */ -+ PFN_SYS_DEV_ERROR_NOTIFY pfnSysDevErrorNotify; -+ -+ /*! -+ *! Slave DMA channel request callbacks -+ */ -+ PFN_SLAVE_DMA_CHAN pfnSlaveDMAGetChan; -+ PFN_SLAVE_DMA_FREE pfnSlaveDMAFreeChan; -+ /*! -+ *! Conversion of device memory to DMA addresses -+ */ -+ PFN_DEV_PHY_ADDR_2_DMA_ADDR pfnDevPhysAddr2DmaAddr; -+ /*! -+ *! DMA channel names -+ */ -+ IMG_CHAR *pszDmaTxChanName; -+ IMG_CHAR *pszDmaRxChanName; -+ /*! -+ *! DMA device transfer restrictions -+ */ -+ IMG_UINT32 ui32DmaAlignment; -+ IMG_UINT32 ui32DmaTransferUnit; -+ /*! -+ *! System-wide presence of DMA capabilities -+ */ -+ IMG_BOOL bHasDma; -+ -+}; -+ -+#endif /* PVRSRV_DEVICE_H*/ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_device_types.h b/drivers/gpu/drm/img-rogue/pvrsrv_device_types.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_device_types.h -@@ -0,0 +1,60 @@ -+/*************************************************************************/ /*! -+@File -+@Title PowerVR device type definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
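For orientation, here is a hedged sketch (not part of the patch) of how a system layer might populate the mandatory PVRSRV_DEVICE_CONFIG fields and the optional power callbacks described above. The function names and values, the .uiAddr member of IMG_CPU_PHYADDR and the PVRSRV_PHYS_HEAP_GPU_LOCAL enumerator are assumptions for illustration.

#include "pvrsrv_device.h"

static PHYS_HEAP_CONFIG gasPhysHeaps[1]; /* at least the default GPU_LOCAL heap, set up elsewhere */
static PVRSRV_DEVICE_CONFIG gsDevConfig;

static PVRSRV_ERROR MySysPrePower(IMG_HANDLE hSysData,
                                  PVRSRV_SYS_POWER_STATE eNewPowerState,
                                  PVRSRV_SYS_POWER_STATE eCurrentPowerState,
                                  PVRSRV_POWER_FLAGS ePwrFlags)
{
	/* e.g. enable clocks/regulators before the GPU domain powers up */
	return PVRSRV_OK;
}

PVRSRV_ERROR MySysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
{
	gsDevConfig.pvOSDevice = pvOSDevice;
	gsDevConfig.psDevNode = NULL;                  /* set later by Services */
	gsDevConfig.pszName = "my-soc-gpu";            /* illustrative name */
	gsDevConfig.sRegsCpuPBase.uiAddr = 0x54100000; /* example register base */
	gsDevConfig.ui32RegsSize = 0x100000;
	gsDevConfig.ui32IRQ = 42;
	gsDevConfig.eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE;
	gsDevConfig.eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL;
	gsDevConfig.pasPhysHeaps = gasPhysHeaps;
	gsDevConfig.ui32PhysHeapCount = 1;
	gsDevConfig.pfnPrePowerState = MySysPrePower;  /* optional callbacks */
	gsDevConfig.pfnPostPowerState = NULL;
	gsDevConfig.pfnGpuDomainPower = NULL;          /* fall back to internal tracking */

	*ppsDevConfig = &gsDevConfig;
	return PVRSRV_OK;
}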
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(PVRSRV_DEVICE_TYPES_H) -+#define PVRSRV_DEVICE_TYPES_H -+ -+#include "img_types.h" -+ -+#if !defined(PVRSRV_MAX_DEVICES) -+#error "PVRSRV_MAX_DEVICES must be defined in the DDK build environment" -+#endif /* !defined(PVRSRV_NUM_DEVICES) */ -+#define PVRSRV_HOST_DEVICE_ID 255U /*!< Device ID used for host (non-GPU) device. */ -+ -+static_assert(PVRSRV_MAX_DEVICES < PVRSRV_HOST_DEVICE_ID, "Invalid host device ID."); -+ -+#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__) -+#define __pvrsrv_defined_struct_enum__ -+#include -+#endif -+ -+#endif /* PVRSRV_DEVICE_TYPES_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_devvar.h b/drivers/gpu/drm/img-rogue/pvrsrv_devvar.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_devvar.h -@@ -0,0 +1,291 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services Device Variable interface header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Defines the client side interface for device variables -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PVRSRV_DEVVAR_H -+#define PVRSRV_DEVVAR_H -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+#define DEVVAR_MAX_NAME_LEN 32 -+ -+typedef struct SYNC_PRIM_CONTEXT_TAG *PDEVVARCTX; -+typedef struct PVRSRV_CLIENT_SYNC_PRIM_TAG *PDEVVAR; -+ -+typedef struct PVRSRV_DEV_VAR_UPDATE_TAG -+{ -+ PDEVVAR psDevVar; /*!< Pointer to the dev var */ -+ IMG_UINT32 ui32UpdateValue; /*!< the update value */ -+} PVRSRV_DEV_VAR_UPDATE; -+ -+/*************************************************************************/ /*! -+@Function PVRSRVDevVarContextCreate -+ -+@Description Create a new device variable context -+ -+@Input psDevConnection Device to create the device -+ variable context on -+ -+@Output phDevVarContext Handle to the created device -+ variable context -+ -+@Return PVRSRV_OK if the device variable context was successfully -+ created -+*/ -+/*****************************************************************************/ -+IMG_EXPORT PVRSRV_ERROR -+PVRSRVDevVarContextCreate(const PVRSRV_DEV_CONNECTION *psDevConnection, -+ PDEVVARCTX *phDevVarContext); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVDevVarContextDestroy -+ -+@Description Destroy a device variable context -+ -+@Input hDevVarContext Handle to the device variable -+ context to destroy -+ -+@Return None -+*/ -+/*****************************************************************************/ -+IMG_EXPORT void -+PVRSRVDevVarContextDestroy(PDEVVARCTX hDevVarContext); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVDevVarAlloc -+ -+@Description Allocate a new device variable on the specified device -+ variable context. The device variable's value is initialised -+ with the value passed in ui32InitialValue. -+ -+@Input hDevVarContext Handle to the device variable -+ context -+@Input ui32InitialValue Value to initially assign to the -+ new variable -+@Input pszDevVarName Name assigned to the device variable -+ (for debug purposes) -+ -+@Output ppsDevVar Created device variable -+ -+@Return PVRSRV_OK if the device variable was successfully created -+*/ -+/*****************************************************************************/ -+IMG_EXPORT PVRSRV_ERROR -+PVRSRVDevVarAllocI(PDEVVARCTX hDevVarContext, -+ PDEVVAR *ppsDevVar, -+ IMG_UINT32 ui32InitialValue, -+ const IMG_CHAR *pszDevVarName -+ PVR_DBG_FILELINE_PARAM); -+#define PVRSRVDevVarAlloc(hDevVarContext, ppsDevVar, ui32InitialValue, pszDevVarName) \ -+ PVRSRVDevVarAllocI( (hDevVarContext), (ppsDevVar), (ui32InitialValue), (pszDevVarName) \ -+ PVR_DBG_FILELINE ) -+ -+/*************************************************************************/ /*! 
-+@Function PVRSRVDevVarFree -+ -+@Description Free a device variable -+ -+@Input psDevVar The device variable to free -+ -+@Return None -+*/ -+/*****************************************************************************/ -+IMG_EXPORT void -+PVRSRVDevVarFree(PDEVVAR psDevVar); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVDevVarSet -+ -+@Description Set the device variable to a value -+ -+@Input psDevVar The device variable to set -+ -+@Input ui32Value Value to set it to -+ -+@Return None -+*/ -+/*****************************************************************************/ -+IMG_EXPORT void -+PVRSRVDevVarSet(PDEVVAR psDevVar, -+ IMG_UINT32 ui32Value); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVDevVarGet -+ -+@Description Get the current value of the device variable -+ -+@Input psDevVar The device variable to get the -+ value of -+ -+@Return Value of the variable -+*/ -+/*****************************************************************************/ -+IMG_EXPORT IMG_UINT32 -+PVRSRVDevVarGet(PDEVVAR psDevVar); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVDevVarGetFirmwareAddr -+ -+@Description Returns the address of the associated firmware value for a -+ specified device integer (not exposed to client) -+ -+@Input psDevVar The device variable to resolve -+ -+@Return The firmware address of the device variable -+*/ -+/*****************************************************************************/ -+IMG_EXPORT IMG_UINT32 -+PVRSRVDevVarGetFirmwareAddr(PDEVVAR psDevVar); -+ -+#if defined(PDUMP) -+/*************************************************************************/ /*! -+@Function PVRSRVDevVarPDump -+ -+@Description PDump the current value of the device variable -+ -+@Input psDevVar The device variable to PDump -+ -+@Return None -+*/ -+/*****************************************************************************/ -+IMG_EXPORT void -+PVRSRVDevVarPDump(PDEVVAR psDevVar); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVDevVarPDumpPol -+ -+@Description Do a PDump poll of the device variable -+ -+@Input psDevVar The device variable to PDump -+ -+@Input ui32Value Value to Poll for -+ -+@Input ui32Mask PDump mask operator -+ -+@Input ui32PDumpFlags PDump flags -+ -+@Return None -+*/ -+/*****************************************************************************/ -+IMG_EXPORT void -+PVRSRVDevVarPDumpPol(PDEVVAR psDevVar, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ IMG_UINT32 ui32PDumpFlags); -+ -+/*************************************************************************/ /*! 
-+@Function PVRSRVDevVarPDumpCBP -+ -+@Description Do a PDump CB poll using the device variable -+ -+@Input psDevVar The device variable to PDump -+ -+@Input uiWriteOffset Current write offset of buffer -+ -+@Input uiPacketSize Size of the packet to write into CB -+ -+@Input uiBufferSize Size of the CB -+ -+@Return None -+*/ -+/*****************************************************************************/ -+IMG_EXPORT void -+PVRSRVDevVarPDumpCBP(PDEVVAR psDevVar, -+ IMG_UINT64 uiWriteOffset, -+ IMG_UINT64 uiPacketSize, -+ IMG_UINT64 uiBufferSize); -+#else /* PDUMP */ -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PVRSRVDevVarPDump) -+#endif -+static INLINE void -+PVRSRVDevVarPDump(PDEVVAR psDevVar) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDevVar); -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PVRSRVDevVarPDumpPol) -+#endif -+static INLINE void -+PVRSRVDevVarPDumpPol(PDEVVAR psDevVar, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDevVar); -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+ PVR_UNREFERENCED_PARAMETER(ui32Mask); -+ PVR_UNREFERENCED_PARAMETER(eOperator); -+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PVRSRVDevVarPDumpCBP) -+#endif -+static INLINE void -+PVRSRVDevVarPDumpCBP(PDEVVAR psDevVar, -+ IMG_UINT64 uiWriteOffset, -+ IMG_UINT64 uiPacketSize, -+ IMG_UINT64 uiBufferSize) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDevVar); -+ PVR_UNREFERENCED_PARAMETER(uiWriteOffset); -+ PVR_UNREFERENCED_PARAMETER(uiPacketSize); -+ PVR_UNREFERENCED_PARAMETER(uiBufferSize); -+} -+#endif /* PDUMP */ -+ -+#if defined(__cplusplus) -+} -+#endif -+#endif /* PVRSRV_DEVVAR_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_error.c b/drivers/gpu/drm/img-rogue/pvrsrv_error.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_error.c -@@ -0,0 +1,61 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services error support -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "img_defs.h" -+#include "pvr_debug.h" -+ -+IMG_EXPORT -+const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError) -+{ -+ switch (eError) -+ { -+ case PVRSRV_OK: -+ return "PVRSRV_OK"; -+#define PVRE(x) \ -+ case x: \ -+ return #x; -+#include "pvrsrv_errors.h" -+#undef PVRE -+ default: -+ return "Unknown PVRSRV error number"; -+ } -+} -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_error.h b/drivers/gpu/drm/img-rogue/pvrsrv_error.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_error.h -@@ -0,0 +1,75 @@ -+/*************************************************************************/ /*! -+@File pvrsrv_error.h -+@Title services error enumerant -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Defines error codes used by any/all services modules -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(PVRSRV_ERROR_H) -+#define PVRSRV_ERROR_H -+ -+/*! -+ ***************************************************************************** -+ * Error values -+ *****************************************************************************/ -+typedef enum PVRSRV_ERROR_TAG -+{ -+ PVRSRV_OK, -+ -+#define PVRE(x) x, -+#include "pvrsrv_errors.h" -+#undef PVRE -+ -+ PVRSRV_ERROR_FORCE_I32 = 0x7fffffff -+ -+} PVRSRV_ERROR; -+ -+/*! -+ * @Function PVRSRVIsRetryError -+ * @Description Checks if error code is one of the errors that require retry -+ * from the caller. -+ * @Input eError Error code. -+ * @Return IMG_TRUE if eError is one of the error codes that require the caller -+ * to retry. -+ */ -+#define PVRSRVIsRetryError(eError) \ -+ (((eError == PVRSRV_ERROR_RETRY) || (eError == PVRSRV_ERROR_KERNEL_CCB_FULL)) ? \ -+ IMG_TRUE : IMG_FALSE) -+ -+#endif /* !defined(PVRSRV_ERROR_H) */ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_errors.h b/drivers/gpu/drm/img-rogue/pvrsrv_errors.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_errors.h -@@ -0,0 +1,421 @@ -+/*************************************************************************/ /*! -+@File pvrsrv_errors.h -+@Title services error codes -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Defines error codes used by any/all services modules -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. 
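pvrsrv_error.h and pvrsrv_error.c above both expand the PVRE() entries of pvrsrv_errors.h, once into enum members and once into switch cases, so the error list only needs to be maintained in one place. Below is a small self-contained sketch of the same X-macro technique with made-up names (the driver re-includes pvrsrv_errors.h rather than using a list macro, but the effect is identical).

/* --- my_errors list: one definition, expanded twice --- */
#define MY_ERROR_LIST \
	PVRE(MY_ERROR_OUT_OF_MEMORY) \
	PVRE(MY_ERROR_INVALID_PARAMS)

/* Expansion 1: enum members */
typedef enum
{
	MY_OK,
#define PVRE(x) x,
	MY_ERROR_LIST
#undef PVRE
} MY_ERROR;

/* Expansion 2: error-to-string switch cases */
static const char *my_error_to_string(MY_ERROR eError)
{
	switch (eError)
	{
		case MY_OK: return "MY_OK";
#define PVRE(x) case x: return #x;
		MY_ERROR_LIST
#undef PVRE
		default: return "unknown error";
	}
}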
-+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+/* Don't add include guards to this file! */ -+ -+PVRE(PVRSRV_ERROR_OUT_OF_MEMORY) -+PVRE(PVRSRV_ERROR_TOO_FEW_BUFFERS) -+PVRE(PVRSRV_ERROR_INVALID_PARAMS) -+PVRE(PVRSRV_ERROR_INIT_FAILURE) -+PVRE(PVRSRV_ERROR_CANT_REGISTER_CALLBACK) -+PVRE(PVRSRV_ERROR_INVALID_DEVICE) -+PVRE(PVRSRV_ERROR_NOT_OWNER) -+PVRE(PVRSRV_ERROR_BAD_MAPPING) -+PVRE(PVRSRV_ERROR_TIMEOUT) -+PVRE(PVRSRV_ERROR_NOT_IMPLEMENTED) -+PVRE(PVRSRV_ERROR_FLIP_CHAIN_EXISTS) -+PVRE(PVRSRV_ERROR_INVALID_SWAPINTERVAL) -+PVRE(PVRSRV_ERROR_SCENE_INVALID) -+PVRE(PVRSRV_ERROR_STREAM_ERROR) -+PVRE(PVRSRV_ERROR_FAILED_DEPENDENCIES) -+PVRE(PVRSRV_ERROR_CMD_NOT_PROCESSED) -+PVRE(PVRSRV_ERROR_CMD_TOO_BIG) -+PVRE(PVRSRV_ERROR_DEVICE_REGISTER_FAILED) -+PVRE(PVRSRV_ERROR_TOOMANYBUFFERS) -+PVRE(PVRSRV_ERROR_NOT_SUPPORTED) -+PVRE(PVRSRV_ERROR_PROCESSING_BLOCKED) -+PVRE(PVRSRV_ERROR_CANNOT_FLUSH_QUEUE) -+PVRE(PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE) -+PVRE(PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS) -+PVRE(PVRSRV_ERROR_RETRY) -+PVRE(PVRSRV_ERROR_DDK_VERSION_MISMATCH) -+PVRE(PVRSRV_ERROR_DDK_BUILD_MISMATCH) -+PVRE(PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH) -+PVRE(PVRSRV_ERROR_BVNC_MISMATCH) -+PVRE(PVRSRV_ERROR_FWPROCESSOR_MISMATCH) -+PVRE(PVRSRV_ERROR_UPLOAD_TOO_BIG) -+PVRE(PVRSRV_ERROR_INVALID_FLAGS) -+PVRE(PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS) -+PVRE(PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY) -+PVRE(PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR) -+PVRE(PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED) -+PVRE(PVRSRV_ERROR_BRIDGE_CALL_FAILED) -+PVRE(PVRSRV_ERROR_IOCTL_CALL_FAILED) -+PVRE(PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR) -+PVRE(PVRSRV_ERROR_MMU_CONFIG_IS_WRONG) -+PVRE(PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND) -+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES) -+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_CREATE_HEAP) -+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE) -+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_UNMAP_PAGE_TABLE) -+PVRE(PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE) -+PVRE(PVRSRV_ERROR_MMU_LIVE_ALLOCATIONS_IN_HEAP) -+PVRE(PVRSRV_ERROR_MMU_RESERVATION_NOT_INSIDE_HEAP) -+PVRE(PVRSRV_ERROR_PMR_NEW_MEMORY) -+PVRE(PVRSRV_ERROR_PMR_STILL_REFERENCED) -+PVRE(PVRSRV_ERROR_PMR_CLIENT_NOT_TRUSTED) -+PVRE(PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES) -+PVRE(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY) -+PVRE(PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES) -+PVRE(PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE) -+PVRE(PVRSRV_ERROR_PMR_NOT_PERMITTED) -+PVRE(PVRSRV_ERROR_PMR_ALREADY_OCCUPIED) -+PVRE(PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR) -+PVRE(PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR) -+PVRE(PVRSRV_ERROR_PMR_WRONG_PMR_TYPE) -+PVRE(PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS) -+PVRE(PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE) -+PVRE(PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE) -+PVRE(PVRSRV_ERROR_PMR_MAPPINGTABLE_MISMATCH) -+PVRE(PVRSRV_ERROR_PMR_INVALID_CHUNK) -+PVRE(PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING) -+PVRE(PVRSRV_ERROR_PMR_EMPTY) 
-+PVRE(PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND) -+PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_UNMAP_FAILED) -+PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED) -+PVRE(PVRSRV_ERROR_PMR_PAGE_POISONING_FAILED) -+PVRE(PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY) -+PVRE(PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK) -+PVRE(PVRSRV_ERROR_PMR_TOO_LARGE) -+PVRE(PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP) -+PVRE(PVRSRV_ERROR_DEVICEMEM_BAD_IMPORT_SIZE) -+PVRE(PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION) -+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX) -+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX) -+PVRE(PVRSRV_ERROR_DEVICEMEM_MAP_FAILED) -+PVRE(PVRSRV_ERROR_DEVICEMEM_NON_ZERO_USAGE_COUNT) -+PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE) -+PVRE(PVRSRV_ERROR_DEVICEMEM_VA_ALLOC_FAILED) -+PVRE(PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA) -+PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM) -+PVRE(PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED) -+PVRE(PVRSRV_ERROR_DEVICEMEM_NO_MAPPING) -+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS) -+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_LMA_HEAP) -+PVRE(PVRSRV_ERROR_INVALID_MMU_TYPE) -+PVRE(PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND) -+PVRE(PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT) -+PVRE(PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND) -+PVRE(PVRSRV_ERROR_PCI_CALL_FAILED) -+PVRE(PVRSRV_ERROR_PCI_REGION_TOO_SMALL) -+PVRE(PVRSRV_ERROR_PCI_REGION_UNAVAILABLE) -+PVRE(PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH) -+PVRE(PVRSRV_ERROR_REGISTER_BASE_NOT_SET) -+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM) -+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY) -+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC) -+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_KERNELVIRTUAL) -+PVRE(PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR) -+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY) -+PVRE(PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY) -+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES) -+PVRE(PVRSRV_ERROR_FAILED_TO_FREE_PAGES) -+PVRE(PVRSRV_ERROR_FAILED_TO_COPY_PAGES) -+PVRE(PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES) -+PVRE(PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES) -+PVRE(PVRSRV_ERROR_STILL_MAPPED) -+PVRE(PVRSRV_ERROR_MAPPING_NOT_FOUND) -+PVRE(PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT) -+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE) -+PVRE(PVRSRV_ERROR_INVALID_SEGMENT_BLOCK) -+PVRE(PVRSRV_ERROR_INVALID_GFXDEVDEVDATA) -+PVRE(PVRSRV_ERROR_INVALID_DEVINFO) -+PVRE(PVRSRV_ERROR_INVALID_MEMINFO) -+PVRE(PVRSRV_ERROR_INVALID_MISCINFO) -+PVRE(PVRSRV_ERROR_UNKNOWN_IOCTL) -+PVRE(PVRSRV_ERROR_INVALID_CONTEXT) -+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT) -+PVRE(PVRSRV_ERROR_INVALID_HEAP) -+PVRE(PVRSRV_ERROR_INVALID_NON4K_HEAP_PAGESIZE) -+PVRE(PVRSRV_ERROR_INVALID_KERNELINFO) -+PVRE(PVRSRV_ERROR_UNKNOWN_POWER_STATE) -+PVRE(PVRSRV_ERROR_INVALID_HANDLE_TYPE) -+PVRE(PVRSRV_ERROR_INVALID_WRAP_TYPE) -+PVRE(PVRSRV_ERROR_INVALID_PHYS_ADDR) -+PVRE(PVRSRV_ERROR_INVALID_CPU_ADDR) -+PVRE(PVRSRV_ERROR_INVALID_HEAPINFO) -+PVRE(PVRSRV_ERROR_INVALID_PERPROC) -+PVRE(PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO) -+PVRE(PVRSRV_ERROR_INVALID_MAP_REQUEST) -+PVRE(PVRSRV_ERROR_INVALID_UNMAP_REQUEST) -+PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP) -+PVRE(PVRSRV_ERROR_MAPPING_STILL_IN_USE) -+PVRE(PVRSRV_ERROR_EXCEEDED_HW_LIMITS) -+PVRE(PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED) -+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA) -+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT) -+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT) -+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT) -+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT) -+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD) -+PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD) -+PVRE(PVRSRV_ERROR_THREAD_READ_ERROR) 
-+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER) -+PVRE(PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR) -+PVRE(PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR) -+PVRE(PVRSRV_ERROR_ISR_ALREADY_INSTALLED) -+PVRE(PVRSRV_ERROR_ISR_NOT_INSTALLED) -+PVRE(PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT) -+PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO) -+PVRE(PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT) -+PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES) -+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT) -+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE) -+PVRE(PVRSRV_ERROR_INVALID_CCB_COMMAND) -+PVRE(PVRSRV_ERROR_KERNEL_CCB_FULL) -+PVRE(PVRSRV_ERROR_FLIP_FAILED) -+PVRE(PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED) -+PVRE(PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE) -+PVRE(PVRSRV_ERROR_TIMEOUT_WAITING_FOR_CLIENT_CCB) -+PVRE(PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED) -+PVRE(PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG) -+PVRE(PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG) -+PVRE(PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG) -+PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID) -+PVRE(PVRSRV_ERROR_BLIT_SETUP_FAILED) -+PVRE(PVRSRV_ERROR_SUBMIT_NEEDED) -+PVRE(PVRSRV_ERROR_PDUMP_NOT_AVAILABLE) -+PVRE(PVRSRV_ERROR_PDUMP_BUFFER_FULL) -+PVRE(PVRSRV_ERROR_PDUMP_BUF_OVERFLOW) -+PVRE(PVRSRV_ERROR_PDUMP_NOT_ACTIVE) -+PVRE(PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES) -+PVRE(PVRSRV_ERROR_MUTEX_DESTROY_FAILED) -+PVRE(PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR) -+PVRE(PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND) -+PVRE(PVRSRV_ERROR_PROCESS_NOT_INITIALISED) -+PVRE(PVRSRV_ERROR_PROCESS_NOT_FOUND) -+PVRE(PVRSRV_ERROR_SRV_CONNECT_FAILED) -+PVRE(PVRSRV_ERROR_SRV_DISCONNECT_FAILED) -+PVRE(PVRSRV_ERROR_DEINT_PHASE_FAILED) -+PVRE(PVRSRV_ERROR_INIT2_PHASE_FAILED) -+PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE) -+PVRE(PVRSRV_ERROR_NO_DC_DEVICES_FOUND) -+PVRE(PVRSRV_ERROR_DC_DEVICE_INACCESSIBLE) -+PVRE(PVRSRV_ERROR_DC_INVALID_MAXDEPTH) -+PVRE(PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE) -+PVRE(PVRSRV_ERROR_UNABLE_TO_UNREGISTER_DEVICE) -+PVRE(PVRSRV_ERROR_NO_DEVICEDATA_FOUND) -+PVRE(PVRSRV_ERROR_NO_DEVICENODE_FOUND) -+PVRE(PVRSRV_ERROR_NO_CLIENTNODE_FOUND) -+PVRE(PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE) -+PVRE(PVRSRV_ERROR_UNABLE_TO_INIT_TASK) -+PVRE(PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK) -+PVRE(PVRSRV_ERROR_UNABLE_TO_KILL_TASK) -+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER) -+PVRE(PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER) -+PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER) -+PVRE(PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT) -+PVRE(PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE) -+PVRE(PVRSRV_ERROR_HANDLE_NOT_ALLOCATED) -+PVRE(PVRSRV_ERROR_HANDLE_TYPE_MISMATCH) -+PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE) -+PVRE(PVRSRV_ERROR_HANDLE_NOT_SHAREABLE) -+PVRE(PVRSRV_ERROR_HANDLE_NOT_FOUND) -+PVRE(PVRSRV_ERROR_INVALID_SUBHANDLE) -+PVRE(PVRSRV_ERROR_HANDLE_BATCH_IN_USE) -+PVRE(PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE) -+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE) -+PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_HASH_VALUE) -+PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE) -+PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE) -+PVRE(PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED) -+PVRE(PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE) -+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP) -+PVRE(PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE) -+PVRE(PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVAILABLE) -+PVRE(PVRSRV_ERROR_INVALID_DEVICEID) -+PVRE(PVRSRV_ERROR_DEVICEID_NOT_FOUND) -+PVRE(PVRSRV_ERROR_MEMORY_TEST_FAILED) -+PVRE(PVRSRV_ERROR_CPUPADDR_TEST_FAILED) -+PVRE(PVRSRV_ERROR_COPY_TEST_FAILED) -+PVRE(PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED) -+PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK) 
-+PVRE(PVRSRV_ERROR_CLOCK_REQUEST_FAILED) -+PVRE(PVRSRV_ERROR_DISABLE_CLOCK_FAILURE) -+PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE) -+PVRE(PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE) -+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK) -+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_CLOCK) -+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK) -+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK) -+PVRE(PVRSRV_ERROR_UNKNOWN_SGL_ERROR) -+PVRE(PVRSRV_ERROR_SYSTEM_POWER_CHANGE_FAILURE) -+PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE) -+PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED) -+PVRE(PVRSRV_ERROR_BAD_SYNC_STATE) -+PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE) -+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID) -+PVRE(PVRSRV_ERROR_PARAMETER_BUFFER_INVALID_ALIGNMENT) -+PVRE(PVRSRV_ERROR_UNABLE_TO_ACQUIRE_CONNECTION) -+PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CONNECTION) -+PVRE(PVRSRV_ERROR_PHYSHEAP_ID_IN_USE) -+PVRE(PVRSRV_ERROR_PHYSHEAP_ID_INVALID) -+PVRE(PVRSRV_ERROR_PHYSHEAP_CONFIG) -+PVRE(PVRSRV_ERROR_HP_REQUEST_TOO_LONG) -+PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM) -+PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM_OP) -+PVRE(PVRSRV_ERROR_INVALID_SYNC_CONTEXT) -+PVRE(PVRSRV_ERROR_BP_NOT_SET) -+PVRE(PVRSRV_ERROR_BP_ALREADY_SET) -+PVRE(PVRSRV_ERROR_FEATURE_DISABLED) -+PVRE(PVRSRV_ERROR_REG_CONFIG_ENABLED) -+PVRE(PVRSRV_ERROR_REG_CONFIG_FULL) -+PVRE(PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE) -+PVRE(PVRSRV_ERROR_MEMORY_ACCESS) -+PVRE(PVRSRV_ERROR_NO_SYSTEM_BUFFER) -+PVRE(PVRSRV_ERROR_DC_INVALID_CONFIG) -+PVRE(PVRSRV_ERROR_DC_INVALID_CROP_RECT) -+PVRE(PVRSRV_ERROR_DC_INVALID_DISPLAY_RECT) -+PVRE(PVRSRV_ERROR_DC_INVALID_BUFFER_DIMS) -+PVRE(PVRSRV_ERROR_DC_INVALID_TRANSFORM) -+PVRE(PVRSRV_ERROR_DC_INVALID_SCALE) -+PVRE(PVRSRV_ERROR_DC_INVALID_CUSTOM) -+PVRE(PVRSRV_ERROR_DC_TOO_MANY_PIPES) -+PVRE(PVRSRV_ERROR_DC_INVALID_PLANE_ALPHA) -+PVRE(PVRSRV_ERROR_NOT_READY) -+PVRE(PVRSRV_ERROR_RESOURCE_UNAVAILABLE) -+PVRE(PVRSRV_ERROR_UNSUPPORTED_PIXEL_FORMAT) -+PVRE(PVRSRV_ERROR_UNSUPPORTED_MEMORY_LAYOUT) -+PVRE(PVRSRV_ERROR_UNSUPPORTED_FB_COMPRESSION_MODE) -+PVRE(PVRSRV_ERROR_UNSUPPORTED_DIMS) -+PVRE(PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE) -+PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_TIMER) -+PVRE(PVRSRV_ERROR_NOT_FOUND) -+PVRE(PVRSRV_ERROR_ALREADY_OPEN) -+PVRE(PVRSRV_ERROR_STREAM_MISUSE) -+PVRE(PVRSRV_ERROR_STREAM_FULL) -+PVRE(PVRSRV_ERROR_STREAM_READLIMIT_REACHED) -+PVRE(PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE) -+PVRE(PVRSRV_ERROR_PHYSMEM_NOT_ALLOCATED) -+PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MAX) -+PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MIN) -+PVRE(PVRSRV_ERROR_INVALID_PB_CONFIG) -+PVRE(PVRSRV_ERROR_META_THREAD0_NOT_ENABLED) -+PVRE(PVRSRV_ERROR_NOT_AUTHENTICATED) -+PVRE(PVRSRV_ERROR_REQUEST_TDFWMEM_PAGES_FAIL) -+PVRE(PVRSRV_ERROR_INIT_TDFWMEM_PAGES_FAIL) -+PVRE(PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL) -+PVRE(PVRSRV_ERROR_INIT_TDSECUREBUF_PAGES_FAIL) -+PVRE(PVRSRV_ERROR_MUTEX_ALREADY_CREATED) -+PVRE(PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED) -+PVRE(PVRSRV_ERROR_ALREADY_EXISTS) -+PVRE(PVRSRV_ERROR_UNABLE_TO_SEND_PULSE) -+PVRE(PVRSRV_ERROR_TASK_FAILED) -+PVRE(PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED) -+PVRE(PVRSRV_ERROR_INVALID_GPU_ADDR) -+PVRE(PVRSRV_ERROR_INVALID_OFFSET) -+PVRE(PVRSRV_ERROR_CCCB_STALLED) -+PVRE(PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE) -+PVRE(PVRSRV_ERROR_NOT_ENABLED) -+PVRE(PVRSRV_ERROR_SYSTEM_LOCAL_MEMORY_INIT_FAIL) -+PVRE(PVRSRV_ERROR_FW_IMAGE_MISMATCH) -+PVRE(PVRSRV_ERROR_PDUMP_NOT_ALLOWED) -+PVRE(PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL) -+PVRE(PVRSRV_ERROR_RPM_PBSIZE_ALREADY_MAX) -+PVRE(PVRSRV_ERROR_NONZERO_REFCOUNT) -+PVRE(PVRSRV_ERROR_SETAFFINITY_FAILED) 
-+PVRE(PVRSRV_ERROR_UNABLE_TO_COMPILE_PDS) -+PVRE(PVRSRV_ERROR_INTERNAL_ERROR) -+PVRE(PVRSRV_ERROR_BRIDGE_EFAULT) -+PVRE(PVRSRV_ERROR_BRIDGE_EINVAL) -+PVRE(PVRSRV_ERROR_BRIDGE_ENOMEM) -+PVRE(PVRSRV_ERROR_BRIDGE_ERANGE) -+PVRE(PVRSRV_ERROR_BRIDGE_EPERM) -+PVRE(PVRSRV_ERROR_BRIDGE_ENOTTY) -+PVRE(PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED) -+PVRE(PVRSRV_ERROR_PROBE_DEFER) -+PVRE(PVRSRV_ERROR_INVALID_ALIGNMENT) -+PVRE(PVRSRV_ERROR_CLOSE_FAILED) -+PVRE(PVRSRV_ERROR_NOT_INITIALISED) -+PVRE(PVRSRV_ERROR_CONVERSION_FAILED) -+PVRE(PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) -+PVRE(PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL) -+PVRE(PVRSRV_ERROR_RA_OUT_OF_RESOURCE) -+PVRE(PVRSRV_ERROR_RA_NO_RESOURCE_WITH_FLAGS) -+PVRE(PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED) -+PVRE(PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED) -+PVRE(PVRSRV_ERROR_RA_FREE_INVALID_CHUNK) -+PVRE(PVRSRV_ERROR_OBJECT_STILL_REFERENCED) -+PVRE(PVRSRV_ERROR_BVNC_UNSUPPORTED) -+PVRE(PVRSRV_ERROR_INVALID_BVNC_PARAMS) -+PVRE(PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE) -+PVRE(PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT) -+PVRE(PVRSRV_ERROR_PID_ALREADY_REGISTERED) -+PVRE(PVRSRV_ERROR_PID_NOT_REGISTERED) -+PVRE(PVRSRV_ERROR_SIGNAL_FAILED) -+PVRE(PVRSRV_ERROR_INVALID_NOTIF_STREAM) -+PVRE(PVRSRV_ERROR_INVALID_SPU_MASK) -+PVRE(PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED) -+PVRE(PVRSRV_ERROR_INVALID_PVZ_CONFIG) -+PVRE(PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED) -+PVRE(PVRSRV_ERROR_NOT_SW_TIMELINE) -+PVRE(PVRSRV_ERROR_SW_TIMELINE_AT_LATEST_POINT) -+PVRE(PVRSRV_ERROR_INVALID_PVZ_OSID) -+PVRE(PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE) -+PVRE(PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG) -+PVRE(PVRSRV_ERROR_INTERRUPTED) -+PVRE(PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) -+PVRE(PVRSRV_ERROR_PDUMP_INVALID_BLOCKLEN) -+PVRE(PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF) -+PVRE(PVRSRV_ERROR_MULTIPLE_SECURITY_PDUMPS) -+PVRE(PVRSRV_ERROR_BAD_PARAM_SIZE) -+PVRE(PVRSRV_ERROR_INVALID_REQUEST) -+PVRE(PVRSRV_ERROR_FAILED_TO_ACQUIRE_PAGES) -+PVRE(PVRSRV_ERROR_TEST_FAILED) -+PVRE(PVRSRV_ERROR_SYNC_PRIM_OP_NOT_SUPPORTED) -+PVRE(PVRSRV_ERROR_FAILED_TO_GET_VIRT_ADDR) -+PVRE(PVRSRV_ERROR_UNABLE_TO_FREE_RESOURCE) -+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_SEMAPHORE) -+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_SEMAPHORE) -+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_SEMAPHORE) -+PVRE(PVRSRV_ERROR_TOO_MANY_SYNCS) -+PVRE(PVRSRV_ERROR_ION_NO_CLIENT) -+PVRE(PVRSRV_ERROR_ION_FAILED_TO_ALLOC) -+PVRE(PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE) -+PVRE(PVRSRV_ERROR_OUT_OF_RANGE) -+PVRE(PVRSRV_ERROR_OUT_OF_APP_POOL_MEMORY) -+PVRE(PVRSRV_ERROR_REFCOUNT_OVERFLOW) -+PVRE(PVRSRV_ERROR_INSUFFICIENT_PHYS_HEAP_MEMORY) -+PVRE(PVRSRV_ERROR_UNEXPECTED_TRUE_EXPR) -+PVRE(PVRSRV_ERROR_UNEXPECTED_FALSE_EXPR) -+PVRE(PVRSRV_ERROR_KERNEL_CCB_OFFSET) -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_firmware_boot.h b/drivers/gpu/drm/img-rogue/pvrsrv_firmware_boot.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_firmware_boot.h -@@ -0,0 +1,87 @@ -+/**************************************************************************/ /*! -+@File -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
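The long run of PVRE(...) entries that closes above is an X-macro list: the header carrying it is re-included wherever a table keyed by error code is needed, with PVRE redefined at each expansion site. A minimal sketch of that pattern follows, assuming the list lives in a header named here "pvrsrv_errors.h" (file name assumed) and that PVRSRV_ERROR is the driver's error enumeration; the helper name is illustrative, not part of this patch.

static const char *SketchErrorToString(PVRSRV_ERROR eError)
{
    switch (eError)
    {
    /* Each PVRE(x) entry in the included list expands to one case label
     * that returns its own symbolic name. */
#define PVRE(x) case x: return #x;
#include "pvrsrv_errors.h"   /* header name assumed for illustration */
#undef PVRE
    default:
        return "Unknown PVRSRV error";
    }
}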
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /***************************************************************************/ -+ -+#ifndef PVRSRV_FIRMWARE_BOOT_H -+#define PVRSRV_FIRMWARE_BOOT_H -+ -+#include "img_types.h" -+#include "rgx_fwif_shared.h" -+ -+#define TD_MAX_NUM_MIPS_PAGETABLE_PAGES (4U) -+ -+typedef union _PVRSRV_FW_BOOT_PARAMS_ -+{ -+ struct -+ { -+ IMG_DEV_VIRTADDR sFWCodeDevVAddr; -+ IMG_DEV_VIRTADDR sFWDataDevVAddr; -+ IMG_DEV_VIRTADDR sFWCorememCodeDevVAddr; -+ RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; -+ IMG_DEVMEM_SIZE_T uiFWCorememCodeSize; -+ IMG_DEV_VIRTADDR sFWCorememDataDevVAddr; -+ RGXFWIF_DEV_VIRTADDR sFWCorememDataFWAddr; -+ IMG_UINT32 ui32NumThreads; -+ } sMeta; -+ -+ struct -+ { -+ IMG_DEV_PHYADDR sGPURegAddr; -+ IMG_DEV_PHYADDR asFWPageTableAddr[TD_MAX_NUM_MIPS_PAGETABLE_PAGES]; -+ IMG_DEV_PHYADDR sFWStackAddr; -+ IMG_UINT32 ui32FWPageTableLog2PageSize; -+ IMG_UINT32 ui32FWPageTableNumPages; -+ } sMips; -+ -+ struct -+ { -+ IMG_DEV_VIRTADDR sFWCorememCodeDevVAddr; -+ RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; -+ IMG_DEVMEM_SIZE_T uiFWCorememCodeSize; -+ -+ IMG_DEV_VIRTADDR sFWCorememDataDevVAddr; -+ RGXFWIF_DEV_VIRTADDR sFWCorememDataFWAddr; -+ IMG_DEVMEM_SIZE_T uiFWCorememDataSize; -+ } sRISCV; -+ -+} PVRSRV_FW_BOOT_PARAMS; -+ -+ -+#endif /* PVRSRV_FIRMWARE_BOOT_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_memalloc_physheap.h b/drivers/gpu/drm/img-rogue/pvrsrv_memalloc_physheap.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_memalloc_physheap.h -@@ -0,0 +1,215 @@ -+/*************************************************************************/ /*! -+@File pvrsrv_memalloc_physheap.h -+@Title Services Phys Heap types -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Used in creating and allocating from Physical Heaps. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
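The PVRSRV_FW_BOOT_PARAMS union above groups per-processor boot parameters for the three firmware cores it covers (sMeta, sMips, sRISCV); a caller fills in only the member that matches the core being booted. A hedged sketch of populating the sRISCV member, using only the field and type names from the header above; the function name and parameter list are assumptions, not part of this patch.

static void SketchFillRiscvBootParams(PVRSRV_FW_BOOT_PARAMS *psParams,
                                      IMG_DEV_VIRTADDR sCodeDevVAddr,
                                      RGXFWIF_DEV_VIRTADDR sCodeFWAddr,
                                      IMG_DEVMEM_SIZE_T uiCodeSize,
                                      IMG_DEV_VIRTADDR sDataDevVAddr,
                                      RGXFWIF_DEV_VIRTADDR sDataFWAddr,
                                      IMG_DEVMEM_SIZE_T uiDataSize)
{
    /* Only the union member matching the firmware processor is meaningful;
     * the sMeta and sMips members are left untouched. */
    psParams->sRISCV.sFWCorememCodeDevVAddr = sCodeDevVAddr;
    psParams->sRISCV.sFWCorememCodeFWAddr   = sCodeFWAddr;
    psParams->sRISCV.uiFWCorememCodeSize    = uiCodeSize;
    psParams->sRISCV.sFWCorememDataDevVAddr = sDataDevVAddr;
    psParams->sRISCV.sFWCorememDataFWAddr   = sDataFWAddr;
    psParams->sRISCV.uiFWCorememDataSize    = uiDataSize;
}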
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef PVRSRV_MEMALLOC_PHYSHEAP_H -+#define PVRSRV_MEMALLOC_PHYSHEAP_H -+ -+#include "img_defs.h" -+ -+/* -+ * These IDs are replicated in the Device Memory allocation flags to allow -+ * allocations to be made in terms of their locality/use to ensure the correct -+ * physical heap is accessed for the given system/platform configuration. -+ * A system Phys Heap Config is linked to one or more Phys Heaps. When a heap -+ * is not present in the system configuration the allocation will fallback to -+ * the default GPU_LOCAL physical heap which all systems must define. -+ * See PVRSRV_MEMALLOCFLAGS_*_MAPPABLE_MASK. -+ * -+ * NOTE: Enum order important, table in physheap.c must change if order changed. -+ */ -+#define PHYS_HEAP_LIST \ -+ X(DEFAULT) /* Client: default phys heap for device memory allocations */ \ -+ X(CPU_LOCAL) /* Client: used for buffers with more CPU access than GPU */ \ -+ X(GPU_LOCAL) /* Client: used for buffers with more GPU access than CPU */ \ -+ X(GPU_PRIVATE) /* Client: used for buffers that only required GPU read/write access, not visible to the CPU. */ \ -+ X(FW_MAIN) /* Internal: runtime data, e.g. CCBs, sync objects */ \ -+ X(EXTERNAL) /* Internal: used by some PMR import/export factories where the physical memory heap is not managed by the pvrsrv driver */ \ -+ X(GPU_COHERENT) /* Internal: used for a cache coherent region */ \ -+ X(GPU_SECURE) /* Internal: used by security validation */ \ -+ X(FW_CONFIG) /* Internal: subheap of FW_MAIN, configuration data for FW init */ \ -+ X(FW_CODE) /* Internal: used by security validation or dedicated fw */ \ -+ X(FW_PRIV_DATA) /* Internal: internal FW data (like the stack, FW control data structures, etc.) */ \ -+ X(FW_PREMAP_PT) /* Internal: page tables for premapped firmware memory */ \ -+ X(FW_PREMAP0) /* Internal: Host OS premap fw heap */ \ -+ X(FW_PREMAP1) /* Internal: Guest OS 1 premap fw heap */ \ -+ X(FW_PREMAP2) /* Internal: Guest OS 2 premap fw heap */ \ -+ X(FW_PREMAP3) /* Internal: Guest OS 3 premap fw heap */ \ -+ X(FW_PREMAP4) /* Internal: Guest OS 4 premap fw heap */ \ -+ X(FW_PREMAP5) /* Internal: Guest OS 5 premap fw heap */ \ -+ X(FW_PREMAP6) /* Internal: Guest OS 6 premap fw heap */ \ -+ X(FW_PREMAP7) /* Internal: Guest OS 7 premap fw heap */ \ -+ X(WRAP) /* External: Wrap memory */ \ -+ X(DISPLAY) /* External: Display memory */ \ -+ X(LAST) -+ -+typedef enum _PVRSRV_PHYS_HEAP_ -+{ -+#define X(_name) PVRSRV_PHYS_HEAP_ ## _name, -+ PHYS_HEAP_LIST -+#undef X -+ -+ PVRSRV_PHYS_HEAP_INVALID = 0x7FFFFFFF -+} PVRSRV_PHYS_HEAP; -+ -+/* Defines the number of user mode physheaps. These physheaps are: DEFAULT, GPU_LOCAL, -+ * CPU_LOCAL, GPU_PRIVATE, GPU_SECURE. */ -+#define MAX_USER_MODE_ALLOC_PHYS_HEAPS 5 -+ -+static_assert(PVRSRV_PHYS_HEAP_LAST <= (0x1FU + 1U), "Ensure enum fits in memalloc flags bitfield."); -+ -+/*! 
Type conveys the class of physical heap to instantiate within Services -+ * for the physical pool of memory. */ -+typedef enum _PHYS_HEAP_TYPE_ -+{ -+ PHYS_HEAP_TYPE_UNKNOWN = 0, /*!< Not a valid value for any config */ -+ PHYS_HEAP_TYPE_UMA, /*!< Heap represents OS managed physical memory heap -+ i.e. system RAM. Unified Memory Architecture -+ physmem_osmem PMR factory */ -+ PHYS_HEAP_TYPE_LMA, /*!< Heap represents physical memory pool managed by -+ Services i.e. carve out from system RAM or local -+ card memory. Local Memory Architecture -+ physmem_lma PMR factory */ -+#if defined(__KERNEL__) -+ PHYS_HEAP_TYPE_DMA, /*!< Heap represents a physical memory pool managed by -+ Services, alias of LMA and is only used on -+ VZ non-native system configurations for -+ a heap used for allocations tagged with -+ PVRSRV_PHYS_HEAP_FW_MAIN or -+ PVRSRV_PHYS_HEAP_FW_CONFIG */ -+#if defined(SUPPORT_WRAP_EXTMEMOBJECT) -+ PHYS_HEAP_TYPE_WRAP, /*!< Heap used to group UM buffers given -+ to Services. Integrity OS port only. */ -+#endif -+#endif -+} PHYS_HEAP_TYPE; -+ -+/* Defines used when interpreting the ui32PhysHeapFlags in PHYS_HEAP_MEM_STATS -+ 0x000000000000dttt -+ d = is this the default heap? (1=yes, 0=no) -+ ttt = heap type (000 = PHYS_HEAP_TYPE_UNKNOWN, -+ 001 = PHYS_HEAP_TYPE_UMA, -+ 010 = PHYS_HEAP_TYPE_LMA, -+ 011 = PHYS_HEAP_TYPE_DMA) -+*/ -+#define PVRSRV_PHYS_HEAP_FLAGS_TYPE_MASK (0x7U << 0) -+#define PVRSRV_PHYS_HEAP_FLAGS_IS_DEFAULT (0x1U << 7) -+ -+/* Force PHYS_HEAP_MEM_STATS size to be a multiple of 8 bytes -+ * (as type is a parameter in bridge calls) -+ */ -+typedef struct PHYS_HEAP_MEM_STATS_TAG -+{ -+ IMG_UINT64 ui64TotalSize; -+ IMG_UINT64 ui64FreeSize; -+ IMG_UINT32 ui32PhysHeapFlags; -+ IMG_UINT32 ui32UnusedPadding; -+}PHYS_HEAP_MEM_STATS, *PHYS_HEAP_MEM_STATS_PTR; -+ -+#if defined(PHYSHEAP_STRINGS) -+ -+static const char *const _pszPhysHeapStrings[] = { -+#define X(_name) #_name, -+ PHYS_HEAP_LIST -+#undef X -+}; -+ -+/*************************************************************************/ /*! -+@Function PVRSRVGetClientPhysHeapTypeName -+@Description Returns the phys heap type as a string. -+ -+@Input ePhysHeapType The physheap type. -+ -+@Return const IMG_CHAR pointer. -+*/ /**************************************************************************/ -+static inline const IMG_CHAR *PVRSRVGetClientPhysHeapTypeName(PHYS_HEAP_TYPE ePhysHeapType) -+{ -+#define HEAPSTR(x) #x -+ switch (ePhysHeapType) -+ { -+ case PHYS_HEAP_TYPE_UMA: -+ return HEAPSTR(PHYS_HEAP_TYPE_UMA); -+ case PHYS_HEAP_TYPE_LMA: -+ return HEAPSTR(PHYS_HEAP_TYPE_LMA); -+ default: -+ return "Unknown Heap Type"; -+ } -+#undef HEAPSTR -+} -+ -+/*************************************************************************/ /*! -+@Function PVRSRVGetPhysHeapName -+@Description Returns the name of a PhysHeap. -+ -+@Input ePhysHeap The enum value of the physheap. -+ -+@Return const IMG_CHAR pointer. -+*/ /**************************************************************************/ -+static inline const IMG_CHAR *PVRSRVGetPhysHeapName(PVRSRV_PHYS_HEAP ePhysHeap) -+{ -+ if (ePhysHeap < 0 || ePhysHeap >= PVRSRV_PHYS_HEAP_LAST) -+ { -+ return "Undefined"; -+ } -+ -+ return _pszPhysHeapStrings[ePhysHeap]; -+} -+ -+/*************************************************************************/ /*! -+@Function PVRSRVGetClientPhysHeapName -+@Description Returns the name of a client PhysHeap. -+ -+@Input ePhysHeap The enum value of the physheap. -+ -+@Return const IMG_CHAR pointer. 
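PHYS_HEAP_MEM_STATS above packs the heap type and a "system default" marker into its ui32PhysHeapFlags word. A small sketch of decoding that word with the masks defined above; it assumes PHYSHEAP_STRINGS is defined so that PVRSRVGetClientPhysHeapTypeName is available, that IMG_BOOL/IMG_TRUE/IMG_FALSE come from img_defs.h, and the function name is illustrative.

static const IMG_CHAR *SketchDescribePhysHeapStats(const PHYS_HEAP_MEM_STATS *psStats,
                                                   IMG_BOOL *pbIsDefault)
{
    /* Per the bit-layout comment above: the low bits carry the heap type,
     * a separate bit flags the system-default heap. */
    PHYS_HEAP_TYPE eType = (PHYS_HEAP_TYPE)(psStats->ui32PhysHeapFlags &
                                            PVRSRV_PHYS_HEAP_FLAGS_TYPE_MASK);

    *pbIsDefault = ((psStats->ui32PhysHeapFlags &
                     PVRSRV_PHYS_HEAP_FLAGS_IS_DEFAULT) != 0U) ? IMG_TRUE : IMG_FALSE;

    return PVRSRVGetClientPhysHeapTypeName(eType);
}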
-+*/ /**************************************************************************/ -+static inline const IMG_CHAR *PVRSRVGetClientPhysHeapName(PVRSRV_PHYS_HEAP ePhysHeap) -+{ -+ if (ePhysHeap > PVRSRV_PHYS_HEAP_GPU_PRIVATE) -+ { -+ return "Unknown Heap"; -+ } -+ -+ return PVRSRVGetPhysHeapName(ePhysHeap); -+} -+#endif /* PHYSHEAP_STRINGS */ -+ -+#endif /* PVRSRV_MEMALLOC_PHYSHEAP_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_memallocflags.h b/drivers/gpu/drm/img-rogue/pvrsrv_memallocflags.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_memallocflags.h -@@ -0,0 +1,1047 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device Memory Management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description This file defines flags used on memory allocations and mappings -+ These flags are relevant throughout the memory management -+ software stack and are specified by users of services and -+ understood by all levels of the memory management in both -+ client and server. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PVRSRV_MEMALLOCFLAGS_H -+#define PVRSRV_MEMALLOCFLAGS_H -+ -+#include "img_types.h" -+#include "pvrsrv_memalloc_physheap.h" -+ -+/*! -+ Type for specifying memory allocation flags. 
-+ */ -+ -+typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; -+#define PVRSRV_MEMALLOCFLAGS_FMTSPEC IMG_UINT64_FMTSPECx -+ -+#if defined(__KERNEL__) -+#include "pvrsrv_memallocflags_internal.h" -+#endif /* __KERNEL__ */ -+ -+/* -+ * --- MAPPING FLAGS 0..14 (15-bits) --- -+ * | 0-3 | 4-7 | 8-10 | 11-13 | 14 | -+ * | GPU-RW | CPU-RW | GPU-Caching | CPU-Caching | KM-Mappable | -+ * -+ * --- MISC FLAGS 15..23 (9-bits) --- -+ * | 15 | 16 | 17 | 18 | 19 | 20 | -+ * | Defer | Alloc-Now | SVM | Scratch-Pg | CPU-Cache-Clean | Zero-Pg | -+ * -+ * --- DEV CONTROL FLAGS 26..27 (2-bits) --- -+ * | 21-25 | 26-27 | -+ * | ..... | Device-Flags | -+ * -+ * --- MISC FLAGS 28..31 (4-bits) --- -+ * | 28 | 29 | 30 | 31 | -+ * | No-Cache-Align | Poison-On-Free | P.-On-Alloc | Zero-On-Alloc | -+ * -+ * --- VALIDATION FLAGS --- -+ * | 35 | -+ * | Shared-buffer | -+ * -+ * --- IPA Policy --- -+ * | 53-55 | -+ * | IPA Policy | -+ * -+ * --- PHYS HEAP HINTS --- -+ * | 56 | 57-58 | 59-63 | -+ * | Mandate Heap | | PhysHeap Hints | -+ * -+ */ -+ -+/* -+ * ********************************************************** -+ * * * -+ * * MAPPING FLAGS * -+ * * * -+ * ********************************************************** -+ */ -+ -+/*! -+ * This flag affects the device MMU protection flags, and specifies -+ * that the memory may be read by the GPU. -+ * -+ * Typically all device memory allocations would specify this flag. -+ * -+ * At the moment, memory allocations without this flag are not supported -+ * -+ * This flag will live with the PMR, thus subsequent mappings would -+ * honour this flag. -+ * -+ * This is a dual purpose flag. It specifies that memory is permitted -+ * to be read by the GPU, and also requests that the allocation is -+ * mapped into the GPU as a readable mapping -+ * -+ * To be clear: -+ * - When used as an argument on PMR creation; it specifies -+ * that GPU readable mappings will be _permitted_ -+ * - When used as an argument to a "map" function: it specifies -+ * that a GPU readable mapping is _desired_ -+ * - When used as an argument to "AllocDeviceMem": it specifies -+ * that the PMR will be created with permission to be mapped -+ * with a GPU readable mapping, _and_ that this PMR will be -+ * mapped with a GPU readable mapping. -+ * This distinction becomes important when (a) we export allocations; -+ * and (b) when we separate the creation of the PMR from the mapping. -+ */ -+#define PVRSRV_MEMALLOCFLAG_GPU_READABLE (IMG_UINT64_C(1)<<0) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_READABLE flag is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_CHECK_GPU_READABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READABLE) != 0U) -+ -+/*! -+ * This flag affects the device MMU protection flags, and specifies -+ * that the memory may be written by the GPU -+ * -+ * Using this flag on an allocation signifies that the allocation is -+ * intended to be written by the GPU. -+ * -+ * Omitting this flag causes a read-only mapping. -+ * -+ * This flag will live with the PMR, thus subsequent mappings would -+ * honour this flag. -+ * -+ * This is a dual purpose flag. It specifies that memory is permitted -+ * to be written by the GPU, and also requests that the allocation is -+ * mapped into the GPU as a writable mapping (see note above about -+ * permission vs. mapping mode, and why this flag causes permissions -+ * to be inferred from mapping mode on first allocation) -+ * -+ * N.B. 
This flag has no relevance to the CPU's MMU mapping, if any, -+ * and would therefore not enforce read-only mapping on CPU. -+ */ -+#define PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE (IMG_UINT64_C(1)<<1) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE flag is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_CHECK_GPU_WRITEABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE) != 0U) -+ -+/*! -+ The flag indicates whether an allocation can be mapped as GPU readable in another GPU memory context. -+ */ -+#define PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED (IMG_UINT64_C(1)<<2) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED flag is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_CHECK_GPU_READ_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED) != 0U) -+ -+/*! -+ The flag indicates whether an allocation can be mapped as GPU writable in another GPU memory context. -+ */ -+#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED (IMG_UINT64_C(1)<<3) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED flag is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_CHECK_GPU_WRITE_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED) != 0U) -+ -+/*! -+ The flag indicates that an allocation is mapped as readable to the CPU. -+ */ -+#define PVRSRV_MEMALLOCFLAG_CPU_READABLE (IMG_UINT64_C(1)<<4) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_READABLE flag is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_CHECK_CPU_READABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READABLE) != 0U) -+ -+/*! -+ The flag indicates that an allocation is mapped as writable to the CPU. -+ */ -+#define PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE (IMG_UINT64_C(1)<<5) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE flag is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_CHECK_CPU_WRITEABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE) != 0U) -+ -+/*! -+ The flag indicates whether an allocation can be mapped as CPU readable in another CPU memory context. -+ */ -+#define PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED (IMG_UINT64_C(1)<<6) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED flag is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_CHECK_CPU_READ_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED) != 0U) -+ -+/*! -+ The flag indicates whether an allocation can be mapped as CPU writable in another CPU memory context. -+ */ -+#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED (IMG_UINT64_C(1)<<7) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED flag is set. -+ @Input uiFlags Allocation flags. 
-+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_CHECK_CPU_WRITE_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED) != 0U) -+ -+ -+/* -+ * ********************************************************** -+ * * * -+ * * CACHE CONTROL FLAGS * -+ * * * -+ * ********************************************************** -+ */ -+ -+/* -+ GPU domain -+ ========== -+ -+ The following defines are used to control the GPU cache bit field. -+ The defines are mutually exclusive. -+ -+ A helper macro, PVRSRV_GPU_CACHE_MODE, is provided to obtain just the GPU -+ cache bit field from the flags. This should be used whenever the GPU cache -+ mode needs to be determined. -+*/ -+ -+/*! -+ GPU domain. Flag indicating uncached memory. This means that any writes to memory -+ allocated with this flag are written straight to memory and thus are -+ coherent for any device in the system. -+*/ -+#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED (IMG_UINT64_C(1)<<8) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_UNCACHED mode is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the mode is set, false otherwise -+ */ -+#define PVRSRV_CHECK_GPU_UNCACHED(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_UNCACHED) -+ -+/*! -+ GPU domain. Use write combiner (if supported) to combine sequential writes -+ together to reduce memory access by doing burst writes. -+*/ -+#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC (IMG_UINT64_C(0)<<8) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC mode is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the mode is set, false otherwise -+ */ -+#define PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC) -+ -+/*! -+ GPU domain. This flag affects the GPU MMU protection flags. -+ The allocation will be cached. -+ Services will try to set the coherent bit in the GPU MMU tables so the -+ GPU cache is snooping the CPU cache. If coherency is not supported the -+ caller is responsible to ensure the caches are up to date. -+*/ -+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT (IMG_UINT64_C(2)<<8) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT mode is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the mode is set, false otherwise -+ */ -+#define PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) -+ -+/*! -+ GPU domain. Request cached memory, but not coherent (i.e. no cache -+ snooping). Services will flush the GPU internal caches after every GPU -+ task so no cache maintenance requests from the users are necessary. -+ -+ Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future -+ expansion. -+*/ -+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT (IMG_UINT64_C(3)<<8) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT mode is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the mode is set, false otherwise -+ */ -+#define PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT) -+ -+/*! -+ GPU domain. This flag is for internal use only and is used to indicate -+ that the underlying allocation should be cached on the GPU after all -+ the snooping and coherent checks have been done -+*/ -+#define PVRSRV_MEMALLOCFLAG_GPU_CACHED (IMG_UINT64_C(7)<<8) -+ -+/*! 
-+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHED mode is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the mode is set, false otherwise -+ */ -+#define PVRSRV_CHECK_GPU_CACHED(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHED) -+ -+/*! -+ GPU domain. GPU cache mode mask. -+*/ -+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK (IMG_UINT64_C(7)<<8) -+ -+/*! -+ @Description A helper macro to obtain just the GPU cache bit field from the flags. -+ This should be used whenever the GPU cache mode needs to be determined. -+ @Input uiFlags Allocation flags. -+ @Return Value of the GPU cache bit field. -+ */ -+#define PVRSRV_GPU_CACHE_MODE(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK) -+ -+ -+/* -+ CPU domain -+ ========== -+ -+ The following defines are used to control the CPU cache bit field. -+ The defines are mutually exclusive. -+ -+ A helper macro, PVRSRV_CPU_CACHE_MODE, is provided to obtain just the CPU -+ cache bit field from the flags. This should be used whenever the CPU cache -+ mode needs to be determined. -+*/ -+ -+/*! -+ CPU domain. Use write combiner (if supported) to combine sequential writes -+ together to reduce memory access by doing burst writes. -+*/ -+#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC (IMG_UINT64_C(0)<<11) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC mode is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the mode is set, false otherwise -+ */ -+#define PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC) -+ -+/*! -+ CPU domain. This flag affects the CPU MMU protection flags. -+ The allocation will be cached. -+ Services will try to set the coherent bit in the CPU MMU tables so the -+ CPU cache is snooping the GPU cache. If coherency is not supported the -+ caller is responsible to ensure the caches are up to date. -+*/ -+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT (IMG_UINT64_C(2)<<11) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT mode is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the mode is set, false otherwise -+ */ -+#define PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) -+ -+/*! -+ CPU domain. Request cached memory, but not coherent (i.e. no cache -+ snooping). This means that if the allocation needs to transition from -+ one device to another services has to be informed so it can -+ flush/invalidate the appropriate caches. -+ -+ Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future -+ expansion. -+*/ -+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT (IMG_UINT64_C(3)<<11) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT mode is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the mode is set, false otherwise -+ */ -+#define PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT) -+ -+/*! -+ CPU domain. This flag is for internal use only and is used to indicate -+ that the underlying allocation should be cached on the CPU -+ after all the snooping and coherent checks have been done -+*/ -+#define PVRSRV_MEMALLOCFLAG_CPU_CACHED (IMG_UINT64_C(7)<<11) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHED mode is set. -+ @Input uiFlags Allocation flags. 
-+ @Return True if the mode is set, false otherwise -+ */ -+#define PVRSRV_CHECK_CPU_CACHED(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHED) -+ -+/*! -+ CPU domain. CPU cache mode mask -+*/ -+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK (IMG_UINT64_C(7)<<11) -+ -+/*! -+ @Description A helper macro to obtain just the CPU cache bit field from the flags. -+ This should be used whenever the CPU cache mode needs to be determined. -+ @Input uiFlags Allocation flags. -+ @Return Value of the CPU cache bit field. -+ */ -+#define PVRSRV_CPU_CACHE_MODE(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK) -+ -+/* Helper flags for usual cases */ -+ -+/*! -+ * Memory will be write-combined on CPU and GPU -+ */ -+#define PVRSRV_MEMALLOCFLAG_UNCACHED_WC (PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_UNCACHED_WC mode is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the mode is set, false otherwise -+ */ -+#define PVRSRV_CHECK_WRITE_COMBINE(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_UNCACHED_WC) -+ -+/*! -+ * Memory will be cached. -+ * Services will try to set the correct flags in the MMU tables. -+ * In case there is no coherency support the caller has to ensure caches are up to date */ -+#define PVRSRV_MEMALLOCFLAG_CACHE_COHERENT (PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CACHE_COHERENT mode is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the mode is set, false otherwise -+ */ -+#define PVRSRV_CHECK_CACHE_COHERENT(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_COHERENT) -+ -+/*! -+ * Memory will be cache-incoherent on CPU and GPU -+ */ -+#define PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT (PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT mode is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the mode is set, false otherwise -+ */ -+#define PVRSRV_CHECK_CACHE_INCOHERENT(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT) -+ -+/*! -+ Cache mode mask -+*/ -+#define PVRSRV_CACHE_MODE(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) | PVRSRV_CPU_CACHE_MODE(uiFlags)) -+ -+ -+/*! -+ CPU MMU Flags mask -- intended for use internal to services only -+ */ -+#define PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK (PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ -+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK) -+ -+/*! -+ MMU Flags mask -- intended for use internal to services only - used for -+ partitioning the flags bits and determining which flags to pass down to -+ mmu_common.c -+ */ -+#define PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK (PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ -+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK) -+ -+/*! -+ Indicates that the PMR created due to this allocation will support -+ in-kernel CPU mappings. Only privileged processes may use this flag as -+ it may cause wastage of precious kernel virtual memory on some platforms. -+ */ -+#define PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE (IMG_UINT64_C(1)<<14) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE flag is set. -+ @Input uiFlags Allocation flags. 
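The GPU and CPU cache settings above are 3-bit fields holding mutually exclusive values rather than independent bits, which is why the helper macros compare the whole field instead of testing single bits. A sketch of classifying the CPU field with PVRSRV_CPU_CACHE_MODE; the function name is an assumption, not part of this patch.

static const char *SketchCpuCacheModeName(PVRSRV_MEMALLOCFLAGS_T uiFlags)
{
    /* The extracted field is compared as a whole against the mode values. */
    switch (PVRSRV_CPU_CACHE_MODE(uiFlags))
    {
    case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC:      return "write-combined";
    case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT:   return "cached, coherent";
    case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT: return "cached, incoherent";
    case PVRSRV_MEMALLOCFLAG_CPU_CACHED:           return "cached (internal use)";
    default:                                       return "other";
    }
}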
-+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) != 0U) -+ -+ -+ -+/* -+ * -+ * ********************************************************** -+ * * * -+ * * ALLOC MEMORY FLAGS * -+ * * * -+ * ********************************************************** -+ */ -+ -+/*! ----- Bit 15 -+ -+ Indicates when the allocation of physical memory pages backing the PMR -+ is carried out. When set, pages are not allocated at PMR creation but are -+ instead deferred until they are first needed, i.e. "on demand". -+ When unset, the pages may be allocated at the same time the PMR is created -+ or deferred (at the KM/Server's discretion). -+ See also PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW (below). Note that at most one -+ of these two flags may be set. -+ */ -+#define PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC (IMG_UINT64_C(1)<<15) -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC flag is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_CHECK_ON_DEMAND(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC) != 0U) -+ -+/*! ----- Bit 16 -+ -+ Indicates when the allocation of physical memory pages backing the PMR -+ is carried out. When set, pages are allocated at PMR creation. -+ When unset, the pages may be allocated at the same time the PMR is created -+ or deferred (at the KM/Server's discretion). -+ See also PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC (above). Note that at most one -+ of these two flags may be set. -+ */ -+#define PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW (IMG_UINT64_C(1)<<16) -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW flag is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_CHECK_PHYS_ALLOC_NOW(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW) != 0U) -+ -+/*! ----- Bit 17 -+ -+ Indicates that the allocation will be accessed by the CPU and GPU using -+ the same virtual address, i.e. for all SVM allocs, -+ IMG_CPU_VIRTADDR == IMG_DEV_VIRTADDR -+ */ -+#define PVRSRV_MEMALLOCFLAG_SVM_ALLOC (IMG_UINT64_C(1)<<17) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_SVM_ALLOC flag is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_CHECK_SVM_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SVM_ALLOC) != 0U) -+ -+/*! ----- Bit 18 -+ -+ Indicates the particular memory that's being allocated is sparse and the -+ sparse regions should not be backed by scratch page -+ */ -+#define PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING (IMG_UINT64_C(1) << 18) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING flag is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_IS_SPARSE_SCRATCH_BACKING_REQUIRED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING) == 0U) -+ -+/*! ----- Bit 19 -+ -+ Used to force Services to carry out at least one CPU cache invalidate on a -+ CPU cached buffer during allocation of the memory. Applicable to incoherent -+ systems, it must be used for buffers which are CPU cached and which will not -+ be 100% written to by the CPU before the GPU accesses it. 
For performance -+ reasons, avoid usage if the whole buffer that is allocated is written to by -+ the CPU anyway before the next GPU kick, or if the system is coherent. -+ */ -+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN (IMG_UINT64_C(1)<<19) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN flag is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN) != 0U) -+ -+/*! ----- Bit 20 -+ -+ Indicates the particular memory region should be backed by zero page. -+ This is different with zero on alloc flag such that only physically unbacked -+ pages are backed by zero page at the time of mapping. -+ The zero backed page is always with read only attribute irrespective of its -+ original attributes. -+ */ -+#define PVRSRV_MEMALLOCFLAG_ZERO_BACKING (IMG_UINT64_C(1) << 20) -+#define PVRSRV_IS_ZERO_BACKING_REQUIRED(uiFlags) (((uiFlags) & \ -+ PVRSRV_MEMALLOCFLAG_ZERO_BACKING) == PVRSRV_MEMALLOCFLAG_ZERO_BACKING) -+ -+/* -+ ************************************************************ -+ * PMR Misc flags * -+ ************************************************************ -+ * -+ * These 4 flags are used to indicate miscellaneous info -+ * not otherwise available in the PMR -+ * | 21 | 22-24 | -+ * | PMR can be suballoc'd | Reserved | -+ * -+ */ -+ -+ /*! ----- Bit 25 -+ * -+ Not used. -+ */ -+ -+/* -+ * -+ * ********************************************************** -+ * * * -+ * * MEMORY ZEROING AND POISONING FLAGS * -+ * * * -+ * ********************************************************** -+ * -+ * Zero / Poison, on alloc/free -+ * -+ * We think the following usecases are required: -+ * -+ * don't poison or zero on alloc or free -+ * (normal operation, also most efficient) -+ * poison on alloc -+ * (for helping to highlight bugs) -+ * poison on alloc and free -+ * (for helping to highlight bugs) -+ * zero on alloc -+ * (avoid highlighting security issues in other uses of memory) -+ * zero on alloc and poison on free -+ * (avoid highlighting security issues in other uses of memory, while -+ * helping to highlight a subset of bugs e.g. memory freed prematurely) -+ * -+ * Since there are more than 4, we can't encode this in just two bits, -+ * so we might as well have a separate flag for each of the three -+ * actions. -+ */ -+ -+/*! -+ Ensures that the memory allocated is initialised with zeroes. -+ */ -+#define PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC (IMG_UINT64_C(1)<<31) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC flag is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) != 0U) -+ -+/*! -+ Scribbles over the allocated memory with a poison value -+ -+ Not compatible with ZERO_ON_ALLOC -+ -+ Poisoning is very deliberately _not_ reflected in PDump as we want -+ a simulation to cry loudly if the initialised data propagates to a -+ result. -+ */ -+#define PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC (IMG_UINT64_C(1)<<30) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC flag is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) != 0U) -+ -+#if defined(DEBUG) -+/*! 
-+ Causes memory to be trashed when freed, used when debugging only, not to be used -+ as a security measure. -+ */ -+#define PVRSRV_MEMALLOCFLAG_POISON_ON_FREE (IMG_UINT64_C(1)<<29) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_POISON_ON_FREE flag is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_CHECK_POISON_ON_FREE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE) != 0U) -+#endif /* DEBUG */ -+ -+/*! -+ Avoid address alignment to a CPU or GPU cache line size. -+ */ -+#define PVRSRV_MEMALLOCFLAG_NO_CACHE_LINE_ALIGN (IMG_UINT64_C(1)<<28) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_CHECK_NO_CACHE_LINE_ALIGN flag is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the flag is set, false otherwise -+ */ -+#define PVRSRV_CHECK_NO_CACHE_LINE_ALIGN(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_CACHE_LINE_ALIGN) != 0U) -+ -+ -+/* -+ * -+ * ********************************************************** -+ * * * -+ * * Device specific MMU flags * -+ * * * -+ * ********************************************************** -+ * -+ * (Bits 26 to 27) -+ * -+ * Some services controlled devices have device specific control bits in -+ * their page table entries, we need to allow these flags to be passed down -+ * the memory management layers so the user can control these bits. -+ * For example, RGX device has the file rgx_memallocflags.h -+ */ -+ -+/*! -+ * Offset of device specific MMU flags. -+ */ -+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET 26 -+ -+/*! -+ * Mask for retrieving device specific MMU flags. -+ */ -+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK (IMG_UINT64_C(3) << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) -+ -+/*! -+ @Description Helper macro for setting device specific MMU flags. -+ @Input uiFlags Flag index. -+ @Return Flag vector with the specified bit set. -+ */ -+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(uiFlags) \ -+ (((PVRSRV_MEMALLOCFLAGS_T)(uiFlags) << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) & \ -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK) -+ -+/* -+ * -+ * ********************************************************** -+ * * * -+ * * Secure validation flags * -+ * * * -+ * ********************************************************** -+ * -+ * (Bit 35) -+ * -+ */ -+ -+/*! -+ PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER -+ */ -+ -+#define PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER (IMG_UINT64_C(1)<<35) -+#define PVRSRV_CHECK_SHARED_BUFFER(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER) != 0U) -+ -+/* -+ * -+ * ********************************************************** -+ * * * -+ * * IPA Policy * -+ * * * -+ * ********************************************************** -+ * -+ * (Bits 53 to 55) -+ * -+ */ -+ -+/*! -+ * Offset of Intermediate Physical Address (IPA) policy. -+ */ -+#define PVRSRV_MEMALLOCFLAG_IPA_POLICY_OFFSET 53 -+ -+/*! -+ * Mask for retrieving IPA policy. -+ */ -+#define PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK (IMG_UINT64_C(7) << PVRSRV_MEMALLOCFLAG_IPA_POLICY_OFFSET) -+#define PVRSRV_MEMALLOCFLAG_IPA_POLICY(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK) >> PVRSRV_MEMALLOCFLAG_IPA_POLICY_OFFSET) -+ -+/* -+ * -+ * ********************************************************** -+ * * * -+ * * Phys Heap Hints * -+ * * * -+ * ********************************************************** -+ * -+ * (Bits 56 to 63) -+ * -+ */ -+ -+/*! -+ * Ensures Physheap isn't reassigned when considered favourable by driver under a OOM condition. 
-+ */ -+#define PVRSRV_MEMALLOCFLAG_MANDATE_PHYSHEAP (IMG_UINT64_C(1)<<56) -+#define PVRSRV_CHECK_MANDATED_PHYSHEAP(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_MANDATE_PHYSHEAP) != 0U) -+ -+/*! -+ * Value of enum PVRSRV_PHYS_HEAP stored in memalloc flags. If not set -+ * i.e. PVRSRV_PHYS_HEAP_DEFAULT (value 0) used, the system layer defined default physical heap is used. -+ */ -+#define PVRSRV_PHYS_HEAP_HINT_SHIFT (59) -+#define PVRSRV_PHYS_HEAP_HINT_MASK (IMG_UINT64_C(0x1F) << PVRSRV_PHYS_HEAP_HINT_SHIFT) -+ -+ -+/*! -+ @Description Macro extracting the Phys Heap hint from memalloc flag value. -+ @Input uiFlags Allocation flags -+ @Return returns the value of the PHYS_HEAP_HINT bitfield -+ */ -+#define PVRSRV_GET_PHYS_HEAP_HINT(uiFlags) ((PVRSRV_PHYS_HEAP)(((uiFlags) & PVRSRV_PHYS_HEAP_HINT_MASK) \ -+ >> PVRSRV_PHYS_HEAP_HINT_SHIFT)) -+ -+/*! -+ @Description Macro converting a Phys Heap value into a memalloc bitfield -+ @Input uiFlags Device Phys Heap -+ @Return returns a shifted bitfield with the Device Phys Heap value -+ */ -+#define PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(PhysHeap) ((((PVRSRV_MEMALLOCFLAGS_T)PVRSRV_PHYS_HEAP_ ## PhysHeap) << \ -+ PVRSRV_PHYS_HEAP_HINT_SHIFT) \ -+ & PVRSRV_PHYS_HEAP_HINT_MASK) -+/*! -+ @Description Macro to replace an existing phys heap hint value in flags. -+ @Input PhysHeap Phys Heap Macro -+ @Input uiFlags Allocation flags -+ @Return N/A -+ */ -+#define PVRSRV_SET_PHYS_HEAP_HINT(PhysHeap, uiFlags) (uiFlags) = ((uiFlags) & ~PVRSRV_PHYS_HEAP_HINT_MASK) | \ -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(PhysHeap) -+ -+/*! -+ @Description Macro to replace an existing phys heap hint value using Phys Heap value. -+ @Input PhysHeap Phys Heap Value -+ @Input uiFlags Allocation flags -+ @Return N/A -+ */ -+#define PVRSRV_CHANGE_PHYS_HEAP_HINT(Physheap, uiFlags) (uiFlags) = ((uiFlags) & ~PVRSRV_PHYS_HEAP_HINT_MASK) | \ -+ (((PVRSRV_MEMALLOCFLAGS_T)(Physheap) << \ -+ PVRSRV_PHYS_HEAP_HINT_SHIFT) \ -+ & PVRSRV_PHYS_HEAP_HINT_MASK) -+ -+/*! -+ @Description Macros checking if a Phys Heap hint is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the hint is set, false otherwise -+ */ -+#define PVRSRV_CHECK_PHYS_HEAP(PhysHeap, uiFlags) (PVRSRV_PHYS_HEAP_ ## PhysHeap == PVRSRV_GET_PHYS_HEAP_HINT(uiFlags)) -+ -+#define PVRSRV_CHECK_FW_MAIN(uiFlags) (PVRSRV_CHECK_PHYS_HEAP(FW_MAIN, uiFlags) || \ -+ PVRSRV_CHECK_PHYS_HEAP(FW_CONFIG, uiFlags) || \ -+ PVRSRV_CHECK_PHYS_HEAP(FW_CODE, uiFlags) || \ -+ PVRSRV_CHECK_PHYS_HEAP(FW_PRIV_DATA, uiFlags) || \ -+ PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP0, uiFlags) || \ -+ PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP1, uiFlags) || \ -+ PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP2, uiFlags) || \ -+ PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP3, uiFlags) || \ -+ PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP4, uiFlags) || \ -+ PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP5, uiFlags) || \ -+ PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP6, uiFlags) || \ -+ PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP7, uiFlags)) -+ -+/*! -+ * Secure buffer mask -- Flags in the mask are allowed for secure buffers -+ * because they are not related to CPU mappings. -+ */ -+#define PVRSRV_MEMALLOCFLAGS_SECBUFMASK ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | \ -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ -+ PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ -+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ -+ PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \ -+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED) -+ -+/*! 
-+ * Trusted device mask -- Flags in the mask are allowed for trusted device -+ * because the driver cannot access the memory -+ */ -+#if defined(DEBUG) -+#define PVRSRV_MEMALLOCFLAGS_TDFWMASK ~(PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \ -+ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ -+ PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING) -+#else -+#define PVRSRV_MEMALLOCFLAGS_TDFWMASK ~(PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ -+ PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING) -+#endif -+ -+/*! -+ PMR flags mask -- for internal services use only. This is the set of flags -+ that will be passed down and stored with the PMR, this also includes the -+ MMU flags which the PMR has to pass down to mm_common.c at PMRMap time. -+*/ -+#if defined(DEBUG) -+#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \ -+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \ -+ PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \ -+ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ -+ PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW | \ -+ PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING | \ -+ PVRSRV_MEMALLOCFLAG_ZERO_BACKING | \ -+ PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER | \ -+ PVRSRV_MEMALLOCFLAG_MANDATE_PHYSHEAP | \ -+ PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK | \ -+ PVRSRV_PHYS_HEAP_HINT_MASK) -+#else -+#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \ -+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \ -+ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ -+ PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW | \ -+ PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING | \ -+ PVRSRV_MEMALLOCFLAG_ZERO_BACKING | \ -+ PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER | \ -+ PVRSRV_MEMALLOCFLAG_MANDATE_PHYSHEAP | \ -+ PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK | \ -+ PVRSRV_PHYS_HEAP_HINT_MASK) -+#endif -+ -+/*! -+ * CPU mappable mask -- Any flag set in the mask requires memory to be CPU mappable -+ */ -+#define PVRSRV_MEMALLOCFLAGS_CPU_MAPPABLE_MASK (PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) -+/*! -+ RA differentiation mask -+ -+ for use internal to services -+ -+ this is the set of flags bits that are able to determine whether a pair of -+ allocations are permitted to live in the same page table. Allocations -+ whose flags differ in any of these places would be allocated from separate -+ RA Imports and therefore would never coexist in the same page. -+ Special cases are zeroing and poisoning of memory. The caller is responsible -+ to set the sub-allocations to the value he wants it to be. To differentiate -+ between zeroed and poisoned RA Imports does not make sense because the -+ memory might be reused. 
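Pulling the pieces above together: a flag word is built by OR-ing one value from each group (GPU/CPU access rights, one cache mode per domain, alloc-time behaviour, a phys-heap hint) and queried with the corresponding helper macros. A minimal sketch using only flags and macros defined above; the function name is assumed.

static PVRSRV_MEMALLOCFLAGS_T SketchBuildAllocFlags(void)
{
    PVRSRV_MEMALLOCFLAGS_T uiFlags =
          PVRSRV_MEMALLOCFLAG_GPU_READABLE
        | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE
        | PVRSRV_MEMALLOCFLAG_CPU_READABLE
        | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE
        | PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT
        | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT
        | PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC
        | PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(GPU_LOCAL);

    /* The check macros test single bits or whole bit-fields of the word. */
    if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) &&
        PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags) &&
        PVRSRV_GET_PHYS_HEAP_HINT(uiFlags) == PVRSRV_PHYS_HEAP_GPU_LOCAL)
    {
        /* The flag word decodes back to what was composed. */
    }

    return uiFlags;
}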
-+ -+*/ -+#if defined(DEBUG) -+#define PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK (PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK \ -+ & \ -+ ~(PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_POISON_ON_FREE)) -+#else -+#define PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK (PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK \ -+ & \ -+ ~(PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC)) -+#endif -+/*! -+ Flags that affect _allocation_ -+*/ -+#define PVRSRV_MEMALLOCFLAGS_PERALLOCFLAGSMASK (0xFFFFFFFFU) -+ -+/*! -+ Flags that affect _mapping_ -+*/ -+#define PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \ -+ PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \ -+ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ -+ PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_ZERO_BACKING | \ -+ PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING) -+ -+#if ((~(PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK) & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK) != 0U) -+#error PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK is not a subset of PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK -+#endif -+ -+ -+/*! -+ Flags that affect _physical allocations_ in the DevMemX API -+ */ -+#if defined(DEBUG) -+#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ -+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK | \ -+ PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \ -+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED | \ -+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \ -+ PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW | \ -+ PVRSRV_MEMALLOCFLAG_MANDATE_PHYSHEAP | \ -+ PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK | \ -+ PVRSRV_PHYS_HEAP_HINT_MASK) -+#else -+#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ -+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK | \ -+ PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \ -+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED | \ -+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW | \ -+ PVRSRV_MEMALLOCFLAG_MANDATE_PHYSHEAP | \ -+ PVRSRV_MEMALLOCFLAG_IPA_POLICY_MASK | \ -+ PVRSRV_PHYS_HEAP_HINT_MASK) -+#endif -+ -+/*! -+ Flags that affect _virtual allocations_ in the DevMemX API -+ */ -+#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_VIRTUAL_MASK (PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \ -+ PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED | \ -+ PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED) -+ -+#endif /* PVRSRV_MEMALLOCFLAGS_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_memallocflags_internal.h b/drivers/gpu/drm/img-rogue/pvrsrv_memallocflags_internal.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_memallocflags_internal.h -@@ -0,0 +1,78 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device Memory Management allocation flags for internal Services -+ use only -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description This file defines flags used on memory allocations and mappings -+ These flags are relevant throughout the memory management -+ software stack and are specified by users of services and -+ understood by all levels of the memory management in the server -+ and in special cases in the client. 
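The DevMemX masks above split a single flag word into the portion that governs the physical allocation and the portion that governs the GPU virtual mapping. A one-function sketch of that split, offered only as illustration; the function and variable names are assumptions.

static void SketchSplitDevmemXFlags(PVRSRV_MEMALLOCFLAGS_T uiFlags,
                                    PVRSRV_MEMALLOCFLAGS_T *puiPhysFlags,
                                    PVRSRV_MEMALLOCFLAGS_T *puiVirtFlags)
{
    /* Physical-side flags: CPU mapping, zero/poison, cache clean, heap hints. */
    *puiPhysFlags = uiFlags & PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK;

    /* Virtual-side flags: GPU MMU protection and GPU sharing permissions. */
    *puiVirtFlags = uiFlags & PVRSRV_MEMALLOCFLAGS_DEVMEMX_VIRTUAL_MASK;
}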
-+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PVRSRV_MEMALLOCFLAGS_INTERNAL_H -+#define PVRSRV_MEMALLOCFLAGS_INTERNAL_H -+ -+/*! -+ CPU domain. Request uncached memory. This means that any writes to memory -+ allocated with this flag are written straight to memory and thus are -+ coherent for any device in the system. -+*/ -+#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED (1ULL<<11) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_UNCACHED mode is set. -+ @Input uiFlags Allocation flags. -+ @Return True if the mode is set, false otherwise -+ */ -+#define PVRSRV_CHECK_CPU_UNCACHED(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED) -+ -+/*! -+ * Memory will be uncached on CPU and GPU -+ */ -+#define PVRSRV_MEMALLOCFLAG_UNCACHED (PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED) -+ -+/*! -+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_UNCACHED mode is set. -+ @Input uiFlags Allocation flags. 
-+ @Return True if the mode is set, false otherwise -+ */ -+#define PVRSRV_CHECK_UNCACHED(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_UNCACHED) -+ -+#endif /* PVRSRV_MEMALLOCFLAGS_INTERNAL_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_pool.c b/drivers/gpu/drm/img-rogue/pvrsrv_pool.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_pool.c -@@ -0,0 +1,260 @@ -+/**************************************************************************/ /*! -+@File -+@Title Services pool implementation -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Provides a generic pool implementation -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include "pvrsrv.h" -+#include "lock.h" -+#include "dllist.h" -+#include "allocmem.h" -+ -+struct _PVRSRV_POOL_ -+{ -+ POS_LOCK hLock; -+ /* total max number of permitted entries in the pool */ -+ IMG_UINT uiMaxEntries; -+ /* currently number of pool entries created. 
these may be in the pool -+ * or in-use -+ */ -+ IMG_UINT uiNumBusy; -+ /* number of not-in-use entries currently free in the pool */ -+ IMG_UINT uiNumFree; -+ -+ DLLIST_NODE sFreeList; -+ -+ const IMG_CHAR *pszName; -+ -+ PVRSRV_POOL_ALLOC_FUNC *pfnAlloc; -+ PVRSRV_POOL_FREE_FUNC *pfnFree; -+ void *pvPrivData; -+}; -+ -+typedef struct _PVRSRV_POOL_ENTRY_ -+{ -+ DLLIST_NODE sNode; -+ void *pvData; -+} PVRSRV_POOL_ENTRY; -+ -+PVRSRV_ERROR PVRSRVPoolCreate(PVRSRV_POOL_ALLOC_FUNC *pfnAlloc, -+ PVRSRV_POOL_FREE_FUNC *pfnFree, -+ IMG_UINT32 ui32MaxEntries, -+ const IMG_CHAR *pszName, -+ void *pvPrivData, -+ PVRSRV_POOL **ppsPool) -+{ -+ PVRSRV_POOL *psPool; -+ PVRSRV_ERROR eError; -+ -+ psPool = OSAllocMem(sizeof(PVRSRV_POOL)); -+ PVR_GOTO_IF_NOMEM(psPool, eError, err_alloc); -+ -+ eError = OSLockCreate(&psPool->hLock); -+ -+ PVR_GOTO_IF_ERROR(eError, err_lock_create); -+ -+ psPool->uiMaxEntries = ui32MaxEntries; -+ psPool->uiNumBusy = 0; -+ psPool->uiNumFree = 0; -+ psPool->pfnAlloc = pfnAlloc; -+ psPool->pfnFree = pfnFree; -+ psPool->pvPrivData = pvPrivData; -+ psPool->pszName = pszName; -+ -+ dllist_init(&psPool->sFreeList); -+ -+ *ppsPool = psPool; -+ -+ return PVRSRV_OK; -+ -+err_lock_create: -+ OSFreeMem(psPool); -+err_alloc: -+ return eError; -+} -+ -+static PVRSRV_ERROR _DestroyPoolEntry(PVRSRV_POOL *psPool, -+ PVRSRV_POOL_ENTRY *psEntry) -+{ -+ psPool->pfnFree(psPool->pvPrivData, psEntry->pvData); -+ OSFreeMem(psEntry); -+ -+ return PVRSRV_OK; -+} -+ -+void PVRSRVPoolDestroy(PVRSRV_POOL *psPool) -+{ -+ if (psPool->uiNumBusy != 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to destroy pool %s " -+ "with %u entries still in use", -+ __func__, -+ psPool->pszName, -+ psPool->uiNumBusy)); -+ return; -+ } -+ -+ OSLockDestroy(psPool->hLock); -+ -+ if (psPool->uiNumFree) -+ { -+ PVRSRV_POOL_ENTRY *psEntry; -+ DLLIST_NODE *psChosenNode; -+ -+ psChosenNode = dllist_get_next_node(&psPool->sFreeList); -+ -+ while (psChosenNode) -+ { -+ dllist_remove_node(psChosenNode); -+ -+ psEntry = IMG_CONTAINER_OF(psChosenNode, PVRSRV_POOL_ENTRY, sNode); -+ _DestroyPoolEntry(psPool, psEntry); -+ -+ psPool->uiNumFree--; -+ -+ psChosenNode = dllist_get_next_node(&psPool->sFreeList); -+ } -+ -+ PVR_ASSERT(psPool->uiNumFree == 0); -+ } -+ -+ OSFreeMem(psPool); -+} -+ -+static PVRSRV_ERROR _CreateNewPoolEntry(PVRSRV_POOL *psPool, -+ PVRSRV_POOL_ENTRY **ppsEntry) -+{ -+ PVRSRV_POOL_ENTRY *psNewEntry; -+ PVRSRV_ERROR eError; -+ -+ psNewEntry = OSAllocMem(sizeof(PVRSRV_POOL_ENTRY)); -+ PVR_GOTO_IF_NOMEM(psNewEntry, eError, err_allocmem); -+ -+ dllist_init(&psNewEntry->sNode); -+ -+ eError = psPool->pfnAlloc(psPool->pvPrivData, &psNewEntry->pvData); -+ -+ PVR_GOTO_IF_ERROR(eError, err_pfn_alloc); -+ -+ *ppsEntry = psNewEntry; -+ -+ return PVRSRV_OK; -+ -+err_pfn_alloc: -+ OSFreeMem(psNewEntry); -+err_allocmem: -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVPoolGet(PVRSRV_POOL *psPool, -+ PVRSRV_POOL_TOKEN *hToken, -+ void **ppvDataOut) -+{ -+ PVRSRV_POOL_ENTRY *psEntry; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ DLLIST_NODE *psChosenNode; -+ -+ OSLockAcquire(psPool->hLock); -+ -+ psChosenNode = dllist_get_next_node(&psPool->sFreeList); -+ if (unlikely(psChosenNode == NULL)) -+ { -+ /* no available elements in the pool. 
try to create one */ -+ -+ eError = _CreateNewPoolEntry(psPool, &psEntry); -+ -+ PVR_GOTO_IF_ERROR(eError, out_unlock); -+ } -+ else -+ { -+ dllist_remove_node(psChosenNode); -+ -+ psEntry = IMG_CONTAINER_OF(psChosenNode, PVRSRV_POOL_ENTRY, sNode); -+ -+ psPool->uiNumFree--; -+ } -+ -+#if defined(DEBUG) || defined(SUPPORT_VALIDATION) -+ /* Don't poison the IN buffer as that is copied from client and would be -+ * waste of cycles. -+ */ -+ OSCachedMemSet(((IMG_PBYTE)psEntry->pvData)+PVRSRV_MAX_BRIDGE_IN_SIZE, -+ PVRSRV_POISON_ON_ALLOC_VALUE, PVRSRV_MAX_BRIDGE_OUT_SIZE); -+#endif -+ -+ psPool->uiNumBusy++; -+ *hToken = psEntry; -+ *ppvDataOut = psEntry->pvData; -+ -+out_unlock: -+ OSLockRelease(psPool->hLock); -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVPoolPut(PVRSRV_POOL *psPool, PVRSRV_POOL_TOKEN hToken) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_POOL_ENTRY *psEntry = hToken; -+ -+ PVR_ASSERT(psPool->uiNumBusy > 0); -+ -+ OSLockAcquire(psPool->hLock); -+ -+ /* put this entry in the pool if the pool has space, -+ * otherwise free it -+ */ -+ if (psPool->uiNumFree < psPool->uiMaxEntries) -+ { -+ dllist_add_to_tail(&psPool->sFreeList, &psEntry->sNode); -+ psPool->uiNumFree++; -+ } -+ else -+ { -+ eError = _DestroyPoolEntry(psPool, psEntry); -+ } -+ -+ psPool->uiNumBusy--; -+ -+ OSLockRelease(psPool->hLock); -+ -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_pool.h b/drivers/gpu/drm/img-rogue/pvrsrv_pool.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_pool.h -@@ -0,0 +1,135 @@ -+/**************************************************************************/ /*! -+@File -+@Title Services pool implementation -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Provides a generic pool implementation. -+ The pool allows to dynamically retrieve and return entries from -+ it using functions pair PVRSRVPoolGet/PVRSRVPoolPut. The entries -+ are created in lazy manner which means not until first usage. -+ The pool API allows to pass and allocation/free functions -+ pair that will allocate entry's private data and return it -+ to the caller on every entry 'Get'. -+ The pool will keep up to ui32MaxEntries entries allocated. -+ Every entry that exceeds this number and is 'Put' back to the -+ pool will be freed on the spot instead being returned to the -+ pool. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+#if !defined(PVRSRVPOOL_H) -+#define PVRSRVPOOL_H -+ -+/**************************************************************************/ /*! -+ @Description Callback function called during creation of the new element. This -+ function allocates an object that will be stored in the pool. -+ The object can be retrieved from the pool by calling -+ PVRSRVPoolGet. -+ @Input pvPrivData Private data passed to the alloc function. -+ @Output pvOut Allocated object. -+ @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise -+*/ /***************************************************************************/ -+typedef PVRSRV_ERROR (PVRSRV_POOL_ALLOC_FUNC)(void *pvPrivData, void **pvOut); -+ -+/**************************************************************************/ /*! -+ @Description Callback function called to free the object allocated by -+ the counterpart alloc function. -+ @Input pvPrivData Private data passed to the free function. -+ @Output pvFreeData Object allocated by PVRSRV_POOL_ALLOC_FUNC. -+*/ /***************************************************************************/ -+typedef void (PVRSRV_POOL_FREE_FUNC)(void *pvPrivData, void *pvFreeData); -+ -+typedef IMG_HANDLE PVRSRV_POOL_TOKEN; -+ -+typedef struct _PVRSRV_POOL_ PVRSRV_POOL; -+ -+/**************************************************************************/ /*! -+ @Function PVRSRVPoolCreate -+ @Description Creates new buffer pool. -+ @Input pfnAlloc Allocation function pointer. Function is used -+ to allocate new pool entries' data. -+ @Input pfnFree Free function pointer. Function is used to -+ free memory allocated by pfnAlloc function. -+ @Input ui32MaxEntries Total maximum number of entries in the pool. -+ @Input pszName Name of the pool. String has to be NULL -+ terminated. -+ @Input pvPrivData Private data that will be passed to pfnAlloc and -+ pfnFree functions. -+ @Output ppsPool New buffer pool object. 
-+ @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise -+*/ /***************************************************************************/ -+PVRSRV_ERROR PVRSRVPoolCreate(PVRSRV_POOL_ALLOC_FUNC *pfnAlloc, -+ PVRSRV_POOL_FREE_FUNC *pfnFree, -+ IMG_UINT32 ui32MaxEntries, -+ const IMG_CHAR *pszName, -+ void *pvPrivData, -+ PVRSRV_POOL **ppsPool); -+ -+/**************************************************************************/ /*! -+ @Function PVRSRVPoolDestroy -+ @Description Destroys pool created by PVRSRVPoolCreate. -+ @Input psPool Buffer pool object meant to be destroyed. -+*/ /***************************************************************************/ -+void PVRSRVPoolDestroy(PVRSRV_POOL *psPool); -+ -+/**************************************************************************/ /*! -+ @Function PVRSRVPoolGet -+ @Description Retrieves an entry from a pool. If no free elements are -+ available new entry will be allocated. -+ @Input psPool Pointer to the pool. -+ @Output hToken Pointer to the entry handle. -+ @Output ppvDataOut Pointer to data stored in the entry (the data -+ allocated by the pfnAlloc function). -+ @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise -+*/ /***************************************************************************/ -+PVRSRV_ERROR PVRSRVPoolGet(PVRSRV_POOL *psPool, -+ PVRSRV_POOL_TOKEN *hToken, -+ void **ppvDataOut); -+ -+/**************************************************************************/ /*! -+ @Function PVRSRVPoolPut -+ @Description Returns entry to the pool. If number of entries is greater -+ than ui32MaxEntries set during pool creation the entry will -+ be freed instead. -+ @Input psPool Pointer to the pool. -+ @Input hToken Entry handle. -+ @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise -+*/ /***************************************************************************/ -+PVRSRV_ERROR PVRSRVPoolPut(PVRSRV_POOL *psPool, -+ PVRSRV_POOL_TOKEN hToken); -+ -+#endif /* PVRSRVPOOL_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_sync_km.h b/drivers/gpu/drm/img-rogue/pvrsrv_sync_km.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_sync_km.h -@@ -0,0 +1,65 @@ -+/*************************************************************************/ /*! -+@File -+@Title PVR synchronisation interface -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Types for server side code -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
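[Editor's note - illustrative usage sketch of the pool API declared above, not part of the patch. The callbacks, pool name and sizes are hypothetical; OSAllocMem/OSFreeMem and the bridge size macros are assumed from the surrounding driver code.]

    /* Hypothetical callbacks: allocate/free one bridge-sized scratch buffer. */
    static PVRSRV_ERROR _ExampleBufAlloc(void *pvPriv, void **ppvOut)
    {
        PVR_UNREFERENCED_PARAMETER(pvPriv);
        *ppvOut = OSAllocMem(PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE);
        return (*ppvOut != NULL) ? PVRSRV_OK : PVRSRV_ERROR_OUT_OF_MEMORY;
    }

    static void _ExampleBufFree(void *pvPriv, void *pvData)
    {
        PVR_UNREFERENCED_PARAMETER(pvPriv);
        OSFreeMem(pvData);
    }

    static PVRSRV_ERROR _ExamplePoolRoundTrip(void)
    {
        PVRSRV_POOL *psPool;
        PVRSRV_POOL_TOKEN hToken;
        void *pvBuf;
        PVRSRV_ERROR eError;

        eError = PVRSRVPoolCreate(_ExampleBufAlloc, _ExampleBufFree,
                                  8 /* keep at most 8 idle entries */,
                                  "example_pool", NULL, &psPool);
        if (eError != PVRSRV_OK)
        {
            return eError;
        }

        /* Entries are created lazily on first Get; Put either caches the
         * entry or frees it once ui32MaxEntries free entries are held. */
        eError = PVRSRVPoolGet(psPool, &hToken, &pvBuf);
        if (eError == PVRSRV_OK)
        {
            /* ... use pvBuf ... */
            eError = PVRSRVPoolPut(psPool, hToken);
        }

        PVRSRVPoolDestroy(psPool);
        return eError;
    }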
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef PVRSRV_SYNC_KM_H -+#define PVRSRV_SYNC_KM_H -+ -+#include -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+#define SYNC_FB_FILE_STRING_MAX 256 -+#define SYNC_FB_MODULE_STRING_LEN_MAX (32) -+#define SYNC_FB_DESC_STRING_LEN_MAX (32) -+ -+/* By default, fence-sync module emits into HWPerf (of course, if enabled) and -+ * considers a process (sleepable) context */ -+#define PVRSRV_FENCE_FLAG_NONE (0U) -+#define PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT (1U << 0) -+#define PVRSRV_FENCE_FLAG_CTX_ATOMIC (1U << 1) -+ -+#if defined(__cplusplus) -+} -+#endif -+#endif /* PVRSRV_SYNC_KM_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_sync_server.h b/drivers/gpu/drm/img-rogue/pvrsrv_sync_server.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_sync_server.h -@@ -0,0 +1,278 @@ -+/**************************************************************************/ /*! -+@File -+@Title Fence sync server interface -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+#ifndef PVRSRV_SYNC_SERVER_H -+#define PVRSRV_SYNC_SERVER_H -+ -+#if defined(SUPPORT_FALLBACK_FENCE_SYNC) -+#include "sync_fallback_server.h" -+#include "pvr_notifier.h" -+#include "img_types.h" -+#include "pvrsrv_sync_km.h" -+#elif defined(SUPPORT_NATIVE_FENCE_SYNC) -+#include "pvr_sync.h" -+#endif -+ -+#include "rgxhwperf.h" -+ -+#define SYNC_SW_TIMELINE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH -+#define SYNC_SW_FENCE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH -+ -+typedef struct _SYNC_TIMELINE_OBJ_ -+{ -+ void *pvTlObj; /* Implementation specific timeline object */ -+ -+ PVRSRV_TIMELINE hTimeline; /* Reference to implementation-independent timeline object */ -+} SYNC_TIMELINE_OBJ; -+ -+typedef struct _SYNC_FENCE_OBJ_ -+{ -+ void *pvFenceObj; /* Implementation specific fence object */ -+ -+ PVRSRV_FENCE hFence; /* Reference to implementation-independent fence object */ -+} SYNC_FENCE_OBJ; -+ -+static inline void SyncClearTimelineObj(SYNC_TIMELINE_OBJ *psSTO) -+{ -+ psSTO->pvTlObj = NULL; -+ psSTO->hTimeline = PVRSRV_NO_TIMELINE; -+} -+ -+static inline IMG_BOOL SyncIsTimelineObjValid(const SYNC_TIMELINE_OBJ *psSTO) -+{ -+ return (IMG_BOOL)(psSTO->pvTlObj != NULL); -+} -+ -+static inline void SyncClearFenceObj(SYNC_FENCE_OBJ *psSFO) -+{ -+ psSFO->pvFenceObj = NULL; -+ psSFO->hFence = PVRSRV_NO_FENCE; -+} -+ -+static inline IMG_BOOL SyncIsFenceObjValid(const SYNC_FENCE_OBJ *psSFO) -+{ -+ return (IMG_BOOL)(psSFO->pvFenceObj != NULL); -+} -+ -+ -+/* Mapping of each required function to its appropriate sync-implementation function */ -+#if defined(SUPPORT_FALLBACK_FENCE_SYNC) -+ #define SyncFenceWaitKM_ SyncFbFenceWait -+ #define SyncGetFenceObj_ SyncFbGetFenceObj -+ #define SyncFenceReleaseKM_ SyncFbFenceReleaseKM -+ #define SyncSWTimelineFenceCreateKM_ SyncFbSWTimelineFenceCreateKM -+ #define SyncSWTimelineAdvanceKM_ SyncFbSWTimelineAdvanceKM -+ #define SyncSWGetTimelineObj_ SyncFbSWGetTimelineObj -+ #define SyncSWTimelineReleaseKM_ SyncFbTimelineRelease -+ #define SyncDumpFence_ SyncFbDumpFenceKM -+ #define SyncSWDumpTimeline_ SyncFbSWDumpTimelineKM -+#elif defined(SUPPORT_NATIVE_FENCE_SYNC) -+ #define SyncFenceWaitKM_ pvr_sync_fence_wait -+ #define SyncGetFenceObj_ pvr_sync_fence_get -+ #define SyncFenceReleaseKM_ pvr_sync_fence_release -+ #define 
SyncSWTimelineFenceCreateKM_ pvr_sync_sw_timeline_fence_create -+ #define SyncSWTimelineAdvanceKM_ pvr_sync_sw_timeline_advance -+ #define SyncSWGetTimelineObj_ pvr_sync_sw_timeline_get -+ #define SyncSWTimelineReleaseKM_ pvr_sync_sw_timeline_release -+ #define SyncDumpFence_ sync_dump_fence -+ #define SyncSWDumpTimeline_ sync_sw_dump_timeline -+#endif -+ -+/*************************************************************************/ /*! -+@Function SyncFenceWaitKM -+ -+@Description Wait for all the sync points in the fence to be signalled. -+ -+@Input psFenceObj Fence to wait on -+ -+@Input ui32TimeoutInMs Maximum time to wait (in milliseconds) -+ -+@Return PVRSRV_OK once the fence has been passed (all -+ containing check points have either -+ signalled or errored) -+ PVRSRV_ERROR_TIMEOUT if the poll has exceeded the timeout -+ PVRSRV_ERROR_FAILED_DEPENDENCIES Other sync-impl specific error -+*/ /**************************************************************************/ -+static inline PVRSRV_ERROR -+SyncFenceWaitKM(PVRSRV_DEVICE_NODE *psDevNode, -+ const SYNC_FENCE_OBJ *psFenceObj, -+ IMG_UINT32 ui32TimeoutInMs) -+{ -+ PVRSRV_ERROR eError; -+ -+ RGXSRV_HWPERF_SYNC_FENCE_WAIT(psDevNode->pvDevice, -+ BEGIN, -+ OSGetCurrentProcessID(), -+ psFenceObj->hFence, -+ ui32TimeoutInMs); -+ -+ eError = SyncFenceWaitKM_(psFenceObj->pvFenceObj, ui32TimeoutInMs); -+ -+ RGXSRV_HWPERF_SYNC_FENCE_WAIT(psDevNode->pvDevice, -+ END, -+ OSGetCurrentProcessID(), -+ psFenceObj->hFence, -+ ((eError == PVRSRV_OK) ? -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED : -+ ((eError == PVRSRV_ERROR_TIMEOUT) ? -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT : -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR))); -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function SyncGetFenceObj -+ -+@Description Get the implementation specific server fence object from -+ opaque implementation independent PVRSRV_FENCE type. -+ When successful, this function gets a reference on the base -+ fence, which needs to be dropped using SyncFenceReleaseKM, -+ when fence object is no longer in use. -+ -+@Input iFence Input opaque fence object -+ -+@Output psFenceObj Pointer to implementation specific fence object -+ -+@Return PVRSRV_ERROR PVRSRV_OK, on success -+*/ /**************************************************************************/ -+static inline PVRSRV_ERROR -+SyncGetFenceObj(PVRSRV_FENCE iFence, -+ SYNC_FENCE_OBJ *psFenceObj) -+{ -+ psFenceObj->hFence = iFence; -+ return SyncGetFenceObj_(iFence, &psFenceObj->pvFenceObj); -+} -+ -+/*************************************************************************/ /*! -+@Function SyncFenceReleaseKM -+ -+@Description Release reference on this fence. 
-+ -+@Input psFenceObj Fence to be released -+ -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+static inline -+PVRSRV_ERROR SyncFenceReleaseKM(const SYNC_FENCE_OBJ *psFenceObj) -+{ -+ return SyncFenceReleaseKM_(psFenceObj->pvFenceObj); -+} -+ -+/*****************************************************************************/ -+/* */ -+/* SW TIMELINE SPECIFIC FUNCTIONS */ -+/* */ -+/*****************************************************************************/ -+ -+static inline PVRSRV_ERROR -+SyncSWTimelineFenceCreateKM(PVRSRV_DEVICE_NODE *psDevNode, -+ PVRSRV_TIMELINE hSWTimeline, -+ const IMG_CHAR *pszFenceName, -+ PVRSRV_FENCE *phOutFence) -+{ -+ IMG_UINT64 ui64SyncPtIdx; -+ PVRSRV_ERROR eError; -+ eError = SyncSWTimelineFenceCreateKM_(psDevNode, -+ hSWTimeline, -+ pszFenceName, -+ phOutFence, -+ &ui64SyncPtIdx); -+ if (eError == PVRSRV_OK) -+ { -+ RGXSRV_HWPERF_ALLOC_SW_FENCE(psDevNode, OSGetCurrentProcessID(), -+ *phOutFence, hSWTimeline, ui64SyncPtIdx, -+ pszFenceName, OSStringLength(pszFenceName)); -+ } -+ return eError; -+} -+ -+static inline PVRSRV_ERROR -+SyncSWTimelineAdvanceKM(PVRSRV_DEVICE_NODE *psDevNode, -+ const SYNC_TIMELINE_OBJ *psSWTimelineObj) -+{ -+ IMG_UINT64 ui64SyncPtIdx; -+ PVRSRV_ERROR eError; -+ eError = SyncSWTimelineAdvanceKM_(psSWTimelineObj->pvTlObj, -+ &ui64SyncPtIdx); -+ -+ if (eError == PVRSRV_OK) -+ { -+ RGXSRV_HWPERF_SYNC_SW_TL_ADV(psDevNode->pvDevice, -+ OSGetCurrentProcessID(), -+ psSWTimelineObj->hTimeline, -+ ui64SyncPtIdx); -+ } -+ return eError; -+} -+ -+static inline PVRSRV_ERROR -+SyncSWGetTimelineObj(PVRSRV_TIMELINE hSWTimeline, -+ SYNC_TIMELINE_OBJ *psSWTimelineObj) -+{ -+ psSWTimelineObj->hTimeline = hSWTimeline; -+ return SyncSWGetTimelineObj_(hSWTimeline, &psSWTimelineObj->pvTlObj); -+} -+ -+static inline PVRSRV_ERROR -+SyncSWTimelineReleaseKM(const SYNC_TIMELINE_OBJ *psSWTimelineObj) -+{ -+ return SyncSWTimelineReleaseKM_(psSWTimelineObj->pvTlObj); -+} -+ -+static inline PVRSRV_ERROR -+SyncDumpFence(const SYNC_FENCE_OBJ *psFenceObj, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ return SyncDumpFence_(psFenceObj->pvFenceObj, pfnDumpDebugPrintf, pvDumpDebugFile); -+} -+ -+static inline PVRSRV_ERROR -+SyncSWDumpTimeline(const SYNC_TIMELINE_OBJ *psSWTimelineObj, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ return SyncSWDumpTimeline_(psSWTimelineObj->pvTlObj, pfnDumpDebugPrintf, pvDumpDebugFile); -+} -+ -+ -+#endif /* PVRSRV_SYNC_SERVER_H */ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_tlcommon.h b/drivers/gpu/drm/img-rogue/pvrsrv_tlcommon.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_tlcommon.h -@@ -0,0 +1,260 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services Transport Layer common types and definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Transport layer common types and definitions included into -+ both user mode and kernel mode source. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
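[Editor's note - hedged sketch of how a server-side caller might combine the wrappers above, not part of the patch. The function name and the 1000 ms timeout are arbitrary; psDevNode and iFence are assumed to come from the caller's context.]

    static PVRSRV_ERROR WaitOnClientFence(PVRSRV_DEVICE_NODE *psDevNode,
                                          PVRSRV_FENCE iFence)
    {
        SYNC_FENCE_OBJ sFenceObj;
        PVRSRV_ERROR eError;

        eError = SyncGetFenceObj(iFence, &sFenceObj);   /* takes a reference */
        if (eError != PVRSRV_OK)
        {
            return eError;
        }

        eError = SyncFenceWaitKM(psDevNode, &sFenceObj, 1000 /* ms, arbitrary */);

        (void) SyncFenceReleaseKM(&sFenceObj);          /* drop the reference */
        return eError;
    }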
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef PVR_TLCOMMON_H -+#define PVR_TLCOMMON_H -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+#include "img_defs.h" -+ -+ -+/*! Handle type for stream descriptor objects as created by this API */ -+typedef IMG_HANDLE PVRSRVTL_SD; -+ -+/*! Maximum stream name length including the null byte */ -+#define PRVSRVTL_MAX_STREAM_NAME_SIZE 40U -+ -+/*! Maximum number of streams expected to exist */ -+#define PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER (32*PRVSRVTL_MAX_STREAM_NAME_SIZE) -+ -+/*! Packet lengths are always rounded up to a multiple of 8 bytes */ -+#define PVRSRVTL_PACKET_ALIGNMENT 8U -+#define PVRSRVTL_ALIGN(x) PVR_ALIGN(x, PVRSRVTL_PACKET_ALIGNMENT) -+ -+ -+/*! A packet is made up of a header structure followed by the data bytes. -+ * There are 3 types of packet: normal (has data), data lost and padding, -+ * see packet flags. Header kept small to reduce data overhead. -+ * -+ * if the ORDER of the structure members is changed, please UPDATE the -+ * PVRSRVTL_PACKET_FLAG_OFFSET macro. 
-+ * -+ * Layout of uiTypeSize member is : -+ * -+ * |<---------------------------32-bits------------------------------>| -+ * |<----8---->|<-----1----->|<----7--->|<------------16------------->| -+ * | Type | Drop-Oldest | UNUSED | Size | -+ * -+ */ -+typedef struct -+{ -+ IMG_UINT32 uiTypeSize; /*!< Type, Drop-Oldest flag & number of bytes following header */ -+ IMG_UINT32 uiReserved; /*!< Reserve, packets and data must be 8 byte aligned */ -+ -+ /* First bytes of TL packet data follow header ... */ -+} PVRSRVTL_PACKETHDR, *PVRSRVTL_PPACKETHDR; -+ -+/* Structure must always be a size multiple of 8 as stream buffer -+ * still an array of IMG_UINT32s. -+ */ -+static_assert((sizeof(PVRSRVTL_PACKETHDR) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, -+ "sizeof(PVRSRVTL_PACKETHDR) must be a multiple of 8"); -+ -+/*! Packet header reserved word fingerprint "TLP1" */ -+#define PVRSRVTL_PACKETHDR_RESERVED 0x31504C54U -+ -+/*! Packet header mask used to extract the size from the uiTypeSize member. -+ * Do not use directly, see GET macros. -+ */ -+#define PVRSRVTL_PACKETHDR_SIZE_MASK 0x0000FFFFU -+#define PVRSRVTL_MAX_PACKET_SIZE (PVRSRVTL_PACKETHDR_SIZE_MASK & ~0xFU) -+ -+ -+/*! Packet header mask used to extract the type from the uiTypeSize member. -+ * Do not use directly, see GET macros. -+ */ -+#define PVRSRVTL_PACKETHDR_TYPE_MASK 0xFF000000U -+#define PVRSRVTL_PACKETHDR_TYPE_OFFSET 24U -+ -+/*! Packet header mask used to check if packets before this one were dropped -+ * or not. Do not use directly, see GET macros. -+ */ -+#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK 0x00800000U -+#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET 23U -+ -+/*! Packet type enumeration. -+ */ -+typedef IMG_UINT32 PVRSRVTL_PACKETTYPE; -+ -+/*! Undefined packet */ -+#define PVRSRVTL_PACKETTYPE_UNDEF 0U -+ -+/*! Normal packet type. Indicates data follows the header. -+ */ -+#define PVRSRVTL_PACKETTYPE_DATA 1U -+ -+/*! When seen this packet type indicates that at this moment in the stream -+ * packet(s) were not able to be accepted due to space constraints and -+ * that recent data may be lost - depends on how the producer handles the -+ * error. Such packets have no data, data length is 0. -+ */ -+#define PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED 2U -+ -+/*! Packets with this type set are padding packets that contain undefined -+ * data and must be ignored/skipped by the client. They are used when the -+ * circular stream buffer wraps around and there is not enough space for -+ * the data at the end of the buffer. Such packets have a length of 0 or -+ * more. -+ */ -+#define PVRSRVTL_PACKETTYPE_PADDING 3U -+ -+/*! This packet type conveys to the stream consumer that the stream -+ * producer has reached the end of data for that data sequence. The -+ * TLDaemon has several options for processing these packets that can -+ * be selected on a per stream basis. -+ */ -+#define PVRSRVTL_PACKETTYPE_MARKER_EOS 4U -+ -+/*! This is same as PVRSRVTL_PACKETTYPE_MARKER_EOS but additionally removes -+ * old data record output file before opening new/next one -+ */ -+#define PVRSRVTL_PACKETTYPE_MARKER_EOS_REMOVEOLD 5U -+ -+/*! Packet emitted on first stream opened by writer. Packet carries a name -+ * of the opened stream in a form of null-terminated string. -+ */ -+#define PVRSRVTL_PACKETTYPE_STREAM_OPEN_FOR_WRITE 6U -+ -+/*! Packet emitted on last stream closed by writer. Packet carries a name -+ * of the closed stream in a form of null-terminated string. 
-+ */ -+#define PVRSRVTL_PACKETTYPE_STREAM_CLOSE_FOR_WRITE 7U -+ -+#define PVRSRVTL_PACKETTYPE_LAST 8U -+ -+/* The SET_PACKET_* macros rely on the order the PVRSRVTL_PACKETHDR members are declared: -+ * uiFlags is the upper half of a structure consisting of 2 uint16 quantities. -+ */ -+#define PVRSRVTL_SET_PACKET_DATA(len) (len) | (PVRSRVTL_PACKETTYPE_DATA << PVRSRVTL_PACKETHDR_TYPE_OFFSET) -+#define PVRSRVTL_SET_PACKET_PADDING(len) (len) | (PVRSRVTL_PACKETTYPE_PADDING << PVRSRVTL_PACKETHDR_TYPE_OFFSET) -+#define PVRSRVTL_SET_PACKET_WRITE_FAILED (0U) | (PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED << PVRSRVTL_PACKETHDR_TYPE_OFFSET) -+#define PVRSRVTL_SET_PACKET_HDR(len, type) (len) | ((type) << PVRSRVTL_PACKETHDR_TYPE_OFFSET) -+ -+/*! Returns the number of bytes of data in the packet. -+ * p may be any address type. -+ */ -+#define GET_PACKET_DATA_LEN(p) \ -+ ((IMG_UINT32) ((PVRSRVTL_PPACKETHDR) (void *) (p))->uiTypeSize & PVRSRVTL_PACKETHDR_SIZE_MASK) -+ -+ -+/*! Returns a IMG_BYTE* pointer to the first byte of data in the packet */ -+#define GET_PACKET_DATA_PTR(p) \ -+ (((IMG_UINT8 *) (void *) (p)) + sizeof(PVRSRVTL_PACKETHDR)) -+ -+/*! Turns the packet address p into a PVRSRVTL_PPACKETHDR pointer type. -+ */ -+#define GET_PACKET_HDR(p) ((PVRSRVTL_PPACKETHDR) ((void *) (p))) -+ -+/*! Given a PVRSRVTL_PPACKETHDR address, return the address of the next pack -+ * It is up to the caller to determine if the new address is within the -+ * packet buffer. -+ */ -+#define GET_NEXT_PACKET_ADDR(p) \ -+ GET_PACKET_HDR( \ -+ GET_PACKET_DATA_PTR(p) + \ -+ ( \ -+ (GET_PACKET_DATA_LEN(p) + (PVRSRVTL_PACKET_ALIGNMENT-1U)) & \ -+ (~(PVRSRVTL_PACKET_ALIGNMENT-1U)) \ -+ ) \ -+ ) -+ -+/*! Get the type of the packet. p is of type PVRSRVTL_PPACKETHDR. -+ */ -+#define GET_PACKET_TYPE(p) (((p)->uiTypeSize & PVRSRVTL_PACKETHDR_TYPE_MASK)>>PVRSRVTL_PACKETHDR_TYPE_OFFSET) -+ -+/*! Set PACKETS_DROPPED flag in packet header as a part of uiTypeSize. -+ * p is of type PVRSRVTL_PPACKETHDR. -+ */ -+#define SET_PACKETS_DROPPED(p) (((p)->uiTypeSize) | (1UL << PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET)) -+ -+/*! Check if packets were dropped before this packet. -+ * p is of type PVRSRVTL_PPACKETHDR. -+ */ -+#define CHECK_PACKETS_DROPPED(p) ((((p)->uiTypeSize & PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK)>>PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET) != 0U) -+ -+/*! Flags for use with PVRSRVTLOpenStream -+ * 0x01 - Do not block in PVRSRVTLAcquireData() when no bytes are available -+ * 0x02 - When the stream does not exist wait for a bit (2s) in -+ * PVRSRVTLOpenStream() and then exit with a timeout error if it still -+ * does not exist. -+ * 0x04 - Open stream for write only operations. -+ * If flag is not used stream is opened as read-only. This flag is -+ * required if one wants to call reserve/commit/write function on the -+ * stream descriptor. Read from on the stream descriptor opened -+ * with this flag will fail. -+ * 0x08 - Disable Producer Callback. -+ * If this flag is set and the stream becomes empty, do not call any -+ * associated producer callback to generate more data from the reader -+ * context. -+ * 0x10 - Reset stream on open. -+ * When this flag is used the stream will drop all of the stored data. -+ * 0x20 - Limit read position to the write position at time the stream -+ * was opened. Hence this flag will freeze the content read to that -+ * produced before the stream was opened for reading. -+ * 0x40 - Ignore Open Callback. -+ * When this flag is set ignore any OnReaderOpenCallback setting for -+ * the stream. 
This allows access to the stream to be made without -+ * generating any extra packets into the stream. -+ */ -+ -+#define PVRSRV_STREAM_FLAG_NONE (0U) -+#define PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING (1U<<0) -+#define PVRSRV_STREAM_FLAG_OPEN_WAIT (1U<<1) -+#define PVRSRV_STREAM_FLAG_OPEN_WO (1U<<2) -+#define PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK (1U<<3) -+#define PVRSRV_STREAM_FLAG_RESET_ON_OPEN (1U<<4) -+#define PVRSRV_STREAM_FLAG_READ_LIMIT (1U<<5) -+#define PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK (1U<<6) -+ -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif /* PVR_TLCOMMON_H */ -+/****************************************************************************** -+ End of file (pvrsrv_tlcommon.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_tlstreams.h b/drivers/gpu/drm/img-rogue/pvrsrv_tlstreams.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrv_tlstreams.h -@@ -0,0 +1,63 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services Transport Layer stream names -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Transport layer common types and definitions included into -+ both user mode and kernel mode source. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#ifndef PVRSRV_TLSTREAMS_H -+#define PVRSRV_TLSTREAMS_H -+ -+#define PVRSRV_TL_CTLR_STREAM "tlctrl" -+ -+#define PVRSRV_TL_HWPERF_RGX_FW_STREAM "hwperf_fw_" -+#define PVRSRV_TL_HWPERF_HOST_SERVER_STREAM "hwperf_host_" -+ -+#define PVRSRV_TL_FTRACE_RGX_FW_STREAM "ftrace_fw_" -+ -+/* Host HWPerf client stream names are of the form 'hwperf_client_' */ -+#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM "hwperf_client_" -+#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC "hwperf_client_%u_%u" -+ -+#endif /* PVRSRV_TLSTREAMS_H */ -+ -+/****************************************************************************** -+ End of file (pvrsrv_tlstreams.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/pvrsrvkm.mk b/drivers/gpu/drm/img-rogue/pvrsrvkm.mk -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrsrvkm.mk -@@ -0,0 +1,154 @@ -+pvrsrvkm-y += \ -+ client_cache_direct_bridge.o \ -+ server_cache_bridge.o \ -+ server_cmm_bridge.o \ -+ client_devicememhistory_direct_bridge.o \ -+ server_devicememhistory_bridge.o \ -+ server_di_bridge.o \ -+ server_dmabuf_bridge.o \ -+ client_mm_direct_bridge.o \ -+ server_mm_bridge.o \ -+ server_mmextmem_bridge.o \ -+ client_pvrtl_direct_bridge.o \ -+ server_pvrtl_bridge.o \ -+ server_rgxbreakpoint_bridge.o \ -+ server_rgxcmp_bridge.o \ -+ server_rgxfwdbg_bridge.o \ -+ server_rgxhwperf_bridge.o \ -+ server_rgxregconfig_bridge.o \ -+ server_rgxta3d_bridge.o \ -+ server_rgxtimerquery_bridge.o \ -+ server_rgxtq2_bridge.o \ -+ server_rgxtq_bridge.o \ -+ server_srvcore_bridge.o \ -+ client_sync_direct_bridge.o \ -+ server_sync_bridge.o \ -+ client_synctracking_direct_bridge.o \ -+ server_synctracking_bridge.o \ -+ cache_km.o \ -+ connection_server.o \ -+ debug_common.o \ -+ devicemem_heapcfg.o \ -+ devicemem_history_server.o \ -+ devicemem_server.o \ -+ di_impl_brg.o \ -+ di_server.o \ -+ handle.o \ -+ info_page_km.o \ -+ lists.o \ -+ mmu_common.o \ -+ physheap.o \ -+ physmem.o \ -+ physmem_extmem.o \ -+ physmem_hostmem.o \ -+ physmem_lma.o \ -+ physmem_osmem.o \ -+ pmr.o \ -+ power.o \ -+ process_stats.o \ -+ pvr_notifier.o \ -+ pvrsrv.o \ -+ pvrsrv_bridge_init.o \ -+ pvrsrv_pool.o \ -+ srvcore.o \ -+ sync_checkpoint.o \ -+ sync_server.o \ -+ tlintern.o \ -+ tlserver.o \ -+ tlstream.o \ -+ vmm_pvz_client.o \ -+ vmm_pvz_server.o \ -+ vz_vmm_pvz.o \ -+ vz_vmm_vm.o \ -+ rgx_bridge_init.o \ -+ rgxbreakpoint.o \ -+ rgxbvnc.o \ -+ rgxccb.o \ -+ rgxcompute.o \ -+ rgxdebug_common.o \ -+ rgxfwcmnctx.o \ -+ rgxfwdbg.o \ -+ rgxfwimageutils.o \ -+ rgxfwtrace_strings.o \ -+ rgxhwperf_common.o \ -+ rgxmem.o \ -+ rgxregconfig.o \ -+ rgxshader.o \ -+ rgxsyncutils.o \ -+ rgxtdmtransfer.o \ -+ rgxtimecorr.o \ -+ rgxtimerquery.o \ -+ rgxutils.o \ -+ rgxdebug.o \ -+ rgxfwriscv.o \ -+ rgxfwutils.o \ -+ rgxhwperf.o \ -+ rgxinit.o \ -+ rgxlayer_impl.o \ -+ rgxmipsmmuinit.o \ -+ rgxmmuinit.o \ -+ rgxmulticore.o \ -+ rgxpower.o \ -+ rgxsrvinit.o \ -+ rgxstartstop.o \ -+ rgxta3d.o \ -+ rgxtransfer.o \ -+ allocmem.o \ -+ event.o \ -+ fwload.o \ -+ handle_idr.o \ -+ km_apphint.o \ -+ module_common.o \ -+ osconnection_server.o \ -+ osfunc.o \ -+ osmmap_stub.o \ -+ physmem_dmabuf.o \ -+ physmem_extmem_linux.o \ -+ physmem_osmem_linux.o \ -+ physmem_test.o \ -+ pmr_os.o \ -+ pvr_bridge_k.o \ -+ pvr_buffer_sync.o \ -+ pvr_counting_timeline.o \ -+ pvr_debug.o \ -+ pvr_debugfs.o \ -+ pvr_drm.o \ 
-+ pvr_fence.o \ -+ pvr_gputrace.o \ -+ pvr_platform_drv.o \ -+ pvr_sw_fence.o \ -+ pvr_sync_file.o \ -+ pvr_sync_ioctl_common.o \ -+ pvr_sync_ioctl_drm.o \ -+ devicemem.o \ -+ devicemem_utils.o \ -+ hash.o \ -+ mem_utils.o \ -+ pvrsrv_error.o \ -+ ra.o \ -+ sync.o \ -+ tlclient.o \ -+ uniq_key_splay_tree.o \ -+ rgx_hwperf_table.o \ -+ interrupt_support.o \ -+ pci_support.o \ -+ sysconfig_cmn.o \ -+ dma_support.o \ -+ vmm_type_stub.o \ -+ apollo/sysconfig.o -+pvrsrvkm-$(CONFIG_DRM_POWERVR_ROGUE_DEBUG) += \ -+ client_htbuffer_direct_bridge.o \ -+ server_htbuffer_bridge.o \ -+ server_rgxkicksync_bridge.o \ -+ client_ri_direct_bridge.o \ -+ server_ri_bridge.o \ -+ htb_debug.o \ -+ htbserver.o \ -+ ri_server.o \ -+ rgxkicksync.o \ -+ htbuffer.o -+pvrsrvkm-$(CONFIG_ARM) += osfunc_arm.o -+pvrsrvkm-$(CONFIG_ARM64) += osfunc_arm64.o -+pvrsrvkm-$(CONFIG_EVENT_TRACING) += trace_events.o -+pvrsrvkm-$(CONFIG_RISCV) += osfunc_riscv.c -+pvrsrvkm-$(CONFIG_X86) += osfunc_x86.o -diff --git a/drivers/gpu/drm/img-rogue/pvrversion.h b/drivers/gpu/drm/img-rogue/pvrversion.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/pvrversion.h -@@ -0,0 +1,68 @@ -+/*************************************************************************/ /*! -+@File pvrversion.h -+@Title PowerVR version numbers and strings. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Version numbers and strings for PowerVR components. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef PVRVERSION_H -+#define PVRVERSION_H -+ -+#define PVRVERSION_MAJ 23U -+#define PVRVERSION_MIN 2U -+ -+#define PVRVERSION_FAMILY "rogueddk" -+#define PVRVERSION_BRANCHNAME "23.2" -+#define PVRVERSION_BUILD 6460340 -+#define PVRVERSION_BSCONTROL "Rogue_DDK_Linux_WS" -+ -+#define PVRVERSION_STRING "Rogue_DDK_Linux_WS rogueddk 23.2@6460340" -+#define PVRVERSION_STRING_SHORT "23.2@6460340" -+ -+#define COPYRIGHT_TXT "Copyright (c) Imagination Technologies Ltd. All Rights Reserved." -+ -+#define PVRVERSION_BUILD_HI 646 -+#define PVRVERSION_BUILD_LO 340 -+#define PVRVERSION_STRING_NUMERIC "23.2.646.340" -+ -+#define PVRVERSION_PACK(MAJOR,MINOR) (((IMG_UINT32)((IMG_UINT32)(MAJOR) & 0xFFFFU) << 16U) | (((MINOR) & 0xFFFFU) << 0U)) -+#define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16U) & 0xFFFFU) -+#define PVRVERSION_UNPACK_MIN(VERSION) (((VERSION) >> 0U) & 0xFFFFU) -+ -+#endif /* PVRVERSION_H */ -diff --git a/drivers/gpu/drm/img-rogue/ra.c b/drivers/gpu/drm/img-rogue/ra.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/ra.c -@@ -0,0 +1,3472 @@ -+/*************************************************************************/ /*! -+@File -+@Title Resource Allocator -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
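[Editor's note - worked example of the version packing macros above, not part of the patch. With PVRVERSION_MAJ = 23 (0x17) and PVRVERSION_MIN = 2, the packed value is 0x00170002 and the unpack macros recover the originals.]

    static void PVRVersionPackExample(void)
    {
        IMG_UINT32 ui32Packed = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN);

        /* 23 (0x17) in the upper 16 bits, 2 in the lower 16 bits */
        PVR_ASSERT(ui32Packed == 0x00170002U);
        PVR_ASSERT(PVRVERSION_UNPACK_MAJ(ui32Packed) == 23U);
        PVR_ASSERT(PVRVERSION_UNPACK_MIN(ui32Packed) == 2U);
    }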
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ -+@Description -+ Implements generic resource allocation. The resource allocator was originally -+ intended to manage address spaces. In practice the resource allocator is -+ generic and can manage arbitrary sets of integers. -+ -+ Resources are allocated from arenas. Arenas can be created with an initial -+ span of resources. Further resources spans can be added to arenas. A -+ callback mechanism allows an arena to request further resource spans on -+ demand. -+ -+ Each arena maintains an ordered list of resource segments each described by a -+ boundary tag. Each boundary tag describes a segment of resources which are -+ either 'free', available for allocation, or 'busy' currently allocated. -+ Adjacent 'free' segments are always coalesced to avoid fragmentation. -+ -+ For allocation, all 'free' segments are kept on lists of 'free' segments in -+ a table index by pvr_log2(segment size) i.e., each table index n holds 'free' -+ segments in the size range 2^n -> 2^(n+1) - 1. -+ -+ Allocation policy is based on an *almost* good fit strategy. -+ -+ Allocated segments are inserted into a self-scaling hash table which maps -+ the base resource of the span to the relevant boundary tag. This allows the -+ code to get back to the boundary tag without exporting explicit boundary tag -+ references through the API. -+ -+ Each arena has an associated quantum size, all allocations from the arena are -+ made in multiples of the basic quantum. -+ -+ On resource exhaustion in an arena, a callback if provided will be used to -+ request further resources. Resource spans allocated by the callback mechanism -+ will be returned when freed (through one of the two callbacks). -+*/ /**************************************************************************/ -+ -+/* Issues: -+ * - flags, flags are passed into the resource allocator but are not currently used. -+ * - determination, of import size, is currently braindead. -+ * - debug code should be moved out to own module and #ifdef'd -+ */ -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include "pvrsrv_error.h" -+#include "dllist.h" -+#include "uniq_key_splay_tree.h" -+ -+#include "hash.h" -+#include "ra.h" -+#include "pvrsrv_memallocflags.h" -+ -+#include "osfunc.h" -+#include "allocmem.h" -+#include "lock.h" -+#include "pvr_intrinsics.h" -+ -+/* The initial, and minimum size of the live address -> boundary tag structure -+ * hash table. The value 64 is a fairly arbitrary choice. The hash table -+ * resizes on demand so the value chosen is not critical. -+ */ -+#define MINIMUM_HASH_SIZE (64) -+ -+/* #define RA_VALIDATE */ -+ -+#if defined(__KLOCWORK__) -+ /* Make sure Klocwork analyses all the code (including the debug one) */ -+ #if !defined(RA_VALIDATE) -+ #define RA_VALIDATE -+ #endif -+#endif -+ -+#if !defined(PVRSRV_NEED_PVR_ASSERT) || !defined(RA_VALIDATE) -+/* Disable the asserts unless explicitly told otherwise. 
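[Editor's note - illustrative sketch of the quantum behaviour described in the file header above ("all allocations from the arena are made in multiples of the basic quantum"), not part of the patch. The bitwise form assumes a power-of-two quantum, which is the usual case here.]

    static RA_LENGTH_T ExampleRoundToQuantum(RA_LENGTH_T uRequest, RA_LENGTH_T uQuantum)
    {
        /* e.g. uRequest = 5000, uQuantum = 4096  ->  8192 */
        return (uRequest + uQuantum - 1) & ~(uQuantum - 1);
    }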
-+ * They slow the driver too much for other people -+ */ -+ -+#undef PVR_ASSERT -+/* Use a macro that really do not do anything when compiling in release -+ * mode! -+ */ -+#define PVR_ASSERT(x) -+#endif -+ -+/* boundary tags, used to describe a resource segment */ -+struct _BT_ -+{ -+ enum bt_type -+ { -+ btt_free, /* free resource segment */ -+ btt_live /* allocated resource segment */ -+ } type; -+ -+ unsigned int is_leftmost; -+ unsigned int is_rightmost; -+ unsigned int free_import; -+ -+ /* The base resource and extent of this segment */ -+ RA_BASE_T base; -+ RA_LENGTH_T uSize; -+ -+ /* doubly linked ordered list of all segments within the arena */ -+ struct _BT_ *pNextSegment; -+ struct _BT_ *pPrevSegment; -+ -+ /* doubly linked un-ordered list of free segments with the same flags. */ -+ struct _BT_ *next_free; -+ struct _BT_ *prev_free; -+ -+ /* A user reference associated with this span, user references are -+ * currently only provided in the callback mechanism -+ */ -+ IMG_HANDLE hPriv; -+ -+ /* Flags to match on this span */ -+ RA_FLAGS_T uFlags; -+ -+}; -+typedef struct _BT_ BT; -+ -+ -+/* resource allocation arena */ -+struct _RA_ARENA_ -+{ -+ /* arena name for diagnostics output */ -+ IMG_CHAR name[RA_MAX_NAME_LENGTH]; -+ -+ /* Spans / Imports within this arena are at least quantum sized -+ * and are a multiple of the uQuantum. This also has the effect of -+ * aligning these Spans to the uQuantum. -+ */ -+ RA_LENGTH_T uQuantum; -+ -+ /* import interface, if provided */ -+ PFN_RA_ALLOC pImportAlloc; -+ -+ PFN_RA_FREE pImportFree; -+ -+ /* Arbitrary handle provided by arena owner to be passed into the -+ * import alloc and free hooks -+ */ -+ void *pImportHandle; -+ -+ IMG_PSPLAY_TREE per_flags_buckets; -+ -+ /* resource segment list */ -+ BT *pHeadSegment; -+ -+ /* segment address to boundary tag hash table */ -+ HASH_TABLE *pSegmentHash; -+ -+ /* Lock for this arena */ -+ POS_LOCK hLock; -+ -+ /* Policies that govern the resource area */ -+ RA_POLICY_T ui32PolicyFlags; -+ -+ /* LockClass of this arena. This is used within lockdep to decide if a -+ * recursive call sequence with the same lock class is allowed or not. -+ */ -+ IMG_UINT32 ui32LockClass; -+ -+ /* Total Size of the Arena */ -+ IMG_UINT64 ui64TotalArenaSize; -+ -+ /* Size available for allocation in the arena */ -+ IMG_UINT64 ui64FreeArenaSize; -+ -+}; -+ -+struct _RA_ARENA_ITERATOR_ -+{ -+ RA_ARENA *pArena; -+ BT *pCurrent; -+ IMG_BOOL bIncludeFreeSegments; -+}; -+ -+static PVRSRV_ERROR _RA_FreeMultiUnlocked(RA_ARENA *pArena, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize); -+static PVRSRV_ERROR -+_RA_FreeMultiUnlockedSparse(RA_ARENA *pArena, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize, -+ RA_LENGTH_T uiChunkSize, -+ IMG_UINT32 *puiFreeIndices, -+ IMG_UINT32 *puiFreeCount); -+ -+/*************************************************************************/ /*! -+@Function _RequestAllocFail -+@Description Default callback allocator used if no callback is specified, -+ always fails to allocate further resources to the arena. -+@Input _h - callback handle -+@Input _uSize - requested allocation size -+@Input _uflags - allocation flags -+@Input _uBaseAlignment - Alignment for the returned allocated base -+@Input _pBase - receives allocated base -+@Output _pActualSize - actual allocation size -+@Input _pRef - user reference -+@Return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL, this function always fails -+ to allocate. 
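-+            _RequestAllocFail is the allocator installed by RA_Create when the
-+            caller passes imp_alloc == NULL, so arenas created without an
-+            import callback simply report exhaustion instead of growing on
-+            demand.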
-+*/ /**************************************************************************/ -+static PVRSRV_ERROR -+_RequestAllocFail(RA_PERARENA_HANDLE _h, -+ RA_LENGTH_T _uSize, -+ RA_FLAGS_T _uFlags, -+ RA_LENGTH_T _uBaseAlignment, -+ const IMG_CHAR *_pszAnnotation, -+ RA_BASE_T *_pBase, -+ RA_LENGTH_T *_pActualSize, -+ RA_PERISPAN_HANDLE *_phPriv) -+{ -+ PVR_UNREFERENCED_PARAMETER(_h); -+ PVR_UNREFERENCED_PARAMETER(_uSize); -+ PVR_UNREFERENCED_PARAMETER(_pActualSize); -+ PVR_UNREFERENCED_PARAMETER(_phPriv); -+ PVR_UNREFERENCED_PARAMETER(_uFlags); -+ PVR_UNREFERENCED_PARAMETER(_uBaseAlignment); -+ PVR_UNREFERENCED_PARAMETER(_pBase); -+ PVR_UNREFERENCED_PARAMETER(_pszAnnotation); -+ -+ return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL; -+} -+ -+ -+#if defined(PVR_CTZLL) -+ /* Make sure to trigger an error if someone change the buckets or the bHasEltsMapping size -+ the bHasEltsMapping is used to quickly determine the smallest bucket containing elements. -+ therefore it must have at least as many bits has the buckets array have buckets. The RA -+ implementation actually uses one more bit. */ -+ static_assert(ARRAY_SIZE(((IMG_PSPLAY_TREE)0)->buckets) -+ < 8 * sizeof(((IMG_PSPLAY_TREE) 0)->bHasEltsMapping), -+ "Too many buckets for bHasEltsMapping bitmap"); -+#endif -+ -+ -+/*************************************************************************/ /*! -+@Function pvr_log2 -+@Description Computes the floor of the log base 2 of a unsigned integer -+@Input n Unsigned integer -+@Return Floor(Log2(n)) -+*/ /**************************************************************************/ -+#if defined(PVR_CLZLL) -+/* make sure to trigger a problem if someone changes the RA_LENGTH_T type -+ indeed the __builtin_clzll is for unsigned long long variables. -+ -+ if someone changes RA_LENGTH to unsigned long, then use __builtin_clzl -+ if it changes to unsigned int, use __builtin_clz -+ -+ if it changes for something bigger than unsigned long long, -+ then revert the pvr_log2 to the classic implementation */ -+static_assert(sizeof(RA_LENGTH_T) == sizeof(unsigned long long), -+ "RA log routines not tuned for sizeof(RA_LENGTH_T)"); -+ -+static inline IMG_UINT32 pvr_log2(RA_LENGTH_T n) -+{ -+ PVR_ASSERT(n != 0); /* Log2 is not defined on 0 */ -+ -+ return (8 * sizeof(RA_LENGTH_T)) - 1 - PVR_CLZLL(n); -+} -+#else -+static IMG_UINT32 -+pvr_log2(RA_LENGTH_T n) -+{ -+ IMG_UINT32 l = 0; -+ -+ PVR_ASSERT(n != 0); /* Log2 is not defined on 0 */ -+ -+ n >>= 1; -+ while (n > 0) -+ { -+ n >>= 1; -+ l++; -+ } -+ return l; -+} -+#endif -+ -+static INLINE void _FreeTableLimitBoundsCheck(IMG_UINT32 *uiIndex) -+{ -+ if (*uiIndex >= FREE_TABLE_LIMIT) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Index exceeds FREE_TABLE_LIMIT (1TB), " -+ "Clamping Index to FREE_TABLE_LIMIT")); -+ *uiIndex = FREE_TABLE_LIMIT - 1; -+ } -+} -+ -+ -+#if defined(RA_VALIDATE) -+/*************************************************************************/ /*! -+@Function _IsInSegmentList -+@Description Tests if a BT is in the segment list. -+@Input pArena The arena. -+@Input pBT The boundary tag to look for. -+@Return IMG_FALSE BT was not in the arena's segment list. -+ IMG_TRUE BT was in the arena's segment list. -+*/ /**************************************************************************/ -+static IMG_BOOL -+_IsInSegmentList(RA_ARENA *pArena, BT *pBT) -+{ -+ BT* pBTScan; -+ -+ PVR_ASSERT(pArena != NULL); -+ PVR_ASSERT(pBT != NULL); -+ -+ /* Walk the segment list until we see the BT pointer... 
*/ -+ pBTScan = pArena->pHeadSegment; -+ while (pBTScan != NULL && pBTScan != pBT) -+ { -+ pBTScan = pBTScan->pNextSegment; -+ } -+ -+ /* Test if we found it and then return */ -+ return (pBTScan == pBT); -+} -+ -+/*************************************************************************/ /*! -+@Function _IsInFreeList -+@Description Tests if a BT is in the free list. -+@Input pArena The arena. -+@Input pBT The boundary tag to look for. -+@Return IMG_FALSE BT was not in the arena's free list. -+ IMG_TRUE BT was in the arena's free list. -+*/ /**************************************************************************/ -+static IMG_BOOL -+_IsInFreeList(RA_ARENA *pArena, BT *pBT) -+{ -+ BT* pBTScan; -+ IMG_UINT32 uIndex; -+ -+ PVR_ASSERT(pArena != NULL); -+ PVR_ASSERT(pBT != NULL); -+ -+ /* Look for the free list that holds BTs of this size... */ -+ uIndex = pvr_log2(pBT->uSize); -+ PVR_ASSERT(uIndex < FREE_TABLE_LIMIT); -+ -+ pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets); -+ if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->flags != pBT->uFlags)) -+ { -+ return 0; -+ } -+ else -+ { -+ pBTScan = pArena->per_flags_buckets->buckets[uIndex]; -+ while (pBTScan != NULL && pBTScan != pBT) -+ { -+ pBTScan = pBTScan->next_free; -+ } -+ -+ /* Test if we found it and then return */ -+ return (pBTScan == pBT); -+ } -+} -+ -+/* is_arena_valid should only be used in debug mode. -+ * It checks that some properties an arena must have are verified -+ */ -+static int is_arena_valid(struct _RA_ARENA_ *arena) -+{ -+ struct _BT_ *chunk; -+#if defined(PVR_CTZLL) -+ unsigned int i; -+#endif -+ -+ for (chunk = arena->pHeadSegment; chunk != NULL; chunk = chunk->pNextSegment) -+ { -+ /* if next segment is NULL, then it must be a rightmost */ -+ PVR_ASSERT((chunk->pNextSegment != NULL) || (chunk->is_rightmost)); -+ /* if prev segment is NULL, then it must be a leftmost */ -+ PVR_ASSERT((chunk->pPrevSegment != NULL) || (chunk->is_leftmost)); -+ -+ if (chunk->type == btt_free) -+ { -+ /* checks the correctness of the type field */ -+ PVR_ASSERT(_IsInFreeList(arena, chunk)); -+ -+ /* check that there can't be two consecutive free chunks. -+ Indeed, instead of having two consecutive free chunks, -+ there should be only one that span the size of the two. */ -+ PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->type != btt_free)); -+ PVR_ASSERT((chunk->is_rightmost) || (chunk->pNextSegment->type != btt_free)); -+ } -+ else -+ { -+ /* checks the correctness of the type field */ -+ PVR_ASSERT(!_IsInFreeList(arena, chunk)); -+ } -+ -+ PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->base + chunk->pPrevSegment->uSize == chunk->base)); -+ PVR_ASSERT((chunk->is_rightmost) || (chunk->base + chunk->uSize == chunk->pNextSegment->base)); -+ -+ /* all segments of the same imports must have the same flags ... */ -+ PVR_ASSERT((chunk->is_rightmost) || (chunk->uFlags == chunk->pNextSegment->uFlags)); -+ /* ... and the same import handle */ -+ PVR_ASSERT((chunk->is_rightmost) || (chunk->hPriv == chunk->pNextSegment->hPriv)); -+ -+ -+ /* if a free chunk spans a whole import, then it must be an 'not to free import'. -+ Otherwise it should have been freed. 
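-+       i.e. an import obtained on demand through pImportAlloc that has become
-+       completely free again should already have been handed back to
-+       pImportFree by _FreeBT()/_RemoveResourceSpan().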
*/ -+ PVR_ASSERT((!chunk->is_leftmost) || (!chunk->is_rightmost) || (chunk->type == btt_live) || (!chunk->free_import)); -+ } -+ -+#if defined(PVR_CTZLL) -+ if (arena->per_flags_buckets != NULL) -+ { -+ for (i = 0; i < FREE_TABLE_LIMIT; ++i) -+ { -+ /* verify that the bHasEltsMapping is correct for this flags bucket */ -+ PVR_ASSERT( -+ ((arena->per_flags_buckets->buckets[i] == NULL) && -+ (((arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) == 0))) -+ || -+ ((arena->per_flags_buckets->buckets[i] != NULL) && -+ (((arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) != 0))) -+ ); -+ } -+ } -+#endif -+ -+ /* if arena was not valid, an earlier assert should have triggered */ -+ return 1; -+} -+#endif -+ -+/*************************************************************************/ /*! -+@Function _SegmentListInsertAfter -+@Description Insert a boundary tag into an arena segment list after a -+ specified boundary tag. -+@Input pInsertionPoint The insertion point. -+@Input pBT The boundary tag to insert. -+*/ /**************************************************************************/ -+static INLINE void -+_SegmentListInsertAfter(BT *pInsertionPoint, -+ BT *pBT) -+{ -+ PVR_ASSERT(pBT != NULL); -+ PVR_ASSERT(pInsertionPoint != NULL); -+ -+ pBT->pNextSegment = pInsertionPoint->pNextSegment; -+ pBT->pPrevSegment = pInsertionPoint; -+ if (pInsertionPoint->pNextSegment != NULL) -+ { -+ pInsertionPoint->pNextSegment->pPrevSegment = pBT; -+ } -+ pInsertionPoint->pNextSegment = pBT; -+} -+ -+/*************************************************************************/ /*! -+@Function _SegmentListInsert -+@Description Insert a boundary tag into an arena segment list -+@Input pArena The arena. -+@Input pBT The boundary tag to insert. -+*/ /**************************************************************************/ -+static INLINE void -+_SegmentListInsert(RA_ARENA *pArena, BT *pBT) -+{ -+ PVR_ASSERT(!_IsInSegmentList(pArena, pBT)); -+ -+ /* insert into the segment chain */ -+ pBT->pNextSegment = pArena->pHeadSegment; -+ pArena->pHeadSegment = pBT; -+ if (pBT->pNextSegment != NULL) -+ { -+ pBT->pNextSegment->pPrevSegment = pBT; -+ } -+ -+ pBT->pPrevSegment = NULL; -+} -+ -+/*************************************************************************/ /*! -+@Function _SegmentListRemove -+@Description Remove a boundary tag from an arena segment list. -+@Input pArena The arena. -+@Input pBT The boundary tag to remove. -+*/ /**************************************************************************/ -+static void -+_SegmentListRemove(RA_ARENA *pArena, BT *pBT) -+{ -+ PVR_ASSERT(_IsInSegmentList(pArena, pBT)); -+ -+ if (pBT->pPrevSegment == NULL) -+ pArena->pHeadSegment = pBT->pNextSegment; -+ else -+ pBT->pPrevSegment->pNextSegment = pBT->pNextSegment; -+ -+ if (pBT->pNextSegment != NULL) -+ pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment; -+} -+ -+ -+/*************************************************************************/ /*! -+@Function _BuildBT -+@Description Construct a boundary tag for a free segment. -+@Input base The base of the resource segment. -+@Input uSize The extent of the resource segment. 
-+@Input uFlags The flags to give to the boundary tag -+@Return Boundary tag or NULL -+*/ /**************************************************************************/ -+static BT * -+_BuildBT(RA_BASE_T base, RA_LENGTH_T uSize, RA_FLAGS_T uFlags) -+{ -+ BT *pBT; -+ -+ pBT = OSAllocZMem(sizeof(BT)); -+ if (pBT == NULL) -+ { -+ return NULL; -+ } -+ -+ pBT->is_leftmost = 1; -+ pBT->is_rightmost = 1; -+ /* pBT->free_import = 0; */ -+ pBT->type = btt_live; -+ pBT->base = base; -+ pBT->uSize = uSize; -+ pBT->uFlags = uFlags; -+ -+ return pBT; -+} -+ -+ -+/*************************************************************************/ /*! -+@Function _SegmentSplit -+@Description Split a segment into two, maintain the arena segment list. The -+ boundary tag should not be in the free table. Neither the -+ original or the new neighbour boundary tag will be in the free -+ table. -+@Input pBT The boundary tag to split. -+@Input uSize The required segment size of boundary tag after -+ splitting. -+@Return New neighbour boundary tag or NULL. -+*/ /**************************************************************************/ -+static BT * -+_SegmentSplit(BT *pBT, RA_LENGTH_T uSize) -+{ -+ BT *pNeighbour; -+ -+ pNeighbour = _BuildBT(pBT->base + uSize, pBT->uSize - uSize, pBT->uFlags); -+ if (pNeighbour == NULL) -+ { -+ return NULL; -+ } -+ -+ _SegmentListInsertAfter(pBT, pNeighbour); -+ -+ pNeighbour->is_leftmost = 0; -+ pNeighbour->is_rightmost = pBT->is_rightmost; -+ pNeighbour->free_import = pBT->free_import; -+ pBT->is_rightmost = 0; -+ pNeighbour->hPriv = pBT->hPriv; -+ pBT->uSize = uSize; -+ pNeighbour->uFlags = pBT->uFlags; -+ -+ return pNeighbour; -+} -+ -+/*************************************************************************/ /*! -+@Function _FreeListInsert -+@Description Insert a boundary tag into an arena free table. -+@Input pArena The arena. -+@Input pBT The boundary tag. 
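-+            For illustration (sizes are hypothetical): a free segment with
-+            uSize 0x3000 has pvr_log2(0x3000) == 13, so it is linked into
-+            buckets[13], the free list holding segments in the size range
-+            2^13 .. 2^14 - 1.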
-+*/ /**************************************************************************/ -+static void -+_FreeListInsert(RA_ARENA *pArena, BT *pBT) -+{ -+ IMG_UINT32 uIndex; -+ BT *pBTTemp = NULL; -+ uIndex = pvr_log2(pBT->uSize); -+ -+ _FreeTableLimitBoundsCheck(&uIndex); -+ -+ PVR_ASSERT(!_IsInFreeList(pArena, pBT)); -+ -+ pBT->type = btt_free; -+ -+ pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets); -+ /* the flags item in the splay tree must have been created before-hand by -+ _InsertResource */ -+ PVR_ASSERT(pArena->per_flags_buckets != NULL); -+ -+ /* Handle NULL values for RELEASE builds and/or disabled ASSERT DEBUG builds */ -+ if (unlikely(pArena->per_flags_buckets == NULL)) -+ { -+ return; -+ } -+ -+ /* Get the first node in the bucket */ -+ pBTTemp = pArena->per_flags_buckets->buckets[uIndex]; -+ -+ if (unlikely((pArena->ui32PolicyFlags & RA_POLICY_ALLOC_NODE_SELECT_MASK) == RA_POLICY_ALLOC_OPTIMAL)) -+ { -+ /* Add the node to the start if the bucket is empty */ -+ if (NULL == pBTTemp) -+ { -+ pArena->per_flags_buckets->buckets[uIndex] = pBT; -+ pBT->next_free = NULL; -+ pBT->prev_free = NULL; -+ -+ } -+ else -+ { -+ BT *pBTPrev = NULL; -+ /* Traverse the list and identify the appropriate -+ * place based on the size of the Boundary being inserted */ -+ while (pBTTemp && (pBTTemp->uSize < pBT->uSize)) -+ { -+ pBTPrev = pBTTemp; -+ pBTTemp = pBTTemp->next_free; -+ } -+ /* point the new node to the first higher size element */ -+ pBT->next_free = pBTTemp; -+ pBT->prev_free = pBTPrev; -+ -+ if (pBTPrev) -+ { -+ /* Set the lower size element in the -+ * chain to point new node */ -+ pBTPrev->next_free = pBT; -+ } -+ else -+ { -+ /* Assign the new node to the start of the bucket -+ * if the bucket is empty */ -+ pArena->per_flags_buckets->buckets[uIndex] = pBT; -+ } -+ /* Make sure the higher size element in the chain points back -+ * to the new node to be introduced */ -+ if (pBTTemp) -+ { -+ pBTTemp->prev_free = pBT; -+ } -+ } -+ } -+ else -+ { -+ pBT->next_free = pBTTemp; -+ if (pBT->next_free != NULL) -+ { -+ pBT->next_free->prev_free = pBT; -+ } -+ pBT->prev_free = NULL; -+ pArena->per_flags_buckets->buckets[uIndex] = pBT; -+ } -+ -+#if defined(PVR_CTZLL) -+ /* tells that bucket[index] now contains elements */ -+ pArena->per_flags_buckets->bHasEltsMapping |= ((IMG_ELTS_MAPPINGS) 1 << uIndex); -+#endif -+ -+} -+ -+/*************************************************************************/ /*! -+@Function _FreeListRemove -+@Description Remove a boundary tag from an arena free table. -+@Input pArena The arena. -+@Input pBT The boundary tag. 
-+*/ /**************************************************************************/ -+static void -+_FreeListRemove(RA_ARENA *pArena, BT *pBT) -+{ -+ IMG_UINT32 uIndex; -+ uIndex = pvr_log2(pBT->uSize); -+ -+ _FreeTableLimitBoundsCheck(&uIndex); -+ -+ PVR_ASSERT(_IsInFreeList(pArena, pBT)); -+ -+ if (pBT->next_free != NULL) -+ { -+ pBT->next_free->prev_free = pBT->prev_free; -+ } -+ -+ if (pBT->prev_free != NULL) -+ { -+ pBT->prev_free->next_free = pBT->next_free; -+ } -+ else -+ { -+ pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets); -+ /* the flags item in the splay tree must have already been created -+ (otherwise how could there be a segment with these flags */ -+ PVR_ASSERT(pArena->per_flags_buckets != NULL); -+ -+ /* Handle unlikely NULL values for RELEASE or ASSERT-disabled builds */ -+ if (unlikely(pArena->per_flags_buckets == NULL)) -+ { -+ pBT->type = btt_live; -+ return; -+ } -+ -+ pArena->per_flags_buckets->buckets[uIndex] = pBT->next_free; -+#if defined(PVR_CTZLL) -+ if (pArena->per_flags_buckets->buckets[uIndex] == NULL) -+ { -+ /* there is no more elements in this bucket. Update the mapping. */ -+ pArena->per_flags_buckets->bHasEltsMapping &= ~((IMG_ELTS_MAPPINGS) 1 << uIndex); -+ } -+#endif -+ } -+ -+ PVR_ASSERT(!_IsInFreeList(pArena, pBT)); -+ pBT->type = btt_live; -+} -+ -+ -+/*************************************************************************/ /*! -+@Function _InsertResource -+@Description Add a free resource segment to an arena. -+@Input pArena The arena. -+@Input base The base of the resource segment. -+@Input uSize The extent of the resource segment. -+@Input uFlags The flags of the new resources. -+@Return New bucket pointer -+ NULL on failure -+*/ /**************************************************************************/ -+static BT * -+_InsertResource(RA_ARENA *pArena, RA_BASE_T base, RA_LENGTH_T uSize, -+ RA_FLAGS_T uFlags) -+{ -+ BT *pBT; -+ PVR_ASSERT(pArena!=NULL); -+ -+ pBT = _BuildBT(base, uSize, uFlags); -+ -+ if (pBT != NULL) -+ { -+ IMG_PSPLAY_TREE tmp = PVRSRVInsert(pBT->uFlags, pArena->per_flags_buckets); -+ if (tmp == NULL) -+ { -+ OSFreeMem(pBT); -+ return NULL; -+ } -+ -+ pArena->per_flags_buckets = tmp; -+ _SegmentListInsert(pArena, pBT); -+ _FreeListInsert(pArena, pBT); -+ } -+ return pBT; -+} -+ -+/*************************************************************************/ /*! -+@Function _InsertResourceSpan -+@Description Add a free resource span to an arena, marked for free_import. -+@Input pArena The arena. -+@Input base The base of the resource segment. -+@Input uSize The extent of the resource segment. -+@Return The boundary tag representing the free resource segment, -+ or NULL on failure. -+*/ /**************************************************************************/ -+static INLINE BT * -+_InsertResourceSpan(RA_ARENA *pArena, -+ RA_BASE_T base, -+ RA_LENGTH_T uSize, -+ RA_FLAGS_T uFlags) -+{ -+ BT *pBT = _InsertResource(pArena, base, uSize, uFlags); -+ if (pBT != NULL) -+ { -+ pBT->free_import = 1; -+ } -+ return pBT; -+} -+ -+ -+/*************************************************************************/ /*! -+@Function _RemoveResourceSpan -+@Description Frees a resource span from an arena, returning the imported -+ span via the callback. -+@Input pArena The arena. -+@Input pBT The boundary tag to free. 
-+@Return IMG_FALSE failure - span was still in use -+ IMG_TRUE success - span was removed and returned -+*/ /**************************************************************************/ -+static INLINE IMG_BOOL -+_RemoveResourceSpan(RA_ARENA *pArena, BT *pBT) -+{ -+ PVR_ASSERT(pArena!=NULL); -+ PVR_ASSERT(pBT!=NULL); -+ -+ if (pBT->free_import && -+ pBT->is_leftmost && -+ pBT->is_rightmost) -+ { -+ _SegmentListRemove(pArena, pBT); -+ pArena->pImportFree(pArena->pImportHandle, pBT->base, pBT->hPriv); -+ OSFreeMem(pBT); -+ -+ return IMG_TRUE; -+ } -+ -+ return IMG_FALSE; -+} -+ -+/*************************************************************************/ /*! -+@Function _FreeBT -+@Description Free a boundary tag taking care of the segment list and the -+ boundary tag free table. -+@Input pArena The arena. -+@Input pBT The boundary tag to free. -+*/ /**************************************************************************/ -+static void -+_FreeBT(RA_ARENA *pArena, BT *pBT) -+{ -+ BT *pNeighbour; -+ -+ PVR_ASSERT(pArena!=NULL); -+ PVR_ASSERT(pBT!=NULL); -+ PVR_ASSERT(!_IsInFreeList(pArena, pBT)); -+ -+ /* try and coalesce with left neighbour */ -+ pNeighbour = pBT->pPrevSegment; -+ if ((!pBT->is_leftmost) && (pNeighbour->type == btt_free)) -+ { -+ /* Verify list correctness */ -+ PVR_ASSERT(pNeighbour->base + pNeighbour->uSize == pBT->base); -+ -+ _FreeListRemove(pArena, pNeighbour); -+ _SegmentListRemove(pArena, pNeighbour); -+ pBT->base = pNeighbour->base; -+ -+ pBT->uSize += pNeighbour->uSize; -+ pBT->is_leftmost = pNeighbour->is_leftmost; -+ OSFreeMem(pNeighbour); -+ } -+ -+ /* try to coalesce with right neighbour */ -+ pNeighbour = pBT->pNextSegment; -+ if ((!pBT->is_rightmost) && (pNeighbour->type == btt_free)) -+ { -+ /* Verify list correctness */ -+ PVR_ASSERT(pBT->base + pBT->uSize == pNeighbour->base); -+ -+ _FreeListRemove(pArena, pNeighbour); -+ _SegmentListRemove(pArena, pNeighbour); -+ pBT->uSize += pNeighbour->uSize; -+ pBT->is_rightmost = pNeighbour->is_rightmost; -+ OSFreeMem(pNeighbour); -+ } -+ -+ if (_RemoveResourceSpan(pArena, pBT) == IMG_FALSE) -+ { -+ _FreeListInsert(pArena, pBT); -+ PVR_ASSERT((!pBT->is_rightmost) || (!pBT->is_leftmost) || (!pBT->free_import)); -+ } -+ -+ PVR_ASSERT(is_arena_valid(pArena)); -+} -+ -+ -+/* -+ This function returns the first element in a bucket that can be split -+ in a way that one of the sub-segments can meet the size and alignment -+ criteria. -+ -+ The first_elt is the bucket to look into. Remember that a bucket is -+ implemented as a pointer to the first element of the linked list. -+ -+ nb_max_try is used to limit the number of elements considered. -+ This is used to only consider the first nb_max_try elements in the -+ free-list. The special value ~0 is used to say unlimited i.e. consider -+ all elements in the free list -+ */ -+static INLINE -+struct _BT_ *find_chunk_in_bucket(struct _BT_ * first_elt, -+ RA_LENGTH_T uSize, -+ RA_LENGTH_T uAlignment, -+ unsigned int nb_max_try) -+{ -+ struct _BT_ *walker; -+ -+ for (walker = first_elt; (walker != NULL) && (nb_max_try != 0); walker = walker->next_free) -+ { -+ const RA_BASE_T aligned_base = (uAlignment > 1) ? -+ PVR_ALIGN(walker->base, uAlignment) -+ : walker->base; -+ -+ if (walker->base + walker->uSize >= aligned_base + uSize) -+ { -+ return walker; -+ } -+ -+ /* 0xFFFF...FFFF is used has nb_max_try = infinity. 
*/ -+ if (nb_max_try != (unsigned int) ~0) -+ { -+ nb_max_try--; -+ } -+ } -+ -+ return NULL; -+} -+ -+/*************************************************************************/ /*! -+ * @Function _FreeMultiBaseArray -+ * -+ * @Description Given an array (Could be complete or partial reference) -+ * free the region given as the array and size. This function -+ * should be used only when it is known that multiple Real -+ * bases will be freed from the array. -+ * -+ * @Input pArena - The RA Arena to free the bases on. -+ * @Input aBaseArray - The Base array to free from -+ * @Input uiBaseArraySize - The Size of the base array to free. -+ * -+ * @Return PVRSRV_OK on Success, PVRSRV_ERROR code otherwise. -+*/ /**************************************************************************/ -+static PVRSRV_ERROR -+_FreeMultiBaseArray(RA_ARENA *pArena, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize) -+{ -+ IMG_UINT32 i; -+ for (i = 0; i < uiBaseArraySize; i++) -+ { -+ if (RA_BASE_IS_REAL(aBaseArray[i])) -+ { -+ BT *pBT; -+ pBT = (BT *) HASH_Remove_Extended(pArena->pSegmentHash, &aBaseArray[i]); -+ -+ if (pBT) -+ { -+ pArena->ui64FreeArenaSize += pBT->uSize; -+ -+ PVR_ASSERT(pBT->base == aBaseArray[i]); -+ _FreeBT(pArena, pBT); -+ aBaseArray[i] = INVALID_BASE_ADDR; -+ } -+ else -+ { -+ /* Did we attempt to remove a ghost page? -+ * Essentially the base was marked real but was actually a ghost. -+ */ -+ PVR_ASSERT(!"Attempt to free non-existing real base!"); -+ return PVRSRV_ERROR_INVALID_REQUEST; -+ } -+ } -+#if defined(DEBUG) -+ else -+ { -+ aBaseArray[i] = INVALID_BASE_ADDR; -+ } -+#endif -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+ * @Function _FreeSingleBaseArray -+ * -+ * @Description Given an array (Could be complete or partial reference) -+ * free the region given as the array and size. This function -+ * should be used only when it is known that a single Real -+ * base will be freed from the array. All Bases will be -+ * sanitised after the real has been freed. -+ * -+ * @Input pArena - The RA Arena to free the bases on. -+ * @Input aBaseArray - The Base array to free from, entry 0 should be a -+ * Real base -+ * @Input uiBaseArraySize - The Size of the base array to free. -+ * -+ * @Return PVRSRV_OK on Success, PVRSRV_ERROR code otherwise. -+*/ /**************************************************************************/ -+static PVRSRV_ERROR -+_FreeSingleBaseArray(RA_ARENA *pArena, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize) -+{ -+ BT *pBT; -+ PVR_ASSERT(RA_BASE_IS_REAL(aBaseArray[0])); -+ -+ pBT = (BT *) HASH_Remove_Extended(pArena->pSegmentHash, &aBaseArray[0]); -+ -+ if (pBT) -+ { -+ pArena->ui64FreeArenaSize += pBT->uSize; -+ -+ PVR_ASSERT(pBT->base == aBaseArray[0]); -+ _FreeBT(pArena, pBT); -+ } -+ else -+ { -+ /* Did we attempt to remove a ghost page? -+ * Essentially the base was marked real but was actually ghost. -+ */ -+ PVR_ASSERT(!"Attempt to free non-existing real base!"); -+ return PVRSRV_ERROR_INVALID_REQUEST; -+ } -+ -+ /* Set all entries to INVALID_BASE_ADDR */ -+ OSCachedMemSet(aBaseArray, 0xFF, uiBaseArraySize * sizeof(RA_BASE_T)); -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+ * @Function _GenerateGhostBases -+ * -+ * @Description Given an array (Could be complete or partial reference) -+ * generate Ghost bases for the allocation and size. 
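-+ *              For example (hypothetical values): uiBase = 0x10000,
-+ *              uiBaseSize = 0x4000 and uiChunkSize = 0x1000 produce
-+ *              { 0x10000 (Real), Ghost(0x11000), Ghost(0x12000), Ghost(0x13000) }
-+ *              and the function returns 4.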
-+ * -+ * @Input uiBase - The Real base to generate Ghost Bases from. -+ * @Input uiBaseSize - The size of the Real Base -+ * @Input uiChunkSize - The Base chunk size used to generate Ghost -+ * bases on specific boundaries. -+ * @Input aBaseArray - The array to add the Ghost bases to. -+ * -+ * @Return array index of element past last Ghost base of given array. -+*/ /**************************************************************************/ -+static IMG_UINT32 -+_GenerateGhostBases(RA_BASE_T uiRealBase, -+ RA_LENGTH_T uiBaseSize, -+ RA_LENGTH_T uiChunkSize, -+ RA_BASE_ARRAY_T aBaseArray) -+{ -+ IMG_UINT32 ui32Index = 0; -+ RA_LENGTH_T uiRemaining = uiBaseSize - uiChunkSize; -+ RA_LENGTH_T uiCurrentBase = uiRealBase + uiChunkSize; -+ aBaseArray[ui32Index] = uiRealBase; -+ -+ for (ui32Index = 1; uiRemaining != 0; ui32Index++) -+ { -+ aBaseArray[ui32Index] = RA_BASE_SET_GHOST_BIT(uiCurrentBase); -+ uiCurrentBase += uiChunkSize; -+ uiRemaining -= uiChunkSize; -+ } -+ -+ return ui32Index; -+} -+ -+/*************************************************************************/ /*! -+ * @Function _FindRealBaseFromGhost -+ * -+ * @Description Given an array and an index into that array for the Ghost Base -+ * find the Real Base hosting the Ghost base in the RA. -+ * @Input aBaseArray - The array the Ghost and Real base reside on. -+ * @Input ui32GhostBaseIndex - The index into the given array for the Ghost Base. -+ * @Output pRealBase - The Real Base hosting the Ghost base. -+ * @Output pui32RealBaseIndex - The index of the Real Base found in the array. -+ * -+ * @Return None. -+*/ /**************************************************************************/ -+static void -+_FindRealBaseFromGhost(RA_BASE_ARRAY_T aBaseArray, -+ IMG_UINT32 ui32GhostBaseIndex, -+ RA_BASE_T *pRealBase, -+ IMG_UINT32 *pui32RealBaseIndex) -+{ -+ IMG_UINT32 ui32Index = ui32GhostBaseIndex; -+ -+ PVR_ASSERT(RA_BASE_IS_GHOST(aBaseArray[ui32GhostBaseIndex])); -+ -+ while (ui32Index != 0 && -+ RA_BASE_IS_GHOST(aBaseArray[ui32Index])) -+ { -+ ui32Index--; -+ } -+ -+ *pRealBase = aBaseArray[ui32Index]; -+ *pui32RealBaseIndex = ui32Index; -+} -+ -+/*************************************************************************/ /*! -+ * @Function _ConvertGhostBaseToReal -+ * -+ * @Description Convert the given Ghost Base to a Real Base in the -+ * RA. This is mainly used in free paths so we can be -+ * agile with memory regions. -+ * @Input pArena - The RA Arena to convert the base on. -+ * @Input aBaseArray - The Base array to convert the base on. -+ * @Input uiRealBase - The Base hosting the Ghost base to convert. -+ * @Input ui32RealBaseArrayIndex - The index in the array of the Real Base. -+ * @Input ui32GhostBaseArrayIndex - The index in the array of the Ghost Base. -+ * @Input uiChunkSize - The chunk size used to generate the Ghost bases on. -+ * -+ * @Return PVRSRV_OK on Success, PVRSRV_ERROR code on Failure. 
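-+ *              For example (hypothetical values): with aBaseArray =
-+ *              { Real 0x10000, Ghost, Ghost, Ghost } and uiChunkSize = 0x1000,
-+ *              converting index 2 splits the boundary tag backing 0x10000 at
-+ *              offset 0x2000, inserts the new tag into the live hash table and
-+ *              rewrites aBaseArray[2] as the Real base 0x12000.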
-+*/ /**************************************************************************/ -+static PVRSRV_ERROR -+_ConvertGhostBaseToReal(RA_ARENA *pArena, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_T uiRealBase, -+ IMG_UINT32 ui32RealBaseArrayIndex, -+ IMG_UINT32 ui32GhostBaseArrayIndex, -+ RA_LENGTH_T uiChunkSize) -+{ -+ BT *pOrigRealBT; -+ BT *pNewRealBT; -+ -+ pOrigRealBT = (BT *) HASH_Retrieve_Extended(pArena->pSegmentHash, &uiRealBase); -+ pNewRealBT = _SegmentSplit(pOrigRealBT, -+ uiChunkSize * -+ (ui32GhostBaseArrayIndex - ui32RealBaseArrayIndex)); -+ PVR_LOG_RETURN_IF_FALSE(pNewRealBT != NULL, -+ "Unable to split BT, no memory available to allocate new BT", -+ PVRSRV_ERROR_OUT_OF_MEMORY); -+ -+ if (!HASH_Insert_Extended(pArena->pSegmentHash, &pNewRealBT->base, (uintptr_t) pNewRealBT)) -+ { -+ PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE, "HASH_Insert_Extended"); -+ } -+ -+ aBaseArray[ui32GhostBaseArrayIndex] = pNewRealBT->base; -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+ * @Function _FreeGhostBasesFromReal -+ * -+ * @Description Given a ghost base and size, free the contiguous ghost bases from the -+ * real base. This has the effect of shrinking the size of the real base. -+ * If ghost pages remain after the free region, a new Real base will be -+ * created to host them. -+ * @Input pArena - The RA Arena to free the Ghost Bases from. -+ * @Input aBaseArray - The array to remove bases from -+ * @Input uiBaseArraySize - The size of the Base array to free from. -+ * @Input uiChunkSize - The chunk size used to generate the Ghost Bases. -+ * @Input ui32GhostBaseIndex - The index into the array of the initial Ghost base to free -+ * @Input ui32FreeCount - The number of Ghost bases to free from the Real base. -+ * -+ * @Return PVRSRV_OK on Success, PVRSRV_ERROR code on Failure. -+*/ /**************************************************************************/ -+static PVRSRV_ERROR -+_FreeGhostBasesFromReal(RA_ARENA *pArena, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize, -+ RA_LENGTH_T uiChunkSize, -+ IMG_UINT32 ui32GhostBaseIndex, -+ IMG_UINT32 ui32FreeCount) -+{ -+ PVRSRV_ERROR eError; -+ RA_BASE_T uiRealBase; -+ IMG_UINT32 ui32RealBaseIndex; -+ IMG_UINT32 ui32FreeEndIndex; -+ -+ _FindRealBaseFromGhost(aBaseArray, -+ ui32GhostBaseIndex, -+ &uiRealBase, -+ &ui32RealBaseIndex); -+ -+ /* Make the first Ghost Base to free, real. */ -+ eError = _ConvertGhostBaseToReal(pArena, -+ aBaseArray, -+ uiRealBase, -+ ui32RealBaseIndex, -+ ui32GhostBaseIndex, -+ uiChunkSize); -+ PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal"); -+ -+ /* Calculate the Base after the last to free. */ -+ ui32FreeEndIndex = ui32GhostBaseIndex + ui32FreeCount; -+ -+ /* -+ * If the end of the free region is a Ghost base then we need to -+ * make it a real base so that we can free the intended middle region. 
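-+	 * e.g. (hypothetical layout) freeing 2 bases starting at index 1 of
-+	 * { Real, Ghost, Ghost, Ghost } first makes index 1 Real, then makes
-+	 * index 3 Real, and finally frees indices 1..2, leaving
-+	 * { Real, INVALID, INVALID, Real }.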
-+ */ -+ if (ui32FreeEndIndex != uiBaseArraySize && -+ RA_BASE_IS_GHOST(aBaseArray[ui32FreeEndIndex])) -+ { -+ eError = _ConvertGhostBaseToReal(pArena, -+ aBaseArray, -+ aBaseArray[ui32GhostBaseIndex], -+ ui32GhostBaseIndex, -+ ui32FreeEndIndex, -+ uiChunkSize); -+ PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal"); -+ } -+ -+ /* Free the region calculated */ -+ eError = _FreeSingleBaseArray(pArena, -+ &aBaseArray[ui32GhostBaseIndex], -+ ui32FreeCount); -+ PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal"); -+ -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+ * @Function _ConvertGhostBaseFreeReal -+ * -+ * @Description Used in the case that we want to keep some indices that are ghost pages -+ * but the indices to free start with the real base. In this case we can -+ * convert the keep point to a real base, then free the original real base -+ * along with all ghost bases prior to the new real. -+ * -+ * @Input pArena - The RA Arena to free the bases from. -+ * @Input aBaseArray - The Base array to free from. -+ * @Input uiChunkSize - The chunk size used to generate the Ghost bases. -+ * @Input uiGhostBaseIndex - The index into the array of the Ghost base to convert. -+ * -+ * @Return PVRSRV_OK on Success, PVRSRV_ERROR code on Failure. -+*/ /**************************************************************************/ -+static PVRSRV_ERROR -+_ConvertGhostBaseFreeReal(RA_ARENA *pArena, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_LENGTH_T uiChunkSize, -+ IMG_UINT32 uiRealBaseIndex, -+ IMG_UINT32 uiGhostBaseIndex) -+{ -+ PVRSRV_ERROR eError; -+ RA_BASE_T uiRealBase = aBaseArray[uiRealBaseIndex]; -+ -+ eError = _ConvertGhostBaseToReal(pArena, -+ aBaseArray, -+ uiRealBase, -+ uiRealBaseIndex, -+ uiGhostBaseIndex, -+ uiChunkSize); -+ PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal"); -+ -+ eError = _FreeSingleBaseArray(pArena, -+ &aBaseArray[uiRealBaseIndex], -+ uiGhostBaseIndex - uiRealBaseIndex); -+ PVR_LOG_RETURN_IF_ERROR(eError, "_FreeBaseArray"); -+ -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+ * @Function _FreeBaseArraySlice -+ * -+ * @Description Free Bases in an Array Slice. -+ * This function assumes that the slice is within a single Real base alloc. -+ * i.e the uiFreeStartIndex and uiFreeCount remain fully within a single real -+ * base alloc and do not cross into another Real base region. -+ * -+ * @Input pArena - The RA Arena to free bases from. -+ * @Input aBaseArray - The Base array to free from. -+ * @Input uiBaseArraySize - The size of the Base array to free from. -+ * @Input uiChunkSize - The base chunk size used to generate the Ghost bases. -+ * @Input uiFreeStartIndex - The index in the array to start freeing from -+ * @Input uiFreeCount - The number of bases to free. -+ * -+ * @Return PVRSRV_OK on Success, PVRSRV_ERROR code on Failure. -+*/ /**************************************************************************/ -+static PVRSRV_ERROR -+_FreeBaseArraySlice(RA_ARENA *pArena, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize, -+ RA_LENGTH_T uiChunkSize, -+ IMG_UINT32 uiFreeStartIndex, -+ IMG_UINT32 uiFreeCount) -+{ -+ /*3 cases: -+ * Key: () = Region to Free -+ * [R] = Newly Real -+ * R = Real Base -+ * G = Ghost Base -+ * 1. We free the whole Realbase (inc all Ghost bases) -+ * e.g. (RGGGGG) -+ * e.g. 
RGGG(R)RGG -+ * 2 .We free the Real base but not all the Ghost bases meaning the first -+ * ghost base after the last freed will become a real base. -+ * e.g. RGGGG(RGGGG)[R]GGG -+ * e.g. (RGGGG)[R]GGGG -+ * 3. We free some ghost bases from the real base -+ * e.g. RGGG(GGG) -+ * e.g. RGGG(GGG)[R]GGG -+ * -+ * Invalid Scenarios: -+ * 1. RGG(GR)GGGRG -+ * 2. RGG(GRG)GGRG -+ * Higher levels should prevent these situations by ensuring that the free -+ * index and count always focus on a single real base. -+ * Scenario 1 & 2, correctly handled, would be a case 3. followed by a case 2. -+ */ -+ PVRSRV_ERROR eError; -+ -+ PVR_LOG_RETURN_IF_FALSE(uiBaseArraySize >= uiFreeStartIndex && -+ uiBaseArraySize >= uiFreeStartIndex + (uiFreeCount - 1), -+ "Free Index given out of array bounds", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ /* Find which case we have */ -+ -+ /* Case 1 or 2 */ -+ if (RA_BASE_IS_REAL(aBaseArray[uiFreeStartIndex])) -+ { -+ /* Case 1 */ -+ if (uiFreeStartIndex + uiFreeCount == uiBaseArraySize || -+ RA_BASE_IS_REAL(aBaseArray[uiFreeStartIndex + uiFreeCount]) || -+ RA_BASE_IS_INVALID(aBaseArray[uiFreeStartIndex + uiFreeCount])) -+ { -+ eError = _FreeSingleBaseArray(pArena, -+ &aBaseArray[uiFreeStartIndex], -+ uiFreeCount); -+ PVR_LOG_RETURN_IF_ERROR(eError, "_FreeBaseArray"); -+ } -+ /* Case 2*/ -+ else -+ { -+ eError = _ConvertGhostBaseFreeReal(pArena, -+ aBaseArray, -+ uiChunkSize, -+ uiFreeStartIndex, -+ uiFreeStartIndex + uiFreeCount); -+ PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal"); -+ } -+ } -+ /* Case 3 */ -+ else if (RA_BASE_IS_GHOST(aBaseArray[uiFreeStartIndex])) -+ { -+ eError = _FreeGhostBasesFromReal(pArena, -+ aBaseArray, -+ uiBaseArraySize, -+ uiChunkSize, -+ uiFreeStartIndex, -+ uiFreeCount); -+ PVR_LOG_RETURN_IF_ERROR(eError, "_FreeGhostBasesFromReal"); -+ } -+ /* Attempt to free an invalid base, this could be a duplicated -+ * value in the free sparse index array */ -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Attempt to free already free base Index %u", uiFreeStartIndex)); -+ PVR_ASSERT(!"Attempted double free.") -+ return PVRSRV_ERROR_RA_FREE_INVALID_CHUNK; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function _AllocAlignSplit -+@Description Given a valid BT, trim the start and end of the BT according -+ to alignment and size requirements. Also add the resulting -+ BT to the live hash table. -+@Input pArena The arena. -+@Input pBT The BT to trim and add to live hash table -+@Input uSize The requested allocation size. -+@Input uAlignment The alignment requirements of the allocation -+ Required uAlignment, or 0. -+ Must be a power of 2 if not 0 -+@Output pBase Allocated, corrected, resource base -+ (non-optional, must not be NULL) -+@Output phPriv The user references associated with -+ the imported segment. (optional) -+@Return IMG_FALSE failure -+ IMG_TRUE success -+*/ /**************************************************************************/ -+static IMG_BOOL -+_AllocAlignSplit(RA_ARENA *pArena, -+ BT *pBT, -+ RA_LENGTH_T uSize, -+ RA_LENGTH_T uAlignment, -+ RA_BASE_T *pBase, -+ RA_PERISPAN_HANDLE *phPriv) -+{ -+ RA_BASE_T aligned_base; -+ -+ aligned_base = (uAlignment > 1) ? 
PVR_ALIGN(pBT->base, uAlignment) : pBT->base; -+ -+ _FreeListRemove(pArena, pBT); -+ -+ if ((pArena->ui32PolicyFlags & RA_POLICY_NO_SPLIT_MASK) == RA_POLICY_NO_SPLIT) -+ { -+ goto nosplit; -+ } -+ -+ /* with uAlignment we might need to discard the front of this segment */ -+ if (aligned_base > pBT->base) -+ { -+ BT *pNeighbour; -+ pNeighbour = _SegmentSplit(pBT, (RA_LENGTH_T)(aligned_base - pBT->base)); -+ /* partition the buffer, create a new boundary tag */ -+ if (pNeighbour == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Front split failed", __func__)); -+ /* Put pBT back in the list */ -+ _FreeListInsert(pArena, pBT); -+ return IMG_FALSE; -+ } -+ -+ _FreeListInsert(pArena, pBT); -+ pBT = pNeighbour; -+ } -+ -+ /* the segment might be too big, if so, discard the back of the segment */ -+ if (pBT->uSize > uSize) -+ { -+ BT *pNeighbour; -+ pNeighbour = _SegmentSplit(pBT, uSize); -+ /* partition the buffer, create a new boundary tag */ -+ if (pNeighbour == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Back split failed", __func__)); -+ /* Put pBT back in the list */ -+ _FreeListInsert(pArena, pBT); -+ return IMG_FALSE; -+ } -+ -+ _FreeListInsert(pArena, pNeighbour); -+ } -+nosplit: -+ pBT->type = btt_live; -+ -+ if (!HASH_Insert_Extended(pArena->pSegmentHash, &aligned_base, (uintptr_t)pBT)) -+ { -+ _FreeBT(pArena, pBT); -+ return IMG_FALSE; -+ } -+ -+ if (phPriv != NULL) -+ *phPriv = pBT->hPriv; -+ -+ *pBase = aligned_base; -+ -+ return IMG_TRUE; -+} -+ -+/*************************************************************************/ /*! -+@Function _AttemptAllocAligned -+@Description Attempt an allocation from an arena. -+@Input pArena The arena. -+@Input uSize The requested allocation size. -+@Input uFlags Allocation flags -+@Output phPriv The user references associated with -+ the imported segment. (optional) -+@Input uAlignment Required uAlignment, or 0. -+ Must be a power of 2 if not 0 -+@Output base Allocated resource base (non-optional, must not -+ be NULL) -+@Return IMG_FALSE failure -+ IMG_TRUE success -+*/ /**************************************************************************/ -+static IMG_BOOL -+_AttemptAllocAligned(RA_ARENA *pArena, -+ RA_LENGTH_T uSize, -+ RA_FLAGS_T uFlags, -+ RA_LENGTH_T uAlignment, -+ RA_BASE_T *base, -+ RA_PERISPAN_HANDLE *phPriv) /* this is the "per-import" private data */ -+{ -+ -+ IMG_UINT32 index_low; -+ IMG_UINT32 index_high; -+ IMG_UINT32 i; -+ struct _BT_ *pBT = NULL; -+ -+ PVR_ASSERT(pArena!=NULL); -+ PVR_ASSERT(base != NULL); -+ -+ pArena->per_flags_buckets = PVRSRVSplay(uFlags, pArena->per_flags_buckets); -+ if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->uiFlags != uFlags)) -+ { -+ /* no chunks with these flags. 
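-+		   The arena keeps a separate set of free-list buckets per flags value
-+		   (the splay tree is keyed on the allocation flags), so a request can
-+		   only be satisfied from spans that were added or imported with
-+		   matching flags.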
*/ -+ return IMG_FALSE; -+ } -+ -+ index_low = pvr_log2(uSize); -+ if (uAlignment) -+ { -+ index_high = pvr_log2(uSize + uAlignment - 1); -+ } -+ else -+ { -+ index_high = index_low; -+ } -+ -+ _FreeTableLimitBoundsCheck(&index_high); -+ _FreeTableLimitBoundsCheck(&index_low); -+ -+ PVR_ASSERT(index_low <= index_high); -+ -+ if (unlikely((pArena->ui32PolicyFlags & RA_POLICY_BUCKET_MASK) == RA_POLICY_BUCKET_BEST_FIT)) -+ { -+ /* This policy ensures the selection of the first lowest size bucket that -+ * satisfies the request size is selected */ -+#if defined(PVR_CTZLL) -+ i = PVR_CTZLL((~(((IMG_ELTS_MAPPINGS)1 << (index_low )) - 1)) & pArena->per_flags_buckets->bHasEltsMapping); -+#else -+ i = index_low; -+#endif -+ for ( ; (i < FREE_TABLE_LIMIT) && (pBT == NULL); ++i) -+ { -+ if (pArena->per_flags_buckets->buckets[i]) -+ { -+ pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, (unsigned int) ~0); -+ } -+ } -+ } -+ else -+ { -+#if defined(PVR_CTZLL) -+ i = PVR_CTZLL((~(((IMG_ELTS_MAPPINGS)1 << (index_high + 1)) - 1)) & pArena->per_flags_buckets->bHasEltsMapping); -+#else -+ for (i = index_high + 1; (i < FREE_TABLE_LIMIT) && (pArena->per_flags_buckets->buckets[i] == NULL); ++i) -+ { -+ } -+#endif -+ PVR_ASSERT(i <= FREE_TABLE_LIMIT); -+ -+ if (i != FREE_TABLE_LIMIT) -+ { -+ /* since we start at index_high + 1, we are guaranteed to exit */ -+ pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, 1); -+ } -+ else -+ { -+ for (i = index_high; (i != index_low - 1) && (pBT == NULL); --i) -+ { -+ pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, (unsigned int) ~0); -+ } -+ } -+ } -+ -+ if (pBT == NULL) -+ { -+ return IMG_FALSE; -+ } -+ -+ return _AllocAlignSplit(pArena, pBT, uSize, uAlignment, base, phPriv); -+} -+ -+/*************************************************************************/ /*! -+@Function _AttemptAllocAlignedAssured -+@Description Attempt an allocation from an arena. If the arena allows -+ non-contiguous allocations, the allocation is guaranteed -+ given there is enough memory to satisfy the full allocation. -+@Input pArena The arena. -+@Input uSize The requested allocation size. -+@Input uLog2MinContigSize The Log2 minimum contiguity of the bases returned. -+@Input uFlags Allocation flags -+@Input uAlignment Required uAlignment, or 0. -+ Must be a power of 2 if not 0 -+@Input aBaseArray Array to allocate bases to. -+@Input bSparseAlloc Is the allocation we are making sparse. -+@Output bPhysContig Is the allocation we made physically contiguous -+ or did we use the scoop logic -+@Return Success: PVRSRV_OK -+ Fail: PVRSRV_ERROR code. 
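-+            For illustration (hypothetical request): uSize = 0x13000 with
-+            uLog2MinContigSize = 12 gives index_low = 12, index_high = 16.
-+            If a single free chunk of at least 0x13000 bytes is found the
-+            allocation is physically contiguous and bPhysContig is set to
-+            IMG_TRUE; otherwise, where the policy (or a sparse allocation)
-+            allows it, the request is scooped from several smaller chunks,
-+            each a multiple of the 2^12 minimum contiguity, and bPhysContig
-+            is set to IMG_FALSE.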
-+*/ /**************************************************************************/ -+static PVRSRV_ERROR -+_AttemptAllocAlignedAssured(RA_ARENA *pArena, -+ RA_LENGTH_T uSize, -+ IMG_UINT32 uLog2MinContigSize, -+ RA_FLAGS_T uFlags, -+ RA_LENGTH_T uAlignment, -+ RA_BASE_ARRAY_T aBaseArray, -+ IMG_BOOL bSparseAlloc, -+ IMG_BOOL *bPhysContig) -+{ -+ IMG_UINT32 index_low; /* log2 Lowest contiguity required */ -+ IMG_UINT32 index_high; /* log2 Size of full alloc */ -+ IMG_UINT32 i; -+ struct _BT_ *pBT = NULL; -+ RA_PERISPAN_HANDLE phPriv; -+ RA_LENGTH_T uiRemaining = uSize; -+ RA_BASE_T uiBase; -+ IMG_UINT32 uiCurrentArrayIndex = 0; -+ -+ PVR_ASSERT(pArena != NULL); -+ -+ pArena->per_flags_buckets = PVRSRVSplay(uFlags, pArena->per_flags_buckets); -+ if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->uiFlags != uFlags)) -+ { -+ /* no chunks with these flags. */ -+ return PVRSRV_ERROR_RA_NO_RESOURCE_WITH_FLAGS; -+ } -+ -+ if (pArena->ui64FreeArenaSize < uSize) -+ { -+ /* Not enough memory to accommodate kick back for a chance to import more */ -+ return PVRSRV_ERROR_RA_OUT_OF_RESOURCE; -+ } -+ -+ if (uLog2MinContigSize && uAlignment) -+ { -+ index_low = uLog2MinContigSize; -+ index_high = pvr_log2(uSize); -+ } -+ else if (uLog2MinContigSize) -+ { -+ index_low = uLog2MinContigSize; -+ index_high = pvr_log2(uSize); -+ } -+ else if (uAlignment) -+ { -+ index_low = 0; -+ index_high = pvr_log2(uSize + uAlignment - 1); -+ } -+ else -+ { -+ index_low = 0; -+ index_high = pvr_log2(uSize); -+ } -+ -+ PVR_ASSERT(index_low < FREE_TABLE_LIMIT); -+ PVR_ASSERT(index_high < FREE_TABLE_LIMIT); -+ PVR_ASSERT(index_low <= index_high); -+ -+ /* Start at index_high + 1 as then we can check all buckets larger than the desired alloc -+ * If we don't find one larger then we could still find one of requested size in index_high and -+ * shortcut the non-contiguous allocation path. We check index_high + 1 first as it is -+ * guaranteed to have a free region of the requested size if the bucket has entries. Whereas -+ * index_high is not guaranteed to have an allocation that meets the size requested due to it -+ * representing all free regions of size 2^bucket index to 2^bucket index +1. e.g we could have -+ * a request for 19*4k Pages which would be represented by bucket 16, bucket 16 represents free -+ * entries from 16*4k pages to 31*4k Pages in size, if this bucket only had free entries of -+ * 17*4k pages the search would fail, hence not guaranteed at index_high. -+ */ -+#if defined(PVR_CTZLL) -+ i = PVR_CTZLL((~(((IMG_ELTS_MAPPINGS)1 << (index_high + 1)) - 1)) & pArena->per_flags_buckets->bHasEltsMapping); -+#else -+ for (i = index_high + 1; (i < FREE_TABLE_LIMIT) && (pArena->per_flags_buckets->buckets[i] == NULL); i++) -+ { -+ } -+#endif -+ -+ PVR_ASSERT(i <= FREE_TABLE_LIMIT); -+ -+ if (i != FREE_TABLE_LIMIT) -+ { -+ pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, 1); -+ } -+ else -+ { -+ /* In this case we have searched all buckets index_high + 1 to FREE_TABLE_LIMIT and not found an -+ * available bucket with the required allocation size. -+ * Because we haven't found an allocation of the requested size in index_high + 1 there is still a chance -+ * that we can find an allocation of correct size in index_high, when index_high references the bucket -+ * containing the largest free chunks in the RA Arena. i.e All buckets > index_high == NULL. -+ * We do a final search in that bucket here before we attempt to scoop memory or return NULL. 
-+ */ -+ pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[index_high], uSize, uAlignment, 1); -+ } -+ -+ /* We managed to find a contiguous allocation block of sufficient size */ -+ if (pBT != NULL) -+ { -+ IMG_BOOL bResult; -+ bResult = _AllocAlignSplit(pArena, pBT, uSize, uAlignment, &uiBase, &phPriv); -+ if (bResult) -+ { -+ if (!bSparseAlloc) -+ { -+ aBaseArray[0] = uiBase; -+ } -+ else -+ { -+ _GenerateGhostBases(uiBase, uSize, 1ULL << uLog2MinContigSize, aBaseArray); -+ } -+ } -+ else -+ { -+ return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED; -+ } -+ *bPhysContig = IMG_TRUE; -+ -+ return PVRSRV_OK; -+ } -+ -+ /* -+ * If this arena doesn't have the non-contiguous allocation functionality enabled, then -+ * don't attempt to scoop for non physically contiguous allocations. Sparse allocations -+ * are still able to use the scoop functionality as they map in a chunk at a time in the -+ * worst case. -+ */ -+ if (unlikely((pArena->ui32PolicyFlags & RA_POLICY_ALLOC_ALLOW_NONCONTIG_MASK) == 0) && -+ !bSparseAlloc) -+ { -+ return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED; -+ } -+ -+ /* Attempt to Scoop memory from non-contiguous blocks */ -+ for (i = index_high; i >= index_low && uiRemaining != 0; i--) -+ { -+ /* While we have chunks of at least our contig size in the bucket to use */ -+ for ( -+ pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], 1ULL << uLog2MinContigSize, uAlignment,(unsigned int) ~0); -+ pBT != NULL && uiRemaining != 0; -+ pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], 1ULL << uLog2MinContigSize, uAlignment,(unsigned int) ~0))//~0 Try all elements in bucket -+ { -+ /* Grab largest chunk possible that is a multiple of our min contiguity size -+ * N.B: C always rounds towards 0 so this effectively floors for us */ -+ IMG_BOOL bResult; -+ RA_BASE_T uiAlignedBase = -+ (uAlignment > 1) ? PVR_ALIGN(pBT->base, uAlignment) : pBT->base; -+ RA_LENGTH_T uiMaxSizeAvailable = (pBT->uSize - (uiAlignedBase - pBT->base)); -+ RA_LENGTH_T uiMaxMultipleOfContig = (uiMaxSizeAvailable >> uLog2MinContigSize) << uLog2MinContigSize; -+ -+ /* -+ * If the size of the BT is larger than the remaining memory to allocate -+ * then just allocate what we need. The rest will be trimmed and put back -+ * into the pool in _AllocAlignSplit -+ */ -+ if (uiMaxMultipleOfContig > uiRemaining) -+ { -+ uiMaxMultipleOfContig = uiRemaining; -+ } -+ -+ bResult = _AllocAlignSplit(pArena, pBT, uiMaxMultipleOfContig, uAlignment, &uiBase, &phPriv); -+ if (!bResult) -+ { -+ /* Something went wrong with splitting or adding to hash, -+ * We can try find another chunk, although this should -+ * never occur. -+ */ -+ PVR_ASSERT(!"_AllocAlignSplit issue."); -+ continue; -+ } -+ -+ uiRemaining -= uiMaxMultipleOfContig; -+ -+ uiCurrentArrayIndex += _GenerateGhostBases(uiBase, -+ uiMaxMultipleOfContig, -+ 1ULL << uLog2MinContigSize, -+ &aBaseArray[uiCurrentArrayIndex]); -+ } -+ } -+ -+ /* If we didn't manage to scoop enough memory then we need to unwind the allocations we just made */ -+ if (uiRemaining != 0) -+ { -+ goto error_unwind; -+ } -+ *bPhysContig = IMG_FALSE; -+ -+ return PVRSRV_OK; -+ -+error_unwind: -+ _RA_FreeMultiUnlocked(pArena, -+ aBaseArray, -+ uiCurrentArrayIndex); -+ return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED; -+} -+ -+/*************************************************************************/ /*! -+@Function _AttemptImportSpanAlloc -+@Description Attempt to Import more memory and create a new span. 
-+ Function attempts to import more memory from the callback -+ provided at RA creation time, if successful the memory -+ will form a new span in the RA. -+@Input pArena The arena. -+@Input uRequestSize The requested allocation size. -+@Input uImportMultiplier Import x-times more for future requests if -+ we have to import new memory. -+@Input uImportFlags Flags influencing allocation policy. -+@Input uAlignment The alignment requirements of the allocation -+ Required uAlignment, or 0. -+ Must be a power of 2 if not 0 -+@Input pszAnnotation String to describe the allocation -+@Output pImportBase Allocated import base -+ (non-optional, must not be NULL) -+@Output pImportSize Allocated import size -+@Output pImportBT Allocated import BT -+@Return PVRSRV_OK - success -+*/ /**************************************************************************/ -+static PVRSRV_ERROR -+_AttemptImportSpanAlloc(RA_ARENA *pArena, -+ RA_LENGTH_T uRequestSize, -+ IMG_UINT8 uImportMultiplier, -+ RA_FLAGS_T uImportFlags, -+ RA_LENGTH_T uAlignment, -+ const IMG_CHAR *pszAnnotation, -+ RA_BASE_T *pImportBase, -+ RA_LENGTH_T *pImportSize, -+ BT **pImportBT) -+{ -+ IMG_HANDLE hPriv; -+ RA_FLAGS_T uFlags = (uImportFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK); -+ BT *pBT; -+ PVRSRV_ERROR eError; -+ -+ *pImportSize = uRequestSize; -+ -+ /* apply over-allocation multiplier after all alignment adjustments */ -+ *pImportSize *= uImportMultiplier; -+ -+ /* ensure that we import according to the quanta of this arena */ -+ *pImportSize = PVR_ALIGN(*pImportSize, pArena->uQuantum); -+ -+ eError = pArena->pImportAlloc(pArena->pImportHandle, -+ *pImportSize, uImportFlags, -+ uAlignment, -+ pszAnnotation, -+ pImportBase, pImportSize, -+ &hPriv); -+ if (PVRSRV_OK != eError) -+ { -+ return eError; -+ } -+ -+ /* If we successfully import more resource, create a span to -+ * represent it else free the resource we imported. -+ */ -+ pBT = _InsertResourceSpan(pArena, *pImportBase, *pImportSize, uFlags); -+ if (pBT == NULL) -+ { -+ /* insufficient resources to insert the newly acquired span, -+ so free it back again */ -+ pArena->pImportFree(pArena->pImportHandle, *pImportBase, hPriv); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', " -+ "size=0x%llx failed!", __func__, pArena->name, -+ (unsigned long long)uRequestSize)); -+ /* RA_Dump (arena); */ -+ -+ return PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED; -+ } -+ -+ pBT->hPriv = hPriv; -+ *pImportBT = pBT; -+ -+ return eError; -+} -+ -+IMG_INTERNAL RA_ARENA * -+RA_Create(IMG_CHAR *name, -+ RA_LOG2QUANTUM_T uLog2Quantum, -+ IMG_UINT32 ui32LockClass, -+ PFN_RA_ALLOC imp_alloc, -+ PFN_RA_FREE imp_free, -+ RA_PERARENA_HANDLE arena_handle, -+ RA_POLICY_T ui32PolicyFlags) -+{ -+ RA_ARENA *pArena; -+ PVRSRV_ERROR eError; -+ -+ if (name == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter 'name' (NULL not accepted)", __func__)); -+ return NULL; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s'", __func__, name)); -+ -+ pArena = OSAllocMem(sizeof(*pArena)); -+ if (pArena == NULL) -+ { -+ goto arena_fail; -+ } -+ -+ eError = OSLockCreate(&pArena->hLock); -+ if (eError != PVRSRV_OK) -+ { -+ goto lock_fail; -+ } -+ -+ pArena->pSegmentHash = HASH_Create_Extended(MINIMUM_HASH_SIZE, sizeof(RA_BASE_T), HASH_Func_Default, HASH_Key_Comp_Default); -+ -+ if (pArena->pSegmentHash==NULL) -+ { -+ goto hash_fail; -+ } -+ -+ OSStringLCopy(pArena->name, name, RA_MAX_NAME_LENGTH); -+ pArena->pImportAlloc = (imp_alloc!=NULL) ? 
imp_alloc : &_RequestAllocFail; -+ pArena->pImportFree = imp_free; -+ pArena->pImportHandle = arena_handle; -+ pArena->pHeadSegment = NULL; -+ pArena->uQuantum = 1ULL << uLog2Quantum; -+ pArena->per_flags_buckets = NULL; -+ pArena->ui32LockClass = ui32LockClass; -+ pArena->ui32PolicyFlags = ui32PolicyFlags; -+ pArena->ui64TotalArenaSize = 0; -+ pArena->ui64FreeArenaSize = 0; -+ -+ PVR_ASSERT(is_arena_valid(pArena)); -+ return pArena; -+ -+hash_fail: -+ OSLockDestroy(pArena->hLock); -+lock_fail: -+ OSFreeMem(pArena); -+ /* not nulling pointer, out of scope */ -+arena_fail: -+ return NULL; -+} -+ -+static void _LogRegionCreation(const char *pszMemType, -+ IMG_UINT64 ui64CpuPA, -+ IMG_UINT64 ui64DevPA, -+ IMG_UINT64 ui64Size) -+{ -+#if !defined(DEBUG) -+ PVR_UNREFERENCED_PARAMETER(pszMemType); -+ PVR_UNREFERENCED_PARAMETER(ui64CpuPA); -+ PVR_UNREFERENCED_PARAMETER(ui64DevPA); -+ PVR_UNREFERENCED_PARAMETER(ui64Size); -+#else -+ if ((ui64CpuPA != 0) && (ui64DevPA != 0) && (ui64CpuPA != ui64DevPA)) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "Creating RA for \"%s\" memory" -+ " - Cpu PA 0x%016" IMG_UINT64_FMTSPECx "-0x%016" IMG_UINT64_FMTSPECx -+ " - Dev PA 0x%016" IMG_UINT64_FMTSPECx "-0x%016" IMG_UINT64_FMTSPECx, -+ pszMemType, -+ ui64CpuPA, ui64CpuPA + ui64Size, -+ ui64DevPA, ui64DevPA + ui64Size)); -+ } -+ else -+ { -+ __maybe_unused IMG_UINT64 ui64PA = -+ ui64CpuPA != 0 ? ui64CpuPA : ui64DevPA; -+ __maybe_unused const IMG_CHAR *pszAddrType = -+ ui64CpuPA == ui64DevPA ? "Cpu/Dev" : (ui64CpuPA != 0 ? "Cpu" : "Dev"); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "Creating RA for \"%s\" memory - %s PA 0x%016" -+ IMG_UINT64_FMTSPECx "-0x%016" IMG_UINT64_FMTSPECx, -+ pszMemType, pszAddrType, -+ ui64PA, ui64PA + ui64Size)); -+ } -+#endif -+} -+ -+IMG_INTERNAL RA_ARENA * -+RA_Create_With_Span(IMG_CHAR *name, -+ RA_LOG2QUANTUM_T uLog2Quantum, -+ IMG_UINT64 ui64CpuBase, -+ IMG_UINT64 ui64SpanDevBase, -+ IMG_UINT64 ui64SpanSize, -+ RA_POLICY_T ui32PolicyFlags) -+{ -+ RA_ARENA *psRA; -+ IMG_BOOL bSuccess; -+ -+ psRA = RA_Create(name, -+ uLog2Quantum, /* Use OS page size, keeps things simple */ -+ RA_LOCKCLASS_0, /* This arena doesn't use any other arenas. 
*/
-+                    NULL, /* No Import */
-+                    NULL, /* No free import */
-+                    NULL, /* No import handle */
-+                    ui32PolicyFlags); /* No restriction on import splitting */
-+    PVR_LOG_GOTO_IF_FALSE(psRA != NULL, "RA_Create() failed", return_);
-+
-+    bSuccess = RA_Add(psRA, (RA_BASE_T) ui64SpanDevBase, (RA_LENGTH_T) ui64SpanSize, 0, NULL);
-+    PVR_LOG_GOTO_IF_FALSE(bSuccess, "RA_Add() failed", cleanup_);
-+
-+    _LogRegionCreation(name, ui64CpuBase, ui64SpanDevBase, ui64SpanSize);
-+
-+    return psRA;
-+
-+cleanup_:
-+    RA_Delete(psRA);
-+return_:
-+    return NULL;
-+}
-+
-+IMG_INTERNAL void
-+RA_Delete(RA_ARENA *pArena)
-+{
-+    IMG_UINT32 uIndex;
-+    IMG_BOOL bWarn = IMG_TRUE;
-+
-+    PVR_ASSERT(pArena != NULL);
-+
-+    if (pArena == NULL)
-+    {
-+        PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter - pArena", __func__));
-+        return;
-+    }
-+
-+    PVR_ASSERT(is_arena_valid(pArena));
-+
-+    PVR_DPF((PVR_DBG_MESSAGE,
-+             "%s: name='%s'", __func__, pArena->name));
-+
-+    while (pArena->pHeadSegment != NULL)
-+    {
-+        BT *pBT = pArena->pHeadSegment;
-+
-+        if (pBT->type != btt_free)
-+        {
-+            if (bWarn)
-+            {
-+                PVR_DPF((PVR_DBG_ERROR, "%s: Allocations still exist in the arena that is being destroyed", __func__));
-+                PVR_DPF((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmem context", __func__));
-+                PVR_DPF((PVR_DBG_ERROR, "%s: base = 0x%llx size=0x%llx", __func__,
-+                         (unsigned long long)pBT->base, (unsigned long long)pBT->uSize));
-+                PVR_DPF((PVR_DBG_ERROR, "%s: This warning will be issued only once for the first allocation found!", __func__));
-+                bWarn = IMG_FALSE;
-+            }
-+        }
-+        else
-+        {
-+            _FreeListRemove(pArena, pBT);
-+        }
-+
-+        _SegmentListRemove(pArena, pBT);
-+        OSFreeMem(pBT);
-+        /* not nulling original pointer, it has changed */
-+    }
-+
-+    while (pArena->per_flags_buckets != NULL)
-+    {
-+        for (uIndex=0; uIndex<FREE_TABLE_LIMIT; uIndex++)
-+        {
-+            PVR_ASSERT(pArena->per_flags_buckets->buckets[uIndex] == NULL);
-+        }
-+
-+        pArena->per_flags_buckets = PVRSRVDelete(pArena->per_flags_buckets->uiFlags, pArena->per_flags_buckets);
-+    }
-+
-+    HASH_Delete(pArena->pSegmentHash);
-+    OSLockDestroy(pArena->hLock);
-+    OSFreeMem(pArena);
-+    /* not nulling pointer, copy on stack */
-+}
-+
-+IMG_INTERNAL IMG_BOOL
-+RA_Add(RA_ARENA *pArena,
-+       RA_BASE_T base,
-+       RA_LENGTH_T uSize,
-+       RA_FLAGS_T uFlags,
-+       RA_PERISPAN_HANDLE hPriv)
-+{
-+    struct _BT_* bt;
-+    PVR_ASSERT(pArena != NULL);
-+    PVR_ASSERT(uSize != 0);
-+
-+    if (pArena == NULL)
-+    {
-+        PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter - pArena", __func__));
-+        return IMG_FALSE;
-+    }
-+
-+    if (uSize == 0)
-+    {
-+        PVR_DPF((PVR_DBG_ERROR, "%s: invalid size 0 added to arena %s", __func__, pArena->name));
-+        return IMG_FALSE;
-+    }
-+
-+    OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
-+    PVR_ASSERT(is_arena_valid(pArena));
-+    PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', "
-+             "base=0x%llx, size=0x%llx", __func__, pArena->name,
-+             (unsigned long long)base, (unsigned long long)uSize));
-+
-+    uSize = PVR_ALIGN(uSize, pArena->uQuantum);
-+    bt = _InsertResource(pArena, base, uSize, uFlags);
-+    if (bt != NULL)
-+    {
-+        bt->hPriv = hPriv;
-+    }
-+
-+    PVR_ASSERT(is_arena_valid(pArena));
-+
-+    pArena->ui64TotalArenaSize += uSize;
-+    pArena->ui64FreeArenaSize += uSize;
-+    OSLockRelease(pArena->hLock);
-+
-+    return bt != NULL;
-+}
-+
-+IMG_INTERNAL PVRSRV_ERROR
-+RA_Alloc(RA_ARENA *pArena,
-+         RA_LENGTH_T uRequestSize,
-+         IMG_UINT8 uImportMultiplier,
-+         RA_FLAGS_T uImportFlags,
-+         RA_LENGTH_T uAlignment,
-+         const IMG_CHAR *pszAnnotation,
-+         RA_BASE_T *base,
-+         RA_LENGTH_T *pActualSize,
-+         RA_PERISPAN_HANDLE *phPriv)
-+{
-+
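Taken together, RA_Create(), RA_Add() and RA_Delete() above form the basic lifecycle of a statically backed arena, and RA_Create_With_Span() is essentially this sequence in a single call. A hedged usage sketch follows; the arena name, the 4 KiB quantum and the 256 MiB span at 0x80000000 are invented example values.

/* Sketch: carve-out arena with no import callbacks (values are examples;
 * names longer than RA_MAX_NAME_LENGTH would be truncated). */
RA_ARENA *psArena = RA_Create("example_heap",
                              12,                  /* log2 quantum -> 4 KiB        */
                              RA_LOCKCLASS_0,
                              NULL, NULL, NULL,    /* no import alloc/free/handle  */
                              RA_POLICY_DEFAULT);
if (psArena != NULL)
{
    if (!RA_Add(psArena, 0x80000000ULL, 0x10000000ULL, 0, NULL))
    {
        RA_Delete(psArena);
        psArena = NULL;
    }
}

/* ... RA_Alloc() / RA_Free() traffic ... */

if (psArena != NULL)
{
    RA_Delete(psArena);   /* every allocation must already have been freed */
}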
PVRSRV_ERROR eError; -+ IMG_BOOL bResult; -+ RA_LENGTH_T uSize = uRequestSize; -+ RA_FLAGS_T uFlags = (uImportFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK); -+ -+ if (pArena == NULL || uImportMultiplier == 0 || uSize == 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: One of the necessary parameters is 0", __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass); -+ PVR_ASSERT(is_arena_valid(pArena)); -+ -+ if (pActualSize != NULL) -+ { -+ *pActualSize = uSize; -+ } -+ -+ /* Must be a power of 2 or 0 */ -+ PVR_ASSERT((uAlignment == 0) || (uAlignment & (uAlignment - 1)) == 0); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: arena='%s', size=0x%llx(0x%llx), " -+ "alignment=0x%llx", __func__, pArena->name, -+ (unsigned long long)uSize, (unsigned long long)uRequestSize, -+ (unsigned long long)uAlignment)); -+ -+ /* if allocation failed then we might have an import source which -+ can provide more resource, else we will have to fail the -+ allocation to the caller. */ -+ bResult = _AttemptAllocAligned(pArena, uSize, uFlags, uAlignment, base, phPriv); -+ if (!bResult) -+ { -+ RA_BASE_T uImportBase; -+ RA_LENGTH_T uImportSize; -+ BT *pBT = NULL; -+ -+ eError = _AttemptImportSpanAlloc(pArena, -+ uSize, -+ uImportMultiplier, -+ uImportFlags, -+ uAlignment, -+ pszAnnotation, -+ &uImportBase, -+ &uImportSize, -+ &pBT); -+ if (eError != PVRSRV_OK) -+ { -+ OSLockRelease(pArena->hLock); -+ return eError; -+ } -+ -+ bResult = _AttemptAllocAligned(pArena, uSize, uFlags, uAlignment, base, phPriv); -+ if (!bResult) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: name='%s' second alloc failed!", -+ __func__, pArena->name)); -+ -+ /* -+ On failure of _AttemptAllocAligned() depending on the exact point -+ of failure, the imported segment may have been used and freed, or -+ left untouched. If the later, we need to return it. -+ */ -+ _FreeBT(pArena, pBT); -+ -+ OSLockRelease(pArena->hLock); -+ return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED; -+ } -+ else -+ { -+ /* Check if the new allocation was in the span we just added... */ -+ if (*base < uImportBase || *base > (uImportBase + uImportSize)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: name='%s' alloc did not occur in the imported span!", -+ __func__, pArena->name)); -+ -+ /* -+ Remove the imported span which should not be in use (if it is then -+ that is okay, but essentially no span should exist that is not used). 
-+ */ -+ _FreeBT(pArena, pBT); -+ } -+ else -+ { -+ pArena->ui64FreeArenaSize += uImportSize; -+ pArena->ui64TotalArenaSize += uImportSize; -+ } -+ } -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', size=0x%llx, " -+ "*base=0x%llx = %d", __func__, pArena->name, (unsigned long long)uSize, -+ (unsigned long long)*base, bResult)); -+ -+ PVR_ASSERT(is_arena_valid(pArena)); -+ -+ pArena->ui64FreeArenaSize -= uSize; -+ -+ OSLockRelease(pArena->hLock); -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR -+_RA_AllocMultiUnlocked(RA_ARENA *pArena, -+ RA_LENGTH_T uRequestSize, -+ IMG_UINT32 uiLog2ChunkSize, -+ IMG_UINT8 uImportMultiplier, -+ RA_FLAGS_T uImportFlags, -+ const IMG_CHAR *pszAnnotation, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize, -+ IMG_BOOL bSparseAlloc, -+ IMG_BOOL *bPhysContig) -+{ -+ PVRSRV_ERROR eError; -+ RA_LENGTH_T uSize = uRequestSize; -+ RA_FLAGS_T uFlags = (uImportFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK); -+ -+ PVR_LOG_RETURN_IF_FALSE(pArena != NULL && uImportMultiplier != 0 && uSize != 0, -+ "One of the necessary parameters is 0", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ PVR_ASSERT((uRequestSize & ((1 << uiLog2ChunkSize) - 1)) == 0) -+ PVR_LOG_RETURN_IF_FALSE((uRequestSize & ((1 << uiLog2ChunkSize) - 1)) == 0, -+ "Require uiLog2ChunkSize pow 2 & multiple of uRequestSize", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ /* Enforce these constraints so we can use those bits to handle Ghost bases. */ -+ PVR_LOG_RETURN_IF_FALSE(uiLog2ChunkSize >= RA_BASE_FLAGS_LOG2 && -+ uiLog2ChunkSize <= RA_BASE_CHUNK_LOG2_MAX, -+ "Log2 chunk size must be 12-64", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ /* Ensure Base Array is large enough for intended allocation */ -+ PVR_LOG_RETURN_IF_FALSE(((RA_LENGTH_T)uiBaseArraySize * (1 << uiLog2ChunkSize)) >= uRequestSize, -+ "Not enough array space to store alloc bases", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ PVR_ASSERT(is_arena_valid(pArena)); -+ -+ /* Must be a power of 2 */ -+ PVR_ASSERT((uAlignment & (uAlignment - 1)) == 0); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: arena='%s', size=0x%llx(0x%llx), " -+ "log2ChunkSize=0x%llx", __func__, pArena->name, -+ (unsigned long long)uSize, (unsigned long long)uRequestSize, -+ (unsigned long long)uiLog2ChunkSize)); -+ -+ /* if allocation failed then we might have an import source which -+ can provide more resource, else we will have to fail the -+ allocation to the caller. 
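From the caller's point of view, the two-pass logic of RA_Alloc() above (try the free lists, import a new span, retry) is invisible; the hedged sketch below is all that is involved. psArena and the sizes are assumed values carried over from the earlier sketch.

RA_BASE_T uiBase;
RA_LENGTH_T uiActualSize;
RA_PERISPAN_HANDLE hPriv;
PVRSRV_ERROR eErr;

eErr = RA_Alloc(psArena,
                0x2000,                   /* 8 KiB request                        */
                RA_NO_IMPORT_MULTIPLIER,  /* import only what is needed           */
                0,                        /* no flag-based segregation            */
                0x1000,                   /* alignment: power of two, or 0        */
                "example-alloc",          /* annotation passed to the import path */
                &uiBase, &uiActualSize, &hPriv);
if (eErr == PVRSRV_OK)
{
    /* On success the allocation starts at uiBase; release it by that base. */
    RA_Free(psArena, uiBase);
}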
*/ -+ eError = _AttemptAllocAlignedAssured(pArena, -+ uSize, -+ uiLog2ChunkSize, -+ uFlags, -+ 1ULL << uiLog2ChunkSize, -+ aBaseArray, -+ bSparseAlloc, -+ bPhysContig); -+ if (eError) -+ { -+ RA_BASE_T uImportBase; -+ RA_LENGTH_T uImportSize; -+ BT *pBT; -+ -+ if (eError == PVRSRV_ERROR_RA_OUT_OF_RESOURCE) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE,"RA out of resource, attempt to import more if possible:" -+ " uSize:0x%llx" -+ " uFlags:0x%llx", -+ (unsigned long long) uSize, -+ (unsigned long long) uFlags)); -+ } -+ else if (eError == PVRSRV_ERROR_RA_NO_RESOURCE_WITH_FLAGS) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE,"RA no resource for flags, attempt to import some if possible:" -+ " uSize:0x%llx" -+ " uFlags:0x%llx", -+ (unsigned long long) uSize, -+ (unsigned long long) uFlags)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE,"RA Failed to Allocate, could be fragmented, attempt to import" -+ " more resource if possible.")); -+ } -+ -+ eError = _AttemptImportSpanAlloc(pArena, -+ uSize, -+ uImportMultiplier, -+ uFlags, -+ 1ULL << uiLog2ChunkSize, -+ pszAnnotation, -+ &uImportBase, -+ &uImportSize, -+ &pBT); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+ pArena->ui64FreeArenaSize += uImportSize; -+ pArena->ui64TotalArenaSize += uImportSize; -+ -+ eError = _AttemptAllocAlignedAssured(pArena, -+ uSize, -+ uiLog2ChunkSize, -+ uFlags, -+ 1Ull << uiLog2ChunkSize, -+ aBaseArray, -+ bSparseAlloc, -+ bPhysContig); -+ if (eError) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: name='%s' second alloc failed!", -+ __func__, pArena->name)); -+ /* -+ On failure of _AttemptAllocAligned() depending on the exact point -+ of failure, the imported segment may have been used and freed, or -+ left untouched. If the later, we need to return it. -+ */ -+ _FreeBT(pArena, pBT); -+ -+ return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED; -+ } -+#if defined(DEBUG) -+ /* -+ * This block of code checks to see if the extra memory we just imported was -+ * used for the second allocation. If we imported memory but did not use it, -+ * it indicates there is a bug in the allocation logic. We can still recover by -+ * freeing the imported span but we emit an error to signal that there is an -+ * issue. -+ * */ -+ else -+ { -+ IMG_UINT32 i; -+ IMG_BOOL bBasesInNewSpan = IMG_FALSE; -+ -+ for (i = 0; i < uiBaseArraySize; i++) -+ { -+ RA_BASE_T uiBase = RA_BASE_STRIP_GHOST_BIT(aBaseArray[i]); -+ -+ /* If the base hasn't been allocated then skip it */ -+ if (aBaseArray[i] == INVALID_BASE_ADDR) -+ { -+ continue; -+ } -+ -+ if (uiBase >= uImportBase && -+ uiBase <= uImportBase + uImportSize) -+ { -+ bBasesInNewSpan = IMG_TRUE; -+ } -+ } -+ -+ if (!bBasesInNewSpan) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: name='%s' alloc did not occur in the imported span!", -+ __func__, pArena->name)); -+ /* -+ Remove the imported span which should not be in use (if it is then -+ that is okay, but essentially no span should exist that is not used). 
-+ */ -+ _FreeBT(pArena, pBT); -+ -+ pArena->ui64FreeArenaSize -= uImportSize; -+ pArena->ui64TotalArenaSize -= uImportSize; -+ } -+ } -+#endif -+ } -+ -+ PVR_ASSERT(is_arena_valid(pArena)); -+ -+ pArena->ui64FreeArenaSize -= uSize; -+ -+ return PVRSRV_OK; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+RA_AllocMulti(RA_ARENA *pArena, -+ RA_LENGTH_T uRequestSize, -+ IMG_UINT32 uiLog2ChunkSize, -+ IMG_UINT8 uImportMultiplier, -+ RA_FLAGS_T uImportFlags, -+ const IMG_CHAR *pszAnnotation, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize, -+ IMG_BOOL *bPhysContig) -+{ -+ PVRSRV_ERROR eError; -+ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass); -+ eError = _RA_AllocMultiUnlocked(pArena, -+ uRequestSize, -+ uiLog2ChunkSize, -+ uImportMultiplier, -+ uImportFlags, -+ pszAnnotation, -+ aBaseArray, -+ uiBaseArraySize, -+ IMG_FALSE, /* Sparse alloc */ -+ bPhysContig); -+ OSLockRelease(pArena->hLock); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+RA_AllocMultiSparse(RA_ARENA *pArena, -+ IMG_UINT32 uiLog2ChunkSize, -+ IMG_UINT8 uImportMultiplier, -+ RA_FLAGS_T uImportFlags, -+ const IMG_CHAR *pszAnnotation, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize, -+ IMG_UINT32 *puiAllocIndices, -+ IMG_UINT32 uiAllocCount) -+{ -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError; -+ IMG_BOOL bPhysContig; -+ -+ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass); -+ -+ /* -+ * In this case the arguments given show the allocation is -+ * sparse but has no specific indices, this indicates -+ * we want to populate the full aBaseArray -+ */ -+ if (puiAllocIndices == NULL) -+ { -+ RA_LENGTH_T uRequestSize = (RA_LENGTH_T) uiAllocCount << uiLog2ChunkSize; -+ eError = _RA_AllocMultiUnlocked(pArena, -+ uRequestSize, -+ uiLog2ChunkSize, -+ uImportMultiplier, -+ uImportFlags, -+ pszAnnotation, -+ aBaseArray, -+ uiBaseArraySize, -+ IMG_TRUE, /* Sparse alloc */ -+ &bPhysContig); -+ PVR_LOG_IF_ERROR(eError, "RA_AllocMulti"); -+ OSLockRelease(pArena->hLock); -+ return eError; -+ } -+ -+ /* -+ * This case is optimised for single allocations as we can skip -+ * some of the iteration logic in the full allocation path. -+ */ -+ if (uiAllocCount == 1) -+ { -+ eError = _RA_AllocMultiUnlocked(pArena, -+ 1ULL << uiLog2ChunkSize, -+ uiLog2ChunkSize, -+ uImportMultiplier, -+ uImportFlags, -+ pszAnnotation, -+ &aBaseArray[puiAllocIndices[0]], -+ 1, -+ IMG_TRUE, /* Sparse alloc */ -+ &bPhysContig); -+ PVR_LOG_IF_ERROR(eError, "RA_AllocMulti"); -+ OSLockRelease(pArena->hLock); -+ return eError; -+ } -+ -+ /* -+ * By consolidating / grouping the indices given we can perform sparse allocations -+ * in blocks, this has the effect of reducing fragmentation and creating optimal free -+ * scenarios. Free can be performed in blocks rather than a chunk at a time, this reduces -+ * the amount of BT merging cycles we perform. 
-+ */ -+ for (i = 0; i < uiAllocCount;) -+ { -+ IMG_UINT32 j; -+ IMG_UINT32 uiConsolidate = 1; -+ -+ for (j = i; -+ j + 1 != uiAllocCount && -+ puiAllocIndices[j + 1] == puiAllocIndices[j] + 1; -+ j++) -+ { -+ uiConsolidate++; -+ } -+ -+ eError = _RA_AllocMultiUnlocked(pArena, -+ (IMG_UINT64) uiConsolidate << uiLog2ChunkSize, -+ uiLog2ChunkSize, -+ uImportMultiplier, -+ uImportFlags, -+ pszAnnotation, -+ &aBaseArray[puiAllocIndices[i]], -+ uiConsolidate, -+ IMG_TRUE, /* Sparse alloc */ -+ &bPhysContig); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RA_AllocMulti", unwind_alloc); -+ i += uiConsolidate; -+ } -+ -+ OSLockRelease(pArena->hLock); -+ return PVRSRV_OK; -+ -+unwind_alloc: -+ if (i != 0) -+ { -+ PVRSRV_ERROR eFreeError; -+ eFreeError = _RA_FreeMultiUnlockedSparse(pArena, -+ aBaseArray, -+ uiBaseArraySize, -+ 1ULL << uiLog2ChunkSize, -+ puiAllocIndices, -+ &i); -+ PVR_LOG_IF_ERROR(eFreeError, "_RA_FreeMultiUnlockedSparse"); -+ } -+ -+ OSLockRelease(pArena->hLock); -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function RA_Find_BT_VARange -+@Description To find the boundary tag associated with the given device -+ virtual address. -+@Input pArena The arena -+@input base Allocated base resource -+@Input uRequestSize The size of resource segment requested. -+@Input uImportFlags Flags influencing allocation policy. -+@Return Boundary Tag - success, NULL on failure -+*/ /**************************************************************************/ -+static BT *RA_Find_BT_VARange(RA_ARENA *pArena, -+ RA_BASE_T base, -+ RA_LENGTH_T uRequestSize, -+ RA_FLAGS_T uImportFlags) -+{ -+ IMG_PSPLAY_TREE psSplaynode; -+ IMG_UINT32 uIndex; -+ -+ /* Find the splay node associated with these import flags */ -+ psSplaynode = PVRSRVFindNode(uImportFlags, pArena->per_flags_buckets); -+ -+ if (psSplaynode == NULL) -+ { -+ return NULL; -+ } -+ -+ uIndex = pvr_log2(uRequestSize); -+ -+ /* Find the free Boundary Tag from the bucket that holds the requested range */ -+ while (uIndex < FREE_TABLE_LIMIT) -+ { -+ BT *pBT = psSplaynode->buckets[uIndex]; -+ -+ while (pBT) -+ { -+ if ((pBT->base <= base) && ((pBT->base + pBT->uSize) >= (base + uRequestSize))) -+ { -+ if (pBT->type == btt_free) -+ { -+ return pBT; -+ } -+ else -+ { -+ PVR_ASSERT(pBT->type == btt_free); -+ } -+ } -+ else{ -+ pBT = pBT->next_free; -+ } -+ } -+ -+#if defined(PVR_CTZLL) -+ /* This could further be optimised to get the next valid bucket */ -+ while (!(psSplaynode->bHasEltsMapping & (1ULL << ++uIndex))); -+#else -+ uIndex++; -+#endif -+ } -+ -+ return NULL; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+RA_Alloc_Range(RA_ARENA *pArena, -+ RA_LENGTH_T uRequestSize, -+ RA_FLAGS_T uImportFlags, -+ RA_LENGTH_T uAlignment, -+ RA_BASE_T base, -+ RA_LENGTH_T *pActualSize) -+{ -+ RA_LENGTH_T uSize = uRequestSize; -+ BT *pBT = NULL; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (pArena == NULL || uSize == 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: One of the necessary parameters is 0", __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass); -+ PVR_ASSERT(is_arena_valid(pArena)); -+ -+ /* Align the requested size to the Arena Quantum */ -+ uSize = PVR_ALIGN(uSize, pArena->uQuantum); -+ -+ /* Must be a power of 2 or 0 */ -+ PVR_ASSERT((uAlignment == 0) || (uAlignment & (uAlignment - 1)) == 0); -+ -+ if (uAlignment > 1) -+ { -+ if (base != PVR_ALIGN(base, uAlignment)) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, unlock_); -+ } -+ 
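The consolidation loop of RA_AllocMultiSparse() above folds runs of consecutive indices into single block allocations. A small worked example of how that iteration proceeds; the index values are purely illustrative.

/* Worked example for the consolidation loop, 4 KiB chunks assumed:
 *
 *   puiAllocIndices = { 2, 3, 4, 7 }, uiAllocCount = 4
 *
 *   i = 0: indices 2,3,4 are consecutive -> uiConsolidate = 3,
 *          one _RA_AllocMultiUnlocked() of 3 << 12 bytes written starting
 *          at &aBaseArray[2]; i advances to 3.
 *   i = 3: index 7 stands alone          -> uiConsolidate = 1,
 *          one _RA_AllocMultiUnlocked() of 1 << 12 bytes at &aBaseArray[7].
 *
 * Two underlying allocations instead of four, so the matching free can
 * later run over the same two blocks rather than chunk by chunk. */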
} -+ -+ /* Find if the segment in the range exists and is free -+ * Check if the segment can be split -+ * Find the bucket that points to this segment -+ * Find the free segment is in the free list -+ * remove the free segment -+ * split the segment into three segments one prior free, alloc range, -+ * free segment after the range. -+ * remove the allocated range segment from the free list -+ * hook up the prior and after segments back to free list -+ * For each free, find the bucket the segment should go to -+ */ -+ -+ pBT = RA_Find_BT_VARange(pArena, base, uSize, uImportFlags); -+ -+ if (pBT == NULL) -+ { -+ PVR_GOTO_WITH_ERROR(eError, -+ PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL, -+ unlock_); -+ } -+ -+ /* Remove the boundary tag from the free list */ -+ _FreeListRemove (pArena, pBT); -+ -+ /* if requested VA start in the middle of the BT, split the BT accordingly */ -+ if (base > pBT->base) -+ { -+ BT *pNeighbour; -+ pNeighbour = _SegmentSplit (pBT, (RA_LENGTH_T)(base - pBT->base)); -+ /* partition the buffer, create a new boundary tag */ -+ if (pNeighbour == NULL) -+ { -+ /* Put pBT back in the list */ -+ _FreeListInsert (pArena, pBT); -+ PVR_LOG_GOTO_WITH_ERROR("_SegmentSplit (1)", eError, -+ PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL, -+ unlock_); -+ } -+ -+ /* Insert back the free BT to the free list */ -+ _FreeListInsert(pArena, pBT); -+ pBT = pNeighbour; -+ } -+ -+ /* the segment might be too big, if so, discard the back of the segment */ -+ if (pBT->uSize > uSize) -+ { -+ BT *pNeighbour; -+ pNeighbour = _SegmentSplit(pBT, uSize); -+ /* partition the buffer, create a new boundary tag */ -+ if (pNeighbour == NULL) -+ { -+ /* Put pBT back in the list */ -+ _FreeListInsert (pArena, pBT); -+ PVR_LOG_GOTO_WITH_ERROR("_SegmentSplit (2)", eError, -+ PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL, -+ unlock_); -+ } -+ -+ /* Insert back the free BT to the free list */ -+ _FreeListInsert (pArena, pNeighbour); -+ } -+ -+ pBT->type = btt_live; -+ -+ if (!HASH_Insert_Extended (pArena->pSegmentHash, &base, (uintptr_t)pBT)) -+ { -+ _FreeBT (pArena, pBT); -+ PVR_GOTO_WITH_ERROR(eError, -+ PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED, -+ unlock_); -+ } -+ -+ if (pActualSize != NULL) -+ { -+ *pActualSize = uSize; -+ } -+ -+ pArena->ui64FreeArenaSize -= uSize; -+ -+unlock_: -+ OSLockRelease(pArena->hLock); -+ -+ return eError; -+} -+ -+IMG_INTERNAL void -+RA_Free(RA_ARENA *pArena, RA_BASE_T base) -+{ -+ BT *pBT; -+ -+ PVR_ASSERT(pArena != NULL); -+ -+ if (pArena == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter - pArena", __func__)); -+ return; -+ } -+ -+ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass); -+ PVR_ASSERT(is_arena_valid(pArena)); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', base=0x%llx", __func__, pArena->name, -+ (unsigned long long)base)); -+ -+ pBT = (BT *) HASH_Remove_Extended(pArena->pSegmentHash, &base); -+ PVR_ASSERT(pBT != NULL); -+ -+ if (pBT) -+ { -+ pArena->ui64FreeArenaSize += pBT->uSize; -+ -+ PVR_ASSERT(pBT->base == base); -+ _FreeBT(pArena, pBT); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: no resource span found for given base (0x%llX) in arena %s", -+ __func__, (unsigned long long) base, pArena->name)); -+ } -+ -+ PVR_ASSERT(is_arena_valid(pArena)); -+ OSLockRelease(pArena->hLock); -+} -+ -+static PVRSRV_ERROR -+_RA_FreeMultiUnlocked(RA_ARENA *pArena, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Free the whole array */ -+ if (uiBaseArraySize == 1) -+ { -+ eError = _FreeSingleBaseArray(pArena, 
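RA_Alloc_Range() above reserves a caller-chosen base rather than letting the arena pick one, splitting the surrounding free boundary tag as required, and RA_Free() releases it by base like any other allocation. A hedged caller-side sketch with invented addresses and sizes:

/* Reserve 64 KiB at a fixed, 4 KiB aligned base (illustrative values). */
RA_LENGTH_T uiGranted;
PVRSRV_ERROR eErr = RA_Alloc_Range(psArena,
                                   0x10000,        /* requested size            */
                                   0,              /* import flags              */
                                   0x1000,         /* required alignment        */
                                   0x80100000ULL,  /* base to reserve           */
                                   &uiGranted);    /* size rounded to quantum   */
if (eErr == PVRSRV_OK)
{
    /* Freed exactly like any other allocation, keyed by its base. */
    RA_Free(psArena, 0x80100000ULL);
}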
aBaseArray, uiBaseArraySize); -+ PVR_LOG_IF_ERROR(eError, "_FreeSingleBaseArray"); -+ } -+ else -+ { -+ eError = _FreeMultiBaseArray(pArena, aBaseArray, uiBaseArraySize); -+ PVR_LOG_IF_ERROR(eError, "_FreeMultiBaseArray"); -+ } -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+RA_FreeMulti(RA_ARENA *pArena, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize) -+{ -+ PVRSRV_ERROR eError; -+ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass); -+ eError = _RA_FreeMultiUnlocked(pArena, -+ aBaseArray, -+ uiBaseArraySize); -+ OSLockRelease(pArena->hLock); -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR -+_RA_FreeMultiUnlockedSparse(RA_ARENA *pArena, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize, -+ RA_LENGTH_T uiChunkSize, -+ IMG_UINT32 *puiFreeIndices, -+ IMG_UINT32 *puiFreeCount) -+{ -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiFreeCount = *puiFreeCount; -+ *puiFreeCount = 0; -+ -+ /* Handle case where we only have 1 base to free. */ -+ if (uiFreeCount == 1) -+ { -+ eError = _FreeBaseArraySlice(pArena, -+ aBaseArray, -+ uiBaseArraySize, -+ uiChunkSize, -+ puiFreeIndices[0], -+ 1); -+ PVR_LOG_IF_ERROR(eError, "_FreeBaseArraySlice"); -+ if (eError == PVRSRV_OK) -+ { -+ *puiFreeCount = uiFreeCount; -+ } -+ return eError; -+ } -+ -+ for (i = 0; i < uiFreeCount;) -+ { -+ IMG_UINT32 j; -+ IMG_UINT32 uiConsolidate = 1; -+ -+ PVR_ASSERT(RA_BASE_IS_REAL(aBaseArray[i])); -+ -+ for (j = i; -+ puiFreeIndices[j + 1] == puiFreeIndices[j] + 1 && -+ RA_BASE_IS_GHOST(aBaseArray[puiFreeIndices[j + 1]]); -+ j++) -+ { -+ uiConsolidate++; -+ } -+ -+ eError = _FreeBaseArraySlice(pArena, -+ aBaseArray, -+ uiBaseArraySize, -+ uiChunkSize, -+ puiFreeIndices[i], -+ uiConsolidate); -+ PVR_LOG_RETURN_IF_ERROR(eError, "_FreeBaseArraySlice"); -+ -+ i += uiConsolidate; -+ *puiFreeCount += uiConsolidate; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+RA_FreeMultiSparse(RA_ARENA *pArena, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize, -+ IMG_UINT32 uiLog2ChunkSize, -+ IMG_UINT32 *puiFreeIndices, -+ IMG_UINT32 *puiFreeCount) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_LOG_RETURN_IF_FALSE(puiFreeCount != NULL, -+ "puiFreeCount Required", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ /* Ensure Base Array is large enough for intended free */ -+ PVR_LOG_RETURN_IF_FALSE(uiBaseArraySize >= *puiFreeCount, -+ "Attempt to free more bases than array holds", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ PVR_LOG_RETURN_IF_FALSE(puiFreeIndices != NULL, -+ "puiFreeIndices Required", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ PVR_LOG_RETURN_IF_FALSE(uiLog2ChunkSize >= RA_BASE_FLAGS_LOG2 && -+ uiLog2ChunkSize <= RA_BASE_CHUNK_LOG2_MAX, -+ "Log2 chunk size must be 12-64", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass); -+ eError = _RA_FreeMultiUnlockedSparse(pArena, -+ aBaseArray, -+ uiBaseArraySize, -+ 1ULL << uiLog2ChunkSize, -+ puiFreeIndices, -+ puiFreeCount); -+ OSLockRelease(pArena->hLock); -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR -+_TrimBlockMakeReal(RA_ARENA *pArena, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize, -+ IMG_UINT32 uiLog2ChunkSize, -+ IMG_UINT32 uiStartIndex, -+ IMG_UINT32 uiEndIndex) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RA_BASE_T sRealBase; -+ IMG_UINT32 uiRealBaseIndex; -+ -+ /* Note: Error return paths in this function do not require unwinding. 
-+ * Free logic is performed based upon indices and detection of Real base regions, -+ * performance wise it would be more costly to unwind the conversion here than to -+ * just free a smaller Real base region. -+ */ -+ -+ /* Check Start index is real, if not make it real */ -+ if (RA_BASE_IS_GHOST(aBaseArray[uiStartIndex])) -+ { -+ _FindRealBaseFromGhost(aBaseArray, -+ uiStartIndex, -+ &sRealBase, -+ &uiRealBaseIndex); -+ -+ eError = _ConvertGhostBaseToReal(pArena, -+ aBaseArray, -+ sRealBase, -+ uiRealBaseIndex, -+ uiStartIndex, -+ 1ULL << uiLog2ChunkSize); -+ PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal"); -+ } -+ -+ /* Check end +1 is real or end of array , if ghost make real */ -+ if (uiEndIndex + 1 != uiBaseArraySize && -+ RA_BASE_IS_GHOST(aBaseArray[uiEndIndex + 1])) -+ { -+ _FindRealBaseFromGhost(aBaseArray, -+ uiEndIndex + 1, -+ &sRealBase, -+ &uiRealBaseIndex); -+ -+ eError = _ConvertGhostBaseToReal(pArena, -+ aBaseArray, -+ sRealBase, -+ uiRealBaseIndex, -+ uiEndIndex + 1, -+ 1ULL << uiLog2ChunkSize); -+ PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal"); -+ } -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+RA_SwapSparseMem(RA_ARENA *pArena, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize, -+ IMG_UINT32 uiLog2ChunkSize, -+ IMG_UINT32 *puiXIndices, -+ IMG_UINT32 *puiYIndices, -+ IMG_UINT32 uiSwapCount) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiSwapped = 0; -+ IMG_UINT32 uiStartIndex; -+ /* Consolidation values counting the bases after the start index*/ -+ IMG_UINT32 uiXConsol; -+ IMG_UINT32 uiYConsol; -+ /* Consolidation limit, the smallest consecutive indices between the -+ * two inputs -+ */ -+ IMG_UINT32 uiConsolidateLimit; -+ IMG_UINT32 uiTotalSwapCount; -+ IMG_UINT32 i; -+ -+ /* -+ * The algorithm below aims to swap the desired indices whilst also -+ * maintaining a maximum contiguity of allocation blocks where possible. -+ * It does this by: -+ * Consolidating the contiguous indices of X and Y. -+ * Selecting the smallest of these consolidations as a range to swap in a block. -+ * Trim both block ranges using the indices range to ensure that Real bases are -+ * created to represent regions that have been split due to the indices. -+ * Perform the swap and update the swapped count ready for the next iteration. -+ * Note: Maintaining contiguity improves performance of free logic for sparse -+ * allocations because we can free in regions rather than chunks. -+ */ -+ while (uiSwapped != uiSwapCount) -+ { -+ IMG_UINT32 x, y; -+ uiTotalSwapCount = 1; -+ uiStartIndex = uiSwapped; -+ uiXConsol = 0; -+ uiYConsol = 0; -+ -+ /* Calculate contiguous indices at X */ -+ for (x = uiStartIndex; -+ x < uiSwapCount && -+ puiXIndices[x] + 1 == puiXIndices[x + 1]; -+ x++) -+ { -+ uiXConsol++; -+ } -+ -+ /* Calculate contiguous indices at Y */ -+ for (y = uiStartIndex; -+ y < uiSwapCount && -+ puiYIndices[y] + 1 == puiYIndices[y + 1]; -+ y++) -+ { -+ uiYConsol++; -+ } -+ -+ /* Find lowest consolidation value */ -+ uiConsolidateLimit = (uiXConsol < uiYConsol) ? 
uiXConsol : uiYConsol; -+ -+ /* Perform RealBase translation where required */ -+ eError = _TrimBlockMakeReal(pArena, -+ aBaseArray, -+ uiBaseArraySize, -+ uiLog2ChunkSize, -+ puiXIndices[uiStartIndex], -+ puiXIndices[uiStartIndex + uiConsolidateLimit]); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_TrimBlockMakeReal", unwind); -+ -+ eError = _TrimBlockMakeReal(pArena, -+ aBaseArray, -+ uiBaseArraySize, -+ uiLog2ChunkSize, -+ puiYIndices[uiStartIndex], -+ puiYIndices[uiStartIndex + uiConsolidateLimit]); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_TrimBlockMakeReal", unwind); -+ -+ uiTotalSwapCount += uiConsolidateLimit; -+ uiSwapped += uiTotalSwapCount; -+ i = uiStartIndex; -+ -+ do -+ { -+ SWAP(aBaseArray[puiXIndices[i]], aBaseArray[puiYIndices[i]]); -+ uiTotalSwapCount--; -+ i++; -+ } -+ while (uiTotalSwapCount != 0); -+ } -+ -+ return PVRSRV_OK; -+ -+unwind: -+ /* If we hit an error when Trimming we should revert the swapping -+ * that has already been performed. -+ */ -+ for (i = 0; i < uiSwapped; i++) -+ { -+ SWAP(aBaseArray[puiXIndices[i]], aBaseArray[puiYIndices[i]]); -+ } -+ -+ return eError; -+} -+ -+IMG_INTERNAL void -+RA_Get_Usage_Stats(RA_ARENA *pArena, PRA_USAGE_STATS psRAStats) -+{ -+ psRAStats->ui64TotalArenaSize = pArena->ui64TotalArenaSize; -+ psRAStats->ui64FreeArenaSize = pArena->ui64FreeArenaSize; -+} -+ -+IMG_INTERNAL IMG_CHAR * -+RA_GetArenaName(RA_ARENA *pArena) -+{ -+ return pArena->name; -+} -+ -+/* #define _DBG(...) PVR_LOG((__VA_ARGS__)) */ -+#define _DBG(...) -+ -+IMG_INTERNAL RA_ARENA_ITERATOR * -+RA_IteratorAcquire(RA_ARENA *pArena, IMG_BOOL bIncludeFreeSegments) -+{ -+ RA_ARENA_ITERATOR *pIter = OSAllocMem(sizeof(*pIter)); -+ PVR_LOG_RETURN_IF_FALSE(pIter != NULL, "OSAllocMem", NULL); -+ -+ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass); -+ -+ pIter->pArena = pArena; -+ pIter->bIncludeFreeSegments = bIncludeFreeSegments; -+ -+ RA_IteratorReset(pIter); -+ -+ return pIter; -+} -+ -+IMG_INTERNAL void -+RA_IteratorRelease(RA_ARENA_ITERATOR *pIter) -+{ -+ PVR_ASSERT(pIter != NULL); -+ -+ if (pIter == NULL) -+ { -+ return; -+ } -+ -+ OSLockRelease(pIter->pArena->hLock); -+ -+ OSFreeMem(pIter); -+} -+ -+IMG_INTERNAL void -+RA_IteratorReset(RA_ARENA_ITERATOR *pIter) -+{ -+ BT *pNext; -+ -+ PVR_ASSERT(pIter != NULL); -+ -+ pNext = pIter->pArena->pHeadSegment; -+ -+ /* find next element if we're not including the free ones */ -+ if (!pIter->bIncludeFreeSegments) -+ { -+ while (pNext != NULL && pNext->type != btt_live) -+ { -+ _DBG("(%s()) skipping segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", " -+ "type=%u", __func__, (void *) pNext->base, pNext->uSize, -+ pNext->type); -+ pNext = pNext->pNextSegment; -+ } -+ } -+ -+ _DBG("(%s()) current segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", " -+ "type=%u", __func__, -+ pNext != NULL ? (void *) pNext->base : NULL, -+ pNext != NULL ? pNext->uSize : 0, -+ pNext != NULL ? 
pNext->type : 0); -+ -+ /* if bIncludeFreeSegments then pNext here is either a valid pointer to -+ * "live" segment or NULL and if !bIncludeFreeSegments then it's either -+ * a valid pointer to any next segment or NULL */ -+ pIter->pCurrent = pNext; -+} -+ -+IMG_INTERNAL IMG_BOOL -+RA_IteratorNext(RA_ARENA_ITERATOR *pIter, RA_ITERATOR_DATA *pData) -+{ -+ BT *pNext; -+ -+ PVR_ASSERT(pIter != NULL); -+ -+ if (pIter == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "pIter in %s() is NULL", __func__)); -+ return IMG_FALSE; -+ } -+ -+ if (pIter->pCurrent == NULL) -+ { -+ return IMG_FALSE; -+ } -+ -+ pNext = pIter->pCurrent; -+ -+ _DBG("(%s()) current segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", " -+ "type=%u", __func__, (void *) pNext->base, pNext->uSize, -+ pNext->type); -+ -+ pData->uiAddr = pIter->pCurrent->base; -+ pData->uiSize = pIter->pCurrent->uSize; -+ pData->bFree = pIter->pCurrent->type == btt_free; -+ -+ /* combine contiguous segments */ -+ while ((pNext = pNext->pNextSegment) != NULL && -+ pNext->type == pNext->pPrevSegment->type && -+ pNext->type == btt_live && -+ pNext->base == pData->uiAddr + pData->uiSize) -+ { -+ _DBG("(%s()) combining segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", " -+ "type=%u", __func__, (void *) pNext->base, pNext->uSize, -+ pNext->type); -+ pData->uiSize += pNext->uSize; -+ } -+ -+ /* advance to next */ -+ if (!pIter->bIncludeFreeSegments) -+ { -+ while (pNext != NULL && pNext->type != btt_live) -+ { -+ _DBG("(%s()) skipping segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", " -+ "type=%u", __func__, (void *) pNext->base, pNext->uSize, -+ pNext->type); -+ pNext = pNext->pNextSegment; -+ } -+ } -+ -+ _DBG("(%s()) next segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", " -+ "type=%u", __func__, -+ pNext != NULL ? (void *) pNext->base : NULL, -+ pNext != NULL ? pNext->uSize : 0, -+ pNext != NULL ? 
pNext->type : 0); -+ -+ /* if bIncludeFreeSegments then pNext here is either a valid pointer to -+ * "live" segment or NULL and if !bIncludeFreeSegments then it's either -+ * a valid pointer to any next segment or NULL */ -+ pIter->pCurrent = pNext; -+ -+ return IMG_TRUE; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+RA_BlockDump(RA_ARENA *pArena, void (*pfnLogDump)(void*, IMG_CHAR*, ...), void *pPrivData) -+{ -+ RA_ARENA_ITERATOR *pIter = NULL; -+ RA_ITERATOR_DATA sIterData; -+ const IMG_UINT32 uiLineWidth = 64; -+ -+ IMG_UINT32 **papRegionArray = NULL; -+ IMG_UINT32 uiRegionCount = 0; -+ -+ const IMG_UINT32 uiChunkSize = 32; /* 32-bit chunks */ -+ const IMG_UINT32 uiChunkCount = (uiLineWidth / uiChunkSize) * 2; /* This should equal 2 or a multiple of 2 */ -+ const IMG_UINT32 uiRegionSize = uiChunkSize * uiChunkCount; -+ -+ IMG_UINT32 uiRecognisedQuantum = 0; -+ -+ IMG_UINT64 uiLastBase = 0; -+ IMG_UINT64 uiLastSize = 0; -+ -+ IMG_UINT32 i; -+ IMG_UINT32 uiRemainder; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ IMG_UINT64 uiLargestFreeSegmentSize = 0; -+ IMG_UINT32 uiFragPercentage = 0; -+ -+ /* -- papRegionArray Structure -- -+ * papRegionArray Indexes -+ * | Chunk 0 Chunk 1 Chunk 2 Chunk 3 -+ * v |------------|------------|------------|------------| -+ * [0] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 | -- | -+ * [1] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 | | -+ * [2] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 | | -+ * [3] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 | | Regions -+ * [4] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 | | -+ * [5] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 | | -+ * [6] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 | -- | -+ * ... -+ */ -+ -+ if (pArena == NULL || pfnLogDump == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ pIter = RA_IteratorAcquire(pArena, IMG_TRUE); -+ PVR_LOG_RETURN_IF_NOMEM(pIter, "RA_IteratorAcquire"); -+ -+ uiRecognisedQuantum = pArena->uQuantum > 0 ? pArena->uQuantum : 4096; -+ -+ while (RA_IteratorNext(pIter, &sIterData)) -+ { -+ if (!sIterData.bFree && sIterData.uiAddr >= uiLastBase) -+ { -+ uiLastBase = sIterData.uiAddr; -+ uiLastSize = sIterData.uiSize; -+ } -+ } -+ -+ uiRegionCount = OSDivide64(uiLastBase + uiLastSize, uiRecognisedQuantum, -+ &uiRemainder); -+ uiRegionCount = OSDivide64(uiRegionCount, uiRegionSize, &uiRemainder); -+ if (uiRemainder != 0 || uiRegionCount == 0) -+ { -+ uiRegionCount += 1; -+ } -+ -+ papRegionArray = OSAllocZMem(sizeof(IMG_UINT32*) * uiRegionCount); -+ PVR_LOG_GOTO_IF_NOMEM(papRegionArray, eError, cleanup_array); -+ -+ RA_IteratorReset(pIter); -+ -+ while (RA_IteratorNext(pIter, &sIterData)) -+ { -+ IMG_UINT64 uiDataDivRecQuant; -+ -+ IMG_UINT32 uiAddrRegionIdx = 0; -+ IMG_UINT32 uiAddrRegionOffset = 0; -+ IMG_UINT32 uiAddrChunkIdx = 0; -+ IMG_UINT32 uiAddrChunkOffset = 0; -+ IMG_UINT32 uiAddrChunkShift; /* The bit-shift needed to fill the chunk */ -+ -+ IMG_UINT32 uiQuantisedSize; -+ IMG_UINT32 uiQuantisedSizeMod; -+ IMG_UINT32 uiAllocLastRegionIdx = 0; /* The last region that this alloc appears in */ -+ IMG_UINT32 uiAllocChunkSize = 0; /* The number of chunks this alloc spans */ -+ -+ IMG_INT32 iBitSetCount = 0; -+ IMG_INT32 iOverflowCheck = 0; -+ IMG_INT32 iOverflow = 0; -+ IMG_UINT32 uiRegionIdx = 0; -+ IMG_UINT32 uiChunkIdx = 0; -+ -+ /* If the current data is for a free block, use it to track largest -+ * contiguous free segment size. 
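The iterator completed above, together with RA_Get_Usage_Stats(), is the read-only view that the block-dump code below is built on. A hedged consumer sketch; psArena is the assumed arena handle from the earlier sketches.

RA_ARENA_ITERATOR *pIter = RA_IteratorAcquire(psArena, IMG_TRUE /* include free */);
if (pIter != NULL)
{
    RA_ITERATOR_DATA sData;

    while (RA_IteratorNext(pIter, &sData))
    {
        /* sData.uiAddr / sData.uiSize describe one (possibly merged)
         * segment; sData.bFree distinguishes free space from live
         * allocations. */
    }
    RA_IteratorRelease(pIter);   /* drops the arena lock taken on acquire */
}

{
    RA_USAGE_STATS sStats;
    RA_Get_Usage_Stats(psArena, &sStats);
    /* sStats.ui64TotalArenaSize vs. sStats.ui64FreeArenaSize */
}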
-+ */ -+ if (sIterData.bFree && sIterData.uiSize > uiLargestFreeSegmentSize) -+ { -+ uiLargestFreeSegmentSize = sIterData.uiSize; -+ continue; -+ } -+ -+ uiDataDivRecQuant = OSDivide64(sIterData.uiAddr, uiRecognisedQuantum, -+ &uiRemainder); -+ uiAddrRegionIdx = OSDivide64(uiDataDivRecQuant, uiRegionSize, -+ &uiAddrRegionOffset); -+ uiQuantisedSize = OSDivide64(sIterData.uiSize, uiRecognisedQuantum, -+ &uiQuantisedSizeMod); -+ -+ uiAddrChunkIdx = uiAddrRegionOffset / uiChunkSize; -+ uiAddrChunkOffset = uiAddrRegionOffset % uiChunkSize; -+ uiAddrChunkShift = uiChunkSize - uiAddrChunkOffset; -+ uiRegionIdx = uiAddrRegionIdx; -+ uiChunkIdx = uiAddrChunkIdx; -+ -+ if ((uiQuantisedSize == 0) || (uiQuantisedSizeMod != 0)) -+ { -+ uiQuantisedSize += 1; -+ } -+ -+ uiAllocLastRegionIdx = OSDivide64(uiDataDivRecQuant + uiQuantisedSize - 1, -+ uiRegionSize, &uiRemainder); -+ uiAllocChunkSize = (uiAddrChunkOffset + uiQuantisedSize) / uiChunkSize; -+ -+ if ((uiAddrChunkOffset + uiQuantisedSize) % uiChunkSize > 0) -+ { -+ uiAllocChunkSize += 1; -+ } -+ -+ iBitSetCount = uiQuantisedSize; -+ iOverflowCheck = uiQuantisedSize - uiAddrChunkShift; -+ -+ if (iOverflowCheck > 0) -+ { -+ iOverflow = iOverflowCheck; -+ iBitSetCount = uiQuantisedSize - iOverflow; -+ } -+ -+ /** -+ * Allocate memory to represent the chunks for each region the allocation -+ * spans. If one was already allocated before don't do it again. -+ */ -+ for (i = 0; uiAddrRegionIdx + i <= uiAllocLastRegionIdx; i++) -+ { -+ if (papRegionArray[uiAddrRegionIdx + i] == NULL) -+ { -+ papRegionArray[uiAddrRegionIdx + i] = OSAllocZMem(sizeof(IMG_UINT32) * uiChunkCount); -+ PVR_LOG_GOTO_IF_NOMEM(papRegionArray[uiAddrRegionIdx + i], eError, cleanup_regions); -+ } -+ } -+ -+ for (i = 0; i < uiAllocChunkSize; i++) -+ { -+ if (uiChunkIdx >= uiChunkCount) -+ { -+ uiRegionIdx++; -+ uiChunkIdx = 0; -+ } -+ -+ if ((IMG_UINT32)iBitSetCount != uiChunkSize) -+ { -+ IMG_UINT32 uiBitMask = 0; -+ -+ uiBitMask = (1U << iBitSetCount) - 1; -+ uiBitMask <<= (uiAddrChunkShift - iBitSetCount); -+ -+ papRegionArray[uiRegionIdx][uiChunkIdx] |= uiBitMask; -+ } -+ else -+ { -+ papRegionArray[uiRegionIdx][uiChunkIdx] |= 0xFFFFFFFF; -+ } -+ -+ uiChunkIdx++; -+ iOverflow -= uiChunkSize; -+ iBitSetCount = iOverflow >= 0 ? uiChunkSize : uiChunkSize + iOverflow; -+ if (iOverflow < 0) -+ { -+ uiAddrChunkShift = 32; -+ } -+ } -+ } -+ if (pArena->ui64FreeArenaSize && uiLargestFreeSegmentSize) -+ { -+ /* N.B This can look strange in a dual RA when comparing to the dump visualisation -+ * as spans that are freed are not included in the segment list, regardless it is -+ * an accurate representation for the spans in the Arena. 
-+ */ -+ uiFragPercentage = OSDivide64(100 * pArena->ui64FreeArenaSize, -+ pArena->ui64FreeArenaSize + uiLargestFreeSegmentSize, -+ &uiRemainder); -+ } -+ -+ pfnLogDump(pPrivData, "~~~ '%s' Resource Arena Block Dump", pArena->name); -+ pfnLogDump(pPrivData, " Block Size: %uB", uiRecognisedQuantum); -+ pfnLogDump(pPrivData, -+ " Span Memory Usage: %"IMG_UINT64_FMTSPEC"B" -+ " Free Span Memory: %"IMG_UINT64_FMTSPEC"B" -+ " Largest Free Region Size: %"IMG_UINT64_FMTSPEC"B" -+ " Percent Fragmented %u%%", -+ pArena->ui64TotalArenaSize, -+ pArena->ui64FreeArenaSize, -+ uiLargestFreeSegmentSize, -+ uiFragPercentage); -+ pfnLogDump(pPrivData, -+ "==============================================================================="); -+ -+ for (i = 0; i < uiRegionCount; i++) -+ { -+ static IMG_BOOL bEmptyRegion = IMG_FALSE; -+ if (papRegionArray[i] != NULL) -+ { -+ IMG_CHAR pszLine[65]; -+ IMG_UINT32 j; -+ -+ bEmptyRegion = IMG_FALSE; -+ pszLine[64] = '\0'; -+ -+ for (j = 0; j < uiChunkCount; j+=2) -+ { -+ IMG_UINT8 uiBit = 0; -+ IMG_UINT32 k; -+ IMG_UINT64 uiLineAddress = -+ (i * uiRegionSize + (j >> 1) * uiLineWidth) * uiRecognisedQuantum; -+ -+ /** -+ * Move through each of the 32 bits in the chunk and check their -+ * value. If it is 1 we set the corresponding character to '#', -+ * otherwise it is set to '.' representing empty space -+ */ -+ for (k = 1 << 31; k != 0; k >>= 1) -+ { -+ pszLine[uiBit] = papRegionArray[i][j] & k ? '#' : '.'; -+ pszLine[32 + uiBit] = papRegionArray[i][j+1] & k ? '#' : '.'; -+ uiBit++; -+ } -+ -+ pfnLogDump(pPrivData, -+ "| 0x%08"IMG_UINT64_FMTSPECx" | %s", -+ uiLineAddress, -+ pszLine); -+ } -+ OSFreeMem(papRegionArray[i]); -+ } -+ else -+ { -+ /* We only print this once per gap of n regions */ -+ if (!bEmptyRegion) -+ { -+ pfnLogDump(pPrivData, " ...."); -+ bEmptyRegion = IMG_TRUE; -+ } -+ } -+ } -+ -+ RA_IteratorRelease(pIter); -+ -+ OSFreeMem(papRegionArray); -+ return eError; -+ -+cleanup_regions: -+ for (i = 0; i < uiRegionCount; i++) -+ { -+ if (papRegionArray[i] != NULL) -+ { -+ OSFreeMem(papRegionArray[i]); -+ } -+ } -+ -+cleanup_array: -+ OSFreeMem(papRegionArray); -+ RA_IteratorRelease(pIter); -+ -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/ra.h b/drivers/gpu/drm/img-rogue/ra.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/ra.h -@@ -0,0 +1,644 @@ -+/*************************************************************************/ /*! -+@File -+@Title Resource Allocator API -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RA_H -+#define RA_H -+ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+#define RA_MAX_NAME_LENGTH 20 -+ -+/** Resource arena. -+ * struct _RA_ARENA_ deliberately opaque -+ */ -+typedef struct _RA_ARENA_ RA_ARENA; //PRQA S 3313 -+ -+/** Resource arena's iterator. -+ * struct _RA_ARENA_ITERATOR_ deliberately opaque -+ */ -+typedef struct _RA_ARENA_ITERATOR_ RA_ARENA_ITERATOR; -+ -+typedef struct _RA_ITERATOR_DATA_ { -+ IMG_UINT64 uiAddr; -+ IMG_UINT64 uiSize; -+ IMG_BOOL bFree; -+} RA_ITERATOR_DATA; -+ -+/** Resource arena usage statistics. -+ * struct _RA_USAGE_STATS -+ */ -+typedef struct _RA_USAGE_STATS { -+ IMG_UINT64 ui64TotalArenaSize; -+ IMG_UINT64 ui64FreeArenaSize; -+}RA_USAGE_STATS, *PRA_USAGE_STATS; -+ -+/* -+ * Per-Arena handle - this is private data for the caller of the RA. -+ * The RA knows nothing about this data. It is given it in RA_Create, and -+ * promises to pass it to calls to the ImportAlloc and ImportFree callbacks -+ */ -+typedef IMG_HANDLE RA_PERARENA_HANDLE; -+/* -+ * Per-Import handle - this is private data for the caller of the RA. -+ * The RA knows nothing about this data. It is given it on a per-import basis, -+ * basis, either the "initial" import at RA_Create time, or further imports -+ * via the ImportAlloc callback. It sends it back via the ImportFree callback, -+ * and also provides it in answer to any RA_Alloc request to signify from -+ * which "import" the allocation came. -+ */ -+typedef IMG_HANDLE RA_PERISPAN_HANDLE; -+ -+typedef IMG_UINT64 RA_BASE_T; -+typedef IMG_UINT32 RA_LOG2QUANTUM_T; -+typedef IMG_UINT64 RA_LENGTH_T; -+typedef IMG_UINT32 RA_POLICY_T; -+ -+typedef struct _RA_BASE_MULTI_ RA_BASE_MULTI_T; -+ -+typedef IMG_UINT32 RA_BASE_ARRAY_SIZE_T; -+ -+ -+/* -+ * RA_BASE_ARRAY can represent a number of bases of which are packed, -+ * that is, they can be one of two types, a Real Base or a Ghost base. -+ * A Real Base is a base that has been created by the RA and is used to -+ * represent an allocated region, it has an entry in the RA Hash table and -+ * as such has a BT associated with it. -+ * A Ghost base is a fabricated base address generated at chunk boundaries -+ * given by the caller. These are used to divide a RealBase into -+ * arbitrary regions that the caller requires e.g. 4k pages. 
Ghost bases don't -+ * exist from the RA memory tracking point of view but they do exist and are treated -+ * as base addresses from the PoV of the caller. This allows the RA to allocate in -+ * largest possible lengths meaning fewer alloc calls whilst allowing the chunk -+ * flexibility for callers. Ghost refers to the concept that they -+ * don't exist in this RA internals context but do in the callers (LMA) context i.e. -+ * they appear Real from another perspective but we the RA know they are a ghost of the -+ * Real Base. -+ * */ -+#if defined(__GNUC__) && GCC_VERSION_AT_LEAST(9, 0) -+/* Use C99 dynamic arrays, older compilers do not support this. */ -+typedef RA_BASE_T RA_BASE_ARRAY_T[]; -+#else -+/* Variable length array work around, will contain at least 1 element. -+ * Causes errors on newer compilers, in which case use dynamic arrays (see above). -+ */ -+#define RA_FLEX_ARRAY_ONE_OR_MORE_ELEMENTS 1U -+typedef RA_BASE_T RA_BASE_ARRAY_T[RA_FLEX_ARRAY_ONE_OR_MORE_ELEMENTS]; -+#endif -+ -+/* Since 0x0 is a valid BaseAddr, we rely on max 64-bit value to be an invalid -+ * page address. -+ */ -+#define INVALID_BASE_ADDR (IMG_UINT64_MAX) -+/* Used to check for duplicated alloc indices in sparse alloc path -+ * prior to attempting allocations */ -+#define RA_BASE_SPARSE_PREP_ALLOC_ADDR (IMG_UINT64_MAX - 1) -+#define RA_BASE_FLAGS_MASK 0xFFF /* 12 Bits 4k alignment. */ -+#define RA_BASE_FLAGS_LOG2 12 -+#define RA_BASE_CHUNK_LOG2_MAX 64 -+#define RA_BASE_GHOST_BIT (1ULL << 0) -+#define RA_BASE_STRIP_GHOST_BIT(uiBase) ((uiBase) & ~(RA_BASE_GHOST_BIT)) -+#define RA_BASE_SET_GHOST_BIT(uiBase) ((uiBase) |= RA_BASE_GHOST_BIT) -+#define RA_BASE_IS_GHOST(uiBase) (BITMASK_HAS((uiBase), RA_BASE_GHOST_BIT) && (uiBase) != INVALID_BASE_ADDR) -+#define RA_BASE_IS_REAL(uiBase) (!BITMASK_HAS((uiBase), RA_BASE_GHOST_BIT)) -+#define RA_BASE_IS_SPARSE_PREP(uiBase) ((uiBase) == RA_BASE_SPARSE_PREP_ALLOC_ADDR) -+#define RA_BASE_IS_INVALID(uiBase) ((uiBase) == INVALID_BASE_ADDR) -+ -+typedef struct _RA_MULTIBASE_ITERATOR_ RA_MULTIBASE_ITERATOR; -+ -+/* Lock classes: describes the level of nesting between different arenas. */ -+#define RA_LOCKCLASS_0 0 -+#define RA_LOCKCLASS_1 1 -+#define RA_LOCKCLASS_2 2 -+ -+#define RA_NO_IMPORT_MULTIPLIER 1 -+ -+/* -+ * Allocation Policies that govern the resource areas. -+ * */ -+ -+/* --- Resource allocation policy definitions --- -+* | 31.........5|.......4....|......3....|........2.............|1...................0| -+* | Reserved | Non-Contig | No split | Area bucket selection| Alloc node selection| -+*/ -+ -+/* -+ * Fast allocation policy allows to pick the first node -+ * that satisfies the request. -+ * It is the default policy for all arenas. -+ * */ -+#define RA_POLICY_ALLOC_FAST (0U) -+/* -+ * Optimal allocation policy allows to pick the lowest size node -+ * that satisfies the request. This picking policy helps in reducing the fragmentation. -+ * This minimises the necessity to split the nodes more often as the optimal -+ * ones are picked. -+ * As a result any future higher size allocation requests are likely to succeed -+ */ -+#define RA_POLICY_ALLOC_OPTIMAL (1U) -+#define RA_POLICY_ALLOC_NODE_SELECT_MASK (3U) -+ -+/* -+ * Bucket selection policies -+ * */ -+/* Assured bucket policy makes sure the selected bucket is guaranteed -+ * to satisfy the given request. Generally Nodes picked up from such a -+ * bucket need to be further split. 
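The macros above keep the Ghost/Real distinction in the low bits that the minimum 4 KiB chunk alignment leaves free. The hedged sketch below shows how a consumer of a populated RA_BASE_ARRAY_T might decode one entry; the helper name and the concrete addresses in the comment are invented for illustration.

/* Possible array contents after RA_AllocMulti() with 4 KiB chunks
 * (addresses invented):
 *   0x80000000         Real base  - starts a region tracked by a BT
 *   0x80001001         Ghost base - 0x80001000 | RA_BASE_GHOST_BIT, a page
 *                      inside that Real region
 *   INVALID_BASE_ADDR  index not allocated (sparse arrays only)          */
static IMG_BOOL ExampleEntryIsUsable(RA_BASE_T uiEntry, RA_BASE_T *puiPageAddr)
{
    if (RA_BASE_IS_INVALID(uiEntry))
    {
        return IMG_FALSE;              /* sparse index with no allocation */
    }

    /* Both Real and Ghost entries are valid page addresses once the flag
     * bit is stripped; only a Real base owns a boundary tag in the arena. */
    *puiPageAddr = RA_BASE_STRIP_GHOST_BIT(uiEntry);
    return IMG_TRUE;
}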
However picking node that belongs to this -+ * bucket is likely to succeed and thus promises better response times */ -+#define RA_POLICY_BUCKET_ASSURED_FIT (0U) -+/* -+ * Best fit bucket policy selects a bucket with free nodes that are likely -+ * to satisfy the request and nodes that are close to the requested size. -+ * Nodes picked up from this bucket may likely to satisfy the request but not -+ * guaranteed. Failing to satisfy the request from this bucket mean further -+ * higher size buckets are selected in the later iterations till the request -+ * is satisfied. -+ * -+ * Hence response times may vary depending on availability of free nodes -+ * that satisfy the request. -+ * */ -+#define RA_POLICY_BUCKET_BEST_FIT (4U) -+#define RA_POLICY_BUCKET_MASK (4U) -+ -+/* This flag ensures the imports will not be split up and Allocations will always get -+ * their own import -+ */ -+#define RA_POLICY_NO_SPLIT (8U) -+#define RA_POLICY_NO_SPLIT_MASK (8U) -+ -+/* This flag is used in physmem_lma only. it is used to decide if we should -+ * activate the non-contiguous allocation feature of RA MultiAlloc. -+ * Requirements for activation are that the OS implements the -+ * OSMapPageArrayToKernelVA function in osfunc which allows for mapping -+ * physically sparse pages as a virtually contiguous range. -+ * */ -+#define RA_POLICY_ALLOC_ALLOW_NONCONTIG (16U) -+#define RA_POLICY_ALLOC_ALLOW_NONCONTIG_MASK (16U) -+ -+/* -+ * Default Arena Policy -+ * */ -+#define RA_POLICY_DEFAULT (RA_POLICY_ALLOC_FAST | RA_POLICY_BUCKET_ASSURED_FIT) -+ -+/* -+ * Flags in an "import" must match the flags for an allocation -+ */ -+typedef IMG_UINT64 RA_FLAGS_T; -+ -+/*************************************************************************/ /*! -+@Function Callback function PFN_RA_ALLOC -+@Description RA import allocate function -+@Input RA_PERARENA_HANDLE RA handle -+@Input RA_LENGTH_T Request size -+@Input RA_FLAGS_T RA flags -+@Input RA_LENGTH_T Base Alignment -+@Input IMG_CHAR Annotation -+@Input RA_BASE_T Allocation base -+@Input RA_LENGTH_T Actual size -+@Input RA_PERISPAN_HANDLE Per import private data -+@Return PVRSRV_ERROR PVRSRV_OK or error code -+*/ /**************************************************************************/ -+typedef PVRSRV_ERROR (*PFN_RA_ALLOC)(RA_PERARENA_HANDLE, -+ RA_LENGTH_T, -+ RA_FLAGS_T, -+ RA_LENGTH_T, -+ const IMG_CHAR*, -+ RA_BASE_T*, -+ RA_LENGTH_T*, -+ RA_PERISPAN_HANDLE*); -+ -+/*************************************************************************/ /*! -+@Function Callback function PFN_RA_FREE -+@Description RA free imported allocation -+@Input RA_PERARENA_HANDLE RA handle -+@Input RA_BASE_T Allocation base -+@Output RA_PERISPAN_HANDLE Per import private data -+*/ /**************************************************************************/ -+typedef void (*PFN_RA_FREE)(RA_PERARENA_HANDLE, -+ RA_BASE_T, -+ RA_PERISPAN_HANDLE); -+ -+/** -+ * @Function RA_Create -+ * -+ * @Description To create a resource arena. -+ * -+ * @Input name - the name of the arena for diagnostic purposes. -+ * @Input uLog2Quantum - the arena allocation quantum. -+ * @Input ui32LockClass - the lock class level this arena uses. -+ * @Input imp_alloc - a resource allocation callback or 0. -+ * @Input imp_free - a resource de-allocation callback or 0. -+ * @Input per_arena_handle - private handle passed to alloc and free or 0. -+ * @Input ui32PolicyFlags - Policies that govern the arena. -+ * @Return pointer to arena, or NULL. 
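A policy word is built by OR-ing one choice from each of the fields described above. A few hedged examples follow; which combination suits a given heap is a caller decision and nothing here prescribes it.

RA_POLICY_T uiDefaultPolicy = RA_POLICY_DEFAULT;   /* ALLOC_FAST | BUCKET_ASSURED_FIT */

/* Fragmentation-averse arena: best-fit node from a best-fit bucket. */
RA_POLICY_T uiTightPolicy   = RA_POLICY_ALLOC_OPTIMAL |
                              RA_POLICY_BUCKET_BEST_FIT;

/* Keep every allocation on its own import span. */
RA_POLICY_T uiNoSplitPolicy = RA_POLICY_ALLOC_FAST |
                              RA_POLICY_BUCKET_ASSURED_FIT |
                              RA_POLICY_NO_SPLIT;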
-+ */ -+RA_ARENA * -+RA_Create(IMG_CHAR *name, -+ /* subsequent imports: */ -+ RA_LOG2QUANTUM_T uLog2Quantum, -+ IMG_UINT32 ui32LockClass, -+ PFN_RA_ALLOC imp_alloc, -+ PFN_RA_FREE imp_free, -+ RA_PERARENA_HANDLE per_arena_handle, -+ RA_POLICY_T ui32PolicyFlags); -+ -+/** -+ * @Function RA_Create_With_Span -+ * -+ * @Description -+ * -+ * Create a resource arena and initialises it, with a given resource span. -+ * -+ * @Input name - String briefly describing the RA's purpose. -+ * @Input uLog2Quantum - the arena allocation quantum. -+ * @Input ui64CpuBase - CPU Physical Base Address of the RA. -+ * @Input ui64SpanDevBase - Device Physical Base Address of the RA. -+ * @Input ui64SpanSize - Size of the span to add to the created RA. -+ * @Input ui32PolicyFlags - Policies that govern the arena. -+ * @Return pointer to arena, or NULL. -+*/ -+RA_ARENA * -+RA_Create_With_Span(IMG_CHAR *name, -+ RA_LOG2QUANTUM_T uLog2Quantum, -+ IMG_UINT64 ui64CpuBase, -+ IMG_UINT64 ui64SpanDevBase, -+ IMG_UINT64 ui64SpanSize, -+ RA_POLICY_T ui32PolicyFlags); -+ -+/** -+ * @Function RA_Delete -+ * -+ * @Description -+ * -+ * To delete a resource arena. All resources allocated from the arena -+ * must be freed before deleting the arena. -+ * -+ * @Input pArena - the arena to delete. -+ * @Return None -+ */ -+void -+RA_Delete(RA_ARENA *pArena); -+ -+/** -+ * @Function RA_Add -+ * -+ * @Description -+ * -+ * To add a resource span to an arena. The span must not overlap with -+ * any span previously added to the arena. -+ * -+ * @Input pArena - the arena to add a span into. -+ * @Input base - the base of the span. -+ * @Input uSize - the extent of the span. -+ * @Input hPriv - handle associated to the span (reserved for user uses) -+ * @Return IMG_TRUE - success, IMG_FALSE - failure -+ */ -+IMG_BOOL -+RA_Add(RA_ARENA *pArena, -+ RA_BASE_T base, -+ RA_LENGTH_T uSize, -+ RA_FLAGS_T uFlags, -+ RA_PERISPAN_HANDLE hPriv); -+ -+/** -+ * @Function RA_Alloc -+ * -+ * @Description To allocate resource from an arena. -+ * -+ * @Input pArena - the arena -+ * @Input uRequestSize - the size of resource segment requested. -+ * @Input uImportMultiplier - Import x-times of the uRequestSize -+ * for future RA_Alloc calls. -+ * Use RA_NO_IMPORT_MULTIPLIER to import the exact size. -+ * @Input uImportFlags - flags influencing allocation policy. -+ * @Input uAlignment - the alignment constraint required for the -+ * allocated segment, use 0 if alignment not required. -+ * @Input pszAnnotation - a string to describe the allocation -+ * @Output base - allocated base resource -+ * @Output pActualSize - the actual_size of resource segment allocated, -+ * typically rounded up by quantum. -+ * @Output phPriv - the user reference associated with allocated -+ * resource span. -+ * @Return PVRSRV_OK - success -+ */ -+PVRSRV_ERROR -+RA_Alloc(RA_ARENA *pArena, -+ RA_LENGTH_T uRequestSize, -+ IMG_UINT8 uImportMultiplier, -+ RA_FLAGS_T uImportFlags, -+ RA_LENGTH_T uAlignment, -+ const IMG_CHAR *pszAnnotation, -+ RA_BASE_T *base, -+ RA_LENGTH_T *pActualSize, -+ RA_PERISPAN_HANDLE *phPriv); -+ -+/*************************************************************************/ /*! -+@Function RA_AllocMulti -+@Description To allocate resource from an arena. -+ This method of allocation can be used to guarantee that if there -+ is enough space in the RA and the contiguity given is the -+ greatest common divisor of the contiguities used on this RA -+ the allocation can be made. 
-+ Allocations with contiguity less than the current GCD -+ (Greatest Common Divisor) abiding to pow2 are also guaranteed to -+ succeed. See scenario 4. -+ Allocations are not guaranteed but still reduce fragmentation -+ using this method when multiple contiguities are used e.g. -+ 4k & 16k and the current allocation has a contiguity higher than -+ the greatest common divisor used. -+ Scenarios with Log 2 contiguity examples: -+ 1. All allocations have contiguity of 4k. Allocations can be -+ guaranteed given enough RA space since the GCD is always used. -+ 2. Allocations of 4k and 16k contiguity have been previously -+ made on this RA. A new allocation of 4k contiguity is guaranteed -+ to succeed given enough RA space since the contiguity is the GCD. -+ 3. Allocations of 4k and 16k contiguity have been previously made -+ on this RA. A new allocation of 16k contiguity is not guaranteed -+ to succeed since it is not the GCD of all contiguities used. -+ 4. Contiguity 16k and 64k already exist, a 4k contiguity -+ allocation would be guaranteed to succeed but would now be the -+ new GCD. So further allocations would be required to match this -+ GCD to guarantee success. -+ This method does not suffer the same fragmentation pitfalls -+ as RA_Alloc as it constructs the allocation size from many -+ smaller constituent allocations, these are represented and returned -+ in the given array. In addition, Ghost bases are generated in -+ array entries conforming to the chunk size, this allows for -+ representing chunks of any size that work as page addrs -+ in upper levels. -+ The aforementioned array must be at least of size -+ uRequestsize / uiChunkSize, this ensures there is at least one -+ array entry per chunk required. -+ This function must have a uiChunkSize value of -+ at least 4096, this is to ensure space for the base type encoding. -+@Input pArena The arena -+@Input uRequestSize The size of resource requested. -+@Input uiLog2ChunkSize The log2 contiguity multiple of the bases i.e all -+ Real bases must be a multiple in size of this -+ size, also used to generate Ghost bases. -+ Allocations will also be aligned to this value. -+@Input uImportMultiplier Import x-times more for future requests if -+ we have to import new resource. -+@Input uImportFlags Flags influencing allocation policy. -+ required, otherwise must be a power of 2. -+@Input pszAnnotation String to describe the allocation -+@InOut aBaseArray Array of bases to populate. -+@Input uiBaseArraySize Size of the array to populate. -+@Output bPhysContig Are the allocations made in the RA physically -+ contiguous. -+@Return PVRSRV_OK - success -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+RA_AllocMulti(RA_ARENA *pArena, -+ RA_LENGTH_T uRequestSize, -+ IMG_UINT32 uiLog2ChunkSize, -+ IMG_UINT8 uImportMultiplier, -+ RA_FLAGS_T uImportFlags, -+ const IMG_CHAR *pszAnnotation, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize, -+ IMG_BOOL *bPhysContig); -+ -+/** -+ * @Function RA_AllocMultiSparse -+ * -+ * @Description To Alloc resource from an RA arena at the specified indices. -+ * This function follows the same conditions and functionality as -+ * RA_AllocMulti although with the added aspect of specifying the -+ * indices to allocate in the Base Array. This means we can still -+ * attempt to maintain contiguity where possible with the aim of -+ * reducing fragmentation and increasing occurrence of optimal free -+ * scenarios. 
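As the RA_AllocMulti() description above states, the base array must hold at least one entry per chunk (uRequestSize divided by the chunk size) and the chunk size must be at least 4 KiB. A hedged caller-side sketch with assumed sizes:

#define EXAMPLE_LOG2_CHUNK 12u                       /* 4 KiB chunks (minimum) */
#define EXAMPLE_CHUNKS     16u                       /* 64 KiB total request   */

RA_BASE_T aBases[EXAMPLE_CHUNKS];                    /* one entry per chunk    */
IMG_BOOL bPhysContig;
PVRSRV_ERROR eErr = RA_AllocMulti(psArena,
                                  (RA_LENGTH_T)EXAMPLE_CHUNKS << EXAMPLE_LOG2_CHUNK,
                                  EXAMPLE_LOG2_CHUNK,
                                  RA_NO_IMPORT_MULTIPLIER,
                                  0, "multi-example",
                                  aBases, EXAMPLE_CHUNKS,
                                  &bPhysContig);
if (eErr == PVRSRV_OK)
{
    /* aBases now holds one Real or Ghost base per 4 KiB chunk. */
    (void) RA_FreeMulti(psArena, aBases, EXAMPLE_CHUNKS);
}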
-+ * @Input pArena The Arena -+ * @Input uiLog2ChunkSize The log2 contiguity multiple of the bases i.e all -+ * Real bases must be a multiple in size of this -+ * size, also used to generate Ghost bases. -+ * Allocations will also be aligned to this value. -+ * @Input uImportMultiplier Import x-times more for future requests if -+ * we have to import new resource. -+ * @Input uImportFlags Flags influencing allocation policy. -+ * required, otherwise must be a power of 2. -+ * @Input pszAnnotation String to describe the allocation -+ * @InOut aBaseArray Array of bases to populate. -+ * @Input uiBaseArraySize Size of the array to populate. -+ * @Input puiAllocIndices The indices into the array to alloc, if indices are NULL -+ * then we will allocate uiAllocCount chunks sequentially. -+ * @InOut uiAllocCount The number of bases to alloc from the array. -+ * -+ * @Return PVRSRV_OK - success -+ */ -+PVRSRV_ERROR -+RA_AllocMultiSparse(RA_ARENA *pArena, -+ IMG_UINT32 uiLog2ChunkSize, -+ IMG_UINT8 uImportMultiplier, -+ RA_FLAGS_T uImportFlags, -+ const IMG_CHAR *pszAnnotation, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize, -+ IMG_UINT32 *puiAllocIndices, -+ IMG_UINT32 uiAllocCount); -+/** -+ * @Function RA_FreeMulti -+ * -+ * @Description To free a multi-base resource constructed using -+ * a call to RA_AllocMulti. -+ * -+ * @Input pArena - The arena the segment was originally allocated from. -+ * @Input aBaseArray - The array to free bases from. -+ * @Input uiBaseArraysize - Size of the array to free bases from. -+ * -+ * @Return PVRSRV_OK - success -+ */ -+PVRSRV_ERROR -+RA_FreeMulti(RA_ARENA *pArena, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize); -+ -+/** -+ * @Function RA_FreeMultiSparse -+ * -+ * @Description To free part of a multi-base resource constructed using -+ * a call to RA_AllocMulti. -+ * -+ * @Input pArena - The arena the segment was originally allocated from. -+ * @Input aBaseArray - The array to free bases from. -+ * @Input uiBaseArraysize - Size of the array to free bases from. -+ * @Input uiLog2ChunkSize - The log2 chunk size used to generate the Ghost bases. -+ * @Input puiFreeIndices - The indices into the array to free. -+ * @InOut puiFreeCount - The number of bases to free from the array, becomes the number -+ * of bases actually free'd. The in value may differ from the out -+ * value in cases of error when freeing. The out value can then be -+ * used in upper levels to keep any mem tracking structures consistent -+ * with what was actually freed before the error occurred. -+ * -+ * @Return PVRSRV_OK - success -+ */ -+PVRSRV_ERROR -+RA_FreeMultiSparse(RA_ARENA *pArena, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize, -+ IMG_UINT32 uiLog2ChunkSize, -+ IMG_UINT32 *puiFreeIndices, -+ IMG_UINT32 *puiFreeCount); -+ -+/** -+ * @Function RA_Alloc_Range -+ * -+ * @Description -+ * -+ * To allocate a resource at a specified base from an arena. -+ * -+ * @Input pArena - the arena -+ * @Input uRequestSize - the size of resource segment requested. -+ * @Input uImportFlags - flags influencing allocation policy. -+ * @Input uAlignment - the alignment constraint required for the -+ * allocated segment, use 0 if alignment not required. -+ * @Input base - allocated base resource -+ * @Output pActualSize - the actual_size of resource segment allocated, -+ * typically rounded up by quantum. 
-+ * @Return PVRSRV_OK - success -+ */ -+PVRSRV_ERROR -+RA_Alloc_Range(RA_ARENA *pArena, -+ RA_LENGTH_T uRequestSize, -+ RA_FLAGS_T uImportFlags, -+ RA_LENGTH_T uAlignment, -+ RA_BASE_T base, -+ RA_LENGTH_T *pActualSize); -+ -+/** -+ * @Function RA_Free -+ * -+ * @Description To free a resource segment. -+ * -+ * @Input pArena - the arena the segment was originally allocated from. -+ * @Input base - the base of the resource span to free. -+ * -+ * @Return None -+ */ -+void -+RA_Free(RA_ARENA *pArena, RA_BASE_T base); -+ -+/** -+ * @Function RA_SwapSparseMem -+ * -+ * @Description Swaps chunk sized allocations at X<->Y indices. -+ * The function is most optimal when Indices are provided -+ * in ascending order, this allows the internals to optimally -+ * swap based on contiguity and reduces the amount of ghost to -+ * real conversion performed. Note this function can also be used -+ * to move pages, in this case, we effectively swap real allocations -+ * with invalid marked bases. -+ * @Input pArena - The arena. -+ * @InOut aBaseArray - The array to Swap bases in. -+ * @Input uiBaseArraysize - Size of the array to Swap bases in. -+ * @Input uiLog2ChunkSize - The log2 chunk size used to generate the Ghost bases -+ * and size the Real chunks. -+ * @Input puiXIndices - Set of X indices to swap with parallel indices in Y. -+ * @Input puiYIndices - Set of Y indices to swap with parallel indices in X. -+ * @Input uiSwapCount - Number of indices to swap. -+ * -+ * @Return PVRSRV_OK - success -+ */ -+PVRSRV_ERROR -+RA_SwapSparseMem(RA_ARENA *pArena, -+ RA_BASE_ARRAY_T aBaseArray, -+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize, -+ IMG_UINT32 uiLog2ChunkSize, -+ IMG_UINT32 *puiXIndices, -+ IMG_UINT32 *puiYIndices, -+ IMG_UINT32 uiSwapCount); -+ -+/** -+ * @Function RA_Get_Usage_Stats -+ * -+ * @Description To collect the arena usage statistics. -+ * -+ * @Input pArena - the arena to acquire usage statistics from. -+ * @Input psRAStats - the buffer to hold the usage statistics of the arena. -+ * -+ * @Return None -+ */ -+IMG_INTERNAL void -+RA_Get_Usage_Stats(RA_ARENA *pArena, PRA_USAGE_STATS psRAStats); -+ -+/** -+ * @Function RA_GetArenaName -+ * -+ * @Description To obtain the arena name. -+ * -+ * @Input pArena - the arena to acquire the name from. -+ * -+ * @Return IMG_CHAR* Arena name. -+ */ -+IMG_INTERNAL IMG_CHAR * -+RA_GetArenaName(RA_ARENA *pArena); -+ -+IMG_INTERNAL RA_ARENA_ITERATOR * -+RA_IteratorAcquire(RA_ARENA *pArena, IMG_BOOL bIncludeFreeSegments); -+ -+IMG_INTERNAL void -+RA_IteratorReset(RA_ARENA_ITERATOR *pIter); -+ -+IMG_INTERNAL void -+RA_IteratorRelease(RA_ARENA_ITERATOR *pIter); -+ -+IMG_INTERNAL IMG_BOOL -+RA_IteratorNext(RA_ARENA_ITERATOR *pIter, RA_ITERATOR_DATA *pData); -+ -+/*************************************************************************/ /*! -+@Function RA_BlockDump -+@Description Debug dump of all memory allocations within the RA and the space -+ between. A '#' represents a block of memory (the arena's quantum -+ in size) that has been allocated whereas a '.' represents a free -+ block. -+@Input pArena The arena to dump. -+@Input pfnLogDump The dumping method. -+@Input pPrivData Data to be passed into the pfnLogDump method. 
-+*/ /**************************************************************************/ -+IMG_INTERNAL PVRSRV_ERROR -+RA_BlockDump(RA_ARENA *pArena, -+ __printf(2, 3) void (*pfnLogDump)(void*, IMG_CHAR*, ...), -+ void *pPrivData); -+ -+#endif -diff --git a/drivers/gpu/drm/img-rogue/rgx_bridge.h b/drivers/gpu/drm/img-rogue/rgx_bridge.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_bridge.h -@@ -0,0 +1,252 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX Bridge Functionality -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the Rogue Bridge code -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
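Note on the ra.h interface above: the prototypes alone do not show a call sequence, so a minimal, hypothetical usage sketch follows. Only the function names, parameter order, RA_NO_IMPORT_MULTIPLIER and PVRSRV_OK come from the header shown; the pool name, device base address, sizes and annotation string are invented for illustration.

#include "ra.h"

void ExampleArenaUsage(void)
{
	RA_BASE_T uiBase;
	RA_LENGTH_T uiActualSize;
	RA_PERISPAN_HANDLE hPriv;

	/* One 16 MiB span at an invented device address, 4 KiB quantum (log2 = 12). */
	RA_ARENA *psArena = RA_Create_With_Span("example-pool",
	                                        12,               /* uLog2Quantum */
	                                        0,                /* ui64CpuBase */
	                                        0x80000000ULL,    /* ui64SpanDevBase */
	                                        16 * 1024 * 1024, /* ui64SpanSize */
	                                        0);               /* ui32PolicyFlags */
	if (psArena == NULL)
	{
		return;
	}

	/* 64 KiB allocation, 4 KiB aligned, importing only the exact size. */
	if (RA_Alloc(psArena, 64 * 1024, RA_NO_IMPORT_MULTIPLIER, 0, 4096,
	             "example-alloc", &uiBase, &uiActualSize, &hPriv) == PVRSRV_OK)
	{
		RA_Free(psArena, uiBase);
	}

	RA_Delete(psArena);
}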
-+*/ /**************************************************************************/ -+ -+#ifndef RGX_BRIDGE_H -+#define RGX_BRIDGE_H -+ -+#include "pvr_bridge.h" -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+#include "rgx_fwif_km.h" -+ -+#define RGXFWINITPARAMS_VERSION 1 -+#define RGXFWINITPARAMS_EXTENSION 128 -+ -+#include "common_rgxta3d_bridge.h" -+#include "common_rgxcmp_bridge.h" -+#if defined(SUPPORT_FASTRENDER_DM) -+#include "common_rgxtq2_bridge.h" -+#endif -+#if defined(SUPPORT_RGXTQ_BRIDGE) -+#include "common_rgxtq_bridge.h" -+#endif -+#if defined(SUPPORT_USC_BREAKPOINT) -+#include "common_rgxbreakpoint_bridge.h" -+#endif -+#include "common_rgxfwdbg_bridge.h" -+#if defined(PDUMP) -+#include "common_rgxpdump_bridge.h" -+#endif -+#include "common_rgxhwperf_bridge.h" -+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) -+#include "common_rgxregconfig_bridge.h" -+#endif -+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) -+#include "common_rgxkicksync_bridge.h" -+#endif -+#include "common_rgxtimerquery_bridge.h" -+#if defined(SUPPORT_RGXRAY_BRIDGE) -+#include "common_rgxray_bridge.h" -+#endif -+/* -+ * Bridge Cmd Ids -+ */ -+ -+/* *REMEMBER* to update PVRSRV_BRIDGE_RGX_LAST if you add/remove a bridge -+ * group! -+ * Also you need to ensure all PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST offsets -+ * follow on from the previous bridge group's commands! -+ * -+ * If a bridge group is optional, ensure you *ALWAYS* define its index -+ * (e.g. PVRSRV_BRIDGE_RGXCMP is always 151, even if the feature is not -+ * defined). If an optional bridge group is not defined you must still -+ * define PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST for it with an assigned -+ * value of 0. -+ */ -+ -+/* The RGX bridge groups start at 128 (PVRSRV_BRIDGE_RGX_FIRST) rather than -+ * follow-on from the other non-device bridge groups (meaning that they then -+ * won't be displaced if other non-device bridge groups are added). -+ */ -+ -+#define PVRSRV_BRIDGE_RGX_FIRST 128UL -+ -+/* 128: RGX TQ interface functions */ -+#define PVRSRV_BRIDGE_RGXTQ 128UL -+/* The RGXTQ bridge is conditional since the definitions in this header file -+ * support both the rogue and volcanic servers, but the RGXTQ bridge is not -+ * required at all on the Volcanic architecture.
-+ */ -+#if defined(SUPPORT_RGXTQ_BRIDGE) -+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST (PVRSRV_BRIDGE_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTQ_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST (PVRSRV_BRIDGE_DISPATCH_LAST) -+#endif -+ -+/* 129: RGX Compute interface functions */ -+#define PVRSRV_BRIDGE_RGXCMP 129UL -+#define PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST (PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXCMP_CMD_LAST) -+ -+/* 130: RGX TA/3D interface functions */ -+#define PVRSRV_BRIDGE_RGXTA3D 130UL -+#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTA3D_CMD_LAST) -+ -+/* 131: RGX Breakpoint interface functions */ -+#define PVRSRV_BRIDGE_RGXBREAKPOINT 131UL -+#if defined(SUPPORT_USC_BREAKPOINT) -+#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST (PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST) -+#endif -+ -+/* 132: RGX Debug/Misc interface functions */ -+#define PVRSRV_BRIDGE_RGXFWDBG 132UL -+#define PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST (PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST) -+ -+/* 133: RGX PDump interface functions */ -+#define PVRSRV_BRIDGE_RGXPDUMP 133UL -+#if defined(PDUMP) -+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXPDUMP_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST) -+#endif -+ -+/* 134: RGX HWPerf interface functions */ -+#define PVRSRV_BRIDGE_RGXHWPERF 134UL -+#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST) -+ -+/* 135: RGX Register Configuration interface functions */ -+#define PVRSRV_BRIDGE_RGXREGCONFIG 135UL -+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) -+#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST (PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXREGCONFIG_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST) -+#endif -+ -+/* 136: RGX kicksync interface */ -+#define PVRSRV_BRIDGE_RGXKICKSYNC 136UL -+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) -+#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST 0 -+#define 
PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST (PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST) -+#endif -+/* 137: RGX TQ2 interface */ -+#define PVRSRV_BRIDGE_RGXTQ2 137UL -+#if defined(SUPPORT_FASTRENDER_DM) -+#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTQ2_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST (0) -+#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST) -+#endif -+ -+/* 138: RGX timer query interface */ -+#define PVRSRV_BRIDGE_RGXTIMERQUERY 138UL -+#define PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_LAST) -+ -+/* 139: RGX Ray tracing interface */ -+#define PVRSRV_BRIDGE_RGXRAY 139UL -+#if defined(SUPPORT_RGXRAY_BRIDGE) -+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST + 1) -+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST (PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXRAY_CMD_LAST) -+#else -+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST 0 -+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST) -+#endif -+ -+#define PVRSRV_BRIDGE_RGX_LAST (PVRSRV_BRIDGE_RGXRAY) -+#define PVRSRV_BRIDGE_RGX_DISPATCH_LAST (PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST) -+ -+/* bit mask representing the enabled RGX bridges */ -+ -+static const IMG_UINT32 gui32RGXBridges = -+ (1U << (PVRSRV_BRIDGE_RGXTQ - PVRSRV_BRIDGE_RGX_FIRST)) -+#if defined(RGX_FEATURE_COMPUTE) || defined(__KERNEL__) -+ | (1U << (PVRSRV_BRIDGE_RGXCMP - PVRSRV_BRIDGE_RGX_FIRST)) -+#endif -+ | (1U << (PVRSRV_BRIDGE_RGXTA3D - PVRSRV_BRIDGE_RGX_FIRST)) -+#if defined(SUPPORT_BREAKPOINT) -+ | (1U << (PVRSRV_BRIDGE_BREAKPOINT - PVRSRV_BRIDGE_RGX_FIRST)) -+#endif -+ | (1U << (PVRSRV_BRIDGE_RGXFWDBG - PVRSRV_BRIDGE_RGX_FIRST)) -+#if defined(PDUMP) -+ | (1U << (PVRSRV_BRIDGE_RGXPDUMP - PVRSRV_BRIDGE_RGX_FIRST)) -+#endif -+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) -+ | (1U << (PVRSRV_BRIDGE_RGXKICKSYNC - PVRSRV_BRIDGE_RGX_FIRST)) -+#endif -+ | (1U << (PVRSRV_BRIDGE_RGXHWPERF - PVRSRV_BRIDGE_RGX_FIRST)) -+#if defined(SUPPORT_REGCONFIG) -+ | (1U << (PVRSRV_BRIDGE_RGXREGCONFIG - PVRSRV_BRIDGE_RGX_FIRST)) -+#endif -+ | (1U << (PVRSRV_BRIDGE_RGXKICKSYNC - PVRSRV_BRIDGE_RGX_FIRST)) -+#if defined(SUPPORT_FASTRENDER_DM) || defined(__KERNEL__) -+ | (1U << (PVRSRV_BRIDGE_RGXTQ2 - PVRSRV_BRIDGE_RGX_FIRST)) -+#endif -+#if defined(SUPPORT_TIMERQUERY) -+ | (1U << (PVRSRV_BRIDGE_RGXTIMERQUERY - PVRSRV_BRIDGE_RGX_FIRST)) -+#endif -+ | (1U << (PVRSRV_BRIDGE_RGXRAY - PVRSRV_BRIDGE_RGX_FIRST)) -+ ; -+/* bit field representing which RGX bridge groups may optionally not -+ * be present in the server -+ */ -+ -+#define RGX_BRIDGES_OPTIONAL \ -+ ( \ -+ 0 /* no RGX bridges are currently optional */ \ -+ ) -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif /* RGX_BRIDGE_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgx_bridge_init.c b/drivers/gpu/drm/img-rogue/rgx_bridge_init.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_bridge_init.c -@@ -0,0 +1,105 @@ -+/*************************************************************************/ /*! -+@File -+@Title PVR device dependent bridge Init/Deinit Module (kernel side) -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@Description Implements device dependent PVR Bridge init/deinit code -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
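Note on the bridge group numbering in rgx_bridge.h above: each group's _DISPATCH_FIRST is defined as the previous group's _DISPATCH_LAST plus one, and a compiled-out optional group sets its _DISPATCH_LAST to the previous group's value so the chain stays unbroken. The following stand-alone model of that arithmetic uses invented group names and counts, not the driver's macros.

/* Stand-alone model of the FIRST/LAST dispatch chaining used above;
 * all names and numbers here are illustrative. */
#include <stdio.h>

#define GROUP_A_CMD_LAST   10   /* highest command index in group A */
#define GROUP_B_ENABLED    0    /* pretend group B is compiled out  */
#define GROUP_B_CMD_LAST   5
#define GROUP_C_CMD_LAST   7

#define BASE_DISPATCH_LAST 99   /* last non-device dispatch index   */

#define GROUP_A_FIRST (BASE_DISPATCH_LAST + 1)
#define GROUP_A_LAST  (GROUP_A_FIRST + GROUP_A_CMD_LAST)

#if GROUP_B_ENABLED
#define GROUP_B_FIRST (GROUP_A_LAST + 1)
#define GROUP_B_LAST  (GROUP_B_FIRST + GROUP_B_CMD_LAST)
#else
#define GROUP_B_FIRST 0              /* unused, kept defined          */
#define GROUP_B_LAST  (GROUP_A_LAST) /* chain passes straight through */
#endif

#define GROUP_C_FIRST (GROUP_B_LAST + 1)
#define GROUP_C_LAST  (GROUP_C_FIRST + GROUP_C_CMD_LAST)

int main(void)
{
	printf("A: %d..%d\n", GROUP_A_FIRST, GROUP_A_LAST); /* 100..110 */
	printf("C: %d..%d\n", GROUP_C_FIRST, GROUP_C_LAST); /* 111..118 */
	return 0;
}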
-+*/ /**************************************************************************/ -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "rgx_bridge_init.h" -+#include "rgxdevice.h" -+ -+PVRSRV_ERROR InitRGXTQ2Bridge(void); -+void DeinitRGXTQ2Bridge(void); -+PVRSRV_ERROR InitRGXCMPBridge(void); -+void DeinitRGXCMPBridge(void); -+#if defined(SUPPORT_RGXRAY_BRIDGE) -+PVRSRV_ERROR InitRGXRAYBridge(void); -+void DeinitRGXRAYBridge(void); -+#endif -+ -+PVRSRV_ERROR DeviceDepBridgeInit(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ PVRSRV_ERROR eError; -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE)) -+ { -+ eError = InitRGXCMPBridge(); -+ PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXCMPBridge"); -+ } -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) -+ { -+ eError = InitRGXTQ2Bridge(); -+ PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXTQ2Bridge"); -+ } -+ -+#if defined(SUPPORT_RGXRAY_BRIDGE) -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && -+ RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 0) -+ { -+ eError = InitRGXRAYBridge(); -+ PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXRAYBridge"); -+ } -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+void DeviceDepBridgeDeInit(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE)) -+ { -+ DeinitRGXCMPBridge(); -+ } -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) -+ { -+ DeinitRGXTQ2Bridge(); -+ } -+ -+#if defined(SUPPORT_RGXRAY_BRIDGE) -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && -+ RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 0) -+ { -+ DeinitRGXRAYBridge(); -+ } -+#endif -+} -diff --git a/drivers/gpu/drm/img-rogue/rgx_bridge_init.h b/drivers/gpu/drm/img-rogue/rgx_bridge_init.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_bridge_init.h -@@ -0,0 +1,55 @@ -+/*************************************************************************/ /*! -+@File -+@Title PVR device dependent bridge Init/Deinit Module (kernel side) -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements device dependent PVR Bridge init/deinit code -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGX_BRIDGE_INIT_H) -+#define RGX_BRIDGE_INIT_H -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "device.h" -+#include "rgxdevice.h" -+ -+PVRSRV_ERROR DeviceDepBridgeInit(PVRSRV_RGXDEV_INFO *psDevInfo); -+void DeviceDepBridgeDeInit(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+#endif /* RGX_BRIDGE_INIT_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgx_common.h b/drivers/gpu/drm/img-rogue/rgx_common.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_common.h -@@ -0,0 +1,232 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX Common Types and Defines Header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Common types and definitions for RGX software -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef RGX_COMMON_H -+#define RGX_COMMON_H -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+#include "img_defs.h" -+ -+/* Included to get the BVNC_KM_N defined and other feature defs */ -+#include "km/rgxdefs_km.h" -+ -+#include "rgx_common_asserts.h" -+ -+ -+/* Virtualisation validation builds are meant to test the VZ-related hardware without a fully virtualised platform. -+ * As such a driver can support either the vz-validation code or real virtualisation. */ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) && (defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)) -+#error "Invalid build configuration: Virtualisation support (RGX_NUM_DRIVERS_SUPPORTED > 1) and virtualisation validation code (SUPPORT_GPUVIRT_VALIDATION) are mutually exclusive." -+#endif -+ -+/* The RGXFWIF_DM defines assume only one of RGX_FEATURE_TLA or -+ * RGX_FEATURE_FASTRENDER_DM is present. Ensure this with a compile-time check. -+ */ -+#if defined(RGX_FEATURE_TLA) && defined(RGX_FEATURE_FASTRENDER_DM) -+#error "Both RGX_FEATURE_TLA and RGX_FEATURE_FASTRENDER_DM defined. Fix code to handle this!" -+#endif -+ -+/*! The master definition for data masters known to the firmware of RGX. -+ * When a new DM is added to this list, relevant entry should be added to -+ * RGX_HWPERF_DM enum list. -+ * The DM in a V1 HWPerf packet uses this definition. */ -+ -+typedef IMG_UINT32 RGXFWIF_DM; -+ -+#define RGXFWIF_DM_GP IMG_UINT32_C(0) -+/* Either TDM or 2D DM is present. The above build time error is present to verify this */ -+#define RGXFWIF_DM_2D IMG_UINT32_C(1) /* when RGX_FEATURE_TLA defined */ -+#define RGXFWIF_DM_TDM IMG_UINT32_C(1) /* when RGX_FEATURE_FASTRENDER_DM defined */ -+ -+#define RGXFWIF_DM_GEOM IMG_UINT32_C(2) -+#define RGXFWIF_DM_3D IMG_UINT32_C(3) -+#define RGXFWIF_DM_CDM IMG_UINT32_C(4) -+#define RGXFWIF_DM_RAY IMG_UINT32_C(5) -+#define RGXFWIF_DM_GEOM2 IMG_UINT32_C(6) -+#define RGXFWIF_DM_GEOM3 IMG_UINT32_C(7) -+#define RGXFWIF_DM_GEOM4 IMG_UINT32_C(8) -+ -+#define RGXFWIF_DM_LAST RGXFWIF_DM_GEOM4 -+ -+typedef IMG_UINT32 RGX_KICK_TYPE_DM; -+#define RGX_KICK_TYPE_DM_GP IMG_UINT32_C(0x001) -+#define RGX_KICK_TYPE_DM_TDM_2D IMG_UINT32_C(0x002) -+#define RGX_KICK_TYPE_DM_TA IMG_UINT32_C(0x004) -+#define RGX_KICK_TYPE_DM_3D IMG_UINT32_C(0x008) -+#define RGX_KICK_TYPE_DM_CDM IMG_UINT32_C(0x010) -+#define RGX_KICK_TYPE_DM_RTU IMG_UINT32_C(0x020) -+#define RGX_KICK_TYPE_DM_SHG IMG_UINT32_C(0x040) -+#define RGX_KICK_TYPE_DM_TQ2D IMG_UINT32_C(0x080) -+#define RGX_KICK_TYPE_DM_TQ3D IMG_UINT32_C(0x100) -+#define RGX_KICK_TYPE_DM_RAY IMG_UINT32_C(0x200) -+#define RGX_KICK_TYPE_DM_LAST IMG_UINT32_C(0x400) -+ -+/* Maximum number of DM in use: GP, 2D/TDM, GEOM, 3D, CDM, RDM, GEOM2, GEOM3, GEOM4 */ -+#define RGXFWIF_DM_MAX (RGXFWIF_DM_LAST + 1U) -+ -+/* -+ * Data Master Tags to be appended to resources created on behalf of each RGX -+ * Context. 
-+ */ -+#define RGX_RI_DM_TAG_KS 'K' -+#define RGX_RI_DM_TAG_CDM 'C' -+#define RGX_RI_DM_TAG_RC 'R' /* To be removed once TA/3D Timelines are split */ -+#define RGX_RI_DM_TAG_TA 'V' -+#define RGX_RI_DM_TAG_GEOM 'V' -+#define RGX_RI_DM_TAG_3D 'P' -+#define RGX_RI_DM_TAG_TDM 'T' -+#define RGX_RI_DM_TAG_TQ2D '2' -+#define RGX_RI_DM_TAG_TQ3D 'Q' -+#define RGX_RI_DM_TAG_RAY 'r' -+ -+/* -+ * Client API Tags to be appended to resources created on behalf of each -+ * Client API. -+ */ -+#define RGX_RI_CLIENT_API_GLES1 '1' -+#define RGX_RI_CLIENT_API_GLES3 '3' -+#define RGX_RI_CLIENT_API_VULKAN 'V' -+#define RGX_RI_CLIENT_API_EGL 'E' -+#define RGX_RI_CLIENT_API_OPENCL 'C' -+#define RGX_RI_CLIENT_API_OPENGL 'G' -+#define RGX_RI_CLIENT_API_SERVICES 'S' -+#define RGX_RI_CLIENT_API_WSEGL 'W' -+#define RGX_RI_CLIENT_API_ANDROID 'A' -+#define RGX_RI_CLIENT_API_LWS 'L' -+ -+/* -+ * Format a RI annotation for a given RGX Data Master context -+ */ -+#define RGX_RI_FORMAT_DM_ANNOTATION(annotation, dmTag, clientAPI) do \ -+ { \ -+ (annotation)[0] = (dmTag); \ -+ (annotation)[1] = (clientAPI); \ -+ (annotation)[2] = '\0'; \ -+ } while (false) -+ -+/*! -+ ****************************************************************************** -+ * RGXFW Compiler alignment definitions -+ *****************************************************************************/ -+#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES) || defined(INTEGRITY_OS) -+#define RGXFW_ALIGN __attribute__ ((aligned (8))) -+#define RGXFW_ALIGN_DCACHEL __attribute__((aligned (64))) -+#elif defined(_MSC_VER) -+#define RGXFW_ALIGN __declspec(align(8)) -+#define RGXFW_ALIGN_DCACHEL __declspec(align(64)) -+#pragma warning (disable : 4324) -+#else -+#error "Align MACROS need to be defined for this compiler" -+#endif -+ -+/*! -+ ****************************************************************************** -+ * Force 8-byte alignment for structures allocated uncached. -+ *****************************************************************************/ -+#define UNCACHED_ALIGN RGXFW_ALIGN -+ -+ -+/*! -+ ****************************************************************************** -+ * GPU Utilisation states -+ *****************************************************************************/ -+#define RGXFWIF_GPU_UTIL_STATE_IDLE (0U) -+#define RGXFWIF_GPU_UTIL_STATE_ACTIVE (1U) -+#define RGXFWIF_GPU_UTIL_STATE_BLOCKED (2U) -+#define RGXFWIF_GPU_UTIL_STATE_NUM (3U) -+#define RGXFWIF_GPU_UTIL_STATE_MASK IMG_UINT64_C(0x0000000000000003) -+ -+ -+/* -+ * Maximum amount of register writes that can be done by the register -+ * programmer (FW or META DMA). This is not a HW limitation, it is only -+ * a protection against malformed inputs to the register programmer. -+ */ -+#define RGX_MAX_NUM_REGISTER_PROGRAMMER_WRITES (128U) -+ -+/* FW common context priority. */ -+/*! -+ * @AddToGroup WorkloadContexts -+ * @{ -+ */ -+#define RGX_CTX_PRIORITY_REALTIME (INT32_MAX) -+#define RGX_CTX_PRIORITY_HIGH (2) /*!< HIGH priority */ -+#define RGX_CTX_PRIORITY_MEDIUM (1) /*!< MEDIUM priority */ -+#define RGX_CTX_PRIORITY_LOW (0) /*!< LOW priority */ -+/*! 
-+ * @} End of AddToGroup WorkloadContexts -+ */ -+ -+ -+/* -+ * Use of the 32-bit context property flags mask -+ * ( X = taken/in use, - = available/unused ) -+ * -+ * 0 -+ * | -+ * -------------------------------x -+ */ -+/* -+ * Context creation flags -+ * (specify a context's properties at creation time) -+ */ -+#define RGX_CONTEXT_FLAG_DISABLESLR (1UL << 0) /*!< Disable SLR */ -+ -+/* Bitmask of context flags allowed to be modified after context create. */ -+#define RGX_CONTEXT_FLAGS_WRITEABLE_MASK (RGX_CONTEXT_FLAG_DISABLESLR) -+ -+/* List of attributes that may be set for a context */ -+typedef IMG_UINT32 RGX_CONTEXT_PROPERTY; -+#define RGX_CONTEXT_PROPERTY_FLAGS 0U /*!< Context flags */ -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif /* RGX_COMMON_H */ -+ -+/****************************************************************************** -+ End of file -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgx_common_asserts.h b/drivers/gpu/drm/img-rogue/rgx_common_asserts.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_common_asserts.h -@@ -0,0 +1,73 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX Common Types and Defines Header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Common types and definitions for RGX software -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
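Note on RGX_RI_FORMAT_DM_ANNOTATION in rgx_common.h above: the macro simply writes the data-master tag, the client-API tag and a terminator into a caller-supplied buffer. A hypothetical use, with an invented buffer and function name, assuming the tag values defined above:

#include "rgx_common.h"

void ExampleAnnotateComputeContext(void)
{
	IMG_CHAR acAnnotation[3];

	/* 'C' (CDM data master) followed by 'C' (OpenCL client API) and a
	 * terminator, giving the string "CC". */
	RGX_RI_FORMAT_DM_ANNOTATION(acAnnotation,
	                            RGX_RI_DM_TAG_CDM,
	                            RGX_RI_CLIENT_API_OPENCL);

	(void)acAnnotation; /* silence unused-variable warnings in this sketch */
}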
-+*/ /**************************************************************************/ -+#ifndef RGX_COMMON_ASSERTS_H -+#define RGX_COMMON_ASSERTS_H -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+/*! This macro represents a mask of LSBs that must be zero on data structure -+ * sizes and offsets to ensure they are 8-byte granular on types shared between -+ * the FW and host driver */ -+#define RGX_FW_ALIGNMENT_LSB (7U) -+ -+/*! Macro to test structure size alignment */ -+#define RGX_FW_STRUCT_SIZE_ASSERT(_a) \ -+ static_assert((sizeof(_a) & RGX_FW_ALIGNMENT_LSB) == 0U, \ -+ "Size of " #_a " is not properly aligned") -+ -+/*! Macro to test structure member alignment */ -+#define RGX_FW_STRUCT_OFFSET_ASSERT(_a, _b) \ -+ static_assert((offsetof(_a, _b) & RGX_FW_ALIGNMENT_LSB) == 0U, \ -+ "Offset of " #_a "." #_b " is not properly aligned") -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif /* RGX_COMMON_ASSERTS_H */ -+ -+/****************************************************************************** -+ End of file -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgx_compat_bvnc.h b/drivers/gpu/drm/img-rogue/rgx_compat_bvnc.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_compat_bvnc.h -@@ -0,0 +1,140 @@ -+/*************************************************************************/ /*! -+@File rgx_compat_bvnc.h -+@Title BVNC compatibility check utilities -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Utility functions used for packing BNC and V. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
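Note on the assert macros in rgx_common_asserts.h above: they only verify 8-byte granularity (the RGX_FW_ALIGNMENT_LSB mask) of structure sizes and member offsets at compile time. A hypothetical example struct, assuming static_assert and the IMG_* types are available as they are in the driver build:

#include <stddef.h>
#include "img_types.h"
#include "rgx_common_asserts.h"

typedef struct
{
	IMG_UINT64 ui64DevVAddr;  /* offset 0 */
	IMG_UINT32 ui32Flags;     /* offset 8 */
	IMG_UINT32 ui32Padding;   /* keeps the size a multiple of 8 */
} EXAMPLE_FW_SHARED_STRUCT;

/* Both expand to compile-time checks against the 8-byte mask above. */
RGX_FW_STRUCT_SIZE_ASSERT(EXAMPLE_FW_SHARED_STRUCT);
RGX_FW_STRUCT_OFFSET_ASSERT(EXAMPLE_FW_SHARED_STRUCT, ui32Flags);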
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGX_COMPAT_BVNC_H) -+#define RGX_COMPAT_BVNC_H -+ -+#include "img_types.h" -+ -+#if defined(RGX_FIRMWARE) /* Services firmware */ -+# include "rgxfw_utils.h" -+# define PVR_COMPAT_ASSERT RGXFW_ASSERT -+#elif !defined(RGX_BUILD_BINARY) /* Services host driver code */ -+# include "pvr_debug.h" -+# define PVR_COMPAT_ASSERT PVR_ASSERT -+#else /* FW user-mode tools */ -+# include -+# define PVR_COMPAT_ASSERT assert -+#endif -+ -+/* 64bit endian conversion macros */ -+#if defined(__BIG_ENDIAN__) -+#define RGX_INT64_TO_BE(N) (N) -+#define RGX_INT64_FROM_BE(N) (N) -+#define RGX_INT32_TO_BE(N) (N) -+#define RGX_INT32_FROM_BE(N) (N) -+#else -+#define RGX_INT64_TO_BE(N) \ -+ ((((N) >> 56) & 0xff) \ -+ | (((N) >> 40) & 0xff00) \ -+ | (((N) >> 24) & 0xff0000) \ -+ | (((N) >> 8) & 0xff000000U) \ -+ | ((N) << 56) \ -+ | (((N) & 0xff00) << 40) \ -+ | (((N) & 0xff0000) << 24) \ -+ | (((N) & 0xff000000U) << 8)) -+#define RGX_INT64_FROM_BE(N) RGX_INT64_TO_BE(N) -+ -+#define RGX_INT32_TO_BE(N) \ -+ ((((N) >> 24) & 0xff) \ -+ | (((N) >> 8) & 0xff00) \ -+ | ((N) << 24) \ -+ | ((((N) & 0xff00) << 8))) -+#define RGX_INT32_FROM_BE(N) RGX_INT32_TO_BE(N) -+#endif -+ -+/****************************************************************************** -+ * RGX Version packed into 64-bit (BVNC) to be used by Compatibility Check -+ *****************************************************************************/ -+ -+#define RGX_BVNC_PACK_SHIFT_B 48 -+#define RGX_BVNC_PACK_SHIFT_V 32 -+#define RGX_BVNC_PACK_SHIFT_N 16 -+#define RGX_BVNC_PACK_SHIFT_C 0 -+ -+#define RGX_BVNC_PACK_MASK_B (IMG_UINT64_C(0xFFFF000000000000)) -+#define RGX_BVNC_PACK_MASK_V (IMG_UINT64_C(0x0000FFFF00000000)) -+#define RGX_BVNC_PACK_MASK_N (IMG_UINT64_C(0x00000000FFFF0000)) -+#define RGX_BVNC_PACK_MASK_C (IMG_UINT64_C(0x000000000000FFFF)) -+ -+#define RGX_BVNC_PACKED_EXTR_B(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_B) >> RGX_BVNC_PACK_SHIFT_B)) -+#define RGX_BVNC_PACKED_EXTR_V(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_V) >> RGX_BVNC_PACK_SHIFT_V)) -+#define RGX_BVNC_PACKED_EXTR_N(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_N) >> RGX_BVNC_PACK_SHIFT_N)) -+#define RGX_BVNC_PACKED_EXTR_C(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_C) >> RGX_BVNC_PACK_SHIFT_C)) -+ -+#define RGX_BVNC_EQUAL(L,R,all,version,bvnc) do { \ -+ (bvnc) = IMG_FALSE; \ -+ (version) = ((L).ui32LayoutVersion == (R).ui32LayoutVersion); \ -+ if (version) \ -+ { \ -+ (bvnc) = ((L).ui64BVNC == (R).ui64BVNC); \ -+ } \ -+ (all) = (version) && (bvnc); \ -+ } while (false) -+ -+ -+/**************************************************************************//** -+ * Utility function for packing BVNC -+ *****************************************************************************/ -+static inline IMG_UINT64 rgx_bvnc_pack(IMG_UINT32 ui32B, IMG_UINT32 ui32V, IMG_UINT32 ui32N, IMG_UINT32 ui32C) -+{ -+ /* -+ * Test for input B, V, N and C 
exceeding max bit width. -+ */ -+ PVR_COMPAT_ASSERT((ui32B & (~(RGX_BVNC_PACK_MASK_B >> RGX_BVNC_PACK_SHIFT_B))) == 0U); -+ PVR_COMPAT_ASSERT((ui32V & (~(RGX_BVNC_PACK_MASK_V >> RGX_BVNC_PACK_SHIFT_V))) == 0U); -+ PVR_COMPAT_ASSERT((ui32N & (~(RGX_BVNC_PACK_MASK_N >> RGX_BVNC_PACK_SHIFT_N))) == 0U); -+ PVR_COMPAT_ASSERT((ui32C & (~(RGX_BVNC_PACK_MASK_C >> RGX_BVNC_PACK_SHIFT_C))) == 0U); -+ -+ return (((IMG_UINT64)ui32B << RGX_BVNC_PACK_SHIFT_B) | -+ ((IMG_UINT64)ui32V << RGX_BVNC_PACK_SHIFT_V) | -+ ((IMG_UINT64)ui32N << RGX_BVNC_PACK_SHIFT_N) | -+ ((IMG_UINT64)ui32C << RGX_BVNC_PACK_SHIFT_C)); -+} -+ -+ -+#endif /* RGX_COMPAT_BVNC_H */ -+ -+/****************************************************************************** -+ End of file (rgx_compat_bvnc.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgx_fw_info.h b/drivers/gpu/drm/img-rogue/rgx_fw_info.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_fw_info.h -@@ -0,0 +1,144 @@ -+/*************************************************************************/ /*! -+@File -+@Title FW image information -+ -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Utility functions used internally for HWPerf data retrieval -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
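Note on the BVNC packing in rgx_compat_bvnc.h above: each of B, V, N and C occupies a 16-bit field of the packed 64-bit value. A small, hypothetical round-trip check using the helpers shown; the 4.31.22.3 BVNC is made up:

#include "rgx_compat_bvnc.h"

void ExampleBvncRoundTrip(void)
{
	/* Pack an invented 4.31.22.3 BVNC; each field is 16 bits wide:
	 * B in bits [63:48], V in [47:32], N in [31:16], C in [15:0]. */
	IMG_UINT64 ui64BVNC = rgx_bvnc_pack(4U, 31U, 22U, 3U);

	PVR_COMPAT_ASSERT(RGX_BVNC_PACKED_EXTR_B(ui64BVNC) == 4U);
	PVR_COMPAT_ASSERT(RGX_BVNC_PACKED_EXTR_V(ui64BVNC) == 31U);
	PVR_COMPAT_ASSERT(RGX_BVNC_PACKED_EXTR_N(ui64BVNC) == 22U);
	PVR_COMPAT_ASSERT(RGX_BVNC_PACKED_EXTR_C(ui64BVNC) == 3U);
}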
-+*/ /**************************************************************************/ -+ -+#if !defined(RGX_FW_INFO_H) -+#define RGX_FW_INFO_H -+ -+#include "img_types.h" -+#include "rgx_common.h" -+ -+/* -+ * Firmware binary block unit in bytes. -+ * Raw data stored in FW binary will be aligned to this size. -+ */ -+#define FW_BLOCK_SIZE 4096L -+ -+typedef enum -+{ -+ META_CODE = 0, -+ META_PRIVATE_DATA, -+ META_COREMEM_CODE, -+ META_COREMEM_DATA, -+ MIPS_CODE, -+ MIPS_EXCEPTIONS_CODE, -+ MIPS_BOOT_CODE, -+ MIPS_PRIVATE_DATA, -+ MIPS_BOOT_DATA, -+ MIPS_STACK, -+ RISCV_UNCACHED_CODE, -+ RISCV_CACHED_CODE, -+ RISCV_PRIVATE_DATA, -+ RISCV_COREMEM_CODE, -+ RISCV_COREMEM_DATA, -+} RGX_FW_SECTION_ID; -+ -+typedef enum -+{ -+ NONE = 0, -+ FW_CODE, -+ FW_DATA, -+ FW_COREMEM_CODE, -+ FW_COREMEM_DATA -+} RGX_FW_SECTION_TYPE; -+ -+ -+/* -+ * FW binary format with FW info attached: -+ * -+ * Contents Offset -+ * +-----------------+ -+ * | | 0 -+ * | | -+ * | Original binary | -+ * | file | -+ * | (.ldr/.elf) | -+ * | | -+ * | | -+ * +-----------------+ -+ * | FW info header | FILE_SIZE - 4K -+ * +-----------------+ -+ * | | -+ * | FW layout table | -+ * | | -+ * +-----------------+ -+ * FILE_SIZE -+ */ -+ -+#define FW_INFO_VERSION (2) -+ -+/* Firmware is built for open source driver and uses open source version numbering */ -+#define FW_INFO_FLAGS_OPEN_SOURCE (1U) -+ -+typedef struct -+{ -+ /* FW_INFO_VERSION 1 */ -+ IMG_UINT32 ui32InfoVersion; /* FW info version */ -+ IMG_UINT32 ui32HeaderLen; /* Header length */ -+ IMG_UINT32 ui32LayoutEntryNum; /* Number of entries in the layout table */ -+ IMG_UINT32 ui32LayoutEntrySize; /* Size of an entry in the layout table */ -+ IMG_UINT64 RGXFW_ALIGN ui64BVNC; /* BVNC */ -+ IMG_UINT32 ui32FwPageSize; /* Page size of processor on which firmware executes */ -+ IMG_UINT32 ui32Flags; /* Compatibility flags */ -+ -+ /* FW_INFO_VERSION 2 */ -+ IMG_UINT16 ui16PVRVersionMajor; /* DDK major version number */ -+ IMG_UINT16 ui16PVRVersionMinor; /* DDK minor version number */ -+ IMG_UINT32 ui32PVRVersionBuild; /* DDK build number */ -+} RGX_FW_INFO_HEADER; -+ -+typedef struct -+{ -+ RGX_FW_SECTION_ID eId; -+ RGX_FW_SECTION_TYPE eType; -+ IMG_UINT32 ui32BaseAddr; -+ IMG_UINT32 ui32MaxSize; -+ IMG_UINT32 ui32AllocSize; -+ IMG_UINT32 ui32AllocOffset; -+} RGX_FW_LAYOUT_ENTRY; -+ -+#endif /* RGX_FW_INFO_H */ -+ -+/****************************************************************************** -+ End of file (rgx_fw_info.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgx_fwif_alignchecks.h b/drivers/gpu/drm/img-rogue/rgx_fwif_alignchecks.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_fwif_alignchecks.h -@@ -0,0 +1,192 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX fw interface alignment checks -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Checks to avoid disalignment in RGX fw data structures -+ shared with the host -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
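Note on the firmware image layout described in rgx_fw_info.h above: the RGX_FW_INFO_HEADER sits one FW_BLOCK_SIZE (4 KiB) before the end of the image, with the layout table following it. A hedged sketch of how a loader might locate the header; the function name and buffer handling are invented:

#include <string.h>
#include "rgx_fw_info.h"

IMG_BOOL ExampleLocateFwInfo(const IMG_UINT8 *pui8Image, size_t uImageSize,
                             RGX_FW_INFO_HEADER *psHeaderOut)
{
	const IMG_UINT8 *pui8Header;

	if (uImageSize < (size_t)FW_BLOCK_SIZE)
	{
		return IMG_FALSE;
	}

	/* The FW info header starts FW_BLOCK_SIZE bytes before the end of the
	 * image; the layout table (ui32LayoutEntryNum entries of
	 * ui32LayoutEntrySize bytes each) follows ui32HeaderLen bytes after it. */
	pui8Header = pui8Image + uImageSize - (size_t)FW_BLOCK_SIZE;
	memcpy(psHeaderOut, pui8Header, sizeof(*psHeaderOut));

	return (psHeaderOut->ui32InfoVersion != 0U) ? IMG_TRUE : IMG_FALSE;
}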
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGX_FWIF_ALIGNCHECKS_H) -+#define RGX_FWIF_ALIGNCHECKS_H -+ -+/* for the offsetof macro */ -+#if defined(__KERNEL__) && defined(__linux__) -+#include -+#else -+#include -+#endif -+ -+/*! 
-+ ****************************************************************************** -+ * Alignment UM/FW checks array -+ *****************************************************************************/ -+ -+#define RGXFW_ALIGN_CHECKS_UM_MAX 128U -+ -+#define RGXFW_ALIGN_CHECKS_INIT0 \ -+ (IMG_UINT32)sizeof(RGXFWIF_TRACEBUF), \ -+ offsetof(RGXFWIF_TRACEBUF, ui32LogType), \ -+ offsetof(RGXFWIF_TRACEBUF, sTraceBuf), \ -+ offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords), \ -+ offsetof(RGXFWIF_TRACEBUF, ui32TracebufFlags), \ -+ \ -+ (IMG_UINT32)sizeof(RGXFWIF_SYSDATA), \ -+ offsetof(RGXFWIF_SYSDATA, ePowState), \ -+ offsetof(RGXFWIF_SYSDATA, ui32HWPerfDropCount), \ -+ offsetof(RGXFWIF_SYSDATA, ui32LastDropOrdinal), \ -+ offsetof(RGXFWIF_SYSDATA, ui32FWFaults), \ -+ offsetof(RGXFWIF_SYSDATA, ui32HWRStateFlags), \ -+ \ -+ (IMG_UINT32)sizeof(RGXFWIF_OSDATA), \ -+ offsetof(RGXFWIF_OSDATA, ui32HostSyncCheckMark), \ -+ offsetof(RGXFWIF_OSDATA, ui32KCCBCmdsExecuted), \ -+ \ -+ (IMG_UINT32)sizeof(RGXFWIF_HWRINFOBUF), \ -+ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmLockedUpCount), \ -+ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmOverranCount), \ -+ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmRecoveredCount), \ -+ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmFalseDetectCount), \ -+ \ -+ /* RGXFWIF_CMDTA checks */ \ -+ (IMG_UINT32)sizeof(RGXFWIF_CMDTA), \ -+ offsetof(RGXFWIF_CMDTA, sGeomRegs), \ -+ \ -+ /* RGXFWIF_CMD3D checks */ \ -+ (IMG_UINT32)sizeof(RGXFWIF_CMD3D), \ -+ offsetof(RGXFWIF_CMD3D, s3DRegs), \ -+ \ -+ /* RGXFWIF_CMDTRANSFER checks */ \ -+ (IMG_UINT32)sizeof(RGXFWIF_CMDTRANSFER), \ -+ offsetof(RGXFWIF_CMDTRANSFER, sTransRegs), \ -+ \ -+ \ -+ /* RGXFWIF_CMD_COMPUTE checks */ \ -+ (IMG_UINT32)sizeof(RGXFWIF_CMD_COMPUTE), \ -+ offsetof(RGXFWIF_CMD_COMPUTE, sCDMRegs), \ -+ \ -+ /* RGXFWIF_FREELIST checks */ \ -+ (IMG_UINT32)sizeof(RGXFWIF_FREELIST), \ -+ offsetof(RGXFWIF_FREELIST, psFreeListDevVAddr), \ -+ offsetof(RGXFWIF_FREELIST, ui32MaxPages), \ -+ offsetof(RGXFWIF_FREELIST, ui32CurrentPages), \ -+ \ -+ /* RGXFWIF_HWRTDATA checks */ \ -+ (IMG_UINT32)sizeof(RGXFWIF_HWRTDATA), \ -+ offsetof(RGXFWIF_HWRTDATA, psVHeapTableDevVAddr), \ -+ offsetof(RGXFWIF_HWRTDATA, psPMMListDevVAddr), \ -+ offsetof(RGXFWIF_HWRTDATA, apsFreeLists), \ -+ offsetof(RGXFWIF_HWRTDATA, ui64VCECatBase), \ -+ offsetof(RGXFWIF_HWRTDATA, eState), \ -+ \ -+ /* RGXFWIF_HWRTDATA_COMMON checks */ \ -+ (IMG_UINT32)sizeof(RGXFWIF_HWRTDATA_COMMON), \ -+ offsetof(RGXFWIF_HWRTDATA_COMMON, bTACachesNeedZeroing),\ -+ \ -+ /* RGXFWIF_HWPERF_CTL_BLK checks */ \ -+ (IMG_UINT32)sizeof(RGXFWIF_HWPERF_CTL_BLK), \ -+ offsetof(RGXFWIF_HWPERF_CTL_BLK, aui64CounterCfg), \ -+ \ -+ /* RGXFWIF_HWPERF_CTL checks */ \ -+ (IMG_UINT32)sizeof(RGXFWIF_HWPERF_CTL), \ -+ offsetof(RGXFWIF_HWPERF_CTL, SelCntr) -+ -+#if defined(RGX_FEATURE_TLA) -+#define RGXFW_ALIGN_CHECKS_INIT1 \ -+ RGXFW_ALIGN_CHECKS_INIT0, \ -+ /* RGXFWIF_CMD2D checks */ \ -+ (IMG_UINT32)sizeof(RGXFWIF_CMD2D), \ -+ offsetof(RGXFWIF_CMD2D, s2DRegs) -+#else -+#define RGXFW_ALIGN_CHECKS_INIT1 RGXFW_ALIGN_CHECKS_INIT0 -+#endif /* RGX_FEATURE_TLA */ -+ -+ -+#if defined(RGX_FEATURE_FASTRENDER_DM) -+#define RGXFW_ALIGN_CHECKS_INIT \ -+ RGXFW_ALIGN_CHECKS_INIT1, \ -+ /* RGXFWIF_CMDTDM checks */ \ -+ (IMG_UINT32)sizeof(RGXFWIF_CMDTDM), \ -+ offsetof(RGXFWIF_CMDTDM, sTDMRegs) -+#else -+#define RGXFW_ALIGN_CHECKS_INIT RGXFW_ALIGN_CHECKS_INIT1 -+#endif /* ! RGX_FEATURE_FASTRENDER_DM */ -+ -+ -+ -+/*! 
-+ ****************************************************************************** -+ * Alignment KM checks array -+ *****************************************************************************/ -+ -+#define RGXFW_ALIGN_CHECKS_INIT_KM \ -+ (IMG_UINT32)sizeof(RGXFWIF_SYSINIT), \ -+ offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr), \ -+ offsetof(RGXFWIF_SYSINIT, sPDSExecBase), \ -+ offsetof(RGXFWIF_SYSINIT, sUSCExecBase), \ -+ offsetof(RGXFWIF_SYSINIT, asSigBufCtl), \ -+ offsetof(RGXFWIF_SYSINIT, sTraceBufCtl), \ -+ offsetof(RGXFWIF_SYSINIT, sFwSysData), \ -+ (IMG_UINT32)sizeof(RGXFWIF_OSINIT), \ -+ offsetof(RGXFWIF_OSINIT, psKernelCCBCtl), \ -+ offsetof(RGXFWIF_OSINIT, psKernelCCB), \ -+ offsetof(RGXFWIF_OSINIT, psFirmwareCCBCtl), \ -+ offsetof(RGXFWIF_OSINIT, psFirmwareCCB), \ -+ offsetof(RGXFWIF_OSINIT, sFwOsData), \ -+ offsetof(RGXFWIF_OSINIT, sRGXCompChecks), \ -+ \ -+ /* RGXFWIF_FWRENDERCONTEXT checks */ \ -+ (IMG_UINT32)sizeof(RGXFWIF_FWRENDERCONTEXT), \ -+ offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), \ -+ offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), \ -+ \ -+ (IMG_UINT32)sizeof(RGXFWIF_FWCOMMONCONTEXT), \ -+ offsetof(RGXFWIF_FWCOMMONCONTEXT, psFWMemContext), \ -+ offsetof(RGXFWIF_FWCOMMONCONTEXT, sRunNode), \ -+ offsetof(RGXFWIF_FWCOMMONCONTEXT, psCCB), \ -+ \ -+ (IMG_UINT32)sizeof(RGXFWIF_MMUCACHEDATA), \ -+ offsetof(RGXFWIF_MMUCACHEDATA, ui32CacheFlags), \ -+ offsetof(RGXFWIF_MMUCACHEDATA, sMMUCacheSync), \ -+ offsetof(RGXFWIF_MMUCACHEDATA, ui32MMUCacheSyncUpdateValue) -+ -+#endif /* RGX_FWIF_ALIGNCHECKS_H */ -+ -+/****************************************************************************** -+ End of file (rgx_fwif_alignchecks.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgx_fwif_hwperf.h b/drivers/gpu/drm/img-rogue/rgx_fwif_hwperf.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_fwif_hwperf.h -@@ -0,0 +1,253 @@ -+/*************************************************************************/ /*! -+@File rgx_fwif_hwperf.h -+@Title RGX HWPerf support -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Shared header between RGX firmware and Init process -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef RGX_FWIF_HWPERF_H -+#define RGX_FWIF_HWPERF_H -+ -+#include "rgx_fwif_shared.h" -+#include "rgx_hwperf.h" -+#include "rgxdefs_km.h" -+ -+ -+/*****************************************************************************/ -+ -+/* Structure to hold a block's parameters for passing between the BG context -+ * and the IRQ context when applying a configuration request. */ -+typedef struct -+{ -+ /* Few members could be booleans but padded to IMG_UINT32 -+ * to workaround pdump alignment requirements */ -+ IMG_UINT32 ui32Valid; -+ IMG_UINT32 ui32Enabled; -+ IMG_UINT32 eBlockID; -+ IMG_UINT32 uiCounterMask; -+ IMG_UINT64 RGXFW_ALIGN aui64CounterCfg[RGX_CNTBLK_MUX_COUNTERS_MAX]; -+} RGXFWIF_HWPERF_CTL_BLK; -+ -+/* Structure used to hold the configuration of the non-mux counters blocks */ -+typedef struct -+{ -+ IMG_UINT32 ui32NumSelectedCounters; -+ IMG_UINT32 aui32SelectedCountersIDs[RGX_HWPERF_MAX_CUSTOM_CNTRS]; -+} RGXFW_HWPERF_SELECT; -+ -+/* Structure used to hold a Direct-Addressable block's parameters for passing -+ * between the BG context and the IRQ context when applying a configuration -+ * request. HWPERF_UNIFIED use only. -+ */ -+typedef struct -+{ -+ IMG_UINT32 uiEnabled; -+ IMG_UINT32 uiNumCounters; -+ IMG_UINT32 eBlockID; -+ RGXFWIF_DEV_VIRTADDR psModel; -+ IMG_UINT32 aui32Counters[RGX_CNTBLK_COUNTERS_MAX]; -+} RGXFWIF_HWPERF_DA_BLK; -+ -+ -+/* Structure to hold the whole configuration request details for all blocks -+ * The block masks and counts are used to optimise reading of this data. */ -+typedef struct -+{ -+ IMG_UINT32 ui32HWPerfCtlFlags; -+ -+ IMG_UINT32 ui32SelectedCountersBlockMask; -+ RGXFW_HWPERF_SELECT RGXFW_ALIGN SelCntr[RGX_HWPERF_MAX_CUSTOM_BLKS]; -+ -+ IMG_UINT32 ui32EnabledMUXBlksCount; -+ RGXFWIF_HWPERF_CTL_BLK RGXFW_ALIGN sBlkCfg[RGX_HWPERF_MAX_MUX_BLKS]; -+} UNCACHED_ALIGN RGXFWIF_HWPERF_CTL; -+ -+/* NOTE: The switch statement in this function must be kept in alignment with -+ * the enumeration RGX_HWPERF_CNTBLK_ID defined in rgx_hwperf.h. ASSERTs may -+ * result if not. -+ * The function provides a hash lookup to get a handle on the global store for -+ * a block's configuration store from it's block ID. 
-+ */ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(rgxfw_hwperf_get_block_ctl) -+#endif -+static INLINE RGXFWIF_HWPERF_CTL_BLK *rgxfw_hwperf_get_block_ctl( -+ RGX_HWPERF_CNTBLK_ID eBlockID, RGXFWIF_HWPERF_CTL *psHWPerfInitData) -+{ -+ IMG_UINT32 ui32Idx; -+ -+ /* Hash the block ID into a control configuration array index */ -+ switch (eBlockID) -+ { -+ case RGX_CNTBLK_ID_TA: -+ case RGX_CNTBLK_ID_RASTER: -+ case RGX_CNTBLK_ID_HUB: -+ case RGX_CNTBLK_ID_TORNADO: -+ case RGX_CNTBLK_ID_JONES: -+ { -+ ui32Idx = eBlockID; -+ break; -+ } -+ case RGX_CNTBLK_ID_TPU_MCU0: -+ case RGX_CNTBLK_ID_TPU_MCU1: -+ case RGX_CNTBLK_ID_TPU_MCU2: -+ case RGX_CNTBLK_ID_TPU_MCU3: -+ case RGX_CNTBLK_ID_TPU_MCU4: -+ case RGX_CNTBLK_ID_TPU_MCU5: -+ case RGX_CNTBLK_ID_TPU_MCU6: -+ case RGX_CNTBLK_ID_TPU_MCU7: -+ { -+ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + -+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); -+ break; -+ } -+ case RGX_CNTBLK_ID_USC0: -+ case RGX_CNTBLK_ID_USC1: -+ case RGX_CNTBLK_ID_USC2: -+ case RGX_CNTBLK_ID_USC3: -+ case RGX_CNTBLK_ID_USC4: -+ case RGX_CNTBLK_ID_USC5: -+ case RGX_CNTBLK_ID_USC6: -+ case RGX_CNTBLK_ID_USC7: -+ case RGX_CNTBLK_ID_USC8: -+ case RGX_CNTBLK_ID_USC9: -+ case RGX_CNTBLK_ID_USC10: -+ case RGX_CNTBLK_ID_USC11: -+ case RGX_CNTBLK_ID_USC12: -+ case RGX_CNTBLK_ID_USC13: -+ case RGX_CNTBLK_ID_USC14: -+ case RGX_CNTBLK_ID_USC15: -+ { -+ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + -+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + -+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); -+ break; -+ } -+ case RGX_CNTBLK_ID_TEXAS0: -+ case RGX_CNTBLK_ID_TEXAS1: -+ case RGX_CNTBLK_ID_TEXAS2: -+ case RGX_CNTBLK_ID_TEXAS3: -+ case RGX_CNTBLK_ID_TEXAS4: -+ case RGX_CNTBLK_ID_TEXAS5: -+ case RGX_CNTBLK_ID_TEXAS6: -+ case RGX_CNTBLK_ID_TEXAS7: -+ { -+ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + -+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + -+ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) + -+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); -+ break; -+ } -+ case RGX_CNTBLK_ID_RASTER0: -+ case RGX_CNTBLK_ID_RASTER1: -+ case RGX_CNTBLK_ID_RASTER2: -+ case RGX_CNTBLK_ID_RASTER3: -+ { -+ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + -+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + -+ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) + -+ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) + -+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); -+ break; -+ } -+ case RGX_CNTBLK_ID_BLACKPEARL0: -+ case RGX_CNTBLK_ID_BLACKPEARL1: -+ case RGX_CNTBLK_ID_BLACKPEARL2: -+ case RGX_CNTBLK_ID_BLACKPEARL3: -+ { -+ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + -+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + -+ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) + -+ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) + -+ RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) + -+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); -+ break; -+ } -+ case RGX_CNTBLK_ID_PBE0: -+ case RGX_CNTBLK_ID_PBE1: -+ case RGX_CNTBLK_ID_PBE2: -+ case RGX_CNTBLK_ID_PBE3: -+ case RGX_CNTBLK_ID_PBE4: -+ case RGX_CNTBLK_ID_PBE5: -+ case RGX_CNTBLK_ID_PBE6: -+ case RGX_CNTBLK_ID_PBE7: -+ case RGX_CNTBLK_ID_PBE8: -+ case RGX_CNTBLK_ID_PBE9: -+ case RGX_CNTBLK_ID_PBE10: -+ case RGX_CNTBLK_ID_PBE11: -+ case RGX_CNTBLK_ID_PBE12: -+ case RGX_CNTBLK_ID_PBE13: -+ case RGX_CNTBLK_ID_PBE14: -+ case RGX_CNTBLK_ID_PBE15: -+ { -+ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + -+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + -+ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) + -+ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) + -+ RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) + -+ RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3) + -+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); -+ break; -+ } -+ default: -+ { -+ ui32Idx = RGX_HWPERF_MAX_DEFINED_BLKS; -+ break; -+ } -+ } -+ if (ui32Idx >= 
RGX_HWPERF_MAX_DEFINED_BLKS) -+ { -+ return NULL; -+ } -+ return &psHWPerfInitData->sBlkCfg[ui32Idx]; -+} -+ -+/* Stub routine for rgxfw_hwperf_get_da_block_ctl(). Just return a NULL. -+ */ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(rgxfw_hwperf_get_da_block_ctl) -+#endif -+static INLINE RGXFWIF_HWPERF_DA_BLK* rgxfw_hwperf_get_da_block_ctl( -+ RGX_HWPERF_CNTBLK_ID eBlockID, RGXFWIF_HWPERF_CTL *psHWPerfInitData) -+{ -+ PVR_UNREFERENCED_PARAMETER(eBlockID); -+ PVR_UNREFERENCED_PARAMETER(psHWPerfInitData); -+ -+ return NULL; -+} -+#endif -diff --git a/drivers/gpu/drm/img-rogue/rgx_fwif_km.h b/drivers/gpu/drm/img-rogue/rgx_fwif_km.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_fwif_km.h -@@ -0,0 +1,2666 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX firmware interface structures used by pvrsrvkm -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX firmware interface structures used by pvrsrvkm -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGX_FWIF_KM_H) -+#define RGX_FWIF_KM_H -+ -+#include "img_types.h" -+#include "rgx_fwif_shared.h" -+#include "rgxdefs_km.h" -+#include "dllist.h" -+#include "rgx_hwperf.h" -+#include "rgx_mips.h" -+#include "rgxheapconfig.h" -+ -+ -+/*************************************************************************/ /*! 
-+ Logging type -+*/ /**************************************************************************/ -+#define RGXFWIF_LOG_TYPE_NONE 0x00000000U -+#define RGXFWIF_LOG_TYPE_TRACE 0x00000001U -+#define RGXFWIF_LOG_TYPE_GROUP_MAIN 0x00000002U -+#define RGXFWIF_LOG_TYPE_GROUP_MTS 0x00000004U -+#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U -+#define RGXFWIF_LOG_TYPE_GROUP_CSW 0x00000010U -+#define RGXFWIF_LOG_TYPE_GROUP_BIF 0x00000020U -+#define RGXFWIF_LOG_TYPE_GROUP_PM 0x00000040U -+#define RGXFWIF_LOG_TYPE_GROUP_RTD 0x00000080U -+#define RGXFWIF_LOG_TYPE_GROUP_SPM 0x00000100U -+#define RGXFWIF_LOG_TYPE_GROUP_POW 0x00000200U -+#define RGXFWIF_LOG_TYPE_GROUP_HWR 0x00000400U -+#define RGXFWIF_LOG_TYPE_GROUP_HWP 0x00000800U -+#define RGXFWIF_LOG_TYPE_GROUP_RPM 0x00001000U -+#define RGXFWIF_LOG_TYPE_GROUP_DMA 0x00002000U -+#define RGXFWIF_LOG_TYPE_GROUP_MISC 0x00004000U -+#define RGXFWIF_LOG_TYPE_GROUP_DEBUG 0x80000000U -+#define RGXFWIF_LOG_TYPE_GROUP_MASK 0x80007FFEU -+#define RGXFWIF_LOG_TYPE_MASK 0x80007FFFU -+ -+/* String used in pvrdebug -h output */ -+#define RGXFWIF_LOG_GROUPS_STRING_LIST "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug" -+ -+/* Table entry to map log group strings to log type value */ -+typedef struct { -+ const IMG_CHAR* pszLogGroupName; -+ IMG_UINT32 ui32LogGroupType; -+} RGXFWIF_LOG_GROUP_MAP_ENTRY; -+ -+/* -+ Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup -+ table where needed. Keep log group names short, no more than 20 chars. -+*/ -+#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "none", RGXFWIF_LOG_TYPE_NONE }, \ -+ { "main", RGXFWIF_LOG_TYPE_GROUP_MAIN }, \ -+ { "mts", RGXFWIF_LOG_TYPE_GROUP_MTS }, \ -+ { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \ -+ { "csw", RGXFWIF_LOG_TYPE_GROUP_CSW }, \ -+ { "bif", RGXFWIF_LOG_TYPE_GROUP_BIF }, \ -+ { "pm", RGXFWIF_LOG_TYPE_GROUP_PM }, \ -+ { "rtd", RGXFWIF_LOG_TYPE_GROUP_RTD }, \ -+ { "spm", RGXFWIF_LOG_TYPE_GROUP_SPM }, \ -+ { "pow", RGXFWIF_LOG_TYPE_GROUP_POW }, \ -+ { "hwr", RGXFWIF_LOG_TYPE_GROUP_HWR }, \ -+ { "hwp", RGXFWIF_LOG_TYPE_GROUP_HWP }, \ -+ { "rpm", RGXFWIF_LOG_TYPE_GROUP_RPM }, \ -+ { "dma", RGXFWIF_LOG_TYPE_GROUP_DMA }, \ -+ { "misc", RGXFWIF_LOG_TYPE_GROUP_MISC }, \ -+ { "debug", RGXFWIF_LOG_TYPE_GROUP_DEBUG } -+ -+ -+/* Used in print statements to display log group state, one %s per group defined */ -+#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s" -+ -+/* Used in a print statement to display log group state, one per group */ -+#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types) ((((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN) != 0U) ?("main ") :("")), \ -+ ((((types) & RGXFWIF_LOG_TYPE_GROUP_MTS) != 0U) ?("mts ") :("")), \ -+ ((((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP) != 0U) ?("cleanup ") :("")), \ -+ ((((types) & RGXFWIF_LOG_TYPE_GROUP_CSW) != 0U) ?("csw ") :("")), \ -+ ((((types) & RGXFWIF_LOG_TYPE_GROUP_BIF) != 0U) ?("bif ") :("")), \ -+ ((((types) & RGXFWIF_LOG_TYPE_GROUP_PM) != 0U) ?("pm ") :("")), \ -+ ((((types) & RGXFWIF_LOG_TYPE_GROUP_RTD) != 0U) ?("rtd ") :("")), \ -+ ((((types) & RGXFWIF_LOG_TYPE_GROUP_SPM) != 0U) ?("spm ") :("")), \ -+ ((((types) & RGXFWIF_LOG_TYPE_GROUP_POW) != 0U) ?("pow ") :("")), \ -+ ((((types) & RGXFWIF_LOG_TYPE_GROUP_HWR) != 0U) ?("hwr ") :("")), \ -+ ((((types) & RGXFWIF_LOG_TYPE_GROUP_HWP) != 0U) ?("hwp ") :("")), \ -+ ((((types) & RGXFWIF_LOG_TYPE_GROUP_RPM) != 0U) ?("rpm ") :("")), \ -+ ((((types) & RGXFWIF_LOG_TYPE_GROUP_DMA) != 0U) ?("dma ") :("")), \ -+ ((((types) & RGXFWIF_LOG_TYPE_GROUP_MISC) 
!= 0U) ?("misc ") :("")), \ -+ ((((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG) != 0U) ?("debug ") :("")) -+ -+ -+/************************************************************************ -+* RGX FW signature checks -+************************************************************************/ -+#define RGXFW_SIG_BUFFER_SIZE_MIN (8192) -+ -+#define RGXFWIF_TIMEDIFF_ID ((0x1UL << 28) | RGX_CR_TIMER) -+ -+/*! -+ ****************************************************************************** -+ * Trace Buffer -+ *****************************************************************************/ -+ -+/*! Min, Max, and Default size of RGXFWIF_TRACEBUF_SPACE in DWords */ -+#define RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS 8192U /* 32KB */ -+#define RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U /* ~48KB */ -+#define RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS 32768U /* 128KB */ -+ -+#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200U -+#if defined(RGXFW_META_SUPPORT_2ND_THREAD) -+#define RGXFW_THREAD_NUM 2U -+#else -+#define RGXFW_THREAD_NUM 1U -+#endif -+ -+#define RGXFW_POLL_TYPE_SET 0x80000000U -+ -+#define RGXFW_PROCESS_NAME_LEN (16) -+ -+typedef struct -+{ -+ IMG_CHAR szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; -+ IMG_CHAR szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; -+ IMG_UINT32 ui32LineNum; -+} UNCACHED_ALIGN RGXFWIF_FILE_INFO_BUF; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_FILE_INFO_BUF) == 408, -+ "RGXFWIF_FILE_INFO_BUF is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/*! -+ * @Defgroup SRVAndFWTracing Services and Firmware Tracing data interface -+ * @Brief The document groups/lists the data structures and the interfaces related to Services and Firmware Tracing -+ * @{ -+ */ -+ -+/*! -+ * @Brief Firmware trace buffer details -+ */ -+typedef struct -+{ -+ IMG_UINT32 ui32TracePointer; /*!< Trace pointer (write index into Trace Buffer) */ -+ IMG_UINT32 ui32WrapCount; /*!< Number of times the Trace Buffer has wrapped */ -+ -+ RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address), to be used by firmware for writing into trace buffer */ -+ -+ RGXFWIF_FILE_INFO_BUF RGXFW_ALIGN sAssertBuf; -+} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE; -+ -+/*! 
@} End of Defgroup SRVAndFWTracing */ -+ -+#define RGXFWIF_FWFAULTINFO_MAX (8U) /* Total number of FW fault logs stored */ -+ -+typedef struct -+{ -+ IMG_UINT64 RGXFW_ALIGN ui64CRTimer; -+ IMG_UINT64 RGXFW_ALIGN ui64OSTimer; -+ IMG_UINT64 RGXFW_ALIGN ui64Data; -+ RGXFWIF_FILE_INFO_BUF sFaultBuf; -+} UNCACHED_ALIGN RGX_FWFAULTINFO; -+ -+ -+#define RGXFWIF_POW_STATES \ -+ X(RGXFWIF_POW_OFF) /* idle and handshaked with the host (ready to full power down) */ \ -+ X(RGXFWIF_POW_ON) /* running HW commands */ \ -+ X(RGXFWIF_POW_FORCED_IDLE) /* forced idle */ \ -+ X(RGXFWIF_POW_IDLE) /* idle waiting for host handshake */ -+ -+typedef enum -+{ -+#define X(NAME) NAME, -+ RGXFWIF_POW_STATES -+#undef X -+} RGXFWIF_POW_STATE; -+ -+/* Firmware HWR states */ -+#define RGXFWIF_HWR_HARDWARE_OK (IMG_UINT32_C(0x1) << 0U) /*!< The HW state is ok or locked up */ -+#define RGXFWIF_HWR_RESET_IN_PROGRESS (IMG_UINT32_C(0x1) << 1U) /*!< Tells if a HWR reset is in progress */ -+#define RGXFWIF_HWR_GENERAL_LOCKUP (IMG_UINT32_C(0x1) << 3U) /*!< A DM unrelated lockup has been detected */ -+#define RGXFWIF_HWR_DM_RUNNING_OK (IMG_UINT32_C(0x1) << 4U) /*!< At least one DM is running without being close to a lockup */ -+#define RGXFWIF_HWR_DM_STALLING (IMG_UINT32_C(0x1) << 5U) /*!< At least one DM is close to lockup */ -+#define RGXFWIF_HWR_FW_FAULT (IMG_UINT32_C(0x1) << 6U) /*!< The FW has faulted and needs to restart */ -+#define RGXFWIF_HWR_RESTART_REQUESTED (IMG_UINT32_C(0x1) << 7U) /*!< The FW has requested the host to restart it */ -+ -+#define RGXFWIF_PHR_STATE_SHIFT (8U) -+#define RGXFWIF_PHR_RESTART_REQUESTED (IMG_UINT32_C(1) << RGXFWIF_PHR_STATE_SHIFT) /*!< The FW has requested the host to restart it, per PHR configuration */ -+#define RGXFWIF_PHR_RESTART_FINISHED (IMG_UINT32_C(2) << RGXFWIF_PHR_STATE_SHIFT) /*!< A PHR triggered GPU reset has just finished */ -+#define RGXFWIF_PHR_RESTART_MASK (RGXFWIF_PHR_RESTART_REQUESTED | RGXFWIF_PHR_RESTART_FINISHED) -+ -+#define RGXFWIF_PHR_MODE_OFF (0UL) -+#define RGXFWIF_PHR_MODE_RD_RESET (1UL) -+#define RGXFWIF_PHR_MODE_FULL_RESET (2UL) -+ -+typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS; -+ -+/* Firmware per-DM HWR states */ -+#define RGXFWIF_DM_STATE_WORKING (0x00U) /*!< DM is working if all flags are cleared */ -+#define RGXFWIF_DM_STATE_READY_FOR_HWR (IMG_UINT32_C(0x1) << 0) /*!< DM is idle and ready for HWR */ -+#define RGXFWIF_DM_STATE_NEEDS_SKIP (IMG_UINT32_C(0x1) << 2) /*!< DM need to skip to next cmd before resuming processing */ -+#define RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP (IMG_UINT32_C(0x1) << 3) /*!< DM need partial render cleanup before resuming processing */ -+#define RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR (IMG_UINT32_C(0x1) << 4) /*!< DM need to increment Recovery Count once fully recovered */ -+#define RGXFWIF_DM_STATE_GUILTY_LOCKUP (IMG_UINT32_C(0x1) << 5) /*!< DM was identified as locking up and causing HWR */ -+#define RGXFWIF_DM_STATE_INNOCENT_LOCKUP (IMG_UINT32_C(0x1) << 6) /*!< DM was innocently affected by another lockup which caused HWR */ -+#define RGXFWIF_DM_STATE_GUILTY_OVERRUNING (IMG_UINT32_C(0x1) << 7) /*!< DM was identified as over-running and causing HWR */ -+#define RGXFWIF_DM_STATE_INNOCENT_OVERRUNING (IMG_UINT32_C(0x1) << 8) /*!< DM was innocently affected by another DM over-running which caused HWR */ -+#define RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH (IMG_UINT32_C(0x1) << 9) /*!< DM was forced into HWR as it delayed more important workloads */ -+#define RGXFWIF_DM_STATE_GPU_ECC_HWR (IMG_UINT32_C(0x1) << 10) /*!< DM was forced into HWR due to an 
uncorrected GPU ECC error */ -+ -+/* Firmware's connection state */ -+typedef IMG_UINT32 RGXFWIF_CONNECTION_FW_STATE; -+#define RGXFW_CONNECTION_FW_OFFLINE 0U /*!< Firmware is offline */ -+#define RGXFW_CONNECTION_FW_READY 1U /*!< Firmware is initialised */ -+#define RGXFW_CONNECTION_FW_ACTIVE 2U /*!< Firmware connection is fully established */ -+#define RGXFW_CONNECTION_FW_OFFLOADING 3U /*!< Firmware is clearing up connection data */ -+#define RGXFW_CONNECTION_FW_COOLDOWN 4U /*!< Firmware connection is in cooldown period */ -+#define RGXFW_CONNECTION_FW_STATE_COUNT 5U -+ -+/* OS' connection state */ -+typedef enum -+{ -+ RGXFW_CONNECTION_OS_OFFLINE = 0, /*!< OS is offline */ -+ RGXFW_CONNECTION_OS_READY, /*!< OS's KM driver is setup and waiting */ -+ RGXFW_CONNECTION_OS_ACTIVE, /*!< OS connection is fully established */ -+ RGXFW_CONNECTION_OS_STATE_COUNT -+} RGXFWIF_CONNECTION_OS_STATE; -+ -+typedef struct -+{ -+ IMG_UINT bfOsState : 3; -+ IMG_UINT bfFLOk : 1; -+ IMG_UINT bfFLGrowPending : 1; -+ IMG_UINT bfReserved : 27; -+} RGXFWIF_OS_RUNTIME_FLAGS; -+ -+typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS; -+ -+#if defined(PVRSRV_STALLED_CCB_ACTION) -+#define PVR_SLR_LOG_ENTRIES 10U -+#define PVR_SLR_LOG_STRLEN 30 /*!< MAX_CLIENT_CCB_NAME not visible to this header */ -+ -+typedef struct -+{ -+ IMG_UINT64 RGXFW_ALIGN ui64Timestamp; -+ IMG_UINT32 ui32FWCtxAddr; -+ IMG_UINT32 ui32NumUFOs; -+ IMG_CHAR aszCCBName[PVR_SLR_LOG_STRLEN]; -+} UNCACHED_ALIGN RGXFWIF_SLR_ENTRY; -+#endif -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+#define MAX_THREAD_NUM 2 -+ -+static_assert(RGXFW_THREAD_NUM <= MAX_THREAD_NUM, -+ "RGXFW_THREAD_NUM is outside of allowable range for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/*! -+ * @InGroup SRVAndFWTracing -+ * @Brief Firmware trace control data -+ */ -+typedef struct -+{ -+ IMG_UINT32 ui32LogType; /*!< FW trace log group configuration */ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+ RGXFWIF_TRACEBUF_SPACE sTraceBuf[MAX_THREAD_NUM]; /*!< FW Trace buffer */ -+#else -+ RGXFWIF_TRACEBUF_SPACE sTraceBuf[RGXFW_THREAD_NUM]; /*!< FW Trace buffer */ -+#endif -+ IMG_UINT32 ui32TraceBufSizeInDWords; /*!< FW Trace buffer size in dwords, Member initialised only when sTraceBuf is actually allocated -+ (in RGXTraceBufferInitOnDemandResources) */ -+ IMG_UINT32 ui32TracebufFlags; /*!< Compatibility and other flags */ -+} UNCACHED_ALIGN RGXFWIF_TRACEBUF; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_TRACEBUF) == 880, -+ "RGXFWIF_TRACEBUF is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/*! 
@Brief Firmware system data shared with the Host driver */ -+typedef struct -+{ -+ IMG_UINT32 ui32ConfigFlags; /*!< Configuration flags from host */ -+ IMG_UINT32 ui32ConfigFlagsExt; /*!< Extended configuration flags from host */ -+ volatile RGXFWIF_POW_STATE ePowState; -+ struct { -+ volatile IMG_UINT32 ui32HWPerfRIdx; -+ volatile IMG_UINT32 ui32HWPerfWIdx; -+ volatile IMG_UINT32 ui32HWPerfWrapCount; -+ } sHWPerfCtrl; /* Struct used to inval/flush HWPerfCtrl members */ -+ IMG_UINT32 ui32HWPerfSize; /*!< Constant after setup, needed in FW */ -+ IMG_UINT32 ui32HWPerfDropCount; /*!< The number of times the FW drops a packet due to buffer full */ -+ -+ /* ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid when FW is built with -+ * RGX_HWPERF_UTILIZATION & RGX_HWPERF_DROP_TRACKING defined in rgxfw_hwperf.c */ -+ IMG_UINT32 ui32HWPerfUt; /*!< Buffer utilisation, high watermark of bytes in use */ -+ IMG_UINT32 ui32FirstDropOrdinal; /*!< The ordinal of the first packet the FW dropped */ -+ IMG_UINT32 ui32LastDropOrdinal; /*!< The ordinal of the last packet the FW dropped */ -+ RGXFWIF_OS_RUNTIME_FLAGS asOsRuntimeFlagsMirror[RGXFW_MAX_NUM_OSIDS];/*!< State flags for each Operating System mirrored from Fw coremem */ -+ RGX_FWFAULTINFO sFaultInfo[RGXFWIF_FWFAULTINFO_MAX]; /*!< Firmware fault info */ -+ IMG_UINT32 ui32FWFaults; /*!< Firmware faults count */ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+ IMG_UINT32 aui32CrPollAddr[MAX_THREAD_NUM]; /*!< Failed poll address */ -+ IMG_UINT32 aui32CrPollMask[MAX_THREAD_NUM]; /*!< Failed poll mask */ -+ IMG_UINT32 aui32CrPollCount[MAX_THREAD_NUM]; /*!< Failed poll count */ -+#else -+ IMG_UINT32 aui32CrPollAddr[RGXFW_THREAD_NUM]; /*!< Failed poll address */ -+ IMG_UINT32 aui32CrPollMask[RGXFW_THREAD_NUM]; /*!< Failed poll mask */ -+ IMG_UINT32 aui32CrPollCount[RGXFW_THREAD_NUM]; /*!< Failed poll count */ -+#endif -+ IMG_UINT64 RGXFW_ALIGN ui64StartIdleTime; -+ -+#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) && !defined(SUPPORT_OPEN_SOURCE_DRIVER) -+#define RGXFWIF_STATS_FRAMEWORK_LINESIZE (8) -+#define RGXFWIF_STATS_FRAMEWORK_MAX (2048*RGXFWIF_STATS_FRAMEWORK_LINESIZE) -+ IMG_UINT32 RGXFW_ALIGN aui32FWStatsBuf[RGXFWIF_STATS_FRAMEWORK_MAX]; -+#endif -+ RGXFWIF_HWR_STATEFLAGS ui32HWRStateFlags; /*!< Firmware's Current HWR state */ -+ RGXFWIF_HWR_RECOVERYFLAGS aui32HWRRecoveryFlags[RGXFWIF_DM_MAX]; /*!< Each DM's HWR state */ -+ IMG_UINT32 ui32FwSysDataFlags; /*!< Compatibility and other flags */ -+ IMG_UINT32 ui32McConfig; /*!< Identify whether MC config is P-P or P-S */ -+ IMG_UINT32 ui32MemFaultCheck; /*!< Device mem fault check on PCI systems */ -+} UNCACHED_ALIGN RGXFWIF_SYSDATA; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_SYSDATA) == 3624, -+ "RGXFWIF_SYSDATA is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER == 3624"); -+#endif -+ -+ -+/*! 
-+ * @InGroup ContextSwitching -+ * @Brief Firmware per-os data and configuration -+ */ -+typedef struct -+{ -+ IMG_UINT32 ui32FwOsConfigFlags; /*!< Configuration flags from an OS */ -+ IMG_UINT32 ui32FWSyncCheckMark; /*!< Markers to signal that the host should perform a full sync check */ -+ IMG_UINT32 ui32HostSyncCheckMark; /*!< Markers to signal that the Firmware should perform a full sync check */ -+#if defined(PVRSRV_STALLED_CCB_ACTION) || defined(SUPPORT_OPEN_SOURCE_DRIVER) -+ IMG_UINT32 ui32ForcedUpdatesRequested; -+ IMG_UINT8 ui8SLRLogWp; -+ RGXFWIF_SLR_ENTRY sSLRLogFirst; -+ RGXFWIF_SLR_ENTRY sSLRLog[PVR_SLR_LOG_ENTRIES]; -+ IMG_UINT64 RGXFW_ALIGN ui64LastForcedUpdateTime; -+#endif -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+ volatile IMG_UINT32 aui32InterruptCount[MAX_THREAD_NUM]; /*!< Interrupt count from Threads > */ -+#else -+ volatile IMG_UINT32 aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads > */ -+#endif -+ IMG_UINT32 ui32KCCBCmdsExecuted; /*!< Executed Kernel CCB command count */ -+ RGXFWIF_DEV_VIRTADDR sPowerSync; /*!< Sync prim used to signal the host the power off state */ -+ IMG_UINT32 ui32FwOsDataFlags; /*!< Compatibility and other flags */ -+#if defined(SUPPORT_VALIDATION) -+ IMG_UINT32 aui32KickCount[RGXFWIF_DM_MAX]; /*!< Count of the number of kicks per DM */ -+ IMG_UINT32 aui32KickPipelineCount[RGXFWIF_DM_MAX];/*!< Count of the number of kicks which pipelined per DM */ -+ IMG_UINT32 aui32KickCancelledCount[RGXFWIF_DM_MAX];/*!< Count of the number of kicks which are cancelled per DM */ -+#endif -+} UNCACHED_ALIGN RGXFWIF_OSDATA; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_OSDATA) == 584, -+ "RGXFWIF_OSDATA is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/* Firmware trace time-stamp field breakup */ -+ -+/* RGX_CR_TIMER register read (48 bits) value*/ -+#define RGXFWT_TIMESTAMP_TIME_SHIFT (0U) -+#define RGXFWT_TIMESTAMP_TIME_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) -+ -+/* Extra debug-info (16 bits) */ -+#define RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT (48U) -+#define RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK ~RGXFWT_TIMESTAMP_TIME_CLRMSK -+ -+ -+/* Debug-info sub-fields */ -+/* Bit 0: RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT bit from RGX_CR_EVENT_STATUS register */ -+#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT (0U) -+#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET (1U << RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT) -+ -+/* Bit 1: RGX_CR_BIF_MMU_ENTRY_PENDING bit from RGX_CR_BIF_MMU_ENTRY register */ -+#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT (1U) -+#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET (1U << RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT) -+ -+/* Bit 2: RGX_CR_SLAVE_EVENT register is non-zero */ -+#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT (2U) -+#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET (1U << RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT) -+ -+/* Bit 3-15: Unused bits */ -+ -+#define RGXFWT_DEBUG_INFO_STR_MAXLEN 64 -+#define RGXFWT_DEBUG_INFO_STR_PREPEND " (debug info: " -+#define RGXFWT_DEBUG_INFO_STR_APPEND ")" -+ -+/* Table of debug info sub-field's masks and corresponding message strings -+ * to be appended to firmware trace -+ * -+ * Mask : 16 bit mask to be applied to debug-info field -+ * String : debug info message string -+ */ -+ -+#define RGXFWT_DEBUG_INFO_MSKSTRLIST \ -+/*Mask, String*/ \ -+X(RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET, "mmu pf") \ -+X(RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET, "mmu pending") \ -+X(RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET, "slave events") -+ -+/*! 
-+ ****************************************************************************** -+ * HWR Data -+ *****************************************************************************/ -+/*! -+ * @Defgroup HWRInfo FW HWR shared data interface -+ * @Brief Types grouping data structures and defines used in realising the HWR record. -+ * @{ -+ */ -+/*! @Brief HWR Lockup types */ -+typedef enum -+{ -+ RGX_HWRTYPE_UNKNOWNFAILURE = 0, /*!< Unknown failure */ -+ RGX_HWRTYPE_OVERRUN = 1, /*!< DM overrun */ -+ RGX_HWRTYPE_POLLFAILURE = 2, /*!< Poll failure */ -+ RGX_HWRTYPE_BIF0FAULT = 3, /*!< BIF0 fault */ -+ RGX_HWRTYPE_BIF1FAULT = 4, /*!< BIF1 fault */ -+ RGX_HWRTYPE_TEXASBIF0FAULT = 5, /*!< TEXASBIF0 fault */ -+ RGX_HWRTYPE_MMUFAULT = 6, /*!< MMU fault */ -+ RGX_HWRTYPE_MMUMETAFAULT = 7, /*!< MMU META fault */ -+ RGX_HWRTYPE_MIPSTLBFAULT = 8, /*!< MIPS TLB fault */ -+ RGX_HWRTYPE_ECCFAULT = 9, /*!< ECC fault */ -+ RGX_HWRTYPE_MMURISCVFAULT = 10, /*!< MMU RISCV fault */ -+} RGX_HWRTYPE; -+ -+#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) (((eHWRType) == RGX_HWRTYPE_BIF0FAULT) ? 0 : 1) -+ -+#define RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType) ((((eHWRType) == RGX_HWRTYPE_BIF0FAULT) || \ -+ ((eHWRType) == RGX_HWRTYPE_BIF1FAULT) || \ -+ ((eHWRType) == RGX_HWRTYPE_TEXASBIF0FAULT) || \ -+ ((eHWRType) == RGX_HWRTYPE_MMUFAULT) || \ -+ ((eHWRType) == RGX_HWRTYPE_MMUMETAFAULT) || \ -+ ((eHWRType) == RGX_HWRTYPE_MIPSTLBFAULT) || \ -+ ((eHWRType) == RGX_HWRTYPE_MMURISCVFAULT)) ? true : false) -+ -+typedef struct -+{ -+ IMG_UINT64 RGXFW_ALIGN ui64BIFReqStatus; /*!< BIF request status */ -+ IMG_UINT64 RGXFW_ALIGN ui64BIFMMUStatus; /*!< MMU status */ -+ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ -+ IMG_UINT64 RGXFW_ALIGN ui64Reserved; -+} RGX_BIFINFO; -+ -+typedef struct -+{ -+ IMG_UINT32 ui32FaultGPU; /*!< ECC fault in GPU */ -+} RGX_ECCINFO; -+ -+typedef struct -+{ -+ IMG_UINT64 RGXFW_ALIGN aui64MMUStatus[2]; /*!< MMU status */ -+ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ -+ IMG_UINT64 RGXFW_ALIGN ui64Reserved; -+} RGX_MMUINFO; -+ -+typedef struct -+{ -+ IMG_UINT32 ui32ThreadNum; /*!< Thread ID performing poll operation */ -+ IMG_UINT32 ui32CrPollAddr; /*!< CR Poll Address */ -+ IMG_UINT32 ui32CrPollMask; /*!< CR Poll mask */ -+ IMG_UINT32 ui32CrPollLastValue; /*!< CR Poll last value */ -+ IMG_UINT64 RGXFW_ALIGN ui64Reserved; -+} UNCACHED_ALIGN RGX_POLLINFO; -+ -+typedef struct -+{ -+ IMG_UINT32 ui32BadVAddr; /*!< VA address */ -+ IMG_UINT32 ui32EntryLo; -+} RGX_TLBINFO; -+ -+/*! @Brief Structure to keep information specific to a lockup e.g. DM, timer, lockup type etc. 
*/ -+typedef struct -+{ -+ union -+ { -+ RGX_BIFINFO sBIFInfo; /*!< BIF failure details */ -+ RGX_MMUINFO sMMUInfo; /*!< MMU failure details */ -+ RGX_POLLINFO sPollInfo; /*!< Poll failure details */ -+ RGX_TLBINFO sTLBInfo; /*!< TLB failure details */ -+ RGX_ECCINFO sECCInfo; /*!< ECC failure details */ -+ } uHWRData; -+ -+ IMG_UINT64 RGXFW_ALIGN ui64CRTimer; /*!< Timer value at the time of lockup */ -+ IMG_UINT64 RGXFW_ALIGN ui64OSTimer; /*!< OS timer value at the time of lockup */ -+ IMG_UINT32 ui32FrameNum; /*!< Frame number of the workload */ -+ IMG_UINT32 ui32PID; /*!< PID belonging to the workload */ -+ IMG_UINT32 ui32ActiveHWRTData; /*!< HWRT data of the workload */ -+ IMG_UINT32 ui32HWRNumber; /*!< HWR number */ -+ IMG_UINT32 ui32EventStatus; /*!< Core specific event status register at the time of lockup */ -+ IMG_UINT32 ui32HWRRecoveryFlags; /*!< DM state flags */ -+ RGX_HWRTYPE eHWRType; /*!< Type of lockup */ -+ RGXFWIF_DM eDM; /*!< Recovery triggered for the DM */ -+ IMG_UINT32 ui32CoreID; /*!< Core ID of the GPU */ -+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeOfKick; /*!< Workload kick time */ -+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetStart; /*!< HW reset start time */ -+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish; /*!< HW reset stop time */ -+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady; /*!< freelist ready time on the last HWR */ -+ IMG_CHAR RGXFW_ALIGN szProcName[RGXFW_PROCESS_NAME_LEN]; /*!< User process name */ -+} UNCACHED_ALIGN RGX_HWRINFO; -+ -+#define RGXFWIF_HWINFO_MAX_FIRST 8U /* Number of first HWR logs recorded (never overwritten by newer logs) */ -+#define RGXFWIF_HWINFO_MAX_LAST 8U /* Number of latest HWR logs (older logs are overwritten by newer logs) */ -+#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + RGXFWIF_HWINFO_MAX_LAST) /* Total number of HWR logs stored in a buffer */ -+#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1U) /* Index of the last log in the HWR log buffer */ -+ -+/*! @Brief Firmware HWR information structure allocated by the Services and used by the Firmware to update recovery information. */ -+typedef struct -+{ -+ RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX]; /*!< Max number of recovery record */ -+ IMG_UINT32 ui32HwrCounter; /*!< HWR counter used in FL reconstruction */ -+ IMG_UINT32 ui32WriteIndex; /*!< Index for updating recovery information in sHWRInfo */ -+ IMG_UINT32 ui32DDReqCount; /*!< Count of DebugDump requested to the host after recovery */ -+ IMG_UINT32 ui32HWRInfoBufFlags; /* Compatibility and other flags */ -+ IMG_UINT32 aui32HwrDmLockedUpCount[RGXFWIF_DM_MAX]; /*!< Lockup count for each DM */ -+ IMG_UINT32 aui32HwrDmOverranCount[RGXFWIF_DM_MAX]; /*!< Overrun count for each DM */ -+ IMG_UINT32 aui32HwrDmRecoveredCount[RGXFWIF_DM_MAX]; /*!< Lockup + Overrun count for each DM */ -+ IMG_UINT32 aui32HwrDmFalseDetectCount[RGXFWIF_DM_MAX]; /*!< False lockup detection count for each DM */ -+} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_HWRINFOBUF) == 2336, -+ "RGXFWIF_HWRINFOBUF is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/*! 
@} End of HWRInfo */ -+ -+#define RGXFWIF_CTXSWITCH_PROFILE_FAST_EN (IMG_UINT32_C(0x1)) -+#define RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN (IMG_UINT32_C(0x2)) -+#define RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN (IMG_UINT32_C(0x3)) -+#define RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN (IMG_UINT32_C(0x4)) -+ -+#define RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN (IMG_UINT32_C(0x1)) -+#define RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN (IMG_UINT32_C(0x2)) -+ -+#define RGXFWIF_ISP_SCHEDMODE_VER1_IPP (IMG_UINT32_C(0x1)) -+#define RGXFWIF_ISP_SCHEDMODE_VER2_ISP (IMG_UINT32_C(0x2)) -+/*! -+ ****************************************************************************** -+ * RGX firmware Init Config Data -+ *****************************************************************************/ -+ -+/* Flag definitions affecting the firmware globally */ -+#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND (IMG_UINT32_C(0x1) << 0) /*!< Randomise context switch requests */ -+#define RGXFWIF_INICFG_CTXSWITCH_SRESET_EN (IMG_UINT32_C(0x1) << 1) -+#define RGXFWIF_INICFG_HWPERF_EN (IMG_UINT32_C(0x1) << 2) -+#define RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN (IMG_UINT32_C(0x1) << 3) /*!< Randomise DM-killing requests */ -+#define RGXFWIF_INICFG_POW_RASCALDUST (IMG_UINT32_C(0x1) << 4) -+#define RGXFWIF_INICFG_SPU_CLOCK_GATE (IMG_UINT32_C(0x1) << 5) -+#define RGXFWIF_INICFG_FBCDC_V3_1_EN (IMG_UINT32_C(0x1) << 6) -+#define RGXFWIF_INICFG_CHECK_MLIST_EN (IMG_UINT32_C(0x1) << 7) -+#define RGXFWIF_INICFG_DISABLE_CLKGATING_EN (IMG_UINT32_C(0x1) << 8) -+#define RGXFWIF_INICFG_TRY_OVERLAPPING_DM_PIPELINES_EN (IMG_UINT32_C(0x1) << 9) -+#define RGXFWIF_INICFG_DM_PIPELINE_ROADBLOCKS_EN (IMG_UINT32_C(0x1) << 10) -+/* 11 unused */ -+#define RGXFWIF_INICFG_REGCONFIG_EN (IMG_UINT32_C(0x1) << 12) -+#define RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY (IMG_UINT32_C(0x1) << 13) -+#define RGXFWIF_INICFG_HWP_DISABLE_FILTER (IMG_UINT32_C(0x1) << 14) -+/* 15 unused */ -+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT (16) -+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST (RGXFWIF_CTXSWITCH_PROFILE_FAST_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) -+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM (RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) -+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW (RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) -+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY (RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) -+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK (IMG_UINT32_C(0x7) << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) -+#define RGXFWIF_INICFG_DISABLE_DM_OVERLAP (IMG_UINT32_C(0x1) << 19) -+#define RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER (IMG_UINT32_C(0x1) << 20) -+#define RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED (IMG_UINT32_C(0x1) << 21) -+#define RGXFWIF_INICFG_VALIDATE_IRQ (IMG_UINT32_C(0x1) << 22) -+#define RGXFWIF_INICFG_DISABLE_PDP_EN (IMG_UINT32_C(0x1) << 23) -+#define RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN (IMG_UINT32_C(0x1) << 24) -+#define RGXFWIF_INICFG_WORKEST (IMG_UINT32_C(0x1) << 25) -+#define RGXFWIF_INICFG_PDVFS (IMG_UINT32_C(0x1) << 26) -+#define RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT (27) -+#define RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND (RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) -+#define RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN (RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) -+#define RGXFWIF_INICFG_CDM_ARBITRATION_MASK (IMG_UINT32_C(0x3) << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) 
-+#define RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT (29) -+#define RGXFWIF_INICFG_ISPSCHEDMODE_NONE (0) -+#define RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP (RGXFWIF_ISP_SCHEDMODE_VER1_IPP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) -+#define RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP (RGXFWIF_ISP_SCHEDMODE_VER2_ISP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) -+#define RGXFWIF_INICFG_ISPSCHEDMODE_MASK (RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP |\ -+ RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP) -+#define RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER (IMG_UINT32_C(0x1) << 31) -+ -+#define RGXFWIF_INICFG_ALL (0xFFFFFFFFU) -+ -+/* Extended Flag definitions affecting the firmware globally */ -+#define RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT (0) -+/* [8] Lossy min channel override -+ * [7] YUV10 override -+ * [6:4] Quality -+ * [3] Quality enable -+ * [2:1] Compression scheme -+ * [0] Lossy group */ -+#define RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK (IMG_UINT32_C(0x1FF)) /* RGX_CR_TFBC_COMPRESSION_CONTROL_MASKFULL */ -+#define RGXFWIF_INICFG_EXT_ALL (RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK) -+ -+#define RGXFWIF_INICFG_SYS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_CTXSWITCH_MODE_RAND | \ -+ RGXFWIF_INICFG_CTXSWITCH_SRESET_EN) -+ -+/* Flag definitions affecting only workloads submitted by a particular OS */ -+ -+/*! -+ * @AddToGroup ContextSwitching -+ * @{ -+ * @Name Per-OS DM context switch configuration flags -+ * @{ -+ */ -+#define RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN (IMG_UINT32_C(0x1) << 0) /*!< Enables TDM context switch */ -+#define RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN (IMG_UINT32_C(0x1) << 1) /*!< Enables GEOM DM context switch */ -+#define RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN (IMG_UINT32_C(0x1) << 2) /*!< Enables FRAG DM context switch */ -+#define RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN (IMG_UINT32_C(0x1) << 3) /*!< Enables CDM context switch */ -+ -+#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM (IMG_UINT32_C(0x1) << 4) -+#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM (IMG_UINT32_C(0x1) << 5) -+#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D (IMG_UINT32_C(0x1) << 6) -+#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM (IMG_UINT32_C(0x1) << 7) -+ -+#define RGXFWIF_INICFG_OS_ALL (0xFFU) -+ -+#define RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL (RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN | \ -+ RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN | \ -+ RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN | \ -+ RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN) -+ -+#define RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL) -+ -+/*! 
-+ * @} End of Per-OS Context switch configuration flags -+ * @} End of AddToGroup ContextSwitching -+ */ -+ -+#define RGXFWIF_FILTCFG_TRUNCATE_HALF (IMG_UINT32_C(0x1) << 3) -+#define RGXFWIF_FILTCFG_TRUNCATE_INT (IMG_UINT32_C(0x1) << 2) -+#define RGXFWIF_FILTCFG_NEW_FILTER_MODE (IMG_UINT32_C(0x1) << 1) -+ -+typedef IMG_UINT32 RGX_ACTIVEPM_CONF; -+#define RGX_ACTIVEPM_FORCE_OFF 0U -+#define RGX_ACTIVEPM_FORCE_ON 1U -+#define RGX_ACTIVEPM_DEFAULT 2U -+ -+typedef IMG_UINT32 RGX_RD_POWER_ISLAND_CONF; -+#define RGX_RD_POWER_ISLAND_FORCE_OFF 0U -+#define RGX_RD_POWER_ISLAND_FORCE_ON 1U -+#define RGX_RD_POWER_ISLAND_DEFAULT 2U -+ -+#if defined(RGX_FW_IRQ_OS_COUNTERS) -+/* Unused registers re-purposed for storing counters of the Firmware's -+ * interrupts for each OS -+ */ -+#if (RGXFW_MAX_NUM_OSIDS == 8) -+#define IRQ_COUNTER_STORAGE_REGS \ -+ 0x2028U, /* RGX_CR_PM_TA_MMU_FSTACK */ \ -+ 0x2050U, /* RGX_CR_PM_3D_MMU_FSTACK */ \ -+ 0x2030U, /* RGX_CR_PM_START_OF_MMU_TACONTEXT*/ \ -+ 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ -+ 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ -+ 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ -+ 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ -+ 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ -+#elif (RGXFW_MAX_NUM_OSIDS == 2) -+#define IRQ_COUNTER_STORAGE_REGS \ -+ 0x2028U, /* RGX_CR_PM_TA_MMU_FSTACK */ \ -+ 0x2050U, /* RGX_CR_PM_3D_MMU_FSTACK */ -+#else -+#error Unsupported number of IRQ_COUNTER_STORAGE_REGS registers! -+#endif -+#endif -+ -+#if defined(RGX_FIRMWARE) -+typedef DLLIST_NODE RGXFWIF_DLLIST_NODE; -+#else -+typedef struct {RGXFWIF_DEV_VIRTADDR p; -+ RGXFWIF_DEV_VIRTADDR n;} RGXFWIF_DLLIST_NODE; -+#endif -+ -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SIGBUFFER; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TRACEBUF; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SYSDATA; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_OSDATA; -+#if defined(SUPPORT_TBI_INTERFACE) -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TBIBUF; -+#endif -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERFBUF; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRINFOBUF; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RUNTIME_CFG; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_GPU_UTIL_FWCB; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_REG_CFG; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERF_CTL; -+typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_CONFIG_MUX_CNTBLK; -+typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_CONFIG_CNTBLK; -+typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_SELECT_CUSTOM_CNTRS; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_CTL; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_RTN_SLOTS; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWMEMCONTEXT; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWCOMMONCONTEXT; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_ZSBUFFER; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COMMONCTX_STATE; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CORE_CLK_RATE; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COUNTERBUFFER; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FIRMWAREGCOVBUFFER; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB_CTL; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FREELIST; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRTDATA; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TIMESTAMP_ADDR; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RF_CMD; -+ -+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) -+/*! 
-+ * @Brief Buffer to store KM active client contexts -+ */ -+typedef struct -+{ -+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */ -+} RGXFWIF_ACTIVE_CONTEXT_BUF_DATA; -+#endif -+ -+/*! -+ * This number is used to represent an invalid page catalogue physical address -+ */ -+#define RGXFWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU -+ -+/*! -+ * This number is used to represent an unallocated set of page catalog base registers -+ */ -+#define RGXFW_BIF_INVALID_PCSET 0xFFFFFFFFU -+ -+/*! -+ * This number is used to represent an invalid OS ID for the purpose of tracking PC set ownership -+ */ -+#define RGXFW_BIF_INVALID_OSID 0xFFFFFFFFU -+ -+/*! -+ * Firmware memory context. -+ */ -+typedef struct -+{ -+ IMG_DEV_PHYADDR RGXFW_ALIGN sPCDevPAddr; /*!< device physical address of context's page catalogue */ -+ IMG_UINT32 uiPageCatBaseRegSet; /*!< associated page catalog base register (RGXFW_BIF_INVALID_PCSET == unallocated) */ -+ IMG_UINT32 uiBreakpointAddr; /*!< breakpoint address */ -+ IMG_UINT32 uiBPHandlerAddr; /*!< breakpoint handler address */ -+ IMG_UINT32 uiBreakpointCtl; /*!< DM and enable control for BP */ -+ IMG_UINT32 ui32FwMemCtxFlags; /*!< Compatibility and other flags */ -+ -+#if defined(SUPPORT_CUSTOM_OSID_EMISSION) && !defined(SUPPORT_OPEN_SOURCE_DRIVER) -+ IMG_UINT32 ui32OSid; -+ IMG_BOOL bOSidAxiProt; -+#endif -+ -+} UNCACHED_ALIGN RGXFWIF_FWMEMCONTEXT; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_FWMEMCONTEXT) == 32, -+ "RGXFWIF_FWMEMCONTEXT is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/*! -+ * FW context state flags -+ */ -+#define RGXFWIF_CONTEXT_FLAGS_NEED_RESUME (0x00000001U) -+#define RGXFWIF_CONTEXT_FLAGS_MC_NEED_RESUME_MASKFULL (0x000000FFU) -+#define RGXFWIF_CONTEXT_FLAGS_TDM_HEADER_STALE (0x00000100U) -+#define RGXFWIF_CONTEXT_FLAGS_LAST_KICK_SECURE (0x00000200U) -+ -+/*! -+ * @InGroup ContextSwitching -+ * @Brief Firmware GEOM/TA context suspend state (per GEOM core) -+ */ -+typedef struct -+{ -+ /* FW-accessible TA state which must be written out to memory on context store */ -+ IMG_UINT64 RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER; /*!< VDM control stream stack pointer, to store in mid-TA */ -+ IMG_UINT64 RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER_Init; /*!< Initial value of VDM control stream stack pointer (in case is 'lost' due to a lock-up) */ -+ IMG_UINT32 uTAReg_VBS_SO_PRIM[4]; -+ IMG_UINT16 ui16TACurrentIdx; -+} UNCACHED_ALIGN RGXFWIF_TACTX_STATE_PER_GEOM; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+#define MAX_GEOM_CORE_SIZE 4 -+ -+static_assert(RGX_NUM_GEOM_CORES <= MAX_GEOM_CORE_SIZE, -+ "RGX_NUM_GEOM_CORES is outside of allowable range for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/*! -+ * @InGroup ContextSwitching -+ * @Brief Firmware GEOM/TA context suspend states for all GEOM cores -+ */ -+typedef struct -+{ -+ /*! FW-accessible TA state which must be written out to memory on context store */ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+ RGXFWIF_TACTX_STATE_PER_GEOM asGeomCore[MAX_GEOM_CORE_SIZE]; -+#else -+ RGXFWIF_TACTX_STATE_PER_GEOM asGeomCore[RGX_NUM_GEOM_CORES]; -+#endif -+} UNCACHED_ALIGN RGXFWIF_TACTX_STATE; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_TACTX_STATE) == 160, -+ "RGXFWIF_TACTX_STATE is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/*! 
-+ * @InGroup ContextSwitching -+ * @Brief Firmware FRAG/3D context suspend state -+ */ -+typedef struct -+{ -+ /* FW-accessible ISP state which must be written out to memory on context store */ -+ IMG_UINT32 u3DReg_PM_DEALLOCATED_MASK_STATUS; /*!< PM deallocation status */ -+ IMG_UINT32 u3DReg_PM_PDS_MTILEFREE_STATUS; /*!< Macro-tiles (MTs) finished status */ -+ IMG_UINT32 ui32CtxStateFlags; /*!< Compatibility and other flags */ -+ /* au3DReg_ISP_STORE should be the last element of the structure -+ * as this is an array whose size is determined at runtime -+ * after detecting the RGX core */ -+ IMG_UINT32 au3DReg_ISP_STORE[]; /*!< ISP state (per-pipe) */ -+} UNCACHED_ALIGN RGXFWIF_3DCTX_STATE; -+ -+static_assert(sizeof(RGXFWIF_3DCTX_STATE) <= 16U, -+ "Size of structure RGXFWIF_3DCTX_STATE exceeds maximum expected size."); -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_3DCTX_STATE) == 16, -+ "RGXFWIF_3DCTX_STATE is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+#define RGXFWIF_CTX_USING_BUFFER_A (0) -+#define RGXFWIF_CTX_USING_BUFFER_B (1U) -+ -+typedef struct -+{ -+ IMG_UINT32 ui32CtxStateFlags; /*!< Target buffer and other flags */ -+} RGXFWIF_COMPUTECTX_STATE; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_COMPUTECTX_STATE) == 4, -+ "RGXFWIF_COMPUTECTX_STATE is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+#define RGXFWIF_CONTEXT_COMPAT_FLAGS_STATS_PENDING (1U << 0) -+#define RGXFWIF_CONTEXT_COMPAT_FLAGS_HAS_DEFER_COUNT (1U << 1) -+ -+typedef struct -+{ -+ IMG_UINT32 ui32ExtJobRefToDisableZSStore; -+ IMG_BOOL bDisableZStore; -+ IMG_BOOL bDisableSStore; -+} RGXFWIF_DISABLE_ZSSTORE; -+ -+#define MAX_ZSSTORE_DISABLE 8 -+ -+/*! -+ * @InGroup WorkloadContexts -+ * @Brief Firmware Common Context (or FWCC) -+ */ -+typedef struct RGXFWIF_FWCOMMONCONTEXT_ -+{ -+ /* CCB details for this firmware context */ -+ PRGXFWIF_CCCB_CTL psCCBCtl; /*!< CCB control */ -+ PRGXFWIF_CCCB psCCB; /*!< CCB base */ -+ RGXFWIF_DMA_ADDR sCCBMetaDMAAddr; -+ -+ /* Context suspend state */ -+ PRGXFWIF_COMMONCTX_STATE RGXFW_ALIGN psContextState; /*!< TA/3D context suspend state, read/written by FW */ -+ -+ /* Flags e.g. for context switching */ -+ IMG_UINT32 ui32FWComCtxFlags; -+ IMG_INT32 i32Priority; /*!< Priority level */ -+ IMG_UINT32 ui32PrioritySeqNum; -+ -+ /* Framework state */ -+ PRGXFWIF_RF_CMD RGXFW_ALIGN psRFCmd; /*!< Register updates for Framework */ -+ -+ /* Misc and compatibility flags */ -+ IMG_UINT32 ui32CompatFlags; -+ -+ /* Statistic updates waiting to be passed back to the host... 
*/ -+ IMG_INT32 i32StatsNumStores; /*!< Number of stores on this context since last update */ -+ IMG_INT32 i32StatsNumOutOfMemory; /*!< Number of OOMs on this context since last update */ -+ IMG_INT32 i32StatsNumPartialRenders; /*!< Number of PRs on this context since last update */ -+ RGXFWIF_DM eDM; /*!< Data Master type */ -+ IMG_UINT64 RGXFW_ALIGN ui64WaitSignalAddress; /*!< Device Virtual Address of the signal the context is waiting on */ -+ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitSignalNode; /*!< List entry for the wait-signal list */ -+ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sBufStalledNode; /*!< List entry for the buffer stalled list */ -+ IMG_UINT64 RGXFW_ALIGN ui64CBufQueueCtrlAddr; /*!< Address of the circular buffer queue pointers */ -+ -+ IMG_UINT64 RGXFW_ALIGN ui64RobustnessAddress; -+ IMG_UINT32 ui32MaxDeadlineMS; /*!< Max HWR deadline limit in ms */ -+ bool bReadOffsetNeedsReset; /*!< Following HWR circular buffer read-offset needs resetting */ -+ -+ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitingNode; /*!< List entry for the waiting list */ -+ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sRunNode; /*!< List entry for the run list */ -+ RGXFWIF_UFO sLastFailedUFO; /*!< UFO that last failed (or NULL) */ -+ -+ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ -+ -+ /* References to the host side originators */ -+ IMG_UINT32 ui32ServerCommonContextID; /*!< the Server Common Context */ -+ IMG_UINT32 ui32PID; /*!< associated process ID */ -+ -+ IMG_BOOL bGeomOOMDisabled; /*!< True when Geom DM OOM is not allowed */ -+ IMG_CHAR szProcName[RGXFW_PROCESS_NAME_LEN]; /*!< User process name */ -+ IMG_UINT32 ui32DeferCount; /*!< Number of context defers before forced scheduling of context */ -+} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT; -+ -+static_assert(sizeof(RGXFWIF_FWCOMMONCONTEXT) <= 256U, -+ "Size of structure RGXFWIF_FWCOMMONCONTEXT exceeds maximum expected size."); -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_FWCOMMONCONTEXT) == 168, -+ "RGXFWIF_FWCOMMONCONTEXT is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_TQ[RGX_TRP_MAX_NUM_CORES][1]; -+typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_2D[RGX_TRP_MAX_NUM_CORES][2]; -+typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_3D[RGX_TRP_MAX_NUM_CORES][4]; -+typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_GEOM[RGX_TRP_MAX_NUM_CORES][2]; -+ -+/*! -+ * @InGroup WorkloadContexts -+ * @Brief Firmware render context. -+ */ -+typedef struct -+{ -+ RGXFWIF_FWCOMMONCONTEXT sTAContext; /*!< Firmware context for the TA */ -+ RGXFWIF_FWCOMMONCONTEXT s3DContext; /*!< Firmware context for the 3D */ -+ -+ RGXFWIF_STATIC_RENDERCONTEXT_STATE sStaticRenderContextState; -+ -+ RGXFWIF_DISABLE_ZSSTORE sDisableZSStoreQueue[MAX_ZSSTORE_DISABLE]; -+ -+ IMG_UINT32 ui32ZSStoreQueueCount; -+ IMG_UINT32 ui32WriteOffsetOfDisableZSStore; -+ -+ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ -+ -+ IMG_UINT32 ui32FwRenderCtxFlags; /*!< Compatibility and other flags */ -+ -+#if defined(SUPPORT_TRP) -+ RGXFWIF_TRP_CHECKSUM_3D aui64TRPChecksums3D; /*!< Used by Firmware to store checksums during 3D WRR */ -+ RGXFWIF_TRP_CHECKSUM_GEOM aui64TRPChecksumsGeom; /*!< Used by Firmware to store checksums during TA WRR */ -+ RGXFWIF_DM eTRPGeomCoreAffinity; /* !< Represent the DM affinity for pending 2nd TRP pass of GEOM otherwise points RGXFWIF_DM_MAX. */ -+#endif -+} UNCACHED_ALIGN RGXFWIF_FWRENDERCONTEXT; -+ -+/*! -+ Firmware compute context. 
-+*/ -+typedef struct -+{ -+ RGXFWIF_FWCOMMONCONTEXT sCDMContext; /*!< Firmware context for the CDM */ -+ -+ RGXFWIF_STATIC_COMPUTECONTEXT_STATE sStaticComputeContextState; -+ -+ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ -+ -+ IMG_UINT32 ui32ComputeCtxFlags; /*!< Compatibility and other flags */ -+ -+ IMG_UINT32 ui32WGPState; -+ IMG_UINT32 aui32WGPChecksum[RGX_WGP_MAX_NUM_CORES]; -+} UNCACHED_ALIGN RGXFWIF_FWCOMPUTECONTEXT; -+ -+/*! -+ Firmware TDM context. -+*/ -+typedef struct -+{ -+ RGXFWIF_FWCOMMONCONTEXT sTDMContext; /*!< Firmware context for the TDM */ -+ -+ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ -+ -+} UNCACHED_ALIGN RGXFWIF_FWTDMCONTEXT; -+ -+/*! -+ * @InGroup WorkloadContexts -+ * @Brief Firmware transfer context. -+ */ -+typedef struct -+{ -+ RGXFWIF_FWCOMMONCONTEXT sTQContext; /*!< Firmware context for TQ3D */ -+ -+#if defined(SUPPORT_TRP) -+ IMG_UINT32 ui32TRPState; /*!< Used by Firmware to track current state of a protected kick */ -+ RGXFWIF_TRP_CHECKSUM_TQ aui64TRPChecksumsTQ;/*!< Used by Firmware to store checksums during TQ WRR */ -+#endif -+} UNCACHED_ALIGN RGXFWIF_FWTRANSFERCONTEXT; -+ -+/*! -+ ****************************************************************************** -+ * Defines for CMD_TYPE corruption detection and forward compatibility check -+ *****************************************************************************/ -+ -+/* CMD_TYPE 32bit contains: -+ * 31:16 Reserved for magic value to detect corruption (16 bits) -+ * 15 Reserved for RGX_CCB_TYPE_TASK (1 bit) -+ * 14:0 Bits available for CMD_TYPEs (15 bits) */ -+ -+ -+/* Magic value to detect corruption */ -+#define RGX_CMD_MAGIC_DWORD IMG_UINT32_C(0x2ABC) -+#define RGX_CMD_MAGIC_DWORD_MASK (0xFFFF0000U) -+#define RGX_CMD_MAGIC_DWORD_SHIFT (16U) -+#define RGX_CMD_MAGIC_DWORD_SHIFTED (RGX_CMD_MAGIC_DWORD << RGX_CMD_MAGIC_DWORD_SHIFT) -+ -+/*! -+ * @InGroup KCCBTypes ClientCCBTypes -+ * @Brief Generic CCB control structure -+ */ -+typedef struct -+{ -+ volatile IMG_UINT32 ui32WriteOffset; /*!< write offset into array of commands (MUST be aligned to 16 bytes!) */ -+ volatile IMG_UINT32 ui32ReadOffset; /*!< read offset into array of commands */ -+ IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask (Total capacity of the CCB - 1) */ -+} UNCACHED_ALIGN RGXFWIF_CCB_CTL; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_CCB_CTL) == 16, -+ "RGXFWIF_CCB_CTL is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/*! 
-+ * @Defgroup KCCBTypes Kernel CCB data interface -+ * @Brief Types grouping data structures and defines used in realising the KCCB functionality -+ * @{ -+ */ -+ -+#define RGXFWIF_MMUCACHEDATA_FLAGS_PT (0x1U) /* MMU_CTRL_INVAL_PT_EN */ -+#define RGXFWIF_MMUCACHEDATA_FLAGS_PD (0x2U) /* MMU_CTRL_INVAL_PD_EN */ -+#define RGXFWIF_MMUCACHEDATA_FLAGS_PC (0x4U) /* MMU_CTRL_INVAL_PC_EN */ -+ -+#if !defined(__KERNEL__) -+ -+#if !defined(RGX_FEATURE_SLC_VIVT) -+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10U) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */ -+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8U) /* BIF_CTRL_INVAL_TLB1_EN */ -+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x0U) /* not used */ -+ -+#else /* RGX_FEATURE_SLC_VIVT */ -+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x0U) /* not used */ -+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (0x0U) /* not used */ -+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800U) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */ -+#endif -+ -+#else -+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10U) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */ -+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8U) /* BIF_CTRL_INVAL_TLB1_EN */ -+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800U) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */ -+#endif -+ -+#define RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000U) /* indicates FW should interrupt the host */ -+ -+/*! -+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_MMUCACHE type command -+ */ -+typedef struct -+{ -+ IMG_UINT32 ui32CacheFlags; -+ RGXFWIF_DEV_VIRTADDR sMMUCacheSync; -+ IMG_UINT32 ui32MMUCacheSyncUpdateValue; -+} RGXFWIF_MMUCACHEDATA; -+ -+#define RGXFWIF_BPDATA_FLAGS_ENABLE (1U << 0) -+#define RGXFWIF_BPDATA_FLAGS_WRITE (1U << 1) -+#define RGXFWIF_BPDATA_FLAGS_CTL (1U << 2) -+#define RGXFWIF_BPDATA_FLAGS_REGS (1U << 3) -+ -+typedef struct -+{ -+ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ -+ IMG_UINT32 ui32BPAddr; /*!< Breakpoint address */ -+ IMG_UINT32 ui32HandlerAddr; /*!< Breakpoint handler */ -+ IMG_UINT32 ui32BPDM; /*!< Breakpoint control */ -+ IMG_UINT32 ui32BPDataFlags; -+ IMG_UINT32 ui32TempRegs; /*!< Number of temporary registers to overallocate */ -+ IMG_UINT32 ui32SharedRegs; /*!< Number of shared registers to overallocate */ -+ IMG_UINT64 RGXFW_ALIGN ui64SpillAddr; -+ RGXFWIF_DM eDM; /*!< DM associated with the breakpoint */ -+} RGXFWIF_BPDATA; -+ -+#define RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS (RGXFWIF_PRBUFFER_MAXSUPPORTED + 1U) /* +1 is RTDATASET cleanup */ -+ -+/*! -+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_KICK type command -+ */ -+typedef struct -+{ -+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */ -+ IMG_UINT32 ui32CWoffUpdate; /*!< Client CCB woff update */ -+ IMG_UINT32 ui32CWrapMaskUpdate; /*!< Client CCB wrap mask update after CCCB growth */ -+ IMG_UINT32 ui32NumCleanupCtl; /*!< number of CleanupCtl pointers attached */ -+ PRGXFWIF_CLEANUP_CTL apsCleanupCtl[RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS]; /*!< CleanupCtl structures associated with command */ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ IMG_UINT32 ui32WorkEstCmdHeaderOffset; /*!< offset to the CmdHeader which houses the workload estimation kick data. */ -+#endif -+} RGXFWIF_KCCB_CMD_KICK_DATA; -+ -+/*! 
-+ * @Brief Command data for @Ref RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK type command -+ */ -+typedef struct -+{ -+ RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData; /*!< GEOM DM kick command data */ -+ RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData; /*!< FRAG DM kick command data */ -+} RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA; -+ -+/*! -+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FORCE_UPDATE type command -+ */ -+typedef struct -+{ -+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */ -+ IMG_UINT32 ui32CCBFenceOffset; /*!< Client CCB fence offset */ -+} RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA; -+ -+typedef struct -+{ -+ PRGXFWIF_FWCOMMONCONTEXT psContext; -+ RGXFWIF_DISABLE_ZSSTORE sDisableZSStore; -+} RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE_DATA; -+ -+/*! -+ * @Brief Resource types supported by \ref RGXFWIF_KCCB_CMD_CLEANUP type command -+ */ -+typedef enum -+{ -+ RGXFWIF_CLEANUP_FWCOMMONCONTEXT, /*!< FW common context cleanup */ -+ RGXFWIF_CLEANUP_HWRTDATA, /*!< FW HW RT data cleanup */ -+ RGXFWIF_CLEANUP_FREELIST, /*!< FW freelist cleanup */ -+ RGXFWIF_CLEANUP_ZSBUFFER, /*!< FW ZS Buffer cleanup */ -+} RGXFWIF_CLEANUP_TYPE; -+ -+/*! -+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_CLEANUP type command -+ */ -+typedef struct -+{ -+ RGXFWIF_CLEANUP_TYPE eCleanupType; /*!< Cleanup type */ -+ union { -+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< FW common context to cleanup */ -+ PRGXFWIF_HWRTDATA psHWRTData; /*!< HW RT to cleanup */ -+ PRGXFWIF_FREELIST psFreelist; /*!< Freelist to cleanup */ -+ PRGXFWIF_ZSBUFFER psZSBuffer; /*!< ZS Buffer to cleanup */ -+ } uCleanupData; -+} RGXFWIF_CLEANUP_REQUEST; -+ -+/*! -+ * @Brief Type of power requests supported in \ref RGXFWIF_KCCB_CMD_POW type command -+ */ -+typedef enum -+{ -+ RGXFWIF_POW_OFF_REQ = 1, /*!< GPU power-off request */ -+ RGXFWIF_POW_FORCED_IDLE_REQ, /*!< Force-idle related request */ -+ RGXFWIF_POW_NUM_UNITS_CHANGE, /*!< Request to change default powered scalable units */ -+ RGXFWIF_POW_APM_LATENCY_CHANGE /*!< Request to change the APM latency period */ -+} RGXFWIF_POWER_TYPE; -+ -+/*! -+ * @Brief Supported force-idle related requests with \ref RGXFWIF_POW_FORCED_IDLE_REQ type request -+ */ -+typedef enum -+{ -+ RGXFWIF_POWER_FORCE_IDLE = 1, /*!< Request to force-idle GPU */ -+ RGXFWIF_POWER_CANCEL_FORCED_IDLE, /*!< Request to cancel a previously successful force-idle transition */ -+ RGXFWIF_POWER_HOST_TIMEOUT, /*!< Notification that host timed-out waiting for force-idle state */ -+} RGXFWIF_POWER_FORCE_IDLE_TYPE; -+ -+/*! -+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_POW type command -+ */ -+typedef struct -+{ -+ RGXFWIF_POWER_TYPE ePowType; /*!< Type of power request */ -+ union -+ { -+ IMG_UINT32 ui32NumOfDusts; /*!< Number of active Dusts */ -+ IMG_BOOL bForced; /*!< If the operation is mandatory */ -+ RGXFWIF_POWER_FORCE_IDLE_TYPE ePowRequestType; /*!< Type of Request. Consolidating Force Idle, Cancel Forced Idle, Host Timeout */ -+ } uPowerReqData; -+} RGXFWIF_POWER_REQUEST; -+ -+/*! 
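The cleanup request above is effectively a tagged union: eCleanupType tells the firmware which member of uCleanupData to read. A minimal sketch, with a hypothetical helper name, added here for illustration only:

/* Illustrative sketch only (hypothetical helper, not patch content). */
static void ExampleFillFreelistCleanup(RGXFWIF_CLEANUP_REQUEST *psReq,
                                       PRGXFWIF_FREELIST psFreeList)
{
	psReq->eCleanupType = RGXFWIF_CLEANUP_FREELIST; /* selects the union member */
	psReq->uCleanupData.psFreelist = psFreeList;    /* freelist to clean up */
}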
-+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_SLCFLUSHINVAL type command -+ */ -+typedef struct -+{ -+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to fence on (only useful when bDMContext == TRUE) */ -+ IMG_BOOL bInval; /*!< Invalidate the cache as well as flushing */ -+ IMG_BOOL bDMContext; /*!< The data to flush/invalidate belongs to a specific DM context */ -+ IMG_UINT64 RGXFW_ALIGN ui64Address; /*!< Optional address of range (only useful when bDMContext == FALSE) */ -+ IMG_UINT64 RGXFW_ALIGN ui64Size; /*!< Optional size of range (only useful when bDMContext == FALSE) */ -+} RGXFWIF_SLCFLUSHINVALDATA; -+ -+typedef enum -+{ -+ RGXFWIF_HWPERF_CTRL_TOGGLE = 0, -+ RGXFWIF_HWPERF_CTRL_SET = 1, -+ RGXFWIF_HWPERF_CTRL_EMIT_FEATURES_EV = 2 -+} RGXFWIF_HWPERF_UPDATE_CONFIG; -+ -+/*! -+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG type command -+ */ -+typedef struct -+{ -+ RGXFWIF_HWPERF_UPDATE_CONFIG eOpCode; /*!< Control operation code */ -+ IMG_UINT64 RGXFW_ALIGN ui64Mask; /*!< Mask of events to toggle */ -+} RGXFWIF_HWPERF_CTRL; -+ -+typedef struct -+{ -+ IMG_UINT32 ui32NumBlocks; /*!< Number of RGX_HWPERF_CONFIG_MUX_CNTBLK in the array */ -+ PRGX_HWPERF_CONFIG_MUX_CNTBLK sBlockConfigs; /*!< Address of the RGX_HWPERF_CONFIG_MUX_CNTBLK array */ -+} RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS; -+ -+/*! -+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS type command -+ */ -+typedef struct -+{ -+ IMG_UINT32 ui32NumBlocks; /*!< Number of RGX_HWPERF_CONFIG_CNTBLK in the array */ -+ PRGX_HWPERF_CONFIG_CNTBLK sBlockConfigs; /*!< Address of the RGX_HWPERF_CONFIG_CNTBLK array */ -+} RGXFWIF_HWPERF_CONFIG_DA_BLKS; -+ -+/*! -+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE type command -+ */ -+typedef struct -+{ -+ IMG_UINT32 ui32NewClockSpeed; /*!< New clock speed */ -+} RGXFWIF_CORECLKSPEEDCHANGE_DATA; -+ -+#define RGXFWIF_HWPERF_CTRL_BLKS_MAX 16U -+ -+/*! -+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS type command -+ */ -+typedef struct -+{ -+ bool bEnable; -+ IMG_UINT32 ui32NumBlocks; /*!< Number of block IDs in the array */ -+ IMG_UINT16 aeBlockIDs[RGXFWIF_HWPERF_CTRL_BLKS_MAX]; /*!< Array of RGX_HWPERF_CNTBLK_ID values */ -+} RGXFWIF_HWPERF_CTRL_BLKS; -+ -+ -+typedef struct -+{ -+ IMG_UINT16 ui16CustomBlock; -+ IMG_UINT16 ui16NumCounters; -+ PRGX_HWPERF_SELECT_CUSTOM_CNTRS sCustomCounterIDs; -+} RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS; -+ -+/*! -+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE & \ref RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE type commands -+ */ -+typedef struct -+{ -+ RGXFWIF_DEV_VIRTADDR sZSBufferFWDevVAddr; /*!< ZS-Buffer FW address */ -+ IMG_BOOL bDone; /*!< action backing/unbacking succeeded */ -+} RGXFWIF_ZSBUFFER_BACKING_DATA; -+ -+#if defined(SUPPORT_VALIDATION) -+typedef struct -+{ -+ IMG_UINT32 ui32RegWidth; -+ IMG_BOOL bWriteOp; -+ IMG_UINT32 ui32RegAddr; -+ IMG_UINT64 RGXFW_ALIGN ui64RegVal; -+} RGXFWIF_RGXREG_DATA; -+ -+typedef struct -+{ -+ IMG_UINT64 ui64BaseAddress; -+ PRGXFWIF_FWCOMMONCONTEXT psContext; -+ IMG_UINT32 ui32Size; -+} RGXFWIF_GPUMAP_DATA; -+#endif -+ -+/*! 
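A short sketch of how the block-control structure above might be populated to re-enable two previously configured counter blocks. The helper and the block-ID parameters are assumptions; real IDs come from the RGX_HWPERF_CNTBLK_ID values referenced in the field description.

/* Illustrative sketch only (hypothetical helper, not patch content). */
static void ExampleEnableTwoHWPerfBlocks(RGXFWIF_HWPERF_CTRL_BLKS *psCtrl,
                                         IMG_UINT16 ui16BlockA,
                                         IMG_UINT16 ui16BlockB)
{
	psCtrl->bEnable       = true; /* bEnable is declared as plain C bool above */
	psCtrl->ui32NumBlocks = 2U;   /* must stay <= RGXFWIF_HWPERF_CTRL_BLKS_MAX */
	psCtrl->aeBlockIDs[0] = ui16BlockA;
	psCtrl->aeBlockIDs[1] = ui16BlockB;
}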
-+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE type command -+ */ -+typedef struct -+{ -+ RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr; /*!< Freelist FW address */ -+ IMG_UINT32 ui32DeltaPages; /*!< Amount of the Freelist change */ -+ IMG_UINT32 ui32NewPages; /*!< New amount of pages on the freelist (including ready pages) */ -+ IMG_UINT32 ui32ReadyPages; /*!< Number of ready pages to be held in reserve until OOM */ -+} RGXFWIF_FREELIST_GS_DATA; -+ -+#define RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT (MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS * 2U) -+#define RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000U -+ -+/*! -+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE type command -+ */ -+typedef struct -+{ -+ IMG_UINT32 ui32FreelistsCount; -+ IMG_UINT32 aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT]; -+} RGXFWIF_FREELISTS_RECONSTRUCTION_DATA; -+ -+/*! -+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE type command -+ */ -+typedef struct -+{ -+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to that may need to be resumed following write offset update */ -+} UNCACHED_ALIGN RGXFWIF_WRITE_OFFSET_UPDATE_DATA; -+ -+/*! -+ ****************************************************************************** -+ * Proactive DVFS Structures -+ *****************************************************************************/ -+#define NUM_OPP_VALUES 16 -+ -+typedef struct -+{ -+ IMG_UINT32 ui32Volt; /* V */ -+ IMG_UINT32 ui32Freq; /* Hz */ -+} UNCACHED_ALIGN PDVFS_OPP; -+ -+typedef struct -+{ -+ PDVFS_OPP asOPPValues[NUM_OPP_VALUES]; -+#if defined(DEBUG) -+ IMG_UINT32 ui32MinOPPPoint; -+#endif -+ IMG_UINT32 ui32MaxOPPPoint; -+} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP; -+ -+typedef struct -+{ -+ IMG_UINT32 ui32MaxOPPPoint; -+} UNCACHED_ALIGN RGXFWIF_PDVFS_MAX_FREQ_DATA; -+ -+typedef struct -+{ -+ IMG_UINT32 ui32MinOPPPoint; -+} UNCACHED_ALIGN RGXFWIF_PDVFS_MIN_FREQ_DATA; -+ -+/*! -+ ****************************************************************************** -+ * Register configuration structures -+ *****************************************************************************/ -+ -+#define RGXFWIF_REG_CFG_MAX_SIZE 512 -+ -+typedef enum -+{ -+ RGXFWIF_REGCFG_CMD_ADD = 101, -+ RGXFWIF_REGCFG_CMD_CLEAR = 102, -+ RGXFWIF_REGCFG_CMD_ENABLE = 103, -+ RGXFWIF_REGCFG_CMD_DISABLE = 104 -+} RGXFWIF_REGDATA_CMD_TYPE; -+ -+typedef enum -+{ -+ RGXFWIF_REG_CFG_TYPE_PWR_ON=0, /* Sidekick power event */ -+ RGXFWIF_REG_CFG_TYPE_DUST_CHANGE, /* Rascal / dust power event */ -+ RGXFWIF_REG_CFG_TYPE_TA, /* TA kick */ -+ RGXFWIF_REG_CFG_TYPE_3D, /* 3D kick */ -+ RGXFWIF_REG_CFG_TYPE_CDM, /* Compute kick */ -+ RGXFWIF_REG_CFG_TYPE_TLA, /* TLA kick */ -+ RGXFWIF_REG_CFG_TYPE_TDM, /* TDM kick */ -+ RGXFWIF_REG_CFG_TYPE_ALL /* Applies to all types. Keep as last element */ -+} RGXFWIF_REG_CFG_TYPE; -+ -+typedef struct -+{ -+ IMG_UINT64 ui64Addr; -+ IMG_UINT64 ui64Mask; -+ IMG_UINT64 ui64Value; -+} RGXFWIF_REG_CFG_REC; -+ -+typedef struct -+{ -+ RGXFWIF_REGDATA_CMD_TYPE eCmdType; -+ RGXFWIF_REG_CFG_TYPE eRegConfigType; -+ RGXFWIF_REG_CFG_REC RGXFW_ALIGN sRegConfig; -+ -+} RGXFWIF_REGCONFIG_DATA; -+ -+typedef struct -+{ -+ /** -+ * PDump WRW command write granularity is 32 bits. -+ * Add padding to ensure array size is 32 bit granular. 
-+ */ -+ IMG_UINT8 RGXFW_ALIGN aui8NumRegsType[PVR_ALIGN((IMG_UINT32)RGXFWIF_REG_CFG_TYPE_ALL,sizeof(IMG_UINT32))]; -+ RGXFWIF_REG_CFG_REC RGXFW_ALIGN asRegConfigs[RGXFWIF_REG_CFG_MAX_SIZE]; -+} UNCACHED_ALIGN RGXFWIF_REG_CFG; -+ -+typedef enum -+{ -+ RGXFWIF_OS_ONLINE = 1, -+ RGXFWIF_OS_OFFLINE -+} RGXFWIF_OS_STATE_CHANGE; -+ -+/*! -+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE type command -+ */ -+typedef struct -+{ -+ IMG_UINT32 ui32DriverID; -+ RGXFWIF_OS_STATE_CHANGE eNewOSState; -+} UNCACHED_ALIGN RGXFWIF_OS_STATE_CHANGE_DATA; -+ -+typedef enum -+{ -+ RGXFWIF_PWR_COUNTER_DUMP_START = 1, -+ RGXFWIF_PWR_COUNTER_DUMP_STOP, -+ RGXFWIF_PWR_COUNTER_DUMP_SAMPLE, -+} RGXFWIF_COUNTER_DUMP_REQUEST; -+ -+typedef struct -+{ -+ RGXFWIF_COUNTER_DUMP_REQUEST eCounterDumpRequest; -+} RGXFW_ALIGN RGXFWIF_COUNTER_DUMP_DATA; -+ -+/*! -+ * @Brief List of command types supported by the Kernel CCB -+ */ -+typedef enum -+{ -+ /* Common commands */ -+ RGXFWIF_KCCB_CMD_KICK = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< DM workload kick command */ -+ RGXFWIF_KCCB_CMD_MMUCACHE = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< MMU cache invalidation request */ -+ RGXFWIF_KCCB_CMD_BP = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, -+ RGXFWIF_KCCB_CMD_SLCFLUSHINVAL = 104U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< SLC flush and invalidation request */ -+ RGXFWIF_KCCB_CMD_CLEANUP = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests cleanup of a FW resource (type specified in the command data) */ -+ RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Backing for on-demand ZS-Buffer done */ -+ RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Unbacking for on-demand ZS-Buffer done */ -+ RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Grow done */ -+ RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelists Reconstruction done */ -+ RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */ -+ RGXFWIF_KCCB_CMD_HEALTH_CHECK = 111U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Health check request */ -+ RGXFWIF_KCCB_CMD_FORCE_UPDATE = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Forcing signalling of all unmet UFOs for a given CCB offset */ -+ RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK = 113U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< There is a TA and a 3D command in this single kick */ -+ RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 114U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW that a Guest OS has come online / offline. */ -+ RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE = 115U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW to disable zs store of a running 3D or add it to queue of render context. 
*/ -+ -+ /* Commands only permitted to the native or host OS */ -+ RGXFWIF_KCCB_CMD_POW = 200U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Power request */ -+ RGXFWIF_KCCB_CMD_REGCONFIG = 201U | RGX_CMD_MAGIC_DWORD_SHIFTED, -+ RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 202U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Core clock speed change event */ -+ RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE = 203U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */ -+ RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 204U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a maximum frequency/OPP point */ -+ RGXFWIF_KCCB_CMD_VZ_DRV_ARRAY_CHANGE = 205U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the priority/group for a particular driver. It can only be serviced for the Host DDK */ -+ RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL = 206U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set or clear firmware state flags */ -+ RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 207U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a minimum frequency/OPP point */ -+ RGXFWIF_KCCB_CMD_PHR_CFG = 208U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Periodic Hardware Reset behaviour */ -+#if defined(SUPPORT_VALIDATION) -+ RGXFWIF_KCCB_CMD_RGXREG = 209U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Read RGX Register from FW */ -+#endif -+ RGXFWIF_KCCB_CMD_WDG_CFG = 210U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Safety Firmware Watchdog */ -+ RGXFWIF_KCCB_CMD_COUNTER_DUMP = 211U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Controls counter dumping in the FW */ -+#if defined(SUPPORT_VALIDATION) -+ RGXFWIF_KCCB_CMD_GPUMAP = 212U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Request a FW GPU mapping which is written into by the FW with a pattern */ -+#endif -+ RGXFWIF_KCCB_CMD_VZ_DRV_TIME_SLICE = 213U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the GPU time slice for a particular driver. It can only be serviced for the Host DDK */ -+ RGXFWIF_KCCB_CMD_VZ_DRV_TIME_SLICE_INTERVAL = 214U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the GPU time slice interval for all drivers. It can only be serviced for the Host DDK */ -+ -+ /* HWPerf commands */ -+ RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 300U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */ -+ RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 301U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure directly addressable counters for HWPerf */ -+ RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 302U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */ -+ RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS = 303U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks */ -+ RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS = 304U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure the custom counters for HWPerf */ -+ -+} RGXFWIF_KCCB_CMD_TYPE; -+ -+#define RGXFWIF_LAST_ALLOWED_GUEST_KCCB_CMD (RGXFWIF_KCCB_CMD_POW - 1) -+ -+/*! @Brief Kernel CCB command packet */ -+typedef struct -+{ -+ RGXFWIF_KCCB_CMD_TYPE eCmdType; /*!< Command type */ -+ IMG_UINT32 ui32KCCBFlags; /*!< Compatibility and other flags */ -+ -+ /* NOTE: Make sure that uCmdData is the last member of this struct -+ * This is to calculate actual command size for device mem copy. 
-+ * (Refer RGXGetCmdMemCopySize()) -+ * */ -+ union -+ { -+ RGXFWIF_KCCB_CMD_KICK_DATA sCmdKickData; /*!< Data for Kick command */ -+ RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA sCombinedTA3DCmdKickData; /*!< Data for combined TA/3D Kick command */ -+ RGXFWIF_MMUCACHEDATA sMMUCacheData; /*!< Data for MMU cache command */ -+ RGXFWIF_BPDATA sBPData; /*!< Data for Breakpoint Commands */ -+ RGXFWIF_SLCFLUSHINVALDATA sSLCFlushInvalData; /*!< Data for SLC Flush/Inval commands */ -+ RGXFWIF_CLEANUP_REQUEST sCleanupData; /*!< Data for cleanup commands */ -+ RGXFWIF_POWER_REQUEST sPowData; /*!< Data for power request commands */ -+ RGXFWIF_HWPERF_CTRL sHWPerfCtrl; /*!< Data for HWPerf control command */ -+ RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS sHWPerfCfgEnableBlks; /*!< Data for HWPerf configure, clear and enable performance counter block command */ -+ RGXFWIF_HWPERF_CTRL_BLKS sHWPerfCtrlBlks; /*!< Data for HWPerf enable or disable performance counter block commands */ -+ RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS sHWPerfSelectCstmCntrs; /*!< Data for HWPerf configure the custom counters to read */ -+ RGXFWIF_HWPERF_CONFIG_DA_BLKS sHWPerfCfgDABlks; /*!< Data for HWPerf configure Directly Addressable blocks */ -+ RGXFWIF_CORECLKSPEEDCHANGE_DATA sCoreClkSpeedChangeData;/*!< Data for core clock speed change */ -+ RGXFWIF_ZSBUFFER_BACKING_DATA sZSBufferBackingData; /*!< Feedback for Z/S Buffer backing/unbacking */ -+ RGXFWIF_FREELIST_GS_DATA sFreeListGSData; /*!< Feedback for Freelist grow/shrink */ -+ RGXFWIF_FREELISTS_RECONSTRUCTION_DATA sFreeListsReconstructionData; /*!< Feedback for Freelists reconstruction */ -+ RGXFWIF_REGCONFIG_DATA sRegConfigData; /*!< Data for custom register configuration */ -+ RGXFWIF_WRITE_OFFSET_UPDATE_DATA sWriteOffsetUpdateData; /*!< Data for informing the FW about the write offset update */ -+ RGXFWIF_PDVFS_MAX_FREQ_DATA sPDVFSMaxFreqData; /*!< Data for setting the max frequency/OPP */ -+ RGXFWIF_PDVFS_MIN_FREQ_DATA sPDVFSMinFreqData; /*!< Data for setting the min frequency/OPP */ -+ RGXFWIF_OS_STATE_CHANGE_DATA sCmdOSOnlineStateData; /*!< Data for updating the Guest Online states */ -+ RGXFWIF_DEV_VIRTADDR sTBIBuffer; /*!< Dev address for TBI buffer allocated on demand */ -+ RGXFWIF_COUNTER_DUMP_DATA sCounterDumpConfigData; /*!< Data for dumping of register ranges */ -+ RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE_DATA sDisableZSStoreData; /*!< Data for disabling zs store of a 3D workload */ -+ RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA sForceUpdateData; /*!< Data for signalling all unmet fences for a given CCB */ -+#if defined(SUPPORT_VALIDATION) && !defined(SUPPORT_OPEN_SOURCE_DRIVER) -+ RGXFWIF_RGXREG_DATA sFwRgxData; /*!< Data for reading off an RGX register */ -+ RGXFWIF_GPUMAP_DATA sGPUMapData; /*!< Data for requesting a FW GPU mapping which is written into by the FW with a pattern */ -+#endif -+ } UNCACHED_ALIGN uCmdData; -+} UNCACHED_ALIGN RGXFWIF_KCCB_CMD; -+ -+RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_KCCB_CMD); -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_KCCB_CMD) == 64, -+ "RGXFWIF_KCCB_CMD is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/*! @} End of KCCBTypes */ -+ -+/*! -+ * @Defgroup FWCCBTypes Firmware CCB data interface -+ * @Brief Types grouping data structures and defines used in realising the Firmware CCB functionality -+ * @{ -+ */ -+ -+/*! 
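The note above explains why uCmdData must remain the last member: only the header plus the union member actually in use needs to be copied to device-accessible memory. Below is a hedged sketch of that calculation; it is an illustrative stand-in, not the real RGXGetCmdMemCopySize().

/* Illustrative sketch only (hypothetical helper, not patch content). */
#include <stddef.h>   /* offsetof, size_t */

static size_t ExampleKCCBCmdCopySize(size_t uiCmdDataSize)
{
	/* For a kick command, uiCmdDataSize would be
	 * sizeof(RGXFWIF_KCCB_CMD_KICK_DATA); bytes beyond the union member in
	 * use never need to reach the firmware. */
	return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + uiCmdDataSize;
}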
-+ ****************************************************************************** -+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING and the -+ * \ref RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING Firmware CCB commands -+ *****************************************************************************/ -+typedef struct -+{ -+ IMG_UINT32 ui32ZSBufferID; /*!< ZS buffer ID */ -+} RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA; -+ -+/*! -+ ****************************************************************************** -+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_FREELIST_GROW Firmware CCB -+ * command -+ *****************************************************************************/ -+typedef struct -+{ -+ IMG_UINT32 ui32FreelistID; /*!< Freelist ID */ -+} RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA; -+ -+/*! -+ ****************************************************************************** -+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION -+ * Firmware CCB command -+ *****************************************************************************/ -+typedef struct -+{ -+ IMG_UINT32 ui32FreelistsCount; /*!< Freelists count */ -+ IMG_UINT32 ui32HwrCounter; /*!< HWR counter */ -+ IMG_UINT32 aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT]; /*!< Array of freelist IDs to reconstruct */ -+} RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA; -+ -+#define RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF (1U<<0) /*!< 1 if a page fault happened */ -+#define RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS (1U<<1) /*!< 1 if applicable to all contexts */ -+ -+/*! -+ ****************************************************************************** -+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION -+ * Firmware CCB command -+ *****************************************************************************/ -+typedef struct -+{ -+ IMG_UINT32 ui32ServerCommonContextID; /*!< Context affected by the reset */ -+ RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reason for reset */ -+ RGXFWIF_DM eDM; /*!< Data Master affected by the reset */ -+ IMG_UINT32 ui32ResetJobRef; /*!< Job ref running at the time of reset */ -+ IMG_UINT32 ui32Flags; /*!< RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG bitfield */ -+ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< At what page catalog address */ -+ IMG_DEV_VIRTADDR RGXFW_ALIGN sFaultAddress; /*!< Page fault address (only when applicable) */ -+} RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA; -+ -+/*! -+ ****************************************************************************** -+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION -+ * Firmware CCB command -+ *****************************************************************************/ -+typedef struct -+{ -+ IMG_DEV_VIRTADDR sFWFaultAddr; /*!< Page fault address */ -+} RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA; -+ -+/*! 
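Because sFaultAddress is described as valid only when applicable, a consumer of the reset notification can test ui32Flags before using it. A minimal sketch with a hypothetical helper:

/* Illustrative sketch only (hypothetical helper, not patch content). */
static IMG_BOOL ExampleResetHasValidFaultAddress(const RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psData)
{
	/* RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF marks resets caused by a page
	 * fault; only then is sFaultAddress populated. */
	return (IMG_BOOL)((psData->ui32Flags &
	                   RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF) != 0U);
}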
-+ ****************************************************************************** -+ * List of command types supported by the Firmware CCB -+ *****************************************************************************/ -+typedef enum -+{ -+ RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be backed with physical pages -+ \n Command data: RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA */ -+ RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be unbacked -+ \n Command data: RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA */ -+ RGXFWIF_FWCCB_CMD_FREELIST_GROW = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand freelist grow -+ \n Command data: RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA */ -+ RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION = 104U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests freelists reconstruction -+ \n Command data: RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA */ -+ RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Notifies host of a HWR event on a context -+ \n Command data: RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA */ -+ RGXFWIF_FWCCB_CMD_DEBUG_DUMP = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand debug dump -+ \n Command data: None */ -+ RGXFWIF_FWCCB_CMD_UPDATE_STATS = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand update on process stats -+ \n Command data: RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA */ -+ -+ RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, -+ RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests GPU restart -+ \n Command data: None */ -+#if defined(SUPPORT_VALIDATION) -+ RGXFWIF_FWCCB_CMD_REG_READ = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, -+#if defined(SUPPORT_SOC_TIMER) -+ RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS = 111U | RGX_CMD_MAGIC_DWORD_SHIFTED, -+#endif -+#endif -+ RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Notifies host of a FW pagefault -+ \n Command data: RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA */ -+} RGXFWIF_FWCCB_CMD_TYPE; -+ -+/*! -+ ****************************************************************************** -+ * List of the various stats of the process to update/increment -+ *****************************************************************************/ -+typedef enum -+{ -+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS=1, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumPartialRenders stat */ -+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumOutOfMemory stat */ -+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTAStores stat */ -+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32Num3DStores stat */ -+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumCDMStores stat */ -+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTDMStores stat */ -+} RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE; -+ -+/*! 
-+ ****************************************************************************** -+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_UPDATE_STATS Firmware CCB -+ * command -+ *****************************************************************************/ -+typedef struct -+{ -+ RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE eElementToUpdate; /*!< Element to update */ -+ IMG_PID pidOwner; /*!< The pid of the process whose stats are being updated */ -+ IMG_INT32 i32AdjustmentValue; /*!< Adjustment to be made to the statistic */ -+} RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA; -+ -+typedef struct -+{ -+ IMG_UINT32 ui32CoreClkRate; -+} UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA; -+ -+#if defined(SUPPORT_VALIDATION) -+typedef struct -+{ -+ IMG_UINT64 ui64RegValue; -+} RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA; -+ -+#if defined(SUPPORT_SOC_TIMER) -+typedef struct -+{ -+ IMG_UINT64 ui64timerGray; -+ IMG_UINT64 ui64timerBinary; -+ IMG_UINT64 aui64uscTimers[RGX_FEATURE_NUM_CLUSTERS]; -+} RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA; -+#endif -+#endif -+ -+/*! -+ ****************************************************************************** -+ * @Brief Firmware CCB command structure -+ *****************************************************************************/ -+typedef struct -+{ -+ RGXFWIF_FWCCB_CMD_TYPE eCmdType; /*!< Command type */ -+ IMG_UINT32 ui32FWCCBFlags; /*!< Compatibility and other flags */ -+ -+ union -+ { -+ RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA sCmdZSBufferBacking; /*!< Data for Z/S-Buffer on-demand (un)backing*/ -+ RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA sCmdFreeListGS; /*!< Data for on-demand freelist grow/shrink */ -+ RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA sCmdFreeListsReconstruction; /*!< Data for freelists reconstruction */ -+ RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA sCmdContextResetNotification; /*!< Data for context reset notification */ -+ RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA sCmdUpdateStatsData; /*!< Data for updating process stats */ -+ RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA sCmdCoreClkRateChange; -+ RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA sCmdFWPagefault; /*!< Data for context reset notification */ -+#if defined(SUPPORT_VALIDATION) && !defined(SUPPORT_OPEN_SOURCE_DRIVER) -+ RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA sCmdRgxRegReadData; -+#if defined(SUPPORT_SOC_TIMER) -+ RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA sCmdTimers; -+#endif -+#endif -+ } RGXFW_ALIGN uCmdData; -+} RGXFW_ALIGN RGXFWIF_FWCCB_CMD; -+ -+RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD); -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_FWCCB_CMD) == 48, -+ "RGXFWIF_FWCCB_CMD is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/*! @} End of FWCCBTypes */ -+ -+/*! -+ ****************************************************************************** -+ * Workload estimation Firmware CCB command structure for RGX -+ *****************************************************************************/ -+typedef struct -+{ -+ IMG_UINT16 ui16ReturnDataIndex; /*!< Index for return data array */ -+ IMG_UINT32 ui32CyclesTaken; /*!< The cycles the workload took on the hardware */ -+} RGXFWIF_WORKEST_FWCCB_CMD; -+ -+/*! 
-+ * @Defgroup ClientCCBTypes Client CCB data interface -+ * @Brief Types grouping data structures and defines used in realising Client CCB commands/functionality -+ * @{ -+ */ -+ -+/* Required memory alignment for 64-bit variables accessible by Meta -+ (The gcc meta aligns 64-bit variables to 64-bit; therefore, memory shared -+ between the host and meta that contains 64-bit variables has to maintain -+ this alignment) */ -+#define RGXFWIF_FWALLOC_ALIGN sizeof(IMG_UINT64) -+ -+#define RGX_CCB_TYPE_TASK (IMG_UINT32_C(1) << 15) -+#define RGX_CCB_FWALLOC_ALIGN(size) (PVR_ALIGN(size, RGXFWIF_FWALLOC_ALIGN)) -+ -+typedef IMG_UINT32 RGXFWIF_CCB_CMD_TYPE; -+ -+/*! -+ * @Name Client CCB command types -+ * @{ -+ */ -+#define RGXFWIF_CCB_CMD_TYPE_GEOM (201U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< TA DM command */ -+#define RGXFWIF_CCB_CMD_TYPE_TQ_3D (202U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command for TQ operation */ -+#define RGXFWIF_CCB_CMD_TYPE_3D (203U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command */ -+#define RGXFWIF_CCB_CMD_TYPE_3D_PR (204U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command for Partial render */ -+#define RGXFWIF_CCB_CMD_TYPE_CDM (205U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< Compute DM command */ -+#define RGXFWIF_CCB_CMD_TYPE_TQ_TDM (206U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< TDM command */ -+#define RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE (207U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) -+#define RGXFWIF_CCB_CMD_TYPE_TQ_2D (208U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 2D DM command for TQ operation */ -+#define RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP (209U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) -+#define RGXFWIF_CCB_CMD_TYPE_NULL (210U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) -+#define RGXFWIF_CCB_CMD_TYPE_ABORT (211U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) -+ -+/* Leave a gap between CCB specific commands and generic commands */ -+#define RGXFWIF_CCB_CMD_TYPE_FENCE (212U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence dependencies of a command */ -+#define RGXFWIF_CCB_CMD_TYPE_UPDATE (213U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence updates of a command */ -+#define RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE (214U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence updates related to workload resources */ -+#define RGXFWIF_CCB_CMD_TYPE_FENCE_PR (215U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence dependencies of a PR command */ -+#define RGXFWIF_CCB_CMD_TYPE_PRIORITY (216U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Context priority update command */ -+/* Pre and Post timestamp commands are supposed to sandwich the DM cmd. The -+ padding code with the CCB wrap upsets the FW if we don't have the task type -+ bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types. 
-+*/ -+#define RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP (217U | RGX_CMD_MAGIC_DWORD_SHIFTED) -+#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE (218U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates of a command */ -+#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE (219U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates related to workload resources */ -+ -+#if defined(SUPPORT_VALIDATION) -+#define RGXFWIF_CCB_CMD_TYPE_REG_READ (220U | RGX_CMD_MAGIC_DWORD_SHIFTED) -+#endif -+ -+#define RGXFWIF_CCB_CMD_TYPE_PADDING (221U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Skip without action type command */ -+#define RGXFWIF_CCB_CMD_TYPE_VK_TIMESTAMP (223U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< Process a vulkan timestamp */ -+/*! @} End of Client CCB command types */ -+ -+typedef struct -+{ -+ /* Index for the KM Workload estimation return data array */ -+ IMG_UINT16 RGXFW_ALIGN ui16ReturnDataIndex; -+ /* Predicted time taken to do the work in cycles */ -+ IMG_UINT32 RGXFW_ALIGN ui32CyclesPrediction; -+ /* Deadline for the workload (in usecs) */ -+ IMG_UINT64 RGXFW_ALIGN ui64Deadline; -+} RGXFWIF_WORKEST_KICK_DATA; -+ -+/*! @Brief Command header of a command in the client CCB buffer. -+ * -+ * Followed by this header is the command-data specific to the -+ * command-type as specified in the header. -+ */ -+typedef struct -+{ -+ RGXFWIF_CCB_CMD_TYPE eCmdType; /*!< Command data type following this command header */ -+ IMG_UINT32 ui32CmdSize; /*!< Size of the command following this header */ -+ IMG_UINT32 ui32ExtJobRef; /*!< external job reference - provided by client and used in debug for tracking submitted work */ -+ IMG_UINT32 ui32IntJobRef; /*!< internal job reference - generated by services and used in debug for tracking submitted work */ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) && !defined(SUPPORT_OPEN_SOURCE_DRIVER) -+ RGXFWIF_WORKEST_KICK_DATA RGXFW_ALIGN sWorkEstKickData; /*!< Workload Estimation - Workload Estimation Data */ -+#endif -+} RGXFWIF_CCB_CMD_HEADER; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_CCB_CMD_HEADER) == 16, -+ "RGXFWIF_CCB_CMD_HEADER is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/* -+ ****************************************************************************** -+ * Client CCB commands which are only required by the kernel -+ *****************************************************************************/ -+ -+/*! @Brief Command data for \ref RGXFWIF_CCB_CMD_TYPE_PRIORITY type client CCB command */ -+typedef struct -+{ -+ IMG_INT32 i32Priority; /*!< Priority level */ -+} RGXFWIF_CMD_PRIORITY; -+ -+/*! @} End of ClientCCBTypes */ -+ -+/*! -+ ****************************************************************************** -+ * Signature and Checksums Buffer -+ *****************************************************************************/ -+typedef struct -+{ -+ PRGXFWIF_SIGBUFFER sBuffer; /*!< Ptr to Signature Buffer memory */ -+ IMG_UINT32 ui32LeftSizeInRegs; /*!< Amount of space left for storing regs in the buffer */ -+} UNCACHED_ALIGN RGXFWIF_SIGBUF_CTL; -+ -+typedef struct -+{ -+ PRGXFWIF_COUNTERBUFFER sBuffer; /*!< Ptr to counter dump buffer */ -+ IMG_UINT32 ui32SizeInDwords; /*!< Amount of space for storing in the buffer */ -+} UNCACHED_ALIGN RGXFWIF_COUNTER_DUMP_CTL; -+ -+typedef struct -+{ -+ PRGXFWIF_FIRMWAREGCOVBUFFER sBuffer; /*!< Ptr to firmware gcov buffer */ -+ IMG_UINT32 ui32Size; /*!< Amount of space for storing in the buffer */ -+} UNCACHED_ALIGN RGXFWIF_FIRMWARE_GCOV_CTL; -+ -+/*! 
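Since each client CCB entry is an RGXFWIF_CCB_CMD_HEADER followed by ui32CmdSize bytes of payload, walking the buffer reduces to adding the two and wrapping, and the RGX_CCB_TYPE_TASK bit separates DM task commands from fence/update commands. A minimal sketch under those assumptions (hypothetical helpers; real code would also have to honour RGX_CCB_FWALLOC_ALIGN padding):

/* Illustrative sketch only (hypothetical helpers, not patch content). */
static IMG_UINT32 ExampleNextCCBCmdOffset(IMG_UINT32 ui32CurrOffset,
                                          const RGXFWIF_CCB_CMD_HEADER *psHeader,
                                          IMG_UINT32 ui32WrapMask)
{
	/* Skip the header and its payload, then wrap inside the circular buffer. */
	IMG_UINT32 ui32Next = ui32CurrOffset +
	                      (IMG_UINT32)sizeof(RGXFWIF_CCB_CMD_HEADER) +
	                      psHeader->ui32CmdSize;
	return ui32Next & ui32WrapMask;
}

static IMG_BOOL ExampleIsTaskCommand(RGXFWIF_CCB_CMD_TYPE eCmdType)
{
	/* DM workload commands carry RGX_CCB_TYPE_TASK; fences, updates and
	 * priority changes do not. */
	return (IMG_BOOL)((eCmdType & RGX_CCB_TYPE_TASK) != 0U);
}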
-+ ***************************************************************************** -+ * RGX Compatibility checks -+ *****************************************************************************/ -+ -+/* WARNING: Whenever the layout of RGXFWIF_COMPCHECKS_BVNC changes, the -+ following define should be increased by 1 to indicate to the -+ compatibility logic that layout has changed. */ -+#define RGXFWIF_COMPCHECKS_LAYOUT_VERSION 3 -+ -+typedef struct -+{ -+ IMG_UINT32 ui32LayoutVersion; /* WARNING: This field must be defined as first one in this structure */ -+ IMG_UINT64 RGXFW_ALIGN ui64BVNC; -+} UNCACHED_ALIGN RGXFWIF_COMPCHECKS_BVNC; -+ -+typedef struct -+{ -+ IMG_UINT8 ui8OsCountSupport; -+} UNCACHED_ALIGN RGXFWIF_INIT_OPTIONS; -+ -+#define RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \ -+ RGXFWIF_COMPCHECKS_BVNC (name) = { \ -+ RGXFWIF_COMPCHECKS_LAYOUT_VERSION, \ -+ 0, \ -+ } -+#define RGXFWIF_COMPCHECKS_BVNC_INIT(name) \ -+ do { \ -+ (name).ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION; \ -+ (name).ui64BVNC = 0; \ -+ } while (false) -+ -+typedef struct -+{ -+ RGXFWIF_COMPCHECKS_BVNC sHWBVNC; /*!< hardware BVNC (from the RGX registers) */ -+ RGXFWIF_COMPCHECKS_BVNC sFWBVNC; /*!< firmware BVNC */ -+ IMG_UINT32 ui32DDKVersion; /*!< software DDK version */ -+ IMG_UINT32 ui32DDKBuild; /*!< software DDK build no. */ -+ IMG_UINT32 ui32BuildOptions; /*!< build options bit-field */ -+ RGXFWIF_INIT_OPTIONS sInitOptions; /*!< initialisation options bit-field */ -+ IMG_BOOL bUpdated; /*!< Information is valid */ -+} UNCACHED_ALIGN RGXFWIF_COMPCHECKS; -+ -+/*! @Brief Firmware Runtime configuration data \ref RGXFWIF_RUNTIME_CFG -+ * allocated by services and used by the Firmware on boot -+ **/ -+typedef struct -+{ -+ IMG_UINT32 ui32ActivePMLatencyms; /*!< APM latency in ms before signalling IDLE to the host */ -+ IMG_UINT32 ui32RuntimeCfgFlags; /*!< Compatibility and other flags */ -+ IMG_BOOL bActivePMLatencyPersistant; /*!< If set, APM latency does not reset to system default each GPU power transition */ -+ IMG_UINT32 ui32CoreClockSpeed; /*!< Core clock speed, currently only used to calculate timer ticks */ -+ IMG_UINT32 ui32DefaultDustsNumInit; /*!< Last number of dusts change requested by the host */ -+ IMG_UINT32 ui32PHRMode; /*!< Periodic Hardware Reset configuration values */ -+ IMG_UINT32 ui32HCSDeadlineMS; /*!< New number of milliseconds C/S is allowed to last */ -+ IMG_UINT32 ui32WdgPeriodUs; /*!< The watchdog period in microseconds */ -+ IMG_UINT32 aui32DriverPriority[RGXFW_MAX_NUM_OSIDS]; /*!< Array of priorities per OS */ -+ IMG_UINT32 aui32DriverIsolationGroup[RGXFW_MAX_NUM_OSIDS]; /*!< Array of isolation groups per OS */ -+ IMG_UINT32 aui32DriverTimeSlice[RGXFW_MAX_NUM_OSIDS]; /*!< Array of time slice per OS */ -+ IMG_UINT32 ui32DriverTimeSliceInterval; /*!< Time slice interval */ -+ IMG_UINT32 ui32VzConnectionCooldownPeriodInSec; /*!< Vz Connection Cooldown period in secs */ -+ -+ PRGXFWIF_HWPERFBUF sHWPerfBuf; /*!< On-demand allocated HWPerf buffer address, to be passed to the FW */ -+#if defined(SUPPORT_VALIDATION) && !defined(SUPPORT_OPEN_SOURCE_DRIVER) -+ IMG_BOOL bInjectFWFault; /*!< Injecting firmware fault to validate recovery through Host */ -+#endif -+} RGXFWIF_RUNTIME_CFG; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_RUNTIME_CFG) == 68, -+ "RGXFWIF_RUNTIME_CFG is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/*! 
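A sketch of the comparison the compatibility block above enables: the layout version guards the shape of the structure itself, and the packed BVNC values reported by hardware and firmware must agree. The helper is hypothetical and shown only as an illustration.

/* Illustrative sketch only (hypothetical helper, not patch content). */
static IMG_BOOL ExampleBVNCCompatible(const RGXFWIF_COMPCHECKS_BVNC *psHWBVNC,
                                      const RGXFWIF_COMPCHECKS_BVNC *psFWBVNC)
{
	/* Differing layout versions mean the two sides disagree about the shape
	 * of this very structure, so the BVNC values cannot be trusted either. */
	return (IMG_BOOL)((psHWBVNC->ui32LayoutVersion == psFWBVNC->ui32LayoutVersion) &&
	                  (psHWBVNC->ui64BVNC == psFWBVNC->ui64BVNC));
}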
-+ ***************************************************************************** -+ * Control data for RGX -+ *****************************************************************************/ -+ -+#define RGXFWIF_HWR_DEBUG_DUMP_ALL (99999U) -+ -+#if defined(PDUMP) -+ -+#define RGXFWIF_PID_FILTER_MAX_NUM_PIDS 32U -+ -+typedef enum -+{ -+ RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT, -+ RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT -+} RGXFWIF_PID_FILTER_MODE; -+ -+typedef struct -+{ -+ IMG_PID uiPID; -+ IMG_UINT32 ui32DriverID; -+} RGXFW_ALIGN RGXFWIF_PID_FILTER_ITEM; -+ -+typedef struct -+{ -+ RGXFWIF_PID_FILTER_MODE eMode; -+ /* each process in the filter list is specified by a PID and OS ID pair. -+ * each PID and OS pair is an item in the items array (asItems). -+ * if the array contains less than RGXFWIF_PID_FILTER_MAX_NUM_PIDS entries -+ * then it must be terminated by an item with pid of zero. -+ */ -+ RGXFWIF_PID_FILTER_ITEM asItems[RGXFWIF_PID_FILTER_MAX_NUM_PIDS]; -+} RGXFW_ALIGN RGXFWIF_PID_FILTER; -+#endif -+ -+#if defined(SUPPORT_SECURITY_VALIDATION) -+#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_DATA (0x1U << 0) -+#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_CODE (0x1U << 1) -+#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_NONSECURE (0x1U << 2) -+#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_SECURE (0x1U << 3) -+#endif -+ -+typedef enum -+{ -+ RGXFWIF_TPU_DM_PDM = 0, -+ RGXFWIF_TPU_DM_VDM = 1, -+ RGXFWIF_TPU_DM_CDM = 2, -+ RGXFWIF_TPU_DM_TDM = 3, -+ RGXFWIF_TPU_DM_LAST -+} RGXFWIF_TPU_DM; -+ -+typedef enum -+{ -+ RGXFWIF_GPIO_VAL_OFF = 0, /*!< No GPIO validation */ -+ RGXFWIF_GPIO_VAL_GENERAL = 1, /*!< Simple test case that -+ initiates by sending data via the -+ GPIO and then sends back any data -+ received over the GPIO */ -+ RGXFWIF_GPIO_VAL_AP = 2, /*!< More complex test case that writes -+ and reads data across the entire -+ GPIO AP address range.*/ -+#if defined(SUPPORT_STRIP_RENDERING) -+ RGXFWIF_GPIO_VAL_SR_BASIC = 3, /*!< Strip Rendering AP based basic test.*/ -+ RGXFWIF_GPIO_VAL_SR_COMPLEX = 4, /*!< Strip Rendering AP based complex test.*/ -+#endif -+ RGXFWIF_GPIO_VAL_TESTBENCH = 5, /*!< Validates the GPIO Testbench. */ -+ RGXFWIF_GPIO_VAL_LOOPBACK = 6, /*!< Send and then receive each byte -+ in the range 0-255. */ -+ RGXFWIF_GPIO_VAL_LOOPBACK_LITE = 7, /*!< Send and then receive each power-of-2 -+ byte in the range 0-255. */ -+ RGXFWIF_GPIO_VAL_LAST -+} RGXFWIF_GPIO_VAL_MODE; -+ -+typedef IMG_UINT32 FW_PERF_CONF; -+#define FW_PERF_CONF_NONE 0U -+#define FW_PERF_CONF_ICACHE 1U -+#define FW_PERF_CONF_DCACHE 2U -+#define FW_PERF_CONF_JTLB_INSTR 5U -+#define FW_PERF_CONF_INSTRUCTIONS 6U -+ -+typedef enum -+{ -+ FW_BOOT_STAGE_TLB_INIT_FAILURE = -2, -+ FW_BOOT_STAGE_NOT_AVAILABLE = -1, -+ FW_BOOT_NOT_STARTED = 0, -+ FW_BOOT_BLDR_STARTED = 1, -+ FW_BOOT_CACHE_DONE, -+ FW_BOOT_TLB_DONE, -+ FW_BOOT_MAIN_STARTED, -+ FW_BOOT_ALIGNCHECKS_DONE, -+ FW_BOOT_INIT_DONE, -+} FW_BOOT_STAGE; -+ -+/*! -+ * @AddToGroup KCCBTypes -+ * @{ -+ * @Name Kernel CCB return slot responses -+ * @{ -+ * Usage of bit-fields instead of bare integers -+ * allows FW to possibly pack-in several responses for each single kCCB command. 
-+ */ -+ -+#define RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED (1U << 0) /*!< Command executed (return status from FW) */ -+#define RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY (1U << 1) /*!< A cleanup was requested but resource busy */ -+#define RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE (1U << 2) /*!< Poll failed in FW for a HW operation to complete */ -+ -+#define RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE 0x0U /*!< Reset value of a kCCB return slot (set by host) */ -+/*! -+ * @} End of Name Kernel CCB return slot responses -+ * @} End of AddToGroup KCCBTypes -+ */ -+ -+/*! @Brief OS connection data \ref RGXFWIF_CONNECTION_CTL allocated -+ * by services and used to track OS state in Firmware and Services -+ **/ -+typedef struct -+{ -+ /* Fw-Os connection states */ -+ volatile RGXFWIF_CONNECTION_FW_STATE eConnectionFwState; /*!< Firmware-OS connection state */ -+ volatile RGXFWIF_CONNECTION_OS_STATE eConnectionOsState; /*!< Services-OS connection state */ -+ volatile IMG_UINT32 ui32AliveFwToken; /*!< OS Alive token updated by Firmware */ -+ volatile IMG_UINT32 ui32AliveOsToken; /*!< OS Alive token updated by Services */ -+} UNCACHED_ALIGN RGXFWIF_CONNECTION_CTL; -+ -+/*! @Brief Firmware OS Initialization data \ref RGXFWIF_OSINIT -+ * allocated by services and used by the Firmware on boot -+ **/ -+typedef struct -+{ -+ /* Kernel CCB */ -+ PRGXFWIF_CCB_CTL psKernelCCBCtl; /*!< Kernel CCB Control */ -+ PRGXFWIF_CCB psKernelCCB; /*!< Kernel CCB */ -+ PRGXFWIF_CCB_RTN_SLOTS psKernelCCBRtnSlots; /*!< Kernel CCB return slots */ -+ -+ /* Firmware CCB */ -+ PRGXFWIF_CCB_CTL psFirmwareCCBCtl; /*!< Firmware CCB control */ -+ PRGXFWIF_CCB psFirmwareCCB; /*!< Firmware CCB */ -+ -+ /* Workload Estimation Firmware CCB */ -+ PRGXFWIF_CCB_CTL psWorkEstFirmwareCCBCtl; /*!< Workload estimation control */ -+ PRGXFWIF_CCB psWorkEstFirmwareCCB; /*!< Workload estimation buffer */ -+ -+ PRGXFWIF_HWRINFOBUF sRGXFWIfHWRInfoBufCtl; /*!< HWRecoveryInfo control */ -+ -+ IMG_UINT32 ui32HWRDebugDumpLimit; /*!< Firmware debug dump maximum limit */ -+ -+ PRGXFWIF_OSDATA sFwOsData; /*!< Firmware per-os shared data */ -+ -+ RGXFWIF_COMPCHECKS sRGXCompChecks; /*!< Compatibility checks to be populated by the Firmware */ -+ -+} UNCACHED_ALIGN RGXFWIF_OSINIT; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_OSINIT) == 104, -+ "RGXFWIF_OSINIT is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/*! @Brief Firmware System Initialization data \ref RGXFWIF_SYSINIT -+ * allocated by services and used by the Firmware on boot -+ **/ -+typedef struct -+{ -+ RGX_MIPS_STATE sMIPSState; /*!< MIPS Debug Data; this must be the first member in the structure */ -+ -+ IMG_DEV_PHYADDR RGXFW_ALIGN sFaultPhysAddr; /*!< Fault read address */ -+ -+ IMG_DEV_VIRTADDR RGXFW_ALIGN sPDSExecBase; /*!< PDS execution base */ -+ IMG_DEV_VIRTADDR RGXFW_ALIGN sUSCExecBase; /*!< USC execution base */ -+ IMG_DEV_VIRTADDR RGXFW_ALIGN sFBCDCStateTableBase; /*!< FBCDC bindless texture state table base */ -+ IMG_DEV_VIRTADDR RGXFW_ALIGN sFBCDCLargeStateTableBase; -+ IMG_DEV_VIRTADDR RGXFW_ALIGN sTextureHeapBase; /*!< Texture state base */ -+ -+ IMG_UINT64 RGXFW_ALIGN ui64HWPerfFilter; /*! 
Event filter for Firmware events */ -+ -+ IMG_DEV_VIRTADDR RGXFW_ALIGN sSLC3FenceDevVAddr; /*!< Address to use as a fence when issuing SLC3_CFI */ -+ -+ IMG_UINT32 RGXFW_ALIGN aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST]; -+ -+ RGXFWIF_SIGBUF_CTL asSigBufCtl[RGXFWIF_DM_MAX]; /*!< Signature and Checksum Buffers for DMs */ -+ -+ RGXFWIF_PDVFS_OPP sPDVFSOPPInfo; -+ -+ RGXFWIF_DMA_ADDR sCorememDataStore; /*!< Firmware coremem data */ -+ -+ RGXFWIF_COUNTER_DUMP_CTL sCounterDumpCtl; -+ -+#if defined(SUPPORT_FIRMWARE_GCOV) -+ RGXFWIF_FIRMWARE_GCOV_CTL sFirmwareGcovCtl; /*!< Firmware gcov buffer control */ -+#endif -+ -+ IMG_UINT32 ui32FilterFlags; -+ -+ PRGXFWIF_RUNTIME_CFG sRuntimeCfg; /*!< Firmware Runtime configuration */ -+ -+ PRGXFWIF_TRACEBUF sTraceBufCtl; /*!< Firmware Trace buffer control */ -+ PRGXFWIF_SYSDATA sFwSysData; /*!< Firmware System shared data */ -+#if defined(SUPPORT_TBI_INTERFACE) -+ PRGXFWIF_TBIBUF sTBIBuf; /*!< Tbi log buffer */ -+#endif -+ -+ PRGXFWIF_GPU_UTIL_FWCB sGpuUtilFWCbCtl; /*!< GPU utilization buffer */ -+ PRGXFWIF_REG_CFG sRegCfg; /*!< Firmware register user configuration */ -+ PRGXFWIF_HWPERF_CTL sHWPerfCtl; /*!< HWPerf counter block configuration.*/ -+ -+ RGXFWIF_DEV_VIRTADDR sAlignChecks; /*!< Array holding Server structures alignment data */ -+ -+ IMG_UINT32 ui32InitialCoreClockSpeed; /*!< Core clock speed at FW boot time */ -+ -+ IMG_UINT32 ui32InitialActivePMLatencyms; /*!< APM latency in ms before signalling IDLE to the host */ -+ -+ IMG_BOOL bFirmwareStarted; /*!< Flag to be set by the Firmware after successful start */ -+ -+ IMG_UINT32 ui32MarkerVal; /*!< Host/FW Trace synchronisation Partition Marker */ -+ -+ IMG_UINT32 ui32FirmwareStartedTimeStamp; /*!< Firmware initialization complete time */ -+ -+ IMG_UINT32 ui32JonesDisableMask; -+ -+ FW_PERF_CONF eFirmwarePerf; /*!< Firmware performance counter config */ -+ -+ /** -+ * FW Pointer to memory containing core clock rate in Hz. -+ * Firmware (PDVFS) updates the memory when running on non primary FW thread -+ * to communicate to host driver. -+ */ -+ PRGXFWIF_CORE_CLK_RATE sCoreClockRate; -+ -+#if defined(PDUMP) -+ RGXFWIF_PID_FILTER sPIDFilter; -+#endif -+ -+ RGXFWIF_GPIO_VAL_MODE eGPIOValidationMode; -+ -+ RGX_HWPERF_BVNC sBvncKmFeatureFlags; /*!< Used in HWPerf for decoding BVNC Features*/ -+ -+#if defined(SUPPORT_SECURITY_VALIDATION) -+ IMG_UINT32 ui32SecurityTestFlags; -+ RGXFWIF_DEV_VIRTADDR pbSecureBuffer; -+ RGXFWIF_DEV_VIRTADDR pbNonSecureBuffer; -+#endif -+ -+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) -+ RGXFWIF_DEV_VIRTADDR sActiveContextBufBase; /*!< Active context buffer base */ -+#endif -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS) -+ /* -+ * Used when validation is enabled to allow the host to check -+ * that MTS sent the correct sideband in response to a kick -+ * from a given OSes schedule register. 
-+ * Testing is enabled if RGXFWIF_KICK_TEST_ENABLED_BIT is set -+ * -+ * Set by the host to: -+ * (osid << RGXFWIF_KICK_TEST_OSID_SHIFT) | RGXFWIF_KICK_TEST_ENABLED_BIT -+ * reset to 0 by FW when kicked by the given OSid -+ */ -+ IMG_UINT32 ui32OSKickTest; -+#endif -+ -+ /* Value to write into RGX_CR_TFBC_COMPRESSION_CONTROL */ -+ IMG_UINT32 ui32TFBCCompressionControl; -+ -+#if defined(SUPPORT_AUTOVZ) -+ IMG_UINT32 ui32VzWdgPeriod; -+#endif -+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) -+ /* notify firmware power-up on host-side recovery */ -+ IMG_BOOL bFwHostRecoveryMode; -+#endif -+} UNCACHED_ALIGN RGXFWIF_SYSINIT; -+ -+static_assert(offsetof(RGXFWIF_SYSINIT, sMIPSState) == 0, -+ "sMIPSState is not the first member of the RGXFWIF_SYSINIT struct"); -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS) -+#define RGXFWIF_KICK_TEST_ENABLED_BIT 0x1 -+#define RGXFWIF_KICK_TEST_OSID_SHIFT 0x1 -+#endif -+ -+/*! -+ ***************************************************************************** -+ * Timer correlation shared data and defines -+ *****************************************************************************/ -+ -+typedef struct -+{ -+ IMG_UINT64 RGXFW_ALIGN ui64OSTimeStamp; -+ IMG_UINT64 RGXFW_ALIGN ui64OSMonoTimeStamp; -+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeStamp; -+ -+ /* Utility variable used to convert CR timer deltas to OS timer deltas (nS), -+ * where the deltas are relative to the timestamps above: -+ * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below */ -+ IMG_UINT64 RGXFW_ALIGN ui64CRDeltaToOSDeltaKNs; -+ -+ IMG_UINT32 ui32CoreClockSpeed; -+ IMG_UINT32 ui32Reserved; -+} UNCACHED_ALIGN RGXFWIF_TIME_CORR; -+ -+ -+/* The following macros are used to help converting FW timestamps to the Host -+ * time domain. On the FW the RGX_CR_TIMER counter is used to keep track of -+ * time; it increments by 1 every 256 GPU clock ticks, so the general -+ * formula to perform the conversion is: -+ * -+ * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS, -+ * otherwise if (scale == 10^6) then deltaOS is in uS ] -+ * -+ * deltaCR * 256 256 * scale -+ * deltaOS = --------------- * scale = deltaCR * K [ K = --------------- ] -+ * GPUclockspeed GPUclockspeed -+ * -+ * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20) -+ * to get some better accuracy and to avoid returning 0 in the integer -+ * division 256000000/GPUfreq if GPUfreq is greater than 256MHz. -+ * This is the same as keeping K as a decimal number. -+ * -+ * The maximum deltaOS is slightly more than 5hrs for all GPU frequencies -+ * (deltaCR * K is more or less a constant), and it's relative to the base -+ * OS timestamp sampled as a part of the timer correlation data. -+ * This base is refreshed on GPU power-on, DVFS transition and periodic -+ * frequency calibration (executed every few seconds if the FW is doing -+ * some work), so as long as the GPU is doing something and one of these -+ * events is triggered then deltaCR * K will not overflow and deltaOS will be -+ * correct. -+ */ -+ -+#define RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT (20) -+ -+#define RGXFWIF_GET_DELTA_OSTIME_NS(deltaCR, K) \ -+ (((deltaCR) * (K)) >> RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT) -+ -+ -+/*! 
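To make the conversion above concrete: with the nanosecond scale and a 500 MHz GPU clock, K = (256 * 10^9 << 20) / 500000000 = 536870912, so a CR delta of 1 maps to (1 * K) >> 20 = 512 ns, which agrees with 256 GPU ticks at 500 MHz. Below is a minimal sketch under those assumptions; the helper names are hypothetical and not part of the patch.

/* Illustrative sketch only (hypothetical helpers, not patch content). */
static IMG_UINT64 ExampleCRDeltaToNsFactor(IMG_UINT32 ui32GPUClockSpeedHz)
{
	/* K = (256 * scale << accuracy_shift) / GPU clock, with scale = 10^9
	 * so that the converted delta comes out in nanoseconds. */
	const IMG_UINT64 ui64Scale = 1000000000ULL;
	return ((256ULL * ui64Scale) << RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT) /
	       ui32GPUClockSpeedHz;
}

static IMG_UINT64 ExampleCRDeltaToNs(IMG_UINT64 ui64DeltaCR, IMG_UINT64 ui64K)
{
	/* Same operation the macro above performs: multiply, then shift the
	 * 2^20 accuracy factor back out. */
	return RGXFWIF_GET_DELTA_OSTIME_NS(ui64DeltaCR, ui64K);
}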
-+ ****************************************************************************** -+ * GPU Utilisation -+ *****************************************************************************/ -+ -+/* See rgx_common.h for a list of GPU states */ -+#define RGXFWIF_GPU_UTIL_TIME_MASK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF) & ~RGXFWIF_GPU_UTIL_STATE_MASK) -+ -+#define RGXFWIF_GPU_UTIL_GET_TIME(word) ((word) & RGXFWIF_GPU_UTIL_TIME_MASK) -+#define RGXFWIF_GPU_UTIL_GET_STATE(word) ((word) & RGXFWIF_GPU_UTIL_STATE_MASK) -+ -+/* The OS timestamps computed by the FW are approximations of the real time, -+ * which means they could be slightly behind or ahead the real timer on the Host. -+ * In some cases we can perform subtractions between FW approximated -+ * timestamps and real OS timestamps, so we need a form of protection against -+ * negative results if for instance the FW one is a bit ahead of time. -+ */ -+#define RGXFWIF_GPU_UTIL_GET_PERIOD(newtime,oldtime) \ -+ (((newtime) > (oldtime)) ? ((newtime) - (oldtime)) : 0U) -+ -+#define RGXFWIF_GPU_UTIL_MAKE_WORD(time,state) \ -+ (RGXFWIF_GPU_UTIL_GET_TIME(time) | RGXFWIF_GPU_UTIL_GET_STATE(state)) -+ -+ -+/* The timer correlation array must be big enough to ensure old entries won't be -+ * overwritten before all the HWPerf events linked to those entries are processed -+ * by the MISR. The update frequency of this array depends on how fast the system -+ * can change state (basically how small the APM latency is) and perform DVFS transitions. -+ * -+ * The minimum size is 2 (not 1) to avoid race conditions between the FW reading -+ * an entry while the Host is updating it. With 2 entries in the worst case the FW -+ * will read old data, which is still quite ok if the Host is updating the timer -+ * correlation at that time. 
-+ */ -+#define RGXFWIF_TIME_CORR_ARRAY_SIZE 256U -+#define RGXFWIF_TIME_CORR_CURR_INDEX(seqcount) ((seqcount) % RGXFWIF_TIME_CORR_ARRAY_SIZE) -+ -+/* Make sure the timer correlation array size is a power of 2 */ -+static_assert((RGXFWIF_TIME_CORR_ARRAY_SIZE & (RGXFWIF_TIME_CORR_ARRAY_SIZE - 1U)) == 0U, -+ "RGXFWIF_TIME_CORR_ARRAY_SIZE must be a power of two"); -+ -+typedef struct -+{ -+ RGXFWIF_TIME_CORR sTimeCorr[RGXFWIF_TIME_CORR_ARRAY_SIZE]; -+ IMG_UINT32 ui32TimeCorrSeqCount; -+ -+ /* Compatibility and other flags */ -+ IMG_UINT32 ui32GpuUtilFlags; -+ -+ /* Last GPU state + OS time of the last state update */ -+ IMG_UINT64 RGXFW_ALIGN ui64GpuLastWord; -+ /* Counters for the amount of time the GPU was active/idle/blocked */ -+ IMG_UINT64 RGXFW_ALIGN aui64GpuStatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM]; -+ -+ /* Last GPU DM per-OS states + OS time of the last state update */ -+ IMG_UINT64 RGXFW_ALIGN aaui64DMOSLastWord[RGXFWIF_DM_MAX][RGXFW_MAX_NUM_OSIDS]; -+ /* Counters for the amount of time the GPU DMs were active/idle/blocked */ -+ IMG_UINT64 RGXFW_ALIGN aaaui64DMOSStatsCounters[RGXFWIF_DM_MAX][RGXFW_MAX_NUM_OSIDS][RGXFWIF_GPU_UTIL_STATE_NUM]; -+} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FWCB; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_GPU_UTIL_FWCB) == 12584, -+ "RGXFWIF_GPU_UTIL_FWCB is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+typedef struct -+{ -+ IMG_UINT32 ui32RenderTargetIndex; //Render number -+ IMG_UINT32 ui32CurrentRenderTarget; //index in RTA -+ IMG_UINT32 ui32ActiveRenderTargets; //total active RTs -+ IMG_UINT32 ui32CumulActiveRenderTargets; //total active RTs from the first TA kick, for OOM -+ RGXFWIF_DEV_VIRTADDR sValidRenderTargets; //Array of valid RT indices -+ RGXFWIF_DEV_VIRTADDR sRTANumPartialRenders; //Array of number of occurred partial renders per render target -+ IMG_UINT32 ui32MaxRTs; //Number of render targets in the array -+ IMG_UINT32 ui32RTACtlFlags; /* Compatibility and other flags */ -+} UNCACHED_ALIGN RGXFWIF_RTA_CTL; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_RTA_CTL) == 32, -+ "RGXFWIF_RTA_CTL is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/*! -+ * @InGroup RenderTarget -+ * @Brief Firmware Freelist holding usage state of the Parameter Buffers -+ */ -+typedef struct -+{ -+ IMG_DEV_VIRTADDR RGXFW_ALIGN psFreeListDevVAddr; /*!< Freelist page table base */ -+ IMG_UINT64 RGXFW_ALIGN ui64CurrentDevVAddr;/*!< Freelist page table entry for current free page */ -+ IMG_UINT32 ui32CurrentStackTop; /*!< Freelist current free page */ -+ IMG_UINT32 ui32MaxPages; /*!< Max no. of pages can be added to the freelist */ -+ IMG_UINT32 ui32GrowPages; /*!< No pages to add in each freelist grow */ -+ IMG_UINT32 ui32CurrentPages; /*!< Total no. of pages made available to the PM HW */ -+ IMG_UINT32 ui32AllocatedPageCount; /*!< No. of pages allocated by PM HW */ -+ IMG_UINT32 ui32AllocatedMMUPageCount; /*!< No. 
of pages allocated for GPU MMU for PM*/ -+#if defined(SUPPORT_SHADOW_FREELISTS) && !defined(SUPPORT_OPEN_SOURCE_DRIVER) -+ IMG_UINT32 ui32HWRCounter; -+ PRGXFWIF_FWMEMCONTEXT psFWMemContext; -+#endif -+ IMG_UINT32 ui32FreeListID; /*!< Unique Freelist ID */ -+ IMG_BOOL bGrowPending; /*!< Freelist grow is pending */ -+ IMG_UINT32 ui32ReadyPages; /*!< Reserved pages to be used only on PM OOM event */ -+ IMG_UINT32 ui32FreelistFlags; /*!< Compatibility and other flags */ -+#if defined(SUPPORT_AGP) || defined(SUPPORT_OPEN_SOURCE_DRIVER) -+ IMG_UINT32 ui32PmGlobalPb; /*!< PM Global PB on which Freelist is loaded */ -+#endif -+} UNCACHED_ALIGN RGXFWIF_FREELIST; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_FREELIST) == 64, -+ "RGXFWIF_FREELIST is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/*! -+ ****************************************************************************** -+ * HWRTData -+ *****************************************************************************/ -+ -+/* HWRTData flags */ -+/* Deprecated flags 1:0 */ -+#define HWRTDATA_HAS_LAST_TA (IMG_UINT32_C(1) << 2) -+#define HWRTDATA_PARTIAL_RENDERED (IMG_UINT32_C(1) << 3) -+#define HWRTDATA_DISABLE_TILE_REORDERING (IMG_UINT32_C(1) << 4) -+#define HWRTDATA_NEED_BRN65101_BLIT (IMG_UINT32_C(1) << 5) -+#define HWRTDATA_FIRST_BRN65101_STRIP (IMG_UINT32_C(1) << 6) -+#define HWRTDATA_NEED_BRN67182_2ND_RENDER (IMG_UINT32_C(1) << 7) -+#if defined(SUPPORT_AGP) -+#define HWRTDATA_GLOBAL_PB_NUMBER_BIT0 (IMG_UINT32_C(1) << 8) -+#if defined(SUPPORT_AGP4) -+#define HWRTDATA_GLOBAL_PB_NUMBER_BIT1 (IMG_UINT32_C(1) << 9) -+#endif -+#define HWRTDATA_GEOM_NEEDS_RESUME (IMG_UINT32_C(1) << 10) -+#endif -+ -+typedef enum -+{ -+ RGXFWIF_RTDATA_STATE_NONE = 0, -+ RGXFWIF_RTDATA_STATE_KICKTA, -+ RGXFWIF_RTDATA_STATE_KICKTAFIRST, -+ RGXFWIF_RTDATA_STATE_TAFINISHED, -+ RGXFWIF_RTDATA_STATE_KICK3D, -+ RGXFWIF_RTDATA_STATE_3DFINISHED, -+ RGXFWIF_RTDATA_STATE_3DCONTEXTSTORED, -+ RGXFWIF_RTDATA_STATE_TAOUTOFMEM, -+ RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED, -+ /* In case of HWR, we can't set the RTDATA state to NONE, -+ * as this will cause any TA to become a first TA. -+ * To ensure all related TA's are skipped, we use the HWR state */ -+ RGXFWIF_RTDATA_STATE_HWR, -+ RGXFWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU -+} RGXFWIF_RTDATA_STATE; -+ -+typedef struct -+{ -+ IMG_BOOL bTACachesNeedZeroing; -+ -+ IMG_UINT32 ui32ScreenPixelMax; -+ IMG_UINT64 RGXFW_ALIGN ui64MultiSampleCtl; -+ IMG_UINT64 ui64FlippedMultiSampleCtl; -+ IMG_UINT32 ui32TPCStride; -+ IMG_UINT32 ui32TPCSize; -+ IMG_UINT32 ui32TEScreen; -+ IMG_UINT32 ui32MTileStride; -+ IMG_UINT32 ui32TEAA; -+ IMG_UINT32 ui32TEMTILE1; -+ IMG_UINT32 ui32TEMTILE2; -+ IMG_UINT32 ui32ISPMergeLowerX; -+ IMG_UINT32 ui32ISPMergeLowerY; -+ IMG_UINT32 ui32ISPMergeUpperX; -+ IMG_UINT32 ui32ISPMergeUpperY; -+ IMG_UINT32 ui32ISPMergeScaleX; -+ IMG_UINT32 ui32ISPMergeScaleY; -+ IMG_UINT32 uiRgnHeaderSize; -+ IMG_UINT32 ui32ISPMtileSize; -+} UNCACHED_ALIGN RGXFWIF_HWRTDATA_COMMON; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_HWRTDATA_COMMON) == 88, -+ "RGXFWIF_HWRTDATA_COMMON is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+#define MAX_FREELISTS_SIZE 3 -+ -+static_assert(RGXFW_MAX_FREELISTS <= MAX_FREELISTS_SIZE, -+ "RGXFW_MAX_FREELISTS is outside of allowable range for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/*! -+ * @InGroup RenderTarget -+ * @Brief Firmware Render Target data i.e. 
HWRTDATA used to hold the PM context -+ */ -+typedef struct -+{ -+ IMG_DEV_VIRTADDR RGXFW_ALIGN psPMMListDevVAddr; /*!< MList Data Store */ -+ -+ IMG_UINT64 RGXFW_ALIGN ui64VCECatBase[1]; /*!< VCE Page Catalogue base */ -+ IMG_UINT64 RGXFW_ALIGN ui64VCELastCatBase[1]; -+ IMG_UINT64 RGXFW_ALIGN ui64TECatBase[1]; /*!< TE Page Catalogue base */ -+ IMG_UINT64 RGXFW_ALIGN ui64TELastCatBase[1]; -+ IMG_UINT64 RGXFW_ALIGN ui64AlistCatBase; /*!< Alist Page Catalogue base */ -+ IMG_UINT64 RGXFW_ALIGN ui64AlistLastCatBase; -+ -+ IMG_UINT64 RGXFW_ALIGN ui64PMAListStackPointer; /*!< Freelist page table entry for current Mlist page */ -+ IMG_UINT32 ui32PMMListStackPointer; /*!< Current Mlist page */ -+ -+ RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; /*!< Render target dimension dependent data */ -+ -+ IMG_UINT32 ui32HWRTDataFlags; -+ RGXFWIF_RTDATA_STATE eState; /*!< Current workload processing state of HWRTDATA */ -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+ PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[MAX_FREELISTS_SIZE]; /*!< Freelist to use */ -+ IMG_UINT32 aui32FreeListHWRSnapshot[MAX_FREELISTS_SIZE]; -+#else -+ PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS]; /*!< Freelist to use */ -+ IMG_UINT32 aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS]; -+#endif -+ -+ IMG_DEV_VIRTADDR RGXFW_ALIGN psVHeapTableDevVAddr; /*!< VHeap table base */ -+ -+ RGXFWIF_RTA_CTL sRTACtl; /*!< Render target array data */ -+ -+ IMG_DEV_VIRTADDR RGXFW_ALIGN sTailPtrsDevVAddr; /*!< Tail pointers base */ -+ IMG_DEV_VIRTADDR RGXFW_ALIGN sMacrotileArrayDevVAddr; /*!< Macrotiling array base */ -+ IMG_DEV_VIRTADDR RGXFW_ALIGN sRgnHeaderDevVAddr; /*!< Region headers base */ -+ IMG_DEV_VIRTADDR RGXFW_ALIGN sRTCDevVAddr; /*!< Render target cache base */ -+#if defined(RGX_FIRMWARE) -+ struct RGXFWIF_FWCOMMONCONTEXT_* RGXFW_ALIGN psOwnerGeom; -+#else -+ RGXFWIF_DEV_VIRTADDR RGXFW_ALIGN pui32OwnerGeomNotUsedByHost; -+#endif -+#if defined(SUPPORT_TRP) && !defined(SUPPORT_OPEN_SOURCE_DRIVER) -+ IMG_UINT32 ui32KickFlagsCopy; -+ IMG_UINT32 ui32TRPState; /*!< Used by Firmware to track current state of a protected kick */ -+ IMG_UINT32 ui32TEPageCopy; -+ IMG_UINT32 ui32VCEPageCopy; -+#endif -+#if defined(SUPPORT_AGP) || defined(SUPPORT_OPEN_SOURCE_DRIVER) -+ IMG_BOOL bTACachesNeedZeroing; -+#endif -+ -+ RGXFWIF_CLEANUP_CTL RGXFW_ALIGN_DCACHEL sCleanupState; /*!< Render target clean up state */ -+} RGXFW_ALIGN_DCACHEL RGXFWIF_HWRTDATA; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_HWRTDATA) == 256, -+ "RGXFWIF_HWRTDATA is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+/* Sync_checkpoint firmware object. -+ * This is the FW-addressable structure use to hold the sync checkpoint's -+ * state and other information which needs to be accessed by the firmware. 
-+ */ -+typedef struct -+{ -+ IMG_UINT32 ui32State; /*!< Holds the current state of the sync checkpoint */ -+ IMG_UINT32 ui32FwRefCount; /*!< Holds the FW reference count (num of fences/updates processed) */ -+} SYNC_CHECKPOINT_FW_OBJ; -+ -+/* Bit mask Firmware can use to test if a checkpoint has signalled or errored */ -+#define SYNC_CHECKPOINT_SIGNALLED_MASK (0x1 << 0) -+ -+#define RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET -+#define RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES 2048U -+ -+#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES) -+#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_MAX_SIZE_BYTES 2048U -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) -+#define RGXFWIF_KM_GENERAL_HEAP_RESERVED_SIZE PVR_ALIGN((RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_MAX_SIZE_BYTES), DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY) -+#else -+#define RGXFWIF_KM_GENERAL_HEAP_RESERVED_SIZE 0 -+#endif -+ -+#define RGXFWIF_TDM_SECURE_QUEUE_VADDR (RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES) -+#define RGXFWIF_CDM_SECURE_QUEUE_VADDR (RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_OFFSET_BYTES) -+ -+/*! -+ ****************************************************************************** -+ * Virtualisation and Security -+ *****************************************************************************/ -+#define FW_OSID (0U) -+#define MMU_CONTEXT_MAPPING_FWPRIV (0U) /* FW code/private data */ -+ -+#if defined(SECURE_FW_CODE_OSID) -+/* software workaround for SoCs without fw_code, fw_priv_data signals, MIPS only */ -+#if defined(RGX_FEATURE_META) -+#error "SECURE_FW_CODE_OSID is not supported on META cores" -+#elif defined(RGX_FEATURE_RISCV_FW_PROCESSOR) -+#error "SECURE_FW_CODE_OSID is not supported on RISC-V cores" -+#elif (RGX_NUM_DRIVERS_SUPPORTED > 1) -+#error "SECURE_FW_CODE_OSID is not supported on virtualization drivers" -+#elif (SECURE_FW_CODE_OSID + 1 > 2) -+#define MIPS_FW_CODE_OSID (SECURE_FW_CODE_OSID) -+#else -+#define MIPS_FW_CODE_OSID (1U) -+#endif -+#endif /* defined(SECURE_FW_CODE_OSID) */ -+ -+static_assert((RGX_FW_HEAP_OSID_ASSIGNMENT == RGX_FW_HEAP_USES_FIRMWARE_OSID), -+ " Invalid RGX_FW_HEAP_OSID_ASSIGNMENT value. 
Rogue cores support only the RGX_FW_HEAP_USES_FIRMWARE_OSID config"); -+ -+/* Firmware and Host driver share the same OSID */ -+#define FW_HEAP_OSID (FW_OSID) -+ -+#if (RGX_FW_HEAP_OSID_ASSIGNMENT == RGX_FW_HEAP_USES_FIRMWARE_OSID) || defined(RGX_FEATURE_MIPS) -+/* The Firmware accesses its private code & data and the interface -+ * memory it shares with the KM drivers using the same MMU context */ -+#define MMU_CONTEXT_MAPPING_FWIF MMU_CONTEXT_MAPPING_FWPRIV -+#else -+/* The Firmware accesses the interface memory it shares -+ * with the KM drivers using a reserved MMU context */ -+#define MMU_CONTEXT_MAPPING_FWIF (7U) -+#endif -+ -+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) -+/* virtualization without security support */ -+#define DRIVER_ID(osid) (osid) -+#define OSID(did) (did) -+#else -+#define DRIVER_ID(osid) (0U) -+#define OSID(did) (did) -+#endif /* (RGX_NUM_DRIVERS_SUPPORTED > 1) */ -+ -+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) -+ -+#define FOREACH_SUPPORTED_DRIVER(did) for ((did)=RGXFW_HOST_DRIVER_ID; (did) < RGX_NUM_DRIVERS_SUPPORTED; (did)++) -+ -+#if defined(__KERNEL__) -+/* Driver implementation */ -+#define FOREACH_ACTIVE_DRIVER(devinfo, did) RGXFwSharedMemCacheOpValue(psFwSysData->asOsRuntimeFlagsMirror[RGXFW_HOST_DRIVER_ID], \ -+ INVALIDATE); \ -+ FOREACH_SUPPORTED_DRIVER(did) \ -+ { \ -+ if (devinfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[did].bfOsState != RGXFW_CONNECTION_FW_ACTIVE) continue; -+ -+#define END_FOREACH_ACTIVE_DRIVER } -+ -+#else -+/* Firmware implementation */ -+#define FOREACH_ACTIVE_DRIVER(did) do { \ -+ unsigned int idx; \ -+ for ((idx)=RGXFW_HOST_DRIVER_ID, (did)=gsRGXFWCtl.aui32ActiveDrivers[0U]; \ -+ (idx) < RGXFW_NUM_ACTIVE_DRIVERS; \ -+ ++(idx), (did)=gsRGXFWCtl.aui32ActiveDrivers[(idx)]) { -+ -+#define END_FOREACH_ACTIVE_DRIVER }} while (false); -+#endif /* defined(__KERNEL__) */ -+ -+ -+#else -+#define FOREACH_SUPPORTED_DRIVER(did) for ((did)=RGXFW_HOST_DRIVER_ID; (did) <= RGXFW_HOST_DRIVER_ID; (did)++) -+ -+#define FOREACH_ACTIVE_DRIVER(did) FOREACH_SUPPORTED_DRIVER(did) -+#define END_FOREACH_ACTIVE_DRIVER -+ -+#endif /* (RGX_NUM_DRIVERS_SUPPORTED > 1) */ -+ -+#define FOREACH_VALIDATION_OSID(osid) for ((osid)=0; (osid) < GPUVIRT_VALIDATION_NUM_OS; (osid)++) -+#define FOREACH_HW_OSID(osid) for ((osid)=0; (osid) < RGXFW_MAX_NUM_OSIDS; (osid)++) -+#define FOREACH_DRIVER_RAW_HEAP(did) for ((did)=RGX_FIRST_RAW_HEAP_DRIVER_ID; (did) < ((PVRSRV_VZ_MODE_IS(NATIVE) ? 1 : RGX_NUM_DRIVERS_SUPPORTED)); (did)++) -+ -+#endif /* RGX_FWIF_KM_H */ -+ -+/****************************************************************************** -+ End of file (rgx_fwif_km.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgx_fwif_resetframework.h b/drivers/gpu/drm/img-rogue/rgx_fwif_resetframework.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_fwif_resetframework.h -@@ -0,0 +1,70 @@ -+/*************************************************************************/ /*! -+@File rgx_fwif_resetframework.h -+@Title Post-reset work-around framework FW interface -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGX_FWIF_RESETFRAMEWORK_H) -+#define RGX_FWIF_RESETFRAMEWORK_H -+ -+#include "img_types.h" -+#include "rgx_fwif_shared.h" -+ -+typedef struct -+{ -+ union -+ { -+ IMG_UINT64 uCDMReg_CDM_CB_BASE; // defined(RGX_FEATURE_CDM_USER_MODE_QUEUE) -+ IMG_UINT64 uCDMReg_CDM_CTRL_STREAM_BASE; // !defined(RGX_FEATURE_CDM_USER_MODE_QUEUE) -+ }; -+ IMG_UINT64 uCDMReg_CDM_CB_QUEUE; // !defined(RGX_FEATURE_CDM_USER_MODE_QUEUE) -+ IMG_UINT64 uCDMReg_CDM_CB; // !defined(RGX_FEATURE_CDM_USER_MODE_QUEUE) -+} RGXFWIF_RF_REGISTERS; -+ -+typedef struct -+{ -+ /* THIS MUST BE THE LAST MEMBER OF THE CONTAINING STRUCTURE */ -+ RGXFWIF_RF_REGISTERS RGXFW_ALIGN sFWRegisters; -+ -+} RGXFWIF_RF_CMD; -+ -+/* to opaquely allocate and copy in the kernel */ -+#define RGXFWIF_RF_CMD_SIZE sizeof(RGXFWIF_RF_CMD) -+ -+#endif /* RGX_FWIF_RESETFRAMEWORK_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgx_fwif_sf.h b/drivers/gpu/drm/img-rogue/rgx_fwif_sf.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_fwif_sf.h -@@ -0,0 +1,995 @@ -+/*************************************************************************/ /*! -+@File rgx_fwif_sf.h -+@Title RGX firmware interface string format specifiers -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the rgx firmware logging messages. The following -+ list are the messages the firmware prints. 
Changing anything -+ but the first column or spelling mistakes in the strings will -+ break compatibility with log files created with older/newer -+ firmware versions. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef RGX_FWIF_SF_H -+#define RGX_FWIF_SF_H -+ -+/****************************************************************************** -+ * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you -+ * WILL BREAK fw tracing message compatibility with previous -+ * fw versions. Only add new ones, if so required. -+ *****************************************************************************/ -+/* Available log groups */ -+#define RGXFW_LOG_SFGROUPLIST \ -+ X(RGXFW_GROUP_NULL,NULL) \ -+ X(RGXFW_GROUP_MAIN,MAIN) \ -+ X(RGXFW_GROUP_CLEANUP,CLEANUP) \ -+ X(RGXFW_GROUP_CSW,CSW) \ -+ X(RGXFW_GROUP_PM, PM) \ -+ X(RGXFW_GROUP_RTD,RTD) \ -+ X(RGXFW_GROUP_SPM,SPM) \ -+ X(RGXFW_GROUP_MTS,MTS) \ -+ X(RGXFW_GROUP_BIF,BIF) \ -+ X(RGXFW_GROUP_MISC,MISC) \ -+ X(RGXFW_GROUP_POW,POW) \ -+ X(RGXFW_GROUP_HWR,HWR) \ -+ X(RGXFW_GROUP_HWP,HWP) \ -+ X(RGXFW_GROUP_RPM,RPM) \ -+ X(RGXFW_GROUP_DMA,DMA) \ -+ X(RGXFW_GROUP_DBG,DBG) -+ -+/*! 
-+ * @InGroup SRVAndFWTracing -+ * @Brief FW Trace log groups(GID) list -+ */ -+enum RGXFW_LOG_SFGROUPS { -+#define X(A,B) A, -+ RGXFW_LOG_SFGROUPLIST -+#undef X -+}; -+ -+#define IMG_SF_STRING_MAX_SIZE 256U -+ -+typedef struct { -+ IMG_UINT32 ui32Id; -+ IMG_CHAR sName[IMG_SF_STRING_MAX_SIZE]; -+} RGXFW_STID_FMT; /* pair of string format id and string formats */ -+ -+typedef struct { -+ IMG_UINT32 ui32Id; -+ const IMG_CHAR *psName; -+} RGXKM_STID_FMT; /* pair of string format id and string formats */ -+ -+/* Table of String Format specifiers, the group they belong and the number of -+ * arguments each expects. Xmacro styled macros are used to generate what is -+ * needed without requiring hand editing. -+ * -+ * id : id within a group -+ * gid : group id -+ * Sym name : name of enumerations used to identify message strings -+ * String : Actual string -+ * #args : number of arguments the string format requires -+ */ -+#define RGXFW_LOG_SFIDLIST \ -+/*id, gid, id name, string, # arguments */ \ -+X( 0, RGXFW_GROUP_NULL, RGXFW_SF_FIRST, "You should not use this string", 0) \ -+\ -+X( 1, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx 0x%08.8x @ %u, RTD 0x%08x. Partial render:%u, CSW resume:%u, prio:%d", 6) \ -+X( 2, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED, "3D finished, HWRTData0State=%x, HWRTData1State=%x", 2) \ -+X( 3, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK3D_TQ_DEPRECATED, "Kick 3D TQ: FWCtx 0x%08.8x @ %u, CSW resume:%u, prio: %d", 4) \ -+X( 4, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_TQ_FINISHED, "3D Transfer finished", 0) \ -+X( 5, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_DEPRECATED, "Kick Compute: FWCtx 0x%08.8x @ %u, prio: %d", 3) \ -+X( 6, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED, "Compute finished", 0) \ -+X( 7, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx 0x%08.8x @ %u, RTD 0x%08x. First kick:%u, Last kick:%u, CSW resume:%u, prio:%d", 7) \ -+X( 8, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED, "TA finished", 0) \ -+X( 9, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESTART_AFTER_PRENDER, "Restart TA after partial render", 0) \ -+X( 10, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESUME_WOUT_PRENDER, "Resume TA without partial render", 0) \ -+X( 11, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OOM, "Out of memory! Context 0x%08x, HWRTData 0x%x", 2) \ -+X( 12, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA_DEPRECATED, "Kick TLA: FWCtx 0x%08.8x @ %u, prio:%d", 3) \ -+X( 13, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TLA_FINISHED, "TLA finished", 0) \ -+X( 14, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CCCB_WOFF_UPDATE, "cCCB Woff update = %u, DM = %u, FWCtx = 0x%08.8x", 3) \ -+X( 16, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_START, "UFO Checks for FWCtx 0x%08.8x @ %u", 2) \ -+X( 17, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK, "UFO Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ -+X( 18, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_SUCCEEDED, "UFO Checks succeeded", 0) \ -+X( 19, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_PR_CHECK, "UFO PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \ -+X( 20, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_START, "UFO SPM PR-Checks for FWCtx 0x%08.8x", 1) \ -+X( 21, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_DEPRECATED, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= ????????, [0x%08.8x] is ???????? 
requires 0x%08.8x", 4) \ -+X( 22, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE_START, "UFO Updates for FWCtx 0x%08.8x @ %u", 2) \ -+X( 23, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \ -+X( 24, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ASSERT_FAILED, "ASSERT Failed: line %u of:", 1) \ -+X( 25, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_LOCKUP_DEPRECATED, "HWR: Lockup detected on DM%u, FWCtx: 0x%08.8x", 2) \ -+X( 26, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_FW_DEPRECATED, "HWR: Reset fw state for DM%u, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \ -+X( 27, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_HW_DEPRECATED, "HWR: Reset HW", 0) \ -+X( 28, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_TERMINATED_DEPRECATED, "HWR: Lockup recovered.", 0) \ -+X( 29, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_FALSE_LOCKUP_DEPRECATED, "HWR: False lockup detected for DM%u", 1) \ -+X( 30, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ALIGN_FAILED, "Alignment check %u failed: host = 0x%x, fw = 0x%x", 3) \ -+X( 31, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GP_USC_TRIGGERED, "GP USC triggered", 0) \ -+X( 32, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_OVERALLOC_REGS, "Overallocating %u temporary registers and %u shared registers for breakpoint handler", 2) \ -+X( 33, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED, "Setting breakpoint: Addr 0x%08.8x", 1) \ -+X( 34, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_STORE, "Store breakpoint state", 0) \ -+X( 35, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_UNSET, "Unsetting BP Registers", 0) \ -+X( 36, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NONZERO_RT, "Active RTs expected to be zero, actually %u", 1) \ -+X( 37, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_PRESENT, "RTC present, %u active render targets", 1) \ -+X( 38, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_EST_POWER_DEPRECATED, "Estimated Power 0x%x", 1) \ -+X( 39, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_TARGET, "RTA render target %u", 1) \ -+X( 40, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_KICK_RENDER, "Kick RTA render %u of %u", 2) \ -+X( 41, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SIZES_CHECK_DEPRECATED, "HWR sizes check %u failed: addresses = %u, sizes = %u", 3) \ -+X( 42, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_ENABLE_DEPRECATED, "Pow: DUSTS_ENABLE = 0x%x", 1) \ -+X( 43, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_HWREQ_DEPRECATED, "Pow: On(1)/Off(0): %u, Units: 0x%08.8x", 2) \ -+X( 44, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_DEPRECATED, "Pow: Changing number of dusts from %u to %u", 2) \ -+X( 45, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_SIDEKICK_IDLE_DEPRECATED, "Pow: Sidekick ready to be powered down", 0) \ -+X( 46, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_REQ_DEPRECATED, "Pow: Request to change num of dusts to %u (bPowRascalDust=%u)", 2) \ -+X( 47, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_STORE, "No ZS Buffer used for partial render (store)", 0) \ -+X( 48, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_LOAD, "No Depth/Stencil Buffer used for partial render (load)", 0) \ -+X( 49, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SET_LOCKUP_DEPRECATED, "HWR: Lock-up DM%u FWCtx: 0x%08.8x", 2) \ -+X( 50, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE_DEPRECATED, "MLIST%u checker: CatBase TE=0x%08x (%u Pages), VCE=0x%08x (%u Pages), ALIST=0x%08x, IsTA=%u", 7) \ -+X( 51, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_MLIST_VALUE, "MLIST%u checker: MList[%u] = 0x%08x", 3) \ -+X( 52, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_OK, "MLIST%u OK", 1) \ -+X( 53, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_EMPTY, "MLIST%u is empty", 1) \ 
-+X( 54, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE, "MLIST%u checker: CatBase TE=0x%08x%08x, VCE=0x%08x%08x, ALIST=0x%08x%08x, IsTA=%u", 8) \ -+X( 55, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_40480KICK, "3D OQ flush kick", 0) \ -+X( 56, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWP_UNSUPPORTED_BLOCK, "HWPerf block ID (0x%x) unsupported by device", 1) \ -+X( 57, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED2, "Setting breakpoint: Addr 0x%08.8x DM%u", 2) \ -+X( 58, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED, "Kick RTU: FWCtx 0x%08.8x @ %u, prio: %d", 3) \ -+X( 59, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_FINISHED_DEPRECATED, "RDM finished on context %u", 1) \ -+X( 60, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED, "Kick SHG: FWCtx 0x%08.8x @ %u, prio: %d", 3) \ -+X( 61, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SHG_FINISHED_DEPRECATED, "SHG finished", 0) \ -+X( 62, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBA_FINISHED_DEPRECATED, "FBA finished on context %u", 1) \ -+X( 63, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_FAILED, "UFO Checks failed", 0) \ -+X( 64, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_START, "Kill DM%u start", 1) \ -+X( 65, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_COMPLETE, "Kill DM%u complete", 1) \ -+X( 66, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FC_CCB_UPDATE_DEPRECATED, "FC%u cCCB Woff update = %u", 2) \ -+X( 67, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED2, "Kick RTU: FWCtx 0x%08.8x @ %u, prio: %d, Frame Context: %u", 4) \ -+X( 68, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_INIT, "GPU init", 0) \ -+X( 69, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNITS_INIT, "GPU Units init (# mask: 0x%x)", 1) \ -+X( 70, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGTIMES, "Register access cycles: read: %u cycles, write: %u cycles, iterations: %u", 3) \ -+X( 71, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_ADD, "Register configuration added. Address: 0x%x Value: 0x%x%x", 3) \ -+X( 72, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_SET, "Register configuration applied to type %u. (0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)", 1) \ -+X( 73, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TPC_FLUSH, "Perform TPC flush.", 0) \ -+X( 74, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP_DEPRECATED, "GPU has locked up (see HWR logs for more info)", 0) \ -+X( 75, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_OUTOFTIME, "HWR has been triggered - GPU has overrun its deadline (see HWR logs)", 0) \ -+X( 76, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_POLLFAILURE_DEPRECATED, "HWR has been triggered - GPU has failed a poll (see HWR logs)", 0) \ -+X( 77, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DOPPLER_OOM_DEPRECATED, "Doppler out of memory event for FC %u", 1) \ -+X( 78, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK1, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \ -+X( 79, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK2, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ -+X( 80, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIMESTAMP, "TIMESTAMP -> [0x%08.8x]", 1) \ -+X( 81, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE_START, "UFO RMW Updates for FWCtx 0x%08.8x @ %u", 2) \ -+X( 82, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \ -+X( 83, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULLCMD, "Kick Null cmd: FWCtx 0x%08.8x @ %u", 2) \ -+X( 84, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RPM_OOM_DEPRECATED, "RPM Out of memory! 
Context 0x%08x, SH requestor %u", 2) \ -+X( 85, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_ABORT_DISCARD_DEPRECATED, "Discard RTU due to RPM abort: FWCtx 0x%08.8x @ %u, prio: %d, Frame Context: %u", 4) \ -+X( 86, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED, "Deferring DM%u from running context 0x%08x @ %u (deferred DMs = 0x%08x)", 4) \ -+X( 87, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_WAITING_TURN_DEPRECATED, "Deferring DM%u from running context 0x%08x @ %u to let other deferred DMs run (deferred DMs = 0x%08x)", 4) \ -+X( 88, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_NO_LONGER, "No longer deferring DM%u from running context = 0x%08x @ %u (deferred DMs = 0x%08x)", 4) \ -+X( 89, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB_DEPRECATED, "FWCCB for DM%u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \ -+X( 90, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB, "FWCCB for Driver ID %u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \ -+X( 91, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART, "Host Sync Partition marker: %u", 1) \ -+X( 92, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART_RPT, "Host Sync Partition repeat: %u", 1) \ -+X( 93, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CLOCK_SPEED_CHANGE, "Core clock set to %u Hz", 1) \ -+X( 94, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_OFFSETS, "Compute Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \ -+X( 95, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DEPRECATED, "Signal check failed, Required Data: 0x%x, Address: 0x%08x%08x", 3) \ -+X( 96, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_DEPRECATED, "Signal update, Snoop Filter: %u, MMU Ctx: %u, Signal Id: %u, Signals Base: 0x%08x%08x", 5) \ -+X( 97, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNALED, "Signalled the previously waiting FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x", 4) \ -+X( 98, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED_DEPRECATED, "Compute stalled", 0) \ -+X( 99, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED, "Compute stalled (Roff = %u, Woff = %u, Size = %u)", 3) \ -+X(100, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED_FROM_STALL, "Compute resumed (Roff = %u, Woff = %u, Size = %u)", 3) \ -+X(101, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_SIGNAL_UPDATE, "Signal update notification from the host, PC Physical Address: 0x%08x%08x, Signal Virtual Address: 0x%08x%08x", 4) \ -+X(102, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_OSID_DM_DEPRECATED, "Signal update from DM: %u, OSId: %u, PC Physical Address: 0x%08x%08x", 4) \ -+X(103, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DM_DEPRECATED, "DM: %u signal check failed", 1) \ -+X(104, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED, "Kick TDM: FWCtx 0x%08.8x @ %u, prio:%d", 3) \ -+X(105, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED, "TDM finished", 0) \ -+X(106, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TE_PIPE_STATUS_DEPRECATED, "MMU_PM_CAT_BASE_TE[%u]_PIPE[%u]: 0x%08x 0x%08x)", 4) \ -+X(107, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_HIT_DEPRECATED, "BRN 54141 HIT", 0) \ -+X(108, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_APPLYING_DUMMY_TA_DEPRECATED, "BRN 54141 Dummy TA kicked", 0) \ -+X(109, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_RESUME_TA_DEPRECATED, "BRN 54141 resume TA", 0) \ -+X(110, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DOUBLE_HIT_DEPRECATED, "BRN 54141 double hit after applying WA", 0) \ -+X(111, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DUMMY_TA_VDM_BASE_DEPRECATED, "BRN 54141 Dummy TA VDM base address: 0x%08x%08x", 2) \ -+X(112, 
RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_WITH_CURRENT, "Signal check failed, Required Data: 0x%x, Current Data: 0x%x, Address: 0x%08x%08x", 4) \ -+X(113, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL_DEPRECATED, "TDM stalled (Roff = %u, Woff = %u)", 2) \ -+X(114, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_WRITE_OFFSET_UPDATE, "Write Offset update notification for stalled FWCtx 0x%08.8x", 1) \ -+X(115, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE_DEPRECATED, "Changing OSid %u's priority from %u to %u", 3) \ -+X(116, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED, "Compute resumed", 0) \ -+X(117, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA, "Kick TLA: FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 7) \ -+X(118, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED3, "Kick TDM: FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 7) \ -+X(119, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA, "Kick TA: FWCtx 0x%08.8x @ %u, RTD 0x%08x, First kick:%u, Last kick:%u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 11) \ -+X(120, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D, "Kick 3D: FWCtx 0x%08.8x @ %u, RTD 0x%08x, Partial render:%u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 10) \ -+X(121, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3DTQ, "Kick 3D TQ: FWCtx 0x%08.8x @ %u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 8) \ -+X(122, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_DEPRECATED2, "Kick Compute: FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, ext:0x%08x, int:0x%08x)", 6) \ -+X(123, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED3, "Kick RTU: FWCtx 0x%08.8x @ %u, Frame Context:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 8) \ -+X(124, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED2, "Kick SHG: FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 7) \ -+X(125, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CSRM_RECONFIG, "Reconfigure CSRM: special coeff support enable %u.", 1) \ -+X(127, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_REQ_MAX_COEFFS, "TA requires max coeff mode, deferring: %u.", 1) \ -+X(128, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_REQ_MAX_COEFFS, "3D requires max coeff mode, deferring: %u.", 1) \ -+X(129, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_FAILED, "Kill DM%u failed", 1) \ -+X(130, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE, "Thread Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)", 2) \ -+X(131, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE_FENCE, "Thread Queue is fencing, we are waiting for Roff = %u (Roff = %u, Woff = %u)", 3) \ -+X(132, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_HCS_TRIGGERED_DEPRECATED, "DM %u failed to Context Switch on time. 
Triggered HCS (see HWR logs).", 1) \ -+X(133, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HCS_SET_DEPRECATED, "HCS changed to %u ms", 1) \ -+X(134, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT_DEPRECATED, "Updating Tiles In Flight (Dusts=%u, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)", 4) \ -+X(135, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_TILES_IN_FLIGHT, " Phantom %u: USCTiles=%u", 2) \ -+X(136, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_OFF_DEPRECATED, "Isolation grouping is disabled", 0) \ -+X(137, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_DEPRECATED, "Isolation group configured with a priority threshold of %u", 1) \ -+X(138, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_ONLINE_DEPRECATED, "OS %u has come online", 1) \ -+X(139, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_OFFLINE_DEPRECATED, "OS %u has gone offline", 1) \ -+X(140, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNAL_REKICK, "Signalled the previously stalled FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x", 4) \ -+X(141, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS_DEPRECATED, "TDM Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \ -+X(142, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSET_READ_RESET, "Reset TDM Queue Read Offset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes 0, Woff = %u, Size = %u)", 6) \ -+X(143, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_READ_OFFSET, "User Mode Queue mismatched stream start: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)", 5) \ -+X(144, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_DEINIT, "GPU deinit", 0) \ -+X(145, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNITS_DEINIT, "GPU units deinit", 0) \ -+X(146, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG, "Initialised OS %u with config flags 0x%08x", 2) \ -+X(147, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_LIMIT, "UFO limit exceeded %u/%u", 2) \ -+X(148, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_62850KICK, "3D Dummy stencil store", 0) \ -+X(149, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG_DEPRECATED, "Initialised OS %u with config flags 0x%08x and extended config flags 0x%08x", 3) \ -+X(150, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_COMMAND_DEPRECATED, "Unknown Command (eCmdType=0x%08x)", 1) \ -+X(151, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE, "UFO forced update: FWCtx 0x%08.8x @ %u [0x%08.8x] = 0x%08.8x", 4) \ -+X(152, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE_NOP_DEPRECATED, "UFO forced update NOP: FWCtx 0x%08.8x @ %u [0x%08.8x] = 0x%08.8x, reason %u", 5) \ -+X(153, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66075_CHECK, "TDM context switch check: Roff %u points to 0x%08x, Match=%u", 3) \ -+X(154, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS, "Driver ID %u CCB init status: %u (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x", 6) \ -+X(155, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWIRQ, "FW IRQ # %u @ %u", 2) \ -+X(156, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET, "Setting breakpoint: Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 3) \ -+X(157, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_DEPRECATED, "Invalid KCCB setup for OSid %u: KCCB 0x%08x, KCCB Ctrl 0x%08x", 3) \ -+X(158, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_CMD, "Invalid KCCB cmd (%u) for Driver ID %u @ KCCB 0x%08x", 3) \ -+X(159, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_FAULT_DEPRECATED, "FW FAULT: At line %u in file 0x%08x%08x, additional data=0x%08x", 4) \ -+X(160, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_INVALID, "Invalid breakpoint: MemCtx 0x%08x Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 4) \ -+X(161, 
RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID_DEPRECATED, "Discarding invalid SLC flushinval command for OSid %u: DM %u, FWCtx 0x%08x", 3) \ -+X(162, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE_DEPRECATED, "Invalid Write Offset update notification from OSid %u to DM %u: FWCtx 0x%08x, MemCtx 0x%08x", 4) \ -+X(163, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD_DEPRECATED, "Null FWCtx in KCCB kick cmd for OSid %u: KCCB 0x%08x, ROff %u, WOff %u", 4) \ -+X(164, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FULL_CHPTCCB, "Checkpoint CCB for Driver ID %u is full, signalling host for full check state (Roff = %u, Woff = %u)", 3) \ -+X(165, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS_DEPRECATED, "OSid %u CCB init status: %u (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x chptCCBCtl@0x%x chptCCB@0x%x", 8) \ -+X(166, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_STATE_CHANGE, "Driver ID %u fw state transition request: from %u to %u (0-offline 1-ready 2-active 3-offloading 4-cooldown). Status %u (1-ok 0-fail)", 4) \ -+X(167, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_STALE_KCCB_CMDS, "Driver ID %u has %u stale commands in its KCCB", 2) \ -+X(168, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_VCE_PAUSE, "Applying VCE pause", 0) \ -+X(169, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KCCB_UPDATE_RTN_SLOT_DEPRECATED, "OSid %u KCCB slot %u value updated to %u", 3) \ -+X(170, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_KCCB_COMMAND, "Unknown KCCB Command: KCCBCtl=0x%08x, KCCB=0x%08x, Roff=%u, Woff=%u, Wrap=%u, Cmd=0x%08x, CmdType=0x%08x", 7) \ -+X(171, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND1, "Unknown Client CCB Command processing fences: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \ -+X(172, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND2, "Unknown Client CCB Command executing kick: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \ -+X(173, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD, "Null FWCtx in KCCB kick cmd for Driver ID %u with WOff %u", 2) \ -+X(174, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID, "Discarding invalid SLC flushinval command for Driver ID %u, FWCtx 0x%08x", 2) \ -+X(175, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE, "Invalid Write Offset update notification from Driver ID %u: FWCtx 0x%08x, MemCtx 0x%08x", 3) \ -+X(176, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_INIT_CONFIG, "Initialised Firmware with config flags 0x%08x and extended config flags 0x%08x", 2) \ -+X(177, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_CONFIG, "Set Periodic Hardware Reset Mode: %u", 1) \ -+X(179, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_TRIG, "PHR mode %u, FW state: 0x%08x, HWR flags: 0x%08x", 3) \ -+X(180, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_RESET_DEPRECATED, "PHR mode %u triggered a reset", 1) \ -+X(181, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE, "Signal update, Snoop Filter: %u, Signal Id: %u", 2) \ -+X(182, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEV_SERIES8_DEPRECATED, "WARNING: Skipping FW KCCB Cmd type %u which is not yet supported on Series8.", 1) \ -+X(183, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INCONSISTENT_MMU_FLAGS, "MMU context cache data NULL, but cache flags=0x%x (sync counter=%u, update value=%u) OSId=%u", 4) \ -+X(184, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SLC_FLUSH, "SLC range based flush: Context=%u VAddr=0x%02x%08x, Size=0x%08x, Invalidate=%u", 5) \ -+X(185, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBSC_INVAL, "FBSC invalidate for 
Context Set [0x%08x]: Entry mask 0x%08x%08x.", 3) \ -+X(186, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66284_UPDATE, "TDM context switch check: Roff %u was not valid for kick starting at %u, moving back to %u", 3) \ -+X(187, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SPFILTER_UPDATES, "Signal updates: FIFO: %u, Signals: 0x%08x", 2) \ -+X(188, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_FBSC_CMD, "Invalid FBSC cmd: FWCtx 0x%08x, MemCtx 0x%08x", 2) \ -+X(189, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN68497_BLIT, "Insert BRN68497 WA blit after TDM Context store.", 0) \ -+X(190, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PENDING_UFO_UPDATE_START, "UFO Updates for previously finished FWCtx 0x%08.8x", 1) \ -+X(191, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_RTA_PRESENT, "RTC with RTA present, %u active render targets", 1) \ -+X(192, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULL_RTAS, "Invalid RTA Set-up. The ValidRenderTargets array in RTACtl is Null!", 0) \ -+X(193, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_COUNTER, "Block 0x%x / Counter 0x%x INVALID and ignored", 2) \ -+X(194, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ECC_FAULT_DEPRECATED, "ECC fault GPU=0x%08x FW=0x%08x", 2) \ -+X(195, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PROCESS_XPU_EVENT, "Processing XPU event on DM = %u", 1) \ -+X(196, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_VZ_WDG_TRIGGER, "Driver ID %u failed to respond to the virtualisation watchdog in time. Timestamp of its last input = %u", 2) \ -+X(197, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP, "GPU-%u has locked up (see HWR logs for more info)", 1) \ -+X(198, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT, "Updating Tiles In Flight (Dusts=%u, PartitionMask=0x%08x, ISPCtl=0x%08x)", 3) \ -+X(199, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP_DM, "GPU has locked up (see HWR logs for more info)", 0) \ -+X(200, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REPROCESS_XPU_EVENTS, "Reprocessing outstanding XPU events from cores 0x%02x", 1) \ -+X(201, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SECONDARY_XPU_EVENT, "Secondary XPU event on DM=%u, CoreMask=0x%02x, Raised=0x%02x", 3) \ -+X(202, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS, "TDM Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 8) \ -+X(203, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL, "TDM stalled Core %u (Roff = %u, Woff = %u)", 3) \ -+X(204, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_OFFSETS, "Compute Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 8) \ -+X(205, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_STALLED, "Compute stalled core %u (Roff = %u, Woff = %u, Size = %u)", 4) \ -+X(206, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_CORE_READ_OFFSET, "User Mode Queue mismatched stream start: Core %u, FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)", 6) \ -+X(207, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_RESUMED_FROM_STALL, "TDM resumed core %u (Roff = %u, Woff = %u)", 3) \ -+X(208, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_RESUMED_FROM_STALL, "Compute resumed core %u (Roff = %u, Woff = %u, Size = %u)", 4) \ -+X(209, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_MTS_PERMISSION_CHANGED, " Updated permission for Driver ID %u to perform MTS kicks: %u (1 = allowed, 0 = not allowed)", 2) \ -+X(210, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TEST1, "Mask = 0x%X, mask2 = 0x%X", 2) \ -+X(211, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TEST2, " core %u, reg = %u, mask = 0x%X)", 3) \ -+X(212, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ECC_FAULT_SAFETY_BUS, "ECC fault received from safety bus: 0x%08x", 1) \ -+X(213, RGXFW_GROUP_MAIN, 
RGXFW_SF_MAIN_SAFETY_WDG_CONFIG, "Safety Watchdog threshold period set to 0x%x clock cycles", 1) \ -+X(214, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SAFETY_WDG_TRIGGER, "MTS Safety Event trigged by the safety watchdog.", 0) \ -+X(215, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_USC_TASKS_RANGE, "DM%u USC tasks range limit 0 - %u, stride %u", 3) \ -+X(216, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_ECC_FAULT, "ECC fault GPU=0x%08x", 1) \ -+X(217, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_SAFETY_RESET, "GPU Hardware units reset to prevent transient faults.", 0) \ -+X(218, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ABORTCMD, "Kick Abort cmd: FWCtx 0x%08.8x @ %u", 2) \ -+X(219, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_DEPRECATED, "Kick Ray: FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 7)\ -+X(220, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RAY_FINISHED_DEPRECATED, "Ray finished", 0) \ -+X(221, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWDATA_INIT_STATUS, "State of firmware's private data at boot time: %u (0 = uninitialised, 1 = initialised); Fw State Flags = 0x%08X", 2) \ -+X(222, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT, "CFI Timeout detected (%u increasing to %u)", 2) \ -+X(223, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT_FBM, "CFI Timeout detected for FBM (%u increasing to %u)", 2) \ -+X(224, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GEOM_OOM_DISALLOWED, "Geom OOM event not allowed", 0) \ -+X(225, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE_DEPRECATED2, "Changing Driver ID %u's priority from %u to %u; Isolation = %u (0 = off; 1 = on)", 4) \ -+X(226, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SKIP_ALREADY_RUN_GEOM, "Skipping already executed TA FWCtx 0x%08.8x @ %u", 2) \ -+X(227, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ATTEMPT_TO_RUN_AHEAD_GEOM, "Attempt to execute TA FWCtx 0x%08.8x @ %u ahead of time on other GEOM", 2) \ -+X(228, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED2, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 8) \ -+X(229, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_PIPELINE, "Kick TA: Kick ID %u FWCtx 0x%08.8x @ %u, RTD 0x%08x, First kick:%u, Last kick:%u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 12) \ -+X(230, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_PIPELINE, "Kick 3D: Kick ID %u FWCtx 0x%08.8x @ %u, RTD 0x%08x, Partial render:%u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 11) \ -+X(231, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_PIPELINE_DEPRECATED, "Kick Compute: Kick ID %u FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, ext:0x%08x, int:0x%08x)", 7) \ -+X(232, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED_PIPELINE, "TDM finished: Kick ID %u", 1) \ -+X(233, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED_PIPELINE, "TA finished: Kick ID %u", 1) \ -+X(234, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED_PIPELINE, "3D finished: Kick ID %u, HWRTData0State=%x, HWRTData1State=%x", 3) \ -+X(235, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED_PIPELINE, "Compute finished: Kick ID %u", 1) \ -+X(236, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_PIPELINE_DEPRECATED, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %u, Base 0x%08x%08x. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 10) \ -+X(237, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_PIPELINE_DEPRECATED, "Kick Ray: Kick ID %u FWCtx 0x%08.8x @ %u. 
(PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 8)\ -+X(238, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RAY_FINISHED_PIPELINE, "Ray finished: Kick ID %u", 1) \ -+X(239, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RT_UNITS_INIT, "GPU RT Units init (# mask: 0x%08x%08x)", 2) \ -+X(240, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_PENDING_PASS, "UFO Check: [0x%08.8x] is pending update to 0x%08.8x and therefore passes", 2) \ -+X(241, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_PR_CHECK_PENDING_PASS, "UFO PR-Check: [0x%08.8x] is pending update to 0x%08.8x and therefore passes", 2) \ -+X(242, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DELAY_DM_TO_OVERLAP_PIPES, "Holding kick of DM %u pipe %u to encourage pipeline overlap", 2) \ -+X(243, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RELEASE_DM_PIPE, "Releasing kick for DM %u pipe %u", 2) \ -+X(244, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE, "Changing Driver ID %u's priority from %u to %u", 3) \ -+X(245, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_ISOLATION_GROUP_CHANGE, "Changing Driver ID %u's isolation group from %u to %u", 3) \ -+X(246, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_VK_TIMESTAMP, "VK Timestamp: addr=0x%08x%08x, avail=0x%08x%08x stamp=0x%08x%08x", 6) \ -+X(247, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_HCS_TRIGGERED, "DM %u failed to Context Switch on time (Current time: 0x%08x%08x, deadline: 0x%08x%08x). Triggered HCS (see HWR logs).", 5) \ -+X(248, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_POLLFAILURE, "HWR has been triggered - GPU has failed a poll, RGX_CR_EVENT_STATUS=0x%08x (see HWR logs)", 1) \ -+X(249, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBCDC_FAILURE_DETECTED, "FBCDC signature failure detected so block scheduling more work", 0) \ -+X(250, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBCDC_FAILURE_CLEARED, "FBCDC signature cleared which unlocks scheduling more work", 0) \ -+X(251, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_FAULT, "FW FAULT: At line %u in file 0x%08x%08x, additional data=0x%08x%08x", 5) \ -+X(252, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE_NOP, "UFO forced update NOP: FWCtx 0x%08.8x @ %u, reason %u", 3) \ -+X(253, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_CONTEXT_STORED, "TDM FWCtx:0x%08.8x stored", 1) \ -+X(254, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CDM_CONTEXT_STORED, "CDM FWCtx:0x%08.8x stored, resumeMask:0x%08x", 2) \ -+X(255, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GEOM_CONTEXT_STORED, "GEOM FWCtx:0x%08.8x stored", 1) \ -+X(256, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_CONTEXT_STORED, "3D FWCtx:0x%08.8x stored, resumeMask:0x%08x", 2) \ -+X(257, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RDM_CONTEXT_STORED, "RAY FWCtx:0x%08.8x stored", 1) \ -+X(258, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM, "Kick TDM: FWCtx 0x%08.8x @ %u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 8) \ -+X(259, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE, "Kick Compute: FWCtx 0x%08.8x @ %u, CSW resume:%u. (PID:%u, prio:%d, ext:0x%08x, int:0x%08x)", 7) \ -+X(260, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_PIPELINE, "Kick Compute: Kick ID %u FWCtx 0x%08.8x @ %u, CSW resume:%u. (PID:%u, prio:%d, ext:0x%08x, int:0x%08x)", 8) \ -+X(261, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_PIPELINE, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %u, Base 0x%08x%08x, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 11) \ -+X(262, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_PIPELINE, "Kick Ray: Kick ID %u FWCtx 0x%08.8x @ %u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 9)\ -+X(263, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DISABLE_DEPTH, "3D Disable Depth. 
ExtJobRef = 0x%08x", 1) \ -+X(264, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DISABLE_STENCIL, "3D Disable Stencil. ExtJobRef = 0x%08x", 1) \ -+X(265, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DISABLE_DS_IN_3D_RUNNING, "3D Disable DS in 3D running. RenderContext 0x%08.8x ExtJobRef 0x%08x", 2) \ -+X(266, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DISABLE_DS_IN_KICK_3D, "3D Disable DS in kick 3D. RenderContext 0x%08.8x ExtJobRef 0x%08x", 2) \ -+X(267, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ADD_DISABLE_DS_QUEUE, "Add disable DS in queue. RenderContext 0x%08.8x DisableJobRef 0x%08x Uncheck %u", 3) \ -+X(268, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOT_FIND_USABLE_DS_IN_QUEUE, "Not find usable DS in queue. RenderContext 0x%08.8x ExtJobRef 0x%08x DisableJobRef 0x%08x Uncheck %u index %u", 5) \ -+X(269, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_LIST_FULL, "Unable to set breakpoint for MemCtx 0x%08x as the breakpoint list is full.", 1) \ -+X(270, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NUM_LOG_PARAMS, "Invalid number of log parameters passed! (Group:%u ID:%u Params:%u Passed:%u)", 4) \ -+X(271, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_CANCEL_PIPELINE, "TDM cancelled: Kick ID %u", 1) \ -+X(272, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_CANCEL_PIPELINE, "TA cancelled: Kick ID %u", 1) \ -+X(273, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_CANCEL_PIPELINE, "3D cancelled: Kick ID %u", 1) \ -+X(274, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CDM_CANCEL_PIPELINE, "Compute cancelled: Kick ID %u", 1) \ -+X(275, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RAY_CANCEL_PIPELINE, "Ray cancelled: Kick ID %u", 1) \ -+\ -+X( 1, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED, "Bg Task DM = %u, counted = %u", 2) \ -+X( 2, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE_DEPRECATED, "Bg Task complete DM = %u", 1) \ -+X( 3, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK_DEPRECATED, "Irq Task DM = %u, Breq = %u, SBIrq = 0x%x", 3) \ -+X( 4, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE_DEPRECATED, "Irq Task complete DM = %u", 1) \ -+X( 5, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_BG_ALL_DEPRECATED, "Kick MTS Bg task DM=All", 0) \ -+X( 6, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_IRQ, "Kick MTS Irq task DM=%u", 1) \ -+X( 7, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED, "Ready queue debug DM = %u, celltype = %u", 2) \ -+X( 8, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN_DEPRECATED, "Ready-to-run debug DM = %u, item = 0x%x", 2) \ -+X( 9, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMDHEADER, "Client command header DM = %u, client CCB = 0x%x, cmd = 0x%x", 3) \ -+X( 10, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN, "Ready-to-run debug Driver ID = %u, DM = %u, item = 0x%x", 3) \ -+X( 11, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED2, "Ready queue debug DM = %u, celltype = %u, OSid = %u", 3) \ -+X( 12, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED2, "Bg Task DM = %u, counted = %u, OSid = %u", 3) \ -+X( 13, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE, "Bg Task complete DM Bitfield: %u", 1) \ -+X( 14, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE, "Irq Task complete.", 0) \ -+X( 15, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMD_DISCARD, "Discarded Command Type: %u OS ID = %u PID = %u context = 0x%08x cccb ROff = 0x%x, due to USC breakpoint hit by OS ID = %u PID = %u.", 7) \ -+X( 16, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_EXEC_DEPRECATED, "KCCB Slot %u: DM=%u, Cmd=0x%08x, OSid=%u", 4) \ -+X( 17, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_RTN_VALUE, "KCCB Slot %u: Return value %u", 2) \ -+X( 18, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK, "Bg Task Driver ID = %u", 1) \ -+X( 19, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_EXEC, "KCCB Slot %u: Cmd=0x%08x, Driver ID=%u", 3) \ 
-+X( 20, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK, "Irq Task (EVENT_STATUS=0x%08x)", 1) \ -+X( 21, RGXFW_GROUP_MTS, RGXFW_SF_MTS_VZ_SIDEBAND, "VZ sideband test, kicked with DriverID=%u from MTS, OSid for test=%u", 2) \ -+\ -+X( 1, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_CLEANUP, "FwCommonContext [0x%08x] cleaned", 1) \ -+X( 2, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_BUSY, "FwCommonContext [0x%08x] is busy: ReadOffset = %u, WriteOffset = %u", 3) \ -+X( 3, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP_DEPRECATED, "HWRTData [0x%08x] for DM=%u, received cleanup request", 2) \ -+X( 4, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_FOR_DM_DEPRECATED, "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %u", 3) \ -+X( 5, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED, "HWRTData [0x%08x] HW Context for DM%u is busy", 2) \ -+X( 6, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_DEPRECATED, "HWRTData [0x%08x] HW Context %u cleaned", 2) \ -+X( 7, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FL_CLEANED, "Freelist [0x%08x] cleaned", 1) \ -+X( 8, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_CLEANED, "ZSBuffer [0x%08x] cleaned", 1) \ -+X( 9, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_BUSY, "ZSBuffer [0x%08x] is busy: submitted = %u, executed = %u", 3) \ -+X( 10, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED2, "HWRTData [0x%08x] HW Context for DM%u is busy: submitted = %u, executed = %u", 4) \ -+X( 11, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANUP_DEPRECATED, "HW Ray Frame data [0x%08x] for DM=%u, received cleanup request", 2) \ -+X( 12, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_FOR_DM_DEPRECATED, "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %u", 3) \ -+X( 13, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_BUSY_DEPRECATED, "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %u, executed = %u", 4) \ -+X( 14, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_DEPRECATED, "HW Ray Frame Data [0x%08x] HW Context %u cleaned", 2) \ -+X( 15, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_INVALID_REQUEST, "Discarding invalid cleanup request of type 0x%x", 1) \ -+X( 16, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP, "Received cleanup request for HWRTData [0x%08x]", 1) \ -+X( 17, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY, "HWRTData [0x%08x] HW Context is busy: submitted = %u, executed = %u", 3) \ -+X( 18, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED, "HWRTData [0x%08x] HW Context %u cleaned, executed commands = %u", 3) \ -+\ -+X( 1, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_NEEDS_RESUME, "CDM FWCtx 0x%08.8x needs resume", 1) \ -+X( 2, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_DEPRECATED, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \ -+X( 3, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SHARED, "CDM FWCtx shared alloc size load 0x%x", 1) \ -+X( 4, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_COMPLETE, "*** CDM FWCtx store complete", 0) \ -+X( 5, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_START, "*** CDM FWCtx store start", 0) \ -+X( 6, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SOFT_RESET, "CDM Soft Reset", 0) \ -+X( 7, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_NEEDS_RESUME, "3D FWCtx 0x%08.8x needs resume", 1) \ -+X( 8, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME, "*** 3D FWCtx 0x%08.8x resume", 1) \ -+X( 9, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_COMPLETE, "*** 3D context store complete", 0) \ -+X( 10, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED, "3D context store pipe state: 0x%08.8x 0x%08.8x 0x%08.8x", 3) \ -+X( 
11, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START, "*** 3D context store start", 0) \ -+X( 12, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_TQ_RESUME, "*** 3D TQ FWCtx 0x%08.8x resume", 1) \ -+X( 13, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_NEEDS_RESUME, "TA FWCtx 0x%08.8x needs resume", 1) \ -+X( 14, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_RESUME, "*** TA FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \ -+X( 15, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_SHARED, "TA context shared alloc size store 0x%x, load 0x%x", 2) \ -+X( 16, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_COMPLETE, "*** TA context store complete", 0) \ -+X( 17, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_START, "*** TA context store start", 0) \ -+X( 18, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_REQUESTS_DM_DEPRECATED, "Higher priority context requests DM %u, old prio:%d, new prio:%d", 3) \ -+X( 19, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SET_CONTEXT_PRIORITY, "Set FWCtx 0x%x priority to %u", 2) \ -+X( 20, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED2, "3D context store pipe%u state: 0x%08.8x", 2) \ -+X( 21, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_DEPRECATED, "3D context resume pipe%u state: 0x%08.8x", 2) \ -+X( 22, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_NEEDS_RESUME_DEPRECATED, "SHG FWCtx 0x%08.8x needs resume", 1) \ -+X( 23, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_RESUME_DEPRECATED, "*** SHG FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \ -+X( 24, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_SHARED_DEPRECATED, "SHG context shared alloc size store 0x%x, load 0x%x", 2) \ -+X( 25, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_COMPLETE_DEPRECATED, "*** SHG context store complete", 0) \ -+X( 26, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_START_DEPRECATED, "*** SHG context store start", 0) \ -+X( 27, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_PIPE_INDIRECT, "Performing TA indirection, last used pipe %u", 1) \ -+X( 28, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_CTRL_STREAM_TERMINATE, "CDM context store hit ctrl stream terminate. Skip resume.", 0) \ -+X( 29, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_AB_BUFFER, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x, shader state %u", 4) \ -+X( 30, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STATE_BUFFER_FLIP, "TA PDS/USC state buffer flip (%u->%u)", 2) \ -+X( 31, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_52563_HIT_DEPRECATED, "TA context store hit BRN 52563: vertex store tasks outstanding", 0) \ -+X( 32, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_USC_POLL_FAILED, "TA USC poll failed (USC vertex task count: %u)", 1) \ -+X( 33, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_DEFERRED_DEPRECATED, "TA context store deferred due to BRN 54141.", 0) \ -+X( 34, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_REQUESTS_DM_DEPRECATED2, "Higher priority context requests DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u", 7) \ -+X( 35, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_START, "*** TDM context store start", 0) \ -+X( 36, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_COMPLETE, "*** TDM context store complete", 0) \ -+X( 37, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_NEEDS_RESUME_DEPRECATED, "TDM context needs resume, header [0x%08.8x, 0x%08.8x]", 2) \ -+X( 38, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_REQUESTS_DM, "Higher priority context requests DM %u. Prios (Driver ID, Driver ID Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u. 
Hard Context Switching: %u", 8) \ -+X( 39, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE, "3D context store pipe %2d (%2d) state: 0x%08.8x", 3) \ -+X( 40, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE, "3D context resume pipe %2d (%2d) state: 0x%08.8x", 3) \ -+X( 41, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START_VOLCANIC, "*** 3D context store start version %u (1=IPP_TILE, 2=ISP_TILE)", 1) \ -+X( 42, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_VOLCANIC, "3D context store pipe%u state: 0x%08.8x%08x", 3) \ -+X( 43, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_VOLCANIC, "3D context resume pipe%u state: 0x%08.8x%08x", 3) \ -+X( 44, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_IPP_STATE, "3D context resume IPP state: 0x%08.8x%08x", 2) \ -+X( 45, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_PIPES_EMPTY, "All 3D pipes empty after ISP tile mode store! IPP_status: 0x%08x", 1) \ -+X( 46, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE_DEPRECATED, "TDM context resume pipe%u state: 0x%08.8x%08x", 3) \ -+X( 47, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_LEVEL4_STORE_START, "*** 3D context store start version 4", 0) \ -+X( 48, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RESUME_MULTICORE, "Multicore context resume on DM%u active core mask 0x%04.4x", 2) \ -+X( 49, RGXFW_GROUP_CSW, RGXFW_SF_CSW_STORE_MULTICORE, "Multicore context store on DM%u active core mask 0x%04.4x", 2) \ -+X( 50, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE, "TDM context resume Core %u, pipe%u state: 0x%08.8x%08x%08x", 5) \ -+X( 51, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_STORE_COMPLETE, "*** RDM FWCtx store complete", 0) \ -+X( 52, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_STORE_START, "*** RDM FWCtx store start", 0) \ -+X( 53, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_NEEDS_RESUME, "RDM FWCtx 0x%08.8x needs resume", 1) \ -+X( 54, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_RESUME, "RDM FWCtx 0x%08.8x resume", 1) \ -+\ -+X( 1, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_BIFREQ_DEPRECATED, "Activate MemCtx=0x%08x BIFreq=%u secure=%u", 3) \ -+X( 2, RGXFW_GROUP_BIF, RGXFW_SF_BIF_DEACTIVATE, "Deactivate MemCtx=0x%08x", 1) \ -+X( 3, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_ALLOC_DEPRECATED, "Alloc PC reg %u", 1) \ -+X( 4, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_GRAB, "Grab reg set %u refcount now %u", 2) \ -+X( 5, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_UNGRAB_DEPRECATED, "Ungrab reg set %u refcount now %u", 2) \ -+X( 6, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_BIFREQ_DEPRECATED, "Setup reg=%u BIFreq=%u, expect=0x%08x%08x, actual=0x%08x%08x", 6) \ -+X( 7, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DEPRECATED, "Trust enabled:%u, for BIFreq=%u", 2) \ -+X( 8, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TILECFG_DEPRECATED, "BIF Tiling Cfg %u base 0x%08x%08x len 0x%08x%08x enable %u stride %u --> 0x%08x%08x", 9) \ -+X( 9, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID0, "Wrote the Value %u to OSID0, Cat Base %u, Register's contents are now 0x%08x 0x%08x", 4) \ -+X( 10, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID1, "Wrote the Value %u to OSID1, Context %u, Register's contents are now 0x%04x", 3) \ -+X( 11, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSIDx_DEPRECATED, "ui32OSid = %u, Catbase = %u, Reg Address = 0x%x, Reg index = %u, Bitshift index = %u, Val = 0x%08x%08x", 7) \ -+X( 12, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY_BIFREQ_DEPRECATED, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, BIFREQ %u", 5) \ -+X( 13, RGXFW_GROUP_BIF, RGXFW_SF_BIF_UNMAP_GPU_MEMORY, "Unmap GPU memory (event status 0x%x)", 1) \ -+X( 14, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_DM, "Activate MemCtx=0x%08x DM=%u secure=%u", 3) \ -+X( 15, RGXFW_GROUP_BIF, 
RGXFW_SF_BIF_SETUP_REG_DM_DEPRECATED, "Setup reg=%u DM=%u, expect=0x%08x%08x, actual=0x%08x%08x", 6) \ -+X( 16, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u", 4) \ -+X( 17, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DM, "Trust enabled:%u, for DM=%u", 2) \ -+X( 18, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY_DM, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, DM %u", 5) \ -+X( 19, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM, "Setup register set=%u DM=%u, PC address=0x%08x%08x, OSid=%u, NewPCRegRequired=%u", 6) \ -+X( 20, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_ALLOC, "Alloc PC set %u as register range [%u - %u]", 3) \ -+X( 21, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSIDx, "ui32CoreID = %u, ui32OSid = %u, Catbase = %u, Reg Address = 0x%x, Reg index = %u, Bitshift index = %u, Val = 0x%08x%08x", 8) \ -+X( 22, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM_SECURE, "Setup secure register=%u DM=%u, PC address=0x%08x%08x, OSid=%u, NewContext=%u", 6) \ -+X( 23, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_DM_DEPRECATED, "Activate MemCtx=0x%08x DM=%u secure=%u CtxFlags=0x%08x", 4) \ -+X( 24, RGXFW_GROUP_BIF, RGXFW_SF_BIF_DEACTIVATE_DEPRECATED, "Deactivate MemCtx=0x%08x CtxFlags=0x%08x", 2) \ -+X( 25, RGXFW_GROUP_BIF, RGXFW_SF_BIF_DEACTIVATE_AND_UNGRAB_PCSET, "Deactivate MemCtx=0x%08x, ungrab reg set %u refcount now %u", 3) \ -+\ -+X( 1, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_WRITE, "GPIO write 0x%02x", 1) \ -+X( 2, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_READ, "GPIO read 0x%02x", 1) \ -+X( 3, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ENABLED, "GPIO enabled", 0) \ -+X( 4, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_DISABLED, "GPIO disabled", 0) \ -+X( 5, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_STATUS, "GPIO status=%u (0=OK, 1=Disabled)", 1) \ -+X( 6, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_READ, "GPIO_AP: Read address=0x%02x (%u byte(s))", 2) \ -+X( 7, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_WRITE, "GPIO_AP: Write address=0x%02x (%u byte(s))", 2) \ -+X( 8, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_TIMEOUT, "GPIO_AP timeout!", 0) \ -+X( 9, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_ERROR, "GPIO_AP error. 
GPIO status=%u (0=OK, 1=Disabled)", 1) \ -+X( 10, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ALREADY_READ, "GPIO already read 0x%02x", 1) \ -+X( 11, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_CHECK_BUFFER_AVAILABLE, "SR: Check buffer %u available returned %u", 2) \ -+X( 12, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAITING_BUFFER_AVAILABLE, "SR: Waiting for buffer %u", 1) \ -+X( 13, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAIT_BUFFER_TIMEOUT, "SR: Timeout waiting for buffer %u (after %u ticks)", 2) \ -+X( 14, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_CHECK, "SR: Skip frame check for strip %u returned %u (0=No skip, 1=Skip frame)", 2) \ -+X( 15, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_REMAINING_STRIPS, "SR: Skip remaining strip %u in frame", 1) \ -+X( 16, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_FRAME_SKIP_NEW_FRAME, "SR: Inform HW that strip %u is a new frame", 1) \ -+X( 17, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_TIMEOUT, "SR: Timeout waiting for INTERRUPT_FRAME_SKIP (after %u ticks)", 1) \ -+X( 18, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_MODE, "SR: Strip mode is %u", 1) \ -+X( 19, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_INDEX, "SR: Strip Render start (strip %u)", 1) \ -+X( 20, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_RENDERED, "SR: Strip Render complete (buffer %u)", 1) \ -+X( 21, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_FAULT, "SR: Strip Render fault (buffer %u)", 1) \ -+X( 22, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE_DEPRECATED, "TRP state: %u", 1) \ -+X( 23, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_FAILURE, "TRP failure: %u", 1) \ -+X( 24, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_STATE, "SW TRP State: %u", 1) \ -+X( 25, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_FAILURE_DEPRECATED, "SW TRP failure: %u", 1) \ -+X( 26, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HW_KICK, "HW kick event (%u)", 1) \ -+X( 27, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WGP_CHECKSUMS, "GPU core (%u/%u): checksum 0x%08x vs. 0x%08x", 4) \ -+X( 28, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WGP_UNIT_CHECKSUMS, "GPU core (%u/%u), unit (%u,%u): checksum 0x%08x vs. 
0x%08x", 6) \ -+X( 29, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_CHECK_REG, "HWR: Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \ -+X( 30, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_USC_SLOTS_CHECK, "HWR: USC Core%u, ui32TotalSlotsUsedByDM=0x%08x, psDMHWCtl->ui32USCSlotsUsedByDM=0x%08x, bHWRNeeded=%u", 4) \ -+X( 31, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_USC_REG_CHECK, "HWR: USC Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \ -+X( 32, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE_HWRTDATA, "TRP HWRTData: 0x%08x, state: %u", 2) \ -+X( 33, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE_CNTX, "TRP Context: 0x%08x, state: %u", 2) \ -+X( 34, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_FAILURE_CNTX, "TRP Context: 0x%08x, failure: %u", 2) \ -+X( 35, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_GPU, "Memory dump: Addr=0x%02x%08x, Size=%d, ContextId=%u, DM=%u", 5) \ -+X( 36, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_GPU_DWORDS4, " 0x%02x%08x %08x %08x %08x %08x", 6) \ -+X( 37, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_GPU_DWORDS3, " 0x%02x%08x %08x %08x %08x", 5) \ -+X( 38, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_GPU_DWORDS2, " 0x%02x%08x %08x %08x", 4) \ -+X( 39, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_GPU_DWORDS1, " 0x%02x%08x %08x", 3) \ -+X( 40, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_FW, "Memory dump: Addr=0x%08x, Size=%d", 2) \ -+X( 41, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_FW_DWORDS4, " 0x%08x %08x %08x %08x %08x", 5) \ -+X( 42, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_FW_DWORDS3, " 0x%08x %08x %08x %08x", 4) \ -+X( 43, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_FW_DWORDS2, " 0x%08x %08x %08x", 3) \ -+X( 44, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HEXDUMP_FW_DWORDS1, " 0x%08x %08x", 2) \ -+X( 45, RGXFW_GROUP_MISC, RGXFW_SF_MISC_FBCDC_FAILURE_STATUS, "FBCDC: Core=0x%08x, Status=0x%08x, Signature status=0x%08x", 3) \ -+X( 46, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WORK_CYCLES_PIPEDM_EN, "FWCtx 0x%08.8x, PipeDM state %04x, (start) %08x, (end) %08x, elapsed %08x", 5) \ -+X( 47, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WORK_CYCLES, "FWCtx 0x%08.8x, elapsed %08x", 2) \ -+\ -+X( 1, RGXFW_GROUP_PM, RGXFW_SF_PM_AMLIST, "ALIST%u SP = %u, MLIST%u SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)", 10) \ -+X( 2, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_DEPRECATED, "Is TA: %u, finished: %u on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). 
FL different between TA/3D: global:%u, local:%u, mmu:%u", 8) \ -+X( 3, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE_DEPRECATED, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-3D-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \ -+X( 4, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE_DEPRECATED, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-TA-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \ -+X( 5, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_COMPLETE_DEPRECATED, "Freelist grow completed [0x%08x]: added pages 0x%08x, total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \ -+X( 6, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_DENIED_DEPRECATED, "Grow for freelist ID=0x%08x denied by host", 1) \ -+X( 7, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE, "Freelist update completed [0x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \ -+X( 8, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_RECONSTRUCTION_FAILED_DEPRECATED, "Reconstruction of freelist ID=0x%08x failed", 1) \ -+X( 9, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_WARNING, "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %u, operation(0-unpause, 1-pause): %u", 2) \ -+X( 10, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT_STATUS, "Force free 3D Context memory, FWCtx: 0x%08x, status(1:success, 0:fail): %u", 2)\ -+X( 11, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_ALLOC, "PM pause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ -+X( 12, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_ALLOC, "PM unpause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ -+X( 13, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_DALLOC, "PM pause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ -+X( 14, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_DALLOC, "PM unpause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ -+X( 15, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_FAILED, "PM ALLOC/DALLOC change was not actioned: PM_PAGE_MANAGEOP_STATUS=0x%x", 1) \ -+X( 16, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED, "Is TA: %u, finished: %u on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). 
FL different between TA/3D: global:%u, local:%u", 7) \ -+X( 17, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ -+X( 18, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ -+X( 19, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE_VOLCANIC, "Freelist update completed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 7) \ -+X( 20, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_FAILED, "Freelist update failed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 7) \ -+X( 21, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE_VOLCANIC, "UFL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ -+X( 22, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE_VOLCANIC, "UFL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ -+X( 23, RGXFW_GROUP_PM, RGXFW_SF_PM_CHECK_FL_BASEADDR, "Freelist 0x%08x base address from HW: 0x%02x%08x (expected value: 0x%02x%08x)", 5) \ -+X( 24, RGXFW_GROUP_PM, RGXFW_SF_PM_ANALYSE_FL_GROW, "Analysis of FL grow: Pause=(%u,%u) Paused+Valid(%u,%u) PMStateBuffer=0x%x", 5) \ -+X( 25, RGXFW_GROUP_PM, RGXFW_SF_PM_ATTEMPT_FL_GROW, "Attempt FL grow for FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \ -+X( 26, RGXFW_GROUP_PM, RGXFW_SF_PM_DEFER_FL_GROW, "Deferring FL grow for non-loaded FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \ -+X( 27, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_ALBIORIX, "Is GEOM: %u, finished: %u (HWRTData = 0x%08x, MemCtx = 0x%08x)", 4) \ -+X( 28, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT, "3D Timeout Now for FWCtx 0x%08.8x", 1) \ -+X( 29, RGXFW_GROUP_PM, RGXFW_SF_PM_RECYCLE, "GEOM PM Recycle for FWCtx 0x%08.8x", 1) \ -+X( 30, RGXFW_GROUP_PM, RGXFW_SF_PM_PRIMARY_CONFIG, "PM running primary config (Core %u)", 1) \ -+X( 31, RGXFW_GROUP_PM, RGXFW_SF_PM_SECONDARY_CONFIG, "PM running secondary config (Core %u)", 1) \ -+X( 32, RGXFW_GROUP_PM, RGXFW_SF_PM_TERTIARY_CONFIG, "PM running tertiary config (Core %u)", 1) \ -+X( 33, RGXFW_GROUP_PM, RGXFW_SF_PM_QUATERNARY_CONFIG, "PM running quaternary config (Core %u)", 1) \ -+X( 34, RGXFW_GROUP_PM, RGXFW_SF_PM_REVERT_CONFIG, "PM reverting to previous config (Core %u)", 1) \ -+\ -+X( 1, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_DYNAMIC_STATUS_DEPRECATED, "Global link list dynamic page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \ -+X( 2, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_STATIC_STATUS_DEPRECATED, "Global link list static page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \ -+X( 3, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_GROW_DEPRECATED, "RPM request failed. Waiting for freelist grow.", 0) \ -+X( 4, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_ABORT_DEPRECATED, "RPM request failed. 
Aborting the current frame.", 0) \ -+X( 5, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_PENDING_GROW_DEPRECATED, "RPM waiting for pending grow on freelist 0x%08x", 1) \ -+X( 6, RGXFW_GROUP_RPM, RGXFW_SF_RPM_REQUEST_HOST_GROW_DEPRECATED, "Request freelist grow [0x%08x] current pages %u, grow size %u", 3) \ -+X( 7, RGXFW_GROUP_RPM, RGXFW_SF_RPM_FREELIST_LOAD_DEPRECATED, "Freelist load: SHF = 0x%08x, SHG = 0x%08x", 2) \ -+X( 8, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_DEPRECATED, "SHF FPL register: 0x%08x.0x%08x", 2) \ -+X( 9, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_DEPRECATED, "SHG FPL register: 0x%08x.0x%08x", 2) \ -+X( 10, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_FREELIST_DEPRECATED, "Kernel requested RPM grow on freelist (type %u) at 0x%08x from current size %u to new size %u, RPM restart: %u (1=Yes)", 5) \ -+X( 11, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_RESTART_DEPRECATED, "Restarting SHG", 0) \ -+X( 12, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_ABORTED_DEPRECATED, "Grow failed, aborting the current frame.", 0) \ -+X( 13, RGXFW_GROUP_RPM, RGXFW_SF_RPM_ABORT_COMPLETE_DEPRECATED, "RPM abort complete on HWFrameData [0x%08x].", 1) \ -+X( 14, RGXFW_GROUP_RPM, RGXFW_SF_RPM_CLEANUP_NEEDS_ABORT_DEPRECATED, "RPM freelist cleanup [0x%08x] requires abort to proceed.", 1) \ -+X( 15, RGXFW_GROUP_RPM, RGXFW_SF_RPM_RPM_PT_DEPRECATED, "RPM page table base register: 0x%08x.0x%08x", 2) \ -+X( 16, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_ABORT_DEPRECATED, "Issuing RPM abort.", 0) \ -+X( 17, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_TOGGLE_CHECK_FULL_DEPRECATED, "RPM OOM received but toggle bits indicate free pages available", 0) \ -+X( 18, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_HW_TIMEOUT_DEPRECATED, "RPM hardware timeout. Unable to process OOM event.", 0) \ -+X( 19, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_LOAD_DEPRECATED_DEPRECATED, "SHF FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x", 5) \ -+X( 20, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_LOAD_DEPRECATED, "SHG FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x", 5) \ -+X( 21, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_STORE_DEPRECATED, "SHF FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x", 3) \ -+X( 22, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_STORE_DEPRECATED, "SHG FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x", 3) \ -+\ -+X( 1, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_FINISHED, "3D RTData 0x%08x finished on HW context %u", 2) \ -+X( 2, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_READY, "3D RTData 0x%08x ready on HW context %u", 2) \ -+X( 3, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO_DEPRECATED, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %u, global: %u, mmu: %u", 4) \ -+X( 4, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_3D_DEPRECATED, "Loading VFP table 0x%08x%08x for 3D", 2) \ -+X( 5, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_TA_DEPRECATED, "Loading VFP table 0x%08x%08x for TA", 2) \ -+X( 6, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED, "Load Freelist 0x%x type: %u (0:local,1:global,2:mmu) for DM%u: TotalPMPages = %u, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \ -+X( 7, RGXFW_GROUP_RTD, RGXFW_SF_RTD_VHEAP_STORE, "Perform VHEAP table store", 0) \ -+X( 8, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_MATCH_FOUND, "RTData 0x%08x: found match in Context=%u: Load=No, Store=No", 2) \ -+X( 9, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_NULL_FOUND, "RTData 0x%08x: found NULL in Context=%u: Load=Yes, Store=No", 2) \ -+X( 10, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_3D_FINISHED, "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%u: 
Load=Yes, Store=Yes", 3) \ -+X( 11, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_TA_FINISHED, "RTData 0x%08x: found state TA finished (0x%08x) in Context=%u: Load=Yes, Store=Yes", 3) \ -+X( 12, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_STACK_POINTERS, "Loading stack-pointers for %u (0:MidTA,1:3D) on context %u, MLIST = 0x%08x, ALIST = 0x%08x%08x", 5) \ -+X( 13, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED, "Store Freelist 0x%x type: %u (0:local,1:global,2:mmu) for DM%u: TotalPMPages = %u, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \ -+X( 14, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_FINISHED, "TA RTData 0x%08x finished on HW context %u", 2) \ -+X( 15, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED, "TA RTData 0x%08x loaded on HW context %u", 2) \ -+X( 16, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED2, "Store Freelist 0x%x type: %u (0:local,1:global,2:mmu) for DM%u: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ -+X( 17, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED2, "Load Freelist 0x%x type: %u (0:local,1:global,2:mmu) for DM%u: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ -+X( 18, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG_DEPRECATED, "Freelist 0x%x RESET!!!!!!!!", 1) \ -+X( 19, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG2_DEPRECATED, "Freelist 0x%x stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 5) \ -+X( 20, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_DEPRECATED, "Request reconstruction of Freelist 0x%x type: %u (0:local,1:global,2:mmu) on HW context %u", 3) \ -+X( 21, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED, "Freelist reconstruction ACK from host (HWR state :%u)", 1) \ -+X( 22, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED2, "Freelist reconstruction completed", 0) \ -+X( 23, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED_DEPRECATED, "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%u", 3) \ -+X( 24, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TE_RGNHDR_INFO, "TE Region headers base 0x%08x%08x (RGNHDR Init: %u)", 3) \ -+X( 25, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS_DEPRECATED, "TA Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 8) \ -+X( 26, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_LOADED_DEPRECATED, "3D RTData 0x%08x loaded on HW context %u", 2) \ -+X( 27, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED, "3D Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x (MemCtx 0x%08x)", 4) \ -+X( 28, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RESTART_AFTER_PR_EXECUTED, "Restarting TA after partial render, HWRTData0State=0x%x, HWRTData1State=0x%x", 2) \ -+X( 29, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %u, global: %u", 3) \ -+X( 30, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_FL, "Store Freelist 0x%x type: %u (0:local,1:global) for PMDM%u: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ -+X( 31, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL, "Load Freelist 0x%x type: %u (0:local,1:global) for PMDM%u: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ -+X( 32, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED2, "3D 
Buffers: FWCtx 0x%08x, parent RT 0x%08x, RTData 0x%08x on ctx %u, (MemCtx 0x%08x)", 5) \ -+X( 33, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS, "TA Buffers: FWCtx 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 7) \ -+X( 34, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS, "3D Buffers: FWCtx 0x%08x, RTData 0x%08x on ctx %u, (MemCtx 0x%08x)", 4) \ -+X( 35, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_V2, "Load Freelist 0x%x type: %u (0:local,1:global) for PMDM%u: FL Total Pages %u (max=%u,grow size=%u)", 6) \ -+X( 36, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILLED_TA, "TA RTData 0x%08x marked as killed.", 1) \ -+X( 37, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILLED_3D, "3D RTData 0x%08x marked as killed.", 1) \ -+X( 38, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILL_TA_AFTER_RESTART, "RTData 0x%08x will be killed after TA restart.", 1) \ -+X( 39, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RENDERSTATE_RESET, "RTData 0x%08x Render State Buffer 0x%02x%08x will be reset.", 3) \ -+X( 40, RGXFW_GROUP_RTD, RGXFW_SF_RTD_GEOM_RENDERSTATE, "GEOM RTData 0x%08x using Render State Buffer 0x%02x%08x.", 3) \ -+X( 41, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FRAG_RENDERSTATE, "FRAG RTData 0x%08x using Render State Buffer 0x%02x%08x.", 3) \ -+\ -+X( 1, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_DEPRECATED, "Force Z-Load for partial render", 0) \ -+X( 2, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_DEPRECATED, "Force Z-Store for partial render", 0) \ -+X( 3, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_LOCAL_DEPRECATED, "3D MemFree: Local FL 0x%08x", 1) \ -+X( 4, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_MMU_DEPRECATED, "3D MemFree: MMU FL 0x%08x", 1) \ -+X( 5, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_GLOBAL_DEPRECATED, "3D MemFree: Global FL 0x%08x", 1) \ -+X( 6, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_DEPRECATED, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x, HardwareSync Fence [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 6) \ -+X( 7, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_FL, "OOM TA_cmd=0x%08x, U-FL 0x%08x, N-FL 0x%08x", 3) \ -+X( 8, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_MMU_FL_DEPRECATED, "OOM TA_cmd=0x%08x, OOM MMU:%u, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x", 5) \ -+X( 9, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_AVOIDED_DEPRECATED, "Partial render avoided", 0) \ -+X( 10, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_DISCARDED_DEPRECATED, "Partial render discarded", 0) \ -+X( 11, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_FINISHED, "Partial Render finished", 0) \ -+X( 12, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DBG_DEPRECATED, "SPM Owner = 3D-BG", 0) \ -+X( 13, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DIRQ_DEPRECATED, "SPM Owner = 3D-IRQ", 0) \ -+X( 14, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_NONE_DEPRECATED, "SPM Owner = NONE", 0) \ -+X( 15, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TABG_DEPRECATED, "SPM Owner = TA-BG", 0) \ -+X( 16, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TAIRQ_DEPRECATED, "SPM Owner = TA-IRQ", 0) \ -+X( 17, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_ADDRESS, "ZStore address 0x%08x%08x", 2) \ -+X( 18, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SSTORE_ADDRESS, "SStore address 0x%08x%08x", 2) \ -+X( 19, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_ADDRESS, "ZLoad address 0x%08x%08x", 2) \ -+X( 20, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SLOAD_ADDRESS, "SLoad address 0x%08x%08x", 2) \ -+X( 21, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_ZSBUFFER_DEPRECATED, "No deferred ZS Buffer provided", 0) \ -+X( 22, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POPULATED, "ZS Buffer successfully populated (ID=0x%08x)", 1) \ -+X( 23, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POP_UNNEEDED_DEPRECATED, "No need 
to populate ZS Buffer (ID=0x%08x)", 1) \ -+X( 24, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOPULATED, "ZS Buffer successfully unpopulated (ID=0x%08x)", 1) \ -+X( 25, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOP_UNNEEDED_DEPRECATED, "No need to unpopulate ZS Buffer (ID=0x%08x)", 1) \ -+X( 26, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_DEPRECATED, "Send ZS-Buffer backing request to host (ID=0x%08x)", 1) \ -+X( 27, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_DEPRECATED, "Send ZS-Buffer unbacking request to host (ID=0x%08x)", 1) \ -+X( 28, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer backing request. Previous request still pending (ID=0x%08x)", 1) \ -+X( 29, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer unbacking request. Previous request still pending (ID=0x%08x)", 1) \ -+X( 30, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for ZBuffer to be backed (ID=0x%08x)", 1) \ -+X( 31, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for SBuffer to be backed (ID=0x%08x)", 1) \ -+X( 32, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_NONE, "SPM State = none", 0) \ -+X( 33, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_BLOCKED, "SPM State = PR blocked", 0) \ -+X( 34, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_GROW, "SPM State = wait for grow", 0) \ -+X( 35, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_HW, "SPM State = wait for HW", 0) \ -+X( 36, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_RUNNING, "SPM State = PR running", 0) \ -+X( 37, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_AVOIDED, "SPM State = PR avoided", 0) \ -+X( 38, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_EXECUTED, "SPM State = PR executed", 0) \ -+X( 39, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FREELIST_MATCH, "3DMemFree matches freelist 0x%08x (FL type = %u)", 2) \ -+X( 40, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_FLAG_SET, "Raise the 3DMemFreeDedected flag", 0) \ -+X( 41, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_PENDING_GROW, "Wait for pending grow on Freelist 0x%08x", 1) \ -+X( 42, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_FAILED, "ZS Buffer failed to be populated (ID=0x%08x)", 1) \ -+X( 43, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FL_GROW_DEBUG, "Grow update inconsistency: FL addr: 0x%02x%08x, curr pages: %u, ready: %u, new: %u", 5) \ -+X( 44, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA_WITH_SP, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u, SP : %u", 4) \ -+X( 45, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE_DEPRECATED, "Received grow update, FL addr: 0x%02x%08x, current pages: %u, ready pages: %u, threshold: %u", 5) \ -+X( 46, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_PRBUFFER, "No deferred partial render FW (Type=%u) Buffer provided", 1) \ -+X( 47, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_POP_UNNEEDED, "No need to populate PR Buffer (ID=0x%08x)", 1) \ -+X( 48, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNPOP_UNNEEDED, "No need to unpopulate PR Buffer (ID=0x%08x)", 1) \ -+X( 49, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST, "Send PR Buffer backing request to host (ID=0x%08x)", 1) \ -+X( 50, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST, "Send PR Buffer unbacking request to host (ID=0x%08x)", 1) \ -+X( 51, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST_PENDING, "Don't send PR Buffer backing request. 
Previous request still pending (ID=0x%08x)", 1) \ -+X( 52, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST_PENDING, "Don't send PR Buffer unbacking request. Previous request still pending (ID=0x%08x)", 1) \ -+X( 53, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_NOT_READY, "Partial Render waiting for Buffer %u type to be backed (ID=0x%08x)", 2) \ -+X( 54, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE, "Received grow update, FL addr: 0x%02x%08x, new pages: %u, ready pages: %u", 4) \ -+X( 66, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ -+X( 67, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u", 3) \ -+X( 68, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PR_DEADLOCK_UNBLOCKED, "OOM TA/3D PR deadlock unblocked reordering DM%u runlist head from Context 0x%08x to 0x%08x", 3) \ -+X( 69, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_FORCEFREE, "SPM State = PR force free", 0) \ -+X( 70, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_FAILED, "Failure to back PR Buffer", 0) \ -+\ -+X( 1, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED, "Check Pow state DM%u int: 0x%x, ext: 0x%x, pow flags: 0x%x", 4) \ -+X( 2, RGXFW_GROUP_POW, RGXFW_SF_POW_GPU_IDLE, "GPU idle (might be powered down). Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ -+X( 3, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ_DEPRECATED, "OS requested pow off (forced = %u), DM%u, pow flags: 0x%x", 3) \ -+X( 4, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_DEPRECATED, "Initiate powoff query. Inactive DMs: %u %u %u %u", 4) \ -+X( 5, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECKOFF_DEPRECATED, "Any RD-DM pending? %u, Any RD-DM Active? %u", 2) \ -+X( 6, RGXFW_GROUP_POW, RGXFW_SF_POW_GPU_OFF, "GPU ready to be powered down. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ -+X( 7, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ, "HW Request On(1)/Off(0): %u, Units: 0x%08.8x", 2) \ -+X( 8, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_REQ, "Request to change num of dusts to %u (Power flags=%u)", 2) \ -+X( 9, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE, "Changing number of dusts from %u to %u", 2) \ -+X( 11, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_INIT_DEPRECATED, "Sidekick init", 0) \ -+X( 12, RGXFW_GROUP_POW, RGXFW_SF_POW_RD_INIT_DEPRECATED, "Rascal+Dusts init (# dusts mask: 0x%x)", 1) \ -+X( 13, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RD, "Initiate powoff query for RD-DMs.", 0) \ -+X( 14, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_TLA, "Initiate powoff query for TLA-DM.", 0) \ -+X( 15, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RD, "Any RD-DM pending? %u, Any RD-DM Active? %u", 2) \ -+X( 16, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_TLA, "TLA-DM pending? %u, TLA-DM Active? %u", 2) \ -+X( 17, RGXFW_GROUP_POW, RGXFW_SF_POW_BRN37270_DEPRECATED, "Request power up due to BRN37270. Pow stat int: 0x%x", 1) \ -+X( 18, RGXFW_GROUP_POW, RGXFW_SF_POW_REQ_CANCEL, "Cancel power off request int: 0x%x, ext: 0x%x, pow flags: 0x%x", 3) \ -+X( 19, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_IDLE, "OS requested forced IDLE, pow flags: 0x%x", 1) \ -+X( 20, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE, "OS cancelled forced IDLE, pow flags: 0x%x", 1) \ -+X( 21, RGXFW_GROUP_POW, RGXFW_SF_POW_IDLE_TIMER, "Idle timer start. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ -+X( 22, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_IDLE_TIMER, "Cancel idle timer. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ -+X( 23, RGXFW_GROUP_POW, RGXFW_SF_POW_APM_LATENCY_CHANGE, "Active PM latency set to %ums. 
Core clock: %u Hz", 2) \ -+X( 24, RGXFW_GROUP_POW, RGXFW_SF_POW_CDM_CLUSTERS, "Compute cluster mask change to 0x%x, %u dusts powered.", 2) \ -+X( 25, RGXFW_GROUP_POW, RGXFW_SF_POW_NULL_CMD_INIOFF_RD, "Null command executed, repeating initiate powoff query for RD-DMs.", 0) \ -+X( 26, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_ENERGY, "Power monitor: Estimate of dynamic energy %u", 1) \ -+X( 27, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED2, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x", 3) \ -+X( 28, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_DEADLINE, "Proactive DVFS: New deadline, time = 0x%08x%08x", 2) \ -+X( 29, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_WORKLOAD, "Proactive DVFS: New workload, cycles = 0x%08x%08x", 2) \ -+X( 30, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_CALCULATE, "Proactive DVFS: Proactive frequency calculated = %u", 1) \ -+X( 31, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UTILISATION, "Proactive DVFS: Reactive utilisation = %u percent", 1) \ -+X( 32, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_REACT, "Proactive DVFS: Reactive frequency calculated = %u.%u", 2) \ -+X( 33, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND_DEPRECATED, "Proactive DVFS: OPP Point Sent = 0x%x", 1) \ -+X( 34, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DEADLINE_REMOVED, "Proactive DVFS: Deadline removed = 0x%08x%08x", 2) \ -+X( 35, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_WORKLOAD_REMOVED, "Proactive DVFS: Workload removed = 0x%08x%08x", 2) \ -+X( 36, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_THROTTLE, "Proactive DVFS: Throttle to a maximum = 0x%x", 1) \ -+X( 37, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_FAILURE, "Proactive DVFS: Failed to pass OPP point via GPIO.", 0) \ -+X( 38, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_INVALID_NODE_DEPRECATED, "Proactive DVFS: Invalid node passed to function.", 0) \ -+X( 39, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GUEST_BAD_ACCESS_DEPRECATED, "Proactive DVFS: Guest OS attempted to do a privileged action. OSid = %u", 1) \ -+X( 40, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_STARTED, "Proactive DVFS: Unprofiled work started. Total unprofiled work present: %u", 1) \ -+X( 41, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_FINISHED, "Proactive DVFS: Unprofiled work finished. Total unprofiled work present: %u", 1) \ -+X( 42, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DISABLED, "Proactive DVFS: Disabled: Not enabled by host.", 0) \ -+X( 43, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ_RESULT, "HW Request Completed(1)/Aborted(0): %u, Ticks: %u", 2) \ -+X( 44, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_FIX_59042_DEPRECATED, "Allowed number of dusts is %u due to BRN59042.", 1) \ -+X( 45, RGXFW_GROUP_POW, RGXFW_SF_POW_HOST_TIMEOUT_NOTIFICATION, "Host timed out while waiting for a forced idle state. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ -+X( 46, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x, Fence Counters: Check: %u - Update: %u", 5) \ -+X( 47, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND, "Proactive DVFS: OPP Point Sent = 0x%x, Success = 0x%x", 2) \ -+X( 48, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_IDLE, "Proactive DVFS: GPU transitioned to idle", 0) \ -+X( 49, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_ACTIVE, "Proactive DVFS: GPU transitioned to active", 0) \ -+X( 50, RGXFW_GROUP_POW, RGXFW_SF_POW_POWDUMP_BUFFER_SIZE, "Power counter dumping: Data truncated writing register %u. 
Buffer too small.", 1) \ -+X( 51, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT, "Power controller returned ABORT for last request so retrying.", 0) \ -+X( 52, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_POWER_REQUEST_DEPRECATED, "Discarding invalid power request: type 0x%x, DM %u", 2) \ -+X( 53, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE_NOT_IDLE, "Detected attempt to cancel forced idle while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \ -+X( 54, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_POW_OFF_NOT_IDLE, "Detected attempt to force power off while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \ -+X( 55, RGXFW_GROUP_POW, RGXFW_SF_POW_NUMDUST_CHANGE_NOT_IDLE, "Detected attempt to change dust count while not forced idle (pow state 0x%x)", 1) \ -+X( 56, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESULT, "Power monitor: Type = %u (0 = power, 1 = energy), Estimate result = 0x%08x%08x", 3) \ -+X( 57, RGXFW_GROUP_POW, RGXFW_SF_POW_MINMAX_CONFLICT, "Conflicting clock frequency range: OPP min = %u, max = %u", 2) \ -+X( 58, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_FLOOR, "Proactive DVFS: Set floor to a minimum = 0x%x", 1) \ -+X( 59, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ, "OS requested pow off (forced = %u), pow flags: 0x%x", 2) \ -+X( 60, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_POWER_REQUEST, "Discarding invalid power request: type 0x%x", 1) \ -+X( 61, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_STATE_CHANGE_REQ, "Request to change SPU power state mask from 0x%x to 0x%x. Pow flags: 0x%x", 3) \ -+X( 62, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_STATE_CHANGE, "Changing SPU power state mask from 0x%x to 0x%x", 2) \ -+X( 63, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_CHANGE_NOT_IDLE, "Detected attempt to change SPU power state mask while not forced idle (pow state 0x%x)", 1) \ -+X( 64, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_SPU_POWER_MASK, "Invalid SPU power mask 0x%x! Changing to 1", 1) \ -+X( 65, RGXFW_GROUP_POW, RGXFW_SF_POW_CLKDIV_UPDATE, "Proactive DVFS: Send OPP %u with clock divider value %u", 2) \ -+X( 66, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_PERF_MODE, "Power counters in raw/validation mode.", 0) \ -+X( 67, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESET, "Reset PPA block state %u (1=reset, 0=recalculate).", 1) \ -+X( 68, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT_WITH_CORE, "Power controller returned ABORT for Core-%u last request so retrying.", 1) \ -+X( 69, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ64BIT, "HW Request On(1)/Off(0): %u, Units: 0x%08x%08x", 3) \ -+X( 70, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_RAC_POW_STATE_CHANGE_REQ, "Request to change SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x. Pow flags: 0x%x", 5) \ -+X( 71, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_RAC_POW_STATE_CHANGE, "Changing SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x", 4) \ -+X( 72, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RAC, "RAC pending? %u, RAC Active? 
%u", 2) \ -+X( 73, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RAC, "Initiate powoff query for RAC.", 0) \ -+X( 74, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DEFER_REACTIVE_UPDATE, "Proactive DVFS: Defer reactive update to meet next deadline 0x%08x%08x", 2) \ -+\ -+X( 1, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DEPRECATED, "Lockup detected on DM%u, FWCtx: 0x%08.8x", 2) \ -+X( 2, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_FW_DEPRECATED, "Reset fw state for DM%u, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \ -+X( 3, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED, "Reset HW", 0) \ -+X( 4, RGXFW_GROUP_HWR, RGXFW_SF_HWR_TERMINATED_DEPRECATED, "Lockup recovered.", 0) \ -+X( 5, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED, "Lock-up DM%u FWCtx: 0x%08.8x", 2) \ -+X( 6, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DETECTED_DEPRECATED, "Lockup detected: GLB(%u->%u), PER-DM(0x%08x->0x%08x)", 4) \ -+X( 7, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EARLY_FAULT_DETECTION_DEPRECATED, "Early fault detection: GLB(%u->%u), PER-DM(0x%08x)", 3) \ -+X( 8, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP_DEPRECATED, "Hold scheduling due lockup: GLB(%u), PER-DM(0x%08x->0x%08x)", 3) \ -+X( 9, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FALSE_LOCKUP_DEPRECATED, "False lockup detected: GLB(%u->%u), PER-DM(0x%08x->0x%08x)", 4) \ -+X( 10, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED, "BRN37729: GLB(%u->%u), PER-DM(0x%08x->0x%08x)", 4) \ -+X( 11, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED, "Freelists reconstructed: GLB(%u->%u), PER-DM(0x%08x)", 3) \ -+X( 12, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RECONSTRUCTING_FREELISTS_DEPRECATED, "Reconstructing freelists: %u (0-No, 1-Yes): GLB(%u->%u), PER-DM(0x%08x)", 4) \ -+X( 13, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FAILED_HW_POLL, "HW poll %u (0-Unset 1-Set) failed (reg:0x%08x val:0x%08x)", 3) \ -+X( 14, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED_DEPRECATED, "Discarded cmd on DM%u FWCtx=0x%08x", 2) \ -+X( 15, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED, "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08x (st: %u), FWCtx 0x%08x @ %u", 6) \ -+X( 16, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PM_FENCE_DEPRECATED, "PM fence WA could not be applied, Valid TA Setup: %u, RD powered off: %u", 2) \ -+X( 17, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_SNAPSHOT, "FL snapshot RTD 0x%08.8x - local (0x%08.8x): %u, global (0x%08.8x): %u", 5) \ -+X( 18, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_CHECK, "FL check RTD 0x%08.8x, discard: %u - local (0x%08.8x): s%u?=c%u, global (0x%08.8x): s%u?=c%u", 8) \ -+X( 19, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_DEPRECATED, "FL reconstruction 0x%08.8x c%u", 2) \ -+X( 20, RGXFW_GROUP_HWR, RGXFW_SF_HWR_3D_CHECK, "3D check: missing TA FWCtx 0x%08.8x @ %u, RTD 0x%08x.", 3) \ -+X( 21, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED2, "Reset HW (mmu:%u, extmem: %u)", 2) \ -+X( 22, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_TA_CACHES, "Zero TA caches for FWCtx: 0x%08.8x (TPC addr: 0x%08x%08x, size: %u bytes)", 4) \ -+X( 23, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED2, "Recovery DM%u: Freelists reconstructed. New R-Flags=0x%08x", 2) \ -+X( 24, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SKIPPED_CMD, "Recovery DM%u: FWCtx 0x%08x skipped to command @ %u. PR=%u. 
New R-Flags=0x%08x", 5) \ -+X( 25, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_RECOVERED, "Recovery DM%u: DM fully recovered", 1) \ -+X( 26, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP, "DM%u: Hold scheduling due to R-Flag = 0x%08x", 2) \ -+X( 27, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_RECONSTRUCTION, "Analysis: Need freelist reconstruction", 0) \ -+X( 28, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP, "Analysis DM%u: Lockup FWCtx: 0x%08.8x. Need to skip to next command", 2) \ -+X( 29, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP_OOM_TA, "Analysis DM%u: Lockup while TA is OOM FWCtx: 0x%08.8x. Need to skip to next command", 2) \ -+X( 30, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_PR_CLEANUP, "Analysis DM%u: Lockup while partial render FWCtx: 0x%08.8x. Need PR cleanup", 2) \ -+X( 31, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED2, "GPU has locked up", 0) \ -+X( 32, RGXFW_GROUP_HWR, RGXFW_SF_HWR_READY, "DM%u ready for HWR", 1) \ -+X( 33, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_UPDATE_RECOVERY, "Recovery DM%u: Updated Recovery counter. New R-Flags=0x%08x", 2) \ -+X( 34, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED2, "Analysis: BRN37729 detected, reset TA and re-kicked 0x%08x)", 1) \ -+X( 35, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_TIMED_OUT_DEPRECATED, "DM%u timed out", 1) \ -+X( 36, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EVENT_STATUS_REG, "RGX_CR_EVENT_STATUS=0x%08x", 1) \ -+X( 37, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_FALSE_LOCKUP, "DM%u lockup falsely detected, R-Flags=0x%08x", 2) \ -+X( 38, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_OUTOFTIME, "GPU has overrun its deadline", 0) \ -+X( 39, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_POLLFAILURE, "GPU has failed a poll", 0) \ -+X( 40, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PERF_PHASE_REG, "RGX DM%u phase count=0x%08x", 2) \ -+X( 41, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED3, "Reset HW (loop:%u, poll failures: 0x%08x)", 2) \ -+X( 42, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_FAULT_EVENT, "MMU fault event: 0x%08x", 1) \ -+X( 43, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BIF1_FAULT, "BIF1 page fault detected (Bank1 MMU Status: 0x%08x)", 1) \ -+X( 44, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_TRUE_DEPRECATED, "Fast CRC Failed. 
Proceeding to full register checking (DM: %u).", 1) \ -+X( 45, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_META_FAULT, "Meta MMU page fault detected (Meta MMU Status: 0x%08x%08x)", 2) \ -+X( 46, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_DEPRECATED, "Fast CRC Check result for DM%u is HWRNeeded=%u", 2) \ -+X( 47, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK_DEPRECATED, "Full Signature Check result for DM%u is HWRNeeded=%u", 2) \ -+X( 48, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINAL_RESULT, "Final result for DM%u is HWRNeeded=%u with HWRChecksToGo=%u", 3) \ -+X( 49, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK_DEPRECATED, "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%u", 3) \ -+X( 50, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK_DEPRECATED, "Deadline counter for DM%u is HWRDeadline=%u", 2) \ -+X( 51, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST_DEPRECATED, "Holding Scheduling on OSid %u due to pending freelist reconstruction", 1) \ -+X( 52, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_REQUEST, "Requesting reconstruction for freelist 0x%x (ID=%u)", 2) \ -+X( 53, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_PASSED, "Reconstruction of freelist ID=%u complete", 1) \ -+X( 54, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED, "Reconstruction needed for freelist 0x%x (ID=%u) type: %u (0:local,1:global,2:mmu) on HW context %u", 4) \ -+X( 55, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FAILED, "Reconstruction of freelist ID=%u failed", 1) \ -+X( 56, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESTRICTING_PDS_TASKS, "Restricting PDS Tasks to help other stalling DMs (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \ -+X( 57, RGXFW_GROUP_HWR, RGXFW_SF_HWR_UNRESTRICTING_PDS_TASKS, "Unrestricting PDS Tasks again (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \ -+X( 58, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_USED, "USC slots: %u used by DM%u", 2) \ -+X( 59, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_EMPTY, "USC slots: %u empty", 1) \ -+X( 60, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HCS_FIRE, "HCS DM%u's Context Switch failed to meet deadline. 
Current time: 0x%08x%08x, deadline: 0x%08x%08x", 5) \ -+X( 61, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_HW_RESET, "Begin hardware reset (HWR Counter=%u)", 1) \ -+X( 62, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINISH_HW_RESET, "Finished hardware reset (HWR Counter=%u)", 1) \ -+X( 63, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST, "Holding Scheduling on DM %u for Driver ID %u due to pending freelist reconstruction", 2) \ -+X( 64, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_UMQ_READ_OFFSET, "User Mode Queue ROff reset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes StreamStartOffset = %u)", 5) \ -+X( 65, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED2, "Reconstruction needed for freelist 0x%x (ID=%u) type: %u (0:local,1:global) on HW context %u", 4) \ -+X( 66, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MIPS_FAULT, "Mips page fault detected (BadVAddr: 0x%08x, EntryLo0: 0x%08x, EntryLo1: 0x%08x)", 3) \ -+X( 67, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ANOTHER_CHANCE, "At least one other DM is running okay so DM%u will get another chance", 1) \ -+X( 68, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FW, "Reconstructing in FW, FL: 0x%x (ID=%u)", 2) \ -+X( 69, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_RTC, "Zero RTC for FWCtx: 0x%08.8x (RTC addr: 0x%08x%08x, size: %u bytes)", 4) \ -+X( 70, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED3, "Reconstruction needed for freelist 0x%x (ID=%u) type: %u (0:local,1:global) phase: %u (0:TA, 1:3D) on HW context %u", 5) \ -+X( 71, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_LONG_HW_POLL, "Start long HW poll %u (0-Unset 1-Set) for (reg:0x%08x val:0x%08x)", 3) \ -+X( 72, RGXFW_GROUP_HWR, RGXFW_SF_HWR_END_LONG_HW_POLL, "End long HW poll (result=%u)", 1) \ -+X( 73, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK, "DM%u has taken %u ticks and deadline is %u ticks", 3) \ -+X( 74, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WATCHDOG_CHECK_DEPRECATED, "USC Watchdog result for DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u", 5) \ -+X( 75, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED, "Reconstruction needed for freelist 0x%x (ID=%u) Driver ID: %u type: %u (0:local,1:global) phase: %u (0:TA, 1:3D) on HW context %u", 6) \ -+X( 76, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP, "GPU-%u has locked up", 1) \ -+X( 77, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DM, "DM%u has locked up", 1) \ -+X( 78, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_EVENT_STATUS_REG, "Core %u RGX_CR_EVENT_STATUS=0x%08x", 2) \ -+X( 79, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MULTICORE_EVENT_STATUS_REG, "RGX_CR_MULTICORE_EVENT_STATUS%u=0x%08x", 2) \ -+X( 80, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_BIF0_FAULT, "BIF0 page fault detected (Core %u MMU Status: 0x%08x%08x Req Status: 0x%08x%08x)", 5) \ -+X( 81, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT_S7, "MMU page fault detected (Core %u MMU Status: 0x%08x%08x)", 3) \ -+X( 82, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT, "MMU page fault detected (Core %u MMU Status: 0x%08x%08x 0x%08x)", 4) \ -+X( 83, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW, "Reset HW (core:%u of %u, loop:%u, poll failures: 0x%08x)", 4) \ -+X( 84, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK, "Fast CRC Check result for Core%u, DM%u is HWRNeeded=%u", 3) \ -+X( 85, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK, "Full Signature Check result for Core%u, DM%u is HWRNeeded=%u", 3) \ -+X( 86, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK, "USC Slots result for Core%u, DM%u is HWRNeeded=%u USCSlotsUsedByDM=%u", 4) \ -+X( 87, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WATCHDOG_CHECK, "USC Watchdog result for Core%u DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with 
HWRChecksToGo=%u", 6) \ -+X( 88, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_RISCV_FAULT, "RISC-V MMU page fault detected (FWCORE MMU Status 0x%08x Req Status 0x%08x%08x)", 3) \ -+X( 89, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS1_PFS_DEPRECATED, "TEXAS1_PFS poll failed on core %u with value 0x%08x", 2) \ -+X( 90, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_PFS, "BIF_PFS poll failed on core %u with value 0x%08x", 2) \ -+X( 91, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS set poll failed on core %u with value 0x%08x", 2) \ -+X( 92, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_UNSET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS unset poll failed on core %u with value 0x%08x", 2) \ -+X( 93, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLC_INVAL, "MMU_CTRL_INVAL poll (all but fw) failed on core %u with value 0x%08x", 2) \ -+X( 94, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLCMMU_INVAL, "MMU_CTRL_INVAL poll (all) failed on core %u with value 0x%08x", 2) \ -+X( 95, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS_PFS, "TEXAS%u_PFS poll failed on core %u with value 0x%08x", 3) \ -+X( 96, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EXTRA_CHECK, "Extra Registers Check result for Core%u, DM%u is HWRNeeded=%u", 3) \ -+X( 97, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WRITE_TO_GPU_READONLY_ADDR, "FW attempted to write to read-only GPU address 0x%08x", 1) \ -+X( 98, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_TIMED_OUT, "DM%u timed out (phase count=0x%08x)", 2) \ -+\ -+X( 1, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGBLK, "Block 0x%x mapped to Config Idx %u", 2) \ -+X( 2, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_OMTBLK, "Block 0x%x omitted from event - not enabled in HW", 1) \ -+X( 3, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INCBLK, "Block 0x%x included in event - enabled in HW", 1) \ -+X( 4, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELREG, "Select register state hi_0x%x lo_0x%x", 2) \ -+X( 5, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CSBHDR, "Counter stream block header word 0x%x", 1) \ -+X( 6, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTROFF, "Counter register offset 0x%x", 1) \ -+X( 7, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGSKP, "Block 0x%x config unset, skipping", 1) \ -+X( 8, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK, "Accessing Indirect block 0x%x", 1) \ -+X( 9, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DIRBLK, "Accessing Direct block 0x%x", 1) \ -+X( 10, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CNTPRG, "Programmed counter select register at offset 0x%x", 1) \ -+X( 11, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKPRG, "Block register offset 0x%x and value 0x%x", 2) \ -+X( 12, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKCG, "Reading config block from driver 0x%x", 1) \ -+X( 13, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKRG, "Reading block range 0x%x to 0x%x", 2) \ -+X( 14, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKREC, "Recording block 0x%x config from driver", 1) \ -+X( 15, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKED, "Finished reading config block from driver", 0) \ -+X( 16, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_COUNTER, "Custom Counter offset: 0x%x value: 0x%x", 2) \ -+X( 17, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELECT_CNTR, "Select counter n:%u ID:0x%x", 2) \ -+X( 18, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_SELECT_PACK, "The counter ID 0x%x is not allowed. The package [b:%u, n:%u] will be discarded", 3) \ -+X( 19, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS_CUSTOM, "Custom Counters filter status %u", 1) \ -+X( 20, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_WRONG_BLOCK, "The Custom block %u is not allowed. Use only blocks lower than %u. 
The package will be discarded", 2) \ -+X( 21, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_TOO_MANY_ID, "The package will be discarded because it contains %u counters IDs while the upper limit is %u", 2) \ -+X( 22, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHECK_FILTER, "Check Filter 0x%x is 0x%x ?", 2) \ -+X( 23, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_RESET_CUSTOM_BLOCK, "The custom block %u is reset", 1) \ -+X( 24, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INVALID_CMD_DEPRECATED, "Encountered an invalid command (%u)", 1) \ -+X( 25, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_DEPRECATED, "HWPerf Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)", 2) \ -+X( 26, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_FENCE_DEPRECATED, "HWPerf Queue is fencing, we are waiting for Roff = %u (Roff = %u, Woff = %u)", 3) \ -+X( 27, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_BLOCK, "Custom Counter block: %u", 1) \ -+X( 28, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKENA, "Block 0x%x ENABLED", 1) \ -+X( 29, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKDIS, "Block 0x%x DISABLED", 1) \ -+X( 30, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK_INSTANCE, "Accessing Indirect block 0x%x, instance %u", 2) \ -+X( 31, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTRVAL, "Counter register 0x%x, Value 0x%x", 2) \ -+X( 32, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS, "Counters filter status %u", 1) \ -+X( 33, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTLBLK, "Block 0x%x mapped to Ctl Idx %u", 2) \ -+X( 34, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_WORKEST_EN, "Block(s) in use for workload estimation.", 0) \ -+X( 35, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CYCCTR, "GPU %u Cycle counter 0x%x, Value 0x%x", 3) \ -+X( 36, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CYCMAX, "GPU Mask 0x%x Cycle counter 0x%x, Value 0x%x", 3) \ -+X( 37, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_IGNORE_BLOCKS, "Blocks IGNORED for GPU %u", 1) \ -+\ -+X( 1, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_REQUEST_DEPRECATED, "Transfer 0x%02x request: 0x%02x%08x -> 0x%08x, size %u", 5) \ -+X( 2, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_COMPLETE, "Transfer of type 0x%02x expected on channel %u, 0x%02x found, status %u", 4) \ -+X( 3, RGXFW_GROUP_DMA, RGXFW_SF_DMA_INT_REG, "DMA Interrupt register 0x%08x", 1) \ -+X( 4, RGXFW_GROUP_DMA, RGXFW_SF_DMA_WAIT, "Waiting for transfer of type 0x%02x completion...", 1) \ -+X( 5, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOADING_FAILED, "Loading of cCCB data from FW common context 0x%08x (offset: %u, size: %u) failed", 3) \ -+X( 6, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOAD_INVALID, "Invalid load of cCCB data from FW common context 0x%08x (offset: %u, size: %u)", 3) \ -+X( 7, RGXFW_GROUP_DMA, RGXFW_SF_DMA_POLL_FAILED, "Transfer 0x%02x request poll failure", 1) \ -+X( 8, RGXFW_GROUP_DMA, RGXFW_SF_DMA_BOOT_TRANSFER_FAILED_DEPRECATED, "Boot transfer(s) failed (code? %u, data? %u), used slower memcpy instead", 2) \ -+X( 9, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_REQUEST, "Transfer 0x%02x request on ch. 
%u: system 0x%02x%08x, coremem 0x%08x, flags 0x%x, size %u", 7) \ -+\ -+X( 1, RGXFW_GROUP_DBG, RGXFW_SF_DBG_INTPAIR, "0x%08x 0x%08x", 2) \ -+X( 2, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1HEX, "0x%08x", 1) \ -+X( 3, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2HEX, "0x%08x 0x%08x", 2) \ -+X( 4, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3HEX, "0x%08x 0x%08x 0x%08x", 3) \ -+X( 5, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4HEX, "0x%08x 0x%08x 0x%08x 0x%08x", 4) \ -+X( 6, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 5) \ -+X( 7, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 6) \ -+X( 8, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 7) \ -+X( 9, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 8) \ -+X( 10, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1SIGNED, "%d", 1) \ -+X( 11, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2SIGNED, "%d %d", 2) \ -+X( 12, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3SIGNED, "%d %d %d", 3) \ -+X( 13, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4SIGNED, "%d %d %d %d", 4) \ -+X( 14, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5SIGNED, "%d %d %d %d %d", 5) \ -+X( 15, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6SIGNED, "%d %d %d %d %d %d", 6) \ -+X( 16, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7SIGNED, "%d %d %d %d %d %d %d", 7) \ -+X( 17, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8SIGNED, "%d %d %d %d %d %d %d %d", 8) \ -+X( 18, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1UNSIGNED, "%u", 1) \ -+X( 19, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2UNSIGNED, "%u %u", 2) \ -+X( 20, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3UNSIGNED, "%u %u %u", 3) \ -+X( 21, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4UNSIGNED, "%u %u %u %u", 4) \ -+X( 22, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5UNSIGNED, "%u %u %u %u %u", 5) \ -+X( 23, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6UNSIGNED, "%u %u %u %u %u %u", 6) \ -+X( 24, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7UNSIGNED, "%u %u %u %u %u %u %u", 7) \ -+X( 25, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8UNSIGNED, "%u %u %u %u %u %u %u %u", 8) \ -+\ -+X(65535, RGXFW_GROUP_NULL, RGXFW_SF_LAST, "You should not use this string", 15) -+ -+ -+/* The symbolic names found in the table above are assigned an ui32 value of -+ * the following format: -+ * 31 30 28 27 20 19 16 15 12 11 0 bits -+ * - --- ---- ---- ---- ---- ---- ---- ---- -+ * 0-11: id number -+ * 12-15: group id number -+ * 16-19: number of parameters -+ * 20-27: unused -+ * 28-30: active: identify SF packet, otherwise regular int32 -+ * 31: reserved for signed/unsigned compatibility -+ * -+ * The following macro assigns those values to the enum generated SF ids list. 
-+ */ -+#define RGXFW_LOG_IDMARKER (0x70000000U) -+#define RGXFW_LOG_CREATESFID(a,b,e) ((IMG_UINT32)(a) | ((IMG_UINT32)(b)<<12U) | ((IMG_UINT32)(e)<<16U)) | RGXFW_LOG_IDMARKER -+ -+#define RGXFW_LOG_IDMASK (0xFFF00000U) -+#define RGXFW_LOG_VALIDID(I) (((I) & RGXFW_LOG_IDMASK) == RGXFW_LOG_IDMARKER) -+ -+typedef enum { -+#define X(a, b, c, d, e) c = RGXFW_LOG_CREATESFID(a,b,e), -+ RGXFW_LOG_SFIDLIST -+#undef X -+} RGXFW_LOG_SFids; -+ -+/* Return the group id number that the given (enum generated) id belongs to */ -+#define RGXFW_SF_GID(x) (((IMG_UINT32)(x)>>12) & 0xfU) -+/* Return the id number that the given (enum generated) id belongs to */ -+#define RGXFW_SF_ID(x) ((IMG_UINT32)(x) & 0xfffU) -+/* Returns how many arguments the SF(string format) for the given (enum generated) id requires */ -+#define RGXFW_SF_PARAMNUM(x) (((IMG_UINT32)(x)>>16) & 0xfU) -+ -+#endif /* RGX_FWIF_SF_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgx_fwif_shared.h b/drivers/gpu/drm/img-rogue/rgx_fwif_shared.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_fwif_shared.h -@@ -0,0 +1,361 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX firmware interface structures -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX firmware interface structures shared by both host client -+ and host server -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
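Illustration (a standalone sketch, not from the driver sources): the SF id layout described in rgx_fwif_sf.h above packs the string id into bits 0..11, the group id into bits 12..15, the parameter count into bits 16..19 and the 0x7 marker into bits 28..30. Plain stdint types and invented helper names stand in for the IMG_* typedefs and the RGXFW_LOG_*/RGXFW_SF_* macros.

#include <stdint.h>
#include <stdio.h>

#define SF_IDMARKER 0x70000000u                 /* bits 28..30 set to 0b111 */
#define SF_IDMASK   0xFFF00000u                 /* everything above the parameter field */

static uint32_t sf_make(uint32_t id, uint32_t group, uint32_t nparams)
{
    /* mirrors RGXFW_LOG_CREATESFID: id | group<<12 | nparams<<16 | marker */
    return (id & 0xFFFu) | ((group & 0xFu) << 12) | ((nparams & 0xFu) << 16) | SF_IDMARKER;
}

static int      sf_valid(uint32_t v)   { return (v & SF_IDMASK) == SF_IDMARKER; }
static uint32_t sf_id(uint32_t v)      { return v & 0xFFFu; }          /* cf. RGXFW_SF_ID */
static uint32_t sf_group(uint32_t v)   { return (v >> 12) & 0xFu; }    /* cf. RGXFW_SF_GID */
static uint32_t sf_nparams(uint32_t v) { return (v >> 16) & 0xFu; }    /* cf. RGXFW_SF_PARAMNUM */

int main(void)
{
    uint32_t v = sf_make(3u, 2u, 1u);   /* group 2, string 3, one parameter -> 0x70012003 */
    printf("valid=%d group=%u id=%u params=%u\n",
           sf_valid(v), sf_group(v), sf_id(v), sf_nparams(v));
    return 0;
}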
-+*/ /**************************************************************************/ -+ -+#if !defined(RGX_FWIF_SHARED_H) -+#define RGX_FWIF_SHARED_H -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "rgx_common.h" -+#include "powervr/mem_types.h" -+#include "devicemem_typedefs.h" -+ -+/* Indicates the number of RTDATAs per RTDATASET */ -+#if defined(SUPPORT_AGP) -+#define RGXMKIF_NUM_RTDATAS 4U -+#define RGXMKIF_NUM_GEOMDATAS 4U -+#define RGXMKIF_NUM_RTDATA_FREELISTS 12U /* RGXMKIF_NUM_RTDATAS * RGXFW_MAX_FREELISTS */ -+#define RGX_NUM_GEOM_CORES (2U) -+#else -+#define RGXMKIF_NUM_RTDATAS 2U -+#define RGXMKIF_NUM_GEOMDATAS 1U -+#define RGXMKIF_NUM_RTDATA_FREELISTS 2U /* RGXMKIF_NUM_RTDATAS * RGXFW_MAX_FREELISTS */ -+#define RGX_NUM_GEOM_CORES (1U) -+#endif -+ -+/* Maximum number of UFOs in a CCB command. -+ * The number is based on having 32 sync prims (as originally), plus 32 sync -+ * checkpoints. -+ * Once the use of sync prims is no longer supported, we will retain -+ * the same total (64) as the number of sync checkpoints which may be -+ * supporting a fence is not visible to the client driver and has to -+ * allow for the number of different timelines involved in fence merges. -+ */ -+#define RGXFWIF_CCB_CMD_MAX_UFOS (32U+32U) -+ -+/* -+ * This is a generic limit imposed on any DM (TA,3D,CDM,TDM,2D,TRANSFER) -+ * command passed through the bridge. -+ * Just across the bridge in the server, any incoming kick command size is -+ * checked against this maximum limit. -+ * In case the incoming command size is larger than the specified limit, -+ * the bridge call is retired with error. -+ */ -+#define RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE (1024U) -+ -+typedef struct RGXFWIF_DEV_VIRTADDR_ -+{ -+ IMG_UINT32 ui32Addr; -+} RGXFWIF_DEV_VIRTADDR; -+ -+typedef struct -+{ -+ IMG_DEV_VIRTADDR RGXFW_ALIGN psDevVirtAddr; -+ RGXFWIF_DEV_VIRTADDR pbyFWAddr; -+} UNCACHED_ALIGN RGXFWIF_DMA_ADDR; -+ -+typedef IMG_UINT8 RGXFWIF_CCCB; -+ -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_UFO_ADDR; -+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CLEANUP_CTL; -+ -+ -+/*! -+ * @InGroup ClientCCBTypes -+ * @Brief Command data for fence & update types Client CCB commands. -+ */ -+typedef struct -+{ -+ PRGXFWIF_UFO_ADDR puiAddrUFO; /*!< Address to be checked/updated */ -+ IMG_UINT32 ui32Value; /*!< Value to check-against/update-to */ -+} RGXFWIF_UFO; -+ -+/*! -+ * @InGroup RenderTarget -+ * @Brief Track pending and executed workloads of HWRTDATA and ZSBUFFER -+ */ -+typedef struct -+{ -+ IMG_UINT32 ui32SubmittedCommands; /*!< Number of commands received by the FW */ -+ IMG_UINT32 ui32ExecutedCommands; /*!< Number of commands executed by the FW */ -+} UNCACHED_ALIGN RGXFWIF_CLEANUP_CTL; -+ -+#define RGXFWIF_PRBUFFER_START IMG_UINT32_C(0) -+#define RGXFWIF_PRBUFFER_ZSBUFFER IMG_UINT32_C(0) -+#define RGXFWIF_PRBUFFER_MSAABUFFER IMG_UINT32_C(1) -+#define RGXFWIF_PRBUFFER_MAXSUPPORTED IMG_UINT32_C(2) -+ -+typedef IMG_UINT32 RGXFWIF_PRBUFFER_TYPE; -+ -+typedef enum -+{ -+ RGXFWIF_PRBUFFER_UNBACKED = 0, -+ RGXFWIF_PRBUFFER_BACKED, -+ RGXFWIF_PRBUFFER_BACKING_PENDING, -+ RGXFWIF_PRBUFFER_UNBACKING_PENDING, -+}RGXFWIF_PRBUFFER_STATE; -+ -+/*! 
-+ * @InGroup RenderTarget -+ * @Brief OnDemand Z/S/MSAA Buffers -+ */ -+typedef struct -+{ -+ IMG_UINT32 ui32BufferID; /*!< Buffer ID*/ -+ IMG_BOOL bOnDemand; /*!< Needs On-demand Z/S/MSAA Buffer allocation */ -+ RGXFWIF_PRBUFFER_STATE eState; /*!< Z/S/MSAA -Buffer state */ -+ RGXFWIF_CLEANUP_CTL sCleanupState; /*!< Cleanup state */ -+ IMG_UINT32 ui32PRBufferFlags; /*!< Compatibility and other flags */ -+} UNCACHED_ALIGN RGXFWIF_PRBUFFER; -+ -+/* -+ * Used to share frame numbers across UM-KM-FW, -+ * frame number is set in UM, -+ * frame number is required in both KM for HTB and FW for FW trace. -+ * -+ * May be used to house Kick flags in the future. -+ */ -+typedef struct -+{ -+ IMG_UINT32 ui32FrameNum; /*!< associated frame number */ -+} CMD_COMMON; -+ -+/* -+ * TA and 3D commands require set of firmware addresses that are stored in the -+ * Kernel. Client has handle(s) to Kernel containers storing these addresses, -+ * instead of raw addresses. We have to patch/write these addresses in KM to -+ * prevent UM from controlling FW addresses directly. -+ * Typedefs for TA and 3D commands are shared between Client and Firmware (both -+ * single-BVNC). Kernel is implemented in a multi-BVNC manner, so it can't use -+ * TA|3D CMD type definitions directly. Therefore we have a SHARED block that -+ * is shared between UM-KM-FW across all BVNC configurations. -+ */ -+typedef struct -+{ -+ CMD_COMMON sCmn; /*!< Common command attributes */ -+ RGXFWIF_DEV_VIRTADDR sHWRTData; /* RTData associated with this command, -+ this is used for context selection and for storing out HW-context, -+ when TA is switched out for continuing later */ -+ -+ RGXFWIF_DEV_VIRTADDR asPRBuffer[RGXFWIF_PRBUFFER_MAXSUPPORTED]; /* Supported PR Buffers like Z/S/MSAA Scratch */ -+ -+} CMDTA3D_SHARED; -+ -+/*! -+ * Client Circular Command Buffer (CCCB) control structure. -+ * This is shared between the Server and the Firmware and holds byte offsets -+ * into the CCCB as well as the wrapping mask to aid wrap around. A given -+ * snapshot of this queue with Cmd 1 running on the GPU might be: -+ * -+ * Roff Doff Woff -+ * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........] -+ * < runnable commands >< !ready to run > -+ * -+ * Cmd 1 : Currently executing on the GPU data master. -+ * Cmd 2,3,4: Fence dependencies met, commands runnable. -+ * Cmd 5... : Fence dependency not met yet. -+ */ -+typedef struct -+{ -+ IMG_UINT32 ui32WriteOffset; /*!< Host write offset into CCB. This -+ * must be aligned to 16 bytes. */ -+ IMG_UINT32 ui32ReadOffset; /*!< Firmware read offset into CCB. -+ Points to the command that is -+ * runnable on GPU, if R!=W */ -+ IMG_UINT32 ui32DepOffset; /*!< Firmware fence dependency offset. -+ * Points to commands not ready, i.e. -+ * fence dependencies are not met. */ -+ IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask, total capacity -+ in bytes of the CCB-1 */ -+ -+ IMG_UINT32 ui32ReadOffset2; /*!< Firmware 2nd read offset into CCB for AGP. -+ Points to the command that is -+ runnable on GPU, if R2!=W */ -+ IMG_UINT32 ui32ReadOffset3; /*!< Firmware 3rd read offset into CCB for AGP. -+ Points to the command that is -+ runnable on GPU, if R3!=W */ -+ IMG_UINT32 ui32ReadOffset4; /*!< Firmware 4th read offset into CCB for AGP. 
-+ Points to the command that is -+ runnable on GPU, if R4!=W */ -+ -+} UNCACHED_ALIGN RGXFWIF_CCCB_CTL; -+ -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+static_assert(sizeof(RGXFWIF_CCCB_CTL) == 32, -+ "RGXFWIF_CCCB_CTL is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER"); -+#endif -+ -+typedef IMG_UINT32 RGXFW_FREELIST_TYPE; -+ -+#define RGXFW_LOCAL_FREELIST IMG_UINT32_C(0) -+#define RGXFW_GLOBAL_FREELIST IMG_UINT32_C(1) -+#if defined(SUPPORT_AGP) -+#define RGXFW_GLOBAL2_FREELIST IMG_UINT32_C(2) -+#define RGXFW_MAX_FREELISTS (RGXFW_GLOBAL2_FREELIST + 1U) -+#else -+#define RGXFW_MAX_FREELISTS (RGXFW_GLOBAL_FREELIST + 1U) -+#endif -+#define RGXFW_MAX_HWFREELISTS (2U) -+ -+/*! -+ * @Defgroup ContextSwitching Context switching data interface -+ * @Brief Types grouping data structures and defines used in realising the Context Switching (CSW) functionality -+ * @{ -+ */ -+ -+/*! -+ * @Brief GEOM DM or TA register controls for context switch -+ */ -+typedef struct -+{ -+ IMG_UINT64 uTAReg_VDM_CONTEXT_STATE_BASE_ADDR; /*!< The base address of the VDM's context state buffer */ -+ IMG_UINT64 uTAReg_VDM_CONTEXT_STATE_RESUME_ADDR; -+ IMG_UINT64 uTAReg_TA_CONTEXT_STATE_BASE_ADDR; /*!< The base address of the TA's context state buffer */ -+ -+ struct -+ { -+ IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK0; /*!< VDM context store task 0 */ -+ IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK1; /*!< VDM context store task 1 */ -+ IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK2; /*!< VDM context store task 2 */ -+ -+ /* VDM resume state update controls */ -+ IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK0; /*!< VDM context resume task 0 */ -+ IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK1; /*!< VDM context resume task 1 */ -+ IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK2; /*!< VDM context resume task 2 */ -+ -+ IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK3; -+ IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK4; -+ -+ IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK3; -+ IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK4; -+ } asTAState[2]; -+ -+} RGXFWIF_TAREGISTERS_CSWITCH; -+/*! @} End of Defgroup ContextSwitching */ -+ -+#define RGXFWIF_TAREGISTERS_CSWITCH_SIZE sizeof(RGXFWIF_TAREGISTERS_CSWITCH) -+ -+typedef struct -+{ -+ IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS0; -+ IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS1; -+ IMG_UINT64 uCDMReg_CDM_TERMINATE_PDS; -+ IMG_UINT64 uCDMReg_CDM_TERMINATE_PDS1; -+ -+ /* CDM resume controls */ -+ IMG_UINT64 uCDMReg_CDM_RESUME_PDS0; -+ IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS0_B; -+ IMG_UINT64 uCDMReg_CDM_RESUME_PDS0_B; -+ -+} RGXFWIF_CDM_REGISTERS_CSWITCH; -+ -+/*! -+ * @InGroup ContextSwitching -+ * @Brief Render context static register controls for context switch -+ */ -+typedef struct -+{ -+ RGXFWIF_TAREGISTERS_CSWITCH RGXFW_ALIGN asCtxSwitch_GeomRegs[RGX_NUM_GEOM_CORES]; /*!< Geom registers for ctx switch */ -+} RGXFWIF_STATIC_RENDERCONTEXT_STATE; -+ -+#define RGXFWIF_STATIC_RENDERCONTEXT_SIZE sizeof(RGXFWIF_STATIC_RENDERCONTEXT_STATE) -+ -+typedef struct -+{ -+ RGXFWIF_CDM_REGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_Regs; /*!< CDM registers for ctx switch */ -+} RGXFWIF_STATIC_COMPUTECONTEXT_STATE; -+ -+#define RGXFWIF_STATIC_COMPUTECONTEXT_SIZE sizeof(RGXFWIF_STATIC_COMPUTECONTEXT_STATE) -+ -+/*! -+ @Brief Context reset reason. Last reset reason for a reset context. 
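Referring back to the RGXFWIF_CCCB_CTL layout above: the wrap mask ("total capacity in bytes of the CCB-1") implies a power-of-two buffer, so occupancy and free space follow from plain unsigned arithmetic on the byte offsets. A minimal sketch under that assumption; the struct and helper names below are invented for illustration and are not the driver's code.

#include <stdint.h>

struct cccb_view {
    uint32_t woff;      /* host write offset, cf. ui32WriteOffset (16-byte aligned) */
    uint32_t roff;      /* firmware read offset, cf. ui32ReadOffset */
    uint32_t wrap_mask; /* capacity - 1, cf. ui32WrapMask */
};

/* Bytes holding commands the firmware has not yet consumed. */
static uint32_t cccb_bytes_used(const struct cccb_view *c)
{
    return (c->woff - c->roff) & c->wrap_mask;
}

/* Bytes the host may still write; one byte is kept back so a full buffer
 * remains distinguishable from an empty one. */
static uint32_t cccb_bytes_free(const struct cccb_view *c)
{
    return c->wrap_mask - cccb_bytes_used(c);
}

/* Advance an offset past a command of the given size, wrapping in place. */
static uint32_t cccb_advance(uint32_t off, uint32_t size, uint32_t wrap_mask)
{
    return (off + size) & wrap_mask;
}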
-+*/ -+typedef enum -+{ -+ RGX_CONTEXT_RESET_REASON_NONE = 0, /*!< No reset reason recorded */ -+ RGX_CONTEXT_RESET_REASON_GUILTY_LOCKUP = 1, /*!< Caused a reset due to locking up */ -+ RGX_CONTEXT_RESET_REASON_INNOCENT_LOCKUP = 2, /*!< Affected by another context locking up */ -+ RGX_CONTEXT_RESET_REASON_GUILTY_OVERRUNING = 3, /*!< Overran the global deadline */ -+ RGX_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4, /*!< Affected by another context overrunning */ -+ RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH = 5, /*!< Forced reset to ensure scheduling requirements */ -+ RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM = 6, /*!< CDM Mission/safety checksum mismatch */ -+ RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM = 7, /*!< TRP checksum mismatch */ -+ RGX_CONTEXT_RESET_REASON_GPU_ECC_OK = 8, /*!< GPU ECC error (corrected, OK) */ -+ RGX_CONTEXT_RESET_REASON_GPU_ECC_HWR = 9, /*!< GPU ECC error (uncorrected, HWR) */ -+ RGX_CONTEXT_RESET_REASON_FW_ECC_OK = 10, /*!< FW ECC error (corrected, OK) */ -+ RGX_CONTEXT_RESET_REASON_FW_ECC_ERR = 11, /*!< FW ECC error (uncorrected, ERR) */ -+ RGX_CONTEXT_RESET_REASON_FW_WATCHDOG = 12, /*!< FW Safety watchdog triggered */ -+ RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT = 13, /*!< FW page fault (no HWR) */ -+ RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR = 14, /*!< FW execution error (GPU reset requested) */ -+ RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR = 15, /*!< Host watchdog detected FW error */ -+ RGX_CONTEXT_GEOM_OOM_DISABLED = 16, /*!< Geometry DM OOM event is not allowed */ -+ RGX_CONTEXT_PVRIC_SIGNATURE_MISMATCH = 17, /*!< PVRIC Signature mismatch */ -+} RGX_CONTEXT_RESET_REASON; -+ -+/*! -+ @Brief Context reset data shared with the host -+*/ -+typedef struct -+{ -+ RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reset reason */ -+ IMG_UINT32 ui32ResetExtJobRef; /*!< External Job ID */ -+} RGX_CONTEXT_RESET_REASON_DATA; -+ -+#define RGX_HEAP_UM_PDS_RESERVED_SIZE DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY -+#define RGX_HEAP_UM_PDS_RESERVED_REGION_OFFSET 0 -+#define RGX_HEAP_PDS_RESERVED_TOTAL_SIZE RGX_HEAP_UM_PDS_RESERVED_SIZE -+ -+#define RGX_HEAP_UM_USC_RESERVED_SIZE DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY -+#define RGX_HEAP_UM_USC_RESERVED_REGION_OFFSET 0 -+#define RGX_HEAP_USC_RESERVED_TOTAL_SIZE RGX_HEAP_UM_USC_RESERVED_SIZE -+ -+#define RGX_HEAP_UM_GENERAL_RESERVED_SIZE DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY -+#define RGX_HEAP_UM_GENERAL_RESERVED_REGION_OFFSET 0 -+#define RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET RGX_HEAP_UM_GENERAL_RESERVED_SIZE -+ -+#endif /* RGX_FWIF_SHARED_H */ -+ -+/****************************************************************************** -+ End of file (rgx_fwif_shared.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgx_heap_firmware.h b/drivers/gpu/drm/img-rogue/rgx_heap_firmware.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_heap_firmware.h -@@ -0,0 +1,126 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX FW heap definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
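A consumer of RGX_CONTEXT_RESET_REASON_DATA above will often only care whether its own context caused the reset. A hypothetical helper, mirroring a few of the enum values from rgx_fwif_shared.h; it is illustrative only.

#include <stdbool.h>

enum reset_reason {                       /* values mirror RGX_CONTEXT_RESET_REASON_* */
    RESET_NONE                = 0,
    RESET_GUILTY_LOCKUP       = 1,
    RESET_INNOCENT_LOCKUP     = 2,
    RESET_GUILTY_OVERRUNING   = 3,
    RESET_INNOCENT_OVERRUNING = 4,
};

/* True only for the "guilty" reasons, i.e. this context locked up or
 * overran; the "innocent" reasons mean another context was at fault. */
static bool reset_was_our_fault(enum reset_reason r)
{
    return r == RESET_GUILTY_LOCKUP || r == RESET_GUILTY_OVERRUNING;
}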
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGX_HEAP_FIRMWARE_H) -+#define RGX_HEAP_FIRMWARE_H -+ -+/* Start at 903GiB. Size of 32MB per OSID (see rgxheapconfig.h) -+ * NOTE: -+ * The firmware heaps bases and sizes are defined here to -+ * simplify #include dependencies, see rgxheapconfig.h -+ * for the full RGX virtual address space layout. -+ */ -+ -+/* -+ * The Config heap holds initialisation data shared between the -+ * the driver and firmware (e.g. pointers to the KCCB and FWCCB). -+ * The Main Firmware heap size is adjusted accordingly but most -+ * of the map / unmap functions must take into consideration -+ * the entire range (i.e. main and config heap). -+ */ -+#define RGX_FIRMWARE_NUMBER_OF_FW_HEAPS (IMG_UINT32_C(2)) -+#define RGX_FIRMWARE_HEAP_SHIFT RGX_FW_HEAP_SHIFT -+#define RGX_FIRMWARE_RAW_HEAP_BASE (IMG_UINT64_C(0xE1C0000000)) -+#define RGX_FIRMWARE_RAW_HEAP_SIZE (IMG_UINT32_C(1) << RGX_FIRMWARE_HEAP_SHIFT) -+ -+/* To enable the firmware to compute the exact address of structures allocated by the KM -+ * in the Fw Config subheap, regardless of the KM's page size (and PMR granularity), -+ * objects allocated consecutively but from different PMRs (due to differing memalloc flags) -+ * are allocated with a 64kb offset. This way, all structures will be located at the same base -+ * addresses when the KM is running with a page size of 4k, 16k or 64k. 
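A quick check of the claim just made: a 64 KiB allocation granularity keeps the per-object offsets identical whichever of the supported kernel page sizes is in use, because 0x10000 is an exact multiple of 4 KiB, 16 KiB and 64 KiB. Purely illustrative.

#include <assert.h>

int main(void)
{
    const unsigned granule = 0x10000u;    /* 64 KiB, cf. the granularity define below */
    assert(granule % 0x1000u  == 0u);     /* 4 KiB pages  */
    assert(granule % 0x4000u  == 0u);     /* 16 KiB pages */
    assert(granule % 0x10000u == 0u);     /* 64 KiB pages */
    return 0;
}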
*/ -+#define RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY (IMG_UINT32_C(0x10000)) -+ -+/* Ensure the heap can hold 3 PMRs of maximum supported granularity (192KB): -+ * 1st PMR: RGXFWIF_CONNECTION_CTL -+ * 2nd PMR: RGXFWIF_OSINIT -+ * 3rd PMR: RGXFWIF_SYSINIT */ -+#define RGX_FIRMWARE_CONFIG_HEAP_SIZE (IMG_UINT32_C(3)*RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY) -+ -+#define RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE) -+/* -+ * MIPS FW needs space in the Main heap to map GPU memory. -+ * This space is taken from the MAIN heap, to avoid creating a new heap. -+ */ -+#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL (IMG_UINT32_C(0x100000)) /* 1MB */ -+#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101 (IMG_UINT32_C(0x400000)) /* 4MB */ -+ -+#define RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE - \ -+ RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL) -+ -+#define RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101 (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE - \ -+ RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101) -+ -+#if !defined(__KERNEL__) -+#if defined(FIX_HW_BRN_65101) -+#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101 -+#define RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101 -+ -+#include "img_defs.h" -+static_assert((RGX_FIRMWARE_RAW_HEAP_SIZE) >= IMG_UINT32_C(0x800000), "MIPS GPU map size cannot be increased due to BRN65101 with a small FW heap"); -+ -+#else -+#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL -+#define RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL -+#endif -+#endif /* !defined(__KERNEL__) */ -+ -+#define RGX_FIRMWARE_MAIN_HEAP_BASE RGX_FIRMWARE_RAW_HEAP_BASE -+#define RGX_FIRMWARE_CONFIG_HEAP_BASE (RGX_FIRMWARE_MAIN_HEAP_BASE + \ -+ RGX_FIRMWARE_RAW_HEAP_SIZE - \ -+ RGX_FIRMWARE_CONFIG_HEAP_SIZE) -+ -+ -+/* 1 Mb can hold the maximum amount of page tables for the memory shared between the firmware and all KM drivers: -+ * MAX(RAW_HEAP_SIZE) = 32 Mb; MAX(NUMBER_OS) = 8; Total shared memory = 256 Mb; -+ * MMU objects required: 65536 PTEs; 16 PDEs; 1 PCE; */ -+#define RGX_FIRMWARE_MAX_PAGETABLE_SIZE (1 * 1024 * 1024) -+ -+/* -+ * The maximum configurable size via RGX_FW_HEAP_SHIFT is 32MiB (1<<25) and -+ * the minimum is 4MiB (1<<22); the default firmware heap size is set to -+ * maximum 32MiB. -+ */ -+#if defined(RGX_FW_HEAP_SHIFT) && (RGX_FW_HEAP_SHIFT < 22 || RGX_FW_HEAP_SHIFT > 25) -+#error "RGX_FW_HEAP_SHIFT is outside valid range [22, 25]" -+#endif -+ -+#endif /* RGX_HEAP_FIRMWARE_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgx_heaps.h b/drivers/gpu/drm/img-rogue/rgx_heaps.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_heaps.h -@@ -0,0 +1,68 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX heap definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
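Worked example of the firmware heap carve-up defined in rgx_heap_firmware.h above, assuming the maximum RGX_FW_HEAP_SHIFT of 25 (a 32 MiB raw heap) and ignoring the MIPS-only reservation. The constants are recomputed locally for illustration; nothing here is exported by the patch.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define FW_HEAP_SHIFT        25u                              /* must stay within [22, 25] */
#define FW_RAW_HEAP_BASE     0xE1C0000000ull
#define FW_RAW_HEAP_SIZE     (1ull << FW_HEAP_SHIFT)          /* 0x2000000 = 32 MiB */
#define FW_CONFIG_GRANULE    0x10000ull                       /* 64 KiB per-PMR offset */
#define FW_CONFIG_HEAP_SIZE  (3ull * FW_CONFIG_GRANULE)       /* 0x30000 = 192 KiB */
#define FW_MAIN_HEAP_SIZE    (FW_RAW_HEAP_SIZE - FW_CONFIG_HEAP_SIZE)
#define FW_MAIN_HEAP_BASE    FW_RAW_HEAP_BASE
#define FW_CONFIG_HEAP_BASE  (FW_MAIN_HEAP_BASE + FW_RAW_HEAP_SIZE - FW_CONFIG_HEAP_SIZE)

int main(void)
{
    assert(FW_HEAP_SHIFT >= 22u && FW_HEAP_SHIFT <= 25u);
    /* main  : base 0xe1c0000000, size 0x1fd0000
     * config: base 0xe1c1fd0000, size 0x30000 (sits at the top of the raw heap) */
    printf("main  : base 0x%llx size 0x%llx\n",
           (unsigned long long)FW_MAIN_HEAP_BASE, (unsigned long long)FW_MAIN_HEAP_SIZE);
    printf("config: base 0x%llx size 0x%llx\n",
           (unsigned long long)FW_CONFIG_HEAP_BASE, (unsigned long long)FW_CONFIG_HEAP_SIZE);
    return 0;
}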
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#if !defined(RGX_HEAPS_H) -+#define RGX_HEAPS_H -+ -+/* -+ Identify heaps by their names -+*/ -+#define RGX_GENERAL_SVM_HEAP_IDENT "General SVM" /*!< RGX General SVM (shared virtual memory) Heap Identifier */ -+#define RGX_GENERAL_HEAP_IDENT "General" /*!< RGX General Heap Identifier */ -+#define RGX_GENERAL_NON4K_HEAP_IDENT "General NON-4K" /*!< RGX General non-4K Heap Identifier */ -+#define RGX_PDSCODEDATA_HEAP_IDENT "PDS Code and Data" /*!< RGX PDS Code/Data Heap Identifier */ -+#define RGX_USCCODE_HEAP_IDENT "USC Code" /*!< RGX USC Code Heap Identifier */ -+#define RGX_VK_CAPT_REPLAY_HEAP_IDENT "Vulkan Capture Replay" /*!< RGX Vulkan capture replay buffer Heap Identifier */ -+#define RGX_SIGNALS_HEAP_IDENT "Signals" /*!< Signals Heap Identifier */ -+#define RGX_FBCDC_HEAP_IDENT "FBCDC" /*!< RGX FBCDC State Table Heap Identifier */ -+#define RGX_FBCDC_LARGE_HEAP_IDENT "Large FBCDC" /*!< RGX Large FBCDC State Table Heap Identifier */ -+#define RGX_CMP_MISSION_RMW_HEAP_IDENT "Compute Mission RMW" /*!< Compute Mission RMW Heap Identifier */ -+#define RGX_CMP_SAFETY_RMW_HEAP_IDENT "Compute Safety RMW" /*!< Compute Safety RMW Heap Identifier */ -+#define RGX_TEXTURE_STATE_HEAP_IDENT "Texture State" /*!< Texture State Heap Identifier */ -+#define RGX_VISIBILITY_TEST_HEAP_IDENT "Visibility Test" /*!< Visibility Test Heap Identifier */ -+ -+/* Services client internal heap identification */ -+#define RGX_RGNHDR_BRN_63142_HEAP_IDENT "RgnHdr BRN63142" /*!< RGX RgnHdr BRN63142 Heap Identifier */ -+#define RGX_TQ3DPARAMETERS_HEAP_IDENT "TQ3DParameters" /*!< RGX TQ 3D Parameters Heap Identifier */ -+#define RGX_MMU_INIA_BRN_65273_HEAP_IDENT "MMU INIA BRN65273" /*!< MMU BRN65273 Heap A Identifier */ -+#define RGX_MMU_INIB_BRN_65273_HEAP_IDENT "MMU INIB BRN65273" /*!< MMU BRN65273 Heap B Identifier */ -+#endif /* RGX_HEAPS_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgx_hwperf.h b/drivers/gpu/drm/img-rogue/rgx_hwperf.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_hwperf.h -@@ -0,0 +1,483 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX HWPerf and Debug Types and Defines Header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Common data types definitions for hardware performance API -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef RGX_HWPERF_H_ -+#define RGX_HWPERF_H_ -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+/* These structures are used on both GPU and CPU and must be a size that is a -+ * multiple of 64 bits, 8 bytes to allow the FW to write 8 byte quantities at -+ * 8 byte aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to check this. -+ */ -+ -+/****************************************************************************** -+ * Includes and Defines -+ *****************************************************************************/ -+ -+#include "img_types.h" -+#include "img_defs.h" -+ -+#include "rgx_common.h" -+#include "rgx_hwperf_common.h" -+ -+#if !defined(__KERNEL__) -+/* User-mode and Firmware definitions only */ -+ -+#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) -+ -+/* HWPerf interface assumption checks */ -+static_assert(RGX_FEATURE_NUM_CLUSTERS <= 16U, "Cluster count too large for HWPerf protocol definition"); -+ -+/*! The number of indirectly addressable TPU_MSC blocks in the GPU */ -+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST MAX(((IMG_UINT32)RGX_FEATURE_NUM_CLUSTERS >> 1), 1U) -+ -+/*! The number of indirectly addressable USC blocks in the GPU */ -+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER (RGX_FEATURE_NUM_CLUSTERS) -+ -+# if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) -+ -+ /*! Defines the number of performance counter blocks that are directly -+ * addressable in the RGX register map for S. */ -+# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 1 /* JONES */ -+# define RGX_HWPERF_INDIRECT_BY_PHANTOM (RGX_NUM_PHANTOMS) -+# define RGX_HWPERF_PHANTOM_NONDUST_BLKS 1 /* BLACKPEARL */ -+# define RGX_HWPERF_PHANTOM_DUST_BLKS 2 /* TPU, TEXAS */ -+# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 2 /* USC, PBE */ -+ -+# elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) -+ -+ /*! Defines the number of performance counter blocks that are directly -+ * addressable in the RGX register map. 
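Restating the block-count arithmetic above as runnable code: blocks that are indirectly addressed "by dust" exist once per pair of shader clusters, with a floor of one, while "by cluster" blocks exist once per cluster. num_clusters below stands in for RGX_FEATURE_NUM_CLUSTERS; the function is illustrative only.

#include <stdio.h>

static unsigned blocks_by_dust(unsigned num_clusters)
{
    unsigned dusts = num_clusters >> 1;   /* a dust is a pair of clusters */
    return dusts ? dusts : 1u;            /* MAX(num_clusters / 2, 1) */
}

int main(void)
{
    printf("clusters=1 -> by-dust blocks=%u\n", blocks_by_dust(1u));  /* 1 */
    printf("clusters=4 -> by-dust blocks=%u\n", blocks_by_dust(4u));  /* 2 */
    printf("clusters=6 -> by-dust blocks=%u\n", blocks_by_dust(6u));  /* 3 */
    return 0;
}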
*/ -+# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 2 /* TORNADO, TA */ -+ -+# define RGX_HWPERF_INDIRECT_BY_PHANTOM (RGX_NUM_PHANTOMS) -+# define RGX_HWPERF_PHANTOM_NONDUST_BLKS 2 /* RASTER, TEXAS */ -+# define RGX_HWPERF_PHANTOM_DUST_BLKS 1 /* TPU */ -+# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */ -+ -+# else /* !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && !defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) i.e. S6 */ -+ -+ /*! Defines the number of performance counter blocks that are -+ * addressable in the RGX register map for Series 6. */ -+# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 3 /* TA, RASTER, HUB */ -+# define RGX_HWPERF_INDIRECT_BY_PHANTOM 0 /* PHANTOM is not there in Rogue1. Just using it to keep naming same as later series (RogueXT n Rogue XT+) */ -+# define RGX_HWPERF_PHANTOM_NONDUST_BLKS 0 -+# define RGX_HWPERF_PHANTOM_DUST_BLKS 1 /* TPU */ -+# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */ -+ -+# endif -+ -+/*! The number of performance counters in each layout block defined for UM/FW code */ -+#if defined(RGX_FEATURE_CLUSTER_GROUPING) -+ #define RGX_HWPERF_CNTRS_IN_BLK 6 -+ #else -+ #define RGX_HWPERF_CNTRS_IN_BLK 4 -+#endif -+ -+#endif /* #if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) */ -+#else /* defined(__KERNEL__) */ -+/* Kernel/server definitions - not used, hence invalid definitions */ -+ -+# define RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC 0xFF -+ -+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC -+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC -+ -+# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC -+# define RGX_HWPERF_INDIRECT_BY_PHANTOM RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC -+# define RGX_HWPERF_PHANTOM_NONDUST_BLKS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC -+# define RGX_HWPERF_PHANTOM_DUST_BLKS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC -+# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC -+ -+#endif -+ -+/*! The number of custom non-mux counter blocks supported */ -+#define RGX_HWPERF_MAX_CUSTOM_BLKS 5U -+ -+/*! The number of counters supported in each non-mux counter block */ -+#define RGX_HWPERF_MAX_CUSTOM_CNTRS 8U -+ -+ -+/****************************************************************************** -+ * Data Stream Common Types -+ *****************************************************************************/ -+ -+/*! All the Data Masters HWPerf is aware of. When a new DM is added to this -+ * list, it should be appended at the end to maintain backward compatibility -+ * of HWPerf data. -+ */ -+#define RGX_HWPERF_DM_GP 0x00000000U -+#define RGX_HWPERF_DM_2D 0x00000001U -+#define RGX_HWPERF_DM_TA 0x00000002U -+#define RGX_HWPERF_DM_3D 0x00000003U -+#define RGX_HWPERF_DM_CDM 0x00000004U -+#define RGX_HWPERF_DM_RTU 0x00000005U -+#define RGX_HWPERF_DM_SHG 0x00000006U -+#define RGX_HWPERF_DM_TDM 0x00000007U -+ -+#define RGX_HWPERF_DM_LAST 0x00000008U -+ -+#define RGX_HWPERF_DM_INVALID 0x1FFFFFFFU -+ -+/****************************************************************************** -+ * API Types -+ *****************************************************************************/ -+ -+/*! Counter block IDs for all the hardware blocks with counters. -+ * Directly addressable blocks must have a value between 0..15 [0..0xF]. -+ * Indirect groups have following encoding: -+ * First hex digit (LSB) represents a unit number within the group -+ * and the second hex digit represents the group number. 
-+ * Group 0 is the direct group, all others are indirect groups. -+ */ -+typedef IMG_UINT32 RGX_HWPERF_CNTBLK_ID; -+ -+/*! Directly addressable counter blocks */ -+#if defined(DOXYGEN) -+/*! _RGX_HWPERF_CNTBLK_ID */ -+#endif -+#define RGX_CNTBLK_ID_TA 0x0000U -+#define RGX_CNTBLK_ID_RASTER 0x0001U /*!< Non-cluster grouping cores */ -+#define RGX_CNTBLK_ID_HUB 0x0002U /*!< Non-cluster grouping cores */ -+#define RGX_CNTBLK_ID_TORNADO 0x0003U /*!< XT cores */ -+#define RGX_CNTBLK_ID_JONES 0x0004U /*!< S7 cores */ -+#define RGX_CNTBLK_ID_DIRECT_LAST 0x0005U /*!< Indirect blocks start from here */ -+#define RGX_CNTBLK_ID_BF_DEPRECATED 0x0005U /*!< Doppler unit (DEPRECATED) */ -+#define RGX_CNTBLK_ID_BT_DEPRECATED 0x0006U /*!< Doppler unit (DEPRECATED) */ -+#define RGX_CNTBLK_ID_RT_DEPRECATED 0x0007U /*!< Doppler unit (DEPRECATED) */ -+#define RGX_CNTBLK_ID_SH_DEPRECATED 0x0008U /*!< Ray tracing unit (DEPRECATED) */ -+ -+ -+/*! Indirectly addressable counter blocks. DA blocks indicate counter blocks -+ * where the counter registers are directly accessible -+ */ -+#define RGX_CNTBLK_ID_TPU_MCU0 0x0010U /*!< Addressable by Dust */ -+#define RGX_CNTBLK_ID_TPU_MCU0_DA 0x8010U -+#define RGX_CNTBLK_ID_TPU_MCU1 0x0011U -+#define RGX_CNTBLK_ID_TPU_MCU1_DA 0x8011U -+#define RGX_CNTBLK_ID_TPU_MCU2 0x0012U -+#define RGX_CNTBLK_ID_TPU_MCU2_DA 0x8012U -+#define RGX_CNTBLK_ID_TPU_MCU3 0x0013U -+#define RGX_CNTBLK_ID_TPU_MCU3_DA 0x8013U -+#define RGX_CNTBLK_ID_TPU_MCU4 0x0014U -+#define RGX_CNTBLK_ID_TPU_MCU4_DA 0x8014U -+#define RGX_CNTBLK_ID_TPU_MCU5 0x0015U -+#define RGX_CNTBLK_ID_TPU_MCU5_DA 0x8015U -+#define RGX_CNTBLK_ID_TPU_MCU6 0x0016U -+#define RGX_CNTBLK_ID_TPU_MCU6_DA 0x8016U -+#define RGX_CNTBLK_ID_TPU_MCU7 0x0017U -+#define RGX_CNTBLK_ID_TPU_MCU7_DA 0x8017U -+#define RGX_CNTBLK_ID_TPU_MCU_ALL 0x4010U -+#define RGX_CNTBLK_ID_TPU_MCU_ALL_DA 0xC010U -+ -+#define RGX_CNTBLK_ID_USC0 0x0020U /*!< Addressable by Cluster */ -+#define RGX_CNTBLK_ID_USC0_DA 0x8020U -+#define RGX_CNTBLK_ID_USC1 0x0021U -+#define RGX_CNTBLK_ID_USC1_DA 0x8021U -+#define RGX_CNTBLK_ID_USC2 0x0022U -+#define RGX_CNTBLK_ID_USC2_DA 0x8022U -+#define RGX_CNTBLK_ID_USC3 0x0023U -+#define RGX_CNTBLK_ID_USC3_DA 0x8023U -+#define RGX_CNTBLK_ID_USC4 0x0024U -+#define RGX_CNTBLK_ID_USC4_DA 0x8024U -+#define RGX_CNTBLK_ID_USC5 0x0025U -+#define RGX_CNTBLK_ID_USC5_DA 0x8025U -+#define RGX_CNTBLK_ID_USC6 0x0026U -+#define RGX_CNTBLK_ID_USC6_DA 0x8026U -+#define RGX_CNTBLK_ID_USC7 0x0027U -+#define RGX_CNTBLK_ID_USC7_DA 0x8027U -+#define RGX_CNTBLK_ID_USC8 0x0028U -+#define RGX_CNTBLK_ID_USC8_DA 0x8028U -+#define RGX_CNTBLK_ID_USC9 0x0029U -+#define RGX_CNTBLK_ID_USC9_DA 0x8029U -+#define RGX_CNTBLK_ID_USC10 0x002AU -+#define RGX_CNTBLK_ID_USC10_DA 0x802AU -+#define RGX_CNTBLK_ID_USC11 0x002BU -+#define RGX_CNTBLK_ID_USC11_DA 0x802BU -+#define RGX_CNTBLK_ID_USC12 0x002CU -+#define RGX_CNTBLK_ID_USC12_DA 0x802CU -+#define RGX_CNTBLK_ID_USC13 0x002DU -+#define RGX_CNTBLK_ID_USC13_DA 0x802DU -+#define RGX_CNTBLK_ID_USC14 0x002EU -+#define RGX_CNTBLK_ID_USC14_DA 0x802EU -+#define RGX_CNTBLK_ID_USC15 0x002FU -+#define RGX_CNTBLK_ID_USC15_DA 0x802FU -+#define RGX_CNTBLK_ID_USC_ALL 0x4020U -+#define RGX_CNTBLK_ID_USC_ALL_DA 0xC020U -+ -+#define RGX_CNTBLK_ID_TEXAS0 0x0030U /*!< Addressable by Phantom in XT, Dust in S7 */ -+#define RGX_CNTBLK_ID_TEXAS1 0x0031U -+#define RGX_CNTBLK_ID_TEXAS2 0x0032U -+#define RGX_CNTBLK_ID_TEXAS3 0x0033U -+#define RGX_CNTBLK_ID_TEXAS4 0x0034U -+#define RGX_CNTBLK_ID_TEXAS5 0x0035U -+#define RGX_CNTBLK_ID_TEXAS6 
0x0036U -+#define RGX_CNTBLK_ID_TEXAS7 0x0037U -+#define RGX_CNTBLK_ID_TEXAS_ALL 0x4030U -+ -+#define RGX_CNTBLK_ID_RASTER0 0x0040U /*!< Addressable by Phantom, XT only */ -+#define RGX_CNTBLK_ID_RASTER1 0x0041U -+#define RGX_CNTBLK_ID_RASTER2 0x0042U -+#define RGX_CNTBLK_ID_RASTER3 0x0043U -+#define RGX_CNTBLK_ID_RASTER_ALL 0x4040U -+ -+#define RGX_CNTBLK_ID_BLACKPEARL0 0x0050U /*!< Addressable by Phantom, S7, only */ -+#define RGX_CNTBLK_ID_BLACKPEARL1 0x0051U -+#define RGX_CNTBLK_ID_BLACKPEARL2 0x0052U -+#define RGX_CNTBLK_ID_BLACKPEARL3 0x0053U -+#define RGX_CNTBLK_ID_BLACKPEARL_ALL 0x4050U -+ -+#define RGX_CNTBLK_ID_PBE0 0x0060U /*!< Addressable by Cluster in S7 and PBE2_IN_XE */ -+#define RGX_CNTBLK_ID_PBE1 0x0061U -+#define RGX_CNTBLK_ID_PBE2 0x0062U -+#define RGX_CNTBLK_ID_PBE3 0x0063U -+#define RGX_CNTBLK_ID_PBE4 0x0064U -+#define RGX_CNTBLK_ID_PBE5 0x0065U -+#define RGX_CNTBLK_ID_PBE6 0x0066U -+#define RGX_CNTBLK_ID_PBE7 0x0067U -+#define RGX_CNTBLK_ID_PBE8 0x0068U -+#define RGX_CNTBLK_ID_PBE9 0x0069U -+#define RGX_CNTBLK_ID_PBE10 0x006AU -+#define RGX_CNTBLK_ID_PBE11 0x006BU -+#define RGX_CNTBLK_ID_PBE12 0x006CU -+#define RGX_CNTBLK_ID_PBE13 0x006DU -+#define RGX_CNTBLK_ID_PBE14 0x006EU -+#define RGX_CNTBLK_ID_PBE15 0x006FU -+#define RGX_CNTBLK_ID_PBE_ALL 0x4060U -+ -+#define RGX_CNTBLK_ID_LAST 0x0070U /*!< End of PBE block */ -+ -+#define RGX_CNTBLK_ID_BX_TU0_DEPRECATED 0x0070U /*!< Doppler unit, DEPRECATED */ -+#define RGX_CNTBLK_ID_BX_TU1_DEPRECATED 0x0071U -+#define RGX_CNTBLK_ID_BX_TU2_DEPRECATED 0x0072U -+#define RGX_CNTBLK_ID_BX_TU3_DEPRECATED 0x0073U -+#define RGX_CNTBLK_ID_BX_TU_ALL_DEPRECATED 0x4070U -+ -+#define RGX_CNTBLK_ID_CUSTOM0 0x70F0U -+#define RGX_CNTBLK_ID_CUSTOM1 0x70F1U -+#define RGX_CNTBLK_ID_CUSTOM2 0x70F2U -+#define RGX_CNTBLK_ID_CUSTOM3 0x70F3U -+#define RGX_CNTBLK_ID_CUSTOM4_FW 0x70F4U /*!< Custom block used for getting statistics held in the FW */ -+#define RGX_CNTBLK_ID_CUSTOM_MASK 0x70FFU -+ -+ -+/* Masks for the counter block ID*/ -+#define RGX_CNTBLK_ID_UNIT_MASK (0x000FU) -+#define RGX_CNTBLK_ID_GROUP_MASK (0x00F0U) -+#define RGX_CNTBLK_ID_GROUP_SHIFT (4U) -+#define RGX_CNTBLK_ID_MC_GPU_MASK (0x0F00U) -+#define RGX_CNTBLK_ID_MC_GPU_SHIFT (8U) -+#define RGX_CNTBLK_ID_UNIT_ALL_MASK (0x4000U) -+#define RGX_CNTBLK_ID_DA_MASK (0x8000U) /*!< Block with directly accessible counter registers */ -+ -+#define RGX_CNTBLK_INDIRECT_COUNT(_class, _n) ((IMG_UINT32)(RGX_CNTBLK_ID_ ## _class ## _n) - (IMG_UINT32)(RGX_CNTBLK_ID_ ## _class ## 0) + 1u) -+ -+/*! The number of layout blocks defined with configurable multiplexed -+ * performance counters, hence excludes custom counter blocks. 
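The ID masks just defined can be exercised with a small decoder. 0x8023 is RGX_CNTBLK_ID_USC3_DA from the table above: group nibble 2 (the USC group), unit 3, multi-GPU index 0, direct-access bit set. The function is an illustration, not part of the header.

#include <stdint.h>
#include <stdio.h>

static void decode_cntblk_id(uint16_t id)
{
    unsigned unit  =  id & 0x000Fu;         /* cf. RGX_CNTBLK_ID_UNIT_MASK */
    unsigned group = (id & 0x00F0u) >> 4;   /* cf. RGX_CNTBLK_ID_GROUP_MASK/SHIFT */
    unsigned gpu   = (id & 0x0F00u) >> 8;   /* cf. RGX_CNTBLK_ID_MC_GPU_MASK/SHIFT */
    int      all   = (id & 0x4000u) != 0;   /* cf. RGX_CNTBLK_ID_UNIT_ALL_MASK */
    int      da    = (id & 0x8000u) != 0;   /* cf. RGX_CNTBLK_ID_DA_MASK */
    printf("id 0x%04x: group=%u unit=%u gpu=%u all=%d da=%d\n",
           id, group, unit, gpu, all, da);
}

int main(void)
{
    decode_cntblk_id(0x8023u);   /* -> group=2 unit=3 gpu=0 all=0 da=1 */
    return 0;
}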
-+ */ -+#define RGX_HWPERF_MAX_DEFINED_BLKS (\ -+ (IMG_UINT32)RGX_CNTBLK_ID_DIRECT_LAST +\ -+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7)+\ -+ RGX_CNTBLK_INDIRECT_COUNT(USC, 15)+\ -+ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7)+\ -+ RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3)+\ -+ RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3)+\ -+ RGX_CNTBLK_INDIRECT_COUNT(PBE, 15) ) -+#define RGX_HWPERF_MAX_MUX_BLKS (\ -+ RGX_HWPERF_MAX_DEFINED_BLKS ) -+ -+static_assert( -+ ((RGX_CNTBLK_ID_DIRECT_LAST + ((RGX_CNTBLK_ID_LAST & RGX_CNTBLK_ID_GROUP_MASK) >> RGX_CNTBLK_ID_GROUP_SHIFT)) <= RGX_HWPERF_MAX_BVNC_BLOCK_LEN), -+ "RGX_HWPERF_MAX_BVNC_BLOCK_LEN insufficient"); -+ -+#define RGX_HWPERF_EVENT_MASK_VALUE(e) (IMG_UINT64_C(1) << (IMG_UINT32)(e)) -+ -+#define RGX_CUSTOM_FW_CNTRS \ -+ X(TA_LOCAL_FL_SIZE, 0x0, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \ -+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \ -+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \ -+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \ -+ \ -+ X(TA_GLOBAL_FL_SIZE, 0x1, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \ -+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \ -+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \ -+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \ -+ \ -+ X(3D_LOCAL_FL_SIZE, 0x2, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ -+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \ -+ \ -+ X(3D_GLOBAL_FL_SIZE, 0x3, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ -+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \ -+ \ -+ X(ISP_TILES_IN_FLIGHT, 0x4, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ -+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DSPMKICK)) -+ -+/*! Counter IDs for the firmware held statistics */ -+typedef enum -+{ -+#define X(ctr, id, allow_mask) RGX_CUSTOM_FW_CNTR_##ctr = id, -+ RGX_CUSTOM_FW_CNTRS -+#undef X -+ -+ /* always the last entry in the list */ -+ RGX_CUSTOM_FW_CNTR_LAST -+} RGX_HWPERF_CUSTOM_FW_CNTR_ID; -+ -+/*! Identifier for each counter in a performance counting module */ -+typedef IMG_UINT32 RGX_HWPERF_CNTBLK_COUNTER_ID; -+ -+#define RGX_CNTBLK_COUNTER0_ID 0U -+#define RGX_CNTBLK_COUNTER1_ID 1U -+#define RGX_CNTBLK_COUNTER2_ID 2U -+#define RGX_CNTBLK_COUNTER3_ID 3U -+#define RGX_CNTBLK_COUNTER4_ID 4U -+#define RGX_CNTBLK_COUNTER5_ID 5U -+ /* MAX value used in server handling of counter config arrays */ -+#define RGX_CNTBLK_MUX_COUNTERS_MAX 6U -+ -+ -+/* sets all the bits from bit _b1 to _b2, in a IMG_UINT64 type */ -+#define MASK_RANGE_IMPL(b1, b2) ((IMG_UINT64)((IMG_UINT64_C(1) << ((IMG_UINT32)(b2)-(IMG_UINT32)(b1) + 1U)) - 1U) << (IMG_UINT32)(b1)) -+#define MASK_RANGE(R) MASK_RANGE_IMPL(R##_FIRST_TYPE, R##_LAST_TYPE) -+#define RGX_HWPERF_HOST_EVENT_MASK_VALUE(e) (IMG_UINT32_C(1) << (e)) -+ -+/*! Mask macros for use with RGXCtrlHWPerf() API. -+ */ -+#define RGX_HWPERF_EVENT_MASK_NONE (IMG_UINT64_C(0x0000000000000000)) -+#define RGX_HWPERF_EVENT_MASK_DEFAULT RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_FWACT) | \ -+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG) | \ -+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG) -+#define RGX_HWPERF_EVENT_MASK_ALL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) -+ -+/*! HWPerf Firmware event masks -+ * @par -+ * All FW Start/End/Debug (SED) events. 
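RGX_CUSTOM_FW_CNTRS above is an X-macro table: the same list expands into the RGX_HWPERF_CUSTOM_FW_CNTR_ID enum and could equally drive other parallel tables. A minimal, self-contained demo of the pattern, with invented counter names and masks:

#include <stdint.h>
#include <stdio.h>

#define DEMO_CNTRS \
    X(LOCAL_FL_SIZE,   0x0, 0x3u) \
    X(GLOBAL_FL_SIZE,  0x1, 0x3u) \
    X(TILES_IN_FLIGHT, 0x2, 0x4u)

typedef enum {
#define X(name, id, mask) DEMO_CNTR_##name = (id),
    DEMO_CNTRS
#undef X
    DEMO_CNTR_LAST                      /* always the last entry, as in the header */
} demo_cntr_id;

static const uint32_t demo_allow_mask[DEMO_CNTR_LAST] = {
#define X(name, id, mask) [id] = (mask),
    DEMO_CNTRS
#undef X
};

int main(void)
{
    printf("GLOBAL_FL_SIZE: id=%d allow=0x%x\n",
           DEMO_CNTR_GLOBAL_FL_SIZE, demo_allow_mask[DEMO_CNTR_GLOBAL_FL_SIZE]);
    return 0;
}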
*/ -+#define RGX_HWPERF_EVENT_MASK_FW_SED (MASK_RANGE(RGX_HWPERF_FW_EVENT_RANGE)) -+ -+#define RGX_HWPERF_EVENT_MASK_FW_UFO (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO)) -+#define RGX_HWPERF_EVENT_MASK_FW_CSW (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_START) |\ -+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_FINISHED)) -+/*! All FW events. */ -+#define RGX_HWPERF_EVENT_MASK_ALL_FW (RGX_HWPERF_EVENT_MASK_FW_SED |\ -+ RGX_HWPERF_EVENT_MASK_FW_UFO |\ -+ RGX_HWPERF_EVENT_MASK_FW_CSW) -+ -+/*! HW Periodic events (1ms interval). */ -+#define RGX_HWPERF_EVENT_MASK_HW_PERIODIC (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PERIODIC)) -+/*! All HW Kick/Finish events. */ -+#define RGX_HWPERF_EVENT_MASK_HW_KICKFINISH ((MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE0) |\ -+ MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE1)) &\ -+ ~(RGX_HWPERF_EVENT_MASK_HW_PERIODIC)) -+ -+#define RGX_HWPERF_EVENT_MASK_ALL_HW (RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |\ -+ RGX_HWPERF_EVENT_MASK_HW_PERIODIC) -+ -+#define RGX_HWPERF_EVENT_MASK_ALL_PWR_EST (MASK_RANGE(RGX_HWPERF_PWR_EST_RANGE)) -+ -+#define RGX_HWPERF_EVENT_MASK_ALL_PWR (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG) |\ -+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_GPU_STATE_CHG) |\ -+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG)) -+ -+/*! HWPerf Host event masks -+ */ -+#define RGX_HWPERF_EVENT_MASK_HOST_WORK_ENQ (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_ENQ)) -+#define RGX_HWPERF_EVENT_MASK_HOST_ALL_UFO (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_UFO)) -+#define RGX_HWPERF_EVENT_MASK_HOST_ALL_PWR (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_CLK_SYNC)) -+ -+ -+/*! Type used in the RGX API RGXConfigMuxHWPerfCounters() */ -+typedef struct -+{ -+ /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */ -+ IMG_UINT16 ui16BlockID; -+ -+ /*! 4 or 6 LSBs used to select counters to configure in this block. */ -+ IMG_UINT8 ui8CounterSelect; -+ -+ /*! 4 or 6 LSBs used as MODE bits for the counters in the group. */ -+ IMG_UINT8 ui8Mode; -+ -+ /*! 5 or 6 LSBs used as the GROUP_SELECT value for the counter. */ -+ IMG_UINT8 aui8GroupSelect[RGX_CNTBLK_MUX_COUNTERS_MAX]; -+ -+ /*! 16 LSBs used as the BIT_SELECT value for the counter. */ -+ IMG_UINT16 aui16BitSelect[RGX_CNTBLK_MUX_COUNTERS_MAX]; -+ -+ /*! 14 LSBs used as the BATCH_MAX value for the counter. */ -+ IMG_UINT32 aui32BatchMax[RGX_CNTBLK_MUX_COUNTERS_MAX]; -+ -+ /*! 14 LSBs used as the BATCH_MIN value for the counter. */ -+ IMG_UINT32 aui32BatchMin[RGX_CNTBLK_MUX_COUNTERS_MAX]; -+} UNCACHED_ALIGN RGX_HWPERF_CONFIG_MUX_CNTBLK; -+ -+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_MUX_CNTBLK); -+ -+/*! Type used in the RGX API RGXConfigHWPerfCounters() */ -+typedef struct -+{ -+ /*! Reserved for future use */ -+ IMG_UINT32 ui32Reserved; -+ -+ /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */ -+ IMG_UINT16 ui16BlockID; -+ -+ /*! Number of configured counters within this block */ -+ IMG_UINT16 ui16NumCounters; -+ -+ /*! 
Counter register values */ -+ IMG_UINT16 ui16Counters[RGX_CNTBLK_COUNTERS_MAX]; -+} UNCACHED_ALIGN RGX_HWPERF_CONFIG_CNTBLK; -+ -+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_CNTBLK); -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif /* RGX_HWPERF_H_ */ -+ -+/****************************************************************************** -+ End of file -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgx_hwperf_common.h b/drivers/gpu/drm/img-rogue/rgx_hwperf_common.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_hwperf_common.h -@@ -0,0 +1,1634 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX HWPerf and Debug Types and Defines Header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Common data types definitions for hardware performance API -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef RGX_HWPERF_COMMON_H_ -+#define RGX_HWPERF_COMMON_H_ -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+/* These structures are used on both GPU and CPU and must be a size that is a -+ * multiple of 64 bits, 8 bytes to allow the FW to write 8 byte quantities at -+ * 8 byte aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to check this. 
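The event masks above and the "shift ordinal" scheme described for the event types boil down to one bit per event type in a 64-bit filter word. A minimal sketch; the ordinals 0x08/0x09 used in the example are the TA kick/finished values listed later in this header.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define EV_MASK(e) (UINT64_C(1) << (e))   /* cf. RGX_HWPERF_EVENT_MASK_VALUE */

static bool event_passes_filter(uint64_t filter, uint32_t event_type)
{
    return (filter & EV_MASK(event_type)) != 0u;
}

int main(void)
{
    uint64_t filter = EV_MASK(0x08u) | EV_MASK(0x09u);                /* TA kick + TA finished */
    printf("0x08 passes: %d\n", event_passes_filter(filter, 0x08u));  /* 1 */
    printf("0x0B passes: %d\n", event_passes_filter(filter, 0x0Bu));  /* 0 */
    return 0;
}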
-+ */ -+ -+/****************************************************************************** -+ * Includes and Defines -+ *****************************************************************************/ -+ -+#include "img_types.h" -+#include "img_defs.h" -+ -+#include "rgx_common.h" -+#include "rgx_common_asserts.h" -+#include "pvrsrv_tlcommon.h" -+#include "pvrsrv_sync_km.h" -+ -+ -+/****************************************************************************** -+ * Packet Event Type Enumerations -+ *****************************************************************************/ -+ -+/*! Type used to encode the event that generated the packet. -+ * NOTE: When this type is updated the corresponding hwperfbin2json tool -+ * source needs to be updated as well. The RGX_HWPERF_EVENT_MASK_* macros will -+ * also need updating when adding new types. -+ * -+ * @par -+ * The event type values are incrementing integers for use as a shift ordinal -+ * in the event filtering process at the point events are generated. -+ * This scheme thus implies a limit of 63 event types. -+ */ -+ -+typedef IMG_UINT32 RGX_HWPERF_EVENT_TYPE; -+ -+#define RGX_HWPERF_INVALID 0x00U /*!< Invalid. Reserved value. */ -+ -+/*! FW types 0x01..0x06 */ -+#define RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE 0x01U -+ -+#define RGX_HWPERF_FW_BGSTART 0x01U /*!< Background task processing start */ -+#define RGX_HWPERF_FW_BGEND 0x02U /*!< Background task end */ -+#define RGX_HWPERF_FW_IRQSTART 0x03U /*!< IRQ task processing start */ -+ -+#define RGX_HWPERF_FW_IRQEND 0x04U /*!< IRQ task end */ -+#define RGX_HWPERF_FW_DBGSTART 0x05U /*!< Debug event start */ -+#define RGX_HWPERF_FW_DBGEND 0x06U /*!< Debug event end */ -+ -+#define RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE 0x06U -+ -+/*! HW types 0x07..0x19 */ -+#define RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE 0x07U -+ -+#define RGX_HWPERF_HW_PMOOM_TAPAUSE 0x07U /*!< TA Pause at PM Out of Memory */ -+ -+#define RGX_HWPERF_HW_TAKICK 0x08U /*!< TA task started */ -+#define RGX_HWPERF_HW_TAFINISHED 0x09U /*!< TA task finished */ -+#define RGX_HWPERF_HW_3DTQKICK 0x0AU /*!< 3D TQ started */ -+#define RGX_HWPERF_HW_3DKICK 0x0BU /*!< 3D task started */ -+#define RGX_HWPERF_HW_3DFINISHED 0x0CU /*!< 3D task finished */ -+#define RGX_HWPERF_HW_CDMKICK 0x0DU /*!< CDM task started */ -+#define RGX_HWPERF_HW_CDMFINISHED 0x0EU /*!< CDM task finished */ -+#define RGX_HWPERF_HW_TLAKICK 0x0FU /*!< TLA task started */ -+#define RGX_HWPERF_HW_TLAFINISHED 0x10U /*!< TLS task finished */ -+#define RGX_HWPERF_HW_3DSPMKICK 0x11U /*!< 3D SPM task started */ -+#define RGX_HWPERF_HW_PERIODIC 0x12U /*!< Periodic event with updated HW counters */ -+#define RGX_HWPERF_HW_RTUKICK 0x13U /*!< Reserved, future use */ -+#define RGX_HWPERF_HW_RTUFINISHED 0x14U /*!< Reserved, future use */ -+#define RGX_HWPERF_HW_SHGKICK 0x15U /*!< Reserved, future use */ -+#define RGX_HWPERF_HW_SHGFINISHED 0x16U /*!< Reserved, future use */ -+#define RGX_HWPERF_HW_3DTQFINISHED 0x17U /*!< 3D TQ finished */ -+#define RGX_HWPERF_HW_3DSPMFINISHED 0x18U /*!< 3D SPM task finished */ -+ -+#define RGX_HWPERF_HW_PMOOM_TARESUME 0x19U /*!< TA Resume after PM Out of Memory */ -+ -+/*! HW_EVENT_RANGE0 used up. Use next empty range below to add new hardware events */ -+#define RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE 0x19U -+ -+/*! other types 0x1A..0x1F */ -+#define RGX_HWPERF_CLKS_CHG 0x1AU /*!< Clock speed change in GPU */ -+#define RGX_HWPERF_GPU_STATE_CHG 0x1BU /*!< GPU work state change */ -+ -+/*! 
power types 0x20..0x27 */ -+#define RGX_HWPERF_PWR_EST_RANGE_FIRST_TYPE 0x20U -+#define RGX_HWPERF_PWR_EST_REQUEST 0x20U /*!< Power estimate requested (via GPIO) */ -+#define RGX_HWPERF_PWR_EST_READY 0x21U /*!< Power estimate inputs ready */ -+#define RGX_HWPERF_PWR_EST_RESULT 0x22U /*!< Power estimate result calculated */ -+#define RGX_HWPERF_PWR_EST_RANGE_LAST_TYPE 0x22U -+ -+#define RGX_HWPERF_PWR_CHG 0x23U /*!< Power state change */ -+ -+/*! HW_EVENT_RANGE1 0x28..0x2F, for accommodating new hardware events */ -+#define RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE 0x28U -+ -+#define RGX_HWPERF_HW_TDMKICK 0x28U /*!< TDM task started */ -+#define RGX_HWPERF_HW_TDMFINISHED 0x29U /*!< TDM task finished */ -+#define RGX_HWPERF_HW_NULLKICK 0x2AU /*!< NULL event */ -+ -+#define RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE 0x2AU -+ -+/*! context switch types 0x30..0x31 */ -+#define RGX_HWPERF_CSW_START 0x30U /*!< HW context store started */ -+#define RGX_HWPERF_CSW_FINISHED 0x31U /*!< HW context store finished */ -+ -+/*! DVFS events */ -+#define RGX_HWPERF_DVFS 0x32U /*!< Dynamic voltage/frequency scaling events */ -+ -+/*! firmware misc 0x38..0x39 */ -+#define RGX_HWPERF_UFO 0x38U /*!< FW UFO Check / Update */ -+#define RGX_HWPERF_FWACT 0x39U /*!< FW Activity notification */ -+ -+/*! last */ -+#define RGX_HWPERF_LAST_TYPE 0x3BU -+ -+/*! This enumeration must have a value that is a power of two as it is -+ * used in masks and a filter bit field (currently 64 bits long). -+ */ -+#define RGX_HWPERF_MAX_TYPE 0x40U -+ -+static_assert(RGX_HWPERF_LAST_TYPE < RGX_HWPERF_MAX_TYPE, "Too many HWPerf event types"); -+ -+/*! Macro used to check if an event type ID is present in the known set of hardware type events */ -+#define HWPERF_PACKET_IS_HW_TYPE(_etype) (((_etype) >= RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE) || \ -+ ((_etype) >= RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE)) -+ -+/*! Macro used to check if an event type ID is present in the known set of firmware type events */ -+#define HWPERF_PACKET_IS_FW_TYPE(_etype) \ -+ ((_etype) >= RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE && \ -+ (_etype) <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE) -+ -+ -+typedef enum { -+ RGX_HWPERF_HOST_INVALID = 0x00, /*!< Invalid, do not use. */ -+ RGX_HWPERF_HOST_ENQ = 0x01, /*!< ``0x01`` Kernel driver has queued GPU work. -+ See RGX_HWPERF_HOST_ENQ_DATA */ -+ RGX_HWPERF_HOST_UFO = 0x02, /*!< ``0x02`` UFO updated by the driver. -+ See RGX_HWPERF_HOST_UFO_DATA */ -+ RGX_HWPERF_HOST_ALLOC = 0x03, /*!< ``0x03`` Resource allocated. -+ See RGX_HWPERF_HOST_ALLOC_DATA */ -+ RGX_HWPERF_HOST_CLK_SYNC = 0x04, /*!< ``0x04`` GPU / Host clocks correlation data. -+ See RGX_HWPERF_HOST_CLK_SYNC_DATA */ -+ RGX_HWPERF_HOST_FREE = 0x05, /*!< ``0x05`` Resource freed, -+ See RGX_HWPERF_HOST_FREE_DATA */ -+ RGX_HWPERF_HOST_MODIFY = 0x06, /*!< ``0x06`` Resource modified / updated. -+ See RGX_HWPERF_HOST_MODIFY_DATA */ -+ RGX_HWPERF_HOST_DEV_INFO = 0x07, /*!< ``0x07`` Device Health status. -+ See RGX_HWPERF_HOST_DEV_INFO_DATA */ -+ RGX_HWPERF_HOST_INFO = 0x08, /*!< ``0x08`` Device memory usage information. -+ See RGX_HWPERF_HOST_INFO_DATA */ -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT = 0x09, /*!< ``0x09`` Wait for sync event. -+ See RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA */ -+ RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE = 0x0A, /*!< ``0x0A`` Software timeline advanced. -+ See RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA */ -+ RGX_HWPERF_HOST_CLIENT_INFO = 0x0B, /*!< ``0x0B`` Additional client info. 
-+ See RGX_HWPERF_HOST_CLIENT_INFO_DATA */ -+ -+ /*! last */ -+ RGX_HWPERF_HOST_LAST_TYPE, -+ -+ /*! This enumeration must have a value that is a power of two as it is -+ * used in masks and a filter bit field (currently 32 bits long). -+ */ -+ RGX_HWPERF_HOST_MAX_TYPE = 0x20 -+} RGX_HWPERF_HOST_EVENT_TYPE; -+ -+/*!< The event type values are incrementing integers for use as a shift ordinal -+ * in the event filtering process at the point events are generated. -+ * This scheme thus implies a limit of 31 event types. -+ */ -+static_assert(RGX_HWPERF_HOST_LAST_TYPE < RGX_HWPERF_HOST_MAX_TYPE, "Too many HWPerf host event types"); -+ -+/*! Define containing bit position for 32bit feature flags used in hwperf and api */ -+typedef IMG_UINT32 RGX_HWPERF_FEATURE_FLAGS; -+#define RGX_HWPERF_FEATURE_PERFBUS_FLAG 0x00000001U -+#define RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG 0x00000002U -+#define RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG 0x00000004U -+#define RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG 0x00000008U -+#define RGX_HWPERF_FEATURE_ROGUEXE_FLAG 0x00000010U -+#define RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG 0x00000020U -+#define RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG 0x00000040U -+#define RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION 0x00000080U -+#define RGX_HWPERF_FEATURE_MULTICORE_FLAG 0x00000100U -+#define RGX_HWPERF_FEATURE_RAYTRACING_FLAG 0x00000200U -+#define RGX_HWPERF_FEATURE_CXT_TOP_INFRASTRUCTURE_FLAG 0x00000400U -+#define RGX_HWPERF_FEATURE_VOLCANIC_FLAG 0x00000800U -+#define RGX_HWPERF_FEATURE_ROGUE_FLAG 0x00001000U -+#define RGX_HWPERF_FEATURE_RESERVED1_FLAG 0x00002000U -+#define RGX_HWPERF_FEATURE_CXT_XTP_TOP_INFRASTRUCTURE_FLAG 0x00004000U -+#define RGX_HWPERF_FEATURE_AX_TOP_INFRASTRUCTURE_FLAG 0x00008000U -+#define RGX_HWPERF_FEATURE_BX_TOP_INFRASTRUCTURE_FLAG 0x00010000U -+#define RGX_HWPERF_FEATURE_DX_TOP_INFRASTRUCTURE_FLAG 0x00020000U -+ -+/* ! Define for RGX_HWPERF_DM type. The values are architecture specific */ -+typedef IMG_UINT32 RGX_HWPERF_DM; -+ -+/****************************************************************************** -+ * Packet Header Format Version 2 Types -+ *****************************************************************************/ -+ -+/*! Major version number of the protocol in operation -+ */ -+#define RGX_HWPERF_V2_FORMAT 2 -+ -+/*! Signature ASCII pattern 'HWP2' found in the first word of a HWPerfV2 packet -+ */ -+#define HWPERF_PACKET_V2_SIG 0x48575032 -+ -+/*! Signature ASCII pattern 'HWPA' found in the first word of a HWPerfV2a packet -+ */ -+#define HWPERF_PACKET_V2A_SIG 0x48575041 -+ -+/*! Signature ASCII pattern 'HWPB' found in the first word of a HWPerfV2b packet -+ */ -+#define HWPERF_PACKET_V2B_SIG 0x48575042 -+ -+/*! Signature ASCII pattern 'HWPC' found in the first word of a HWPerfV2c packet -+ */ -+#define HWPERF_PACKET_V2C_SIG 0x48575043 -+ -+#define HWPERF_PACKET_ISVALID(_val) (((_val) == HWPERF_PACKET_V2_SIG) || ((_val) == HWPERF_PACKET_V2A_SIG) || ((_val) == HWPERF_PACKET_V2B_SIG) || ((_val) == HWPERF_PACKET_V2C_SIG)) -+/*!< Checks that the packet signature is one of the supported versions */ -+ -+/*! Type defines the HWPerf packet header common to all events. 
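-+ *
-+ * A stream decoder would normally walk a buffer of packets with the helper
-+ * macros defined further below. A minimal sketch only (pui8Buf is an assumed
-+ * IMG_UINT8 pointer to a contiguous run of packets and ui32BufLen its length
-+ * in bytes; neither name is part of this API):
-+ *
-+ *   RGX_HWPERF_V2_PACKET_HDR *psHdr = RGX_HWPERF_GET_PACKET(pui8Buf);
-+ *
-+ *   while (((IMG_UINT8 *)psHdr < pui8Buf + ui32BufLen) &&
-+ *          HWPERF_PACKET_ISVALID(psHdr->ui32Sig))
-+ *   {
-+ *       IMG_UINT32 ui32Type = RGX_HWPERF_GET_TYPE(psHdr);
-+ *       void *pvData = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHdr);
-+ *
-+ *       (dispatch on ui32Type and decode pvData as the matching
-+ *        RGX_HWPERF_V2_PACKET_DATA member)
-+ *
-+ *       psHdr = RGX_HWPERF_GET_NEXT_PACKET(psHdr);
-+ *   }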
*/ -+typedef struct -+{ -+ IMG_UINT32 ui32Sig; /*!< Always the value HWPERF_PACKET_SIG */ -+ IMG_UINT32 ui32Size; /*!< Overall packet size in bytes */ -+ IMG_UINT32 eTypeId; /*!< Event type information field */ -+ IMG_UINT32 ui32Ordinal; /*!< Sequential number of the packet */ -+ IMG_UINT64 ui64Timestamp; /*!< Event timestamp */ -+} RGX_HWPERF_V2_PACKET_HDR, *RGX_PHWPERF_V2_PACKET_HDR; -+ -+RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_V2_PACKET_HDR, ui64Timestamp); -+ -+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_HDR); -+ -+ -+/*! Mask for use with the IMG_UINT32 ui32Size header field */ -+#define RGX_HWPERF_SIZE_MASK 0xFFFFU -+ -+/*! This macro defines an upper limit to which the size of the largest variable -+ * length HWPerf packet must fall within, currently 3KB. This constant may be -+ * used to allocate a buffer to hold one packet. -+ * This upper limit is policed by packet producing code. -+ */ -+#define RGX_HWPERF_MAX_PACKET_SIZE 0xC00U -+ -+/*! Defines an upper limit to the size of a variable length packet payload. -+ */ -+#define RGX_HWPERF_MAX_PAYLOAD_SIZE ((IMG_UINT32)(RGX_HWPERF_MAX_PACKET_SIZE-\ -+ sizeof(RGX_HWPERF_V2_PACKET_HDR))) -+ -+/*! Macro which takes a structure name and provides the packet size for -+ * a fixed size payload packet, rounded up to 8 bytes to align packets -+ * for 64 bit architectures. */ -+#define RGX_HWPERF_MAKE_SIZE_FIXED(_struct) ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN(sizeof(_struct), PVRSRVTL_PACKET_ALIGNMENT)))) -+ -+/*! Macro which takes the number of bytes written in the data payload of a -+ * packet for a variable size payload packet, rounded up to 8 bytes to -+ * align packets for 64 bit architectures. */ -+#define RGX_HWPERF_MAKE_SIZE_VARIABLE(_size) ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&((IMG_UINT32)sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN((_size), PVRSRVTL_PACKET_ALIGNMENT)))) -+ -+/*! Macro to obtain the size of the packet */ -+#define RGX_HWPERF_GET_SIZE(_packet_addr) ((IMG_UINT16)(((_packet_addr)->ui32Size) & RGX_HWPERF_SIZE_MASK)) -+ -+/*! Macro to obtain the size of the packet data */ -+#define RGX_HWPERF_GET_DATA_SIZE(_packet_addr) (RGX_HWPERF_GET_SIZE(_packet_addr) - sizeof(RGX_HWPERF_V2_PACKET_HDR)) -+ -+/*! Masks for use with the IMG_UINT32 eTypeId header field */ -+#define RGX_HWPERF_TYPEID_MASK 0x0007FFFFU -+#define RGX_HWPERF_TYPEID_EVENT_MASK 0x00007FFFU -+#define RGX_HWPERF_TYPEID_THREAD_MASK 0x00008000U -+#define RGX_HWPERF_TYPEID_STREAM_MASK 0x00070000U -+#define RGX_HWPERF_TYPEID_META_DMA_MASK 0x00080000U -+#define RGX_HWPERF_TYPEID_M_CORE_MASK 0x00100000U -+#define RGX_HWPERF_TYPEID_PIPEDM_MASK 0x00200000U -+#define RGX_HWPERF_TYPEID_OSID_MASK 0x07000000U -+ -+/*! Meta thread macros for encoding the ID into the type field of a packet */ -+#define RGX_HWPERF_META_THREAD_SHIFT 15U -+#define RGX_HWPERF_META_THREAD_ID0 0x0U /*!< Meta Thread 0 ID */ -+#define RGX_HWPERF_META_THREAD_ID1 0x1U /*!< Meta Thread 1 ID */ -+/*! Obsolete, kept for source compatibility */ -+#define RGX_HWPERF_META_THREAD_MASK 0x1U -+/*! Stream ID macros for encoding the ID into the type field of a packet */ -+#define RGX_HWPERF_STREAM_SHIFT 16U -+/*! Meta DMA macro for encoding how the packet was generated into the type field of a packet */ -+#define RGX_HWPERF_META_DMA_SHIFT 19U -+/*! Bit-shift macro used for encoding multi-core data into the type field of a packet */ -+#define RGX_HWPERF_M_CORE_SHIFT 20U -+/*! 
Bit-shift macro used for encoding Pipeline DM data into the type field of a packet */ -+#define RGX_HWPERF_PIPEDM_SHIFT 21U -+/*! OSID bit-shift macro used for encoding OSID into type field of a packet */ -+#define RGX_HWPERF_OSID_SHIFT 24U -+ -+/*! HWPerf Stream ID type definition. Maximum of 32bits. */ -+typedef IMG_UINT32 RGX_HWPERF_STREAM_ID; -+/*! Events from the Firmware/GPU */ -+#define RGX_HWPERF_STREAM_ID0_FW 0U -+/*! Events from the Server host driver component */ -+#define RGX_HWPERF_STREAM_ID1_HOST 1U -+/*! Events from the Client host driver component */ -+#define RGX_HWPERF_STREAM_ID2_CLIENT 2U -+#define RGX_HWPERF_STREAM_ID_LAST 3U -+ -+/* Checks if all stream IDs can fit under RGX_HWPERF_TYPEID_STREAM_MASK. */ -+static_assert(((IMG_UINT32)RGX_HWPERF_STREAM_ID_LAST - 1U) < (RGX_HWPERF_TYPEID_STREAM_MASK >> RGX_HWPERF_STREAM_SHIFT), -+ "Too many HWPerf stream IDs."); -+ -+/*! Compile-time value used to seed the Multi-Core (MC) bit in the typeID field. -+ * Only set by RGX_FIRMWARE builds. -+ */ -+#if defined(RGX_FIRMWARE) -+# if defined(RGX_FEATURE_GPU_MULTICORE_SUPPORT) -+#define RGX_HWPERF_M_CORE_VALUE 1U /*!< 1 => Multi-core supported */ -+# else -+#define RGX_HWPERF_M_CORE_VALUE 0U /*!< 0 => Multi-core not supported */ -+# endif -+# if defined(RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION) && (RGX_FEATURE_PIPELINED_DATAMASTERS_VERSION > 0) -+#define RGX_HWPERF_PIPEDM_VALUE 1U /*!< 1 => Pipeline DM supported */ -+# else -+#define RGX_HWPERF_PIPEDM_VALUE 0U /*!< 0 => Pipeline DM not supported */ -+# endif -+#else -+#define RGX_HWPERF_M_CORE_VALUE 0U /*!< 0 => Multi-core not supported */ -+#define RGX_HWPERF_PIPEDM_VALUE 0U /*!< 0 => Pipeline DM not supported */ -+#endif -+ -+/*! Macros used to set the packet type and encode meta thread ID (0|1), -+ * HWPerf stream ID, multi-core capability and OSID within the typeID */ -+#define RGX_HWPERF_MAKE_TYPEID(_stream, _type, _thread, _metadma, _osid)\ -+ ((IMG_UINT32) ((RGX_HWPERF_TYPEID_STREAM_MASK&((IMG_UINT32)(_stream) << RGX_HWPERF_STREAM_SHIFT)) | \ -+ (RGX_HWPERF_TYPEID_THREAD_MASK & ((IMG_UINT32)(_thread) << RGX_HWPERF_META_THREAD_SHIFT)) | \ -+ (RGX_HWPERF_TYPEID_EVENT_MASK & (IMG_UINT32)(_type)) | \ -+ (RGX_HWPERF_TYPEID_META_DMA_MASK & ((IMG_UINT32)(_metadma) << RGX_HWPERF_META_DMA_SHIFT)) | \ -+ (RGX_HWPERF_TYPEID_OSID_MASK & ((IMG_UINT32)(_osid) << RGX_HWPERF_OSID_SHIFT)) | \ -+ (RGX_HWPERF_TYPEID_M_CORE_MASK & ((IMG_UINT32)(RGX_HWPERF_M_CORE_VALUE) << RGX_HWPERF_M_CORE_SHIFT)) | \ -+ (RGX_HWPERF_TYPEID_PIPEDM_MASK & ((IMG_UINT32)(RGX_HWPERF_PIPEDM_VALUE) << RGX_HWPERF_PIPEDM_SHIFT)))) -+ -+/*! Obtains the event type that generated the packet */ -+#define RGX_HWPERF_GET_TYPE(_packet_addr) (((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_EVENT_MASK) -+ -+/*! Obtains the META Thread number that generated the packet */ -+#define RGX_HWPERF_GET_THREAD_ID(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_THREAD_MASK) >> RGX_HWPERF_META_THREAD_SHIFT)) -+ -+/*! Determines if the packet generated contains multi-core data */ -+#define RGX_HWPERF_GET_M_CORE(_packet_addr) (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_M_CORE_MASK) >> RGX_HWPERF_M_CORE_SHIFT) -+ -+/*! Determines if the packet generated contains multi-core data */ -+#define RGX_HWPERF_GET_PIPEDM(_packet_addr) (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_PIPEDM_MASK) >> RGX_HWPERF_PIPEDM_SHIFT) -+ -+/*! 
Obtains the guest OSID which resulted in packet generation */ -+#define RGX_HWPERF_GET_OSID(_packet_addr) (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_OSID_MASK) >> RGX_HWPERF_OSID_SHIFT) -+ -+/*! Obtain stream id */ -+#define RGX_HWPERF_GET_STREAM_ID(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_STREAM_MASK) >> RGX_HWPERF_STREAM_SHIFT)) -+ -+/*! Obtain information about how the packet was generated, which might affect payload total size */ -+#define RGX_HWPERF_GET_META_DMA_INFO(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_META_DMA_MASK) >> RGX_HWPERF_META_DMA_SHIFT)) -+ -+/*! Obtains a typed pointer to a packet given a buffer address */ -+#define RGX_HWPERF_GET_PACKET(_buffer_addr) ((RGX_HWPERF_V2_PACKET_HDR *)(void *) (_buffer_addr)) -+/*! Obtains a typed pointer to a data structure given a packet address */ -+#define RGX_HWPERF_GET_PACKET_DATA_BYTES(_packet_addr) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR))) -+/*! Obtains a typed pointer to the next packet given a packet address */ -+#define RGX_HWPERF_GET_NEXT_PACKET(_packet_addr) ((RGX_HWPERF_V2_PACKET_HDR *) (IMG_OFFSET_ADDR((_packet_addr), RGX_HWPERF_SIZE_MASK&((_packet_addr)->ui32Size)))) -+ -+/*! Obtains a typed pointer to a packet header given the packet data address */ -+#define RGX_HWPERF_GET_PACKET_HEADER(_packet_addr) ((RGX_HWPERF_V2_PACKET_HDR *) (IMG_OFFSET_ADDR((_packet_addr), -(IMG_INT32)sizeof(RGX_HWPERF_V2_PACKET_HDR)))) -+ -+ -+/****************************************************************************** -+ * Other Common Defines -+ *****************************************************************************/ -+ -+/*! This macro is not a real array size, but indicates the array has a variable -+ * length only known at run-time but always contains at least 1 element. The -+ * final size of the array is deduced from the size field of a packet header. -+ */ -+#define RGX_HWPERF_ONE_OR_MORE_ELEMENTS 1U -+ -+/*! This macro is not a real array size, but indicates the array is optional -+ * and if present has a variable length only known at run-time. The final -+ * size of the array is deduced from the size field of a packet header. */ -+#define RGX_HWPERF_ZERO_OR_MORE_ELEMENTS 1U -+ -+ -+/*! Masks for use with the IMG_UINT32 ui32BlkInfo field */ -+#define RGX_HWPERF_BLKINFO_BLKCOUNT_MASK 0xFFFF0000U -+#define RGX_HWPERF_BLKINFO_BLKOFFSET_MASK 0x0000FFFFU -+ -+/*! Shift for the NumBlocks and counter block offset field in ui32BlkInfo */ -+#define RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT 16U -+#define RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT 0U -+ -+/*! Macro used to set the block info word as a combination of two 16-bit integers */ -+#define RGX_HWPERF_MAKE_BLKINFO(_numblks, _blkoffset) ((IMG_UINT32) ((RGX_HWPERF_BLKINFO_BLKCOUNT_MASK&((_numblks) << RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)) | (RGX_HWPERF_BLKINFO_BLKOFFSET_MASK&((_blkoffset) << RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT)))) -+ -+/*! Macro used to obtain the number of counter blocks present in the packet */ -+#define RGX_HWPERF_GET_BLKCOUNT(_blkinfo) (((_blkinfo) & RGX_HWPERF_BLKINFO_BLKCOUNT_MASK) >> RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT) -+ -+/*! Obtains the offset of the counter block stream in the packet */ -+#define RGX_HWPERF_GET_BLKOFFSET(_blkinfo) (((_blkinfo) & RGX_HWPERF_BLKINFO_BLKOFFSET_MASK) >> RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT) -+ -+/*! 
This macro gets the number of blocks depending on the packet version */ -+#define RGX_HWPERF_GET_NUMBLKS(_sig, _packet_data, _numblocks) \ -+ do { \ -+ if (HWPERF_PACKET_V2B_SIG == (_sig) || HWPERF_PACKET_V2C_SIG == (_sig)) \ -+ { \ -+ (_numblocks) = RGX_HWPERF_GET_BLKCOUNT((_packet_data)->ui32BlkInfo);\ -+ } \ -+ else \ -+ { \ -+ IMG_UINT32 ui32VersionOffset = (((_sig) == HWPERF_PACKET_V2_SIG) ? 1 : 3);\ -+ (_numblocks) = *(IMG_UINT16 *)(IMG_OFFSET_ADDR(&(_packet_data)->ui32WorkTarget, ui32VersionOffset)); \ -+ } \ -+ } while (0) -+ -+/*! This macro gets the counter stream pointer depending on the packet version */ -+#define RGX_HWPERF_GET_CNTSTRM(_sig, _hw_packet_data, _cntstream_ptr) \ -+{ \ -+ if (HWPERF_PACKET_V2B_SIG == (_sig) || HWPERF_PACKET_V2C_SIG == (_sig)) \ -+ { \ -+ (_cntstream_ptr) = (IMG_UINT32 *)(IMG_OFFSET_ADDR((_hw_packet_data), RGX_HWPERF_GET_BLKOFFSET((_hw_packet_data)->ui32BlkInfo))); \ -+ } \ -+ else \ -+ { \ -+ IMG_UINT32 ui32BlkStreamOffsetInWords = (((_sig) == HWPERF_PACKET_V2_SIG) ? 6 : 8); \ -+ (_cntstream_ptr) = (IMG_UINT32 *)(IMG_OFFSET_ADDR_DW((_hw_packet_data), ui32BlkStreamOffsetInWords)); \ -+ } \ -+} -+ -+/*! Masks for use with the IMG_UINT32 ui32KickInfo field */ -+#define RGX_HWPERF_KICKINFO_STARTBE_MASK 0xFFFFFF00U -+#define RGX_HWPERF_KICKINFO_KICKID_MASK 0x000000FFU -+ -+/*! Shift for the Kick ID field in ui32KickInfo */ -+#define RGX_HWPERF_KICKINFO_STARTBE_SHIFT 8U -+#define RGX_HWPERF_KICKINFO_KICKID_SHIFT 0U -+ -+/*! Macro used to set the kick info field. */ -+#define RGX_HWPERF_MAKE_KICKINFO(_startbe, _kickid) \ -+ ((IMG_UINT32) (RGX_HWPERF_KICKINFO_STARTBE_MASK&((_startbe) << RGX_HWPERF_KICKINFO_STARTBE_SHIFT))| \ -+ (RGX_HWPERF_KICKINFO_KICKID_MASK&((_kickid) << RGX_HWPERF_KICKINFO_KICKID_SHIFT))) -+ -+/*! Macro used to obtain the lowest 24 bits of START_BE if present in the packet */ -+#define RGX_HWPERF_GET_STARTBE(_kickinfo) (((_kickinfo) & RGX_HWPERF_KICKINFO_STARTBE_MASK) >> RGX_HWPERF_KICKINFO_STARTBE_SHIFT) -+ -+/*! Macro used to obtain the Kick ID if present in the packet */ -+#define RGX_HWPERF_GET_KICKID(_kickinfo) (((_kickinfo) & RGX_HWPERF_KICKINFO_KICKID_MASK) >> RGX_HWPERF_KICKINFO_KICKID_SHIFT) -+ -+/*! Masks for use with the RGX_HWPERF_UFO_EV eEvType field */ -+#define RGX_HWPERF_UFO_STREAMSIZE_MASK 0xFFFF0000U -+#define RGX_HWPERF_UFO_STREAMOFFSET_MASK 0x0000FFFFU -+ -+/*! Shift for the UFO count and data stream fields */ -+#define RGX_HWPERF_UFO_STREAMSIZE_SHIFT 16U -+#define RGX_HWPERF_UFO_STREAMOFFSET_SHIFT 0U -+ -+/*! Macro used to set UFO stream info word as a combination of two 16-bit integers */ -+#define RGX_HWPERF_MAKE_UFOPKTINFO(_ssize, _soff) \ -+ ((IMG_UINT32) ((RGX_HWPERF_UFO_STREAMSIZE_MASK&((_ssize) << RGX_HWPERF_UFO_STREAMSIZE_SHIFT)) | \ -+ (RGX_HWPERF_UFO_STREAMOFFSET_MASK&((_soff) << RGX_HWPERF_UFO_STREAMOFFSET_SHIFT)))) -+ -+/*! Macro used to obtain UFO count*/ -+#define RGX_HWPERF_GET_UFO_STREAMSIZE(_streaminfo) \ -+ (((_streaminfo) & RGX_HWPERF_UFO_STREAMSIZE_MASK) >> RGX_HWPERF_UFO_STREAMSIZE_SHIFT) -+ -+/*! Obtains the offset of the UFO stream in the packet */ -+#define RGX_HWPERF_GET_UFO_STREAMOFFSET(_streaminfo) \ -+ (((_streaminfo) & RGX_HWPERF_UFO_STREAMOFFSET_MASK) >> RGX_HWPERF_UFO_STREAMOFFSET_SHIFT) -+ -+ -+/*! This structure holds the data of a firmware packet. 
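-+ * Generated for the firmware event types 0x01..0x06 defined above (background,
-+ * IRQ and debug task start/end); eDM identifies the data master the firmware
-+ * activity relates to.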
*/ -+typedef struct -+{ -+ RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ -+ IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ -+ IMG_UINT32 ui32FWPerfCount0; /*!< Meta/MIPS PERF_COUNT0 register */ -+ IMG_UINT32 ui32FWPerfCount1; /*!< Meta/MIPS PERF_COUNT1 register */ -+ IMG_UINT32 ui32TimeCorrIndex; /*!< Internal field */ -+ IMG_UINT32 ui32Padding; /*!< Reserved */ -+} RGX_HWPERF_FW_DATA; -+ -+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FW_DATA); -+ -+ -+/*! This structure holds the data of a hardware packet, including counters. */ -+typedef struct -+{ -+ union -+ { -+ IMG_UINT32 ui32DMCyc; /*!< DataMaster cycle count register, 0 if none */ -+ IMG_UINT32 ui32KickStartTime; /*!< Front End start time for Pipeline DMs */ -+ }; -+ IMG_UINT32 ui32FrameNum; /*!< Frame number, undefined on some DataMasters */ -+ IMG_UINT32 ui32PID; /*!< Process identifier */ -+ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ -+ IMG_UINT32 ui32WorkTarget; /*!< RenderTarget for a TA,3D; Frame context for RTU, 0x0 otherwise */ -+ IMG_UINT32 ui32ExtJobRef; /*!< Client driver context job reference used for tracking/debugging */ -+ IMG_UINT32 ui32IntJobRef; /*!< RGX Data master context job reference used for tracking/debugging */ -+ IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */ -+ IMG_UINT32 ui32BlkInfo; /*!< <31..16> NumBlocks <15..0> Counter block stream offset */ -+ IMG_UINT32 ui32WorkCtx; /*!< Work context: Render Context for TA/3D; RayTracing Context for RTU/SHG; 0x0 otherwise */ -+ IMG_UINT32 ui32CtxPriority; /*!< Context priority */ -+ IMG_UINT32 ui32GPUIdMask; /*!< GPU IDs active within this event */ -+ IMG_UINT32 ui32KickInfo; /*!< <31..8> Back End start time lowest 24 bits <7..0> GPU Pipeline DM kick ID, 0 if not using Pipeline DMs */ -+ IMG_UINT32 ui32KickEndTime; /*!< Back End finish time for Pipeline DMs */ -+ IMG_UINT32 aui32CountBlksStream[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; /*!< Optional variable length Counter data */ -+ IMG_UINT32 ui32Padding2; /*!< Reserved. To ensure correct alignment (not written in the packet) */ -+} RGX_HWPERF_HW_DATA; -+ -+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA); -+RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_HW_DATA, aui32CountBlksStream); -+ -+/*! Mask for use with the aui32CountBlksStream field when decoding the -+ * counter block ID and mask word. */ -+#define RGX_HWPERF_CNTBLK_ID_MASK 0xFFFF0000U -+#define RGX_HWPERF_CNTBLK_ID_SHIFT 16U -+ -+/*! MAX value used in server handling of counter config arrays */ -+#define RGX_CNTBLK_COUNTERS_MAX PVRSRV_HWPERF_COUNTERS_PERBLK -+ -+/*! Obtains the counter block ID word from an aui32CountBlksStream field. -+ * The word combines Control bits (15-12), GPU-Id (11-8), Group (7-4), Unit -+ * within group (3-0) */ -+#define RGX_HWPERF_GET_CNTBLK_IDW(_word) ((IMG_UINT16)(((_word)&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT)) -+ -+/*! Obtains the counter block ID from the supplied RGX_HWPERF_HW_DATA address -+ * and stream index. May be used in decoding the counter block stream words of -+ * a RGX_HWPERF_HW_DATA structure. */ -+#define RGX_HWPERF_GET_CNTBLK_ID(_data_addr, _idx) RGX_HWPERF_GET_CNTBLK_IDW((_data_addr)->aui32CountBlksStream[(_idx)]) -+ -+/*! 
Obtains the GPU ID from the supplied RGX_HWPERF_HW_DATA CNTBLK_IDW */ -+#define RGX_HWPERF_GET_CNTBLK_GPUW(_word) ((IMG_UINT16)(((_word)&RGX_CNTBLK_ID_MC_GPU_MASK)>>RGX_CNTBLK_ID_MC_GPU_SHIFT)) -+ -+#define RGX_HWPERF_GET_CNT_MASKW(_word) ((IMG_UINT16)((_word)&(~RGX_HWPERF_CNTBLK_ID_MASK))) -+ -+/*! Obtains the counter mask from the supplied RGX_HWPERF_HW_DATA address -+ * and stream index. May be used in decoding the counter block stream words -+ * of a RGX_HWPERF_HW_DATA structure. */ -+#define RGX_HWPERF_GET_CNT_MASK(_data_addr, _idx) RGX_HWPERF_GET_CNT_MASKW((_data_addr)->aui32CountBlksStream[(_idx)]) -+ -+ -+/*! Context switch packet event */ -+typedef struct -+{ -+ RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ -+ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ -+ IMG_UINT32 ui32FrameNum; /*!< Client Frame number (TA, 3D only) */ -+ IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ -+ IMG_UINT32 ui32PerfCycle; /*!< Cycle count. Used to measure HW context store latency */ -+ IMG_UINT32 ui32PerfPhase; /*!< Phase. Used to determine geometry content */ -+ IMG_UINT32 ui32Padding[2]; /*!< Padding to 8 DWords */ -+} RGX_HWPERF_CSW_DATA; -+ -+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CSW_DATA); -+ -+/*! Enumeration of clocks supporting this event */ -+typedef enum -+{ -+ RGX_HWPERF_CLKS_CHG_INVALID = 0, -+ -+ RGX_HWPERF_CLKS_CHG_NAME_CORE = 1, -+ -+ RGX_HWPERF_CLKS_CHG_LAST, -+} RGX_HWPERF_CLKS_CHG_NAME; -+ -+/*! This structure holds the data of a clocks change packet. */ -+typedef struct -+{ -+ IMG_UINT64 ui64NewClockSpeed; /*!< New Clock Speed (in Hz) */ -+ RGX_HWPERF_CLKS_CHG_NAME eClockName; /*!< Clock name */ -+ IMG_UINT32 ui32CalibratedClockSpeed; /*!< Calibrated new GPU clock speed (in Hz) */ -+ IMG_UINT64 ui64OSTimeStamp; /*!< OSTimeStamp sampled by the host */ -+ IMG_UINT64 ui64CRTimeStamp; /*!< CRTimeStamp sampled by the host and -+ correlated to OSTimeStamp */ -+} RGX_HWPERF_CLKS_CHG_DATA; -+ -+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CLKS_CHG_DATA); -+ -+/*! Enumeration of GPU utilisation states supported by this event */ -+typedef IMG_UINT32 RGX_HWPERF_GPU_STATE; -+ -+/*! This structure holds the data of a GPU utilisation state change packet. */ -+typedef struct -+{ -+ RGX_HWPERF_GPU_STATE eState; /*!< New GPU utilisation state */ -+ IMG_UINT32 uiUnused1; /*!< Padding */ -+ IMG_UINT32 uiUnused2; /*!< Padding */ -+ IMG_UINT32 uiUnused3; /*!< Padding */ -+} RGX_HWPERF_GPU_STATE_CHG_DATA; -+ -+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_GPU_STATE_CHG_DATA); -+ -+ -+/*! Signature pattern 'HPE1' found in the first word of a PWR_EST packet data */ -+#define HWPERF_PWR_EST_V1_SIG 0x48504531 -+ -+/*! Macros to obtain a component field from a counter ID word */ -+#define RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(_word) (((_word)&0x80000000)>>31) -+#define RGX_HWPERF_GET_PWR_EST_GPUID(_word) (((_word)&0x70000000)>>28) -+/*!< Obtains the GPU ID from a counter ID word */ -+#define RGX_HWPERF_GET_PWR_EST_UNIT(_word) (((_word)&0x0F000000)>>24) -+#define RGX_HWPERF_GET_PWR_EST_NUMBER(_word) ((_word)&0x0000FFFF) -+ -+#define RGX_HWPERF_PWR_EST_HIGH_OFFSET (31) -+#define RGX_HWPERF_PWR_EST_GPUID_OFFSET (28) -+#define RGX_HWPERF_PWR_EST_GPUID_MASK (0x7U) -+#define RGX_HWPERF_PWR_EST_UNIT_OFFSET (24) -+#define RGX_HWPERF_PWR_EST_UNIT_MASK (0xFU) -+#define RGX_HWPERF_PWR_EST_VALUE_MASK (0xFFFFU) -+ -+/*! 
This macro constructs a counter ID for a power estimate data stream from -+ * the component parts of: high word flag, unit id, GPU id, counter number */ -+#define RGX_HWPERF_MAKE_PWR_EST_COUNTERID(_high, _unit, _core, _number) \ -+ ((IMG_UINT32)(((IMG_UINT32)((IMG_UINT32)(_high)&0x1U)<= RGX_BVNC_STR_SIZE_MAX), -+ "Space inside HWPerf packet data for BVNC string insufficient"); -+ -+#define RGX_HWPERF_MAX_BVNC_BLOCK_LEN (20U) -+ -+/*! BVNC Features */ -+typedef struct -+{ -+ /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */ -+ IMG_UINT16 ui16BlockID; -+ -+ /*! Number of counters in this block type */ -+ IMG_UINT16 ui16NumCounters; -+ -+ /*! Number of blocks of this type */ -+ IMG_UINT16 ui16NumBlocks; -+ -+ /*! Reserved for future use */ -+ IMG_UINT16 ui16Reserved; -+} RGX_HWPERF_BVNC_BLOCK; -+ -+/*! BVNC Features */ -+typedef struct -+{ -+ IMG_CHAR aszBvncString[RGX_HWPERF_MAX_BVNC_LEN]; /*!< BVNC string */ -+ IMG_UINT32 ui32BvncKmFeatureFlags; /*!< See RGX_HWPERF_FEATURE_FLAGS */ -+ IMG_UINT16 ui16BvncBlocks; /*!< Number of blocks described in aBvncBlocks */ -+ IMG_UINT16 ui16BvncGPUCores; /*!< Number of GPU cores present */ -+ RGX_HWPERF_BVNC_BLOCK aBvncBlocks[RGX_HWPERF_MAX_BVNC_BLOCK_LEN]; /*!< Supported Performance Blocks for BVNC. See RGX_HWPERF_BVNC_BLOCK */ -+} RGX_HWPERF_BVNC; -+ -+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_BVNC); -+ -+/*! Performance Counter Configuration data element. */ -+typedef struct -+{ -+ IMG_UINT32 ui32BlockID; /*!< Counter Block ID. See RGX_HWPERF_CNTBLK_ID */ -+ IMG_UINT32 ui32NumCounters; /*!< Number of counters configured */ -+ IMG_UINT32 ui32CounterVals[RGX_CNTBLK_COUNTERS_MAX]; /*!< Counters configured (ui32NumCounters worth of entries) */ -+} RGX_HWPERF_COUNTER_CFG_DATA_EL; -+ -+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG_DATA_EL); -+ -+/*! Performance Counter Configuration data. */ -+typedef struct -+{ -+ IMG_UINT32 ui32EnabledBlocks; /*!< Number of Enabled Blocks. */ -+ RGX_HWPERF_COUNTER_CFG_DATA_EL uData; /*!< Start of variable length data. See RGX_HWPERF_COUNTER_CFG_DATA_EL */ -+ IMG_UINT32 ui32Padding; /*!< reserved */ -+} RGX_HWPERF_COUNTER_CFG; -+ -+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG); -+ -+/*! Sub-event's data. */ -+typedef union -+{ -+ struct -+ { -+ RGX_HWPERF_DM eDM; /*!< Data Master ID. */ -+ RGX_HWPERF_HWR_REASON eReason; /*!< Reason of the HWR. */ -+ IMG_UINT32 ui32DMContext; /*!< FW render context */ -+ } sHWR; /*!< HWR sub-event data. */ -+ -+ RGX_HWPERF_BVNC sBVNC; /*!< BVNC Features. See RGX_HWPERF_BVNC */ -+ struct -+ { -+ IMG_UINT32 ui32EvMaskLo; /*!< Low order 32 bits of Filter Mask */ -+ IMG_UINT32 ui32EvMaskHi; /*!< High order 32 bits of Filter Mask */ -+ } sEvMsk; /*!< HW Filter Mask */ -+ RGX_HWPERF_COUNTER_CFG sPCC; /*!< Performance Counter Config. See RGX_HWPERF_COUNTER_CFG */ -+ -+ struct -+ { -+ RGX_HWPERF_DM eDM; /*!< Data Master ID. */ -+ IMG_UINT32 ui32DMContext; /*!< FW context */ -+ IMG_UINT32 ui32GPUIdMask; /*!< Multicore mask. */ -+ IMG_UINT32 ui32KickID; /*!< Kick Id cancelled. */ -+ } sKickCancel; /*!< Kick cancel sub-event data. */ -+} RGX_HWPERF_FWACT_DETAIL; -+ -+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DETAIL); -+ -+/*! This structure holds the data of a FW activity event packet */ -+typedef struct -+{ -+ RGX_HWPERF_FWACT_EV eEvType; /*!< Event type. */ -+ RGX_HWPERF_FWACT_DETAIL uFwActDetail; /*!< Data of the sub-event. */ -+ IMG_UINT32 ui32Padding; /*!< Reserved. 
*/ -+} RGX_HWPERF_FWACT_DATA; -+ -+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DATA); -+ -+typedef struct -+{ -+ RGX_HWPERF_KICK_TYPE ui32EnqType; /*!< Workload type sent to FW for -+ scheduling on GPU hardware. -+ See RGX_HWPERF_KICK_TYPE */ -+ IMG_UINT32 ui32PID; /*!< Client process identifier */ -+ IMG_UINT32 ui32ExtJobRef; /*!< Reference used by callers of the RGX API -+ to track submitted work (for debugging / -+ trace purposes) */ -+ IMG_UINT32 ui32IntJobRef; /*!< internal reference used to track submitted -+ work (for debugging / trace purposes) */ -+ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ -+ IMG_UINT32 ui32Padding; /*!< Unused, reserved */ -+ IMG_UINT64 ui64CheckFence_UID; /*!< ID of fence gating work execution on GPU */ -+ IMG_UINT64 ui64UpdateFence_UID; /*!< ID of fence triggered after work completes on GPU */ -+ IMG_UINT64 ui64DeadlineInus; /*!< Workload deadline in system monotonic time */ -+ IMG_UINT32 ui32CycleEstimate; /*!< Estimated cycle time for the workload */ -+ PVRSRV_FENCE hCheckFence; /*!< Fence this enqueue task waits for, before starting */ -+ PVRSRV_FENCE hUpdateFence; /*!< Fence this enqueue task signals, on completion */ -+ PVRSRV_TIMELINE hUpdateTimeline; /*!< Timeline on which the above hUpdateFence is created */ -+ -+ /* Align structure size to 8 bytes */ -+} RGX_HWPERF_HOST_ENQ_DATA; -+ -+/* Payload size must be multiple of 8 bytes to align start of next packet. */ -+static_assert((sizeof(RGX_HWPERF_HOST_ENQ_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, -+ "sizeof(RGX_HWPERF_HOST_ENQ_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); -+ -+typedef struct -+{ -+ RGX_HWPERF_UFO_EV eEvType; /*!< Subtype of the event */ -+ IMG_UINT32 ui32StreamInfo; /*!< Encoded number of elements in the stream and -+ stream data offset in the payload */ -+#ifdef __CHECKER__ -+ /* Since we're not conforming to the C99 standard by not using a flexible -+ * array member need to add a special case for Smatch static code analyser. */ -+ IMG_UINT32 aui32StreamData[]; -+#else -+ IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; -+ /*!< Series of tuples holding UFO objects data */ -+ -+ IMG_UINT32 ui32Padding; /*!< Reserved, align structure size to 8 bytes */ -+#endif -+} RGX_HWPERF_HOST_UFO_DATA; -+ -+/* Payload size must be multiple of 8 bytes to align start of next packet. */ -+static_assert((sizeof(RGX_HWPERF_HOST_UFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, -+ "sizeof(RGX_HWPERF_HOST_UFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); -+ -+/*! -+ * RGX_HWPERF_HOST_RESOURCE_TYPE describes the type of resource which has been -+ * Allocated, Freed or Modified. The values are used to determine which event -+ * data structure to use to decode the data from the event stream -+ */ -+typedef enum -+{ -+ RGX_HWPERF_HOST_RESOURCE_TYPE_INVALID, /*!< Invalid */ -+ RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC, /*!< SyncPrim */ -+ RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE_DEPRECATED, -+ /*!< Timeline resource packets are -+ now emitted in client hwperf buffer */ -+ RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, /*!< Fence for use on GPU (SYNC_CP backed) */ -+ RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, /*!< Sync Checkpoint */ -+ RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, /*!< Fence created on SW timeline */ -+ -+ RGX_HWPERF_HOST_RESOURCE_TYPE_LAST /*!< End of enumeration */ -+} RGX_HWPERF_HOST_RESOURCE_TYPE; -+ -+typedef union -+{ -+ /*! Data for TYPE_TIMELINE (*Deprecated*). This sub-event is no longer -+ * generated in the HOST stream. 
Timeline data is now provided in the -+ * CLIENT stream instead. -+ */ -+ struct -+ { -+ IMG_UINT32 uiPid; /*!< Identifier of owning process */ -+ IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for timeline resource */ -+ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; -+ /*!< Label or name given to the sync resource */ -+ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ -+ } sTimelineAlloc; -+ -+ /*! Data for TYPE_FENCE_PVR */ -+ struct -+ { -+ IMG_PID uiPID; /*!< Identifier of owning process */ -+ PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ -+ IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier of the check point -+ backing this fence on the GPU */ -+ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; -+ /*!< Label or name given to the sync resource */ -+ } sFenceAlloc; -+ -+ /*! Data for TYPE_SYNC_CP */ -+ struct -+ { -+ IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */ -+ PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */ -+ IMG_PID uiPID; /*!< Identifier of owning process */ -+ PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ -+ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; -+ /*!< Label or name given to the sync resource */ -+ } sSyncCheckPointAlloc; -+ -+ /*! Data for TYPE_FENCE_SW */ -+ struct -+ { -+ IMG_PID uiPID; /*!< Identifier of owning process */ -+ PVRSRV_FENCE hSWFence; /*!< Unique identifier for the SWFence resource */ -+ PVRSRV_TIMELINE hSWTimeline; /*!< Unique identifier for the timeline resource */ -+ IMG_UINT64 ui64SyncPtIndex; /*!< Sync-pt index where this SW timeline has reached */ -+ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; -+ /*!< Label or name given to the sync resource */ -+ } sSWFenceAlloc; -+ -+ /*! Data for TYPE_SYNC */ -+ struct -+ { -+ IMG_UINT32 ui32FWAddr; /*!< Identifier of sync resource */ -+ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; -+ /*!< Label or name given to the sync resource */ -+ } sSyncAlloc; -+} RGX_HWPERF_HOST_ALLOC_DETAIL; -+ -+typedef struct -+{ -+ RGX_HWPERF_HOST_RESOURCE_TYPE ui32AllocType; -+ /*!< This describes the type of the resource -+ allocated in the driver. See -+ RGX_HWPERF_HOST_RESOURCE_TYPE */ -+ RGX_HWPERF_HOST_ALLOC_DETAIL RGXFW_ALIGN uAllocDetail; -+ /*!< Union of structures providing further -+ data regarding the resource allocated. -+ Size of data varies with union member that -+ is present, check ``ui32AllocType`` value -+ to decode */ -+} RGX_HWPERF_HOST_ALLOC_DATA; -+ -+/* Payload size must be multiple of 8 bytes to align start of next packet. */ -+static_assert((sizeof(RGX_HWPERF_HOST_ALLOC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, -+ "sizeof(RGX_HWPERF_HOST_ALLOC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); -+ -+typedef union -+{ -+ /*! Data for TYPE_TIMELINE (*Deprecated*) */ -+ struct -+ { -+ IMG_UINT32 uiPid; /*!< Identifier of owning process */ -+ IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for the timeline resource */ -+ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ -+ } sTimelineDestroy; -+ -+ /*! Data for TYPE_FENCE_PVR */ -+ struct -+ { -+ IMG_UINT64 ui64Fence_UID; /*!< Unique identifier for the fence resource */ -+ IMG_UINT32 ui32Padding; /*!< Reserved. */ -+ } sFenceDestroy; -+ -+ /*! Data for TYPE_SYNC_CP */ -+ struct -+ { -+ IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */ -+ } sSyncCheckPointFree; -+ -+ /*! 
Data for TYPE_SYNC */ -+ struct -+ { -+ IMG_UINT32 ui32FWAddr; /*!< Unique identifier for the sync resource */ -+ } sSyncFree; -+} RGX_HWPERF_HOST_FREE_DETAIL; -+ -+typedef struct -+{ -+ RGX_HWPERF_HOST_RESOURCE_TYPE ui32FreeType; -+ /*!< This describes the type of the resource -+ freed or released by the driver. See -+ RGX_HWPERF_HOST_RESOURCE_TYPE */ -+ RGX_HWPERF_HOST_FREE_DETAIL uFreeDetail; -+ /*!< Union of structures providing further data -+ regarding the resource freed. Size of data -+ varies with union member that is present, -+ check ``ui32FreeType`` value to decode */ -+ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ -+} RGX_HWPERF_HOST_FREE_DATA; -+ -+/* Payload size must be multiple of 8 bytes to align start of next packet. */ -+static_assert((sizeof(RGX_HWPERF_HOST_FREE_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, -+ "sizeof(RGX_HWPERF_HOST_FREE_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); -+ -+typedef struct -+{ -+ IMG_UINT64 ui64CRTimestamp; /*!< CR timer value from the latest entry of -+ the time domains correlation table */ -+ IMG_UINT64 ui64OSTimestamp; /*!< OS timestamp from the latest entry of the -+ time domains correlation table */ -+ IMG_UINT32 ui32ClockSpeed; /*!< GPU clock speed from the latest entry of -+ the time domains correlation table */ -+ IMG_UINT32 ui32Padding; /*!< Reserved, align structure size to 8 bytes */ -+} RGX_HWPERF_HOST_CLK_SYNC_DATA; -+ -+/* Payload size must be multiple of 8 bytes to align start of next packet. */ -+static_assert((sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, -+ "sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); -+ -+typedef union -+{ -+ /*! Data for TYPE_FENCE_PVR */ -+ struct -+ { -+ IMG_UINT64 ui64NewFence_UID; /*!< Unique identifier for the new merged fence -+ resource that has been created */ -+ IMG_UINT64 ui64InFence1_UID; /*!< Unique identifier for the fence resource */ -+ IMG_UINT64 ui64InFence2_UID; /*!< Unique identifier of the check point backing -+ the fence on the GPU */ -+ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; -+ /*!< Label or name given to the sync resource */ -+ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ -+ } sFenceMerge; -+} RGX_HWPERF_HOST_MODIFY_DETAIL; -+ -+typedef struct -+{ -+ RGX_HWPERF_HOST_RESOURCE_TYPE ui32ModifyType; -+ /*!< Describes the type of the resource -+ modified by the driver. See -+ RGX_HWPERF_HOST_RESOURCE_TYPE */ -+ -+ RGX_HWPERF_HOST_MODIFY_DETAIL uModifyDetail; -+ /*!< Union of structures providing further -+ data regarding the resource modified. -+ Size of data varies with union member that -+ is present. -+ Check ``uiModifyType`` value to decode */ -+} RGX_HWPERF_HOST_MODIFY_DATA; -+ -+/* Payload size must be multiple of 8 bytes to align start of next packet. 
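-+ * (PVRSRVTL_PACKET_ALIGNMENT is assumed to be 8 bytes here; the explicit
-+ * padding members in these host data structures exist only to satisfy this
-+ * alignment rule.)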
*/ -+static_assert((sizeof(RGX_HWPERF_HOST_MODIFY_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, -+ "sizeof(RGX_HWPERF_HOST_MODIFY_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); -+ -+typedef enum -+{ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED = 0, /*!< Invalid */ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK, /*!< Device OK */ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_NOT_RESPONDING,/*!< Device not responding to requests */ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD, /*!< Device not responding */ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT, /*!< Device has faulted */ -+ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_LAST -+} RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS; -+ -+typedef enum -+{ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED = 0, /*!< Invalid */ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE, /*!< No underlying health reason. */ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED, /*!< Device has asserted. */ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING, /*!< Device poll has failed. */ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS, /*!< Device timeout has fired. */ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT, /*!< Queue has become corrupt. */ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED, /*!< Queue has stalled. */ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING, /*!< Device is idling. */ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING, /*!< Device restarting. */ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS,/*!< Interrupts have been discarded. */ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_PCI_ERROR, /*!< PCI error detected. */ -+ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_LAST -+} RGX_HWPERF_HOST_DEVICE_HEALTH_REASON; -+ -+/*! Data for device status event */ -+typedef struct -+{ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS eDeviceHealthStatus; -+ /*!< Device's health status */ -+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON eDeviceHealthReason; -+ /*!< Reason for device's health status */ -+} RGX_HWPERF_HOST_DEVICE_HEALTH; -+ -+/*! RGX_HWPERF_DEV_INFO_EV values */ -+typedef enum -+{ -+ RGX_HWPERF_DEV_INFO_EV_HEALTH, /*!< Health sub-event */ -+ RGX_HWPERF_DEV_INFO_EV_FEATURES, /*!< Features sub-event */ -+ -+ RGX_HWPERF_DEV_INFO_EV_LAST /*!< Last enumeration value */ -+} RGX_HWPERF_DEV_INFO_EV; -+ -+/*! RGX_HWPERF_HOST_DEV_INFO_DETAIL is a union of structures providing -+ * further data regarding the device's status -+ */ -+typedef union -+{ -+ RGX_HWPERF_HOST_DEVICE_HEALTH sDeviceStatus; /*!< Device health status */ -+ RGX_HWPERF_BVNC sBVNC; /*!< Device features */ -+} RGX_HWPERF_HOST_DEV_INFO_DETAIL; -+ -+/*! RGX_HWPERF_HOST_DEV_INFO_DATA contains device health status information */ -+typedef struct -+{ -+ IMG_UINT32 ui32Padding; -+ /*!< Reserved. Align structure size to 8 bytes */ -+ RGX_HWPERF_DEV_INFO_EV eEvType; -+ /*!< Type of the sub-event. See -+ RGX_HWPERF_DEV_INFO_EV */ -+ RGX_HWPERF_HOST_DEV_INFO_DETAIL uDevInfoDetail; -+ /*!< Union of structures providing further data -+ regarding the device's status. Size of data -+ varies with union member that is present, -+ check ``eEvType`` value to decode */ -+} RGX_HWPERF_HOST_DEV_INFO_DATA; -+ -+/* Payload size must be multiple of 8 bytes to align start of next packet. */ -+static_assert((sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, -+ "sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); -+ -+/*! 
RGX_HWPERF_INFO_EV event subtype for RGX_HWPERF_HOST_INFO_DATA events */ -+typedef enum -+{ -+ RGX_HWPERF_INFO_EV_RESERVED_0, -+ RGX_HWPERF_INFO_EV_MEM64_USAGE, /*!< 64-bit Memory usage event */ -+ RGX_HWPERF_INFO_EV_LAST /*!< End of enumeration */ -+} RGX_HWPERF_INFO_EV; -+ -+/*! RGX_HWPERF_HOST_INFO_DETAIL contains the data payload for the -+ * RGX_HWPERF_HOST_INFO_DATA event. -+ */ -+typedef union -+{ -+ /*! Host Memory usage statistics */ -+ struct -+ { -+ IMG_UINT64 ui64TotalMemoryUsage; /*!< Total memory usage (bytes) */ -+ /*! Detailed memory usage */ -+ struct -+ { -+ IMG_UINT32 ui32Pid; /*!< Process ID */ -+ IMG_UINT32 ui32Padding; /*!< Padding */ -+ IMG_UINT64 ui64KernelMemUsage; /*!< Kernel memory usage (bytes) */ -+ IMG_UINT64 ui64GraphicsMemUsage; /*!< GPU memory usage (bytes) */ -+ } sPerProcessUsage[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; -+ } sMemUsageStats; -+} RGX_HWPERF_HOST_INFO_DETAIL; -+ -+/*! RGX_HWPERF_HOST_INFO_DATA. Host Info data event payload contains device -+ * memory usage information. -+ */ -+typedef struct -+{ -+ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ -+ RGX_HWPERF_INFO_EV eEvType; /*!< Type of subevent. See RGX_HWPERF_INFO_EV */ -+ RGX_HWPERF_HOST_INFO_DETAIL uInfoDetail; -+ /*!< Union of structures providing further data -+ regarding memory usage. Size varies with union -+ member that is present, check ``eEvType`` -+ value to decode */ -+} RGX_HWPERF_HOST_INFO_DATA; -+ -+/* Payload size must be multiple of 8 bytes to align start of next packet. */ -+static_assert((sizeof(RGX_HWPERF_HOST_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, -+ "sizeof(RGX_HWPERF_HOST_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); -+ -+/*! FENCE_WAIT_TYPE definitions */ -+typedef enum -+{ -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN = 0, /*!< Begin */ -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END, /*!< End */ -+ -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_LAST, /*!< Do not use */ -+} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE; -+ -+/*! FENCE_WAIT_RESULT definitions */ -+typedef enum -+{ -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_INVALID = 0, /*!< Invalid */ -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT, /*!< Timed Out */ -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED, /*!< Passed */ -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR, /*!< Errored */ -+ -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_LAST, /*!< Do not use */ -+} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT; -+ -+/*! FENCE_WAIT_DETAIL Event Payload */ -+typedef union -+{ -+/*! Data for SYNC_FENCE_WAIT_TYPE_BEGIN */ -+ struct -+ { -+ IMG_UINT32 ui32TimeoutInMs; /*!< Wait timeout (ms) */ -+ } sBegin; -+ -+ /*! Data for SYNC_FENCE_WAIT_TYPE_END */ -+ struct -+ { -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT eResult; /*!< Wait result */ -+ } sEnd; -+} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL; -+ -+/*! RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA Event Payload. This data structure -+ * is received whenever the host driver handles a wait for sync event request. -+ */ -+typedef struct -+{ -+ IMG_PID uiPID; /*!< Identifier of the owning process */ -+ PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType; -+ /*!< Type of the subevent, see -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE */ -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL uDetail; -+ /*!< Union of structures providing further data -+ regarding device's status. 
Size of data varies with -+ union member that is present, check ``eType`` value -+ to decode */ -+ -+} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA; -+ -+static_assert((sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, -+ "sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); -+ -+/*! RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA. -+ * Software Timeline Advanced Event Payload. This data structure is received -+ * whenever the host driver processes a Software Timeline Advanced event. -+ */ -+typedef struct -+{ -+ IMG_PID uiPID; /*!< Identifier of the owning process */ -+ PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */ -+ IMG_UINT64 ui64SyncPtIndex; /*!< Index of the sync point to which the -+ timeline has advanced */ -+ -+} RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA; -+ -+static_assert((sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, -+ "sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); -+ -+typedef enum -+{ -+ RGX_HWPERF_HOST_CLIENT_INFO_TYPE_INVALID = 0, /*!< Invalid */ -+ RGX_HWPERF_HOST_CLIENT_INFO_TYPE_PROCESS_NAME, /*!< Process Name */ -+ -+ RGX_HWPERF_HOST_CLIENT_INFO_TYPE_LAST, /*!< Do not use */ -+} RGX_HWPERF_HOST_CLIENT_INFO_TYPE; -+ -+typedef struct -+{ -+ IMG_PID uiClientPID; /*!< Client process identifier */ -+ IMG_UINT32 ui32Length; /*!< Number of bytes present in ``acName`` */ -+ IMG_CHAR acName[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Process name string, null terminated */ -+} RGX_HWPERF_HOST_CLIENT_PROC_NAME; -+ -+#define RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE(ui32NameLen) \ -+ ((IMG_UINT32)(offsetof(RGX_HWPERF_HOST_CLIENT_PROC_NAME, acName) + (ui32NameLen))) -+ -+typedef union -+{ -+ struct -+ { -+ IMG_UINT32 ui32Count; /*!< Number of elements in ``asProcNames`` */ -+ RGX_HWPERF_HOST_CLIENT_PROC_NAME asProcNames[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; -+ } sProcName; -+} RGX_HWPERF_HOST_CLIENT_INFO_DETAIL; -+ -+typedef struct -+{ -+ IMG_UINT32 uiReserved1; /*!< Reserved. Align structure size to 8 bytes */ -+ RGX_HWPERF_HOST_CLIENT_INFO_TYPE eType; -+ /*!< Type of the subevent, see -+ RGX_HWPERF_HOST_CLIENT_INFO_TYPE */ -+ RGX_HWPERF_HOST_CLIENT_INFO_DETAIL uDetail; -+ /*!< Union of structures. Size of data -+ varies with union member that is present, -+ check ``eType`` value to decode */ -+ -+} RGX_HWPERF_HOST_CLIENT_INFO_DATA; -+ -+static_assert((sizeof(RGX_HWPERF_HOST_CLIENT_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, -+ "sizeof(RGX_HWPERF_HOST_CLIENT_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); -+ -+/*! 
This type is a union of packet payload data structures associated with -+ * various FW and Host events */ -+typedef union -+{ -+ RGX_HWPERF_FW_DATA sFW; /*!< Firmware event packet data, -+ events ``0x01-0x06`` */ -+ RGX_HWPERF_HW_DATA sHW; /*!< Hardware event packet data, -+ events ``0x07-0x19``, ``0x28-0x29`` -+ See RGX_HWPERF_HW_DATA */ -+ RGX_HWPERF_CLKS_CHG_DATA sCLKSCHG; /*!< Clock change event packet -+ data, events ``0x1A`` */ -+ RGX_HWPERF_GPU_STATE_CHG_DATA sGPUSTATECHG; /*!< GPU utilisation state -+ change event packet data, -+ events ``0x1B`` */ -+ RGX_HWPERF_PWR_EST_DATA sPWREST; /*!< Power estimate event -+ packet data, -+ events ``0x20-0x22`` */ -+ RGX_HWPERF_PWR_CHG_DATA sPWR; /*!< Power event packet data, -+ events ``0x23`` */ -+ RGX_HWPERF_CSW_DATA sCSW; /*!< Context switch packet data, -+ events ``0x30-0x31`` */ -+ RGX_HWPERF_DVFS_DATA sDVFS; /*!< DVFS activity data, -+ events ``0x32`` */ -+ RGX_HWPERF_UFO_DATA sUFO; /*!< UFO data, events ``0x38`` */ -+ RGX_HWPERF_FWACT_DATA sFWACT; /*!< Firmware activity event -+ packet data, -+ events ``0x39`` */ -+ /* */ -+ RGX_HWPERF_HOST_ENQ_DATA sENQ; /*!< Host ENQ data, -+ events ``0x01`` (Host) */ -+ RGX_HWPERF_HOST_UFO_DATA sHUFO; /*!< Host UFO data, -+ events ``0x02`` (Host) */ -+ RGX_HWPERF_HOST_ALLOC_DATA sHALLOC; /*!< Host Alloc data, -+ events ``0x03`` (Host) */ -+ RGX_HWPERF_HOST_CLK_SYNC_DATA sHCLKSYNC; /*!< Host CLK_SYNC data, -+ events ``0x04`` (Host) */ -+ RGX_HWPERF_HOST_FREE_DATA sHFREE; /*!< Host Free data, -+ events ``0x05`` (Host) */ -+ RGX_HWPERF_HOST_MODIFY_DATA sHMOD; /*!< Host Modify data, -+ events ``0x06`` (Host) */ -+ RGX_HWPERF_HOST_DEV_INFO_DATA sHDEVINFO; /*!< Host device info data, -+ events ``0x07`` (Host) */ -+ RGX_HWPERF_HOST_INFO_DATA sHINFO; /*!< Host info data, -+ events ``0x08`` (Host) */ -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA sWAIT; /*!< Host fence-wait data, -+ events ``0x09`` (Host) */ -+ RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA sSWTLADV; /*!< Host SW-timeline advance -+ data, events ``0x0A`` (Host) */ -+ RGX_HWPERF_HOST_CLIENT_INFO_DATA sHClientInfo; /*!< Host client info, -+ events ``0x0B`` (Host) */ -+} RGX_HWPERF_V2_PACKET_DATA, *RGX_PHWPERF_V2_PACKET_DATA; -+ -+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_DATA); -+ -+#define RGX_HWPERF_GET_PACKET_DATA(_packet_addr) ((RGX_PHWPERF_V2_PACKET_DATA) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR)))) -+ -+#define RGX_HWPERF_GET_DVFS_EVENT_TYPE_PTR(_packet_addr) \ -+ ((RGX_HWPERF_DVFS_EV*) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR) + offsetof(RGX_HWPERF_DVFS_DATA,eEventType)))) -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif /* RGX_HWPERF_COMMON_H_ */ -+ -+/****************************************************************************** -+ End of file -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgx_hwperf_table.c b/drivers/gpu/drm/img-rogue/rgx_hwperf_table.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_hwperf_table.c -@@ -0,0 +1,635 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX HW Performance counter table -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX HW Performance counters table -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ /**************************************************************************/ -+ -+#include "img_defs.h" -+#include "rgx_fwif_hwperf.h" -+#if defined(__KERNEL__) -+#include "rgxdefs_km.h" -+#else -+#include "rgxdefs.h" -+#endif -+#include "rgx_hwperf_table.h" -+ -+/* Includes needed for PVRSRVKM (Server) context */ -+# include "rgx_bvnc_defs_km.h" -+# if defined(__KERNEL__) -+# include "rgxdevice.h" -+# endif -+ -+/* Shared compile-time context ASSERT macro */ -+#if defined(RGX_FIRMWARE) -+# include "rgxfw_utils.h" -+/* firmware context */ -+# define DBG_ASSERT(_c) RGXFW_ASSERT((_c)) -+#else -+# include "pvr_debug.h" -+/* host client/server context */ -+# define DBG_ASSERT(_c) PVR_ASSERT((_c)) -+#endif -+ -+/***************************************************************************** -+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() -+ -+ Referenced in gasCntBlkTypeModel[] table below and only called from -+ RGX_FIRMWARE run-time context. Therefore compile time configuration is used. 
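-+
-+ A firmware-side sketch of the intended call pattern (psModel is an assumed
-+ pointer to one gasCntBlkTypeModel[] entry; pfnIsBlkPowered is the struct
-+ member named above):
-+
-+     if (psModel->pfnIsBlkPowered(eBlkType, ui8UnitId))
-+     {
-+         (read and emit the HW counters for this block instance)
-+     }
-+
-+ i.e. counters are only sampled for block instances whose power island is
-+ currently up.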
-+ *****************************************************************************/ -+ -+#if defined(RGX_FIRMWARE) && defined(RGX_FEATURE_PERFBUS) -+# include "rgxfw_pow.h" -+# include "rgxfw_utils.h" -+ -+static bool rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId) -+{ -+ PVR_UNREFERENCED_PARAMETER(eBlkType); -+ PVR_UNREFERENCED_PARAMETER(ui8UnitId); -+ -+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) -+ /* S7XT: JONES */ -+ return (eBlkType == RGX_CNTBLK_ID_JONES); -+#elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) -+ /* S6XT: TA, TORNADO */ -+ return true; -+#else -+ /* S6 : TA, HUB, RASTER (RASCAL) */ -+ return (gsPowCtl.eUnitsPowState & RGXFW_POW_ST_RD_ON) != 0U; -+#endif -+} -+ -+/* Only use conditional compilation when counter blocks appear in different -+ * islands for different Rogue families. -+ */ -+static bool rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId) -+{ -+ IMG_UINT32 ui32NumDustsEnabled = rgxfw_pow_get_enabled_units(); -+ -+ if (((gsPowCtl.eUnitsPowState & RGXFW_POW_ST_RD_ON) != 0U) && -+ (ui32NumDustsEnabled > 0U)) -+ { -+#if defined(RGX_FEATURE_DYNAMIC_DUST_POWER) -+ IMG_UINT32 ui32NumUscEnabled = ui32NumDustsEnabled*2U; -+ -+ switch (eBlkType) -+ { -+ case RGX_CNTBLK_ID_TPU_MCU0: /* S6 and S6XT */ -+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) -+ case RGX_CNTBLK_ID_TEXAS0: /* S7 */ -+#endif -+ if (ui8UnitId >= ui32NumDustsEnabled) -+ { -+ return false; -+ } -+ break; -+ case RGX_CNTBLK_ID_USC0: /* S6, S6XT, S7 */ -+ case RGX_CNTBLK_ID_PBE0: /* S7, PBE2_IN_XE */ -+ /* Handle single cluster cores */ -+ if (ui8UnitId >= ((ui32NumUscEnabled > RGX_FEATURE_NUM_CLUSTERS) ? RGX_FEATURE_NUM_CLUSTERS : ui32NumUscEnabled)) -+ { -+ return false; -+ } -+ break; -+ case RGX_CNTBLK_ID_BLACKPEARL0: /* S7 */ -+ case RGX_CNTBLK_ID_RASTER0: /* S6XT */ -+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) -+ case RGX_CNTBLK_ID_TEXAS0: /* S6XT */ -+#endif -+ if (ui8UnitId >= (RGX_REQ_NUM_PHANTOMS(ui32NumUscEnabled))) -+ { -+ return false; -+ } -+ break; -+ default: -+ RGXFW_ASSERT(false); /* should never get here, table error */ -+ break; -+ } -+#else -+ /* Always true, no fused DUSTs, all powered so do not check unit */ -+ PVR_UNREFERENCED_PARAMETER(eBlkType); -+ PVR_UNREFERENCED_PARAMETER(ui8UnitId); -+#endif -+ } -+ else -+ { -+ return false; -+ } -+ return true; -+} -+ -+#else /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */ -+ -+# define rgxfw_hwperf_pow_st_direct ((void*)NULL) -+# define rgxfw_hwperf_pow_st_indirect ((void*)NULL) -+ -+#endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */ -+ -+/***************************************************************************** -+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() end -+ *****************************************************************************/ -+ -+/***************************************************************************** -+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() start -+ -+ Referenced in gasCntBlkTypeModel[] table below and called from all build -+ contexts: -+ RGX_FIRMWARE, PVRSRVCTL (UM) and PVRSRVKM (Server). -+ -+ Therefore each function has two implementations, one for compile time and one -+ run time configuration depending on the context. The functions will inform the -+ caller whether this block is valid for this particular RGX device. Other -+ run-time dependent data is returned in psRtInfo for the caller to use. 
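-+
-+ A server-side sketch of the intended call pattern (psModel and psDevInfo are
-+ assumed names for a gasCntBlkTypeModel[] entry and the RGX device being
-+ queried):
-+
-+     RGX_HWPERF_CNTBLK_RT_INFO sRtInfo;
-+
-+     if (psModel->pfnIsBlkPresent(psModel, psDevInfo, &sRtInfo))
-+     {
-+         (sRtInfo.ui32NumUnits and sRtInfo.ui32IndirectReg then describe how
-+          many instances of the block exist and which indirect register is
-+          used to address them)
-+     }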
-+ *****************************************************************************/ -+ -+/* Used for block types: USC */ -+static IMG_BOOL rgx_hwperf_blk_present_perfbus(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) -+{ -+ DBG_ASSERT(psBlkTypeDesc != NULL); -+ DBG_ASSERT(psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_USC0); -+ -+#if defined(__KERNEL__) /* Server context */ -+ PVR_ASSERT(pvDev_km != NULL); -+ PVR_ASSERT(pvRtInfo != NULL); -+ { -+ RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; -+ const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km; -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS)) -+ { -+ psRtInfo->ui32NumUnits = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) ? RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) : 0; -+ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; -+ return IMG_TRUE; -+ } -+ } -+#else /* FW context */ -+ PVR_UNREFERENCED_PARAMETER(pvDev_km); -+ PVR_UNREFERENCED_PARAMETER(pvRtInfo); -+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); -+# if defined(RGX_FEATURE_PERFBUS) -+ return IMG_TRUE; -+# endif -+#endif -+ return IMG_FALSE; -+} -+ -+/* Used for block types: Direct RASTERISATION, HUB */ -+static IMG_BOOL rgx_hwperf_blk_present_not_clustergrouping(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) -+{ -+ DBG_ASSERT(psBlkTypeDesc != NULL); -+ DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_RASTER) || -+ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_HUB)); -+ -+#if defined(__KERNEL__) /* Server context */ -+ PVR_ASSERT(pvDev_km != NULL); -+ PVR_ASSERT(pvRtInfo != NULL); -+ { -+ RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; -+ const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km; -+ if ((!RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) && -+ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS))) -+ { -+ psRtInfo->ui32NumUnits = 1; -+ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; -+ return IMG_TRUE; -+ } -+ } -+#else /* FW context */ -+ PVR_UNREFERENCED_PARAMETER(pvDev_km); -+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); -+ PVR_UNREFERENCED_PARAMETER(pvRtInfo); -+# if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) -+ return IMG_TRUE; -+# endif -+#endif -+ return IMG_FALSE; -+} -+ -+#if defined(__KERNEL__) /* Server context */ -+static IMG_UINT32 rgx_units_indirect_by_phantom(const PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg) -+{ -+ /* Run-time math for RGX_HWPERF_INDIRECT_BY_PHANTOM */ -+ return ((psFeatCfg->ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK) == 0) ? 
1 -+ : (psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]+3)/4; -+} -+ -+static IMG_UINT32 rgx_units_phantom_indirect_by_dust(const PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg) -+{ -+ /* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST */ -+ return MAX((psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]>>1),1); -+} -+ -+static IMG_UINT32 rgx_units_phantom_indirect_by_cluster(const PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg) -+{ -+ /* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER */ -+ return psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]; -+} -+#endif /* defined(__KERNEL__) */ -+ -+/* Used for block types: TORNADO, TEXAS, Indirect RASTERISATION */ -+static IMG_BOOL rgx_hwperf_blk_present_xttop(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) -+{ -+ DBG_ASSERT(psBlkTypeDesc != NULL); -+ DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TORNADO) || -+ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) || -+ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_RASTER0)); -+ -+#if defined(__KERNEL__) /* Server context */ -+ PVR_ASSERT(pvDev_km != NULL); -+ PVR_ASSERT(pvRtInfo != NULL); -+ { -+ RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; -+ const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km; -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE)) -+ { -+ if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TORNADO) -+ { -+ psRtInfo->ui32NumUnits = 1; -+ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; -+ return IMG_TRUE; -+ } -+ else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) -+ { -+ psRtInfo->ui32NumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg); -+ psRtInfo->ui32IndirectReg = RGX_CR_TEXAS_PERF_INDIRECT; -+ return IMG_TRUE; -+ } -+ else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_RASTER0) -+ { -+ psRtInfo->ui32NumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg); -+ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; -+ return IMG_TRUE; -+ } -+ } -+ } -+#else /* FW context */ -+ PVR_UNREFERENCED_PARAMETER(pvDev_km); -+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); -+ PVR_UNREFERENCED_PARAMETER(pvRtInfo); -+# if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) -+ return IMG_TRUE; -+# endif -+#endif -+ return IMG_FALSE; -+} -+ -+/* Used for block types: JONES, TPU_MCU, TEXAS, BLACKPERL, PBE */ -+static IMG_BOOL rgx_hwperf_blk_present_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) -+{ -+ DBG_ASSERT(psBlkTypeDesc != NULL); -+ DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_JONES) || -+ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) || -+ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) || -+ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0) || -+ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0)); -+ -+#if defined(__KERNEL__) /* Server context */ -+ PVR_ASSERT(pvDev_km != NULL); -+ PVR_ASSERT(pvRtInfo != NULL); -+ { -+ RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; -+ const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km; -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) -+ { -+ if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) -+ { -+ psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg); -+ 
psRtInfo->ui32IndirectReg = RGX_CR_TPU_PERF_INDIRECT; -+ return IMG_TRUE; -+ } -+ else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) -+ { -+ psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg); -+ psRtInfo->ui32IndirectReg = RGX_CR_TEXAS3_PERF_INDIRECT; -+ return IMG_TRUE; -+ } -+ else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0) -+ { -+ psRtInfo->ui32NumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg); -+ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; -+ return IMG_TRUE; -+ } -+ else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0) -+ { -+ psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_cluster(&psDevInfo->sDevFeatureCfg); -+ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; -+ return IMG_TRUE; -+ } -+ else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_JONES) -+ { -+ psRtInfo->ui32NumUnits = 1; -+ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; -+ return IMG_TRUE; -+ } -+ } -+ } -+#else /* FW context */ -+ PVR_UNREFERENCED_PARAMETER(pvDev_km); -+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); -+ PVR_UNREFERENCED_PARAMETER(pvRtInfo); -+# if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) -+ return IMG_TRUE; -+# else -+# endif -+#endif -+ return IMG_FALSE; -+} -+ -+/* Used for block types: TA, TPU_MCU. Also PBE when PBE2_IN_XE is present */ -+static IMG_BOOL rgx_hwperf_blk_present_not_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) -+{ -+ DBG_ASSERT(psBlkTypeDesc != NULL); -+ DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TA) || -+ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) || -+ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0)); -+ -+#if defined(__KERNEL__) /* Server context */ -+ PVR_ASSERT(pvDev_km != NULL); -+ PVR_ASSERT(pvRtInfo != NULL); -+ { -+ RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; -+ const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km; -+ if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE) && -+ RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS)) -+ { -+ if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TA) -+ { -+ psRtInfo->ui32NumUnits = 1; -+ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; -+ return IMG_TRUE; -+ } -+ else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0) -+ { -+ if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE)) -+ { -+ /* PBE counters are not present on this config */ -+ return IMG_FALSE; -+ } -+ psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_cluster(&psDevInfo->sDevFeatureCfg); -+ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; -+ return IMG_TRUE; -+ } -+ else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) -+ { -+ psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg); -+ psRtInfo->ui32IndirectReg = RGX_CR_TPU_MCU_L0_PERF_INDIRECT; -+ return IMG_TRUE; -+ } -+ } -+ } -+#else /* FW context */ -+ PVR_UNREFERENCED_PARAMETER(pvDev_km); -+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); -+ PVR_UNREFERENCED_PARAMETER(pvRtInfo); -+# if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) -+# if !defined(RGX_FEATURE_PBE2_IN_XE) -+ if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0) -+ { -+ /* No support for PBE counters without PBE2_IN_XE */ -+ return IMG_FALSE; -+ } -+# endif -+ return IMG_TRUE; -+# endif -+#endif -+ return IMG_FALSE; -+} -+ -+static IMG_BOOL 
rgx_hwperf_blk_present_check_s7top_or_not(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) -+{ -+#if defined(__KERNEL__) -+ return (rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo) -+ || rgx_hwperf_blk_present_not_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo)); -+ -+#elif defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) -+ return rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo); -+ -+#elif defined(RGX_FEATURE_PBE2_IN_XE) || defined(RGX_FEATURE_PERFBUS) -+ return rgx_hwperf_blk_present_not_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo); -+#else -+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); -+ PVR_UNREFERENCED_PARAMETER(pvDev_km); -+ PVR_UNREFERENCED_PARAMETER(pvRtInfo); -+ return IMG_FALSE; -+#endif -+} -+ -+static IMG_BOOL rgx_hwperf_blk_present_check_s7top_or_xttop(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) -+{ -+#if defined(__KERNEL__) -+ return (rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo) -+ || rgx_hwperf_blk_present_xttop(psBlkTypeDesc, pvDev_km, pvRtInfo)); -+ -+#elif defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) -+ return rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo); -+ -+#elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) -+ return rgx_hwperf_blk_present_xttop(psBlkTypeDesc, pvDev_km, pvRtInfo); -+#else -+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); -+ PVR_UNREFERENCED_PARAMETER(pvDev_km); -+ PVR_UNREFERENCED_PARAMETER(pvRtInfo); -+ return IMG_FALSE; -+#endif -+} -+ -+#if !defined(__KERNEL__) /* Firmware or User-mode context */ -+static IMG_BOOL rgx_hwperf_blk_present_false(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) -+{ -+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); -+ PVR_UNREFERENCED_PARAMETER(pvDev_km); -+ PVR_UNREFERENCED_PARAMETER(pvRtInfo); -+ -+ /* Some functions not used on some BVNCs, silence compiler warnings */ -+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_perfbus); -+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_clustergrouping); -+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_xttop); -+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_s7top); -+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_s7top); -+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_check_s7top_or_not); -+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_check_s7top_or_xttop); -+ -+ return IMG_FALSE; -+} -+ -+/* Used to instantiate a null row in the block type model table below where the -+ * block is not supported for a given build BVNC in firmware/user mode context. -+ * This is needed as the blockid to block type lookup uses the table as well -+ * and clients may try to access blocks not in the hardware. 
*/ -+#define RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(_blkid) {_blkid, 0, 0, 0, 0, 0, 0, 0, 0, #_blkid, NULL, rgx_hwperf_blk_present_false} -+ -+#endif -+ -+ -+/***************************************************************************** -+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() end -+ *****************************************************************************/ -+ -+#if defined(__KERNEL__) /* Values will be calculated at run-time */ -+#define RGX_HWPERF_NUM_BLOCK_UNITS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC -+#define RGX_INDIRECT_REG_TEXAS 0xFFFFFFFF -+#define RGX_INDIRECT_REG_TPU 0xFFFFFFFF -+ -+#elif defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) -+#define RGX_HWPERF_NUM_BLOCK_UNITS RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST -+#define RGX_INDIRECT_REG_TEXAS RGX_CR_TEXAS3_PERF_INDIRECT -+#define RGX_INDIRECT_REG_TPU RGX_CR_TPU_PERF_INDIRECT -+ -+#else -+ -+#if defined(RGX_FEATURE_PERFBUS) -+#define RGX_INDIRECT_REG_TPU RGX_CR_TPU_MCU_L0_PERF_INDIRECT -+#endif -+ -+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) -+#define RGX_HWPERF_NUM_BLOCK_UNITS RGX_HWPERF_INDIRECT_BY_PHANTOM -+#define RGX_INDIRECT_REG_TEXAS RGX_CR_TEXAS_PERF_INDIRECT -+#endif -+ -+#endif -+ -+ -+/***************************************************************************** -+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] table -+ -+ This table holds the entries for the performance counter block type model. -+ Where the block is not present on an RGX device in question the -+ pfnIsBlkPresent() returns false, if valid and present it returns true. -+ Columns in the table with a ** indicate the value is a default and the -+ value returned in RGX_HWPERF_CNTBLK_RT_INFO when calling pfnIsBlkPresent() -+ should be used at runtime by the caller. These columns are only valid for -+ compile time BVNC configured contexts. -+ -+ Order of table rows must match order of counter block IDs in the enumeration -+ RGX_HWPERF_CNTBLK_ID. 
-+ *****************************************************************************/ -+ -+static const RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] = -+{ -+ /* ui32CntBlkIdBase, ui32IndirectReg, ui32PerfReg, ui32Select0BaseReg, ui32Counter0BaseReg ui8NumCounters, ui32NumUnits**, ui8SelectRegModeShift, ui8SelectRegOffsetShift, pfnIsBlkPowered pfnIsBlkPresent -+ * pszBlockNameComment, */ -+ /*RGX_CNTBLK_ID_TA*/ -+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__) -+ {RGX_CNTBLK_ID_TA, 0, /* direct */ RGX_CR_TA_PERF, RGX_CR_TA_PERF_SELECT0, RGX_CR_TA_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_TA_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_s7top }, -+#else -+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TA), -+#endif -+ -+ /*RGX_CNTBLK_ID_RASTER*/ -+#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__) -+ {RGX_CNTBLK_ID_RASTER, 0, /* direct */ RGX_CR_RASTERISATION_PERF, RGX_CR_RASTERISATION_PERF_SELECT0, RGX_CR_RASTERISATION_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_RASTERISATION_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_clustergrouping }, -+#else -+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER), -+#endif -+ -+ /*RGX_CNTBLK_ID_HUB*/ -+#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__) -+ {RGX_CNTBLK_ID_HUB, 0, /* direct */ RGX_CR_HUB_BIFPMCACHE_PERF, RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0, RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_HUB_BIFPMCACHE_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_clustergrouping }, -+#else -+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_HUB), -+#endif -+ -+ /*RGX_CNTBLK_ID_TORNADO*/ -+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__) -+ {RGX_CNTBLK_ID_TORNADO, 0, /* direct */ RGX_CR_TORNADO_PERF, RGX_CR_TORNADO_PERF_SELECT0, RGX_CR_TORNADO_PERF_COUNTER_0, 4, 1, 21, 4, "RGX_CR_TORNADO_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_xttop }, -+#else -+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TORNADO), -+#endif -+ -+ /*RGX_CNTBLK_ID_JONES*/ -+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__) -+ {RGX_CNTBLK_ID_JONES, 0, /* direct */ RGX_CR_JONES_PERF, RGX_CR_JONES_PERF_SELECT0, RGX_CR_JONES_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_JONES_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_s7top }, -+#else -+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_JONES), -+#endif -+ -+ /*RGX_CNTBLK_ID_TPU_MCU0*/ -+#if defined(__KERNEL__) || (defined(RGX_FEATURE_PERFBUS) && !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)) || defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) -+ {RGX_CNTBLK_ID_TPU_MCU0, RGX_INDIRECT_REG_TPU, RGX_CR_TPU_MCU_L0_PERF, RGX_CR_TPU_MCU_L0_PERF_SELECT0, RGX_CR_TPU_MCU_L0_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST, 21, 3, "RGX_CR_TPU_MCU_L0_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_check_s7top_or_not }, -+#else -+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TPU_MCU0), -+#endif -+ -+ /*RGX_CNTBLK_ID_USC0*/ -+#if defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__) -+ {RGX_CNTBLK_ID_USC0, RGX_CR_USC_PERF_INDIRECT, RGX_CR_USC_PERF, RGX_CR_USC_PERF_SELECT0, RGX_CR_USC_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21, 3, "RGX_CR_USC_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_perfbus }, -+#else -+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_USC0), -+#endif -+ -+ /*RGX_CNTBLK_ID_TEXAS0*/ -+#if defined(__KERNEL__) 
|| defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) -+ {RGX_CNTBLK_ID_TEXAS0, RGX_INDIRECT_REG_TEXAS, RGX_CR_TEXAS_PERF, RGX_CR_TEXAS_PERF_SELECT0, RGX_CR_TEXAS_PERF_COUNTER_0, 6, RGX_HWPERF_NUM_BLOCK_UNITS, 31, 3, "RGX_CR_TEXAS_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_check_s7top_or_xttop }, -+#else -+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TEXAS0), -+#endif -+ -+ /*RGX_CNTBLK_ID_RASTER0*/ -+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__) -+ {RGX_CNTBLK_ID_RASTER0, RGX_CR_RASTERISATION_PERF_INDIRECT, RGX_CR_RASTERISATION_PERF, RGX_CR_RASTERISATION_PERF_SELECT0, RGX_CR_RASTERISATION_PERF_COUNTER_0, 4, RGX_HWPERF_INDIRECT_BY_PHANTOM, 21, 3, "RGX_CR_RASTERISATION_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_xttop }, -+#else -+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER0), -+#endif -+ -+ /*RGX_CNTBLK_ID_BLACKPEARL0*/ -+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__) -+ {RGX_CNTBLK_ID_BLACKPEARL0, RGX_CR_BLACKPEARL_PERF_INDIRECT, RGX_CR_BLACKPEARL_PERF, RGX_CR_BLACKPEARL_PERF_SELECT0, RGX_CR_BLACKPEARL_PERF_COUNTER_0, 6, RGX_HWPERF_INDIRECT_BY_PHANTOM, 21, 3, "RGX_CR_BLACKPEARL_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top }, -+#else -+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BLACKPEARL0), -+#endif -+ -+ /*RGX_CNTBLK_ID_PBE0*/ -+#if defined(__KERNEL__) || defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(RGX_FEATURE_PBE2_IN_XE) -+ {RGX_CNTBLK_ID_PBE0, RGX_CR_PBE_PERF_INDIRECT, RGX_CR_PBE_PERF, RGX_CR_PBE_PERF_SELECT0, RGX_CR_PBE_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21, 3, "RGX_CR_PBE_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_check_s7top_or_not }, -+#else -+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_PBE0), -+#endif -+}; -+ -+ -+IMG_INTERNAL IMG_UINT32 -+RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel) -+{ -+ *ppsModel = gasCntBlkTypeModel; -+ return ARRAY_SIZE(gasCntBlkTypeModel); -+} -+ -+/****************************************************************************** -+ End of file (rgx_hwperf_table.c) -+ ******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgx_hwperf_table.h b/drivers/gpu/drm/img-rogue/rgx_hwperf_table.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_hwperf_table.h -@@ -0,0 +1,116 @@ -+/*************************************************************************/ /*! -+@File -+@Title HWPerf counter table header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Utility functions used internally for HWPerf data retrieval -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RGX_HWPERF_TABLE_H -+#define RGX_HWPERF_TABLE_H -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "rgx_fwif_hwperf.h" -+#if defined(__KERNEL__) -+#include "rgxdevice.h" -+#endif -+/*****************************************************************************/ -+ -+/* Forward declaration */ -+typedef struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ RGXFW_HWPERF_CNTBLK_TYPE_MODEL; -+ -+/* Function pointer type for functions to check dynamic power state of -+ * counter block instance. Used only in firmware. */ -+typedef bool (*PFN_RGXFW_HWPERF_CNTBLK_POWERED)( -+ RGX_HWPERF_CNTBLK_ID eBlkType, -+ IMG_UINT8 ui8UnitId); -+ -+#if defined(__KERNEL__) -+/* Counter block run-time info */ -+typedef struct -+{ -+ IMG_UINT32 ui32IndirectReg; /* 0 if direct type otherwise the indirect control register to select indirect unit */ -+ IMG_UINT32 ui32NumUnits; /* Number of instances of this block type in the core */ -+} RGX_HWPERF_CNTBLK_RT_INFO; -+#endif -+ -+/* Function pointer type for functions to check block is valid and present -+ * on that RGX Device at runtime. It may have compile logic or run-time -+ * logic depending on where the code executes: server, srvinit or firmware. -+ * Values in the psRtInfo output parameter are only valid if true returned. -+ */ -+typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_PRESENT)( -+ const struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_* psBlkTypeDesc, -+ const void *pvDev_km, -+ void *pvRtInfo); -+ -+/* This structure encodes properties of a type of performance counter block. -+ * The structure is sometimes referred to as a block type descriptor. These -+ * properties contained in this structure represent the columns in the block -+ * type model table variable below. These values vary depending on the build -+ * BVNC and core type. -+ * Each direct block has a unique type descriptor and each indirect group has -+ * a type descriptor. -+ */ -+struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ -+{ -+ /* Could use RGXFW_ALIGN_DCACHEL here but then we would waste 40% of the cache line? 
*/ -+ IMG_UINT32 ui32CntBlkIdBase; /* The starting block id for this block type */ -+ IMG_UINT32 ui32IndirectReg; /* 0 if direct type otherwise the indirect control register to select indirect unit */ -+ IMG_UINT32 ui32PerfReg; /* RGX_CR_*_PERF register for this block type */ -+ IMG_UINT32 ui32Select0BaseReg; /* RGX_CR_*_PERF_SELECT0 register for this block type */ -+ IMG_UINT32 ui32Counter0BaseReg; /* RGX_CR_*_PERF_COUNTER_0 register for this block type */ -+ IMG_UINT8 ui8NumCounters; /* Number of counters in this block type */ -+ IMG_UINT8 ui8NumUnits; /* Number of instances of this block type in the core */ -+ IMG_UINT8 ui8SelectRegModeShift; /* Mode field shift value of select registers */ -+ IMG_UINT8 ui8SelectRegOffsetShift; /* Interval between select registers, either 8 bytes or 16, hence << 3 or << 4 */ -+ const IMG_CHAR *pszBlockNameComment; /* Name of the PERF register. Used while dumping the perf counters to pdumps */ -+ PFN_RGXFW_HWPERF_CNTBLK_POWERED pfnIsBlkPowered; /* A function to determine dynamic power state for the block type */ -+ PFN_RGXFW_HWPERF_CNTBLK_PRESENT pfnIsBlkPresent; /* A function to determine presence on RGX Device at run-time */ -+}; -+ -+/*****************************************************************************/ -+ -+IMG_INTERNAL IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel); -+ -+#endif /* RGX_HWPERF_TABLE_H */ -+ -+/****************************************************************************** -+ End of file (rgx_hwperf_table.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgx_memallocflags.h b/drivers/gpu/drm/img-rogue/rgx_memallocflags.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_memallocflags.h -@@ -0,0 +1,58 @@ -+/**************************************************************************/ /*! -+@File -+@Title RGX device specific memory allocation flags -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. 
-+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RGX_MEMALLOCFLAGS_H -+#define RGX_MEMALLOCFLAGS_H -+ -+ -+/* Include pvrsrv layer header as the flags below are used in the device -+ * field defined in this header inside Services code. -+ * See PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK */ -+#include "pvrsrv_memallocflags.h" -+ -+ -+/* Device specific MMU flags */ -+#define PMMETA_PROTECT (1U << 0) /*!< Memory that only the PM and Meta can access */ -+#define FIRMWARE_CACHED (1U << 1) /*!< Memory that is cached in META/MIPS */ -+ -+ -+#endif /* RGX_MEMALLOCFLAGS_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgx_meta.h b/drivers/gpu/drm/img-rogue/rgx_meta.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_meta.h -@@ -0,0 +1,379 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX META definitions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX META helper definitions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGX_META_H) -+#define RGX_META_H -+ -+ -+/***** The META HW register definitions in the file are updated manually *****/ -+ -+ -+#include "img_defs.h" -+#include "km/rgxdefs_km.h" -+ -+ -+/****************************************************************************** -+* META registers and MACROS -+******************************************************************************/ -+#define META_CR_CTRLREG_BASE(T) (0x04800000U + (0x1000U*(T))) -+ -+#define META_CR_TXPRIVEXT (0x048000E8) -+#define META_CR_TXPRIVEXT_MINIM_EN (IMG_UINT32_C(0x1) << 7) -+ -+#define META_CR_SYSC_JTAG_THREAD (0x04830030) -+#define META_CR_SYSC_JTAG_THREAD_PRIV_EN (0x00000004) -+ -+#define META_CR_PERF_COUNT0 (0x0480FFE0) -+#define META_CR_PERF_COUNT1 (0x0480FFE8) -+#define META_CR_PERF_COUNT_CTRL_SHIFT (28) -+#define META_CR_PERF_COUNT_CTRL_MASK (0xF0000000) -+#define META_CR_PERF_COUNT_CTRL_DCACHEHITS (IMG_UINT32_C(0x8) << META_CR_PERF_COUNT_CTRL_SHIFT) -+#define META_CR_PERF_COUNT_CTRL_ICACHEHITS (IMG_UINT32_C(0x9) << META_CR_PERF_COUNT_CTRL_SHIFT) -+#define META_CR_PERF_COUNT_CTRL_ICACHEMISS (IMG_UINT32_C(0xA) << META_CR_PERF_COUNT_CTRL_SHIFT) -+#define META_CR_PERF_COUNT_CTRL_ICORE (IMG_UINT32_C(0xD) << META_CR_PERF_COUNT_CTRL_SHIFT) -+#define META_CR_PERF_COUNT_THR_SHIFT (24) -+#define META_CR_PERF_COUNT_THR_MASK (0x0F000000) -+#define META_CR_PERF_COUNT_THR_0 (IMG_UINT32_C(0x1) << META_CR_PERF_COUNT_THR_SHIFT) -+#define META_CR_PERF_COUNT_THR_1 (IMG_UINT32_C(0x2) << META_CR_PERF_COUNT_THR_SHIFT) -+ -+#define META_CR_TxVECINT_BHALT (0x04820500) -+#define META_CR_PERF_ICORE0 (0x0480FFD0) -+#define META_CR_PERF_ICORE1 (0x0480FFD8) -+#define META_CR_PERF_ICORE_DCACHEMISS (0x8) -+ -+#define META_CR_PERF_COUNT(CTRL, THR) ((META_CR_PERF_COUNT_CTRL_##CTRL << META_CR_PERF_COUNT_CTRL_SHIFT) | \ -+ (THR << META_CR_PERF_COUNT_THR_SHIFT)) -+ -+#define META_CR_TXUXXRXDT_OFFSET (META_CR_CTRLREG_BASE(0U) + 0x0000FFF0U) -+#define META_CR_TXUXXRXRQ_OFFSET (META_CR_CTRLREG_BASE(0U) + 0x0000FFF8U) -+ -+#define META_CR_TXUXXRXRQ_DREADY_BIT (0x80000000U) /* Poll for done */ -+#define META_CR_TXUXXRXRQ_RDnWR_BIT (0x00010000U) /* Set for read */ -+#define META_CR_TXUXXRXRQ_TX_S (12) -+#define META_CR_TXUXXRXRQ_RX_S (4) -+#define META_CR_TXUXXRXRQ_UXX_S (0) -+ -+#define META_CR_TXUIN_ID (0x0) /* Internal ctrl regs */ -+#define META_CR_TXUD0_ID (0x1) /* Data unit regs */ -+#define META_CR_TXUD1_ID (0x2) /* Data unit regs */ -+#define META_CR_TXUA0_ID (0x3) /* Address unit regs */ -+#define META_CR_TXUA1_ID (0x4) /* Address unit regs */ -+#define META_CR_TXUPC_ID (0x5) /* PC registers */ -+ -+/* Macros to calculate register access values */ -+#define META_CR_CORE_REG(Thr, RegNum, Unit) (((IMG_UINT32)(Thr) << META_CR_TXUXXRXRQ_TX_S) | \ -+ ((IMG_UINT32)(RegNum) << META_CR_TXUXXRXRQ_RX_S) | \ -+ ((IMG_UINT32)(Unit) << META_CR_TXUXXRXRQ_UXX_S)) -+ -+#define META_CR_THR0_PC META_CR_CORE_REG(0, 0, 
META_CR_TXUPC_ID) -+#define META_CR_THR0_PCX META_CR_CORE_REG(0, 1, META_CR_TXUPC_ID) -+#define META_CR_THR0_SP META_CR_CORE_REG(0, 0, META_CR_TXUA0_ID) -+ -+#define META_CR_THR1_PC META_CR_CORE_REG(1, 0, META_CR_TXUPC_ID) -+#define META_CR_THR1_PCX META_CR_CORE_REG(1, 1, META_CR_TXUPC_ID) -+#define META_CR_THR1_SP META_CR_CORE_REG(1, 0, META_CR_TXUA0_ID) -+ -+#define SP_ACCESS(Thread) META_CR_CORE_REG(Thread, 0, META_CR_TXUA0_ID) -+#define PC_ACCESS(Thread) META_CR_CORE_REG(Thread, 0, META_CR_TXUPC_ID) -+ -+#define META_CR_COREREG_ENABLE (0x0000000U) -+#define META_CR_COREREG_STATUS (0x0000010U) -+#define META_CR_COREREG_DEFR (0x00000A0U) -+#define META_CR_COREREG_PRIVEXT (0x00000E8U) -+ -+#define META_CR_T0ENABLE_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_ENABLE) -+#define META_CR_T0STATUS_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_STATUS) -+#define META_CR_T0DEFR_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_DEFR) -+#define META_CR_T0PRIVEXT_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_PRIVEXT) -+ -+#define META_CR_T1ENABLE_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_ENABLE) -+#define META_CR_T1STATUS_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_STATUS) -+#define META_CR_T1DEFR_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_DEFR) -+#define META_CR_T1PRIVEXT_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_PRIVEXT) -+ -+#define META_CR_TXENABLE_ENABLE_BIT (0x00000001U) /* Set if running */ -+#define META_CR_TXSTATUS_PRIV (0x00020000U) -+#define META_CR_TXPRIVEXT_MINIM (0x00000080U) -+ -+#define META_MEM_GLOBAL_RANGE_BIT (0x80000000U) -+ -+#define META_CR_TXCLKCTRL (0x048000B0) -+#define META_CR_TXCLKCTRL_ALL_ON (0x55111111) -+#define META_CR_TXCLKCTRL_ALL_AUTO (0xAA222222) -+ -+ -+/****************************************************************************** -+* META LDR Format -+******************************************************************************/ -+/* Block header structure */ -+typedef struct -+{ -+ IMG_UINT32 ui32DevID; -+ IMG_UINT32 ui32SLCode; -+ IMG_UINT32 ui32SLData; -+ IMG_UINT16 ui16PLCtrl; -+ IMG_UINT16 ui16CRC; -+ -+} RGX_META_LDR_BLOCK_HDR; -+ -+/* High level data stream block structure */ -+typedef struct -+{ -+ IMG_UINT16 ui16Cmd; -+ IMG_UINT16 ui16Length; -+ IMG_UINT32 ui32Next; -+ IMG_UINT32 aui32CmdData[4]; -+ -+} RGX_META_LDR_L1_DATA_BLK; -+ -+/* High level data stream block structure */ -+typedef struct -+{ -+ IMG_UINT16 ui16Tag; -+ IMG_UINT16 ui16Length; -+ IMG_UINT32 aui32BlockData[4]; -+ -+} RGX_META_LDR_L2_DATA_BLK; -+ -+/* Config command structure */ -+typedef struct -+{ -+ IMG_UINT32 ui32Type; -+ IMG_UINT32 aui32BlockData[4]; -+ -+} RGX_META_LDR_CFG_BLK; -+ -+/* Block type definitions */ -+#define RGX_META_LDR_COMMENT_TYPE_MASK (0x0010U) -+#define RGX_META_LDR_BLK_IS_COMMENT(X) ((X & RGX_META_LDR_COMMENT_TYPE_MASK) != 0U) -+ -+/* Command definitions -+ * Value Name Description -+ * 0 LoadMem Load memory with binary data. -+ * 1 LoadCore Load a set of core registers. -+ * 2 LoadMMReg Load a set of memory mapped registers. -+ * 3 StartThreads Set each thread PC and SP, then enable threads. -+ * 4 ZeroMem Zeros a memory region. -+ * 5 Config Perform a configuration command. 
-+ */ -+#define RGX_META_LDR_CMD_MASK (0x000FU) -+ -+#define RGX_META_LDR_CMD_LOADMEM (0x0000U) -+#define RGX_META_LDR_CMD_LOADCORE (0x0001U) -+#define RGX_META_LDR_CMD_LOADMMREG (0x0002U) -+#define RGX_META_LDR_CMD_START_THREADS (0x0003U) -+#define RGX_META_LDR_CMD_ZEROMEM (0x0004U) -+#define RGX_META_LDR_CMD_CONFIG (0x0005U) -+ -+/* Config Command definitions -+ * Value Name Description -+ * 0 Pause Pause for x times 100 instructions -+ * 1 Read Read a value from register - No value return needed. -+ * Utilises effects of issuing reads to certain registers -+ * 2 Write Write to mem location -+ * 3 MemSet Set mem to value -+ * 4 MemCheck check mem for specific value. -+ */ -+#define RGX_META_LDR_CFG_PAUSE (0x0000) -+#define RGX_META_LDR_CFG_READ (0x0001) -+#define RGX_META_LDR_CFG_WRITE (0x0002) -+#define RGX_META_LDR_CFG_MEMSET (0x0003) -+#define RGX_META_LDR_CFG_MEMCHECK (0x0004) -+ -+ -+/****************************************************************************** -+* RGX FW segmented MMU definitions -+******************************************************************************/ -+/* All threads can access the segment */ -+#define RGXFW_SEGMMU_ALLTHRS (IMG_UINT32_C(0xf) << 8U) -+/* Writable */ -+#define RGXFW_SEGMMU_WRITEABLE (0x1U << 1U) -+/* All threads can access and writable */ -+#define RGXFW_SEGMMU_ALLTHRS_WRITEABLE (RGXFW_SEGMMU_ALLTHRS | RGXFW_SEGMMU_WRITEABLE) -+ -+/* Direct map region 10 used for mapping GPU memory - max 8MB */ -+#define RGXFW_SEGMMU_DMAP_GPU_ID (10U) -+#define RGXFW_SEGMMU_DMAP_GPU_ADDR_START (0x07000000U) -+#define RGXFW_SEGMMU_DMAP_GPU_MAX_SIZE (0x00800000U) -+ -+/* Segment IDs */ -+#define RGXFW_SEGMMU_DATA_ID (1U) -+#define RGXFW_SEGMMU_BOOTLDR_ID (2U) -+#define RGXFW_SEGMMU_TEXT_ID (RGXFW_SEGMMU_BOOTLDR_ID) -+ -+/* -+ * SLC caching strategy in S7 and volcanic is emitted through the segment MMU. -+ * All the segments configured through the macro RGXFW_SEGMMU_OUTADDR_TOP are -+ * CACHED in the SLC. -+ * The interface has been kept the same to simplify the code changes. -+ * The bifdm argument is ignored (no longer relevant) in S7 and volcanic. 
-+ */ -+#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(pers, slc_policy, mmu_ctx) ((((IMG_UINT64) ((pers) & 0x3U)) << 52) | \ -+ (((IMG_UINT64) ((mmu_ctx) & 0xFFU)) << 44) | \ -+ (((IMG_UINT64) ((slc_policy) & 0x1U)) << 40)) -+#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(mmu_ctx) RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x3U, 0x0U, mmu_ctx) -+#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(mmu_ctx) RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x0U, 0x1U, mmu_ctx) -+ -+/* To configure the Page Catalog and BIF-DM fed into the BIF for Garten -+ * accesses through this segment -+ */ -+#define RGXFW_SEGMMU_OUTADDR_TOP_SLC(pc, bifdm) (((IMG_UINT64)((IMG_UINT64)(pc) & 0xFU) << 44U) | \ -+ ((IMG_UINT64)((IMG_UINT64)(bifdm) & 0xFU) << 40U)) -+ -+#define RGXFW_SEGMMU_META_BIFDM_ID (0x7U) -+#if !defined(__KERNEL__) && defined(RGX_FEATURE_META) -+#if defined(RGX_FEATURE_SLC_VIVT) -+#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED -+#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_UNCACHED RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED -+#define RGXFW_SEGMMU_OUTADDR_TOP_META RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED -+#else -+#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED RGXFW_SEGMMU_OUTADDR_TOP_SLC -+#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_UNCACHED RGXFW_SEGMMU_OUTADDR_TOP_SLC -+#define RGXFW_SEGMMU_OUTADDR_TOP_META(pc) RGXFW_SEGMMU_OUTADDR_TOP_SLC(pc, RGXFW_SEGMMU_META_BIFDM_ID) -+#endif -+#endif -+ -+/* META segments have 4kB minimum size */ -+#define RGXFW_SEGMMU_ALIGN (0x1000U) -+ -+/* Segmented MMU registers (n = segment id) */ -+#define META_CR_MMCU_SEGMENTn_BASE(n) (0x04850000U + ((n)*0x10U)) -+#define META_CR_MMCU_SEGMENTn_LIMIT(n) (0x04850004U + ((n)*0x10U)) -+#define META_CR_MMCU_SEGMENTn_OUTA0(n) (0x04850008U + ((n)*0x10U)) -+#define META_CR_MMCU_SEGMENTn_OUTA1(n) (0x0485000CU + ((n)*0x10U)) -+ -+/* The following defines must be recalculated if the Meta MMU segments used -+ * to access Host-FW data are changed -+ * Current combinations are: -+ * - SLC uncached, META cached, FW base address 0x70000000 -+ * - SLC uncached, META uncached, FW base address 0xF0000000 -+ * - SLC cached, META cached, FW base address 0x10000000 -+ * - SLC cached, META uncached, FW base address 0x90000000 -+ */ -+#define RGXFW_SEGMMU_DATA_BASE_ADDRESS (0x10000000U) -+#define RGXFW_SEGMMU_DATA_META_CACHED (0x0U) -+#define RGXFW_SEGMMU_DATA_META_UNCACHED (META_MEM_GLOBAL_RANGE_BIT) // 0x80000000 -+#define RGXFW_SEGMMU_DATA_META_CACHE_MASK (META_MEM_GLOBAL_RANGE_BIT) -+/* For non-VIVT SLCs the cacheability of the FW data in the SLC is selected in -+ * the PTEs for the FW data, not in the Meta Segment MMU, which means these -+ * defines have no real effect in those cases -+ */ -+#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED (0x0U) -+#define RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED (0x60000000U) -+#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK (0x60000000U) -+ -+/****************************************************************************** -+* RGX FW Bootloader defaults -+******************************************************************************/ -+#define RGXFW_BOOTLDR_META_ADDR (0x40000000U) -+#define RGXFW_BOOTLDR_DEVV_ADDR_0 (0xC0000000U) -+#define RGXFW_BOOTLDR_DEVV_ADDR_1 (0x000000E1) -+#define RGXFW_BOOTLDR_DEVV_ADDR ((((IMG_UINT64) RGXFW_BOOTLDR_DEVV_ADDR_1) << 32) | RGXFW_BOOTLDR_DEVV_ADDR_0) -+#define RGXFW_BOOTLDR_LIMIT (0x1FFFF000) -+#define RGXFW_MAX_BOOTLDR_OFFSET (0x1000) -+ -+/* Bootloader configuration offset is in dwords (512 bytes) */ -+#define RGXFW_BOOTLDR_CONF_OFFSET (0x80) -+ -+ 
-+/****************************************************************************** -+* RGX META Stack -+******************************************************************************/ -+#define RGX_META_STACK_SIZE (0x1000U) -+ -+/****************************************************************************** -+ RGX META Core memory -+******************************************************************************/ -+/* code and data both map to the same physical memory */ -+#define RGX_META_COREMEM_CODE_ADDR (0x80000000U) -+#define RGX_META_COREMEM_DATA_ADDR (0x82000000U) -+#define RGX_META_COREMEM_OFFSET_MASK (0x01ffffffU) -+ -+#if defined(__KERNEL__) -+#define RGX_META_IS_COREMEM_CODE(A, B) (((A) >= RGX_META_COREMEM_CODE_ADDR) && ((A) < (RGX_META_COREMEM_CODE_ADDR + (B)))) -+#define RGX_META_IS_COREMEM_DATA(A, B) (((A) >= RGX_META_COREMEM_DATA_ADDR) && ((A) < (RGX_META_COREMEM_DATA_ADDR + (B)))) -+#endif -+ -+/****************************************************************************** -+* 2nd thread -+******************************************************************************/ -+#define RGXFW_THR1_PC (0x18930000) -+#define RGXFW_THR1_SP (0x78890000) -+ -+/****************************************************************************** -+* META compatibility -+******************************************************************************/ -+ -+#define META_CR_CORE_ID (0x04831000) -+#define META_CR_CORE_ID_VER_SHIFT (16U) -+#define META_CR_CORE_ID_VER_CLRMSK (0XFF00FFFFU) -+ -+#if !defined(__KERNEL__) && defined(RGX_FEATURE_META) -+ -+ #if (RGX_FEATURE_META == MTP218) -+ #define RGX_CR_META_CORE_ID_VALUE 0x19 -+ #elif (RGX_FEATURE_META == MTP219) -+ #define RGX_CR_META_CORE_ID_VALUE 0x1E -+ #elif (RGX_FEATURE_META == LTP218) -+ #define RGX_CR_META_CORE_ID_VALUE 0x1C -+ #elif (RGX_FEATURE_META == LTP217) -+ #define RGX_CR_META_CORE_ID_VALUE 0x1F -+ #else -+ #error "Unknown META ID" -+ #endif -+#else -+ -+ #define RGX_CR_META_MTP218_CORE_ID_VALUE 0x19 -+ #define RGX_CR_META_MTP219_CORE_ID_VALUE 0x1E -+ #define RGX_CR_META_LTP218_CORE_ID_VALUE 0x1C -+ #define RGX_CR_META_LTP217_CORE_ID_VALUE 0x1F -+ -+#endif -+#define RGXFW_PROCESSOR_META "META" -+ -+ -+#endif /* RGX_META_H */ -+ -+/****************************************************************************** -+ End of file (rgx_meta.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgx_mips.h b/drivers/gpu/drm/img-rogue/rgx_mips.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_mips.h -@@ -0,0 +1,406 @@ -+/*************************************************************************/ /*! -+@File rgx_mips.h -+@Title -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Platform RGX -+@Description RGX MIPS definitions, kernel/user space -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#if !defined(RGX_MIPS_H) -+#define RGX_MIPS_H -+ -+/* -+ * Utility defines for memory management -+ */ -+#define RGXMIPSFW_LOG2_PAGE_SIZE_4K (12) -+#define RGXMIPSFW_PAGE_SIZE_4K (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_4K) -+#define RGXMIPSFW_PAGE_MASK_4K (RGXMIPSFW_PAGE_SIZE_4K - 1) -+#define RGXMIPSFW_LOG2_PAGE_SIZE_64K (16) -+#define RGXMIPSFW_PAGE_SIZE_64K (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_64K) -+#define RGXMIPSFW_PAGE_MASK_64K (RGXMIPSFW_PAGE_SIZE_64K - 1) -+#define RGXMIPSFW_LOG2_PAGE_SIZE_256K (18) -+#define RGXMIPSFW_PAGE_SIZE_256K (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_256K) -+#define RGXMIPSFW_PAGE_MASK_256K (RGXMIPSFW_PAGE_SIZE_256K - 1) -+#define RGXMIPSFW_LOG2_PAGE_SIZE_1MB (20) -+#define RGXMIPSFW_PAGE_SIZE_1MB (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_1MB) -+#define RGXMIPSFW_PAGE_MASK_1MB (RGXMIPSFW_PAGE_SIZE_1MB - 1) -+#define RGXMIPSFW_LOG2_PAGE_SIZE_4MB (22) -+#define RGXMIPSFW_PAGE_SIZE_4MB (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_4MB) -+#define RGXMIPSFW_PAGE_MASK_4MB (RGXMIPSFW_PAGE_SIZE_4MB - 1) -+#define RGXMIPSFW_LOG2_PTE_ENTRY_SIZE (2) -+/* log2 page table sizes dependent on FW heap size and page size (for each OS) */ -+#define RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K (RGX_FIRMWARE_HEAP_SHIFT - RGXMIPSFW_LOG2_PAGE_SIZE_4K + RGXMIPSFW_LOG2_PTE_ENTRY_SIZE) -+#define RGXMIPSFW_LOG2_PAGETABLE_SIZE_64K (RGX_FIRMWARE_HEAP_SHIFT - RGXMIPSFW_LOG2_PAGE_SIZE_64K + RGXMIPSFW_LOG2_PTE_ENTRY_SIZE) -+/* Maximum number of page table pages (both Host and MIPS pages) */ -+#define RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES (4) -+/* Total number of TLB entries */ -+#define RGXMIPSFW_NUMBER_OF_TLB_ENTRIES (16) -+/* "Uncached" caching policy */ -+#define RGXMIPSFW_UNCACHED_CACHE_POLICY (0X00000002U) -+/* "Write-back write-allocate" caching policy */ -+#define RGXMIPSFW_WRITEBACK_CACHE_POLICY (0X00000003) -+/* "Write-through no write-allocate" caching policy */ -+#define RGXMIPSFW_WRITETHROUGH_CACHE_POLICY (0X00000001) -+/* Cached policy used by MIPS in case of physical bus on 32 bit */ -+#define RGXMIPSFW_CACHED_POLICY (RGXMIPSFW_WRITEBACK_CACHE_POLICY) -+/* Cached policy used by MIPS in case of physical bus on more than 32 bit */ -+#define RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT (RGXMIPSFW_WRITETHROUGH_CACHE_POLICY) -+/* Total number of Remap entries */ -+#define RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES (2 * RGXMIPSFW_NUMBER_OF_TLB_ENTRIES) -+ -+ -+/* -+ * MIPS EntryLo/PTE format -+ */ -+ -+#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_SHIFT (31U) -+#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_CLRMSK (0X7FFFFFFF) -+#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN (0X80000000U) -+ -+#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_SHIFT (30U) -+#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_CLRMSK (0XBFFFFFFF) -+#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN (0X40000000U) -+ -+/* Page Frame Number */ -+#define RGXMIPSFW_ENTRYLO_PFN_SHIFT (6) -+#define RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT (12) -+/* Mask used for the MIPS Page Table in case of physical bus on 32 bit */ -+#define RGXMIPSFW_ENTRYLO_PFN_MASK (0x03FFFFC0) -+#define RGXMIPSFW_ENTRYLO_PFN_SIZE (20) -+/* Mask used for the MIPS Page Table in case of physical bus on more than 32 bit */ -+#define RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT (0x3FFFFFC0U) -+#define RGXMIPSFW_ENTRYLO_PFN_SIZE_ABOVE_32BIT (24) -+#define RGXMIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT (RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT - \ -+ RGXMIPSFW_ENTRYLO_PFN_SHIFT) -+ -+#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT (3U) -+#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK (0XFFFFFFC7U) -+ -+#define 
RGXMIPSFW_ENTRYLO_DIRTY_SHIFT (2U) -+#define RGXMIPSFW_ENTRYLO_DIRTY_CLRMSK (0XFFFFFFFB) -+#define RGXMIPSFW_ENTRYLO_DIRTY_EN (0X00000004U) -+ -+#define RGXMIPSFW_ENTRYLO_VALID_SHIFT (1U) -+#define RGXMIPSFW_ENTRYLO_VALID_CLRMSK (0XFFFFFFFD) -+#define RGXMIPSFW_ENTRYLO_VALID_EN (0X00000002U) -+ -+#define RGXMIPSFW_ENTRYLO_GLOBAL_SHIFT (0U) -+#define RGXMIPSFW_ENTRYLO_GLOBAL_CLRMSK (0XFFFFFFFE) -+#define RGXMIPSFW_ENTRYLO_GLOBAL_EN (0X00000001U) -+ -+#define RGXMIPSFW_ENTRYLO_DVG (RGXMIPSFW_ENTRYLO_DIRTY_EN | \ -+ RGXMIPSFW_ENTRYLO_VALID_EN | \ -+ RGXMIPSFW_ENTRYLO_GLOBAL_EN) -+#define RGXMIPSFW_ENTRYLO_UNCACHED (RGXMIPSFW_UNCACHED_CACHE_POLICY << \ -+ RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT) -+#define RGXMIPSFW_ENTRYLO_DVG_UNCACHED (RGXMIPSFW_ENTRYLO_DVG | RGXMIPSFW_ENTRYLO_UNCACHED) -+ -+ -+/* Remap Range Config Addr Out */ -+/* These defines refer to the upper half of the Remap Range Config register */ -+#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_MASK (0x0FFFFFF0) -+#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT (4) /* wrt upper half of the register */ -+#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT (12) -+#define RGXMIPSFW_ADDR_TO_RR_ADDR_OUT_RSHIFT (RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT - \ -+ RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT) -+/* -+ * Pages to trampoline problematic physical addresses: -+ * - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN : 0x1FC0_0000 -+ * - RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN : 0x1FC0_1000 -+ * - RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN : 0x1FC0_2000 -+ * - (benign trampoline) : 0x1FC0_3000 -+ * that would otherwise be erroneously remapped by the MIPS wrapper -+ * (see "Firmware virtual layout and remap configuration" section below) -+ */ -+ -+#define RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES (2) -+#define RGXMIPSFW_TRAMPOLINE_NUMPAGES (1U << RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES) -+#define RGXMIPSFW_TRAMPOLINE_SIZE (RGXMIPSFW_TRAMPOLINE_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE_4K) -+#define RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE (RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES + RGXMIPSFW_LOG2_PAGE_SIZE_4K) -+ -+#define RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN) -+#define RGXMIPSFW_TRAMPOLINE_OFFSET(a) (a - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN) -+ -+#define RGXMIPSFW_SENSITIVE_ADDR(a) (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN == (~((1UL << RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE)-1U) & a)) -+ -+#define RGXMIPSFW_C0_PAGEMASK_4K (0x00001800) -+#define RGXMIPSFW_C0_PAGEMASK_16K (0x00007800) -+#define RGXMIPSFW_C0_PAGEMASK_64K (0x0001F800) -+#define RGXMIPSFW_C0_PAGEMASK_256K (0x0007F800) -+#define RGXMIPSFW_C0_PAGEMASK_1MB (0x001FF800) -+#define RGXMIPSFW_C0_PAGEMASK_4MB (0x007FF800) -+ -+#if defined(RGX_FEATURE_GPU_MULTICORE_SUPPORT) -+/* GPU_COUNT: number of physical cores in the system -+ * NUM_OF_REGBANKS = GPU_COUNT + 1 //XPU BROADCAST BANK -+ * RGXMIPSFW_REGISTERS_PAGE_SIZE = NUM_OF_REGBANKS * REGBANK_SIZE(64KB) * NUM_OF_OSID(8) -+ * For RGXMIPSFW_REGISTERS_PAGE_SIZE = 4MB, NUM_OF_REGBANKS = 8 so supports upto GPU_COUNT = 7 cores -+ */ -+#define RGXMIPSFW_C0_PAGEMASK_REGISTERS (RGXMIPSFW_C0_PAGEMASK_4MB) -+#define RGXMIPSFW_REGISTERS_PAGE_SIZE (RGXMIPSFW_PAGE_SIZE_4MB) -+#define RGXMIPSFW_REGISTERS_REMAP_RANGE_CONFIG_REGION_SIZE (RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4MB) -+#elif (RGX_NUM_DRIVERS_SUPPORTED == 1) -+#define RGXMIPSFW_C0_PAGEMASK_REGISTERS (RGXMIPSFW_C0_PAGEMASK_64K) -+#define RGXMIPSFW_REGISTERS_PAGE_SIZE (RGXMIPSFW_PAGE_SIZE_64K) -+#define RGXMIPSFW_REGISTERS_REMAP_RANGE_CONFIG_REGION_SIZE (RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB) -+#elif 
(RGX_NUM_DRIVERS_SUPPORTED <= 4) -+#define RGXMIPSFW_C0_PAGEMASK_REGISTERS (RGXMIPSFW_C0_PAGEMASK_256K) -+#define RGXMIPSFW_REGISTERS_PAGE_SIZE (RGXMIPSFW_PAGE_SIZE_256K) -+#define RGXMIPSFW_REGISTERS_REMAP_RANGE_CONFIG_REGION_SIZE (RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB) -+#elif (RGX_NUM_DRIVERS_SUPPORTED <= 8) -+#define RGXMIPSFW_C0_PAGEMASK_REGISTERS (RGXMIPSFW_C0_PAGEMASK_1MB) -+#define RGXMIPSFW_REGISTERS_PAGE_SIZE (RGXMIPSFW_PAGE_SIZE_1MB) -+#define RGXMIPSFW_REGISTERS_REMAP_RANGE_CONFIG_REGION_SIZE (RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_1MB) -+#else -+#error "MIPS TLB invalid params" -+#endif -+ -+#define RGXMIPSFW_DECODE_REMAP_CONFIG_REGION_SIZE(r) ((1U << (((r >> 7) + 1U) << 1U))*0x400) -+ -+/* -+ * Firmware virtual layout and remap configuration -+ */ -+/* -+ * For each remap region we define: -+ * - the virtual base used by the Firmware to access code/data through that region -+ * - the microAptivAP physical address correspondent to the virtual base address, -+ * used as input address and remapped to the actual physical address -+ * - log2 of size of the region remapped by the MIPS wrapper, i.e. number of bits from -+ * the bottom of the base input address that survive onto the output address -+ * (this defines both the alignment and the maximum size of the remapped region) -+ * - one or more code/data segments within the remapped region -+ */ -+ -+/* Boot remap setup */ -+#define RGXMIPSFW_BOOT_REMAP_VIRTUAL_BASE (0xBFC00000) -+#define RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN (0x1FC00000U) -+#define RGXMIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE (12) -+#define RGXMIPSFW_BOOT_NMI_CODE_VIRTUAL_BASE (RGXMIPSFW_BOOT_REMAP_VIRTUAL_BASE) -+ -+/* Data remap setup */ -+#define RGXMIPSFW_DATA_REMAP_VIRTUAL_BASE (0xBFC01000) -+#define RGXMIPSFW_DATA_CACHED_REMAP_VIRTUAL_BASE (0x9FC01000) -+#define RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN (0x1FC01000U) -+#define RGXMIPSFW_DATA_REMAP_LOG2_SEGMENT_SIZE (12) -+#define RGXMIPSFW_BOOT_NMI_DATA_VIRTUAL_BASE (RGXMIPSFW_DATA_REMAP_VIRTUAL_BASE) -+ -+/* Code remap setup */ -+#define RGXMIPSFW_CODE_REMAP_VIRTUAL_BASE (0x9FC02000) -+#define RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN (0x1FC02000U) -+#define RGXMIPSFW_CODE_REMAP_LOG2_SEGMENT_SIZE (12) -+#define RGXMIPSFW_EXCEPTIONS_VIRTUAL_BASE (RGXMIPSFW_CODE_REMAP_VIRTUAL_BASE) -+ -+/* Permanent mappings setup */ -+#define RGXMIPSFW_PT_VIRTUAL_BASE (0xCF000000) -+#define RGXMIPSFW_REGISTERS_VIRTUAL_BASE (0xCF800000) -+#define RGXMIPSFW_STACK_VIRTUAL_BASE (0xCF600000) -+#define RGXMIPSFW_MIPS_STATE_VIRTUAL_BASE (RGXMIPSFW_REGISTERS_VIRTUAL_BASE + RGXMIPSFW_REGISTERS_PAGE_SIZE) -+ -+/* Offset inside the bootloader data page where the general_exception handler saves the error state. -+ * The error value is then copied by the NMI handler to the MipsState struct in shared memory. -+ * This is done because it's difficult to obtain the address of MipsState inside the general -+ * exception handler. */ -+#define RGXMIPSFW_ERROR_STATE_BASE (0x100) -+ -+/* -+ * Bootloader configuration data -+ */ -+/* Bootloader configuration offset (where RGXMIPSFW_BOOT_DATA lives) -+ * within the bootloader/NMI data page */ -+#define RGXMIPSFW_BOOTLDR_CONF_OFFSET (0x0U) -+ -+/* -+ * MIPS boot stage -+ */ -+#define RGXMIPSFW_BOOT_STAGE_OFFSET (0x400) -+ -+/* -+ * MIPS private data in the bootloader data page. -+ * Memory below this offset is used by the FW only, no interface data allowed. 
-+ */ -+#define RGXMIPSFW_PRIVATE_DATA_OFFSET (0x800) -+ -+ -+/* The things that follow are excluded when compiling assembly sources */ -+#if !defined(RGXMIPSFW_ASSEMBLY_CODE) -+#include "img_types.h" -+#include "km/rgxdefs_km.h" -+ -+typedef struct -+{ -+ IMG_UINT64 ui64StackPhyAddr; -+ IMG_UINT64 ui64RegBase; -+ IMG_UINT64 aui64PTPhyAddr[RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES]; -+ IMG_UINT32 ui32PTLog2PageSize; -+ IMG_UINT32 ui32PTNumPages; -+ IMG_UINT32 ui32Reserved1; -+ IMG_UINT32 ui32Reserved2; -+} RGXMIPSFW_BOOT_DATA; -+ -+#define RGXMIPSFW_GET_OFFSET_IN_DWORDS(offset) (offset / sizeof(IMG_UINT32)) -+#define RGXMIPSFW_GET_OFFSET_IN_QWORDS(offset) (offset / sizeof(IMG_UINT64)) -+ -+/* Used for compatibility checks */ -+#define RGXMIPSFW_ARCHTYPE_VER_CLRMSK (0xFFFFE3FFU) -+#define RGXMIPSFW_ARCHTYPE_VER_SHIFT (10U) -+#define RGXMIPSFW_CORE_ID_VALUE (0x001U) -+#define RGXFW_PROCESSOR_MIPS "MIPS" -+ -+/* microAptivAP cache line size */ -+#define RGXMIPSFW_MICROAPTIVEAP_CACHELINE_SIZE (16U) -+ -+/* The SOCIF transactions are identified with the top 16 bits of the physical address emitted by the MIPS */ -+#define RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN (16U) -+ -+/* Values to put in the MIPS selectors for performance counters */ -+#define RGXMIPSFW_PERF_COUNT_CTRL_ICACHE_ACCESSES_C0 (9U) /* Icache accesses in COUNTER0 */ -+#define RGXMIPSFW_PERF_COUNT_CTRL_ICACHE_MISSES_C1 (9U) /* Icache misses in COUNTER1 */ -+ -+#define RGXMIPSFW_PERF_COUNT_CTRL_DCACHE_ACCESSES_C0 (10U) /* Dcache accesses in COUNTER0 */ -+#define RGXMIPSFW_PERF_COUNT_CTRL_DCACHE_MISSES_C1 (11U) /* Dcache misses in COUNTER1 */ -+ -+#define RGXMIPSFW_PERF_COUNT_CTRL_ITLB_INSTR_ACCESSES_C0 (5U) /* ITLB instruction accesses in COUNTER0 */ -+#define RGXMIPSFW_PERF_COUNT_CTRL_JTLB_INSTR_MISSES_C1 (7U) /* JTLB instruction accesses misses in COUNTER1 */ -+ -+#define RGXMIPSFW_PERF_COUNT_CTRL_INSTR_COMPLETED_C0 (1U) /* Instructions completed in COUNTER0 */ -+#define RGXMIPSFW_PERF_COUNT_CTRL_JTLB_DATA_MISSES_C1 (8U) /* JTLB data misses in COUNTER1 */ -+ -+#define RGXMIPSFW_PERF_COUNT_CTRL_EVENT_SHIFT (5U) /* Shift for the Event field in the MIPS perf ctrl registers */ -+/* Additional flags for performance counters. 
See MIPS manual for further reference */ -+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_USER_MODE (8U) -+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_KERNEL_MODE (2U) -+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_EXL (1U) -+ -+ -+#define RGXMIPSFW_C0_NBHWIRQ 8 -+ -+/* Macros to decode C0_Cause register */ -+#define RGXMIPSFW_C0_CAUSE_EXCCODE(CAUSE) (((CAUSE) & 0x7cU) >> 2U) -+#define RGXMIPSFW_C0_CAUSE_EXCCODE_FWERROR 9 -+/* Use only when Coprocessor Unusable exception */ -+#define RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(CAUSE) (((CAUSE) >> 28U) & 0x3U) -+#define RGXMIPSFW_C0_CAUSE_PENDING_HWIRQ(CAUSE) (((CAUSE) & 0x3fc00) >> 10) -+#define RGXMIPSFW_C0_CAUSE_FDCIPENDING (1UL << 21) -+#define RGXMIPSFW_C0_CAUSE_IV (1UL << 23) -+#define RGXMIPSFW_C0_CAUSE_IC (1UL << 25) -+#define RGXMIPSFW_C0_CAUSE_PCIPENDING (1UL << 26) -+#define RGXMIPSFW_C0_CAUSE_TIPENDING (1UL << 30) -+#define RGXMIPSFW_C0_CAUSE_BRANCH_DELAY (1UL << 31) -+ -+/* Macros to decode C0_Debug register */ -+#define RGXMIPSFW_C0_DEBUG_EXCCODE(DEBUG) (((DEBUG) >> 10U) & 0x1fU) -+#define RGXMIPSFW_C0_DEBUG_DSS (1UL << 0) -+#define RGXMIPSFW_C0_DEBUG_DBP (1UL << 1) -+#define RGXMIPSFW_C0_DEBUG_DDBL (1UL << 2) -+#define RGXMIPSFW_C0_DEBUG_DDBS (1UL << 3) -+#define RGXMIPSFW_C0_DEBUG_DIB (1UL << 4) -+#define RGXMIPSFW_C0_DEBUG_DINT (1UL << 5) -+#define RGXMIPSFW_C0_DEBUG_DIBIMPR (1UL << 6) -+#define RGXMIPSFW_C0_DEBUG_DDBLIMPR (1UL << 18) -+#define RGXMIPSFW_C0_DEBUG_DDBSIMPR (1UL << 19) -+#define RGXMIPSFW_C0_DEBUG_IEXI (1UL << 20) -+#define RGXMIPSFW_C0_DEBUG_DBUSEP (1UL << 21) -+#define RGXMIPSFW_C0_DEBUG_CACHEEP (1UL << 22) -+#define RGXMIPSFW_C0_DEBUG_MCHECKP (1UL << 23) -+#define RGXMIPSFW_C0_DEBUG_IBUSEP (1UL << 24) -+#define RGXMIPSFW_C0_DEBUG_DM (1UL << 30) -+#define RGXMIPSFW_C0_DEBUG_DBD (1UL << 31) -+ -+/* Macros to decode TLB entries */ -+#define RGXMIPSFW_TLB_GET_MASK(PAGE_MASK) (((PAGE_MASK) >> 13) & 0XFFFFU) -+#define RGXMIPSFW_TLB_GET_PAGE_SIZE(PAGE_MASK) ((((PAGE_MASK) | 0x1FFFU) + 1U) >> 11U) /* page size in KB */ -+#define RGXMIPSFW_TLB_GET_PAGE_MASK(PAGE_SIZE) ((((PAGE_SIZE) << 11) - 1) & ~0x7FF) /* page size in KB */ -+#define RGXMIPSFW_TLB_GET_VPN2(ENTRY_HI) ((ENTRY_HI) >> 13) -+#define RGXMIPSFW_TLB_GET_COHERENCY(ENTRY_LO) (((ENTRY_LO) >> 3) & 0x7U) -+#define RGXMIPSFW_TLB_GET_PFN(ENTRY_LO) (((ENTRY_LO) >> 6) & 0XFFFFFU) -+/* GET_PA uses a non-standard PFN mask for 36 bit addresses */ -+#define RGXMIPSFW_TLB_GET_PA(ENTRY_LO) (((IMG_UINT64)(ENTRY_LO) & RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT) << 6) -+#define RGXMIPSFW_TLB_GET_INHIBIT(ENTRY_LO) (((ENTRY_LO) >> 30) & 0x3U) -+#define RGXMIPSFW_TLB_GET_DGV(ENTRY_LO) ((ENTRY_LO) & 0x7U) -+#define RGXMIPSFW_TLB_GLOBAL (1U) -+#define RGXMIPSFW_TLB_VALID (1U << 1) -+#define RGXMIPSFW_TLB_DIRTY (1U << 2) -+#define RGXMIPSFW_TLB_XI (1U << 30) -+#define RGXMIPSFW_TLB_RI (1U << 31) -+ -+typedef struct { -+ IMG_UINT32 ui32TLBPageMask; -+ IMG_UINT32 ui32TLBHi; -+ IMG_UINT32 ui32TLBLo0; -+ IMG_UINT32 ui32TLBLo1; -+} RGX_MIPS_TLB_ENTRY; -+ -+typedef struct { -+ IMG_UINT32 ui32RemapAddrIn; /* always 4k aligned */ -+ IMG_UINT32 ui32RemapAddrOut; /* always 4k aligned */ -+ IMG_UINT32 ui32RemapRegionSize; -+} RGX_MIPS_REMAP_ENTRY; -+ -+typedef struct { -+ IMG_UINT32 ui32ErrorState; /* This must come first in the structure */ -+ IMG_UINT32 ui32Sync; -+ IMG_UINT32 ui32ErrorEPC; -+ IMG_UINT32 ui32StatusRegister; -+ IMG_UINT32 ui32CauseRegister; -+ IMG_UINT32 ui32BadRegister; -+ IMG_UINT32 ui32EPC; -+ IMG_UINT32 ui32SP; -+ IMG_UINT32 ui32Debug; -+ IMG_UINT32 ui32DEPC; -+ IMG_UINT32 ui32BadInstr; -+ 
IMG_UINT32 ui32UnmappedAddress; -+ RGX_MIPS_TLB_ENTRY asTLB[RGXMIPSFW_NUMBER_OF_TLB_ENTRIES]; -+ IMG_UINT64 aui64Remap[RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES]; -+} RGX_MIPS_STATE; -+ -+static_assert(offsetof(RGX_MIPS_STATE, ui32ErrorState) == 0, -+ "ui32ErrorState is not the first member of the RGX_MIPS_STATE struct"); -+ -+#if defined(SUPPORT_MIPS_64K_PAGE_SIZE) -+static_assert(RGXMIPSFW_REGISTERS_PAGE_SIZE >= RGXMIPSFW_PAGE_SIZE_64K, -+ "Register page size must be greater or equal to MIPS page size"); -+#else -+static_assert(RGXMIPSFW_REGISTERS_PAGE_SIZE >= RGXMIPSFW_PAGE_SIZE_4K, -+ "Register page size must be greater or equal to MIPS page size"); -+#endif -+ -+ -+#endif /* RGXMIPSFW_ASSEMBLY_CODE */ -+ -+#endif /* RGX_MIPS_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgx_options.h b/drivers/gpu/drm/img-rogue/rgx_options.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_options.h -@@ -0,0 +1,342 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX build options -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+/* Each build option listed here is packed into a dword which provides up to -+ * log2(RGX_BUILD_OPTIONS_MASK_KM + 1) flags for KM and -+ * (32 - log2(RGX_BUILD_OPTIONS_MASK_KM + 1)) flags for UM. 
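The TLB page-mask helpers in rgx_mips.h above are easy to sanity-check in isolation. The sketch below copies RGXMIPSFW_TLB_GET_PAGE_SIZE and RGXMIPSFW_TLB_GET_PAGE_MASK under shorter local names and confirms that the C0_PageMask register values listed earlier decode to the expected sizes (in KB) and round-trip back; it is illustrative only.

#include <assert.h>
#include <stdint.h>

/* Local copies of the rgx_mips.h TLB helpers, for a standalone check. */
#define TLB_GET_PAGE_SIZE(m) ((((m) | 0x1FFFu) + 1u) >> 11u)   /* page size in KB */
#define TLB_GET_PAGE_MASK(s) ((((s) << 11) - 1u) & ~0x7FFu)    /* page size in KB */

int main(void)
{
    /* C0_PageMask values from the header decode to the expected sizes... */
    assert(TLB_GET_PAGE_SIZE(0x00001800u) == 4u);      /* 4 KB  */
    assert(TLB_GET_PAGE_SIZE(0x00007800u) == 16u);     /* 16 KB */
    assert(TLB_GET_PAGE_SIZE(0x007FF800u) == 4096u);   /* 4 MB  */

    /* ...and the encode helper round-trips back to the register value. */
    assert(TLB_GET_PAGE_MASK(4u)    == 0x00001800u);
    assert(TLB_GET_PAGE_MASK(16u)   == 0x00007800u);
    assert(TLB_GET_PAGE_MASK(4096u) == 0x007FF800u);
    return 0;
}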
-+ * The corresponding bit is set if the build option was enabled at compile -+ * time. -+ * -+ * IMPORTANT: add new options to unused bits or define a new dword -+ * (e.g. RGX_BUILD_OPTIONS_KM2 or RGX_BUILD_OPTIONS2) so that the bitfield -+ * remains backwards compatible. -+ */ -+ -+#ifndef RGX_OPTIONS_H -+#define RGX_OPTIONS_H -+ -+#define OPTIONS_OPEN_SOURCE_EN (0x1UL << 0) -+#define OPTIONS_PDUMP_EN (0x1UL << 1) -+#define OPTIONS_UNUSED1_EN (0x1UL << 2) -+#define OPTIONS_SECURE_ALLOC_KM_EN (0x1UL << 3) -+#define OPTIONS_RGX_EN (0x1UL << 4) -+#define OPTIONS_SECURE_EXPORT_EN (0x1UL << 5) -+#define OPTIONS_INSECURE_EXPORT_EN (0x1UL << 6) -+#define OPTIONS_VFP_EN (0x1UL << 7) -+#define OPTIONS_WORKLOAD_ESTIMATION_EN (0x1UL << 8) -+#define OPTIONS_PDVFS_EN (0x1UL << 9) -+#define OPTIONS_DEBUG_EN (0x1UL << 10) -+#define OPTIONS_BUFFER_SYNC_EN (0x1UL << 11) -+#define OPTIONS_AUTOVZ_EN (0x1UL << 12) -+#define OPTIONS_AUTOVZ_HW_REGS_EN (0x1UL << 13) -+#define OPTIONS_FW_IRQ_REG_COUNTERS_EN (0x1UL << 14) -+#define OPTIONS_VALIDATION_EN (0x1UL << 15) -+#define OPTIONS_NO_HARDWARE_EN (0x1UL << 16) -+ -+#define OPTIONS_PERCONTEXT_FREELIST_EN (0x1UL << 31) -+ -+#define RGX_BUILD_OPTIONS_MASK_KM \ -+ (OPTIONS_OPEN_SOURCE_EN | \ -+ OPTIONS_PDUMP_EN | \ -+ OPTIONS_SECURE_ALLOC_KM_EN | \ -+ OPTIONS_RGX_EN | \ -+ OPTIONS_SECURE_EXPORT_EN | \ -+ OPTIONS_INSECURE_EXPORT_EN | \ -+ OPTIONS_VFP_EN | \ -+ OPTIONS_WORKLOAD_ESTIMATION_EN | \ -+ OPTIONS_PDVFS_EN | \ -+ OPTIONS_DEBUG_EN | \ -+ OPTIONS_BUFFER_SYNC_EN | \ -+ OPTIONS_AUTOVZ_EN | \ -+ OPTIONS_AUTOVZ_HW_REGS_EN | \ -+ OPTIONS_FW_IRQ_REG_COUNTERS_EN | \ -+ OPTIONS_VALIDATION_EN | \ -+ OPTIONS_NO_HARDWARE_EN) -+ -+ -+#define RGX_BUILD_OPTIONS_MASK_FW \ -+ (RGX_BUILD_OPTIONS_MASK_KM & \ -+ ~OPTIONS_BUFFER_SYNC_EN) -+ -+/* Build options that the FW must have if the present on the KM */ -+#define FW_OPTIONS_STRICT ((RGX_BUILD_OPTIONS_MASK_KM | \ -+ OPTIONS_PERCONTEXT_FREELIST_EN) & \ -+ ~(OPTIONS_DEBUG_EN | \ -+ OPTIONS_WORKLOAD_ESTIMATION_EN | \ -+ OPTIONS_PDVFS_EN)) -+ -+/* Build options that the UM must have if the present on the KM */ -+#define UM_OPTIONS_STRICT ((RGX_BUILD_OPTIONS_MASK_KM | \ -+ OPTIONS_PERCONTEXT_FREELIST_EN) & \ -+ ~(OPTIONS_DEBUG_EN | \ -+ OPTIONS_WORKLOAD_ESTIMATION_EN | \ -+ OPTIONS_OPEN_SOURCE_EN | \ -+ OPTIONS_PDVFS_EN)) -+ -+/* Build options that the KM must have if the present on the UM */ -+#define KM_OPTIONS_STRICT ((RGX_BUILD_OPTIONS_MASK_KM | \ -+ OPTIONS_PERCONTEXT_FREELIST_EN) & \ -+ ~(OPTIONS_DEBUG_EN | \ -+ OPTIONS_WORKLOAD_ESTIMATION_EN | \ -+ OPTIONS_PDVFS_EN | \ -+ OPTIONS_OPEN_SOURCE_EN | \ -+ OPTIONS_BUFFER_SYNC_EN)) -+ -+#define OPEN_SOURCE_OPTION "OPEN_SOURCE_DRIVER " -+#if defined(SUPPORT_OPEN_SOURCE_DRIVER) -+ #define OPTIONS_BIT0 OPTIONS_OPEN_SOURCE_EN -+ #if OPTIONS_BIT0 > RGX_BUILD_OPTIONS_MASK_KM -+ #error "Bit exceeds reserved range" -+ #endif -+#else -+ #define OPTIONS_BIT0 0x0UL -+#endif /* SUPPORT_OPEN_SOURCE_DRIVER */ -+ -+#define PDUMP_OPTION "PDUMP " -+#if defined(PDUMP) -+ #define OPTIONS_BIT1 OPTIONS_PDUMP_EN -+ #if OPTIONS_BIT1 > RGX_BUILD_OPTIONS_MASK_KM -+ #error "Bit exceeds reserved range" -+ #endif -+#else -+ #define OPTIONS_BIT1 0x0UL -+#endif /* PDUMP */ -+ -+/* No longer used */ -+#define INTERNAL_UNUSED1_OPTION "INTERNAL_UNUSED1 " -+#if defined(INTERNAL_UNUSED1) -+ #define OPTIONS_BIT2 OPTIONS_UNUSED1_EN -+ #if OPTIONS_BIT2 > RGX_BUILD_OPTIONS_MASK_KM -+ #error "Bit exceeds reserved range" -+ #endif -+#else -+ #define OPTIONS_BIT2 0x0UL -+#endif -+ -+#define SECURE_ALLOC_KM_OPTION 
"SECURE_ALLOC_KM " -+#if defined(SUPPORT_SECURE_ALLOC_KM) -+ #define OPTIONS_BIT3 OPTIONS_SECURE_ALLOC_KM_EN -+ #if OPTIONS_BIT3 > RGX_BUILD_OPTIONS_MASK_KM -+ #error "Bit exceeds reserved range" -+ #endif -+#else -+ #define OPTIONS_BIT3 0x0UL -+#endif /* SUPPORT_SECURE_ALLOC_KM */ -+ -+#define RGX_OPTION " " -+#if defined(SUPPORT_RGX) -+ #define OPTIONS_BIT4 OPTIONS_RGX_EN -+ #if OPTIONS_BIT4 > RGX_BUILD_OPTIONS_MASK_KM -+ #error "Bit exceeds reserved range" -+ #endif -+#else -+ #define OPTIONS_BIT4 0x0UL -+#endif /* SUPPORT_RGX */ -+ -+#define SECURE_EXPORT_OPTION "SECURE_EXPORTS " -+#if defined(SUPPORT_SECURE_EXPORT) -+ #define OPTIONS_BIT5 OPTIONS_SECURE_EXPORT_EN -+ #if OPTIONS_BIT5 > RGX_BUILD_OPTIONS_MASK_KM -+ #error "Bit exceeds reserved range" -+ #endif -+#else -+ #define OPTIONS_BIT5 0x0UL -+#endif /* SUPPORT_SECURE_EXPORT */ -+ -+#define INSECURE_EXPORT_OPTION "INSECURE_EXPORTS " -+#if defined(SUPPORT_INSECURE_EXPORT) -+ #define OPTIONS_BIT6 OPTIONS_INSECURE_EXPORT_EN -+ #if OPTIONS_BIT6 > RGX_BUILD_OPTIONS_MASK_KM -+ #error "Bit exceeds reserved range" -+ #endif -+#else -+ #define OPTIONS_BIT6 0x0UL -+#endif /* SUPPORT_INSECURE_EXPORT */ -+ -+#define VFP_OPTION "VFP " -+#if defined(SUPPORT_VFP) -+ #define OPTIONS_BIT7 OPTIONS_VFP_EN -+ #if OPTIONS_BIT7 > RGX_BUILD_OPTIONS_MASK_KM -+ #error "Bit exceeds reserved range" -+ #endif -+#else -+ #define OPTIONS_BIT7 0x0UL -+#endif /* SUPPORT_VFP */ -+ -+#define WORKLOAD_ESTIMATION_OPTION "WORKLOAD_ESTIMATION " -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ #define OPTIONS_BIT8 OPTIONS_WORKLOAD_ESTIMATION_EN -+ #if OPTIONS_BIT8 > RGX_BUILD_OPTIONS_MASK_KM -+ #error "Bit exceeds reserved range" -+ #endif -+#else -+ #define OPTIONS_BIT8 0x0UL -+#endif /* SUPPORT_WORKLOAD_ESTIMATION */ -+ -+#define PDVFS_OPTION "PDVFS " -+#if defined(SUPPORT_PDVFS) -+ #define OPTIONS_BIT9 OPTIONS_PDVFS_EN -+ #if OPTIONS_BIT9 > RGX_BUILD_OPTIONS_MASK_KM -+ #error "Bit exceeds reserved range" -+ #endif -+#else -+ #define OPTIONS_BIT9 0x0UL -+#endif /* SUPPORT_PDVFS */ -+ -+#define DEBUG_OPTION "DEBUG " -+#if defined(DEBUG) -+ #define OPTIONS_BIT10 OPTIONS_DEBUG_EN -+ #if OPTIONS_BIT10 > RGX_BUILD_OPTIONS_MASK_KM -+ #error "Bit exceeds reserved range" -+ #endif -+#else -+ #define OPTIONS_BIT10 0x0UL -+#endif /* DEBUG */ -+ -+#define BUFFER_SYNC_OPTION "BUFFER_SYNC " -+#if defined(SUPPORT_BUFFER_SYNC) -+ #define OPTIONS_BIT11 OPTIONS_BUFFER_SYNC_EN -+ #if OPTIONS_BIT11 > RGX_BUILD_OPTIONS_MASK_KM -+ #error "Bit exceeds reserved range" -+ #endif -+#else -+ #define OPTIONS_BIT11 0x0UL -+#endif /* SUPPORT_BUFFER_SYNC */ -+ -+#define AUTOVZ_OPTION "AUTOVZ " -+#if defined(SUPPORT_AUTOVZ) -+ #define OPTIONS_BIT12 OPTIONS_AUTOVZ_EN -+ #if OPTIONS_BIT12 > RGX_BUILD_OPTIONS_MASK_KM -+ #error "Bit exceeds reserved range" -+ #endif -+#else -+ #define OPTIONS_BIT12 0x0UL -+#endif /* SUPPORT_AUTOVZ */ -+ -+#define AUTOVZ_HW_REGS_OPTION "AUTOVZ_HW_REGS " -+#if defined(SUPPORT_AUTOVZ_HW_REGS) -+ #define OPTIONS_BIT13 OPTIONS_AUTOVZ_HW_REGS_EN -+ #if OPTIONS_BIT13 > RGX_BUILD_OPTIONS_MASK_KM -+ #error "Bit exceeds reserved range" -+ #endif -+#else -+ #define OPTIONS_BIT13 0x0UL -+#endif /* SUPPORT_AUTOVZ_HW_REGS */ -+ -+#define RGX_FW_IRQ_OS_COUNTERS_OPTION "FW_IRQ_OS_COUNTERS " -+#if defined(RGX_FW_IRQ_OS_COUNTERS) -+ #define OPTIONS_BIT14 OPTIONS_FW_IRQ_REG_COUNTERS_EN -+ #if OPTIONS_BIT14 > RGX_BUILD_OPTIONS_MASK_KM -+ #error "Bit exceeds reserved range" -+ #endif -+#else -+ #define OPTIONS_BIT14 0x0UL -+#endif /* RGX_FW_IRQ_OS_COUNTERS */ -+ -+#define VALIDATION_OPTION 
"VALIDATION " -+#if defined(SUPPORT_VALIDATION) -+ #define OPTIONS_BIT15 OPTIONS_VALIDATION_EN -+ #if OPTIONS_BIT15 > RGX_BUILD_OPTIONS_MASK_KM -+ #error "Bit exceeds reserved range" -+ #endif -+#else -+ #define OPTIONS_BIT15 0x0UL -+#endif /* SUPPORT_VALIDATION */ -+ -+#define NO_HARDWARE_OPTION "NO_HARDWARE " -+#if defined(NO_HARDWARE) -+ #define OPTIONS_BIT16 OPTIONS_NO_HARDWARE_EN -+ #if OPTIONS_BIT16 > RGX_BUILD_OPTIONS_MASK_KM -+ #error "Bit exceeds reserved range" -+ #endif -+#else -+ #define OPTIONS_BIT16 0x0UL -+#endif /* NO_HARDWARE */ -+ -+#define OPTIONS_BIT31 OPTIONS_PERCONTEXT_FREELIST_EN -+#if OPTIONS_BIT31 <= RGX_BUILD_OPTIONS_MASK_KM -+#error "Bit exceeds reserved range" -+#endif -+ -+#define RGX_BUILD_OPTIONS_KM \ -+ (OPTIONS_BIT0 |\ -+ OPTIONS_BIT1 |\ -+ OPTIONS_BIT2 |\ -+ OPTIONS_BIT3 |\ -+ OPTIONS_BIT4 |\ -+ OPTIONS_BIT5 |\ -+ OPTIONS_BIT6 |\ -+ OPTIONS_BIT7 |\ -+ OPTIONS_BIT8 |\ -+ OPTIONS_BIT9 |\ -+ OPTIONS_BIT10 |\ -+ OPTIONS_BIT11 |\ -+ OPTIONS_BIT12 |\ -+ OPTIONS_BIT13 |\ -+ OPTIONS_BIT14 |\ -+ OPTIONS_BIT15 |\ -+ OPTIONS_BIT16) -+ -+#define RGX_BUILD_OPTIONS (RGX_BUILD_OPTIONS_KM | OPTIONS_BIT31) -+ -+#define RGX_BUILD_OPTIONS_LIST \ -+ { \ -+ OPEN_SOURCE_OPTION, \ -+ PDUMP_OPTION, \ -+ INTERNAL_UNUSED1_OPTION, \ -+ SECURE_ALLOC_KM_OPTION, \ -+ RGX_OPTION, \ -+ SECURE_EXPORT_OPTION, \ -+ INSECURE_EXPORT_OPTION, \ -+ VFP_OPTION, \ -+ WORKLOAD_ESTIMATION_OPTION, \ -+ PDVFS_OPTION, \ -+ DEBUG_OPTION, \ -+ BUFFER_SYNC_OPTION, \ -+ AUTOVZ_OPTION, \ -+ AUTOVZ_HW_REGS_OPTION, \ -+ RGX_FW_IRQ_OS_COUNTERS_OPTION, \ -+ VALIDATION_OPTION, \ -+ NO_HARDWARE_OPTION \ -+ } -+ -+#endif /* RGX_OPTIONS_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgx_pdump_panics.h b/drivers/gpu/drm/img-rogue/rgx_pdump_panics.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_pdump_panics.h -@@ -0,0 +1,64 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX PDump panic definitions header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX PDump panic definitions header -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGX_PDUMP_PANICS_H_) -+#define RGX_PDUMP_PANICS_H_ -+ -+/*! Unique device specific IMG_UINT16 panic IDs to identify the cause of an -+ * RGX PDump panic in a PDump script. */ -+typedef enum -+{ -+ RGX_PDUMP_PANIC_UNDEFINED = 0, -+ -+ /* These panics occur when test parameters and driver configuration -+ * enable features that require the firmware and host driver to -+ * communicate. Such features are not supported with off-line playback. -+ */ -+ RGX_PDUMP_PANIC_ZSBUFFER_BACKING = 101, /*!< Requests ZSBuffer to be backed with physical pages */ -+ RGX_PDUMP_PANIC_ZSBUFFER_UNBACKING = 102, /*!< Requests ZSBuffer to be unbacked */ -+ RGX_PDUMP_PANIC_FREELIST_GROW = 103, /*!< Requests an on-demand freelist grow/shrink */ -+ RGX_PDUMP_PANIC_FREELISTS_RECONSTRUCTION = 104, /*!< Requests freelists reconstruction */ -+ RGX_PDUMP_PANIC_SPARSEMEM_SWAP = 105, /*!< Requests sparse remap memory swap feature */ -+} RGX_PDUMP_PANIC; -+ -+#endif /* RGX_PDUMP_PANICS_H_ */ -diff --git a/drivers/gpu/drm/img-rogue/rgx_riscv.h b/drivers/gpu/drm/img-rogue/rgx_riscv.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_riscv.h -@@ -0,0 +1,248 @@ -+/*************************************************************************/ /*! -+@File rgx_riscv.h -+@Title -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Platform RGX -+@Description RGX RISCV definitions, kernel/user space -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGX_RISCV_H) -+#define RGX_RISCV_H -+ -+#include "km/rgxdefs_km.h" -+ -+ -+/* Utility defines to convert regions to virtual addresses and remaps */ -+#define RGXRISCVFW_GET_REGION_BASE(r) IMG_UINT32_C((r) << 28) -+#define RGXRISCVFW_GET_REGION(a) IMG_UINT32_C((a) >> 28) -+#define RGXRISCVFW_MAX_REGION_SIZE IMG_UINT32_C(1 << 28) -+#define RGXRISCVFW_GET_REMAP(r) (RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 + ((r) * 8U)) -+ -+/* RISCV remap output is aligned to 4K */ -+#define RGXRISCVFW_REMAP_CONFIG_DEVVADDR_ALIGN (0x1000U) -+ -+/* -+ * FW bootloader defines -+ */ -+#define RGXRISCVFW_BOOTLDR_CODE_REGION IMG_UINT32_C(0xC) -+#define RGXRISCVFW_BOOTLDR_DATA_REGION IMG_UINT32_C(0x5) -+#define RGXRISCVFW_BOOTLDR_CODE_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_BOOTLDR_CODE_REGION)) -+#define RGXRISCVFW_BOOTLDR_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_BOOTLDR_DATA_REGION)) -+#define RGXRISCVFW_BOOTLDR_CODE_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_BOOTLDR_CODE_REGION)) -+#define RGXRISCVFW_BOOTLDR_DATA_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_BOOTLDR_DATA_REGION)) -+ -+/* Bootloader data offset in dwords from the beginning of the FW data allocation */ -+#define RGXRISCVFW_BOOTLDR_CONF_OFFSET (0x0) -+ -+/* -+ * FW coremem region defines -+ */ -+#define RGXRISCVFW_COREMEM_REGION IMG_UINT32_C(0x8) -+#define RGXRISCVFW_COREMEM_MAX_SIZE IMG_UINT32_C(0x10000000) /* 256 MB */ -+#define RGXRISCVFW_COREMEM_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_COREMEM_REGION)) -+#define RGXRISCVFW_COREMEM_END (RGXRISCVFW_COREMEM_BASE + RGXRISCVFW_COREMEM_MAX_SIZE - 1U) -+ -+ -+/* -+ * Host-FW shared data defines -+ */ -+#define RGXRISCVFW_SHARED_CACHED_DATA_REGION (0x6UL) -+#define RGXRISCVFW_SHARED_UNCACHED_DATA_REGION (0xDUL) -+#define RGXRISCVFW_SHARED_CACHED_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SHARED_CACHED_DATA_REGION)) -+#define RGXRISCVFW_SHARED_UNCACHED_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SHARED_UNCACHED_DATA_REGION)) -+#define RGXRISCVFW_SHARED_CACHED_DATA_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_SHARED_CACHED_DATA_REGION)) -+#define RGXRISCVFW_SHARED_UNCACHED_DATA_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_SHARED_UNCACHED_DATA_REGION)) -+ -+ -+/* -+ * GPU SOCIF access defines -+ */ -+#define RGXRISCVFW_SOCIF_REGION (0x2U) -+#define 
RGXRISCVFW_SOCIF_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SOCIF_REGION)) -+ -+ -+/* The things that follow are excluded when compiling assembly sources */ -+#if !defined(RGXRISCVFW_ASSEMBLY_CODE) -+#include "img_types.h" -+ -+#define RGXFW_PROCESSOR_RISCV "RISCV" -+#define RGXRISCVFW_CORE_ID_VALUE (0x00450B02U) -+#define RGXRISCVFW_MISA_ADDR (0x301U) -+#define RGXRISCVFW_MISA_VALUE (0x40001104U) -+#define RGXRISCVFW_MSCRATCH_ADDR (0x340U) -+ -+typedef struct -+{ -+ IMG_UINT64 ui64CorememCodeDevVAddr; -+ IMG_UINT64 ui64CorememDataDevVAddr; -+ IMG_UINT32 ui32CorememCodeFWAddr; -+ IMG_UINT32 ui32CorememDataFWAddr; -+ IMG_UINT32 ui32CorememCodeSize; -+ IMG_UINT32 ui32CorememDataSize; -+ IMG_UINT32 ui32Flags; -+ IMG_UINT32 ui32Reserved; -+} RGXRISCVFW_BOOT_DATA; -+ -+/* -+ * List of registers to be printed in debug dump. -+ * First column: register names (general purpose or control/status registers) -+ * Second column: register number to be used in abstract access register command -+ * (see RISC-V debug spec v0.13) -+ */ -+#define RGXRISCVFW_DEBUG_DUMP_REGISTERS \ -+ X(pc, 0x7b1) /* dpc */ \ -+ X(ra, 0x1001) \ -+ X(sp, 0x1002) \ -+ X(mepc, 0x341) \ -+ X(mcause, 0x342) \ -+ X(mdseac, 0xfc0) \ -+ X(mstatus, 0x300) \ -+ X(mie, 0x304) \ -+ X(mip, 0x344) \ -+ X(mscratch, 0x340) \ -+ X(mbvnc0, 0xffe) \ -+ X(mbvnc1, 0xfff) \ -+ X(micect, 0x7f0) \ -+ X(mdcect, 0x7f3) \ -+ X(mdcrfct, 0x7f4) \ -+ -+typedef struct -+{ -+#define X(name, address) \ -+ IMG_UINT32 name; -+ -+ RGXRISCVFW_DEBUG_DUMP_REGISTERS -+#undef X -+} RGXRISCVFW_STATE; -+ -+ -+#define RGXRISCVFW_MCAUSE_INTERRUPT (1U << 31) -+ -+#define RGXRISCVFW_MCAUSE_TABLE \ -+ X(0x00000000U, IMG_FALSE, "NMI pin assertion") /* Also reset value */ \ -+ X(0x00000001U, IMG_TRUE, "Instruction access fault") \ -+ X(0x00000002U, IMG_TRUE, "Illegal instruction") \ -+ X(0x00000003U, IMG_TRUE, "Breakpoint") \ -+ X(0x00000004U, IMG_TRUE, "Load address misaligned") \ -+ X(0x00000005U, IMG_TRUE, "Load access fault") \ -+ X(0x00000006U, IMG_TRUE, "Store/AMO address misaligned") \ -+ X(0x00000007U, IMG_TRUE, "Store/AMO access fault") \ -+ X(0x0000000BU, IMG_TRUE, "Environment call from M-mode (FW assert)") \ -+ X(0x80000007U, IMG_FALSE, "Machine timer interrupt") \ -+ X(0x8000000BU, IMG_FALSE, "Machine external interrupt") \ -+ X(0x8000001EU, IMG_FALSE, "Machine correctable error local interrupt") \ -+ X(0xF0000000U, IMG_TRUE, "Machine D-bus store error NMI") \ -+ X(0xF0000001U, IMG_TRUE, "Machine D-bus non-blocking load error NMI") \ -+ X(0xF0000002U, IMG_TRUE, "dCache unrecoverable NMI") -+ -+ -+/* Debug module HW defines */ -+#define RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER (0U) -+#define RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY (2U) -+#define RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT (2UL << 20) -+#define RGXRISCVFW_DMI_COMMAND_WRITE (1UL << 16) -+#define RGXRISCVFW_DMI_COMMAND_READ (0UL << 16) -+#define RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT (2U) -+ -+/* Abstract command error codes (descriptions from RISC-V debug spec v0.13) */ -+typedef IMG_UINT32 RGXRISCVFW_ABSTRACT_CMD_ERR; -+ -+/* No error. */ -+#define RISCV_ABSTRACT_CMD_NO_ERROR 0U -+ -+/* -+ * An abstract command was executing while command, abstractcs, or abstractauto -+ * was written, or when one of the data or progbuf registers was read or -+ * written. This status is only written if cmderr contains 0. -+ */ -+#define RISCV_ABSTRACT_CMD_BUSY 1U -+ -+/* -+ * The requested command is not supported, regardless of whether -+ * the hart is running or not. 
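The RGXRISCVFW_DEBUG_DUMP_REGISTERS table above is an X-macro: the same list is expanded once with X defined to emit a struct member (producing RGXRISCVFW_STATE) and can be expanded again to build a matching name/register-number table for dumping. The standalone sketch below demonstrates the idiom with a shortened copy of the register list and plain C types so it compiles outside the driver; it is not the driver's own dump code.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Shortened copy of the register list above (name, debug-spec register number). */
#define DEBUG_DUMP_REGISTERS \
    X(pc,      0x7b1) \
    X(mepc,    0x341) \
    X(mcause,  0x342) \
    X(mstatus, 0x300)

/* One expansion produces the state struct... */
typedef struct {
#define X(name, address) uint32_t name;
    DEBUG_DUMP_REGISTERS
#undef X
} riscv_fw_state;

/* ...another produces a matching name/register-number table. */
static const struct { const char *name; unsigned regnum; } reg_table[] = {
#define X(name, address) { #name, address },
    DEBUG_DUMP_REGISTERS
#undef X
};

int main(void)
{
    riscv_fw_state state = { .pc = 0xC0000100u, .mcause = 0x0000000Bu };
    const uint32_t *values = (const uint32_t *)&state;

    for (size_t i = 0; i < sizeof(reg_table) / sizeof(reg_table[0]); i++)
        printf("%-8s (0x%03x) = 0x%08x\n",
               reg_table[i].name, reg_table[i].regnum, (unsigned)values[i]);
    return 0;
}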
-+ */ -+#define RISCV_ABSTRACT_CMD_NOT_SUPPORTED 2U -+ -+/* -+ * An exception occurred while executing the command -+ * (e.g. while executing the Program Buffer). -+ */ -+#define RISCV_ABSTRACT_CMD_EXCEPTION 3U -+ -+/* -+ * The abstract command couldn't execute because the hart wasn't in the required -+ * state (running/halted), or unavailable. -+ */ -+#define RISCV_ABSTRACT_CMD_HALT_RESUME 4U -+ -+/* -+ * The abstract command failed due to a bus error -+ * (e.g. alignment, access size, or timeout). -+ */ -+#define RISCV_ABSTRACT_CMD_BUS_ERROR 5U -+ -+/* The command failed for another reason. */ -+#define RISCV_ABSTRACT_CMD_OTHER_ERROR 7U -+ -+ -+/* System Bus error codes (descriptions from RISC-V debug spec v0.13) */ -+typedef IMG_UINT32 RGXRISCVFW_SYSBUS_ERR; -+ -+/* There was no bus error. */ -+#define RISCV_SYSBUS_NO_ERROR 0U -+ -+/* There was a timeout. */ -+#define RISCV_SYSBUS_TIMEOUT 1U -+ -+/* A bad address was accessed. */ -+#define RISCV_SYSBUS_BAD_ADDRESS 2U -+ -+/* There was an alignment error. */ -+#define RISCV_SYSBUS_BAD_ALIGNMENT 3U -+ -+/* An access of unsupported size was requested. */ -+#define RISCV_SYSBUS_UNSUPPORTED_SIZE 4U -+ -+/* Other. */ -+#define RISCV_SYSBUS_OTHER_ERROR 7U -+ -+ -+#endif /* RGXRISCVFW_ASSEMBLY_CODE */ -+ -+#endif /* RGX_RISCV_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgx_tq_shared.h b/drivers/gpu/drm/img-rogue/rgx_tq_shared.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgx_tq_shared.h -@@ -0,0 +1,61 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX transfer queue shared -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Shared definitions between client and server -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
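The region helpers in rgx_riscv.h above carve the firmware's 32-bit virtual space into sixteen 256MB regions selected by the top nibble of an address, with one 8-byte remap register pair per region (RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 + region * 8). The following standalone check copies the two shift macros and the region numbers to show the round trip; it is illustrative only.

#include <assert.h>
#include <stdint.h>

/* Local copies of the rgx_riscv.h helpers and region numbers. */
#define GET_REGION_BASE(r)  ((uint32_t)((r) << 28))
#define GET_REGION(a)       ((uint32_t)((a) >> 28))

#define BOOTLDR_CODE_REGION 0xCu   /* -> 0xC0000000 */
#define BOOTLDR_DATA_REGION 0x5u   /* -> 0x50000000 */
#define COREMEM_REGION      0x8u   /* -> 0x80000000 */

int main(void)
{
    /* The top nibble of a firmware virtual address selects its 256MB region. */
    assert(GET_REGION_BASE(BOOTLDR_CODE_REGION) == 0xC0000000u);
    assert(GET_REGION_BASE(BOOTLDR_DATA_REGION) == 0x50000000u);

    /* A coremem address decodes back to region 0x8. */
    uint32_t fw_addr = GET_REGION_BASE(COREMEM_REGION) + 0x1234u;
    assert(GET_REGION(fw_addr) == COREMEM_REGION);
    return 0;
}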
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RGX_TQ_SHARED_H -+#define RGX_TQ_SHARED_H -+ -+#define TQ_MAX_PREPARES_PER_SUBMIT 16U -+ -+#define TQ_PREP_FLAGS_COMMAND_3D 0x0U -+#define TQ_PREP_FLAGS_COMMAND_2D 0x1U -+#define TQ_PREP_FLAGS_COMMAND_MASK (0xfU) -+#define TQ_PREP_FLAGS_COMMAND_SHIFT 0 -+#define TQ_PREP_FLAGS_PDUMPCONTINUOUS (1U << 4) -+ -+#define TQ_PREP_FLAGS_COMMAND_SET(m) \ -+ ((TQ_PREP_FLAGS_COMMAND_##m << TQ_PREP_FLAGS_COMMAND_SHIFT) & TQ_PREP_FLAGS_COMMAND_MASK) -+ -+#define TQ_PREP_FLAGS_COMMAND_IS(m,n) \ -+ (((m & TQ_PREP_FLAGS_COMMAND_MASK) >> TQ_PREP_FLAGS_COMMAND_SHIFT) == TQ_PREP_FLAGS_COMMAND_##n) -+ -+#endif /* RGX_TQ_SHARED_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxapi_km.h b/drivers/gpu/drm/img-rogue/rgxapi_km.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxapi_km.h -@@ -0,0 +1,336 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX API Header kernel mode -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Exported RGX API details -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
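The TQ prepare-flag macros in rgx_tq_shared.h above pack the command type into the low nibble and keep PDump control bits above it. The standalone check below reproduces those macros (with parentheses added around the macro argument of COMMAND_IS) and shows how a 2D prepare with continuous PDump capture is encoded and tested.

#include <assert.h>
#include <stdint.h>

/* Local copies of the rgx_tq_shared.h flag helpers. */
#define TQ_PREP_FLAGS_COMMAND_3D        0x0u
#define TQ_PREP_FLAGS_COMMAND_2D        0x1u
#define TQ_PREP_FLAGS_COMMAND_MASK      0xFu
#define TQ_PREP_FLAGS_COMMAND_SHIFT     0
#define TQ_PREP_FLAGS_PDUMPCONTINUOUS   (1u << 4)

#define TQ_PREP_FLAGS_COMMAND_SET(m) \
    ((TQ_PREP_FLAGS_COMMAND_##m << TQ_PREP_FLAGS_COMMAND_SHIFT) & TQ_PREP_FLAGS_COMMAND_MASK)

#define TQ_PREP_FLAGS_COMMAND_IS(m, n) \
    ((((m) & TQ_PREP_FLAGS_COMMAND_MASK) >> TQ_PREP_FLAGS_COMMAND_SHIFT) == TQ_PREP_FLAGS_COMMAND_##n)

int main(void)
{
    /* Pack a 2D command that should also be captured continuously in PDump. */
    uint32_t flags = TQ_PREP_FLAGS_COMMAND_SET(2D) | TQ_PREP_FLAGS_PDUMPCONTINUOUS;

    assert(TQ_PREP_FLAGS_COMMAND_IS(flags, 2D));
    assert(!TQ_PREP_FLAGS_COMMAND_IS(flags, 3D));
    assert(flags & TQ_PREP_FLAGS_PDUMPCONTINUOUS);
    return 0;
}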
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RGXAPI_KM_H -+#define RGXAPI_KM_H -+ -+#if defined(SUPPORT_SHARED_SLC) -+/*************************************************************************/ /*! -+@Function RGXInitSLC -+@Description Init the SLC after a power up. It is required to call this -+ function if using SUPPORT_SHARED_SLC. Otherwise, it shouldn't -+ be called. -+ -+@Input hDevHandle RGX Device Node -+@Return PVRSRV_ERROR System error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle); -+#endif -+ -+#include "rgx_hwperf.h" -+ -+ -+/****************************************************************************** -+ * RGX HW Performance Profiling Control API(s) -+ *****************************************************************************/ -+ -+/*! HWPerf device identification structure */ -+typedef struct _RGX_HWPERF_DEVICE_ -+{ -+ IMG_CHAR pszName[20]; /*!< Helps identify this device uniquely */ -+ IMG_HANDLE hDevData; /*!< Handle for the server */ -+ -+ struct _RGX_HWPERF_DEVICE_ *psNext; /*!< Next device if any */ -+} RGX_HWPERF_DEVICE; -+ -+/*! HWPerf connection structure */ -+typedef struct -+{ -+ RGX_HWPERF_DEVICE *psHWPerfDevList; /*!< Pointer to list of devices */ -+} RGX_HWPERF_CONNECTION; -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfLazyConnect -+@Description Obtain a HWPerf connection object to the RGX device(s). The -+ connections to devices are not actually opened until -+ HWPerfOpen() is called. -+ -+@Output ppsHWPerfConnection Address of a HWPerf connection object -+@Return PVRSRV_ERROR System error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection); -+ -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfOpen -+@Description Opens connection(s) to the RGX device(s). Valid handle to the -+ connection object has to be provided which means the this -+ function needs to be preceded by the call to -+ RGXHWPerfLazyConnect() function. -+ -+@Input psHWPerfConnection HWPerf connection object -+@Return PVRSRV_ERROR System error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION* psHWPerfConnection); -+ -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfConnect -+@Description Obtain a connection object to the RGX HWPerf module. Allocated -+ connection object(s) reference opened connection(s). Calling -+ this function is an equivalent of calling RGXHWPerfLazyConnect -+ and RGXHWPerfOpen. This connect should be used when the caller -+ will be retrieving event data. 
-+ -+@Output ppsHWPerfConnection Address of HWPerf connection object -+@Return PVRSRV_ERROR System error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection); -+ -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfFreeConnection -+@Description Frees the HWPerf connection object -+ -+@Input psHWPerfConnection Pointer to connection object as returned -+ from RGXHWPerfLazyConnect() -+@Return PVRSRV_ERROR System error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** psHWPerfConnection); -+ -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfClose -+@Description Closes all the opened connection(s) to RGX device(s) -+ -+@Input psHWPerfConnection Pointer to HWPerf connection object as -+ returned from RGXHWPerfConnect() or -+ RGXHWPerfOpen() -+@Return PVRSRV_ERROR System error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection); -+ -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfDisconnect -+@Description Disconnect from the RGX device -+ -+@Input ppsHWPerfConnection Pointer to HWPerf connection object as -+ returned from RGXHWPerfConnect() or -+ RGXHWPerfOpen(). Calling this function is -+ an equivalent of calling RGXHWPerfClose() -+ and RGXHWPerfFreeConnection(). -+@Return PVRSRV_ERROR System error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection); -+ -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfControl -+@Description Enable or disable the generation of RGX HWPerf event packets. -+ See RGXCtrlHWPerf(). -+ -+@Input psHWPerfConnection Pointer to HWPerf connection object -+@Input eStreamId ID of the HWPerf stream -+@Input bToggle Switch to toggle or apply mask. -+@Input ui64Mask Mask of events to control. -+@Return PVRSRV_ERROR System error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXHWPerfControl( -+ RGX_HWPERF_CONNECTION *psHWPerfConnection, -+ RGX_HWPERF_STREAM_ID eStreamId, -+ IMG_BOOL bToggle, -+ IMG_UINT64 ui64Mask); -+ -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfGetFilter -+@Description Reads HWPerf stream filter where stream is identified by the -+ given stream ID. -+ -+@Input hDevData Handle to connection/device object -+@Input eStreamId ID of the HWPerf stream -+@Output ui64Filter HWPerf filter value -+@Return PVRSRV_ERROR System error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXHWPerfGetFilter( -+ IMG_HANDLE hDevData, -+ RGX_HWPERF_STREAM_ID eStreamId, -+ IMG_UINT64 *ui64Filter -+); -+ -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfConfigMuxCounters -+@Description Enable and configure the performance counter block for one or -+ more device layout modules. -+ See RGXHWPerfConfigureAndEnableCustomCounters(). 
-+ -+@Input psHWPerfConnection Pointer to HWPerf connection object -+@Input ui32NumBlocks Number of elements in the array -+@Input asBlockConfigs Address of the array of configuration blocks -+@Return PVRSRV_ERROR System error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXHWPerfConfigMuxCounters( -+ RGX_HWPERF_CONNECTION *psHWPerfConnection, -+ IMG_UINT32 ui32NumBlocks, -+ RGX_HWPERF_CONFIG_MUX_CNTBLK *asBlockConfigs); -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfConfigureAndEnableCustomCounters -+@Description Enable and configure custom performance counters -+ -+@Input psHWPerfConnection Pointer to HWPerf connection object -+@Input ui16CustomBlockID ID of the custom block to configure -+@Input ui16NumCustomCounters Number of custom counters -+@Input pui32CustomCounterIDs Pointer to array containing custom -+ counter IDs -+@Return PVRSRV_ERROR System error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXHWPerfConfigureAndEnableCustomCounters( -+ RGX_HWPERF_CONNECTION *psHWPerfConnection, -+ IMG_UINT16 ui16CustomBlockID, -+ IMG_UINT16 ui16NumCustomCounters, -+ IMG_UINT32 *pui32CustomCounterIDs); -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfDisableCounters -+@Description Disable the performance counter block for one or more device -+ layout modules. -+ -+@Input psHWPerfConnection Pointer to HWPerf connection object -+@Input ui32NumBlocks Number of elements in the array -+@Input aeBlockIDs An array of words with values taken from -+ the RGX_HWPERF_CNTBLK_ID -+ enumeration. -+@Return PVRSRV_ERROR System error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXHWPerfDisableCounters( -+ RGX_HWPERF_CONNECTION *psHWPerfConnection, -+ IMG_UINT32 ui32NumBlocks, -+ IMG_UINT16* aeBlockIDs); -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfEnableCounters -+@Description Enable the performance counter block for one or more device -+ layout modules. -+ -+@Input psHWPerfConnection Pointer to HWPerf connection object -+@Input ui32NumBlocks Number of elements in the array -+@Input aeBlockIDs An array of words with values taken from the -+ RGX_HWPERF_CNTBLK_ID enumeration. -+@Return PVRSRV_ERROR System error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXHWPerfEnableCounters( -+ RGX_HWPERF_CONNECTION *psHWPerfConnection, -+ IMG_UINT32 ui32NumBlocks, -+ IMG_UINT16* aeBlockIDs); -+ -+/****************************************************************************** -+ * RGX HW Performance Profiling Retrieval API(s) -+ * -+ * The client must ensure their use of this acquire/release API for a single -+ * connection/stream must not be shared with multiple execution contexts e.g. -+ * between a kernel thread and an ISR handler. It is the client's -+ * responsibility to ensure this API is not interrupted by a high priority -+ * thread/ISR -+ *****************************************************************************/ -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfAcquireEvents -+@Description When there is data available to read this call returns with OK -+ and the address and length of the data buffer the client can -+ safely read. 
This buffer may contain one or more event packets. -+ When there is no data to read, this call returns with OK and -+ sets *puiBufLen to 0 on exit. -+ Clients must pair this call with a RGXHWPerfReleaseEvents() -+ call. -+ Data returned in ppBuf will be in the form of a sequence of -+ HWPerf packets which should be traversed using the pointers, -+ structures and macros provided by rgx_hwperf.h. -+ -+@Input hDevData Handle to connection/device object -+@Input eStreamId ID of the HWPerf stream -+@Output ppBuf Address of a pointer to a byte buffer. On exit it -+ contains the address of buffer to read from -+@Output pui32BufLen Pointer to an integer. On exit it is the size of -+ the data to read from the buffer -+@Return PVRSRV_ERROR System error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXHWPerfAcquireEvents( -+ IMG_HANDLE hDevData, -+ RGX_HWPERF_STREAM_ID eStreamId, -+ IMG_PBYTE* ppBuf, -+ IMG_UINT32* pui32BufLen); -+ -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfReleaseEvents -+@Description Called after client has read the event data out of the buffer -+ retrieved from the Acquire Events call to release resources. -+ -+@Input hDevData Handle to connection/device object -+@Input eStreamId ID of the HWPerf stream -+@Return PVRSRV_ERROR System error code -+*/ /**************************************************************************/ -+IMG_INTERNAL -+PVRSRV_ERROR RGXHWPerfReleaseEvents( -+ IMG_HANDLE hDevData, -+ RGX_HWPERF_STREAM_ID eStreamId); -+ -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfConvertCRTimeStamp -+@Description Converts the timestamp given by FW events to the common OS -+ timestamp. The first three inputs are obtained via a CLK_SYNC -+ event, ui64CRTimeStamp is the CR timestamp from the FW event -+ to be converted. -+ -+@Input ui32ClkSpeed Clock speed given by sync event -+@Input ui64CorrCRTimeStamp CR Timestamp given by sync event -+@Input ui64CorrOSTimeStamp Correlating OS Timestamp given by sync -+ event -+@Input ui64CRTimeStamp CR Timestamp to convert -+@Return IMG_UINT64 Calculated OS Timestamp -+*/ /**************************************************************************/ -+IMG_UINT64 RGXHWPerfConvertCRTimeStamp( -+ IMG_UINT32 ui32ClkSpeed, -+ IMG_UINT64 ui64CorrCRTimeStamp, -+ IMG_UINT64 ui64CorrOSTimeStamp, -+ IMG_UINT64 ui64CRTimeStamp); -+ -+#endif /* RGXAPI_KM_H */ -+ -+/****************************************************************************** -+ End of file (rgxapi_km.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxbreakpoint.c b/drivers/gpu/drm/img-rogue/rgxbreakpoint.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxbreakpoint.c -@@ -0,0 +1,292 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX Breakpoint routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX Breakpoint routines -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
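The acquire/release contract described above lends itself to a simple drain loop: acquire, parse whatever was returned, release, and stop once an acquire reports zero bytes. The hypothetical sketch below follows that contract; process_packets() is a placeholder for the caller's packet parsing and the IMG_* types come from the driver headers, so this is a usage sketch rather than standalone buildable code.

#include "rgxapi_km.h"

/* Placeholder for the caller's HWPerf packet parsing (see rgx_hwperf.h). */
static void process_packets(IMG_PBYTE pbData, IMG_UINT32 ui32Len);

static PVRSRV_ERROR drain_hwperf_stream(IMG_HANDLE hDevData,
                                        RGX_HWPERF_STREAM_ID eStreamId)
{
    PVRSRV_ERROR eError;
    IMG_PBYTE pBuf;
    IMG_UINT32 ui32BufLen;

    do
    {
        /* Returns OK with ui32BufLen == 0 when there is nothing to read. */
        eError = RGXHWPerfAcquireEvents(hDevData, eStreamId, &pBuf, &ui32BufLen);
        if (eError != PVRSRV_OK)
            return eError;

        if (ui32BufLen > 0)
            process_packets(pBuf, ui32BufLen);

        /* Every acquire is paired with a release, as the API requires. */
        eError = RGXHWPerfReleaseEvents(hDevData, eStreamId);
        if (eError != PVRSRV_OK)
            return eError;
    } while (ui32BufLen > 0);

    return PVRSRV_OK;
}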
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#include "rgxbreakpoint.h" -+#include "pvr_debug.h" -+#include "rgxutils.h" -+#include "rgxfwutils.h" -+#include "rgxmem.h" -+#include "device.h" -+#include "sync_internal.h" -+#include "pdump_km.h" -+#include "pvrsrv.h" -+ -+PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_HANDLE hMemCtxPrivData, -+ RGXFWIF_DM eFWDataMaster, -+ IMG_UINT64 ui64TempSpillingAddr, -+ IMG_UINT32 ui32BPAddr, -+ IMG_UINT32 ui32HandlerAddr, -+ IMG_UINT32 ui32DataMaster) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGXFWIF_KCCB_CMD sBPCmd; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ OSLockAcquire(psDevInfo->hBPLock); -+ -+ if (psDevInfo->bBPSet) -+ { -+ eError = PVRSRV_ERROR_BP_ALREADY_SET; -+ goto unlock; -+ } -+ -+ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; -+ sBPCmd.uCmdData.sBPData.ui32BPAddr = ui32BPAddr; -+ sBPCmd.uCmdData.sBPData.ui32HandlerAddr = ui32HandlerAddr; -+ sBPCmd.uCmdData.sBPData.ui32BPDM = ui32DataMaster; -+ sBPCmd.uCmdData.sBPData.ui64SpillAddr = ui64TempSpillingAddr; -+ sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_ENABLE; -+ sBPCmd.uCmdData.sBPData.eDM = eFWDataMaster; -+ -+ eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, -+ psFWMemContextMemDesc, -+ 0 , -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock); -+ -+ eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, -+ eFWDataMaster, -+ &sBPCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ &ui32kCCBCommandSlot); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); -+ -+ /* Wait for FW to complete command execution */ -+ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); -+ -+ psDevInfo->eBPDM = eFWDataMaster; -+ psDevInfo->bBPSet = IMG_TRUE; -+ -+unlock: -+ OSLockRelease(psDevInfo->hBPLock); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_HANDLE hMemCtxPrivData) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGXFWIF_KCCB_CMD sBPCmd; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; -+ sBPCmd.uCmdData.sBPData.ui32BPAddr = 0; -+ sBPCmd.uCmdData.sBPData.ui32HandlerAddr = 0; -+ sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_CTL; -+ sBPCmd.uCmdData.sBPData.eDM = psDevInfo->eBPDM; -+ -+ OSLockAcquire(psDevInfo->hBPLock); -+ -+ eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, -+ psFWMemContextMemDesc, -+ 0 , -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock); -+ -+ eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, -+ psDevInfo->eBPDM, -+ &sBPCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ &ui32kCCBCommandSlot); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); -+ -+ /* Wait for FW to complete command execution */ -+ eError = 
RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); -+ -+ psDevInfo->bBPSet = IMG_FALSE; -+ -+unlock: -+ OSLockRelease(psDevInfo->hBPLock); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_HANDLE hMemCtxPrivData) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGXFWIF_KCCB_CMD sBPCmd; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ OSLockAcquire(psDevInfo->hBPLock); -+ -+ if (psDevInfo->bBPSet == IMG_FALSE) -+ { -+ eError = PVRSRV_ERROR_BP_NOT_SET; -+ goto unlock; -+ } -+ -+ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; -+ sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_CTL | RGXFWIF_BPDATA_FLAGS_ENABLE; -+ sBPCmd.uCmdData.sBPData.eDM = psDevInfo->eBPDM; -+ -+ eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, -+ psFWMemContextMemDesc, -+ 0 , -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock); -+ -+ eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, -+ psDevInfo->eBPDM, -+ &sBPCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ &ui32kCCBCommandSlot); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); -+ -+ /* Wait for FW to complete command execution */ -+ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); -+ -+unlock: -+ OSLockRelease(psDevInfo->hBPLock); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_HANDLE hMemCtxPrivData) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGXFWIF_KCCB_CMD sBPCmd; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ OSLockAcquire(psDevInfo->hBPLock); -+ -+ if (psDevInfo->bBPSet == IMG_FALSE) -+ { -+ eError = PVRSRV_ERROR_BP_NOT_SET; -+ goto unlock; -+ } -+ -+ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; -+ sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_CTL; -+ sBPCmd.uCmdData.sBPData.eDM = psDevInfo->eBPDM; -+ -+ eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, -+ psFWMemContextMemDesc, -+ 0 , -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock); -+ -+ eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, -+ psDevInfo->eBPDM, -+ &sBPCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ &ui32kCCBCommandSlot); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); -+ -+ /* Wait for FW to complete command execution */ -+ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); -+ -+unlock: -+ OSLockRelease(psDevInfo->hBPLock); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT32 ui32TempRegs, -+ IMG_UINT32 ui32SharedRegs) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_ERROR eError = 
PVRSRV_OK; -+ RGXFWIF_KCCB_CMD sBPCmd; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; -+ sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_REGS; -+ sBPCmd.uCmdData.sBPData.ui32TempRegs = ui32TempRegs; -+ sBPCmd.uCmdData.sBPData.ui32SharedRegs = ui32SharedRegs; -+ sBPCmd.uCmdData.sBPData.psFWMemContext.ui32Addr = 0U; -+ sBPCmd.uCmdData.sBPData.eDM = RGXFWIF_DM_GP; -+ -+ OSLockAcquire(psDevInfo->hBPLock); -+ -+ eError = RGXScheduleCommandAndGetKCCBSlot(psDeviceNode->pvDevice, -+ RGXFWIF_DM_GP, -+ &sBPCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ &ui32kCCBCommandSlot); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); -+ -+ /* Wait for FW to complete command execution */ -+ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); -+ -+unlock: -+ OSLockRelease(psDevInfo->hBPLock); -+ -+ return eError; -+} -+ -+/****************************************************************************** -+ End of file (rgxbreakpoint.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxbreakpoint.h b/drivers/gpu/drm/img-rogue/rgxbreakpoint.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxbreakpoint.h -@@ -0,0 +1,142 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX breakpoint functionality -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the RGX breakpoint functionality -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGXBREAKPOINT_H) -+#define RGXBREAKPOINT_H -+ -+#include "pvr_debug.h" -+#include "rgxutils.h" -+#include "rgxfwutils.h" -+#include "rgx_fwif_km.h" -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVRGXSetBreakpointKM -+ -+ @Description -+ Server-side implementation of RGXSetBreakpoint -+ -+ @Input psDeviceNode - RGX Device node -+ @Input eDataMaster - Data Master to schedule command for -+ @Input hMemCtxPrivData - memory context private data -+ @Input ui32BPAddr - Address of breakpoint -+ @Input ui32HandlerAddr - Address of breakpoint handler -+ @Input ui32BPCtl - Breakpoint controls -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_HANDLE hMemCtxPrivData, -+ RGXFWIF_DM eFWDataMaster, -+ IMG_UINT64 ui64TempSpillingAddr, -+ IMG_UINT32 ui32BPAddr, -+ IMG_UINT32 ui32HandlerAddr, -+ IMG_UINT32 ui32DataMaster); -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVRGXClearBreakpointKM -+ -+ @Description -+ Server-side implementation of RGXClearBreakpoint -+ -+ @Input psDeviceNode - RGX Device node -+ @Input hMemCtxPrivData - memory context private data -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_HANDLE hMemCtxPrivData); -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVRGXEnableBreakpointKM -+ -+ @Description -+ Server-side implementation of RGXEnableBreakpoint -+ -+ @Input psDeviceNode - RGX Device node -+ @Input hMemCtxPrivData - memory context private data -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_HANDLE hMemCtxPrivData); -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVRGXDisableBreakpointKM -+ -+ @Description -+ Server-side implementation of RGXDisableBreakpoint -+ -+ @Input psDeviceNode - RGX Device node -+ @Input hMemCtxPrivData - memory context private data -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_HANDLE hMemCtxPrivData); -+ -+/*! 
-+******************************************************************************* -+ @Function PVRSRVRGXOverallocateBPRegistersKM -+ -+ @Description -+ Server-side implementation of RGXOverallocateBPRegisters -+ -+ @Input psDeviceNode - RGX Device node -+ @Input ui32TempRegs - Number of temporary registers to overallocate -+ @Input ui32SharedRegs - Number of shared registers to overallocate -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT32 ui32TempRegs, -+ IMG_UINT32 ui32SharedRegs); -+#endif /* RGXBREAKPOINT_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxbvnc.c b/drivers/gpu/drm/img-rogue/rgxbvnc.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxbvnc.c -@@ -0,0 +1,969 @@ -+/*************************************************************************/ /*! -+@File -+@Title BVNC handling specific routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Functions used for BNVC related work -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#include "img_defs.h" -+#include "rgxbvnc.h" -+#define RGXBVNC_C -+#include "rgx_bvnc_table_km.h" -+#undef RGXBVNC_C -+#include "os_apphint.h" -+#include "pvrsrv.h" -+#include "pdump_km.h" -+#include "rgx_compat_bvnc.h" -+#include "allocmem.h" -+ -+#define RGX_FEATURE_TRUE_VALUE_TYPE_UINT16 (RGX_FEATURE_VALUE_TYPE_UINT16 >> RGX_FEATURE_TYPE_BIT_SHIFT) -+#define RGX_FEATURE_TRUE_VALUE_TYPE_UINT32 (RGX_FEATURE_VALUE_TYPE_UINT32 >> RGX_FEATURE_TYPE_BIT_SHIFT) -+#define RGXBVNC_BUFFER_SIZE (((PVRSRV_MAX_DEVICES)*(RGX_BVNC_STR_SIZE_MAX))+1) -+ -+/* This function searches the given array for a given search value */ -+static IMG_UINT64* _RGXSearchBVNCTable( IMG_UINT64 *pui64Array, -+ IMG_UINT uiEnd, -+ IMG_UINT64 ui64SearchValue, -+ IMG_UINT uiColCount) -+{ -+ IMG_UINT uiStart = 0, index; -+ IMG_UINT64 value, *pui64Ptr = NULL; -+ -+ while (uiStart < uiEnd) -+ { -+ index = (uiStart + uiEnd)/2; -+ pui64Ptr = pui64Array + (index * uiColCount); -+ value = *(pui64Ptr); -+ -+ if (value == ui64SearchValue) -+ { -+ return pui64Ptr; -+ } -+ -+ if (value > ui64SearchValue) -+ { -+ uiEnd = index; -+ }else -+ { -+ uiStart = index + 1; -+ } -+ } -+ return NULL; -+} -+#define RGX_SEARCH_BVNC_TABLE(t, b) (_RGXSearchBVNCTable((IMG_UINT64*)(t), \ -+ ARRAY_SIZE(t), (b), \ -+ sizeof((t)[0])/sizeof(IMG_UINT64)) ) -+ -+ -+#if !defined(NO_HARDWARE) -+/*************************************************************************/ /*! -+@brief This function reads the (P)BVNC core_ID register and extracts -+ the BVNC configuration. Supports the old scheme and the newer -+ PBVNC scheme. -+@param psDeviceNode - Device Node pointer -+@param ui32CoreNum - Core/bank number (0 for single core) -+@param pB - Address of branch value (output) -+@param pV - Address of version value (output) -+@param pN - Address of number of clusters/scalable shading units value (output) -+@param pC - Address of configuration value (output) -+@return BVNC encoded in 64-bit value, 16-bits per field -+*/ /**************************************************************************/ -+static -+IMG_UINT64 _RGXReadBVNCFromReg(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32CoreNum, -+ IMG_UINT32 *pB, IMG_UINT32 *pV, IMG_UINT32 *pN, IMG_UINT32 *pC) -+{ -+ IMG_UINT64 ui64BVNC; -+ IMG_UINT32 B=0, V=0, N=0, C=0; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+#if defined(RGX_CR_CORE_ID__PBVNC) -+ /* Core ID reading code for Rogue */ -+ -+ /* Read the BVNC, in to new way first, if B not set, use old scheme */ -+ ui64BVNC = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID__PBVNC + (ui32CoreNum << 16)); -+ -+ if (GET_B(ui64BVNC)) -+ { -+ B = GET_PBVNC_B(ui64BVNC); -+ V = GET_PBVNC_V(ui64BVNC); -+ N = GET_PBVNC_N(ui64BVNC); -+ C = GET_PBVNC_C(ui64BVNC); -+ } -+ else -+ { -+ IMG_UINT64 ui32CoreID, ui32CoreRev; -+ ui32CoreRev = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_REVISION + (ui32CoreNum << 16)); -+ ui32CoreID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID + (ui32CoreNum << 16)); -+ B = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MAJOR_CLRMSK) >> -+ RGX_CR_CORE_REVISION_MAJOR_SHIFT; -+ V = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MINOR_CLRMSK) >> -+ RGX_CR_CORE_REVISION_MINOR_SHIFT; -+ N = (ui32CoreID & ~RGX_CR_CORE_ID_CONFIG_N_CLRMSK) >> -+ RGX_CR_CORE_ID_CONFIG_N_SHIFT; -+ C = (ui32CoreID & ~RGX_CR_CORE_ID_CONFIG_C_CLRMSK) >> -+ RGX_CR_CORE_ID_CONFIG_C_SHIFT; -+ ui64BVNC = rgx_bvnc_pack(B, V, N, C); -+ } -+#else -+ /* Core ID reading code for 
Volcanic */ -+ -+ ui64BVNC = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID + (ui32CoreNum << 16)); -+ -+ B = (ui64BVNC & ~RGX_CR_CORE_ID_BRANCH_ID_CLRMSK) >> -+ RGX_CR_CORE_ID_BRANCH_ID_SHIFT; -+ V = (ui64BVNC & ~RGX_CR_CORE_ID_VERSION_ID_CLRMSK) >> -+ RGX_CR_CORE_ID_VERSION_ID_SHIFT; -+ N = (ui64BVNC & ~RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK) >> -+ RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT; -+ C = (ui64BVNC & ~RGX_CR_CORE_ID_CONFIG_ID_CLRMSK) >> -+ RGX_CR_CORE_ID_CONFIG_ID_SHIFT; -+#endif -+ -+ *pB = B; *pV = V; *pN = N; *pC = C; -+ return ui64BVNC; -+} -+#endif -+ -+#if defined(DEBUG) || defined(SUPPORT_PERFORMANCE_RUN) -+ -+#define PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, szShortName, Feature) \ -+ if ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] != RGX_FEATURE_VALUE_DISABLED ) \ -+ { PVR_LOG(("%s %d", szShortName, psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX])); } \ -+ else \ -+ { PVR_LOG(("%s N/A", szShortName)); } -+ -+static void _RGXBvncDumpParsedConfig(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; -+ IMG_UINT64 ui64Mask = 0, ui32IdOrNameIdx = 1; -+ -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NC: ", NUM_CLUSTERS); -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "CSF: ", CDM_CONTROL_STREAM_FORMAT); -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "FBCDCA: ", FBCDC_ARCHITECTURE); -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "META: ", META); -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMB: ", META_COREMEM_BANKS); -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMS: ", META_COREMEM_SIZE); -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MDMACnt: ", META_DMA_CHANNEL_COUNT); -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NIIP: ", NUM_ISP_IPP_PIPES); -+#if defined(RGX_FEATURE_NUM_ISP_PER_SPU_MAX_VALUE_IDX) -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NIPS: ", NUM_ISP_PER_SPU); -+#endif -+#if defined(RGX_FEATURE_PBE_PER_SPU_MAX_VALUE_IDX) -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "PPS: ", PBE_PER_SPU); -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NSPU: ", NUM_SPU); -+#endif -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "PBW: ", PHYS_BUS_WIDTH); -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "STEArch: ", SCALABLE_TE_ARCH); -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SVCEA: ", SCALABLE_VCE); -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCBanks: ", SLC_BANKS); -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCCLS: ", SLC_CACHE_LINE_SIZE_BITS); -+ PVR_LOG(("SLCSize: %d", psDevInfo->sDevFeatureCfg.ui32SLCSizeInBytes)); -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "VASB: ", VIRTUAL_ADDRESS_SPACE_BITS); -+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NOSIDS: ", NUM_OSIDS); -+ -+#if defined(FEATURE_NO_VALUES_NAMES_MAX_IDX) -+ /* Dump the features with no values */ -+ ui64Mask = psDevInfo->sDevFeatureCfg.ui64Features; -+ while (ui64Mask) -+ { -+ if (ui64Mask & 0x01) -+ { -+ if (ui32IdOrNameIdx <= FEATURE_NO_VALUES_NAMES_MAX_IDX) -+ { -+ PVR_LOG(("%s", gaszFeaturesNoValuesNames[ui32IdOrNameIdx - 1])); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "Feature with Mask doesn't exist: 0x%016" IMG_UINT64_FMTSPECx, -+ ((IMG_UINT64)1 << (ui32IdOrNameIdx - 1)))); -+ } -+ } -+ ui64Mask >>= 1; -+ ui32IdOrNameIdx++; -+ } -+#endif -+ -+#if defined(ERNSBRNS_IDS_MAX_IDX) -+ /* Dump the ERN and BRN flags for this core */ -+ ui64Mask = psDevInfo->sDevFeatureCfg.ui64ErnsBrns; -+ ui32IdOrNameIdx = 1; -+ -+ while (ui64Mask) -+ { -+ if (ui64Mask & 0x1) -+ { -+ if (ui32IdOrNameIdx <= ERNSBRNS_IDS_MAX_IDX) -+ { -+ PVR_LOG(("ERN/BRN : %d", 
gaui64ErnsBrnsIDs[ui32IdOrNameIdx - 1])); -+ } -+ else -+ { -+ PVR_LOG(("Unknown ErnBrn bit: 0x%0" IMG_UINT64_FMTSPECx, ((IMG_UINT64)1 << (ui32IdOrNameIdx - 1)))); -+ } -+ } -+ ui64Mask >>= 1; -+ ui32IdOrNameIdx++; -+ } -+#endif -+ -+#if !defined(ERNSBRNS_IDS_MAX_IDX) && !defined(FEATURE_NO_VALUES_NAMES_MAX_IDX) -+ PVR_UNREFERENCED_PARAMETER(ui64Mask); -+ PVR_UNREFERENCED_PARAMETER(ui32IdOrNameIdx); -+#endif -+ -+} -+#endif -+ -+static PVRSRV_ERROR _RGXBvncParseFeatureValues(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT64 *pui64Cfg) -+{ -+ IMG_UINT32 ui32Index; -+ -+ /* Read the feature values for the runtime BVNC */ -+ for (ui32Index = 0; ui32Index < RGX_FEATURE_WITH_VALUES_MAX_IDX; ui32Index++) -+ { -+ IMG_UINT16 ui16BitPosition = aui16FeaturesWithValuesBitPositions[ui32Index]; -+ IMG_UINT64 ui64PackedValues = pui64Cfg[2 + ui16BitPosition / 64]; -+ IMG_UINT16 ui16ValueIndex = (ui64PackedValues & aui64FeaturesWithValuesBitMasks[ui32Index]) >> (ui16BitPosition % 64); -+ -+ if (ui16ValueIndex >= gaFeaturesValuesMaxIndexes[ui32Index]) -+ { -+ /* This case should never be reached */ -+ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_INVALID; -+ PVR_DPF((PVR_DBG_ERROR, "%s: Feature with index (%d) decoded wrong value index (%d)", __func__, ui32Index, ui16ValueIndex)); -+ PVR_ASSERT(ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index]); -+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS; -+ } -+ -+ switch (ui16BitPosition >> RGX_FEATURE_TYPE_BIT_SHIFT) -+ { -+ case RGX_FEATURE_TRUE_VALUE_TYPE_UINT16: -+ { -+ IMG_UINT16 *pui16FeatureValues = (IMG_UINT16*)gaFeaturesValues[ui32Index]; -+ if (pui16FeatureValues[ui16ValueIndex] == (IMG_UINT16)RGX_FEATURE_VALUE_DISABLED) -+ { -+ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = -+ RGX_FEATURE_VALUE_DISABLED; -+ } -+ else -+ { -+ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = -+ pui16FeatureValues[ui16ValueIndex]; -+ } -+ break; -+ } -+ case RGX_FEATURE_TRUE_VALUE_TYPE_UINT32: -+ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = -+ ((IMG_UINT32*)gaFeaturesValues[ui32Index])[ui16ValueIndex]; -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Feature with index %d has invalid feature type", -+ __func__, -+ ui32Index)); -+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS; -+ } -+ } -+ -+#if defined(RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX) -+ /* Code path for Volcanic */ -+ -+ psDevInfo->sDevFeatureCfg.ui32MAXDMCount = RGXFWIF_DM_CDM+1; -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && -+ RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 1) -+ { -+ psDevInfo->sDevFeatureCfg.ui32MAXDMCount = MAX(psDevInfo->sDevFeatureCfg.ui32MAXDMCount, RGXFWIF_DM_RAY+1); -+ } -+#if defined(SUPPORT_AGP) -+ psDevInfo->sDevFeatureCfg.ui32MAXDMCount = MAX(psDevInfo->sDevFeatureCfg.ui32MAXDMCount, RGXFWIF_DM_GEOM2+1); -+#if defined(SUPPORT_AGP4) -+ psDevInfo->sDevFeatureCfg.ui32MAXDMCount = MAX(psDevInfo->sDevFeatureCfg.ui32MAXDMCount, RGXFWIF_DM_GEOM4+1); -+#endif -+#endif -+ -+ /* Get the max number of dusts in the core */ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS)) -+ { -+ RGX_LAYER_PARAMS sParams = {.psDevInfo = psDevInfo}; -+ -+ if (RGX_DEVICE_GET_FEATURE_VALUE(&sParams, POWER_ISLAND_VERSION) == 1) -+ { -+ /* per SPU power island */ -+ psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = MAX(1, (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) / 2)); -+ } -+ else if (RGX_DEVICE_GET_FEATURE_VALUE(&sParams, POWER_ISLAND_VERSION) >= 2) -+ { -+ /* per Cluster power island */ -+ 
psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS); -+ } -+ else -+ { -+ /* All volcanic cores support power islanding */ -+ psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_FEATURE_VALUE_INVALID; -+ PVR_DPF((PVR_DBG_ERROR, "%s: Power island feature version not found!", __func__)); -+ PVR_ASSERT(0); -+ return PVRSRV_ERROR_FEATURE_DISABLED; -+ } -+ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && -+ RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 1) -+ { -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RT_RAC_PER_SPU)) -+ { -+ psDevInfo->sDevFeatureCfg.ui32MAXRACCount = RGX_GET_FEATURE_VALUE(psDevInfo, NUM_SPU); -+ } -+ else -+ { -+ psDevInfo->sDevFeatureCfg.ui32MAXRACCount = 1; -+ } -+ } -+ } -+ else -+ { -+ /* This case should never be reached as all cores have clusters */ -+ psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_FEATURE_VALUE_INVALID; -+ PVR_DPF((PVR_DBG_ERROR, "%s: Number of clusters feature value missing!", __func__)); -+ PVR_ASSERT(0); -+ return PVRSRV_ERROR_FEATURE_DISABLED; -+ } -+#else /* defined(RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX) */ -+ /* Code path for Rogue */ -+ -+ psDevInfo->sDevFeatureCfg.ui32MAXDMCount = RGXFWIF_DM_CDM+1; -+#if defined(SUPPORT_AGP) -+ psDevInfo->sDevFeatureCfg.ui32MAXDMCount = MAX(psDevInfo->sDevFeatureCfg.ui32MAXDMCount, RGXFWIF_DM_GEOM2+1); -+#endif -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_IDX] = RGX_FEATURE_VALUE_DISABLED; -+ } -+ -+ /* Get the max number of dusts in the core */ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS)) -+ { -+ psDevInfo->sDevFeatureCfg.ui32MAXDustCount = MAX(1, (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) / 2)); -+ } -+ else -+ { -+ /* This case should never be reached as all cores have clusters */ -+ psDevInfo->sDevFeatureCfg.ui32MAXDustCount = RGX_FEATURE_VALUE_INVALID; -+ PVR_DPF((PVR_DBG_ERROR, "%s: Number of clusters feature value missing!", __func__)); -+ PVR_ASSERT(0); -+ return PVRSRV_ERROR_FEATURE_DISABLED; -+ } -+#endif /* defined(RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX) */ -+ -+ /* Transform the META coremem size info in bytes */ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) -+ { -+ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_COREMEM_SIZE_IDX] *= 1024; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR _RGXBvncAcquireAppHint(IMG_CHAR *pszBVNC, const IMG_UINT32 ui32RGXDevCount) -+{ -+ const IMG_CHAR *pszAppHintDefault = PVRSRV_APPHINT_RGXBVNC; -+ void *pvAppHintState = NULL; -+ IMG_UINT32 ui32BVNCCount = 0; -+ IMG_BOOL bRet; -+ IMG_CHAR *pszBVNCAppHint; -+ IMG_CHAR *pszCurrentBVNC; -+ pszBVNCAppHint = (IMG_CHAR *)OSAllocMem(RGXBVNC_BUFFER_SIZE); -+ if (pszBVNCAppHint == NULL) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ pszBVNCAppHint[0] = '\0'; -+ -+ pszCurrentBVNC = pszBVNCAppHint; -+ -+ OSCreateAppHintState(&pvAppHintState); -+ -+ bRet = (IMG_BOOL)OSGetAppHintSTRING(APPHINT_NO_DEVICE, -+ pvAppHintState, -+ RGXBVNC, -+ pszAppHintDefault, -+ pszBVNCAppHint, -+ RGXBVNC_BUFFER_SIZE -+ ); -+ -+ OSFreeAppHintState(pvAppHintState); -+ -+ if (!bRet || (pszBVNCAppHint[0] == '\0')) -+ { -+ OSFreeMem(pszBVNCAppHint); -+ return PVRSRV_OK; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC module param list: %s",__func__, pszBVNCAppHint)); -+ -+ while (*pszCurrentBVNC != '\0') -+ { -+ IMG_CHAR *pszNext = pszCurrentBVNC; -+ -+ if (ui32BVNCCount >= PVRSRV_MAX_DEVICES) -+ { -+ break; 
-+ } -+ -+ while (1) -+ { -+ if (*pszNext == ',') -+ { -+ pszNext[0] = '\0'; -+ pszNext++; -+ break; -+ } else if (*pszNext == '\0') -+ { -+ break; -+ } -+ pszNext++; -+ } -+ -+ if (ui32BVNCCount == ui32RGXDevCount) -+ { -+ OSStringLCopy(pszBVNC, pszCurrentBVNC, RGX_BVNC_STR_SIZE_MAX); -+ OSFreeMem(pszBVNCAppHint); -+ return PVRSRV_OK; -+ } -+ -+ ui32BVNCCount++; -+ pszCurrentBVNC = pszNext; -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, "%s: Given module parameters list is shorter than " -+ "number of actual devices", __func__)); -+ -+ /* If only one BVNC parameter is specified, the same is applied for all RGX -+ * devices detected */ -+ if (1 == ui32BVNCCount) -+ { -+ OSStringLCopy(pszBVNC, pszBVNCAppHint, RGX_BVNC_STR_SIZE_MAX); -+ } -+ -+ OSFreeMem(pszBVNCAppHint); -+ -+ return PVRSRV_OK; -+} -+ -+/* Function that parses the BVNC List passed as module parameter */ -+static PVRSRV_ERROR _RGXBvncParseList(IMG_UINT32 *pB, -+ IMG_UINT32 *pV, -+ IMG_UINT32 *pN, -+ IMG_UINT32 *pC, -+ const IMG_UINT32 ui32RGXDevCount) -+{ -+ IMG_CHAR aszBVNCString[RGX_BVNC_STR_SIZE_MAX]; -+ IMG_CHAR *pcTemp, *pcNext; -+ PVRSRV_ERROR eError; -+ -+ aszBVNCString[0] = '\0'; -+ -+ /* 4 components of a BVNC string is B, V, N & C */ -+#define RGX_BVNC_INFO_PARAMS (4) -+ -+ eError = _RGXBvncAcquireAppHint(aszBVNCString, ui32RGXDevCount); -+ -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+ if ('\0' == aszBVNCString[0]) -+ { -+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS; -+ } -+ -+ /* Parse the given RGX_BVNC string */ -+ pcTemp = &aszBVNCString[0]; -+ pcNext = strchr(pcTemp, '.'); -+ if (pcNext == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS; -+ } -+ -+ *pcNext = '\0'; -+ if (OSStringToUINT32(pcTemp, 0, pB) != PVRSRV_OK) -+ { -+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS; -+ } -+ pcTemp = pcNext+1; -+ /* remove any 'p' from the V string, as this will -+ * cause the call to OSStringToUINT32 to fail -+ */ -+ pcNext = strchr(pcTemp, 'p'); -+ if (pcNext) -+ { -+ /* found one- - changing to '\0' */ -+ *pcNext = '\0'; -+ /* Move to next '.' */ -+ pcNext++; -+ } -+ else -+ { -+ /* none found, so find next '.' and change to '\0' */ -+ pcNext = strchr(pcTemp, '.'); -+ if (pcNext == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS; -+ } -+ *pcNext = '\0'; -+ } -+ if (OSStringToUINT32(pcTemp, 0, pV) != PVRSRV_OK) -+ { -+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS; -+ } -+ pcTemp = pcNext+1; -+ pcNext = strchr(pcTemp, '.'); -+ if (pcNext == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS; -+ } -+ *pcNext = '\0'; -+ if (OSStringToUINT32(pcTemp, 0, pN) != PVRSRV_OK) -+ { -+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS; -+ } -+ pcTemp = pcNext+1; -+ if (OSStringToUINT32(pcTemp, 0, pC) != PVRSRV_OK) -+ { -+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS; -+ } -+ PVR_LOG(("BVNC module parameter honoured: %d.%d.%d.%d", *pB, *pV, *pN, *pC)); -+ -+ return PVRSRV_OK; -+} -+ -+#if !defined(NO_HARDWARE) -+/* -+ * This function obtains the SLCSize from the physical device for GPUs which provide -+ * this information. If the GPU does not provide support we return a value of 0 which will -+ * result in the BVNC supplied definition being used to provide the SLCSize. -+ * Must only be called from driver-live with hardware powered-on. 
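-+ * The register reports the size in kilobytes; the value returned by this function is converted to bytes.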
-+ */ -+static IMG_UINT32 _RGXBvncReadSLCSize(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ IMG_UINT64 ui64SLCSize = 0ULL; -+ -+#if defined(RGX_CR_SLC_SIZE_IN_KB) -+ /* Rogue hardware */ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_SIZE_CONFIGURABLE)) -+ { -+ ui64SLCSize = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SLC_SIZE_IN_KB); -+ if (ui64SLCSize == 0ULL) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Unexpected 0 SLC size. Using default", __func__)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: RGX_CR_SIZE_IN_KB = %u", __func__, -+ (IMG_UINT32) ui64SLCSize)); -+ } -+ } -+#else -+ /* Volcanic hardware */ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_SIZE_ADJUSTMENT)) -+ { -+ ui64SLCSize = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SLC_STATUS2); -+ ui64SLCSize &= ~RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_CLRMSK; -+ ui64SLCSize >>= RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_SHIFT; -+ -+ if (ui64SLCSize == 0ULL) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Unexpected 0 SLC size. Using default", __func__)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: SLC_SIZE_IN_KILOBYTES = %u", __func__, -+ (IMG_UINT32) ui64SLCSize)); -+ } -+ } -+#endif -+ -+ return (IMG_UINT32)ui64SLCSize * 1024U; -+} -+#endif /* !defined(NO_HARDWARE) */ -+ -+/* This function detects the Rogue variant and configures the essential -+ * config info associated with such a device. -+ * The config info includes features, errata, etc -+ */ -+PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ static IMG_UINT32 ui32RGXDevCnt = 0; -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ IMG_UINT64 ui64BVNC=0; -+ IMG_UINT32 B=0, V=0, N=0, C=0; -+ IMG_UINT64 *pui64Cfg = NULL; -+ IMG_UINT32 ui32Cores = 1U; -+ IMG_UINT32 ui32SLCSize = 0; -+ -+ /* Check for load time RGX BVNC parameter */ -+ eError = _RGXBvncParseList(&B,&V,&N,&C, ui32RGXDevCnt); -+ if (PVRSRV_OK == eError) -+ { -+ PVR_LOG(("Read BVNC " RGX_BVNC_STR_FMTSPEC -+ " from driver load parameter", B, V, N, C)); -+ -+ /* Extract the BVNC config from the Features table */ -+ ui64BVNC = BVNC_PACK(B,0,N,C); -+ pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC); -+ PVR_LOG_IF_FALSE((pui64Cfg != NULL), "Driver parameter BVNC configuration not found!"); -+ } -+ -+ { -+ void *pvAppHintState = NULL; -+ const IMG_BOOL bAppHintDefault = PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC; -+ -+ OSCreateAppHintState(&pvAppHintState); -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, -+ pvAppHintState, -+ IgnoreHWReportedBVNC, -+ &bAppHintDefault, -+ &psDevInfo->bIgnoreHWReportedBVNC); -+ OSFreeAppHintState(pvAppHintState); -+ } -+ -+#if !defined(NO_HARDWARE) -+ -+ /* Try to detect the RGX BVNC from the HW device */ -+ if ((NULL == pui64Cfg) && !psDevInfo->bIgnoreHWReportedBVNC) -+ { -+ IMG_BOOL bPowerDown = (psDeviceNode->psDevConfig->pfnGpuDomainPower(psDeviceNode) == PVRSRV_SYS_POWER_STATE_OFF); -+ -+ /* Power-up the device as required to read the registers */ -+ if (bPowerDown) -+ { -+ eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_ON); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState ON"); -+ } -+ -+ /* Read the BVNC from HW */ -+ _RGXReadBVNCFromReg(psDeviceNode, 0 /*core0*/, &B, &V, &N, &C); -+ -+ PVR_LOG(("Read BVNC " RGX_BVNC_STR_FMTSPEC -+ " from HW device registers", B, V, N, C)); -+ -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Read the number of cores in the system for newer BVNC (Branch ID > 20) */ -+ if (B > 20) -+ { -+ ui32Cores = 
OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM); -+ } -+ } -+ -+ /* Obtain the SLC size from the device */ -+ ui32SLCSize = _RGXBvncReadSLCSize(psDeviceNode); -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: SLC Size reported as %u", __func__, ui32SLCSize)); -+ -+ if (bPowerDown) -+ { -+ eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_OFF); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState OFF"); -+ } -+ -+ /* Extract the BVNC config from the Features table */ -+ ui64BVNC = BVNC_PACK(B,0,N,C); -+ if (ui64BVNC != 0) -+ { -+ pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC); -+ PVR_LOG_IF_FALSE((pui64Cfg != NULL), "HW device BVNC configuration not found!"); -+ } -+ else if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* -+ * On host OS we should not get here as CORE_ID should not be zero, so flag an error. -+ * On older cores, guest OS only has CORE_ID if defined(RGX_FEATURE_COREID_PER_OS) -+ */ -+ PVR_LOG_ERROR(PVRSRV_ERROR_DEVICE_REGISTER_FAILED, "CORE_ID register returns zero. Unknown BVNC"); -+ } -+ } -+#endif -+ -+#if defined(RGX_BVNC_KM_B) && defined(RGX_BVNC_KM_N) && defined(RGX_BVNC_KM_C) -+ if (NULL == pui64Cfg) -+ { -+ IMG_CHAR acVStr[5] = RGX_BVNC_KM_V_ST; -+ -+ /* We reach here if the HW is not present, -+ * or we are running in a guest OS with no COREID_PER_OS feature, -+ * or HW is unstable during register read giving invalid values, -+ * or runtime detection has been disabled - fall back to compile time BVNC -+ */ -+ B = RGX_BVNC_KM_B; -+ N = RGX_BVNC_KM_N; -+ C = RGX_BVNC_KM_C; -+ -+ /* Clear any 'p' that may have been in RGX_BVNC_KM_V_ST, -+ * as OSStringToUINT32() will otherwise return an error. -+ */ -+ if (acVStr[strlen(acVStr)-1] == 'p') -+ { -+ acVStr[strlen(acVStr)-1] = '\0'; -+ } -+ -+ if (OSStringToUINT32(&acVStr[0], 0, &V) != PVRSRV_OK) -+ { -+ V = 0; -+ } -+ PVR_LOG(("Reverting to compile time BVNC %s", RGX_BVNC_KM)); -+ -+ /* Extract the BVNC config from the Features table */ -+ ui64BVNC = BVNC_PACK(B,0,N,C); -+ pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC); -+ PVR_LOG_IF_FALSE((pui64Cfg != NULL), "Compile time BVNC configuration not found!"); -+ } -+#endif /* defined(RGX_BVNC) */ -+ -+ /* Have we failed to identify the BVNC to use? */ -+ if (NULL == pui64Cfg) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: BVNC Detection and feature lookup failed. " -+ "Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx, __func__, ui64BVNC)); -+ return PVRSRV_ERROR_BVNC_UNSUPPORTED; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC Feature found config: 0x%016" -+ IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx " 0x%016" -+ IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx "\n", __func__, -+ pui64Cfg[0], pui64Cfg[1], pui64Cfg[2], pui64Cfg[3])); -+ -+ /* Parsing feature config depends on available features on the core -+ * hence this parsing should always follow the above feature assignment */ -+ psDevInfo->sDevFeatureCfg.ui64Features = pui64Cfg[1]; -+ eError = _RGXBvncParseFeatureValues(psDevInfo, pui64Cfg); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ /* Add 'V' to the packed BVNC value to get the BVNC ERN and BRN config. */ -+ ui64BVNC = BVNC_PACK(B,V,N,C); -+ pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaErnsBrns, ui64BVNC); -+ if (NULL == pui64Cfg) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: BVNC ERN/BRN lookup failed. 
" -+ "Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx, __func__, ui64BVNC)); -+ psDevInfo->sDevFeatureCfg.ui64ErnsBrns = 0; -+ return PVRSRV_ERROR_BVNC_UNSUPPORTED; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC ERN/BRN Cfg: 0x%016" IMG_UINT64_FMTSPECx -+ " 0x%016" IMG_UINT64_FMTSPECx, __func__, *pui64Cfg, pui64Cfg[1])); -+ psDevInfo->sDevFeatureCfg.ui64ErnsBrns = pui64Cfg[1]; -+ -+ psDevInfo->sDevFeatureCfg.ui32B = B; -+ psDevInfo->sDevFeatureCfg.ui32V = V; -+ psDevInfo->sDevFeatureCfg.ui32N = N; -+ psDevInfo->sDevFeatureCfg.ui32C = C; -+ -+ -+ /* -+ * Store the SLCSize in the device info field. If 0 it means the device uses the BVNC -+ * values so grab them here as we've already populated the internal structures. -+ */ -+ if (ui32SLCSize == 0U) -+ { -+ ui32SLCSize = RGX_GET_FEATURE_VALUE(psDevInfo, SLC_SIZE_IN_KILOBYTES) * 1024U; -+ -+ /* Verify that we have a valid value returned from the BVNC */ -+ PVR_ASSERT(ui32SLCSize != 0U); -+ } -+ psDevInfo->sDevFeatureCfg.ui32SLCSizeInBytes = ui32SLCSize; -+ -+ /* Message to confirm configuration look up was a success */ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) -+ { -+#if defined(NO_HARDWARE) -+ { -+ PVR_UNREFERENCED_PARAMETER(ui32Cores); -+ PVR_LOG(("RGX Device registered with BVNC " RGX_BVNC_STR_FMTSPEC, -+ B, V, N, C)); -+ } -+#else -+ { -+ PVR_LOG(("RGX Device registered BVNC " RGX_BVNC_STR_FMTSPEC -+ " with %u %s in the system", B ,V ,N ,C, ui32Cores , -+ ((ui32Cores == 1U)?"core":"cores"))); -+ } -+#endif -+ } -+ else -+ { -+ PVR_LOG(("RGX Device registered with BVNC " RGX_BVNC_STR_FMTSPEC, -+ B, V, N, C)); -+ } -+ -+ ui32RGXDevCnt++; -+ -+#if defined(DEBUG) || defined(SUPPORT_PERFORMANCE_RUN) -+ _RGXBvncDumpParsedConfig(psDeviceNode); -+#endif -+ return PVRSRV_OK; -+} -+ -+/* -+ * This function checks if a particular feature is available on the given rgx device */ -+IMG_BOOL RGXBvncCheckFeatureSupported(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64FeatureMask) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ if (psDevInfo->sDevFeatureCfg.ui64Features & ui64FeatureMask) -+ { -+ return IMG_TRUE; -+ } -+ return IMG_FALSE; -+} -+ -+/* -+ * This function returns the value of a feature on the given rgx device */ -+IMG_INT32 RGXBvncGetSupportedFeatureValue(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_FEATURE_WITH_VALUE_INDEX eFeatureIndex) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ if (eFeatureIndex >= RGX_FEATURE_WITH_VALUES_MAX_IDX) -+ { -+ return -1; -+ } -+ -+ if (psDevInfo->sDevFeatureCfg.ui32FeaturesValues[eFeatureIndex] == RGX_FEATURE_VALUE_DISABLED) -+ { -+ return -1; -+ } -+ -+ return psDevInfo->sDevFeatureCfg.ui32FeaturesValues[eFeatureIndex]; -+} -+ -+/**************************************************************************/ /*! -+@Function RGXVerifyBVNC -+@Description Checks that the device's BVNC registers have the correct values. 
-+@Input psDeviceNode Device node -+@Return PVRSRV_ERROR -+*/ /***************************************************************************/ -+#define NUM_RGX_CORE_IDS 8 -+PVRSRV_ERROR RGXVerifyBVNC(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64GivenBVNC, IMG_UINT64 ui64CoreIdMask) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT64 ui64MatchBVNC; -+#if !defined(NO_HARDWARE) -+ IMG_UINT32 B=0, V=0, N=0, C=0; -+#endif -+ IMG_UINT32 i; -+ -+ PVR_ASSERT(psDeviceNode != NULL); -+ PVR_ASSERT(psDeviceNode->pvDevice != NULL); -+ -+ /* The device info */ -+ psDevInfo = psDeviceNode->pvDevice; -+ -+ PDUMPCOMMENT(psDeviceNode, "PDUMP VERIFY CORE_ID registers for all OSIDs\n"); -+ -+ /* construct the value to match against */ -+ if ((ui64GivenBVNC | ui64CoreIdMask) == 0) /* both zero means use configured DDK value */ -+ { -+ ui64MatchBVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, -+ psDevInfo->sDevFeatureCfg.ui32V, -+ psDevInfo->sDevFeatureCfg.ui32N, -+ psDevInfo->sDevFeatureCfg.ui32C); -+ } -+ else -+ { -+ /* use the value in CORE_ID for any zero elements in the BVNC */ -+#if !defined(NO_HARDWARE) -+ IMG_UINT64 ui64BVNC = _RGXReadBVNCFromReg(psDeviceNode, 0, &B, &V, &N, &C); -+ ui64MatchBVNC = (ui64GivenBVNC & ~ui64CoreIdMask) | (ui64BVNC & ui64CoreIdMask); -+#else -+ ui64MatchBVNC = 0; -+#endif -+ } -+ PVR_LOG(("matchBVNC %d.%d.%d.%d", -+ (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), -+ (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff), -+ (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff), -+ (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff))); -+ -+ /* read in all the CORE_ID registers */ -+ for (i = 0; i < NUM_RGX_CORE_IDS; ++i) -+ { -+#if !defined(NO_HARDWARE) -+ IMG_UINT64 ui64BVNC = _RGXReadBVNCFromReg(psDeviceNode, i, &B, &V, &N, &C); -+ PVR_LOG(("CORE_ID%d returned %d.%d.%d.%d", i, B, V, N, C)); -+ -+ if (ui64BVNC != ui64MatchBVNC) -+ { -+ eError = PVRSRV_ERROR_BVNC_MISMATCH; -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid CORE_ID%d %d.%d.%d.%d, Expected %d.%d.%d.%d", __func__, i, -+ B, V, N, C, -+ (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), -+ (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff), -+ (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff), -+ (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff))); -+ break; -+ } -+#endif -+ -+#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) -+ /* check upper DWORD */ -+ eError = PDUMPREGPOL(psDeviceNode, RGX_PDUMPREG_NAME, -+ (RGX_CR_CORE_ID + 4) + (i << 16), -+ (IMG_UINT32)(ui64MatchBVNC >> 32), -+ 0xFFFFFFFF, -+ PDUMP_FLAGS_CONTINUOUS, -+ PDUMP_POLL_OPERATOR_EQUAL); -+ if (eError == PVRSRV_OK) -+ { -+ /* check lower DWORD */ -+ eError = PDUMPREGPOL(psDeviceNode, RGX_PDUMPREG_NAME, -+ RGX_CR_CORE_ID + (i << 16), -+ (IMG_UINT32)(ui64MatchBVNC & 0xFFFFFFFF), -+ 0xFFFFFFFF, -+ PDUMP_FLAGS_CONTINUOUS, -+ PDUMP_POLL_OPERATOR_EQUAL); -+ } -+#endif -+ } -+ -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/rgxbvnc.h b/drivers/gpu/drm/img-rogue/rgxbvnc.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxbvnc.h -@@ -0,0 +1,90 @@ -+/*************************************************************************/ /*! -+@File -+@Title BVNC handling specific header file -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@Description Header for the BVNC related work -+ (see hwdefs/km/rgx_bvnc_table_km.h and -+ hwdefs/km/rgx_bvnc_defs_km.h -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGXBVNC_H) -+#define RGXBVNC_H -+ -+#include "pvrsrv_error.h" -+#include "img_types.h" -+#include "rgxdevice.h" -+ -+/*************************************************************************/ /*! -+@brief This function detects the Rogue variant and configures the -+ essential config info associated with such a device. -+ The config info includes features, errata, etc -+@param psDeviceNode - Device Node pointer -+@return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+/*************************************************************************/ /*! -+@brief This function checks if a particular feature is available on -+ the given rgx device -+@param psDeviceNode - Device Node pointer -+@param ui64FeatureMask - feature to be checked -+@return true if feature is supported, false otherwise -+*/ /**************************************************************************/ -+IMG_BOOL RGXBvncCheckFeatureSupported(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64FeatureMask); -+ -+/*************************************************************************/ /*! 
-+@brief This function returns the value of a feature on the given -+ rgx device -+@param psDeviceNode - Device Node pointer -+@param ui64FeatureMask - feature for which to return the value -+@return the value for the specified feature -+*/ /**************************************************************************/ -+IMG_INT32 RGXBvncGetSupportedFeatureValue(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_FEATURE_WITH_VALUE_INDEX eFeatureIndex); -+ -+/*************************************************************************/ /*! -+@brief This function validates that the BVNC values in CORE_ID regs are -+ consistent and correct. -+@param psDeviceNode - Device Node pointer -+@param GivenBVNC - BVNC to be verified against as supplied by caller -+@param CoreIdMask - mask of components to pull from CORE_ID register -+@return success or fail -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXVerifyBVNC(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64GivenBVNC, IMG_UINT64 ui64CoreIdMask); -+ -+#endif /* RGXBVNC_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxccb.c b/drivers/gpu/drm/img-rogue/rgxccb.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxccb.c -@@ -0,0 +1,2869 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX CCB routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX CCB routines -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "pvr_debug.h" -+#include "rgxdevice.h" -+#include "pdump_km.h" -+#include "allocmem.h" -+#include "devicemem.h" -+#include "rgxfwutils.h" -+#include "rgxfwcmnctx.h" -+ -+#include "osfunc.h" -+#include "rgxccb.h" -+#include "rgx_memallocflags.h" -+#include "devicemem_pdump.h" -+#include "dllist.h" -+#if defined(__linux__) -+#include "trace_events.h" -+#endif -+#include "sync_checkpoint_external.h" -+#include "sync_checkpoint.h" -+#include "rgxutils.h" -+#include "info_page.h" -+#include "rgxtimerquery.h" -+ -+#if defined(PVRSRV_FORCE_FLUSH_CCCB_ON_KICK) -+#include "cache_km.h" -+#endif -+ -+/* -+ * Uncomment PVRSRV_ENABLE_CCCB_UTILISATION_INFO define for verbose -+ * info and statistics regarding CCB usage. -+ */ -+//#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO -+ -+/* Default threshold (as a percentage) for the PVRSRV_ENABLE_CCCB_UTILISATION_INFO feature. */ -+#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD (90) -+ -+/* -+ * Defines the number of fence updates to record so that future fences in the -+ * CCB. Can be checked to see if they are already known to be satisfied. -+ */ -+#define RGX_CCCB_FENCE_UPDATE_LIST_SIZE (32) -+ -+#define RGX_UFO_PTR_ADDR(ufoptr) \ -+ (((ufoptr)->puiAddrUFO.ui32Addr) & 0xFFFFFFFC) -+ -+#define GET_CCB_SPACE(WOff, ROff, CCBSize) \ -+ ((((ROff) - (WOff)) + ((CCBSize) - 1)) & ((CCBSize) - 1)) -+ -+#define UPDATE_CCB_OFFSET(Off, PacketSize, CCBSize) \ -+ (Off) = (((Off) + (PacketSize)) & ((CCBSize) - 1)) -+ -+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) -+ -+#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD 0x1 -+#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED 0x2 -+#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_FULL_CCB 0x4 -+ -+typedef struct _RGX_CLIENT_CCB_UTILISATION_ -+{ -+ /* the threshold in bytes. -+ * when the CCB utilisation hits the threshold then we will print -+ * a warning message. -+ */ -+ IMG_UINT32 ui32ThresholdBytes; -+ /* Maximum cCCB usage at some point in time */ -+ IMG_UINT32 ui32HighWaterMark; -+ /* keep track of the warnings already printed. -+ * bit mask of PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_xyz -+ */ -+ IMG_UINT32 ui32Warnings; -+ /* Keep track how many times CCB was full. -+ * Counters are reset after every grow. 
-+ */ -+ IMG_UINT32 ui32CCBFull; -+ IMG_UINT32 ui32CCBAcquired; -+} RGX_CLIENT_CCB_UTILISATION; -+ -+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ -+ -+struct _RGX_CLIENT_CCB_ { -+ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; /*!< CPU mapping of the CCB control structure used by the fw */ -+ void *pvClientCCB; /*!< CPU mapping of the CCB */ -+ DEVMEM_MEMDESC *psClientCCBMemDesc; /*!< MemDesc for the CCB */ -+ DEVMEM_MEMDESC *psClientCCBCtrlMemDesc; /*!< MemDesc for the CCB control */ -+ IMG_UINT32 ui32HostWriteOffset; /*!< CCB write offset from the driver side */ -+ IMG_UINT32 ui32LastPDumpWriteOffset; /*!< CCB write offset from the last time we submitted a command in capture range */ -+ IMG_UINT32 ui32FinishedPDumpWriteOffset; /*!< Trails LastPDumpWriteOffset for last finished command, used for HW CB driven DMs */ -+ IMG_UINT32 ui32LastROff; /*!< Last CCB Read offset to help detect any CCB wedge */ -+ IMG_UINT32 ui32LastWOff; /*!< Last CCB Write offset to help detect any CCB wedge */ -+ IMG_UINT32 ui32ByteCount; /*!< Count of the number of bytes written to CCCB */ -+ IMG_UINT32 ui32LastByteCount; /*!< Last value of ui32ByteCount to help detect any CCB wedge */ -+ IMG_UINT32 ui32Size; /*!< Size of the CCB */ -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ POS_LOCK hCCBGrowLock; /*!< Prevents CCB Grow while DumpCCB() is called and vice versa */ -+ IMG_UINT32 ui32VirtualAllocSize; /*!< Virtual size of the CCB */ -+ IMG_UINT32 ui32ChunkSize; /*!< CCB Sparse allocation chunk size */ -+ IMG_PUINT32 pui32MappingTable; /*!< Mapping table for sparse allocation of the CCB */ -+#endif -+ DLLIST_NODE sNode; /*!< Node used to store this CCB on the per connection list */ -+ PDUMP_CONNECTION_DATA *psPDumpConnectionData; /*!< Pointer to the per connection data in which we reside */ -+ void *hTransition; /*!< Handle for Transition callback */ -+ IMG_CHAR szName[MAX_CLIENT_CCB_NAME]; /*!< Name of this client CCB */ -+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; /*!< Parent server common context that this CCB belongs to */ -+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) -+ RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor; -+ RGX_CLIENT_CCB_UTILISATION sUtilisation; /*!< CCB utilisation data */ -+#endif -+#if defined(DEBUG) -+ IMG_UINT32 ui32UpdateEntries; /*!< Number of Fence Updates in asFenceUpdateList */ -+ RGXFWIF_UFO asFenceUpdateList[RGX_CCCB_FENCE_UPDATE_LIST_SIZE]; /*!< List of recent updates written in this CCB */ -+#endif -+ IMG_UINT32 ui32CCBFlags; /*!< Bitmask for various flags relating to CCB. Bit defines in rgxccb.h */ -+}; -+ -+/* Forms a table, with array of strings for each requestor type (listed in RGX_CCB_REQUESTORS X macro), to be used for -+ DevMemAllocation comments and PDump comments. Each tuple in the table consists of 3 strings: -+ { "FwClientCCB:" , "FwClientCCBControl:" , }, -+ The first string being used as comment when allocating ClientCCB for the given requestor, the second for CCBControl -+ structure, and the 3rd one for use in PDUMP comments. The number of tuples in the table must adhere to the following -+ build assert. 
*/ -+const IMG_CHAR *const aszCCBRequestors[][3] = -+{ -+#define REQUESTOR_STRING(prefix,req) #prefix ":" #req -+#define FORM_REQUESTOR_TUPLE(req) { REQUESTOR_STRING(FwClientCCB,req), REQUESTOR_STRING(FwClientCCBControl,req), #req }, -+ RGX_CCB_REQUESTORS(FORM_REQUESTOR_TUPLE) -+#undef FORM_REQUESTOR_TUPLE -+}; -+ -+PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ -+ IMG_UINT32 ui32PollOffset; -+#if defined(PDUMP) -+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext); -+#endif -+ -+ if (BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN)) -+ { -+ /* Draining CCB on a command that hasn't finished, and FW isn't expected -+ * to have updated Roff up to Woff. Only drain to the first -+ * finished command prior to this. The Roff for this -+ * is stored in ui32FinishedPDumpWriteOffset. -+ */ -+ ui32PollOffset = psClientCCB->ui32FinishedPDumpWriteOffset; -+ -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, -+ ui32PDumpFlags, -+ "cCCB(%s@%p): Draining open CCB rgxfw_roff < woff (%d)", -+ psClientCCB->szName, -+ psClientCCB, -+ ui32PollOffset); -+ } -+ else -+ { -+ /* Command to a finished CCB stream and FW is drained to empty -+ * out remaining commands until R==W. -+ */ -+ ui32PollOffset = psClientCCB->ui32LastPDumpWriteOffset; -+ -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, -+ ui32PDumpFlags, -+ "cCCB(%s@%p): Draining CCB rgxfw_roff == woff (%d)", -+ psClientCCB->szName, -+ psClientCCB, -+ ui32PollOffset); -+ } -+ -+ return DevmemPDumpDevmemPol32(psClientCCB->psClientCCBCtrlMemDesc, -+ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), -+ ui32PollOffset, -+ 0xffffffff, -+ PDUMP_POLL_OPERATOR_EQUAL, -+ ui32PDumpFlags); -+} -+ -+/****************************************************************************** -+ FUNCTION : RGXCCBPDumpSyncCCB -+ -+ PURPOSE : Synchronise Client CCBs from both live and playback contexts. -+ Waits for live-FW to empty live-CCB. -+ Waits for sim-FW to empty sim-CCB by adding POL -+ -+ PARAMETERS : psClientCCB - The client CCB -+ ui32PDumpFlags - PDump flags -+ -+ RETURNS : PVRSRV_ERROR -+******************************************************************************/ -+static PVRSRV_ERROR RGXCCBPDumpSyncCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32PDumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Wait for the live FW to catch up/empty CCB. This is done by returning -+ * retry which will get pushed back out to Services client where it -+ * waits on the event object and then resubmits the command. -+ */ -+ if (psClientCCB->psClientCCBCtrl->ui32ReadOffset != psClientCCB->ui32HostWriteOffset) -+ { -+ return PVRSRV_ERROR_RETRY; -+ } -+ -+ /* Wait for the sim FW to catch up/empty sim CCB. -+ * We drain whenever capture range is entered, even if no commands -+ * have been issued on this CCB when out of capture range. We have to -+ * wait for commands that might have been issued in the last capture -+ * range to finish so the connection's sync block snapshot dumped after -+ * all the PDumpTransition callbacks have been execute doesn't clobber -+ * syncs which the sim FW is currently working on. -+ * -+ * Although this is sub-optimal for play-back - while out of capture -+ * range for every continuous operation we synchronise the sim -+ * play-back processing the script and the sim FW, there is no easy -+ * solution. 
Not all modules that work with syncs register a -+ * PDumpTransition callback and thus we have no way of knowing if we -+ * can skip this sim CCB drain and sync block dump or not. -+ */ -+ -+ eError = RGXCCBPDumpDrainCCB(psClientCCB, ui32PDumpFlags); -+ PVR_LOG_IF_ERROR(eError, "RGXCCBPDumpDrainCCB"); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ /* Live CCB and simulation CCB now empty, FW idle on CCB in both -+ * contexts. -+ */ -+ return PVRSRV_OK; -+} -+ -+/****************************************************************************** -+ FUNCTION : RGXCCBPDumpFastForwardCCB -+ -+ PURPOSE : Fast-forward sim-CCB and live-CCB offsets to live app-thread -+ values. -+ This helps to skip any commands submitted when out of capture -+ range and start with first command in capture range in both -+ live and playback contexts. In case of Block mode, this helps -+ to playback any intermediate PDump block directly after first -+ block. -+ -+ -+ PARAMETERS : psClientCCB - The client CCB -+ ui32PDumpFlags - PDump flags -+ -+ RETURNS : void -+******************************************************************************/ -+static void RGXCCBPDumpFastForwardCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32PDumpFlags) -+{ -+ volatile RGXFWIF_CCCB_CTL *psCCBCtl = psClientCCB->psClientCCBCtrl; -+#if defined(PDUMP) -+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext); -+#endif -+ -+ /* Make sure that we have synced live-FW and live-App threads */ -+ PVR_ASSERT(psCCBCtl->ui32ReadOffset == psClientCCB->ui32HostWriteOffset); -+ -+ psCCBCtl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset; -+ psCCBCtl->ui32DepOffset = psClientCCB->ui32HostWriteOffset; -+ psCCBCtl->ui32WriteOffset = psClientCCB->ui32HostWriteOffset; -+ -+ psCCBCtl->ui32ReadOffset2 = psClientCCB->ui32HostWriteOffset; -+ psCCBCtl->ui32ReadOffset3 = psClientCCB->ui32HostWriteOffset; -+ psCCBCtl->ui32ReadOffset4 = psClientCCB->ui32HostWriteOffset; -+ -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, -+ ui32PDumpFlags, -+ "cCCB(%s@%p): Fast-forward from %d to %d", -+ psClientCCB->szName, -+ psClientCCB, -+ psClientCCB->ui32LastPDumpWriteOffset, -+ psClientCCB->ui32HostWriteOffset); -+ -+ DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc, -+ 0, -+ sizeof(RGXFWIF_CCCB_CTL), -+ ui32PDumpFlags); -+ -+ /* Although we've entered capture range for this process connection -+ * we might not do any work on this CCB so update the -+ * ui32LastPDumpWriteOffset to reflect where we got to for next -+ * time so we start the drain from where we got to last time. -+ */ -+ psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset; -+ -+} -+ -+static PVRSRV_ERROR _RGXCCBPDumpTransition(void *pvData, void *pvDevice, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags) -+{ -+ RGX_CLIENT_CCB *psClientCCB = (RGX_CLIENT_CCB *) pvData; -+#if defined(PDUMP) -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) pvDevice; -+#endif -+ PVRSRV_ERROR eError; -+ -+ /* Block mode: -+ * Here is block structure at transition (ui32BlockLength=N frames): -+ * -+ * ... -+ * ... -+ * PDUMP_BLOCK_START_0x0000000x{ -+ * -+ * -+ * ... -+ * ... -+ * ... (N frames data) -+ * ... -+ * ... -+ * <(1) Drain sim-KCCB> ''| -+ * <(2) Sync live and sim CCCB> | -+ * }PDUMP_BLOCK_END_0x0000000x | <- BlockTransition Steps -+ * <(3) Split MAIN and BLOCK stream script> | -+ * PDUMP_BLOCK_START_0x0000000y{ | -+ * <(4) Fast-forward sim-CCCB> | -+ * <(5) Re-dump SyncBlocks> ,,| -+ * ... -+ * ... -+ * ... (N frames data) -+ * ... -+ * ... 
-+ * -+ * -+ * }PDUMP_BLOCK_END_0x0000000y -+ * ... -+ * ... -+ * -+ * Steps (3) and (5) are done in pdump_server.c -+ * */ -+ switch (eEvent) -+ { -+ case PDUMP_TRANSITION_EVENT_RANGE_ENTERED: -+ { -+ /* We're about to transition into capture range and we've submitted -+ * new commands since the last time we entered capture range so drain -+ * the live CCB and simulation (sim) CCB as required, i.e. leave CCB -+ * idle in both live and sim contexts. -+ * This requires the host driver to ensure the live FW & the sim FW -+ * have both emptied out the remaining commands until R==W (CCB empty). -+ */ -+ -+ eError = RGXCCBPDumpSyncCCB(psClientCCB, ui32PDumpFlags); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ if (psClientCCB->ui32LastPDumpWriteOffset != psClientCCB->ui32HostWriteOffset) -+ { -+ /* If new commands have been written when out of capture range in -+ * the live CCB then we need to fast forward the sim CCBCtl -+ * offsets past uncaptured commands. This is done by PDUMPing -+ * the CCBCtl memory to align sim values with the live CCBCtl -+ * values. Both live and sim FWs can start with the 1st command -+ * which is in the new capture range. -+ */ -+ RGXCCBPDumpFastForwardCCB(psClientCCB, ui32PDumpFlags); -+ } -+ break; -+ } -+ case PDUMP_TRANSITION_EVENT_RANGE_EXITED: -+ { -+ /* Nothing to do */ -+ break; -+ } -+ case PDUMP_TRANSITION_EVENT_BLOCK_FINISHED: -+ { -+ /* (1) Drain KCCB from current block before starting new: -+ * -+ * At playback, this will ensure that sim-FW drains all commands in KCCB -+ * belongs to current block before 'jumping' to any future commands (from -+ * next block). This will synchronise script-thread and sim-FW thread KCCBs -+ * at end of each pdump block. -+ * -+ * This will additionally force redump of KCCBCtl structure at start of next/new block. -+ * */ -+ -+#if defined(PDUMP) -+ eError = RGXPdumpDrainKCCB(psDevInfo, psDevInfo->psKernelCCBCtl->ui32WriteOffset); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXPdumpDrainKCCB"); -+#endif -+ -+ /* (2) Synchronise Client CCBs from live and playback contexts before starting new block: -+ * -+ * This operation will, -+ * a. Force synchronisation between app-thread and live-FW thread (i.e. Wait -+ * for live-FW to empty live Client CCB). -+ * -+ * b. Next, it will dump poll command to drain Client CCB at end of every -+ * pdump block. At playback time this will synchronise sim-FW and -+ * script-thread Client CCBs at end of each block. -+ * -+ * This is to ensure that all commands in CCB from current block are processed -+ * before moving on to future commands. -+ * */ -+ -+ eError = RGXCCBPDumpSyncCCB(psClientCCB, ui32PDumpFlags); -+ PVR_RETURN_IF_ERROR(eError); -+ break; -+ } -+ case PDUMP_TRANSITION_EVENT_BLOCK_STARTED: -+ { -+ /* (4) Fast-forward CCB write offsets to current live values: -+ * -+ * We have already synchronised live-FW and app-thread above at end of each -+ * block (in Step 2a above), now fast-forward Client CCBCtl write offsets to that of -+ * current app-thread values at start of every block. This will allow us to -+ * skip any intermediate pdump blocks and start with last (or any next) block -+ * immediately after first pdump block. 
-+ * */ -+ -+ RGXCCBPDumpFastForwardCCB(psClientCCB, ui32PDumpFlags); -+ break; -+ } -+ case PDUMP_TRANSITION_EVENT_NONE: -+ /* Invalid event for transition */ -+ default: -+ { -+ /* Unknown Transition event */ -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ } -+ return PVRSRV_OK; -+} -+ -+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) -+ -+static INLINE void _RGXInitCCBUtilisation(RGX_CLIENT_CCB *psClientCCB) -+{ -+ psClientCCB->sUtilisation.ui32HighWaterMark = 0; /* initialize ui32HighWaterMark level to zero */ -+ psClientCCB->sUtilisation.ui32ThresholdBytes = (psClientCCB->ui32Size * -+ PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD) / 100; -+ psClientCCB->sUtilisation.ui32Warnings = 0; -+ psClientCCB->sUtilisation.ui32CCBAcquired = 0; -+ psClientCCB->sUtilisation.ui32CCBFull = 0; -+} -+ -+static INLINE void _RGXCCBUtilisationEvent(RGX_CLIENT_CCB *psClientCCB, -+ IMG_UINT32 ui32WarningType, -+ IMG_UINT32 ui32CmdSize) -+{ -+ /* in VERBOSE mode we will print a message for each different -+ * event type as they happen. -+ */ -+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) -+ if (!(psClientCCB->sUtilisation.ui32Warnings & ui32WarningType)) -+ { -+ if (ui32WarningType == PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED) -+ { -+ PVR_LOG(("Failed to acquire CCB space for %u byte command:", ui32CmdSize)); -+ } -+ -+ PVR_LOG(("%s: Client CCB (%s) watermark (%u) hit %d%% of its allocation size (%u)", -+ __func__, -+ psClientCCB->szName, -+ psClientCCB->sUtilisation.ui32HighWaterMark, -+ psClientCCB->sUtilisation.ui32HighWaterMark * 100 / psClientCCB->ui32Size, -+ psClientCCB->ui32Size)); -+ -+ /* record that we have issued a warning of this type */ -+ psClientCCB->sUtilisation.ui32Warnings |= ui32WarningType; -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(psClientCCB); -+ PVR_UNREFERENCED_PARAMETER(ui32WarningType); -+ PVR_UNREFERENCED_PARAMETER(ui32CmdSize); -+#endif -+} -+ -+/* Check the current CCB utilisation. Print a one-time warning message if it is above the -+ * specified threshold -+ */ -+static INLINE void _RGXCheckCCBUtilisation(RGX_CLIENT_CCB *psClientCCB) -+{ -+ /* Print a warning message if the cCCB watermark is above the threshold value */ -+ if (psClientCCB->sUtilisation.ui32HighWaterMark >= psClientCCB->sUtilisation.ui32ThresholdBytes) -+ { -+ _RGXCCBUtilisationEvent(psClientCCB, -+ PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD, -+ 0); -+ } -+} -+ -+/* Update the cCCB high watermark level if necessary */ -+static void _RGXUpdateCCBUtilisation(RGX_CLIENT_CCB *psClientCCB) -+{ -+ IMG_UINT32 ui32FreeSpace, ui32MemCurrentUsage; -+ -+ RGXFwSharedMemCacheOpValue(psClientCCB->psClientCCBCtrl->ui32ReadOffset, -+ INVALIDATE); -+ -+ ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, -+ psClientCCB->psClientCCBCtrl->ui32ReadOffset, -+ psClientCCB->ui32Size); -+ ui32MemCurrentUsage = psClientCCB->ui32Size - ui32FreeSpace; -+ -+ if (ui32MemCurrentUsage > psClientCCB->sUtilisation.ui32HighWaterMark) -+ { -+ psClientCCB->sUtilisation.ui32HighWaterMark = ui32MemCurrentUsage; -+ -+ /* The high water mark has increased. Check if it is above the -+ * threshold so we can print a warning if necessary. 
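The utilisation tracking above reduces to a small amount of arithmetic on the circular-buffer offsets: current usage is the buffer size minus the free space between the host write offset and the firmware read offset, the high-water mark keeps the maximum usage seen, and a one-shot warning fires once that mark crosses a percentage threshold of the buffer size. A minimal stand-alone sketch of that bookkeeping follows; the structure and the ccb_free() helper are simplified stand-ins, not the driver's definitions, and the free-space formula is only assumed to be equivalent in spirit to GET_CCB_SPACE, which is not shown in this hunk.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's CCB bookkeeping (illustrative only). */
typedef struct {
    uint32_t size;           /* power-of-two CCB size in bytes              */
    uint32_t highWaterMark;  /* maximum usage observed so far               */
    uint32_t thresholdBytes; /* warn once usage reaches this many bytes     */
    int      warned;         /* one-shot flag, like the ui32Warnings bits   */
} ccb_util_t;

/* Free space in a power-of-two ring (assumed GET_CCB_SPACE-like behaviour). */
static uint32_t ccb_free(uint32_t wr, uint32_t rd, uint32_t size)
{
    return ((rd - wr) + (size - 1)) & (size - 1);
}

static void ccb_util_update(ccb_util_t *u, uint32_t wr, uint32_t rd)
{
    uint32_t used = u->size - ccb_free(wr, rd, u->size);

    if (used > u->highWaterMark)
    {
        u->highWaterMark = used;
        if (!u->warned && u->highWaterMark >= u->thresholdBytes)
        {
            printf("CCB watermark %u hit %u%% of %u bytes\n",
                   u->highWaterMark, u->highWaterMark * 100 / u->size, u->size);
            u->warned = 1;
        }
    }
}

int main(void)
{
    ccb_util_t u = { .size = 4096, .thresholdBytes = 4096 * 90 / 100 };
    ccb_util_update(&u, 3800, 0);   /* ~93% used: prints the one-shot warning */
    ccb_util_update(&u, 3900, 0);   /* higher mark, but already warned        */
    return 0;
}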
-+ */ -+ _RGXCheckCCBUtilisation(psClientCCB); -+ } -+} -+ -+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ -+ -+PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32CCBSizeLog2, -+ IMG_UINT32 ui32CCBMaxSizeLog2, -+ IMG_UINT32 ui32ContextFlags, -+ CONNECTION_DATA *psConnectionData, -+ RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, -+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, -+ RGX_CLIENT_CCB **ppsClientCCB, -+ DEVMEM_MEMDESC **ppsClientCCBMemDesc, -+ DEVMEM_MEMDESC **ppsClientCCBCtrlMemDesc) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_MEMALLOCFLAGS_T uiClientCCBMemAllocFlags, uiClientCCBCtlMemAllocFlags; -+ IMG_UINT32 ui32FWMainLog2PageSize = DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap); -+ IMG_UINT32 ui32ChunkSize = (1U << ui32FWMainLog2PageSize); -+ IMG_UINT32 ui32AllocSize = MAX((1U << ui32CCBSizeLog2), ui32ChunkSize); -+ IMG_UINT32 ui32MinAllocSize = MAX((1U << MIN_SAFE_CCB_SIZE_LOG2), ui32ChunkSize); -+ RGX_CLIENT_CCB *psClientCCB; -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ IMG_UINT32 ui32NumChunks = ui32AllocSize / ui32ChunkSize; -+ IMG_UINT32 ui32VirtualAllocSize = (1U << ui32CCBMaxSizeLog2); -+ IMG_UINT32 ui32NumVirtChunks = ui32VirtualAllocSize / ui32ChunkSize; -+ IMG_UINT32 i; -+ -+ /* For the allocation request to be valid, at least one page is required. -+ * This is relevant on systems where the page size is greater than the client CCB size. */ -+ ui32NumVirtChunks = MAX(1, ui32NumVirtChunks); -+ PVR_ASSERT((ui32ChunkSize >= (1U << PAGE_SHIFT))); -+#else -+ PVR_UNREFERENCED_PARAMETER(ui32CCBMaxSizeLog2); -+#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ -+ -+ /* All client CCBs should be at-least of the "minimum" size and not to exceed "maximum" */ -+ if ((ui32CCBSizeLog2 < MIN_SAFE_CCB_SIZE_LOG2) || -+ (ui32CCBSizeLog2 > MAX_SAFE_CCB_SIZE_LOG2)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s CCB size is invalid (%d). Should be from %d to %d", -+ __func__, -+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], -+ ui32CCBSizeLog2, MIN_SAFE_CCB_SIZE_LOG2, MAX_SAFE_CCB_SIZE_LOG2)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ if ((ui32CCBMaxSizeLog2 < ui32CCBSizeLog2) || -+ (ui32CCBMaxSizeLog2 > MAX_SAFE_CCB_SIZE_LOG2)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s CCB maximum size is invalid (%d). 
Should be from %d to %d", -+ __func__, -+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], -+ ui32CCBMaxSizeLog2, ui32CCBSizeLog2, MAX_SAFE_CCB_SIZE_LOG2)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+#endif -+ -+ psClientCCB = OSAllocMem(sizeof(*psClientCCB)); -+ if (psClientCCB == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto fail_alloc; -+ } -+ psClientCCB->psServerCommonContext = psServerCommonContext; -+ -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ psClientCCB->ui32VirtualAllocSize = 0; -+ psClientCCB->pui32MappingTable = NULL; -+ psClientCCB->ui32ChunkSize = ui32ChunkSize; -+#endif -+ -+ uiClientCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN); -+ -+ uiClientCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED | -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN); -+ -+ /* If connection data indicates Sync Lockup Recovery (SLR) should be disabled, -+ * or if the caller has set ui32ContextFlags to disable SLR for this context, -+ * indicate this in psClientCCB->ui32CCBFlags. -+ */ -+ if ((psConnectionData->ui32ClientFlags & SRV_FLAGS_CLIENT_SLR_DISABLED) || -+ (ui32ContextFlags & RGX_CONTEXT_FLAG_DISABLESLR)) -+ { -+ BIT_SET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED); -+ } -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Allocate RGXFW cCCB"); -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ if (BITMASK_HAS(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN)) -+ { -+ PHYS_HEAP *psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]; -+ PHYS_HEAP_POLICY uiHeapPolicy = PhysHeapGetPolicy(psPhysHeap); -+ -+ psClientCCB->ui32VirtualAllocSize = ui32VirtualAllocSize; -+ -+ if (uiHeapPolicy != PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG) -+ { -+ psClientCCB->pui32MappingTable = NULL; -+ /* -+ * On LMA sparse memory can't be mapped to kernel without support for non physically -+ * sparse allocations. -+ * To work around this whole ccb memory is allocated at once as contiguous. -+ */ -+ eError = DevmemFwAllocate(psDevInfo, -+ ui32VirtualAllocSize, -+ uiClientCCBMemAllocFlags, -+ aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING], -+ &psClientCCB->psClientCCBMemDesc); -+ } -+ else -+ { -+ /* -+ * Growing CCB is doubling the size. Last grow would require only ui32NumVirtChunks/2 new chunks -+ * because another ui32NumVirtChunks/2 is already allocated. -+ * Sometimes initial chunk count would be higher (when CCB size is equal to CCB maximum size) so MAX is needed. 
-+ */ -+ psClientCCB->pui32MappingTable = OSAllocMem(MAX(ui32NumChunks, ui32NumVirtChunks/2) * sizeof(IMG_UINT32)); -+ if (psClientCCB->pui32MappingTable == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto fail_alloc_mtable; -+ } -+ -+ for (i = 0; i < ui32NumChunks; i++) -+ { -+ psClientCCB->pui32MappingTable[i] = i; -+ } -+ -+ eError = DevmemFwAllocateSparse(psDevInfo, -+ ui32VirtualAllocSize, -+ ui32NumChunks, -+ ui32NumVirtChunks, -+ psClientCCB->pui32MappingTable, -+ uiClientCCBMemAllocFlags, -+ aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING], -+ &psClientCCB->psClientCCBMemDesc); -+ } -+ } -+ -+ if (eError != PVRSRV_OK) -+ { -+ OSFreeMem(psClientCCB->pui32MappingTable); -+ psClientCCB->pui32MappingTable = NULL; -+ psClientCCB->ui32VirtualAllocSize = 0; -+ } -+ -+ if (!BITMASK_HAS(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN) || -+ (eError != PVRSRV_OK)) -+#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ -+ { -+ /* Allocate ui32AllocSize, or the next best POT allocation */ -+ do -+ { -+ eError = DevmemFwAllocate(psDevInfo, -+ ui32AllocSize, -+ uiClientCCBMemAllocFlags, -+ aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING], -+ &psClientCCB->psClientCCBMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ /* Failed to allocate - ensure CCB grow is disabled from -+ * now on for this device. -+ */ -+ BITMASK_UNSET(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN); -+ -+ /* Failed to allocate, try next POT down */ -+ ui32AllocSize >>= 1; -+ } -+ } while ((eError != PVRSRV_OK) && (ui32AllocSize > ui32MinAllocSize)); -+ } -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate RGX client CCB (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_alloc_ccb; -+ } -+ -+ OSSNPrintf(psClientCCB->szName, MAX_CLIENT_CCB_NAME, "%s-P%lu-T%lu-%s", -+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], -+ (unsigned long) OSGetCurrentClientProcessIDKM(), -+ (unsigned long) OSGetCurrentClientThreadIDKM(), -+ OSGetCurrentClientProcessNameKM()); -+ -+ if (ui32AllocSize < (1U << ui32CCBSizeLog2)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Unable to allocate %d bytes for RGX client CCB (%s) but allocated %d bytes", -+ __func__, -+ (1U << ui32CCBSizeLog2), -+ psClientCCB->szName, -+ ui32AllocSize)); -+ } -+ -+ eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc, -+ &psClientCCB->pvClientCCB); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to map RGX client CCB (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_map_ccb; -+ } -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Allocate RGXFW cCCB control"); -+ eError = DevmemFwAllocate(psDevInfo, -+ sizeof(RGXFWIF_CCCB_CTL), -+ uiClientCCBCtlMemAllocFlags, -+ aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING], -+ &psClientCCB->psClientCCBCtrlMemDesc); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate RGX client CCB control (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_alloc_ccbctrl; -+ } -+ -+ -+ eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc, -+ (void **) &psClientCCB->psClientCCBCtrl); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to map RGX client CCB control (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_map_ccbctrl; -+ } -+ -+ /* psClientCCBCtrlMemDesc was zero alloc'd so no need to initialise offsets. 
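The fallback path above keeps halving the requested power-of-two size until an allocation succeeds or the size is no longer above the minimum safe size, then warns if less than the requested amount was obtained. The shape of that loop, detached from the Devmem API, is roughly the following; malloc stands in for DevmemFwAllocate purely for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the halving retry in RGXCreateCCB: try the
 * requested power-of-two size, fall back to smaller powers of two while the
 * size remains above the minimum safe size. */
static void *alloc_pot_with_fallback(uint32_t requested, uint32_t minimum,
                                     uint32_t *got)
{
    uint32_t size = requested;
    void *p = NULL;

    do
    {
        p = malloc(size);          /* stand-in for DevmemFwAllocate()        */
        if (p == NULL)
        {
            size >>= 1;            /* next power-of-two down                 */
        }
    } while (p == NULL && size > minimum);

    if (p != NULL && size < requested)
    {
        printf("wanted %u bytes but allocated %u bytes\n", requested, size);
    }

    *got = (p != NULL) ? size : 0;
    return p;
}

int main(void)
{
    uint32_t got;
    void *ccb = alloc_pot_with_fallback(1u << 17, 1u << 13, &got);
    if (ccb) { free(ccb); }
    return 0;
}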
*/ -+ psClientCCB->psClientCCBCtrl->ui32WrapMask = ui32AllocSize - 1; -+ -+ /* Flush the whole struct since other parts are implicitly init (zero'd) */ -+ RGXFwSharedMemCacheOpPtr(psClientCCB->psClientCCBCtrl, -+ FLUSH); -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "cCCB control"); -+ DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc, -+ 0, -+ sizeof(RGXFWIF_CCCB_CTL), -+ PDUMP_FLAGS_CONTINUOUS); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ psClientCCB->ui32HostWriteOffset = 0; -+ psClientCCB->ui32LastPDumpWriteOffset = 0; -+ psClientCCB->ui32FinishedPDumpWriteOffset = 0; -+ psClientCCB->ui32Size = ui32AllocSize; -+ psClientCCB->ui32LastROff = ui32AllocSize - 1; -+ psClientCCB->ui32ByteCount = 0; -+ psClientCCB->ui32LastByteCount = 0; -+ BIT_UNSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN); -+ -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ eError = OSLockCreate(&psClientCCB->hCCBGrowLock); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to create hCCBGrowLock (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_create_ccbgrow_lock; -+ } -+#endif -+#if defined(DEBUG) -+ psClientCCB->ui32UpdateEntries = 0; -+#endif -+ -+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) -+ _RGXInitCCBUtilisation(psClientCCB); -+ psClientCCB->eRGXCCBRequestor = eRGXCCBRequestor; -+#endif -+ eError = PDumpRegisterTransitionCallback(psConnectionData->psPDumpConnectionData, -+ _RGXCCBPDumpTransition, -+ psClientCCB, -+ psDevInfo, -+ &psClientCCB->hTransition); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_pdumpreg; -+ } -+ -+ /* -+ * Note: -+ * Save the PDump specific structure, which is ref counted unlike -+ * the connection data, to ensure it's not freed too early -+ */ -+ psClientCCB->psPDumpConnectionData = psConnectionData->psPDumpConnectionData; -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "New RGXFW cCCB(%s@%p) created", -+ psClientCCB->szName, -+ psClientCCB); -+ -+ *ppsClientCCB = psClientCCB; -+ *ppsClientCCBMemDesc = psClientCCB->psClientCCBMemDesc; -+ *ppsClientCCBCtrlMemDesc = psClientCCB->psClientCCBCtrlMemDesc; -+ return PVRSRV_OK; -+ -+fail_pdumpreg: -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ OSLockDestroy(psClientCCB->hCCBGrowLock); -+fail_create_ccbgrow_lock: -+#endif -+ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc); -+fail_map_ccbctrl: -+ DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc); -+fail_alloc_ccbctrl: -+ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc); -+fail_map_ccb: -+ DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBMemDesc); -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+fail_alloc_ccb: -+ if (psClientCCB->pui32MappingTable) -+ { -+ OSFreeMem(psClientCCB->pui32MappingTable); -+ } -+fail_alloc_mtable: -+#else -+fail_alloc_ccb: -+#endif -+ OSFreeMem(psClientCCB); -+fail_alloc: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB) -+{ -+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) -+ if (psClientCCB->sUtilisation.ui32CCBFull) -+ { -+ PVR_LOG(("CCBUtilisationInfo: GPU %s command buffer was full %d times out of %d. 
" -+ "This is not an error but the application may not run optimally.", -+ aszCCBRequestors[psClientCCB->eRGXCCBRequestor][REQ_PDUMP_COMMENT], -+ psClientCCB->sUtilisation.ui32CCBFull, -+ psClientCCB->sUtilisation.ui32CCBAcquired)); -+ } -+#endif -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ OSLockDestroy(psClientCCB->hCCBGrowLock); -+#endif -+ PDumpUnregisterTransitionCallback(psClientCCB->hTransition); -+ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc); -+ DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc); -+ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc); -+ DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBMemDesc); -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ if (psClientCCB->pui32MappingTable) -+ { -+ OSFreeMem(psClientCCB->pui32MappingTable); -+ } -+#endif -+ OSFreeMem(psClientCCB); -+} -+ -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+static PVRSRV_ERROR _RGXCCBMemChangeSparse(RGX_CLIENT_CCB *psClientCCB, -+ IMG_UINT32 ui32AllocPageCount) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 i; -+ -+#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE -+ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc); -+#endif -+ -+ for (i = 0; i < ui32AllocPageCount; i++) -+ { -+ psClientCCB->pui32MappingTable[i] = ui32AllocPageCount + i; -+ } -+ -+ /* Double the CCB size (CCB must be POT) by adding ui32AllocPageCount new pages */ -+ eError = DeviceMemChangeSparse(psClientCCB->psClientCCBMemDesc, -+ ui32AllocPageCount, -+ psClientCCB->pui32MappingTable, -+ 0, -+ NULL, -+#if !defined(PVRSRV_UNMAP_ON_SPARSE_CHANGE) -+ SPARSE_MAP_CPU_ADDR | -+#endif -+ SPARSE_RESIZE_ALLOC); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to grow RGX client CCB (%s)", -+ PVRSRVGetErrorString(eError))); -+ -+#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE -+ if (DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc, -+ &psClientCCB->pvClientCCB) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to reacquire CCB mapping")); -+ psClientCCB->pvClientCCB = NULL; -+ } -+#endif -+ -+ return eError; -+ } -+ -+#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE -+ eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc, -+ &psClientCCB->pvClientCCB); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to map RGX client CCB (%s)", -+ PVRSRVGetErrorString(eError))); -+ return eError; -+ } -+#endif -+ -+ return PVRSRV_OK; -+} -+#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ -+ -+PVRSRV_ERROR RGXCheckSpaceCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32CmdSize) -+{ -+ IMG_UINT32 ui32FreeSpace; -+ -+ /* Check that the CCB can hold this command + padding */ -+ if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB" -+ " (%d bytes)", ui32CmdSize, psClientCCB->ui32Size)); -+ return PVRSRV_ERROR_CMD_TOO_BIG; -+ } -+ -+ /* -+ Check we don't overflow the end of the buffer and make sure we have -+ enough space for the padding command. If we don't have enough space -+ (including the minimum amount for the padding command) we need to make -+ sure we insert a padding command now and wrap before adding the main -+ command. 
-+ */ -+ -+ /* Invalidate read offset */ -+ RGXFwSharedMemCacheOpValue(psClientCCB->psClientCCBCtrl->ui32ReadOffset, -+ INVALIDATE); -+ if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size) -+ { -+ ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, -+ psClientCCB->psClientCCBCtrl->ui32ReadOffset, -+ psClientCCB->ui32Size); -+ -+ /* Don't allow all the space to be used */ -+ if (ui32FreeSpace > ui32CmdSize) -+ { -+ return PVRSRV_OK; -+ } -+ -+ goto e_retry; -+ } -+ else -+ { -+ IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset; -+ -+ ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, -+ psClientCCB->psClientCCBCtrl->ui32ReadOffset, -+ psClientCCB->ui32Size); -+ -+ /* Check there is space for both the command and the padding command */ -+ if (ui32FreeSpace > ui32Remain + ui32CmdSize) -+ { -+ return PVRSRV_OK; -+ } -+ -+ goto e_retry; -+ } -+ -+e_retry: -+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) -+ _RGXCCBUtilisationEvent(psClientCCB, -+ PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_FULL_CCB, -+ ui32CmdSize); -+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ -+ -+ return PVRSRV_ERROR_RETRY; -+} -+ -+/****************************************************************************** -+ FUNCTION : RGXAcquireCCB -+ -+ PURPOSE : Obtains access to write some commands to a CCB -+ -+ PARAMETERS : psClientCCB - The client CCB -+ ui32CmdSize - How much space is required -+ ppvBufferSpace - Pointer to space in the buffer -+ ui32PDumpFlags - Should this be PDump continuous? -+ -+ RETURNS : PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB, -+ IMG_UINT32 ui32CmdSize, -+ void **ppvBufferSpace, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ IMG_UINT32 ui32RetryCount = 2; -+#endif -+ -+#if defined(PDUMP) -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext); -+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; -+ IMG_BOOL bPDumpEnabled = PDumpCheckFlagsWrite(psDeviceNode, ui32PDumpFlags); -+ IMG_BOOL bPDumpFlagsContinuous = PDUMP_IS_CONTINUOUS(ui32PDumpFlags); -+ -+ /* -+ PDumpSetFrame will detect as we Transition into capture range for -+ frame based data but if we are PDumping continuous data then we -+ need to inform the PDump layer ourselves -+ -+ First check is to confirm we are in continuous mode -+ Second check is to confirm the pdump client is connected and ready. -+ Third check is to confirm we are not in capture range. 
-+ */ -+ if (bPDumpFlagsContinuous && -+ bPDumpEnabled && -+ !PDumpCheckFlagsWrite(psDeviceNode, PDUMP_FLAGS_NONE)) -+ { -+ eError = PDumpTransition(psDeviceNode, -+ psClientCCB->psPDumpConnectionData, -+ PDUMP_TRANSITION_EVENT_RANGE_ENTERED, -+ ui32PDumpFlags); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ } -+#endif -+ -+ /* Check that the CCB can hold this command + padding */ -+ if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB (%d bytes)", -+ ui32CmdSize, psClientCCB->ui32Size)); -+ return PVRSRV_ERROR_CMD_TOO_BIG; -+ } -+ -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ while (ui32RetryCount--) -+#endif -+ { -+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) -+ psClientCCB->sUtilisation.ui32CCBAcquired++; -+#endif -+ -+ /* -+ Check we don't overflow the end of the buffer and make sure we have -+ enough space for the padding command. We don't have enough space (including the -+ minimum amount for the padding command) we will need to make sure we insert a -+ padding command now and wrap before adding the main command. -+ */ -+ -+ if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size) -+ { -+ /* The command can fit without wrapping... */ -+ IMG_UINT32 ui32FreeSpace; -+ -+#if defined(PDUMP) -+ /* Wait for sufficient CCB space to become available */ -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, 0, -+ "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", -+ ui32CmdSize, psClientCCB->ui32HostWriteOffset, -+ psClientCCB->szName); -+ DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, -+ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), -+ psClientCCB->ui32HostWriteOffset, -+ ui32CmdSize, -+ psClientCCB->ui32Size); -+#endif -+ -+ RGXFwSharedMemCacheOpValue(psClientCCB->psClientCCBCtrl->ui32ReadOffset, -+ INVALIDATE); -+ ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, -+ psClientCCB->psClientCCBCtrl->ui32ReadOffset, -+ psClientCCB->ui32Size); -+ -+ /* Can command fit? */ -+ if (ui32FreeSpace > ui32CmdSize) -+ { -+ *ppvBufferSpace = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset); -+ return PVRSRV_OK; -+ } -+ /* There is not enough free space in CCB. */ -+ goto e_retry; -+ } -+ else -+ { -+ /* -+ We're at the end of the buffer without enough contiguous space. -+ The command cannot fit without wrapping, we need to insert a -+ padding command and wrap. We need to do this in one go otherwise -+ we would be leaving unflushed commands and forcing the client to -+ deal with flushing the padding command but not the command they -+ wanted to write. Therefore we either do all or nothing. -+ */ -+ RGXFWIF_CCB_CMD_HEADER *psHeader; -+ IMG_UINT32 ui32FreeSpace; -+ IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset; -+ -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ RGXFwSharedMemCacheOpValue(psClientCCB->psClientCCBCtrl->ui32ReadOffset, -+ INVALIDATE); -+ /* Check this is a growable CCB */ -+ if (psClientCCB->ui32VirtualAllocSize > 0) -+ { -+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext); -+ -+ ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, -+ psClientCCB->psClientCCBCtrl->ui32ReadOffset, -+ psClientCCB->ui32Size); -+ /* -+ * Check if CCB should grow or be wrapped. -+ * Wrap CCB if there is no need for grow (CCB is half empty) or CCB can't grow, -+ * and when is free space for command and padding. 
-+ */ -+ if (((ui32FreeSpace > psClientCCB->ui32Size/2) || (psClientCCB->ui32Size == psClientCCB->ui32VirtualAllocSize)) && -+ (ui32FreeSpace > ui32Remain + ui32CmdSize)) -+ { -+ /* Wrap CCB */ -+ psHeader = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset); -+ psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING; -+ psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER); -+ -+#if defined(PDUMP) -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, ui32PDumpFlags, -+ "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize); -+ if (bPDumpEnabled) -+ { -+ DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc, -+ psClientCCB->ui32HostWriteOffset, -+ ui32Remain, -+ ui32PDumpFlags); -+ } -+#endif -+ -+ *ppvBufferSpace = psClientCCB->pvClientCCB; -+ return PVRSRV_OK; -+ } -+ else if ((psClientCCB->ui32Size < psClientCCB->ui32VirtualAllocSize) && -+ (psClientCCB->ui32HostWriteOffset >= psClientCCB->psClientCCBCtrl->ui32ReadOffset)) -+ { -+ /* Grow CCB */ -+ PHYS_HEAP *psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]; -+ PHYS_HEAP_POLICY uiHeapPolicy = PhysHeapGetPolicy(psPhysHeap); -+ PVRSRV_ERROR eErr = PVRSRV_OK; -+ -+ /* Something went wrong if we are here a second time */ -+ PVR_ASSERT(ui32RetryCount != 0); -+ OSLockAcquire(psClientCCB->hCCBGrowLock); -+ -+ /* -+ * On LMA sparse memory can't be mapped to kernel without support for non physically -+ * sparse allocations. -+ * To work around this whole ccb memory was allocated at once as contiguous. -+ * In such case below sparse change is not needed because memory is already allocated. -+ */ -+ if (uiHeapPolicy == PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG) -+ { -+ IMG_UINT32 ui32AllocChunkCount = psClientCCB->ui32Size / psClientCCB->ui32ChunkSize; -+ -+ eErr = _RGXCCBMemChangeSparse(psClientCCB, ui32AllocChunkCount); -+ } -+ -+ /* Setup new CCB size */ -+ if (eErr == PVRSRV_OK) -+ { -+ psClientCCB->ui32Size += psClientCCB->ui32Size; -+ } -+ else -+ { -+ PVR_LOG(("%s: Client CCB (%s) grow failed (%s)", __func__, psClientCCB->szName, PVRSRVGetErrorString(eErr))); -+ OSLockRelease(psClientCCB->hCCBGrowLock); -+ goto e_retry; -+ } -+ -+#if defined(PDUMP) -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, ui32PDumpFlags, "cCCB update for grow"); -+ if (bPDumpEnabled) -+ { -+ DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc, -+ offsetof(RGXFWIF_CCCB_CTL, ui32WrapMask), -+ sizeof(psClientCCB->psClientCCBCtrl->ui32WrapMask), -+ ui32PDumpFlags); -+ DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc, -+ offsetof(RGX_CLIENT_CCB, ui32Size), -+ sizeof(psClientCCB->ui32Size), -+ ui32PDumpFlags); -+ } -+#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ -+ -+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) -+ PVR_LOG(("%s: Client CCB (%s) grew to %u", __func__, psClientCCB->szName, psClientCCB->ui32Size)); -+ /* Reset counters */ -+ _RGXInitCCBUtilisation(psClientCCB); -+#endif -+ -+ /* CCB doubled the size so retry now. 
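Growing the CCB above always doubles the current size: the second half of the virtually reserved buffer is populated by mapping ui32Size/ui32ChunkSize new chunks whose mapping-table indices start right after the chunks already present, and growth stops once the size reaches the virtual allocation. A rough, driver-free sketch of that sizing and mapping-table arithmetic follows; the names are invented.

#include <stdint.h>
#include <stdio.h>

/* Illustrative model of the doubling grow in RGXAcquireCCB: fill a mapping
 * table with the chunk indices for the new second half, then double the size.
 * Returns 0 on success, -1 if the CCB is already at its virtual maximum. */
static int ccb_grow_double(uint32_t *size, uint32_t virtualMax,
                           uint32_t chunkSize, uint32_t *mappingTable)
{
    uint32_t allocChunks = *size / chunkSize;   /* chunks currently mapped    */
    uint32_t i;

    if (*size >= virtualMax)
    {
        return -1;                              /* cannot grow any further    */
    }

    for (i = 0; i < allocChunks; i++)
    {
        /* New chunks are appended directly after the existing ones. */
        mappingTable[i] = allocChunks + i;
    }

    *size += *size;                             /* CCB sizes stay powers of two */
    return 0;
}

int main(void)
{
    uint32_t size = 16384, chunk = 4096, virtualMax = 65536;
    uint32_t table[8];

    while (ccb_grow_double(&size, virtualMax, chunk, table) == 0)
    {
        printf("grew to %u bytes\n", size);     /* 32768, then 65536 */
    }
    return 0;
}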
*/ -+ OSLockRelease(psClientCCB->hCCBGrowLock); -+ } -+ else -+ { -+ /* CCB can't grow anymore and can't be wrapped */ -+#if defined(PDUMP) -+ /* Wait for sufficient CCB space to become available */ -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, 0, -+ "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", -+ ui32Remain, psClientCCB->ui32HostWriteOffset, -+ psClientCCB->szName); -+ DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, -+ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), -+ psClientCCB->ui32HostWriteOffset, -+ ui32Remain, -+ psClientCCB->ui32Size); -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, 0, -+ "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", -+ ui32CmdSize, 0 /*ui32HostWriteOffset after wrap */, -+ psClientCCB->szName); -+ DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, -+ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), -+ 0 /*ui32HostWriteOffset after wrap */, -+ ui32CmdSize, -+ psClientCCB->ui32Size); -+ /* CCB has now space for our command so try wrapping again. Retry now. */ -+#else /* defined(PDUMP) */ -+ goto e_retry; -+#endif /* defined(PDUMP) */ -+ } -+ } -+ else -+#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ -+ { -+#if defined(PDUMP) -+ /* Wait for sufficient CCB space to become available */ -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, 0, -+ "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", -+ ui32Remain, psClientCCB->ui32HostWriteOffset, -+ psClientCCB->szName); -+ DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, -+ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), -+ psClientCCB->ui32HostWriteOffset, -+ ui32Remain, -+ psClientCCB->ui32Size); -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, 0, -+ "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", -+ ui32CmdSize, 0 /*ui32HostWriteOffset after wrap */, -+ psClientCCB->szName); -+ DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, -+ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), -+ 0 /*ui32HostWriteOffset after wrap */, -+ ui32CmdSize, -+ psClientCCB->ui32Size); -+#endif -+ RGXFwSharedMemCacheOpValue(psClientCCB->psClientCCBCtrl->ui32ReadOffset, -+ INVALIDATE); -+ ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, -+ psClientCCB->psClientCCBCtrl->ui32ReadOffset, -+ psClientCCB->ui32Size); -+ -+ if (ui32FreeSpace > ui32Remain + ui32CmdSize) -+ { -+ psHeader = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset); -+ psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING; -+ psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER); -+#if defined(PDUMP) -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, ui32PDumpFlags, "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize); -+ if (bPDumpEnabled) -+ { -+ DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc, -+ psClientCCB->ui32HostWriteOffset, -+ ui32Remain, -+ ui32PDumpFlags); -+ } -+#endif -+ -+ *ppvBufferSpace = psClientCCB->pvClientCCB; -+ return PVRSRV_OK; -+ } -+ -+ goto e_retry; -+ } -+ } -+ } -+e_retry: -+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) -+ psClientCCB->sUtilisation.ui32CCBFull++; -+ _RGXCCBUtilisationEvent(psClientCCB, -+ PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED, -+ ui32CmdSize); -+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ -+ return PVRSRV_ERROR_RETRY; -+} -+ -+/****************************************************************************** -+ FUNCTION : RGXReleaseCCB -+ -+ PURPOSE : Release a CCB that we have been writing to. 
-+ -+ PARAMETERS : psDevData - device data -+ psCCB - the CCB -+ -+ RETURNS : None -+******************************************************************************/ -+void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB, -+ IMG_UINT32 ui32CmdSize, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+#if defined(PDUMP) -+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext); -+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; -+ IMG_BOOL bPDumpEnabled = PDumpCheckFlagsWrite(psDeviceNode, ui32PDumpFlags); -+ IMG_BOOL bPDumpFlagsContinuous = PDUMP_IS_CONTINUOUS(ui32PDumpFlags); -+#endif -+ -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ OSLockAcquire(psClientCCB->hCCBGrowLock); -+#endif -+ /* -+ * If a padding command was needed then we should now move ui32HostWriteOffset -+ * forward. The command has already be dumped (if bPDumpEnabled). -+ */ -+ if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) > psClientCCB->ui32Size) -+ { -+ IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset; -+ -+ UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset, -+ ui32Remain, -+ psClientCCB->ui32Size); -+ psClientCCB->ui32ByteCount += ui32Remain; -+ } -+ -+#if defined(PDUMP) -+ /* Dump the CCB data */ -+ if (bPDumpEnabled) -+ { -+ DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc, -+ psClientCCB->ui32HostWriteOffset, -+ ui32CmdSize, -+ ui32PDumpFlags); -+ } -+#endif -+ -+ /* -+ * Check if there any fences being written that will already be -+ * satisfied by the last written update command in this CCB. At the -+ * same time we can ASSERT that all sync addresses are not NULL. -+ */ -+#if defined(DEBUG) -+ { -+ void *pvBufferStart = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset); -+ void *pvBufferEnd = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset + ui32CmdSize); -+ IMG_BOOL bMessagePrinted = IMG_FALSE; -+ -+ /* Walk through the commands in this section of CCB being released... */ -+ while (pvBufferStart < pvBufferEnd) -+ { -+ RGXFWIF_CCB_CMD_HEADER *psCmdHeader = pvBufferStart; -+ -+ if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UPDATE) -+ { -+ /* If an UPDATE then record the values in case an adjacent fence uses it. */ -+ IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); -+ RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER)); -+ -+ psClientCCB->ui32UpdateEntries = 0; -+ while (ui32NumUFOs-- > 0) -+ { -+ PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0); -+ if (psClientCCB->ui32UpdateEntries < RGX_CCCB_FENCE_UPDATE_LIST_SIZE) -+ { -+ psClientCCB->asFenceUpdateList[psClientCCB->ui32UpdateEntries++] = *psUFOPtr++; -+ } -+ } -+ } -+ else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE) -+ { -+ /* If a FENCE then check the values against the last UPDATE issued. 
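Releasing space follows the same arithmetic in reverse: if the commands just written needed a wrap, the host write offset is first advanced by the remainder consumed by the padding command, then by the command size itself, with every advance masked by the power-of-two size. UPDATE_CCB_OFFSET is not shown in this hunk, so the masked advance below is an assumption about its behaviour rather than a copy of it, and the constants are made up.

#include <stdint.h>
#include <stdio.h>

/* Assumed behaviour of UPDATE_CCB_OFFSET: advance and wrap on the
 * power-of-two CCB size. */
static void ccb_advance(uint32_t *off, uint32_t bytes, uint32_t size)
{
    *off = (*off + bytes) & (size - 1);
}

/* Mirrors the start of RGXReleaseCCB: consume the padding remainder first
 * if the released command had to wrap, then consume the command itself. */
static void ccb_release(uint32_t *wrOff, uint32_t cmdSize, uint32_t size,
                        uint32_t paddingCmdSize)
{
    if (*wrOff + cmdSize + paddingCmdSize > size)
    {
        uint32_t remain = size - *wrOff;
        ccb_advance(wrOff, remain, size);   /* skip over the padding command */
    }
    ccb_advance(wrOff, cmdSize, size);
}

int main(void)
{
    uint32_t wr = 4000;
    ccb_release(&wr, 256, 4096, 16);
    printf("write offset now %u\n", wr);    /* wrapped to 0, then 256 */
    return 0;
}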
*/ -+ IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); -+ RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER)); -+ -+ while (ui32NumUFOs-- > 0) -+ { -+ PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0); -+ -+ if (bMessagePrinted == IMG_FALSE) -+ { -+ RGXFWIF_UFO *psUpdatePtr = psClientCCB->asFenceUpdateList; -+ IMG_UINT32 ui32UpdateIndex; -+ -+ for (ui32UpdateIndex = 0; ui32UpdateIndex < psClientCCB->ui32UpdateEntries; ui32UpdateIndex++) -+ { -+ if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) -+ { -+ if (RGX_UFO_PTR_ADDR(psUFOPtr) == RGX_UFO_PTR_ADDR(psUpdatePtr)) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "Redundant sync checkpoint check found in cCCB(%p) - 0x%x -> 0x%x", -+ psClientCCB, RGX_UFO_PTR_ADDR(psUFOPtr), psUFOPtr->ui32Value)); -+ bMessagePrinted = IMG_TRUE; -+ break; -+ } -+ } -+ else -+ { -+ if (psUFOPtr->puiAddrUFO.ui32Addr == psUpdatePtr->puiAddrUFO.ui32Addr && -+ psUFOPtr->ui32Value == psUpdatePtr->ui32Value) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "Redundant fence check found in cCCB(%p) - 0x%x -> 0x%x", -+ psClientCCB, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value)); -+ bMessagePrinted = IMG_TRUE; -+ break; -+ } -+ } -+ psUpdatePtr++; -+ } -+ } -+ -+ psUFOPtr++; -+ } -+ } -+ else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR || -+ psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE) -+ { -+ /* For all other UFO ops check the UFO address is not NULL. */ -+ IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); -+ RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER)); -+ -+ while (ui32NumUFOs-- > 0) -+ { -+ PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0); -+ psUFOPtr++; -+ } -+ } -+ -+ /* Move to the next command in this section of CCB being released... */ -+ pvBufferStart = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER) + psCmdHeader->ui32CmdSize); -+ } -+ } -+#endif /* REDUNDANT_SYNCS_DEBUG */ -+ -+ -+#if defined(PVRSRV_FORCE_FLUSH_CCCB_ON_KICK) -+ { -+ DEVMEM_MEMDESC* psClientCCBMemDesc = psClientCCB->psClientCCBMemDesc; -+ void *pvClientCCBAddr = psClientCCB->pvClientCCB; -+ PMR *psClientCCBMemDescPMR = NULL; -+ IMG_DEVMEM_OFFSET_T uiPMROffset; -+ -+ DevmemGetPMRData(psClientCCBMemDesc, -+ (IMG_HANDLE*)&psClientCCBMemDescPMR, -+ &uiPMROffset); -+ -+ CacheOpValExec(psClientCCBMemDescPMR, -+ (IMG_UINT64)(uintptr_t) pvClientCCBAddr, -+ uiPMROffset, -+ psClientCCBMemDesc->uiAllocSize, -+ PVRSRV_CACHE_OP_FLUSH); -+ -+ } -+#endif -+ /* Flush the CCB data */ -+ RGXFwSharedMemFlushCCB(psClientCCB->pvClientCCB, -+ psClientCCB->psClientCCBCtrl->ui32ReadOffset, -+ psClientCCB->ui32HostWriteOffset, -+ psClientCCB->psClientCCBCtrl->ui32WrapMask + 1); -+ -+ /* -+ * Update the CCB write offset. -+ */ -+ UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset, -+ ui32CmdSize, -+ psClientCCB->ui32Size); -+ psClientCCB->ui32ByteCount += ui32CmdSize; -+ -+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) -+ _RGXUpdateCCBUtilisation(psClientCCB); -+#endif -+ /* -+ PDumpSetFrame will detect as we Transition out of capture range for -+ frame based data but if we are PDumping continuous data then we -+ need to inform the PDump layer ourselves -+ -+ First check is to confirm we are in continuous mode -+ Second check is to confirm the pdump client is connected and ready. -+ Third check is to confirm we are not in capture range. 
-+ */ -+#if defined(PDUMP) -+ if (bPDumpFlagsContinuous && -+ bPDumpEnabled && -+ !PDumpCheckFlagsWrite(psDeviceNode, PDUMP_FLAGS_NONE)) -+ { -+ PVRSRV_ERROR eError; -+ -+ /* Only Transitioning into capture range can cause an error */ -+ eError = PDumpTransition(psDeviceNode, -+ psClientCCB->psPDumpConnectionData, -+ PDUMP_TRANSITION_EVENT_RANGE_EXITED, -+ ui32PDumpFlags); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ } -+ -+ if (bPDumpEnabled) -+ { -+ if (!BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN)) -+ { -+ /* Store offset to last finished CCB command. This offset can -+ * be needed when appending commands to a non finished CCB. -+ */ -+ psClientCCB->ui32FinishedPDumpWriteOffset = psClientCCB->ui32LastPDumpWriteOffset; -+ } -+ -+ /* Update the PDump write offset to show we PDumped this command */ -+ psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset; -+ } -+#endif -+ -+#if defined(NO_HARDWARE) -+ /* -+ The firmware is not running, it cannot update these; we do here instead. -+ */ -+ psClientCCB->psClientCCBCtrl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset; -+ psClientCCB->psClientCCBCtrl->ui32DepOffset = psClientCCB->ui32HostWriteOffset; -+ psClientCCB->psClientCCBCtrl->ui32ReadOffset2 = psClientCCB->ui32HostWriteOffset; -+ psClientCCB->psClientCCBCtrl->ui32ReadOffset3 = psClientCCB->ui32HostWriteOffset; -+ psClientCCB->psClientCCBCtrl->ui32ReadOffset4 = psClientCCB->ui32HostWriteOffset; -+#endif -+ -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ OSLockRelease(psClientCCB->hCCBGrowLock); -+#endif -+} -+ -+IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB) -+{ -+ return psClientCCB->ui32HostWriteOffset; -+} -+ -+IMG_UINT32 RGXGetWrapMaskCCB(RGX_CLIENT_CCB *psClientCCB) -+{ -+ return psClientCCB->ui32Size-1; -+} -+ -+void RGXSetCCBFlags(RGX_CLIENT_CCB *psClientCCB, -+ IMG_UINT32 ui32Flags) -+{ -+ if ((ui32Flags & RGX_CONTEXT_FLAG_DISABLESLR)) -+ { -+ BIT_SET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED); -+ } -+ else -+ { -+ BIT_UNSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED); -+ } -+} -+ -+void RGXCmdHelperInitCmdCCB_CommandSize(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT64 ui64FBSCEntryMask, -+ IMG_UINT32 ui32ClientFenceCount, -+ IMG_UINT32 ui32ClientUpdateCount, -+ IMG_UINT32 ui32CmdSize, -+ PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, -+ PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, -+ PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, -+ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; -+ IMG_BOOL bCacheInval = IMG_TRUE; -+ IMG_UINT32 ui32FenceCmdSize = 0; -+ IMG_UINT32 ui32UpdateCmdSize = 0; -+ -+ /* Init the generated data members */ -+ psCmdHelperData->ui32FBSCInvalCmdSize = 0; -+ psCmdHelperData->ui64FBSCEntryMask = 0; -+ psCmdHelperData->ui32PreTimeStampCmdSize = 0; -+ psCmdHelperData->ui32PostTimeStampCmdSize = 0; -+ psCmdHelperData->ui32RMWUFOCmdSize = 0; -+ -+ /* Only compile if RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE is defined to avoid -+ * compilation errors on rogue cores. 
-+ */ -+#if defined(RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE) -+ bCacheInval = !(PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE) && -+ PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, USC_INSTRUCTION_CACHE_AUTO_INVALIDATE) && -+ PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, TDM_SLC_MMU_AUTO_CACHE_OPS) && -+ PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, GEOM_SLC_MMU_AUTO_CACHE_OPS) && -+ PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, FRAG_SLC_MMU_AUTO_CACHE_OPS) && -+ PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, COMPUTE_SLC_MMU_AUTO_CACHE_OPS)) || -+ RGX_IS_BRN_SUPPORTED(psDevInfo, 71960) || -+ RGX_IS_BRN_SUPPORTED(psDevInfo, 72143); -+#else -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+#endif -+ -+ /* Total FBSC invalidate command size (header plus command data) */ -+ if (bCacheInval) -+ { -+ if (ui64FBSCEntryMask != 0) -+ { -+ psCmdHelperData->ui32FBSCInvalCmdSize = -+ RGX_CCB_FWALLOC_ALIGN(sizeof(psCmdHelperData->ui64FBSCEntryMask) + -+ sizeof(RGXFWIF_CCB_CMD_HEADER)); -+ psCmdHelperData->ui64FBSCEntryMask = ui64FBSCEntryMask; -+ } -+ } -+ -+ /* total DM command size (header plus command data) */ -+ -+ psCmdHelperData->ui32DMCmdSize = -+ RGX_CCB_FWALLOC_ALIGN(ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER)); -+ -+ if (ui32ClientFenceCount != 0) -+ { -+ ui32FenceCmdSize = -+ RGX_CCB_FWALLOC_ALIGN(ui32ClientFenceCount * sizeof(RGXFWIF_UFO) + -+ sizeof(RGXFWIF_CCB_CMD_HEADER)); -+ } -+ -+ if (ui32ClientUpdateCount != 0) -+ { -+ ui32UpdateCmdSize = -+ RGX_CCB_FWALLOC_ALIGN(ui32ClientUpdateCount * sizeof(RGXFWIF_UFO) + -+ sizeof(RGXFWIF_CCB_CMD_HEADER)); -+ } -+ -+ if (ppPreAddr && (ppPreAddr->ui32Addr != 0)) -+ { -+ psCmdHelperData->ui32PreTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) -+ + PVR_ALIGN(sizeof(RGXFWIF_DEV_VIRTADDR), RGXFWIF_FWALLOC_ALIGN); -+ } -+ -+ if (ppPostAddr && (ppPostAddr->ui32Addr != 0)) -+ { -+ psCmdHelperData->ui32PostTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) -+ + PVR_ALIGN(sizeof(RGXFWIF_DEV_VIRTADDR), RGXFWIF_FWALLOC_ALIGN); -+ } -+ -+ if (ppRMWUFOAddr && (ppRMWUFOAddr->ui32Addr != 0)) -+ { -+ psCmdHelperData->ui32RMWUFOCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_UFO); -+ } -+ -+ psCmdHelperData->ui32TotalSize = -+ ui32FenceCmdSize + -+ psCmdHelperData->ui32FBSCInvalCmdSize + -+ psCmdHelperData->ui32DMCmdSize + -+ ui32UpdateCmdSize + -+ psCmdHelperData->ui32PreTimeStampCmdSize + -+ psCmdHelperData->ui32PostTimeStampCmdSize + -+ psCmdHelperData->ui32RMWUFOCmdSize; -+ -+ psCmdHelperData->ui32DMCmdOffset = -+ ui32FenceCmdSize + -+ psCmdHelperData->ui32PreTimeStampCmdSize + -+ psCmdHelperData->ui32FBSCInvalCmdSize; -+} -+ -+/* -+ Work out how much space this command will require -+*/ -+void RGXCmdHelperInitCmdCCB_OtherData(RGX_CLIENT_CCB *psClientCCB, -+ IMG_UINT32 ui32ClientFenceCount, -+ PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, -+ IMG_UINT32 *paui32FenceValue, -+ IMG_UINT32 ui32ClientUpdateCount, -+ PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress, -+ IMG_UINT32 *paui32UpdateValue, -+ IMG_UINT32 ui32CmdSize, -+ IMG_PBYTE pui8DMCmd, -+ PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, -+ PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, -+ PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, -+ RGXFWIF_CCB_CMD_TYPE eType, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ IMG_UINT32 ui32PDumpFlags, -+ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData, -+ IMG_CHAR *pszCommandName, -+ IMG_BOOL bCCBStateOpen, -+ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = NULL; -+ -+ /* Job reference values */ -+ psCmdHelperData->ui32ExtJobRef = ui32ExtJobRef; 
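The size pre-computation above is plain alignment arithmetic: each sub-command (fence UFOs, FBSC invalidate payload, the DM command, updates, and, sized slightly differently, the timestamps and RMW UFO) is broadly a header plus payload rounded up to the firmware allocation granularity, the total is their sum, and the DM command's offset is the sum of everything placed before it. A worked stand-alone version, with a made-up 16-byte grain in place of RGX_CCB_FWALLOC_ALIGN and simplified structure sizes, looks like this:

#include <stdint.h>
#include <stdio.h>

#define FWALLOC_GRAIN   16u                    /* assumed alignment grain     */
#define CMD_HEADER_SIZE 32u                    /* stand-in for the CCB header */
#define UFO_SIZE         8u                    /* stand-in for RGXFWIF_UFO    */

static uint32_t align_up(uint32_t x)
{
    return (x + FWALLOC_GRAIN - 1) & ~(FWALLOC_GRAIN - 1);
}

int main(void)
{
    uint32_t fences = 3, updates = 2, dmPayload = 100;

    uint32_t fenceCmd  = fences  ? align_up(fences  * UFO_SIZE + CMD_HEADER_SIZE) : 0;
    uint32_t updateCmd = updates ? align_up(updates * UFO_SIZE + CMD_HEADER_SIZE) : 0;
    uint32_t dmCmd     = align_up(dmPayload + CMD_HEADER_SIZE);

    uint32_t total    = fenceCmd + dmCmd + updateCmd;
    uint32_t dmOffset = fenceCmd;              /* DM sits after the fences here */

    printf("fence=%u dm=%u update=%u total=%u dmOffset=%u\n",
           fenceCmd, dmCmd, updateCmd, total, dmOffset);
    return 0;
}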
-+ psCmdHelperData->ui32IntJobRef = ui32IntJobRef; -+ -+ /* Save the data we require in the submit call */ -+ psCmdHelperData->psClientCCB = psClientCCB; -+#if defined(PDUMP) -+ psCmdHelperData->ui32PDumpFlags = ui32PDumpFlags; -+ psDevInfo = FWCommonContextGetRGXDevInfo(psCmdHelperData->psClientCCB->psServerCommonContext); -+#else -+ PVR_UNREFERENCED_PARAMETER(psDevInfo); -+#endif -+ psCmdHelperData->pszCommandName = pszCommandName; -+ if (bCCBStateOpen) -+ { -+ BIT_SET(psCmdHelperData->psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN); -+ } -+ else -+ { -+ BIT_UNSET(psCmdHelperData->psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN); -+ } -+ -+ /* Client sync data */ -+ psCmdHelperData->ui32ClientFenceCount = ui32ClientFenceCount; -+ psCmdHelperData->pauiFenceUFOAddress = pauiFenceUFOAddress; -+ psCmdHelperData->paui32FenceValue = paui32FenceValue; -+ psCmdHelperData->ui32ClientUpdateCount = ui32ClientUpdateCount; -+ psCmdHelperData->pauiUpdateUFOAddress = pauiUpdateUFOAddress; -+ psCmdHelperData->paui32UpdateValue = paui32UpdateValue; -+ -+ /* Command data */ -+ psCmdHelperData->ui32CmdSize = ui32CmdSize; -+ psCmdHelperData->pui8DMCmd = pui8DMCmd; -+ psCmdHelperData->eType = eType; -+ -+ if (ppPreAddr) -+ { -+ psCmdHelperData->pPreTimestampAddr = *ppPreAddr; -+ } -+ -+ if (ppPostAddr) -+ { -+ psCmdHelperData->pPostTimestampAddr = *ppPostAddr; -+ } -+ -+ if (ppRMWUFOAddr) -+ { -+ psCmdHelperData->pRMWUFOAddr = *ppRMWUFOAddr; -+ } -+ -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, -+ "%s Command Server Init on FWCtx %08x", pszCommandName, -+ FWCommonContextGetFWAddress(psClientCCB->psServerCommonContext).ui32Addr); -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Workload Data added */ -+ psCmdHelperData->psWorkEstKickData = psWorkEstKickData; -+ } -+#endif -+} -+ -+/* -+ Work out how much space this command will require -+*/ -+void RGXCmdHelperInitCmdCCB(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGX_CLIENT_CCB *psClientCCB, -+ IMG_UINT64 ui64FBSCEntryMask, -+ IMG_UINT32 ui32ClientFenceCount, -+ PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, -+ IMG_UINT32 *paui32FenceValue, -+ IMG_UINT32 ui32ClientUpdateCount, -+ PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress, -+ IMG_UINT32 *paui32UpdateValue, -+ IMG_UINT32 ui32CmdSize, -+ IMG_PBYTE pui8DMCmd, -+ PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, -+ PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, -+ PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, -+ RGXFWIF_CCB_CMD_TYPE eType, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ IMG_UINT32 ui32PDumpFlags, -+ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData, -+ IMG_CHAR *pszCommandName, -+ IMG_BOOL bCCBStateOpen, -+ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) -+{ -+ RGXCmdHelperInitCmdCCB_CommandSize(psDevInfo, -+ ui64FBSCEntryMask, -+ ui32ClientFenceCount, -+ ui32ClientUpdateCount, -+ ui32CmdSize, -+ ppPreAddr, -+ ppPostAddr, -+ ppRMWUFOAddr, -+ psCmdHelperData); -+ -+ RGXCmdHelperInitCmdCCB_OtherData(psClientCCB, -+ ui32ClientFenceCount, -+ pauiFenceUFOAddress, -+ paui32FenceValue, -+ ui32ClientUpdateCount, -+ pauiUpdateUFOAddress, -+ paui32UpdateValue, -+ ui32CmdSize, -+ pui8DMCmd, -+ ppPreAddr, -+ ppPostAddr, -+ ppRMWUFOAddr, -+ eType, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ ui32PDumpFlags, -+ psWorkEstKickData, -+ pszCommandName, -+ bCCBStateOpen, -+ psCmdHelperData); -+} -+ -+static inline void RGXWriteCmdHeader(void *pvCCB, IMG_UINT32 eCmdType, IMG_UINT32 ui32TotalSize, -+ IMG_UINT32 ui32ExtJobRef, IMG_UINT32 ui32IntJobRef, -+ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData) -+{ -+ 
RGXFWIF_CCB_CMD_HEADER sCmdHeader; -+ -+ sCmdHeader.eCmdType = eCmdType; -+ sCmdHeader.ui32CmdSize = ui32TotalSize - sizeof(RGXFWIF_CCB_CMD_HEADER); -+ sCmdHeader.ui32ExtJobRef = ui32ExtJobRef; -+ sCmdHeader.ui32IntJobRef = ui32IntJobRef; -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ if (psWorkEstKickData != NULL && -+ RGXIsValidWorkloadEstCCBCommand(eCmdType)) -+ { -+ sCmdHeader.sWorkEstKickData = *psWorkEstKickData; -+ } -+ else -+ { -+ sCmdHeader.sWorkEstKickData.ui16ReturnDataIndex = 0; -+ sCmdHeader.sWorkEstKickData.ui64Deadline = 0; -+ sCmdHeader.sWorkEstKickData.ui32CyclesPrediction = 0; -+ } -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(psWorkEstKickData); -+#endif -+ -+ OSCachedMemCopy(pvCCB, &sCmdHeader, sizeof(RGXFWIF_CCB_CMD_HEADER)); -+} -+ -+/* -+ Reserve space in the CCB and fill in the command and client sync data -+*/ -+PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount, -+ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData) -+{ -+ IMG_UINT32 ui32AllocSize = 0; -+ IMG_UINT32 i; -+ void *pvStartPtr; -+ PVRSRV_ERROR eError; -+#if defined(PDUMP) -+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(asCmdHelperData->psClientCCB->psServerCommonContext); -+#endif -+ RGXFWIF_UFO asUFOs[RGXFWIF_CCB_CMD_MAX_UFOS]; -+ -+ /* -+ Check the number of fences & updates are valid. -+ */ -+ for (i = 0; i < ui32CmdCount; i++) -+ { -+ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i]; -+ -+ if (psCmdHelperData->ui32ClientFenceCount > RGXFWIF_CCB_CMD_MAX_UFOS || -+ psCmdHelperData->ui32ClientUpdateCount > RGXFWIF_CCB_CMD_MAX_UFOS) -+ { -+ return PVRSRV_ERROR_TOO_MANY_SYNCS; -+ } -+ } -+ -+ /* -+ Work out how much space we need for all the command(s) -+ */ -+ ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData); -+ -+#if defined(PDUMP) -+ for (i = 0; i < ui32CmdCount; i++) -+ { -+ if ((asCmdHelperData[0].ui32PDumpFlags ^ asCmdHelperData[i].ui32PDumpFlags) & PDUMP_FLAGS_CONTINUOUS) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: PDump continuous is not consistent (%s != %s) for command %d", -+ __func__, -+ PDUMP_IS_CONTINUOUS(asCmdHelperData[0].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE", -+ PDUMP_IS_CONTINUOUS(asCmdHelperData[i].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE", -+ ui32CmdCount)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ } -+#endif -+ -+ /* -+ Acquire space in the CCB for all the command(s). -+ */ -+ eError = RGXAcquireCCB(asCmdHelperData[0].psClientCCB, -+ ui32AllocSize, -+ &pvStartPtr, -+ asCmdHelperData[0].ui32PDumpFlags); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ return eError; -+ } -+ -+ /* -+ For each command fill in the fence, DM, and update command -+ -+ */ -+ for (i = 0; i < ui32CmdCount; i++) -+ { -+ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = & asCmdHelperData[i]; -+ void *pvCmdPtr; -+#if defined(PDUMP) -+ IMG_UINT32 ui32CtxAddr = FWCommonContextGetFWAddress(asCmdHelperData->psClientCCB->psServerCommonContext).ui32Addr; -+ IMG_UINT32 ui32CcbWoff = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(asCmdHelperData->psClientCCB->psServerCommonContext)); -+#endif -+ -+ if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0) -+ { -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Start of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes", -+ psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff); -+ } -+ -+ pvCmdPtr = pvStartPtr; -+ -+ /* -+ Create the fence command. 
-+ */ -+ if (psCmdHelperData->ui32ClientFenceCount > 0) -+ { -+ IMG_UINT k, uiNextValueIndex; -+ IMG_UINT32 ui32FenceCmdSize = -+ RGX_CCB_FWALLOC_ALIGN(psCmdHelperData->ui32ClientFenceCount * sizeof(RGXFWIF_UFO) + -+ sizeof(RGXFWIF_CCB_CMD_HEADER)); -+ -+ RGXWriteCmdHeader(pvCmdPtr, -+ RGXFWIF_CCB_CMD_TYPE_FENCE, -+ ui32FenceCmdSize, -+ psCmdHelperData->ui32ExtJobRef, -+ psCmdHelperData->ui32IntJobRef, -+ NULL); -+ -+ /* Fill in the client fences */ -+ uiNextValueIndex = 0; -+ for (k = 0; k < psCmdHelperData->ui32ClientFenceCount; k++) -+ { -+ PVR_ASSERT(k < RGXFWIF_CCB_CMD_MAX_UFOS); -+ -+ asUFOs[k].puiAddrUFO = psCmdHelperData->pauiFenceUFOAddress[k]; -+ -+ if (PVRSRV_UFO_IS_SYNC_CHECKPOINT_FWADDR(psCmdHelperData->pauiFenceUFOAddress[k].ui32Addr)) -+ { -+ asUFOs[k].ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; -+ } -+ else -+ { -+ /* Only increment uiNextValueIndex for non sync checkpoints -+ * (as paui32FenceValue only contains values for sync prims) -+ */ -+ asUFOs[k].ui32Value = psCmdHelperData->paui32FenceValue[uiNextValueIndex++]; -+ } -+ -+#if defined(SYNC_COMMAND_DEBUG) -+ PVR_DPF((PVR_DBG_ERROR, "%s client sync fence - 0x%x -> 0x%x", -+ psCmdHelperData->psClientCCB->szName, asUFOs[k].puiAddrUFO.ui32Addr, asUFOs[k].ui32Value)); -+#endif -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ ".. %s client sync fence - 0x%x -> 0x%x", -+ psCmdHelperData->psClientCCB->szName, -+ asUFOs[k].puiAddrUFO.ui32Addr, asUFOs[k].ui32Value); -+ -+ -+ } -+ -+ OSCachedMemCopy(IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)), -+ &asUFOs, psCmdHelperData->ui32ClientFenceCount * sizeof(RGXFWIF_UFO)); -+ pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, ui32FenceCmdSize); -+ } -+ -+ /* -+ Create the FBSC invalidate command. -+ */ -+ if (psCmdHelperData->ui32FBSCInvalCmdSize) -+ { -+ -+ RGXWriteCmdHeader(pvCmdPtr, -+ RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE, -+ psCmdHelperData->ui32FBSCInvalCmdSize, -+ psCmdHelperData->ui32ExtJobRef, -+ psCmdHelperData->ui32IntJobRef, -+ NULL); -+ -+ OSCachedMemCopy(IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)), -+ &psCmdHelperData->ui64FBSCEntryMask, -+ sizeof(psCmdHelperData->ui64FBSCEntryMask)); -+ pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32FBSCInvalCmdSize); -+ } -+ -+ /* -+ Create the pre DM timestamp commands. Pre and Post timestamp commands are supposed to -+ sandwich the DM cmd. The padding code with the CCB wrap upsets the FW if we don't have -+ the task type bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types. 
-+ */ -+ if (psCmdHelperData->ui32PreTimeStampCmdSize != 0) -+ { -+ RGXWriteCmdHeader(pvCmdPtr, -+ RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP, -+ psCmdHelperData->ui32PreTimeStampCmdSize, -+ psCmdHelperData->ui32ExtJobRef, -+ psCmdHelperData->ui32IntJobRef, -+ NULL); -+ -+ OSCachedMemCopy(IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)), -+ &psCmdHelperData->pPreTimestampAddr.ui32Addr, -+ sizeof(psCmdHelperData->pPreTimestampAddr.ui32Addr)); -+ pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32PreTimeStampCmdSize); -+ -+ } -+ -+ /* -+ Create the DM command -+ */ -+ if (psCmdHelperData->ui32DMCmdSize) -+ { -+ PVR_ASSERT(psCmdHelperData->ui32DMCmdSize == sizeof(RGXFWIF_CCB_CMD_HEADER) + psCmdHelperData->ui32CmdSize); -+ -+ RGXWriteCmdHeader(pvCmdPtr, -+ psCmdHelperData->eType, -+ psCmdHelperData->ui32DMCmdSize, -+ psCmdHelperData->ui32ExtJobRef, -+ psCmdHelperData->ui32IntJobRef, -+ psCmdHelperData->psWorkEstKickData); -+ -+ OSCachedMemCopy(IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)), -+ psCmdHelperData->pui8DMCmd, -+ psCmdHelperData->ui32CmdSize); -+ pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32DMCmdSize); -+ } -+ -+ -+ if (psCmdHelperData->ui32PostTimeStampCmdSize != 0) -+ { -+ RGXWriteCmdHeader(pvCmdPtr, -+ RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP, -+ psCmdHelperData->ui32PostTimeStampCmdSize, -+ psCmdHelperData->ui32ExtJobRef, -+ psCmdHelperData->ui32IntJobRef, -+ NULL); -+ -+ OSCachedMemCopy(IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)), -+ &psCmdHelperData->pPostTimestampAddr.ui32Addr, -+ sizeof(psCmdHelperData->pPostTimestampAddr.ui32Addr)); -+ pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32PostTimeStampCmdSize); -+ -+ } -+ -+ -+ if (psCmdHelperData->ui32RMWUFOCmdSize != 0) -+ { -+ RGXWriteCmdHeader(pvCmdPtr, -+ RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE, -+ psCmdHelperData->ui32RMWUFOCmdSize, -+ psCmdHelperData->ui32ExtJobRef, -+ psCmdHelperData->ui32IntJobRef, -+ NULL); -+ -+ asUFOs[0].puiAddrUFO = psCmdHelperData->pRMWUFOAddr; -+ asUFOs[0].ui32Value = 0; -+ -+ OSCachedMemCopy(IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)), -+ asUFOs, sizeof(RGXFWIF_UFO)); -+ pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32RMWUFOCmdSize); -+ } -+ -+ /* -+ Create the update command. 
-+ */ -+ if (psCmdHelperData->ui32ClientUpdateCount > 0) -+ { -+ IMG_UINT k, uiNextValueIndex; -+ IMG_UINT32 ui32UpdateCmdSize = -+ RGX_CCB_FWALLOC_ALIGN(psCmdHelperData->ui32ClientUpdateCount * sizeof(RGXFWIF_UFO) + -+ sizeof(RGXFWIF_CCB_CMD_HEADER)); -+ RGXWriteCmdHeader(pvCmdPtr, -+ RGXFWIF_CCB_CMD_TYPE_UPDATE, -+ ui32UpdateCmdSize, -+ psCmdHelperData->ui32ExtJobRef, -+ psCmdHelperData->ui32IntJobRef, -+ NULL); -+ -+ /* Fill in the client updates */ -+ uiNextValueIndex = 0; -+ for (k = 0; k < psCmdHelperData->ui32ClientUpdateCount; k++) -+ { -+ PVR_ASSERT(k < RGXFWIF_CCB_CMD_MAX_UFOS); -+ -+ asUFOs[k].puiAddrUFO = psCmdHelperData->pauiUpdateUFOAddress[k]; -+ if (PVRSRV_UFO_IS_SYNC_CHECKPOINT_FWADDR(psCmdHelperData->pauiUpdateUFOAddress[k].ui32Addr)) -+ { -+ asUFOs[k].ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; -+ } -+ else -+ { -+ /* Only increment uiNextValueIndex for non sync checkpoints -+ * (as paui32UpdateValue only contains values for sync prims) -+ */ -+ asUFOs[k].ui32Value = psCmdHelperData->paui32UpdateValue[uiNextValueIndex++]; -+ } -+ -+#if defined(SYNC_COMMAND_DEBUG) -+ PVR_DPF((PVR_DBG_ERROR, "%s client sync update - 0x%x -> 0x%x", -+ psCmdHelperData->psClientCCB->szName, asUFOs[k].puiAddrUFO.ui32Addr, asUFOs[k].ui32Value)); -+#endif -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ ".. %s client sync update - 0x%x -> 0x%x", -+ psCmdHelperData->psClientCCB->szName, -+ asUFOs[k].puiAddrUFO.ui32Addr, asUFOs[k].ui32Value); -+ -+ } -+ -+ OSCachedMemCopy(IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)), -+ &asUFOs, psCmdHelperData->ui32ClientUpdateCount * sizeof(RGXFWIF_UFO)); -+ pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, ui32UpdateCmdSize); -+ } -+ -+ /* Set the start pointer for the next iteration around the loop */ -+ pvStartPtr = IMG_OFFSET_ADDR(pvStartPtr, psCmdHelperData->ui32TotalSize); -+ -+ if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0) -+ { -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "End of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes", -+ psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff); -+ } -+ else -+ { -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "No %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes", -+ psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff); -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ Fill in the server syncs data and release the CCB space -+*/ -+void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount, -+ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, -+ const IMG_CHAR *pcszDMName, -+ IMG_UINT32 ui32CtxAddr) -+{ -+ IMG_UINT32 ui32AllocSize = 0; -+ IMG_UINT32 i; -+#if defined(__linux__) && defined(PVRSRV_TRACE_ROGUE_EVENTS) -+ IMG_BOOL bTraceChecks = trace_rogue_are_fence_checks_traced(); -+ IMG_BOOL bTraceUpdates = trace_rogue_are_fence_updates_traced(); -+#endif -+ -+ /* -+ Work out how much space we need for all the command(s) -+ */ -+ ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData); -+ /* -+ For each command write PDump comments and emit FTrace events for fence -+ checks and updates if they exist. 
-+ */
-+	for (i=0; i<ui32CmdCount; i++)
-+	{
-+		RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i];
-+#if (defined(__linux__) && defined(PVRSRV_TRACE_ROGUE_EVENTS)) || defined(PDUMP)
-+		PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCmdHelperData->psClientCCB->psServerCommonContext);
-+#endif
-+
-+#if (!defined(__linux__) || !defined(PDUMP))
-+		PVR_UNREFERENCED_PARAMETER(psCmdHelperData);
-+#endif
-+
-+#if defined(__linux__) && defined(PVRSRV_TRACE_ROGUE_EVENTS)
-+		if (bTraceChecks)
-+		{
-+			trace_rogue_fence_checks(psCmdHelperData->pszCommandName,
-+			                         pcszDMName,
-+			                         psDevInfo->psDeviceNode->sDevId.ui32InternalID,
-+			                         ui32CtxAddr,
-+			                         psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
-+			                         psCmdHelperData->ui32ClientFenceCount,
-+			                         psCmdHelperData->pauiFenceUFOAddress,
-+			                         psCmdHelperData->paui32FenceValue);
-+		}
-+		if (bTraceUpdates)
-+		{
-+			trace_rogue_fence_updates(psCmdHelperData->pszCommandName,
-+			                          pcszDMName,
-+			                          psDevInfo->psDeviceNode->sDevId.ui32InternalID,
-+			                          ui32CtxAddr,
-+			                          psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
-+			                          psCmdHelperData->ui32ClientUpdateCount,
-+			                          psCmdHelperData->pauiUpdateUFOAddress,
-+			                          psCmdHelperData->paui32UpdateValue);
-+		}
-+#endif
-+
-+		/*
-+			All the commands have been filled in so release the CCB space.
-+			The FW still won't run this command until we kick it
-+		*/
-+		PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode,
-+		                      psCmdHelperData->ui32PDumpFlags,
-+		                      "%s Command Server Release on FWCtx %08x",
-+		                      psCmdHelperData->pszCommandName, ui32CtxAddr);
-+	}
-+
-+	RGXReleaseCCB(asCmdHelperData[0].psClientCCB,
-+	              ui32AllocSize,
-+	              asCmdHelperData[0].ui32PDumpFlags);
-+
-+	BIT_UNSET(asCmdHelperData[0].psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN);
-+}
-+
-+IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount,
-+                                      RGX_CCB_CMD_HELPER_DATA *asCmdHelperData)
-+{
-+	IMG_UINT32 ui32AllocSize = 0;
-+	IMG_UINT32 i;
-+
-+	/*
-+		Work out how much space we need for all the command(s)
-+	*/
-+	for (i = 0; i < ui32CmdCount; i++)
-+	{
-+		ui32AllocSize += asCmdHelperData[i].ui32TotalSize;
-+	}
-+
-+	return ui32AllocSize;
-+}
-+
-+/* Work out how much of an offset there is to a specific command.
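The size/offset helpers here reduce to a prefix sum over ui32TotalSize, which is why the offset of command i can be computed by calling the size helper with a count of i, as the function after this note does. A self-contained sketch of that relationship, using cmd_helper_t as a stripped-down stand-in for the real helper data:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the helper data: only the field the size/offset helpers need. */
typedef struct { uint32_t total_size; } cmd_helper_t;

/* Total bytes needed for the first 'count' commands. */
static uint32_t commands_size(uint32_t count, const cmd_helper_t *cmds)
{
    uint32_t size = 0;
    for (uint32_t i = 0; i < count; i++)
        size += cmds[i].total_size;
    return size;
}

/* Offset of command 'idx' is simply the size of everything before it. */
static uint32_t command_offset(const cmd_helper_t *cmds, uint32_t idx)
{
    return commands_size(idx, cmds);
}

int main(void)
{
    cmd_helper_t cmds[3] = { { 64 }, { 128 }, { 32 } };
    printf("total=%u offset[2]=%u\n",
           (unsigned)commands_size(3, cmds), (unsigned)command_offset(cmds, 2));
    return 0;  /* prints total=224 offset[2]=192 */
}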
*/ -+IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, -+ IMG_UINT32 ui32Cmdindex) -+{ -+ return RGXCmdHelperGetCommandSize(ui32Cmdindex, asCmdHelperData); -+} -+ -+/* Returns the offset of the data master command from a write offset */ -+IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) -+{ -+ return psCmdHelperData->ui32DMCmdOffset; -+} -+ -+static const char *_CCBCmdTypename(RGXFWIF_CCB_CMD_TYPE cmdType) -+{ -+ switch (cmdType) -+ { -+ case RGXFWIF_CCB_CMD_TYPE_GEOM: return "TA"; -+ case RGXFWIF_CCB_CMD_TYPE_3D: return "3D"; -+ case RGXFWIF_CCB_CMD_TYPE_3D_PR: return "3D_PR"; -+ case RGXFWIF_CCB_CMD_TYPE_CDM: return "CDM"; -+ case RGXFWIF_CCB_CMD_TYPE_TQ_3D: return "TQ_3D"; -+ case RGXFWIF_CCB_CMD_TYPE_TQ_2D: return "TQ_2D"; -+ case RGXFWIF_CCB_CMD_TYPE_TQ_TDM: return "TQ_TDM"; -+ case RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE: return "FBSC_INVALIDATE"; -+ case RGXFWIF_CCB_CMD_TYPE_NULL: return "NULL"; -+ case RGXFWIF_CCB_CMD_TYPE_FENCE: return "FENCE"; -+ case RGXFWIF_CCB_CMD_TYPE_UPDATE: return "UPDATE"; -+ case RGXFWIF_CCB_CMD_TYPE_FENCE_PR: return "FENCE_PR"; -+ case RGXFWIF_CCB_CMD_TYPE_PRIORITY: return "PRIORITY"; -+ case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE: return "UNFENCED_UPDATE"; -+ case RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP: return "PRE_TIMESTAMP"; -+ case RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE: return "RMW_UPDATE"; -+ case RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP: return "POST_TIMESTAMP"; -+ case RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE: return "UNFENCED_RMW_UPDATE"; -+ case RGXFWIF_CCB_CMD_TYPE_PADDING: return "PADDING"; -+ -+ default: -+ PVR_ASSERT(IMG_FALSE); -+ break; -+ } -+ -+ return "INVALID"; -+} -+ -+PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM) -+{ -+ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; -+ IMG_UINT32 ui32SampledRdOff, ui32SampledDpOff, ui32SampledWrOff, ui32WrapMask; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (psCurrentClientCCB == NULL) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB is NULL")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ /* If CCB grow is enabled, take the lock while sampling offsets -+ * (to guard against a grow happening mid-sample) -+ */ -+ OSLockAcquire(psCurrentClientCCB->hCCBGrowLock); -+#endif -+ /* NB. use psCurrentClientCCB->ui32Size as basis for wrap mask (rather than psClientCCBCtrl->ui32WrapMask) -+ * as if CCB grow happens, psCurrentClientCCB->ui32Size will have been updated but -+ * psClientCCBCtrl->ui32WrapMask is only updated once the firmware sees the CCB has grown. -+ * If we use the wrong value, we might incorrectly determine that the offsets are invalid. 
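The offset sanity check relies on the CCB size being a power of two, so the wrap mask is size minus one and any valid offset is no greater than the mask; a stale mask taken before a grow can therefore make a perfectly valid write offset look out of range. A small sketch, assuming power-of-two sizes as the log2 size parameters elsewhere in this patch imply (wrap_mask below is an illustrative helper, not the driver's):

#include <stdint.h>
#include <assert.h>
#include <stdio.h>

/* For a power-of-two ring buffer, the wrap mask is size-1 and every valid
 * offset satisfies offset <= mask. */
static uint32_t wrap_mask(uint32_t size_bytes)
{
    assert(size_bytes != 0 && (size_bytes & (size_bytes - 1)) == 0);
    return size_bytes - 1;
}

int main(void)
{
    uint32_t old_mask = wrap_mask(8 * 1024);    /* before a CCB grow */
    uint32_t new_mask = wrap_mask(16 * 1024);   /* after the grow    */
    uint32_t woff = 9000;                       /* host already wrote past 8K */

    /* Judged against the stale mask the offset looks out of range, which is
     * why the code here prefers the host-side size over the firmware's
     * not-yet-updated wrap mask. */
    printf("stale check: %s\n", woff > old_mask ? "invalid" : "ok");
    printf("fresh check: %s\n", woff > new_mask ? "invalid" : "ok");
    return 0;
}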
-+ */ -+ ui32WrapMask = RGXGetWrapMaskCCB(psCurrentClientCCB); -+ RGXFwSharedMemCacheOpPtr(psCurrentClientCCB->psClientCCBCtrl, -+ INVALIDATE); -+ psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; -+ ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset; -+ ui32SampledDpOff = psClientCCBCtrl->ui32DepOffset; -+ ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset; -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ OSLockRelease(psCurrentClientCCB->hCCBGrowLock); -+#endif -+ -+ if (ui32SampledRdOff > ui32WrapMask || -+ ui32SampledDpOff > ui32WrapMask || -+ ui32SampledWrOff > ui32WrapMask) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB has invalid offset (ROFF=%d DOFF=%d WOFF=%d)", -+ ui32SampledRdOff, ui32SampledDpOff, ui32SampledWrOff)); -+ return PVRSRV_ERROR_INVALID_OFFSET; -+ } -+ -+ if (ui32SampledRdOff != ui32SampledWrOff && -+ psCurrentClientCCB->ui32LastROff != psCurrentClientCCB->ui32LastWOff && -+ ui32SampledRdOff == psCurrentClientCCB->ui32LastROff && -+ (psCurrentClientCCB->ui32ByteCount - psCurrentClientCCB->ui32LastByteCount) < psCurrentClientCCB->ui32Size) -+ { -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDevNode->pvDevice; -+ -+ /* Only log a stalled CCB if GPU is idle (any state other than POW_ON is considered idle). -+ * Guest drivers do not initialize psRGXFWIfFwSysData, so they assume FW internal state is ON. */ -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->ePowState, -+ INVALIDATE); -+ if (((psDevInfo->psRGXFWIfFwSysData == NULL) || (psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_ON)) && -+ (psDevInfo->ui32SLRHoldoffCounter == 0)) -+ { -+ static __maybe_unused const char *pszStalledAction = -+#if defined(PVRSRV_STALLED_CCB_ACTION) -+ "force"; -+#else -+ "warn"; -+#endif -+ /* Don't log this by default unless debugging since a higher up -+ * function will log the stalled condition. Helps avoid double -+ * messages in the log. -+ */ -+ PVR_DPF((PVR_DBG_ERROR, "%s (%s): CCCB has not progressed (ROFF=%d DOFF=%d WOFF=%d) for \"%s\"", -+ __func__, pszStalledAction, ui32SampledRdOff, -+ ui32SampledDpOff, ui32SampledWrOff, -+ psCurrentClientCCB->szName)); -+ eError = PVRSRV_ERROR_CCCB_STALLED; -+ -+ { -+ void *pvClientCCBBuff = psCurrentClientCCB->pvClientCCB; -+ RGXFWIF_CCB_CMD_HEADER *psCommandHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff); -+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCurrentClientCCB->psServerCommonContext); -+ -+ /* Special case - if readOffset is on a PADDING packet, CCB has wrapped. -+ * In this case, skip over the PADDING packet. -+ */ -+ if (psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_PADDING) -+ { -+ psCommandHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, -+ ((ui32SampledRdOff + -+ psCommandHeader->ui32CmdSize + -+ sizeof(RGXFWIF_CCB_CMD_HEADER)) -+ & psCurrentClientCCB->psClientCCBCtrl->ui32WrapMask)); -+ } -+ -+ /* Only try to recover a 'stalled' context (ie one waiting on a fence), as some work (eg compute) could -+ * take a long time to complete, during which time the CCB ptrs would not advance. 
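The stall test above boils down to: work was pending at both samples, the firmware read offset has not moved between them, and less than one full buffer of new work was submitted in the meantime (so the unchanged offset is not just a coincidence of wrapping). A compact restatement of that heuristic; ccb_sample_t and ccb_looks_stalled are stand-in names for illustration:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Snapshot of the ring state taken on two successive health checks. */
typedef struct {
    uint32_t read_off;     /* firmware read offset       */
    uint32_t write_off;    /* host write offset          */
    uint32_t byte_count;   /* total bytes ever submitted */
} ccb_sample_t;

/* Mirrors the condition above: pending work at both samples, no read-offset
 * progress, and less than one full buffer of new submissions in between. */
static bool ccb_looks_stalled(const ccb_sample_t *prev,
                              const ccb_sample_t *now,
                              uint32_t ccb_size)
{
    return now->read_off != now->write_off &&
           prev->read_off != prev->write_off &&
           now->read_off == prev->read_off &&
           (now->byte_count - prev->byte_count) < ccb_size;
}

int main(void)
{
    ccb_sample_t prev = { .read_off = 128, .write_off = 512, .byte_count = 4096 };
    ccb_sample_t now  = { .read_off = 128, .write_off = 640, .byte_count = 4224 };
    printf("stalled? %d\n", ccb_looks_stalled(&prev, &now, 8192));  /* 1 */
    return 0;
}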
-+ */ -+ if (((psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE) || -+ (psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) && -+ (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff))) -+ { -+ /* Acquire the cCCB recovery lock */ -+ OSLockAcquire(psDevInfo->hCCBRecoveryLock); -+ -+ if (!psDevInfo->pvEarliestStalledClientCCB) -+ { -+ psDevInfo->pvEarliestStalledClientCCB = (void*)psCurrentClientCCB; -+ psDevInfo->ui32OldestSubmissionOrdinal = psCommandHeader->ui32IntJobRef; -+ } -+ else -+ { -+ /* Check if this fence cmd header has an older submission stamp than the one we are currently considering unblocking -+ * (account for submission stamp wrap by checking diff is less than 0x80000000) - if it is older, then this becomes -+ * our preferred fence to be unblocked/ -+ */ -+ if ((psCommandHeader->ui32IntJobRef < psDevInfo->ui32OldestSubmissionOrdinal) && -+ ((psDevInfo->ui32OldestSubmissionOrdinal - psCommandHeader->ui32IntJobRef) < 0x8000000)) -+ { -+ psDevInfo->pvEarliestStalledClientCCB = (void*)psCurrentClientCCB; -+ psDevInfo->ui32OldestSubmissionOrdinal = psCommandHeader->ui32IntJobRef; -+ } -+ } -+ -+ /* Release the cCCB recovery lock */ -+ OSLockRelease(psDevInfo->hCCBRecoveryLock); -+ } -+ } -+ } -+ } -+ -+ psCurrentClientCCB->ui32LastROff = ui32SampledRdOff; -+ psCurrentClientCCB->ui32LastWOff = ui32SampledWrOff; -+ psCurrentClientCCB->ui32LastByteCount = psCurrentClientCCB->ui32ByteCount; -+ -+ return eError; -+} -+ -+void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo, -+ PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, -+ RGX_CLIENT_CCB *psCurrentClientCCB, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; -+ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; -+ void *pvClientCCBBuff; -+ IMG_UINT32 ui32Offset; -+ IMG_UINT32 ui32DepOffset; -+ IMG_UINT32 ui32EndOffset; -+ IMG_UINT32 ui32WrapMask; -+ IMG_CHAR * pszState = "Ready"; -+ -+ /* Ensure hCCBGrowLock is acquired before reading -+ * psCurrentClientCCB->pvClientCCB as a CCB grow -+ * could remap the virtual addresses. -+ */ -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ OSLockAcquire(psCurrentClientCCB->hCCBGrowLock); -+#endif -+ RGXFwSharedMemCacheOpPtr(psCurrentClientCCB->psClientCCBCtrl, -+ INVALIDATE); -+ psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; -+ pvClientCCBBuff = psCurrentClientCCB->pvClientCCB; -+ ui32EndOffset = psCurrentClientCCB->ui32HostWriteOffset; -+ OSMemoryBarrier(NULL); -+ ui32Offset = psClientCCBCtrl->ui32ReadOffset; -+ ui32DepOffset = psClientCCBCtrl->ui32DepOffset; -+ /* NB. Use psCurrentClientCCB->ui32Size as basis for wrap mask (rather -+ * than psClientCCBCtrl->ui32WrapMask) as if CCB grow happened, -+ * psCurrentClientCCB->ui32Size will have been updated but -+ * psClientCCBCtrl->ui32WrapMask is only updated once the firmware -+ * sees the CCB has grown. If we use the wrong value, ui32NextOffset -+ * can end up being wrapped prematurely and pointing to garbage. 
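The candidate selection above compares free-running 32-bit submission ordinals while tolerating wrap-around, by requiring the distance between the two stamps to be less than half the 32-bit range. A generic sketch of that comparison; ordinal_is_older and the half-range constant 0x80000000 are the conventional form of the idea and are assumptions here, not quoted from the code above:

#include <stdint.h>
#include <stdio.h>

/* Wrap-aware "is ordinal a older than ordinal b?" for free-running 32-bit
 * submission stamps: compute the difference modulo 2^32 and call 'a' older
 * when it trails 'b' by less than half the number space. */
static int ordinal_is_older(uint32_t a, uint32_t b)
{
    return (uint32_t)(b - a) != 0 && (uint32_t)(b - a) < 0x80000000u;
}

int main(void)
{
    printf("%d\n", ordinal_is_older(5, 10));           /* 1            */
    printf("%d\n", ordinal_is_older(0xFFFFFFF0u, 3));  /* 1: wrapped   */
    printf("%d\n", ordinal_is_older(10, 5));           /* 0            */
    return 0;
}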
-+ */ -+ ui32WrapMask = RGXGetWrapMaskCCB(psCurrentClientCCB); -+ -+ PVR_DUMPDEBUG_LOG("FWCtx 0x%08X (%s)", sFWCommonContext.ui32Addr, psCurrentClientCCB->szName); -+ if (ui32Offset == ui32EndOffset) -+ { -+ PVR_DUMPDEBUG_LOG(" `--"); -+ } -+ -+ if (ui32Offset > ui32WrapMask) -+ { -+ PVR_DUMPDEBUG_LOG(" `--"); -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ OSLockRelease(psCurrentClientCCB->hCCBGrowLock); -+#endif -+ return; -+ } -+ -+ while (ui32Offset != ui32EndOffset) -+ { -+ RGXFWIF_CCB_CMD_HEADER *psCmdHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32Offset); -+ IMG_UINT32 ui32NextOffset = (ui32Offset + psCmdHeader->ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER)) & ui32WrapMask; -+ IMG_BOOL bLastCommand = (ui32NextOffset == ui32EndOffset)? IMG_TRUE: IMG_FALSE; -+ IMG_BOOL bLastUFO; -+ #define CCB_SYNC_INFO_LEN 80 -+ IMG_CHAR pszSyncInfo[CCB_SYNC_INFO_LEN]; -+ IMG_UINT32 ui32NoOfUpdates, i; -+ RGXFWIF_UFO *psUFOPtr; -+ -+ ui32NoOfUpdates = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); -+ psUFOPtr = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32Offset + sizeof(RGXFWIF_CCB_CMD_HEADER)); -+ pszSyncInfo[0] = '\0'; -+ -+ if (ui32Offset == ui32DepOffset) -+ { -+ pszState = "Waiting"; -+ } -+ -+ PVR_DUMPDEBUG_LOG(" %s--%s %s @ %u Int=%u Ext=%u", -+ bLastCommand? "`": "|", -+ pszState, _CCBCmdTypename(psCmdHeader->eCmdType), -+ ui32Offset, psCmdHeader->ui32IntJobRef, psCmdHeader->ui32ExtJobRef -+ ); -+ -+ /* switch on type and write checks and updates */ -+ switch (psCmdHeader->eCmdType) -+ { -+ case RGXFWIF_CCB_CMD_TYPE_UPDATE: -+ case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE: -+ case RGXFWIF_CCB_CMD_TYPE_FENCE: -+ case RGXFWIF_CCB_CMD_TYPE_FENCE_PR: -+ { -+ for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++) -+ { -+ bLastUFO = (ui32NoOfUpdates-1 == i)? IMG_TRUE: IMG_FALSE; -+ -+ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) -+ { -+ if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) -+ { -+ SyncCheckpointRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr, -+ pszSyncInfo, CCB_SYNC_INFO_LEN); -+ } -+ else -+ { -+ SyncRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr, -+ pszSyncInfo, CCB_SYNC_INFO_LEN); -+ } -+ } -+ -+ PVR_DUMPDEBUG_LOG(" %s %s--Addr:0x%08x Val=0x%08x %s", -+ bLastCommand? " ": "|", -+ bLastUFO? "`": "|", -+ psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value, -+ pszSyncInfo -+ ); -+ } -+ break; -+ } -+ case RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE: -+ case RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE: -+ { -+ for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++) -+ { -+ bLastUFO = (ui32NoOfUpdates-1 == i)? IMG_TRUE: IMG_FALSE; -+ -+ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) -+ { -+ if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) -+ { -+ SyncCheckpointRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr, -+ pszSyncInfo, CCB_SYNC_INFO_LEN); -+ } -+ else -+ { -+ SyncRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr, -+ pszSyncInfo, CCB_SYNC_INFO_LEN); -+ } -+ } -+ -+ PVR_DUMPDEBUG_LOG(" %s %s--Addr:0x%08x Val++ %s", -+ bLastCommand? " ": "|", -+ bLastUFO? "`": "|", -+ psUFOPtr->puiAddrUFO.ui32Addr, -+ pszSyncInfo -+ ); -+ } -+ break; -+ } -+ default: -+ break; -+ } -+ -+ /* Check the command size is valid, otherwise if corruption was present this loop might hang... 
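DumpCCB walks the ring from the firmware read offset to the host write offset by stepping over one header plus payload at a time, and the guard that follows stops the walk when a header looks corrupted. A standalone sketch of the same walk; cmd_hdr_t and walk_ccb are simplified stand-ins for the driver's header type and loop:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified command header: the real RGXFWIF_CCB_CMD_HEADER also carries a
 * type and job references, which the walk itself does not need. */
typedef struct { uint32_t cmd_size; } cmd_hdr_t;

/* Walk a power-of-two ring from read offset to write offset, stepping over
 * header + payload each time; bail out on zero size, no forward progress, or
 * a command that would overrun the buffer. */
static void walk_ccb(const uint8_t *ccb, uint32_t wrap_mask,
                     uint32_t read_off, uint32_t write_off)
{
    uint32_t off = read_off;

    while (off != write_off)
    {
        cmd_hdr_t hdr;
        memcpy(&hdr, ccb + off, sizeof(hdr));

        uint32_t step = (uint32_t)sizeof(hdr) + hdr.cmd_size;
        uint32_t next = (off + step) & wrap_mask;

        printf("cmd @ %u, payload %u bytes\n", (unsigned)off, (unsigned)hdr.cmd_size);

        if (hdr.cmd_size == 0 || next == off || off + step > wrap_mask + 1)
        {
            printf("invalid CCB offset, stopping\n");
            break;
        }
        off = next;
    }
}

int main(void)
{
    uint8_t ccb[64] = {0};
    cmd_hdr_t a = { 12 }, b = { 8 };
    memcpy(ccb + 0, &a, sizeof(a));    /* cmd at 0, next at 16  */
    memcpy(ccb + 16, &b, sizeof(b));   /* cmd at 16, next at 28 */
    walk_ccb(ccb, 64 - 1, 0, 28);
    return 0;
}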
*/ -+ if ((psCmdHeader->ui32CmdSize == 0) || (ui32Offset == ui32NextOffset) || -+ ((ui32Offset + psCmdHeader->ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER)) > ui32WrapMask+1)) -+ { -+ PVR_DUMPDEBUG_LOG(" `--Invalid CCB offset!"); -+ break; -+ } -+ -+ ui32Offset = ui32NextOffset; -+ } -+ -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ OSLockRelease(psCurrentClientCCB->hCCBGrowLock); -+#endif -+} -+ -+void DumpFirstCCBCmd(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, -+ RGX_CLIENT_CCB *psCurrentClientCCB, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; -+ void *pvClientCCBBuff = psCurrentClientCCB->pvClientCCB; -+ IMG_UINT32 ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset; -+ IMG_UINT32 ui32SampledRdOff; -+ IMG_UINT32 ui32SampledDepOff; -+ -+ RGXFwSharedMemCacheOpPtr(psCurrentClientCCB->psClientCCBCtrl, -+ INVALIDATE); -+ psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; -+ ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset; -+ ui32SampledDepOff = psClientCCBCtrl->ui32DepOffset; -+ -+ if ((ui32SampledRdOff == ui32SampledDepOff) && -+ (ui32SampledRdOff != ui32SampledWrOff)) -+ { -+ volatile RGXFWIF_CCB_CMD_HEADER *psCommandHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff); -+ RGXFWIF_CCB_CMD_TYPE eCommandType = psCommandHeader->eCmdType; -+ volatile void *pvPtr = psCommandHeader; -+ -+ /* CCB is stalled on a fence... */ -+ if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) -+ { -+#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) -+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCurrentClientCCB->psServerCommonContext); -+ IMG_UINT32 ui32Val; -+#endif -+ RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader)); -+ IMG_UINT32 jj; -+ -+ /* Display details of the fence object on which the context is pending */ -+ PVR_DUMPDEBUG_LOG("FWCtx 0x%08X @ %d (%s) pending on %s:", -+ sFWCommonContext.ui32Addr, -+ ui32SampledRdOff, -+ psCurrentClientCCB->szName, -+ _CCBCmdTypename(eCommandType)); -+ for (jj=0; jjui32CmdSize/sizeof(RGXFWIF_UFO); jj++) -+ { -+#if !defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) -+ PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value); -+#else -+ ui32Val = 0; -+ RGXReadFWModuleAddr(psDevInfo, psUFOPtr[jj].puiAddrUFO.ui32Addr, &ui32Val); -+ PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x", -+ psUFOPtr[jj].puiAddrUFO.ui32Addr, -+ psUFOPtr[jj].ui32Value, ui32Val); -+#endif -+ } -+ -+ /* Advance psCommandHeader past the FENCE to the next command header (this will be the TA/3D command that is fenced) */ -+ pvPtr = IMG_OFFSET_ADDR(psUFOPtr, psCommandHeader->ui32CmdSize); -+ psCommandHeader = pvPtr; -+ if (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff)) -+ { -+ PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X fenced command is of type %s",sFWCommonContext.ui32Addr, _CCBCmdTypename(psCommandHeader->eCmdType)); -+ /* Advance psCommandHeader past the TA/3D to the next command header (this will possibly be an UPDATE) */ -+ pvPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader) + psCommandHeader->ui32CmdSize); -+ psCommandHeader = pvPtr; -+ /* If the next command is an update, display details of that so we can see what would then become unblocked */ -+ if (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff)) -+ { -+ eCommandType = psCommandHeader->eCmdType; -+ -+ if (eCommandType == RGXFWIF_CCB_CMD_TYPE_UPDATE) -+ { -+ psUFOPtr = 
IMG_OFFSET_ADDR(psCommandHeader, sizeof(*psCommandHeader)); -+ PVR_DUMPDEBUG_LOG(" preventing %s:",_CCBCmdTypename(eCommandType)); -+ for (jj=0; jjui32CmdSize/sizeof(RGXFWIF_UFO); jj++) -+ { -+#if !defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) -+ PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value); -+#else -+ ui32Val = 0; -+ RGXReadFWModuleAddr(psDevInfo, psUFOPtr[jj].puiAddrUFO.ui32Addr, &ui32Val); -+ PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x", -+ psUFOPtr[jj].puiAddrUFO.ui32Addr, -+ psUFOPtr[jj].ui32Value, -+ ui32Val); -+#endif -+ } -+ } -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr); -+ } -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr); -+ } -+ } -+ } -+} -+ -+void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ RGX_CLIENT_CCB *psStalledClientCCB; -+ -+ PVR_ASSERT(psDevInfo); -+ -+ psStalledClientCCB = (RGX_CLIENT_CCB *)psDevInfo->pvEarliestStalledClientCCB; -+ -+ if (psStalledClientCCB) -+ { -+ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; -+ IMG_UINT32 ui32SampledDepOffset; -+ void *pvPtr; -+ RGXFWIF_CCB_CMD_HEADER *psCommandHeader; -+ RGXFWIF_CCB_CMD_TYPE eCommandType; -+ -+ RGXFwSharedMemCacheOpValue(psStalledClientCCB->psClientCCBCtrl->ui32DepOffset, -+ INVALIDATE); -+ psClientCCBCtrl = psStalledClientCCB->psClientCCBCtrl; -+ ui32SampledDepOffset = psClientCCBCtrl->ui32DepOffset; -+ /* No need to invalidate CCCB as FW doesn't write to it, only read. */ -+ pvPtr = IMG_OFFSET_ADDR(psStalledClientCCB->pvClientCCB, ui32SampledDepOffset); -+ psCommandHeader = pvPtr; -+ eCommandType = psCommandHeader->eCmdType; -+ -+ if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) -+ { -+ RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader)); -+ IMG_UINT32 jj; -+ IMG_UINT32 ui32NumUnsignalledUFOs = 0; -+ IMG_UINT32 ui32UnsignalledUFOVaddrs[PVRSRV_MAX_SYNCS]; -+ -+#if defined(PVRSRV_STALLED_CCB_ACTION) -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwOsData, -+ INVALIDATE); -+ if (!psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.aszCCBName[0]) -+ { -+ OSClockMonotonicns64(&psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui64Timestamp); -+ psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui32NumUFOs = (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)); -+ psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui32FWCtxAddr = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr; -+ OSStringLCopy(psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.aszCCBName, -+ psStalledClientCCB->szName, -+ MAX_CLIENT_CCB_NAME); -+ } -+ else -+ { -+ OSClockMonotonicns64(&psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui64Timestamp); -+ psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui32NumUFOs = (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)); -+ psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui32FWCtxAddr = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr; -+ OSStringLCopy(psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].aszCCBName, -+ psStalledClientCCB->szName, -+ MAX_CLIENT_CCB_NAME); -+ psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp = (psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp + 1) % PVR_SLR_LOG_ENTRIES; -+ } -+ psDevInfo->psRGXFWIfFwOsData->ui32ForcedUpdatesRequested++; -+ /* 
flush write buffers for psRGXFWIfFwOsData */ -+ OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp]); -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwOsData, -+ FLUSH); -+#endif -+ PVR_LOG(("Fence found on context 0x%x '%s' @ %d has %d UFOs", -+ FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr, -+ psStalledClientCCB->szName, ui32SampledDepOffset, -+ (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)))); -+ -+ for (jj=0; jjui32CmdSize/sizeof(RGXFWIF_UFO); jj++) -+ { -+ if (PVRSRV_UFO_IS_SYNC_CHECKPOINT((RGXFWIF_UFO *)&psUFOPtr[jj])) -+ { -+ IMG_UINT32 ui32ReadValue = SyncCheckpointStateFromUFO(psDevInfo->psDeviceNode, -+ psUFOPtr[jj].puiAddrUFO.ui32Addr); -+ PVR_LOG((" %d/%d FWAddr 0x%x requires 0x%x (currently 0x%x)", jj+1, -+ (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)), -+ psUFOPtr[jj].puiAddrUFO.ui32Addr, -+ psUFOPtr[jj].ui32Value, -+ ui32ReadValue)); -+ /* If fence is unmet, dump debug info on it */ -+ if (ui32ReadValue != psUFOPtr[jj].ui32Value) -+ { -+ /* Add to our list to pass to pvr_sync */ -+ ui32UnsignalledUFOVaddrs[ui32NumUnsignalledUFOs] = psUFOPtr[jj].puiAddrUFO.ui32Addr; -+ ui32NumUnsignalledUFOs++; -+ } -+ } -+ else -+ { -+ PVR_LOG((" %d/%d FWAddr 0x%x requires 0x%x", jj+1, -+ (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)), -+ psUFOPtr[jj].puiAddrUFO.ui32Addr, -+ psUFOPtr[jj].ui32Value)); -+ } -+ } -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) -+ if (ui32NumUnsignalledUFOs > 0) -+ { -+ IMG_UINT32 ui32NumSyncsOwned; -+ PVRSRV_ERROR eErr = SyncCheckpointDumpInfoOnStalledUFOs(ui32NumUnsignalledUFOs, &ui32UnsignalledUFOVaddrs[0], &ui32NumSyncsOwned); -+ -+ PVR_LOG_IF_ERROR(eErr, "SyncCheckpointDumpInfoOnStalledUFOs() call failed."); -+ } -+#endif -+#if defined(PVRSRV_STALLED_CCB_ACTION) -+ if (BIT_ISSET(psStalledClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED)) -+ { -+ PRGXFWIF_FWCOMMONCONTEXT psContext = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext); -+ -+ PVR_LOG(("SLR disabled for FWCtx 0x%08X", psContext.ui32Addr)); -+ } -+ else -+ { -+ if (ui32NumUnsignalledUFOs > 0) -+ { -+ RGXFWIF_KCCB_CMD sSignalFencesCmd; -+ -+ sSignalFencesCmd.eCmdType = RGXFWIF_KCCB_CMD_FORCE_UPDATE; -+ sSignalFencesCmd.ui32KCCBFlags = 0; -+ sSignalFencesCmd.uCmdData.sForceUpdateData.psContext = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext); -+ sSignalFencesCmd.uCmdData.sForceUpdateData.ui32CCBFenceOffset = ui32SampledDepOffset; -+ -+ PVR_LOG(("Forced update command issued for FWCtx 0x%08X", sSignalFencesCmd.uCmdData.sForceUpdateData.psContext.ui32Addr)); -+ -+ RGXScheduleCommand(FWCommonContextGetRGXDevInfo(psStalledClientCCB->psServerCommonContext), -+ RGXFWIF_DM_GP, -+ &sSignalFencesCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+ } -+#endif -+ } -+ psDevInfo->pvEarliestStalledClientCCB = NULL; -+ } -+} -+ -+/****************************************************************************** -+ End of file (rgxccb.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxccb.h b/drivers/gpu/drm/img-rogue/rgxccb.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxccb.h -@@ -0,0 +1,356 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX Circular Command Buffer functionality. 
-+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the RGX Circular Command Buffer functionality. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGXCCB_H) -+#define RGXCCB_H -+ -+#include "devicemem.h" -+#include "device.h" -+#include "rgxdevice.h" -+#include "sync_server.h" -+#include "connection_server.h" -+#include "rgxdebug_common.h" -+#include "rgxdefs_km.h" -+#include "pvr_notifier.h" -+ -+#define MAX_CLIENT_CCB_NAME 30 -+#define SYNC_FLAG_MASK_ALL IMG_UINT32_MAX -+ -+/* -+ * This size is to be used when a client CCB is found to consume very -+ * negligible space (e.g. a few hundred bytes to few KBs - less than a page). -+ * In such a case, instead of allocating CCB of size of only a few KBs, we -+ * allocate at-least this much to be future risk-free. 
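The comment above, together with the constants and static_asserts that follow, pins every per-DM CCB size to the 8 KiB..256 KiB window and requires the grow limit to be at least the initial size. The sketch below restates what those compile-time asserts encode as a plain predicate; ccb_sizes_are_sane is an illustration only, the driver enforces this at compile time:

#include <stdint.h>
#include <stdio.h>

#define MIN_SAFE_CCB_SIZE_LOG2 13u   /* 1 << 13 =   8 KiB (2 pages)  */
#define MAX_SAFE_CCB_SIZE_LOG2 18u   /* 1 << 18 = 256 KiB (64 pages) */

/* What each pair of static_asserts guarantees for a DM's configured sizes:
 * the initial size sits in the safe window and the grow limit is at least
 * the initial size but still within the window. */
static int ccb_sizes_are_sane(uint32_t size_log2, uint32_t max_size_log2)
{
    return size_log2 >= MIN_SAFE_CCB_SIZE_LOG2 &&
           size_log2 <= MAX_SAFE_CCB_SIZE_LOG2 &&
           max_size_log2 >= size_log2 &&
           max_size_log2 <= MAX_SAFE_CCB_SIZE_LOG2;
}

int main(void)
{
    printf("safe window: %u..%u bytes\n",
           1u << MIN_SAFE_CCB_SIZE_LOG2, 1u << MAX_SAFE_CCB_SIZE_LOG2);
    printf("16K initial / 128K max ok? %d\n", ccb_sizes_are_sane(14, 17));
    return 0;
}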
-+ */ -+#define MIN_SAFE_CCB_SIZE_LOG2 13 /* 8K (2 Pages) */ -+#define MAX_SAFE_CCB_SIZE_LOG2 18 /* 256K (64 Pages) */ -+ -+#define RGX_TQ3D_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D -+static_assert(RGX_TQ3D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && -+ RGX_TQ3D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ3D CCB size is invalid"); -+#define RGX_TQ3D_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ3D -+static_assert(RGX_TQ3D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D -+ && RGX_TQ3D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ3D max CCB size is invalid"); -+ -+#define RGX_TQ2D_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D -+static_assert(RGX_TQ2D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && -+ RGX_TQ2D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ2D CCB size is invalid"); -+#define RGX_TQ2D_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ2D -+static_assert(RGX_TQ2D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D && -+ RGX_TQ2D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ2D max CCB size is invalid"); -+ -+#define RGX_CDM_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM -+static_assert(RGX_CDM_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && -+ RGX_CDM_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "CDM CCB size is invalid"); -+#define RGX_CDM_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM -+static_assert(RGX_CDM_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM && -+ RGX_CDM_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "CDM max CCB size is invalid"); -+ -+#define RGX_TA_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA -+static_assert(RGX_TA_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && -+ RGX_TA_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TA CCB size is invalid"); -+#define RGX_TA_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA -+static_assert(RGX_TA_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA && -+ RGX_TA_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TA max CCB size is invalid"); -+ -+#define RGX_3D_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D -+static_assert(RGX_3D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && -+ RGX_3D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "3D CCB size is invalid"); -+#define RGX_3D_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_3D -+static_assert(RGX_3D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D && -+ RGX_3D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "3D max CCB size is invalid"); -+ -+#define RGX_KICKSYNC_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC -+static_assert(RGX_KICKSYNC_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && -+ RGX_KICKSYNC_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "KickSync CCB size is invalid"); -+#define RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_KICKSYNC -+static_assert(RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC && -+ RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "KickSync max CCB size is invalid"); -+ -+#define RGX_TDM_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM -+static_assert(RGX_TDM_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && -+ RGX_TDM_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TDM CCB size is invalid"); -+#define RGX_TDM_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TDM -+static_assert(RGX_TDM_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM && -+ RGX_TDM_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TDM max CCB size is invalid"); -+ -+#define RGX_RDM_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_RDM -+static_assert(RGX_RDM_CCB_SIZE_LOG2 >= 
MIN_SAFE_CCB_SIZE_LOG2 && -+ RGX_RDM_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "RDM CCB size is invalid"); -+#define RGX_RDM_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_RDM -+static_assert(RGX_RDM_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_RDM && -+ RGX_RDM_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "RDM max CCB size is invalid"); -+ -+typedef struct _RGX_CLIENT_CCB_ RGX_CLIENT_CCB; -+ -+/* -+ This structure is declared here as it's allocated on the heap by -+ the callers -+*/ -+ -+typedef struct _RGX_CCB_CMD_HELPER_DATA_ { -+ /* Data setup at command init time */ -+ RGX_CLIENT_CCB *psClientCCB; -+ IMG_CHAR *pszCommandName; -+ IMG_UINT32 ui32PDumpFlags; -+ -+ IMG_UINT32 ui32ClientFenceCount; -+ PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress; -+ IMG_UINT32 *paui32FenceValue; -+ IMG_UINT32 ui32ClientUpdateCount; -+ PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress; -+ IMG_UINT32 *paui32UpdateValue; -+ RGXFWIF_CCB_CMD_TYPE eType; -+ IMG_UINT32 ui32CmdSize; -+ IMG_UINT8 *pui8DMCmd; -+ IMG_UINT32 ui32FBSCInvalCmdSize; -+ IMG_UINT32 ui32DMCmdSize; -+ IMG_UINT32 ui32TotalSize; -+ IMG_UINT32 ui32DMCmdOffset; -+ -+ /* data for FBSC invalidate command */ -+ IMG_UINT64 ui64FBSCEntryMask; -+ -+ /* timestamp commands */ -+ PRGXFWIF_TIMESTAMP_ADDR pPreTimestampAddr; -+ IMG_UINT32 ui32PreTimeStampCmdSize; -+ PRGXFWIF_TIMESTAMP_ADDR pPostTimestampAddr; -+ IMG_UINT32 ui32PostTimeStampCmdSize; -+ PRGXFWIF_UFO_ADDR pRMWUFOAddr; -+ IMG_UINT32 ui32RMWUFOCmdSize; -+ -+ /* Job reference fields */ -+ IMG_UINT32 ui32ExtJobRef; -+ IMG_UINT32 ui32IntJobRef; -+ -+ /* FW Memdesc for Workload information */ -+ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData; -+ -+} RGX_CCB_CMD_HELPER_DATA; -+ -+#define PADDING_COMMAND_SIZE (sizeof(RGXFWIF_CCB_CMD_HEADER)) -+ -+ -+#define RGX_CCB_REQUESTORS(TYPE) \ -+ /* for debugging purposes */ TYPE(UNDEF) \ -+ TYPE(TA) \ -+ TYPE(3D) \ -+ TYPE(CDM) \ -+ TYPE(SH) \ -+ TYPE(RS) \ -+ TYPE(TQ_3D) \ -+ TYPE(TQ_2D) \ -+ TYPE(TQ_TDM) \ -+ TYPE(KICKSYNC) \ -+ TYPE(RAY) \ -+ -+/* Forms an enum constant for each type present in RGX_CCB_REQUESTORS list. The enum is mainly used as -+ an index to the aszCCBRequestors table defined in rgxccb.c. The total number of enums must adhere -+ to the following build assert. -+*/ -+typedef enum _RGX_CCB_REQUESTOR_TYPE_ -+{ -+#define CONSTRUCT_ENUM(req) REQ_TYPE_##req, -+ RGX_CCB_REQUESTORS (CONSTRUCT_ENUM) -+#undef CONSTRUCT_ENUM -+ -+ /* should always be at the end */ -+ REQ_TYPE_TOTAL_COUNT, -+} RGX_CCB_REQUESTOR_TYPE; -+ -+/* Tuple describing the columns of the following table */ -+typedef enum _RGX_CCB_REQUESTOR_TUPLE_ -+{ -+ REQ_RGX_FW_CLIENT_CCB_STRING, /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCB for this requestor */ -+ REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING, /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCBControl for this requestor */ -+ REQ_PDUMP_COMMENT, /* Index to comment to be dumped in PDUMPs */ -+ -+ /* should always be at the end */ -+ REQ_TUPLE_CARDINALITY, -+} RGX_CCB_REQUESTOR_TUPLE; -+ -+/* Unpack U8 values from U32. 
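The unpack helpers defined just after this note split a packed U8.8-style value: byte 0 carries the initial CCB size as a log2, byte 1 the maximum (grow) size, and the compute context creation code later in this patch falls back to the per-DM defaults when a byte is zero. A sketch of the round trip; pack_ccb_sizes and the fallback numbers 13/18 are assumptions for illustration, not values taken from this patch:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the helpers defined below: byte 0 = initial size (log2),
 * byte 1 = maximum (grow limit) size (log2). */
#define U32toU8_Unpack1(u) ((u) & 0xFF)
#define U32toU8_Unpack2(u) (((u) >> 8) & 0xFF)

/* Hypothetical client-side packing helper; the bridge call in this patch only
 * ever sees the already-packed value. */
static uint32_t pack_ccb_sizes(uint8_t size_log2, uint8_t max_size_log2)
{
    return (uint32_t)size_log2 | ((uint32_t)max_size_log2 << 8);
}

int main(void)
{
    uint32_t packed = pack_ccb_sizes(14, 17);
    uint32_t size_log2 = U32toU8_Unpack1(packed);
    uint32_t max_log2  = U32toU8_Unpack2(packed);

    /* A zero byte means "not specified"; the create path then uses the
     * per-DM default (placeholder values 13/18 stand in for those here). */
    printf("initial=2^%u max=2^%u\n",
           (unsigned)(size_log2 ? size_log2 : 13u),
           (unsigned)(max_log2  ? max_log2  : 18u));
    return 0;
}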
*/ -+#define U32toU8_Unpack1(U32Packed) (U32Packed & 0xFF) -+#define U32toU8_Unpack2(U32Packed) ((U32Packed>>8) & 0xFF) -+#define U32toU8_Unpack3(U32Packed) ((U32Packed>>16) & 0xFF) -+#define U32toU8_Unpack4(U32Packed) ((U32Packed>>24) & 0xFF) -+ -+/* Defines for bit meanings within the ui32CCBFlags member of struct _RGX_CLIENT_CCB_ -+ * -+ * ( X = taken/in use, - = available/unused ) -+ * -+ * 31 10 -+ * | || -+ * ------------------------------XX -+ * Bit Meaning -+ * 0 = If set, CCB is still open and commands will be appended to it -+ * 1 = If set, do not perform Sync Lockup Recovery (SLR) for this CCB -+ */ -+#define CCB_FLAGS_CCB_STATE_OPEN (0) /*!< This bit is set to indicate CCB is in the 'Open' state. */ -+#define CCB_FLAGS_SLR_DISABLED (1) /*!< This bit is set to disable Sync Lockup Recovery (SLR) for this CCB. */ -+ -+ -+/* Table containing an array of strings for each requestor type in the list of RGX_CCB_REQUESTORS. In addition to its use in -+ this module (rgxccb.c), this table is also used to access string to be dumped in PDUMP comments, hence, marking it extern for -+ use in other modules. -+*/ -+extern const IMG_CHAR *const aszCCBRequestors[][REQ_TUPLE_CARDINALITY]; -+ -+PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB, -+ IMG_UINT32 ui32PDumpFlags); -+ -+PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32CCBSizeLog2, -+ IMG_UINT32 ui32CCBMaxSizeLog2, -+ IMG_UINT32 ui32ContextFlags, -+ CONNECTION_DATA *psConnectionData, -+ RGX_CCB_REQUESTOR_TYPE eCCBRequestor, -+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, -+ RGX_CLIENT_CCB **ppsClientCCB, -+ DEVMEM_MEMDESC **ppsClientCCBMemDesc, -+ DEVMEM_MEMDESC **ppsClientCCBCtlMemDesc); -+ -+void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB); -+ -+PVRSRV_ERROR RGXCheckSpaceCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32CmdSize); -+ -+PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB, -+ IMG_UINT32 ui32CmdSize, -+ void **ppvBufferSpace, -+ IMG_UINT32 ui32PDumpFlags); -+ -+void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB, -+ IMG_UINT32 ui32CmdSize, -+ IMG_UINT32 ui32PDumpFlags); -+ -+IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB); -+IMG_UINT32 RGXGetWrapMaskCCB(RGX_CLIENT_CCB *psClientCCB); -+ -+void RGXSetCCBFlags(RGX_CLIENT_CCB *psClientCCB, -+ IMG_UINT32 ui32Flags); -+ -+void RGXCmdHelperInitCmdCCB_CommandSize(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT64 ui64FBSCEntryMask, -+ IMG_UINT32 ui32ClientFenceCount, -+ IMG_UINT32 ui32ClientUpdateCount, -+ IMG_UINT32 ui32CmdSize, -+ PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, -+ PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, -+ PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, -+ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); -+ -+void RGXCmdHelperInitCmdCCB_OtherData(RGX_CLIENT_CCB *psClientCCB, -+ IMG_UINT32 ui32ClientFenceCount, -+ PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, -+ IMG_UINT32 *paui32FenceValue, -+ IMG_UINT32 ui32ClientUpdateCount, -+ PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress, -+ IMG_UINT32 *paui32UpdateValue, -+ IMG_UINT32 ui32CmdSize, -+ IMG_PBYTE pui8DMCmd, -+ PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, -+ PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, -+ PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, -+ RGXFWIF_CCB_CMD_TYPE eType, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ IMG_UINT32 ui32PDumpFlags, -+ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData, -+ IMG_CHAR *pszCommandName, -+ IMG_BOOL bCCBStateOpen, -+ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); -+ -+void RGXCmdHelperInitCmdCCB(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGX_CLIENT_CCB *psClientCCB, -+ 
IMG_UINT64 ui64FBSCEntryMask, -+ IMG_UINT32 ui32ClientFenceCount, -+ PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, -+ IMG_UINT32 *paui32FenceValue, -+ IMG_UINT32 ui32ClientUpdateCount, -+ PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress, -+ IMG_UINT32 *paui32UpdateValue, -+ IMG_UINT32 ui32CmdSize, -+ IMG_UINT8 *pui8DMCmd, -+ PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, -+ PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, -+ PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, -+ RGXFWIF_CCB_CMD_TYPE eType, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ IMG_UINT32 ui32PDumpFlags, -+ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData, -+ IMG_CHAR *pszCommandName, -+ IMG_BOOL bCCBStateOpen, -+ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); -+ -+PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount, -+ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData); -+ -+void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount, -+ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, -+ const IMG_CHAR *pcszDMName, -+ IMG_UINT32 ui32CtxAddr); -+ -+IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount, -+ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData); -+ -+IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, -+ IMG_UINT32 ui32Cmdindex); -+ -+IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); -+ -+void DumpFirstCCBCmd(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, -+ RGX_CLIENT_CCB *psCurrentClientCCB, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile); -+ -+void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo, -+ PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, -+ RGX_CLIENT_CCB *psCurrentClientCCB, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile); -+ -+PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM); -+ -+void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo); -+#endif /* RGXCCB_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxcompute.c b/drivers/gpu/drm/img-rogue/rgxcompute.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxcompute.c -@@ -0,0 +1,1562 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX Compute routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX Compute routines -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "img_defs.h" -+#include "srvkm.h" -+#include "pdump_km.h" -+#include "pvr_debug.h" -+#include "rgxutils.h" -+#include "rgxfwcmnctx.h" -+#include "rgxcompute.h" -+#include "rgx_bvnc_defs_km.h" -+#include "rgxmem.h" -+#include "allocmem.h" -+#include "devicemem.h" -+#include "devicemem_pdump.h" -+#include "osfunc.h" -+#include "rgxccb.h" -+#include "rgxhwperf.h" -+#include "ospvr_gputrace.h" -+#include "htbserver.h" -+ -+#include "sync_server.h" -+#include "sync_internal.h" -+#include "sync.h" -+#include "rgx_memallocflags.h" -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+#include "pvr_buffer_sync.h" -+#endif -+ -+#include "sync_checkpoint.h" -+#include "sync_checkpoint_internal.h" -+ -+#include "rgxtimerquery.h" -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+#include "rgxworkest.h" -+ -+#define HASH_CLEAN_LIMIT 6 -+#endif -+ -+/* Enable this to dump the compiled list of UFOs prior to kick call */ -+#define ENABLE_CMP_UFO_DUMP 0 -+ -+//#define CMP_CHECKPOINT_DEBUG 1 -+ -+#if defined(CMP_CHECKPOINT_DEBUG) -+#define CHKPT_DBG(X) PVR_DPF(X) -+#else -+#define CHKPT_DBG(X) -+#endif -+ -+struct _RGX_SERVER_COMPUTE_CONTEXT_ { -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; -+ DEVMEM_MEMDESC *psFWComputeContextMemDesc; -+ DEVMEM_MEMDESC *psFWFrameworkMemDesc; -+ DEVMEM_MEMDESC *psFWComputeContextStateMemDesc; -+ DLLIST_NODE sListNode; -+ SYNC_ADDR_LIST sSyncAddrListFence; -+ SYNC_ADDR_LIST sSyncAddrListUpdate; -+ POS_LOCK hLock; -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ WORKEST_HOST_DATA sWorkEstData; -+#endif -+#if defined(SUPPORT_BUFFER_SYNC) -+ struct pvr_buffer_sync_context *psBufferSyncContext; -+#endif -+}; -+ -+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_INT32 i32Priority, -+ IMG_UINT32 ui32FrameworkCommandSize, -+ IMG_PBYTE pbyFrameworkCommand, -+ IMG_HANDLE hMemCtxPrivData, -+ IMG_UINT32 ui32StaticComputeContextStateSize, -+ IMG_PBYTE pStaticComputeContextState, -+ IMG_UINT32 ui32PackedCCBSizeU88, -+ IMG_UINT32 ui32ContextFlags, -+ IMG_UINT64 ui64RobustnessAddress, -+ IMG_UINT32 ui32MaxDeadlineMS, -+ RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ DEVMEM_MEMDESC 
*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); -+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContext; -+ RGX_COMMON_CONTEXT_INFO sInfo = {NULL}; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGXFWIF_FWCOMPUTECONTEXT *psFWComputeContext; -+ IMG_UINT32 ui32CCBAllocSizeLog2, ui32CCBMaxAllocSizeLog2; -+ -+ /* Prepare cleanup struct */ -+ *ppsComputeContext = NULL; -+ -+ psComputeContext = OSAllocZMem(sizeof(*psComputeContext)); -+ if (psComputeContext == NULL) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ /* -+ Create the FW compute context, this has the CDM common -+ context embedded within it -+ */ -+ eError = DevmemFwAllocate(psDevInfo, -+ sizeof(RGXFWIF_FWCOMPUTECONTEXT), -+ RGX_FWCOMCTX_ALLOCFLAGS, -+ "FwComputeContext", -+ &psComputeContext->psFWComputeContextMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_fwcomputecontext; -+ } -+ -+ eError = OSLockCreate(&psComputeContext->hLock); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to create lock (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_createlock; -+ } -+ -+ psComputeContext->psDeviceNode = psDeviceNode; -+ -+ /* -+ Allocate device memory for the firmware GPU context suspend state. -+ Note: the FW reads/writes the state to memory by accessing the GPU register interface. -+ */ -+ PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware compute context suspend state"); -+ -+ eError = DevmemFwAllocate(psDevInfo, -+ sizeof(RGXFWIF_COMPUTECTX_STATE), -+ RGX_FWCOMCTX_ALLOCFLAGS, -+ "FwComputeContextState", -+ &psComputeContext->psFWComputeContextStateMemDesc); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate firmware GPU context suspend state (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_contextsuspendalloc; -+ } -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ WorkEstInitCompute(psDevInfo, &psComputeContext->sWorkEstData); -+ } -+#endif -+ -+ if (ui32FrameworkCommandSize) -+ { -+ /* -+ * Create the FW framework buffer -+ */ -+ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, -+ &psComputeContext->psFWFrameworkMemDesc, -+ ui32FrameworkCommandSize); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate firmware GPU framework state (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_frameworkcreate; -+ } -+ -+ /* Copy the Framework client data into the framework buffer */ -+ eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode, -+ psComputeContext->psFWFrameworkMemDesc, -+ pbyFrameworkCommand, -+ ui32FrameworkCommandSize); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to populate the framework buffer (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_frameworkcopy; -+ } -+ -+ sInfo.psFWFrameworkMemDesc = psComputeContext->psFWFrameworkMemDesc; -+ } -+ -+ ui32CCBAllocSizeLog2 = U32toU8_Unpack1(ui32PackedCCBSizeU88); -+ ui32CCBMaxAllocSizeLog2 = U32toU8_Unpack2(ui32PackedCCBSizeU88); -+ eError = FWCommonContextAllocate(psConnection, -+ psDeviceNode, -+ REQ_TYPE_CDM, -+ RGXFWIF_DM_CDM, -+ hMemCtxPrivData, -+ psComputeContext->psFWComputeContextMemDesc, -+ offsetof(RGXFWIF_FWCOMPUTECONTEXT, sCDMContext), -+ psFWMemContextMemDesc, -+ psComputeContext->psFWComputeContextStateMemDesc, -+ ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_CDM_CCB_SIZE_LOG2, -+ ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_CDM_CCB_MAX_SIZE_LOG2, -+ ui32ContextFlags, -+ i32Priority, -+ ui32MaxDeadlineMS, -+ ui64RobustnessAddress, -+ &sInfo, -+ &psComputeContext->psServerCommonContext); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_contextalloc; -+ } -+ -+ eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc, -+ (void **)&psFWComputeContext); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_acquire_cpu_mapping; -+ } -+ -+ OSDeviceMemCopy(&psFWComputeContext->sStaticComputeContextState, pStaticComputeContextState, ui32StaticComputeContextStateSize); -+ DevmemPDumpLoadMem(psComputeContext->psFWComputeContextMemDesc, 0, sizeof(RGXFWIF_FWCOMPUTECONTEXT), PDUMP_FLAGS_CONTINUOUS); -+ RGXFwSharedMemCacheOpValue(psFWComputeContext->sStaticComputeContextState, FLUSH); -+ DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc); -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ psComputeContext->psBufferSyncContext = -+ pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, -+ "rogue-cdm"); -+ if (IS_ERR(psComputeContext->psBufferSyncContext)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: failed to create buffer_sync context (err=%ld)", -+ __func__, PTR_ERR(psComputeContext->psBufferSyncContext))); -+ -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto fail_buffer_sync_context_create; -+ } -+#endif -+ -+ SyncAddrListInit(&psComputeContext->sSyncAddrListFence); -+ SyncAddrListInit(&psComputeContext->sSyncAddrListUpdate); -+ -+ { -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock); -+ dllist_add_to_tail(&(psDevInfo->sComputeCtxtListHead), &(psComputeContext->sListNode)); -+ OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock); -+ } -+ -+ *ppsComputeContext = psComputeContext; -+ return PVRSRV_OK; -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+fail_buffer_sync_context_create: -+#endif -+fail_acquire_cpu_mapping: -+ FWCommonContextFree(psComputeContext->psServerCommonContext); -+fail_contextalloc: -+fail_frameworkcopy: -+ if (psComputeContext->psFWFrameworkMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc); -+ } -+fail_frameworkcreate: -+ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc); -+fail_contextsuspendalloc: -+ OSLockDestroy(psComputeContext->hLock); -+fail_createlock: -+ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc); -+fail_fwcomputecontext: -+ OSFreeMem(psComputeContext); -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice; -+ -+ /* Check if the FW has finished with this resource ... 
*/ -+ eError = RGXFWRequestCommonContextCleanUp(psComputeContext->psDeviceNode, -+ psComputeContext->psServerCommonContext, -+ RGXFWIF_DM_CDM, -+ PDUMP_FLAGS_NONE); -+ -+ if (RGXIsErrorAndDeviceRecoverable(psComputeContext->psDeviceNode, &eError)) -+ { -+ return eError; -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ } -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ /* remove after RGXFWRequestCommonContextCleanUp() because we might return -+ * RETRY and don't want to be calling this twice */ -+ if (psComputeContext->psBufferSyncContext != NULL) -+ { -+ pvr_buffer_sync_context_destroy(psComputeContext->psBufferSyncContext); -+ psComputeContext->psBufferSyncContext = NULL; -+ } -+#endif -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ RGXFWIF_FWCOMPUTECONTEXT *psFWComputeContext; -+ IMG_UINT32 ui32WorkEstCCBSubmitted; -+ -+ eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc, -+ (void **)&psFWComputeContext); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to map firmware compute context (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ return eError; -+ } -+ RGXFwSharedMemCacheOpValue(psFWComputeContext->ui32WorkEstCCBSubmitted, INVALIDATE); -+ ui32WorkEstCCBSubmitted = psFWComputeContext->ui32WorkEstCCBSubmitted; -+ -+ DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc); -+ -+ /* Check if all of the workload estimation CCB commands for this workload are read */ -+ if (ui32WorkEstCCBSubmitted != psComputeContext->sWorkEstData.ui32WorkEstCCBReceived) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", -+ __func__, ui32WorkEstCCBSubmitted, -+ psComputeContext->sWorkEstData.ui32WorkEstCCBReceived)); -+ -+ return PVRSRV_ERROR_RETRY; -+ } -+ } -+#endif -+ -+ /* ... 
it has so we can free its resources */ -+ -+ OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock); -+ dllist_remove_node(&(psComputeContext->sListNode)); -+ OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock); -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ WorkEstDeInitCompute(psDevInfo, &psComputeContext->sWorkEstData); -+ } -+#endif -+ -+ SyncAddrListDeinit(&psComputeContext->sSyncAddrListFence); -+ SyncAddrListDeinit(&psComputeContext->sSyncAddrListUpdate); -+ -+ FWCommonContextFree(psComputeContext->psServerCommonContext); -+ psComputeContext->psServerCommonContext = NULL; -+ -+ if (psComputeContext->psFWFrameworkMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc); -+ } -+ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc); -+ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc); -+ -+ OSLockDestroy(psComputeContext->hLock); -+ OSFreeMem(psComputeContext); -+ -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, -+ IMG_UINT32 ui32ClientUpdateCount, -+ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock, -+ IMG_UINT32 *paui32ClientUpdateSyncOffset, -+ IMG_UINT32 *paui32ClientUpdateValue, -+ PVRSRV_FENCE iCheckFence, -+ PVRSRV_TIMELINE iUpdateTimeline, -+ PVRSRV_FENCE *piUpdateFence, -+ IMG_CHAR pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], -+ IMG_UINT32 ui32CmdSize, -+ IMG_PBYTE pui8DMCmd, -+ IMG_UINT32 ui32PDumpFlags, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32SyncPMRCount, -+ IMG_UINT32 *paui32SyncPMRFlags, -+ PMR **ppsSyncPMRs, -+ IMG_UINT32 ui32NumWorkgroups, -+ IMG_UINT32 ui32NumWorkitems, -+ IMG_UINT64 ui64DeadlineInus) -+{ -+ RGXFWIF_KCCB_CMD sCmpKCCBCmd; -+ RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1]; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32CDMCmdOffset = 0; -+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psComputeContext->psServerCommonContext); -+ RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext); -+ IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); -+ IMG_UINT32 ui32FWCtx; -+ IMG_BOOL bCCBStateOpen = IMG_FALSE; -+ -+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr; -+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr; -+ PRGXFWIF_UFO_ADDR pRMWUFOAddr; -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataCompute = {0}; -+ IMG_UINT32 ui32CDMWorkloadDataRO = 0; -+ IMG_UINT32 ui32CDMCmdHeaderOffset = 0; -+ IMG_UINT32 ui32CDMCmdOffsetWrapCheck = 0; -+ RGX_WORKLOAD sWorkloadCharacteristics = {0}; -+#endif -+ -+ IMG_UINT64 ui64FBSCEntryMask = 0; -+ IMG_UINT32 ui32IntClientFenceCount = 0; -+ PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL; -+ IMG_UINT32 ui32IntClientUpdateCount = 0; -+ PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL; -+ IMG_UINT32 *paui32IntUpdateValue = NULL; -+ PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE; -+ IMG_UINT64 uiCheckFenceUID = 0; -+ IMG_UINT64 uiUpdateFenceUID = 0; -+ PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; -+ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; -+ IMG_UINT32 ui32FenceSyncCheckpointCount = 0; -+ IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; -+ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL; -+ IMG_UINT32 ui32FenceTimelineUpdateValue = 0; -+ void *pvUpdateFenceFinaliseData = NULL; -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ struct pvr_buffer_sync_append_data *psBufferSyncData = NULL; -+ PSYNC_CHECKPOINT 
*apsBufferFenceSyncCheckpoints = NULL; -+ IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0; -+ PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL; -+#endif /* defined(SUPPORT_BUFFER_SYNC) */ -+ -+ CMD_COMMON *psComputeCmdCmn = IMG_OFFSET_ADDR(pui8DMCmd, 0); -+ -+ if (iUpdateTimeline >= 0 && !piUpdateFence) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* Ensure we haven't been given a null ptr to -+ * update values if we have been told we -+ * have updates -+ */ -+ if (ui32ClientUpdateCount > 0) -+ { -+ PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL, -+ "paui32ClientUpdateValue NULL but " -+ "ui32ClientUpdateCount > 0", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ /* Ensure the string is null-terminated (Required for safety) */ -+ pszUpdateFenceName[31] = '\0'; -+ -+ OSLockAcquire(psComputeContext->hLock); -+ -+ eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListFence, -+ 0, -+ NULL, -+ NULL); -+ if (eError != PVRSRV_OK) -+ { -+ goto err_populate_sync_addr_list; -+ } -+ -+ ui32IntClientUpdateCount = ui32ClientUpdateCount; -+ -+ eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListUpdate, -+ ui32ClientUpdateCount, -+ pauiClientUpdateUFODevVarBlock, -+ paui32ClientUpdateSyncOffset); -+ if (eError != PVRSRV_OK) -+ { -+ goto err_populate_sync_addr_list; -+ } -+ if (ui32IntClientUpdateCount) -+ { -+ pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs; -+ } -+ paui32IntUpdateValue = paui32ClientUpdateValue; -+ -+ if (ui32SyncPMRCount != 0) -+ { -+#if defined(SUPPORT_BUFFER_SYNC) -+ int err; -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling " -+ "pvr_buffer_sync_resolve_and_create_fences", __func__)); -+ -+ err = pvr_buffer_sync_resolve_and_create_fences( -+ psComputeContext->psBufferSyncContext, -+ psComputeContext->psDeviceNode->hSyncCheckpointContext, -+ ui32SyncPMRCount, -+ ppsSyncPMRs, -+ paui32SyncPMRFlags, -+ &ui32BufferFenceSyncCheckpointCount, -+ &apsBufferFenceSyncCheckpoints, -+ &psBufferUpdateSyncCheckpoint, -+ &psBufferSyncData -+ ); -+ -+ if (unlikely(err)) -+ { -+ switch (err) -+ { -+ case -EINTR: -+ eError = PVRSRV_ERROR_RETRY; -+ break; -+ case -ENOMEM: -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ break; -+ default: -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ break; -+ } -+ -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: " -+ "pvr_buffer_sync_resolve_and_create_fences failed (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ } -+ -+ goto fail_resolve_input_fence; -+ } -+ -+ /* Append buffer sync fences */ -+ if (ui32BufferFenceSyncCheckpointCount > 0) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints " -+ "to CDM Fence (&psTransferContext->sSyncAddrListFence=<%p>, " -+ "pauiIntFenceUFOAddress=<%p>)...", __func__, -+ ui32BufferFenceSyncCheckpointCount, -+ (void *) &psComputeContext->sSyncAddrListFence , -+ (void *) pauiIntFenceUFOAddress)); -+ -+ SyncAddrListAppendAndDeRefCheckpoints(&psComputeContext->sSyncAddrListFence, -+ ui32BufferFenceSyncCheckpointCount, -+ apsBufferFenceSyncCheckpoints); -+ if (pauiIntFenceUFOAddress == NULL) -+ { -+ pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs; -+ } -+ ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount; -+ } -+ -+ /* Append the update (from output fence) */ -+ if (psBufferUpdateSyncCheckpoint) -+ { -+ SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate, -+ 1, &psBufferUpdateSyncCheckpoint); -+ if (pauiIntUpdateUFOAddress == NULL) -+ { -+ pauiIntUpdateUFOAddress = 
psComputeContext->sSyncAddrListUpdate.pasFWAddrs; -+ } -+ ui32IntClientUpdateCount++; -+ } -+#else /* defined(SUPPORT_BUFFER_SYNC) */ -+ PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", -+ __func__, ui32SyncPMRCount)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto err_populate_sync_addr_list; -+#endif /* defined(SUPPORT_BUFFER_SYNC) */ -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext)); -+ /* Resolve the sync checkpoints that make up the input fence */ -+ eError = SyncCheckpointResolveFence(psComputeContext->psDeviceNode->hSyncCheckpointContext, -+ iCheckFence, -+ &ui32FenceSyncCheckpointCount, -+ &apsFenceSyncCheckpoints, -+ &uiCheckFenceUID, ui32PDumpFlags); -+ if (eError != PVRSRV_OK) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (%s)", __func__, PVRSRVGetErrorString(eError))); -+ goto fail_free_buffer_sync_data; -+ } -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints)); -+#if defined(CMP_CHECKPOINT_DEBUG) -+ if (ui32FenceSyncCheckpointCount > 0) -+ { -+ IMG_UINT32 ii; -+ for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++) -+ { -+ PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii); -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint)); -+ } -+ } -+#endif -+ /* Create the output fence (if required) */ -+ if (iUpdateTimeline != PVRSRV_NO_TIMELINE) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d, psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>)...", __func__, iUpdateFence, iUpdateTimeline, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext)); -+ eError = SyncCheckpointCreateFence(psComputeContext->psDeviceNode, -+ pszUpdateFenceName, -+ iUpdateTimeline, -+ psComputeContext->psDeviceNode->hSyncCheckpointContext, -+ &iUpdateFence, -+ &uiUpdateFenceUID, -+ &pvUpdateFenceFinaliseData, -+ &psUpdateSyncCheckpoint, -+ (void*)&psFenceTimelineUpdateSync, -+ &ui32FenceTimelineUpdateValue, -+ ui32PDumpFlags); -+ if (eError != PVRSRV_OK) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%s)", __func__, PVRSRVGetErrorString(eError))); -+ goto fail_create_output_fence; -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __func__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue)); -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u, psFenceTimelineUpdateSync=<%p>", __func__, ui32IntClientUpdateCount, (void*)psFenceTimelineUpdateSync)); -+ /* Append the sync prim update for the timeline (if required) */ -+ if (psFenceTimelineUpdateSync) -+ { -+ IMG_UINT32 *pui32TimelineUpdateWp = NULL; -+ -+ /* Allocate memory to hold the list of update values (including our timeline update) */ -+ pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); -+ if (!pui32IntAllocatedUpdateValues) -+ { -+ /* Failed to allocate memory */ -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto fail_alloc_update_values_mem; -+ } -+ OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); -+ /* Copy the update values into the new memory, then append our timeline update value */ -+ if 
(paui32IntUpdateValue) -+ { -+ OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount); -+ } -+#if defined(CMP_CHECKPOINT_DEBUG) -+ if (ui32IntClientUpdateCount > 0) -+ { -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount)); -+ for (iii=0; iii<ui32IntClientUpdateCount; iii++) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+ } -+#endif -+ /* Now set the additional update value */ -+ pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount; -+ *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue; -+ ui32IntClientUpdateCount++; -+ /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */ -+ paui32ClientUpdateValue = pui32IntAllocatedUpdateValues; -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: append the timeline sync prim addr <%p> to the compute context update list", __func__, (void*)psFenceTimelineUpdateSync)); -+ /* Now append the timeline sync prim addr to the compute context update list */ -+ SyncAddrListAppendSyncPrim(&psComputeContext->sSyncAddrListUpdate, -+ psFenceTimelineUpdateSync); -+#if defined(CMP_CHECKPOINT_DEBUG) -+ if (ui32IntClientUpdateCount > 0) -+ { -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount)); -+ for (iii=0; iii<ui32IntClientUpdateCount; iii++) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+ } -+#endif -+ /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */ -+ paui32IntUpdateValue = pui32IntAllocatedUpdateValues; -+ } -+ } -+ -+ /* Append the checks (from input fence) */ -+ if (ui32FenceSyncCheckpointCount > 0) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to Compute CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psComputeContext->sSyncAddrListFence)); -+#if defined(CMP_CHECKPOINT_DEBUG) -+ if (ui32IntClientUpdateCount > 0) -+ { -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress; -+ -+ for (iii=0; iii<ui32IntClientUpdateCount; iii++) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntFenceUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+ } -+#endif -+ SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListFence, -+ ui32FenceSyncCheckpointCount, -+ apsFenceSyncCheckpoints); -+ if (!pauiIntFenceUFOAddress) -+ { -+ pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs; -+ } -+ ui32IntClientFenceCount += ui32FenceSyncCheckpointCount; -+ } -+#if defined(CMP_CHECKPOINT_DEBUG) -+ if (ui32IntClientUpdateCount > 0) -+ { -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)paui32IntUpdateValue; -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Dumping %d update values (paui32IntUpdateValue=<%p>)...", __func__, ui32IntClientUpdateCount, (void*)paui32IntUpdateValue)); -+ for (iii=0; iii<ui32IntClientUpdateCount; iii++) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: paui32IntUpdateValue[%d] = <%p>", __func__, iii, (void*)pui32Tmp)); -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: *paui32IntUpdateValue[%d] = 0x%x", __func__, iii, *pui32Tmp)); -+ pui32Tmp++; -+ } -+ } -+#endif -+ -+ if (psUpdateSyncCheckpoint) -+ { -+ /* Append the update (from output fence) */ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to Compute CDM Update (&psComputeContext->sSyncAddrListUpdate=<%p>, psUpdateSyncCheckpoint=<%p>)...", __func__, (void*)&psComputeContext->sSyncAddrListUpdate , (void*)psUpdateSyncCheckpoint)); -+ 
SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate, -+ 1, -+ &psUpdateSyncCheckpoint); -+ if (!pauiIntUpdateUFOAddress) -+ { -+ pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs; -+ } -+ ui32IntClientUpdateCount++; -+#if defined(CMP_CHECKPOINT_DEBUG) -+ if (ui32IntClientUpdateCount > 0) -+ { -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress; -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress=<%p>, pui32Tmp=<%p>, ui32IntClientUpdateCount=%u", __func__, (void*)pauiIntUpdateUFOAddress, (void*)pui32Tmp, ui32IntClientUpdateCount)); -+ for (iii=0; iii<ui32IntClientUpdateCount; iii++) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+ } -+#endif -+ } -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount)); -+ -+#if (ENABLE_CMP_UFO_DUMP == 1) -+ PVR_DPF((PVR_DBG_ERROR, "%s: dumping Compute (CDM) fence/updates syncs...", __func__)); -+ { -+ IMG_UINT32 ii; -+ PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress; -+ PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress; -+ IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue; -+ -+ /* Dump Fence syncs and Update syncs */ -+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) fence syncs (&psComputeContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psComputeContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress)); -+ for (ii=0; ii<ui32IntClientFenceCount; ii++) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr)); -+ psTmpIntFenceUFOAddress++; -+ } -+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) update syncs (&psComputeContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psComputeContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress)); -+ for (ii=0; ii<ui32IntClientUpdateCount; ii++) -+ { -+ if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue)); -+ pui32TmpIntUpdateValue++; -+ } -+ psTmpIntUpdateUFOAddress++; -+ } -+ } -+#endif -+ -+#if defined(RGX_FBSC_INVALIDATE_COMMAND_SUPPORTED) -+ /* -+ * Extract the FBSC entries from MMU Context for the deferred FBSC invalidate command, -+ * in other words, take the value and set it to zero afterwards. -+ * FBSC Entry Mask must be extracted from MMU ctx and updated just before the kick starts -+ * as it must be ready at the time of context activation. 
-+ */ -+ { -+ eError = RGXExtractFBSCEntryMaskFromMMUContext(psComputeContext->psDeviceNode, -+ FWCommonContextGetServerMMUCtx(psComputeContext->psServerCommonContext), -+ &ui64FBSCEntryMask); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to extract FBSC Entry Mask (%s)", PVRSRVGetErrorString(eError))); -+ goto fail_cmdinvalfbsc; -+ } -+ } -+#endif -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ sWorkloadCharacteristics.sCompute.ui32NumberOfWorkgroups = ui32NumWorkgroups; -+ sWorkloadCharacteristics.sCompute.ui32NumberOfWorkitems = ui32NumWorkitems; -+ -+ /* Prepare workload estimation */ -+ WorkEstPrepare(psComputeContext->psDeviceNode->pvDevice, -+ &psComputeContext->sWorkEstData, -+ &psComputeContext->sWorkEstData.uWorkloadMatchingData.sCompute.sDataCDM, -+ RGXFWIF_CCB_CMD_TYPE_CDM, -+ &sWorkloadCharacteristics, -+ ui64DeadlineInus, -+ &sWorkloadKickDataCompute); -+ } -+#endif -+ -+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psComputeContext->psDeviceNode->pvDevice, -+ &pPreAddr, -+ &pPostAddr, -+ &pRMWUFOAddr); -+ -+ RGXCmdHelperInitCmdCCB(psDevInfo, -+ psClientCCB, -+ ui64FBSCEntryMask, -+ ui32IntClientFenceCount, -+ pauiIntFenceUFOAddress, -+ NULL, -+ ui32IntClientUpdateCount, -+ pauiIntUpdateUFOAddress, -+ paui32IntUpdateValue, -+ ui32CmdSize, -+ pui8DMCmd, -+ &pPreAddr, -+ &pPostAddr, -+ &pRMWUFOAddr, -+ RGXFWIF_CCB_CMD_TYPE_CDM, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ ui32PDumpFlags, -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ &sWorkloadKickDataCompute, -+#else -+ NULL, -+#endif -+ "Compute", -+ bCCBStateOpen, -+ asCmdHelperData); -+ -+ eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_cmdaquire; -+ } -+ -+ -+ /* -+ We should reserve space in the kernel CCB here and fill in the command -+ directly. 
-+ This is so if there isn't space in the kernel CCB we can return with -+ retry back to services client before we take any operations -+ */ -+ -+ /* -+ We might only be kicking for flush out a padding packet so only submit -+ the command if the create was successful -+ */ -+ -+ /* -+ All the required resources are ready at this point, we can't fail so -+ take the required server sync operations and commit all the resources -+ */ -+ -+ eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ -+ /* If system is found powered OFF, Retry scheduling the command */ -+ if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) -+ { -+ eError = PVRSRV_ERROR_RETRY; -+ } -+ goto fail_acquirepowerlock; -+ } -+ -+ ui32CDMCmdOffset = RGXGetHostWriteOffsetCCB(psClientCCB); -+ RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM", FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr); -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* The following is used to determine the offset of the command header containing -+ the workload estimation data so that can be accessed when the KCCB is read */ -+ ui32CDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(asCmdHelperData); -+ -+ ui32CDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext)); -+ -+ /* This checks if the command would wrap around at the end of the CCB and -+ * therefore would start at an offset of 0 rather than the current command -+ * offset */ -+ if (ui32CDMCmdOffset < ui32CDMCmdOffsetWrapCheck) -+ { -+ ui32CDMWorkloadDataRO = ui32CDMCmdOffset; -+ } -+ else -+ { -+ ui32CDMWorkloadDataRO = 0; -+ } -+ } -+#endif -+ -+ /* Construct the kernel compute CCB command. */ -+ sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; -+ sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext); -+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); -+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); -+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; -+ -+ /* Add the Workload data into the KCCB kick */ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Store the offset to the CCCB command header so that it can be referenced -+ * when the KCCB command reaches the FW */ -+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32CDMWorkloadDataRO + ui32CDMCmdHeaderOffset; -+ } -+#endif -+ -+ ui32FWCtx = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr; -+ -+ if (psComputeCmdCmn) -+ { -+ HTBLOGK(HTB_SF_MAIN_KICK_CDM, -+ sCmpKCCBCmd.uCmdData.sCmdKickData.psContext, -+ ui32CDMCmdOffset, -+ psComputeCmdCmn->ui32FrameNum, -+ ui32ExtJobRef, -+ ui32IntJobRef); -+ } -+ -+ RGXSRV_HWPERF_ENQ(psComputeContext, -+ OSGetCurrentClientProcessIDKM(), -+ ui32FWCtx, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ RGX_HWPERF_KICK_TYPE2_CDM, -+ iCheckFence, -+ iUpdateFence, -+ iUpdateTimeline, -+ uiCheckFenceUID, -+ uiUpdateFenceUID, -+ NO_DEADLINE, -+ NO_CYCEST); -+ -+ /* -+ * Submit the compute command to the firmware. 
-+ */ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommandWithoutPowerLock(psComputeContext->psDeviceNode->pvDevice, -+ RGXFWIF_DM_CDM, -+ &sCmpKCCBCmd, -+ ui32PDumpFlags); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ PVRSRVPowerUnlock(psDevInfo->psDeviceNode); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s failed to schedule kernel CCB command (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_schedulecmd; -+ } -+ else -+ { -+ PVRGpuTraceEnqueueEvent(psComputeContext->psDeviceNode, -+ ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, -+ RGX_HWPERF_KICK_TYPE2_CDM); -+ } -+ -+#if defined(NO_HARDWARE) -+ /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ -+ if (psUpdateSyncCheckpoint) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint))); -+ SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint); -+ } -+ if (psFenceTimelineUpdateSync) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim<%p> to %d", __func__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue)); -+ SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue); -+ } -+ SyncCheckpointNoHWUpdateTimelines(NULL); -+#endif /* defined(NO_HARDWARE) */ -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ if (psBufferSyncData) -+ { -+ pvr_buffer_sync_kick_succeeded(psBufferSyncData); -+ } -+ if (apsBufferFenceSyncCheckpoints) -+ { -+ kfree(apsBufferFenceSyncCheckpoints); -+ } -+#endif /* defined(SUPPORT_BUFFER_SYNC) */ -+ -+ *piUpdateFence = iUpdateFence; -+ -+ if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE)) -+ { -+ SyncCheckpointFinaliseFence(psComputeContext->psDeviceNode, iUpdateFence, -+ pvUpdateFenceFinaliseData, -+ psUpdateSyncCheckpoint, pszUpdateFenceName); -+ } -+ /* Drop the references taken on the sync checkpoints in the -+ * resolved input fence */ -+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, -+ apsFenceSyncCheckpoints); -+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ -+ if (apsFenceSyncCheckpoints) -+ { -+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); -+ } -+ /* Free memory allocated to hold the internal list of update values */ -+ if (pui32IntAllocatedUpdateValues) -+ { -+ OSFreeMem(pui32IntAllocatedUpdateValues); -+ pui32IntAllocatedUpdateValues = NULL; -+ } -+ -+ OSLockRelease(psComputeContext->hLock); -+ -+ return PVRSRV_OK; -+ -+fail_schedulecmd: -+fail_acquirepowerlock: -+fail_cmdaquire: -+#if defined(RGX_FBSC_INVALIDATE_COMMAND_SUPPORTED) -+fail_cmdinvalfbsc: -+#endif -+ SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListFence); -+ SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListUpdate); -+fail_alloc_update_values_mem: -+ if (iUpdateFence != PVRSRV_NO_FENCE) -+ { -+ SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData); -+ } -+fail_create_output_fence: -+ /* Drop the references taken on the sync checkpoints in the -+ * resolved input fence */ -+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, -+ apsFenceSyncCheckpoints); -+ -+fail_free_buffer_sync_data: -+#if defined(SUPPORT_BUFFER_SYNC) -+ if (psBufferSyncData) -+ { -+ 
pvr_buffer_sync_kick_failed(psBufferSyncData); -+ } -+ if (apsBufferFenceSyncCheckpoints) -+ { -+ kfree(apsBufferFenceSyncCheckpoints); -+ } -+ -+fail_resolve_input_fence: -+#endif /* defined(SUPPORT_BUFFER_SYNC) */ -+ -+err_populate_sync_addr_list: -+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ -+ if (apsFenceSyncCheckpoints) -+ { -+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); -+ } -+ /* Free memory allocated to hold the internal list of update values */ -+ if (pui32IntAllocatedUpdateValues) -+ { -+ OSFreeMem(pui32IntAllocatedUpdateValues); -+ pui32IntAllocatedUpdateValues = NULL; -+ } -+ OSLockRelease(psComputeContext->hLock); -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext) -+{ -+ RGXFWIF_KCCB_CMD sFlushCmd; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice; -+ -+#if defined(PDUMP) -+ PDUMPCOMMENTWITHFLAGS(psComputeContext->psDeviceNode, -+ PDUMP_FLAGS_CONTINUOUS, "Submit Compute flush"); -+#endif -+ sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL; -+ sFlushCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_FALSE; -+ sFlushCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_TRUE; -+ sFlushCmd.uCmdData.sSLCFlushInvalData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext); -+ -+ OSLockAcquire(psComputeContext->hLock); -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, -+ RGXFWIF_DM_CDM, -+ &sFlushCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ &ui32kCCBCommandSlot); -+ /* Iterate if we hit a PVRSRV_ERROR_KERNEL_CCB_FULL error */ -+ if ((eError != PVRSRV_ERROR_RETRY) && -+ (eError != PVRSRV_ERROR_KERNEL_CCB_FULL)) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ if (eError != PVRSRV_OK) -+ { -+ /* If we hit a temporary KCCB exhaustion, return a RETRY to caller */ -+ if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Returning RETRY to caller", __func__)); -+ eError = PVRSRV_ERROR_RETRY; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to schedule SLC flush command (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ } -+ } -+ else -+ { -+ /* Wait for the SLC flush to complete */ -+ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Compute flush aborted (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto error_exit; -+ } -+ -+ if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & -+ RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); -+ } -+ } -+ -+error_exit: -+ OSLockRelease(psComputeContext->hLock); -+ return eError; -+} -+ -+ -+PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice; -+ IMG_UINT32 ui32ControlStreamFormat = -+ RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, CDM_CONTROL_STREAM_FORMAT) ? 
-+ RGX_GET_FEATURE_VALUE(psDevInfo, CDM_CONTROL_STREAM_FORMAT) : 0; -+ -+ if (ui32ControlStreamFormat >= 2U && ui32ControlStreamFormat < 5U) -+ { -+ -+ RGXFWIF_KCCB_CMD sKCCBCmd; -+ PVRSRV_ERROR eError; -+ -+ OSLockAcquire(psComputeContext->hLock); -+ -+ /* Schedule the firmware command */ -+ sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE; -+ sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext); -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice, -+ RGXFWIF_DM_CDM, -+ &sKCCBCmd, -+ PDUMP_FLAGS_NONE); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to schedule the FW command %d (%s)", -+ __func__, -+ eError, -+ PVRSRVGetErrorString(eError))); -+ } -+ -+ OSLockRelease(psComputeContext->hLock); -+ return eError; -+ } -+ else -+ { -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+ } -+} -+ -+ -+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, -+ IMG_INT32 i32Priority) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ -+ OSLockAcquire(psComputeContext->hLock); -+ -+ eError = ContextSetPriority(psComputeContext->psServerCommonContext, -+ psConnection, -+ psComputeContext->psDeviceNode->pvDevice, -+ i32Priority, -+ RGXFWIF_DM_CDM); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the compute context (%s)", __func__, PVRSRVGetErrorString(eError))); -+ } -+ -+ OSLockRelease(psComputeContext->hLock); -+ return eError; -+} -+ -+/* -+ * PVRSRVRGXSetComputeContextPropertyKM -+ */ -+PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, -+ RGX_CONTEXT_PROPERTY eContextProperty, -+ IMG_UINT64 ui64Input, -+ IMG_UINT64 *pui64Output) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ switch (eContextProperty) -+ { -+ case RGX_CONTEXT_PROPERTY_FLAGS: -+ { -+ IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input; -+ -+ OSLockAcquire(psComputeContext->hLock); -+ eError = FWCommonContextSetFlags(psComputeContext->psServerCommonContext, -+ ui32ContextFlags); -+ OSLockRelease(psComputeContext->hLock); -+ break; -+ } -+ -+ default: -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); -+ eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ } -+ } -+ -+ return eError; -+} -+ -+void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ IMG_UINT32 ui32VerbLevel) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock); -+ dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext) -+ { -+ RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx = -+ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode); -+ DumpFWCommonContextInfo(psCurrentServerComputeCtx->psServerCommonContext, -+ pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); -+ } -+ OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock); -+} -+ -+IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ IMG_UINT32 ui32ContextBitMask = 0; -+ DLLIST_NODE *psNode, *psNext; -+ OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock); -+ 
dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext) -+ { -+ RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx = -+ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode); -+ -+ if (CheckStalledClientCommonContext(psCurrentServerComputeCtx->psServerCommonContext, RGX_KICK_TYPE_DM_CDM) -+ == PVRSRV_ERROR_CCCB_STALLED) -+ { -+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_CDM; -+ } -+ } -+ OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock); -+ return ui32ContextBitMask; -+} -+ -+/* -+ * PVRSRVRGXGetLastDeviceErrorKM -+ */ -+PVRSRV_ERROR PVRSRVRGXGetLastDeviceErrorKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 *ui32Error) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ *ui32Error = psDevInfo->eLastDeviceError; -+ psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_NONE; -+ return PVRSRV_OK; -+} -+ -+/* -+ * PVRSRVRGXKickTimestampQueryKM -+ */ -+PVRSRV_ERROR PVRSRVRGXKickTimestampQueryKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, -+ PVRSRV_FENCE iCheckFence, -+ IMG_UINT32 ui32CmdSize, -+ IMG_PBYTE pui8DMCmd, -+ IMG_UINT32 ui32ExtJobRef) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psComputeContext->psServerCommonContext); -+ RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext); -+ RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1]; -+ IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); -+ IMG_UINT32 ui32PDumpFlags = 0; -+ IMG_UINT64 uiCheckFenceUID = 0; -+ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; -+ IMG_UINT32 ui32FenceSyncCheckpointCount = 0; -+ RGXFWIF_KCCB_CMD sCmpKCCBCmd; -+ PVRSRV_ERROR eError; -+ -+ OSLockAcquire(psComputeContext->hLock); -+ -+ if (iCheckFence != PVRSRV_NO_FENCE) -+ { -+ -+ eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListFence, -+ 0, -+ NULL, -+ NULL); -+ if (eError != PVRSRV_OK) -+ { -+ goto err_populate_sync_addr_list; -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext)); -+ /* Resolve the sync checkpoints that make up the input fence */ -+ eError = SyncCheckpointResolveFence(psComputeContext->psDeviceNode->hSyncCheckpointContext, -+ iCheckFence, -+ &ui32FenceSyncCheckpointCount, -+ &apsFenceSyncCheckpoints, -+ &uiCheckFenceUID, ui32PDumpFlags); -+ if (eError != PVRSRV_OK) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (%s)", __func__, PVRSRVGetErrorString(eError))); -+ goto fail_resolve_fence; -+ } -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints)); -+#if defined(CMP_CHECKPOINT_DEBUG) -+ if (ui32FenceSyncCheckpointCount > 0) -+ { -+ IMG_UINT32 ii; -+ for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++) -+ { -+ PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii); -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint)); -+ } -+ } -+#endif -+ /* Append the checks (from input fence) */ -+ if (ui32FenceSyncCheckpointCount > 0) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to Compute CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psComputeContext->sSyncAddrListFence)); -+ eError = SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListFence, -+ ui32FenceSyncCheckpointCount, -+ apsFenceSyncCheckpoints); -+ if 
(eError != PVRSRV_OK) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (%s)", __func__, PVRSRVGetErrorString(eError))); -+ goto fail_append_checkpoints; -+ } -+ } -+ } -+ -+ RGXCmdHelperInitCmdCCB(psDevInfo, -+ psClientCCB, -+ 0, /* empty ui64FBSCEntryMask */ -+ ui32FenceSyncCheckpointCount, -+ psComputeContext->sSyncAddrListFence.pasFWAddrs, -+ NULL, -+ 0, -+ NULL, -+ NULL, -+ ui32CmdSize, -+ pui8DMCmd, -+ NULL, -+ NULL, -+ NULL, -+ RGXFWIF_CCB_CMD_TYPE_VK_TIMESTAMP, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ PDUMP_FLAGS_NONE, -+ NULL, -+ "VkTimestamp", -+ IMG_FALSE, /* bCCBStateOpen */ -+ asCmdHelperData); -+ -+ eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData); -+ -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXCmdHelperAcquireCmdCCB", fail_cmdaquire); -+ -+ eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ -+ /* If system is found powered OFF, Retry scheduling the command */ -+ if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) -+ { -+ eError = PVRSRV_ERROR_RETRY; -+ } -+ goto fail_acquirepowerlock; -+ } -+ -+ RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM", -+ FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr); -+ -+ /* Construct the kernel compute CCB command. */ -+ sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; -+ sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext); -+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); -+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); -+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; -+ -+ /* -+ * Submit the RGXFWIF_CCB_CMD_TYPE_VK_TIMESTAMP -+ * command to the firmware. 
-+ */ -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommandWithoutPowerLock(psComputeContext->psDeviceNode->pvDevice, -+ RGXFWIF_DM_CDM, -+ &sCmpKCCBCmd, -+ PDUMP_FLAGS_NONE); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ PVRSRVPowerUnlock(psDevInfo->psDeviceNode); -+ -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommand", fail_cmdaquire); -+ -+ /* Drop the references taken on the sync checkpoints in the -+ * resolved input fence */ -+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, -+ apsFenceSyncCheckpoints); -+ /* Free the memory that was allocated for the sync checkpoint list returned -+ * by ResolveFence() */ -+ if (apsFenceSyncCheckpoints) -+ { -+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); -+ } -+ -+ OSLockRelease(psComputeContext->hLock); -+ return PVRSRV_OK; -+ -+fail_acquirepowerlock: -+fail_cmdaquire: -+ SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, -+ &psComputeContext->sSyncAddrListFence); -+ -+fail_append_checkpoints: -+ /* Drop the references taken on the sync checkpoints in the -+ * resolved input fence */ -+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, -+ apsFenceSyncCheckpoints); -+ /* Free memory allocated to hold the resolved fence's checkpoints */ -+ if (apsFenceSyncCheckpoints) -+ { -+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); -+ } -+fail_resolve_fence: -+err_populate_sync_addr_list: -+ OSLockRelease(psComputeContext->hLock); -+ return eError; -+} -+ -+/****************************************************************************** -+ End of file (rgxcompute.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxcompute.h b/drivers/gpu/drm/img-rogue/rgxcompute.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxcompute.h -@@ -0,0 +1,196 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX compute functionality -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the RGX compute functionality -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGXCOMPUTE_H) -+#define RGXCOMPUTE_H -+ -+#include "devicemem.h" -+#include "device.h" -+#include "rgxfwutils.h" -+#include "rgx_fwif_resetframework.h" -+#include "rgxdebug_common.h" -+#include "pvr_notifier.h" -+ -+#include "sync_server.h" -+#include "sync_internal.h" -+#include "connection_server.h" -+ -+ -+typedef struct _RGX_SERVER_COMPUTE_CONTEXT_ RGX_SERVER_COMPUTE_CONTEXT; -+ -+/*************************************************************************/ /*! -+@Function PVRSRVRGXCreateComputeContextKM -+@Description Creates an RGX device context for submitting commands to CDM. -+@Input psConnection Device connection -+@Input psDeviceNode Services-managed device -+@Input i32Priority Scheduling priority for commands -+ on this context -+@Input ui32FrameworkCommandSize -+ Framework command size -+@Input pabyFrameworkCommand Pointer to framework command -+@Input hMemCtxPrivData Private data -+@Input ui32StaticComputeContextStateSize -+ Size of fixed compute context state -+@Input pStaticComputeContextState -+ Compute context state -+@Input ui32PackedCCBSizeU88 Packed CCB size. The first byte contains -+ the log2 CCB size and the second byte -+ the log2 maximum CCB size. -+@Input ui32ContextFlags Flags with context properties -+@Input ui64RobustnessAddress Address for FW to signal a context reset -+@Input ui32MaxDeadlineMS Max deadline limit in MS that the -+ workload can run -+@Output ppsComputeContext Cleanup data -+@Return PVRSRV_ERROR Returns PVRSRV_OK or an error. -+*/ /**************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_INT32 i32Priority, -+ IMG_UINT32 ui32FrameworkCommandSize, -+ IMG_PBYTE pabyFrameworkCommand, -+ IMG_HANDLE hMemCtxPrivData, -+ IMG_UINT32 ui32StaticComputeContextStateSize, -+ IMG_PBYTE pStaticComputeContextState, -+ IMG_UINT32 ui32PackedCCBSizeU88, -+ IMG_UINT32 ui32ContextFlags, -+ IMG_UINT64 ui64RobustnessAddress, -+ IMG_UINT32 ui32MaxDeadlineMS, -+ RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext); -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVRGXDestroyComputeContextKM -+ -+ @Description -+ Server-side implementation of RGXDestroyComputeContext -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext); -+ -+ -+/*! 
-+******************************************************************************* -+ @Function PVRSRVRGXKickCDMKM -+ -+ @Description -+ Server-side implementation of RGXKickCDM -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, -+ IMG_UINT32 ui32ClientUpdateCount, -+ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock, -+ IMG_UINT32 *paui32ClientUpdateSyncOffset, -+ IMG_UINT32 *paui32ClientUpdateValue, -+ PVRSRV_FENCE iCheckFence, -+ PVRSRV_TIMELINE iUpdateTimeline, -+ PVRSRV_FENCE *piUpdateFence, -+ IMG_CHAR pcszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], -+ IMG_UINT32 ui32CmdSize, -+ IMG_PBYTE pui8DMCmd, -+ IMG_UINT32 ui32PDumpFlags, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32SyncPMRCount, -+ IMG_UINT32 *paui32SyncPMRFlags, -+ PMR **ppsSyncPMRs, -+ IMG_UINT32 ui32NumWorkgroups, -+ IMG_UINT32 ui32NumWorkitems, -+ IMG_UINT64 ui64DeadlineInus); -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVRGXFlushComputeDataKM -+ -+ @Description -+ Server-side implementation of RGXFlushComputeData -+ -+ @Input psComputeContext - Compute context to flush -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext); -+ -+/*! -+******************************************************************************* -+ -+ @Function PVRSRVRGXNotifyComputeWriteOffsetUpdateKM -+ @Description Server-side implementation of RGXNotifyComputeWriteOffsetUpdate -+ -+ @Input psComputeContext - Compute context to flush -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext); -+ -+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, -+ IMG_INT32 i32Priority); -+ -+PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, -+ RGX_CONTEXT_PROPERTY eContextProperty, -+ IMG_UINT64 ui64Input, -+ IMG_UINT64 *pui64Output); -+ -+PVRSRV_ERROR PVRSRVRGXGetLastDeviceErrorKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 *ui32Error); -+ -+PVRSRV_ERROR PVRSRVRGXKickTimestampQueryKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, -+ PVRSRV_FENCE iCheckFence, -+ IMG_UINT32 ui32CmdSize, -+ IMG_PBYTE pui8DMCmd, -+ IMG_UINT32 ui32ExtJobRef); -+ -+/* Debug - Dump debug info of compute contexts on this device */ -+void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ IMG_UINT32 ui32VerbLevel); -+ -+/* Debug/Watchdog - check if client compute contexts are stalled */ -+IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+#endif /* RGXCOMPUTE_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxdebug.c b/drivers/gpu/drm/img-rogue/rgxdebug.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxdebug.c -@@ -0,0 +1,4077 @@ -+/*************************************************************************/ /*! -+@File -+@Title Rgx debug information -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@Description RGX debugging functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+//#define PVR_DPF_FUNCTION_TRACE_ON 1 -+#undef PVR_DPF_FUNCTION_TRACE_ON -+ -+#include "img_defs.h" -+#include "rgxdefs_km.h" -+#include "rgxdevice.h" -+#include "rgxmem.h" -+#include "allocmem.h" -+#include "cache_km.h" -+#include "osfunc.h" -+#include "os_apphint.h" -+ -+#include "rgxdebug_common.h" -+#include "pvrversion.h" -+#include "pvr_debug.h" -+#include "srvkm.h" -+#include "rgxutils.h" -+#include "tlstream.h" -+#include "rgxfwriscv.h" -+#include "pvrsrv.h" -+#include "services_km.h" -+ -+#include "devicemem.h" -+#include "devicemem_pdump.h" -+#include "devicemem_utils.h" -+#include "rgx_fwif_km.h" -+#include "rgx_fwif_sf.h" -+#include "debug_common.h" -+ -+#include "rgxta3d.h" -+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) -+#include "rgxkicksync.h" -+#endif -+#include "rgxcompute.h" -+#include "rgxtransfer.h" -+#include "rgxtdmtransfer.h" -+#include "rgxtimecorr.h" -+#include "rgx_options.h" -+#include "rgxinit.h" -+#include "rgxlayer_impl.h" -+#include "devicemem_history_server.h" -+ -+#define DD_SUMMARY_INDENT "" -+ -+#define RGX_DEBUG_STR_SIZE (150U) -+ -+#define RGX_CR_BIF_CAT_BASE0 (0x1200U) -+#define RGX_CR_BIF_CAT_BASE1 (0x1208U) -+ -+#define RGX_CR_BIF_CAT_BASEN(n) \ -+ RGX_CR_BIF_CAT_BASE0 + \ -+ ((RGX_CR_BIF_CAT_BASE1 - RGX_CR_BIF_CAT_BASE0) * n) -+ -+ -+#define RGXDBG_BIF_IDS \ -+ X(BIF0)\ -+ X(BIF1)\ -+ X(TEXAS_BIF)\ -+ X(DPX_BIF) \ -+ X(FWCORE) -+ -+#define RGXDBG_SIDEBAND_TYPES \ -+ X(META)\ -+ X(TLA)\ -+ X(DMA)\ -+ X(VDMM)\ -+ X(CDM)\ -+ X(IPP)\ -+ X(PM)\ -+ X(TILING)\ -+ X(MCU)\ -+ X(PDS)\ -+ X(PBE)\ -+ X(VDMS)\ -+ X(IPF)\ -+ X(ISP)\ -+ X(TPF)\ -+ X(USCS)\ -+ X(PPP)\ -+ X(VCE)\ -+ X(TPF_CPF)\ -+ X(IPF_CPF)\ -+ X(FBCDC) -+ -+typedef enum -+{ -+#define X(NAME) RGXDBG_##NAME, -+ RGXDBG_BIF_IDS -+#undef X -+} RGXDBG_BIF_ID; -+ -+typedef enum -+{ -+#define X(NAME) RGXDBG_##NAME, -+ RGXDBG_SIDEBAND_TYPES -+#undef X -+} RGXDBG_SIDEBAND_TYPE; -+ -+static const IMG_CHAR *const pszPowStateName[] = -+{ -+#define X(NAME) #NAME, -+ RGXFWIF_POW_STATES -+#undef X -+}; -+ -+static const IMG_CHAR *const pszBIFNames[] = -+{ -+#define X(NAME) #NAME, -+ RGXDBG_BIF_IDS -+#undef X -+}; -+ -+static const IMG_FLAGS2DESC asHwrState2Description[] = -+{ -+ {RGXFWIF_HWR_HARDWARE_OK, " HWR OK;"}, -+ {RGXFWIF_HWR_GENERAL_LOCKUP, " General lockup;"}, -+ {RGXFWIF_HWR_DM_RUNNING_OK, " DM running ok;"}, -+ {RGXFWIF_HWR_DM_STALLING, " DM stalling;"}, -+ {RGXFWIF_HWR_FW_FAULT, " FW fault;"}, -+ {RGXFWIF_HWR_RESTART_REQUESTED, " Restart requested;"}, -+}; -+ -+static const IMG_FLAGS2DESC asDmState2Description[] = -+{ -+ {RGXFWIF_DM_STATE_READY_FOR_HWR, " ready for hwr;"}, -+ {RGXFWIF_DM_STATE_NEEDS_SKIP, " needs skip;"}, -+ {RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP, " needs PR cleanup;"}, -+ {RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR, " needs trace clear;"}, -+ {RGXFWIF_DM_STATE_GUILTY_LOCKUP, " guilty lockup;"}, -+ {RGXFWIF_DM_STATE_INNOCENT_LOCKUP, " innocent lockup;"}, -+ {RGXFWIF_DM_STATE_GUILTY_OVERRUNING, " guilty overrunning;"}, -+ {RGXFWIF_DM_STATE_INNOCENT_OVERRUNING, " innocent overrunning;"}, -+ {RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH, " hard context switching;"}, -+ {RGXFWIF_DM_STATE_GPU_ECC_HWR, " GPU ECC hwr;"}, -+}; -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+const IMG_CHAR * const gapszMipsPermissionPTFlags[4] = -+{ -+ " ", -+ "XI ", -+ "RI ", -+ "RIXI" -+}; -+ -+const IMG_CHAR * const gapszMipsCoherencyPTFlags[8] = -+{ -+ "C", -+ "C", -+ " ", -+ "C", -+ "C", -+ "C", -+ "C", -+ " " -+}; -+ -+const IMG_CHAR * const 
gapszMipsDirtyGlobalValidPTFlags[8] = -+{ -+ " ", -+ " G", -+ " V ", -+ " VG", -+ "D ", -+ "D G", -+ "DV ", -+ "DVG" -+}; -+ -+#if !defined(NO_HARDWARE) -+/* Translation of MIPS exception encoding */ -+typedef struct _MIPS_EXCEPTION_ENCODING_ -+{ -+ const IMG_CHAR *const pszStr; /* Error type */ -+ const IMG_BOOL bIsFatal; /* Error is fatal or non-fatal */ -+} MIPS_EXCEPTION_ENCODING; -+ -+static const MIPS_EXCEPTION_ENCODING apsMIPSExcCodes[] = -+{ -+ {"Interrupt", IMG_FALSE}, -+ {"TLB modified exception", IMG_FALSE}, -+ {"TLB exception (load/instruction fetch)", IMG_FALSE}, -+ {"TLB exception (store)", IMG_FALSE}, -+ {"Address error exception (load/instruction fetch)", IMG_TRUE}, -+ {"Address error exception (store)", IMG_TRUE}, -+ {"Bus error exception (instruction fetch)", IMG_TRUE}, -+ {"Bus error exception (load/store)", IMG_TRUE}, -+ {"Syscall exception", IMG_FALSE}, -+ {"Breakpoint exception (FW assert)", IMG_FALSE}, -+ {"Reserved instruction exception", IMG_TRUE}, -+ {"Coprocessor Unusable exception", IMG_FALSE}, -+ {"Arithmetic Overflow exception", IMG_FALSE}, -+ {"Trap exception", IMG_FALSE}, -+ {NULL, IMG_FALSE}, -+ {NULL, IMG_FALSE}, -+ {"Implementation-Specific Exception 1 (COP2)", IMG_FALSE}, -+ {"CorExtend Unusable", IMG_FALSE}, -+ {"Coprocessor 2 exceptions", IMG_FALSE}, -+ {"TLB Read-Inhibit", IMG_TRUE}, -+ {"TLB Execute-Inhibit", IMG_TRUE}, -+ {NULL, IMG_FALSE}, -+ {NULL, IMG_FALSE}, -+ {"Reference to WatchHi/WatchLo address", IMG_FALSE}, -+ {"Machine check", IMG_FALSE}, -+ {NULL, IMG_FALSE}, -+ {"DSP Module State Disabled exception", IMG_FALSE}, -+ {NULL, IMG_FALSE}, -+ {NULL, IMG_FALSE}, -+ {NULL, IMG_FALSE}, -+ /* Can only happen in MIPS debug mode */ -+ {"Parity error", IMG_FALSE}, -+ {NULL, IMG_FALSE} -+}; -+ -+static IMG_CHAR const *_GetMIPSExcString(IMG_UINT32 ui32ExcCode) -+{ -+ if (ui32ExcCode >= sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "Only %lu exceptions available in MIPS, %u is not a valid exception code", -+ (unsigned long)sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING), ui32ExcCode)); -+ return NULL; -+ } -+ -+ return apsMIPSExcCodes[ui32ExcCode].pszStr; -+} -+#endif -+ -+typedef struct _RGXMIPSFW_C0_DEBUG_TBL_ENTRY_ -+{ -+ IMG_UINT32 ui32Mask; -+ const IMG_CHAR * pszExplanation; -+} RGXMIPSFW_C0_DEBUG_TBL_ENTRY; -+ -+#if !defined(NO_HARDWARE) -+static const RGXMIPSFW_C0_DEBUG_TBL_ENTRY sMIPS_C0_DebugTable[] = -+{ -+ { RGXMIPSFW_C0_DEBUG_DSS, "Debug single-step exception occurred" }, -+ { RGXMIPSFW_C0_DEBUG_DBP, "Debug software breakpoint exception occurred" }, -+ { RGXMIPSFW_C0_DEBUG_DDBL, "Debug data break exception occurred on a load" }, -+ { RGXMIPSFW_C0_DEBUG_DDBS, "Debug data break exception occurred on a store" }, -+ { RGXMIPSFW_C0_DEBUG_DIB, "Debug instruction break exception occurred" }, -+ { RGXMIPSFW_C0_DEBUG_DINT, "Debug interrupt exception occurred" }, -+ { RGXMIPSFW_C0_DEBUG_DIBIMPR, "Imprecise debug instruction break exception occurred" }, -+ { RGXMIPSFW_C0_DEBUG_DDBLIMPR, "Imprecise debug data break load exception occurred" }, -+ { RGXMIPSFW_C0_DEBUG_DDBSIMPR, "Imprecise debug data break store exception occurred" }, -+ { RGXMIPSFW_C0_DEBUG_IEXI, "Imprecise error exception inhibit controls exception occurred" }, -+ { RGXMIPSFW_C0_DEBUG_DBUSEP, "Data access Bus Error exception pending" }, -+ { RGXMIPSFW_C0_DEBUG_CACHEEP, "Imprecise Cache Error pending" }, -+ { RGXMIPSFW_C0_DEBUG_MCHECKP, "Imprecise Machine Check exception pending" }, -+ { RGXMIPSFW_C0_DEBUG_IBUSEP, "Instruction 
fetch Bus Error exception pending" }, -+ { (IMG_UINT32)RGXMIPSFW_C0_DEBUG_DBD, "Debug exception occurred in branch delay slot" } -+}; -+#endif -+#endif -+ -+static const IMG_CHAR * const apszFwOsStateName[RGXFW_CONNECTION_FW_STATE_COUNT] = -+{ -+ "offline", -+ "ready", -+ "active", -+ "offloading", -+ "cooldown" -+}; -+ -+#if defined(PVR_ENABLE_PHR) -+static const IMG_FLAGS2DESC asPHRConfig2Description[] = -+{ -+ {BIT_ULL(RGXFWIF_PHR_MODE_OFF), "off"}, -+ {BIT_ULL(RGXFWIF_PHR_MODE_RD_RESET), "reset RD hardware"}, -+ {BIT_ULL(RGXFWIF_PHR_MODE_FULL_RESET), "full gpu reset "}, -+}; -+#endif -+ -+/*! -+******************************************************************************* -+ -+ @Function _RGXDecodePMPC -+ -+ @Description -+ -+ Return the name for the PM managed Page Catalogues -+ -+ @Input ui32PC - Page Catalogue number -+ -+ @Return void -+ -+******************************************************************************/ -+static const IMG_CHAR* _RGXDecodePMPC(IMG_UINT32 ui32PC) -+{ -+ const IMG_CHAR* pszPMPC = " (-)"; -+ -+ switch (ui32PC) -+ { -+ case 0x8: pszPMPC = " (PM-VCE0)"; break; -+ case 0x9: pszPMPC = " (PM-TE0)"; break; -+ case 0xA: pszPMPC = " (PM-ZLS0)"; break; -+ case 0xB: pszPMPC = " (PM-ALIST0)"; break; -+ case 0xC: pszPMPC = " (PM-VCE1)"; break; -+ case 0xD: pszPMPC = " (PM-TE1)"; break; -+ case 0xE: pszPMPC = " (PM-ZLS1)"; break; -+ case 0xF: pszPMPC = " (PM-ALIST1)"; break; -+ } -+ -+ return pszPMPC; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function _RGXDecodeBIFReqTags -+ -+ @Description -+ -+ Decode the BIF Tag ID and sideband data fields from BIF_FAULT_BANK_REQ_STATUS regs -+ -+ @Input eBankID - BIF identifier -+ @Input ui32TagID - Tag ID value -+ @Input ui32TagSB - Tag Sideband data -+ @Output ppszTagID - Decoded string from the Tag ID -+ @Output ppszTagSB - Decoded string from the Tag SB -+ @Output pszScratchBuf - Buffer provided to the function to generate the debug strings -+ @Input ui32ScratchBufSize - Size of the provided buffer -+ -+ @Return void -+ -+******************************************************************************/ -+#include "rgxmhdefs_km.h" -+ -+static void _RGXDecodeBIFReqTagsXE(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32TagID, -+ IMG_UINT32 ui32TagSB, -+ IMG_CHAR **ppszTagID, -+ IMG_CHAR **ppszTagSB, -+ IMG_CHAR *pszScratchBuf, -+ IMG_UINT32 ui32ScratchBufSize) -+{ -+ /* default to unknown */ -+ IMG_CHAR *pszTagID = "-"; -+ IMG_CHAR *pszTagSB = "-"; -+ IMG_BOOL bNewTagEncoding = IMG_FALSE; -+ -+ PVR_ASSERT(ppszTagID != NULL); -+ PVR_ASSERT(ppszTagSB != NULL); -+ -+ /* tags updated for all cores (auto & consumer) with branch > 36 or only auto cores with branch = 36 */ -+ if ((psDevInfo->sDevFeatureCfg.ui32B > 36) || -+ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TILE_REGION_PROTECTION) && (psDevInfo->sDevFeatureCfg.ui32B == 36))) -+ { -+ bNewTagEncoding = IMG_TRUE; -+ } -+ -+ switch (ui32TagID) -+ { -+ /* MMU tags */ -+ case RGX_MH_TAG_ENCODING_MH_TAG_MMU: -+ case RGX_MH_TAG_ENCODING_MH_TAG_CPU_MMU: -+ case RGX_MH_TAG_ENCODING_MH_TAG_CPU_IFU: -+ case RGX_MH_TAG_ENCODING_MH_TAG_CPU_LSU: -+ { -+ switch (ui32TagID) -+ { -+ case RGX_MH_TAG_ENCODING_MH_TAG_MMU: pszTagID = "MMU"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_CPU_MMU: pszTagID = "CPU MMU"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_CPU_IFU: pszTagID = "CPU IFU"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_CPU_LSU: pszTagID = "CPU LSU"; break; -+ } -+ switch (ui32TagSB) -+ { -+ case 
RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PT_REQUEST: pszTagSB = "PT"; break; -+ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PD_REQUEST: pszTagSB = "PD"; break; -+ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PC_REQUEST: pszTagSB = "PC"; break; -+ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PT_REQUEST: pszTagSB = "PM PT"; break; -+ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_REQUEST: pszTagSB = "PM PD"; break; -+ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_REQUEST: pszTagSB = "PM PC"; break; -+ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_WREQUEST: pszTagSB = "PM PD W"; break; -+ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_WREQUEST: pszTagSB = "PM PC W"; break; -+ } -+ break; -+ } -+ -+ /* MIPS */ -+ case RGX_MH_TAG_ENCODING_MH_TAG_MIPS: -+ { -+ pszTagID = "MIPS"; -+ switch (ui32TagSB) -+ { -+ case RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_OPCODE_FETCH: pszTagSB = "Opcode"; break; -+ case RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_DATA_ACCESS: pszTagSB = "Data"; break; -+ } -+ break; -+ } -+ -+ /* CDM tags */ -+ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0: -+ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1: -+ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2: -+ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3: -+ { -+ switch (ui32TagID) -+ { -+ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0: pszTagID = "CDM Stage 0"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1: pszTagID = "CDM Stage 1"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2: pszTagID = "CDM Stage 2"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3: pszTagID = "CDM Stage 3"; break; -+ } -+ switch (ui32TagSB) -+ { -+ case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTROL_STREAM: pszTagSB = "Control"; break; -+ case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_INDIRECT_DATA: pszTagSB = "Indirect"; break; -+ case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_EVENT_DATA: pszTagSB = "Event"; break; -+ case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTEXT_STATE: pszTagSB = "Context"; break; -+ } -+ break; -+ } -+ -+ /* VDM tags */ -+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0: -+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1: -+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2: -+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3: -+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4: -+ { -+ switch (ui32TagID) -+ { -+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0: pszTagID = "VDM Stage 0"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1: pszTagID = "VDM Stage 1"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2: pszTagID = "VDM Stage 2"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3: pszTagID = "VDM Stage 3"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4: pszTagID = "VDM Stage 4"; break; -+ } -+ switch (ui32TagSB) -+ { -+ case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTROL: pszTagSB = "Control"; break; -+ case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STATE: pszTagSB = "State"; break; -+ case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_INDEX: pszTagSB = "Index"; break; -+ case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STACK: pszTagSB = "Stack"; break; -+ case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTEXT: pszTagSB = "Context"; break; -+ } -+ break; -+ } -+ -+ /* PDS */ -+ case RGX_MH_TAG_ENCODING_MH_TAG_PDS_0: -+ pszTagID = "PDS req 0"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_PDS_1: -+ pszTagID = "PDS req 1"; break; -+ -+ /* MCU */ -+ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCA: -+ pszTagID = "MCU USCA"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCB: -+ pszTagID = "MCU USCB"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCC: -+ pszTagID = "MCU USCC"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCD: -+ pszTagID = "MCU USCD"; break; -+ case 
RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCA:
-+			pszTagID = "MCU PDS USCA"; break;
-+		case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCB:
-+			pszTagID = "MCU PDS USCB"; break;
-+		case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCC:
-+			pszTagID = "MCU PDS USCC"; break;
-+		case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCD:
-+			pszTagID = "MCU PDS USCD"; break;
-+		case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDSRW:
-+			pszTagID = "MCU PDS PDSRW"; break;
-+
-+		/* TCU */
-+		case RGX_MH_TAG_ENCODING_MH_TAG_TCU_0:
-+			pszTagID = "TCU req 0"; break;
-+		case RGX_MH_TAG_ENCODING_MH_TAG_TCU_1:
-+			pszTagID = "TCU req 1"; break;
-+
-+		/* FBCDC */
-+		case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_0:
-+			pszTagID = bNewTagEncoding ? "TFBDC_TCU0" : "FBCDC0"; break;
-+		case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_1:
-+			pszTagID = bNewTagEncoding ? "TFBDC_ZLS0" : "FBCDC1"; break;
-+		case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_2:
-+			pszTagID = bNewTagEncoding ? "TFBDC_TCU1" : "FBCDC2"; break;
-+		case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_3:
-+			pszTagID = bNewTagEncoding ? "TFBDC_ZLS1" : "FBCDC3"; break;
-+
-+		/* USC Shared */
-+		case RGX_MH_TAG_ENCODING_MH_TAG_USC:
-+			pszTagID = "USCS"; break;
-+
-+		/* ISP */
-+		case RGX_MH_TAG_ENCODING_MH_TAG_ISP_ZLS:
-+			pszTagID = "ISP0 ZLS"; break;
-+		case RGX_MH_TAG_ENCODING_MH_TAG_ISP_DS:
-+			pszTagID = "ISP0 DS"; break;
-+
-+		/* TPF */
-+		case RGX_MH_TAG_ENCODING_MH_TAG_TPF:
-+		case RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS:
-+		case RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF:
-+		{
-+			switch (ui32TagID)
-+			{
-+				case RGX_MH_TAG_ENCODING_MH_TAG_TPF: pszTagID = "TPF0"; break;
-+				case RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS: pszTagID = "TPF0 DBIAS"; break;
-+				case RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF: pszTagID = "TPF0 SPF"; break;
-+			}
-+			switch (ui32TagSB)
-+			{
-+				case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_PDS_STATE: pszTagSB = "PDS state"; break;
-+				case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DEPTH_BIAS: pszTagSB = "Depth bias"; break;
-+				case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_FLOOR_OFFSET_DATA: pszTagSB = "Floor offset"; break;
-+				case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DELTA_DATA: pszTagSB = "Delta"; break;
-+			}
-+			break;
-+		}
-+
-+		/* IPF */
-+		case RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ:
-+		case RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS:
-+		{
-+			switch (ui32TagID)
-+			{
-+				case RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ: pszTagID = "IPF0"; break;
-+				case RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS: pszTagID = "IPF0"; break;
-+			}
-+
-+			if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_ISP_IPP_PIPES))
-+			{
-+				if (ui32TagID < RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES))
-+				{
-+					OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "CReq%d", ui32TagID);
-+					pszTagSB = pszScratchBuf;
-+				}
-+				else if (ui32TagID < 2 * RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES))
-+				{
-+					ui32TagID -= RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES);
-+					OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "PReq%d", ui32TagID);
-+					pszTagSB = pszScratchBuf;
-+				}
-+				else
-+				{
-+					switch (ui32TagSB - 2 * RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES))
-+					{
-+						case 0: pszTagSB = "RReq"; break;
-+						case 1: pszTagSB = "DBSC"; break;
-+						case 2: pszTagSB = "CPF"; break;
-+						case 3: pszTagSB = "Delta"; break;
-+					}
-+				}
-+			}
-+			break;
-+		}
-+
-+		/* VDM Stage 5 (temporary) */
-+		case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG5:
-+			pszTagID = "VDM Stage 5"; break;
-+
-+		/* TA */
-+		case RGX_MH_TAG_ENCODING_MH_TAG_TA_PPP:
-+			pszTagID = "PPP"; break;
-+		case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPWRTC:
-+			pszTagID = "TPW RTC"; break;
-+		case RGX_MH_TAG_ENCODING_MH_TAG_TA_TEACRTC:
-+			pszTagID = "TEAC RTC"; break;
-+		case 
RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGRTC: -+ pszTagID = "PSG RTC"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGREGION: -+ pszTagID = "PSG Region"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGSTREAM: -+ pszTagID = "PSG Stream"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPW: -+ pszTagID = "TPW"; break; -+ case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPC: -+ pszTagID = "TPC"; break; -+ -+ /* PM */ -+ case RGX_MH_TAG_ENCODING_MH_TAG_PM_ALLOC: -+ { -+ pszTagID = "PMA"; -+ switch (ui32TagSB) -+ { -+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAFSTACK: pszTagSB = "TA Fstack"; break; -+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMLIST: pszTagSB = "TA MList"; break; -+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DFSTACK: pszTagSB = "3D Fstack"; break; -+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMLIST: pszTagSB = "3D MList"; break; -+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX0: pszTagSB = "Context0"; break; -+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX1: pszTagSB = "Context1"; break; -+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_MAVP: pszTagSB = "MAVP"; break; -+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_UFSTACK: pszTagSB = "UFstack"; break; -+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMMUSTACK: pszTagSB = "TA MMUstack"; break; -+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMMUSTACK: pszTagSB = "3D MMUstack"; break; -+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAUFSTACK: pszTagSB = "TA UFstack"; break; -+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DUFSTACK: pszTagSB = "3D UFstack"; break; -+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAVFP: pszTagSB = "TA VFP"; break; -+ } -+ break; -+ } -+ case RGX_MH_TAG_ENCODING_MH_TAG_PM_DEALLOC: -+ { -+ pszTagID = "PMD"; -+ switch (ui32TagSB) -+ { -+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAFSTACK: pszTagSB = "TA Fstack"; break; -+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMLIST: pszTagSB = "TA MList"; break; -+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DFSTACK: pszTagSB = "3D Fstack"; break; -+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMLIST: pszTagSB = "3D MList"; break; -+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX0: pszTagSB = "Context0"; break; -+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX1: pszTagSB = "Context1"; break; -+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_UFSTACK: pszTagSB = "UFstack"; break; -+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMMUSTACK: pszTagSB = "TA MMUstack"; break; -+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMMUSTACK: pszTagSB = "3D MMUstack"; break; -+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAUFSTACK: pszTagSB = "TA UFstack"; break; -+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DUFSTACK: pszTagSB = "3D UFstack"; break; -+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAVFP: pszTagSB = "TA VFP"; break; -+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DVFP: pszTagSB = "3D VFP"; break; -+ } -+ break; -+ } -+ -+ /* TDM */ -+ case RGX_MH_TAG_ENCODING_MH_TAG_TDM_DMA: -+ { -+ pszTagID = "TDM DMA"; -+ switch (ui32TagSB) -+ { -+ case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTL_STREAM: pszTagSB = "Ctl stream"; break; -+ case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTX_BUFFER: pszTagSB = "Ctx buffer"; break; -+ case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_QUEUE_CTL: pszTagSB = "Queue ctl"; break; -+ } -+ break; -+ } -+ case RGX_MH_TAG_ENCODING_MH_TAG_TDM_CTL: -+ { -+ pszTagID = "TDM CTL"; -+ switch (ui32TagSB) -+ { -+ case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_FENCE: pszTagSB = "Fence"; break; -+ case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_CONTEXT: pszTagSB = "Context"; 
break;
-+				case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_QUEUE: pszTagSB = "Queue"; break;
-+			}
-+			break;
-+		}
-+
-+		/* PBE */
-+		case RGX_MH_TAG_ENCODING_MH_TAG_PBE0:
-+			pszTagID = "PBE0"; break;
-+		case RGX_MH_TAG_ENCODING_MH_TAG_PBE1:
-+			pszTagID = "PBE1"; break;
-+		case RGX_MH_TAG_ENCODING_MH_TAG_PBE2:
-+			pszTagID = "PBE2"; break;
-+		case RGX_MH_TAG_ENCODING_MH_TAG_PBE3:
-+			pszTagID = "PBE3"; break;
-+	}
-+
-+	*ppszTagID = pszTagID;
-+	*ppszTagSB = pszTagSB;
-+}
-+
-+/* RISC-V pf tags */
-+#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_MMU (0x00000001U)
-+#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_IFU (0x00000002U)
-+#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_LSU (0x00000003U)
-+
-+static void _RGXDecodeBIFReqTagsFwcore(PVRSRV_RGXDEV_INFO *psDevInfo,
-+                                       IMG_UINT32 ui32TagID,
-+                                       IMG_UINT32 ui32TagSB,
-+                                       IMG_CHAR **ppszTagID,
-+                                       IMG_CHAR **ppszTagSB)
-+{
-+	/* default to unknown */
-+	IMG_CHAR *pszTagID = "-";
-+	IMG_CHAR *pszTagSB = "-";
-+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
-+	{
-+		pszTagSB = "RISC-V";
-+
-+		switch (ui32TagID)
-+		{
-+			case RGX_MH_TAG_ENCODING_MH_TAG_CPU_MMU: pszTagID = "RISC-V MMU"; break;
-+			case RGX_MH_TAG_ENCODING_MH_TAG_CPU_IFU: pszTagID = "RISC-V Instruction Fetch Unit"; break;
-+			case RGX_MH_TAG_ENCODING_MH_TAG_CPU_LSU: pszTagID = "RISC-V Load/Store Unit"; break; /* Or Debug Module System Bus */
-+		}
-+	}
-+
-+	*ppszTagID = pszTagID;
-+	*ppszTagSB = pszTagSB;
-+}
-+
-+static void _RGXDecodeBIFReqTags(PVRSRV_RGXDEV_INFO *psDevInfo,
-+                                 RGXDBG_BIF_ID eBankID,
-+                                 IMG_UINT32 ui32TagID,
-+                                 IMG_UINT32 ui32TagSB,
-+                                 IMG_CHAR **ppszTagID,
-+                                 IMG_CHAR **ppszTagSB,
-+                                 IMG_CHAR *pszScratchBuf,
-+                                 IMG_UINT32 ui32ScratchBufSize)
-+{
-+	/* default to unknown */
-+	IMG_CHAR *pszTagID = "-";
-+	IMG_CHAR *pszTagSB = "-";
-+
-+	PVR_ASSERT(ppszTagID != NULL);
-+	PVR_ASSERT(ppszTagSB != NULL);
-+
-+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY))
-+	{
-+		if (eBankID == RGXDBG_FWCORE)
-+		{
-+			_RGXDecodeBIFReqTagsFwcore(psDevInfo, ui32TagID, ui32TagSB, ppszTagID, ppszTagSB);
-+		}
-+		else
-+		{
-+			_RGXDecodeBIFReqTagsXE(psDevInfo, ui32TagID, ui32TagSB, ppszTagID, ppszTagSB, pszScratchBuf, ui32ScratchBufSize);
-+		}
-+		return;
-+	}
-+
-+	switch (ui32TagID)
-+	{
-+		case 0x0:
-+		{
-+			pszTagID = "MMU";
-+			switch (ui32TagSB)
-+			{
-+				case 0x0: pszTagSB = "Table"; break;
-+				case 0x1: pszTagSB = "Directory"; break;
-+				case 0x2: pszTagSB = "Catalogue"; break;
-+			}
-+			break;
-+		}
-+		case 0x1:
-+		{
-+			pszTagID = "TLA";
-+			switch (ui32TagSB)
-+			{
-+				case 0x0: pszTagSB = "Pixel data"; break;
-+				case 0x1: pszTagSB = "Command stream data"; break;
-+				case 0x2: pszTagSB = "Fence or flush"; break;
-+			}
-+			break;
-+		}
-+		case 0x2:
-+		{
-+			pszTagID = "HOST";
-+			break;
-+		}
-+		case 0x3:
-+		{
-+			if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
-+			{
-+				pszTagID = "META";
-+				switch (ui32TagSB)
-+				{
-+					case 0x0: pszTagSB = "DCache - Thread 0"; break;
-+					case 0x1: pszTagSB = "ICache - Thread 0"; break;
-+					case 0x2: pszTagSB = "JTag - Thread 0"; break;
-+					case 0x3: pszTagSB = "Slave bus - Thread 0"; break;
-+					case 0x4: pszTagSB = "DCache - Thread 1"; break;
-+					case 0x5: pszTagSB = "ICache - Thread 1"; break;
-+					case 0x6: pszTagSB = "JTag - Thread 1"; break;
-+					case 0x7: pszTagSB = "Slave bus - Thread 1"; break;
-+				}
-+			}
-+			else if (RGX_IS_ERN_SUPPORTED(psDevInfo, 57596))
-+			{
-+				pszTagID="TCU";
-+			}
-+			else
-+			{
-+				/* Unreachable code */
-+				PVR_ASSERT(IMG_FALSE);
-+			}
-+			break;
-+		}
-+		case 0x4:
-+		{
-+			pszTagID = "USC";
-+			OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
-+			           "Cache line 
%d", (ui32TagSB & 0x3f)); -+ pszTagSB = pszScratchBuf; -+ break; -+ } -+ case 0x5: -+ { -+ pszTagID = "PBE"; -+ break; -+ } -+ case 0x6: -+ { -+ pszTagID = "ISP"; -+ switch (ui32TagSB) -+ { -+ case 0x00: pszTagSB = "ZLS"; break; -+ case 0x20: pszTagSB = "Occlusion Query"; break; -+ } -+ break; -+ } -+ case 0x7: -+ { -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) -+ { -+ if (eBankID == RGXDBG_TEXAS_BIF) -+ { -+ pszTagID = "IPF"; -+ switch (ui32TagSB) -+ { -+ case 0x0: pszTagSB = "CPF"; break; -+ case 0x1: pszTagSB = "DBSC"; break; -+ case 0x2: -+ case 0x4: -+ case 0x6: -+ case 0x8: pszTagSB = "Control Stream"; break; -+ case 0x3: -+ case 0x5: -+ case 0x7: -+ case 0x9: pszTagSB = "Primitive Block"; break; -+ } -+ } -+ else -+ { -+ pszTagID = "IPP"; -+ switch (ui32TagSB) -+ { -+ case 0x0: pszTagSB = "Macrotile Header"; break; -+ case 0x1: pszTagSB = "Region Header"; break; -+ } -+ } -+ } -+ else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SIMPLE_INTERNAL_PARAMETER_FORMAT)) -+ { -+ pszTagID = "IPF"; -+ switch (ui32TagSB) -+ { -+ case 0x0: pszTagSB = "Region Header"; break; -+ case 0x1: pszTagSB = "DBSC"; break; -+ case 0x2: pszTagSB = "CPF"; break; -+ case 0x3: pszTagSB = "Control Stream"; break; -+ case 0x4: pszTagSB = "Primitive Block"; break; -+ } -+ } -+ else -+ { -+ pszTagID = "IPF"; -+ switch (ui32TagSB) -+ { -+ case 0x0: pszTagSB = "Macrotile Header"; break; -+ case 0x1: pszTagSB = "Region Header"; break; -+ case 0x2: pszTagSB = "DBSC"; break; -+ case 0x3: pszTagSB = "CPF"; break; -+ case 0x4: -+ case 0x6: -+ case 0x8: pszTagSB = "Control Stream"; break; -+ case 0x5: -+ case 0x7: -+ case 0x9: pszTagSB = "Primitive Block"; break; -+ } -+ } -+ break; -+ } -+ case 0x8: -+ { -+ pszTagID = "CDM"; -+ switch (ui32TagSB) -+ { -+ case 0x0: pszTagSB = "Control Stream"; break; -+ case 0x1: pszTagSB = "Indirect Data"; break; -+ case 0x2: pszTagSB = "Event Write"; break; -+ case 0x3: pszTagSB = "Context State"; break; -+ } -+ break; -+ } -+ case 0x9: -+ { -+ pszTagID = "VDM"; -+ switch (ui32TagSB) -+ { -+ case 0x0: pszTagSB = "Control Stream"; break; -+ case 0x1: pszTagSB = "PPP State"; break; -+ case 0x2: pszTagSB = "Index Data"; break; -+ case 0x4: pszTagSB = "Call Stack"; break; -+ case 0x8: pszTagSB = "Context State"; break; -+ } -+ break; -+ } -+ case 0xA: -+ { -+ pszTagID = "PM"; -+ switch (ui32TagSB) -+ { -+ case 0x0: pszTagSB = "PMA_TAFSTACK"; break; -+ case 0x1: pszTagSB = "PMA_TAMLIST"; break; -+ case 0x2: pszTagSB = "PMA_3DFSTACK"; break; -+ case 0x3: pszTagSB = "PMA_3DMLIST"; break; -+ case 0x4: pszTagSB = "PMA_PMCTX0"; break; -+ case 0x5: pszTagSB = "PMA_PMCTX1"; break; -+ case 0x6: pszTagSB = "PMA_MAVP"; break; -+ case 0x7: pszTagSB = "PMA_UFSTACK"; break; -+ case 0x8: pszTagSB = "PMD_TAFSTACK"; break; -+ case 0x9: pszTagSB = "PMD_TAMLIST"; break; -+ case 0xA: pszTagSB = "PMD_3DFSTACK"; break; -+ case 0xB: pszTagSB = "PMD_3DMLIST"; break; -+ case 0xC: pszTagSB = "PMD_PMCTX0"; break; -+ case 0xD: pszTagSB = "PMD_PMCTX1"; break; -+ case 0xF: pszTagSB = "PMD_UFSTACK"; break; -+ case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break; -+ case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break; -+ case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break; -+ case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break; -+ case 0x14: pszTagSB = "PMA_TAUFSTACK"; break; -+ case 0x15: pszTagSB = "PMA_3DUFSTACK"; break; -+ case 0x16: pszTagSB = "PMD_TAUFSTACK"; break; -+ case 0x17: pszTagSB = "PMD_3DUFSTACK"; break; -+ case 0x18: pszTagSB = "PMA_TAVFP"; break; -+ case 0x19: pszTagSB = "PMD_3DVFP"; break; -+ case 0x1A: pszTagSB = 
"PMD_TAVFP"; break; -+ } -+ break; -+ } -+ case 0xB: -+ { -+ pszTagID = "TA"; -+ switch (ui32TagSB) -+ { -+ case 0x1: pszTagSB = "VCE"; break; -+ case 0x2: pszTagSB = "TPC"; break; -+ case 0x3: pszTagSB = "TE Control Stream"; break; -+ case 0x4: pszTagSB = "TE Region Header"; break; -+ case 0x5: pszTagSB = "TE Render Target Cache"; break; -+ case 0x6: pszTagSB = "TEAC Render Target Cache"; break; -+ case 0x7: pszTagSB = "VCE Render Target Cache"; break; -+ case 0x8: pszTagSB = "PPP Context State"; break; -+ } -+ break; -+ } -+ case 0xC: -+ { -+ pszTagID = "TPF"; -+ switch (ui32TagSB) -+ { -+ case 0x0: pszTagSB = "TPF0: Primitive Block"; break; -+ case 0x1: pszTagSB = "TPF0: Depth Bias"; break; -+ case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break; -+ case 0x3: pszTagSB = "CPF - Tables"; break; -+ case 0x4: pszTagSB = "TPF1: Primitive Block"; break; -+ case 0x5: pszTagSB = "TPF1: Depth Bias"; break; -+ case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break; -+ case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break; -+ case 0x8: pszTagSB = "TPF2: Primitive Block"; break; -+ case 0x9: pszTagSB = "TPF2: Depth Bias"; break; -+ case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break; -+ case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break; -+ case 0xC: pszTagSB = "TPF3: Primitive Block"; break; -+ case 0xD: pszTagSB = "TPF3: Depth Bias"; break; -+ case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break; -+ case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break; -+ } -+ break; -+ } -+ case 0xD: -+ { -+ pszTagID = "PDS"; -+ break; -+ } -+ case 0xE: -+ { -+ pszTagID = "MCU"; -+ { -+ IMG_UINT32 ui32Burst = (ui32TagSB >> 5) & 0x7; -+ IMG_UINT32 ui32GroupEnc = (ui32TagSB >> 2) & 0x7; -+ IMG_UINT32 ui32Group = ui32TagSB & 0x3; -+ -+ IMG_CHAR* pszBurst = ""; -+ IMG_CHAR* pszGroupEnc = ""; -+ IMG_CHAR* pszGroup = ""; -+ -+ switch (ui32Burst) -+ { -+ case 0x0: -+ case 0x1: pszBurst = "128bit word within the Lower 256bits"; break; -+ case 0x2: -+ case 0x3: pszBurst = "128bit word within the Upper 256bits"; break; -+ case 0x4: pszBurst = "Lower 256bits"; break; -+ case 0x5: pszBurst = "Upper 256bits"; break; -+ case 0x6: pszBurst = "512 bits"; break; -+ } -+ switch (ui32GroupEnc) -+ { -+ case 0x0: pszGroupEnc = "TPUA_USC"; break; -+ case 0x1: pszGroupEnc = "TPUB_USC"; break; -+ case 0x2: pszGroupEnc = "USCA_USC"; break; -+ case 0x3: pszGroupEnc = "USCB_USC"; break; -+ case 0x4: pszGroupEnc = "PDS_USC"; break; -+ case 0x5: -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) && -+ 6 > RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) -+ { -+ pszGroupEnc = "PDSRW"; -+ } else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) && -+ 6 == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) -+ { -+ pszGroupEnc = "UPUC_USC"; -+ } -+ break; -+ case 0x6: -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) && -+ 6 == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) -+ { -+ pszGroupEnc = "TPUC_USC"; -+ } -+ break; -+ case 0x7: -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) && -+ 6 == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) -+ { -+ pszGroupEnc = "PDSRW"; -+ } -+ break; -+ } -+ switch (ui32Group) -+ { -+ case 0x0: pszGroup = "Banks 0-3"; break; -+ case 0x1: pszGroup = "Banks 4-7"; break; -+ case 0x2: pszGroup = "Banks 8-11"; break; -+ case 0x3: pszGroup = "Banks 12-15"; break; -+ } -+ -+ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, -+ "%s, %s, %s", pszBurst, pszGroupEnc, pszGroup); -+ pszTagSB = pszScratchBuf; -+ } -+ break; -+ } -+ case 0xF: -+ { -+ pszTagID = "FB_CDC"; -+ -+ if 
(RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE))
-+			{
-+				IMG_UINT32 ui32Req = (ui32TagSB >> 0) & 0xf;
-+				IMG_UINT32 ui32MCUSB = (ui32TagSB >> 4) & 0x3;
-+				IMG_CHAR* pszReqOrig = "";
-+
-+				switch (ui32Req)
-+				{
-+					case 0x0: pszReqOrig = "FBC Request, originator ZLS"; break;
-+					case 0x1: pszReqOrig = "FBC Request, originator PBE"; break;
-+					case 0x2: pszReqOrig = "FBC Request, originator Host"; break;
-+					case 0x3: pszReqOrig = "FBC Request, originator TLA"; break;
-+					case 0x4: pszReqOrig = "FBDC Request, originator ZLS"; break;
-+					case 0x5: pszReqOrig = "FBDC Request, originator MCU"; break;
-+					case 0x6: pszReqOrig = "FBDC Request, originator Host"; break;
-+					case 0x7: pszReqOrig = "FBDC Request, originator TLA"; break;
-+					case 0x8: pszReqOrig = "FBC Request, originator ZLS Requester Fence"; break;
-+					case 0x9: pszReqOrig = "FBC Request, originator PBE Requester Fence"; break;
-+					case 0xa: pszReqOrig = "FBC Request, originator Host Requester Fence"; break;
-+					case 0xb: pszReqOrig = "FBC Request, originator TLA Requester Fence"; break;
-+					case 0xc: pszReqOrig = "Reserved"; break;
-+					case 0xd: pszReqOrig = "Reserved"; break;
-+					case 0xe: pszReqOrig = "FBDC Request, originator FBCDC(Host) Memory Fence"; break;
-+					case 0xf: pszReqOrig = "FBDC Request, originator FBCDC(TLA) Memory Fence"; break;
-+				}
-+				OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
-+				           "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB);
-+				pszTagSB = pszScratchBuf;
-+			}
-+			else
-+			{
-+				IMG_UINT32 ui32Req = (ui32TagSB >> 2) & 0x7;
-+				IMG_UINT32 ui32MCUSB = (ui32TagSB >> 0) & 0x3;
-+				IMG_CHAR* pszReqOrig = "";
-+
-+				switch (ui32Req)
-+				{
-+					case 0x0: pszReqOrig = "FBC Request, originator ZLS"; break;
-+					case 0x1: pszReqOrig = "FBC Request, originator PBE"; break;
-+					case 0x2: pszReqOrig = "FBC Request, originator Host"; break;
-+					case 0x3: pszReqOrig = "FBC Request, originator TLA"; break;
-+					case 0x4: pszReqOrig = "FBDC Request, originator ZLS"; break;
-+					case 0x5: pszReqOrig = "FBDC Request, originator MCU"; break;
-+					case 0x6: pszReqOrig = "FBDC Request, originator Host"; break;
-+					case 0x7: pszReqOrig = "FBDC Request, originator TLA"; break;
-+				}
-+				OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
-+				           "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB);
-+				pszTagSB = pszScratchBuf;
-+			}
-+			break;
-+		}
-+	} /* switch (TagID) */
-+
-+	*ppszTagID = pszTagID;
-+	*ppszTagSB = pszTagSB;
-+}
-+
-+
-+/*!
-+*******************************************************************************
-+
-+ @Function _RGXDecodeMMULevel
-+
-+ @Description
-+
-+ Return the name for the MMU level that faulted.
-+
-+ @Input ui32MMULevel - MMU level
-+
-+ @Return IMG_CHAR* to the string describing the MMU level that faulted.
-+
-+******************************************************************************/
-+static const IMG_CHAR* _RGXDecodeMMULevel(IMG_UINT32 ui32MMULevel)
-+{
-+	const IMG_CHAR* pszMMULevel = "";
-+
-+	switch (ui32MMULevel)
-+	{
-+		case 0x0: pszMMULevel = " (Page Table)"; break;
-+		case 0x1: pszMMULevel = " (Page Directory)"; break;
-+		case 0x2: pszMMULevel = " (Page Catalog)"; break;
-+		case 0x3: pszMMULevel = " (Cat Base Reg)"; break;
-+	}
-+
-+	return pszMMULevel;
-+}
-+
-+
-+/*!
-+*******************************************************************************
-+
-+ @Function _RGXDecodeMMUReqTags
-+
-+ @Description
-+
-+ Decodes the MMU Tag ID and Sideband data fields from RGX_CR_MMU_FAULT_META_STATUS and
-+ RGX_CR_MMU_FAULT_STATUS regs.
-+ -+ @Input ui32TagID - Tag ID value -+ @Input ui32TagSB - Tag Sideband data -+ @Input bRead - Read flag -+ @Output ppszTagID - Decoded string from the Tag ID -+ @Output ppszTagSB - Decoded string from the Tag SB -+ @Output pszScratchBuf - Buffer provided to the function to generate the debug strings -+ @Input ui32ScratchBufSize - Size of the provided buffer -+ -+ @Return void -+ -+******************************************************************************/ -+static void _RGXDecodeMMUReqTags(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32TagID, -+ IMG_UINT32 ui32TagSB, -+ IMG_BOOL bRead, -+ IMG_CHAR **ppszTagID, -+ IMG_CHAR **ppszTagSB, -+ IMG_CHAR *pszScratchBuf, -+ IMG_UINT32 ui32ScratchBufSize) -+{ -+ IMG_INT32 i32SideBandType = -1; -+ IMG_CHAR *pszTagID = "-"; -+ IMG_CHAR *pszTagSB = "-"; -+ -+ PVR_ASSERT(ppszTagID != NULL); -+ PVR_ASSERT(ppszTagSB != NULL); -+ -+ -+ switch (ui32TagID) -+ { -+ case 0: pszTagID = "META (Jones)"; i32SideBandType = RGXDBG_META; break; -+ case 1: pszTagID = "TLA (Jones)"; i32SideBandType = RGXDBG_TLA; break; -+ case 2: pszTagID = "DMA (Jones)"; i32SideBandType = RGXDBG_DMA; break; -+ case 3: pszTagID = "VDMM (Jones)"; i32SideBandType = RGXDBG_VDMM; break; -+ case 4: pszTagID = "CDM (Jones)"; i32SideBandType = RGXDBG_CDM; break; -+ case 5: pszTagID = "IPP (Jones)"; i32SideBandType = RGXDBG_IPP; break; -+ case 6: pszTagID = "PM (Jones)"; i32SideBandType = RGXDBG_PM; break; -+ case 7: pszTagID = "Tiling (Jones)"; i32SideBandType = RGXDBG_TILING; break; -+ case 8: pszTagID = "MCU (Texas 0)"; i32SideBandType = RGXDBG_MCU; break; -+ case 12: pszTagID = "VDMS (Black Pearl 0)"; i32SideBandType = RGXDBG_VDMS; break; -+ case 13: pszTagID = "IPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF; break; -+ case 14: pszTagID = "ISP (Black Pearl 0)"; i32SideBandType = RGXDBG_ISP; break; -+ case 15: pszTagID = "TPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF; break; -+ case 16: pszTagID = "USCS (Black Pearl 0)"; i32SideBandType = RGXDBG_USCS; break; -+ case 17: pszTagID = "PPP (Black Pearl 0)"; i32SideBandType = RGXDBG_PPP; break; -+ case 20: pszTagID = "MCU (Texas 1)"; i32SideBandType = RGXDBG_MCU; break; -+ case 24: pszTagID = "MCU (Texas 2)"; i32SideBandType = RGXDBG_MCU; break; -+ case 28: pszTagID = "VDMS (Black Pearl 1)"; i32SideBandType = RGXDBG_VDMS; break; -+ case 29: pszTagID = "IPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF; break; -+ case 30: pszTagID = "ISP (Black Pearl 1)"; i32SideBandType = RGXDBG_ISP; break; -+ case 31: pszTagID = "TPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF; break; -+ case 32: pszTagID = "USCS (Black Pearl 1)"; i32SideBandType = RGXDBG_USCS; break; -+ case 33: pszTagID = "PPP (Black Pearl 1)"; i32SideBandType = RGXDBG_PPP; break; -+ case 36: pszTagID = "MCU (Texas 3)"; i32SideBandType = RGXDBG_MCU; break; -+ case 40: pszTagID = "MCU (Texas 4)"; i32SideBandType = RGXDBG_MCU; break; -+ case 44: pszTagID = "VDMS (Black Pearl 2)"; i32SideBandType = RGXDBG_VDMS; break; -+ case 45: pszTagID = "IPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF; break; -+ case 46: pszTagID = "ISP (Black Pearl 2)"; i32SideBandType = RGXDBG_ISP; break; -+ case 47: pszTagID = "TPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF; break; -+ case 48: pszTagID = "USCS (Black Pearl 2)"; i32SideBandType = RGXDBG_USCS; break; -+ case 49: pszTagID = "PPP (Black Pearl 2)"; i32SideBandType = RGXDBG_PPP; break; -+ case 52: pszTagID = "MCU (Texas 5)"; i32SideBandType = RGXDBG_MCU; break; -+ case 56: pszTagID = "MCU (Texas 6)"; i32SideBandType = RGXDBG_MCU; 
break;
-+		case 60: pszTagID = "VDMS (Black Pearl 3)"; i32SideBandType = RGXDBG_VDMS; break;
-+		case 61: pszTagID = "IPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF; break;
-+		case 62: pszTagID = "ISP (Black Pearl 3)"; i32SideBandType = RGXDBG_ISP; break;
-+		case 63: pszTagID = "TPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF; break;
-+		case 64: pszTagID = "USCS (Black Pearl 3)"; i32SideBandType = RGXDBG_USCS; break;
-+		case 65: pszTagID = "PPP (Black Pearl 3)"; i32SideBandType = RGXDBG_PPP; break;
-+		case 68: pszTagID = "MCU (Texas 7)"; i32SideBandType = RGXDBG_MCU; break;
-+	}
-+	if (('-' == pszTagID[0]) && '\0' == pszTagID[1])
-+	{
-+
-+		if (RGX_IS_ERN_SUPPORTED(psDevInfo, 50539) ||
-+		    (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, FBCDC_ARCHITECTURE) && RGX_GET_FEATURE_VALUE(psDevInfo, FBCDC_ARCHITECTURE) >= 3))
-+		{
-+			switch (ui32TagID)
-+			{
-+				case 18: pszTagID = "TPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF_CPF; break;
-+				case 19: pszTagID = "IPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF_CPF; break;
-+				case 34: pszTagID = "TPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF_CPF; break;
-+				case 35: pszTagID = "IPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF_CPF; break;
-+				case 50: pszTagID = "TPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF_CPF; break;
-+				case 51: pszTagID = "IPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF_CPF; break;
-+				case 66: pszTagID = "TPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF_CPF; break;
-+				case 67: pszTagID = "IPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF_CPF; break;
-+			}
-+
-+			if (RGX_IS_ERN_SUPPORTED(psDevInfo, 50539))
-+			{
-+				switch (ui32TagID)
-+				{
-+					case 9: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
-+					case 10: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
-+					case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break;
-+					case 21: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
-+					case 22: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
-+					case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break;
-+					case 25: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
-+					case 26: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
-+					case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break;
-+					case 37: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
-+					case 38: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
-+					case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break;
-+					case 41: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
-+					case 42: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
-+					case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break;
-+					case 53: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
-+					case 54: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
-+					case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break;
-+					case 57: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
-+					case 58: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
-+					case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break;
-+					case 69: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
-+					case 70: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
-+					case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break; 
-+ } -+ }else -+ { -+ switch (ui32TagID) -+ { -+ case 9: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break; -+ case 10: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break; -+ case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break; -+ case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break; -+ case 22: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break; -+ case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break; -+ case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break; -+ case 26: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break; -+ case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break; -+ case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break; -+ case 38: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break; -+ case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break; -+ case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break; -+ case 42: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break; -+ case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break; -+ case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break; -+ case 54: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break; -+ case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break; -+ case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break; -+ case 58: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break; -+ case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break; -+ case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break; -+ case 70: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break; -+ case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break; -+ } -+ } -+ }else -+ { -+ switch (ui32TagID) -+ { -+ case 9: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break; -+ case 10: pszTagID = "PBE0 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break; -+ case 11: pszTagID = "PBE1 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break; -+ case 18: pszTagID = "VCE (Black Pearl 0)"; i32SideBandType = RGXDBG_VCE; break; -+ case 19: pszTagID = "FBCDC (Black Pearl 0)"; i32SideBandType = RGXDBG_FBCDC; break; -+ case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break; -+ case 22: pszTagID = "PBE0 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break; -+ case 23: pszTagID = "PBE1 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break; -+ case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break; -+ case 26: pszTagID = "PBE0 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break; -+ case 27: pszTagID = "PBE1 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break; -+ case 34: pszTagID = "VCE (Black Pearl 1)"; i32SideBandType = RGXDBG_VCE; break; -+ case 35: pszTagID = "FBCDC (Black Pearl 1)"; i32SideBandType = RGXDBG_FBCDC; break; -+ case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break; -+ case 38: pszTagID = "PBE0 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break; -+ case 39: pszTagID = "PBE1 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break; -+ case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break; -+ case 42: pszTagID = "PBE0 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break; -+ case 43: pszTagID = "PBE1 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break; -+ case 50: pszTagID = "VCE (Black Pearl 2)"; i32SideBandType = 
RGXDBG_VCE; break; -+ case 51: pszTagID = "FBCDC (Black Pearl 2)"; i32SideBandType = RGXDBG_FBCDC; break; -+ case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break; -+ case 54: pszTagID = "PBE0 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break; -+ case 55: pszTagID = "PBE1 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break; -+ case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break; -+ case 58: pszTagID = "PBE0 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break; -+ case 59: pszTagID = "PBE1 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break; -+ case 66: pszTagID = "VCE (Black Pearl 3)"; i32SideBandType = RGXDBG_VCE; break; -+ case 67: pszTagID = "FBCDC (Black Pearl 3)"; i32SideBandType = RGXDBG_FBCDC; break; -+ case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break; -+ case 70: pszTagID = "PBE0 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break; -+ case 71: pszTagID = "PBE1 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break; -+ } -+ } -+ -+ } -+ -+ switch (i32SideBandType) -+ { -+ case RGXDBG_META: -+ { -+ switch (ui32TagSB) -+ { -+ case 0x0: pszTagSB = "DCache - Thread 0"; break; -+ case 0x1: pszTagSB = "ICache - Thread 0"; break; -+ case 0x2: pszTagSB = "JTag - Thread 0"; break; -+ case 0x3: pszTagSB = "Slave bus - Thread 0"; break; -+ case 0x4: pszTagSB = "DCache - Thread 1"; break; -+ case 0x5: pszTagSB = "ICache - Thread 1"; break; -+ case 0x6: pszTagSB = "JTag - Thread 1"; break; -+ case 0x7: pszTagSB = "Slave bus - Thread 1"; break; -+ } -+ break; -+ } -+ -+ case RGXDBG_TLA: -+ { -+ switch (ui32TagSB) -+ { -+ case 0x0: pszTagSB = "Pixel data"; break; -+ case 0x1: pszTagSB = "Command stream data"; break; -+ case 0x2: pszTagSB = "Fence or flush"; break; -+ } -+ break; -+ } -+ -+ case RGXDBG_VDMM: -+ { -+ switch (ui32TagSB) -+ { -+ case 0x0: pszTagSB = "Control Stream - Read Only"; break; -+ case 0x1: pszTagSB = "PPP State - Read Only"; break; -+ case 0x2: pszTagSB = "Indices - Read Only"; break; -+ case 0x4: pszTagSB = "Call Stack - Read/Write"; break; -+ case 0x6: pszTagSB = "DrawIndirect - Read Only"; break; -+ case 0xA: pszTagSB = "Context State - Write Only"; break; -+ } -+ break; -+ } -+ -+ case RGXDBG_CDM: -+ { -+ switch (ui32TagSB) -+ { -+ case 0x0: pszTagSB = "Control Stream"; break; -+ case 0x1: pszTagSB = "Indirect Data"; break; -+ case 0x2: pszTagSB = "Event Write"; break; -+ case 0x3: pszTagSB = "Context State"; break; -+ } -+ break; -+ } -+ -+ case RGXDBG_IPP: -+ { -+ switch (ui32TagSB) -+ { -+ case 0x0: pszTagSB = "Macrotile Header"; break; -+ case 0x1: pszTagSB = "Region Header"; break; -+ } -+ break; -+ } -+ -+ case RGXDBG_PM: -+ { -+ switch (ui32TagSB) -+ { -+ case 0x0: pszTagSB = "PMA_TAFSTACK"; break; -+ case 0x1: pszTagSB = "PMA_TAMLIST"; break; -+ case 0x2: pszTagSB = "PMA_3DFSTACK"; break; -+ case 0x3: pszTagSB = "PMA_3DMLIST"; break; -+ case 0x4: pszTagSB = "PMA_PMCTX0"; break; -+ case 0x5: pszTagSB = "PMA_PMCTX1"; break; -+ case 0x6: pszTagSB = "PMA_MAVP"; break; -+ case 0x7: pszTagSB = "PMA_UFSTACK"; break; -+ case 0x8: pszTagSB = "PMD_TAFSTACK"; break; -+ case 0x9: pszTagSB = "PMD_TAMLIST"; break; -+ case 0xA: pszTagSB = "PMD_3DFSTACK"; break; -+ case 0xB: pszTagSB = "PMD_3DMLIST"; break; -+ case 0xC: pszTagSB = "PMD_PMCTX0"; break; -+ case 0xD: pszTagSB = "PMD_PMCTX1"; break; -+ case 0xF: pszTagSB = "PMD_UFSTACK"; break; -+ case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break; -+ case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break; -+ case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break; -+ case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break; -+ 
case 0x14: pszTagSB = "PMA_TAUFSTACK"; break; -+ case 0x15: pszTagSB = "PMA_3DUFSTACK"; break; -+ case 0x16: pszTagSB = "PMD_TAUFSTACK"; break; -+ case 0x17: pszTagSB = "PMD_3DUFSTACK"; break; -+ case 0x18: pszTagSB = "PMA_TAVFP"; break; -+ case 0x19: pszTagSB = "PMD_3DVFP"; break; -+ case 0x1A: pszTagSB = "PMD_TAVFP"; break; -+ } -+ break; -+ } -+ -+ case RGXDBG_TILING: -+ { -+ switch (ui32TagSB) -+ { -+ case 0x0: pszTagSB = "PSG Control Stream TP0"; break; -+ case 0x1: pszTagSB = "TPC TP0"; break; -+ case 0x2: pszTagSB = "VCE0"; break; -+ case 0x3: pszTagSB = "VCE1"; break; -+ case 0x4: pszTagSB = "PSG Control Stream TP1"; break; -+ case 0x5: pszTagSB = "TPC TP1"; break; -+ case 0x8: pszTagSB = "PSG Region Header TP0"; break; -+ case 0xC: pszTagSB = "PSG Region Header TP1"; break; -+ } -+ break; -+ } -+ -+ case RGXDBG_VDMS: -+ { -+ switch (ui32TagSB) -+ { -+ case 0x0: pszTagSB = "Context State - Write Only"; break; -+ } -+ break; -+ } -+ -+ case RGXDBG_IPF: -+ { -+ switch (ui32TagSB) -+ { -+ case 0x00: -+ case 0x20: pszTagSB = "CPF"; break; -+ case 0x01: pszTagSB = "DBSC"; break; -+ case 0x02: -+ case 0x04: -+ case 0x06: -+ case 0x08: -+ case 0x0A: -+ case 0x0C: -+ case 0x0E: -+ case 0x10: pszTagSB = "Control Stream"; break; -+ case 0x03: -+ case 0x05: -+ case 0x07: -+ case 0x09: -+ case 0x0B: -+ case 0x0D: -+ case 0x0F: -+ case 0x11: pszTagSB = "Primitive Block"; break; -+ } -+ break; -+ } -+ -+ case RGXDBG_ISP: -+ { -+ switch (ui32TagSB) -+ { -+ case 0x00: pszTagSB = "ZLS read/write"; break; -+ case 0x20: pszTagSB = "Occlusion query read/write"; break; -+ } -+ break; -+ } -+ -+ case RGXDBG_TPF: -+ { -+ switch (ui32TagSB) -+ { -+ case 0x0: pszTagSB = "TPF0: Primitive Block"; break; -+ case 0x1: pszTagSB = "TPF0: Depth Bias"; break; -+ case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break; -+ case 0x3: pszTagSB = "CPF - Tables"; break; -+ case 0x4: pszTagSB = "TPF1: Primitive Block"; break; -+ case 0x5: pszTagSB = "TPF1: Depth Bias"; break; -+ case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break; -+ case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break; -+ case 0x8: pszTagSB = "TPF2: Primitive Block"; break; -+ case 0x9: pszTagSB = "TPF2: Depth Bias"; break; -+ case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break; -+ case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break; -+ case 0xC: pszTagSB = "TPF3: Primitive Block"; break; -+ case 0xD: pszTagSB = "TPF3: Depth Bias"; break; -+ case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break; -+ case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break; -+ } -+ break; -+ } -+ -+ case RGXDBG_FBCDC: -+ { -+ /* -+ * FBC faults on a 4-cluster phantom does not always set SB -+ * bit 5, but since FBC is write-only and FBDC is read-only, -+ * we can set bit 5 if this is a write fault, before decoding. 
-+ */ -+ if (bRead == IMG_FALSE) -+ { -+ ui32TagSB |= 0x20; -+ } -+ -+ switch (ui32TagSB) -+ { -+ case 0x00: pszTagSB = "FBDC Request, originator ZLS"; break; -+ case 0x02: pszTagSB = "FBDC Request, originator MCU Dust 0"; break; -+ case 0x03: pszTagSB = "FBDC Request, originator MCU Dust 1"; break; -+ case 0x20: pszTagSB = "FBC Request, originator ZLS"; break; -+ case 0x22: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0"; break; -+ case 0x23: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1"; break; -+ case 0x24: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0"; break; -+ case 0x25: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1"; break; -+ case 0x28: pszTagSB = "FBC Request, originator ZLS Fence"; break; -+ case 0x2a: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0, Fence"; break; -+ case 0x2b: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1, Fence"; break; -+ case 0x2c: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0, Fence"; break; -+ case 0x2d: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1, Fence"; break; -+ } -+ break; -+ } -+ -+ case RGXDBG_MCU: -+ { -+ IMG_UINT32 ui32SetNumber = (ui32TagSB >> 5) & 0x7; -+ IMG_UINT32 ui32WayNumber = (ui32TagSB >> 2) & 0x7; -+ IMG_UINT32 ui32Group = ui32TagSB & 0x3; -+ -+ IMG_CHAR* pszGroup = ""; -+ -+ switch (ui32Group) -+ { -+ case 0x0: pszGroup = "Banks 0-1"; break; -+ case 0x1: pszGroup = "Banks 2-3"; break; -+ case 0x2: pszGroup = "Banks 4-5"; break; -+ case 0x3: pszGroup = "Banks 6-7"; break; -+ } -+ -+ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, -+ "Set=%d, Way=%d, %s", ui32SetNumber, ui32WayNumber, pszGroup); -+ pszTagSB = pszScratchBuf; -+ break; -+ } -+ -+ default: -+ { -+ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "SB=0x%02x", ui32TagSB); -+ pszTagSB = pszScratchBuf; -+ break; -+ } -+ } -+ -+ *ppszTagID = pszTagID; -+ *ppszTagSB = pszTagSB; -+} -+ -+ -+/*! -+******************************************************************************* -+ -+ @Function _RGXDumpRGXBIFBank -+ -+ @Description -+ -+ Dump BIF Bank state in human readable form. 
-+ -+ @Input pfnDumpDebugPrintf - The debug printf function -+ @Input pvDumpDebugFile - Optional file identifier to be passed to the -+ 'printf' function if required -+ @Input psDevInfo - RGX device info -+ @Input eBankID - BIF identifier -+ @Input ui64MMUStatus - MMU Status register value -+ @Input ui64ReqStatus - BIF request Status register value -+ @Return void -+ -+******************************************************************************/ -+static void _RGXDumpRGXBIFBank(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGXDBG_BIF_ID eBankID, -+ IMG_UINT64 ui64MMUStatus, -+ IMG_UINT64 ui64ReqStatus, -+ const IMG_CHAR *pszIndent) -+{ -+ if (ui64MMUStatus == 0x0) -+ { -+ PVR_DUMPDEBUG_LOG("%s - OK", pszBIFNames[eBankID]); -+ } -+ else -+ { -+ IMG_UINT32 ui32PageSize; -+ IMG_UINT32 ui32PC = -+ (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >> -+ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT; -+ -+ /* Bank 0 & 1 share the same fields */ -+ PVR_DUMPDEBUG_LOG("%s%s - FAULT:", -+ pszIndent, -+ pszBIFNames[eBankID]); -+ -+ /* MMU Status */ -+ { -+ IMG_UINT32 ui32MMUDataType = -+ (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK) >> -+ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT; -+ -+ IMG_BOOL bROFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN) != 0; -+ IMG_BOOL bProtFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN) != 0; -+ -+ ui32PageSize = (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >> -+ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT; -+ -+ PVR_DUMPDEBUG_LOG("%s * MMU status (0x%016" IMG_UINT64_FMTSPECx "): PC = %d%s, Page Size = %d%s%s%s.", -+ pszIndent, -+ ui64MMUStatus, -+ ui32PC, -+ (ui32PC < 0x8)?"":_RGXDecodePMPC(ui32PC), -+ ui32PageSize, -+ (bROFault)?", Read Only fault":"", -+ (bProtFault)?", PM/META protection fault":"", -+ _RGXDecodeMMULevel(ui32MMUDataType)); -+ } -+ -+ /* Req Status */ -+ { -+ IMG_CHAR *pszTagID; -+ IMG_CHAR *pszTagSB; -+ IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE]; -+ IMG_BOOL bRead; -+ IMG_UINT32 ui32TagSB, ui32TagID; -+ IMG_UINT64 ui64Addr; -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) -+ { -+ bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN) != 0; -+ ui32TagSB = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK) >> -+ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT; -+ ui32TagID = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK) >> -+ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT; -+ } -+ else -+ { -+ bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN) != 0; -+ ui32TagSB = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK) >> -+ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT; -+ ui32TagID = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK) >> -+ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT; -+ } -+ ui64Addr = ((ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK) >> -+ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT) << -+ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT; -+ -+ _RGXDecodeBIFReqTags(psDevInfo, eBankID, ui32TagID, ui32TagSB, &pszTagID, &pszTagSB, &aszScratch[0], RGX_DEBUG_STR_SIZE); -+ -+ PVR_DUMPDEBUG_LOG("%s * Request (0x%016" IMG_UINT64_FMTSPECx -+ "): %s (%s), %s " IMG_DEV_VIRTADDR_FMTSPEC ".", -+ pszIndent, -+ ui64ReqStatus, -+ pszTagID, -+ pszTagSB, -+ (bRead)?"Reading 
from":"Writing to", -+ ui64Addr); -+ } -+ } -+} -+static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN), -+ "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN mismatch!"); -+static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK), -+ "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK mismatch!"); -+static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT), -+ "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT mismatch!"); -+static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK), -+ "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK mismatch!"); -+static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT), -+ "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT mismatch!"); -+static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK), -+ "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK mismatch!"); -+static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT), -+ "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT mismatch!"); -+static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT), -+ "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT mismatch!"); -+ -+/*! -+******************************************************************************* -+ -+ @Function _RGXDumpRGXMMUFaultStatus -+ -+ @Description -+ -+ Dump MMU Fault status in human readable form. -+ -+ @Input pfnDumpDebugPrintf - The debug printf function -+ @Input pvDumpDebugFile - Optional file identifier to be passed to the -+ 'printf' function if required -+ @Input psDevInfo - RGX device info -+ @Input ui64MMUStatus - MMU Status register value -+ @Input pszMetaOrCore - string representing call is for META or MMU core -+ @Return void -+ -+******************************************************************************/ -+static void _RGXDumpRGXMMUFaultStatus(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT64 ui64MMUStatus, -+ const IMG_PCHAR pszMetaOrCore, -+ const IMG_CHAR *pszIndent) -+{ -+ if (ui64MMUStatus == 0x0) -+ { -+ PVR_DUMPDEBUG_LOG("%sMMU (%s) - OK", pszIndent, pszMetaOrCore); -+ } -+ else -+ { -+ IMG_UINT32 ui32PC = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK) >> -+ RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT; -+ IMG_UINT64 ui64Addr = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK) >> -+ RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT) << 4; /* align shift */ -+ IMG_UINT32 ui32Requester = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK) >> -+ RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT; -+ IMG_UINT32 ui32SideBand = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK) >> -+ RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT; -+ IMG_UINT32 ui32MMULevel = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK) >> -+ RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT; -+ IMG_BOOL bRead = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_RNW_EN) != 0; -+ IMG_BOOL bFault = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_FAULT_EN) != 0; -+ IMG_BOOL bROFault = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >> -+ RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x2; -+ IMG_BOOL bProtFault = ((ui64MMUStatus & 
~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >> -+ RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x3; -+ IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE]; -+ IMG_CHAR *pszTagID; -+ IMG_CHAR *pszTagSB; -+ -+ _RGXDecodeMMUReqTags(psDevInfo, ui32Requester, ui32SideBand, bRead, &pszTagID, &pszTagSB, aszScratch, RGX_DEBUG_STR_SIZE); -+ -+ PVR_DUMPDEBUG_LOG("%sMMU (%s) - FAULT:", pszIndent, pszMetaOrCore); -+ PVR_DUMPDEBUG_LOG("%s * MMU status (0x%016" IMG_UINT64_FMTSPECx "): PC = %d, %s 0x%010" IMG_UINT64_FMTSPECx ", %s (%s)%s%s%s%s.", -+ pszIndent, -+ ui64MMUStatus, -+ ui32PC, -+ (bRead)?"Reading from":"Writing to", -+ ui64Addr, -+ pszTagID, -+ pszTagSB, -+ (bFault)?", Fault":"", -+ (bROFault)?", Read Only fault":"", -+ (bProtFault)?", PM/FW core protection fault":"", -+ _RGXDecodeMMULevel(ui32MMULevel)); -+ -+ } -+} -+static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK), -+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -+static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT), -+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -+static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK), -+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -+static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT), -+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -+static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK), -+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -+static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT), -+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -+static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK), -+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -+static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT), -+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -+static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK), -+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -+static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT), -+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -+static_assert((RGX_CR_MMU_FAULT_STATUS_RNW_EN == RGX_CR_MMU_FAULT_STATUS_META_RNW_EN), -+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -+static_assert((RGX_CR_MMU_FAULT_STATUS_FAULT_EN == RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN), -+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -+static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK), -+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -+static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT), -+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -+static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK), -+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -+static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT), -+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!"); -+ -+ -+#if !defined(NO_HARDWARE) -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+static PVRSRV_ERROR _RGXMipsExtraDebug(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_MIPS_STATE *psMIPSState) -+{ -+ void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; -+ IMG_UINT32 ui32RegRead; -+ IMG_UINT32 eError = PVRSRV_OK; -+ IMG_UINT32 volatile *pui32SyncFlag; -+ -+ /* Acquire the NMI operations lock */ -+ 
OSLockAcquire(psDevInfo->hNMILock); -+ -+ /* Make sure the synchronisation flag is set to 0 */ -+ pui32SyncFlag = &psDevInfo->psRGXFWIfSysInit->sMIPSState.ui32Sync; -+ *pui32SyncFlag = 0; -+ -+ /* Readback performed as a part of memory barrier */ -+ OSWriteMemoryBarrier(pui32SyncFlag); -+ RGXFwSharedMemCacheOpPtr(pui32SyncFlag, -+ FLUSH); -+ -+ -+ /* Enable NMI issuing in the MIPS wrapper */ -+ OSWriteHWReg64(pvRegsBaseKM, -+ RGX_CR_MIPS_WRAPPER_NMI_ENABLE, -+ RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN); -+ (void) OSReadHWReg64(pvRegsBaseKM, RGX_CR_MIPS_WRAPPER_NMI_ENABLE); -+ -+ /* Check the MIPS is not in error state already (e.g. it is booting or an NMI has already been requested) */ -+ ui32RegRead = OSReadHWReg32(pvRegsBaseKM, -+ RGX_CR_MIPS_EXCEPTION_STATUS); -+ if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN) || (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN)) -+ { -+ -+ eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE; -+ goto fail; -+ } -+ ui32RegRead = 0; -+ -+ /* Issue NMI */ -+ OSWriteHWReg32(pvRegsBaseKM, -+ RGX_CR_MIPS_WRAPPER_NMI_EVENT, -+ RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN); -+ (void) OSReadHWReg64(pvRegsBaseKM, RGX_CR_MIPS_WRAPPER_NMI_EVENT); -+ -+ -+ /* Wait for NMI Taken to be asserted */ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ ui32RegRead = OSReadHWReg32(pvRegsBaseKM, -+ RGX_CR_MIPS_EXCEPTION_STATUS); -+ if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN) == 0) -+ { -+ eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE; -+ goto fail; -+ } -+ ui32RegRead = 0; -+ -+ /* Allow the firmware to proceed */ -+ *pui32SyncFlag = 1; -+ -+ /* Readback performed as a part of memory barrier */ -+ OSWriteMemoryBarrier(pui32SyncFlag); -+ RGXFwSharedMemCacheOpPtr(pui32SyncFlag, -+ FLUSH); -+ -+ -+ /* Wait for the FW to have finished the NMI routine */ -+ ui32RegRead = OSReadHWReg32(pvRegsBaseKM, -+ RGX_CR_MIPS_EXCEPTION_STATUS); -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ ui32RegRead = OSReadHWReg32(pvRegsBaseKM, -+ RGX_CR_MIPS_EXCEPTION_STATUS); -+ if (!(ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN)) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN) -+ { -+ eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE; -+ goto fail; -+ } -+ ui32RegRead = 0; -+ -+ /* Copy state */ -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfSysInit->sMIPSState, -+ INVALIDATE); -+ OSDeviceMemCopy(psMIPSState, &psDevInfo->psRGXFWIfSysInit->sMIPSState, sizeof(*psMIPSState)); -+ -+ --(psMIPSState->ui32ErrorEPC); -+ --(psMIPSState->ui32EPC); -+ -+ /* Disable NMI issuing in the MIPS wrapper */ -+ OSWriteHWReg32(pvRegsBaseKM, -+ RGX_CR_MIPS_WRAPPER_NMI_ENABLE, -+ 0); -+ (void) OSReadHWReg64(pvRegsBaseKM, RGX_CR_MIPS_WRAPPER_NMI_ENABLE); -+ -+fail: -+ /* Release the NMI operations lock */ -+ OSLockRelease(psDevInfo->hNMILock); -+ return eError; -+} -+ -+/* Print decoded information from cause register */ -+static void _RGXMipsDumpCauseDecode(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ IMG_UINT32 ui32Cause, -+ IMG_UINT32 ui32ErrorState) -+{ -+#define INDENT " " -+ const IMG_UINT32 ui32ExcCode = RGXMIPSFW_C0_CAUSE_EXCCODE(ui32Cause); -+ const IMG_CHAR * const pszException = _GetMIPSExcString(ui32ExcCode); -+ -+ if (ui32ErrorState != 0 && -+ pszException != NULL) -+ { -+ 
PVR_DUMPDEBUG_LOG(INDENT "Cause exception: %s", pszException); -+ } -+ -+ if (ui32Cause & RGXMIPSFW_C0_CAUSE_FDCIPENDING) -+ { -+ PVR_DUMPDEBUG_LOG(INDENT "FDC interrupt pending"); -+ } -+ -+ if (!(ui32Cause & RGXMIPSFW_C0_CAUSE_IV)) -+ { -+ PVR_DUMPDEBUG_LOG(INDENT "Interrupt uses general interrupt vector"); -+ } -+ -+ if (ui32Cause & RGXMIPSFW_C0_CAUSE_PCIPENDING) -+ { -+ PVR_DUMPDEBUG_LOG(INDENT "Performance Counter Interrupt pending"); -+ } -+ -+ /* Unusable Coproc exception */ -+ if (ui32ExcCode == 11) -+ { -+ PVR_DUMPDEBUG_LOG(INDENT "Unusable Coprocessor: %d", RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(ui32Cause)); -+ } -+ -+#undef INDENT -+} -+ -+static IMG_BOOL _IsFWCodeException(IMG_UINT32 ui32ExcCode) -+{ -+ if (ui32ExcCode >= sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "Only %lu exceptions available in MIPS, %u is not a valid exception code", -+ (unsigned long)sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING), ui32ExcCode)); -+ return IMG_FALSE; -+ } -+ -+ return apsMIPSExcCodes[ui32ExcCode].bIsFatal; -+} -+ -+static void _RGXMipsDumpDebugDecode(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ IMG_UINT32 ui32Debug, -+ IMG_UINT32 ui32DEPC) -+{ -+ const IMG_CHAR *pszDException = NULL; -+ IMG_UINT32 i; -+#define INDENT " " -+ -+ if (!(ui32Debug & RGXMIPSFW_C0_DEBUG_DM)) -+ { -+ return; -+ } -+ -+ PVR_DUMPDEBUG_LOG("DEBUG :"); -+ -+ pszDException = _GetMIPSExcString(RGXMIPSFW_C0_DEBUG_EXCCODE(ui32Debug)); -+ -+ if (pszDException != NULL) -+ { -+ PVR_DUMPDEBUG_LOG(INDENT "Debug exception: %s", pszDException); -+ } -+ -+ for (i = 0; i < ARRAY_SIZE(sMIPS_C0_DebugTable); ++i) -+ { -+ const RGXMIPSFW_C0_DEBUG_TBL_ENTRY * const psDebugEntry = &sMIPS_C0_DebugTable[i]; -+ -+ if (ui32Debug & psDebugEntry->ui32Mask) -+ { -+ PVR_DUMPDEBUG_LOG(INDENT "%s", psDebugEntry->pszExplanation); -+ } -+ } -+#undef INDENT -+ PVR_DUMPDEBUG_LOG("DEPC :0x%08X", ui32DEPC); -+} -+ -+static inline void _GetMipsTLBPARanges(const RGX_MIPS_TLB_ENTRY *psTLBEntry, -+ const RGX_MIPS_REMAP_ENTRY *psRemapEntry0, -+ const RGX_MIPS_REMAP_ENTRY *psRemapEntry1, -+ IMG_UINT64 *pui64PA0Start, -+ IMG_UINT64 *pui64PA0End, -+ IMG_UINT64 *pui64PA1Start, -+ IMG_UINT64 *pui64PA1End) -+{ -+ IMG_BOOL bUseRemapOutput = (psRemapEntry0 != NULL && psRemapEntry1 != NULL) ? 
IMG_TRUE : IMG_FALSE; -+ IMG_UINT64 ui64PageSize = RGXMIPSFW_TLB_GET_PAGE_SIZE(psTLBEntry->ui32TLBPageMask); -+ -+ if ((psTLBEntry->ui32TLBLo0 & RGXMIPSFW_TLB_VALID) == 0) -+ { -+ /* Dummy values to fail the range checks later */ -+ *pui64PA0Start = -1ULL; -+ *pui64PA0End = -1ULL; -+ } -+ else if (bUseRemapOutput) -+ { -+ *pui64PA0Start = (IMG_UINT64)psRemapEntry0->ui32RemapAddrOut << 12; -+ *pui64PA0End = *pui64PA0Start + ui64PageSize - 1; -+ } -+ else -+ { -+ *pui64PA0Start = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo0); -+ *pui64PA0End = *pui64PA0Start + ui64PageSize - 1; -+ } -+ -+ if ((psTLBEntry->ui32TLBLo1 & RGXMIPSFW_TLB_VALID) == 0) -+ { -+ /* Dummy values to fail the range checks later */ -+ *pui64PA1Start = -1ULL; -+ *pui64PA1End = -1ULL; -+ } -+ else if (bUseRemapOutput) -+ { -+ *pui64PA1Start = (IMG_UINT64)psRemapEntry1->ui32RemapAddrOut << 12; -+ *pui64PA1End = *pui64PA1Start + ui64PageSize - 1; -+ } -+ else -+ { -+ *pui64PA1Start = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo1); -+ *pui64PA1End = *pui64PA1Start + ui64PageSize - 1; -+ } -+} -+ -+static void _CheckMipsTLBDuplicatePAs(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ const RGX_MIPS_TLB_ENTRY *psTLB, -+ const RGX_MIPS_REMAP_ENTRY *psRemap) -+{ -+ IMG_UINT64 ui64PA0StartI, ui64PA1StartI, ui64PA0StartJ, ui64PA1StartJ; -+ IMG_UINT64 ui64PA0EndI, ui64PA1EndI, ui64PA0EndJ, ui64PA1EndJ; -+ IMG_UINT32 i, j; -+ -+#define RANGES_OVERLAP(start0,end0,start1,end1) ((start0) < (end1) && (start1) < (end0)) -+ -+ for (i = 0; i < RGXMIPSFW_NUMBER_OF_TLB_ENTRIES; i++) -+ { -+ _GetMipsTLBPARanges(&psTLB[i], -+ psRemap ? &psRemap[i] : NULL, -+ psRemap ? &psRemap[i + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL, -+ &ui64PA0StartI, &ui64PA0EndI, -+ &ui64PA1StartI, &ui64PA1EndI); -+ -+ for (j = i + 1; j < RGXMIPSFW_NUMBER_OF_TLB_ENTRIES; j++) -+ { -+ _GetMipsTLBPARanges(&psTLB[j], -+ psRemap ? &psRemap[j] : NULL, -+ psRemap ? &psRemap[j + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL, -+ &ui64PA0StartJ, &ui64PA0EndJ, -+ &ui64PA1StartJ, &ui64PA1EndJ); -+ -+ if (RANGES_OVERLAP(ui64PA0StartI, ui64PA0EndI, ui64PA0StartJ, ui64PA0EndJ) || -+ RANGES_OVERLAP(ui64PA0StartI, ui64PA0EndI, ui64PA1StartJ, ui64PA1EndJ) || -+ RANGES_OVERLAP(ui64PA1StartI, ui64PA1EndI, ui64PA0StartJ, ui64PA0EndJ) || -+ RANGES_OVERLAP(ui64PA1StartI, ui64PA1EndI, ui64PA1StartJ, ui64PA1EndJ) ) -+ { -+ PVR_DUMPDEBUG_LOG("Overlap between TLB entry %u and %u", i , j); -+ } -+ } -+ } -+} -+ -+static inline IMG_UINT32 _GetMIPSRemapRegionSize(IMG_UINT32 ui32RegionSizeEncoding) -+{ -+ return 1U << ((ui32RegionSizeEncoding + 1U) << 1U); -+} -+ -+static inline void _RGXMipsDumpTLBEntry(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ const RGX_MIPS_TLB_ENTRY *psTLBEntry, -+ const RGX_MIPS_REMAP_ENTRY *psRemapEntry0, -+ const RGX_MIPS_REMAP_ENTRY *psRemapEntry1, -+ IMG_UINT32 ui32Index) -+{ -+ IMG_BOOL bDumpRemapEntries = (psRemapEntry0 != NULL && psRemapEntry1 != NULL) ? 
IMG_TRUE : IMG_FALSE; -+ IMG_UINT64 ui64PA0 = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo0); -+ IMG_UINT64 ui64PA1 = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo1); -+ IMG_UINT64 ui64Remap0AddrOut = 0, ui64Remap1AddrOut = 0; -+ IMG_UINT32 ui32Remap0AddrIn = 0, ui32Remap1AddrIn = 0; -+ -+ if (bDumpRemapEntries) -+ { -+ /* RemapAddrIn is always 4k aligned and on 32 bit */ -+ ui32Remap0AddrIn = psRemapEntry0->ui32RemapAddrIn << 12; -+ ui32Remap1AddrIn = psRemapEntry1->ui32RemapAddrIn << 12; -+ -+ /* RemapAddrOut is always 4k aligned and on 32 or 36 bit */ -+ ui64Remap0AddrOut = (IMG_UINT64)psRemapEntry0->ui32RemapAddrOut << 12; -+ ui64Remap1AddrOut = (IMG_UINT64)psRemapEntry1->ui32RemapAddrOut << 12; -+ -+ /* If TLB and remap entries match, then merge them else, print them separately */ -+ if ((IMG_UINT32)ui64PA0 == ui32Remap0AddrIn && -+ (IMG_UINT32)ui64PA1 == ui32Remap1AddrIn) -+ { -+ ui64PA0 = ui64Remap0AddrOut; -+ ui64PA1 = ui64Remap1AddrOut; -+ bDumpRemapEntries = IMG_FALSE; -+ } -+ } -+ -+ PVR_DUMPDEBUG_LOG("%2u) VA 0x%08X (%3uk) -> PA0 0x%08" IMG_UINT64_FMTSPECx " %s%s%s, " -+ "PA1 0x%08" IMG_UINT64_FMTSPECx " %s%s%s", -+ ui32Index, -+ psTLBEntry->ui32TLBHi, -+ RGXMIPSFW_TLB_GET_PAGE_SIZE(psTLBEntry->ui32TLBPageMask), -+ ui64PA0, -+ gapszMipsPermissionPTFlags[RGXMIPSFW_TLB_GET_INHIBIT(psTLBEntry->ui32TLBLo0)], -+ gapszMipsDirtyGlobalValidPTFlags[RGXMIPSFW_TLB_GET_DGV(psTLBEntry->ui32TLBLo0)], -+ gapszMipsCoherencyPTFlags[RGXMIPSFW_TLB_GET_COHERENCY(psTLBEntry->ui32TLBLo0)], -+ ui64PA1, -+ gapszMipsPermissionPTFlags[RGXMIPSFW_TLB_GET_INHIBIT(psTLBEntry->ui32TLBLo1)], -+ gapszMipsDirtyGlobalValidPTFlags[RGXMIPSFW_TLB_GET_DGV(psTLBEntry->ui32TLBLo1)], -+ gapszMipsCoherencyPTFlags[RGXMIPSFW_TLB_GET_COHERENCY(psTLBEntry->ui32TLBLo1)]); -+ -+ if (bDumpRemapEntries) -+ { -+ PVR_DUMPDEBUG_LOG(" Remap %2u : IN 0x%08X (%3uk) => OUT 0x%08" IMG_UINT64_FMTSPECx, -+ ui32Index, -+ ui32Remap0AddrIn, -+ _GetMIPSRemapRegionSize(psRemapEntry0->ui32RemapRegionSize), -+ ui64Remap0AddrOut); -+ -+ PVR_DUMPDEBUG_LOG(" Remap %2u : IN 0x%08X (%3uk) => OUT 0x%08" IMG_UINT64_FMTSPECx, -+ ui32Index + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES, -+ ui32Remap1AddrIn, -+ _GetMIPSRemapRegionSize(psRemapEntry1->ui32RemapRegionSize), -+ ui64Remap1AddrOut); -+ } -+} -+#endif -+ -+ -+static inline IMG_CHAR const *_GetRISCVException(IMG_UINT32 ui32Mcause) -+{ -+ switch (ui32Mcause) -+ { -+#define X(value, fatal, description) \ -+ case value: \ -+ if (fatal) \ -+ return description; \ -+ return NULL; -+ -+ RGXRISCVFW_MCAUSE_TABLE -+#undef X -+ -+ default: -+ PVR_DPF((PVR_DBG_WARNING, "Invalid RISC-V FW mcause value 0x%08x", ui32Mcause)); -+ return NULL; -+ } -+} -+#endif /* !defined(NO_HARDWARE) */ -+ -+ -+/*! -+******************************************************************************* -+ -+ @Function _RGXDumpFWAssert -+ -+ @Description -+ -+ Dump FW assert strings when a thread asserts. 
-+ -+ @Input pfnDumpDebugPrintf - The debug printf function -+ @Input pvDumpDebugFile - Optional file identifier to be passed to the -+ 'printf' function if required -+ @Input psRGXFWIfTraceBufCtl - RGX FW trace buffer -+ -+ @Return void -+ -+******************************************************************************/ -+static void _RGXDumpFWAssert(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ const RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl) -+{ -+ const IMG_CHAR *pszTraceAssertPath; -+ const IMG_CHAR *pszTraceAssertInfo; -+ IMG_INT32 ui32TraceAssertLine; -+ IMG_UINT32 i; -+ -+ for (i = 0; i < RGXFW_THREAD_NUM; i++) -+ { -+ RGXFwSharedMemCacheOpValue(psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf, INVALIDATE); -+ pszTraceAssertPath = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szPath; -+ pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szInfo; -+ ui32TraceAssertLine = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.ui32LineNum; -+ -+ /* print non-null assert strings */ -+ if (*pszTraceAssertInfo) -+ { -+ PVR_DUMPDEBUG_LOG("FW-T%d Assert: %.*s (%.*s:%d)", -+ i, RGXFW_TRACE_BUFFER_ASSERT_SIZE, pszTraceAssertInfo, -+ RGXFW_TRACE_BUFFER_ASSERT_SIZE, pszTraceAssertPath, ui32TraceAssertLine); -+ } -+ } -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function _RGXDumpFWFaults -+ -+ @Description -+ -+ Dump FW assert strings when a thread asserts. -+ -+ @Input pfnDumpDebugPrintf - The debug printf function -+ @Input pvDumpDebugFile - Optional file identifier to be passed to the -+ 'printf' function if required -+ @Input psFwSysData - RGX FW shared system data -+ -+ @Return void -+ -+******************************************************************************/ -+static void _RGXDumpFWFaults(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ const RGXFWIF_SYSDATA *psFwSysData) -+{ -+ if (psFwSysData->ui32FWFaults > 0) -+ { -+ IMG_UINT32 ui32StartFault = psFwSysData->ui32FWFaults - RGXFWIF_FWFAULTINFO_MAX; -+ IMG_UINT32 ui32EndFault = psFwSysData->ui32FWFaults - 1; -+ IMG_UINT32 ui32Index; -+ -+ if (psFwSysData->ui32FWFaults < RGXFWIF_FWFAULTINFO_MAX) -+ { -+ ui32StartFault = 0; -+ } -+ -+ for (ui32Index = ui32StartFault; ui32Index <= ui32EndFault; ui32Index++) -+ { -+ const RGX_FWFAULTINFO *psFaultInfo = &psFwSysData->sFaultInfo[ui32Index % RGXFWIF_FWFAULTINFO_MAX]; -+ IMG_UINT64 ui64Seconds, ui64Nanoseconds; -+ -+ /* Split OS timestamp in seconds and nanoseconds */ -+ RGXConvertOSTimestampToSAndNS(psFaultInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); -+ -+ PVR_DUMPDEBUG_LOG("FW Fault %d: %.*s (%.*s:%d)", -+ ui32Index+1, RGXFW_TRACE_BUFFER_ASSERT_SIZE, psFaultInfo->sFaultBuf.szInfo, -+ RGXFW_TRACE_BUFFER_ASSERT_SIZE, psFaultInfo->sFaultBuf.szPath, -+ psFaultInfo->sFaultBuf.ui32LineNum); -+ PVR_DUMPDEBUG_LOG(" Data = 0x%016"IMG_UINT64_FMTSPECx", CRTimer = 0x%012"IMG_UINT64_FMTSPECx", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC, -+ psFaultInfo->ui64Data, -+ psFaultInfo->ui64CRTimer, -+ ui64Seconds, ui64Nanoseconds); -+ } -+ } -+} -+ -+static void _RGXDumpFWPoll(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ const RGXFWIF_SYSDATA *psFwSysData) -+{ -+ IMG_UINT32 i; -+ -+ for (i = 0; i < RGXFW_THREAD_NUM; i++) -+ { -+ if (psFwSysData->aui32CrPollAddr[i]) -+ { -+ PVR_DUMPDEBUG_LOG("T%u polling %s (reg:0x%08X mask:0x%08X)", -+ i, -+ ((psFwSysData->aui32CrPollAddr[i] & RGXFW_POLL_TYPE_SET)?("set"):("unset")), -+ psFwSysData->aui32CrPollAddr[i] & 
~RGXFW_POLL_TYPE_SET, -+ psFwSysData->aui32CrPollMask[i]); -+ } -+ } -+ -+} -+ -+static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ const RGXFWIF_SYSDATA *psFwSysData, -+ const RGXFWIF_HWRINFOBUF *psHWRInfoBuf, -+ PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ IMG_BOOL bAnyLocked = IMG_FALSE; -+ IMG_UINT32 dm, i; -+ IMG_UINT32 ui32LineSize; -+ IMG_CHAR *pszLine, *pszTemp; -+ const IMG_CHAR *apszDmNames[RGXFWIF_DM_MAX] = {"GP", "TDM", "TA", "3D", "CDM", "RAY", "TA2", "TA3", "TA4"}; -+ const IMG_CHAR szMsgHeader[] = "Number of HWR: "; -+ const IMG_CHAR szMsgFalse[] = "FALSE("; -+ IMG_CHAR *pszLockupType = ""; -+ const IMG_UINT32 ui32MsgHeaderCharCount = ARRAY_SIZE(szMsgHeader) - 1; /* size includes the null */ -+ const IMG_UINT32 ui32MsgFalseCharCount = ARRAY_SIZE(szMsgFalse) - 1; -+ IMG_UINT32 ui32HWRRecoveryFlags; -+ IMG_UINT32 ui32ReadIndex; -+ -+ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))) -+ { -+ apszDmNames[RGXFWIF_DM_TDM] = "2D"; -+ } -+ -+ for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) -+ { -+ if (psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] || -+ psHWRInfoBuf->aui32HwrDmOverranCount[dm]) -+ { -+ bAnyLocked = IMG_TRUE; -+ break; -+ } -+ } -+ -+ if (!PVRSRV_VZ_MODE_IS(GUEST) && !bAnyLocked && (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_HARDWARE_OK)) -+ { -+ /* No HWR situation, print nothing */ -+ return; -+ } -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ IMG_BOOL bAnyHWROccured = IMG_FALSE; -+ -+ for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) -+ { -+ if (psHWRInfoBuf->aui32HwrDmRecoveredCount[dm] != 0 || -+ psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] != 0 || -+ psHWRInfoBuf->aui32HwrDmOverranCount[dm] !=0) -+ { -+ bAnyHWROccured = IMG_TRUE; -+ break; -+ } -+ } -+ -+ if (!bAnyHWROccured) -+ { -+ return; -+ } -+ } -+ -+/* + + + + -+ + + */ -+#define FWHWRINFO_DM_STR_SIZE (5U + 10U + 1U + 10U + 1U + 10U + 3U) -+ -+ ui32LineSize = sizeof(IMG_CHAR) * ( -+ ui32MsgHeaderCharCount + -+ (psDevInfo->sDevFeatureCfg.ui32MAXDMCount * FWHWRINFO_DM_STR_SIZE) + -+ ui32MsgFalseCharCount + 1 + (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*6) + 1 -+ /* 'FALSE(' + ')' + (UINT16 max num + comma) per DM + \0 */ -+ ); -+ -+ pszLine = OSAllocMem(ui32LineSize); -+ if (pszLine == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Out of mem allocating line string (size: %d)", -+ __func__, -+ ui32LineSize)); -+ return; -+ } -+ -+ OSStringLCopy(pszLine, szMsgHeader, ui32LineSize); -+ pszTemp = pszLine + ui32MsgHeaderCharCount; -+ -+ for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) -+ { -+ pszTemp += OSSNPrintf(pszTemp, -+ FWHWRINFO_DM_STR_SIZE, -+ "%s(%u/%u+%u), ", -+ apszDmNames[dm], -+ psHWRInfoBuf->aui32HwrDmRecoveredCount[dm], -+ psHWRInfoBuf->aui32HwrDmLockedUpCount[dm], -+ psHWRInfoBuf->aui32HwrDmOverranCount[dm]); -+ } -+ -+ OSStringLCat(pszLine, szMsgFalse, ui32LineSize); -+ pszTemp += ui32MsgFalseCharCount; -+ -+ for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) -+ { -+ pszTemp += OSSNPrintf(pszTemp, -+ 10 + 1 + 1 /* UINT32 max num + comma + \0 */, -+ (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount-1 ? 
"%u," : "%u)"), -+ psHWRInfoBuf->aui32HwrDmFalseDetectCount[dm]); -+ } -+ -+ PVR_DUMPDEBUG_LOG("%s", pszLine); -+ -+ OSFreeMem(pszLine); -+ -+ /* Print out per HWR info */ -+ for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) -+ { -+ if (dm == RGXFWIF_DM_GP) -+ { -+ PVR_DUMPDEBUG_LOG("DM %d (GP)", dm); -+ } -+ else -+ { -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ IMG_UINT32 ui32HWRRecoveryFlags = psFwSysData->aui32HWRRecoveryFlags[dm]; -+ IMG_CHAR sPerDmHwrDescription[RGX_DEBUG_STR_SIZE]; -+ sPerDmHwrDescription[0] = '\0'; -+ -+ if (ui32HWRRecoveryFlags == RGXFWIF_DM_STATE_WORKING) -+ { -+ OSStringLCopy(sPerDmHwrDescription, " working;", RGX_DEBUG_STR_SIZE); -+ } -+ else -+ { -+ DebugCommonFlagStrings(sPerDmHwrDescription, RGX_DEBUG_STR_SIZE, -+ asDmState2Description, ARRAY_SIZE(asDmState2Description), -+ ui32HWRRecoveryFlags); -+ } -+ PVR_DUMPDEBUG_LOG("DM %d (HWRflags 0x%08x:%.*s)", dm, ui32HWRRecoveryFlags, -+ RGX_DEBUG_STR_SIZE, sPerDmHwrDescription); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("DM %d", dm); -+ } -+ } -+ -+ ui32ReadIndex = 0; -+ for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++) -+ { -+ IMG_BOOL bPMFault = IMG_FALSE; -+ IMG_UINT32 ui32PC; -+ IMG_UINT32 ui32PageSize = 0; -+ IMG_DEV_PHYADDR sPCDevPAddr = { 0 }; -+ const RGX_HWRINFO *psHWRInfo = &psHWRInfoBuf->sHWRInfo[ui32ReadIndex]; -+ -+ if (ui32ReadIndex >= RGXFWIF_HWINFO_MAX) -+ { -+ PVR_DUMPDEBUG_LOG("HWINFO index error: %u", ui32ReadIndex); -+ break; -+ } -+ -+ if ((psHWRInfo->eDM == dm) && (psHWRInfo->ui32HWRNumber != 0)) -+ { -+ IMG_CHAR aui8RecoveryNum[10+10+1]; -+ IMG_UINT64 ui64Seconds, ui64Nanoseconds; -+ IMG_BOOL bPageFault = IMG_FALSE; -+ IMG_DEV_VIRTADDR sFaultDevVAddr; -+ -+ /* Split OS timestamp in seconds and nanoseconds */ -+ RGXConvertOSTimestampToSAndNS(psHWRInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); -+ -+ ui32HWRRecoveryFlags = psHWRInfo->ui32HWRRecoveryFlags; -+ if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_LOCKUP) { pszLockupType = ", Guilty Lockup"; } -+ else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_LOCKUP) { pszLockupType = ", Innocent Lockup"; } -+ else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_OVERRUNING) { pszLockupType = ", Guilty Overrun"; } -+ else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_OVERRUNING) { pszLockupType = ", Innocent Overrun"; } -+ else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH) { pszLockupType = ", Hard Context Switch"; } -+ else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GPU_ECC_HWR) { pszLockupType = ", GPU ECC HWR"; } -+ -+ OSSNPrintf(aui8RecoveryNum, sizeof(aui8RecoveryNum), "Recovery %d:", psHWRInfo->ui32HWRNumber); -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) -+ { -+ PVR_DUMPDEBUG_LOG(" %s Core = %u, PID = %u / %.*s, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s", -+ aui8RecoveryNum, -+ psHWRInfo->ui32CoreID, -+ psHWRInfo->ui32PID, -+ RGXFW_PROCESS_NAME_LEN, psHWRInfo->szProcName, -+ psHWRInfo->ui32FrameNum, -+ psHWRInfo->ui32ActiveHWRTData, -+ psHWRInfo->ui32EventStatus, -+ pszLockupType); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG(" %s PID = %u / %.*s, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s", -+ aui8RecoveryNum, -+ psHWRInfo->ui32PID, -+ RGXFW_PROCESS_NAME_LEN, psHWRInfo->szProcName, -+ psHWRInfo->ui32FrameNum, -+ psHWRInfo->ui32ActiveHWRTData, -+ psHWRInfo->ui32EventStatus, -+ pszLockupType); -+ } -+ pszTemp = &aui8RecoveryNum[0]; -+ while (*pszTemp != '\0') -+ { -+ *pszTemp++ = ' '; -+ } -+ -+ /* There's currently no time correlation for the Guest OSes on 
the Firmware so there's no point printing OS Timestamps on Guests */ -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ", CyclesElapsed = %" IMG_INT64_FMTSPECd, -+ aui8RecoveryNum, -+ psHWRInfo->ui64CRTimer, -+ ui64Seconds, -+ ui64Nanoseconds, -+ (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", CyclesElapsed = %" IMG_INT64_FMTSPECd, -+ aui8RecoveryNum, -+ psHWRInfo->ui64CRTimer, -+ (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256); -+ } -+ -+ if (psHWRInfo->ui64CRTimeHWResetFinish != 0) -+ { -+ if (psHWRInfo->ui64CRTimeFreelistReady != 0) -+ { -+ /* If ui64CRTimeFreelistReady is less than ui64CRTimeHWResetFinish it means APM kicked in and the time is not valid. */ -+ if (psHWRInfo->ui64CRTimeHWResetFinish < psHWRInfo->ui64CRTimeFreelistReady) -+ { -+ PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalRecoveryTimeInCycles = %" IMG_INT64_FMTSPECd, -+ aui8RecoveryNum, -+ (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, -+ (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, -+ (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimeHWResetFinish)*256, -+ (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimer)*256); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = , TotalResetTimeInCycles = %" IMG_INT64_FMTSPECd, -+ aui8RecoveryNum, -+ (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, -+ (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, -+ (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256); -+ } -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalResetTimeInCycles = %" IMG_INT64_FMTSPECd, -+ aui8RecoveryNum, -+ (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, -+ (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, -+ (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256); -+ } -+ } -+ -+ switch (psHWRInfo->eHWRType) -+ { -+ case RGX_HWRTYPE_BIF0FAULT: -+ case RGX_HWRTYPE_BIF1FAULT: -+ { -+ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))) -+ { -+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXFWIF_HWRTYPE_BIF_BANK_GET(psHWRInfo->eHWRType), -+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus, -+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus, -+ DD_NORMAL_INDENT); -+ -+ bPageFault = IMG_TRUE; -+ sFaultDevVAddr.uiAddr = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK); -+ ui32PC = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >> -+ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT; -+ bPMFault = (ui32PC >= 8); -+ ui32PageSize = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >> -+ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT; -+ sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress; -+ } -+ } -+ break; -+ case RGX_HWRTYPE_TEXASBIF0FAULT: -+ { -+ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, 
S7_TOP_INFRASTRUCTURE))) -+ { -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) -+ { -+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, -+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus, -+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus, -+ DD_NORMAL_INDENT); -+ -+ bPageFault = IMG_TRUE; -+ sFaultDevVAddr.uiAddr = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK); -+ ui32PC = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >> -+ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT; -+ bPMFault = (ui32PC >= 8); -+ ui32PageSize = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >> -+ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT; -+ sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress; -+ } -+ } -+ } -+ break; -+ -+ case RGX_HWRTYPE_ECCFAULT: -+ { -+ PVR_DUMPDEBUG_LOG(" ECC fault GPU=0x%08x", psHWRInfo->uHWRData.sECCInfo.ui32FaultGPU); -+ } -+ break; -+ -+ case RGX_HWRTYPE_MMUFAULT: -+ { -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) -+ { -+ _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, -+ psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0], -+ "Core", -+ DD_NORMAL_INDENT); -+ -+ bPageFault = IMG_TRUE; -+ sFaultDevVAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0]; -+ sFaultDevVAddr.uiAddr &= ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK; -+ sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT; -+ sFaultDevVAddr.uiAddr <<= 4; /* align shift */ -+ ui32PC = (psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK) >> -+ RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT; -+ bPMFault = (ui32PC <= 8); -+ sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress; -+ } -+ } -+ break; -+ -+ case RGX_HWRTYPE_MMUMETAFAULT: -+ { -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) -+ { -+ _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, -+ psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0], -+ "Meta", -+ DD_NORMAL_INDENT); -+ -+ bPageFault = IMG_TRUE; -+ sFaultDevVAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0]; -+ sFaultDevVAddr.uiAddr &= ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK; -+ sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT; -+ sFaultDevVAddr.uiAddr <<= 4; /* align shift */ -+ sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress; -+ } -+ } -+ break; -+ -+ case RGX_HWRTYPE_POLLFAILURE: -+ { -+ PVR_DUMPDEBUG_LOG(" T%u polling %s (reg:0x%08X mask:0x%08X last:0x%08X)", -+ psHWRInfo->uHWRData.sPollInfo.ui32ThreadNum, -+ ((psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & RGXFW_POLL_TYPE_SET)?("set"):("unset")), -+ psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & ~RGXFW_POLL_TYPE_SET, -+ psHWRInfo->uHWRData.sPollInfo.ui32CrPollMask, -+ psHWRInfo->uHWRData.sPollInfo.ui32CrPollLastValue); -+ } -+ break; -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ case RGX_HWRTYPE_MIPSTLBFAULT: -+ { -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ IMG_UINT32 ui32EntryLo = psHWRInfo->uHWRData.sTLBInfo.ui32EntryLo; -+ -+ /* This is not exactly what the MMU code does, but the result should be the same */ -+ const IMG_UINT32 ui32UnmappedEntry = -+ ((IMG_UINT32)(MMU_BAD_PHYS_ADDR & 0xffffffff) & RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT) | RGXMIPSFW_ENTRYLO_UNCACHED; -+ -+ PVR_DUMPDEBUG_LOG(" MIPS TLB fault: BadVA = 0x%08X, EntryLo = 0x%08X" -+ " (page PA 
0x%" IMG_UINT64_FMTSPECx", V %u)", -+ psHWRInfo->uHWRData.sTLBInfo.ui32BadVAddr, -+ ui32EntryLo, -+ RGXMIPSFW_TLB_GET_PA(ui32EntryLo), -+ ui32EntryLo & RGXMIPSFW_TLB_VALID ? 1 : 0); -+ -+ if (ui32EntryLo == ui32UnmappedEntry) -+ { -+ PVR_DUMPDEBUG_LOG(" Potential use-after-free detected"); -+ } -+ } -+ } -+ break; -+#endif -+ -+ case RGX_HWRTYPE_MMURISCVFAULT: -+ { -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) -+ { -+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_FWCORE, -+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus, -+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus, -+ DD_NORMAL_INDENT); -+ -+ bPageFault = IMG_TRUE; -+ bPMFault = IMG_FALSE; -+ sFaultDevVAddr.uiAddr = -+ (psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus & -+ ~RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK); -+ ui32PageSize = -+ (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & -+ ~RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_CLRMSK) >> -+ RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_SHIFT; -+ sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress; -+ } -+ } -+ break; -+ -+ case RGX_HWRTYPE_OVERRUN: -+ case RGX_HWRTYPE_UNKNOWNFAILURE: -+ { -+ /* Nothing to dump */ -+ } -+ break; -+ -+ default: -+ { -+ PVR_DUMPDEBUG_LOG(" Unknown HWR Info type: 0x%x", psHWRInfo->eHWRType); -+ } -+ break; -+ } -+ -+ if (bPageFault) -+ { -+ RGXDumpFaultInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, psHWRInfo, -+ ui32ReadIndex, &sFaultDevVAddr, &sPCDevPAddr, bPMFault, ui32PageSize); -+ } -+ -+ } -+ -+ if (ui32ReadIndex == RGXFWIF_HWINFO_MAX_FIRST - 1) -+ ui32ReadIndex = psHWRInfoBuf->ui32WriteIndex; -+ else -+ ui32ReadIndex = (ui32ReadIndex + 1) - (ui32ReadIndex / RGXFWIF_HWINFO_LAST_INDEX) * RGXFWIF_HWINFO_MAX_LAST; -+ } -+ } -+} -+ -+ -+#if defined(SUPPORT_VALIDATION) -+static void _RGXDumpFWKickCountInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ const RGXFWIF_OSDATA *psFwOsData, -+ PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ IMG_UINT32 ui32DMIndex, ui32LineSize; -+ IMG_CHAR *pszLine, *pszTemp; -+ const IMG_CHAR *apszDmNames[RGXFWIF_DM_MAX] = {"GP", "TDM", "TA", "3D", "CDM", "RAY", "TA2", "TA3", "TA4"}; -+ const IMG_CHAR szKicksHeader[] = "RGX Kicks: "; -+ const IMG_UINT32 ui32KicksHeaderCharCount = ARRAY_SIZE(szKicksHeader) - 1; /* size includes the null */ -+ -+ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))) -+ { -+ apszDmNames[RGXFWIF_DM_TDM] = "2D"; -+ } -+ -+ ui32LineSize = sizeof(IMG_CHAR) * -+ (ui32KicksHeaderCharCount + -+ (psDevInfo->sDevFeatureCfg.ui32MAXDMCount * -+ ( 5 /*DM name + equal sign*/ + -+ 10 /*UINT32 max num of digits*/ + -+ 3 /*comma + space*/)) + -+ 1); /* \0 */ -+ -+ pszLine = OSAllocMem(ui32LineSize); -+ if (pszLine == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Out of mem allocating line string (size: %d)", -+ __func__, -+ ui32LineSize)); -+ return; -+ } -+ -+ /* Print the number of kicks in general... */ -+ OSStringLCopy(pszLine, szKicksHeader, ui32LineSize); -+ pszTemp = pszLine + ui32KicksHeaderCharCount; -+ -+ /* Invalidate the whole array before reading */ -+ RGXFwSharedMemCacheOpValue(psFwOsData->aui32KickCount, -+ INVALIDATE); -+ -+ for (ui32DMIndex = 1 /*Skip GP*/; ui32DMIndex < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; ui32DMIndex++) -+ { -+ pszTemp += OSSNPrintf(pszTemp, -+ 5 + 1 + 10 + 1 + 1 + 1 -+ /* name + equal sign + UINT32 + comma + space + \0 */, -+ "%s=%u, ", -+ apszDmNames[ui32DMIndex], -+ psFwOsData->aui32KickCount[ui32DMIndex]); -+ } -+ -+ /* Go back 2 spaces and remove the last comma+space... 
*/ -+ pszTemp -= 2; -+ *pszTemp = '\0'; -+ -+ PVR_DUMPDEBUG_LOG("%s", pszLine); -+ -+ OSFreeMem(pszLine); -+} -+#endif -+ -+ -+#if !defined(NO_HARDWARE) -+ -+/*! -+******************************************************************************* -+ -+ @Function _CheckForPendingPage -+ -+ @Description -+ -+ Check if the MMU indicates it is blocked on a pending page -+ -+ @Input psDevInfo - RGX device info -+ -+ @Return IMG_BOOL - IMG_TRUE if there is a pending page -+ -+******************************************************************************/ -+static INLINE IMG_BOOL _CheckForPendingPage(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ IMG_UINT32 ui32BIFMMUEntry; -+ -+ ui32BIFMMUEntry = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY); -+ -+ if (ui32BIFMMUEntry & RGX_CR_BIF_MMU_ENTRY_PENDING_EN) -+ { -+ return IMG_TRUE; -+ } -+ else -+ { -+ return IMG_FALSE; -+ } -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function _GetPendingPageInfo -+ -+ @Description -+ -+ Get information about the pending page from the MMU status registers -+ -+ @Input psDevInfo - RGX device info -+ @Output psDevVAddr - The device virtual address of the pending MMU address translation -+ @Output pui32CatBase - The page catalog base -+ @Output pui32DataType - The MMU entry data type -+ -+ @Return void -+ -+******************************************************************************/ -+static void _GetPendingPageInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR *psDevVAddr, -+ IMG_UINT32 *pui32CatBase, -+ IMG_UINT32 *pui32DataType) -+{ -+ IMG_UINT64 ui64BIFMMUEntryStatus; -+ -+ ui64BIFMMUEntryStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY_STATUS); -+ -+ psDevVAddr->uiAddr = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK); -+ -+ *pui32CatBase = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK) >> -+ RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT; -+ -+ *pui32DataType = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK) >> -+ RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT; -+} -+ -+#endif -+ -+void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_BOOL bRGXPoweredON) -+{ -+ IMG_CHAR *pszState, *pszReason; -+ const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; -+ const RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; -+ IMG_UINT32 ui32DriverID; -+ const RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; -+ /* space for the current clock speed and 3 previous */ -+ RGXFWIF_TIME_CORR asTimeCorrs[4]; -+ IMG_UINT32 ui32NumClockSpeedChanges; -+ -+ /* Should invalidate all reads below including when passed to functions. 
*/ -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, INVALIDATE); -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfRuntimeCfg, INVALIDATE); -+ -+#if defined(NO_HARDWARE) -+ PVR_UNREFERENCED_PARAMETER(bRGXPoweredON); -+#else -+ if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) -+ { -+ IMG_UINT64 ui64RegValMMUStatus; -+ -+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS); -+ _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, "Core", DD_SUMMARY_INDENT); -+ -+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS_META); -+ _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, "Meta", DD_SUMMARY_INDENT); -+ } -+ else -+ { -+ IMG_UINT64 ui64RegValMMUStatus, ui64RegValREQStatus; -+ -+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_MMU_STATUS); -+ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_REQ_STATUS); -+ -+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF0, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); -+ -+ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SINGLE_BIF))) -+ { -+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_MMU_STATUS); -+ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_REQ_STATUS); -+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF1, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); -+ } -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) -+ { -+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS); -+ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS); -+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_FWCORE, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); -+ } -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) -+ { -+ IMG_UINT32 ui32PhantomCnt = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) ? RGX_REQ_NUM_PHANTOMS(RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) : 0; -+ -+ if (ui32PhantomCnt > 1) -+ { -+ IMG_UINT32 ui32Phantom; -+ for (ui32Phantom = 0; ui32Phantom < ui32PhantomCnt; ui32Phantom++) -+ { -+ /* This can't be done as it may interfere with the FW... 
*/ -+ /*OSWriteHWReg64(RGX_CR_TEXAS_INDIRECT, ui32Phantom);*/ -+ -+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS); -+ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS); -+ -+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); -+ } -+ }else -+ { -+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS); -+ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS); -+ -+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); -+ } -+ } -+ } -+ -+ if (_CheckForPendingPage(psDevInfo)) -+ { -+ IMG_UINT32 ui32CatBase; -+ IMG_UINT32 ui32DataType; -+ IMG_DEV_VIRTADDR sDevVAddr; -+ -+ PVR_DUMPDEBUG_LOG("MMU Pending page: Yes"); -+ -+ _GetPendingPageInfo(psDevInfo, &sDevVAddr, &ui32CatBase, &ui32DataType); -+ -+ if (ui32CatBase >= 8) -+ { -+ PVR_DUMPDEBUG_LOG("Cannot check address on PM cat base %u", ui32CatBase); -+ } -+ else -+ { -+ IMG_DEV_PHYADDR sPCDevPAddr; -+ MMU_FAULT_DATA sFaultData; -+ IMG_BOOL bIsValid = IMG_TRUE; -+ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ IMG_UINT64 ui64CBaseMapping; -+ IMG_UINT32 ui32CBaseMapCtxReg; -+ -+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) -+ { -+ ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_LT6_AND_MMU_VER_GEQ4; -+ -+ OSWriteUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32CBaseMapCtxReg, ui32CatBase); -+ -+ ui64CBaseMapping = OSReadUncheckedHWReg64(psDevInfo->pvSecureRegsBaseKM, RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1); -+ sPCDevPAddr.uiAddr = (((ui64CBaseMapping & ~RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_CLRMSK) -+ >> RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_SHIFT) -+ << RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSHIFT); -+ bIsValid = !(ui64CBaseMapping & RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__INVALID_EN); -+ } -+ else -+ { -+ ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT; -+ -+ OSWriteUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32CBaseMapCtxReg, ui32CatBase); -+ -+ ui64CBaseMapping = OSReadUncheckedHWReg64(psDevInfo->pvSecureRegsBaseKM, RGX_CR_MMU_CBASE_MAPPING); -+ sPCDevPAddr.uiAddr = (((ui64CBaseMapping & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK) -+ >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT) -+ << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT); -+ bIsValid = !(ui64CBaseMapping & RGX_CR_MMU_CBASE_MAPPING_INVALID_EN); -+ } -+#else -+ sPCDevPAddr.uiAddr = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_CAT_BASEN(ui32CatBase)); -+#endif -+ -+ PVR_DUMPDEBUG_LOG("Checking device virtual address " IMG_DEV_VIRTADDR_FMTSPEC -+ " on cat base %u. PC Addr = 0x%" IMG_UINT64_FMTSPECx " is %s", -+ sDevVAddr.uiAddr, -+ ui32CatBase, -+ sPCDevPAddr.uiAddr, -+ bIsValid ? 
"valid":"invalid"); -+ RGXCheckFaultAddress(psDevInfo, &sDevVAddr, &sPCDevPAddr, &sFaultData); -+ RGXDumpFaultAddressHostView(&sFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_SUMMARY_INDENT); -+ } -+ } -+ } -+#endif /* NO_HARDWARE */ -+ -+ /* Firmware state */ -+ switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthStatus)) -+ { -+ case PVRSRV_DEVICE_HEALTH_STATUS_OK: pszState = "OK"; break; -+ case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: pszState = "NOT RESPONDING"; break; -+ case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: pszState = "DEAD"; break; -+ case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: pszState = "FAULT"; break; -+ case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: pszState = "UNDEFINED"; break; -+ default: pszState = "UNKNOWN"; break; -+ } -+ -+ switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthReason)) -+ { -+ case PVRSRV_DEVICE_HEALTH_REASON_NONE: pszReason = ""; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: pszReason = " - Asserted"; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: pszReason = " - Poll failing"; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: pszReason = " - Global Event Object timeouts rising"; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: pszReason = " - KCCB offset invalid"; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: pszReason = " - KCCB stalled"; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_IDLING: pszReason = " - Idling"; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: pszReason = " - Restarting"; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS: pszReason = " - Missing interrupts"; break; -+ case PVRSRV_DEVICE_HEALTH_REASON_PCI_ERROR: pszReason = " - PCI error"; break; -+ default: pszReason = " - Unknown reason"; break; -+ } -+ -+#if !defined(NO_HARDWARE) -+ /* Determine the type virtualisation support used */ -+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) -+ if (!PVRSRV_VZ_MODE_IS(NATIVE)) -+ { -+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) -+#if defined(SUPPORT_AUTOVZ) -+#if defined(SUPPORT_AUTOVZ_HW_REGS) -+ PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with HW register support"); -+#else -+ PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with shared memory"); -+#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */ -+#else -+ PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with static Fw heap allocation"); -+#endif /* defined(SUPPORT_AUTOVZ) */ -+#else -+ PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with dynamic Fw heap allocation"); -+#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ -+ } -+#endif /* (RGX_NUM_DRIVERS_SUPPORTED > 1) */ -+ -+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)) -+ if (!PVRSRV_VZ_MODE_IS(NATIVE)) -+ { -+ RGXFWIF_CONNECTION_FW_STATE eFwState; -+ RGXFWIF_CONNECTION_OS_STATE eOsState; -+ -+ KM_CONNECTION_CACHEOP(Fw, INVALIDATE); -+ KM_CONNECTION_CACHEOP(Os, INVALIDATE); -+ -+ eFwState = KM_GET_FW_CONNECTION(psDevInfo); -+ eOsState = KM_GET_OS_CONNECTION(psDevInfo); -+ -+ PVR_DUMPDEBUG_LOG("RGX Virtualisation firmware connection state: %s (Fw=%s; OS=%s)", -+ ((eFwState == RGXFW_CONNECTION_FW_ACTIVE) && (eOsState == RGXFW_CONNECTION_OS_ACTIVE)) ? ("UP") : ("DOWN"), -+ (eFwState < RGXFW_CONNECTION_FW_STATE_COUNT) ? (apszFwOsStateName[eFwState]) : ("invalid"), -+ (eOsState < RGXFW_CONNECTION_OS_STATE_COUNT) ? 
(apszFwOsStateName[eOsState]) : ("invalid")); -+ -+ } -+#endif -+ -+#if defined(SUPPORT_AUTOVZ) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) -+ if (!PVRSRV_VZ_MODE_IS(NATIVE)) -+ { -+ IMG_UINT32 ui32FwAliveTS; -+ IMG_UINT32 ui32OsAliveTS; -+ -+ KM_ALIVE_TOKEN_CACHEOP(Fw, INVALIDATE); -+ KM_ALIVE_TOKEN_CACHEOP(Os, INVALIDATE); -+ -+ ui32FwAliveTS = KM_GET_FW_ALIVE_TOKEN(psDevInfo); -+ ui32OsAliveTS = KM_GET_OS_ALIVE_TOKEN(psDevInfo); -+ -+ PVR_DUMPDEBUG_LOG("RGX Virtualisation watchdog timestamps (in GPU timer ticks): Fw=%u; OS=%u; diff(FW, OS) = %u", -+ ui32FwAliveTS, ui32OsAliveTS, ui32FwAliveTS - ui32OsAliveTS); -+ } -+#endif -+#endif /* !defined(NO_HARDWARE) */ -+ -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ IMG_CHAR sHwrStateDescription[RGX_DEBUG_STR_SIZE]; -+ IMG_BOOL bDriverIsolationEnabled = IMG_FALSE; -+ IMG_UINT32 ui32HostIsolationGroup; -+ -+ if (psFwSysData == NULL) -+ { -+ /* can't dump any more information */ -+ PVR_DUMPDEBUG_LOG("RGX FW State: %s%s", pszState, pszReason); -+ return; -+ } -+ -+ sHwrStateDescription[0] = '\0'; -+ -+ DebugCommonFlagStrings(sHwrStateDescription, RGX_DEBUG_STR_SIZE, -+ asHwrState2Description, ARRAY_SIZE(asHwrState2Description), -+ psFwSysData->ui32HWRStateFlags); -+ PVR_DUMPDEBUG_LOG("RGX FW State: %s%s (HWRState 0x%08x:%s)", pszState, pszReason, psFwSysData->ui32HWRStateFlags, sHwrStateDescription); -+ PVR_DUMPDEBUG_LOG("RGX FW Power State: %s (APM %s: %d ok, %d denied, %d non-idle, %d retry, %d other, %d total. Latency: %u ms)", -+ (psFwSysData->ePowState < ARRAY_SIZE(pszPowStateName) ? pszPowStateName[psFwSysData->ePowState] : "???"), -+ (psDevInfo->pvAPMISRData)?"enabled":"disabled", -+ psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqNonIdle, -+ psDevInfo->ui32ActivePMReqDenied, -+ psDevInfo->ui32ActivePMReqNonIdle, -+ psDevInfo->ui32ActivePMReqRetry, -+ psDevInfo->ui32ActivePMReqTotal - -+ psDevInfo->ui32ActivePMReqOk - -+ psDevInfo->ui32ActivePMReqDenied - -+ psDevInfo->ui32ActivePMReqRetry - -+ psDevInfo->ui32ActivePMReqNonIdle, -+ psDevInfo->ui32ActivePMReqTotal, -+ psRuntimeCfg->ui32ActivePMLatencyms); -+ -+ ui32NumClockSpeedChanges = (IMG_UINT32) OSAtomicRead(&psDevInfo->psDeviceNode->iNumClockSpeedChanges); -+ RGXGetTimeCorrData(psDevInfo->psDeviceNode, asTimeCorrs, ARRAY_SIZE(asTimeCorrs)); -+ -+ PVR_DUMPDEBUG_LOG("RGX DVFS: %u frequency changes. " -+ "Current frequency: %u.%03u MHz (sampled at %" IMG_UINT64_FMTSPEC " ns). 
" -+ "FW frequency: %u.%03u MHz.", -+ ui32NumClockSpeedChanges, -+ asTimeCorrs[0].ui32CoreClockSpeed / 1000000, -+ (asTimeCorrs[0].ui32CoreClockSpeed / 1000) % 1000, -+ asTimeCorrs[0].ui64OSTimeStamp, -+ psRuntimeCfg->ui32CoreClockSpeed / 1000000, -+ (psRuntimeCfg->ui32CoreClockSpeed / 1000) % 1000); -+ if (ui32NumClockSpeedChanges > 0) -+ { -+ PVR_DUMPDEBUG_LOG(" Previous frequencies: %u.%03u, %u.%03u, %u.%03u MHz (Sampled at " -+ "%" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ")", -+ asTimeCorrs[1].ui32CoreClockSpeed / 1000000, -+ (asTimeCorrs[1].ui32CoreClockSpeed / 1000) % 1000, -+ asTimeCorrs[2].ui32CoreClockSpeed / 1000000, -+ (asTimeCorrs[2].ui32CoreClockSpeed / 1000) % 1000, -+ asTimeCorrs[3].ui32CoreClockSpeed / 1000000, -+ (asTimeCorrs[3].ui32CoreClockSpeed / 1000) % 1000, -+ asTimeCorrs[1].ui64OSTimeStamp, -+ asTimeCorrs[2].ui64OSTimeStamp, -+ asTimeCorrs[3].ui64OSTimeStamp); -+ } -+ -+ ui32HostIsolationGroup = psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[RGXFW_HOST_DRIVER_ID]; -+ -+ FOREACH_SUPPORTED_DRIVER(ui32DriverID) -+ { -+ RGXFWIF_OS_RUNTIME_FLAGS sFwRunFlags = psFwSysData->asOsRuntimeFlagsMirror[ui32DriverID]; -+ IMG_UINT32 ui32IsolationGroup = psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID]; -+ IMG_BOOL bMTSEnabled = IMG_FALSE; -+ -+#if !defined(NO_HARDWARE) -+ if (bRGXPoweredON) -+ { -+ bMTSEnabled = (RGX_IS_BRN_SUPPORTED(psDevInfo, 64502) || !RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_VIRTUALISATION)) ? -+ IMG_TRUE : ((OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE_ENABLE) & BIT(ui32DriverID)) != 0); -+ } -+#endif -+ -+ PVR_DUMPDEBUG_LOG("RGX FW OS %u - State: %s; Freelists: %s%s; Priority: %u; Isolation group: %u; %s", ui32DriverID, -+ apszFwOsStateName[sFwRunFlags.bfOsState], -+ (sFwRunFlags.bfFLOk) ? "Ok" : "Not Ok", -+ (sFwRunFlags.bfFLGrowPending) ? "; Grow Request Pending" : "", -+ psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverPriority[ui32DriverID], -+ ui32IsolationGroup, -+ (bMTSEnabled) ? 
"MTS on;" : "MTS off;" -+ ); -+ -+ if (ui32IsolationGroup != ui32HostIsolationGroup) -+ { -+ bDriverIsolationEnabled = IMG_TRUE; -+ } -+ } -+ -+#if defined(PVR_ENABLE_PHR) -+ { -+ IMG_CHAR sPHRConfigDescription[RGX_DEBUG_STR_SIZE]; -+ -+ sPHRConfigDescription[0] = '\0'; -+ DebugCommonFlagStrings(sPHRConfigDescription, RGX_DEBUG_STR_SIZE, -+ asPHRConfig2Description, ARRAY_SIZE(asPHRConfig2Description), -+ BIT_ULL(psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode)); -+ -+ PVR_DUMPDEBUG_LOG("RGX PHR configuration: (%d) %.*s", psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode, RGX_DEBUG_STR_SIZE, sPHRConfigDescription); -+ } -+#endif -+ -+ if (bRGXPoweredON && RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) -+ { -+ if (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM) > 1U) -+ { -+ PVR_DUMPDEBUG_LOG("RGX MC Configuration: 0x%X (1:primary, 0:secondary)", psFwSysData->ui32McConfig); -+ } -+ } -+ -+ if (bDriverIsolationEnabled) -+ { -+ PVR_DUMPDEBUG_LOG("RGX Hard Context Switch deadline: %u ms", psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS); -+ } -+ -+ _RGXDumpFWAssert(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBufCtl); -+ _RGXDumpFWFaults(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData); -+ _RGXDumpFWPoll(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("RGX FW State: Unavailable under Guest Mode of operation"); -+ PVR_DUMPDEBUG_LOG("RGX FW Power State: Unavailable under Guest Mode of operation"); -+ } -+ -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfHWRInfoBufCtl, INVALIDATE); -+ _RGXDumpFWHWRInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData, psDevInfo->psRGXFWIfHWRInfoBufCtl, psDevInfo); -+#if defined(SUPPORT_VALIDATION) -+ _RGXDumpFWKickCountInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo->psRGXFWIfFwOsData, psDevInfo); -+#endif -+ -+#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) -+ /* Dump all non-zero values in lines of 8... */ -+ { -+ IMG_CHAR pszLine[(9*RGXFWIF_STATS_FRAMEWORK_LINESIZE)+1]; -+ const IMG_UINT32 *pui32FWStatsBuf = psFwSysData->aui32FWStatsBuf; -+ IMG_UINT32 ui32Index1, ui32Index2; -+ -+ PVR_DUMPDEBUG_LOG("STATS[START]: RGXFWIF_STATS_FRAMEWORK_MAX=%d", RGXFWIF_STATS_FRAMEWORK_MAX); -+ for (ui32Index1 = 0; ui32Index1 < RGXFWIF_STATS_FRAMEWORK_MAX; ui32Index1 += RGXFWIF_STATS_FRAMEWORK_LINESIZE) -+ { -+ IMG_UINT32 ui32OrOfValues = 0; -+ IMG_CHAR *pszBuf = pszLine; -+ -+ /* Print all values in this line and skip if all zero... 
*/ -+ for (ui32Index2 = 0; ui32Index2 < RGXFWIF_STATS_FRAMEWORK_LINESIZE; ui32Index2++) -+ { -+ ui32OrOfValues |= pui32FWStatsBuf[ui32Index1+ui32Index2]; -+ OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32FWStatsBuf[ui32Index1+ui32Index2]); -+ pszBuf += 9; /* write over the '\0' */ -+ } -+ -+ if (ui32OrOfValues != 0) -+ { -+ PVR_DUMPDEBUG_LOG("STATS[%08x]:%s", ui32Index1, pszLine); -+ } -+ } -+ PVR_DUMPDEBUG_LOG("STATS[END]"); -+ } -+#endif -+} -+ -+#if !defined(NO_HARDWARE) -+static void _RGXDumpMetaSPExtraDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+/* List of extra META Slave Port debug registers */ -+#define RGX_META_SP_EXTRA_DEBUG \ -+ X(RGX_CR_META_SP_MSLVCTRL0) \ -+ X(RGX_CR_META_SP_MSLVCTRL1) \ -+ X(RGX_CR_META_SP_MSLVDATAX) \ -+ X(RGX_CR_META_SP_MSLVIRQSTATUS) \ -+ X(RGX_CR_META_SP_MSLVIRQENABLE) \ -+ X(RGX_CR_META_SP_MSLVIRQLEVEL) -+ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+/* Order in these two initialisers and the one above must match */ -+#define RGX_META_SP_EXTRA_DEBUG__HOST_SECURITY_EQ1_AND_MRUA_ACCESSES \ -+ X(RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA) \ -+ X(RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA) \ -+ X(RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_EQ1_AND_MRUA) \ -+ X(RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA) \ -+ X(RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_EQ1_AND_MRUA) \ -+ X(RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_EQ1_AND_MRUA) -+ -+#define RGX_META_SP_EXTRA_DEBUG__HOST_SECURITY_GT1_AND_MRUA_ACCESSES \ -+ X(RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA) \ -+ X(RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA) \ -+ X(RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_MRUA) \ -+ X(RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA) \ -+ X(RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_MRUA) \ -+ X(RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_MRUA) -+#endif -+ -+ IMG_UINT32 ui32Idx; -+ IMG_UINT32 ui32RegVal; -+ IMG_UINT32 ui32RegAddr; -+ -+ const IMG_UINT32* pui32DebugRegAddr; -+ const IMG_UINT32 aui32DebugRegAddr[] = { -+#define X(A) A, -+ RGX_META_SP_EXTRA_DEBUG -+#undef X -+ }; -+ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ const IMG_UINT32 aui32DebugRegAddrUAHSV1[] = { -+#define X(A) A, -+ RGX_META_SP_EXTRA_DEBUG__HOST_SECURITY_EQ1_AND_MRUA_ACCESSES -+#undef X -+ }; -+ -+ const IMG_UINT32 aui32DebugRegAddrUAHSGT1[] = { -+#define X(A) A, -+ RGX_META_SP_EXTRA_DEBUG__HOST_SECURITY_GT1_AND_MRUA_ACCESSES -+#undef X -+ }; -+#endif -+ -+ const IMG_CHAR* apszDebugRegName[] = { -+#define X(A) #A, -+ RGX_META_SP_EXTRA_DEBUG -+#undef X -+ }; -+ -+ PVR_DUMPDEBUG_LOG("META Slave Port extra debug:"); -+ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ /* array of register offset values depends on feature. But don't augment names in apszDebugRegName */ -+ PVR_ASSERT(sizeof(aui32DebugRegAddrUAHSGT1) == sizeof(aui32DebugRegAddr)); -+ PVR_ASSERT(sizeof(aui32DebugRegAddrUAHSV1) == sizeof(aui32DebugRegAddr)); -+ pui32DebugRegAddr = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES) ? -+ ((RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) ? 
(aui32DebugRegAddrUAHSGT1) : (aui32DebugRegAddrUAHSV1)) : aui32DebugRegAddr; -+#else -+ pui32DebugRegAddr = aui32DebugRegAddr; -+#endif -+ -+ /* dump set of Slave Port debug registers */ -+ for (ui32Idx = 0; ui32Idx < sizeof(aui32DebugRegAddr)/sizeof(IMG_UINT32); ui32Idx++) -+ { -+ const IMG_CHAR* pszRegName = apszDebugRegName[ui32Idx]; -+ -+ ui32RegAddr = pui32DebugRegAddr[ui32Idx]; -+ ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr); -+ PVR_DUMPDEBUG_LOG(" * %s: 0x%8.8X", pszRegName, ui32RegVal); -+ } -+} -+#endif /* !defined(NO_HARDWARE) */ -+ -+ -+/* Helper macros to emit data */ -+#define REG32_FMTSPEC "%-30s: 0x%08X" -+#define REG64_FMTSPEC "%-30s: 0x%016" IMG_UINT64_FMTSPECX -+#define DDLOG32(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, RGX_CR_##R)); -+#define DDLOG64(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, RGX_CR_##R)); -+#define DDLOG32_DPX(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, DPX_CR_##R)); -+#define DDLOG64_DPX(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, DPX_CR_##R)); -+#define DDLOGVAL32(S,V) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, S, V); -+ -+#if !defined(NO_HARDWARE) -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+static RGX_MIPS_REMAP_ENTRY RGXDecodeMIPSRemap(IMG_UINT64 ui64RemapReg) -+{ -+ RGX_MIPS_REMAP_ENTRY sRemapInfo; -+ -+ sRemapInfo.ui32RemapAddrIn = -+ (ui64RemapReg & ~RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_CLRMSK) -+ >> RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_SHIFT; -+ -+ sRemapInfo.ui32RemapAddrOut = -+ (ui64RemapReg & ~RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_CLRMSK) -+ >> RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_SHIFT; -+ -+ sRemapInfo.ui32RemapRegionSize = -+ (ui64RemapReg & ~RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_CLRMSK) -+ >> RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_SHIFT; -+ -+ return sRemapInfo; -+} -+ -+static void RGXDumpMIPSState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; -+ RGX_MIPS_STATE sMIPSState = {0}; -+ PVRSRV_ERROR eError; -+ -+ eError = _RGXMipsExtraDebug(psDevInfo, &sMIPSState); -+ PVR_DUMPDEBUG_LOG("---- [ MIPS internal state ] ----"); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DUMPDEBUG_LOG("MIPS extra debug not available"); -+ } -+ else -+ { -+ DDLOGVAL32("PC", sMIPSState.ui32ErrorEPC); -+ DDLOGVAL32("STATUS_REGISTER", sMIPSState.ui32StatusRegister); -+ DDLOGVAL32("CAUSE_REGISTER", sMIPSState.ui32CauseRegister); -+ _RGXMipsDumpCauseDecode(pfnDumpDebugPrintf, pvDumpDebugFile, -+ sMIPSState.ui32CauseRegister, sMIPSState.ui32ErrorState); -+ DDLOGVAL32("BAD_REGISTER", sMIPSState.ui32BadRegister); -+ DDLOGVAL32("EPC", sMIPSState.ui32EPC); -+ DDLOGVAL32("SP", sMIPSState.ui32SP); -+ DDLOGVAL32("BAD_INSTRUCTION", sMIPSState.ui32BadInstr); -+ _RGXMipsDumpDebugDecode(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, -+ sMIPSState.ui32Debug, sMIPSState.ui32DEPC); -+ -+ { -+ IMG_UINT32 ui32Idx; -+ RGX_MIPS_REMAP_ENTRY *psMipsRemaps = NULL; -+ -+ IMG_BOOL bCheckBRN63553WA = -+ RGX_IS_BRN_SUPPORTED(psDevInfo, 63553) && -+ (OSReadHWReg32(pvRegsBaseKM, RGX_CR_MIPS_ADDR_REMAP5_CONFIG1) == (0x0 | RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN)); -+ -+ IMG_BOOL bUseRemapRanges = RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32; -+ -+ if (bUseRemapRanges) -+ { -+ psMipsRemaps = OSAllocMem(sizeof(RGX_MIPS_REMAP_ENTRY) * RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES); -+ PVR_LOG_RETURN_VOID_IF_FALSE(psMipsRemaps != 
NULL, "psMipsRemaps alloc failed."); -+ } -+ -+ PVR_DUMPDEBUG_LOG("TLB :"); -+ -+ for (ui32Idx = 0; ui32Idx < ARRAY_SIZE(sMIPSState.asTLB); ui32Idx++) -+ { -+ if (bUseRemapRanges) -+ { -+ psMipsRemaps[ui32Idx] = -+ RGXDecodeMIPSRemap(sMIPSState.aui64Remap[ui32Idx]); -+ -+ psMipsRemaps[ui32Idx+RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] = -+ RGXDecodeMIPSRemap(sMIPSState.aui64Remap[ui32Idx+RGXMIPSFW_NUMBER_OF_TLB_ENTRIES]); -+ } -+ -+ _RGXMipsDumpTLBEntry(pfnDumpDebugPrintf, -+ pvDumpDebugFile, -+ &sMIPSState.asTLB[ui32Idx], -+ (bUseRemapRanges) ? &psMipsRemaps[ui32Idx] : NULL, -+ (bUseRemapRanges) ? &psMipsRemaps[ui32Idx+RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL, -+ ui32Idx); -+ -+ if (bCheckBRN63553WA) -+ { -+ const RGX_MIPS_TLB_ENTRY *psTLBEntry = &sMIPSState.asTLB[ui32Idx]; -+ -+ #define BRN63553_TLB_IS_NUL(X) (((X) & RGXMIPSFW_TLB_VALID) && (RGXMIPSFW_TLB_GET_PA(X) == 0x0)) -+ -+ if (BRN63553_TLB_IS_NUL(psTLBEntry->ui32TLBLo0) || BRN63553_TLB_IS_NUL(psTLBEntry->ui32TLBLo1)) -+ { -+ PVR_DUMPDEBUG_LOG("BRN63553 WA present with a valid TLB entry mapping address 0x0."); -+ } -+ } -+ } -+ -+ /* This implicitly also checks for overlaps between memory and regbank addresses */ -+ _CheckMipsTLBDuplicatePAs(pfnDumpDebugPrintf, -+ pvDumpDebugFile, -+ sMIPSState.asTLB, -+ bUseRemapRanges ? psMipsRemaps : NULL); -+ -+ if (bUseRemapRanges) -+ { -+ /* Dump unmapped address if it was dumped in FW, otherwise it will be 0 */ -+ if (sMIPSState.ui32UnmappedAddress) -+ { -+ PVR_DUMPDEBUG_LOG("Remap unmapped address => 0x%08X", -+ sMIPSState.ui32UnmappedAddress); -+ } -+ } -+ -+ if (psMipsRemaps != NULL) -+ { -+ OSFreeMem(psMipsRemaps); -+ } -+ } -+ -+ /* Check FW code corruption in case of known errors */ -+ if (_IsFWCodeException(RGXMIPSFW_C0_CAUSE_EXCCODE(sMIPSState.ui32CauseRegister))) -+ { -+ eError = RGXValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); -+ } -+ } -+ } -+ PVR_DUMPDEBUG_LOG("--------------------------------"); -+} -+#endif -+ -+static PVRSRV_ERROR RGXDumpRISCVState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; -+ RGXRISCVFW_STATE sRiscvState; -+ const IMG_CHAR *pszException; -+ PVRSRV_ERROR eError; -+ -+ DDLOG64(FWCORE_MEM_CAT_BASE0); -+ DDLOG64(FWCORE_MEM_CAT_BASE1); -+ DDLOG64(FWCORE_MEM_CAT_BASE2); -+ DDLOG64(FWCORE_MEM_CAT_BASE3); -+ DDLOG64(FWCORE_MEM_CAT_BASE4); -+ DDLOG64(FWCORE_MEM_CAT_BASE5); -+ DDLOG64(FWCORE_MEM_CAT_BASE6); -+ DDLOG64(FWCORE_MEM_CAT_BASE7); -+ -+ /* Limit dump to what is currently being used */ -+ DDLOG64(FWCORE_ADDR_REMAP_CONFIG4); -+ DDLOG64(FWCORE_ADDR_REMAP_CONFIG5); -+ DDLOG64(FWCORE_ADDR_REMAP_CONFIG6); -+ DDLOG64(FWCORE_ADDR_REMAP_CONFIG12); -+ DDLOG64(FWCORE_ADDR_REMAP_CONFIG13); -+ DDLOG64(FWCORE_ADDR_REMAP_CONFIG14); -+ -+ DDLOG32(FWCORE_MEM_FAULT_MMU_STATUS); -+ DDLOG64(FWCORE_MEM_FAULT_REQ_STATUS); -+ DDLOG32(FWCORE_MEM_MMU_STATUS); -+ DDLOG32(FWCORE_MEM_READS_EXT_STATUS); -+ DDLOG32(FWCORE_MEM_READS_INT_STATUS); -+ -+ PVR_DUMPDEBUG_LOG("---- [ RISC-V internal state ] ----"); -+ -+#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB) -+ if (RGXRiscvIsHalted(psDevInfo)) -+ { -+ /* Avoid resuming the RISC-V FW as most operations -+ * on the debug module require a halted core */ -+ PVR_DUMPDEBUG_LOG("(skipping as RISC-V found halted)"); -+ return PVRSRV_OK; -+ } -+#endif -+ -+ eError = RGXRiscvHalt(psDevInfo); -+ 
PVR_GOTO_IF_ERROR(eError, _RISCVDMError); -+ -+#define X(name, address) \ -+ eError = RGXRiscvReadReg(psDevInfo, address, &sRiscvState.name); \ -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXRiscvReadReg", _RISCVDMError); \ -+ DDLOGVAL32(#name, sRiscvState.name); -+ -+ RGXRISCVFW_DEBUG_DUMP_REGISTERS -+#undef X -+ -+ eError = RGXRiscvResume(psDevInfo); -+ PVR_GOTO_IF_ERROR(eError, _RISCVDMError); -+ -+ pszException = _GetRISCVException(sRiscvState.mcause); -+ if (pszException != NULL) -+ { -+ PVR_DUMPDEBUG_LOG("RISC-V FW hit an exception: %s", pszException); -+ -+ eError = RGXValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); -+ } -+ } -+ -+ return PVRSRV_OK; -+ -+_RISCVDMError: -+ PVR_DPF((PVR_DBG_ERROR, "Failed to communicate with the Debug Module")); -+ -+ return eError; -+} -+#endif /* !defined(NO_HARDWARE) */ -+ -+PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+#if defined(NO_HARDWARE) -+ PVR_DUMPDEBUG_LOG("------[ RGX registers ]------"); -+ PVR_DUMPDEBUG_LOG("(Not supported for NO_HARDWARE builds)"); -+ -+ return PVRSRV_OK; -+#else /* !defined(NO_HARDWARE) */ -+ IMG_UINT32 ui32Meta = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ? RGX_GET_FEATURE_VALUE(psDevInfo, META) : 0; -+ IMG_UINT32 ui32TACycles, ui323DCycles, ui32TAOr3DCycles, ui32TAAnd3DCycles; -+ IMG_UINT32 ui32RegVal; -+ IMG_BOOL bFirmwarePerf; -+ IMG_BOOL bS7Infra = RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE); -+ IMG_BOOL bMulticore = RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT); -+ void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; -+ PVRSRV_ERROR eError; -+ -+ PVR_DUMPDEBUG_LOG("------[ RGX registers ]------"); -+ PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear): 0x%p", psDevInfo->pvRegsBaseKM); -+ PVR_DUMPDEBUG_LOG("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr); -+ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) -+ { -+ PVR_DUMPDEBUG_LOG("RGX Host Secure Register Base Address (Linear): 0x%p", -+ psDevInfo->pvSecureRegsBaseKM); -+ PVR_DUMPDEBUG_LOG("RGX Host Secure Register Base Address (Physical): 0x%08lX", -+ (unsigned long)psDevInfo->sRegsPhysBase.uiAddr + RGX_HOST_SECURE_REGBANK_OFFSET); -+ } -+#endif -+ -+ /* Check if firmware perf was set at Init time */ -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfSysInit->eFirmwarePerf, -+ INVALIDATE); -+ bFirmwarePerf = (psDevInfo->psRGXFWIfSysInit->eFirmwarePerf != FW_PERF_CONF_NONE); -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBVNC_COREID_REG)) -+ { -+ DDLOG64(CORE_ID__PBVNC); -+ } -+ else -+ { -+ DDLOG32(CORE_ID); -+ DDLOG32(CORE_REVISION); -+ } -+ DDLOG32(DESIGNER_REV_FIELD1); -+ DDLOG32(DESIGNER_REV_FIELD2); -+ DDLOG64(CHANGESET_NUMBER); -+ if (ui32Meta) -+ { -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ IMG_UINT32 ui32MSlvCtrl1Reg = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES) ? -+ ((RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) ? 
-+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA : -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA) : -+ RGX_CR_META_SP_MSLVCTRL1; -+ -+ /* Forcing bit 6 of MslvCtrl1 to 0 to avoid internal reg read going through the core */ -+ OSWriteUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32MSlvCtrl1Reg, 0x0); -+#else -+ /* Forcing bit 6 of MslvCtrl1 to 0 to avoid internal reg read going through the core */ -+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1, 0x0); -+ -+ DDLOG32(META_SP_MSLVIRQSTATUS); -+#endif -+ } -+ -+ if (bMulticore) -+ { -+ DDLOG32(MULTICORE_SYSTEM); -+ DDLOG32(MULTICORE_GPU); -+ } -+ -+ DDLOG64(CLK_CTRL); -+ DDLOG64(CLK_STATUS); -+ DDLOG64(CLK_CTRL2); -+ DDLOG64(CLK_STATUS2); -+ -+ if (bS7Infra) -+ { -+ DDLOG64(CLK_XTPLUS_CTRL); -+ DDLOG64(CLK_XTPLUS_STATUS); -+ } -+ DDLOG32(EVENT_STATUS); -+ DDLOG64(TIMER); -+ if (bS7Infra) -+ { -+ DDLOG64(MMU_FAULT_STATUS); -+ DDLOG64(MMU_FAULT_STATUS_META); -+ } -+ else -+ { -+ DDLOG32(BIF_FAULT_BANK0_MMU_STATUS); -+ DDLOG64(BIF_FAULT_BANK0_REQ_STATUS); -+ DDLOG32(BIF_FAULT_BANK1_MMU_STATUS); -+ DDLOG64(BIF_FAULT_BANK1_REQ_STATUS); -+ } -+ DDLOG32(BIF_MMU_STATUS); -+ DDLOG32(BIF_MMU_ENTRY); -+ DDLOG64(BIF_MMU_ENTRY_STATUS); -+ -+ if (bS7Infra) -+ { -+ DDLOG32(BIF_JONES_OUTSTANDING_READ); -+ DDLOG32(BIF_BLACKPEARL_OUTSTANDING_READ); -+ DDLOG32(BIF_DUST_OUTSTANDING_READ); -+ } -+ else -+ { -+ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE))) -+ { -+ DDLOG32(BIF_STATUS_MMU); -+ DDLOG32(BIF_READS_EXT_STATUS); -+ DDLOG32(BIF_READS_INT_STATUS); -+ } -+ DDLOG32(BIFPM_STATUS_MMU); -+ DDLOG32(BIFPM_READS_EXT_STATUS); -+ DDLOG32(BIFPM_READS_INT_STATUS); -+ } -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)) -+ { -+ DDLOG64(CONTEXT_MAPPING0); -+ DDLOG64(CONTEXT_MAPPING1); -+ DDLOG64(CONTEXT_MAPPING2); -+ DDLOG64(CONTEXT_MAPPING3); -+ DDLOG64(CONTEXT_MAPPING4); -+ } -+ else -+ { -+ DDLOG64(BIF_CAT_BASE_INDEX); -+ DDLOG64(BIF_CAT_BASE0); -+ DDLOG64(BIF_CAT_BASE1); -+ DDLOG64(BIF_CAT_BASE2); -+ DDLOG64(BIF_CAT_BASE3); -+ DDLOG64(BIF_CAT_BASE4); -+ DDLOG64(BIF_CAT_BASE5); -+ DDLOG64(BIF_CAT_BASE6); -+ DDLOG64(BIF_CAT_BASE7); -+ } -+ -+ DDLOG32(BIF_CTRL_INVAL); -+ DDLOG32(BIF_CTRL); -+ -+ DDLOG64(BIF_PM_CAT_BASE_VCE0); -+ DDLOG64(BIF_PM_CAT_BASE_TE0); -+ DDLOG64(BIF_PM_CAT_BASE_ALIST0); -+ DDLOG64(BIF_PM_CAT_BASE_VCE1); -+ DDLOG64(BIF_PM_CAT_BASE_TE1); -+ DDLOG64(BIF_PM_CAT_BASE_ALIST1); -+ -+ if (bMulticore) -+ { -+ DDLOG32(MULTICORE_GEOMETRY_CTRL_COMMON); -+ DDLOG32(MULTICORE_FRAGMENT_CTRL_COMMON); -+ DDLOG32(MULTICORE_COMPUTE_CTRL_COMMON); -+ } -+ -+ DDLOG32(PERF_TA_PHASE); -+ DDLOG32(PERF_TA_CYCLE); -+ DDLOG32(PERF_3D_PHASE); -+ DDLOG32(PERF_3D_CYCLE); -+ -+ ui32TACycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_TA_CYCLE); -+ ui323DCycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_3D_CYCLE); -+ ui32TAOr3DCycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_TA_OR_3D_CYCLE); -+ ui32TAAnd3DCycles = ((ui32TACycles + ui323DCycles) > ui32TAOr3DCycles) ? 
(ui32TACycles + ui323DCycles - ui32TAOr3DCycles) : 0; -+ DDLOGVAL32("PERF_TA_OR_3D_CYCLE", ui32TAOr3DCycles); -+ DDLOGVAL32("PERF_TA_AND_3D_CYCLE", ui32TAAnd3DCycles); -+ -+ DDLOG32(PERF_COMPUTE_PHASE); -+ DDLOG32(PERF_COMPUTE_CYCLE); -+ -+ DDLOG32(PM_PARTIAL_RENDER_ENABLE); -+ -+ DDLOG32(ISP_RENDER); -+ DDLOG64(TLA_STATUS); -+ DDLOG64(MCU_FENCE); -+ -+ DDLOG32(VDM_CONTEXT_STORE_STATUS); -+ DDLOG64(VDM_CONTEXT_STORE_TASK0); -+ DDLOG64(VDM_CONTEXT_STORE_TASK1); -+ DDLOG64(VDM_CONTEXT_STORE_TASK2); -+ DDLOG64(VDM_CONTEXT_RESUME_TASK0); -+ DDLOG64(VDM_CONTEXT_RESUME_TASK1); -+ DDLOG64(VDM_CONTEXT_RESUME_TASK2); -+ -+ DDLOG32(ISP_CTL); -+ DDLOG32(ISP_STATUS); -+ DDLOG32(MTS_INTCTX); -+ DDLOG32(MTS_BGCTX); -+ DDLOG32(MTS_BGCTX_COUNTED_SCHEDULE); -+ DDLOG32(MTS_SCHEDULE); -+ DDLOG32(MTS_GPU_INT_STATUS); -+ -+ DDLOG32(CDM_CONTEXT_STORE_STATUS); -+ DDLOG64(CDM_CONTEXT_PDS0); -+ DDLOG64(CDM_CONTEXT_PDS1); -+ DDLOG64(CDM_TERMINATE_PDS); -+ DDLOG64(CDM_TERMINATE_PDS1); -+ -+ if (RGX_IS_ERN_SUPPORTED(psDevInfo, 47025)) -+ { -+ DDLOG64(CDM_CONTEXT_LOAD_PDS0); -+ DDLOG64(CDM_CONTEXT_LOAD_PDS1); -+ } -+ -+ if (bS7Infra) -+ { -+ DDLOG32(JONES_IDLE); -+ } -+ -+ DDLOG32(SIDEKICK_IDLE); -+ -+ if (!bS7Infra) -+ { -+ DDLOG32(SLC_IDLE); -+ DDLOG32(SLC_STATUS0); -+ DDLOG64(SLC_STATUS1); -+ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS) && RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS)) -+ { -+ DDLOG64(SLC_STATUS2); -+ } -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) -+ { -+ DDLOG64(SLC_CTRL_BYPASS); -+ } -+ else -+ { -+ DDLOG32(SLC_CTRL_BYPASS); -+ } -+ DDLOG64(SLC_CTRL_MISC); -+ } -+ else -+ { -+ DDLOG32(SLC3_IDLE); -+ DDLOG64(SLC3_STATUS); -+ DDLOG32(SLC3_FAULT_STOP_STATUS); -+ } -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ROGUEXE) && -+ RGX_IS_FEATURE_SUPPORTED(psDevInfo, WATCHDOG_TIMER)) -+ { -+ DDLOG32(SAFETY_EVENT_STATUS__ROGUEXE); -+ DDLOG32(MTS_SAFETY_EVENT_ENABLE__ROGUEXE); -+ } -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, WATCHDOG_TIMER)) -+ { -+ DDLOG32(FWCORE_WDT_CTRL); -+ } -+ -+ if (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0) -+ { -+ DDLOG32(SCRATCH0); -+ DDLOG32(SCRATCH1); -+ DDLOG32(SCRATCH2); -+ DDLOG32(SCRATCH3); -+ DDLOG32(SCRATCH4); -+ DDLOG32(SCRATCH5); -+ DDLOG32(SCRATCH6); -+ DDLOG32(SCRATCH7); -+ DDLOG32(SCRATCH8); -+ DDLOG32(SCRATCH9); -+ DDLOG32(SCRATCH10); -+ DDLOG32(SCRATCH11); -+ DDLOG32(SCRATCH12); -+ DDLOG32(SCRATCH13); -+ DDLOG32(SCRATCH14); -+ DDLOG32(SCRATCH15); -+ } -+ -+ if (ui32Meta) -+ { -+ IMG_BOOL bIsT0Enabled = IMG_FALSE, bIsFWFaulted = IMG_FALSE; -+ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ IMG_UINT32 ui32MSlvIrqStatusReg = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES) ? -+ ((RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) ? 
-+ RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA : -+ RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA) : -+ RGX_CR_META_SP_MSLVIRQSTATUS; -+ -+ PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, "META_SP_MSLVIRQSTATUS", OSReadUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32MSlvIrqStatusReg)); -+#endif -+ -+ eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegVal); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); -+ DDLOGVAL32("T0 TXENABLE", ui32RegVal); -+ if (ui32RegVal & META_CR_TXENABLE_ENABLE_BIT) -+ { -+ bIsT0Enabled = IMG_TRUE; -+ } -+ -+ eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0STATUS_OFFSET, &ui32RegVal); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); -+ DDLOGVAL32("T0 TXSTATUS", ui32RegVal); -+ -+ /* check for FW fault */ -+ if (((ui32RegVal >> 20) & 0x3) == 0x2) -+ { -+ bIsFWFaulted = IMG_TRUE; -+ } -+ -+ eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0DEFR_OFFSET, &ui32RegVal); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); -+ DDLOGVAL32("T0 TXDEFR", ui32RegVal); -+ -+ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PC, &ui32RegVal); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); -+ DDLOGVAL32("T0 PC", ui32RegVal); -+ -+ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PCX, &ui32RegVal); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); -+ DDLOGVAL32("T0 PCX", ui32RegVal); -+ -+ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_SP, &ui32RegVal); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); -+ DDLOGVAL32("T0 SP", ui32RegVal); -+ -+ if ((ui32Meta == MTP218) || (ui32Meta == MTP219)) -+ { -+ eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1ENABLE_OFFSET, &ui32RegVal); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); -+ DDLOGVAL32("T1 TXENABLE", ui32RegVal); -+ -+ eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1STATUS_OFFSET, &ui32RegVal); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); -+ DDLOGVAL32("T1 TXSTATUS", ui32RegVal); -+ -+ eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1DEFR_OFFSET, &ui32RegVal); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); -+ DDLOGVAL32("T1 TXDEFR", ui32RegVal); -+ -+ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PC, &ui32RegVal); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); -+ DDLOGVAL32("T1 PC", ui32RegVal); -+ -+ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PCX, &ui32RegVal); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); -+ DDLOGVAL32("T1 PCX", ui32RegVal); -+ -+ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_SP, &ui32RegVal); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); -+ DDLOGVAL32("T1 SP", ui32RegVal); -+ } -+ -+ if (bFirmwarePerf) -+ { -+ eError = RGXReadFWModuleAddr(psDevInfo, META_CR_PERF_COUNT0, &ui32RegVal); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); -+ DDLOGVAL32("META_CR_PERF_COUNT0", ui32RegVal); -+ -+ eError = RGXReadFWModuleAddr(psDevInfo, META_CR_PERF_COUNT1, &ui32RegVal); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); -+ DDLOGVAL32("META_CR_PERF_COUNT1", ui32RegVal); -+ } -+ -+ if (bIsT0Enabled & bIsFWFaulted) -+ { -+ eError = RGXValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); -+ } -+ } -+ else if (bIsFWFaulted) -+ { -+ 
PVR_DUMPDEBUG_LOG("Skipping FW code memory corruption checking as META is disabled"); -+ } -+ } -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ DDLOG32(MIPS_ADDR_REMAP1_CONFIG1); -+ DDLOG64(MIPS_ADDR_REMAP1_CONFIG2); -+ DDLOG32(MIPS_ADDR_REMAP2_CONFIG1); -+ DDLOG64(MIPS_ADDR_REMAP2_CONFIG2); -+ DDLOG32(MIPS_ADDR_REMAP3_CONFIG1); -+ DDLOG64(MIPS_ADDR_REMAP3_CONFIG2); -+ DDLOG32(MIPS_ADDR_REMAP4_CONFIG1); -+ DDLOG64(MIPS_ADDR_REMAP4_CONFIG2); -+ DDLOG32(MIPS_ADDR_REMAP5_CONFIG1); -+ DDLOG64(MIPS_ADDR_REMAP5_CONFIG2); -+ DDLOG64(MIPS_WRAPPER_CONFIG); -+ DDLOG32(MIPS_EXCEPTION_STATUS); -+ -+ RGXDumpMIPSState(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); -+ } -+#endif -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) -+ { -+ eError = RGXDumpRISCVState(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); -+ PVR_RETURN_IF_ERROR(eError); -+ } -+ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, TFBC_VERSION)) -+ { -+ DDLOGVAL32("TFBC_VERSION", RGX_GET_FEATURE_VALUE(psDevInfo, TFBC_VERSION)); -+ } -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT) || -+ RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_DELTA_CORRELATION)) -+ { -+ DDLOGVAL32("TFBC_COMPRESSION_CONTROL", psDevInfo->psRGXFWIfSysInit->ui32TFBCCompressionControl); -+ } -+ return PVRSRV_OK; -+ -+_METASPError: -+ PVR_DUMPDEBUG_LOG("Dump Slave Port debug information"); -+ _RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); -+ -+ return eError; -+#endif /* defined(NO_HARDWARE) */ -+} -+ -+#undef REG32_FMTSPEC -+#undef REG64_FMTSPEC -+#undef DDLOG32 -+#undef DDLOG64 -+#undef DDLOG32_DPX -+#undef DDLOG64_DPX -+#undef DDLOGVAL32 -+ -+void RGXDumpAllContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ IMG_UINT32 ui32VerbLevel) -+{ -+ DumpTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); -+ DumpRenderCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); -+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) -+ DumpKickSyncCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); -+#endif -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE)) -+ { -+ DumpComputeCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); -+ } -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) -+ { -+ DumpTDMTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); -+ } -+} -+ -+/****************************************************************************** -+ End of file (rgxdebug.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxdebug_common.c b/drivers/gpu/drm/img-rogue/rgxdebug_common.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxdebug_common.c -@@ -0,0 +1,2219 @@ -+/*************************************************************************/ /*! -+@File -+@Title Rgx debug information -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX debugging functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "img_defs.h" -+#include "rgxdefs_km.h" -+#include "rgxdevice.h" -+#include "osfunc.h" -+#include "allocmem.h" -+ -+#include "rgxdebug_common.h" -+#include "pvrversion.h" -+#include "pvrsrv.h" -+#include "rgx_fwif_sf.h" -+#include "rgxfw_log_helper.h" -+#include "fwtrace_string.h" -+#include "rgxmmudefs_km.h" -+#include "rgxinit.h" -+#include "rgxfwutils.h" -+#include "rgxfwriscv.h" -+#include "rgxfwimageutils.h" -+#include "fwload.h" -+#include "rgx_options.h" -+#include "devicemem_history_server.h" -+#include "debug_common.h" -+#include "info_page.h" -+#include "osfunc.h" -+ -+#define MAX_FW_DESCRIPTION_LENGTH (600U) -+ -+#define PVR_DUMP_FIRMWARE_INFO(x) \ -+ PVR_DUMPDEBUG_LOG("FW info: %d.%d @ %8d (%s) build options: 0x%08x", \ -+ PVRVERSION_UNPACK_MAJ((x).ui32DDKVersion), \ -+ PVRVERSION_UNPACK_MIN((x).ui32DDKVersion), \ -+ (x).ui32DDKBuild, \ -+ ((x).ui32BuildOptions & OPTIONS_DEBUG_EN) ? "debug":"release", \ -+ (x).ui32BuildOptions); -+ -+#define PVR_DUMP_FIRMWARE_INFO_HDR(x) \ -+ PVR_DUMPDEBUG_LOG("FW info: %d.%d @ %8d (%s) build options: 0x%08x", \ -+ (x).ui16PVRVersionMajor, \ -+ (x).ui16PVRVersionMinor, \ -+ (x).ui32PVRVersionBuild, \ -+ ((x).ui32Flags & OPTIONS_DEBUG_EN) ? "debug":"release", \ -+ (x).ui32Flags); -+ -+typedef struct { -+ IMG_UINT16 ui16Mask; -+ const IMG_CHAR *pszStr; -+} RGXFWT_DEBUG_INFO_MSKSTR; /* pair of bit mask and debug info message string */ -+ -+/* -+ * Array of all the Firmware Trace log IDs used to convert the trace data. 
-+ */ -+typedef struct _TRACEBUF_LOG_ { -+ RGXFW_LOG_SFids eSFId; -+ const IMG_CHAR *pszName; -+ const IMG_CHAR *pszFmt; -+ IMG_UINT32 ui32ArgNum; -+} TRACEBUF_LOG; -+ -+static const TRACEBUF_LOG aLogDefinitions[] = -+{ -+#define X(a, b, c, d, e) {RGXFW_LOG_CREATESFID(a,b,e), #c, d, e}, -+ RGXFW_LOG_SFIDLIST -+#undef X -+}; -+ -+static const IMG_FLAGS2DESC asCswOpts2Description[] = -+{ -+ {RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST, " Fast CSW profile;"}, -+ {RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM, " Medium CSW profile;"}, -+ {RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW, " Slow CSW profile;"}, -+ {RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY, " No Delay CSW profile;"}, -+ {RGXFWIF_INICFG_CTXSWITCH_MODE_RAND, " Random Csw enabled;"}, -+ {RGXFWIF_INICFG_CTXSWITCH_SRESET_EN, " SoftReset;"}, -+}; -+ -+static const IMG_FLAGS2DESC asMisc2Description[] = -+{ -+ {RGXFWIF_INICFG_POW_RASCALDUST, " Power Rascal/Dust;"}, -+ {RGXFWIF_INICFG_SPU_CLOCK_GATE, " SPU Clock Gating (requires Power Rascal/Dust);"}, -+ {RGXFWIF_INICFG_HWPERF_EN, " HwPerf EN;"}, -+ {RGXFWIF_INICFG_FBCDC_V3_1_EN, " FBCDCv3.1;"}, -+ {RGXFWIF_INICFG_CHECK_MLIST_EN, " Check MList;"}, -+ {RGXFWIF_INICFG_DISABLE_CLKGATING_EN, " ClockGating Off;"}, -+ {RGXFWIF_INICFG_REGCONFIG_EN, " Register Config;"}, -+ {RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY, " Assert on OOM;"}, -+ {RGXFWIF_INICFG_HWP_DISABLE_FILTER, " HWP Filter Off;"}, -+ {RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN, " CDM Random kill;"}, -+ {RGXFWIF_INICFG_DISABLE_DM_OVERLAP, " DM Overlap Off;"}, -+ {RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER, " Assert on HWR;"}, -+ {RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED, " Coherent fabric on;"}, -+ {RGXFWIF_INICFG_VALIDATE_IRQ, " Validate IRQ;"}, -+ {RGXFWIF_INICFG_DISABLE_PDP_EN, " PDUMP Panic off;"}, -+ {RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN, " SPU Pow mask change on;"}, -+ {RGXFWIF_INICFG_WORKEST, " Workload Estim;"}, -+ {RGXFWIF_INICFG_PDVFS, " PDVFS;"}, -+ {RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND, " CDM task demand arbitration;"}, -+ {RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN, " CDM round-robin arbitration;"}, -+ {RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP, " ISP v1 scheduling;"}, -+ {RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP, " ISP v2 scheduling;"}, -+ {RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER, " Validate SOC&USC timers;"}, -+}; -+ -+static const IMG_FLAGS2DESC asFwOsCfg2Description[] = -+{ -+ {RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN, " TDM;"}, -+ {RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN, " GEOM;"}, -+ {RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN, " 3D;"}, -+ {RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN, " CDM;"}, -+#if defined(SUPPORT_RAY_TRACING) -+ {RGXFWIF_INICFG_OS_CTXSWITCH_RDM_EN, " RDM;"}, -+#endif -+ {RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM, " LowPrio TDM;"}, -+ {RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM, " LowPrio GEOM;"}, -+ {RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D, " LowPrio 3D;"}, -+ {RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM, " LowPrio CDM;"}, -+#if defined(SUPPORT_RAY_TRACING) -+ {RGXFWIF_INICFG_OS_LOW_PRIO_CS_RDM, " LowPrio RDM;"}, -+#endif -+}; -+ -+#define NARGS_MASK ~(0xF<<16) -+static IMG_BOOL _FirmwareTraceIntegrityCheck(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ const TRACEBUF_LOG *psLogDef = &aLogDefinitions[0]; -+ IMG_BOOL bIntegrityOk = IMG_TRUE; -+ -+ /* -+ * For every log ID, check the format string and number of arguments is valid. 
-+ */ -+ while (psLogDef->eSFId != RGXFW_SF_LAST) -+ { -+ const TRACEBUF_LOG *psLogDef2; -+ const IMG_CHAR *pszString; -+ IMG_UINT32 ui32Count; -+ -+ /* -+ * Check the number of arguments matches the number of '%' in the string and -+ * check that no string uses %s which is not supported as it requires a -+ * pointer to memory that is not going to be valid. -+ */ -+ pszString = psLogDef->pszFmt; -+ ui32Count = 0; -+ -+ while (*pszString != '\0') -+ { -+ if (*pszString++ == '%') -+ { -+ ui32Count++; -+ if (*pszString == 's') -+ { -+ bIntegrityOk = IMG_FALSE; -+ PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has an unsupported type not recognized (fmt: %%%c). Please fix.", -+ psLogDef->pszName, *pszString); -+ } -+ else if (*pszString == '%') -+ { -+ /* Double % is a printable % sign and not a format string... */ -+ ui32Count--; -+ } -+ } -+ } -+ -+ if (ui32Count != psLogDef->ui32ArgNum) -+ { -+ bIntegrityOk = IMG_FALSE; -+ PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but only %d are specified. Please fix.", -+ psLogDef->pszName, ui32Count, psLogDef->ui32ArgNum); -+ } -+ -+ /* RGXDumpFirmwareTrace() has a hardcoded limit of supporting up to 20 arguments... */ -+ if (ui32Count > 20) -+ { -+ bIntegrityOk = IMG_FALSE; -+ PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but a maximum of 20 are supported. Please fix.", -+ psLogDef->pszName, ui32Count); -+ } -+ -+ /* Check the id number is unique (don't take into account the number of arguments) */ -+ ui32Count = 0; -+ psLogDef2 = &aLogDefinitions[0]; -+ -+ while (psLogDef2->eSFId != RGXFW_SF_LAST) -+ { -+ if ((psLogDef->eSFId & NARGS_MASK) == (psLogDef2->eSFId & NARGS_MASK)) -+ { -+ ui32Count++; -+ } -+ psLogDef2++; -+ } -+ -+ if (ui32Count != 1) -+ { -+ bIntegrityOk = IMG_FALSE; -+ PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s id %x is not unique, there are %d more. Please fix.", -+ psLogDef->pszName, psLogDef->eSFId, ui32Count - 1); -+ } -+ -+ /* Move to the next log ID... */ -+ psLogDef++; -+ } -+ -+ return bIntegrityOk; -+} -+ -+void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; -+ static IMG_BOOL bIntegrityCheckPassed = IMG_FALSE; -+ -+ /* Check that the firmware trace is correctly defined... */ -+ if (!bIntegrityCheckPassed) -+ { -+ bIntegrityCheckPassed = _FirmwareTraceIntegrityCheck(pfnDumpDebugPrintf, pvDumpDebugFile); -+ if (!bIntegrityCheckPassed) -+ { -+ return; -+ } -+ } -+ -+ /* Dump FW trace information... */ -+ if (psRGXFWIfTraceBufCtl != NULL) -+ { -+ IMG_UINT32 tid; -+ -+ PVR_DUMPDEBUG_LOG("Device ID: %u", psDevInfo->psDeviceNode->sDevId.ui32InternalID); -+ -+ RGXFwSharedMemCacheOpValue(psRGXFWIfTraceBufCtl->ui32LogType, INVALIDATE); -+ -+ /* Print the log type settings... */ -+ if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) -+ { -+ PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")", -+ ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")), -+ RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType) -+ ); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("Debug log type: none"); -+ } -+ -+ /* Print the decoded log for each thread... */ -+ for (tid = 0; tid < RGXFW_THREAD_NUM; tid++) -+ { -+ RGXDumpFirmwareTraceDecoded(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBufCtl, tid); -+ } -+ } -+} -+ -+/*! 
-+******************************************************************************* -+ -+ @Function RGXPrepareExtraDebugInfo -+ -+ @Description -+ -+ Prepares debug info string by decoding ui16DebugInfo value passed -+ -+ @Input pszBuffer - pointer to debug info string buffer -+ -+ @Return void -+ -+******************************************************************************/ -+static void RGXPrepareExtraDebugInfo(IMG_CHAR *pszBuffer, IMG_UINT32 ui32BufferSize, IMG_UINT16 ui16DebugInfo) -+{ -+ const RGXFWT_DEBUG_INFO_MSKSTR aDebugInfoMskStr[] = -+ { -+#define X(a, b) {a, b}, -+ RGXFWT_DEBUG_INFO_MSKSTRLIST -+#undef X -+ }; -+ -+ IMG_UINT32 ui32NumFields = sizeof(aDebugInfoMskStr)/sizeof(RGXFWT_DEBUG_INFO_MSKSTR); -+ IMG_UINT32 i; -+ IMG_BOOL bHasExtraDebugInfo = IMG_FALSE; -+ -+ /* Add prepend string */ -+ OSStringLCopy(pszBuffer, RGXFWT_DEBUG_INFO_STR_PREPEND, ui32BufferSize); -+ -+ /* Add debug info strings */ -+ for (i = 0; i < ui32NumFields; i++) -+ { -+ if (ui16DebugInfo & aDebugInfoMskStr[i].ui16Mask) -+ { -+ if (bHasExtraDebugInfo) -+ { -+ OSStringLCat(pszBuffer, ", ", ui32BufferSize); /* Add comma separator */ -+ } -+ OSStringLCat(pszBuffer, aDebugInfoMskStr[i].pszStr, ui32BufferSize); -+ bHasExtraDebugInfo = IMG_TRUE; -+ } -+ } -+ -+ /* Add append string */ -+ OSStringLCat(pszBuffer, RGXFWT_DEBUG_INFO_STR_APPEND, ui32BufferSize); -+} -+ -+#define PVR_MAX_DEBUG_PARTIAL_LINES (40U) -+#define PVR_DUMPDEBUG_LOG_LINES(fmt, ...) \ -+ if (!bPrintAllLines) { \ -+ OSSNPrintf(&pszLineBuffer[ui32LastLineIdx * PVR_MAX_DEBUG_MESSAGE_LEN], PVR_MAX_DEBUG_MESSAGE_LEN, (fmt), ##__VA_ARGS__); \ -+ ui32LineCount++; \ -+ ui32LastLineIdx = ui32LineCount % PVR_MAX_DEBUG_PARTIAL_LINES; \ -+ } else { \ -+ PVR_UNREFERENCED_PARAMETER(pszLineBuffer); \ -+ PVR_UNREFERENCED_PARAMETER(ui32LineCount); \ -+ PVR_UNREFERENCED_PARAMETER(ui32LastLineIdx); \ -+ PVR_DUMPDEBUG_LOG((fmt), ##__VA_ARGS__); \ -+ } -+ -+static void RGXDumpFirmwareTraceLines(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl, -+ IMG_UINT32 ui32TID, -+ bool bPrintAllLines) -+{ -+ volatile IMG_UINT32 *pui32FWWrapCount; -+ volatile IMG_UINT32 *pui32FWTracePtr; -+ IMG_UINT32 *pui32TraceBuf; -+ IMG_UINT32 *pui32LocalTraceBuf = NULL; -+ IMG_UINT32 ui32HostWrapCount; -+ IMG_UINT32 ui32HostTracePtr; -+ IMG_UINT32 ui32Count = 0; -+ IMG_UINT32 ui32LineCount = 0; -+ IMG_UINT32 ui32LastLineIdx = 0; -+ IMG_CHAR *pszLineBuffer = NULL; -+ IMG_UINT32 ui32TraceBufSizeInDWords; -+ -+ RGXFwSharedMemCacheOpValue(psRGXFWIfTraceBufCtl->sTraceBuf[ui32TID], INVALIDATE); -+ -+ pui32FWWrapCount = &(psRGXFWIfTraceBufCtl->sTraceBuf[ui32TID].ui32WrapCount); -+ pui32FWTracePtr = &(psRGXFWIfTraceBufCtl->sTraceBuf[ui32TID].ui32TracePointer); -+ pui32TraceBuf = psDevInfo->apui32TraceBuffer[ui32TID]; -+ ui32HostWrapCount = *pui32FWWrapCount; -+ ui32HostTracePtr = *pui32FWTracePtr; -+ -+ if (pui32TraceBuf == NULL) -+ { -+ /* trace buffer not yet allocated */ -+ return; -+ } -+ -+ if (!bPrintAllLines) -+ { -+ pszLineBuffer = OSAllocMem(PVR_MAX_DEBUG_MESSAGE_LEN * PVR_MAX_DEBUG_PARTIAL_LINES); -+ PVR_LOG_RETURN_VOID_IF_FALSE(pszLineBuffer != NULL, "pszLineBuffer alloc failed"); -+ } -+ -+ ui32TraceBufSizeInDWords = psDevInfo->ui32TraceBufSizeInDWords; -+ -+ if (ui32HostTracePtr >= ui32TraceBufSizeInDWords) -+ { -+ PVR_DUMPDEBUG_LOG_LINES("WARNING: Trace pointer (%d) greater than buffer size (%d).", -+ ui32HostTracePtr, ui32TraceBufSizeInDWords); -+ ui32HostTracePtr %= 
ui32TraceBufSizeInDWords; -+ } -+ -+ /* -+ * Allocate a local copy of the trace buffer which will contain a static non-changing -+ * snapshot view of the buffer. This removes the issue of a fast GPU wrapping and -+ * overwriting the tail data of the buffer. -+ */ -+ pui32LocalTraceBuf = OSAllocMem(ui32TraceBufSizeInDWords * sizeof(IMG_UINT32)); -+ if (pui32LocalTraceBuf != NULL) -+ { -+ memcpy(pui32LocalTraceBuf, pui32TraceBuf, ui32TraceBufSizeInDWords * sizeof(IMG_UINT32)); -+ ui32HostTracePtr = *pui32FWTracePtr; -+ pui32TraceBuf = pui32LocalTraceBuf; -+ } -+ -+ while (ui32Count < ui32TraceBufSizeInDWords) -+ { -+ IMG_UINT32 ui32Data, ui32DataToId; -+ -+ /* Find the first valid log ID, skipping whitespace... */ -+ do -+ { -+ IMG_UINT32 ui32ValidatedHostTracePtr; -+ ui32ValidatedHostTracePtr = OSConfineArrayIndexNoSpeculation(ui32HostTracePtr, -+ ui32TraceBufSizeInDWords); -+ ui32Data = pui32TraceBuf[ui32ValidatedHostTracePtr]; -+ ui32DataToId = idToStringID(ui32Data, SFs); -+ -+ /* If an unrecognized id is found it may be inconsistent data or a firmware trace error. */ -+ if (ui32DataToId == RGXFW_SF_LAST && RGXFW_LOG_VALIDID(ui32Data)) -+ { -+ PVR_DUMPDEBUG_LOG_LINES("WARNING: Unrecognized id (%x). From here on the trace might be wrong!", ui32Data); -+ } -+ -+ /* Update the trace pointer... */ -+ ui32HostTracePtr++; -+ if (ui32HostTracePtr >= ui32TraceBufSizeInDWords) -+ { -+ ui32HostTracePtr = 0; -+ ui32HostWrapCount++; -+ } -+ ui32Count++; -+ } while ((RGXFW_SF_LAST == ui32DataToId) && -+ ui32Count < ui32TraceBufSizeInDWords); -+ -+ if (ui32Count < ui32TraceBufSizeInDWords) -+ { -+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN] = "%" IMG_UINT64_FMTSPEC ":T%u-%s> "; -+ IMG_CHAR szDebugInfoBuffer[RGXFWT_DEBUG_INFO_STR_MAXLEN] = ""; -+ IMG_UINT64 ui64Timestamp; -+ IMG_UINT16 ui16DebugInfo; -+ -+ /* If we hit the ASSERT message then this is the end of the log... */ -+ if (ui32Data == RGXFW_SF_MAIN_ASSERT_FAILED) -+ { -+ PVR_DUMPDEBUG_LOG_LINES("ASSERTION %.*s failed at %.*s:%u", -+ RGXFW_TRACE_BUFFER_ASSERT_SIZE, -+ psRGXFWIfTraceBufCtl->sTraceBuf[ui32TID].sAssertBuf.szInfo, -+ RGXFW_TRACE_BUFFER_ASSERT_SIZE, -+ psRGXFWIfTraceBufCtl->sTraceBuf[ui32TID].sAssertBuf.szPath, -+ psRGXFWIfTraceBufCtl->sTraceBuf[ui32TID].sAssertBuf.ui32LineNum); -+ break; -+ } -+ -+ ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32HostTracePtr + 0) % ui32TraceBufSizeInDWords]) << 32 | -+ (IMG_UINT64)(pui32TraceBuf[(ui32HostTracePtr + 1) % ui32TraceBufSizeInDWords]); -+ -+ ui16DebugInfo = (IMG_UINT16) ((ui64Timestamp & ~RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK) >> RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT); -+ ui64Timestamp = (ui64Timestamp & ~RGXFWT_TIMESTAMP_TIME_CLRMSK) >> RGXFWT_TIMESTAMP_TIME_SHIFT; -+ -+ /* -+ * Print the trace string and provide up to 20 arguments which -+ * printf function will be able to use. We have already checked -+ * that no string uses more than this. 
-+ */ -+ OSStringLCat(szBuffer, SFs[ui32DataToId].psName, PVR_MAX_DEBUG_MESSAGE_LEN); -+ -+ /* Check and append any extra debug info available */ -+ if (ui16DebugInfo) -+ { -+ /* Prepare debug info string */ -+ RGXPrepareExtraDebugInfo(szDebugInfoBuffer, RGXFWT_DEBUG_INFO_STR_MAXLEN, ui16DebugInfo); -+ -+ /* Append debug info string */ -+ OSStringLCat(szBuffer, szDebugInfoBuffer, PVR_MAX_DEBUG_MESSAGE_LEN); -+ } -+ -+ PVR_DUMPDEBUG_LOG_LINES(szBuffer, ui64Timestamp, ui32TID, groups[RGXFW_SF_GID(ui32Data)], -+ pui32TraceBuf[(ui32HostTracePtr + 2) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 3) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 4) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 5) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 6) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 7) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 8) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 9) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 10) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 11) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 12) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 13) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 14) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 15) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 16) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 17) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 18) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 19) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 20) % ui32TraceBufSizeInDWords], -+ pui32TraceBuf[(ui32HostTracePtr + 21) % ui32TraceBufSizeInDWords]); -+ -+ /* Update the trace pointer... */ -+ ui32HostTracePtr = ui32HostTracePtr + 2 + RGXFW_SF_PARAMNUM(ui32Data); -+ if (ui32HostTracePtr >= ui32TraceBufSizeInDWords) -+ { -+ ui32HostTracePtr = ui32HostTracePtr % ui32TraceBufSizeInDWords; -+ ui32HostWrapCount++; -+ } -+ ui32Count = (ui32Count + 2 + RGXFW_SF_PARAMNUM(ui32Data)); -+ -+ /* Has the FW trace buffer overtaken the host pointer during the last line printed??? */ -+ if ((pui32LocalTraceBuf == NULL) && -+ ((*pui32FWWrapCount > ui32HostWrapCount) || -+ ((*pui32FWWrapCount == ui32HostWrapCount) && (*pui32FWTracePtr > ui32HostTracePtr)))) -+ { -+ /* Move forward to the oldest entry again... */ -+ PVR_DUMPDEBUG_LOG_LINES(". . ."); -+ ui32HostWrapCount = *pui32FWWrapCount; -+ ui32HostTracePtr = *pui32FWTracePtr; -+ } -+ } -+ } -+ -+ /* Free the local copy of the trace buffer if it was allocated... 
*/ -+ if (pui32LocalTraceBuf != NULL) -+ { -+ OSFreeMem(pui32LocalTraceBuf); -+ } -+ -+ if (!bPrintAllLines) -+ { -+ IMG_UINT32 ui32FirstLineIdx; -+ -+ if (ui32LineCount > PVR_MAX_DEBUG_PARTIAL_LINES) -+ { -+ ui32FirstLineIdx = ui32LastLineIdx; -+ ui32LineCount = PVR_MAX_DEBUG_PARTIAL_LINES; -+ } -+ else -+ { -+ ui32FirstLineIdx = 0; -+ } -+ -+ for (ui32Count = 0; ui32Count < ui32LineCount; ui32Count++) -+ { -+ PVR_DUMPDEBUG_LOG("%s", &pszLineBuffer[((ui32FirstLineIdx + ui32Count) % PVR_MAX_DEBUG_PARTIAL_LINES) * PVR_MAX_DEBUG_MESSAGE_LEN]); -+ } -+ -+ OSFreeMem(pszLineBuffer); -+ } -+} -+ -+void RGXDumpFirmwareTraceDecoded(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl, -+ IMG_UINT32 ui32TID) -+{ -+ RGXDumpFirmwareTraceLines(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, -+ psRGXFWIfTraceBufCtl, ui32TID, true); -+} -+ -+void RGXDumpFirmwareTracePartial(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl, -+ IMG_UINT32 ui32TID) -+{ -+ RGXDumpFirmwareTraceLines(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, -+ psRGXFWIfTraceBufCtl, ui32TID, false); -+} -+ -+void RGXDumpFirmwareTraceBinary(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl, -+ IMG_UINT32 ui32TID) -+{ -+ IMG_UINT32 i; -+ IMG_BOOL bPrevLineWasZero = IMG_FALSE; -+ IMG_BOOL bLineIsAllZeros = IMG_FALSE; -+ IMG_UINT32 ui32CountLines = 0; -+ IMG_UINT32 *pui32TraceBuffer; -+ IMG_CHAR *pszLine; -+ -+ RGXFwSharedMemCacheOpExec(psDevInfo->apui32TraceBuffer[ui32TID], -+ psDevInfo->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32), -+ PVRSRV_CACHE_OP_INVALIDATE); -+ pui32TraceBuffer = psDevInfo->apui32TraceBuffer[ui32TID]; -+ -+/* Max number of DWords to be printed per line, in debug dump binary output */ -+#define PVR_DD_FW_TRACEBUF_LINESIZE 30U -+ /* each element in the line is 8 characters plus a space. The '+ 1' is because of the final trailing '\0'. */ -+ pszLine = OSAllocMem(9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1); -+ if (pszLine == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Out of mem allocating line string (size: %d)", -+ __func__, -+ 9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1)); -+ return; -+ } -+ -+ for (i = 0; i < psDevInfo->ui32TraceBufSizeInDWords; i += PVR_DD_FW_TRACEBUF_LINESIZE) -+ { -+ IMG_UINT32 k = 0; -+ IMG_UINT32 ui32Line = 0x0; -+ IMG_UINT32 ui32LineOffset = i*sizeof(IMG_UINT32); -+ IMG_CHAR *pszBuf = pszLine; -+ -+ for (k = 0; k < PVR_DD_FW_TRACEBUF_LINESIZE; k++) -+ { -+ if ((i + k) >= psDevInfo->ui32TraceBufSizeInDWords) -+ { -+ /* Stop reading when the index goes beyond trace buffer size. This condition is -+ * hit during printing the last line in DD when ui32TraceBufSizeInDWords is not -+ * a multiple of PVR_DD_FW_TRACEBUF_LINESIZE */ -+ break; -+ } -+ -+ ui32Line |= pui32TraceBuffer[i + k]; -+ -+ /* prepare the line to print it. The '+1' is because of the trailing '\0' added */ -+ OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32TraceBuffer[i + k]); -+ pszBuf += 9; /* write over the '\0' */ -+ } -+ -+ bLineIsAllZeros = (ui32Line == 0x0); -+ -+ if (bLineIsAllZeros) -+ { -+ if (bPrevLineWasZero) -+ { -+ ui32CountLines++; -+ } -+ else -+ { -+ bPrevLineWasZero = IMG_TRUE; -+ ui32CountLines = 1; -+ PVR_DUMPDEBUG_LOG("FWT[%08x]: 00000000 ... 
00000000", ui32LineOffset); -+ } -+ } -+ else -+ { -+ if (bPrevLineWasZero && ui32CountLines > 1) -+ { -+ PVR_DUMPDEBUG_LOG("FWT[...]: %d lines were all zero", ui32CountLines); -+ } -+ bPrevLineWasZero = IMG_FALSE; -+ -+ PVR_DUMPDEBUG_LOG("FWT[%08x]:%s", ui32LineOffset, pszLine); -+ } -+ } -+ -+ if (bPrevLineWasZero) -+ { -+ PVR_DUMPDEBUG_LOG("FWT[END]: %d lines were all zero", ui32CountLines); -+ } -+ -+ OSFreeMem(pszLine); -+} -+ -+void RGXDocumentFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ const IMG_UINT32 ui32FwVA, -+ const IMG_CPU_PHYADDR sCpuPA, -+ const IMG_DEV_PHYADDR sDevPA, -+ const IMG_UINT64 ui64PTE) -+{ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ PVR_DUMPDEBUG_LOG("| 0x%08X | " -+ "0x%016" IMG_UINT64_FMTSPECX " | " -+ "0x%016" IMG_UINT64_FMTSPECX " | " -+ "%s%s%s |", -+ ui32FwVA, -+ (IMG_UINT64) sCpuPA.uiAddr, -+ sDevPA.uiAddr, -+ gapszMipsPermissionPTFlags[RGXMIPSFW_TLB_GET_INHIBIT(ui64PTE)], -+ gapszMipsDirtyGlobalValidPTFlags[RGXMIPSFW_TLB_GET_DGV(ui64PTE)], -+ gapszMipsCoherencyPTFlags[RGXMIPSFW_TLB_GET_COHERENCY(ui64PTE)]); -+ } -+ else -+#endif -+ { -+ const char *pszSLCBypass = -+#if defined(RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN) -+ BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN) ? "B" : " "; -+#else -+ " "; -+#endif -+ -+ /* META and RISCV use a subset of the GPU's virtual address space */ -+ PVR_DUMPDEBUG_LOG("| 0x%08X | " -+ "0x%016" IMG_UINT64_FMTSPECX " | " -+ "0x%016" IMG_UINT64_FMTSPECX " | " -+ "%s%s%s%s%s%s |", -+ ui32FwVA, -+ (IMG_UINT64) sCpuPA.uiAddr, -+ sDevPA.uiAddr, -+ BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN) ? "P" : " ", -+ BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_PM_SRC_EN) ? "PM" : " ", -+ pszSLCBypass, -+ BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_CC_EN) ? "C" : " ", -+ BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_READ_ONLY_EN) ? "RO" : "RW", -+ BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_VALID_EN) ? "V" : " "); -+ } -+} -+ -+ -+#if !defined(NO_HARDWARE) -+static PVRSRV_ERROR -+RGXPollMetaRegThroughSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegOffset, -+ IMG_UINT32 ui32PollValue, IMG_UINT32 ui32Mask) -+{ -+ IMG_UINT32 ui32RegValue, ui32NumPolls = 0; -+ PVRSRV_ERROR eError; -+ -+ do -+ { -+ eError = RGXReadFWModuleAddr(psDevInfo, ui32RegOffset, &ui32RegValue); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ } while (((ui32RegValue & ui32Mask) != ui32PollValue) && (ui32NumPolls++ < 1000)); -+ -+ return ((ui32RegValue & ui32Mask) == ui32PollValue) ? PVRSRV_OK : PVRSRV_ERROR_RETRY; -+} -+ -+PVRSRV_ERROR -+RGXReadMetaCoreReg(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegAddr, IMG_UINT32 *pui32RegVal) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Core Read Ready? */ -+ eError = RGXPollMetaRegThroughSP(psDevInfo, -+ META_CR_TXUXXRXRQ_OFFSET, -+ META_CR_TXUXXRXRQ_DREADY_BIT, -+ META_CR_TXUXXRXRQ_DREADY_BIT); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP"); -+ -+ /* Set the reg we are interested in reading */ -+ eError = RGXWriteFWModuleAddr(psDevInfo, META_CR_TXUXXRXRQ_OFFSET, -+ ui32RegAddr | META_CR_TXUXXRXRQ_RDnWR_BIT); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXWriteFWModuleAddr"); -+ -+ /* Core Read Done? 
*/ -+ eError = RGXPollMetaRegThroughSP(psDevInfo, -+ META_CR_TXUXXRXRQ_OFFSET, -+ META_CR_TXUXXRXRQ_DREADY_BIT, -+ META_CR_TXUXXRXRQ_DREADY_BIT); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP"); -+ -+ /* Read the value */ -+ return RGXReadFWModuleAddr(psDevInfo, META_CR_TXUXXRXDT_OFFSET, pui32RegVal); -+} -+#endif /* !defined(NO_HARDWARE) */ -+ -+#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) -+static PVRSRV_ERROR _ValidateWithFWModule(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGXFWIF_DEV_VIRTADDR *psFWAddr, -+ void *pvHostCodeAddr, -+ IMG_UINT32 ui32MaxLen, -+ const IMG_CHAR *pszDesc, -+ IMG_UINT32 ui32StartOffset) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 ui32Value = 0; -+ IMG_UINT32 ui32FWCodeDevVAAddr = psFWAddr->ui32Addr + ui32StartOffset; -+ IMG_UINT32 *pui32FWCode = (IMG_UINT32*) IMG_OFFSET_ADDR(pvHostCodeAddr,ui32StartOffset); -+ IMG_UINT32 i; -+ -+#if defined(EMULATOR) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) -+ { -+ return PVRSRV_OK; -+ } -+#endif -+ -+ ui32MaxLen -= ui32StartOffset; -+ ui32MaxLen /= sizeof(IMG_UINT32); /* Byte -> 32 bit words */ -+ -+ for (i = 0; i < ui32MaxLen; i++) -+ { -+ eError = RGXReadFWModuleAddr(psDevInfo, ui32FWCodeDevVAAddr, &ui32Value); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError))); -+ return eError; -+ } -+ -+#if defined(EMULATOR) -+ if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) -+#endif -+ { -+ PVR_DPF((PVR_DBG_VERBOSE, "0x%x: CPU 0x%08x, FW 0x%08x", i * 4, pui32FWCode[i], ui32Value)); -+ -+ if (pui32FWCode[i] != ui32Value) -+ { -+ PVR_DUMPDEBUG_LOG("%s: Mismatch while validating %s at offset 0x%x: CPU 0x%08x (%p), FW 0x%08x (%x)", -+ __func__, pszDesc, -+ (i * 4) + ui32StartOffset, pui32FWCode[i], pui32FWCode, ui32Value, ui32FWCodeDevVAAddr); -+ return PVRSRV_ERROR_FW_IMAGE_MISMATCH; -+ } -+ } -+ -+ ui32FWCodeDevVAAddr += 4; -+ } -+ -+ PVR_DUMPDEBUG_LOG("Match between Host and Firmware view of the %s", pszDesc); -+ return PVRSRV_OK; -+} -+#endif -+ -+#if !defined(NO_HARDWARE) -+PVRSRV_ERROR RGXValidateFWImage(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+#if !defined(SUPPORT_TRUSTED_DEVICE) -+ PVRSRV_ERROR eError; -+ IMG_UINT32 *pui32HostFWCode = NULL, *pui32HostFWCoremem = NULL; -+ OS_FW_IMAGE *psRGXFW = NULL; -+ const IMG_BYTE *pbRGXFirmware = NULL; -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ IMG_UINT32 *pui32CodeMemoryPointer; -+#endif -+ RGXFWIF_DEV_VIRTADDR sFWAddr; -+ IMG_UINT32 ui32StartOffset = 0; -+ RGX_LAYER_PARAMS sLayerParams; -+ sLayerParams.psDevInfo = psDevInfo; -+ -+#if defined(EMULATOR) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) -+ { -+ PVR_DUMPDEBUG_LOG("Validation of RISC-V FW code is disabled on emulator"); -+ return PVRSRV_OK; -+ } -+#endif -+ -+ if (psDevInfo->pvRegsBaseKM == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: RGX registers not mapped yet!", __func__)); -+ return PVRSRV_ERROR_BAD_MAPPING; -+ } -+ -+ /* Load FW from system for code verification */ -+ pui32HostFWCode = OSAllocZMem(psDevInfo->ui32FWCodeSizeInBytes); -+ if (pui32HostFWCode == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed in allocating memory for FW code. 
" -+ "So skipping FW code verification", -+ __func__)); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ /* Coremem is not present on all GPU cores, so size can be zero */ -+ if (psDevInfo->ui32FWCorememCodeSizeInBytes) -+ { -+ pui32HostFWCoremem = OSAllocZMem(psDevInfo->ui32FWCorememCodeSizeInBytes); -+ if (pui32HostFWCoremem == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed in allocating memory for FW core code. " -+ "So skipping FW code verification", -+ __func__)); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto freeHostFWCode; -+ } -+ } -+ -+ /* Load FW image */ -+ eError = RGXLoadAndGetFWData(psDevInfo->psDeviceNode, &psRGXFW, &pbRGXFirmware); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load FW image file (%s).", -+ __func__, PVRSRVGetErrorString(eError))); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto cleanup_initfw; -+ } -+ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) -+ { -+ eError = ProcessLDRCommandStream(&sLayerParams, pbRGXFirmware, -+ (void*) pui32HostFWCode, NULL, -+ (void*) pui32HostFWCoremem, NULL, NULL); -+ } -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ eError = ProcessELFCommandStream(&sLayerParams, pbRGXFirmware, -+ pui32HostFWCode, NULL, -+ NULL, NULL); -+ } -+#endif -+ else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) -+ { -+ eError = ProcessELFCommandStream(&sLayerParams, pbRGXFirmware, -+ pui32HostFWCode, NULL, -+ pui32HostFWCoremem, NULL); -+ } -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed in parsing FW image file.", __func__)); -+ goto cleanup_initfw; -+ } -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pui32CodeMemoryPointer); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Error in acquiring MIPS FW code memory area (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto cleanup_initfw; -+ } -+ -+ RGXFwSharedMemCacheOpExec(pui32CodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes, PVRSRV_CACHE_OP_INVALIDATE); -+ -+ if (OSMemCmp(pui32HostFWCode, pui32CodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes) == 0) -+ { -+ PVR_DUMPDEBUG_LOG("Match between Host and MIPS views of the FW code" ); -+ } -+ else -+ { -+ IMG_UINT32 ui32Count = 10; /* Show only the first 10 mismatches */ -+ IMG_UINT32 ui32Offset; -+ -+ PVR_DUMPDEBUG_LOG("Mismatch between Host and MIPS views of the FW code"); -+ for (ui32Offset = 0; (ui32Offset*4 < psDevInfo->ui32FWCodeSizeInBytes) || (ui32Count == 0); ui32Offset++) -+ { -+ if (pui32HostFWCode[ui32Offset] != pui32CodeMemoryPointer[ui32Offset]) -+ { -+ PVR_DUMPDEBUG_LOG("At %d bytes, code should be 0x%x but it is instead 0x%x", -+ ui32Offset*4, pui32HostFWCode[ui32Offset], pui32CodeMemoryPointer[ui32Offset]); -+ ui32Count--; -+ } -+ } -+ } -+ -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); -+ } -+ else -+#endif -+ { -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) -+ { -+ /* starting checking after BOOT LOADER config */ -+ sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR; -+ -+ ui32StartOffset = RGXFW_MAX_BOOTLDR_OFFSET; -+ } -+ else -+ { -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ /* Use bootloader code remap which is always configured before the FW is started */ -+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) >= 4) -+ { -+ sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_REMAP_SECURE; -+ } -+ else 
-+#endif -+ { -+ sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_REMAP; -+ } -+ } -+ -+ eError = _ValidateWithFWModule(pfnDumpDebugPrintf, pvDumpDebugFile, -+ psDevInfo, &sFWAddr, -+ pui32HostFWCode, psDevInfo->ui32FWCodeSizeInBytes, -+ "FW code", ui32StartOffset); -+ if (eError != PVRSRV_OK) -+ { -+ goto cleanup_initfw; -+ } -+ -+ /* Coremem is not present on all GPU cores, so may not be alloc'd */ -+ if (pui32HostFWCoremem != NULL) // && psDevInfo->ui32FWCorememCodeSizeInBytes -+ { -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) -+ { -+ sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE); -+ } -+ else -+ { -+ sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, RISCV_COREMEM_CODE); -+ -+ /* Core must be halted while issuing abstract commands */ -+ eError = RGXRiscvHalt(psDevInfo); -+ PVR_GOTO_IF_ERROR(eError, cleanup_initfw); -+ } -+ -+ eError = _ValidateWithFWModule(pfnDumpDebugPrintf, pvDumpDebugFile, -+ psDevInfo, &sFWAddr, -+ pui32HostFWCoremem, psDevInfo->ui32FWCorememCodeSizeInBytes, -+ "FW coremem code", 0); -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) -+ { -+ eError = RGXRiscvResume(psDevInfo); -+ PVR_GOTO_IF_ERROR(eError, cleanup_initfw); -+ } -+ } -+ } -+ -+cleanup_initfw: -+ if (psRGXFW) -+ { -+ OSUnloadFirmware(psRGXFW); -+ } -+ -+ if (pui32HostFWCoremem) -+ { -+ OSFreeMem(pui32HostFWCoremem); -+ } -+freeHostFWCode: -+ if (pui32HostFWCode) -+ { -+ OSFreeMem(pui32HostFWCode); -+ } -+ return eError; -+#else -+ PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); -+ PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile); -+ PVR_UNREFERENCED_PARAMETER(psDevInfo); -+ return PVRSRV_OK; -+#endif -+} -+#endif /* !defined(NO_HARDWARE) */ -+ -+#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) -+PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) -+ IMG_PBYTE pbCodeMemoryPointer; -+ PVRSRV_ERROR eError; -+ RGXFWIF_DEV_VIRTADDR sFWAddr; -+ -+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pbCodeMemoryPointer); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+ RGXFwSharedMemCacheOpExec(pbCodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes, PVRSRV_CACHE_OP_INVALIDATE); -+ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) -+ { -+ sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR; -+ } -+ else -+ { -+ PVR_ASSERT(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)); -+ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) >= 4) -+ { -+ sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_REMAP_SECURE; -+ } -+ else -+#endif -+ { -+ sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_REMAP; -+ } -+ }; -+ -+ eError = _ValidateWithFWModule(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes, "FW code", 0); -+ if (eError != PVRSRV_OK) -+ { -+ goto releaseFWCodeMapping; -+ } -+ -+ if (psDevInfo->ui32FWCorememCodeSizeInBytes) -+ { -+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, (void **)&pbCodeMemoryPointer); -+ if (eError != PVRSRV_OK) -+ { -+ goto releaseFWCoreCodeMapping; -+ } -+ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) -+ { -+ sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE); -+ } -+ else -+ { -+ PVR_ASSERT(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)); -+ sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, RISCV_COREMEM_CODE); -+ } -+ -+ eError = _ValidateWithFWModule(NULL, NULL, psDevInfo, 
&sFWAddr, pbCodeMemoryPointer, -+ psDevInfo->ui32FWCorememCodeSizeInBytes, "FW coremem code", 0); -+ } -+ -+releaseFWCoreCodeMapping: -+ if (psDevInfo->ui32FWCorememCodeSizeInBytes) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc); -+ } -+releaseFWCodeMapping: -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); -+ -+ return eError; -+#else -+ PVR_UNREFERENCED_PARAMETER(psDevInfo); -+ return PVRSRV_OK; -+#endif -+} -+#endif -+ -+static const IMG_CHAR* _RGXGetDebugDevPowerStateString(PVRSRV_DEV_POWER_STATE ePowerState) -+{ -+ switch (ePowerState) -+ { -+ case PVRSRV_DEV_POWER_STATE_DEFAULT: return "DEFAULT"; -+ case PVRSRV_DEV_POWER_STATE_OFF: return "OFF"; -+ case PVRSRV_DEV_POWER_STATE_ON: return "ON"; -+ default: return "UNKNOWN"; -+ } -+} -+ -+/* -+ Writes flags strings to an uninitialised buffer. -+*/ -+static void _GetFwSysFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags) -+{ -+ const IMG_CHAR szCswLabel[] = "Ctx switch options:"; -+ size_t uLabelLen = sizeof(szCswLabel) - 1; -+ const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U; -+ -+ OSStringLCopy(psDesc, szCswLabel, ui32DescSize); -+ -+ DebugCommonFlagStrings(psDesc, uiBytesPerDesc + uLabelLen, asCswOpts2Description, ARRAY_SIZE(asCswOpts2Description), ui32RawFlags); -+ DebugCommonFlagStrings(psDesc, ui32DescSize, asMisc2Description, ARRAY_SIZE(asMisc2Description), ui32RawFlags); -+} -+ -+static void _GetFwOsFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags) -+{ -+ const IMG_CHAR szCswLabel[] = "Ctx switch:"; -+ size_t uLabelLen = sizeof(szCswLabel) - 1; -+ const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U; -+ -+ OSStringLCopy(psDesc, szCswLabel, ui32DescSize); -+ -+ DebugCommonFlagStrings(psDesc, uiBytesPerDesc + uLabelLen, asFwOsCfg2Description, ARRAY_SIZE(asFwOsCfg2Description), ui32RawFlags); -+} -+ -+ -+typedef enum _DEVICEMEM_HISTORY_QUERY_INDEX_ -+{ -+ DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING, -+ DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED, -+ DEVICEMEM_HISTORY_QUERY_INDEX_NEXT, -+ DEVICEMEM_HISTORY_QUERY_INDEX_COUNT, -+} DEVICEMEM_HISTORY_QUERY_INDEX; -+ -+ -+/*! 
-+******************************************************************************* -+ -+ @Function _PrintDevicememHistoryQueryResult -+ -+ @Description -+ -+ Print details of a single result from a DevicememHistory query -+ -+ @Input pfnDumpDebugPrintf - Debug printf function -+ @Input pvDumpDebugFile - Optional file identifier to be passed to the -+ 'printf' function if required -+ @Input psFaultProcessInfo - The process info derived from the page fault -+ @Input psResult - The DevicememHistory result to be printed -+ @Input ui32Index - The index of the result -+ -+ @Return void -+ -+******************************************************************************/ -+static void _PrintDevicememHistoryQueryResult(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ RGXMEM_PROCESS_INFO *psFaultProcessInfo, -+ DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult, -+ IMG_UINT32 ui32Index, -+ const IMG_CHAR* pszIndent) -+{ -+ IMG_UINT32 ui32Remainder; -+ IMG_UINT64 ui64Seconds, ui64Nanoseconds; -+ -+ RGXConvertOSTimestampToSAndNS(psResult->ui64When, -+ &ui64Seconds, -+ &ui64Nanoseconds); -+ -+ if (psFaultProcessInfo->uiPID != RGXMEM_SERVER_PID_FIRMWARE) -+ { -+ PVR_DUMPDEBUG_LOG("%s [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC -+ " Size: " IMG_DEVMEM_SIZE_FMTSPEC -+ " Operation: %s Modified: %" IMG_UINT64_FMTSPEC -+ " us ago (OS time %" IMG_UINT64_FMTSPEC -+ ".%09" IMG_UINT64_FMTSPEC " s)", -+ pszIndent, -+ ui32Index, -+ psResult->szString, -+ psResult->sBaseDevVAddr.uiAddr, -+ psResult->uiSize, -+ psResult->bMap ? "Map": "Unmap", -+ OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder), -+ ui64Seconds, -+ ui64Nanoseconds); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("%s [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC -+ " Size: " IMG_DEVMEM_SIZE_FMTSPEC -+ " Operation: %s Modified: %" IMG_UINT64_FMTSPEC -+ " us ago (OS time %" IMG_UINT64_FMTSPEC -+ ".%09" IMG_UINT64_FMTSPEC -+ ") PID: %u (%s)", -+ pszIndent, -+ ui32Index, -+ psResult->szString, -+ psResult->sBaseDevVAddr.uiAddr, -+ psResult->uiSize, -+ psResult->bMap ? "Map": "Unmap", -+ OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder), -+ ui64Seconds, -+ ui64Nanoseconds, -+ psResult->sProcessInfo.uiPID, -+ psResult->sProcessInfo.szProcessName); -+ } -+ -+ if (!psResult->bRange) -+ { -+ PVR_DUMPDEBUG_LOG("%s Whole allocation was %s", pszIndent, psResult->bMap ? "mapped": "unmapped"); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("%s Pages %u to %u (" IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC ") %s%s", -+ pszIndent, -+ psResult->ui32StartPage, -+ psResult->ui32StartPage + psResult->ui32PageCount - 1, -+ psResult->sMapStartAddr.uiAddr, -+ psResult->sMapEndAddr.uiAddr, -+ psResult->bAll ? "(whole allocation) " : "", -+ psResult->bMap ? "mapped": "unmapped"); -+ } -+} -+ -+/*! 
-+******************************************************************************* -+ -+ @Function _PrintDevicememHistoryQueryOut -+ -+ @Description -+ -+ Print details of all the results from a DevicememHistory query -+ -+ @Input pfnDumpDebugPrintf - Debug printf function -+ @Input pvDumpDebugFile - Optional file identifier to be passed to the -+ 'printf' function if required -+ @Input psFaultProcessInfo - The process info derived from the page fault -+ @Input psQueryOut - Storage for the query results -+ -+ @Return void -+ -+******************************************************************************/ -+static void _PrintDevicememHistoryQueryOut(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ RGXMEM_PROCESS_INFO *psFaultProcessInfo, -+ DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut, -+ const IMG_CHAR* pszIndent) -+{ -+ IMG_UINT32 i; -+ -+ if (psQueryOut->ui32NumResults == 0) -+ { -+ PVR_DUMPDEBUG_LOG("%s No results", pszIndent); -+ } -+ else -+ { -+ for (i = 0; i < psQueryOut->ui32NumResults; i++) -+ { -+ _PrintDevicememHistoryQueryResult(pfnDumpDebugPrintf, pvDumpDebugFile, -+ psFaultProcessInfo, -+ &psQueryOut->sResults[i], -+ i, -+ pszIndent); -+ } -+ } -+} -+ -+/* table of HW page size values and the equivalent */ -+static const unsigned int aui32HWPageSizeTable[][2] = -+{ -+ { 0, PVRSRV_4K_PAGE_SIZE }, -+ { 1, PVRSRV_16K_PAGE_SIZE }, -+ { 2, PVRSRV_64K_PAGE_SIZE }, -+ { 3, PVRSRV_256K_PAGE_SIZE }, -+ { 4, PVRSRV_1M_PAGE_SIZE }, -+ { 5, PVRSRV_2M_PAGE_SIZE } -+}; -+ -+/*! -+******************************************************************************* -+ -+ @Function _PageSizeHWToBytes -+ -+ @Description -+ -+ Convert a HW page size value to its size in bytes -+ -+ @Input ui32PageSizeHW - The HW page size value -+ -+ @Return IMG_UINT32 The page size in bytes -+ -+******************************************************************************/ -+static IMG_UINT32 _PageSizeHWToBytes(IMG_UINT32 ui32PageSizeHW) -+{ -+ if (ui32PageSizeHW > 5) -+ { -+ /* This is invalid, so return a default value as we cannot ASSERT in this code! */ -+ return PVRSRV_4K_PAGE_SIZE; -+ } -+ -+ return aui32HWPageSizeTable[ui32PageSizeHW][1]; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function _GetDevicememHistoryData -+ -+ @Description -+ -+ Get the DevicememHistory results for the given PID and faulting device virtual address. -+ The function will query DevicememHistory for information about the faulting page, as well -+ as the page before and after. 
-+ -+ @Input psDeviceNode - The device which this allocation search should be made on -+ @Input uiPID - The process ID to search for allocations belonging to -+ @Input sFaultDevVAddr - The device address to search for allocations at/before/after -+ @Input asQueryOut - Storage for the query results -+ @Input ui32PageSizeBytes - Faulted page size in bytes -+ -+ @Return IMG_BOOL - IMG_TRUE if any results were found for this page fault -+ -+******************************************************************************/ -+static IMG_BOOL _GetDevicememHistoryData(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PID uiPID, -+ IMG_DEV_VIRTADDR sFaultDevVAddr, -+ DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT], -+ IMG_UINT32 ui32PageSizeBytes) -+{ -+ DEVICEMEM_HISTORY_QUERY_IN sQueryIn; -+ IMG_BOOL bAnyHits = IMG_FALSE; -+ -+ /* if the page fault originated in the firmware then the allocation may -+ * appear to belong to any PID, because FW allocations are attributed -+ * to the client process creating the allocation, so instruct the -+ * devicemem_history query to search all available PIDs -+ */ -+ if (uiPID == RGXMEM_SERVER_PID_FIRMWARE) -+ { -+ sQueryIn.uiPID = DEVICEMEM_HISTORY_PID_ANY; -+ } -+ else -+ { -+ sQueryIn.uiPID = uiPID; -+ } -+ -+ sQueryIn.psDevNode = psDeviceNode; -+ /* Query the DevicememHistory for all allocations in the previous page... */ -+ sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) - ui32PageSizeBytes; -+ if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING], -+ ui32PageSizeBytes, IMG_TRUE)) -+ { -+ bAnyHits = IMG_TRUE; -+ } -+ -+ /* Query the DevicememHistory for any record at the exact address... */ -+ sQueryIn.sDevVAddr = sFaultDevVAddr; -+ if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED], -+ ui32PageSizeBytes, IMG_FALSE)) -+ { -+ bAnyHits = IMG_TRUE; -+ } -+ else -+ { -+ /* If not matched then try matching any record in the faulting page... */ -+ if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED], -+ ui32PageSizeBytes, IMG_TRUE)) -+ { -+ bAnyHits = IMG_TRUE; -+ } -+ } -+ -+ /* Query the DevicememHistory for all allocations in the next page... */ -+ sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) + ui32PageSizeBytes; -+ if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_NEXT], -+ ui32PageSizeBytes, IMG_TRUE)) -+ { -+ bAnyHits = IMG_TRUE; -+ } -+ -+ return bAnyHits; -+} -+ -+/* stored data about one page fault */ -+typedef struct _FAULT_INFO_ -+{ -+ /* the process info of the memory context that page faulted */ -+ RGXMEM_PROCESS_INFO sProcessInfo; -+ IMG_DEV_VIRTADDR sFaultDevVAddr; -+ MMU_FAULT_DATA sMMUFaultData; -+ DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT]; -+ /* the CR timer value at the time of the fault, recorded by the FW. -+ * used to differentiate different page faults -+ */ -+ IMG_UINT64 ui64CRTimer; -+ /* time when this FAULT_INFO entry was added. used for timing -+ * reference against the map/unmap information -+ */ -+ IMG_UINT64 ui64When; -+ IMG_UINT32 ui32FaultInfoFlags; -+} FAULT_INFO; -+ -+/* history list of page faults. 
-+ * Keeps the first `n` page faults and the last `n` page faults, like the FW -+ * HWR log -+ */ -+typedef struct _FAULT_INFO_LOG_ -+{ -+ IMG_UINT32 ui32Head; -+ /* the number of faults in this log need not correspond exactly to -+ * the HWINFO number of the FW, as the FW HWINFO log may contain -+ * non-page fault HWRs -+ */ -+ FAULT_INFO asFaults[RGXFWIF_HWINFO_MAX]; -+} FAULT_INFO_LOG; -+ -+#define FAULT_INFO_PROC_INFO (0x1U) -+#define FAULT_INFO_DEVMEM_HIST (0x2U) -+ -+static FAULT_INFO_LOG gsFaultInfoLog = { 0 }; -+ -+static void _FillAppForFWFaults(PVRSRV_RGXDEV_INFO *psDevInfo, -+ FAULT_INFO *psInfo, -+ RGXMEM_PROCESS_INFO *psProcInfo) -+{ -+ IMG_UINT32 i, j; -+ -+ for (i = 0; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) -+ { -+ for (j = 0; j < DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS; j++) -+ { -+ IMG_BOOL bFound; -+ -+ RGXMEM_PROCESS_INFO *psProcInfo = &psInfo->asQueryOut[i].sResults[j].sProcessInfo; -+ bFound = RGXPCPIDToProcessInfo(psDevInfo, -+ psProcInfo->uiPID, -+ psProcInfo); -+ if (!bFound) -+ { -+ OSStringLCopy(psProcInfo->szProcessName, -+ "(unknown)", -+ sizeof(psProcInfo->szProcessName)); -+ } -+ } -+ } -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function _PrintFaultInfo -+ -+ @Description -+ -+ Print all the details of a page fault from a FAULT_INFO structure -+ -+ @Input pfnDumpDebugPrintf - The debug printf function -+ @Input pvDumpDebugFile - Optional file identifier to be passed to the -+ 'printf' function if required -+ @Input psInfo - The page fault occurrence to print -+ -+ @Return void -+ -+******************************************************************************/ -+static void _PrintFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ void *pvDumpDebugFile, -+ FAULT_INFO *psInfo, -+ const IMG_CHAR* pszIndent) -+{ -+ IMG_UINT32 i; -+ IMG_UINT64 ui64Seconds, ui64Nanoseconds; -+ -+ RGXConvertOSTimestampToSAndNS(psInfo->ui64When, &ui64Seconds, &ui64Nanoseconds); -+ -+ if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_PROC_INFO)) -+ { -+ IMG_PID uiPID = (psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE || psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_PM) ? 
-+ 0 : psInfo->sProcessInfo.uiPID; -+ -+ PVR_DUMPDEBUG_LOG("%sDevice memory history for page fault address " IMG_DEV_VIRTADDR_FMTSPEC -+ ", CRTimer: 0x%016" IMG_UINT64_FMTSPECX -+ ", PID: %u (%s, unregistered: %u) OS time: " -+ "%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC, -+ pszIndent, -+ psInfo->sFaultDevVAddr.uiAddr, -+ psInfo->ui64CRTimer, -+ uiPID, -+ psInfo->sProcessInfo.szProcessName, -+ psInfo->sProcessInfo.bUnregistered, -+ ui64Seconds, -+ ui64Nanoseconds); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("%sCould not find PID for device memory history on PC of the fault", pszIndent); -+ } -+ -+ if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_DEVMEM_HIST)) -+ { -+ for (i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) -+ { -+ const IMG_CHAR *pszWhich = NULL; -+ -+ switch (i) -+ { -+ case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING: -+ pszWhich = "Preceding page"; -+ break; -+ case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED: -+ pszWhich = "Faulted page"; -+ break; -+ case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT: -+ pszWhich = "Next page"; -+ break; -+ } -+ -+ PVR_DUMPDEBUG_LOG("%s %s:", pszIndent, pszWhich); -+ _PrintDevicememHistoryQueryOut(pfnDumpDebugPrintf, pvDumpDebugFile, -+ &psInfo->sProcessInfo, -+ &psInfo->asQueryOut[i], -+ pszIndent); -+ } -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("%s No matching Devmem History for fault address", pszIndent); -+ DevicememHistoryDumpRecordStats(psDevNode, pfnDumpDebugPrintf, pvDumpDebugFile); -+ PVR_DUMPDEBUG_LOG("%s Records Searched -" -+ " PP:%"IMG_UINT64_FMTSPEC -+ " FP:%"IMG_UINT64_FMTSPEC -+ " NP:%"IMG_UINT64_FMTSPEC, -+ pszIndent, -+ psInfo->asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING].ui64SearchCount, -+ psInfo->asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED].ui64SearchCount, -+ psInfo->asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_NEXT].ui64SearchCount); -+ } -+} -+ -+static void _RecordFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo, -+ FAULT_INFO *psInfo, -+ IMG_DEV_VIRTADDR sFaultDevVAddr, -+ IMG_DEV_PHYADDR sPCDevPAddr, -+ IMG_UINT64 ui64CRTimer, -+ IMG_UINT32 ui32PageSizeBytes) -+{ -+ IMG_BOOL bFound = IMG_FALSE, bIsPMFault = IMG_FALSE; -+ RGXMEM_PROCESS_INFO sProcessInfo; -+ -+ psInfo->ui32FaultInfoFlags = 0; -+ psInfo->sFaultDevVAddr = sFaultDevVAddr; -+ psInfo->ui64CRTimer = ui64CRTimer; -+ psInfo->ui64When = OSClockns64(); -+ -+ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) -+ { -+ /* Check if this is PM fault */ -+ if (psInfo->sMMUFaultData.eType == MMU_FAULT_TYPE_PM) -+ { -+ bIsPMFault = IMG_TRUE; -+ bFound = IMG_TRUE; -+ sProcessInfo.uiPID = RGXMEM_SERVER_PID_PM; -+ OSStringLCopy(sProcessInfo.szProcessName, "PM", sizeof(sProcessInfo.szProcessName)); -+ sProcessInfo.szProcessName[sizeof(sProcessInfo.szProcessName) - 1] = '\0'; -+ sProcessInfo.bUnregistered = IMG_FALSE; -+ } -+ else -+ { -+ /* look up the process details for the faulting page catalogue */ -+ bFound = RGXPCAddrToProcessInfo(psDevInfo, sPCDevPAddr, &sProcessInfo); -+ } -+ -+ if (bFound) -+ { -+ IMG_BOOL bHits; -+ -+ psInfo->ui32FaultInfoFlags = FAULT_INFO_PROC_INFO; -+ psInfo->sProcessInfo = sProcessInfo; -+ -+ if (bIsPMFault) -+ { -+ bHits = IMG_TRUE; -+ } -+ else -+ { -+ /* get any DevicememHistory data for the faulting address */ -+ bHits = _GetDevicememHistoryData(psDevInfo->psDeviceNode, -+ sProcessInfo.uiPID, -+ sFaultDevVAddr, -+ psInfo->asQueryOut, -+ ui32PageSizeBytes); -+ -+ if (bHits) -+ { -+ psInfo->ui32FaultInfoFlags |= FAULT_INFO_DEVMEM_HIST; -+ -+ /* if the page fault was caused by the firmware 
then get information about -+ * which client application created the related allocations. -+ * -+ * Fill in the process info data for each query result. -+ */ -+ -+ if (sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE) -+ { -+ _FillAppForFWFaults(psDevInfo, psInfo, &sProcessInfo); -+ } -+ } -+ } -+ } -+ } -+} -+ -+void RGXDumpFaultAddressHostView(MMU_FAULT_DATA *psFaultData, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ const IMG_CHAR* pszIndent) -+{ -+ MMU_LEVEL eTopLevel; -+ const IMG_CHAR szPageLevel[][4] = {"", "PTE", "PDE", "PCE" }; -+ const IMG_CHAR szPageError[][3] = {"", "PT", "PD", "PC" }; -+ -+ eTopLevel = psFaultData->eTopLevel; -+ -+ if (psFaultData->eType == MMU_FAULT_TYPE_UNKNOWN) -+ { -+ PVR_DUMPDEBUG_LOG("%sNo live host MMU data available", pszIndent); -+ return; -+ } -+ else if (psFaultData->eType == MMU_FAULT_TYPE_PM) -+ { -+ PVR_DUMPDEBUG_LOG("%sPM faulted at PC address = 0x%016" IMG_UINT64_FMTSPECx, pszIndent, psFaultData->sLevelData[MMU_LEVEL_0].ui64Address); -+ } -+ else -+ { -+ MMU_LEVEL eCurrLevel; -+ PVR_ASSERT(eTopLevel < MMU_LEVEL_LAST); -+ -+ for (eCurrLevel = eTopLevel; eCurrLevel > MMU_LEVEL_0; eCurrLevel--) -+ { -+ MMU_LEVEL_DATA *psMMULevelData = &psFaultData->sLevelData[eCurrLevel]; -+ if (psMMULevelData->ui64Address) -+ { -+ if (psMMULevelData->uiBytesPerEntry == 4) -+ { -+ PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%08x and is %s", -+ pszIndent, -+ szPageLevel[eCurrLevel], -+ psMMULevelData->ui32Index, -+ (IMG_UINT) psMMULevelData->ui64Address, -+ psMMULevelData->psDebugStr); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s", -+ pszIndent, -+ szPageLevel[eCurrLevel], -+ psMMULevelData->ui32Index, -+ psMMULevelData->ui64Address, -+ psMMULevelData->psDebugStr); -+ } -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("%s%s index (%d) out of bounds (%d)", -+ pszIndent, -+ szPageError[eCurrLevel], -+ psMMULevelData->ui32Index, -+ psMMULevelData->ui32NumOfEntries); -+ break; -+ } -+ } -+ } -+ -+} -+ -+void RGXDumpFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ const RGX_HWRINFO *psHWRInfo, -+ IMG_UINT32 ui32ReadIndex, -+ IMG_DEV_VIRTADDR *psFaultDevVAddr, -+ IMG_DEV_PHYADDR *psPCDevPAddr, -+ bool bPMFault, -+ IMG_UINT32 ui32PageSize) -+{ -+ FAULT_INFO *psInfo; -+ -+ OSLockAcquire(psDevInfo->hDebugFaultInfoLock); -+ -+ /* Find the matching Fault Info for this HWRInfo */ -+ psInfo = &gsFaultInfoLog.asFaults[ui32ReadIndex]; -+ -+ /* if they do not match, we need to update the psInfo */ -+ if ((psInfo->ui64CRTimer != psHWRInfo->ui64CRTimer) || -+ (psInfo->sFaultDevVAddr.uiAddr != psFaultDevVAddr->uiAddr)) -+ { -+ MMU_FAULT_DATA *psFaultData = &psInfo->sMMUFaultData; -+ -+ psFaultData->eType = MMU_FAULT_TYPE_UNKNOWN; -+ -+ if (bPMFault) -+ { -+ /* PM fault and we dump PC details only */ -+ psFaultData->eTopLevel = MMU_LEVEL_0; -+ psFaultData->eType = MMU_FAULT_TYPE_PM; -+ psFaultData->sLevelData[MMU_LEVEL_0].ui64Address = psPCDevPAddr->uiAddr; -+ } -+ else -+ { -+ RGXCheckFaultAddress(psDevInfo, psFaultDevVAddr, psPCDevPAddr, psFaultData); -+ } -+ -+ _RecordFaultInfo(psDevInfo, psInfo, -+ *psFaultDevVAddr, *psPCDevPAddr, psHWRInfo->ui64CRTimer, -+ _PageSizeHWToBytes(ui32PageSize)); -+ -+ } -+ -+ RGXDumpFaultAddressHostView(&psInfo->sMMUFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_NORMAL_INDENT); -+ -+ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) -+ { -+ _PrintFaultInfo(pfnDumpDebugPrintf, 
psDevInfo->psDeviceNode, pvDumpDebugFile, psInfo, DD_NORMAL_INDENT); -+ } -+ -+ OSLockRelease(psDevInfo->hDebugFaultInfoLock); -+} -+ -+void RGXConvertOSTimestampToSAndNS(IMG_UINT64 ui64OSTimer, -+ IMG_UINT64 *pui64Seconds, -+ IMG_UINT64 *pui64Nanoseconds) -+{ -+ IMG_UINT32 ui32Remainder; -+ -+ *pui64Seconds = OSDivide64r64(ui64OSTimer, 1000000000, &ui32Remainder); -+ *pui64Nanoseconds = ui64OSTimer - (*pui64Seconds * 1000000000ULL); -+} -+ -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXDebugRequestProcess -+ -+ @Description -+ -+ This function will print out the debug for the specified level of verbosity -+ -+ @Input pfnDumpDebugPrintf - Optional replacement print function -+ @Input pvDumpDebugFile - Optional file identifier to be passed to the -+ 'printf' function if required -+ @Input psDevInfo - RGX device info -+ @Input ui32VerbLevel - Verbosity level -+ -+ @Return void -+ -+******************************************************************************/ -+static -+void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32VerbLevel) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; -+ PVRSRV_DEV_POWER_STATE ePowerState; -+ IMG_BOOL bRGXPoweredON; -+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; -+ const RGXFWIF_OSDATA *psFwOsData = psDevInfo->psRGXFWIfFwOsData; -+ IMG_BOOL bPwrLockAlreadyHeld; -+ -+ bPwrLockAlreadyHeld = PVRSRVPwrLockIsLockedByMe(psDeviceNode); -+ if (!bPwrLockAlreadyHeld) -+ { -+ /* Only acquire the power-lock if not already held by the calling context */ -+ eError = PVRSRVPowerLock(psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ return; -+ } -+ } -+ /* This should satisfy all accesses below */ -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks, -+ INVALIDATE); -+ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Error retrieving RGX power state. 
No debug info dumped.", -+ __func__)); -+ goto Exit; -+ } -+ -+ if (PVRSRV_VZ_MODE_IS(NATIVE) && (RGX_NUM_DRIVERS_SUPPORTED > 1)) -+ { -+ PVR_DUMPDEBUG_LOG("Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", -+ 1, RGX_NUM_DRIVERS_SUPPORTED); -+ } -+ -+ PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d Start ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID); -+ -+ bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON); -+ -+ PVR_DUMPDEBUG_LOG("------[ RGX Info ]------"); -+ PVR_DUMPDEBUG_LOG("Device Node (Info): %p (%p)", psDevInfo->psDeviceNode, psDevInfo); -+ DevicememHistoryDumpRecordStats(psDevInfo->psDeviceNode, pfnDumpDebugPrintf, pvDumpDebugFile); -+ PVR_DUMPDEBUG_LOG("RGX BVNC: %d.%d.%d.%d (%s)", psDevInfo->sDevFeatureCfg.ui32B, -+ psDevInfo->sDevFeatureCfg.ui32V, -+ psDevInfo->sDevFeatureCfg.ui32N, -+ psDevInfo->sDevFeatureCfg.ui32C, -+ PVR_ARCH_NAME); -+ PVR_DUMPDEBUG_LOG("RGX Device State: %s", PVRSRVGetDebugDevStateString(psDeviceNode->eDevState)); -+ PVR_DUMPDEBUG_LOG("RGX Power State: %s", _RGXGetDebugDevPowerStateString(ePowerState)); -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ if (psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated) -+ { -+ PVR_DUMP_FIRMWARE_INFO(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("FW info: UNINITIALIZED"); -+ } -+ } -+ else -+ { -+ PVR_DUMP_FIRMWARE_INFO_HDR(psDevInfo->sFWInfoHeader); -+ } -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TILE_REGION_PROTECTION)) -+ { -+#if defined(SUPPORT_TRP) -+ PVR_DUMPDEBUG_LOG("TRP: HW support - Yes; SW enabled"); -+#else -+ PVR_DUMPDEBUG_LOG("TRP: HW support - Yes; SW disabled"); -+#endif -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("TRP: HW support - No"); -+ } -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, WORKGROUP_PROTECTION)) -+ { -+#if defined(SUPPORT_WGP) -+ PVR_DUMPDEBUG_LOG("WGP: HW support - Yes; SW enabled"); -+#else -+ PVR_DUMPDEBUG_LOG("WGP: HW support - Yes; SW disabled"); -+#endif -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("WGP: HW support - No"); -+ } -+ -+ RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, bRGXPoweredON); -+ -+ /* Dump out the kernel CCB. */ -+ { -+ const RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; -+ const RGXFWIF_CCB_CTL *psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psKernelCCBCtl, INVALIDATE); -+ -+ if (psKCCBCtl != NULL) -+ { -+ PVR_DUMPDEBUG_LOG("RGX Kernel CCB WO:0x%X RO:0x%X", -+ psKCCBCtlLocal->ui32WriteOffset, -+ psKCCBCtl->ui32ReadOffset); -+ } -+ } -+ -+ /* Dump out the firmware CCB. */ -+ { -+ const RGXFWIF_CCB_CTL *psFCCBCtl = psDevInfo->psFirmwareCCBCtl; -+ const RGXFWIF_CCB_CTL *psFCCBCtlLocal = psDevInfo->psFirmwareCCBCtlLocal; -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psFirmwareCCBCtl, INVALIDATE); -+ -+ if (psFCCBCtl != NULL) -+ { -+ PVR_DUMPDEBUG_LOG("RGX Firmware CCB WO:0x%X RO:0x%X", -+ psFCCBCtl->ui32WriteOffset, -+ psFCCBCtlLocal->ui32ReadOffset); -+ } -+ } -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Dump out the Workload estimation CCB. 
*/ -+ const RGXFWIF_CCB_CTL *psWorkEstCCBCtl = psDevInfo->psWorkEstFirmwareCCBCtl; -+ const RGXFWIF_CCB_CTL *psWorkEstCCBCtlLocal = psDevInfo->psWorkEstFirmwareCCBCtlLocal; -+ -+ if (psWorkEstCCBCtl != NULL) -+ { -+ RGXFwSharedMemCacheOpPtr(psWorkEstCCBCtl, INVALIDATE); -+ PVR_DUMPDEBUG_LOG("RGX WorkEst CCB WO:0x%X RO:0x%X", -+ psWorkEstCCBCtl->ui32WriteOffset, -+ psWorkEstCCBCtlLocal->ui32ReadOffset); -+ } -+ } -+#endif -+ -+ RGXFwSharedMemCacheOpPtr(psFwOsData, -+ INVALIDATE); -+ -+ if (psFwOsData != NULL) -+ { -+ /* Dump the KCCB commands executed */ -+ PVR_DUMPDEBUG_LOG("RGX Kernel CCB commands executed = %d", -+ psFwOsData->ui32KCCBCmdsExecuted); -+ -+#if defined(PVRSRV_STALLED_CCB_ACTION) -+ /* Dump the number of times we have performed a forced UFO update, -+ * and (if non-zero) the timestamp of the most recent occurrence/ -+ */ -+ PVR_DUMPDEBUG_LOG("RGX SLR: Forced UFO updates requested = %d", -+ psFwOsData->ui32ForcedUpdatesRequested); -+ if (psFwOsData->ui32ForcedUpdatesRequested > 0) -+ { -+ IMG_UINT8 ui8Idx; -+ IMG_UINT64 ui64Seconds, ui64Nanoseconds; -+ -+ if (psFwOsData->ui64LastForcedUpdateTime > 0ULL) -+ { -+ RGXConvertOSTimestampToSAndNS(psFwOsData->ui64LastForcedUpdateTime, &ui64Seconds, &ui64Nanoseconds); -+ PVR_DUMPDEBUG_LOG("RGX SLR: (most recent forced update was around %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ")", -+ ui64Seconds, ui64Nanoseconds); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("RGX SLR: (unable to force update as fence contained no sync checkpoints)"); -+ } -+ /* Dump SLR log */ -+ if (psFwOsData->sSLRLogFirst.aszCCBName[0]) -+ { -+ RGXConvertOSTimestampToSAndNS(psFwOsData->sSLRLogFirst.ui64Timestamp, &ui64Seconds, &ui64Nanoseconds); -+ PVR_DUMPDEBUG_LOG("RGX SLR:{%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC -+ "} Fence found on context 0x%x '%.*s' has %d UFOs", -+ ui64Seconds, ui64Nanoseconds, -+ psFwOsData->sSLRLogFirst.ui32FWCtxAddr, -+ PVR_SLR_LOG_STRLEN, psFwOsData->sSLRLogFirst.aszCCBName, -+ psFwOsData->sSLRLogFirst.ui32NumUFOs); -+ } -+ for (ui8Idx=0; ui8Idx<PVR_SLR_LOG_ENTRIES; ui8Idx++) -+ { -+ if (psFwOsData->sSLRLog[ui8Idx].aszCCBName[0]) -+ { -+ RGXConvertOSTimestampToSAndNS(psFwOsData->sSLRLog[ui8Idx].ui64Timestamp, &ui64Seconds, &ui64Nanoseconds); -+ PVR_DUMPDEBUG_LOG("RGX SLR:[%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC -+ "] Fence found on context 0x%x '%.*s' has %d UFOs", -+ ui64Seconds, ui64Nanoseconds, -+ psFwOsData->sSLRLog[ui8Idx].ui32FWCtxAddr, -+ PVR_SLR_LOG_STRLEN, psFwOsData->sSLRLog[ui8Idx].aszCCBName, -+ psFwOsData->sSLRLog[ui8Idx].ui32NumUFOs); -+ } -+ } -+ } -+#else -+ PVR_DUMPDEBUG_LOG("RGX SLR: Disabled"); -+#endif -+ -+ /* Dump the error counts */ -+ PVR_DUMPDEBUG_LOG("RGX Errors: WGP:%d, TRP:%d", -+ psDevInfo->sErrorCounts.ui32WGPErrorCount, -+ psDevInfo->sErrorCounts.ui32TRPErrorCount); -+ -+ /* Dump the IRQ info for threads or OS IDs */ -+#if defined(RGX_FW_IRQ_OS_COUNTERS) -+ /* only Host has access to registers containing IRQ counters */ -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+#endif -+ { -+ IMG_UINT32 ui32idx; -+ -+ for_each_irq_cnt(ui32idx) -+ { -+ IMG_UINT32 ui32IrqCnt; -+ -+ get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo); -+ if (ui32IrqCnt) -+ { -+ PVR_DUMPDEBUG_LOG(MSG_IRQ_CNT_TYPE "%u: FW IRQ count = %u", ui32idx, ui32IrqCnt); -+#if defined(RGX_FW_IRQ_OS_COUNTERS) -+ if (ui32idx == RGXFW_HOST_DRIVER_ID) -+#endif -+ { -+ PVR_DUMPDEBUG_LOG("Last sampled IRQ count in LISR = %u", psDevInfo->aui32SampleIRQCount[ui32idx]); -+ } -+ } -+ } -+ } -+ } -+ -+ /* Dump the FW Sys config flags on the Host */ -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ const RGXFWIF_SYSDATA
*psFwSysData = psDevInfo->psRGXFWIfFwSysData; -+ IMG_CHAR sFwSysFlagsDescription[MAX_FW_DESCRIPTION_LENGTH]; -+ -+ if (!psFwSysData) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Fw Sys Data is not mapped into CPU space", __func__)); -+ goto Exit; -+ } -+ -+ RGXFwSharedMemCacheOpValue(psFwSysData->ui32ConfigFlags, -+ INVALIDATE); -+ -+ _GetFwSysFlagsDescription(sFwSysFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwSysData->ui32ConfigFlags); -+ PVR_DUMPDEBUG_LOG("FW System config flags = 0x%08X (%s)", psFwSysData->ui32ConfigFlags, sFwSysFlagsDescription); -+ } -+ -+ /* Dump the FW OS config flags */ -+ { -+ IMG_CHAR sFwOsFlagsDescription[MAX_FW_DESCRIPTION_LENGTH]; -+ -+ if (!psFwOsData) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Fw Os Data is not mapped into CPU space", __func__)); -+ goto Exit; -+ } -+ -+ _GetFwOsFlagsDescription(sFwOsFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwOsData->ui32FwOsConfigFlags); -+ PVR_DUMPDEBUG_LOG("FW OS config flags = 0x%08X (%s)", psFwOsData->ui32FwOsConfigFlags, sFwOsFlagsDescription); -+ } -+ -+ if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ eError = RGXDumpRGXRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: RGXDumpRGXRegisters failed (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ } -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG(" (!) %s. No registers dumped", PVRSRV_VZ_MODE_IS(GUEST) ? "Guest Mode of operation" : "RGX power is down"); -+ } -+ -+ PVR_DUMPDEBUG_LOG("------[ RGX FW Trace Info ]------"); -+ -+ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) -+ { -+ IMG_INT tid; -+ /* Dump FW trace information */ -+ if (psRGXFWIfTraceBufCtl != NULL) -+ { -+ RGX_FWT_LOGTYPE eFWTLogType = psDevInfo->eDebugDumpFWTLogType; -+ -+ if (eFWTLogType == RGX_FWT_LOGTYPE_NONE) -+ { -+ PVR_DUMPDEBUG_LOG("Firmware trace printing disabled."); -+ } -+ else -+ { -+ RGXFwSharedMemCacheOpValue(psRGXFWIfTraceBufCtl->ui32LogType, INVALIDATE); -+ -+ for (tid = 0; tid < RGXFW_THREAD_NUM; tid++) -+ { -+ IMG_UINT32 *pui32TraceBuffer; -+ -+ if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) -+ { -+ PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")", -+ ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")), -+ RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType) -+ ); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("Debug log type: none"); -+ } -+ -+ pui32TraceBuffer = psDevInfo->apui32TraceBuffer[tid]; -+ -+ /* Skip if trace buffer is not allocated */ -+ if (pui32TraceBuffer == NULL) -+ { -+ PVR_DUMPDEBUG_LOG("RGX FW thread %d: Trace buffer not yet allocated",tid); -+ continue; -+ } -+ -+ RGXFwSharedMemCacheOpValue(psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer, INVALIDATE); -+ PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace START ]------", tid); -+ PVR_DUMPDEBUG_LOG("FWT[traceptr]: %X", psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer); -+ PVR_DUMPDEBUG_LOG("FWT[tracebufsize]: %X", psDevInfo->ui32TraceBufSizeInDWords); -+ -+ if (eFWTLogType == RGX_FWT_LOGTYPE_BINARY) -+ { -+ RGXDumpFirmwareTraceBinary(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBufCtl, tid); -+ } -+ else if (eFWTLogType == RGX_FWT_LOGTYPE_DECODED) -+ { -+ RGXDumpFirmwareTraceDecoded(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBufCtl, tid); -+ } -+ else if (eFWTLogType == RGX_FWT_LOGTYPE_PARTIAL) -+ { -+ RGXDumpFirmwareTracePartial(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, 
psRGXFWIfTraceBufCtl, tid); -+ } -+ -+ PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace END ]------", tid); -+ } -+ } -+ } -+ -+ { -+ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) -+ { -+ PVR_DUMPDEBUG_LOG("------[ Full CCB Status ]------"); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("------[ FWCtxs Next CMD ]------"); -+ } -+ -+ RGXDumpAllContextInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); -+ } -+ } -+ -+ PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d End ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID); -+ -+Exit: -+ if (!bPwrLockAlreadyHeld) -+ { -+ PVRSRVPowerUnlock(psDeviceNode); -+ } -+} -+ -+/*! -+ ****************************************************************************** -+ -+ @Function RGXDebugRequestNotify -+ -+ @Description Dump the debug data for RGX -+ -+ ******************************************************************************/ -+static void RGXDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle, -+ IMG_UINT32 ui32VerbLevel, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = hDbgRequestHandle; -+ -+ /* Only action the request if we've fully init'ed */ -+ if (psDevInfo->bDevInit2Done) -+ { -+ RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui32VerbLevel); -+ } -+} -+ -+PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ return PVRSRVRegisterDeviceDbgRequestNotify(&psDevInfo->hDbgReqNotify, -+ psDevInfo->psDeviceNode, -+ RGXDebugRequestNotify, -+ DEBUG_REQUEST_RGX, -+ psDevInfo); -+} -+ -+PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ if (psDevInfo->hDbgReqNotify) -+ { -+ return PVRSRVUnregisterDeviceDbgRequestNotify(psDevInfo->hDbgReqNotify); -+ } -+ -+ /* No notifier registered */ -+ return PVRSRV_OK; -+} -+ -+/****************************************************************************** -+ End of file (rgxdebug_common.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxdebug_common.h b/drivers/gpu/drm/img-rogue/rgxdebug_common.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxdebug_common.h -@@ -0,0 +1,388 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX debug header file -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the RGX debugging functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGXDEBUG_COMMON_H) -+#define RGXDEBUG_COMMON_H -+ -+#include "pvrsrv_error.h" -+#include "img_types.h" -+#include "device.h" -+#include "pvr_notifier.h" -+#include "pvrsrv.h" -+#include "rgxdevice.h" -+#include "rgxfwmemctx.h" -+ -+#define DD_NORMAL_INDENT " " -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+extern const IMG_CHAR * const gapszMipsPermissionPTFlags[4]; -+extern const IMG_CHAR * const gapszMipsCoherencyPTFlags[8]; -+extern const IMG_CHAR * const gapszMipsDirtyGlobalValidPTFlags[8]; -+#endif -+ -+/** -+ * Debug utility macro for printing FW IRQ count and Last sampled IRQ count in -+ * LISR for each RGX FW thread. -+ * Macro takes pointer to PVRSRV_RGXDEV_INFO as input. -+ */ -+ -+#if defined(RGX_FW_IRQ_OS_COUNTERS) -+#define for_each_irq_cnt(ui32idx) FOREACH_SUPPORTED_DRIVER(ui32idx) -+ -+#define get_irq_cnt_val(ui32Dest, ui32idx, psRgxDevInfo) \ -+ do { \ -+ extern const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OSIDS]; \ -+ ui32Dest = PVRSRV_VZ_MODE_IS(GUEST) ? 0 : OSReadHWReg32((psRgxDevInfo)->pvRegsBaseKM, gaui32FwOsIrqCntRegAddr[ui32idx]); \ -+ } while (false) -+ -+#define MSG_IRQ_CNT_TYPE "OS" -+ -+#else -+ -+#define for_each_irq_cnt(ui32idx) \ -+ for (ui32idx = 0; ui32idx < RGXFW_THREAD_NUM; ui32idx++) -+ -+#define get_irq_cnt_val(ui32Dest, ui32idx, psRgxDevInfo) \ -+ do { \ -+ RGXFwSharedMemCacheOpValue(psRgxDevInfo->psRGXFWIfFwOsData->aui32InterruptCount[ui32idx], \ -+ INVALIDATE); \ -+ ui32Dest = (psRgxDevInfo)->psRGXFWIfFwOsData->aui32InterruptCount[ui32idx]; \ -+ } while (false) -+#define MSG_IRQ_CNT_TYPE "Thread" -+#endif /* RGX_FW_IRQ_OS_COUNTERS */ -+ -+static inline void RGXDEBUG_PRINT_IRQ_COUNT(PVRSRV_RGXDEV_INFO* psRgxDevInfo) -+{ -+#if defined(PVRSRV_NEED_PVR_DPF) && defined(DEBUG) -+ IMG_UINT32 ui32idx; -+ -+ for_each_irq_cnt(ui32idx) -+ { -+ IMG_UINT32 ui32IrqCnt; -+ -+ get_irq_cnt_val(ui32IrqCnt, ui32idx, psRgxDevInfo); -+ -+ PVR_DPF((DBGPRIV_VERBOSE, MSG_IRQ_CNT_TYPE -+ " %u FW IRQ count = %u", ui32idx, ui32IrqCnt)); -+ -+#if defined(RGX_FW_IRQ_OS_COUNTERS) -+ if (ui32idx == RGXFW_HOST_DRIVER_ID) -+#endif -+ { -+ PVR_DPF((DBGPRIV_VERBOSE, "Last sampled IRQ count in LISR = %u", -+ (psRgxDevInfo)->aui32SampleIRQCount[ui32idx])); -+ } -+ } -+#endif /* PVRSRV_NEED_PVR_DPF */ -+} -+ -+/*! 
-+******************************************************************************* -+ -+ @Function RGXDumpFirmwareTrace -+ -+ @Description Dumps the decoded version of the firmware trace buffer. -+ -+ Dump useful debugging info -+ -+ @Input pfnDumpDebugPrintf - Optional replacement print function -+ @Input pvDumpDebugFile - Optional file identifier to be passed to the -+ 'printf' function if required -+ @Input psDevInfo - RGX device info -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+void RGXDumpFirmwareTraceBinary(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl, -+ IMG_UINT32 ui32TID); -+ -+void RGXDumpFirmwareTracePartial(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl, -+ IMG_UINT32 ui32TID); -+ -+void RGXDumpFirmwareTraceDecoded(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl, -+ IMG_UINT32 ui32TID); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXDumpRGXRegisters -+ -+ @Description -+ -+ Dumps an extensive list of RGX registers required for debugging -+ -+ @Input pfnDumpDebugPrintf - Optional replacement print function -+ @Input pvDumpDebugFile - Optional file identifier to be passed to the -+ 'printf' function if required -+ @Input psDevInfo - RGX device info -+ -+ @Return PVRSRV_ERROR PVRSRV_OK on success, error code otherwise -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+#if !defined(NO_HARDWARE) -+/*! -+******************************************************************************* -+ -+ @Function RGXReadMetaCoreReg -+ -+ @Description Read a META core register's value -+ -+ @Input psDevInfo RGX device info -+ @Input ui32RegAddr Register address to read from -+ @Output pui32RegVal Pointer to the resulting register value -+ -+ @Return PVRSRV_ERROR PVRSRV_OK on success, error code otherwise -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXReadMetaCoreReg(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 *pui32RegVal); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXValidateFWImage -+ -+ @Description Validate the currently running firmware -+ against the firmware image -+ -+ @Input pfnDumpDebugPrintf - The debug printf function -+ @Input pvDumpDebugFile - Optional file identifier to be passed to the -+ 'printf' function if required -+ @Input psDevInfo - RGX device info -+ -+ @Return PVRSRV_ERROR PVRSRV_OK on success, error code otherwise -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXValidateFWImage(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ PVRSRV_RGXDEV_INFO *psDevInfo); -+#endif -+ -+#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) -+/*! 
-+******************************************************************************* -+ -+ @Function ValidateFWOnLoad -+ -+ @Description Compare the Firmware image as seen from the CPU point of view -+ against the same memory area as seen from the firmware point -+ of view after first power up. -+ -+ @Input psDevInfo - Device Info -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo); -+#endif -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXDumpRGXDebugSummary -+ -+ @Description -+ -+ Dump a summary in human readable form with the RGX state -+ -+ @Input pfnDumpDebugPrintf - The debug printf function -+ @Input pvDumpDebugFile - Optional file identifier to be passed to the -+ 'printf' function if required -+ @Input psDevInfo - RGX device info -+ @Input bRGXPoweredON - IMG_TRUE if RGX device is on -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_BOOL bRGXPoweredON); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXDebugInit -+ -+ @Description -+ -+ Setup debug requests, calls into PVRSRVRegisterDeviceDbgRequestNotify -+ -+ @Input psDevInfo RGX device info -+ @Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXDebugDeinit -+ -+ @Description -+ -+ Remove debug requests, calls into PVRSRVUnregisterDeviceDbgRequestNotify -+ -+ @Output phNotify Points to debug notifier handle -+ @Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXGetFwMapping -+ -+ @Description Retrieve any of the CPU Physical Address, Device Physical -+ Address or the raw value of the page table entry associated -+ with the firmware virtual address given. -+ -+ @Input psDevInfo Pointer to device info -+ @Input pfnDumpDebugPrintf The debug printf function -+ @Input pvDumpDebugFile Optional file identifier to be passed to -+ the 'printf' function if required -+ @Input ui32FwVA The Fw VA that needs decoding -+ @Output psCpuPA Pointer to the resulting CPU PA -+ @Output psDevPA Pointer to the resulting Dev PA -+ @Output pui64PTE Pointer to the raw Page Table Entry value -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXDocumentFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ const IMG_UINT32 ui32FwVA, -+ const IMG_CPU_PHYADDR sCpuPA, -+ const IMG_DEV_PHYADDR sDevPA, -+ const IMG_UINT64 ui64PTE); -+ -+/*! 
-+******************************************************************************* -+ -+ @Function RGXConvertOSTimestampToSAndNS -+ -+ @Description Convert the OS time to seconds and nanoseconds -+ -+ @Input ui64OSTimer OS time to convert -+ @Output pui64Seconds Pointer to the resulting seconds -+ @Output pui64Nanoseconds Pointer to the resulting nanoseconds -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXConvertOSTimestampToSAndNS(IMG_UINT64 ui64OSTimer, -+ IMG_UINT64 *pui64Seconds, -+ IMG_UINT64 *pui64Nanoseconds); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXDumpAllContextInfo -+ -+ @Description Dump debug info of all contexts on a device -+ -+ @Input psDevInfo Pointer to device info -+ @Input pfnDumpDebugPrintf The debug printf function -+ @Input pvDumpDebugFile Optional file identifier to be passed to -+ the 'printf' function if required -+ @Input ui32VerbLevel Verbosity level -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXDumpAllContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ IMG_UINT32 ui32VerbLevel); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXDumpFaultAddressHostView -+ -+ @Description -+ -+ Dump FW HWR fault status in human readable form. -+ -+ @Input ui32Index - Index of global Fault info -+ @Input pfnDumpDebugPrintf - The debug printf function -+ @Input pvDumpDebugFile - Optional file identifier to be passed to the -+ 'printf' function if required -+ @Return void -+ -+******************************************************************************/ -+void RGXDumpFaultAddressHostView(MMU_FAULT_DATA *psFaultData, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ const IMG_CHAR* pszIndent); -+ -+void RGXDumpFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ const RGX_HWRINFO *psHWRInfo, -+ IMG_UINT32 ui32ReadIndex, -+ IMG_DEV_VIRTADDR *psFaultDevVAddr, -+ IMG_DEV_PHYADDR *psPCDevPAddr, -+ bool bPMFault, -+ IMG_UINT32 ui32PageSize); -+ -+#endif /* RGXDEBUG_COMMON_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxdevice.h b/drivers/gpu/drm/img-rogue/rgxdevice.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxdevice.h -@@ -0,0 +1,912 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX device node header file -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the RGX device node -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGXDEVICE_H) -+#define RGXDEVICE_H -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv_device_types.h" -+#include "mmu_common.h" -+#include "rgx_fwif_km.h" -+#include "cache_ops.h" -+#include "device.h" -+#include "osfunc.h" -+#include "rgxlayer_impl.h" -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+#include "hash.h" -+#endif -+typedef struct _RGX_SERVER_COMMON_CONTEXT_ RGX_SERVER_COMMON_CONTEXT; -+ -+typedef struct { -+ DEVMEM_MEMDESC *psFWFrameworkMemDesc; -+} RGX_COMMON_CONTEXT_INFO; -+ -+ -+/*! -+ ****************************************************************************** -+ * Device state flags -+ *****************************************************************************/ -+#define RGXKM_DEVICE_STATE_ZERO_FREELIST (0x1) /*!< Zeroing the physical pages of reconstructed free lists */ -+#define RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN (0x2) /*!< Used to disable the Devices Watchdog logging */ -+#define RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN (0x4) /*!< Used for validation to inject dust requests every TA/3D kick */ -+#define RGXKM_DEVICE_STATE_CCB_GROW_EN (0x8) /*!< Used to indicate CCB grow is permitted */ -+#define RGXKM_DEVICE_STATE_ENABLE_SPU_UNITS_POWER_MASK_CHANGE_EN (0x10) /*!< Used for validation to enable SPU power state mask change */ -+#define RGXKM_DEVICE_STATE_MASK (0x1F) -+ -+/*! -+ ****************************************************************************** -+ * ECC RAM Fault Validation -+ *****************************************************************************/ -+#define RGXKM_ECC_ERR_INJ_DISABLE 0 -+#define RGXKM_ECC_ERR_INJ_SLC 1 -+#define RGXKM_ECC_ERR_INJ_USC 2 -+#define RGXKM_ECC_ERR_INJ_TPU 3 -+#define RGXKM_ECC_ERR_INJ_RASCAL 4 -+#define RGXKM_ECC_ERR_INJ_MARS 5 -+ -+#define RGXKM_ECC_ERR_INJ_INTERVAL 10U -+ -+/*! 
-+ ****************************************************************************** -+ * GPU DVFS Table -+ *****************************************************************************/ -+ -+#define RGX_GPU_DVFS_TABLE_SIZE 32 -+#define RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US 25000 /* Time required to calibrate a clock frequency the first time */ -+#define RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US 150000 /* Time required for a recalibration after a DVFS transition */ -+#define RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US 10000000 /* Time before the next periodic calibration and correlation */ -+ -+/*! -+ ****************************************************************************** -+ * Global flags for driver validation -+ *****************************************************************************/ -+#define RGX_VAL_FBDC_SIG_CHECK_NOERR_EN (0U) /*!< Not supported on Rogue cores */ -+#define RGX_VAL_FBDC_SIG_CHECK_ERR_EN (0U) /*!< Not supported on Rogue cores */ -+#define RGX_VAL_WGP_SIG_CHECK_NOERR_EN (0x10U) /*!< Enable WGP signature check. Signatures must match */ -+#define RGX_VAL_WGP_SIG_CHECK_ERR_EN (0x20U) /*!< Enable WGP signature check. Signatures must not match */ -+#define RGX_VAL_TRP_SIG_CHECK_NOERR_EN (0U) /*!< Not supported on Rogue cores */ -+#define RGX_VAL_TRP_SIG_CHECK_ERR_EN (0U) /*!< Not supported on Rogue cores */ -+ -+/*! -+ ****************************************************************************** -+ * HWPerf L2 Stream ID type definition. -+ *****************************************************************************/ -+typedef IMG_UINT32 RGX_HWPERF_L2_STREAM_ID; -+/* HWPerf stream for Client HWPerf access. */ -+#define RGX_HWPERF_L2_STREAM_HWPERF 0U -+#if (defined(__linux__) && !defined(__QNXNTO__) && !defined(INTEGRITY_OS)) -+/* HWPerf stream for FTrace HWPerf access. */ -+#define RGX_HWPERF_L2_STREAM_FTRACE 1U -+#define RGX_HWPERF_L2_STREAM_LAST 2U -+#else -+#define RGX_HWPERF_L2_STREAM_LAST 1U -+#endif -+ -+typedef struct _GPU_FREQ_TRACKING_DATA_ -+{ -+ /* Core clock speed estimated by the driver */ -+ IMG_UINT32 ui32EstCoreClockSpeed; -+ -+ /* Amount of successful calculations of the estimated core clock speed */ -+ IMG_UINT32 ui32CalibrationCount; -+} GPU_FREQ_TRACKING_DATA; -+ -+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) -+#define RGX_GPU_FREQ_TRACKING_SIZE 16 -+ -+typedef struct -+{ -+ IMG_UINT64 ui64BeginCRTimestamp; -+ IMG_UINT64 ui64BeginOSTimestamp; -+ -+ IMG_UINT64 ui64EndCRTimestamp; -+ IMG_UINT64 ui64EndOSTimestamp; -+ -+ IMG_UINT32 ui32EstCoreClockSpeed; -+ IMG_UINT32 ui32CoreClockSpeed; -+} GPU_FREQ_TRACKING_HISTORY; -+#endif -+ -+typedef struct _RGX_GPU_DVFS_TABLE_ -+{ -+ /* Beginning of current calibration period (in us) */ -+ IMG_UINT64 ui64CalibrationCRTimestamp; -+ IMG_UINT64 ui64CalibrationOSTimestamp; -+ -+ /* Calculated calibration period (in us) */ -+ IMG_UINT64 ui64CalibrationCRTimediff; -+ IMG_UINT64 ui64CalibrationOSTimediff; -+ -+ /* Current calibration period (in us) */ -+ IMG_UINT32 ui32CalibrationPeriod; -+ -+ /* System layer frequency table and frequency tracking data */ -+ IMG_UINT32 ui32FreqIndex; -+ IMG_UINT32 aui32GPUFrequency[RGX_GPU_DVFS_TABLE_SIZE]; -+ GPU_FREQ_TRACKING_DATA asTrackingData[RGX_GPU_DVFS_TABLE_SIZE]; -+ -+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) -+ IMG_UINT32 ui32HistoryIndex; -+ GPU_FREQ_TRACKING_HISTORY asTrackingHistory[RGX_GPU_FREQ_TRACKING_SIZE]; -+#endif -+} RGX_GPU_DVFS_TABLE; -+ -+ -+/*! 
-+ ****************************************************************************** -+ * GPU utilisation statistics -+ *****************************************************************************/ -+ -+typedef struct _RGXFWIF_TEMP_GPU_UTIL_STATS_ -+{ -+ IMG_UINT64 aaaui64DMOSTmpCounters[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED][RGXFWIF_GPU_UTIL_STATE_NUM]; -+ IMG_UINT64 aaui64DMOSTmpLastWord[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; -+ IMG_UINT64 aaui64DMOSTmpLastState[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; -+ IMG_UINT64 aaui64DMOSTmpLastPeriod[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; -+ IMG_UINT64 aaui64DMOSTmpLastTime[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; -+} RGXFWIF_TEMP_GPU_UTIL_STATS; -+ -+typedef struct _RGXFWIF_GPU_UTIL_STATS_ -+{ -+ IMG_BOOL bValid; /* If TRUE, statistics are valid. -+ FALSE if the driver couldn't get reliable stats. */ -+ IMG_UINT64 ui64GpuStatActive; /* GPU active statistic */ -+ IMG_UINT64 ui64GpuStatBlocked; /* GPU blocked statistic */ -+ IMG_UINT64 ui64GpuStatIdle; /* GPU idle statistic */ -+ IMG_UINT64 ui64GpuStatCumulative; /* Sum of active/blocked/idle stats */ -+ -+ IMG_UINT64 aaui64DMOSStatActive[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS active statistic */ -+ IMG_UINT64 aaui64DMOSStatBlocked[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS blocked statistic */ -+ IMG_UINT64 aaui64DMOSStatIdle[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS idle statistic */ -+ IMG_UINT64 aaui64DMOSStatCumulative[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS sum of active/blocked/idle stats */ -+ -+ IMG_UINT64 ui64TimeStamp; /* Timestamp of the most recent sample of the GPU stats */ -+ -+ RGXFWIF_TEMP_GPU_UTIL_STATS sTempGpuStats; /* Temporary data used to calculate the per-DM per-OS statistics */ -+} RGXFWIF_GPU_UTIL_STATS; -+ -+ -+typedef struct _RGX_REG_CONFIG_ -+{ -+ IMG_BOOL bEnabled; -+ RGXFWIF_REG_CFG_TYPE eRegCfgTypeToPush; -+ IMG_UINT32 ui32NumRegRecords; -+ POS_LOCK hLock; -+} RGX_REG_CONFIG; -+ -+typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC; -+ -+typedef struct -+{ -+ IMG_UINT32 ui32DustCount1; -+ IMG_UINT32 ui32DustCount2; -+ IMG_BOOL bToggle; -+} RGX_DUST_STATE; -+ -+typedef struct _PVRSRV_DEVICE_FEATURE_CONFIG_ -+{ -+ IMG_UINT64 ui64ErnsBrns; -+ IMG_UINT64 ui64Features; -+ IMG_UINT32 ui32B; -+ IMG_UINT32 ui32V; -+ IMG_UINT32 ui32N; -+ IMG_UINT32 ui32C; -+ IMG_UINT32 ui32FeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX]; -+ IMG_UINT32 ui32MAXDMCount; -+ IMG_UINT32 ui32MAXDustCount; -+ IMG_UINT32 ui32SLCSizeInBytes; -+ IMG_PCHAR pszBVNCString; -+}PVRSRV_DEVICE_FEATURE_CONFIG; -+ -+/* This is used to get the value of a specific feature. -+ * Note that it will assert if the feature is disabled or value is invalid. */ -+#define RGX_GET_FEATURE_VALUE(psDevInfo, Feature) \ -+ ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] ) -+ -+/* This is used to check if the feature value (e.g. with an integer value) is available for the currently running BVNC or not */ -+#define RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, Feature) \ -+ ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] < RGX_FEATURE_VALUE_DISABLED ) -+ -+/* This is used to check if the Boolean feature (e.g. 
WITHOUT an integer value) is available for the currently running BVNC or not */ -+#define RGX_IS_FEATURE_SUPPORTED(psDevInfo, Feature) \ -+ BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64Features, RGX_FEATURE_##Feature##_BIT_MASK) -+ -+/* This is used to check if the ERN is available for the currently running BVNC or not */ -+#define RGX_IS_ERN_SUPPORTED(psDevInfo, ERN) \ -+ BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64ErnsBrns, HW_ERN_##ERN##_BIT_MASK) -+ -+/* This is used to check if the BRN is available for the currently running BVNC or not */ -+#define RGX_IS_BRN_SUPPORTED(psDevInfo, BRN) \ -+ BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64ErnsBrns, FIX_HW_BRN_##BRN##_BIT_MASK) -+ -+/* there is a corresponding define in rgxapi.h */ -+#define RGX_MAX_TIMER_QUERIES 16U -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+/*! -+ * The host maintains a 512-deep cache of submitted workloads per device, -+ * i.e. a global look-up table for TA, 3D and compute (depending on the RGX -+ * hardware support present) -+ */ -+ -+/* -+ * For the workload estimation return data array, the max amount of commands the -+ * MTS can have is 255, therefore 512 (LOG2 = 9) is large enough to account for -+ * all corner cases -+ */ -+#define RETURN_DATA_ARRAY_SIZE_LOG2 (9) -+#define RETURN_DATA_ARRAY_SIZE ((1U) << RETURN_DATA_ARRAY_SIZE_LOG2) -+#define RETURN_DATA_ARRAY_WRAP_MASK (RETURN_DATA_ARRAY_SIZE - 1) -+ -+#define WORKLOAD_HASH_SIZE_LOG2 6 -+#define WORKLOAD_HASH_SIZE ((1U) << WORKLOAD_HASH_SIZE_LOG2) -+#define WORKLOAD_HASH_WRAP_MASK (WORKLOAD_HASH_SIZE - 1) -+ -+/*! -+ * Workload characteristics for supported data masters. -+ * All characteristics must match for the workload estimate to be used/updated. -+ */ -+typedef union _RGX_WORKLOAD_ -+{ -+ struct -+ { -+ IMG_UINT32 ui32RenderTargetSize; -+ IMG_UINT32 ui32NumberOfDrawCalls; -+ IMG_UINT32 ui32NumberOfIndices; -+ IMG_UINT32 ui32NumberOfMRTs; -+ } sTA3D; -+ -+ struct -+ { -+ IMG_UINT32 ui32NumberOfWorkgroups; -+ IMG_UINT32 ui32NumberOfWorkitems; -+ } sCompute; -+ -+ struct -+ { -+ IMG_UINT32 ui32Characteristic1; -+ IMG_UINT32 ui32Characteristic2; -+ } sTransfer; -+} RGX_WORKLOAD; -+ -+/*! -+ * Host data used to match the return data (actual cycles count) to the -+ * submitted command packet. -+ * The hash table is a per-DM circular buffer containing a key based on the -+ * workload characteristics. On job completion, the oldest workload data -+ * is evicted if the CB is full and the driver matches the characteristics -+ * to the matching data. -+ * -+ * o If the driver finds a match the existing cycle estimate is averaged with -+ * the actual cycles used. -+ * o Otherwise a new hash entry is created with the actual cycles for this -+ * workload. -+ * -+ * Subsequently if a match is found during command submission, the estimate -+ * is passed to the scheduler, e.g. adjust the GPU frequency if PDVFS is enabled. -+ */ -+typedef struct _WORKLOAD_MATCHING_DATA_ -+{ -+ POS_LOCK psHashLock; -+ HASH_TABLE *psHashTable; /*! existing workload cycle estimates for this DM */ -+ RGX_WORKLOAD asHashKeys[WORKLOAD_HASH_SIZE]; -+ IMG_UINT64 aui64HashData[WORKLOAD_HASH_SIZE]; -+ IMG_UINT32 ui32HashArrayWO; /*! track the most recent workload estimates */ -+} WORKLOAD_MATCHING_DATA; -+ -+/*! -+ * A generic container for the workload matching data for GPU contexts: -+ * rendering (TA, 3D), compute, etc. 
-+ */ -+typedef struct _WORKEST_HOST_DATA_ -+{ -+ union -+ { -+ struct -+ { -+ WORKLOAD_MATCHING_DATA sDataTA; /*!< matching data for TA commands */ -+ WORKLOAD_MATCHING_DATA sData3D; /*!< matching data for 3D commands */ -+ } sTA3D; -+ -+ struct -+ { -+ WORKLOAD_MATCHING_DATA sDataCDM; /*!< matching data for CDM commands */ -+ } sCompute; -+ -+ struct -+ { -+ WORKLOAD_MATCHING_DATA sDataTDM; /*!< matching data for TDM-TQ commands */ -+ } sTransfer; -+ } uWorkloadMatchingData; -+ -+ /* -+ * This is a per-context property, hence the TA and 3D share the same -+ * per render context counter. -+ */ -+ IMG_UINT32 ui32WorkEstCCBReceived; /*!< Used to ensure all submitted work -+ estimation commands are received -+ by the host before clean up. */ -+} WORKEST_HOST_DATA; -+ -+/*! -+ * Entries in the list of submitted workloads, used when the completed command -+ * returns data to the host. -+ * -+ * - the matching data is needed as it holds the hash data -+ * - the host data is needed for completion updates, ensuring memory is not -+ * freed while workload estimates are in-flight. -+ * - the workload characteristic is used in the hash table look-up. -+ */ -+typedef struct _WORKEST_RETURN_DATA_ -+{ -+ WORKEST_HOST_DATA *psWorkEstHostData; -+ WORKLOAD_MATCHING_DATA *psWorkloadMatchingData; -+ RGX_WORKLOAD sWorkloadCharacteristics; -+} WORKEST_RETURN_DATA; -+#endif -+ -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+typedef struct -+{ -+#if defined(PDUMP) -+ IMG_HANDLE hPdumpPages; -+#endif -+ PG_HANDLE sPages; -+ IMG_DEV_PHYADDR sPhysAddr; -+} RGX_MIPS_ADDRESS_TRAMPOLINE; -+#endif -+ -+ -+/*! -+ ****************************************************************************** -+ * RGX Device error counts -+ *****************************************************************************/ -+typedef struct _PVRSRV_RGXDEV_ERROR_COUNTS_ -+{ -+ IMG_UINT32 ui32WGPErrorCount; /*!< count of the number of WGP checksum errors */ -+ IMG_UINT32 ui32TRPErrorCount; /*!< count of the number of TRP checksum errors */ -+} PVRSRV_RGXDEV_ERROR_COUNTS; -+ -+/*! -+ ****************************************************************************** -+ * RGX Debug dump firmware trace log type -+ *****************************************************************************/ -+typedef IMG_UINT32 RGX_FWT_LOGTYPE; -+#define RGX_FWT_LOGTYPE_NONE 0U -+#define RGX_FWT_LOGTYPE_BINARY 1U -+#define RGX_FWT_LOGTYPE_DECODED 2U -+#define RGX_FWT_LOGTYPE_PARTIAL 3U -+ -+/*! 
-+ ****************************************************************************** -+ * RGX Device info -+ *****************************************************************************/ -+typedef struct _PVRSRV_RGXDEV_INFO_ -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ PVRSRV_DEVICE_FEATURE_CONFIG sDevFeatureCfg; -+ -+ IMG_BOOL bDevInit2Done; -+ -+ IMG_BOOL bFirmwareInitialised; -+ IMG_BOOL bPDPEnabled; -+ -+ IMG_HANDLE hDbgReqNotify; -+ -+ /* Kernel mode linear address of device registers */ -+ void __iomem *pvRegsBaseKM; -+ -+ IMG_HANDLE hRegMapping; -+ -+ /* System physical address of device registers */ -+ IMG_CPU_PHYADDR sRegsPhysBase; -+ /* Register region size in bytes */ -+ IMG_UINT32 ui32RegSize; -+ -+ PVRSRV_STUB_PBDESC *psStubPBDescListKM; -+ -+ /* Firmware memory context info */ -+ DEVMEM_CONTEXT *psKernelDevmemCtx; -+ DEVMEM_HEAP *psFirmwareMainHeap; -+ DEVMEM_HEAP *psFirmwareConfigHeap; -+ MMU_CONTEXT *psKernelMMUCtx; -+ -+ void *pvDeviceMemoryHeap; -+ -+ /* Kernel CCB */ -+ DEVMEM_MEMDESC *psKernelCCBCtlMemDesc; /*!< memdesc for Kernel CCB control */ -+ RGXFWIF_CCB_CTL *psKernelCCBCtl; /*!< kernel mapping for Kernel CCB control */ -+ RGXFWIF_CCB_CTL *psKernelCCBCtlLocal; /*!< cpu local copy of Kernel CCB control */ -+ DEVMEM_MEMDESC *psKernelCCBMemDesc; /*!< memdesc for Kernel CCB */ -+ IMG_UINT8 *psKernelCCB; /*!< kernel mapping for Kernel CCB */ -+ DEVMEM_MEMDESC *psKernelCCBRtnSlotsMemDesc; /*!< Return slot array for Kernel CCB commands */ -+ IMG_UINT32 *pui32KernelCCBRtnSlots; /*!< kernel mapping for return slot array */ -+ -+ /* Firmware CCB */ -+ DEVMEM_MEMDESC *psFirmwareCCBCtlMemDesc; /*!< memdesc for Firmware CCB control */ -+ RGXFWIF_CCB_CTL *psFirmwareCCBCtl; /*!< kernel mapping for Firmware CCB control */ -+ RGXFWIF_CCB_CTL *psFirmwareCCBCtlLocal; /*!< cpu local copy of Firmware CCB control */ -+ DEVMEM_MEMDESC *psFirmwareCCBMemDesc; /*!< memdesc for Firmware CCB */ -+ IMG_UINT8 *psFirmwareCCB; /*!< kernel mapping for Firmware CCB */ -+ -+ /* Workload Estimation Firmware CCB */ -+ DEVMEM_MEMDESC *psWorkEstFirmwareCCBCtlMemDesc; /*!< memdesc for Workload Estimation Firmware CCB control */ -+ RGXFWIF_CCB_CTL *psWorkEstFirmwareCCBCtl; /*!< kernel mapping for Workload Estimation Firmware CCB control */ -+ RGXFWIF_CCB_CTL *psWorkEstFirmwareCCBCtlLocal; /*!< cpu local copy of Workload Estimation Firmware CCB control */ -+ DEVMEM_MEMDESC *psWorkEstFirmwareCCBMemDesc; /*!< memdesc for Workload Estimation Firmware CCB */ -+ IMG_UINT8 *psWorkEstFirmwareCCB; /*!< kernel mapping for Workload Estimation Firmware CCB */ -+ -+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) -+ /* Counter dumping */ -+ DEVMEM_MEMDESC *psCounterBufferMemDesc; /*!< mem desc for counter dumping buffer */ -+ POS_LOCK hCounterDumpingLock; /*!< Lock for guarding access to counter dumping buffer */ -+#endif -+ -+ PVRSRV_MEMALLOCFLAGS_T uiFWPoisonOnFreeFlag; /*!< Flag for poisoning FW allocations when freed */ -+ -+ IMG_BOOL bIgnoreHWReportedBVNC; /*!< Ignore BVNC reported by HW */ -+ -+ /* multicore configuration information */ -+ IMG_UINT32 ui32MultiCoreNumCores; /* total cores primary + secondaries. 
0 for non-multi core */ -+ IMG_UINT32 ui32MultiCorePrimaryId; /* primary core id for this device */ -+ IMG_UINT64 *pui64MultiCoreCapabilities; /* capabilities for each core */ -+ -+ /* -+ if we don't preallocate the pagetables we must -+ insert newly allocated page tables dynamically -+ */ -+ void *pvMMUContextList; -+ -+ IMG_UINT32 ui32ClkGateStatusReg; -+ IMG_UINT32 ui32ClkGateStatusMask; -+ -+ DEVMEM_MEMDESC *psRGXFWCodeMemDesc; -+ IMG_DEV_VIRTADDR sFWCodeDevVAddrBase; -+ IMG_UINT32 ui32FWCodeSizeInBytes; -+ DEVMEM_MEMDESC *psRGXFWDataMemDesc; -+ IMG_DEV_VIRTADDR sFWDataDevVAddrBase; -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ RGX_MIPS_ADDRESS_TRAMPOLINE *psTrampoline; -+#endif -+ -+ DEVMEM_MEMDESC *psRGXFWCorememCodeMemDesc; -+ IMG_DEV_VIRTADDR sFWCorememCodeDevVAddrBase; -+ RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; -+ IMG_UINT32 ui32FWCorememCodeSizeInBytes; -+ -+ DEVMEM_MEMDESC *psRGXFWIfCorememDataStoreMemDesc; -+ IMG_DEV_VIRTADDR sFWCorememDataStoreDevVAddrBase; -+ RGXFWIF_DEV_VIRTADDR sFWCorememDataStoreFWAddr; -+ -+ DEVMEM_MEMDESC *psRGXFWAlignChecksMemDesc; -+ -+#if defined(PDUMP) -+ DEVMEM_MEMDESC *psRGXFWSigTAChecksMemDesc; -+ IMG_UINT32 ui32SigTAChecksSize; -+ -+ DEVMEM_MEMDESC *psRGXFWSig3DChecksMemDesc; -+ IMG_UINT32 ui32Sig3DChecksSize; -+ -+ DEVMEM_MEMDESC *psRGXFWSigTDM2DChecksMemDesc; -+ IMG_UINT32 ui32SigTDM2DChecksSize; -+ -+ IMG_BOOL bDumpedKCCBCtlAlready; -+ -+ POS_SPINLOCK hSyncCheckpointSignalSpinLock; /*!< Guards data shared between an atomic & sleepable-context */ -+#endif -+ -+ POS_LOCK hRGXFWIfBufInitLock; /*!< trace buffer lock for initialisation phase */ -+ -+ DEVMEM_MEMDESC *psRGXFWIfTraceBufCtlMemDesc; /*!< memdesc of trace buffer control structure */ -+ DEVMEM_MEMDESC *psRGXFWIfTraceBufferMemDesc[RGXFW_THREAD_NUM]; /*!< memdesc of actual FW trace (log) buffer(s) */ -+ IMG_PUINT32 apui32TraceBuffer[RGXFW_THREAD_NUM]; /*!< Trace buffer address (Host address), to be used by host when reading from trace buffer */ -+ IMG_UINT32 ui32TraceBufSizeInDWords; /*!< CPU local copy of FW Trace buffer size in dwords */ -+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl; /*!< structure containing trace control data and actual trace buffer */ -+ -+ DEVMEM_MEMDESC *psRGXFWIfFwSysDataMemDesc; /*!< memdesc of the firmware-shared system data structure */ -+ RGXFWIF_SYSDATA *psRGXFWIfFwSysData; /*!< structure containing km-firmware shared system data */ -+ -+ DEVMEM_MEMDESC *psRGXFWIfFwOsDataMemDesc; /*!< memdesc of the firmware-shared os structure */ -+ RGXFWIF_OSDATA *psRGXFWIfFwOsData; /*!< structure containing km-firmware shared os data */ -+ -+#if defined(SUPPORT_TBI_INTERFACE) -+ DEVMEM_MEMDESC *psRGXFWIfTBIBufferMemDesc; /*!< memdesc of actual FW TBI buffer */ -+ RGXFWIF_DEV_VIRTADDR sRGXFWIfTBIBuffer; /*!< TBI buffer data */ -+ IMG_UINT32 ui32FWIfTBIBufferSize; -+#endif -+ -+ DEVMEM_MEMDESC *psRGXFWIfHWRInfoBufCtlMemDesc; -+ RGXFWIF_HWRINFOBUF *psRGXFWIfHWRInfoBufCtl; -+ IMG_UINT32 ui32ClockSource; -+ IMG_UINT32 ui32LastClockSource; -+ -+ DEVMEM_MEMDESC *psRGXFWIfGpuUtilFWCbCtlMemDesc; -+ RGXFWIF_GPU_UTIL_FWCB *psRGXFWIfGpuUtilFWCb; -+ -+ DEVMEM_MEMDESC *psRGXFWIfHWPerfBufMemDesc; -+ IMG_BYTE *psRGXFWIfHWPerfBuf; -+ IMG_UINT32 ui32RGXFWIfHWPerfBufSize; /* in bytes */ -+ IMG_UINT32 ui32RGXL2HWPerfBufSize; /* in bytes */ -+ -+ DEVMEM_MEMDESC *psRGXFWIfRegCfgMemDesc; -+ -+ DEVMEM_MEMDESC *psRGXFWIfHWPerfCountersMemDesc; -+ -+ DEVMEM_MEMDESC *psRGXFWIfConnectionCtlMemDesc; -+ RGXFWIF_CONNECTION_CTL *psRGXFWIfConnectionCtl; -+ -+ DEVMEM_MEMDESC 
*psRGXFWHeapGuardPageReserveMemDesc; -+ DEVMEM_MEMDESC *psRGXFWIfSysInitMemDesc; -+ RGXFWIF_SYSINIT *psRGXFWIfSysInit; -+ -+ DEVMEM_MEMDESC *psRGXFWIfOsInitMemDesc; -+ RGXFWIF_OSINIT *psRGXFWIfOsInit; -+ -+ DEVMEM_MEMDESC *psRGXFWIfRuntimeCfgMemDesc; -+ RGXFWIF_RUNTIME_CFG *psRGXFWIfRuntimeCfg; -+ -+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) -+ DEVMEM_MEMDESC *psRGXFWIfActiveContextBufDesc; -+ RGXFWIF_ACTIVE_CONTEXT_BUF_DATA *psRGXFWIfActiveContextBuf; -+#endif -+ -+ /* Premapped firmware memory context info */ -+ DEVMEM_HEAP *psPremappedFwRawHeap[RGX_NUM_DRIVERS_SUPPORTED]; -+ DEVMEM_MEMDESC *psPremappedFwRawMemDesc[RGX_NUM_DRIVERS_SUPPORTED]; -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ /* Array to store data needed for workload estimation when a workload -+ has finished and its cycle time is returned to the host. */ -+ WORKEST_RETURN_DATA asReturnData[RETURN_DATA_ARRAY_SIZE]; -+ IMG_UINT32 ui32ReturnDataWO; -+ POS_LOCK hWorkEstLock; -+#endif -+ -+#if defined(SUPPORT_PDVFS) -+ /** -+ * Host memdesc and pointer to memory containing core clock rate in Hz. -+ * Firmware (PDVFS) updates the memory on changing the core clock rate over -+ * GPIO. -+ * Note: Shared memory needs atomic access from Host driver and firmware, -+ * hence size should not be greater than memory transaction granularity. -+ * Currently it is chosen to be 32 bits. -+ */ -+ DEVMEM_MEMDESC *psRGXFWIFCoreClkRateMemDesc; -+ volatile IMG_UINT32 *pui32RGXFWIFCoreClkRate; -+ /** -+ * Last sampled core clk rate. -+ */ -+ volatile IMG_UINT32 ui32CoreClkRateSnapshot; -+#endif -+ -+ /* -+ HWPerf data for the RGX device -+ */ -+ -+ POS_LOCK hHWPerfLock; /*! Critical section lock that protects HWPerf code -+ * from multiple thread duplicate init/deinit -+ * and loss/freeing of FW & Host resources while in -+ * use in another thread e.g. MSIR. */ -+ -+ IMG_UINT64 ui64HWPerfFilter[RGX_HWPERF_L2_STREAM_LAST]; /*! Event filter for FW events (settable by AppHint) */ -+ IMG_HANDLE hHWPerfStream[RGX_HWPERF_L2_STREAM_LAST]; /*! TL Stream buffer (L2) for firmware event stream */ -+ IMG_UINT32 ui32L2BufMaxPacketSize[RGX_HWPERF_L2_STREAM_LAST]; /*! Max allowed packet size in FW HWPerf TL (L2) buffer */ -+ IMG_BOOL bSuspendHWPerfL2DataCopy[RGX_HWPERF_L2_STREAM_LAST]; /*! Flag to indicate if copying HWPerf data is suspended */ -+ IMG_UINT64 ui64HWPerfFwFilter; /*! Event filter for FW events created from OR-ing ui64HWPerfFilter values. */ -+ IMG_UINT32 uiHWPerfStreamCount; /*! Value indicating if any of the HWPerf streams has been created */ -+ -+ IMG_UINT32 ui32HWPerfHostFilter; /*! Event filter for HWPerfHost stream (settable by AppHint) */ -+ POS_LOCK hLockHWPerfHostStream; /*! Lock guarding access to HWPerfHost stream from multiple threads */ -+ IMG_HANDLE hHWPerfHostStream; /*! TL Stream buffer for host only event stream */ -+ IMG_UINT32 ui32HWPerfHostBufSize; /*! Host side buffer size in bytes */ -+ IMG_UINT32 ui32HWPerfHostLastOrdinal; /*! Ordinal of the last packet emitted in HWPerfHost TL stream. -+ * Guarded by hLockHWPerfHostStream */ -+ IMG_UINT32 ui32HWPerfHostNextOrdinal; /*! Ordinal number for HWPerfHost events. Guarded by hHWPerfHostSpinLock */ -+ IMG_UINT8 *pui8DeferredEvents; /*! List of HWPerfHost events yet to be emitted in the TL stream. -+ * Events generated from atomic context are deferred "emitted" -+ * as the "emission" code can sleep */ -+ IMG_UINT16 ui16DEReadIdx; /*! Read index in the above deferred events buffer */ -+ IMG_UINT16 ui16DEWriteIdx; /*! 
Write index in the above deferred events buffer */ -+ void *pvHostHWPerfMISR; /*! MISR to emit pending/deferred events in HWPerfHost TL stream */ -+ POS_SPINLOCK hHWPerfHostSpinLock; /*! Guards data shared between an atomic & sleepable-context */ -+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) -+ IMG_UINT32 ui32DEHighWatermark; /*! High watermark of deferred events buffer usage. Protected by -+ *! hHWPerfHostSpinLock */ -+ /* Max number of times DeferredEmission waited for an atomic-context to "finish" packet write */ -+ IMG_UINT32 ui32WaitForAtomicCtxPktHighWatermark; /*! Protected by hLockHWPerfHostStream */ -+ /* Whether warning has been logged about an atomic-context packet loss (due to too long wait for "write" finish) */ -+ IMG_BOOL bWarnedAtomicCtxPktLost; -+ /* Max number of times DeferredEmission scheduled-out to give a chance to the right-ordinal packet to be emitted */ -+ IMG_UINT32 ui32WaitForRightOrdPktHighWatermark; /*! Protected by hLockHWPerfHostStream */ -+ /* Whether warning has been logged about an packet loss (due to too long wait for right ordinal to emit) */ -+ IMG_BOOL bWarnedPktOrdinalBroke; -+#endif -+ -+ void *pvGpuFtraceData; -+ -+ /* Poll data for detecting firmware fatal errors */ -+ IMG_UINT32 aui32CrLastPollCount[RGXFW_THREAD_NUM]; -+ IMG_UINT32 ui32KCCBCmdsExecutedLastTime; -+ IMG_BOOL bKCCBCmdsWaitingLastTime; -+ IMG_UINT32 ui32GEOTimeoutsLastTime; -+ IMG_UINT32 ui32InterruptCountLastTime; -+ IMG_UINT32 ui32MissingInterruptsLastTime; -+ -+ /* Client stall detection */ -+ IMG_UINT32 ui32StalledClientMask; -+ -+ IMG_BOOL bWorkEstEnabled; -+ IMG_BOOL bPDVFSEnabled; -+ -+ void *pvLISRData; -+ void *pvMISRData; -+ void *pvAPMISRData; -+ RGX_ACTIVEPM_CONF eActivePMConf; -+ -+ volatile IMG_UINT32 aui32SampleIRQCount[RGXFW_THREAD_NUM]; -+ -+ DEVMEM_MEMDESC *psRGXFaultAddressMemDesc; -+ -+ DEVMEM_MEMDESC *psSLC3FenceMemDesc; -+ -+ /* If we do 10 deferred memory allocations per second, then the ID would wrap around after 13 years */ -+ IMG_UINT32 ui32ZSBufferCurrID; /*!< ID assigned to the next deferred devmem allocation */ -+ IMG_UINT32 ui32FreelistCurrID; /*!< ID assigned to the next freelist */ -+ -+ POS_LOCK hLockZSBuffer; /*!< Lock to protect simultaneous access to ZSBuffers */ -+ DLLIST_NODE sZSBufferHead; /*!< List of on-demand ZSBuffers */ -+ POS_LOCK hLockFreeList; /*!< Lock to protect simultaneous access to Freelists */ -+ DLLIST_NODE sFreeListHead; /*!< List of growable Freelists */ -+ PSYNC_PRIM_CONTEXT hSyncPrimContext; -+ PVRSRV_CLIENT_SYNC_PRIM *psPowSyncPrim; -+ -+ IMG_UINT32 ui32ActivePMReqOk; -+ IMG_UINT32 ui32ActivePMReqDenied; -+ IMG_UINT32 ui32ActivePMReqNonIdle; -+ IMG_UINT32 ui32ActivePMReqRetry; -+ IMG_UINT32 ui32ActivePMReqTotal; -+ -+ IMG_HANDLE hProcessQueuesMISR; -+ -+ IMG_UINT32 ui32DeviceFlags; /*!< Flags to track general device state */ -+ -+ /* GPU DVFS Table */ -+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable; -+ -+ /* Pointer to function returning the GPU utilisation statistics since the last -+ * time the function was called. Supports different users at the same time. -+ * -+ * psReturnStats [out]: GPU utilisation statistics (active high/active low/idle/blocked) -+ * in microseconds since the last time the function was called -+ * by a specific user (identified by hGpuUtilUser) -+ * -+ * Returns PVRSRV_OK in case the call completed without errors, -+ * some other value otherwise. 
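As an illustration of how a pfnGetGpuUtilStats consumer might use the counters described above (active, blocked and idle time in microseconds since that user's previous query, with bValid flagging windows where reliable stats were unavailable), here is a small self-contained sketch. GPU_UTIL_SAMPLE and GpuLoadPercent are hypothetical simplifications, not the RGXFWIF_GPU_UTIL_STATS structure or driver API.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical, flattened view of one utilisation sample for a single user. */
typedef struct {
    bool     bValid;
    uint64_t ui64Active;      /* microseconds spent active since the user's last query  */
    uint64_t ui64Blocked;     /* microseconds spent blocked since the user's last query */
    uint64_t ui64Idle;        /* microseconds spent idle since the user's last query    */
    uint64_t ui64Cumulative;  /* sum of the three counters above                        */
} GPU_UTIL_SAMPLE;

/* Turn one sample into a percentage load figure; returns false if the sample is unusable. */
static bool GpuLoadPercent(const GPU_UTIL_SAMPLE *psSample, unsigned *puiLoadPercent)
{
    if (!psSample->bValid || psSample->ui64Cumulative == 0u)
    {
        return false;  /* the driver could not produce reliable stats for this window */
    }

    *puiLoadPercent = (unsigned)((psSample->ui64Active * 100u) / psSample->ui64Cumulative);
    return true;
}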
-+ */ -+ PVRSRV_ERROR (*pfnGetGpuUtilStats) (PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_HANDLE hGpuUtilUser, -+ RGXFWIF_GPU_UTIL_STATS *psReturnStats); -+ -+ /* Pointer to function that checks if the physical GPU IRQ -+ * line has been asserted and clears it if so */ -+ IMG_BOOL (*pfnRGXAckIrq) (struct _PVRSRV_RGXDEV_INFO_ *psDevInfo); -+ -+ POS_LOCK hGPUUtilLock; -+ -+ /* Register configuration */ -+ RGX_REG_CONFIG sRegCongfig; -+ -+ IMG_BOOL bRGXPowered; -+ DLLIST_NODE sMemoryContextList; -+ -+ POSWR_LOCK hRenderCtxListLock; -+ POSWR_LOCK hComputeCtxListLock; -+ POSWR_LOCK hTransferCtxListLock; -+ POSWR_LOCK hTDMCtxListLock; -+ POSWR_LOCK hMemoryCtxListLock; -+ POSWR_LOCK hKickSyncCtxListLock; -+ -+ /* Linked list of deferred KCCB commands due to a full KCCB. -+ * Access to members sKCCBDeferredCommandsListHead and ui32KCCBDeferredCommandsCount -+ * are protected by the hLockKCCBDeferredCommandsList spin lock. */ -+ POS_SPINLOCK hLockKCCBDeferredCommandsList; /*!< Protects deferred KCCB commands list */ -+ DLLIST_NODE sKCCBDeferredCommandsListHead; -+ IMG_UINT32 ui32KCCBDeferredCommandsCount; /*!< No of commands in the deferred list */ -+ -+ /* Linked lists of contexts on this device */ -+ DLLIST_NODE sRenderCtxtListHead; -+ DLLIST_NODE sComputeCtxtListHead; -+ DLLIST_NODE sTransferCtxtListHead; -+ DLLIST_NODE sTDMCtxtListHead; -+ DLLIST_NODE sKickSyncCtxtListHead; -+ -+ DLLIST_NODE sCommonCtxtListHead; -+ POSWR_LOCK hCommonCtxtListLock; -+ IMG_UINT32 ui32CommonCtxtCurrentID; /*!< ID assigned to the next common context */ -+ -+ POS_LOCK hDebugFaultInfoLock; /*!< Lock to protect the debug fault info list */ -+ POS_LOCK hMMUCtxUnregLock; /*!< Lock to protect list of unregistered MMU contexts */ -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ POS_LOCK hNMILock; /*!< Lock to protect NMI operations */ -+#endif -+ -+#if defined(SUPPORT_VALIDATION) -+ IMG_UINT32 ui32ValidationFlags; /*!< Validation flags for host driver */ -+#endif -+ RGX_DUST_STATE sDustReqState; -+ -+ RGX_LAYER_PARAMS sLayerParams; -+ -+ RGXFWIF_DM eBPDM; /*!< Current breakpoint data master */ -+ IMG_BOOL bBPSet; /*!< A Breakpoint has been set */ -+ POS_LOCK hBPLock; /*!< Lock for break point operations */ -+ -+ IMG_UINT32 ui32CoherencyTestsDone; -+ -+ ATOMIC_T iCCBSubmissionOrdinal; /* Rolling count used to indicate CCB submission order (all CCBs) */ -+ POS_LOCK hCCBRecoveryLock; /* Lock to protect pvEarliestStalledClientCCB and ui32OldestSubmissionOrdinal variables */ -+ void *pvEarliestStalledClientCCB; /* Will point to cCCB command to unblock in the event of a stall */ -+ IMG_UINT32 ui32OldestSubmissionOrdinal; /* Earliest submission ordinal of CCB entry found so far */ -+ IMG_UINT32 ui32SLRHoldoffCounter; /* Decremented each time health check is called until zero. SLR only happen when zero. 
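The deferred-KCCB fields above (list head, count, and the spin lock guarding both) implement a park-and-retry scheme for commands that could not be queued because the kernel CCB was full. A rough, self-contained sketch of that pattern follows; DEFERRED_CMD_LIST, DeferCommand and FlushDeferredCommands are hypothetical names, a POSIX spinlock stands in for the OS abstraction, and the retry callback is invoked under the lock purely to keep the example short (a real driver would drop the lock around submission).

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct DEFERRED_CMD {     /* one parked command; iPayload stands in for the real body */
    struct DEFERRED_CMD *psNext;
    int                  iPayload;
} DEFERRED_CMD;

typedef struct {
    pthread_spinlock_t sLock;     /* role of hLockKCCBDeferredCommandsList */
    DEFERRED_CMD      *psHead;    /* role of sKCCBDeferredCommandsListHead */
    DEFERRED_CMD     **ppsTail;   /* keeps FIFO order when appending       */
    unsigned           uiCount;   /* role of ui32KCCBDeferredCommandsCount */
} DEFERRED_CMD_LIST;

static void InitDeferredList(DEFERRED_CMD_LIST *psList)
{
    pthread_spin_init(&psList->sLock, PTHREAD_PROCESS_PRIVATE);
    psList->psHead  = NULL;
    psList->ppsTail = &psList->psHead;
    psList->uiCount = 0;
}

/* Called when the command queue is full: park the command for a later retry. */
static bool DeferCommand(DEFERRED_CMD_LIST *psList, int iPayload)
{
    DEFERRED_CMD *psCmd = malloc(sizeof(*psCmd));
    if (psCmd == NULL)
        return false;
    psCmd->iPayload = iPayload;
    psCmd->psNext   = NULL;

    pthread_spin_lock(&psList->sLock);    /* head, tail and count only touched under the lock */
    *psList->ppsTail = psCmd;
    psList->ppsTail  = &psCmd->psNext;
    psList->uiCount++;
    pthread_spin_unlock(&psList->sLock);
    return true;
}

/* Called later (e.g. from deferred work): retry parked commands in submission order. */
static void FlushDeferredCommands(DEFERRED_CMD_LIST *psList, bool (*pfnTrySend)(int))
{
    pthread_spin_lock(&psList->sLock);
    while (psList->psHead != NULL && pfnTrySend(psList->psHead->iPayload))
    {
        DEFERRED_CMD *psDone = psList->psHead;
        psList->psHead = psDone->psNext;
        if (psList->psHead == NULL)
            psList->ppsTail = &psList->psHead;
        psList->uiCount--;
        free(psDone);
    }
    pthread_spin_unlock(&psList->sLock);
}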
*/ -+ -+ POS_LOCK hCCBStallCheckLock; /* Lock used to guard against multiple threads simultaneously checking for stalled CCBs */ -+ -+#if defined(SUPPORT_FIRMWARE_GCOV) -+ /* Firmware gcov buffer */ -+ DEVMEM_MEMDESC *psFirmwareGcovBufferMemDesc; /*!< mem desc for Firmware gcov dumping buffer */ -+ IMG_UINT32 ui32FirmwareGcovSize; -+#endif -+ -+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) -+ struct -+ { -+ IMG_UINT64 ui64timerGray; -+ IMG_UINT64 ui64timerBinary; -+ IMG_UINT64 *pui64uscTimers; -+ } sRGXTimerValues; -+#endif -+ -+#if defined(SUPPORT_VALIDATION) -+ struct -+ { -+ IMG_UINT64 ui64RegVal; -+ struct completion sRegComp; -+ } sFwRegs; -+#endif -+ -+ IMG_HANDLE hTQCLISharedMem; /*!< TQ Client Shared Mem PMR */ -+ IMG_HANDLE hTQUSCSharedMem; /*!< TQ USC Shared Mem PMR */ -+ -+#if defined(SUPPORT_VALIDATION) -+ IMG_UINT32 ui32TestSLRInterval; /* Don't enqueue an update sync checkpoint every nth kick */ -+ IMG_UINT32 ui32TestSLRCount; /* (used to test SLR operation) */ -+ IMG_UINT32 ui32SLRSkipFWAddr; -+#endif -+ -+#if defined(SUPPORT_SECURITY_VALIDATION) -+ DEVMEM_MEMDESC *psRGXFWIfSecureBufMemDesc; -+ DEVMEM_MEMDESC *psRGXFWIfNonSecureBufMemDesc; -+#endif -+ -+ /* Timer Queries */ -+ IMG_UINT32 ui32ActiveQueryId; /*!< id of the active line */ -+ IMG_BOOL bSaveStart; /*!< save the start time of the next kick on the device*/ -+ IMG_BOOL bSaveEnd; /*!< save the end time of the next kick on the device*/ -+ -+ DEVMEM_MEMDESC *psStartTimeMemDesc; /*!< memdesc for Start Times */ -+ IMG_UINT64 *pui64StartTimeById; /*!< CPU mapping of the above */ -+ -+ DEVMEM_MEMDESC *psEndTimeMemDesc; /*!< memdesc for End Timer */ -+ IMG_UINT64 *pui64EndTimeById; /*!< CPU mapping of the above */ -+ -+ IMG_UINT32 aui32ScheduledOnId[RGX_MAX_TIMER_QUERIES]; /*!< kicks Scheduled on QueryId */ -+ DEVMEM_MEMDESC *psCompletedMemDesc; /*!< kicks Completed on QueryId */ -+ IMG_UINT32 *pui32CompletedById; /*!< CPU mapping of the above */ -+ -+#if !defined(PVRSRV_USE_BRIDGE_LOCK) -+ POS_LOCK hTimerQueryLock; /*!< lock to protect simultaneous access to timer query members */ -+#endif -+ -+ PVRSRV_RGXDEV_ERROR_COUNTS sErrorCounts; /*!< struct containing device error counts */ -+ -+ IMG_UINT32 ui32HostSafetyEventMask;/*!< mask of the safety events handled by the driver */ -+ -+ RGX_CONTEXT_RESET_REASON eLastDeviceError; /*!< device error reported to client */ -+#if defined(SUPPORT_VALIDATION) -+ IMG_UINT32 ui32ECCRAMErrInjModule; -+ IMG_UINT32 ui32ECCRAMErrInjInterval; -+#endif -+ -+#if defined(SUPPORT_SECURE_ALLOC_KM) -+ PMR *psGenHeapSecMem; /*!< An allocation of secure memory mapped to -+ the general devmem heap. The allocation is -+ created and mapped at driver init. It's used for -+ various purposes. See rgx_fwif_km.h for all use cases. */ -+#endif -+ -+ RGX_FWT_LOGTYPE eDebugDumpFWTLogType; -+ -+ RGX_FW_INFO_HEADER sFWInfoHeader; -+#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) -+ IMG_UINT32 ui32TFBCLossyGroup; /*!< TFBCCompressionControlGroup -+ setting for those cores which support -+ this feature. */ -+#endif -+} PVRSRV_RGXDEV_INFO; -+ -+ -+ -+typedef struct _RGX_TIMING_INFORMATION_ -+{ -+ /*! GPU default core clock speed in Hz */ -+ IMG_UINT32 ui32CoreClockSpeed; -+ -+ /*! Active Power Management: GPU actively requests the host driver to be powered off */ -+ IMG_BOOL bEnableActivePM; -+ -+ /*! Enable the GPU to power off internal Power Islands independently from the host driver */ -+ IMG_BOOL bEnableRDPowIsland; -+ -+ /*! 
Active Power Management: Delay between the GPU idle and the request to the host */ -+ IMG_UINT32 ui32ActivePMLatencyms; -+ -+} RGX_TIMING_INFORMATION; -+ -+typedef struct _RGX_DATA_ -+{ -+ /*! Timing information */ -+ RGX_TIMING_INFORMATION *psRGXTimingInfo; -+} RGX_DATA; -+ -+ -+/* -+ RGX PDUMP register bank name (prefix) -+*/ -+#define RGX_PDUMPREG_NAME "RGXREG" -+ -+#endif /* RGXDEVICE_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxfw_log_helper.h b/drivers/gpu/drm/img-rogue/rgxfw_log_helper.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxfw_log_helper.h -@@ -0,0 +1,79 @@ -+/*************************************************************************/ /*! -+@File rgxfw_log_helper.h -+@Title Firmware TBI logging helper function -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Platform Generic -+@Description This file contains some helper code to make TBI logging possible -+ Specifically, it uses the SFIDLIST xmacro to trace ids back to -+ the original strings. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef RGXFW_LOG_HELPER_H -+#define RGXFW_LOG_HELPER_H -+ -+#include "rgx_fwif_sf.h" -+ -+static const IMG_CHAR *const groups[]= { -+#define X(A,B) #B, -+ RGXFW_LOG_SFGROUPLIST -+#undef X -+}; -+ -+/* idToStringID : Search SFs tuples {id,string} for a matching id. -+ * return index to array if found or RGXFW_SF_LAST if none found. 
-+ * bsearch could be used as ids are in increasing order. */ -+#if defined(RGX_FIRMWARE) -+static IMG_UINT32 idToStringID(IMG_UINT32 ui32CheckData, const RGXFW_STID_FMT *const psSFs) -+#else -+static IMG_UINT32 idToStringID(IMG_UINT32 ui32CheckData, const RGXKM_STID_FMT *const psSFs) -+#endif -+{ -+ IMG_UINT32 i = 0, ui32Id = (IMG_UINT32)RGXFW_SF_LAST; -+ -+ for ( i = 0 ; psSFs[i].ui32Id != (IMG_UINT32)RGXFW_SF_LAST ; i++) -+ { -+ if ( ui32CheckData == psSFs[i].ui32Id ) -+ { -+ ui32Id = i; -+ break; -+ } -+ } -+ return ui32Id; -+} -+ -+#endif /* RGXFW_LOG_HELPER_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxfwcmnctx.c b/drivers/gpu/drm/img-rogue/rgxfwcmnctx.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxfwcmnctx.c -@@ -0,0 +1,755 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX firmware common context utility routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX firmware common context utility routines -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "rgxfwcmnctx.h" -+#include "rgxfwutils.h" -+#include "devicemem_pdump.h" -+#if defined(__linux__) && defined(PVRSRV_TRACE_ROGUE_EVENTS) -+#include "rogue_trace_events.h" -+#endif -+ -+/* -+ * Maximum length of time a DM can run for before the DM will be marked -+ * as out-of-time. 
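An aside on idToStringID in rgxfw_log_helper.h above: its comment notes that the {id, format string} tuples are sorted by id, so the linear scan could be replaced by a binary search. A minimal sketch follows; STID_FMT and SF_LAST are simplified stand-ins for RGXKM_STID_FMT and RGXFW_SF_LAST, and an explicit entry count replaces the sentinel walk.

#include <stdint.h>

#define SF_LAST 0xFFFFFFFFu                      /* stands in for RGXFW_SF_LAST */

typedef struct {
    uint32_t    ui32Id;                          /* stands in for the tuple id field */
    const char *pszFmt;
} STID_FMT;

/* Binary-search variant: valid because the id table is sorted in increasing order.
 * Returns the array index of the matching id, or SF_LAST if no entry matches. */
static uint32_t IdToStringIdBsearch(uint32_t ui32CheckData, const STID_FMT *psSFs, uint32_t ui32Count)
{
    uint32_t ui32Lo = 0, ui32Hi = ui32Count;     /* search the half-open range [lo, hi) */

    while (ui32Lo < ui32Hi)
    {
        uint32_t ui32Mid = ui32Lo + (ui32Hi - ui32Lo) / 2u;

        if (psSFs[ui32Mid].ui32Id == ui32CheckData)
            return ui32Mid;
        if (psSFs[ui32Mid].ui32Id < ui32CheckData)
            ui32Lo = ui32Mid + 1u;
        else
            ui32Hi = ui32Mid;
    }
    return SF_LAST;
}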
CDM has an increased value due to longer running kernels. -+ * -+ * These deadlines are increased on FPGA, EMU and VP due to the slower -+ * execution time of these platforms. PDUMPS are also included since they -+ * are often run on EMU, FPGA or in CSim. -+ */ -+#if defined(FPGA) || defined(EMULATOR) || defined(VIRTUAL_PLATFORM) || defined(PDUMP) -+#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (480000) -+#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (10800000) -+#else -+#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (40000) -+#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (600000) -+#endif -+ -+struct _RGX_SERVER_COMMON_CONTEXT_ { -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ DEVMEM_MEMDESC *psFWCommonContextMemDesc; -+ PRGXFWIF_FWCOMMONCONTEXT sFWCommonContextFWAddr; -+ SERVER_MMU_CONTEXT *psServerMMUContext; -+ DEVMEM_MEMDESC *psFWMemContextMemDesc; -+ DEVMEM_MEMDESC *psFWFrameworkMemDesc; -+ DEVMEM_MEMDESC *psContextStateMemDesc; -+ RGX_CLIENT_CCB *psClientCCB; -+ DEVMEM_MEMDESC *psClientCCBMemDesc; -+ DEVMEM_MEMDESC *psClientCCBCtrlMemDesc; -+ IMG_BOOL bCommonContextMemProvided; -+ IMG_UINT32 ui32ContextID; -+ DLLIST_NODE sListNode; -+ RGX_CONTEXT_RESET_REASON eLastResetReason; -+ IMG_UINT32 ui32LastResetJobRef; -+ IMG_INT32 i32Priority; -+ RGX_CCB_REQUESTOR_TYPE eRequestor; -+}; -+ -+/*************************************************************************/ /*! -+@Function _CheckPriority -+@Description Check if priority is allowed for requestor type -+@Input psDevInfo pointer to DevInfo struct -+@Input i32Priority Requested priority -+@Input eRequestor Requestor type specifying data master -+@Return PVRSRV_ERROR PVRSRV_OK on success -+*/ /**************************************************************************/ -+static PVRSRV_ERROR _CheckPriority(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_INT32 i32Priority, -+ RGX_CCB_REQUESTOR_TYPE eRequestor) -+{ -+ /* Only contexts from a single PID allowed with real time priority (highest priority) */ -+ if (i32Priority == RGX_CTX_PRIORITY_REALTIME) -+ { -+ DLLIST_NODE *psNode, *psNext; -+ -+ dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) -+ { -+ RGX_SERVER_COMMON_CONTEXT *psThisContext = -+ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); -+ -+ if (psThisContext->i32Priority == RGX_CTX_PRIORITY_REALTIME && -+ psThisContext->eRequestor == eRequestor && -+ RGXGetPIDFromServerMMUContext(psThisContext->psServerMMUContext) != OSGetCurrentClientProcessIDKM()) -+ { -+ PVR_LOG(("Only one process can have contexts with real time priority")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, -+ RGXFWIF_DM eDM, -+ SERVER_MMU_CONTEXT *psServerMMUContext, -+ DEVMEM_MEMDESC *psAllocatedMemDesc, -+ IMG_UINT32 ui32AllocatedOffset, -+ DEVMEM_MEMDESC *psFWMemContextMemDesc, -+ DEVMEM_MEMDESC *psContextStateMemDesc, -+ IMG_UINT32 ui32CCBAllocSizeLog2, -+ IMG_UINT32 ui32CCBMaxAllocSizeLog2, -+ IMG_UINT32 ui32ContextFlags, -+ IMG_INT32 i32Priority, -+ IMG_UINT32 ui32MaxDeadlineMS, -+ IMG_UINT64 ui64RobustnessAddress, -+ RGX_COMMON_CONTEXT_INFO *psInfo, -+ RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; -+ RGXFWIF_FWCOMMONCONTEXT sFWCommonContext = {{0}}; -+ IMG_UINT32 ui32FWCommonContextOffset; -+ IMG_UINT8 *pui8Ptr; -+ PVRSRV_ERROR eError; -+ -+ /* -+ 
* Allocate all the resources that are required -+ */ -+ psServerCommonContext = OSAllocMem(sizeof(*psServerCommonContext)); -+ if (psServerCommonContext == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto fail_alloc; -+ } -+ -+ psServerCommonContext->psDevInfo = psDevInfo; -+ psServerCommonContext->psServerMMUContext = psServerMMUContext; -+ -+ if (psAllocatedMemDesc) -+ { -+ PDUMPCOMMENT(psDeviceNode, -+ "Using existing MemDesc for Rogue firmware %s context (offset = %d)", -+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], -+ ui32AllocatedOffset); -+ ui32FWCommonContextOffset = ui32AllocatedOffset; -+ psServerCommonContext->psFWCommonContextMemDesc = psAllocatedMemDesc; -+ psServerCommonContext->bCommonContextMemProvided = IMG_TRUE; -+ } -+ else -+ { -+ /* Allocate device memory for the firmware context */ -+ PDUMPCOMMENT(psDeviceNode, -+ "Allocate Rogue firmware %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]); -+ eError = DevmemFwAllocate(psDevInfo, -+ sizeof(sFWCommonContext), -+ RGX_FWCOMCTX_ALLOCFLAGS, -+ "FwContext", -+ &psServerCommonContext->psFWCommonContextMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate firmware %s context (%s)", -+ __func__, -+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], -+ PVRSRVGetErrorString(eError))); -+ goto fail_contextalloc; -+ } -+ ui32FWCommonContextOffset = 0; -+ psServerCommonContext->bCommonContextMemProvided = IMG_FALSE; -+ } -+ -+ /* Record this context so we can refer to it if the FW needs to tell us it was reset. */ -+ psServerCommonContext->eLastResetReason = RGX_CONTEXT_RESET_REASON_NONE; -+ psServerCommonContext->ui32LastResetJobRef = 0; -+ psServerCommonContext->ui32ContextID = psDevInfo->ui32CommonCtxtCurrentID++; -+ -+ /* -+ * Temporarily map the firmware context to the kernel and initialise it -+ */ -+ eError = DevmemAcquireCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc, -+ (void **)&pui8Ptr); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to map firmware %s context to CPU (%s)", -+ __func__, -+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], -+ PVRSRVGetErrorString(eError))); -+ goto fail_cpuvirtacquire; -+ } -+ -+ /* Allocate the client CCB */ -+ eError = RGXCreateCCB(psDevInfo, -+ ui32CCBAllocSizeLog2, -+ ui32CCBMaxAllocSizeLog2, -+ ui32ContextFlags, -+ psConnection, -+ eRGXCCBRequestor, -+ psServerCommonContext, -+ &psServerCommonContext->psClientCCB, -+ &psServerCommonContext->psClientCCBMemDesc, -+ &psServerCommonContext->psClientCCBCtrlMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: failed to create CCB for %s context (%s)", -+ __func__, -+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], -+ PVRSRVGetErrorString(eError))); -+ goto fail_allocateccb; -+ } -+ -+ sFWCommonContext.eDM = eDM; -+ BITMASK_SET(sFWCommonContext.ui32CompatFlags, RGXFWIF_CONTEXT_COMPAT_FLAGS_HAS_DEFER_COUNT); -+ -+ /* Set the firmware CCB device addresses in the firmware common context */ -+ eError = RGXSetFirmwareAddress(&sFWCommonContext.psCCB, -+ psServerCommonContext->psClientCCBMemDesc, -+ 0, RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", fail_cccbfwaddr); -+ -+ eError = RGXSetFirmwareAddress(&sFWCommonContext.psCCBCtl, -+ psServerCommonContext->psClientCCBCtrlMemDesc, -+ 0, RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", fail_cccbctrlfwaddr); -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) -+ { -+ 
RGXSetMetaDMAAddress(&sFWCommonContext.sCCBMetaDMAAddr, -+ psServerCommonContext->psClientCCBMemDesc, -+ &sFWCommonContext.psCCB, -+ 0); -+ } -+ -+ /* Set the memory context device address */ -+ psServerCommonContext->psFWMemContextMemDesc = psFWMemContextMemDesc; -+ eError = RGXSetFirmwareAddress(&sFWCommonContext.psFWMemContext, -+ psFWMemContextMemDesc, -+ 0, RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", fail_fwmemctxfwaddr); -+ -+ /* Set the framework register updates address */ -+ psServerCommonContext->psFWFrameworkMemDesc = psInfo->psFWFrameworkMemDesc; -+ if (psInfo->psFWFrameworkMemDesc != NULL) -+ { -+ eError = RGXSetFirmwareAddress(&sFWCommonContext.psRFCmd, -+ psInfo->psFWFrameworkMemDesc, -+ 0, RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:4", fail_fwframeworkfwaddr); -+ } -+ else -+ { -+ /* This should never be touched in this contexts without a framework -+ * memdesc, but ensure it is zero so we see crashes if it is. -+ */ -+ sFWCommonContext.psRFCmd.ui32Addr = 0; -+ } -+ -+ eError = _CheckPriority(psDevInfo, i32Priority, eRGXCCBRequestor); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_CheckPriority", fail_checkpriority); -+ -+ psServerCommonContext->i32Priority = i32Priority; -+ psServerCommonContext->eRequestor = eRGXCCBRequestor; -+ -+ sFWCommonContext.i32Priority = i32Priority; -+ sFWCommonContext.ui32PrioritySeqNum = 0; -+ sFWCommonContext.ui32MaxDeadlineMS = MIN(ui32MaxDeadlineMS, -+ (eDM == RGXFWIF_DM_CDM ? -+ RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS : -+ RGXFWIF_MAX_WORKLOAD_DEADLINE_MS)); -+ sFWCommonContext.ui64RobustnessAddress = ui64RobustnessAddress; -+ -+ /* Store a references to Server Common Context and PID for notifications back from the FW. */ -+ sFWCommonContext.ui32ServerCommonContextID = psServerCommonContext->ui32ContextID; -+ sFWCommonContext.ui32PID = OSGetCurrentClientProcessIDKM(); -+ OSStringLCopy(sFWCommonContext.szProcName, psConnection->pszProcName, RGXFW_PROCESS_NAME_LEN); -+ -+ /* Set the firmware GPU context state buffer */ -+ psServerCommonContext->psContextStateMemDesc = psContextStateMemDesc; -+ if (psContextStateMemDesc) -+ { -+ eError = RGXSetFirmwareAddress(&sFWCommonContext.psContextState, -+ psContextStateMemDesc, -+ 0, -+ RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:5", fail_ctxstatefwaddr); -+ } -+ -+ OSCachedMemCopy(IMG_OFFSET_ADDR(pui8Ptr, ui32FWCommonContextOffset), &sFWCommonContext, sizeof(sFWCommonContext)); -+ RGXFwSharedMemCacheOpExec(IMG_OFFSET_ADDR(pui8Ptr, ui32FWCommonContextOffset), -+ sizeof(sFWCommonContext), -+ PVRSRV_CACHE_OP_FLUSH); -+ -+ /* -+ * Dump the created context -+ */ -+ PDUMPCOMMENT(psDeviceNode, -+ "Dump %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]); -+ DevmemPDumpLoadMem(psServerCommonContext->psFWCommonContextMemDesc, -+ ui32FWCommonContextOffset, -+ sizeof(sFWCommonContext), -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ /* We've finished the setup so release the CPU mapping */ -+ DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc); -+ -+ /* Map this allocation into the FW */ -+ eError = RGXSetFirmwareAddress(&psServerCommonContext->sFWCommonContextFWAddr, -+ psServerCommonContext->psFWCommonContextMemDesc, -+ ui32FWCommonContextOffset, -+ RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:6", fail_fwcommonctxfwaddr); -+ -+#if defined(__linux__) && defined(PVRSRV_TRACE_ROGUE_EVENTS) -+ { -+ IMG_UINT32 ui32FWAddr; -+ switch (eDM) { -+ case RGXFWIF_DM_GEOM: 
-+ ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t) -+ psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, sTAContext)); -+ break; -+ case RGXFWIF_DM_3D: -+ ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t) -+ psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, s3DContext)); -+ break; -+ default: -+ ui32FWAddr = psServerCommonContext->sFWCommonContextFWAddr.ui32Addr; -+ break; -+ } -+ -+ trace_rogue_create_fw_context(OSGetCurrentClientProcessNameKM(), -+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], -+ psDeviceNode->sDevId.ui32InternalID, -+ ui32FWAddr); -+ } -+#endif -+ -+ /*Add the node to the list when finalised */ -+ OSWRLockAcquireWrite(psDevInfo->hCommonCtxtListLock); -+ dllist_add_to_tail(&(psDevInfo->sCommonCtxtListHead), &(psServerCommonContext->sListNode)); -+ OSWRLockReleaseWrite(psDevInfo->hCommonCtxtListLock); -+ -+ *ppsServerCommonContext = psServerCommonContext; -+ return PVRSRV_OK; -+ -+fail_fwcommonctxfwaddr: -+ if (psContextStateMemDesc) -+ { -+ RGXUnsetFirmwareAddress(psContextStateMemDesc); -+ } -+fail_ctxstatefwaddr: -+fail_checkpriority: -+ if (psInfo->psFWFrameworkMemDesc != NULL) -+ { -+ RGXUnsetFirmwareAddress(psInfo->psFWFrameworkMemDesc); -+ } -+fail_fwframeworkfwaddr: -+ RGXUnsetFirmwareAddress(psFWMemContextMemDesc); -+fail_fwmemctxfwaddr: -+ RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc); -+fail_cccbctrlfwaddr: -+ RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc); -+fail_cccbfwaddr: -+ RGXDestroyCCB(psDevInfo, psServerCommonContext->psClientCCB); -+fail_allocateccb: -+ DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc); -+fail_cpuvirtacquire: -+ if (!psServerCommonContext->bCommonContextMemProvided) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psServerCommonContext->psFWCommonContextMemDesc); -+ psServerCommonContext->psFWCommonContextMemDesc = NULL; -+ } -+fail_contextalloc: -+ OSFreeMem(psServerCommonContext); -+fail_alloc: -+ return eError; -+} -+ -+void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) -+{ -+ -+ OSWRLockAcquireWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock); -+ /* Remove the context from the list of all contexts. 
*/ -+ dllist_remove_node(&psServerCommonContext->sListNode); -+ OSWRLockReleaseWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock); -+ -+ /* -+ Unmap the context itself and then all its resources -+ */ -+ -+ /* Unmap the FW common context */ -+ RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc); -+ /* Umap context state buffer (if there was one) */ -+ if (psServerCommonContext->psContextStateMemDesc) -+ { -+ RGXUnsetFirmwareAddress(psServerCommonContext->psContextStateMemDesc); -+ } -+ /* Unmap the framework buffer */ -+ if (psServerCommonContext->psFWFrameworkMemDesc != NULL) -+ { -+ RGXUnsetFirmwareAddress(psServerCommonContext->psFWFrameworkMemDesc); -+ } -+ /* Unmap client CCB and CCB control */ -+ RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc); -+ RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc); -+ /* Unmap the memory context */ -+ RGXUnsetFirmwareAddress(psServerCommonContext->psFWMemContextMemDesc); -+ -+ /* Destroy the client CCB */ -+ RGXDestroyCCB(psServerCommonContext->psDevInfo, psServerCommonContext->psClientCCB); -+ -+ -+ /* Free the FW common context (if there was one) */ -+ if (!psServerCommonContext->bCommonContextMemProvided) -+ { -+ DevmemFwUnmapAndFree(psServerCommonContext->psDevInfo, -+ psServerCommonContext->psFWCommonContextMemDesc); -+ psServerCommonContext->psFWCommonContextMemDesc = NULL; -+ } -+ /* Free the hosts representation of the common context */ -+ OSFreeMem(psServerCommonContext); -+} -+ -+PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) -+{ -+ return psServerCommonContext->sFWCommonContextFWAddr; -+} -+ -+RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) -+{ -+ return psServerCommonContext->psClientCCB; -+} -+ -+SERVER_MMU_CONTEXT *FWCommonContextGetServerMMUCtx(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) -+{ -+ return psServerCommonContext->psServerMMUContext; -+} -+ -+RGX_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, -+ IMG_UINT32 *pui32LastResetJobRef) -+{ -+ RGX_CONTEXT_RESET_REASON eLastResetReason; -+ -+ PVR_ASSERT(psServerCommonContext != NULL); -+ PVR_ASSERT(pui32LastResetJobRef != NULL); -+ -+ /* Take the most recent reason & job ref and reset for next time... 
*/ -+ eLastResetReason = psServerCommonContext->eLastResetReason; -+ *pui32LastResetJobRef = psServerCommonContext->ui32LastResetJobRef; -+ psServerCommonContext->eLastResetReason = RGX_CONTEXT_RESET_REASON_NONE; -+ psServerCommonContext->ui32LastResetJobRef = 0; -+ -+ if (eLastResetReason == RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "A Hard Context Switch was triggered on the GPU to ensure Quality of Service.")); -+ } -+ -+ return eLastResetReason; -+} -+ -+PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) -+{ -+ return psServerCommonContext->psDevInfo; -+} -+ -+PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo, -+ SERVER_MMU_CONTEXT *psServerMMUContext, -+ PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) -+ { -+ RGX_SERVER_COMMON_CONTEXT *psThisContext = -+ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); -+ -+ if (psThisContext->psServerMMUContext == psServerMMUContext) -+ { -+ psFWCommonContextFWAddr->ui32Addr = psThisContext->sFWCommonContextFWAddr.ui32Addr; -+ return PVRSRV_OK; -+ } -+ } -+ return PVRSRV_ERROR_INVALID_PARAMS; -+} -+ -+PRGXFWIF_FWCOMMONCONTEXT RGXGetFWCommonContextAddrFromServerCommonCtx(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DLLIST_NODE *psNode) -+{ -+ RGX_SERVER_COMMON_CONTEXT *psThisContext = -+ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); -+ -+ return FWCommonContextGetFWAddress(psThisContext); -+} -+ -+PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, -+ IMG_UINT32 ui32ContextFlags) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (BITMASK_ANY(ui32ContextFlags, ~RGX_CONTEXT_FLAGS_WRITEABLE_MASK)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Context flag(s) invalid or not writeable (%d)", -+ __func__, ui32ContextFlags)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ else -+ { -+ RGXSetCCBFlags(psServerCommonContext->psClientCCB, -+ ui32ContextFlags); -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext, -+ CONNECTION_DATA *psConnection, -+ PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_INT32 i32Priority, -+ RGXFWIF_DM eDM) -+{ -+ IMG_UINT32 ui32CmdSize; -+ IMG_UINT8 *pui8CmdPtr; -+ RGXFWIF_KCCB_CMD sPriorityCmd = { 0 }; -+ RGXFWIF_CCB_CMD_HEADER *psCmdHeader; -+ RGXFWIF_CMD_PRIORITY *psCmd; -+ PVRSRV_ERROR eError; -+ RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psContext); -+ -+ eError = _CheckPriority(psDevInfo, i32Priority, psContext->eRequestor); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_CheckPriority", fail_checkpriority); -+ -+ /* -+ Get space for command -+ */ -+ ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_CMD_PRIORITY)); -+ -+ eError = RGXAcquireCCB(psClientCCB, -+ ui32CmdSize, -+ (void **) &pui8CmdPtr, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_OK) -+ { -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire space for client CCB", __func__)); -+ } -+ goto fail_ccbacquire; -+ } -+ -+ /* -+ Write the command header and command -+ */ -+ psCmdHeader = IMG_OFFSET_ADDR(pui8CmdPtr, 0); -+ psCmdHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PRIORITY; -+ psCmdHeader->ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CMD_PRIORITY)); -+ pui8CmdPtr += sizeof(*psCmdHeader); -+ -+ psCmd = IMG_OFFSET_ADDR(pui8CmdPtr, 0); -+ psCmd->i32Priority = 
i32Priority; -+ pui8CmdPtr += sizeof(*psCmd); -+ -+ /* -+ We should reserve space in the kernel CCB here and fill in the command -+ directly. -+ This is so if there isn't space in the kernel CCB we can return with -+ retry back to services client before we take any operations -+ */ -+ -+ /* -+ Submit the command -+ */ -+ RGXReleaseCCB(psClientCCB, -+ ui32CmdSize, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release space in client CCB", __func__)); -+ return eError; -+ } -+ -+ /* Construct the priority command. */ -+ sPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; -+ sPriorityCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psContext); -+ sPriorityCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); -+ sPriorityCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); -+ sPriorityCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ sPriorityCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; -+#endif -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommand(psDevInfo, -+ eDM, -+ &sPriorityCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to submit set priority command with error (%u)", -+ __func__, -+ eError)); -+ goto fail_cmdacquire; -+ } -+ -+ psContext->i32Priority = i32Priority; -+ -+ return PVRSRV_OK; -+ -+fail_ccbacquire: -+fail_checkpriority: -+fail_cmdacquire: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM) -+{ -+ if (psCurrentServerCommonContext == NULL) -+ { -+ /* the context has already been freed so there is nothing to do here */ -+ return PVRSRV_OK; -+ } -+ -+ return CheckForStalledCCB(psCurrentServerCommonContext->psDevInfo->psDeviceNode, -+ psCurrentServerCommonContext->psClientCCB, -+ eKickTypeDM); -+} -+ -+void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ IMG_UINT32 ui32VerbLevel) -+{ -+ if (psCurrentServerCommonContext == NULL) -+ { -+ /* the context has already been freed so there is nothing to do here */ -+ return; -+ } -+ -+ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) -+ { -+ /* If high verbosity requested, dump whole CCB */ -+ DumpCCB(psCurrentServerCommonContext->psDevInfo, -+ psCurrentServerCommonContext->sFWCommonContextFWAddr, -+ psCurrentServerCommonContext->psClientCCB, -+ pfnDumpDebugPrintf, -+ pvDumpDebugFile); -+ } -+ else -+ { -+ /* Otherwise, only dump first command in the CCB */ -+ DumpFirstCCBCmd(psCurrentServerCommonContext->sFWCommonContextFWAddr, -+ psCurrentServerCommonContext->psClientCCB, -+ pfnDumpDebugPrintf, -+ pvDumpDebugFile); -+ } -+} -+ -+void FWCommonContextListSetLastResetReason(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 *pui32ErrorPid, -+ const RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext = NULL; -+ IMG_UINT32 ui32ErrorPid = 0; -+ -+ OSWRLockAcquireRead(psDevInfo->hCommonCtxtListLock); -+ -+ dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) -+ 
{ -+ RGX_SERVER_COMMON_CONTEXT *psThisContext = -+ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); -+ -+ /* If the notification applies to all contexts update reset info -+ * for all contexts, otherwise only do so for the appropriate ID. -+ */ -+ if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS) -+ { -+ /* Notification applies to all contexts */ -+ psThisContext->eLastResetReason = psCmdContextResetNotification->eResetReason; -+ psThisContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef; -+ } -+ else -+ { -+ /* Notification applies to one context only */ -+ if (psThisContext->ui32ContextID == psCmdContextResetNotification->ui32ServerCommonContextID) -+ { -+ psServerCommonContext = psThisContext; -+ psServerCommonContext->eLastResetReason = psCmdContextResetNotification->eResetReason; -+ psServerCommonContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef; -+ ui32ErrorPid = RGXGetPIDFromServerMMUContext(psServerCommonContext->psServerMMUContext); -+ break; -+ } -+ } -+ } -+ -+ OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock); -+ -+ if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: All contexts reset (Reason=%d, JobRef=0x%08x)", -+ __func__, -+ (IMG_UINT32)(psCmdContextResetNotification->eResetReason), -+ psCmdContextResetNotification->ui32ResetJobRef)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Context 0x%p reset (ID=0x%08x, Reason=%d, JobRef=0x%08x)", -+ __func__, -+ psServerCommonContext, -+ psCmdContextResetNotification->ui32ServerCommonContextID, -+ (IMG_UINT32)(psCmdContextResetNotification->eResetReason), -+ psCmdContextResetNotification->ui32ResetJobRef)); -+ } -+ -+ if (pui32ErrorPid) -+ { -+ *pui32ErrorPid = ui32ErrorPid; -+ } -+} -+ -+/****************************************************************************** -+ End of file (rgxfwcmnctx.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxfwcmnctx.h b/drivers/gpu/drm/img-rogue/rgxfwcmnctx.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxfwcmnctx.h -@@ -0,0 +1,150 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX firmware common context utility routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX firmware common context utility routines -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RGXFWCMNCTX_H -+#define RGXFWCMNCTX_H -+ -+#include "connection_server.h" -+#include "device.h" -+#include "rgxccb.h" -+#include "rgx_common.h" -+#include "devicemem_typedefs.h" -+#include "rgxdevice.h" -+#include "rgxmem.h" -+ -+/*************************************************************************/ /*! -+@Function FWCommonContextAllocate -+ -+@Description Allocate a FW common context. This allocates the HW memory -+ for the context, the CCB and wires it all together. -+ -+@Input psConnection Connection this context is being created on -+@Input psDeviceNode Device node to create the FW context on -+ (must be RGX device node) -+@Input eRGXCCBRequestor RGX_CCB_REQUESTOR_TYPE enum constant which -+ represents the requestor of this FWCC -+@Input eDM Data Master type -+@Input psServerMMUContext Server MMU memory context. -+@Input psAllocatedMemDesc Pointer to pre-allocated MemDesc to use -+ as the FW context or NULL if this function -+ should allocate it -+@Input ui32AllocatedOffset Offset into pre-allocate MemDesc to use -+ as the FW context. 
If psAllocatedMemDesc -+ is NULL then this parameter is ignored -+@Input psFWMemContextMemDesc MemDesc of the FW memory context this -+ common context resides on -+@Input psContextStateMemDesc FW context state (context switch) MemDesc -+@Input ui32CCBAllocSizeLog2 Size of the CCB for this context -+@Input ui32CCBMaxAllocSizeLog2 Maximum size to which CCB can grow for this context -+@Input ui32ContextFlags Flags which specify properties of the context -+@Input i32Priority Priority of the context -+@Input ui32MaxDeadlineMS Max deadline limit in MS that the workload can run -+@Input ui64RobustnessAddress Address for FW to signal a context reset -+@Input psInfo Structure that contains extra info -+ required for the creation of the context -+ (elements might change from core to core) -+@Return PVRSRV_OK if the context was successfully created -+*/ /**************************************************************************/ -+PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, -+ RGXFWIF_DM eDM, -+ SERVER_MMU_CONTEXT *psServerMMUContext, -+ DEVMEM_MEMDESC *psAllocatedMemDesc, -+ IMG_UINT32 ui32AllocatedOffset, -+ DEVMEM_MEMDESC *psFWMemContextMemDesc, -+ DEVMEM_MEMDESC *psContextStateMemDesc, -+ IMG_UINT32 ui32CCBAllocSizeLog2, -+ IMG_UINT32 ui32CCBMaxAllocSizeLog2, -+ IMG_UINT32 ui32ContextFlags, -+ IMG_INT32 i32Priority, -+ IMG_UINT32 ui32MaxDeadlineMS, -+ IMG_UINT64 ui64RobustnessAddress, -+ RGX_COMMON_CONTEXT_INFO *psInfo, -+ RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext); -+ -+ -+void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); -+ -+PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); -+ -+RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); -+ -+SERVER_MMU_CONTEXT *FWCommonContextGetServerMMUCtx(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); -+ -+RGX_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, -+ IMG_UINT32 *pui32LastResetJobRef); -+ -+PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); -+ -+PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo, -+ SERVER_MMU_CONTEXT *psServerMMUContext, -+ PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr); -+ -+PRGXFWIF_FWCOMMONCONTEXT RGXGetFWCommonContextAddrFromServerCommonCtx(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DLLIST_NODE *psNode); -+ -+PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, -+ IMG_UINT32 ui32ContextFlags); -+ -+PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext, -+ CONNECTION_DATA *psConnection, -+ PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_INT32 i32Priority, -+ RGXFWIF_DM eDM); -+ -+PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM); -+ -+void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ IMG_UINT32 ui32VerbLevel); -+ -+void FWCommonContextListSetLastResetReason(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 *pui32ErrorPid, -+ const RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification); -+ -+#endif /* RGXFWCMNCTX_H */ -+/****************************************************************************** -+ End of file (rgxfwcmnctx.h) 
-+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxfwdbg.c b/drivers/gpu/drm/img-rogue/rgxfwdbg.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxfwdbg.c -@@ -0,0 +1,608 @@ -+/*************************************************************************/ /*! -+@File -+@Title Debugging and miscellaneous functions server implementation -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Kernel services functions for debugging and other -+ miscellaneous functionality. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#include "pvrsrv.h" -+#include "pvr_debug.h" -+#include "rgxfwdbg.h" -+#include "rgxfwutils.h" -+#include "rgxta3d.h" -+#include "pdump_km.h" -+#include "mmu_common.h" -+#include "devicemem_server.h" -+#include "osfunc.h" -+#include "vmm_pvz_server.h" -+#include "vz_vm.h" -+#if defined(PDUMP) -+#include "devicemem_pdump.h" -+#endif -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugQueryFWLogKM( -+ const CONNECTION_DATA *psConnection, -+ const PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 *pui32RGXFWLogType) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ if (!psDeviceNode || !pui32RGXFWLogType) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevInfo = psDeviceNode->pvDevice; -+ -+ if (!psDevInfo || !psDevInfo->psRGXFWIfTraceBufCtl) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType, INVALIDATE); -+ *pui32RGXFWLogType = psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType; -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugSetFWLogKM( -+ const CONNECTION_DATA * psConnection, -+ const PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32RGXFWLogType) -+{ -+ RGXFWIF_KCCB_CMD sLogTypeUpdateCmd; -+ PVRSRV_DEV_POWER_STATE ePowerState; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; -+ IMG_UINT32 ui32OldRGXFWLogTpe; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ IMG_BOOL bWaitForFwUpdate = IMG_FALSE; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType, INVALIDATE); -+ ui32OldRGXFWLogTpe = psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType; -+ -+ /* check log type is valid */ -+ if (ui32RGXFWLogType & ~RGXFWIF_LOG_TYPE_MASK) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ OSLockAcquire(psDevInfo->hRGXFWIfBufInitLock); -+ -+ /* set the new log type and ensure the new log type is written to memory -+ * before requesting the FW to read it -+ */ -+ psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32RGXFWLogType; -+ OSMemoryBarrier(&psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType); -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType, FLUSH); -+ -+ /* Allocate firmware trace buffer resource(s) if not already done */ -+ if (RGXTraceBufferIsInitRequired(psDevInfo)) -+ { -+ eError = RGXTraceBufferInitOnDemandResources(psDevInfo, RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS); -+ } -+#if defined(SUPPORT_TBI_INTERFACE) -+ /* Check if LogType is TBI then allocate resource on demand and copy -+ * SFs to it -+ */ -+ else if (RGXTBIBufferIsInitRequired(psDevInfo)) -+ { -+ eError = RGXTBIBufferInitOnDemandResources(psDevInfo); -+ } -+ -+ /* TBI buffer address will be 0 if not initialised */ -+ sLogTypeUpdateCmd.uCmdData.sTBIBuffer = psDevInfo->sRGXFWIfTBIBuffer; -+#else -+ sLogTypeUpdateCmd.uCmdData.sTBIBuffer.ui32Addr = 0; -+#endif -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate resource on-demand. 
Reverting to old value", -+ __func__)); -+ psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32OldRGXFWLogTpe; -+ OSMemoryBarrier(&psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType); -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType, FLUSH); -+ -+ OSLockRelease(psDevInfo->hRGXFWIfBufInitLock); -+ -+ return eError; -+ } -+ -+ OSLockRelease(psDevInfo->hRGXFWIfBufInitLock); -+ -+ eError = PVRSRVPowerLock((PPVRSRV_DEVICE_NODE) psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to acquire power lock (%u)", -+ __func__, -+ eError)); -+ return eError; -+ } -+ -+ eError = PVRSRVGetDevicePowerState((PPVRSRV_DEVICE_NODE) psDeviceNode, &ePowerState); -+ -+ if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF)) -+ { -+ /* Ask the FW to update its cached version of logType value */ -+ sLogTypeUpdateCmd.eCmdType = RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE; -+ -+ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, -+ &sLogTypeUpdateCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ &ui32kCCBCommandSlot); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot", unlock); -+ bWaitForFwUpdate = IMG_TRUE; -+ } -+ -+unlock: -+ PVRSRVPowerUnlock( (PPVRSRV_DEVICE_NODE) psDeviceNode); -+ if (bWaitForFwUpdate) -+ { -+ /* Wait for the LogType value to be updated in FW */ -+ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); -+ PVR_LOG_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); -+ } -+ return eError; -+} -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugSetHCSDeadlineKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32HCSDeadlineMS) -+{ -+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ return RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadlineMS); -+} -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugMapGuestHeapKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DriverID, -+ IMG_UINT64 ui64GuestHeapBase) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32DeviceID = psDeviceNode->sDevId.ui32InternalID; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ if (PVRSRV_VZ_MODE_IS(HOST)) -+ { -+ if (ui64GuestHeapBase == IMG_UINT64_MAX) -+ { -+ /* unmap heap and set DriverID to offline */ -+ eError = PvzServerUnmapDevPhysHeap(ui32DriverID, ui32DeviceID); -+ eError = PvzServerOnVmOffline(ui32DriverID, ui32DeviceID); -+ } -+ else -+ { -+ /* set DriverID online if necessary and map firmware heap */ -+ if (!IsVmOnline(ui32DriverID, ui32DeviceID)) -+ { -+ eError = PvzServerOnVmOnline(ui32DriverID, ui32DeviceID); -+ } -+ -+ eError = PvzServerMapDevPhysHeap(ui32DriverID, ui32DeviceID, RGX_FIRMWARE_RAW_HEAP_SIZE, ui64GuestHeapBase); -+ } -+ } -+ else -+ { -+ eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ PVR_DPF((PVR_DBG_ERROR, " %s: Driver must run in Host mode to support Guest Mapping operations\n", __func__)); -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugSetDriverTimeSliceIntervalKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DriverTimeSliceInterval) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; -+ RGXFWIF_KCCB_CMD sVzTimeSliceIntervalCmd = { 0 }; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ if (psDevInfo->psRGXFWIfRuntimeCfg == NULL) -+ { -+ return PVRSRV_ERROR_NOT_INITIALISED; -+ } -+ -+ sVzTimeSliceIntervalCmd.eCmdType = 
RGXFWIF_KCCB_CMD_VZ_DRV_TIME_SLICE_INTERVAL; -+ psDevInfo->psRGXFWIfRuntimeCfg->ui32DriverTimeSliceInterval = ui32DriverTimeSliceInterval; -+ OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32DriverTimeSliceInterval); -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Updating the timeslice interval inside RGXFWIfRuntimeCfg"); -+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, -+ offsetof(RGXFWIF_RUNTIME_CFG, ui32DriverTimeSliceInterval), -+ ui32DriverTimeSliceInterval, -+ PDUMP_FLAGS_CONTINUOUS); -+#endif -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommand(psDevInfo, -+ RGXFWIF_DM_GP, -+ &sVzTimeSliceIntervalCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugSetDriverTimeSliceKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DriverTimeSlice) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGXFWIF_KCCB_CMD sVzTimeSliceCmd = { 0 }; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg; -+ IMG_INT32 ui32TimeSliceMax = 0; -+ IMG_UINT32 ui32DriverIDLoop; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ if (ui32DriverID >= RGX_NUM_DRIVERS_SUPPORTED) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevInfo = psDeviceNode->pvDevice; -+ PVR_RETURN_IF_FALSE(psDevInfo != NULL, PVRSRV_ERROR_NOT_INITIALISED); -+ -+ psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; -+ PVR_RETURN_IF_FALSE(psRuntimeCfg != NULL, PVRSRV_ERROR_NOT_INITIALISED); -+ -+ /* -+ * Each time slice is a number between 0 -> 100. -+ * Use '0' to disable time slice based CSW for the driver. 
-+ */ -+ /* Check if the sum exceeds PVRSRV_VZ_TIME_SLICE_MAX */ -+ if (ui32DriverTimeSlice) -+ { -+ FOREACH_SUPPORTED_DRIVER(ui32DriverIDLoop) -+ { -+ if (ui32DriverID != ui32DriverIDLoop) -+ { -+ ui32TimeSliceMax += psRuntimeCfg->aui32DriverTimeSlice[ui32DriverIDLoop]; -+ } -+ else -+ { -+ ui32TimeSliceMax += ui32DriverTimeSlice; -+ } -+ -+ PVR_RETURN_IF_FALSE(ui32TimeSliceMax <= PVRSRV_VZ_TIME_SLICE_MAX, PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ } -+ -+ sVzTimeSliceCmd.eCmdType = RGXFWIF_KCCB_CMD_VZ_DRV_TIME_SLICE; -+ psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverTimeSlice[ui32DriverID] = ui32DriverTimeSlice; -+ OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverTimeSlice[ui32DriverID]); -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Updating the timeslice of DriverID %u inside RGXFWIfRuntimeCfg", ui32DriverID); -+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, -+ offsetof(RGXFWIF_RUNTIME_CFG, aui32DriverTimeSlice) + (ui32DriverID * sizeof(ui32DriverTimeSlice)), -+ ui32DriverTimeSlice, -+ PDUMP_FLAGS_CONTINUOUS); -+#endif -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommand(psDevInfo, -+ RGXFWIF_DM_GP, -+ &sVzTimeSliceCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugSetDriverPriorityKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DriverPriority) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; -+ RGXFWIF_KCCB_CMD sVzPriorityCmd = { 0 }; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ if (psDevInfo->psRGXFWIfRuntimeCfg == NULL) -+ { -+ return PVRSRV_ERROR_NOT_INITIALISED; -+ } -+ -+ if (ui32DriverID >= RGX_NUM_DRIVERS_SUPPORTED) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ sVzPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_VZ_DRV_ARRAY_CHANGE; -+ psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverPriority[ui32DriverID] = ui32DriverPriority; -+ OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverPriority[ui32DriverID]); -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverPriority[ui32DriverID], FLUSH); -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Updating the priority of DriverID %u inside RGXFWIfRuntimeCfg", ui32DriverID); -+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, -+ offsetof(RGXFWIF_RUNTIME_CFG, aui32DriverPriority) + (ui32DriverID * sizeof(ui32DriverPriority)), -+ ui32DriverPriority, -+ PDUMP_FLAGS_CONTINUOUS); -+#endif -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommand(psDevInfo, -+ RGXFWIF_DM_GP, -+ &sVzPriorityCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugSetDriverIsolationGroupKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DriverIsolationGroup) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; -+ RGXFWIF_KCCB_CMD sVzIsolationGroupCmd = { 0 }; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ if 
(psDevInfo->psRGXFWIfRuntimeCfg == NULL) -+ { -+ return PVRSRV_ERROR_NOT_INITIALISED; -+ } -+ -+ if (ui32DriverID >= RGX_NUM_DRIVERS_SUPPORTED) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ sVzIsolationGroupCmd.eCmdType = RGXFWIF_KCCB_CMD_VZ_DRV_ARRAY_CHANGE; -+ psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID] = ui32DriverIsolationGroup; -+ OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID]); -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID], FLUSH); -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Updating the isolation group of DriverID%u inside RGXFWIfRuntimeCfg", ui32DriverID); -+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, -+ offsetof(RGXFWIF_RUNTIME_CFG, aui32DriverIsolationGroup) + (ui32DriverID * sizeof(ui32DriverIsolationGroup)), -+ ui32DriverIsolationGroup, -+ PDUMP_FLAGS_CONTINUOUS); -+#endif -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommand(psDevInfo, -+ RGXFWIF_DM_GP, -+ &sVzIsolationGroupCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugSetOSNewOnlineStateKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32OSNewState) -+{ -+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; -+ RGXFWIF_OS_STATE_CHANGE eState; -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ eState = (ui32OSNewState) ? (RGXFWIF_OS_ONLINE) : (RGXFWIF_OS_OFFLINE); -+ return RGXFWSetFwOsState(psDevInfo, ui32DriverID, eState); -+} -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugPHRConfigureKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32PHRMode) -+{ -+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ return RGXFWConfigPHR(psDevInfo, -+ ui32PHRMode); -+} -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugWdgConfigureKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32WdgPeriodUs) -+{ -+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ return RGXFWConfigWdg(psDevInfo, -+ ui32WdgPeriodUs); -+} -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugDumpFreelistPageListKM( -+ CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; -+ DLLIST_NODE *psNode, *psNext; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ if (dllist_is_empty(&psDevInfo->sFreeListHead)) -+ { -+ return PVRSRV_OK; -+ } -+ -+ PVR_LOG(("---------------[ Begin Freelist Page List Dump ]------------------")); -+ -+ OSLockAcquire(psDevInfo->hLockFreeList); -+ dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) -+ { -+ RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); -+ RGXDumpFreeListPageList(psFreeList); -+ } -+ OSLockRelease(psDevInfo->hLockFreeList); -+ -+ PVR_LOG(("----------------[ End Freelist Page List Dump ]-------------------")); -+ -+ return PVRSRV_OK; -+ -+} -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugInjectFaultKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ return RGXFWInjectFault(psDevInfo); -+} -+ 
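Illustrative aside (not part of the patch): the VZ time-slice setter above only commits a new per-driver value after re-summing the whole budget, rejecting any assignment that would push the combined share past the configured maximum, while a value of 0 simply disables time-slice scheduling for that driver. A minimal standalone sketch of that check follows; validate_time_slice, NUM_DRIVERS and TIME_SLICE_MAX are stand-in names (roughly corresponding to RGX_NUM_DRIVERS_SUPPORTED and PVRSRV_VZ_TIME_SLICE_MAX), not driver API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_DRIVERS    8u    /* stand-in for RGX_NUM_DRIVERS_SUPPORTED */
#define TIME_SLICE_MAX 100u  /* stand-in for PVRSRV_VZ_TIME_SLICE_MAX  */

/* Returns true if assigning new_slice to driver_id keeps the combined budget
 * of all drivers within TIME_SLICE_MAX. A value of 0 always passes because it
 * disables time-slice based scheduling for that driver.
 */
static bool validate_time_slice(const uint32_t current[NUM_DRIVERS],
                                uint32_t driver_id,
                                uint32_t new_slice)
{
    uint32_t sum = 0;
    uint32_t i;

    if (driver_id >= NUM_DRIVERS)
        return false;
    if (new_slice == 0)
        return true;

    for (i = 0; i < NUM_DRIVERS; i++)
        sum += (i == driver_id) ? new_slice : current[i];

    return sum <= TIME_SLICE_MAX;
}

int main(void)
{
    uint32_t slices[NUM_DRIVERS] = { 40, 30, 0, 0, 0, 0, 0, 0 };

    printf("give driver 2 a 20%% slice: %s\n",
           validate_time_slice(slices, 2, 20) ? "ok" : "rejected");
    printf("give driver 2 a 40%% slice: %s\n",
           validate_time_slice(slices, 2, 40) ? "ok" : "rejected");
    return 0;
}

The sketch checks the total once after summing; the patch checks the running total on every loop iteration, which is equivalent for non-negative shares.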
-+PVRSRV_ERROR -+PVRSRVRGXFWDebugSuspendDeviceKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+#if defined(SUPPORT_AUTOVZ) -+ psDeviceNode->bAutoVzFwIsUp = IMG_FALSE; -+#endif -+ -+ return PVRSRVSetDeviceSystemPowerState(psDeviceNode, -+ PVRSRV_SYS_POWER_STATE_OFF, -+ PVRSRV_POWER_FLAGS_NONE); -+} -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugResumeDeviceKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ return PVRSRVSetDeviceSystemPowerState(psDeviceNode, -+ PVRSRV_SYS_POWER_STATE_ON, -+ PVRSRV_POWER_FLAGS_NONE); -+} -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugSetVzConnectionCooldownPeriodInSecKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32VzConnectionCooldownPeriodInSec) -+{ -+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ return RGXFWSetVzConnectionCooldownPeriod(psDevInfo, ui32VzConnectionCooldownPeriodInSec); -+} -+ -diff --git a/drivers/gpu/drm/img-rogue/rgxfwdbg.h b/drivers/gpu/drm/img-rogue/rgxfwdbg.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxfwdbg.h -@@ -0,0 +1,160 @@ -+/*************************************************************************/ /*! -+@File -+@Title Debugging and miscellaneous functions server interface -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Kernel services functions for debugging and other -+ miscellaneous functionality. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGXFWDBG_H) -+#define RGXFWDBG_H -+ -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+#include "device.h" -+#include "pmr.h" -+ -+#include "connection_server.h" -+ -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugInitFWImageKM( -+ PMR *psFWImgDestPMR, -+ PMR *psFWImgSrcPMR, -+ IMG_UINT64 ui64FWImgLen, -+ PMR *psFWImgSigPMR, -+ IMG_UINT64 ui64FWSigLen); -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugQueryFWLogKM( -+ const CONNECTION_DATA *psConnection, -+ const PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 *pui32RGXFWLogType); -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugSetFWLogKM( -+ const CONNECTION_DATA *psConnection, -+ const PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32RGXFWLogType); -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugSetHCSDeadlineKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32HCSDeadlineMS); -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugSetDriverTimeSliceKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DriverTimeSlice); -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugSetDriverTimeSliceIntervalKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DriverTimeSliceInterval); -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugSetDriverPriorityKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DriverPriority); -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugSetDriverIsolationGroupKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DriverIsolationGroup); -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugSetOSNewOnlineStateKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32OSNewState); -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugMapGuestHeapKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DriverID, -+ IMG_UINT64 ui64GuestHeapBase); -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugPHRConfigureKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32PHRMode); -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugWdgConfigureKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32WdgPeriodUs); -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugDumpFreelistPageListKM( -+ CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugInjectFaultKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugSuspendDeviceKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugResumeDeviceKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+PVRSRV_ERROR -+PVRSRVRGXFWDebugSetVzConnectionCooldownPeriodInSecKM( -+ CONNECTION_DATA *psConnection, -+ 
PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32VzConnectionCooldownPeriodInSec); -+#endif -diff --git a/drivers/gpu/drm/img-rogue/rgxfwimageutils.c b/drivers/gpu/drm/img-rogue/rgxfwimageutils.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxfwimageutils.c -@@ -0,0 +1,1154 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services Firmware image utilities used at init time -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Services Firmware image utilities used at init time -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+/* The routines implemented here are built on top of an abstraction layer to -+ * hide DDK/OS-specific details in case they are used outside of the DDK -+ * (e.g. when trusted device is enabled). -+ * Any new dependency should be added to rgxlayer.h. -+ * Any new code should be built on top of the existing abstraction layer, -+ * which should be extended when necessary. 
*/ -+#include "rgxfwimageutils.h" -+#include "rgxfwutils.h" -+#include "pvrsrv.h" -+#include "pvrversion.h" -+ -+ -+/************************************************************************ -+* FW layout information -+************************************************************************/ -+#define MAX_NUM_ENTRIES (8) -+static RGX_FW_LAYOUT_ENTRY asRGXFWLayoutTable[MAX_NUM_ENTRIES]; -+static IMG_UINT32 ui32LayoutEntryNum; -+ -+ -+static RGX_FW_LAYOUT_ENTRY* GetTableEntry(const void *hPrivate, RGX_FW_SECTION_ID eId) -+{ -+ IMG_UINT32 i; -+ -+ for (i = 0; i < ui32LayoutEntryNum; i++) -+ { -+ if (asRGXFWLayoutTable[i].eId == eId) -+ { -+ return &asRGXFWLayoutTable[i]; -+ } -+ } -+ -+ RGXErrorLog(hPrivate, "%s: id %u not found, returning entry 0\n", -+ __func__, eId); -+ -+ return &asRGXFWLayoutTable[0]; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function FindMMUSegment -+ -+ @Description Given a 32 bit FW address attempt to find the corresponding -+ pointer to FW allocation -+ -+ @Input hPrivate : Implementation specific data -+ @Input ui32OffsetIn : 32 bit FW address -+ @Input pvHostFWCodeAddr : Pointer to FW code -+ @Input pvHostFWDataAddr : Pointer to FW data -+ @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code -+ @Input pvHostFWCorememDataAddr : Pointer to FW coremem code -+ @Input uiHostAddrOut : CPU pointer equivalent to ui32OffsetIn -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+static PVRSRV_ERROR FindMMUSegment(const void *hPrivate, -+ IMG_UINT32 ui32OffsetIn, -+ void *pvHostFWCodeAddr, -+ void *pvHostFWDataAddr, -+ void *pvHostFWCorememCodeAddr, -+ void *pvHostFWCorememDataAddr, -+ void **uiHostAddrOut) -+{ -+ IMG_UINT32 i; -+ -+ for (i = 0; i < ui32LayoutEntryNum; i++) -+ { -+ if ((ui32OffsetIn >= asRGXFWLayoutTable[i].ui32BaseAddr) && -+ (ui32OffsetIn < (asRGXFWLayoutTable[i].ui32BaseAddr + asRGXFWLayoutTable[i].ui32AllocSize))) -+ { -+ switch (asRGXFWLayoutTable[i].eType) -+ { -+ case FW_CODE: -+ *uiHostAddrOut = pvHostFWCodeAddr; -+ break; -+ -+ case FW_DATA: -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) -+ { -+ *uiHostAddrOut = RGXCalculateHostFWDataAddress(hPrivate, pvHostFWDataAddr); -+ } -+ else -+ { -+ *uiHostAddrOut = pvHostFWDataAddr; -+ } -+ break; -+ -+ case FW_COREMEM_CODE: -+ *uiHostAddrOut = pvHostFWCorememCodeAddr; -+ break; -+ -+ case FW_COREMEM_DATA: -+ *uiHostAddrOut = pvHostFWCorememDataAddr; -+ break; -+ -+ default: -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ -+ goto found; -+ } -+ } -+ -+ return PVRSRV_ERROR_INIT_FAILURE; -+ -+found: -+ if (*uiHostAddrOut == NULL) -+ { -+ return PVRSRV_OK; -+ } -+ -+ /* Add offset to pointer to FW allocation now that allocation is found */ -+ -+ /* Direct Mem write to mapped memory */ -+ ui32OffsetIn -= asRGXFWLayoutTable[i].ui32BaseAddr; -+ ui32OffsetIn += asRGXFWLayoutTable[i].ui32AllocOffset; -+ -+ *(IMG_UINT8 **)uiHostAddrOut += ui32OffsetIn; -+ -+ return PVRSRV_OK; -+} -+ -+/*! 
-+******************************************************************************* -+ -+ @Function RGXFWConfigureSegID -+ -+ @Description Configures a single segment of the Segment MMU -+ (base, limit and out_addr) -+ -+ @Input hPrivate : Implementation specific data -+ @Input ui64SegOutAddr : Segment output base address (40 bit devVaddr) -+ @Input ui32SegBase : Segment input base address (32 bit FW address) -+ @Input ui32SegLimit : Segment size -+ @Input ui32SegID : Segment ID -+ @Input pszName : Segment name -+ @Input ppui32BootConf : Pointer to bootloader data -+ -+ @Return void -+ -+******************************************************************************/ -+static void RGXFWConfigureSegID(const void *hPrivate, -+ IMG_UINT64 ui64SegOutAddr, -+ IMG_UINT32 ui32SegBase, -+ IMG_UINT32 ui32SegLimit, -+ IMG_UINT32 ui32SegID, -+ IMG_UINT32 **ppui32BootConf) -+{ -+ IMG_UINT32 *pui32BootConf = *ppui32BootConf; -+ IMG_UINT32 ui32SegOutAddr0 = ui64SegOutAddr & 0x00000000FFFFFFFFUL; -+ IMG_UINT32 ui32SegOutAddr1 = (ui64SegOutAddr >> 32) & 0x00000000FFFFFFFFUL; -+ -+ /* META segments have a minimum size */ -+ IMG_UINT32 ui32LimitOff = (ui32SegLimit < RGXFW_SEGMMU_ALIGN) ? -+ RGXFW_SEGMMU_ALIGN : ui32SegLimit; -+ /* the limit is an offset, therefore off = size - 1 */ -+ ui32LimitOff -= 1; -+ -+ RGXCommentLog(hPrivate, -+ "* Seg%d: meta_addr = 0x%08x, devv_addr = 0x%" IMG_UINT64_FMTSPECx ", limit = 0x%x", -+ ui32SegID, -+ ui32SegBase, -+ ui64SegOutAddr, -+ ui32LimitOff); -+ -+ ui32SegBase |= RGXFW_SEGMMU_ALLTHRS_WRITEABLE; -+ -+ *pui32BootConf++ = META_CR_MMCU_SEGMENTn_BASE(ui32SegID); -+ *pui32BootConf++ = ui32SegBase; -+ -+ *pui32BootConf++ = META_CR_MMCU_SEGMENTn_LIMIT(ui32SegID); -+ *pui32BootConf++ = ui32LimitOff; -+ -+ *pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA0(ui32SegID); -+ *pui32BootConf++ = ui32SegOutAddr0; -+ -+ *pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA1(ui32SegID); -+ *pui32BootConf++ = ui32SegOutAddr1; -+ -+ *ppui32BootConf = pui32BootConf; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXFWConfigureSegMMU -+ -+ @Description Configures META's Segment MMU -+ -+ @Input hPrivate : Implementation specific data -+ @Input psFWCodeDevVAddrBase : FW code base device virtual address -+ @Input psFWDataDevVAddrBase : FW data base device virtual address -+ @Input ppui32BootConf : Pointer to bootloader data -+ -+ @Return void -+ -+******************************************************************************/ -+static void RGXFWConfigureSegMMU(const void *hPrivate, -+ IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase, -+ IMG_DEV_VIRTADDR *psFWDataDevVAddrBase, -+ IMG_UINT32 **ppui32BootConf) -+{ -+ IMG_UINT64 ui64SegOutAddrTop; -+ IMG_UINT32 i; -+ -+ PVR_UNREFERENCED_PARAMETER(psFWCodeDevVAddrBase); -+ -+ /* Configure Segment MMU */ -+ RGXCommentLog(hPrivate, "********** FW configure Segment MMU **********"); -+ -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT)) -+ { -+ ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(MMU_CONTEXT_MAPPING_FWPRIV); -+ } -+ else -+ { -+ ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV, RGXFW_SEGMMU_META_BIFDM_ID); -+ } -+ -+ for (i = 0; i < ui32LayoutEntryNum; i++) -+ { -+ /* -+ * FW code is using the bootloader segment which is already configured on boot. -+ * FW coremem code and data don't use the segment MMU. -+ * Only the FW data segment needs to be configured. 
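Illustrative aside (not part of the patch): RGXFWConfigureSegID above, like the META cache setup that follows, feeds the firmware bootloader through a flat stream of 32-bit {register, value} pairs written into the boot-config area, with the overall sequence later closed by a pair of zeros. A small self-contained sketch of that emission pattern follows; emit_pair and the register offsets are arbitrary placeholders, not real META control registers.

#include <stdint.h>
#include <stdio.h>

/* Append one {register, value} pair to the stream and return the advanced cursor. */
static uint32_t *emit_pair(uint32_t *cursor, uint32_t reg, uint32_t val)
{
    *cursor++ = reg;
    *cursor++ = val;
    return cursor;
}

int main(void)
{
    uint32_t boot_conf[8];
    uint32_t *cursor = boot_conf;
    uint32_t *p;

    cursor = emit_pair(cursor, 0x1000u, 0x1u);  /* arbitrary example registers, */
    cursor = emit_pair(cursor, 0x1008u, 0xFu);  /* values chosen for illustration */
    cursor = emit_pair(cursor, 0x0u, 0x0u);     /* end-of-sequence marker         */

    for (p = boot_conf; p < cursor; p += 2)
    {
        printf("reg 0x%08x <- 0x%08x\n", (unsigned)p[0], (unsigned)p[1]);
    }
    return 0;
}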
-+ */ -+ -+ if (asRGXFWLayoutTable[i].eType == FW_DATA) -+ { -+ IMG_UINT64 ui64SegOutAddr; -+ IMG_UINT32 ui32SegId = RGXFW_SEGMMU_DATA_ID; -+ -+ ui64SegOutAddr = (psFWDataDevVAddrBase->uiAddr | ui64SegOutAddrTop) + -+ asRGXFWLayoutTable[i].ui32AllocOffset; -+ -+ RGXFWConfigureSegID(hPrivate, -+ ui64SegOutAddr, -+ asRGXFWLayoutTable[i].ui32BaseAddr, -+ asRGXFWLayoutTable[i].ui32AllocSize, -+ ui32SegId, -+ ppui32BootConf); /*write the sequence to the bootldr */ -+ -+ break; -+ } -+ } -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXFWConfigureMetaCaches -+ -+ @Description Configure and enable the Meta instruction and data caches -+ -+ @Input hPrivate : Implementation specific data -+ @Input ui32NumThreads : Number of FW threads in use -+ @Input ppui32BootConf : Pointer to bootloader data -+ -+ @Return void -+ -+******************************************************************************/ -+static void RGXFWConfigureMetaCaches(const void *hPrivate, -+ IMG_UINT32 ui32NumThreads, -+ IMG_UINT32 **ppui32BootConf) -+{ -+ IMG_UINT32 *pui32BootConf = *ppui32BootConf; -+ IMG_UINT32 ui32DCacheT0, ui32ICacheT0; -+ IMG_UINT32 ui32DCacheT1, ui32ICacheT1; -+ IMG_UINT32 ui32DCacheT2, ui32ICacheT2; -+ IMG_UINT32 ui32DCacheT3, ui32ICacheT3; -+ -+#define META_CR_MMCU_LOCAL_EBCTRL (0x04830600) -+#define META_CR_MMCU_LOCAL_EBCTRL_ICWIN (0x3 << 14) -+#define META_CR_MMCU_LOCAL_EBCTRL_DCWIN (0x3 << 6) -+#define META_CR_SYSC_DCPART(n) (0x04830200 + (n)*0x8) -+#define META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE (0x1 << 31) -+#define META_CR_SYSC_ICPART(n) (0x04830220 + (n)*0x8) -+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF (0x8 << 16) -+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE (0xF) -+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE (0x7) -+#define META_CR_MMCU_DCACHE_CTRL (0x04830018) -+#define META_CR_MMCU_ICACHE_CTRL (0x04830020) -+#define META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN (0x1) -+ -+ RGXCommentLog(hPrivate, "********** Meta caches configuration *********"); -+ -+ /* Initialise I/Dcache settings */ -+ ui32DCacheT0 = ui32DCacheT1 = (IMG_UINT32)META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE; -+ ui32DCacheT2 = ui32DCacheT3 = (IMG_UINT32)META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE; -+ ui32ICacheT0 = ui32ICacheT1 = ui32ICacheT2 = ui32ICacheT3 = 0; -+ -+ if (ui32NumThreads == 1) -+ { -+ ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE; -+ ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE; -+ } -+ else -+ { -+ ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE; -+ ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE; -+ -+ ui32DCacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE | -+ META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF; -+ ui32ICacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE | -+ META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF; -+ } -+ -+ /* Local region MMU enhanced bypass: WIN-3 mode for code and data caches */ -+ *pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL; -+ *pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL_ICWIN | -+ META_CR_MMCU_LOCAL_EBCTRL_DCWIN; -+ -+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", -+ META_CR_MMCU_LOCAL_EBCTRL, -+ META_CR_MMCU_LOCAL_EBCTRL_ICWIN | META_CR_MMCU_LOCAL_EBCTRL_DCWIN); -+ -+ /* Data cache partitioning thread 0 to 3 */ -+ *pui32BootConf++ = META_CR_SYSC_DCPART(0); -+ *pui32BootConf++ = ui32DCacheT0; -+ *pui32BootConf++ = META_CR_SYSC_DCPART(1); -+ *pui32BootConf++ = ui32DCacheT1; -+ *pui32BootConf++ = META_CR_SYSC_DCPART(2); -+ *pui32BootConf++ 
= ui32DCacheT2; -+ *pui32BootConf++ = META_CR_SYSC_DCPART(3); -+ *pui32BootConf++ = ui32DCacheT3; -+ -+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", -+ META_CR_SYSC_DCPART(0), ui32DCacheT0); -+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", -+ META_CR_SYSC_DCPART(1), ui32DCacheT1); -+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", -+ META_CR_SYSC_DCPART(2), ui32DCacheT2); -+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", -+ META_CR_SYSC_DCPART(3), ui32DCacheT3); -+ -+ /* Enable data cache hits */ -+ *pui32BootConf++ = META_CR_MMCU_DCACHE_CTRL; -+ *pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN; -+ -+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", -+ META_CR_MMCU_DCACHE_CTRL, -+ META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN); -+ -+ /* Instruction cache partitioning thread 0 to 3 */ -+ *pui32BootConf++ = META_CR_SYSC_ICPART(0); -+ *pui32BootConf++ = ui32ICacheT0; -+ *pui32BootConf++ = META_CR_SYSC_ICPART(1); -+ *pui32BootConf++ = ui32ICacheT1; -+ *pui32BootConf++ = META_CR_SYSC_ICPART(2); -+ *pui32BootConf++ = ui32ICacheT2; -+ *pui32BootConf++ = META_CR_SYSC_ICPART(3); -+ *pui32BootConf++ = ui32ICacheT3; -+ -+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", -+ META_CR_SYSC_ICPART(0), ui32ICacheT0); -+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", -+ META_CR_SYSC_ICPART(1), ui32ICacheT1); -+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", -+ META_CR_SYSC_ICPART(2), ui32ICacheT2); -+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", -+ META_CR_SYSC_ICPART(3), ui32ICacheT3); -+ -+ /* Enable instruction cache hits */ -+ *pui32BootConf++ = META_CR_MMCU_ICACHE_CTRL; -+ *pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN; -+ -+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", -+ META_CR_MMCU_ICACHE_CTRL, -+ META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN); -+ -+ *pui32BootConf++ = 0x040000C0; -+ *pui32BootConf++ = 0; -+ -+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", 0x040000C0, 0); -+ -+ *ppui32BootConf = pui32BootConf; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function ProcessLDRCommandStream -+ -+ @Description Process the output of the Meta toolchain in the .LDR format -+ copying code and data sections into their final location and -+ passing some information to the Meta bootloader -+ -+ @Input hPrivate : Implementation specific data -+ @Input pbLDR : Pointer to FW blob -+ @Input pvHostFWCodeAddr : Pointer to FW code -+ @Input pvHostFWDataAddr : Pointer to FW data -+ @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code -+ @Input pvHostFWCorememDataAddr : Pointer to FW coremem data -+ @Input ppui32BootConf : Pointer to bootloader data -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate, -+ const IMG_BYTE* pbLDR, -+ void* pvHostFWCodeAddr, -+ void* pvHostFWDataAddr, -+ void* pvHostFWCorememCodeAddr, -+ void* pvHostFWCorememDataAddr, -+ IMG_UINT32 **ppui32BootConf) -+{ -+ RGX_META_LDR_BLOCK_HDR *psHeader = (RGX_META_LDR_BLOCK_HDR *) pbLDR; -+ RGX_META_LDR_L1_DATA_BLK *psL1Data = -+ (RGX_META_LDR_L1_DATA_BLK*) ((IMG_UINT8 *) pbLDR + psHeader->ui32SLData); -+ -+ IMG_UINT32 *pui32BootConf = ppui32BootConf ? 
*ppui32BootConf : NULL; -+ IMG_UINT32 ui32CorememSize = RGXGetFWCorememSize(hPrivate); -+ -+ RGXCommentLog(hPrivate, "**********************************************"); -+ RGXCommentLog(hPrivate, "************** Begin LDR Parsing *************"); -+ RGXCommentLog(hPrivate, "**********************************************"); -+ -+ while (psL1Data != NULL) -+ { -+ if (RGX_META_LDR_BLK_IS_COMMENT(psL1Data->ui16Cmd)) -+ { -+ /* Don't process comment blocks */ -+ goto NextBlock; -+ } -+ -+ switch (psL1Data->ui16Cmd & RGX_META_LDR_CMD_MASK) -+ { -+ case RGX_META_LDR_CMD_LOADMEM: -+ { -+ RGX_META_LDR_L2_DATA_BLK *psL2Block = -+ (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[1]); -+ IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0]; -+ IMG_UINT32 ui32DataSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */; -+ void *pvWriteAddr; -+ PVRSRV_ERROR eError; -+ -+ if (!RGX_META_IS_COREMEM_CODE(ui32Offset, ui32CorememSize) && -+ !RGX_META_IS_COREMEM_DATA(ui32Offset, ui32CorememSize)) -+ { -+ /* Global range is aliased to local range */ -+ ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT; -+ } -+ -+ eError = FindMMUSegment(hPrivate, -+ ui32Offset, -+ pvHostFWCodeAddr, -+ pvHostFWDataAddr, -+ pvHostFWCorememCodeAddr, -+ pvHostFWCorememDataAddr, -+ &pvWriteAddr); -+ -+ if (eError != PVRSRV_OK) -+ { -+ RGXErrorLog(hPrivate, -+ "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment", -+ ui32Offset, ui32DataSize); -+ return eError; -+ } -+ -+ /* Write to FW allocation only if available */ -+ if (pvWriteAddr) -+ { -+ RGXMemCopy(hPrivate, -+ pvWriteAddr, -+ psL2Block->aui32BlockData, -+ ui32DataSize); -+ RGXFwSharedMemCacheOpExec(pvWriteAddr, ui32DataSize, PVRSRV_CACHE_OP_FLUSH); -+ } -+ -+ break; -+ } -+ case RGX_META_LDR_CMD_LOADCORE: -+ case RGX_META_LDR_CMD_LOADMMREG: -+ { -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ case RGX_META_LDR_CMD_START_THREADS: -+ { -+ /* Don't process this block */ -+ break; -+ } -+ case RGX_META_LDR_CMD_ZEROMEM: -+ { -+ IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0]; -+ IMG_UINT32 ui32ByteCount = psL1Data->aui32CmdData[1]; -+ void *pvWriteAddr; -+ PVRSRV_ERROR eError; -+ -+ if (RGX_META_IS_COREMEM_DATA(ui32Offset, ui32CorememSize)) -+ { -+ /* cannot zero coremem directly */ -+ break; -+ } -+ -+ /* Global range is aliased to local range */ -+ ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT; -+ -+ eError = FindMMUSegment(hPrivate, -+ ui32Offset, -+ pvHostFWCodeAddr, -+ pvHostFWDataAddr, -+ pvHostFWCorememCodeAddr, -+ pvHostFWCorememDataAddr, -+ &pvWriteAddr); -+ -+ if (eError != PVRSRV_OK) -+ { -+ RGXErrorLog(hPrivate, -+ "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment", -+ ui32Offset, ui32ByteCount); -+ return eError; -+ } -+ -+ /* Write to FW allocation only if available */ -+ if (pvWriteAddr) -+ { -+ RGXMemSet(hPrivate, pvWriteAddr, 0, ui32ByteCount); -+ RGXFwSharedMemCacheOpExec(pvWriteAddr, ui32ByteCount, PVRSRV_CACHE_OP_FLUSH); -+ } -+ -+ break; -+ } -+ case RGX_META_LDR_CMD_CONFIG: -+ { -+ RGX_META_LDR_L2_DATA_BLK *psL2Block = -+ (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[0]); -+ RGX_META_LDR_CFG_BLK *psConfigCommand = (RGX_META_LDR_CFG_BLK*) psL2Block->aui32BlockData; -+ IMG_UINT32 ui32L2BlockSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */; -+ IMG_UINT32 ui32CurrBlockSize = 0; -+ -+ while (ui32L2BlockSize) -+ { -+ switch (psConfigCommand->ui32Type) -+ { -+ case RGX_META_LDR_CFG_PAUSE: -+ case RGX_META_LDR_CFG_READ: -+ { -+ ui32CurrBlockSize = 8; -+ return 
PVRSRV_ERROR_INIT_FAILURE; -+ } -+ case RGX_META_LDR_CFG_WRITE: -+ { -+ IMG_UINT32 ui32RegisterOffset = psConfigCommand->aui32BlockData[0]; -+ IMG_UINT32 ui32RegisterValue = psConfigCommand->aui32BlockData[1]; -+ -+ /* Only write to bootloader if we got a valid -+ * pointer to the FW code allocation -+ */ -+ if (pui32BootConf) -+ { -+ /* Do register write */ -+ *pui32BootConf++ = ui32RegisterOffset; -+ *pui32BootConf++ = ui32RegisterValue; -+ } -+ -+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", -+ ui32RegisterOffset, ui32RegisterValue); -+ -+ ui32CurrBlockSize = 12; -+ break; -+ } -+ case RGX_META_LDR_CFG_MEMSET: -+ case RGX_META_LDR_CFG_MEMCHECK: -+ { -+ ui32CurrBlockSize = 20; -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ default: -+ { -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ } -+ ui32L2BlockSize -= ui32CurrBlockSize; -+ psConfigCommand = (RGX_META_LDR_CFG_BLK*) (((IMG_UINT8*) psConfigCommand) + ui32CurrBlockSize); -+ } -+ -+ break; -+ } -+ default: -+ { -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ } -+ -+NextBlock: -+ -+ if (psL1Data->ui32Next == 0xFFFFFFFF) -+ { -+ psL1Data = NULL; -+ } -+ else -+ { -+ psL1Data = (RGX_META_LDR_L1_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->ui32Next); -+ } -+ } -+ -+ if (pui32BootConf) -+ { -+ *ppui32BootConf = pui32BootConf; -+ } -+ -+ RGXCommentLog(hPrivate, "**********************************************"); -+ RGXCommentLog(hPrivate, "************** End Loader Parsing ************"); -+ RGXCommentLog(hPrivate, "**********************************************"); -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function ProcessELFCommandStream -+ -+ @Description Process a file in .ELF format copying code and data sections -+ into their final location -+ -+ @Input hPrivate : Implementation specific data -+ @Input pbELF : Pointer to FW blob -+ @Input pvHostFWCodeAddr : Pointer to FW code -+ @Input pvHostFWDataAddr : Pointer to FW data -+ @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code -+ @Input pvHostFWCorememDataAddr : Pointer to FW coremem data -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate, -+ const IMG_BYTE *pbELF, -+ void *pvHostFWCodeAddr, -+ void *pvHostFWDataAddr, -+ void* pvHostFWCorememCodeAddr, -+ void* pvHostFWCorememDataAddr) -+{ -+ IMG_UINT32 ui32Entry; -+ IMG_ELF_HDR *psHeader = (IMG_ELF_HDR *)pbELF; -+ IMG_ELF_PROGRAM_HDR *psProgramHeader = -+ (IMG_ELF_PROGRAM_HDR *)(pbELF + psHeader->ui32Ephoff); -+ PVRSRV_ERROR eError; -+ -+ for (ui32Entry = 0; ui32Entry < psHeader->ui32Ephnum; ui32Entry++, psProgramHeader++) -+ { -+ void *pvWriteAddr; -+ -+ /* Only consider loadable entries in the ELF segment table */ -+ if (psProgramHeader->ui32Ptype != ELF_PT_LOAD) continue; -+ -+ eError = FindMMUSegment(hPrivate, -+ psProgramHeader->ui32Pvaddr, -+ pvHostFWCodeAddr, -+ pvHostFWDataAddr, -+ pvHostFWCorememCodeAddr, -+ pvHostFWCorememDataAddr, -+ &pvWriteAddr); -+ -+ if (eError != PVRSRV_OK) -+ { -+ RGXErrorLog(hPrivate, -+ "%s: Addr 0x%x (size: %d) not found in any segment",__func__, -+ psProgramHeader->ui32Pvaddr, -+ psProgramHeader->ui32Pfilesz); -+ return eError; -+ } -+ -+ /* Write to FW allocation only if available */ -+ if (pvWriteAddr) -+ { -+ RGXMemCopy(hPrivate, -+ pvWriteAddr, -+ (IMG_PBYTE)(pbELF + psProgramHeader->ui32Poffset), -+ psProgramHeader->ui32Pfilesz); -+ -+ RGXMemSet(hPrivate, -+ (IMG_PBYTE)pvWriteAddr + 
psProgramHeader->ui32Pfilesz, -+ 0, -+ psProgramHeader->ui32Pmemsz - psProgramHeader->ui32Pfilesz); -+ -+ RGXFwSharedMemCacheOpExec(pvWriteAddr, psProgramHeader->ui32Pmemsz, PVRSRV_CACHE_OP_FLUSH); -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+ -+IMG_UINT32 RGXGetFWImageSectionOffset(const void *hPrivate, RGX_FW_SECTION_ID eId) -+{ -+ RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId); -+ -+ return psEntry->ui32AllocOffset; -+} -+ -+IMG_UINT32 RGXGetFWImageSectionMaxSize(const void *hPrivate, RGX_FW_SECTION_ID eId) -+{ -+ RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId); -+ -+ return psEntry->ui32MaxSize; -+} -+ -+IMG_UINT32 RGXGetFWImageSectionAllocSize(const void *hPrivate, RGX_FW_SECTION_ID eId) -+{ -+ RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId); -+ -+ return psEntry->ui32AllocSize; -+} -+ -+IMG_UINT32 RGXGetFWImageSectionAddress(const void *hPrivate, RGX_FW_SECTION_ID eId) -+{ -+ RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId); -+ -+ return psEntry->ui32BaseAddr; -+} -+ -+static inline -+PVRSRV_ERROR RGXValidateFWHeaderVersion1(const void *hPrivate, -+ const RGX_FW_INFO_HEADER *psInfoHeader) -+{ -+ /* Applicable to any FW_INFO_VERSION */ -+ if (psInfoHeader->ui32LayoutEntrySize != sizeof(RGX_FW_LAYOUT_ENTRY)) -+ { -+ RGXErrorLog(hPrivate, "%s: FW layout entry sizes mismatch (expected: %u, found: %u)", -+ __func__, -+ (IMG_UINT32) sizeof(RGX_FW_LAYOUT_ENTRY), -+ psInfoHeader->ui32LayoutEntrySize); -+ } -+ -+ /* Applicable to any FW_INFO_VERSION */ -+ if (psInfoHeader->ui32LayoutEntryNum > MAX_NUM_ENTRIES) -+ { -+ RGXErrorLog(hPrivate, "%s: Not enough storage for the FW layout table (max: %u entries, found: %u)", -+ __func__, -+ MAX_NUM_ENTRIES, -+ psInfoHeader->ui32LayoutEntryNum); -+ } -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ /* Applicable to any FW_INFO_VERSION */ -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS)) -+ { -+ if (psInfoHeader->ui32FwPageSize != RGXGetOSPageSize(hPrivate)) -+ { -+ RGXErrorLog(hPrivate, "%s: FW page size mismatch (expected: %u, found: %u)", -+ __func__, -+ (IMG_UINT32) RGXGetOSPageSize(hPrivate), -+ psInfoHeader->ui32FwPageSize); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ } -+#endif -+ -+ if (psInfoHeader->ui32InfoVersion != FW_INFO_VERSION) -+ { -+ /* Not an error because RGX_FW_INFO_HEADER is now versioned. It can grow -+ * incrementally and it must be backwards compatible. 
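Illustrative aside (not part of the patch): the validation above treats the FW info header as an append-only, versioned structure, so a newer advertised info version is only reported while the reader keeps using the fields it was built against. A minimal sketch of that pattern follows, using a made-up header layout; the field names and sizes are illustrative, not the real RGX_FW_INFO_HEADER.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical append-only header: newer producers may add fields after
 * entry_num and bump version; an older reader copies only the prefix it
 * understands and ignores the rest.
 */
typedef struct {
    uint32_t version;     /* bumped whenever fields are appended           */
    uint32_t header_len;  /* header size as written by the producer        */
    uint32_t entry_num;   /* number of layout entries following the header */
} fw_info_v1;

static int read_fw_info(const uint8_t *blob, size_t blob_len, fw_info_v1 *out)
{
    if (blob_len < sizeof(*out))
        return -1;   /* too small even for the version-1 prefix */

    /* Copy no more than this reader understands, even if header_len is larger. */
    memcpy(out, blob, sizeof(*out));

    if (out->version == 0)
        return -1;   /* invalid, mirroring the 'case 0' rejection above */

    if (out->version != 1)
    {
        /* Newer producer: tolerated; bytes in [sizeof(*out), header_len)
         * are simply not interpreted by this reader.
         */
        fprintf(stderr, "note: info version %u, reader built against 1\n",
                (unsigned)out->version);
    }
    return 0;
}

int main(void)
{
    /* A v2 producer wrote a 16-byte header (little-endian host assumed);
     * the v1 reader still parses the prefix it knows.
     */
    uint8_t blob[16] = { 2,0,0,0, 16,0,0,0, 3,0,0,0, 0xAA,0xBB,0xCC,0xDD };
    fw_info_v1 hdr;

    if (read_fw_info(blob, sizeof(blob), &hdr) == 0)
        printf("version=%u entries=%u\n",
               (unsigned)hdr.version, (unsigned)hdr.entry_num);
    return 0;
}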
-+ */ -+ RGXCommentLog(hPrivate, "%s: FW info version mismatch (expected: %u, found: %u)", -+ __func__, -+ (IMG_UINT32) FW_INFO_VERSION, -+ psInfoHeader->ui32InfoVersion); -+ goto exit_version1_validation; -+ } -+ -+ if (psInfoHeader->ui32HeaderLen != sizeof(RGX_FW_INFO_HEADER)) -+ { -+ RGXErrorLog(hPrivate, "%s: FW info header sizes mismatch (expected: %u, found: %u)", -+ __func__, -+ (IMG_UINT32) sizeof(RGX_FW_INFO_HEADER), -+ psInfoHeader->ui32HeaderLen); -+ } -+ -+exit_version1_validation: -+ return PVRSRV_OK; -+} -+ -+static inline -+PVRSRV_ERROR RGXValidateFWHeaderVersion2(const void *hPrivate, -+ const RGX_FW_INFO_HEADER *psInfoHeader) -+{ -+ if (psInfoHeader->ui16PVRVersionMajor != PVRVERSION_MAJ || -+ psInfoHeader->ui16PVRVersionMinor != PVRVERSION_MIN) -+ { -+ RGXErrorLog(hPrivate, "%s: KM and FW version mismatch (expected: %u.%u, found: %u.%u)", -+ __func__, -+ PVRVERSION_MAJ, -+ PVRVERSION_MIN, -+ psInfoHeader->ui16PVRVersionMajor, -+ psInfoHeader->ui16PVRVersionMinor); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static inline -+PVRSRV_ERROR RGXValidateFWHeaderVersion(const void *hPrivate, -+ const RGX_FW_INFO_HEADER *psInfoHeader) -+{ -+ PVRSRV_ERROR eError; -+ -+ switch (psInfoHeader->ui32InfoVersion) -+ { -+ default: -+ __fallthrough; -+ case 2: -+ eError = RGXValidateFWHeaderVersion2(hPrivate, psInfoHeader); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+ __fallthrough; -+ case 1: -+ eError = RGXValidateFWHeaderVersion1(hPrivate, psInfoHeader); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+ break; -+ case 0: -+ RGXErrorLog(hPrivate, "%s: invalid FW_INFO_VERSION", __func__); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate, -+ const IMG_BYTE *pbRGXFirmware, -+ const IMG_UINT32 ui32RGXFirmwareSize, -+ IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize, -+ IMG_DEVMEM_SIZE_T *puiFWDataAllocSize, -+ IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize, -+ IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize, -+ RGX_FW_INFO_HEADER *psFWInfoHeader) -+{ -+ RGX_FW_INFO_HEADER *psInfoHeader; -+ const IMG_BYTE *pbRGXFirmwareInfo; -+ const IMG_BYTE *pbRGXFirmwareLayout; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 i; -+ -+ if (pbRGXFirmware == NULL || ui32RGXFirmwareSize == 0 || ui32RGXFirmwareSize <= FW_BLOCK_SIZE) -+ { -+ RGXErrorLog(hPrivate, "%s: Invalid FW binary at %p, size %u", -+ __func__, pbRGXFirmware, ui32RGXFirmwareSize); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* -+ * Acquire pointer to the FW info header within the FW image. -+ * The format of the header in the FW image might not be the one expected -+ * by the driver, but the driver should still be able to correctly read -+ * the information below, as long as new/incompatible elements are added -+ * at the end of the header (they will be ignored by the driver). -+ */ -+ -+ pbRGXFirmwareInfo = pbRGXFirmware + ui32RGXFirmwareSize - FW_BLOCK_SIZE; -+ psInfoHeader = (RGX_FW_INFO_HEADER*)pbRGXFirmwareInfo; -+ -+ eError = RGXValidateFWHeaderVersion(hPrivate, psInfoHeader); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+ ui32LayoutEntryNum = psInfoHeader->ui32LayoutEntryNum; -+ -+ -+ /* -+ * Copy FW layout table from FW image to local array. -+ * One entry is copied at a time and the copy is limited to what the driver -+ * expects to find in it. 
Assuming that new/incompatible elements -+ * are added at the end of each entry, the loop below adapts the table -+ * in the FW image into the format expected by the driver. -+ */ -+ -+ pbRGXFirmwareLayout = pbRGXFirmwareInfo + psInfoHeader->ui32HeaderLen; -+ -+ for (i = 0; i < ui32LayoutEntryNum; i++) -+ { -+ RGX_FW_LAYOUT_ENTRY *psOutEntry = &asRGXFWLayoutTable[i]; -+ -+ RGX_FW_LAYOUT_ENTRY *psInEntry = (RGX_FW_LAYOUT_ENTRY*) -+ (pbRGXFirmwareLayout + i * psInfoHeader->ui32LayoutEntrySize); -+ -+ RGXMemCopy(hPrivate, -+ (void*)psOutEntry, -+ (void*)psInEntry, -+ sizeof(RGX_FW_LAYOUT_ENTRY)); -+ } -+ -+ -+ /* Calculate how much memory the FW needs for its code and data segments */ -+ -+ *puiFWCodeAllocSize = 0; -+ *puiFWDataAllocSize = 0; -+ *puiFWCorememCodeAllocSize = 0; -+ *puiFWCorememDataAllocSize = 0; -+ -+ for (i = 0; i < ui32LayoutEntryNum; i++) -+ { -+ switch (asRGXFWLayoutTable[i].eType) -+ { -+ case FW_CODE: -+ *puiFWCodeAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; -+ break; -+ -+ case FW_DATA: -+ *puiFWDataAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; -+ break; -+ -+ case FW_COREMEM_CODE: -+ *puiFWCorememCodeAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; -+ break; -+ -+ case FW_COREMEM_DATA: -+ *puiFWCorememDataAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; -+ break; -+ -+ default: -+ RGXErrorLog(hPrivate, "%s: Unknown FW section type %u\n", -+ __func__, asRGXFWLayoutTable[i].eType); -+ break; -+ } -+ } -+ -+ *psFWInfoHeader = *psInfoHeader; -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate, -+ const IMG_BYTE *pbRGXFirmware, -+ void *pvFWCode, -+ void *pvFWData, -+ void *pvFWCorememCode, -+ void *pvFWCorememData, -+ PVRSRV_FW_BOOT_PARAMS *puFWParams) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_BOOL bMIPS = IMG_FALSE; -+ IMG_BOOL bRISCV = RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR); -+ IMG_BOOL bMETA; -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ bMIPS = (IMG_BOOL)RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS); -+#endif -+ bMETA = (IMG_BOOL)(!bMIPS && !bRISCV); -+ -+ if (bMETA) -+ { -+ IMG_UINT32 *pui32BootConf = NULL; -+ /* Skip bootloader configuration if a pointer to the FW code -+ * allocation is not available -+ */ -+ if (pvFWCode) -+ { -+ /* This variable points to the bootloader code which is mostly -+ * a sequence of pairs -+ */ -+ pui32BootConf = ((IMG_UINT32*) pvFWCode) + RGXFW_BOOTLDR_CONF_OFFSET; -+ -+ /* Slave port and JTAG accesses are privileged */ -+ *pui32BootConf++ = META_CR_SYSC_JTAG_THREAD; -+ *pui32BootConf++ = META_CR_SYSC_JTAG_THREAD_PRIV_EN; -+ -+ RGXFWConfigureSegMMU(hPrivate, -+ &puFWParams->sMeta.sFWCodeDevVAddr, -+ &puFWParams->sMeta.sFWDataDevVAddr, -+ &pui32BootConf); -+ } -+ -+ /* Process FW image data stream */ -+ eError = ProcessLDRCommandStream(hPrivate, -+ pbRGXFirmware, -+ pvFWCode, -+ pvFWData, -+ pvFWCorememCode, -+ pvFWCorememData, -+ &pui32BootConf); -+ if (eError != PVRSRV_OK) -+ { -+ RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError); -+ return eError; -+ } -+ -+ /* Skip bootloader configuration if a pointer to the FW code -+ * allocation is not available -+ */ -+ if (pvFWCode) -+ { -+ IMG_UINT32 ui32NumThreads = puFWParams->sMeta.ui32NumThreads; -+ -+ if ((ui32NumThreads == 0) || (ui32NumThreads > 2)) -+ { -+ RGXErrorLog(hPrivate, -+ "ProcessFWImage: Wrong Meta threads configuration, using one thread only"); -+ -+ ui32NumThreads = 1; -+ } -+ -+ RGXFWConfigureMetaCaches(hPrivate, -+ ui32NumThreads, -+ &pui32BootConf); -+ -+ /* Signal the end of the 
conf sequence */ -+ *pui32BootConf++ = 0x0; -+ *pui32BootConf++ = 0x0; -+ -+ if (puFWParams->sMeta.uiFWCorememCodeSize && (puFWParams->sMeta.sFWCorememCodeFWAddr.ui32Addr != 0)) -+ { -+ *pui32BootConf++ = puFWParams->sMeta.sFWCorememCodeFWAddr.ui32Addr; -+ *pui32BootConf++ = puFWParams->sMeta.uiFWCorememCodeSize; -+ } -+ else -+ { -+ *pui32BootConf++ = 0; -+ *pui32BootConf++ = 0; -+ } -+ -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, META_DMA)) -+ { -+ *pui32BootConf++ = (IMG_UINT32) (puFWParams->sMeta.sFWCorememCodeDevVAddr.uiAddr >> 32); -+ *pui32BootConf++ = (IMG_UINT32) puFWParams->sMeta.sFWCorememCodeDevVAddr.uiAddr; -+ } -+ else -+ { -+ *pui32BootConf++ = 0; -+ *pui32BootConf++ = 0; -+ } -+ } -+ } -+#if defined(RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES) -+ else if (bMIPS) -+ { -+ /* Process FW image data stream */ -+ eError = ProcessELFCommandStream(hPrivate, -+ pbRGXFirmware, -+ pvFWCode, -+ pvFWData, -+ NULL, -+ NULL); -+ if (eError != PVRSRV_OK) -+ { -+ RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError); -+ return eError; -+ } -+ -+ if (pvFWData) -+ { -+ RGXMIPSFW_BOOT_DATA *psBootData = (RGXMIPSFW_BOOT_DATA*) -+ /* To get a pointer to the bootloader configuration data start from a pointer to the FW image... */ -+ IMG_OFFSET_ADDR(pvFWData, -+ /* ... jump to the boot/NMI data page... */ -+ (RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA) -+ /* ... and then jump to the bootloader data offset within the page */ -+ + RGXMIPSFW_BOOTLDR_CONF_OFFSET)); -+ -+ /* Rogue Registers physical address */ -+ psBootData->ui64RegBase = puFWParams->sMips.sGPURegAddr.uiAddr; -+ -+ /* MIPS Page Table physical address */ -+ psBootData->ui32PTLog2PageSize = puFWParams->sMips.ui32FWPageTableLog2PageSize; -+ psBootData->ui32PTNumPages = puFWParams->sMips.ui32FWPageTableNumPages; -+ psBootData->aui64PTPhyAddr[0U] = puFWParams->sMips.asFWPageTableAddr[0U].uiAddr; -+ psBootData->aui64PTPhyAddr[1U] = puFWParams->sMips.asFWPageTableAddr[1U].uiAddr; -+ psBootData->aui64PTPhyAddr[2U] = puFWParams->sMips.asFWPageTableAddr[2U].uiAddr; -+ psBootData->aui64PTPhyAddr[3U] = puFWParams->sMips.asFWPageTableAddr[3U].uiAddr; -+ -+ /* MIPS Stack Pointer Physical Address */ -+ psBootData->ui64StackPhyAddr = puFWParams->sMips.sFWStackAddr.uiAddr; -+ -+ /* Reserved for future use */ -+ psBootData->ui32Reserved1 = 0; -+ psBootData->ui32Reserved2 = 0; -+ } -+ } -+#endif /* #if defined(RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES) */ -+ else -+ { -+ /* Process FW image data stream */ -+ eError = ProcessELFCommandStream(hPrivate, -+ pbRGXFirmware, -+ pvFWCode, -+ pvFWData, -+ pvFWCorememCode, -+ pvFWCorememData); -+ if (eError != PVRSRV_OK) -+ { -+ RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError); -+ return eError; -+ } -+ -+ if (pvFWData) -+ { -+ RGXRISCVFW_BOOT_DATA *psBootData = (RGXRISCVFW_BOOT_DATA*) -+ IMG_OFFSET_ADDR(pvFWData, RGXRISCVFW_BOOTLDR_CONF_OFFSET); -+ -+ psBootData->ui64CorememCodeDevVAddr = puFWParams->sRISCV.sFWCorememCodeDevVAddr.uiAddr; -+ psBootData->ui32CorememCodeFWAddr = puFWParams->sRISCV.sFWCorememCodeFWAddr.ui32Addr; -+ psBootData->ui32CorememCodeSize = puFWParams->sRISCV.uiFWCorememCodeSize; -+ -+ psBootData->ui64CorememDataDevVAddr = puFWParams->sRISCV.sFWCorememDataDevVAddr.uiAddr; -+ psBootData->ui32CorememDataFWAddr = puFWParams->sRISCV.sFWCorememDataFWAddr.ui32Addr; -+ psBootData->ui32CorememDataSize = puFWParams->sRISCV.uiFWCorememDataSize; -+ } -+ } -+ -+ return eError; -+} -diff --git a/drivers/gpu/drm/img-rogue/rgxfwimageutils.h 
b/drivers/gpu/drm/img-rogue/rgxfwimageutils.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxfwimageutils.h -@@ -0,0 +1,224 @@ -+/*************************************************************************/ /*! -+@File -+@Title Header for Services Firmware image utilities used at init time -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for Services Firmware image utilities used at init time -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RGXFWIMAGEUTILS_H -+#define RGXFWIMAGEUTILS_H -+ -+/* The routines declared here are built on top of an abstraction layer to -+ * hide DDK/OS-specific details in case they are used outside of the DDK -+ * (e.g. when DRM security is enabled). -+ * Any new dependency should be added to rgxlayer.h. -+ * Any new code should be built on top of the existing abstraction layer, -+ * which should be extended when necessary. -+ */ -+#include "rgxlayer.h" -+ -+/*! 
-+******************************************************************************* -+ -+ @Function RGXGetFWImageSectionOffset -+ -+ @Input hPrivate : Implementation specific data -+ @Input eId : Section id -+ -+ @Description Return offset of a Firmware section, relative to the beginning -+ of the code or data allocation (depending on the section id) -+ -+******************************************************************************/ -+IMG_UINT32 RGXGetFWImageSectionOffset(const void *hPrivate, -+ RGX_FW_SECTION_ID eId); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXGetFWImageSectionMaxSize -+ -+ @Input hPrivate : Implementation specific data -+ @Input eId : Section id -+ -+ @Description Return maximum size (not allocation size) of a Firmware section -+ -+******************************************************************************/ -+IMG_UINT32 RGXGetFWImageSectionMaxSize(const void *hPrivate, -+ RGX_FW_SECTION_ID eId); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXGetFWImageSectionAllocSize -+ -+ @Input hPrivate : Implementation specific data -+ @Input eId : Section id -+ -+ @Description Return allocation size of a Firmware section -+ -+******************************************************************************/ -+IMG_UINT32 RGXGetFWImageSectionAllocSize(const void *hPrivate, -+ RGX_FW_SECTION_ID eId); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXGetFWImageSectionAddress -+ -+ @Input hPrivate : Implementation specific data -+ @Input eId : Section id -+ -+ @Description Return base address of a Firmware section -+ -+******************************************************************************/ -+IMG_UINT32 RGXGetFWImageSectionAddress(const void *hPrivate, -+ RGX_FW_SECTION_ID eId); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXGetFWImageAllocSize -+ -+ @Description Return size of Firmware code/data/coremem code allocations -+ -+ @Input hPrivate : Implementation specific data -+ @Input pbRGXFirmware : Pointer to FW binary -+ @Input ui32RGXFirmwareSize : FW binary size -+ @Output puiFWCodeAllocSize : Code size -+ @Output puiFWDataAllocSize : Data size -+ @Output puiFWCorememCodeAllocSize : Coremem code size (0 if N/A) -+ @Output puiFWCorememDataAllocSize : Coremem data size (0 if N/A) -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate, -+ const IMG_BYTE *pbRGXFirmware, -+ const IMG_UINT32 ui32RGXFirmwareSize, -+ IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize, -+ IMG_DEVMEM_SIZE_T *puiFWDataAllocSize, -+ IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize, -+ IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize, -+ RGX_FW_INFO_HEADER *psFWInfoHeader); -+ -+/*! 
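For reference, the declarations above are normally exercised as a two-step sequence: RGXGetFWImageAllocSize() is called first to size the firmware allocations from the image's info header and layout table, and RGXProcessFWImage() (declared further down in this header) is then given the mapped pointers to populate them. The sketch below is illustrative only; hPrivate, the firmware blob pointer and size, the pvFW* mappings and the boot parameters are assumed to be provided by the caller, and the device-memory allocation step itself is outside the scope of this header.

    PVRSRV_ERROR eError;
    IMG_DEVMEM_SIZE_T uiFWCodeSize, uiFWDataSize;
    IMG_DEVMEM_SIZE_T uiFWCorememCodeSize, uiFWCorememDataSize;
    RGX_FW_INFO_HEADER sFWInfoHeader;
    PVRSRV_FW_BOOT_PARAMS uFWParams;

    /* 1. Read the FW info header and layout table and work out how large the
     *    code, data and coremem allocations must be.
     */
    eError = RGXGetFWImageAllocSize(hPrivate,
                                    pbRGXFirmware, ui32RGXFirmwareSize,
                                    &uiFWCodeSize, &uiFWDataSize,
                                    &uiFWCorememCodeSize, &uiFWCorememDataSize,
                                    &sFWInfoHeader);
    if (eError != PVRSRV_OK) { return eError; }

    /* 2. (Not shown) allocate and CPU-map FW code/data/coremem buffers of the
     *    returned sizes, and fill in the processor-specific members of
     *    uFWParams that RGXProcessFWImage() reads (device virtual addresses,
     *    coremem sizes, Meta thread count, and so on).
     */

    /* 3. Copy the image sections into place and write the boot configuration. */
    eError = RGXProcessFWImage(hPrivate, pbRGXFirmware,
                               pvFWCode, pvFWData,
                               pvFWCorememCode, pvFWCorememData,
                               &uFWParams);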
-+******************************************************************************* -+ -+ @Function ProcessLDRCommandStream -+ -+ @Description Process the output of the Meta toolchain in the .LDR format -+ copying code and data sections into their final location and -+ passing some information to the Meta bootloader -+ -+ @Input hPrivate : Implementation specific data -+ @Input pbLDR : Pointer to FW blob -+ @Input pvHostFWCodeAddr : Pointer to FW code -+ @Input pvHostFWDataAddr : Pointer to FW data -+ @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code -+ @Input pvHostFWCorememDataAddr : Pointer to FW coremem data -+ @Input ppui32BootConf : Pointer to bootloader data -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate, -+ const IMG_BYTE* pbLDR, -+ void* pvHostFWCodeAddr, -+ void* pvHostFWDataAddr, -+ void* pvHostFWCorememCodeAddr, -+ void* pvHostFWCorememDataAddr, -+ IMG_UINT32 **ppui32BootConf); -+ -+/*! -+******************************************************************************* -+ -+ @Function ProcessELFCommandStream -+ -+ @Description Process a file in .ELF format copying code and data sections -+ into their final location -+ -+ @Input hPrivate : Implementation specific data -+ @Input pbELF : Pointer to FW blob -+ @Input pvHostFWCodeAddr : Pointer to FW code -+ @Input pvHostFWDataAddr : Pointer to FW data -+ @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code -+ @Input pvHostFWCorememDataAddr : Pointer to FW coremem data -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate, -+ const IMG_BYTE *pbELF, -+ void *pvHostFWCodeAddr, -+ void *pvHostFWDataAddr, -+ void* pvHostFWCorememCodeAddr, -+ void* pvHostFWCorememDataAddr); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXProcessFWImage -+ -+ @Description Process the Firmware binary blob copying code and data -+ sections into their final location and passing some -+ information to the Firmware bootloader. -+ If a pointer to the final memory location for FW code or data -+ is not valid (NULL) then the relative section will not be -+ processed. -+ -+ @Input hPrivate : Implementation specific data -+ @Input pbRGXFirmware : Pointer to FW blob -+ @Input pvFWCode : Pointer to FW code -+ @Input pvFWData : Pointer to FW data -+ @Input pvFWCorememCode : Pointer to FW coremem code -+ @Input pvFWCorememData : Pointer to FW coremem data -+ @Input puFWParams : Parameters used by the FW at boot time -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate, -+ const IMG_BYTE *pbRGXFirmware, -+ void *pvFWCode, -+ void *pvFWData, -+ void *pvFWCorememCode, -+ void *pvFWCorememData, -+ PVRSRV_FW_BOOT_PARAMS *puFWParams); -+ -+#endif /* RGXFWIMAGEUTILS_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxfwmemctx.h b/drivers/gpu/drm/img-rogue/rgxfwmemctx.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxfwmemctx.h -@@ -0,0 +1,163 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX firmware Memctx routines -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@Description Header for operations on FWKM Shared memory context. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RGXFWMEMCTX_H -+#define RGXFWMEMCTX_H -+ -+#include "device.h" -+#include "rgx_memallocflags.h" -+#include "pvrsrv.h" -+#include "cache_ops.h" -+#include "cache_km.h" -+#include "pvr_debug.h" -+ -+#if defined(CONFIG_ARM64) && defined(__linux__) && defined(SUPPORT_CPUCACHED_FWMEMCTX) -+/* -+ * RGXFwSharedMemCPUCacheMode() -+ * We upgrade allocations on ARM64 Linux when CPU Cache snooping is enabled. -+ * This is because of the Linux direct mapping causing interference due to PIPT -+ * cache. All allocations are normally UCWC but snooping can return a bad value from the -+ * direct mapping as it is cached. Upgrade our allocations to cached to prevent bad cached -+ * values but in turn we require flushing. 
-+ */ -+static INLINE void RGXFwSharedMemCPUCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode, -+ PVRSRV_MEMALLOCFLAGS_T *puiFlags) -+{ -+ if ((*puiFlags & (PVRSRV_MEMALLOCFLAG_CPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE)) == 0) -+ { -+ /* We don't need to upgrade if we don't map into the CPU */ -+ return; -+ } -+ -+ if (PVRSRV_CHECK_UNCACHED(*puiFlags)) -+ { -+ /* We don't need to upgrade uncached allocations */ -+ return; -+ } -+ -+ /* Clear the existing CPU cache flags */ -+ *puiFlags &= ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK); -+ -+ if (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig)) -+ { -+ *puiFlags |= PVRSRV_MEMALLOCFLAG_CPU_CACHED; -+ } -+ else -+ { -+ *puiFlags |= PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; -+ } -+} -+ -+#define RGXFwSharedMemCheckSnoopMode(psDeviceConfig) PVR_ASSERT(PVRSRVSystemSnoopingOfCPUCache(psDeviceConfig)) -+ -+/* -+ * FWSharedMemCacheOpExec() -+ * This is the CPU data-cache maintenance interface for FW shared allocations. -+ * We have to be very careful that the VAs supplied to this function are -+ * sensible as to not cause a kernel oops. Given that this should only be -+ * used for allocations used for the FW this should be guaranteed. -+ */ -+static INLINE PVRSRV_ERROR RGXFwSharedMemCacheOpExec(const volatile void *pvVirtStart, -+ IMG_UINT64 uiSize, -+ PVRSRV_CACHE_OP uiCacheOp) -+{ -+ IMG_UINT64 uiEndAddr = (IMG_UINT64) pvVirtStart + uiSize; -+ IMG_CPU_PHYADDR uiUnusedPhysAddr = {.uiAddr = 0}; -+ -+ if (!pvVirtStart || uiSize == 0) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return CacheOpExec(NULL, -+ (void*) pvVirtStart, -+ (void*) uiEndAddr, -+ uiUnusedPhysAddr, -+ uiUnusedPhysAddr, -+ uiCacheOp); -+ return PVRSRV_OK; -+} -+ -+#define RGXFwSharedMemCacheOpValue(value, cacheop) (RGXFwSharedMemCacheOpExec(&value, sizeof(value), PVRSRV_CACHE_OP_##cacheop)) -+#define RGXFwSharedMemCacheOpPtr(ptr, cacheop) (RGXFwSharedMemCacheOpExec(ptr, sizeof(*ptr), PVRSRV_CACHE_OP_##cacheop)) -+#define RGXFwSharedMemCacheOpExecPfn RGXFwSharedMemCacheOpExec -+ -+static INLINE void RGXFwSharedMemFlushCCB(void *pvCCBVirtAddr, -+ IMG_UINT64 uiStart, -+ IMG_UINT64 uiFinish, -+ IMG_UINT64 uiLimit) -+{ -+ if (uiFinish >= uiStart) -+ { -+ /* Flush the CCB data */ -+ RGXFwSharedMemCacheOpExec(IMG_OFFSET_ADDR(pvCCBVirtAddr, uiStart), -+ uiFinish - uiStart, -+ PVRSRV_CACHE_OP_FLUSH); -+ } -+ else -+ { -+ /* CCCB wrapped around - flush the pre and post wrap boundary separately */ -+ RGXFwSharedMemCacheOpExec(IMG_OFFSET_ADDR(pvCCBVirtAddr, uiStart), -+ uiLimit - uiStart, -+ PVRSRV_CACHE_OP_FLUSH); -+ -+ RGXFwSharedMemCacheOpExec(IMG_OFFSET_ADDR(pvCCBVirtAddr, 0), -+ uiFinish, -+ PVRSRV_CACHE_OP_FLUSH); -+ } -+} -+#else -+#define RGXFwSharedMemCPUCacheMode(...) -+#define RGXFwSharedMemCheckSnoopMode(...) -+/* NULL value required for function callbacks */ -+#define RGXFwSharedMemCacheOpExec(...) ((void)NULL) -+#define RGXFwSharedMemCacheOpValue(...) ((void)NULL) -+#define RGXFwSharedMemCacheOpPtr(...) ((void)NULL) -+#define RGXFwSharedMemCacheOpExecPfn NULL -+#define RGXFwSharedMemFlushCCB(...) -+#endif -+ -+ -+#endif /* RGXFWMEMCTX_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxfwriscv.c b/drivers/gpu/drm/img-rogue/rgxfwriscv.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxfwriscv.c -@@ -0,0 +1,1076 @@ -+/*************************************************************************/ /*! 
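A standalone sketch (plain C, not driver code) of the wrap-around arithmetic performed by RGXFwSharedMemFlushCCB() above: when the circular command buffer has not wrapped, a single linear range is flushed; when it has wrapped, the pre-wrap and post-wrap parts are flushed as two separate ranges.

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint64_t offset; uint64_t length; } FlushRange;

    /* Returns the number of linear ranges (1 or 2) that cover [start, finish)
     * in a circular buffer of size 'limit', mirroring RGXFwSharedMemFlushCCB.
     */
    static int CcbFlushRanges(uint64_t start, uint64_t finish, uint64_t limit,
                              FlushRange out[2])
    {
        if (finish >= start)
        {
            out[0].offset = start;  out[0].length = finish - start;
            return 1;
        }
        /* Wrapped: flush [start, limit) then [0, finish). */
        out[0].offset = start;  out[0].length = limit - start;
        out[1].offset = 0;      out[1].length = finish;
        return 2;
    }

    int main(void)
    {
        FlushRange r[2];
        int n = CcbFlushRanges(3840, 256, 4096, r); /* wrapped case */
        for (int i = 0; i < n; i++)
        {
            printf("flush %llu bytes at offset %llu\n",
                   (unsigned long long)r[i].length,
                   (unsigned long long)r[i].offset);
        }
        return 0;
    }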
-+@File -+@Title RGX firmware RISC-V utility routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX firmware RISC-V utility routines -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "rgxfwutils.h" -+#include "rgxfwriscv.h" -+ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+#define RGX_GET_DMI_REG(psDevInfo, value) \ -+ ((RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) >= 4) ? 
\ -+ RGX_CR_FWCORE_DMI_##value##__RISCV_AND_HOST_SECURITY_GEQ4 : RGX_CR_FWCORE_DMI_##value) -+#define RGX_GET_RISCV_REGS_BASE(psDevInfo) ((psDevInfo)->pvSecureRegsBaseKM) -+#else -+#define RGX_GET_DMI_REG(psDevInfo, value) RGX_CR_FWCORE_DMI_##value -+#define RGX_GET_RISCV_REGS_BASE(psDevInfo) ((psDevInfo)->pvRegsBaseKM) -+#endif -+ -+/* -+ * RGXRiscvHalt -+ */ -+PVRSRV_ERROR RGXRiscvHalt(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ __maybe_unused IMG_UINT32 ui32_DMI_DMCONTROL_Reg = RGX_GET_DMI_REG(psDevInfo, DMCONTROL); -+ __maybe_unused IMG_UINT32 ui32_DMI_DMSTATUS_Reg = RGX_GET_DMI_REG(psDevInfo, DMSTATUS); -+ -+#if defined(NO_HARDWARE) && defined(PDUMP) -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, -+ PDUMP_FLAGS_CONTINUOUS, "Halt RISC-V FW"); -+ -+ /* Send halt request (no need to select one or more harts on this RISC-V core) */ -+ PDUMPREG32(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, ui32_DMI_DMCONTROL_Reg, -+ RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN | -+ RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ /* Wait until hart is halted */ -+ PDUMPREGPOL(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, -+ ui32_DMI_DMSTATUS_Reg, -+ RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, -+ RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, -+ PDUMP_FLAGS_CONTINUOUS, -+ PDUMP_POLL_OPERATOR_EQUAL); -+ -+ /* Clear halt request */ -+ PDUMPREG32(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, ui32_DMI_DMCONTROL_Reg, -+ RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, -+ PDUMP_FLAGS_CONTINUOUS); -+#else -+ IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); -+ -+ /* Send halt request (no need to select one or more harts on this RISC-V core) */ -+ OSWriteHWReg32(pui32RegsBase, ui32_DMI_DMCONTROL_Reg, -+ RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN | -+ RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); -+ -+ /* Wait until hart is halted */ -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ pui32RegsBase + ui32_DMI_DMSTATUS_Reg/sizeof(IMG_UINT32), -+ RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, -+ RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, -+ POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Hart not halted (0x%x)", -+ __func__, OSReadHWReg32(pui32RegsBase, ui32_DMI_DMSTATUS_Reg))); -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ /* Clear halt request */ -+ OSWriteHWReg32(pui32RegsBase, ui32_DMI_DMCONTROL_Reg, -+ RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * RGXRiscvIsHalted -+ */ -+IMG_BOOL RGXRiscvIsHalted(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+#if defined(NO_HARDWARE) -+ PVR_UNREFERENCED_PARAMETER(psDevInfo); -+ /* Assume the core is always halted in nohw */ -+ return IMG_TRUE; -+#else -+ IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); -+ IMG_UINT32 ui32_DMI_DMSTATUS_Reg = RGX_GET_DMI_REG(psDevInfo, DMSTATUS); -+ -+ return (OSReadHWReg32(pui32RegsBase, ui32_DMI_DMSTATUS_Reg) & -+ RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN) != 0U; -+#endif -+} -+ -+/* -+ * RGXRiscvResume -+ */ -+PVRSRV_ERROR RGXRiscvResume(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ __maybe_unused IMG_UINT32 ui32_DMI_DMCONTROL_Reg = RGX_GET_DMI_REG(psDevInfo, DMCONTROL); -+ __maybe_unused IMG_UINT32 ui32_DMI_DMSTATUS_Reg = RGX_GET_DMI_REG(psDevInfo, DMSTATUS); -+ -+#if defined(NO_HARDWARE) && defined(PDUMP) -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, -+ PDUMP_FLAGS_CONTINUOUS, "Resume RISC-V FW"); -+ -+ /* Send resume request (no need to select one or more harts on this RISC-V core) */ -+ PDUMPREG32(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, 
ui32_DMI_DMCONTROL_Reg, -+ RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN | -+ RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ /* Wait until hart is resumed */ -+ PDUMPREGPOL(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, -+ ui32_DMI_DMSTATUS_Reg, -+ RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, -+ RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, -+ PDUMP_FLAGS_CONTINUOUS, -+ PDUMP_POLL_OPERATOR_EQUAL); -+ -+ /* Clear resume request */ -+ PDUMPREG32(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, ui32_DMI_DMCONTROL_Reg, -+ RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, -+ PDUMP_FLAGS_CONTINUOUS); -+#else -+ IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); -+ -+ /* Send resume request (no need to select one or more harts on this RISC-V core) */ -+ OSWriteHWReg32(pui32RegsBase, ui32_DMI_DMCONTROL_Reg, -+ RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN | -+ RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); -+ -+ /* Wait until hart is resumed */ -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ pui32RegsBase + ui32_DMI_DMSTATUS_Reg/sizeof(IMG_UINT32), -+ RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, -+ RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, -+ POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Hart not resumed (0x%x)", -+ __func__, OSReadHWReg32(pui32RegsBase, ui32_DMI_DMSTATUS_Reg))); -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ /* Clear resume request */ -+ OSWriteHWReg32(pui32RegsBase, ui32_DMI_DMCONTROL_Reg, -+ RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+******************************************************************************* -+@Function RGXRiscvCheckAbstractCmdError -+ -+@Description Check for RISC-V abstract command errors and clear them -+ -+@Input psDevInfo Pointer to GPU device info -+ -+@Return RGXRISCVFW_ABSTRACT_CMD_ERR -+******************************************************************************/ -+static RGXRISCVFW_ABSTRACT_CMD_ERR RGXRiscvCheckAbstractCmdError(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ RGXRISCVFW_ABSTRACT_CMD_ERR eCmdErr; -+ -+ __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); -+ -+#if defined(NO_HARDWARE) && defined(PDUMP) -+ eCmdErr = RISCV_ABSTRACT_CMD_NO_ERROR; -+ -+ /* Check error status */ -+ PDUMPREGPOL(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, -+ ui32_DMI_ABSTRACTCS_Reg, -+ RISCV_ABSTRACT_CMD_NO_ERROR << RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT, -+ ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK, -+ PDUMP_FLAGS_CONTINUOUS, -+ PDUMP_POLL_OPERATOR_EQUAL); -+#else -+ void __iomem *pvRegsBaseKM = RGX_GET_RISCV_REGS_BASE(psDevInfo); -+ -+ /* Check error status */ -+ eCmdErr = (OSReadHWReg32(pvRegsBaseKM, ui32_DMI_ABSTRACTCS_Reg) -+ & ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK) -+ >> RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT; -+ -+ if (eCmdErr != RISCV_ABSTRACT_CMD_NO_ERROR) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "RISC-V FW abstract command error %u", eCmdErr)); -+ -+ /* Clear the error (note CMDERR field is write-1-to-clear) */ -+ OSWriteHWReg32(pvRegsBaseKM, ui32_DMI_ABSTRACTCS_Reg, -+ ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK); -+ } -+#endif -+ -+ return eCmdErr; -+} -+ -+/* -+ * RGXRiscReadReg -+ */ -+PVRSRV_ERROR RGXRiscvReadReg(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 *pui32Value) -+{ -+ __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); -+ __maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND); -+ __maybe_unused 
IMG_UINT32 ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); -+ -+#if defined(NO_HARDWARE) && defined(PDUMP) -+ PVR_UNREFERENCED_PARAMETER(psDevInfo); -+ PVR_UNREFERENCED_PARAMETER(ui32RegAddr); -+ PVR_UNREFERENCED_PARAMETER(pui32Value); -+ -+ /* Reading HW registers is not supported in nohw/pdump */ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+#else -+ IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); -+ -+ /* Send abstract register read command */ -+ OSWriteHWReg32(pui32RegsBase, -+ ui32_DMI_COMMAND_Reg, -+ (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | -+ RGXRISCVFW_DMI_COMMAND_READ | -+ RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | -+ ui32RegAddr); -+ -+ /* Wait until abstract command is completed */ -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ pui32RegsBase + ui32_DMI_ABSTRACTCS_Reg/sizeof(IMG_UINT32), -+ 0U, -+ RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, -+ POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", -+ __func__, OSReadHWReg32(pui32RegsBase, ui32_DMI_ABSTRACTCS_Reg))); -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ if (RGXRiscvCheckAbstractCmdError(psDevInfo) == RISCV_ABSTRACT_CMD_NO_ERROR) -+ { -+ /* Read register value */ -+ *pui32Value = OSReadHWReg32(pui32RegsBase, ui32_DMI_DATA0_Reg); -+ } -+ else -+ { -+ *pui32Value = 0U; -+ } -+ -+ return PVRSRV_OK; -+#endif -+} -+ -+/* -+ * RGXRiscvPollReg -+ */ -+PVRSRV_ERROR RGXRiscvPollReg(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32Value) -+{ -+ __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); -+ __maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND); -+ __maybe_unused IMG_UINT32 ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); -+ -+#if defined(NO_HARDWARE) && defined(PDUMP) -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "Poll RISC-V register 0x%x (expected 0x%08x)", -+ ui32RegAddr, ui32Value); -+ -+ /* Send abstract register read command */ -+ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_COMMAND_Reg, -+ (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | -+ RGXRISCVFW_DMI_COMMAND_READ | -+ RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | -+ ui32RegAddr, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ /* Wait until abstract command is completed */ -+ PDUMPREGPOL(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, -+ ui32_DMI_ABSTRACTCS_Reg, -+ 0U, -+ RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, -+ PDUMP_FLAGS_CONTINUOUS, -+ PDUMP_POLL_OPERATOR_EQUAL); -+ -+ RGXRiscvCheckAbstractCmdError(psDevInfo); -+ -+ /* Check read value */ -+ PDUMPREGPOL(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, -+ ui32_DMI_DATA0_Reg, -+ ui32Value, -+ 0xFFFFFFFF, -+ PDUMP_FLAGS_CONTINUOUS, -+ PDUMP_POLL_OPERATOR_EQUAL); -+ -+ return PVRSRV_OK; -+#else -+ PVR_UNREFERENCED_PARAMETER(psDevInfo); -+ PVR_UNREFERENCED_PARAMETER(ui32RegAddr); -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+ -+ /* Polling HW registers is currently not required driverlive */ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+#endif -+} -+ -+/* -+ * RGXRiscvWriteReg -+ */ -+PVRSRV_ERROR RGXRiscvWriteReg(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32Value) -+{ -+ __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); -+ __maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND); -+ __maybe_unused IMG_UINT32 
ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); -+ -+#if defined(NO_HARDWARE) && defined(PDUMP) -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "Write RISC-V register 0x%x (value 0x%08x)", -+ ui32RegAddr, ui32Value); -+ -+ /* Prepare data to be written to register */ -+ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_DATA0_Reg, -+ ui32Value, PDUMP_FLAGS_CONTINUOUS); -+ -+ /* Send abstract register write command */ -+ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_COMMAND_Reg, -+ (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | -+ RGXRISCVFW_DMI_COMMAND_WRITE | -+ RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | -+ ui32RegAddr, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ /* Wait until abstract command is completed */ -+ PDUMPREGPOL(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, -+ ui32_DMI_ABSTRACTCS_Reg, -+ 0U, -+ RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, -+ PDUMP_FLAGS_CONTINUOUS, -+ PDUMP_POLL_OPERATOR_EQUAL); -+#else -+ IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); -+ -+ /* Prepare data to be written to register */ -+ OSWriteHWReg32(pui32RegsBase, ui32_DMI_DATA0_Reg, ui32Value); -+ -+ /* Send abstract register write command */ -+ OSWriteHWReg32(pui32RegsBase, -+ ui32_DMI_COMMAND_Reg, -+ (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | -+ RGXRISCVFW_DMI_COMMAND_WRITE | -+ RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | -+ ui32RegAddr); -+ -+ /* Wait until abstract command is completed */ -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ pui32RegsBase + ui32_DMI_ABSTRACTCS_Reg/sizeof(IMG_UINT32), -+ 0U, -+ RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, -+ POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", -+ __func__, OSReadHWReg32(pui32RegsBase, ui32_DMI_ABSTRACTCS_Reg))); -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+******************************************************************************* -+@Function RGXRiscvCheckSysBusError -+ -+@Description Check for RISC-V system bus errors and clear them -+ -+@Input psDevInfo Pointer to GPU device info -+ -+@Return RGXRISCVFW_SYSBUS_ERR -+******************************************************************************/ -+static __maybe_unused RGXRISCVFW_SYSBUS_ERR RGXRiscvCheckSysBusError(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ RGXRISCVFW_SYSBUS_ERR eSBError; -+ -+ __maybe_unused IMG_UINT32 ui32_DMI_SBCS_Reg = RGX_GET_DMI_REG(psDevInfo, SBCS); -+ -+#if defined(NO_HARDWARE) && defined(PDUMP) -+ eSBError = RISCV_SYSBUS_NO_ERROR; -+ -+ PDUMPREGPOL(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, -+ ui32_DMI_SBCS_Reg, -+ RISCV_SYSBUS_NO_ERROR << RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT, -+ ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK, -+ PDUMP_FLAGS_CONTINUOUS, -+ PDUMP_POLL_OPERATOR_EQUAL); -+#else -+ void __iomem *pvRegsBaseKM = RGX_GET_RISCV_REGS_BASE(psDevInfo); -+ -+ eSBError = (OSReadHWReg32(pvRegsBaseKM, ui32_DMI_SBCS_Reg) -+ & ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK) -+ >> RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT; -+ -+ if (eSBError != RISCV_SYSBUS_NO_ERROR) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "RISC-V FW system bus error %u", eSBError)); -+ -+ /* Clear the error (note SBERROR field is write-1-to-clear) */ -+ OSWriteHWReg32(pvRegsBaseKM, ui32_DMI_SBCS_Reg, -+ ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK); -+ } -+#endif -+ -+ return eSBError; -+} -+ -+#if !defined(EMULATOR) -+/*! 
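The register and memory accessors in this file all repeat the same Debug Module handshake: load the operands into DATA0/DATA1, write a command word to COMMAND, poll ABSTRACTCS until its BUSY bit clears, then check and clear CMDERR before trusting DATA0. The helper below is a hypothetical condensation of that shared sequence for the live-hardware path (the function name does not exist in the driver; each individual step mirrors RGXRiscvReadReg() above and the abstract-memory helpers that follow, and the NO_HARDWARE/PDUMP builds use PDUMPREG32/PDUMPREGPOL instead).

    /* Illustrative only: shared abstract-command handshake. For abstract
     * memory reads, DATA1 must already hold the target address before the
     * command word is issued.
     */
    static PVRSRV_ERROR RGXRiscvAbstractRead32(PVRSRV_RGXDEV_INFO *psDevInfo,
                                               IMG_UINT32 ui32CmdWord,
                                               IMG_UINT32 *pui32Value)
    {
        IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo);
        IMG_UINT32 ui32CommandReg    = RGX_GET_DMI_REG(psDevInfo, COMMAND);
        IMG_UINT32 ui32AbstractcsReg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS);
        IMG_UINT32 ui32Data0Reg      = RGX_GET_DMI_REG(psDevInfo, DATA0);

        /* 1. Issue the abstract command (register or memory access). */
        OSWriteHWReg32(pui32RegsBase, ui32CommandReg, ui32CmdWord);

        /* 2. Wait for ABSTRACTCS.BUSY to clear. */
        if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
                                 pui32RegsBase + ui32AbstractcsReg/sizeof(IMG_UINT32),
                                 0U,
                                 RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN,
                                 POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK)
        {
            return PVRSRV_ERROR_TIMEOUT;
        }

        /* 3. Check (and clear) CMDERR before trusting the result. */
        if (RGXRiscvCheckAbstractCmdError(psDevInfo) != RISCV_ABSTRACT_CMD_NO_ERROR)
        {
            *pui32Value = 0U; /* mirrors the error handling in RGXRiscvReadReg */
            return PVRSRV_OK;
        }

        /* 4. The Debug Module leaves the result in DATA0. */
        *pui32Value = OSReadHWReg32(pui32RegsBase, ui32Data0Reg);
        return PVRSRV_OK;
    }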
-+******************************************************************************* -+@Function RGXRiscvReadAbstractMem -+ -+@Description Read a value at the given address in RISC-V memory space -+ using RISC-V abstract memory commands -+ -+@Input psDevInfo Pointer to device info -+@Input ui32Addr Address in RISC-V memory space -+ -+@Output pui32Value Read value -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+static PVRSRV_ERROR -+RGXRiscvReadAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 *pui32Value) -+{ -+ __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); -+ __maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND); -+ __maybe_unused IMG_UINT32 ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); -+ __maybe_unused IMG_UINT32 ui32_DMI_DATA1_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); -+ __maybe_unused IMG_UINT32 ui32_DMI_SBCS_Reg = RGX_GET_DMI_REG(psDevInfo, SBCS); -+ -+#if defined(NO_HARDWARE) && defined(PDUMP) -+ PVR_UNREFERENCED_PARAMETER(psDevInfo); -+ PVR_UNREFERENCED_PARAMETER(ui32Addr); -+ PVR_UNREFERENCED_PARAMETER(pui32Value); -+ -+ /* Reading memory is not supported in nohw/pdump */ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+#else -+ IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); -+ -+ /* Prepare read address */ -+ OSWriteHWReg32(pui32RegsBase, ui32_DMI_DATA1_Reg, ui32Addr); -+ -+ /* Send abstract memory read command */ -+ OSWriteHWReg32(pui32RegsBase, -+ ui32_DMI_COMMAND_Reg, -+ (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | -+ RGXRISCVFW_DMI_COMMAND_READ | -+ RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT); -+ -+ /* Wait until abstract command is completed */ -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ pui32RegsBase + ui32_DMI_ABSTRACTCS_Reg/sizeof(IMG_UINT32), -+ 0U, -+ RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, -+ POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", -+ __func__, OSReadHWReg32(pui32RegsBase, ui32_DMI_ABSTRACTCS_Reg))); -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ if (RGXRiscvCheckAbstractCmdError(psDevInfo) == RISCV_ABSTRACT_CMD_NO_ERROR) -+ { -+ /* Read memory value */ -+ *pui32Value = OSReadHWReg32(pui32RegsBase, ui32_DMI_DATA0_Reg); -+ } -+ else -+ { -+ *pui32Value = 0U; -+ } -+ -+ return PVRSRV_OK; -+#endif -+} -+#endif /* !defined(EMULATOR) */ -+ -+/*! 
-+******************************************************************************* -+@Function RGXRiscvPollAbstractMem -+ -+@Description Poll for a value at the given address in RISC-V memory space -+ using RISC-V abstract memory commands -+ -+@Input psDevInfo Pointer to device info -+@Input ui32Addr Address in RISC-V memory space -+@Input ui32Value Expected value -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+static PVRSRV_ERROR -+RGXRiscvPollAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) -+{ -+ __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); -+ __maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND); -+ __maybe_unused IMG_UINT32 ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); -+ __maybe_unused IMG_UINT32 ui32_DMI_DATA1_Reg = RGX_GET_DMI_REG(psDevInfo, DATA1); -+ -+#if defined(NO_HARDWARE) && defined(PDUMP) -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, -+ PDUMP_FLAGS_CONTINUOUS, -+ "Poll RISC-V address 0x%x (expected 0x%08x)", -+ ui32Addr, ui32Value); -+ -+ /* Prepare read address */ -+ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_DATA1_Reg, -+ ui32Addr, PDUMP_FLAGS_CONTINUOUS); -+ -+ /* Send abstract memory read command */ -+ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_COMMAND_Reg, -+ (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | -+ RGXRISCVFW_DMI_COMMAND_READ | -+ RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ /* Wait until abstract command is completed */ -+ PDUMPREGPOL(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, -+ ui32_DMI_ABSTRACTCS_Reg, -+ 0U, -+ RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, -+ PDUMP_FLAGS_CONTINUOUS, -+ PDUMP_POLL_OPERATOR_EQUAL); -+ -+ RGXRiscvCheckAbstractCmdError(psDevInfo); -+ -+ /* Check read value */ -+ PDUMPREGPOL(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, -+ ui32_DMI_DATA0_Reg, -+ ui32Value, -+ 0xFFFFFFFF, -+ PDUMP_FLAGS_CONTINUOUS, -+ PDUMP_POLL_OPERATOR_EQUAL); -+ -+ return PVRSRV_OK; -+#else -+ PVR_UNREFERENCED_PARAMETER(psDevInfo); -+ PVR_UNREFERENCED_PARAMETER(ui32Addr); -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+ -+ /* Polling memory is currently not required driverlive */ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+#endif -+} -+ -+#if !defined(EMULATOR) -+/*! 
-+******************************************************************************* -+@Function RGXRiscvReadSysBusMem -+ -+@Description Read a value at the given address in RISC-V memory space -+ using the RISC-V system bus -+ -+@Input psDevInfo Pointer to device info -+@Input ui32Addr Address in RISC-V memory space -+ -+@Output pui32Value Read value -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+static PVRSRV_ERROR -+RGXRiscvReadSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 *pui32Value) -+{ -+ __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); -+ __maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND); -+ __maybe_unused IMG_UINT32 ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); -+ __maybe_unused IMG_UINT32 ui32_DMI_DATA1_Reg = RGX_GET_DMI_REG(psDevInfo, DATA1); -+ __maybe_unused IMG_UINT32 ui32_DMI_SBCS_Reg = RGX_GET_DMI_REG(psDevInfo, SBCS); -+ __maybe_unused IMG_UINT32 ui32_DMI_SBADDRESS0_Reg = RGX_GET_DMI_REG(psDevInfo, SBADDRESS0); -+ -+#if defined(NO_HARDWARE) && defined(PDUMP) -+ PVR_UNREFERENCED_PARAMETER(psDevInfo); -+ PVR_UNREFERENCED_PARAMETER(ui32Addr); -+ PVR_UNREFERENCED_PARAMETER(pui32Value); -+ -+ /* Reading memory is not supported in nohw/pdump */ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+#else -+ IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); -+ -+ /* Configure system bus to read 32 bit every time a new address is provided */ -+ OSWriteHWReg32(pui32RegsBase, -+ ui32_DMI_SBCS_Reg, -+ (RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT) | -+ RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN); -+ -+ /* Perform read */ -+ OSWriteHWReg32(pui32RegsBase, ui32_DMI_SBADDRESS0_Reg, ui32Addr); -+ -+ /* Wait until system bus is idle */ -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ pui32RegsBase + ui32_DMI_SBCS_Reg/sizeof(IMG_UINT32), -+ 0U, -+ RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, -+ POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: System Bus did not go idle in time (sbcs = 0x%x)", -+ __func__, OSReadHWReg32(pui32RegsBase, ui32_DMI_SBCS_Reg))); -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ if (RGXRiscvCheckSysBusError(psDevInfo) == RISCV_SYSBUS_NO_ERROR) -+ { -+ /* Read value from debug system bus */ -+ *pui32Value = OSReadHWReg32(pui32RegsBase, ui32_DMI_DATA0_Reg); -+ } -+ else -+ { -+ *pui32Value = 0U; -+ } -+ -+ return PVRSRV_OK; -+#endif -+} -+#endif /* !defined(EMULATOR) */ -+ -+/*! 
-+******************************************************************************* -+@Function RGXRiscvPollSysBusMem -+ -+@Description Poll for a value at the given address in RISC-V memory space -+ using the RISC-V system bus -+ -+@Input psDevInfo Pointer to device info -+@Input ui32Addr Address in RISC-V memory space -+@Input ui32Value Expected value -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+static PVRSRV_ERROR -+RGXRiscvPollSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) -+{ -+ __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); -+ __maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND); -+ __maybe_unused IMG_UINT32 ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); -+ __maybe_unused IMG_UINT32 ui32_DMI_DATA1_Reg = RGX_GET_DMI_REG(psDevInfo, DATA1); -+ __maybe_unused IMG_UINT32 ui32_DMI_SBCS_Reg = RGX_GET_DMI_REG(psDevInfo, SBCS); -+ __maybe_unused IMG_UINT32 ui32_DMI_SBADDRESS0_Reg = RGX_GET_DMI_REG(psDevInfo, SBADDRESS0); -+ __maybe_unused IMG_UINT32 ui32_DMI_SBDATA0_Reg = RGX_GET_DMI_REG(psDevInfo, SBDATA0); -+ -+#if defined(NO_HARDWARE) && defined(PDUMP) -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "Poll RISC-V address 0x%x (expected 0x%08x)", -+ ui32Addr, ui32Value); -+ -+ /* Configure system bus to read 32 bit every time a new address is provided */ -+ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_SBCS_Reg, -+ (RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT) | -+ RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ /* Perform read */ -+ PDUMPREG32(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, ui32_DMI_SBADDRESS0_Reg, -+ ui32Addr, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ /* Wait until system bus is idle */ -+ PDUMPREGPOL(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, -+ ui32_DMI_SBCS_Reg, -+ 0U, -+ RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, -+ PDUMP_FLAGS_CONTINUOUS, -+ PDUMP_POLL_OPERATOR_EQUAL); -+ -+ RGXRiscvCheckSysBusError(psDevInfo); -+ -+ /* Check read value */ -+ PDUMPREGPOL(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, -+ ui32_DMI_SBDATA0_Reg, -+ ui32Value, -+ 0xFFFFFFFF, -+ PDUMP_FLAGS_CONTINUOUS, -+ PDUMP_POLL_OPERATOR_EQUAL); -+ -+ return PVRSRV_OK; -+#else -+ PVR_UNREFERENCED_PARAMETER(psDevInfo); -+ PVR_UNREFERENCED_PARAMETER(ui32Addr); -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+ -+ /* Polling memory is currently not required driverlive */ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+#endif -+} -+ -+#if !defined(EMULATOR) -+/* -+ * RGXRiscvReadMem -+ */ -+PVRSRV_ERROR RGXRiscvReadMem(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32Addr, -+ IMG_UINT32 *pui32Value) -+{ -+ if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) -+ { -+ return RGXRiscvReadAbstractMem(psDevInfo, ui32Addr, pui32Value); -+ } -+ -+ return RGXRiscvReadSysBusMem(psDevInfo, ui32Addr, pui32Value); -+} -+#endif /* !defined(EMULATOR) */ -+ -+/* -+ * RGXRiscvPollMem -+ */ -+PVRSRV_ERROR RGXRiscvPollMem(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32Addr, -+ IMG_UINT32 ui32Value) -+{ -+ if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) -+ { -+ return RGXRiscvPollAbstractMem(psDevInfo, ui32Addr, ui32Value); -+ } -+ -+ return RGXRiscvPollSysBusMem(psDevInfo, ui32Addr, ui32Value); -+} -+ -+#if !defined(EMULATOR) -+/*! 
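RGXRiscvReadMem() and RGXRiscvPollMem() above give callers a single entry point for either address range; whether an access goes through abstract commands or the system bus is decided purely by where the address falls. A short, hypothetical usage sketch (ui32SomeFWAddr is a made-up placeholder for an address outside the coremem window, not a real firmware symbol):

    IMG_UINT32 ui32Value;
    PVRSRV_ERROR eError;

    /* Falls inside [RGXRISCVFW_COREMEM_BASE, RGXRISCVFW_COREMEM_END]:
     * serviced by RGXRiscvReadAbstractMem() internally.
     */
    eError = RGXRiscvReadMem(psDevInfo, RGXRISCVFW_COREMEM_BASE, &ui32Value);

    /* Outside the coremem window: serviced by RGXRiscvReadSysBusMem(). */
    eError = RGXRiscvReadMem(psDevInfo, ui32SomeFWAddr, &ui32Value);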
-+******************************************************************************* -+@Function RGXRiscvWriteAbstractMem -+ -+@Description Write a value at the given address in RISC-V memory space -+ using RISC-V abstract memory commands -+ -+@Input psDevInfo Pointer to device info -+@Input ui32Addr Address in RISC-V memory space -+@Input ui32Value Write value -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+static PVRSRV_ERROR -+RGXRiscvWriteAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) -+{ -+ __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); -+ __maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND); -+ __maybe_unused IMG_UINT32 ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); -+ __maybe_unused IMG_UINT32 ui32_DMI_DATA1_Reg = RGX_GET_DMI_REG(psDevInfo, DATA1); -+ __maybe_unused IMG_UINT32 ui32_DMI_SBCS_Reg = RGX_GET_DMI_REG(psDevInfo, SBCS); -+ __maybe_unused IMG_UINT32 ui32_DMI_SBADDRESS0_Reg = RGX_GET_DMI_REG(psDevInfo, SBADDRESS0); -+ -+#if defined(NO_HARDWARE) && defined(PDUMP) -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "Write RISC-V address 0x%x (value 0x%08x)", -+ ui32Addr, ui32Value); -+ -+ /* Prepare write address */ -+ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_DATA1_Reg, -+ ui32Addr, PDUMP_FLAGS_CONTINUOUS); -+ -+ /* Prepare write data */ -+ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_DATA0_Reg, -+ ui32Value, PDUMP_FLAGS_CONTINUOUS); -+ -+ /* Send abstract register write command */ -+ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_COMMAND_Reg, -+ (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | -+ RGXRISCVFW_DMI_COMMAND_WRITE | -+ RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ /* Wait until abstract command is completed */ -+ PDUMPREGPOL(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, -+ ui32_DMI_ABSTRACTCS_Reg, -+ 0U, -+ RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, -+ PDUMP_FLAGS_CONTINUOUS, -+ PDUMP_POLL_OPERATOR_EQUAL); -+#else -+ IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); -+ -+ /* Prepare write address */ -+ OSWriteHWReg32(pui32RegsBase, ui32_DMI_DATA1_Reg, ui32Addr); -+ -+ /* Prepare write data */ -+ OSWriteHWReg32(pui32RegsBase, ui32_DMI_DATA0_Reg, ui32Value); -+ -+ /* Send abstract memory write command */ -+ OSWriteHWReg32(pui32RegsBase, -+ ui32_DMI_COMMAND_Reg, -+ (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | -+ RGXRISCVFW_DMI_COMMAND_WRITE | -+ RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT); -+ -+ /* Wait until abstract command is completed */ -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ pui32RegsBase + ui32_DMI_ABSTRACTCS_Reg/sizeof(IMG_UINT32), -+ 0U, -+ RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, -+ POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", -+ __func__, OSReadHWReg32(pui32RegsBase, ui32_DMI_ABSTRACTCS_Reg))); -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+/*! 
-+******************************************************************************* -+@Function RGXRiscvWriteSysBusMem -+ -+@Description Write a value at the given address in RISC-V memory space -+ using the RISC-V system bus -+ -+@Input psDevInfo Pointer to device info -+@Input ui32Addr Address in RISC-V memory space -+@Input ui32Value Write value -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+static PVRSRV_ERROR -+RGXRiscvWriteSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) -+{ -+ __maybe_unused IMG_UINT32 ui32_DMI_ABSTRACTCS_Reg = RGX_GET_DMI_REG(psDevInfo, ABSTRACTCS); -+ __maybe_unused IMG_UINT32 ui32_DMI_COMMAND_Reg = RGX_GET_DMI_REG(psDevInfo, COMMAND); -+ __maybe_unused IMG_UINT32 ui32_DMI_DATA0_Reg = RGX_GET_DMI_REG(psDevInfo, DATA0); -+ __maybe_unused IMG_UINT32 ui32_DMI_SBDATA0_Reg = RGX_GET_DMI_REG(psDevInfo, SBDATA0); -+ __maybe_unused IMG_UINT32 ui32_DMI_DATA1_Reg = RGX_GET_DMI_REG(psDevInfo, DATA1); -+ __maybe_unused IMG_UINT32 ui32_DMI_SBCS_Reg = RGX_GET_DMI_REG(psDevInfo, SBCS); -+ __maybe_unused IMG_UINT32 ui32_DMI_SBADDRESS0_Reg = RGX_GET_DMI_REG(psDevInfo, SBADDRESS0); -+ -+#if defined(NO_HARDWARE) && defined(PDUMP) -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "Write RISC-V address 0x%x (value 0x%08x)", -+ ui32Addr, ui32Value); -+ -+ /* Configure system bus to read 32 bit every time a new address is provided */ -+ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_SBCS_Reg, -+ RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ /* Prepare write address */ -+ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_SBADDRESS0_Reg, -+ ui32Addr, PDUMP_FLAGS_CONTINUOUS); -+ -+ /* Prepare write data and initiate write */ -+ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ui32_DMI_SBDATA0_Reg, -+ ui32Value, PDUMP_FLAGS_CONTINUOUS); -+ -+ /* Wait until system bus is idle */ -+ PDUMPREGPOL(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, -+ ui32_DMI_SBCS_Reg, -+ 0U, -+ RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, -+ PDUMP_FLAGS_CONTINUOUS, -+ PDUMP_POLL_OPERATOR_EQUAL); -+#else -+ IMG_UINT32 __iomem *pui32RegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); -+ -+ /* Configure system bus for 32 bit accesses */ -+ OSWriteHWReg32(pui32RegsBase, -+ ui32_DMI_SBCS_Reg, -+ RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT); -+ -+ /* Prepare write address */ -+ OSWriteHWReg32(pui32RegsBase, ui32_DMI_SBADDRESS0_Reg, ui32Addr); -+ -+ /* Prepare write data and initiate write */ -+ OSWriteHWReg32(pui32RegsBase, ui32_DMI_SBDATA0_Reg, ui32Value); -+ -+ /* Wait until system bus is idle */ -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ pui32RegsBase + ui32_DMI_SBCS_Reg/sizeof(IMG_UINT32), -+ 0U, -+ RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, -+ POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: System Bus did not go idle in time (sbcs = 0x%x)", -+ __func__, OSReadHWReg32(pui32RegsBase, ui32_DMI_SBCS_Reg))); -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * RGXRiscvWriteMem -+ */ -+PVRSRV_ERROR RGXRiscvWriteMem(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32Addr, -+ IMG_UINT32 ui32Value) -+{ -+ if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) -+ { -+ return RGXRiscvWriteAbstractMem(psDevInfo, ui32Addr, ui32Value); -+ } -+ -+ return RGXRiscvWriteSysBusMem(psDevInfo, ui32Addr, 
ui32Value); -+} -+#endif /* !defined(EMULATOR) */ -+ -+/* -+ * RGXRiscvDmiOp -+ */ -+PVRSRV_ERROR RGXRiscvDmiOp(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT64 *pui64DMI) -+{ -+#if defined(NO_HARDWARE) && defined(PDUMP) -+ PVR_UNREFERENCED_PARAMETER(psDevInfo); -+ PVR_UNREFERENCED_PARAMETER(pui64DMI); -+ -+ /* Accessing DM registers is not supported in nohw/pdump */ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+#else -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+#define DMI_BASE ((RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) >= 4) ? RGX_CR_FWCORE_DMI_RESERVED00__RISCV_AND_HOST_SECURITY_GEQ4 : RGX_CR_FWCORE_DMI_RESERVED00) -+#else -+#define DMI_BASE RGX_CR_FWCORE_DMI_RESERVED00 -+#endif -+#define DMI_STRIDE (RGX_CR_FWCORE_DMI_RESERVED01 - RGX_CR_FWCORE_DMI_RESERVED00) -+#define DMI_REG(r) ((DMI_BASE) + (DMI_STRIDE) * (r)) -+ -+#define DMI_OP_SHIFT 0U -+#define DMI_OP_MASK 0x3ULL -+#define DMI_DATA_SHIFT 2U -+#define DMI_DATA_MASK 0x3FFFFFFFCULL -+#define DMI_ADDRESS_SHIFT 34U -+#define DMI_ADDRESS_MASK 0xFC00000000ULL -+ -+#define DMI_OP_NOP 0U -+#define DMI_OP_READ 1U -+#define DMI_OP_WRITE 2U -+#define DMI_OP_RESERVED 3U -+ -+#define DMI_OP_STATUS_SUCCESS 0U -+#define DMI_OP_STATUS_RESERVED 1U -+#define DMI_OP_STATUS_FAILED 2U -+#define DMI_OP_STATUS_BUSY 3U -+ -+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; -+ PVRSRV_DEV_POWER_STATE ePowerState; -+ PVRSRV_ERROR eError; -+ IMG_UINT64 ui64Op, ui64Address, ui64Data; -+ -+ ui64Op = (*pui64DMI & DMI_OP_MASK) >> DMI_OP_SHIFT; -+ ui64Address = (*pui64DMI & DMI_ADDRESS_MASK) >> DMI_ADDRESS_SHIFT; -+ ui64Data = (*pui64DMI & DMI_DATA_MASK) >> DMI_DATA_SHIFT; -+ -+ eError = PVRSRVPowerLock(psDeviceNode); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire powerlock (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ ui64Op = DMI_OP_STATUS_FAILED; -+ goto dmiop_update; -+ } -+ -+ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to retrieve RGX power state (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ ui64Op = DMI_OP_STATUS_FAILED; -+ goto dmiop_release_lock; -+ } -+ -+ if (ePowerState == PVRSRV_DEV_POWER_STATE_ON) -+ { -+ void __iomem *pvRegsBase = RGX_GET_RISCV_REGS_BASE(psDevInfo); -+ switch (ui64Op) -+ { -+ case DMI_OP_NOP: -+ ui64Op = DMI_OP_STATUS_SUCCESS; -+ break; -+ case DMI_OP_WRITE: -+ OSWriteHWReg32(pvRegsBase, -+ DMI_REG(ui64Address), -+ (IMG_UINT32)ui64Data); -+ ui64Op = DMI_OP_STATUS_SUCCESS; -+ break; -+ case DMI_OP_READ: -+ ui64Data = (IMG_UINT64)OSReadHWReg32(pvRegsBase, -+ DMI_REG(ui64Address)); -+ ui64Op = DMI_OP_STATUS_SUCCESS; -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "%s: unknown op %u", __func__, (IMG_UINT32)ui64Op)); -+ ui64Op = DMI_OP_STATUS_FAILED; -+ break; -+ } -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Accessing RISC-V Debug Module is not " -+ "possible while the GPU is powered off", __func__)); -+ -+ ui64Op = DMI_OP_STATUS_FAILED; -+ } -+ -+dmiop_release_lock: -+ PVRSRVPowerUnlock(psDeviceNode); -+ -+dmiop_update: -+ *pui64DMI = (ui64Op << DMI_OP_SHIFT) | -+ (ui64Address << DMI_ADDRESS_SHIFT) | -+ (ui64Data << DMI_DATA_SHIFT); -+ -+ return eError; -+#endif -+} -+ -+/****************************************************************************** -+ End of file (rgxfwriscv.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxfwriscv.h 
b/drivers/gpu/drm/img-rogue/rgxfwriscv.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxfwriscv.h -@@ -0,0 +1,212 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX firmware RISC-V utility routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX firmware RISC-V utility routines -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RGXFWRISCV_H -+#define RGXFWRISCV_H -+ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+#define RGXRISCVFW_GET_REMAP_SECURE(r) (RGX_CR_FWCORE_ADDR_REMAP_CONFIG0__HOST_SECURITY_GEQ4 + ((r) * 8U)) -+#define RGXRISCVFW_BOOTLDR_CODE_REMAP_SECURE (RGXRISCVFW_GET_REMAP_SECURE(RGXRISCVFW_BOOTLDR_CODE_REGION)) -+#define RGXRISCVFW_BOOTLDR_DATA_REMAP_SECURE (RGXRISCVFW_GET_REMAP_SECURE(RGXRISCVFW_BOOTLDR_DATA_REGION)) -+#endif -+ -+/*! -+******************************************************************************* -+@Function RGXRiscvHalt -+ -+@Description Halt the RISC-V FW core (required for certain operations -+ done through Debug Module) -+ -+@Input psDevInfo Pointer to device info -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXRiscvHalt(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+/*! 
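/*
 * Typical call pattern for the Debug Module helpers declared in this header:
 * the FW core is usually halted first (as the RGXRiscvHalt description above
 * notes, certain Debug Module operations require it), the register access is
 * performed, and the core is resumed. A minimal sketch follows; the wrapper
 * name and its parameters are hypothetical and not part of this patch, and
 * RGXRiscvWriteReg/RGXRiscvResume are declared further down this header.
 */
static PVRSRV_ERROR RGXRiscvPokeRegExample(PVRSRV_RGXDEV_INFO *psDevInfo,
                                           IMG_UINT32 ui32RegAddr,
                                           IMG_UINT32 ui32Value)
{
	PVRSRV_ERROR eError;

	/* Halt the FW core so the Debug Module can access its state */
	eError = RGXRiscvHalt(psDevInfo);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* Write the GPR/CSR while halted */
	eError = RGXRiscvWriteReg(psDevInfo, ui32RegAddr, ui32Value);

	/* Resume execution regardless of the write outcome */
	(void) RGXRiscvResume(psDevInfo);

	return eError;
}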
-+******************************************************************************* -+@Function RGXRiscvIsHalted -+ -+@Description Check if the RISC-V FW is halted -+ -+@Input psDevInfo Pointer to device info -+ -+@Return IMG_BOOL -+******************************************************************************/ -+IMG_BOOL RGXRiscvIsHalted(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+/*! -+******************************************************************************* -+@Function RGXRiscvResume -+ -+@Description Resume the RISC-V FW core -+ -+@Input psDevInfo Pointer to device info -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXRiscvResume(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+/*! -+******************************************************************************* -+@Function RGXRiscvReadReg -+ -+@Description Read a value from the given RISC-V register (GPR or CSR) -+ -+@Input psDevInfo Pointer to device info -+@Input ui32RegAddr RISC-V register address -+ -+@Output pui32Value Read value -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXRiscvReadReg(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 *pui32Value); -+ -+/*! -+******************************************************************************* -+@Function RGXRiscvPollReg -+ -+@Description Poll for a value from the given RISC-V register (GPR or CSR) -+ -+@Input psDevInfo Pointer to device info -+@Input ui32RegAddr RISC-V register address -+@Input ui32Value Expected value -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXRiscvPollReg(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32Value); -+ -+/*! -+******************************************************************************* -+@Function RGXRiscvWriteReg -+ -+@Description Write a value to the given RISC-V register (GPR or CSR) -+ -+@Input psDevInfo Pointer to device info -+@Input ui32RegAddr RISC-V register address -+@Input ui32Value Write value -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXRiscvWriteReg(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32Value); -+ -+/*! -+******************************************************************************* -+@Function RGXRiscvPollMem -+ -+@Description Poll for a value at the given address in RISC-V memory space -+ -+@Input psDevInfo Pointer to device info -+@Input ui32Addr Address in RISC-V memory space -+@Input ui32Value Expected value -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXRiscvPollMem(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32Addr, -+ IMG_UINT32 ui32Value); -+ -+#if !defined(EMULATOR) -+/*! -+******************************************************************************* -+@Function RGXRiscvReadMem -+ -+@Description Read a value at the given address in RISC-V memory space -+ -+@Input psDevInfo Pointer to device info -+@Input ui32Addr Address in RISC-V memory space -+ -+@Output pui32Value Read value -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXRiscvReadMem(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32Addr, -+ IMG_UINT32 *pui32Value); -+ -+/*! 
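/*
 * The 64-bit value handed to RGXRiscvDmiOp (implemented in rgxfwriscv.c above
 * and declared at the end of this header) packs the operation, register
 * address and data into one word using the DMI_* shifts and masks defined
 * there: op in bits [1:0], data in bits [33:2], address in bits [39:34].
 * On return the op field carries the status (0 = success). The two helpers
 * below show how a caller could build a request and decode the result; their
 * names are hypothetical and they are not part of this patch.
 */
static IMG_UINT64 RGXRiscvDmiPackSketch(IMG_UINT64 ui64Op,
                                        IMG_UINT64 ui64Address,
                                        IMG_UINT64 ui64Data)
{
	/* Mirrors the unpack/repack performed inside RGXRiscvDmiOp */
	return (ui64Op      << 0U)  |   /* DMI_OP_SHIFT      */
	       (ui64Data    << 2U)  |   /* DMI_DATA_SHIFT    */
	       (ui64Address << 34U);    /* DMI_ADDRESS_SHIFT */
}

static IMG_UINT32 RGXRiscvDmiStatusSketch(IMG_UINT64 ui64DMI)
{
	/* After the call, bits [1:0] hold the status (DMI_OP_STATUS_SUCCESS = 0) */
	return (IMG_UINT32)(ui64DMI & 0x3ULL);
}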
-+******************************************************************************* -+@Function RGXRiscvWriteMem -+ -+@Description Write a value to the given address in RISC-V memory space -+ -+@Input psDevInfo Pointer to device info -+@Input ui32Addr Address in RISC-V memory space -+@Input ui32Value Write value -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXRiscvWriteMem(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32Addr, -+ IMG_UINT32 ui32Value); -+#endif /* !defined(EMULATOR) */ -+ -+/*! -+******************************************************************************* -+@Function RGXRiscvDmiOp -+ -+@Description Acquire the powerlock and perform an operation on the RISC-V -+ Debug Module Interface, but only if the GPU is powered on. -+ -+@Input psDevInfo Pointer to device info -+@InOut pui64DMI Encoding of a request for the RISC-V Debug -+ Module with same format as the 'dmi' register -+ from the RISC-V debug specification (v0.13+). -+ On return, this is updated with the result of -+ the request, encoded the same way. -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXRiscvDmiOp(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT64 *pui64DMI); -+ -+#endif /* RGXFWRISCV_H */ -+/****************************************************************************** -+ End of file (rgxfwriscv.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxfwtrace_strings.c b/drivers/gpu/drm/img-rogue/rgxfwtrace_strings.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxfwtrace_strings.c -@@ -0,0 +1,56 @@ -+/*************************************************************************/ /*! -+@File rgxfwtrace_strings.c -+@Title RGX Firmware trace strings -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "img_defs.h" -+#include "rgx_fwif_sf.h" -+#include "fwtrace_string.h" -+ -+/* The tuple pairs that will be generated using XMacros will be stored here. -+ * This macro definition must match the definition of SFids in rgx_fwif_sf.h -+ */ -+const RGXKM_STID_FMT SFs[]= { -+#define X(a, b, c, d, e) { RGXFW_LOG_CREATESFID(a,b,e), d }, -+ RGXFW_LOG_SFIDLIST -+#undef X -+}; -+ -+const IMG_UINT32 g_ui32SFsCount = ARRAY_SIZE(SFs); -diff --git a/drivers/gpu/drm/img-rogue/rgxfwutils.c b/drivers/gpu/drm/img-rogue/rgxfwutils.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxfwutils.c -@@ -0,0 +1,6946 @@ -+/*************************************************************************/ /*! -+@File -+@Title Rogue firmware utility routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Rogue firmware utility routines -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
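/*
 * The SFs[] table built above in rgxfwtrace_strings.c uses the X-macro
 * pattern: RGXFW_LOG_SFIDLIST is a list of X(...) entries, and redefining X()
 * before expanding the list turns each entry into one table initialiser, so
 * the ID/format-string table can never drift out of step with the list in
 * rgx_fwif_sf.h. The standalone example below demonstrates the same pattern
 * with generic names; none of these identifiers come from the driver.
 */
#include <stdio.h>

#define COLOUR_LIST \
    X(RED,   0xFF0000) \
    X(GREEN, 0x00FF00)

typedef struct { const char *name; unsigned rgb; } colour_t;

static const colour_t colours[] = {
#define X(n, v) { #n, v },   /* each X() entry becomes one initialiser */
    COLOUR_LIST
#undef X
};

int main(void)
{
	/* Prints "RED=FF0000": the table was generated from the list above */
	printf("%s=%06X\n", colours[0].name, colours[0].rgb);
	return 0;
}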
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if defined(__linux__) -+#include -+#else -+#include -+#endif -+ -+#include "img_defs.h" -+ -+#include "rgxdefs_km.h" -+#include "rgx_fwif_km.h" -+#include "pdump_km.h" -+#include "osfunc.h" -+#include "os_apphint.h" -+#include "cache_km.h" -+#include "allocmem.h" -+#include "physheap.h" -+#include "devicemem.h" -+#include "devicemem_pdump.h" -+#include "devicemem_server.h" -+ -+#include "pvr_debug.h" -+#include "pvr_notifier.h" -+#include "rgxfwutils.h" -+#include "rgxfwcmnctx.h" -+#include "rgxfwriscv.h" -+#include "rgx_options.h" -+#include "rgx_fwif_alignchecks.h" -+#include "rgx_fwif_resetframework.h" -+#include "rgx_pdump_panics.h" -+#include "fwtrace_string.h" -+#include "rgxheapconfig.h" -+#include "pvrsrv.h" -+#include "rgxdebug_common.h" -+#include "rgxhwperf.h" -+#include "rgxccb.h" -+#include "rgxcompute.h" -+#include "rgxtransfer.h" -+#include "rgxtdmtransfer.h" -+#include "rgxpower.h" -+#if defined(SUPPORT_DISPLAY_CLASS) -+#include "dc_server.h" -+#endif -+#include "rgxmem.h" -+#include "rgxmmudefs_km.h" -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+#include "rgxmipsmmuinit.h" -+#endif -+#include "rgxta3d.h" -+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) -+#include "rgxkicksync.h" -+#endif -+#include "rgxutils.h" -+#include "rgxtimecorr.h" -+#include "sync_internal.h" -+#include "sync.h" -+#include "sync_checkpoint.h" -+#include "sync_checkpoint_external.h" -+#include "tlstream.h" -+#include "devicemem_server_utils.h" -+#include "htbserver.h" -+#include "rgx_bvnc_defs_km.h" -+#include "info_page.h" -+ -+#include "physmem_lma.h" -+#include "physmem_osmem.h" -+ -+#ifdef __linux__ -+#include /* sprintf */ -+#else -+#include -+#include -+#endif -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#include "process_stats.h" -+#endif -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+#include "rgxworkest.h" -+#endif -+ -+#if defined(SUPPORT_PDVFS) -+#include "rgxpdvfs.h" -+#endif -+ -+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) -+#include "rgxsoctimer.h" -+#endif -+ -+#include "vz_vmm_pvz.h" -+#include "rgx_heaps.h" -+ -+/*! -+ ****************************************************************************** -+ * HWPERF -+ *****************************************************************************/ -+/* Size of the Firmware L1 HWPERF buffer in bytes (2MB). Accessed by the -+ * Firmware and host driver. */ -+#define RGXFW_HWPERF_L1_SIZE_MIN (16U) -+#define RGXFW_HWPERF_L1_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB -+#define RGXFW_HWPERF_L1_SIZE_MAX (12288U) -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+#if defined(DEBUG) -+/* Catch the use of auto-increment when meta_registers_unpacked_accesses feature is -+ * present in case we ever use it. 
No WA exists so it must not be used */ -+#define CHECK_HWBRN_68777(v) \ -+ do { \ -+ PVR_ASSERT(((v) & RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN) == 0); \ -+ } while (0) -+#else -+#define CHECK_HWBRN_68777(v) -+#endif -+#endif -+ -+/* Firmware CCB length */ -+#if defined(NO_HARDWARE) && defined(PDUMP) -+#define RGXFWIF_FWCCB_NUMCMDS_LOG2 (10) -+#elif defined(SUPPORT_PDVFS) -+#define RGXFWIF_FWCCB_NUMCMDS_LOG2 (8) -+#else -+#define RGXFWIF_FWCCB_NUMCMDS_LOG2 (5) -+#endif -+ -+#if defined(RGX_FW_IRQ_OS_COUNTERS) -+const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OSIDS] = {IRQ_COUNTER_STORAGE_REGS}; -+#endif -+ -+/* Workload Estimation Firmware CCB length */ -+#define RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2 (7) -+ -+/* Size of memory buffer for firmware gcov data -+ * The actual data size is several hundred kilobytes. The buffer is an order of magnitude larger. */ -+#define RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE (4*1024*1024) -+ -+typedef struct -+{ -+ RGXFWIF_KCCB_CMD sKCCBcmd; -+ DLLIST_NODE sListNode; -+ PDUMP_FLAGS_T uiPDumpFlags; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+} RGX_DEFERRED_KCCB_CMD; -+ -+#if defined(PDUMP) -+/* ensure PIDs are 32-bit because a 32-bit PDump load is generated for the -+ * PID filter example entries -+ */ -+static_assert(sizeof(IMG_PID) == sizeof(IMG_UINT32), -+ "FW PID filtering assumes the IMG_PID type is 32-bits wide as it " -+ "generates WRW commands for loading the PID values"); -+#endif -+ -+#if (RGXFW_MAX_NUM_OSIDS > 1) -+static_assert(((IMG_UINT8)RGX_DRIVERID_0_DEFAULT_TIME_SLICE + -+ (IMG_UINT8)RGX_DRIVERID_1_DEFAULT_TIME_SLICE + -+ (IMG_UINT8)RGX_DRIVERID_2_DEFAULT_TIME_SLICE + -+ (IMG_UINT8)RGX_DRIVERID_3_DEFAULT_TIME_SLICE + -+ (IMG_UINT8)RGX_DRIVERID_4_DEFAULT_TIME_SLICE + -+ (IMG_UINT8)RGX_DRIVERID_5_DEFAULT_TIME_SLICE + -+ (IMG_UINT8)RGX_DRIVERID_6_DEFAULT_TIME_SLICE + -+ (IMG_UINT8)RGX_DRIVERID_7_DEFAULT_TIME_SLICE) <= PVRSRV_VZ_TIME_SLICE_MAX, "Invalid driverid time slice aggregate"); -+#endif -+ -+static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo); -+static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) -+static PVRSRV_ERROR _AllocateSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo, RGXFWIF_SYSINIT* psFwSysInit) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEM_MEMDESC** ppsSLC3FenceMemDesc = &psDevInfo->psSLC3FenceMemDesc; -+ IMG_UINT32 ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE( -+ RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)); -+ -+ PVR_DPF_ENTERED; -+ -+ eError = DevmemAllocate(psDevInfo->psFirmwareMainHeap, -+ 1, -+ ui32CacheLineSize, -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), -+ "FwSLC3FenceWA", -+ ppsSLC3FenceMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF_RETURN_RC(eError); -+ } -+ -+ /* We need to map it so the heap for this allocation is set */ -+ eError = DevmemMapToDevice(*ppsSLC3FenceMemDesc, -+ psDevInfo->psFirmwareMainHeap, -+ &psFwSysInit->sSLC3FenceDevVAddr); -+ if (eError != PVRSRV_OK) -+ { -+ DevmemFree(*ppsSLC3FenceMemDesc); -+ *ppsSLC3FenceMemDesc = NULL; -+ } -+ -+ PVR_DPF_RETURN_RC1(eError, *ppsSLC3FenceMemDesc); -+} -+ -+static void _FreeSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo) -+{ -+ DEVMEM_MEMDESC* psSLC3FenceMemDesc = psDevInfo->psSLC3FenceMemDesc; -+ -+ if (psSLC3FenceMemDesc) -+ { -+ DevmemReleaseDevVirtAddr(psSLC3FenceMemDesc); -+ DevmemFree(psSLC3FenceMemDesc); -+ } -+} -+#endif -+ -+static void __MTSScheduleWrite(PVRSRV_RGXDEV_INFO *psDevInfo, 
IMG_UINT32 ui32Value) -+{ -+ /* Ensure any uncached/WC memory writes are flushed from CPU write buffers -+ * before kicking MTS. -+ */ -+ OSWriteMemoryBarrier(NULL); -+ -+ /* This should *NOT* happen. Try to trace what caused this and avoid a NPE -+ * with the Write/Read at the foot of the function. -+ */ -+ PVR_ASSERT((psDevInfo != NULL)); -+ if (psDevInfo == NULL) -+ { -+ return; -+ } -+ -+ /* Kick MTS to wake firmware. */ -+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE, ui32Value); -+ -+ /* Uncached device/IO mapping will ensure MTS kick leaves CPU, read back -+ * will ensure it reaches the regbank via inter-connects (AXI, PCIe etc) -+ * before continuing. -+ */ -+ (void) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE); -+} -+ -+/*************************************************************************/ /*! -+@Function RGXSetupFwAllocation -+ -+@Description Sets a pointer in a firmware data structure. -+ -+@Input psDevInfo Device Info struct -+@Input uiAllocFlags Flags determining type of memory allocation -+@Input ui32Size Size of memory allocation -+@Input pszName Allocation label -+@Input ppsMemDesc pointer to the allocation's memory descriptor -+@Input psFwPtr Address of the firmware pointer to set -+@Input ppvCpuPtr Address of the cpu pointer to set -+@Input ui32DevVAFlags Any combination of RFW_FWADDR_*_FLAG -+ -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXSetupFwAllocation(PVRSRV_RGXDEV_INFO* psDevInfo, -+ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags, -+ IMG_UINT32 ui32Size, -+ const IMG_CHAR *pszName, -+ DEVMEM_MEMDESC **ppsMemDesc, -+ RGXFWIF_DEV_VIRTADDR *psFwPtr, -+ void **ppvCpuPtr, -+ IMG_UINT32 ui32DevVAFlags) -+{ -+ PVRSRV_ERROR eError; -+#if defined(SUPPORT_AUTOVZ) -+ IMG_BOOL bClearByMemset; -+ if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiAllocFlags)) -+ { -+ /* Under AutoVz the ZERO_ON_ALLOC flag is avoided as it causes the memory to -+ * be allocated from a different PMR than an allocation without the flag. -+ * When the content of an allocation needs to be recovered from physical memory -+ * on a later driver reboot, the memory then cannot be zeroed but the allocation -+ * addresses must still match. -+ * If the memory requires clearing, perform a memset after the allocation. 
*/ -+ uiAllocFlags &= ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC; -+ bClearByMemset = IMG_TRUE; -+ } -+ else -+ { -+ bClearByMemset = IMG_FALSE; -+ } -+#endif -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Allocate %s", pszName); -+ eError = DevmemFwAllocate(psDevInfo, -+ ui32Size, -+ uiAllocFlags, -+ pszName, -+ ppsMemDesc); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate %u bytes for %s (%u)", -+ __func__, -+ ui32Size, -+ pszName, -+ eError)); -+ goto fail_alloc; -+ } -+ -+ if (psFwPtr) -+ { -+ eError = RGXSetFirmwareAddress(psFwPtr, *ppsMemDesc, 0, ui32DevVAFlags); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to acquire firmware virtual address for %s (%u)", -+ __func__, -+ pszName, -+ eError)); -+ goto fail_fwaddr; -+ } -+ } -+ -+#if defined(SUPPORT_AUTOVZ) -+ if ((bClearByMemset) || (ppvCpuPtr)) -+#else -+ if (ppvCpuPtr) -+#endif -+ { -+ void *pvTempCpuPtr; -+ -+ eError = DevmemAcquireCpuVirtAddr(*ppsMemDesc, &pvTempCpuPtr); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to acquire CPU virtual address for %s (%u)", -+ __func__, -+ pszName, -+ eError)); -+ goto fail_cpuva; -+ } -+ -+#if defined(SUPPORT_AUTOVZ) -+ if (bClearByMemset) -+ { -+ if (PVRSRV_CHECK_CPU_WRITE_COMBINE(uiAllocFlags)) -+ { -+ OSCachedMemSetWMB(pvTempCpuPtr, 0, ui32Size); -+ } -+ else -+ { -+ OSDeviceMemSet(pvTempCpuPtr, 0, ui32Size); -+ } -+ } -+ if (ppvCpuPtr) -+#endif -+ { -+ *ppvCpuPtr = pvTempCpuPtr; -+ } -+#if defined(SUPPORT_AUTOVZ) -+ else -+ { -+ DevmemReleaseCpuVirtAddr(*ppsMemDesc); -+ pvTempCpuPtr = NULL; -+ } -+#endif -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: %s set up at Fw VA 0x%x and CPU VA 0x%p with alloc flags 0x%" IMG_UINT64_FMTSPECX, -+ __func__, pszName, -+ (psFwPtr) ? (psFwPtr->ui32Addr) : (0), -+ (ppvCpuPtr) ? (*ppvCpuPtr) : (NULL), -+ uiAllocFlags)); -+ -+ return eError; -+ -+fail_cpuva: -+ if (psFwPtr) -+ { -+ RGXUnsetFirmwareAddress(*ppsMemDesc); -+ } -+fail_fwaddr: -+ DevmemFree(*ppsMemDesc); -+fail_alloc: -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function GetHwPerfBufferSize -+ -+@Description Computes the effective size of the HW Perf Buffer -+@Input ui32HWPerfFWBufSizeKB Device Info struct -+@Return HwPerfBufferSize -+*/ /**************************************************************************/ -+static IMG_UINT32 GetHwPerfBufferSize(IMG_UINT32 ui32HWPerfFWBufSizeKB) -+{ -+ IMG_UINT32 HwPerfBufferSize; -+ -+ /* HWPerf: Determine the size of the FW buffer */ -+ if (ui32HWPerfFWBufSizeKB == 0 || -+ ui32HWPerfFWBufSizeKB == RGXFW_HWPERF_L1_SIZE_DEFAULT) -+ { -+ /* Under pvrsrvctl 0 size implies AppHint not set or is set to zero, -+ * use default size from driver constant. Set it to the default -+ * size, no logging. 
-+ */ -+ HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_DEFAULT<<10; -+ } -+ else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MAX)) -+ { -+ /* Size specified as a AppHint but it is too big */ -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: HWPerfFWBufSizeInKB value (%u) too big, using maximum (%u)", -+ __func__, -+ ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MAX)); -+ HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_MAX<<10; -+ } -+ else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MIN)) -+ { -+ /* Size specified as in AppHint HWPerfFWBufSizeInKB */ -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Using HWPerf FW buffer size of %u KB", -+ __func__, -+ ui32HWPerfFWBufSizeKB)); -+ HwPerfBufferSize = ui32HWPerfFWBufSizeKB<<10; -+ } -+ else -+ { -+ /* Size specified as a AppHint but it is too small */ -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: HWPerfFWBufSizeInKB value (%u) too small, using minimum (%u)", -+ __func__, -+ ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MIN)); -+ HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_MIN<<10; -+ } -+ -+ return HwPerfBufferSize; -+} -+ -+#if defined(PDUMP) -+/*! -+******************************************************************************* -+ @Function RGXFWSetupSignatureChecks -+ @Description -+ @Input psDevInfo -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+static PVRSRV_ERROR RGXFWSetupSignatureChecks(PVRSRV_RGXDEV_INFO* psDevInfo, -+ DEVMEM_MEMDESC** ppsSigChecksMemDesc, -+ IMG_UINT32 ui32SigChecksBufSize, -+ RGXFWIF_SIGBUF_CTL* psSigBufCtl) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Allocate memory for the checks */ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, -+ ui32SigChecksBufSize, -+ "FwSignatureChecks", -+ ppsSigChecksMemDesc, -+ &psSigBufCtl->sBuffer, -+ NULL, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); -+ -+ DevmemPDumpLoadMem( *ppsSigChecksMemDesc, -+ 0, -+ ui32SigChecksBufSize, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ psSigBufCtl->ui32LeftSizeInRegs = ui32SigChecksBufSize / sizeof(IMG_UINT32); -+fail: -+ return eError; -+} -+#endif -+ -+ -+#if defined(SUPPORT_FIRMWARE_GCOV) -+/*! -+******************************************************************************* -+ @Function RGXFWSetupFirmwareGcovBuffer -+ @Description -+ @Input psDevInfo -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+static PVRSRV_ERROR RGXFWSetupFirmwareGcovBuffer(PVRSRV_RGXDEV_INFO* psDevInfo, -+ DEVMEM_MEMDESC** ppsBufferMemDesc, -+ IMG_UINT32 ui32FirmwareGcovBufferSize, -+ RGXFWIF_FIRMWARE_GCOV_CTL* psFirmwareGcovCtl, -+ const IMG_CHAR* pszBufferName) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Allocate memory for gcov */ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS | -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)), -+ ui32FirmwareGcovBufferSize, -+ pszBufferName, -+ ppsBufferMemDesc, -+ &psFirmwareGcovCtl->sBuffer, -+ NULL, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXSetupFwAllocation"); -+ -+ psFirmwareGcovCtl->ui32Size = ui32FirmwareGcovBufferSize; -+ -+ return PVRSRV_OK; -+} -+#endif -+ -+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) -+/*! 
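/*
 * GetHwPerfBufferSize() above takes the AppHint value in KB, substitutes the
 * build-time default when the value is 0 or equal to the default, clamps
 * everything else to the [RGXFW_HWPERF_L1_SIZE_MIN, RGXFW_HWPERF_L1_SIZE_MAX]
 * range and converts the result to bytes with "<< 10". A standalone sketch of
 * just the clamping, with the two bounds written out numerically (16 KB and
 * 12288 KB as defined above) and the default-value case omitted; the function
 * name is hypothetical:
 */
static unsigned int HwPerfSizeClampSketchKB(unsigned int ui32RequestKB)
{
	if (ui32RequestKB > 12288U)      /* RGXFW_HWPERF_L1_SIZE_MAX */
	{
		return 12288U;
	}
	if (ui32RequestKB > 16U)         /* RGXFW_HWPERF_L1_SIZE_MIN */
	{
		return ui32RequestKB;
	}
	return 16U;
}
/* e.g. an 8 KB request is raised to 16 KB (16384 bytes after << 10),
 * and a 20000 KB request is capped at 12288 KB (12 MB). */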
-+ ****************************************************************************** -+ @Function RGXFWSetupCounterBuffer -+ @Description -+ @Input psDevInfo -+ -+ @Return PVRSRV_ERROR -+ *****************************************************************************/ -+static PVRSRV_ERROR RGXFWSetupCounterBuffer(PVRSRV_RGXDEV_INFO* psDevInfo, -+ DEVMEM_MEMDESC** ppsBufferMemDesc, -+ IMG_UINT32 ui32CounterDataBufferSize, -+ RGXFWIF_COUNTER_DUMP_CTL* psCounterDumpCtl, -+ const IMG_CHAR* pszBufferName) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS | -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)), -+ ui32CounterDataBufferSize, -+ "FwCounterBuffer", -+ ppsBufferMemDesc, -+ &psCounterDumpCtl->sBuffer, -+ NULL, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXSetupFwAllocation"); -+ -+ psCounterDumpCtl->ui32SizeInDwords = ui32CounterDataBufferSize >> 2; -+ -+ return PVRSRV_OK; -+} -+#endif -+ -+/*! -+******************************************************************************* -+ @Function RGXFWSetupAlignChecks -+ @Description This functions allocates and fills memory needed for the -+ aligns checks of the UM and KM structures shared with the -+ firmware. The format of the data in the memory is as follows: -+ -+ -+ -+ -+ The UM array is passed from the user side. Now the firmware is -+ responsible for filling this part of the memory. If that -+ happens the check of the UM structures will be performed -+ by the host driver on client's connect. -+ If the macro is not defined the client driver fills the memory -+ and the firmware checks for the alignment of all structures. -+ @Input psDeviceNode -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+static PVRSRV_ERROR RGXFWSetupAlignChecks(PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGXFWIF_DEV_VIRTADDR *psAlignChecksDevFW) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ IMG_UINT32 aui32RGXFWAlignChecksKM[] = { RGXFW_ALIGN_CHECKS_INIT_KM }; -+ IMG_UINT32 ui32RGXFWAlignChecksTotal; -+ IMG_UINT32* paui32AlignChecks; -+ PVRSRV_ERROR eError; -+ -+ /* In this case we don't know the number of elements in UM array. -+ * We have to assume something so we assume RGXFW_ALIGN_CHECKS_UM_MAX. 
-+ */ -+ ui32RGXFWAlignChecksTotal = sizeof(aui32RGXFWAlignChecksKM) -+ + RGXFW_ALIGN_CHECKS_UM_MAX * sizeof(IMG_UINT32) -+ + 2 * sizeof(IMG_UINT32); -+ -+ /* Allocate memory for the checks */ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & -+ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), -+ ui32RGXFWAlignChecksTotal, -+ "FwAlignmentChecks", -+ &psDevInfo->psRGXFWAlignChecksMemDesc, -+ psAlignChecksDevFW, -+ (void**) &paui32AlignChecks, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); -+ -+ if (!psDeviceNode->bAutoVzFwIsUp) -+ { -+ /* Copy the values */ -+ *paui32AlignChecks++ = ARRAY_SIZE(aui32RGXFWAlignChecksKM); -+ OSCachedMemCopy(paui32AlignChecks, &aui32RGXFWAlignChecksKM[0], -+ sizeof(aui32RGXFWAlignChecksKM)); -+ paui32AlignChecks += ARRAY_SIZE(aui32RGXFWAlignChecksKM); -+ -+ *paui32AlignChecks = 0; -+ -+ OSWriteMemoryBarrier(paui32AlignChecks); -+ RGXFwSharedMemCacheOpExec(paui32AlignChecks - (ARRAY_SIZE(aui32RGXFWAlignChecksKM) + 1), -+ ui32RGXFWAlignChecksTotal, -+ PVRSRV_CACHE_OP_FLUSH); -+ } -+ -+ -+ DevmemPDumpLoadMem( psDevInfo->psRGXFWAlignChecksMemDesc, -+ 0, -+ ui32RGXFWAlignChecksTotal, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ return PVRSRV_OK; -+ -+fail: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+static void RGXFWFreeAlignChecks(PVRSRV_RGXDEV_INFO* psDevInfo) -+{ -+ if (psDevInfo->psRGXFWAlignChecksMemDesc != NULL) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc); -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWAlignChecksMemDesc); -+ psDevInfo->psRGXFWAlignChecksMemDesc = NULL; -+ } -+} -+ -+PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest, -+ DEVMEM_MEMDESC *psSrc, -+ IMG_UINT32 uiExtraOffset, -+ IMG_UINT32 ui32Flags) -+{ -+ PVRSRV_ERROR eError; -+ IMG_DEV_VIRTADDR psDevVirtAddr; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ psDeviceNode = (PVRSRV_DEVICE_NODE *) DevmemGetConnection(psSrc); -+ psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; -+ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) -+ { -+ IMG_UINT32 ui32Offset; -+ IMG_BOOL bCachedInMETA; -+ PVRSRV_MEMALLOCFLAGS_T uiDevFlags; -+ IMG_UINT32 uiGPUCacheMode; -+ -+ eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireDevVirtAddr", failDevVAAcquire); -+ -+ /* Convert to an address in META memmap */ -+ ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_RAW_HEAP_BASE; -+ -+ /* Check in the devmem flags whether this memory is cached/uncached */ -+ DevmemGetFlags(psSrc, &uiDevFlags); -+ -+ /* Honour the META cache flags */ -+ bCachedInMETA = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0; -+ -+ /* Honour the SLC cache flags */ -+ eError = DevmemDeviceCacheMode(psDeviceNode, uiDevFlags, &uiGPUCacheMode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemDeviceCacheMode", failDevCacheMode); -+ -+ /* -+ * Choose Meta virtual address based on Meta and SLC cacheability. 
-+ */ -+ ui32Offset += RGXFW_SEGMMU_DATA_BASE_ADDRESS; -+ -+ if (bCachedInMETA) -+ { -+ ui32Offset |= RGXFW_SEGMMU_DATA_META_CACHED; -+ } -+ else -+ { -+ ui32Offset |= RGXFW_SEGMMU_DATA_META_UNCACHED; -+ } -+ -+ if (PVRSRV_CHECK_GPU_CACHED(uiGPUCacheMode)) -+ { -+ ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED; -+ } -+ else -+ { -+ ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED; -+ } -+ -+ ppDest->ui32Addr = ui32Offset; -+ } -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr); -+ PVR_GOTO_IF_ERROR(eError, failDevVAAcquire); -+ -+ ppDest->ui32Addr = (IMG_UINT32)((psDevVirtAddr.uiAddr + uiExtraOffset) & 0xFFFFFFFF); -+ } -+#endif -+ else -+ { -+ IMG_UINT32 ui32Offset; -+ IMG_BOOL bCachedInRISCV; -+ PVRSRV_MEMALLOCFLAGS_T uiDevFlags; -+ -+ eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireDevVirtAddr", failDevVAAcquire); -+ -+ /* Convert to an address in RISCV memmap */ -+ ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_RAW_HEAP_BASE; -+ -+ /* Check in the devmem flags whether this memory is cached/uncached */ -+ DevmemGetFlags(psSrc, &uiDevFlags); -+ -+ /* Honour the RISCV cache flags */ -+ bCachedInRISCV = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0; -+ -+ if (bCachedInRISCV) -+ { -+ ui32Offset |= RGXRISCVFW_SHARED_CACHED_DATA_BASE; -+ } -+ else -+ { -+ ui32Offset |= RGXRISCVFW_SHARED_UNCACHED_DATA_BASE; -+ } -+ -+ ppDest->ui32Addr = ui32Offset; -+ } -+ -+ if ((ppDest->ui32Addr & 0x3U) != 0) -+ { -+ IMG_CHAR *pszAnnotation; -+ /* It is expected that the annotation returned by DevmemGetAnnotation() is always valid */ -+ DevmemGetAnnotation(psSrc, &pszAnnotation); -+ -+ PVR_DPF((PVR_DBG_ERROR, "%s: %s @ 0x%x is not aligned to 32 bit", -+ __func__, pszAnnotation, ppDest->ui32Addr)); -+ -+ return PVRSRV_ERROR_INVALID_ALIGNMENT; -+ } -+ -+ if (ui32Flags & RFW_FWADDR_NOREF_FLAG) -+ { -+ DevmemReleaseDevVirtAddr(psSrc); -+ } -+ -+ return PVRSRV_OK; -+ -+failDevCacheMode: -+ DevmemReleaseDevVirtAddr(psSrc); -+failDevVAAcquire: -+ return eError; -+} -+ -+void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR *psDest, -+ DEVMEM_MEMDESC *psSrcMemDesc, -+ RGXFWIF_DEV_VIRTADDR *psSrcFWDevVAddr, -+ IMG_UINT32 uiOffset) -+{ -+ PVRSRV_ERROR eError; -+ IMG_DEV_VIRTADDR sDevVirtAddr; -+ -+ eError = DevmemAcquireDevVirtAddr(psSrcMemDesc, &sDevVirtAddr); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ psDest->psDevVirtAddr.uiAddr = sDevVirtAddr.uiAddr; -+ psDest->psDevVirtAddr.uiAddr += uiOffset; -+ psDest->pbyFWAddr.ui32Addr = psSrcFWDevVAddr->ui32Addr; -+ -+ DevmemReleaseDevVirtAddr(psSrcMemDesc); -+} -+ -+ -+void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc) -+{ -+ DevmemReleaseDevVirtAddr(psSrc); -+} -+ -+/*! 
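/*
 * Summary of how RGXSetFirmwareAddress() above forms a firmware virtual
 * address for a META core: the allocation's device VA is rebased against the
 * firmware raw heap, moved into the META data segment, and then tagged with
 * the META and SLC cacheability bits taken from the devmem flags. The helper
 * below restates that composition for the META path only; its name and
 * parameters are hypothetical and it is not part of this patch.
 */
static IMG_UINT32 RGXMetaFwAddrSketch(IMG_UINT64 ui64DevVAddr,
                                      IMG_UINT32 ui32ExtraOffset,
                                      IMG_BOOL bMetaCached,
                                      IMG_BOOL bSlcCached)
{
	/* Offset of the allocation within the firmware heap */
	IMG_UINT32 ui32Addr = (IMG_UINT32)(ui64DevVAddr + ui32ExtraOffset - RGX_FIRMWARE_RAW_HEAP_BASE);

	/* Rebase into the META data segment */
	ui32Addr += RGXFW_SEGMMU_DATA_BASE_ADDRESS;

	/* Tag with META cacheability, then SLC cacheability */
	ui32Addr |= bMetaCached ? RGXFW_SEGMMU_DATA_META_CACHED : RGXFW_SEGMMU_DATA_META_UNCACHED;
	ui32Addr |= bSlcCached  ? RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED : RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED;

	return ui32Addr;
}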
-+******************************************************************************* -+ @Function RGXFreeCCB -+ @Description Free the kernel or firmware CCB -+ @Input psDevInfo -+ @Input ppsCCBCtl -+ @Input ppvCCBCtlLocal -+ @Input ppsCCBCtlMemDesc -+ @Input ppsCCBMemDesc -+ @Input psCCBCtlFWAddr -+******************************************************************************/ -+static void RGXFreeCCB(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGXFWIF_CCB_CTL **ppsCCBCtl, -+ RGXFWIF_CCB_CTL **ppsCCBCtlLocal, -+ DEVMEM_MEMDESC **ppsCCBCtlMemDesc, -+ IMG_UINT8 **ppui8CCB, -+ DEVMEM_MEMDESC **ppsCCBMemDesc) -+{ -+ if (*ppsCCBMemDesc != NULL) -+ { -+ if (*ppui8CCB != NULL) -+ { -+ DevmemReleaseCpuVirtAddr(*ppsCCBMemDesc); -+ *ppui8CCB = NULL; -+ } -+ DevmemFwUnmapAndFree(psDevInfo, *ppsCCBMemDesc); -+ *ppsCCBMemDesc = NULL; -+ } -+ if (*ppsCCBCtlMemDesc != NULL) -+ { -+ if (*ppsCCBCtl != NULL) -+ { -+ DevmemReleaseCpuVirtAddr(*ppsCCBCtlMemDesc); -+ *ppsCCBCtl = NULL; -+ } -+ DevmemFwUnmapAndFree(psDevInfo, *ppsCCBCtlMemDesc); -+ *ppsCCBCtlMemDesc = NULL; -+ } -+ if (*ppsCCBCtlLocal != NULL) -+ { -+ OSFreeMem(*ppsCCBCtlLocal); -+ *ppsCCBCtlLocal = NULL; -+ } -+} -+ -+/*! -+******************************************************************************* -+ @Function RGXFreeCCBReturnSlots -+ @Description Free the kernel CCB's return slot array and associated mappings -+ @Input psDevInfo Device Info struct -+ @Input ppui32CCBRtnSlots CPU mapping of slot array -+ @Input ppsCCBRtnSlotsMemDesc Slot array's device memdesc -+******************************************************************************/ -+static void RGXFreeCCBReturnSlots(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 **ppui32CCBRtnSlots, -+ DEVMEM_MEMDESC **ppsCCBRtnSlotsMemDesc) -+{ -+ /* Free the return slot array if allocated */ -+ if (*ppsCCBRtnSlotsMemDesc != NULL) -+ { -+ /* Before freeing, ensure the CPU mapping as well is released */ -+ if (*ppui32CCBRtnSlots != NULL) -+ { -+ DevmemReleaseCpuVirtAddr(*ppsCCBRtnSlotsMemDesc); -+ *ppui32CCBRtnSlots = NULL; -+ } -+ DevmemFwUnmapAndFree(psDevInfo, *ppsCCBRtnSlotsMemDesc); -+ *ppsCCBRtnSlotsMemDesc = NULL; -+ } -+} -+ -+/*! -+******************************************************************************* -+ @Function RGXSetupCCB -+ @Description Allocate and initialise a circular command buffer -+ @Input psDevInfo -+ @Input ppsCCBCtl -+ @Input ppsCCBCtlMemDesc -+ @Input ppui8CCB -+ @Input ppsCCBMemDesc -+ @Input psCCBCtlFWAddr -+ @Input ui32NumCmdsLog2 -+ @Input ui32CmdSize -+ @Input uiCCBMemAllocFlags -+ @Input pszName -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+static PVRSRV_ERROR RGXSetupCCB(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGXFWIF_CCB_CTL **ppsCCBCtl, -+ RGXFWIF_CCB_CTL **ppsCCBCtlLocal, -+ DEVMEM_MEMDESC **ppsCCBCtlMemDesc, -+ IMG_UINT8 **ppui8CCB, -+ DEVMEM_MEMDESC **ppsCCBMemDesc, -+ PRGXFWIF_CCB_CTL *psCCBCtlFWAddr, -+ PRGXFWIF_CCB *psCCBFWAddr, -+ IMG_UINT32 ui32NumCmdsLog2, -+ IMG_UINT32 ui32CmdSize, -+ PVRSRV_MEMALLOCFLAGS_T uiCCBMemAllocFlags, -+ const IMG_CHAR *pszName) -+{ -+ PVRSRV_ERROR eError; -+ RGXFWIF_CCB_CTL *psCCBCtl; -+ IMG_UINT32 ui32CCBSize = (1U << ui32NumCmdsLog2); -+ IMG_CHAR szCCBCtlName[DEVMEM_ANNOTATION_MAX_LEN]; -+ IMG_INT32 iStrLen; -+ -+ /* Append "Control" to the name for the control struct. 
*/ -+ iStrLen = OSSNPrintf(szCCBCtlName, sizeof(szCCBCtlName), "%sControl", pszName); -+ PVR_ASSERT(iStrLen < sizeof(szCCBCtlName)); -+ -+ if (unlikely(iStrLen < 0)) -+ { -+ OSStringLCopy(szCCBCtlName, "FwCCBControl", DEVMEM_ANNOTATION_MAX_LEN); -+ } -+ -+ /* Allocate memory for the CCB control.*/ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, -+ sizeof(RGXFWIF_CCB_CTL), -+ szCCBCtlName, -+ ppsCCBCtlMemDesc, -+ psCCBCtlFWAddr, -+ (void**) ppsCCBCtl, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); -+ -+ /* -+ * Allocate memory for the CCB. -+ * (this will reference further command data in non-shared CCBs) -+ */ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ uiCCBMemAllocFlags, -+ ui32CCBSize * ui32CmdSize, -+ pszName, -+ ppsCCBMemDesc, -+ psCCBFWAddr, -+ (void**) ppui8CCB, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); -+ -+ /* -+ * Initialise the CCB control. -+ */ -+ psCCBCtl = OSAllocZMem(sizeof(*psCCBCtl)); -+ PVR_LOG_GOTO_IF_NOMEM(psCCBCtl, eError, fail); -+ -+ psCCBCtl->ui32WrapMask = ui32CCBSize - 1; -+ -+ OSDeviceMemCopy(*ppsCCBCtl, psCCBCtl, sizeof(*psCCBCtl)); -+ RGXFwSharedMemCacheOpPtr(psCCBCtl, FLUSH); -+ -+ *ppsCCBCtlLocal = psCCBCtl; -+ -+ /* Pdump the CCB control */ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Initialise %s", szCCBCtlName); -+ DevmemPDumpLoadMem(*ppsCCBCtlMemDesc, -+ 0, -+ sizeof(RGXFWIF_CCB_CTL), -+ 0); -+ -+ return PVRSRV_OK; -+ -+fail: -+ RGXFreeCCB(psDevInfo, -+ ppsCCBCtl, -+ ppsCCBCtlLocal, -+ ppsCCBCtlMemDesc, -+ ppui8CCB, -+ ppsCCBMemDesc); -+ -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+static void RGXSetupFaultReadRegisterRollback(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ PMR *psPMR; -+ -+ if (psDevInfo->psRGXFaultAddressMemDesc) -+ { -+ if (DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR) == PVRSRV_OK) -+ { -+ PMRUnlockSysPhysAddresses(psPMR); -+ } -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc); -+ psDevInfo->psRGXFaultAddressMemDesc = NULL; -+ } -+} -+ -+static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFWIF_SYSINIT *psFwSysInit) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 *pui32MemoryVirtAddr; -+ IMG_UINT32 i; -+ size_t ui32PageSize = OSGetPageSize(); -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PMR *psPMR; -+ -+ /* Allocate page of memory to use for page faults on non-blocking memory transactions. -+ * Doesn't need to be cleared as it is initialised with the 0xDEADBEE0 pattern below. 
*/ -+ psDevInfo->psRGXFaultAddressMemDesc = NULL; -+ eError = DevmemFwAllocateExportable(psDeviceNode, -+ ui32PageSize, -+ ui32PageSize, -+ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC, -+ "FwExFaultAddress", -+ &psDevInfo->psRGXFaultAddressMemDesc); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate mem for fault address (%u)", -+ __func__, eError)); -+ goto failFaultAddressDescAlloc; -+ } -+ -+ -+ if (!psDeviceNode->bAutoVzFwIsUp) -+ { -+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc, -+ (void **)&pui32MemoryVirtAddr); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to acquire mem for fault address (%u)", -+ __func__, eError)); -+ goto failFaultAddressDescAqCpuVirt; -+ } -+ -+ /* fill the page with a known pattern when booting the firmware */ -+ for (i = 0; i < ui32PageSize/sizeof(IMG_UINT32); i++) -+ { -+ *(pui32MemoryVirtAddr + i) = 0xDEADBEE0; -+ } -+ -+ OSWriteMemoryBarrier(pui32MemoryVirtAddr); -+ RGXFwSharedMemCacheOpExec(pui32MemoryVirtAddr, ui32PageSize, PVRSRV_CACHE_OP_FLUSH); -+ -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc); -+ } -+ -+ eError = DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Error getting PMR for fault address (%u)", -+ __func__, eError)); -+ -+ goto failFaultAddressDescGetPMR; -+ } -+ else -+ { -+ IMG_BOOL bValid; -+ IMG_UINT32 ui32Log2PageSize = OSGetPageShift(); -+ -+ eError = PMRLockSysPhysAddresses(psPMR); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Error locking physical address for fault address MemDesc (%u)", -+ __func__, eError)); -+ -+ goto failFaultAddressDescLockPhys; -+ } -+ -+ eError = PMR_DevPhysAddr(psPMR,ui32Log2PageSize, 1, 0, &(psFwSysInit->sFaultPhysAddr), &bValid, DEVICE_USE); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Error getting physical address for fault address MemDesc (%u)", -+ __func__, eError)); -+ -+ goto failFaultAddressDescGetPhys; -+ } -+ -+ if (!bValid) -+ { -+ psFwSysInit->sFaultPhysAddr.uiAddr = 0; -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed getting physical address for fault address MemDesc - invalid page (0x%" IMG_UINT64_FMTSPECX ")", -+ __func__, psFwSysInit->sFaultPhysAddr.uiAddr)); -+ -+ goto failFaultAddressDescGetPhys; -+ } -+ } -+ -+ return PVRSRV_OK; -+ -+failFaultAddressDescGetPhys: -+ PMRUnlockSysPhysAddresses(psPMR); -+ -+failFaultAddressDescLockPhys: -+failFaultAddressDescGetPMR: -+failFaultAddressDescAqCpuVirt: -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc); -+ psDevInfo->psRGXFaultAddressMemDesc = NULL; -+ -+failFaultAddressDescAlloc: -+ -+ return eError; -+} -+ -+#if defined(PDUMP) -+/* Replace the DevPhy address with the one Pdump allocates at pdump_player run time */ -+static PVRSRV_ERROR RGXPDumpFaultReadRegister(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ PVRSRV_ERROR eError; -+ PMR *psFWInitPMR, *psFaultAddrPMR; -+ IMG_UINT32 ui32Dstoffset; -+ -+ psFWInitPMR = (PMR *)(psDevInfo->psRGXFWIfSysInitMemDesc->psImport->hPMR); -+ ui32Dstoffset = psDevInfo->psRGXFWIfSysInitMemDesc->uiOffset + offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr.uiAddr); -+ -+ psFaultAddrPMR = (PMR *)(psDevInfo->psRGXFaultAddressMemDesc->psImport->hPMR); -+ -+ eError = PDumpMemLabelToMem64(psFaultAddrPMR, -+ psFWInitPMR, -+ 0, -+ ui32Dstoffset, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Dump 
of Fault Page Phys address failed(%u)", __func__, eError)); -+ } -+ return eError; -+} -+#endif -+ -+#if defined(SUPPORT_TBI_INTERFACE) -+/*************************************************************************/ /*! -+@Function RGXTBIBufferIsInitRequired -+ -+@Description Returns true if the firmware tbi buffer is not allocated and -+ might be required by the firmware soon. TBI buffer allocated -+ on-demand to reduce RAM footprint on systems not needing -+ tbi. -+ -+@Input psDevInfo RGX device info -+ -+@Return IMG_BOOL Whether on-demand allocation(s) is/are needed -+ or not -+*/ /**************************************************************************/ -+INLINE IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; -+ -+ RGXFwSharedMemCacheOpValue(psTraceBufCtl->ui32LogType, INVALIDATE); -+ -+ /* The firmware expects a tbi buffer only when: -+ * - Logtype is "tbi" -+ */ -+ if ((psDevInfo->psRGXFWIfTBIBufferMemDesc == NULL) -+ && (psTraceBufCtl->ui32LogType & ~RGXFWIF_LOG_TYPE_TRACE) -+ && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)) -+ { -+ return IMG_TRUE; -+ } -+ -+ return IMG_FALSE; -+} -+ -+/*************************************************************************/ /*! -+@Function RGXTBIBufferDeinit -+ -+@Description Deinitialises all the allocations and references that are made -+ for the FW tbi buffer -+ -+@Input ppsDevInfo RGX device info -+@Return void -+*/ /**************************************************************************/ -+static void RGXTBIBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTBIBufferMemDesc); -+ psDevInfo->psRGXFWIfTBIBufferMemDesc = NULL; -+ psDevInfo->ui32RGXFWIfHWPerfBufSize = 0; -+ psDevInfo->ui32RGXL2HWPerfBufSize = 0; -+} -+ -+/*************************************************************************/ /*! -+@Function RGXTBIBufferInitOnDemandResources -+ -+@Description Allocates the firmware TBI buffer required for reading SFs -+ strings and initialize it with SFs. -+ -+@Input psDevInfo RGX device info -+ -+@Return PVRSRV_OK If all went good, PVRSRV_ERROR otherwise. -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 i, ui32Len; -+ const IMG_UINT32 ui32FWTBIBufsize = g_ui32SFsCount * sizeof(RGXFW_STID_FMT); -+ RGXFW_STID_FMT *psFW_SFs = NULL; -+ -+ /* Firmware address should not be already set */ -+ if (psDevInfo->sRGXFWIfTBIBuffer.ui32Addr) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: FW address for FWTBI is already set. 
Resetting it with newly allocated one", -+ __func__)); -+ } -+ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS, -+ ui32FWTBIBufsize, -+ "FwTBIBuffer", -+ &psDevInfo->psRGXFWIfTBIBufferMemDesc, -+ &psDevInfo->sRGXFWIfTBIBuffer, -+ (void**)&psFW_SFs, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); -+ -+ /* Copy SFs entries to FW buffer */ -+ for (i = 0; i < g_ui32SFsCount; i++) -+ { -+ OSCachedMemCopy(&psFW_SFs[i].ui32Id, &SFs[i].ui32Id, sizeof(SFs[i].ui32Id)); -+ ui32Len = OSStringLength(SFs[i].psName); -+ OSCachedMemCopy(psFW_SFs[i].sName, SFs[i].psName, MIN(ui32Len, IMG_SF_STRING_MAX_SIZE - 1)); -+ } -+ -+ /* flush write buffers for psFW_SFs */ -+ OSWriteMemoryBarrier(psFW_SFs); -+ -+ /* Set size of TBI buffer */ -+ psDevInfo->ui32FWIfTBIBufferSize = ui32FWTBIBufsize; -+ -+ /* release CPU mapping */ -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTBIBufferMemDesc); -+ -+ return PVRSRV_OK; -+fail: -+ RGXTBIBufferDeinit(psDevInfo); -+ return eError; -+} -+#endif -+ -+/*************************************************************************/ /*! -+@Function RGXTraceBufferIsInitRequired -+ -+@Description Returns true if the firmware trace buffer is not allocated and -+ might be required by the firmware soon. Trace buffer allocated -+ on-demand to reduce RAM footprint on systems not needing -+ firmware trace. -+ -+@Input psDevInfo RGX device info -+ -+@Return IMG_BOOL Whether on-demand allocation(s) is/are needed -+ or not -+*/ /**************************************************************************/ -+INLINE IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; -+ -+ RGXFwSharedMemCacheOpValue(psTraceBufCtl->ui32LogType, INVALIDATE); -+ -+ /* The firmware expects a trace buffer only when: -+ * - Logtype is "trace" AND -+ * - at least one LogGroup is configured -+ * - the Driver Mode is not Guest -+ */ -+ if ((psDevInfo->psRGXFWIfTraceBufferMemDesc[0] == NULL) -+ && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE) -+ && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) -+ && !PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ return IMG_TRUE; -+ } -+ -+ return IMG_FALSE; -+} -+ -+/*************************************************************************/ /*! -+@Function RGXTraceBufferDeinit -+ -+@Description Deinitialises all the allocations and references that are made -+ for the FW trace buffer(s) -+ -+@Input ppsDevInfo RGX device info -+@Return void -+*/ /**************************************************************************/ -+static void RGXTraceBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ IMG_UINT32 i; -+ -+ for (i = 0; i < RGXFW_THREAD_NUM; i++) -+ { -+ if (psDevInfo->psRGXFWIfTraceBufferMemDesc[i]) -+ { -+ if (psDevInfo->apui32TraceBuffer[i] != NULL) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufferMemDesc[i]); -+ psDevInfo->apui32TraceBuffer[i] = NULL; -+ } -+ -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufferMemDesc[i]); -+ psDevInfo->psRGXFWIfTraceBufferMemDesc[i] = NULL; -+ } -+ } -+} -+ -+/*************************************************************************/ /*! -+@Function RGXTraceBufferInitOnDemandResources -+ -+@Description Allocates the firmware trace buffer required for dumping trace -+ info from the firmware. -+ -+@Input psDevInfo RGX device info -+ -+@Return PVRSRV_OK If all went good, PVRSRV_ERROR otherwise. 
-+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, -+ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags) -+{ -+ RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 ui32FwThreadNum; -+ IMG_UINT32 ui32DefaultTraceBufSize; -+ IMG_DEVMEM_SIZE_T uiTraceBufSizeInBytes; -+ void *pvAppHintState = NULL; -+ IMG_CHAR pszBufferName[] = "FwTraceBuffer_Thread0"; -+ -+ /* Check AppHint value for module-param FWTraceBufSizeInDWords */ -+ OSCreateAppHintState(&pvAppHintState); -+ ui32DefaultTraceBufSize = RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, -+ pvAppHintState, -+ FWTraceBufSizeInDWords, -+ &ui32DefaultTraceBufSize, -+ &psDevInfo->ui32TraceBufSizeInDWords); -+ OSFreeAppHintState(pvAppHintState); -+ pvAppHintState = NULL; -+ -+ /* Write tracebuf size once to devmem */ -+ psTraceBufCtl->ui32TraceBufSizeInDWords = psDevInfo->ui32TraceBufSizeInDWords; -+ -+ if (psDevInfo->ui32TraceBufSizeInDWords < RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS || -+ psDevInfo->ui32TraceBufSizeInDWords > RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Requested trace buffer size (%u) out of its minimum (%u) & maximum (%u) range. Exiting error.", -+ __func__, -+ psDevInfo->ui32TraceBufSizeInDWords, -+ RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS, -+ RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS)); -+ eError = PVRSRV_ERROR_OUT_OF_RANGE; -+ goto exit_error; -+ } -+ -+ uiTraceBufSizeInBytes = psDevInfo->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); -+ -+ for (ui32FwThreadNum = 0; ui32FwThreadNum < RGXFW_THREAD_NUM; ui32FwThreadNum++) -+ { -+#if !defined(SUPPORT_AUTOVZ) -+ /* Ensure allocation API is only called when not already allocated */ -+ PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum] == NULL); -+ /* Firmware address should not be already set */ -+ PVR_ASSERT(psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer.ui32Addr == 0x0); -+#endif -+ -+ /* update the firmware thread number in the Trace Buffer's name */ -+ pszBufferName[sizeof(pszBufferName) - 2] += ui32FwThreadNum; -+ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ uiAllocFlags, -+ uiTraceBufSizeInBytes, -+ pszBufferName, -+ &psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum], -+ &psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer, -+ (void**)&psDevInfo->apui32TraceBuffer[ui32FwThreadNum], -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); -+ } -+ -+ return PVRSRV_OK; -+ -+fail: -+ RGXTraceBufferDeinit(psDevInfo); -+exit_error: -+ return eError; -+} -+ -+#if defined(PDUMP) -+/*************************************************************************/ /*! -+@Function RGXPDumpLoadFWInitData -+ -+@Description Allocates the firmware trace buffer required for dumping trace -+ info from the firmware. 
-+ -+@Input psDevInfo RGX device info -+ */ /*************************************************************************/ -+static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32HWPerfCountersDataSize, -+ IMG_BOOL bEnableSignatureChecks) -+{ -+ IMG_UINT32 ui32ConfigFlags = psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags; -+ IMG_UINT32 ui32FwOsCfgFlags = psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags; -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Dump RGXFW Init data"); -+ if (!bEnableSignatureChecks) -+ { -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "(to enable rgxfw signatures place the following line after the RTCONF line)"); -+ DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc, -+ offsetof(RGXFWIF_SYSINIT, asSigBufCtl), -+ sizeof(RGXFWIF_SIGBUF_CTL)*(RGXFWIF_DM_MAX), -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Dump initial state of FW runtime configuration"); -+ DevmemPDumpLoadMem(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, -+ 0, -+ sizeof(RGXFWIF_RUNTIME_CFG), -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Dump rgxfw hwperfctl structure"); -+ DevmemPDumpLoadZeroMem (psDevInfo->psRGXFWIfHWPerfCountersMemDesc, -+ 0, -+ ui32HWPerfCountersDataSize, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Dump rgxfw trace control structure"); -+ DevmemPDumpLoadMem(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, -+ 0, -+ sizeof(RGXFWIF_TRACEBUF), -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Dump firmware system data structure"); -+ DevmemPDumpLoadMem(psDevInfo->psRGXFWIfFwSysDataMemDesc, -+ 0, -+ sizeof(RGXFWIF_SYSDATA), -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Dump firmware OS data structure"); -+ DevmemPDumpLoadMem(psDevInfo->psRGXFWIfFwOsDataMemDesc, -+ 0, -+ sizeof(RGXFWIF_OSDATA), -+ PDUMP_FLAGS_CONTINUOUS); -+ -+#if defined(SUPPORT_TBI_INTERFACE) -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Dump rgx TBI buffer"); -+ DevmemPDumpLoadMem(psDevInfo->psRGXFWIfTBIBufferMemDesc, -+ 0, -+ psDevInfo->ui32FWIfTBIBufferSize, -+ PDUMP_FLAGS_CONTINUOUS); -+#endif /* defined(SUPPORT_TBI_INTERFACE) */ -+ -+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Dump rgxfw register configuration buffer"); -+ DevmemPDumpLoadMem(psDevInfo->psRGXFWIfRegCfgMemDesc, -+ 0, -+ sizeof(RGXFWIF_REG_CFG), -+ PDUMP_FLAGS_CONTINUOUS); -+#endif /* defined(SUPPORT_USER_REGISTER_CONFIGURATION) */ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Dump rgxfw system init structure"); -+ DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc, -+ 0, -+ sizeof(RGXFWIF_SYSINIT), -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Dump rgxfw os init structure"); -+ DevmemPDumpLoadMem(psDevInfo->psRGXFWIfOsInitMemDesc, -+ 0, -+ sizeof(RGXFWIF_OSINIT), -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ /* RGXFW Init structure needs to be loaded before we overwrite FaultPhysAddr, else this address patching won't have any effect */ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Overwrite FaultPhysAddr of FwSysInit in pdump with actual physical address"); -+ RGXPDumpFaultReadRegister(psDevInfo); -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "RTCONF: run-time configuration"); -+ -+ /* Dump the config options so they can be edited. 
*/ -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "(Set the FW system config options here)"); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Ctx Switch Rand mode: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_MODE_RAND); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Ctx Switch Soft Reset Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_SRESET_EN); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Enable HWPerf: 0x%08x)", RGXFWIF_INICFG_HWPERF_EN); -+#if defined(SUPPORT_VALIDATION) -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Enable generic DM Killing Rand mode: 0x%08x)", RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN); -+#endif /* defined(SUPPORT_VALIDATION) */ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Rascal+Dust Power Island: 0x%08x)", RGXFWIF_INICFG_POW_RASCALDUST); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( FBCDC Version 3.1 Enable: 0x%08x)", RGXFWIF_INICFG_FBCDC_V3_1_EN); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Check MList: 0x%08x)", RGXFWIF_INICFG_CHECK_MLIST_EN); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Disable Auto Clock Gating: 0x%08x)", RGXFWIF_INICFG_DISABLE_CLKGATING_EN); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Enable register configuration: 0x%08x)", RGXFWIF_INICFG_REGCONFIG_EN); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Assert on TA Out-of-Memory: 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Disable HWPerf custom counter filter: 0x%08x)", RGXFWIF_INICFG_HWP_DISABLE_FILTER); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Enable Ctx Switch profile mode: 0x%08x (none=b'000, fast=b'001, medium=b'010, slow=b'011, nodelay=b'100))", RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Disable DM overlap (except TA during SPM): 0x%08x)", RGXFWIF_INICFG_DISABLE_DM_OVERLAP); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Assert on HWR trigger (page fault, lockup, overrun or poll failure): 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Enable coherent memory accesses: 0x%08x)", RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Enable IRQ validation: 0x%08x)", RGXFWIF_INICFG_VALIDATE_IRQ); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( SPU power state mask change Enable: 0x%08x)", RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN); -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Enable Workload Estimation: 0x%08x)", RGXFWIF_INICFG_WORKEST); -+#if defined(SUPPORT_PDVFS) -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Enable Proactive DVFS: 0x%08x)", RGXFWIF_INICFG_PDVFS); -+#endif /* defined(SUPPORT_PDVFS) */ -+ } -+#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( CDM Arbitration Mode (task demand=b'01, round robin=b'10): 0x%08x)", RGXFWIF_INICFG_CDM_ARBITRATION_MASK); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( ISP Scheduling Mode (v1=b'01, v2=b'10): 0x%08x)", RGXFWIF_INICFG_ISPSCHEDMODE_MASK); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Validate SOC & USC timers: 0x%08x)", RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER); -+ -+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwSysDataMemDesc, -+ offsetof(RGXFWIF_SYSDATA, ui32ConfigFlags), -+ ui32ConfigFlags, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Extended FW system config options not used.)"); -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "(Set the FW OS config options here)"); -+ 
PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Ctx Switch TDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Ctx Switch TA Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Lower Priority Ctx Switch 2D Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Lower Priority Ctx Switch TA Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Lower Priority Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Lower Priority Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM); -+ -+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwOsDataMemDesc, -+ offsetof(RGXFWIF_OSDATA, ui32FwOsConfigFlags), -+ ui32FwOsCfgFlags, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ -+#if defined(SUPPORT_SECURITY_VALIDATION) -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "(Select one or more security tests here)"); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Read/write FW private data from non-FW contexts: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_DATA); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Read/write FW code from non-FW contexts: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_CODE); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Execute FW code from non-secure memory: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_NONSECURE); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Execute FW code from secure (non-FW) memory: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_SECURE); -+ -+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, -+ offsetof(RGXFWIF_SYSINIT, ui32SecurityTestFlags), -+ psDevInfo->psRGXFWIfSysInit->ui32SecurityTestFlags, -+ PDUMP_FLAGS_CONTINUOUS); -+#endif /* defined(SUPPORT_SECURITY_VALIDATION) */ -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( PID filter type: %X=INCLUDE_ALL_EXCEPT, %X=EXCLUDE_ALL_EXCEPT)", -+ RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT, -+ RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT); -+ -+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, -+ offsetof(RGXFWIF_SYSINIT, sPIDFilter.eMode), -+ psDevInfo->psRGXFWIfSysInit->sPIDFilter.eMode, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( PID filter PID/DriverID list (Up to %u entries. Terminate with a zero PID))", -+ RGXFWIF_PID_FILTER_MAX_NUM_PIDS); -+ { -+ IMG_UINT32 i; -+ -+ /* generate a few WRWs in the pdump stream as an example */ -+ for (i = 0; i < MIN(RGXFWIF_PID_FILTER_MAX_NUM_PIDS, 8); i++) -+ { -+ /* -+ * Some compilers cannot cope with the uses of offsetof() below - the specific problem being the use of -+ * a non-const variable in the expression, which it needs to be const. Typical compiler output is -+ * "expression must have a constant value". 
-+ */ -+ const IMG_DEVMEM_OFFSET_T uiPIDOff -+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].uiPID); -+ -+ const IMG_DEVMEM_OFFSET_T uiDriverIDOff -+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].ui32DriverID); -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "(PID and DriverID pair %u)", i); -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "(PID)"); -+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, -+ uiPIDOff, -+ 0, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "(DriverID)"); -+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, -+ uiDriverIDOff, -+ 0, -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+ } -+ -+ /* -+ * Dump the log config so it can be edited. -+ */ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "(Set the log config here)"); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( Log Type: set bit 0 for TRACE, reset for TBI)"); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( MAIN Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MAIN); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( MTS Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MTS); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( CLEANUP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CLEANUP); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( CSW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CSW); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( BIF Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_BIF); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( PM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_PM); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( RTD Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_RTD); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( SPM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_SPM); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( POW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_POW); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( HWR Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWR); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( HWP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWP); -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) -+ { -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( DMA Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DMA); -+ } -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( MISC Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MISC); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "( DEBUG Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DEBUG); -+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, -+ offsetof(RGXFWIF_TRACEBUF, ui32LogType), -+ psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Set the HWPerf Filter config here, see \"hwperfbin2jsont -h\""); -+ DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfSysInitMemDesc, -+ offsetof(RGXFWIF_SYSINIT, ui64HWPerfFilter), -+ psDevInfo->psRGXFWIfSysInit->ui64HWPerfFilter, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "(Number of registers configurations for types(byte index): pow on(%d), dust change(%d), ta(%d), 3d(%d), cdm(%d), tla(%d), TDM(%d))", -+ RGXFWIF_REG_CFG_TYPE_PWR_ON, -+ RGXFWIF_REG_CFG_TYPE_DUST_CHANGE, -+ RGXFWIF_REG_CFG_TYPE_TA, -+ RGXFWIF_REG_CFG_TYPE_3D, -+ RGXFWIF_REG_CFG_TYPE_CDM, -+ RGXFWIF_REG_CFG_TYPE_TLA, -+ RGXFWIF_REG_CFG_TYPE_TDM); -+ -+ { -+ IMG_UINT32 i; -+ -+ /* Write 32 bits in each iteration as required by PDUMP WRW command */ -+ for (i = 0; i < 
RGXFWIF_REG_CFG_TYPE_ALL; i += sizeof(IMG_UINT32)) -+ { -+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc, -+ offsetof(RGXFWIF_REG_CFG, aui8NumRegsType[i]), -+ 0, -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+ } -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "(Set registers here: address, mask, value)"); -+ DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc, -+ offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Addr), -+ 0, -+ PDUMP_FLAGS_CONTINUOUS); -+ DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc, -+ offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Mask), -+ 0, -+ PDUMP_FLAGS_CONTINUOUS); -+ DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc, -+ offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Value), -+ 0, -+ PDUMP_FLAGS_CONTINUOUS); -+#endif /* SUPPORT_USER_REGISTER_CONFIGURATION */ -+} -+#endif /* defined(PDUMP) */ -+ -+/*! -+******************************************************************************* -+ @Function RGXSetupFwGuardPage -+ -+ @Description Allocate a Guard Page at the start of a Guest's Main Heap -+ -+ @Input psDevceNode -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+static PVRSRV_ERROR RGXSetupFwGuardPage(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ (RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)), -+ OSGetPageSize(), -+ "FwGuardPage", -+ &psDevInfo->psRGXFWHeapGuardPageReserveMemDesc, -+ NULL, -+ NULL, -+ RFW_FWADDR_FLAG_NONE); -+ -+ return eError; -+} -+ -+/*! -+******************************************************************************* -+ @Function RGXSetupFwSysData -+ -+ @Description Sets up all system-wide firmware related data -+ -+ @Input psDevInfo -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_BOOL bEnableSignatureChecks, -+ IMG_UINT32 ui32SignatureChecksBufSize, -+ IMG_UINT32 ui32HWPerfFWBufSizeKB, -+ IMG_UINT64 ui64HWPerfFilter, -+ IMG_UINT32 ui32ConfigFlags, -+ IMG_UINT32 ui32ConfigFlagsExt, -+ IMG_UINT32 ui32LogType, -+ IMG_UINT32 ui32FilterFlags, -+ IMG_UINT32 ui32JonesDisableMask, -+ IMG_UINT32 ui32HWPerfCountersDataSize, -+ IMG_UINT32 *pui32TPUTrilinearFracMask, -+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf, -+ FW_PERF_CONF eFirmwarePerf) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGXFWIF_SYSINIT *psFwSysInitScratch = NULL; -+#if defined(SUPPORT_VALIDATION) -+ /* Create AppHint reference handle for use in SUPPORT_VALIDATION case. -+ * This is freed on exit from this routine. 
-+ */ -+ IMG_UINT32 ui32AppHintDefault = 0; -+ void *pvAppHintState = NULL; -+ OSCreateAppHintState(&pvAppHintState); -+#endif /* defined(SUPPORT_VALIDATION) */ -+ -+ psFwSysInitScratch = OSAllocZMem(sizeof(*psFwSysInitScratch)); -+ PVR_LOG_GOTO_IF_NOMEM(psFwSysInitScratch, eError, fail); -+ -+ /* Sys Fw init data */ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ (RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS | -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)) & -+ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), -+ sizeof(RGXFWIF_SYSINIT), -+ "FwSysInitStructure", -+ &psDevInfo->psRGXFWIfSysInitMemDesc, -+ NULL, -+ (void**) &psDevInfo->psRGXFWIfSysInit, -+ RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Sys Init structure allocation", fail); -+ -+ /* Setup Fault read register */ -+ eError = RGXSetupFaultReadRegister(psDeviceNode, psFwSysInitScratch); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Fault read register setup", fail); -+ -+#if defined(SUPPORT_AUTOVZ) -+ psFwSysInitScratch->ui32VzWdgPeriod = PVR_AUTOVZ_WDG_PERIOD_MS; -+#endif -+ -+ /* RD Power Island */ -+ { -+ RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; -+ IMG_BOOL bSysEnableRDPowIsland = psRGXData->psRGXTimingInfo->bEnableRDPowIsland; -+ IMG_BOOL bEnableRDPowIsland = ((eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_DEFAULT) && bSysEnableRDPowIsland) || -+ (eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_FORCE_ON); -+ -+ ui32ConfigFlags |= bEnableRDPowIsland? RGXFWIF_INICFG_POW_RASCALDUST : 0; -+ } -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST; -+#if defined(SUPPORT_PDVFS) -+ { -+ RGXFWIF_PDVFS_OPP *psPDVFSOPPInfo; -+ IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg; -+ -+ /* Pro-active DVFS depends on Workload Estimation */ -+ psPDVFSOPPInfo = &psFwSysInitScratch->sPDVFSOPPInfo; -+ psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; -+ PVR_LOG_IF_FALSE(psDVFSDeviceCfg->pasOPPTable, "RGXSetupFwSysData: Missing OPP Table"); -+ -+ if (psDVFSDeviceCfg->pasOPPTable != NULL) -+ { -+ if (psDVFSDeviceCfg->ui32OPPTableSize > ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: OPP Table too large: Size = %u, Maximum size = %lu", -+ __func__, -+ psDVFSDeviceCfg->ui32OPPTableSize, -+ (unsigned long)(ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues)))); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto fail; -+ } -+ -+ OSDeviceMemCopy(psPDVFSOPPInfo->asOPPValues, -+ psDVFSDeviceCfg->pasOPPTable, -+ sizeof(psPDVFSOPPInfo->asOPPValues)); -+ -+ psPDVFSOPPInfo->ui32MaxOPPPoint = psDVFSDeviceCfg->ui32OPPTableSize - 1; -+ -+ ui32ConfigFlags |= RGXFWIF_INICFG_PDVFS; -+ } -+ } -+#endif /* defined(SUPPORT_PDVFS) */ -+ } -+#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */ -+ -+ /* FW trace control structure */ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & -+ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), -+ sizeof(RGXFWIF_TRACEBUF), -+ "FwTraceCtlStruct", -+ &psDevInfo->psRGXFWIfTraceBufCtlMemDesc, -+ &psFwSysInitScratch->sTraceBufCtl, -+ (void**) &psDevInfo->psRGXFWIfTraceBufCtl, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); -+ -+ if (!psDeviceNode->bAutoVzFwIsUp) -+ { -+ /* Set initial firmware log type/group(s) */ -+ if (ui32LogType & ~RGXFWIF_LOG_TYPE_MASK) -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid initial log type (0x%X)", -+ __func__, ui32LogType)); -+ goto fail; -+ } -+ 
psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32LogType; -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType, FLUSH); -+ } -+ -+ /* When PDUMP is enabled, ALWAYS allocate on-demand trace buffer resource -+ * (irrespective of loggroup(s) enabled), given that logtype/loggroups can -+ * be set during PDump playback in logconfig, at any point of time, -+ * Otherwise, allocate only if required. */ -+#if !defined(PDUMP) -+#if defined(SUPPORT_AUTOVZ) -+ /* always allocate trace buffer for AutoVz Host drivers to allow -+ * deterministic addresses of all SysData structures */ -+ if ((PVRSRV_VZ_MODE_IS(HOST)) || (RGXTraceBufferIsInitRequired(psDevInfo))) -+#else -+ if (RGXTraceBufferIsInitRequired(psDevInfo)) -+#endif -+#endif -+ { -+ eError = RGXTraceBufferInitOnDemandResources(psDevInfo, -+ RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS & -+ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp)); -+ } -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXTraceBufferInitOnDemandResources", fail); -+ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & -+ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), -+ sizeof(RGXFWIF_SYSDATA), -+ "FwSysData", -+ &psDevInfo->psRGXFWIfFwSysDataMemDesc, -+ &psFwSysInitScratch->sFwSysData, -+ (void**) &psDevInfo->psRGXFWIfFwSysData, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); -+ -+ /* GPIO validation setup */ -+ psFwSysInitScratch->eGPIOValidationMode = RGXFWIF_GPIO_VAL_OFF; -+#if defined(SUPPORT_VALIDATION) -+ { -+ IMG_INT32 ui32GPIOValidationMode; -+ -+ /* Check AppHint for GPIO validation mode */ -+ ui32AppHintDefault = PVRSRV_APPHINT_GPIOVALIDATIONMODE; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, -+ pvAppHintState, -+ GPIOValidationMode, -+ &ui32AppHintDefault, -+ &ui32GPIOValidationMode); -+ -+ if (ui32GPIOValidationMode >= RGXFWIF_GPIO_VAL_LAST) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid GPIO validation mode: %d, only valid if smaller than %d. 
Disabling GPIO validation.",
-+                     __func__,
-+                     ui32GPIOValidationMode,
-+                     RGXFWIF_GPIO_VAL_LAST));
-+        }
-+        else
-+        {
-+            psFwSysInitScratch->eGPIOValidationMode = (RGXFWIF_GPIO_VAL_MODE) ui32GPIOValidationMode;
-+        }
-+    }
-+#endif
-+
-+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
-+    eError = RGXFWSetupCounterBuffer(psDevInfo,
-+                                     &psDevInfo->psCounterBufferMemDesc,
-+                                     PAGE_SIZE,
-+                                     &psFwSysInitScratch->sCounterDumpCtl,
-+                                     "CounterBuffer");
-+    PVR_LOG_GOTO_IF_ERROR(eError, "Counter Buffer allocation", fail);
-+#endif /* defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) */
-+
-+#if defined(SUPPORT_VALIDATION)
-+    {
-+        IMG_UINT32 ui32EnablePollOnChecksumErrorStatus;
-+        ui32AppHintDefault = 0;
-+
-+        /* Check AppHint for polling on GPU Checksum status */
-+        OSGetAppHintUINT32(APPHINT_NO_DEVICE,
-+                           pvAppHintState,
-+                           EnablePollOnChecksumErrorStatus,
-+                           &ui32AppHintDefault,
-+                           &ui32EnablePollOnChecksumErrorStatus);
-+
-+        switch (ui32EnablePollOnChecksumErrorStatus)
-+        {
-+            case 0: /* no checking */ break;
-+            case 3: psDevInfo->ui32ValidationFlags |= RGX_VAL_WGP_SIG_CHECK_NOERR_EN; break;
-+            case 4: psDevInfo->ui32ValidationFlags |= RGX_VAL_WGP_SIG_CHECK_ERR_EN; break;
-+            default:
-+                PVR_DPF((PVR_DBG_WARNING, "Unsupported value in EnablePollOnChecksumErrorStatus (%d)", ui32EnablePollOnChecksumErrorStatus));
-+                break;
-+        }
-+    }
-+#endif /* defined(SUPPORT_VALIDATION) */
-+
-+#if defined(SUPPORT_FIRMWARE_GCOV)
-+    eError = RGXFWSetupFirmwareGcovBuffer(psDevInfo,
-+                                          &psDevInfo->psFirmwareGcovBufferMemDesc,
-+                                          RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE,
-+                                          &psFwSysInitScratch->sFirmwareGcovCtl,
-+                                          "FirmwareGcovBuffer");
-+    PVR_LOG_GOTO_IF_ERROR(eError, "Firmware GCOV buffer allocation", fail);
-+    psDevInfo->ui32FirmwareGcovSize = RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE;
-+#endif /* defined(SUPPORT_FIRMWARE_GCOV) */
-+
-+#if defined(PDUMP)
-+    /* Require a minimum amount of memory for the signature buffers */
-+    if (ui32SignatureChecksBufSize < RGXFW_SIG_BUFFER_SIZE_MIN)
-+    {
-+        ui32SignatureChecksBufSize = RGXFW_SIG_BUFFER_SIZE_MIN;
-+    }
-+
-+    /* Setup Signature and Checksum Buffers for TDM, GEOM and 3D */
-+    eError = RGXFWSetupSignatureChecks(psDevInfo,
-+                                       &psDevInfo->psRGXFWSigTAChecksMemDesc,
-+                                       ui32SignatureChecksBufSize,
-+                                       &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM]);
-+    PVR_LOG_GOTO_IF_ERROR(eError, "TA Signature check setup", fail);
-+    psDevInfo->ui32SigTAChecksSize = ui32SignatureChecksBufSize;
-+
-+    eError = RGXFWSetupSignatureChecks(psDevInfo,
-+                                       &psDevInfo->psRGXFWSig3DChecksMemDesc,
-+                                       ui32SignatureChecksBufSize,
-+                                       &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D]);
-+    PVR_LOG_GOTO_IF_ERROR(eError, "3D Signature check setup", fail);
-+    psDevInfo->ui32Sig3DChecksSize = ui32SignatureChecksBufSize;
-+
-+    psDevInfo->psRGXFWSigTDM2DChecksMemDesc = NULL;
-+    psDevInfo->ui32SigTDM2DChecksSize = 0;
-+
-+    if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM))
-+    {
-+        /* Buffer allocated only when feature present because, all known TDM
-+         * signature registers are dependent on this feature being present */
-+        eError = RGXFWSetupSignatureChecks(psDevInfo,
-+                                           &psDevInfo->psRGXFWSigTDM2DChecksMemDesc,
-+                                           ui32SignatureChecksBufSize,
-+                                           &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM]);
-+        PVR_LOG_GOTO_IF_ERROR(eError, "TDM Signature check setup", fail);
-+        psDevInfo->ui32SigTDM2DChecksSize = ui32SignatureChecksBufSize;
-+    }
-+
-+    if (!bEnableSignatureChecks)
-+    {
-+        psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM].sBuffer.ui32Addr = 0x0;
-+
psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM].sBuffer.ui32Addr = 0x0; -+ psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D].sBuffer.ui32Addr = 0x0; -+ } -+#endif /* defined(PDUMP) */ -+ -+ eError = RGXFWSetupAlignChecks(psDeviceNode, -+ &psFwSysInitScratch->sAlignChecks); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Alignment checks setup", fail); -+ -+ psFwSysInitScratch->ui32FilterFlags = ui32FilterFlags; -+ -+ /* Fill the remaining bits of fw the init data */ -+ psFwSysInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_HEAP_BASE; -+ psFwSysInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_HEAP_BASE; -+ psFwSysInitScratch->sFBCDCStateTableBase.uiAddr = RGX_FBCDC_HEAP_BASE; -+ psFwSysInitScratch->sFBCDCLargeStateTableBase.uiAddr = RGX_FBCDC_LARGE_HEAP_BASE; -+ psFwSysInitScratch->sTextureHeapBase.uiAddr = RGX_TEXTURE_STATE_HEAP_BASE; -+ -+#if defined(FIX_HW_BRN_65273_BIT_MASK) -+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) -+ { -+ /* Fill the remaining bits of fw the init data */ -+ psFwSysInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_BRN_65273_HEAP_BASE; -+ psFwSysInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_BRN_65273_HEAP_BASE; -+ } -+#endif -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) -+ { -+ psFwSysInitScratch->ui32JonesDisableMask = ui32JonesDisableMask; -+ } -+#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)) -+ { -+ eError = _AllocateSLC3Fence(psDevInfo, psFwSysInitScratch); -+ PVR_LOG_GOTO_IF_ERROR(eError, "SLC3Fence memory allocation", fail); -+ } -+#endif -+#if defined(SUPPORT_PDVFS) -+ /* Core clock rate */ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS & -+ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), -+ sizeof(IMG_UINT32), -+ "FwPDVFSCoreClkRate", -+ &psDevInfo->psRGXFWIFCoreClkRateMemDesc, -+ &psFwSysInitScratch->sCoreClockRate, -+ (void**) &psDevInfo->pui32RGXFWIFCoreClkRate, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PDVFS core clock rate memory setup", fail); -+#endif -+ { -+ /* Timestamps */ -+ PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags = -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) | -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | /* XXX ?? 
*/
-+                PVRSRV_MEMALLOCFLAG_GPU_UNCACHED |
-+                PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
-+                PVRSRV_MEMALLOCFLAG_CPU_READABLE |
-+                PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
-+                PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
-+                PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
-+
-+        /*
-+           the timer query arrays
-+        */
-+        PDUMPCOMMENT(psDeviceNode, "Allocate timer query arrays (FW)");
-+        eError = DevmemFwAllocate(psDevInfo,
-+                                  sizeof(IMG_UINT64) * RGX_MAX_TIMER_QUERIES,
-+                                  uiMemAllocFlags |
-+                                  PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
-+                                  "FwStartTimesArray",
-+                                  & psDevInfo->psStartTimeMemDesc);
-+        if (eError != PVRSRV_OK)
-+        {
-+            PVR_DPF((PVR_DBG_ERROR,
-+                     "%s: Failed to allocate start times array",
-+                     __func__));
-+            goto fail;
-+        }
-+
-+        eError = DevmemAcquireCpuVirtAddr(psDevInfo->psStartTimeMemDesc,
-+                                          (void **)& psDevInfo->pui64StartTimeById);
-+
-+        if (eError != PVRSRV_OK)
-+        {
-+            PVR_DPF((PVR_DBG_ERROR,
-+                     "%s: Failed to map start times array",
-+                     __func__));
-+            goto fail;
-+        }
-+
-+        eError = DevmemFwAllocate(psDevInfo,
-+                                  sizeof(IMG_UINT64) * RGX_MAX_TIMER_QUERIES,
-+                                  uiMemAllocFlags |
-+                                  PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
-+                                  "FwEndTimesArray",
-+                                  & psDevInfo->psEndTimeMemDesc);
-+        if (eError != PVRSRV_OK)
-+        {
-+            PVR_DPF((PVR_DBG_ERROR,
-+                     "%s: Failed to allocate end times array",
-+                     __func__));
-+            goto fail;
-+        }
-+
-+        eError = DevmemAcquireCpuVirtAddr(psDevInfo->psEndTimeMemDesc,
-+                                          (void **)& psDevInfo->pui64EndTimeById);
-+
-+        if (eError != PVRSRV_OK)
-+        {
-+            PVR_DPF((PVR_DBG_ERROR,
-+                     "%s: Failed to map end times array",
-+                     __func__));
-+            goto fail;
-+        }
-+
-+        eError = DevmemFwAllocate(psDevInfo,
-+                                  sizeof(IMG_UINT32) * RGX_MAX_TIMER_QUERIES,
-+                                  uiMemAllocFlags,
-+                                  "FwCompletedOpsArray",
-+                                  & psDevInfo->psCompletedMemDesc);
-+        if (eError != PVRSRV_OK)
-+        {
-+            PVR_DPF((PVR_DBG_ERROR,
-+                     "%s: Failed to allocate completed ops array",
-+                     __func__));
-+            goto fail;
-+        }
-+
-+        eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCompletedMemDesc,
-+                                          (void **)& psDevInfo->pui32CompletedById);
-+
-+        if (eError != PVRSRV_OK)
-+        {
-+            PVR_DPF((PVR_DBG_ERROR,
-+                     "%s: Failed to map completed ops array",
-+                     __func__));
-+            goto fail;
-+        }
-+    }
-+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
-+    eError = OSLockCreate(&psDevInfo->hTimerQueryLock);
-+    if (eError != PVRSRV_OK)
-+    {
-+        PVR_DPF((PVR_DBG_ERROR,
-+                 "%s: Failed to allocate lock for timer query",
-+                 __func__));
-+        goto fail;
-+    }
-+#endif
-+#if defined(SUPPORT_TBI_INTERFACE)
-+#if !defined(PDUMP)
-+    /* allocate only if required */
-+    if (RGXTBIBufferIsInitRequired(psDevInfo))
-+#endif /* !defined(PDUMP) */
-+    {
-+        /* When PDUMP is enabled, ALWAYS allocate on-demand TBI buffer resource
-+         * (irrespective of loggroup(s) enabled), given that logtype/loggroups
-+         * can be set during PDump playback in logconfig, at any point of time
-+         */
-+        eError = RGXTBIBufferInitOnDemandResources(psDevInfo);
-+        PVR_LOG_GOTO_IF_ERROR(eError, "RGXTBIBufferInitOnDemandResources", fail);
-+    }
-+
-+    psFwSysInitScratch->sTBIBuf = psDevInfo->sRGXFWIfTBIBuffer;
-+#endif /* defined(SUPPORT_TBI_INTERFACE) */
-+
-+    /* Allocate shared buffer for GPU utilisation.
-+     * Enable FIRMWARE_CACHED to reduce read latency in the FW.
-+     * The FW flushes the cache after any writes.
-+ */ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS | -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)) & -+ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), -+ sizeof(RGXFWIF_GPU_UTIL_FWCB), -+ "FwGPUUtilisationBuffer", -+ &psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc, -+ &psFwSysInitScratch->sGpuUtilFWCbCtl, -+ (void**) &psDevInfo->psRGXFWIfGpuUtilFWCb, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "GPU Utilisation Buffer ctl allocation", fail); -+ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS & -+ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), -+ sizeof(RGXFWIF_RUNTIME_CFG), -+ "FwRuntimeCfg", -+ &psDevInfo->psRGXFWIfRuntimeCfgMemDesc, -+ &psFwSysInitScratch->sRuntimeCfg, -+ (void**) &psDevInfo->psRGXFWIfRuntimeCfg, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Firmware runtime configuration memory allocation", fail); -+ -+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) -+ eError = RGXSetupFwAllocation(psDevInfo, -+ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & -+ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), -+ sizeof(RGXFWIF_REG_CFG), -+ "FwRegisterConfigStructure", -+ &psDevInfo->psRGXFWIfRegCfgMemDesc, -+ &psFwSysInitScratch->sRegCfg, -+ NULL, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Firmware register user configuration structure allocation", fail); -+#endif -+ -+#if defined(SUPPORT_SECURE_CONTEXT_SWITCH) -+ eError = RGXSetupFwAllocation(psDevInfo, -+ (RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS | PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)) & -+ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), -+ RGXFW_SCRATCH_BUF_SIZE, -+ "FwScratchBuf", -+ &psDevInfo->psRGXFWScratchBufMemDesc, -+ &psFwSysInitScratch->pbFwScratchBuf, -+ NULL, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Firmware scratch buffer allocation", fail); -+#endif -+ -+ psDevInfo->ui32RGXFWIfHWPerfBufSize = GetHwPerfBufferSize(ui32HWPerfFWBufSizeKB); -+ -+ /* Host L2 HWPERF buffer size in bytes must be bigger than the L1 buffer -+ * accessed by the FW. The MISR may try to write one packet the size of the L1 -+ * buffer in some scenarios. When logging is enabled in the MISR, it can be seen -+ * if the L2 buffer hits a full condition. The closer in size the L2 and L1 buffers -+ * are the more chance of this happening. -+ * Size chosen to allow MISR to write an L1 sized packet and for the client -+ * application/daemon to drain a L1 sized packet e.g. ~ 1.5*L1. -+ */ -+ psDevInfo->ui32RGXL2HWPerfBufSize = psDevInfo->ui32RGXFWIfHWPerfBufSize + -+ (psDevInfo->ui32RGXFWIfHWPerfBufSize>>1); -+ -+ /* Second stage initialisation or HWPerf, hHWPerfLock created in first -+ * stage. See RGXRegisterDevice() call to RGXHWPerfInit(). */ -+ if (psDevInfo->ui64HWPerfFilter[RGX_HWPERF_L2_STREAM_HWPERF] == 0) -+ { -+ psFwSysInitScratch->ui64HWPerfFilter = -+ RGXHWPerfFwSetEventFilter(psDevInfo, RGX_HWPERF_L2_STREAM_HWPERF, -+ ui64HWPerfFilter); -+ } -+ else -+ { -+ /* The filter has already been modified. This can happen if -+ * pvr/apphint/EnableFTraceGPU was enabled. */ -+ psFwSysInitScratch->ui64HWPerfFilter = psDevInfo->ui64HWPerfFwFilter; -+ } -+ -+#if !defined(PDUMP) -+ /* Allocate if HWPerf filter has already been set. This is possible either -+ * by setting a proper AppHint or enabling GPU ftrace events. 
*/ -+ if (psFwSysInitScratch->ui64HWPerfFilter != 0) -+#endif -+ { -+ /* When PDUMP is enabled, ALWAYS allocate on-demand HWPerf resources -+ * (irrespective of HWPerf enabled or not), given that HWPerf can be -+ * enabled during PDump playback via RTCONF at any point of time. */ -+ eError = RGXHWPerfInitOnDemandL1Buffer(psDevInfo); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInitOnDemandL1Buffer", fail); -+ -+ eError = RGXHWPerfInitOnDemandL2Stream(psDevInfo, RGX_HWPERF_L2_STREAM_HWPERF); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInitOnDemandL2Stream", fail); -+ } -+ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ RGX_FWCOMCTX_ALLOCFLAGS & -+ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), -+ ui32HWPerfCountersDataSize, -+ "FwHWPerfControlStructure", -+ &psDevInfo->psRGXFWIfHWPerfCountersMemDesc, -+ &psFwSysInitScratch->sHWPerfCtl, -+ NULL, -+ RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Firmware HW Perf control struct allocation", fail); -+ -+ psDevInfo->bPDPEnabled = (ui32ConfigFlags & RGXFWIF_INICFG_DISABLE_PDP_EN) -+ ? IMG_FALSE : IMG_TRUE; -+ -+ psFwSysInitScratch->eFirmwarePerf = eFirmwarePerf; -+ -+#if defined(PDUMP) -+ /* default: no filter */ -+ psFwSysInitScratch->sPIDFilter.eMode = RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT; -+ psFwSysInitScratch->sPIDFilter.asItems[0].uiPID = 0; -+#endif -+ -+#if defined(SUPPORT_VALIDATION) -+ { -+ IMG_UINT32 dm; -+ -+ /* TPU trilinear rounding mask override */ -+ for (dm = 0; dm < RGXFWIF_TPU_DM_LAST; dm++) -+ { -+ psFwSysInitScratch->aui32TPUTrilinearFracMask[dm] = pui32TPUTrilinearFracMask[dm]; -+ } -+ } -+#endif -+ -+#if defined(SUPPORT_SECURITY_VALIDATION) -+ { -+ PVRSRV_MEMALLOCFLAGS_T uiFlags = RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS; -+ PVRSRV_SET_PHYS_HEAP_HINT(FW_PRIV_DATA, uiFlags); -+ -+ PDUMPCOMMENT(psDeviceNode, "Allocate non-secure buffer for security validation test"); -+ eError = DevmemFwAllocateExportable(psDeviceNode, -+ OSGetPageSize(), -+ OSGetPageSize(), -+ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, -+ "FwExNonSecureBuffer", -+ &psDevInfo->psRGXFWIfNonSecureBufMemDesc); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Non-secure buffer allocation", fail); -+ -+ eError = RGXSetFirmwareAddress(&psFwSysInitScratch->pbNonSecureBuffer, -+ psDevInfo->psRGXFWIfNonSecureBufMemDesc, -+ 0, RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", fail); -+ -+ PDUMPCOMMENT(psDeviceNode, "Allocate secure buffer for security validation test"); -+ eError = DevmemFwAllocateExportable(psDeviceNode, -+ OSGetPageSize(), -+ OSGetPageSize(), -+ uiFlags, -+ "FwExSecureBuffer", -+ &psDevInfo->psRGXFWIfSecureBufMemDesc); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Secure buffer allocation", fail); -+ -+ eError = RGXSetFirmwareAddress(&psFwSysInitScratch->pbSecureBuffer, -+ psDevInfo->psRGXFWIfSecureBufMemDesc, -+ 0, RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", fail); -+ } -+#endif /* SUPPORT_SECURITY_VALIDATION */ -+ -+#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT) || RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_DELTA_CORRELATION)) -+ { -+ psFwSysInitScratch->ui32TFBCCompressionControl = -+ (ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK) >> RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT; -+ } -+#endif /* SUPPORT_SECURITY_VALIDATION */ -+ -+ /* Initialize FW started flag */ -+ psFwSysInitScratch->bFirmwareStarted = IMG_FALSE; -+ psFwSysInitScratch->ui32MarkerVal = 1; -+ 
psDevInfo->psRGXFWIfRuntimeCfg->ui32VzConnectionCooldownPeriodInSec = RGX_VZ_CONNECTION_COOLDOWN_PERIOD; -+ -+ if (!psDeviceNode->bAutoVzFwIsUp) -+ { -+ IMG_UINT32 ui32DriverID; -+ -+ RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; -+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; -+ -+ /* Required info by FW to calculate the ActivePM idle timer latency */ -+ psFwSysInitScratch->ui32InitialCoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; -+ psFwSysInitScratch->ui32InitialActivePMLatencyms = psRGXData->psRGXTimingInfo->ui32ActivePMLatencyms; -+ -+ /* Initialise variable runtime configuration to the system defaults */ -+ psRuntimeCfg->ui32CoreClockSpeed = psFwSysInitScratch->ui32InitialCoreClockSpeed; -+ psRuntimeCfg->ui32ActivePMLatencyms = psFwSysInitScratch->ui32InitialActivePMLatencyms; -+ psRuntimeCfg->bActivePMLatencyPersistant = IMG_TRUE; -+ psRuntimeCfg->ui32HCSDeadlineMS = RGX_HCS_DEFAULT_DEADLINE_MS; -+ -+ if ((RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US > 0U) && (RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US < 1000U)) -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ PVR_LOG_GOTO_IF_ERROR(eError, -+ "RGXSetupFwSysData: RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US must be either 0 (disabled) or greater than 1000", -+ fail); -+ } -+ else -+ { -+ psRuntimeCfg->ui32WdgPeriodUs = RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US; -+ } -+ -+ if (PVRSRV_VZ_MODE_IS(NATIVE)) -+ { -+ psRuntimeCfg->aui32DriverPriority[RGXFW_HOST_DRIVER_ID] = 0; -+ psRuntimeCfg->aui32DriverIsolationGroup[RGXFW_HOST_DRIVER_ID] = RGX_DRIVERID_0_DEFAULT_ISOLATION_GROUP; -+ psRuntimeCfg->aui32DriverTimeSlice[RGXFW_HOST_DRIVER_ID] = (IMG_UINT8)RGX_DRIVERID_0_DEFAULT_TIME_SLICE; -+ } -+ else -+ { -+ FOREACH_SUPPORTED_DRIVER(ui32DriverID) -+ { -+ const IMG_INT32 ai32DefaultPriorities[RGXFW_MAX_NUM_OSIDS] = -+ {RGX_DRIVERID_0_DEFAULT_PRIORITY, -+#if (RGXFW_MAX_NUM_OSIDS > 1) -+ RGX_DRIVERID_1_DEFAULT_PRIORITY, -+#if (RGXFW_MAX_NUM_OSIDS > 2) -+ RGX_DRIVERID_2_DEFAULT_PRIORITY, -+ RGX_DRIVERID_3_DEFAULT_PRIORITY, -+ RGX_DRIVERID_4_DEFAULT_PRIORITY, -+ RGX_DRIVERID_5_DEFAULT_PRIORITY, -+ RGX_DRIVERID_6_DEFAULT_PRIORITY, -+ RGX_DRIVERID_7_DEFAULT_PRIORITY -+#if (RGXFW_MAX_NUM_OSIDS > 8) -+#error "Support for more than 8 OSIDs not implemented." -+#endif -+#endif -+#endif -+ }; -+ -+ const IMG_UINT32 aui32DefaultIsolationGroups[RGXFW_MAX_NUM_OSIDS] = -+ {RGX_DRIVERID_0_DEFAULT_ISOLATION_GROUP, -+#if (RGXFW_MAX_NUM_OSIDS > 1) -+ RGX_DRIVERID_1_DEFAULT_ISOLATION_GROUP, -+#if (RGXFW_MAX_NUM_OSIDS > 2) -+ RGX_DRIVERID_2_DEFAULT_ISOLATION_GROUP, -+ RGX_DRIVERID_3_DEFAULT_ISOLATION_GROUP, -+ RGX_DRIVERID_4_DEFAULT_ISOLATION_GROUP, -+ RGX_DRIVERID_5_DEFAULT_ISOLATION_GROUP, -+ RGX_DRIVERID_6_DEFAULT_ISOLATION_GROUP, -+ RGX_DRIVERID_7_DEFAULT_ISOLATION_GROUP, -+#endif -+#endif -+ }; -+ -+ const IMG_INT32 ai32DefaultTimeSlice[RGXFW_MAX_NUM_OSIDS] = -+ {RGX_DRIVERID_0_DEFAULT_TIME_SLICE, -+#if (RGXFW_MAX_NUM_OSIDS > 1) -+ RGX_DRIVERID_1_DEFAULT_TIME_SLICE, -+#if (RGXFW_MAX_NUM_OSIDS > 2) -+ RGX_DRIVERID_2_DEFAULT_TIME_SLICE, -+ RGX_DRIVERID_3_DEFAULT_TIME_SLICE, -+ RGX_DRIVERID_4_DEFAULT_TIME_SLICE, -+ RGX_DRIVERID_5_DEFAULT_TIME_SLICE, -+ RGX_DRIVERID_6_DEFAULT_TIME_SLICE, -+ RGX_DRIVERID_7_DEFAULT_TIME_SLICE -+#if (RGXFW_MAX_NUM_OSIDS > 8) -+#error "Support for more than 8 OSIDs not implemented." 
-+#endif -+#endif -+#endif -+ }; -+ -+ /* Set up initial priorities between different OSes */ -+ psRuntimeCfg->aui32DriverPriority[ui32DriverID] = (IMG_UINT32)ai32DefaultPriorities[ui32DriverID]; -+ psRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID] = aui32DefaultIsolationGroups[ui32DriverID]; -+ psRuntimeCfg->aui32DriverTimeSlice[ui32DriverID] = (IMG_UINT32)(ai32DefaultTimeSlice[ui32DriverID] <= -+ PVRSRV_VZ_TIME_SLICE_MAX) ? -+ ai32DefaultTimeSlice[ui32DriverID]:(0); -+ } -+ } -+ psRuntimeCfg->ui32DriverTimeSliceInterval = RGX_DRIVER_DEFAULT_TIME_SLICE_INTERVAL; -+ -+#if defined(PVR_ENABLE_PHR) && defined(PDUMP) -+ psRuntimeCfg->ui32PHRMode = RGXFWIF_PHR_MODE_RD_RESET; -+#else -+ psRuntimeCfg->ui32PHRMode = 0; -+#endif -+ -+ /* Initialize the DefaultDustsNumInit Field to Max Dusts */ -+ psRuntimeCfg->ui32DefaultDustsNumInit = psDevInfo->sDevFeatureCfg.ui32MAXDustCount; -+ -+ /* flush write buffers for psDevInfo->psRGXFWIfRuntimeCfg */ -+ OSWriteMemoryBarrier(psDevInfo->psRGXFWIfRuntimeCfg); -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfRuntimeCfg, FLUSH); -+ -+ /* Setup FW coremem data */ -+ if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc) -+ { -+ psFwSysInitScratch->sCorememDataStore.pbyFWAddr = psDevInfo->sFWCorememDataStoreFWAddr; -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) -+ { -+ RGXSetMetaDMAAddress(&psFwSysInitScratch->sCorememDataStore, -+ psDevInfo->psRGXFWIfCorememDataStoreMemDesc, -+ &psFwSysInitScratch->sCorememDataStore.pbyFWAddr, -+ 0); -+ } -+ } -+ -+ psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags = ui32ConfigFlags & RGXFWIF_INICFG_ALL; -+ psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlagsExt = ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_ALL; -+ -+ /* Initialise GPU utilisation buffer */ -+ { -+ IMG_UINT64 ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(), RGXFWIF_GPU_UTIL_STATE_IDLE); -+ RGXFWIF_DM eDM; -+ -+ psDevInfo->psRGXFWIfGpuUtilFWCb->ui64GpuLastWord = ui64LastWord; -+ -+ for (eDM = 0; eDM < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; eDM++) -+ { -+ IMG_UINT32 ui32DriverID; -+ -+ FOREACH_SUPPORTED_DRIVER(ui32DriverID) -+ { -+ psDevInfo->psRGXFWIfGpuUtilFWCb->aaui64DMOSLastWord[eDM][ui32DriverID] = ui64LastWord; -+ } -+ } -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfGpuUtilFWCb, FLUSH); -+ } -+ -+ /* init HWPERF data */ -+ psDevInfo->psRGXFWIfFwSysData->sHWPerfCtrl.ui32HWPerfRIdx = 0; -+ psDevInfo->psRGXFWIfFwSysData->sHWPerfCtrl.ui32HWPerfWIdx = 0; -+ psDevInfo->psRGXFWIfFwSysData->sHWPerfCtrl.ui32HWPerfWrapCount = 0; -+ psDevInfo->psRGXFWIfFwSysData->ui32HWPerfSize = psDevInfo->ui32RGXFWIfHWPerfBufSize; -+ psDevInfo->psRGXFWIfFwSysData->ui32HWPerfUt = 0; -+ psDevInfo->psRGXFWIfFwSysData->ui32HWPerfDropCount = 0; -+ psDevInfo->psRGXFWIfFwSysData->ui32FirstDropOrdinal = 0; -+ psDevInfo->psRGXFWIfFwSysData->ui32LastDropOrdinal = 0; -+ -+ psDevInfo->psRGXFWIfFwSysData->ui32MemFaultCheck = 0; -+ -+ // flush write buffers for psRGXFWIfFwSysData -+ OSWriteMemoryBarrier(psDevInfo->psRGXFWIfFwSysData); -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, FLUSH); -+ -+ /*Send through the BVNC Feature Flags*/ -+ eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, &psFwSysInitScratch->sBvncKmFeatureFlags); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXServerFeatureFlagsToHWPerfFlags", fail); -+ -+ /* populate the real FwOsInit structure with the values stored in the scratch copy */ -+ OSCachedMemCopyWMB(psDevInfo->psRGXFWIfSysInit, psFwSysInitScratch, sizeof(RGXFWIF_SYSINIT)); -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfSysInit, -+ FLUSH); -+ } -+ -+ 
OSFreeMem(psFwSysInitScratch); -+ -+#if defined(SUPPORT_VALIDATION) -+ OSFreeAppHintState(pvAppHintState); -+#endif -+ -+ return PVRSRV_OK; -+ -+fail: -+ if (psFwSysInitScratch) -+ { -+ OSFreeMem(psFwSysInitScratch); -+ } -+ -+ RGXFreeFwSysData(psDevInfo); -+ -+ PVR_ASSERT(eError != PVRSRV_OK); -+#if defined(SUPPORT_VALIDATION) -+ OSFreeAppHintState(pvAppHintState); -+#endif -+ return eError; -+} -+ -+/*! -+******************************************************************************* -+ @Function RGXSetupFwOsData -+ -+ @Description Sets up all os-specific firmware related data -+ -+ @Input psDevInfo -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32KCCBSizeLog2, -+ IMG_UINT32 ui32HWRDebugDumpLimit, -+ IMG_UINT32 ui32FwOsCfgFlags) -+{ -+ PVRSRV_ERROR eError; -+ RGXFWIF_OSINIT sFwOsInitScratch; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ OSCachedMemSet(&sFwOsInitScratch, 0, sizeof(RGXFWIF_OSINIT)); -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ eError = RGXSetupFwGuardPage(psDevInfo); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware heap guard pages", fail); -+ } -+ -+ /* Memory tracking the connection state should be non-volatile and -+ * is not cleared on allocation to prevent loss of pre-reset information */ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS & -+ ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC, -+ sizeof(RGXFWIF_CONNECTION_CTL), -+ "FwConnectionCtl", -+ &psDevInfo->psRGXFWIfConnectionCtlMemDesc, -+ NULL, -+ (void**) &psDevInfo->psRGXFWIfConnectionCtl, -+ RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Connection Control structure allocation", fail); -+ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS | -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED), -+ sizeof(RGXFWIF_OSINIT), -+ "FwOsInitStructure", -+ &psDevInfo->psRGXFWIfOsInitMemDesc, -+ NULL, -+ (void**) &psDevInfo->psRGXFWIfOsInit, -+ RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Os Init structure allocation", fail); -+ -+ /* init HWR frame info */ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, -+ sizeof(RGXFWIF_HWRINFOBUF), -+ "FwHWRInfoBuffer", -+ &psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc, -+ &sFwOsInitScratch.sRGXFWIfHWRInfoBufCtl, -+ (void**) &psDevInfo->psRGXFWIfHWRInfoBufCtl, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "HWR Info Buffer allocation", fail); -+ -+ /* Might be uncached. 
Be conservative and use a DeviceMemSet */ -+ OSDeviceMemSet(psDevInfo->psRGXFWIfHWRInfoBufCtl, 0, sizeof(RGXFWIF_HWRINFOBUF)); -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfHWRInfoBufCtl, FLUSH); -+ -+ /* Allocate a sync for power management */ -+ eError = SyncPrimContextCreate(psDevInfo->psDeviceNode, -+ &psDevInfo->hSyncPrimContext); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Sync primitive context allocation", fail); -+ -+ eError = SyncPrimAlloc(psDevInfo->hSyncPrimContext, &psDevInfo->psPowSyncPrim, "fw power ack"); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Sync primitive allocation", fail); -+ -+ /* Set up kernel CCB */ -+ eError = RGXSetupCCB(psDevInfo, -+ &psDevInfo->psKernelCCBCtl, -+ &psDevInfo->psKernelCCBCtlLocal, -+ &psDevInfo->psKernelCCBCtlMemDesc, -+ &psDevInfo->psKernelCCB, -+ &psDevInfo->psKernelCCBMemDesc, -+ &sFwOsInitScratch.psKernelCCBCtl, -+ &sFwOsInitScratch.psKernelCCB, -+ ui32KCCBSizeLog2, -+ sizeof(RGXFWIF_KCCB_CMD), -+ (RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS | -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)), -+ "FwKernelCCB"); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Kernel CCB allocation", fail); -+ -+ /* KCCB additionally uses a return slot array for FW to be able to send back -+ * return codes for each required command -+ */ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, -+ (1U << ui32KCCBSizeLog2) * sizeof(IMG_UINT32), -+ "FwKernelCCBRtnSlots", -+ &psDevInfo->psKernelCCBRtnSlotsMemDesc, -+ &sFwOsInitScratch.psKernelCCBRtnSlots, -+ (void**) &psDevInfo->pui32KernelCCBRtnSlots, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Kernel CCB return slot array allocation", fail); -+ -+ /* Set up firmware CCB */ -+ eError = RGXSetupCCB(psDevInfo, -+ &psDevInfo->psFirmwareCCBCtl, -+ &psDevInfo->psFirmwareCCBCtlLocal, -+ &psDevInfo->psFirmwareCCBCtlMemDesc, -+ &psDevInfo->psFirmwareCCB, -+ &psDevInfo->psFirmwareCCBMemDesc, -+ &sFwOsInitScratch.psFirmwareCCBCtl, -+ &sFwOsInitScratch.psFirmwareCCB, -+ RGXFWIF_FWCCB_NUMCMDS_LOG2, -+ sizeof(RGXFWIF_FWCCB_CMD), -+ RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, -+ "FwCCB"); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Firmware CCB allocation", fail); -+ -+ eError = RGXSetupFwAllocation(psDevInfo, -+ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, -+ sizeof(RGXFWIF_OSDATA), -+ "FwOsData", -+ &psDevInfo->psRGXFWIfFwOsDataMemDesc, -+ &sFwOsInitScratch.sFwOsData, -+ (void**) &psDevInfo->psRGXFWIfFwOsData, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); -+ -+ psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags = ui32FwOsCfgFlags & RGXFWIF_INICFG_OS_ALL; -+ -+ eError = SyncPrimGetFirmwareAddr(psDevInfo->psPowSyncPrim, &psDevInfo->psRGXFWIfFwOsData->sPowerSync.ui32Addr); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Get Sync Prim FW address", fail); -+ -+ /* flush write buffers for psRGXFWIfFwOsData */ -+ OSWriteMemoryBarrier(psDevInfo->psRGXFWIfFwOsData); -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwOsData, -+ FLUSH); -+ -+ sFwOsInitScratch.ui32HWRDebugDumpLimit = ui32HWRDebugDumpLimit; -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Set up Workload Estimation firmware CCB */ -+ eError = RGXSetupCCB(psDevInfo, -+ &psDevInfo->psWorkEstFirmwareCCBCtl, -+ &psDevInfo->psWorkEstFirmwareCCBCtlLocal, -+ &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc, -+ &psDevInfo->psWorkEstFirmwareCCB, -+ &psDevInfo->psWorkEstFirmwareCCBMemDesc, -+ &sFwOsInitScratch.psWorkEstFirmwareCCBCtl, -+ &sFwOsInitScratch.psWorkEstFirmwareCCB, -+ RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2, -+ 
sizeof(RGXFWIF_WORKEST_FWCCB_CMD), -+ RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, -+ "FwWEstCCB"); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Workload Estimation Firmware CCB allocation", fail); -+ } -+#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */ -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Initialise the compatibility check data */ -+ RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sFWBVNC); -+ RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sHWBVNC); -+ } -+ -+ /* populate the real FwOsInit structure with the values stored in the scratch copy */ -+ OSCachedMemCopyWMB(psDevInfo->psRGXFWIfOsInit, &sFwOsInitScratch, sizeof(RGXFWIF_OSINIT)); -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfOsInit, -+ FLUSH); -+ -+ return PVRSRV_OK; -+ -+fail: -+ RGXFreeFwOsData(psDevInfo); -+ -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+/*! -+******************************************************************************* -+ @Function RGXSetupFirmware -+ -+ @Description Sets up all firmware related data -+ -+ @Input psDevInfo -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_BOOL bEnableSignatureChecks, -+ IMG_UINT32 ui32SignatureChecksBufSize, -+ IMG_UINT32 ui32HWPerfFWBufSizeKB, -+ IMG_UINT64 ui64HWPerfFilter, -+ IMG_UINT32 ui32ConfigFlags, -+ IMG_UINT32 ui32ConfigFlagsExt, -+ IMG_UINT32 ui32FwOsCfgFlags, -+ IMG_UINT32 ui32LogType, -+ IMG_UINT32 ui32FilterFlags, -+ IMG_UINT32 ui32JonesDisableMask, -+ IMG_UINT32 ui32HWRDebugDumpLimit, -+ IMG_UINT32 ui32HWPerfCountersDataSize, -+ IMG_UINT32 *pui32TPUTrilinearFracMask, -+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf, -+ FW_PERF_CONF eFirmwarePerf, -+ IMG_UINT32 ui32KCCBSizeLog2) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ eError = RGXSetupFwOsData(psDeviceNode, -+ ui32KCCBSizeLog2, -+ ui32HWRDebugDumpLimit, -+ ui32FwOsCfgFlags); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware os data", fail); -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Guest drivers do not configure system-wide firmware data */ -+ psDevInfo->psRGXFWIfSysInit = NULL; -+ } -+ else -+ { -+ /* Native and Host drivers must initialise the firmware's system data */ -+ eError = RGXSetupFwSysData(psDeviceNode, -+ bEnableSignatureChecks, -+ ui32SignatureChecksBufSize, -+ ui32HWPerfFWBufSizeKB, -+ ui64HWPerfFilter, -+ ui32ConfigFlags, -+ ui32ConfigFlagsExt, -+ ui32LogType, -+ ui32FilterFlags, -+ ui32JonesDisableMask, -+ ui32HWPerfCountersDataSize, -+ pui32TPUTrilinearFracMask, -+ eRGXRDPowerIslandConf, -+ eFirmwarePerf); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware system data", fail); -+ } -+ -+ psDevInfo->bFirmwareInitialised = IMG_TRUE; -+ -+#if defined(PDUMP) -+ RGXPDumpLoadFWInitData(psDevInfo, -+ ui32HWPerfCountersDataSize, -+ bEnableSignatureChecks); -+#endif /* PDUMP */ -+ -+fail: -+ return eError; -+} -+ -+/*! 
-+******************************************************************************* -+ @Function RGXFreeFwSysData -+ -+ @Description Frees all system-wide firmware related data -+ -+ @Input psDevInfo -+******************************************************************************/ -+static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ psDevInfo->bFirmwareInitialised = IMG_FALSE; -+ -+ if (psDevInfo->psRGXFWAlignChecksMemDesc) -+ { -+ RGXFWFreeAlignChecks(psDevInfo); -+ } -+ -+#if defined(PDUMP) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM) && -+ psDevInfo->psRGXFWSigTDM2DChecksMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTDM2DChecksMemDesc); -+ psDevInfo->psRGXFWSigTDM2DChecksMemDesc = NULL; -+ } -+ -+ if (psDevInfo->psRGXFWSigTAChecksMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTAChecksMemDesc); -+ psDevInfo->psRGXFWSigTAChecksMemDesc = NULL; -+ } -+ -+ if (psDevInfo->psRGXFWSig3DChecksMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSig3DChecksMemDesc); -+ psDevInfo->psRGXFWSig3DChecksMemDesc = NULL; -+ } -+#endif -+ -+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) -+ if (psDevInfo->psCounterBufferMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psCounterBufferMemDesc); -+ psDevInfo->psCounterBufferMemDesc = NULL; -+ } -+#endif -+ -+#if defined(SUPPORT_FIRMWARE_GCOV) -+ if (psDevInfo->psFirmwareGcovBufferMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psFirmwareGcovBufferMemDesc); -+ psDevInfo->psFirmwareGcovBufferMemDesc = NULL; -+ } -+#endif -+ -+ RGXSetupFaultReadRegisterRollback(psDevInfo); -+ -+ if (psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc) -+ { -+ if (psDevInfo->psRGXFWIfGpuUtilFWCb != NULL) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc); -+ psDevInfo->psRGXFWIfGpuUtilFWCb = NULL; -+ } -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc); -+ psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc = NULL; -+ } -+ -+ if (psDevInfo->psRGXFWIfRuntimeCfgMemDesc) -+ { -+ if (psDevInfo->psRGXFWIfRuntimeCfg != NULL) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfRuntimeCfgMemDesc); -+ psDevInfo->psRGXFWIfRuntimeCfg = NULL; -+ } -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfRuntimeCfgMemDesc); -+ psDevInfo->psRGXFWIfRuntimeCfgMemDesc = NULL; -+ } -+ -+ if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc) -+ { -+ psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL; -+ } -+ -+ if (psDevInfo->psRGXFWIfTraceBufCtlMemDesc) -+ { -+ if (psDevInfo->psRGXFWIfTraceBufCtl != NULL) -+ { -+ /* deinit/free the tracebuffer allocation */ -+ RGXTraceBufferDeinit(psDevInfo); -+ -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc); -+ psDevInfo->psRGXFWIfTraceBufCtl = NULL; -+ } -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufCtlMemDesc); -+ psDevInfo->psRGXFWIfTraceBufCtlMemDesc = NULL; -+ } -+ -+ if (psDevInfo->psRGXFWIfFwSysDataMemDesc) -+ { -+ if (psDevInfo->psRGXFWIfFwSysData != NULL) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfFwSysDataMemDesc); -+ psDevInfo->psRGXFWIfFwSysData = NULL; -+ } -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfFwSysDataMemDesc); -+ psDevInfo->psRGXFWIfFwSysDataMemDesc = NULL; -+ } -+ -+#if defined(SUPPORT_TBI_INTERFACE) -+ if (psDevInfo->psRGXFWIfTBIBufferMemDesc) -+ { -+ RGXTBIBufferDeinit(psDevInfo); -+ } -+#endif -+ -+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) -+ if (psDevInfo->psRGXFWIfRegCfgMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, 
psDevInfo->psRGXFWIfRegCfgMemDesc); -+ psDevInfo->psRGXFWIfRegCfgMemDesc = NULL; -+ } -+#endif -+ if (psDevInfo->psRGXFWIfHWPerfCountersMemDesc) -+ { -+ RGXUnsetFirmwareAddress(psDevInfo->psRGXFWIfHWPerfCountersMemDesc); -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc); -+ psDevInfo->psRGXFWIfHWPerfCountersMemDesc = NULL; -+ } -+ -+#if defined(SUPPORT_SECURITY_VALIDATION) -+ if (psDevInfo->psRGXFWIfNonSecureBufMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfNonSecureBufMemDesc); -+ psDevInfo->psRGXFWIfNonSecureBufMemDesc = NULL; -+ } -+ -+ if (psDevInfo->psRGXFWIfSecureBufMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfSecureBufMemDesc); -+ psDevInfo->psRGXFWIfSecureBufMemDesc = NULL; -+ } -+#endif -+ -+#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)) -+ { -+ _FreeSLC3Fence(psDevInfo); -+ } -+#endif -+#if defined(SUPPORT_PDVFS) -+ if (psDevInfo->psRGXFWIFCoreClkRateMemDesc) -+ { -+ if (psDevInfo->pui32RGXFWIFCoreClkRate != NULL) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIFCoreClkRateMemDesc); -+ psDevInfo->pui32RGXFWIFCoreClkRate = NULL; -+ } -+ -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIFCoreClkRateMemDesc); -+ psDevInfo->psRGXFWIFCoreClkRateMemDesc = NULL; -+ } -+#endif -+ -+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY) -+ if (psDevInfo->psRGXFWIfActiveContextBufDesc) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfActiveContextBufDesc); -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfActiveContextBufDesc); -+ psDevInfo->psRGXFWIfActiveContextBufDesc = NULL; -+ } -+#endif -+} -+ -+/*! -+******************************************************************************* -+ @Function RGXFreeFwOsData -+ -+ @Description Frees all os-specific firmware related data -+ -+ @Input psDevInfo -+******************************************************************************/ -+static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ RGXFreeCCBReturnSlots(psDevInfo, -+ &psDevInfo->pui32KernelCCBRtnSlots, -+ &psDevInfo->psKernelCCBRtnSlotsMemDesc); -+ RGXFreeCCB(psDevInfo, -+ &psDevInfo->psKernelCCBCtl, -+ &psDevInfo->psKernelCCBCtlLocal, -+ &psDevInfo->psKernelCCBCtlMemDesc, -+ &psDevInfo->psKernelCCB, -+ &psDevInfo->psKernelCCBMemDesc); -+ -+ RGXFreeCCB(psDevInfo, -+ &psDevInfo->psFirmwareCCBCtl, -+ &psDevInfo->psFirmwareCCBCtlLocal, -+ &psDevInfo->psFirmwareCCBCtlMemDesc, -+ &psDevInfo->psFirmwareCCB, -+ &psDevInfo->psFirmwareCCBMemDesc); -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ RGXFreeCCB(psDevInfo, -+ &psDevInfo->psWorkEstFirmwareCCBCtl, -+ &psDevInfo->psWorkEstFirmwareCCBCtlLocal, -+ &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc, -+ &psDevInfo->psWorkEstFirmwareCCB, -+ &psDevInfo->psWorkEstFirmwareCCBMemDesc); -+ } -+#endif -+ -+ if (psDevInfo->psPowSyncPrim != NULL) -+ { -+ SyncPrimFree(psDevInfo->psPowSyncPrim); -+ psDevInfo->psPowSyncPrim = NULL; -+ } -+ -+ if (psDevInfo->hSyncPrimContext != (IMG_HANDLE) NULL) -+ { -+ SyncPrimContextDestroy(psDevInfo->hSyncPrimContext); -+ psDevInfo->hSyncPrimContext = (IMG_HANDLE) NULL; -+ } -+ -+ if (psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc) -+ { -+ if (psDevInfo->psRGXFWIfHWRInfoBufCtl != NULL) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc); -+ psDevInfo->psRGXFWIfHWRInfoBufCtl = NULL; -+ } -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc); -+ psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc = NULL; -+ } -+ -+ if 
(psDevInfo->psRGXFWIfFwOsDataMemDesc) -+ { -+ if (psDevInfo->psRGXFWIfFwOsData != NULL) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfFwOsDataMemDesc); -+ psDevInfo->psRGXFWIfFwOsData = NULL; -+ } -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfFwOsDataMemDesc); -+ psDevInfo->psRGXFWIfFwOsDataMemDesc = NULL; -+ } -+ -+ if (psDevInfo->psCompletedMemDesc) -+ { -+ if (psDevInfo->pui32CompletedById) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psCompletedMemDesc); -+ psDevInfo->pui32CompletedById = NULL; -+ } -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psCompletedMemDesc); -+ psDevInfo->psCompletedMemDesc = NULL; -+ } -+ if (psDevInfo->psEndTimeMemDesc) -+ { -+ if (psDevInfo->pui64EndTimeById) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psEndTimeMemDesc); -+ psDevInfo->pui64EndTimeById = NULL; -+ } -+ -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psEndTimeMemDesc); -+ psDevInfo->psEndTimeMemDesc = NULL; -+ } -+ if (psDevInfo->psStartTimeMemDesc) -+ { -+ if (psDevInfo->pui64StartTimeById) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psStartTimeMemDesc); -+ psDevInfo->pui64StartTimeById = NULL; -+ } -+ -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psStartTimeMemDesc); -+ psDevInfo->psStartTimeMemDesc = NULL; -+ } -+#if !defined(PVRSRV_USE_BRIDGE_LOCK) -+ if (psDevInfo->hTimerQueryLock) -+ { -+ OSLockDestroy(psDevInfo->hTimerQueryLock); -+ psDevInfo->hTimerQueryLock = NULL; -+ } -+#endif -+ -+ if (psDevInfo->psRGXFWHeapGuardPageReserveMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWHeapGuardPageReserveMemDesc); -+ } -+} -+ -+/*! -+******************************************************************************* -+ @Function RGXFreeFirmware -+ -+ @Description Frees all the firmware-related allocations -+ -+ @Input psDevInfo -+******************************************************************************/ -+void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ RGXFreeFwOsData(psDevInfo); -+ -+ if (psDevInfo->psRGXFWIfConnectionCtl) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfConnectionCtlMemDesc); -+ psDevInfo->psRGXFWIfConnectionCtl = NULL; -+ } -+ -+ if (psDevInfo->psRGXFWIfConnectionCtlMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfConnectionCtlMemDesc); -+ psDevInfo->psRGXFWIfConnectionCtlMemDesc = NULL; -+ } -+ -+ if (psDevInfo->psRGXFWIfOsInit) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfOsInitMemDesc); -+ psDevInfo->psRGXFWIfOsInit = NULL; -+ } -+ -+ if (psDevInfo->psRGXFWIfOsInitMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfOsInitMemDesc); -+ psDevInfo->psRGXFWIfOsInitMemDesc = NULL; -+ } -+ -+ RGXFreeFwSysData(psDevInfo); -+ if (psDevInfo->psRGXFWIfSysInit) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfSysInitMemDesc); -+ psDevInfo->psRGXFWIfSysInit = NULL; -+ } -+ -+ if (psDevInfo->psRGXFWIfSysInitMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfSysInitMemDesc); -+ psDevInfo->psRGXFWIfSysInitMemDesc = NULL; -+ } -+} -+ -+static INLINE PVRSRV_ERROR RGXUpdateLocalKCCBRoff(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ const RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; -+ RGXFWIF_CCB_CTL *psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; -+ IMG_UINT32 ui32ReadOffset; -+ -+ barrier(); /* Don't optimise order. Reads from device memory follow. 
*/ -+ -+ /* update KCCB read offset */ -+ RGXFwSharedMemCacheOpValue(psKCCBCtl->ui32ReadOffset, INVALIDATE); -+ ui32ReadOffset = psKCCBCtl->ui32ReadOffset; -+ -+ if (ui32ReadOffset > psKCCBCtlLocal->ui32WrapMask) -+ { -+ return PVRSRV_ERROR_KERNEL_CCB_OFFSET; -+ } -+ -+ psKCCBCtlLocal->ui32ReadOffset = ui32ReadOffset; -+ -+ return PVRSRV_OK; -+} -+ -+static INLINE PVRSRV_ERROR RGXUpdateLocalFWCCBWoff(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ const RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl; -+ RGXFWIF_CCB_CTL *psFWCCBCtlLocal = psDevInfo->psFirmwareCCBCtlLocal; -+ IMG_UINT32 ui32WriteOffset; -+ -+ barrier(); /* Don't optimise order. Reads from device memory follow. */ -+ -+ /* update FWCCB write offset */ -+ RGXFwSharedMemCacheOpValue(psFWCCBCtl->ui32WriteOffset, INVALIDATE); -+ ui32WriteOffset = psFWCCBCtl->ui32WriteOffset; -+ -+ if (ui32WriteOffset > psFWCCBCtlLocal->ui32WrapMask) -+ { -+ return PVRSRV_ERROR_KERNEL_CCB_OFFSET; -+ } -+ -+ psFWCCBCtlLocal->ui32WriteOffset = ui32WriteOffset; -+ -+ return PVRSRV_OK; -+} -+ -+/****************************************************************************** -+ FUNCTION : RGXAcquireKernelCCBSlot -+ -+ PURPOSE : Attempts to obtain a slot in the Kernel CCB -+ -+ PARAMETERS : psCCB - the CCB -+ : Address of space if available, NULL otherwise -+ -+ RETURNS : PVRSRV_ERROR -+******************************************************************************/ -+static PVRSRV_ERROR RGXAcquireKernelCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 *pui32Offset) -+{ -+ IMG_UINT32 ui32NextWriteOffset; -+ RGXFWIF_CCB_CTL *psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; -+ -+ ui32NextWriteOffset = (psKCCBCtlLocal->ui32WriteOffset + 1) & psKCCBCtlLocal->ui32WrapMask; -+ -+#if defined(PDUMP) -+ /* Wait for sufficient CCB space to become available */ -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, 0, -+ "Wait for kCCB woff=%u", ui32NextWriteOffset); -+ DevmemPDumpCBP(psDevInfo->psKernelCCBCtlMemDesc, -+ offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset), -+ ui32NextWriteOffset, -+ 1, -+ (psKCCBCtlLocal->ui32WrapMask + 1)); -+#endif -+ -+ if (ui32NextWriteOffset == psKCCBCtlLocal->ui32ReadOffset) -+ { -+ PVRSRV_ERROR eError = RGXUpdateLocalKCCBRoff(psDevInfo); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXUpdateLocalKCCBRoff"); -+ -+ if (ui32NextWriteOffset == psKCCBCtlLocal->ui32ReadOffset) -+ { -+ return PVRSRV_ERROR_KERNEL_CCB_FULL; -+ } -+ } -+ *pui32Offset = ui32NextWriteOffset; -+ return PVRSRV_OK; -+} -+ -+/****************************************************************************** -+ FUNCTION : RGXPollKernelCCBSlot -+ -+ PURPOSE : Poll for space in Kernel CCB -+ -+ PARAMETERS : psCCB - the CCB -+ : Address of space if available, NULL otherwise -+ -+ RETURNS : PVRSRV_ERROR -+******************************************************************************/ -+static PVRSRV_ERROR RGXPollKernelCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ IMG_UINT32 ui32NextWriteOffset; -+ RGXFWIF_CCB_CTL *psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; -+ -+ ui32NextWriteOffset = (psKCCBCtlLocal->ui32WriteOffset + 1) & psKCCBCtlLocal->ui32WrapMask; -+ -+ if (ui32NextWriteOffset != psKCCBCtlLocal->ui32ReadOffset) -+ { -+ return PVRSRV_OK; -+ } -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ PVRSRV_ERROR eError = RGXUpdateLocalKCCBRoff(psDevInfo); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXUpdateLocalKCCBRoff"); -+ -+ if (ui32NextWriteOffset != psKCCBCtlLocal->ui32ReadOffset) -+ { -+ return PVRSRV_OK; -+ } -+ -+ /* -+ * The following check doesn't impact performance, since the -+ * 
CPU has to wait for the GPU anyway (full kernel CCB). -+ */ -+ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) -+ { -+ return PVRSRV_ERROR_KERNEL_CCB_FULL; -+ } -+ -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ return PVRSRV_ERROR_KERNEL_CCB_FULL; -+} -+ -+/****************************************************************************** -+ FUNCTION : RGXGetCmdMemCopySize -+ -+ PURPOSE : Calculates actual size of KCCB command getting used -+ -+ PARAMETERS : eCmdType Type of KCCB command -+ -+ RETURNS : Returns actual size of KCCB command on success else zero -+******************************************************************************/ -+static IMG_UINT32 RGXGetCmdMemCopySize(RGXFWIF_KCCB_CMD_TYPE eCmdType) -+{ -+ /* First get offset of uCmdData inside the struct RGXFWIF_KCCB_CMD -+ * This will account alignment requirement of uCmdData union -+ * -+ * Then add command-data size depending on command type to calculate actual -+ * command size required to do mem copy -+ * -+ * NOTE: Make sure that uCmdData is the last member of RGXFWIF_KCCB_CMD struct. -+ */ -+ switch (eCmdType) -+ { -+ case RGXFWIF_KCCB_CMD_KICK: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_KICK_DATA); -+ } -+ case RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA); -+ } -+ case RGXFWIF_KCCB_CMD_MMUCACHE: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_MMUCACHEDATA); -+ } -+#if defined(SUPPORT_USC_BREAKPOINT) -+ case RGXFWIF_KCCB_CMD_BP: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_BPDATA); -+ } -+#endif -+ case RGXFWIF_KCCB_CMD_SLCFLUSHINVAL: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_SLCFLUSHINVALDATA); -+ } -+ case RGXFWIF_KCCB_CMD_CLEANUP: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CLEANUP_REQUEST); -+ } -+ case RGXFWIF_KCCB_CMD_POW: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_POWER_REQUEST); -+ } -+ case RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE: -+ case RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_ZSBUFFER_BACKING_DATA); -+ } -+ case RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_FREELIST_GS_DATA); -+ } -+ case RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_FREELISTS_RECONSTRUCTION_DATA); -+ } -+ case RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_WRITE_OFFSET_UPDATE_DATA); -+ } -+ case RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE_DATA); -+ } -+ case RGXFWIF_KCCB_CMD_FORCE_UPDATE: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA); -+ } -+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) -+ case RGXFWIF_KCCB_CMD_REGCONFIG: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_REGCONFIG_DATA); -+ } -+#endif -+ case RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS); -+ } -+#if defined(SUPPORT_PDVFS) -+ case RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + 
sizeof(RGXFWIF_PDVFS_MAX_FREQ_DATA); -+ } -+#endif -+ case RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_OS_STATE_CHANGE_DATA); -+ } -+ case RGXFWIF_KCCB_CMD_COUNTER_DUMP: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_COUNTER_DUMP_DATA); -+ } -+ case RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL); -+ } -+ case RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS); -+ } -+ case RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CONFIG_DA_BLKS); -+ } -+ case RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL_BLKS); -+ } -+ case RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CORECLKSPEEDCHANGE_DATA); -+ } -+ case RGXFWIF_KCCB_CMD_VZ_DRV_ARRAY_CHANGE: -+ case RGXFWIF_KCCB_CMD_VZ_DRV_TIME_SLICE: -+ case RGXFWIF_KCCB_CMD_VZ_DRV_TIME_SLICE_INTERVAL: -+ case RGXFWIF_KCCB_CMD_WDG_CFG: -+ case RGXFWIF_KCCB_CMD_PHR_CFG: -+ case RGXFWIF_KCCB_CMD_HEALTH_CHECK: -+ case RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE: -+ case RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL: -+ { -+ /* No command specific data */ -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData); -+ } -+#if defined(SUPPORT_VALIDATION) -+ case RGXFWIF_KCCB_CMD_RGXREG: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_RGXREG_DATA); -+ } -+ case RGXFWIF_KCCB_CMD_GPUMAP: -+ { -+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_GPUMAP_DATA); -+ } -+#endif -+ default: -+ { -+ /* Invalid (OR) Unused (OR) Newly added command type */ -+ return 0; /* Error */ -+ } -+ } -+} -+ -+PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32SlotNum, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ -+ RGXFwSharedMemCacheOpValue(psDevInfo->pui32KernelCCBRtnSlots[ui32SlotNum], -+ INVALIDATE); -+ eError = PVRSRVWaitForValueKM( -+ (IMG_UINT32 __iomem *)&psDevInfo->pui32KernelCCBRtnSlots[ui32SlotNum], -+ RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, -+ RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, -+ RGXFwSharedMemCacheOpExecPfn); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVWaitForValueKM"); -+ -+#if defined(PDUMP) -+ /* PDumping conditions same as RGXSendCommandRaw for the actual command and poll command to go in harmony */ -+ if (PDumpCheckFlagsWrite(psDevInfo->psDeviceNode, ui32PDumpFlags)) -+ { -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Poll on KCCB slot %u for value %u (mask: 0x%x)", ui32SlotNum, -+ RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED); -+ -+ eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBRtnSlotsMemDesc, -+ ui32SlotNum * sizeof(IMG_UINT32), -+ RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, -+ RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, -+ PDUMP_POLL_OPERATOR_EQUAL, -+ ui32PDumpFlags); -+ PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32"); -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); -+#endif -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGXFWIF_KCCB_CMD *psKCCBCmd, -+ IMG_UINT32 uiPDumpFlags, -+ IMG_UINT32 *pui32CmdKCCBSlot) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; -+ RGXFWIF_CCB_CTL *psKCCBCtl; -+ RGXFWIF_CCB_CTL *psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; -+ 
IMG_UINT8 *pui8KCCB = psDevInfo->psKernelCCB; -+ IMG_UINT32 ui32NewWriteOffset; -+ IMG_UINT32 ui32OldWriteOffset; -+ IMG_UINT32 ui32CmdMemCopySize; -+ -+#if !defined(PDUMP) -+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psKernelCCBCtl, INVALIDATE); -+ psKCCBCtl = psDevInfo->psKernelCCBCtl; -+ ui32OldWriteOffset = psKCCBCtlLocal->ui32WriteOffset; -+ -+#else -+ IMG_BOOL bContCaptureOn = PDumpCheckFlagsWrite(psDeviceNode, PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER); /* client connected or in pdump init phase */ -+ IMG_BOOL bPDumpEnabled = PDumpCheckFlagsWrite(psDeviceNode, uiPDumpFlags); /* Are we in capture range or continuous and not in a power transition */ -+ -+ psKCCBCtl = psDevInfo->psKernelCCBCtl; -+ ui32OldWriteOffset = psKCCBCtlLocal->ui32WriteOffset; -+ -+ if (bContCaptureOn) -+ { -+ /* in capture range */ -+ if (bPDumpEnabled) -+ { -+ if (!psDevInfo->bDumpedKCCBCtlAlready) -+ { -+ /* entering capture range */ -+ psDevInfo->bDumpedKCCBCtlAlready = IMG_TRUE; -+ -+ /* Wait for the live FW to catch up */ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: waiting on fw to catch-up, roff: %d, woff: %d", -+ __func__, -+ psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset)); -+ PVRSRVPollForValueKM(psDeviceNode, -+ (IMG_UINT32 __iomem *)&psKCCBCtl->ui32ReadOffset, -+ ui32OldWriteOffset, 0xFFFFFFFF, -+ POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP, -+ NULL); -+ -+ /* Dump Init state of Kernel CCB control (read and write offset) */ -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, uiPDumpFlags, -+ "Initial state of kernel CCB Control, roff: %d, woff: %d", -+ psKCCBCtl->ui32ReadOffset, psKCCBCtlLocal->ui32WriteOffset); -+ -+ DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc, -+ 0, -+ sizeof(RGXFWIF_CCB_CTL), -+ uiPDumpFlags); -+ } -+ } -+ } -+#endif -+ -+#if defined(SUPPORT_AUTOVZ) -+ KM_CONNECTION_CACHEOP(Fw, INVALIDATE); -+ KM_CONNECTION_CACHEOP(Os, INVALIDATE); -+ if (!((KM_FW_CONNECTION_IS(READY, psDevInfo) && KM_OS_CONNECTION_IS(READY, psDevInfo)) || -+ (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo))) && -+ !PVRSRV_VZ_MODE_IS(NATIVE)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: The firmware-driver connection is invalid:" -+ "driver state = %u / firmware state = %u;" -+ "expected READY (%u/%u) or ACTIVE (%u/%u);", -+ __func__, KM_GET_OS_CONNECTION(psDevInfo), KM_GET_FW_CONNECTION(psDevInfo), -+ RGXFW_CONNECTION_OS_READY, RGXFW_CONNECTION_FW_READY, -+ RGXFW_CONNECTION_OS_ACTIVE, RGXFW_CONNECTION_FW_ACTIVE)); -+ eError = PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE; -+ goto _RGXSendCommandRaw_Exit; -+ } -+#endif -+ -+ if (!OSLockIsLocked(psDeviceNode->hPowerLock)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s called without power lock held!", -+ __func__)); -+ PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock)); -+ } -+ -+ /* Acquire a slot in the CCB */ -+ eError = RGXAcquireKernelCCBSlot(psDevInfo, &ui32NewWriteOffset); -+ if (eError != PVRSRV_OK) -+ { -+ goto _RGXSendCommandRaw_Exit; -+ } -+ -+ /* Calculate actual size of command to optimize device mem copy */ -+ ui32CmdMemCopySize = RGXGetCmdMemCopySize(psKCCBCmd->eCmdType); -+ PVR_LOG_RETURN_IF_FALSE(ui32CmdMemCopySize !=0, "RGXGetCmdMemCopySize failed", PVRSRV_ERROR_INVALID_CCB_COMMAND); -+ -+ /* Copy the command into the CCB */ -+ OSCachedMemCopyWMB(&pui8KCCB[ui32OldWriteOffset * sizeof(RGXFWIF_KCCB_CMD)], -+ psKCCBCmd, ui32CmdMemCopySize); -+ RGXFwSharedMemCacheOpExec(&pui8KCCB[ui32OldWriteOffset * sizeof(RGXFWIF_KCCB_CMD)], ui32CmdMemCopySize, PVRSRV_CACHE_OP_FLUSH); -+ -+ /* If non-NULL pui32CmdKCCBSlot passed-in, 
return the kCCB slot in which the command was enqueued */ -+ if (pui32CmdKCCBSlot) -+ { -+ *pui32CmdKCCBSlot = ui32OldWriteOffset; -+ -+ /* Each such command enqueue needs to reset the slot value first. This is so that a caller -+ * doesn't get to see stale/false value in allotted slot */ -+ OSWriteDeviceMem32WithWMB(&psDevInfo->pui32KernelCCBRtnSlots[ui32OldWriteOffset], -+ RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE); -+ RGXFwSharedMemCacheOpValue(psDevInfo->pui32KernelCCBRtnSlots[ui32OldWriteOffset], -+ FLUSH); -+#if defined(PDUMP) -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, uiPDumpFlags, -+ "Reset kCCB slot number %u", ui32OldWriteOffset); -+ DevmemPDumpLoadMem(psDevInfo->psKernelCCBRtnSlotsMemDesc, -+ ui32OldWriteOffset * sizeof(IMG_UINT32), -+ sizeof(IMG_UINT32), -+ uiPDumpFlags); -+#endif -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Device (%p) KCCB slot %u reset with value %u for command type %x", -+ __func__, psDevInfo, ui32OldWriteOffset, RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE, psKCCBCmd->eCmdType)); -+ } -+ -+ /* Move past the current command */ -+ psKCCBCtlLocal->ui32WriteOffset = ui32NewWriteOffset; -+ psKCCBCtl->ui32WriteOffset = ui32NewWriteOffset; -+ OSWriteMemoryBarrier(&psKCCBCtl->ui32WriteOffset); -+ RGXFwSharedMemCacheOpValue(psKCCBCtl->ui32WriteOffset, FLUSH); -+ -+#if defined(PDUMP) -+ if (bContCaptureOn) -+ { -+ /* in capture range */ -+ if (bPDumpEnabled) -+ { -+ /* Dump new Kernel CCB content */ -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, -+ uiPDumpFlags, "Dump kCCB cmd woff = %d", -+ ui32OldWriteOffset); -+ DevmemPDumpLoadMem(psDevInfo->psKernelCCBMemDesc, -+ ui32OldWriteOffset * sizeof(RGXFWIF_KCCB_CMD), -+ ui32CmdMemCopySize, -+ uiPDumpFlags); -+ -+ /* Dump new kernel CCB write offset */ -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, -+ uiPDumpFlags, "Dump kCCBCtl woff: %d", -+ ui32NewWriteOffset); -+ DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc, -+ offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset), -+ sizeof(IMG_UINT32), -+ uiPDumpFlags); -+ -+ /* mimic the read-back of the write from above */ -+ DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc, -+ offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset), -+ ui32NewWriteOffset, -+ 0xFFFFFFFF, -+ PDUMP_POLL_OPERATOR_EQUAL, -+ uiPDumpFlags); -+ } -+ /* out of capture range */ -+ else -+ { -+ eError = RGXPdumpDrainKCCB(psDevInfo, ui32OldWriteOffset); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXPdumpDrainKCCB", _RGXSendCommandRaw_Exit); -+ } -+ } -+#endif -+ -+ -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, uiPDumpFlags, "MTS kick for kernel CCB"); -+ /* -+ * Kick the MTS to schedule the firmware. -+ */ -+ __MTSScheduleWrite(psDevInfo, RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK); -+ -+ PDUMPREG32(psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_MTS_SCHEDULE, -+ RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK, uiPDumpFlags); -+ -+#if defined(SUPPORT_AUTOVZ) -+ RGXUpdateAutoVzWdgToken(psDevInfo); -+#endif -+ -+#if defined(NO_HARDWARE) -+ /* keep the roff updated because fw isn't there to update it */ -+ psKCCBCtl->ui32ReadOffset = psKCCBCtlLocal->ui32WriteOffset; -+#endif -+ -+_RGXSendCommandRaw_Exit: -+ return eError; -+} -+ -+/****************************************************************************** -+ FUNCTION : _AllocDeferredCommand -+ -+ PURPOSE : Allocate a KCCB command and add it to KCCB deferred list -+ -+ PARAMETERS : psDevInfo RGX device info -+ : eKCCBType Firmware Command type -+ : psKCCBCmd Firmware Command -+ : uiPDumpFlags Pdump flags -+ -+ RETURNS : PVRSRV_OK If all went good, PVRSRV_ERROR_RETRY otherwise. 
-+******************************************************************************/ -+static PVRSRV_ERROR _AllocDeferredCommand(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGXFWIF_KCCB_CMD *psKCCBCmd, -+ IMG_UINT32 uiPDumpFlags) -+{ -+ RGX_DEFERRED_KCCB_CMD *psDeferredCommand; -+ OS_SPINLOCK_FLAGS uiFlags; -+ -+ psDeferredCommand = OSAllocMem(sizeof(*psDeferredCommand)); -+ -+ if (!psDeferredCommand) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Deferring a KCCB command failed: allocation failure: requesting retry")); -+ return PVRSRV_ERROR_RETRY; -+ } -+ -+ psDeferredCommand->sKCCBcmd = *psKCCBCmd; -+ psDeferredCommand->uiPDumpFlags = uiPDumpFlags; -+ psDeferredCommand->psDevInfo = psDevInfo; -+ -+ OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); -+ dllist_add_to_tail(&(psDevInfo->sKCCBDeferredCommandsListHead), &(psDeferredCommand->sListNode)); -+ psDevInfo->ui32KCCBDeferredCommandsCount++; -+ OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); -+ -+ return PVRSRV_OK; -+} -+ -+/****************************************************************************** -+ FUNCTION : _FreeDeferredCommand -+ -+ PURPOSE : Remove from the deferred list the sent deferred KCCB command -+ -+ PARAMETERS : psNode Node in deferred list -+ : psDeferredKCCBCmd KCCB Command to free -+ -+ RETURNS : None -+******************************************************************************/ -+static void _FreeDeferredCommand(DLLIST_NODE *psNode, RGX_DEFERRED_KCCB_CMD *psDeferredKCCBCmd) -+{ -+ dllist_remove_node(psNode); -+ psDeferredKCCBCmd->psDevInfo->ui32KCCBDeferredCommandsCount--; -+ OSFreeMem(psDeferredKCCBCmd); -+} -+ -+/****************************************************************************** -+ FUNCTION : RGXSendCommandsFromDeferredList -+ -+ PURPOSE : Try send KCCB commands in deferred list to KCCB -+ Should be called by holding PowerLock -+ -+ PARAMETERS : psDevInfo RGX device info -+ : bPoll Poll for space in KCCB -+ -+ RETURNS : PVRSRV_OK If all commands in deferred list are sent to KCCB, -+ PVRSRV_ERROR_KERNEL_CCB_FULL otherwise. -+******************************************************************************/ -+PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ DLLIST_NODE *psNode, *psNext; -+ RGX_DEFERRED_KCCB_CMD *psTempDeferredKCCBCmd; -+ DLLIST_NODE sCommandList; -+ OS_SPINLOCK_FLAGS uiFlags; -+ -+ PVR_ASSERT(PVRSRVPwrLockIsLockedByMe(psDevInfo->psDeviceNode)); -+ -+ /* !!! Important !!! -+ * -+ * The idea of moving the whole list hLockKCCBDeferredCommandsList below -+ * to the temporary list is only valid under the principle that all of the -+ * operations are also protected by the power lock. It must be held -+ * so that the order of the commands doesn't get messed up while we're -+ * performing the operations on the local list. -+ * -+ * The necessity of releasing the hLockKCCBDeferredCommandsList comes from -+ * the fact that _FreeDeferredCommand() is allocating memory and it can't -+ * be done in atomic context (inside section protected by a spin lock). -+ * -+ * We're using spin lock here instead of mutex to quickly perform a check -+ * if the list is empty in MISR without a risk that the MISR is going -+ * to sleep due to a lock. 
-+ */ -+ -+ /* move the whole list to a local list so it can be processed without lock */ -+ OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); -+ dllist_replace_head(&psDevInfo->sKCCBDeferredCommandsListHead, &sCommandList); -+ OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ if (dllist_is_empty(&sCommandList)) -+ { -+ return PVRSRV_OK; -+ } -+ -+ /* For every deferred KCCB command, try to send it*/ -+ dllist_foreach_node(&sCommandList, psNode, psNext) -+ { -+ psTempDeferredKCCBCmd = IMG_CONTAINER_OF(psNode, RGX_DEFERRED_KCCB_CMD, sListNode); -+ eError = RGXSendCommandRaw(psTempDeferredKCCBCmd->psDevInfo, -+ &psTempDeferredKCCBCmd->sKCCBcmd, -+ psTempDeferredKCCBCmd->uiPDumpFlags, -+ NULL /* We surely aren't interested in kCCB slot number of deferred command */); -+ if (eError != PVRSRV_OK) -+ { -+ if (!bPoll) -+ { -+ eError = PVRSRV_ERROR_KERNEL_CCB_FULL; -+ goto cleanup_; -+ } -+ break; -+ } -+ -+ _FreeDeferredCommand(psNode, psTempDeferredKCCBCmd); -+ } -+ -+ if (bPoll) -+ { -+ PVRSRV_ERROR eErrPollForKCCBSlot; -+ -+ /* Don't overwrite eError because if RGXPollKernelCCBSlot returns OK and the -+ * outer loop times-out, we'll still want to return KCCB_FULL to caller -+ */ -+ eErrPollForKCCBSlot = RGXPollKernelCCBSlot(psDevInfo); -+ if (eErrPollForKCCBSlot == PVRSRV_ERROR_KERNEL_CCB_FULL) -+ { -+ eError = PVRSRV_ERROR_KERNEL_CCB_FULL; -+ goto cleanup_; -+ } -+ } -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+cleanup_: -+ /* if the local list is not empty put it back to the deferred list head -+ * so that the old order of commands is retained */ -+ OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); -+ dllist_insert_list_at_head(&psDevInfo->sKCCBDeferredCommandsListHead, &sCommandList); -+ OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGXFWIF_KCCB_CMD *psKCCBCmd, -+ IMG_UINT32 uiPDumpFlags, -+ IMG_UINT32 *pui32CmdKCCBSlot) -+{ -+ IMG_BOOL bPoll = (pui32CmdKCCBSlot != NULL); -+ PVRSRV_ERROR eError; -+ -+ /* -+ * First try to Flush all the cmds in deferred list. -+ * -+ * We cannot defer an incoming command if the caller is interested in -+ * knowing the command's kCCB slot: it plans to poll/wait for a -+ * response from the FW just after the command is enqueued, so we must -+ * poll for space to be available. -+ */ -+ eError = RGXSendCommandsFromDeferredList(psDevInfo, bPoll); -+ if (eError == PVRSRV_OK) -+ { -+ eError = RGXSendCommandRaw(psDevInfo, -+ psKCCBCmd, -+ uiPDumpFlags, -+ pui32CmdKCCBSlot); -+ } -+ -+ /* -+ * If we don't manage to enqueue one of the deferred commands or the command -+ * passed as argument because the KCCB is full, insert the latter into the deferred commands list. -+ * The deferred commands will also be flushed eventually by: -+ * - one more KCCB command sent for any DM -+ * - RGX_MISRHandler_CheckFWActivePowerState -+ */ -+ if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL) -+ { -+ if (pui32CmdKCCBSlot == NULL) -+ { -+ eError = _AllocDeferredCommand(psDevInfo, psKCCBCmd, uiPDumpFlags); -+ } -+ else -+ { -+ /* Let the caller retry. Otherwise if we deferred the command and returned OK, -+ * the caller can end up looking in a stale CCB slot. 
-+ */ -+ PVR_DPF((PVR_DBG_WARNING, "%s: Couldn't flush the deferred queue for a command (Type:%d) " -+ "- will be retried", __func__, psKCCBCmd->eCmdType)); -+ } -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR RGXSendCommandWithPowLockAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGXFWIF_KCCB_CMD *psKCCBCmd, -+ IMG_UINT32 ui32PDumpFlags, -+ IMG_UINT32 *pui32CmdKCCBSlot) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; -+ -+ /* Ensure Rogue is powered up before kicking MTS */ -+ eError = PVRSRVPowerLock(psDeviceNode); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: failed to acquire powerlock (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ -+ goto _PVRSRVPowerLock_Exit; -+ } -+ -+ PDUMPPOWCMDSTART(psDeviceNode); -+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, -+ PVRSRV_DEV_POWER_STATE_ON, -+ PVRSRV_POWER_FLAGS_NONE); -+ PDUMPPOWCMDEND(psDeviceNode); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition Rogue to ON (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ -+ goto _PVRSRVSetDevicePowerStateKM_Exit; -+ } -+ -+ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, -+ psKCCBCmd, -+ ui32PDumpFlags, -+ pui32CmdKCCBSlot); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to schedule command (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+#if defined(DEBUG) -+ /* PVRSRVDebugRequest must be called without powerlock */ -+ PVRSRVPowerUnlock(psDeviceNode); -+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); -+ goto _PVRSRVPowerLock_Exit; -+#endif -+ } -+ -+_PVRSRVSetDevicePowerStateKM_Exit: -+ PVRSRVPowerUnlock(psDeviceNode); -+ -+_PVRSRVPowerLock_Exit: -+ return eError; -+} -+ -+void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hCmdCompHandle; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ OSScheduleMISR(psDevInfo->hProcessQueuesMISR); -+} -+ -+#if defined(SUPPORT_VALIDATION) -+PVRSRV_ERROR RGXScheduleRgxRegCommand(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT64 ui64RegVal, -+ IMG_UINT64 ui64Size, -+ IMG_UINT32 ui32Offset, -+ IMG_BOOL bWriteOp) -+{ -+ RGXFWIF_KCCB_CMD sRgxRegsCmd = {0}; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ PVRSRV_ERROR eError; -+ -+ sRgxRegsCmd.eCmdType = RGXFWIF_KCCB_CMD_RGXREG; -+ sRgxRegsCmd.uCmdData.sFwRgxData.ui64RegVal = ui64RegVal; -+ sRgxRegsCmd.uCmdData.sFwRgxData.ui32RegWidth = ui64Size; -+ sRgxRegsCmd.uCmdData.sFwRgxData.ui32RegAddr = ui32Offset; -+ sRgxRegsCmd.uCmdData.sFwRgxData.bWriteOp = bWriteOp; -+ -+ eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, -+ RGXFWIF_DM_GP, -+ &sRgxRegsCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ &ui32kCCBCommandSlot); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot"); -+ -+ if (bWriteOp) -+ { -+ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, -+ ui32kCCBCommandSlot, -+ PDUMP_FLAGS_CONTINUOUS); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); -+ } -+ -+ return eError; -+} -+#endif -+ -+/*! 
-+******************************************************************************* -+ -+ @Function RGX_MISRHandler_ScheduleProcessQueues -+ -+ @Description - Sends uncounted kick to all the DMs (the FW will process all -+ the queue for all the DMs) -+******************************************************************************/ -+static void RGX_MISRHandler_ScheduleProcessQueues(void *pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = pvData; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_ERROR eError; -+ PVRSRV_DEV_POWER_STATE ePowerState; -+ -+ eError = PVRSRVPowerLock(psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ return; -+ } -+ -+ /* Check whether it's worth waking up the GPU */ -+ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); -+ -+ if (!PVRSRV_VZ_MODE_IS(GUEST) && -+ (eError == PVRSRV_OK) && (ePowerState == PVRSRV_DEV_POWER_STATE_OFF)) -+ { -+ /* For now, guest drivers will always wake-up the GPU */ -+ RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; -+ IMG_BOOL bGPUHasWorkWaiting; -+ -+ RGXFwSharedMemCacheOpValue(psUtilFWCb->ui64GpuLastWord, INVALIDATE); -+ bGPUHasWorkWaiting = -+ (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64GpuLastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED); -+ -+ if (!bGPUHasWorkWaiting) -+ { -+ /* all queues are empty, don't wake up the GPU */ -+ PVRSRVPowerUnlock(psDeviceNode); -+ return; -+ } -+ } -+ -+ PDUMPPOWCMDSTART(psDeviceNode); -+ /* wake up the GPU */ -+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, -+ PVRSRV_DEV_POWER_STATE_ON, -+ PVRSRV_POWER_FLAGS_NONE); -+ PDUMPPOWCMDEND(psDeviceNode); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition Rogue to ON (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ -+ PVRSRVPowerUnlock(psDeviceNode); -+ return; -+ } -+ -+ /* uncounted kick to the FW */ -+ HTBLOGK(HTB_SF_MAIN_KICK_UNCOUNTED); -+ __MTSScheduleWrite(psDevInfo, (RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED); -+ -+ PVRSRVPowerUnlock(psDeviceNode); -+} -+ -+PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ return OSInstallMISR(phMISR, -+ RGX_MISRHandler_ScheduleProcessQueues, -+ psDeviceNode, -+ "RGX_ScheduleProcessQueues"); -+} -+ -+PVRSRV_ERROR _RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGXFWIF_DM eKCCBType, -+ RGXFWIF_KCCB_CMD *psKCCBCmd, -+ IMG_UINT32 ui32PDumpFlags, -+ IMG_UINT32 *pui32CmdKCCBSlot, -+ IMG_BOOL bCallerHasPwrLock) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 uiMMUSyncUpdate; -+ -+ /* Don't send the command/power up request if device not available. */ -+ if (unlikely((psDevInfo == NULL) || -+ (psDevInfo->psDeviceNode == NULL) || -+ (psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT_POWERED_OFF) || -+ (psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_PCI_ERROR))) -+ { -+ return PVRSRV_ERROR_INVALID_DEVICE; -+ } -+ -+ /* Don't send the command/power up request if device in deinit phase. -+ * The de-init thread could destroy the device whilst the power up -+ * sequence below is accessing the HW registers. -+ * Not yet safe to free resources. Caller should retry later. 
-+ */ -+ if (psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT) -+ { -+ return PVRSRV_ERROR_RETRY; -+ } -+ -+#if defined(SUPPORT_VALIDATION) -+ /* For validation, force the core to different dust count states with each kick */ -+ if ((eKCCBType == RGXFWIF_DM_GEOM) || (eKCCBType == RGXFWIF_DM_CDM)) -+ { -+ if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN) -+ { -+ IMG_UINT32 ui32NumDusts = RGXGetNextDustCount(&psDevInfo->sDustReqState, psDevInfo->sDevFeatureCfg.ui32MAXDustCount); -+ PVRSRVDeviceGPUUnitsPowerChange(psDevInfo->psDeviceNode, ui32NumDusts); -+ } -+ } -+ -+ if (psDevInfo->ui32ECCRAMErrInjModule != RGXKM_ECC_ERR_INJ_DISABLE) -+ { -+ if (psDevInfo->ui32ECCRAMErrInjInterval > 0U) -+ { -+ --psDevInfo->ui32ECCRAMErrInjInterval; -+ } -+ else -+ { -+ IMG_UINT64 ui64ECCRegVal = 0U; -+ -+ psDevInfo->ui32ECCRAMErrInjInterval = RGXKM_ECC_ERR_INJ_INTERVAL; -+ -+ if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_SLC) -+ { -+ PVR_LOG(("ECC RAM Error Inject SLC")); -+ ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN; -+ } -+ else if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_USC) -+ { -+ PVR_LOG(("ECC RAM Error Inject USC")); -+ ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_USC_EN; -+ } -+ else if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_TPU) -+ { -+#if defined(RGX_FEATURE_MAX_TPU_PER_SPU) -+ PVR_LOG(("ECC RAM Error Inject Swift TPU")); -+ ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_SWIFT_EN; -+#else -+ PVR_LOG(("ECC RAM Error Inject TPU MCU L0")); -+ ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN; -+#endif -+ } -+ else if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_RASCAL) -+ { -+#if defined(RGX_CR_ECC_RAM_ERR_INJ_RASCAL_EN) -+ PVR_LOG(("ECC RAM Error Inject RASCAL")); -+ ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_RASCAL_EN; -+#else -+ PVR_LOG(("ECC RAM Error Inject USC")); -+ ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_USC_EN; -+#endif -+ } -+ else if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_MARS) -+ { -+ PVR_LOG(("ECC RAM Error Inject MARS")); -+ ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_MARS_EN; -+ } -+ else -+ { -+ } -+ -+ OSWriteMemoryBarrier(NULL); -+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_ECC_RAM_ERR_INJ, ui64ECCRegVal); -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Write reg ECC_RAM_ERR_INJ"); -+ PDUMPREG64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_ECC_RAM_ERR_INJ, ui64ECCRegVal, PDUMP_FLAGS_CONTINUOUS); -+ OSWriteMemoryBarrier(NULL); -+ } -+ } -+#endif -+ -+ if (!bCallerHasPwrLock) -+ { -+ /* PVRSRVPowerLock guarantees atomicity between commands. This is helpful -+ in a scenario with several applications allocating resources. */ -+ eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ -+ /* If system is found powered OFF, Retry scheduling the command */ -+ if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) -+ { -+ eError = PVRSRV_ERROR_RETRY; -+ } -+ -+ goto RGXScheduleCommand_exit; -+ } -+ } -+ -+ if (unlikely(psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT)) -+ { -+ /* If we have the power lock the device is valid but the deinit -+ * thread could be waiting for the lock. 
*/ -+ eError = PVRSRV_ERROR_RETRY; -+ goto _PVRSRVInvalidDeviceError_Exit; -+ } -+ -+ /* Ensure device is powered up before sending any commands */ -+ PDUMPPOWCMDSTART(psDevInfo->psDeviceNode); -+ eError = PVRSRVSetDevicePowerStateKM(psDevInfo->psDeviceNode, -+ PVRSRV_DEV_POWER_STATE_ON, -+ PVRSRV_POWER_FLAGS_NONE); -+ PDUMPPOWCMDEND(psDevInfo->psDeviceNode); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition RGX to ON (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ goto _PVRSRVSetDevicePowerStateKM_Exit; -+ } -+ -+ eError = RGXPreKickCacheCommand(psDevInfo, eKCCBType, &uiMMUSyncUpdate); -+ if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit; -+ -+ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, pui32CmdKCCBSlot); -+ if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit; -+ -+_PVRSRVSetDevicePowerStateKM_Exit: -+_PVRSRVInvalidDeviceError_Exit: -+ if (!bCallerHasPwrLock) -+ { -+ PVRSRVPowerUnlock(psDevInfo->psDeviceNode); -+ } -+RGXScheduleCommand_exit: -+ return eError; -+} -+ -+/* -+ * RGXCheckFirmwareCCB -+ */ -+void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl; -+ RGXFWIF_CCB_CTL *psFWCCBCtlLocal = psDevInfo->psFirmwareCCBCtlLocal; -+ IMG_UINT8 *psFWCCB = psDevInfo->psFirmwareCCB; -+ PVRSRV_ERROR eError; -+ -+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) -+ KM_CONNECTION_CACHEOP(Fw, INVALIDATE); -+ KM_CONNECTION_CACHEOP(Os, INVALIDATE); -+ PVR_LOG_RETURN_VOID_IF_FALSE(PVRSRV_VZ_MODE_IS(NATIVE) || -+ (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && -+ KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)), -+ "FW-KM connection is down"); -+#endif -+ -+ eError = RGXUpdateLocalFWCCBWoff(psDevInfo); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "RGXUpdateLocalFWCCBWoff"); -+ return; -+ } -+ -+ while (psFWCCBCtlLocal->ui32ReadOffset != psFWCCBCtlLocal->ui32WriteOffset) -+ { -+ /* Point to the next command */ -+ const RGXFWIF_FWCCB_CMD *psFwCCBCmd = ((RGXFWIF_FWCCB_CMD *)psFWCCB) + psFWCCBCtlLocal->ui32ReadOffset; -+ RGXFwSharedMemCacheOpPtr(psFwCCBCmd, INVALIDATE); -+ -+ -+ HTBLOGK(HTB_SF_MAIN_FWCCB_CMD, psFwCCBCmd->eCmdType); -+ switch (psFwCCBCmd->eCmdType) -+ { -+ case RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING: -+ { -+ if (psDevInfo->bPDPEnabled) -+ { -+ PDUMP_PANIC(psDevInfo->psDeviceNode, ZSBUFFER_BACKING, "Request to add backing to ZSBuffer"); -+ } -+ RGXProcessRequestZSBufferBacking(psDevInfo, -+ psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID); -+ break; -+ } -+ -+ case RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING: -+ { -+ if (psDevInfo->bPDPEnabled) -+ { -+ PDUMP_PANIC(psDevInfo->psDeviceNode, ZSBUFFER_UNBACKING, "Request to remove backing from ZSBuffer"); -+ } -+ RGXProcessRequestZSBufferUnbacking(psDevInfo, -+ psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID); -+ break; -+ } -+ -+ case RGXFWIF_FWCCB_CMD_FREELIST_GROW: -+ { -+ if (psDevInfo->bPDPEnabled) -+ { -+ PDUMP_PANIC(psDevInfo->psDeviceNode, FREELIST_GROW, "Request to grow the free list"); -+ } -+ RGXProcessRequestGrow(psDevInfo, -+ psFwCCBCmd->uCmdData.sCmdFreeListGS.ui32FreelistID); -+ break; -+ } -+ -+ case RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION: -+ { -+ if (psDevInfo->bPDPEnabled) -+ { -+ PDUMP_PANIC(psDevInfo->psDeviceNode, FREELISTS_RECONSTRUCTION, "Request to reconstruct free lists"); -+ } -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d) for 
%d freelists", -+ __func__, -+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1, -+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d/%d) for %d freelists", -+ __func__, -+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1, -+ psDevInfo->psRGXFWIfHWRInfoBufCtl->ui32HwrCounter+1, -+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount)); -+ } -+ -+ RGXProcessRequestFreelistsReconstruction(psDevInfo, -+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount, -+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.aui32FreelistIDs); -+ break; -+ } -+ -+ case RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION: -+ { -+ /* Notify client drivers */ -+ /* Client notification of device error will be achieved by -+ * clients calling UM function RGXGetLastDeviceError() */ -+ psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT; -+ -+ /* Notify system layer */ -+ { -+ PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; -+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; -+ const RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA *psCmdFwPagefault = -+ &psFwCCBCmd->uCmdData.sCmdFWPagefault; -+ -+ if (psDevConfig->pfnSysDevErrorNotify) -+ { -+ PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; -+ -+ sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT; -+ sErrorData.uErrData.sFwPFErrData.sFWFaultAddr.uiAddr = psCmdFwPagefault->sFWFaultAddr.uiAddr; -+ -+ psDevConfig->pfnSysDevErrorNotify(psDevConfig, -+ &sErrorData); -+ } -+ } -+ break; -+ } -+ -+ case RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION: -+ { -+ const RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification = -+ &psFwCCBCmd->uCmdData.sCmdContextResetNotification; -+ IMG_UINT32 ui32ErrorPid = 0; -+ -+ FWCommonContextListSetLastResetReason(psDevInfo, -+ &ui32ErrorPid, -+ psCmdContextResetNotification); -+ -+ /* Increment error counter (if appropriate) */ -+ if (psCmdContextResetNotification->eResetReason == RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM) -+ { -+ /* Avoid wrapping the error count (which would then -+ * make it appear we had far fewer errors), by limiting -+ * it to IMG_UINT32_MAX. -+ */ -+ if (psDevInfo->sErrorCounts.ui32WGPErrorCount < IMG_UINT32_MAX) -+ { -+ psDevInfo->sErrorCounts.ui32WGPErrorCount++; -+ } -+ } -+ else if (psCmdContextResetNotification->eResetReason == RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM) -+ { -+ /* Avoid wrapping the error count (which would then -+ * make it appear we had far fewer errors), by limiting -+ * it to IMG_UINT32_MAX. 
-+ */ -+ if (psDevInfo->sErrorCounts.ui32TRPErrorCount < IMG_UINT32_MAX) -+ { -+ psDevInfo->sErrorCounts.ui32TRPErrorCount++; -+ } -+ } -+ -+ /* Notify system layer */ -+ { -+ PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; -+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; -+ -+ if (psDevConfig->pfnSysDevErrorNotify) -+ { -+ PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; -+ -+ sErrorData.eResetReason = psCmdContextResetNotification->eResetReason; -+ sErrorData.pid = ui32ErrorPid; -+ -+ /* Populate error data according to reset reason */ -+ switch (psCmdContextResetNotification->eResetReason) -+ { -+ case RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM: -+ case RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM: -+ { -+ sErrorData.uErrData.sChecksumErrData.ui32ExtJobRef = psCmdContextResetNotification->ui32ResetJobRef; -+ sErrorData.uErrData.sChecksumErrData.eDM = psCmdContextResetNotification->eDM; -+ break; -+ } -+ default: -+ { -+ break; -+ } -+ } -+ -+ psDevConfig->pfnSysDevErrorNotify(psDevConfig, -+ &sErrorData); -+ } -+ } -+ -+ /* Notify if a page fault */ -+ if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF) -+ { -+ DevmemIntPFNotify(psDevInfo->psDeviceNode, -+ psCmdContextResetNotification->ui64PCAddress, -+ psCmdContextResetNotification->sFaultAddress); -+ } -+ break; -+ } -+ -+ case RGXFWIF_FWCCB_CMD_DEBUG_DUMP: -+ { -+ PVRSRV_ERROR eError; -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ OSAtomicWrite(&psDevInfo->psDeviceNode->eDebugDumpRequested, PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE); -+ eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to signal FW Cmd debug dump event, dumping now instead", __func__)); -+ PVRSRVDebugRequest(psDevInfo->psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); -+ } -+ break; -+ } -+ -+ case RGXFWIF_FWCCB_CMD_UPDATE_STATS: -+ { -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ IMG_PID pidTmp = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.pidOwner; -+ IMG_INT32 i32AdjustmentValue = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.i32AdjustmentValue; -+ -+ switch (psFwCCBCmd->uCmdData.sCmdUpdateStatsData.eElementToUpdate) -+ { -+ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS: -+ { -+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,i32AdjustmentValue,0,0,0,0,0,0,pidTmp); -+ break; -+ } -+ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY: -+ { -+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,i32AdjustmentValue,0,0,0,0,0,pidTmp); -+ break; -+ } -+ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES: -+ { -+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,i32AdjustmentValue,0,0,0,0,pidTmp); -+ break; -+ } -+ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES: -+ { -+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,i32AdjustmentValue,0,0,0,pidTmp); -+ break; -+ } -+ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES: -+ { -+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,0,i32AdjustmentValue,0,0,pidTmp); -+ break; -+ } -+ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES: -+ { -+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,0,0,i32AdjustmentValue,0,pidTmp); -+ break; -+ } -+ } -+#endif -+ break; -+ } -+#if defined(SUPPORT_PDVFS) -+ case RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE: -+ { -+ PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(psDevInfo, -+ psFwCCBCmd->uCmdData.sCmdCoreClkRateChange.ui32CoreClkRate); -+ break; -+ } -+#endif -+ case RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART: -+ 
{ -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, INVALIDATE); -+ if (psDevInfo->psRGXFWIfFwSysData != NULL && -+ psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_OFF) -+ { -+ PVRSRV_ERROR eError; -+ -+ /* Power down... */ -+ eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode, -+ PVRSRV_SYS_POWER_STATE_OFF, -+ PVRSRV_POWER_FLAGS_NONE); -+ if (eError == PVRSRV_OK) -+ { -+ /* Clear the FW faulted flags... */ -+ psDevInfo->psRGXFWIfFwSysData->ui32HWRStateFlags &= ~(RGXFWIF_HWR_FW_FAULT|RGXFWIF_HWR_RESTART_REQUESTED); -+ OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfFwSysData->ui32HWRStateFlags); -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->ui32HWRStateFlags, -+ FLUSH); -+ -+ /* Power back up again... */ -+ eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode, -+ PVRSRV_SYS_POWER_STATE_ON, -+ PVRSRV_POWER_FLAGS_NONE); -+ -+ /* Send a dummy KCCB command to ensure the FW wakes up and checks the queues... */ -+ if (eError == PVRSRV_OK) -+ { -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXFWHealthCheckCmd(psDevInfo); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ } -+ } -+ -+ /* Notify client drivers and system layer of FW fault */ -+ { -+ PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; -+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; -+ -+ /* Client notification of device error will be achieved by -+ * clients calling UM function RGXGetLastDeviceError() */ -+ psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR; -+ -+ /* Notify system layer */ -+ if (psDevConfig->pfnSysDevErrorNotify) -+ { -+ PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; -+ -+ sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR; -+ psDevConfig->pfnSysDevErrorNotify(psDevConfig, -+ &sErrorData); -+ } -+ } -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed firmware restart (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ } -+ } -+ break; -+ } -+#if defined(SUPPORT_VALIDATION) -+ case RGXFWIF_FWCCB_CMD_REG_READ: -+ { -+ psDevInfo->sFwRegs.ui64RegVal = psFwCCBCmd->uCmdData.sCmdRgxRegReadData.ui64RegValue; -+ complete(&psDevInfo->sFwRegs.sRegComp); -+ break; -+ } -+#if defined(SUPPORT_SOC_TIMER) -+ case RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS: -+ { -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags, -+ INVALIDATE); -+ if (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) -+ { -+ PVRSRV_ERROR eSOCtimerErr = RGXValidateSOCUSCTimer(psDevInfo, -+ PDUMP_NONE, -+ psFwCCBCmd->uCmdData.sCmdTimers.ui64timerGray, -+ psFwCCBCmd->uCmdData.sCmdTimers.ui64timerBinary, -+ psFwCCBCmd->uCmdData.sCmdTimers.aui64uscTimers); -+ if (PVRSRV_OK == eSOCtimerErr) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "SoC or USC Timers have increased over time")); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, "SoC or USC Timers have NOT increased over time")); -+ } -+ } -+ break; -+ } -+#endif -+#endif -+ default: -+ { -+ /* unknown command */ -+ PVR_DPF((PVR_DBG_WARNING, "%s: Unknown Command (eCmdType=0x%08x)", -+ __func__, psFwCCBCmd->eCmdType)); -+ /* Assert on magic value corruption */ -+ PVR_ASSERT((((IMG_UINT32)psFwCCBCmd->eCmdType & RGX_CMD_MAGIC_DWORD_MASK) >> RGX_CMD_MAGIC_DWORD_SHIFT) == RGX_CMD_MAGIC_DWORD); -+ } -+ } -+ -+ /* Update read offset */ -+ psFWCCBCtlLocal->ui32ReadOffset = (psFWCCBCtlLocal->ui32ReadOffset + 1) & psFWCCBCtlLocal->ui32WrapMask; -+ OSMemoryBarrier(NULL); -+ 
psFWCCBCtl->ui32ReadOffset = psFWCCBCtlLocal->ui32ReadOffset; -+ OSWriteMemoryBarrier(NULL); -+ -+ if (psFWCCBCtlLocal->ui32ReadOffset == psFWCCBCtlLocal->ui32WriteOffset) -+ { -+ eError = RGXUpdateLocalFWCCBWoff(psDevInfo); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "RGXUpdateLocalFWCCBWoff"); -+ return; -+ } -+ } -+ } -+} -+ -+/* -+ * PVRSRVRGXFrameworkCopyCommand -+*/ -+PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEM_MEMDESC *psFWFrameworkMemDesc, -+ IMG_PBYTE pbyGPUFRegisterList, -+ IMG_UINT32 ui32FrameworkRegisterSize) -+{ -+ PVRSRV_ERROR eError; -+ RGXFWIF_RF_REGISTERS *psRFReg; -+ -+ eError = DevmemAcquireCpuVirtAddr(psFWFrameworkMemDesc, -+ (void **)&psRFReg); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to map firmware render context state (%u)", -+ __func__, eError)); -+ return eError; -+ } -+ -+ OSDeviceMemCopy(psRFReg, pbyGPUFRegisterList, ui32FrameworkRegisterSize); -+ RGXFwSharedMemCacheOpPtr(psRFReg, FLUSH); -+ -+ /* Release the CPU mapping */ -+ DevmemReleaseCpuVirtAddr(psFWFrameworkMemDesc); -+ -+ /* -+ * Dump the FW framework buffer -+ */ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDeviceNode, "Dump FWFramework buffer"); -+ DevmemPDumpLoadMem(psFWFrameworkMemDesc, 0, ui32FrameworkRegisterSize, PDUMP_FLAGS_CONTINUOUS); -+#else -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * PVRSRVRGXFrameworkCreateKM -+*/ -+PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEM_MEMDESC **ppsFWFrameworkMemDesc, -+ IMG_UINT32 ui32FrameworkCommandSize) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ /* -+ Allocate device memory for the firmware GPU framework state. -+ Sufficient info to kick one or more DMs should be contained in this buffer -+ */ -+ PDUMPCOMMENT(psDeviceNode, "Allocate firmware framework state"); -+ -+ eError = DevmemFwAllocate(psDevInfo, -+ ui32FrameworkCommandSize, -+ RGX_FWCOMCTX_ALLOCFLAGS, -+ "FwGPUFrameworkState", -+ ppsFWFrameworkMemDesc); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate firmware framework state (%u)", -+ __func__, eError)); -+ return eError; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode, -+ volatile IMG_UINT32 __iomem *pui32LinMemAddr, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 ui32CurrentQueueLength, ui32MaxRetries; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; -+ RGXFWIF_CCB_CTL *psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; -+ -+ eError = RGXUpdateLocalKCCBRoff(psDevInfo); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXUpdateLocalKCCBRoff"); -+ -+ ui32CurrentQueueLength = (psKCCBCtlLocal->ui32WrapMask+1 + -+ psKCCBCtlLocal->ui32WriteOffset - -+ psKCCBCtlLocal->ui32ReadOffset) & psKCCBCtlLocal->ui32WrapMask; -+ ui32CurrentQueueLength += psDevInfo->ui32KCCBDeferredCommandsCount; -+ -+ for (ui32MaxRetries = ui32CurrentQueueLength + 1; -+ ui32MaxRetries > 0; -+ ui32MaxRetries--) -+ { -+ -+ /* -+ * PVRSRVPollForValueKM flags are set to POLL_FLAG_NONE in this case so that the function -+ * does not generate an error message. In this case, the PollForValueKM is expected to -+ * timeout as there is work ongoing on the GPU which may take longer than the timeout period. 
-+ */ -+ eError = PVRSRVPollForValueKM(psDevNode, pui32LinMemAddr, ui32Value, ui32Mask, POLL_FLAG_NONE, NULL); -+ if (eError != PVRSRV_ERROR_TIMEOUT) -+ { -+ break; -+ } -+ -+ RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE); -+ } -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed! Error(%s) CPU linear address(%p) Expected value(%u)", -+ __func__, PVRSRVGetErrorString(eError), -+ pui32LinMemAddr, ui32Value)); -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32Config, -+ IMG_UINT32 *pui32ConfigState, -+ IMG_BOOL bSetNotClear) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DEV_POWER_STATE ePowerState; -+ RGXFWIF_KCCB_CMD sStateFlagCmd = { 0 }; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ RGXFWIF_SYSDATA *psFwSysData; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ IMG_BOOL bWaitForFwUpdate = IMG_FALSE; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ if (!psDevInfo) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ psDeviceNode = psDevInfo->psDeviceNode; -+ -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, INVALIDATE); -+ psFwSysData = psDevInfo->psRGXFWIfFwSysData; -+ -+ if (NULL == psFwSysData) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Fw Sys Config is not mapped into CPU space", __func__)); -+ return PVRSRV_ERROR_INVALID_CPU_ADDR; -+ } -+ -+ /* apply change and ensure the new data is written to memory -+ * before requesting the FW to read it -+ */ -+ ui32Config = ui32Config & RGXFWIF_INICFG_ALL; -+ if (bSetNotClear) -+ { -+ psFwSysData->ui32ConfigFlags |= ui32Config; -+ } -+ else -+ { -+ psFwSysData->ui32ConfigFlags &= ~ui32Config; -+ } -+ OSWriteMemoryBarrier(&psFwSysData->ui32ConfigFlags); -+ RGXFwSharedMemCacheOpValue(psFwSysData->ui32ConfigFlags, FLUSH); -+ -+ /* return current/new value to caller */ -+ if (pui32ConfigState) -+ { -+ *pui32ConfigState = psFwSysData->ui32ConfigFlags; -+ } -+ -+ OSMemoryBarrier(&psFwSysData->ui32ConfigFlags); -+ -+ eError = PVRSRVPowerLock(psDeviceNode); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock"); -+ -+ /* notify FW to update setting */ -+ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); -+ -+ if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF)) -+ { -+ /* Ask the FW to update its cached version of the value */ -+ sStateFlagCmd.eCmdType = RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL; -+ -+ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, -+ &sStateFlagCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ &ui32kCCBCommandSlot); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot", unlock); -+ bWaitForFwUpdate = IMG_TRUE; -+ } -+ -+unlock: -+ PVRSRVPowerUnlock(psDeviceNode); -+ if (bWaitForFwUpdate) -+ { -+ /* Wait for the value to be updated as the FW validates -+ * the parameters and modifies the ui32ConfigFlags -+ * accordingly -+ * (for completeness as registered callbacks should also -+ * not permit invalid transitions) -+ */ -+ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); -+ PVR_LOG_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); -+ } -+ return eError; -+} -+ -+static -+PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGXFWIF_DM eDM, -+ RGXFWIF_KCCB_CMD *psKCCBCmd, -+ RGXFWIF_CLEANUP_TYPE eCleanupType, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ -+ /* Clean-up commands sent during frame capture intervals must be dumped even when not in capture range... 
*/ -+ ui32PDumpFlags |= PDUMP_FLAGS_INTERVAL; -+ -+ psKCCBCmd->eCmdType = RGXFWIF_KCCB_CMD_CLEANUP; -+ psKCCBCmd->uCmdData.sCleanupData.eCleanupType = eCleanupType; -+ -+ /* -+ Send the cleanup request to the firmware. If the resource is still busy -+ the firmware will tell us and we'll drop out with a retry. -+ */ -+ eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, -+ eDM, -+ psKCCBCmd, -+ ui32PDumpFlags, -+ &ui32kCCBCommandSlot); -+ if (eError != PVRSRV_OK) -+ { -+ /* If caller may retry, fail with no error message */ -+ if (!PVRSRVIsRetryError(eError)) -+ { -+ PVR_DPF((PVR_DBG_ERROR ,"RGXScheduleCommandAndGetKCCBSlot() failed (%s) in %s()", -+ PVRSRVGETERRORSTRING(eError), __func__)); -+ } -+ goto fail_command; -+ } -+ -+ /* Wait for command kCCB slot to be updated by FW */ -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, -+ "Wait for the firmware to reply to the cleanup command"); -+ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, -+ ui32PDumpFlags); -+ /* -+ If the firmware hasn't got back to us in a timely manner -+ then bail and let the caller retry the command. -+ */ -+ if (eError == PVRSRV_ERROR_TIMEOUT) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: RGXWaitForKCCBSlotUpdate timed out. Dump debug information.", -+ __func__)); -+ -+ eError = PVRSRV_ERROR_RETRY; -+#if defined(DEBUG) -+ PVRSRVDebugRequest(psDevInfo->psDeviceNode, -+ DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); -+#endif -+ goto fail_poll; -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ goto fail_poll; -+ } -+ -+#if defined(PDUMP) -+ /* -+ * The cleanup request to the firmware will tell us if a given resource is busy or not. -+ * If the RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY flag is set, this means that the resource is -+ * still in use. In this case we return a PVRSRV_ERROR_RETRY error to the client drivers -+ * and they will re-issue the cleanup request until it succeeds. -+ * -+ * Since this retry mechanism doesn't work for pdumps, client drivers should ensure -+ * that cleanup requests are only submitted if the resource is unused. -+ * If this is not the case, the following poll will block infinitely, making sure -+ * the issue doesn't go unnoticed. -+ */ -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, -+ "Cleanup: If this poll fails, the following resource is still in use (DM=%u, type=%u, address=0x%08x), which is incorrect in pdumps", -+ eDM, -+ psKCCBCmd->uCmdData.sCleanupData.eCleanupType, -+ psKCCBCmd->uCmdData.sCleanupData.uCleanupData.psContext.ui32Addr); -+ eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBRtnSlotsMemDesc, -+ ui32kCCBCommandSlot * sizeof(IMG_UINT32), -+ 0, -+ RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY, -+ PDUMP_POLL_OPERATOR_EQUAL, -+ ui32PDumpFlags); -+ PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32"); -+#endif -+ -+ /* -+ If the command was run but a resource was busy, then the request -+ will need to be retried. 
-+ */ -+ RGXFwSharedMemCacheOpValue(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot], -+ INVALIDATE); -+ -+ if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY)) -+ { -+ if (psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); -+ } -+ eError = PVRSRV_ERROR_RETRY; -+ goto fail_requestbusy; -+ } -+ -+ return PVRSRV_OK; -+ -+fail_requestbusy: -+fail_poll: -+fail_command: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ -+ return eError; -+} -+ -+/* -+ RGXRequestCommonContextCleanUp -+*/ -+PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, -+ RGXFWIF_DM eDM, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ RGXFWIF_KCCB_CMD sRCCleanUpCmd = {0}; -+ PVRSRV_ERROR eError; -+ PRGXFWIF_FWCOMMONCONTEXT psFWCommonContextFWAddr; -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; -+ -+ /* Force retry if this context's CCB is currently being dumped -+ * as part of the stalled CCB debug */ -+ if (psDevInfo->pvEarliestStalledClientCCB == (void*)FWCommonContextGetClientCCB(psServerCommonContext)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Forcing retry as psDevInfo->pvEarliestStalledClientCCB = psCtxClientCCB <%p>", -+ __func__, -+ psDevInfo->pvEarliestStalledClientCCB)); -+ return PVRSRV_ERROR_RETRY; -+ } -+ -+ psFWCommonContextFWAddr = FWCommonContextGetFWAddress(psServerCommonContext); -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDeviceNode, "Common ctx cleanup Request DM%d [context = 0x%08x]", -+ eDM, psFWCommonContextFWAddr.ui32Addr); -+ PDUMPCOMMENT(psDeviceNode, "Wait for CCB to be empty before common ctx cleanup"); -+ -+ RGXCCBPDumpDrainCCB(FWCommonContextGetClientCCB(psServerCommonContext), ui32PDumpFlags); -+#endif -+ -+ /* Setup our command data, the cleanup call will fill in the rest */ -+ sRCCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psContext = psFWCommonContextFWAddr; -+ -+ /* Request cleanup of the firmware resource */ -+ eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice, -+ eDM, -+ &sRCCleanUpCmd, -+ RGXFWIF_CLEANUP_FWCOMMONCONTEXT, -+ ui32PDumpFlags); -+ -+ if ((eError != PVRSRV_OK) && !PVRSRVIsRetryError(eError)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to schedule a memory context cleanup with error (%u)", -+ __func__, eError)); -+ } -+ -+ return eError; -+} -+ -+/* -+ * RGXFWRequestHWRTDataCleanUp -+ */ -+ -+PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, -+ PRGXFWIF_HWRTDATA psHWRTData) -+{ -+ RGXFWIF_KCCB_CMD sHWRTDataCleanUpCmd = {0}; -+ PVRSRV_ERROR eError; -+ -+ PDUMPCOMMENT(psDeviceNode, "HW RTData cleanup Request [HWRTData = 0x%08x]", psHWRTData.ui32Addr); -+ -+ sHWRTDataCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psHWRTData = psHWRTData; -+ -+ eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice, -+ RGXFWIF_DM_GP, -+ &sHWRTDataCleanUpCmd, -+ RGXFWIF_CLEANUP_HWRTDATA, -+ PDUMP_FLAGS_NONE); -+ -+ if (eError != PVRSRV_OK) -+ { -+ /* If caller may retry, fail with no error message */ -+ if (!PVRSRVIsRetryError(eError)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to schedule a HWRTData cleanup with error (%u)", -+ __func__, eError)); -+ } -+ } -+ -+ return eError; -+} -+ -+/* -+ RGXFWRequestFreeListCleanUp -+*/ -+PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, -+ PRGXFWIF_FREELIST psFWFreeList) -+{ -+ RGXFWIF_KCCB_CMD sFLCleanUpCmd = {0}; -+ 
PVRSRV_ERROR eError; -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Free list cleanup Request [FreeList = 0x%08x]", psFWFreeList.ui32Addr); -+ -+ /* Setup our command data, the cleanup call will fill in the rest */ -+ sFLCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psFreelist = psFWFreeList; -+ -+ /* Request cleanup of the firmware resource */ -+ eError = RGXScheduleCleanupCommand(psDevInfo, -+ RGXFWIF_DM_GP, -+ &sFLCleanUpCmd, -+ RGXFWIF_CLEANUP_FREELIST, -+ PDUMP_FLAGS_NONE); -+ -+ if (eError != PVRSRV_OK) -+ { -+ /* If caller may retry, fail with no error message */ -+ if (!PVRSRVIsRetryError(eError)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to schedule a memory context cleanup with error (%u)", -+ __func__, eError)); -+ } -+ } -+ -+ return eError; -+} -+ -+/* -+ RGXFWRequestZSBufferCleanUp -+*/ -+PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, -+ PRGXFWIF_ZSBUFFER psFWZSBuffer) -+{ -+ RGXFWIF_KCCB_CMD sZSBufferCleanUpCmd = {0}; -+ PVRSRV_ERROR eError; -+ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "ZS Buffer cleanup Request [ZS Buffer = 0x%08x]", psFWZSBuffer.ui32Addr); -+ -+ /* Setup our command data, the cleanup call will fill in the rest */ -+ sZSBufferCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psZSBuffer = psFWZSBuffer; -+ -+ /* Request cleanup of the firmware resource */ -+ eError = RGXScheduleCleanupCommand(psDevInfo, -+ RGXFWIF_DM_3D, -+ &sZSBufferCleanUpCmd, -+ RGXFWIF_CLEANUP_ZSBUFFER, -+ PDUMP_FLAGS_NONE); -+ -+ if ((eError != PVRSRV_OK) && !PVRSRVIsRetryError(eError)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to schedule a memory context cleanup with error (%u)", -+ __func__, eError)); -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32HCSDeadlineMs) -+{ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS = ui32HCSDeadlineMs; -+ OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS); -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS, FLUSH); -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Updating the Hard Context Switching deadline inside RGXFWIfRuntimeCfg"); -+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, -+ offsetof(RGXFWIF_RUNTIME_CFG, ui32HCSDeadlineMS), -+ ui32HCSDeadlineMs, -+ PDUMP_FLAGS_CONTINUOUS); -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR RGXFWHealthCheckCmdInt(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bCallerHasPwrLock) -+{ -+ RGXFWIF_KCCB_CMD sCmpKCCBCmd = { 0 }; -+ -+ sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK; -+ -+ if (bCallerHasPwrLock) -+ { -+ return RGXScheduleCommandWithoutPowerLock(psDevInfo, -+ RGXFWIF_DM_GP, -+ &sCmpKCCBCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+ else -+ { -+ return RGXScheduleCommand(psDevInfo, -+ RGXFWIF_DM_GP, -+ &sCmpKCCBCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+} -+ -+PVRSRV_ERROR RGXFWInjectFault(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ IMG_UINT32 ui32CBaseMapCtxReg; -+#endif -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) -+ { -+ ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_LT6_AND_MMU_VER_GEQ4; -+ /* Set the mapping context */ -+ RGXWriteReg32(&psDevInfo->sLayerParams, ui32CBaseMapCtxReg, MMU_CONTEXT_MAPPING_FWPRIV); -+ 
(void)RGXReadReg32(&psDevInfo->sLayerParams, ui32CBaseMapCtxReg); /* Fence write */ -+ -+ /* -+ * Catbase-0 (FW MMU context) pointing to unmapped mem to make -+ * FW crash from its memory context -+ */ -+ RGXWriteKernelMMUPC32(&psDevInfo->sLayerParams, -+ RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1, -+ RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSHIFT, -+ RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_SHIFT, -+ 0xDEADBEEF); -+ } -+ else -+ { -+ ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT; -+ /* Set the mapping context */ -+ RGXWriteReg32(&psDevInfo->sLayerParams, ui32CBaseMapCtxReg, MMU_CONTEXT_MAPPING_FWPRIV); -+ (void)RGXReadReg32(&psDevInfo->sLayerParams, ui32CBaseMapCtxReg); /* Fence write */ -+ -+ /* -+ * Catbase-0 (FW MMU context) pointing to unmapped mem to make -+ * FW crash from its memory context -+ */ -+ RGXWriteKernelMMUPC32(&psDevInfo->sLayerParams, -+ RGX_CR_MMU_CBASE_MAPPING, -+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT, -+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT, -+ 0xDEADBEEF); -+ } -+#else -+ /* -+ * Catbase-0 (FW MMU context) pointing to unmapped mem to make -+ * FW crash from its memory context -+ */ -+ RGXWriteKernelMMUPC64(&psDevInfo->sLayerParams, -+ FWCORE_MEM_CAT_BASEx(MMU_CONTEXT_MAPPING_FWPRIV), -+ RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT, -+ RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT, -+ ((0xDEADBEEF -+ >> RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT) -+ << RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT) -+ & ~RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK); -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32DriverID, -+ RGXFWIF_OS_STATE_CHANGE eOSOnlineState) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGXFWIF_KCCB_CMD sOSOnlineStateCmd = { 0 }; -+ const RGXFWIF_SYSDATA *psFwSysData; -+ -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, INVALIDATE); -+ psFwSysData = psDevInfo->psRGXFWIfFwSysData; -+ -+ sOSOnlineStateCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE; -+ sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32DriverID = ui32DriverID; -+ sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = eOSOnlineState; -+ -+#if defined(SUPPORT_AUTOVZ) -+ { -+ IMG_BOOL bConnectionDown = IMG_FALSE; -+ -+ PVR_UNREFERENCED_PARAMETER(psFwSysData); -+ sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = RGXFWIF_OS_OFFLINE; -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ /* Send the offline command regardless if power lock is held or not. -+ * Under AutoVz this is done during regular driver deinit, store-to-ram suspend -+ * or (optionally) from a kernel panic callback. Deinit and suspend operations -+ * take the lock in the rgx pre/post power functions as expected. -+ * The kernel panic callback is a last resort way of letting the firmware know that -+ * the VM is unrecoverable and the vz connection must be disabled. It cannot wait -+ * on other kernel threads to finish and release the lock. */ -+ eError = RGXSendCommand(psDevInfo, -+ &sOSOnlineStateCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ /* Guests and Host going offline should wait for confirmation -+ * from the Firmware of the state change. If this fails, break -+ * the connection on the OS Driver's end as backup. 
*/ -+ if (PVRSRV_VZ_MODE_IS(GUEST) || (ui32DriverID == RGXFW_HOST_DRIVER_ID)) -+ { -+ LOOP_UNTIL_TIMEOUT(SECONDS_TO_MICROSECONDS/2) -+ { -+ KM_CONNECTION_CACHEOP(Fw, INVALIDATE); -+ if (KM_FW_CONNECTION_IS(READY, psDevInfo)) -+ { -+ bConnectionDown = IMG_TRUE; -+ break; -+ } -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ if (!bConnectionDown) -+ { -+ KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); -+ KM_CONNECTION_CACHEOP(Os, FLUSH); -+ } -+ } -+ } -+#else -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* no reason for Guests to update their state or any other VM's. -+ * This is the Hypervisor and Host driver's responsibility. */ -+ return PVRSRV_OK; -+ } -+ else if (eOSOnlineState == RGXFWIF_OS_ONLINE) -+ { -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommand(psDevInfo, -+ RGXFWIF_DM_GP, -+ &sOSOnlineStateCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_ERROR_RETRY) break; -+ -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ } -+ else if (psFwSysData) -+ { -+ const volatile RGXFWIF_OS_RUNTIME_FLAGS *psFwRunFlags = -+ (const volatile RGXFWIF_OS_RUNTIME_FLAGS*) &psFwSysData->asOsRuntimeFlagsMirror[ui32DriverID]; -+ -+ /* Attempt several times until the FW manages to offload the OS */ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ IMG_UINT32 ui32kCCBCommandSlot; -+ -+ /* Send request */ -+ eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, -+ RGXFWIF_DM_GP, -+ &sOSOnlineStateCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ &ui32kCCBCommandSlot); -+ if (unlikely(eError == PVRSRV_ERROR_RETRY)) continue; -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommand", return_); -+ -+ /* Wait for FW to process the cmd */ -+ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", return_); -+ -+ /* read the OS state */ -+ OSMemoryBarrier(NULL); -+ /* check if FW finished offloading the driver and is stopped */ -+ if (psFwRunFlags->bfOsState == RGXFW_CONNECTION_FW_OFFLINE) -+ { -+ eError = PVRSRV_OK; -+ break; -+ } -+ else -+ { -+ eError = PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ } -+ else -+ { -+ eError = PVRSRV_ERROR_NOT_INITIALISED; -+ } -+ -+return_ : -+#endif -+ return eError; -+} -+ -+PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32PHRMode) -+{ -+ PVRSRV_ERROR eError; -+ RGXFWIF_KCCB_CMD sCfgPHRCmd = { 0 }; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ sCfgPHRCmd.eCmdType = RGXFWIF_KCCB_CMD_PHR_CFG; -+ psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode = ui32PHRMode; -+ OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode); -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode, FLUSH); -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Updating the Periodic Hardware Reset Mode inside RGXFWIfRuntimeCfg"); -+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, -+ offsetof(RGXFWIF_RUNTIME_CFG, ui32PHRMode), -+ ui32PHRMode, -+ PDUMP_FLAGS_CONTINUOUS); -+#endif -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommand(psDevInfo, -+ RGXFWIF_DM_GP, -+ &sCfgPHRCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR RGXFWConfigWdg(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32WdgPeriodUs) -+{ -+ PVRSRV_ERROR eError; -+ RGXFWIF_KCCB_CMD sCfgWdgCmd = { 
0 }; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ sCfgWdgCmd.eCmdType = RGXFWIF_KCCB_CMD_WDG_CFG; -+ psDevInfo->psRGXFWIfRuntimeCfg->ui32WdgPeriodUs = ui32WdgPeriodUs; -+ OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32WdgPeriodUs); -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfRuntimeCfg->ui32WdgPeriodUs, FLUSH); -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Updating the firmware watchdog period inside RGXFWIfRuntimeCfg"); -+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, -+ offsetof(RGXFWIF_RUNTIME_CFG, ui32WdgPeriodUs), -+ ui32WdgPeriodUs, -+ PDUMP_FLAGS_CONTINUOUS); -+#endif -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommand(psDevInfo, -+ RGXFWIF_DM_GP, -+ &sCfgWdgCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ return eError; -+} -+ -+ -+void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious) -+{ -+ /* Attempt to detect and deal with any stalled client contexts. -+ * bIgnorePrevious may be set by the caller if they know a context to be -+ * stalled, as otherwise this function will only identify stalled -+ * contexts which have not been previously reported. -+ */ -+ -+ IMG_UINT32 ui32StalledClientMask = 0; -+ -+ if (!(OSTryLockAcquire(psDevInfo->hCCBStallCheckLock))) -+ { -+ PVR_LOG(("RGXCheckForStalledClientContexts: Failed to acquire hCCBStallCheckLock, returning...")); -+ return; -+ } -+ -+ ui32StalledClientMask |= CheckForStalledClientTransferCtxt(psDevInfo); -+ -+ ui32StalledClientMask |= CheckForStalledClientRenderCtxt(psDevInfo); -+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) -+ ui32StalledClientMask |= CheckForStalledClientKickSyncCtxt(psDevInfo); -+#endif -+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK) -+ { -+ ui32StalledClientMask |= CheckForStalledClientComputeCtxt(psDevInfo); -+ } -+ -+ /* If at least one DM stalled bit is different than before */ -+ if (bIgnorePrevious || (psDevInfo->ui32StalledClientMask != ui32StalledClientMask))//(psDevInfo->ui32StalledClientMask ^ ui32StalledClientMask)) -+ { -+ if (ui32StalledClientMask > 0) -+ { -+ static __maybe_unused const char *pszStalledAction = -+#if defined(PVRSRV_STALLED_CCB_ACTION) -+ "force"; -+#else -+ "warn"; -+#endif -+ /* Print all the stalled DMs */ -+ PVR_LOG(("Possible stalled client RGX contexts detected: %s%s%s%s%s%s%s", -+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_GP), -+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TDM_2D), -+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TA), -+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_3D), -+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_CDM), -+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ2D), -+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ3D))); -+ -+ PVR_LOG(("Trying to identify stalled context...(%s) [%d]", -+ pszStalledAction, bIgnorePrevious)); -+ -+ DumpStalledContextInfo(psDevInfo); -+ } -+ else -+ { -+ if (psDevInfo->ui32StalledClientMask> 0) -+ { -+ /* Indicate there are no stalled DMs */ -+ PVR_LOG(("No further stalled client contexts exist")); -+ } -+ } -+ psDevInfo->ui32StalledClientMask = ui32StalledClientMask; -+ psDevInfo->pvEarliestStalledClientCCB = NULL; -+ } -+ 
OSLockRelease(psDevInfo->hCCBStallCheckLock); -+} -+ -+/* -+ RGXUpdateHealthStatus -+*/ -+PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, -+ IMG_BOOL bCheckAfterTimePassed) -+{ -+ const PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_DEVICE_HEALTH_STATUS eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK; -+ PVRSRV_DEVICE_HEALTH_REASON eNewReason = PVRSRV_DEVICE_HEALTH_REASON_NONE; -+ PVRSRV_RGXDEV_INFO* psDevInfo; -+ const RGXFWIF_TRACEBUF* psRGXFWIfTraceBufCtl; -+ const RGXFWIF_SYSDATA* psFwSysData; -+ const RGXFWIF_OSDATA* psFwOsData; -+ const RGXFWIF_CCB_CTL* psKCCBCtl; -+ RGXFWIF_CCB_CTL* psKCCBCtlLocal; -+ IMG_UINT32 ui32ThreadCount; -+ IMG_BOOL bKCCBCmdsWaiting; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psDevNode != NULL); -+ psDevInfo = psDevNode->pvDevice; -+ -+ /* If the firmware is not yet initialised or has already deinitialised, stop here */ -+ if (psDevInfo == NULL || !psDevInfo->bFirmwareInitialised || psDevInfo->pvRegsBaseKM == NULL || -+ psDevInfo->psDeviceNode == NULL || psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT || -+ psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT_POWERED_OFF) -+ { -+ return PVRSRV_OK; -+ } -+ -+ psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; -+ -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwSysData, -+ INVALIDATE); -+ psFwSysData = psDevInfo->psRGXFWIfFwSysData; -+ -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfFwOsData, -+ INVALIDATE); -+ psFwOsData = psDevInfo->psRGXFWIfFwOsData; -+ -+ /* If this is a quick update, then include the last current value... */ -+ if (!bCheckAfterTimePassed) -+ { -+ eNewStatus = OSAtomicRead(&psDevNode->eHealthStatus); -+ eNewReason = OSAtomicRead(&psDevNode->eHealthReason); -+ } -+ -+ /* Decrement the SLR holdoff counter (if non-zero) */ -+ if (psDevInfo->ui32SLRHoldoffCounter > 0) -+ { -+ psDevInfo->ui32SLRHoldoffCounter--; -+ } -+ -+ eError = PVRSRVPowerLock(psDevNode); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock"); -+ -+ /* On a PCI error all reads from the PCI bar may return 0xFFFFFFFF. -+ This value is not valid for a core ID. */ -+ if (psFwSysData->ui32MemFaultCheck == RGX_PCI_ERROR_VALUE_DWORD) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: PCI error", __func__)); -+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD; -+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_PCI_ERROR; -+ PVRSRVDeviceSetState(psDevNode, PVRSRV_DEVICE_STATE_PCI_ERROR); -+ PVRSRVPowerUnlock(psDevNode); -+ goto _RGXUpdateHealthStatus_Exit; -+ } -+ -+ /* If Rogue is not powered on, just skip ahead and check for stalled client CCBs */ -+ if (PVRSRVIsDevicePowered(psDevNode)) -+ { -+ if (psRGXFWIfTraceBufCtl != NULL) -+ { -+ /* -+ Firmware thread checks... -+ */ -+ for (ui32ThreadCount = 0; ui32ThreadCount < RGXFW_THREAD_NUM; ui32ThreadCount++) -+ { -+ const IMG_CHAR* pszTraceAssertInfo; -+ -+ RGXFwSharedMemCacheOpValue(psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf, INVALIDATE); -+ pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szInfo; -+ -+ /* -+ Check if the FW has hit an assert... 
-+ */ -+ if (*pszTraceAssertInfo != '\0') -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware thread %d has asserted: %.*s (%.*s:%d)", -+ __func__, ui32ThreadCount, RGXFW_TRACE_BUFFER_ASSERT_SIZE, -+ pszTraceAssertInfo, RGXFW_TRACE_BUFFER_ASSERT_SIZE, -+ psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szPath, -+ psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.ui32LineNum)); -+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD; -+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_ASSERTED; -+ PVRSRVPowerUnlock(psDevNode); -+ goto _RGXUpdateHealthStatus_Exit; -+ } -+ -+ /* -+ Check the threads to see if they are in the same poll locations as last time... -+ */ -+ if (bCheckAfterTimePassed) -+ { -+ if (psFwSysData->aui32CrPollAddr[ui32ThreadCount] != 0 && -+ psFwSysData->aui32CrPollCount[ui32ThreadCount] == psDevInfo->aui32CrLastPollCount[ui32ThreadCount]) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware stuck on CR poll: T%u polling %s (reg:0x%08X mask:0x%08X)", -+ __func__, ui32ThreadCount, -+ ((psFwSysData->aui32CrPollAddr[ui32ThreadCount] & RGXFW_POLL_TYPE_SET)?("set"):("unset")), -+ psFwSysData->aui32CrPollAddr[ui32ThreadCount] & ~RGXFW_POLL_TYPE_SET, -+ psFwSysData->aui32CrPollMask[ui32ThreadCount])); -+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; -+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING; -+ PVRSRVPowerUnlock(psDevNode); -+ goto _RGXUpdateHealthStatus_Exit; -+ } -+ psDevInfo->aui32CrLastPollCount[ui32ThreadCount] = psFwSysData->aui32CrPollCount[ui32ThreadCount]; -+ } -+ } -+ -+ /* -+ Check if the FW has faulted... -+ */ -+ if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_FW_FAULT) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Firmware has faulted and needs to restart", -+ __func__)); -+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_FAULT; -+ if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_RESTART_REQUESTED) -+ { -+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_RESTARTING; -+ } -+ else -+ { -+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_IDLING; -+ } -+ PVRSRVPowerUnlock(psDevNode); -+ goto _RGXUpdateHealthStatus_Exit; -+ } -+ } -+ -+ /* -+ Event Object Timeouts check... -+ */ -+ if (!bCheckAfterTimePassed) -+ { -+ if (psDevInfo->ui32GEOTimeoutsLastTime > 1 && psPVRSRVData->ui32GEOConsecutiveTimeouts > psDevInfo->ui32GEOTimeoutsLastTime) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Global Event Object Timeouts have risen (from %d to %d)", -+ __func__, -+ psDevInfo->ui32GEOTimeoutsLastTime, psPVRSRVData->ui32GEOConsecutiveTimeouts)); -+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; -+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS; -+ } -+ psDevInfo->ui32GEOTimeoutsLastTime = psPVRSRVData->ui32GEOConsecutiveTimeouts; -+ } -+ -+ /* -+ Check the Kernel CCB pointer is valid. If any commands were waiting last time, then check -+ that some have executed since then. 
-+ */ -+ bKCCBCmdsWaiting = IMG_FALSE; -+ -+ psKCCBCtl = psDevInfo->psKernelCCBCtl; -+ psKCCBCtlLocal = psDevInfo->psKernelCCBCtlLocal; -+ -+ if (psKCCBCtl != NULL && psKCCBCtlLocal != NULL) -+ { -+ /* update KCCB read offset */ -+ RGXFwSharedMemCacheOpValue(psKCCBCtl->ui32ReadOffset, INVALIDATE); -+ psKCCBCtlLocal->ui32ReadOffset = psKCCBCtl->ui32ReadOffset; -+ -+ if (psKCCBCtlLocal->ui32ReadOffset > psKCCBCtlLocal->ui32WrapMask || -+ psKCCBCtlLocal->ui32WriteOffset > psKCCBCtlLocal->ui32WrapMask) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: KCCB has invalid offset (ROFF=%d WOFF=%d)", -+ __func__, psKCCBCtlLocal->ui32ReadOffset, psKCCBCtlLocal->ui32WriteOffset)); -+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD; -+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT; -+ } -+ -+ if (psKCCBCtlLocal->ui32ReadOffset != psKCCBCtlLocal->ui32WriteOffset) -+ { -+ bKCCBCmdsWaiting = IMG_TRUE; -+ } -+ } -+ -+ if (bCheckAfterTimePassed && psFwOsData != NULL) -+ { -+ IMG_UINT32 ui32KCCBCmdsExecuted = psFwOsData->ui32KCCBCmdsExecuted; -+ -+ if (psDevInfo->ui32KCCBCmdsExecutedLastTime == ui32KCCBCmdsExecuted) -+ { -+ /* -+ If something was waiting last time then the Firmware has stopped processing commands. -+ */ -+ if (psDevInfo->bKCCBCmdsWaitingLastTime) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: No KCCB commands executed since check!", -+ __func__)); -+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; -+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED; -+ } -+ -+ /* -+ If no commands are currently pending and nothing happened since the last poll, then -+ schedule a dummy command to ping the firmware so we know it is alive and processing. -+ */ -+ if (!bKCCBCmdsWaiting) -+ { -+ /* Protect the PDumpLoadMem. RGXScheduleCommand() cannot take the -+ * PMR lock itself, because some bridge functions will take the PMR lock -+ * before calling RGXScheduleCommand -+ */ -+ PVRSRV_ERROR eError = RGXFWHealthCheckCmdWithoutPowerLock(psDevNode->pvDevice); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Cannot schedule Health Check command! (0x%x)", -+ __func__, eError)); -+ } -+ else -+ { -+ bKCCBCmdsWaiting = IMG_TRUE; -+ } -+ } -+ } -+ -+ psDevInfo->bKCCBCmdsWaitingLastTime = bKCCBCmdsWaiting; -+ psDevInfo->ui32KCCBCmdsExecutedLastTime = ui32KCCBCmdsExecuted; -+ } -+ } -+ -+ /* -+ Interrupt counts check... -+ */ -+ if (bCheckAfterTimePassed && psFwOsData != NULL) -+ { -+ IMG_UINT32 ui32LISRCount = 0; -+ IMG_UINT32 ui32FWCount = 0; -+ IMG_UINT32 ui32MissingInts = 0; -+ -+ /* Add up the total number of interrupts issued, sampled/received and missed... */ -+#if defined(RGX_FW_IRQ_OS_COUNTERS) -+ /* Only the Host OS has a sample count, so only one counter to check. 
*/ -+ ui32LISRCount += psDevInfo->aui32SampleIRQCount[RGXFW_HOST_DRIVER_ID]; -+ ui32FWCount += OSReadHWReg32(psDevInfo->pvRegsBaseKM, gaui32FwOsIrqCntRegAddr[RGXFW_HOST_DRIVER_ID]); -+#else -+ IMG_UINT32 ui32Index; -+ -+ for (ui32Index = 0; ui32Index < RGXFW_THREAD_NUM; ui32Index++) -+ { -+ ui32LISRCount += psDevInfo->aui32SampleIRQCount[ui32Index]; -+ ui32FWCount += psFwOsData->aui32InterruptCount[ui32Index]; -+ } -+#endif /* RGX_FW_IRQ_OS_COUNTERS */ -+ -+ if (ui32LISRCount < ui32FWCount) -+ { -+ ui32MissingInts = (ui32FWCount-ui32LISRCount); -+ } -+ -+ if (ui32LISRCount == psDevInfo->ui32InterruptCountLastTime && -+ ui32MissingInts >= psDevInfo->ui32MissingInterruptsLastTime && -+ psDevInfo->ui32MissingInterruptsLastTime > 1) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: LISR has not received the last %d interrupts", -+ __func__, ui32MissingInts)); -+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; -+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS; -+ -+ /* Schedule the MISRs to help mitigate the problems of missing interrupts. */ -+ OSScheduleMISR(psDevInfo->pvMISRData); -+ if (psDevInfo->pvAPMISRData != NULL) -+ { -+ OSScheduleMISR(psDevInfo->pvAPMISRData); -+ } -+ } -+ psDevInfo->ui32InterruptCountLastTime = ui32LISRCount; -+ psDevInfo->ui32MissingInterruptsLastTime = ui32MissingInts; -+ } -+ -+ /* Release power lock before RGXCheckForStalledClientContexts */ -+ PVRSRVPowerUnlock(psDevNode); -+ -+ /* -+ Stalled CCB check... -+ */ -+ if (bCheckAfterTimePassed && (PVRSRV_DEVICE_HEALTH_STATUS_OK==eNewStatus)) -+ { -+ RGXCheckForStalledClientContexts(psDevInfo, IMG_FALSE); -+ } -+ -+ /* Notify client driver and system layer of any eNewStatus errors */ -+ if (eNewStatus > PVRSRV_DEVICE_HEALTH_STATUS_OK) -+ { -+ /* Client notification of device error will be achieved by -+ * clients calling UM function RGXGetLastDeviceError() */ -+ psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR; -+ -+ /* Notify system layer */ -+ { -+ PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; -+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; -+ -+ if (psDevConfig->pfnSysDevErrorNotify) -+ { -+ PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; -+ -+ sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR; -+ sErrorData.uErrData.sHostWdgData.ui32Status = (IMG_UINT32)eNewStatus; -+ sErrorData.uErrData.sHostWdgData.ui32Reason = (IMG_UINT32)eNewReason; -+ -+ psDevConfig->pfnSysDevErrorNotify(psDevConfig, -+ &sErrorData); -+ } -+ } -+ } -+ -+ /* -+ Finished, save the new status... -+ */ -+_RGXUpdateHealthStatus_Exit: -+ OSAtomicWrite(&psDevNode->eHealthStatus, eNewStatus); -+ OSAtomicWrite(&psDevNode->eHealthReason, eNewReason); -+ RGXSRV_HWPERF_DEVICE_INFO_HEALTH(psDevInfo, eNewStatus, eNewReason); -+ -+ /* -+ * Attempt to service the HWPerf buffer to regularly transport idle/periodic -+ * packets to host buffer. 
-+ */ -+ if (psDevNode->pfnServiceHWPerf != NULL) -+ { -+ PVRSRV_ERROR eError = psDevNode->pfnServiceHWPerf(psDevNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: " -+ "Error occurred when servicing HWPerf buffer (%d)", -+ __func__, eError)); -+ } -+ } -+ -+ /* Attempt to refresh timer correlation data */ -+ RGXTimeCorrRestartPeriodic(psDevNode); -+ -+ return PVRSRV_OK; -+} /* RGXUpdateHealthStatus */ -+ -+#if defined(SUPPORT_AUTOVZ) -+void RGXUpdateAutoVzWdgToken(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ KM_CONNECTION_CACHEOP(Fw, INVALIDATE); -+ KM_CONNECTION_CACHEOP(Os, INVALIDATE); -+ if (likely(KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo))) -+ { -+ /* read and write back the alive token value to confirm to the -+ * virtualisation watchdog that this connection is healthy */ -+ KM_SET_OS_ALIVE_TOKEN(KM_GET_FW_ALIVE_TOKEN(psDevInfo), psDevInfo); -+ KM_ALIVE_TOKEN_CACHEOP(Os, FLUSH); -+ } -+} -+ -+/* -+ RGXUpdateAutoVzWatchdog -+*/ -+void RGXUpdateAutoVzWatchdog(PVRSRV_DEVICE_NODE* psDevNode) -+{ -+ if (likely(psDevNode != NULL)) -+ { -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; -+ -+ if (unlikely((psDevInfo == NULL || !psDevInfo->bFirmwareInitialised || !psDevInfo->bRGXPowered || -+ psDevInfo->pvRegsBaseKM == NULL || psDevNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT || -+ psDevNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT_POWERED_OFF))) -+ { -+ /* If the firmware is not initialised, stop here */ -+ return; -+ } -+ else -+ { -+ PVRSRV_ERROR eError = PVRSRVPowerLock(psDevNode); -+ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PVRSRVPowerLock"); -+ -+ RGXUpdateAutoVzWdgToken(psDevInfo); -+ PVRSRVPowerUnlock(psDevNode); -+ } -+ } -+} -+ -+PVRSRV_ERROR RGXDisconnectAllGuests(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ IMG_UINT32 ui32DriverID; -+ -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror, -+ INVALIDATE); -+ -+ for (ui32DriverID = RGXFW_GUEST_DRIVER_ID_START; -+ ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED; -+ ui32DriverID++) -+ { -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGXFWIF_CONNECTION_FW_STATE eGuestState = (RGXFWIF_CONNECTION_FW_STATE) -+ psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[ui32DriverID].bfOsState; -+ -+ if (eGuestState == RGXFW_CONNECTION_FW_ACTIVE || -+ eGuestState == RGXFW_CONNECTION_FW_READY) -+ { -+ PVRSRV_ERROR eError = RGXFWSetFwOsState(psDevInfo, ui32DriverID, RGXFWIF_OS_OFFLINE); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXFWSetFwOsState"); -+ } -+ } -+ -+ return PVRSRV_OK; -+} -+#endif /* SUPPORT_AUTOVZ */ -+ -+PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl, -+ IMG_UINT32 *pui32NumCleanupCtl, -+ RGXFWIF_DM eDM, -+ IMG_BOOL bKick, -+ RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, -+ RGX_ZSBUFFER_DATA *psZSBuffer, -+ RGX_ZSBUFFER_DATA *psMSAAScratchBuffer) -+{ -+ PVRSRV_ERROR eError; -+ PRGXFWIF_CLEANUP_CTL *psCleanupCtlWrite = apsCleanupCtl; -+ -+ PVR_ASSERT((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D)); -+ PVR_RETURN_IF_INVALID_PARAM((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D)); -+ -+ if (bKick) -+ { -+ if (psKMHWRTDataSet) -+ { -+ PRGXFWIF_CLEANUP_CTL psCleanupCtl; -+ -+ eError = RGXSetFirmwareAddress(&psCleanupCtl, psKMHWRTDataSet->psHWRTDataFwMemDesc, -+ offsetof(RGXFWIF_HWRTDATA, sCleanupState), -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ *(psCleanupCtlWrite++) = psCleanupCtl; -+ } -+ -+ if (eDM == RGXFWIF_DM_3D) -+ { -+ RGXFWIF_PRBUFFER_TYPE eBufferType; -+ RGX_ZSBUFFER_DATA *psBuffer = 
NULL; -+ -+ for (eBufferType = RGXFWIF_PRBUFFER_START; eBufferType < RGXFWIF_PRBUFFER_MAXSUPPORTED; eBufferType++) -+ { -+ switch (eBufferType) -+ { -+ case RGXFWIF_PRBUFFER_ZSBUFFER: -+ psBuffer = psZSBuffer; -+ break; -+ case RGXFWIF_PRBUFFER_MSAABUFFER: -+ psBuffer = psMSAAScratchBuffer; -+ break; -+ case RGXFWIF_PRBUFFER_MAXSUPPORTED: -+ psBuffer = NULL; -+ break; -+ } -+ if (psBuffer) -+ { -+ (psCleanupCtlWrite++)->ui32Addr = psBuffer->sZSBufferFWDevVAddr.ui32Addr + -+ offsetof(RGXFWIF_PRBUFFER, sCleanupState); -+ psBuffer = NULL; -+ } -+ } -+ } -+ } -+ -+ *pui32NumCleanupCtl = psCleanupCtlWrite - apsCleanupCtl; -+ PVR_ASSERT(*pui32NumCleanupCtl <= RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ RGXFWIF_HWRINFOBUF *psHWRInfoBuf; -+ IMG_UINT32 i; -+ -+ if (psDevNode->pvDevice == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_DEVINFO; -+ } -+ psDevInfo = psDevNode->pvDevice; -+ -+ psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBufCtl; -+ -+ for (i = 0 ; i < RGXFWIF_DM_MAX ; i++) -+ { -+ /* Reset the HWR numbers */ -+ psHWRInfoBuf->aui32HwrDmLockedUpCount[i] = 0; -+ psHWRInfoBuf->aui32HwrDmFalseDetectCount[i] = 0; -+ psHWRInfoBuf->aui32HwrDmRecoveredCount[i] = 0; -+ psHWRInfoBuf->aui32HwrDmOverranCount[i] = 0; -+ } -+ -+ for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++) -+ { -+ psHWRInfoBuf->sHWRInfo[i].ui32HWRNumber = 0; -+ } -+ -+ psHWRInfoBuf->ui32WriteIndex = 0; -+ psHWRInfoBuf->ui32DDReqCount = 0; -+ -+ OSWriteMemoryBarrier(&psHWRInfoBuf->ui32DDReqCount); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR, -+ IMG_DEV_PHYADDR *psPhyAddr, -+ IMG_UINT32 ui32LogicalOffset, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32NumOfPages, -+ IMG_BOOL *bValid) -+{ -+ -+ PVRSRV_ERROR eError; -+ -+ eError = PMRLockSysPhysAddresses(psPMR); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: PMRLockSysPhysAddresses failed (%u)", -+ __func__, -+ eError)); -+ return eError; -+ } -+ -+ eError = PMR_DevPhysAddr(psPMR, -+ ui32Log2PageSize, -+ ui32NumOfPages, -+ ui32LogicalOffset, -+ psPhyAddr, -+ bValid, -+ DEVICE_USE); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: PMR_DevPhysAddr failed (%u)", -+ __func__, -+ eError)); -+ return eError; -+ } -+ -+ -+ eError = PMRUnlockSysPhysAddresses(psPMR); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: PMRUnLockSysPhysAddresses failed (%u)", -+ __func__, -+ eError)); -+ return eError; -+ } -+ -+ return eError; -+} -+ -+#if defined(PDUMP) -+PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32WriteOffset) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (psDevInfo->bDumpedKCCBCtlAlready) -+ { -+ /* exiting capture range or pdump block */ -+ psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE; -+ -+ /* make sure previous cmd is drained in pdump in case we will 'jump' over some future cmds */ -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, -+ PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER, -+ "kCCB(%p): Draining rgxfw_roff (0x%x) == woff (0x%x)", -+ psDevInfo->psKernelCCBCtl, -+ ui32WriteOffset, -+ ui32WriteOffset); -+ eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc, -+ offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset), -+ ui32WriteOffset, -+ 0xffffffff, -+ PDUMP_POLL_OPERATOR_EQUAL, -+ PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: problem pdumping POL for kCCBCtl (%d)", __func__, 
eError)); -+ } -+ } -+ -+ return eError; -+ -+} -+#endif -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXClientConnectCompatCheck_ClientAgainstFW -+ -+ @Description -+ -+ Check compatibility of client and firmware (build options) -+ at the connection time. -+ -+ @Input psDeviceNode - device node -+ @Input ui32ClientBuildOptions - build options for the client -+ -+ @Return PVRSRV_ERROR - depending on mismatch found -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32ClientBuildOptions) -+{ -+ IMG_UINT32 ui32BuildOptionsMismatch; -+ IMG_UINT32 ui32BuildOptionsFW; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGX_FW_INFO_HEADER *psFWInfoHeader; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); -+ -+ if (psDevInfo == NULL || psDevInfo->psRGXFWIfOsInitMemDesc == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Cannot acquire kernel fw compatibility check info, RGXFWIF_OSINIT structure not allocated.", -+ __func__)); -+ return PVRSRV_ERROR_NOT_INITIALISED; -+ } -+ -+ psFWInfoHeader = &psDevInfo->sFWInfoHeader; -+ -+#if !defined(NO_HARDWARE) -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated, -+ INVALIDATE); -+ if (*((volatile IMG_BOOL *) &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) -+ { -+ /* No need to wait if the FW has already updated the values */ -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ } -+#endif -+ -+ ui32BuildOptionsFW = psFWInfoHeader->ui32Flags; -+ ui32BuildOptionsMismatch = ui32ClientBuildOptions ^ ui32BuildOptionsFW; -+ -+ if (ui32BuildOptionsMismatch != 0) -+ { -+ if ((ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0) -+ { -+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; " -+ "extra options present in client: (0x%x). Please check rgx_options.h", -+ ui32ClientBuildOptions & ui32BuildOptionsMismatch )); -+ } -+ -+ if ((ui32BuildOptionsFW & ui32BuildOptionsMismatch) != 0) -+ { -+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; " -+ "extra options present in Firmware: (0x%x). Please check rgx_options.h", -+ ui32BuildOptionsFW & ui32BuildOptionsMismatch )); -+ } -+ -+ return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware and client build options match. [ OK ]", __func__)); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXFwRawHeapAllocMap -+ -+ @Description Register firmware heap for the specified driver -+ -+ @Input psDeviceNode - device node -+ @Input ui32DriverID - Guest driver -+ @Input sDevPAddr - Heap address -+ @Input ui64DevPSize - Heap size -+ -+ @Return PVRSRV_ERROR - PVRSRV_OK if heap setup was successful. 
-+ -+******************************************************************************/ -+PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DriverID, -+ IMG_DEV_PHYADDR sDevPAddr, -+ IMG_UINT64 ui64DevPSize) -+{ -+ PVRSRV_ERROR eError; -+ IMG_CHAR szRegionRAName[RA_MAX_NAME_LENGTH]; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_MEMALLOCFLAGS_T uiRawFwHeapAllocFlags = (RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PREMAP0 + ui32DriverID)); -+ PHYS_HEAP_CONFIG *psFwHeapConfig = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, -+ PHYS_HEAP_USAGE_FW_SHARED); -+ PHYS_HEAP_CONFIG sFwHeapConfig; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); -+ -+ if (psFwHeapConfig == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "FW_MAIN heap config not found.")); -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+ } -+ -+ OSSNPrintf(szRegionRAName, sizeof(szRegionRAName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32DriverID); -+ -+ if (!ui64DevPSize || -+ !sDevPAddr.uiAddr || -+ ui32DriverID >= RGX_NUM_DRIVERS_SUPPORTED || -+ ui64DevPSize != RGX_FIRMWARE_RAW_HEAP_SIZE) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Invalid parameters for %s", szRegionRAName)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ sFwHeapConfig = *psFwHeapConfig; -+ sFwHeapConfig.sStartAddr.uiAddr = 0; -+ sFwHeapConfig.sCardBase.uiAddr = sDevPAddr.uiAddr; -+ sFwHeapConfig.uiSize = RGX_FIRMWARE_RAW_HEAP_SIZE; -+ sFwHeapConfig.eType = PHYS_HEAP_TYPE_LMA; -+ sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_PREMAP; -+ -+ eError = PhysmemCreateHeapLMA(psDeviceNode, -+ RGXPhysHeapGetLMAPolicy(sFwHeapConfig.ui32UsageFlags), -+ &sFwHeapConfig, -+ szRegionRAName, -+ &psDeviceNode->apsFWPremapPhysHeap[ui32DriverID]); -+ PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysmemCreateHeapLMA:PREMAP [%d]", ui32DriverID); -+ -+ eError = PhysHeapAcquire(psDeviceNode->apsFWPremapPhysHeap[ui32DriverID]); -+ PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysHeapAcquire:PREMAP [%d]", ui32DriverID); -+ -+ psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32DriverID] = psDeviceNode->apsFWPremapPhysHeap[ui32DriverID]; -+ -+ PDUMPCOMMENT(psDeviceNode, "Allocate and map raw firmware heap for DriverID: [%d]", ui32DriverID); -+ -+#if (RGX_NUM_DRIVERS_SUPPORTED > 1) -+ /* don't clear the heap of other guests on allocation */ -+ uiRawFwHeapAllocFlags &= (ui32DriverID > RGXFW_HOST_DRIVER_ID) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL); -+#endif -+ -+ /* if the firmware is already powered up, consider the firmware heaps are pre-mapped. */ -+ if (psDeviceNode->bAutoVzFwIsUp) -+ { -+ uiRawFwHeapAllocFlags &= RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); -+ DevmemHeapSetPremapStatus(psDevInfo->psPremappedFwRawHeap[ui32DriverID], IMG_TRUE); -+ } -+ -+ eError = DevmemFwAllocate(psDevInfo, -+ RGX_FIRMWARE_RAW_HEAP_SIZE, -+ uiRawFwHeapAllocFlags, -+ psDevInfo->psPremappedFwRawHeap[ui32DriverID]->pszName, -+ &psDevInfo->psPremappedFwRawMemDesc[ui32DriverID]); -+ PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate"); -+ -+ /* Mark this devmem heap as premapped so allocations will not require device mapping. */ -+ DevmemHeapSetPremapStatus(psDevInfo->psPremappedFwRawHeap[ui32DriverID], IMG_TRUE); -+ -+ if (ui32DriverID == RGXFW_HOST_DRIVER_ID) -+ { -+ /* if the Host's raw fw heap is premapped, mark its main & config sub-heaps accordingly -+ * No memory allocated from these sub-heaps will be individually mapped into the device's -+ * address space so they can remain marked permanently as premapped. 
*/ -+ DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE); -+ DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE); -+ } -+ -+ return eError; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXFwRawHeapUnmapFree -+ -+ @Description Unregister firmware heap for the specified guest driver -+ -+ @Input psDeviceNode - device node -+ @Input ui32DriverID -+ -+******************************************************************************/ -+void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DriverID) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ /* remove the premap status, so the heap can be unmapped and freed */ -+ if (psDevInfo->psPremappedFwRawHeap[ui32DriverID]) -+ { -+ DevmemHeapSetPremapStatus(psDevInfo->psPremappedFwRawHeap[ui32DriverID], IMG_FALSE); -+ } -+ -+ if (psDevInfo->psPremappedFwRawMemDesc[ui32DriverID]) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psPremappedFwRawMemDesc[ui32DriverID]); -+ psDevInfo->psPremappedFwRawMemDesc[ui32DriverID] = NULL; -+ } -+} -+ -+/* -+ RGXReadMETAAddr -+*/ -+static PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 *pui32Value) -+{ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ void __iomem *pvRegBase = psDevInfo->pvSecureRegsBaseKM; -+ IMG_UINT8 __iomem *pui8RegBase = pvRegBase; -+ IMG_UINT32 ui32PollValue; -+ IMG_UINT32 ui32PollMask; -+ IMG_UINT32 ui32PollRegOffset; -+ IMG_UINT32 ui32ReadOffset; -+ IMG_UINT32 ui32WriteOffset; -+ IMG_UINT32 ui32WriteValue; -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) -+ { -+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) -+ { -+ ui32PollValue = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN; -+ ui32PollMask = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN; -+ ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA; -+ ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA; -+ ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA__RD_EN; -+ CHECK_HWBRN_68777(ui32WriteValue); -+ ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_MRUA; -+ } -+ else -+ { -+ ui32PollValue = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN; -+ ui32PollMask = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN; -+ ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA; -+ ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA; -+ ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA__RD_EN; -+ CHECK_HWBRN_68777(ui32WriteValue); -+ ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_EQ1_AND_MRUA; -+ } -+ } -+ else -+ { -+ ui32PollValue = RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN; -+ ui32PollMask = RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN; -+ ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1; -+ ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0; -+ ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN; -+ 
ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX; -+ } -+ -+ /* Wait for Slave Port to be Ready */ -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ (IMG_UINT32 __iomem *) (pui8RegBase + ui32PollRegOffset), -+ ui32PollValue, -+ ui32PollMask, -+ POLL_FLAG_LOG_ERROR, -+ NULL) != PVRSRV_OK) -+ { -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ /* Issue the Read */ -+ OSWriteUncheckedHWReg32(pvRegBase, ui32WriteOffset, ui32WriteValue); -+ (void)OSReadUncheckedHWReg32(pvRegBase, ui32WriteOffset); -+ -+ /* Wait for Slave Port to be Ready: read complete */ -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ (IMG_UINT32 __iomem *) (pui8RegBase + ui32PollRegOffset), -+ ui32PollValue, -+ ui32PollMask, -+ POLL_FLAG_LOG_ERROR, -+ NULL) != PVRSRV_OK) -+ { -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ /* Read the value */ -+ *pui32Value = OSReadUncheckedHWReg32(pvRegBase, ui32ReadOffset); -+#else -+ IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM; -+ -+ /* Wait for Slave Port to be Ready */ -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ (IMG_UINT32 __iomem *) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), -+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, -+ POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) -+ { -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ /* Issue the Read */ -+ OSWriteHWReg32( -+ psDevInfo->pvRegsBaseKM, -+ RGX_CR_META_SP_MSLVCTRL0, -+ ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN); -+ (void) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0); -+ -+ /* Wait for Slave Port to be Ready: read complete */ -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ (IMG_UINT32 __iomem *) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), -+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, -+ POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) -+ { -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ /* Read the value */ -+ *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAX); -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ RGXWriteMETAAddr -+*/ -+static PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 ui32Value) -+{ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ void __iomem *pvRegBase = psDevInfo->pvSecureRegsBaseKM; -+ IMG_UINT8 __iomem *pui8RegBase = pvRegBase; -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) -+ { -+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) -+ { -+ /* Wait for Slave Port to be Ready */ -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA), -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN, -+ POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) -+ { -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ /* Issue the Write */ -+ CHECK_HWBRN_68777(ui32METAAddr); -+ OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA, ui32METAAddr); -+ OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_MRUA, ui32Value); -+ } -+ else -+ { -+ /* Wait for Slave Port to be Ready */ -+ if 
(PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA), -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN, -+ POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) -+ { -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ /* Issue the Write */ -+ CHECK_HWBRN_68777(ui32METAAddr); -+ OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA, ui32METAAddr); -+ OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_EQ1_AND_MRUA, ui32Value); -+ } -+ } -+ else -+ { -+ /* Wait for Slave Port to be Ready */ -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), -+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, -+ POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) -+ { -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ /* Issue the Write */ -+ OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL0, ui32METAAddr); -+ (void) OSReadUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ -+ OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAT, ui32Value); -+ (void) OSReadUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAT); /* Fence write */ -+ } -+#else -+ IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM; -+ -+ /* Wait for Slave Port to be Ready */ -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), -+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, -+ POLL_FLAG_LOG_ERROR, NULL) != PVRSRV_OK) -+ { -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ /* Issue the Write */ -+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0, ui32METAAddr); -+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT, ui32Value); -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR RGXReadFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 *pui32Value) -+{ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) -+ { -+ return RGXReadMETAAddr(psDevInfo, ui32FWAddr, pui32Value); -+ } -+ -+#if !defined(EMULATOR) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) -+ { -+ return RGXRiscvReadMem(psDevInfo, ui32FWAddr, pui32Value); -+ } -+#endif -+ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+} -+ -+PVRSRV_ERROR RGXWriteFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value) -+{ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) -+ { -+ return RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32Value); -+ } -+ -+#if !defined(EMULATOR) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) -+ { -+ return RGXRiscvWriteMem(psDevInfo, ui32FWAddr, ui32Value); -+ } -+#endif -+ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+} -+ -+/* -+ RGXWriteMetaRegThroughSP -+*/ -+PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32RegValue) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ /* Wait for Slave Port to be Ready */ -+ PVRSRV_RGXDEV_INFO 
*psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) -+ { -+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) -+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN); -+ if (eError == PVRSRV_OK) -+ { -+ /* Issue a Write */ -+ CHECK_HWBRN_68777(ui32RegAddr); -+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA, ui32RegAddr); -+ (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA); /* Fence write */ -+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_MRUA, ui32RegValue); -+ (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_MRUA); /* Fence write */ -+ } -+ } -+ else -+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN); -+ if (eError == PVRSRV_OK) -+ { -+ /* Issue a Write */ -+ CHECK_HWBRN_68777(ui32RegAddr); -+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA, ui32RegAddr); -+ (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA); /* Fence write */ -+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA, ui32RegValue); -+ (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA); /* Fence write */ -+ } -+ } -+ } -+ else -+#endif -+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_META_SP_MSLVCTRL1, -+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); -+ if (eError == PVRSRV_OK) -+ { -+ /* Issue a Write */ -+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr); -+ (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ -+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue); -+ (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT); /* Fence write */ -+ } -+ } -+ -+ return eError; -+} -+ -+/* -+ RGXReadMetaRegThroughSP -+*/ -+PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32* ui32RegValue) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ /* Wait for Slave Port to be Ready */ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ PVRSRV_RGXDEV_INFO *psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) -+ { -+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) -+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN); -+ if (eError == 
PVRSRV_OK) -+ { -+ /* Issue a Read */ -+ CHECK_HWBRN_68777(ui32RegAddr); -+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA, -+ ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA__RD_EN); -+ (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_MRUA); /* Fence write */ -+ -+ /* Wait for Slave Port to be Ready */ -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN); -+ if (eError != PVRSRV_OK) return eError; -+ } -+ } -+ else -+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN); -+ if (eError == PVRSRV_OK) -+ { -+ /* Issue a Read */ -+ CHECK_HWBRN_68777(ui32RegAddr); -+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA, -+ ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA__RD_EN); -+ (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_EQ1_AND_MRUA); /* Fence write */ -+ -+ /* Wait for Slave Port to be Ready */ -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN); -+ if (eError != PVRSRV_OK) return eError; -+ } -+ } -+#if !defined(NO_HARDWARE) -+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) -+ { -+ *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_MRUA); -+ } -+ else -+ { -+ *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_EQ1_AND_MRUA); -+ } -+#else -+ *ui32RegValue = 0xFFFFFFFF; -+#endif -+ } -+ else -+#endif -+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_META_SP_MSLVCTRL1, -+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); -+ if (eError == PVRSRV_OK) -+ { -+ /* Issue a Read */ -+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN); -+ (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ -+ -+ /* Wait for Slave Port to be Ready */ -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_META_SP_MSLVCTRL1, -+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); -+ if (eError != PVRSRV_OK) return eError; -+ } -+#if !defined(NO_HARDWARE) -+ *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX); -+#else -+ *ui32RegValue = 0xFFFFFFFF; -+#endif -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32FwVA, -+ IMG_CPU_PHYADDR *psCpuPA, -+ IMG_DEV_PHYADDR *psDevPA, -+ IMG_UINT64 *pui64RawPTE) -+{ -+ 
PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_CPU_PHYADDR sCpuPA = {0U}; -+ IMG_DEV_PHYADDR sDevPA = {0U}; -+ IMG_UINT64 ui64RawPTE = 0U; -+ MMU_FAULT_DATA sFaultData = {0U}; -+ MMU_CONTEXT *psFwMMUCtx = psDevInfo->psKernelMMUCtx; -+ IMG_UINT32 ui32FwHeapBase = (IMG_UINT32) (RGX_FIRMWARE_RAW_HEAP_BASE & UINT_MAX); -+ IMG_UINT32 ui32FwHeapEnd = ui32FwHeapBase + (RGX_NUM_DRIVERS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE); -+ IMG_UINT32 ui32DriverID = (ui32FwVA - ui32FwHeapBase) / RGX_FIRMWARE_RAW_HEAP_SIZE; -+ IMG_UINT32 ui32HeapId; -+ PHYS_HEAP *psPhysHeap; -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ /* MIPS uses the same page size as the OS, while others default to 4K pages */ -+ IMG_UINT32 ui32FwPageSize = RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) ? -+ OSGetPageSize() : BIT(RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT); -+#else -+ /* default to 4K pages */ -+ IMG_UINT32 ui32FwPageSize = BIT(RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT); -+#endif -+ -+ IMG_UINT32 ui32PageOffset = (ui32FwVA & (ui32FwPageSize - 1)); -+ -+ PVR_LOG_GOTO_IF_INVALID_PARAM((ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED), -+ eError, ErrorExit); -+ -+ PVR_LOG_GOTO_IF_INVALID_PARAM(((psCpuPA != NULL) || -+ (psDevPA != NULL) || -+ (pui64RawPTE != NULL)), -+ eError, ErrorExit); -+ -+ PVR_LOG_GOTO_IF_INVALID_PARAM(((ui32FwVA >= ui32FwHeapBase) && -+ (ui32FwVA < ui32FwHeapEnd)), -+ eError, ErrorExit); -+ -+ ui32HeapId = (ui32DriverID == RGXFW_HOST_DRIVER_ID) ? -+ PVRSRV_PHYS_HEAP_FW_MAIN : (PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32DriverID); -+ psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[ui32HeapId]; -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ /* MIPS is equipped with a dedicated MMU */ -+ RGXMipsCheckFaultAddress(psFwMMUCtx, ui32FwVA, &sFaultData); -+ } -+ else -+#endif -+ { -+ IMG_UINT64 ui64FwDataBaseMask; -+ IMG_DEV_VIRTADDR sDevVAddr; -+ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) -+ { -+ ui64FwDataBaseMask = ~(RGXFW_SEGMMU_DATA_META_CACHE_MASK | -+ RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK | -+ RGXFW_SEGMMU_DATA_BASE_ADDRESS); -+ } -+#if !defined(EMULATOR) -+ else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) -+ { -+ ui64FwDataBaseMask = ~(RGXRISCVFW_GET_REGION_BASE(0xF)); -+ } -+#endif -+ else -+ { -+ PVR_LOG_GOTO_WITH_ERROR("RGXGetFwMapping", eError, PVRSRV_ERROR_NOT_IMPLEMENTED, ErrorExit); -+ } -+ -+ sDevVAddr.uiAddr = (ui32FwVA & ui64FwDataBaseMask) | RGX_FIRMWARE_RAW_HEAP_BASE; -+ -+ /* Fw CPU shares a subset of the GPU's VA space */ -+ MMU_CheckFaultAddress(psFwMMUCtx, &sDevVAddr, &sFaultData); -+ } -+ -+ ui64RawPTE = sFaultData.sLevelData[MMU_LEVEL_1].ui64Address; -+ -+ if (eError == PVRSRV_OK) -+ { -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ IMG_BOOL bValidPage = (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ? -+ BITMASK_HAS(ui64RawPTE, RGXMIPSFW_TLB_VALID) : -+ BITMASK_HAS(ui64RawPTE, RGX_MMUCTRL_PT_DATA_VALID_EN); -+#else -+ IMG_BOOL bValidPage = BITMASK_HAS(ui64RawPTE, RGX_MMUCTRL_PT_DATA_VALID_EN); -+#endif -+ -+ if (!bValidPage) -+ { -+ /* don't report invalid pages */ -+ eError = PVRSRV_ERROR_DEVICEMEM_NO_MAPPING; -+ } -+ else -+ { -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ sDevPA.uiAddr = ui32PageOffset + ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ? 
-+ RGXMIPSFW_TLB_GET_PA(ui64RawPTE) : -+ (ui64RawPTE & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK)); -+#else -+ sDevPA.uiAddr = ui32PageOffset + (ui64RawPTE & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK); -+#endif -+ -+ /* Only the Host's Firmware heap is present in the Host's CPU IPA space */ -+ if (ui32DriverID == RGXFW_HOST_DRIVER_ID) -+ { -+ PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPA, &sDevPA); -+ } -+ else -+ { -+ sCpuPA.uiAddr = 0U; -+ } -+ } -+ } -+ -+ if (psCpuPA != NULL) -+ { -+ *psCpuPA = sCpuPA; -+ } -+ -+ if (psDevPA != NULL) -+ { -+ *psDevPA = sDevPA; -+ } -+ -+ if (pui64RawPTE != NULL) -+ { -+ *pui64RawPTE = ui64RawPTE; -+ } -+ -+ErrorExit: -+ return eError; -+} -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+/*! -+******************************************************************************* -+@Function RGXIsValidWorkloadEstCCBCommand -+ -+@Description Checks if command type can be used for workload estimation -+ -+@Input eType Command type to check -+ -+ -+@Return IMG_BOOL -+******************************************************************************/ -+INLINE IMG_BOOL RGXIsValidWorkloadEstCCBCommand(RGXFWIF_CCB_CMD_TYPE eType) -+{ -+ switch (eType) -+ { -+ case RGXFWIF_CCB_CMD_TYPE_GEOM: -+ case RGXFWIF_CCB_CMD_TYPE_3D: -+ case RGXFWIF_CCB_CMD_TYPE_CDM: -+ case RGXFWIF_CCB_CMD_TYPE_TQ_TDM: -+ return IMG_TRUE; -+ default: -+ PVR_ASSERT(IMG_FALSE); -+ return IMG_FALSE; -+ } -+} -+#endif -+ -+PVRSRV_ERROR -+RGXFWSetVzConnectionCooldownPeriod(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32VzConnectionCooldownPeriodInSec) -+{ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ psDevInfo->psRGXFWIfRuntimeCfg->ui32VzConnectionCooldownPeriodInSec = ui32VzConnectionCooldownPeriodInSec; -+ OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32VzConnectionCooldownPeriodInSec); -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Updating the Vz reconnect request cooldown period inside RGXFWIfRuntimeCfg"); -+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, -+ offsetof(RGXFWIF_RUNTIME_CFG, ui32VzConnectionCooldownPeriodInSec), -+ ui32VzConnectionCooldownPeriodInSec, -+ PDUMP_FLAGS_CONTINUOUS); -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+/****************************************************************************** -+ End of file (rgxfwutils.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxfwutils.h b/drivers/gpu/drm/img-rogue/rgxfwutils.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxfwutils.h -@@ -0,0 +1,1292 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX firmware utility routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX firmware utility routines -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
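[Editorial illustration, not part of the patch] The slave-port accessors above (RGXReadMETAAddr, RGXWriteMETAAddr and the *RegThroughSP variants) all follow the same sequence: poll MSLVCTRL1 until the READY and GBLPORT_IDLE bits are set, write the target address (with RD_EN for reads) to MSLVCTRL0, poll for READY again, then read MSLVDATAX or write MSLVDATAT. The standalone sketch below models that sequence against a fake register file; the offsets, the poll_ready() helper and the simulated hardware are invented for illustration and are not part of the driver.

/* Illustrative only: a user-space model of the poll/issue/poll/read
 * slave-port sequence. All names and offsets here are made up. */
#include <stdint.h>
#include <stdio.h>

#define FAKE_MSLVCTRL0   0x0u   /* address + RD_EN              */
#define FAKE_MSLVCTRL1   0x4u   /* READY / GBLPORT_IDLE status  */
#define FAKE_MSLVDATAX   0x8u   /* read-back data               */
#define FAKE_READY_BITS  0x3u

static uint32_t fake_regs[3];

static uint32_t reg_read(uint32_t off)              { return fake_regs[off / 4u]; }
static void     reg_write(uint32_t off, uint32_t v) { fake_regs[off / 4u] = v; }

/* Stand-in for PVRSRVPollForValueKM: spin until the ready bits are set. */
static int poll_ready(void)
{
    unsigned tries = 1000u;
    while (tries--)
    {
        if ((reg_read(FAKE_MSLVCTRL1) & FAKE_READY_BITS) == FAKE_READY_BITS)
            return 0;
    }
    return -1; /* would map to PVRSRV_ERROR_TIMEOUT */
}

/* The same four-step shape as RGXReadMETAAddr. */
static int slave_port_read(uint32_t meta_addr, uint32_t *value)
{
    if (poll_ready())                    return -1;  /* wait for idle port   */
    reg_write(FAKE_MSLVCTRL0, meta_addr | 1u);       /* issue read (RD_EN)   */
    if (poll_ready())                    return -1;  /* wait for completion  */
    *value = reg_read(FAKE_MSLVDATAX);               /* fetch the result     */
    return 0;
}

int main(void)
{
    uint32_t v = 0;
    fake_regs[FAKE_MSLVCTRL1 / 4u] = FAKE_READY_BITS; /* pretend HW is idle  */
    fake_regs[FAKE_MSLVDATAX / 4u] = 0xCAFEu;         /* pretend read result */
    printf("read %s, value 0x%X\n", slave_port_read(0x1000u, &v) ? "failed" : "ok", v);
    return 0;
}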
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#ifndef RGXFWUTILS_H -+#define RGXFWUTILS_H -+ -+#include "rgx_memallocflags.h" -+#include "log2.h" -+#include "rgxdevice.h" -+#include "rgxccb.h" -+#include "devicemem.h" -+#include "device.h" -+#include "pvr_notifier.h" -+#include "pvrsrv.h" -+#include "connection_server.h" -+#include "rgxta3d.h" -+#include "devicemem_utils.h" -+#include "rgxmem.h" -+#include "rgxfwmemctx.h" -+ -+#define RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT "FwRawDriverID%d" /*!< RGX Raw Firmware Heap identifier */ -+ -+static INLINE PVRSRV_ERROR _SelectDevMemHeap(PVRSRV_RGXDEV_INFO *psDevInfo, -+ PVRSRV_MEMALLOCFLAGS_T *puiFlags, -+ DEVMEM_HEAP **ppsFwHeap) -+{ -+ PVRSRV_PHYS_HEAP ePhysHeap = (PVRSRV_PHYS_HEAP)PVRSRV_GET_PHYS_HEAP_HINT(*puiFlags); -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ switch (ePhysHeap) -+ { -+ case PVRSRV_PHYS_HEAP_FW_CODE: -+ case PVRSRV_PHYS_HEAP_FW_PRIV_DATA: -+ case PVRSRV_PHYS_HEAP_FW_MAIN: -+ { -+ *ppsFwHeap = psDevInfo->psFirmwareMainHeap; -+ break; -+ } -+ case PVRSRV_PHYS_HEAP_FW_CONFIG: -+ { -+ *ppsFwHeap = psDevInfo->psFirmwareConfigHeap; -+ break; -+ } -+ case PVRSRV_PHYS_HEAP_FW_PREMAP0: -+ case PVRSRV_PHYS_HEAP_FW_PREMAP1: -+ case PVRSRV_PHYS_HEAP_FW_PREMAP2: -+ case PVRSRV_PHYS_HEAP_FW_PREMAP3: -+ case PVRSRV_PHYS_HEAP_FW_PREMAP4: -+ case PVRSRV_PHYS_HEAP_FW_PREMAP5: -+ case PVRSRV_PHYS_HEAP_FW_PREMAP6: -+ case PVRSRV_PHYS_HEAP_FW_PREMAP7: -+ { -+ IMG_UINT32 ui32DriverID = ePhysHeap - PVRSRV_PHYS_HEAP_FW_PREMAP0; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED, "ui32DriverID"); -+ *ppsFwHeap = psDevInfo->psPremappedFwRawHeap[ui32DriverID]; -+ break; -+ } -+ default: -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid phys heap", __func__)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ break; -+ } -+ } -+ -+ return eError; -+} -+ -+/* -+ * Firmware-only allocation (which are initialised by the host) must be aligned to the SLC cache line size. -+ * This is because firmware-only allocations are GPU_CACHE_INCOHERENT and this causes problems -+ * if two allocations share the same cache line; e.g. the initialisation of the second allocation won't -+ * make it into the SLC cache because it has been already loaded when accessing the content of the first allocation. -+ */ -+static INLINE PVRSRV_ERROR DevmemFwAllocate(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_DEVMEM_SIZE_T uiSize, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszText, -+ DEVMEM_MEMDESC **ppsMemDescPtr) -+{ -+ IMG_DEV_VIRTADDR sTmpDevVAddr; -+ PVRSRV_ERROR eError; -+ DEVMEM_HEAP *psFwHeap; -+ IMG_DEVMEM_ALIGN_T uiAlign; -+ -+ PVR_DPF_ENTERED; -+ -+ /* Enforce the standard pre-fix naming scheme callers must follow */ -+ PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w')); -+ -+ /* Imported from AppHint , flag to poison allocations when freed */ -+ uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag; -+ -+ eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF_RETURN_RC(eError); -+ } -+ -+ if (psFwHeap == psDevInfo->psFirmwareConfigHeap) -+ { -+ /* -+ * All structures allocated from the Firmware Config sub-heap must start at the same pre-determined -+ * offsets, regardless of the system's page size (e.g. 4k,16k,64k). The alignment requirement is -+ * satisfied for virtual addresses during the mapping stage. Physical allocations do not take -+ * alignment into consideration. -+ * VZ drivers usually preallocate and pre-map the entire Firmware heap range. 
Any allocations from -+ * this heap are physical allocations only, having their device VAs derived from their PAs. This makes -+ * it impossible to fulfil alignment requirements. -+ * To work around this limitation, allocation sizes are rounded to the nearest multiple of 64kb, -+ * regardless of the actual size of object. -+ */ -+ uiAlign = RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY; -+ -+ uiSize = PVR_ALIGN(uiSize, RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY); -+ } -+ else -+ { -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ /* Aligning FW based allocations for MIPS based rogue cores at cache line boundary(16 bytes) instead -+ * of SLC(64 bytes) to have more compact memory with less waste and hopefully save some TLB misses. -+ * MIPS CPU cores alignment. -+ */ -+ uiAlign = RGXMIPSFW_MICROAPTIVEAP_CACHELINE_SIZE; -+ } -+ else -+#endif -+ { -+ /* Non-MIPS CPU cores alignment */ -+ uiAlign = (GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS))); -+ } -+ } -+ -+ RGXFwSharedMemCPUCacheMode(psDevInfo->psDeviceNode, -+ &uiFlags); -+ -+ eError = DevmemAllocateAndMap(psFwHeap, -+ uiSize, -+ uiAlign, -+ uiFlags, -+ pszText, -+ ppsMemDescPtr, -+ &sTmpDevVAddr); -+ -+ PVR_DPF_RETURN_RC(eError); -+} -+ -+static INLINE PVRSRV_ERROR DevmemFwAllocateExportable(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_DEVMEM_ALIGN_T uiAlign, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszText, -+ DEVMEM_MEMDESC **ppsMemDescPtr) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; -+ IMG_DEV_VIRTADDR sTmpDevVAddr; -+ PVRSRV_ERROR eError; -+ DEVMEM_HEAP *psFwHeap; -+ IMG_UINT32 ui32HeapLog2PageSize; -+ -+ PVR_DPF_ENTERED; -+ -+ /* Enforce the standard pre-fix naming scheme callers must follow */ -+ PVR_ASSERT((pszText != NULL) && -+ (pszText[0] == 'F') && (pszText[1] == 'w') && -+ (pszText[2] == 'E') && (pszText[3] == 'x')); -+ -+ /* Imported from AppHint , flag to poison allocations when freed */ -+ uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag; -+ -+ eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF_RETURN_RC(eError); -+ } -+ -+ RGXFwSharedMemCPUCacheMode(psDevInfo->psDeviceNode, -+ &uiFlags); -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ /* MIPS cores */ -+ ui32HeapLog2PageSize = ExactLog2(uiAlign); -+ } -+ else -+#endif -+ { -+ /* Meta and RiscV cores */ -+ ui32HeapLog2PageSize = DevmemGetHeapLog2PageSize(psFwHeap); -+ } -+ -+ eError = DevmemAllocateExportable(psDeviceNode, -+ uiSize, -+ uiAlign, -+ ui32HeapLog2PageSize, -+ uiFlags, -+ pszText, -+ ppsMemDescPtr); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "FW DevmemAllocateExportable failed (%u)", eError)); -+ PVR_DPF_RETURN_RC(eError); -+ } -+ -+ /* -+ We need to map it so the heap for this allocation -+ is set -+ */ -+ eError = DevmemMapToDevice(*ppsMemDescPtr, -+ psFwHeap, -+ &sTmpDevVAddr); -+ if (eError != PVRSRV_OK) -+ { -+ DevmemFree(*ppsMemDescPtr); -+ PVR_DPF((PVR_DBG_ERROR, "FW DevmemMapToDevice failed (%u)", eError)); -+ } -+ -+ PVR_DPF_RETURN_RC1(eError, *ppsMemDescPtr); -+} -+ -+static INLINE PVRSRV_ERROR DevmemFwAllocateSparse(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_DEVMEM_SIZE_T uiSize, -+ IMG_UINT32 ui32NumPhysChunks, -+ IMG_UINT32 ui32NumVirtChunks, -+ IMG_UINT32 *pui32MappingTable, -+ PVRSRV_MEMALLOCFLAGS_T uiFlags, -+ const IMG_CHAR *pszText, -+ DEVMEM_MEMDESC **ppsMemDescPtr) 
-+{ -+ IMG_DEV_VIRTADDR sTmpDevVAddr; -+ PVRSRV_ERROR eError; -+ DEVMEM_HEAP *psFwHeap; -+ IMG_UINT32 ui32Align; -+ -+ PVR_DPF_ENTERED; -+ -+ /* Enforce the standard pre-fix naming scheme callers must follow */ -+ PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w')); -+ ui32Align = GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)); -+ -+ /* Imported from AppHint , flag to poison allocations when freed */ -+ uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag; -+ -+ eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF_RETURN_RC(eError); -+ } -+ -+ RGXFwSharedMemCPUCacheMode(psDevInfo->psDeviceNode, -+ &uiFlags); -+ -+ eError = DevmemAllocateSparse(psDevInfo->psDeviceNode, -+ uiSize, -+ ui32NumPhysChunks, -+ ui32NumVirtChunks, -+ pui32MappingTable, -+ ui32Align, -+ DevmemGetHeapLog2PageSize(psFwHeap), -+ uiFlags | PVRSRV_MEMALLOCFLAG_SPARSE_NO_SCRATCH_BACKING, -+ pszText, -+ ppsMemDescPtr); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF_RETURN_RC(eError); -+ } -+ /* -+ We need to map it so the heap for this allocation -+ is set -+ */ -+ eError = DevmemMapToDevice(*ppsMemDescPtr, -+ psFwHeap, -+ &sTmpDevVAddr); -+ if (eError != PVRSRV_OK) -+ { -+ DevmemFree(*ppsMemDescPtr); -+ PVR_DPF_RETURN_RC(eError); -+ } -+ -+ PVR_DPF_RETURN_RC(eError); -+} -+ -+ -+static INLINE void DevmemFwUnmapAndFree(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DEVMEM_MEMDESC *psMemDesc) -+{ -+ PVR_DPF_ENTERED1(psMemDesc); -+ -+ DevmemReleaseDevVirtAddr(psMemDesc); -+ DevmemFree(psMemDesc); -+ -+ PVR_DPF_RETURN; -+} -+ -+/* -+ * This function returns the value of the hardware register RGX_CR_TIMER -+ * which is a timer counting in ticks. -+ */ -+ -+static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ IMG_UINT64 ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER); -+ -+ /* -+ * In order to avoid having to issue three 32-bit reads to detect the -+ * lower 32-bits wrapping, the MSB of the low 32-bit word is duplicated -+ * in the MSB of the high 32-bit word. If the wrap happens, we just read -+ * the register again (it will not wrap again so soon). -+ */ -+ if ((ui64Time ^ (ui64Time << 32)) & ~RGX_CR_TIMER_BIT31_CLRMSK) -+ { -+ ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER); -+ } -+ -+ return (ui64Time & ~RGX_CR_TIMER_VALUE_CLRMSK) >> RGX_CR_TIMER_VALUE_SHIFT; -+} -+ -+/* -+ * This FW Common Context is only mapped into kernel for initialisation and cleanup purposes. -+ * Otherwise this allocation is only used by the FW. 
-+ * Therefore the GPU cache doesn't need coherency, and write-combine will -+ * suffice on the CPU side (WC buffer will be flushed at the first kick) -+ */ -+#define RGX_FWCOMCTX_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)| \ -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ -+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | \ -+ PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)) -+ -+#define RGX_FWCODEDATA_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | \ -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ -+ PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ -+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | \ -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) -+ -+#define RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ -+ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \ -+ PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)) -+ -+#define RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ -+ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \ -+ PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CONFIG)) -+ -+#define RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ -+ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \ -+ PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)) -+ -+/* Firmware memory that is not accessible by the CPU. */ -+#define RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ -+ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \ -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) -+ -+/* Firmware shared memory that is supposed to be read-only to the CPU. -+ * In reality it isn't due to ZERO_ON_ALLOC which enforces CPU_WRITEABLE -+ * flag on the allocations. 
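[Editorial illustration, not part of the patch] RGXReadHWTimerReg above relies on the hardware mirroring bit 31 of the timer's low word into the top bit of the high word: if the two halves were latched either side of a low-word wrap, the mirrored bit and the live bit 31 disagree, so (t ^ (t << 32)) has its top bit set and the register is simply read again. A standalone check of that arithmetic, assuming the bit layout described in the comment above (constants invented):

/* Illustrative only: the torn-read detection used by RGXReadHWTimerReg. */
#include <stdint.h>
#include <stdio.h>

/* Assumed layout: bit 63 carries a copy of bit 31 taken when the high word was latched. */
static int timer_sample_is_torn(uint64_t t)
{
    /* (t << 32) moves the live bit 31 up to bit 63; XOR compares it with the mirrored copy. */
    return ((t ^ (t << 32)) & (1ULL << 63)) != 0;
}

int main(void)
{
    /* Consistent sample: live bit 31 and mirrored bit 63 agree. */
    uint64_t ok   = (1ULL << 63) | 0x80001234ULL;
    /* Torn sample: low word wrapped (bit 31 now 0) after bit 63 was latched as 1. */
    uint64_t torn = (1ULL << 63) | 0x00000005ULL;

    printf("ok   -> torn=%d\n", timer_sample_is_torn(ok));   /* prints 0 */
    printf("torn -> torn=%d\n", timer_sample_is_torn(torn)); /* prints 1 */
    return 0;
}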
*/ -+#define RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \ -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) | \ -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ -+ PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ -+ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \ -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \ -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) -+ -+/* data content being kept from previous boot cycles from physical memory must not be cleared during allocation */ -+#define RGX_AUTOVZ_KEEP_FW_DATA_MASK(bKeepMem) ((bKeepMem) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL)) -+ -+/****************************************************************************** -+ * RGXSetFirmwareAddress Flags -+ *****************************************************************************/ -+#define RFW_FWADDR_FLAG_NONE (0) /*!< Void flag */ -+#define RFW_FWADDR_NOREF_FLAG (1U << 0) /*!< It is safe to immediately release the reference to the pointer, -+ otherwise RGXUnsetFirmwareAddress() must be call when finished. */ -+ -+IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo); -+PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, PVRSRV_MEMALLOCFLAGS_T uiAllocFlags); -+ -+#if defined(SUPPORT_TBI_INTERFACE) -+IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo); -+PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo); -+#endif -+ -+PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_BOOL bEnableSignatureChecks, -+ IMG_UINT32 ui32SignatureChecksBufSize, -+ IMG_UINT32 ui32HWPerfFWBufSizeKB, -+ IMG_UINT64 ui64HWPerfFilter, -+ IMG_UINT32 ui32ConfigFlags, -+ IMG_UINT32 ui32ConfigFlagsExt, -+ IMG_UINT32 ui32FwOsCfgFlags, -+ IMG_UINT32 ui32LogType, -+ IMG_UINT32 ui32FilterFlags, -+ IMG_UINT32 ui32JonesDisableMask, -+ IMG_UINT32 ui32HWRDebugDumpLimit, -+ IMG_UINT32 ui32HWPerfCountersDataSize, -+#if defined(PVR_ARCH_VOLCANIC) -+ IMG_UINT32 ui32RenderKillingCtl, -+ IMG_UINT32 ui32CDMTDMKillingCtl, -+ IMG_UINT32 *pui32USRMNumRegions, -+ IMG_UINT64 *pui64UVBRMNumRegions, -+ IMG_UINT64 ui64ClkCtrl0, -+ IMG_UINT64 ui64ClkCtrl1, -+ IMG_UINT32 ui32ClkCtrl2, -+ IMG_BOOL bSPUClockGating, -+ IMG_UINT32 ui32AvailablePowUnitsMask, -+ IMG_UINT32 ui32AvailableRACMask, -+#endif -+ IMG_UINT32 *pui32TPUTrilinearFracMask, -+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf, -+ FW_PERF_CONF eFirmwarePerf, -+ IMG_UINT32 ui32KCCBSizeLog2); -+ -+ -+void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+/*************************************************************************/ /*! -+@Function RGXSetupFwAllocation -+ -+@Description Sets a pointer in a firmware data structure. 
-+ -+@Input psDevInfo Device Info struct -+@Input uiAllocFlags Flags determining type of memory allocation -+@Input ui32Size Size of memory allocation -+@Input pszName Allocation label -+@Input psFwPtr Address of the firmware pointer to set -+@Input ppvCpuPtr Address of the cpu pointer to set -+@Input ui32DevVAFlags Any combination of RFW_FWADDR_*_FLAG -+ -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXSetupFwAllocation(PVRSRV_RGXDEV_INFO *psDevInfo, -+ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags, -+ IMG_UINT32 ui32Size, -+ const IMG_CHAR *pszName, -+ DEVMEM_MEMDESC **ppsMemDesc, -+ RGXFWIF_DEV_VIRTADDR *psFwPtr, -+ void **ppvCpuPtr, -+ IMG_UINT32 ui32DevVAFlags); -+ -+/*************************************************************************/ /*! -+@Function RGXSetFirmwareAddress -+ -+@Description Sets a pointer in a firmware data structure. -+ -+@Input ppDest Address of the pointer to set -+@Input psSrc MemDesc describing the pointer -+@Input ui32Flags Any combination of RFW_FWADDR_*_FLAG -+ -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest, -+ DEVMEM_MEMDESC *psSrc, -+ IMG_UINT32 uiOffset, -+ IMG_UINT32 ui32Flags); -+ -+ -+/*************************************************************************/ /*! -+@Function RGXSetMetaDMAAddress -+ -+@Description Fills a Firmware structure used to setup the Meta DMA with two -+ pointers to the same data, one on 40 bit and one on 32 bit -+ (pointer in the FW memory space). -+ -+@Input ppDest Address of the structure to set -+@Input psSrcMemDesc MemDesc describing the pointer -+@Input psSrcFWDevVAddr Firmware memory space pointer -+ -+@Return void -+*/ /**************************************************************************/ -+void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR *psDest, -+ DEVMEM_MEMDESC *psSrcMemDesc, -+ RGXFWIF_DEV_VIRTADDR *psSrcFWDevVAddr, -+ IMG_UINT32 uiOffset); -+ -+ -+/*************************************************************************/ /*! -+@Function RGXUnsetFirmwareAddress -+ -+@Description Unsets a pointer in a firmware data structure -+ -+@Input psSrc MemDesc describing the pointer -+ -+@Return void -+*/ /**************************************************************************/ -+void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc); -+ -+/*! -+******************************************************************************* -+@Function RGXScheduleProcessQueuesKM -+ -+@Description Software command complete handler -+ (sends uncounted kicks for all the DMs through the MISR) -+ -+@Input hCmdCompHandle RGX device node -+ -+@Return None -+******************************************************************************/ -+void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle); -+ -+#if defined(SUPPORT_VALIDATION) -+/*! 
-+******************************************************************************* -+@Function RGXScheduleRgxRegCommand -+ -+@Input psDevInfo Device Info struct -+@Input ui64RegVal Value to write into FW register -+@Input ui64Size Register size -+@Input ui32Offset Register Offset -+@Input bWriteOp Register Write or Read toggle -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXScheduleRgxRegCommand(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT64 ui64RegVal, -+ IMG_UINT64 ui64Size, -+ IMG_UINT32 ui32Offset, -+ IMG_BOOL bWriteOp); -+ -+#endif -+ -+/*! -+******************************************************************************* -+ -+@Function RGXInstallProcessQueuesMISR -+ -+@Description Installs the MISR to handle Process Queues operations -+ -+@Input phMISR Pointer to the MISR handler -+@Input psDeviceNode RGX Device node -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll); -+ -+/*************************************************************************/ /*! -+@Function RGXSendCommandWithPowLockAndGetKCCBSlot -+ -+@Description Sends a command to a particular DM without honouring -+ pending cache operations but taking the power lock. -+ -+@Input psDevInfo Device Info -+@Input psKCCBCmd The cmd to send. -+@Input ui32PDumpFlags Pdump flags -+@Output pui32CmdKCCBSlot When non-NULL: -+ - Pointer on return contains the kCCB slot -+ number in which the command was enqueued. -+ - Resets the value of the allotted slot to -+ RGXFWIF_KCCB_RTN_SLOT_RST -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXSendCommandWithPowLockAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGXFWIF_KCCB_CMD *psKCCBCmd, -+ IMG_UINT32 ui32PDumpFlags, -+ IMG_UINT32 *pui32CmdKCCBSlot); -+ -+#define RGXSendCommandWithPowLock(psDevInfo, psKCCBCmd, ui32PDumpFlags) \ -+ RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, NULL) -+ -+/*************************************************************************/ /*! -+@Function RGXSendCommandAndGetKCCBSlot -+ -+@Description Sends a command to a particular DM without honouring -+ pending cache operations or the power lock. -+ The function flushes any deferred KCCB commands first. -+ -+@Input psDevInfo Device Info -+@Input psKCCBCmd The cmd to send. -+@Input uiPdumpFlags PDump flags. -+@Output pui32CmdKCCBSlot When non-NULL: -+ - Pointer on return contains the kCCB slot -+ number in which the command was enqueued. -+ - Resets the value of the allotted slot to -+ RGXFWIF_KCCB_RTN_SLOT_RST -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGXFWIF_KCCB_CMD *psKCCBCmd, -+ PDUMP_FLAGS_T uiPdumpFlags, -+ IMG_UINT32 *pui32CmdKCCBSlot); -+ -+#define RGXSendCommand(psDevInfo, psKCCBCmd, ui32PDumpFlags) \ -+ RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, NULL) -+ -+/*************************************************************************/ /*! 
-+@Function _RGXScheduleCommandAndGetKCCBSlot -+ -+@Description Sends a command to a particular DM and kicks the firmware but -+ first schedules any commands which have to happen before -+ handle -+ -+@Input psDevInfo Device Info -+@Input eDM To which DM the cmd is sent. -+@Input psKCCBCmd The cmd to send. -+@Input ui32PDumpFlags PDump flags -+@Input bCallerHasPwrLock Caller already has power lock -+@Output pui32CmdKCCBSlot When non-NULL: -+ - Pointer on return contains the kCCB slot -+ number in which the command was enqueued. -+ - Resets the value of the allotted slot to -+ RGXFWIF_KCCB_RTN_SLOT_RST -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR _RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGXFWIF_DM eKCCBType, -+ RGXFWIF_KCCB_CMD *psKCCBCmd, -+ IMG_UINT32 ui32PDumpFlags, -+ IMG_UINT32 *pui32CmdKCCBSlot, -+ IMG_BOOL bCallerHasPwrLock); -+ -+#define RGXScheduleCommandAndGetKCCBSlot(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags, pui32CmdKCCBSlot) \ -+ _RGXScheduleCommandAndGetKCCBSlot(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags, pui32CmdKCCBSlot, IMG_FALSE) -+ -+#define RGXScheduleCommand(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags) \ -+ RGXScheduleCommandAndGetKCCBSlot(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags, NULL) -+ -+#define RGXScheduleCommandWithoutPowerLock(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags) \ -+ _RGXScheduleCommandAndGetKCCBSlot(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags, NULL, IMG_TRUE) -+ -+/*************************************************************************/ /*! -+@Function RGXWaitForKCCBSlotUpdate -+ -+@Description Waits until the required kCCB slot value is updated by the FW -+ (signifies command completion). Additionally, dumps a relevant -+ PDump poll command. -+ -+@Input psDevInfo Device Info -+@Input ui32SlotNum The kCCB slot number to wait for an update on -+@Input ui32PDumpFlags -+ -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32SlotNum, -+ IMG_UINT32 ui32PDumpFlags); -+ -+ -+/*************************************************************************/ /*! -+@Function PVRSRVRGXFrameworkCopyCommand -+ -+@Description Copy framework command into FW addressable buffer -+ -+@param psDeviceNode -+@param psFWFrameworkMemDesc -+@param pbyGPUFRegisterList -+@param ui32FrameworkRegisterSize -+ -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEM_MEMDESC *psFWFrameworkMemDesc, -+ IMG_PBYTE pbyGPUFRegisterList, -+ IMG_UINT32 ui32FrameworkRegisterSize); -+ -+ -+/*************************************************************************/ /*! -+@Function PVRSRVRGXFrameworkCreateKM -+ -+@Description Create FW addressable buffer for framework -+ -+@param psDeviceNode -+@param ppsFWFrameworkMemDesc -+@param ui32FrameworkRegisterSize -+ -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE * psDeviceNode, -+ DEVMEM_MEMDESC ** ppsFWFrameworkMemDesc, -+ IMG_UINT32 ui32FrameworkRegisterSize); -+ -+/*************************************************************************/ /*! 
-+@Function RGXPollForGPCommandCompletion -+ -+@Description Polls for completion of a submitted GP command. Poll is done -+ on a value matching a masked read from the address. -+ -+@Input psDevNode Pointer to device node struct -+@Input pui32LinMemAddr CPU linear address to poll -+@Input ui32Value Required value -+@Input ui32Mask Mask -+ -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode, -+ volatile IMG_UINT32 __iomem *pui32LinMemAddr, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask); -+ -+/*************************************************************************/ /*! -+@Function RGXStateFlagCtrl -+ -+@Description Set and return FW internal state flags. -+ -+@Input psDevInfo Device Info -+@Input ui32Config AppHint config flags -+@Output pui32State Current AppHint state flag configuration -+@Input bSetNotClear Set or clear the provided config flags -+ -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32Config, -+ IMG_UINT32 *pui32State, -+ IMG_BOOL bSetNotClear); -+ -+/*! -+******************************************************************************* -+@Function RGXFWRequestCommonContextCleanUp -+ -+@Description Schedules a FW common context cleanup. The firmware doesn't -+ block waiting for the resource to become idle but rather -+ notifies the host that the resource is busy. -+ -+@Input psDeviceNode pointer to device node -+@Input psServerCommonContext context to be cleaned up -+@Input eDM Data master, to which the cleanup command should -+ be sent -+@Input ui32PDumpFlags PDump continuous flag -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, -+ RGXFWIF_DM eDM, -+ IMG_UINT32 ui32PDumpFlags); -+ -+/*! -+******************************************************************************* -+@Function RGXFWRequestHWRTDataCleanUp -+ -+@Description Schedules a FW HWRTData memory cleanup. The firmware doesn't -+ block waiting for the resource to become idle but rather -+ notifies the host that the resource is busy. -+ -+@Input psDeviceNode pointer to device node -+@Input psHWRTData firmware address of the HWRTData for clean-up -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, -+ PRGXFWIF_HWRTDATA psHWRTData); -+ -+/*! -+******************************************************************************* -+@Function RGXFWRequestFreeListCleanUp -+ -+@Description Schedules a FW FreeList cleanup. The firmware doesn't block -+ waiting for the resource to become idle but rather notifies the -+ host that the resource is busy. -+ -+@Input psDeviceNode pointer to device node -+@Input psFWFreeList firmware address of the FreeList for clean-up -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDeviceNode, -+ PRGXFWIF_FREELIST psFWFreeList); -+ -+/*! 
-+******************************************************************************* -+@Function RGXFWRequestZSBufferCleanUp -+ -+@Description Schedules a FW ZS Buffer cleanup. The firmware doesn't block -+ waiting for the resource to become idle but rather notifies the -+ host that the resource is busy. -+ -+@Input psDevInfo pointer to device node -+@Input psFWZSBuffer firmware address of the ZS Buffer for clean-up -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, -+ PRGXFWIF_ZSBUFFER psFWZSBuffer); -+ -+/*! -+******************************************************************************* -+@Function RGXFWSetHCSDeadline -+ -+@Description Requests the Firmware to set a new Hard Context Switch timeout -+ deadline. Context switches that surpass that deadline cause the -+ system to kill the currently running workloads. -+ -+@Input psDeviceNode pointer to device node -+@Input ui32HCSDeadlineMs The deadline in milliseconds. -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32HCSDeadlineMs); -+ -+/*! -+******************************************************************************* -+@Function RGXFWHealthCheckCmdInt -+ -+@Description Ping the firmware to check if it is responsive. -+ -+@Input psDevInfo pointer to device info -+@Input bCallerHasPwrLock Caller already has power lock -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXFWHealthCheckCmdInt(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bCallerHasPwrLock); -+ -+#define RGXFWHealthCheckCmd(psDevInfo) \ -+ RGXFWHealthCheckCmdInt(psDevInfo, IMG_FALSE) -+ -+#define RGXFWHealthCheckCmdWithoutPowerLock(psDevInfo) \ -+ RGXFWHealthCheckCmdInt(psDevInfo, IMG_TRUE) -+ -+/*! -+******************************************************************************* -+@Function RGXFWSetFwOsState -+ -+@Description Requests the Firmware to change the guest OS Online states. -+ This should be initiated by the VMM when a guest VM comes -+ online or goes offline. If offline, the FW offloads any current -+ resource from that DriverID. The request is repeated until the -+ FW has had time to free all the resources or has waited for -+ workloads to finish. -+ -+@Input psDevInfo pointer to device info -+@Input ui32DriverID The driver whose state is being altered -+@Input eOSOnlineState The new state (Online or Offline) -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32DriverID, -+ RGXFWIF_OS_STATE_CHANGE eOSOnlineState); -+ -+#if defined(SUPPORT_AUTOVZ) -+/*! -+******************************************************************************* -+@Function RGXUpdateAutoVzWdgToken -+ -+@Description If the driver-firmware connection is active, read the -+ firmware's watchdog token and copy its value back into the OS -+ token. This indicates to the firmware that this driver is alive -+ and responsive. -+ -+@Input psDevInfo pointer to device info -+******************************************************************************/ -+void RGXUpdateAutoVzWdgToken(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+/*! 
-+******************************************************************************* -+@Function RGXDisconnectAllGuests -+ -+@Description Send requests to FW to disconnect all guest connections. -+ -+@Input psDeviceNode pointer to device node -+******************************************************************************/ -+PVRSRV_ERROR RGXDisconnectAllGuests(PVRSRV_DEVICE_NODE *psDeviceNode); -+#endif -+ -+/*! -+******************************************************************************* -+@Function RGXFWConfigPHR -+ -+@Description Configure the Periodic Hardware Reset functionality -+ -+@Input psDevInfo pointer to device info -+@Input ui32PHRMode desired PHR mode -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32PHRMode); -+ -+/*! -+******************************************************************************* -+@Function RGXFWConfigWdg -+ -+@Description Configure the Safety watchdog trigger period -+ -+@Input psDevInfo pointer to device info -+@Input ui32WdgPeriodUs requested period in microseconds -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXFWConfigWdg(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32WdgPeriod); -+ -+/*! -+******************************************************************************* -+@Function RGXCheckFirmwareCCB -+ -+@Description Processes all commands that are found in the Firmware CCB. -+ -+@Input psDevInfo pointer to device -+ -+@Return None -+******************************************************************************/ -+void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+/*! -+******************************************************************************* -+@Function RGXCheckForStalledClientContexts -+ -+@Description Checks all client contexts, for the device with device info -+ provided, to see if any are waiting for a fence to signal and -+ optionally force signalling of the fence for the context which -+ has been waiting the longest. -+ This function is called by RGXUpdateHealthStatus() and also -+ may be invoked from other trigger points. -+ -+@Input psDevInfo pointer to device info -+@Input bIgnorePrevious If IMG_TRUE, any stalled contexts will be -+ indicated immediately, rather than only -+ checking against any previous stalled contexts -+ -+@Return None -+******************************************************************************/ -+void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious); -+ -+/*! -+******************************************************************************* -+@Function RGXUpdateHealthStatus -+ -+@Description Tests a number of conditions which might indicate a fatal error -+ has occurred in the firmware. The result is stored in the -+ device node eHealthStatus. -+ -+@Input psDevNode Pointer to device node structure. -+@Input bCheckAfterTimePassed When TRUE, the function will also test -+ for firmware queues and polls not changing -+ since the previous test. -+ -+ Note: if not enough time has passed since the -+ last call, false positives may occur. -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, -+ IMG_BOOL bCheckAfterTimePassed); -+ -+#if defined(SUPPORT_AUTOVZ) -+/*! 
-+******************************************************************************* -+@Function RGXUpdateAutoVzWatchdog -+ -+@Description Updates AutoVz watchdog that maintains the fw-driver connection -+ -+@Input psDevNode Pointer to device node structure. -+******************************************************************************/ -+void RGXUpdateAutoVzWatchdog(PVRSRV_DEVICE_NODE* psDevNode); -+#endif /* SUPPORT_AUTOVZ */ -+ -+/*! -+******************************************************************************* -+@Function AttachKickResourcesCleanupCtls -+ -+@Description Attaches the cleanup structures to a kick command so that -+ submission reference counting can be performed when the -+ firmware processes the command -+ -+@Output apsCleanupCtl Array of CleanupCtl structure pointers to populate. -+@Output pui32NumCleanupCtl Number of CleanupCtl structure pointers written out. -+@Input eDM Which data master is the subject of the command. -+@Input bKick TRUE if the client originally wanted to kick this DM. -+@Input psRTDataCleanup Optional RTData cleanup associated with the command. -+@Input psZBuffer Optional ZSBuffer associated with the command. -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl, -+ IMG_UINT32 *pui32NumCleanupCtl, -+ RGXFWIF_DM eDM, -+ IMG_BOOL bKick, -+ RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, -+ RGX_ZSBUFFER_DATA *psZSBuffer, -+ RGX_ZSBUFFER_DATA *psMSAAScratchBuffer); -+ -+/*! -+******************************************************************************* -+@Function RGXResetHWRLogs -+ -+@Description Resets the HWR Logs buffer -+ (the hardware recovery count is not reset) -+ -+@Input psDevNode Pointer to the device -+ -+@Return PVRSRV_ERROR PVRSRV_OK on success. -+ Otherwise, a PVRSRV error code -+******************************************************************************/ -+PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode); -+ -+/*! -+******************************************************************************* -+@Function RGXGetPhyAddr -+ -+@Description Get the physical address of a PMR at an offset within it -+ -+@Input psPMR PMR of the allocation -+@Input ui32LogicalOffset Logical offset -+ -+@Output psPhyAddr Physical address of the allocation -+ -+@Return PVRSRV_ERROR PVRSRV_OK on success. -+ Otherwise, a PVRSRV error code -+******************************************************************************/ -+PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR, -+ IMG_DEV_PHYADDR *psPhyAddr, -+ IMG_UINT32 ui32LogicalOffset, -+ IMG_UINT32 ui32Log2PageSize, -+ IMG_UINT32 ui32NumOfPages, -+ IMG_BOOL *bValid); -+ -+#if defined(PDUMP) -+/*! -+******************************************************************************* -+@Function RGXPdumpDrainKCCB -+ -+@Description Wait for the firmware to execute all the commands in the kCCB -+ -+@Input psDevInfo Pointer to the device -+@Input ui32WriteOffset Woff we have to POL for the Roff to be equal to -+ -+@Return PVRSRV_ERROR PVRSRV_OK on success. -+ Otherwise, a PVRSRV error code -+******************************************************************************/ -+PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32WriteOffset); -+#endif /* PDUMP */ -+ -+/*! 
-+******************************************************************************* -+@Function RGXFwRawHeapAllocMap -+ -+@Description Register and maps to device, a raw firmware physheap -+ -+@Return PVRSRV_ERROR PVRSRV_OK on success. -+ Otherwise, a PVRSRV error code -+******************************************************************************/ -+PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DriverID, -+ IMG_DEV_PHYADDR sDevPAddr, -+ IMG_UINT64 ui64DevPSize); -+ -+/*! -+******************************************************************************* -+@Function RGXFwRawHeapUnmapFree -+ -+@Description Unregister and unmap from device, a raw firmware physheap -+******************************************************************************/ -+void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DriverID); -+ -+/*! -+******************************************************************************* -+@Function RGXReadFWModuleAddr -+ -+@Description Read a value at the given address in META or RISCV memory space -+ -+@Input psDevInfo Pointer to device info -+@Input ui32Addr Address in META or RISCV memory space -+ -+@Output pui32Value Read value -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXReadFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32Addr, -+ IMG_UINT32 *pui32Value); -+ -+/*! -+******************************************************************************* -+@Function RGXWriteFWModuleAddr -+ -+@Description Write a value to the given address in META or RISC memory space -+ -+@Input psDevInfo Pointer to device info -+@Input ui32Addr Address in RISC-V memory space -+@Input ui32Value Write value -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXWriteFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32MemAddr, -+ IMG_UINT32 ui32Value); -+ -+PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32RegValue); -+ -+PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32* ui32RegValue); -+ -+/*! -+******************************************************************************* -+@Function RGXGetFwMapping -+ -+@Description Retrieve any of the CPU Physical Address, Device Physical -+ Address or the raw value of the page table entry associated -+ with the firmware virtual address given. -+ -+@Input psDevInfo Pointer to device info -+@Input ui32FwVA The Fw VA that needs decoding -+@Output psCpuPA Pointer to the resulting CPU PA -+@Output psDevPA Pointer to the resulting Dev PA -+@Output pui64RawPTE Pointer to the raw Page Table Entry value -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32FwVA, -+ IMG_CPU_PHYADDR *psCpuPA, -+ IMG_DEV_PHYADDR *psDevPA, -+ IMG_UINT64 *pui64RawPTE); -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+/*! 
-+******************************************************************************* -+@Function RGXIsValidWorkloadEstCCBCommand -+ -+@Description Checks if command type can be used for workload estimation -+ -+@Input eType Command type to check -+ -+@Return IMG_BOOL -+******************************************************************************/ -+IMG_BOOL RGXIsValidWorkloadEstCCBCommand(RGXFWIF_CCB_CMD_TYPE eType); -+ -+#endif -+ -+/*! -+******************************************************************************* -+@Function RGXFWInjectFault -+ -+@Description Injecting firmware fault to validate recovery through Host -+ -+@Input psDevInfo Pointer to device info -+ -+@Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR RGXFWInjectFault(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+/*! -+******************************************************************************* -+@Function RGXFWSetVzConnectionCooldownPeriod -+ -+@Description Set Vz connection cooldown period -+ -+@Input psDevInfo pointer to device info -+@Input ui32VzConnectionCooldownPeriodInSec Cooldown period in secs -+******************************************************************************/ -+PVRSRV_ERROR -+RGXFWSetVzConnectionCooldownPeriod(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32VzConnectionCooldownPeriodInSec); -+ -+#if defined(SUPPORT_AUTOVZ_HW_REGS) && !defined(SUPPORT_AUTOVZ) -+#error "VZ build configuration error: use of OS scratch registers supported only in AutoVz drivers." -+#endif -+ -+#if defined(SUPPORT_AUTOVZ_HW_REGS) -+/* AutoVz with hw support */ -+#define KM_GET_FW_CONNECTION(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH3) -+#define KM_GET_OS_CONNECTION(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH2) -+#define KM_SET_OS_CONNECTION(val, psDevInfo) OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH2, RGXFW_CONNECTION_OS_##val) -+ -+#define KM_GET_FW_ALIVE_TOKEN(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH1) -+#define KM_GET_OS_ALIVE_TOKEN(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0) -+#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo) OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0, val) -+ -+#define KM_ALIVE_TOKEN_CACHEOP(Target, CacheOp) -+#define KM_CONNECTION_CACHEOP(Target, CacheOp) -+ -+#else -+ -+#if defined(SUPPORT_AUTOVZ) -+#define KM_GET_FW_ALIVE_TOKEN(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveFwToken) -+#define KM_GET_OS_ALIVE_TOKEN(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken) -+#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo) do { \ -+ OSWriteDeviceMem32WithWMB((volatile IMG_UINT32 *) &psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken, val); \ -+ KM_ALIVE_TOKEN_CACHEOP(Os, FLUSH); \ -+ } while (0) -+ -+#define KM_ALIVE_TOKEN_CACHEOP(Target, CacheOp) RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfConnectionCtl->ui32Alive##Target##Token, \ -+ CacheOp); -+#endif /* defined(SUPPORT_AUTOVZ) */ -+ -+#if !defined(NO_HARDWARE) && (defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (!defined(RGX_NUM_DRIVERS_SUPPORTED) || (RGX_NUM_DRIVERS_SUPPORTED == 1))) -+/* native, static-vz and AutoVz using shared memory */ -+#define KM_GET_FW_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionFwState) -+#define KM_GET_OS_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState) -+#define KM_SET_OS_CONNECTION(val, psDevInfo) do { \ -+ 
OSWriteDeviceMem32WithWMB((void*)&psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState, RGXFW_CONNECTION_OS_##val); \ -+ KM_CONNECTION_CACHEOP(Os, FLUSH); \ -+ } while (0) -+ -+#define KM_CONNECTION_CACHEOP(Target, CacheOp) RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfConnectionCtl->eConnection##Target##State, \ -+ CacheOp); -+#else -+/* dynamic-vz & nohw */ -+#define KM_GET_FW_CONNECTION(psDevInfo) (RGXFW_CONNECTION_FW_ACTIVE) -+#define KM_GET_OS_CONNECTION(psDevInfo) (RGXFW_CONNECTION_OS_ACTIVE) -+#define KM_SET_OS_CONNECTION(val, psDevInfo) -+#define KM_CONNECTION_CACHEOP(Target, CacheOp) -+#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED == 1) */ -+#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */ -+ -+#if defined(RGX_PREMAP_FW_HEAPS) -+#define RGX_FIRST_RAW_HEAP_DRIVER_ID RGXFW_HOST_DRIVER_ID -+#else -+#define RGX_FIRST_RAW_HEAP_DRIVER_ID RGXFW_GUEST_DRIVER_ID_START -+#endif -+ -+#define KM_OS_CONNECTION_IS(val, psDevInfo) (KM_GET_OS_CONNECTION(psDevInfo) == RGXFW_CONNECTION_OS_##val) -+#define KM_FW_CONNECTION_IS(val, psDevInfo) (KM_GET_FW_CONNECTION(psDevInfo) == RGXFW_CONNECTION_FW_##val) -+ -+#endif /* RGXFWUTILS_H */ -+/****************************************************************************** -+ End of file (rgxfwutils.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxheapconfig.h b/drivers/gpu/drm/img-rogue/rgxheapconfig.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxheapconfig.h -@@ -0,0 +1,294 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX Device virtual memory map -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Memory heaps device specific configuration -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
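A minimal sketch of how a host-side caller might drive the fw/os handshake with the connection macros defined above, assuming a populated psDevInfo. The helper name, retry count, OSSleepms() polling and the INVALIDATE cache-op token are illustrative assumptions, not the driver's actual implementation.

/* Illustrative sketch only: wait for the firmware side of the connection
 * to report ACTIVE, then mark the OS side ACTIVE as well.
 * Assumes the KM_* macros from rgxfwutils.h above; the helper name,
 * retry count, OSSleepms() polling and the INVALIDATE token are
 * hypothetical. */
#include "rgxfwutils.h"
#include "osfunc.h"

static PVRSRV_ERROR ExampleWaitForFwConnection(PVRSRV_RGXDEV_INFO *psDevInfo)
{
	IMG_UINT32 ui32Tries = 100;

	while (ui32Tries--)
	{
		/* Invalidate before reading firmware-owned state when it lives in
		 * shared memory (expands to nothing for the HW-register variant). */
		KM_CONNECTION_CACHEOP(Fw, INVALIDATE);

		if (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo))
		{
			/* Firmware is up: advertise the OS side as active too.
			 * KM_SET_OS_CONNECTION already flushes the value out. */
			KM_SET_OS_CONNECTION(ACTIVE, psDevInfo);
			return PVRSRV_OK;
		}

		OSSleepms(10);
	}

	return PVRSRV_ERROR_TIMEOUT;
}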
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RGXHEAPCONFIG_H -+#define RGXHEAPCONFIG_H -+ -+#include "rgxdefs_km.h" -+ -+ -+#define RGX_HEAP_SIZE_4KiB IMG_UINT64_C(0x0000001000) -+#define RGX_HEAP_SIZE_64KiB IMG_UINT64_C(0x0000010000) -+#define RGX_HEAP_SIZE_256KiB IMG_UINT64_C(0x0000040000) -+ -+#define RGX_HEAP_SIZE_1MiB IMG_UINT64_C(0x0000100000) -+#define RGX_HEAP_SIZE_2MiB IMG_UINT64_C(0x0000200000) -+#define RGX_HEAP_SIZE_4MiB IMG_UINT64_C(0x0000400000) -+#define RGX_HEAP_SIZE_16MiB IMG_UINT64_C(0x0001000000) -+#define RGX_HEAP_SIZE_32MiB IMG_UINT64_C(0x0002000000) -+#define RGX_HEAP_SIZE_256MiB IMG_UINT64_C(0x0010000000) -+ -+#define RGX_HEAP_SIZE_1GiB IMG_UINT64_C(0x0040000000) -+#define RGX_HEAP_SIZE_2GiB IMG_UINT64_C(0x0080000000) -+#define RGX_HEAP_SIZE_4GiB IMG_UINT64_C(0x0100000000) -+#define RGX_HEAP_SIZE_16GiB IMG_UINT64_C(0x0400000000) -+#define RGX_HEAP_SIZE_32GiB IMG_UINT64_C(0x0800000000) -+#define RGX_HEAP_SIZE_64GiB IMG_UINT64_C(0x1000000000) -+#define RGX_HEAP_SIZE_128GiB IMG_UINT64_C(0x2000000000) -+#define RGX_HEAP_SIZE_256GiB IMG_UINT64_C(0x4000000000) -+#define RGX_HEAP_SIZE_512GiB IMG_UINT64_C(0x8000000000) -+ -+/* -+ RGX Device Virtual Address Space Definitions -+ -+ This file defines the RGX virtual address heaps that are used in -+ application memory contexts. It also shows where the Firmware memory heap -+ fits into this, but the firmware heap is only ever created in the -+ Services KM/server component. -+ -+ RGX_PDSCODEDATA_HEAP_BASE and RGX_USCCODE_HEAP_BASE will be programmed, -+ on a global basis, into RGX_CR_PDS_EXEC_BASE and RGX_CR_USC_CODE_BASE_* -+ respectively. Therefore if clients use multiple configs they must still -+ be consistent with their definitions for these heaps. -+ -+ Shared virtual memory (GENERAL_SVM) support requires half of the address -+ space (512 GiB) be reserved for SVM allocations to mirror application CPU -+ addresses. However, if BRN_65273 WA is active in which case the SVM heap -+ is disabled. This is reflected in the device connection capability bits -+ returned to user space. -+ -+ The GENERAL non-SVM region is 512 GiB to 768 GiB and is shared between the -+ general (4KiB) heap and the general non-4K heap. The first 128 GiB is used -+ for the GENERAL_HEAP (4KiB) and the last 32 GiB is used for the -+ GENERAL_NON4K_HEAP. This heap has a default page-size of 16K. -+ AppHint PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE can be used to forced it -+ to these values: 4K,64K,256K,1M,2M. -+ -+ The heaps defined for BRN_65273 _replace_ the non-BRN equivalents below -+ when this BRN WA is active on affected cores. This is different to most -+ other BRNs and hence has been given its own header file for clarity, -+ see below. This is a special case, other BRNs that need 1 or 2 additional -+ heaps should be added to this file, like BRN_63142 below. -+ NOTE: All regular heaps below greater than 1GB require a BRN_65273 WA heap. 
-+ -+ Base addresses have to be a multiple of 4MiB -+ Heaps must not start at 0x0000000000, as this is reserved for internal -+ use within device memory layer. -+ Range comments, those starting in column 0 below are a section heading of -+ sorts and are above the heaps in that range. Often this is the reserved -+ size of the heap within the range. -+*/ -+ -+/* This BRN requires a different virtual memory map from the standard one -+ * defined in this file below. Hence the alternative heap definitions for this -+ * BRN are provided in a separate file for clarity. */ -+#include "rgxheapconfig_65273.h" -+ -+ -+/* 0x00_0000_0000 ************************************************************/ -+ -+/* 0x00_0000_0000 - 0x00_0020_0000 **/ -+ /* 0 MiB to 2 MiB, size of 2 MiB : RESERVED **/ -+ -+ /* BRN_65273 TQ3DPARAMETERS base 0x0000010000 */ -+ /* BRN_65273 GENERAL base 0x65C0000000 */ -+ /* BRN_65273 GENERAL_NON4K base 0x73C0000000 */ -+ -+/* 0x00_0020_0000 - 0x7F_FFC0_0000 **/ -+ /* 2 MiB to 512 GiB, size of 512 GiB less 2 MiB : GENERAL_SVM_HEAP **/ -+ #define RGX_GENERAL_SVM_HEAP_BASE IMG_UINT64_C(0x0000200000) -+ #define RGX_GENERAL_SVM_HEAP_SIZE (RGX_HEAP_SIZE_512GiB - RGX_HEAP_SIZE_2MiB) -+ -+ -+/* 0x80_0000_0000 ************************************************************/ -+ -+/* 0x80_0000_0000 - 0x9F_FFFF_FFFF **/ -+ /* 512 GiB to 640 GiB, size of 128 GiB : GENERAL_HEAP **/ -+ #define RGX_GENERAL_HEAP_BASE IMG_UINT64_C(0x8000000000) -+ #define RGX_GENERAL_HEAP_SIZE RGX_HEAP_SIZE_128GiB -+ -+ /* BRN_65273 PDSCODEDATA base 0xA800000000 */ -+ -+/* 0xA0_0000_0000 - 0xAF_FFFF_FFFF **/ -+ /* 640 GiB to 704 GiB, size of 64 GiB : FREE **/ -+ -+/* B0_0000_0000 - 0xB7_FFFF_FFFF **/ -+ /* 704 GiB to 736 GiB, size of 32 GiB : FREE **/ -+ -+ /* BRN_65273 USCCODE base 0xBA00000000 */ -+ -+/* 0xB8_0000_0000 - 0xBF_FFFF_FFFF **/ -+ /* 736 GiB to 768 GiB, size of 32 GiB : GENERAL_NON4K_HEAP **/ -+ #define RGX_GENERAL_NON4K_HEAP_BASE IMG_UINT64_C(0xB800000000) -+ #define RGX_GENERAL_NON4K_HEAP_SIZE RGX_HEAP_SIZE_32GiB -+ -+ -+/* 0xC0_0000_0000 ************************************************************/ -+ -+/* 0xC0_0000_0000 - 0xD9_FFFF_FFFF **/ -+ /* 768 GiB to 872 GiB, size of 104 GiB : FREE **/ -+ -+/* 0xDA_0000_0000 - 0xDA_FFFF_FFFF **/ -+ /* 872 GiB to 876 GiB, size of 4 GiB : PDSCODEDATA_HEAP **/ -+ #define RGX_PDSCODEDATA_HEAP_BASE IMG_UINT64_C(0xDA00000000) -+ #define RGX_PDSCODEDATA_HEAP_SIZE RGX_HEAP_SIZE_4GiB -+ -+/* 0xDB_0000_0000 - 0xDB_FFFF_FFFF **/ -+ /* 876 GiB to 880 GiB, size of 256 MiB (reserved 4GiB) : BRN **/ -+ /* HWBRN63142 workaround requires Region Header memory to be at the top -+ of a 16GiB aligned range. This is so when masked with 0x03FFFFFFFF the -+ address will avoid aliasing PB addresses. Start at 879.75GiB. Size of 256MiB. 
*/ -+ #define RGX_RGNHDR_BRN_63142_HEAP_BASE IMG_UINT64_C(0xDBF0000000) -+ #define RGX_RGNHDR_BRN_63142_HEAP_SIZE RGX_HEAP_SIZE_256MiB -+ -+/* 0xDC_0000_0000 - 0xDF_FFFF_FFFF **/ -+ /* 880 GiB to 896 GiB, size of 16 GiB : FREE **/ -+ -+/* 0xE0_0000_0000 - 0xE0_FDFF_FFFF **/ -+ /* 896 GiB to 900 GiB, size of 4 GiB less 32 MiB : USCCODE_HEAP **/ -+ #define RGX_USCCODE_HEAP_BASE IMG_UINT64_C(0xE000000000) -+ #define RGX_USCCODE_HEAP_SIZE (RGX_HEAP_SIZE_4GiB - RGX_HEAP_SIZE_32MiB) -+ -+/* 0xE0_FE00_0000 - 0xE0_FFFF_FFFF **/ -+ /* 900 GiB less 32 MiB to 900 GiB, size of 32 MiB : RESERVED VOLCANIC **/ -+ -+/* 0xE1_0000_0000 - 0xE1_BFFF_FFFF **/ -+ /* 900 GiB to 903 GiB, size of 3 GiB : RESERVED **/ -+ -+/* 0xE1_C000_000 - 0xE1_FFFF_FFFF **/ -+ /* 903 GiB to 904 GiB, reserved 1 GiB, : FIRMWARE_HEAP **/ -+ -+ /* Firmware heaps defined in rgx_heap_firmware.h as they are not present in -+ application memory contexts, see: -+ RGX_FIRMWARE_RAW_HEAP_BASE -+ RGX_FIRMWARE_RAW_HEAP_SIZE -+ See header for other sub-heaps details -+ */ -+ -+/* 0xE2_0000_0000 - 0xE3_FFFF_FFFF **/ -+ /* 904 GiB to 912 GiB, size of 8 GiB : FREE **/ -+ -+ /* BRN_65273 VISIBILITY_TEST base 0xE400000000 */ -+ -+/* 0xE4_0000_0000 - 0xE7_FFFF_FFFF **/ -+ /* 912 GiB to 928 GiB, size 16 GiB : TQ3DPARAMETERS_HEAP **/ -+ /* Aligned to match RGX_CR_ISP_PIXEL_BASE at 16 GiB */ -+ #define RGX_TQ3DPARAMETERS_HEAP_BASE IMG_UINT64_C(0xE400000000) -+ #define RGX_TQ3DPARAMETERS_HEAP_SIZE RGX_HEAP_SIZE_16GiB -+ -+/* 0xE8_0000_0000 - 0xE8_FFFF_FFFF **/ -+ /* 928 GiB to 932 GiB, size of 4 GiB : FREE **/ -+ -+/* 0xE9_0000_0000 - 0xE9_3FFF_FFFF **/ -+ /* 932 GiB to 933 GiB, size of 1 GiB : VK_CAPT_REPLAY_HEAP **/ -+ #define RGX_VK_CAPT_REPLAY_HEAP_BASE IMG_UINT64_C(0xE900000000) -+ #define RGX_VK_CAPT_REPLAY_HEAP_SIZE RGX_HEAP_SIZE_1GiB -+ -+/* 0xE9_4000_0000 - 0xE9_FFFF_FFFF **/ -+ /* 933 GiB to 936 GiB, size of 3 GiB : FREE **/ -+ -+/* 0xEA_0000_0000 - 0xEA_0000_0FFF **/ -+ /* 936 GiB to 937 GiB, size of min heap size : SIGNALS_HEAP **/ -+ /* CDM Signals heap (31 signals less one reserved for Services). 
-+ * Size 960B rounded up to minimum heap size */ -+ #define RGX_SIGNALS_HEAP_BASE IMG_UINT64_C(0xEA00000000) -+ #define RGX_SIGNALS_HEAP_SIZE DEVMEM_HEAP_MINIMUM_SIZE -+ -+/* 0xEA_4000_0000 - 0xEA_FFFF_FFFF **/ -+ /* 937 GiB to 940 GiB, size of 3 GiB : FREE **/ -+ -+/* 0xEB_0000_0000 - 0xEB_FFFF_FFFF **/ -+ /* 940 GiB to 944 GiB, size of 4 GiB : RESERVED VOLCANIC **/ -+ -+/* 0xEC_0000_0000 - 0xEC_001F_FFFF **/ -+ /* 944 GiB to 945 GiB, size 2 MiB : FBCDC_HEAP **/ -+ #define RGX_FBCDC_HEAP_BASE IMG_UINT64_C(0xEC00000000) -+ #define RGX_FBCDC_HEAP_SIZE RGX_HEAP_SIZE_2MiB -+ -+/* 0xEC_4000_0000 - 0xEC_401F_FFFF **/ -+ /* 945 GiB to 946 GiB, size 2 MiB : FBCDC_LARGE_HEAP **/ -+ #define RGX_FBCDC_LARGE_HEAP_BASE IMG_UINT64_C(0xEC40000000) -+ #define RGX_FBCDC_LARGE_HEAP_SIZE RGX_HEAP_SIZE_2MiB -+ -+/* 0xEC_8000_0000 - 0xED_FFFF_FFFF **/ -+ /* 946 GiB to 952 GiB, size of 6 GiB : RESERVED VOLCANIC **/ -+ -+/* 0xEE_0000_0000 - 0xEE_3FFF_FFFF **/ -+ /* 952 GiB to 953 GiB, size of 1 GiB : CMP_MISSION_RMW_HEAP **/ -+ #define RGX_CMP_MISSION_RMW_HEAP_BASE IMG_UINT64_C(0xEE00000000) -+ #define RGX_CMP_MISSION_RMW_HEAP_SIZE RGX_HEAP_SIZE_1GiB -+ -+/* 0xEE_4000_0000 - 0xEE_FFFF_FFFF **/ -+ /* 953 GiB to 956 GiB, size of 3 GiB : RESERVED **/ -+ -+/* 0xEF_0000_0000 - 0xEF_3FFF_FFFF **/ -+ /* 956 GiB to 957 GiB, size of 1 GiB : CMP_SAFETY_RMW_HEAP **/ -+ #define RGX_CMP_SAFETY_RMW_HEAP_BASE IMG_UINT64_C(0xEF00000000) -+ #define RGX_CMP_SAFETY_RMW_HEAP_SIZE RGX_HEAP_SIZE_1GiB -+ -+/* 0xEF_4000_0000 - 0xEF_FFFF_FFFF **/ -+ /* 957 GiB to 960 GiB, size of 3 GiB : RESERVED **/ -+ -+/* 0xF0_0000_0000 - 0xF0_FFFF_FFFF **/ -+ /* 960 GiB to 964 GiB, size of 4 GiB : TEXTURE_STATE_HEAP (36-bit aligned) */ -+ #define RGX_TEXTURE_STATE_HEAP_BASE IMG_UINT64_C(0xF000000000) -+ #define RGX_TEXTURE_STATE_HEAP_SIZE RGX_HEAP_SIZE_4GiB -+ -+/* 0xF1_0000_0000 - 0xF1_FFFF_FFFF **/ -+ /* 964 GiB to 968 GiB, size of 4 GiB : FREE **/ -+ -+/* 0xF2_0000_0000 - 0xF2_001F_FFFF **/ -+ /* 968 GiB to 969 GiB, size of 2 MiB : VISIBILITY_TEST_HEAP **/ -+ #define RGX_VISIBILITY_TEST_HEAP_BASE IMG_UINT64_C(0xF200000000) -+ #define RGX_VISIBILITY_TEST_HEAP_SIZE RGX_HEAP_SIZE_2MiB -+ -+/* 0xF2_4000_0000 - 0xF2_FFFF_FFFF **/ -+ /* 969 GiB to 972 GiB, size of 3 GiB : FREE **/ -+ -+ /* BRN_65273 MMU_INIA base 0xF800000000 */ -+ /* BRN_65273 MMU_INIB base 0xF900000000 */ -+ -+/* 0xF3_0000_0000 - 0xFF_FFFF_FFFF **/ -+ /* 972 GiB to 1024 GiB, size of 52 GiB : FREE **/ -+ -+ -+ -+/* 0xFF_FFFF_FFFF ************************************************************/ -+ -+/* End of RGX Device Virtual Address Space definitions */ -+ -+#endif /* RGXHEAPCONFIG_H */ -+ -+/****************************************************************************** -+ End of file (rgxheapconfig.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxheapconfig_65273.h b/drivers/gpu/drm/img-rogue/rgxheapconfig_65273.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxheapconfig_65273.h -@@ -0,0 +1,124 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX Device virtual memory map for BRN_65273. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Memory heaps device specific configuration -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
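A minimal sketch of the base-alignment rule stated in the rgxheapconfig.h comment above, expressed as compile-time checks against a few representative heap bases from that file. The wrapper macro is an illustrative assumption and not part of the driver.

/* Illustrative compile-time checks only: the selected heap bases must be
 * non-zero multiples of 4 MiB. The EXAMPLE_HEAP_BASE_OK wrapper is
 * hypothetical; the heap macros come from rgxheapconfig.h above. */
#include "rgxheapconfig.h"

#define EXAMPLE_HEAP_BASE_OK(base) \
	(((base) != 0) && (((base) & (RGX_HEAP_SIZE_4MiB - 1)) == 0))

static_assert(EXAMPLE_HEAP_BASE_OK(RGX_GENERAL_HEAP_BASE),
              "GENERAL heap base must be a non-zero multiple of 4 MiB");
static_assert(EXAMPLE_HEAP_BASE_OK(RGX_GENERAL_NON4K_HEAP_BASE),
              "GENERAL_NON4K heap base must be a non-zero multiple of 4 MiB");
static_assert(EXAMPLE_HEAP_BASE_OK(RGX_PDSCODEDATA_HEAP_BASE),
              "PDSCODEDATA heap base must be a non-zero multiple of 4 MiB");
static_assert(EXAMPLE_HEAP_BASE_OK(RGX_USCCODE_HEAP_BASE),
              "USCCODE heap base must be a non-zero multiple of 4 MiB");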
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RGXHEAPCONFIG_65273_H -+#define RGXHEAPCONFIG_65273_H -+ -+/* -+ RGX Device Virtual Address Space Definitions -+ -+ This file defines the RGX virtual address replacement heaps that are used -+ in application memory contexts for BRN_65273. -+ -+ The heaps defined for BRN_65273 _replace_ the non-BRN equivalents when this -+ BRN WA is active on affected cores. This is different to most other BRNs -+ and hence has been given its own header file for clarity. The SVM_HEAP is -+ also disabled and unavailable when the WA is active. This is reflected -+ in the device connection capability bits returned to user space. -+ NOTE: All regular heaps in rgxheapconfig.h greater than 1GB require -+ a BRN_65273 WA heap. -+ -+ Base addresses must have to be a multiple of 4MiB -+ Heaps must not start at 0x0000000000, as this is reserved for internal -+ use within device memory layer. -+ Range comments, those starting in column 0 below are a section heading of -+ sorts and are above the heaps in that range. -+*/ -+ -+ -+/* 0x00_0000_0000 ************************************************************/ -+ -+/* 0x00_0001_0000 - 0x00_3FFF_FFFF **/ -+ /* HWBRN65273 workaround requires TQ memory to start at 64 KiB and use a -+ * unique single 0.99GiB PCE entry. 
*/ -+ #define RGX_TQ3DPARAMETERS_BRN_65273_HEAP_BASE IMG_UINT64_C(0x0000010000) -+ #define RGX_TQ3DPARAMETERS_BRN_65273_HEAP_SIZE (RGX_HEAP_SIZE_1GiB - RGX_HEAP_SIZE_64KiB) -+ -+/* 0x65_C000_0000 - 0x66_3FFF_FFFF **/ -+ /* HWBRN65273 workaround requires General Heap to use a unique PCE entry for each GiB in range */ -+ #define RGX_GENERAL_BRN_65273_HEAP_BASE IMG_UINT64_C(0x65C0000000) -+ #define RGX_GENERAL_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_2GiB -+ -+/* 0x73_C000_0000 - 0x74_3FFF_FFFF **/ -+ /* HWBRN65273 workaround requires Non4K memory to use a unique PCE entry for each GiB in range */ -+ #define RGX_GENERAL_NON4K_BRN_65273_HEAP_BASE IMG_UINT64_C(0x73C0000000) -+ #define RGX_GENERAL_NON4K_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_2GiB -+ -+ -+/* 0x80_0000_0000 ************************************************************/ -+ -+/* 0xA8_0000_0000 - 0xA8_3FFF_FFFF **/ -+ /* HWBRN65273 workaround requires PDS memory to use a unique single 1GiB PCE entry. */ -+ #define RGX_PDSCODEDATA_BRN_65273_HEAP_BASE IMG_UINT64_C(0xA800000000) -+ #define RGX_PDSCODEDATA_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_1GiB -+ -+/* 0xBA_0000_0000 - 0xBA_3FFF_FFFF **/ -+ /* HWBRN65273 workaround requires USC memory to use a unique single 1GiB PCE entry. */ -+ #define RGX_USCCODE_BRN_65273_HEAP_BASE IMG_UINT64_C(0xBA00000000) -+ #define RGX_USCCODE_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_1GiB -+ -+ -+/* 0xC0_0000_0000 ************************************************************/ -+ -+/* 0xE4_0000_0000 - 0xE4_001F_FFFF **/ -+ /* HWBRN65273 workaround requires USC memory to use a unique single 1GiB PCE entry. */ -+ #define RGX_VISIBILITY_TEST_BRN_65273_HEAP_BASE IMG_UINT64_C(0xE400000000) -+ #define RGX_VISIBILITY_TEST_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_2MiB -+ -+/* 0xF8_0000_0000 - 0xF9_FFFF_FFFF **/ -+ /* HWBRN65273 workaround requires two Region Header buffers 4GiB apart. */ -+ #define RGX_MMU_INIA_BRN_65273_HEAP_BASE IMG_UINT64_C(0xF800000000) -+ #define RGX_MMU_INIA_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_1GiB -+ #define RGX_MMU_INIB_BRN_65273_HEAP_BASE IMG_UINT64_C(0xF900000000) -+ #define RGX_MMU_INIB_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_1GiB -+ -+ -+/* 0xFF_FFFF_FFFF ************************************************************/ -+ -+/* End of RGX Device Virtual Address Space definitions */ -+ -+#endif /* RGXHEAPCONFIG_65273_H */ -+ -+/****************************************************************************** -+ End of file (rgxheapconfig_65273.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxhwperf.c b/drivers/gpu/drm/img-rogue/rgxhwperf.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxhwperf.c -@@ -0,0 +1,1037 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX HW Performance implementation -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX HW Performance implementation -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
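A minimal sketch of how the BRN_65273 replacement bases might be swapped in for the standard heaps when the workaround is active. The boolean parameter and helper are illustrative; the real selection is performed by the Services heap configuration code.

/* Illustrative only: pick the general heap base/size for a device,
 * substituting the BRN_65273 replacement heap when the workaround is
 * active. The bBrn65273Active flag and helper name are hypothetical. */
#include "rgxheapconfig.h"

static void ExampleSelectGeneralHeap(IMG_BOOL bBrn65273Active,
                                     IMG_UINT64 *pui64Base,
                                     IMG_UINT64 *pui64Size)
{
	if (bBrn65273Active)
	{
		/* Replacement heap: 2 GiB at 0x65_C000_0000, one PCE per GiB. */
		*pui64Base = RGX_GENERAL_BRN_65273_HEAP_BASE;
		*pui64Size = RGX_GENERAL_BRN_65273_HEAP_SIZE;
	}
	else
	{
		/* Standard heap: 128 GiB at 0x80_0000_0000. */
		*pui64Base = RGX_GENERAL_HEAP_BASE;
		*pui64Size = RGX_GENERAL_HEAP_SIZE;
	}
}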
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ /**************************************************************************/ -+ -+//#define PVR_DPF_FUNCTION_TRACE_ON 1 -+#undef PVR_DPF_FUNCTION_TRACE_ON -+ -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include "rgxdevice.h" -+#include "pvrsrv_error.h" -+#include "pvr_notifier.h" -+#include "osfunc.h" -+#include "allocmem.h" -+ -+#include "pvrsrv.h" -+#include "pvrsrv_tlstreams.h" -+#include "pvrsrv_tlcommon.h" -+#include "tlclient.h" -+#include "tlstream.h" -+ -+#include "rgxhwperf.h" -+#include "rgxapi_km.h" -+#include "rgxfwutils.h" -+#include "rgxtimecorr.h" -+#include "devicemem.h" -+#include "devicemem_pdump.h" -+#include "pdump_km.h" -+#include "pvrsrv_apphint.h" -+#include "process_stats.h" -+#include "rgx_hwperf_table.h" -+#include "rgxinit.h" -+ -+#include "info_page_defs.h" -+ -+/* This is defined by default to enable producer callbacks. -+ * Clients of the TL interface can disable the use of the callback -+ * with PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK. 
*/ -+#define SUPPORT_TL_PRODUCER_CALLBACK 1 -+ -+/* Maximum enum value to prevent access to RGX_HWPERF_STREAM_ID2_CLIENT stream */ -+#define RGX_HWPERF_MAX_STREAM_ID (RGX_HWPERF_STREAM_ID2_CLIENT) -+ -+/* Defines size of buffers returned from acquire/release calls */ -+#define FW_STREAM_BUFFER_SIZE (0x80000) -+#define HOST_STREAM_BUFFER_SIZE (0x20000) -+ -+/* Must be at least as large as two tl packets of maximum size */ -+static_assert(HOST_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1), -+ "HOST_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)"); -+static_assert(FW_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1), -+ "FW_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)"); -+ -+/****************************************************************************** -+ * RGX HW Performance Profiling Server API(s) -+ *****************************************************************************/ -+ -+static IMG_BOOL RGXServerFeatureFlagsToHWPerfFlagsAddBlock( -+ RGX_HWPERF_BVNC_BLOCK * const psBlocks, -+ IMG_UINT16 * const pui16Count, -+ const IMG_UINT16 ui16BlockID, /* see RGX_HWPERF_CNTBLK_ID */ -+ const IMG_UINT16 ui16NumCounters, -+ const IMG_UINT16 ui16NumBlocks) -+{ -+ const IMG_UINT16 ui16Count = *pui16Count; -+ -+ if (ui16Count < RGX_HWPERF_MAX_BVNC_BLOCK_LEN) -+ { -+ RGX_HWPERF_BVNC_BLOCK * const psBlock = &psBlocks[ui16Count]; -+ -+ /* If the GROUP is non-zero, convert from e.g. RGX_CNTBLK_ID_USC0 to RGX_CNTBLK_ID_USC_ALL. The table stores the former (plus the -+ number of blocks and counters) but PVRScopeServices expects the latter (plus the number of blocks and counters). The conversion -+ could always be moved to PVRScopeServices, but it's less code this way. */ -+ psBlock->ui16BlockID = (ui16BlockID & RGX_CNTBLK_ID_GROUP_MASK) ? 
(ui16BlockID | RGX_CNTBLK_ID_UNIT_ALL_MASK) : ui16BlockID; -+ if ((ui16BlockID & RGX_CNTBLK_ID_DA_MASK) == RGX_CNTBLK_ID_DA_MASK) -+ { -+ psBlock->ui16NumCounters = RGX_CNTBLK_COUNTERS_MAX; -+ } -+ else -+ { -+ psBlock->ui16NumCounters = ui16NumCounters; -+ } -+ psBlock->ui16NumBlocks = ui16NumBlocks; -+ -+ *pui16Count = ui16Count + 1; -+ return IMG_TRUE; -+ } -+ return IMG_FALSE; -+} -+ -+PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_HWPERF_BVNC *psBVNC) -+{ -+ IMG_PCHAR pszBVNC; -+ PVR_LOG_RETURN_IF_FALSE((NULL != psDevInfo), "psDevInfo invalid", PVRSRV_ERROR_INVALID_PARAMS); -+ -+ if ((pszBVNC = RGXDevBVNCString(psDevInfo))) -+ { -+ size_t uiStringLength = OSStringNLength(pszBVNC, RGX_HWPERF_MAX_BVNC_LEN - 1); -+ OSStringLCopy(psBVNC->aszBvncString, pszBVNC, uiStringLength + 1); -+ memset(&psBVNC->aszBvncString[uiStringLength], 0, RGX_HWPERF_MAX_BVNC_LEN - uiStringLength); -+ } -+ else -+ { -+ *psBVNC->aszBvncString = 0; -+ } -+ -+ psBVNC->ui32BvncKmFeatureFlags = 0x0; -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS)) -+ { -+ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PERFBUS_FLAG; -+ } -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) -+ { -+ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG; -+ } -+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE)) -+ { -+ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG; -+ } -+#endif -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERF_COUNTER_BATCH)) -+ { -+ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG; -+ } -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ROGUEXE)) -+ { -+ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_ROGUEXE_FLAG; -+ } -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, DUST_POWER_ISLAND_S7)) -+ { -+ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG; -+ } -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE)) -+ { -+ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG; -+ } -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) -+ { -+ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_MULTICORE_FLAG; -+ } -+ -+#ifdef SUPPORT_WORKLOAD_ESTIMATION -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Not a part of BVNC feature line and so doesn't need the feature supported check */ -+ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION; -+ } -+#endif -+ -+ /* Define the HW counter block counts. */ -+ { -+ RGX_HWPERF_BVNC_BLOCK * const psBlocks = psBVNC->aBvncBlocks; -+ IMG_UINT16 * const pui16Count = &psBVNC->ui16BvncBlocks; -+ const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel; -+ const IMG_UINT32 ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel); -+ IMG_UINT32 ui32BlkCfgIdx; -+ size_t uiCount; -+ IMG_BOOL bOk = IMG_TRUE; -+ -+ // Initialise to zero blocks -+ *pui16Count = 0; -+ -+ // Add all the blocks -+ for (ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; ui32BlkCfgIdx++) -+ { -+ const RGXFW_HWPERF_CNTBLK_TYPE_MODEL * const psCntBlkInfo = &asCntBlkTypeModel[ui32BlkCfgIdx]; -+ RGX_HWPERF_CNTBLK_RT_INFO sCntBlkRtInfo; -+ /* psCntBlkInfo->ui8NumUnits gives compile-time info. 
For BVNC agnosticism, we use this: */ -+ if (psCntBlkInfo->pfnIsBlkPresent(psCntBlkInfo, psDevInfo, &sCntBlkRtInfo)) -+ { -+ bOk &= RGXServerFeatureFlagsToHWPerfFlagsAddBlock(psBlocks, pui16Count, psCntBlkInfo->ui32CntBlkIdBase, psCntBlkInfo->ui8NumCounters, sCntBlkRtInfo.ui32NumUnits); -+ } -+ } -+ -+ /* If this fails, consider why the static_assert didn't fail, and consider increasing RGX_HWPERF_MAX_BVNC_BLOCK_LEN */ -+ PVR_ASSERT(bOk); -+ -+ // Zero the remaining entries -+ uiCount = *pui16Count; -+ OSDeviceMemSet(&psBlocks[uiCount], 0, (RGX_HWPERF_MAX_BVNC_BLOCK_LEN - uiCount) * sizeof(*psBlocks)); -+ } -+ -+ /* The GPU core count is overwritten by the FW */ -+ psBVNC->ui16BvncGPUCores = 0; -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ PVRSRVRGXConfigMuxHWPerfCountersKM -+ */ -+PVRSRV_ERROR PVRSRVRGXConfigMuxHWPerfCountersKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32ArrayLen, -+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psBlockConfigs) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGXFWIF_KCCB_CMD sKccbCmd; -+ DEVMEM_MEMDESC* psFwBlkConfigsMemDesc; -+ RGX_HWPERF_CONFIG_MUX_CNTBLK* psFwArray; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ PVRSRV_RGXDEV_INFO *psDevice; -+ -+ PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ psDevice = psDeviceNode->pvDevice; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ PVR_LOG_RETURN_IF_FALSE(ui32ArrayLen > 0, "ui32ArrayLen is 0", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ PVR_LOG_RETURN_IF_FALSE(psBlockConfigs != NULL, "psBlockConfigs is NULL", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ PVR_DPF_ENTERED; -+ -+ /* Fill in the command structure with the parameters needed -+ */ -+ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS; -+ sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.ui32NumBlocks = ui32ArrayLen; -+ -+ /* used for passing counters config to the Firmware, write-only for the CPU */ -+ eError = DevmemFwAllocate(psDevice, -+ sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)*ui32ArrayLen, -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), -+ "FwHWPerfCountersConfigBlock", -+ &psFwBlkConfigsMemDesc); -+ PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate"); -+ -+ eError = RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.sBlockConfigs, -+ psFwBlkConfigsMemDesc, 0, RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail1); -+ -+ eError = DevmemAcquireCpuVirtAddr(psFwBlkConfigsMemDesc, (void **)&psFwArray); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail2); -+ -+ OSCachedMemCopyWMB(psFwArray, psBlockConfigs, sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)*ui32ArrayLen); -+ DevmemPDumpLoadMem(psFwBlkConfigsMemDesc, -+ 0, -+ sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)*ui32ArrayLen, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ /*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigMuxHWPerfCountersKM parameters set, calling FW"));*/ -+ -+ /* Ask the FW to carry out the HWPerf configuration command -+ */ -+ eError = RGXScheduleCommandAndGetKCCBSlot(psDevice, -+ RGXFWIF_DM_GP, -+ &sKccbCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ &ui32kCCBCommandSlot); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", fail2); -+ -+ 
/*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigMuxHWPerfCountersKM command scheduled for FW"));*/ -+ -+ /* Wait for FW to complete */ -+ eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", fail3); -+ -+ /* Release temporary memory used for block configuration -+ */ -+ RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc); -+ DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc); -+ DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc); -+ -+ /*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigMuxHWPerfCountersKM firmware completed"));*/ -+ -+ PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED", ui32ArrayLen)); -+ -+ PVR_DPF_RETURN_OK; -+ -+fail3: -+ DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc); -+fail2: -+ RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc); -+fail1: -+ DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc); -+ -+ PVR_DPF_RETURN_RC(eError); -+} -+ -+ -+/* -+ PVRSRVRGXConfigCustomCountersReadingHWPerfKM -+ */ -+PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM( -+ CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT16 ui16CustomBlockID, -+ IMG_UINT16 ui16NumCustomCounters, -+ IMG_UINT32 * pui32CustomCounterIDs) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGXFWIF_KCCB_CMD sKccbCmd; -+ DEVMEM_MEMDESC* psFwSelectCntrsMemDesc = NULL; -+ IMG_UINT32* psFwArray; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ PVRSRV_RGXDEV_INFO *psDevice = psDeviceNode->pvDevice; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psDeviceNode); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVRGXSelectCustomCountersKM: configure block %u to read %u counters", ui16CustomBlockID, ui16NumCustomCounters)); -+ -+ /* Fill in the command structure with the parameters needed */ -+ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS; -+ sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16NumCounters = ui16NumCustomCounters; -+ sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16CustomBlock = ui16CustomBlockID; -+ -+ if (ui16NumCustomCounters > 0) -+ { -+ PVR_ASSERT(pui32CustomCounterIDs); -+ -+ /* used for passing counters config to the Firmware, write-only for the CPU */ -+ eError = DevmemFwAllocate(psDevice, -+ sizeof(IMG_UINT32) * ui16NumCustomCounters, -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), -+ "FwHWPerfConfigCustomCounters", -+ &psFwSelectCntrsMemDesc); -+ PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate"); -+ -+ eError = RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.sCustomCounterIDs, -+ psFwSelectCntrsMemDesc, 0, RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail1); -+ -+ eError = DevmemAcquireCpuVirtAddr(psFwSelectCntrsMemDesc, (void **)&psFwArray); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail2); -+ -+ OSCachedMemCopyWMB(psFwArray, pui32CustomCounterIDs, sizeof(IMG_UINT32) * ui16NumCustomCounters); -+ DevmemPDumpLoadMem(psFwSelectCntrsMemDesc, -+ 0, -+ sizeof(IMG_UINT32) * ui16NumCustomCounters, -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+ -+ /* Push in the KCCB the command to configure the custom counters block */ -+ eError = 
RGXScheduleCommandAndGetKCCBSlot(psDevice, -+ RGXFWIF_DM_GP, -+ &sKccbCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ &ui32kCCBCommandSlot); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", fail3); -+ -+ PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXSelectCustomCountersKM: Command scheduled")); -+ -+ /* Wait for FW to complete */ -+ eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", fail3); -+ -+ PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXSelectCustomCountersKM: FW operation completed")); -+ -+ if (ui16NumCustomCounters > 0) -+ { -+ /* Release temporary memory used for block configuration */ -+ RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc); -+ DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc); -+ DevmemFwUnmapAndFree(psDevice, psFwSelectCntrsMemDesc); -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "HWPerf custom counters %u reading will be sent with the next HW events", ui16NumCustomCounters)); -+ -+ PVR_DPF_RETURN_OK; -+ -+fail3: -+ if (psFwSelectCntrsMemDesc) -+ { -+ DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc); -+ } -+fail2: -+ if (psFwSelectCntrsMemDesc) -+ { -+ RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc); -+ } -+fail1: -+ if (psFwSelectCntrsMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevice, psFwSelectCntrsMemDesc); -+ } -+ -+ PVR_DPF_RETURN_RC(eError); -+} -+ -+/* -+ PVRSRVRGXConfigureHWPerfBlocksKM -+ */ -+PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( -+ CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT32 ui32CtrlWord, -+ IMG_UINT32 ui32ArrayLen, -+ RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGXFWIF_KCCB_CMD sKccbCmd; -+ DEVMEM_MEMDESC *psFwBlkConfigsMemDesc; -+ RGX_HWPERF_CONFIG_CNTBLK *psFwArray; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ PVRSRV_RGXDEV_INFO *psDevice; -+ -+ PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ psDevice = psDeviceNode->pvDevice; -+ -+ PVR_UNREFERENCED_PARAMETER(ui32CtrlWord); -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ PVR_LOG_RETURN_IF_FALSE(ui32ArrayLen > 0, "ui32ArrayLen is 0", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ PVR_LOG_RETURN_IF_FALSE(psBlockConfigs != NULL, "psBlockConfigs is NULL", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ PVR_DPF_ENTERED; -+ -+ /* Fill in the command structure with the parameters needed */ -+ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS; -+ sKccbCmd.uCmdData.sHWPerfCfgDABlks.ui32NumBlocks = ui32ArrayLen; -+ -+ /* used for passing counters config to the Firmware, write-only for the CPU */ -+ eError = DevmemFwAllocate(psDevice, -+ sizeof(RGX_HWPERF_CONFIG_CNTBLK) * ui32ArrayLen, -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), -+ "FwHWPerfCountersDAConfigBlock", -+ &psFwBlkConfigsMemDesc); -+ PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate"); -+ -+ eError = RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfCfgDABlks.sBlockConfigs, -+ psFwBlkConfigsMemDesc, 0, RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail1); -+ -+ eError = DevmemAcquireCpuVirtAddr(psFwBlkConfigsMemDesc, (void **)&psFwArray); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail2); -+ -+ OSCachedMemCopyWMB(psFwArray, 
psBlockConfigs, sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen); -+ DevmemPDumpLoadMem(psFwBlkConfigsMemDesc, -+ 0, -+ sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ /* Ask the FW to carry out the HWPerf configuration command. */ -+ eError = RGXScheduleCommandAndGetKCCBSlot(psDevice, -+ RGXFWIF_DM_GP, -+ &sKccbCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ &ui32kCCBCommandSlot); -+ -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", fail2); -+ -+ /* Wait for FW to complete */ -+ eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", fail3); -+ -+ /* Release temporary memory used for block configuration. */ -+ RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc); -+ DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc); -+ DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc); -+ -+ PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED", -+ ui32ArrayLen)); -+ -+ PVR_DPF_RETURN_OK; -+ -+fail3: -+ DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc); -+fail2: -+ RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc); -+fail1: -+ DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc); -+ -+ PVR_DPF_RETURN_RC(eError); -+} -+ -+/****************************************************************************** -+ * Currently only implemented on Linux. Feature can be enabled to provide -+ * an interface to 3rd-party kernel modules that wish to access the -+ * HWPerf data. The API is documented in the rgxapi_km.h header and -+ * the rgx_hwperf* headers. -+ *****************************************************************************/ -+ -+/* Internal HWPerf kernel connection/device data object to track the state -+ * of a client session. 
-+ */ -+typedef struct -+{ -+ PVRSRV_DEVICE_NODE* psRgxDevNode; -+ PVRSRV_RGXDEV_INFO* psRgxDevInfo; -+ -+ /* TL Open/close state */ -+ IMG_HANDLE hSD[RGX_HWPERF_MAX_STREAM_ID]; -+ -+ /* TL Acquire/release state */ -+ IMG_PBYTE pHwpBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer returned to user in acquire call */ -+ IMG_PBYTE pHwpBufEnd[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to end of HwpBuf */ -+ IMG_PBYTE pTlBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer obtained via TlAcquireData */ -+ IMG_PBYTE pTlBufPos[RGX_HWPERF_MAX_STREAM_ID]; /*!< initial position in TlBuf to acquire packets */ -+ IMG_PBYTE pTlBufRead[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to the last packet read */ -+ IMG_UINT32 ui32AcqDataLen[RGX_HWPERF_MAX_STREAM_ID]; /*!< length of acquired TlBuf */ -+ IMG_BOOL bRelease[RGX_HWPERF_MAX_STREAM_ID]; /*!< used to determine whether or not to release currently held TlBuf */ -+ -+ -+} RGX_KM_HWPERF_DEVDATA; -+ -+PVRSRV_ERROR RGXHWPerfConfigMuxCounters( -+ RGX_HWPERF_CONNECTION *psHWPerfConnection, -+ IMG_UINT32 ui32NumBlocks, -+ RGX_HWPERF_CONFIG_MUX_CNTBLK *asBlockConfigs) -+{ -+ PVRSRV_ERROR eError; -+ RGX_KM_HWPERF_DEVDATA* psDevData; -+ RGX_HWPERF_DEVICE *psHWPerfDev; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ /* Validate input argument values supplied by the caller */ -+ if (!psHWPerfConnection || ui32NumBlocks==0 || !asBlockConfigs) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psHWPerfDev = psHWPerfConnection->psHWPerfDevList; -+ -+ while (psHWPerfDev) -+ { -+ psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; -+ -+ /* Call the internal server API */ -+ eError = PVRSRVRGXConfigMuxHWPerfCountersKM(NULL, -+ psDevData->psRgxDevNode, -+ ui32NumBlocks, -+ asBlockConfigs); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXConfigMuxHWPerfCountersKM"); -+ -+ psHWPerfDev = psHWPerfDev->psNext; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR RGXHWPerfConfigureAndEnableCustomCounters( -+ RGX_HWPERF_CONNECTION *psHWPerfConnection, -+ IMG_UINT16 ui16CustomBlockID, -+ IMG_UINT16 ui16NumCustomCounters, -+ IMG_UINT32 *pui32CustomCounterIDs) -+{ -+ PVRSRV_ERROR eError; -+ RGX_HWPERF_DEVICE *psHWPerfDev; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ /* Validate input arguments supplied by the caller */ -+ PVR_LOG_RETURN_IF_FALSE((NULL != psHWPerfConnection), "psHWPerfConnection invalid", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ PVR_LOG_RETURN_IF_FALSE((0 != ui16NumCustomCounters), "uiNumBlocks invalid", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ PVR_LOG_RETURN_IF_FALSE((NULL != pui32CustomCounterIDs),"asBlockConfigs invalid", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ /* Check # of blocks */ -+ PVR_LOG_RETURN_IF_FALSE((!(ui16CustomBlockID > RGX_HWPERF_MAX_CUSTOM_BLKS)),"ui16CustomBlockID invalid", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ /* Check # of counters */ -+ PVR_LOG_RETURN_IF_FALSE((!(ui16NumCustomCounters > RGX_HWPERF_MAX_CUSTOM_CNTRS)),"ui16NumCustomCounters invalid", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ psHWPerfDev = psHWPerfConnection->psHWPerfDevList; -+ -+ while (psHWPerfDev) -+ { -+ RGX_KM_HWPERF_DEVDATA *psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; -+ -+ eError = PVRSRVRGXConfigCustomCountersKM(NULL, -+ psDevData->psRgxDevNode, -+ ui16CustomBlockID, ui16NumCustomCounters, pui32CustomCounterIDs); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXCtrlCustHWPerfKM"); -+ -+ psHWPerfDev = psHWPerfDev->psNext; -+ } -+ 
-+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function GetHWPerfBlockTypeByID -+@Description Lookup function to obtain a block type descriptor for a given -+ counter block identifier. -+@Input psDevInfo A pointer to current device info. -+@Input ui32BlockID The block ID for which a type -+ descriptor should be retrieved. -+@Return RGXFW_HWPERF_CNTBLK_TYPE_MODEL Block type descriptor. -+*/ /**************************************************************************/ -+static const RGXFW_HWPERF_CNTBLK_TYPE_MODEL * -+GetHWPerfBlockTypeByID(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32BlockID) -+{ -+ IMG_UINT32 ui32CntBlkModelLen; -+ const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel; -+ IMG_UINT32 ui32TableIdx = 0xFFFF; -+ RGX_HWPERF_CNTBLK_RT_INFO sRtInfo; /* Only used to satisfy pfnIsBlkPresent requirements. */ -+ -+#if defined(HWPERF_UNIFIED) -+ IMG_UINT32 uiBlockID = (IMG_UINT32)(ui32BlockID & ~(RGX_CNTBLK_ID_UNIT_ALL_MASK|RGX_CNTBLK_ID_DA_MASK)); -+#else -+ IMG_UINT32 uiBlockID = (IMG_UINT32)(ui32BlockID & ~RGX_CNTBLK_ID_UNIT_ALL_MASK); -+#endif -+ -+ ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel); -+ -+ /* Is it a direct block? */ -+ if (uiBlockID < RGX_CNTBLK_ID_DIRECT_LAST) -+ { -+ ui32TableIdx = uiBlockID; -+ } -+ /* Is it an indirect block */ -+ else if ((uiBlockID > RGX_CNTBLK_ID_DIRECT_LAST) && (uiBlockID < RGX_CNTBLK_ID_LAST)) -+ { -+ ui32TableIdx = RGX_CNTBLK_ID_DIRECT_LAST + (((uiBlockID & ~RGX_CNTBLK_ID_UNIT_ALL_MASK) >> RGX_CNTBLK_ID_GROUP_SHIFT) - 1U); -+ } -+ /* Unknown mapping from CNTBLK_ID to Table index */ -+ else -+ { -+ return NULL; -+ } -+ -+ PVR_ASSERT(ui32TableIdx < ui32CntBlkModelLen); -+ -+ if (psDevInfo == NULL) -+ { -+ PVR_LOG(("psDevInfo invalid")); -+ return NULL; -+ } -+ -+ if ((ui32TableIdx < ui32CntBlkModelLen) && -+ (asCntBlkTypeModel[ui32TableIdx].pfnIsBlkPresent(&asCntBlkTypeModel[ui32TableIdx], psDevInfo, &sRtInfo) != IMG_FALSE)) -+ { -+ return &asCntBlkTypeModel[ui32TableIdx]; -+ } -+ -+ /* Fall through, block not valid from run-time validation */ -+ return NULL; -+} -+ -+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfMuxCountersKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_UINT32 ui32BlockID, -+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psConfiguredMuxCounters) -+{ -+ RGXFWIF_HWPERF_CTL *psHWPerfCtl; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is invalid", PVRSRV_ERROR_INVALID_PARAMS); -+ -+ eError = RGXAcquireHWPerfCtlCPUAddr(psDeviceNode, &psHWPerfCtl); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXAcquireHWPerfCtlCPUAddr"); -+ -+ eError = PVRSRVRGXGetConfiguredHWPerfMuxCounters(psDeviceNode, -+ psHWPerfCtl, -+ ui32BlockID, -+ psConfiguredMuxCounters); -+ PVR_LOG_IF_ERROR(eError, "PVRSRVRGXGetConfiguredHWPerfMuxCounters"); -+ -+ RGXReleaseHWPerfCtlCPUAddr(psDeviceNode); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfMuxCounters(PVRSRV_DEVICE_NODE *psDevNode, -+ RGXFWIF_HWPERF_CTL *psHWPerfCtl, -+ IMG_UINT32 ui32BlockID, -+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psConfiguredMuxCounters) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = NULL; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_RETURN_IF_FALSE(psDevNode != NULL, PVRSRV_ERROR_INVALID_PARAMS); -+ PVR_RETURN_IF_FALSE(psHWPerfCtl != NULL, PVRSRV_ERROR_INVALID_PARAMS); -+ PVR_RETURN_IF_FALSE(psConfiguredMuxCounters != NULL, PVRSRV_ERROR_INVALID_PARAMS); -+ -+ psDevInfo = 
(PVRSRV_RGXDEV_INFO*)psDevNode->pvDevice; -+ PVR_RETURN_IF_FALSE(psDevInfo != NULL, PVRSRV_ERROR_INVALID_PARAMS); -+ -+ if ((ui32BlockID & ~RGX_CNTBLK_ID_UNIT_ALL_MASK) < RGX_CNTBLK_ID_LAST) -+ { -+ RGXFWIF_HWPERF_CTL_BLK *psBlock = rgxfw_hwperf_get_block_ctl(ui32BlockID, psHWPerfCtl); -+ const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *psBlkTypeDesc; -+ IMG_UINT32 i, ui32LastCountIdx = 0, ui8CurCountIdx = 0; -+ RGX_HWPERF_CONFIG_MUX_CNTBLK sBlockConfig; -+ -+ PVR_RETURN_IF_ERROR(PVRSRVPowerLock(psDevNode)); -+ -+ if (psBlock == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Block ID (0x%04x) was invalid.", ui32BlockID)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, Error); -+ } -+ -+ if (!psBlock->ui32Enabled || !psBlock->ui32Valid) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Block (0x%04x) is not %s", -+ ui32BlockID, -+ !psBlock->ui32Enabled ? "enabled." : "configured.")); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, Error); -+ } -+ -+ psBlkTypeDesc = GetHWPerfBlockTypeByID(psDevInfo, psBlock->eBlockID); -+ if (psBlkTypeDesc == NULL) -+ { -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, Error); -+ } -+ -+ sBlockConfig.ui16BlockID = psBlock->eBlockID; -+ sBlockConfig.ui8Mode = 0; -+ -+ for (i = 0; ((psBlock->uiCounterMask >> i) != 0) && -+ (ui8CurCountIdx < psBlkTypeDesc->ui8NumCounters); i++) -+ { -+ if (psBlock->uiCounterMask & (1 << i)) -+ { -+ IMG_UINT8 ui8Mode = 0; -+ -+ ui8Mode = (psBlock->aui64CounterCfg[i] >> psBlkTypeDesc->ui8SelectRegModeShift) & 1U; -+ sBlockConfig.ui8Mode |= ui8Mode << ui32LastCountIdx; -+ -+ sBlockConfig.aui8GroupSelect[ui32LastCountIdx] = -+ (psBlock->aui64CounterCfg[i] >> RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT) & 0x1F; -+ -+ sBlockConfig.aui16BitSelect[ui32LastCountIdx] = -+ (psBlock->aui64CounterCfg[i] >> RGX_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT) & 0x7FFF; -+ -+#if defined(RGX_FEATURE_PERF_COUNTER_BATCH) -+ sBlockConfig.aui32BatchMax[ui32LastCountIdx] = -+ (psBlock->aui64CounterCfg[i] >> RGX_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT) & 0x1FFF; -+ -+ sBlockConfig.aui32BatchMin[ui32LastCountIdx] = -+ (psBlock->aui64CounterCfg[i] >> RGX_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT) & 0x1FFF; -+#endif -+ ui32LastCountIdx++; -+ ui8CurCountIdx++; -+ } -+ } -+ -+ sBlockConfig.ui8CounterSelect = (1 << ui32LastCountIdx) - 1; -+ *psConfiguredMuxCounters = sBlockConfig; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Block ID (0x%04x) was invalid.", ui32BlockID)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, InvalidIDError); -+ } -+ -+Error: -+ PVRSRVPowerUnlock(psDevNode); -+ -+InvalidIDError: -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfCounters(PVRSRV_DEVICE_NODE *psDevNode, -+ RGXFWIF_HWPERF_CTL *psHWPerfCtl, -+ IMG_UINT32 ui32BlockID, -+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters) -+{ -+ RGX_HWPERF_CONFIG_CNTBLK sBlockConfig; -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_RETURN_IF_FALSE(psDevNode != NULL, PVRSRV_ERROR_INVALID_PARAMS); -+ PVR_RETURN_IF_FALSE(psHWPerfCtl != NULL, PVRSRV_ERROR_INVALID_PARAMS); -+ PVR_RETURN_IF_FALSE(psConfiguredCounters != NULL, PVRSRV_ERROR_INVALID_PARAMS); -+ -+ if ((ui32BlockID & RGX_CNTBLK_ID_CUSTOM_MASK) >= RGX_CNTBLK_ID_LAST) -+ { -+ /* Validate block ID */ -+ switch (ui32BlockID) -+ { -+ case RGX_CNTBLK_ID_CUSTOM0: -+ case RGX_CNTBLK_ID_CUSTOM1: -+ case RGX_CNTBLK_ID_CUSTOM2: -+ case RGX_CNTBLK_ID_CUSTOM3: -+ case RGX_CNTBLK_ID_CUSTOM4_FW: -+ { -+ PVR_RETURN_IF_ERROR(PVRSRVPowerLock(psDevNode)); -+ -+ /* Check to see if this block is enabled */ -+ if 
(psHWPerfCtl->ui32SelectedCountersBlockMask & (1 << (ui32BlockID & 0x0F))) -+ { -+ RGXFW_HWPERF_SELECT *psBlock = &psHWPerfCtl->SelCntr[ui32BlockID & 0x0F]; -+ -+ sBlockConfig.ui16BlockID = ui32BlockID; -+ sBlockConfig.ui16NumCounters = psBlock->ui32NumSelectedCounters; -+ -+ for (i = 0; i < psBlock->ui32NumSelectedCounters; i++) -+ { -+ sBlockConfig.ui16Counters[i] = psBlock->aui32SelectedCountersIDs[i]; -+ } -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Block (0x%04x) is not enabled.", ui32BlockID)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, Error); -+ } -+ break; -+ } -+ default: -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Block ID (0x%04x) was invalid.", ui32BlockID)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, InvalidIDError); -+ } -+ } -+ } -+#if defined(HWPERF_UNIFIED) -+ else if ((ui32BlockID & RGX_CNTBLK_ID_DA_MASK) == RGX_CNTBLK_ID_DA_MASK) -+ { -+ RGXFWIF_HWPERF_DA_BLK *psBlock = rgxfw_hwperf_get_da_block_ctl(ui32BlockID, psHWPerfCtl); -+ -+ PVR_RETURN_IF_ERROR(PVRSRVPowerLock(psDevNode)); -+ -+ if (psBlock == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Block ID (0x%04x) was invalid.", ui32BlockID)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, Error); -+ } -+ -+ if (!psBlock->uiEnabled) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Block (0x%04x) is not enabled.", ui32BlockID)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, Error); -+ } -+ -+ sBlockConfig.ui16BlockID = psBlock->eBlockID; -+ sBlockConfig.ui16NumCounters = psBlock->uiNumCounters; -+ -+ for (i = 0; i < psBlock->uiNumCounters; i++) -+ { -+ sBlockConfig.ui16Counters[i] = psBlock->aui32Counters[i]; -+ } -+ } -+#endif -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Block ID (0x%04x) was invalid.", ui32BlockID)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, InvalidIDError); -+ } -+ -+Error: -+ PVRSRVPowerUnlock(psDevNode); -+ -+InvalidIDError: -+ if (eError == PVRSRV_OK) -+ { -+ *psConfiguredCounters = sBlockConfig; -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRGXGetEnabledHWPerfBlocks(PVRSRV_DEVICE_NODE *psDevNode, -+ RGXFWIF_HWPERF_CTL *psHWPerfCtl, -+ IMG_UINT32 ui32ArrayLen, -+ IMG_UINT32 *pui32BlockCount, -+ IMG_UINT32 *pui32EnabledBlockIDs) -+{ -+ IMG_UINT32 ui32LastIdx = 0; -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_RETURN_IF_FALSE(psDevNode != NULL, PVRSRV_ERROR_INVALID_PARAMS); -+ PVR_RETURN_IF_FALSE(psHWPerfCtl != NULL, PVRSRV_ERROR_INVALID_PARAMS); -+ PVR_RETURN_IF_FALSE(pui32BlockCount != NULL, PVRSRV_ERROR_INVALID_PARAMS); -+ -+ *pui32BlockCount = 0; -+ -+ if (ui32ArrayLen > 0 && pui32EnabledBlockIDs == NULL) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "ui32ArrayLen is greater than 0 but pui32EnabledBlockIDs is NULL")); -+ } -+ -+ PVR_RETURN_IF_ERROR(PVRSRVPowerLock(psDevNode)); -+ -+ for (i = 0; i < RGX_HWPERF_MAX_MUX_BLKS; i++) -+ { -+ if (psHWPerfCtl->sBlkCfg[i].ui32Enabled && psHWPerfCtl->sBlkCfg[i].ui32Valid) -+ { -+ *pui32BlockCount += 1; -+ -+ if (pui32EnabledBlockIDs == NULL) -+ { -+ continue; -+ } -+ -+ if (ui32LastIdx + 1 > ui32ArrayLen) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "ui32ArrayLen less than the number of enabled blocks.")); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, Error); -+ } -+ -+ pui32EnabledBlockIDs[ui32LastIdx] = psHWPerfCtl->sBlkCfg[i].eBlockID; -+ ui32LastIdx++; -+ } -+ } -+ -+ for (i = 0; i < RGX_HWPERF_MAX_CUSTOM_BLKS; i++) -+ { -+ if (psHWPerfCtl->ui32SelectedCountersBlockMask == 0) -+ { -+ break; -+ } -+ -+ if (psHWPerfCtl->ui32SelectedCountersBlockMask & (1 << i)) -+ { -+ *pui32BlockCount += 1; -+ -+ if 
(pui32EnabledBlockIDs == NULL) -+ { -+ continue; -+ } -+ -+ if (ui32LastIdx + 1 > ui32ArrayLen) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "ui32ArrayLen less than the number of enabled blocks.")); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, Error); -+ } -+ -+ pui32EnabledBlockIDs[ui32LastIdx] = RGX_CNTBLK_ID_CUSTOM0 + i; -+ ui32LastIdx++; -+ } -+ } -+ -+#if defined(HWPERF_UNIFIED) -+ for (i = 0; i < RGX_HWPERF_MAX_DA_BLKS; i++) -+ { -+ if (psHWPerfCtl->sDABlkCfg[i].uiEnabled) -+ { -+ *pui32BlockCount += 1; -+ -+ if (pui32EnabledBlockIDs == NULL) -+ { -+ continue; -+ } -+ -+ if (ui32LastIdx > ui32ArrayLen) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "ui32ArrayLen less than the number of enabled blocks.")); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, Error); -+ } -+ -+ pui32EnabledBlockIDs[ui32LastIdx] = psHWPerfCtl->sDABlkCfg[i].eBlockID; -+ ui32LastIdx++; -+ } -+ } -+#endif -+ -+Error: -+ PVRSRVPowerUnlock(psDevNode); -+ return eError; -+} -+ -+/****************************************************************************** -+ End of file (rgxhwperf.c) -+ ******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxhwperf.h b/drivers/gpu/drm/img-rogue/rgxhwperf.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxhwperf.h -@@ -0,0 +1,96 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX HW Performance header file -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the RGX HWPerf functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RGXHWPERF_H_ -+#define RGXHWPERF_H_ -+ -+#include "rgx_fwif_hwperf.h" -+#include "rgxhwperf_common.h" -+ -+/****************************************************************************** -+ * RGX HW Performance Profiling API(s) Rogue specific -+ *****************************************************************************/ -+ -+PVRSRV_ERROR PVRSRVRGXConfigMuxHWPerfCountersKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32ArrayLen, -+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psBlockConfigs); -+ -+ -+PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM( -+ CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT16 ui16CustomBlockID, -+ IMG_UINT16 ui16NumCustomCounters, -+ IMG_UINT32 * pui32CustomCounterIDs); -+ -+PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( -+ CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT32 ui32CtrlWord, -+ IMG_UINT32 ui32ArrayLen, -+ RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs); -+ -+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfMuxCountersKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_UINT32 ui32BlockID, -+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psConfiguredMuxCounters); -+ -+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfMuxCounters(PVRSRV_DEVICE_NODE *psDevNode, -+ RGXFWIF_HWPERF_CTL *psHWPerfCtl, -+ IMG_UINT32 ui32BlockID, -+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psConfiguredMuxCounters); -+ -+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfCounters(PVRSRV_DEVICE_NODE *psDevNode, -+ RGXFWIF_HWPERF_CTL *psHWPerfCtl, -+ IMG_UINT32 ui32BlockID, -+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters); -+ -+PVRSRV_ERROR PVRSRVRGXGetEnabledHWPerfBlocks(PVRSRV_DEVICE_NODE *psDevNode, -+ RGXFWIF_HWPERF_CTL *psHWPerfCtl, -+ IMG_UINT32 ui32ArrayLength, -+ IMG_UINT32 *pui32BlockCount, -+ IMG_UINT32 *pui32EnabledBlockIDs); -+ -+#endif /* RGXHWPERF_H_ */ -diff --git a/drivers/gpu/drm/img-rogue/rgxhwperf_common.c b/drivers/gpu/drm/img-rogue/rgxhwperf_common.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxhwperf_common.c -@@ -0,0 +1,4050 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX HW Performance implementation -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX HW Performance implementation -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ /**************************************************************************/ -+ -+//#define PVR_DPF_FUNCTION_TRACE_ON 1 -+#undef PVR_DPF_FUNCTION_TRACE_ON -+ -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include "rgxdevice.h" -+#include "pvrsrv_error.h" -+#include "pvr_notifier.h" -+#include "osfunc.h" -+#include "allocmem.h" -+ -+#include "pvrsrv.h" -+#include "pvrsrv_tlstreams.h" -+#include "pvrsrv_tlcommon.h" -+#include "tlclient.h" -+#include "tlstream.h" -+ -+#include "rgxhwperf.h" -+#include "rgxapi_km.h" -+#include "rgxfwutils.h" -+#include "rgxtimecorr.h" -+#include "devicemem.h" -+#include "devicemem_pdump.h" -+#include "pdump_km.h" -+#include "pvrsrv_apphint.h" -+#include "process_stats.h" -+#include "rgx_hwperf_table.h" -+#include "rgxinit.h" -+#if (defined(__linux__) && !defined(__QNXNTO__) && !defined(INTEGRITY_OS)) -+#include "ospvr_gputrace.h" -+#endif -+ -+#include "info_page_defs.h" -+ -+/* This is defined by default to enable producer callbacks. -+ * Clients of the TL interface can disable the use of the callback -+ * with PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK. 
*/ -+#define SUPPORT_TL_PRODUCER_CALLBACK 1 -+ -+/* Maximum enum value to prevent access to RGX_HWPERF_STREAM_ID2_CLIENT stream */ -+#define RGX_HWPERF_MAX_STREAM_ID (RGX_HWPERF_STREAM_ID2_CLIENT) -+ -+/* Defines size of buffers returned from acquire/release calls */ -+#define FW_STREAM_BUFFER_SIZE (0x80000) -+#define HOST_STREAM_BUFFER_SIZE (0x20000) -+ -+/* Must be at least as large as two tl packets of maximum size */ -+static_assert(HOST_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1), -+ "HOST_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)"); -+static_assert(FW_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1), -+ "FW_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)"); -+ -+IMG_INTERNAL /*static inline*/ IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **); -+static IMG_UINT64 RGXHWPerfFwSetEventFilterNoLock(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_L2_STREAM_ID eL2StreamId, -+ IMG_UINT64 uiFilter); -+ -+static inline IMG_UINT32 -+RGXHWPerfGetPackets(IMG_UINT32 ui32BytesExp, -+ IMG_UINT32 ui32AllowedSize, -+ RGX_PHWPERF_V2_PACKET_HDR psCurPkt ) -+{ -+ IMG_UINT32 sizeSum = 0; -+ RGXFwSharedMemCacheOpValue(psCurPkt->ui32Size, INVALIDATE); -+ -+ /* Traverse the array to find how many packets will fit in the available space. */ -+ while ( sizeSum < ui32BytesExp && -+ sizeSum + RGX_HWPERF_GET_SIZE(psCurPkt) < ui32AllowedSize ) -+ { -+ sizeSum += RGX_HWPERF_GET_SIZE(psCurPkt); -+ psCurPkt = RGX_HWPERF_GET_NEXT_PACKET(psCurPkt); -+ RGXFwSharedMemCacheOpValue(psCurPkt->ui32Size, INVALIDATE); -+ } -+ -+ return sizeSum; -+} -+ -+static inline void -+RGXSuspendHWPerfL2DataCopy(PVRSRV_RGXDEV_INFO* psDeviceInfo, -+ RGX_HWPERF_L2_STREAM_ID eL2StreamId, -+ IMG_BOOL bIsReaderConnected) -+{ -+ PVR_ASSERT(eL2StreamId < RGX_HWPERF_L2_STREAM_LAST); -+ -+ if (!bIsReaderConnected) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s : HWPerf FW events enabled but L2 host buffer " -+ "for stream %u is full and no reader is currently connected, suspending " -+ "event collection. Connect a reader or restart driver to avoid event loss.", -+ __func__, eL2StreamId)); -+ psDeviceInfo->bSuspendHWPerfL2DataCopy[eL2StreamId] = IMG_TRUE; -+ } -+} -+ -+static IMG_UINT32 RGXHWPerfCopyData(PVRSRV_RGXDEV_INFO *psDeviceInfo, -+ IMG_BYTE *pbSrcBuffer, -+ RGX_HWPERF_L2_STREAM_ID eL2StreamId, -+ IMG_UINT32 uiBytesToCopy) -+{ -+ IMG_BYTE *pbDestBuffer; -+ IMG_UINT32 uiBytesCopied = 0; -+ IMG_UINT32 uiFreeSpace; -+ IMG_UINT32 uiBytesToCopyMin = RGX_HWPERF_GET_SIZE(RGX_HWPERF_GET_PACKET(pbSrcBuffer)); -+ IMG_BOOL bIsReaderConnected; -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hHWPerfDestStream; -+ -+ PVR_ASSERT(eL2StreamId < RGX_HWPERF_L2_STREAM_LAST); -+ -+ hHWPerfDestStream = psDeviceInfo->hHWPerfStream[eL2StreamId]; -+ -+ PVR_DPF_ENTERED; -+ -+ /* Try submitting all data in one TL packet. 
*/ -+ eError = TLStreamReserve2(hHWPerfDestStream, &pbDestBuffer, uiBytesToCopy, uiBytesToCopyMin, -+ &uiFreeSpace, &bIsReaderConnected); -+ if (eError == PVRSRV_OK) -+ { -+ RGXFwSharedMemCacheOpExec(pbSrcBuffer, uiBytesToCopy, PVRSRV_CACHE_OP_INVALIDATE); -+ OSDeviceMemCopy(pbDestBuffer, pbSrcBuffer, (size_t) uiBytesToCopy); -+ -+ eError = TLStreamCommit(hHWPerfDestStream, uiBytesToCopy); -+ PVR_LOG_GOTO_IF_ERROR_VA(eError, ErrReturn, "TLStreamCommit() failed with error %d, " -+ "unable to copy packet from L1 to L2 buffer", eError); -+ -+ /* Data were successfully written */ -+ uiBytesCopied = (size_t) uiBytesToCopy; -+ } -+ else if (eError == PVRSRV_ERROR_STREAM_FULL) -+ { -+ /* There was not enough space for all data, copy as much as possible */ -+ IMG_UINT32 uiSizeSum = RGXHWPerfGetPackets(uiBytesToCopy, uiFreeSpace, -+ RGX_HWPERF_GET_PACKET(pbSrcBuffer)); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "Unable to reserve space (%d) in host buffer on first attempt, " -+ "remaining free space: %d", uiBytesToCopy, uiFreeSpace)); -+ -+ if (uiSizeSum != 0) -+ { -+ eError = TLStreamReserve(hHWPerfDestStream, &pbDestBuffer, uiSizeSum); -+ -+ if (eError == PVRSRV_OK) -+ { -+ RGXFwSharedMemCacheOpExec(pbSrcBuffer, uiSizeSum, PVRSRV_CACHE_OP_INVALIDATE); -+ OSDeviceMemCopy(pbDestBuffer, pbSrcBuffer, (size_t) uiSizeSum); -+ -+ eError = TLStreamCommit(hHWPerfDestStream, uiSizeSum); -+ PVR_LOG_GOTO_IF_ERROR_VA(eError, ErrReturn, "TLStreamCommit() failed with error " -+ "%d, unable to copy packet from L1 to L2 buffer", eError); -+ -+ /* uiSizeSum bytes of hwperf packets have been successfully written */ -+ uiBytesCopied = uiSizeSum; -+ } -+ else if (eError == PVRSRV_ERROR_STREAM_FULL) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "Cannot write HWPerf packet into host buffer, check data " -+ "in case of packet loss, remaining free space: %d", uiFreeSpace)); -+ RGXSuspendHWPerfL2DataCopy(psDeviceInfo, eL2StreamId, bIsReaderConnected); -+ } -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "Cannot find space in host buffer, check data in case of " -+ "packet loss, remaining free space: %d", uiFreeSpace)); -+ RGXSuspendHWPerfL2DataCopy(psDeviceInfo, eL2StreamId, bIsReaderConnected); -+ } -+ } -+ -+ /* Some other error occurred. Full error handled by caller, we returning the copied bytes count -+ * to caller */ -+ if (eError != PVRSRV_OK && eError != PVRSRV_ERROR_STREAM_FULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "HWPerf enabled: Unexpected Error (%d) while copying FW buffer " -+ "to destination buffer.", eError)); -+ } -+ -+ErrReturn: -+ /* Return the remaining packets left to be transported. 
*/ -+ PVR_DPF_RETURN_VAL(uiBytesCopied); -+} -+ -+/* -+ RGXHWPerfCopyDataL1toL2 -+ */ -+static IMG_UINT32 RGXHWPerfCopyDataL1toL2(PVRSRV_RGXDEV_INFO* psDeviceInfo, -+ IMG_BYTE *pbFwBuffer, -+ IMG_UINT32 ui32BytesExp) -+{ -+ IMG_UINT32 eL2StreamId, uiHWPerfBytesCopied = 0; -+ -+ /* Invalidate initial packet header, type/size cast via RGX_HWPEF_GET_PACKET */ -+ RGXFwSharedMemCacheOpPtr(RGX_HWPERF_GET_PACKET(pbFwBuffer), INVALIDATE); -+ -+ /* HWPERF_MISR_FUNC_DEBUG enables debug code for investigating HWPerf issues */ -+#ifdef HWPERF_MISR_FUNC_DEBUG -+ static IMG_UINT32 gui32Ordinal = IMG_UINT32_MAX; -+#endif -+ -+ PVR_DPF_ENTERED; -+ -+#ifdef HWPERF_MISR_FUNC_DEBUG -+ PVR_DPF((PVR_DBG_VERBOSE, "EVENTS to copy from 0x%p length:%05d", -+ pbFwBuffer, ui32BytesExp)); -+#endif -+ -+#ifdef HWPERF_MISR_FUNC_DEBUG -+ { -+ /* Check the incoming buffer of data has not lost any packets */ -+ IMG_BYTE *pbFwBufferIter = pbFwBuffer; -+ IMG_BYTE *pbFwBufferEnd = pbFwBuffer+ui32BytesExp; -+ do -+ { -+ RGX_HWPERF_V2_PACKET_HDR *asCurPos = RGX_HWPERF_GET_PACKET(pbFwBufferIter); -+ IMG_UINT32 ui32CurOrdinal; -+ /* Invalidate HDR pointed to by asCurPos as we use both ordinal for detecting -+ * lost packets and size for iteration. -+ */ -+ RGXFwSharedMemCacheOpPtr(asCurPos, INVALIDATE); -+ ui32CurOrdinal = asCurPos->ui32Ordinal; -+ if (gui32Ordinal != IMG_UINT32_MAX) -+ { -+ if ((gui32Ordinal+1) != ui32CurOrdinal) -+ { -+ if (gui32Ordinal < ui32CurOrdinal) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "HWPerf [%p] packets lost (%u packets) between ordinal %u...%u", -+ pbFwBufferIter, -+ ui32CurOrdinal - gui32Ordinal - 1, -+ gui32Ordinal, -+ ui32CurOrdinal)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "HWPerf [%p] packet ordinal out of sequence last: %u, current: %u", -+ pbFwBufferIter, -+ gui32Ordinal, -+ ui32CurOrdinal)); -+ } -+ } -+ } -+ gui32Ordinal = asCurPos->ui32Ordinal; -+ pbFwBufferIter += RGX_HWPERF_GET_SIZE(asCurPos); -+ } while (pbFwBufferIter < pbFwBufferEnd); -+ } -+#endif -+ -+ for (eL2StreamId = 0; eL2StreamId < RGX_HWPERF_L2_STREAM_LAST; eL2StreamId++) -+ { -+ if (!psDeviceInfo->bSuspendHWPerfL2DataCopy[eL2StreamId]) -+ { -+ IMG_UINT32 uiBytesCopied, uiPacketDataSize = ui32BytesExp; -+ IMG_UINT32 uiMaxPacketSize = psDeviceInfo->ui32L2BufMaxPacketSize[eL2StreamId]; -+ -+ if (ui32BytesExp > uiMaxPacketSize) -+ { -+ uiPacketDataSize = RGXHWPerfGetPackets(ui32BytesExp, uiMaxPacketSize, -+ RGX_HWPERF_GET_PACKET(pbFwBuffer)); -+ -+ if (uiPacketDataSize == 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to write data into host buffer " -+ "(%u) as packet is too big and hence it breaches TL " -+ "packet size limit (TLBufferSize / 2.5)", eL2StreamId)); -+ -+ continue; -+ } -+ } -+ -+ uiBytesCopied = RGXHWPerfCopyData(psDeviceInfo, pbFwBuffer, eL2StreamId, -+ uiPacketDataSize); -+ -+ uiHWPerfBytesCopied = MAX(uiBytesCopied, uiHWPerfBytesCopied); -+ } -+ } -+ -+ /* Return the remaining packets left to be transported. */ -+ PVR_DPF_RETURN_VAL(uiHWPerfBytesCopied); -+} -+ -+ -+static INLINE IMG_UINT32 RGXHWPerfAdvanceRIdx( -+ const IMG_UINT32 ui32BufSize, -+ const IMG_UINT32 ui32Pos, -+ const IMG_UINT32 ui32Size) -+{ -+ return ( ui32Pos + ui32Size < ui32BufSize ? ui32Pos + ui32Size : 0 ); -+} -+ -+ -+/* -+ RGXHWPerfDataStore -+ -+ This function copies HWPerf data from L1 buffer to all L2 streams. -+ The number of copied data is always the maximum read number of packets. -+ In case where one of the stream is not able to accept the same amount of -+ data as other streams it will suffer from gaps in the data. 
-+ */ -+static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; -+ IMG_BYTE* psHwPerfInfo = psDevInfo->psRGXFWIfHWPerfBuf; -+ IMG_UINT32 ui32SrcRIdx, ui32SrcWIdx, ui32SrcWrapCount; -+ IMG_UINT32 ui32BytesExp = 0, ui32BytesCopied = 0, ui32BytesCopiedSum = 0; -+#ifdef HWPERF_MISR_FUNC_DEBUG -+ IMG_UINT32 ui32BytesExpSum = 0; -+#endif -+ -+ PVR_DPF_ENTERED; -+ -+ /* Caller should check this member is valid before calling */ -+ { -+ IMG_UINT32 i, uiSuspendedCount = 0; -+#if defined(PVRSRV_NEED_PVR_ASSERT) -+ IMG_UINT32 uiNotNullCount = 0; -+#endif -+ for (i = 0; i < RGX_HWPERF_L2_STREAM_LAST; i++) -+ { -+#if defined(PVRSRV_NEED_PVR_ASSERT) -+ if (psDevInfo->hHWPerfStream[i] != NULL) -+ { -+ uiNotNullCount++; -+ } -+#endif -+ if (psDevInfo->bSuspendHWPerfL2DataCopy[i]) -+ { -+ uiSuspendedCount++; -+ } -+ } -+ -+#if defined(PVRSRV_NEED_PVR_ASSERT) -+ /* At least one stream must exist. */ -+ PVR_ASSERT(uiNotNullCount > 0); -+#endif -+ -+ /* Only proceed if any of the streams are not suspended. */ -+ if (uiSuspendedCount == RGX_HWPERF_L2_STREAM_LAST) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s : Copying data to all L2 host buffers for FW events is " -+ "suspended. Start at least one of the HWPerf consumers or restart the driver " -+ "if HWPerf FW events are needed", __func__)); -+ -+ PVR_DPF_RETURN_VAL(0); -+ } -+ } -+ -+ /* Invalidate partial region of struct */ -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->sHWPerfCtrl, -+ INVALIDATE); -+ -+ /* Get a copy of the current -+ * read (first packet to read) -+ * write (empty location for the next write to be inserted) -+ * WrapCount (size in bytes of the buffer at or past end) -+ * indexes of the FW buffer */ -+ ui32SrcRIdx = psFwSysData->sHWPerfCtrl.ui32HWPerfRIdx; -+ ui32SrcWIdx = psFwSysData->sHWPerfCtrl.ui32HWPerfWIdx; -+ OSMemoryBarrier(NULL); -+ ui32SrcWrapCount = psFwSysData->sHWPerfCtrl.ui32HWPerfWrapCount; -+ -+#if defined(HWPERF_MISR_FUNC_DEBUG) || defined(EMULATOR) -+ { -+ IMG_UINT32 ui32SrcBufSize = psDevInfo->ui32RGXFWIfHWPerfBufSize; -+ -+ if (ui32SrcRIdx >= ui32SrcBufSize || ui32SrcWIdx >= ui32SrcBufSize) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s : Invalid read/write offsets found! srcRIdx:%u srcWIdx:%u srcBufSize:%u", -+ __func__, ui32SrcRIdx, ui32SrcWIdx, ui32SrcBufSize)); -+ -+ PVR_DPF_RETURN_VAL(ui32BytesCopiedSum); -+ } -+ } -+#endif -+ -+ /* Is there any data in the buffer not yet retrieved? */ -+ if ( ui32SrcRIdx != ui32SrcWIdx ) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStore EVENTS found srcRIdx:%d srcWIdx: %d", ui32SrcRIdx, ui32SrcWIdx)); -+ -+ /* Is the write position higher than the read position? */ -+ if ( ui32SrcWIdx > ui32SrcRIdx ) -+ { -+ /* Yes, buffer has not wrapped */ -+ ui32BytesExp = ui32SrcWIdx - ui32SrcRIdx; -+#ifdef HWPERF_MISR_FUNC_DEBUG -+ ui32BytesExpSum += ui32BytesExp; -+#endif -+ ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, -+ psHwPerfInfo + ui32SrcRIdx, -+ ui32BytesExp); -+ -+ /* Advance the read index and the free bytes counter by the number -+ * of bytes transported. Items will be left in buffer if not all data -+ * could be transported. Exit to allow buffer to drain. 
*/ -+ OSWriteDeviceMem32WithWMB(&psFwSysData->sHWPerfCtrl.ui32HWPerfRIdx, -+ RGXHWPerfAdvanceRIdx(psDevInfo->ui32RGXFWIfHWPerfBufSize, -+ ui32SrcRIdx, -+ ui32BytesCopied)); -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->sHWPerfCtrl.ui32HWPerfRIdx, -+ FLUSH); -+ -+ ui32BytesCopiedSum += ui32BytesCopied; -+ } -+ /* No, buffer has wrapped and write position is behind read position */ -+ else -+ { -+ /* Byte count equal to -+ * number of bytes from read position to the end of the buffer, -+ * + data in the extra space in the end of the buffer. */ -+ ui32BytesExp = ui32SrcWrapCount - ui32SrcRIdx; -+ -+#ifdef HWPERF_MISR_FUNC_DEBUG -+ ui32BytesExpSum += ui32BytesExp; -+#endif -+ /* Attempt to transfer the packets to the TL stream buffer */ -+ ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, -+ psHwPerfInfo + ui32SrcRIdx, -+ ui32BytesExp); -+ -+ /* Advance read index as before and Update the local copy of the -+ * read index as it might be used in the last if branch*/ -+ ui32SrcRIdx = RGXHWPerfAdvanceRIdx( -+ psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx, -+ ui32BytesCopied); -+ -+ /* Update Wrap Count */ -+ if ( ui32SrcRIdx == 0) -+ { -+ OSWriteDeviceMem32WithWMB(&psFwSysData->sHWPerfCtrl.ui32HWPerfWrapCount, -+ psDevInfo->ui32RGXFWIfHWPerfBufSize); -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->sHWPerfCtrl.ui32HWPerfWrapCount, -+ FLUSH); -+ } -+ OSWriteDeviceMem32WithWMB(&psFwSysData->sHWPerfCtrl.ui32HWPerfRIdx, ui32SrcRIdx); -+ -+ ui32BytesCopiedSum += ui32BytesCopied; -+ -+ /* If all the data in the end of the array was copied, try copying -+ * wrapped data in the beginning of the array, assuming there is -+ * any and the RIdx was wrapped. */ -+ if ( (ui32BytesCopied == ui32BytesExp) -+ && (ui32SrcWIdx > 0) -+ && (ui32SrcRIdx == 0) ) -+ { -+ ui32BytesExp = ui32SrcWIdx; -+#ifdef HWPERF_MISR_FUNC_DEBUG -+ ui32BytesExpSum += ui32BytesExp; -+#endif -+ ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, -+ psHwPerfInfo, -+ ui32BytesExp); -+ /* Advance the FW buffer read position. */ -+ psFwSysData->sHWPerfCtrl.ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx( -+ psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx, -+ ui32BytesCopied); -+ -+ ui32BytesCopiedSum += ui32BytesCopied; -+ } -+ /* This flush covers both writes above */ -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->sHWPerfCtrl.ui32HWPerfRIdx, -+ FLUSH); -+ } -+#ifdef HWPERF_MISR_FUNC_DEBUG -+ if (ui32BytesCopiedSum != ui32BytesExpSum) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfDataStore: FW L1 RIdx:%u. Not all bytes copied to L2: %u bytes out of %u expected", psFwSysData->sHWPerfCtrl.ui32HWPerfRIdx, ui32BytesCopiedSum, ui32BytesExpSum)); -+ } -+#endif -+ -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfDataStore NO EVENTS to transport")); -+ } -+ -+ PVR_DPF_RETURN_VAL(ui32BytesCopiedSum); -+} -+ -+/* Function called from MISR to copy data from L1 buffer to L2 streams. */ -+PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE *psDevInfo) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_RGXDEV_INFO* psRgxDevInfo; -+ IMG_UINT32 ui32BytesCopied; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psDevInfo); -+ psRgxDevInfo = psDevInfo->pvDevice; -+ -+ /* Store FW event data if the destination buffer exists.*/ -+ OSLockAcquire(psRgxDevInfo->hHWPerfLock); -+ -+ if (psRgxDevInfo->uiHWPerfStreamCount > 0) -+ { -+ ui32BytesCopied = RGXHWPerfDataStore(psRgxDevInfo); -+ if ( ui32BytesCopied ) -+ { -+ /* It's possible that the HWPerf stream doesn't exist yet. 
It's -+ * possible that only FTrace L2 stream has been created so far. */ -+ if (psRgxDevInfo->hHWPerfStream[RGX_HWPERF_L2_STREAM_HWPERF] != NULL) -+ { -+ /* Signal consumers that packets may be available to read when -+ * running from a HW kick, not when called by client APP thread -+ * via the transport layer CB as this can lead to stream -+ * corruption. */ -+ eError = TLStreamSync(psRgxDevInfo->hHWPerfStream[RGX_HWPERF_L2_STREAM_HWPERF]); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ } -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStoreCB: Zero bytes copied")); -+ RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo); -+ } -+ } -+ -+ OSLockRelease(psRgxDevInfo->hHWPerfLock); -+ -+ PVR_DPF_RETURN_OK; -+} -+ -+ -+/* Currently supported by default */ -+#if !defined(NO_HARDWARE) && defined(SUPPORT_TL_PRODUCER_CALLBACK) -+static PVRSRV_ERROR RGXHWPerfTLCB(IMG_HANDLE hStream, -+ IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*)pvUser; -+ -+ PVR_UNREFERENCED_PARAMETER(hStream); -+ PVR_UNREFERENCED_PARAMETER(ui32Resp); -+ -+ PVR_ASSERT(psRgxDevInfo); -+ -+ switch (ui32ReqOp) -+ { -+ case TL_SOURCECB_OP_CLIENT_EOS: -+ /* Keep HWPerf resource init check and use of -+ * resources atomic, they may not be freed during use -+ */ -+ -+ /* This solution is for avoiding a deadlock situation where - -+ * in DoTLStreamReserve(), writer has acquired HWPerfLock and -+ * ReadLock and is waiting on ReadPending (which will be reset -+ * by reader), And -+ * the reader after setting ReadPending in TLStreamAcquireReadPos(), -+ * is waiting for HWPerfLock in RGXHWPerfTLCB(). -+ * So here in RGXHWPerfTLCB(), if HWPerfLock is already acquired we -+ * will return to the reader without waiting to acquire HWPerfLock. -+ */ -+ if (!OSTryLockAcquire(psRgxDevInfo->hHWPerfLock)) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "hHWPerfLock is already acquired, a write " -+ "operation might already be in process")); -+ return PVRSRV_OK; -+ } -+ -+ if (psRgxDevInfo->hHWPerfStream[RGX_HWPERF_L2_STREAM_HWPERF] != NULL) -+ { -+ (void) RGXHWPerfDataStore(psRgxDevInfo); -+ } -+ OSLockRelease(psRgxDevInfo->hHWPerfLock); -+ break; -+ -+ default: -+ break; -+ } -+ -+ return eError; -+} -+#endif -+ -+ -+static void RGXHWPerfL1BufferDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) -+{ -+ if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc) -+ { -+ if (psRgxDevInfo->psRGXFWIfHWPerfBuf != NULL) -+ { -+ DevmemReleaseCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc); -+ psRgxDevInfo->psRGXFWIfHWPerfBuf = NULL; -+ } -+ DevmemFwUnmapAndFree(psRgxDevInfo, psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc); -+ psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL; -+ } -+} -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfInit -+ -+@Description Called during driver init for initialization of HWPerf module -+ in the Rogue device driver. This function keeps allocated -+ only the minimal necessary resources, which are required for -+ functioning of HWPerf server module. 
-+ -+@Input psRgxDevInfo RGX Device Info -+ -+@Return PVRSRV_ERROR -+ */ /**************************************************************************/ -+PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 i; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); -+ -+ PVR_DPF_ENTERED; -+ -+ /* expecting a valid device info */ -+ PVR_RETURN_IF_INVALID_PARAM(psRgxDevInfo != NULL); -+ -+ /* Create a lock for HWPerf server module used for serializing, L1 to L2 -+ * copy calls (e.g. in case of TL producer callback) and L1, L2 resource -+ * allocation */ -+ eError = OSLockCreate(&psRgxDevInfo->hHWPerfLock); -+ PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); -+ -+ /* Initialise only non-zero fields since psRgxDevInfo is zeroed -+ * on allocation. */ -+ for (i = 0; i < RGX_HWPERF_L2_STREAM_LAST; i++) -+ { -+ psRgxDevInfo->bSuspendHWPerfL2DataCopy[i] = IMG_TRUE; -+ } -+ -+ PVR_DPF_RETURN_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfIsInitRequired -+ -+@Description Returns true if the HWperf firmware buffer (L1 buffer) and host -+ driver TL buffer (L2 buffer) are not already allocated. Caller -+ must possess hHWPerfLock lock before calling this -+ function so the state tested is not inconsistent. -+ -+@Input psRgxDevInfo RGX Device Info, on which init requirement is -+ checked. -+ -+@Return IMG_BOOL Whether initialization (allocation) is required -+ */ /**************************************************************************/ -+static INLINE IMG_BOOL RGXHWPerfIsInitRequired(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_L2_STREAM_ID eL2StreamId) -+{ -+ PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hHWPerfLock)); -+ -+#if !defined(NO_HARDWARE) -+ PVR_ASSERT(eL2StreamId < RGX_HWPERF_L2_STREAM_LAST); -+ -+ /* Both L1 and L2 buffers are required (for HWPerf functioning) on driver -+ * built for actual hardware (TC, EMU, etc.) -+ */ -+ return psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL || -+ psRgxDevInfo->hHWPerfStream[eL2StreamId] == NULL; -+#else -+ /* On a NO-HW driver L2 is not allocated. So, no point in checking its -+ * allocation */ -+ return psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL; -+#endif -+} -+#if !defined(NO_HARDWARE) -+static void _HWPerfFWOnReaderOpenCB(void *pvArg) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*) pvArg; -+ PVRSRV_DEVICE_NODE* psDevNode = (PVRSRV_DEVICE_NODE*) psRgxDevInfo->psDeviceNode; -+ RGXFWIF_KCCB_CMD sKccbCmd; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ -+ PVRSRV_VZ_RETN_IF_MODE(GUEST); -+ -+ /* Clear any previously suspended state for bSuspendHWPerfL2DataCopy as we -+ * now have a reader attached so the data will be delivered upstream. 
*/ -+ if (psRgxDevInfo->bSuspendHWPerfL2DataCopy[RGX_HWPERF_L2_STREAM_HWPERF]) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Resuming HWPerf FW event collection.", -+ __func__)); -+ psRgxDevInfo->bSuspendHWPerfL2DataCopy[RGX_HWPERF_L2_STREAM_HWPERF] = IMG_FALSE; -+ } -+ -+ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG; -+ sKccbCmd.uCmdData.sHWPerfCtrl.eOpCode = RGXFWIF_HWPERF_CTRL_EMIT_FEATURES_EV; -+ sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = 0; -+ -+ eError = RGXScheduleCommandAndGetKCCBSlot(psDevNode->pvDevice, -+ RGXFWIF_DM_GP, -+ &sKccbCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ &ui32kCCBCommandSlot); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to generate feature packet in " -+ "firmware (error = %d)", __func__, eError)); -+ return; -+ } -+ -+ eError = RGXWaitForKCCBSlotUpdate(psRgxDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); -+ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); -+} -+#endif -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfInitOnDemandL1Buffer -+ -+@Description This function allocates the HWperf firmware buffer (L1 buffer) -+ if HWPerf is enabled at driver load time. Otherwise, this -+ buffer is allocated on-demand as and when required. Caller must -+ possess hHWPerfLock lock before calling this function so the -+ state tested is not inconsistent if called outside of -+ initialisation. -+ -+@Input psRgxDevInfo RGX Device Info, on which init is done -+ -+@Return PVRSRV_ERROR -+ */ /**************************************************************************/ -+PVRSRV_ERROR RGXHWPerfInitOnDemandL1Buffer(PVRSRV_RGXDEV_INFO *psRgxDevInfo) -+{ -+ PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags; -+ PVRSRV_ERROR eError; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ PVR_DPF_ENTERED; -+ -+ /* This function might be called more than once due to initialisation of -+ * multiple consumers. Make sure that L1 is only ever initialised once. */ -+ if (psRgxDevInfo->psRGXFWIfHWPerfBuf != NULL) -+ { -+ PVR_DPF_RETURN_OK; -+ } -+ -+ /* Create the L1 HWPerf buffer on demand, read-only for the CPU -+ * (except for the zero/poison operations) */ -+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) -+ | PVRSRV_MEMALLOCFLAG_GPU_READABLE -+ | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE -+ | PVRSRV_MEMALLOCFLAG_GPU_UNCACHED -+ | PVRSRV_MEMALLOCFLAG_CPU_READABLE -+ | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC -+ | PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE -+#if defined(PDUMP) /* Helps show where the packet data ends */ -+ | PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC -+#else /* Helps show corruption issues in driver-live */ -+ | PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC -+#endif -+ | PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN); -+ -+ /* Allocate HWPerf FW L1 buffer */ -+ eError = DevmemFwAllocate(psRgxDevInfo, -+ /* Pad it enough to hold the biggest variable sized packet. */ -+ psRgxDevInfo->ui32RGXFWIfHWPerfBufSize+RGX_HWPERF_MAX_PACKET_SIZE, -+ uiMemAllocFlags, -+ "FwHWPerfBuffer", -+ &psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate kernel fw hwperf buffer (%u)", -+ __func__, eError)); -+ goto ErrReturn; -+ } -+ -+ /* Expecting the RuntimeCfg structure is mapped into CPU virtual memory. 
-+ * Also, make sure the FW address is not already set */ -+ PVR_ASSERT(psRgxDevInfo->psRGXFWIfRuntimeCfg && psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf.ui32Addr == 0x0); -+ -+ /* Meta cached flag removed from this allocation as it was found -+ * FW performance was better without it. */ -+ eError = RGXSetFirmwareAddress(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf, -+ psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc, -+ 0, RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrDeInitL1Buffer); -+ -+#if defined(RGX_FEATURE_HWPERF_VOLCANIC) -+ RGXSetMetaDMAAddress(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfDMABuf, -+ psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc, -+ &psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf, -+ 0); -+#endif -+ -+ /* flush write buffers for psRgxDevInfo->psRGXFWIfRuntimeCfg */ -+ OSWriteMemoryBarrier(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf.ui32Addr); -+ RGXFwSharedMemCacheOpValue(psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf.ui32Addr, FLUSH); -+ -+ eError = DevmemAcquireCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc, -+ (void**)&psRgxDevInfo->psRGXFWIfHWPerfBuf); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to acquire kernel hwperf buffer (%u)", -+ __func__, eError)); -+ goto ErrDeInitL1Buffer; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "HWPerf buffer size in bytes: L1: %d", -+ psRgxDevInfo->ui32RGXFWIfHWPerfBufSize)); -+ -+ PVR_DPF_RETURN_OK; -+ -+ErrDeInitL1Buffer: -+ /* L1 buffer initialisation failures */ -+ RGXHWPerfL1BufferDeinit(psRgxDevInfo); -+ErrReturn: -+ PVR_DPF_RETURN_RC(eError); -+} -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfInitOnDemandL2Stream -+ -+@Description This function allocates the HWperf firmware buffer (L1 buffer) -+ and host driver TL buffer (L2 buffer) if HWPerf is enabled at -+ driver load time. Otherwise, these buffers are allocated -+ on-demand as and when required. Caller must possess hHWPerfLock -+ lock before calling this function so the state tested is not -+ inconsistent if called outside of driver initialisation. -+ -+@Input psRgxDevInfo RGX Device Info, on which init is done -+ -+@Return PVRSRV_ERROR -+ */ /**************************************************************************/ -+PVRSRV_ERROR RGXHWPerfInitOnDemandL2Stream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_L2_STREAM_ID eL2StreamId) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+#if !defined(NO_HARDWARE) -+ IMG_HANDLE hStream; -+ TL_STREAM_INFO sTLStreamInfo; -+#endif -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ PVR_ASSERT(eL2StreamId < RGX_HWPERF_L2_STREAM_LAST); -+ -+ PVR_DPF_ENTERED; -+ -+#if !defined(NO_HARDWARE) -+ if (eL2StreamId == RGX_HWPERF_L2_STREAM_HWPERF) -+ { -+ /* On NO-HW driver, there is no MISR installed to copy data from L1 to L2. 
Hence, -+ * L2 buffer is not allocated */ -+ IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 4]; -+ /* + 4 is used to allow names up to "hwperf_fw_999", which is enough */ -+ -+ /* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */ -+ if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d", -+ PVRSRV_TL_HWPERF_RGX_FW_STREAM, -+ psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID) < 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to form HWPerf stream name for device %d", -+ __func__, -+ psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ eError = TLStreamCreate(&hStream, -+ pszHWPerfStreamName, -+ psRgxDevInfo->ui32RGXL2HWPerfBufSize, -+ TL_OPMODE_DROP_NEWER | TL_FLAG_NO_SIGNAL_ON_COMMIT, -+ _HWPerfFWOnReaderOpenCB, psRgxDevInfo, -+#if !defined(SUPPORT_TL_PRODUCER_CALLBACK) -+ NULL, NULL -+#else -+ /* Not enabled by default */ -+ RGXHWPerfTLCB, psRgxDevInfo -+#endif -+ ); -+ PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate", ErrClearStream); -+ -+ eError = TLStreamSetNotifStream(hStream, -+ PVRSRVGetPVRSRVData()->hTLCtrlStream); -+ /* we can still discover host stream so leave it as is and just log error */ -+ PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream"); -+ -+ /* send the event here because host stream is implicitly opened for write -+ * in TLStreamCreate and TLStreamOpen is never called (so the event is -+ * never emitted) */ -+ TLStreamMarkStreamOpen(hStream); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "HWPerf buffer size in bytes: L1: %d L2: %d", -+ psRgxDevInfo->ui32RGXFWIfHWPerfBufSize, -+ psRgxDevInfo->ui32RGXL2HWPerfBufSize)); -+ -+ psRgxDevInfo->hHWPerfStream[RGX_HWPERF_L2_STREAM_HWPERF] = hStream; -+ psRgxDevInfo->uiHWPerfStreamCount++; -+ PVR_ASSERT(psRgxDevInfo->uiHWPerfStreamCount <= RGX_HWPERF_L2_STREAM_LAST); -+ } -+#if (defined(__linux__) && !defined(__QNXNTO__) && !defined(INTEGRITY_OS)) -+ else if (eL2StreamId == RGX_HWPERF_L2_STREAM_FTRACE) -+ { -+ eError = PVRGpuTraceInitStream(psRgxDevInfo); -+ PVR_LOG_IF_ERROR(eError, "PVRGpuTraceInitStream"); -+ } -+#endif -+ -+ TLStreamInfo(psRgxDevInfo->hHWPerfStream[eL2StreamId], &sTLStreamInfo); -+ psRgxDevInfo->ui32L2BufMaxPacketSize[eL2StreamId] = sTLStreamInfo.maxTLpacketSize; -+#else -+ psRgxDevInfo->hHWPerfStream[eL2StreamId] = NULL; -+#endif /* !defined(NO_HARDWARE) */ -+ -+ PVR_DPF_RETURN_OK; -+ -+#if !defined(NO_HARDWARE) -+ErrClearStream: /* L2 buffer initialisation failures */ -+ psRgxDevInfo->hHWPerfStream[RGX_HWPERF_L2_STREAM_HWPERF] = NULL; -+#endif -+ /* L1 buffer initialisation failures */ -+ RGXHWPerfL1BufferDeinit(psRgxDevInfo); -+ -+ PVR_DPF_RETURN_RC(eError); -+} -+ -+ -+void RGXHWPerfDeinitL2Stream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_L2_STREAM_ID eL2StreamId) -+{ -+ IMG_HANDLE hStream; -+ -+ PVRSRV_VZ_RETN_IF_MODE(GUEST); -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psRgxDevInfo); -+ -+ hStream = psRgxDevInfo->hHWPerfStream[eL2StreamId]; -+ -+ /* Clean up the L2 buffer stream object if allocated */ -+ if (hStream) -+ { -+ psRgxDevInfo->hHWPerfStream[eL2StreamId] = NULL; -+ psRgxDevInfo->bSuspendHWPerfL2DataCopy[eL2StreamId] = IMG_TRUE; -+ psRgxDevInfo->uiHWPerfStreamCount--; -+ PVR_ASSERT(psRgxDevInfo->uiHWPerfStreamCount < RGX_HWPERF_L2_STREAM_LAST); -+ -+ /* send the event here because host stream is implicitly opened for -+ * write in TLStreamCreate and TLStreamClose is never called (so the -+ * event is never emitted) */ -+ TLStreamMarkStreamClose(hStream); -+ 
TLStreamClose(hStream); -+ } -+ -+ PVR_DPF_RETURN; -+} -+ -+void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) -+{ -+ /* Cleanup L1 buffer resources */ -+ RGXHWPerfL1BufferDeinit(psRgxDevInfo); -+ -+ /* Cleanup the HWPerf server module lock resource */ -+ if (psRgxDevInfo->hHWPerfLock) -+ { -+ OSLockDestroy(psRgxDevInfo->hHWPerfLock); -+ psRgxDevInfo->hHWPerfLock = NULL; -+ } -+} -+ -+ -+/****************************************************************************** -+ * RGX HW Performance Profiling Server API(s) -+ *****************************************************************************/ -+ -+static PVRSRV_ERROR RGXHWPerfCtrlFwBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGX_HWPERF_L2_STREAM_ID eL2StreamId, -+ IMG_BOOL bToggle, -+ IMG_UINT64 ui64Mask) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice; -+ RGXFWIF_KCCB_CMD sKccbCmd; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ IMG_UINT64 ui64MaskValue = ui64Mask; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ PVR_ASSERT(eL2StreamId < RGX_HWPERF_L2_STREAM_LAST); -+ -+ /* Modify mask to include the default bit settings if it is non-zero */ -+ if (!bToggle && (ui64Mask != 0ULL)) -+ { -+ ui64MaskValue = ui64Mask | RGX_HWPERF_EVENT_MASK_DEFAULT; -+ } -+ -+ /* If this method is being used whether to enable or disable -+ * then the hwperf buffers (host and FW) are likely to be needed -+ * eventually so create them, also helps unit testing. Buffers -+ * allocated on demand to reduce RAM foot print on systems not -+ * needing HWPerf resources. -+ * Obtain lock first, test and init if required. */ -+ OSLockAcquire(psDevice->hHWPerfLock); -+ -+ if (!psDevice->bFirmwareInitialised) -+ { -+ /* No other initialisation can be done at this point until the FW is -+ * initialised so unlock, log and return Ok so the caller knows -+ * the filter was set. */ -+ (void) RGXHWPerfFwSetEventFilterNoLock(psDevice, eL2StreamId, ui64MaskValue); -+ OSLockRelease(psDevice->hHWPerfLock); -+ goto done_; -+ } -+ -+ if (RGXHWPerfIsInitRequired(psDevice, eL2StreamId)) -+ { -+ eError = RGXHWPerfInitOnDemandL1Buffer(psDevice); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand HWPerfFW " -+ "resources failed", __func__)); -+ goto unlock_and_return; -+ } -+ -+ /* if this fails it also cleans up L1 buffer */ -+ eError = RGXHWPerfInitOnDemandL2Stream(psDevice, eL2StreamId); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand HWPerfFW " -+ "resources failed", __func__)); -+ goto unlock_and_return; -+ } -+ } -+ -+ /* Unlock here as no further HWPerf resources are used below that would be -+ * affected if freed by another thread */ -+ OSLockRelease(psDevice->hHWPerfLock); -+ -+ /* Return if the filter is the same */ -+ if (!bToggle && psDevice->ui64HWPerfFilter[eL2StreamId] == ui64MaskValue) -+ { -+ goto done_; -+ } -+ -+ /* Prepare command parameters ... */ -+ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG; -+ sKccbCmd.uCmdData.sHWPerfCtrl.eOpCode = bToggle ? 
RGXFWIF_HWPERF_CTRL_TOGGLE : RGXFWIF_HWPERF_CTRL_SET; -+ sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = ui64MaskValue; -+ -+ /* Ask the FW to carry out the HWPerf configuration command */ -+ eError = RGXScheduleCommandAndGetKCCBSlot(psDevice, -+ RGXFWIF_DM_GP, -+ &sKccbCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ &ui32kCCBCommandSlot); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set new HWPerfFW filter in " -+ "firmware (error = %d)", __func__, eError)); -+ goto return_; -+ } -+ -+ (void) RGXHWPerfFwSetEventFilter(psDevice, eL2StreamId, bToggle -+ ? psDevice->ui64HWPerfFilter[eL2StreamId] ^ ui64Mask -+ : ui64MaskValue); -+ -+ /* Wait for FW to complete */ -+ eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", return_); -+ -+done_: -+ return PVRSRV_OK; -+ -+unlock_and_return: -+ OSLockRelease(psDevice->hHWPerfLock); -+ -+return_: -+ return eError; -+} -+ -+#define HWPERF_HOST_MAX_DEFERRED_PACKETS 800 -+ -+static PVRSRV_ERROR RGXHWPerfCtrlHostBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_BOOL bToggle, -+ IMG_UINT32 ui32Mask) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice; -+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) -+ IMG_UINT32 ui32OldFilter = psDevice->ui32HWPerfHostFilter; -+#endif -+ -+ OSLockAcquire(psDevice->hLockHWPerfHostStream); -+ if (psDevice->hHWPerfHostStream == NULL) -+ { -+ eError = RGXHWPerfHostInitOnDemandResources(psDevice); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Initialisation of on-demand HWPerfHost resources failed", -+ __func__)); -+ OSLockRelease(psDevice->hLockHWPerfHostStream); -+ return eError; -+ } -+ } -+ -+ psDevice->ui32HWPerfHostFilter = bToggle ? 
-+ psDevice->ui32HWPerfHostFilter ^ ui32Mask : ui32Mask; -+ -+ // Deferred creation of host periodic events thread -+ if (psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO)) -+ { -+ eError = PVRSRVCreateHWPerfHostThread(PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS); -+ PVR_LOG_IF_ERROR(eError, "PVRSRVCreateHWPerfHostThread"); -+ } -+ else -+ { -+ eError = PVRSRVDestroyHWPerfHostThread(); -+ PVR_LOG_IF_ERROR(eError, "PVRSRVDestroyHWPerfHostThread"); -+ } -+ -+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) -+ // Log deferred events stats if filter changed from non-zero to zero -+ if ((ui32OldFilter != 0) && (psDevice->ui32HWPerfHostFilter == 0)) -+ { -+ PVR_LOG(("HWPerfHost deferred events buffer high-watermark / size: (%u / %u)", -+ psDevice->ui32DEHighWatermark, HWPERF_HOST_MAX_DEFERRED_PACKETS)); -+ -+ PVR_LOG(("HWPerfHost deferred event retries: WaitForAtomicCtxPktHighWatermark(%u) " -+ "WaitForRightOrdPktHighWatermark(%u)", -+ psDevice->ui32WaitForAtomicCtxPktHighWatermark, -+ psDevice->ui32WaitForRightOrdPktHighWatermark)); -+ } -+#endif -+ -+ OSLockRelease(psDevice->hLockHWPerfHostStream); -+ -+#if defined(DEBUG) -+ if (bToggle) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "HWPerfHost events (%x) have been TOGGLED", -+ ui32Mask)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, "HWPerfHost mask has been SET to (%x)", -+ ui32Mask)); -+ } -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR RGXHWPerfCtrlClientBuffer(IMG_BOOL bToggle, -+ IMG_UINT32 ui32InfoPageIdx, -+ IMG_UINT32 ui32Mask) -+{ -+ PVRSRV_DATA *psData = PVRSRVGetPVRSRVData(); -+ -+ PVR_LOG_RETURN_IF_FALSE(ui32InfoPageIdx >= INFO_PAGE_HWPERF_BLOCK_START && -+ ui32InfoPageIdx < INFO_PAGE_HWPERF_BLOCK_END, -+ "invalid info page index", PVRSRV_ERROR_INVALID_PARAMS); -+ -+ OSLockAcquire(psData->hInfoPageLock); -+ psData->pui32InfoPage[ui32InfoPageIdx] = bToggle ? 
-+ psData->pui32InfoPage[ui32InfoPageIdx] ^ ui32Mask : ui32Mask; -+ OSLockRelease(psData->hInfoPageLock); -+ -+#if defined(DEBUG) -+ if (bToggle) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) events (%x) have been TOGGLED", -+ ui32InfoPageIdx, ui32Mask)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) mask has been SET to (%x)", -+ ui32InfoPageIdx, ui32Mask)); -+ } -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGX_HWPERF_BVNC *psBVNC) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ PVRSRV_ERROR eError; -+ -+ PVR_LOG_RETURN_IF_FALSE((NULL != psDeviceNode), "psDeviceNode invalid", PVRSRV_ERROR_INVALID_PARAMS); -+ -+ psDevInfo = psDeviceNode->pvDevice; -+ eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, psBVNC); -+ -+ return eError; -+} -+ -+/* -+ AppHint interfaces -+ */ -+static -+PVRSRV_ERROR RGXHWPerfSetFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *psPrivate, -+ IMG_UINT64 ui64Value) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_UNREFERENCED_PARAMETER(psPrivate); -+ -+ PVR_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL); -+ PVR_RETURN_IF_INVALID_PARAM(psDeviceNode->pvDevice != NULL); -+ -+ eError = RGXHWPerfCtrlFwBuffer(psDeviceNode, RGX_HWPERF_L2_STREAM_HWPERF, -+ IMG_FALSE, ui64Value); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Failed to set HWPerf firmware filter for device (%u)", -+ psDeviceNode->sDevId.ui32InternalID)); -+ return eError; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static -+PVRSRV_ERROR RGXHWPerfReadFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *psPrivate, -+ IMG_UINT64 *pui64Value) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL); -+ PVR_RETURN_IF_INVALID_PARAM(psDeviceNode->pvDevice != NULL); -+ -+ PVR_UNREFERENCED_PARAMETER(psPrivate); -+ -+ psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; -+ -+ *pui64Value = psDevInfo->ui64HWPerfFilter[RGX_HWPERF_L2_STREAM_HWPERF]; -+ -+ return PVRSRV_OK; -+} -+ -+static -+PVRSRV_ERROR RGXHWPerfSetHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *psPrivate, -+ IMG_UINT32 ui32Value) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL); -+ PVR_RETURN_IF_INVALID_PARAM(psDeviceNode->pvDevice != NULL); -+ -+ PVR_UNREFERENCED_PARAMETER(psPrivate); -+ -+ eError = RGXHWPerfCtrlHostBuffer(psDeviceNode, IMG_FALSE, ui32Value); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Failed to set HWPerf firmware filter for device (%u)", -+ psDeviceNode->sDevId.ui32InternalID)); -+ return eError; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static -+PVRSRV_ERROR RGXHWPerfReadHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *psPrivate, -+ IMG_UINT32 *pui32Value) -+{ -+ PVR_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL); -+ PVR_RETURN_IF_INVALID_PARAM(psDeviceNode->pvDevice != NULL); -+ -+ PVR_UNREFERENCED_PARAMETER(psPrivate); -+ -+ *pui32Value = -+ ((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui32HWPerfHostFilter; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR _ReadClientFilter(const PVRSRV_DEVICE_NODE *psDevice, -+ const void *psPrivData, -+ IMG_UINT32 *pui32Value) -+{ -+ PVRSRV_DATA *psData = PVRSRVGetPVRSRVData(); -+ IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData; -+ PVR_UNREFERENCED_PARAMETER(psDevice); -+ -+ OSLockAcquire(psData->hInfoPageLock); -+ *pui32Value = psData->pui32InfoPage[ui32Idx]; -+ 
OSLockRelease(psData->hInfoPageLock); -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR _WriteClientFilter(const PVRSRV_DEVICE_NODE *psDevice, -+ const void *psPrivData, -+ IMG_UINT32 ui32Value) -+{ -+ IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData; -+ PVR_UNREFERENCED_PARAMETER(psDevice); -+ -+ return RGXHWPerfCtrlClientBuffer(IMG_FALSE, ui32Idx, ui32Value); -+} -+ -+void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRVAppHintRegisterHandlersUINT64(APPHINT_ID_HWPerfFWFilter, -+ RGXHWPerfReadFwFilter, -+ RGXHWPerfSetFwFilter, -+ psDeviceNode, -+ NULL); -+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfHostFilter, -+ RGXHWPerfReadHostFilter, -+ RGXHWPerfSetHostFilter, -+ psDeviceNode, -+ NULL); -+} -+ -+void RGXHWPerfClientInitAppHintCallbacks(void) -+{ -+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_Services, -+ _ReadClientFilter, -+ _WriteClientFilter, -+ APPHINT_OF_DRIVER_NO_DEVICE, -+ (void *) HWPERF_FILTER_SERVICES_IDX); -+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_EGL, -+ _ReadClientFilter, -+ _WriteClientFilter, -+ APPHINT_OF_DRIVER_NO_DEVICE, -+ (void *) HWPERF_FILTER_EGL_IDX); -+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenGLES, -+ _ReadClientFilter, -+ _WriteClientFilter, -+ APPHINT_OF_DRIVER_NO_DEVICE, -+ (void *) HWPERF_FILTER_OPENGLES_IDX); -+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenCL, -+ _ReadClientFilter, -+ _WriteClientFilter, -+ APPHINT_OF_DRIVER_NO_DEVICE, -+ (void *) HWPERF_FILTER_OPENCL_IDX); -+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_Vulkan, -+ _ReadClientFilter, -+ _WriteClientFilter, -+ APPHINT_OF_DRIVER_NO_DEVICE, -+ (void *) HWPERF_FILTER_VULKAN_IDX); -+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenGL, -+ _ReadClientFilter, -+ _WriteClientFilter, -+ APPHINT_OF_DRIVER_NO_DEVICE, -+ (void *) HWPERF_FILTER_OPENGL_IDX); -+} -+ -+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfCountersKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_UINT32 ui32BlockID, -+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters) -+{ -+ RGXFWIF_HWPERF_CTL *psHWPerfCtl; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is invalid", PVRSRV_ERROR_INVALID_PARAMS); -+ PVR_LOG_RETURN_IF_FALSE(psConfiguredCounters != NULL, "psConfiguredCounters is invalid", PVRSRV_ERROR_INVALID_PARAMS); -+ -+ eError = RGXAcquireHWPerfCtlCPUAddr(psDeviceNode, &psHWPerfCtl); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXGetHWPerfCtl"); -+ -+ eError = PVRSRVRGXGetConfiguredHWPerfCounters(psDeviceNode, -+ psHWPerfCtl, -+ ui32BlockID, -+ psConfiguredCounters); -+ PVR_LOG_IF_ERROR(eError, "PVRSRVRGXGetConfiguredHWPerfCounters"); -+ -+ RGXReleaseHWPerfCtlCPUAddr(psDeviceNode); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRGXGetEnabledHWPerfBlocksKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_UINT32 ui32ArrayLen, -+ IMG_UINT32 *pui32BlockCount, -+ IMG_UINT32 *pui32EnabledBlockIDs) -+{ -+ RGXFWIF_HWPERF_CTL *psHWPerfCtl; -+ IMG_UINT32 *pui32BlockIDs = NULL; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is invalid", PVRSRV_ERROR_INVALID_PARAMS); -+ PVR_LOG_RETURN_IF_FALSE(pui32BlockCount != NULL, "pui32BlockCount is invalid", PVRSRV_ERROR_INVALID_PARAMS); -+ -+ eError 
= RGXAcquireHWPerfCtlCPUAddr(psDeviceNode, &psHWPerfCtl); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXGetHWPerfCtl"); -+ -+ if (pui32EnabledBlockIDs != NULL) -+ { -+ pui32BlockIDs = OSAllocMem(sizeof(IMG_UINT32) * ui32ArrayLen); -+ if (pui32BlockIDs == NULL) -+ { -+ PVR_LOG_GOTO_WITH_ERROR("OSAllocMem", eError, PVRSRV_ERROR_OUT_OF_MEMORY, Error); -+ } -+ } -+ -+ eError = PVRSRVRGXGetEnabledHWPerfBlocks(psDeviceNode, -+ psHWPerfCtl, -+ ui32ArrayLen, -+ pui32BlockCount, -+ pui32BlockIDs); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRGXGetEnabledHWPerfBlocks", Error); -+ -+ if (pui32EnabledBlockIDs != NULL) -+ { -+ IMG_UINT32 i; -+ if (*pui32BlockCount > ui32ArrayLen) -+ { -+ *pui32BlockCount = 0; -+ PVR_DPF((PVR_DBG_ERROR, "ui32ArrayLen less than the number of enabled blocks.")); -+ PVR_LOG_GOTO_WITH_ERROR(__func__, eError, PVRSRV_ERROR_OUT_OF_MEMORY, Error); -+ } -+ else if (*pui32BlockCount < ui32ArrayLen) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "ui32ArrayLen greater than the number of enabled blocks.")); -+ } -+ -+ for (i = 0; i < *pui32BlockCount; i++) -+ { -+ pui32EnabledBlockIDs[i] = pui32BlockIDs[i]; -+ } -+ } -+ -+Error: -+ if (pui32BlockIDs != NULL) -+ { -+ OSFreeMem(pui32BlockIDs); -+ } -+ -+ RGXReleaseHWPerfCtlCPUAddr(psDeviceNode); -+ -+ return eError; -+} -+ -+static INLINE IMG_UINT32 _RGXHWPerfFixBufferSize(IMG_UINT32 ui32BufSizeKB) -+{ -+ if (ui32BufSizeKB > HWPERF_HOST_TL_STREAM_SIZE_MAX) -+ { -+ /* Size specified as a AppHint but it is too big */ -+ PVR_DPF((PVR_DBG_WARNING, -+ "RGXHWPerfHostInit: HWPerf Host buffer size " -+ "value (%u) too big, using maximum (%u)", -+ ui32BufSizeKB, HWPERF_HOST_TL_STREAM_SIZE_MAX)); -+ return HWPERF_HOST_TL_STREAM_SIZE_MAX<<10; -+ } -+ else if (ui32BufSizeKB >= HWPERF_HOST_TL_STREAM_SIZE_MIN) -+ { -+ return ui32BufSizeKB<<10; -+ } -+ else if (ui32BufSizeKB > 0) -+ { -+ /* Size specified as a AppHint but it is too small */ -+ PVR_DPF((PVR_DBG_WARNING, -+ "RGXHWPerfHostInit: HWPerf Host buffer size " -+ "value (%u) too small, using minimum (%u)", -+ ui32BufSizeKB, HWPERF_HOST_TL_STREAM_SIZE_MIN)); -+ return HWPERF_HOST_TL_STREAM_SIZE_MIN<<10; -+ } -+ else -+ { -+ /* 0 size implies AppHint not set or is set to zero, -+ * use default size from driver constant. */ -+ return HWPERF_HOST_TL_STREAM_SIZE_DEFAULT<<10; -+ } -+} -+ -+/****************************************************************************** -+ * RGX HW Performance Host Stream API -+ *****************************************************************************/ -+ -+/*************************************************************************/ /*! -+@Function RGXHWPerfHostInit -+ -+@Description Called during driver init for initialisation of HWPerfHost -+ stream in the Rogue device driver. This function keeps allocated -+ only the minimal necessary resources, which are required for -+ functioning of HWPerf server module. 
-+
-+@Return PVRSRV_ERROR
-+ */ /**************************************************************************/
-+PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB)
-+{
-+ PVRSRV_ERROR eError;
-+
-+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
-+
-+ PVR_RETURN_IF_INVALID_PARAM(psRgxDevInfo != NULL);
-+
-+ eError = OSLockCreate(&psRgxDevInfo->hLockHWPerfHostStream);
-+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", error);
-+
-+ psRgxDevInfo->hHWPerfHostStream = NULL;
-+ psRgxDevInfo->ui32HWPerfHostFilter = 0; /* disable all events */
-+ psRgxDevInfo->ui32HWPerfHostNextOrdinal = 1;
-+ psRgxDevInfo->ui32HWPerfHostBufSize = _RGXHWPerfFixBufferSize(ui32BufSizeKB);
-+ psRgxDevInfo->pvHostHWPerfMISR = NULL;
-+ psRgxDevInfo->pui8DeferredEvents = NULL;
-+ /* First packet has ordinal=1, so LastOrdinal=0 will ensure ordering logic
-+ * is maintained */
-+ psRgxDevInfo->ui32HWPerfHostLastOrdinal = 0;
-+ psRgxDevInfo->hHWPerfHostSpinLock = NULL;
-+
-+error:
-+ return eError;
-+}
-+
-+#define RGX_HWPERF_HOST_CLIENT_INFO_PROC_NAME_BASE_SIZE \
-+ ((IMG_UINT32)(offsetof(RGX_HWPERF_HOST_CLIENT_INFO_DATA, uDetail) + \
-+ sizeof(((RGX_HWPERF_HOST_CLIENT_INFO_DETAIL*)0)->sProcName.ui32Count)))
-+
-+static void _HWPerfHostOnConnectCB(void *pvArg)
-+{
-+ PVRSRV_RGXDEV_INFO* psDevice;
-+ PVRSRV_ERROR eError;
-+
-+ RGXSRV_HWPERF_CLK_SYNC(pvArg);
-+
-+ psDevice = (PVRSRV_RGXDEV_INFO*) pvArg;
-+
-+ /* Handle the case where we may be being called as part of a multi-device
-+ * initialisation sequence. If the bDevInit2Done flag is not yet set we can
-+ * perform no action for this device. Simply return.
-+ */
-+ if (!psDevice->bDevInit2Done)
-+ {
-+ return;
-+ }
-+
-+ /* Handle the case where the RGX_HWPERF_HOST_INFO bit is set in the event filter
-+ * before the host stream is opened for reading by a HWPerf client,
-+ * which can result in the host periodic thread sleeping for a long duration as TLStreamIsOpenForReading may return false. */
-+ if (psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO))
-+ {
-+ eError = PVRSRVCreateHWPerfHostThread(PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS);
-+ PVR_LOG_IF_ERROR(eError, "PVRSRVCreateHWPerfHostThread");
-+ }
-+
-+ RGXSRV_HWPERF_DEVICE_INFO_FEATURES(psDevice);
-+
-+ if (RGXHWPerfHostIsEventEnabled(psDevice, RGX_HWPERF_HOST_CLIENT_INFO))
-+ {
-+ // GCC throws -Werror=frame-larger-than error if the frame size is > 1024 bytes,
-+ // so use a heap allocation - is there an alternate solution?
-+ IMG_BYTE *pbPktPayload = (IMG_BYTE*)OSAllocMem(RGX_HWPERF_MAX_PAYLOAD_SIZE);
-+
-+ if (pbPktPayload)
-+ {
-+ RGX_HWPERF_HOST_CLIENT_INFO_DATA *psHostClientInfo;
-+ RGX_HWPERF_HOST_CLIENT_PROC_NAME *psProcName;
-+ IMG_UINT32 ui32TotalPayloadSize, ui32NameLen, ui32ProcNamePktSize;
-+ DLLIST_NODE *pNode, *pNext;
-+
-+ psHostClientInfo = IMG_OFFSET_ADDR(pbPktPayload,0);
-+ psHostClientInfo->eType = RGX_HWPERF_HOST_CLIENT_INFO_TYPE_PROCESS_NAME;
-+ psHostClientInfo->uDetail.sProcName.ui32Count = 0U;
-+ psProcName = psHostClientInfo->uDetail.sProcName.asProcNames;
-+ ui32TotalPayloadSize = RGX_HWPERF_HOST_CLIENT_INFO_PROC_NAME_BASE_SIZE;
-+
-+ OSLockAcquire(psDevice->psDeviceNode->hConnectionsLock);
-+
-+ // Announce current client connections to the reader
-+ dllist_foreach_node(&psDevice->psDeviceNode->sConnections, pNode, pNext)
-+ {
-+ CONNECTION_DATA *psData = IMG_CONTAINER_OF(pNode, CONNECTION_DATA, sConnectionListNode);
-+
-+ ui32NameLen = OSStringLength(psData->pszProcName) + 1U;
-+ ui32ProcNamePktSize = RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE(ui32NameLen);
-+
-+ // Unlikely case where we have too much data to fit into a single hwperf packet
-+ if (ui32ProcNamePktSize + ui32TotalPayloadSize > RGX_HWPERF_MAX_PAYLOAD_SIZE)
-+ {
-+ RGXHWPerfHostPostRaw(psDevice, RGX_HWPERF_HOST_CLIENT_INFO, pbPktPayload, ui32TotalPayloadSize);
-+
-+ psHostClientInfo->uDetail.sProcName.ui32Count = 0U;
-+ psProcName = psHostClientInfo->uDetail.sProcName.asProcNames;
-+ ui32TotalPayloadSize = RGX_HWPERF_HOST_CLIENT_INFO_PROC_NAME_BASE_SIZE;
-+ }
-+
-+ // Setup packet data
-+ psHostClientInfo->uDetail.sProcName.ui32Count++;
-+ psProcName->uiClientPID = psData->pid;
-+ psProcName->ui32Length = ui32NameLen;
-+ (void)OSCachedMemCopy(psProcName->acName, psData->pszProcName, ui32NameLen);
-+
-+ psProcName = (RGX_HWPERF_HOST_CLIENT_PROC_NAME*)IMG_OFFSET_ADDR(psProcName, ui32ProcNamePktSize);
-+ ui32TotalPayloadSize += ui32ProcNamePktSize;
-+ }
-+
-+ OSLockRelease(psDevice->psDeviceNode->hConnectionsLock);
-+ RGXHWPerfHostPostRaw(psDevice, RGX_HWPERF_HOST_CLIENT_INFO, pbPktPayload, ui32TotalPayloadSize);
-+ OSFreeMem(pbPktPayload);
-+ }
-+ else
-+ {
-+ PVR_DPF((PVR_DBG_ERROR, "%s: OUT OF MEMORY. Could not allocate memory for RGX_HWPERF_HOST_CLIENT_INFO_DATA packet.", __func__));
-+ }
-+ }
-+}
-+
-+/* Avoiding a holder struct using fields below, as a struct brings along padding,
-+ * packing, and other compiler dependencies, and we want a continuous stream of
-+ * bytes for (header+data) for use in TLStreamWrite. See
-+ * _HWPerfHostDeferredEventsEmitter().
-+ *
-+ * A deferred (UFO) packet is represented in memory as:
-+ * - IMG_BOOL                 --> Indicates whether a packet write is
-+ *                                "complete" by atomic context or not.
-+ * - RGX_HWPERF_V2_PACKET_HDR --.
-+ *                              |--> Fed together to TLStreamWrite for
-+ *                              |    deferred packet to be written to
-+ *                              |    HWPerfHost buffer
-+ * - RGX_HWPERF_HOST_UFO_DATA---`
-+ *
-+ * PS: Currently only UFO events are supported in deferred list */
-+#define HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE (sizeof(IMG_BOOL) +\
-+ sizeof(RGX_HWPERF_V2_PACKET_HDR) +\
-+ sizeof(RGX_HWPERF_HOST_UFO_DATA))
-+
-+static void RGX_MISRHandler_HWPerfPostDeferredHostEvents(void *pvData);
-+static void _HWPerfHostDeferredEventsEmitter(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
-+ IMG_UINT32 ui32MaxOrdinal);
-+
-+/*************************************************************************/ /*!
-+@Function RGXHWPerfHostInitOnDemandResources -+ -+@Description This function allocates the HWPerfHost buffer if HWPerf is -+ enabled at driver load time. Otherwise, these buffers are -+ allocated on-demand as and when required. -+ -+@Return PVRSRV_ERROR -+ */ /**************************************************************************/ -+PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO *psRgxDevInfo) -+{ -+ PVRSRV_ERROR eError; -+ /* 4 makes space up to "hwperf_host_999" streams */ -+ IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 4]; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ if (psRgxDevInfo->hHWPerfHostStream != NULL) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "HWPerf host stream already initialised")); -+ return PVRSRV_OK; -+ } -+ -+ /* form the HWPerf host stream name, corresponding to this DevNode; which can make sense in the UM */ -+ if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d", -+ PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, -+ psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID) < 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to form HWPerf host stream name for device %d", -+ __func__, -+ psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ eError = TLStreamCreate(&psRgxDevInfo->hHWPerfHostStream, -+ pszHWPerfHostStreamName, psRgxDevInfo->ui32HWPerfHostBufSize, -+ TL_OPMODE_DROP_NEWER, -+ _HWPerfHostOnConnectCB, psRgxDevInfo, -+ NULL, NULL); -+ PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamCreate"); -+ -+ eError = TLStreamSetNotifStream(psRgxDevInfo->hHWPerfHostStream, -+ PVRSRVGetPVRSRVData()->hTLCtrlStream); -+ /* we can still discover host stream so leave it as is and just log error */ -+ PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream"); -+ -+ /* send the event here because host stream is implicitly opened for write -+ * in TLStreamCreate and TLStreamOpen is never called (so the event is -+ * never emitted) */ -+ eError = TLStreamMarkStreamOpen(psRgxDevInfo->hHWPerfHostStream); -+ PVR_LOG_IF_ERROR(eError, "TLStreamMarkStreamOpen"); -+ -+ /* HWPerfHost deferred events specific initialization */ -+ eError = OSInstallMISR(&psRgxDevInfo->pvHostHWPerfMISR, -+ RGX_MISRHandler_HWPerfPostDeferredHostEvents, -+ psRgxDevInfo, -+ "RGX_HWPerfDeferredEventPoster"); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR", err_install_misr); -+ -+ eError = OSSpinLockCreate(&psRgxDevInfo->hHWPerfHostSpinLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate", err_spinlock_create); -+ -+ psRgxDevInfo->pui8DeferredEvents = OSAllocMem(HWPERF_HOST_MAX_DEFERRED_PACKETS -+ * HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE); -+ if (NULL == psRgxDevInfo->pui8DeferredEvents) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: OUT OF MEMORY. 
Could not allocate memory for " -+ "HWPerfHost deferred events array", __func__)); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto err_alloc_deferred_events; -+ } -+ psRgxDevInfo->ui16DEReadIdx = 0; -+ psRgxDevInfo->ui16DEWriteIdx = 0; -+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) -+ psRgxDevInfo->ui32DEHighWatermark = 0; -+ psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark = 0; -+ psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark = 0; -+#endif -+ -+ PVR_DPF((DBGPRIV_MESSAGE, "HWPerf Host buffer size is %uKB", -+ psRgxDevInfo->ui32HWPerfHostBufSize)); -+ -+ return PVRSRV_OK; -+ -+err_alloc_deferred_events: -+ OSSpinLockDestroy(psRgxDevInfo->hHWPerfHostSpinLock); -+ psRgxDevInfo->hHWPerfHostSpinLock = NULL; -+ -+err_spinlock_create: -+ (void) OSUninstallMISR(psRgxDevInfo->pvHostHWPerfMISR); -+ psRgxDevInfo->pvHostHWPerfMISR = NULL; -+ -+err_install_misr: -+ TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfHostStream); -+ TLStreamClose(psRgxDevInfo->hHWPerfHostStream); -+ psRgxDevInfo->hHWPerfHostStream = NULL; -+ -+ return eError; -+} -+ -+void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) -+{ -+ PVRSRV_VZ_RETN_IF_MODE(GUEST); -+ -+ PVR_ASSERT (psRgxDevInfo); -+ -+ if (psRgxDevInfo->pui8DeferredEvents) -+ { -+ OSFreeMem(psRgxDevInfo->pui8DeferredEvents); -+ psRgxDevInfo->pui8DeferredEvents = NULL; -+ } -+ -+ if (psRgxDevInfo->hHWPerfHostSpinLock) -+ { -+ OSSpinLockDestroy(psRgxDevInfo->hHWPerfHostSpinLock); -+ psRgxDevInfo->hHWPerfHostSpinLock = NULL; -+ } -+ -+ if (psRgxDevInfo->pvHostHWPerfMISR) -+ { -+ (void) OSUninstallMISR(psRgxDevInfo->pvHostHWPerfMISR); -+ psRgxDevInfo->pvHostHWPerfMISR = NULL; -+ } -+ -+ if (psRgxDevInfo->hHWPerfHostStream) -+ { -+ /* send the event here because host stream is implicitly opened for -+ * write in TLStreamCreate and TLStreamClose is never called (so the -+ * event is never emitted) */ -+ TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfHostStream); -+ TLStreamClose(psRgxDevInfo->hHWPerfHostStream); -+ psRgxDevInfo->hHWPerfHostStream = NULL; -+ } -+ -+ if (psRgxDevInfo->hLockHWPerfHostStream) -+ { -+ OSLockDestroy(psRgxDevInfo->hLockHWPerfHostStream); -+ psRgxDevInfo->hLockHWPerfHostStream = NULL; -+ } -+} -+ -+static IMG_UINT64 RGXHWPerfFwSetEventFilterNoLock(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_L2_STREAM_ID eL2StreamId, -+ IMG_UINT64 uiFilter) -+{ -+ IMG_UINT64 uiTmpFilter = 0; -+ IMG_UINT32 i; -+ -+ PVR_ASSERT(eL2StreamId < RGX_HWPERF_L2_STREAM_LAST); -+ -+ /* Set filter for the given L2 stream. */ -+ psRgxDevInfo->ui64HWPerfFilter[eL2StreamId] = uiFilter; -+ -+ /* Compute compound filter from all existing L2 streams' filters. 
*/ -+ for (i = 0; i < RGX_HWPERF_L2_STREAM_LAST; i++) -+ { -+ uiTmpFilter |= psRgxDevInfo->ui64HWPerfFilter[i]; -+ } -+ -+ psRgxDevInfo->ui64HWPerfFwFilter = uiTmpFilter; -+ -+#if !defined(NO_HARDWARE) -+ PVR_DPF((PVR_DBG_WARNING, "HWPerfFW mask has been SET to 0x%" IMG_UINT64_FMTSPECx -+ " (stream %u value SET to 0x%" IMG_UINT64_FMTSPECx ")", -+ psRgxDevInfo->ui64HWPerfFwFilter, eL2StreamId, -+ psRgxDevInfo->ui64HWPerfFilter[eL2StreamId])); -+#endif -+ -+ return uiTmpFilter; -+} -+ -+IMG_UINT64 RGXHWPerfFwSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_L2_STREAM_ID eL2StreamId, -+ IMG_UINT64 uiFilter) -+{ -+ OSLockAcquire(psRgxDevInfo->hHWPerfLock); -+ -+ uiFilter = RGXHWPerfFwSetEventFilterNoLock(psRgxDevInfo, eL2StreamId, uiFilter); -+ -+ OSLockRelease(psRgxDevInfo->hHWPerfLock); -+ -+ return uiFilter; -+} -+ -+inline void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Filter) -+{ -+ PVRSRV_VZ_RETN_IF_MODE(GUEST); -+ psRgxDevInfo->ui32HWPerfHostFilter = ui32Filter; -+} -+ -+inline IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent) -+{ -+ PVR_ASSERT(psRgxDevInfo); -+ return (psRgxDevInfo->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(eEvent)) ? IMG_TRUE : IMG_FALSE; -+} -+ -+#define MAX_RETRY_COUNT 80 -+static inline void _PostFunctionPrologue(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ IMG_UINT32 ui32CurrentOrdinal) -+{ -+ IMG_UINT32 ui32Retry = MAX_RETRY_COUNT; -+ -+ PVR_ASSERT(psRgxDevInfo->hLockHWPerfHostStream != NULL); -+ PVR_ASSERT(psRgxDevInfo->hHWPerfHostStream != NULL); -+ -+ OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); -+ -+ /* First, flush pending events (if any) */ -+ _HWPerfHostDeferredEventsEmitter(psRgxDevInfo, ui32CurrentOrdinal); -+ -+ while ((ui32CurrentOrdinal != psRgxDevInfo->ui32HWPerfHostLastOrdinal + 1) -+ && (--ui32Retry != 0)) -+ { -+ /* Release lock and give a chance to a waiting context to emit the -+ * expected packet */ -+ OSLockRelease (psRgxDevInfo->hLockHWPerfHostStream); -+ OSSleepms(100); -+ OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); -+ } -+ -+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) -+ if ((ui32Retry == 0) && !(psRgxDevInfo->bWarnedPktOrdinalBroke)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Will warn only once! Potential packet(s) lost after ordinal" -+ " %u (Current ordinal = %u)", -+ __func__, -+ psRgxDevInfo->ui32HWPerfHostLastOrdinal, ui32CurrentOrdinal)); -+ psRgxDevInfo->bWarnedPktOrdinalBroke = IMG_TRUE; -+ } -+ -+ if (psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark < (MAX_RETRY_COUNT - ui32Retry)) -+ { -+ psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark = MAX_RETRY_COUNT - ui32Retry; -+ } -+#endif -+} -+ -+static inline void _PostFunctionEpilogue(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ IMG_UINT32 ui32CurrentOrdinal) -+{ -+ /* update last ordinal emitted */ -+ psRgxDevInfo->ui32HWPerfHostLastOrdinal = ui32CurrentOrdinal; -+ -+ PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hLockHWPerfHostStream)); -+ OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); -+} -+ -+static inline IMG_UINT8 *_ReserveHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size) -+{ -+ IMG_UINT8 *pui8Dest; -+ -+ PVRSRV_ERROR eError = TLStreamReserve(psRgxDevInfo->hHWPerfHostStream, -+ &pui8Dest, ui32Size); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not reserve space in %s buffer" -+ " (%d). 
Dropping packet.", -+ __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError)); -+ return NULL; -+ } -+ PVR_ASSERT(pui8Dest != NULL); -+ -+ return pui8Dest; -+} -+ -+static inline void _CommitHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size) -+{ -+ PVRSRV_ERROR eError = TLStreamCommit(psRgxDevInfo->hHWPerfHostStream, -+ ui32Size); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not commit data to %s" -+ " (%d)", __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError)); -+ } -+} -+ -+/* Returns IMG_TRUE if packet write passes, IMG_FALSE otherwise */ -+static inline IMG_BOOL _WriteHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_V2_PACKET_HDR *psHeader) -+{ -+ PVRSRV_ERROR eError = TLStreamWrite(psRgxDevInfo->hHWPerfHostStream, -+ IMG_OFFSET_ADDR(psHeader, 0), psHeader->ui32Size); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not write packet in %s buffer" -+ " (%d). Dropping packet.", -+ __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError)); -+ } -+ -+ /* Regardless of whether write passed/failed, we consider it "written" */ -+ psRgxDevInfo->ui32HWPerfHostLastOrdinal = psHeader->ui32Ordinal; -+ -+ return (eError == PVRSRV_OK); -+} -+ -+/* Helper macros for deferred events operations */ -+#define GET_DE_NEXT_IDX(_curridx) ((_curridx + 1) % HWPERF_HOST_MAX_DEFERRED_PACKETS) -+#define GET_DE_EVENT_BASE(_idx) (IMG_OFFSET_ADDR(psRgxDevInfo->pui8DeferredEvents, \ -+ (_idx) * HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE)) -+ -+#define GET_DE_EVENT_WRITE_STATUS(_base) ((IMG_BOOL*)((void *)(_base))) -+#define GET_DE_EVENT_DATA(_base) (IMG_OFFSET_ADDR((_base), sizeof(IMG_BOOL))) -+ -+/* Emits HWPerfHost event packets present in the deferred list stopping when one -+ * of the following cases is hit: -+ * case 1: Packet ordering breaks i.e. a packet found doesn't meet ordering -+ * criteria (ordinal == last_ordinal + 1) -+ * -+ * case 2: A packet with ordinal > ui32MaxOrdinal is found -+ * -+ * case 3: Deferred list's (read == write) i.e. no more deferred packets. -+ * -+ * NOTE: Caller must possess the hLockHWPerfHostStream lock before calling -+ * this function.*/ -+static void _HWPerfHostDeferredEventsEmitter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ IMG_UINT32 ui32MaxOrdinal) -+{ -+ RGX_HWPERF_V2_PACKET_HDR *psHeader; -+ IMG_UINT32 ui32Retry; -+ IMG_UINT8 *pui8DeferredEvent; -+ IMG_BOOL *pbPacketWritten; -+ IMG_BOOL bWritePassed; -+ -+ PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hLockHWPerfHostStream)); -+ -+ while (psRgxDevInfo->ui16DEReadIdx != psRgxDevInfo->ui16DEWriteIdx) -+ { -+ pui8DeferredEvent = GET_DE_EVENT_BASE(psRgxDevInfo->ui16DEReadIdx); -+ pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(pui8DeferredEvent); -+ psHeader = (RGX_HWPERF_V2_PACKET_HDR*) GET_DE_EVENT_DATA(pui8DeferredEvent); -+ -+ for (ui32Retry = MAX_RETRY_COUNT; !(*pbPacketWritten) && (ui32Retry != 0); ui32Retry--) -+ { -+ /* Packet not yet written, re-check after a while. Wait for a short period as -+ * atomic contexts are generally expected to finish fast */ -+ OSWaitus(10); -+ } -+ -+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) -+ if ((ui32Retry == 0) && !(psRgxDevInfo->bWarnedAtomicCtxPktLost)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Will warn only once. 
Dropping a deferred packet as atomic context" -+ " took too long to write it", -+ __func__)); -+ psRgxDevInfo->bWarnedAtomicCtxPktLost = IMG_TRUE; -+ } -+ -+ if (psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark < (MAX_RETRY_COUNT - ui32Retry)) -+ { -+ psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark = MAX_RETRY_COUNT - ui32Retry; -+ } -+#endif -+ -+ if (*pbPacketWritten) -+ { -+ if ((psHeader->ui32Ordinal > ui32MaxOrdinal) || -+ (psHeader->ui32Ordinal != (psRgxDevInfo->ui32HWPerfHostLastOrdinal + 1))) -+ { -+ /* Leave remaining events to be emitted by next call to this function */ -+ break; -+ } -+ bWritePassed = _WriteHWPerfStream(psRgxDevInfo, psHeader); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Atomic context packet lost!", __func__)); -+ bWritePassed = IMG_FALSE; -+ } -+ -+ /* Move on to next packet */ -+ psRgxDevInfo->ui16DEReadIdx = GET_DE_NEXT_IDX(psRgxDevInfo->ui16DEReadIdx); -+ -+ if (!bWritePassed // if write failed -+ && ui32MaxOrdinal == IMG_UINT32_MAX // and we are from MISR -+ && psRgxDevInfo->ui16DEReadIdx != psRgxDevInfo->ui16DEWriteIdx) // and there are more events -+ { -+ /* Stop emitting here and re-schedule MISR */ -+ OSScheduleMISR(psRgxDevInfo->pvHostHWPerfMISR); -+ break; -+ } -+ } -+} -+ -+static void RGX_MISRHandler_HWPerfPostDeferredHostEvents(void *pvData) -+{ -+ PVRSRV_RGXDEV_INFO *psRgxDevInfo = pvData; -+ -+ OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); -+ -+ /* Since we're called from MISR, there is no upper cap of ordinal to be emitted. -+ * Send IMG_UINT32_MAX to signify all possible packets. */ -+ _HWPerfHostDeferredEventsEmitter(psRgxDevInfo, IMG_UINT32_MAX); -+ -+ OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); -+} -+ -+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) -+static inline void _UpdateDEBufferHighWatermark(PVRSRV_RGXDEV_INFO *psRgxDevInfo) -+{ -+ IMG_UINT32 ui32DEWatermark; -+ IMG_UINT16 ui16LRead = psRgxDevInfo->ui16DEReadIdx; -+ IMG_UINT16 ui16LWrite = psRgxDevInfo->ui16DEWriteIdx; -+ -+ if (ui16LWrite >= ui16LRead) -+ { -+ ui32DEWatermark = ui16LWrite - ui16LRead; -+ } -+ else -+ { -+ ui32DEWatermark = (HWPERF_HOST_MAX_DEFERRED_PACKETS - ui16LRead) + (ui16LWrite); -+ } -+ -+ if (ui32DEWatermark > psRgxDevInfo->ui32DEHighWatermark) -+ { -+ psRgxDevInfo->ui32DEHighWatermark = ui32DEWatermark; -+ } -+} -+#endif -+ -+/* @Description Gets the data/members that concerns the accuracy of a packet in HWPerfHost -+ buffer. Since the data returned by this function is required in both, an -+ atomic as well as a process/sleepable context, it is protected under spinlock -+ -+ @Output pui32Ordinal Pointer to ordinal number assigned to this packet -+ @Output pui64Timestamp Timestamp value for this packet -+ @Output ppui8Dest If the current context cannot sleep, pointer to a place in -+ deferred events buffer where the packet data should be written. -+ Don't care, otherwise. 
-+ */ -+static void _GetHWPerfHostPacketSpecifics(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ IMG_UINT32 *pui32Ordinal, -+ IMG_UINT64 *pui64Timestamp, -+ IMG_UINT8 **ppui8Dest, -+ IMG_BOOL bSleepAllowed) -+{ -+ OS_SPINLOCK_FLAGS uiFlags; -+ -+ /* Spin lock is required to avoid getting scheduled out by a higher priority -+ * context while we're getting header specific details and packet place in -+ * HWPerf buffer (when in atomic context) for ourselves */ -+ OSSpinLockAcquire(psRgxDevInfo->hHWPerfHostSpinLock, uiFlags); -+ -+ *pui32Ordinal = psRgxDevInfo->ui32HWPerfHostNextOrdinal++; -+ *pui64Timestamp = RGXTimeCorrGetClockus64(psRgxDevInfo->psDeviceNode); -+ -+ if (!bSleepAllowed) -+ { -+ /* We're in an atomic context. So return the next position available in -+ * deferred events buffer */ -+ IMG_UINT16 ui16NewWriteIdx; -+ IMG_BOOL *pbPacketWritten; -+ -+ PVR_ASSERT(ppui8Dest != NULL); -+ -+ ui16NewWriteIdx = GET_DE_NEXT_IDX(psRgxDevInfo->ui16DEWriteIdx); -+ if (ui16NewWriteIdx == psRgxDevInfo->ui16DEReadIdx) -+ { -+ /* This shouldn't happen. HWPERF_HOST_MAX_DEFERRED_PACKETS should be -+ * big enough to avoid any such scenario */ -+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) -+ /* PVR_LOG/printk isn't recommended in atomic context. Perhaps we'll do -+ * this debug output here when trace_printk support is added to DDK */ -+// PVR_LOG(("%s: No more space in deferred events buffer (%u/%u) W=%u,R=%u", -+// __func__, psRgxDevInfo->ui32DEHighWatermark, -+// HWPERF_HOST_MAX_DEFERRED_PACKETS, psRgxDevInfo->ui16DEWriteIdx, -+// psRgxDevInfo->ui16DEReadIdx)); -+#endif -+ *ppui8Dest = NULL; -+ } -+ else -+ { -+ /* Return the position where deferred event would be written */ -+ *ppui8Dest = GET_DE_EVENT_BASE(psRgxDevInfo->ui16DEWriteIdx); -+ -+ /* Make sure packet write "state" is "write-pending" _before_ moving write -+ * pointer forward */ -+ pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(*ppui8Dest); -+ *pbPacketWritten = IMG_FALSE; -+ -+ psRgxDevInfo->ui16DEWriteIdx = ui16NewWriteIdx; -+ -+#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) -+ _UpdateDEBufferHighWatermark(psRgxDevInfo); -+#endif -+ } -+ } -+ -+ OSSpinLockRelease(psRgxDevInfo->hHWPerfHostSpinLock, uiFlags); -+} -+ -+static inline void _SetupHostPacketHeader(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ IMG_UINT8 *pui8Dest, -+ RGX_HWPERF_HOST_EVENT_TYPE eEvType, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32Ordinal, -+ IMG_UINT64 ui64Timestamp) -+{ -+ RGX_HWPERF_V2_PACKET_HDR *psHeader = (RGX_HWPERF_V2_PACKET_HDR *) ((void *)pui8Dest); -+ -+ PVR_ASSERT(ui32Size<=RGX_HWPERF_MAX_PACKET_SIZE); -+ -+ psHeader->ui32Ordinal = ui32Ordinal; -+ psHeader->ui64Timestamp = ui64Timestamp; -+ psHeader->ui32Sig = HWPERF_PACKET_V2B_SIG; -+ psHeader->eTypeId = RGX_HWPERF_MAKE_TYPEID(RGX_HWPERF_STREAM_ID1_HOST, -+ eEvType, 0, 0, 0); -+ psHeader->ui32Size = ui32Size; -+} -+ -+static inline void _SetupHostEnqPacketData(IMG_UINT8 *pui8Dest, -+ RGX_HWPERF_KICK_TYPE eEnqType, -+ IMG_UINT32 ui32Pid, -+ IMG_UINT32 ui32FWDMContext, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ PVRSRV_FENCE hCheckFence, -+ PVRSRV_FENCE hUpdateFence, -+ PVRSRV_TIMELINE hUpdateTimeline, -+ IMG_UINT64 ui64CheckFenceUID, -+ IMG_UINT64 ui64UpdateFenceUID, -+ IMG_UINT64 ui64DeadlineInus, -+ IMG_UINT32 ui32CycleEstimate) -+{ -+ RGX_HWPERF_HOST_ENQ_DATA *psData = (RGX_HWPERF_HOST_ENQ_DATA *) -+ IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); -+ psData->ui32EnqType = eEnqType; -+ psData->ui32PID = ui32Pid; -+ psData->ui32ExtJobRef = ui32ExtJobRef; -+ 
psData->ui32IntJobRef = ui32IntJobRef; -+ psData->ui32DMContext = ui32FWDMContext; -+ psData->hCheckFence = hCheckFence; -+ psData->hUpdateFence = hUpdateFence; -+ psData->hUpdateTimeline = hUpdateTimeline; -+ psData->ui64CheckFence_UID = ui64CheckFenceUID; -+ psData->ui64UpdateFence_UID = ui64UpdateFenceUID; -+ psData->ui64DeadlineInus = ui64DeadlineInus; -+ psData->ui32CycleEstimate = ui32CycleEstimate; -+} -+ -+void RGXHWPerfHostPostRaw(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_HOST_EVENT_TYPE eEvType, -+ IMG_BYTE *pbPayload, -+ IMG_UINT32 ui32PayloadSize) -+{ -+ IMG_UINT8 *pui8Dest; -+ IMG_UINT32 ui32PktSize; -+ IMG_UINT32 ui32Ordinal; -+ IMG_UINT64 ui64Timestamp; -+ -+ PVR_ASSERT(ui32PayloadSize <= RGX_HWPERF_MAX_PAYLOAD_SIZE); -+ -+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE); -+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); -+ -+ ui32PktSize = RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32PayloadSize); -+ pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32PktSize); -+ -+ if (pui8Dest == NULL) -+ { -+ goto cleanup; -+ } -+ -+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, eEvType, ui32PktSize, ui32Ordinal, ui64Timestamp); -+ OSDeviceMemCopy((IMG_UINT8*)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)), pbPayload, ui32PayloadSize); -+ _CommitHWPerfStream(psRgxDevInfo, ui32PktSize); -+ -+cleanup: -+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); -+} -+ -+void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_KICK_TYPE eEnqType, -+ IMG_UINT32 ui32Pid, -+ IMG_UINT32 ui32FWDMContext, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ PVRSRV_FENCE hCheckFence, -+ PVRSRV_FENCE hUpdateFence, -+ PVRSRV_TIMELINE hUpdateTimeline, -+ IMG_UINT64 ui64CheckFenceUID, -+ IMG_UINT64 ui64UpdateFenceUID, -+ IMG_UINT64 ui64DeadlineInus, -+ IMG_UINT32 ui32CycleEstimate ) -+{ -+ IMG_UINT8 *pui8Dest; -+ IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_ENQ_DATA); -+ IMG_UINT32 ui32Ordinal; -+ IMG_UINT64 ui64Timestamp; -+ -+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, -+ NULL, IMG_TRUE); -+ -+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); -+ -+ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) -+ { -+ goto cleanup; -+ } -+ -+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ENQ, ui32Size, -+ ui32Ordinal, ui64Timestamp); -+ _SetupHostEnqPacketData(pui8Dest, -+ eEnqType, -+ ui32Pid, -+ ui32FWDMContext, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ hCheckFence, -+ hUpdateFence, -+ hUpdateTimeline, -+ ui64CheckFenceUID, -+ ui64UpdateFenceUID, -+ ui64DeadlineInus, -+ ui32CycleEstimate); -+ -+ _CommitHWPerfStream(psRgxDevInfo, ui32Size); -+ -+cleanup: -+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); -+} -+ -+static inline IMG_UINT32 _CalculateHostUfoPacketSize(RGX_HWPERF_UFO_EV eUfoType) -+{ -+ IMG_UINT32 ui32Size = -+ (IMG_UINT32) offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData); -+ RGX_HWPERF_UFO_DATA_ELEMENT *puData; -+ -+ switch (eUfoType) -+ { -+ case RGX_HWPERF_UFO_EV_CHECK_SUCCESS: -+ case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS: -+ ui32Size += sizeof(puData->sCheckSuccess); -+ break; -+ case RGX_HWPERF_UFO_EV_CHECK_FAIL: -+ case RGX_HWPERF_UFO_EV_PRCHECK_FAIL: -+ ui32Size += sizeof(puData->sCheckFail); -+ break; -+ case RGX_HWPERF_UFO_EV_UPDATE: -+ ui32Size += sizeof(puData->sUpdate); -+ break; -+ default: -+ // unknown type - this should never happen -+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO" -+ " event type")); -+ 
PVR_ASSERT(IMG_FALSE); -+ break; -+ } -+ -+ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); -+} -+ -+static inline void _SetupHostUfoPacketData(IMG_UINT8 *pui8Dest, -+ RGX_HWPERF_UFO_EV eUfoType, -+ RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData) -+{ -+ RGX_HWPERF_HOST_UFO_DATA *psData = (RGX_HWPERF_HOST_UFO_DATA *) -+ IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); -+ RGX_HWPERF_UFO_DATA_ELEMENT *puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) -+ psData->aui32StreamData; -+ -+ psData->eEvType = eUfoType; -+ /* HWPerfHost always emits 1 UFO at a time, since each UFO has 1-to-1 mapping -+ * with an underlying DevNode, and each DevNode has a dedicated HWPerf buffer */ -+ psData->ui32StreamInfo = RGX_HWPERF_MAKE_UFOPKTINFO(1, -+ offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData)); -+ -+ switch (eUfoType) -+ { -+ case RGX_HWPERF_UFO_EV_CHECK_SUCCESS: -+ case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS: -+ puData->sCheckSuccess.ui32FWAddr = -+ psUFOData->sCheckSuccess.ui32FWAddr; -+ puData->sCheckSuccess.ui32Value = -+ psUFOData->sCheckSuccess.ui32Value; -+ break; -+ case RGX_HWPERF_UFO_EV_CHECK_FAIL: -+ case RGX_HWPERF_UFO_EV_PRCHECK_FAIL: -+ puData->sCheckFail.ui32FWAddr = -+ psUFOData->sCheckFail.ui32FWAddr; -+ puData->sCheckFail.ui32Value = -+ psUFOData->sCheckFail.ui32Value; -+ puData->sCheckFail.ui32Required = -+ psUFOData->sCheckFail.ui32Required; -+ break; -+ case RGX_HWPERF_UFO_EV_UPDATE: -+ puData->sUpdate.ui32FWAddr = -+ psUFOData->sUpdate.ui32FWAddr; -+ puData->sUpdate.ui32OldValue = -+ psUFOData->sUpdate.ui32OldValue; -+ puData->sUpdate.ui32NewValue = -+ psUFOData->sUpdate.ui32NewValue; -+ break; -+ default: -+ // unknown type - this should never happen -+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO" -+ " event type")); -+ PVR_ASSERT(IMG_FALSE); -+ break; -+ } -+} -+ -+void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_UFO_EV eUfoType, -+ RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData, -+ const IMG_BOOL bSleepAllowed) -+{ -+ IMG_UINT8 *pui8Dest; -+ IMG_UINT32 ui32Size = _CalculateHostUfoPacketSize(eUfoType); -+ IMG_UINT32 ui32Ordinal; -+ IMG_UINT64 ui64Timestamp; -+ IMG_BOOL *pbPacketWritten = NULL; -+ -+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, -+ &pui8Dest, bSleepAllowed); -+ -+ if (bSleepAllowed) -+ { -+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); -+ -+ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) -+ { -+ goto cleanup; -+ } -+ } -+ else -+ { -+ if (pui8Dest == NULL) -+ { -+ // Give-up if we couldn't get a place in deferred events buffer -+ goto cleanup; -+ } -+ pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(pui8Dest); -+ pui8Dest = GET_DE_EVENT_DATA(pui8Dest); -+ } -+ -+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_UFO, ui32Size, -+ ui32Ordinal, ui64Timestamp); -+ _SetupHostUfoPacketData(pui8Dest, eUfoType, psUFOData); -+ -+ if (bSleepAllowed) -+ { -+ _CommitHWPerfStream(psRgxDevInfo, ui32Size); -+ } -+ else -+ { -+ *pbPacketWritten = IMG_TRUE; -+ OSScheduleMISR(psRgxDevInfo->pvHostHWPerfMISR); -+ } -+ -+cleanup: -+ if (bSleepAllowed) -+ { -+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); -+ } -+} -+ -+#define UNKNOWN_SYNC_NAME "UnknownSync" -+ -+static_assert(PVRSRV_SYNC_NAME_LENGTH==PVRSRV_SYNC_NAME_LENGTH, "Sync class name max does not match Fence Sync name max"); -+ -+static inline IMG_UINT32 _FixNameAndCalculateHostAllocPacketSize( -+ RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, -+ const IMG_CHAR **ppsName, -+ IMG_UINT32 *ui32NameSize) -+{ -+ 
RGX_HWPERF_HOST_ALLOC_DATA *psData; -+ IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_ALLOC_DATA, uAllocDetail); -+ -+ if (*ppsName != NULL && *ui32NameSize > 0) -+ { -+ /* if string longer than maximum cut it (leave space for '\0') */ -+ if (*ui32NameSize >= PVRSRV_SYNC_NAME_LENGTH) -+ *ui32NameSize = PVRSRV_SYNC_NAME_LENGTH; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostAllocEvent: Invalid" -+ " resource name given.")); -+ *ppsName = UNKNOWN_SYNC_NAME; -+ *ui32NameSize = sizeof(UNKNOWN_SYNC_NAME); -+ } -+ -+ switch (eAllocType) -+ { -+ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC: -+ ui32Size += sizeof(psData->uAllocDetail.sSyncAlloc) - PVRSRV_SYNC_NAME_LENGTH + -+ *ui32NameSize; -+ break; -+ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: -+ ui32Size += sizeof(psData->uAllocDetail.sFenceAlloc) - PVRSRV_SYNC_NAME_LENGTH + -+ *ui32NameSize; -+ break; -+ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW: -+ ui32Size += sizeof(psData->uAllocDetail.sSWFenceAlloc) - PVRSRV_SYNC_NAME_LENGTH + -+ *ui32NameSize; -+ break; -+ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP: -+ ui32Size += sizeof(psData->uAllocDetail.sSyncCheckPointAlloc) - PVRSRV_SYNC_NAME_LENGTH + -+ *ui32NameSize; -+ break; -+ default: -+ // unknown type - this should never happen -+ PVR_DPF((PVR_DBG_ERROR, -+ "RGXHWPerfHostPostAllocEvent: Invalid alloc event type")); -+ PVR_ASSERT(IMG_FALSE); -+ break; -+ } -+ -+ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); -+} -+ -+static inline void _SetupHostAllocPacketData(IMG_UINT8 *pui8Dest, -+ RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, -+ RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail, -+ const IMG_CHAR *psName, -+ IMG_UINT32 ui32NameSize) -+{ -+ RGX_HWPERF_HOST_ALLOC_DATA *psData = (RGX_HWPERF_HOST_ALLOC_DATA *) -+ IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); -+ -+ IMG_CHAR *acName = NULL; -+ -+ psData->ui32AllocType = eAllocType; -+ -+ switch (eAllocType) -+ { -+ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC: -+ psData->uAllocDetail.sSyncAlloc = puAllocDetail->sSyncAlloc; -+ acName = psData->uAllocDetail.sSyncAlloc.acName; -+ break; -+ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: -+ psData->uAllocDetail.sFenceAlloc = puAllocDetail->sFenceAlloc; -+ acName = psData->uAllocDetail.sFenceAlloc.acName; -+ break; -+ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW: -+ psData->uAllocDetail.sSWFenceAlloc = puAllocDetail->sSWFenceAlloc; -+ acName = psData->uAllocDetail.sSWFenceAlloc.acName; -+ break; -+ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP: -+ psData->uAllocDetail.sSyncCheckPointAlloc = puAllocDetail->sSyncCheckPointAlloc; -+ acName = psData->uAllocDetail.sSyncCheckPointAlloc.acName; -+ break; -+ default: -+ // unknown type - this should never happen -+ PVR_DPF((PVR_DBG_ERROR, -+ "RGXHWPerfHostPostAllocEvent: Invalid alloc event type")); -+ PVR_ASSERT(IMG_FALSE); -+ } -+ -+ -+ if (acName != NULL) -+ { -+ if (ui32NameSize) -+ { -+ OSStringLCopy(acName, psName, ui32NameSize); -+ } -+ else -+ { -+ /* In case no name was given make sure we don't access random -+ * memory */ -+ acName[0] = '\0'; -+ } -+ } -+} -+ -+void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO* psRgxDevInfo, -+ RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, -+ const IMG_CHAR *psName, -+ IMG_UINT32 ui32NameSize, -+ RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail) -+{ -+ IMG_UINT8 *pui8Dest; -+ IMG_UINT64 ui64Timestamp; -+ IMG_UINT32 ui32Ordinal; -+ IMG_UINT32 ui32Size = _FixNameAndCalculateHostAllocPacketSize(eAllocType, -+ &psName, -+ &ui32NameSize); -+ -+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, 
&ui64Timestamp, -+ NULL, IMG_TRUE); -+ -+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); -+ -+ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) -+ { -+ goto cleanup; -+ } -+ -+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ALLOC, ui32Size, -+ ui32Ordinal, ui64Timestamp); -+ -+ _SetupHostAllocPacketData(pui8Dest, -+ eAllocType, -+ puAllocDetail, -+ psName, -+ ui32NameSize); -+ -+ _CommitHWPerfStream(psRgxDevInfo, ui32Size); -+ -+cleanup: -+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); -+} -+ -+static inline void _SetupHostFreePacketData(IMG_UINT8 *pui8Dest, -+ RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType, -+ IMG_UINT64 ui64UID, -+ IMG_UINT32 ui32PID, -+ IMG_UINT32 ui32FWAddr) -+{ -+ RGX_HWPERF_HOST_FREE_DATA *psData = (RGX_HWPERF_HOST_FREE_DATA *) -+ IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); -+ -+ psData->ui32FreeType = eFreeType; -+ -+ switch (eFreeType) -+ { -+ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC: -+ psData->uFreeDetail.sSyncFree.ui32FWAddr = ui32FWAddr; -+ break; -+ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: -+ psData->uFreeDetail.sFenceDestroy.ui64Fence_UID = ui64UID; -+ break; -+ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP: -+ psData->uFreeDetail.sSyncCheckPointFree.ui32CheckPt_FWAddr = ui32FWAddr; -+ break; -+ default: -+ // unknown type - this should never happen -+ PVR_DPF((PVR_DBG_ERROR, -+ "RGXHWPerfHostPostFreeEvent: Invalid free event type")); -+ PVR_ASSERT(IMG_FALSE); -+ } -+} -+ -+void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType, -+ IMG_UINT64 ui64UID, -+ IMG_UINT32 ui32PID, -+ IMG_UINT32 ui32FWAddr) -+{ -+ IMG_UINT8 *pui8Dest; -+ IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_FREE_DATA); -+ IMG_UINT32 ui32Ordinal; -+ IMG_UINT64 ui64Timestamp; -+ -+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, -+ NULL, IMG_TRUE); -+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); -+ -+ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) -+ { -+ goto cleanup; -+ } -+ -+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_FREE, ui32Size, -+ ui32Ordinal, ui64Timestamp); -+ _SetupHostFreePacketData(pui8Dest, -+ eFreeType, -+ ui64UID, -+ ui32PID, -+ ui32FWAddr); -+ -+ _CommitHWPerfStream(psRgxDevInfo, ui32Size); -+ -+cleanup: -+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); -+} -+ -+static inline IMG_UINT32 _FixNameAndCalculateHostModifyPacketSize( -+ RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, -+ const IMG_CHAR **ppsName, -+ IMG_UINT32 *ui32NameSize) -+{ -+ RGX_HWPERF_HOST_MODIFY_DATA *psData; -+ RGX_HWPERF_HOST_MODIFY_DETAIL *puData; -+ IMG_UINT32 ui32Size = sizeof(psData->ui32ModifyType); -+ -+ if (*ppsName != NULL && *ui32NameSize > 0) -+ { -+ /* first strip the terminator */ -+ if ((*ppsName)[*ui32NameSize - 1] == '\0') -+ *ui32NameSize -= 1; -+ /* if string longer than maximum cut it (leave space for '\0') */ -+ if (*ui32NameSize >= PVRSRV_SYNC_NAME_LENGTH) -+ *ui32NameSize = PVRSRV_SYNC_NAME_LENGTH - 1; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostModifyEvent: Invalid" -+ " resource name given.")); -+ *ppsName = UNKNOWN_SYNC_NAME; -+ *ui32NameSize = sizeof(UNKNOWN_SYNC_NAME) - 1; -+ } -+ -+ switch (eModifyType) -+ { -+ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: -+ ui32Size += sizeof(puData->sFenceMerge) - PVRSRV_SYNC_NAME_LENGTH + -+ *ui32NameSize + 1; /* +1 for '\0' */ -+ break; -+ default: -+ // unknown type - this should never happen -+ PVR_DPF((PVR_DBG_ERROR, 
-+ "RGXHWPerfHostPostModifyEvent: Invalid modify event type")); -+ PVR_ASSERT(IMG_FALSE); -+ break; -+ } -+ -+ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); -+} -+ -+static inline void _SetupHostModifyPacketData(IMG_UINT8 *pui8Dest, -+ RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, -+ IMG_UINT64 ui64NewUID, -+ IMG_UINT64 ui64UID1, -+ IMG_UINT64 ui64UID2, -+ const IMG_CHAR *psName, -+ IMG_UINT32 ui32NameSize) -+{ -+ RGX_HWPERF_HOST_MODIFY_DATA *psData = (RGX_HWPERF_HOST_MODIFY_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); -+ -+ IMG_CHAR *acName = NULL; -+ -+ psData->ui32ModifyType = eModifyType; -+ -+ switch (eModifyType) -+ { -+ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: -+ psData->uModifyDetail.sFenceMerge.ui64NewFence_UID = ui64NewUID; -+ psData->uModifyDetail.sFenceMerge.ui64InFence1_UID = ui64UID1; -+ psData->uModifyDetail.sFenceMerge.ui64InFence2_UID = ui64UID2; -+ acName = psData->uModifyDetail.sFenceMerge.acName; -+ break; -+ default: -+ // unknown type - this should never happen -+ PVR_DPF((PVR_DBG_ERROR, -+ "RGXHWPerfHostPostModifyEvent: Invalid modify event type")); -+ PVR_ASSERT(IMG_FALSE); -+ } -+ -+ if (acName != NULL) -+ { -+ if (ui32NameSize) -+ { -+ OSStringLCopy(acName, psName, ui32NameSize); -+ } -+ else -+ { -+ /* In case no name was given make sure we don't access random -+ * memory */ -+ acName[0] = '\0'; -+ } -+ } -+} -+ -+void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, -+ IMG_UINT64 ui64NewUID, -+ IMG_UINT64 ui64UID1, -+ IMG_UINT64 ui64UID2, -+ const IMG_CHAR *psName, -+ IMG_UINT32 ui32NameSize) -+{ -+ IMG_UINT8 *pui8Dest; -+ IMG_UINT64 ui64Timestamp; -+ IMG_UINT32 ui32Ordinal; -+ IMG_UINT32 ui32Size = _FixNameAndCalculateHostModifyPacketSize(eModifyType, -+ &psName, -+ &ui32NameSize); -+ -+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, -+ NULL, IMG_TRUE); -+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); -+ -+ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) -+ { -+ goto cleanup; -+ } -+ -+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_MODIFY, ui32Size, -+ ui32Ordinal, ui64Timestamp); -+ _SetupHostModifyPacketData(pui8Dest, -+ eModifyType, -+ ui64NewUID, -+ ui64UID1, -+ ui64UID2, -+ psName, -+ ui32NameSize); -+ -+ _CommitHWPerfStream(psRgxDevInfo, ui32Size); -+ -+cleanup: -+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); -+} -+ -+static inline void _SetupHostClkSyncPacketData(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT8 *pui8Dest) -+{ -+ RGX_HWPERF_HOST_CLK_SYNC_DATA *psData = (RGX_HWPERF_HOST_CLK_SYNC_DATA *) -+ IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); -+ RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psRgxDevInfo->psRGXFWIfGpuUtilFWCb; -+ IMG_UINT32 ui32CurrIdx; -+ RGXFWIF_TIME_CORR *psTimeCorr; -+ -+ RGXFwSharedMemCacheOpValue(psGpuUtilFWCB->ui32TimeCorrSeqCount, INVALIDATE); -+ ui32CurrIdx = RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount); -+ -+ RGXFwSharedMemCacheOpValue(psGpuUtilFWCB->sTimeCorr[ui32CurrIdx], INVALIDATE); -+ psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32CurrIdx]; -+ -+ psData->ui64CRTimestamp = psTimeCorr->ui64CRTimeStamp; -+ psData->ui64OSTimestamp = psTimeCorr->ui64OSTimeStamp; -+ psData->ui32ClockSpeed = psTimeCorr->ui32CoreClockSpeed; -+} -+ -+void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo) -+{ -+ IMG_UINT8 *pui8Dest; -+ IMG_UINT32 ui32Size = -+ RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_CLK_SYNC_DATA); -+ IMG_UINT32 ui32Ordinal; -+ 
IMG_UINT64 ui64Timestamp; -+ -+ /* if the buffer for time correlation data is not yet available (possibly -+ * device not initialised yet) skip this event */ -+ if (psRgxDevInfo->psRGXFWIfGpuUtilFWCb == NULL) -+ { -+ return; -+ } -+ -+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, -+ NULL, IMG_TRUE); -+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); -+ -+ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) -+ { -+ goto cleanup; -+ } -+ -+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_CLK_SYNC, ui32Size, -+ ui32Ordinal, ui64Timestamp); -+ _SetupHostClkSyncPacketData(psRgxDevInfo, pui8Dest); -+ -+ _CommitHWPerfStream(psRgxDevInfo, ui32Size); -+ -+cleanup: -+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); -+} -+ -+static inline void _SetupHostDeviceInfoPacketData(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_DEV_INFO_EV eEvType, -+ RGX_HWPERF_HOST_DEV_INFO_DETAIL *puPacketData, -+ IMG_UINT8 *pui8Dest) -+{ -+ RGX_HWPERF_HOST_DEV_INFO_DATA *psData = (RGX_HWPERF_HOST_DEV_INFO_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); -+ psData->eEvType = eEvType; -+ -+ switch (eEvType) -+ { -+ case RGX_HWPERF_DEV_INFO_EV_HEALTH: -+ if (puPacketData != NULL) -+ { -+ psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthStatus = -+ puPacketData->sDeviceStatus.eDeviceHealthStatus; -+ psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthReason = -+ puPacketData->sDeviceStatus.eDeviceHealthReason; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: puPacketData is invalid.")); -+ } -+ break; -+ case RGX_HWPERF_DEV_INFO_EV_FEATURES: -+ { -+ PVRSRV_ERROR eError; -+ eError = RGXServerFeatureFlagsToHWPerfFlags(psRgxDevInfo, -+ &psData->uDevInfoDetail.sBVNC); -+ PVR_LOG_IF_ERROR(eError, "RGXServerFeatureFlagsToHWPerfFlags"); -+ psData->uDevInfoDetail.sBVNC.ui32BvncKmFeatureFlags |= -+#if defined(RGX_FEATURE_HWPERF_ROGUE) -+ RGX_HWPERF_FEATURE_ROGUE_FLAG; -+#elif defined(RGX_FEATURE_HWPERF_VOLCANIC) -+ RGX_HWPERF_FEATURE_VOLCANIC_FLAG; -+#else -+ 0x0; -+#endif -+ } -+ break; -+ default: -+ // unknown type - this should never happen -+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: Invalid event type")); -+ PVR_ASSERT(IMG_FALSE); -+ break; -+ } -+} -+ -+static inline IMG_UINT32 _CalculateHostDeviceInfoPacketSize(RGX_HWPERF_DEV_INFO_EV eEvType) -+{ -+ IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_DEV_INFO_DATA, uDevInfoDetail); -+ -+ switch (eEvType) -+ { -+ case RGX_HWPERF_DEV_INFO_EV_HEALTH: -+ ui32Size += sizeof(((RGX_HWPERF_HOST_DEV_INFO_DATA*)0)->uDevInfoDetail.sDeviceStatus); -+ break; -+ case RGX_HWPERF_DEV_INFO_EV_FEATURES: -+ ui32Size += sizeof(((RGX_HWPERF_HOST_DEV_INFO_DATA*)0)->uDevInfoDetail.sBVNC); -+ break; -+ default: -+ // unknown type - this should never happen -+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: Invalid event type")); -+ PVR_ASSERT(IMG_FALSE); -+ break; -+ } -+ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); -+} -+ -+void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_DEV_INFO_EV eEvType, -+ RGX_HWPERF_HOST_DEV_INFO_DETAIL *puData) -+{ -+ IMG_UINT8 *pui8Dest; -+ IMG_UINT32 ui32Ordinal; -+ IMG_UINT64 ui64Timestamp; -+ IMG_UINT32 ui32Size; -+ -+ OSLockAcquire(psRgxDevInfo->hHWPerfLock); -+ -+ if (psRgxDevInfo->hHWPerfHostStream != (IMG_HANDLE) NULL) -+ { -+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE); -+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); -+ ui32Size = 
_CalculateHostDeviceInfoPacketSize(eEvType); -+ -+ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL) -+ { -+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_DEV_INFO, ui32Size, ui32Ordinal, ui64Timestamp); -+ _SetupHostDeviceInfoPacketData(psRgxDevInfo, eEvType, puData, pui8Dest); -+ _CommitHWPerfStream(psRgxDevInfo, ui32Size); -+ } -+ -+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); -+ } -+ -+ OSLockRelease(psRgxDevInfo->hHWPerfLock); -+} -+ -+static inline void _SetupHostInfoPacketData(RGX_HWPERF_INFO_EV eEvType, -+ IMG_UINT64 ui64TotalMemoryUsage, -+ IMG_UINT32 ui32LivePids, -+ PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage, -+ IMG_UINT8 *pui8Dest) -+{ -+ IMG_INT i; -+ RGX_HWPERF_HOST_INFO_DATA *psData = (RGX_HWPERF_HOST_INFO_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); -+ psData->eEvType = eEvType; -+ -+ switch (eEvType) -+ { -+ case RGX_HWPERF_INFO_EV_MEM64_USAGE: -+ psData->uInfoDetail.sMemUsageStats.ui64TotalMemoryUsage = ui64TotalMemoryUsage; -+ -+ if (psPerProcessMemUsage) -+ { -+ for (i = 0; i < ui32LivePids; ++i) -+ { -+ psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32Pid = psPerProcessMemUsage[i].ui32Pid; -+ psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui64KernelMemUsage = psPerProcessMemUsage[i].ui64KernelMemUsage; -+ psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui64GraphicsMemUsage = psPerProcessMemUsage[i].ui64GraphicsMemUsage; -+ } -+ } -+ break; -+ default: -+ // unknown type - this should never happen -+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostInfo: Invalid event type")); -+ PVR_ASSERT(IMG_FALSE); -+ break; -+ } -+} -+ -+static inline IMG_UINT32 _CalculateHostInfoPacketSize(RGX_HWPERF_INFO_EV eEvType, -+ IMG_UINT64 *pui64TotalMemoryUsage, -+ IMG_UINT32 *pui32LivePids, -+ PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsage) -+{ -+ IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_INFO_DATA, uInfoDetail); -+ -+ switch (eEvType) -+ { -+ case RGX_HWPERF_INFO_EV_MEM64_USAGE: -+#if !defined(__QNXNTO__) -+ if (PVRSRVGetProcessMemUsage(pui64TotalMemoryUsage, pui32LivePids, ppsPerProcessMemUsage) == PVRSRV_OK) -+ { -+ ui32Size += offsetof(RGX_HWPERF_HOST_INFO_DETAIL, sMemUsageStats.sPerProcessUsage) -+ + ((*pui32LivePids) * sizeof(((RGX_HWPERF_HOST_INFO_DETAIL*)0)->sMemUsageStats.sPerProcessUsage)); -+ } -+#else -+ PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform")); -+#endif -+ break; -+ default: -+ // unknown type - this should never happen -+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostInfo: Invalid event type")); -+ PVR_ASSERT(IMG_FALSE); -+ break; -+ } -+ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); -+} -+ -+void RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_INFO_EV eEvType) -+{ -+ IMG_UINT8 *pui8Dest; -+ IMG_UINT32 ui32Size; -+ IMG_UINT32 ui32Ordinal; -+ IMG_UINT64 ui64Timestamp; -+ IMG_UINT64 ui64TotalMemoryUsage = 0; -+ PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage = NULL; -+ IMG_UINT32 ui32LivePids = 0; -+ -+ OSLockAcquire(psRgxDevInfo->hHWPerfLock); -+ -+ if (psRgxDevInfo->hHWPerfHostStream != (IMG_HANDLE) NULL) -+ { -+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE); -+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); -+ -+ ui32Size = _CalculateHostInfoPacketSize(eEvType, &ui64TotalMemoryUsage, &ui32LivePids, &psPerProcessMemUsage); -+ -+ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL) -+ { -+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, 
RGX_HWPERF_HOST_INFO, ui32Size, ui32Ordinal, ui64Timestamp); -+ _SetupHostInfoPacketData(eEvType, ui64TotalMemoryUsage, ui32LivePids, psPerProcessMemUsage, pui8Dest); -+ _CommitHWPerfStream(psRgxDevInfo, ui32Size); -+ } -+ -+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); -+ -+ if (psPerProcessMemUsage) -+ OSFreeMemNoStats(psPerProcessMemUsage); // psPerProcessMemUsage was allocated with OSAllocZMemNoStats -+ } -+ -+ OSLockRelease(psRgxDevInfo->hHWPerfLock); -+} -+ -+static inline IMG_UINT32 -+_CalculateHostFenceWaitPacketSize(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eWaitType) -+{ -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *psSizeCalculator; -+ IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA, uDetail); -+ -+ switch (eWaitType) -+ { -+ case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN: -+ ui32Size += sizeof(psSizeCalculator->uDetail.sBegin); -+ break; -+ case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END: -+ ui32Size += sizeof(psSizeCalculator->uDetail.sEnd); -+ break; -+ default: -+ // unknown type - this should never happen -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid wait event type (%u)", __func__, -+ eWaitType)); -+ PVR_ASSERT(IMG_FALSE); -+ break; -+ } -+ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); -+} -+ -+static inline void -+_SetupHostFenceWaitPacketData(IMG_UINT8 *pui8Dest, -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eWaitType, -+ IMG_PID uiPID, -+ PVRSRV_FENCE hFence, -+ IMG_UINT32 ui32Data) -+{ -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *psData = (RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *) -+ IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); -+ -+ psData->eType = eWaitType; -+ psData->uiPID = uiPID; -+ psData->hFence = hFence; -+ -+ switch (eWaitType) -+ { -+ case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN: -+ psData->uDetail.sBegin.ui32TimeoutInMs = ui32Data; -+ break; -+ case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END: -+ psData->uDetail.sEnd.eResult = -+ (RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT) ui32Data; -+ break; -+ default: -+ // unknown type - this should never happen -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid fence-wait event type", __func__)); -+ PVR_ASSERT(IMG_FALSE); -+ } -+} -+ -+void RGXHWPerfHostPostFenceWait(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType, -+ IMG_PID uiPID, -+ PVRSRV_FENCE hFence, -+ IMG_UINT32 ui32Data) -+{ -+ IMG_UINT8 *pui8Dest; -+ IMG_UINT32 ui32Size; -+ IMG_UINT32 ui32Ordinal; -+ IMG_UINT64 ui64Timestamp; -+ -+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, -+ NULL, IMG_TRUE); -+ -+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); -+ -+ ui32Size = _CalculateHostFenceWaitPacketSize(eType); -+ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) -+ { -+ goto cleanup; -+ } -+ -+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_SYNC_FENCE_WAIT, -+ ui32Size, ui32Ordinal, ui64Timestamp); -+ _SetupHostFenceWaitPacketData(pui8Dest, eType, uiPID, hFence, ui32Data); -+ -+ _CommitHWPerfStream(psRgxDevInfo, ui32Size); -+ -+cleanup: -+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); -+} -+ -+static inline IMG_UINT32 _CalculateHostSWTimelineAdvPacketSize(void) -+{ -+ IMG_UINT32 ui32Size = sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA); -+ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); -+} -+ -+static inline void -+_SetupHostSWTimelineAdvPacketData(IMG_UINT8 *pui8Dest, -+ IMG_PID uiPID, -+ PVRSRV_TIMELINE hSWTimeline, -+ IMG_UINT64 ui64SyncPtIndex) -+ -+{ -+ RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA *psData = (RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA *) -+ 
IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); -+ -+ psData->uiPID = uiPID; -+ psData->hTimeline = hSWTimeline; -+ psData->ui64SyncPtIndex = ui64SyncPtIndex; -+} -+ -+void RGXHWPerfHostPostSWTimelineAdv(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ IMG_PID uiPID, -+ PVRSRV_TIMELINE hSWTimeline, -+ IMG_UINT64 ui64SyncPtIndex) -+{ -+ IMG_UINT8 *pui8Dest; -+ IMG_UINT32 ui32Size; -+ IMG_UINT32 ui32Ordinal; -+ IMG_UINT64 ui64Timestamp; -+ -+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, -+ NULL, IMG_TRUE); -+ -+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); -+ -+ ui32Size = _CalculateHostSWTimelineAdvPacketSize(); -+ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) -+ { -+ goto cleanup; -+ } -+ -+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE, -+ ui32Size, ui32Ordinal, ui64Timestamp); -+ _SetupHostSWTimelineAdvPacketData(pui8Dest, uiPID, hSWTimeline, ui64SyncPtIndex); -+ -+ _CommitHWPerfStream(psRgxDevInfo, ui32Size); -+ -+cleanup: -+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); -+ -+} -+ -+void RGXHWPerfHostPostClientInfoProcName(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ IMG_PID uiPID, -+ const IMG_CHAR *psName) -+{ -+ RGX_HWPERF_HOST_CLIENT_INFO_DATA* psPkt; -+ IMG_UINT8 *pui8Dest; -+ IMG_UINT32 ui32Size; -+ IMG_UINT32 ui32NameLen; -+ IMG_UINT32 ui32Ordinal; -+ IMG_UINT64 ui64Timestamp; -+ -+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE); -+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); -+ -+ ui32NameLen = OSStringLength(psName) + 1U; -+ ui32Size = RGX_HWPERF_MAKE_SIZE_VARIABLE(RGX_HWPERF_HOST_CLIENT_INFO_PROC_NAME_BASE_SIZE -+ + RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE(ui32NameLen)); -+ -+ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) -+ { -+ goto cleanup; -+ } -+ -+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_CLIENT_INFO, -+ ui32Size, ui32Ordinal, ui64Timestamp); -+ -+ psPkt = (RGX_HWPERF_HOST_CLIENT_INFO_DATA*)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); -+ psPkt->eType = RGX_HWPERF_HOST_CLIENT_INFO_TYPE_PROCESS_NAME; -+ psPkt->uDetail.sProcName.ui32Count = 1U; -+ psPkt->uDetail.sProcName.asProcNames[0].uiClientPID = uiPID; -+ psPkt->uDetail.sProcName.asProcNames[0].ui32Length = ui32NameLen; -+ (void)OSCachedMemCopy(psPkt->uDetail.sProcName.asProcNames[0].acName, psName, ui32NameLen); -+ -+ _CommitHWPerfStream(psRgxDevInfo, ui32Size); -+ -+cleanup: -+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); -+} -+ -+/****************************************************************************** -+ * Currently only implemented on Linux. Feature can be enabled to provide -+ * an interface to 3rd-party kernel modules that wish to access the -+ * HWPerf data. The API is documented in the rgxapi_km.h header and -+ * the rgx_hwperf* headers. -+ *****************************************************************************/ -+ -+/* Internal HWPerf kernel connection/device data object to track the state -+ * of a client session. 
-+ */ -+typedef struct -+{ -+ PVRSRV_DEVICE_NODE* psRgxDevNode; -+ PVRSRV_RGXDEV_INFO* psRgxDevInfo; -+ -+ /* TL Open/close state */ -+ IMG_HANDLE hSD[RGX_HWPERF_MAX_STREAM_ID]; -+ -+ /* TL Acquire/release state */ -+ IMG_PBYTE pHwpBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer returned to user in acquire call */ -+ IMG_PBYTE pHwpBufEnd[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to end of HwpBuf */ -+ IMG_PBYTE pTlBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer obtained via TlAcquireData */ -+ IMG_PBYTE pTlBufPos[RGX_HWPERF_MAX_STREAM_ID]; /*!< initial position in TlBuf to acquire packets */ -+ IMG_PBYTE pTlBufRead[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to the last packet read */ -+ IMG_UINT32 ui32AcqDataLen[RGX_HWPERF_MAX_STREAM_ID]; /*!< length of acquired TlBuf */ -+ IMG_BOOL bRelease[RGX_HWPERF_MAX_STREAM_ID]; /*!< used to determine whether or not to release currently held TlBuf */ -+ -+ -+} RGX_KM_HWPERF_DEVDATA; -+ -+PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ RGX_KM_HWPERF_DEVDATA *psDevData; -+ RGX_HWPERF_DEVICE *psNewHWPerfDevice; -+ RGX_HWPERF_CONNECTION* psHWPerfConnection; -+ IMG_BOOL bFWActive = IMG_FALSE; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ /* avoid uninitialised data */ -+ PVR_ASSERT(*ppsHWPerfConnection == NULL); -+ PVR_ASSERT(psPVRSRVData); -+ -+ /* Allocate connection object */ -+ psHWPerfConnection = OSAllocZMem(sizeof(*psHWPerfConnection)); -+ if (!psHWPerfConnection) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ /* early save the return pointer to aid clean-up if failure occurs */ -+ *ppsHWPerfConnection = psHWPerfConnection; -+ -+ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); -+ psDeviceNode = psPVRSRVData->psDeviceNodeList; -+ -+ while (psDeviceNode) -+ { -+ if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: HWPerf: Device not currently active. 
ID:%u", -+ __func__, -+ psDeviceNode->sDevId.i32KernelDeviceID)); -+ psDeviceNode = psDeviceNode->psNext; -+ continue; -+ } -+ /* Create a list node to be attached to connection object's list */ -+ psNewHWPerfDevice = OSAllocMem(sizeof(*psNewHWPerfDevice)); -+ if (!psNewHWPerfDevice) -+ { -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ /* Insert node at head of the list */ -+ psNewHWPerfDevice->psNext = psHWPerfConnection->psHWPerfDevList; -+ psHWPerfConnection->psHWPerfDevList = psNewHWPerfDevice; -+ -+ /* create a device data object for kernel server */ -+ psDevData = OSAllocZMem(sizeof(*psDevData)); -+ psNewHWPerfDevice->hDevData = (IMG_HANDLE)psDevData; -+ if (!psDevData) -+ { -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ if (OSSNPrintf(psNewHWPerfDevice->pszName, sizeof(psNewHWPerfDevice->pszName), -+ "hwperf_device_%d", psDeviceNode->sDevId.i32KernelDeviceID) < 0) -+ { -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to form HWPerf device name for device %d", -+ __func__, -+ psDeviceNode->sDevId.i32KernelDeviceID)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevData->psRgxDevNode = psDeviceNode; -+ psDevData->psRgxDevInfo = psDeviceNode->pvDevice; -+ -+ psDeviceNode = psDeviceNode->psNext; -+ -+ /* At least one device is active */ -+ bFWActive = IMG_TRUE; -+ } -+ -+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); -+ -+ if (!bFWActive) -+ { -+ return PVRSRV_ERROR_NOT_READY; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION *psHWPerfConnection) -+{ -+ RGX_KM_HWPERF_DEVDATA *psDevData; -+ RGX_HWPERF_DEVICE *psHWPerfDev; -+ PVRSRV_RGXDEV_INFO *psRgxDevInfo; -+ PVRSRV_ERROR eError; -+ IMG_CHAR pszHWPerfFwStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; -+ IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5]; -+ IMG_UINT32 ui32BufSize; -+ -+ /* Disable producer callback by default for the Kernel API. */ -+ IMG_UINT32 ui32StreamFlags = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING | -+ PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ /* Validate input argument values supplied by the caller */ -+ if (!psHWPerfConnection) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psHWPerfDev = psHWPerfConnection->psHWPerfDevList; -+ while (psHWPerfDev) -+ { -+ psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; -+ psRgxDevInfo = psDevData->psRgxDevInfo; -+ -+ /* In the case where the AppHint has not been set we need to -+ * initialise the HWPerf resources here. Allocated on-demand -+ * to reduce RAM foot print on systems not needing HWPerf. 
-+ */ -+ OSLockAcquire(psRgxDevInfo->hHWPerfLock); -+ if (RGXHWPerfIsInitRequired(psRgxDevInfo, RGX_HWPERF_L2_STREAM_HWPERF)) -+ { -+ eError = RGXHWPerfInitOnDemandL1Buffer(psRgxDevInfo); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Initialisation of on-demand HWPerfFW resources failed", -+ __func__)); -+ OSLockRelease(psRgxDevInfo->hHWPerfLock); -+ return eError; -+ } -+ -+ /* if this fails it also cleans up L1 buffer */ -+ eError = RGXHWPerfInitOnDemandL2Stream(psRgxDevInfo, RGX_HWPERF_L2_STREAM_HWPERF); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Initialisation of on-demand HWPerfFW resources failed", -+ __func__)); -+ OSLockRelease(psRgxDevInfo->hHWPerfLock); -+ return eError; -+ } -+ } -+ OSLockRelease(psRgxDevInfo->hHWPerfLock); -+ -+ OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); -+ if (psRgxDevInfo->hHWPerfHostStream == NULL) -+ { -+ eError = RGXHWPerfHostInitOnDemandResources(psRgxDevInfo); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Initialisation of on-demand HWPerfHost resources failed", -+ __func__)); -+ OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); -+ return eError; -+ } -+ } -+ OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); -+ -+ /* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */ -+ if (OSSNPrintf(pszHWPerfFwStreamName, sizeof(pszHWPerfFwStreamName), "%s%d", -+ PVRSRV_TL_HWPERF_RGX_FW_STREAM, -+ psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID) < 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to form HWPerf stream name for device %d", -+ __func__, -+ psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ /* Open the RGX TL stream for reading in this session */ -+ eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, -+ pszHWPerfFwStreamName, -+ ui32StreamFlags, -+ &psDevData->hSD[RGX_HWPERF_STREAM_ID0_FW]); -+ PVR_LOG_RETURN_IF_ERROR(eError, "TLClientOpenStream(RGX_HWPerf)"); -+ -+ /* form the HWPerf host stream name, corresponding to this DevNode; which can make sense in the UM */ -+ if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d", -+ PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, -+ psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID) < 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to form HWPerf host stream name for device %d", -+ __func__, -+ psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* Open the host TL stream for reading in this session */ -+ eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, -+ pszHWPerfHostStreamName, -+ PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING, -+ &psDevData->hSD[RGX_HWPERF_STREAM_ID1_HOST]); -+ PVR_LOG_RETURN_IF_ERROR(eError, "TLClientOpenStream(Host_HWPerf)"); -+ -+ /* Allocate a large enough buffer for use during the entire session to -+ * avoid the need to resize in the Acquire call as this might be in an ISR -+ * Choose size that can contain at least one packet. 
-+ */ -+ /* Allocate buffer for FW Stream */ -+ ui32BufSize = FW_STREAM_BUFFER_SIZE; -+ psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] = OSAllocMem(ui32BufSize); -+ if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] == NULL) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID0_FW] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]+ui32BufSize; -+ -+ /* Allocate buffer for Host Stream */ -+ ui32BufSize = HOST_STREAM_BUFFER_SIZE; -+ psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] = OSAllocMem(ui32BufSize); -+ if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] == NULL) -+ { -+ OSFreeMem(psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID1_HOST] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST]+ui32BufSize; -+ -+ psHWPerfDev = psHWPerfDev->psNext; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ eError = RGXHWPerfLazyConnect(ppsHWPerfConnection); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfLazyConnect", e0); -+ -+ eError = RGXHWPerfOpen(*ppsHWPerfConnection); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfOpen", e1); -+ -+ return PVRSRV_OK; -+ -+e1: /* HWPerfOpen might have opened some, and then failed */ -+ RGXHWPerfClose(*ppsHWPerfConnection); -+e0: /* LazyConnect might have allocated some resources and then failed, -+ * make sure they are cleaned up */ -+ RGXHWPerfFreeConnection(ppsHWPerfConnection); -+ return eError; -+} -+ -+/* -+ PVRSRVRGXControlHWPerfBlocksKM -+ */ -+PVRSRV_ERROR PVRSRVRGXControlHWPerfBlocksKM( -+ CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_BOOL bEnable, -+ IMG_UINT32 ui32ArrayLen, -+ IMG_UINT16 * psBlockIDs) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGXFWIF_KCCB_CMD sKccbCmd; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ PVRSRV_RGXDEV_INFO *psDevice; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psBlockIDs != NULL, "psBlockIDs"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM((ui32ArrayLen>0) && (ui32ArrayLen <= RGXFWIF_HWPERF_CTRL_BLKS_MAX), "ui32ArrayLen"); -+ -+ PVR_ASSERT(psDeviceNode); -+ psDevice = psDeviceNode->pvDevice; -+ -+ /* Fill in the command structure with the parameters needed -+ */ -+ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS; -+ sKccbCmd.uCmdData.sHWPerfCtrlBlks.bEnable = bEnable; -+ sKccbCmd.uCmdData.sHWPerfCtrlBlks.ui32NumBlocks = ui32ArrayLen; -+ -+ OSDeviceMemCopy(sKccbCmd.uCmdData.sHWPerfCtrlBlks.aeBlockIDs, psBlockIDs, sizeof(IMG_UINT16) * ui32ArrayLen); -+ -+ -+ /* Ask the FW to carry out the HWPerf configuration command -+ */ -+ eError = RGXScheduleCommandAndGetKCCBSlot(psDevice, -+ RGXFWIF_DM_GP, -+ &sKccbCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ &ui32kCCBCommandSlot); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot"); -+ -+ /* Wait for FW to complete */ -+ eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); -+ -+ -+#if defined(DEBUG) -+ if (bEnable) -+ PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been ENABLED", ui32ArrayLen)); -+ else -+ PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been DISABLED", ui32ArrayLen)); -+#endif -+ -+ PVR_DPF_RETURN_OK; -+} -+ -+/* -+ 
PVRSRVRGXCtrlHWPerfKM -+ */ -+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGX_HWPERF_STREAM_ID eStreamId, -+ IMG_BOOL bToggle, -+ IMG_UINT64 ui64Mask) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ PVR_DPF_ENTERED; -+ PVR_ASSERT(psDeviceNode); -+ -+ if (eStreamId == RGX_HWPERF_STREAM_ID0_FW) -+ { -+ return RGXHWPerfCtrlFwBuffer(psDeviceNode, RGX_HWPERF_L2_STREAM_HWPERF, -+ bToggle, ui64Mask); -+ } -+ else if (eStreamId == RGX_HWPERF_STREAM_ID1_HOST) -+ { -+ return RGXHWPerfCtrlHostBuffer(psDeviceNode, bToggle, (IMG_UINT32) ui64Mask); -+ } -+ else if (eStreamId == RGX_HWPERF_STREAM_ID2_CLIENT) -+ { -+ IMG_UINT32 ui32Index = (IMG_UINT32) (ui64Mask >> 32); -+ IMG_UINT32 ui32Mask = (IMG_UINT32) ui64Mask; -+ -+ return RGXHWPerfCtrlClientBuffer(bToggle, ui32Index, ui32Mask); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCtrlHWPerfKM: Unknown stream id.")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ PVR_DPF_RETURN_OK; -+} -+ -+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfFW( -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGX_HWPERF_L2_STREAM_ID eL2StreamId, -+ IMG_UINT64 ui64Mask, -+ HWPERF_FILTER_OPERATION eMaskOp) -+{ -+ IMG_UINT64 uiTmpFilter; -+ -+ PVR_DPF_ENTERED; -+ PVR_ASSERT(psDeviceNode); -+ PVR_ASSERT(eL2StreamId < RGX_HWPERF_L2_STREAM_LAST); -+ -+ uiTmpFilter = -+ ((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui64HWPerfFilter[eL2StreamId]; -+ -+ switch (eMaskOp) -+ { -+ case HWPERF_FILTER_OPERATION_SET: -+ uiTmpFilter = ui64Mask; -+ break; -+ case HWPERF_FILTER_OPERATION_BIT_CLR: -+ uiTmpFilter &= ~ui64Mask; -+ break; -+ case HWPERF_FILTER_OPERATION_BIT_OR: -+ uiTmpFilter |= ui64Mask; -+ break; -+ } -+ -+ PVR_DPF_RETURN_RC(RGXHWPerfCtrlFwBuffer(psDeviceNode, eL2StreamId, IMG_FALSE, -+ uiTmpFilter)); -+} -+ -+#if defined(PVRSRV_FORCE_HWPERF_TO_SCHED_CLK) -+/* -+ PVRSRVRGXGetHWPerfTimeStampKM -+ */ -+PVRSRV_ERROR PVRSRVRGXGetHWPerfTimeStampKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT64 *pui64TimeStamp) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ *pui64TimeStamp = RGXTimeCorrGetClockus64(psDeviceNode); -+ return PVRSRV_OK; -+} -+#endif -+ -+PVRSRV_ERROR RGXHWPerfControl( -+ RGX_HWPERF_CONNECTION *psHWPerfConnection, -+ RGX_HWPERF_STREAM_ID eStreamId, -+ IMG_BOOL bToggle, -+ IMG_UINT64 ui64Mask) -+{ -+ PVRSRV_ERROR eError; -+ RGX_KM_HWPERF_DEVDATA* psDevData; -+ RGX_HWPERF_DEVICE* psHWPerfDev; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ /* Validate input argument values supplied by the caller */ -+ if (!psHWPerfConnection) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psHWPerfDev = psHWPerfConnection->psHWPerfDevList; -+ -+ while (psHWPerfDev) -+ { -+ psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; -+ -+ /* Call the internal server API */ -+ eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDevData->psRgxDevNode, eStreamId, bToggle, ui64Mask); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM"); -+ -+ psHWPerfDev = psHWPerfDev->psNext; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+ -+IMG_INTERNAL PVRSRV_ERROR RGXHWPerfToggleCounters( -+ RGX_HWPERF_CONNECTION *psHWPerfConnection, -+ IMG_UINT32 ui32NumBlocks, -+ IMG_UINT16* aeBlockIDs, -+ IMG_BOOL bToggle, -+ const char* szFunctionString); -+ -+IMG_INTERNAL PVRSRV_ERROR RGXHWPerfToggleCounters( -+ RGX_HWPERF_CONNECTION *psHWPerfConnection, -+ IMG_UINT32 ui32NumBlocks, -+ IMG_UINT16* aeBlockIDs, -+ IMG_BOOL bToggle, -+ 
const char* szFunctionString) -+{ -+ PVRSRV_ERROR eError; -+ RGX_KM_HWPERF_DEVDATA* psDevData; -+ RGX_HWPERF_DEVICE* psHWPerfDev; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ if (!psHWPerfConnection || ui32NumBlocks==0 || !aeBlockIDs) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psHWPerfDev = psHWPerfConnection->psHWPerfDevList; -+ -+ while (psHWPerfDev) -+ { -+ psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; -+ -+ /* Call the internal server API */ -+ eError = PVRSRVRGXControlHWPerfBlocksKM(NULL, -+ psDevData->psRgxDevNode, -+ bToggle, -+ ui32NumBlocks, -+ aeBlockIDs); -+ -+ PVR_LOG_RETURN_IF_ERROR(eError, szFunctionString); -+ -+ psHWPerfDev = psHWPerfDev->psNext; -+ } -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR RGXHWPerfDisableCounters( -+ RGX_HWPERF_CONNECTION *psHWPerfConnection, -+ IMG_UINT32 ui32NumBlocks, -+ IMG_UINT16* aeBlockIDs) -+{ -+ return RGXHWPerfToggleCounters(psHWPerfConnection, -+ ui32NumBlocks, -+ aeBlockIDs, -+ IMG_FALSE, -+ __func__); -+} -+ -+ -+PVRSRV_ERROR RGXHWPerfEnableCounters( -+ RGX_HWPERF_CONNECTION *psHWPerfConnection, -+ IMG_UINT32 ui32NumBlocks, -+ IMG_UINT16* aeBlockIDs) -+{ -+ return RGXHWPerfToggleCounters(psHWPerfConnection, -+ ui32NumBlocks, -+ aeBlockIDs, -+ IMG_TRUE, -+ __func__); -+} -+ -+ -+PVRSRV_ERROR RGXHWPerfAcquireEvents( -+ IMG_HANDLE hDevData, -+ RGX_HWPERF_STREAM_ID eStreamId, -+ IMG_PBYTE* ppBuf, -+ IMG_UINT32* pui32BufLen) -+{ -+ PVRSRV_ERROR eError; -+ RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData; -+ IMG_PBYTE pDataDest; -+ IMG_UINT32 ui32TlPackets = 0; -+ IMG_PBYTE pBufferEnd; -+ PVRSRVTL_PPACKETHDR psHDRptr; -+ PVRSRVTL_PACKETTYPE ui16TlType; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ /* Reset the output arguments in case we discover an error */ -+ *ppBuf = NULL; -+ *pui32BufLen = 0; -+ -+ /* Valid input argument values supplied by the caller */ -+ if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (psDevData->pTlBuf[eStreamId] == NULL) -+ { -+ /* Acquire some data to read from the HWPerf TL stream */ -+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, -+ psDevData->hSD[eStreamId], -+ &psDevData->pTlBuf[eStreamId], -+ &psDevData->ui32AcqDataLen[eStreamId]); -+ PVR_LOG_RETURN_IF_ERROR(eError, "TLClientAcquireData"); -+ -+ psDevData->pTlBufPos[eStreamId] = psDevData->pTlBuf[eStreamId]; -+ } -+ -+ /* TL indicates no data exists so return OK and zero. 
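RGXHWPerfEnableCounters and RGXHWPerfDisableCounters above both funnel into RGXHWPerfToggleCounters, which forwards the block list to PVRSRVRGXControlHWPerfBlocksKM for every device on the connection. A minimal sketch of a caller, assuming an already-open connection; the two block IDs are placeholders, since valid IDs depend on the configured GPU.

/* Hedged sketch: enabling two HWPerf counter blocks over an existing
 * kernel connection. The block IDs are placeholders; real values come
 * from the HWPerf block ID definitions for the target BVNC, and the
 * count must not exceed RGXFWIF_HWPERF_CTRL_BLKS_MAX. */
static PVRSRV_ERROR ExampleEnableBlocks(RGX_HWPERF_CONNECTION *psConn)
{
	IMG_UINT16 aui16Blocks[] = { 0x0001, 0x0002 };   /* placeholder IDs */

	return RGXHWPerfEnableCounters(psConn,
	                               sizeof(aui16Blocks) / sizeof(aui16Blocks[0]),
	                               aui16Blocks);
}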
*/ -+ if ((psDevData->pTlBufPos[eStreamId] == NULL) || (psDevData->ui32AcqDataLen[eStreamId] == 0)) -+ { -+ return PVRSRV_OK; -+ } -+ -+ /* Process each TL packet in the data buffer we have acquired */ -+ pBufferEnd = psDevData->pTlBuf[eStreamId]+psDevData->ui32AcqDataLen[eStreamId]; -+ pDataDest = psDevData->pHwpBuf[eStreamId]; -+ psHDRptr = GET_PACKET_HDR(psDevData->pTlBufPos[eStreamId]); -+ psDevData->pTlBufRead[eStreamId] = psDevData->pTlBufPos[eStreamId]; -+ while (psHDRptr < (PVRSRVTL_PPACKETHDR)((void *)pBufferEnd)) -+ { -+ ui16TlType = GET_PACKET_TYPE(psHDRptr); -+ if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA) -+ { -+ IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr); -+ if (0 == ui16DataLen) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfAcquireEvents: ZERO Data in TL data packet: %p", psHDRptr)); -+ } -+ else -+ { -+ /* Check next packet does not fill buffer */ -+ if (pDataDest + ui16DataLen > psDevData->pHwpBufEnd[eStreamId]) -+ { -+ break; -+ } -+ -+ /* For valid data copy it into the client buffer and move -+ * the write position on */ -+ OSDeviceMemCopy(pDataDest, GET_PACKET_DATA_PTR(psHDRptr), ui16DataLen); -+ pDataDest += ui16DataLen; -+ } -+ } -+ else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Indication that the transport buffer was full")); -+ } -+ else -+ { -+ /* else Ignore padding packet type and others */ -+ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Ignoring TL packet, type %d", ui16TlType )); -+ } -+ -+ /* Update loop variable to the next packet and increment counts */ -+ psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr); -+ /* Updated to keep track of the next packet to be read. */ -+ psDevData->pTlBufRead[eStreamId] = (IMG_PBYTE) ((void *)psHDRptr); -+ ui32TlPackets++; -+ } -+ -+ PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfAcquireEvents: TL Packets processed %03d", ui32TlPackets)); -+ -+ psDevData->bRelease[eStreamId] = IMG_FALSE; -+ if (psHDRptr >= (PVRSRVTL_PPACKETHDR)((void *)pBufferEnd)) -+ { -+ psDevData->bRelease[eStreamId] = IMG_TRUE; -+ } -+ -+ /* Update output arguments with client buffer details and true length */ -+ *ppBuf = psDevData->pHwpBuf[eStreamId]; -+ *pui32BufLen = pDataDest - psDevData->pHwpBuf[eStreamId]; -+ -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR RGXHWPerfReleaseEvents( -+ IMG_HANDLE hDevData, -+ RGX_HWPERF_STREAM_ID eStreamId) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ /* Valid input argument values supplied by the caller */ -+ if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (psDevData->bRelease[eStreamId]) -+ { -+ /* Inform the TL that we are done with reading the data. */ -+ eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[eStreamId]); -+ psDevData->ui32AcqDataLen[eStreamId] = 0; -+ psDevData->pTlBuf[eStreamId] = NULL; -+ } -+ else -+ { -+ psDevData->pTlBufPos[eStreamId] = psDevData->pTlBufRead[eStreamId]; -+ } -+ return eError; -+} -+ -+ -+PVRSRV_ERROR RGXHWPerfGetFilter( -+ IMG_HANDLE hDevData, -+ RGX_HWPERF_STREAM_ID eStreamId, -+ IMG_UINT64 *ui64Filter) -+{ -+ PVRSRV_RGXDEV_INFO* psRgxDevInfo = -+ hDevData ? 
((RGX_KM_HWPERF_DEVDATA*) hDevData)->psRgxDevInfo : NULL; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ /* Valid input argument values supplied by the caller */ -+ if (!psRgxDevInfo) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pointer to the RGX device", -+ __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* No need to take hHWPerfLock here since we are only reading data -+ * from always existing integers to return to debugfs which is an -+ * atomic operation. -+ */ -+ switch (eStreamId) { -+ case RGX_HWPERF_STREAM_ID0_FW: -+ *ui64Filter = psRgxDevInfo->ui64HWPerfFilter[RGX_HWPERF_L2_STREAM_HWPERF]; -+ break; -+ case RGX_HWPERF_STREAM_ID1_HOST: -+ *ui64Filter = psRgxDevInfo->ui32HWPerfHostFilter; -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid stream ID", -+ __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) -+{ -+ RGX_HWPERF_DEVICE *psHWPerfDev, *psHWPerfNextDev; -+ RGX_HWPERF_CONNECTION *psHWPerfConnection = *ppsHWPerfConnection; -+ -+ /* if connection object itself is NULL, nothing to free */ -+ if (psHWPerfConnection == NULL) -+ { -+ return PVRSRV_OK; -+ } -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ psHWPerfNextDev = psHWPerfConnection->psHWPerfDevList; -+ while (psHWPerfNextDev) -+ { -+ psHWPerfDev = psHWPerfNextDev; -+ psHWPerfNextDev = psHWPerfNextDev->psNext; -+ -+ /* Free the session memory */ -+ if (psHWPerfDev->hDevData) -+ OSFreeMem(psHWPerfDev->hDevData); -+ OSFreeMem(psHWPerfDev); -+ } -+ OSFreeMem(psHWPerfConnection); -+ *ppsHWPerfConnection = NULL; -+ -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection) -+{ -+ RGX_HWPERF_DEVICE *psHWPerfDev; -+ RGX_KM_HWPERF_DEVDATA* psDevData; -+ IMG_UINT uiStreamId; -+ PVRSRV_ERROR eError; -+ -+ /* Check session connection is not zero */ -+ if (!psHWPerfConnection) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ psHWPerfDev = psHWPerfConnection->psHWPerfDevList; -+ while (psHWPerfDev) -+ { -+ psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; -+ for (uiStreamId = 0; uiStreamId < RGX_HWPERF_MAX_STREAM_ID; uiStreamId++) -+ { -+ /* If the TL buffer exists they have not called ReleaseData -+ * before disconnecting so clean it up */ -+ if (psDevData->pTlBuf[uiStreamId]) -+ { -+ /* TLClientReleaseData call and null out the buffer fields -+ * and length */ -+ eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[uiStreamId]); -+ psDevData->ui32AcqDataLen[uiStreamId] = 0; -+ psDevData->pTlBuf[uiStreamId] = NULL; -+ PVR_LOG_IF_ERROR(eError, "TLClientReleaseData"); -+ /* Packets may be lost if release was not required */ -+ if (!psDevData->bRelease[uiStreamId]) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfClose: Events in buffer waiting to be read, remaining events may be lost.")); -+ } -+ } -+ -+ /* Close the TL stream, ignore the error if it occurs as we -+ * are disconnecting */ -+ if (psDevData->hSD[uiStreamId]) -+ { -+ eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE, -+ psDevData->hSD[uiStreamId]); -+ PVR_LOG_IF_ERROR(eError, "TLClientCloseStream"); -+ psDevData->hSD[uiStreamId] = NULL; -+ } -+ -+ /* Free the client buffer used in session */ -+ if (psDevData->pHwpBuf[uiStreamId]) -+ { -+ OSFreeMem(psDevData->pHwpBuf[uiStreamId]); -+ psDevData->pHwpBuf[uiStreamId] = NULL; -+ } -+ } -+ psHWPerfDev = 
psHWPerfDev->psNext; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); -+ -+ eError = RGXHWPerfClose(*ppsHWPerfConnection); -+ PVR_LOG_IF_ERROR(eError, "RGXHWPerfClose"); -+ -+ eError = RGXHWPerfFreeConnection(ppsHWPerfConnection); -+ PVR_LOG_IF_ERROR(eError, "RGXHWPerfFreeConnection"); -+ -+ return eError; -+} -+ -+IMG_UINT64 RGXHWPerfConvertCRTimeStamp( -+ IMG_UINT32 ui32ClkSpeed, -+ IMG_UINT64 ui64CorrCRTimeStamp, -+ IMG_UINT64 ui64CorrOSTimeStamp, -+ IMG_UINT64 ui64CRTimeStamp) -+{ -+ IMG_UINT64 ui64CRDeltaToOSDeltaKNs; -+ IMG_UINT64 ui64EventOSTimestamp, deltaRgxTimer, delta_ns; -+ -+ if (!(ui64CRTimeStamp) || !(ui32ClkSpeed) || !(ui64CorrCRTimeStamp) || !(ui64CorrOSTimeStamp)) -+ { -+ return 0; -+ } -+ -+ ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(ui32ClkSpeed); -+ -+ /* RGX CR timer ticks delta */ -+ deltaRgxTimer = ui64CRTimeStamp - ui64CorrCRTimeStamp; -+ /* RGX time delta in nanoseconds */ -+ delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui64CRDeltaToOSDeltaKNs); -+ /* Calculate OS time of HWPerf event */ -+ ui64EventOSTimestamp = ui64CorrOSTimeStamp + delta_ns; -+ -+ return ui64EventOSTimestamp; -+} -+ -+/****************************************************************************** -+ End of file (rgxhwperf_common.c) -+ ******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxhwperf_common.h b/drivers/gpu/drm/img-rogue/rgxhwperf_common.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxhwperf_common.h -@@ -0,0 +1,635 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX HW Performance header file -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the RGX HWPerf functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RGXHWPERF_COMMON_H_ -+#define RGXHWPERF_COMMON_H_ -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+ -+#include "device.h" -+#include "connection_server.h" -+#include "rgxdevice.h" -+#include "rgx_hwperf.h" -+#include "rgx_fwif_hwperf.h" -+#include "cache_ops.h" -+#include "rgxfwmemctx.h" -+ -+/* HWPerf host buffer size constraints in KBs */ -+#define HWPERF_HOST_TL_STREAM_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB -+#define HWPERF_HOST_TL_STREAM_SIZE_MIN (32U) -+#define HWPERF_HOST_TL_STREAM_SIZE_MAX (3072U) -+ -+/* Operations on HWPerf filter. */ -+typedef enum HWPERF_FILTER_OPERATION_TAG -+{ -+ HWPERF_FILTER_OPERATION_SET, -+ HWPERF_FILTER_OPERATION_BIT_CLR, -+ HWPERF_FILTER_OPERATION_BIT_OR, -+} HWPERF_FILTER_OPERATION; -+ -+/****************************************************************************** -+ * RGX HW Performance decode Bvnc Features for HWPerf -+ *****************************************************************************/ -+PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGX_HWPERF_BVNC *psBVNC); -+ -+PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGX_HWPERF_BVNC *psBVNC); -+ -+/****************************************************************************** -+ * RGX HW Performance Data Transport Routines -+ *****************************************************************************/ -+ -+PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE* psDevInfo); -+ -+PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo); -+void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo); -+ -+PVRSRV_ERROR RGXHWPerfInitOnDemandL1Buffer(PVRSRV_RGXDEV_INFO* psRgxDevInfo); -+PVRSRV_ERROR RGXHWPerfInitOnDemandL2Stream(PVRSRV_RGXDEV_INFO* psRgxDevInfo, -+ RGX_HWPERF_L2_STREAM_ID eL2StreamId); -+void RGXHWPerfDeinitL2Stream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_L2_STREAM_ID eL2StreamId); -+void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode); -+void RGXHWPerfClientInitAppHintCallbacks(void); -+ -+static INLINE PVRSRV_ERROR RGXAcquireHWPerfCtlCPUAddr(PVRSRV_DEVICE_NODE *psDevNode, -+ RGXFWIF_HWPERF_CTL **ppsHWPerfCtl) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ PVRSRV_ERROR eError; -+ -+ PVR_RETURN_IF_FALSE(psDevNode != NULL, PVRSRV_ERROR_INVALID_PARAMS); -+ -+ psDevInfo = (PVRSRV_RGXDEV_INFO*)psDevNode->pvDevice; -+ PVR_RETURN_IF_FALSE(psDevInfo != NULL, PVRSRV_ERROR_INVALID_PARAMS); -+ -+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc, -+ (void**)ppsHWPerfCtl); -+ RGXFwSharedMemCacheOpPtr(ppsHWPerfCtl, INVALIDATE); -+ -+ return eError; -+} -+ -+static INLINE void RGXReleaseHWPerfCtlCPUAddr(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_LOG_RETURN_VOID_IF_FALSE(psDevNode != NULL, "psDevNode is invalid"); -+ -+ psDevInfo = 
(PVRSRV_RGXDEV_INFO*)psDevNode->pvDevice; -+ PVR_LOG_RETURN_VOID_IF_FALSE(psDevInfo != NULL, "psDevInfo invalid"); -+ -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc); -+} -+ -+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfCountersKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_UINT32 ui32BlockID, -+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters); -+ -+PVRSRV_ERROR PVRSRVRGXGetEnabledHWPerfBlocksKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ const IMG_UINT32 ui32ArrayLen, -+ IMG_UINT32 *pui32BlockCount, -+ IMG_UINT32 *pui32EnabledBlockIDs); -+ -+/****************************************************************************** -+ * RGX HW Performance Profiling API(s) -+ *****************************************************************************/ -+ -+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM( -+ CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ RGX_HWPERF_STREAM_ID eStreamId, -+ IMG_BOOL bToggle, -+ IMG_UINT64 ui64Mask); -+ -+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfFW( -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGX_HWPERF_L2_STREAM_ID eL2StreamId, -+ IMG_UINT64 ui64Mask, -+ HWPERF_FILTER_OPERATION eMaskOp); -+ -+#if defined(PVRSRV_FORCE_HWPERF_TO_SCHED_CLK) -+PVRSRV_ERROR PVRSRVRGXGetHWPerfTimeStampKM( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT64 *pui64TimeStamp); -+#endif -+ -+PVRSRV_ERROR PVRSRVRGXControlHWPerfBlocksKM( -+ CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_BOOL bEnable, -+ IMG_UINT32 ui32ArrayLen, -+ IMG_UINT16 * psBlockIDs); -+ -+/****************************************************************************** -+ * RGX HW Performance Host Stream API -+ *****************************************************************************/ -+ -+static inline RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS -+RGXHWPerfConvDeviceHealthStatus(PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus) -+{ -+ switch (eDeviceHealthStatus) -+ { -+ case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED; -+ case PVRSRV_DEVICE_HEALTH_STATUS_OK: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK; -+ case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_NOT_RESPONDING; -+ case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD; -+ case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT; -+ default: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED; -+ } -+} -+ -+static inline RGX_HWPERF_HOST_DEVICE_HEALTH_REASON -+RGXHWPerfConvDeviceHealthReason(PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason) -+{ -+ switch (eDeviceHealthReason) -+ { -+ case PVRSRV_DEVICE_HEALTH_REASON_NONE: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE; -+ case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED; -+ case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING; -+ case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS; -+ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT; -+ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED; -+ case PVRSRV_DEVICE_HEALTH_REASON_IDLING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING; -+ case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: return 
RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING; -+ case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS; -+ case PVRSRV_DEVICE_HEALTH_REASON_PCI_ERROR: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_PCI_ERROR; -+ default: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED; -+ } -+} -+ -+PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB); -+PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo); -+void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo); -+ -+IMG_UINT64 RGXHWPerfFwSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_L2_STREAM_ID eL2StreamId, -+ IMG_UINT64 uiFilter); -+ -+void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ IMG_UINT32 ui32Filter); -+ -+void RGXHWPerfHostPostRaw(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_HOST_EVENT_TYPE eEvType, -+ IMG_BYTE *pbPayload, -+ IMG_UINT32 ui32PayloadSize); -+ -+void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_KICK_TYPE eEnqType, -+ IMG_UINT32 ui32Pid, -+ IMG_UINT32 ui32FWDMContext, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ PVRSRV_FENCE hCheckFence, -+ PVRSRV_FENCE hUpdateFence, -+ PVRSRV_TIMELINE hUpdateTimeline, -+ IMG_UINT64 ui64CheckFenceUID, -+ IMG_UINT64 ui64UpdateFenceUID, -+ IMG_UINT64 ui64DeadlineInus, -+ IMG_UINT32 ui32CycleEstimate); -+ -+void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, -+ const IMG_CHAR *psName, -+ IMG_UINT32 ui32NameSize, -+ RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail); -+ -+void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType, -+ IMG_UINT64 ui64UID, -+ IMG_UINT32 ui32PID, -+ IMG_UINT32 ui32FWAddr); -+ -+void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, -+ IMG_UINT64 ui64NewUID, -+ IMG_UINT64 ui64UID1, -+ IMG_UINT64 ui64UID2, -+ const IMG_CHAR *psName, -+ IMG_UINT32 ui32NameSize); -+ -+void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_UFO_EV eUfoType, -+ RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData, -+ const IMG_BOOL bSleepAllowed); -+ -+void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo); -+ -+void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_DEV_INFO_EV eEvType, -+ RGX_HWPERF_HOST_DEV_INFO_DETAIL *psData); -+ -+void RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_INFO_EV eEvType); -+ -+void RGXHWPerfHostPostFenceWait(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType, -+ IMG_PID uiPID, -+ PVRSRV_FENCE hFence, -+ IMG_UINT32 ui32Data); -+ -+void RGXHWPerfHostPostSWTimelineAdv(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ IMG_PID uiPID, -+ PVRSRV_TIMELINE hSWTimeline, -+ IMG_UINT64 ui64SyncPtIndex); -+ -+void RGXHWPerfHostPostClientInfoProcName(PVRSRV_RGXDEV_INFO *psRgxDevInfo, -+ IMG_PID uiPID, -+ const IMG_CHAR *psName); -+ -+IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent); -+ -+#define _RGX_HWPERF_HOST_FILTER(CTX, EV) \ -+ (((PVRSRV_RGXDEV_INFO *)CTX->psDeviceNode->pvDevice)->ui32HWPerfHostFilter \ -+ & RGX_HWPERF_EVENT_MASK_VALUE(EV)) -+ -+#define _RGX_DEVICE_INFO_FROM_CTX(CTX) \ -+ ((PVRSRV_RGXDEV_INFO *)CTX->psDeviceNode->pvDevice) -+ -+#define _RGX_DEVICE_INFO_FROM_NODE(DEVNODE) \ -+ ((PVRSRV_RGXDEV_INFO *)DEVNODE->pvDevice) -+ -+/* 
Deadline and cycle estimate is not supported for all ENQ events */ -+#define NO_DEADLINE 0 -+#define NO_CYCEST 0 -+ -+ -+#if defined(SUPPORT_RGX) -+ -+/** -+ * This macro checks if HWPerfHost and the event are enabled and if they are -+ * it posts event to the HWPerfHost stream. -+ * -+ * @param C Kick context -+ * @param P Pid of kicking process -+ * @param X Related FW context -+ * @param E External job reference -+ * @param I Job ID -+ * @param K Kick type -+ * @param CF Check fence handle -+ * @param UF Update fence handle -+ * @param UT Update timeline (on which above UF was created) handle -+ * @param CHKUID Check fence UID -+ * @param UPDUID Update fence UID -+ * @param D Deadline -+ * @param CE Cycle estimate -+ */ -+#define RGXSRV_HWPERF_ENQ(C, P, X, E, I, K, CF, UF, UT, CHKUID, UPDUID, D, CE) \ -+ do { \ -+ if (_RGX_HWPERF_HOST_FILTER(C, RGX_HWPERF_HOST_ENQ)) \ -+ { \ -+ RGXHWPerfHostPostEnqEvent(_RGX_DEVICE_INFO_FROM_CTX(C), \ -+ (K), (P), (X), (E), (I), \ -+ (CF), (UF), (UT), \ -+ (CHKUID), (UPDUID), (D), (CE)); \ -+ } \ -+ } while (0) -+ -+/** -+ * This macro checks if HWPerfHost and the event are enabled and if they are -+ * it posts event to the HWPerfHost stream. -+ * -+ * @param I Device Info pointer -+ * @param T Host UFO event type -+ * @param D Pointer to UFO data -+ * @param S Is sleeping allowed? -+ */ -+#define RGXSRV_HWPERF_UFO(I, T, D, S) \ -+ do { \ -+ if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_UFO)) \ -+ { \ -+ RGXHWPerfHostPostUfoEvent((I), (T), (D), (S)); \ -+ } \ -+ } while (0) -+ -+/** -+ * This macro checks if HWPerfHost and the event are enabled and if they are -+ * it posts event to the HWPerfHost stream. -+ * -+ * @param D Device node pointer -+ * @param T Host ALLOC event type -+ * @param FWADDR sync firmware address -+ * @param N string containing sync name -+ * @param Z string size including null terminating character -+ */ -+#define RGXSRV_HWPERF_ALLOC(D, T, FWADDR, N, Z) \ -+ do { \ -+ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ -+ { \ -+ RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ -+ uAllocDetail.sSyncAlloc.ui32FWAddr = (FWADDR); \ -+ RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ -+ RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ -+ (N), (Z), &uAllocDetail); \ -+ } \ -+ } while (0) -+ -+/** -+ * This macro checks if HWPerfHost and the event are enabled and if they are -+ * it posts event to the HWPerfHost stream. 
-+ * -+ * @param D Device Node pointer -+ * @param PID ID of allocating process -+ * @param FENCE PVRSRV_FENCE object -+ * @param FWADDR sync firmware address -+ * @param N string containing sync name -+ * @param Z string size including null terminating character -+ */ -+#define RGXSRV_HWPERF_ALLOC_FENCE(D, PID, FENCE, FWADDR, N, Z) \ -+ do { \ -+ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ -+ { \ -+ RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ -+ uAllocDetail.sFenceAlloc.uiPID = (PID); \ -+ uAllocDetail.sFenceAlloc.hFence = (FENCE); \ -+ uAllocDetail.sFenceAlloc.ui32CheckPt_FWAddr = (FWADDR); \ -+ RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ -+ RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, \ -+ N, Z, &uAllocDetail); \ -+ } \ -+ } while (0) -+ -+/** -+ * @param D Device Node pointer -+ * @param TL PVRSRV_TIMELINE on which CP is allocated -+ * @param PID Allocating process ID of this TL/FENCE -+ * @param FENCE PVRSRV_FENCE as passed to SyncCheckpointResolveFence OR PVRSRV_NO_FENCE -+ * @param FWADDR sync firmware address -+ * @param N string containing sync name -+ * @param Z string size including null terminating character -+ */ -+#define RGXSRV_HWPERF_ALLOC_SYNC_CP(D, TL, PID, FENCE, FWADDR, N, Z) \ -+ do { \ -+ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ -+ { \ -+ RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ -+ uAllocDetail.sSyncCheckPointAlloc.ui32CheckPt_FWAddr = (FWADDR); \ -+ uAllocDetail.sSyncCheckPointAlloc.hTimeline = (TL); \ -+ uAllocDetail.sSyncCheckPointAlloc.uiPID = (PID); \ -+ uAllocDetail.sSyncCheckPointAlloc.hFence = (FENCE); \ -+ RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ -+ RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, \ -+ N, Z, &uAllocDetail); \ -+ } \ -+ } while (0) -+ -+/** -+ * @param D Device Node pointer -+ * @param PID ID of allocating process -+ * @param SW_FENCE PVRSRV_FENCE object -+ * @param SW_TL PVRSRV_TIMELINE on which SW_FENCE is allocated -+ * @param SPI Sync point index on the SW_TL on which this SW_FENCE is allocated -+ * @param N string containing sync name -+ * @param Z string size including null terminating character -+ */ -+#define RGXSRV_HWPERF_ALLOC_SW_FENCE(D, PID, SW_FENCE, SW_TL, SPI, N, Z) \ -+ do { \ -+ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ -+ { \ -+ RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ -+ uAllocDetail.sSWFenceAlloc.uiPID = (PID); \ -+ uAllocDetail.sSWFenceAlloc.hSWFence = (SW_FENCE); \ -+ uAllocDetail.sSWFenceAlloc.hSWTimeline = (SW_TL); \ -+ uAllocDetail.sSWFenceAlloc.ui64SyncPtIndex = (SPI); \ -+ RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ -+ RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, \ -+ N, Z, &uAllocDetail); \ -+ } \ -+ } while (0) -+ -+/** -+ * This macro checks if HWPerfHost and the event are enabled and if they are -+ * it posts event to the HWPerfHost stream. -+ * -+ * @param D Device Node pointer -+ * @param T Host ALLOC event type -+ * @param FWADDR sync firmware address -+ */ -+#define RGXSRV_HWPERF_FREE(D, T, FWADDR) \ -+ do { \ -+ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \ -+ { \ -+ RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ -+ RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ -+ (0), (0), (FWADDR)); \ -+ } \ -+ } while (0) -+ -+/** -+ * This macro checks if HWPerfHost and the event are enabled and if they are -+ * it posts event to the HWPerfHost stream. 
-+ * -+ * @param D Device Node pointer -+ * @param T Host ALLOC event type -+ * @param UID ID of input object -+ * @param PID ID of allocating process -+ * @param FWADDR sync firmware address -+ */ -+#define RGXSRV_HWPERF_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR) \ -+ do { \ -+ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \ -+ { \ -+ RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ -+ RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ -+ (UID), (PID), (FWADDR)); \ -+ } \ -+ } while (0) -+ -+/** -+ * This macro checks if HWPerfHost and the event are enabled and if they are -+ * it posts event to the HWPerfHost stream. -+ * -+ * @param D Device Node pointer -+ * @param T Host ALLOC event type -+ * @param NEWUID ID of output object -+ * @param UID1 ID of first input object -+ * @param UID2 ID of second input object -+ * @param N string containing new object's name -+ * @param Z string size including null terminating character -+ */ -+#define RGXSRV_HWPERF_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z) \ -+ do { \ -+ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_MODIFY)) \ -+ { \ -+ RGXHWPerfHostPostModifyEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ -+ RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ -+ (NEWUID), (UID1), (UID2), N, Z); \ -+ } \ -+ } while (0) -+ -+ -+/** -+ * This macro checks if HWPerfHost and the event are enabled and if they are -+ * it posts event to the HWPerfHost stream. -+ * -+ * @param I Device info pointer -+ */ -+#define RGXSRV_HWPERF_CLK_SYNC(I) \ -+ do { \ -+ if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_CLK_SYNC)) \ -+ { \ -+ RGXHWPerfHostPostClkSyncEvent((I)); \ -+ } \ -+ } while (0) -+ -+ -+/** -+ * This macro checks if HWPerfHost and the event are enabled and if they are -+ * it posts a device info health event to the HWPerfHost stream. -+ * -+ * @param I Device info pointer -+ * @param H Health status enum -+ * @param R Health reason enum -+ */ -+#define RGXSRV_HWPERF_DEVICE_INFO_HEALTH(I, H, R) \ -+ do { \ -+ if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_DEV_INFO)) \ -+ { \ -+ RGX_HWPERF_HOST_DEV_INFO_DETAIL uDevDetail; \ -+ uDevDetail.sDeviceStatus.eDeviceHealthStatus = RGXHWPerfConvDeviceHealthStatus(H); \ -+ uDevDetail.sDeviceStatus.eDeviceHealthReason = RGXHWPerfConvDeviceHealthReason(R); \ -+ RGXHWPerfHostPostDeviceInfo((I), RGX_HWPERF_DEV_INFO_EV_HEALTH, &uDevDetail); \ -+ } \ -+ } while (0) -+ -+/** -+ * This macro checks if HWPerfHost and the event are enabled and if they are -+ * it posts a device info features event to the HWPerfHost stream. -+ * -+ * @param I Device info pointer -+ */ -+#define RGXSRV_HWPERF_DEVICE_INFO_FEATURES(I) \ -+ do { \ -+ if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_DEV_INFO)) \ -+ { \ -+ RGXHWPerfHostPostDeviceInfo((I), RGX_HWPERF_DEV_INFO_EV_FEATURES, NULL); \ -+ } \ -+ } while (0) -+ -+/** -+ * This macro checks if HWPerfHost and the event are enabled and if they are -+ * it posts event to the HWPerfHost stream. 
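The RGXSRV_HWPERF_* macros above only post when RGXHWPerfHostIsEventEnabled (or the raw filter test) reports the corresponding host event as enabled, so call sites stay cheap when tracing is off. A small sketch of two call sites, assuming psDevInfo is the PVRSRV_RGXDEV_INFO of the device being serviced; the health values used are simply the OK/none pair handled by the conversion helpers above.

/* Hedged sketch of typical call sites; psDevInfo must be in scope where
 * these macros are used. */
static void ExampleHWPerfHostPosts(PVRSRV_RGXDEV_INFO *psDevInfo)
{
	/* Posts only if RGX_HWPERF_HOST_CLK_SYNC is set in the host filter */
	RGXSRV_HWPERF_CLK_SYNC(psDevInfo);

	/* Report the current health state observed by the host */
	RGXSRV_HWPERF_DEVICE_INFO_HEALTH(psDevInfo,
	                                 PVRSRV_DEVICE_HEALTH_STATUS_OK,
	                                 PVRSRV_DEVICE_HEALTH_REASON_NONE);
}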
-+ * -+ * @param I Device info pointer -+ * @param T Event type -+ */ -+#define RGXSRV_HWPERF_HOST_INFO(I, T) \ -+do { \ -+ if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_INFO)) \ -+ { \ -+ RGXHWPerfHostPostInfo((I), (T)); \ -+ } \ -+} while (0) -+ -+/** -+ * @param I Device info pointer -+ * @param T Wait Event type -+ * @param PID Process ID that the following fence belongs to -+ * @param F Fence handle -+ * @param D Data for this wait event type -+ */ -+#define RGXSRV_HWPERF_SYNC_FENCE_WAIT(I, T, PID, F, D) \ -+do { \ -+ if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_SYNC_FENCE_WAIT)) \ -+ { \ -+ RGXHWPerfHostPostFenceWait(I, RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_##T, \ -+ (PID), (F), (D)); \ -+ } \ -+} while (0) -+ -+/** -+ * @param I Device info pointer -+ * @param PID Process ID that the following timeline belongs to -+ * @param F SW-timeline handle -+ * @param SPI Sync-pt index where this SW-timeline has reached -+ */ -+#define RGXSRV_HWPERF_SYNC_SW_TL_ADV(I, PID, SW_TL, SPI)\ -+do { \ -+ if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE)) \ -+ { \ -+ RGXHWPerfHostPostSWTimelineAdv((I), (PID), (SW_TL), (SPI)); \ -+ } \ -+} while (0) -+ -+/** -+ * @param D Device Node pointer -+ * @param PID Process ID that the following timeline belongs to -+ * @param N Null terminated string containing the process name -+ */ -+#define RGXSRV_HWPERF_HOST_CLIENT_INFO_PROCESS_NAME(D, PID, N) \ -+do { \ -+ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_CLIENT_INFO)) \ -+ { \ -+ RGXHWPerfHostPostClientInfoProcName(_RGX_DEVICE_INFO_FROM_NODE(D), (PID), (N)); \ -+ } \ -+} while (0) -+ -+#else -+ -+#define RGXSRV_HWPERF_ENQ(C, P, X, E, I, K, CF, UF, UT, CHKUID, UPDUID, D, CE) -+#define RGXSRV_HWPERF_UFO(I, T, D, S) -+#define RGXSRV_HWPERF_ALLOC(D, T, FWADDR, N, Z) -+#define RGXSRV_HWPERF_ALLOC_FENCE(D, PID, FENCE, FWADDR, N, Z) -+#define RGXSRV_HWPERF_ALLOC_SYNC_CP(D, TL, PID, FENCE, FWADDR, N, Z) -+#define RGXSRV_HWPERF_ALLOC_SW_FENCE(D, PID, SW_FENCE, SW_TL, SPI, N, Z) -+#define RGXSRV_HWPERF_FREE(D, T, FWADDR) -+#define RGXSRV_HWPERF_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR) -+#define RGXSRV_HWPERF_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z) -+#define RGXSRV_HWPERF_CLK_SYNC(I) -+#define RGXSRV_HWPERF_DEVICE_INFO_HEALTH(I, H, R) -+#define RGXSRV_HWPERF_DEVICE_INFO_FEATURES(I) -+#define RGXSRV_HWPERF_HOST_INFO(I, T) -+#define RGXSRV_HWPERF_SYNC_FENCE_WAIT(I, T, PID, F, D) -+#define RGXSRV_HWPERF_SYNC_SW_TL_ADV(I, PID, SW_TL, SPI) -+#define RGXSRV_HWPERF_HOST_CLIENT_INFO_PROCESS_NAME(D, PID, N) -+ -+#endif -+ -+#endif /* RGXHWPERF_COMMON_H_ */ -diff --git a/drivers/gpu/drm/img-rogue/rgxinit.c b/drivers/gpu/drm/img-rogue/rgxinit.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxinit.c -@@ -0,0 +1,5208 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device specific initialisation routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device specific functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#if defined(__linux__) -+#include -+#else -+#include -+#endif -+ -+#include "img_defs.h" -+#include "pvr_notifier.h" -+#include "pvrsrv.h" -+#include "pvrsrv_bridge_init.h" -+#include "rgx_bridge_init.h" -+#include "syscommon.h" -+#include "rgx_heaps.h" -+#include "rgxheapconfig.h" -+#include "rgxpower.h" -+#include "tlstream.h" -+#include "pvrsrv_tlstreams.h" -+#include "pvr_ricommon.h" -+ -+#include "rgxinit.h" -+#include "rgxbvnc.h" -+#include "rgxmulticore.h" -+ -+#include "pdump_km.h" -+#include "handle.h" -+#include "allocmem.h" -+#include "devicemem.h" -+#include "devicemem_pdump.h" -+#include "rgxmem.h" -+#include "sync_internal.h" -+#include "pvrsrv_apphint.h" -+#include "os_apphint.h" -+#include "rgxfwdbg.h" -+#include "info_page.h" -+ -+#include "rgxfwimageutils.h" -+#include "rgxutils.h" -+#include "rgxfwutils.h" -+#include "rgx_fwif_km.h" -+ -+#include "rgxmmuinit.h" -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+#include "rgxmipsmmuinit.h" -+#endif -+#include "physmem.h" -+#include "devicemem_utils.h" -+#include "devicemem_server.h" -+#include "physmem_osmem.h" -+#include "physmem_lma.h" -+ -+#include "rgxdebug_common.h" -+#include "rgxhwperf.h" -+#include "htbserver.h" -+ -+#include "rgx_options.h" -+#include "pvrversion.h" -+ -+#include "rgx_compat_bvnc.h" -+ -+#include "rgx_heaps.h" -+ -+#include "rgxta3d.h" -+#include "rgxtimecorr.h" -+#include "rgxshader.h" -+ -+#include "rgx_bvnc_defs_km.h" -+#if defined(PDUMP) -+#include "rgxstartstop.h" -+#endif -+ -+#include "rgx_fwif_alignchecks.h" -+#include "vmm_pvz_client.h" -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+#include "rgxworkest.h" -+#endif -+ -+#if defined(SUPPORT_PDVFS) -+#include "rgxpdvfs.h" -+#endif -+ -+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) -+#include "rgxsoctimer.h" -+#endif -+ -+#if defined(PDUMP) && defined(SUPPORT_SECURITY_VALIDATION) -+#include "pdump_physmem.h" -+#endif -+ -+static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode); -+static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_CHAR **ppszVersionString); -+static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PUINT32 pui32RGXClockSpeed); -+static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64ResetValue1, IMG_UINT64 ui64ResetValue2); -+static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode); -+static void DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) -+static PVRSRV_ERROR RGXInitFwRawHeap(PVRSRV_RGXDEV_INFO *psDevInfo, DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32DriverID); -+static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap); -+#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */ -+ -+/* Services internal heap identification used in this file only */ -+#define RGX_FIRMWARE_MAIN_HEAP_IDENT "FwMain" /*!< RGX Main Firmware Heap identifier */ -+#define RGX_FIRMWARE_CONFIG_HEAP_IDENT "FwConfig" /*!< RGX Config firmware Heap identifier */ -+ -+#define RGX_MMU_PAGE_SIZE_4KB ( 4 * 1024) -+#define RGX_MMU_PAGE_SIZE_16KB ( 16 * 1024) -+#define RGX_MMU_PAGE_SIZE_64KB ( 64 * 1024) -+#define RGX_MMU_PAGE_SIZE_256KB ( 256 * 1024) -+#define RGX_MMU_PAGE_SIZE_1MB (1024 * 1024) -+#define RGX_MMU_PAGE_SIZE_2MB (2048 * 1024) -+#define RGX_MMU_PAGE_SIZE_MIN RGX_MMU_PAGE_SIZE_4KB -+#define RGX_MMU_PAGE_SIZE_MAX 
RGX_MMU_PAGE_SIZE_2MB -+ -+#define VAR(x) #x -+ -+static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo); -+ -+#if !defined(NO_HARDWARE) -+/*************************************************************************/ /*! -+@Function SampleIRQCount -+@Description Utility function taking snapshots of RGX FW interrupt count. -+@Input psDevInfo Device Info structure -+ -+@Return IMG_BOOL Returns IMG_TRUE if RGX FW IRQ is not equal to -+ sampled RGX FW IRQ count for any RGX FW thread. -+ */ /**************************************************************************/ -+static INLINE IMG_BOOL SampleIRQCount(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ IMG_BOOL bReturnVal = IMG_FALSE; -+ volatile IMG_UINT32 *pui32SampleIrqCount = psDevInfo->aui32SampleIRQCount; -+ IMG_UINT32 ui32IrqCnt; -+ -+#if defined(RGX_FW_IRQ_OS_COUNTERS) -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ bReturnVal = IMG_TRUE; -+ } -+ else -+ { -+ get_irq_cnt_val(ui32IrqCnt, RGXFW_HOST_DRIVER_ID, psDevInfo); -+ -+ if (ui32IrqCnt != pui32SampleIrqCount[RGXFW_THREAD_0]) -+ { -+ pui32SampleIrqCount[RGXFW_THREAD_0] = ui32IrqCnt; -+ bReturnVal = IMG_TRUE; -+ } -+ } -+#else -+ IMG_UINT32 ui32TID; -+ -+ for_each_irq_cnt(ui32TID) -+ { -+ get_irq_cnt_val(ui32IrqCnt, ui32TID, psDevInfo); -+ -+ /* treat unhandled interrupts here to align host count with fw count */ -+ if (pui32SampleIrqCount[ui32TID] != ui32IrqCnt) -+ { -+ pui32SampleIrqCount[ui32TID] = ui32IrqCnt; -+ bReturnVal = IMG_TRUE; -+ } -+ } -+#endif -+ -+ return bReturnVal; -+} -+ -+/*************************************************************************/ /*! -+@Function RGXHostSafetyEvents -+@Description Returns the event status masked to keep only the safety -+ events handled by the Host -+@Input psDevInfo Device Info structure -+@Return IMG_UINT32 Status of Host-handled safety events -+ */ /**************************************************************************/ -+static INLINE IMG_UINT32 RGXHostSafetyEvents(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ if (PVRSRV_VZ_MODE_IS(GUEST) || (psDevInfo->ui32HostSafetyEventMask == 0)) -+ { -+ return 0; -+ } -+ else -+ { -+ IMG_UINT32 ui32SafetyEventStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE); -+ return (ui32SafetyEventStatus & psDevInfo->ui32HostSafetyEventMask); -+ } -+} -+ -+/*************************************************************************/ /*! -+@Function RGXSafetyEventCheck -+@Description Clears the Event Status register and checks if any of the -+ safety events need Host handling -+@Input psDevInfo Device Info structure -+@Return IMG_BOOL Are there any safety events for Host to handle ? -+ */ /**************************************************************************/ -+static INLINE IMG_BOOL RGXSafetyEventCheck(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ IMG_BOOL bSafetyEvent = IMG_FALSE; -+ -+ if (psDevInfo->ui32HostSafetyEventMask != 0) -+ { -+ IMG_UINT32 ui32EventStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_EVENT_STATUS); -+ -+ if (BIT_ISSET(ui32EventStatus, RGX_CR_EVENT_STATUS__ROGUEXE__SAFETY_SHIFT)) -+ { -+ /* clear the safety event */ -+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_EVENT_CLEAR, RGX_CR_EVENT_CLEAR__ROGUEXE__SAFETY_EN); -+ -+ /* report if there is anything for the Host to handle */ -+ bSafetyEvent = (RGXHostSafetyEvents(psDevInfo) != 0); -+ } -+ } -+ -+ return bSafetyEvent; -+} -+ -+/*************************************************************************/ /*! 
-+@Function RGXSafetyEventHandler -+@Description Handles the Safety Events that the Host is responsible for -+@Input psDevInfo Device Info structure -+ */ /**************************************************************************/ -+static void RGXSafetyEventHandler(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ IMG_UINT32 ui32HostSafetyStatus = RGXHostSafetyEvents(psDevInfo); -+ RGX_CONTEXT_RESET_REASON eResetReason = RGX_CONTEXT_RESET_REASON_NONE; -+ -+ if (ui32HostSafetyStatus != 0) -+ { -+ /* clear the safety bus events handled by the Host */ -+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE, ui32HostSafetyStatus); -+ -+ if (BIT_ISSET(ui32HostSafetyStatus, RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_SHIFT)) -+ { -+ IMG_UINT32 ui32FaultFlag; -+ IMG_UINT32 ui32FaultFW = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_STATUS); -+ IMG_UINT32 ui32CorrectedBitOffset = RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_SHIFT - -+ RGX_CR_FAULT_FW_STATUS_CPU_DETECT_SHIFT; -+ -+ PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety fault status: 0x%X", __func__, ui32FaultFW)); -+ -+ for (ui32FaultFlag = 0; ui32FaultFlag < ui32CorrectedBitOffset; ui32FaultFlag++) -+ { -+ if (BIT_ISSET(ui32FaultFW, ui32FaultFlag)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety hardware fault detected (0x%lX).", -+ __func__, BIT(ui32FaultFlag))); -+ eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_ERR; -+ } -+ else if BIT_ISSET(ui32FaultFW, ui32FaultFlag + ui32CorrectedBitOffset) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware safety hardware fault corrected.(0x%lX).", -+ __func__, BIT(ui32FaultFlag))); -+ -+ /* Only report this if we haven't detected a more serious error */ -+ if (eResetReason != RGX_CONTEXT_RESET_REASON_FW_ECC_ERR) -+ { -+ eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_OK; -+ } -+ } -+ } -+ -+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_CLEAR, ui32FaultFW); -+ } -+ -+ if (BIT_ISSET(ui32HostSafetyStatus, RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT)) -+ { -+ volatile RGXFWIF_POW_STATE ePowState; -+ -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->ePowState, -+ INVALIDATE); -+ ePowState = psDevInfo->psRGXFWIfFwSysData->ePowState; -+ -+ if (ePowState != RGXFWIF_POW_OFF) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Safety Watchdog Trigger !", __func__)); -+ -+ /* Only report this if we haven't detected a more serious error */ -+ if (eResetReason != RGX_CONTEXT_RESET_REASON_FW_ECC_ERR) -+ { -+ eResetReason = RGX_CONTEXT_RESET_REASON_FW_WATCHDOG; -+ } -+ } -+ } -+ -+ /* Notify client and system layer of any error */ -+ if (eResetReason != RGX_CONTEXT_RESET_REASON_NONE) -+ { -+ PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; -+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; -+ -+ /* Client notification of device error will be achieved by -+ * clients calling UM function RGXGetLastDeviceError() */ -+ psDevInfo->eLastDeviceError = eResetReason; -+ -+ /* Notify system layer of any error */ -+ if (psDevConfig->pfnSysDevErrorNotify) -+ { -+ PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; -+ -+ sErrorData.eResetReason = eResetReason; -+ -+ psDevConfig->pfnSysDevErrorNotify(psDevConfig, -+ &sErrorData); -+ } -+ } -+ } -+} -+ -+static IMG_BOOL _WaitForInterruptsTimeoutCheck(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+#if defined(PVRSRV_DEBUG_LISR_EXECUTION) -+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; -+ IMG_UINT32 ui32idx; -+#endif -+ -+ RGXDEBUG_PRINT_IRQ_COUNT(psDevInfo); -+ -+#if defined(PVRSRV_DEBUG_LISR_EXECUTION) -+ 
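	/* For illustration: SampleIRQCount() above implements a simple per-thread
	 * snapshot-compare. It keeps the last seen FW interrupt count for each FW
	 * thread and reports "new work" whenever any counter has moved since the
	 * previous sample. A minimal sketch of the idiom, with hypothetical names:
	 *
	 *   bool changed = false;
	 *   for (unsigned t = 0; t < NUM_FW_THREADS; t++) {
	 *       uint32_t now = read_fw_irq_count(t);    // volatile read of the FW counter
	 *       if (now != snapshot[t]) {
	 *           snapshot[t] = now;                   // remember what we have seen
	 *           changed = true;
	 *       }
	 *   }
	 *   return changed;
	 */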
PVR_DPF((PVR_DBG_ERROR, -+ "Last RGX_LISRHandler State (DevID %u): 0x%08X Clock: %" IMG_UINT64_FMTSPEC, -+ psDeviceNode->sDevId.ui32InternalID, -+ psDeviceNode->sLISRExecutionInfo.ui32Status, -+ psDeviceNode->sLISRExecutionInfo.ui64Clockns)); -+ -+ for_each_irq_cnt(ui32idx) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ MSG_IRQ_CNT_TYPE " %u: InterruptCountSnapshot: 0x%X", -+ ui32idx, psDeviceNode->sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32idx])); -+ } -+#else -+ PVR_DPF((PVR_DBG_ERROR, "No further information available. Please enable PVRSRV_DEBUG_LISR_EXECUTION")); -+#endif -+ -+ return SampleIRQCount(psDevInfo); -+} -+ -+void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ IMG_BOOL bScheduleMISR; -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ bScheduleMISR = IMG_TRUE; -+ } -+ else -+ { -+ bScheduleMISR = _WaitForInterruptsTimeoutCheck(psDevInfo); -+ } -+ -+ if (bScheduleMISR) -+ { -+ OSScheduleMISR(psDevInfo->pvMISRData); -+ -+ if (psDevInfo->pvAPMISRData != NULL) -+ { -+ OSScheduleMISR(psDevInfo->pvAPMISRData); -+ } -+ } -+} -+ -+static inline IMG_BOOL RGXAckHwIrq(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32IRQStatusReg, -+ IMG_UINT32 ui32IRQStatusEventMsk, -+ IMG_UINT32 ui32IRQClearReg, -+ IMG_UINT32 ui32IRQClearMask) -+{ -+ IMG_UINT32 ui32IRQStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQStatusReg); -+ -+ if (ui32IRQStatus & ui32IRQStatusEventMsk) -+ { -+ /* acknowledge and clear the interrupt */ -+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQClearReg, ui32IRQClearMask); -+ return IMG_TRUE; -+ } -+ else -+ { -+ /* spurious interrupt */ -+ return IMG_FALSE; -+ } -+} -+ -+static IMG_BOOL RGXAckIrqMETA(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ return RGXAckHwIrq(psDevInfo, -+ RGX_CR_META_SP_MSLVIRQSTATUS, -+ RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN, -+ RGX_CR_META_SP_MSLVIRQSTATUS, -+ RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK); -+} -+ -+static IMG_BOOL RGXAckIrqMIPS(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ return RGXAckHwIrq(psDevInfo, -+ RGX_CR_MIPS_WRAPPER_IRQ_STATUS, -+ RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN, -+ RGX_CR_MIPS_WRAPPER_IRQ_CLEAR, -+ RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN); -+} -+ -+static IMG_BOOL RGXAckIrqDedicated(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ /* status & clearing registers are available on both Host and Guests -+ * and are agnostic of the Fw CPU type. 
Due to the remappings done by -+ * the 2nd stage device MMU, all drivers assume they are accessing -+ * register bank 0 */ -+ return RGXAckHwIrq(psDevInfo, -+ RGX_CR_IRQ_OS0_EVENT_STATUS, -+ RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN, -+ RGX_CR_IRQ_OS0_EVENT_CLEAR, -+ RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN); -+} -+ -+static IMG_BOOL RGX_LISRHandler(void *pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = pvData; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ IMG_BOOL bIrqAcknowledged = IMG_FALSE; -+ -+#if defined(PVRSRV_DEBUG_LISR_EXECUTION) -+ IMG_UINT32 ui32idx, ui32IrqCnt; -+ -+ for_each_irq_cnt(ui32idx) -+ { -+ get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo); -+ UPDATE_LISR_DBG_SNAPSHOT(ui32idx, ui32IrqCnt); -+ } -+ -+ UPDATE_LISR_DBG_STATUS(RGX_LISR_INIT); -+ UPDATE_LISR_DBG_TIMESTAMP(); -+#endif -+ -+ UPDATE_LISR_DBG_COUNTER(); -+ -+ if (psDevInfo->bRGXPowered) -+ { -+ IMG_BOOL bSafetyEvent = RGXSafetyEventCheck(psDevInfo); -+ -+ if ((psDevInfo->pfnRGXAckIrq == NULL) || psDevInfo->pfnRGXAckIrq(psDevInfo) || bSafetyEvent) -+ { -+ bIrqAcknowledged = IMG_TRUE; -+ -+ if (bSafetyEvent || SampleIRQCount(psDevInfo)) -+ { -+ UPDATE_LISR_DBG_STATUS(RGX_LISR_PROCESSED); -+ UPDATE_MISR_DBG_COUNTER(); -+ -+ OSScheduleMISR(psDevInfo->pvMISRData); -+ -+#if defined(SUPPORT_AUTOVZ) -+ RGXUpdateAutoVzWdgToken(psDevInfo); -+#endif -+ if (psDevInfo->pvAPMISRData != NULL) -+ { -+ OSScheduleMISR(psDevInfo->pvAPMISRData); -+ } -+ } -+ else -+ { -+ UPDATE_LISR_DBG_STATUS(RGX_LISR_FW_IRQ_COUNTER_NOT_UPDATED); -+ } -+ } -+ else -+ { -+ UPDATE_LISR_DBG_STATUS(RGX_LISR_NOT_TRIGGERED_BY_HW); -+ } -+ } -+ else -+ { -+ /* AutoVz drivers rebooting while the firmware is active must acknowledge -+ * and clear the hw IRQ line before the RGXInit() has finished. */ -+ if (!(psDevInfo->psDeviceNode->bAutoVzFwIsUp && -+ (psDevInfo->pfnRGXAckIrq != NULL) && -+ psDevInfo->pfnRGXAckIrq(psDevInfo))) -+ { -+ UPDATE_LISR_DBG_STATUS(RGX_LISR_DEVICE_NOT_POWERED); -+ } -+ } -+ -+ return bIrqAcknowledged; -+} -+ -+static void RGX_MISR_ProcessKCCBDeferredList(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ OS_SPINLOCK_FLAGS uiFlags; -+ -+ /* First check whether there are pending commands in Deferred KCCB List */ -+ OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); -+ if (dllist_is_empty(&psDevInfo->sKCCBDeferredCommandsListHead)) -+ { -+ OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); -+ return; -+ } -+ OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); -+ -+ /* Powerlock to avoid further Power transition requests -+ while KCCB deferred list is being processed */ -+ eError = PVRSRVPowerLock(psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to acquire PowerLock (device: %p, error: %s)", -+ __func__, psDeviceNode, PVRSRVGetErrorString(eError))); -+ return; -+ } -+ -+ /* Try to send deferred KCCB commands Do not Poll from here*/ -+ eError = RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE); -+ -+ PVRSRVPowerUnlock(psDeviceNode); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s could not flush Deferred KCCB list, KCCB is full.", -+ __func__)); -+ } -+} -+ -+static void RGX_MISRHandler_CheckFWActivePowerState(void *psDevice) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevice; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ const RGXFWIF_SYSDATA *psFwSysData; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ 
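	/* For illustration: RGX_MISR_ProcessKCCBDeferredList() above follows a common
	 * two-lock shape: peek at the deferred list under its own spinlock and bail
	 * out early if it is empty, and only then take the much heavier power lock to
	 * actually flush the commands. A hedged sketch with hypothetical names:
	 *
	 *   spin_lock(&deferred_lock);
	 *   empty = list_empty(&deferred_cmds);
	 *   spin_unlock(&deferred_lock);
	 *   if (empty)
	 *       return;                    // cheap early-out in the common case
	 *   if (acquire_power_lock() != OK)
	 *       return;                    // cannot hold off power transitions right now
	 *   send_deferred_commands(false); // no polling from MISR context
	 *   release_power_lock();
	 */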
RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->ePowState, -+ INVALIDATE); -+ psFwSysData = psDevInfo->psRGXFWIfFwSysData; -+ -+ if (psFwSysData->ePowState == RGXFWIF_POW_ON || psFwSysData->ePowState == RGXFWIF_POW_IDLE) -+ { -+ RGX_MISR_ProcessKCCBDeferredList(psDeviceNode); -+ } -+ -+ if (psFwSysData->ePowState == RGXFWIF_POW_IDLE) -+ { -+ /* The FW is IDLE and therefore could be shut down */ -+ eError = RGXActivePowerRequest(psDeviceNode); -+ -+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)) -+ { -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Failed RGXActivePowerRequest call (device: %p) with %s", -+ __func__, psDeviceNode, PVRSRVGetErrorString(eError))); -+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); -+ } -+ else -+ { -+ /* Re-schedule the power down request as it was deferred. */ -+ OSScheduleMISR(psDevInfo->pvAPMISRData); -+ } -+ } -+ } -+ -+} -+ -+/* Shorter defines to keep the code a bit shorter */ -+#define GPU_IDLE RGXFWIF_GPU_UTIL_STATE_IDLE -+#define GPU_ACTIVE RGXFWIF_GPU_UTIL_STATE_ACTIVE -+#define GPU_BLOCKED RGXFWIF_GPU_UTIL_STATE_BLOCKED -+#define MAX_ITERATIONS 64 -+ -+static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_HANDLE hGpuUtilUser, -+ RGXFWIF_GPU_UTIL_STATS *psReturnStats) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ const volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; -+ RGXFWIF_GPU_UTIL_STATS *psAggregateStats; -+ IMG_UINT64 (*paaui64DMOSTmpCounters)[RGX_NUM_DRIVERS_SUPPORTED][RGXFWIF_GPU_UTIL_STATE_NUM]; -+ IMG_UINT64 (*paui64DMOSTmpLastWord)[RGX_NUM_DRIVERS_SUPPORTED]; -+ IMG_UINT64 (*paui64DMOSTmpLastState)[RGX_NUM_DRIVERS_SUPPORTED]; -+ IMG_UINT64 (*paui64DMOSTmpLastPeriod)[RGX_NUM_DRIVERS_SUPPORTED]; -+ IMG_UINT64 (*paui64DMOSTmpLastTime)[RGX_NUM_DRIVERS_SUPPORTED]; -+ IMG_UINT64 ui64TimeNow; -+ IMG_UINT32 ui32Attempts; -+ IMG_UINT32 ui32Remainder; -+ IMG_UINT32 ui32DriverID; -+ IMG_UINT32 ui32MaxDMCount; -+ RGXFWIF_DM eDM; -+ -+ -+ /***** (1) Initialise return stats *****/ -+ -+ psReturnStats->bValid = IMG_FALSE; -+ psReturnStats->ui64GpuStatIdle = 0; -+ psReturnStats->ui64GpuStatActive = 0; -+ psReturnStats->ui64GpuStatBlocked = 0; -+ psReturnStats->ui64GpuStatCumulative = 0; -+ -+ memset(psReturnStats->aaui64DMOSStatIdle, 0, sizeof(psReturnStats->aaui64DMOSStatIdle)); -+ memset(psReturnStats->aaui64DMOSStatActive, 0, sizeof(psReturnStats->aaui64DMOSStatActive)); -+ memset(psReturnStats->aaui64DMOSStatBlocked, 0, sizeof(psReturnStats->aaui64DMOSStatBlocked)); -+ memset(psReturnStats->aaui64DMOSStatCumulative, 0, sizeof(psReturnStats->aaui64DMOSStatCumulative)); -+ -+ if (hGpuUtilUser == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ psAggregateStats = hGpuUtilUser; -+ -+ ui32MaxDMCount = psDevInfo->sDevFeatureCfg.ui32MAXDMCount; -+ -+ /* Reset temporary counters used in the attempts loop */ -+ memset(&psReturnStats->sTempGpuStats, 0, sizeof(psReturnStats->sTempGpuStats)); -+ paaui64DMOSTmpCounters = &psAggregateStats->sTempGpuStats.aaaui64DMOSTmpCounters[0]; -+ paui64DMOSTmpLastWord = &psAggregateStats->sTempGpuStats.aaui64DMOSTmpLastWord[0]; -+ paui64DMOSTmpLastState = &psAggregateStats->sTempGpuStats.aaui64DMOSTmpLastState[0]; -+ paui64DMOSTmpLastPeriod = &psAggregateStats->sTempGpuStats.aaui64DMOSTmpLastPeriod[0]; -+ paui64DMOSTmpLastTime = &psAggregateStats->sTempGpuStats.aaui64DMOSTmpLastTime[0]; -+ -+ 
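	/* For illustration: the attempts loop below takes a lock-free, bounded
	 * snapshot of counters that the firmware updates concurrently. It keeps
	 * re-reading the shared "last word" and per-state counters until a pass sees
	 * no change (or MAX_ITERATIONS is hit), then derives elapsed periods from the
	 * embedded timestamps. A minimal sketch of the idiom, with hypothetical names:
	 *
	 *   uint64_t last = 0, tmp[NUM_STATES] = {0};
	 *   unsigned i = 0, state = 0;
	 *   while ((last != shared->last_word || tmp[state] != shared->counter[state]) &&
	 *          i < MAX_ITERATIONS) {
	 *       last  = shared->last_word;
	 *       state = STATE_OF(last);
	 *       tmp[IDLE]    = shared->counter[IDLE];
	 *       tmp[ACTIVE]  = shared->counter[ACTIVE];
	 *       tmp[BLOCKED] = shared->counter[BLOCKED];
	 *       i++;
	 *   }
	 *   if (i == MAX_ITERATIONS)
	 *       return TIMEOUT;            // FW kept updating; report no reliable data
	 */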
RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfGpuUtilFWCb, INVALIDATE); -+ -+ /* Try to acquire GPU utilisation counters and repeat if the FW is in the middle of an update */ -+ for (ui32Attempts = 0; ui32Attempts < 4; ui32Attempts++) -+ { -+ const volatile IMG_UINT64 *pui64GpuStatsCounters = &psUtilFWCb->aui64GpuStatsCounters[0]; -+ const volatile IMG_UINT64 (*paui64DMOSLastWord)[RGXFW_MAX_NUM_OSIDS] = &psUtilFWCb->aaui64DMOSLastWord[0]; -+ const volatile IMG_UINT64 (*paaui64DMOSStatsCounters)[RGXFW_MAX_NUM_OSIDS][RGXFWIF_GPU_UTIL_STATE_NUM] = &psUtilFWCb->aaaui64DMOSStatsCounters[0]; -+ -+ IMG_UINT64 aui64GpuTmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0}; -+ IMG_UINT64 ui64GpuLastPeriod = 0, ui64GpuLastWord = 0, ui64GpuLastState = 0, ui64GpuLastTime = 0; -+ IMG_UINT32 i = 0; -+ -+ -+ /***** (2) Get latest data from shared area *****/ -+ -+ OSLockAcquire(psDevInfo->hGPUUtilLock); -+ -+ /* -+ * First attempt at detecting if the FW is in the middle of an update. -+ * This should also help if the FW is in the middle of a 64 bit variable update. -+ */ -+ while (((ui64GpuLastWord != psUtilFWCb->ui64GpuLastWord) || -+ (aui64GpuTmpCounters[ui64GpuLastState] != -+ pui64GpuStatsCounters[ui64GpuLastState])) && -+ (i < MAX_ITERATIONS)) -+ { -+ ui64GpuLastWord = psUtilFWCb->ui64GpuLastWord; -+ ui64GpuLastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64GpuLastWord); -+ aui64GpuTmpCounters[GPU_IDLE] = pui64GpuStatsCounters[GPU_IDLE]; -+ aui64GpuTmpCounters[GPU_ACTIVE] = pui64GpuStatsCounters[GPU_ACTIVE]; -+ aui64GpuTmpCounters[GPU_BLOCKED] = pui64GpuStatsCounters[GPU_BLOCKED]; -+ -+ for (eDM = 0; eDM < ui32MaxDMCount; eDM++) -+ { -+ FOREACH_SUPPORTED_DRIVER(ui32DriverID) -+ { -+ paui64DMOSTmpLastWord[eDM][ui32DriverID] = paui64DMOSLastWord[eDM][ui32DriverID]; -+ paui64DMOSTmpLastState[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_STATE(paui64DMOSTmpLastWord[eDM][ui32DriverID]); -+ paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_IDLE] = paaui64DMOSStatsCounters[eDM][ui32DriverID][GPU_IDLE]; -+ paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_ACTIVE] = paaui64DMOSStatsCounters[eDM][ui32DriverID][GPU_ACTIVE]; -+ paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_BLOCKED] = paaui64DMOSStatsCounters[eDM][ui32DriverID][GPU_BLOCKED]; -+ } -+ } -+ -+ i++; -+ } -+ -+ OSLockRelease(psDevInfo->hGPUUtilLock); -+ -+ if (i == MAX_ITERATIONS) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "RGXGetGpuUtilStats could not get reliable data after trying %u times", i)); -+ -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ -+ /***** (3) Compute return stats *****/ -+ -+ /* Update temp counters to account for the time since the last update to the shared ones */ -+ OSMemoryBarrier(NULL); /* Ensure the current time is read after the loop above */ -+ ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64(psDeviceNode)); -+ -+ ui64GpuLastTime = RGXFWIF_GPU_UTIL_GET_TIME(ui64GpuLastWord); -+ ui64GpuLastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64GpuLastTime); -+ aui64GpuTmpCounters[ui64GpuLastState] += ui64GpuLastPeriod; -+ -+ /* Get statistics for a user since its last request */ -+ psReturnStats->ui64GpuStatIdle = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64GpuTmpCounters[GPU_IDLE], -+ psAggregateStats->ui64GpuStatIdle); -+ psReturnStats->ui64GpuStatActive = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64GpuTmpCounters[GPU_ACTIVE], -+ psAggregateStats->ui64GpuStatActive); -+ psReturnStats->ui64GpuStatBlocked = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64GpuTmpCounters[GPU_BLOCKED], -+ psAggregateStats->ui64GpuStatBlocked); -+ psReturnStats->ui64GpuStatCumulative = 
psReturnStats->ui64GpuStatIdle + -+ psReturnStats->ui64GpuStatActive + -+ psReturnStats->ui64GpuStatBlocked; -+ -+ for (eDM = 0; eDM < ui32MaxDMCount; eDM++) -+ { -+ FOREACH_SUPPORTED_DRIVER(ui32DriverID) -+ { -+ paui64DMOSTmpLastTime[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_TIME(paui64DMOSTmpLastWord[eDM][ui32DriverID]); -+ paui64DMOSTmpLastPeriod[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, paui64DMOSTmpLastTime[eDM][ui32DriverID]); -+ paaui64DMOSTmpCounters[eDM][ui32DriverID][paui64DMOSTmpLastState[eDM][ui32DriverID]] += paui64DMOSTmpLastPeriod[eDM][ui32DriverID]; -+ -+ /* Get statistics for a user since its last request */ -+ psReturnStats->aaui64DMOSStatIdle[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_IDLE], -+ psAggregateStats->aaui64DMOSStatIdle[eDM][ui32DriverID]); -+ psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_ACTIVE], -+ psAggregateStats->aaui64DMOSStatActive[eDM][ui32DriverID]); -+ psReturnStats->aaui64DMOSStatBlocked[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_BLOCKED], -+ psAggregateStats->aaui64DMOSStatBlocked[eDM][ui32DriverID]); -+ psReturnStats->aaui64DMOSStatCumulative[eDM][ui32DriverID] = psReturnStats->aaui64DMOSStatIdle[eDM][ui32DriverID] + -+ psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID] + -+ psReturnStats->aaui64DMOSStatBlocked[eDM][ui32DriverID]; -+ } -+ } -+ -+ if (psAggregateStats->ui64TimeStamp != 0) -+ { -+ IMG_UINT64 ui64TimeSinceLastCall = ui64TimeNow - psAggregateStats->ui64TimeStamp; -+ /* We expect to return at least 75% of the time since the last call in GPU stats */ -+ IMG_UINT64 ui64MinReturnedStats = ui64TimeSinceLastCall - (ui64TimeSinceLastCall / 4); -+ -+ /* -+ * If the returned stats are substantially lower than the time since -+ * the last call, then the Host might have read a partial update from the FW. -+ * If this happens, try sampling the shared counters again. 
-+ */ -+ if (psReturnStats->ui64GpuStatCumulative < ui64MinReturnedStats) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Return stats (%" IMG_UINT64_FMTSPEC ") too low " -+ "(call period %" IMG_UINT64_FMTSPEC ")", -+ __func__, psReturnStats->ui64GpuStatCumulative, ui64TimeSinceLastCall)); -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Attempt #%u has failed, trying again", -+ __func__, ui32Attempts)); -+ continue; -+ } -+ } -+ -+ break; -+ } -+ -+ /***** (4) Update aggregate stats for the current user *****/ -+ -+ psAggregateStats->ui64GpuStatIdle += psReturnStats->ui64GpuStatIdle; -+ psAggregateStats->ui64GpuStatActive += psReturnStats->ui64GpuStatActive; -+ psAggregateStats->ui64GpuStatBlocked += psReturnStats->ui64GpuStatBlocked; -+ psAggregateStats->ui64TimeStamp = ui64TimeNow; -+ -+ for (eDM = 0; eDM < ui32MaxDMCount; eDM++) -+ { -+ FOREACH_SUPPORTED_DRIVER(ui32DriverID) -+ { -+ psAggregateStats->aaui64DMOSStatIdle[eDM][ui32DriverID] += psReturnStats->aaui64DMOSStatIdle[eDM][ui32DriverID]; -+ psAggregateStats->aaui64DMOSStatActive[eDM][ui32DriverID] += psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID]; -+ psAggregateStats->aaui64DMOSStatBlocked[eDM][ui32DriverID] += psReturnStats->aaui64DMOSStatBlocked[eDM][ui32DriverID]; -+ } -+ } -+ -+ /***** (5) Convert return stats to microseconds *****/ -+ -+ psReturnStats->ui64GpuStatIdle = OSDivide64(psReturnStats->ui64GpuStatIdle, 1000, &ui32Remainder); -+ psReturnStats->ui64GpuStatActive = OSDivide64(psReturnStats->ui64GpuStatActive, 1000, &ui32Remainder); -+ psReturnStats->ui64GpuStatBlocked = OSDivide64(psReturnStats->ui64GpuStatBlocked, 1000, &ui32Remainder); -+ psReturnStats->ui64GpuStatCumulative = OSDivide64(psReturnStats->ui64GpuStatCumulative, 1000, &ui32Remainder); -+ -+ for (eDM = 0; eDM < ui32MaxDMCount; eDM++) -+ { -+ FOREACH_SUPPORTED_DRIVER(ui32DriverID) -+ { -+ psReturnStats->aaui64DMOSStatIdle[eDM][ui32DriverID] = OSDivide64(psReturnStats->aaui64DMOSStatIdle[eDM][ui32DriverID], 1000, &ui32Remainder); -+ psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID] = OSDivide64(psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID], 1000, &ui32Remainder); -+ psReturnStats->aaui64DMOSStatBlocked[eDM][ui32DriverID] = OSDivide64(psReturnStats->aaui64DMOSStatBlocked[eDM][ui32DriverID], 1000, &ui32Remainder); -+ psReturnStats->aaui64DMOSStatCumulative[eDM][ui32DriverID] = OSDivide64(psReturnStats->aaui64DMOSStatCumulative[eDM][ui32DriverID], 1000, &ui32Remainder); -+ } -+ } -+ -+ /* Check that the return stats make sense */ -+ if (psReturnStats->ui64GpuStatCumulative == 0) -+ { -+ /* We can enter here only if allocating the temporary stats -+ * buffers failed, or all the RGXFWIF_GPU_UTIL_GET_PERIOD -+ * returned 0. The latter could happen if the GPU frequency value -+ * is not well calibrated and the FW is updating the GPU state -+ * while the Host is reading it. -+ * When such an event happens frequently, timers or the aggregate -+ * stats might not be accurate... -+ */ -+#if defined(VIRTUAL_PLATFORM) -+ /* To avoid spamming the console logging system on emulated devices, -+ * we special-case so that we will only produce a single message per -+ * driver invocation. 
This should reduce the time spent logging -+ * information which is not relevant for very slow timers found in -+ * VP device configurations -+ */ -+ static IMG_BOOL bFirstTime = IMG_TRUE; -+ -+ if (bFirstTime) -+ { -+ bFirstTime = IMG_FALSE; -+#endif -+ PVR_DPF((PVR_DBG_WARNING, "RGXGetGpuUtilStats could not get reliable data.")); -+#if defined(VIRTUAL_PLATFORM) -+ } -+#endif /* defined(VIRTUAL_PLATFORM) */ -+ return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; -+ } -+ -+ psReturnStats->bValid = IMG_TRUE; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser) -+{ -+ RGXFWIF_GPU_UTIL_STATS *psAggregateStats; -+ -+ /* NoStats used since this may be called outside of the register/de-register -+ * process calls which track memory use. */ -+ psAggregateStats = OSAllocZMemNoStats(sizeof(RGXFWIF_GPU_UTIL_STATS)); -+ if (psAggregateStats == NULL) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ *phGpuUtilUser = psAggregateStats; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser) -+{ -+ RGXFWIF_GPU_UTIL_STATS *psAggregateStats; -+ -+ if (hGpuUtilUser == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psAggregateStats = hGpuUtilUser; -+ OSFreeMemNoStats(psAggregateStats); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ RGX MISR Handler -+*/ -+static void RGX_MISRHandler_Main (void *pvData) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = pvData; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ /* Give the HWPerf service a chance to transfer some data from the FW -+ * buffer to the host driver transport layer buffer. -+ */ -+ RGXHWPerfDataStoreCB(psDeviceNode); -+ -+ /* Inform other services devices that we have finished an operation */ -+ PVRSRVNotifyCommandCompletion(psDeviceNode); -+ -+#if defined(SUPPORT_PDVFS) && defined(RGXFW_META_SUPPORT_2ND_THREAD) -+ /* Normally, firmware CCB only exists for the primary FW thread unless PDVFS -+ is running on the second[ary] FW thread, here we process said CCB */ -+ RGXPDVFSCheckCoreClkRateChange(psDeviceNode->pvDevice); -+#endif -+ -+ /* Handle Safety events if necessary */ -+ RGXSafetyEventHandler(psDeviceNode->pvDevice); -+ -+ /* Signal the global event object */ -+ PVRSRVSignalDriverWideEO(); -+ -+ /* Process the Firmware CCB for pending commands */ -+ RGXCheckFirmwareCCB(psDeviceNode->pvDevice); -+ -+ /* Calibrate the GPU frequency and recorrelate Host and GPU timers (done every few seconds) */ -+ RGXTimeCorrRestartPeriodic(psDeviceNode); -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Process Workload Estimation Specific commands from the FW */ -+ WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice); -+ } -+#endif -+ -+ if (psDevInfo->pvAPMISRData == NULL) -+ { -+ RGX_MISR_ProcessKCCBDeferredList(psDeviceNode); -+ } -+} -+#endif /* !defined(NO_HARDWARE) */ -+ -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) && defined(PDUMP) -+static PVRSRV_ERROR RGXPDumpBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode, -+ PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ PMR *psFWDataPMR; -+ RGXMIPSFW_BOOT_DATA *psBootData; -+ IMG_DEV_PHYADDR sTmpAddr; -+ IMG_UINT32 ui32BootConfOffset, ui32ParamOffset, i; -+ PVRSRV_ERROR eError; -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); -+ -+ psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR); -+ ui32BootConfOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA); -+ ui32BootConfOffset += RGXMIPSFW_BOOTLDR_CONF_OFFSET; -+ -+ /* The physical addresses used by a pdump player will be different -+ * 
than the ones we have put in the MIPS bootloader configuration data. -+ * We have to tell the pdump player to replace the original values with the real ones. -+ */ -+ PDUMPCOMMENT(psDeviceNode, "Pass new boot parameters to the FW"); -+ -+ /* Rogue Registers physical address */ -+ ui32ParamOffset = ui32BootConfOffset + offsetof(RGXMIPSFW_BOOT_DATA, ui64RegBase); -+ -+ eError = PDumpRegLabelToMem64(RGX_PDUMPREG_NAME, -+ 0x0, -+ psFWDataPMR, -+ ui32ParamOffset, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of Rogue registers phy address failed (%u)", eError)); -+ return eError; -+ } -+ -+ /* Page Table physical Address */ -+ eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sTmpAddr); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "RGXBootldrDataInit: MMU_AcquireBaseAddr failed (%u)", -+ eError)); -+ return eError; -+ } -+ -+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, -+ (void **)&psBootData); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire pointer to FW data (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ return eError; -+ } -+ -+ psBootData = IMG_OFFSET_ADDR(psBootData, ui32BootConfOffset); -+ -+ for (i = 0; i < psBootData->ui32PTNumPages; i++) -+ { -+ ui32ParamOffset = ui32BootConfOffset + -+ offsetof(RGXMIPSFW_BOOT_DATA, aui64PTPhyAddr[0]) -+ + i * sizeof(psBootData->aui64PTPhyAddr[0]); -+ -+ eError = PDumpPTBaseObjectToMem64(psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName, -+ psFWDataPMR, -+ 0, -+ ui32ParamOffset, -+ PDUMP_FLAGS_CONTINUOUS, -+ MMU_LEVEL_1, -+ sTmpAddr.uiAddr, -+ i << psBootData->ui32PTLog2PageSize); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of page tables phy address failed (%u)", eError)); -+ return eError; -+ } -+ } -+ -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc); -+ -+ /* Stack physical address */ -+ ui32ParamOffset = ui32BootConfOffset + offsetof(RGXMIPSFW_BOOT_DATA, ui64StackPhyAddr); -+ -+ eError = PDumpMemLabelToMem64(psFWDataPMR, -+ psFWDataPMR, -+ RGXGetFWImageSectionOffset(NULL, MIPS_STACK), -+ ui32ParamOffset, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of stack phy address failed (%u)", eError)); -+ return eError; -+ } -+ -+ return eError; -+} -+#endif /* PDUMP */ -+ -+static PVRSRV_ERROR RGXSetPowerParams(PVRSRV_RGXDEV_INFO *psDevInfo, -+ PVRSRV_DEVICE_CONFIG *psDevConfig) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); -+ -+ /* Save information used on power transitions for later -+ * (when RGXStart and RGXStop are executed) -+ */ -+ psDevInfo->sLayerParams.psDevInfo = psDevInfo; -+ psDevInfo->sLayerParams.psDevConfig = psDevConfig; -+#if defined(PDUMP) -+ psDevInfo->sLayerParams.ui32PdumpFlags = PDUMP_FLAGS_CONTINUOUS; -+#endif -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || -+ RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) -+ { -+ IMG_DEV_PHYADDR sKernelMMUCtxPCAddr; -+ -+ if (psDevInfo->psDeviceNode->bAutoVzFwIsUp) -+ { -+ /* If AutoVz firmware is up at this stage, the driver initialised it -+ * during a previous life-cycle. The firmware's memory is already pre-mapped -+ * and the MMU page tables reside in the predetermined memory carveout. -+ * The Kernel MMU Context created in this life-cycle is a dummy structure -+ * that is not used for mapping. 
-+ * To program the Device's BIF with the correct PC address, use the base -+ * address of the carveout reserved for MMU mappings as Kernel MMU PC Address */ -+ IMG_DEV_PHYADDR sDevPAddr; -+ PHYS_HEAP *psFwPageTableHeap = psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]; -+ -+ PVR_LOG_RETURN_IF_FALSE((NULL != psFwPageTableHeap), -+ "Firmware Page Table heap not defined.", -+ PVRSRV_ERROR_INVALID_HEAP); -+ -+ PhysHeapGetDevPAddr(psFwPageTableHeap, &sDevPAddr); -+ sKernelMMUCtxPCAddr.uiAddr = sDevPAddr.uiAddr; -+ } -+ else -+ { -+ eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, -+ &sKernelMMUCtxPCAddr); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire Kernel MMU Ctx page catalog")); -+ return eError; -+ } -+ } -+ -+ psDevInfo->sLayerParams.sPCAddr = sKernelMMUCtxPCAddr; -+ } -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ else -+ { -+ PMR *psFWCodePMR = (PMR *)(psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR); -+ PMR *psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR); -+ IMG_DEV_PHYADDR sPhyAddr; -+ IMG_BOOL bValid; -+ -+#if defined(SUPPORT_ALT_REGBASE) -+ psDevInfo->sLayerParams.sGPURegAddr = psDevConfig->sAltRegsGpuPBase; -+#else -+ /* The physical address of the GPU registers needs to be translated -+ * in case we are in a LMA scenario -+ */ -+ PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL], -+ 1, -+ &sPhyAddr, -+ &(psDevConfig->sRegsCpuPBase)); -+ -+ psDevInfo->sLayerParams.sGPURegAddr = sPhyAddr; -+#endif -+ -+ /* Register bank must be aligned to 512KB (as per the core integration) to -+ * prevent the FW accessing incorrect registers */ -+ if ((psDevInfo->sLayerParams.sGPURegAddr.uiAddr & 0x7FFFFU) != 0U) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Register bank must be aligned to 512KB, but current address (0x%016"IMG_UINT64_FMTSPECX") is not", -+ psDevInfo->sLayerParams.sGPURegAddr.uiAddr)); -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ -+ eError = RGXGetPhyAddr(psFWCodePMR, -+ &sPhyAddr, -+ RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_CODE), -+ OSGetPageShift(), /* FW will be using the same page size as the OS */ -+ 1, -+ &bValid); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW boot/NMI code address")); -+ return eError; -+ } -+ -+ psDevInfo->sLayerParams.sBootRemapAddr = sPhyAddr; -+ -+ eError = RGXGetPhyAddr(psFWDataPMR, -+ &sPhyAddr, -+ RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA), -+ OSGetPageShift(), -+ 1, -+ &bValid); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW boot/NMI data address")); -+ return eError; -+ } -+ -+ psDevInfo->sLayerParams.sDataRemapAddr = sPhyAddr; -+ -+ eError = RGXGetPhyAddr(psFWCodePMR, -+ &sPhyAddr, -+ RGXGetFWImageSectionOffset(NULL, MIPS_EXCEPTIONS_CODE), -+ OSGetPageShift(), -+ 1, -+ &bValid); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW exceptions address")); -+ return eError; -+ } -+ -+ psDevInfo->sLayerParams.sCodeRemapAddr = sPhyAddr; -+ -+ psDevInfo->sLayerParams.sTrampolineRemapAddr.uiAddr = psDevInfo->psTrampoline->sPhysAddr.uiAddr; -+ -+ psDevInfo->sLayerParams.bDevicePA0IsValid = psDevConfig->bDevicePA0IsValid; -+ } -+#endif -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) -+ /* Send information used on power transitions to the trusted device as -+ * in this setup the driver cannot 
start/stop the GPU and perform resets -+ */ -+ if (psDevConfig->pfnTDSetPowerParams) -+ { -+ PVRSRV_TD_POWER_PARAMS sTDPowerParams; -+ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || -+ RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) -+ { -+ sTDPowerParams.sPCAddr = psDevInfo->sLayerParams.sPCAddr; -+ } -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ sTDPowerParams.sGPURegAddr = psDevInfo->sLayerParams.sGPURegAddr; -+ sTDPowerParams.sBootRemapAddr = psDevInfo->sLayerParams.sBootRemapAddr; -+ sTDPowerParams.sCodeRemapAddr = psDevInfo->sLayerParams.sCodeRemapAddr; -+ sTDPowerParams.sDataRemapAddr = psDevInfo->sLayerParams.sDataRemapAddr; -+ } -+#endif -+ -+ eError = psDevConfig->pfnTDSetPowerParams(psDevConfig->hSysData, -+ &sTDPowerParams); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: TDSetPowerParams not implemented!")); -+ eError = PVRSRV_ERROR_NOT_IMPLEMENTED; -+ } -+#endif -+ -+ return eError; -+} -+ -+/* -+ RGXSystemHasFBCDCVersion31 -+*/ -+static IMG_BOOL RGXSystemHasFBCDCVersion31(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+#if defined(SUPPORT_VALIDATION) -+ IMG_UINT32 ui32FBCDCVersionOverride = 0; -+#endif -+ -+#if defined(FIX_HW_ERN_66622_BIT_MASK) -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ if (RGX_IS_ERN_SUPPORTED(psDevInfo, 66622)) -+ { -+#if defined(SUPPORT_VALIDATION) -+ void *pvAppHintState = NULL; -+ -+ IMG_UINT32 ui32AppHintDefault; -+ -+ OSCreateAppHintState(&pvAppHintState); -+ ui32AppHintDefault = PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FBCDCVersionOverride, -+ &ui32AppHintDefault, &ui32FBCDCVersionOverride); -+ OSFreeAppHintState(pvAppHintState); -+ -+ if (ui32FBCDCVersionOverride > 0) -+ { -+ if (ui32FBCDCVersionOverride == 2) -+ { -+ return IMG_TRUE; -+ } -+ } -+ else -+#endif -+ { -+ if (psDeviceNode->psDevConfig->bHasFBCDCVersion31) -+ { -+ return IMG_TRUE; -+ } -+ } -+ } -+ else -+#endif -+ { -+ -+#if defined(SUPPORT_VALIDATION) -+ if (ui32FBCDCVersionOverride == 2) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: FBCDCVersionOverride forces FBC3.1 but this core doesn't support it!", -+ __func__)); -+ } -+#endif -+ -+#if !defined(NO_HARDWARE) -+ if (psDeviceNode->psDevConfig->bHasFBCDCVersion31) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: System uses FBCDC3.1 but GPU doesn't support it!", -+ __func__)); -+ } -+#endif -+ } -+ -+ return IMG_FALSE; -+} -+ -+/* -+ RGXGetTFBCLossyGroup -+*/ -+static IMG_UINT32 RGXGetTFBCLossyGroup(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) -+ return psDevInfo->ui32TFBCLossyGroup; -+#else -+ return 0; -+#endif -+} -+ -+/* -+ RGXDevMMUAttributes -+*/ -+static MMU_DEVICEATTRIBS *RGXDevMMUAttributes(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_BOOL bKernelFWMemoryCtx) -+{ -+ MMU_DEVICEATTRIBS *psMMUDevAttrs = NULL; -+ -+ if (psDeviceNode->pfnCheckDeviceFeature) -+ { -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ if (PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS)) -+ { -+ psMMUDevAttrs = bKernelFWMemoryCtx ? 
-+ psDeviceNode->psFirmwareMMUDevAttrs : -+ psDeviceNode->psMMUDevAttrs; -+ } -+ else -+#endif -+ { -+ PVR_UNREFERENCED_PARAMETER(bKernelFWMemoryCtx); -+ psMMUDevAttrs = psDeviceNode->psMMUDevAttrs; -+ } -+ } -+ -+ return psMMUDevAttrs; -+} -+ -+/* -+ * RGXInitDevPart2 -+ */ -+PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DeviceFlags, -+ IMG_UINT32 ui32HWPerfHostFilter, -+ RGX_ACTIVEPM_CONF eActivePMConf, -+ RGX_FWT_LOGTYPE eDebugDumpFWTLogType) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_DEV_POWER_STATE eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON; -+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; -+ -+ /* Assume system layer has turned power on by this point, required before powering device */ -+ psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON; -+ -+ PDUMPCOMMENT(psDeviceNode, "RGX Initialisation Part 2"); -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) && defined(PDUMP) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ RGXPDumpBootldrData(psDeviceNode, psDevInfo); -+ } -+#endif -+#if defined(TIMING) || defined(DEBUG) -+ OSUserModeAccessToPerfCountersEn(); -+#endif -+ -+ /* Initialise Device Flags */ -+ psDevInfo->ui32DeviceFlags = 0; -+ RGXSetDeviceFlags(psDevInfo, ui32DeviceFlags, IMG_TRUE); -+ -+ /* Allocate DVFS Table (needs to be allocated before GPU trace events -+ * component is initialised because there is a dependency between them) */ -+ psDevInfo->psGpuDVFSTable = OSAllocZMem(sizeof(*(psDevInfo->psGpuDVFSTable))); -+ PVR_LOG_GOTO_IF_NOMEM(psDevInfo->psGpuDVFSTable, eError, ErrorExit); -+ -+ if (psDevInfo->ui32HWPerfHostFilter == 0) -+ { -+ RGXHWPerfHostSetEventFilter(psDevInfo, ui32HWPerfHostFilter); -+ } -+ -+ /* If HWPerf enabled allocate all resources for the host side buffer. 
*/ -+ if (psDevInfo->ui32HWPerfHostFilter != 0) -+ { -+ if (RGXHWPerfHostInitOnDemandResources(psDevInfo) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer on demand" -+ " initialisation failed.")); -+ } -+ } -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Initialise work estimation lock */ -+ eError = OSLockCreate(&psDevInfo->hWorkEstLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(WorkEstLock)", ErrorExit); -+ } -+#endif -+ -+ /* Initialise lists of ZSBuffers */ -+ eError = OSLockCreate(&psDevInfo->hLockZSBuffer); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(LockZSBuffer)", ErrorExit); -+ dllist_init(&psDevInfo->sZSBufferHead); -+ psDevInfo->ui32ZSBufferCurrID = 1; -+ -+ /* Initialise lists of growable Freelists */ -+ eError = OSLockCreate(&psDevInfo->hLockFreeList); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(LockFreeList)", ErrorExit); -+ dllist_init(&psDevInfo->sFreeListHead); -+ psDevInfo->ui32FreelistCurrID = 1; -+ -+ eError = OSLockCreate(&psDevInfo->hDebugFaultInfoLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(DebugFaultInfoLock)", ErrorExit); -+ -+ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) -+ { -+ eError = OSLockCreate(&psDevInfo->hMMUCtxUnregLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(MMUCtxUnregLock)", ErrorExit); -+ } -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ eError = OSLockCreate(&psDevInfo->hNMILock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(NMILock)", ErrorExit); -+ } -+#endif -+ -+ /* Setup GPU utilisation stats update callback */ -+ eError = OSLockCreate(&psDevInfo->hGPUUtilLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(GPUUtilLock)", ErrorExit); -+#if !defined(NO_HARDWARE) -+ psDevInfo->pfnGetGpuUtilStats = RGXGetGpuUtilStats; -+#endif -+ -+ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON; -+ psDevInfo->eActivePMConf = eActivePMConf; -+ -+#if !defined(NO_HARDWARE) -+ /* set-up the Active Power Mgmt callback */ -+ { -+ RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; -+ IMG_BOOL bSysEnableAPM = psRGXData->psRGXTimingInfo->bEnableActivePM; -+ IMG_BOOL bEnableAPM = ((eActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) || -+ (eActivePMConf == RGX_ACTIVEPM_FORCE_ON); -+ -+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) && defined(SUPPORT_AUTOVZ) -+ /* The AutoVz driver enables a virtualisation watchdog not compatible with APM */ -+ if (bEnableAPM && (!PVRSRV_VZ_MODE_IS(NATIVE))) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Active Power Management disabled in AutoVz mode", __func__)); -+ bEnableAPM = IMG_FALSE; -+ } -+ -+ PVR_ASSERT(bEnableAPM == IMG_FALSE); -+#endif -+ -+ if (bEnableAPM) -+ { -+ eError = OSInstallMISR(&psDevInfo->pvAPMISRData, -+ RGX_MISRHandler_CheckFWActivePowerState, -+ psDeviceNode, -+ "RGX_CheckFWActivePower"); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR(APMISR)", ErrorExit); -+ -+ /* Prevent the device being woken up before there is something to do. */ -+ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF; -+ } -+ } -+#endif -+ -+ psDevInfo->eDebugDumpFWTLogType = eDebugDumpFWTLogType; -+ -+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableAPM, -+ RGXQueryAPMState, -+ RGXSetAPMState, -+ psDeviceNode, -+ NULL); -+ -+ RGXTimeCorrInitAppHintCallbacks(psDeviceNode); -+ -+ /* Register the device with the power manager */ -+ eError = PVRSRVRegisterPowerDevice(psDeviceNode, -+ (PVRSRV_VZ_MODE_IS(NATIVE)) ? 
&RGXPrePowerState : &RGXVzPrePowerState, -+ (PVRSRV_VZ_MODE_IS(NATIVE)) ? &RGXPostPowerState : &RGXVzPostPowerState, -+ psDevConfig->pfnPrePowerState, psDevConfig->pfnPostPowerState, -+ &RGXPreClockSpeedChange, &RGXPostClockSpeedChange, -+ &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest, -+ &RGXDustCountChange, -+ (IMG_HANDLE)psDeviceNode, -+ PVRSRV_DEV_POWER_STATE_OFF, -+ eDefaultPowerState); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterPowerDevice", ErrorExit); -+ -+ eError = RGXSetPowerParams(psDevInfo, psDevConfig); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetPowerParams", ErrorExit); -+ -+#if defined(SUPPORT_VALIDATION) -+ { -+ void *pvAppHintState = NULL; -+ -+ IMG_UINT32 ui32AppHintDefault; -+ -+ OSCreateAppHintState(&pvAppHintState); -+ ui32AppHintDefault = PVRSRV_APPHINT_TESTSLRINTERVAL; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TestSLRInterval, -+ &ui32AppHintDefault, &psDevInfo->ui32TestSLRInterval); -+ PVR_LOG(("OSGetAppHintUINT32(TestSLRInterval) ui32AppHintDefault=%d, psDevInfo->ui32TestSLRInterval=%d", -+ ui32AppHintDefault, psDevInfo->ui32TestSLRInterval)); -+ OSFreeAppHintState(pvAppHintState); -+ psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval; -+ psDevInfo->ui32SLRSkipFWAddr = 0; -+ -+ ui32AppHintDefault = 0; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, ECCRAMErrInj, &ui32AppHintDefault, &psDevInfo->ui32ECCRAMErrInjModule); -+ psDevInfo->ui32ECCRAMErrInjInterval = RGXKM_ECC_ERR_INJ_INTERVAL; -+ -+#if defined(PDUMP) && defined(SUPPORT_VALIDATION) -+ /* POL on ECC RAM GPU fault events, MARS is FW fault */ -+ if (psDevInfo->ui32ECCRAMErrInjModule != RGXKM_ECC_ERR_INJ_DISABLE && -+ psDevInfo->ui32ECCRAMErrInjModule != RGXKM_ECC_ERR_INJ_MARS) -+ { -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_DEINIT, "Verify ECC fault event"); -+ eError = PDUMPREGPOL(psDeviceNode, RGX_PDUMPREG_NAME, -+ RGX_CR_SCRATCH11, -+ 1U, -+ 0xFFFFFFFF, -+ PDUMP_FLAGS_DEINIT, -+ PDUMP_POLL_OPERATOR_EQUAL); -+ } -+#endif -+ } -+#endif -+ -+#if defined(PDUMP) -+#if defined(NO_HARDWARE) -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_DEINIT, "Wait for the FW to signal idle"); -+ -+ /* Kick the FW once, in case it still needs to detect and set the idle state */ -+ PDUMPREG32(psDeviceNode, RGX_PDUMPREG_NAME, -+ RGX_CR_MTS_SCHEDULE, -+ RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK, -+ PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_DEINIT); -+ -+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfFwSysDataMemDesc, -+ offsetof(RGXFWIF_SYSDATA, ePowState), -+ RGXFWIF_POW_IDLE, -+ 0xFFFFFFFFU, -+ PDUMP_POLL_OPERATOR_EQUAL, -+ PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_DEINIT); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemPDumpDevmemPol32", ErrorExit); -+#endif -+ -+ /* Run RGXStop with the correct PDump flags to feed the last-frame deinit buffer */ -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_DEINIT, -+ "RGX deinitialisation commands"); -+ -+ psDevInfo->sLayerParams.ui32PdumpFlags |= PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW; -+ -+ if (! 
PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ eError = RGXStop(&psDevInfo->sLayerParams); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXStop", ErrorExit); -+ } -+ -+ psDevInfo->sLayerParams.ui32PdumpFlags &= ~(PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW); -+#endif -+ -+#if !defined(NO_HARDWARE) -+ eError = RGXInstallProcessQueuesMISR(&psDevInfo->hProcessQueuesMISR, psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXInstallProcessQueuesMISR", ErrorExit); -+ -+ /* Register RGX to receive notifies when other devices complete some work */ -+ PVRSRVRegisterCmdCompleteNotify(&psDeviceNode->hCmdCompNotify, &RGXScheduleProcessQueuesKM, psDeviceNode); -+ -+ /* Register the interrupt handlers */ -+ eError = OSInstallMISR(&psDevInfo->pvMISRData, -+ RGX_MISRHandler_Main, -+ psDeviceNode, -+ "RGX_Main"); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR(MISR)", ErrorExit); -+ -+ /* Register appropriate mechanism for clearing hw interrupts */ -+ if ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, IRQ_PER_OS)) && (!PVRSRV_VZ_MODE_IS(NATIVE))) -+ { -+ psDevInfo->pfnRGXAckIrq = RGXAckIrqDedicated; -+ } -+ else if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ psDevInfo->pfnRGXAckIrq = NULL; -+ } -+ else -+ { -+ /* native and host drivers must clear the unique GPU physical interrupt */ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ psDevInfo->pfnRGXAckIrq = RGXAckIrqMIPS; -+ } -+ else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) -+ { -+ psDevInfo->pfnRGXAckIrq = RGXAckIrqMETA; -+ } -+ else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) -+ { -+ psDevInfo->pfnRGXAckIrq = RGXAckIrqDedicated; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: GPU IRQ clearing mechanism not implemented " -+ "for the this architecture.", __func__)); -+ PVR_LOG_GOTO_WITH_ERROR("pfnRGXAckIrq", eError, PVRSRV_ERROR_NOT_IMPLEMENTED, ErrorExit); -+ } -+ } -+ -+#if defined(RGX_IRQ_HYPERV_HANDLER) -+ /* The hypervisor receives and acknowledges the GPU irq, then it injects an -+ * irq only in the recipient OS. 
The KM driver doesn't handle the GPU irq line */ -+ psDevInfo->pfnRGXAckIrq = NULL; -+#endif -+ -+ eError = SysInstallDeviceLISR(psDevConfig->hSysData, -+ psDevConfig->ui32IRQ, -+ PVRSRV_MODNAME, -+ RGX_LISRHandler, -+ psDeviceNode, -+ &psDevInfo->pvLISRData); -+ PVR_LOG_GOTO_IF_ERROR(eError, "SysInstallDeviceLISR", ErrorExit); -+#endif /* !defined(NO_HARDWARE) */ -+ -+#if defined(PDUMP) -+ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_CACHE_HIERARCHY))) -+ { -+ if (!PVRSRVSystemSnoopingOfCPUCache(psDevConfig) && -+ !PVRSRVSystemSnoopingOfDeviceCache(psDevConfig)) -+ { -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "System has NO cache snooping"); -+ } -+ else -+ { -+ if (PVRSRVSystemSnoopingOfCPUCache(psDevConfig)) -+ { -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "System has CPU cache snooping"); -+ } -+ if (PVRSRVSystemSnoopingOfDeviceCache(psDevConfig)) -+ { -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "System has DEVICE cache snooping"); -+ } -+ } -+ } -+#endif -+ -+#if defined(RGX_FEATURE_COMPUTE_ONLY_BIT_MASK) -+ if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE_ONLY)) -+#endif -+ { -+ eError = PVRSRVTQLoadShaders(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVTQLoadShaders", ErrorExit); -+ } -+ -+#if defined(SUPPORT_SECURE_ALLOC_KM) -+ eError = OSAllocateSecBuf(psDeviceNode, RGXFWIF_KM_GENERAL_HEAP_RESERVED_SIZE, "SharedSecMem", &psDevInfo->psGenHeapSecMem); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSAllocateSecBuf", ErrorExit); -+#endif -+ -+ psDevInfo->bDevInit2Done = IMG_TRUE; -+ -+ return PVRSRV_OK; -+ -+ErrorExit: -+ DevPart2DeInitRGX(psDeviceNode); -+ -+ return eError; -+} -+ -+#define VZ_RGX_FW_FILENAME_SUFFIX ".vz" -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+#define RGX_64K_FW_FILENAME_SUFFIX ".64k" -+#define RGX_FW_FILENAME_MAX_SIZE ((sizeof(RGX_FW_FILENAME)+ \ -+ RGX_BVNC_STR_SIZE_MAX+sizeof(VZ_RGX_FW_FILENAME_SUFFIX) + sizeof(RGX_64K_FW_FILENAME_SUFFIX))) -+#else -+#define RGX_FW_FILENAME_MAX_SIZE ((sizeof(RGX_FW_FILENAME)+ \ -+ RGX_BVNC_STR_SIZE_MAX+sizeof(VZ_RGX_FW_FILENAME_SUFFIX))) -+#endif -+ -+static void _GetFWFileName(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR *pszFWFilenameStr, -+ IMG_CHAR *pszFWpFilenameStr) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ const IMG_CHAR * const pszFWFilenameSuffix = -+ PVRSRV_VZ_MODE_IS(NATIVE) ? "" : VZ_RGX_FW_FILENAME_SUFFIX; -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ const IMG_CHAR * const pszFWFilenameSuffix2 = -+ ((OSGetPageSize() == RGX_MMU_PAGE_SIZE_64KB) && -+ RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ ? RGX_64K_FW_FILENAME_SUFFIX : ""; -+#else -+ const IMG_CHAR * const pszFWFilenameSuffix2 = ""; -+#endif -+ -+ OSSNPrintf(pszFWFilenameStr, RGX_FW_FILENAME_MAX_SIZE, -+ "%s." RGX_BVNC_STR_FMTSPEC "%s%s", -+ RGX_FW_FILENAME, -+ psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, -+ psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C, -+ pszFWFilenameSuffix, pszFWFilenameSuffix2); -+ -+ OSSNPrintf(pszFWpFilenameStr, RGX_FW_FILENAME_MAX_SIZE, -+ "%s." 
RGX_BVNC_STRP_FMTSPEC "%s%s", -+ RGX_FW_FILENAME, -+ psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, -+ psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C, -+ pszFWFilenameSuffix, pszFWFilenameSuffix2); -+} -+ -+PVRSRV_ERROR RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode, -+ OS_FW_IMAGE **ppsRGXFW, -+ const IMG_BYTE **ppbFWData) -+{ -+ IMG_CHAR aszFWFilenameStr[RGX_FW_FILENAME_MAX_SIZE]; -+ IMG_CHAR aszFWpFilenameStr[RGX_FW_FILENAME_MAX_SIZE]; -+ IMG_CHAR *pszLoadedFwStr; -+ PVRSRV_ERROR eErr; -+ -+ /* Prepare the image filenames to use in the following code */ -+ _GetFWFileName(psDeviceNode, aszFWFilenameStr, aszFWpFilenameStr); -+ -+ /* Get pointer to Firmware image */ -+ pszLoadedFwStr = aszFWFilenameStr; -+ eErr = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION, ppsRGXFW); -+ if (eErr == PVRSRV_ERROR_NOT_FOUND) -+ { -+ pszLoadedFwStr = aszFWpFilenameStr; -+ eErr = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION, ppsRGXFW); -+ if (eErr == PVRSRV_ERROR_NOT_FOUND) -+ { -+ pszLoadedFwStr = RGX_FW_FILENAME; -+ eErr = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION, ppsRGXFW); -+ if (eErr == PVRSRV_ERROR_NOT_FOUND) -+ { -+ PVR_DPF((PVR_DBG_FATAL, "All RGX Firmware image loads failed for '%s' (%s)", -+ aszFWFilenameStr, PVRSRVGetErrorString(eErr))); -+ } -+ } -+ } -+ -+ if (eErr == PVRSRV_OK) -+ { -+ PVR_LOG(("RGX Firmware image '%s' loaded", pszLoadedFwStr)); -+ *ppbFWData = (const IMG_BYTE*)OSFirmwareData(*ppsRGXFW); -+ } -+ else -+ { -+ *ppbFWData = NULL; -+ } -+ -+ return eErr; -+ -+} -+ -+#if defined(PDUMP) -+PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ return PVRSRV_OK; -+} -+#endif -+ -+PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ /* set up fw memory contexts */ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_ERROR eError; -+ -+#if defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) -+ IMG_BOOL bNativeFwUMAHeap = PVRSRV_VZ_MODE_IS(NATIVE) && -+ (PhysHeapGetType(psDeviceNode->apsPhysHeap[FIRST_PHYSHEAP_MAPPED_TO_FW_MAIN_DEVMEM]) == PHYS_HEAP_TYPE_UMA); -+#endif -+ -+#if defined(RGX_PREMAP_FW_HEAPS) -+ PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap; -+ if ((!PVRSRV_VZ_MODE_IS(GUEST)) && (!psDeviceNode->bAutoVzFwIsUp) && (!bNativeFwUMAHeap)) -+ { -+ PHYS_HEAP *psFwPageTableHeap = -+ psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]; -+ -+ PVR_LOG_GOTO_IF_INVALID_PARAM((psFwPageTableHeap != NULL), -+ eError, failed_to_create_ctx); -+ -+ /* Temporarily swap the MMU and default GPU physheap to allow the page -+ * tables of all memory mapped by the FwKernel context to be placed -+ * in a dedicated memory carveout. This should allow the firmware mappings to -+ * persist after a Host kernel crash or driver reset. */ -+ psDeviceNode->psMMUPhysHeap = psFwPageTableHeap; -+ } -+#endif -+ -+ /* Register callbacks for creation of device memory contexts */ -+ psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext; -+ psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext; -+ -+ RGXFwSharedMemCheckSnoopMode(psDevInfo->psDeviceNode->psDevConfig); -+ -+ /* Create the memory context for the firmware. 
*/ -+ eError = DevmemCreateContext(psDeviceNode, DEVMEM_HEAPCFG_FORFW, -+ &psDevInfo->psKernelDevmemCtx); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed DevmemCreateContext (%u)", -+ __func__, -+ eError)); -+ goto failed_to_create_ctx; -+ } -+ -+ eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_MAIN_HEAP_IDENT, -+ &psDevInfo->psFirmwareMainHeap); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed DevmemFindHeapByName (%u)", -+ __func__, -+ eError)); -+ goto failed_to_find_heap; -+ } -+ -+ eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_CONFIG_HEAP_IDENT, -+ &psDevInfo->psFirmwareConfigHeap); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed DevmemFindHeapByName (%u)", -+ __func__, -+ eError)); -+ goto failed_to_find_heap; -+ } -+ -+#if (defined(RGX_PREMAP_FW_HEAPS)) || (defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ IMG_UINT32 ui32DriverID; -+ -+ FOREACH_DRIVER_RAW_HEAP(ui32DriverID) -+ { -+ IMG_CHAR szHeapName[RA_MAX_NAME_LENGTH]; -+ -+ OSSNPrintf(szHeapName, sizeof(szHeapName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32DriverID); -+ eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, szHeapName, -+ &psDevInfo->psPremappedFwRawHeap[ui32DriverID]); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemFindHeapByName", failed_to_find_heap); -+ } -+ } -+#endif -+ -+#if defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) -+ if (!PVRSRV_VZ_MODE_IS(GUEST) && !bNativeFwUMAHeap) -+ { -+ IMG_DEV_PHYADDR sPhysHeapBase; -+ IMG_UINT32 ui32DriverID; -+ -+ eError = PhysHeapGetDevPAddr(psDeviceNode->apsPhysHeap[FIRST_PHYSHEAP_MAPPED_TO_FW_MAIN_DEVMEM], &sPhysHeapBase); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapGetDevPAddr", failed_to_find_heap); -+ -+ FOREACH_DRIVER_RAW_HEAP(ui32DriverID) -+ { -+ IMG_DEV_PHYADDR sRawFwHeapBase = {sPhysHeapBase.uiAddr + (ui32DriverID * RGX_FIRMWARE_RAW_HEAP_SIZE)}; -+ -+ eError = RGXFwRawHeapAllocMap(psDeviceNode, -+ ui32DriverID, -+ sRawFwHeapBase, -+ RGX_FIRMWARE_RAW_HEAP_SIZE); -+ if (eError != PVRSRV_OK) -+ { -+ for (; ui32DriverID > RGX_FIRST_RAW_HEAP_DRIVER_ID; ui32DriverID--) -+ { -+ RGXFwRawHeapUnmapFree(psDeviceNode, ui32DriverID); -+ } -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", failed_to_find_heap); -+ } -+ } -+ -+#if defined(RGX_PREMAP_FW_HEAPS) -+ /* restore default Px setup */ -+ psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap; -+#endif -+ } -+#endif /* defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ -+ -+#if !defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) -+ /* On setups with dynamically mapped Guest heaps, the Guest makes -+ * a PVZ call to the Host to request the mapping during init. */ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ eError = PvzClientMapDevPhysHeap(psDeviceNode->psDevConfig); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PvzClientMapDevPhysHeap", failed_to_find_heap); -+ } -+#endif /* !defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE); -+ DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE); -+ } -+ -+ return eError; -+ -+failed_to_find_heap: -+ /* -+ * Clear the mem context create callbacks before destroying the RGX firmware -+ * context to avoid a spurious callback. 
-+ */ -+ psDeviceNode->pfnRegisterMemoryContext = NULL; -+ psDeviceNode->pfnUnregisterMemoryContext = NULL; -+ DevmemDestroyContext(psDevInfo->psKernelDevmemCtx); -+ psDevInfo->psKernelDevmemCtx = NULL; -+failed_to_create_ctx: -+ return eError; -+} -+ -+void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_ERROR eError; -+#if defined(RGX_PREMAP_FW_HEAPS) -+ PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap; -+#endif -+ -+#if defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+#if defined(RGX_PREMAP_FW_HEAPS) -+ psDeviceNode->psMMUPhysHeap = -+ psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]; -+ -+ if (!psDeviceNode->bAutoVzFwIsUp) -+#endif -+ { -+ IMG_UINT32 ui32DriverID; -+ -+ FOREACH_DRIVER_RAW_HEAP(ui32DriverID) -+ { -+ RGXFwRawHeapUnmapFree(psDeviceNode, ui32DriverID); -+ } -+ } -+ } -+#endif /* defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ -+ -+#if !defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ (void) PvzClientUnmapDevPhysHeap(psDeviceNode->psDevConfig); -+ -+ if (psDevInfo->psFirmwareMainHeap) -+ { -+ DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_FALSE); -+ } -+ if (psDevInfo->psFirmwareConfigHeap) -+ { -+ DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_FALSE); -+ } -+ } -+#endif -+ -+ /* -+ * Clear the mem context create callbacks before destroying the RGX firmware -+ * context to avoid a spurious callback. -+ */ -+ psDeviceNode->pfnRegisterMemoryContext = NULL; -+ psDeviceNode->pfnUnregisterMemoryContext = NULL; -+ -+ if (psDevInfo->psKernelDevmemCtx) -+ { -+ eError = DevmemDestroyContext(psDevInfo->psKernelDevmemCtx); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ } -+ -+#if defined(RGX_PREMAP_FW_HEAPS) -+ psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap; -+#endif -+} -+ -+static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT32 ui32AlignChecksSizeUM, -+ IMG_UINT32 aui32AlignChecksUM[]) -+{ -+ static const IMG_UINT32 aui32AlignChecksKM[] = {RGXFW_ALIGN_CHECKS_INIT_KM}; -+ IMG_UINT32 ui32UMChecksOffset = ARRAY_SIZE(aui32AlignChecksKM) + 1; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; -+ IMG_UINT32 i, *paui32FWAlignChecks; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ /* Skip the alignment check if the driver is guest -+ since there is no firmware to check against */ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, eError); -+ -+ if (psDevInfo->psRGXFWAlignChecksMemDesc == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: FW Alignment Check Mem Descriptor is NULL", -+ __func__)); -+ return PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE; -+ } -+ -+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc, -+ (void **) &paui32FWAlignChecks); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to acquire kernel address for alignment checks (%u)", -+ __func__, -+ eError)); -+ return eError; -+ } -+ -+ paui32FWAlignChecks += ui32UMChecksOffset; -+ /* Invalidate the size value, check the next region size (UM) and invalidate */ -+ RGXFwSharedMemCacheOpPtr(paui32FWAlignChecks, INVALIDATE); -+ if (*paui32FWAlignChecks++ != ui32AlignChecksSizeUM) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Mismatching sizes of RGXFW_ALIGN_CHECKS_INIT" -+ " array between UM(%d) and FW(%d)", -+ __func__, -+ ui32AlignChecksSizeUM, -+ *paui32FWAlignChecks)); -+ eError = PVRSRV_ERROR_INVALID_ALIGNMENT; -+ goto 
return_; -+ } -+ -+ RGXFwSharedMemCacheOpExec(paui32FWAlignChecks, -+ ui32AlignChecksSizeUM * sizeof(IMG_UINT32), -+ PVRSRV_CACHE_OP_INVALIDATE); -+ -+ for (i = 0; i < ui32AlignChecksSizeUM; i++) -+ { -+ if (aui32AlignChecksUM[i] != paui32FWAlignChecks[i]) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: size/offset mismatch in RGXFW_ALIGN_CHECKS_INIT[%d]" -+ " between UM(%d) and FW(%d)", -+ __func__, i, aui32AlignChecksUM[i], paui32FWAlignChecks[i])); -+ eError = PVRSRV_ERROR_INVALID_ALIGNMENT; -+ } -+ } -+ -+ if (eError == PVRSRV_ERROR_INVALID_ALIGNMENT) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Check for FW/KM structure" -+ " alignment failed.", __func__)); -+ } -+ -+return_: -+ -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc); -+ -+ return eError; -+} -+ -+static -+PVRSRV_ERROR RGXAllocateFWMemoryRegion(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_DEVMEM_SIZE_T ui32Size, -+ PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags, -+ const IMG_PCHAR pszText, -+ DEVMEM_MEMDESC **ppsMemDescPtr) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_DEVMEM_LOG2ALIGN_T uiLog2Align = OSGetPageShift(); -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) && defined(SUPPORT_MIPS_CONTIGUOUS_FW_MEMORY) -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ uiLog2Align = RGXMIPSFW_LOG2_PAGE_SIZE_64K; -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(uiLog2Align); -+#endif -+ -+ uiMemAllocFlags = (uiMemAllocFlags | -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) & -+ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(SUPPORT_SECURITY_VALIDATION) -+ uiMemAllocFlags &= PVRSRV_MEMALLOCFLAGS_TDFWMASK; -+#endif -+ -+ PDUMPCOMMENT(psDeviceNode, "Allocate FW %s memory", pszText); -+ -+ eError = DevmemFwAllocateExportable(psDeviceNode, -+ ui32Size, -+ 1ULL << uiLog2Align, -+ uiMemAllocFlags, -+ pszText, -+ ppsMemDescPtr); -+ -+ return eError; -+} -+ -+/*! -+ ******************************************************************************* -+ -+ @Function RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver -+ -+ @Description -+ -+ Validate the FW build options against KM driver build options (KM build options only) -+ -+ Following check is redundant, because next check checks the same bits. -+ Redundancy occurs because if client-server are build-compatible and client-firmware are -+ build-compatible then server-firmware are build-compatible as well. -+ -+ This check is left for clarity in error messages if any incompatibility occurs. 
-+ -+ @Input psDevInfo - device info -+ -+ @Return PVRSRV_ERROR - depending on mismatch found -+ -+ ******************************************************************************/ -+static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ -+ IMG_UINT32 ui32BuildOptions, ui32BuildOptionsFWKMPart, ui32BuildOptionsMismatch; -+ RGX_FW_INFO_HEADER *psFWInfoHeader = NULL; -+ RGXFWIF_OSINIT *psFwOsInit = NULL; -+ -+ if (psDevInfo == NULL) -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ ui32BuildOptions = (RGX_BUILD_OPTIONS_KM & RGX_BUILD_OPTIONS_MASK_FW); -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ psFwOsInit = psDevInfo->psRGXFWIfOsInit; -+ if (psFwOsInit == NULL) -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ ui32BuildOptionsFWKMPart = psFwOsInit->sRGXCompChecks.ui32BuildOptions & RGX_BUILD_OPTIONS_MASK_FW; -+ } -+ else -+ { -+ psFWInfoHeader = &psDevInfo->sFWInfoHeader; -+ ui32BuildOptionsFWKMPart = psFWInfoHeader->ui32Flags & RGX_BUILD_OPTIONS_MASK_FW; -+ } -+ -+ /* Check if the FW is missing support for any features required by the driver */ -+ if (~ui32BuildOptionsFWKMPart & ui32BuildOptions) -+ { -+ ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32BuildOptionsFWKMPart; -+#if !defined(PVRSRV_STRICT_COMPAT_CHECK) -+ /*Mask non-critical options out as we do support combining them in UM & KM */ -+ ui32BuildOptionsMismatch &= FW_OPTIONS_STRICT; -+#endif -+ if ((ui32BuildOptions & ui32BuildOptionsMismatch) != 0) -+ { -+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and KM driver build options; " -+ "extra options present in the KM driver: (0x%x). Please check rgx_options.h", -+ ui32BuildOptions & ui32BuildOptionsMismatch)); -+ return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; -+ } -+ -+ if ((ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch) != 0) -+ { -+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware-side and KM driver build options; " -+ "extra options present in Firmware: (0x%x). Please check rgx_options.h", -+ ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch )); -+ return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; -+ } -+ PVR_DPF((PVR_DBG_WARNING, "RGXDevInitCompatCheck: Firmware and KM driver build options differ.")); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and KM driver build options match. [ OK ]")); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/*! 
-+ ******************************************************************************* -+ -+ @Function RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver -+ -+ @Description -+ -+ Validate FW DDK version against driver DDK version -+ -+ @Input psDevInfo - device info -+ -+ @Return PVRSRV_ERROR - depending on mismatch found -+ -+ ******************************************************************************/ -+static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ IMG_UINT32 ui32KMDDKVersion; -+ IMG_UINT32 ui32FWDDKVersion; -+ PVRSRV_ERROR eError; -+ RGX_FW_INFO_HEADER *psFWInfoHeader = NULL; -+ RGXFWIF_OSINIT *psFwOsInit = NULL; -+ -+ if (psDevInfo == NULL) -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ ui32KMDDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN); -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ psFwOsInit = psDevInfo->psRGXFWIfOsInit; -+ if (psFwOsInit == NULL) -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ ui32FWDDKVersion = psFwOsInit->sRGXCompChecks.ui32DDKVersion; -+ } -+ else -+ { -+ psFWInfoHeader = &psDevInfo->sFWInfoHeader; -+ -+ ui32FWDDKVersion = PVRVERSION_PACK(psFWInfoHeader->ui16PVRVersionMajor, psFWInfoHeader->ui16PVRVersionMinor); -+ } -+ -+ if (ui32FWDDKVersion != ui32KMDDKVersion) -+ { -+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible driver DDK version (%u.%u) / Firmware DDK version (%u.%u).", -+ PVRVERSION_MAJ, PVRVERSION_MIN, -+ PVRVERSION_UNPACK_MAJ(ui32FWDDKVersion), -+ PVRVERSION_UNPACK_MIN(ui32FWDDKVersion))); -+ eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH; -+ PVR_DBG_BREAK; -+ return eError; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK version (%u.%u) and Firmware DDK version (%u.%u) match. [ OK ]", -+ PVRVERSION_MAJ, PVRVERSION_MIN, -+ PVRVERSION_MAJ, PVRVERSION_MIN)); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+ ******************************************************************************* -+ -+ @Function RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver -+ -+ @Description -+ -+ Validate FW DDK build against driver DDK build -+ -+ @Input psDevInfo - device info -+ -+ @Return PVRSRV_ERROR - depending on mismatch found -+ -+ ******************************************************************************/ -+static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 ui32KMDDKBuild; -+ IMG_UINT32 ui32FWDDKBuild; -+ RGX_FW_INFO_HEADER *psFWInfoHeader = NULL; -+ RGXFWIF_OSINIT *psFwOsInit = NULL; -+ -+ ui32KMDDKBuild = PVRVERSION_BUILD; -+ -+ if (psDevInfo == NULL) -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ psFwOsInit = psDevInfo->psRGXFWIfOsInit; -+ if (psFwOsInit == NULL) -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ ui32FWDDKBuild = psFwOsInit->sRGXCompChecks.ui32DDKBuild; -+ } -+ else -+ { -+ psFWInfoHeader = &psDevInfo->sFWInfoHeader; -+ ui32FWDDKBuild = psFWInfoHeader->ui32PVRVersionBuild; -+ } -+ -+ if (ui32FWDDKBuild != ui32KMDDKBuild) -+ { -+ PVR_LOG(("(WARN) RGXDevInitCompatCheck: Different driver DDK build version (%d) / Firmware DDK build version (%d).", -+ ui32KMDDKBuild, ui32FWDDKBuild)); -+#if defined(PVRSRV_STRICT_COMPAT_CHECK) -+ eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH; -+ PVR_DBG_BREAK; -+ return eError; -+#endif -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK build version (%d) and Firmware DDK build version (%d) match. 
[ OK ]", -+ ui32KMDDKBuild, ui32FWDDKBuild)); -+ } -+ return eError; -+} -+ -+/*! -+ ******************************************************************************* -+ -+ @Function RGXDevInitCompatCheck_BVNC_FWAgainstDriver -+ -+ @Description -+ -+ Validate FW BVNC against driver BVNC -+ -+ @Input psDevInfo - device info -+ -+ @Return PVRSRV_ERROR - depending on mismatch found -+ -+ ******************************************************************************/ -+static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ RGX_FW_INFO_HEADER *psFWInfoHeader; -+ IMG_UINT64 ui64KMBVNC; -+ -+ if (psDevInfo == NULL) -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ psFWInfoHeader = &psDevInfo->sFWInfoHeader; -+ -+ ui64KMBVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, -+ psDevInfo->sDevFeatureCfg.ui32V, -+ psDevInfo->sDevFeatureCfg.ui32N, -+ psDevInfo->sDevFeatureCfg.ui32C); -+ -+ if (ui64KMBVNC != psFWInfoHeader->ui64BVNC) -+ { -+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BVNC (%u.%u.%u.%u) and Firmware BVNC (%u.%u.%u.%u)", -+ RGX_BVNC_PACKED_EXTR_B(ui64KMBVNC), -+ RGX_BVNC_PACKED_EXTR_V(ui64KMBVNC), -+ RGX_BVNC_PACKED_EXTR_N(ui64KMBVNC), -+ RGX_BVNC_PACKED_EXTR_C(ui64KMBVNC), -+ RGX_BVNC_PACKED_EXTR_B(psFWInfoHeader->ui64BVNC), -+ RGX_BVNC_PACKED_EXTR_V(psFWInfoHeader->ui64BVNC), -+ RGX_BVNC_PACKED_EXTR_N(psFWInfoHeader->ui64BVNC), -+ RGX_BVNC_PACKED_EXTR_C(psFWInfoHeader->ui64BVNC))); -+ -+ eError = PVRSRV_ERROR_BVNC_MISMATCH; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: KM driver BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d) match. [ OK ]", -+ RGX_BVNC_PACKED_EXTR_B(ui64KMBVNC), -+ RGX_BVNC_PACKED_EXTR_V(ui64KMBVNC), -+ RGX_BVNC_PACKED_EXTR_N(ui64KMBVNC), -+ RGX_BVNC_PACKED_EXTR_C(ui64KMBVNC), -+ RGX_BVNC_PACKED_EXTR_B(psFWInfoHeader->ui64BVNC), -+ RGX_BVNC_PACKED_EXTR_V(psFWInfoHeader->ui64BVNC), -+ RGX_BVNC_PACKED_EXTR_N(psFWInfoHeader->ui64BVNC), -+ RGX_BVNC_PACKED_EXTR_C(psFWInfoHeader->ui64BVNC))); -+ -+ eError = PVRSRV_OK; -+ } -+ -+ return eError; -+} -+ -+/*! 
-+******************************************************************************* -+ -+ @Function RGXDevInitCompatCheck -+ -+ @Description -+ -+ Check compatibility of host driver and firmware (DDK and build options) -+ for RGX devices at services/device initialisation -+ -+ @Input psDeviceNode - device node -+ -+ @Return PVRSRV_ERROR - depending on mismatch found -+ -+ ******************************************************************************/ -+static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+#if !defined(NO_HARDWARE) -+ IMG_UINT32 ui32FwTimeout = MAX_HW_TIME_US; -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ LOOP_UNTIL_TIMEOUT(ui32FwTimeout) -+ { -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated, -+ INVALIDATE); -+ if (*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) -+ { -+ /* No need to wait if the FW has already updated the values */ -+ break; -+ } -+ OSWaitus(ui32FwTimeout/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ } -+ -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Flush covers this instance and the reads in the functions below */ -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks, -+ INVALIDATE); -+ if (!*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) -+ { -+ eError = PVRSRV_ERROR_TIMEOUT; -+ PVR_DPF((PVR_DBG_ERROR, "%s: GPU Firmware not responding: failed to supply compatibility info (%u)", -+ __func__, eError)); -+ -+ PVR_DPF((PVR_DBG_ERROR, "%s: Potential causes: firmware not initialised or the current Guest driver's " -+ "OsConfig initialisation data was not accepted by the firmware", __func__)); -+ goto chk_exit; -+ } -+ } -+ -+ if (PVRSRV_VZ_MODE_IS(NATIVE) && (RGX_NUM_DRIVERS_SUPPORTED > 1)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", -+ __func__, 1, RGX_NUM_DRIVERS_SUPPORTED)); -+ } -+#endif /* defined(NO_HARDWARE) */ -+ -+ eError = RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(psDevInfo); -+ if (eError != PVRSRV_OK) -+ { -+ goto chk_exit; -+ } -+ -+ eError = RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(psDevInfo); -+ if (eError != PVRSRV_OK) -+ { -+ goto chk_exit; -+ } -+ -+ eError = RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(psDevInfo); -+ if (eError != PVRSRV_OK) -+ { -+ goto chk_exit; -+ } -+ -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ eError = RGXDevInitCompatCheck_BVNC_FWAgainstDriver(psDevInfo); -+ if (eError != PVRSRV_OK) -+ { -+ goto chk_exit; -+ } -+ } -+ -+ eError = PVRSRV_OK; -+chk_exit: -+ -+ return eError; -+} -+ -+/**************************************************************************/ /*! -+@Function RGXSoftReset -+@Description Resets some modules of the RGX device -+@Input psDeviceNode Device node -+@Input ui64ResetValue1 A mask for which each bit set corresponds -+ to a module to reset (via the SOFT_RESET -+ register). -+@Input ui64ResetValue2 A mask for which each bit set corresponds -+ to a module to reset (via the SOFT_RESET2 -+ register). 
-+@Return PVRSRV_ERROR -+ */ /***************************************************************************/ -+static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT64 ui64ResetValue1, -+ IMG_UINT64 ui64ResetValue2) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ IMG_BOOL bSoftReset = IMG_FALSE; -+ IMG_UINT64 ui64SoftResetMask = 0; -+ -+ PVR_ASSERT(psDeviceNode != NULL); -+ PVR_ASSERT(psDeviceNode->pvDevice != NULL); -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); -+ -+ /* the device info */ -+ psDevInfo = psDeviceNode->pvDevice; -+#if defined(RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE)) -+ { -+ ui64SoftResetMask = RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL; -+ }else -+#endif -+ { -+ ui64SoftResetMask = RGX_CR_SOFT_RESET_MASKFULL; -+ } -+ -+#if defined(RGX_CR_SOFT_RESET2_MASKFULL) -+ if ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) && -+ ((ui64ResetValue2 & RGX_CR_SOFT_RESET2_MASKFULL) != ui64ResetValue2)) -+ { -+ bSoftReset = IMG_TRUE; -+ } -+#endif -+ -+ if (((ui64ResetValue1 & ui64SoftResetMask) != ui64ResetValue1) || bSoftReset) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* Set in soft-reset */ -+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, ui64ResetValue1); -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) -+ { -+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, ui64ResetValue2); -+ } -+ -+ /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ -+ (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET); -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) -+ { -+ (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2); -+ } -+ -+ /* Take the modules out of reset... */ -+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, 0); -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) -+ { -+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, 0); -+ } -+ -+ /* ...and fence again */ -+ (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET); -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) -+ { -+ (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+static const RGX_MIPS_ADDRESS_TRAMPOLINE sNullTrampoline; -+ -+static void RGXFreeTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ DevPhysMemFree(psDeviceNode, -+#if defined(PDUMP) -+ psDevInfo->psTrampoline->hPdumpPages, -+#endif -+ &psDevInfo->psTrampoline->sPages); -+ -+ if (psDevInfo->psTrampoline != &sNullTrampoline) -+ { -+ OSFreeMem(psDevInfo->psTrampoline); -+ } -+ psDevInfo->psTrampoline = (RGX_MIPS_ADDRESS_TRAMPOLINE *)&sNullTrampoline; -+} -+ -+#define RANGES_OVERLAP(x,y,size) (x < (y+size) && y < (x+size)) -+#define TRAMPOLINE_ALLOC_MAX_RETRIES (3) -+ -+static PVRSRV_ERROR RGXAllocTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ IMG_INT32 i, j; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGX_MIPS_ADDRESS_TRAMPOLINE *pasTrampoline[TRAMPOLINE_ALLOC_MAX_RETRIES]; -+ -+ PDUMPCOMMENT(psDeviceNode, "Allocate pages for trampoline"); -+ -+ /* Retry the allocation of the trampoline block (16KB), retaining any -+ * previous allocations overlapping with the target range until we get an -+ * allocation that doesn't overlap with the target range. 
-+ * Any allocation like this will require a maximum of 3 tries as we are -+ * allocating a physical contiguous block of memory, not individual pages. -+ * Free the unused allocations at the end only after the desired range -+ * is obtained to prevent the alloc function from returning the same bad -+ * range repeatedly. -+ */ -+ for (i = 0; i < TRAMPOLINE_ALLOC_MAX_RETRIES; i++) -+ { -+ pasTrampoline[i] = OSAllocMem(sizeof(RGX_MIPS_ADDRESS_TRAMPOLINE)); -+ eError = DevPhysMemAlloc(psDeviceNode, -+ RGXMIPSFW_TRAMPOLINE_SIZE, -+ RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE, -+ 0, // (init) u8Value -+ IMG_FALSE, // bInitPage, -+#if defined(PDUMP) -+ psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName, -+ "TrampolineRegion", -+ &pasTrampoline[i]->hPdumpPages, -+#endif -+ PVR_SYS_ALLOC_PID, -+ &pasTrampoline[i]->sPages, -+ &pasTrampoline[i]->sPhysAddr); -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s failed (%u)", -+ __func__, -+ eError)); -+ goto fail; -+ } -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+ /* Set the persistent uiOSid value so that we free from the correct -+ * base arena when unloading the driver and freeing the trampoline. -+ */ -+ pasTrampoline[i]->sPages.uiOSid = 0; /* Firmware global arena */ -+#endif -+ -+ if (!RANGES_OVERLAP(pasTrampoline[i]->sPhysAddr.uiAddr, -+ RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR, -+ RGXMIPSFW_TRAMPOLINE_SIZE)) -+ { -+ break; -+ } -+ } -+ if (TRAMPOLINE_ALLOC_MAX_RETRIES == i) -+ { -+ /* Failed to find a physical allocation after 3 attempts */ -+ eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES; -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s failed to allocate non-overlapping pages (%u)", -+ __func__, eError)); -+ /* Fall through, clean up and return error. */ -+ } -+ else -+ { -+ /* Remember the last physical block allocated, it will not be freed */ -+ psDevInfo->psTrampoline = pasTrampoline[i]; -+ } -+ -+fail: -+ /* free all unused allocations */ -+ for (j = 0; j < i; j++) -+ { -+ DevPhysMemFree(psDeviceNode, -+#if defined(PDUMP) -+ pasTrampoline[j]->hPdumpPages, -+#endif -+ &pasTrampoline[j]->sPages); -+ OSFreeMem(pasTrampoline[j]); -+ } -+ -+ return eError; -+} -+ -+#undef RANGES_OVERLAP -+#endif -+ -+PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_DEVMEM_SIZE_T uiFWCodeLen, -+ IMG_DEVMEM_SIZE_T uiFWDataLen, -+ IMG_DEVMEM_SIZE_T uiFWCorememCodeLen, -+ IMG_DEVMEM_SIZE_T uiFWCorememDataLen) -+{ -+ PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_ERROR eError; -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ IMG_DEVMEM_SIZE_T uiDummyLen; -+ DEVMEM_MEMDESC *psDummyMemDesc = NULL; -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && -+ (RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) == 32)) -+ { -+ eError = RGXAllocTrampoline(psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Failed to allocate trampoline region (%u)", -+ eError)); -+ goto failTrampolineMemDescAlloc; -+ } -+ } -+#endif -+ -+ /* -+ * Set up Allocation for FW code section -+ */ -+ uiMemAllocFlags = RGX_FWCODEDATA_ALLOCFLAGS | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE); -+ eError = RGXAllocateFWMemoryRegion(psDeviceNode, -+ uiFWCodeLen, -+ uiMemAllocFlags, -+ "FwExCodeRegion", -+ &psDevInfo->psRGXFWCodeMemDesc); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Failed to allocate fw code mem (%u)", -+ eError)); -+ goto failFWCodeMemDescAlloc; -+ } -+ -+ eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc, -+ 
&psDevInfo->sFWCodeDevVAddrBase); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Failed to acquire devVAddr for fw code mem (%u)", -+ eError)); -+ goto failFWCodeMemDescAqDevVirt; -+ } -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) || (PVRSRV_VZ_MODE_IS(GUEST)))) -+#endif -+ { -+ /* -+ * The FW code must be the first allocation in the firmware heap, otherwise -+ * the bootloader will not work (the FW will not be able to find the bootloader). -+ */ -+ PVR_ASSERT(psDevInfo->sFWCodeDevVAddrBase.uiAddr == RGX_FIRMWARE_RAW_HEAP_BASE); -+ } -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ /* -+ * Allocate Dummy Pages so that Data segment allocation gets the same -+ * device virtual address as specified in MIPS firmware linker script -+ */ -+ uiDummyLen = RGXGetFWImageSectionMaxSize(NULL, MIPS_CODE) + -+ RGXGetFWImageSectionMaxSize(NULL, MIPS_EXCEPTIONS_CODE) + -+ RGXGetFWImageSectionMaxSize(NULL, MIPS_BOOT_CODE) - -+ uiFWCodeLen; /* code actual size */ -+ -+ if (uiDummyLen > 0) -+ { -+ eError = DevmemFwAllocateExportable(psDeviceNode, -+ uiDummyLen, -+ OSGetPageSize(), -+ uiMemAllocFlags, -+ "FwExDummyPages", -+ &psDummyMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Failed to allocate fw dummy mem (%u)", -+ eError)); -+ goto failDummyMemDescAlloc; -+ } -+ } -+ } -+#endif -+ -+ /* -+ * Set up Allocation for FW data section -+ */ -+ eError = RGXAllocateFWMemoryRegion(psDeviceNode, -+ uiFWDataLen, -+ RGX_FWCODEDATA_ALLOCFLAGS | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA), -+ "FwExDataRegion", -+ &psDevInfo->psRGXFWDataMemDesc); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Failed to allocate fw data mem (%u)", -+ eError)); -+ goto failFWDataMemDescAlloc; -+ } -+ -+ eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWDataMemDesc, -+ &psDevInfo->sFWDataDevVAddrBase); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Failed to acquire devVAddr for fw data mem (%u)", -+ eError)); -+ goto failFWDataMemDescAqDevVirt; -+ } -+ -+ if (uiFWCorememCodeLen != 0) -+ { -+ /* -+ * Set up Allocation for FW coremem code section -+ */ -+ uiMemAllocFlags = (RGX_FWCODEDATA_ALLOCFLAGS | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE)) & -+ ~PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE; -+ eError = RGXAllocateFWMemoryRegion(psDeviceNode, -+ uiFWCorememCodeLen, -+ uiMemAllocFlags, -+ "FwExCorememCodeRegion", -+ &psDevInfo->psRGXFWCorememCodeMemDesc); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Failed to allocate fw coremem code mem, size: %" IMG_INT64_FMTSPECd ", flags: %" PVRSRV_MEMALLOCFLAGS_FMTSPEC " (%u)", -+ uiFWCorememCodeLen, uiMemAllocFlags, eError)); -+ goto failFWCorememCodeMemDescAlloc; -+ } -+ -+ eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, -+ &psDevInfo->sFWCorememCodeDevVAddrBase); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Failed to acquire devVAddr for fw coremem mem code (%u)", -+ eError)); -+ goto failFWCorememCodeMemDescAqDevVirt; -+ } -+ -+ eError = RGXSetFirmwareAddress(&psDevInfo->sFWCorememCodeFWAddr, -+ psDevInfo->psRGXFWCorememCodeMemDesc, -+ 0, RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", failFWCorememCodeMemDescFwAddr); -+ } -+ else -+ { -+ psDevInfo->sFWCorememCodeDevVAddrBase.uiAddr = 0; -+ psDevInfo->sFWCorememCodeFWAddr.ui32Addr = 0; -+ } -+ -+ if (uiFWCorememDataLen != 0) -+ { -+ /* -+ * Set up Allocation for 
FW coremem data section -+ */ -+ uiMemAllocFlags = RGX_FWCODEDATA_ALLOCFLAGS | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA); -+ eError = RGXAllocateFWMemoryRegion(psDeviceNode, -+ uiFWCorememDataLen, -+ uiMemAllocFlags, -+ "FwExCorememDataRegion", -+ &psDevInfo->psRGXFWIfCorememDataStoreMemDesc); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Failed to allocate fw coremem data mem, " -+ "size: %" IMG_INT64_FMTSPECd ", flags: %" PVRSRV_MEMALLOCFLAGS_FMTSPEC " (%u)", -+ uiFWCorememDataLen, -+ uiMemAllocFlags, -+ eError)); -+ goto failFWCorememDataMemDescAlloc; -+ } -+ -+ eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, -+ &psDevInfo->sFWCorememDataStoreDevVAddrBase); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Failed to acquire devVAddr for fw coremem mem data (%u)", -+ eError)); -+ goto failFWCorememDataMemDescAqDevVirt; -+ } -+ -+ eError = RGXSetFirmwareAddress(&psDevInfo->sFWCorememDataStoreFWAddr, -+ psDevInfo->psRGXFWIfCorememDataStoreMemDesc, -+ 0, RFW_FWADDR_NOREF_FLAG); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", failFWCorememDataMemDescFwAddr); -+ } -+ else -+ { -+ psDevInfo->sFWCorememDataStoreDevVAddrBase.uiAddr = 0; -+ psDevInfo->sFWCorememDataStoreFWAddr.ui32Addr = 0; -+ } -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ /* Free Dummy Pages */ -+ if (psDummyMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psDummyMemDesc); -+ } -+#endif -+ -+ return PVRSRV_OK; -+ -+failFWCorememDataMemDescFwAddr: -+failFWCorememDataMemDescAqDevVirt: -+ if (uiFWCorememDataLen != 0) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc); -+ psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL; -+ } -+failFWCorememDataMemDescAlloc: -+failFWCorememCodeMemDescFwAddr: -+failFWCorememCodeMemDescAqDevVirt: -+ if (uiFWCorememCodeLen != 0) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCorememCodeMemDesc); -+ psDevInfo->psRGXFWCorememCodeMemDesc = NULL; -+ } -+failFWCorememCodeMemDescAlloc: -+failFWDataMemDescAqDevVirt: -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc); -+ psDevInfo->psRGXFWDataMemDesc = NULL; -+failFWDataMemDescAlloc: -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ if (psDummyMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psDummyMemDesc); -+ } -+failDummyMemDescAlloc: -+#endif -+failFWCodeMemDescAqDevVirt: -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc); -+ psDevInfo->psRGXFWCodeMemDesc = NULL; -+failFWCodeMemDescAlloc: -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && -+ (RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) == 32)) -+ { -+ RGXFreeTrampoline(psDeviceNode); -+ } -+failTrampolineMemDescAlloc: -+#endif -+ return eError; -+} -+ -+/* -+ AppHint parameter interface -+ */ -+static -+PVRSRV_ERROR RGXFWTraceQueryFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *psPrivate, -+ IMG_UINT32 *pui32Value) -+{ -+ PVRSRV_ERROR eResult; -+ -+ eResult = PVRSRVRGXFWDebugQueryFWLogKM(NULL, psDeviceNode, pui32Value); -+ *pui32Value &= RGXFWIF_LOG_TYPE_GROUP_MASK; -+ return eResult; -+} -+ -+static -+PVRSRV_ERROR RGXFWTraceQueryLogType(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *psPrivate, -+ IMG_UINT32 *pui32Value) -+{ -+ PVRSRV_ERROR eResult; -+ -+ eResult = PVRSRVRGXFWDebugQueryFWLogKM(NULL, psDeviceNode, pui32Value); -+ if (PVRSRV_OK == eResult) -+ { -+ if (*pui32Value & RGXFWIF_LOG_TYPE_TRACE) -+ { -+ *pui32Value = 0; /* Trace */ -+ } -+ else -+ { -+ *pui32Value = 1; /* 
TBI */ -+ } -+ } -+ return eResult; -+} -+ -+static -+PVRSRV_ERROR RGXFWTraceSetFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *psPrivate, -+ IMG_UINT32 ui32Value) -+{ -+ PVRSRV_ERROR eResult; -+ IMG_UINT32 ui32RGXFWLogType; -+ -+ eResult = RGXFWTraceQueryLogType(psDeviceNode, NULL, &ui32RGXFWLogType); -+ if (PVRSRV_OK == eResult) -+ { -+ if (0 == ui32RGXFWLogType) -+ { -+ BITMASK_SET(ui32Value, RGXFWIF_LOG_TYPE_TRACE); -+ } -+ eResult = PVRSRVRGXFWDebugSetFWLogKM(NULL, psDeviceNode, ui32Value); -+ } -+ return eResult; -+} -+ -+static -+PVRSRV_ERROR RGXFWTraceSetLogType(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *psPrivate, -+ IMG_UINT32 ui32Value) -+{ -+ PVRSRV_ERROR eResult; -+ IMG_UINT32 ui32RGXFWLogType = ui32Value; -+ -+ eResult = RGXFWTraceQueryFilter(psDeviceNode, NULL, &ui32RGXFWLogType); -+ if (PVRSRV_OK != eResult) -+ { -+ return eResult; -+ } -+ -+ /* 0 - trace, 1 - tbi */ -+ if (0 == ui32Value) -+ { -+ BITMASK_SET(ui32RGXFWLogType, RGXFWIF_LOG_TYPE_TRACE); -+ } -+#if defined(SUPPORT_TBI_INTERFACE) -+ else if (1 == ui32Value) -+ { -+ BITMASK_UNSET(ui32RGXFWLogType, RGXFWIF_LOG_TYPE_TRACE); -+ } -+#endif -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid parameter %u specified to set FW log type AppHint.", -+ __func__, ui32Value)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ eResult = PVRSRVRGXFWDebugSetFWLogKM(NULL, psDeviceNode, ui32RGXFWLogType); -+ return eResult; -+} -+ -+#if defined(DEBUG) -+static -+PVRSRV_ERROR RGXQueryFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *psPrivate, -+ IMG_BOOL *pbValue) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; -+ -+ *pbValue = (PVRSRV_MEMALLOCFLAG_POISON_ON_FREE == psDevInfo->uiFWPoisonOnFreeFlag) -+ ? IMG_TRUE -+ : IMG_FALSE; -+ return PVRSRV_OK; -+} -+ -+static -+PVRSRV_ERROR RGXSetFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *psPrivate, -+ IMG_BOOL bValue) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; -+ psDevInfo->uiFWPoisonOnFreeFlag = bValue -+ ? 
PVRSRV_MEMALLOCFLAG_POISON_ON_FREE -+ : 0ULL; -+ -+ return PVRSRV_OK; -+} -+#endif -+ -+/* -+ * RGXInitFirmware -+ */ -+PVRSRV_ERROR -+RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_BOOL bEnableSignatureChecks, -+ IMG_UINT32 ui32SignatureChecksBufSize, -+ IMG_UINT32 ui32HWPerfFWBufSizeKB, -+ IMG_UINT64 ui64HWPerfFilter, -+ IMG_UINT32 ui32ConfigFlags, -+ IMG_UINT32 ui32LogType, -+ IMG_UINT32 ui32FilterFlags, -+ IMG_UINT32 ui32JonesDisableMask, -+ IMG_UINT32 ui32HWRDebugDumpLimit, -+ IMG_UINT32 ui32HWPerfCountersDataSize, -+ IMG_UINT32 *pui32TPUTrilinearFracMask, -+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf, -+ FW_PERF_CONF eFirmwarePerf, -+ IMG_UINT32 ui32KCCBSizeLog2, -+ IMG_UINT32 ui32ConfigFlagsExt, -+ IMG_UINT32 ui32FwOsCfgFlags) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; -+#if defined(DEBUG) -+ void *pvAppHintState = NULL; -+ IMG_BOOL bAppHintDefault; -+ IMG_BOOL bEnableFWPoisonOnFree = IMG_FALSE; -+#endif -+ -+ eError = RGXSetupFirmware(psDeviceNode, -+ bEnableSignatureChecks, -+ ui32SignatureChecksBufSize, -+ ui32HWPerfFWBufSizeKB, -+ ui64HWPerfFilter, -+ ui32ConfigFlags, -+ ui32ConfigFlagsExt, -+ ui32FwOsCfgFlags, -+ ui32LogType, -+ ui32FilterFlags, -+ ui32JonesDisableMask, -+ ui32HWRDebugDumpLimit, -+ ui32HWPerfCountersDataSize, -+ pui32TPUTrilinearFracMask, -+ eRGXRDPowerIslandingConf, -+ eFirmwarePerf, -+ ui32KCCBSizeLog2); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRGXInitFirmwareKM: RGXSetupFirmware failed (%u)", -+ eError)); -+ goto failed_init_firmware; -+ } -+ -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableLogGroup, -+ RGXFWTraceQueryFilter, -+ RGXFWTraceSetFilter, -+ psDeviceNode, -+ NULL); -+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_FirmwareLogType, -+ RGXFWTraceQueryLogType, -+ RGXFWTraceSetLogType, -+ psDeviceNode, -+ NULL); -+ } -+ -+#if defined(DEBUG) -+ OSCreateAppHintState(&pvAppHintState); -+ -+ bAppHintDefault = PVRSRV_APPHINT_ENABLEFWPOISONONFREE; -+ OSGetAppHintBOOL(psDeviceNode, -+ pvAppHintState, -+ EnableFWPoisonOnFree, -+ &bAppHintDefault, -+ &bEnableFWPoisonOnFree); -+ -+ OSFreeAppHintState(pvAppHintState); -+ -+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFWPoisonOnFree, -+ RGXQueryFWPoisonOnFree, -+ RGXSetFWPoisonOnFree, -+ psDeviceNode, -+ NULL); -+ -+ psDevInfo->uiFWPoisonOnFreeFlag = bEnableFWPoisonOnFree -+ ? 
PVRSRV_MEMALLOCFLAG_POISON_ON_FREE -+ : 0ULL; -+#else -+ psDevInfo->uiFWPoisonOnFreeFlag = 0ULL; -+#endif -+ -+ psDevInfo->ui32ClockSource = PVRSRV_APPHINT_TIMECORRCLOCK; -+ psDevInfo->ui32LastClockSource = PVRSRV_APPHINT_TIMECORRCLOCK; -+ -+ return PVRSRV_OK; -+ -+failed_init_firmware: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+/* See device.h for function declaration */ -+static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEM_MEMDESC **psMemDesc, -+ IMG_UINT32 *puiSyncPrimVAddr, -+ IMG_UINT32 *puiSyncPrimBlockSize) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ PVRSRV_ERROR eError; -+ RGXFWIF_DEV_VIRTADDR pFirmwareAddr; -+ IMG_DEVMEM_SIZE_T uiUFOBlockSize = sizeof(IMG_UINT32); -+ IMG_DEVMEM_ALIGN_T ui32UFOBlockAlign = sizeof(IMG_UINT32); -+ IMG_UINT32 ui32CoherencyFlag = 0; -+ -+ psDevInfo = psDeviceNode->pvDevice; -+ -+ /* Size and align are 'expanded' because we request an Exportalign allocation */ -+ eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap), -+ &uiUFOBlockSize, -+ &ui32UFOBlockAlign); -+ -+ if (eError != PVRSRV_OK) -+ { -+ goto e0; -+ } -+ -+ if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) && -+ PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig)) -+ { -+ ui32CoherencyFlag = PVRSRV_MEMALLOCFLAG_CACHE_COHERENT; -+ } -+ else -+ { -+ ui32CoherencyFlag = PVRSRV_MEMALLOCFLAG_UNCACHED; -+ } -+ -+ eError = DevmemFwAllocateExportable(psDeviceNode, -+ uiUFOBlockSize, -+ ui32UFOBlockAlign, -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) | -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | -+ ui32CoherencyFlag, -+ "FwExUFOBlock", -+ psMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ goto e0; -+ } -+ -+ eError = RGXSetFirmwareAddress(&pFirmwareAddr, *psMemDesc, 0, RFW_FWADDR_FLAG_NONE); -+ PVR_GOTO_IF_ERROR(eError, e1); -+ -+ *puiSyncPrimVAddr = pFirmwareAddr.ui32Addr; -+ *puiSyncPrimBlockSize = TRUNCATE_64BITS_TO_32BITS(uiUFOBlockSize); -+ -+ return PVRSRV_OK; -+ -+e1: -+ DevmemFwUnmapAndFree(psDevInfo, *psMemDesc); -+e0: -+ return eError; -+} -+ -+/* See device.h for function declaration */ -+static void RGXFreeUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEM_MEMDESC *psMemDesc) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ /* -+ If the system has snooping of the device cache then the UFO block -+ might be in the cache so we need to flush it out before freeing -+ the memory -+ -+ When the device is being shutdown/destroyed we don't care anymore. -+ Several necessary data structures to issue a flush were destroyed -+ already. -+ */ -+ if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) && -+ psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_DEINIT && -+ psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_DEINIT_POWERED_OFF) -+ { -+ RGXFWIF_KCCB_CMD sFlushInvalCmd; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32kCCBCommandSlot; -+ -+ /* Schedule the SLC flush command ... 
*/ -+#if defined(PDUMP) -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "Submit SLC flush and invalidate"); -+#endif -+ sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL; -+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE; -+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE; -+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0; -+ -+ eError = RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, -+ &sFlushInvalCmd, -+ PDUMP_FLAGS_CONTINUOUS, -+ &ui32kCCBCommandSlot); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to schedule SLC flush command with error (%u)", -+ __func__, -+ eError)); -+ } -+ else -+ { -+ /* Wait for the SLC flush to complete */ -+ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: SLC flush and invalidate aborted with error (%u)", -+ __func__, -+ eError)); -+ } -+ else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & -+ RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); -+ } -+ } -+ } -+ -+ RGXUnsetFirmwareAddress(psMemDesc); -+ DevmemFwUnmapAndFree(psDevInfo, psMemDesc); -+} -+ -+static void DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; -+ -+ psDevInfo->bDevInit2Done = IMG_FALSE; -+ -+#if defined(SUPPORT_SECURE_ALLOC_KM) -+ if (psDevInfo->psGenHeapSecMem != NULL) -+ { -+ OSFreeSecBuf(psDevInfo->psGenHeapSecMem); -+ } -+#endif -+ -+#if defined(RGX_FEATURE_COMPUTE_ONLY_BIT_MASK) -+ if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE_ONLY)) -+#endif -+ { -+ if ((psDevInfo->hTQUSCSharedMem != NULL) && -+ (psDevInfo->hTQCLISharedMem != NULL)) -+ { -+ PVRSRVTQUnloadShaders(psDeviceNode); -+ } -+ } -+ -+#if !defined(NO_HARDWARE) -+ if (psDevInfo->pvLISRData != NULL) -+ { -+ (void) SysUninstallDeviceLISR(psDevInfo->pvLISRData); -+ } -+ if (psDevInfo->pvMISRData != NULL) -+ { -+ (void) OSUninstallMISR(psDevInfo->pvMISRData); -+ } -+ if (psDevInfo->hProcessQueuesMISR != NULL) -+ { -+ (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR); -+ } -+ if (psDevInfo->pvAPMISRData != NULL) -+ { -+ (void) OSUninstallMISR(psDevInfo->pvAPMISRData); -+ } -+ if (psDeviceNode->hCmdCompNotify != NULL) -+ { -+ /* Cancel notifications to this device */ -+ PVRSRVUnregisterCmdCompleteNotify(psDeviceNode->hCmdCompNotify); -+ psDeviceNode->hCmdCompNotify = NULL; -+ } -+#endif /* !NO_HARDWARE */ -+ -+ /* Remove the device from the power manager */ -+ PVRSRVRemovePowerDevice(psDeviceNode); -+ -+ psDevInfo->pfnGetGpuUtilStats = NULL; -+ if (psDevInfo->hGPUUtilLock != NULL) -+ { -+ OSLockDestroy(psDevInfo->hGPUUtilLock); -+ } -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && -+ (psDevInfo->hNMILock != NULL)) -+ { -+ OSLockDestroy(psDevInfo->hNMILock); -+ } -+#endif -+ -+ if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) && -+ (psDevInfo->hMMUCtxUnregLock != NULL)) -+ { -+ OSLockDestroy(psDevInfo->hMMUCtxUnregLock); -+ } -+ -+ if (psDevInfo->hDebugFaultInfoLock != NULL) -+ { -+ OSLockDestroy(psDevInfo->hDebugFaultInfoLock); -+ } -+ -+ /* De-init Freelists/ZBuffers... 
*/ -+ if (psDevInfo->hLockFreeList != NULL) -+ { -+ OSLockDestroy(psDevInfo->hLockFreeList); -+ } -+ -+ if (psDevInfo->hLockZSBuffer != NULL) -+ { -+ OSLockDestroy(psDevInfo->hLockZSBuffer); -+ } -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* De-init work estimation lock */ -+ if (psDevInfo->hWorkEstLock != NULL) -+ { -+ OSLockDestroy(psDevInfo->hWorkEstLock); -+ } -+ } -+#endif -+ -+ /* Free DVFS Table */ -+ if (psDevInfo->psGpuDVFSTable != NULL) -+ { -+ OSFreeMem(psDevInfo->psGpuDVFSTable); -+ psDevInfo->psGpuDVFSTable = NULL; -+ } -+} -+ -+/* -+ DevDeInitRGX -+ */ -+PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; -+ PVRSRV_ERROR eError; -+ DEVICE_MEMORY_INFO *psDevMemoryInfo; -+ -+ if (!psDevInfo) -+ { -+ /* Can happen if DevInitRGX failed */ -+ PVR_DPF((PVR_DBG_ERROR, "DevDeInitRGX: Null DevInfo")); -+ return PVRSRV_OK; -+ } -+ -+ if (psDevInfo->psRGXFWIfOsInit) -+ { -+ KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); -+ KM_CONNECTION_CACHEOP(Os, FLUSH); -+ } -+ -+ DeviceDepBridgeDeInit(psDevInfo); -+ -+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) -+ OSLockDestroy(psDevInfo->hCounterDumpingLock); -+#endif -+ -+ RGXDeInitMultiCoreInfo(psDeviceNode); -+ -+ /* Unregister debug request notifiers first as they could depend on anything. */ -+ -+ RGXDebugDeinit(psDevInfo); -+ -+ /* De-initialise in reverse order, so stage 2 init is undone first. */ -+ if (psDevInfo->bDevInit2Done) -+ { -+ DevPart2DeInitRGX(psDeviceNode); -+ } -+ -+ /* Unregister MMU related stuff */ -+ eError = RGXMMUInit_Unregister(psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevDeInitRGX: Failed RGXMMUInit_Unregister (0x%x)", -+ eError)); -+ } -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ /* Unregister MMU related stuff */ -+ eError = RGXMipsMMUInit_Unregister(psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevDeInitRGX: Failed RGXMipsMMUInit_Unregister (0x%x)", -+ eError)); -+ } -+ } -+#endif -+ -+ /* UnMap Regs */ -+ if (psDevInfo->pvRegsBaseKM != NULL) -+ { -+#if !defined(NO_HARDWARE) -+ OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM, -+ psDevInfo->ui32RegSize); -+#endif /* !NO_HARDWARE */ -+ psDevInfo->pvRegsBaseKM = NULL; -+ } -+ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ if (psDevInfo->pvSecureRegsBaseKM != NULL) -+ { -+#if !defined(NO_HARDWARE) -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, HOST_SECURITY_VERSION) && -+ (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)) -+ { -+ /* undo the VA offset performed in RGXRegisterDevice() to allow the allocation to be unmapped */ -+ psDevInfo->pvSecureRegsBaseKM = (void __iomem *)((uintptr_t)psDevInfo->pvSecureRegsBaseKM + RGX_HOST_SECURE_REGBANK_OFFSET); -+ OSUnMapPhysToLin((void __force *) psDevInfo->pvSecureRegsBaseKM, RGX_HOST_SECURE_REGBANK_SIZE); -+ } -+#endif /* !NO_HARDWARE */ -+ psDevInfo->pvSecureRegsBaseKM = NULL; -+ } -+#endif -+ -+#if 0 /* not required at this time */ -+ if (psDevInfo->hTimer) -+ { -+ eError = OSRemoveTimer(psDevInfo->hTimer); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevDeInitRGX: Failed to remove timer")); -+ return eError; -+ } -+ psDevInfo->hTimer = NULL; -+ } -+#endif -+ -+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; -+ -+ RGXDeInitHeaps(psDevMemoryInfo); -+ -+ if (psDevInfo->psRGXFWCodeMemDesc) -+ { -+ /* Free fw 
code */ -+ PDUMPCOMMENT(psDeviceNode, "Freeing FW code memory"); -+ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc); -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc); -+ psDevInfo->psRGXFWCodeMemDesc = NULL; -+ } -+#if !defined(NO_HARDWARE) -+ else if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "No firmware code memory to free")); -+ } -+#endif /* !defined(NO_HARDWARE) */ -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && -+ (RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) == 32)) -+ { -+ if (psDevInfo->psTrampoline->sPages.u.pvHandle) -+ { -+ /* Free trampoline region */ -+ PDUMPCOMMENT(psDeviceNode, "Freeing trampoline memory"); -+ RGXFreeTrampoline(psDeviceNode); -+ } -+ } -+#endif -+ -+ if (psDevInfo->psRGXFWDataMemDesc) -+ { -+ /* Free fw data */ -+ PDUMPCOMMENT(psDeviceNode, "Freeing FW data memory"); -+ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWDataMemDesc); -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc); -+ psDevInfo->psRGXFWDataMemDesc = NULL; -+ } -+#if !defined(NO_HARDWARE) -+ else if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "No firmware data memory to free")); -+ } -+#endif /* !defined(NO_HARDWARE) */ -+ -+ if (psDevInfo->psRGXFWCorememCodeMemDesc) -+ { -+ /* Free fw core mem code */ -+ PDUMPCOMMENT(psDeviceNode, "Freeing FW coremem code memory"); -+ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc); -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCorememCodeMemDesc); -+ psDevInfo->psRGXFWCorememCodeMemDesc = NULL; -+ } -+ -+ if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc) -+ { -+ /* Free fw core mem data */ -+ PDUMPCOMMENT(psDeviceNode, "Freeing FW coremem data store memory"); -+ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc); -+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc); -+ psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL; -+ } -+ -+ /* -+ Free the firmware allocations. 
-+ */ -+ RGXFreeFirmware(psDevInfo); -+ -+ /* De-initialise non-device specific (TL) users of RGX device memory */ -+ { -+ IMG_UINT32 i; -+ for (i = 0; i < RGX_HWPERF_L2_STREAM_LAST; i++) -+ { -+ RGXHWPerfDeinitL2Stream(psDevInfo, i); -+ } -+ -+ RGXHWPerfDeinit(psDevInfo); -+ } -+ -+ RGXHWPerfHostDeInit(psDevInfo); -+ eError = HTBDeInit(); -+ PVR_LOG_IF_ERROR(eError, "HTBDeInit"); -+ -+ RGXDeInitDestroyFWKernelMemoryContext(psDeviceNode); -+ -+ /* destroy the stalled CCB locks */ -+ OSLockDestroy(psDevInfo->hCCBRecoveryLock); -+ OSLockDestroy(psDevInfo->hCCBStallCheckLock); -+ -+ /* destroy the context list locks */ -+ OSLockDestroy(psDevInfo->sRegCongfig.hLock); -+ OSLockDestroy(psDevInfo->hBPLock); -+ OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock); -+ OSWRLockDestroy(psDevInfo->hRenderCtxListLock); -+ OSWRLockDestroy(psDevInfo->hComputeCtxListLock); -+ OSWRLockDestroy(psDevInfo->hTransferCtxListLock); -+ OSWRLockDestroy(psDevInfo->hTDMCtxListLock); -+ OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock); -+ OSWRLockDestroy(psDevInfo->hMemoryCtxListLock); -+ OSSpinLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList); -+ OSWRLockDestroy(psDevInfo->hCommonCtxtListLock); -+ -+ /* Free device BVNC string */ -+ if (NULL != psDevInfo->sDevFeatureCfg.pszBVNCString) -+ { -+ OSFreeMem(psDevInfo->sDevFeatureCfg.pszBVNCString); -+ } -+ -+ /* DeAllocate devinfo */ -+ OSFreeMem(psDevInfo); -+ -+ psDeviceNode->pvDevice = NULL; -+ -+ return PVRSRV_OK; -+} -+ -+#if defined(PDUMP) -+static -+PVRSRV_ERROR RGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice); -+ -+ psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE; -+ -+ return PVRSRV_OK; -+} -+#endif /* PDUMP */ -+ -+/* Takes a log2 page size parameter and calculates a suitable page size -+ * for the RGX heaps. Returns 0 if parameter is wrong.*/ -+static INLINE IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize) -+{ -+ IMG_BOOL bFound = IMG_FALSE; -+ -+ /* OS page shift must be at least RGX_HEAP_4KB_PAGE_SHIFT, -+ * max RGX_HEAP_2MB_PAGE_SHIFT, non-zero and a power of two*/ -+ if (uiLog2PageSize == 0U || -+ (uiLog2PageSize < RGX_HEAP_4KB_PAGE_SHIFT) || -+ (uiLog2PageSize > RGX_HEAP_2MB_PAGE_SHIFT)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Provided incompatible log2 page size %u", -+ __func__, -+ uiLog2PageSize)); -+ PVR_ASSERT(0); -+ return 0; -+ } -+ -+ do -+ { -+ switch (uiLog2PageSize) -+ { -+ case RGX_HEAP_4KB_PAGE_SHIFT: -+ case RGX_HEAP_16KB_PAGE_SHIFT: -+ case RGX_HEAP_64KB_PAGE_SHIFT: -+ case RGX_HEAP_256KB_PAGE_SHIFT: -+ case RGX_HEAP_1MB_PAGE_SHIFT: -+ case RGX_HEAP_2MB_PAGE_SHIFT: -+ /* All good, RGX page size equals given page size -+ * => use it as default for heaps */ -+ bFound = IMG_TRUE; -+ break; -+ default: -+ /* We have to fall back to a smaller device -+ * page size than given page size because there -+ * is no exact match for any supported size. 
*/ -+ uiLog2PageSize -= 1U; -+ break; -+ } -+ } while (!bFound); -+ -+ return uiLog2PageSize; -+} -+ -+/* First 16-bits define possible types */ -+#define HEAP_INST_VALUE_MASK (0xFFFF) -+#define HEAP_INST_DEFAULT_VALUE (1U) /* Used to show either the heap is always instantiated by default (pfn = NULL) -+ OR -+ that this is the default configuration of the heap with an Alternative BRN */ -+#define HEAP_INST_BRN_DEP_VALUE (2U) /* The inclusion of this heap is dependent on the brn being present */ -+#define HEAP_INST_FEAT_DEP_VALUE (3U) /* The inclusion of this heap is dependent on the feature being present */ -+#define HEAP_INST_BRN_ALT_VALUE (4U) /* This entry is a possible alternative to the default determined by a BRN */ -+#define HEAP_INST_FEAT_ALT_VALUE (5U) /* The entry is a possible alternative to the default determined by a Feature define */ -+ -+/* Latter 16-bits define other flags we may need */ -+#define HEAP_INST_NON4K_FLAG (1 << 16U) /* This is a possible NON4K Entry and we should use the device -+ NON4K size when instantiating */ -+ -+typedef struct RGX_HEAP_INFO_TAG RGX_HEAP_INFO; // Forward declaration -+typedef IMG_BOOL (*PFN_IS_PRESENT)(PVRSRV_RGXDEV_INFO*, const RGX_HEAP_INFO*); -+ -+struct RGX_HEAP_INFO_TAG -+{ -+ IMG_CHAR *pszName; -+ IMG_UINT64 ui64HeapBase; -+ IMG_DEVMEM_SIZE_T uiHeapLength; -+ IMG_DEVMEM_SIZE_T uiHeapReservedRegionLength; -+ IMG_UINT32 ui32Log2ImportAlignment; -+ PFN_IS_PRESENT pfnIsHeapPresent; -+ PFN_HEAP_INIT pfnInit; -+ PFN_HEAP_DEINIT pfnDeInit; -+ IMG_UINT32 ui32HeapInstanceFlags; -+}; -+ -+/* RGX_GENERAL_HEAP_RESERVED_TOTAL_BYTES is the total amount of reserved space, to be specified in gasRGXHeapLayoutApp[] */ -+#define RGX_GENERAL_HEAP_RESERVED_TOTAL_BYTES (RGX_HEAP_UM_GENERAL_RESERVED_SIZE + RGXFWIF_KM_GENERAL_HEAP_RESERVED_SIZE) -+ -+#if defined(SUPPORT_SECURE_ALLOC_KM) -+/* Private data struct for general heap. */ -+typedef struct RGX_GENERAL_HEAP_DATA_TAG -+{ -+ DEVMEMINT_RESERVATION *psSecMemReservation; -+ DEVMEMINT_MAPPING *psSecMemMapping; -+} RGX_GENERAL_HEAP_DATA; -+ -+/* Init callback function for general heap. */ -+static PVRSRV_ERROR GeneralHeapInit(PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_HEAP *psDevmemHeap, -+ IMG_HANDLE *phPrivData) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ RGX_GENERAL_HEAP_DATA *psHeapData; -+ IMG_DEV_VIRTADDR sCarveOutAddr; -+ PVRSRV_ERROR eError; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode, "psDeviceNode"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDevmemHeap, "psDevmemHeap"); -+ PVR_LOG_RETURN_IF_INVALID_PARAM(phPrivData, "phPrivData"); -+ -+ psDevInfo = psDeviceNode->pvDevice; -+ -+ psHeapData = OSAllocMem(sizeof(*psHeapData)); -+ PVR_LOG_RETURN_IF_NOMEM(psHeapData, "psHeapData"); -+ -+ /* Map the per device secure mem PMR allocation to the general devmem heap carveout. 
*/ -+ sCarveOutAddr = DevmemIntHeapGetBaseAddr(psDevmemHeap); -+ sCarveOutAddr.uiAddr += RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET; -+ -+ eError = DevmemIntReserveRange(psDevmemHeap, -+ sCarveOutAddr, -+ RGXFWIF_KM_GENERAL_HEAP_RESERVED_SIZE, -+ &psHeapData->psSecMemReservation); -+ PVR_GOTO_IF_ERROR(eError, ErrorFreeHeapData); -+ -+ eError = DevmemIntMapPMR(psDevmemHeap, psHeapData->psSecMemReservation, psDevInfo->psGenHeapSecMem, -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE -+ | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE, -+ &psHeapData->psSecMemMapping); -+ PVR_GOTO_IF_ERROR(eError, ErrorUnreserve); -+ -+ *phPrivData = (IMG_HANDLE)psHeapData; -+ -+ return PVRSRV_OK; -+ -+ErrorUnreserve: -+ DevmemIntUnreserveRange(psHeapData->psSecMemReservation); -+ErrorFreeHeapData: -+ OSFreeMem(psHeapData); -+ -+ return eError; -+} -+ -+/* Deinit callback function for general heap. */ -+static void GeneralHeapDeInit(IMG_HANDLE hPrivData) -+{ -+ RGX_GENERAL_HEAP_DATA *psHeapData = (RGX_GENERAL_HEAP_DATA*)hPrivData; -+ -+ PVR_ASSERT(hPrivData); -+ -+ DevmemIntUnmapPMR(psHeapData->psSecMemMapping); -+ DevmemIntUnreserveRange(psHeapData->psSecMemReservation); -+ -+ OSFreeMem(psHeapData); -+} -+#else -+/* Callbacks not used */ -+#define GeneralHeapInit NULL -+#define GeneralHeapDeInit NULL -+#endif -+ -+/* Feature Present function prototypes */ -+ -+static IMG_BOOL BRN65273IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) -+{ -+#if defined(FIX_HW_BRN_65273_BIT_MASK) -+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) -+ { -+ return (((pksHeapInfo->ui32HeapInstanceFlags & HEAP_INST_VALUE_MASK) == HEAP_INST_BRN_ALT_VALUE) || -+ ((pksHeapInfo->ui32HeapInstanceFlags & HEAP_INST_VALUE_MASK) == HEAP_INST_BRN_DEP_VALUE)) ? -+ IMG_TRUE : IMG_FALSE; -+ } -+ else -+#else -+ PVR_UNREFERENCED_PARAMETER(psDevInfo); -+#endif -+ { -+ return ((pksHeapInfo->ui32HeapInstanceFlags & HEAP_INST_VALUE_MASK) == HEAP_INST_DEFAULT_VALUE) ? 
IMG_TRUE : IMG_FALSE; -+ } -+} -+ -+static IMG_BOOL BRN63142IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) -+{ -+ PVR_UNREFERENCED_PARAMETER(pksHeapInfo); -+ -+#if defined(FIX_HW_BRN_63142_BIT_MASK) -+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 63142)) -+ { -+ PVR_ASSERT((pksHeapInfo->ui64HeapBase & IMG_UINT64_C(0x3FFFFFFFF)) + -+ pksHeapInfo->uiHeapLength == IMG_UINT64_C(0x400000000)); -+ -+ return IMG_TRUE; -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(psDevInfo); -+#endif -+ -+ return IMG_FALSE; -+} -+ -+static IMG_BOOL FBCDescriptorIsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) -+{ -+ PVR_UNREFERENCED_PARAMETER(pksHeapInfo); -+ -+ if (RGX_GET_FEATURE_VALUE(psDevInfo, FBC_MAX_DEFAULT_DESCRIPTORS)) -+ { -+ return IMG_TRUE; -+ } -+ -+ return IMG_FALSE; -+} -+ -+static IMG_BOOL FBCLargeDescriptorIsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) -+{ -+ PVR_UNREFERENCED_PARAMETER(pksHeapInfo); -+ -+ if (RGX_GET_FEATURE_VALUE(psDevInfo, FBC_MAX_LARGE_DESCRIPTORS)) -+ { -+ return IMG_TRUE; -+ } -+ -+ return IMG_FALSE; -+} -+ -+static IMG_BOOL TextureStateIsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) -+{ -+ PVR_UNREFERENCED_PARAMETER(pksHeapInfo); -+#if defined(RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, BINDLESS_IMAGE_AND_TEXTURE_STATE)) -+ { -+ return IMG_TRUE; -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(psDevInfo); -+#endif -+ return IMG_FALSE; -+} -+ -+static IMG_BOOL SignalSnoopingIsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) -+{ -+ PVR_UNREFERENCED_PARAMETER(pksHeapInfo); -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SIGNAL_SNOOPING)) -+ { -+ return IMG_TRUE; -+ } -+ -+ return IMG_FALSE; -+} -+ -+static IMG_BOOL FWBRN65101IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) -+{ -+ /* Used to determine the correct table row to instantiate as a heap by checking -+ * the Heap size and base at run time VS the current table instance -+ */ -+ IMG_UINT64 ui64MainSubHeapSize; -+ -+ /* MIPS Firmware must reserve some space in its Host/Native heap for GPU memory mappings */ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && (!PVRSRV_VZ_MODE_IS(GUEST))) -+ { -+#if defined(FIX_HW_BRN_65101_BIT_MASK) -+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65101)) -+ { -+ ui64MainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101; -+ } -+ else -+#endif -+ { -+ ui64MainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL; -+ } -+ } -+ else -+ { -+ ui64MainSubHeapSize = RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE; -+ } -+ -+ /* Determine if we should include this entry based upon previous checks */ -+ return (pksHeapInfo->uiHeapLength == ui64MainSubHeapSize && -+ pksHeapInfo->ui64HeapBase == RGX_FIRMWARE_MAIN_HEAP_BASE) ? -+ IMG_TRUE : IMG_FALSE; -+} -+ -+static IMG_BOOL FWVZConfigPresent(PVRSRV_RGXDEV_INFO* psDevInfo, const RGX_HEAP_INFO* pksHeapInfo) -+{ -+ /* Used to determine the correct table row to instantiate as a heap by checking -+ * the Heap base at run time VS the current table instance -+ */ -+ -+ /* Determine if we should include this entry based upon previous checks */ -+ return (pksHeapInfo->ui64HeapBase == RGX_FIRMWARE_CONFIG_HEAP_BASE) ? IMG_TRUE : IMG_FALSE; -+} -+ -+/* Blueprint array. 
note: not all heaps are available to clients*/ -+ -+static const RGX_HEAP_INFO gasRGXHeapLayoutApp[] = -+{ -+ /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnPresent pfnInit pfnDeInit HeapInstanceFlags */ -+ {RGX_GENERAL_SVM_HEAP_IDENT, RGX_GENERAL_SVM_HEAP_BASE, RGX_GENERAL_SVM_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, -+ {RGX_GENERAL_HEAP_IDENT, RGX_GENERAL_HEAP_BASE, RGX_GENERAL_HEAP_SIZE, RGX_GENERAL_HEAP_RESERVED_TOTAL_BYTES, 0, BRN65273IsPresent, GeneralHeapInit, GeneralHeapDeInit, HEAP_INST_DEFAULT_VALUE }, -+ {RGX_GENERAL_HEAP_IDENT, RGX_GENERAL_BRN_65273_HEAP_BASE, RGX_GENERAL_BRN_65273_HEAP_SIZE, RGX_GENERAL_HEAP_RESERVED_TOTAL_BYTES, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE }, -+ {RGX_GENERAL_NON4K_HEAP_IDENT, RGX_GENERAL_NON4K_HEAP_BASE, RGX_GENERAL_NON4K_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE | HEAP_INST_NON4K_FLAG }, -+ {RGX_GENERAL_NON4K_HEAP_IDENT, RGX_GENERAL_NON4K_BRN_65273_HEAP_BASE, RGX_GENERAL_NON4K_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE | HEAP_INST_NON4K_FLAG }, -+ {RGX_PDSCODEDATA_HEAP_IDENT, RGX_PDSCODEDATA_HEAP_BASE, RGX_PDSCODEDATA_HEAP_SIZE, RGX_HEAP_PDS_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, -+ {RGX_PDSCODEDATA_HEAP_IDENT, RGX_PDSCODEDATA_BRN_65273_HEAP_BASE, RGX_PDSCODEDATA_BRN_65273_HEAP_SIZE, RGX_HEAP_PDS_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE }, -+ {RGX_RGNHDR_BRN_63142_HEAP_IDENT, RGX_RGNHDR_BRN_63142_HEAP_BASE, RGX_RGNHDR_BRN_63142_HEAP_SIZE, 0, 0, BRN63142IsPresent, NULL, NULL, HEAP_INST_BRN_DEP_VALUE }, -+ {RGX_USCCODE_HEAP_IDENT, RGX_USCCODE_HEAP_BASE, RGX_USCCODE_HEAP_SIZE, RGX_HEAP_USC_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, -+ {RGX_USCCODE_HEAP_IDENT, RGX_USCCODE_BRN_65273_HEAP_BASE, RGX_USCCODE_BRN_65273_HEAP_SIZE, RGX_HEAP_USC_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE }, -+ {RGX_TQ3DPARAMETERS_HEAP_IDENT, RGX_TQ3DPARAMETERS_HEAP_BASE, RGX_TQ3DPARAMETERS_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, -+ {RGX_TQ3DPARAMETERS_HEAP_IDENT, RGX_TQ3DPARAMETERS_BRN_65273_HEAP_BASE, RGX_TQ3DPARAMETERS_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE }, -+ {RGX_VK_CAPT_REPLAY_HEAP_IDENT, RGX_VK_CAPT_REPLAY_HEAP_BASE, RGX_VK_CAPT_REPLAY_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, -+ {RGX_SIGNALS_HEAP_IDENT, RGX_SIGNALS_HEAP_BASE, RGX_SIGNALS_HEAP_SIZE, 0, 0, SignalSnoopingIsPresent, NULL, NULL, HEAP_INST_FEAT_DEP_VALUE}, -+ {RGX_FBCDC_HEAP_IDENT, RGX_FBCDC_HEAP_BASE, RGX_FBCDC_HEAP_SIZE, 0, 0, FBCDescriptorIsPresent, NULL, NULL, HEAP_INST_FEAT_DEP_VALUE}, -+ {RGX_FBCDC_LARGE_HEAP_IDENT, RGX_FBCDC_LARGE_HEAP_BASE, RGX_FBCDC_LARGE_HEAP_SIZE, 0, 0, FBCLargeDescriptorIsPresent, NULL, NULL, HEAP_INST_FEAT_DEP_VALUE}, -+ {RGX_CMP_MISSION_RMW_HEAP_IDENT, RGX_CMP_MISSION_RMW_HEAP_BASE, RGX_CMP_MISSION_RMW_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, -+ {RGX_CMP_SAFETY_RMW_HEAP_IDENT, RGX_CMP_SAFETY_RMW_HEAP_BASE, RGX_CMP_SAFETY_RMW_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE }, -+ {RGX_TEXTURE_STATE_HEAP_IDENT, RGX_TEXTURE_STATE_HEAP_BASE, RGX_TEXTURE_STATE_HEAP_SIZE, 0, 0, TextureStateIsPresent, NULL, NULL, HEAP_INST_FEAT_DEP_VALUE}, -+ {RGX_VISIBILITY_TEST_HEAP_IDENT, RGX_VISIBILITY_TEST_HEAP_BASE, RGX_VISIBILITY_TEST_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, 
NULL, HEAP_INST_DEFAULT_VALUE }, -+ {RGX_VISIBILITY_TEST_HEAP_IDENT, RGX_VISIBILITY_TEST_BRN_65273_HEAP_BASE, RGX_VISIBILITY_TEST_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE }, -+ {RGX_MMU_INIA_BRN_65273_HEAP_IDENT, RGX_MMU_INIA_BRN_65273_HEAP_BASE, RGX_MMU_INIA_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_DEP_VALUE }, -+ {RGX_MMU_INIB_BRN_65273_HEAP_IDENT, RGX_MMU_INIB_BRN_65273_HEAP_BASE, RGX_MMU_INIB_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_DEP_VALUE } -+}; -+ -+static const RGX_HEAP_INFO gasRGXHeapLayoutFW[] = -+{ -+ /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnIsHeapPresent pfnInit pfnDeInit HeapInstanceFlags*/ -+ {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE, 0, 0, FWBRN65101IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, -+ {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL, 0, 0, FWBRN65101IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, -+ {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101, 0, 0, FWBRN65101IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE}, -+ {RGX_FIRMWARE_CONFIG_HEAP_IDENT, RGX_FIRMWARE_CONFIG_HEAP_BASE, RGX_FIRMWARE_CONFIG_HEAP_SIZE, 0, 0, FWVZConfigPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE}, -+}; -+ -+/* Generic counting method. */ -+static void _CountRequiredHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, -+ const RGX_HEAP_INFO pksHeapInfo[], -+ IMG_UINT32 ui32HeapListSize, -+ IMG_UINT32* ui32HeapCount) -+{ -+ IMG_UINT32 i; -+ -+ /* Loop over rows in the heap data array using callback to decide if we -+ * should include the heap -+ */ -+ for (i = 0; i < ui32HeapListSize; i++) -+ { -+ const RGX_HEAP_INFO *psHeapInfo = &pksHeapInfo[i]; -+ -+ if (psHeapInfo->pfnIsHeapPresent) -+ { -+ if (!psHeapInfo->pfnIsHeapPresent(psDevInfo, psHeapInfo)) -+ { -+ /* We don't need to create this heap */ -+ continue; -+ } -+ } -+ -+ (*ui32HeapCount)++; -+ } -+} -+/* Generic heap instantiator */ -+static void _InstantiateRequiredHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, -+ const RGX_HEAP_INFO pksHeapInfo[], -+ IMG_UINT32 ui32HeapListSize, -+ const IMG_UINT32 ui32Log2RgxDefaultPageShift, -+ DEVMEM_HEAP_BLUEPRINT **psDeviceMemoryHeapCursor) -+{ -+ IMG_UINT32 i; -+ /* We now have a list of the heaps to include and so we should loop over this -+ * list and instantiate. 
-+ */ -+ for (i = 0; i < ui32HeapListSize; i++) -+ { -+ IMG_UINT32 ui32Log2DataPageSize = 0; -+ const RGX_HEAP_INFO *psHeapInfo = &pksHeapInfo[i]; -+ -+ if (psHeapInfo->pfnIsHeapPresent) -+ { -+ if (!psHeapInfo->pfnIsHeapPresent(psDevInfo, psHeapInfo)) -+ { -+ /* We don't need to create this heap */ -+ continue; -+ } -+ } -+ -+ if (psHeapInfo->ui32HeapInstanceFlags & HEAP_INST_NON4K_FLAG) -+ { -+ ui32Log2DataPageSize = psDevInfo->psDeviceNode->ui32Non4KPageSizeLog2; -+ } -+ else -+ { -+ ui32Log2DataPageSize = ui32Log2RgxDefaultPageShift; -+ } -+ -+ HeapCfgBlueprintInit(psHeapInfo->pszName, -+ psHeapInfo->ui64HeapBase, -+ psHeapInfo->uiHeapLength, -+ psHeapInfo->uiHeapReservedRegionLength, -+ ui32Log2DataPageSize, -+ psHeapInfo->ui32Log2ImportAlignment, -+ psHeapInfo->pfnInit, -+ psHeapInfo->pfnDeInit, -+ *psDeviceMemoryHeapCursor); -+ -+ (*psDeviceMemoryHeapCursor)++; -+ } -+} -+ -+static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DEVICE_MEMORY_INFO *psNewMemoryInfo) -+{ -+ PVRSRV_ERROR eError; -+ DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor; -+ -+ IMG_UINT32 ui32AppHeapListSize = ARRAY_SIZE(gasRGXHeapLayoutApp); -+ IMG_UINT32 ui32FWHeapListSize = ARRAY_SIZE(gasRGXHeapLayoutFW); -+ IMG_UINT32 ui32CountedHeapSize; -+ -+ IMG_UINT32 ui32AppHeapCount = 0; -+ IMG_UINT32 ui32FWHeapCount = 0; -+ -+ IMG_UINT32 ui32Log2DefaultPageShift = RGXHeapDerivePageSize(OSGetPageShift()); -+ -+ if (ui32Log2DefaultPageShift == 0) -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto e0; -+ } -+ -+#if defined(FIX_HW_BRN_71317_BIT_MASK) -+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 71317)) -+ { -+ if (ui32Log2DefaultPageShift == RGX_HEAP_2MB_PAGE_SHIFT -+ || ui32Log2DefaultPageShift == RGX_HEAP_1MB_PAGE_SHIFT) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OS page size too large for device virtual heaps. " -+ "Maximum page size supported is 256KB when BRN71317 is present.")); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto e0; -+ } -+ } -+#endif -+ -+ /* Count heaps required for the app heaps */ -+ _CountRequiredHeaps(psDevInfo, -+ gasRGXHeapLayoutApp, -+ ui32AppHeapListSize, -+ &ui32AppHeapCount); -+ -+ /* Count heaps required for the FW heaps */ -+ _CountRequiredHeaps(psDevInfo, -+ gasRGXHeapLayoutFW, -+ ui32FWHeapListSize, -+ &ui32FWHeapCount); -+ -+ ui32CountedHeapSize = (ui32AppHeapCount + ui32FWHeapCount + RGX_NUM_DRIVERS_SUPPORTED); -+ -+ psNewMemoryInfo->psDeviceMemoryHeap = OSAllocMem(sizeof(DEVMEM_HEAP_BLUEPRINT) * ui32CountedHeapSize); -+ PVR_LOG_GOTO_IF_NOMEM(psNewMemoryInfo->psDeviceMemoryHeap, eError, e0); -+ -+ /* Initialise the heaps */ -+ psDeviceMemoryHeapCursor = psNewMemoryInfo->psDeviceMemoryHeap; -+ -+ /* Instantiate App Heaps */ -+ _InstantiateRequiredHeaps(psDevInfo, -+ gasRGXHeapLayoutApp, -+ ui32AppHeapListSize, -+ ui32Log2DefaultPageShift, -+ &psDeviceMemoryHeapCursor); -+ -+ /* Instantiate FW Heaps */ -+ _InstantiateRequiredHeaps(psDevInfo, -+ gasRGXHeapLayoutFW, -+ ui32FWHeapListSize, -+ ui32Log2DefaultPageShift, -+ &psDeviceMemoryHeapCursor); -+ -+ /* set the heap count */ -+ psNewMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeapCursor - psNewMemoryInfo->psDeviceMemoryHeap); -+ -+ /* Check we have allocated the correct # of heaps, minus any VZ heaps as these -+ * have not been created at this point -+ */ -+ PVR_ASSERT(psNewMemoryInfo->ui32HeapCount == (ui32CountedHeapSize - RGX_NUM_DRIVERS_SUPPORTED)); -+ -+ /* -+ In the new heap setup, we initialise 2 configurations: -+ 1 - One will be for the firmware only (index 1 in array) -+ a. 
This primarily has the firmware heap in it. -+ b. It also has additional guest OSID firmware heap(s) -+ - Only if the number of support firmware OSID > 1 -+ 2 - Others shall be for clients only (index 0 in array) -+ a. This has all the other client heaps in it. -+ */ -+ psNewMemoryInfo->uiNumHeapConfigs = 2; -+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray = OSAllocMem(sizeof(DEVMEM_HEAP_CONFIG) * psNewMemoryInfo->uiNumHeapConfigs); -+ PVR_LOG_GOTO_IF_NOMEM(psNewMemoryInfo->psDeviceMemoryHeapConfigArray, eError, e1); -+ -+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].pszName = "Default Heap Configuration"; -+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].uiNumHeaps = psNewMemoryInfo->ui32HeapCount - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; -+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].psHeapBlueprintArray = psNewMemoryInfo->psDeviceMemoryHeap; -+ -+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].pszName = "Firmware Heap Configuration"; -+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; -+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; -+ -+#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ IMG_UINT32 ui32DriverID; -+ -+ /* Create additional raw firmware heaps */ -+ FOREACH_DRIVER_RAW_HEAP(ui32DriverID) -+ { -+ eError = RGXInitFwRawHeap(psDevInfo, psDeviceMemoryHeapCursor, ui32DriverID); -+ if (eError != PVRSRV_OK) -+ { -+ /* if any allocation fails, free previously allocated heaps and abandon initialisation */ -+ for (; ui32DriverID > RGX_FIRST_RAW_HEAP_DRIVER_ID; ui32DriverID--) -+ { -+ RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor); -+ psDeviceMemoryHeapCursor--; -+ } -+ goto e1; -+ } -+ -+ /* Append additional firmware heaps to host driver firmware context heap configuration */ -+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps += 1; -+ -+ /* advance to the next heap */ -+ psDeviceMemoryHeapCursor++; -+ } -+ } -+#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */ -+ -+ return PVRSRV_OK; -+e1: -+ OSFreeMem(psNewMemoryInfo->psDeviceMemoryHeap); -+e0: -+ return eError; -+} -+ -+static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo) -+{ -+#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ IMG_UINT32 ui32DriverID; -+ DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor = psDevMemoryInfo->psDeviceMemoryHeap; -+ -+ /* Delete all guest firmware heaps */ -+ FOREACH_DRIVER_RAW_HEAP(ui32DriverID) -+ { -+ RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor); -+ psDeviceMemoryHeapCursor++; -+ } -+ } -+#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */ -+ -+ OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeapConfigArray); -+ OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeap); -+} -+ -+static PVRSRV_ERROR RGXInitSharedFwPhysHeaps(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; -+ PHYS_HEAP_CONFIG *psSysHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, -+ PHYS_HEAP_USAGE_FW_SHARED); -+ -+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) -+ /* VZ heap validation */ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ PVR_LOG_RETURN_IF_FALSE(psSysHeapCfg != NULL, -+ "FW Main heap is required for VZ Guest.", -+ PVRSRV_ERROR_PHYSHEAP_CONFIG); -+ } -+#endif -+ -+ if 
(psSysHeapCfg != NULL) -+ { -+ /* Check FW_SHARED for multiple usage flags. Because FW_SHARED is divided -+ into subheaps, shared usage with other heaps is not allowed. */ -+ PVR_LOG_RETURN_IF_FALSE(psSysHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_SHARED, -+ "FW_SHARED phys heap config not specified with more than one usage." -+ "FW_SHARED heap must be exclusively used as FW_SHARED.", -+ PVRSRV_ERROR_PHYSHEAP_CONFIG); -+ } -+ -+ if (psSysHeapCfg == NULL) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap not set", __func__)); -+ /* Nothing to do. Default to the physheap fallback option */ -+ } -+ else if (psSysHeapCfg->eType == PHYS_HEAP_TYPE_UMA) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses OS System memory (UMA)", __func__)); -+ -+ eError = PhysHeapCreateHeapFromConfig(psDeviceNode, -+ psSysHeapCfg, -+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap"); -+ -+ psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG] = psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]; -+ } -+ else /* PHYS_HEAP_TYPE_LMA or PHYS_HEAP_TYPE_DMA */ -+ { -+ PHYS_HEAP_CONFIG sFwMainHeapCfg, sFwCfgHeapCfg; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses local memory managed by the driver (LMA)", __func__)); -+ -+ -+ /* Subheap layout: Main + (optional MIPS reserved range) + Config */ -+ sFwMainHeapCfg = *psSysHeapCfg; -+ -+ /* Reserve space for the Config heap */ -+ sFwMainHeapCfg.uiSize -= RGX_FIRMWARE_CONFIG_HEAP_SIZE; -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ /* MIPS Firmware must reserve some space in its Host/Native heap for GPU memory mappings */ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && (!PVRSRV_VZ_MODE_IS(GUEST))) -+ { -+#if defined(FIX_HW_BRN_65101_BIT_MASK) -+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65101)) -+ { -+ sFwMainHeapCfg.uiSize -= RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101; -+ } -+ else -+#endif -+ { -+ sFwMainHeapCfg.uiSize -= RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL; -+ } -+ } -+#endif -+ -+ eError = PhysmemCreateHeapLMA(psDeviceNode, -+ RGXPhysHeapGetLMAPolicy(sFwMainHeapCfg.ui32UsageFlags), -+ &sFwMainHeapCfg, -+ "Fw Main subheap", -+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:MAIN", ErrorDeinit); -+ -+ sFwCfgHeapCfg = *psSysHeapCfg; -+ sFwCfgHeapCfg.sStartAddr.uiAddr += psSysHeapCfg->uiSize - RGX_FIRMWARE_CONFIG_HEAP_SIZE; -+ sFwCfgHeapCfg.sCardBase.uiAddr += psSysHeapCfg->uiSize - RGX_FIRMWARE_CONFIG_HEAP_SIZE; -+ -+ sFwCfgHeapCfg.uiSize = RGX_FIRMWARE_CONFIG_HEAP_SIZE; -+ -+ eError = PhysmemCreateHeapLMA(psDeviceNode, -+ RGXPhysHeapGetLMAPolicy(sFwCfgHeapCfg.ui32UsageFlags), -+ &sFwCfgHeapCfg, -+ "Fw Cfg subheap", -+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:CFG", ErrorDeinit); -+ } -+ -+ /* Acquire FW heaps */ -+ eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_MAIN, psDeviceNode, -+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_MAIN", ErrorDeinit); -+ -+ eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_CONFIG, psDeviceNode, -+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CONFIG", ErrorDeinit); -+ -+ return eError; -+ -+ErrorDeinit: -+ PVR_ASSERT(IMG_FALSE); -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR RGXInitPrivateFwPhysHeaps(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError = 
PVRSRV_OK; -+ PHYS_HEAP_CONFIG *psFwCodeHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, -+ PHYS_HEAP_USAGE_FW_CODE); -+ PHYS_HEAP_CONFIG *psFwDataHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, -+ PHYS_HEAP_USAGE_FW_PRIV_DATA); -+ PHYS_HEAP_CONFIG *psFwPrivateHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, -+ PHYS_HEAP_USAGE_FW_PRIVATE); -+ PHYS_HEAP_CONFIG sFwPrivateTempCfg; -+ -+ if (psFwPrivateHeapCfg != NULL) -+ { -+ PVR_LOG_RETURN_IF_FALSE((psFwCodeHeapCfg == NULL) && (psFwDataHeapCfg == NULL), -+ "FW_PRIVATE and the FW_CODE & FW_PRIV_DATA usage flags " -+ "achieve the same goal and are mutually exclusive.", -+ PVRSRV_ERROR_PHYSHEAP_CONFIG); -+ -+ /* Fw code and data are both allocated from this unified heap */ -+ sFwPrivateTempCfg = *psFwPrivateHeapCfg; -+ sFwPrivateTempCfg.ui32UsageFlags = PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA; -+ -+ psFwCodeHeapCfg = &sFwPrivateTempCfg; -+ psFwDataHeapCfg = &sFwPrivateTempCfg; -+ } -+ -+ if ((psFwCodeHeapCfg == NULL) || (psFwDataHeapCfg == NULL)) -+ { -+ if (psFwCodeHeapCfg != psFwDataHeapCfg) -+ { -+ /* Private Firmware code and data heaps must be either both defined -+ * or both undefined. There is no point in isolating one but not -+ * the other.*/ -+ eError = PVRSRV_ERROR_PHYSHEAP_CONFIG; -+ PVR_LOG_GOTO_IF_ERROR(eError, "PrivateFwPhysHeap check", ErrorDeinit); -+ } -+ else -+ { -+ /* No dedicated heaps, default to the physheap fallback option */ -+ } -+ } -+ else if (psFwCodeHeapCfg == psFwDataHeapCfg) -+ { -+ if (psFwCodeHeapCfg->ui32UsageFlags == -+ (PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA)) -+ { -+ /* Fw code and private data allocations come from the same system heap -+ * Instantiate one physheap and share it between them. */ -+ -+ eError = PhysHeapCreateHeapFromConfig(psDeviceNode, -+ psFwCodeHeapCfg, -+ NULL); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig"); -+ } -+ else -+ { -+ /* Not an exclusive heap, can be used for other purposes (e.g. secure buffers). -+ * Expect the PVR layer to have already created a heap for the other uses. */ -+ } -+ } -+ else -+ { -+ /* -+ * Separating private Firmware code and data is allowed for backwards compatibility -+ * purposes. New platforms should use the unified FW_PRIVATE heap instead. -+ * -+ * Early security implementations on Rogue cores required separate FW_PRIV_DATA -+ * and FW_CODE heaps, as access permissions to Firmware were granted differently -+ * based on the transaction types (code or data). 
-+ */ -+ PVR_LOG_RETURN_IF_FALSE((psFwCodeHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_CODE) && -+ (psFwDataHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_PRIV_DATA), -+ "Dedicated private heaps for Fw code and " -+ "data must have one usage flag exclusively.", -+ PVRSRV_ERROR_PHYSHEAP_CONFIG); -+ -+ /* Dedicated Fw code heap */ -+ eError = PhysHeapCreateHeapFromConfig(psDeviceNode, -+ psFwCodeHeapCfg, -+ NULL); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap"); -+ -+ /* Dedicated Fw private data heap */ -+ eError = PhysHeapCreateHeapFromConfig(psDeviceNode, -+ psFwDataHeapCfg, -+ NULL); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap"); -+ } -+ -+#if defined(RGX_PREMAP_FW_HEAPS) && defined(SUPPORT_TRUSTED_DEVICE) -+ /* When premapping distinct private and shared Firmware phys heaps -+ * inside the same virtual devmem heap, their sizes must add up to -+ * the fixed RGX_FIRMWARE_RAW_HEAP_SIZE for the premapping to work */ -+ { -+ PHYS_HEAP_CONFIG *psFwSharedHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, -+ PHYS_HEAP_USAGE_FW_SHARED); -+ IMG_UINT64 ui64FwPrivateHeapSize; -+ -+ PVR_LOG_GOTO_IF_FALSE((psFwCodeHeapCfg != NULL) && (psFwDataHeapCfg != NULL), -+ "Security support requires Fw code and data memory be" -+ " separate from the heap shared with the kernel driver.", ErrorDeinit); -+ -+ if (psFwCodeHeapCfg != psFwDataHeapCfg) -+ { -+ /* Private Firmware allocations come from 2 different heaps */ -+ ui64FwPrivateHeapSize = psFwCodeHeapCfg->uiSize + psFwDataHeapCfg->uiSize; -+ } -+ else -+ { -+ /* Private Firmware allocations come from a single heap */ -+ ui64FwPrivateHeapSize = psFwCodeHeapCfg->uiSize; -+ } -+ -+ PVR_LOG_GOTO_IF_FALSE((psFwSharedHeapCfg->uiSize + -+ ui64FwPrivateHeapSize) == -+ RGX_FIRMWARE_RAW_HEAP_SIZE, -+ "Invalid firmware physical heap size.", ErrorDeinit); -+ } -+#endif -+ -+ eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_CODE, psDeviceNode, -+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CODE]); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CODE", ErrorDeinit); -+ -+ eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_PRIV_DATA, psDeviceNode, -+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PRIV_DATA]); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_DATA", ErrorDeinit); -+ -+ return eError; -+ -+ErrorDeinit: -+ PVR_ASSERT(IMG_FALSE); -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR RGXInitFwPageTableHeap(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+#if defined(RGX_PREMAP_FW_HEAPS) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ PHYS_HEAP_CONFIG *psFwPageTableHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, -+ PHYS_HEAP_USAGE_FW_PREMAP_PT); -+ -+ PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg != NULL), -+ "The Firmware Page Table phys heap config not found.", -+ PVRSRV_ERROR_PHYSHEAP_CONFIG); -+ -+ -+ PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_PREMAP_PT), -+ "The Firmware Page Table heap must be used exclusively for this purpose", -+ PVRSRV_ERROR_PHYSHEAP_CONFIG); -+ -+ PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg->eType == PHYS_HEAP_TYPE_LMA) || -+ (psFwPageTableHeapCfg->eType == PHYS_HEAP_TYPE_DMA), -+ "The Firmware Page Table heap must be LMA or DMA memory.", -+ PVRSRV_ERROR_PHYSHEAP_CONFIG); -+ -+ PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg->uiSize >= RGX_FIRMWARE_MAX_PAGETABLE_SIZE), -+ "The Firmware Page Table heap must be able to hold the maximum " -+ "number of pagetables needed to cover the Firmware's VA space.", -+ 
PVRSRV_ERROR_PHYSHEAP_CONFIG); -+ -+ eError = PhysHeapCreateHeapFromConfig(psDeviceNode, -+ psFwPageTableHeapCfg, -+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig:FwPageTableHeap"); -+ -+ eError = PhysHeapAcquire(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquire:FwPageTableHeap"); -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+#endif /* defined(RGX_PREMAP_FW_HEAPS) */ -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ eError = RGXInitFwPageTableHeap(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitFwPageTableHeap", ErrorDeinit); -+ eError = RGXInitSharedFwPhysHeaps(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitSharedFwPhysHeaps", ErrorDeinit); -+ eError = RGXInitPrivateFwPhysHeaps(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitPrivateFwPhysHeaps", ErrorDeinit); -+ -+ErrorDeinit: -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function RGXDeviceFWMainHeapMemCheck -+@Description Checks the free memory in FW Main PhysHeap of a device to ensure -+ there is enough for a connection to be made. -+ -+@Input psDeviceNode The device of the FW Main PhysHeap to be checked. -+ -+@Return On success PVRSRV_OK, else a PVRSRV_ERROR code. -+*/ /**************************************************************************/ -+static PVRSRV_ERROR RGXDeviceFWMainHeapMemCheck(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PHYS_HEAP *psFWMainPhysHeap; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode, "psDeviceNode"); -+ -+ psFWMainPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]; -+ if (psFWMainPhysHeap == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to get device's FW Main PhysHeap")); -+ return PVRSRV_ERROR_INVALID_HEAP; -+ } -+ -+ if (PhysHeapGetType(psFWMainPhysHeap) == PHYS_HEAP_TYPE_LMA) -+ { -+ const IMG_UINT32 ui32MinMemInKBs = RGX_FW_PHYSHEAP_MINMEM_ON_CONNECTION; -+ IMG_UINT64 ui64FreePhysHeapMem; -+ -+ eError = PhysHeapFreeMemCheck(psFWMainPhysHeap, -+ KB2B(ui32MinMemInKBs), -+ &ui64FreePhysHeapMem); -+ -+ if (eError == PVRSRV_ERROR_INSUFFICIENT_PHYS_HEAP_MEMORY) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "FW_MAIN PhysHeap contains less than the " -+ "minimum free space required to acquire a connection. 
" -+ "Free space: %"IMG_UINT64_FMTSPEC"KB " -+ "Minimum required: %uKB", -+ B2KB(ui64FreePhysHeapMem), -+ ui32MinMemInKBs)); -+ } -+ } -+ -+ return eError; -+} -+ -+static PVRSRV_ERROR _ReadNon4KHeapPageSize(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 *pui32Log2Non4KPgSize) -+{ -+ void *pvAppHintState = NULL; -+ IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE; -+ IMG_UINT32 ui32GeneralNon4KHeapPageSize; -+ IMG_UINT32 uiLog2OSPageSize = OSGetPageShift(); -+ -+ /* Get the page size for the dummy page from the NON4K heap apphint */ -+ OSCreateAppHintState(&pvAppHintState); -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, -+ GeneralNon4KHeapPageSize, &ui32AppHintDefault, -+ &ui32GeneralNon4KHeapPageSize); -+ *pui32Log2Non4KPgSize = ExactLog2(ui32GeneralNon4KHeapPageSize); -+ OSFreeAppHintState(pvAppHintState); -+ -+#if defined(FIX_HW_BRN_71317_BIT_MASK) -+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 71317)) -+ { -+ if (*pui32Log2Non4KPgSize == RGX_HEAP_2MB_PAGE_SHIFT -+ || *pui32Log2Non4KPgSize == RGX_HEAP_1MB_PAGE_SHIFT) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Page sizes of 2MB or 1MB cause page faults.")); -+ return PVRSRV_ERROR_INVALID_NON4K_HEAP_PAGESIZE; -+ } -+ } -+#endif -+ -+ /* Check the Non4k page size is at least the size of the OS page size -+ * or larger. The Non4k page size also has to be a multiple of the OS page -+ * size but since we have the log2 value from the apphint we know powers of 2 -+ * will always be multiples. -+ */ -+ PVR_LOG_RETURN_IF_FALSE(*pui32Log2Non4KPgSize >= uiLog2OSPageSize, -+ "Non4K page size smaller than OS page size", -+ PVRSRV_ERROR_INVALID_NON4K_HEAP_PAGESIZE); -+ -+ return PVRSRV_OK; -+} -+ -+/* RGXRegisterDevice -+ * -+ * WARNING! -+ * -+ * No PDUMP statements are allowed until device initialisation starts. 
-+ */ -+PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ DEVICE_MEMORY_INFO *psDevMemoryInfo; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ void *pvAppHintState = NULL; -+ IMG_UINT32 ui32AppHintDefault = HWPERF_HOST_TL_STREAM_SIZE_DEFAULT, ui32HWPerfHostBufSizeKB; -+ -+ ui32AppHintDefault = PVRSRV_APPHINT_DRIVERMODE; -+ OSCreateAppHintState(&pvAppHintState); -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HWPerfHostBufSizeInKB, -+ &ui32AppHintDefault, &ui32HWPerfHostBufSizeKB); -+ OSFreeAppHintState(pvAppHintState); -+ pvAppHintState = NULL; -+ -+ /********************* -+ * Device node setup * -+ *********************/ -+ /* Setup static data and callbacks on the device agnostic device node */ -+#if defined(PDUMP) -+ psDeviceNode->sDevId.pszPDumpRegName = RGX_PDUMPREG_NAME; -+ psDeviceNode->sDevId.pszPDumpDevName = PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL]); -+ psDeviceNode->pfnPDumpInitDevice = &RGXResetPDump; -+#endif /* PDUMP */ -+ -+ OSAtomicWrite(&psDeviceNode->eHealthStatus, PVRSRV_DEVICE_HEALTH_STATUS_OK); -+ OSAtomicWrite(&psDeviceNode->eHealthReason, PVRSRV_DEVICE_HEALTH_REASON_NONE); -+ -+ /* Configure MMU specific stuff */ -+ RGXMMUInit_Register(psDeviceNode); -+ -+ psDeviceNode->pfnInvalFBSCTable = NULL; -+ -+ psDeviceNode->pfnValidateOrTweakPhysAddrs = NULL; -+ -+ /* Callback for getting the MMU device attributes */ -+ psDeviceNode->pfnGetMMUDeviceAttributes = RGXDevMMUAttributes; -+ psDeviceNode->pfnMMUCacheInvalidate = RGXMMUCacheInvalidate; -+ psDeviceNode->pfnMMUCacheInvalidateKick = RGXMMUCacheInvalidateKick; -+ psDeviceNode->pfnMMUTopLevelPxWorkarounds = NULL; -+/* psDeviceNode->pfnMMUTweakProtFlags is set later on once BNVC features setup */ -+ -+ psDeviceNode->pfnInitDeviceCompatCheck = &RGXDevInitCompatCheck; -+ -+ /* Register callbacks for creation of device memory contexts */ -+ psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext; -+ psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext; -+ -+ /* Register callbacks for Unified Fence Objects */ -+ psDeviceNode->pfnAllocUFOBlock = RGXAllocUFOBlock; -+ psDeviceNode->pfnFreeUFOBlock = RGXFreeUFOBlock; -+ -+ /* Register callback for checking the device's health */ -+ psDeviceNode->pfnUpdateHealthStatus = PVRSRV_VZ_MODE_IS(GUEST) ? 
NULL : RGXUpdateHealthStatus; -+ -+#if defined(SUPPORT_AUTOVZ) -+ /* Register callback for updating the virtualization watchdog */ -+ psDeviceNode->pfnUpdateAutoVzWatchdog = RGXUpdateAutoVzWatchdog; -+#endif -+ -+ /* Register method to service the FW HWPerf buffer */ -+ psDeviceNode->pfnServiceHWPerf = RGXHWPerfDataStoreCB; -+ -+ /* Register callback for getting the device version information string */ -+ psDeviceNode->pfnDeviceVersionString = RGXDevVersionString; -+ -+ /* Register callback for getting the device clock speed */ -+ psDeviceNode->pfnDeviceClockSpeed = RGXDevClockSpeed; -+ -+ /* Register callback for soft resetting some device modules */ -+ psDeviceNode->pfnSoftReset = RGXSoftReset; -+ -+ /* Register callback for resetting the HWR logs */ -+ psDeviceNode->pfnResetHWRLogs = RGXResetHWRLogs; -+ -+ /* Register callback for resetting the HWR logs */ -+ psDeviceNode->pfnVerifyBVNC = RGXVerifyBVNC; -+ -+ /* Register callback for checking alignment of UM structures */ -+ psDeviceNode->pfnAlignmentCheck = RGXAlignmentCheck; -+ -+ /*Register callback for checking the supported features and getting the -+ * corresponding values */ -+ psDeviceNode->pfnCheckDeviceFeature = RGXBvncCheckFeatureSupported; -+ psDeviceNode->pfnGetDeviceFeatureValue = RGXBvncGetSupportedFeatureValue; -+ -+ /* Callback for checking if system layer supports FBC 3.1 */ -+ psDeviceNode->pfnHasFBCDCVersion31 = RGXSystemHasFBCDCVersion31; -+ -+ /* Callback for getting TFBC configuration */ -+ psDeviceNode->pfnGetTFBCLossyGroup = RGXGetTFBCLossyGroup; -+ -+ /* Register callback for initialising device-specific physical memory heaps */ -+ psDeviceNode->pfnPhysMemDeviceHeapsInit = RGXPhysMemDeviceHeapsInit; -+ -+ /* Register callback for checking a device's FW Main physical heap for sufficient free memory */ -+ psDeviceNode->pfnCheckForSufficientFWPhysMem = RGXDeviceFWMainHeapMemCheck; -+ -+ /* Register callback for determining the appropriate LMA allocation policy for a phys heap */ -+ psDeviceNode->pfnPhysHeapGetLMAPolicy = RGXPhysHeapGetLMAPolicy; -+ -+ /********************* -+ * Device info setup * -+ *********************/ -+ /* Allocate device control block */ -+ psDevInfo = OSAllocZMem(sizeof(*psDevInfo)); -+ if (psDevInfo == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevInitRGXPart1 : Failed to alloc memory for DevInfo")); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ /* Default psTrampoline to point to null struct */ -+ psDevInfo->psTrampoline = (RGX_MIPS_ADDRESS_TRAMPOLINE *)&sNullTrampoline; -+#endif -+ -+ /* create locks for the context lists stored in the DevInfo structure. 
-+ * these lists are modified on context create/destroy and read by the -+ * watchdog thread -+ */ -+ -+ eError = OSWRLockCreate(&(psDevInfo->hRenderCtxListLock)); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create render context list lock", __func__)); -+ goto e0; -+ } -+ -+ eError = OSWRLockCreate(&(psDevInfo->hComputeCtxListLock)); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create compute context list lock", __func__)); -+ goto e1; -+ } -+ -+ eError = OSWRLockCreate(&(psDevInfo->hTransferCtxListLock)); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create transfer context list lock", __func__)); -+ goto e2; -+ } -+ -+ eError = OSWRLockCreate(&(psDevInfo->hTDMCtxListLock)); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create TDM context list lock", __func__)); -+ goto e3; -+ } -+ -+ eError = OSWRLockCreate(&(psDevInfo->hKickSyncCtxListLock)); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create kick sync context list lock", __func__)); -+ goto e4; -+ } -+ -+ eError = OSWRLockCreate(&(psDevInfo->hMemoryCtxListLock)); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create memory context list lock", __func__)); -+ goto e5; -+ } -+ -+ eError = OSSpinLockCreate(&psDevInfo->hLockKCCBDeferredCommandsList); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to KCCB deferred commands list lock", __func__)); -+ goto e6; -+ } -+ dllist_init(&(psDevInfo->sKCCBDeferredCommandsListHead)); -+ -+ dllist_init(&(psDevInfo->sRenderCtxtListHead)); -+ dllist_init(&(psDevInfo->sComputeCtxtListHead)); -+ dllist_init(&(psDevInfo->sTransferCtxtListHead)); -+ dllist_init(&(psDevInfo->sTDMCtxtListHead)); -+ dllist_init(&(psDevInfo->sKickSyncCtxtListHead)); -+ -+ dllist_init(&(psDevInfo->sCommonCtxtListHead)); -+ psDevInfo->ui32CommonCtxtCurrentID = 1; -+ -+ -+ eError = OSWRLockCreate(&psDevInfo->hCommonCtxtListLock); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create common context list lock", __func__)); -+ goto e7; -+ } -+ -+ eError = OSLockCreate(&psDevInfo->sRegCongfig.hLock); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create register configuration lock", __func__)); -+ goto e8; -+ } -+ -+ eError = OSLockCreate(&psDevInfo->hBPLock); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for break points", __func__)); -+ goto e9; -+ } -+ -+ eError = OSLockCreate(&psDevInfo->hRGXFWIfBufInitLock); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for trace buffers", __func__)); -+ goto e10; -+ } -+ -+ eError = OSLockCreate(&psDevInfo->hCCBStallCheckLock); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB checking lock", __func__)); -+ goto e11; -+ } -+ eError = OSLockCreate(&psDevInfo->hCCBRecoveryLock); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB recovery lock", __func__)); -+ goto e12; -+ } -+ -+ dllist_init(&psDevInfo->sMemoryContextList); -+ -+ /* initialise ui32SLRHoldoffCounter */ -+ if (RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS > DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT) -+ { -+ psDevInfo->ui32SLRHoldoffCounter = RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS / DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT; -+ } -+ else -+ { -+ psDevInfo->ui32SLRHoldoffCounter = 0; -+ } -+ -+ /* Setup static data and callbacks on 
the device specific device info */ -+ psDevInfo->psDeviceNode = psDeviceNode; -+ -+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; -+ psDevInfo->pvDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap; -+ -+ /* -+ * Map RGX Registers -+ */ -+ psDevInfo->ui32RegSize = psDeviceNode->psDevConfig->ui32RegsSize; -+ psDevInfo->sRegsPhysBase = psDeviceNode->psDevConfig->sRegsCpuPBase; -+ -+#if !defined(NO_HARDWARE) -+ psDevInfo->pvRegsBaseKM = (void __iomem *) OSMapPhysToLin(psDeviceNode->psDevConfig->sRegsCpuPBase, -+ psDeviceNode->psDevConfig->ui32RegsSize, -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); -+ -+ if (psDevInfo->pvRegsBaseKM == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to create RGX register mapping", -+ __func__)); -+ eError = PVRSRV_ERROR_BAD_MAPPING; -+ goto e13; -+ } -+#endif /* !NO_HARDWARE */ -+ -+ psDeviceNode->pvDevice = psDevInfo; -+ -+ eError = RGXBvncInitialiseConfiguration(psDeviceNode); -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Unsupported HW device detected by driver", -+ __func__)); -+ goto e14; -+ } -+ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ /* -+ * We must now setup the SECURITY mappings if supported. We cannot -+ * check on the features until we have reached here as the BVNC is -+ * not setup before now. -+ */ -+#if !defined(NO_HARDWARE) -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, HOST_SECURITY_VERSION) && -+ (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)) -+ { -+ IMG_CPU_PHYADDR sHostSecureRegBankBase = {psDeviceNode->psDevConfig->sRegsCpuPBase.uiAddr + RGX_HOST_SECURE_REGBANK_OFFSET}; -+ -+ psDevInfo->pvSecureRegsBaseKM = (void __iomem *) OSMapPhysToLin(sHostSecureRegBankBase, -+ RGX_HOST_SECURE_REGBANK_SIZE, -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); -+ -+ if (psDevInfo->pvSecureRegsBaseKM == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRGXInitDevPart2KM: Failed to create RGX secure register mapping")); -+ eError = PVRSRV_ERROR_BAD_MAPPING; -+ goto e14; -+ } -+ -+ /* -+ * The secure register bank is mapped into the CPU VA space starting from -+ * the base of the normal register bank + an offset of RGX_HOST_SECURE_REGBAK_OFFSET. -+ * The hardware register addresses are all indexed from the base of the regular register bank. -+ * For the RegBankBase+RegOffset computation to still be accurate for host-secure registers, -+ * we need to compensate for offsets of registers in the secure bank -+ */ -+ psDevInfo->pvSecureRegsBaseKM = (void __iomem *)((uintptr_t)psDevInfo->pvSecureRegsBaseKM - RGX_HOST_SECURE_REGBANK_OFFSET); -+ } -+ else -+ { -+ psDevInfo->pvSecureRegsBaseKM = psDevInfo->pvRegsBaseKM; -+ } -+#endif /* !NO_HARDWARE */ -+#endif /* defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) */ -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ psDeviceNode->pfnMMUTweakProtFlags = (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ? 
-+ RGXMMUTweakProtFlags : NULL; -+#endif -+ -+ eError = _ReadNon4KHeapPageSize(psDevInfo, -+ &psDeviceNode->ui32Non4KPageSizeLog2); -+ PVR_LOG_GOTO_IF_ERROR(eError, "_ReadNon4KHeapPageSize", e15); -+ -+ eError = RGXInitHeaps(psDevInfo, psDevMemoryInfo); -+ if (eError != PVRSRV_OK) -+ { -+ goto e15; -+ } -+ -+ eError = RGXHWPerfInit(psDevInfo); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInit", e15); -+ -+ eError = RGXHWPerfHostInit(psDeviceNode->pvDevice, ui32HWPerfHostBufSizeKB); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfHostInit", ErrorDeInitHWPerfFw); -+ -+#if defined(SUPPORT_VALIDATION) -+ /* This completion will be signaled by the ISR when processing -+ * the answer CCB command carrying an RGX Register read value */ -+ init_completion(&psDevInfo->sFwRegs.sRegComp); -+ psDevInfo->sFwRegs.ui64RegVal = 0; -+ -+#if defined(SUPPORT_SOC_TIMER) -+ { -+ IMG_BOOL bAppHintDefault = IMG_FALSE; -+ IMG_BOOL bInitSocTimer; -+ void *pvAppHintState = NULL; -+ -+ OSCreateAppHintState(&pvAppHintState); -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, ValidateSOCUSCTimer, &bAppHintDefault, &bInitSocTimer); -+ OSFreeAppHintState(pvAppHintState); -+ -+ if (bInitSocTimer) -+ { -+ eError = RGXInitSOCUSCTimer(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitSOCUSCTimer", ErrorDeInitHWPerfHost); -+ } -+ } -+#endif -+#endif -+ -+ /* Register callback for dumping debug info */ -+ eError = RGXDebugInit(psDevInfo); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXDebugInit", ErrorDeInitHWPerfHost); -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ /* Register callback for fw mmu init */ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ psDeviceNode->pfnFwMMUInit = RGXMipsMMUInit_Register; -+ } -+#endif -+ -+ /* The device shared-virtual-memory heap address-space size is stored here for faster -+ look-up without having to walk the device heap configuration structures during -+ client device connection (i.e. 
this size is relative to a zero-based offset) */ -+#if defined(FIX_HW_BRN_65273_BIT_MASK) -+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) -+ { -+ psDeviceNode->ui64GeneralSVMHeapTopVA = 0; -+ }else -+#endif -+ { -+ psDeviceNode->ui64GeneralSVMHeapTopVA = RGX_GENERAL_SVM_HEAP_BASE + RGX_GENERAL_SVM_HEAP_SIZE; -+ } -+ -+ if (NULL != psDeviceNode->psDevConfig->pfnSysDevFeatureDepInit) -+ { -+ psDeviceNode->psDevConfig->pfnSysDevFeatureDepInit(psDeviceNode->psDevConfig, -+ psDevInfo->sDevFeatureCfg.ui64Features); -+ } -+ -+ psDeviceNode->bHasSystemDMA = psDeviceNode->psDevConfig->bHasDma; -+ -+ /* Initialise the device dependent bridges */ -+ eError = DeviceDepBridgeInit(psDevInfo); -+ PVR_LOG_IF_ERROR(eError, "DeviceDepBridgeInit"); -+ -+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) -+ eError = OSLockCreate(&psDevInfo->hCounterDumpingLock); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for counter sampling.", __func__)); -+ goto ErrorDeInitDeviceDepBridge; -+ } -+#endif -+ -+ /* Initialise error counters */ -+ memset(&psDevInfo->sErrorCounts, 0, sizeof(PVRSRV_RGXDEV_ERROR_COUNTS)); -+ -+ return PVRSRV_OK; -+ -+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) -+ErrorDeInitDeviceDepBridge: -+ DeviceDepBridgeDeInit(psDevInfo); -+#endif -+ -+ErrorDeInitHWPerfHost: -+ RGXHWPerfHostDeInit(psDevInfo); -+ErrorDeInitHWPerfFw: -+ RGXHWPerfDeinit(psDevInfo); -+e15: -+#if !defined(NO_HARDWARE) -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ if (psDevInfo->pvSecureRegsBaseKM != NULL) -+ { -+ /* Adjust pvSecureRegsBaseKM if device has SECURITY_VERSION > 1 */ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, HOST_SECURITY_VERSION) && -+ (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)) -+ { -+ /* Undo the VA offset adjustment to unmap correct VAddr */ -+ psDevInfo->pvSecureRegsBaseKM = (void __iomem *)((uintptr_t)psDevInfo->pvSecureRegsBaseKM + RGX_HOST_SECURE_REGBANK_OFFSET); -+ OSUnMapPhysToLin((void __force *) psDevInfo->pvSecureRegsBaseKM, -+ RGX_HOST_SECURE_REGBANK_SIZE); -+ } -+ } -+#endif -+#endif /* !NO_HARDWARE */ -+e14: -+#if !defined(NO_HARDWARE) -+ if (psDevInfo->pvRegsBaseKM != NULL) -+ { -+ OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM, -+ psDevInfo->ui32RegSize); -+ } -+e13: -+#endif /* !NO_HARDWARE */ -+ OSLockDestroy(psDevInfo->hCCBRecoveryLock); -+e12: -+ OSLockDestroy(psDevInfo->hCCBStallCheckLock); -+e11: -+ OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock); -+e10: -+ OSLockDestroy(psDevInfo->hBPLock); -+e9: -+ OSLockDestroy(psDevInfo->sRegCongfig.hLock); -+e8: -+ OSWRLockDestroy(psDevInfo->hCommonCtxtListLock); -+e7: -+ OSSpinLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList); -+e6: -+ OSWRLockDestroy(psDevInfo->hMemoryCtxListLock); -+e5: -+ OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock); -+e4: -+ OSWRLockDestroy(psDevInfo->hTDMCtxListLock); -+e3: -+ OSWRLockDestroy(psDevInfo->hTransferCtxListLock); -+e2: -+ OSWRLockDestroy(psDevInfo->hComputeCtxListLock); -+e1: -+ OSWRLockDestroy(psDevInfo->hRenderCtxListLock); -+e0: -+ OSFreeMem(psDevInfo); -+ -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ IMG_PCHAR psz = psDevInfo->sDevFeatureCfg.pszBVNCString; -+ if (NULL == psz) -+ { -+ IMG_CHAR pszBVNCInfo[RGX_HWPERF_MAX_BVNC_LEN]; -+ size_t uiBVNCStringSize; -+ size_t uiStringLength; -+ -+ uiStringLength = OSSNPrintf(pszBVNCInfo, RGX_HWPERF_MAX_BVNC_LEN, "%d.%d.%d.%d", -+ psDevInfo->sDevFeatureCfg.ui32B, -+ 
psDevInfo->sDevFeatureCfg.ui32V, -+ psDevInfo->sDevFeatureCfg.ui32N, -+ psDevInfo->sDevFeatureCfg.ui32C); -+ PVR_ASSERT(uiStringLength < RGX_HWPERF_MAX_BVNC_LEN); -+ -+ uiBVNCStringSize = (uiStringLength + 1) * sizeof(IMG_CHAR); -+ psz = OSAllocMem(uiBVNCStringSize); -+ if (NULL != psz) -+ { -+ OSCachedMemCopy(psz, pszBVNCInfo, uiBVNCStringSize); -+ psDevInfo->sDevFeatureCfg.pszBVNCString = psz; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Allocating memory for BVNC Info string failed", -+ __func__)); -+ } -+ } -+ -+ return psz; -+} -+ -+/*************************************************************************/ /*! -+@Function RGXDevVersionString -+@Description Gets the version string for the given device node and returns -+ a pointer to it in ppszVersionString. It is then the -+ responsibility of the caller to free this memory. -+@Input psDeviceNode Device node from which to obtain the -+ version string -+@Output ppszVersionString Contains the version string upon return -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_CHAR **ppszVersionString) -+{ -+#if defined(NO_HARDWARE) || defined(EMULATOR) -+ const IMG_CHAR szFormatString[] = "GPU variant BVNC: %s (SW)"; -+#else -+ const IMG_CHAR szFormatString[] = "GPU variant BVNC: %s (HW)"; -+#endif -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ IMG_PCHAR pszBVNC; -+ size_t uiStringLength; -+ -+ if (psDeviceNode == NULL || ppszVersionString == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; -+ pszBVNC = RGXDevBVNCString(psDevInfo); -+ -+ if (NULL == pszBVNC) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ uiStringLength = OSStringLength(pszBVNC); -+ uiStringLength += (sizeof(szFormatString) - 2); /* sizeof includes the null, -2 for "%s" */ -+ *ppszVersionString = OSAllocMem(uiStringLength * sizeof(IMG_CHAR)); -+ if (*ppszVersionString == NULL) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ OSSNPrintf(*ppszVersionString, uiStringLength, szFormatString, -+ pszBVNC); -+ -+ return PVRSRV_OK; -+} -+ -+/**************************************************************************/ /*! -+@Function RGXDevClockSpeed -+@Description Gets the clock speed for the given device node and returns -+ it in pui32RGXClockSpeed. -+@Input psDeviceNode Device node -+@Output pui32RGXClockSpeed Variable for storing the clock speed -+@Return PVRSRV_ERROR -+*/ /***************************************************************************/ -+static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_PUINT32 pui32RGXClockSpeed) -+{ -+ RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; -+ -+ /* get clock speed */ -+ *pui32RGXClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; -+ -+ return PVRSRV_OK; -+} -+ -+#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) -+/*! 
-+ ******************************************************************************* -+ -+ @Function RGXInitFwRawHeap -+ -+ @Description Called to perform additional initialisation -+ ******************************************************************************/ -+static PVRSRV_ERROR RGXInitFwRawHeap(PVRSRV_RGXDEV_INFO *psDevInfo, DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32DriverID) -+{ -+ IMG_UINT32 uiStringLength; -+ IMG_UINT32 uiStringLengthMax = 32; -+ -+ IMG_UINT32 ui32Log2RgxDefaultPageShift = RGXHeapDerivePageSize(OSGetPageShift()); -+ -+ PVR_RETURN_IF_FALSE(ui32Log2RgxDefaultPageShift != 0, PVRSRV_ERROR_INVALID_PARAMS); -+ -+#if defined(FIX_HW_BRN_71317_BIT_MASK) -+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 71317)) -+ { -+ if (ui32Log2RgxDefaultPageShift == RGX_HEAP_2MB_PAGE_SHIFT -+ || ui32Log2RgxDefaultPageShift == RGX_HEAP_1MB_PAGE_SHIFT) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "OS page size too large for device virtual heaps. " -+ "Maximum page size supported is 256KB when BRN71317 is present.")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ } -+#endif -+ -+ uiStringLength = MIN(sizeof(RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT), uiStringLengthMax + 1); -+ -+ /* Start by allocating memory for this DriverID heap identification string */ -+ psDevMemHeap->pszName = OSAllocMem(uiStringLength * sizeof(IMG_CHAR)); -+ if (psDevMemHeap->pszName == NULL) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ /* Append the DriverID number to the RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT string */ -+ OSSNPrintf((IMG_CHAR *)psDevMemHeap->pszName, uiStringLength, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32DriverID); -+ -+ /* Use the common blueprint template support function to initialise the heap */ -+ HeapCfgBlueprintInit(psDevMemHeap->pszName, -+ RGX_FIRMWARE_RAW_HEAP_BASE + (ui32DriverID * RGX_FIRMWARE_RAW_HEAP_SIZE), -+ RGX_FIRMWARE_RAW_HEAP_SIZE, -+ 0, -+ ui32Log2RgxDefaultPageShift, -+ 0, -+ NULL, -+ NULL, -+ psDevMemHeap); -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+ ******************************************************************************* -+ -+ @Function RGXDeInitFwRawHeap -+ -+ @Description Called to perform additional deinitialisation -+ ******************************************************************************/ -+static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap) -+{ -+ IMG_UINT64 uiBase = RGX_FIRMWARE_RAW_HEAP_BASE + RGX_FIRMWARE_RAW_HEAP_SIZE; -+ IMG_UINT64 uiSpan = uiBase + ((RGX_NUM_DRIVERS_SUPPORTED - 1) * RGX_FIRMWARE_RAW_HEAP_SIZE); -+ -+ /* Safe to do as the guest firmware heaps are last in the list */ -+ if (psDevMemHeap->sHeapBaseAddr.uiAddr >= uiBase && -+ psDevMemHeap->sHeapBaseAddr.uiAddr < uiSpan) -+ { -+ void *pszName = (void*)psDevMemHeap->pszName; -+ OSFreeMem(pszName); -+ } -+} -+#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */ -+ -+/****************************************************************************** -+ End of file (rgxinit.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxinit.h b/drivers/gpu/drm/img-rogue/rgxinit.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxinit.h -@@ -0,0 +1,282 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX initialisation header file -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@Description Header for the RGX initialisation -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGXINIT_H) -+#define RGXINIT_H -+ -+#include "connection_server.h" -+#include "pvrsrv_error.h" -+#include "img_types.h" -+#include "device.h" -+#include "rgxdevice.h" -+#include "rgx_bridge.h" -+#include "fwload.h" -+ -+#if defined(__linux__) -+#define OS_FW_VERIFY_FUNCTION OSVerifyFirmware -+#else -+#define OS_FW_VERIFY_FUNCTION NULL -+#endif -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXInitDevPart2 -+ -+ @Description -+ -+ Second part of server-side RGX initialisation -+ -+ @Input psDeviceNode - device node -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32DeviceFlags, -+ IMG_UINT32 ui32HWPerfHostFilter, -+ RGX_ACTIVEPM_CONF eActivePMConf, -+ RGX_FWT_LOGTYPE eDebugDumpFWTLogType); -+ -+PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_DEVMEM_SIZE_T ui32FWCodeLen, -+ IMG_DEVMEM_SIZE_T ui32FWDataLen, -+ IMG_DEVMEM_SIZE_T uiFWCorememCodeLen, -+ IMG_DEVMEM_SIZE_T uiFWCorememDataLen); -+ -+ -+/*! 
-+******************************************************************************* -+ -+ @Function RGXInitFirmware -+ -+ @Description -+ -+ Server-side RGX firmware initialisation -+ -+ @Input psDeviceNode - device node -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR -+RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_BOOL bEnableSignatureChecks, -+ IMG_UINT32 ui32SignatureChecksBufSize, -+ IMG_UINT32 ui32HWPerfFWBufSizeKB, -+ IMG_UINT64 ui64HWPerfFilter, -+ IMG_UINT32 ui32ConfigFlags, -+ IMG_UINT32 ui32LogType, -+ IMG_UINT32 ui32FilterFlags, -+ IMG_UINT32 ui32JonesDisableMask, -+ IMG_UINT32 ui32HWRDebugDumpLimit, -+ IMG_UINT32 ui32HWPerfCountersDataSize, -+ IMG_UINT32 *pui32TPUTrilinearFracMask, -+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf, -+ FW_PERF_CONF eFirmwarePerf, -+ IMG_UINT32 ui32KCCBSizeLog2, -+ IMG_UINT32 ui32ConfigFlagsExt, -+ IMG_UINT32 ui32FwOsCfgFlags); -+ -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXLoadAndGetFWData -+ -+ @Description -+ -+ Load FW and return pointer to FW data. -+ -+ @Input psDeviceNode - device node -+ -+ @Input ppsRGXFW - fw pointer -+ -+ @Output ppbFWData - pointer to FW data (NULL if an error occurred) -+ -+ @Return PVRSRV_ERROR - PVRSRV_OK on success -+ PVRSRV_ERROR_NOT_READY if filesystem is not ready -+ PVRSRV_ERROR_NOT_FOUND if no suitable FW image found -+ PVRSRV_ERROR_OUT_OF_MEMORY if unable to alloc memory for FW image -+ PVRSRV_ERROR_NOT_AUTHENTICATED if FW image failed verification -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode, -+ OS_FW_IMAGE **ppsRGXFW, -+ const IMG_BYTE **ppbFWData); -+ -+#if defined(PDUMP) -+/*! -+******************************************************************************* -+ -+ @Function RGXInitHWPerfCounters -+ -+ @Description -+ -+ Initialisation of the performance counters -+ -+ @Input psDeviceNode - device node -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode); -+#endif -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXRegisterDevice -+ -+ @Description -+ -+ Registers the device with the system -+ -+ @Input: psDeviceNode - device node -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXDevBVNCString -+ -+ @Description -+ -+ Returns the Device BVNC string. It will allocate and fill it first, if necessary. -+ -+ @Input: psDevInfo - device info (must not be null) -+ -+ @Return IMG_PCHAR - pointer to BVNC string -+ -+******************************************************************************/ -+IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+/*! -+******************************************************************************* -+ -+ @Function DevDeInitRGX -+ -+ @Description -+ -+ Reset and deinitialise Chip -+ -+ @Input psDeviceNode - device info. 
structure -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+ -+#if !defined(NO_HARDWARE) -+ -+void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+/*! -+******************************************************************************* -+ -+ @Function SORgxGpuUtilStatsRegister -+ -+ @Description SO Interface function called from the OS layer implementation. -+ Initialise data used to compute GPU utilisation statistics -+ for a particular user (identified by the handle passed as -+ argument). This function must be called only once for each -+ different user/handle. -+ -+ @Input phGpuUtilUser - Pointer to handle used to identify a user of -+ RGXGetGpuUtilStats -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser); -+ -+ -+/*! -+******************************************************************************* -+ -+ @Function SORgxGpuUtilStatsUnregister -+ -+ @Description SO Interface function called from the OS layer implementation. -+ Free data previously used to compute GPU utilisation statistics -+ for a particular user (identified by the handle passed as -+ argument). -+ -+ @Input hGpuUtilUser - Handle used to identify a user of -+ RGXGetGpuUtilStats -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser); -+#endif /* !defined(NO_HARDWARE) */ -+ -+/*! -+ ******************************************************************************* -+ -+ @Function RGXInitCreateFWKernelMemoryContext -+ -+ @Description Called to perform initialisation during firmware kernel context -+ creation. -+ -+ @Input psDeviceNode device node -+ ******************************************************************************/ -+PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+/*! -+ ******************************************************************************* -+ -+ @Function RGXDeInitDestroyFWKernelMemoryContext -+ -+ @Description Called to perform deinitialisation during firmware kernel -+ context destruction. -+ -+ @Input psDeviceNode device node -+ ******************************************************************************/ -+void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+#endif /* RGXINIT_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxkicksync.c b/drivers/gpu/drm/img-rogue/rgxkicksync.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxkicksync.c -@@ -0,0 +1,804 @@ -+/*************************************************************************/ /*! -+@File rgxkicksync.c -+@Title Server side of the sync only kick API -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#include "img_defs.h" -+#include "rgxkicksync.h" -+ -+#include "rgxdevice.h" -+#include "rgxmem.h" -+#include "rgxutils.h" -+#include "rgxfwutils.h" -+#include "rgxfwcmnctx.h" -+#include "allocmem.h" -+#include "sync.h" -+#include "rgxhwperf.h" -+#include "ospvr_gputrace.h" -+ -+#include "sync_checkpoint.h" -+#include "sync_checkpoint_internal.h" -+ -+/* Enable this to dump the compiled list of UFOs prior to kick call */ -+#define ENABLE_KICKSYNC_UFO_DUMP 0 -+ -+//#define KICKSYNC_CHECKPOINT_DEBUG 1 -+ -+#if defined(KICKSYNC_CHECKPOINT_DEBUG) -+#define CHKPT_DBG(X) PVR_DPF(X) -+#else -+#define CHKPT_DBG(X) -+#endif -+ -+struct _RGX_SERVER_KICKSYNC_CONTEXT_ -+{ -+ PVRSRV_DEVICE_NODE * psDeviceNode; -+ RGX_SERVER_COMMON_CONTEXT * psServerCommonContext; -+ DLLIST_NODE sListNode; -+ SYNC_ADDR_LIST sSyncAddrListFence; -+ SYNC_ADDR_LIST sSyncAddrListUpdate; -+ POS_LOCK hLock; -+}; -+ -+ -+PVRSRV_ERROR PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_HANDLE hMemCtxPrivData, -+ IMG_UINT32 ui32PackedCCBSizeU88, -+ IMG_UINT32 ui32ContextFlags, -+ RGX_SERVER_KICKSYNC_CONTEXT **ppsKickSyncContext) -+{ -+ PVRSRV_RGXDEV_INFO * psDevInfo = psDeviceNode->pvDevice; -+ DEVMEM_MEMDESC * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); -+ RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext; -+ RGX_COMMON_CONTEXT_INFO sInfo; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32CCBAllocSizeLog2, ui32CCBMaxAllocSizeLog2; -+ -+ memset(&sInfo, 0, sizeof(sInfo)); -+ -+ /* Prepare cleanup struct */ -+ * ppsKickSyncContext = NULL; -+ psKickSyncContext = OSAllocZMem(sizeof(*psKickSyncContext)); -+ if (psKickSyncContext == NULL) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ eError = OSLockCreate(&psKickSyncContext->hLock); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto err_lockcreate; -+ } -+ -+ psKickSyncContext->psDeviceNode = psDeviceNode; -+ -+ ui32CCBAllocSizeLog2 = U32toU8_Unpack1(ui32PackedCCBSizeU88); -+ ui32CCBMaxAllocSizeLog2 = U32toU8_Unpack2(ui32PackedCCBSizeU88); -+ eError = FWCommonContextAllocate(psConnection, -+ psDeviceNode, -+ REQ_TYPE_KICKSYNC, -+ RGXFWIF_DM_GP, -+ hMemCtxPrivData, -+ NULL, -+ 0, -+ psFWMemContextMemDesc, -+ NULL, -+ ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_KICKSYNC_CCB_SIZE_LOG2, -+ ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_KICKSYNC_CCB_MAX_SIZE_LOG2, -+ ui32ContextFlags, -+ 0, /* priority */ -+ 0, /* max deadline MS */ -+ 0, /* robustness address */ -+ & sInfo, -+ & psKickSyncContext->psServerCommonContext); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_contextalloc; -+ } -+ -+ OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock); -+ dllist_add_to_tail(&(psDevInfo->sKickSyncCtxtListHead), &(psKickSyncContext->sListNode)); -+ OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock); -+ -+ SyncAddrListInit(&psKickSyncContext->sSyncAddrListFence); -+ SyncAddrListInit(&psKickSyncContext->sSyncAddrListUpdate); -+ -+ * ppsKickSyncContext = psKickSyncContext; -+ return PVRSRV_OK; -+ -+fail_contextalloc: -+ OSLockDestroy(psKickSyncContext->hLock); -+err_lockcreate: -+ OSFreeMem(psKickSyncContext); -+ return eError; -+} -+ -+ -+PVRSRV_ERROR PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext) -+{ -+ PVRSRV_RGXDEV_INFO * psDevInfo = psKickSyncContext->psDeviceNode->pvDevice; -+ PVRSRV_ERROR eError; -+ -+ /* Check if the FW has finished with this resource ... */ -+ eError = RGXFWRequestCommonContextCleanUp(psKickSyncContext->psDeviceNode, -+ psKickSyncContext->psServerCommonContext, -+ RGXFWIF_DM_GP, -+ PDUMP_FLAGS_NONE); -+ -+ if (RGXIsErrorAndDeviceRecoverable(psKickSyncContext->psDeviceNode, &eError)) -+ { -+ return eError; -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ } -+ -+ /* ... it has so we can free its resources */ -+ -+ OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock); -+ dllist_remove_node(&(psKickSyncContext->sListNode)); -+ OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock); -+ -+ FWCommonContextFree(psKickSyncContext->psServerCommonContext); -+ psKickSyncContext->psServerCommonContext = NULL; -+ -+ SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListFence); -+ SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListUpdate); -+ -+ OSLockDestroy(psKickSyncContext->hLock); -+ -+ OSFreeMem(psKickSyncContext); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRGXSetKickSyncContextPropertyKM(RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContext, -+ RGX_CONTEXT_PROPERTY eContextProperty, -+ IMG_UINT64 ui64Input, -+ IMG_UINT64 *pui64Output) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ switch (eContextProperty) -+ { -+ case RGX_CONTEXT_PROPERTY_FLAGS: -+ { -+ IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input; -+ -+ OSLockAcquire(psKickSyncContext->hLock); -+ eError = FWCommonContextSetFlags(psKickSyncContext->psServerCommonContext, -+ ui32ContextFlags); -+ -+ OSLockRelease(psKickSyncContext->hLock); -+ break; -+ } -+ -+ default: -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); -+ eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ } -+ } -+ -+ return eError; -+} -+ -+void DumpKickSyncCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ IMG_UINT32 ui32VerbLevel) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ OSWRLockAcquireRead(psDevInfo->hKickSyncCtxListLock); -+ dllist_foreach_node(&psDevInfo->sKickSyncCtxtListHead, psNode, psNext) -+ { -+ RGX_SERVER_KICKSYNC_CONTEXT *psCurrentServerKickSyncCtx = -+ IMG_CONTAINER_OF(psNode, RGX_SERVER_KICKSYNC_CONTEXT, sListNode); -+ -+ if (NULL != psCurrentServerKickSyncCtx->psServerCommonContext) -+ { -+ 
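/*
 * The create call above receives both client CCB sizes packed into a single
 * 32-bit word (ui32PackedCCBSizeU88), unpacks them with U32toU8_Unpack1/2 and
 * falls back to RGX_KICKSYNC_CCB_SIZE_LOG2 / RGX_KICKSYNC_CCB_MAX_SIZE_LOG2
 * when a byte is zero.  The self-contained sketch below illustrates one
 * plausible U8.8 layout (low byte = initial log2 size, next byte = maximum
 * log2 size); the helper names and exact bit positions here are assumptions
 * for illustration only, not the DDK definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PACK_U88(init_log2, max_log2) \
	((uint32_t)(((init_log2) & 0xFFu) | (((max_log2) & 0xFFu) << 8)))
#define EXAMPLE_UNPACK1(packed)  ((packed) & 0xFFu)         /* initial log2 size */
#define EXAMPLE_UNPACK2(packed)  (((packed) >> 8) & 0xFFu)  /* maximum log2 size */

int main(void)
{
	uint32_t ui32Packed = EXAMPLE_PACK_U88(13, 15);  /* 8 KiB initial, 32 KiB max */

	printf("initial CCB: %u bytes, max CCB: %u bytes\n",
	       1u << EXAMPLE_UNPACK1(ui32Packed),
	       1u << EXAMPLE_UNPACK2(ui32Packed));
	return 0;
}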
DumpFWCommonContextInfo(psCurrentServerKickSyncCtx->psServerCommonContext, -+ pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); -+ } -+ } -+ OSWRLockReleaseRead(psDevInfo->hKickSyncCtxListLock); -+} -+ -+IMG_UINT32 CheckForStalledClientKickSyncCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ IMG_UINT32 ui32ContextBitMask = 0; -+ -+ OSWRLockAcquireRead(psDevInfo->hKickSyncCtxListLock); -+ -+ dllist_foreach_node(&psDevInfo->sKickSyncCtxtListHead, psNode, psNext) -+ { -+ RGX_SERVER_KICKSYNC_CONTEXT *psCurrentServerKickSyncCtx = -+ IMG_CONTAINER_OF(psNode, RGX_SERVER_KICKSYNC_CONTEXT, sListNode); -+ -+ if (NULL != psCurrentServerKickSyncCtx->psServerCommonContext) -+ { -+ if (CheckStalledClientCommonContext(psCurrentServerKickSyncCtx->psServerCommonContext, RGX_KICK_TYPE_DM_GP) == PVRSRV_ERROR_CCCB_STALLED) -+ { -+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_GP; -+ } -+ } -+ } -+ -+ OSWRLockReleaseRead(psDevInfo->hKickSyncCtxListLock); -+ return ui32ContextBitMask; -+} -+ -+PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext, -+ IMG_UINT32 ui32ClientUpdateCount, -+ SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock, -+ IMG_UINT32 * paui32ClientUpdateOffset, -+ IMG_UINT32 * paui32ClientUpdateValue, -+ PVRSRV_FENCE iCheckFence, -+ PVRSRV_TIMELINE iUpdateTimeline, -+ PVRSRV_FENCE * piUpdateFence, -+ IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], -+ IMG_UINT32 ui32ExtJobRef) -+{ -+ RGXFWIF_KCCB_CMD sKickSyncKCCBCmd; -+ RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1]; -+ PVRSRV_ERROR eError; -+ PVRSRV_ERROR eError2; -+ IMG_BOOL bCCBStateOpen = IMG_FALSE; -+ PRGXFWIF_UFO_ADDR *pauiClientFenceUFOAddress = NULL; -+ PRGXFWIF_UFO_ADDR *pauiClientUpdateUFOAddress = NULL; -+ IMG_UINT32 ui32ClientFenceCount = 0; -+ IMG_UINT32 *paui32ClientFenceValue = NULL; -+ PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE; -+ IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr; -+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psKickSyncContext->psServerCommonContext); -+ RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psKickSyncContext->psServerCommonContext); -+ IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); -+ IMG_UINT64 uiCheckFenceUID = 0; -+ IMG_UINT64 uiUpdateFenceUID = 0; -+ PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; -+ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; -+ IMG_UINT32 ui32FenceSyncCheckpointCount = 0; -+ IMG_UINT32 ui32FenceTimelineUpdateValue = 0; -+ IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; -+ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL; -+ void *pvUpdateFenceFinaliseData = NULL; -+ -+ /* Ensure we haven't been given a null ptr to -+ * update values if we have been told we -+ * have dev var updates -+ */ -+ if (ui32ClientUpdateCount > 0) -+ { -+ PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL, -+ "paui32ClientUpdateValue NULL but ui32ClientUpdateCount > 0", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ OSLockAcquire(psKickSyncContext->hLock); -+ eError = SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListUpdate, -+ ui32ClientUpdateCount, -+ pauiClientUpdateUFODevVarBlock, -+ paui32ClientUpdateOffset); -+ -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_syncaddrlist; -+ } -+ -+ if (ui32ClientUpdateCount > 0) -+ { -+ pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs; -+ } -+ /* Ensure the string is null-terminated (Required for safety) */ -+ szUpdateFenceName[31] = '\0'; -+ -+ 
/* This will never be true if called from the bridge since piUpdateFence will always be valid */ -+ if (iUpdateTimeline >= 0 && !piUpdateFence) -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto out_unlock; -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), " -+ "psKickSyncContext->psDeviceNode->hSyncCheckpointContext=<%p>...", -+ __func__, iCheckFence, -+ (void*)psKickSyncContext->psDeviceNode->hSyncCheckpointContext)); -+ /* Resolve the sync checkpoints that make up the input fence */ -+ eError = SyncCheckpointResolveFence(psKickSyncContext->psDeviceNode->hSyncCheckpointContext, -+ iCheckFence, -+ &ui32FenceSyncCheckpointCount, -+ &apsFenceSyncCheckpoints, -+ &uiCheckFenceUID, -+ PDUMP_FLAGS_NONE); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_resolve_fence; -+ } -+ -+ /* Create the output fence (if required) */ -+ if (iUpdateTimeline != PVRSRV_NO_TIMELINE) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: calling SyncCheckpointCreateFence (iUpdateTimeline=%d)...", -+ __func__, iUpdateTimeline)); -+ eError = SyncCheckpointCreateFence(psKickSyncContext->psDeviceNode, -+ szUpdateFenceName, -+ iUpdateTimeline, -+ psKickSyncContext->psDeviceNode->hSyncCheckpointContext, -+ &iUpdateFence, -+ &uiUpdateFenceUID, -+ &pvUpdateFenceFinaliseData, -+ &psUpdateSyncCheckpoint, -+ (void*)&psFenceTimelineUpdateSync, -+ &ui32FenceTimelineUpdateValue, -+ PDUMP_FLAGS_NONE); -+ if (eError != PVRSRV_OK) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", -+ __func__, eError)); -+ goto fail_create_output_fence; -+ } -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: ...returned from SyncCheckpointCreateFence " -+ "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, " -+ "ui32FenceTimelineUpdateValue=%u)", -+ __func__, iUpdateFence, psFenceTimelineUpdateSync, -+ ui32FenceTimelineUpdateValue)); -+ -+ /* Append the sync prim update for the timeline (if required) */ -+ if (psFenceTimelineUpdateSync) -+ { -+ IMG_UINT32 *pui32TimelineUpdateWp = NULL; -+ -+ /* Allocate memory to hold the list of update values (including our timeline update) */ -+ pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*paui32ClientUpdateValue) * (ui32ClientUpdateCount+1)); -+ if (!pui32IntAllocatedUpdateValues) -+ { -+ /* Failed to allocate memory */ -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto fail_alloc_update_values_mem; -+ } -+ OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientUpdateCount+1)); -+ /* Copy the update values into the new memory, then append our timeline update value */ -+ OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32ClientUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32ClientUpdateCount); -+ /* Now set the additional update value */ -+ pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32ClientUpdateCount; -+ *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue; -+ ui32ClientUpdateCount++; -+ /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */ -+ paui32ClientUpdateValue = pui32IntAllocatedUpdateValues; -+#if defined(KICKSYNC_CHECKPOINT_DEBUG) -+ { -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; -+ -+ for (iii=0; iii) = 0x%x", -+ __func__, iii, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+ } -+#endif -+ /* Now append the timeline sync prim addr to the kicksync context update list */ -+ SyncAddrListAppendSyncPrim(&psKickSyncContext->sSyncAddrListUpdate, -+ psFenceTimelineUpdateSync); -+ } -+ } -+ -+ /* Reset number of fence 
syncs in kicksync context fence list to 0 */ -+ SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListFence, -+ 0, NULL, NULL); -+ -+ if (ui32FenceSyncCheckpointCount > 0) -+ { -+ /* Append the checks (from input fence) */ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Append %d sync checkpoints to KickSync Fence " -+ "(&psKickSyncContext->sSyncAddrListFence=<%p>)...", -+ __func__, ui32FenceSyncCheckpointCount, -+ (void*)&psKickSyncContext->sSyncAddrListFence)); -+ SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListFence, -+ ui32FenceSyncCheckpointCount, -+ apsFenceSyncCheckpoints); -+ -+ pauiClientFenceUFOAddress = psKickSyncContext->sSyncAddrListFence.pasFWAddrs; -+ -+ ui32ClientFenceCount += ui32FenceSyncCheckpointCount; -+#if defined(KICKSYNC_CHECKPOINT_DEBUG) -+ { -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientFenceUFOAddress; -+ -+ for (iii=0; iii) = 0x%x", -+ __func__, iii, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+ } -+#endif -+ } -+ -+ if (psUpdateSyncCheckpoint) -+ { -+ PVRSRV_ERROR eErr; -+ -+ /* Append the update (from output fence) */ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Append 1 sync checkpoint to KickSync Update " -+ "(&psKickSyncContext->sSyncAddrListUpdate=<%p>)...", -+ __func__, (void*)&psKickSyncContext->sSyncAddrListUpdate)); -+ eErr = SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListUpdate, -+ 1, -+ &psUpdateSyncCheckpoint); -+ if (eErr != PVRSRV_OK) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: ...done. SyncAddrListAppendCheckpoints() returned error (%d)", -+ __func__, eErr)); -+ } -+ else -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done.", __func__)); -+ } -+ if (!pauiClientUpdateUFOAddress) -+ { -+ pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs; -+ } -+ ui32ClientUpdateCount++; -+#if defined(KICKSYNC_CHECKPOINT_DEBUG) -+ { -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientUpdateUFOAddress; -+ -+ for (iii=0; iii) = 0x%x", -+ __func__, iii, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+ } -+#endif -+ } -+ -+#if (ENABLE_KICKSYNC_UFO_DUMP == 1) -+ PVR_DPF((PVR_DBG_ERROR, "%s: dumping KICKSYNC fence/updates syncs...", -+ __func__)); -+ { -+ IMG_UINT32 ii; -+ PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiClientFenceUFOAddress; -+ IMG_UINT32 *pui32TmpIntFenceValue = paui32ClientFenceValue; -+ PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiClientUpdateUFOAddress; -+ IMG_UINT32 *pui32TmpIntUpdateValue = paui32ClientUpdateValue; -+ -+ /* Dump Fence syncs and Update syncs */ -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Prepared %d KickSync fence syncs " -+ "(&psKickSyncContext->sSyncAddrListFence=<%p>, " -+ "pauiClientFenceUFOAddress=<%p>):", -+ __func__, ui32ClientFenceCount, -+ (void*)&psKickSyncContext->sSyncAddrListFence, -+ (void*)pauiClientFenceUFOAddress)); -+ for (ii=0; iiui32Addr & 0x1) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %d/%d<%p>. FWAddr=0x%x, " -+ "CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", -+ __func__, ii + 1, ui32ClientFenceCount, -+ (void*)psTmpIntFenceUFOAddress, -+ psTmpIntFenceUFOAddress->ui32Addr)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %d/%d<%p>. 
FWAddr=0x%x, CheckValue=%d(0x%x)", -+ __func__, ii + 1, ui32ClientFenceCount, -+ (void*)psTmpIntFenceUFOAddress, -+ psTmpIntFenceUFOAddress->ui32Addr, -+ *pui32TmpIntFenceValue, -+ *pui32TmpIntFenceValue)); -+ pui32TmpIntFenceValue++; -+ } -+ psTmpIntFenceUFOAddress++; -+ } -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Prepared %d KickSync update syncs " -+ "(&psKickSyncContext->sSyncAddrListUpdate=<%p>, " -+ "pauiClientUpdateUFOAddress=<%p>):", -+ __func__, ui32ClientUpdateCount, -+ (void*)&psKickSyncContext->sSyncAddrListUpdate, -+ (void*)pauiClientUpdateUFOAddress)); -+ for (ii=0; ii", -+ __func__, __LINE__, -+ (void*)psTmpIntUpdateUFOAddress)); -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Line %d, pui32TmpIntUpdateValue=<%p>", -+ __func__, __LINE__, -+ (void*)pui32TmpIntUpdateValue)); -+ if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %d/%d<%p>. FWAddr=0x%x, " -+ "UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", -+ __func__, ii + 1, ui32ClientUpdateCount, -+ (void*)psTmpIntUpdateUFOAddress, -+ psTmpIntUpdateUFOAddress->ui32Addr)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", -+ __func__, ii + 1, ui32ClientUpdateCount, -+ (void*)psTmpIntUpdateUFOAddress, -+ psTmpIntUpdateUFOAddress->ui32Addr, -+ *pui32TmpIntUpdateValue)); -+ pui32TmpIntUpdateValue++; -+ } -+ psTmpIntUpdateUFOAddress++; -+ } -+ } -+#endif -+ -+ RGXCmdHelperInitCmdCCB(psDevInfo, -+ psClientCCB, -+ 0, /* empty ui64FBSCEntryMask */ -+ ui32ClientFenceCount, -+ pauiClientFenceUFOAddress, -+ paui32ClientFenceValue, -+ ui32ClientUpdateCount, -+ pauiClientUpdateUFOAddress, -+ paui32ClientUpdateValue, -+ 0, -+ NULL, -+ NULL, -+ NULL, -+ NULL, -+ RGXFWIF_CCB_CMD_TYPE_NULL, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ PDUMP_FLAGS_NONE, -+ NULL, -+ "KickSync", -+ bCCBStateOpen, -+ asCmdHelperData); -+ -+ eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_cmdaquire; -+ } -+ -+ /* -+ * We should reserve space in the kernel CCB here and fill in the command -+ * directly. -+ * This is so if there isn't space in the kernel CCB we can return with -+ * retry back to services client before we take any operations -+ */ -+ -+ /* -+ * All the required resources are ready at this point, we can't fail so -+ * take the required server sync operations and commit all the resources -+ */ -+ eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ -+ /* If system is found powered OFF, Retry scheduling the command */ -+ if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) -+ { -+ eError = PVRSRV_ERROR_RETRY; -+ } -+ goto fail_acquirepowerlock; -+ } -+ -+ RGXCmdHelperReleaseCmdCCB(1, -+ asCmdHelperData, -+ "KickSync", -+ FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr); -+ -+ /* Construct the kernel kicksync CCB command. 
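/*
 * The UFO dump above tells the two kinds of sync object apart by bit 0 of the
 * firmware address: an address with the low bit set is treated as a sync
 * checkpoint (its check/update value is implicitly "signalled"), anything
 * else as a sync prim carrying an explicit value.  The self-contained sketch
 * below only illustrates this style of low-bit tagging on aligned addresses;
 * the type and function names here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t ui32Addr; } EXAMPLE_UFO_ADDR;

static void ExampleDumpUfo(const EXAMPLE_UFO_ADDR *psUfo, uint32_t ui32Value)
{
	if (psUfo->ui32Addr & 0x1u)
	{
		/* Low bit set: a sync checkpoint, the expected value is implicit */
		printf("FWAddr=0x%x, CheckValue=SIGNALLED\n", (unsigned)psUfo->ui32Addr);
	}
	else
	{
		/* Low bit clear: a sync prim with an explicit check/update value */
		printf("FWAddr=0x%x, CheckValue=%u\n",
		       (unsigned)psUfo->ui32Addr, (unsigned)ui32Value);
	}
}

int main(void)
{
	EXAMPLE_UFO_ADDR sCheckpoint = { 0x1000u | 0x1u };  /* tagged address */
	EXAMPLE_UFO_ADDR sSyncPrim   = { 0x2000u };         /* plain address  */

	ExampleDumpUfo(&sCheckpoint, 0);
	ExampleDumpUfo(&sSyncPrim, 42);
	return 0;
}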
*/ -+ sKickSyncKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; -+ sKickSyncKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext); -+ sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); -+ sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); -+ -+ sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; -+#endif -+ -+ /* -+ * Submit the kicksync command to the firmware. -+ */ -+ RGXSRV_HWPERF_ENQ(psKickSyncContext, -+ OSGetCurrentClientProcessIDKM(), -+ ui32FWCtx, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ RGX_HWPERF_KICK_TYPE2_SYNC, -+ iCheckFence, -+ iUpdateFence, -+ iUpdateTimeline, -+ uiCheckFenceUID, -+ uiUpdateFenceUID, -+ NO_DEADLINE, -+ NO_CYCEST); -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError2 = RGXScheduleCommandWithoutPowerLock(psKickSyncContext->psDeviceNode->pvDevice, -+ RGXFWIF_DM_GP, -+ & sKickSyncKCCBCmd, -+ PDUMP_FLAGS_NONE); -+ if (eError2 != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ PVRSRVPowerUnlock(psDevInfo->psDeviceNode); -+ -+ PVRGpuTraceEnqueueEvent(psKickSyncContext->psDeviceNode, -+ ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, -+ RGX_HWPERF_KICK_TYPE2_SYNC); -+ -+ if (eError2 != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PVRSRVRGXKickSync failed to schedule kernel CCB command. (0x%x)", -+ eError)); -+ eError = eError2; -+ } -+ -+ /* -+ * Now check eError (which may have returned an error from our earlier call -+ * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first -+ * so we check it now... 
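/*
 * The LOOP_UNTIL_TIMEOUT/END_LOOP_UNTIL_TIMEOUT pair used above keeps
 * re-submitting the kernel CCB command while the firmware queue is full
 * (PVRSRV_ERROR_RETRY), sleeping MAX_HW_TIME_US/WAIT_TRY_COUNT between
 * attempts and appearing to bound the whole sequence to roughly
 * MAX_HW_TIME_US.  A self-contained sketch of that retry shape follows; the
 * constants and helper functions here are stand-ins, not the DDK macros.
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_MAX_WAIT_US  1000000u  /* total budget, in the spirit of MAX_HW_TIME_US */
#define EXAMPLE_TRY_COUNT    100u      /* in the spirit of WAIT_TRY_COUNT */

/* Stand-ins for the DDK calls: a submit that reports "queue full" and a delay */
static bool ExampleTrySubmit(void)         { return false; }
static void ExampleWaitUs(uint32_t ui32Us) { (void)ui32Us; /* e.g. a usleep/udelay */ }

static bool ExampleSubmitWithRetry(void)
{
	uint32_t ui32Elapsed;

	for (ui32Elapsed = 0u;
	     ui32Elapsed < EXAMPLE_MAX_WAIT_US;
	     ui32Elapsed += EXAMPLE_MAX_WAIT_US / EXAMPLE_TRY_COUNT)
	{
		if (ExampleTrySubmit())
		{
			return true;   /* queued: anything other than a "retry" result */
		}
		ExampleWaitUs(EXAMPLE_MAX_WAIT_US / EXAMPLE_TRY_COUNT);
	}

	return false;          /* queue still full after the whole time budget */
}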
-+ */ -+ if (eError != PVRSRV_OK ) -+ { -+ goto fail_cmdaquire; -+ } -+ -+#if defined(NO_HARDWARE) -+ /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ -+ if (psUpdateSyncCheckpoint) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", -+ __func__, (void*)psUpdateSyncCheckpoint, -+ SyncCheckpointGetId(psUpdateSyncCheckpoint), -+ SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint))); -+ SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint); -+ } -+ if (psFenceTimelineUpdateSync) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Updating NOHW sync prim<%p> to %d", -+ __func__, (void*)psFenceTimelineUpdateSync, -+ ui32FenceTimelineUpdateValue)); -+ SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue); -+ } -+ SyncCheckpointNoHWUpdateTimelines(NULL); -+#endif -+ /* Drop the references taken on the sync checkpoints in the -+ * resolved input fence */ -+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, -+ apsFenceSyncCheckpoints); -+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ -+ if (apsFenceSyncCheckpoints) -+ { -+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); -+ } -+ /* Free memory allocated to hold the internal list of update values */ -+ if (pui32IntAllocatedUpdateValues) -+ { -+ OSFreeMem(pui32IntAllocatedUpdateValues); -+ pui32IntAllocatedUpdateValues = NULL; -+ } -+ -+ *piUpdateFence = iUpdateFence; -+ if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE)) -+ { -+ SyncCheckpointFinaliseFence(psKickSyncContext->psDeviceNode, iUpdateFence, -+ pvUpdateFenceFinaliseData, -+ psUpdateSyncCheckpoint, szUpdateFenceName); -+ } -+ -+ OSLockRelease(psKickSyncContext->hLock); -+ return PVRSRV_OK; -+ -+fail_acquirepowerlock: -+fail_cmdaquire: -+ SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListFence); -+ SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListUpdate); -+ if (iUpdateFence != PVRSRV_NO_FENCE) -+ { -+ SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData); -+ } -+ -+ /* Free memory allocated to hold update values */ -+ if (pui32IntAllocatedUpdateValues) -+ { -+ OSFreeMem(pui32IntAllocatedUpdateValues); -+ } -+fail_alloc_update_values_mem: -+fail_create_output_fence: -+ /* Drop the references taken on the sync checkpoints in the -+ * resolved input fence */ -+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, -+ apsFenceSyncCheckpoints); -+ /* Free memory allocated to hold the resolved fence's checkpoints */ -+ if (apsFenceSyncCheckpoints) -+ { -+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); -+ } -+fail_resolve_fence: -+fail_syncaddrlist: -+out_unlock: -+ OSLockRelease(psKickSyncContext->hLock); -+ return eError; -+} -+ -+/**************************************************************************//** -+ End of file (rgxkicksync.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxkicksync.h b/drivers/gpu/drm/img-rogue/rgxkicksync.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxkicksync.h -@@ -0,0 +1,128 @@ -+/*************************************************************************/ /*! -+@File rgxkicksync.h -+@Title Server side of the sync only kick API -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@Description -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGXKICKSYNC_H) -+#define RGXKICKSYNC_H -+ -+#include "pvrsrv_error.h" -+#include "connection_server.h" -+#include "sync_server.h" -+#include "rgxdevice.h" -+ -+ -+typedef struct _RGX_SERVER_KICKSYNC_CONTEXT_ RGX_SERVER_KICKSYNC_CONTEXT; -+ -+/**************************************************************************/ /*! -+@Function DumpKickSyncCtxtsInfo -+@Description Function that dumps debug info of kick sync ctxs on this device -+@Return none -+*/ /**************************************************************************/ -+void -+DumpKickSyncCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ IMG_UINT32 ui32VerbLevel); -+ -+/**************************************************************************/ /*! -+@Function CheckForStalledClientKickSyncCtxt -+@Description Function that checks if a kick sync client is stalled -+@Return RGX_KICK_TYPE_DM_GP on stalled context. Otherwise, 0 -+*/ /**************************************************************************/ -+IMG_UINT32 CheckForStalledClientKickSyncCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+/**************************************************************************/ /*! -+@Function PVRSRVRGXCreateKickSyncContextKM -+@Description Server-side implementation of RGXCreateKicksyncContext -+@Return PVRSRV_OK on success. 
Otherwise, a PVRSRV_ error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_HANDLE hMemCtxPrivData, -+ IMG_UINT32 ui32PackedCCBSizeU88, -+ IMG_UINT32 ui32ContextFlags, -+ RGX_SERVER_KICKSYNC_CONTEXT ** ppsKicksyncContext); -+ -+ -+ -+/**************************************************************************/ /*! -+@Function PVRSRVRGXDestroyKickSyncContextKM -+@Description Server-side implementation of RGXDestroyKicksyncContext -+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext); -+ -+/**************************************************************************/ /*! -+@Function PVRSRVRGXSetKickSyncContextPropertyKM -+@Description Server-side implementation of RGXSetKickSyncContextProperty -+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code -+ */ /**************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXSetKickSyncContextPropertyKM(RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContext, -+ RGX_CONTEXT_PROPERTY eContextProperty, -+ IMG_UINT64 ui64Input, -+ IMG_UINT64 *pui64Output); -+ -+/**************************************************************************/ /*! -+@Function PVRSRVRGXKickSyncKM -+@Description Kicks a sync only command -+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext, -+ IMG_UINT32 ui32ClientUpdateCount, -+ SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock, -+ IMG_UINT32 * paui32ClientUpdateDevVarOffset, -+ IMG_UINT32 * paui32ClientUpdateValue, -+ PVRSRV_FENCE iCheckFence, -+ PVRSRV_TIMELINE iUpdateTimeline, -+ PVRSRV_FENCE * piUpdateFence, -+ IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], -+ -+ IMG_UINT32 ui32ExtJobRef); -+ -+#endif /* RGXKICKSYNC_H */ -+ -+/**************************************************************************//** -+ End of file (rgxkicksync.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxlayer.h b/drivers/gpu/drm/img-rogue/rgxlayer.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxlayer.h -@@ -0,0 +1,850 @@ -+/*************************************************************************/ /*! -+@File -+@Title Header for Services abstraction layer -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Declaration of an interface layer used to abstract code that -+ can be compiled outside of the DDK, potentially in a -+ completely different OS. -+ All the headers included by this file must also be copied to -+ the alternative source tree. -+ All the functions declared here must have a DDK implementation -+ inside the DDK source tree (e.g. rgxlayer_impl.h/.c) and -+ another different implementation in case they are used outside -+ of the DDK. -+ All of the functions accept as a first parameter a -+ "const void *hPrivate" argument. It should be used to pass -+ around any implementation specific data required. 
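/*
 * A condensed sketch of how the kick-sync entry points declared above fit
 * together.  In practice they are reached through the services bridge rather
 * than called directly; the connection/device/memory-context handles and the
 * fence/timeline values below are placeholders, not real initialisation.
 */
static PVRSRV_ERROR ExampleSyncOnlyKick(CONNECTION_DATA *psConnection,
                                        PVRSRV_DEVICE_NODE *psDeviceNode,
                                        IMG_HANDLE hMemCtxPrivData,
                                        PVRSRV_FENCE iCheckFence,
                                        PVRSRV_TIMELINE iUpdateTimeline)
{
	RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContext = NULL;
	PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE;
	IMG_CHAR szFenceName[PVRSRV_SYNC_NAME_LENGTH] = "example-kicksync";
	PVRSRV_ERROR eError;

	/* A packed CCB size of 0 falls back to the RGX_KICKSYNC_CCB_*_LOG2 defaults */
	eError = PVRSRVRGXCreateKickSyncContextKM(psConnection, psDeviceNode,
	                                          hMemCtxPrivData,
	                                          0 /* packed CCB size */,
	                                          0 /* context flags */,
	                                          &psKickSyncContext);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* No device-var updates here: the kick only waits for iCheckFence and
	 * signals a new fence on iUpdateTimeline. */
	eError = PVRSRVRGXKickSyncKM(psKickSyncContext,
	                             0, NULL, NULL, NULL,
	                             iCheckFence,
	                             iUpdateTimeline,
	                             &iUpdateFence,
	                             szFenceName,
	                             0 /* external job reference */);

	/* ... iUpdateFence can now be handed back to the client ... */

	(void)PVRSRVRGXDestroyKickSyncContextKM(psKickSyncContext);
	return eError;
}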
-+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGXLAYER_H) -+#define RGXLAYER_H -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "img_elf.h" -+#include "pvrsrv_error.h" /* includes pvrsrv_errors.h */ -+#include "pvrsrv_firmware_boot.h" -+#include "rgx_bvnc_defs_km.h" -+#include "rgx_fw_info.h" -+#include "rgx_fwif_shared.h" /* includes rgx_common.h and mem_types.h */ -+#include "rgx_meta.h" -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+#include "rgx_mips.h" -+#endif -+#include "rgx_riscv.h" -+ -+#include "rgxdefs_km.h" -+/* includes: -+ * rgx_cr_defs_km.h, -+ * RGX_BVNC_CORE_KM_HEADER (rgxcore_km_B.V.N.C.h), -+ * RGX_BNC_CONFIG_KM_HEADER (rgxconfig_km_B.V.N.C.h) -+ */ -+ -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXMemCopy -+ -+ @Description MemCopy implementation -+ -+ @Input hPrivate : Implementation specific data -+ @Input pvDst : Pointer to the destination -+ @Input pvSrc : Pointer to the source location -+ @Input uiSize : The amount of memory to copy in bytes -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXMemCopy(const void *hPrivate, -+ void *pvDst, -+ void *pvSrc, -+ size_t uiSize); -+ -+/*! 
-+******************************************************************************* -+ -+ @Function RGXMemSet -+ -+ @Description MemSet implementation -+ -+ @Input hPrivate : Implementation specific data -+ @Input pvDst : Pointer to the start of the memory region -+ @Input ui8Value : The value to be written -+ @Input uiSize : The number of bytes to be set to ui8Value -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXMemSet(const void *hPrivate, -+ void *pvDst, -+ IMG_UINT8 ui8Value, -+ size_t uiSize); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXCommentLog -+ -+ @Description Generic log function used for debugging or other purposes -+ -+ @Input hPrivate : Implementation specific data -+ @Input pszString : Message to be printed -+ @Input ... : Variadic arguments -+ -+ @Return void -+ -+******************************************************************************/ -+__printf(2, 3) -+void RGXCommentLog(const void *hPrivate, -+ const IMG_CHAR *pszString, -+ ...); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXErrorLog -+ -+ @Description Generic error log function used for debugging or other purposes -+ -+ @Input hPrivate : Implementation specific data -+ @Input pszString : Message to be printed -+ @Input ... : Variadic arguments -+ -+ @Return void -+ -+******************************************************************************/ -+__printf(2, 3) -+void RGXErrorLog(const void *hPrivate, -+ const IMG_CHAR *pszString, -+ ...); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXGetOSPageSize -+ -+ @Description Return Page size used on OS -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return IMG_UINT32 -+ -+******************************************************************************/ -+ -+IMG_UINT32 RGXGetOSPageSize(const void *hPrivate); -+ -+/* This is used to get the value of a specific feature from hprivate. -+ * Should be used instead of calling RGXDeviceHasFeature. */ -+#define RGX_DEVICE_HAS_FEATURE(hPrivate, Feature) \ -+ RGXDeviceHasFeature(hPrivate, RGX_FEATURE_##Feature##_BIT_MASK) -+ -+/* This is used to check if a specific feature with value is enabled. -+ * Should be used instead of calling RGXDeviceGetFeatureValue. */ -+#define RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, Feature) \ -+ (RGXDeviceGetFeatureValue(hPrivate, RGX_FEATURE_##Feature##_IDX) >= 0) -+ -+/* This is used to get the value of a specific feature from hPrivate. -+ * Should be used instead of calling RGXDeviceGetFeatureValue. */ -+#define RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, Feature) \ -+ RGXDeviceGetFeatureValue(hPrivate, RGX_FEATURE_##Feature##_IDX) -+ -+ -+/* This is used to check if a specific ERN is enabled from hPrivate. -+ * Should be used instead of calling RGXDeviceHasErnBrn. */ -+#define RGX_DEVICE_HAS_ERN(hPrivate, ERN) \ -+ RGXDeviceHasErnBrn(hPrivate, HW_ERN_##ERN##_BIT_MASK) -+ -+/* This is used to check if a specific BRN is enabled from hPrivate. -+ * Should be used instead of calling RGXDeviceHasErnBrn. */ -+#define RGX_DEVICE_HAS_BRN(hPrivate, BRN) \ -+ RGXDeviceHasErnBrn(hPrivate, FIX_HW_BRN_##BRN##_BIT_MASK) -+ -+/*! 
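/*
 * The helper macros above rely on token pasting so that callers can name a
 * feature/ERN/BRN symbolically and the macro expands to the matching
 * *_BIT_MASK or *_IDX constant.  A self-contained illustration of the same
 * pattern with made-up feature masks follows; the real RGX_FEATURE_* values
 * live in the B.V.N.C headers and are not reproduced here.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_FEATURE_FOO_BIT_MASK  (1ull << 0)
#define EXAMPLE_FEATURE_BAR_BIT_MASK  (1ull << 1)

static bool ExampleDeviceHasFeature(uint64_t ui64DeviceMask, uint64_t ui64Feature)
{
	return (ui64DeviceMask & ui64Feature) != 0;
}

/* Same shape as RGX_DEVICE_HAS_FEATURE(): paste the name into the mask symbol */
#define EXAMPLE_DEVICE_HAS_FEATURE(mask, Feature) \
	ExampleDeviceHasFeature((mask), EXAMPLE_FEATURE_##Feature##_BIT_MASK)

int main(void)
{
	uint64_t ui64DeviceMask = EXAMPLE_FEATURE_FOO_BIT_MASK;  /* device has FOO only */

	printf("FOO: %d, BAR: %d\n",
	       EXAMPLE_DEVICE_HAS_FEATURE(ui64DeviceMask, FOO),
	       EXAMPLE_DEVICE_HAS_FEATURE(ui64DeviceMask, BAR));
	return 0;
}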
-+******************************************************************************* -+ -+ @Function RGXDeviceGetFeatureValue -+ -+ @Description Checks if a device has a particular feature with values -+ -+ @Input hPrivate : Implementation specific data -+ @Input ui64Feature : Feature with values to check -+ -+ @Return Value >= 0 if the given feature is available, -1 otherwise -+ -+******************************************************************************/ -+IMG_INT32 RGXDeviceGetFeatureValue(const void *hPrivate, IMG_UINT64 ui64Feature); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXDeviceHasFeature -+ -+ @Description Checks if a device has a particular feature -+ -+ @Input hPrivate : Implementation specific data -+ @Input ui64Feature : Feature to check -+ -+ @Return IMG_TRUE if the given feature is available, IMG_FALSE otherwise -+ -+******************************************************************************/ -+IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXDeviceHasErnBrn -+ -+ @Description Checks if a device has a particular errata -+ -+ @Input hPrivate : Implementation specific data -+ @Input ui64ErnsBrns : Flags to check -+ -+ @Return IMG_TRUE if the given errata is available, IMG_FALSE otherwise -+ -+******************************************************************************/ -+IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXGetFWCorememSize -+ -+ @Description Get the FW coremem size -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return FW coremem size -+ -+******************************************************************************/ -+IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXWriteReg32/64 -+ -+ @Description Write a value to a 32/64 bit RGX register -+ -+ @Input hPrivate : Implementation specific data -+ @Input ui32RegAddr : Register offset inside the register bank -+ @Input ui32/64RegValue : New register value -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXWriteReg32(const void *hPrivate, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32RegValue); -+ -+void RGXWriteReg64(const void *hPrivate, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT64 ui64RegValue); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXReadReg32/64 -+ -+ @Description Read a 32/64 bit RGX register -+ -+ @Input hPrivate : Implementation specific data -+ @Input ui32RegAddr : Register offset inside the register bank -+ -+ @Return Register value -+ -+******************************************************************************/ -+IMG_UINT32 RGXReadReg32(const void *hPrivate, -+ IMG_UINT32 ui32RegAddr); -+ -+IMG_UINT64 RGXReadReg64(const void *hPrivate, -+ IMG_UINT32 ui32RegAddr); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXReadModifyWriteReg32 -+ -+ @Description Read-modify-write a 32 bit RGX register -+ -+ @Input hPrivate : Implementation specific data. -+ @Input ui32RegAddr : Register offset inside the register bank. -+ @Input ui32RegValue : New register value. 
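/*
 * rgxlayer.h deliberately leaves the register accessors above to be provided
 * by whichever environment hosts the code.  Purely to illustrate the contract
 * (hPrivate carries implementation data, ui32RegAddr is an offset into the
 * register bank), a bare-metal style implementation could look like the
 * sketch below.  This is NOT the DDK implementation; the in-tree version in
 * rgxlayer_impl.c, added later in this patch, goes through the OS layer.
 */
#include <stdint.h>

typedef struct
{
	volatile uint8_t *pui8RegsBase;   /* mapped GPU register bank */
} EXAMPLE_LAYER_PRIV;

static void ExampleWriteReg32(const void *hPrivate, uint32_t ui32RegAddr,
                              uint32_t ui32RegValue)
{
	const EXAMPLE_LAYER_PRIV *psPriv = hPrivate;

	*(volatile uint32_t *)(psPriv->pui8RegsBase + ui32RegAddr) = ui32RegValue;
}

static uint32_t ExampleReadReg32(const void *hPrivate, uint32_t ui32RegAddr)
{
	const EXAMPLE_LAYER_PRIV *psPriv = hPrivate;

	return *(volatile uint32_t *)(psPriv->pui8RegsBase + ui32RegAddr);
}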
-+ @Input ui32RegMask : Keep the bits set in the mask. -+ -+ @Return Always returns PVRSRV_OK -+ -+******************************************************************************/ -+IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT64 ui64RegValue, -+ IMG_UINT64 ui64RegKeepMask); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXPollReg32/64 -+ -+ @Description Poll on a 32/64 bit RGX register until some bits are set/unset -+ -+ @Input hPrivate : Implementation specific data -+ @Input ui32RegAddr : Register offset inside the register bank -+ @Input ui32/64RegValue : Value expected from the register -+ @Input ui32/64RegMask : Only the bits set in this mask will be -+ checked against uiRegValue -+ -+ @Return PVRSRV_OK if the poll succeeds, -+ PVRSRV_ERROR_TIMEOUT if the poll takes too long -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXPollReg32(const void *hPrivate, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32RegValue, -+ IMG_UINT32 ui32RegMask); -+ -+PVRSRV_ERROR RGXPollReg64(const void *hPrivate, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT64 ui64RegValue, -+ IMG_UINT64 ui64RegMask); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXSetPoweredState -+ -+ @Description Declare if the device is powered or not -+ -+ @Input hPrivate : Implementation specific data -+ @Input bPowered : true is powered, false otherwise -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXSetPoweredState(const void *hPrivate, IMG_BOOL bPowered); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXWaitCycles -+ -+ @Description Wait for a number of GPU cycles and/or microseconds -+ -+ @Input hPrivate : Implementation specific data -+ @Input ui32Cycles : Number of GPU cycles to wait for in pdumps, -+ it can also be used when running driver-live -+ if desired (ignoring the next parameter) -+ @Input ui32WaitUs : Number of microseconds to wait for when running -+ driver-live -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXWaitCycles(const void *hPrivate, -+ IMG_UINT32 ui32Cycles, -+ IMG_UINT32 ui32WaitUs); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXAcquireKernelMMUPC -+ -+ @Description Acquire the Kernel MMU Page Catalogue device physical address -+ -+ @Input hPrivate : Implementation specific data -+ @Input psPCAddr : Returned page catalog address -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXWriteKernelMMUPC32/64 -+ -+ @Description Write the Kernel MMU Page Catalogue to the 32/64 bit -+ RGX register passed as argument. -+ In a driver-live scenario without PDump these functions -+ are the same as RGXWriteReg32/64 and they don't need -+ to be reimplemented. 
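/*
 * The read-modify-write helper declared above (the 64-bit variant, despite
 * the 32-bit name in its comment block) is documented as keeping the bits
 * that are set in the mask.  A self-contained sketch of that combine step,
 * assuming the conventional formulation (preserve masked bits of the current
 * value, take the rest from the new value); the exact behaviour of the DDK
 * helper is defined by its implementation, not by this example.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t ExampleReadModifyWrite(uint64_t ui64Current,
                                       uint64_t ui64NewValue,
                                       uint64_t ui64KeepMask)
{
	return (ui64Current & ui64KeepMask) | (ui64NewValue & ~ui64KeepMask);
}

int main(void)
{
	/* Keep the top 32 bits of the current value, replace the bottom 32 bits */
	uint64_t ui64Result = ExampleReadModifyWrite(0x1122334455667788ull,
	                                             0x00000000AABBCCDDull,
	                                             0xFFFFFFFF00000000ull);

	printf("0x%llx\n", (unsigned long long)ui64Result);  /* 0x11223344aabbccdd */
	return 0;
}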
-+ -+ @Input hPrivate : Implementation specific data -+ @Input ui32PCReg : Register offset inside the register bank -+ @Input ui32AlignShift : PC register alignshift -+ @Input ui32Shift : PC register shift -+ @Input ui32/64PCVal : Page catalog value (aligned and shifted) -+ -+ @Return void -+ -+******************************************************************************/ -+#if defined(PDUMP) -+void RGXWriteKernelMMUPC64(const void *hPrivate, -+ IMG_UINT32 ui32PCReg, -+ IMG_UINT32 ui32PCRegAlignShift, -+ IMG_UINT32 ui32PCRegShift, -+ IMG_UINT64 ui64PCVal); -+ -+void RGXWriteKernelMMUPC32(const void *hPrivate, -+ IMG_UINT32 ui32PCReg, -+ IMG_UINT32 ui32PCRegAlignShift, -+ IMG_UINT32 ui32PCRegShift, -+ IMG_UINT32 ui32PCVal); -+#else /* defined(PDUMP) */ -+ -+#define RGXWriteKernelMMUPC64(priv, pcreg, alignshift, shift, pcval) \ -+ RGXWriteReg64(priv, pcreg, pcval) -+ -+#define RGXWriteKernelMMUPC32(priv, pcreg, alignshift, shift, pcval) \ -+ RGXWriteReg32(priv, pcreg, pcval) -+ -+#endif /* defined(PDUMP) */ -+ -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+/*! -+******************************************************************************* -+ -+ @Function RGXAcquireGPURegsAddr -+ -+ @Description Acquire the GPU registers base device physical address -+ -+ @Input hPrivate : Implementation specific data -+ @Input psGPURegsAddr : Returned GPU registers base address -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXMIPSWrapperConfig -+ -+ @Description Write GPU register bank transaction ID and MIPS boot mode -+ to the MIPS wrapper config register (passed as argument). -+ In a driver-live scenario without PDump this is the same as -+ RGXWriteReg64 and it doesn't need to be reimplemented. -+ -+ @Input hPrivate : Implementation specific data -+ @Input ui32RegAddr : Register offset inside the register bank -+ @Input ui64GPURegsAddr : GPU registers base address -+ @Input ui32GPURegsAlign : Register bank transactions alignment -+ @Input ui32BootMode : Mips BOOT ISA mode -+ -+ @Return void -+ -+******************************************************************************/ -+#if defined(PDUMP) -+void RGXMIPSWrapperConfig(const void *hPrivate, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT64 ui64GPURegsAddr, -+ IMG_UINT32 ui32GPURegsAlign, -+ IMG_UINT32 ui32BootMode); -+#else -+#define RGXMIPSWrapperConfig(priv, regaddr, gpuregsaddr, gpuregsalign, bootmode) \ -+ RGXWriteReg64(priv, regaddr, ((gpuregsaddr) >> (gpuregsalign)) | (bootmode)) -+#endif -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXAcquireBootRemapAddr -+ -+ @Description Acquire the device physical address of the MIPS bootloader -+ accessed through remap region -+ -+ @Input hPrivate : Implementation specific data -+ @Output psBootRemapAddr : Base address of the remapped bootloader -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXBootRemapConfig -+ -+ @Description Configure the bootloader remap registers passed as arguments. 
-+ In a driver-live scenario without PDump this is the same as -+ two RGXWriteReg64 and it doesn't need to be reimplemented. -+ -+ @Input hPrivate : Implementation specific data -+ @Input ui32Config1RegAddr : Remap config1 register offset -+ @Input ui64Config1RegValue : Remap config1 register value -+ @Input ui32Config2RegAddr : Remap config2 register offset -+ @Input ui64Config2PhyAddr : Output remapped aligned physical address -+ @Input ui64Config2PhyMask : Mask for the output physical address -+ @Input ui64Config2Settings : Extra settings for this remap region -+ -+ @Return void -+ -+******************************************************************************/ -+#if defined(PDUMP) -+void RGXBootRemapConfig(const void *hPrivate, -+ IMG_UINT32 ui32Config1RegAddr, -+ IMG_UINT64 ui64Config1RegValue, -+ IMG_UINT32 ui32Config2RegAddr, -+ IMG_UINT64 ui64Config2PhyAddr, -+ IMG_UINT64 ui64Config2PhyMask, -+ IMG_UINT64 ui64Config2Settings); -+#else -+#define RGXBootRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \ -+ RGXWriteReg64(priv, c1reg, (c1val)); \ -+ RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \ -+ } while (0) -+#endif -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXAcquireCodeRemapAddr -+ -+ @Description Acquire the device physical address of the MIPS code -+ accessed through remap region -+ -+ @Input hPrivate : Implementation specific data -+ @Output psCodeRemapAddr : Base address of the remapped code -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXCodeRemapConfig -+ -+ @Description Configure the code remap registers passed as arguments. -+ In a driver-live scenario without PDump this is the same as -+ two RGXWriteReg64 and it doesn't need to be reimplemented. -+ -+ @Input hPrivate : Implementation specific data -+ @Input ui32Config1RegAddr : Remap config1 register offset -+ @Input ui64Config1RegValue : Remap config1 register value -+ @Input ui32Config2RegAddr : Remap config2 register offset -+ @Input ui64Config2PhyAddr : Output remapped aligned physical address -+ @Input ui64Config2PhyMask : Mask for the output physical address -+ @Input ui64Config2Settings : Extra settings for this remap region -+ -+ @Return void -+ -+******************************************************************************/ -+#if defined(PDUMP) -+void RGXCodeRemapConfig(const void *hPrivate, -+ IMG_UINT32 ui32Config1RegAddr, -+ IMG_UINT64 ui64Config1RegValue, -+ IMG_UINT32 ui32Config2RegAddr, -+ IMG_UINT64 ui64Config2PhyAddr, -+ IMG_UINT64 ui64Config2PhyMask, -+ IMG_UINT64 ui64Config2Settings); -+#else -+#define RGXCodeRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \ -+ RGXWriteReg64(priv, c1reg, (c1val)); \ -+ RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \ -+ } while (0) -+#endif -+ -+/*! 
-+******************************************************************************* -+ -+ @Function RGXAcquireDataRemapAddr -+ -+ @Description Acquire the device physical address of the MIPS data -+ accessed through remap region -+ -+ @Input hPrivate : Implementation specific data -+ @Output psDataRemapAddr : Base address of the remapped data -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXDataRemapConfig -+ -+ @Description Configure the data remap registers passed as arguments. -+ In a driver-live scenario without PDump this is the same as -+ two RGXWriteReg64 and it doesn't need to be reimplemented. -+ -+ @Input hPrivate : Implementation specific data -+ @Input ui32Config1RegAddr : Remap config1 register offset -+ @Input ui64Config1RegValue : Remap config1 register value -+ @Input ui32Config2RegAddr : Remap config2 register offset -+ @Input ui64Config2PhyAddr : Output remapped aligned physical address -+ @Input ui64Config2PhyMask : Mask for the output physical address -+ @Input ui64Config2Settings : Extra settings for this remap region -+ -+ @Return void -+ -+******************************************************************************/ -+#if defined(PDUMP) -+void RGXDataRemapConfig(const void *hPrivate, -+ IMG_UINT32 ui32Config1RegAddr, -+ IMG_UINT64 ui64Config1RegValue, -+ IMG_UINT32 ui32Config2RegAddr, -+ IMG_UINT64 ui64Config2PhyAddr, -+ IMG_UINT64 ui64Config2PhyMask, -+ IMG_UINT64 ui64Config2Settings); -+#else -+#define RGXDataRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \ -+ RGXWriteReg64(priv, c1reg, (c1val)); \ -+ RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \ -+ } while (0) -+#endif -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXAcquireTrampolineRemapAddr -+ -+ @Description Acquire the device physical address of the MIPS data -+ accessed through remap region -+ -+ @Input hPrivate : Implementation specific data -+ @Output psTrampolineRemapAddr: Base address of the remapped data -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXTrampolineRemapConfig -+ -+ @Description Configure the trampoline remap registers passed as arguments. -+ In a driver-live scenario without PDump this is the same as -+ two RGXWriteReg64 and it doesn't need to be reimplemented. 
-+ -+ @Input hPrivate : Implementation specific data -+ @Input ui32Config1RegAddr : Remap config1 register offset -+ @Input ui64Config1RegValue : Remap config1 register value -+ @Input ui32Config2RegAddr : Remap config2 register offset -+ @Input ui64Config2PhyAddr : Output remapped aligned physical address -+ @Input ui64Config2PhyMask : Mask for the output physical address -+ @Input ui64Config2Settings : Extra settings for this remap region -+ -+ @Return void -+ -+******************************************************************************/ -+#if defined(PDUMP) -+void RGXTrampolineRemapConfig(const void *hPrivate, -+ IMG_UINT32 ui32Config1RegAddr, -+ IMG_UINT64 ui64Config1RegValue, -+ IMG_UINT32 ui32Config2RegAddr, -+ IMG_UINT64 ui64Config2PhyAddr, -+ IMG_UINT64 ui64Config2PhyMask, -+ IMG_UINT64 ui64Config2Settings); -+#else -+#define RGXTrampolineRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \ -+ RGXWriteReg64(priv, c1reg, (c1val)); \ -+ RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \ -+ } while (0) -+#endif -+#endif /* defined(RGX_FEATURE_MIPS_BIT_MASK) */ -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXDoFWSlaveBoot -+ -+ @Description Returns whether or not a FW Slave Boot is required -+ while powering on -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return IMG_BOOL -+ -+******************************************************************************/ -+IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXFabricCoherencyTest -+ -+ @Description Performs fabric coherency test -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return PVRSRV_OK if the test succeeds, -+ PVRSRV_ERROR_INIT_FAILURE if the test fails at some point -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXGetDeviceSLCBanks -+ -+ @Description Returns the number of SLC banks used by the device -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return Number of SLC banks -+ -+******************************************************************************/ -+IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXGetDeviceCacheLineSize -+ -+ @Description Returns the device cache line size -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return Cache line size -+ -+******************************************************************************/ -+IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate); -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+/*! -+******************************************************************************* -+ -+ @Function RGXGetDevicePhysBusWidth -+ -+ @Description Returns the device physical bus width -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return Physical bus width -+ -+******************************************************************************/ -+IMG_UINT32 RGXGetDevicePhysBusWidth(const void *hPrivate); -+ -+/*! 
-+******************************************************************************* -+ -+ @Function RGXDevicePA0IsValid -+ -+ @Description Returns true if the device physical address 0x0 is a valid -+ address and can be accessed by the GPU. -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return IMG_TRUE if device physical address 0x0 is a valid address, -+ IMG_FALSE otherwise -+ -+******************************************************************************/ -+IMG_BOOL RGXDevicePA0IsValid(const void *hPrivate); -+#endif -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXAcquireBootCodeAddr -+ -+ @Description Acquire the device virtual address of the RISCV boot code -+ -+ @Input hPrivate : Implementation specific data -+ @Output psBootCodeAddr : Boot code base address -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXCalculateHostFWDataAddress -+ -+ @Description Calculates the base host address of the RISCV firmware data -+ -+ @Input hPrivate : Implementation specific data -+ @Input pvHostFWDataAddr : Initial host address of the firmware data -+ -+ @Return: Base host address of the RISCV firmware data -+ -+******************************************************************************/ -+void *RGXCalculateHostFWDataAddress(const void *hPrivate, void *pvHostFWDataAddr); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXAcquireBootDataAddr -+ -+ @Description Acquire the device virtual address of the RISCV boot data -+ -+ @Input hPrivate : Implementation specific data -+ @Output psBootDataAddr : Boot data base address -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXDeviceAckIrq -+ -+ @Description Checks the implementation specific IRQ status register, -+ clearing it if necessary and returning the IRQ status. -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return: IRQ status -+ -+******************************************************************************/ -+IMG_BOOL RGXDeviceAckIrq(const void *hPrivate); -+ -+#if defined(__cplusplus) -+} -+#endif -+ -+#endif /* RGXLAYER_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxlayer_impl.c b/drivers/gpu/drm/img-rogue/rgxlayer_impl.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxlayer_impl.c -@@ -0,0 +1,1346 @@ -+/*************************************************************************/ /*! -+@File -+@Title DDK implementation of the Services abstraction layer -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description DDK implementation of the Services abstraction layer -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+
-+Permission is hereby granted, free of charge, to any person obtaining a copy
-+of this software and associated documentation files (the "Software"), to deal
-+in the Software without restriction, including without limitation the rights
-+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+copies of the Software, and to permit persons to whom the Software is
-+furnished to do so, subject to the following conditions:
-+
-+The above copyright notice and this permission notice shall be included in
-+all copies or substantial portions of the Software.
-+
-+Alternatively, the contents of this file may be used under the terms of
-+the GNU General Public License Version 2 ("GPL") in which case the provisions
-+of GPL are applicable instead of those above.
-+
-+If you wish to allow use of your version of this file only under the terms of
-+GPL, and not to allow others to use your version of this file under the terms
-+of the MIT license, indicate your decision by deleting the provisions above
-+and replace them with the notice and other provisions required by GPL as set
-+out in the file called "GPL-COPYING" included in this distribution. If you do
-+not delete the provisions above, a recipient may use your version of this file
-+under the terms of either the MIT license or GPL.
-+
-+This License is also included in this distribution in the file called
-+"MIT-COPYING".
-+
-+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
-+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
-+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
-+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
-+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-+*/ /**************************************************************************/
-+
-+#include "rgxlayer_impl.h"
-+#include "osfunc.h"
-+#include "pdump_km.h"
-+#include "rgxfwutils.h"
-+#include "rgxfwimageutils.h"
-+#include "devicemem.h"
-+#include "cache_km.h"
-+#include "pmr.h"
-+
-+#if defined(PDUMP)
-+#if defined(__linux__)
-+ #include <linux/version.h>
-+
-+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
-+ #include <linux/stdarg.h>
-+ #else
-+ #include <stdarg.h>
-+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
-+#else
-+ #include <stdarg.h>
-+#endif /* __linux__ */
-+#endif
-+
-+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX)
-+#define RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr) \
-+ (((ui32RegAddr) < RGX_HOST_SECURE_REGBANK_OFFSET) ? \
-+ ((psDevInfo)->pvRegsBaseKM) : ((psDevInfo)->pvSecureRegsBaseKM))
-+#else
-+#define RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr) ((psDevInfo)->pvRegsBaseKM)
-+#endif
-+
-+void RGXMemCopy(const void *hPrivate,
-+ void *pvDst,
-+ void *pvSrc,
-+ size_t uiSize)
-+{
-+ PVR_UNREFERENCED_PARAMETER(hPrivate);
-+ OSDeviceMemCopy(pvDst, pvSrc, uiSize);
-+}
-+
-+void RGXMemSet(const void *hPrivate,
-+ void *pvDst,
-+ IMG_UINT8 ui8Value,
-+ size_t uiSize)
-+{
-+ PVR_UNREFERENCED_PARAMETER(hPrivate);
-+ OSDeviceMemSet(pvDst, ui8Value, uiSize);
-+}
-+
-+void RGXCommentLog(const void *hPrivate,
-+ const IMG_CHAR *pszString,
-+ ...)
-+{ -+#if defined(PDUMP) -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ va_list argList; -+ va_start(argList, pszString); -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ -+ PDumpCommentWithFlagsVA(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, pszString, argList); -+ va_end(argList); -+#else -+ PVR_UNREFERENCED_PARAMETER(hPrivate); -+ PVR_UNREFERENCED_PARAMETER(pszString); -+#endif -+} -+ -+void RGXErrorLog(const void *hPrivate, -+ const IMG_CHAR *pszString, -+ ...) -+{ -+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; -+ va_list argList; -+ -+ PVR_UNREFERENCED_PARAMETER(hPrivate); -+ -+ va_start(argList, pszString); -+ vsnprintf(szBuffer, sizeof(szBuffer), pszString, argList); -+ va_end(argList); -+ -+ PVR_DPF((PVR_DBG_ERROR, "%s", szBuffer)); -+} -+ -+IMG_UINT32 RGXGetOSPageSize(const void *hPrivate) -+{ -+ PVR_UNREFERENCED_PARAMETER(hPrivate); -+ return OSGetPageSize(); -+} -+ -+IMG_INT32 RGXDeviceGetFeatureValue(const void *hPrivate, IMG_UINT64 ui64Feature) -+{ -+ IMG_INT32 i32Ret = -1; -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ psDeviceNode = psDevInfo->psDeviceNode; -+ -+ if ((psDeviceNode->pfnGetDeviceFeatureValue)) -+ { -+ i32Ret = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, ui64Feature); -+ } -+ -+ return i32Ret; -+} -+ -+IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature) -+{ -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ -+ return (psDevInfo->sDevFeatureCfg.ui64Features & ui64Feature) != 0; -+} -+ -+IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate) -+{ -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ IMG_UINT32 ui32CorememSize = 0; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) -+ { -+ ui32CorememSize = RGX_GET_FEATURE_VALUE(psDevInfo, META_COREMEM_SIZE); -+ } -+ -+ return ui32CorememSize; -+} -+ -+void RGXWriteReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue) -+{ -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ void __iomem *pvRegsBase; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ pvRegsBase = RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr); -+ -+#if defined(PDUMP) -+ if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) -+#endif -+ { -+ OSWriteHWReg32(pvRegsBase, ui32RegAddr, ui32RegValue); -+ } -+ -+ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, -+ ui32RegAddr, ui32RegValue, psParams->ui32PdumpFlags); -+} -+ -+void RGXWriteReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT64 ui64RegValue) -+{ -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ void __iomem *pvRegsBase; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ pvRegsBase = RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr); -+ -+#if defined(PDUMP) -+ if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) -+#endif -+ { -+ OSWriteHWReg64(pvRegsBase, ui32RegAddr, ui64RegValue); -+ } -+ -+ PDUMPREG64(psDevInfo->psDeviceNode, 
RGX_PDUMPREG_NAME, -+ ui32RegAddr, ui64RegValue, psParams->ui32PdumpFlags); -+} -+ -+IMG_UINT32 RGXReadReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr) -+{ -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ void __iomem *pvRegsBase; -+ IMG_UINT32 ui32RegValue; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ pvRegsBase = RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr); -+ -+#if defined(PDUMP) -+ if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) -+ { -+ ui32RegValue = IMG_UINT32_MAX; -+ } -+ else -+#endif -+ { -+ ui32RegValue = OSReadHWReg32(pvRegsBase, ui32RegAddr); -+ } -+ -+ PDUMPREGREAD32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, -+ ui32RegAddr, psParams->ui32PdumpFlags); -+ -+ return ui32RegValue; -+} -+ -+IMG_UINT64 RGXReadReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr) -+{ -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ void __iomem *pvRegsBase; -+ IMG_UINT64 ui64RegValue; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ pvRegsBase = RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr); -+ -+#if defined(PDUMP) -+ if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) -+ { -+ ui64RegValue = IMG_UINT64_MAX; -+ } -+ else -+#endif -+ { -+ ui64RegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr); -+ } -+ -+ PDUMPREGREAD64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, -+ ui32RegAddr, PDUMP_FLAGS_CONTINUOUS); -+ -+ return ui64RegValue; -+} -+ -+IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT64 uiRegValueNew, -+ IMG_UINT64 uiRegKeepMask) -+{ -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ void __iomem *pvRegsBase; -+#if defined(PDUMP) -+ PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; -+#endif -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ pvRegsBase = RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr); -+ -+ /* only use the new values for bits we update according to the keep mask */ -+ uiRegValueNew &= ~uiRegKeepMask; -+ -+#if defined(PDUMP) -+ -+ PDUMP_BLKSTART(ui32PDumpFlags); -+ -+ /* Store register offset to temp PDump variable */ -+ PDumpRegRead64ToInternalVar(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, -+ ":SYSMEM:$1", ui32RegAddr, ui32PDumpFlags); -+ -+ /* Keep the bits set in the mask */ -+ PDumpWriteVarANDValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", -+ uiRegKeepMask, ui32PDumpFlags); -+ -+ /* OR the new values */ -+ PDumpWriteVarORValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", -+ uiRegValueNew, ui32PDumpFlags); -+ -+ /* Do the actual register write */ -+ PDumpInternalVarToReg64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, -+ ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags); -+ -+ PDUMP_BLKEND(ui32PDumpFlags); -+ -+ if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) -+#endif -+ -+ { -+ IMG_UINT64 uiRegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr); -+ uiRegValue &= uiRegKeepMask; -+ OSWriteHWReg64(pvRegsBase, ui32RegAddr, uiRegValue | uiRegValueNew); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR RGXPollReg32(const void *hPrivate, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT32 ui32RegValue, -+ IMG_UINT32 ui32RegMask) -+{ -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ void __iomem *pvRegsBase; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ pvRegsBase = RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr); -+ -+#if 
defined(PDUMP) -+ if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) -+#endif -+ { -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr), -+ ui32RegValue, -+ ui32RegMask, -+ POLL_FLAG_LOG_ERROR, -+ NULL) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXPollReg32: Poll for Reg (0x%x) failed", ui32RegAddr)); -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ } -+ -+ PDUMPREGPOL(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, -+ ui32RegAddr, -+ ui32RegValue, -+ ui32RegMask, -+ psParams->ui32PdumpFlags, -+ PDUMP_POLL_OPERATOR_EQUAL); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR RGXPollReg64(const void *hPrivate, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT64 ui64RegValue, -+ IMG_UINT64 ui64RegMask) -+{ -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ void __iomem *pvRegsBase; -+ -+ /* Split lower and upper words */ -+ IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64RegValue >> 32); -+ IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64RegValue); -+ IMG_UINT32 ui32UpperMask = (IMG_UINT32) (ui64RegMask >> 32); -+ IMG_UINT32 ui32LowerMask = (IMG_UINT32) (ui64RegMask); -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ pvRegsBase = RGX_GET_REGS_BASE(psDevInfo, ui32RegAddr); -+ -+#if defined(PDUMP) -+ if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) -+#endif -+ { -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr + 4), -+ ui32UpperValue, -+ ui32UpperMask, -+ POLL_FLAG_LOG_ERROR, -+ NULL) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr)); -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, -+ (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr), -+ ui32LowerValue, -+ ui32LowerMask, -+ POLL_FLAG_LOG_ERROR, -+ NULL) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for lower part of Reg (0x%x) failed", ui32RegAddr)); -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ } -+ -+ PDUMPREGPOL(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, -+ ui32RegAddr + 4, -+ ui32UpperValue, -+ ui32UpperMask, -+ psParams->ui32PdumpFlags, -+ PDUMP_POLL_OPERATOR_EQUAL); -+ -+ -+ PDUMPREGPOL(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, -+ ui32RegAddr, -+ ui32LowerValue, -+ ui32LowerMask, -+ psParams->ui32PdumpFlags, -+ PDUMP_POLL_OPERATOR_EQUAL); -+ -+ return PVRSRV_OK; -+} -+ -+void RGXSetPoweredState(const void *hPrivate, IMG_BOOL bPowered) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; -+ -+ psDevInfo->bRGXPowered = bPowered; -+} -+ -+void RGXWaitCycles(const void *hPrivate, IMG_UINT32 ui32Cycles, IMG_UINT32 ui32TimeUs) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; -+ OSWaitus(ui32TimeUs); -+ PDUMPIDLWITHFLAGS(psDevInfo->psDeviceNode, ui32Cycles, PDUMP_FLAGS_CONTINUOUS); -+} -+ -+void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr) -+{ -+ PVR_ASSERT(hPrivate != NULL); -+ *psPCAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sPCAddr; -+} -+ -+#if defined(PDUMP) -+void RGXWriteKernelMMUPC64(const void *hPrivate, -+ IMG_UINT32 ui32PCReg, -+ IMG_UINT32 ui32PCRegAlignShift, -+ IMG_UINT32 ui32PCRegShift, -+ IMG_UINT64 ui64PCVal) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ 
psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; -+ -+ /* Write the cat-base address */ -+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, ui32PCReg, ui64PCVal); -+ -+ /* Pdump catbase address */ -+ MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx, -+ RGX_PDUMPREG_NAME, -+ ui32PCReg, -+ 8, -+ ui32PCRegAlignShift, -+ ui32PCRegShift, -+ PDUMP_FLAGS_CONTINUOUS); -+} -+ -+void RGXWriteKernelMMUPC32(const void *hPrivate, -+ IMG_UINT32 ui32PCReg, -+ IMG_UINT32 ui32PCRegAlignShift, -+ IMG_UINT32 ui32PCRegShift, -+ IMG_UINT32 ui32PCVal) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; -+ -+ /* Write the cat-base address */ -+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32PCReg, ui32PCVal); -+ -+ /* Pdump catbase address */ -+ MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx, -+ RGX_PDUMPREG_NAME, -+ ui32PCReg, -+ 4, -+ ui32PCRegAlignShift, -+ ui32PCRegShift, -+ PDUMP_FLAGS_CONTINUOUS); -+} -+#endif /* defined(PDUMP) */ -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr) -+{ -+ PVR_ASSERT(hPrivate != NULL); -+ *psGPURegsAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sGPURegAddr; -+} -+ -+#if defined(PDUMP) -+void RGXMIPSWrapperConfig(const void *hPrivate, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT64 ui64GPURegsAddr, -+ IMG_UINT32 ui32GPURegsAlign, -+ IMG_UINT32 ui32BootMode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; -+ -+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, -+ ui32RegAddr, -+ (ui64GPURegsAddr >> ui32GPURegsAlign) | ui32BootMode); -+ -+ PDUMP_BLKSTART(ui32PDumpFlags); -+ -+ /* Store register offset to temp PDump variable */ -+ PDumpRegLabelToInternalVar(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, -+ ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags); -+ -+ /* Align register transactions identifier */ -+ PDumpWriteVarSHRValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", -+ ui32GPURegsAlign, ui32PDumpFlags); -+ -+ /* Enable micromips instruction encoding */ -+ PDumpWriteVarORValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", -+ ui32BootMode, ui32PDumpFlags); -+ -+ /* Do the actual register write */ -+ PDumpInternalVarToReg64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, -+ ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags); -+ -+ PDUMP_BLKEND(ui32PDumpFlags); -+} -+#endif -+ -+void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr) -+{ -+ PVR_ASSERT(hPrivate != NULL); -+ *psBootRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sBootRemapAddr; -+} -+ -+void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr) -+{ -+ PVR_ASSERT(hPrivate != NULL); -+ *psCodeRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sCodeRemapAddr; -+} -+ -+void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr) -+{ -+ PVR_ASSERT(hPrivate != NULL); -+ *psDataRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sDataRemapAddr; -+} -+ -+void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr) -+{ -+ PVR_ASSERT(hPrivate != NULL); -+ *psTrampolineRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sTrampolineRemapAddr; -+} -+ -+#if defined(PDUMP) -+static inline -+void RGXWriteRemapConfig2Reg(void __iomem *pvRegs, -+ PMR *psPMR, -+ IMG_DEVMEM_OFFSET_T uiLogicalOffset, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT64 ui64PhyAddr, -+ IMG_UINT64 ui64PhyMask, -+ IMG_UINT64 ui64Settings) -+{ 
-+ PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; -+ PVRSRV_DEVICE_NODE *psDevNode; -+ -+ PVR_ASSERT(psPMR != NULL); -+ psDevNode = PMR_DeviceNode(psPMR); -+ -+ OSWriteHWReg64(pvRegs, ui32RegAddr, (ui64PhyAddr & ui64PhyMask) | ui64Settings); -+ -+ PDUMP_BLKSTART(ui32PDumpFlags); -+ -+ /* Store memory offset to temp PDump variable */ -+ PDumpMemLabelToInternalVar64(":SYSMEM:$1", psPMR, -+ uiLogicalOffset, ui32PDumpFlags); -+ -+ /* Keep only the relevant bits of the output physical address */ -+ PDumpWriteVarANDValueOp(psDevNode, ":SYSMEM:$1", ui64PhyMask, ui32PDumpFlags); -+ -+ /* Extra settings for this remapped region */ -+ PDumpWriteVarORValueOp(psDevNode, ":SYSMEM:$1", ui64Settings, ui32PDumpFlags); -+ -+ /* Do the actual register write */ -+ PDumpInternalVarToReg64(psDevNode, RGX_PDUMPREG_NAME, ui32RegAddr, -+ ":SYSMEM:$1", ui32PDumpFlags); -+ -+ PDUMP_BLKEND(ui32PDumpFlags); -+} -+ -+void RGXBootRemapConfig(const void *hPrivate, -+ IMG_UINT32 ui32Config1RegAddr, -+ IMG_UINT64 ui64Config1RegValue, -+ IMG_UINT32 ui32Config2RegAddr, -+ IMG_UINT64 ui64Config2PhyAddr, -+ IMG_UINT64 ui64Config2PhyMask, -+ IMG_UINT64 ui64Config2Settings) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ IMG_UINT32 ui32BootRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_CODE); -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; -+ -+ /* Write remap config1 register */ -+ RGXWriteReg64(hPrivate, -+ ui32Config1RegAddr, -+ ui64Config1RegValue); -+ -+ /* Write remap config2 register */ -+ RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM, -+ psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR, -+ psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32BootRemapMemOffset, -+ ui32Config2RegAddr, -+ ui64Config2PhyAddr, -+ ui64Config2PhyMask, -+ ui64Config2Settings); -+} -+ -+void RGXCodeRemapConfig(const void *hPrivate, -+ IMG_UINT32 ui32Config1RegAddr, -+ IMG_UINT64 ui64Config1RegValue, -+ IMG_UINT32 ui32Config2RegAddr, -+ IMG_UINT64 ui64Config2PhyAddr, -+ IMG_UINT64 ui64Config2PhyMask, -+ IMG_UINT64 ui64Config2Settings) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ IMG_UINT32 ui32CodeRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_EXCEPTIONS_CODE); -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; -+ -+ /* Write remap config1 register */ -+ RGXWriteReg64(hPrivate, -+ ui32Config1RegAddr, -+ ui64Config1RegValue); -+ -+ /* Write remap config2 register */ -+ RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM, -+ psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR, -+ psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32CodeRemapMemOffset, -+ ui32Config2RegAddr, -+ ui64Config2PhyAddr, -+ ui64Config2PhyMask, -+ ui64Config2Settings); -+} -+ -+void RGXDataRemapConfig(const void *hPrivate, -+ IMG_UINT32 ui32Config1RegAddr, -+ IMG_UINT64 ui64Config1RegValue, -+ IMG_UINT32 ui32Config2RegAddr, -+ IMG_UINT64 ui64Config2PhyAddr, -+ IMG_UINT64 ui64Config2PhyMask, -+ IMG_UINT64 ui64Config2Settings) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ IMG_UINT32 ui32DataRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA); -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; -+ -+ /* Write remap config1 register */ -+ RGXWriteReg64(hPrivate, -+ ui32Config1RegAddr, -+ ui64Config1RegValue); -+ -+ /* Write remap config2 register */ -+ RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM, -+ psDevInfo->psRGXFWDataMemDesc->psImport->hPMR, -+ psDevInfo->psRGXFWDataMemDesc->uiOffset + ui32DataRemapMemOffset, -+ ui32Config2RegAddr, -+ 
ui64Config2PhyAddr, -+ ui64Config2PhyMask, -+ ui64Config2Settings); -+} -+ -+void RGXTrampolineRemapConfig(const void *hPrivate, -+ IMG_UINT32 ui32Config1RegAddr, -+ IMG_UINT64 ui64Config1RegValue, -+ IMG_UINT32 ui32Config2RegAddr, -+ IMG_UINT64 ui64Config2PhyAddr, -+ IMG_UINT64 ui64Config2PhyMask, -+ IMG_UINT64 ui64Config2Settings) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; -+ -+ /* write the register for real, without PDump */ -+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, -+ ui32Config1RegAddr, -+ ui64Config1RegValue); -+ -+ PDUMP_BLKSTART(ui32PDumpFlags); -+ -+ /* Store the memory address in a PDump variable */ -+ PDumpPhysHandleToInternalVar64(psDevInfo->psDeviceNode, ":SYSMEM:$1", -+ psDevInfo->psTrampoline->hPdumpPages, -+ ui32PDumpFlags); -+ -+ /* Keep only the relevant bits of the input physical address */ -+ PDumpWriteVarANDValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", -+ ~RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK, -+ ui32PDumpFlags); -+ -+ /* Enable bit */ -+ PDumpWriteVarORValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", -+ RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN, -+ ui32PDumpFlags); -+ -+ /* Do the PDump register write */ -+ PDumpInternalVarToReg64(psDevInfo->psDeviceNode, -+ RGX_PDUMPREG_NAME, -+ ui32Config1RegAddr, -+ ":SYSMEM:$1", -+ ui32PDumpFlags); -+ -+ PDUMP_BLKEND(ui32PDumpFlags); -+ -+ /* this can be written directly */ -+ RGXWriteReg64(hPrivate, -+ ui32Config2RegAddr, -+ (ui64Config2PhyAddr & ui64Config2PhyMask) | ui64Config2Settings); -+} -+#endif /* defined(PDUMP) */ -+#endif /* defined(RGX_FEATURE_MIPS_BIT_MASK) */ -+ -+#define MAX_NUM_COHERENCY_TESTS (10) -+IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; -+ -+ if (psDevInfo->ui32CoherencyTestsDone >= MAX_NUM_COHERENCY_TESTS) -+ { -+ return IMG_FALSE; -+ } -+ -+ psDeviceNode = psDevInfo->psDeviceNode; -+ -+ return PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig); -+} -+ -+/* -+ * The fabric coherency test is performed when platform supports fabric coherency -+ * either in the form of ACE-lite or Full-ACE. This test is done quite early -+ * with the firmware processor quiescent and makes exclusive use of the slave -+ * port interface for reading/writing through the device memory hierarchy. The -+ * rationale for the test is to ensure that what the CPU writes to its dcache -+ * is visible to the GPU via coherency snoop miss/hit and vice-versa without -+ * any intervening cache maintenance by the writing agent. 
-+ */ -+PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ IMG_UINT32 *pui32FabricCohTestBufferCpuVA; -+ DEVMEM_MEMDESC *psFabricCohTestBufferMemDesc; -+ RGXFWIF_DEV_VIRTADDR sFabricCohTestBufferDevVA; -+ IMG_DEVMEM_SIZE_T uiFabricCohTestBlockSize = sizeof(IMG_UINT64); -+ IMG_DEVMEM_ALIGN_T uiFabricCohTestBlockAlign = sizeof(IMG_UINT64); -+ IMG_UINT32 ui32SLCCTRL = 0; -+ IMG_UINT32 ui32OddEven; -+ IMG_BOOL bFeatureS7 = RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE); -+ IMG_UINT32 ui32OddEvenSeed = 1; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_BOOL bFullTestPassed = IMG_TRUE; -+ IMG_BOOL bExit = IMG_FALSE; -+#if defined(DEBUG) -+ IMG_BOOL bSubTestPassed = IMG_FALSE; -+#endif -+ enum TEST_TYPE { -+ CPU_WRITE_GPU_READ_SM=0, GPU_WRITE_CPU_READ_SM, -+ CPU_WRITE_GPU_READ_SH, GPU_WRITE_CPU_READ_SH -+ } eTestType; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; -+ -+ PVR_LOG(("Starting fabric coherency test .....")); -+ -+ if (bFeatureS7) -+ { -+ IMG_UINT64 ui64SegOutAddrTopUncached = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(MMU_CONTEXT_MAPPING_FWIF); -+ -+ /* Configure META to use SLC force-linefill for the bootloader segment */ -+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6), -+ (ui64SegOutAddrTopUncached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32); -+ } -+ else -+ { -+ /* Bypass the SLC when IO coherency is enabled */ -+ ui32SLCCTRL = RGXReadReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS); -+ RGXWriteReg32(hPrivate, -+ RGX_CR_SLC_CTRL_BYPASS, -+ ui32SLCCTRL | RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN); -+ } -+ -+ /* Size and align are 'expanded' because we request an export align allocation */ -+ eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap), -+ &uiFabricCohTestBlockSize, -+ &uiFabricCohTestBlockAlign); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevmemExportalignAdjustSizeAndAlign() error: %s, exiting", -+ PVRSRVGetErrorString(eError))); -+ goto e0; -+ } -+ -+ /* Allocate, acquire cpu address and set firmware address */ -+ eError = DevmemFwAllocateExportable(psDevInfo->psDeviceNode, -+ uiFabricCohTestBlockSize, -+ uiFabricCohTestBlockAlign, -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | -+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | -+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT | -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), -+ "FwExFabricCoherencyTestBuffer", -+ &psFabricCohTestBufferMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevmemFwAllocateExportable() error: %s, exiting", -+ PVRSRVGetErrorString(eError))); -+ goto e0; -+ } -+ -+ eError = DevmemAcquireCpuVirtAddr(psFabricCohTestBufferMemDesc, (void **) &pui32FabricCohTestBufferCpuVA); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "DevmemAcquireCpuVirtAddr() error: %s, exiting", -+ PVRSRVGetErrorString(eError))); -+ goto e1; -+ } -+ -+ /* Create a FW address which is uncached in the Meta DCache and in the SLC using the Meta bootloader segment. 
-+ This segment is the only one configured correctly out of reset (when this test is meant to be executed) */ -+ -+ eError = RGXSetFirmwareAddress(&sFabricCohTestBufferDevVA, -+ psFabricCohTestBufferMemDesc, -+ 0, -+ RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", e2); -+ -+ /* Undo most of the FW mappings done by RGXSetFirmwareAddress */ -+ sFabricCohTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_META_CACHE_MASK; -+ sFabricCohTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK; -+ sFabricCohTestBufferDevVA.ui32Addr -= RGXFW_SEGMMU_DATA_BASE_ADDRESS; -+ -+ /* Map the buffer in the bootloader segment as uncached */ -+ sFabricCohTestBufferDevVA.ui32Addr |= RGXFW_BOOTLDR_META_ADDR; -+ sFabricCohTestBufferDevVA.ui32Addr |= RGXFW_SEGMMU_DATA_META_UNCACHED; -+ -+ for (eTestType = CPU_WRITE_GPU_READ_SH; eTestType <= GPU_WRITE_CPU_READ_SH && bExit == IMG_FALSE; eTestType++) -+ { -+ IMG_CPU_PHYADDR sCpuPhyAddr; -+ IMG_BOOL bValid; -+ PMR *psPMR; -+ -+ /* Acquire underlying PMR CpuPA in preparation for cache maintenance */ -+ (void) DevmemLocalGetImportHandle(psFabricCohTestBufferMemDesc, (void**)&psPMR); -+ eError = PMR_CpuPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sCpuPhyAddr, &bValid); -+ if (eError != PVRSRV_OK || bValid == IMG_FALSE) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "PMR_CpuPhysAddr error: %s, exiting", -+ PVRSRVGetErrorString(eError))); -+ bExit = IMG_TRUE; -+ continue; -+ } -+ -+ /* Here we do two passes mostly to account for the effects of using a different -+ seed (i.e. ui32OddEvenSeed) value to read and write */ -+ for (ui32OddEven = 1; ui32OddEven < 3 && bExit == IMG_FALSE; ui32OddEven++) -+ { -+ IMG_UINT32 i; -+ -+#if defined(DEBUG) -+ switch (eTestType) -+ { -+ case CPU_WRITE_GPU_READ_SM: -+ PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: starting [run #%u]", ui32OddEven)); -+ break; -+ case GPU_WRITE_CPU_READ_SM: -+ PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: starting [run #%u]", ui32OddEven)); -+ break; -+ case CPU_WRITE_GPU_READ_SH: -+ PVR_LOG(("CPU:Write/GPU:Read Snoop Hit Test: starting [run #%u]", ui32OddEven)); -+ break; -+ case GPU_WRITE_CPU_READ_SH: -+ PVR_LOG(("GPU:Write/CPU:Read Snoop Hit Test: starting [run #%u]", ui32OddEven)); -+ break; -+ default: -+ PVR_LOG(("Internal error, exiting test")); -+ eError = PVRSRV_ERROR_INIT_FAILURE; -+ bExit = IMG_TRUE; -+ continue; -+ } -+#endif -+ -+ /* Do multiple sub-dword cache line tests */ -+ for (i = 0; i < 2 && bExit == IMG_FALSE; i++) -+ { -+ IMG_UINT32 ui32FWAddr; -+ IMG_UINT32 ui32FWValue; -+ IMG_UINT32 ui32FWValue2; -+ IMG_CPU_PHYADDR sCpuPhyAddrStart; -+ IMG_CPU_PHYADDR sCpuPhyAddrEnd; -+ IMG_UINT32 ui32LastFWValue = ~0; -+ IMG_UINT32 ui32Offset = i * sizeof(IMG_UINT32); -+ -+ /* Calculate next address and seed value to write/read from slave-port */ -+ ui32FWAddr = sFabricCohTestBufferDevVA.ui32Addr + ui32Offset; -+ sCpuPhyAddrStart.uiAddr = sCpuPhyAddr.uiAddr + ui32Offset; -+ sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr; -+ ui32OddEvenSeed += 1; -+ -+ if (eTestType == GPU_WRITE_CPU_READ_SM || eTestType == GPU_WRITE_CPU_READ_SH) -+ { -+ /* Clean dcache to ensure there is no stale data in dcache that might over-write -+ what we are about to write via slave-port here because if it drains from the CPU -+ dcache before we read it, it would corrupt what we are going to read back via -+ the CPU */ -+ sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); -+ CacheOpExec(psDevInfo->psDeviceNode, -+ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset, -+ (IMG_CHAR 
*)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32), -+ sCpuPhyAddrStart, -+ sCpuPhyAddrEnd, -+ PVRSRV_CACHE_OP_CLEAN); -+ -+ /* Calculate a new value to write */ -+ ui32FWValue = i + ui32OddEvenSeed; -+ -+ /* Write the value using the RGX slave-port interface */ -+ eError = RGXWriteFWModuleAddr(psDevInfo, ui32FWAddr, ui32FWValue); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "RGXWriteFWModuleAddr error: %s, exiting", -+ PVRSRVGetErrorString(eError))); -+ bExit = IMG_TRUE; -+ continue; -+ } -+ -+ /* Read back value using RGX slave-port interface, this is used -+ as a sort of memory barrier for the above write */ -+ eError = RGXReadFWModuleAddr(psDevInfo, ui32FWAddr, &ui32FWValue2); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "RGXReadFWModuleAddr error: %s, exiting", -+ PVRSRVGetErrorString(eError))); -+ bExit = IMG_TRUE; -+ continue; -+ } -+ else if (ui32FWValue != ui32FWValue2) -+ { -+ /* Fatal error, we should abort */ -+ PVR_DPF((PVR_DBG_ERROR, -+ "At Offset: %d, RAW via SlavePort failed: expected: %x, got: %x", -+ i, -+ ui32FWValue, -+ ui32FWValue2)); -+ eError = PVRSRV_ERROR_INIT_FAILURE; -+ bExit = IMG_TRUE; -+ continue; -+ } -+ -+ if (!PVRSRVSystemSnoopingOfDeviceCache(psDevInfo->psDeviceNode->psDevConfig)) -+ { -+ /* Invalidate dcache to ensure that any prefetched data by the CPU from this memory -+ region is discarded before we read (i.e. next read must trigger a cache miss). -+ If there is snooping of device cache, then any prefetching done by the CPU -+ will reflect the most up to date datum writing by GPU into said location, -+ that is to say prefetching must be coherent so CPU d-flush is not needed */ -+ sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); -+ CacheOpExec(psDevInfo->psDeviceNode, -+ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset, -+ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32), -+ sCpuPhyAddrStart, -+ sCpuPhyAddrEnd, -+ PVRSRV_CACHE_OP_INVALIDATE); -+ } -+ } -+ else -+ { -+ IMG_UINT32 ui32RAWCpuValue; -+ -+ /* Ensures line is in dcache */ -+ ui32FWValue = IMG_UINT32_MAX; -+ -+ /* Dirty allocation in dcache */ -+ ui32RAWCpuValue = i + ui32OddEvenSeed; -+ pui32FabricCohTestBufferCpuVA[i] = i + ui32OddEvenSeed; -+ -+ /* Flush possible cpu store-buffer(ing) on LMA */ -+ OSWriteMemoryBarrier(&pui32FabricCohTestBufferCpuVA[i]); -+ -+ if (eTestType == CPU_WRITE_GPU_READ_SM) -+ { -+ /* Flush dcache to force subsequent incoming CPU-bound snoop to miss so -+ memory is coherent before the SlavePort reads */ -+ sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); -+ CacheOpExec(psDevInfo->psDeviceNode, -+ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset, -+ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32), -+ sCpuPhyAddrStart, -+ sCpuPhyAddrEnd, -+ PVRSRV_CACHE_OP_FLUSH); -+ } -+ -+ /* Read back value using RGX slave-port interface */ -+ eError = RGXReadFWModuleAddr(psDevInfo, ui32FWAddr, &ui32FWValue); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "RGXReadFWModuleAddr error: %s, exiting", -+ PVRSRVGetErrorString(eError))); -+ bExit = IMG_TRUE; -+ continue; -+ } -+ -+ /* Being mostly paranoid here, verify that CPU RAW operation is valid -+ after the above slave port read */ -+ sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); -+ CacheOpExec(psDevInfo->psDeviceNode, -+ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset, -+ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32), -+ sCpuPhyAddrStart, -+ sCpuPhyAddrEnd, -+ 
PVRSRV_CACHE_OP_FLUSH); -+ if (pui32FabricCohTestBufferCpuVA[i] != ui32RAWCpuValue) -+ { -+ /* Fatal error, we should abort */ -+ PVR_DPF((PVR_DBG_ERROR, -+ "At Offset: %d, RAW by CPU failed: expected: %x, got: %x", -+ i, -+ ui32RAWCpuValue, -+ pui32FabricCohTestBufferCpuVA[i])); -+ eError = PVRSRV_ERROR_INIT_FAILURE; -+ bExit = IMG_TRUE; -+ continue; -+ } -+ } -+ -+ /* Compare to see if sub-test passed */ -+ if (pui32FabricCohTestBufferCpuVA[i] == ui32FWValue) -+ { -+#if defined(DEBUG) -+ bSubTestPassed = IMG_TRUE; -+#endif -+ } -+ else -+ { -+ bFullTestPassed = IMG_FALSE; -+ eError = PVRSRV_ERROR_INIT_FAILURE; -+#if defined(DEBUG) -+ bSubTestPassed = IMG_FALSE; -+#endif -+ if (ui32LastFWValue != ui32FWValue) -+ { -+#if defined(DEBUG) -+ PVR_LOG(("At Offset: %d, Expected: %x, Got: %x", -+ i, -+ (eTestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i], -+ (eTestType & 0x1) ? pui32FabricCohTestBufferCpuVA[i] : ui32FWValue)); -+#endif -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "test encountered unexpected error, exiting")); -+ eError = PVRSRV_ERROR_INIT_FAILURE; -+ bExit = IMG_TRUE; -+ continue; -+ } -+ } -+ -+ ui32LastFWValue = (eTestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i]; -+ } -+ -+#if defined(DEBUG) -+ if (bExit) -+ { -+ continue; -+ } -+ -+ switch (eTestType) -+ { -+ case CPU_WRITE_GPU_READ_SM: -+ PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: completed [run #%u]: %s", -+ ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); -+ break; -+ case GPU_WRITE_CPU_READ_SM: -+ PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: completed [run #%u]: %s", -+ ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); -+ break; -+ case CPU_WRITE_GPU_READ_SH: -+ PVR_LOG(("CPU:Write/GPU:Read Snoop Hit Test: completed [run #%u]: %s", -+ ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); -+ break; -+ case GPU_WRITE_CPU_READ_SH: -+ PVR_LOG(("GPU:Write/CPU:Read Snoop Hit Test: completed [run #%u]: %s", -+ ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); -+ break; -+ default: -+ PVR_LOG(("Internal error, exiting test")); -+ eError = PVRSRV_ERROR_INIT_FAILURE; -+ bExit = IMG_TRUE; -+ continue; -+ } -+#endif -+ } -+ } -+ -+ RGXUnsetFirmwareAddress(psFabricCohTestBufferMemDesc); -+e2: -+ DevmemReleaseCpuVirtAddr(psFabricCohTestBufferMemDesc); -+e1: -+ DevmemFwUnmapAndFree(psDevInfo, psFabricCohTestBufferMemDesc); -+ -+e0: -+ if (bFeatureS7) -+ { -+ /* Restore bootloader segment settings */ -+ IMG_UINT64 ui64SegOutAddrTopCached = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(MMU_CONTEXT_MAPPING_FWIF); -+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6), -+ (ui64SegOutAddrTopCached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32); -+ } -+ else -+ { -+ /* Restore SLC bypass settings */ -+ RGXWriteReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS, ui32SLCCTRL); -+ } -+ -+ bFullTestPassed = bExit ? 
IMG_FALSE: bFullTestPassed; -+ if (bFullTestPassed) -+ { -+ PVR_LOG(("fabric coherency test: PASSED")); -+ psDevInfo->ui32CoherencyTestsDone = MAX_NUM_COHERENCY_TESTS + 1; -+ } -+ else -+ { -+ PVR_LOG(("fabric coherency test: FAILED")); -+ psDevInfo->ui32CoherencyTestsDone++; -+ } -+ -+ return eError; -+} -+ -+IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns) -+{ -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ -+ return (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & ui64ErnsBrns) != 0; -+} -+ -+IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate) -+{ -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ -+ if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS)) -+ { -+ return 0; -+ } -+ return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS); -+} -+ -+IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate) -+{ -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ -+ if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)) -+ { -+ return 0; -+ } -+ return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS); -+} -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+IMG_UINT32 RGXGetDevicePhysBusWidth(const void *hPrivate) -+{ -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ -+ if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PHYS_BUS_WIDTH)) -+ { -+ return 0; -+ } -+ return RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH); -+} -+ -+IMG_BOOL RGXDevicePA0IsValid(const void *hPrivate) -+{ -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ -+ return psDevInfo->sLayerParams.bDevicePA0IsValid; -+} -+#endif -+ -+void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; -+ -+ *psBootCodeAddr = psDevInfo->sFWCodeDevVAddrBase; -+} -+ -+void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; -+ -+ *psBootDataAddr = psDevInfo->sFWDataDevVAddrBase; -+} -+ -+void *RGXCalculateHostFWDataAddress(const void *hPrivate, void *pvHostFWDataAddr) -+{ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ IMG_UINT8 *ui8HostFWDataAddr = (IMG_UINT8*)pvHostFWDataAddr; -+ IMG_UINT32 ui32Offset = 0U; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ -+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) >= 4) -+ { -+ ui32Offset = -+ PVR_ALIGN(RGXGetFWImageSectionAllocSize(hPrivate, RISCV_UNCACHED_CODE), -+ RGXRISCVFW_REMAP_CONFIG_DEVVADDR_ALIGN) + -+ PVR_ALIGN(RGXGetFWImageSectionAllocSize(hPrivate, RISCV_CACHED_CODE), -+ RGXRISCVFW_REMAP_CONFIG_DEVVADDR_ALIGN); -+ } -+ -+ 
ui8HostFWDataAddr -= ui32Offset; -+ return (void*)ui8HostFWDataAddr; -+#else -+ PVR_UNREFERENCED_PARAMETER(hPrivate); -+ -+ return pvHostFWDataAddr; -+#endif -+} -+ -+IMG_BOOL RGXDeviceAckIrq(const void *hPrivate) -+{ -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_ASSERT(hPrivate != NULL); -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ -+ return (psDevInfo->pfnRGXAckIrq != NULL) ? -+ psDevInfo->pfnRGXAckIrq(psDevInfo) : IMG_TRUE; -+} -diff --git a/drivers/gpu/drm/img-rogue/rgxlayer_impl.h b/drivers/gpu/drm/img-rogue/rgxlayer_impl.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxlayer_impl.h -@@ -0,0 +1,67 @@ -+/*************************************************************************/ /*! -+@File -+@Title Header for DDK implementation of the Services abstraction layer -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for DDK implementation of the Services abstraction layer -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#if !defined(RGXLAYER_IMPL_H) -+#define RGXLAYER_IMPL_H -+ -+#include "rgxlayer.h" -+#include "device_connection.h" -+ -+typedef struct _RGX_LAYER_PARAMS_ -+{ -+ void *psDevInfo; -+ void *psDevConfig; -+#if defined(PDUMP) -+ IMG_UINT32 ui32PdumpFlags; -+#endif -+ -+ IMG_DEV_PHYADDR sPCAddr; -+ IMG_DEV_PHYADDR sGPURegAddr; -+ IMG_DEV_PHYADDR sBootRemapAddr; -+ IMG_DEV_PHYADDR sCodeRemapAddr; -+ IMG_DEV_PHYADDR sDataRemapAddr; -+ IMG_DEV_PHYADDR sTrampolineRemapAddr; -+ IMG_BOOL bDevicePA0IsValid; -+} RGX_LAYER_PARAMS; -+ -+#endif /* RGXLAYER_IMPL_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxmem.c b/drivers/gpu/drm/img-rogue/rgxmem.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxmem.c -@@ -0,0 +1,972 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX memory context management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX memory context management -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#include "pvr_debug.h" -+#include "rgxmem.h" -+#include "allocmem.h" -+#include "devicemem.h" -+#include "devicemem_server_utils.h" -+#include "devicemem_pdump.h" -+#include "rgxdevice.h" -+#include "rgx_fwif_km.h" -+#include "rgxfwutils.h" -+#include "pdump_km.h" -+#include "pdump_physmem.h" -+#include "pvr_notifier.h" -+#include "pvrsrv.h" -+#include "sync_internal.h" -+#include "rgx_memallocflags.h" -+#include "rgx_bvnc_defs_km.h" -+#include "info_page.h" -+ -+# include "rgxmmudefs_km.h" -+ -+#if defined(PDUMP) -+#include "sync.h" -+#endif -+ -+struct SERVER_MMU_CONTEXT_TAG -+{ -+ DEVMEM_MEMDESC *psFWMemContextMemDesc; -+ PRGXFWIF_FWMEMCONTEXT sFWMemContextDevVirtAddr; -+ MMU_CONTEXT *psMMUContext; -+ IMG_PID uiPID; -+ IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME]; -+ IMG_UINT64 ui64FBSCEntryMask; -+ DLLIST_NODE sNode; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+}; /* SERVER_MMU_CONTEXT is typedef-ed in rgxmem.h */ -+ -+PVRSRV_ERROR RGXInvalidateFBSCTable(PVRSRV_DEVICE_NODE *psDeviceNode, -+ MMU_CONTEXT *psMMUContext, -+ IMG_UINT64 ui64FBSCEntryMask) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ SERVER_MMU_CONTEXT *psServerMMUContext = NULL; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock); -+ -+ dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) -+ { -+ SERVER_MMU_CONTEXT *psIter = IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); -+ if (psIter->psMMUContext == psMMUContext) -+ { -+ psServerMMUContext = psIter; -+ } -+ } -+ -+ OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock); -+ -+ if (! psServerMMUContext) -+ { -+ return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND; -+ } -+ -+ /* Accumulate the FBSC invalidate request */ -+ psServerMMUContext->ui64FBSCEntryMask |= ui64FBSCEntryMask; -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * RGXExtractFBSCEntryMaskFromMMUContext -+ * -+ */ -+PVRSRV_ERROR RGXExtractFBSCEntryMaskFromMMUContext(PVRSRV_DEVICE_NODE *psDeviceNode, -+ SERVER_MMU_CONTEXT *psServerMMUContext, -+ IMG_UINT64 *pui64FBSCEntryMask) -+{ -+ if (!psServerMMUContext) -+ { -+ return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND; -+ } -+ -+ *pui64FBSCEntryMask = psServerMMUContext->ui64FBSCEntryMask; -+ psServerMMUContext->ui64FBSCEntryMask = 0; -+ -+ return PVRSRV_OK; -+} -+ -+void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode, -+ MMU_CONTEXT *psMMUContext, -+ MMU_LEVEL eMMULevel, -+ IMG_BOOL bUnmap) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; -+ IMG_UINT32 ui32NewCacheFlags; -+ -+ PVR_UNREFERENCED_PARAMETER(bUnmap); -+ -+ switch (eMMULevel) -+ { -+ case MMU_LEVEL_3: -+ ui32NewCacheFlags = RGXFWIF_MMUCACHEDATA_FLAGS_PC; -+ -+ break; -+ case MMU_LEVEL_2: -+ ui32NewCacheFlags = RGXFWIF_MMUCACHEDATA_FLAGS_PD; -+ -+ break; -+ case MMU_LEVEL_1: -+ ui32NewCacheFlags = RGXFWIF_MMUCACHEDATA_FLAGS_PT; -+ -+#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) -+ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT))) -+#endif -+ { -+ ui32NewCacheFlags |= RGXFWIF_MMUCACHEDATA_FLAGS_TLB; -+ } -+ -+ break; -+ default: -+ ui32NewCacheFlags = 0; -+ PVR_ASSERT(0); -+ -+ break; -+ } -+ -+#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) -+ /* For VIVT devices only accumulate the flags on the Firmware MMU context -+ * since the Firmware/HW invalidates caches on every kick automatically. 
*/ -+ if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT) || -+ psMMUContext == psDevInfo->psKernelMMUCtx) -+#endif -+ { -+ MMU_AppendCacheFlags(psDevInfo->psKernelMMUCtx, ui32NewCacheFlags); -+ } -+} -+ -+static -+PVRSRV_ERROR _PrepareAndSubmitCacheCommand(PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGXFWIF_DM eDM, -+ IMG_UINT32 ui32CacheFlags, -+ IMG_BOOL bInterrupt, -+ IMG_UINT32 *pui32MMUInvalidateUpdate) -+{ -+ PVRSRV_ERROR eError; -+ RGXFWIF_KCCB_CMD sFlushCmd; -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; -+ -+ *pui32MMUInvalidateUpdate = psDeviceNode->ui32NextMMUInvalidateUpdate++; -+ -+ /* Setup cmd and add the device nodes sync object */ -+ sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_MMUCACHE; -+ sFlushCmd.uCmdData.sMMUCacheData.ui32MMUCacheSyncUpdateValue = *pui32MMUInvalidateUpdate; -+ SyncPrimGetFirmwareAddr(psDeviceNode->psMMUCacheSyncPrim, -+ &sFlushCmd.uCmdData.sMMUCacheData.sMMUCacheSync.ui32Addr); -+ -+ /* -+ * Indicate if the firmware should signal command completion to the host. -+ * The sync update will always happen. This flag requests that the FW pass -+ * back the result of the KCCB command and interrupt the host. The KCCB -+ * response is not used but the interrupt signal to the host is as a way -+ * to know the sync update may have happened. -+ */ -+ if (bInterrupt) -+ { -+ ui32CacheFlags |= RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT; -+ } -+ -+ sFlushCmd.uCmdData.sMMUCacheData.ui32CacheFlags = ui32CacheFlags; -+ -+#if defined(PDUMP) -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "Submit MMU flush and invalidate (flags = 0x%08x)", -+ ui32CacheFlags); -+#endif -+ -+ /* Schedule MMU cache command */ -+ eError = RGXSendCommand(psDevInfo, -+ &sFlushCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to schedule MMU cache command to " -+ "DM=%d with error (%u)", -+ __func__, eDM, eError)); -+ psDeviceNode->ui32NextMMUInvalidateUpdate--; -+ -+ MMU_AppendCacheFlags(psDevInfo->psKernelMMUCtx, ui32CacheFlags); -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 *pui32MMUInvalidateUpdate) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32FWCacheFlags; -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; -+ eError = PVRSRVPowerLock(psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ goto RGXMMUCacheInvalidateKick_exit; -+ } -+ -+ /* -+ * Atomically clear flags to ensure we never accidentally read state -+ * inconsistently or overwrite valid cache flags with 0. 
-+ */ -+ ui32FWCacheFlags = MMU_GetAndResetCacheFlags(psDevInfo->psKernelMMUCtx); -+ if (ui32FWCacheFlags == 0) -+ { -+ /* Nothing to do if no cache ops pending */ -+ eError = PVRSRV_OK; -+ goto _PowerUnlockAndReturnErr; -+ } -+ -+ /* Ensure device is powered up before sending cache command */ -+ PDUMPPOWCMDSTART(psDeviceNode); -+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, -+ PVRSRV_DEV_POWER_STATE_ON, -+ PVRSRV_POWER_FLAGS_NONE); -+ PDUMPPOWCMDEND(psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition RGX to ON (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ MMU_AppendCacheFlags(psDevInfo->psKernelMMUCtx, ui32FWCacheFlags); -+ goto _PowerUnlockAndReturnErr; -+ } -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = _PrepareAndSubmitCacheCommand(psDeviceNode, RGXFWIF_DM_GP, ui32FWCacheFlags, -+ IMG_TRUE, pui32MMUInvalidateUpdate); -+ if (!PVRSRVIsRetryError(eError)) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } -+ END_LOOP_UNTIL_TIMEOUT(); -+ -+ PVR_LOG_IF_ERROR(eError, "_PrepareAndSubmitCacheCommand"); -+ -+_PowerUnlockAndReturnErr: -+ PVRSRVPowerUnlock(psDeviceNode); -+ -+RGXMMUCacheInvalidateKick_exit: -+ return eError; -+} -+ -+PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGXFWIF_DM eDM, -+ IMG_UINT32 *pui32MMUInvalidateUpdate) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; -+ IMG_UINT32 ui32FWCacheFlags; -+ -+ /* Caller should ensure that power lock is held before calling this function */ -+ PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock)); -+ -+ /* -+ * Atomically clear flags to ensure we never accidentally read state -+ * inconsistently or overwrite valid cache flags with 0. -+ */ -+ ui32FWCacheFlags = MMU_GetAndResetCacheFlags(psDevInfo->psKernelMMUCtx); -+ if (ui32FWCacheFlags == 0) -+ { -+ /* Nothing to do if no cache ops pending */ -+ return PVRSRV_OK; -+ } -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = _PrepareAndSubmitCacheCommand(psDeviceNode, eDM, ui32FWCacheFlags, -+ IMG_FALSE, pui32MMUInvalidateUpdate); -+ if (!PVRSRVIsRetryError(eError)) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } -+ END_LOOP_UNTIL_TIMEOUT(); -+ -+ PVR_LOG_RETURN_IF_ERROR(eError, "_PrepareAndSubmitCacheCommand"); -+ -+ return PVRSRV_OK; -+} -+ -+#if defined(RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR) -+/* -+ * RGXMapBRN71422TargetPhysicalAddress -+ * -+ * Only used on early Volcanic cores. -+ * Set-up a special MMU tree mapping with a single page that eventually points -+ * to RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR. This is supplied by the -+ * customer (in rgxdefs_km.h) and hence this WA is off by default. -+ * -+ * PC entries are 32b, with the last 4 bits being 0 except for the LSB bit that should be 1 (Valid). Addr is 4KB aligned. -+ * PD entries are 64b, with addr in bits 39:5 and everything else 0 except for LSB bit that is Valid. Addr is byte aligned? -+ * PT entries are 64b, with phy addr in bits 39:12 and everything else 0 except for LSB bit that is Valid. Addr is 4KB aligned. -+ * So, we can construct the page tables in a single page like this: -+ * 0x00 : PCE (PCE index 0) -+ * 0x04 : 0x0 -+ * 0x08 : PDEa (PDE index 1) -+ * 0x0C : PDEb -+ * 0x10 : PTEa (PTE index 2) -+ * 0x14 : PTEb -+ * -+ * With the PCE and the PDE pointing to this same page. 
-+ * The VA address that we are mapping is therefore: -+ * VA = PCE_idx*PCE_size + PDE_idx*PDE_size + PTE_idx*PTE_size = -+ * = 0 * 1GB + 1 * 2MB + 2 * 4KB = -+ * = 0 + 0x20_0000 + 0x2000 = -+ * = 0x00_0020_2000 -+ */ -+void RGXMapBRN71422TargetPhysicalAddress(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_DEV_PHYADDR sPhysAddrL1Px, -+ void *pxL1PxCpuVAddr) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; -+ IMG_UINT32 *pui32Px = pxL1PxCpuVAddr; -+ IMG_UINT64 *pui64Px = pxL1PxCpuVAddr; -+ IMG_UINT64 ui64Entry; -+ -+ -+ /* Only setup the BRN71422 workaround if this is the FW memory -+ * context and BRN present. -+ */ -+ if ((psConnection != NULL) || !RGX_IS_BRN_SUPPORTED(psDevInfo, 71422)) -+ { -+ return; -+ } -+ -+ /* Setup dummy mapping to fast constant time physical address. */ -+ /* PCE points to PC */ -+ ui64Entry = sPhysAddrL1Px.uiAddr; -+ ui64Entry = ui64Entry >> RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT; -+ ui64Entry = ui64Entry << RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT; -+ ui64Entry = ui64Entry & ~RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK; -+ ui64Entry = ui64Entry | RGX_MMUCTRL_PC_DATA_VALID_EN; -+ pui32Px[0] = (IMG_UINT32) ui64Entry; -+ -+ /* PDE points to PC */ -+ ui64Entry = sPhysAddrL1Px.uiAddr; -+ ui64Entry = ui64Entry & ~RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK; -+ ui64Entry = ui64Entry | RGX_MMUCTRL_PD_DATA_VALID_EN; -+ pui64Px[1] = ui64Entry; -+ -+ /* PTE points to PAddr */ -+ ui64Entry = RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR; -+ ui64Entry = ui64Entry & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK; -+ ui64Entry = ui64Entry | RGX_MMUCTRL_PT_DATA_VALID_EN; -+ pui64Px[2] = ui64Entry; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapping the BRN71422 workaround to target physical address 0x%" IMG_UINT64_FMTSPECx ".", -+ __func__, RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR)); -+ -+} -+#endif -+ -+/* Common header, only needed for architectures with MIPS FW CPU */ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+void RGXMMUTweakProtFlags(struct _PVRSRV_DEVICE_NODE_ *psDevNode, -+ MMU_DEVICEATTRIBS *psDevAttrs, -+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, -+ MMU_PROTFLAGS_T *puiMMUProtFlags) -+{ -+ /* If we are allocating on the MMU of the firmware processor, the -+ * cached/uncached attributes must depend on the FIRMWARE_CACHED -+ * allocation flag. -+ */ -+ if (psDevAttrs == psDevNode->psFirmwareMMUDevAttrs) -+ { -+ if (uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)) -+ { -+ *puiMMUProtFlags |= MMU_PROTFLAGS_CACHED; -+ } -+ else -+ { -+ *puiMMUProtFlags &= ~MMU_PROTFLAGS_CACHED; -+ -+ } -+ *puiMMUProtFlags &= ~MMU_PROTFLAGS_CACHE_COHERENT; -+ } -+} -+#endif -+ -+ -+/* page fault debug is the only current use case for needing to find process info -+ * after that process device memory context has been destroyed -+ */ -+ -+typedef struct _UNREGISTERED_MEMORY_CONTEXT_ -+{ -+ IMG_PID uiPID; -+ IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME]; -+ IMG_DEV_PHYADDR sPCDevPAddr; -+} UNREGISTERED_MEMORY_CONTEXT; -+ -+/* must be a power of two */ -+#define UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE (1 << 3) -+ -+static UNREGISTERED_MEMORY_CONTEXT gasUnregisteredMemCtxs[UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE]; -+static IMG_UINT32 gui32UnregisteredMemCtxsHead; -+ -+/* record a device memory context being unregistered. 
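The BRN71422 layout described above pins the mapped virtual address purely by the chosen indices (PCE 0, PDE 1, PTE 2). A quick standalone check of the arithmetic in that comment, assuming the per-level coverage it implies (1 GB per PC entry, 2 MB per PD entry, 4 KB per PT entry); the shift values are taken from the comment, not from the register headers:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    /* Coverage per level as implied by the comment: 1 GB, 2 MB, 4 KB. */
    const uint64_t pce_size = 1ULL << 30;
    const uint64_t pde_size = 1ULL << 21;
    const uint64_t pte_size = 1ULL << 12;

    /* Indices chosen by the workaround: PCE 0, PDE 1, PTE 2. */
    const uint64_t va = 0 * pce_size + 1 * pde_size + 2 * pte_size;

    /* Expected: 0x0000202000, i.e. the 0x00_0020_2000 quoted in the comment. */
    printf("BRN71422 WA virtual address = 0x%010" PRIx64 "\n", va);
    return 0;
}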
-+ * the list of unregistered contexts can be used to find the PID and process name -+ * belonging to a memory context which has been destroyed -+ */ -+static void _RecordUnregisteredMemoryContext(PVRSRV_RGXDEV_INFO *psDevInfo, SERVER_MMU_CONTEXT *psServerMMUContext) -+{ -+ UNREGISTERED_MEMORY_CONTEXT *psRecord; -+ -+ OSLockAcquire(psDevInfo->hMMUCtxUnregLock); -+ -+ psRecord = &gasUnregisteredMemCtxs[gui32UnregisteredMemCtxsHead]; -+ -+ gui32UnregisteredMemCtxsHead = (gui32UnregisteredMemCtxsHead + 1) -+ & (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1); -+ -+ OSLockRelease(psDevInfo->hMMUCtxUnregLock); -+ -+ psRecord->uiPID = psServerMMUContext->uiPID; -+ if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &psRecord->sPCDevPAddr) != PVRSRV_OK) -+ { -+ PVR_LOG(("_RecordUnregisteredMemoryContext: Failed to get PC address for memory context")); -+ } -+ OSStringLCopy(psRecord->szProcessName, psServerMMUContext->szProcessName, sizeof(psRecord->szProcessName)); -+} -+ -+ -+void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData) -+{ -+ SERVER_MMU_CONTEXT *psServerMMUContext = hPrivData; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psServerMMUContext->psDevInfo; -+ -+#if defined(PDUMP) -+ { -+ RGXFWIF_DEV_VIRTADDR sFWAddr; -+ -+ RGXSetFirmwareAddress(&sFWAddr, -+ psServerMMUContext->psFWMemContextMemDesc, -+ 0, -+ RFW_FWADDR_NOREF_FLAG); -+ -+ /* -+ * MMU cache commands (always dumped) might have a pointer to this FW -+ * memory context, wait until the FW has caught-up to the latest command. -+ */ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, -+ "Ensure FW has executed all MMU invalidations on FW memory " -+ "context 0x%x before freeing it", sFWAddr.ui32Addr); -+ SyncPrimPDumpPol(psDevInfo->psDeviceNode->psMMUCacheSyncPrim, -+ psDevInfo->psDeviceNode->ui32NextMMUInvalidateUpdate - 1, -+ 0xFFFFFFFF, -+ PDUMP_POLL_OPERATOR_GREATEREQUAL, -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+#endif -+ -+ OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock); -+ dllist_remove_node(&psServerMMUContext->sNode); -+ OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock); -+ -+ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) -+ { -+ _RecordUnregisteredMemoryContext(psDevInfo, psServerMMUContext); -+ } -+ -+ /* -+ * Release the page catalogue address acquired in RGXRegisterMemoryContext(). -+ */ -+ MMU_ReleaseBaseAddr(NULL); -+ -+ /* -+ * Free the firmware memory context. -+ */ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Free FW memory context"); -+ DevmemFwUnmapAndFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc); -+ -+ OSFreeMem(psServerMMUContext); -+} -+ -+/* -+ * RGXRegisterMemoryContext -+ */ -+PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode, -+ MMU_CONTEXT *psMMUContext, -+ IMG_HANDLE *hPrivData) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGXFWIF_FWMEMCONTEXT *psFWMemContext; -+ DEVMEM_MEMDESC *psFWMemContextMemDesc; -+ SERVER_MMU_CONTEXT *psServerMMUContext; -+ -+ if (psDevInfo->psKernelMMUCtx == NULL) -+ { -+ /* -+ * This must be the creation of the Kernel memory context. Take a copy -+ * of the MMU context for use when programming the BIF. 
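Returning briefly to _RecordUnregisteredMemoryContext above: it depends on UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE being a power of two so the head index can wrap with a mask rather than a modulo. A stripped-down sketch of that record step (record_t and its fields are simplified stand-ins for the driver structure):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define HISTORY_SIZE (1 << 3)            /* must remain a power of two */

typedef struct
{
    int      pid;
    char     name[16];
    uint64_t pc_addr;
} record_t;

static record_t g_history[HISTORY_SIZE];
static uint32_t g_head;                  /* next slot to overwrite */

static void record_unregistered(int pid, const char *name, uint64_t pc_addr)
{
    record_t *slot = &g_history[g_head];

    /* Wrap with a mask: only valid because HISTORY_SIZE is a power of two. */
    g_head = (g_head + 1) & (HISTORY_SIZE - 1);

    slot->pid = pid;
    slot->pc_addr = pc_addr;
    snprintf(slot->name, sizeof(slot->name), "%s", name);
}

int main(void)
{
    int i;

    for (i = 0; i < 10; i++)             /* 10 > 8, so the two oldest records are overwritten */
        record_unregistered(100 + i, "proc", 0x1000u * (uint64_t)i);

    /* The slot at the head is the next to be recycled, i.e. the oldest survivor. */
    printf("head=%" PRIu32 " oldest surviving pid=%d\n", g_head, g_history[g_head].pid);
    return 0;
}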
-+ */ -+ psDevInfo->psKernelMMUCtx = psMMUContext; -+ } -+ else -+ { -+ psServerMMUContext = OSAllocMem(sizeof(*psServerMMUContext)); -+ if (psServerMMUContext == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto fail_alloc_server_ctx; -+ } -+ -+ psServerMMUContext->psDevInfo = psDevInfo; -+ psServerMMUContext->ui64FBSCEntryMask = 0; -+ psServerMMUContext->sFWMemContextDevVirtAddr.ui32Addr = 0; -+ -+ /* -+ Allocate device memory for the firmware memory context for the new -+ application. -+ */ -+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Allocate RGX firmware memory context"); -+ eError = DevmemFwAllocate(psDevInfo, -+ sizeof(*psFWMemContext), -+ RGX_FWCOMCTX_ALLOCFLAGS, -+ "FwMemoryContext", -+ &psFWMemContextMemDesc); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate firmware memory context (%u)", -+ __func__, -+ eError)); -+ goto fail_alloc_fw_ctx; -+ } -+ -+ /* -+ Temporarily map the firmware memory context to the kernel. -+ */ -+ eError = DevmemAcquireCpuVirtAddr(psFWMemContextMemDesc, -+ (void **)&psFWMemContext); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to map firmware memory context (%u)", -+ __func__, -+ eError)); -+ goto fail_acquire_cpu_addr; -+ } -+ -+ /* -+ * Write the new memory context's page catalogue into the firmware memory -+ * context for the client. -+ */ -+ eError = MMU_AcquireBaseAddr(psMMUContext, &psFWMemContext->sPCDevPAddr); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to acquire Page Catalogue address (%u)", -+ __func__, -+ eError)); -+ DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc); -+ goto fail_acquire_base_addr; -+ } -+ -+ /* -+ * Set default values for the rest of the structure. -+ */ -+ psFWMemContext->uiPageCatBaseRegSet = RGXFW_BIF_INVALID_PCSET; -+ psFWMemContext->uiBreakpointAddr = 0; -+ psFWMemContext->uiBPHandlerAddr = 0; -+ psFWMemContext->uiBreakpointCtl = 0; -+ -+#if defined(SUPPORT_CUSTOM_OSID_EMISSION) -+{ -+ IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0; -+ IMG_BOOL bOSidAxiProt; -+ -+ MMU_GetOSids(psMMUContext, &ui32OSid, &ui32OSidReg, &bOSidAxiProt); -+ -+ psFWMemContext->ui32OSid = ui32OSidReg; -+ psFWMemContext->bOSidAxiProt = bOSidAxiProt; -+} -+#endif -+ -+#if defined(PDUMP) -+ { -+ IMG_CHAR aszName[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH]; -+ IMG_DEVMEM_OFFSET_T uiOffset = 0; -+ -+ /* -+ * Dump the Mem context allocation -+ */ -+ DevmemPDumpLoadMem(psFWMemContextMemDesc, 0, sizeof(*psFWMemContext), PDUMP_FLAGS_CONTINUOUS); -+ -+ -+ /* -+ * Obtain a symbolic addr of the mem context structure -+ */ -+ eError = DevmemPDumpPageCatBaseToSAddr(psFWMemContextMemDesc, -+ &uiOffset, -+ aszName, -+ PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to generate a Dump Page Catalogue address (%u)", -+ __func__, -+ eError)); -+ DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc); -+ goto fail_pdump_cat_base_addr; -+ } -+ -+ /* -+ * Dump the Page Cat tag in the mem context (symbolic address) -+ */ -+ eError = MMU_PDumpWritePageCatBase(psMMUContext, -+ aszName, -+ uiOffset, -+ 8, /* 64-bit register write */ -+ 0, -+ 0, -+ 0); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to acquire Page Catalogue address (%u)", -+ __func__, -+ eError)); -+ DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc); -+ goto fail_pdump_cat_base; -+ } -+ } -+#endif -+ -+ /* -+ * Release kernel address acquired above. 
-+ */ -+ RGXFwSharedMemCacheOpPtr(psFWMemContext, FLUSH); -+ DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc); -+ -+ /* -+ * Store the process information for this device memory context -+ * for use with the host page-fault analysis. -+ */ -+ psServerMMUContext->uiPID = OSGetCurrentClientProcessIDKM(); -+ psServerMMUContext->psMMUContext = psMMUContext; -+ psServerMMUContext->psFWMemContextMemDesc = psFWMemContextMemDesc; -+ OSStringLCopy(psServerMMUContext->szProcessName, -+ OSGetCurrentClientProcessNameKM(), -+ sizeof(psServerMMUContext->szProcessName)); -+ -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "New memory context: Process Name: %s PID: %u (0x%08X)", -+ psServerMMUContext->szProcessName, -+ psServerMMUContext->uiPID, -+ psServerMMUContext->uiPID); -+ -+ OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock); -+ dllist_add_to_tail(&psDevInfo->sMemoryContextList, &psServerMMUContext->sNode); -+ OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock); -+ -+ *hPrivData = psServerMMUContext; -+ } -+ -+ return PVRSRV_OK; -+ -+#if defined(PDUMP) -+fail_pdump_cat_base: -+fail_pdump_cat_base_addr: -+ MMU_ReleaseBaseAddr(NULL); -+#endif -+fail_acquire_base_addr: -+ /* Done before jumping to the fail point as the release is done before exit */ -+fail_acquire_cpu_addr: -+ DevmemFwUnmapAndFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc); -+fail_alloc_fw_ctx: -+ OSFreeMem(psServerMMUContext); -+fail_alloc_server_ctx: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv) -+{ -+ SERVER_MMU_CONTEXT *psMMUContext = (SERVER_MMU_CONTEXT *) hPriv; -+ -+ return psMMUContext->psFWMemContextMemDesc; -+} -+ -+void RGXSetFWMemContextDevVirtAddr(SERVER_MMU_CONTEXT *psServerMMUContext, -+ RGXFWIF_DEV_VIRTADDR sFWMemContextAddr) -+{ -+ psServerMMUContext->sFWMemContextDevVirtAddr.ui32Addr = sFWMemContextAddr.ui32Addr; -+} -+ -+void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_DEV_VIRTADDR *psDevVAddr, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ MMU_FAULT_DATA *psOutFaultData) -+{ -+ IMG_DEV_PHYADDR sPCDevPAddr; -+ DLLIST_NODE *psNode, *psNext; -+ -+ OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock); -+ -+ dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) -+ { -+ SERVER_MMU_CONTEXT *psServerMMUContext = -+ IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); -+ -+ if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK) -+ { -+ PVR_LOG(("Failed to get PC address for memory context")); -+ continue; -+ } -+ -+ if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr) -+ { -+ MMU_CheckFaultAddress(psServerMMUContext->psMMUContext, psDevVAddr, psOutFaultData); -+ goto out_unlock; -+ } -+ } -+ -+ /* Lastly check for fault in the kernel allocated memory */ -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ if (MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPCDevPAddr) != PVRSRV_OK) -+ { -+ PVR_LOG(("Failed to get PC address for kernel memory context")); -+ } -+ -+ if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr) -+ { -+ MMU_CheckFaultAddress(psDevInfo->psKernelMMUCtx, psDevVAddr, psOutFaultData); -+ } -+ } -+ -+out_unlock: -+ OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock); -+} -+ -+/* given the physical address of a page catalogue, searches for a corresponding -+ * MMU context and if found, provides the caller details of the process. -+ * Returns IMG_TRUE if a process is found. 
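The error path of RGXRegisterMemoryContext above unwinds its partial setup through a chain of fail_* labels, releasing resources in reverse order of acquisition. A compact illustration of that goto-unwind idiom with placeholder resources (acquire_a, acquire_b and acquire_c are hypothetical, standing in for the memdesc allocation, the temporary CPU mapping and the page-catalogue acquisition):

#include <stdio.h>
#include <stdlib.h>

/* Retained on success in this demo; in the real function the acquired
 * objects end up owned by the server MMU context. */
static void *g_ctx_a;
static void *g_ctx_b;

static int acquire_a(void **p) { *p = malloc(8);  return (*p != NULL) ? 0 : -1; }
static int acquire_b(void **p) { *p = malloc(16); return (*p != NULL) ? 0 : -1; }
static int acquire_c(void)     { return 0; /* pretend this step can also fail */ }

static int register_ctx(void)
{
    void *a = NULL;
    void *b = NULL;
    int err;

    err = acquire_a(&a);
    if (err) goto fail_a;

    err = acquire_b(&b);
    if (err) goto fail_b;

    err = acquire_c();
    if (err) goto fail_c;

    g_ctx_a = a;                 /* success: ownership is handed over */
    g_ctx_b = b;
    return 0;

    /* Unwind in strict reverse order of acquisition. */
fail_c:
    free(b);
fail_b:
    free(a);
fail_a:
    return err;
}

int main(void)
{
    printf("register_ctx -> %d\n", register_ctx());
    free(g_ctx_b);               /* tidy up the demo's retained resources */
    free(g_ctx_a);
    return 0;
}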
-+ */ -+IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress, -+ RGXMEM_PROCESS_INFO *psInfo) -+{ -+ IMG_BOOL bRet = IMG_FALSE; -+ DLLIST_NODE *psNode, *psNext; -+ SERVER_MMU_CONTEXT *psServerMMUContext = NULL; -+ -+ /* check if the input PC addr corresponds to an active memory context */ -+ dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) -+ { -+ SERVER_MMU_CONTEXT *psThisMMUContext = -+ IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); -+ IMG_DEV_PHYADDR sPCDevPAddr; -+ -+ if (MMU_AcquireBaseAddr(psThisMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK) -+ { -+ PVR_LOG(("Failed to get PC address for memory context")); -+ continue; -+ } -+ -+ if (sPCAddress.uiAddr == sPCDevPAddr.uiAddr) -+ { -+ psServerMMUContext = psThisMMUContext; -+ break; -+ } -+ } -+ -+ if (psServerMMUContext != NULL) -+ { -+ psInfo->uiPID = psServerMMUContext->uiPID; -+ OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName)); -+ psInfo->bUnregistered = IMG_FALSE; -+ bRet = IMG_TRUE; -+ } -+ /* else check if the input PC addr corresponds to the firmware */ -+ else -+ { -+ IMG_DEV_PHYADDR sKernelPCDevPAddr; -+ PVRSRV_ERROR eError; -+ -+ eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sKernelPCDevPAddr); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("Failed to get PC address for kernel memory context")); -+ } -+ else -+ { -+ if (sPCAddress.uiAddr == sKernelPCDevPAddr.uiAddr) -+ { -+ psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE; -+ OSStringLCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName)); -+ psInfo->bUnregistered = IMG_FALSE; -+ bRet = IMG_TRUE; -+ } -+ } -+ } -+ -+ if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) && -+ (bRet == IMG_FALSE)) -+ { -+ /* no active memory context found with the given PC address. -+ * Check the list of most recently freed memory contexts. 
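The loop just below walks the unregistered-context history from the newest entry, which sits one slot before the head, back to the oldest entry at the head itself, using the same power-of-two mask for the wrap. In isolation the index sequence looks like this (HISTORY_SIZE and the head value are illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define HISTORY_SIZE (1 << 3)

int main(void)
{
    const uint32_t mask = HISTORY_SIZE - 1;
    const uint32_t head = 2;                /* next slot to overwrite */
    uint32_t i, j;

    /* Newest entry is (head - 1) & mask; step backwards with the same mask
     * so the walk visits every slot exactly once and ends on the head. */
    for (i = (head - 1) & mask, j = 0; j < HISTORY_SIZE; i = (i - 1) & mask, j++)
    {
        printf("visit slot %" PRIu32 "%s\n", i,
               (j == 0) ? " (newest)" : (j == HISTORY_SIZE - 1) ? " (oldest)" : "");
    }

    return 0;
}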
-+ */ -+ const IMG_UINT32 ui32Mask = UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1; -+ IMG_UINT32 i, j; -+ -+ OSLockAcquire(psDevInfo->hMMUCtxUnregLock); -+ -+ /* iterate through the list of unregistered memory contexts -+ * from newest (one before the head) to the oldest (the current head) -+ */ -+ for (i = (gui32UnregisteredMemCtxsHead - 1) & ui32Mask, j = 0; -+ j < UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE; -+ i = (i - 1) & ui32Mask, j++) -+ { -+ UNREGISTERED_MEMORY_CONTEXT *psRecord = &gasUnregisteredMemCtxs[i]; -+ -+ if (psRecord->sPCDevPAddr.uiAddr == sPCAddress.uiAddr) -+ { -+ psInfo->uiPID = psRecord->uiPID; -+ OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)); -+ psInfo->bUnregistered = IMG_TRUE; -+ bRet = IMG_TRUE; -+ break; -+ } -+ } -+ -+ OSLockRelease(psDevInfo->hMMUCtxUnregLock); -+ } -+ -+ return bRet; -+} -+ -+IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID, -+ RGXMEM_PROCESS_INFO *psInfo) -+{ -+ IMG_BOOL bRet = IMG_FALSE; -+ DLLIST_NODE *psNode, *psNext; -+ SERVER_MMU_CONTEXT *psServerMMUContext = NULL; -+ -+ /* check if the input PID corresponds to an active memory context */ -+ dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) -+ { -+ SERVER_MMU_CONTEXT *psThisMMUContext = -+ IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); -+ -+ if (psThisMMUContext->uiPID == uiPID) -+ { -+ psServerMMUContext = psThisMMUContext; -+ break; -+ } -+ } -+ -+ if (psServerMMUContext != NULL) -+ { -+ psInfo->uiPID = psServerMMUContext->uiPID; -+ OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName)); -+ psInfo->bUnregistered = IMG_FALSE; -+ bRet = IMG_TRUE; -+ } -+ /* else check if the input PID corresponds to the firmware */ -+ else if (uiPID == RGXMEM_SERVER_PID_FIRMWARE) -+ { -+ psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE; -+ OSStringLCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName)); -+ psInfo->bUnregistered = IMG_FALSE; -+ bRet = IMG_TRUE; -+ } -+ -+ if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) && -+ (bRet == IMG_FALSE)) -+ { -+ /* if the PID didn't correspond to an active context or the -+ * FW address then see if it matches a recently unregistered context -+ */ -+ const IMG_UINT32 ui32Mask = UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1; -+ IMG_UINT32 i, j; -+ -+ OSLockAcquire(psDevInfo->hMMUCtxUnregLock); -+ -+ for (i = (gui32UnregisteredMemCtxsHead - 1) & ui32Mask, j = 0; -+ j < UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE; -+ i = (i - 1) & ui32Mask, j++) -+ { -+ UNREGISTERED_MEMORY_CONTEXT *psRecord = &gasUnregisteredMemCtxs[i]; -+ -+ if (psRecord->uiPID == uiPID) -+ { -+ psInfo->uiPID = psRecord->uiPID; -+ OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)); -+ psInfo->bUnregistered = IMG_TRUE; -+ bRet = IMG_TRUE; -+ break; -+ } -+ } -+ -+ OSLockRelease(psDevInfo->hMMUCtxUnregLock); -+ } -+ -+ return bRet; -+} -+ -+IMG_PID RGXGetPIDFromServerMMUContext(SERVER_MMU_CONTEXT *psServerMMUContext) -+{ -+ if (psServerMMUContext) -+ { -+ return psServerMMUContext->uiPID; -+ } -+ return 0; -+} -+ -+/****************************************************************************** -+ End of file (rgxmem.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxmem.h b/drivers/gpu/drm/img-rogue/rgxmem.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ 
b/drivers/gpu/drm/img-rogue/rgxmem.h -@@ -0,0 +1,157 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX memory context management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for RGX memory context management -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#if !defined(RGXMEM_H) -+#define RGXMEM_H -+ -+#include "pvrsrv_error.h" -+#include "device.h" -+#include "mmu_common.h" -+#include "rgxdevice.h" -+ -+#define RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME 16 -+ -+/* this PID denotes the firmware */ -+#define RGXMEM_SERVER_PID_FIRMWARE 0xFFFFFFFF -+ -+/* this PID denotes the PM */ -+#define RGXMEM_SERVER_PID_PM 0xEFFFFFFF -+ -+typedef struct _RGXMEM_PROCESS_INFO_ -+{ -+ IMG_PID uiPID; -+ IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME]; -+ IMG_BOOL bUnregistered; -+} RGXMEM_PROCESS_INFO; -+ -+typedef struct SERVER_MMU_CONTEXT_TAG SERVER_MMU_CONTEXT; -+ -+IMG_DEV_PHYADDR GetPC(MMU_CONTEXT * psContext); -+ -+void RGXSetFWMemContextDevVirtAddr(SERVER_MMU_CONTEXT *psServerMMUContext, -+ RGXFWIF_DEV_VIRTADDR sFWMemContextAddr); -+ -+void RGXMMUSyncPrimAlloc(PVRSRV_DEVICE_NODE *psDevNode); -+void RGXMMUSyncPrimFree(void); -+ -+PVRSRV_ERROR RGXInvalidateFBSCTable(PVRSRV_DEVICE_NODE *psDeviceNode, -+ MMU_CONTEXT *psMMUContext, -+ IMG_UINT64 ui64FBSCEntryMask); -+ -+PVRSRV_ERROR RGXExtractFBSCEntryMaskFromMMUContext(PVRSRV_DEVICE_NODE *psDeviceNode, -+ SERVER_MMU_CONTEXT *psServerMMUContext, -+ IMG_UINT64 *pui64FBSCEntryMask); -+ -+void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDevNode, -+ MMU_CONTEXT *psMMUContext, -+ MMU_LEVEL eMMULevel, -+ IMG_BOOL bUnmap); -+ -+/*************************************************************************/ /*! -+@Function RGXMMUCacheInvalidateKick -+ -+@Description Sends a flush command to a particular DM but first takes -+ the power lock. -+ -+@Input psDevNode Device Node pointer -+@Input pui32NextMMUInvalidateUpdate -+ -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT32 *pui32NextMMUInvalidateUpdate); -+ -+/*************************************************************************/ /*! -+@Function RGXPreKickCacheCommand -+ -+@Description Sends a cache flush command to a particular DM without -+ honouring the power lock. It's the caller's responsibility -+ to ensure power lock is held before calling this function. -+ -+@Input psDevInfo Device Info -+@Input eDM To which DM the cmd is sent. 
-+@Input pui32MMUInvalidateUpdate -+ -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo, -+ RGXFWIF_DM eDM, -+ IMG_UINT32 *pui32MMUInvalidateUpdate); -+ -+/* Needed for Volcanic architectures with BRN71422 */ -+#if defined(RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR) -+void RGXMapBRN71422TargetPhysicalAddress(struct _CONNECTION_DATA_ *psConnection, -+ struct _PVRSRV_DEVICE_NODE_ *psDevNode, -+ IMG_DEV_PHYADDR sPhysAddrL1Px, -+ void *pxL1PxCpuVAddr); -+#endif -+ -+/* Needed for Rogue architecture where a MIPS FW CPU is used */ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+void RGXMMUTweakProtFlags(struct _PVRSRV_DEVICE_NODE_ *psDevNode, -+ MMU_DEVICEATTRIBS *psDevAttrs, -+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, -+ MMU_PROTFLAGS_T *uiMMUProtFlags); -+#endif -+ -+void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData); -+PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDevNode, -+ MMU_CONTEXT *psMMUContext, -+ IMG_HANDLE *hPrivData); -+ -+DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv); -+ -+void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_DEV_VIRTADDR *psDevVAddr, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ MMU_FAULT_DATA *psOutFaultData); -+ -+IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress, -+ RGXMEM_PROCESS_INFO *psInfo); -+ -+IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID, -+ RGXMEM_PROCESS_INFO *psInfo); -+ -+IMG_PID RGXGetPIDFromServerMMUContext(SERVER_MMU_CONTEXT *psServerMMUContext); -+ -+#endif /* RGXMEM_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxmipsmmuinit.c b/drivers/gpu/drm/img-rogue/rgxmipsmmuinit.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxmipsmmuinit.c -@@ -0,0 +1,1068 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device specific initialisation routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device specific MMU initialisation -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#include "rgxmipsmmuinit.h" -+ -+#include "device.h" -+#include "img_types.h" -+#include "img_defs.h" -+#include "mmu_common.h" -+#include "pdump_mmu.h" -+#include "rgxheapconfig.h" -+#include "pvr_debug.h" -+#include "pvrsrv_error.h" -+#include "rgx_memallocflags.h" -+#include "pdump_km.h" -+#include "rgxdevice.h" -+#include "log2.h" -+ -+/* -+ * Bits of PT, PD and PC not involving addresses -+ */ -+ -+/* Currently there is no page directory for MIPS MMU */ -+#define RGX_MIPS_MMUCTRL_PDE_PROTMASK 0 -+/* Currently there is no page catalog for MIPS MMU */ -+#define RGX_MIPS_MMUCTRL_PCE_PROTMASK 0 -+ -+ -+static MMU_PxE_CONFIG sRGXMMUPCEConfig; -+static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig; -+ -+ -+/* -+ * -+ * Configuration for heaps with 4kB Data-Page size -+ * -+ */ -+ -+static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP; -+static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP; -+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP; -+static MMU_PAGESIZECONFIG gsPageSizeConfig4KB; -+ -+ -+/* -+ * -+ * Configuration for heaps with 16kB Data-Page size -+ * -+ */ -+ -+static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP; -+static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP; -+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP; -+static MMU_PAGESIZECONFIG gsPageSizeConfig16KB; -+ -+ -+/* -+ * -+ * Configuration for heaps with 64kB Data-Page size -+ * -+ */ -+ -+static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP; -+static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP; -+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP; -+static MMU_PAGESIZECONFIG gsPageSizeConfig64KB; -+ -+ -+/* -+ * -+ * Configuration for heaps with 256kB Data-Page size -+ * -+ */ -+ -+static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP; -+static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP; -+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP; -+static MMU_PAGESIZECONFIG gsPageSizeConfig256KB; -+ -+ -+/* -+ * -+ * Configuration for heaps with 1MB Data-Page size -+ * -+ */ -+ -+static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP; -+static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP; -+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP; -+static MMU_PAGESIZECONFIG gsPageSizeConfig1MB; -+ -+ -+/* -+ * -+ * Configuration for heaps with 2MB Data-Page size -+ * -+ */ -+ -+static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP; -+static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP; -+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP; -+static MMU_PAGESIZECONFIG gsPageSizeConfig2MB; -+ -+ -+/* Forward declaration of protection bits derivation functions, for -+ the following structure */ -+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); -+static IMG_UINT32 
RGXDerivePCEProt4(IMG_UINT32 uiProtFlags); -+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); -+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags); -+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); -+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags); -+ -+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, -+ const MMU_PxE_CONFIG **ppsMMUPDEConfig, -+ const MMU_PxE_CONFIG **ppsMMUPTEConfig, -+ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, -+ IMG_HANDLE *phPriv); -+ -+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv); -+ -+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize); -+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize); -+ -+static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes; -+ -+/* Cached policy */ -+static IMG_UINT32 gui32CachedPolicy; -+ -+static PVRSRV_ERROR RGXCheckTrampolineAddrs(struct _PVRSRV_DEVICE_NODE_ *psDevNode, -+ MMU_DEVICEATTRIBS *psDevAttrs, -+ IMG_UINT64 *pui64Addr); -+ -+PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ IMG_BOOL bPhysBusAbove32Bit = 0; -+ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PHYS_BUS_WIDTH)) -+ { -+ bPhysBusAbove32Bit = RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32; -+ } -+ -+ sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName = -+ PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]); -+ -+ /* -+ * Setup sRGXMMUPCEConfig, no PC in MIPS MMU currently -+ */ -+ sRGXMMUPCEConfig.ePxLevel = MMU_LEVEL_LAST; -+ sRGXMMUPCEConfig.pszPxLevelStr = "UnD"; -+ sRGXMMUPCEConfig.uiBytesPerEntry = 0; /* 32 bit entries */ -+ -+ sRGXMMUPCEConfig.uiAddrMask = 0; /* Mask to get significant address bits of PC entry */ -+ sRGXMMUPCEConfig.uiAddrShift = 0; /* Shift this many bits to get PD address in PC entry */ -+ sRGXMMUPCEConfig.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; /* Alignment of PD AND PC */ -+ -+ sRGXMMUPCEConfig.uiProtMask = RGX_MIPS_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits of the PC */ -+ sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to have status bits starting with bit 0 */ -+ -+ sRGXMMUPCEConfig.uiValidEnMask = RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */ -+ sRGXMMUPCEConfig.uiValidEnShift = RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to have entry valid bit starting with bit 0 */ -+ -+ /* -+ * Setup sRGXMMUTopLevelDevVAddrConfig -+ */ -+ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = 0; /* Get the PC address bits from a 40 bit virt. address (in a 64bit UINT) */ -+ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = 0; -+ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = 0; -+ -+ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = 0; /* Get the PD address bits from a 40 bit virt. address (in a 64bit UINT) */ -+ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = 0; -+ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = 0; -+ -+ sRGXMMUTopLevelDevVAddrConfig.uiPTIndexMask = IMG_UINT64_C(0xfffffff000); /* Get the PT address bits from a 40 bit virt. 
address (in a 64bit UINT) */ -+ sRGXMMUTopLevelDevVAddrConfig.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; -+ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPT = (RGX_NUM_DRIVERS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; -+ -+/* -+ * -+ * Configuration for heaps with 4kB Data-Page size -+ * -+ */ -+ -+ /* -+ * Setup sRGXMMUPDEConfig_4KBDP. No PD in MIPS MMU currently -+ */ -+ sRGXMMUPDEConfig_4KBDP.ePxLevel = MMU_LEVEL_LAST; -+ sRGXMMUPDEConfig_4KBDP.pszPxLevelStr = "UnD"; -+ sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 0; -+ -+ /* No PD used for MIPS */ -+ sRGXMMUPDEConfig_4KBDP.uiAddrMask = 0; -+ sRGXMMUPDEConfig_4KBDP.uiAddrShift = 0; -+ sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; -+ -+ sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x0); -+ sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 0; -+ -+ sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MIPS_MMUCTRL_PDE_PROTMASK; -+ sRGXMMUPDEConfig_4KBDP.uiProtShift = 0; -+ -+ sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN; -+ sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT; -+ -+ /* -+ * Setup sRGXMMUPTEConfig_4KBDP. -+ */ -+ sRGXMMUPTEConfig_4KBDP.ePxLevel = MMU_LEVEL_1; -+ sRGXMMUPTEConfig_4KBDP.pszPxLevelStr = "PT"; -+ sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; -+ -+ -+ if (bPhysBusAbove32Bit) -+ { -+ sRGXMMUPTEConfig_4KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT; -+ gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT; -+ } -+ else -+ { -+ sRGXMMUPTEConfig_4KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK; -+ gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY; -+ } -+ -+ sRGXMMUPTEConfig_4KBDP.uiAddrShift = RGXMIPSFW_ENTRYLO_PFN_SHIFT; -+ sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; -+ -+ sRGXMMUPTEConfig_4KBDP.uiProtMask = RGXMIPSFW_ENTRYLO_DVG | ~RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK | -+ RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN | RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN; -+ sRGXMMUPTEConfig_4KBDP.uiProtShift = 0; -+ -+ sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGXMIPSFW_ENTRYLO_VALID_EN; -+ sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGXMIPSFW_ENTRYLO_VALID_SHIFT; -+ -+ /* -+ * Setup sRGXMMUDevVAddrConfig_4KBDP -+ */ -+ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = 0; -+ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = 0; -+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = 0; -+ -+ -+ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = 0; -+ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = 0; -+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = 0; -+ -+ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK; -+ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT; -+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = (RGX_NUM_DRIVERS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; -+ -+ -+ sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff); -+ sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0; -+ sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = RGX_FIRMWARE_RAW_HEAP_BASE & IMG_UINT64_C(0x00ffffffff); -+ -+ /* -+ * Setup gsPageSizeConfig4KB -+ */ -+ gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP; -+ gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP; -+ gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP; -+ gsPageSizeConfig4KB.uiRefCount = 0; -+ gsPageSizeConfig4KB.uiMaxRefCount = 0; -+ -+ -+/* -+ * -+ * Configuration for heaps with 16kB Data-Page 
size -+ * -+ */ -+ -+ /* -+ * Setup sRGXMMUPDEConfig_16KBDP -+ */ -+ sRGXMMUPDEConfig_16KBDP.ePxLevel = MMU_LEVEL_LAST; -+ sRGXMMUPDEConfig_16KBDP.pszPxLevelStr = "UnD"; -+ sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 0; -+ -+ sRGXMMUPDEConfig_16KBDP.uiAddrMask = 0; -+ sRGXMMUPDEConfig_16KBDP.uiAddrShift = 0; /* These are for a page directory ENTRY, meaning the address of a PT cropped to suit the PD */ -+ sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the page tables NOT directories */ -+ -+ sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = 0; -+ sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 0; -+ -+ sRGXMMUPDEConfig_16KBDP.uiProtMask = 0; -+ sRGXMMUPDEConfig_16KBDP.uiProtShift = 0; -+ -+ sRGXMMUPDEConfig_16KBDP.uiValidEnMask = 0; -+ sRGXMMUPDEConfig_16KBDP.uiValidEnShift = 0; -+ -+ /* -+ * Setup sRGXMMUPTEConfig_16KBDP. Not supported yet -+ */ -+ sRGXMMUPTEConfig_16KBDP.ePxLevel = MMU_LEVEL_LAST; -+ sRGXMMUPTEConfig_16KBDP.pszPxLevelStr = "UnD"; -+ sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 0; -+ -+ sRGXMMUPTEConfig_16KBDP.uiAddrMask = 0; -+ sRGXMMUPTEConfig_16KBDP.uiAddrShift = 0; /* These are for a page table ENTRY, meaning the address of a PAGE cropped to suit the PD */ -+ sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the pages NOT tables */ -+ -+ sRGXMMUPTEConfig_16KBDP.uiProtMask = 0; -+ sRGXMMUPTEConfig_16KBDP.uiProtShift = 0; -+ -+ sRGXMMUPTEConfig_16KBDP.uiValidEnMask = 0; -+ sRGXMMUPTEConfig_16KBDP.uiValidEnShift = 0; -+ -+ /* -+ * Setup sRGXMMUDevVAddrConfig_16KBDP -+ */ -+ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = 0; -+ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = 0; -+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = 0; -+ -+ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = 0; -+ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = 0; -+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = 0; -+ -+ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = 0; -+ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 0; -+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = 0; -+ -+ sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = 0; -+ sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0; -+ sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0; -+ -+ /* -+ * Setup gsPageSizeConfig16KB -+ */ -+ gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP; -+ gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP; -+ gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP; -+ gsPageSizeConfig16KB.uiRefCount = 0; -+ gsPageSizeConfig16KB.uiMaxRefCount = 0; -+ -+ -+/* -+ * -+ * Configuration for heaps with 64kB Data-Page size. Not supported yet -+ * -+ */ -+ -+ /* -+ * Setup sRGXMMUPDEConfig_64KBDP -+ */ -+ sRGXMMUPDEConfig_64KBDP.ePxLevel = MMU_LEVEL_LAST; -+ sRGXMMUPDEConfig_64KBDP.pszPxLevelStr = "UnD"; -+ sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 0; -+ -+ sRGXMMUPDEConfig_64KBDP.uiAddrMask = 0; -+ sRGXMMUPDEConfig_64KBDP.uiAddrShift = 0; -+ sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 0; -+ -+ sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = 0; -+ sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 0; -+ -+ sRGXMMUPDEConfig_64KBDP.uiProtMask = 0; -+ sRGXMMUPDEConfig_64KBDP.uiProtShift = 0; -+ -+ sRGXMMUPDEConfig_64KBDP.uiValidEnMask = 0; -+ sRGXMMUPDEConfig_64KBDP.uiValidEnShift = 0; -+ -+ /* -+ * Setup sRGXMMUPTEConfig_64KBDP. 
-+ * -+ */ -+ sRGXMMUPTEConfig_64KBDP.ePxLevel = MMU_LEVEL_1; -+ sRGXMMUPTEConfig_64KBDP.pszPxLevelStr = "PT"; -+ sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; -+ -+ if (bPhysBusAbove32Bit) -+ { -+ sRGXMMUPTEConfig_64KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT; -+ gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT; -+ } -+ else -+ { -+ sRGXMMUPTEConfig_64KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK; -+ gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY; -+ } -+ -+ /* Even while using 64K pages, MIPS still aligns addresses to 4K */ -+ sRGXMMUPTEConfig_64KBDP.uiAddrShift = RGXMIPSFW_ENTRYLO_PFN_SHIFT; -+ sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; -+ -+ sRGXMMUPTEConfig_64KBDP.uiProtMask = RGXMIPSFW_ENTRYLO_DVG | ~RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK | -+ RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN | RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN; -+ sRGXMMUPTEConfig_64KBDP.uiProtShift = 0; -+ -+ sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGXMIPSFW_ENTRYLO_VALID_EN; -+ sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGXMIPSFW_ENTRYLO_VALID_SHIFT; -+ -+ /* -+ * Setup sRGXMMUDevVAddrConfig_64KBDP. -+ */ -+ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = 0; -+ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = 0; -+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = 0; -+ -+ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = 0; -+ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = 0; -+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = 0; -+ -+ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00ffff0000); -+ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_64K; -+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = (RGX_NUM_DRIVERS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_64K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; -+ -+ sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff); -+ sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0; -+ sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = RGX_FIRMWARE_RAW_HEAP_BASE & IMG_UINT64_C(0x00ffffffff); -+ -+ /* -+ * Setup gsPageSizeConfig64KB. -+ */ -+ gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP; -+ gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP; -+ gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP; -+ gsPageSizeConfig64KB.uiRefCount = 0; -+ gsPageSizeConfig64KB.uiMaxRefCount = 0; -+ -+ -+/* -+ * -+ * Configuration for heaps with 256kB Data-Page size. 
Not supported yet -+ * -+ */ -+ -+ /* -+ * Setup sRGXMMUPDEConfig_256KBDP -+ */ -+ sRGXMMUPDEConfig_256KBDP.ePxLevel = MMU_LEVEL_LAST; -+ sRGXMMUPDEConfig_256KBDP.pszPxLevelStr = "UnD"; -+ sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 0; -+ -+ sRGXMMUPDEConfig_256KBDP.uiAddrMask = 0; -+ sRGXMMUPDEConfig_256KBDP.uiAddrShift = 0; -+ sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 0; -+ -+ sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = 0; -+ sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 0; -+ -+ sRGXMMUPDEConfig_256KBDP.uiProtMask = 0; -+ sRGXMMUPDEConfig_256KBDP.uiProtShift = 0; -+ -+ sRGXMMUPDEConfig_256KBDP.uiValidEnMask = 0; -+ sRGXMMUPDEConfig_256KBDP.uiValidEnShift = 0; -+ -+ /* -+ * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP -+ */ -+ sRGXMMUPTEConfig_256KBDP.ePxLevel = MMU_LEVEL_LAST; -+ sRGXMMUPTEConfig_256KBDP.pszPxLevelStr = "UnD"; -+ sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 0; -+ -+ sRGXMMUPTEConfig_256KBDP.uiAddrMask = 0; -+ sRGXMMUPTEConfig_256KBDP.uiAddrShift = 0; -+ sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 0; -+ -+ sRGXMMUPTEConfig_256KBDP.uiProtMask = 0; -+ sRGXMMUPTEConfig_256KBDP.uiProtShift = 0; -+ -+ sRGXMMUPTEConfig_256KBDP.uiValidEnMask = 0; -+ sRGXMMUPTEConfig_256KBDP.uiValidEnShift = 0; -+ -+ /* -+ * Setup sRGXMMUDevVAddrConfig_256KBDP -+ */ -+ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = 0; -+ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = 0; -+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = 0; -+ -+ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = 0; -+ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = 0; -+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = 0; -+ -+ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = 0; -+ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 0; -+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = 0; -+ -+ sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = 0; -+ sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0; -+ sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0; -+ -+ /* -+ * Setup gsPageSizeConfig256KB -+ */ -+ gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP; -+ gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP; -+ gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP; -+ gsPageSizeConfig256KB.uiRefCount = 0; -+ gsPageSizeConfig256KB.uiMaxRefCount = 0; -+ -+ /* -+ * Setup sRGXMMUPDEConfig_1MBDP. 
Not supported yet -+ */ -+ sRGXMMUPDEConfig_1MBDP.ePxLevel = MMU_LEVEL_LAST; -+ sRGXMMUPDEConfig_1MBDP.pszPxLevelStr = "UnD"; -+ sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 0; -+ -+ sRGXMMUPDEConfig_1MBDP.uiAddrMask = 0; -+ sRGXMMUPDEConfig_1MBDP.uiAddrShift = 0; -+ sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 0; -+ -+ sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = 0; -+ sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 0; -+ -+ sRGXMMUPDEConfig_1MBDP.uiProtMask = 0; -+ sRGXMMUPDEConfig_1MBDP.uiProtShift = 0; -+ -+ sRGXMMUPDEConfig_1MBDP.uiValidEnMask = 0; -+ sRGXMMUPDEConfig_1MBDP.uiValidEnShift = 0; -+ -+ /* -+ * Setup sRGXMMUPTEConfig_1MBDP -+ */ -+ sRGXMMUPTEConfig_1MBDP.ePxLevel = MMU_LEVEL_LAST; -+ sRGXMMUPTEConfig_1MBDP.pszPxLevelStr = "UnD"; -+ sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 0; -+ -+ sRGXMMUPTEConfig_1MBDP.uiAddrMask = 0; -+ sRGXMMUPTEConfig_1MBDP.uiAddrShift = 0; -+ sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 0; -+ -+ sRGXMMUPTEConfig_1MBDP.uiProtMask = 0; -+ sRGXMMUPTEConfig_1MBDP.uiProtShift = 0; -+ -+ sRGXMMUPTEConfig_1MBDP.uiValidEnMask = 0; -+ sRGXMMUPTEConfig_1MBDP.uiValidEnShift = 0; -+ -+ /* -+ * Setup sRGXMMUDevVAddrConfig_1MBDP -+ */ -+ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = 0; -+ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = 0; -+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = 0; -+ -+ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = 0; -+ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = 0; -+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = 0; -+ -+ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = 0; -+ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 0; -+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = 0; -+ -+ sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = 0; -+ sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0; -+ sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0; -+ -+ /* -+ * Setup gsPageSizeConfig1MB -+ */ -+ gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP; -+ gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP; -+ gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP; -+ gsPageSizeConfig1MB.uiRefCount = 0; -+ gsPageSizeConfig1MB.uiMaxRefCount = 0; -+ -+ /* -+ * Setup sRGXMMUPDEConfig_2MBDP. 
Not supported yet -+ */ -+ sRGXMMUPDEConfig_2MBDP.ePxLevel = MMU_LEVEL_LAST; -+ sRGXMMUPDEConfig_2MBDP.pszPxLevelStr = "UnD"; -+ sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 0; -+ -+ sRGXMMUPDEConfig_2MBDP.uiAddrMask = 0; -+ sRGXMMUPDEConfig_2MBDP.uiAddrShift = 0; -+ sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 0; -+ -+ sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = 0; -+ sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 0; -+ -+ sRGXMMUPDEConfig_2MBDP.uiProtMask = 0; -+ sRGXMMUPDEConfig_2MBDP.uiProtShift = 0; -+ -+ sRGXMMUPDEConfig_2MBDP.uiValidEnMask = 0; -+ sRGXMMUPDEConfig_2MBDP.uiValidEnShift = 0; -+ -+ /* -+ * Setup sRGXMMUPTEConfig_2MBDP -+ */ -+ sRGXMMUPTEConfig_2MBDP.ePxLevel = MMU_LEVEL_LAST; -+ sRGXMMUPTEConfig_2MBDP.pszPxLevelStr = "UnD"; -+ sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 0; -+ -+ sRGXMMUPTEConfig_2MBDP.uiAddrMask = 0; -+ sRGXMMUPTEConfig_2MBDP.uiAddrShift = 0; -+ sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 0; -+ -+ sRGXMMUPTEConfig_2MBDP.uiProtMask = 0; -+ sRGXMMUPTEConfig_2MBDP.uiProtShift = 0; -+ -+ sRGXMMUPTEConfig_2MBDP.uiValidEnMask = 0; -+ sRGXMMUPTEConfig_2MBDP.uiValidEnShift = 0; -+ -+ /* -+ * Setup sRGXMMUDevVAddrConfig_2MBDP -+ */ -+ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = 0; -+ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = 0; -+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = 0; -+ -+ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = 0; -+ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = 0; -+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = 0; -+ -+ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = 0; -+ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 0; -+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = 0; -+ -+ sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = 0; -+ sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0; -+ sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0; -+ -+ /* -+ * Setup gsPageSizeConfig2MB -+ */ -+ gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP; -+ gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP; -+ gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP; -+ gsPageSizeConfig2MB.uiRefCount = 0; -+ gsPageSizeConfig2MB.uiMaxRefCount = 0; -+ -+ /* -+ * Setup sRGXMMUDeviceAttributes -+ */ -+ sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_MIPS_MICROAPTIV; -+ -+ /* -+ * The page table fits in one or more big physically adjacent pages, -+ * at most as big as the page table itself. -+ * To calculate its alignment/page size, calculate the log2 size of the page -+ * table taking into account all OSes, then round that down to a valid MIPS -+ * log2 page size (12, 14, 16 for a 4K, 16K, 64K page size). 
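The ui32BaseAlign assignment just below implements the rule in this comment: add the ceiling log2 of the number of supported drivers to the log2 page-table size, round down to an even value so it lands on a valid MIPS log2 page size, then cap it at 64K alignment. A worked example with assumed inputs (the value 15 for the 4K page-table log2 size and the driver count of 8 are illustrative, not the real defines):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) (((a) < (b)) ? (a) : (b))

/* Smallest k with (1u << k) >= n, i.e. a ceiling log2 (n must be >= 1). */
static uint32_t ceil_log2(uint32_t n)
{
    uint32_t k = 0;
    while ((1u << k) < n)
        k++;
    return k;
}

int main(void)
{
    /* Assumed inputs for illustration; the real values come from the rgxmipsfw headers. */
    const uint32_t log2_pagetable_size_4k = 15;   /* not the real define */
    const uint32_t log2_page_size_64k     = 16;
    const uint32_t num_drivers            = 8;

    uint32_t align = (ceil_log2(num_drivers) + log2_pagetable_size_4k) & ~1u;

    /* 256K alignment would be too hard to achieve, so cap at 64K. */
    align = MIN(align, log2_page_size_64k);

    printf("log2 base alignment = %" PRIu32 " (%" PRIu32 " KB)\n",
           align, (uint32_t)((1u << align) / 1024u));
    return 0;
}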
-+ */ -+ sRGXMMUDeviceAttributes.ui32BaseAlign = -+ (CeilLog2(RGX_NUM_DRIVERS_SUPPORTED) + RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) & ~1U; -+ -+ /* 256K alignment might be too hard to achieve, fall back to 64K */ -+ sRGXMMUDeviceAttributes.ui32BaseAlign = -+ MIN(sRGXMMUDeviceAttributes.ui32BaseAlign, RGXMIPSFW_LOG2_PAGE_SIZE_64K); -+ -+ -+ -+ /* The base configuration is set to 4kB pages*/ -+ sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPTEConfig_4KBDP; -+ sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig; -+ -+ /* Functions for deriving page table/dir/cat protection bits */ -+ sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8; -+ sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4; -+ sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8; -+ sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4; -+ sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8; -+ sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4; -+ -+ /* Functions for establishing configurations for PDE/PTE/DEVVADDR -+ on per-heap basis */ -+ sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB; -+ sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB; -+ -+ sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4; -+ sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8; -+ -+ psDeviceNode->psFirmwareMMUDevAttrs = &sRGXMMUDeviceAttributes; -+ -+ psDeviceNode->pfnValidateOrTweakPhysAddrs = RGXCheckTrampolineAddrs; -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR RGXCheckTrampolineAddrs(struct _PVRSRV_DEVICE_NODE_ *psDevNode, -+ MMU_DEVICEATTRIBS *psDevAttrs, -+ IMG_UINT64 *pui64Addr) -+{ -+ if (PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, MIPS)) -+ { -+ /* -+ * If mapping for the MIPS FW context, check for sensitive PAs -+ */ -+ if (psDevAttrs == psDevNode->psFirmwareMMUDevAttrs) -+ { -+ PVRSRV_RGXDEV_INFO *psDevice = (PVRSRV_RGXDEV_INFO *)psDevNode->pvDevice; -+ -+ if ((RGX_GET_FEATURE_VALUE(psDevice, PHYS_BUS_WIDTH) == 32) && -+ RGXMIPSFW_SENSITIVE_ADDR(*pui64Addr)) -+ { -+ *pui64Addr = psDevice->psTrampoline->sPhysAddr.uiAddr + RGXMIPSFW_TRAMPOLINE_OFFSET(*pui64Addr); -+ } -+ /* FIX_HW_BRN_63553 is mainlined for all MIPS cores */ -+ else if (*pui64Addr == 0x0 && !psDevice->sLayerParams.bDevicePA0IsValid) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s attempt to map addr 0x0 in the FW but 0x0 is not considered valid.", __func__)); -+ return PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE; -+ } -+ } -+ } -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = PVRSRV_OK; -+ -+#if defined(PDUMP) -+ psDeviceNode->pfnMMUGetContextID = NULL; -+#endif -+ -+ psDeviceNode->psFirmwareMMUDevAttrs = NULL; -+ -+#if defined(DEBUG) -+ PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:")); -+ PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d", -+ gsPageSizeConfig4KB.uiMaxRefCount)); -+ PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d", -+ gsPageSizeConfig4KB.uiRefCount)); -+ PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d", -+ gsPageSizeConfig16KB.uiMaxRefCount)); -+ PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d", -+ gsPageSizeConfig16KB.uiRefCount)); -+ PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d", -+ gsPageSizeConfig64KB.uiMaxRefCount)); -+ PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d", -+ gsPageSizeConfig64KB.uiRefCount)); -+ 
PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d", -+ gsPageSizeConfig256KB.uiMaxRefCount)); -+ PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d", -+ gsPageSizeConfig256KB.uiRefCount)); -+ PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d", -+ gsPageSizeConfig1MB.uiMaxRefCount)); -+ PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d", -+ gsPageSizeConfig1MB.uiRefCount)); -+ PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d", -+ gsPageSizeConfig2MB.uiMaxRefCount)); -+ PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d", -+ gsPageSizeConfig2MB.uiRefCount)); -+#endif -+ if (gsPageSizeConfig4KB.uiRefCount > 0 || -+ gsPageSizeConfig16KB.uiRefCount > 0 || -+ gsPageSizeConfig64KB.uiRefCount > 0 || -+ gsPageSizeConfig256KB.uiRefCount > 0 || -+ gsPageSizeConfig1MB.uiRefCount > 0 || -+ gsPageSizeConfig2MB.uiRefCount > 0 -+ ) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)")); -+ } -+ -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function RGXDerivePCEProt4 -+@Description calculate the PCE protection flags based on a 4 byte entry -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags) -+{ -+ PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU")); -+ return 0; -+} -+ -+ -+/*************************************************************************/ /*! -+@Function RGXDerivePCEProt8 -+@Description calculate the PCE protection flags based on an 8 byte entry -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) -+{ -+ PVR_UNREFERENCED_PARAMETER(uiProtFlags); -+ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize); -+ -+ PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU")); -+ return 0; -+} -+ -+ -+/*************************************************************************/ /*! -+@Function RGXDerivePDEProt4 -+@Description derive the PDE protection flags based on a 4 byte entry -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(uiProtFlags); -+ PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU")); -+ return 0; -+} -+ -+ -+/*************************************************************************/ /*! -+@Function RGXDerivePDEProt8 -+@Description derive the PDE protection flags based on an 8 byte entry -+ -+@Input uiLog2DataPageSize The log2 of the required page size. -+ E.g, for 4KiB pages, this parameter must be 12. -+ For 2MiB pages, it must be set to 21. -+ -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) -+{ -+ PVR_UNREFERENCED_PARAMETER(uiProtFlags); -+ PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU")); -+ return 0; -+} -+ -+ -+/*************************************************************************/ /*! 
-+@Function RGXDerivePTEProt4 -+@Description calculate the PTE protection flags based on a 4 byte entry -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags) -+{ -+ IMG_UINT32 ui32MMUFlags = 0; -+ -+ if (((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE)) -+ { -+ /* read/write */ -+ ui32MMUFlags |= RGXMIPSFW_ENTRYLO_DIRTY_EN; -+ } -+ else if (MMU_PROTFLAGS_READABLE & uiProtFlags) -+ { -+ /* read only */ -+ } -+ else if (MMU_PROTFLAGS_WRITEABLE & uiProtFlags) -+ { -+ /* write only */ -+ ui32MMUFlags |= RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN; -+ } -+ else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: neither read nor write specified...")); -+ } -+ -+ /* cache coherency */ -+ if (MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: cache coherency not supported for MIPS caches")); -+ } -+ -+ /* cache setup */ -+ if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0) -+ { -+ ui32MMUFlags |= RGXMIPSFW_ENTRYLO_UNCACHED; -+ } -+ else -+ { -+ ui32MMUFlags |= gui32CachedPolicy << -+ RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT; -+ } -+ -+ if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0) -+ { -+ ui32MMUFlags |= RGXMIPSFW_ENTRYLO_VALID_EN; -+ ui32MMUFlags |= RGXMIPSFW_ENTRYLO_GLOBAL_EN; -+ } -+ -+ if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags) -+ { -+ /* PVR_DPF((PVR_DBG_WARNING, "RGXDerivePTEProt4: PMMETA Protect not existent for MIPS, option discarded")); */ -+ } -+ -+ return ui32MMUFlags; -+} -+ -+/*************************************************************************/ /*! -+@Function RGXDerivePTEProt8 -+@Description calculate the PTE protection flags based on an 8 byte entry -+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) -+{ -+ PVR_UNREFERENCED_PARAMETER(uiProtFlags); -+ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize); -+ -+ PVR_DPF((PVR_DBG_ERROR, "8-byte PTE not supported on this device")); -+ -+ return 0; -+} -+ -+ -+/*************************************************************************/ /*! -+@Function RGXGetPageSizeConfig -+@Description Set up configuration for variable sized data pages. -+ RGXPutPageSizeConfigCB has to be called to ensure correct -+ refcounting. 
-+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, -+ const MMU_PxE_CONFIG **ppsMMUPDEConfig, -+ const MMU_PxE_CONFIG **ppsMMUPTEConfig, -+ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, -+ IMG_HANDLE *phPriv) -+{ -+ MMU_PAGESIZECONFIG *psPageSizeConfig; -+ -+ switch (uiLog2DataPageSize) -+ { -+ case RGXMIPSFW_LOG2_PAGE_SIZE_64K: -+ psPageSizeConfig = &gsPageSizeConfig64KB; -+ break; -+ case RGXMIPSFW_LOG2_PAGE_SIZE_4K: -+ psPageSizeConfig = &gsPageSizeConfig4KB; -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x", -+ uiLog2DataPageSize)); -+ *phPriv = NULL; -+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; -+ } -+ -+ /* Refer caller's pointers to the data */ -+ *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig; -+ *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig; -+ *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig; -+ -+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT) -+ /* Increment ref-count - not that we're allocating anything here -+ (I'm using static structs), but one day we might, so we want -+ the Get/Put code to be balanced properly */ -+ psPageSizeConfig->uiRefCount++; -+ -+ /* This is purely for debug statistics */ -+ psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount, -+ psPageSizeConfig->uiRefCount); -+#endif -+ -+ *phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize; -+ PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv); -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function RGXPutPageSizeConfig -+@Description Tells this code that the mmu module is done with the -+ configurations set in RGXGetPageSizeConfig. This can -+ be a no-op. -+ Called after RGXGetPageSizeConfigCB. 
-+@Return PVRSRV_ERROR -+*/ /**************************************************************************/ -+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv) -+{ -+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT) -+ MMU_PAGESIZECONFIG *psPageSizeConfig; -+ IMG_UINT32 uiLog2DataPageSize; -+ -+ uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv; -+ -+ switch (uiLog2DataPageSize) -+ { -+ case RGXMIPSFW_LOG2_PAGE_SIZE_64K: -+ psPageSizeConfig = &gsPageSizeConfig64KB; -+ break; -+ case RGXMIPSFW_LOG2_PAGE_SIZE_4K: -+ psPageSizeConfig = &gsPageSizeConfig4KB; -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x", -+ uiLog2DataPageSize)); -+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; -+ } -+ -+ /* Ref-count here is not especially useful, but it's an extra -+ check that the API is being used correctly */ -+ psPageSizeConfig->uiRefCount--; -+#else -+ PVR_UNREFERENCED_PARAMETER(hPriv); -+#endif -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize) -+{ -+ PVR_UNREFERENCED_PARAMETER(ui32PDE); -+ PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize); -+ PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS")); -+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; -+} -+ -+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize) -+{ -+ PVR_UNREFERENCED_PARAMETER(ui64PDE); -+ PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize); -+ PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS")); -+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; -+} -+ -+void RGXMipsCheckFaultAddress(MMU_CONTEXT *psFwMMUCtx, -+ IMG_UINT32 ui32FwVA, -+ MMU_FAULT_DATA *psOutFaultData) -+{ -+ IMG_UINT32 *pui32PageTable = NULL; -+ PVRSRV_ERROR eError = MMU_AcquireCPUBaseAddr(psFwMMUCtx, (void**) &pui32PageTable); -+ MMU_LEVEL_DATA *psMMULevelData; -+ IMG_UINT32 ui32FwHeapBase = (IMG_UINT32) (RGX_FIRMWARE_RAW_HEAP_BASE & UINT_MAX); -+ IMG_UINT32 ui32PageSize = OSGetPageSize(); -+ -+ /* MIPS Firmware CPU must use the same page size as the Host */ -+ IMG_UINT32 ui32PTEIndex = ((ui32FwVA & ~(ui32PageSize - 1)) - ui32FwHeapBase) / ui32PageSize; -+ -+ psOutFaultData->eTopLevel = MMU_LEVEL_1; -+ psOutFaultData->eType = MMU_FAULT_TYPE_NON_PM; -+ -+ psMMULevelData = &psOutFaultData->sLevelData[MMU_LEVEL_1]; -+ psMMULevelData->uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; -+ psMMULevelData->ui32Index = ui32PTEIndex; -+ psMMULevelData->ui32NumOfEntries = RGX_FIRMWARE_RAW_HEAP_SIZE / ui32PageSize; -+ -+ if ((eError == PVRSRV_OK) && (pui32PageTable != NULL)) -+ { -+ psMMULevelData->ui64Address = pui32PageTable[ui32PTEIndex]; -+ } -+ else -+ { -+ psMMULevelData->ui64Address = 0U; -+ } -+ -+ psMMULevelData->psDebugStr = BITMASK_HAS(psMMULevelData->ui64Address, -+ RGXMIPSFW_TLB_VALID) ? -+ ("valid") : ("not valid"); -+} -diff --git a/drivers/gpu/drm/img-rogue/rgxmipsmmuinit.h b/drivers/gpu/drm/img-rogue/rgxmipsmmuinit.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxmipsmmuinit.h -@@ -0,0 +1,97 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device specific initialisation routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device specific MMU initialisation for the MIPS firmware -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
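The PTE-index arithmetic in RGXMipsCheckFaultAddress above can be exercised in isolation; the sketch below is an illustration only, with a hypothetical firmware heap base, page size and faulting address standing in for RGX_FIRMWARE_RAW_HEAP_BASE, OSGetPageSize() and the real fault VA.

/* Illustrative sketch only, not part of the patch: the faulting FW
 * virtual address is truncated to a page boundary, offset from the
 * firmware heap base, and divided by the CPU page size (the MIPS
 * firmware and the host share a page size). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t fw_heap_base = 0xC0000000u; /* hypothetical RGX_FIRMWARE_RAW_HEAP_BASE (low 32 bits) */
    uint32_t page_size    = 0x1000u;     /* hypothetical host page size: 4 KiB */
    uint32_t fw_va        = 0xC0012345u; /* hypothetical faulting firmware VA */

    uint32_t pte_index = ((fw_va & ~(page_size - 1u)) - fw_heap_base) / page_size;

    printf("FW VA 0x%08x -> PTE index %u\n", (unsigned)fw_va, (unsigned)pte_index);
    return 0;
}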
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+/* NB: this file is not to be included arbitrarily. It exists solely -+ for the linkage between rgxinit.c and rgxmmuinit.c, the former -+ being otherwise cluttered by the contents of the latter */ -+ -+#ifndef SRVKM_RGXMIPSMMUINIT_H -+#define SRVKM_RGXMIPSMMUINIT_H -+ -+#include "device.h" -+#include "img_types.h" -+#include "mmu_common.h" -+#include "img_defs.h" -+#include "rgx_mips.h" -+ -+/* -+ -+ Labelling of fields within virtual address. 
No PD and PC are used currently for -+ the MIPS MMU -+*/ -+/* -+Page Table entry # -+*/ -+#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U) -+#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF)) -+ -+ -+/* PC entries related definitions */ -+/* No PC is currently used for MIPS MMU */ -+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN (0U) -+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT (0U) -+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_CLRMSK (0U) -+ -+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_SHIFT (0U) -+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_CLRMSK (0U) -+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_EN (0U) -+ -+/* PD entries related definitions */ -+/* No PD is currently used for MIPS MMU */ -+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN (0U) -+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT (0U) -+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_CLRMSK (0U) -+ -+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_SHIFT (0U) -+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_CLRMSK (0U) -+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_EN (0U) -+ -+ -+PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode); -+PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+void RGXMipsCheckFaultAddress(MMU_CONTEXT *psFwMMUCtx, -+ IMG_UINT32 ui32FwVA, -+ MMU_FAULT_DATA *psOutFaultData); -+ -+#endif /* #ifndef SRVKM_RGXMIPSMMUINIT_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxmmuinit.c b/drivers/gpu/drm/img-rogue/rgxmmuinit.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxmmuinit.c -@@ -0,0 +1,1147 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device specific initialisation routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device specific MMU initialisation -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
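The SHIFT/CLRMSK pair exported by rgxmipsmmuinit.h above follows the usual convention that the clear-mask zeroes the field of interest; a minimal sketch of how a caller would extract the page-table index is shown below. It is an illustration only, using the 4K values from the header and a hypothetical virtual address.

/* Illustrative sketch only, not part of the patch: apply the CLRMSK to
 * isolate the index field, then shift it down into place. */
#include <stdio.h>
#include <stdint.h>

#define PT_INDEX_SHIFT  12u                             /* RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT */
#define PT_INDEX_CLRMSK UINT64_C(0xFFFFFFFF00000FFF)    /* RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK */

int main(void)
{
    uint64_t va = UINT64_C(0x0000000000ABC123); /* hypothetical firmware VA */

    /* Clear everything except the index field, then shift it down. */
    uint32_t pt_index = (uint32_t)((va & ~PT_INDEX_CLRMSK) >> PT_INDEX_SHIFT);

    printf("VA 0x%llx -> PT index %u\n", (unsigned long long)va, (unsigned)pt_index);
    return 0;
}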
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+ */ /**************************************************************************/ -+#include "rgxmmuinit.h" -+#include "rgxmmudefs_km.h" -+ -+#include "device.h" -+#include "img_types.h" -+#include "img_defs.h" -+#include "mmu_common.h" -+#include "pdump_mmu.h" -+ -+#include "pvr_debug.h" -+#include "pvrsrv_error.h" -+#include "rgx_memallocflags.h" -+#include "rgx_heaps.h" -+#include "pdump_km.h" -+ -+ -+/* useful macros */ -+/* units represented in a bitfield */ -+#define UNITS_IN_BITFIELD(Mask, Shift) ((Mask >> Shift) + 1) -+ -+ -+/* -+ * Bits of PT, PD and PC not involving addresses -+ */ -+ -+#define RGX_MMUCTRL_PTE_PROTMASK (RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN | \ -+ RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN | \ -+ RGX_MMUCTRL_PT_DATA_PM_SRC_EN | \ -+ RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN | \ -+ RGX_MMUCTRL_PT_DATA_CC_EN | \ -+ RGX_MMUCTRL_PT_DATA_READ_ONLY_EN | \ -+ RGX_MMUCTRL_PT_DATA_VALID_EN) -+ -+#define RGX_MMUCTRL_PDE_PROTMASK (RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN | \ -+ ~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK | \ -+ RGX_MMUCTRL_PD_DATA_VALID_EN) -+ -+#define RGX_MMUCTRL_PCE_PROTMASK (RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN | \ -+ RGX_MMUCTRL_PC_DATA_VALID_EN) -+ -+ -+ -+static MMU_PxE_CONFIG sRGXMMUPCEConfig; -+static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig; -+ -+ -+/* -+ * -+ * Configuration for heaps with 4kB Data-Page size -+ * -+ */ -+ -+static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP; -+static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP; -+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP; -+static MMU_PAGESIZECONFIG gsPageSizeConfig4KB; -+ -+ -+/* -+ * -+ * Configuration for heaps with 16kB Data-Page size -+ * -+ */ -+ -+static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP; -+static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP; -+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP; -+static MMU_PAGESIZECONFIG gsPageSizeConfig16KB; -+ -+ -+/* -+ * -+ * Configuration for heaps with 64kB Data-Page size -+ * -+ */ -+ -+static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP; -+static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP; -+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP; -+static MMU_PAGESIZECONFIG gsPageSizeConfig64KB; -+ -+ -+/* -+ * -+ * Configuration for heaps with 256kB Data-Page size -+ * -+ */ -+ -+static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP; -+static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP; -+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP; -+static MMU_PAGESIZECONFIG gsPageSizeConfig256KB; -+ -+ -+/* -+ * -+ * Configuration for heaps with 1MB Data-Page size -+ * -+ */ -+ -+static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP; -+static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP; -+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP; -+static MMU_PAGESIZECONFIG gsPageSizeConfig1MB; -+ -+ -+/* -+ * -+ * Configuration for heaps with 2MB Data-Page size -+ * -+ */ -+ -+static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP; -+static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP; -+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP; -+static 
MMU_PAGESIZECONFIG gsPageSizeConfig2MB; -+ -+ -+/* Forward declaration of protection bits derivation functions, for -+ the following structure */ -+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); -+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags); -+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); -+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags); -+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); -+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags); -+ -+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, -+ const MMU_PxE_CONFIG **ppsMMUPDEConfig, -+ const MMU_PxE_CONFIG **ppsMMUPTEConfig, -+ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, -+ IMG_HANDLE *phPriv); -+ -+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv); -+ -+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize); -+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize); -+ -+static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes; -+ -+PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ /* Setup of Px Entries: -+ * -+ * -+ * PAGE TABLE (8 Byte): -+ * -+ * | 62 | 61...40 | 39...12 (varies) | 11...6 | 5 | 4 | 3 | 2 | 1 | 0 | -+ * | PM/Meta protect | VP Page (39:18) | Physical Page | VP Page (17:12) | Entry Pending | PM src | SLC Bypass Ctrl | Cache Coherency | Read Only | Valid | -+ * -+ * -+ * PAGE DIRECTORY (8 Byte): -+ * -+ * | 40 | 39...5 (varies) | 4 | 3...1 | 0 | -+ * | Entry Pending | Page Table base address | (reserved) | Page Size | Valid | -+ * -+ * -+ * PAGE CATALOGUE (4 Byte): -+ * -+ * | 31...4 | 3...2 | 1 | 0 | -+ * | Page Directory base address | (reserved) | Entry Pending | Valid | -+ * -+ */ -+ -+ -+ /* Example how to get the PD address from a PC entry. -+ * The procedure is the same for PD and PT entries to retrieve PT and Page addresses: -+ * -+ * 1) sRGXMMUPCEConfig.uiAddrMask applied to PC entry with '&': -+ * | 31...4 | 3...2 | 1 | 0 | -+ * | PD Addr | 0 | 0 | 0 | -+ * -+ * 2) sRGXMMUPCEConfig.uiAddrShift applied with '>>': -+ * | 27...0 | -+ * | PD Addr | -+ * -+ * 3) sRGXMMUPCEConfig.uiAddrLog2Align applied with '<<': -+ * | 39...0 | -+ * | PD Addr | -+ * -+ */ -+ -+ -+ sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName = -+ PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL]); -+ -+ /* -+ * Setup sRGXMMUPCEConfig -+ */ -+ sRGXMMUPCEConfig.ePxLevel = MMU_LEVEL_3; -+ sRGXMMUPCEConfig.pszPxLevelStr = "PC"; -+ sRGXMMUPCEConfig.uiBytesPerEntry = 4; /* 32 bit entries */ -+ -+ sRGXMMUPCEConfig.uiAddrMask = 0xfffffff0; /* Mask to get significant address bits of PC entry i.e. the address of the PD */ -+ sRGXMMUPCEConfig.uiAddrShift = 4; /* Shift this many bits to get PD address */ -+ sRGXMMUPCEConfig.uiAddrLog2Align = 12; /* Alignment of PD physical addresses. 
*/ -+ -+ sRGXMMUPCEConfig.uiProtMask = RGX_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits (pending | valid)*/ -+ sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to get the status bits */ -+ -+ sRGXMMUPCEConfig.uiValidEnMask = RGX_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */ -+ sRGXMMUPCEConfig.uiValidEnShift = RGX_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to get entry valid bit */ -+ -+ /* -+ * Setup sRGXMMUTopLevelDevVAddrConfig -+ */ -+ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; /* Mask to get PC index applied to a 40 bit virt. device address */ -+ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; /* Shift a 40 bit virt. device address by this amount to get the PC index */ -+ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask, -+ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift)); -+ -+ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; /* Mask to get PD index applied to a 40 bit virt. device address */ -+ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; /* Shift a 40 bit virt. device address by this amount to get the PD index */ -+ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask, -+ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift)); -+ -+ /* -+ * -+ * Configuration for heaps with 4kB Data-Page size -+ * -+ * Bit 39 30 29 21 20 12 11 0 -+ * | \ / \ / \ / \ -+ * |....PCE...|...PDE...|...PTE...|....VAddr...| -+ */ -+ -+ /* -+ * Setup sRGXMMUPDEConfig_4KBDP -+ */ -+ sRGXMMUPDEConfig_4KBDP.ePxLevel = MMU_LEVEL_2; -+ sRGXMMUPDEConfig_4KBDP.pszPxLevelStr = "PD"; -+ sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 8; -+ -+ sRGXMMUPDEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); -+ sRGXMMUPDEConfig_4KBDP.uiAddrShift = 12; -+ sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = 12; -+ -+ sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); -+ sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 1; -+ -+ sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; -+ sRGXMMUPDEConfig_4KBDP.uiProtShift = 0; -+ -+ sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; -+ sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; -+ -+ /* -+ * Setup sRGXMMUPTEConfig_4KBDP -+ */ -+ sRGXMMUPTEConfig_4KBDP.ePxLevel = MMU_LEVEL_1; -+ sRGXMMUPTEConfig_4KBDP.pszPxLevelStr = "PT"; -+ sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 8; -+ -+ sRGXMMUPTEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffff000); -+ sRGXMMUPTEConfig_4KBDP.uiAddrShift = 12; -+ sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = 12; /* Alignment of the physical addresses of the pages NOT PTs */ -+ -+ sRGXMMUPTEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; -+ sRGXMMUPTEConfig_4KBDP.uiProtShift = 0; -+ -+ sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; -+ sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; -+ -+ /* -+ * Setup sRGXMMUDevVAddrConfig_4KBDP -+ */ -+ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; -+ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; -+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask, -+ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift)); -+ -+ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask 
= ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; -+ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; -+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask, -+ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift)); -+ -+ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK; -+ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT; -+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask, -+ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift)); -+ -+ sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff); -+ sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0; -+ sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = 0; -+ -+ /* -+ * Setup gsPageSizeConfig4KB -+ */ -+ gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP; -+ gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP; -+ gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP; -+ gsPageSizeConfig4KB.uiRefCount = 0; -+ gsPageSizeConfig4KB.uiMaxRefCount = 0; -+ -+ -+ /* -+ * -+ * Configuration for heaps with 16kB Data-Page size -+ * -+ * Bit 39 30 29 21 20 14 13 0 -+ * | \ / \ / \ / \ -+ * |....PCE...|...PDE...|..PTE..|.....VAddr....| -+ * -+ */ -+ -+ /* -+ * Setup sRGXMMUPDEConfig_16KBDP -+ */ -+ sRGXMMUPDEConfig_16KBDP.ePxLevel = MMU_LEVEL_2; -+ sRGXMMUPDEConfig_16KBDP.pszPxLevelStr = "PD"; -+ sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 8; -+ -+ sRGXMMUPDEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); -+ sRGXMMUPDEConfig_16KBDP.uiAddrShift = 10; -+ sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 10; -+ -+ sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); -+ sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 1; -+ -+ sRGXMMUPDEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; -+ sRGXMMUPDEConfig_16KBDP.uiProtShift = 0; -+ -+ sRGXMMUPDEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; -+ sRGXMMUPDEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; -+ -+ /* -+ * Setup sRGXMMUPTEConfig_16KBDP -+ */ -+ sRGXMMUPTEConfig_16KBDP.ePxLevel = MMU_LEVEL_1; -+ sRGXMMUPTEConfig_16KBDP.pszPxLevelStr = "PT"; -+ sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 8; -+ -+ sRGXMMUPTEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xffffffc000); -+ sRGXMMUPTEConfig_16KBDP.uiAddrShift = 14; -+ sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 14; -+ -+ sRGXMMUPTEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; -+ sRGXMMUPTEConfig_16KBDP.uiProtShift = 0; -+ -+ sRGXMMUPTEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; -+ sRGXMMUPTEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; -+ -+ /* -+ * Setup sRGXMMUDevVAddrConfig_16KBDP -+ */ -+ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; -+ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; -+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask, -+ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift)); -+ -+ -+ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; -+ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; -+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask, -+ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift)); -+ -+ -+ 
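A small standalone sketch of the decode procedure described in the comments above (mask the PC entry, shift the field down, then shift it back up by the log2 alignment) and of the UNITS_IN_BITFIELD-style entry count. It is an illustration only: the PC-entry value is hypothetical, and the PT index mask is assumed from the 40-bit layout comment, since RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK itself is defined in rgxmmudefs_km.h rather than here.

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* PCE decode parameters as set up for sRGXMMUPCEConfig above */
    uint32_t pce            = 0x00123450u;   /* hypothetical 4-byte PC entry */
    uint32_t addr_mask      = 0xfffffff0u;   /* uiAddrMask */
    uint32_t addr_shift     = 4u;            /* uiAddrShift */
    uint32_t addr_log2align = 12u;           /* uiAddrLog2Align */

    /* 1) mask, 2) shift down, 3) restore the alignment to get a 40-bit PA */
    uint64_t pd_base = ((uint64_t)(pce & addr_mask) >> addr_shift) << addr_log2align;

    /* Entry count for the 4 KiB PT level: bits 20..12 of the VA (assumed mask) */
    uint64_t pt_index_mask  = UINT64_C(0x00000000001FF000);
    uint32_t pt_index_shift = 12u;
    uint32_t entries_pt     = (uint32_t)((pt_index_mask >> pt_index_shift) + 1u);

    printf("PD base PA: 0x%llx, PT entries per table: %u\n",
           (unsigned long long)pd_base, (unsigned)entries_pt);
    return 0;
}

With the assumed 9-bit index field this yields 512 page-table entries, which is consistent with the 40-bit PCE/PDE/PTE split sketched in the layout comment above.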
sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001fc000); -+ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 14; -+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask, -+ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift)); -+ -+ sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000003fff); -+ sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0; -+ sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0; -+ -+ /* -+ * Setup gsPageSizeConfig16KB -+ */ -+ gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP; -+ gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP; -+ gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP; -+ gsPageSizeConfig16KB.uiRefCount = 0; -+ gsPageSizeConfig16KB.uiMaxRefCount = 0; -+ -+ -+ /* -+ * -+ * Configuration for heaps with 64kB Data-Page size -+ * -+ * Bit 39 30 29 21 20 16 15 0 -+ * | \ / \ / \ / \ -+ * |....PCE...|...PDE...|.PTE.|.....VAddr......| -+ * -+ */ -+ -+ /* -+ * Setup sRGXMMUPDEConfig_64KBDP -+ */ -+ sRGXMMUPDEConfig_64KBDP.ePxLevel = MMU_LEVEL_2; -+ sRGXMMUPDEConfig_64KBDP.pszPxLevelStr = "PD"; -+ sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 8; -+ -+ sRGXMMUPDEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); -+ sRGXMMUPDEConfig_64KBDP.uiAddrShift = 8; -+ sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 8; -+ -+ sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); -+ sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 1; -+ -+ sRGXMMUPDEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; -+ sRGXMMUPDEConfig_64KBDP.uiProtShift = 0; -+ -+ sRGXMMUPDEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; -+ sRGXMMUPDEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; -+ -+ /* -+ * Setup sRGXMMUPTEConfig_64KBDP -+ */ -+ sRGXMMUPTEConfig_64KBDP.ePxLevel = MMU_LEVEL_1; -+ sRGXMMUPTEConfig_64KBDP.pszPxLevelStr = "PT"; -+ sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 8; -+ -+ sRGXMMUPTEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xffffff0000); -+ sRGXMMUPTEConfig_64KBDP.uiAddrShift = 16; -+ sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = 16; -+ -+ sRGXMMUPTEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; -+ sRGXMMUPTEConfig_64KBDP.uiProtShift = 0; -+ -+ sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; -+ sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; -+ -+ /* -+ * Setup sRGXMMUDevVAddrConfig_64KBDP -+ */ -+ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; -+ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; -+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask, -+ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift)); -+ -+ -+ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; -+ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; -+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask, -+ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift)); -+ -+ -+ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001f0000); -+ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = 16; -+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask, -+ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift)); -+ -+ -+ sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = 
IMG_UINT64_C(0x000000ffff); -+ sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0; -+ sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = 0; -+ -+ /* -+ * Setup gsPageSizeConfig64KB -+ */ -+ gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP; -+ gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP; -+ gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP; -+ gsPageSizeConfig64KB.uiRefCount = 0; -+ gsPageSizeConfig64KB.uiMaxRefCount = 0; -+ -+ -+ /* -+ * -+ * Configuration for heaps with 256kB Data-Page size -+ * -+ * Bit 39 30 29 21 20 18 17 0 -+ * | \ / \| |/ \ -+ * |....PCE...|...PDE...|PTE|.......VAddr......| -+ * -+ */ -+ -+ /* -+ * Setup sRGXMMUPDEConfig_256KBDP -+ */ -+ sRGXMMUPDEConfig_256KBDP.ePxLevel = MMU_LEVEL_2; -+ sRGXMMUPDEConfig_256KBDP.pszPxLevelStr = "PD"; -+ sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 8; -+ -+ sRGXMMUPDEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); -+ sRGXMMUPDEConfig_256KBDP.uiAddrShift = 6; -+ sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 6; -+ -+ sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); -+ sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 1; -+ -+ sRGXMMUPDEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; -+ sRGXMMUPDEConfig_256KBDP.uiProtShift = 0; -+ -+ sRGXMMUPDEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; -+ sRGXMMUPDEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; -+ -+ /* -+ * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP -+ */ -+ sRGXMMUPTEConfig_256KBDP.ePxLevel = MMU_LEVEL_1; -+ sRGXMMUPTEConfig_256KBDP.pszPxLevelStr = "PT"; -+ sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 8; -+ -+ sRGXMMUPTEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffc0000); -+ sRGXMMUPTEConfig_256KBDP.uiAddrShift = 18; -+ sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 18; -+ -+ sRGXMMUPTEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; -+ sRGXMMUPTEConfig_256KBDP.uiProtShift = 0; -+ -+ sRGXMMUPTEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; -+ sRGXMMUPTEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; -+ -+ /* -+ * Setup sRGXMMUDevVAddrConfig_256KBDP -+ */ -+ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; -+ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; -+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask, -+ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift)); -+ -+ -+ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; -+ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; -+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask, -+ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift)); -+ -+ -+ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001c0000); -+ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 18; -+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask, -+ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift)); -+ -+ -+ sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000003ffff); -+ sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0; -+ sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0; -+ -+ /* -+ * Setup gsPageSizeConfig256KB -+ */ -+ gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP; -+ gsPageSizeConfig256KB.psPTEConfig = 
&sRGXMMUPTEConfig_256KBDP; -+ gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP; -+ gsPageSizeConfig256KB.uiRefCount = 0; -+ gsPageSizeConfig256KB.uiMaxRefCount = 0; -+ -+ /* -+ * -+ * Configuration for heaps with 1MB Data-Page size -+ * -+ * Bit 39 30 29 21 20 19 0 -+ * | \ / \ | / \ -+ * |....PCE...|...PDE...|.|........VAddr.......| -+ * PTE -+ */ -+ -+ /* -+ * Setup sRGXMMUPDEConfig_1MBDP -+ */ -+ sRGXMMUPDEConfig_1MBDP.ePxLevel = MMU_LEVEL_2; -+ sRGXMMUPDEConfig_1MBDP.pszPxLevelStr = "PD"; -+ sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 8; -+ -+ sRGXMMUPDEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); -+ /* -+ * The hardware requires that PT tables need be 1<<6 = 64 byte aligned even -+ * if they contain fewer entries. -+ */ -+ sRGXMMUPDEConfig_1MBDP.uiAddrShift = 6; -+ sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 6; -+ -+ sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); -+ sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 1; -+ -+ sRGXMMUPDEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; -+ sRGXMMUPDEConfig_1MBDP.uiProtShift = 0; -+ -+ sRGXMMUPDEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; -+ sRGXMMUPDEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; -+ -+ /* -+ * Setup sRGXMMUPTEConfig_1MBDP -+ */ -+ sRGXMMUPTEConfig_1MBDP.ePxLevel = MMU_LEVEL_1; -+ sRGXMMUPTEConfig_1MBDP.pszPxLevelStr = "PT"; -+ sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8; -+ -+ sRGXMMUPTEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffff00000); -+ sRGXMMUPTEConfig_1MBDP.uiAddrShift = 20; -+ sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 20; -+ -+ sRGXMMUPTEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; -+ sRGXMMUPTEConfig_1MBDP.uiProtShift = 0; -+ -+ sRGXMMUPTEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; -+ sRGXMMUPTEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; -+ -+ /* -+ * Setup sRGXMMUDevVAddrConfig_1MBDP -+ */ -+ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; -+ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; -+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask, -+ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift)); -+ -+ -+ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; -+ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; -+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask, -+ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift)); -+ -+ -+ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000100000); -+ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 20; -+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask, -+ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift)); -+ -+ -+ sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00000fffff); -+ sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0; -+ sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0; -+ -+ /* -+ * Setup gsPageSizeConfig1MB -+ */ -+ gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP; -+ gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP; -+ gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP; -+ gsPageSizeConfig1MB.uiRefCount = 0; -+ gsPageSizeConfig1MB.uiMaxRefCount = 0; -+ -+ /* -+ * -+ * Configuration for heaps with 2MB Data-Page size -+ * -+ * Bit 39 30 29 21 20 0 -+ * | \ / \ / 
\ -+ * |....PCE...|...PDE...|.........VAddr.......| -+ * -+ */ -+ -+ /* -+ * Setup sRGXMMUPDEConfig_2MBDP -+ */ -+ sRGXMMUPDEConfig_2MBDP.ePxLevel = MMU_LEVEL_2; -+ sRGXMMUPDEConfig_2MBDP.pszPxLevelStr = "PD"; -+ sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 8; -+ -+ sRGXMMUPDEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); -+ /* -+ * The hardware requires that PT tables need be 1<<6 = 64 byte aligned even -+ * if they contain fewer entries. -+ */ -+ sRGXMMUPDEConfig_2MBDP.uiAddrShift = 6; -+ sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 6; -+ -+ sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); -+ sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 1; -+ -+ sRGXMMUPDEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; -+ sRGXMMUPDEConfig_2MBDP.uiProtShift = 0; -+ -+ sRGXMMUPDEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; -+ sRGXMMUPDEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; -+ -+ /* -+ * Setup sRGXMMUPTEConfig_2MBDP -+ */ -+ sRGXMMUPTEConfig_2MBDP.ePxLevel = MMU_LEVEL_1; -+ sRGXMMUPTEConfig_2MBDP.pszPxLevelStr = "PT"; -+ sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 8; -+ -+ sRGXMMUPTEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xffffe00000); -+ sRGXMMUPTEConfig_2MBDP.uiAddrShift = 21; -+ sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 21; -+ -+ sRGXMMUPTEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; -+ sRGXMMUPTEConfig_2MBDP.uiProtShift = 0; -+ -+ sRGXMMUPTEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; -+ sRGXMMUPTEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; -+ -+ /* -+ * Setup sRGXMMUDevVAddrConfig_2MBDP -+ */ -+ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; -+ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; -+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask, -+ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift)); -+ -+ -+ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; -+ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; -+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask, -+ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift)); -+ -+ -+ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000000000); -+ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 21; -+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask, -+ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift)); -+ -+ -+ sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00001fffff); -+ sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0; -+ sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0; -+ -+ /* -+ * Setup gsPageSizeConfig2MB -+ */ -+ gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP; -+ gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP; -+ gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP; -+ gsPageSizeConfig2MB.uiRefCount = 0; -+ gsPageSizeConfig2MB.uiMaxRefCount = 0; -+ -+ /* -+ * Setup sRGXMMUDeviceAttributes -+ */ -+ sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT; -+ sRGXMMUDeviceAttributes.ui32BaseAlign = RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT; -+ sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPCEConfig; -+ sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig; -+ -+ sRGXMMUDeviceAttributes.pfnTestPremapConfigureMMU = NULL; -+ -+ /* 
Functions for deriving page table/dir/cat protection bits */ -+ sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8; -+ sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4; -+ sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8; -+ sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4; -+ sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8; -+ sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4; -+ -+ /* Functions for establishing configurations for PDE/PTE/DEVVADDR -+ on per-heap basis */ -+ sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB; -+ sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB; -+ -+ sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4; -+ sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8; -+ sRGXMMUDeviceAttributes.pfnGetPageSizeFromVirtAddr = NULL; -+ -+ psDeviceNode->psMMUDevAttrs = &sRGXMMUDeviceAttributes; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = PVRSRV_OK; -+ -+#if defined(PDUMP) -+ psDeviceNode->pfnMMUGetContextID = NULL; -+#endif -+ -+ psDeviceNode->psMMUDevAttrs = NULL; -+ -+#if defined(DEBUG) -+ PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:")); -+ PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d", -+ gsPageSizeConfig4KB.uiMaxRefCount)); -+ PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d", -+ gsPageSizeConfig4KB.uiRefCount)); -+ PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d", -+ gsPageSizeConfig16KB.uiMaxRefCount)); -+ PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d", -+ gsPageSizeConfig16KB.uiRefCount)); -+ PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d", -+ gsPageSizeConfig64KB.uiMaxRefCount)); -+ PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d", -+ gsPageSizeConfig64KB.uiRefCount)); -+ PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d", -+ gsPageSizeConfig256KB.uiMaxRefCount)); -+ PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d", -+ gsPageSizeConfig256KB.uiRefCount)); -+ PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d", -+ gsPageSizeConfig1MB.uiMaxRefCount)); -+ PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d", -+ gsPageSizeConfig1MB.uiRefCount)); -+ PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d", -+ gsPageSizeConfig2MB.uiMaxRefCount)); -+ PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d", -+ gsPageSizeConfig2MB.uiRefCount)); -+#endif -+ if (gsPageSizeConfig4KB.uiRefCount > 0 || -+ gsPageSizeConfig16KB.uiRefCount > 0 || -+ gsPageSizeConfig64KB.uiRefCount > 0 || -+ gsPageSizeConfig256KB.uiRefCount > 0 || -+ gsPageSizeConfig1MB.uiRefCount > 0 || -+ gsPageSizeConfig2MB.uiRefCount > 0 -+ ) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)")); -+ } -+ -+ return eError; -+} -+ -+/*************************************************************************/ /*! -+@Function RGXDerivePCEProt4 -+@Description calculate the PCE protection flags based on a 4 byte entry -+@Return PVRSRV_ERROR -+ */ /**************************************************************************/ -+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags) -+{ -+ return (uiProtFlags & MMU_PROTFLAGS_INVALID)?0:RGX_MMUCTRL_PC_DATA_VALID_EN; -+} -+ -+ -+/*************************************************************************/ /*! 
-+@Function RGXDerivePCEProt8 -+@Description calculate the PCE protection flags based on an 8 byte entry -+@Return PVRSRV_ERROR -+ */ /**************************************************************************/ -+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) -+{ -+ PVR_UNREFERENCED_PARAMETER(uiProtFlags); -+ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize); -+ -+ PVR_DPF((PVR_DBG_ERROR, "8-byte PCE not supported on this device")); -+ return 0; -+} -+ -+ -+/*************************************************************************/ /*! -+@Function RGXDerivePDEProt4 -+@Description derive the PDE protection flags based on a 4 byte entry -+@Return PVRSRV_ERROR -+ */ /**************************************************************************/ -+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(uiProtFlags); -+ PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device")); -+ return 0; -+} -+ -+ -+/*************************************************************************/ /*! -+@Function RGXDerivePDEProt8 -+@Description derive the PDE protection flags based on an 8 byte entry -+ -+@Input uiLog2DataPageSize The log2 of the required page size. -+ E.g, for 4KiB pages, this parameter must be 12. -+ For 2MiB pages, it must be set to 21. -+ -+@Return PVRSRV_ERROR -+ */ /**************************************************************************/ -+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) -+{ -+ IMG_UINT64 ret_value = 0; /* 0 means invalid */ -+ -+ if (!(uiProtFlags & MMU_PROTFLAGS_INVALID)) /* if not invalid */ -+ { -+ switch (uiLog2DataPageSize) -+ { -+ case RGX_HEAP_4KB_PAGE_SHIFT: -+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB; -+ break; -+ case RGX_HEAP_16KB_PAGE_SHIFT: -+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB; -+ break; -+ case RGX_HEAP_64KB_PAGE_SHIFT: -+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB; -+ break; -+ case RGX_HEAP_256KB_PAGE_SHIFT: -+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB; -+ break; -+ case RGX_HEAP_1MB_PAGE_SHIFT: -+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB; -+ break; -+ case RGX_HEAP_2MB_PAGE_SHIFT: -+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB; -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s:%d: in function<%s>: Invalid parameter log2_page_size. Expected {12, 14, 16, 18, 20, 21}. Got [%u]", -+ __FILE__, __LINE__, __func__, uiLog2DataPageSize)); -+ } -+ } -+ return ret_value; -+} -+ -+ -+/*************************************************************************/ /*! -+@Function RGXDerivePTEProt4 -+@Description calculate the PTE protection flags based on a 4 byte entry -+@Return PVRSRV_ERROR -+ */ /**************************************************************************/ -+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(uiProtFlags); -+ PVR_DPF((PVR_DBG_ERROR, "4-byte PTE not supported on this device")); -+ -+ return 0; -+} -+ -+/*************************************************************************/ /*! 
-+@Function RGXDerivePTEProt8 -+@Description calculate the PTE protection flags based on an 8 byte entry -+@Return PVRSRV_ERROR -+ */ /**************************************************************************/ -+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize) -+{ -+ IMG_UINT64 ui64MMUFlags=0; -+ -+ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize); -+ -+ if (((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE)) -+ { -+ /* read/write */ -+ } -+ else if (MMU_PROTFLAGS_READABLE & uiProtFlags) -+ { -+ /* read only */ -+ ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_READ_ONLY_EN; -+ } -+ else if (MMU_PROTFLAGS_WRITEABLE & uiProtFlags) -+ { -+ /* write only */ -+ PVR_DPF((PVR_DBG_WARNING, "RGXDerivePTEProt8: write-only is not possible on this device")); -+ } -+ else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: neither read nor write specified...")); -+ } -+ -+ /* cache coherency */ -+ if (MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags) -+ { -+ ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_CC_EN; -+ } -+ -+ /* cache setup */ -+ if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0) -+ { -+ ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN; -+ } -+ -+ if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0) -+ { -+ ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_VALID_EN; -+ } -+ -+ if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags) -+ { -+ ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN; -+ } -+ -+ return ui64MMUFlags; -+} -+ -+ -+/*************************************************************************/ /*! -+@Function RGXGetPageSizeConfig -+@Description Set up configuration for variable sized data pages. -+ RGXPutPageSizeConfigCB has to be called to ensure correct -+ refcounting. 
-+@Return PVRSRV_ERROR -+ */ /**************************************************************************/ -+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, -+ const MMU_PxE_CONFIG **ppsMMUPDEConfig, -+ const MMU_PxE_CONFIG **ppsMMUPTEConfig, -+ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, -+ IMG_HANDLE *phPriv) -+{ -+ MMU_PAGESIZECONFIG *psPageSizeConfig; -+ -+ switch (uiLog2DataPageSize) -+ { -+ case RGX_HEAP_4KB_PAGE_SHIFT: -+ psPageSizeConfig = &gsPageSizeConfig4KB; -+ break; -+ case RGX_HEAP_16KB_PAGE_SHIFT: -+ psPageSizeConfig = &gsPageSizeConfig16KB; -+ break; -+ case RGX_HEAP_64KB_PAGE_SHIFT: -+ psPageSizeConfig = &gsPageSizeConfig64KB; -+ break; -+ case RGX_HEAP_256KB_PAGE_SHIFT: -+ psPageSizeConfig = &gsPageSizeConfig256KB; -+ break; -+ case RGX_HEAP_1MB_PAGE_SHIFT: -+ psPageSizeConfig = &gsPageSizeConfig1MB; -+ break; -+ case RGX_HEAP_2MB_PAGE_SHIFT: -+ psPageSizeConfig = &gsPageSizeConfig2MB; -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x", -+ uiLog2DataPageSize)); -+ *phPriv = NULL; -+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; -+ } -+ -+ /* Refer caller's pointers to the data */ -+ *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig; -+ *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig; -+ *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig; -+ -+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT) -+ /* Increment ref-count - not that we're allocating anything here -+ (I'm using static structs), but one day we might, so we want -+ the Get/Put code to be balanced properly */ -+ psPageSizeConfig->uiRefCount++; -+ -+ /* This is purely for debug statistics */ -+ psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount, -+ psPageSizeConfig->uiRefCount); -+#endif -+ -+ *phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize; -+ PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv); -+ -+ return PVRSRV_OK; -+} -+ -+/*************************************************************************/ /*! -+@Function RGXPutPageSizeConfig -+@Description Tells this code that the mmu module is done with the -+ configurations set in RGXGetPageSizeConfig. This can -+ be a no-op. -+ Called after RGXGetPageSizeConfigCB. 
-+@Return PVRSRV_ERROR -+ */ /**************************************************************************/ -+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv) -+{ -+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT) -+ MMU_PAGESIZECONFIG *psPageSizeConfig; -+ IMG_UINT32 uiLog2DataPageSize; -+ -+ uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv; -+ -+ switch (uiLog2DataPageSize) -+ { -+ case RGX_HEAP_4KB_PAGE_SHIFT: -+ psPageSizeConfig = &gsPageSizeConfig4KB; -+ break; -+ case RGX_HEAP_16KB_PAGE_SHIFT: -+ psPageSizeConfig = &gsPageSizeConfig16KB; -+ break; -+ case RGX_HEAP_64KB_PAGE_SHIFT: -+ psPageSizeConfig = &gsPageSizeConfig64KB; -+ break; -+ case RGX_HEAP_256KB_PAGE_SHIFT: -+ psPageSizeConfig = &gsPageSizeConfig256KB; -+ break; -+ case RGX_HEAP_1MB_PAGE_SHIFT: -+ psPageSizeConfig = &gsPageSizeConfig1MB; -+ break; -+ case RGX_HEAP_2MB_PAGE_SHIFT: -+ psPageSizeConfig = &gsPageSizeConfig2MB; -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x", -+ uiLog2DataPageSize)); -+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; -+ } -+ -+ /* Ref-count here is not especially useful, but it's an extra -+ check that the API is being used correctly */ -+ psPageSizeConfig->uiRefCount--; -+#else -+ PVR_UNREFERENCED_PARAMETER(hPriv); -+#endif -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize) -+{ -+ PVR_UNREFERENCED_PARAMETER(ui32PDE); -+ PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize); -+ PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device")); -+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; -+} -+ -+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize) -+{ -+ IMG_UINT64 ui64PageSizeBits = ui64PDE & (~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK); -+ -+ switch (ui64PageSizeBits) -+ { -+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB: -+ *pui32Log2PageSize = RGX_HEAP_4KB_PAGE_SHIFT; -+ break; -+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB: -+ *pui32Log2PageSize = RGX_HEAP_16KB_PAGE_SHIFT; -+ break; -+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB: -+ *pui32Log2PageSize = RGX_HEAP_64KB_PAGE_SHIFT; -+ break; -+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB: -+ *pui32Log2PageSize = RGX_HEAP_256KB_PAGE_SHIFT; -+ break; -+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB: -+ *pui32Log2PageSize = RGX_HEAP_1MB_PAGE_SHIFT; -+ break; -+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB: -+ *pui32Log2PageSize = RGX_HEAP_2MB_PAGE_SHIFT; -+ break; -+ default: -+ PVR_DPF((PVR_DBG_ERROR, -+ "RGXGetPageSizeFromPDE8: Invalid page size bitfield %" IMG_UINT64_FMTSPECx " in PDE", -+ ui64PageSizeBits)); -+ -+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; -+ } -+ return PVRSRV_OK; -+} -diff --git a/drivers/gpu/drm/img-rogue/rgxmmuinit.h b/drivers/gpu/drm/img-rogue/rgxmmuinit.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxmmuinit.h -@@ -0,0 +1,60 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device specific initialisation routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device specific MMU initialisation -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
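The page-size-from-PDE decode in RGXGetPageSizeFromPDE8 above boils down to masking the page-size bits of the entry and mapping each encoding to a log2 page size. The sketch below illustrates that shape only; the PD_PAGE_SIZE_* encodings are assumed stand-ins for the RGX_MMUCTRL_PD_DATA_PAGE_SIZE_* values defined in rgxmmudefs_km.h, and the sample PDE is hypothetical.

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>
#include <stdint.h>

#define PD_PAGE_SIZE_MASK  UINT64_C(0x0e)  /* bits 3..1 of the PDE, per the layout comment above */
#define PD_PAGE_SIZE_4KB   UINT64_C(0x0)   /* assumed encodings (stand-ins) */
#define PD_PAGE_SIZE_16KB  UINT64_C(0x2)
#define PD_PAGE_SIZE_64KB  UINT64_C(0x4)

static int pde_to_log2_page_size(uint64_t pde, uint32_t *log2_size)
{
    switch (pde & PD_PAGE_SIZE_MASK)
    {
        case PD_PAGE_SIZE_4KB:  *log2_size = 12; return 0;
        case PD_PAGE_SIZE_16KB: *log2_size = 14; return 0;
        case PD_PAGE_SIZE_64KB: *log2_size = 16; return 0;
        default:                return -1; /* encoding not handled in this sketch */
    }
}

int main(void)
{
    uint64_t pde = UINT64_C(0x0000123400000003); /* hypothetical: valid bit | assumed 16 KiB bits */
    uint32_t log2_size = 0;

    if (pde_to_log2_page_size(pde, &log2_size) == 0)
        printf("PDE 0x%llx -> log2 page size %u\n",
               (unsigned long long)pde, (unsigned)log2_size);
    return 0;
}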
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+/* NB: this file is not to be included arbitrarily. It exists solely -+ for the linkage between rgxinit.c and rgxmmuinit.c, the former -+ being otherwise cluttered by the contents of the latter */ -+ -+#ifndef SRVKM_RGXMMUINIT_H -+#define SRVKM_RGXMMUINIT_H -+ -+#include "device.h" -+#include "img_types.h" -+#include "mmu_common.h" -+#include "img_defs.h" -+ -+PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode); -+PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+ -+#endif /* #ifndef SRVKM_RGXMMUINIT_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxmulticore.c b/drivers/gpu/drm/img-rogue/rgxmulticore.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxmulticore.c -@@ -0,0 +1,252 @@ -+/*************************************************************************/ /*! -+@File rgxmulticore.c -+@Title Functions related to multicore devices -+@Codingstyle IMG -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Kernel mode workload estimation functionality. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
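RGXGetMultiCoreInfo below follows a common query pattern: always report the core count, treat a zero-sized caps buffer as a count-only query, and refuse to copy into a buffer smaller than the core count. The sketch that follows is an illustration of that pattern only; every name and value in it is hypothetical rather than taken from the driver.

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>
#include <stdint.h>

static int get_multicore_info(uint32_t num_cores, const uint64_t *caps,
                              uint32_t caps_buf_size,
                              uint32_t *out_num, uint64_t *out_caps)
{
    if (num_cores == 0)
        return -1;                    /* multicore not supported */

    *out_num = num_cores;             /* always report the core count */

    if (caps_buf_size == 0)
        return 0;                     /* caller only wanted the count */

    if (caps_buf_size < num_cores)
        return -2;                    /* caller's buffer is too small */

    for (uint32_t i = 0; i < num_cores; ++i)
        out_caps[i] = caps[i];        /* one capability word per core */

    return 0;
}

int main(void)
{
    const uint64_t caps[2] = { 0x0fu, 0x07u }; /* hypothetical capability words */
    uint64_t out[2];
    uint32_t n = 0;

    if (get_multicore_info(2, caps, 2, &n, out) == 0)
        printf("%u cores, core0 caps 0x%llx\n",
               (unsigned)n, (unsigned long long)out[0]);
    return 0;
}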
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "rgxdevice.h" -+#include "rgxdefs_km.h" -+#include "pdump_km.h" -+#include "rgxmulticore.h" -+#include "multicore_defs.h" -+#include "allocmem.h" -+#include "pvr_debug.h" -+ -+/* -+ * check that register defines match our hardcoded definitions. -+ * Rogue has these, volcanic does not. -+ */ -+#if ((RGX_MULTICORE_CAPABILITY_FRAGMENT_EN != RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_EN) || \ -+ (RGX_MULTICORE_CAPABILITY_GEOMETRY_EN != RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_EN) || \ -+ (RGX_MULTICORE_CAPABILITY_COMPUTE_EN != RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_EN) || \ -+ (RGX_MULTICORE_CAPABILITY_PRIMARY_EN != RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN) || \ -+ (RGX_MULTICORE_ID_CLRMSK != RGX_CR_MULTICORE_GPU_ID_CLRMSK)) -+#error "Rogue definitions for RGX_CR_MULTICORE_GPU register have changed" -+#endif -+ -+ -+static PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32CapsSize, -+ IMG_UINT32 *pui32NumCores, -+ IMG_UINT64 *pui64Caps); -+ -+ -+/* -+ * RGXInitMultiCoreInfo: -+ * Return multicore information to clients. -+ * Return not supported on cores without multicore. 
-+ */ -+static PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32CapsSize, -+ IMG_UINT32 *pui32NumCores, -+ IMG_UINT64 *pui64Caps) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (psDevInfo->ui32MultiCoreNumCores == 0) -+ { -+ /* MULTICORE not supported on this device */ -+ eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ } -+ else -+ { -+ *pui32NumCores = psDevInfo->ui32MultiCoreNumCores; -+ if (ui32CapsSize > 0) -+ { -+ if (ui32CapsSize < psDevInfo->ui32MultiCoreNumCores) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Multicore caps buffer too small")); -+ eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ } -+ else -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psDevInfo->ui32MultiCoreNumCores; ++i) -+ { -+ pui64Caps[i] = psDevInfo->pui64MultiCoreCapabilities[i]; -+ } -+ } -+ } -+ } -+ -+ return eError; -+} -+ -+ -+ -+/* -+ * RGXInitMultiCoreInfo: -+ * Read multicore HW registers and fill in data structure for clients. -+ * Return not supported on cores without multicore. -+ */ -+PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (psDeviceNode->pfnGetMultiCoreInfo != NULL) -+ { -+ /* we only set this up once */ -+ return PVRSRV_OK; -+ } -+ -+ /* defaults for non-multicore devices */ -+ psDevInfo->ui32MultiCoreNumCores = 0; -+ psDevInfo->ui32MultiCorePrimaryId = (IMG_UINT32)(-1); -+ psDevInfo->pui64MultiCoreCapabilities = NULL; -+ psDeviceNode->pfnGetMultiCoreInfo = NULL; -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) -+ { -+ IMG_BOOL bPowerWasDown; -+ IMG_UINT32 ui32MulticoreRegBankOffset = (1 << RGX_GET_FEATURE_VALUE(psDevInfo, XPU_MAX_REGBANKS_ADDR_WIDTH)); -+ IMG_UINT32 ui32MulticoreGPUReg = RGX_CR_MULTICORE_GPU; -+ IMG_UINT32 ui32NumCores; -+ IMG_UINT32 i; -+ -+#if defined(RGX_HOST_SECURE_REGBANK_OFFSET) && defined(XPU_MAX_REGBANKS_ADDR_WIDTH) -+ /* Ensure the HOST_SECURITY reg bank definitions are correct */ -+ if ((RGX_HOST_SECURE_REGBANK_OFFSET + RGX_HOST_SECURE_REGBANK_SIZE) != ui32MulticoreRegBankOffset) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Register bank definitions for HOST_SECURITY don't match core's configuration.", __func__)); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+#endif -+ bPowerWasDown = (psDeviceNode->psDevConfig->pfnGpuDomainPower(psDeviceNode) == PVRSRV_SYS_POWER_STATE_OFF); -+ -+ /* Power-up the device as required to read the registers */ -+ if (bPowerWasDown) -+ { -+ eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_ON); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState ON"); -+ } -+ -+ ui32NumCores = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM); -+#if !defined(NO_HARDWARE) -+ /* check that the number of cores reported is in-bounds */ -+ if (ui32NumCores > (RGX_CR_MULTICORE_SYSTEM_MASKFULL >> RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "invalid return (%u) read from MULTICORE_SYSTEM", ui32NumCores)); -+ return PVRSRV_ERROR_DEVICE_REGISTER_FAILED; -+ } -+#else -+ /* for nohw set to max so clients can allocate enough memory for all pdump runs on any config */ -+ ui32NumCores = RGX_MULTICORE_MAX_NOHW_CORES; -+#endif -+ PVR_DPF((PVR_DBG_MESSAGE, "Multicore system has %u cores", ui32NumCores)); -+ PDUMPCOMMENT(psDeviceNode, "RGX Multicore has %d cores\n", ui32NumCores); -+ -+ /* allocate storage for capabilities */ -+ psDevInfo->pui64MultiCoreCapabilities = 
OSAllocMem(ui32NumCores * sizeof(psDevInfo->pui64MultiCoreCapabilities[0])); -+ if (psDevInfo->pui64MultiCoreCapabilities == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to alloc memory for Multicore info", __func__)); -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ psDevInfo->ui32MultiCoreNumCores = ui32NumCores; -+ -+ for (i = 0; i < ui32NumCores; ++i) -+ { -+ #if !defined(NO_HARDWARE) -+ psDevInfo->pui64MultiCoreCapabilities[i] = -+ OSReadHWReg64(psDevInfo->pvRegsBaseKM, ui32MulticoreGPUReg) & RGX_CR_MULTICORE_GPU_MASKFULL; -+ #else -+ /* emulation for what we think caps are */ -+ psDevInfo->pui64MultiCoreCapabilities[i] = -+ i | ((i == 0) ? (RGX_MULTICORE_CAPABILITY_PRIMARY_EN -+ | RGX_MULTICORE_CAPABILITY_GEOMETRY_EN) : 0) -+ | RGX_MULTICORE_CAPABILITY_COMPUTE_EN -+ | RGX_MULTICORE_CAPABILITY_FRAGMENT_EN; -+ #endif -+ PVR_DPF((PVR_DBG_MESSAGE, "Core %d has capabilities value 0x%x", i, (IMG_UINT32)psDevInfo->pui64MultiCoreCapabilities[i] )); -+ PDUMPCOMMENT(psDeviceNode, "\tCore %d has caps 0x%08x\n", i, -+ (IMG_UINT32)psDevInfo->pui64MultiCoreCapabilities[i]); -+ -+ if (psDevInfo->pui64MultiCoreCapabilities[i] & RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN) -+ { -+ psDevInfo->ui32MultiCorePrimaryId = (psDevInfo->pui64MultiCoreCapabilities[i] -+ & ~RGX_CR_MULTICORE_GPU_ID_CLRMSK) -+ >> RGX_CR_MULTICORE_GPU_ID_SHIFT; -+ } -+ -+ ui32MulticoreGPUReg += ui32MulticoreRegBankOffset; -+ } -+ -+ /* revert power state to what it was on entry to this function */ -+ if (bPowerWasDown) -+ { -+ eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_OFF); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState OFF"); -+ } -+ -+ /* Register callback to return info about multicore setup to client bridge */ -+ psDeviceNode->pfnGetMultiCoreInfo = RGXGetMultiCoreInfo; -+ } -+ else -+ { -+ /* MULTICORE not supported on this device */ -+ eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ } -+ -+ return eError; -+} -+ -+ -+/* -+ * RGXDeinitMultiCoreInfo: -+ * Release resources and clear the MultiCore values in the DeviceNode. -+ */ -+void RGXDeInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ if (psDevInfo->pui64MultiCoreCapabilities != NULL) -+ { -+ OSFreeMem(psDevInfo->pui64MultiCoreCapabilities); -+ psDevInfo->pui64MultiCoreCapabilities = NULL; -+ psDevInfo->ui32MultiCoreNumCores = 0; -+ psDevInfo->ui32MultiCorePrimaryId = (IMG_UINT32)(-1); -+ } -+ psDeviceNode->pfnGetMultiCoreInfo = NULL; -+} -diff --git a/drivers/gpu/drm/img-rogue/rgxmulticore.h b/drivers/gpu/drm/img-rogue/rgxmulticore.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxmulticore.h -@@ -0,0 +1,54 @@ -+/*************************************************************************/ /*! -+@File rgxmulticore.h -+@Title Functions related to multicore devices -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description General purpose memory shared between kernel driver and user -+ mode. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RGXMULTICORE_H -+#define RGXMULTICORE_H -+ -+#include "pvrsrv_error.h" -+#include "pvrsrv.h" -+ -+PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode); -+void RGXDeInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+#endif /* RGXMULTICORE_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxpower.c b/drivers/gpu/drm/img-rogue/rgxpower.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxpower.c -@@ -0,0 +1,1691 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device specific power routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device specific functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if defined(__linux__) -+#include -+#else -+#include -+#endif -+ -+#include "rgxpower.h" -+#include "rgxinit.h" -+#include "rgx_fwif_km.h" -+#include "rgxfwutils.h" -+#include "rgxfwriscv.h" -+#include "pdump_km.h" -+#include "pvr_debug.h" -+#include "osfunc.h" -+#include "rgxdebug_common.h" -+#include "devicemem.h" -+#include "devicemem_pdump.h" -+#include "rgxtimecorr.h" -+#include "devicemem_utils.h" -+#include "htbserver.h" -+#include "rgxstartstop.h" -+#include "rgxfwimageutils.h" -+#include "sync.h" -+#include "rgxdefs_km.h" -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+#include "process_stats.h" -+#endif -+#if defined(SUPPORT_LINUX_DVFS) -+#include "pvr_dvfs_device.h" -+#endif -+#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) -+#include "os_apphint.h" -+#include "validation_soc.h" -+#endif -+ -+static PVRSRV_ERROR RGXFWNotifyHostTimeout(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ RGXFWIF_KCCB_CMD sCmd; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32CmdKCCBSlot; -+ -+ /* Send the Timeout notification to the FW */ -+ sCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; -+ sCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ; -+ sCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_HOST_TIMEOUT; -+ -+ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, -+ &sCmd, -+ PDUMP_FLAGS_NONE, -+ &ui32CmdKCCBSlot); -+ -+ return eError; -+} -+ -+static void _RGXUpdateGPUUtilStats(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb; -+ IMG_UINT64 (*paui64DMOSLastWord)[RGXFW_MAX_NUM_OSIDS]; -+ IMG_UINT64 (*paaui64DMOSStatsCounters)[RGXFW_MAX_NUM_OSIDS][RGXFWIF_GPU_UTIL_STATE_NUM]; -+ IMG_UINT64 ui64LastPeriod; -+ IMG_UINT64 ui64LastState; -+ IMG_UINT64 ui64LastTime; -+ IMG_UINT64 ui64TimeNow; -+ RGXFWIF_DM eDM; -+ -+ psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfGpuUtilFWCb, INVALIDATE); -+ paui64DMOSLastWord = &psUtilFWCb->aaui64DMOSLastWord[0]; -+ paaui64DMOSStatsCounters = &psUtilFWCb->aaaui64DMOSStatsCounters[0]; -+ -+ OSLockAcquire(psDevInfo->hGPUUtilLock); -+ -+ 
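	/*
	 * The utilisation bookkeeping below works on packed 64-bit "last word"
	 * values: each word combines the timestamp of the most recent state
	 * change with the state the GPU (or DM/driver pair) entered at that
	 * time. The RGXFWIF_GPU_UTIL_GET_TIME/GET_STATE macros unpack the word,
	 * the period elapsed since that change is added to the matching
	 * per-state counter, and RGXFWIF_GPU_UTIL_MAKE_WORD re-packs the current
	 * time with the unchanged state. The exact bit layout of the word is
	 * defined in the firmware interface headers, not in this file.
	 */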
ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64(psDevInfo->psDeviceNode)); -+ -+ /* Update counters to account for the time since the last update */ -+ ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64GpuLastWord); -+ ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64GpuLastWord); -+ ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime); -+ psUtilFWCb->aui64GpuStatsCounters[ui64LastState] += ui64LastPeriod; -+ -+ /* Update state and time of the latest update */ -+ psUtilFWCb->ui64GpuLastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState); -+ -+ for (eDM = 0; eDM < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; eDM++) -+ { -+ IMG_UINT32 ui32DriverID; -+ -+ FOREACH_SUPPORTED_DRIVER(ui32DriverID) -+ { -+ ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->aaui64DMOSLastWord[eDM][ui32DriverID]); -+ ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->aaui64DMOSLastWord[eDM][ui32DriverID]); -+ ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime); -+ paaui64DMOSStatsCounters[eDM][ui32DriverID][ui64LastState] += ui64LastPeriod; -+ -+ /* Update state and time of the latest update */ -+ paui64DMOSLastWord[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState); -+ } -+ } -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfGpuUtilFWCb, FLUSH); -+ -+ OSLockRelease(psDevInfo->hGPUUtilLock); -+} -+ -+static INLINE PVRSRV_ERROR RGXDoStop(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) -+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); -+ -+ if (psDevConfig->pfnTDRGXStop == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXPrePowerState: TDRGXStop not implemented!")); -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+ } -+ -+ eError = psDevConfig->pfnTDRGXStop(psDevConfig->hSysData); -+#else -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); -+ -+ eError = RGXStop(&psDevInfo->sLayerParams); -+#endif -+ -+ return eError; -+} -+ -+/* -+ RGXPrePowerState -+*/ -+PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE hDevHandle, -+ PVRSRV_DEV_POWER_STATE eNewPowerState, -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ -+ if ((eNewPowerState != eCurrentPowerState) && -+ (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)) -+ { -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGXFWIF_KCCB_CMD sPowCmd; -+ IMG_UINT32 ui32CmdKCCBSlot; -+ -+ const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; -+ -+ /* Send the Power off request to the FW */ -+ sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; -+ sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_OFF_REQ; -+ sPowCmd.uCmdData.sPowData.uPowerReqData.bForced = BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED); -+ -+ eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", -+ __func__)); -+ return eError; -+ } -+ -+ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, -+ &sPowCmd, -+ PDUMP_FLAGS_NONE, -+ &ui32CmdKCCBSlot); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send Power off request", -+ __func__)); -+ return eError; -+ } -+ -+ /* Wait for the firmware to complete processing. 
It cannot use PVRSRVWaitForValueKM as it relies -+ on the EventObject which is signalled in this MISR */ -+ eError = RGXPollForGPCommandCompletion(psDeviceNode, -+ psDevInfo->psPowSyncPrim->pui32LinAddr, -+ 0x1, 0xFFFFFFFF); -+ -+ /* Check the Power state after the answer */ -+ if (eError == PVRSRV_OK) -+ { -+ RGXFwSharedMemCacheOpValue(psFwSysData->ePowState, -+ INVALIDATE); -+ /* Finally, de-initialise some registers. */ -+ if (psFwSysData->ePowState == RGXFWIF_POW_OFF) -+ { -+#if !defined(NO_HARDWARE) -+ IMG_UINT32 ui32idx; -+ -+ /* Driver takes the VZ Fw-KM connection down, preventing the -+ * firmware from submitting further interrupts */ -+ KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); -+ KM_CONNECTION_CACHEOP(Os, FLUSH); -+ -+#if defined(RGX_FW_IRQ_OS_COUNTERS) -+ ui32idx = RGXFW_HOST_DRIVER_ID; -+#else -+ for_each_irq_cnt(ui32idx) -+#endif /* RGX_FW_IRQ_OS_COUNTERS */ -+ { -+ IMG_UINT32 ui32IrqCnt; -+ -+ get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo); -+ -+ /* Wait for the pending FW processor to host interrupts to come back. */ -+ eError = PVRSRVPollForValueKM(psDeviceNode, -+ (IMG_UINT32 __iomem *)&psDevInfo->aui32SampleIRQCount[ui32idx], -+ ui32IrqCnt, -+ 0xffffffff, -+ POLL_FLAG_LOG_ERROR, -+ NULL); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Wait for pending interrupts failed (DevID %u)." MSG_IRQ_CNT_TYPE " %u Host: %u, FW: %u", -+ __func__, -+ psDeviceNode->sDevId.ui32InternalID, -+ ui32idx, -+ psDevInfo->aui32SampleIRQCount[ui32idx], -+ ui32IrqCnt)); -+ -+ RGX_WaitForInterruptsTimeout(psDevInfo); -+ } -+ } -+#endif /* NO_HARDWARE */ -+ -+ /* Update GPU frequency and timer correlation related data */ -+ RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_POWER); -+ -+ /* Update GPU state counters */ -+ _RGXUpdateGPUUtilStats(psDevInfo); -+ -+#if defined(SUPPORT_LINUX_DVFS) -+ eError = SuspendDVFS(psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to suspend DVFS", __func__)); -+ return eError; -+ } -+#endif -+ -+ /* Firmware was successfully stopped, no further interrupts expected */ -+ psDevInfo->bRGXPowered = IMG_FALSE; -+ -+ eError = RGXDoStop(psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ /* Power down failures are treated as successful since the power was removed but logged. */ -+ PVR_DPF((PVR_DBG_WARNING, "%s: RGXDoStop failed (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ psDevInfo->ui32ActivePMReqNonIdle++; -+ eError = PVRSRV_OK; -+ } -+ } -+ else -+ { -+ /* the sync was updated but the pow state isn't off -> the FW denied the transition */ -+ eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED; -+ -+ if (BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED)) -+ { /* It is an error for a forced request to be denied */ -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failure to power off during a forced power off. 
FW: %d", -+ __func__, psFwSysData->ePowState)); -+ } -+ } -+ } -+ else if (eError == PVRSRV_ERROR_TIMEOUT) -+ { -+ /* timeout waiting for the FW to ack the request: return timeout */ -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Timeout waiting for powoff ack from the FW", -+ __func__)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Error waiting for powoff ack from the FW (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE; -+ } -+ } -+ -+ return eError; -+} -+ -+#if defined(SUPPORT_AUTOVZ) -+static PVRSRV_ERROR _RGXWaitForGuestsToDisconnect(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_ERROR eError = PVRSRV_ERROR_TIMEOUT; -+ IMG_UINT32 ui32FwTimeout = (20 * SECONDS_TO_MICROSECONDS); -+ -+ LOOP_UNTIL_TIMEOUT(ui32FwTimeout) -+ { -+ IMG_UINT32 ui32DriverID; -+ IMG_BOOL bGuestOnline = IMG_FALSE; -+ -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror, -+ INVALIDATE); -+ -+ for (ui32DriverID = RGXFW_GUEST_DRIVER_ID_START; -+ ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED; ui32DriverID++) -+ { -+ RGXFWIF_CONNECTION_FW_STATE eGuestState = (RGXFWIF_CONNECTION_FW_STATE) -+ psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[ui32DriverID].bfOsState; -+ -+ if ((eGuestState == RGXFW_CONNECTION_FW_ACTIVE) || -+ (eGuestState == RGXFW_CONNECTION_FW_OFFLOADING)) -+ { -+ bGuestOnline = IMG_TRUE; -+ PVR_DPF((PVR_DBG_WARNING, "%s: Guest OS %u still online.", __func__, ui32DriverID)); -+ } -+ } -+ -+ if (!bGuestOnline) -+ { -+ /* Allow Guests to finish reading Connection state registers before disconnecting. */ -+ OSSleepms(100); -+ -+ PVR_DPF((PVR_DBG_WARNING, "%s: All Guest connections are down. " -+ "Host can power down the GPU.", __func__)); -+ eError = PVRSRV_OK; -+ break; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Waiting for Guests to disconnect " -+ "before powering down GPU.", __func__)); -+ -+ if (PVRSRVPwrLockIsLockedByMe(psDeviceNode)) -+ { -+ /* Don't wait with the power lock held as this prevents the vz -+ * watchdog thread from keeping the fw-km connection alive. */ -+ PVRSRVPowerUnlock(psDeviceNode); -+ } -+ } -+ -+ OSSleepms(10); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ if (!PVRSRVPwrLockIsLockedByMe(psDeviceNode)) -+ { -+ /* Take back power lock after waiting for Guests */ -+ eError = PVRSRVPowerLock(psDeviceNode); -+ } -+ -+ return eError; -+} -+#endif /* defined(SUPPORT_AUTOVZ) */ -+ -+/* -+ RGXVzPrePowerState -+*/ -+PVRSRV_ERROR RGXVzPrePowerState(IMG_HANDLE hDevHandle, -+ PVRSRV_DEV_POWER_STATE eNewPowerState, -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ -+ PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", eError); -+ -+ if (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON) -+ { -+ /* powering down */ -+#if defined(SUPPORT_AUTOVZ) -+ if (PVRSRV_VZ_MODE_IS(HOST) && (!psDeviceNode->bAutoVzFwIsUp || psDeviceNode->bAutoVzAllowGPUPowerdown)) -+ { -+ if (psDeviceNode->bAutoVzFwIsUp) -+ { -+ /* bAutoVzAllowGPUPowerdown must be TRUE here and -+ * bAutoVzFwIsUp=TRUE indicates that this is a powerdown event -+ * so send requests to the FW to disconnect all guest connections -+ * before GPU is powered down. 
*/ -+ eError = RGXDisconnectAllGuests(psDeviceNode); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXDisconnectAllGuests"); -+ } -+ -+ /* The Host must ensure all Guest drivers have disconnected from the GPU before powering it down. -+ * Guest drivers regularly access hardware registers during runtime. If an attempt is made to -+ * access a GPU register while the GPU is down, the SoC might lock up. */ -+ eError = _RGXWaitForGuestsToDisconnect(psDeviceNode); -+ PVR_LOG_RETURN_IF_ERROR(eError, "_RGXWaitForGuestsToDisconnect"); -+ -+ if (psDeviceNode->bAutoVzAllowGPUPowerdown) -+ { -+ psDeviceNode->bAutoVzFwIsUp = IMG_FALSE; -+ } -+ -+ /* Temporarily restore all power callbacks used by the driver to fully power down the GPU. -+ * Under AutoVz, power transitions requests (e.g. on driver deinitialisation and unloading) -+ * are generally ignored and the GPU power state is unaffected. Special power requests like -+ * those triggered by Suspend/Resume calls must reinstate the callbacks when needed. */ -+ PVRSRVSetPowerCallbacks(psDeviceNode, psDeviceNode->psPowerDev, -+ &RGXVzPrePowerState, &RGXVzPostPowerState, -+ psDeviceNode->psDevConfig->pfnPrePowerState, -+ psDeviceNode->psDevConfig->pfnPostPowerState, -+ &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest); -+ } -+ else -+ { -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ KM_CONNECTION_CACHEOP(Fw, INVALIDATE); -+ KM_CONNECTION_CACHEOP(Os, INVALIDATE); -+ -+ if (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && -+ KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)) -+ { -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_ERROR eError = RGXFWSetFwOsState(psDevInfo, 0, RGXFWIF_OS_OFFLINE); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXFWSetFwOsState"); -+ } -+ } -+#endif -+ PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering down: bAutoVzFwIsUp = %s", -+ __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", -+ psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE")); -+ } -+ else if (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON) -+ { -+ /* powering up */ -+ PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering up: bAutoVzFwIsUp = %s", -+ __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", -+ psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE")); -+ -+ } -+ -+ if (!(PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp))) -+ { -+ /* call regular device power function */ -+ eError = RGXPrePowerState(hDevHandle, eNewPowerState, eCurrentPowerState, ePwrFlags); -+ } -+ -+ return eError; -+} -+ -+/* -+ RGXVzPostPowerState -+*/ -+PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE hDevHandle, -+ PVRSRV_DEV_POWER_STATE eNewPowerState, -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", eError); -+ -+ if (!(PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp))) -+ { -+ /* call regular device power function */ -+ eError = RGXPostPowerState(hDevHandle, eNewPowerState, eCurrentPowerState, ePwrFlags); -+ } -+ -+ if (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON) -+ { -+ /* powering down */ -+ PVR_LOG_RETURN_IF_FALSE((!psDeviceNode->bAutoVzFwIsUp), "AutoVz Fw active, power not changed", eError); -+ PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering down: bAutoVzFwIsUp = %s", -+ __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", -+ psDeviceNode->bAutoVzFwIsUp ? 
"TRUE" : "FALSE")); -+ -+#if !defined(SUPPORT_AUTOVZ_HW_REGS) -+ /* The connection states must be reset on a GPU power cycle. If the states are kept -+ * in hardware scratch registers, they will be cleared on power down. When using shared -+ * memory the connection data must be explicitly cleared by the driver. */ -+ OSCachedMemSetWMB(psDevInfo->psRGXFWIfConnectionCtl, 0, sizeof(RGXFWIF_CONNECTION_CTL)); -+ RGXFwSharedMemCacheOpPtr(psDevInfo->psRGXFWIfConnectionCtl, FLUSH); -+#endif /* defined(SUPPORT_AUTOVZ) && !defined(SUPPORT_AUTOVZ_HW_REGS) */ -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)) -+ { -+#if defined(SUPPORT_AUTOVZ) -+ /* AutoVz Guests attempting to suspend have updated their connections earlier in RGXVzPrePowerState. -+ * Skip this redundant register write, as the Host could have powered down the GPU by now. */ -+ if (psDeviceNode->bAutoVzFwIsUp) -+#endif -+ { -+ /* Take the VZ connection down to prevent firmware from submitting further interrupts */ -+ KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); -+ KM_CONNECTION_CACHEOP(Os, FLUSH); -+ } -+ /* Power transition callbacks were not executed, update RGXPowered flag here */ -+ psDevInfo->bRGXPowered = IMG_FALSE; -+ } -+ } -+ else if (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON) -+ { -+ /* powering up */ -+ IMG_UINT32 ui32FwTimeout = (3 * SECONDS_TO_MICROSECONDS); -+ volatile IMG_BOOL *pbUpdatedFlag; -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated, -+ INVALIDATE); -+ pbUpdatedFlag = &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated; -+ -+ PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering up: bAutoVzFwIsUp = %s", -+ __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", -+ psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE")); -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Guests don't execute the power transition callbacks, so update their RGXPowered flag here */ -+ psDevInfo->bRGXPowered = IMG_TRUE; -+ -+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) -+ /* Guest drivers expect the firmware to have set its end of the -+ * connection to Ready state by now. */ -+ KM_CONNECTION_CACHEOP(Fw, INVALIDATE); -+ if (!KM_FW_CONNECTION_IS(READY, psDevInfo)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Ready state. Waiting for Firmware ...", __func__)); -+ } -+ -+ LOOP_UNTIL_TIMEOUT(RGX_VZ_CONNECTION_TIMEOUT_US) -+ { -+ KM_CONNECTION_CACHEOP(Fw, INVALIDATE); -+ if (KM_FW_CONNECTION_IS(READY, psDevInfo)) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware Connection is Ready. Initialisation proceeding.", __func__)); -+ break; -+ } -+ else -+ { -+ OSSleepms(10); -+ } -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ KM_CONNECTION_CACHEOP(Fw, INVALIDATE); -+ if (!KM_FW_CONNECTION_IS(READY, psDevInfo)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Timed out waiting for the Firmware to enter Ready state.", __func__)); -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+#endif /* RGX_VZ_STATIC_CARVEOUT_FW_HEAPS */ -+ -+ /* Guests can only access the register holding the connection states, -+ * after the GPU is confirmed to be powered up */ -+ KM_SET_OS_CONNECTION(READY, psDevInfo); -+ KM_CONNECTION_CACHEOP(Os, FLUSH); -+ -+ OSWriteDeviceMem32WithWMB(pbUpdatedFlag, IMG_FALSE); -+ -+ /* Kick an initial dummy command to make the firmware initialise all -+ * its internal guest OS data structures and compatibility information. -+ * Use the lower-level RGXSendCommand() for the job, to make -+ * sure only 1 KCCB command is issued to the firmware. 
-+ * The default RGXFWHealthCheckCmd() prefaces each HealthCheck command with -+ * a pre-kick cache command which can interfere with the FW-KM init handshake. */ -+ { -+ RGXFWIF_KCCB_CMD sCmpKCCBCmd; -+ sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK; -+ -+ eError = RGXSendCommand(psDevInfo, &sCmpKCCBCmd, PDUMP_FLAGS_CONTINUOUS); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXSendCommand()"); -+ } -+ } -+ else -+ { -+ KM_SET_OS_CONNECTION(READY, psDevInfo); -+ KM_CONNECTION_CACHEOP(Os, FLUSH); -+ -+#if defined(SUPPORT_AUTOVZ) -+ /* Disable power callbacks that should not be run on virtualised drivers after the GPU -+ * is fully initialised: system layer pre/post functions and driver idle requests. -+ * The original device RGX Pre/Post functions are called from this Vz wrapper. */ -+ PVRSRVSetPowerCallbacks(psDeviceNode, psDeviceNode->psPowerDev, -+ &RGXVzPrePowerState, &RGXVzPostPowerState, -+ NULL, NULL, NULL, NULL); -+ -+ /* During first-time boot the flag is set here, while subsequent reboots will already -+ * have set it earlier in RGXInit. Set to true from this point onwards in any case. */ -+ psDeviceNode->bAutoVzFwIsUp = IMG_TRUE; -+#endif -+ } -+ -+ /* Wait for the firmware to accept and enable the connection with this OS by setting its state to Active */ -+ LOOP_UNTIL_TIMEOUT(RGX_VZ_CONNECTION_TIMEOUT_US) -+ { -+ KM_CONNECTION_CACHEOP(Fw, INVALIDATE); -+ if (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is Active. Initialisation proceeding.", __func__)); -+ break; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Active state. Waiting for Firmware ...", __func__)); -+ OSSleepms(10); -+ } -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ KM_CONNECTION_CACHEOP(Fw, INVALIDATE); -+ if (!KM_FW_CONNECTION_IS(ACTIVE, psDevInfo)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Timed out waiting for the Firmware to enter Active state.", __func__)); -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* poll on the Firmware supplying the compatibility data */ -+ LOOP_UNTIL_TIMEOUT(ui32FwTimeout) -+ { -+ if (*pbUpdatedFlag) -+ { -+ break; -+ } -+ OSSleepms(10); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ PVR_LOG_RETURN_IF_FALSE(*pbUpdatedFlag, "Firmware does not respond with compatibility data. ", PVRSRV_ERROR_TIMEOUT); -+ } -+ -+ KM_SET_OS_CONNECTION(ACTIVE, psDevInfo); -+ KM_CONNECTION_CACHEOP(Os, FLUSH); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+#if defined(TRACK_FW_BOOT) -+static INLINE void RGXCheckFWBootStage(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ FW_BOOT_STAGE eStage; -+ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) -+ { -+ /* Boot stage temporarily stored to the register below */ -+ eStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM, -+ RGX_FW_BOOT_STAGE_REGISTER); -+ } -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ IMG_BYTE *pbBootData; -+ -+ if (PVRSRV_OK != DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, -+ (void**)&pbBootData)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Could not acquire pointer to FW boot stage", __func__)); -+ eStage = FW_BOOT_STAGE_NOT_AVAILABLE; -+ } -+ else -+ { -+ pbBootData += RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA); -+ -+ eStage = *(FW_BOOT_STAGE*)&pbBootData[RGXMIPSFW_BOOT_STAGE_OFFSET]; -+ -+ if (eStage == FW_BOOT_STAGE_TLB_INIT_FAILURE) -+ { -+ RGXMIPSFW_BOOT_DATA *psBootData = -+ (RGXMIPSFW_BOOT_DATA*) (pbBootData + RGXMIPSFW_BOOTLDR_CONF_OFFSET); -+ -+ PVR_LOG(("MIPS TLB could not be initialised. 
Boot data info:" -+ " num PT pages %u, log2 PT page size %u, PT page addresses" -+ " %"IMG_UINT64_FMTSPECx " %"IMG_UINT64_FMTSPECx -+ " %"IMG_UINT64_FMTSPECx " %"IMG_UINT64_FMTSPECx, -+ psBootData->ui32PTNumPages, -+ psBootData->ui32PTLog2PageSize, -+ psBootData->aui64PTPhyAddr[0U], -+ psBootData->aui64PTPhyAddr[1U], -+ psBootData->aui64PTPhyAddr[2U], -+ psBootData->aui64PTPhyAddr[3U])); -+ } -+ -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc); -+ } -+ } -+#endif -+ else -+ { -+ eStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SCRATCH14); -+ } -+ -+ PVR_LOG(("%s: FW reached boot stage %i/%i.", -+ __func__, eStage, FW_BOOT_INIT_DONE)); -+} -+#endif -+ -+static INLINE PVRSRV_ERROR RGXDoStart(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) -+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; -+ -+ if (psDevConfig->pfnTDRGXStart == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: TDRGXStart not implemented!")); -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+ } -+ -+ eError = psDevConfig->pfnTDRGXStart(psDevConfig->hSysData); -+#else -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ eError = RGXStart(&psDevInfo->sLayerParams); -+#endif -+ -+ return eError; -+} -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS) -+/* -+ * To validate the MTS unit we do the following: -+ * - Immediately after firmware loading for each OSID -+ * - Write the OSid to a memory location shared with FW -+ * - Kick the register of that OSid -+ * (Uncounted, DM 0) -+ * - FW clears the memory location if OSid matches -+ * - Host checks that memory location is cleared -+ * -+ * See firmware/rgxfw_bg.c -+ */ -+static PVRSRV_ERROR RGXVirtualisationPowerupSidebandTest(PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGXFWIF_SYSINIT *psFwSysInit, -+ PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ IMG_UINT32 ui32ScheduleRegister; -+ IMG_UINT32 ui32OSid; -+ IMG_UINT32 ui32KickType; -+ IMG_UINT32 ui32OsRegBanksMapped = (psDeviceNode->psDevConfig->ui32RegsSize / RGX_VIRTUALISATION_REG_SIZE_PER_OS); -+ -+ /* Nothing to do if the device does not support GPU_VIRTUALISATION */ -+ if (!PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, GPU_VIRTUALISATION)) -+ { -+ return PVRSRV_OK; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "Testing per-os kick registers:")); -+ -+ ui32OsRegBanksMapped = MIN(ui32OsRegBanksMapped, GPUVIRT_VALIDATION_NUM_OS); -+ -+ if (ui32OsRegBanksMapped != RGXFW_MAX_NUM_OSIDS) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "The register bank mapped into kernel VA does not cover all OS' registers:")); -+ PVR_DPF((PVR_DBG_WARNING, "Maximum OS count = %d / Per-os register banks mapped = %d", RGXFW_MAX_NUM_OSIDS, ui32OsRegBanksMapped)); -+ PVR_DPF((PVR_DBG_WARNING, "Only first %d MTS registers will be tested", ui32OsRegBanksMapped)); -+ } -+ -+ ui32KickType = RGX_CR_MTS_SCHEDULE_DM_DM0 | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED; -+ -+ for (ui32OSid = 0; ui32OSid < ui32OsRegBanksMapped; ui32OSid++) -+ { -+ /* set Test field */ -+ psFwSysInit->ui32OSKickTest = (ui32OSid << RGXFWIF_KICK_TEST_OSID_SHIFT) | RGXFWIF_KICK_TEST_ENABLED_BIT; -+ -+#if defined(PDUMP) -+ DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc, -+ offsetof(RGXFWIF_SYSINIT, ui32OSKickTest), -+ sizeof(psFwSysInit->ui32OSKickTest), -+ PDUMP_FLAGS_CONTINUOUS); -+#endif -+ -+ /* Force a read-back to memory to avoid posted writes on certain buses */ -+ OSWriteMemoryBarrier(&psFwSysInit->ui32OSKickTest); -+ 
RGXFwSharedMemCacheOpValue(psFwSysInit->ui32OSKickTest, FLUSH); -+ -+ /* kick register */ -+ ui32ScheduleRegister = RGX_CR_MTS_SCHEDULE + (ui32OSid * RGX_VIRTUALISATION_REG_SIZE_PER_OS); -+ PVR_DPF((PVR_DBG_MESSAGE, " Testing OS: %u, Kick Reg: %X", -+ ui32OSid, -+ ui32ScheduleRegister)); -+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32ScheduleRegister, ui32KickType); -+ -+#if defined(PDUMP) -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "VZ sideband test, kicking MTS register %u", ui32OSid); -+ -+ PDUMPREG32(psDeviceNode, RGX_PDUMPREG_NAME, -+ ui32ScheduleRegister, ui32KickType, PDUMP_FLAGS_CONTINUOUS); -+ -+ DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfSysInitMemDesc, -+ offsetof(RGXFWIF_SYSINIT, ui32OSKickTest), -+ 0, -+ 0xFFFFFFFF, -+ PDUMP_POLL_OPERATOR_EQUAL, -+ PDUMP_FLAGS_CONTINUOUS); -+#endif -+ -+#if !defined(NO_HARDWARE) -+ OSMemoryBarrier((IMG_BYTE*) psDevInfo->pvRegsBaseKM + ui32ScheduleRegister); -+ -+ /* Wait test enable bit to be unset */ -+ if (PVRSRVPollForValueKM(psDeviceNode, -+ (volatile IMG_UINT32 __iomem *)&psFwSysInit->ui32OSKickTest, -+ 0, -+ RGXFWIF_KICK_TEST_ENABLED_BIT, -+ POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP, -+ RGXFwSharedMemCacheOpExecPfn) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Testing OS %u kick register failed: firmware did not clear test location (contents: 0x%X)", -+ ui32OSid, -+ psFwSysInit->ui32OSKickTest)); -+ -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ /* Check that the value is what we expect */ -+ if (psFwSysInit->ui32OSKickTest != 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Testing OS %u kick register failed: firmware wrote 0x%X to test location", -+ ui32OSid, -+ psFwSysInit->ui32OSKickTest)); -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, " PASS")); -+#endif -+ } -+ -+ PVR_LOG(("MTS passed sideband tests")); -+ return PVRSRV_OK; -+} -+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION_MTS) */ -+ -+#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) -+#define SCRATCH_VALUE (0x12345678U) -+ -+static void RGXRiscvDebugModuleTest(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ void *pvAppHintState = NULL; -+ const IMG_BOOL bDefaultFalse = IMG_FALSE; -+ IMG_BOOL bRunRiscvDmiTest; -+ -+ IMG_UINT32 *pui32FWCode = NULL; -+ PVRSRV_ERROR eError; -+ -+ OSCreateAppHintState(&pvAppHintState); -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, RiscvDmiTest, -+ &bDefaultFalse, &bRunRiscvDmiTest); -+ OSFreeAppHintState(pvAppHintState); -+ -+ if (bRunRiscvDmiTest == IMG_FALSE) -+ { -+ return; -+ } -+ -+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pui32FWCode); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Error acquiring FW code memory pointer (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ } -+ -+ PDumpIfKM(psDevInfo->psDeviceNode, "ENABLE_RISCV_DMI_TEST", PDUMP_FLAGS_CONTINUOUS); -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "DMI_TEST BEGIN"); -+ -+ RGXRiscvHalt(psDevInfo); -+ -+ /* -+ * Test RISC-V register reads/writes. -+ * RGXRiscv[Write/Poll]Reg are used to access internal RISC-V registers -+ * via debug module. 
-+ */ -+ -+ /* Write RISC-V mscratch register */ -+ RGXRiscvWriteReg(psDevInfo, RGXRISCVFW_MSCRATCH_ADDR, SCRATCH_VALUE); -+ /* Read RISC-V misa register (compare against default standard value) */ -+ RGXRiscvPollReg(psDevInfo, RGXRISCVFW_MISA_ADDR, RGXRISCVFW_MISA_VALUE); -+ /* Read RISC-V mscratch register (compare against previously written value) */ -+ RGXRiscvPollReg(psDevInfo, RGXRISCVFW_MSCRATCH_ADDR, SCRATCH_VALUE); -+ -+ /* -+ * Test RISC-V memory reads/writes. -+ * RGXRiscv[Write/Poll]Mem are used to access system memory via debug module -+ * (from RISC-V point of view). -+ */ -+ -+ if (pui32FWCode != NULL) -+ { -+ IMG_UINT32 ui32Tmp; -+ IMG_UINT32 ui32FWBootCodeRemap; -+ -+ /* Get the FW boot code remap */ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) >= 4) -+ { -+ ui32FWBootCodeRemap = RGXRISCVFW_BOOTLDR_CODE_REMAP_SECURE; -+ } -+ else -+#endif -+ { -+ ui32FWBootCodeRemap = RGXRISCVFW_BOOTLDR_CODE_REMAP; -+ } -+ -+ /* Acquire pointer to FW code (bootloader) */ -+ pui32FWCode += RGXGetFWImageSectionOffset(NULL, RISCV_UNCACHED_CODE) / sizeof(IMG_UINT32); -+ /* Save FW code at address (bootloader) */ -+ ui32Tmp = *pui32FWCode; -+ -+ /* Write FW code at address (bootloader) */ -+ RGXWriteFWModuleAddr(psDevInfo, ui32FWBootCodeRemap, SCRATCH_VALUE); -+ /* Read FW code at address (bootloader + 4) (compare against value read from Host) */ -+ RGXRiscvPollMem(psDevInfo, ui32FWBootCodeRemap + 4, *(pui32FWCode + 1)); -+ /* Read FW code at address (bootloader) (compare against previously written value) */ -+ RGXRiscvPollMem(psDevInfo, ui32FWBootCodeRemap, SCRATCH_VALUE); -+ /* Restore FW code at address (bootloader) */ -+ RGXWriteFWModuleAddr(psDevInfo, ui32FWBootCodeRemap, ui32Tmp); -+ -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); -+ } -+ -+ /* -+ * Test GPU register reads/writes. -+ * RGXRiscv[Write/Poll]Mem are used to access GPU registers via debug module -+ * (from RISC-V point of view). -+ * Note that system memory and GPU register accesses both use the same -+ * debug module interface, targeting different address ranges. 
-+ */ -+ -+ /* Write SCRATCH0 from the Host */ -+ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_SCRATCH0, -+ SCRATCH_VALUE, PDUMP_FLAGS_CONTINUOUS); -+ /* Read SCRATCH0 */ -+ RGXRiscvPollMem(psDevInfo, RGXRISCVFW_SOCIF_BASE | RGX_CR_SCRATCH0, SCRATCH_VALUE); -+ /* Write SCRATCH0 */ -+ RGXWriteFWModuleAddr(psDevInfo, RGXRISCVFW_SOCIF_BASE | RGX_CR_SCRATCH0, ~SCRATCH_VALUE); -+ /* Read SCRATCH0 from the Host */ -+ PDUMPREGPOL(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_SCRATCH0, -+ ~SCRATCH_VALUE, 0xFFFFFFFFU, -+ PDUMP_FLAGS_CONTINUOUS, PDUMP_POLL_OPERATOR_EQUAL); -+ -+ RGXRiscvResume(psDevInfo); -+ -+ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "DMI_TEST END"); -+ PDumpFiKM(psDevInfo->psDeviceNode, "ENABLE_RISCV_DMI_TEST", PDUMP_FLAGS_CONTINUOUS); -+} -+#endif -+ -+/* -+ RGXPostPowerState -+*/ -+PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE hDevHandle, -+ PVRSRV_DEV_POWER_STATE eNewPowerState, -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if ((eNewPowerState != eCurrentPowerState) && -+ (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)) -+ { -+ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) -+ { -+ /* Update timer correlation related data */ -+ RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_POWER); -+ -+ /* Update GPU state counters */ -+ _RGXUpdateGPUUtilStats(psDevInfo); -+ -+ eError = RGXDoStart(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXDoStart", fail); -+ -+ OSMemoryBarrier(NULL); -+ -+ /* -+ * Check whether the FW has started by polling on bFirmwareStarted flag -+ */ -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfSysInit->bFirmwareStarted, -+ INVALIDATE); -+ if (PVRSRVPollForValueKM(psDeviceNode, -+ (IMG_UINT32 __iomem *)&psDevInfo->psRGXFWIfSysInit->bFirmwareStarted, -+ IMG_TRUE, -+ 0xFFFFFFFF, -+ POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP, -+ RGXFwSharedMemCacheOpExecPfn) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Polling for 'FW started' flag failed.")); -+ eError = PVRSRV_ERROR_TIMEOUT; -+ -+#if defined(TRACK_FW_BOOT) -+ RGXCheckFWBootStage(psDevInfo); -+#endif -+ -+ /* -+ * When bFirmwareStarted fails some info may be gained by doing the following -+ * debug dump but unfortunately it could be potentially dangerous if the reason -+ * for not booting is the GPU power is not ON. However, if we have reached this -+ * point the System Layer has returned without errors, we assume the GPU power -+ * is indeed ON. 
-+ */ -+ RGXDumpRGXDebugSummary(NULL, NULL, psDeviceNode->pvDevice, IMG_TRUE); -+ RGXDumpRGXRegisters(NULL, NULL, psDeviceNode->pvDevice); -+ -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPollForValueKM(bFirmwareStarted)", fail); -+ } -+ -+#if defined(PDUMP) -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "Wait for the Firmware to start."); -+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfSysInitMemDesc, -+ offsetof(RGXFWIF_SYSINIT, bFirmwareStarted), -+ IMG_TRUE, -+ 0xFFFFFFFFU, -+ PDUMP_POLL_OPERATOR_EQUAL, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "RGXPostPowerState: problem pdumping POL for psRGXFWIfSysInitMemDesc (%d)", -+ eError)); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemPDumpDevmemPol32", fail); -+ } -+ -+#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) -+ /* Check if the Validation IRQ flag is set */ -+ if ((psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_IRQ) != 0) -+ { -+ eError = PVRSRVValidateIrqs(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVValidateIrqs", fail); -+ } -+#endif /* defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) */ -+#endif /* defined(PDUMP) */ -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS) -+ eError = RGXVirtualisationPowerupSidebandTest(psDeviceNode, psDevInfo->psRGXFWIfSysInit, psDevInfo); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXVirtualisationPowerupSidebandTest", fail); -+#endif -+ -+#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) -+ RGXRiscvDebugModuleTest(psDevInfo); -+#endif -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp, -+ INVALIDATE); -+ PVRSRVSetFirmwareStartTime(psDeviceNode->psPowerDev, -+ psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp); -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp, -+ FLUSH); -+#endif -+ -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfSysInit->ui32MarkerVal, -+ INVALIDATE); -+ HTBSyncPartitionMarker(psDevInfo->psRGXFWIfSysInit->ui32MarkerVal); -+ -+#if defined(SUPPORT_LINUX_DVFS) -+ eError = ResumeDVFS(psDeviceNode); -+ PVR_LOG_GOTO_IF_ERROR(eError, "ResumeDVFS", fail); -+#endif -+ } -+ } -+ -+ PDUMPCOMMENT(psDeviceNode, -+ "RGXPostPowerState: Current state: %d, New state: %d", -+ eCurrentPowerState, eNewPowerState); -+ -+ return eError; -+ -+fail: -+ psDevInfo->bRGXPowered = IMG_FALSE; -+ -+ return eError; -+} -+ -+/* -+ RGXPreClockSpeedChange -+*/ -+PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle, -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ const PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ const RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; -+ const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); -+ PVR_UNREFERENCED_PARAMETER(psRGXData); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "RGXPreClockSpeedChange: RGX clock speed was %uHz", -+ psRGXData->psRGXTimingInfo->ui32CoreClockSpeed)); -+ -+ RGXFwSharedMemCacheOpValue(psFwSysData->ePowState, -+ INVALIDATE); -+ -+ if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) && -+ (psFwSysData->ePowState != RGXFWIF_POW_OFF)) -+ { -+ /* Update GPU frequency and timer correlation related data */ -+ RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_DVFS); -+ } -+ -+ return eError; -+} -+ -+/* -+ RGXPostClockSpeedChange -+*/ 
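/*
 * RGXPostClockSpeedChange publishes the new core clock speed to the firmware:
 * it writes the value into the shared runtime configuration and, provided the
 * GPU is powered, restarts timer correlation and queues a
 * RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE command so the firmware adopts the new
 * frequency for its own timing.
 */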
-+PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle, -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ const PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ const RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; -+ const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 ui32NewClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ /* Update runtime configuration with the new value */ -+ OSWriteDeviceMem32WithWMB(&psDevInfo->psRGXFWIfRuntimeCfg->ui32CoreClockSpeed, -+ ui32NewClockSpeed); -+ RGXFwSharedMemCacheOpValue(psDevInfo->psRGXFWIfRuntimeCfg->ui32CoreClockSpeed, FLUSH); -+ -+ RGXFwSharedMemCacheOpValue(psFwSysData->ePowState, -+ INVALIDATE); -+ if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) && -+ (psFwSysData->ePowState != RGXFWIF_POW_OFF)) -+ { -+ RGXFWIF_KCCB_CMD sCOREClkSpeedChangeCmd; -+ IMG_UINT32 ui32CmdKCCBSlot; -+ -+ RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_DVFS); -+ -+ sCOREClkSpeedChangeCmd.eCmdType = RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE; -+ sCOREClkSpeedChangeCmd.uCmdData.sCoreClkSpeedChangeData.ui32NewClockSpeed = ui32NewClockSpeed; -+ -+ PDUMPCOMMENT(psDeviceNode, "Scheduling CORE clock speed change command"); -+ -+ PDUMPPOWCMDSTART(psDeviceNode); -+ eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, -+ &sCOREClkSpeedChangeCmd, -+ PDUMP_FLAGS_NONE, -+ &ui32CmdKCCBSlot); -+ PDUMPPOWCMDEND(psDeviceNode); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PDUMPCOMMENT(psDeviceNode, "Scheduling CORE clock speed change command failed"); -+ PVR_DPF((PVR_DBG_ERROR, "RGXPostClockSpeedChange: Scheduling KCCB command failed. Error:%u", eError)); -+ return eError; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "RGXPostClockSpeedChange: RGX clock speed changed to %uHz", -+ psRGXData->psRGXTimingInfo->ui32CoreClockSpeed)); -+ } -+ -+ return eError; -+} -+ -+/*! -+ ****************************************************************************** -+ -+ @Function RGXDustCountChange -+ -+ @Description -+ -+ Does change of number of DUSTs -+ -+ @Input hDevHandle : RGX Device Node -+ @Input ui32NumberOfDusts : Number of DUSTs to make transition to -+ -+ @Return PVRSRV_ERROR : -+ -+ ******************************************************************************/ -+PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle, -+ IMG_UINT32 ui32NumberOfDusts) -+{ -+ -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_ERROR eError; -+ RGXFWIF_KCCB_CMD sDustCountChange; -+ IMG_UINT32 ui32MaxAvailableDusts = psDevInfo->sDevFeatureCfg.ui32MAXDustCount; -+ IMG_UINT32 ui32CmdKCCBSlot; -+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); -+ -+ if (ui32NumberOfDusts > ui32MaxAvailableDusts) -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Invalid number of DUSTs (%u) while expecting value within <0,%u>. 
Error:%u", -+ __func__, -+ ui32NumberOfDusts, -+ ui32MaxAvailableDusts, -+ eError)); -+ return eError; -+ } -+ -+ psRuntimeCfg->ui32DefaultDustsNumInit = ui32NumberOfDusts; -+ OSWriteMemoryBarrier(&psRuntimeCfg->ui32DefaultDustsNumInit); -+ RGXFwSharedMemCacheOpValue(psRuntimeCfg->ui32DefaultDustsNumInit, FLUSH); -+ -+#if !defined(NO_HARDWARE) -+ { -+ const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; -+ RGXFwSharedMemCacheOpValue(psFwSysData->ePowState, -+ INVALIDATE); -+ -+ if (psFwSysData->ePowState == RGXFWIF_POW_OFF) -+ { -+ return PVRSRV_OK; -+ } -+ -+ if (psFwSysData->ePowState != RGXFWIF_POW_FORCED_IDLE) -+ { -+ eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED; -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Attempt to change dust count when not IDLE", -+ __func__)); -+ return eError; -+ } -+ } -+#endif -+ -+ eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", -+ __func__)); -+ return eError; -+ } -+ -+ sDustCountChange.eCmdType = RGXFWIF_KCCB_CMD_POW; -+ sDustCountChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_NUM_UNITS_CHANGE; -+ sDustCountChange.uCmdData.sPowData.uPowerReqData.ui32NumOfDusts = ui32NumberOfDusts; -+ -+ PDUMPCOMMENT(psDeviceNode, -+ "Scheduling command to change Dust Count to %u", -+ ui32NumberOfDusts); -+ eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, -+ &sDustCountChange, -+ PDUMP_FLAGS_NONE, -+ &ui32CmdKCCBSlot); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PDUMPCOMMENT(psDeviceNode, -+ "Scheduling command to change Dust Count failed. Error:%u", -+ eError); -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Scheduling KCCB to change Dust Count failed. Error:%u", -+ __func__, eError)); -+ return eError; -+ } -+ -+ /* Wait for the firmware to answer. 
*/ -+ eError = RGXPollForGPCommandCompletion(psDeviceNode, -+ psDevInfo->psPowSyncPrim->pui32LinAddr, -+ 0x1, 0xFFFFFFFF); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Timeout waiting for idle request", __func__)); -+ return eError; -+ } -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDeviceNode, -+ "%s: Poll for Kernel SyncPrim [0x%p] on DM %d", -+ __func__, psDevInfo->psPowSyncPrim->pui32LinAddr, -+ RGXFWIF_DM_GP); -+ -+ SyncPrimPDumpPol(psDevInfo->psPowSyncPrim, -+ 1, -+ 0xffffffff, -+ PDUMP_POLL_OPERATOR_EQUAL, -+ 0); -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ @Function RGXAPMLatencyChange -+*/ -+PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, -+ IMG_UINT32 ui32ActivePMLatencyms, -+ IMG_BOOL bActivePMLatencyPersistant) -+{ -+ -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_ERROR eError; -+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; -+ IMG_UINT32 ui32CmdKCCBSlot; -+ PVRSRV_DEV_POWER_STATE ePowerState; -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); -+ -+ eError = PVRSRVPowerLock(psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Failed to acquire power lock")); -+ return eError; -+ } -+ -+ /* Update runtime configuration with the new values and ensure the -+ * new APM latency is written to memory before requesting the FW to -+ * read it -+ */ -+ psRuntimeCfg->ui32ActivePMLatencyms = ui32ActivePMLatencyms; -+ psRuntimeCfg->bActivePMLatencyPersistant = bActivePMLatencyPersistant; -+ OSWriteMemoryBarrier(&psRuntimeCfg->bActivePMLatencyPersistant); -+ RGXFwSharedMemCacheOpValue(psRuntimeCfg->ui32ActivePMLatencyms, FLUSH); -+ RGXFwSharedMemCacheOpValue(psRuntimeCfg->bActivePMLatencyPersistant, FLUSH); -+ -+ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); -+ -+ if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF)) -+ { -+ RGXFWIF_KCCB_CMD sActivePMLatencyChange; -+ sActivePMLatencyChange.eCmdType = RGXFWIF_KCCB_CMD_POW; -+ sActivePMLatencyChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_APM_LATENCY_CHANGE; -+ -+ PDUMPCOMMENT(psDeviceNode, -+ "Scheduling command to change APM latency to %u", -+ ui32ActivePMLatencyms); -+ eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, -+ &sActivePMLatencyChange, -+ PDUMP_FLAGS_NONE, -+ &ui32CmdKCCBSlot); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PDUMPCOMMENT(psDeviceNode, -+ "Scheduling command to change APM latency failed. Error:%u", -+ eError); -+ PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Scheduling KCCB to change APM latency failed. Error:%u", eError)); -+ goto ErrorExit; -+ } -+ } -+ -+ErrorExit: -+ PVRSRVPowerUnlock(psDeviceNode); -+ -+ return eError; -+} -+ -+/* -+ RGXActivePowerRequest -+*/ -+PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); -+ -+ -+ psDevInfo->ui32ActivePMReqTotal++; -+ -+ /* Powerlock to avoid further requests from racing with the FW hand-shake -+ * from now on (previous kicks to this point are detected by the FW) -+ * PVRSRVPowerLock is replaced with PVRSRVPowerTryLock to avoid -+ * potential dead lock between PDumpWriteLock and PowerLock -+ * during 'DriverLive + PDUMP=1 + EnableAPM=1'. 
-+ */ -+ eError = PVRSRVPowerTryLock(psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ PVR_LOG_ERROR(eError, "PVRSRVPowerTryLock"); -+ } -+ else -+ { -+ psDevInfo->ui32ActivePMReqRetry++; -+ } -+ goto _RGXActivePowerRequest_PowerLock_failed; -+ } -+ -+ RGXFwSharedMemCacheOpValue(psFwSysData->ePowState, -+ INVALIDATE); -+ -+ /* Check again for IDLE once we have the power lock */ -+ if (psFwSysData->ePowState == RGXFWIF_POW_IDLE) -+ { -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ PVRSRVSetFirmwareHandshakeIdleTime(psDeviceNode->psPowerDev, -+ RGXReadHWTimerReg(psDevInfo)-psFwSysData->ui64StartIdleTime); -+#endif -+ -+ PDUMPPOWCMDSTART(psDeviceNode); -+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, -+ PVRSRV_DEV_POWER_STATE_OFF, -+ PVRSRV_POWER_FLAGS_NONE); -+ PDUMPPOWCMDEND(psDeviceNode); -+ -+ if (eError == PVRSRV_OK) -+ { -+ psDevInfo->ui32ActivePMReqOk++; -+ } -+ else if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED) -+ { -+ psDevInfo->ui32ActivePMReqDenied++; -+ } -+ } -+ else -+ { -+ psDevInfo->ui32ActivePMReqNonIdle++; -+ } -+ -+ PVRSRVPowerUnlock(psDeviceNode); -+ -+_RGXActivePowerRequest_PowerLock_failed: -+ -+ return eError; -+} -+/* -+ RGXForcedIdleRequest -+*/ -+ -+#define RGX_FORCED_IDLE_RETRY_COUNT 10 -+ -+PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGXFWIF_KCCB_CMD sPowCmd; -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32RetryCount = 0; -+ IMG_UINT32 ui32CmdKCCBSlot; -+#if !defined(NO_HARDWARE) -+ const RGXFWIF_SYSDATA *psFwSysData; -+#endif -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); -+ -+#if !defined(NO_HARDWARE) -+ psFwSysData = psDevInfo->psRGXFWIfFwSysData; -+ RGXFwSharedMemCacheOpValue(psFwSysData->ePowState, -+ INVALIDATE); -+ -+ /* Firmware already forced idle */ -+ if (psFwSysData->ePowState == RGXFWIF_POW_FORCED_IDLE) -+ { -+ return PVRSRV_OK; -+ } -+ -+ /* Firmware is not powered. Sometimes this is permitted, for instance we were forcing idle to power down. */ -+ if (psFwSysData->ePowState == RGXFWIF_POW_OFF) -+ { -+ return (bDeviceOffPermitted) ? PVRSRV_OK : PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED; -+ } -+#endif -+ -+ eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", -+ __func__)); -+ return eError; -+ } -+ sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; -+ sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ; -+ sPowCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_FORCE_IDLE; -+ -+ PDUMPCOMMENT(psDeviceNode, -+ "RGXForcedIdleRequest: Sending forced idle command"); -+ -+ /* Send one forced IDLE command to GP */ -+ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, -+ &sPowCmd, -+ PDUMP_FLAGS_NONE, -+ &ui32CmdKCCBSlot); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send idle request", __func__)); -+ return eError; -+ } -+ -+ /* Wait for GPU to finish current workload */ -+ do { -+ eError = RGXPollForGPCommandCompletion(psDeviceNode, -+ psDevInfo->psPowSyncPrim->pui32LinAddr, -+ 0x1, 0xFFFFFFFF); -+ if ((eError == PVRSRV_OK) || (ui32RetryCount == RGX_FORCED_IDLE_RETRY_COUNT)) -+ { -+ break; -+ } -+ ui32RetryCount++; -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Request timeout. 
Retry %d of %d", -+ __func__, ui32RetryCount, RGX_FORCED_IDLE_RETRY_COUNT)); -+ } while (IMG_TRUE); -+ -+ if (eError != PVRSRV_OK) -+ { -+ RGXFWNotifyHostTimeout(psDevInfo); -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Idle request failed. Firmware potentially left in forced idle state", -+ __func__)); -+ return eError; -+ } -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDeviceNode, -+ "RGXForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d", -+ psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP); -+ -+ SyncPrimPDumpPol(psDevInfo->psPowSyncPrim, -+ 1, -+ 0xffffffff, -+ PDUMP_POLL_OPERATOR_EQUAL, -+ 0); -+#endif -+ -+#if !defined(NO_HARDWARE) -+ /* Check the firmware state for idleness */ -+ RGXFwSharedMemCacheOpValue(psFwSysData->ePowState, -+ INVALIDATE); -+ if (psFwSysData->ePowState != RGXFWIF_POW_FORCED_IDLE) -+ { -+ return PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED; -+ } -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ RGXCancelForcedIdleRequest -+*/ -+PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGXFWIF_KCCB_CMD sPowCmd; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 ui32CmdKCCBSlot; -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); -+ -+ eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", -+ __func__)); -+ goto ErrorExit; -+ } -+ -+ /* Send the IDLE request to the FW */ -+ sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; -+ sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ; -+ sPowCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_CANCEL_FORCED_IDLE; -+ -+ PDUMPCOMMENT(psDeviceNode, -+ "RGXForcedIdleRequest: Sending cancel forced idle command"); -+ -+ /* Send cancel forced IDLE command to GP */ -+ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, -+ &sPowCmd, -+ PDUMP_FLAGS_NONE, -+ &ui32CmdKCCBSlot); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PDUMPCOMMENT(psDeviceNode, -+ "RGXCancelForcedIdleRequest: Failed to send cancel IDLE request for DM%d", -+ RGXFWIF_DM_GP); -+ goto ErrorExit; -+ } -+ -+ /* Wait for the firmware to answer. */ -+ eError = RGXPollForGPCommandCompletion(psDeviceNode, -+ psDevInfo->psPowSyncPrim->pui32LinAddr, -+ 1, 0xFFFFFFFF); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Timeout waiting for cancel idle request", __func__)); -+ goto ErrorExit; -+ } -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDeviceNode, -+ "RGXCancelForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d", -+ psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP); -+ -+ SyncPrimPDumpPol(psDevInfo->psPowSyncPrim, -+ 1, -+ 0xffffffff, -+ PDUMP_POLL_OPERATOR_EQUAL, -+ 0); -+#endif -+ -+ return eError; -+ -+ErrorExit: -+ PVR_DPF((PVR_DBG_ERROR, "%s: Firmware potentially left in forced idle state", __func__)); -+ return eError; -+} -+ -+/*! -+ ****************************************************************************** -+ -+ @Function PVRSRVGetNextDustCount -+ -+ @Description -+ -+ Calculate a sequence of dust counts to achieve full transition coverage. -+ We increment two counts of dusts and switch up and down between them. -+ It does contain a few redundant transitions. If two dust exist, the -+ output transitions should be as follows. -+ -+ 0->1, 0<-1, 0->2, 0<-2, (0->1) -+ 1->1, 1->2, 1<-2, (1->2) -+ 2->2, (2->0), -+ 0->0. Repeat. -+ -+ Redundant transitions in brackets. 
-+ -+ @Input psDustReqState : Counter state used to calculate next dust count -+ @Input ui32DustCount : Number of dusts in the core -+ -+ @Return PVRSRV_ERROR -+ -+ ******************************************************************************/ -+IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustReqState, IMG_UINT32 ui32DustCount) -+{ -+ if (psDustReqState->bToggle) -+ { -+ psDustReqState->ui32DustCount2++; -+ } -+ -+ if (psDustReqState->ui32DustCount2 > ui32DustCount) -+ { -+ psDustReqState->ui32DustCount1++; -+ psDustReqState->ui32DustCount2 = psDustReqState->ui32DustCount1; -+ } -+ -+ if (psDustReqState->ui32DustCount1 > ui32DustCount) -+ { -+ psDustReqState->ui32DustCount1 = 0; -+ psDustReqState->ui32DustCount2 = 0; -+ } -+ -+ psDustReqState->bToggle = !psDustReqState->bToggle; -+ -+ return (psDustReqState->bToggle) ? psDustReqState->ui32DustCount1 : psDustReqState->ui32DustCount2; -+} -+ -+/****************************************************************************** -+ End of file (rgxpower.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxpower.h b/drivers/gpu/drm/img-rogue/rgxpower.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxpower.h -@@ -0,0 +1,286 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX power header file -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the RGX power -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGXPOWER_H) -+#define RGXPOWER_H -+ -+#include "pvrsrv_error.h" -+#include "img_types.h" -+#include "servicesext.h" -+#include "rgxdevice.h" -+ -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXPrePowerState -+ -+ @Description -+ -+ does necessary preparation before power state transition -+ -+ @Input hDevHandle : RGX Device Node -+ @Input eNewPowerState : New power state -+ @Input eCurrentPowerState : Current power state -+ -+ @Return PVRSRV_ERROR : -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE hDevHandle, -+ PVRSRV_DEV_POWER_STATE eNewPowerState, -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXPostPowerState -+ -+ @Description -+ -+ does necessary preparation after power state transition -+ -+ @Input hDevHandle : RGX Device Node -+ @Input eNewPowerState : New power state -+ @Input eCurrentPowerState : Current power state -+ -+ @Return PVRSRV_ERROR : -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE hDevHandle, -+ PVRSRV_DEV_POWER_STATE eNewPowerState, -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXVzPrePowerState -+ -+ @Description -+ -+ does necessary preparation before power state transition on a vz driver -+ -+ @Input hDevHandle : RGX Device Node -+ @Input eNewPowerState : New power state -+ @Input eCurrentPowerState : Current power state -+ -+ @Return PVRSRV_ERROR : -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXVzPrePowerState(IMG_HANDLE hDevHandle, -+ PVRSRV_DEV_POWER_STATE eNewPowerState, -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXVzPostPowerState -+ -+ @Description -+ -+ does necessary preparation after power state transition on a vz driver -+ -+ @Input hDevHandle : RGX Device Node -+ @Input eNewPowerState : New power state -+ @Input eCurrentPowerState : Current power state -+ -+ @Return PVRSRV_ERROR : -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE hDevHandle, -+ PVRSRV_DEV_POWER_STATE eNewPowerState, -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags); -+ -+/*! 
-+****************************************************************************** -+ -+ @Function RGXPreClockSpeedChange -+ -+ @Description -+ -+ Does processing required before an RGX clock speed change. -+ -+ @Input hDevHandle : RGX Device Node -+ @Input eCurrentPowerState : Power state of the device -+ -+ @Return PVRSRV_ERROR : -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle, -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXPostClockSpeedChange -+ -+ @Description -+ -+ Does processing required after an RGX clock speed change. -+ -+ @Input hDevHandle : RGX Device Node -+ @Input eCurrentPowerState : Power state of the device -+ -+ @Return PVRSRV_ERROR : -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle, -+ PVRSRV_DEV_POWER_STATE eCurrentPowerState); -+ -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXDustCountChange -+ -+ @Description Change of number of DUSTs -+ -+ @Input hDevHandle : RGX Device Node -+ @Input ui32NumberOfDusts : Number of DUSTs to make transition to -+ -+ @Return PVRSRV_ERROR : -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle, -+ IMG_UINT32 ui32NumberOfDusts); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXAPMLatencyChange -+ -+ @Description -+ -+ Changes the wait duration used before firmware indicates IDLE. -+ Reducing this value will cause the firmware to shut off faster and -+ more often but may increase bubbles in GPU scheduling due to the added -+ power management activity. If bPersistent is NOT set, APM latency will -+ return back to system default on power up. -+ -+ @Input hDevHandle : RGX Device Node -+ @Input ui32ActivePMLatencyms : Number of milliseconds to wait -+ @Input bActivePMLatencyPersistant : Set to ensure new value is not reset -+ -+ @Return PVRSRV_ERROR : -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, -+ IMG_UINT32 ui32ActivePMLatencyms, -+ IMG_BOOL bActivePMLatencyPersistant); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXActivePowerRequest -+ -+ @Description Initiate a handshake with the FW to power off the GPU -+ -+ @Input hDevHandle : RGX Device Node -+ -+ @Return PVRSRV_ERROR : -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXForcedIdleRequest -+ -+ @Description Initiate a handshake with the FW to idle the GPU -+ -+ @Input hDevHandle : RGX Device Node -+ -+ @Input bDeviceOffPermitted : Set to indicate device state being off is not -+ erroneous. -+ -+ @Return PVRSRV_ERROR : -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted); -+ -+/*! 
-+****************************************************************************** -+ -+ @Function RGXCancelForcedIdleRequest -+ -+ @Description Send a request to cancel idle to the firmware. -+ -+ @Input hDevHandle : RGX Device Node -+ -+ @Return PVRSRV_ERROR : -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle); -+ -+/*! -+****************************************************************************** -+ -+ @Function PVRSRVGetNextDustCount -+ -+ @Description -+ -+ Calculate a sequence of dust counts to achieve full transition coverage. -+ We increment two counts of dusts and switch up and down between them. -+ It does contain a few redundant transitions. If two dust exist, the -+ output transitions should be as follows. -+ -+ 0->1, 0<-1, 0->2, 0<-2, (0->1) -+ 1->1, 1->2, 1<-2, (1->2) -+ 2->2, (2->0), -+ 0->0. Repeat. -+ -+ Redundant transitions in brackets. -+ -+ @Input psDustReqState : Counter state used to calculate next dust count -+ @Input ui32DustCount : Number of dusts in the core -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustState, IMG_UINT32 ui32DustCount); -+ -+#endif /* RGXPOWER_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxregconfig.c b/drivers/gpu/drm/img-rogue/rgxregconfig.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxregconfig.c -@@ -0,0 +1,319 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX Register configuration -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX Regconfig routines -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "rgxregconfig.h" -+#include "pvr_debug.h" -+#include "rgxutils.h" -+#include "rgxfwutils.h" -+#include "device.h" -+#include "sync_internal.h" -+#include "pdump_km.h" -+#include "pvrsrv.h" -+ -+PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT8 ui8RegCfgType) -+{ -+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; -+ RGXFWIF_REG_CFG_TYPE eRegCfgType = (RGXFWIF_REG_CFG_TYPE) ui8RegCfgType; -+ -+ PVR_UNREFERENCED_PARAMETER(psDevConnection); -+ -+ OSLockAcquire(psRegCfg->hLock); -+ -+ if (eRegCfgType < psRegCfg->eRegCfgTypeToPush) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Register configuration requested (%d) is not valid since it has to be at least %d." -+ " Configurations of different types need to go in order", -+ __func__, -+ eRegCfgType, -+ psRegCfg->eRegCfgTypeToPush)); -+ OSLockRelease(psRegCfg->hLock); -+ return PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE; -+ } -+ -+ psRegCfg->eRegCfgTypeToPush = eRegCfgType; -+ -+ OSLockRelease(psRegCfg->hLock); -+ -+ return eError; -+#else -+ PVR_UNREFERENCED_PARAMETER(psDevConnection); -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Feature disabled. 
Compile with SUPPORT_USER_REGISTER_CONFIGURATION", -+ __func__)); -+ return PVRSRV_ERROR_FEATURE_DISABLED; -+#endif -+} -+ -+PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32RegAddr, -+ IMG_UINT64 ui64RegValue, -+ IMG_UINT64 ui64RegMask) -+{ -+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGXFWIF_KCCB_CMD sRegCfgCmd; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ OSLockAcquire(psRegCfg->hLock); -+ -+ if (psRegCfg->bEnabled) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Cannot add record whilst register configuration active.", -+ __func__)); -+ OSLockRelease(psRegCfg->hLock); -+ return PVRSRV_ERROR_REG_CONFIG_ENABLED; -+ } -+ if (psRegCfg->ui32NumRegRecords == RGXFWIF_REG_CFG_MAX_SIZE) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Register configuration full.", -+ __func__)); -+ OSLockRelease(psRegCfg->hLock); -+ return PVRSRV_ERROR_REG_CONFIG_FULL; -+ } -+ -+ sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; -+ sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Addr = (IMG_UINT64) ui32RegAddr; -+ sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Value = ui64RegValue; -+ sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Mask = ui64RegMask; -+ sRegCfgCmd.uCmdData.sRegConfigData.eRegConfigType = psRegCfg->eRegCfgTypeToPush; -+ sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ADD; -+ -+ eError = RGXScheduleCommand(psDeviceNode->pvDevice, -+ RGXFWIF_DM_GP, -+ &sRegCfgCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: RGXScheduleCommand failed. Error:%u", -+ __func__, -+ eError)); -+ OSLockRelease(psRegCfg->hLock); -+ return eError; -+ } -+ -+ psRegCfg->ui32NumRegRecords++; -+ -+ OSLockRelease(psRegCfg->hLock); -+ -+ return eError; -+#else -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION", -+ __func__)); -+ return PVRSRV_ERROR_FEATURE_DISABLED; -+#endif -+} -+ -+PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGXFWIF_KCCB_CMD sRegCfgCmd; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ OSLockAcquire(psRegCfg->hLock); -+ -+ if (psRegCfg->bEnabled) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Attempt to clear register configuration whilst active.", -+ __func__)); -+ OSLockRelease(psRegCfg->hLock); -+ return PVRSRV_ERROR_REG_CONFIG_ENABLED; -+ } -+ -+ sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; -+ sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_CLEAR; -+ -+ eError = RGXScheduleCommand(psDeviceNode->pvDevice, -+ RGXFWIF_DM_GP, -+ &sRegCfgCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: RGXScheduleCommand failed. 
Error:%u", -+ __func__, -+ eError)); -+ OSLockRelease(psRegCfg->hLock); -+ return eError; -+ } -+ -+ psRegCfg->ui32NumRegRecords = 0; -+ psRegCfg->eRegCfgTypeToPush = RGXFWIF_REG_CFG_TYPE_PWR_ON; -+ -+ OSLockRelease(psRegCfg->hLock); -+ -+ return eError; -+#else -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION", -+ __func__)); -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ return PVRSRV_ERROR_FEATURE_DISABLED; -+#endif -+} -+ -+PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGXFWIF_KCCB_CMD sRegCfgCmd; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ OSLockAcquire(psRegCfg->hLock); -+ -+ sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; -+ sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ENABLE; -+ -+ eError = RGXScheduleCommand(psDeviceNode->pvDevice, -+ RGXFWIF_DM_GP, -+ &sRegCfgCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: RGXScheduleCommand failed. Error:%u", -+ __func__, -+ eError)); -+ OSLockRelease(psRegCfg->hLock); -+ return eError; -+ } -+ -+ psRegCfg->bEnabled = IMG_TRUE; -+ -+ OSLockRelease(psRegCfg->hLock); -+ -+ return eError; -+#else -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION", -+ __func__)); -+ return PVRSRV_ERROR_FEATURE_DISABLED; -+#endif -+} -+ -+PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGXFWIF_KCCB_CMD sRegCfgCmd; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); -+ -+ OSLockAcquire(psRegCfg->hLock); -+ -+ sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; -+ sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_DISABLE; -+ -+ eError = RGXScheduleCommand(psDeviceNode->pvDevice, -+ RGXFWIF_DM_GP, -+ &sRegCfgCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: RGXScheduleCommand failed. Error:%u", -+ __func__, -+ eError)); -+ OSLockRelease(psRegCfg->hLock); -+ return eError; -+ } -+ -+ psRegCfg->bEnabled = IMG_FALSE; -+ -+ OSLockRelease(psRegCfg->hLock); -+ -+ return eError; -+#else -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION", -+ __func__)); -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ return PVRSRV_ERROR_FEATURE_DISABLED; -+#endif -+} -+ -+/****************************************************************************** -+ End of file (rgxregconfig.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxregconfig.h b/drivers/gpu/drm/img-rogue/rgxregconfig.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxregconfig.h -@@ -0,0 +1,130 @@ -+/*************************************************************************/ /*! 
-+@File -+@Title RGX register configuration functionality -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the RGX register configuration functionality -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGXREGCONFIG_H) -+#define RGXREGCONFIG_H -+ -+#include "pvr_debug.h" -+#include "rgxutils.h" -+#include "rgxfwutils.h" -+#include "rgx_fwif_km.h" -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVRGXSetRegConfigTypeKM -+ -+ @Description -+ Server-side implementation of RGXSetRegConfig -+ -+ @Input psDeviceNode - RGX Device node -+ @Input ui8RegPowerIsland - Reg configuration -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT8 ui8RegPowerIsland); -+/*! 
-+******************************************************************************* -+ @Function PVRSRVRGXSetRegConfigKM -+ -+ @Description -+ Server-side implementation of RGXSetRegConfig -+ -+ @Input psDeviceNode - RGX Device node -+ @Input ui64RegAddr - Register address -+ @Input ui64RegValue - Reg value -+ @Input ui64RegMask - Reg mask -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+ -+PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui64RegAddr, -+ IMG_UINT64 ui64RegValue, -+ IMG_UINT64 ui64RegMask); -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVRGXClearRegConfigKM -+ -+ @Description -+ Server-side implementation of RGXClearRegConfig -+ -+ @Input psDeviceNode - RGX Device node -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVRGXEnableRegConfigKM -+ -+ @Description -+ Server-side implementation of RGXEnableRegConfig -+ -+ @Input psDeviceNode - RGX Device node -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+/*! -+******************************************************************************* -+ @Function PVRSRVRGXDisableRegConfigKM -+ -+ @Description -+ Server-side implementation of RGXDisableRegConfig -+ -+ @Input psDeviceNode - RGX Device node -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+#endif /* RGXREGCONFIG_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxshader.c b/drivers/gpu/drm/img-rogue/rgxshader.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxshader.c -@@ -0,0 +1,308 @@ -+/*************************************************************************/ /*! -+@File rgxshader.c -+@Title TQ Shader Load -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Shader code and info are shared for all context on the device. -+ If allocation doesn't already exist, read shader data from file -+ and allocate PMR memory. PMR memory is not deallocated until -+ device deinit. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "rgxshader.h" -+#include "osfunc_common.h" -+#include "rgxdevice.h" -+#include "pdump_km.h" -+#include "physmem.h" -+#include "ri_server.h" -+#include "pvr_ricommon.h" -+ -+static void -+RGXShaderReadHeader(OS_FW_IMAGE *psShaderFW, RGX_SHADER_HEADER *psHeader) -+{ -+ const void * pvData; -+ -+ pvData = OSFirmwareData(psShaderFW); -+ -+ OSDeviceMemCopy(psHeader, pvData, sizeof(RGX_SHADER_HEADER)); -+} -+ -+static size_t -+RGXShaderCLIMemSize(OS_FW_IMAGE *psShaderFW) -+{ -+ RGX_SHADER_HEADER sHeader; -+ -+ RGXShaderReadHeader(psShaderFW, &sHeader); -+ -+ return sHeader.ui32SizeClientMem; -+} -+ -+static size_t -+RGXShaderUSCMemSize(OS_FW_IMAGE *psShaderFW) -+{ -+ RGX_SHADER_HEADER sHeader; -+ -+ RGXShaderReadHeader(psShaderFW, &sHeader); -+ -+ return sHeader.ui32SizeFragment; -+} -+ -+static void * -+RGXShaderCLIMem(OS_FW_IMAGE *psShaderFW) -+{ -+ return (void*)OSFirmwareData(psShaderFW); -+} -+ -+static void * -+RGXShaderUSCMem(OS_FW_IMAGE *psShaderFW) -+{ -+ IMG_PBYTE pui8Data; -+ -+ pui8Data = (IMG_PBYTE)OSFirmwareData(psShaderFW); -+ -+ pui8Data += RGXShaderCLIMemSize(psShaderFW); -+ -+ return (void*) pui8Data; -+} -+ -+#define RGX_SHADER_FILENAME_MAX_SIZE ((sizeof(RGX_SH_FILENAME)+ \ -+ RGX_BVNC_STR_SIZE_MAX)) -+ -+static void -+_GetShaderFileName(PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_CHAR * pszShaderFilenameStr, -+ IMG_CHAR * pszShaderpFilenameStr) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ OSSNPrintf(pszShaderFilenameStr, RGX_SHADER_FILENAME_MAX_SIZE, -+ "%s." RGX_BVNC_STR_FMTSPEC, -+ RGX_SH_FILENAME, -+ psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, -+ psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C); -+ -+ OSSNPrintf(pszShaderpFilenameStr, RGX_SHADER_FILENAME_MAX_SIZE, -+ "%s." 
RGX_BVNC_STRP_FMTSPEC, -+ RGX_SH_FILENAME, -+ psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, -+ psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C); -+} -+ -+PVRSRV_ERROR -+PVRSRVTQLoadShaders(PVRSRV_DEVICE_NODE * psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ OS_FW_IMAGE *psShaderFW; -+ RGX_SHADER_HEADER sHeader; -+ IMG_UINT32 ui32MappingTable = 0; -+ IMG_UINT32 ui32NumPages; -+ IMG_CHAR aszShaderFilenameStr[RGX_SHADER_FILENAME_MAX_SIZE]; -+ IMG_CHAR aszShaderpFilenameStr[RGX_SHADER_FILENAME_MAX_SIZE]; -+ const IMG_CHAR *pszShaderFilenameStr = aszShaderFilenameStr; -+ size_t uiNumBytes; -+ PVRSRV_ERROR eError; -+ -+ _GetShaderFileName(psDeviceNode, aszShaderFilenameStr, aszShaderpFilenameStr); -+ -+ eError = OSLoadFirmware(psDeviceNode, aszShaderFilenameStr, NULL, &psShaderFW); -+ -+ if (eError != PVRSRV_OK) -+ { -+ eError = OSLoadFirmware(psDeviceNode, aszShaderpFilenameStr, -+ NULL, &psShaderFW); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load shader binary file %s (%s)", -+ __func__, -+ aszShaderpFilenameStr, -+ PVRSRVGetErrorString(eError))); -+ eError = PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE; -+ goto failed_init; -+ } -+ -+ pszShaderFilenameStr = aszShaderpFilenameStr; -+ } -+ -+ PVR_LOG(("Shader binary image '%s' loaded", pszShaderFilenameStr)); -+ -+ RGXShaderReadHeader(psShaderFW, &sHeader); -+ -+ if (sHeader.ui32Version != RGX_TQ_SHADERS_VERSION_PACK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: unsupported TQ shaders version: %d != %d", -+ __func__, sHeader.ui32Version, RGX_TQ_SHADERS_VERSION_PACK)); -+ eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ goto failed_firmware; -+ } -+ -+ ui32NumPages = (sHeader.ui32SizeFragment / RGX_BIF_PM_PHYSICAL_PAGE_SIZE) + 1; -+ -+ PDUMPCOMMENT(psDeviceNode, "Allocate TDM USC PMR Block (Pages %08X)", ui32NumPages); -+ -+ eError = PhysmemNewRamBackedPMR(NULL, -+ psDeviceNode, -+ (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE, -+ 1, -+ 1, -+ &ui32MappingTable, -+ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE -+ | PVRSRV_MEMALLOCFLAG_GPU_READABLE -+ | PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT -+ | PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER, -+ sizeof("tquscpmr"), -+ "tquscpmr", -+ PVR_SYS_ALLOC_PID, -+ (PMR**)&psDevInfo->hTQUSCSharedMem, -+ PDUMP_NONE, -+ NULL); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("%s: Unexpected error from PhysmemNewRamBackedPMR (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto failed_firmware; -+ } -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ eError = RIWritePMREntryWithOwnerKM(psDevInfo->hTQUSCSharedMem, PVR_SYS_ALLOC_PID); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("%s: Unexpected error from RIWritePMREntryWithOwnerKM (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto failed_uscpmr; -+ } -+#endif -+ -+ eError = PMR_WriteBytes(psDevInfo->hTQUSCSharedMem, 0, RGXShaderUSCMem(psShaderFW), RGXShaderUSCMemSize(psShaderFW), &uiNumBytes); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("%s: Unexpected error from PMR_WriteBytes (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto failed_uscpmr; -+ } -+ -+ ui32NumPages = (sHeader.ui32SizeClientMem / RGX_BIF_PM_PHYSICAL_PAGE_SIZE) + 1; -+ -+ PDUMPCOMMENT(psDeviceNode, "Allocate TDM Client PMR Block (Pages %08X)", ui32NumPages); -+ -+ eError = PhysmemNewRamBackedPMR(NULL, -+ psDeviceNode, -+ (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE, -+ 1, -+ 1, -+ &ui32MappingTable, -+ 
RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE -+ | PVRSRV_MEMALLOCFLAG_CPU_READABLE -+ | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT -+ | PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER, -+ sizeof("tqclipmr"), -+ "tqclipmr", -+ PVR_SYS_ALLOC_PID, -+ (PMR**)&psDevInfo->hTQCLISharedMem, -+ PDUMP_NONE, -+ NULL); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("%s: Unexpected error from PhysmemNewRamBackedPMR (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto failed_uscpmr; -+ } -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ eError = RIWritePMREntryWithOwnerKM(psDevInfo->hTQCLISharedMem, PVR_SYS_ALLOC_PID); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("%s: Unexpected error from RIWritePMREntryWithOwnerKM (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto failed_clipmr; -+ } -+#endif -+ -+ eError = PMR_WriteBytes(psDevInfo->hTQCLISharedMem, 0, RGXShaderCLIMem(psShaderFW), RGXShaderCLIMemSize(psShaderFW), &uiNumBytes); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("%s: Unexpected error from PMR_WriteBytes (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto failed_clipmr; -+ } -+ -+ OSUnloadFirmware(psShaderFW); -+ -+ PVR_ASSERT(psDevInfo->hTQUSCSharedMem != NULL); -+ PVR_ASSERT(psDevInfo->hTQCLISharedMem != NULL); -+ -+ return PVRSRV_OK; -+ -+failed_clipmr: -+ PMRUnrefPMR(psDevInfo->hTQCLISharedMem); -+failed_uscpmr: -+ PMRUnrefPMR(psDevInfo->hTQUSCSharedMem); -+failed_firmware: -+ OSUnloadFirmware(psShaderFW); -+failed_init: -+ return eError; -+} -+ -+void -+PVRSRVTQAcquireShaders(PVRSRV_DEVICE_NODE * psDeviceNode, -+ PMR ** ppsCLIPMRMem, -+ PMR ** ppsUSCPMRMem) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ PVR_ASSERT(psDevInfo->hTQUSCSharedMem != NULL); -+ PVR_ASSERT(psDevInfo->hTQCLISharedMem != NULL); -+ -+ *ppsUSCPMRMem = psDevInfo->hTQUSCSharedMem; -+ *ppsCLIPMRMem = psDevInfo->hTQCLISharedMem; -+} -+ -+void PVRSRVTQUnloadShaders(PVRSRV_DEVICE_NODE * psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ (void) PMRUnrefPMR(psDevInfo->hTQUSCSharedMem); -+ (void) PMRUnrefPMR(psDevInfo->hTQCLISharedMem); -+} -diff --git a/drivers/gpu/drm/img-rogue/rgxshader.h b/drivers/gpu/drm/img-rogue/rgxshader.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxshader.h -@@ -0,0 +1,83 @@ -+/*************************************************************************/ /*! -+@File rgxshader.h -+@Title TQ Shader Load -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Shader code and info are shared for all context on the device. -+ If allocation doesn't already exist, read shader data from file -+ and allocate PMR memory. PMR memory is not deallocated until -+ device deinit. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGXSHADER_H) -+#define RGXSHADER_H -+ -+#include "fwload.h" -+#include "rgxtransfer_shader.h" -+#include "connection_server.h" -+ -+/*************************************************************************/ /*! -+@Function PVRSRVTQLoadShaders -+@Description If PMR is not allocated, reads shader binary data from file -+ and allocates new PMR memory. -+@Input psDeviceNode Device node -+@Return PVRSRV_ERROR Returns PVRSRV_OK on success. -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVTQLoadShaders(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVTQAcquireShaders -+@Description Get handle to ready allocated shader PMR memory -+@Input psDeviceNode Device node -+@Output ppsCLIPMRMem Shader data used by CPU client side. -+@Output ppsUSCPMRMem Shader usc code used by GPU. -+*/ /**************************************************************************/ -+void -+PVRSRVTQAcquireShaders(PVRSRV_DEVICE_NODE *psDeviceNode, -+ PMR **ppsCLIPMRMem, -+ PMR **ppsUSCPMRMem); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVTQUnLoadShaders -+@Description Unref PMR memory. -+@Input psDeviceNode Device node -+*/ /**************************************************************************/ -+void PVRSRVTQUnloadShaders(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+#endif /* RGXSHADER_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxsrvinit.c b/drivers/gpu/drm/img-rogue/rgxsrvinit.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxsrvinit.c -@@ -0,0 +1,1863 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services initialisation routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device specific functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "img_defs.h" -+#include "srvinit.h" -+#include "pvr_debug.h" -+#include "osfunc.h" -+#include "km_apphint_defs.h" -+#include "htbuffer_types.h" -+ -+#include "devicemem.h" -+#include "devicemem_pdump.h" -+ -+#include "rgx_fwif_km.h" -+#include "pdump_km.h" -+ -+#include "rgxinit.h" -+#include "rgxmulticore.h" -+ -+#include "rgx_compat_bvnc.h" -+ -+#include "osfunc.h" -+ -+#include "rgxdefs_km.h" -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+#include "virt_validation_defs.h" -+#endif -+ -+#include "rgx_fwif_hwperf.h" -+#include "rgx_hwperf_table.h" -+ -+#include "fwload.h" -+#include "rgxlayer_impl.h" -+#include "rgxfwimageutils.h" -+#include "rgxfwutils.h" -+ -+#include "rgx_hwperf.h" -+#include "rgx_bvnc_defs_km.h" -+ -+#include "rgxdevice.h" -+ -+#include "pvrsrv.h" -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) -+#include "rgxdevice.h" -+#include "pvrsrv_device.h" -+#endif -+ -+#define DRIVER_MODE_HOST 0 /* AppHint value for host driver mode */ -+ -+#define HW_PERF_FILTER_DEFAULT 0x00000000 /* Default to no HWPerf */ -+#define HW_PERF_FILTER_DEFAULT_ALL_ON 0xFFFFFFFF /* All events */ -+ -+/* Kernel CCB size */ -+ -+#if !defined(PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE) -+#define PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE 4 -+#endif -+#if !defined(PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE) -+#define PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE 16 -+#endif -+ -+#if PVRSRV_APPHINT_KCCB_SIZE_LOG2 < PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE -+#error PVRSRV_APPHINT_KCCB_SIZE_LOG2 is too low. 
-+#elif PVRSRV_APPHINT_KCCB_SIZE_LOG2 > PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE -+#error PVRSRV_APPHINT_KCCB_SIZE_LOG2 is too high. -+#endif -+ -+#if defined(SUPPORT_VALIDATION) -+#include "pvrsrv_apphint.h" -+#endif -+ -+#include "os_apphint.h" -+ -+/* -+ * Container for all the apphints used by this module -+ */ -+typedef struct _RGX_SRVINIT_APPHINTS_ -+{ -+ IMG_UINT32 ui32DriverMode; -+ IMG_BOOL bGPUUnitsPowerChange; -+ IMG_BOOL bEnableSignatureChecks; -+ IMG_UINT32 ui32SignatureChecksBufSize; -+ -+ IMG_BOOL bAssertOnOutOfMem; -+ IMG_BOOL bAssertOnHWRTrigger; -+#if defined(SUPPORT_VALIDATION) -+ IMG_BOOL bValidateIrq; -+ IMG_BOOL bValidateSOCUSCTimer; -+ IMG_UINT32 ui32FBCDCVersionOverride; -+ IMG_UINT32 aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST]; -+#endif -+ IMG_UINT32 ui32TFBCCompressionControlGroup; -+ IMG_UINT32 ui32TFBCCompressionControlScheme; -+ IMG_BOOL bTFBCCompressionControlYUVFormat; -+ IMG_BOOL bTFBCCompressionControlLossyMinChannel; -+ IMG_BOOL bCheckMlist; -+ IMG_BOOL bDisableClockGating; -+ IMG_BOOL bDisableDMOverlap; -+ IMG_BOOL bDisableFEDLogging; -+ IMG_BOOL bDisablePDP; -+ IMG_BOOL bEnableCDMKillRand; -+ IMG_BOOL bEnableRandomCsw; -+ IMG_BOOL bEnableSoftResetCsw; -+ IMG_BOOL bFilteringMode; -+ IMG_BOOL bHWPerfDisableCustomCounterFilter; -+ IMG_BOOL bZeroFreelist; -+ IMG_UINT32 ui32EnableFWContextSwitch; -+ IMG_UINT32 ui32FWContextSwitchProfile; -+ -+ IMG_UINT32 ui32HWPerfFWBufSize; -+ IMG_UINT32 ui32HWPerfHostBufSize; -+ IMG_UINT32 ui32HWPerfFilter0; -+ IMG_UINT32 ui32HWPerfFilter1; -+ IMG_UINT32 ui32HWPerfHostFilter; -+ IMG_UINT32 ui32TimeCorrClock; -+ IMG_UINT32 ui32HWRDebugDumpLimit; -+ IMG_UINT32 ui32JonesDisableMask; -+ IMG_UINT32 ui32LogType; -+ IMG_UINT32 ui32TruncateMode; -+ IMG_UINT32 ui32KCCBSizeLog2; -+ FW_PERF_CONF eFirmwarePerf; -+ RGX_ACTIVEPM_CONF eRGXActivePMConf; -+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf; -+ -+ IMG_BOOL bEnableTrustedDeviceAceConfig; -+ IMG_UINT32 ui32FWContextSwitchCrossDM; -+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) -+ IMG_UINT32 ui32PhysMemTestPasses; -+#endif -+ -+ RGX_FWT_LOGTYPE eDebugDumpFWTLogType; -+ IMG_UINT32 ui32TFBCVersion; -+} RGX_SRVINIT_APPHINTS; -+ -+/* -+ * _ParseHTBAppHints: -+ * -+ * Generate necessary references to the globally visible AppHints which are -+ * declared in the above #include "km_apphint_defs.h" -+ * Without these local references some compiler tool-chains will treat -+ * unreferenced declarations as fatal errors. This function duplicates the -+ * HTB_specific apphint references which are made in htbserver.c:HTBInit() -+ * However, it makes absolutely *NO* use of these hints. -+ */ -+static void -+_ParseHTBAppHints(PVRSRV_DEVICE_NODE *psDeviceNode, void *pvAppHintState) -+{ -+ IMG_UINT32 ui32AppHintDefault; -+ IMG_UINT32 ui32LogType; -+ IMG_UINT32 ui32OpMode; -+ IMG_UINT32 ui32BufferSize; -+ -+ ui32AppHintDefault = PVRSRV_APPHINT_ENABLEHTBLOGGROUP; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, EnableHTBLogGroup, -+ &ui32AppHintDefault, &ui32LogType); -+ ui32AppHintDefault = PVRSRV_APPHINT_HTBOPERATIONMODE; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HTBOperationMode, -+ &ui32AppHintDefault, &ui32OpMode); -+ ui32AppHintDefault = PVRSRV_APPHINT_HTBUFFERSIZE; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HTBufferSizeInKB, -+ &ui32AppHintDefault, &ui32BufferSize); -+} -+ -+/*! 
-+******************************************************************************* -+ -+ @Function GetApphints -+ -+ @Description Read init time apphints and initialise internal variables -+ -+ @Input psHints : Pointer to apphints container -+ -+ @Return void -+ -+******************************************************************************/ -+static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHINTS *psHints) -+{ -+ void *pvAppHintState = NULL; -+ IMG_UINT32 ui32AppHintDefault; -+ IMG_BOOL bAppHintDefault; -+ IMG_UINT32 ui32ParamTemp; -+ IMG_BOOL bS7TopInfra = IMG_FALSE, bE42290 = IMG_FALSE, bTPUFiltermodeCtrl = IMG_FALSE; -+ IMG_BOOL bE42606 = IMG_FALSE; -+#if defined(EMULATOR) -+ IMG_BOOL bAXIACELite = IMG_FALSE; -+#endif -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) -+ { -+ bS7TopInfra = IMG_TRUE; -+ } -+#if defined(RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TPU_FILTERING_MODE_CONTROL)) -+ { -+ bTPUFiltermodeCtrl = IMG_TRUE; -+ } -+#endif -+#if defined(HW_ERN_42290_BIT_MASK) -+ if (RGX_IS_ERN_SUPPORTED(psDevInfo, 42290)) -+ { -+ bE42290 = IMG_TRUE; -+ } -+#endif -+#if defined(HW_ERN_42606_BIT_MASK) -+ if (RGX_IS_ERN_SUPPORTED(psDevInfo, 42606)) -+ { -+ bE42606 = IMG_TRUE; -+ } -+#endif -+#if defined(HW_FEATURE_AXI_ACELITE_BIT_MASK) && defined(EMULATOR) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE)) -+ { -+ bAXIACELite = IMG_TRUE; -+ } -+#endif -+ -+ OSCreateAppHintState(&pvAppHintState); -+ -+ ui32AppHintDefault = PVRSRV_APPHINT_DRIVERMODE; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, DriverMode, -+ &ui32AppHintDefault, &psHints->ui32DriverMode); -+ bAppHintDefault = PVRSRV_APPHINT_GPUUNITSPOWERCHANGE; -+ OSGetAppHintBOOL(psDevInfo->psDeviceNode, pvAppHintState, GPUUnitsPowerChange, -+ &bAppHintDefault, &psHints->bGPUUnitsPowerChange); -+ bAppHintDefault = PVRSRV_APPHINT_ENABLESIGNATURECHECKS; -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableSignatureChecks, -+ &bAppHintDefault, &psHints->bEnableSignatureChecks); -+ ui32AppHintDefault = PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, SignatureChecksBufSize, -+ &ui32AppHintDefault, &psHints->ui32SignatureChecksBufSize); -+ -+ bAppHintDefault = PVRSRV_APPHINT_ASSERTOUTOFMEMORY; -+ OSGetAppHintBOOL(psDevInfo->psDeviceNode, pvAppHintState, AssertOutOfMemory, -+ &bAppHintDefault, &psHints->bAssertOnOutOfMem); -+ bAppHintDefault = PVRSRV_APPHINT_ASSERTONHWRTRIGGER; -+ OSGetAppHintBOOL(psDevInfo->psDeviceNode, pvAppHintState, AssertOnHWRTrigger, -+ &bAppHintDefault, &psHints->bAssertOnHWRTrigger); -+ bAppHintDefault = PVRSRV_APPHINT_CHECKMLIST; -+ OSGetAppHintBOOL(psDevInfo->psDeviceNode, pvAppHintState, CheckMList, -+ &bAppHintDefault, &psHints->bCheckMlist); -+ bAppHintDefault = PVRSRV_APPHINT_DISABLECLOCKGATING; -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, DisableClockGating, -+ &bAppHintDefault, &psHints->bDisableClockGating); -+ bAppHintDefault = PVRSRV_APPHINT_DISABLEDMOVERLAP; -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, DisableDMOverlap, -+ &bAppHintDefault, &psHints->bDisableDMOverlap); -+ bAppHintDefault = PVRSRV_APPHINT_DISABLEFEDLOGGING; -+ OSGetAppHintBOOL(psDevInfo->psDeviceNode, pvAppHintState, DisableFEDLogging, -+ &bAppHintDefault, &psHints->bDisableFEDLogging); -+ ui32AppHintDefault = PVRSRV_APPHINT_ENABLEAPM; -+ OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, EnableAPM, -+ &ui32AppHintDefault, &ui32ParamTemp); -+ 
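/*
 * Illustrative sketch (not additional driver logic) of the read-with-default
 * pattern every apphint in this function follows: a local is seeded with the
 * build-time PVRSRV_APPHINT_* default and the OSGetAppHint* helper hands back
 * either the user-supplied value or that default, so an absent module
 * parameter silently falls back to the compiled-in setting, e.g. the EnableAPM
 * read just above:
 *
 *     ui32AppHintDefault = PVRSRV_APPHINT_ENABLEAPM;
 *     OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, EnableAPM,
 *                        &ui32AppHintDefault, &ui32ParamTemp);
 *     psHints->eRGXActivePMConf = ui32ParamTemp;
 *
 * As the two forms used above suggest, hints queried with APPHINT_NO_DEVICE
 * are driver-global, while those queried with psDevInfo->psDeviceNode can be
 * overridden per device.
 */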
psHints->eRGXActivePMConf = ui32ParamTemp; -+ bAppHintDefault = PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE; -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableCDMKillingRandMode, -+ &bAppHintDefault, &psHints->bEnableCDMKillRand); -+ bAppHintDefault = PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH; -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableRandomContextSwitch, -+ &bAppHintDefault, &psHints->bEnableRandomCsw); -+ bAppHintDefault = PVRSRV_APPHINT_ENABLESOFTRESETCONTEXTSWITCH; -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableSoftResetContextSwitch, -+ &bAppHintDefault, &psHints->bEnableSoftResetCsw); -+ ui32AppHintDefault = PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, EnableFWContextSwitch, -+ &ui32AppHintDefault, &psHints->ui32EnableFWContextSwitch); -+ ui32AppHintDefault = PVRSRV_APPHINT_ENABLERDPOWERISLAND; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, EnableRDPowerIsland, -+ &ui32AppHintDefault, &ui32ParamTemp); -+ psHints->eRGXRDPowerIslandConf = ui32ParamTemp; -+ ui32AppHintDefault = PVRSRV_APPHINT_FIRMWAREPERF; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FirmwarePerf, -+ &ui32AppHintDefault, &ui32ParamTemp); -+ psHints->eFirmwarePerf = ui32ParamTemp; -+ ui32AppHintDefault = PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FWContextSwitchProfile, -+ &ui32AppHintDefault, &psHints->ui32FWContextSwitchProfile); -+ bAppHintDefault = PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER; -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, HWPerfDisableCustomCounterFilter, -+ &bAppHintDefault, &psHints->bHWPerfDisableCustomCounterFilter); -+ ui32AppHintDefault = PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HWPerfHostBufSizeInKB, -+ &ui32AppHintDefault, &psHints->ui32HWPerfHostBufSize); -+ ui32AppHintDefault = PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HWPerfFWBufSizeInKB, -+ &ui32AppHintDefault, &psHints->ui32HWPerfFWBufSize); -+ ui32AppHintDefault = PVRSRV_APPHINT_KCCB_SIZE_LOG2; -+ OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, KernelCCBSizeLog2, -+ &ui32AppHintDefault, &psHints->ui32KCCBSizeLog2); -+ -+ if (psHints->ui32KCCBSizeLog2 < PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "KCCB size %u is too low, setting to %u", -+ psHints->ui32KCCBSizeLog2, PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE)); -+ psHints->ui32KCCBSizeLog2 = PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE; -+ } -+ else if (psHints->ui32KCCBSizeLog2 > PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "KCCB size %u is too high, setting to %u", -+ psHints->ui32KCCBSizeLog2, PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE)); -+ psHints->ui32KCCBSizeLog2 = PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE; -+ } -+ -+#if defined(SUPPORT_VALIDATION) -+ if (psHints->ui32KCCBSizeLog2 != PVRSRV_APPHINT_KCCB_SIZE_LOG2) -+ { -+ PVR_LOG(("KernelCCBSizeLog2 set to %u", psHints->ui32KCCBSizeLog2)); -+ } -+#endif -+ -+#if defined(__linux__) -+ /* name changes */ -+ { -+ IMG_UINT64 ui64Tmp; -+ IMG_UINT64 ui64AppHintDefault = PVRSRV_APPHINT_HWPERFFWFILTER; -+ bAppHintDefault = PVRSRV_APPHINT_DISABLEPDUMPPANIC; -+ OSGetAppHintBOOL(psDevInfo->psDeviceNode, pvAppHintState, DisablePDumpPanic, -+ &bAppHintDefault, &psHints->bDisablePDP); -+ OSGetAppHintUINT64(psDevInfo->psDeviceNode, pvAppHintState, HWPerfFWFilter, -+ &ui64AppHintDefault, &ui64Tmp); -+ psHints->ui32HWPerfFilter0 = 
(IMG_UINT32)(ui64Tmp & 0xffffffffllu); -+ psHints->ui32HWPerfFilter1 = (IMG_UINT32)((ui64Tmp >> 32) & 0xffffffffllu); -+ } -+#endif -+ -+ ui32AppHintDefault = PVRSRV_APPHINT_HWPERFHOSTFILTER; -+ OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, HWPerfHostFilter, -+ &ui32AppHintDefault, &psHints->ui32HWPerfHostFilter); -+ ui32AppHintDefault = PVRSRV_APPHINT_TIMECORRCLOCK; -+ OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, TimeCorrClock, -+ &ui32AppHintDefault, &psHints->ui32TimeCorrClock); -+ ui32AppHintDefault = PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT; -+ OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, HWRDebugDumpLimit, -+ &ui32AppHintDefault, &ui32ParamTemp); -+ psHints->ui32HWRDebugDumpLimit = MIN(ui32ParamTemp, RGXFWIF_HWR_DEBUG_DUMP_ALL); -+ -+ if (bS7TopInfra) -+ { -+ #define RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK (0XFFFFFFCFU) -+ #define RGX_CR_JONES_FIX_MT_ORDER_ISP_EN (0X00000020U) -+ #define RGX_CR_JONES_FIX_MT_ORDER_TE_EN (0X00000010U) -+ -+ ui32AppHintDefault = PVRSRV_APPHINT_JONESDISABLEMASK; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, JonesDisableMask, -+ &ui32AppHintDefault, &ui32ParamTemp); -+ if (((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_ISP_EN) || -+ ((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_TE_EN)) -+ { -+ ui32ParamTemp |= (RGX_CR_JONES_FIX_MT_ORDER_TE_EN | -+ RGX_CR_JONES_FIX_MT_ORDER_ISP_EN); -+ PVR_DPF((PVR_DBG_WARNING, "Tile reordering mode requires both TE and ISP enabled. Forcing JonesDisableMask = %d", -+ ui32ParamTemp)); -+ } -+ psHints->ui32JonesDisableMask = ui32ParamTemp; -+ } -+ -+ if ((bE42290) && (bTPUFiltermodeCtrl)) -+ { -+ bAppHintDefault = PVRSRV_APPHINT_NEWFILTERINGMODE; -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, NewFilteringMode, -+ &bAppHintDefault, &psHints->bFilteringMode); -+ } -+ -+ if (bE42606) -+ { -+ ui32AppHintDefault = PVRSRV_APPHINT_TRUNCATEMODE; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TruncateMode, -+ &ui32AppHintDefault, &psHints->ui32TruncateMode); -+ } -+#if defined(EMULATOR) -+ if (bAXIACELite) -+ { -+ bAppHintDefault = PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG; -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableTrustedDeviceAceConfig, -+ &bAppHintDefault, &psHints->bEnableTrustedDeviceAceConfig); -+ } -+#endif -+ -+ bAppHintDefault = PVRSRV_APPHINT_ZEROFREELIST; -+ OSGetAppHintBOOL(psDevInfo->psDeviceNode, pvAppHintState, ZeroFreelist, -+ &bAppHintDefault, &psHints->bZeroFreelist); -+ -+#if defined(__linux__) -+ ui32AppHintDefault = 0; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FWContextSwitchCrossDM, -+ &ui32AppHintDefault, &psHints->ui32FWContextSwitchCrossDM); -+#endif -+ -+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) -+ ui32AppHintDefault = PVRSRV_APPHINT_PHYSMEMTESTPASSES; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, PhysMemTestPasses, -+ &ui32AppHintDefault, &psHints->ui32PhysMemTestPasses); -+#endif -+ -+#if defined(SUPPORT_VALIDATION) -+ /* Apphints for TPU trilinear frac masking */ -+ ui32AppHintDefault = 0xF; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TPUTrilinearFracMaskPDM, &ui32AppHintDefault, &psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_PDM]); -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TPUTrilinearFracMaskVDM, &ui32AppHintDefault, &psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_VDM]); -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, 
TPUTrilinearFracMaskCDM, &ui32AppHintDefault, &psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_CDM]); -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TPUTrilinearFracMaskTDM, &ui32AppHintDefault, &psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_TDM]); -+ bAppHintDefault = PVRSRV_APPHINT_VALIDATEIRQ; -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, ValidateIrq, &bAppHintDefault, &psHints->bValidateIrq); -+ bAppHintDefault = PVRSRV_APPHINT_VALIDATESOCUSCTIMERS; -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, ValidateSOCUSCTimer, &bAppHintDefault, &psHints->bValidateSOCUSCTimer); -+ ui32AppHintDefault = PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FBCDCVersionOverride, &ui32AppHintDefault, &psHints->ui32FBCDCVersionOverride); -+ ui32AppHintDefault = PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TFBCCompressionControlGroup, &ui32AppHintDefault, &psHints->ui32TFBCCompressionControlGroup); -+ ui32AppHintDefault = PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLSCHEME; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TFBCCompressionControlScheme, &ui32AppHintDefault, &psHints->ui32TFBCCompressionControlScheme); -+ bAppHintDefault = IMG_FALSE; -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, TFBCCompressionControlYUVFormat, &bAppHintDefault, &psHints->bTFBCCompressionControlYUVFormat); -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, TFBCCompressionControlLossyMinChannel, &bAppHintDefault, &psHints->bTFBCCompressionControlLossyMinChannel); -+#else -+ psHints->bTFBCCompressionControlLossyMinChannel = false; -+ psHints->bTFBCCompressionControlYUVFormat = false; -+ psHints->ui32TFBCCompressionControlScheme = -+ PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLSCHEME; -+ psHints->ui32TFBCCompressionControlGroup = -+ PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP; -+#endif -+ -+ ui32AppHintDefault = PVRSRV_APPHINT_TFBCVERSION; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TFBCVersionDowngrade, -+ &ui32AppHintDefault, &psHints->ui32TFBCVersion); -+ -+ if (ui32AppHintDefault != psHints->ui32TFBCVersion) -+ { -+ PVR_LOG(("TFBCVersionDowngrade set to %u", psHints->ui32TFBCVersion)); -+ } -+ -+ ui32AppHintDefault = PVRSRV_APPHINT_DEBUGDUMPFWTLOGTYPE; -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, DebugDumpFWTLogType, &ui32AppHintDefault, &psHints->eDebugDumpFWTLogType); -+ if ((IMG_UINT32)psHints->eDebugDumpFWTLogType > RGX_FWT_LOGTYPE_PARTIAL) -+ { -+ psHints->eDebugDumpFWTLogType = RGX_FWT_LOGTYPE_NONE; -+ PVR_DPF((PVR_DBG_WARNING, "Invalid value for DebugDumpFWTLogType. 
Setting to 0 (disabled).")); -+ } -+ -+ /* -+ * FW logs apphints -+ */ -+ { -+ IMG_UINT32 ui32LogGroup, ui32TraceOrTBI; -+ -+ ui32AppHintDefault = PVRSRV_APPHINT_ENABLELOGGROUP; -+ OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, EnableLogGroup, &ui32AppHintDefault, &ui32LogGroup); -+ ui32AppHintDefault = PVRSRV_APPHINT_FIRMWARELOGTYPE; -+ OSGetAppHintUINT32(psDevInfo->psDeviceNode, pvAppHintState, FirmwareLogType, &ui32AppHintDefault, &ui32TraceOrTBI); -+ -+ /* Defaulting to TRACE */ -+ BITMASK_SET(ui32LogGroup, RGXFWIF_LOG_TYPE_TRACE); -+ -+#if defined(SUPPORT_TBI_INTERFACE) -+ if (ui32TraceOrTBI == 1 /* TBI */) -+ { -+ if ((ui32LogGroup & RGXFWIF_LOG_TYPE_GROUP_MASK) == 0) -+ { -+ /* No groups configured - defaulting to MAIN group */ -+ BITMASK_SET(ui32LogGroup, RGXFWIF_LOG_TYPE_GROUP_MAIN); -+ } -+ BITMASK_UNSET(ui32LogGroup, RGXFWIF_LOG_TYPE_TRACE); -+ } -+#endif -+ psHints->ui32LogType = ui32LogGroup; -+ } -+ -+ _ParseHTBAppHints(psDevInfo->psDeviceNode, pvAppHintState); -+ -+ OSFreeAppHintState(pvAppHintState); -+} -+ -+ -+/*! -+******************************************************************************* -+ -+ @Function GetFWConfigFlags -+ -+ @Description Initialise and return FW config flags -+ -+ @Input psHints : Apphints container -+ @Input pui32FWConfigFlags : Pointer to config flags -+ -+ @Return void -+ -+******************************************************************************/ -+static INLINE void GetFWConfigFlags(PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGX_SRVINIT_APPHINTS *psHints, -+ IMG_UINT32 *pui32FWConfigFlags, -+ IMG_UINT32 *pui32FWConfigFlagsExt, -+ IMG_UINT32 *pui32FwOsCfgFlags) -+{ -+ IMG_UINT32 ui32FWConfigFlags = 0; -+ IMG_UINT32 ui32FWConfigFlagsExt = 0; -+ IMG_UINT32 ui32FWConfigFlagsSupValExt = 0; -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ ui32FWConfigFlags = 0; -+ ui32FWConfigFlagsExt = 0; -+ } -+ else -+ { -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ IMG_UINT32 ui32TFBCVersion = 0U; -+ -+ ui32FWConfigFlags |= psHints->bAssertOnOutOfMem ? RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY : 0; -+ ui32FWConfigFlags |= psHints->bAssertOnHWRTrigger ? RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER : 0; -+ ui32FWConfigFlags |= psHints->bCheckMlist ? RGXFWIF_INICFG_CHECK_MLIST_EN : 0; -+ ui32FWConfigFlags |= psHints->bDisableClockGating ? RGXFWIF_INICFG_DISABLE_CLKGATING_EN : 0; -+ ui32FWConfigFlags |= psHints->bDisableDMOverlap ? RGXFWIF_INICFG_DISABLE_DM_OVERLAP : 0; -+ ui32FWConfigFlags |= psHints->bDisablePDP ? RGXFWIF_INICFG_DISABLE_PDP_EN : 0; -+ ui32FWConfigFlags |= psHints->bEnableCDMKillRand ? RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN : 0; -+ ui32FWConfigFlags |= psHints->bEnableRandomCsw ? RGXFWIF_INICFG_CTXSWITCH_MODE_RAND : 0; -+ ui32FWConfigFlags |= psHints->bEnableSoftResetCsw ? RGXFWIF_INICFG_CTXSWITCH_SRESET_EN : 0; -+ ui32FWConfigFlags |= (psHints->ui32HWPerfFilter0 != 0 || psHints->ui32HWPerfFilter1 != 0) ? RGXFWIF_INICFG_HWPERF_EN : 0; -+ ui32FWConfigFlags |= psHints->bHWPerfDisableCustomCounterFilter ? RGXFWIF_INICFG_HWP_DISABLE_FILTER : 0; -+ ui32FWConfigFlags |= (psHints->ui32FWContextSwitchProfile << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) & RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK; -+ -+#if defined(SUPPORT_VALIDATION) -+#if defined(NO_HARDWARE) && defined(PDUMP) -+ ui32FWConfigFlags |= psHints->bValidateIrq ? RGXFWIF_INICFG_VALIDATE_IRQ : 0; -+#endif -+ -+ if (psHints->ui32FBCDCVersionOverride > 0) -+ { -+ ui32FWConfigFlags |= (psHints->ui32FBCDCVersionOverride == 2) ? 
RGXFWIF_INICFG_FBCDC_V3_1_EN : 0; -+ } -+ else -+#endif /* defined(SUPPORT_VALIDATION) */ -+ { -+ ui32FWConfigFlags |= psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode) ? RGXFWIF_INICFG_FBCDC_V3_1_EN : 0; -+ } -+ -+#if defined(SUPPORT_VALIDATION) -+ ui32FWConfigFlags |= psHints->bValidateSOCUSCTimer ? RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER : 0; -+ -+ if ((ui32FWConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) && -+ ((psHints->eRGXActivePMConf != 0) || (psHints->eRGXRDPowerIslandConf != 0))) -+ { -+ psHints->eRGXActivePMConf = 0; -+ psHints->eRGXRDPowerIslandConf = 0; -+ PVR_DPF((PVR_DBG_WARNING, "SoC/USC Timer test needs to run with both EnableAPM and EnableRDPowerIsland disabled.\n" -+ "Overriding current value for both with new value 0.")); -+ } -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT) || -+ RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_DELTA_CORRELATION) || -+ RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_NATIVE_YUV10)) -+ { -+ ui32FWConfigFlagsSupValExt = ui32FWConfigFlagsExt; -+ -+ ui32FWConfigFlagsSupValExt |= -+ ((((psHints->ui32TFBCCompressionControlGroup << RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT) & -+ ~RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK) | -+ ((psHints->ui32TFBCCompressionControlScheme << RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT) & -+ ~RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK) | -+ ((psHints->bTFBCCompressionControlYUVFormat) ? RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN : 0) | -+ ((psHints->bTFBCCompressionControlLossyMinChannel) ? RGX_CR_TFBC_COMPRESSION_CONTROL_LOSSY_MIN_CHANNEL_OVERRIDE_EN : 0)) -+ << RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT) & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK; -+ /* Save TFBCCompressionControlGroup for later querying by -+ * ->pfnGetTFBCLossyGroup() -+ */ -+#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) -+ psDevInfo->ui32TFBCLossyGroup = psHints->ui32TFBCCompressionControlGroup; -+#endif -+ } -+#endif /* defined(SUPPORT_VALIDATION) */ -+ /* Determine if we need to present a TFBC v1.0, v1.1 or native -+ * behaviour. 
For V1.0 we need to set the following features: -+ * TFBCCompressionControlLossyMinChannel = 0x1 -+ * TFBCCompressionControlYUVFormat = 0x1 -+ * TFBCCompressionControlScheme = 0x2 -+ * TFBCCompressionControlGroup = 0x0 -+ * For V1.1 we need to set the following: -+ * TFBCCompressionControlLossyMinChannel = 0x1 -+ * TFBCCompressionControlYUVFormat = 0x0 -+ * TFBCCompressionControlScheme = 0x1 -+ * TFBCCompressionControlGroup = 0 / 1 (depends on LOSSY_37_PERCENT) -+ * The gating for these values depends on whether the GPU supports -+ * RGX_FEATURE_TFBC_VERSION = 20U -+ */ -+ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, TFBC_VERSION)) -+ { -+ ui32TFBCVersion = RGX_GET_FEATURE_VALUE(psDevInfo, TFBC_VERSION); -+ -+ if (ui32TFBCVersion >= 20U) -+ { -+ switch (psHints->ui32TFBCVersion) { -+ case 10: /* TFBC Version 1.0 */ -+ psHints->bTFBCCompressionControlLossyMinChannel = true; -+ psHints->bTFBCCompressionControlYUVFormat = true; -+ psHints->ui32TFBCCompressionControlScheme = 2U; -+ psHints->ui32TFBCCompressionControlGroup = 0U; -+ -+#if defined(DEBUG) || defined(SUPPORT_VALIDATION) -+ PVR_LOG(("%s: Setting TFBC Version 1.0, Native v%u", -+ __func__, ui32TFBCVersion)); -+#endif -+ break; -+ -+ case 11: /* TFBC Version 1.1 */ -+ psHints->bTFBCCompressionControlLossyMinChannel = true; -+ psHints->bTFBCCompressionControlYUVFormat = false; -+ psHints->ui32TFBCCompressionControlScheme = 1U; -+#if !defined(SUPPORT_VALIDATION) -+ psHints->ui32TFBCCompressionControlGroup = -+ PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP; -+#endif -+ -+#if defined(DEBUG) || defined(SUPPORT_VALIDATION) -+ PVR_LOG(("%s: Setting TFBC Version 1.1, Native v%u", -+ __func__, ui32TFBCVersion)); -+#endif -+ break; -+ -+ case 0: /* Leave with whatever the ui32TFBCVersion is */ -+ break; -+ default: /* Unexpected / unsupported value */ -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Unexpected TFBC Version %u" -+ " Ignoring. Using value %u instead", -+ __func__, psHints->ui32TFBCVersion, -+ ui32TFBCVersion)); -+ break; -+ } -+ -+#if defined(FIX_HW_BRN_73472_BIT_MASK) -+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 73472)) -+ { -+ if (psHints->ui32TFBCCompressionControlScheme != 1U) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "Ignoring TFBCCompressionControlScheme value. Setting to 1 (as per BRN73472)")); -+ } -+ -+ psHints->ui32TFBCCompressionControlScheme = 1U; -+ } -+#endif -+ ui32FWConfigFlagsExt |= -+ ((((psHints->ui32TFBCCompressionControlGroup << RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT) & -+ ~RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK) | -+ ((psHints->ui32TFBCCompressionControlScheme << RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT) & -+ ~RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK) | -+ ((psHints->bTFBCCompressionControlYUVFormat) ? RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN : 0) | -+ ((psHints->bTFBCCompressionControlLossyMinChannel) ? 
RGX_CR_TFBC_COMPRESSION_CONTROL_LOSSY_MIN_CHANNEL_OVERRIDE_EN : 0)) -+ << RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT) & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK; -+ /* Save the CompressionControlGroup for later use by -+ * ->pfnGetTFBCLossyGroup() */ -+#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) -+ psDevInfo->ui32TFBCLossyGroup = psHints->ui32TFBCCompressionControlGroup; -+#endif -+ } -+ else if (ui32TFBCVersion == 11U) -+ { -+ switch (psHints->ui32TFBCVersion) { -+ case 10: /* TFBC Version 1.0 */ -+ psHints->bTFBCCompressionControlLossyMinChannel = true; -+ psHints->bTFBCCompressionControlYUVFormat = true; -+ psHints->ui32TFBCCompressionControlScheme = 2U; -+ psHints->ui32TFBCCompressionControlGroup = 0U; -+ -+#if defined(DEBUG) || defined(SUPPORT_VALIDATION) -+ PVR_LOG(("%s: Setting TFBC Version 1.0, Native v%u", -+ __func__, ui32TFBCVersion)); -+#endif -+ break; -+ -+ case 0: /* Leave with whatever the ui32TFBCVersion is */ -+ break; -+ -+ default: /* Unexpected / unsupported value */ -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Unexpected TFBC Version %u" -+ " Ignoring. Using value %u instead", -+ __func__, psHints->ui32TFBCVersion, -+ ui32TFBCVersion)); -+ break; -+ } -+ ui32FWConfigFlagsExt |= -+ ((((psHints->ui32TFBCCompressionControlGroup << RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT) & -+ ~RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK) | -+ ((psHints->ui32TFBCCompressionControlScheme << RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT) & -+ ~RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK) | -+ ((psHints->bTFBCCompressionControlYUVFormat) ? RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN : 0) | -+ ((psHints->bTFBCCompressionControlLossyMinChannel) ? RGX_CR_TFBC_COMPRESSION_CONTROL_LOSSY_MIN_CHANNEL_OVERRIDE_EN : 0)) -+ << RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT) & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK; -+ /* Save the CompressionControlGroup for later use by -+ * ->pfnGetTFBCLossyGroup() */ -+#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) -+ psDevInfo->ui32TFBCLossyGroup = psHints->ui32TFBCCompressionControlGroup; -+#endif -+ } -+ else /* TFBC v1.0 */ -+ { -+#if defined(SUPPORT_VALIDATION) -+ ui32FWConfigFlagsExt = ui32FWConfigFlagsSupValExt; -+#else /* !defined(SUPPORT_VALIDATION) */ -+ PVR_UNREFERENCED_PARAMETER(ui32FWConfigFlagsSupValExt); -+#endif /* !defined(SUPPORT_VALIDATION) */ -+#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) -+ psDevInfo->ui32TFBCLossyGroup = 0; -+#endif -+ if ((psHints->ui32TFBCVersion != 0U) && -+ (psHints->ui32TFBCVersion != ui32TFBCVersion)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Cannot specify TFBC version %u" -+ " on a version %u GPU core", __func__, -+ psHints->ui32TFBCVersion, ui32TFBCVersion)); -+ } -+ } -+ } -+ } -+ -+ *pui32FWConfigFlags = ui32FWConfigFlags; -+ *pui32FWConfigFlagsExt = ui32FWConfigFlagsExt; -+ *pui32FwOsCfgFlags = psHints->ui32FWContextSwitchCrossDM | -+ (psHints->ui32EnableFWContextSwitch & ~RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK); -+} -+ -+ -+/*! -+******************************************************************************* -+ -+ @Function GetFilterFlags -+ -+ @Description Initialise and return filter flags -+ -+ @Input psHints : Apphints container -+ -+ @Return IMG_UINT32 : Filter flags -+ -+******************************************************************************/ -+static INLINE IMG_UINT32 GetFilterFlags(RGX_SRVINIT_APPHINTS *psHints) -+{ -+ IMG_UINT32 ui32FilterFlags = 0; -+ -+ ui32FilterFlags |= psHints->bFilteringMode ? 
RGXFWIF_FILTCFG_NEW_FILTER_MODE : 0; -+ if (psHints->ui32TruncateMode == 2) -+ { -+ ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_INT; -+ } -+ else if (psHints->ui32TruncateMode == 3) -+ { -+ ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_HALF; -+ } -+ -+ return ui32FilterFlags; -+} -+ -+ -+/*! -+******************************************************************************* -+ -+ @Function InitDeviceFlags -+ -+ @Description Initialise and return device flags -+ -+ @Input psHints : Apphints container -+ @Input pui32DeviceFlags : Pointer to device flags -+ -+ @Return void -+ -+******************************************************************************/ -+static INLINE void InitDeviceFlags(RGX_SRVINIT_APPHINTS *psHints, -+ IMG_UINT32 *pui32DeviceFlags) -+{ -+ IMG_UINT32 ui32DeviceFlags = 0; -+ -+ ui32DeviceFlags |= psHints->bGPUUnitsPowerChange ? RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN : 0; -+ ui32DeviceFlags |= psHints->bZeroFreelist ? RGXKM_DEVICE_STATE_ZERO_FREELIST : 0; -+ ui32DeviceFlags |= psHints->bDisableFEDLogging ? RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN : 0; -+#if defined(PVRSRV_ENABLE_CCCB_GROW) -+ BITMASK_SET(ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN); -+#endif -+ -+ *pui32DeviceFlags = ui32DeviceFlags; -+} -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) -+/*! -+******************************************************************************* -+ -+ @Function RGXTDProcessFWImage -+ -+ @Description Fetch and send data used by the trusted device to complete -+ the FW image setup -+ -+ @Input psDeviceNode : Device node -+ @Input psRGXFW : Firmware blob -+ @Input puFWParams : Parameters used by the FW at boot time -+ -+ @Return PVRSRV_ERROR -+******************************************************************************/ -+static PVRSRV_ERROR RGXTDProcessFWImage(PVRSRV_DEVICE_NODE *psDeviceNode, -+ OS_FW_IMAGE *psRGXFW, -+ PVRSRV_FW_BOOT_PARAMS *puFWParams) -+{ -+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_TD_FW_PARAMS sTDFWParams; -+ PVRSRV_ERROR eError; -+ -+ if (psDevConfig->pfnTDSendFWImage == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: TDSendFWImage not implemented!", __func__)); -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+ } -+ -+ sTDFWParams.pvFirmware = OSFirmwareData(psRGXFW); -+ sTDFWParams.ui32FirmwareSize = OSFirmwareSize(psRGXFW); -+ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) -+ { -+ sTDFWParams.uFWP.sMeta = puFWParams->sMeta; -+ } -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ sTDFWParams.uFWP.sMips = puFWParams->sMips; -+ -+ if (sTDFWParams.uFWP.sMips.ui32FWPageTableNumPages > TD_MAX_NUM_MIPS_PAGETABLE_PAGES) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Number of page table pages %u greater " -+ "than what is allowed by the TD interface (%u), FW might " -+ "not work properly!", __func__, -+ puFWParams->sMips.ui32FWPageTableNumPages, -+ TD_MAX_NUM_MIPS_PAGETABLE_PAGES)); -+ } -+ } -+#endif -+ else -+ { -+ sTDFWParams.uFWP.sRISCV = puFWParams->sRISCV; -+ } -+ -+ eError = psDevConfig->pfnTDSendFWImage(psDevConfig->hSysData, &sTDFWParams); -+ -+ return eError; -+} -+#endif -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+/*! 
-+******************************************************************************* -+ -+ @Function RGXAcquireMipsBootldrData -+ -+ @Description Acquire MIPS bootloader data parameters -+ -+ @Input psDeviceNode : Device node -+ @Input puFWParams : FW boot parameters -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+static PVRSRV_ERROR RGXAcquireMipsBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode, -+ PVRSRV_FW_BOOT_PARAMS *puFWParams) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*) psDeviceNode->pvDevice; -+ MMU_DEVICEATTRIBS *psFWMMUDevAttrs = psDevInfo->psDeviceNode->psFirmwareMMUDevAttrs; -+ IMG_DEV_PHYADDR sAddr; -+ IMG_UINT32 ui32PTSize, i; -+ PVRSRV_ERROR eError; -+ IMG_BOOL bValid; -+ -+ /* Rogue Registers physical address */ -+#if defined(SUPPORT_ALT_REGBASE) -+ puFWParams->sMips.sGPURegAddr = psDeviceNode->psDevConfig->sAltRegsGpuPBase; -+#else -+ PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL], -+ 1, -+ &puFWParams->sMips.sGPURegAddr, -+ &(psDeviceNode->psDevConfig->sRegsCpuPBase)); -+#endif -+ -+ /* MIPS Page Table physical address */ -+ MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sAddr); -+ -+ /* MIPS Page Table allocation is contiguous. Pass one or more addresses -+ * to the FW depending on the Page Table size and alignment. */ -+ -+ ui32PTSize = (psFWMMUDevAttrs->psTopLevelDevVAddrConfig->uiNumEntriesPT) -+ << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; -+ ui32PTSize = PVR_ALIGN(ui32PTSize, 1U << psFWMMUDevAttrs->ui32BaseAlign); -+ -+ puFWParams->sMips.ui32FWPageTableLog2PageSize = psFWMMUDevAttrs->ui32BaseAlign; -+ puFWParams->sMips.ui32FWPageTableNumPages = ui32PTSize >> psFWMMUDevAttrs->ui32BaseAlign; -+ -+ if (puFWParams->sMips.ui32FWPageTableNumPages > 4U) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Page table cannot be mapped by the FW " -+ "(size 0x%x, log2 page size %u, %u pages)", -+ __func__, ui32PTSize, puFWParams->sMips.ui32FWPageTableLog2PageSize, -+ puFWParams->sMips.ui32FWPageTableNumPages)); -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ -+ /* Confirm page alignment fits in 64-bits */ -+ if (psFWMMUDevAttrs->ui32BaseAlign > 63) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid page alignment " -+ "(psFWMMUDevAttrs->ui32BaseAlign = %u)", -+ __func__, psFWMMUDevAttrs->ui32BaseAlign)); -+ return PVRSRV_ERROR_INIT_FAILURE; -+ } -+ -+ for (i = 0; i < puFWParams->sMips.ui32FWPageTableNumPages; i++) -+ { -+ puFWParams->sMips.asFWPageTableAddr[i].uiAddr = -+ sAddr.uiAddr + i * (1ULL << psFWMMUDevAttrs->ui32BaseAlign); -+ } -+ -+ /* MIPS Stack Pointer Physical Address */ -+ eError = RGXGetPhyAddr(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR, -+ &puFWParams->sMips.sFWStackAddr, -+ RGXGetFWImageSectionOffset(NULL, MIPS_STACK), -+ OSGetPageShift(), -+ 1, -+ &bValid); -+ -+ return eError; -+} -+#endif -+ -+/*! 
-+******************************************************************************* -+ -+ @Function InitFirmware -+ -+ @Description Allocate, initialise and pdump Firmware code and data memory -+ -+ @Input psDeviceNode : Device Node -+ @Input psHints : Apphints -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGX_SRVINIT_APPHINTS *psHints) -+{ -+ OS_FW_IMAGE *psRGXFW = NULL; -+ const IMG_BYTE *pbRGXFirmware = NULL; -+ -+ /* FW code memory */ -+ IMG_DEVMEM_SIZE_T uiFWCodeAllocSize; -+ void *pvFWCodeHostAddr; -+ -+ /* FW data memory */ -+ IMG_DEVMEM_SIZE_T uiFWDataAllocSize; -+ void *pvFWDataHostAddr; -+ -+ /* FW coremem code memory */ -+ IMG_DEVMEM_SIZE_T uiFWCorememCodeAllocSize; -+ void *pvFWCorememCodeHostAddr = NULL; -+ -+ /* FW coremem data memory */ -+ IMG_DEVMEM_SIZE_T uiFWCorememDataAllocSize; -+ void *pvFWCorememDataHostAddr = NULL; -+ -+ PVRSRV_FW_BOOT_PARAMS uFWParams; -+ RGX_LAYER_PARAMS sLayerParams; -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) -+ IMG_BOOL bUseSecureFWData = -+ RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || -+ RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR) || -+ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && -+ RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32); -+#endif -+ -+ /* -+ * Get pointer to Firmware image -+ */ -+ eError = RGXLoadAndGetFWData(psDeviceNode, &psRGXFW, &pbRGXFirmware); -+ -+ if (eError != PVRSRV_OK) -+ { -+ /* Error or confirmation message generated in RGXLoadAndGetFWData */ -+ goto fw_load_fail; -+ } -+ -+ sLayerParams.psDevInfo = psDevInfo; -+ -+ /* -+ * Allocate Firmware memory -+ */ -+ -+ eError = RGXGetFWImageAllocSize(&sLayerParams, -+ pbRGXFirmware, -+ OSFirmwareSize(psRGXFW), -+ &uiFWCodeAllocSize, -+ &uiFWDataAllocSize, -+ &uiFWCorememCodeAllocSize, -+ &uiFWCorememDataAllocSize, -+ &psDevInfo->sFWInfoHeader); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: RGXGetFWImageAllocSize failed", -+ __func__)); -+ goto cleanup_initfw; -+ } -+ -+ /* -+ * Initiate FW compatibility check for Native and Host. -+ * Guest compatibility check must be done after FW boot. 
-+ */ -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ eError = PVRSRVDevInitCompatCheck(psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed compatibility check for device %p (%s)", -+ __func__, psDeviceNode, PVRSRVGetErrorString(eError))); -+ goto cleanup_initfw; -+ } -+ } -+ -+ psDevInfo->ui32FWCodeSizeInBytes = uiFWCodeAllocSize; -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) -+ if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: META DMA not available, disabling core memory code/data", -+ __func__)); -+ uiFWCorememCodeAllocSize = 0; -+ uiFWCorememDataAllocSize = 0; -+ } -+#endif -+ -+ psDevInfo->ui32FWCorememCodeSizeInBytes = uiFWCorememCodeAllocSize; -+ -+ eError = RGXInitAllocFWImgMem(psDeviceNode, -+ uiFWCodeAllocSize, -+ uiFWDataAllocSize, -+ uiFWCorememCodeAllocSize, -+ uiFWCorememDataAllocSize); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: RGXInitAllocFWImgMem failed (%d)", -+ __func__, -+ eError)); -+ goto cleanup_initfw; -+ } -+ -+ /* -+ * Acquire pointers to Firmware allocations -+ */ -+ -+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) -+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, &pvFWCodeHostAddr); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", cleanup_initfw); -+ -+#else -+ /* We can't get a pointer to a secure FW allocation from within the DDK */ -+ pvFWCodeHostAddr = NULL; -+#endif -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) -+ if (bUseSecureFWData) -+ { -+ /* We can't get a pointer to a secure FW allocation from within the DDK */ -+ pvFWDataHostAddr = NULL; -+ } -+ else -+#endif -+ { -+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, &pvFWDataHostAddr); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_code); -+ } -+ -+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) -+ if (uiFWCorememCodeAllocSize != 0) -+ { -+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, &pvFWCorememCodeHostAddr); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_data); -+ } -+#else -+ /* We can't get a pointer to a secure FW allocation from within the DDK */ -+ pvFWCorememCodeHostAddr = NULL; -+#endif -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) -+ if (bUseSecureFWData) -+ { -+ pvFWCorememDataHostAddr = NULL; -+ } -+ else -+#endif -+ if (uiFWCorememDataAllocSize != 0) -+ { -+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, &pvFWCorememDataHostAddr); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_corememcode); -+ } -+ -+ /* -+ * Prepare FW boot parameters -+ */ -+ OSCachedMemSet(&uFWParams, 0, sizeof(PVRSRV_FW_BOOT_PARAMS)); -+ -+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) -+ { -+ uFWParams.sMeta.sFWCodeDevVAddr = psDevInfo->sFWCodeDevVAddrBase; -+ uFWParams.sMeta.sFWDataDevVAddr = psDevInfo->sFWDataDevVAddrBase; -+ uFWParams.sMeta.sFWCorememCodeDevVAddr = psDevInfo->sFWCorememCodeDevVAddrBase; -+ uFWParams.sMeta.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr; -+ uFWParams.sMeta.uiFWCorememCodeSize = uiFWCorememCodeAllocSize; -+ uFWParams.sMeta.sFWCorememDataDevVAddr = psDevInfo->sFWCorememDataStoreDevVAddrBase; -+ uFWParams.sMeta.sFWCorememDataFWAddr = psDevInfo->sFWCorememDataStoreFWAddr; -+#if 
defined(RGXFW_META_SUPPORT_2ND_THREAD) -+ uFWParams.sMeta.ui32NumThreads = 2; -+#else -+ uFWParams.sMeta.ui32NumThreads = 1; -+#endif -+ } -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) -+ { -+ eError = RGXAcquireMipsBootldrData(psDeviceNode, &uFWParams); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: RGXAcquireMipsBootldrData failed (%d)", -+ __func__, eError)); -+ goto release_fw_allocations; -+ } -+ } -+#endif -+ else -+ { -+ uFWParams.sRISCV.sFWCorememCodeDevVAddr = psDevInfo->sFWCorememCodeDevVAddrBase; -+ uFWParams.sRISCV.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr; -+ uFWParams.sRISCV.uiFWCorememCodeSize = uiFWCorememCodeAllocSize; -+ -+ uFWParams.sRISCV.sFWCorememDataDevVAddr = psDevInfo->sFWCorememDataStoreDevVAddrBase; -+ uFWParams.sRISCV.sFWCorememDataFWAddr = psDevInfo->sFWCorememDataStoreFWAddr; -+ uFWParams.sRISCV.uiFWCorememDataSize = uiFWCorememDataAllocSize; -+ } -+ -+ -+ /* -+ * Process the Firmware image and setup code and data segments. -+ * -+ * When the trusted device is enabled and the FW code lives -+ * in secure memory we will only setup the data segments here, -+ * while the code segments will be loaded to secure memory -+ * by the trusted device. -+ */ -+ if (!psDeviceNode->bAutoVzFwIsUp) -+ { -+ eError = RGXProcessFWImage(&sLayerParams, -+ pbRGXFirmware, -+ pvFWCodeHostAddr, -+ pvFWDataHostAddr, -+ pvFWCorememCodeHostAddr, -+ pvFWCorememDataHostAddr, -+ &uFWParams); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: RGXProcessFWImage failed (%d)", -+ __func__, eError)); -+ goto release_fw_allocations; -+ } -+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) -+ RGXFwSharedMemCacheOpExec(pvFWCodeHostAddr, -+ sizeof(psDevInfo->psRGXFWCodeMemDesc->uiAllocSize), -+ PVRSRV_CACHE_OP_FLUSH); -+ if (uiFWCorememCodeAllocSize) -+ { -+ RGXFwSharedMemCacheOpExec(pvFWCorememCodeHostAddr, -+ sizeof(psDevInfo->psRGXFWCorememCodeMemDesc->uiAllocSize), -+ PVRSRV_CACHE_OP_FLUSH); -+ } -+ -+ RGXFwSharedMemCacheOpExec(pvFWDataHostAddr, -+ sizeof(psDevInfo->psRGXFWDataMemDesc->uiAllocSize), -+ PVRSRV_CACHE_OP_FLUSH); -+ if (uiFWCorememDataAllocSize) -+ { -+ RGXFwSharedMemCacheOpExec(pvFWCorememDataHostAddr, -+ sizeof(psDevInfo->psRGXFWIfCorememDataStoreMemDesc->uiAllocSize), -+ PVRSRV_CACHE_OP_FLUSH); -+ } -+#endif -+ } -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) -+ RGXTDProcessFWImage(psDeviceNode, psRGXFW, &uFWParams); -+#endif -+ -+ -+ /* -+ * PDump Firmware allocations -+ */ -+ -+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "Dump firmware code image"); -+ DevmemPDumpLoadMem(psDevInfo->psRGXFWCodeMemDesc, -+ 0, -+ uiFWCodeAllocSize, -+ PDUMP_FLAGS_CONTINUOUS); -+#endif -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) -+ if (!bUseSecureFWData) -+#endif -+ { -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "Dump firmware data image"); -+ DevmemPDumpLoadMem(psDevInfo->psRGXFWDataMemDesc, -+ 0, -+ uiFWDataAllocSize, -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+ -+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) -+ if (uiFWCorememCodeAllocSize != 0) -+ { -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "Dump firmware 
coremem code image"); -+ DevmemPDumpLoadMem(psDevInfo->psRGXFWCorememCodeMemDesc, -+ 0, -+ uiFWCorememCodeAllocSize, -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+#endif -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) -+ if (!bUseSecureFWData && uiFWCorememDataAllocSize) -+#else -+ if (uiFWCorememDataAllocSize != 0) -+#endif -+ { -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "Dump firmware coremem data store image"); -+ DevmemPDumpLoadMem(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, -+ 0, -+ uiFWCorememDataAllocSize, -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+ -+ /* -+ * Release Firmware allocations and clean up -+ */ -+release_fw_allocations: -+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) -+ if (!bUseSecureFWData && uiFWCorememDataAllocSize) -+#else -+ if (uiFWCorememDataAllocSize != 0) -+#endif -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc); -+ } -+ -+release_corememcode: -+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) -+ if (uiFWCorememCodeAllocSize != 0) -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc); -+ } -+#endif -+ -+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) -+release_data: -+#endif -+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) -+ if (!bUseSecureFWData) -+#endif -+ { -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc); -+ } -+ -+release_code: -+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); -+#endif -+cleanup_initfw: -+ OSUnloadFirmware(psRGXFW); -+fw_load_fail: -+ -+ return eError; -+} -+ -+ -+#if defined(PDUMP) -+/*! -+******************************************************************************* -+ -+ @Function InitialiseHWPerfCounters -+ -+ @Description Initialisation of hardware performance counters and dumping -+ them out to pdump, so that they can be modified at a later -+ point. -+ -+ @Input pvDevice -+ @Input psHWPerfDataMemDesc -+ @Input psHWPerfInitDataInt -+ -+ @Return void -+ -+******************************************************************************/ -+ -+static void InitialiseHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode, -+ void *pvDevice, -+ DEVMEM_MEMDESC *psHWPerfDataMemDesc, -+ RGXFWIF_HWPERF_CTL *psHWPerfInitDataInt) -+{ -+ RGXFWIF_HWPERF_CTL_BLK *psHWPerfInitBlkData; -+#if defined(HWPERF_UNIFIED) -+ RGXFWIF_HWPERF_DA_BLK *psHWPerfInitDABlkData; -+#endif -+ IMG_UINT32 ui32CntBlkModelLen; -+ const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel; -+ const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc; -+ IMG_UINT32 ui32BlockID, ui32BlkCfgIdx, ui32CounterIdx; -+ RGX_HWPERF_CNTBLK_RT_INFO sCntBlkRtInfo; -+ -+ ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel); -+ -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "HWPerf Counter Config starts here."); -+ -+ for (ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; ui32BlkCfgIdx++) -+ { -+ IMG_UINT32 uiUnit; -+ IMG_BOOL bDirect; -+ -+ /* Exit early if this core does not have any of these counter blocks -+ * due to core type/BVNC features.... 
*/ -+ psBlkTypeDesc = &asCntBlkTypeModel[ui32BlkCfgIdx]; -+ if (psBlkTypeDesc->pfnIsBlkPresent(psBlkTypeDesc, pvDevice, &sCntBlkRtInfo) == IMG_FALSE) -+ { -+ continue; -+ } -+ -+ /* Program all counters in one block so those already on may -+ * be configured off and vice-versa. */ -+ for (ui32BlockID = psBlkTypeDesc->ui32CntBlkIdBase; -+ ui32BlockID < psBlkTypeDesc->ui32CntBlkIdBase+sCntBlkRtInfo.ui32NumUnits; -+ ui32BlockID++) -+ { -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "Unit %d Block : %s", -+ ui32BlockID-psBlkTypeDesc->ui32CntBlkIdBase, -+ psBlkTypeDesc->pszBlockNameComment); -+ -+ /* Get the block configure store to update from the global store of -+ * block configuration. This is used to remember the configuration -+ * between configurations and core power on in APM. -+ * For HWPERF_UNIFIED layout we will have a different -+ * structure type to decode the HWPerf block. This is indicated by -+ * the RGX_CNTBLK_ID_DA_MASK bit being set in the block-ID value. */ -+ -+ bDirect = (psBlkTypeDesc->ui32IndirectReg == 0U); -+ uiUnit = ui32BlockID - psBlkTypeDesc->ui32CntBlkIdBase; -+ -+#if defined(HWPERF_UNIFIED) -+ if ((ui32BlockID & RGX_CNTBLK_ID_DA_MASK) == RGX_CNTBLK_ID_DA_MASK) -+ { -+ psHWPerfInitDABlkData = rgxfw_hwperf_get_da_block_ctl(ui32BlockID, psHWPerfInitDataInt); -+ -+ PVR_ASSERT(psHWPerfInitDABlkData); -+ -+ psHWPerfInitDABlkData->eBlockID = ui32BlockID; -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "eBlockID: The Block ID for the layout block. See RGX_HWPERF_CNTBLK_ID for further information."); -+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, -+ (size_t)&(psHWPerfInitDABlkData->eBlockID) - (size_t)(psHWPerfInitDataInt), -+ psHWPerfInitDABlkData->eBlockID, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ psHWPerfInitDABlkData->uiEnabled = 0U; -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "uiEnabled: Set to 0x1 if the block needs to be enabled during playback."); -+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, -+ (size_t)&(psHWPerfInitDABlkData->uiEnabled) - (size_t)(psHWPerfInitDataInt), -+ psHWPerfInitDABlkData->uiEnabled, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ psHWPerfInitDABlkData->uiNumCounters = 0U; -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "uiNumCounters (X): Specifies the number of valid counters" -+ " [0..%d] which follow.", RGX_CNTBLK_COUNTERS_MAX); -+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, -+ (size_t)&(psHWPerfInitDABlkData->uiNumCounters) - (size_t)(psHWPerfInitDataInt), -+ psHWPerfInitDABlkData->uiNumCounters, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ for (ui32CounterIdx = 0; ui32CounterIdx < RGX_CNTBLK_COUNTERS_MAX; ui32CounterIdx++) -+ { -+ psHWPerfInitDABlkData->aui32Counters[ui32CounterIdx] = IMG_UINT32_C(0x00000000); -+ -+ if (bDirect) -+ { -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "%s_COUNTER_%d", -+ psBlkTypeDesc->pszBlockNameComment, -+ ui32CounterIdx); -+ } -+ else -+ { -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "%s%d_COUNTER_%d", -+ psBlkTypeDesc->pszBlockNameComment, -+ uiUnit, ui32CounterIdx); -+ } -+ -+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, -+ (size_t)&(psHWPerfInitDABlkData->aui32Counters[ui32CounterIdx]) - (size_t)(psHWPerfInitDataInt), -+ psHWPerfInitDABlkData->aui32Counters[ui32CounterIdx], -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+ } -+ else -+#endif /* defined(HWPERF_UNIFIED) */ -+ { -+ psHWPerfInitBlkData = rgxfw_hwperf_get_block_ctl(ui32BlockID, psHWPerfInitDataInt); -+ /* Assert to check for HWPerf block mis-configuration 
*/ -+ PVR_ASSERT(psHWPerfInitBlkData); -+ -+ psHWPerfInitBlkData->ui32Valid = IMG_TRUE; -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "bValid: This specifies if the layout block is valid for the given BVNC."); -+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, -+ (size_t)&(psHWPerfInitBlkData->ui32Valid) - (size_t)(psHWPerfInitDataInt), -+ psHWPerfInitBlkData->ui32Valid, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ psHWPerfInitBlkData->ui32Enabled = IMG_FALSE; -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "bEnabled: Set to 0x1 if the block needs to be enabled during playback."); -+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, -+ (size_t)&(psHWPerfInitBlkData->ui32Enabled) - (size_t)(psHWPerfInitDataInt), -+ psHWPerfInitBlkData->ui32Enabled, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ psHWPerfInitBlkData->eBlockID = ui32BlockID; -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "eBlockID: The Block ID for the layout block. See RGX_HWPERF_CNTBLK_ID for further information."); -+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, -+ (size_t)&(psHWPerfInitBlkData->eBlockID) - (size_t)(psHWPerfInitDataInt), -+ psHWPerfInitBlkData->eBlockID, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ psHWPerfInitBlkData->uiCounterMask = 0x00; -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "uiCounterMask: Bitmask for selecting the counters that need to be configured. (Bit 0 - counter0, bit 1 - counter1 and so on.)"); -+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, -+ (size_t)&(psHWPerfInitBlkData->uiCounterMask) - (size_t)(psHWPerfInitDataInt), -+ psHWPerfInitBlkData->uiCounterMask, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ for (ui32CounterIdx = RGX_CNTBLK_COUNTER0_ID; ui32CounterIdx < psBlkTypeDesc->ui8NumCounters; ui32CounterIdx++) -+ { -+ psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx] = IMG_UINT64_C(0x0000000000000000); -+ -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "%s_COUNTER_%d", psBlkTypeDesc->pszBlockNameComment, ui32CounterIdx); -+ DevmemPDumpLoadMemValue64(psHWPerfDataMemDesc, -+ (size_t)&(psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx]) - (size_t)(psHWPerfInitDataInt), -+ psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx], -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ } -+ } -+ } -+ } -+} -+/*! -+******************************************************************************* -+ -+ @Function InitialiseCustomCounters -+ -+ @Description Initialisation of custom counters and dumping them out to -+ pdump, so that they can be modified at a later point. -+ -+ @Input psHWPerfDataMemDesc -+ -+ @Return void -+ -+******************************************************************************/ -+ -+static void InitialiseCustomCounters(PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEM_MEMDESC *psHWPerfDataMemDesc) -+{ -+ IMG_UINT32 ui32CustomBlock, ui32CounterID; -+ -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "ui32SelectedCountersBlockMask - The Bitmask of the custom counters that are to be selected"); -+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, -+ offsetof(RGXFWIF_HWPERF_CTL, ui32SelectedCountersBlockMask), -+ 0, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ for (ui32CustomBlock = 0; ui32CustomBlock < RGX_HWPERF_MAX_CUSTOM_BLKS; ui32CustomBlock++) -+ { -+ /* -+ * Some compilers cannot cope with the use of offsetof() below - the specific problem being the use of -+ * a non-const variable in the expression, which it needs to be const. Typical compiler error produced is -+ * "expression must have a constant value". 
-+ */ -+ const IMG_DEVMEM_OFFSET_T uiOffsetOfCustomBlockSelectedCounters -+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_HWPERF_CTL *)0)->SelCntr[ui32CustomBlock].ui32NumSelectedCounters); -+ -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "ui32NumSelectedCounters - The Number of counters selected for this Custom Block: %d",ui32CustomBlock ); -+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, -+ uiOffsetOfCustomBlockSelectedCounters, -+ 0, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ for (ui32CounterID = 0; ui32CounterID < RGX_HWPERF_MAX_CUSTOM_CNTRS; ui32CounterID++ ) -+ { -+ const IMG_DEVMEM_OFFSET_T uiOffsetOfCustomBlockSelectedCounterIDs -+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_HWPERF_CTL *)0)->SelCntr[ui32CustomBlock].aui32SelectedCountersIDs[ui32CounterID]); -+ -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, -+ "CUSTOMBLK_%d_COUNTERID_%d",ui32CustomBlock, ui32CounterID); -+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, -+ uiOffsetOfCustomBlockSelectedCounterIDs, -+ 0, -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+ } -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function InitialiseAllCounters -+ -+ @Description Initialise HWPerf and custom counters -+ -+ @Input psDeviceNode : Device Node -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+static PVRSRV_ERROR InitialiseAllCounters(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; -+ RGXFWIF_HWPERF_CTL *psHWPerfInitData; -+ PVRSRV_ERROR eError; -+ -+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc, (void **)&psHWPerfInitData); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", failHWPerfCountersMemDescAqCpuVirt); -+ -+ InitialiseHWPerfCounters(psDeviceNode, psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc, psHWPerfInitData); -+ InitialiseCustomCounters(psDeviceNode, psDevInfo->psRGXFWIfHWPerfCountersMemDesc); -+ RGXFwSharedMemCacheOpPtr(psHWPerfInitData, FLUSH); -+ -+failHWPerfCountersMemDescAqCpuVirt: -+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc); -+ -+ return eError; -+} -+#endif /* PDUMP */ -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) -+static PVRSRV_ERROR RGXValidateTDHeap(PVRSRV_DEVICE_NODE *psDeviceNode, -+ PVRSRV_PHYS_HEAP ePhysHeap, -+ PHYS_HEAP_USAGE_FLAGS ui32RequiredFlags) -+{ -+ PHYS_HEAP *psHeap = psDeviceNode->apsPhysHeap[ePhysHeap]; -+ PHYS_HEAP_USAGE_FLAGS ui32HeapFlags = PhysHeapGetFlags(psHeap); -+ PHYS_HEAP_USAGE_FLAGS ui32InvalidFlags = ~(PHYS_HEAP_USAGE_FW_PRIV_DATA | PHYS_HEAP_USAGE_FW_CODE -+ | PHYS_HEAP_USAGE_GPU_SECURE | PHYS_HEAP_USAGE_FW_PRIVATE); -+ -+ PVR_LOG_RETURN_IF_FALSE_VA((ui32HeapFlags & ui32RequiredFlags) != 0, -+ PVRSRV_ERROR_NOT_SUPPORTED, -+ "TD heap is missing required flags. flags: 0x%x / required:0x%x", -+ ui32HeapFlags, -+ ui32RequiredFlags); -+ -+ PVR_LOG_RETURN_IF_FALSE_VA((ui32HeapFlags & ui32InvalidFlags) == 0, -+ PVRSRV_ERROR_NOT_SUPPORTED, -+ "TD heap uses invalid flags. 
flags: 0x%x / invalid:0x%x", -+ ui32HeapFlags, -+ ui32InvalidFlags); -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR RGXValidateTDHeaps(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_PRIV_DATA, PHYS_HEAP_USAGE_FW_PRIV_DATA); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_PRIV_DATA"); -+ -+ eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_CODE, PHYS_HEAP_USAGE_FW_CODE); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_CODE"); -+ -+ eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_GPU_SECURE, PHYS_HEAP_USAGE_GPU_SECURE); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:GPU_SECURE"); -+ -+ return PVRSRV_OK; -+} -+#endif -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXInit -+ -+ @Description RGX Initialisation -+ -+ @Input psDeviceNode -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Services initialisation parameters */ -+ RGX_SRVINIT_APPHINTS sApphints = {0}; -+ IMG_UINT32 ui32FWConfigFlags, ui32FWConfigFlagsExt, ui32FwOsCfgFlags; -+ IMG_UINT32 ui32DeviceFlags; -+ -+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; -+ RGX_LAYER_PARAMS sLayerParams; -+ -+ PDUMPCOMMENT(psDeviceNode, "RGX Initialisation Part 1"); -+ -+ PDUMPCOMMENT(psDeviceNode, "Device Name: %s", -+ psDeviceNode->psDevConfig->pszName); -+ PDUMPCOMMENT(psDeviceNode, "Device ID: %u (%d)", -+ psDeviceNode->sDevId.ui32InternalID, -+ psDeviceNode->sDevId.i32KernelDeviceID); -+ -+ if (psDeviceNode->psDevConfig->pszVersion) -+ { -+ PDUMPCOMMENT(psDeviceNode, "Device Version: %s", -+ psDeviceNode->psDevConfig->pszVersion); -+ } -+ -+ /* pdump info about the core */ -+ PDUMPCOMMENT(psDeviceNode, -+ "RGX Version Information (KM): %d.%d.%d.%d", -+ psDevInfo->sDevFeatureCfg.ui32B, -+ psDevInfo->sDevFeatureCfg.ui32V, -+ psDevInfo->sDevFeatureCfg.ui32N, -+ psDevInfo->sDevFeatureCfg.ui32C); -+ -+ RGXInitMultiCoreInfo(psDeviceNode); -+ -+ sLayerParams.psDevInfo = psDevInfo; -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) -+ eError = RGXValidateTDHeaps(psDeviceNode); -+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeaps"); -+#endif -+ -+#if defined(SUPPORT_AUTOVZ) -+ if (PVRSRV_VZ_MODE_IS(HOST)) -+ { -+ /* The RGX_CR_MTS_DM0_INTERRUPT_ENABLE register is always set by the firmware during initialisation -+ * and it provides a good method of determining if the firmware has been booted previously */ -+ psDeviceNode->bAutoVzFwIsUp = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_DM0_INTERRUPT_ENABLE) != 0); -+ -+ PVR_LOG(("AutoVz startup check: firmware is %s;", -+ (psDeviceNode->bAutoVzFwIsUp) ? "already running" : "powered down")); -+ PVR_LOG(("AutoVz allow GPU powerdown is %s:", -+ (psDeviceNode->bAutoVzAllowGPUPowerdown) ? 
"enabled" : "disabled")); -+ } -+ else if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Guest assumes the firmware is always available */ -+ psDeviceNode->bAutoVzFwIsUp = IMG_TRUE; -+ psDeviceNode->bAutoVzAllowGPUPowerdown = IMG_FALSE; -+ } -+ else -+#endif -+ { -+ /* Firmware does not follow the AutoVz life-cycle */ -+ psDeviceNode->bAutoVzFwIsUp = IMG_FALSE; -+ } -+ -+ if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)) -+ { -+ /* set the device power state here as the regular power -+ * callbacks will not be executed on this driver */ -+ psDevInfo->bRGXPowered = IMG_TRUE; -+ } -+ -+ /* Set which HW Safety Events will be handled by the driver */ -+ psDevInfo->ui32HostSafetyEventMask |= RGX_IS_FEATURE_SUPPORTED(psDevInfo, WATCHDOG_TIMER) ? -+ RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN : 0; -+ psDevInfo->ui32HostSafetyEventMask |= (RGX_DEVICE_HAS_FEATURE_VALUE(&sLayerParams, ECC_RAMS) -+ && (RGX_DEVICE_GET_FEATURE_VALUE(&sLayerParams, ECC_RAMS) > 0)) ? -+ RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN : 0; -+ -+ /* Services initialisation parameters */ -+ GetApphints(psDevInfo, &sApphints); -+ InitDeviceFlags(&sApphints, &ui32DeviceFlags); -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+#if defined(EMULATOR) -+ if ((sApphints.bEnableTrustedDeviceAceConfig) && -+ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE))) -+ { -+ SetTrustedDeviceAceEnabled(psDeviceNode->psDevConfig->hSysData); -+ } -+#endif -+#endif -+ -+ eError = RGXInitCreateFWKernelMemoryContext(psDeviceNode); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create FW kernel memory context (%u)", -+ __func__, eError)); -+ goto cleanup; -+ } -+ -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ eError = InitFirmware(psDeviceNode, &sApphints); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: InitFirmware failed (%d)", -+ __func__, eError)); -+ goto cleanup; -+ } -+ } -+ -+ /* -+ * Setup Firmware initialisation data -+ */ -+ -+ GetFWConfigFlags(psDeviceNode, &sApphints, &ui32FWConfigFlags, &ui32FWConfigFlagsExt, &ui32FwOsCfgFlags); -+ -+ eError = RGXInitFirmware(psDeviceNode, -+ sApphints.bEnableSignatureChecks, -+ sApphints.ui32SignatureChecksBufSize, -+ sApphints.ui32HWPerfFWBufSize, -+ (IMG_UINT64)sApphints.ui32HWPerfFilter0 | -+ ((IMG_UINT64)sApphints.ui32HWPerfFilter1 << 32), -+ ui32FWConfigFlags, -+ sApphints.ui32LogType, -+ GetFilterFlags(&sApphints), -+ sApphints.ui32JonesDisableMask, -+ sApphints.ui32HWRDebugDumpLimit, -+ sizeof(RGXFWIF_HWPERF_CTL), -+#if defined(SUPPORT_VALIDATION) -+ &sApphints.aui32TPUTrilinearFracMask[0], -+#else -+ NULL, -+#endif -+ sApphints.eRGXRDPowerIslandConf, -+ sApphints.eFirmwarePerf, -+ sApphints.ui32KCCBSizeLog2, -+ ui32FWConfigFlagsExt, -+ ui32FwOsCfgFlags); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: RGXInitFirmware failed (%d)", -+ __func__, eError)); -+ goto cleanup; -+ } -+ -+#if defined(PDUMP) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ eError = InitialiseAllCounters(psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: InitialiseAllCounters failed (%d)", -+ __func__, eError)); -+ goto cleanup; -+ } -+ } -+#endif -+ -+ /* -+ * Perform second stage of RGX initialisation -+ */ -+ eError = RGXInitDevPart2(psDeviceNode, -+ ui32DeviceFlags, -+ sApphints.ui32HWPerfHostFilter, -+ sApphints.eRGXActivePMConf, -+ sApphints.eDebugDumpFWTLogType); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: RGXInitDevPart2 failed (%d)", -+ __func__, eError)); -+ goto cleanup; -+ } 
-+ -+#if defined(SUPPORT_VALIDATION) -+ PVRSRVAppHintDumpState(psDeviceNode); -+#endif -+ -+ eError = PVRSRV_OK; -+ -+cleanup: -+ return eError; -+} -+ -+/****************************************************************************** -+ End of file (rgxsrvinit.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxstartstop.c b/drivers/gpu/drm/img-rogue/rgxstartstop.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxstartstop.c -@@ -0,0 +1,1314 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device specific start/stop routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device specific start/stop routines -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+/* The routines implemented here are built on top of an abstraction layer to -+ * hide DDK/OS-specific details in case they are used outside of the DDK -+ * (e.g. when trusted device is enabled). -+ * Any new dependency should be added to rgxlayer.h. -+ * Any new code should be built on top of the existing abstraction layer, -+ * which should be extended when necessary. 
*/ -+#include "rgxstartstop.h" -+#include "rgxfwutils.h" -+ -+#if defined(SUPPORT_SHARED_SLC) -+#include "rgxapi_km.h" -+#endif -+ -+#include "rgxdevice.h" -+#include "km/rgxdefs_km.h" -+ -+#define SOC_FEATURE_STRICT_SAME_ADDRESS_WRITE_ORDERING -+ -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXEnableClocks -+ -+ @Description Enable RGX Clocks -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return void -+ -+******************************************************************************/ -+static void RGXEnableClocks(const void *hPrivate) -+{ -+ RGXCommentLog(hPrivate, "RGX clock: use default (automatic clock gating)"); -+} -+ -+static PVRSRV_ERROR RGXWriteMetaCoreRegThoughSP(const void *hPrivate, -+ IMG_UINT32 ui32CoreReg, -+ IMG_UINT32 ui32Value) -+{ -+ IMG_UINT32 i = 0; -+ -+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXDT_OFFSET, ui32Value); -+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, ui32CoreReg & ~META_CR_TXUXXRXRQ_RDnWR_BIT); -+ -+ do -+ { -+ RGXReadMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, &ui32Value); -+ } while (((ui32Value & META_CR_TXUXXRXRQ_DREADY_BIT) != META_CR_TXUXXRXRQ_DREADY_BIT) && (i++ < 1000)); -+ -+ if (i == 1000) -+ { -+ RGXCommentLog(hPrivate, "RGXWriteMetaCoreRegThoughSP: Timeout"); -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR RGXStartFirmware(const void *hPrivate) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Give privilege to debug and slave port */ -+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_SYSC_JTAG_THREAD, META_CR_SYSC_JTAG_THREAD_PRIV_EN); -+ -+ /* Point Meta to the bootloader address, global (uncached) range */ -+ eError = RGXWriteMetaCoreRegThoughSP(hPrivate, -+ PC_ACCESS(0), -+ RGXFW_BOOTLDR_META_ADDR | META_MEM_GLOBAL_RANGE_BIT); -+ -+ if (eError != PVRSRV_OK) -+ { -+ RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start failed!"); -+ return eError; -+ } -+ -+ /* Enable minim encoding */ -+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXPRIVEXT, META_CR_TXPRIVEXT_MINIM_EN); -+ -+ /* Enable Meta thread */ -+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_T0ENABLE_OFFSET, META_CR_TXENABLE_ENABLE_BIT); -+ -+ return PVRSRV_OK; -+} -+ -+/*! 
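
The slave-port write above (RGXWriteMetaCoreRegThoughSP) spins on TXUXXRXRQ until the DREADY bit is set and gives up after a fixed number of attempts. Below is a hedged, self-contained sketch of that bounded-poll pattern; the register model and names are placeholders, not the DDK accessors.

/* Illustrative bounded poll; the register is simulated so the sketch runs anywhere. */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define POLL_MAX_ATTEMPTS 1000u
#define DREADY_BIT        (1u << 31)

/* Simulated register read: reports "ready" after a few reads. */
static uint32_t read_reg32(uint32_t *reads_so_far)
{
    return (++(*reads_so_far) >= 3u) ? DREADY_BIT : 0u;
}

/* Returns true if the bit came up before the attempt budget ran out. */
static bool wait_for_dready(uint32_t *reg_state)
{
    uint32_t attempt = 0;
    uint32_t value;

    do
    {
        value = read_reg32(reg_state);
    } while (((value & DREADY_BIT) != DREADY_BIT) && (attempt++ < POLL_MAX_ATTEMPTS));

    /* attempt only reaches the limit when the bit never came up. */
    return attempt < POLL_MAX_ATTEMPTS;
}

int main(void)
{
    uint32_t reg_state = 0;
    printf("ready: %s\n", wait_for_dready(&reg_state) ? "yes" : "timed out");
    return 0;
}
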
-+******************************************************************************* -+ -+ @Function RGXInitMetaProcWrapper -+ -+ @Description Configures the hardware wrapper of the META processor -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return void -+ -+******************************************************************************/ -+static void RGXInitMetaProcWrapper(const void *hPrivate) -+{ -+ IMG_UINT64 ui64GartenConfig; -+ -+ /* Set Garten IDLE to META idle and Set the Garten Wrapper BIF Fence address */ -+ -+ /* Garten IDLE bit controlled by META */ -+ ui64GartenConfig = RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META; -+ -+ /* The fence addr is set at the fw init sequence */ -+ -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) -+ { -+ /* Set PC = 0 for fences */ -+ ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_PC_BASE_CLRMSK; -+ ui64GartenConfig |= (IMG_UINT64)MMU_CONTEXT_MAPPING_FWPRIV -+ << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_INFRA__FENCE_PC_BASE_SHIFT; -+ -+ } -+ else -+ { -+ /* Set PC = 0 for fences */ -+ ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK; -+ ui64GartenConfig |= (IMG_UINT64)MMU_CONTEXT_MAPPING_FWPRIV -+ << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT; -+ -+ /* Set SLC DM=META */ -+ ui64GartenConfig |= ((IMG_UINT64) RGXFW_SEGMMU_META_BIFDM_ID) << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT; -+ } -+ -+ RGXCommentLog(hPrivate, "RGXStart: Configure META wrapper"); -+ RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, ui64GartenConfig); -+} -+ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+/*! -+******************************************************************************* -+ -+ @Function RGXInitMipsProcWrapper -+ -+ @Description Configures the hardware wrapper of the MIPS processor -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return void -+ -+******************************************************************************/ -+static void RGXInitMipsProcWrapper(const void *hPrivate) -+{ -+ IMG_DEV_PHYADDR sPhyAddr; -+ IMG_UINT64 ui64RemapSettings = RGXMIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE; /* Same for all remap registers */ -+ -+ RGXCommentLog(hPrivate, "RGXStart: Configure MIPS wrapper"); -+ -+ /* -+ * MIPS wrapper (registers transaction ID and ISA mode) setup -+ */ -+ -+ RGXCommentLog(hPrivate, "RGXStart: Write wrapper config register"); -+ -+ if (RGXGetDevicePhysBusWidth(hPrivate) > 32) -+ { -+ RGXWriteReg32(hPrivate, -+ RGX_CR_MIPS_WRAPPER_CONFIG, -+ (RGXMIPSFW_REGISTERS_VIRTUAL_BASE >> -+ RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN) | -+ RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS); -+ } -+ else -+ { -+ RGXAcquireGPURegsAddr(hPrivate, &sPhyAddr); -+ -+ RGXMIPSWrapperConfig(hPrivate, -+ RGX_CR_MIPS_WRAPPER_CONFIG, -+ sPhyAddr.uiAddr, -+ RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN, -+ RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS); -+ } -+ -+ /* -+ * Boot remap setup -+ */ -+ -+ RGXAcquireBootRemapAddr(hPrivate, &sPhyAddr); -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) -+ /* Do not mark accesses to a FW code remap region as DRM accesses */ -+ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; -+#endif -+ -+ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; -+#if defined(MIPS_FW_CODE_OSID) -+ ui64RemapSettings |= ((IMG_UINT64) MIPS_FW_CODE_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; -+#else -+ ui64RemapSettings |= ((IMG_UINT64) FW_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; -+#endif -+ -+ 
RGXCommentLog(hPrivate, "RGXStart: Write boot remap registers"); -+ RGXBootRemapConfig(hPrivate, -+ RGX_CR_MIPS_ADDR_REMAP1_CONFIG1, -+ RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN, -+ RGX_CR_MIPS_ADDR_REMAP1_CONFIG2, -+ sPhyAddr.uiAddr, -+ ~RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK, -+ ui64RemapSettings); -+ -+#if defined(FIX_HW_BRN_63553_BIT_MASK) -+ if (RGX_DEVICE_HAS_BRN(hPrivate, 63553)) -+ { -+ IMG_BOOL bPhysBusAbove32Bit = RGXGetDevicePhysBusWidth(hPrivate) > 32; -+ IMG_BOOL bDevicePA0IsValid = RGXDevicePA0IsValid(hPrivate); -+ -+ /* WA always required on 36 bit cores, to avoid continuous unmapped memory accesses to address 0x0 */ -+ if (bPhysBusAbove32Bit || !bDevicePA0IsValid) -+ { -+ RGXCodeRemapConfig(hPrivate, -+ RGX_CR_MIPS_ADDR_REMAP5_CONFIG1, -+ 0x0 | RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN, -+ RGX_CR_MIPS_ADDR_REMAP5_CONFIG2, -+ sPhyAddr.uiAddr, -+ ~RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK, -+ ui64RemapSettings); -+ } -+ } -+#endif -+ -+ /* -+ * Data remap setup -+ */ -+ -+ RGXAcquireDataRemapAddr(hPrivate, &sPhyAddr); -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) -+ if (RGXGetDevicePhysBusWidth(hPrivate) > 32) -+ { -+ /* Remapped private data in secure memory */ -+ ui64RemapSettings |= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN; -+ } -+ else -+ { -+ /* Remapped data in non-secure memory */ -+ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; -+ } -+#endif -+ -+ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; -+ ui64RemapSettings |= ((IMG_UINT64) FW_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; -+ -+ RGXCommentLog(hPrivate, "RGXStart: Write data remap registers"); -+ RGXDataRemapConfig(hPrivate, -+ RGX_CR_MIPS_ADDR_REMAP2_CONFIG1, -+ RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN, -+ RGX_CR_MIPS_ADDR_REMAP2_CONFIG2, -+ sPhyAddr.uiAddr, -+ ~RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK, -+ ui64RemapSettings); -+ -+ /* -+ * Code remap setup -+ */ -+ -+ RGXAcquireCodeRemapAddr(hPrivate, &sPhyAddr); -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) -+ /* Do not mark accesses to a FW code remap region as DRM accesses */ -+ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; -+#endif -+ -+ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; -+#if defined(MIPS_FW_CODE_OSID) -+ ui64RemapSettings |= ((IMG_UINT64) MIPS_FW_CODE_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; -+#else -+ ui64RemapSettings |= ((IMG_UINT64) FW_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; -+#endif -+ -+ RGXCommentLog(hPrivate, "RGXStart: Write exceptions remap registers"); -+ RGXCodeRemapConfig(hPrivate, -+ RGX_CR_MIPS_ADDR_REMAP3_CONFIG1, -+ RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN, -+ RGX_CR_MIPS_ADDR_REMAP3_CONFIG2, -+ sPhyAddr.uiAddr, -+ ~RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK, -+ ui64RemapSettings); -+ -+ if (RGXGetDevicePhysBusWidth(hPrivate) == 32) -+ { -+ /* -+ * Trampoline remap setup -+ */ -+ -+ RGXAcquireTrampolineRemapAddr(hPrivate, &sPhyAddr); -+ ui64RemapSettings = RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE; -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) -+ /* Remapped data in non-secure memory */ -+ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; -+#endif -+ -+ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; -+ ui64RemapSettings |= ((IMG_UINT64) FW_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; -+ -+ RGXCommentLog(hPrivate, 
"RGXStart: Write trampoline remap registers"); -+ RGXTrampolineRemapConfig(hPrivate, -+ RGX_CR_MIPS_ADDR_REMAP4_CONFIG1, -+ sPhyAddr.uiAddr | RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN, -+ RGX_CR_MIPS_ADDR_REMAP4_CONFIG2, -+ RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR, -+ ~RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK, -+ ui64RemapSettings); -+ } -+ -+ /* Garten IDLE bit controlled by MIPS */ -+ RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to MIPS"); -+ RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META); -+ -+ /* Turn on the EJTAG probe (only useful driver live) */ -+ RGXWriteReg32(hPrivate, RGX_CR_MIPS_DEBUG_CONFIG, 0); -+} -+#endif -+ -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXInitRiscvProcWrapper -+ -+ @Description Configures the hardware wrapper of the RISCV processor -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return void -+ -+******************************************************************************/ -+static void RGXInitRiscvProcWrapper(const void *hPrivate) -+{ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ RGX_LAYER_PARAMS *psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psParams->psDevInfo; -+#endif -+ IMG_UINT32 ui32BootCodeRemap = RGXRISCVFW_BOOTLDR_CODE_REMAP; -+ IMG_UINT32 ui32BootDataRemap = RGXRISCVFW_BOOTLDR_DATA_REMAP; -+ IMG_DEV_VIRTADDR sTmp; -+ -+ RGXCommentLog(hPrivate, "RGXStart: Configure RISCV wrapper"); -+ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) < 4) -+#endif -+ { -+ RGXCommentLog(hPrivate, "RGXStart: Write boot code remap"); -+ RGXAcquireBootCodeAddr(hPrivate, &sTmp); -+ RGXWriteReg64(hPrivate, -+ ui32BootCodeRemap, -+ sTmp.uiAddr | -+ (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT) -+ << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT | -+ (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT | -+ RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN); -+ -+ RGXCommentLog(hPrivate, "RGXStart: Write boot data remap"); -+ RGXAcquireBootDataAddr(hPrivate, &sTmp); -+ RGXWriteReg64(hPrivate, -+ ui32BootDataRemap, -+ sTmp.uiAddr | -+ (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT) -+ << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT | -+ (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT | -+#if defined(SUPPORT_TRUSTED_DEVICE) -+ RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN | -+#endif -+ RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN); -+ } -+ -+ /* Garten IDLE bit controlled by RISCV */ -+ RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to RISCV"); -+ RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META); -+} -+ -+ -+/*! 
-+******************************************************************************* -+ -+ @Function __RGXInitSLC -+ -+ @Description Initialise RGX SLC -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return void -+ -+******************************************************************************/ -+static void __RGXInitSLC(const void *hPrivate) -+{ -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_CACHE_HIERARCHY)) -+ { -+ IMG_UINT32 ui32Reg; -+ IMG_UINT32 ui32RegVal; -+ -+ /* -+ * SLC control -+ */ -+ ui32Reg = RGX_CR_SLC3_CTRL_MISC; -+ ui32RegVal = RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH | -+ RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN; -+ RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal); -+ -+ /* -+ * SLC scramble bits -+ */ -+ { -+ IMG_UINT32 i; -+ IMG_UINT32 ui32Count=0; -+ IMG_UINT32 ui32SLCBanks = RGXGetDeviceSLCBanks(hPrivate); -+ IMG_UINT64 aui64ScrambleValues[4]; -+ IMG_UINT32 aui32ScrambleRegs[] = { -+ RGX_CR_SLC3_SCRAMBLE, -+ RGX_CR_SLC3_SCRAMBLE2, -+ RGX_CR_SLC3_SCRAMBLE3, -+ RGX_CR_SLC3_SCRAMBLE4 -+ }; -+ -+ if (2 == ui32SLCBanks) -+ { -+ aui64ScrambleValues[0] = IMG_UINT64_C(0x6965a99a55696a6a); -+ aui64ScrambleValues[1] = IMG_UINT64_C(0x6aa9aa66959aaa9a); -+ aui64ScrambleValues[2] = IMG_UINT64_C(0x9a5665965a99a566); -+ aui64ScrambleValues[3] = IMG_UINT64_C(0x5aa69596aa66669a); -+ ui32Count = 4; -+ } -+ else if (4 == ui32SLCBanks) -+ { -+ aui64ScrambleValues[0] = IMG_UINT64_C(0xc6788d722dd29ce4); -+ aui64ScrambleValues[1] = IMG_UINT64_C(0x7272e4e11b279372); -+ aui64ScrambleValues[2] = IMG_UINT64_C(0x87d872d26c6c4be1); -+ aui64ScrambleValues[3] = IMG_UINT64_C(0xe1b4878d4b36e478); -+ ui32Count = 4; -+ -+ } -+ else if (8 == ui32SLCBanks) -+ { -+ aui64ScrambleValues[0] = IMG_UINT64_C(0x859d6569e8fac688); -+ aui64ScrambleValues[1] = IMG_UINT64_C(0xf285e1eae4299d33); -+ aui64ScrambleValues[2] = IMG_UINT64_C(0x1e1af2be3c0aa447); -+ ui32Count = 3; -+ } -+ -+ for (i = 0; i < ui32Count; i++) -+ { -+ IMG_UINT32 ui32Reg = aui32ScrambleRegs[i]; -+ IMG_UINT64 ui64Value = aui64ScrambleValues[i]; -+ RGXWriteReg64(hPrivate, ui32Reg, ui64Value); -+ } -+ } -+ -+ { -+ /* Disable the forced SLC coherency which the hardware enables for compatibility with older pdumps */ -+ RGXCommentLog(hPrivate, "Disable forced SLC coherency"); -+ RGXWriteReg64(hPrivate, RGX_CR_GARTEN_SLC, 0); -+ } -+ } -+ else -+ { -+ IMG_UINT32 ui32Reg; -+ IMG_UINT32 ui32RegVal; -+ IMG_UINT64 ui64RegVal; -+ -+ /* -+ * SLC Bypass control -+ */ -+ ui32Reg = RGX_CR_SLC_CTRL_BYPASS; -+ ui64RegVal = 0; -+ -+#if defined(RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN) -+ if ((RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, SLC_SIZE_IN_KILOBYTES) == 8) || -+ RGX_DEVICE_HAS_BRN(hPrivate, 61450)) -+ { -+ RGXCommentLog(hPrivate, "Bypass SLC for IPF_OBJ and IPF_CPF"); -+ ui64RegVal |= (IMG_UINT64) RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN | -+ (IMG_UINT64) RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN; -+ } -+#endif -+ -+ if (ui64RegVal != 0) -+ { -+ RGXReadModifyWriteReg64(hPrivate, ui32Reg, ui64RegVal, ~ui64RegVal); -+ } -+ -+ /* -+ * SLC Misc control. -+ * -+ * Note: This is a 64bit register and we set only the lower 32bits leaving the top -+ * 32bits (RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS) unchanged from the HW default. 
-+ */ -+ ui32Reg = RGX_CR_SLC_CTRL_MISC; -+ ui32RegVal = RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1; -+ ui32RegVal |= RGXReadReg32(hPrivate, ui32Reg) & RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN; -+ -+#if defined(FIX_HW_BRN_60084_BIT_MASK) -+ if (RGX_DEVICE_HAS_BRN(hPrivate, 60084)) -+ { -+#if !defined(SOC_FEATURE_STRICT_SAME_ADDRESS_WRITE_ORDERING) -+ ui32RegVal |= RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN; -+#else -+ if (RGX_DEVICE_HAS_ERN(hPrivate, 61389)) -+ { -+ ui32RegVal |= RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN; -+ } -+#endif -+ } -+#endif -+ -+ /* Bypass burst combiner if SLC line size is smaller than 1024 bits */ -+ if (RGXGetDeviceCacheLineSize(hPrivate) < 1024) -+ { -+ ui32RegVal |= RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN; -+ } -+ -+ RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal); -+ } -+} -+ -+ -+static void RGXWriteKernelCatBase(const void *hPrivate, IMG_DEV_PHYADDR sPCAddr) -+{ -+ IMG_UINT32 uiPCAddr; -+ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ RGX_LAYER_PARAMS *psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psParams->psDevInfo; -+ -+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) -+ { -+ IMG_UINT32 ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MHPW_LT6_AND_MMU_VER_GEQ4; -+ -+ uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSHIFT) -+ << RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_SHIFT) -+ & ~RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_CLRMSK); -+ -+ /* Set the mapping context */ -+ RGXWriteReg32(hPrivate, ui32CBaseMapCtxReg, MMU_CONTEXT_MAPPING_FWPRIV); -+ (void)RGXReadReg32(hPrivate, ui32CBaseMapCtxReg); /* Fence write */ -+ -+ /* Write the cat-base address */ -+ RGXWriteKernelMMUPC32(hPrivate, -+ RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1, -+ RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSHIFT, -+ RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_SHIFT, -+ uiPCAddr); -+ -+#if (MMU_CONTEXT_MAPPING_FWIF != MMU_CONTEXT_MAPPING_FWPRIV) -+ /* Set-up different MMU ID mapping to the same PC used above */ -+ RGXWriteReg32(hPrivate, ui32CBaseMapCtxReg, MMU_CONTEXT_MAPPING_FWIF); -+ (void)RGXReadReg32(hPrivate, ui32CBaseMapCtxReg); /* Fence write */ -+ -+ RGXWriteKernelMMUPC32(hPrivate, -+ RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1, -+ RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSHIFT, -+ RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_SHIFT, -+ uiPCAddr); -+#endif -+ } -+#else /* defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) */ -+ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT)) -+ { -+ /* Write the cat-base address */ -+ RGXWriteKernelMMUPC64(hPrivate, -+ BIF_CAT_BASEx(MMU_CONTEXT_MAPPING_FWPRIV), -+ RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT, -+ RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT, -+ ((sPCAddr.uiAddr -+ >> RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT) -+ << RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT) -+ & ~RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK); -+ -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) -+ { -+ /* Keep catbase registers in sync */ -+ RGXWriteKernelMMUPC64(hPrivate, -+ FWCORE_MEM_CAT_BASEx(MMU_CONTEXT_MAPPING_FWPRIV), -+ RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT, -+ RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT, -+ ((sPCAddr.uiAddr -+ >> RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT) -+ << RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT) -+ & ~RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK); -+ } -+ -+ /* -+ * Trusted Firmware boot -+ */ -+#if 
defined(SUPPORT_TRUSTED_DEVICE) -+ RGXCommentLog(hPrivate, "RGXWriteKernelCatBase: Trusted Device enabled"); -+ RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN); -+#endif -+ } -+#endif /* defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) */ -+ else -+ { -+ uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT) -+ << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT) -+ & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK); -+ -+ /* Set the mapping context */ -+ RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWPRIV); -+ (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */ -+ -+ /* Write the cat-base address */ -+ RGXWriteKernelMMUPC32(hPrivate, -+ RGX_CR_MMU_CBASE_MAPPING, -+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT, -+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT, -+ uiPCAddr); -+ -+#if (MMU_CONTEXT_MAPPING_FWIF != MMU_CONTEXT_MAPPING_FWPRIV) -+ /* Set-up different MMU ID mapping to the same PC used above */ -+ RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWIF); -+ (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */ -+ -+ RGXWriteKernelMMUPC32(hPrivate, -+ RGX_CR_MMU_CBASE_MAPPING, -+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT, -+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT, -+ uiPCAddr); -+#endif -+ } -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXInitBIF -+ -+ @Description Initialise RGX BIF -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return void -+ -+******************************************************************************/ -+static void RGXInitBIF(const void *hPrivate) -+{ -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS)) -+ { -+ /* -+ * Trusted Firmware boot -+ */ -+#if defined(SUPPORT_TRUSTED_DEVICE) -+ RGXCommentLog(hPrivate, "RGXInitBIF: Trusted Device enabled"); -+ RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN); -+#endif -+ } -+ else -+#endif /* defined(RGX_FEATURE_MIPS_BIT_MASK) */ -+ { -+ IMG_DEV_PHYADDR sPCAddr; -+ -+ /* -+ * Acquire the address of the Kernel Page Catalogue. -+ */ -+ RGXAcquireKernelMMUPC(hPrivate, &sPCAddr); -+ -+ /* -+ * Write the kernel catalogue base. -+ */ -+ RGXCommentLog(hPrivate, "RGX firmware MMU Page Catalogue"); -+ -+ RGXWriteKernelCatBase(hPrivate, sPCAddr); -+ } -+} -+ -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXAXIACELiteInit -+ -+ @Description Initialise AXI-ACE Lite interface -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return void -+ -+******************************************************************************/ -+static void RGXAXIACELiteInit(const void *hPrivate) -+{ -+ IMG_UINT32 ui32RegAddr; -+ IMG_UINT64 ui64RegVal; -+ -+ ui32RegAddr = RGX_CR_AXI_ACE_LITE_CONFIGURATION; -+ -+ /* Setup AXI-ACE config. 
Set everything to outer cache */ -+ ui64RegVal = (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT) | -+ (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT) | -+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT) | -+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT) | -+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT) | -+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT) | -+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT) | -+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT); -+ -+#if defined(FIX_HW_BRN_42321_BIT_MASK) -+ if (RGX_DEVICE_HAS_BRN(hPrivate, 42321)) -+ { -+ ui64RegVal |= (((IMG_UINT64) 1) << RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT); -+ } -+#endif -+ -+#if defined(FIX_HW_BRN_68186_BIT_MASK) -+ if (RGX_DEVICE_HAS_BRN(hPrivate, 68186)) -+ { -+ /* default value for reg_enable_fence_out is zero. Force to 1 to allow core_clk < mem_clk */ -+ ui64RegVal |= (IMG_UINT64)1 << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_SHIFT; -+ } -+#endif -+ -+ RGXCommentLog(hPrivate, "Init AXI-ACE interface"); -+ RGXWriteReg64(hPrivate, ui32RegAddr, ui64RegVal); -+} -+ -+PVRSRV_ERROR RGXStart(const void *hPrivate) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_CHAR *pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS; -+ IMG_BOOL bDoFWSlaveBoot = IMG_FALSE; -+ IMG_BOOL bMetaFW = IMG_FALSE; -+ -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) -+ { -+ pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV; -+ } -+ else if (RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, META)) -+ { -+ pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META; -+ bMetaFW = IMG_TRUE; -+ bDoFWSlaveBoot = RGXDoFWSlaveBoot(hPrivate); -+ } -+ -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, SYS_BUS_SECURE_RESET)) -+ { -+ /* Disable the default sys_bus_secure protection to perform minimal setup */ -+ RGXCommentLog(hPrivate, "RGXStart: Disable sys_bus_secure"); -+ RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, 0); -+ (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */ -+ } -+ -+#if defined(SUPPORT_SHARED_SLC) -+ /* When the SLC is shared, the SLC reset is performed by the System layer when calling -+ * RGXInitSLC (before any device uses it), therefore mask out the SLC bit to avoid -+ * soft_resetting it here. 
-+ */ -+#define RGX_CR_SOFT_RESET_ALL (RGX_CR_SOFT_RESET_MASKFULL ^ RGX_CR_SOFT_RESET_SLC_EN) -+ RGXCommentLog(hPrivate, "RGXStart: Shared SLC (don't reset SLC as part of RGX reset)"); -+#else -+#define RGX_CR_SOFT_RESET_ALL (RGX_CR_SOFT_RESET_MASKFULL) -+#endif -+ -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) -+ { -+ RGXCommentLog(hPrivate, "RGXStart: soft reset cpu core"); -+ RGXWriteReg32(hPrivate, RGX_CR_FWCORE_BOOT, 0); -+ } -+ -+#if defined(RGX_S7_SOFT_RESET_DUSTS) -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) -+ { -+ /* Set RGX in soft-reset */ -+ RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 1"); -+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS); -+ -+ /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ -+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); -+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET2); -+ -+ RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 2"); -+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_JONES_ALL | RGX_S7_SOFT_RESET_DUSTS); -+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, RGX_S7_SOFT_RESET2); -+ -+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); -+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET2); -+ -+ /* Take everything out of reset but the FW processor */ -+ RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 1 excluding %s", pcRGXFW_PROCESSOR); -+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS | RGX_CR_SOFT_RESET_GARTEN_EN); -+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, 0x0); -+ -+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); -+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET2); -+ -+ RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 2 excluding %s", pcRGXFW_PROCESSOR); -+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN); -+ -+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); -+ } -+ else -+#endif -+ { -+ /* Set RGX in soft-reset */ -+ RGXCommentLog(hPrivate, "RGXStart: soft reset everything"); -+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL); -+ -+ /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ -+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); -+ -+ /* Take Rascal and Dust out of reset */ -+ RGXCommentLog(hPrivate, "RGXStart: Rascal and Dust out of reset"); -+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL ^ RGX_CR_SOFT_RESET_RASCALDUSTS_EN); -+ -+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); -+ -+ /* Take everything out of reset but the FW processor */ -+ RGXCommentLog(hPrivate, "RGXStart: Take everything out of reset but %s", pcRGXFW_PROCESSOR); -+ -+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN); -+ -+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); -+ } -+ -+ /* Enable clocks */ -+ RGXEnableClocks(hPrivate); -+ -+ /* -+ * Initialise SLC. 
-+ */ -+#if !defined(SUPPORT_SHARED_SLC) -+ __RGXInitSLC(hPrivate); -+#endif -+ -+ if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, ECC_RAMS) > 0) -+ { -+ RGXCommentLog(hPrivate, "RGXStart: Enable safety events"); -+ RGXWriteReg32(hPrivate, RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE, -+ RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL); -+ } -+ -+ if (bMetaFW) -+ { -+ if (bDoFWSlaveBoot) -+ { -+ /* Configure META to Slave boot */ -+ RGXCommentLog(hPrivate, "RGXStart: META Slave boot"); -+ RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, 0); -+ } -+ else -+ { -+ /* Configure META to Master boot */ -+ RGXCommentLog(hPrivate, "RGXStart: META Master boot"); -+ RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, RGX_CR_META_BOOT_MODE_EN); -+ } -+ } -+ -+ /* -+ * Initialise Firmware wrapper -+ */ -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) -+ { -+ RGXInitRiscvProcWrapper(hPrivate); -+ } -+ else if (bMetaFW) -+ { -+ RGXInitMetaProcWrapper(hPrivate); -+ } -+#if defined(RGX_FEATURE_MIPS_BIT_MASK) -+ else -+ { -+ RGXInitMipsProcWrapper(hPrivate); -+ } -+#endif -+ -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, AXI_ACELITE)) -+ { -+ /* We must init the AXI-ACE interface before 1st BIF transaction */ -+ RGXAXIACELiteInit(hPrivate); -+ } -+ -+ /* -+ * Initialise BIF. -+ */ -+ RGXInitBIF(hPrivate); -+ -+ RGXSetPoweredState(hPrivate, IMG_TRUE); -+ -+ RGXCommentLog(hPrivate, "RGXStart: Take %s out of reset", pcRGXFW_PROCESSOR); -+ -+ /* Need to wait for at least 32 cycles before taking the FW processor out of reset ... */ -+ RGXWaitCycles(hPrivate, 32, 3); -+ -+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, 0x0); -+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); -+ -+ /* ... and afterwards */ -+ RGXWaitCycles(hPrivate, 32, 3); -+ -+ if (bMetaFW && bDoFWSlaveBoot) -+ { -+ eError = RGXFabricCoherencyTest(hPrivate); -+ if (eError != PVRSRV_OK) return eError; -+ -+ RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start"); -+ eError = RGXStartFirmware(hPrivate); -+ if (eError != PVRSRV_OK) return eError; -+ } -+ else -+ { -+ RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Master boot Start"); -+ -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) -+ { -+ /* Bring Debug Module out of reset */ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, HOST_SECURITY_VERSION) >= 4) -+ { -+ RGXWriteReg32(hPrivate, RGX_CR_FWCORE_DMI_DMCONTROL__RISCV_AND_HOST_SECURITY_GEQ4, RGX_CR_FWCORE_DMI_DMCONTROL__RISCV_AND_HOST_SECURITY_GEQ4__DMACTIVE_EN); -+ } -+ else -+#endif -+ { -+ RGXWriteReg32(hPrivate, RGX_CR_FWCORE_DMI_DMCONTROL, RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); -+ } -+ -+ /* Boot the FW */ -+ RGXWriteReg32(hPrivate, RGX_CR_FWCORE_BOOT, 1); -+ RGXWaitCycles(hPrivate, 32, 3); -+ } -+ } -+ -+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(SUPPORT_SECURITY_VALIDATION) -+ RGXCommentLog(hPrivate, "RGXStart: Enable sys_bus_secure"); -+ RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, RGX_CR_SYS_BUS_SECURE_ENABLE_EN); -+ (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */ -+#endif -+ -+ return eError; -+} -+ -+PVRSRV_ERROR RGXStop(const void *hPrivate) -+{ -+ IMG_BOOL bMipsFW = RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS); -+ IMG_BOOL bRiscvFW = RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR); -+ IMG_BOOL bMetaFW = !bMipsFW && !bRiscvFW; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ RGX_LAYER_PARAMS *psParams; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ PVR_ASSERT(hPrivate != NULL); -+ psParams = (RGX_LAYER_PARAMS*)hPrivate; -+ psDevInfo = psParams->psDevInfo; -+ -+ 
RGXDeviceAckIrq(hPrivate); -+ -+ /* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper -+ * For LAYOUT_MARS = 1, SIDEKICK would have been powered down by FW -+ */ -+ if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0)) -+ { -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) -+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_JONES_IDLE, -+ RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN), -+ RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN)); -+ } -+ else -+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_SIDEKICK_IDLE, -+ RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN), -+ RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN)); -+ } -+ -+ if (eError != PVRSRV_OK) return eError; -+ } -+ -+ if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0)) -+ { -+#if !defined(SUPPORT_SHARED_SLC) -+ /* -+ * Wait for SLC to signal IDLE -+ * For LAYOUT_MARS = 1, SLC would have been powered down by FW -+ */ -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) -+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_SLC3_IDLE, -+ RGX_CR_SLC3_IDLE_MASKFULL, -+ RGX_CR_SLC3_IDLE_MASKFULL); -+ } -+ else -+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_SLC_IDLE, -+ RGX_CR_SLC_IDLE_MASKFULL, -+ RGX_CR_SLC_IDLE_MASKFULL); -+ } -+#endif /* SUPPORT_SHARED_SLC */ -+ if (eError != PVRSRV_OK) return eError; -+ } -+ -+ /* Unset MTS DM association with threads */ -+ RGXWriteReg32(hPrivate, -+ RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC, -+ RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK -+ & RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL); -+ RGXWriteReg32(hPrivate, -+ RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC, -+ RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK -+ & RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL); -+ -+#if defined(RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC) -+ RGXWriteReg32(hPrivate, -+ RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC, -+ RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK -+ & RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL); -+ RGXWriteReg32(hPrivate, -+ RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC, -+ RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK -+ & RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL); -+#endif -+ -+#if defined(PDUMP) -+ if (bMetaFW) -+ { -+ /* Disabling threads is only required for pdumps to stop the fw gracefully */ -+ -+ /* Disable thread 0 */ -+ eError = RGXWriteMetaRegThroughSP(hPrivate, -+ META_CR_T0ENABLE_OFFSET, -+ ~META_CR_TXENABLE_ENABLE_BIT); -+ if (eError != PVRSRV_OK) return eError; -+ -+ /* Disable thread 1 */ -+ eError = RGXWriteMetaRegThroughSP(hPrivate, -+ META_CR_T1ENABLE_OFFSET, -+ ~META_CR_TXENABLE_ENABLE_BIT); -+ if (eError != PVRSRV_OK) return eError; -+ -+ /* Clear down any irq raised by META (done after disabling the FW -+ * threads to avoid a race condition). -+ * This is only really needed for PDumps but we do it anyway driver-live. 
-+ */ -+#if defined(RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX) -+ /* Wait for the Slave Port to finish all the transactions */ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES)) -+ { -+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) -+ { -+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA, 0x0); -+ (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_MRUA); /* Fence write */ -+ -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_MRUA__GBLPORT_IDLE_EN); -+ } -+ else -+ { -+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA, 0x0); -+ (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_EQ1_AND_MRUA); /* Fence write */ -+ -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__READY_EN -+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_EQ1_AND_MRUA__GBLPORT_IDLE_EN); -+ } -+ } -+ else -+#endif -+ { -+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS, 0x0); -+ (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS); /* Fence write */ -+ -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_META_SP_MSLVCTRL1, -+ RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, -+ RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); -+ } -+ -+ if (eError != PVRSRV_OK) return eError; -+ } -+#endif -+ -+ /* Extra Idle checks */ -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_BIF_STATUS_MMU, -+ 0, -+ RGX_CR_BIF_STATUS_MMU_MASKFULL); -+ if (eError != PVRSRV_OK) return eError; -+ -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_BIFPM_STATUS_MMU, -+ 0, -+ RGX_CR_BIFPM_STATUS_MMU_MASKFULL); -+ if (eError != PVRSRV_OK) return eError; -+ -+ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE) && -+ !RGX_DEVICE_HAS_FEATURE(hPrivate, XT_TOP_INFRASTRUCTURE)) -+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_BIF_READS_EXT_STATUS, -+ 0, -+ RGX_CR_BIF_READS_EXT_STATUS_MASKFULL); -+ if (eError != PVRSRV_OK) return eError; -+ } -+ -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_BIFPM_READS_EXT_STATUS, -+ 0, -+ RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL); -+ if (eError != PVRSRV_OK) return eError; -+ { -+ IMG_UINT64 ui64SLCMask = RGX_CR_SLC_STATUS1_MASKFULL; -+ eError = RGXPollReg64(hPrivate, -+ RGX_CR_SLC_STATUS1, -+ 0, -+ ui64SLCMask); -+ if (eError != PVRSRV_OK) return eError; -+ } -+ -+ if (4 == RGXGetDeviceSLCBanks(hPrivate)) -+ { -+ eError = RGXPollReg64(hPrivate, -+ RGX_CR_SLC_STATUS2, -+ 0, -+ RGX_CR_SLC_STATUS2_MASKFULL); -+ if (eError != PVRSRV_OK) return eError; -+ } -+ -+ if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0)) -+ { -+#if !defined(SUPPORT_SHARED_SLC) -+ /* -+ * Wait for SLC to signal IDLE -+ * For LAYOUT_MARS = 1, SLC would have been powered down by FW -+ */ -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) -+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_SLC3_IDLE, -+ RGX_CR_SLC3_IDLE_MASKFULL, -+ RGX_CR_SLC3_IDLE_MASKFULL); -+ } -+ else 
-+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_SLC_IDLE, -+ RGX_CR_SLC_IDLE_MASKFULL, -+ RGX_CR_SLC_IDLE_MASKFULL); -+ } -+#endif /* SUPPORT_SHARED_SLC */ -+ if (eError != PVRSRV_OK) return eError; -+ } -+ -+ /* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper -+ * For LAYOUT_MARS = 1, SIDEKICK would have been powered down by FW -+ */ -+ if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0)) -+ { -+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) -+ { -+ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, FASTRENDER_DM)) -+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_JONES_IDLE, -+ RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN), -+ RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN)); -+ } -+ } -+ else -+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_SIDEKICK_IDLE, -+ RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN), -+ RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN)); -+ } -+ -+ if (eError != PVRSRV_OK) return eError; -+ } -+ -+ if (bMetaFW) -+ { -+ IMG_UINT32 ui32RegValue; -+ -+ eError = RGXReadMetaRegThroughSP(hPrivate, -+ META_CR_TxVECINT_BHALT, -+ &ui32RegValue); -+ if (eError != PVRSRV_OK) return eError; -+ -+ if ((ui32RegValue & 0xFFFFFFFFU) == 0x0) -+ { -+ /* Wait for Sidekick/Jones to signal IDLE including -+ * the Garten Wrapper if there is no debugger attached -+ * (TxVECINT_BHALT = 0x0) */ -+ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) -+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_SIDEKICK_IDLE, -+ RGX_CR_SIDEKICK_IDLE_GARTEN_EN, -+ RGX_CR_SIDEKICK_IDLE_GARTEN_EN); -+ if (eError != PVRSRV_OK) return eError; -+ } -+ else -+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_JONES_IDLE, -+ RGX_CR_JONES_IDLE_GARTEN_EN, -+ RGX_CR_JONES_IDLE_GARTEN_EN); -+ if (eError != PVRSRV_OK) return eError; -+ } -+ } -+ } -+ else -+ { -+ if (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0) -+ { -+ /* As FW core has been moved from SIDEKICK to the new MARS domain, checking -+ * idle bits for CPU & System Arbiter excluding SOCIF which will never be Idle -+ * if Host polling on this register -+ */ -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_MARS_IDLE, -+ RGX_CR_MARS_IDLE_CPU_EN | RGX_CR_MARS_IDLE_MH_SYSARB0_EN, -+ RGX_CR_MARS_IDLE_CPU_EN | RGX_CR_MARS_IDLE_MH_SYSARB0_EN); -+ if (eError != PVRSRV_OK) return eError; -+ } -+ else -+ { -+ eError = RGXPollReg32(hPrivate, -+ RGX_CR_SIDEKICK_IDLE, -+ RGX_CR_SIDEKICK_IDLE_GARTEN_EN, -+ RGX_CR_SIDEKICK_IDLE_GARTEN_EN); -+ if (eError != PVRSRV_OK) return eError; -+ } -+ } -+ -+ return eError; -+} -+ -+ -+/* -+ * RGXInitSLC -+ */ -+#if defined(SUPPORT_SHARED_SLC) -+PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ void *pvPowerParams; -+ -+ if (psDeviceNode == NULL) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ psDevInfo = psDeviceNode->pvDevice; -+ pvPowerParams = &psDevInfo->sLayerParams; -+ -+ /* reset the SLC */ -+ RGXCommentLog(pvPowerParams, "RGXInitSLC: soft reset SLC"); -+ RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_SLC_EN); -+ -+ /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ -+ (void) RGXReadReg64(pvPowerParams, RGX_CR_SOFT_RESET); 
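
The RGXStop() sequence above waits on the SIDEKICK/JONES and SLC IDLE registers using a mask of MASKFULL with the Garten, SOCIF and HOSTIF bits XOR'd out, since those units are kept busy by the firmware core and by the host's own polling. A small self-contained sketch of that mask construction, with simulated register values and invented bit positions:

/* Illustrative idle check with excluded bits; bit layout and values are simulated. */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define IDLE_MASKFULL   0x0000007Fu  /* all idle bits the block reports            */
#define IDLE_GARTEN_EN  0x00000040u  /* excluded: FW core keeps it busy            */
#define IDLE_SOCIF_EN   0x00000020u  /* excluded: host polling keeps it busy       */
#define IDLE_HOSTIF_EN  0x00000010u  /* excluded: host interface never settles     */

static bool block_is_idle(uint32_t reg_value)
{
    /* XOR-ing the excluded bits out of MASKFULL leaves only the bits that
     * must all read 1 for the block to be considered idle. */
    const uint32_t mask = IDLE_MASKFULL ^ (IDLE_GARTEN_EN | IDLE_SOCIF_EN | IDLE_HOSTIF_EN);

    return (reg_value & mask) == mask;
}

int main(void)
{
    printf("0x0F idle? %d\n", block_is_idle(0x0Fu));  /* 1: all required bits set   */
    printf("0x0E idle? %d\n", block_is_idle(0x0Eu));  /* 0: one required bit clear  */
    return 0;
}
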
-+ -+ /* Take everything out of reset */ -+ RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, 0x0); -+ -+ __RGXInitSLC(pvPowerParams); -+ -+ return PVRSRV_OK; -+} -+#endif -diff --git a/drivers/gpu/drm/img-rogue/rgxstartstop.h b/drivers/gpu/drm/img-rogue/rgxstartstop.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxstartstop.h -@@ -0,0 +1,84 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX start/stop header file -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the RGX start/stop functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGXSTARTSTOP_H) -+#define RGXSTARTSTOP_H -+ -+/* The routines declared here are built on top of an abstraction layer to -+ * hide DDK/OS-specific details in case they are used outside of the DDK -+ * (e.g. when DRM security is enabled). -+ * Any new dependency should be added to rgxlayer.h. -+ * Any new code should be built on top of the existing abstraction layer, -+ * which should be extended when necessary. -+ */ -+#include "rgxlayer.h" -+ -+/*! 
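
rgxstartstop.h exposes only the two entry points RGXStart() and RGXStop(), both taking an opaque hPrivate. As a rough usage sketch (not the actual PVR power-management API), a power callback might wrap them as below; the stubs and error type are stand-ins so the example is self-contained.

/* Illustrative only: stand-in stubs and types; the real RGXStart/RGXStop live in
 * rgxstartstop.c and are driven through the DDK's power-lock machinery. */
#include <stdio.h>

typedef int MY_ERROR;          /* placeholder for PVRSRV_ERROR */
#define MY_OK 0

static MY_ERROR stub_RGXStart(const void *hPrivate) { (void)hPrivate; return MY_OK; }
static MY_ERROR stub_RGXStop (const void *hPrivate) { (void)hPrivate; return MY_OK; }

/* Power-on runs the start sequence; power-off quiesces the GPU first. */
static MY_ERROR gpu_set_power(const void *layer_params, int power_on)
{
    return power_on ? stub_RGXStart(layer_params)
                    : stub_RGXStop(layer_params);
}

int main(void)
{
    const char *dummy_params = "layer-params";   /* opaque hPrivate stand-in */
    printf("on:  %d\n", gpu_set_power(dummy_params, 1));
    printf("off: %d\n", gpu_set_power(dummy_params, 0));
    return 0;
}
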
-+******************************************************************************* -+ -+ @Function RGXStart -+ -+ @Description Perform GPU reset and initialisation -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXStart(const void *hPrivate); -+ -+/*! -+******************************************************************************* -+ -+ @Function RGXStop -+ -+ @Description Stop Rogue in preparation for power down -+ -+ @Input hPrivate : Implementation specific data -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXStop(const void *hPrivate); -+ -+#endif /* RGXSTARTSTOP_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxsyncutils.c b/drivers/gpu/drm/img-rogue/rgxsyncutils.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxsyncutils.c -@@ -0,0 +1,184 @@ -+/*************************************************************************/ /*! -+@File rgxsyncutils.c -+@Title RGX Sync Utilities -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX Sync helper functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+#include "rgxsyncutils.h" -+ -+#include "sync_server.h" -+#include "sync_internal.h" -+#include "sync.h" -+#include "allocmem.h" -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+#include "pvr_buffer_sync.h" -+#endif -+ -+#include "sync_checkpoint.h" -+#include "sync_checkpoint_internal.h" -+ -+//#define TA3D_CHECKPOINT_DEBUG -+ -+#if defined(TA3D_CHECKPOINT_DEBUG) -+#define CHKPT_DBG(X) PVR_DPF(X) -+static -+void _DebugSyncValues(IMG_UINT32 *pui32UpdateValues, -+ IMG_UINT32 ui32Count) -+{ -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues; -+ -+ for (iii = 0; iii < ui32Count; iii++) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+} -+#else -+#define CHKPT_DBG(X) -+#endif -+ -+ -+PVRSRV_ERROR RGXSyncAppendTimelineUpdate(IMG_UINT32 ui32FenceTimelineUpdateValue, -+ SYNC_ADDR_LIST *psSyncList, -+ SYNC_ADDR_LIST *psPRSyncList, -+ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync, -+ RGX_SYNC_DATA *psSyncData, -+ IMG_BOOL bKick3D) -+{ -+ IMG_UINT32 *pui32TimelineUpdateWOff = NULL; -+ IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; -+ -+ IMG_UINT32 ui32ClientUpdateValueCount = psSyncData->ui32ClientUpdateValueCount; -+ -+ /* Space for original client updates, and the one new update */ -+ size_t uiUpdateSize = sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientUpdateValueCount + 1); -+ -+ if (!bKick3D) -+ { -+ /* Additional space for one PR update, only the newest one */ -+ uiUpdateSize += sizeof(*pui32IntAllocatedUpdateValues) * 1; -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: About to allocate memory to hold updates in pui32IntAllocatedUpdateValues(<%p>)", -+ __func__, -+ (void*)pui32IntAllocatedUpdateValues)); -+ -+ /* Allocate memory to hold the list of update values (including our timeline update) */ -+ pui32IntAllocatedUpdateValues = OSAllocMem(uiUpdateSize); -+ if (!pui32IntAllocatedUpdateValues) -+ { -+ /* Failed to allocate memory */ -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xcc, uiUpdateSize); -+ pui32TimelineUpdateWOff = pui32IntAllocatedUpdateValues; -+ -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Copying %d %s update values into pui32IntAllocatedUpdateValues(<%p>)", -+ __func__, -+ ui32ClientUpdateValueCount, -+ bKick3D ? "TA/3D" : "TA/PR", -+ (void*)pui32IntAllocatedUpdateValues)); -+ /* Copy the update values into the new memory, then append our timeline update value */ -+ OSCachedMemCopy(pui32TimelineUpdateWOff, psSyncData->paui32ClientUpdateValue, ui32ClientUpdateValueCount * sizeof(*psSyncData->paui32ClientUpdateValue)); -+ -+#if defined(TA3D_CHECKPOINT_DEBUG) -+ _DebugSyncValues(pui32TimelineUpdateWOff, ui32ClientUpdateValueCount); -+#endif -+ -+ pui32TimelineUpdateWOff += ui32ClientUpdateValueCount; -+ } -+ -+ /* Now set the additional update value and append the timeline sync prim addr to either the -+ * render context 3D (or TA) update list -+ */ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Appending the additional update value (0x%x) to psRenderContext->sSyncAddrList%sUpdate...", -+ __func__, -+ ui32FenceTimelineUpdateValue, -+ bKick3D ? 
"TA/3D" : "TA/PR")); -+ -+ /* Append the TA/3D update */ -+ { -+ *pui32TimelineUpdateWOff++ = ui32FenceTimelineUpdateValue; -+ psSyncData->ui32ClientUpdateValueCount++; -+ psSyncData->ui32ClientUpdateCount++; -+ SyncAddrListAppendSyncPrim(psSyncList, psFenceTimelineUpdateSync); -+ -+ if (!psSyncData->pauiClientUpdateUFOAddress) -+ { -+ psSyncData->pauiClientUpdateUFOAddress = psSyncList->pasFWAddrs; -+ } -+ /* Update paui32ClientUpdateValue to point to our new list of update values */ -+ psSyncData->paui32ClientUpdateValue = pui32IntAllocatedUpdateValues; -+ -+#if defined(TA3D_CHECKPOINT_DEBUG) -+ _DebugSyncValues(pui32IntAllocatedUpdateValues, psSyncData->ui32ClientUpdateValueCount); -+#endif -+ } -+ -+ if (!bKick3D) -+ { -+ /* Use the sSyncAddrList3DUpdate for PR (as it doesn't have one of its own) */ -+ *pui32TimelineUpdateWOff++ = ui32FenceTimelineUpdateValue; -+ psSyncData->ui32ClientPRUpdateValueCount = 1; -+ psSyncData->ui32ClientPRUpdateCount = 1; -+ SyncAddrListAppendSyncPrim(psPRSyncList, psFenceTimelineUpdateSync); -+ -+ if (!psSyncData->pauiClientPRUpdateUFOAddress) -+ { -+ psSyncData->pauiClientPRUpdateUFOAddress = psPRSyncList->pasFWAddrs; -+ } -+ /* Update paui32ClientPRUpdateValue to point to our new list of update values */ -+ psSyncData->paui32ClientPRUpdateValue = &pui32IntAllocatedUpdateValues[psSyncData->ui32ClientUpdateValueCount]; -+ -+#if defined(TA3D_CHECKPOINT_DEBUG) -+ _DebugSyncValues(psSyncData->paui32ClientPRUpdateValue, psSyncData->ui32ClientPRUpdateValueCount); -+#endif -+ } -+ -+ /* Do not free the old psSyncData->ui32ClientUpdateValueCount, -+ * as it was constant data passed through the bridge down to PVRSRVRGXKickTA3DKM() */ -+ -+ return PVRSRV_OK; -+} -diff --git a/drivers/gpu/drm/img-rogue/rgxsyncutils.h b/drivers/gpu/drm/img-rogue/rgxsyncutils.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxsyncutils.h -@@ -0,0 +1,76 @@ -+/*************************************************************************/ /*! -+@File rgxsyncutils.h -+@Title RGX Sync Utilities -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX Sync helper functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RGXSYNCUTILS_H -+#define RGXSYNCUTILS_H -+ -+#include "rgxdevice.h" -+#include "sync_server.h" -+#include "rgxdebug_common.h" -+#include "rgx_fwif_km.h" -+ -+typedef struct _RGX_SYNC_DATA_ -+{ -+ PRGXFWIF_UFO_ADDR *pauiClientUpdateUFOAddress; -+ IMG_UINT32 *paui32ClientUpdateValue; -+ IMG_UINT32 ui32ClientUpdateValueCount; -+ IMG_UINT32 ui32ClientUpdateCount; -+ -+ PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress; -+ IMG_UINT32 *paui32ClientPRUpdateValue; -+ IMG_UINT32 ui32ClientPRUpdateValueCount; -+ IMG_UINT32 ui32ClientPRUpdateCount; -+} RGX_SYNC_DATA; -+ -+PVRSRV_ERROR RGXSyncAppendTimelineUpdate(IMG_UINT32 ui32FenceTimelineUpdateValue, -+ SYNC_ADDR_LIST *psSyncList, -+ SYNC_ADDR_LIST *psPRSyncList, -+ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync, -+ RGX_SYNC_DATA *psSyncData, -+ IMG_BOOL bKick3D); -+ -+#endif /* RGXSYNCUTILS_H */ -+ -+/****************************************************************************** -+ End of file (rgxsyncutils.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxta3d.c b/drivers/gpu/drm/img-rogue/rgxta3d.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxta3d.c -@@ -0,0 +1,5603 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX TA/3D routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX TA/3D routines -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+/* for the offsetof macro */ -+#if defined(__linux__) -+#include <linux/stddef.h> -+#else -+#include <stddef.h> -+#endif -+ -+#include "pdump_km.h" -+#include "pvr_debug.h" -+#include "rgxutils.h" -+#include "rgxfwutils.h" -+#include "rgxfwcmnctx.h" -+#include "rgxta3d.h" -+#include "rgxmem.h" -+#include "allocmem.h" -+#include "devicemem.h" -+#include "devicemem_pdump.h" -+#include "ri_server.h" -+#include "osfunc.h" -+#include "pvrsrv.h" -+#include "rgx_memallocflags.h" -+#include "rgxccb.h" -+#include "rgxhwperf.h" -+#include "ospvr_gputrace.h" -+#include "rgxsyncutils.h" -+#include "htbserver.h" -+ -+#include "rgxdefs_km.h" -+#include "rgx_fwif_km.h" -+#include "physmem.h" -+#include "sync_server.h" -+#include "sync_internal.h" -+#include "sync.h" -+#include "process_stats.h" -+ -+#include "rgxtimerquery.h" -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+#include "pvr_buffer_sync.h" -+#endif -+ -+#include "sync_checkpoint.h" -+#include "sync_checkpoint_internal.h" -+ -+#if defined(SUPPORT_PDVFS) -+#include "rgxpdvfs.h" -+#endif -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+#include "rgxworkest.h" -+ -+#define HASH_CLEAN_LIMIT 6 -+#endif -+ -+/* Enable this to dump the compiled list of UFOs prior to kick call */ -+#define ENABLE_TA3D_UFO_DUMP 0 -+ -+//#define TA3D_CHECKPOINT_DEBUG -+ -+#if defined(TA3D_CHECKPOINT_DEBUG) -+#define CHKPT_DBG(X) PVR_DPF(X) -+static INLINE -+void _DebugSyncValues(const IMG_CHAR *pszFunction, -+ const IMG_UINT32 *pui32UpdateValues, -+ const IMG_UINT32 ui32Count) -+{ -+ IMG_UINT32 i; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues; -+ -+ for (i = 0; i < ui32Count; i++) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", pszFunction, i, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+} -+ -+static INLINE -+void _DebugSyncCheckpoints(const IMG_CHAR *pszFunction, -+ const IMG_CHAR *pszDMName, -+ const PSYNC_CHECKPOINT *apsSyncCheckpoints, -+ const IMG_UINT32 ui32Count) -+{ -+ IMG_UINT32 i; -+ -+ for (i = 0; i < ui32Count; i++) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFence%sSyncCheckpoints[%d]=<%p>", pszFunction, pszDMName, i, *(apsSyncCheckpoints + i))); -+ } -+} -+ -+#else -+#define CHKPT_DBG(X) -+#endif -+ -+/* define the number of commands required to be set up by the CCB helper */ -+/* 1 command for the TA */
-+#define CCB_CMD_HELPER_NUM_TA_COMMANDS 1 -+/* Up to 3 commands for the 3D (partial render fence, partial render, and render) */ -+#define CCB_CMD_HELPER_NUM_3D_COMMANDS 3 -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+#define WORKEST_CYCLES_PREDICTION_GET(x) ((x).ui32CyclesPrediction) -+#else -+#define WORKEST_CYCLES_PREDICTION_GET(x) (NO_CYCEST) -+#endif -+ -+typedef struct { -+ DEVMEM_MEMDESC *psContextStateMemDesc; -+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; -+ IMG_INT32 i32Priority; -+} RGX_SERVER_RC_TA_DATA; -+ -+typedef struct { -+ DEVMEM_MEMDESC *psContextStateMemDesc; -+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; -+ IMG_INT32 i32Priority; -+} RGX_SERVER_RC_3D_DATA; -+ -+struct _RGX_SERVER_RENDER_CONTEXT_ { -+ /* this lock protects usage of the render context. -+ * it ensures only one kick is being prepared and/or submitted on -+ * this render context at any time -+ */ -+ POS_LOCK hLock; -+ RGX_CCB_CMD_HELPER_DATA asTACmdHelperData[CCB_CMD_HELPER_NUM_TA_COMMANDS]; -+ RGX_CCB_CMD_HELPER_DATA as3DCmdHelperData[CCB_CMD_HELPER_NUM_3D_COMMANDS]; -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ DEVMEM_MEMDESC *psFWRenderContextMemDesc; -+ DEVMEM_MEMDESC *psFWFrameworkMemDesc; -+ RGX_SERVER_RC_TA_DATA sTAData; -+ RGX_SERVER_RC_3D_DATA s3DData; -+ IMG_UINT32 ui32CleanupStatus; -+#define RC_CLEANUP_TA_COMPLETE (1 << 0) -+#define RC_CLEANUP_3D_COMPLETE (1 << 1) -+ DLLIST_NODE sListNode; -+ SYNC_ADDR_LIST sSyncAddrListTAFence; -+ SYNC_ADDR_LIST sSyncAddrListTAUpdate; -+ SYNC_ADDR_LIST sSyncAddrList3DFence; -+ SYNC_ADDR_LIST sSyncAddrList3DUpdate; -+ ATOMIC_T hIntJobRef; -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ WORKEST_HOST_DATA sWorkEstData; -+#endif -+#if defined(SUPPORT_BUFFER_SYNC) -+ struct pvr_buffer_sync_context *psBufferSyncContext; -+#endif -+}; -+ -+ -+/* -+ Static functions used by render context code -+*/ -+ -+static -+PVRSRV_ERROR _DestroyTAContext(RGX_SERVER_RC_TA_DATA *psTAData, -+ PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Check if the FW has finished with this resource ... */ -+ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, -+ psTAData->psServerCommonContext, -+ RGXFWIF_DM_GEOM, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (RGXIsErrorAndDeviceRecoverable(psDeviceNode, &eError)) -+ { -+ return eError; -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ } -+ -+ /* ... it has so we can free its resources */ -+ FWCommonContextFree(psTAData->psServerCommonContext); -+ DevmemFwUnmapAndFree(psDeviceNode->pvDevice, psTAData->psContextStateMemDesc); -+ psTAData->psServerCommonContext = NULL; -+ -+ return eError; -+} -+ -+static -+PVRSRV_ERROR _Destroy3DContext(RGX_SERVER_RC_3D_DATA *ps3DData, -+ PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Check if the FW has finished with this resource ... */ -+ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, -+ ps3DData->psServerCommonContext, -+ RGXFWIF_DM_3D, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (RGXIsErrorAndDeviceRecoverable(psDeviceNode, &eError)) -+ { -+ return eError; -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ } -+ -+ /* ... 
it has so we can free its resources */ -+ FWCommonContextFree(ps3DData->psServerCommonContext); -+ DevmemFwUnmapAndFree(psDeviceNode->pvDevice, ps3DData->psContextStateMemDesc); -+ ps3DData->psServerCommonContext = NULL; -+ -+ return eError; -+} -+ -+static void _RGXDumpPMRPageList(DLLIST_NODE *psNode) -+{ -+ RGX_PMR_NODE *psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock); -+ PVRSRV_ERROR eError; -+ -+ eError = PMRDumpPageList(psPMRNode->psPMR, -+ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Error (%s) printing pmr %p", -+ PVRSRVGetErrorString(eError), -+ psPMRNode->psPMR)); -+ } -+} -+ -+IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ -+ PVR_LOG(("Freelist FWAddr 0x%08x, ID = %d, CheckSum 0x%016" IMG_UINT64_FMTSPECx, -+ psFreeList->sFreeListFWDevVAddr.ui32Addr, -+ psFreeList->ui32FreelistID, -+ psFreeList->ui64FreelistChecksum)); -+ -+ /* Dump Init FreeList page list */ -+ PVR_LOG((" Initial Memory block")); -+ dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext) -+ { -+ _RGXDumpPMRPageList(psNode); -+ } -+ -+ /* Dump Grow FreeList page list */ -+ PVR_LOG((" Grow Memory blocks")); -+ dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext) -+ { -+ _RGXDumpPMRPageList(psNode); -+ } -+ -+ return IMG_TRUE; -+} -+ -+static void _CheckFreelist(RGX_FREELIST *psFreeList, -+ IMG_UINT32 ui32NumOfPagesToCheck, -+ IMG_UINT64 ui64ExpectedCheckSum, -+ IMG_UINT64 *pui64CalculatedCheckSum) -+{ -+#if defined(NO_HARDWARE) -+ /* No checksum needed as we have all information in the pdumps */ -+ PVR_UNREFERENCED_PARAMETER(psFreeList); -+ PVR_UNREFERENCED_PARAMETER(ui32NumOfPagesToCheck); -+ PVR_UNREFERENCED_PARAMETER(ui64ExpectedCheckSum); -+ *pui64CalculatedCheckSum = 0; -+#else -+ PVRSRV_ERROR eError; -+ size_t uiNumBytes; -+ IMG_UINT8* pui8Buffer; -+ IMG_UINT32* pui32Buffer; -+ IMG_UINT32 ui32CheckSumAdd = 0; -+ IMG_UINT32 ui32CheckSumXor = 0; -+ IMG_UINT32 ui32Entry; -+ IMG_UINT32 ui32Entry2; -+ IMG_BOOL bFreelistBad = IMG_FALSE; -+ -+ *pui64CalculatedCheckSum = 0; -+ -+ PVR_ASSERT(ui32NumOfPagesToCheck <= (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages)); -+ -+ /* Allocate Buffer of the size of the freelist */ -+ pui8Buffer = OSAllocMem(ui32NumOfPagesToCheck * sizeof(IMG_UINT32)); -+ if (pui8Buffer == NULL) -+ { -+ PVR_LOG(("%s: Failed to allocate buffer to check freelist %p!", -+ __func__, psFreeList)); -+ PVR_ASSERT(0); -+ return; -+ } -+ -+ /* Copy freelist content into Buffer */ -+ eError = PMR_ReadBytes(psFreeList->psFreeListPMR, -+ psFreeList->uiFreeListPMROffset + -+ (((psFreeList->ui32MaxFLPages - -+ psFreeList->ui32CurrentFLPages - psFreeList->ui32ReadyFLPages) * sizeof(IMG_UINT32)) & -+ ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)), -+ pui8Buffer, -+ ui32NumOfPagesToCheck * sizeof(IMG_UINT32), -+ &uiNumBytes); -+ if (eError != PVRSRV_OK) -+ { -+ OSFreeMem(pui8Buffer); -+ PVR_LOG(("%s: Failed to get freelist data for freelist %p!", -+ __func__, psFreeList)); -+ PVR_ASSERT(0); -+ return; -+ } -+ -+ PVR_ASSERT(uiNumBytes == ui32NumOfPagesToCheck * sizeof(IMG_UINT32)); -+ -+ /* Generate checksum (skipping the first page if not allocated) */ -+ pui32Buffer = (IMG_UINT32 *)pui8Buffer; -+ ui32Entry = ((psFreeList->ui32GrowFLPages == 0 && psFreeList->ui32CurrentFLPages > 1) ? 
1 : 0); -+ for (/*ui32Entry*/ ; ui32Entry < ui32NumOfPagesToCheck; ui32Entry++) -+ { -+ ui32CheckSumAdd += pui32Buffer[ui32Entry]; -+ ui32CheckSumXor ^= pui32Buffer[ui32Entry]; -+ -+ /* Check for double entries */ -+ for (ui32Entry2 = ui32Entry+1; ui32Entry2 < ui32NumOfPagesToCheck; ui32Entry2++) -+ { -+ if (pui32Buffer[ui32Entry] == pui32Buffer[ui32Entry2]) -+ { -+ PVR_LOG(("%s: Freelist consistency failure: FW addr: 0x%08X, Double entry found 0x%08x on idx: %d and %d of %d", -+ __func__, -+ psFreeList->sFreeListFWDevVAddr.ui32Addr, -+ pui32Buffer[ui32Entry2], -+ ui32Entry, -+ ui32Entry2, -+ psFreeList->ui32CurrentFLPages)); -+ bFreelistBad = IMG_TRUE; -+ break; -+ } -+ } -+ } -+ -+ OSFreeMem(pui8Buffer); -+ -+ /* Check the calculated checksum against the expected checksum... */ -+ *pui64CalculatedCheckSum = ((IMG_UINT64)ui32CheckSumXor << 32) | ui32CheckSumAdd; -+ -+ if (ui64ExpectedCheckSum != 0 && ui64ExpectedCheckSum != *pui64CalculatedCheckSum) -+ { -+ PVR_LOG(("%s: Checksum mismatch for freelist %p! Expected 0x%016" IMG_UINT64_FMTSPECx " calculated 0x%016" IMG_UINT64_FMTSPECx, -+ __func__, psFreeList, -+ ui64ExpectedCheckSum, *pui64CalculatedCheckSum)); -+ bFreelistBad = IMG_TRUE; -+ } -+ -+ if (bFreelistBad) -+ { -+ PVR_LOG(("%s: Sleeping for ever!", __func__)); -+ PVR_ASSERT(!bFreelistBad); -+ } -+#endif -+} -+ -+ -+/* -+ * Function to work out the number of freelist pages to reserve for growing -+ * within the FW without having to wait for the host to progress a grow -+ * request. -+ * -+ * The number of pages must be a multiple of 4 to align the PM addresses -+ * for the initial freelist allocation and also be less than the grow size. -+ * -+ * If the threshold or grow size means less than 4 pages, then the feature -+ * is not used. -+ */ -+static IMG_UINT32 _CalculateFreelistReadyPages(RGX_FREELIST *psFreeList, -+ IMG_UINT32 ui32FLPages) -+{ -+ IMG_UINT32 ui32ReadyFLPages = ((ui32FLPages * psFreeList->ui32GrowThreshold) / 100) & -+ ~((RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE/sizeof(IMG_UINT32))-1); -+ -+ if (ui32ReadyFLPages > psFreeList->ui32GrowFLPages) -+ { -+ ui32ReadyFLPages = psFreeList->ui32GrowFLPages; -+ } -+ -+ return ui32ReadyFLPages; -+} -+ -+ -+PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, -+ IMG_UINT32 ui32NumPages, -+ PDLLIST_NODE pListHeader) -+{ -+ RGX_PMR_NODE *psPMRNode; -+ IMG_DEVMEM_SIZE_T uiSize; -+ IMG_UINT32 ui32MappingTable = 0; -+ IMG_DEVMEM_OFFSET_T uiOffset; -+ IMG_DEVMEM_SIZE_T uiLength; -+ IMG_DEVMEM_SIZE_T uistartPage; -+ PVRSRV_ERROR eError; -+ static const IMG_CHAR szAllocName[] = "Free List"; -+ -+ /* Are we allowed to grow ? */ -+ if (psFreeList->ui32MaxFLPages - (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) < ui32NumPages) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "Freelist [0x%p]: grow by %u pages denied. 
" -+ "Max PB size reached (current pages %u+%u/%u)", -+ psFreeList, -+ ui32NumPages, -+ psFreeList->ui32CurrentFLPages, -+ psFreeList->ui32ReadyFLPages, -+ psFreeList->ui32MaxFLPages)); -+ return PVRSRV_ERROR_PBSIZE_ALREADY_MAX; -+ } -+ -+ /* Allocate kernel memory block structure */ -+ psPMRNode = OSAllocMem(sizeof(*psPMRNode)); -+ if (psPMRNode == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: failed to allocate host data structure", -+ __func__)); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto ErrorAllocHost; -+ } -+ -+ /* -+ * Lock protects simultaneous manipulation of: -+ * - the memory block list -+ * - the freelist's ui32CurrentFLPages -+ */ -+ OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); -+ -+ -+ /* -+ * The PM never takes the last page in a freelist, so if this block -+ * of pages is the first one and there is no ability to grow, then -+ * we can skip allocating one 4K page for the lowest entry. -+ */ -+ if (OSGetPageSize() > RGX_BIF_PM_PHYSICAL_PAGE_SIZE) -+ { -+ /* -+ * Allocation size will be rounded up to the OS page size, -+ * any attempt to change it a bit now will be invalidated later. -+ */ -+ psPMRNode->bFirstPageMissing = IMG_FALSE; -+ } -+ else -+ { -+ psPMRNode->bFirstPageMissing = (psFreeList->ui32GrowFLPages == 0 && ui32NumPages > 1); -+ } -+ -+ psPMRNode->ui32NumPages = ui32NumPages; -+ psPMRNode->psFreeList = psFreeList; -+ -+ /* Allocate Memory Block */ -+ PDUMPCOMMENT(psFreeList->psDevInfo->psDeviceNode, "Allocate PB Block (Pages %08X)", ui32NumPages); -+ uiSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE; -+ if (psPMRNode->bFirstPageMissing) -+ { -+ uiSize -= RGX_BIF_PM_PHYSICAL_PAGE_SIZE; -+ } -+ -+ eError = PhysmemNewRamBackedPMR(psFreeList->psConnection, -+ psFreeList->psDevInfo->psDeviceNode, -+ uiSize, -+ 1, -+ 1, -+ &ui32MappingTable, -+ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(GPU_PRIVATE) | -+ PVRSRV_MEMALLOCFLAG_MANDATE_PHYSHEAP, -+ sizeof(szAllocName), -+ szAllocName, -+ psFreeList->ownerPid, -+ &psPMRNode->psPMR, -+ PDUMP_NONE, -+ NULL); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate PB block of size: 0x%016" IMG_UINT64_FMTSPECX, -+ __func__, -+ (IMG_UINT64)uiSize)); -+ goto ErrorBlockAlloc; -+ } -+ -+ /* Zeroing physical pages pointed by the PMR */ -+ if (psFreeList->psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST) -+ { -+ eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to zero PMR %p of freelist %p (%s)", -+ __func__, -+ psPMRNode->psPMR, -+ psFreeList, -+ PVRSRVGetErrorString(eError))); -+ PVR_ASSERT(0); -+ } -+ } -+ -+ uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32); -+ uistartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages); -+ uiOffset = psFreeList->uiFreeListPMROffset + ((uistartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)); -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ -+ eError = RIWritePMREntryWithOwnerKM(psPMRNode->psPMR, -+ psFreeList->ownerPid); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: call to RIWritePMREntryWithOwnerKM failed (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ } -+ -+ /* Attach RI information */ -+ eError = RIWriteMEMDESCEntryKM(psPMRNode->psPMR, -+ OSStringNLength(szAllocName, DEVMEM_ANNOTATION_MAX_LEN), -+ szAllocName, -+ 0, -+ 
uiSize, -+ IMG_FALSE, -+ IMG_FALSE, -+ &psPMRNode->hRIHandle); -+ PVR_LOG_IF_ERROR(eError, "RIWriteMEMDESCEntryKM"); -+ -+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ -+ -+ /* write Freelist with Memory Block physical addresses */ -+ eError = PMRWritePMPageList( -+ /* Target PMR, offset, and length */ -+ psFreeList->psFreeListPMR, -+ (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset), -+ (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength), -+ /* Referenced PMR, and "page" granularity */ -+ psPMRNode->psPMR, -+ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, -+ &psPMRNode->psPageList); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to write pages of Node %p", -+ __func__, -+ psPMRNode)); -+ goto ErrorPopulateFreelist; -+ } -+ -+#if defined(SUPPORT_SHADOW_FREELISTS) -+ /* Copy freelist memory to shadow freelist */ -+ { -+ const IMG_UINT32 ui32FLMaxSize = psFreeList->ui32MaxFLPages * sizeof(IMG_UINT32); -+ const IMG_UINT32 ui32MapSize = ui32FLMaxSize * 2; -+ const IMG_UINT32 ui32CopyOffset = uiOffset - psFreeList->uiFreeListPMROffset; -+ IMG_BYTE *pFLMapAddr; -+ size_t uiNumBytes; -+ PVRSRV_ERROR res; -+ IMG_HANDLE hMapHandle; -+ IMG_DEVMEM_SIZE_T uiPMRSize; -+ -+ PMR_LogicalSize(psFreeList->psFreeListPMR, &uiPMRSize); -+ -+ /* Check for overflow. Validate size and offset. */ -+ PVR_GOTO_IF_INVALID_PARAM(psFreeList->uiFreeListPMROffset + ui32MapSize > psFreeList->uiFreeListPMROffset, eError, ErrorPopulateFreelist); -+ PVR_GOTO_IF_INVALID_PARAM(psFreeList->uiFreeListPMROffset + ui32MapSize <= uiPMRSize, eError, ErrorPopulateFreelist); -+ -+ /* Map both the FL and the shadow FL */ -+ res = PMRAcquireKernelMappingData(psFreeList->psFreeListPMR, psFreeList->uiFreeListPMROffset, ui32MapSize, -+ (void**) &pFLMapAddr, &uiNumBytes, &hMapHandle); -+ if (res != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to map freelist (ID=%d)", -+ __func__, -+ psFreeList->ui32FreelistID)); -+ goto ErrorPopulateFreelist; -+ } -+ -+ /* Copy only the newly added memory */ -+ OSCachedMemCopy(pFLMapAddr + ui32FLMaxSize + ui32CopyOffset, pFLMapAddr + ui32CopyOffset , uiLength); -+ OSWriteMemoryBarrier(pFLMapAddr); -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psFreeList->psDevInfo->psDeviceNode, "Initialize shadow freelist"); -+ -+ /* Translate memcpy to pdump */ -+ { -+ IMG_DEVMEM_OFFSET_T uiCurrOffset; -+ -+ for (uiCurrOffset = uiOffset; (uiCurrOffset - uiOffset) < uiLength; uiCurrOffset += sizeof(IMG_UINT32)) -+ { -+ PMRPDumpCopyMem32(psFreeList->psFreeListPMR, -+ uiCurrOffset + ui32FLMaxSize, -+ psFreeList->psFreeListPMR, -+ uiCurrOffset, -+ ":SYSMEM:$1", -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+ } -+#endif -+ -+ -+ res = PMRReleaseKernelMappingData(psFreeList->psFreeListPMR, hMapHandle); -+ -+ if (res != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to release freelist mapping (ID=%d)", -+ __func__, -+ psFreeList->ui32FreelistID)); -+ goto ErrorPopulateFreelist; -+ } -+ } -+#endif -+ -+ /* We add It must be added to the tail, otherwise the freelist population won't work */ -+ dllist_add_to_head(pListHeader, &psPMRNode->sMemoryBlock); -+ -+ /* Update number of available pages */ -+ psFreeList->ui32CurrentFLPages += ui32NumPages; -+ -+ /* Update statistics (needs to happen before the ReadyFL calculation to also count those pages) */ -+ if (psFreeList->ui32NumHighPages < psFreeList->ui32CurrentFLPages) -+ { -+ psFreeList->ui32NumHighPages = psFreeList->ui32CurrentFLPages; -+ } -+ -+ /* Reserve a number ready pages to allow the FW to 
process OOM quickly and asynchronously request a grow. */ -+ psFreeList->ui32ReadyFLPages = _CalculateFreelistReadyPages(psFreeList, psFreeList->ui32CurrentFLPages); -+ psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages; -+ -+ if (psFreeList->bCheckFreelist) -+ { -+ /* -+ * We can only calculate the freelist checksum when the list is full -+ * (e.g. at initial creation time). At other times the checksum cannot -+ * be calculated and has to be disabled for this freelist. -+ */ -+ if ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages) -+ { -+ _CheckFreelist(psFreeList, ui32NumPages, 0, &psFreeList->ui64FreelistChecksum); -+ } -+ else -+ { -+ psFreeList->ui64FreelistChecksum = 0; -+ } -+ } -+ OSLockRelease(psFreeList->psDevInfo->hLockFreeList); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: %s %u pages (pages=%u+%u/%u checksum=0x%016" IMG_UINT64_FMTSPECx "%s)", -+ psFreeList, -+ ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages ? "Create initial" : "Grow by"), -+ ui32NumPages, -+ psFreeList->ui32CurrentFLPages, -+ psFreeList->ui32ReadyFLPages, -+ psFreeList->ui32MaxFLPages, -+ psFreeList->ui64FreelistChecksum, -+ (psPMRNode->bFirstPageMissing ? " - lowest page not allocated" : ""))); -+ -+ return PVRSRV_OK; -+ -+ /* Error handling */ -+ErrorPopulateFreelist: -+ PMRUnrefPMR(psPMRNode->psPMR); -+ -+ErrorBlockAlloc: -+ OSFreeMem(psPMRNode); -+ OSLockRelease(psFreeList->psDevInfo->hLockFreeList); -+ -+ErrorAllocHost: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+ -+} -+ -+static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader, -+ RGX_FREELIST *psFreeList) -+{ -+ DLLIST_NODE *psNode; -+ RGX_PMR_NODE *psPMRNode; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 ui32OldValue; -+ -+ /* -+ * Lock protects simultaneous manipulation of: -+ * - the memory block list -+ * - the freelist's ui32CurrentFLPages value -+ */ -+ PVR_ASSERT(pListHeader); -+ PVR_ASSERT(psFreeList); -+ PVR_ASSERT(psFreeList->psDevInfo); -+ PVR_ASSERT(psFreeList->psDevInfo->hLockFreeList); -+ -+ OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); -+ -+ /* Get node from head of list and remove it */ -+ psNode = dllist_get_next_node(pListHeader); -+ if (psNode) -+ { -+ dllist_remove_node(psNode); -+ -+ psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock); -+ PVR_ASSERT(psPMRNode); -+ PVR_ASSERT(psPMRNode->psPMR); -+ PVR_ASSERT(psPMRNode->psFreeList); -+ -+ /* remove block from freelist list */ -+ -+ /* Unwrite Freelist with Memory Block physical addresses */ -+ eError = PMRUnwritePMPageList(psPMRNode->psPageList); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to unwrite pages of Node %p", -+ __func__, -+ psPMRNode)); -+ PVR_ASSERT(IMG_FALSE); -+ } -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ -+ if (psPMRNode->hRIHandle) -+ { -+ PVRSRV_ERROR eError; -+ -+ eError = RIDeleteMEMDESCEntryKM(psPMRNode->hRIHandle); -+ PVR_LOG_IF_ERROR(eError, "RIDeleteMEMDESCEntryKM"); -+ } -+ -+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ -+ -+ /* Free PMR (We should be the only one that holds a ref on the PMR) */ -+ eError = PMRUnrefPMR(psPMRNode->psPMR); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to free PB block %p (%s)", -+ __func__, -+ psPMRNode->psPMR, -+ PVRSRVGetErrorString(eError))); -+ PVR_ASSERT(IMG_FALSE); -+ } -+ -+ /* update available pages in freelist */ -+ ui32OldValue = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages; -+ -+ /* -+ * Deallocated 
pages should first be deducted from ReadyPages bank, once -+ * there are no more left, start deducting them from CurrentPage bank. -+ */ -+ if (psPMRNode->ui32NumPages > psFreeList->ui32ReadyFLPages) -+ { -+ psFreeList->ui32CurrentFLPages -= psPMRNode->ui32NumPages - psFreeList->ui32ReadyFLPages; -+ psFreeList->ui32ReadyFLPages = 0; -+ } -+ else -+ { -+ psFreeList->ui32ReadyFLPages -= psPMRNode->ui32NumPages; -+ } -+ -+ /* check underflow */ -+ PVR_ASSERT(ui32OldValue > (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages)); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: shrink by %u pages (current pages %u/%u)", -+ psFreeList, -+ psPMRNode->ui32NumPages, -+ psFreeList->ui32CurrentFLPages, -+ psFreeList->ui32MaxFLPages)); -+ -+ OSFreeMem(psPMRNode); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "Freelist [0x%p]: shrink denied. PB already at initial PB size (%u pages)", -+ psFreeList, -+ psFreeList->ui32InitFLPages)); -+ eError = PVRSRV_ERROR_PBSIZE_ALREADY_MIN; -+ } -+ -+ OSLockRelease(psFreeList->psDevInfo->hLockFreeList); -+ -+ return eError; -+} -+ -+static RGX_FREELIST *FindFreeList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FreelistID) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ RGX_FREELIST *psFreeList = NULL; -+ -+ OSLockAcquire(psDevInfo->hLockFreeList); -+ -+ dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) -+ { -+ RGX_FREELIST *psThisFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); -+ -+ if (psThisFreeList->ui32FreelistID == ui32FreelistID) -+ { -+ psFreeList = psThisFreeList; -+ break; -+ } -+ } -+ -+ OSLockRelease(psDevInfo->hLockFreeList); -+ return psFreeList; -+} -+ -+void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32FreelistID) -+{ -+ RGX_FREELIST *psFreeList = NULL; -+ RGXFWIF_KCCB_CMD s3DCCBCmd; -+ IMG_UINT32 ui32GrowValue; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psDevInfo); -+ -+ psFreeList = FindFreeList(psDevInfo, ui32FreelistID); -+ if (psFreeList == NULL) -+ { -+ /* Should never happen */ -+ PVR_DPF((PVR_DBG_ERROR, -+ "FreeList Lookup for FreeList ID 0x%08x failed (Populate)", -+ ui32FreelistID)); -+ PVR_ASSERT(IMG_FALSE); -+ return; -+ } -+ -+ /* Since the FW made the request, it has already consumed the ready pages, update the host struct */ -+ psFreeList->ui32CurrentFLPages += psFreeList->ui32ReadyFLPages; -+ psFreeList->ui32ReadyFLPages = 0; -+ -+ /* Try to grow the freelist */ -+ eError = RGXGrowFreeList(psFreeList, -+ psFreeList->ui32GrowFLPages, -+ &psFreeList->sMemoryBlockHead); -+ -+ if (eError == PVRSRV_OK) -+ { -+ /* Grow successful, return size of grow size */ -+ ui32GrowValue = psFreeList->ui32GrowFLPages; -+ -+ psFreeList->ui32NumGrowReqByFW++; -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ /* Update Stats */ -+ PVRSRVStatsUpdateFreelistStats(psDevInfo->psDeviceNode, -+ 0, -+ 1, /* Add 1 to the appropriate counter (Requests by FW) */ -+ psFreeList->ui32InitFLPages, -+ psFreeList->ui32NumHighPages, -+ psFreeList->ownerPid); -+ -+#endif -+ -+ } -+ else -+ { -+ /* Grow failed */ -+ ui32GrowValue = 0; -+ PVR_DPF((PVR_DBG_ERROR, -+ "Grow for FreeList %p failed (%s)", -+ psFreeList, -+ PVRSRVGetErrorString(eError))); -+ } -+ -+ /* send feedback */ -+ s3DCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE; -+ s3DCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr; -+ s3DCCBCmd.uCmdData.sFreeListGSData.ui32DeltaPages = ui32GrowValue; -+ s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewPages = psFreeList->ui32CurrentFLPages + 
psFreeList->ui32ReadyFLPages; -+ s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages = psFreeList->ui32ReadyFLPages; -+ -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommand(psDevInfo, -+ RGXFWIF_DM_3D, -+ &s3DCCBCmd, -+ PDUMP_FLAGS_NONE); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ /* Kernel CCB should never fill up, as the FW is processing them right away */ -+ -+ PVR_ASSERT(eError == PVRSRV_OK); -+} -+ -+static void _RGXFreeListReconstruction(PDLLIST_NODE psNode) -+{ -+ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ RGX_FREELIST *psFreeList; -+ RGX_PMR_NODE *psPMRNode; -+ PVRSRV_ERROR eError; -+ IMG_DEVMEM_OFFSET_T uiOffset; -+ IMG_DEVMEM_SIZE_T uiLength; -+ IMG_UINT32 ui32StartPage; -+ -+ psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock); -+ psFreeList = psPMRNode->psFreeList; -+ PVR_ASSERT(psFreeList); -+ psDevInfo = psFreeList->psDevInfo; -+ PVR_ASSERT(psDevInfo); -+ -+ uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32); -+ ui32StartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages); -+ uiOffset = psFreeList->uiFreeListPMROffset + ((ui32StartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)); -+ -+ PMRUnwritePMPageList(psPMRNode->psPageList); -+ psPMRNode->psPageList = NULL; -+ eError = PMRWritePMPageList( -+ /* Target PMR, offset, and length */ -+ psFreeList->psFreeListPMR, -+ (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset), -+ (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength), -+ /* Referenced PMR, and "page" granularity */ -+ psPMRNode->psPMR, -+ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, -+ &psPMRNode->psPageList); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Error (%s) writing FL 0x%08x", -+ __func__, -+ PVRSRVGetErrorString(eError), -+ (IMG_UINT32)psFreeList->ui32FreelistID)); -+ } -+ -+ /* Zeroing physical pages pointed by the reconstructed freelist */ -+ if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST) -+ { -+ eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to zero PMR %p of freelist %p (%s)", -+ __func__, -+ psPMRNode->psPMR, -+ psFreeList, -+ PVRSRVGetErrorString(eError))); -+ PVR_ASSERT(0); -+ } -+ } -+ -+ psFreeList->ui32CurrentFLPages += psPMRNode->ui32NumPages; -+} -+ -+ -+static PVRSRV_ERROR RGXReconstructFreeList(RGX_FREELIST *psFreeList) -+{ -+ IMG_UINT32 ui32OriginalFLPages; -+ DLLIST_NODE *psNode, *psNext; -+ RGXFWIF_FREELIST *psFWFreeList; -+ PVRSRV_ERROR eError; -+ -+ //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: Reconstructing freelist %p (ID=%u)", psFreeList, psFreeList->ui32FreelistID)); -+ -+ /* Do the FreeList Reconstruction */ -+ ui32OriginalFLPages = psFreeList->ui32CurrentFLPages; -+ psFreeList->ui32CurrentFLPages = 0; -+ -+ /* Reconstructing Init FreeList pages */ -+ dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext) -+ { -+ _RGXFreeListReconstruction(psNode); -+ } -+ -+ /* Reconstructing Grow FreeList pages */ -+ dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext) -+ { -+ _RGXFreeListReconstruction(psNode); -+ } -+ -+ /* Ready pages are allocated but kept hidden until OOM occurs. 
*/ -+ psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages; -+ if (psFreeList->ui32CurrentFLPages != ui32OriginalFLPages) -+ { -+ PVR_ASSERT(psFreeList->ui32CurrentFLPages == ui32OriginalFLPages); -+ return PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED; -+ } -+ -+ /* Reset the firmware freelist structure */ -+ eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+ psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1; -+ psFWFreeList->ui32AllocatedPageCount = 0; -+ psFWFreeList->ui32AllocatedMMUPageCount = 0; -+ RGXFwSharedMemCacheOpPtr(psFWFreeList, FLUSH); -+ -+ DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); -+ -+ /* Check the Freelist checksum if required (as the list is fully populated) */ -+ if (psFreeList->bCheckFreelist) -+ { -+ IMG_UINT64 ui64CheckSum; -+ -+ _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum); -+ } -+ -+ return eError; -+} -+ -+ -+void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32FreelistsCount, -+ const IMG_UINT32 *paui32Freelists) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ DLLIST_NODE *psNode, *psNext; -+ IMG_UINT32 ui32Loop; -+ RGXFWIF_KCCB_CMD sTACCBCmd; -+#if !defined(SUPPORT_SHADOW_FREELISTS) -+ DLLIST_NODE *psNodeHWRTData, *psNextHWRTData; -+ RGX_KM_HW_RT_DATASET *psKMHWRTDataSet; -+ RGXFWIF_HWRTDATA *psHWRTData; -+#endif -+ IMG_UINT32 ui32FinalFreelistsCount = 0; -+ IMG_UINT32 aui32FinalFreelists[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT * 2]; /* Worst-case is double what we are sent */ -+ -+ PVR_ASSERT(psDevInfo != NULL); -+ PVR_ASSERT(ui32FreelistsCount <= RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT); -+ if (ui32FreelistsCount > RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT) -+ { -+ ui32FreelistsCount = RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT; -+ } -+ -+ //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: %u freelist(s) requested for reconstruction", ui32FreelistsCount)); -+ -+ /* -+ * Initialise the response command (in case we don't find a freelist ID). -+ * Also copy the list to the 'final' freelist array. -+ */ -+ sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE; -+ sTACCBCmd.uCmdData.sFreeListsReconstructionData.ui32FreelistsCount = ui32FreelistsCount; -+ -+ for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++) -+ { -+ sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] = paui32Freelists[ui32Loop] | -+ RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG; -+ aui32FinalFreelists[ui32Loop] = paui32Freelists[ui32Loop]; -+ } -+ -+ ui32FinalFreelistsCount = ui32FreelistsCount; -+ -+ /* -+ * The list of freelists we have been given for reconstruction will -+ * consist of local and global freelists (maybe MMU as well). Any -+ * local freelists should have their global list specified as well. -+ * There may be cases where the global freelist is not given (in -+ * cases of partial setups before a poll failure for example). To -+ * handle that we must first ensure every local freelist has a global -+ * freelist specified, otherwise we add that to the 'final' list. -+ * This final list of freelists is created in a first pass. -+ * -+ * Even with the global freelists listed, there may be other local -+ * freelists not listed, which are going to have their global freelist -+ * reconstructed. 
Therefore we have to find those freelists as well -+ * meaning we will have to iterate the entire list of freelists to -+ * find which must be reconstructed. This is the second pass. -+ */ -+ OSLockAcquire(psDevInfo->hLockFreeList); -+ dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) -+ { -+ RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); -+ IMG_BOOL bInList = IMG_FALSE; -+ IMG_BOOL bGlobalInList = IMG_FALSE; -+ -+ /* Check if this local freelist is in the list and ensure its global is too. */ -+ if (psFreeList->ui32FreelistGlobalID != 0) -+ { -+ for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++) -+ { -+ if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID) -+ { -+ bInList = IMG_TRUE; -+ } -+ if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID) -+ { -+ bGlobalInList = IMG_TRUE; -+ } -+ } -+ -+ if (bInList && !bGlobalInList) -+ { -+ aui32FinalFreelists[ui32FinalFreelistsCount] = psFreeList->ui32FreelistGlobalID; -+ ui32FinalFreelistsCount++; -+ } -+ } -+ } -+ dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) -+ { -+ RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); -+ IMG_BOOL bReconstruct = IMG_FALSE; -+ -+ /* -+ * Check if this freelist needs to be reconstructed (was it requested -+ * or is its global freelist going to be reconstructed)... -+ */ -+ for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++) -+ { -+ if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID || -+ aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID) -+ { -+ bReconstruct = IMG_TRUE; -+ break; -+ } -+ } -+ -+ if (bReconstruct) -+ { -+ eError = RGXReconstructFreeList(psFreeList); -+ if (eError == PVRSRV_OK) -+ { -+#if !defined(SUPPORT_SHADOW_FREELISTS) -+ /* Mark all HWRTData's of reconstructing local freelists as HWR (applies to TA/3D's not finished yet) */ -+ dllist_foreach_node(&psFreeList->sNodeHWRTDataHead, psNodeHWRTData, psNextHWRTData) -+ { -+ psKMHWRTDataSet = IMG_CONTAINER_OF(psNodeHWRTData, RGX_KM_HW_RT_DATASET, sNodeHWRTData); -+ eError = DevmemAcquireCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc, (void **)&psHWRTData); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Devmem AcquireCpuVirtAddr Failed during Reconstructing of FreeList, FwMemDesc(%p),psHWRTData(%p)", -+ psKMHWRTDataSet->psHWRTDataFwMemDesc, -+ psHWRTData)); -+ continue; -+ } -+ -+ psHWRTData->eState = RGXFWIF_RTDATA_STATE_HWR; -+ psHWRTData->ui32HWRTDataFlags &= ~HWRTDATA_HAS_LAST_TA; -+ RGXFwSharedMemCacheOpValue(psHWRTData->eState, FLUSH); -+ RGXFwSharedMemCacheOpValue(psHWRTData->ui32HWRTDataFlags, FLUSH); -+ -+ DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); -+ } -+#endif -+ -+ /* Update the response for this freelist if it was specifically requested for reconstruction. */ -+ for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++) -+ { -+ if (paui32Freelists[ui32Loop] == psFreeList->ui32FreelistID) -+ { -+ /* Reconstruction of this requested freelist was successful... */ -+ sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] &= ~RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG; -+ break; -+ } -+ } -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Reconstructing of FreeList %p failed (%s)", -+ psFreeList, -+ PVRSRVGetErrorString(eError))); -+ } -+ } -+ } -+ OSLockRelease(psDevInfo->hLockFreeList); -+ -+ /* Check that all freelists were found and reconstructed... 
*/ -+ for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++) -+ { -+ PVR_ASSERT((sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] & -+ RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG) == 0); -+ } -+ -+ /* send feedback */ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommand(psDevInfo, -+ RGXFWIF_DM_GEOM, -+ &sTACCBCmd, -+ PDUMP_FLAGS_NONE); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ /* Kernel CCB should never fill up, as the FW is processing them right away */ -+ PVR_ASSERT(eError == PVRSRV_OK); -+} -+ -+/* Create a single HWRTData instance */ -+static PVRSRV_ERROR RGXCreateHWRTData_aux( -+ CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_DEV_VIRTADDR psVHeapTableDevVAddr, -+ IMG_DEV_VIRTADDR psPMMListDevVAddr, /* per-HWRTData */ -+ RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS], -+ IMG_DEV_VIRTADDR sTailPtrsDevVAddr, -+ IMG_DEV_VIRTADDR sMacrotileArrayDevVAddr, /* per-HWRTData */ -+ IMG_DEV_VIRTADDR sRgnHeaderDevVAddr, /* per-HWRTData */ -+ IMG_DEV_VIRTADDR sRTCDevVAddr, -+ IMG_UINT16 ui16MaxRTs, -+ RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie, -+ RGX_KM_HW_RT_DATASET **ppsKMHWRTDataSet) /* per-HWRTData */ -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ IMG_UINT32 ui32Loop; -+ -+ /* KM cookie storing all the FW/HW data */ -+ RGX_KM_HW_RT_DATASET *psKMHWRTDataSet; -+ -+ /* local pointers for memory descriptors of FW allocations */ -+ DEVMEM_MEMDESC *psHWRTDataFwMemDesc = NULL; -+ DEVMEM_MEMDESC *psRTArrayFwMemDesc = NULL; -+ DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc = NULL; -+ -+ /* local pointer for CPU-mapped [FW]HWRTData */ -+ RGXFWIF_HWRTDATA *psHWRTData = NULL; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ /* Prepare the HW RT DataSet struct */ -+ psKMHWRTDataSet = OSAllocZMem(sizeof(*psKMHWRTDataSet)); -+ if (psKMHWRTDataSet == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto AllocError; -+ } -+ -+ *ppsKMHWRTDataSet = psKMHWRTDataSet; -+ psKMHWRTDataSet->psDeviceNode = psDeviceNode; -+ -+ psKMHWRTDataSet->psHWRTDataCommonCookie = psHWRTDataCommonCookie; -+ -+ psDevInfo = psDeviceNode->pvDevice; -+ -+ /* -+ * This FW RT-Data is only mapped into kernel for initialisation. -+ * Otherwise this allocation is only used by the FW. -+ * Therefore the GPU cache doesn't need coherency and write-combine will -+ * suffice on the CPU side. 
(WC buffer will be flushed at the first TA-kick) -+ */ -+ eError = DevmemFwAllocate(psDevInfo, -+ sizeof(RGXFWIF_HWRTDATA), -+ RGX_FWCOMCTX_ALLOCFLAGS, -+ "FwHwRTData", -+ &psHWRTDataFwMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: DevmemAllocate for RGX_FWIF_HWRTDATA failed", -+ __func__)); -+ goto FWRTDataAllocateError; -+ } -+ -+ psKMHWRTDataSet->psHWRTDataFwMemDesc = psHWRTDataFwMemDesc; -+ eError = RGXSetFirmwareAddress( &psKMHWRTDataSet->sHWRTDataFwAddr, -+ psHWRTDataFwMemDesc, -+ 0, -+ RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", FWRTDataFwAddrError); -+ -+ eError = DevmemAcquireCpuVirtAddr(psHWRTDataFwMemDesc, -+ (void **)&psHWRTData); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTDataCpuMapError); -+ -+ psHWRTData->psVHeapTableDevVAddr = psVHeapTableDevVAddr; -+ -+ psHWRTData->sHWRTDataCommonFwAddr = psHWRTDataCommonCookie->sHWRTDataCommonFwAddr; -+ -+ psHWRTData->psPMMListDevVAddr = psPMMListDevVAddr; -+ -+ psHWRTData->sTailPtrsDevVAddr = sTailPtrsDevVAddr; -+ psHWRTData->sMacrotileArrayDevVAddr = sMacrotileArrayDevVAddr; -+ psHWRTData->sRgnHeaderDevVAddr = sRgnHeaderDevVAddr; -+ psHWRTData->sRTCDevVAddr = sRTCDevVAddr; -+ -+ OSLockAcquire(psDevInfo->hLockFreeList); -+ for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) -+ { -+ psKMHWRTDataSet->apsFreeLists[ui32Loop] = apsFreeLists[ui32Loop]; -+ psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount++; -+ psHWRTData->apsFreeLists[ui32Loop].ui32Addr = psKMHWRTDataSet->apsFreeLists[ui32Loop]->sFreeListFWDevVAddr.ui32Addr; -+ /* invalid initial snapshot value, the snapshot is always taken during first kick -+ * and hence the value get replaced during the first kick anyway. So it's safe to set it 0. 
-+ */ -+ psHWRTData->aui32FreeListHWRSnapshot[ui32Loop] = 0; -+ } -+#if !defined(SUPPORT_SHADOW_FREELISTS) -+ dllist_add_to_tail(&apsFreeLists[RGXFW_LOCAL_FREELIST]->sNodeHWRTDataHead, &(psKMHWRTDataSet->sNodeHWRTData)); -+#endif -+ OSLockRelease(psDevInfo->hLockFreeList); -+ -+ { -+ RGXFWIF_RTA_CTL *psRTACtl = &psHWRTData->sRTACtl; -+ -+ psRTACtl->ui32RenderTargetIndex = 0; -+ psRTACtl->ui32ActiveRenderTargets = 0; -+ psRTACtl->sValidRenderTargets.ui32Addr = 0; -+ psRTACtl->sRTANumPartialRenders.ui32Addr = 0; -+ psRTACtl->ui32MaxRTs = (IMG_UINT32) ui16MaxRTs; -+ -+ if (ui16MaxRTs > 1) -+ { -+ PDUMPCOMMENT(psDeviceNode, "Allocate memory for shadow render target cache"); -+ eError = DevmemFwAllocate( psDevInfo, -+ ui16MaxRTs * sizeof(IMG_UINT32), -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), -+ "FwShadowRTCache", -+ &psRTArrayFwMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate %u bytes for render target array (%s)", -+ __func__, -+ ui16MaxRTs, PVRSRVGetErrorString(eError))); -+ goto FWAllocateRTArryError; -+ } -+ -+ psKMHWRTDataSet->psRTArrayFwMemDesc = psRTArrayFwMemDesc; -+ -+ eError = RGXSetFirmwareAddress( &psRTACtl->sValidRenderTargets, -+ psRTArrayFwMemDesc, -+ 0, -+ RFW_FWADDR_FLAG_NONE ); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", FWAllocateRTArryFwAddrError); -+ -+ PDUMPCOMMENT(psDeviceNode, "Allocate memory for tracking renders accumulation"); -+ eError = DevmemFwAllocate(psDevInfo, -+ ui16MaxRTs * sizeof(IMG_UINT32), -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), -+ "FwRendersAccumulation", -+ &psRendersAccArrayFwMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate %u bytes for render target array (%s) (renders accumulation)", -+ __func__, -+ ui16MaxRTs, PVRSRVGetErrorString(eError))); -+ goto FWAllocateRTAccArryError; -+ } -+ -+ psKMHWRTDataSet->psRendersAccArrayFwMemDesc = psRendersAccArrayFwMemDesc; -+ -+ eError = RGXSetFirmwareAddress( &psRTACtl->sRTANumPartialRenders, -+ psRendersAccArrayFwMemDesc, -+ 0, -+ RFW_FWADDR_FLAG_NONE ); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", FWAllocRTAccArryFwAddrError); -+ } -+ } -+ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDeviceNode, "Dump HWRTData 0x%08X", psKMHWRTDataSet->sHWRTDataFwAddr.ui32Addr); -+ DevmemPDumpLoadMem(psKMHWRTDataSet->psHWRTDataFwMemDesc, 0, sizeof(*psHWRTData), PDUMP_FLAGS_CONTINUOUS); -+#endif -+ -+ RGXFwSharedMemCacheOpPtr(psHWRTData, FLUSH); -+ DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); -+ return PVRSRV_OK; -+ -+FWAllocRTAccArryFwAddrError: -+ DevmemFwUnmapAndFree(psDevInfo, psRendersAccArrayFwMemDesc); -+FWAllocateRTAccArryError: -+ RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRTArrayFwMemDesc); -+FWAllocateRTArryFwAddrError: -+ DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRTArrayFwMemDesc); -+FWAllocateRTArryError: -+ OSLockAcquire(psDevInfo->hLockFreeList); -+ for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) -+ { -+ 
PVR_ASSERT(psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount > 0); -+ psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount--; -+ } -+ OSLockRelease(psDevInfo->hLockFreeList); -+ DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); -+FWRTDataCpuMapError: -+ RGXUnsetFirmwareAddress(psKMHWRTDataSet->psHWRTDataFwMemDesc); -+FWRTDataFwAddrError: -+ DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psHWRTDataFwMemDesc); -+FWRTDataAllocateError: -+ *ppsKMHWRTDataSet = NULL; -+ OSFreeMem(psKMHWRTDataSet); -+ -+AllocError: -+ return eError; -+} -+ -+static void RGXDestroyHWRTData_aux(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ IMG_UINT32 ui32Loop; -+ -+ if (psKMHWRTDataSet == NULL) -+ { -+ return; -+ } -+ -+ psDevInfo = psKMHWRTDataSet->psDeviceNode->pvDevice; -+ -+ if (psKMHWRTDataSet->psRTArrayFwMemDesc) -+ { -+ RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRTArrayFwMemDesc); -+ DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRTArrayFwMemDesc); -+ } -+ -+ if (psKMHWRTDataSet->psRendersAccArrayFwMemDesc) -+ { -+ RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRendersAccArrayFwMemDesc); -+ DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRendersAccArrayFwMemDesc); -+ } -+ -+ /* Decrease freelist refcount */ -+ OSLockAcquire(psDevInfo->hLockFreeList); -+ for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) -+ { -+ PVR_ASSERT(psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount > 0); -+ psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount--; -+ } -+#if !defined(SUPPORT_SHADOW_FREELISTS) -+ dllist_remove_node(&psKMHWRTDataSet->sNodeHWRTData); -+#endif -+ OSLockRelease(psDevInfo->hLockFreeList); -+ -+ /* Freeing the memory has to happen _after_ removing the HWRTData from the freelist -+ * otherwise we risk traversing the freelist to find a pointer from a freed data structure */ -+ RGXUnsetFirmwareAddress(psKMHWRTDataSet->psHWRTDataFwMemDesc); -+ DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psHWRTDataFwMemDesc); -+ -+ OSFreeMem(psKMHWRTDataSet); -+} -+ -+/* Create set of HWRTData(s) and bind it with a shared FW HWRTDataCommon */ -+PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_DEV_VIRTADDR asVHeapTableDevVAddr[RGXMKIF_NUM_GEOMDATAS], -+ IMG_DEV_VIRTADDR asPMMListDevVAddr[RGXMKIF_NUM_RTDATAS], -+ RGX_FREELIST *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS], -+ IMG_UINT32 ui32ScreenPixelMax, -+ IMG_UINT64 ui64MultiSampleCtl, -+ IMG_UINT64 ui64FlippedMultiSampleCtl, -+ IMG_UINT32 ui32TPCStride, -+ IMG_DEV_VIRTADDR asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS], -+ IMG_UINT32 ui32TPCSize, -+ IMG_UINT32 ui32TEScreen, -+ IMG_UINT32 ui32TEAA, -+ IMG_UINT32 ui32TEMTILE1, -+ IMG_UINT32 ui32TEMTILE2, -+ IMG_UINT32 ui32MTileStride, -+ IMG_UINT32 ui32ISPMergeLowerX, -+ IMG_UINT32 ui32ISPMergeLowerY, -+ IMG_UINT32 ui32ISPMergeUpperX, -+ IMG_UINT32 ui32ISPMergeUpperY, -+ IMG_UINT32 ui32ISPMergeScaleX, -+ IMG_UINT32 ui32ISPMergeScaleY, -+ IMG_DEV_VIRTADDR asMacrotileArrayDevVAddr[RGXMKIF_NUM_RTDATAS], -+ IMG_DEV_VIRTADDR asRgnHeaderDevVAddr[RGXMKIF_NUM_RTDATAS], -+ IMG_DEV_VIRTADDR asRTCDevVAddr[RGXMKIF_NUM_GEOMDATAS], -+ IMG_UINT32 uiRgnHeaderSize, -+ IMG_UINT32 ui32ISPMtileSize, -+ IMG_UINT16 ui16MaxRTs, -+ RGX_KM_HW_RT_DATASET *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32RTDataID; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie; -+ RGXFWIF_HWRTDATA_COMMON *psHWRTDataCommon; -+ DEVMEM_MEMDESC 
*psHWRTDataCommonFwMemDesc; -+ RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; -+ -+ /* Prepare KM cleanup object for HWRTDataCommon FW object */ -+ psHWRTDataCommonCookie = OSAllocZMem(sizeof(*psHWRTDataCommonCookie)); -+ if (psHWRTDataCommonCookie == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto err_HWRTDataCommonCookieAlloc; -+ } -+ -+ /* -+ * This FW common context is only mapped into kernel for initialisation. -+ * Otherwise this allocation is only used by the FW. -+ * Therefore the GPU cache doesn't need coherency, and write-combine will -+ * suffice on the CPU side (WC buffer will be flushed at the first TA-kick) -+ */ -+ eError = DevmemFwAllocate(psDevInfo, -+ sizeof(RGXFWIF_HWRTDATA_COMMON), -+ RGX_FWCOMCTX_ALLOCFLAGS, -+ "FwHWRTDataCommon", -+ &psHWRTDataCommonFwMemDesc); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: DevmemAllocate for FwHWRTDataCommon failed", __func__)); -+ goto err_HWRTDataCommonAlloc; -+ } -+ eError = RGXSetFirmwareAddress(&sHWRTDataCommonFwAddr, psHWRTDataCommonFwMemDesc, 0, RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", err_HWRTDataCommonFwAddr); -+ -+ eError = DevmemAcquireCpuVirtAddr(psHWRTDataCommonFwMemDesc, (void **)&psHWRTDataCommon); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", err_HWRTDataCommonVA); -+ -+ psHWRTDataCommon->bTACachesNeedZeroing = IMG_FALSE; -+ psHWRTDataCommon->ui32ScreenPixelMax = ui32ScreenPixelMax; -+ psHWRTDataCommon->ui64MultiSampleCtl = ui64MultiSampleCtl; -+ psHWRTDataCommon->ui64FlippedMultiSampleCtl = ui64FlippedMultiSampleCtl; -+ psHWRTDataCommon->ui32TPCStride = ui32TPCStride; -+ psHWRTDataCommon->ui32TPCSize = ui32TPCSize; -+ psHWRTDataCommon->ui32TEScreen = ui32TEScreen; -+ psHWRTDataCommon->ui32TEAA = ui32TEAA; -+ psHWRTDataCommon->ui32TEMTILE1 = ui32TEMTILE1; -+ psHWRTDataCommon->ui32TEMTILE2 = ui32TEMTILE2; -+ psHWRTDataCommon->ui32MTileStride = ui32MTileStride; -+ psHWRTDataCommon->ui32ISPMergeLowerX = ui32ISPMergeLowerX; -+ psHWRTDataCommon->ui32ISPMergeLowerY = ui32ISPMergeLowerY; -+ psHWRTDataCommon->ui32ISPMergeUpperX = ui32ISPMergeUpperX; -+ psHWRTDataCommon->ui32ISPMergeUpperY = ui32ISPMergeUpperY; -+ psHWRTDataCommon->ui32ISPMergeScaleX = ui32ISPMergeScaleX; -+ psHWRTDataCommon->ui32ISPMergeScaleY = ui32ISPMergeScaleY; -+ psHWRTDataCommon->uiRgnHeaderSize = uiRgnHeaderSize; -+ psHWRTDataCommon->ui32ISPMtileSize = ui32ISPMtileSize; -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDeviceNode, "Dump HWRTDataCommon"); -+ DevmemPDumpLoadMem(psHWRTDataCommonFwMemDesc, 0, sizeof(*psHWRTDataCommon), PDUMP_FLAGS_CONTINUOUS); -+#endif -+ RGXFwSharedMemCacheOpPtr(psHWRTDataCommon, FLUSH); -+ DevmemReleaseCpuVirtAddr(psHWRTDataCommonFwMemDesc); -+ -+ psHWRTDataCommonCookie->ui32RefCount = 0; -+ psHWRTDataCommonCookie->psHWRTDataCommonFwMemDesc = psHWRTDataCommonFwMemDesc; -+ psHWRTDataCommonCookie->sHWRTDataCommonFwAddr = sHWRTDataCommonFwAddr; -+ -+ /* Here we are creating a set of HWRTData(s) -+ the number of elements in the set equals RGXMKIF_NUM_RTDATAS. 
-+ */ -+ -+ for (ui32RTDataID = 0; ui32RTDataID < RGXMKIF_NUM_RTDATAS; ui32RTDataID++) -+ { -+ eError = RGXCreateHWRTData_aux( -+ psConnection, -+ psDeviceNode, -+ asVHeapTableDevVAddr[ui32RTDataID % RGXMKIF_NUM_GEOMDATAS], -+ asPMMListDevVAddr[ui32RTDataID], -+ &apsFreeLists[(ui32RTDataID % RGXMKIF_NUM_GEOMDATAS) * RGXFW_MAX_FREELISTS], -+ asTailPtrsDevVAddr[ui32RTDataID % RGXMKIF_NUM_GEOMDATAS], -+ asMacrotileArrayDevVAddr[ui32RTDataID], -+ asRgnHeaderDevVAddr[ui32RTDataID], -+ asRTCDevVAddr[ui32RTDataID % RGXMKIF_NUM_GEOMDATAS], -+ ui16MaxRTs, -+ psHWRTDataCommonCookie, -+ &pasKMHWRTDataSet[ui32RTDataID]); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to create HWRTData [slot %u] (%s)", -+ __func__, -+ ui32RTDataID, -+ PVRSRVGetErrorString(eError))); -+ goto err_HWRTDataAlloc; -+ } -+ psHWRTDataCommonCookie->ui32RefCount += 1; -+ } -+ -+ return PVRSRV_OK; -+ -+err_HWRTDataAlloc: -+ PVR_DPF((PVR_DBG_WARNING, "%s: err_HWRTDataAlloc %u", -+ __func__, psHWRTDataCommonCookie->ui32RefCount)); -+ if (pasKMHWRTDataSet) -+ { -+ for (ui32RTDataID = psHWRTDataCommonCookie->ui32RefCount; ui32RTDataID > 0; ui32RTDataID--) -+ { -+ if (pasKMHWRTDataSet[ui32RTDataID-1] != NULL) -+ { -+ RGXDestroyHWRTData_aux(pasKMHWRTDataSet[ui32RTDataID-1]); -+ pasKMHWRTDataSet[ui32RTDataID-1] = NULL; -+ } -+ } -+ } -+err_HWRTDataCommonVA: -+ RGXUnsetFirmwareAddress(psHWRTDataCommonFwMemDesc); -+err_HWRTDataCommonFwAddr: -+ DevmemFwUnmapAndFree(psDevInfo, psHWRTDataCommonFwMemDesc); -+err_HWRTDataCommonAlloc: -+ OSFreeMem(psHWRTDataCommonCookie); -+err_HWRTDataCommonCookieAlloc: -+ -+ return eError; -+} -+ -+/* Destroy a single instance of HWRTData. -+ Additionally, destroy the HWRTDataCommon{Cookie} objects -+ when it is the last HWRTData within a corresponding set of HWRTDatas. -+*/ -+PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) -+{ -+ PVRSRV_DEVICE_NODE *psDevNode; -+ PVRSRV_ERROR eError; -+ PRGXFWIF_HWRTDATA psHWRTData; -+ RGX_HWRTDATA_COMMON_COOKIE *psCommonCookie; -+ -+ PVR_ASSERT(psKMHWRTDataSet); -+ -+ psDevNode = psKMHWRTDataSet->psDeviceNode; -+ -+ eError = RGXSetFirmwareAddress(&psHWRTData, -+ psKMHWRTDataSet->psHWRTDataFwMemDesc, 0, -+ RFW_FWADDR_NOREF_FLAG); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ /* Cleanup HWRTData */ -+ eError = RGXFWRequestHWRTDataCleanUp(psDevNode, psHWRTData); -+ if (RGXIsErrorAndDeviceRecoverable(psDevNode, &eError)) -+ { -+ return eError; -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("%s: Unexpected error from RGXFWRequestHWRTDataCleanUp (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ } -+ -+ psCommonCookie = psKMHWRTDataSet->psHWRTDataCommonCookie; -+ -+ RGXDestroyHWRTData_aux(psKMHWRTDataSet); -+ -+ /* We've got past potential PVRSRV_ERROR_RETRY events, so we are sure -+ that the HWRTDATA instance will be destroyed during this call. -+ Consequently, we decrease the ref count for HWRTDataCommonCookie. -+ -+ NOTE: This ref count does not require locks or atomics. -+ ------------------------------------------------------- -+ HWRTDatas bound into one pair are always destroyed sequentially, -+ within a single loop on the Client side. -+ The Common/Cookie objects always belong to only one pair of -+ HWRTDatas, and ref count is used to ensure that the Common/Cookie -+ objects will be destroyed after destruction of all HWRTDatas -+ within a single pair. 
-+ */ -+ psCommonCookie->ui32RefCount--; -+ -+ /* When ref count for HWRTDataCommonCookie hits ZERO -+ * we have to destroy the HWRTDataCommon [FW object] and the cookie -+ * [KM object] afterwards. */ -+ if (psCommonCookie->ui32RefCount == 0) -+ { -+ RGXUnsetFirmwareAddress(psCommonCookie->psHWRTDataCommonFwMemDesc); -+ -+ /* We don't need to flush the SLC before freeing. -+ * FW RequestCleanUp has already done that for HWRTData, so we're fine -+ * now. */ -+ -+ DevmemFwUnmapAndFree(psDevNode->pvDevice, -+ psCommonCookie->psHWRTDataCommonFwMemDesc); -+ OSFreeMem(psCommonCookie); -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_HANDLE hMemCtxPrivData, -+ IMG_UINT32 ui32MaxFLPages, -+ IMG_UINT32 ui32InitFLPages, -+ IMG_UINT32 ui32GrowFLPages, -+ IMG_UINT32 ui32GrowParamThreshold, -+ RGX_FREELIST *psGlobalFreeList, -+ IMG_BOOL bCheckFreelist, -+ IMG_DEV_VIRTADDR sFreeListDevVAddr, -+ PMR *psFreeListPMR, -+ IMG_DEVMEM_OFFSET_T uiFreeListPMROffset, -+ RGX_FREELIST **ppsFreeList) -+{ -+ PVRSRV_ERROR eError; -+ RGXFWIF_FREELIST *psFWFreeList; -+ DEVMEM_MEMDESC *psFWFreelistMemDesc; -+ RGX_FREELIST *psFreeList; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ if (OSGetPageShift() > RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT) -+ { -+ IMG_UINT32 ui32Size, ui32NewInitFLPages, ui32NewMaxFLPages, ui32NewGrowFLPages; -+ -+ /* Round up number of FL pages to the next multiple of the OS page size */ -+ -+ ui32Size = ui32InitFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; -+ ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); -+ ui32NewInitFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; -+ -+ ui32Size = ui32GrowFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; -+ ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); -+ ui32NewGrowFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; -+ -+ ui32Size = ui32MaxFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; -+ ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); -+ ui32NewMaxFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Increased number of PB pages: Init %u -> %u, Grow %u -> %u, Max %u -> %u", -+ __func__, ui32InitFLPages, ui32NewInitFLPages, ui32GrowFLPages, ui32NewGrowFLPages, ui32MaxFLPages, ui32NewMaxFLPages)); -+ -+ ui32InitFLPages = ui32NewInitFLPages; -+ ui32GrowFLPages = ui32NewGrowFLPages; -+ ui32MaxFLPages = ui32NewMaxFLPages; -+ } -+ -+ /* Allocate kernel freelist struct */ -+ psFreeList = OSAllocZMem(sizeof(*psFreeList)); -+ if (psFreeList == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: failed to allocate host data structure", -+ __func__)); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto ErrorAllocHost; -+ } -+ -+ /* -+ * This FW FreeList context is only mapped into kernel for initialisation -+ * and reconstruction (at other times it is not mapped and only used by the -+ * FW). 
-+ * Therefore the GPU cache doesn't need coherency, and write-combine will -+ * suffice on the CPU side (WC buffer will be flushed at the first TA-kick) -+ */ -+ eError = DevmemFwAllocate(psDevInfo, -+ sizeof(*psFWFreeList), -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | -+ PVRSRV_MEMALLOCFLAG_CPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), -+ "FwFreeList", -+ &psFWFreelistMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: DevmemAllocate for RGXFWIF_FREELIST failed", -+ __func__)); -+ goto FWFreeListAlloc; -+ } -+ -+ /* Initialise host data structures */ -+ psFreeList->psDevInfo = psDevInfo; -+ psFreeList->psConnection = psConnection; -+ -+ psFreeList->psFreeListPMR = psFreeListPMR; -+ /* Ref the PMR to prevent resource being destroyed before use */ -+ PMRRefPMR(psFreeList->psFreeListPMR); -+ -+ psFreeList->uiFreeListPMROffset = uiFreeListPMROffset; -+ psFreeList->psFWFreelistMemDesc = psFWFreelistMemDesc; -+ eError = RGXSetFirmwareAddress(&psFreeList->sFreeListFWDevVAddr, psFWFreelistMemDesc, 0, RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrorSetFwAddr); -+ -+ /* psFreeList->ui32FreelistID set below with lock... */ -+ psFreeList->ui32FreelistGlobalID = (psGlobalFreeList ? psGlobalFreeList->ui32FreelistID : 0); -+ psFreeList->ui32MaxFLPages = ui32MaxFLPages; -+ psFreeList->ui32InitFLPages = ui32InitFLPages; -+ psFreeList->ui32GrowFLPages = ui32GrowFLPages; -+ psFreeList->ui32CurrentFLPages = 0; -+ psFreeList->ui32ReadyFLPages = 0; -+ psFreeList->ui32GrowThreshold = ui32GrowParamThreshold; -+ psFreeList->ui64FreelistChecksum = 0; -+ psFreeList->ui32RefCount = 0; -+ psFreeList->bCheckFreelist = bCheckFreelist; -+ dllist_init(&psFreeList->sMemoryBlockHead); -+ dllist_init(&psFreeList->sMemoryBlockInitHead); -+#if !defined(SUPPORT_SHADOW_FREELISTS) -+ dllist_init(&psFreeList->sNodeHWRTDataHead); -+#endif -+ psFreeList->ownerPid = OSGetCurrentClientProcessIDKM(); -+ -+ -+ /* Add to list of freelists */ -+ OSLockAcquire(psDevInfo->hLockFreeList); -+ psFreeList->ui32FreelistID = psDevInfo->ui32FreelistCurrID++; -+ dllist_add_to_tail(&psDevInfo->sFreeListHead, &psFreeList->sNode); -+ OSLockRelease(psDevInfo->hLockFreeList); -+ -+ -+ /* Initialise FW data structure */ -+ eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); -+ PVR_LOG_GOTO_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWFreeListCpuMap); -+ -+ { -+ const IMG_UINT32 ui32ReadyPages = _CalculateFreelistReadyPages(psFreeList, ui32InitFLPages); -+ -+ psFWFreeList->ui32MaxPages = ui32MaxFLPages; -+ psFWFreeList->ui32CurrentPages = ui32InitFLPages - ui32ReadyPages; -+ psFWFreeList->ui32GrowPages = ui32GrowFLPages; -+ psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1; -+ psFWFreeList->psFreeListDevVAddr = sFreeListDevVAddr; -+ psFWFreeList->ui64CurrentDevVAddr = (sFreeListDevVAddr.uiAddr + -+ ((ui32MaxFLPages - psFWFreeList->ui32CurrentPages) * sizeof(IMG_UINT32))) & -+ ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1); -+ psFWFreeList->ui32FreeListID = psFreeList->ui32FreelistID; -+ psFWFreeList->bGrowPending = IMG_FALSE; -+ psFWFreeList->ui32ReadyPages = 
ui32ReadyPages; -+ RGXFwSharedMemCacheOpPtr(psFWFreeList, FLUSH); -+ -+#if defined(SUPPORT_SHADOW_FREELISTS) -+ /* Get the FW Memory Context address... */ -+ eError = RGXSetFirmwareAddress(&psFWFreeList->psFWMemContext, -+ RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData), -+ 0, RFW_FWADDR_NOREF_FLAG); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: RGXSetFirmwareAddress for RGXFWIF_FWMEMCONTEXT failed", -+ __func__)); -+ DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); -+ goto FWFreeListCpuMap; -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(hMemCtxPrivData); -+#endif -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "Freelist %p created: Max pages 0x%08x, Init pages 0x%08x, " -+ "Max FL base address 0x%016" IMG_UINT64_FMTSPECx ", " -+ "Init FL base address 0x%016" IMG_UINT64_FMTSPECx, -+ psFreeList, -+ ui32MaxFLPages, -+ ui32InitFLPages, -+ sFreeListDevVAddr.uiAddr, -+ psFWFreeList->ui64CurrentDevVAddr)); -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDeviceNode, "Dump FW FreeList"); -+ DevmemPDumpLoadMem(psFreeList->psFWFreelistMemDesc, 0, sizeof(*psFWFreeList), PDUMP_FLAGS_CONTINUOUS); -+ -+ /* -+ * Separate dump of the Freelist's number of Pages and stack pointer. -+ * This allows to easily modify the PB size in the out2.txt files. -+ */ -+ PDUMPCOMMENT(psDeviceNode, "FreeList TotalPages"); -+ DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc, -+ offsetof(RGXFWIF_FREELIST, ui32CurrentPages), -+ psFWFreeList->ui32CurrentPages, -+ PDUMP_FLAGS_CONTINUOUS); -+ PDUMPCOMMENT(psDeviceNode, "FreeList StackPointer"); -+ DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc, -+ offsetof(RGXFWIF_FREELIST, ui32CurrentStackTop), -+ psFWFreeList->ui32CurrentStackTop, -+ PDUMP_FLAGS_CONTINUOUS); -+#endif -+ -+ DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); -+ -+ /* Add initial PB block */ -+ eError = RGXGrowFreeList(psFreeList, -+ ui32InitFLPages, -+ &psFreeList->sMemoryBlockInitHead); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: failed to allocate initial memory block for free list 0x%016" IMG_UINT64_FMTSPECx " (%s)", -+ __func__, -+ sFreeListDevVAddr.uiAddr, -+ PVRSRVGetErrorString(eError))); -+ goto FWFreeListCpuMap; -+ } -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ /* Update Stats */ -+ PVRSRVStatsUpdateFreelistStats(psDeviceNode, -+ 1, /* Add 1 to the appropriate counter (Requests by App)*/ -+ 0, -+ psFreeList->ui32InitFLPages, -+ psFreeList->ui32NumHighPages, -+ psFreeList->ownerPid); -+ -+#endif -+ -+ /* return values */ -+ *ppsFreeList = psFreeList; -+ -+ return PVRSRV_OK; -+ -+ /* Error handling */ -+ -+FWFreeListCpuMap: -+ /* Remove freelists from list */ -+ OSLockAcquire(psDevInfo->hLockFreeList); -+ dllist_remove_node(&psFreeList->sNode); -+ OSLockRelease(psDevInfo->hLockFreeList); -+ RGXUnsetFirmwareAddress(psFWFreelistMemDesc); -+ -+ErrorSetFwAddr: -+ DevmemFwUnmapAndFree(psDevInfo, psFWFreelistMemDesc); -+ PMRUnrefPMR(psFreeList->psFreeListPMR); -+ -+FWFreeListAlloc: -+ OSFreeMem(psFreeList); -+ -+ErrorAllocHost: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+ -+/* -+ RGXDestroyFreeList -+*/ -+PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 ui32RefCount; -+ -+ PVR_ASSERT(psFreeList); -+ -+ OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); -+ ui32RefCount = psFreeList->ui32RefCount; -+ OSLockRelease(psFreeList->psDevInfo->hLockFreeList); -+ -+ if (ui32RefCount != 0) -+ { -+ /* Freelist still busy */ -+ return PVRSRV_ERROR_RETRY; -+ } -+ -+ /* Freelist 
is not in use => start firmware cleanup */ -+ eError = RGXFWRequestFreeListCleanUp(psFreeList->psDevInfo, -+ psFreeList->sFreeListFWDevVAddr); -+ if (RGXIsErrorAndDeviceRecoverable(psFreeList->psDevInfo->psDeviceNode, &eError)) -+ { -+ return eError; -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ /* Can happen if the firmware took too long to handle the cleanup request, -+ * or if SLC-flushes didn't went through (due to some GPU lockup) */ -+ PVR_LOG(("%s: Unexpected error from RGXFWRequestFreeListCleanUp (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ } -+ -+ /* Remove FreeList from linked list before we destroy it... */ -+ OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); -+ dllist_remove_node(&psFreeList->sNode); -+#if !defined(SUPPORT_SHADOW_FREELISTS) -+ /* Confirm all HWRTData nodes are freed before releasing freelist */ -+ PVR_ASSERT(dllist_is_empty(&psFreeList->sNodeHWRTDataHead)); -+#endif -+ OSLockRelease(psFreeList->psDevInfo->hLockFreeList); -+ -+ if (psFreeList->bCheckFreelist) -+ { -+ RGXFWIF_FREELIST *psFWFreeList; -+ IMG_UINT32 ui32CurrentStackTop; -+ IMG_UINT64 ui64CheckSum; -+ -+ /* Get the current stack pointer for this free list */ -+ DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); -+ RGXFwSharedMemCacheOpValue(psFWFreeList->ui32CurrentStackTop, INVALIDATE); -+ ui32CurrentStackTop = psFWFreeList->ui32CurrentStackTop; -+ DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); -+ -+ if (ui32CurrentStackTop > psFreeList->ui32MaxFLPages) -+ { -+ PVR_LOG(("%s: FW freelist corrupted (%d)", -+ __func__, -+ ui32CurrentStackTop)); -+ } -+ else if (ui32CurrentStackTop == psFreeList->ui32CurrentFLPages-1) -+ { -+ /* Do consistency tests (as the list is fully populated) */ -+ _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum); -+ } -+ else -+ { -+ /* Check for duplicate pages, but don't check the checksum as the list is not fully populated */ -+ _CheckFreelist(psFreeList, ui32CurrentStackTop+1, 0, &ui64CheckSum); -+ } -+ } -+ -+ /* Destroy FW structures */ -+ RGXUnsetFirmwareAddress(psFreeList->psFWFreelistMemDesc); -+ DevmemFwUnmapAndFree(psFreeList->psDevInfo, psFreeList->psFWFreelistMemDesc); -+ -+ /* Remove grow shrink blocks */ -+ while (!dllist_is_empty(&psFreeList->sMemoryBlockHead)) -+ { -+ eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockHead, psFreeList); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ } -+ -+ /* Remove initial PB block */ -+ eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockInitHead, psFreeList); -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ /* consistency checks */ -+ PVR_ASSERT(dllist_is_empty(&psFreeList->sMemoryBlockInitHead)); -+ PVR_ASSERT(psFreeList->ui32CurrentFLPages == 0); -+ -+ /* Remove reference from the PMR resource */ -+ PMRUnrefPMR(psFreeList->psFreeListPMR); -+ -+ /* free Freelist */ -+ OSFreeMem(psFreeList); -+ -+ return eError; -+} -+ -+ -+/* -+ RGXCreateZSBuffer -+*/ -+PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_RESERVATION *psReservation, -+ PMR *psPMR, -+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags, -+ RGX_ZSBUFFER_DATA **ppsZSBuffer) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGXFWIF_PRBUFFER *psFWZSBuffer; -+ RGX_ZSBUFFER_DATA *psZSBuffer; -+ DEVMEM_MEMDESC *psFWZSBufferMemDesc; -+ IMG_BOOL bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiMapFlags) ? 
IMG_TRUE : IMG_FALSE; -+ -+ /* Allocate host data structure */ -+ psZSBuffer = OSAllocZMem(sizeof(*psZSBuffer)); -+ if (psZSBuffer == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate cleanup data structure for ZS-Buffer", -+ __func__)); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto ErrorAllocCleanup; -+ } -+ -+ /* Populate Host data */ -+ psZSBuffer->psDevInfo = psDevInfo; -+ psZSBuffer->psReservation = psReservation; -+ -+ /* Obtain reference to reservation object */ -+ if (!DevmemIntReservationAcquire(psZSBuffer->psReservation)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to acquire reservation for ZS-Buffer", -+ __func__)); -+ eError = PVRSRV_ERROR_REFCOUNT_OVERFLOW; -+ goto ErrorReservationAcquire; -+ } -+ -+ psZSBuffer->psPMR = psPMR; -+ /* Obtain reference to PMR */ -+ PMRRefPMR(psZSBuffer->psPMR); -+ -+ psZSBuffer->uiMapFlags = uiMapFlags; -+ psZSBuffer->ui32RefCount = 0; -+ psZSBuffer->bOnDemand = bOnDemand; -+ if (bOnDemand) -+ { -+ /* psZSBuffer->ui32ZSBufferID set below with lock... */ -+ psZSBuffer->psMapping = NULL; -+ -+ OSLockAcquire(psDevInfo->hLockZSBuffer); -+ psZSBuffer->ui32ZSBufferID = psDevInfo->ui32ZSBufferCurrID++; -+ dllist_add_to_tail(&psDevInfo->sZSBufferHead, &psZSBuffer->sNode); -+ OSLockRelease(psDevInfo->hLockZSBuffer); -+ } -+ -+ /* Allocate firmware memory for ZS-Buffer. */ -+ PDUMPCOMMENT(psDeviceNode, "Allocate firmware ZS-Buffer data structure"); -+ eError = DevmemFwAllocate(psDevInfo, -+ sizeof(*psFWZSBuffer), -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | -+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | -+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | -+ PVRSRV_MEMALLOCFLAG_CPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), -+ "FwZSBuffer", -+ &psFWZSBufferMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate firmware ZS-Buffer (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto ErrorAllocFWZSBuffer; -+ } -+ psZSBuffer->psFWZSBufferMemDesc = psFWZSBufferMemDesc; -+ -+ /* Temporarily map the firmware render context to the kernel. */ -+ eError = DevmemAcquireCpuVirtAddr(psFWZSBufferMemDesc, -+ (void **)&psFWZSBuffer); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to map firmware ZS-Buffer (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto ErrorAcquireFWZSBuffer; -+ } -+ -+ /* Populate FW ZS-Buffer data structure */ -+ psFWZSBuffer->bOnDemand = bOnDemand; -+ psFWZSBuffer->eState = (bOnDemand) ? RGXFWIF_PRBUFFER_UNBACKED : RGXFWIF_PRBUFFER_BACKED; -+ psFWZSBuffer->ui32BufferID = psZSBuffer->ui32ZSBufferID; -+ -+ /* Get firmware address of ZS-Buffer. */ -+ eError = RGXSetFirmwareAddress(&psZSBuffer->sZSBufferFWDevVAddr, psFWZSBufferMemDesc, 0, RFW_FWADDR_FLAG_NONE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrorSetFwAddr); -+ -+ /* Dump the ZS-Buffer and the memory content */ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDeviceNode, "Dump firmware ZS-Buffer"); -+ DevmemPDumpLoadMem(psFWZSBufferMemDesc, 0, sizeof(*psFWZSBuffer), PDUMP_FLAGS_CONTINUOUS); -+#endif -+ RGXFwSharedMemCacheOpPtr(psFWZSBuffer, FLUSH); -+ -+ /* Release address acquired above. 
*/ -+ DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc); -+ -+ -+ /* define return value */ -+ *ppsZSBuffer = psZSBuffer; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] created (%s)", -+ psZSBuffer, -+ (bOnDemand) ? "On-Demand": "Up-front")); -+ -+ psZSBuffer->owner=OSGetCurrentClientProcessIDKM(); -+ -+ return PVRSRV_OK; -+ -+ /* error handling */ -+ -+ErrorSetFwAddr: -+ DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc); -+ErrorAcquireFWZSBuffer: -+ DevmemFwUnmapAndFree(psDevInfo, psFWZSBufferMemDesc); -+ -+ErrorAllocFWZSBuffer: -+ PMRUnrefPMR(psZSBuffer->psPMR); -+ DevmemIntReservationRelease(psZSBuffer->psReservation); -+ErrorReservationAcquire: -+ OSFreeMem(psZSBuffer); -+ -+ErrorAllocCleanup: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+ -+/* -+ RGXDestroyZSBuffer -+*/ -+PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer) -+{ -+ POS_LOCK hLockZSBuffer; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psZSBuffer); -+ hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer; -+ -+ if (psZSBuffer->ui32RefCount != 0) -+ { -+ PVR_ASSERT(IMG_FALSE); -+ /* ZS-Buffer is still referenced (by population object) */ -+ return PVRSRV_ERROR_RETRY; -+ } -+ -+ /* Request ZS Buffer cleanup */ -+ eError = RGXFWRequestZSBufferCleanUp(psZSBuffer->psDevInfo, -+ psZSBuffer->sZSBufferFWDevVAddr); -+ if (RGXIsErrorAndDeviceRecoverable(psZSBuffer->psDevInfo->psDeviceNode, &eError)) -+ { -+ return eError; -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("%s: Unexpected error from RGXFWRequestZSBufferCleanUp (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ } -+ -+ /* Free the firmware render context. */ -+ RGXUnsetFirmwareAddress(psZSBuffer->psFWZSBufferMemDesc); -+ DevmemFwUnmapAndFree(psZSBuffer->psDevInfo, psZSBuffer->psFWZSBufferMemDesc); -+ -+ /* Remove Deferred Allocation from list */ -+ if (psZSBuffer->bOnDemand) -+ { -+ OSLockAcquire(hLockZSBuffer); -+ PVR_ASSERT(dllist_node_is_in_list(&psZSBuffer->sNode)); -+ dllist_remove_node(&psZSBuffer->sNode); -+ OSLockRelease(hLockZSBuffer); -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] destroyed", psZSBuffer)); -+ -+ /* Release reference to reservation object and the PMR */ -+ PMRUnrefPMR(psZSBuffer->psPMR); -+ DevmemIntReservationRelease(psZSBuffer->psReservation); -+ -+ /* Free ZS-Buffer host data structure */ -+ OSFreeMem(psZSBuffer); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer) -+{ -+ POS_LOCK hLockZSBuffer; -+ PVRSRV_ERROR eError; -+ -+ if (!psZSBuffer) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (!psZSBuffer->bOnDemand) -+ { -+ /* Only deferred allocations can be populated */ -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "ZS Buffer [%p, ID=0x%08x]: Physical backing requested", -+ psZSBuffer, -+ psZSBuffer->ui32ZSBufferID)); -+ hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer; -+ -+ OSLockAcquire(hLockZSBuffer); -+ -+ if (psZSBuffer->ui32RefCount == 0) -+ { -+ IMG_HANDLE hDevmemHeap; -+ -+ PVR_ASSERT(psZSBuffer->psMapping == NULL); -+ -+ /* Get Heap */ -+ eError = DevmemServerGetHeapHandle(psZSBuffer->psReservation, &hDevmemHeap); -+ PVR_ASSERT(psZSBuffer->psMapping == NULL); -+ if (unlikely(hDevmemHeap == (IMG_HANDLE)NULL)) -+ { -+ OSLockRelease(hLockZSBuffer); -+ return PVRSRV_ERROR_INVALID_HEAP; -+ } -+ -+ eError = DevmemIntMapPMR(hDevmemHeap, -+ psZSBuffer->psReservation, -+ psZSBuffer->psPMR, -+ psZSBuffer->uiMapFlags, -+ &psZSBuffer->psMapping); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Unable 
populate ZS Buffer [%p, ID=0x%08x] (%s)", -+ psZSBuffer, -+ psZSBuffer->ui32ZSBufferID, -+ PVRSRVGetErrorString(eError))); -+ OSLockRelease(hLockZSBuffer); -+ return eError; -+ -+ } -+ PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing acquired", -+ psZSBuffer, -+ psZSBuffer->ui32ZSBufferID)); -+ } -+ -+ /* Increase refcount*/ -+ psZSBuffer->ui32RefCount++; -+ -+ OSLockRelease(hLockZSBuffer); -+ -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR -+RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer, -+ RGX_POPULATION **ppsPopulation) -+{ -+ RGX_POPULATION *psPopulation; -+ PVRSRV_ERROR eError; -+ -+ psZSBuffer->ui32NumReqByApp++; -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ PVRSRVStatsUpdateZSBufferStats(psZSBuffer->psDevInfo->psDeviceNode, -+ 1, 0, psZSBuffer->owner); -+#endif -+ -+ /* Do the backing */ -+ eError = RGXBackingZSBuffer(psZSBuffer); -+ if (eError != PVRSRV_OK) -+ { -+ goto OnErrorBacking; -+ } -+ -+ /* Create the handle to the backing */ -+ psPopulation = OSAllocMem(sizeof(*psPopulation)); -+ if (psPopulation == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto OnErrorAlloc; -+ } -+ -+ psPopulation->psZSBuffer = psZSBuffer; -+ -+ /* return value */ -+ *ppsPopulation = psPopulation; -+ -+ return PVRSRV_OK; -+ -+OnErrorAlloc: -+ RGXUnbackingZSBuffer(psZSBuffer); -+ -+OnErrorBacking: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+PVRSRV_ERROR -+RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer) -+{ -+ POS_LOCK hLockZSBuffer; -+ PVRSRV_ERROR eError; -+ -+ if (!psZSBuffer) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ PVR_ASSERT(psZSBuffer->ui32RefCount); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "ZS Buffer [%p, ID=0x%08x]: Physical backing removal requested", -+ psZSBuffer, -+ psZSBuffer->ui32ZSBufferID)); -+ -+ hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer; -+ -+ OSLockAcquire(hLockZSBuffer); -+ -+ if (psZSBuffer->bOnDemand) -+ { -+ if (psZSBuffer->ui32RefCount == 1) -+ { -+ PVR_ASSERT(psZSBuffer->psMapping); -+ -+ eError = DevmemIntUnmapPMR(psZSBuffer->psMapping); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Unable to unpopulate ZS Buffer [%p, ID=0x%08x] (%s)", -+ psZSBuffer, -+ psZSBuffer->ui32ZSBufferID, -+ PVRSRVGetErrorString(eError))); -+ OSLockRelease(hLockZSBuffer); -+ return eError; -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing removed", -+ psZSBuffer, -+ psZSBuffer->ui32ZSBufferID)); -+ } -+ } -+ -+ /* Decrease refcount*/ -+ psZSBuffer->ui32RefCount--; -+ -+ OSLockRelease(hLockZSBuffer); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation) -+{ -+ PVRSRV_ERROR eError; -+ -+ if (!psPopulation) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ eError = RGXUnbackingZSBuffer(psPopulation->psZSBuffer); -+ if (eError != PVRSRV_OK) -+ { -+ return eError; -+ } -+ -+ OSFreeMem(psPopulation); -+ -+ return PVRSRV_OK; -+} -+ -+static RGX_ZSBUFFER_DATA *FindZSBuffer(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32ZSBufferID) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ RGX_ZSBUFFER_DATA *psZSBuffer = NULL; -+ -+ OSLockAcquire(psDevInfo->hLockZSBuffer); -+ -+ dllist_foreach_node(&psDevInfo->sZSBufferHead, psNode, psNext) -+ { -+ RGX_ZSBUFFER_DATA *psThisZSBuffer = IMG_CONTAINER_OF(psNode, RGX_ZSBUFFER_DATA, sNode); -+ -+ if (psThisZSBuffer->ui32ZSBufferID == ui32ZSBufferID) -+ { -+ psZSBuffer = psThisZSBuffer; -+ break; -+ } -+ } -+ -+ OSLockRelease(psDevInfo->hLockZSBuffer); -+ return psZSBuffer; -+} -+ -+void 
RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32ZSBufferID) -+{ -+ IMG_BOOL bBackingDone = IMG_TRUE; -+ RGX_ZSBUFFER_DATA *psZSBuffer; -+ RGXFWIF_KCCB_CMD sTACCBCmd; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psDevInfo); -+ -+ /* scan all deferred allocations */ -+ psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID); -+ -+ if (psZSBuffer == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (Populate)", -+ ui32ZSBufferID)); -+ -+ return; -+ } -+ -+ /* Populate ZLS */ -+ eError = RGXBackingZSBuffer(psZSBuffer); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "Populating ZS-Buffer (ID = 0x%08x) failed (%s)", -+ ui32ZSBufferID, -+ PVRSRVGetErrorString(eError))); -+ bBackingDone = IMG_FALSE; -+ } -+ -+ /* send confirmation */ -+ sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE; -+ sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr; -+ sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = bBackingDone; -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommand(psDevInfo, -+ RGXFWIF_DM_GEOM, -+ &sTACCBCmd, -+ PDUMP_FLAGS_NONE); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ /* Kernel CCB should never fill up, as the FW is processing them right away */ -+ PVR_ASSERT(eError == PVRSRV_OK); -+ -+ psZSBuffer->ui32NumReqByFW++; -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ PVRSRVStatsUpdateZSBufferStats(psDevInfo->psDeviceNode, -+ 0, 1, psZSBuffer->owner); -+#endif -+} -+ -+void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32ZSBufferID) -+{ -+ RGX_ZSBUFFER_DATA *psZSBuffer; -+ RGXFWIF_KCCB_CMD sTACCBCmd; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psDevInfo); -+ -+ /* scan all deferred allocations */ -+ psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID); -+ -+ if (psZSBuffer == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (UnPopulate)", -+ ui32ZSBufferID)); -+ -+ return; -+ } -+ -+ /* Unpopulate ZLS */ -+ eError = RGXUnbackingZSBuffer(psZSBuffer); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "UnPopulating ZS-Buffer (ID = 0x%08x) failed (%s)", -+ ui32ZSBufferID, -+ PVRSRVGetErrorString(eError))); -+ PVR_ASSERT(IMG_FALSE); -+ } -+ -+ /* send confirmation */ -+ sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE; -+ sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr; -+ sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = IMG_TRUE; -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommand(psDevInfo, -+ RGXFWIF_DM_GEOM, -+ &sTACCBCmd, -+ PDUMP_FLAGS_NONE); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ /* Kernel CCB should never fill up, as the FW is processing them right away */ -+ PVR_ASSERT(eError == PVRSRV_OK); -+} -+ -+static -+PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEM_MEMDESC *psAllocatedMemDesc, -+ IMG_UINT32 ui32AllocatedOffset, -+ DEVMEM_MEMDESC *psFWMemContextMemDesc, -+ IMG_DEV_VIRTADDR sVDMCallStackAddr, -+ IMG_UINT32 ui32CallStackDepth, -+ IMG_INT32 i32Priority, -+ IMG_UINT32 ui32MaxDeadlineMS, -+ IMG_UINT64 ui64RobustnessAddress, -+ RGX_COMMON_CONTEXT_INFO *psInfo, -+ RGX_SERVER_RC_TA_DATA 
*psTAData, -+ IMG_UINT32 ui32CCBAllocSizeLog2, -+ IMG_UINT32 ui32CCBMaxAllocSizeLog2, -+ IMG_UINT32 ui32ContextFlags) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGXFWIF_TACTX_STATE *psContextState; -+ IMG_UINT32 uiCoreIdx; -+ PVRSRV_ERROR eError; -+ -+ /* -+ Allocate device memory for the firmware GPU context suspend state. -+ Note: the FW reads/writes the state to memory by accessing the GPU register interface. -+ */ -+ PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware TA context suspend state"); -+ -+ eError = DevmemFwAllocate(psDevInfo, -+ sizeof(RGXFWIF_TACTX_STATE), -+ RGX_FWCOMCTX_ALLOCFLAGS, -+ "FwTAContextState", -+ &psTAData->psContextStateMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate firmware GPU context suspend state (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_tacontextsuspendalloc; -+ } -+ -+ eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc, -+ (void **)&psContextState); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to map firmware render context state (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_suspendcpuvirtacquire; -+ } -+ -+ for (uiCoreIdx = 0; uiCoreIdx < RGX_NUM_GEOM_CORES; uiCoreIdx++) -+ { -+ psContextState->asGeomCore[uiCoreIdx].uTAReg_VDM_CALL_STACK_POINTER_Init = -+ sVDMCallStackAddr.uiAddr + (uiCoreIdx * ui32CallStackDepth * sizeof(IMG_UINT64)); -+ } -+ RGXFwSharedMemCacheOpPtr(psContextState->asGeomCore, FLUSH); -+ -+ DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc); -+ -+ eError = FWCommonContextAllocate(psConnection, -+ psDeviceNode, -+ REQ_TYPE_TA, -+ RGXFWIF_DM_GEOM, -+ NULL, -+ psAllocatedMemDesc, -+ ui32AllocatedOffset, -+ psFWMemContextMemDesc, -+ psTAData->psContextStateMemDesc, -+ ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TA_CCB_SIZE_LOG2, -+ ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_TA_CCB_MAX_SIZE_LOG2, -+ ui32ContextFlags, -+ i32Priority, -+ ui32MaxDeadlineMS, -+ ui64RobustnessAddress, -+ psInfo, -+ &psTAData->psServerCommonContext); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to init TA fw common context (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_tacommoncontext; -+ } -+ -+ /* -+ * Dump the FW 3D context suspend state buffer -+ */ -+#if defined(PDUMP) -+ PDUMPCOMMENT(psDeviceNode, "Dump the TA context suspend state buffer"); -+ DevmemPDumpLoadMem(psTAData->psContextStateMemDesc, -+ 0, -+ sizeof(RGXFWIF_TACTX_STATE), -+ PDUMP_FLAGS_CONTINUOUS); -+#endif -+ -+ psTAData->i32Priority = i32Priority; -+ return PVRSRV_OK; -+ -+fail_tacommoncontext: -+fail_suspendcpuvirtacquire: -+ DevmemFwUnmapAndFree(psDevInfo, psTAData->psContextStateMemDesc); -+fail_tacontextsuspendalloc: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ -+ return eError; -+} -+ -+static -+PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEM_MEMDESC *psAllocatedMemDesc, -+ IMG_UINT32 ui32AllocatedOffset, -+ DEVMEM_MEMDESC *psFWMemContextMemDesc, -+ IMG_INT32 i32Priority, -+ IMG_UINT32 ui32MaxDeadlineMS, -+ IMG_UINT64 ui64RobustnessAddress, -+ RGX_COMMON_CONTEXT_INFO *psInfo, -+ RGX_SERVER_RC_3D_DATA *ps3DData, -+ IMG_UINT32 ui32CCBAllocSizeLog2, -+ IMG_UINT32 ui32CCBMaxAllocSizeLog2, -+ IMG_UINT32 ui32ContextFlags) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_ERROR eError; -+ IMG_UINT uiNumISPStoreRegs; -+ IMG_UINT ui3DRegISPStateStoreSize = 0; -+ -+ /* -+ Allocate device memory for the firmware GPU context suspend state. -+ Note: the FW reads/writes the state to memory by accessing the GPU register interface. -+ */ -+ PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware 3D context suspend state"); -+ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) -+ { -+ uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, -+ RGX_FEATURE_NUM_RASTER_PIPES_IDX); -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) -+ { -+ uiNumISPStoreRegs *= (1U + psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, -+ RGX_FEATURE_XPU_MAX_SLAVES_IDX)); -+ } -+ } -+ else -+ { -+ uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, -+ RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX); -+ } -+ -+ /* Size of the CS buffer */ -+ /* Calculate the size of the 3DCTX ISP state */ -+ ui3DRegISPStateStoreSize = sizeof(RGXFWIF_3DCTX_STATE) + -+ (uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0])); -+ -+ eError = DevmemFwAllocate(psDevInfo, -+ ui3DRegISPStateStoreSize, -+ RGX_FWCOMCTX_ALLOCFLAGS, -+ "Fw3DContextState", -+ &ps3DData->psContextStateMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate firmware GPU context suspend state (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_3dcontextsuspendalloc; -+ } -+ -+ eError = FWCommonContextAllocate(psConnection, -+ psDeviceNode, -+ REQ_TYPE_3D, -+ RGXFWIF_DM_3D, -+ NULL, -+ psAllocatedMemDesc, -+ ui32AllocatedOffset, -+ psFWMemContextMemDesc, -+ ps3DData->psContextStateMemDesc, -+ ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_3D_CCB_SIZE_LOG2, -+ ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_3D_CCB_MAX_SIZE_LOG2, -+ ui32ContextFlags, -+ i32Priority, -+ ui32MaxDeadlineMS, -+ ui64RobustnessAddress, -+ psInfo, -+ &ps3DData->psServerCommonContext); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to init 3D fw common context (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_3dcommoncontext; -+ } -+ -+ /* -+ * Dump the FW 3D context suspend state buffer -+ */ -+ PDUMPCOMMENT(psDeviceNode, "Dump the 3D context suspend state buffer"); -+ DevmemPDumpLoadMem(ps3DData->psContextStateMemDesc, -+ 0, -+ sizeof(RGXFWIF_3DCTX_STATE), -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ ps3DData->i32Priority = i32Priority; -+ return PVRSRV_OK; -+ -+fail_3dcommoncontext: -+ DevmemFwUnmapAndFree(psDevInfo, ps3DData->psContextStateMemDesc); -+fail_3dcontextsuspendalloc: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ -+ return eError; -+} -+ -+ -+/* -+ * PVRSRVRGXCreateRenderContextKM -+ */ -+PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_INT32 i32Priority, -+ IMG_DEV_VIRTADDR sVDMCallStackAddr, -+ IMG_UINT32 ui32CallStackDepth, -+ IMG_UINT32 ui32FrameworkRegisterSize, -+ IMG_PBYTE pabyFrameworkRegisters, -+ IMG_HANDLE hMemCtxPrivData, -+ IMG_UINT32 ui32StaticRenderContextStateSize, -+ IMG_PBYTE pStaticRenderContextState, -+ IMG_UINT32 ui32PackedCCBSizeU8888, -+ IMG_UINT32 ui32ContextFlags, -+ IMG_UINT64 ui64RobustnessAddress, -+ IMG_UINT32 ui32MaxTADeadlineMS, -+ IMG_UINT32 ui32Max3DDeadlineMS, -+ RGX_SERVER_RENDER_CONTEXT **ppsRenderContext) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGX_SERVER_RENDER_CONTEXT *psRenderContext; -+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); -+ RGX_COMMON_CONTEXT_INFO sInfo = {NULL}; -+ RGXFWIF_FWRENDERCONTEXT *psFWRenderContext; -+ -+ *ppsRenderContext = NULL; -+ -+ if (ui32StaticRenderContextStateSize > RGXFWIF_STATIC_RENDERCONTEXT_SIZE) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psRenderContext = OSAllocZMem(sizeof(*psRenderContext)); -+ if (psRenderContext == NULL) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ eError = OSLockCreate(&psRenderContext->hLock); -+ -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_lock; -+ } -+ -+ psRenderContext->psDeviceNode = psDeviceNode; -+ -+ /* -+ Create the FW render context, this has the TA and 3D FW common -+ contexts embedded within it -+ */ -+ eError = DevmemFwAllocate(psDevInfo, -+ sizeof(RGXFWIF_FWRENDERCONTEXT), -+ RGX_FWCOMCTX_ALLOCFLAGS, -+ "FwRenderContext", -+ &psRenderContext->psFWRenderContextMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_fwrendercontext; -+ } -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ WorkEstInitTA3D(psDevInfo, &psRenderContext->sWorkEstData); -+ } -+#endif -+ -+ if (ui32FrameworkRegisterSize) -+ { -+ /* -+ * Create the FW framework buffer -+ */ -+ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, -+ &psRenderContext->psFWFrameworkMemDesc, -+ ui32FrameworkRegisterSize); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate firmware GPU framework state (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_frameworkcreate; -+ } -+ -+ /* Copy the Framework client data into the framework buffer */ -+ eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode, -+ psRenderContext->psFWFrameworkMemDesc, -+ pabyFrameworkRegisters, -+ ui32FrameworkRegisterSize); -+ if (eError != 
PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to populate the framework buffer (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_frameworkcopy; -+ } -+ sInfo.psFWFrameworkMemDesc = psRenderContext->psFWFrameworkMemDesc; -+ } -+ -+ eError = _Create3DContext(psConnection, -+ psDeviceNode, -+ psRenderContext->psFWRenderContextMemDesc, -+ offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), -+ psFWMemContextMemDesc, -+ i32Priority, -+ ui32Max3DDeadlineMS, -+ ui64RobustnessAddress, -+ &sInfo, -+ &psRenderContext->s3DData, -+ U32toU8_Unpack3(ui32PackedCCBSizeU8888), -+ U32toU8_Unpack4(ui32PackedCCBSizeU8888), -+ ui32ContextFlags); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_3dcontext; -+ } -+ -+ eError = _CreateTAContext(psConnection, -+ psDeviceNode, -+ psRenderContext->psFWRenderContextMemDesc, -+ offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), -+ psFWMemContextMemDesc, -+ sVDMCallStackAddr, -+ ui32CallStackDepth, -+ i32Priority, -+ ui32MaxTADeadlineMS, -+ ui64RobustnessAddress, -+ &sInfo, -+ &psRenderContext->sTAData, -+ U32toU8_Unpack1(ui32PackedCCBSizeU8888), -+ U32toU8_Unpack2(ui32PackedCCBSizeU8888), -+ ui32ContextFlags); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_tacontext; -+ } -+ -+ eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc, -+ (void **)&psFWRenderContext); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_acquire_cpu_mapping; -+ } -+ -+ /* Copy the static render context data */ -+ OSDeviceMemCopy(&psFWRenderContext->sStaticRenderContextState, pStaticRenderContextState, ui32StaticRenderContextStateSize); -+#if defined(SUPPORT_TRP) -+ psFWRenderContext->eTRPGeomCoreAffinity = RGXFWIF_DM_MAX; -+#endif -+ DevmemPDumpLoadMem(psRenderContext->psFWRenderContextMemDesc, 0, sizeof(RGXFWIF_FWRENDERCONTEXT), PDUMP_FLAGS_CONTINUOUS); -+ RGXFwSharedMemCacheOpPtr(psFWRenderContext, FLUSH); -+ DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc); -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ psRenderContext->psBufferSyncContext = -+ pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, -+ "rogue-ta3d"); -+ if (IS_ERR(psRenderContext->psBufferSyncContext)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: failed to create buffer_sync context (err=%ld)", -+ __func__, PTR_ERR(psRenderContext->psBufferSyncContext))); -+ -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto fail_buffer_sync_context_create; -+ } -+#endif -+ -+ SyncAddrListInit(&psRenderContext->sSyncAddrListTAFence); -+ SyncAddrListInit(&psRenderContext->sSyncAddrListTAUpdate); -+ SyncAddrListInit(&psRenderContext->sSyncAddrList3DFence); -+ SyncAddrListInit(&psRenderContext->sSyncAddrList3DUpdate); -+ -+ { -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock); -+ dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode)); -+ OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock); -+ } -+ -+ *ppsRenderContext = psRenderContext; -+ return PVRSRV_OK; -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+fail_buffer_sync_context_create: -+#endif -+fail_acquire_cpu_mapping: -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ PVRSRV_ERROR eError2 = _DestroyTAContext(&psRenderContext->sTAData, -+ psDeviceNode); -+ if (!PVRSRVIsRetryError(eError2)) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } -+ END_LOOP_UNTIL_TIMEOUT(); -+fail_tacontext: -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ PVRSRV_ERROR eError2 = _Destroy3DContext(&psRenderContext->s3DData, -+ psRenderContext->psDeviceNode); -+ if 
(!PVRSRVIsRetryError(eError2)) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } -+ END_LOOP_UNTIL_TIMEOUT(); -+fail_3dcontext: -+fail_frameworkcopy: -+ if (psRenderContext->psFWFrameworkMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc); -+ } -+fail_frameworkcreate: -+ DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc); -+fail_fwrendercontext: -+ OSLockDestroy(psRenderContext->hLock); -+fail_lock: -+ OSFreeMem(psRenderContext); -+ PVR_ASSERT(eError != PVRSRV_OK); -+ -+ return eError; -+} -+ -+/* -+ * PVRSRVRGXDestroyRenderContextKM -+ */ -+PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psRenderContext->psDeviceNode->pvDevice; -+ -+ /* remove node from list before calling destroy - as destroy, if successful -+ * will invalidate the node -+ * must be re-added if destroy fails -+ */ -+ OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock); -+ dllist_remove_node(&(psRenderContext->sListNode)); -+ OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock); -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ /* Check psBufferSyncContext has not been destroyed already (by a previous -+ * call to this function which then later returned PVRSRV_ERROR_RETRY) -+ */ -+ if (psRenderContext->psBufferSyncContext != NULL) -+ { -+ pvr_buffer_sync_context_destroy(psRenderContext->psBufferSyncContext); -+ psRenderContext->psBufferSyncContext = NULL; -+ } -+#endif -+ -+ /* Cleanup the TA if we haven't already */ -+ if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_TA_COMPLETE) == 0) -+ { -+ eError = _DestroyTAContext(&psRenderContext->sTAData, -+ psRenderContext->psDeviceNode); -+ if (eError == PVRSRV_OK) -+ { -+ psRenderContext->ui32CleanupStatus |= RC_CLEANUP_TA_COMPLETE; -+ } -+ else -+ { -+ goto e0; -+ } -+ } -+ -+ /* Cleanup the 3D if we haven't already */ -+ if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_3D_COMPLETE) == 0) -+ { -+ eError = _Destroy3DContext(&psRenderContext->s3DData, -+ psRenderContext->psDeviceNode); -+ if (eError == PVRSRV_OK) -+ { -+ psRenderContext->ui32CleanupStatus |= RC_CLEANUP_3D_COMPLETE; -+ } -+ else -+ { -+ goto e0; -+ } -+ } -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ RGXFWIF_FWRENDERCONTEXT *psFWRenderContext; -+ IMG_UINT32 ui32WorkEstCCBSubmitted; -+ -+ eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc, -+ (void **)&psFWRenderContext); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to map firmware render context (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto e0; -+ } -+ RGXFwSharedMemCacheOpValue(psFWRenderContext->ui32WorkEstCCBSubmitted, INVALIDATE); -+ -+ ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted; -+ -+ DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc); -+ -+ /* Check if all of the workload estimation CCB commands for this workload are read */ -+ if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived) -+ { -+ -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", -+ __func__, ui32WorkEstCCBSubmitted, -+ psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)); -+ -+ eError = PVRSRV_ERROR_RETRY; -+ goto e0; -+ } -+ } -+#endif -+ -+ /* -+ Only if both TA and 3D contexts have been cleaned up can we -+ free the shared resources -+ */ -+ if (psRenderContext->ui32CleanupStatus == 
(RC_CLEANUP_3D_COMPLETE | RC_CLEANUP_TA_COMPLETE)) -+ { -+ if (psRenderContext->psFWFrameworkMemDesc) -+ { -+ /* Free the framework buffer */ -+ DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc); -+ } -+ -+ /* Free the firmware render context */ -+ DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc); -+ -+ SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAFence); -+ SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAUpdate); -+ SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DFence); -+ SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DUpdate); -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ WorkEstDeInitTA3D(psDevInfo, &psRenderContext->sWorkEstData); -+ } -+#endif -+ -+ OSLockDestroy(psRenderContext->hLock); -+ -+ OSFreeMem(psRenderContext); -+ } -+ -+ return PVRSRV_OK; -+ -+e0: -+ OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock); -+ dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode)); -+ OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock); -+ return eError; -+} -+ -+ -+#if (ENABLE_TA3D_UFO_DUMP == 1) -+static void DumpUfoList(IMG_UINT32 ui32ClientTAFenceCount, -+ IMG_UINT32 ui32ClientTAUpdateCount, -+ IMG_UINT32 ui32Client3DFenceCount, -+ IMG_UINT32 ui32Client3DUpdateCount, -+ PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress, -+ IMG_UINT32 *paui32ClientTAFenceValue, -+ PRGXFWIF_UFO_ADDR *pauiClientTAUpdateUFOAddress, -+ IMG_UINT32 *paui32ClientTAUpdateValue, -+ PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress, -+ IMG_UINT32 *paui32Client3DFenceValue, -+ PRGXFWIF_UFO_ADDR *pauiClient3DUpdateUFOAddress, -+ IMG_UINT32 *paui32Client3DUpdateValue) -+{ -+ IMG_UINT32 i; -+ -+ PVR_DPF((PVR_DBG_ERROR, "%s: ~~~ After populating sync prims ~~~", -+ __func__)); -+ -+ /* Dump Fence syncs, Update syncs and PR Update syncs */ -+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA fence syncs:", -+ __func__, ui32ClientTAFenceCount)); -+ for (i = 0; i < ui32ClientTAFenceCount; i++) -+ { -+ if (BITMASK_HAS(pauiClientTAFenceUFOAddress->ui32Addr, 1)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %d/%d<%p>. FWAddr=0x%x," -+ " CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", -+ __func__, i + 1, ui32ClientTAFenceCount, -+ (void *) pauiClientTAFenceUFOAddress, -+ pauiClientTAFenceUFOAddress->ui32Addr)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", -+ __func__, i + 1, ui32ClientTAFenceCount, -+ (void *) pauiClientTAFenceUFOAddress, -+ pauiClientTAFenceUFOAddress->ui32Addr, -+ *paui32ClientTAFenceValue, -+ *paui32ClientTAFenceValue)); -+ paui32ClientTAFenceValue++; -+ } -+ pauiClientTAFenceUFOAddress++; -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA update syncs:", -+ __func__, ui32ClientTAUpdateCount)); -+ for (i = 0; i < ui32ClientTAUpdateCount; i++) -+ { -+ if (BITMASK_HAS(pauiClientTAUpdateUFOAddress->ui32Addr, 1)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %d/%d<%p>. FWAddr=0x%x," -+ " UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", -+ __func__, i + 1, ui32ClientTAUpdateCount, -+ (void *) pauiClientTAUpdateUFOAddress, -+ pauiClientTAUpdateUFOAddress->ui32Addr)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %d/%d<%p>. 
FWAddr=0x%x, UpdateValue=%d(0x%x)", -+ __func__, i + 1, ui32ClientTAUpdateCount, -+ (void *) pauiClientTAUpdateUFOAddress, -+ pauiClientTAUpdateUFOAddress->ui32Addr, -+ *paui32ClientTAUpdateValue, -+ *paui32ClientTAUpdateValue)); -+ paui32ClientTAUpdateValue++; -+ } -+ pauiClientTAUpdateUFOAddress++; -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D fence syncs:", -+ __func__, ui32Client3DFenceCount)); -+ for (i = 0; i < ui32Client3DFenceCount; i++) -+ { -+ if (BITMASK_HAS(pauiClient3DFenceUFOAddress->ui32Addr, 1)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %d/%d<%p>. FWAddr=0x%x," -+ " CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", -+ __func__, i + 1, ui32Client3DFenceCount, -+ (void *) pauiClient3DFenceUFOAddress, -+ pauiClient3DFenceUFOAddress->ui32Addr)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", -+ __func__, i + 1, ui32Client3DFenceCount, -+ (void *) pauiClient3DFenceUFOAddress, -+ pauiClient3DFenceUFOAddress->ui32Addr, -+ *paui32Client3DFenceValue, -+ *paui32Client3DFenceValue)); -+ paui32Client3DFenceValue++; -+ } -+ pauiClient3DFenceUFOAddress++; -+ } -+ -+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D update syncs:", -+ __func__, ui32Client3DUpdateCount)); -+ for (i = 0; i < ui32Client3DUpdateCount; i++) -+ { -+ if (BITMASK_HAS(pauiClient3DUpdateUFOAddress->ui32Addr, 1)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %d/%d<%p>. FWAddr=0x%x," -+ " UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", -+ __func__, i + 1, ui32Client3DUpdateCount, -+ (void *) pauiClient3DUpdateUFOAddress, -+ pauiClient3DUpdateUFOAddress->ui32Addr)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)", -+ __func__, i + 1, ui32Client3DUpdateCount, -+ (void *) pauiClient3DUpdateUFOAddress, -+ pauiClient3DUpdateUFOAddress->ui32Addr, -+ *paui32Client3DUpdateValue, -+ *paui32Client3DUpdateValue)); -+ paui32Client3DUpdateValue++; -+ } -+ pauiClient3DUpdateUFOAddress++; -+ } -+} -+#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */ -+ -+/* -+ * PVRSRVRGXKickTA3DKM -+ */ -+PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, -+ IMG_UINT32 ui32ClientTAFenceCount, -+ SYNC_PRIMITIVE_BLOCK **apsClientTAFenceSyncPrimBlock, -+ IMG_UINT32 *paui32ClientTAFenceSyncOffset, -+ IMG_UINT32 *paui32ClientTAFenceValue, -+ IMG_UINT32 ui32ClientTAUpdateCount, -+ SYNC_PRIMITIVE_BLOCK **apsClientTAUpdateSyncPrimBlock, -+ IMG_UINT32 *paui32ClientTAUpdateSyncOffset, -+ IMG_UINT32 *paui32ClientTAUpdateValue, -+ IMG_UINT32 ui32Client3DUpdateCount, -+ SYNC_PRIMITIVE_BLOCK **apsClient3DUpdateSyncPrimBlock, -+ IMG_UINT32 *paui32Client3DUpdateSyncOffset, -+ IMG_UINT32 *paui32Client3DUpdateValue, -+ SYNC_PRIMITIVE_BLOCK *psPRFenceSyncPrimBlock, -+ IMG_UINT32 ui32PRFenceSyncOffset, -+ IMG_UINT32 ui32PRFenceValue, -+ PVRSRV_FENCE iCheckTAFence, -+ PVRSRV_TIMELINE iUpdateTATimeline, -+ PVRSRV_FENCE *piUpdateTAFence, -+ IMG_CHAR szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH], -+ PVRSRV_FENCE iCheck3DFence, -+ PVRSRV_TIMELINE iUpdate3DTimeline, -+ PVRSRV_FENCE *piUpdate3DFence, -+ IMG_CHAR szFenceName3D[PVRSRV_SYNC_NAME_LENGTH], -+ IMG_UINT32 ui32TACmdSize, -+ IMG_PBYTE pui8TADMCmd, -+ IMG_UINT32 ui323DPRCmdSize, -+ IMG_PBYTE pui83DPRDMCmd, -+ IMG_UINT32 ui323DCmdSize, -+ IMG_PBYTE pui83DDMCmd, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_BOOL bKickTA, -+ IMG_BOOL bKickPR, -+ IMG_BOOL bKick3D, -+ IMG_BOOL bAbort, -+ IMG_UINT32 ui32PDumpFlags, -+ RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, -+ RGX_ZSBUFFER_DATA *psZSBuffer, -+ RGX_ZSBUFFER_DATA *psMSAAScratchBuffer, 
-+ IMG_UINT32 ui32SyncPMRCount, -+ IMG_UINT32 *paui32SyncPMRFlags, -+ PMR **ppsSyncPMRs, -+ IMG_UINT32 ui32RenderTargetSize, -+ IMG_UINT32 ui32NumberOfDrawCalls, -+ IMG_UINT32 ui32NumberOfIndices, -+ IMG_UINT32 ui32NumberOfMRTs, -+ IMG_UINT64 ui64DeadlineInus) -+{ -+ /* per-context helper structures */ -+ RGX_CCB_CMD_HELPER_DATA *pasTACmdHelperData = psRenderContext->asTACmdHelperData; -+ RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelperData = psRenderContext->as3DCmdHelperData; -+ -+ IMG_UINT32 ui32TACmdCount=0; -+ IMG_UINT32 ui323DCmdCount=0; -+ IMG_UINT32 ui32TACmdOffset=0; -+ IMG_UINT32 ui323DCmdOffset=0; -+ RGXFWIF_UFO sPRUFO; -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_ERROR eError2 = PVRSRV_OK; -+ -+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psRenderContext->s3DData.psServerCommonContext); -+ IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); -+ IMG_BOOL bCCBStateOpen = IMG_FALSE; -+ -+ IMG_UINT32 ui32ClientPRUpdateCount = 0; -+ PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress = NULL; -+ IMG_UINT32 *paui32ClientPRUpdateValue = NULL; -+ -+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr; -+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr; -+ PRGXFWIF_UFO_ADDR pRMWUFOAddr; -+ -+ PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress = NULL; -+ PRGXFWIF_UFO_ADDR *pauiClientTAUpdateUFOAddress = NULL; -+ PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress = NULL; -+ PRGXFWIF_UFO_ADDR *pauiClient3DUpdateUFOAddress = NULL; -+ PRGXFWIF_UFO_ADDR uiPRFenceUFOAddress; -+ -+ IMG_UINT64 uiCheckTAFenceUID = 0; -+ IMG_UINT64 uiCheck3DFenceUID = 0; -+ IMG_UINT64 uiUpdateTAFenceUID = 0; -+ IMG_UINT64 uiUpdate3DFenceUID = 0; -+ -+ IMG_BOOL bUseCombined3DAnd3DPR = bKickPR && bKick3D && !pui83DPRDMCmd; -+ -+ RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData; -+ RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData; -+ IMG_BOOL bUseSingleFWCommand = bKickTA && (bKickPR || bKick3D); -+ -+ IMG_UINT32 ui32TACmdSizeTmp = 0, ui323DCmdSizeTmp = 0; -+ -+ IMG_BOOL bTAFenceOnSyncCheckpointsOnly = IMG_FALSE; -+ -+ PVRSRV_FENCE iUpdateTAFence = PVRSRV_NO_FENCE; -+ PVRSRV_FENCE iUpdate3DFence = PVRSRV_NO_FENCE; -+ -+ IMG_BOOL b3DFenceOnSyncCheckpointsOnly = IMG_FALSE; -+ IMG_UINT32 ui32TAFenceTimelineUpdateValue = 0; -+ IMG_UINT32 ui323DFenceTimelineUpdateValue = 0; -+ -+ /* -+ * Count of the number of TA and 3D update values (may differ from number of -+ * TA and 3D updates later, as sync checkpoints do not need to specify a value) -+ */ -+ IMG_UINT32 ui32ClientPRUpdateValueCount = 0; -+ IMG_UINT32 ui32ClientTAUpdateValueCount = ui32ClientTAUpdateCount; -+ IMG_UINT32 ui32Client3DUpdateValueCount = ui32Client3DUpdateCount; -+ PSYNC_CHECKPOINT *apsFenceTASyncCheckpoints = NULL; /*!< TA fence checkpoints */ -+ PSYNC_CHECKPOINT *apsFence3DSyncCheckpoints = NULL; /*!< 3D fence checkpoints */ -+ IMG_UINT32 ui32FenceTASyncCheckpointCount = 0; -+ IMG_UINT32 ui32Fence3DSyncCheckpointCount = 0; -+ PSYNC_CHECKPOINT psUpdateTASyncCheckpoint = NULL; /*!< TA update checkpoint (output) */ -+ PSYNC_CHECKPOINT psUpdate3DSyncCheckpoint = NULL; /*!< 3D update checkpoint (output) */ -+ PVRSRV_CLIENT_SYNC_PRIM *psTAFenceTimelineUpdateSync = NULL; -+ PVRSRV_CLIENT_SYNC_PRIM *ps3DFenceTimelineUpdateSync = NULL; -+ void *pvTAUpdateFenceFinaliseData = NULL; -+ void *pv3DUpdateFenceFinaliseData = NULL; -+ -+ RGX_SYNC_DATA sTASyncData = {NULL}; /*!< Contains internal update syncs for TA */ -+ RGX_SYNC_DATA s3DSyncData = {NULL}; /*!< Contains internal update syncs for 3D */ -+ -+ IMG_BOOL bTestSLRAdd3DCheck = IMG_FALSE; -+#if 
defined(SUPPORT_VALIDATION) -+ PVRSRV_FENCE hTestSLRTmpFence = PVRSRV_NO_FENCE; -+ PSYNC_CHECKPOINT psDummySyncCheckpoint = NULL; -+#endif -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL; -+ IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0; -+ PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL; -+ struct pvr_buffer_sync_append_data *psBufferSyncData = NULL; -+#endif /* defined(SUPPORT_BUFFER_SYNC) */ -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTA = {0}; -+ RGXFWIF_WORKEST_KICK_DATA sWorkloadKickData3D = {0}; -+ IMG_UINT32 ui32TACommandOffset = 0; -+ IMG_UINT32 ui323DCommandOffset = 0; -+ IMG_UINT32 ui32TACmdHeaderOffset = 0; -+ IMG_UINT32 ui323DCmdHeaderOffset = 0; -+ IMG_UINT32 ui323DFullRenderCommandOffset = 0; -+ IMG_UINT32 ui32TACmdOffsetWrapCheck = 0; -+ IMG_UINT32 ui323DCmdOffsetWrapCheck = 0; -+ RGX_WORKLOAD sWorkloadCharacteristics = {0}; -+#endif -+ -+ IMG_UINT32 ui32TAFenceCount, ui323DFenceCount; -+ IMG_UINT32 ui32TAUpdateCount, ui323DUpdateCount; -+ IMG_UINT32 ui32PRUpdateCount; -+ -+ IMG_PID uiCurrentProcess = OSGetCurrentClientProcessIDKM(); -+ -+ IMG_UINT32 ui32Client3DFenceCount = 0; -+ -+ /* Ensure we haven't been given a null ptr to -+ * TA fence values if we have been told we -+ * have TA sync prim fences -+ */ -+ if (ui32ClientTAFenceCount > 0) -+ { -+ PVR_LOG_RETURN_IF_FALSE(paui32ClientTAFenceValue != NULL, -+ "paui32ClientTAFenceValue NULL but " -+ "ui32ClientTAFenceCount > 0", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ /* Ensure we haven't been given a null ptr to -+ * TA update values if we have been told we -+ * have TA updates -+ */ -+ if (ui32ClientTAUpdateCount > 0) -+ { -+ PVR_LOG_RETURN_IF_FALSE(paui32ClientTAUpdateValue != NULL, -+ "paui32ClientTAUpdateValue NULL but " -+ "ui32ClientTAUpdateCount > 0", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ /* Ensure we haven't been given a null ptr to -+ * 3D update values if we have been told we -+ * have 3D updates -+ */ -+ if (ui32Client3DUpdateCount > 0) -+ { -+ PVR_LOG_RETURN_IF_FALSE(paui32Client3DUpdateValue != NULL, -+ "paui32Client3DUpdateValue NULL but " -+ "ui32Client3DUpdateCount > 0", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ /* Write FW addresses into CMD SHARED BLOCKs */ -+ { -+ CMDTA3D_SHARED *psGeomCmdShared = (CMDTA3D_SHARED *)pui8TADMCmd; -+ CMDTA3D_SHARED *ps3DCmdShared = (CMDTA3D_SHARED *)pui83DDMCmd; -+ CMDTA3D_SHARED *psPR3DCmdShared = (CMDTA3D_SHARED *)pui83DPRDMCmd; -+ -+ if (psKMHWRTDataSet == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "KMHWRTDataSet is a null-pointer")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* Write FW address for TA CMD -+ */ -+ if (psGeomCmdShared != NULL) -+ { -+ psGeomCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; -+ -+ if (psZSBuffer != NULL) -+ { -+ psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; -+ } -+ if (psMSAAScratchBuffer != NULL) -+ { -+ psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; -+ } -+ } -+ -+ /* Write FW address for 3D CMD -+ */ -+ if (ps3DCmdShared != NULL) -+ { -+ ps3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; -+ -+ if (psZSBuffer != NULL) -+ { -+ ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; -+ } -+ if (psMSAAScratchBuffer != NULL) -+ { -+ ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; -+ } -+ } -+ -+ /* Write FW address for PR3D CMD -+ */ 
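/* Note (descriptive, inferred from the surrounding code rather than stated by it):
 * the same CMDTA3D_SHARED header is written into each of the geometry, 3D and
 * PR 3D commands in this block - the HWRTData FW address plus the optional ZS
 * and MSAA-scratch PR buffer addresses - so whichever of the three commands the
 * firmware ends up executing carries the same render-target references.
 */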
-+ if (psPR3DCmdShared != NULL) -+ { -+ psPR3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; -+ -+ if (psZSBuffer != NULL) -+ { -+ psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; -+ } -+ if (psMSAAScratchBuffer != NULL) -+ { -+ psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; -+ } -+ } -+ } -+ -+ if (unlikely(iUpdateTATimeline >= 0 && !piUpdateTAFence)) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ if (unlikely(iUpdate3DTimeline >= 0 && !piUpdate3DFence)) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d, " -+ "ui32Client3DFenceCount=%d, ui32Client3DUpdateCount=%d", -+ __func__, -+ ui32ClientTAFenceCount, ui32ClientTAUpdateCount, -+ ui32Client3DFenceCount, ui32Client3DUpdateCount)); -+ -+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRenderContext->psDeviceNode->pvDevice, -+ &pPreAddr, -+ &pPostAddr, -+ &pRMWUFOAddr); -+ -+ /* Double-check we have a PR kick if there are client fences */ -+ if (unlikely(!bKickPR && ui32Client3DFenceCount != 0)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: 3D fence passed without a PR kick", -+ __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* Ensure the string is null-terminated (Required for safety) */ -+ szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; -+ szFenceName3D[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; -+ -+ OSLockAcquire(psRenderContext->hLock); -+ -+ ui32TAFenceCount = ui32ClientTAFenceCount; -+ ui323DFenceCount = ui32Client3DFenceCount; -+ ui32TAUpdateCount = ui32ClientTAUpdateCount; -+ ui323DUpdateCount = ui32Client3DUpdateCount; -+ ui32PRUpdateCount = ui32ClientPRUpdateCount; -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ if (ui32SyncPMRCount) -+ { -+ int err; -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling" -+ " pvr_buffer_sync_resolve_and_create_fences", __func__)); -+ -+ err = pvr_buffer_sync_resolve_and_create_fences( -+ psRenderContext->psBufferSyncContext, -+ psRenderContext->psDeviceNode->hSyncCheckpointContext, -+ ui32SyncPMRCount, -+ ppsSyncPMRs, -+ paui32SyncPMRFlags, -+ &ui32BufferFenceSyncCheckpointCount, -+ &apsBufferFenceSyncCheckpoints, -+ &psBufferUpdateSyncCheckpoint, -+ &psBufferSyncData -+ ); -+ -+ if (unlikely(err)) -+ { -+ switch (err) -+ { -+ case -EINTR: -+ eError = PVRSRV_ERROR_RETRY; -+ break; -+ case -ENOMEM: -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ break; -+ default: -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ break; -+ } -+ -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: " -+ "pvr_buffer_sync_resolve_and_create_fences failed (%d)", -+ __func__, eError)); -+ } -+ OSLockRelease(psRenderContext->hLock); -+ -+ return eError; -+ } -+ -+#if !defined(SUPPORT_STRIP_RENDERING) -+ if (bKickTA) -+ { -+ ui32TAFenceCount += ui32BufferFenceSyncCheckpointCount; -+ } -+ else -+ { -+ ui323DFenceCount += ui32BufferFenceSyncCheckpointCount; -+ } -+#else /* !defined(SUPPORT_STRIP_RENDERING) */ -+ ui323DFenceCount += ui32BufferFenceSyncCheckpointCount; -+ -+ PVR_UNREFERENCED_PARAMETER(bTAFenceOnSyncCheckpointsOnly); -+#endif /* !defined(SUPPORT_STRIP_RENDERING) */ -+ -+ if (psBufferUpdateSyncCheckpoint != NULL) -+ { -+ if (bKick3D) -+ { -+ ui323DUpdateCount++; -+ } -+ else -+ { -+ ui32PRUpdateCount++; -+ } -+ } -+ } -+#endif /* defined(SUPPORT_BUFFER_SYNC) */ -+ -+#if !defined(UPDATE_FENCE_CHECKPOINT_COUNT) || UPDATE_FENCE_CHECKPOINT_COUNT != 1 && UPDATE_FENCE_CHECKPOINT_COUNT != 2 -+#error "Invalid value for 
UPDATE_FENCE_CHECKPOINT_COUNT. Must be either 1 or 2." -+#endif /* !defined(UPDATE_FENCE_CHECKPOINT_COUNT) || UPDATE_FENCE_CHECKPOINT_COUNT != 1 && UPDATE_FENCE_CHECKPOINT_COUNT != 2 */ -+ -+ if (iCheckTAFence != PVRSRV_NO_FENCE) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence[TA]" -+ " (iCheckFence=%d)," -+ " psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...", -+ __func__, iCheckTAFence, -+ (void *) psRenderContext->psDeviceNode->hSyncCheckpointContext)); -+ -+ /* Resolve the sync checkpoints that make up the input fence */ -+ eError = SyncCheckpointResolveFence( -+ psRenderContext->psDeviceNode->hSyncCheckpointContext, -+ iCheckTAFence, -+ &ui32FenceTASyncCheckpointCount, -+ &apsFenceTASyncCheckpoints, -+ &uiCheckTAFenceUID, -+ ui32PDumpFlags); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", -+ __func__, eError)); -+ goto fail_resolve_input_ta_fence; -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d " -+ "checkpoints (apsFenceSyncCheckpoints=<%p>)", -+ __func__, iCheckTAFence, ui32FenceTASyncCheckpointCount, -+ (void *) apsFenceTASyncCheckpoints)); -+ -+#if defined(TA3D_CHECKPOINT_DEBUG) -+ if (apsFenceTASyncCheckpoints) -+ { -+ _DebugSyncCheckpoints(__func__, "TA", apsFenceTASyncCheckpoints, -+ ui32FenceTASyncCheckpointCount); -+ } -+#endif /* defined(TA3D_CHECKPOINT_DEBUG) */ -+ } -+ -+ if (iCheck3DFence != PVRSRV_NO_FENCE) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence[3D]" -+ " (iCheckFence=%d), " -+ "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...", -+ __func__, iCheck3DFence, -+ (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext)); -+ -+ /* Resolve the sync checkpoints that make up the input fence */ -+ eError = SyncCheckpointResolveFence( -+ psRenderContext->psDeviceNode->hSyncCheckpointContext, -+ iCheck3DFence, -+ &ui32Fence3DSyncCheckpointCount, -+ &apsFence3DSyncCheckpoints, -+ &uiCheck3DFenceUID, -+ ui32PDumpFlags); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", -+ __func__, eError)); -+ goto fail_resolve_input_3d_fence; -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d " -+ "checkpoints (apsFenceSyncCheckpoints=<%p>)", -+ __func__, iCheck3DFence, ui32Fence3DSyncCheckpointCount, -+ (void*)apsFence3DSyncCheckpoints)); -+ -+#if defined(TA3D_CHECKPOINT_DEBUG) -+ if (apsFence3DSyncCheckpoints) -+ { -+ _DebugSyncCheckpoints(__func__, "3D", apsFence3DSyncCheckpoints, -+ ui32Fence3DSyncCheckpointCount); -+ } -+#endif /* defined(TA3D_CHECKPOINT_DEBUG) */ -+ } -+ -+ if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 || -+ iCheck3DFence >= 0 || iUpdate3DTimeline >= 0) -+ { -+ IMG_UINT32 i; -+ -+ if (bKickTA) -+ { -+ ui32TAFenceCount += ui32FenceTASyncCheckpointCount; -+ -+ for (i = 0; i < ui32Fence3DSyncCheckpointCount; i++) -+ { -+ if (SyncCheckpointGetCreator(apsFence3DSyncCheckpoints[i]) != -+ uiCurrentProcess) -+ { -+ ui32TAFenceCount++; -+ } -+ } -+ } -+ -+ if (bKick3D) -+ { -+ ui323DFenceCount += ui32Fence3DSyncCheckpointCount; -+ } -+ -+ ui32TAUpdateCount += iUpdateTATimeline != PVRSRV_NO_TIMELINE ? -+ UPDATE_FENCE_CHECKPOINT_COUNT : 0; -+ ui323DUpdateCount += iUpdate3DTimeline != PVRSRV_NO_TIMELINE ? -+ UPDATE_FENCE_CHECKPOINT_COUNT : 0; -+ ui32PRUpdateCount += iUpdate3DTimeline != PVRSRV_NO_TIMELINE && !bKick3D ? 
-+ UPDATE_FENCE_CHECKPOINT_COUNT : 0; -+ } -+ -+#if defined(SUPPORT_VALIDATION) -+ /* Check if TestingSLR is adding an extra sync checkpoint to the -+ * 3D fence check (which we won't signal) -+ */ -+ if ((psDevInfo->ui32TestSLRInterval > 0) && -+ (--psDevInfo->ui32TestSLRCount == 0)) -+ { -+ bTestSLRAdd3DCheck = IMG_TRUE; -+ psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval; -+ } -+ -+ if ((bTestSLRAdd3DCheck) && (iUpdate3DTimeline != PVRSRV_NO_TIMELINE)) -+ { -+ if (iUpdate3DTimeline == PVRSRV_NO_TIMELINE) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Would append additional SLR checkpoint " -+ "to 3D fence but no update 3D timeline provided", __func__)); -+ } -+ else -+ { -+ SyncCheckpointAlloc(psRenderContext->psDeviceNode->hSyncCheckpointContext, -+ iUpdate3DTimeline, -+ hTestSLRTmpFence, -+ "TestSLRCheck", -+ &psDummySyncCheckpoint); -+ PVR_DPF((PVR_DBG_WARNING, "%s: Appending additional SLR checkpoint to 3D fence " -+ "checkpoints (psDummySyncCheckpoint=<%p>)", -+ __func__, (void*)psDummySyncCheckpoint)); -+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence, -+ 1, -+ &psDummySyncCheckpoint); -+ if (!pauiClient3DFenceUFOAddress) -+ { -+ pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; -+ } -+ -+ if (ui32Client3DFenceCount == 0) -+ { -+ b3DFenceOnSyncCheckpointsOnly = IMG_TRUE; -+ } -+ ui323DFenceCount++; -+ } -+ } -+#endif /* defined(SUPPORT_VALIDATION) */ -+ -+ if (bKickTA) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," -+ " ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d", -+ __func__, ui32TAFenceCount, ui32TAUpdateCount)); -+ -+ RGXCmdHelperInitCmdCCB_CommandSize( -+ psDevInfo, -+ 0, -+ ui32TAFenceCount, -+ ui32TAUpdateCount, -+ ui32TACmdSize, -+ &pPreAddr, -+ (bKick3D ? NULL : &pPostAddr), -+ (bKick3D ? NULL : &pRMWUFOAddr), -+ pasTACmdHelperData -+ ); -+ } -+ -+ if (bKickPR) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," -+ " ui32Client3DFenceCount=%d", __func__, -+ ui323DFenceCount)); -+ -+ RGXCmdHelperInitCmdCCB_CommandSize( -+ psDevInfo, -+ 0, -+ ui323DFenceCount, -+ 0, -+ sizeof(sPRUFO), -+ NULL, -+ NULL, -+ NULL, -+ &pas3DCmdHelperData[ui323DCmdCount++] -+ ); -+ } -+ -+ if (bKickPR && !bUseCombined3DAnd3DPR) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," -+ " ui32PRUpdateCount=%d", __func__, -+ ui32PRUpdateCount)); -+ -+ RGXCmdHelperInitCmdCCB_CommandSize( -+ psDevInfo, -+ 0, -+ 0, -+ ui32PRUpdateCount, -+ /* if the client has not provided a 3DPR command, the regular 3D -+ * command should be used instead */ -+ pui83DPRDMCmd ? ui323DPRCmdSize : ui323DCmdSize, -+ NULL, -+ NULL, -+ NULL, -+ &pas3DCmdHelperData[ui323DCmdCount++] -+ ); -+ } -+ -+ if (bKick3D || bAbort) -+ { -+ if (!bKickTA) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," -+ " ui32Client3DFenceCount=%d", __func__, -+ ui323DFenceCount)); -+ } -+ -+ RGXCmdHelperInitCmdCCB_CommandSize( -+ psDevInfo, -+ 0, -+ bKickTA ? 0 : ui323DFenceCount, -+ ui323DUpdateCount, -+ ui323DCmdSize, -+ (bKickTA ? 
NULL : & pPreAddr), -+ &pPostAddr, -+ &pRMWUFOAddr, -+ &pas3DCmdHelperData[ui323DCmdCount++] -+ ); -+ } -+ -+ if (bKickTA) -+ { -+ ui32TACmdSizeTmp = RGXCmdHelperGetCommandSize(1, pasTACmdHelperData); -+ -+ eError = RGXCheckSpaceCCB( -+ FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext), -+ ui32TACmdSizeTmp -+ ); -+ if (eError != PVRSRV_OK) -+ { -+ goto err_not_enough_space; -+ } -+ } -+ -+ if (ui323DCmdCount > 0) -+ { -+ ui323DCmdSizeTmp = RGXCmdHelperGetCommandSize(ui323DCmdCount, pas3DCmdHelperData); -+ -+ eError = RGXCheckSpaceCCB( -+ FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext), -+ ui323DCmdSizeTmp -+ ); -+ if (eError != PVRSRV_OK) -+ { -+ goto err_not_enough_space; -+ } -+ } -+ -+ /* need to reset the counter here */ -+ -+ ui323DCmdCount = 0; -+ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAFence, %d fences)...", -+ __func__, ui32ClientTAFenceCount)); -+ eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAFence, -+ ui32ClientTAFenceCount, -+ apsClientTAFenceSyncPrimBlock, -+ paui32ClientTAFenceSyncOffset); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ goto err_populate_sync_addr_list_ta_fence; -+ } -+ -+ if (ui32ClientTAFenceCount) -+ { -+ pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: pauiClientTAFenceUFOAddress=<%p> ", -+ __func__, (void*)pauiClientTAFenceUFOAddress)); -+ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAUpdate, %d updates)...", -+ __func__, ui32ClientTAUpdateCount)); -+ eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAUpdate, -+ ui32ClientTAUpdateCount, -+ apsClientTAUpdateSyncPrimBlock, -+ paui32ClientTAUpdateSyncOffset); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ goto err_populate_sync_addr_list_ta_update; -+ } -+ -+ if (ui32ClientTAUpdateCount) -+ { -+ pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs; -+ } -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: pauiClientTAUpdateUFOAddress=<%p> ", -+ __func__, (void*)pauiClientTAUpdateUFOAddress)); -+ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DFence, %d fences)...", -+ __func__, ui32Client3DFenceCount)); -+ eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DFence, -+ ui32Client3DFenceCount, -+ NULL, -+ NULL); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ goto err_populate_sync_addr_list_3d_fence; -+ } -+ -+ if (ui32Client3DFenceCount) -+ { -+ pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; -+ } -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DFenceUFOAddress=<%p> ", -+ __func__, (void*)pauiClient3DFenceUFOAddress)); -+ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DUpdate, %d updates)...", -+ __func__, ui32Client3DUpdateCount)); -+ eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DUpdate, -+ ui32Client3DUpdateCount, -+ apsClient3DUpdateSyncPrimBlock, -+ paui32Client3DUpdateSyncOffset); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ goto err_populate_sync_addr_list_3d_update; -+ } -+ -+ if (ui32Client3DUpdateCount || (iUpdate3DTimeline != PVRSRV_NO_TIMELINE && piUpdate3DFence && bKick3D)) -+ { -+ pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; -+ } -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DUpdateUFOAddress=<%p> ", -+ __func__, (void*)pauiClient3DUpdateUFOAddress)); -+ -+ eError = 
SyncPrimitiveBlockToFWAddr(psPRFenceSyncPrimBlock, ui32PRFenceSyncOffset, &uiPRFenceUFOAddress); -+ -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ goto err_pr_fence_address; -+ } -+ -+#if (ENABLE_TA3D_UFO_DUMP == 1) -+ DumpUfoList(ui32ClientTAFenceCount, ui32ClientTAUpdateCount, -+ ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0), -+ ui32Client3DUpdateCount, -+ pauiClientTAFenceUFOAddress, paui32ClientTAFenceValue, -+ pauiClientTAUpdateUFOAddress, paui32ClientTAUpdateValue, -+ pauiClient3DFenceUFOAddress, NULL, -+ pauiClient3DUpdateUFOAddress, paui32Client3DUpdateValue); -+#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */ -+ -+ if (ui32SyncPMRCount) -+ { -+#if defined(SUPPORT_BUFFER_SYNC) -+#if !defined(SUPPORT_STRIP_RENDERING) -+ /* Append buffer sync fences to TA fences */ -+ if (ui32BufferFenceSyncCheckpointCount > 0 && bKickTA) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Append %d buffer sync checkpoints to TA Fence " -+ "(&psRenderContext->sSyncAddrListTAFence=<%p>, " -+ "pauiClientTAFenceUFOAddress=<%p>)...", -+ __func__, -+ ui32BufferFenceSyncCheckpointCount, -+ (void*)&psRenderContext->sSyncAddrListTAFence , -+ (void*)pauiClientTAFenceUFOAddress)); -+ SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrListTAFence, -+ ui32BufferFenceSyncCheckpointCount, -+ apsBufferFenceSyncCheckpoints); -+ if (apsBufferFenceSyncCheckpoints) -+ { -+ kfree(apsBufferFenceSyncCheckpoints); -+ apsBufferFenceSyncCheckpoints = NULL; -+ } -+ if (!pauiClientTAFenceUFOAddress) -+ { -+ pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; -+ } -+ if (ui32ClientTAFenceCount == 0) -+ { -+ bTAFenceOnSyncCheckpointsOnly = IMG_TRUE; -+ } -+ ui32ClientTAFenceCount += ui32BufferFenceSyncCheckpointCount; -+ } -+ else -+#endif /* !defined(SUPPORT_STRIP_RENDERING) */ -+ /* Append buffer sync fences to 3D fences */ -+ if (ui32BufferFenceSyncCheckpointCount > 0) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Append %d buffer sync checkpoints to 3D Fence " -+ "(&psRenderContext->sSyncAddrList3DFence=<%p>, " -+ "pauiClient3DFenceUFOAddress=<%p>)...", -+ __func__, -+ ui32BufferFenceSyncCheckpointCount, -+ (void*)&psRenderContext->sSyncAddrList3DFence, -+ (void*)pauiClient3DFenceUFOAddress)); -+ SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrList3DFence, -+ ui32BufferFenceSyncCheckpointCount, -+ apsBufferFenceSyncCheckpoints); -+ if (apsBufferFenceSyncCheckpoints) -+ { -+ kfree(apsBufferFenceSyncCheckpoints); -+ apsBufferFenceSyncCheckpoints = NULL; -+ } -+ if (!pauiClient3DFenceUFOAddress) -+ { -+ pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; -+ } -+ if (ui32Client3DFenceCount == 0) -+ { -+ b3DFenceOnSyncCheckpointsOnly = IMG_TRUE; -+ } -+ ui32Client3DFenceCount += ui32BufferFenceSyncCheckpointCount; -+ } -+ -+ if (psBufferUpdateSyncCheckpoint) -+ { -+ /* If we have a 3D kick append update to the 3D updates else append to the PR update */ -+ if (bKick3D) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Append 1 buffer sync checkpoint<%p> to 3D Update" -+ " (&psRenderContext->sSyncAddrList3DUpdate=<%p>," -+ " pauiClient3DUpdateUFOAddress=<%p>)...", -+ __func__, -+ (void*)psBufferUpdateSyncCheckpoint, -+ (void*)&psRenderContext->sSyncAddrList3DUpdate, -+ (void*)pauiClient3DUpdateUFOAddress)); -+ /* Append buffer sync update to 3D updates */ -+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, -+ 1, -+ &psBufferUpdateSyncCheckpoint); -+ if (!pauiClient3DUpdateUFOAddress) -+ { -+ pauiClient3DUpdateUFOAddress = 
psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; -+ } -+ ui32Client3DUpdateCount++; -+ } -+ else -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Append 1 buffer sync checkpoint<%p> to PR Update" -+ " (&psRenderContext->sSyncAddrList3DUpdate=<%p>," -+ " pauiClientPRUpdateUFOAddress=<%p>)...", -+ __func__, -+ (void*)psBufferUpdateSyncCheckpoint, -+ (void*)&psRenderContext->sSyncAddrList3DUpdate, -+ (void*)pauiClientPRUpdateUFOAddress)); -+ /* Attach update to the 3D (used for PR) Updates */ -+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, -+ 1, -+ &psBufferUpdateSyncCheckpoint); -+ if (!pauiClientPRUpdateUFOAddress) -+ { -+ pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; -+ } -+ ui32ClientPRUpdateCount++; -+ } -+ } -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: (after buffer_sync) ui32ClientTAFenceCount=%d, " -+ "ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, " -+ "ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,", -+ __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount, -+ ui32Client3DFenceCount, ui32Client3DUpdateCount, -+ ui32ClientPRUpdateCount)); -+ -+#else /* defined(SUPPORT_BUFFER_SYNC) */ -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Buffer sync not supported but got %u buffers", -+ __func__, ui32SyncPMRCount)); -+ -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto err_no_buffer_sync_invalid_params; -+#endif /* defined(SUPPORT_BUFFER_SYNC) */ -+ } -+ -+ /* -+ * The hardware requires a PR to be submitted if there is a TA (otherwise -+ * it can wedge if we run out of PB space with no PR to run) -+ * -+ * If we only have a TA, attach native checks to the TA and updates to the PR -+ * If we have a TA and 3D, attach checks to TA, updates to 3D -+ * If we only have a 3D, attach checks and updates to the 3D -+ * -+ * Note that 'updates' includes the cleanup syncs for 'check' fence FDs, in -+ * addition to the update fence FD (if supplied) -+ * -+ * Currently, the client driver never kicks only the 3D, so we only support -+ * that for the time being. 
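 *
 * Summary of the attachment policy implemented further below (sketch only; the
 * attach_*() helpers are hypothetical shorthand, not real functions in this file):
 *
 *   if (bKickTA) {
 *       attach_checks(TA,  checkpoints resolved from the input fence);   // "native checks"
 *       attach_update(TA,  TA output fence checkpoint);
 *       if (!bKick3D)
 *           attach_update(PR, 3D output fence checkpoint);  // TA-only kick: the PR is the
 *                                                           // only command on the 3D DM
 *   }
 *   if (bKick3D) {
 *       attach_checks(3D, checkpoints resolved from the 3D input fence);
 *       attach_update(3D, 3D output fence checkpoint);
 *   }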
-+ */ -+ if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 || -+ iCheck3DFence >= 0 || iUpdate3DTimeline >= 0) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: [TA] iCheckFence = %d, iUpdateTimeline = %d", -+ __func__, iCheckTAFence, iUpdateTATimeline)); -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: [3D] iCheckFence = %d, iUpdateTimeline = %d", -+ __func__, iCheck3DFence, iUpdate3DTimeline)); -+ -+ { -+ /* Create the output fence for TA (if required) */ -+ if (iUpdateTATimeline != PVRSRV_NO_TIMELINE) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: calling SyncCheckpointCreateFence[TA] " -+ "(iUpdateFence=%d, iUpdateTimeline=%d, " -+ "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)", -+ __func__, iUpdateTAFence, iUpdateTATimeline, -+ (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext)); -+ eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode, -+ szFenceNameTA, -+ iUpdateTATimeline, -+ psRenderContext->psDeviceNode->hSyncCheckpointContext, -+ &iUpdateTAFence, -+ &uiUpdateTAFenceUID, -+ &pvTAUpdateFenceFinaliseData, -+ &psUpdateTASyncCheckpoint, -+ (void*)&psTAFenceTimelineUpdateSync, -+ &ui32TAFenceTimelineUpdateValue, -+ ui32PDumpFlags); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: SyncCheckpointCreateFence[TA] failed (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_create_ta_fence; -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: returned from SyncCheckpointCreateFence[TA] " -+ "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, " -+ "ui32FenceTimelineUpdateValue=0x%x)", -+ __func__, iUpdateTAFence, -+ (void*)psTAFenceTimelineUpdateSync, -+ ui32TAFenceTimelineUpdateValue)); -+ -+#if defined(TA3D_CHECKPOINT_DEBUG) -+ { -+ PRGXFWIF_UFO_ADDR *pauiClientTAIntUpdateUFOAddress = NULL; -+ -+ /* Store the FW address of the update sync checkpoint in pauiClientTAIntUpdateUFOAddress */ -+ pauiClientTAIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateTASyncCheckpoint); -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: pauiClientIntUpdateUFOAddress[TA]->ui32Addr=0x%x", -+ __func__, pauiClientTAIntUpdateUFOAddress->ui32Addr)); -+ } -+#endif -+ } -+ -+ /* Append the sync prim update for the TA timeline (if required) */ -+ if (psTAFenceTimelineUpdateSync) -+ { -+ sTASyncData.ui32ClientUpdateCount = ui32ClientTAUpdateCount; -+ sTASyncData.ui32ClientUpdateValueCount = ui32ClientTAUpdateValueCount; -+ sTASyncData.ui32ClientPRUpdateValueCount = (bKick3D) ? 0 : ui32ClientPRUpdateValueCount; -+ sTASyncData.paui32ClientUpdateValue = paui32ClientTAUpdateValue; -+ -+ eError = RGXSyncAppendTimelineUpdate(ui32TAFenceTimelineUpdateValue, -+ &psRenderContext->sSyncAddrListTAUpdate, -+ (bKick3D) ? 
NULL : &psRenderContext->sSyncAddrList3DUpdate, -+ psTAFenceTimelineUpdateSync, -+ &sTASyncData, -+ bKick3D); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ goto fail_alloc_update_values_mem_TA; -+ } -+ -+ paui32ClientTAUpdateValue = sTASyncData.paui32ClientUpdateValue; -+ ui32ClientTAUpdateValueCount = sTASyncData.ui32ClientUpdateValueCount; -+ pauiClientTAUpdateUFOAddress = sTASyncData.pauiClientUpdateUFOAddress; -+ ui32ClientTAUpdateCount = sTASyncData.ui32ClientUpdateCount; -+ } -+ -+ /* Create the output fence for 3D (if required) */ -+ if (iUpdate3DTimeline != PVRSRV_NO_TIMELINE) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: calling SyncCheckpointCreateFence[3D] " -+ "(iUpdateFence=%d, iUpdateTimeline=%d, " -+ "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)", -+ __func__, iUpdate3DFence, iUpdate3DTimeline, -+ (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext)); -+ eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode, -+ szFenceName3D, -+ iUpdate3DTimeline, -+ psRenderContext->psDeviceNode->hSyncCheckpointContext, -+ &iUpdate3DFence, -+ &uiUpdate3DFenceUID, -+ &pv3DUpdateFenceFinaliseData, -+ &psUpdate3DSyncCheckpoint, -+ (void*)&ps3DFenceTimelineUpdateSync, -+ &ui323DFenceTimelineUpdateValue, -+ ui32PDumpFlags); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: SyncCheckpointCreateFence[3D] failed (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_create_3d_fence; -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: returned from SyncCheckpointCreateFence[3D] " -+ "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, " -+ "ui32FenceTimelineUpdateValue=0x%x)", -+ __func__, iUpdate3DFence, -+ (void*)ps3DFenceTimelineUpdateSync, -+ ui323DFenceTimelineUpdateValue)); -+ -+#if defined(TA3D_CHECKPOINT_DEBUG) -+ { -+ PRGXFWIF_UFO_ADDR *pauiClient3DIntUpdateUFOAddress = NULL; -+ -+ /* Store the FW address of the update sync checkpoint in pauiClient3DIntUpdateUFOAddress */ -+ pauiClient3DIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdate3DSyncCheckpoint); -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: pauiClientIntUpdateUFOAddress[3D]->ui32Addr=0x%x", -+ __func__, pauiClient3DIntUpdateUFOAddress->ui32Addr)); -+ } -+#endif -+ } -+ -+ /* Append the sync prim update for the 3D timeline (if required) */ -+ if (ps3DFenceTimelineUpdateSync) -+ { -+ s3DSyncData.ui32ClientUpdateCount = ui32Client3DUpdateCount; -+ s3DSyncData.ui32ClientUpdateValueCount = ui32Client3DUpdateValueCount; -+ s3DSyncData.ui32ClientPRUpdateValueCount = ui32ClientPRUpdateValueCount; -+ s3DSyncData.paui32ClientUpdateValue = paui32Client3DUpdateValue; -+ -+ eError = RGXSyncAppendTimelineUpdate(ui323DFenceTimelineUpdateValue, -+ &psRenderContext->sSyncAddrList3DUpdate, -+ &psRenderContext->sSyncAddrList3DUpdate, /*!< PR update: is this required? 
*/ -+ ps3DFenceTimelineUpdateSync, -+ &s3DSyncData, -+ bKick3D); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ goto fail_alloc_update_values_mem_3D; -+ } -+ -+ paui32Client3DUpdateValue = s3DSyncData.paui32ClientUpdateValue; -+ ui32Client3DUpdateValueCount = s3DSyncData.ui32ClientUpdateValueCount; -+ pauiClient3DUpdateUFOAddress = s3DSyncData.pauiClientUpdateUFOAddress; -+ ui32Client3DUpdateCount = s3DSyncData.ui32ClientUpdateCount; -+ -+ if (!bKick3D) -+ { -+ paui32ClientPRUpdateValue = s3DSyncData.paui32ClientPRUpdateValue; -+ ui32ClientPRUpdateValueCount = s3DSyncData.ui32ClientPRUpdateValueCount; -+ pauiClientPRUpdateUFOAddress = s3DSyncData.pauiClientPRUpdateUFOAddress; -+ ui32ClientPRUpdateCount = s3DSyncData.ui32ClientPRUpdateCount; -+ } -+ } -+ -+ /* -+ * The hardware requires a PR to be submitted if there is a TA OOM. -+ * If we only have a TA, attach native checks and updates to the TA -+ * and 3D updates to the PR. -+ * If we have a TA and 3D, attach the native TA checks and updates -+ * to the TA and similarly for the 3D. -+ * Note that 'updates' includes the cleanup syncs for 'check' fence -+ * FDs, in addition to the update fence FD (if supplied). -+ * Currently, the client driver never kicks only the 3D, so we don't -+ * support that for the time being. -+ */ -+ -+ { -+ if (bKickTA) -+ { -+ /* Attach checks and updates to TA */ -+ -+ /* Checks (from input fence) */ -+ if (ui32FenceTASyncCheckpointCount > 0) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Append %d sync checkpoints to TA Fence (apsFenceSyncCheckpoints=<%p>)...", -+ __func__, -+ ui32FenceTASyncCheckpointCount, -+ (void*)apsFenceTASyncCheckpoints)); -+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence, -+ ui32FenceTASyncCheckpointCount, -+ apsFenceTASyncCheckpoints); -+ if (!pauiClientTAFenceUFOAddress) -+ { -+ pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; -+ } -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: {ui32ClientTAFenceCount was %d, now %d}", -+ __func__, ui32ClientTAFenceCount, -+ ui32ClientTAFenceCount + ui32FenceTASyncCheckpointCount)); -+ if (ui32ClientTAFenceCount == 0) -+ { -+ bTAFenceOnSyncCheckpointsOnly = IMG_TRUE; -+ } -+ ui32ClientTAFenceCount += ui32FenceTASyncCheckpointCount; -+ } -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: {ui32ClientTAFenceCount now %d}", -+ __func__, ui32ClientTAFenceCount)); -+ -+ if (psUpdateTASyncCheckpoint) -+ { -+ /* Update (from output fence) */ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Append 1 sync checkpoint<%p> (ID=%d) to TA Update...", -+ __func__, (void*)psUpdateTASyncCheckpoint, -+ SyncCheckpointGetId(psUpdateTASyncCheckpoint))); -+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAUpdate, -+ 1, -+ &psUpdateTASyncCheckpoint); -+ if (!pauiClientTAUpdateUFOAddress) -+ { -+ pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs; -+ } -+ ui32ClientTAUpdateCount++; -+ } -+ -+ if (!bKick3D && psUpdate3DSyncCheckpoint) -+ { -+ /* Attach update to the 3D (used for PR) Updates */ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Append 1 sync checkpoint<%p> (ID=%d) to 3D(PR) Update...", -+ __func__, (void*)psUpdate3DSyncCheckpoint, -+ SyncCheckpointGetId(psUpdate3DSyncCheckpoint))); -+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, -+ 1, -+ &psUpdate3DSyncCheckpoint); -+ if (!pauiClientPRUpdateUFOAddress) -+ { -+ pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; -+ } -+ ui32ClientPRUpdateCount++; -+ } -+ } -+ -+ if (bKick3D) -+ { -+ /* Attach checks and updates to 
the 3D */ -+ -+ /* Checks (from input fence) */ -+ if (ui32Fence3DSyncCheckpointCount > 0) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Append %d sync checkpoints to 3D Fence...", -+ __func__, ui32Fence3DSyncCheckpointCount)); -+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence, -+ ui32Fence3DSyncCheckpointCount, -+ apsFence3DSyncCheckpoints); -+ if (!pauiClient3DFenceUFOAddress) -+ { -+ pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; -+ } -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: {ui32Client3DFenceCount was %d, now %d}", -+ __func__, ui32Client3DFenceCount, -+ ui32Client3DFenceCount + ui32Fence3DSyncCheckpointCount)); -+ if (ui32Client3DFenceCount == 0) -+ { -+ b3DFenceOnSyncCheckpointsOnly = IMG_TRUE; -+ } -+ ui32Client3DFenceCount += ui32Fence3DSyncCheckpointCount; -+ } -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: {ui32Client3DFenceCount was %d}", -+ __func__, ui32Client3DFenceCount)); -+ -+ if (psUpdate3DSyncCheckpoint) -+ { -+ /* Update (from output fence) */ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Append 1 sync checkpoint<%p> (ID=%d) to 3D Update...", -+ __func__, (void*)psUpdate3DSyncCheckpoint, -+ SyncCheckpointGetId(psUpdate3DSyncCheckpoint))); -+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, -+ 1, -+ &psUpdate3DSyncCheckpoint); -+ if (!pauiClient3DUpdateUFOAddress) -+ { -+ pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; -+ } -+ ui32Client3DUpdateCount++; -+ } -+ } -+ -+ /* -+ * Relocate sync check points from the 3D fence that are -+ * external to the current process, to the TA fence. -+ * This avoids a sync lockup when dependent renders are -+ * submitted out-of-order and a PR must be scheduled. -+ */ -+ if (bKickTA) -+ { -+ /* Search for external timeline dependencies */ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Checking 3D fence for external sync points (%d)...", -+ __func__, ui32Fence3DSyncCheckpointCount)); -+ -+ for (i=0; i (ID=%d) to TA Fence...", -+ __func__, (void*)apsFence3DSyncCheckpoints[i], -+ SyncCheckpointGetId(apsFence3DSyncCheckpoints[i]))); -+ -+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence, -+ 1, -+ &apsFence3DSyncCheckpoints[i]); -+ -+ if (!pauiClientTAFenceUFOAddress) -+ { -+ pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: {ui32ClientTAFenceCount was %d, now %d}", -+ __func__, -+ ui32ClientTAFenceCount, -+ ui32ClientTAFenceCount + 1)); -+ -+ if (ui32ClientTAFenceCount == 0) -+ { -+ bTAFenceOnSyncCheckpointsOnly = IMG_TRUE; -+ } -+ -+ ui32ClientTAFenceCount++; -+ } -+ } -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: (after pvr_sync) ui32ClientTAFenceCount=%d, " -+ "ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, " -+ "ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,", -+ __func__, -+ ui32ClientTAFenceCount, ui32ClientTAUpdateCount, -+ ui32Client3DFenceCount, ui32Client3DUpdateCount, -+ ui32ClientPRUpdateCount)); -+ } -+ } -+ -+ if (ui32ClientTAFenceCount) -+ { -+ PVR_ASSERT(pauiClientTAFenceUFOAddress); -+ if (!bTAFenceOnSyncCheckpointsOnly) -+ { -+ PVR_ASSERT(paui32ClientTAFenceValue); -+ } -+ } -+ if (ui32ClientTAUpdateCount) -+ { -+ PVR_ASSERT(pauiClientTAUpdateUFOAddress); -+ if (ui32ClientTAUpdateValueCount>0) -+ { -+ PVR_ASSERT(paui32ClientTAUpdateValue); -+ } -+ } -+ if (ui32Client3DFenceCount) -+ { -+ PVR_ASSERT(pauiClient3DFenceUFOAddress); -+ PVR_ASSERT(b3DFenceOnSyncCheckpointsOnly); -+ } -+ if (ui32Client3DUpdateCount) -+ { -+ 
PVR_ASSERT(pauiClient3DUpdateUFOAddress); -+ if (ui32Client3DUpdateValueCount>0) -+ { -+ PVR_ASSERT(paui32Client3DUpdateValue); -+ } -+ } -+ if (ui32ClientPRUpdateCount) -+ { -+ PVR_ASSERT(pauiClientPRUpdateUFOAddress); -+ if (ui32ClientPRUpdateValueCount>0) -+ { -+ PVR_ASSERT(paui32ClientPRUpdateValue); -+ } -+ } -+ -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: ui32ClientTAFenceCount=%d, pauiClientTAFenceUFOAddress=<%p> Line ", -+ __func__, -+ ui32ClientTAFenceCount, -+ (void*)paui32ClientTAFenceValue)); -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: ui32ClientTAUpdateCount=%d, pauiClientTAUpdateUFOAddress=<%p> Line ", -+ __func__, -+ ui32ClientTAUpdateCount, -+ (void*)pauiClientTAUpdateUFOAddress)); -+#if (ENABLE_TA3D_UFO_DUMP == 1) -+ DumpUfoList(ui32ClientTAFenceCount, ui32ClientTAUpdateCount, -+ ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0), -+ ui32Client3DUpdateCount, -+ pauiClientTAFenceUFOAddress, paui32ClientTAFenceValue, -+ pauiClientTAUpdateUFOAddress, paui32ClientTAUpdateValue, -+ pauiClient3DFenceUFOAddress, NULL, -+ pauiClient3DUpdateUFOAddress, paui32Client3DUpdateValue); -+#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */ -+ -+ /* Command size check */ -+ if (ui32TAFenceCount != ui32ClientTAFenceCount) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "TA pre-calculated number of fences" -+ " is different than the actual number (%u != %u)", -+ ui32TAFenceCount, ui32ClientTAFenceCount)); -+ } -+ if (ui32TAUpdateCount != ui32ClientTAUpdateCount) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "TA pre-calculated number of updates" -+ " is different than the actual number (%u != %u)", -+ ui32TAUpdateCount, ui32ClientTAUpdateCount)); -+ } -+ if (!bTestSLRAdd3DCheck && (ui323DFenceCount != ui32Client3DFenceCount)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "3D pre-calculated number of fences" -+ " is different than the actual number (%u != %u)", -+ ui323DFenceCount, ui32Client3DFenceCount)); -+ } -+ if (ui323DUpdateCount != ui32Client3DUpdateCount) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "3D pre-calculated number of updates" -+ " is different than the actual number (%u != %u)", -+ ui323DUpdateCount, ui32Client3DUpdateCount)); -+ } -+ if (ui32PRUpdateCount != ui32ClientPRUpdateCount) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "PR pre-calculated number of updates" -+ " is different than the actual number (%u != %u)", -+ ui32PRUpdateCount, ui32ClientPRUpdateCount)); -+ } -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if ((!PVRSRV_VZ_MODE_IS(GUEST)) && (bKickTA || bKick3D || bAbort)) -+ { -+ sWorkloadCharacteristics.sTA3D.ui32RenderTargetSize = ui32RenderTargetSize; -+ sWorkloadCharacteristics.sTA3D.ui32NumberOfDrawCalls = ui32NumberOfDrawCalls; -+ sWorkloadCharacteristics.sTA3D.ui32NumberOfIndices = ui32NumberOfIndices; -+ sWorkloadCharacteristics.sTA3D.ui32NumberOfMRTs = ui32NumberOfMRTs; -+ } -+#endif -+ -+ /* Init and acquire to TA command if required */ -+ if (bKickTA) -+ { -+ RGX_SERVER_RC_TA_DATA *psTAData = &psRenderContext->sTAData; -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Prepare workload estimation */ -+ WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, -+ &psRenderContext->sWorkEstData, -+ &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sDataTA, -+ RGXFWIF_CCB_CMD_TYPE_GEOM, -+ &sWorkloadCharacteristics, -+ ui64DeadlineInus, -+ &sWorkloadKickDataTA); -+ } -+#endif -+ -+ /* Init the TA command helper */ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: calling RGXCmdHelperInitCmdCCB(), ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d", -+ __func__, ui32ClientTAFenceCount, 
ui32ClientTAUpdateCount)); -+ RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(psTAData->psServerCommonContext), -+ ui32ClientTAFenceCount, -+ pauiClientTAFenceUFOAddress, -+ paui32ClientTAFenceValue, -+ ui32ClientTAUpdateCount, -+ pauiClientTAUpdateUFOAddress, -+ paui32ClientTAUpdateValue, -+ ui32TACmdSize, -+ pui8TADMCmd, -+ &pPreAddr, -+ (bKick3D ? NULL : & pPostAddr), -+ (bKick3D ? NULL : & pRMWUFOAddr), -+ RGXFWIF_CCB_CMD_TYPE_GEOM, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ ui32PDumpFlags, -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ &sWorkloadKickDataTA, -+#else -+ NULL, -+#endif -+ "TA", -+ bCCBStateOpen, -+ pasTACmdHelperData); -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* The following is used to determine the offset of the command header containing -+ the workload estimation data so that can be accessed when the KCCB is read */ -+ ui32TACmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(pasTACmdHelperData); -+ } -+#endif -+ -+ eError = RGXCmdHelperAcquireCmdCCB(CCB_CMD_HELPER_NUM_TA_COMMANDS, pasTACmdHelperData); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", -+ __func__, eError)); -+ goto fail_taacquirecmd; -+ } -+ else -+ { -+ ui32TACmdCount++; -+ } -+ } -+ -+ /* Only kick the 3D if required */ -+ if (bKickPR) -+ { -+ RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData; -+ -+ /* -+ The command helper doesn't know about the PR fence so create -+ the command with all the fences against it and later create -+ the PR command itself which _must_ come after the PR fence. -+ */ -+ sPRUFO.puiAddrUFO = uiPRFenceUFOAddress; -+ sPRUFO.ui32Value = ui32PRFenceValue; -+ -+ /* Init the PR fence command helper */ -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: calling RGXCmdHelperInitCmdCCB(), ui32Client3DFenceCount=%d", -+ __func__, ui32Client3DFenceCount)); -+ RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), -+ ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0), -+ pauiClient3DFenceUFOAddress, -+ NULL, -+ 0, -+ NULL, -+ NULL, -+ sizeof(sPRUFO), -+ (IMG_UINT8*) &sPRUFO, -+ NULL, -+ NULL, -+ NULL, -+ RGXFWIF_CCB_CMD_TYPE_FENCE_PR, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ ui32PDumpFlags, -+ NULL, -+ "3D-PR-Fence", -+ bCCBStateOpen, -+ &pas3DCmdHelperData[ui323DCmdCount++]); -+ -+ /* Init the 3D PR command helper */ -+ /* -+ Updates for Android (fence sync and Timeline sync prim) are provided in the PR-update -+ if no 3D is present. This is so the timeline update cannot happen out of order with any -+ other 3D already in flight for the same timeline (PR-updates are done in the 3D cCCB). -+ This out of order timeline sync prim update could happen if we attach it to the TA update. -+ */ -+ if (ui32ClientPRUpdateCount) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Line %d, ui32ClientPRUpdateCount=%d, " -+ "pauiClientPRUpdateUFOAddress=0x%x, " -+ "ui32ClientPRUpdateValueCount=%d, " -+ "paui32ClientPRUpdateValue=0x%x", -+ __func__, __LINE__, ui32ClientPRUpdateCount, -+ pauiClientPRUpdateUFOAddress->ui32Addr, -+ ui32ClientPRUpdateValueCount, -+ (ui32ClientPRUpdateValueCount == 0) ? 
PVRSRV_SYNC_CHECKPOINT_SIGNALLED : *paui32ClientPRUpdateValue)); -+ } -+ -+ if (!bUseCombined3DAnd3DPR) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: calling RGXCmdHelperInitCmdCCB(), ui32ClientPRUpdateCount=%d", -+ __func__, ui32ClientPRUpdateCount)); -+ RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), -+ 0, -+ NULL, -+ NULL, -+ ui32ClientPRUpdateCount, -+ pauiClientPRUpdateUFOAddress, -+ paui32ClientPRUpdateValue, -+ pui83DPRDMCmd ? ui323DPRCmdSize : ui323DCmdSize, // If the client has not provided a 3DPR command, the regular 3D command should be used instead -+ pui83DPRDMCmd ? pui83DPRDMCmd : pui83DDMCmd, -+ NULL, -+ NULL, -+ NULL, -+ RGXFWIF_CCB_CMD_TYPE_3D_PR, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ ui32PDumpFlags, -+ NULL, -+ "3D-PR", -+ bCCBStateOpen, -+ &pas3DCmdHelperData[ui323DCmdCount++]); -+ } -+ } -+ -+ if (bKick3D || bAbort) -+ { -+ RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData; -+ const RGXFWIF_CCB_CMD_TYPE e3DCmdType = bAbort ? RGXFWIF_CCB_CMD_TYPE_ABORT : RGXFWIF_CCB_CMD_TYPE_3D; -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Prepare workload estimation */ -+ WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, -+ &psRenderContext->sWorkEstData, -+ &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sData3D, -+ e3DCmdType, -+ &sWorkloadCharacteristics, -+ ui64DeadlineInus, -+ &sWorkloadKickData3D); -+ } -+#endif -+ -+ /* Init the 3D command helper */ -+ RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), -+ bKickTA ? 0 : ui32Client3DFenceCount, /* For a kick with a TA, the 3D fences are added before the PR command instead */ -+ bKickTA ? NULL : pauiClient3DFenceUFOAddress, -+ NULL, -+ ui32Client3DUpdateCount, -+ pauiClient3DUpdateUFOAddress, -+ paui32Client3DUpdateValue, -+ ui323DCmdSize, -+ pui83DDMCmd, -+ (bKickTA ? NULL : & pPreAddr), -+ &pPostAddr, -+ &pRMWUFOAddr, -+ e3DCmdType, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ ui32PDumpFlags, -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ &sWorkloadKickData3D, -+#else -+ NULL, -+#endif -+ "3D", -+ bCCBStateOpen, -+ &pas3DCmdHelperData[ui323DCmdCount++]); -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* The following are used to determine the offset of the command header containing the workload estimation -+ data so that can be accessed when the KCCB is read */ -+ ui323DCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(&pas3DCmdHelperData[ui323DCmdCount - 1]); -+ ui323DFullRenderCommandOffset = RGXCmdHelperGetCommandOffset(pas3DCmdHelperData, ui323DCmdCount - 1); -+ } -+#endif -+ } -+ -+ /* Protect against array overflow in RGXCmdHelperAcquireCmdCCB() */ -+ if (unlikely(ui323DCmdCount > CCB_CMD_HELPER_NUM_3D_COMMANDS)) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __func__, eError)); -+ goto fail_3dcmdinit; -+ } -+ -+ if (ui323DCmdCount) -+ { -+ PVR_ASSERT(bKickPR || bKick3D); -+ -+ /* Acquire space for all the 3D command(s) */ -+ eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount, pas3DCmdHelperData); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ /* If RGXCmdHelperAcquireCmdCCB fails we skip the scheduling -+ * of a new TA command with the same Write offset in Kernel CCB. 
-+ */ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __func__, eError)); -+ goto fail_3dacquirecmd; -+ } -+ } -+ -+ if (ui32TACmdCount) -+ { -+ eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTACmdKickData.apsCleanupCtl, -+ &sTACmdKickData.ui32NumCleanupCtl, -+ RGXFWIF_DM_GEOM, -+ bKickTA, -+ psKMHWRTDataSet, -+ psZSBuffer, -+ psMSAAScratchBuffer); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", -+ __func__, eError)); -+ goto fail_taattachcleanupctls; -+ } -+ } -+ -+ if (ui323DCmdCount) -+ { -+ eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DCmdKickData.apsCleanupCtl, -+ &s3DCmdKickData.ui32NumCleanupCtl, -+ RGXFWIF_DM_3D, -+ bKick3D, -+ psKMHWRTDataSet, -+ psZSBuffer, -+ psMSAAScratchBuffer); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", -+ __func__, eError)); -+ goto fail_3dattachcleanupctls; -+ } -+ } -+ -+ /* -+ We should acquire the space in the kernel CCB here as after this point -+ we release the commands which will take operations on server syncs -+ which can't be undone -+ */ -+ -+ /* -+ Everything is ready to go now, release the commands -+ */ -+ -+ eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ -+ /* If system is found powered OFF, Retry scheduling the command */ -+ if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) -+ { -+ eError = PVRSRV_ERROR_RETRY; -+ } -+ goto fail_acquirepowerlock; -+ } -+ -+ if (ui32TACmdCount) -+ { -+ ui32TACmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext)); -+ RGXCmdHelperReleaseCmdCCB(ui32TACmdCount, -+ pasTACmdHelperData, -+ "TA", -+ FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr); -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ ui32TACmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext)); -+ -+ /* This checks if the command would wrap around at the end of the CCB and therefore would start at an -+ offset of 0 rather than the current command offset */ -+ if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck) -+ { -+ ui32TACommandOffset = ui32TACmdOffset; -+ } -+ else -+ { -+ ui32TACommandOffset = 0; -+ } -+ } -+#endif -+ } -+ -+ if (ui323DCmdCount) -+ { -+ ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext)); -+ RGXCmdHelperReleaseCmdCCB(ui323DCmdCount, -+ pas3DCmdHelperData, -+ "3D", -+ FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr); -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext)); -+ -+ if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck) -+ { -+ ui323DCommandOffset = ui323DCmdOffset; -+ } -+ else -+ { -+ ui323DCommandOffset = 0; -+ } -+ } -+#endif -+ } -+ -+ if (ui32TACmdCount) -+ { -+ IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr; -+ RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext); -+ CMDTA3D_SHARED *psGeomCmdShared = 
IMG_OFFSET_ADDR(pui8TADMCmd, 0); -+ -+ sTACmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext); -+ sTACmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); -+ sTACmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Add the Workload data into the KCCB kick */ -+ sTACmdKickData.ui32WorkEstCmdHeaderOffset = ui32TACommandOffset + ui32TACmdHeaderOffset; -+ } -+#endif -+ -+ if (psGeomCmdShared) -+ { -+ HTBLOGK(HTB_SF_MAIN_KICK_TA, -+ sTACmdKickData.psContext, -+ ui32TACmdOffset, -+ psGeomCmdShared->sCmn.ui32FrameNum, -+ ui32ExtJobRef, -+ ui32IntJobRef); -+ } -+ -+ RGXSRV_HWPERF_ENQ(psRenderContext, -+ OSGetCurrentClientProcessIDKM(), -+ ui32FWCtx, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ RGX_HWPERF_KICK_TYPE2_GEOM, -+ iCheckTAFence, -+ iUpdateTAFence, -+ iUpdateTATimeline, -+ uiCheckTAFenceUID, -+ uiUpdateTAFenceUID, -+ ui64DeadlineInus, -+ WORKEST_CYCLES_PREDICTION_GET(sWorkloadKickDataTA)); -+ -+ if (!bUseSingleFWCommand) -+ { -+ /* Construct the kernel TA CCB command. */ -+ RGXFWIF_KCCB_CMD sTAKCCBCmd; -+ sTAKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; -+ sTAKCCBCmd.uCmdData.sCmdKickData = sTACmdKickData; -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError2 = RGXScheduleCommandWithoutPowerLock(psRenderContext->psDeviceNode->pvDevice, -+ RGXFWIF_DM_GEOM, -+ &sTAKCCBCmd, -+ ui32PDumpFlags); -+ if (eError2 != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ } -+ -+ if (eError2 != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKicKTA3DKM failed to schedule kernel CCB command. (0x%x)", eError2)); -+ /* Mark the error and bail out */ -+ eError = eError2; -+ goto fail_tasubmitcmd; -+ } -+ -+ PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode, -+ ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, -+ RGX_HWPERF_KICK_TYPE2_GEOM); -+ } -+ -+ if (ui323DCmdCount) -+ { -+ RGXFWIF_KCCB_CMD s3DKCCBCmd = { 0 }; -+ IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr; -+ RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext); -+ CMDTA3D_SHARED *ps3DCmdShared = IMG_OFFSET_ADDR(pui83DDMCmd, 0); -+ -+ s3DCmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext); -+ s3DCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); -+ s3DCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); -+ -+ /* Add the Workload data into the KCCB kick */ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */ -+ s3DCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset; -+ } -+#endif -+ -+ if (ps3DCmdShared) -+ { -+ HTBLOGK(HTB_SF_MAIN_KICK_3D, -+ s3DCmdKickData.psContext, -+ ui323DCmdOffset, -+ ps3DCmdShared->sCmn.ui32FrameNum, -+ ui32ExtJobRef, -+ ui32IntJobRef); -+ } -+ -+ RGXSRV_HWPERF_ENQ(psRenderContext, -+ OSGetCurrentClientProcessIDKM(), -+ ui32FWCtx, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ RGX_HWPERF_KICK_TYPE2_3D, -+ iCheck3DFence, -+ iUpdate3DFence, -+ iUpdate3DTimeline, -+ uiCheck3DFenceUID, -+ uiUpdate3DFenceUID, -+ ui64DeadlineInus, -+ WORKEST_CYCLES_PREDICTION_GET(sWorkloadKickData3D)); -+ -+ if (bUseSingleFWCommand) 
-+ { -+ /* Construct the kernel TA/3D CCB command. */ -+ s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK; -+ s3DKCCBCmd.uCmdData.sCombinedTA3DCmdKickData.sTACmdKickData = sTACmdKickData; -+ s3DKCCBCmd.uCmdData.sCombinedTA3DCmdKickData.s3DCmdKickData = s3DCmdKickData; -+ } -+ else -+ { -+ /* Construct the kernel 3D CCB command. */ -+ s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; -+ s3DKCCBCmd.uCmdData.sCmdKickData = s3DCmdKickData; -+ } -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError2 = RGXScheduleCommandWithoutPowerLock(psRenderContext->psDeviceNode->pvDevice, -+ RGXFWIF_DM_3D, -+ &s3DKCCBCmd, -+ ui32PDumpFlags); -+ if (eError2 != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ if (eError2 != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKicKTA3DKM failed to schedule kernel CCB command. (0x%x)", eError2)); -+ if (eError == PVRSRV_OK) -+ { -+ eError = eError2; -+ } -+ goto fail_3dsubmitcmd; -+ } -+ -+ PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode, -+ ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, -+ RGX_HWPERF_KICK_TYPE2_3D); -+ } -+ -+ /* -+ * Now check eError (which may have returned an error from our earlier calls -+ * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first -+ * so we check it now... -+ */ -+ if (unlikely(eError != PVRSRV_OK )) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", -+ __func__, eError)); -+ goto fail_3dsubmitcmd; -+ } -+ -+ PVRSRVPowerUnlock(psDevInfo->psDeviceNode); -+ -+#if defined(NO_HARDWARE) -+ /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ -+ if (psUpdateTASyncCheckpoint) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Signalling NOHW sync checkpoint [TA] <%p>, ID:%d, FwAddr=0x%x", -+ __func__, (void*)psUpdateTASyncCheckpoint, -+ SyncCheckpointGetId(psUpdateTASyncCheckpoint), -+ SyncCheckpointGetFirmwareAddr(psUpdateTASyncCheckpoint))); -+ SyncCheckpointSignalNoHW(psUpdateTASyncCheckpoint); -+ } -+ if (psTAFenceTimelineUpdateSync) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Updating NOHW sync prim [TA] <%p> to %d", -+ __func__, (void*)psTAFenceTimelineUpdateSync, -+ ui32TAFenceTimelineUpdateValue)); -+ SyncPrimNoHwUpdate(psTAFenceTimelineUpdateSync, ui32TAFenceTimelineUpdateValue); -+ } -+ -+ if (psUpdate3DSyncCheckpoint) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Signalling NOHW sync checkpoint [3D] <%p>, ID:%d, FwAddr=0x%x", -+ __func__, (void*)psUpdate3DSyncCheckpoint, -+ SyncCheckpointGetId(psUpdate3DSyncCheckpoint), -+ SyncCheckpointGetFirmwareAddr(psUpdate3DSyncCheckpoint))); -+ SyncCheckpointSignalNoHW(psUpdate3DSyncCheckpoint); -+ } -+ if (ps3DFenceTimelineUpdateSync) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: Updating NOHW sync prim [3D] <%p> to %d", -+ __func__, (void*)ps3DFenceTimelineUpdateSync, -+ ui323DFenceTimelineUpdateValue)); -+ SyncPrimNoHwUpdate(ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue); -+ } -+ SyncCheckpointNoHWUpdateTimelines(NULL); -+ -+#endif /* defined(NO_HARDWARE) */ -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ if (psBufferSyncData) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, -+ "%s: calling pvr_buffer_sync_kick_succeeded(psBufferSyncData=<%p>)...", -+ __func__, (void*)psBufferSyncData)); -+ pvr_buffer_sync_kick_succeeded(psBufferSyncData); -+ } -+ if (apsBufferFenceSyncCheckpoints) -+ { -+ kfree(apsBufferFenceSyncCheckpoints); -+ } -+#endif /* defined(SUPPORT_BUFFER_SYNC) */ -+ -+ if (piUpdateTAFence) -+ { -+ *piUpdateTAFence = iUpdateTAFence; -+ } -+ if 
(piUpdate3DFence) -+ { -+ *piUpdate3DFence = iUpdate3DFence; -+ } -+ -+ /* Drop the references taken on the sync checkpoints in the -+ * resolved input fence. -+ * NOTE: 3D fence is always submitted, either via 3D or TA(PR). -+ */ -+ if (bKickTA) -+ { -+ SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints); -+ } -+ SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints); -+ -+ if (pvTAUpdateFenceFinaliseData && (iUpdateTAFence != PVRSRV_NO_FENCE)) -+ { -+ SyncCheckpointFinaliseFence(psRenderContext->psDeviceNode, iUpdateTAFence, -+ pvTAUpdateFenceFinaliseData, -+ psUpdateTASyncCheckpoint, szFenceNameTA); -+ } -+ if (pv3DUpdateFenceFinaliseData && (iUpdate3DFence != PVRSRV_NO_FENCE)) -+ { -+ SyncCheckpointFinaliseFence(psRenderContext->psDeviceNode, iUpdate3DFence, -+ pv3DUpdateFenceFinaliseData, -+ psUpdate3DSyncCheckpoint, szFenceName3D); -+ } -+ -+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ -+ if (apsFenceTASyncCheckpoints) -+ { -+ SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints); -+ } -+ if (apsFence3DSyncCheckpoints) -+ { -+ SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints); -+ } -+ -+ if (sTASyncData.paui32ClientUpdateValue) -+ { -+ OSFreeMem(sTASyncData.paui32ClientUpdateValue); -+ } -+ if (s3DSyncData.paui32ClientUpdateValue) -+ { -+ OSFreeMem(s3DSyncData.paui32ClientUpdateValue); -+ } -+ -+#if defined(SUPPORT_VALIDATION) -+ if (bTestSLRAdd3DCheck) -+ { -+ SyncCheckpointFree(psDummySyncCheckpoint); -+ } -+#endif -+ OSLockRelease(psRenderContext->hLock); -+ -+ return PVRSRV_OK; -+ -+fail_3dsubmitcmd: -+fail_tasubmitcmd: -+ PVRSRVPowerUnlock(psDevInfo->psDeviceNode); -+fail_acquirepowerlock: -+fail_3dattachcleanupctls: -+fail_taattachcleanupctls: -+fail_3dacquirecmd: -+fail_3dcmdinit: -+fail_taacquirecmd: -+ SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAFence); -+ SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAUpdate); -+ SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DFence); -+ SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DUpdate); -+ /* Where a TA-only kick (ie no 3D) is submitted, the PR update will make use of the unused 3DUpdate list. -+ * If this has happened, performing a rollback on pauiClientPRUpdateUFOAddress will simply repeat what -+ * has already been done for the sSyncAddrList3DUpdate above and result in a double decrement of the -+ * sync checkpoint's hEnqueuedCCBCount, so we need to check before rolling back the PRUpdate. 
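 * Hence the pointer comparison in the check just below: the PR update is only
 * rolled back when pauiClientPRUpdateUFOAddress points at an address list other
 * than sSyncAddrList3DUpdate.pasFWAddrs, i.e. when it was not simply aliasing
 * the 3D update list.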
-+ */ -+ if (pauiClientPRUpdateUFOAddress && (pauiClientPRUpdateUFOAddress != psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs)) -+ { -+ SyncCheckpointRollbackFromUFO(psRenderContext->psDeviceNode, pauiClientPRUpdateUFOAddress->ui32Addr); -+ } -+ -+fail_alloc_update_values_mem_3D: -+ if (iUpdate3DFence != PVRSRV_NO_FENCE) -+ { -+ SyncCheckpointRollbackFenceData(iUpdate3DFence, pv3DUpdateFenceFinaliseData); -+ } -+fail_create_3d_fence: -+fail_alloc_update_values_mem_TA: -+ if (iUpdateTAFence != PVRSRV_NO_FENCE) -+ { -+ SyncCheckpointRollbackFenceData(iUpdateTAFence, pvTAUpdateFenceFinaliseData); -+ } -+fail_create_ta_fence: -+#if !defined(SUPPORT_BUFFER_SYNC) -+err_no_buffer_sync_invalid_params: -+#endif /* !defined(SUPPORT_BUFFER_SYNC) */ -+err_pr_fence_address: -+err_populate_sync_addr_list_3d_update: -+err_populate_sync_addr_list_3d_fence: -+err_populate_sync_addr_list_ta_update: -+err_populate_sync_addr_list_ta_fence: -+err_not_enough_space: -+ /* Drop the references taken on the sync checkpoints in the -+ * resolved input fence. -+ * NOTE: 3D fence is always submitted, either via 3D or TA(PR). -+ */ -+#if defined(SUPPORT_BUFFER_SYNC) -+ if (apsBufferFenceSyncCheckpoints) -+ { -+ SyncAddrListDeRefCheckpoints(ui32BufferFenceSyncCheckpointCount, -+ apsBufferFenceSyncCheckpoints); -+ } -+#endif -+ SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints); -+fail_resolve_input_3d_fence: -+ if (bKickTA) -+ { -+ SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints); -+ } -+fail_resolve_input_ta_fence: -+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ -+ if (apsFenceTASyncCheckpoints) -+ { -+ SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints); -+ } -+ if (apsFence3DSyncCheckpoints) -+ { -+ SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints); -+ } -+ if (sTASyncData.paui32ClientUpdateValue) -+ { -+ OSFreeMem(sTASyncData.paui32ClientUpdateValue); -+ } -+ if (s3DSyncData.paui32ClientUpdateValue) -+ { -+ OSFreeMem(s3DSyncData.paui32ClientUpdateValue); -+ } -+#if defined(SUPPORT_VALIDATION) -+ if (bTestSLRAdd3DCheck) -+ { -+ SyncCheckpointFree(psDummySyncCheckpoint); -+ } -+#endif -+#if defined(SUPPORT_BUFFER_SYNC) -+ if (psBufferSyncData) -+ { -+ pvr_buffer_sync_kick_failed(psBufferSyncData); -+ } -+ if (apsBufferFenceSyncCheckpoints) -+ { -+ kfree(apsBufferFenceSyncCheckpoints); -+ } -+#endif /* defined(SUPPORT_BUFFER_SYNC) */ -+ PVR_ASSERT(eError != PVRSRV_OK); -+ OSLockRelease(psRenderContext->hLock); -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRGXSendZSStoreDisableKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ RGX_SERVER_RENDER_CONTEXT *psRenderContext, -+ IMG_BOOL bDisableDepthStore, -+ IMG_BOOL bDisableStencilStore, -+ IMG_UINT32 ui32ExtJobRefToDisableZSStore) -+{ -+ PVRSRV_ERROR eError; -+ RGXFWIF_KCCB_CMD sDisableZSStoreCmd = { 0 }; -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ -+ sDisableZSStoreCmd.eCmdType = RGXFWIF_KCCB_CMD_DISABLE_ZSSTORE; -+ sDisableZSStoreCmd.uCmdData.sDisableZSStoreData.psContext = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext); -+ sDisableZSStoreCmd.uCmdData.sDisableZSStoreData.sDisableZSStore.bDisableZStore = bDisableDepthStore; -+ sDisableZSStoreCmd.uCmdData.sDisableZSStoreData.sDisableZSStore.bDisableSStore = bDisableStencilStore; -+ sDisableZSStoreCmd.uCmdData.sDisableZSStoreData.sDisableZSStore.ui32ExtJobRefToDisableZSStore = ui32ExtJobRefToDisableZSStore; -+ -+ if 
(psRenderContext->ui32CleanupStatus & (RC_CLEANUP_TA_COMPLETE | RC_CLEANUP_3D_COMPLETE)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: submit disable depth and stencil store command to render context that has been cleaned up (%u)", -+ __func__, PVRSRV_ERROR_INVALID_CONTEXT)); -+ return PVRSRV_ERROR_INVALID_CONTEXT; -+ } -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice, -+ RGXFWIF_DM_3D, -+ &sDisableZSStoreCmd, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to submit disable depth and stencil store command (%u)", -+ __func__, -+ eError)); -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ RGX_SERVER_RENDER_CONTEXT *psRenderContext, -+ IMG_INT32 i32Priority) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ -+ OSLockAcquire(psRenderContext->hLock); -+ -+ if (psRenderContext->sTAData.i32Priority != i32Priority) -+ { -+ eError = ContextSetPriority(psRenderContext->sTAData.psServerCommonContext, -+ psConnection, -+ psRenderContext->psDeviceNode->pvDevice, -+ i32Priority, -+ RGXFWIF_DM_GEOM); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to set the priority of the TA part of the rendercontext (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ goto fail_tacontext; -+ } -+ psRenderContext->sTAData.i32Priority = i32Priority; -+ } -+ -+ if (psRenderContext->s3DData.i32Priority != i32Priority) -+ { -+ eError = ContextSetPriority(psRenderContext->s3DData.psServerCommonContext, -+ psConnection, -+ psRenderContext->psDeviceNode->pvDevice, -+ i32Priority, -+ RGXFWIF_DM_3D); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to set the priority of the 3D part of the rendercontext (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ goto fail_3dcontext; -+ } -+ psRenderContext->s3DData.i32Priority = i32Priority; -+ } -+ -+ OSLockRelease(psRenderContext->hLock); -+ return PVRSRV_OK; -+ -+fail_3dcontext: -+fail_tacontext: -+ OSLockRelease(psRenderContext->hLock); -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+ -+PVRSRV_ERROR PVRSRVRGXSetRenderContextPropertyKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, -+ RGX_CONTEXT_PROPERTY eContextProperty, -+ IMG_UINT64 ui64Input, -+ IMG_UINT64 *pui64Output) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ switch (eContextProperty) -+ { -+ case RGX_CONTEXT_PROPERTY_FLAGS: -+ { -+ IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input; -+ -+ OSLockAcquire(psRenderContext->hLock); -+ eError = FWCommonContextSetFlags(psRenderContext->sTAData.psServerCommonContext, -+ ui32ContextFlags); -+ if (eError == PVRSRV_OK) -+ { -+ eError = FWCommonContextSetFlags(psRenderContext->s3DData.psServerCommonContext, -+ ui32ContextFlags); -+ } -+ OSLockRelease(psRenderContext->hLock); -+ break; -+ } -+ -+ default: -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); -+ eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ } -+ } -+ -+ return eError; -+} -+ -+void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ IMG_UINT32 ui32VerbLevel) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ 
OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock); -+ dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext) -+ { -+ RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx = -+ IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode); -+ -+ DumpFWCommonContextInfo(psCurrentServerRenderCtx->sTAData.psServerCommonContext, -+ pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); -+ DumpFWCommonContextInfo(psCurrentServerRenderCtx->s3DData.psServerCommonContext, -+ pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); -+ } -+ OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock); -+} -+ -+IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ IMG_UINT32 ui32ContextBitMask = 0; -+ -+ OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock); -+ -+ dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext) -+ { -+ RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx = -+ IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode); -+ if (NULL != psCurrentServerRenderCtx->sTAData.psServerCommonContext) -+ { -+ if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->sTAData.psServerCommonContext, RGX_KICK_TYPE_DM_TA) == PVRSRV_ERROR_CCCB_STALLED) -+ { -+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_TA; -+ } -+ } -+ -+ if (NULL != psCurrentServerRenderCtx->s3DData.psServerCommonContext) -+ { -+ if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_3D) == PVRSRV_ERROR_CCCB_STALLED) -+ { -+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_3D; -+ } -+ } -+ } -+ -+ OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock); -+ return ui32ContextBitMask; -+} -+ -+/* -+ * RGXRenderContextStalledKM -+ */ -+PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext) -+{ -+ RGXCheckForStalledClientContexts((PVRSRV_RGXDEV_INFO *) psRenderContext->psDeviceNode->pvDevice, IMG_TRUE); -+ return PVRSRV_OK; -+} -+ -+/****************************************************************************** -+ End of file (rgxta3d.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxta3d.h b/drivers/gpu/drm/img-rogue/rgxta3d.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxta3d.h -@@ -0,0 +1,510 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX TA and 3D Functionality -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the RGX TA and 3D Functionality -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RGXTA3D_H -+#define RGXTA3D_H -+ -+#include "devicemem.h" -+#include "devicemem_server.h" -+#include "device.h" -+#include "rgxdevice.h" -+#include "rgx_fwif_shared.h" -+#include "rgx_fwif_resetframework.h" -+#include "sync_server.h" -+#include "connection_server.h" -+#include "rgxdebug_common.h" -+#include "pvr_notifier.h" -+ -+typedef struct _RGX_SERVER_RENDER_CONTEXT_ RGX_SERVER_RENDER_CONTEXT; -+typedef struct _RGX_FREELIST_ RGX_FREELIST; -+typedef struct _RGX_PMR_NODE_ RGX_PMR_NODE; -+ -+/***************************************************************************** -+ * The Design of Data Storage System for Render Targets * -+ * ==================================================== * -+ * Relevant for * -+ * understanding RGXCreateHWRTDataSet & RGXDestroyHWRTDataSet * -+ * * -+ * * -+ * +=========================================+ * -+ * | RenderTargetDataSet | * -+ * +---------------|---------|---------------+ * -+ * | | * -+ * V V * -+ * +- - - - - - - - - - - - + +- - - - - - - - - - - - + * -+ * | KM_HW_RT_DATA_HANDLE_0 | | KM_HW_RT_DATA_HANDLE_1 | * -+ * +- - -|- - - - - - - - - + +- - - - - - - - - | - - + * -+ * | | * -+ * | | [UM]Client * -+ * ------|-----------------------------------------|----------------------- * -+ * | | Bridge * -+ * ------|-----------------------------------------|----------------------- * -+ * | | [KM]Server * -+ * | | * -+ * | KM-ptr | KM-ptr * -+ * V V * -+ * +====================+ +====================+ * -+ * | KM_HW_RT_DATA_0 | | KM_HW_RT_DATA_1 | * -+ * +-----|------------|-+ +-|------------|-----+ * -+ * | | | | * -+ * | | | | * -+ * | | | | * -+ * | | | | * -+ * | | KM-ptr | KM-ptr | * -+ * | V V | * -+ * | +==========================+ | * -+ * | | HW_RT_DATA_COMMON_COOKIE | | * -+ * | +--------------------------+ | * -+ * | | | * -+ * | | | * -+ * ------|-------------------|---------------------|----------------------- * -+ * | | | [FW]Firmware * -+ * | | | * -+ * | FW-addr | | FW-addr * -+ * V | V * -+ * +===============+ | +===============+ * -+ * | HW_RT_DATA_0 | | | HW_RT_DATA_1 | * -+ * +------------|--+ | +--|------------+ * -+ * | | | * -+ * | FW-addr | FW-addr | FW-addr * -+ * V V V * -+ * +=========================================+ * -+ * | HW_RT_DATA_COMMON | * -+ * 
+-----------------------------------------+ * -+ * * -+ *****************************************************************************/ -+ -+typedef struct _RGX_HWRTDATA_COMMON_COOKIE_ -+{ -+ DEVMEM_MEMDESC *psHWRTDataCommonFwMemDesc; -+ RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; -+ IMG_UINT32 ui32RefCount; -+ -+} RGX_HWRTDATA_COMMON_COOKIE; -+ -+typedef struct _RGX_KM_HW_RT_DATASET_ -+{ -+ RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie; -+ -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ RGXFWIF_DEV_VIRTADDR sHWRTDataFwAddr; -+ -+ DEVMEM_MEMDESC *psHWRTDataFwMemDesc; -+ DEVMEM_MEMDESC *psRTArrayFwMemDesc; -+ DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc; -+ -+ RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS]; -+#if !defined(SUPPORT_SHADOW_FREELISTS) -+ DLLIST_NODE sNodeHWRTData; -+#endif -+ -+} RGX_KM_HW_RT_DATASET; -+ -+struct _RGX_FREELIST_ { -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ CONNECTION_DATA *psConnection; -+ -+ /* Free list PMR */ -+ PMR *psFreeListPMR; -+ IMG_DEVMEM_OFFSET_T uiFreeListPMROffset; -+ -+ /* Freelist config */ -+ IMG_UINT32 ui32MaxFLPages; -+ IMG_UINT32 ui32InitFLPages; -+ IMG_UINT32 ui32CurrentFLPages; -+ IMG_UINT32 ui32GrowFLPages; -+ IMG_UINT32 ui32ReadyFLPages; -+ IMG_UINT32 ui32GrowThreshold; /* Percentage of FL memory used that should trigger a new grow request */ -+ IMG_UINT32 ui32FreelistID; -+ IMG_UINT32 ui32FreelistGlobalID; /* related global freelist for this freelist */ -+ IMG_UINT64 ui64FreelistChecksum; /* checksum over freelist content */ -+ IMG_BOOL bCheckFreelist; /* freelist check enabled */ -+ IMG_UINT32 ui32RefCount; /* freelist reference counting */ -+ -+ IMG_UINT32 ui32NumGrowReqByApp; /* Total number of grow requests by Application */ -+ IMG_UINT32 ui32NumGrowReqByFW; /* Total Number of grow requests by Firmware */ -+ IMG_UINT32 ui32NumHighPages; /* High Mark of pages in the freelist */ -+ -+ IMG_PID ownerPid; /* Pid of the owner of the list */ -+ -+ /* Memory Blocks */ -+ DLLIST_NODE sMemoryBlockHead; -+ DLLIST_NODE sMemoryBlockInitHead; -+ DLLIST_NODE sNode; -+#if !defined(SUPPORT_SHADOW_FREELISTS) -+ /* HWRTData nodes linked to local freelist */ -+ DLLIST_NODE sNodeHWRTDataHead; -+#endif -+ -+ /* FW data structures */ -+ DEVMEM_MEMDESC *psFWFreelistMemDesc; -+ RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr; -+}; -+ -+struct _RGX_PMR_NODE_ { -+ RGX_FREELIST *psFreeList; -+ PMR *psPMR; -+ PMR_PAGELIST *psPageList; -+ DLLIST_NODE sMemoryBlock; -+ IMG_UINT32 ui32NumPages; -+ IMG_BOOL bFirstPageMissing; -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ RI_HANDLE hRIHandle; -+#endif -+}; -+ -+typedef struct { -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ DEVMEM_MEMDESC *psFWZSBufferMemDesc; -+ RGXFWIF_DEV_VIRTADDR sZSBufferFWDevVAddr; -+ -+ DEVMEMINT_RESERVATION *psReservation; -+ PMR *psPMR; -+ DEVMEMINT_MAPPING *psMapping; -+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags; -+ IMG_UINT32 ui32ZSBufferID; -+ IMG_UINT32 ui32RefCount; -+ IMG_BOOL bOnDemand; -+ -+ IMG_UINT32 ui32NumReqByApp; /* Number of Backing Requests from Application */ -+ IMG_UINT32 ui32NumReqByFW; /* Number of Backing Requests from Firmware */ -+ -+ IMG_PID owner; -+ -+ DLLIST_NODE sNode; -+}RGX_ZSBUFFER_DATA; -+ -+typedef struct { -+ RGX_ZSBUFFER_DATA *psZSBuffer; -+} RGX_POPULATION; -+ -+/* Dump the physical pages of a freelist */ -+IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList); -+ -+ -+/* Create set of HWRTData(s) */ -+PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_DEV_VIRTADDR asVHeapTableDevVAddr[RGXMKIF_NUM_GEOMDATAS], -+ IMG_DEV_VIRTADDR 
psPMMListDevVAddr[RGXMKIF_NUM_RTDATAS], -+ RGX_FREELIST *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS], -+ IMG_UINT32 ui32ScreenPixelMax, -+ IMG_UINT64 ui64MultiSampleCtl, -+ IMG_UINT64 ui64FlippedMultiSampleCtl, -+ IMG_UINT32 ui32TPCStride, -+ IMG_DEV_VIRTADDR asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS], -+ IMG_UINT32 ui32TPCSize, -+ IMG_UINT32 ui32TEScreen, -+ IMG_UINT32 ui32TEAA, -+ IMG_UINT32 ui32TEMTILE1, -+ IMG_UINT32 ui32TEMTILE2, -+ IMG_UINT32 ui32MTileStride, -+ IMG_UINT32 ui32ISPMergeLowerX, -+ IMG_UINT32 ui32ISPMergeLowerY, -+ IMG_UINT32 ui32ISPMergeUpperX, -+ IMG_UINT32 ui32ISPMergeUpperY, -+ IMG_UINT32 ui32ISPMergeScaleX, -+ IMG_UINT32 ui32ISPMergeScaleY, -+ IMG_DEV_VIRTADDR sMacrotileArrayDevVAddr[RGXMKIF_NUM_RTDATAS], -+ IMG_DEV_VIRTADDR sRgnHeaderDevVAddr[RGXMKIF_NUM_RTDATAS], -+ IMG_DEV_VIRTADDR asRTCDevVAddr[RGXMKIF_NUM_GEOMDATAS], -+ IMG_UINT32 uiRgnHeaderSize, -+ IMG_UINT32 ui32ISPMtileSize, -+ IMG_UINT16 ui16MaxRTs, -+ RGX_KM_HW_RT_DATASET *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]); -+ -+/* Destroy HWRTDataSet */ -+PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet); -+ -+/* -+ RGXCreateZSBufferKM -+*/ -+PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEMINT_RESERVATION *psReservation, -+ PMR *psPMR, -+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags, -+ RGX_ZSBUFFER_DATA **ppsZSBuffer); -+ -+/* -+ RGXDestroyZSBufferKM -+*/ -+PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer); -+ -+ -+/* -+ * RGXBackingZSBuffer() -+ * -+ * Backs ZS-Buffer with physical pages -+ */ -+PVRSRV_ERROR -+RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer); -+ -+/* -+ * RGXPopulateZSBufferKM() -+ * -+ * Backs ZS-Buffer with physical pages (called by Bridge calls) -+ */ -+PVRSRV_ERROR RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer, -+ RGX_POPULATION **ppsPopulation); -+ -+/* -+ * RGXUnbackingZSBuffer() -+ * -+ * Frees ZS-Buffer's physical pages -+ */ -+PVRSRV_ERROR RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer); -+ -+/* -+ * RGXUnpopulateZSBufferKM() -+ * -+ * Frees ZS-Buffer's physical pages (called by Bridge calls) -+ */ -+PVRSRV_ERROR RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation); -+ -+/* -+ RGXProcessRequestZSBufferBacking -+*/ -+void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32ZSBufferID); -+ -+/* -+ RGXProcessRequestZSBufferUnbacking -+*/ -+void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32ZSBufferID); -+ -+/* -+ RGXGrowFreeList -+*/ -+PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, -+ IMG_UINT32 ui32NumPages, -+ PDLLIST_NODE pListHeader); -+ -+/* Create free list */ -+PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_HANDLE hMemCtxPrivData, -+ IMG_UINT32 ui32MaxFLPages, -+ IMG_UINT32 ui32InitFLPages, -+ IMG_UINT32 ui32GrowFLPages, -+ IMG_UINT32 ui32GrowParamThreshold, -+ RGX_FREELIST *psGlobalFreeList, -+ IMG_BOOL bCheckFreelist, -+ IMG_DEV_VIRTADDR sFreeListDevVAddr, -+ PMR *psFreeListPMR, -+ IMG_DEVMEM_OFFSET_T uiFreeListPMROffset, -+ RGX_FREELIST **ppsFreeList); -+ -+/* Destroy free list */ -+PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList); -+ -+/* -+ RGXProcessRequestGrow -+*/ -+void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32FreelistID); -+ -+ -+/* Reconstruct free list after Hardware Recovery */ -+void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32FreelistsCount, -+ const 
IMG_UINT32 *paui32Freelists); -+ -+/*! -+******************************************************************************* -+ -+ @Function PVRSRVRGXCreateRenderContextKM -+ -+ @Description -+ Server-side implementation of RGXCreateRenderContext -+ -+ @Input psConnection - -+ @Input psDeviceNode - device node -+ @Input i32Priority - context priority -+ @Input sVDMCallStackAddr - VDM call stack device virtual address -+ @Input ui32CallStackDepth - VDM call stack depth -+ @Input ui32FrameworkCommandSize - framework command size -+ @Input pabyFrameworkCommand - ptr to framework command -+ @Input hMemCtxPrivData - memory context private data -+ @Input ui32StaticRenderContextStateSize - size of fixed render state -+ @Input pStaticRenderContextState - ptr to fixed render state buffer -+ @Input ui32PackedCCBSizeU8888 : -+ ui8TACCBAllocSizeLog2 - TA CCB size -+ ui8TACCBMaxAllocSizeLog2 - maximum size to which TA CCB can grow -+ ui83DCCBAllocSizeLog2 - 3D CCB size -+ ui83DCCBMaxAllocSizeLog2 - maximum size to which 3D CCB can grow -+ @Input ui32ContextFlags - flags which specify properties of the context -+ @Output ppsRenderContext - -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_INT32 i32Priority, -+ IMG_DEV_VIRTADDR sVDMCallStackAddr, -+ IMG_UINT32 ui32CallStackDepth, -+ IMG_UINT32 ui32FrameworkCommandSize, -+ IMG_PBYTE pabyFrameworkCommand, -+ IMG_HANDLE hMemCtxPrivData, -+ IMG_UINT32 ui32StaticRenderContextStateSize, -+ IMG_PBYTE pStaticRenderContextState, -+ IMG_UINT32 ui32PackedCCBSizeU8888, -+ IMG_UINT32 ui32ContextFlags, -+ IMG_UINT64 ui64RobustnessAddress, -+ IMG_UINT32 ui32MaxTADeadlineMS, -+ IMG_UINT32 ui32Max3DDeadlineMS, -+ RGX_SERVER_RENDER_CONTEXT **ppsRenderContext); -+ -+ -+/*! -+******************************************************************************* -+ -+ @Function PVRSRVRGXDestroyRenderContextKM -+ -+ @Description -+ Server-side implementation of RGXDestroyRenderContext -+ -+ @Input psRenderContext - -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext); -+ -+ -+/*! 
-+******************************************************************************* -+ -+ @Function PVRSRVRGXKickTA3DKM -+ -+ @Description -+ Server-side implementation of RGXKickTA3D -+ -+ @Input psRTDataCleanup - RT data associated with the kick (or NULL) -+ @Input psZBuffer - Z-buffer associated with the kick (or NULL) -+ @Input psSBuffer - S-buffer associated with the kick (or NULL) -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, -+ IMG_UINT32 ui32ClientTAFenceCount, -+ SYNC_PRIMITIVE_BLOCK **apsClientTAFenceSyncPrimBlock, -+ IMG_UINT32 *paui32ClientTAFenceSyncOffset, -+ IMG_UINT32 *paui32ClientTAFenceValue, -+ IMG_UINT32 ui32ClientTAUpdateCount, -+ SYNC_PRIMITIVE_BLOCK **apsClientUpdateSyncPrimBlock, -+ IMG_UINT32 *paui32ClientUpdateSyncOffset, -+ IMG_UINT32 *paui32ClientTAUpdateValue, -+ IMG_UINT32 ui32Client3DUpdateCount, -+ SYNC_PRIMITIVE_BLOCK **apsClient3DUpdateSyncPrimBlock, -+ IMG_UINT32 *paui32Client3DUpdateSyncOffset, -+ IMG_UINT32 *paui32Client3DUpdateValue, -+ SYNC_PRIMITIVE_BLOCK *psPRSyncPrimBlock, -+ IMG_UINT32 ui32PRSyncOffset, -+ IMG_UINT32 ui32PRFenceValue, -+ PVRSRV_FENCE iCheckFence, -+ PVRSRV_TIMELINE iUpdateTimeline, -+ PVRSRV_FENCE *piUpdateFence, -+ IMG_CHAR szFenceName[PVRSRV_SYNC_NAME_LENGTH], -+ PVRSRV_FENCE iCheckFence3D, -+ PVRSRV_TIMELINE iUpdateTimeline3D, -+ PVRSRV_FENCE *piUpdateFence3D, -+ IMG_CHAR szFenceName3D[PVRSRV_SYNC_NAME_LENGTH], -+ IMG_UINT32 ui32TACmdSize, -+ IMG_PBYTE pui8TADMCmd, -+ IMG_UINT32 ui323DPRCmdSize, -+ IMG_PBYTE pui83DPRDMCmd, -+ IMG_UINT32 ui323DCmdSize, -+ IMG_PBYTE pui83DDMCmd, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_BOOL bKickTA, -+ IMG_BOOL bKickPR, -+ IMG_BOOL bKick3D, -+ IMG_BOOL bAbort, -+ IMG_UINT32 ui32PDumpFlags, -+ RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, -+ RGX_ZSBUFFER_DATA *psZSBuffer, -+ RGX_ZSBUFFER_DATA *psMSAAScratchBuffer, -+ IMG_UINT32 ui32SyncPMRCount, -+ IMG_UINT32 *paui32SyncPMRFlags, -+ PMR **ppsSyncPMRs, -+ IMG_UINT32 ui32RenderTargetSize, -+ IMG_UINT32 ui32NumberOfDrawCalls, -+ IMG_UINT32 ui32NumberOfIndices, -+ IMG_UINT32 ui32NumberOfMRTs, -+ IMG_UINT64 ui64DeadlineInus); -+ -+ -+PVRSRV_ERROR PVRSRVRGXSendZSStoreDisableKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ RGX_SERVER_RENDER_CONTEXT *psRenderContext, -+ IMG_BOOL bDisableDepthStore, -+ IMG_BOOL bDisableStencilStore, -+ IMG_UINT32 ui32ExtJobRefToDisableZSStore); -+ -+ -+PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE * psDevNode, -+ RGX_SERVER_RENDER_CONTEXT *psRenderContext, -+ IMG_INT32 i32Priority); -+ -+PVRSRV_ERROR PVRSRVRGXSetRenderContextPropertyKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, -+ RGX_CONTEXT_PROPERTY eContextProperty, -+ IMG_UINT64 ui64Input, -+ IMG_UINT64 *pui64Output); -+ -+/* Debug - Dump debug info of render contexts on this device */ -+void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ IMG_UINT32 ui32VerbLevel); -+ -+/* Debug/Watchdog - check if client contexts are stalled */ -+IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext); -+ -+#endif /* RGXTA3D_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxtdmtransfer.c b/drivers/gpu/drm/img-rogue/rgxtdmtransfer.c -new file mode 100644 -index 000000000000..111111111111 ---- 
/dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxtdmtransfer.c -@@ -0,0 +1,1374 @@ -+/*************************************************************************/ /*! -+@File rgxtdmtransfer.c -+@Title Device specific TDM transfer queue routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device specific functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#include "pdump_km.h" -+#include "rgxdevice.h" -+#include "rgxccb.h" -+#include "rgxutils.h" -+#include "rgxfwcmnctx.h" -+#include "rgxtdmtransfer.h" -+#include "rgx_tq_shared.h" -+#include "rgxmem.h" -+#include "allocmem.h" -+#include "devicemem.h" -+#include "devicemem_pdump.h" -+#include "osfunc.h" -+#include "pvr_debug.h" -+#include "pvrsrv.h" -+#include "rgx_fwif_resetframework.h" -+#include "rgx_memallocflags.h" -+#include "rgxhwperf.h" -+#include "ospvr_gputrace.h" -+#include "rgxshader.h" -+ -+#include "pdump_km.h" -+ -+#include "sync_server.h" -+#include "sync_internal.h" -+#include "sync.h" -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+#include "pvr_buffer_sync.h" -+#endif -+ -+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) -+#include "validation_soc.h" -+#endif -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+#include "rgxworkest.h" -+#endif -+ -+#include "rgxtimerquery.h" -+ -+/* Enable this to dump the compiled list of UFOs prior to kick call */ -+#define ENABLE_TDM_UFO_DUMP 0 -+ -+//#define TDM_CHECKPOINT_DEBUG 1 -+ -+#if defined(TDM_CHECKPOINT_DEBUG) -+#define CHKPT_DBG(X) PVR_DPF(X) -+#else -+#define CHKPT_DBG(X) -+#endif -+ -+typedef struct { -+ RGX_SERVER_COMMON_CONTEXT * psServerCommonContext; -+ IMG_INT32 i32Priority; -+#if defined(SUPPORT_BUFFER_SYNC) -+ struct pvr_buffer_sync_context *psBufferSyncContext; -+#endif -+} RGX_SERVER_TQ_TDM_DATA; -+ -+ -+struct _RGX_SERVER_TQ_TDM_CONTEXT_ { -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ DEVMEM_MEMDESC *psFWFrameworkMemDesc; -+ DEVMEM_MEMDESC *psFWTransferContextMemDesc; -+ IMG_UINT32 ui32Flags; -+ RGX_SERVER_TQ_TDM_DATA sTDMData; -+ DLLIST_NODE sListNode; -+ SYNC_ADDR_LIST sSyncAddrListFence; -+ SYNC_ADDR_LIST sSyncAddrListUpdate; -+ POS_LOCK hLock; -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ WORKEST_HOST_DATA sWorkEstData; -+#endif -+}; -+ -+static PVRSRV_ERROR _CreateTDMTransferContext( -+ CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ DEVMEM_MEMDESC * psAllocatedMemDesc, -+ IMG_UINT32 ui32AllocatedOffset, -+ SERVER_MMU_CONTEXT * psServerMMUContext, -+ DEVMEM_MEMDESC * psFWMemContextMemDesc, -+ IMG_INT32 i32Priority, -+ RGX_COMMON_CONTEXT_INFO * psInfo, -+ RGX_SERVER_TQ_TDM_DATA * psTDMData, -+ IMG_UINT32 ui32CCBAllocSizeLog2, -+ IMG_UINT32 ui32CCBMaxAllocSizeLog2, -+ IMG_UINT32 ui32ContextFlags, -+ IMG_UINT64 ui64RobustnessAddress) -+{ -+ PVRSRV_ERROR eError; -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ psTDMData->psBufferSyncContext = -+ pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, -+ "rogue-tdm"); -+ if (IS_ERR(psTDMData->psBufferSyncContext)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: failed to create buffer_sync context (err=%ld)", -+ __func__, PTR_ERR(psTDMData->psBufferSyncContext))); -+ -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto fail_buffer_sync_context_create; -+ } -+#endif -+ -+ eError = FWCommonContextAllocate( -+ psConnection, -+ psDeviceNode, -+ REQ_TYPE_TQ_TDM, -+ RGXFWIF_DM_TDM, -+ psServerMMUContext, -+ psAllocatedMemDesc, -+ ui32AllocatedOffset, -+ psFWMemContextMemDesc, -+ NULL, -+ ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TDM_CCB_SIZE_LOG2, -+ ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_TDM_CCB_MAX_SIZE_LOG2, -+ ui32ContextFlags, -+ i32Priority, -+ UINT_MAX, /* max deadline MS */ -+ ui64RobustnessAddress, -+ psInfo, -+ &psTDMData->psServerCommonContext); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_contextalloc; -+ } -+ -+ psTDMData->i32Priority = i32Priority; -+ return PVRSRV_OK; -+ -+fail_contextalloc: -+#if defined(SUPPORT_BUFFER_SYNC) -+ pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext); -+ psTDMData->psBufferSyncContext = NULL; -+fail_buffer_sync_context_create: -+#endif -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+ -+static PVRSRV_ERROR _DestroyTDMTransferContext( -+ RGX_SERVER_TQ_TDM_DATA * psTDMData, -+ PVRSRV_DEVICE_NODE * psDeviceNode) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Check if the FW has finished with this resource ... */ -+ eError = RGXFWRequestCommonContextCleanUp( -+ psDeviceNode, -+ psTDMData->psServerCommonContext, -+ RGXFWIF_DM_TDM, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (RGXIsErrorAndDeviceRecoverable(psDeviceNode, &eError)) -+ { -+ return eError; -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ } -+ -+ /* ... it has so we can free it's resources */ -+ FWCommonContextFree(psTDMData->psServerCommonContext); -+ psTDMData->psServerCommonContext = NULL; -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext); -+ psTDMData->psBufferSyncContext = NULL; -+#endif -+ -+ return eError; -+} -+ -+/* -+ * PVRSRVCreateTransferContextKM -+ */ -+PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM( -+ CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_INT32 i32Priority, -+ IMG_UINT32 ui32FrameworkCommandSize, -+ IMG_PBYTE pabyFrameworkCommand, -+ IMG_HANDLE hMemCtxPrivData, -+ IMG_UINT32 ui32PackedCCBSizeU88, -+ IMG_UINT32 ui32ContextFlags, -+ IMG_UINT64 ui64RobustnessAddress, -+ RGX_SERVER_TQ_TDM_CONTEXT ** ppsTransferContext) -+{ -+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext; -+ -+ DEVMEM_MEMDESC * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); -+ PVRSRV_RGXDEV_INFO * psDevInfo = psDeviceNode->pvDevice; -+ RGX_COMMON_CONTEXT_INFO sInfo = {NULL}; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ /* Allocate the server side structure */ -+ *ppsTransferContext = NULL; -+ psTransferContext = OSAllocZMem(sizeof(*psTransferContext)); -+ if (psTransferContext == NULL) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ /* -+ Create the FW transfer context, this has the TDM common -+ context embedded within it -+ */ -+ eError = DevmemFwAllocate(psDevInfo, -+ sizeof(RGXFWIF_FWTDMCONTEXT), -+ RGX_FWCOMCTX_ALLOCFLAGS, -+ "FwTransferContext", -+ &psTransferContext->psFWTransferContextMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_fwtransfercontext; -+ } -+ -+ eError = OSLockCreate(&psTransferContext->hLock); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_lockcreate; -+ } -+ -+ psTransferContext->psDeviceNode = psDeviceNode; -+ -+ if (ui32FrameworkCommandSize) -+ { -+ /* -+ * Create the FW framework buffer -+ */ -+ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, -+ &psTransferContext->psFWFrameworkMemDesc, -+ ui32FrameworkCommandSize); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate firmware GPU framework state (%s)", -+ __func__, -+ 
PVRSRVGetErrorString(eError))); -+ goto fail_frameworkcreate; -+ } -+ -+ /* Copy the Framework client data into the framework buffer */ -+ eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode, -+ psTransferContext->psFWFrameworkMemDesc, -+ pabyFrameworkCommand, -+ ui32FrameworkCommandSize); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to populate the framework buffer (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_frameworkcopy; -+ } -+ -+ sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc; -+ } -+ -+ eError = _CreateTDMTransferContext(psConnection, -+ psDeviceNode, -+ psTransferContext->psFWTransferContextMemDesc, -+ offsetof(RGXFWIF_FWTDMCONTEXT, sTDMContext), -+ hMemCtxPrivData, -+ psFWMemContextMemDesc, -+ i32Priority, -+ &sInfo, -+ &psTransferContext->sTDMData, -+ U32toU8_Unpack1(ui32PackedCCBSizeU88), -+ U32toU8_Unpack2(ui32PackedCCBSizeU88), -+ ui32ContextFlags, -+ ui64RobustnessAddress); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_tdmtransfercontext; -+ } -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ WorkEstInitTDM(psDevInfo, &psTransferContext->sWorkEstData); -+ } -+#endif -+ -+ SyncAddrListInit(&psTransferContext->sSyncAddrListFence); -+ SyncAddrListInit(&psTransferContext->sSyncAddrListUpdate); -+ -+ OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock); -+ dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode)); -+ OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock); -+ *ppsTransferContext = psTransferContext; -+ -+ return PVRSRV_OK; -+ -+fail_tdmtransfercontext: -+fail_frameworkcopy: -+ if (psTransferContext->psFWFrameworkMemDesc != NULL) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc); -+ } -+fail_frameworkcreate: -+ OSLockDestroy(psTransferContext->hLock); -+fail_lockcreate: -+ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc); -+fail_fwtransfercontext: -+ OSFreeMem(psTransferContext); -+ PVR_ASSERT(eError != PVRSRV_OK); -+ *ppsTransferContext = NULL; -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRGXTDMGetSharedMemoryKM( -+ CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ PMR ** ppsCLIPMRMem, -+ PMR ** ppsUSCPMRMem) -+{ -+ PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVRGXTDMReleaseSharedMemoryKM(PMR * psPMRMem) -+{ -+ PVR_UNREFERENCED_PARAMETER(psPMRMem); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice; -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ RGXFWIF_FWTDMCONTEXT *psFWTransferContext; -+ IMG_UINT32 ui32WorkEstCCBSubmitted; -+ -+ eError = DevmemAcquireCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc, -+ (void **)&psFWTransferContext); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to map firmware transfer context (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ return eError; -+ } -+ -+ RGXFwSharedMemCacheOpValue(psFWTransferContext->ui32WorkEstCCBSubmitted, INVALIDATE); -+ ui32WorkEstCCBSubmitted = psFWTransferContext->ui32WorkEstCCBSubmitted; -+ -+ DevmemReleaseCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc); -+ -+ /* Check if all of the workload estimation CCB commands for this workload are read */ -+ if 
(ui32WorkEstCCBSubmitted != psTransferContext->sWorkEstData.ui32WorkEstCCBReceived) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", -+ __func__, ui32WorkEstCCBSubmitted, -+ psTransferContext->sWorkEstData.ui32WorkEstCCBReceived)); -+ -+ return PVRSRV_ERROR_RETRY; -+ } -+ } -+#endif -+ -+ -+ /* remove node from list before calling destroy - as destroy, if successful -+ * will invalidate the node -+ * must be re-added if destroy fails -+ */ -+ OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock); -+ dllist_remove_node(&(psTransferContext->sListNode)); -+ OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock); -+ -+ -+ eError = _DestroyTDMTransferContext(&psTransferContext->sTDMData, -+ psTransferContext->psDeviceNode); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_destroyTDM; -+ } -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ WorkEstDeInitTDM(psDevInfo, &psTransferContext->sWorkEstData); -+ } -+#endif -+ -+ if (psTransferContext->psFWFrameworkMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc); -+ } -+ -+ SyncAddrListDeinit(&psTransferContext->sSyncAddrListFence); -+ SyncAddrListDeinit(&psTransferContext->sSyncAddrListUpdate); -+ -+ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc); -+ -+ OSLockDestroy(psTransferContext->hLock); -+ -+ OSFreeMem(psTransferContext); -+ -+ return PVRSRV_OK; -+ -+fail_destroyTDM: -+ -+ OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock); -+ dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode)); -+ OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock); -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+ -+/* -+ * PVRSRVSubmitTQ3DKickKM -+ */ -+PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( -+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext, -+ IMG_UINT32 ui32PDumpFlags, -+ IMG_UINT32 ui32ClientUpdateCount, -+ SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock, -+ IMG_UINT32 * paui32ClientUpdateSyncOffset, -+ IMG_UINT32 * paui32ClientUpdateValue, -+ PVRSRV_FENCE iCheckFence, -+ PVRSRV_TIMELINE iUpdateTimeline, -+ PVRSRV_FENCE * piUpdateFence, -+ IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], -+ IMG_UINT32 ui32FWCommandSize, -+ IMG_UINT8 * pui8FWCommand, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32SyncPMRCount, -+ IMG_UINT32 * paui32SyncPMRFlags, -+ PMR ** ppsSyncPMRs, -+ IMG_UINT32 ui32TDMCharacteristic1, -+ IMG_UINT32 ui32TDMCharacteristic2, -+ IMG_UINT64 ui64DeadlineInus) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode; -+ RGX_CCB_CMD_HELPER_DATA *psCmdHelper; -+ PRGXFWIF_UFO_ADDR * pauiIntFenceUFOAddress = NULL; -+ PRGXFWIF_UFO_ADDR * pauiIntUpdateUFOAddress = NULL; -+ IMG_UINT32 ui32IntClientFenceCount = 0; -+ IMG_UINT32 * paui32IntUpdateValue = paui32ClientUpdateValue; -+ IMG_UINT32 ui32IntClientUpdateCount = ui32ClientUpdateCount; -+ PVRSRV_ERROR eError; -+ PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE; -+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psTransferContext->sTDMData.psServerCommonContext); -+ RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext); -+ IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); -+ -+ IMG_UINT64 ui64FBSCEntryMask = 0; -+ -+ IMG_BOOL bCCBStateOpen; -+ -+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr; -+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr; -+ PRGXFWIF_UFO_ADDR pRMWUFOAddr; -+ -+ IMG_UINT64 uiCheckFenceUID = 0; -+ IMG_UINT64 
uiUpdateFenceUID = 0; -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ IMG_UINT32 ui32CmdOffset = 0; -+ RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTransfer = {0}; -+ IMG_UINT32 ui32TDMWorkloadDataRO = 0; -+ IMG_UINT32 ui32TDMCmdHeaderOffset = 0; -+ IMG_UINT32 ui32TDMCmdOffsetWrapCheck = 0; -+ RGX_WORKLOAD sWorkloadCharacteristics = {0}; -+#endif -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ struct pvr_buffer_sync_append_data *psBufferSyncData = NULL; -+ PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL; -+ IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0; -+ PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL; -+#endif -+ -+ PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; -+ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; -+ IMG_UINT32 ui32FenceSyncCheckpointCount = 0; -+ IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; -+ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL; -+ IMG_UINT32 ui32FenceTimelineUpdateValue = 0; -+ void *pvUpdateFenceFinaliseData = NULL; -+ -+ if (iUpdateTimeline >= 0 && !piUpdateFence) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+#if !defined(SUPPORT_WORKLOAD_ESTIMATION) -+ PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic1); -+ PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic2); -+ PVR_UNREFERENCED_PARAMETER(ui64DeadlineInus); -+#endif -+ -+ /* Ensure we haven't been given a null ptr to -+ * update values if we have been told we -+ * have updates -+ */ -+ if (ui32ClientUpdateCount > 0) -+ { -+ PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL, -+ "paui32ClientUpdateValue NULL but " -+ "ui32ClientUpdateCount > 0", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ /* Ensure the string is null-terminated (Required for safety) */ -+ szUpdateFenceName[31] = '\0'; -+ -+ if (ui32SyncPMRCount != 0) -+ { -+ if (!ppsSyncPMRs) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ } -+ -+ OSLockAcquire(psTransferContext->hLock); -+ -+ /* We can't allocate the required amount of stack space on all consumer architectures */ -+ psCmdHelper = OSAllocMem(sizeof(RGX_CCB_CMD_HELPER_DATA)); -+ if (psCmdHelper == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto fail_allochelper; -+ } -+ -+ -+ /* -+ Init the command helper commands for all the prepares -+ */ -+ { -+ IMG_CHAR *pszCommandName; -+ RGXFWIF_CCB_CMD_TYPE eType; -+#if defined(SUPPORT_BUFFER_SYNC) -+ struct pvr_buffer_sync_context *psBufferSyncContext; -+#endif -+ -+ pszCommandName = "TQ-TDM"; -+ -+ if (ui32FWCommandSize == 0) -+ { -+ /* A NULL CMD for TDM is used to append updates to a non finished -+ * FW command. bCCBStateOpen is used in case capture range is -+ * entered on this command, to not drain CCB up to the Roff for this -+ * command, but the finished command prior to this. 
-+ */ -+ bCCBStateOpen = IMG_TRUE; -+ eType = RGXFWIF_CCB_CMD_TYPE_NULL; -+ } -+ else -+ { -+ bCCBStateOpen = IMG_FALSE; -+ eType = RGXFWIF_CCB_CMD_TYPE_TQ_TDM; -+ } -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ psBufferSyncContext = psTransferContext->sTDMData.psBufferSyncContext; -+#endif -+ -+ eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListFence, -+ 0, -+ NULL, -+ NULL); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_populate_sync_addr_list; -+ } -+ -+ eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListUpdate, -+ ui32ClientUpdateCount, -+ pauiClientUpdateUFODevVarBlock, -+ paui32ClientUpdateSyncOffset); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_populate_sync_addr_list; -+ } -+ paui32IntUpdateValue = paui32ClientUpdateValue; -+ pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs; -+ -+ -+ if (ui32SyncPMRCount) -+ { -+#if defined(SUPPORT_BUFFER_SYNC) -+ int err; -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling pvr_buffer_sync_resolve_and_create_fences", __func__)); -+ err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext, -+ psTransferContext->psDeviceNode->hSyncCheckpointContext, -+ ui32SyncPMRCount, -+ ppsSyncPMRs, -+ paui32SyncPMRFlags, -+ &ui32BufferFenceSyncCheckpointCount, -+ &apsBufferFenceSyncCheckpoints, -+ &psBufferUpdateSyncCheckpoint, -+ &psBufferSyncData); -+ if (err) -+ { -+ switch (err) -+ { -+ case -EINTR: -+ eError = PVRSRV_ERROR_RETRY; -+ break; -+ case -ENOMEM: -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ break; -+ default: -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ break; -+ } -+ -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: pvr_buffer_sync_resolve_and_create_fences failed (%s)", __func__, PVRSRVGetErrorString(eError))); -+ } -+ goto fail_resolve_input_fence; -+ } -+ -+ /* Append buffer sync fences */ -+ if (ui32BufferFenceSyncCheckpointCount > 0) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence , (void*)pauiIntFenceUFOAddress)); -+ SyncAddrListAppendAndDeRefCheckpoints(&psTransferContext->sSyncAddrListFence, -+ ui32BufferFenceSyncCheckpointCount, -+ apsBufferFenceSyncCheckpoints); -+ if (!pauiIntFenceUFOAddress) -+ { -+ pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs; -+ } -+ ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount; -+ } -+ -+ if (psBufferUpdateSyncCheckpoint) -+ { -+ /* Append the update (from output fence) */ -+ SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate, -+ 1, -+ &psBufferUpdateSyncCheckpoint); -+ if (!pauiIntUpdateUFOAddress) -+ { -+ pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs; -+ } -+ ui32IntClientUpdateCount++; -+ } -+#else /* defined(SUPPORT_BUFFER_SYNC) */ -+ PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto fail_populate_sync_addr_list; -+#endif /* defined(SUPPORT_BUFFER_SYNC) */ -+ } -+ -+ /* Resolve the sync checkpoints that make up the input fence */ -+ eError = SyncCheckpointResolveFence(psTransferContext->psDeviceNode->hSyncCheckpointContext, -+ iCheckFence, -+ &ui32FenceSyncCheckpointCount, -+ &apsFenceSyncCheckpoints, -+ &uiCheckFenceUID, -+ ui32PDumpFlags); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_resolve_input_fence; -+ } -+#if 
defined(TDM_CHECKPOINT_DEBUG) -+ { -+ IMG_UINT32 ii; -+ for (ii=0; ii<32; ii++) -+ { -+ PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii); -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint)); //psFenceSyncCheckpoints[ii])); -+ } -+ } -+#endif -+ /* Create the output fence (if required) */ -+ if (iUpdateTimeline != PVRSRV_NO_TIMELINE) -+ { -+ eError = SyncCheckpointCreateFence(psTransferContext->psDeviceNode, -+ szUpdateFenceName, -+ iUpdateTimeline, -+ psTransferContext->psDeviceNode->hSyncCheckpointContext, -+ &iUpdateFence, -+ &uiUpdateFenceUID, -+ &pvUpdateFenceFinaliseData, -+ &psUpdateSyncCheckpoint, -+ (void*)&psFenceTimelineUpdateSync, -+ &ui32FenceTimelineUpdateValue, -+ ui32PDumpFlags); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_create_output_fence; -+ } -+ -+ /* Append the sync prim update for the timeline (if required) */ -+ if (psFenceTimelineUpdateSync) -+ { -+ IMG_UINT32 *pui32TimelineUpdateWp = NULL; -+ -+ /* Allocate memory to hold the list of update values (including our timeline update) */ -+ pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); -+ if (!pui32IntAllocatedUpdateValues) -+ { -+ /* Failed to allocate memory */ -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto fail_alloc_update_values_mem; -+ } -+ OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); -+ /* Copy the update values into the new memory, then append our timeline update value */ -+ if (paui32IntUpdateValue) -+ { -+ OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount); -+ } -+ /* Now set the additional update value */ -+ pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount; -+ *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue; -+ ui32IntClientUpdateCount++; -+#if defined(TDM_CHECKPOINT_DEBUG) -+ { -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; -+ -+ for (iii=0; iii<ui32IntClientUpdateCount; iii++) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+ } -+#endif -+ /* Now append the timeline sync prim addr to the transfer context update list */ -+ SyncAddrListAppendSyncPrim(&psTransferContext->sSyncAddrListUpdate, -+ psFenceTimelineUpdateSync); -+#if defined(TDM_CHECKPOINT_DEBUG) -+ { -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; -+ -+ for (iii=0; iii<ui32IntClientUpdateCount; iii++) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+ } -+#endif -+ /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */ -+ paui32IntUpdateValue = pui32IntAllocatedUpdateValues; -+ } -+ } -+ -+ if (ui32FenceSyncCheckpointCount > 0) -+ { -+ /* Append the checks (from input fence) */ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence)); -+#if defined(TDM_CHECKPOINT_DEBUG) -+ { -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress; -+ -+ for (iii=0; iii<ui32IntClientUpdateCount; iii++) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+ } -+#endif -+ SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListFence, -+ ui32FenceSyncCheckpointCount, -+ apsFenceSyncCheckpoints); -+ if (!pauiIntFenceUFOAddress) -+ { -+ 
pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs; -+ } -+ ui32IntClientFenceCount += ui32FenceSyncCheckpointCount; -+ -+#if defined(TDM_CHECKPOINT_DEBUG) -+ { -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; -+ -+ for (iii=0; iii<ui32IntClientUpdateCount; iii++) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+ } -+#endif -+ } -+ if (psUpdateSyncCheckpoint) -+ { -+ /* Append the update (from output fence) */ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to TQ Update (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->sSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress)); -+ SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate, -+ 1, -+ &psUpdateSyncCheckpoint); -+ if (!pauiIntUpdateUFOAddress) -+ { -+ pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs; -+ } -+ ui32IntClientUpdateCount++; -+#if defined(TDM_CHECKPOINT_DEBUG) -+ { -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues; -+ -+ for (iii=0; iii<ui32IntClientUpdateCount; iii++) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+ } -+#endif -+ } -+ -+#if (ENABLE_TDM_UFO_DUMP == 1) -+ PVR_DPF((PVR_DBG_ERROR, "%s: dumping TDM fence/updates syncs...", __func__)); -+ { -+ IMG_UINT32 ii; -+ PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress; -+ PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress; -+ IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue; -+ -+ /* Dump Fence syncs and Update syncs */ -+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM fence syncs (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress)); -+ for (ii=0; ii<ui32IntClientFenceCount; ii++) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr)); -+ psTmpIntFenceUFOAddress++; -+ } -+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM update syncs (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress)); -+ for (ii=0; ii<ui32IntClientUpdateCount; ii++) -+ { -+ if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue)); -+ pui32TmpIntUpdateValue++; -+ } -+ psTmpIntUpdateUFOAddress++; -+ } -+ } -+#endif -+ -+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psTransferContext->psDeviceNode->pvDevice, -+ &pPreAddr, -+ &pPostAddr, -+ &pRMWUFOAddr); -+ -+#if defined(RGX_FBSC_INVALIDATE_COMMAND_SUPPORTED) -+ /* -+ * Extract the FBSC entries from MMU Context for the deferred FBSC invalidate command, -+ * in other words, take the value and set it to zero afterwards. -+ * FBSC Entry Mask must be extracted from MMU ctx and updated just before the kick starts -+ * as it must be ready at the time of context activation. 
-+ */ -+ { -+ eError = RGXExtractFBSCEntryMaskFromMMUContext(psTransferContext->psDeviceNode, -+ FWCommonContextGetServerMMUCtx(psTransferContext->sTDMData.psServerCommonContext), -+ &ui64FBSCEntryMask); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to extract FBSC Entry Mask (%d)", eError)); -+ goto fail_invalfbsc; -+ } -+ } -+#endif -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ sWorkloadCharacteristics.sTransfer.ui32Characteristic1 = ui32TDMCharacteristic1; -+ sWorkloadCharacteristics.sTransfer.ui32Characteristic2 = ui32TDMCharacteristic2; -+ -+ /* Prepare workload estimation */ -+ WorkEstPrepare(psDeviceNode->pvDevice, -+ &psTransferContext->sWorkEstData, -+ &psTransferContext->sWorkEstData.uWorkloadMatchingData.sTransfer.sDataTDM, -+ eType, -+ &sWorkloadCharacteristics, -+ ui64DeadlineInus, -+ &sWorkloadKickDataTransfer); -+ } -+#endif -+ -+ /* -+ Create the command helper data for this command -+ */ -+ RGXCmdHelperInitCmdCCB(psDevInfo, -+ psClientCCB, -+ ui64FBSCEntryMask, -+ ui32IntClientFenceCount, -+ pauiIntFenceUFOAddress, -+ NULL, -+ ui32IntClientUpdateCount, -+ pauiIntUpdateUFOAddress, -+ paui32IntUpdateValue, -+ ui32FWCommandSize, -+ pui8FWCommand, -+ &pPreAddr, -+ &pPostAddr, -+ &pRMWUFOAddr, -+ eType, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ ui32PDumpFlags, -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ &sWorkloadKickDataTransfer, -+#else /* SUPPORT_WORKLOAD_ESTIMATION */ -+ NULL, -+#endif /* SUPPORT_WORKLOAD_ESTIMATION */ -+ pszCommandName, -+ bCCBStateOpen, -+ psCmdHelper); -+ } -+ -+ /* -+ Acquire space for all the commands in one go -+ */ -+ -+ eError = RGXCmdHelperAcquireCmdCCB(1, psCmdHelper); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_3dcmdacquire; -+ } -+ -+ -+ /* -+ We should acquire the kernel CCB(s) space here as the schedule could fail -+ and we would have to roll back all the syncs -+ */ -+ -+ /* -+ Only do the command helper release (which takes the server sync -+ operations if the acquire succeeded -+ */ -+ -+ eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ -+ /* If system is found powered OFF, Retry scheduling the command */ -+ if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) -+ { -+ eError = PVRSRV_ERROR_RETRY; -+ } -+ goto fail_acquirepowerlock; -+ } -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ ui32CmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext)); -+ } -+#endif -+ RGXCmdHelperReleaseCmdCCB(1, -+ psCmdHelper, -+ "TQ_TDM", -+ FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr); -+ -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* The following is used to determine the offset of the command header containing -+ the workload estimation data so that can be accessed when the KCCB is read */ -+ ui32TDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(psCmdHelper); -+ -+ ui32TDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext)); -+ -+ /* This checks if the command would wrap around at the end of the CCB and -+ * therefore would start at an offset of 0 rather than the current command -+ * offset */ -+ if (ui32CmdOffset < ui32TDMCmdOffsetWrapCheck) -+ { -+ 
ui32TDMWorkloadDataRO = ui32CmdOffset; -+ } -+ else -+ { -+ ui32TDMWorkloadDataRO = 0; -+ } -+ } -+#endif -+ -+ /* -+ Even if we failed to acquire the client CCB space we might still need -+ to kick the HW to process a padding packet to release space for us next -+ time round -+ */ -+ { -+ RGXFWIF_KCCB_CMD sTDMKCCBCmd; -+ IMG_UINT32 ui32FWAddr = FWCommonContextGetFWAddress( -+ psTransferContext->sTDMData.psServerCommonContext).ui32Addr; -+ -+ /* Construct the kernel 3D CCB command. */ -+ sTDMKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; -+ sTDMKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext); -+ sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); -+ sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); -+ sTDMKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; -+ -+ /* Add the Workload data into the KCCB kick */ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Store the offset to the CCCB command header so that it can be referenced -+ * when the KCCB command reaches the FW */ -+ sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32TDMWorkloadDataRO + ui32TDMCmdHeaderOffset; -+ } -+#endif -+ -+ /* HTBLOGK(HTB_SF_MAIN_KICK_TDM, */ -+ /* s3DKCCBCmd.uCmdData.sCmdKickData.psContext, */ -+ /* ui323DCmdOffset); */ -+ RGXSRV_HWPERF_ENQ(psTransferContext, -+ OSGetCurrentClientProcessIDKM(), -+ FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ RGX_HWPERF_KICK_TYPE2_TQTDM, -+ iCheckFence, -+ iUpdateFence, -+ iUpdateTimeline, -+ uiCheckFenceUID, -+ uiUpdateFenceUID, -+ NO_DEADLINE, -+ NO_CYCEST); -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommandWithoutPowerLock(psDeviceNode->pvDevice, -+ RGXFWIF_DM_TDM, -+ & sTDMKCCBCmd, -+ ui32PDumpFlags); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ PVRSRVPowerUnlock(psDevInfo->psDeviceNode); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXTDMSubmitTransferKM failed to schedule kernel CCB command. 
(0x%x)", eError)); -+ goto fail_2dcmdacquire; -+ } -+ -+ PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWAddr, ui32ExtJobRef, -+ ui32IntJobRef, RGX_HWPERF_KICK_TYPE2_TQTDM); -+ } -+ -+#if defined(NO_HARDWARE) -+ /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ -+ if (psUpdateSyncCheckpoint) -+ { -+ SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint); -+ } -+ if (psFenceTimelineUpdateSync) -+ { -+ SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue); -+ } -+ SyncCheckpointNoHWUpdateTimelines(NULL); -+#endif /* defined(NO_HARDWARE) */ -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ if (psBufferSyncData) -+ { -+ pvr_buffer_sync_kick_succeeded(psBufferSyncData); -+ } -+ if (apsBufferFenceSyncCheckpoints) -+ { -+ kfree(apsBufferFenceSyncCheckpoints); -+ } -+#endif /* defined(SUPPORT_BUFFER_SYNC) */ -+ -+ * piUpdateFence = iUpdateFence; -+ if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE)) -+ { -+ SyncCheckpointFinaliseFence(psDeviceNode, iUpdateFence, pvUpdateFenceFinaliseData, -+ psUpdateSyncCheckpoint, szUpdateFenceName); -+ } -+ -+ OSFreeMem(psCmdHelper); -+ -+ /* Drop the references taken on the sync checkpoints in the -+ * resolved input fence */ -+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, -+ apsFenceSyncCheckpoints); -+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ -+ if (apsFenceSyncCheckpoints) -+ { -+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); -+ } -+ /* Free memory allocated to hold the internal list of update values */ -+ if (pui32IntAllocatedUpdateValues) -+ { -+ OSFreeMem(pui32IntAllocatedUpdateValues); -+ pui32IntAllocatedUpdateValues = NULL; -+ } -+ -+ OSLockRelease(psTransferContext->hLock); -+ return PVRSRV_OK; -+ -+/* -+ No resources are created in this function so there is nothing to free -+ unless we had to merge syncs. 
-+ If we fail after the client CCB acquire there is still nothing to do -+ as only the client CCB release will modify the client CCB -+*/ -+fail_2dcmdacquire: -+fail_acquirepowerlock: -+fail_3dcmdacquire: -+#if defined(RGX_FBSC_INVALIDATE_COMMAND_SUPPORTED) -+fail_invalfbsc: -+#endif -+ SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListFence); -+ SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListUpdate); -+ -+ /* Free memory allocated to hold the internal list of update values */ -+ if (pui32IntAllocatedUpdateValues) -+ { -+ OSFreeMem(pui32IntAllocatedUpdateValues); -+ pui32IntAllocatedUpdateValues = NULL; -+ } -+fail_alloc_update_values_mem: -+ -+/* fail_pdumpcheck: */ -+/* fail_cmdtype: */ -+ -+ if (iUpdateFence != PVRSRV_NO_FENCE) -+ { -+ SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData); -+ } -+fail_create_output_fence: -+ /* Drop the references taken on the sync checkpoints in the -+ * resolved input fence */ -+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, -+ apsFenceSyncCheckpoints); -+ -+fail_resolve_input_fence: -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ if (psBufferSyncData) -+ { -+ pvr_buffer_sync_kick_failed(psBufferSyncData); -+ } -+ if (apsBufferFenceSyncCheckpoints) -+ { -+ kfree(apsBufferFenceSyncCheckpoints); -+ } -+#endif /* defined(SUPPORT_BUFFER_SYNC) */ -+ -+fail_populate_sync_addr_list: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ OSFreeMem(psCmdHelper); -+fail_allochelper: -+ -+ if (apsFenceSyncCheckpoints) -+ { -+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); -+ } -+ OSLockRelease(psTransferContext->hLock); -+ return eError; -+} -+ -+ -+PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM( -+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ RGXFWIF_KCCB_CMD sKCCBCmd; -+ PVRSRV_ERROR eError; -+ -+ OSLockAcquire(psTransferContext->hLock); -+ -+ /* Schedule the firmware command */ -+ sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE; -+ sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext); -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError = RGXScheduleCommand(psTransferContext->psDeviceNode->pvDevice, -+ RGXFWIF_DM_TDM, -+ &sKCCBCmd, -+ ui32PDumpFlags); -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to schedule the FW command %d (%s)", -+ __func__, eError, PVRSRVGETERRORSTRING(eError))); -+ } -+ -+ OSLockRelease(psTransferContext->hLock); -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, -+ IMG_INT32 i32Priority) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ -+ OSLockAcquire(psTransferContext->hLock); -+ -+ if (psTransferContext->sTDMData.i32Priority != i32Priority) -+ { -+ eError = ContextSetPriority(psTransferContext->sTDMData.psServerCommonContext, -+ psConnection, -+ psTransferContext->psDeviceNode->pvDevice, -+ i32Priority, -+ RGXFWIF_DM_TDM); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority (%s)", __func__, PVRSRVGetErrorString(eError))); -+ -+ OSLockRelease(psTransferContext->hLock); -+ return eError; -+ } -+ } -+ 
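For context, both the TDM kick and the write-offset notify above wrap their firmware submission in the same bounded retry: keep scheduling while the firmware queue reports "retry", backing off a slice of MAX_HW_TIME_US each pass, and give up once the budget is spent. A minimal sketch of that shape, using placeholder names (submit_cmd, wait_us, the counts) rather than the driver's LOOP_UNTIL_TIMEOUT machinery:

    /* Sketch only: bounded retry mirroring the LOOP_UNTIL_TIMEOUT pattern
     * above.  submit_cmd() and wait_us() are placeholders, not driver APIs. */
    enum { STATUS_OK = 0, STATUS_RETRY = 1 };

    static int submit_with_retry(void *ctx, const void *cmd)
    {
        const unsigned int tries    = 100;   /* stands in for WAIT_TRY_COUNT            */
        const unsigned int slice_us = 100;   /* stands in for MAX_HW_TIME_US/WAIT_TRY_COUNT */
        unsigned int i;
        int err = STATUS_RETRY;

        for (i = 0; i < tries; i++)
        {
            err = submit_cmd(ctx, cmd);      /* may report "retry" while the queue is busy */
            if (err != STATUS_RETRY)
                break;                       /* accepted, or a hard error                  */
            wait_us(slice_us);               /* back off, then try again                   */
        }
        return err;                          /* still STATUS_RETRY here means it timed out */
    }

The point of the bounded loop is that a full kernel CCB is a transient condition, so the caller spins briefly rather than failing the submission outright.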
-+ OSLockRelease(psTransferContext->hLock); -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPropertyKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, -+ RGX_CONTEXT_PROPERTY eContextProperty, -+ IMG_UINT64 ui64Input, -+ IMG_UINT64 *pui64Output) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ switch (eContextProperty) -+ { -+ case RGX_CONTEXT_PROPERTY_FLAGS: -+ { -+ IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input; -+ -+ OSLockAcquire(psTransferContext->hLock); -+ eError = FWCommonContextSetFlags(psTransferContext->sTDMData.psServerCommonContext, -+ ui32ContextFlags); -+ OSLockRelease(psTransferContext->hLock); -+ break; -+ } -+ -+ default: -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); -+ eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ } -+ } -+ -+ return eError; -+} -+ -+void DumpTDMTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ IMG_UINT32 ui32VerbLevel) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ -+ OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock); -+ -+ dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext) -+ { -+ RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx = -+ IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode); -+ -+ DumpFWCommonContextInfo(psCurrentServerTransferCtx->sTDMData.psServerCommonContext, -+ pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); -+ } -+ -+ OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock); -+} -+ -+ -+IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ IMG_UINT32 ui32ContextBitMask = 0; -+ -+ OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock); -+ -+ dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext) -+ { -+ RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx = -+ IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode); -+ -+ if (CheckStalledClientCommonContext( -+ psCurrentServerTransferCtx->sTDMData.psServerCommonContext, RGX_KICK_TYPE_DM_TDM_2D) -+ == PVRSRV_ERROR_CCCB_STALLED) { -+ ui32ContextBitMask = RGX_KICK_TYPE_DM_TDM_2D; -+ } -+ } -+ -+ OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock); -+ return ui32ContextBitMask; -+} -+ -+/**************************************************************************//** -+ End of file (rgxtdmtransfer.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxtdmtransfer.h b/drivers/gpu/drm/img-rogue/rgxtdmtransfer.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxtdmtransfer.h -@@ -0,0 +1,132 @@ -+/*************************************************************************/ /*! -+@File rgxtdmtransfer.h -+@Title RGX Transfer queue 2 Functionality -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the RGX Transfer queue Functionality -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#if !defined(RGXTDMTRANSFER_H) -+#define RGXTDMTRANSFER_H -+ -+#include "devicemem.h" -+#include "device.h" -+#include "rgxdevice.h" -+#include "rgxfwutils.h" -+#include "rgx_fwif_resetframework.h" -+#include "rgxdebug_common.h" -+#include "pvr_notifier.h" -+ -+#include "sync_server.h" -+#include "connection_server.h" -+ -+typedef struct _RGX_SERVER_TQ_TDM_CONTEXT_ RGX_SERVER_TQ_TDM_CONTEXT; -+ -+ -+PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM( -+ CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_INT32 i32Priority, -+ IMG_UINT32 ui32FrameworkCommandSize, -+ IMG_PBYTE pabyFrameworkCommand, -+ IMG_HANDLE hMemCtxPrivData, -+ IMG_UINT32 ui32PackedCCBSizeU88, -+ IMG_UINT32 ui32ContextFlags, -+ IMG_UINT64 ui64RobustnessAddress, -+ RGX_SERVER_TQ_TDM_CONTEXT **ppsTransferContext); -+ -+ -+PVRSRV_ERROR PVRSRVRGXTDMGetSharedMemoryKM( -+ CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ PMR ** ppsCLIPMRMem, -+ PMR ** ppsUSCPMRMem); -+ -+ -+PVRSRV_ERROR PVRSRVRGXTDMReleaseSharedMemoryKM(PMR * psUSCPMRMem); -+ -+ -+PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext); -+ -+ -+PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( -+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext, -+ IMG_UINT32 ui32PDumpFlags, -+ IMG_UINT32 ui32ClientUpdateCount, -+ SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock, -+ IMG_UINT32 * paui32ClientUpdateSyncOffset, -+ IMG_UINT32 * paui32ClientUpdateValue, -+ PVRSRV_FENCE iCheckFence, -+ PVRSRV_TIMELINE iUpdateTimeline, -+ PVRSRV_FENCE * piUpdateFence, -+ IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], -+ IMG_UINT32 ui32FWCommandSize, -+ IMG_UINT8 * pui8FWCommand, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32SyncPMRCount, -+ IMG_UINT32 * pui32SyncPMRFlags, -+ PMR ** ppsSyncPMRs, -+ IMG_UINT32 ui32TDMCharacteristic1, -+ IMG_UINT32 ui32TDMCharacteristic2, -+ IMG_UINT64 ui64DeadlineInus); -+ -+PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM( -+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, -+ IMG_UINT32 ui32PDumpFlags); -+ -+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, -+ IMG_INT32 i32Priority); -+ -+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPropertyKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, -+ RGX_CONTEXT_PROPERTY eContextProperty, -+ IMG_UINT64 ui64Input, -+ IMG_UINT64 *pui64Output); -+ -+/* Debug - Dump debug info of TDM transfer contexts on this device */ -+void DumpTDMTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ IMG_UINT32 ui32VerbLevel); -+ -+/* Debug/Watchdog - check if client transfer contexts are stalled */ -+IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); -+ -+ -+#endif /* RGXTDMTRANSFER_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxtimecorr.c b/drivers/gpu/drm/img-rogue/rgxtimecorr.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxtimecorr.c -@@ -0,0 +1,729 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device specific time correlation and calibration routines -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@Description Device specific time correlation and calibration routines -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "img_defs.h" -+#include "rgxtimecorr.h" -+#include "rgxfwutils.h" -+#include "htbserver.h" -+#include "pvrsrv_apphint.h" -+#include "rgxpower.h" -+ -+/****************************************************************************** -+ * -+ * - A calibration period is started on power-on and after a DVFS transition, -+ * and it's closed before a power-off and before a DVFS transition -+ * (so power-on -> dfvs -> dvfs -> power-off , power on -> dvfs -> dvfs..., -+ * where each arrow is a calibration period). -+ * -+ * - The timers on the Host and on the FW are correlated at the beginning of -+ * each period together with the current GPU frequency. -+ * -+ * - Correlation and calibration are also done at regular intervals using -+ * a best effort approach. 
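The correlation idea in the comment above reduces to mapping a GPU (CR) timestamp onto the OS clock through one reference sample plus the frequency in force for that calibration period. A minimal sketch, assuming the CR timer ticks once every 256 GPU cycles (the RGX_CRTIME_TICK_IN_CYCLES figure referenced in rgxtimecorr.h further down); the names here are illustrative, not driver API:

    #include <stdint.h>

    /* Sketch: estimate the OS time of a later CR timestamp from one
     * correlation point (cr_ref, os_ref_ns) and the GPU frequency.
     * Ignores overflow; the driver instead uses a pre-scaled fixed-point
     * conversion factor (see rgxtimecorr.h below). */
    static inline uint64_t cr_to_os_ns(uint64_t cr_now, uint64_t cr_ref,
                                       uint64_t os_ref_ns, uint64_t gpu_hz)
    {
        uint64_t cycles = (cr_now - cr_ref) * 256u;            /* CR ticks -> cycles */
        return os_ref_ns + (cycles * 1000000000ull) / gpu_hz;  /* cycles -> ns       */
    }
    /* e.g. 3,125,000 ticks at 800 MHz: 3,125,000 * 256 / 800e6 = 1.0 s later. */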
-+ * -+ *****************************************************************************/ -+ -+/* -+ AppHint interfaces -+*/ -+ -+static PVRSRV_ERROR _SetClock(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *psPrivate, -+ IMG_UINT32 ui32Value) -+{ -+ static __maybe_unused const char* const apszClocks[] = { -+ "mono", "mono_raw", "sched" -+ }; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ PVR_ASSERT(psDeviceNode->pvDevice != NULL); -+ -+ PVR_UNREFERENCED_PARAMETER(psPrivate); -+ -+ if (ui32Value >= RGXTIMECORR_CLOCK_LAST) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Invalid clock source type (%u)", ui32Value)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ PVR_DPF((PVR_DBG_WARNING, -+ "Setting time correlation clock from \"%s\" to \"%s\"", -+ apszClocks[psDevInfo->ui32ClockSource], -+ apszClocks[ui32Value])); -+ -+ /* PVRSRVPowerLock() fails only when power is off. */ -+ if (PVRSRVPowerLock((PVRSRV_DEVICE_NODE *) psDeviceNode) == PVRSRV_OK) -+ { -+ RGXTimeCorrEnd((PVRSRV_DEVICE_NODE *) psDeviceNode, -+ RGXTIMECORR_EVENT_CLOCK_CHANGE); -+ -+ psDevInfo->ui32ClockSource = ui32Value; -+ -+ RGXTimeCorrBegin((PVRSRV_DEVICE_NODE *) psDeviceNode, -+ RGXTIMECORR_EVENT_CLOCK_CHANGE); -+ -+ PVRSRVPowerUnlock((PVRSRV_DEVICE_NODE *)psDeviceNode); -+ } -+ else -+ { -+ /* Set the new clock source without updating the time correlation -+ * data. This is going to be accounted for during the next power up. */ -+ psDevInfo->ui32ClockSource = ui32Value; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR _GetClock(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *psPrivate, -+ IMG_UINT32 *pui32Value) -+{ -+ PVR_ASSERT(psDeviceNode->pvDevice != NULL); -+ -+ *pui32Value = -+ ((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui32ClockSource; -+ -+ PVR_UNREFERENCED_PARAMETER(psPrivate); -+ -+ return PVRSRV_OK; -+} -+ -+void RGXTimeCorrInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_TimeCorrClock, _GetClock, -+ _SetClock, psDeviceNode, NULL); -+} -+ -+/* -+ End of AppHint interface -+*/ -+ -+IMG_UINT64 RGXTimeCorrGetClockns64(const PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ IMG_UINT64 ui64Clock; -+ -+ switch (((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui32ClockSource) { -+ case RGXTIMECORR_CLOCK_MONO: -+ return ((void) OSClockMonotonicns64(&ui64Clock), ui64Clock); -+ case RGXTIMECORR_CLOCK_MONO_RAW: -+ return OSClockMonotonicRawns64(); -+ case RGXTIMECORR_CLOCK_SCHED: -+ return OSClockns64(); -+ default: -+ PVR_ASSERT(IMG_FALSE); -+ return 0; -+ } -+} -+ -+IMG_UINT64 RGXTimeCorrGetClockus64(const PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ IMG_UINT32 rem; -+ return OSDivide64r64(RGXTimeCorrGetClockns64(psDeviceNode), 1000, &rem); -+} -+ -+void RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGXFWIF_TIME_CORR *psTimeCorrs, -+ IMG_UINT32 ui32NumOut) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; -+ IMG_UINT32 ui32CurrentIndex; -+ -+ RGXFwSharedMemCacheOpValue(psGpuUtilFWCB->ui32TimeCorrSeqCount, INVALIDATE); -+ ui32CurrentIndex = psGpuUtilFWCB->ui32TimeCorrSeqCount; -+ -+ RGXFwSharedMemCacheOpExec(&psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32CurrentIndex - ui32NumOut)], -+ sizeof(psGpuUtilFWCB->sTimeCorr[0]) * ui32NumOut, -+ PVRSRV_CACHE_OP_INVALIDATE); -+ while (ui32NumOut--) -+ { -+ *(psTimeCorrs++) = psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32CurrentIndex)]; -+ ui32CurrentIndex--; -+ } -+} -+ 
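RGXGetTimeCorrData above copies the newest entries out of a ring that the firmware and driver index with an ever-increasing sequence count. A reduced sketch of that read-back, with a hypothetical struct corr and ring size standing in for RGXFWIF_GPU_UTIL_FWCB:

    #include <stdint.h>

    #define N_CORR 8u                       /* hypothetical ring size */

    struct corr { uint64_t cr, os; uint32_t khz; };

    /* Copy the 'want' most recent entries, newest first, from a ring indexed
     * by an ever-increasing sequence count (the ui32TimeCorrSeqCount idea
     * above): the modulo picks the slot, the decrement walks backwards. */
    static void read_latest(const struct corr ring[N_CORR], uint32_t seq,
                            struct corr *out, uint32_t want)
    {
        while (want--)
        {
            *out++ = ring[seq % N_CORR];
            seq--;
        }
    }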
-+static __maybe_unused const IMG_CHAR* _EventToString(RGXTIMECORR_EVENT eEvent) -+{ -+ switch (eEvent) -+ { -+ case RGXTIMECORR_EVENT_POWER: -+ return "power"; -+ case RGXTIMECORR_EVENT_DVFS: -+ return "dvfs"; -+ case RGXTIMECORR_EVENT_PERIODIC: -+ return "periodic"; -+ case RGXTIMECORR_EVENT_CLOCK_CHANGE: -+ return "clock source"; -+ default: -+ return "n/a"; -+ } -+} -+ -+static inline IMG_UINT32 _RGXGetSystemLayerGPUClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; -+ -+ return psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; -+} -+ -+static inline IMG_UINT32 _RGXGetEstimatedGPUClockSpeed(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; -+ GPU_FREQ_TRACKING_DATA *psTrackingData; -+ -+ psTrackingData = &psGpuDVFSTable->asTrackingData[psGpuDVFSTable->ui32FreqIndex]; -+ -+ return psTrackingData->ui32EstCoreClockSpeed; -+} -+ -+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) -+static inline void _DumpTimerCorrelationHistory(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; -+ IMG_UINT32 i = psGpuDVFSTable->ui32HistoryIndex; -+ -+ PVR_DPF((PVR_DBG_ERROR, "Dumping history of timer correlation data (latest first):")); -+ -+ do -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ " Begin times: OS %" IMG_UINT64_FMTSPEC ", CR %" IMG_UINT64_FMTSPEC ", " -+ "End times: OS %" IMG_UINT64_FMTSPEC ", CR %" IMG_UINT64_FMTSPEC ", " -+ "Core clk %u, Estimated clk %u", -+ psGpuDVFSTable->asTrackingHistory[i].ui64BeginOSTimestamp, -+ psGpuDVFSTable->asTrackingHistory[i].ui64BeginCRTimestamp, -+ psGpuDVFSTable->asTrackingHistory[i].ui64EndOSTimestamp, -+ psGpuDVFSTable->asTrackingHistory[i].ui64EndCRTimestamp, -+ psGpuDVFSTable->asTrackingHistory[i].ui32CoreClockSpeed, -+ psGpuDVFSTable->asTrackingHistory[i].ui32EstCoreClockSpeed)); -+ -+ i = (i - 1) % RGX_GPU_FREQ_TRACKING_SIZE; -+ -+ } while (i != psGpuDVFSTable->ui32HistoryIndex); -+} -+#endif -+ -+static void _RGXMakeTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, RGXTIMECORR_EVENT eEvent) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; -+ IMG_UINT32 ui32NewSeqCount; -+ RGXFWIF_TIME_CORR *psTimeCorr; -+ RGXFWIF_TIME_CORR sTimeCorr = {0}; -+ -+ RGXFwSharedMemCacheOpValue(psGpuUtilFWCB->ui32TimeCorrSeqCount, INVALIDATE); -+ ui32NewSeqCount = psGpuUtilFWCB->ui32TimeCorrSeqCount + 1; -+ RGXFwSharedMemCacheOpValue(psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32NewSeqCount)], INVALIDATE); -+ psTimeCorr = &psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32NewSeqCount)]; -+ -+ /* -+ * The following reads must be done as close together as possible, because -+ * they represent the same current time sampled from different clock sources. 
-+ */ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ if (!PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ if (OSClockMonotonicns64(&sTimeCorr.ui64OSMonoTimeStamp) != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "_RGXMakeTimeCorrData: System Monotonic Clock not available.")); -+ PVR_ASSERT(0); -+ } -+ } -+#endif -+ sTimeCorr.ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo); -+ sTimeCorr.ui64OSTimeStamp = RGXTimeCorrGetClockns64(psDeviceNode); -+ sTimeCorr.ui32CoreClockSpeed = _RGXGetEstimatedGPUClockSpeed(psDevInfo); -+ sTimeCorr.ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(sTimeCorr.ui32CoreClockSpeed); -+ -+ if (sTimeCorr.ui64CRDeltaToOSDeltaKNs == 0) -+ { -+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) -+ _DumpTimerCorrelationHistory(psDevInfo); -+#endif -+ -+ /* Revert to original clock speed (error already printed) */ -+ sTimeCorr.ui32CoreClockSpeed = _RGXGetSystemLayerGPUClockSpeed(psDeviceNode); -+ sTimeCorr.ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(sTimeCorr.ui32CoreClockSpeed); -+ } -+ -+ OSCachedMemCopy(psTimeCorr, &sTimeCorr, sizeof(sTimeCorr)); -+ /* Make sure the values are written to memory before updating the index of the current entry */ -+ OSWriteMemoryBarrier(psTimeCorr); -+ RGXFwSharedMemCacheOpPtr(psTimeCorr, FLUSH); -+ -+ -+ /* Update the index of the current entry in the timer correlation array */ -+ psGpuUtilFWCB->ui32TimeCorrSeqCount = ui32NewSeqCount; -+ RGXFwSharedMemCacheOpValue(psGpuUtilFWCB->ui32TimeCorrSeqCount, FLUSH); -+ -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "Timer correlation data (post %s event): OS %" IMG_UINT64_FMTSPEC " ns, " -+ "CR %" IMG_UINT64_FMTSPEC ", GPU freq. %u Hz (given as %u Hz)", -+ _EventToString(eEvent), -+ sTimeCorr.ui64OSTimeStamp, -+ sTimeCorr.ui64CRTimeStamp, -+ RGXFWIF_ROUND_TO_KHZ(sTimeCorr.ui32CoreClockSpeed), -+ _RGXGetSystemLayerGPUClockSpeed(psDeviceNode))); -+ -+ /* -+ * Don't log timing data to the HTB log after a power(-on) event. -+ * Otherwise this will be logged before the HTB partition marker, breaking -+ * the log sync grammar. This data will be automatically repeated when the -+ * partition marker is written. -+ */ -+ HTBSyncScale(eEvent != RGXTIMECORR_EVENT_POWER, -+ sTimeCorr.ui64OSTimeStamp, -+ sTimeCorr.ui64CRTimeStamp, -+ sTimeCorr.ui32CoreClockSpeed); -+} -+ -+static void _RGXCheckTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable) -+{ -+#if !defined(NO_HARDWARE) && !defined(VIRTUAL_PLATFORM) && defined(DEBUG) -+#define SCALING_FACTOR (10) -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; -+ IMG_UINT32 ui32Index; -+ RGXFWIF_TIME_CORR *psTimeCorr; -+ IMG_UINT64 ui64EstimatedTime, ui64CRTimeStamp, ui64OSTimeStamp; -+ IMG_UINT64 ui64CRTimeDiff, ui64OSTimeDiff; -+ IMG_INT64 i64Diff; -+ IMG_UINT32 ui32Ratio, ui32Remainder; -+ -+ RGXFwSharedMemCacheOpValue(psGpuUtilFWCB->ui32TimeCorrSeqCount, INVALIDATE); -+ ui32Index = RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount); -+ RGXFwSharedMemCacheOpValue(psGpuUtilFWCB->sTimeCorr[ui32Index], INVALIDATE); -+ psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32Index]; -+ -+ /* -+ * The following reads must be done as close together as possible, because -+ * they represent the same current time sampled from different clock sources. 
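_RGXMakeTimeCorrData above writes each new correlation point with a publish ordering: fill the slot, issue a write barrier (plus cache flushes, since the firmware's view of this memory may be non-coherent), and only then advance the sequence count, so a reader never indexes a half-written record. A sketch of just that ordering, reusing the hypothetical struct corr and N_CORR from the earlier sketch; wmb() stands in for OSWriteMemoryBarrier:

    #include <asm/barrier.h>   /* wmb(): kernel write barrier */

    static void publish(struct corr ring[N_CORR], volatile uint32_t *seq,
                        const struct corr *fresh)
    {
        uint32_t next = *seq + 1;

        ring[next % N_CORR] = *fresh;   /* 1. write the payload                   */
        wmb();                          /* 2. order the payload before the index  */
        *seq = next;                    /* 3. publish: readers now use this slot  */
    }

This is also why the read paths load and invalidate the sequence count first and only then fetch the entry it points at.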
-+ */ -+ ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo); -+ ui64OSTimeStamp = RGXTimeCorrGetClockns64(psDeviceNode); -+ -+ if ((ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp) < (1 << SCALING_FACTOR)) -+ { -+ /* -+ * Less than ~1us has passed since the timer correlation data was generated. -+ * A time frame this short is probably not enough to get an estimate -+ * of how good the timer correlation data was. -+ * Skip calculations for the above reason and to avoid a division by 0 below. -+ */ -+ return; -+ } -+ -+ -+ /* Calculate an estimated timestamp based on the latest timer correlation data */ -+ ui64CRTimeDiff = ui64CRTimeStamp - psTimeCorr->ui64CRTimeStamp; -+ ui64OSTimeDiff = RGXFWIF_GET_DELTA_OSTIME_NS(ui64CRTimeDiff, -+ psTimeCorr->ui64CRDeltaToOSDeltaKNs); -+ ui64EstimatedTime = psTimeCorr->ui64OSTimeStamp + ui64OSTimeDiff; -+ -+ /* Get difference between estimated timestamp and current timestamp, in ns */ -+ i64Diff = ui64EstimatedTime - ui64OSTimeStamp; -+ -+ /* -+ * Calculate ratio between estimated time diff and real time diff: -+ * ratio% : 100% = (OSestimate - OStimecorr) : (OSreal - OStimecorr) -+ * -+ * The operands are scaled down (approximately from ns to us) so at least -+ * the divisor fits on 32 bit. -+ */ -+ ui32Ratio = OSDivide64(((ui64EstimatedTime - psTimeCorr->ui64OSTimeStamp) * 100ULL) >> SCALING_FACTOR, -+ (ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp) >> SCALING_FACTOR, -+ &ui32Remainder); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "Estimated timestamp check: diff %" IMG_INT64_FMTSPECd " ns over " -+ "period %" IMG_UINT64_FMTSPEC " ns, estimated timer speed %u%%", -+ i64Diff, -+ ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp, -+ ui32Ratio)); -+ -+ /* Warn if the estimated timestamp is not within +/- 1% of the current time */ -+ if (ui32Ratio < 99 || ui32Ratio > 101) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "Estimated timestamps generated in the last %" IMG_UINT64_FMTSPEC " ns " -+ "were %s the real time (increasing at %u%% speed)", -+ ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp, -+ i64Diff > 0 ? "ahead of" : "behind", -+ ui32Ratio)); -+ -+ /* Higher ratio == higher delta OS == higher delta CR == frequency higher than expected (and viceversa) */ -+ PVR_DPF((PVR_DBG_WARNING, -+ "Current GPU frequency %u Hz (given as %u Hz) is probably %s than expected", -+ RGXFWIF_ROUND_TO_KHZ(psTimeCorr->ui32CoreClockSpeed), -+ _RGXGetSystemLayerGPUClockSpeed(psDeviceNode), -+ i64Diff > 0 ? "lower" : "higher")); -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(psGpuDVFSTable); -+#endif -+} -+ -+static inline IMG_UINT32 _RGXGPUFreqGetIndex(RGX_GPU_DVFS_TABLE *psGpuDVFSTable, IMG_UINT32 ui32CoreClockSpeed) -+{ -+ IMG_UINT32 *paui32GPUFrequencies = psGpuDVFSTable->aui32GPUFrequency; -+ IMG_UINT32 i; -+ -+ for (i = 0; i < RGX_GPU_DVFS_TABLE_SIZE; i++) -+ { -+ if (paui32GPUFrequencies[i] == ui32CoreClockSpeed) -+ { -+ return i; -+ } -+ -+ if (paui32GPUFrequencies[i] == 0) -+ { -+ paui32GPUFrequencies[i] = ui32CoreClockSpeed; -+ return i; -+ } -+ } -+ -+ i--; -+ -+ PVR_DPF((PVR_DBG_ERROR, "GPU frequency table in the driver is full! " -+ "Table size should be increased! 
Overriding last entry (%u) with %u", -+ paui32GPUFrequencies[i], ui32CoreClockSpeed)); -+ -+ paui32GPUFrequencies[i] = ui32CoreClockSpeed; -+ -+ return i; -+} -+ -+static void _RGXGPUFreqCalibrationPeriodStart(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_GPU_DVFS_TABLE *psGpuDVFSTable) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ GPU_FREQ_TRACKING_DATA *psTrackingData; -+ IMG_UINT32 ui32CoreClockSpeed, ui32Index; -+ -+ IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo); -+ IMG_UINT64 ui64OSTimestamp = RGXTimeCorrGetClockus64(psDeviceNode); -+ -+ psGpuDVFSTable->ui64CalibrationCRTimestamp = ui64CRTimestamp; -+ psGpuDVFSTable->ui64CalibrationOSTimestamp = ui64OSTimestamp; -+ -+ ui32CoreClockSpeed = _RGXGetSystemLayerGPUClockSpeed(psDeviceNode); -+ ui32Index = _RGXGPUFreqGetIndex(psGpuDVFSTable, ui32CoreClockSpeed); -+ psTrackingData = &psGpuDVFSTable->asTrackingData[ui32Index]; -+ -+ /* Set the time needed to (re)calibrate the GPU frequency */ -+ if (psTrackingData->ui32CalibrationCount == 0) /* We never met this frequency */ -+ { -+ psTrackingData->ui32EstCoreClockSpeed = ui32CoreClockSpeed; -+ psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US; -+ } -+ else if (psTrackingData->ui32CalibrationCount == 1) /* We calibrated this frequency only once */ -+ { -+ psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US; -+ } -+ else -+ { -+ psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US; -+ } -+ -+ /* Update the index to the DVFS table */ -+ psGpuDVFSTable->ui32FreqIndex = ui32Index; -+ -+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) -+ /* Update tracking history */ -+ { -+ GPU_FREQ_TRACKING_HISTORY *psTrackingHistory; -+ -+ psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex]; -+ psTrackingHistory->ui32CoreClockSpeed = ui32CoreClockSpeed; -+ psTrackingHistory->ui32EstCoreClockSpeed = psTrackingData->ui32EstCoreClockSpeed; -+ psTrackingHistory->ui64BeginCRTimestamp = ui64CRTimestamp; -+ psTrackingHistory->ui64BeginOSTimestamp = ui64OSTimestamp; -+ psTrackingHistory->ui64EndCRTimestamp = 0ULL; -+ psTrackingHistory->ui64EndOSTimestamp = 0ULL; -+ } -+#endif -+} -+ -+static void _RGXGPUFreqCalibrationPeriodStop(PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo); -+ IMG_UINT64 ui64OSTimestamp = RGXTimeCorrGetClockus64(psDeviceNode); -+ -+ psGpuDVFSTable->ui64CalibrationCRTimediff = -+ ui64CRTimestamp - psGpuDVFSTable->ui64CalibrationCRTimestamp; -+ psGpuDVFSTable->ui64CalibrationOSTimediff = -+ ui64OSTimestamp - psGpuDVFSTable->ui64CalibrationOSTimestamp; -+ -+ /* Check if the current timer correlation data is good enough */ -+ _RGXCheckTimeCorrData(psDeviceNode, psGpuDVFSTable); -+ -+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) -+ /* Update tracking history */ -+ { -+ GPU_FREQ_TRACKING_HISTORY *psTrackingHistory; -+ -+ psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex]; -+ psTrackingHistory->ui64EndCRTimestamp = ui64CRTimestamp; -+ psTrackingHistory->ui64EndOSTimestamp = ui64OSTimestamp; -+ } -+#endif -+} -+ -+static void _RGXGPUFreqCalibrationCalculate(PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable, -+ RGXTIMECORR_EVENT eEvent) -+{ -+#if !defined(DISABLE_GPU_FREQUENCY_CALIBRATION) -+ GPU_FREQ_TRACKING_DATA *psTrackingData; -+ IMG_UINT32 
ui32EstCoreClockSpeed, ui32PrevCoreClockSpeed, ui32SysGPUClockSpeed; -+ IMG_INT32 i32Diff; -+ IMG_UINT32 ui32Remainder; -+ -+ ui32SysGPUClockSpeed = _RGXGetSystemLayerGPUClockSpeed(psDeviceNode); -+ /* -+ * Find out what the GPU frequency was in the last period. -+ * This should return a value very close to the frequency passed by the system layer. -+ */ -+ ui32EstCoreClockSpeed = -+ RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(psGpuDVFSTable->ui64CalibrationCRTimediff, -+ psGpuDVFSTable->ui64CalibrationOSTimediff, -+ ui32Remainder); -+ -+ /* Update GPU frequency used by the driver for a given system layer frequency */ -+ psTrackingData = &psGpuDVFSTable->asTrackingData[psGpuDVFSTable->ui32FreqIndex]; -+ -+ ui32PrevCoreClockSpeed = psTrackingData->ui32EstCoreClockSpeed; -+ psTrackingData->ui32EstCoreClockSpeed = ui32EstCoreClockSpeed; -+ psTrackingData->ui32CalibrationCount++; -+ -+ i32Diff = (IMG_INT32) (ui32EstCoreClockSpeed - ui32PrevCoreClockSpeed); -+ -+ if ((i32Diff < -1000000) || (i32Diff > 1000000)) -+ { -+ /* Warn if the frequency changed by more than 1 MHz between recalculations */ -+ PVR_DPF((PVR_DBG_WARNING, -+ "GPU frequency calibration of system layer frequency %u Hz (pre %s event): " -+ "more than 1 MHz difference between old and new value " -+ "(%u Hz -> %u Hz over %" IMG_UINT64_FMTSPEC " us)", -+ ui32SysGPUClockSpeed, -+ _EventToString(eEvent), -+ RGXFWIF_ROUND_TO_KHZ(ui32PrevCoreClockSpeed), -+ RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed), -+ psGpuDVFSTable->ui64CalibrationOSTimediff)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "GPU frequency calibration of system layer frequency %u Hz (pre %s event): " -+ "%u Hz -> %u Hz done over %" IMG_UINT64_FMTSPEC " us", -+ ui32SysGPUClockSpeed, -+ _EventToString(eEvent), -+ RGXFWIF_ROUND_TO_KHZ(ui32PrevCoreClockSpeed), -+ RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed), -+ psGpuDVFSTable->ui64CalibrationOSTimediff)); -+ } -+ -+ if (eEvent == RGXTIMECORR_EVENT_PERIODIC) -+ { -+ PVRSRV_DEV_POWER_STATE ePowerState; -+ -+ i32Diff = (IMG_INT32) (ui32EstCoreClockSpeed - ui32SysGPUClockSpeed); -+ -+ /* -+ * Notify the Firmware about unexpected frequency differences observed during -+ * periodic frequency calibration events only. -+ * Other events like PDVFS and power transitions are already likely to call -+ * the pre/post clock callback directly to set frequencies as needed. -+ * Platforms without PDVFS or APM need a method to correct the Firmware's -+ * internal timing measurements. 
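As a worked example of the estimate this calibration step derives (numbers are made up; 256 cycles per CR tick as per the rgxtimecorr.h comment):

    #include <stdint.h>

    /* Hypothetical period: the CR timer advanced 1,562,500 ticks while the
     * OS clock advanced 500,000 us.
     *   1,562,500 ticks * 256 cycles/tick = 400,000,000 cycles
     *   400,000,000 cycles / 0.5 s        = 800,000,000 Hz (800 MHz)
     * If the system layer had reported 792 MHz, the 8 MHz difference exceeds
     * the 1 MHz threshold above, so the periodic path would warn and push the
     * corrected value to the firmware. */
    static uint32_t example_estimate_hz(void)
    {
        uint64_t dcr_ticks = 1562500;
        uint64_t dos_us    = 500000;

        return (uint32_t)((dcr_ticks * 256u * 1000000u) / dos_us);
    }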
-+ */ -+ if (((i32Diff < -1000000) || (i32Diff > 1000000)) && -+ (PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState) == PVRSRV_OK)) -+ { -+ PVRSRV_ERROR eError = RGXPreClockSpeedChange((IMG_HANDLE)psDeviceNode, ePowerState); -+ -+ PVR_LOG_IF_ERROR(eError, "RGXPreClockSpeedChange"); -+ if (eError == PVRSRV_OK) -+ { -+ RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData; -+ -+ /* Update the internal core frequency variable and notify the Firmware of the change */ -+ psRGXData->psRGXTimingInfo->ui32CoreClockSpeed = RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed); -+ -+ eError = RGXPostClockSpeedChange((IMG_HANDLE)psDeviceNode, ePowerState); -+ PVR_LOG_IF_ERROR(eError, "RGXPostClockSpeedChange"); -+ } -+ } -+ } -+ -+ /* Reset time deltas to avoid recalibrating the same frequency over and over again */ -+ psGpuDVFSTable->ui64CalibrationCRTimediff = 0; -+ psGpuDVFSTable->ui64CalibrationOSTimediff = 0; -+ -+#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) -+ /* Update tracking history */ -+ { -+ GPU_FREQ_TRACKING_HISTORY *psTrackingHistory; -+ -+ psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex]; -+ psTrackingHistory->ui32EstCoreClockSpeed = ui32EstCoreClockSpeed; -+ psGpuDVFSTable->ui32HistoryIndex = -+ (psGpuDVFSTable->ui32HistoryIndex + 1) % RGX_GPU_FREQ_TRACKING_SIZE; -+ } -+#endif -+ -+#else -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(psGpuDVFSTable); -+ PVR_UNREFERENCED_PARAMETER(eEvent); -+#endif -+} -+ -+void RGXTimeCorrBegin(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; -+ PVRSRV_VZ_RETN_IF_MODE(GUEST); -+ -+ _RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable); -+ _RGXMakeTimeCorrData(psDeviceNode, eEvent); -+} -+ -+void RGXTimeCorrEnd(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; -+ PVRSRV_VZ_RETN_IF_MODE(GUEST); -+ -+ _RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable); -+ -+ if (psGpuDVFSTable->ui64CalibrationOSTimediff >= psGpuDVFSTable->ui32CalibrationPeriod) -+ { -+ _RGXGPUFreqCalibrationCalculate(psDeviceNode, psGpuDVFSTable, eEvent); -+ } -+} -+ -+void RGXTimeCorrRestartPeriodic(IMG_HANDLE hDevHandle) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; -+ IMG_UINT64 ui64TimeNow = RGXTimeCorrGetClockus64(psDeviceNode); -+ PVRSRV_DEV_POWER_STATE ePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT; -+ PVRSRV_VZ_RETN_IF_MODE(GUEST); -+ -+ if (psGpuDVFSTable == NULL) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Required data not initialised yet", __func__)); -+ return; -+ } -+ -+ /* Check if it's the right time to recalibrate the GPU clock frequency */ -+ if ((ui64TimeNow - psGpuDVFSTable->ui64CalibrationOSTimestamp) < psGpuDVFSTable->ui32CalibrationPeriod) return; -+ -+ /* Try to acquire the powerlock, if not possible then don't wait */ -+ if (PVRSRVPowerTryLock(psDeviceNode) != PVRSRV_OK) return; -+ -+ /* If the GPU is off then we can't do anything */ -+ PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); -+ if (ePowerState != PVRSRV_DEV_POWER_STATE_ON) -+ { -+ PVRSRVPowerUnlock(psDeviceNode); -+ return; 
-+ } -+ -+ /* All checks passed, we can calibrate and correlate */ -+ RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_PERIODIC); -+ RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_PERIODIC); -+ -+ PVRSRVPowerUnlock(psDeviceNode); -+} -+ -+/* -+ RGXTimeCorrGetClockSource -+*/ -+RGXTIMECORR_CLOCK_TYPE RGXTimeCorrGetClockSource(const PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+ return ((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui32ClockSource; -+} -+ -+/* -+ RGXTimeCorrSetClockSource -+*/ -+PVRSRV_ERROR RGXTimeCorrSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGXTIMECORR_CLOCK_TYPE eClockType) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO* psRGXDevNode = psDeviceNode->pvDevice; -+ RGXTIMECORR_CLOCK_TYPE eLastClock = psRGXDevNode->ui32ClockSource; -+ -+ eError = _SetClock(psDeviceNode, NULL, eClockType); -+ PVR_LOG_RETURN_IF_ERROR(eError, "_SetClock"); -+ -+ psRGXDevNode->ui32LastClockSource = eLastClock; -+ return eError; -+} -+ -+PVRSRV_ERROR -+PVRSRVRGXCurrentTime(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT64 * pui64Time) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ *pui64Time = RGXTimeCorrGetClockns64(psDeviceNode); -+ -+ return PVRSRV_OK; -+} -+ -+/****************************************************************************** -+ End of file (rgxtimecorr.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxtimecorr.h b/drivers/gpu/drm/img-rogue/rgxtimecorr.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxtimecorr.h -@@ -0,0 +1,272 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX time correlation and calibration header file -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the RGX time correlation and calibration routines -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGXTIMECORR_H) -+#define RGXTIMECORR_H -+ -+#include "img_types.h" -+#include "device.h" -+#include "osfunc.h" -+#include "connection_server.h" -+ -+typedef enum -+{ -+ RGXTIMECORR_CLOCK_MONO, -+ RGXTIMECORR_CLOCK_MONO_RAW, -+ RGXTIMECORR_CLOCK_SCHED, -+ -+ RGXTIMECORR_CLOCK_LAST -+} RGXTIMECORR_CLOCK_TYPE; -+ -+typedef enum -+{ -+ RGXTIMECORR_EVENT_POWER, -+ RGXTIMECORR_EVENT_DVFS, -+ RGXTIMECORR_EVENT_PERIODIC, -+ RGXTIMECORR_EVENT_CLOCK_CHANGE -+} RGXTIMECORR_EVENT; -+ -+/* -+ * Calibrated GPU frequencies are rounded to the nearest multiple of 1 KHz -+ * before use, to reduce the noise introduced by calculations done with -+ * imperfect operands (correlated timers not sampled at exactly the same -+ * time, GPU CR timer incrementing only once every 256 GPU cycles). -+ * This also helps reducing the variation between consecutive calculations. -+ */ -+#define RGXFWIF_CONVERT_TO_KHZ(freq) (((freq) + 500) / 1000) -+#define RGXFWIF_ROUND_TO_KHZ(freq) ((((freq) + 500) / 1000) * 1000) -+ -+/* Constants used in different calculations */ -+#define SECONDS_TO_MICROSECONDS (1000000ULL) -+#define CRTIME_TO_CYCLES_WITH_US_SCALE (RGX_CRTIME_TICK_IN_CYCLES * SECONDS_TO_MICROSECONDS) -+ -+/* -+ * Use this macro to get a more realistic GPU core clock speed than the one -+ * given by the upper layers (used when doing GPU frequency calibration) -+ */ -+#define RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(deltacr_us, deltaos_us, remainder) \ -+ OSDivide64((deltacr_us) * CRTIME_TO_CYCLES_WITH_US_SCALE, (deltaos_us), &(remainder)) -+ -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXTimeCorrGetConversionFactor -+ -+ @Description Generate constant used to convert a GPU time difference into -+ an OS time difference (for more info see rgx_fwif_km.h). -+ -+ @Input ui32ClockSpeed : GPU clock speed -+ -+ @Return 0 on failure, conversion factor otherwise -+ -+******************************************************************************/ -+static inline IMG_UINT64 RGXTimeCorrGetConversionFactor(IMG_UINT32 ui32ClockSpeed) -+{ -+ IMG_UINT32 ui32Remainder; -+ -+ if (RGXFWIF_CONVERT_TO_KHZ(ui32ClockSpeed) == 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: GPU clock frequency %u is too low", -+ __func__, ui32ClockSpeed)); -+ -+ return 0; -+ } -+ -+ return OSDivide64r64(CRTIME_TO_CYCLES_WITH_US_SCALE << RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT, -+ RGXFWIF_CONVERT_TO_KHZ(ui32ClockSpeed), &ui32Remainder); -+} -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXTimeCorrBegin -+ -+ @Description Generate new timer correlation data, and start tracking -+ the current GPU frequency. 
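To make RGXTimeCorrGetConversionFactor above concrete: the returned factor is OS nanoseconds per CR tick, pre-scaled by 2^RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT so that the later delta conversion stays in integer math. A sketch under the assumption that the shift is 20 (illustrative only; the real constant lives in rgx_fwif_km.h):

    #include <stdint.h>

    #define ACCURACY_SHIFT 20   /* stand-in for RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT */

    /* K = OS nanoseconds per CR tick, scaled by 2^ACCURACY_SHIFT:
     * 256 cycles/tick * 1e6 / kHz  ==  256e9 / Hz nanoseconds per tick. */
    static inline uint64_t conv_factor(uint32_t khz)
    {
        return ((256ull * 1000000ull) << ACCURACY_SHIFT) / khz;
    }

    static inline uint64_t cr_delta_to_ns(uint64_t dcr_ticks, uint64_t k)
    {
        return (dcr_ticks * k) >> ACCURACY_SHIFT;
    }
    /* At 800,000 kHz: k = 320 << 20, so 3,125,000 ticks -> ~1,000,000,000 ns. */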
-+ -+ @Input hDevHandle : RGX Device Node -+ @Input eEvent : Event associated with the beginning of a timer -+ correlation period -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXTimeCorrBegin(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXTimeCorrEnd -+ -+ @Description Stop tracking the CPU and GPU timers, and if possible -+ recalculate the GPU frequency to a value which makes the timer -+ correlation data more accurate. -+ -+ @Input hDevHandle : RGX Device Node -+ @Input eEvent : Event associated with the end of a timer -+ correlation period -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXTimeCorrEnd(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXTimeCorrRestartPeriodic -+ -+ @Description Perform actions from RGXTimeCorrEnd and RGXTimeCorrBegin, -+ but only if enough time has passed since the last timer -+ correlation data was generated. -+ -+ @Input hDevHandle : RGX Device Node -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXTimeCorrRestartPeriodic(IMG_HANDLE hDevHandle); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXTimeCorrGetClockns64 -+ -+ @Description Returns value of currently selected clock (in ns). -+ -+ @Input psDeviceNode : RGX Device Node -+ @Return clock value from currently selected clock source -+ -+******************************************************************************/ -+IMG_UINT64 RGXTimeCorrGetClockns64(const PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXTimeCorrGetClockus64 -+ -+ @Description Returns value of currently selected clock (in us). -+ -+ @Input psDeviceNode : RGX Device Node -+ @Return clock value from currently selected clock source -+ -+******************************************************************************/ -+IMG_UINT64 RGXTimeCorrGetClockus64(const PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXTimeCorrGetClockSource -+ -+ @Description Returns currently selected clock source -+ -+ @Input psDeviceNode : RGX Device Node -+ @Return clock source type -+ -+******************************************************************************/ -+RGXTIMECORR_CLOCK_TYPE RGXTimeCorrGetClockSource(const PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXTimeCorrSetClockSource -+ -+ @Description Sets clock source for correlation data. -+ -+ @Input psDeviceNode : RGX Device Node -+ @Input eClockType : clock source type -+ -+ @Return error code -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXTimeCorrSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGXTIMECORR_CLOCK_TYPE eClockType); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXTimeCorrInitAppHintCallbacks -+ -+ @Description Initialise apphint callbacks for timer correlation -+ related apphints. 
-+ -+ @Input psDeviceNode : RGX Device Node -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXTimeCorrInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXGetTimeCorrData -+ -+ @Description Get a number of the most recent time correlation data points -+ -+ @Input psDeviceNode : RGX Device Node -+ @Output psTimeCorrs : Output array of RGXFWIF_TIME_CORR elements -+ for data to be written to -+ @Input ui32NumOut : Number of elements to be written out -+ -+ @Return void -+ -+******************************************************************************/ -+void RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, -+ RGXFWIF_TIME_CORR *psTimeCorrs, -+ IMG_UINT32 ui32NumOut); -+ -+/**************************************************************************/ /*! -+@Function PVRSRVRGXCurrentTime -+@Description Returns the current state of the device timer -+@Input psDevData Device data. -+@Out pui64Time -+@Return PVRSRV_OK on success. -+*/ /***************************************************************************/ -+PVRSRV_ERROR -+PVRSRVRGXCurrentTime(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT64 * pui64Time); -+ -+#endif /* RGXTIMECORR_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxtimerquery.c b/drivers/gpu/drm/img-rogue/rgxtimerquery.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxtimerquery.c -@@ -0,0 +1,225 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX Timer queries -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description RGX Timer queries -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "rgxtimerquery.h" -+#include "rgxdevice.h" -+#include "rgxtimecorr.h" -+ -+#include "rgxfwutils.h" -+#include "pdump_km.h" -+ -+PVRSRV_ERROR -+PVRSRVRGXBeginTimerQueryKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT32 ui32QueryId) -+{ -+ PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ if (ui32QueryId >= RGX_MAX_TIMER_QUERIES) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+#if !defined(PVRSRV_USE_BRIDGE_LOCK) -+ OSLockAcquire(psDevInfo->hTimerQueryLock); -+#endif -+ -+ psDevInfo->bSaveStart = IMG_TRUE; -+ psDevInfo->bSaveEnd = IMG_TRUE; -+ -+ /* clear the stamps, in case there is no Kick */ -+ psDevInfo->pui64StartTimeById[ui32QueryId] = 0UL; -+ psDevInfo->pui64EndTimeById[ui32QueryId] = 0UL; -+ OSWriteMemoryBarrier(&psDevInfo->pui64EndTimeById[ui32QueryId]); -+ RGXFwSharedMemCacheOpValue(psDevInfo->pui64StartTimeById[ui32QueryId], FLUSH); -+ RGXFwSharedMemCacheOpValue(psDevInfo->pui64EndTimeById[ui32QueryId], FLUSH); -+ -+ /* save of the active query index */ -+ psDevInfo->ui32ActiveQueryId = ui32QueryId; -+ -+#if !defined(PVRSRV_USE_BRIDGE_LOCK) -+ OSLockRelease(psDevInfo->hTimerQueryLock); -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR -+PVRSRVRGXEndTimerQueryKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode) -+{ -+ PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+#if !defined(PVRSRV_USE_BRIDGE_LOCK) -+ OSLockAcquire(psDevInfo->hTimerQueryLock); -+#endif -+ -+ /* clear off the flags set by Begin(). Note that _START_TIME is -+ * probably already cleared by Kick() -+ */ -+ psDevInfo->bSaveStart = IMG_FALSE; -+ psDevInfo->bSaveEnd = IMG_FALSE; -+ -+#if !defined(PVRSRV_USE_BRIDGE_LOCK) -+ OSLockRelease(psDevInfo->hTimerQueryLock); -+#endif -+ -+ return PVRSRV_OK; -+} -+ -+ -+PVRSRV_ERROR -+PVRSRVRGXQueryTimerKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT32 ui32QueryId, -+ IMG_UINT64 * pui64StartTime, -+ IMG_UINT64 * pui64EndTime) -+{ -+ PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; -+ IMG_UINT32 ui32Scheduled; -+ IMG_UINT32 ui32Completed; -+ PVRSRV_ERROR eError; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ if (ui32QueryId >= RGX_MAX_TIMER_QUERIES) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+#if !defined(PVRSRV_USE_BRIDGE_LOCK) -+ OSLockAcquire(psDevInfo->hTimerQueryLock); -+#endif -+ -+ ui32Scheduled = psDevInfo->aui32ScheduledOnId[ui32QueryId]; -+ -+ RGXFwSharedMemCacheOpValue(psDevInfo->pui32CompletedById[ui32QueryId], INVALIDATE); -+ ui32Completed = psDevInfo->pui32CompletedById[ui32QueryId]; -+ -+ /* if there was no kick since the Begin() on this id we return 0-s as Begin cleared -+ * the stamps. 
If there was no begin the returned data is undefined - but still -+ * safe from services pov -+ */ -+ if (ui32Completed >= ui32Scheduled) -+ { -+ RGXFwSharedMemCacheOpValue(psDevInfo->pui64StartTimeById[ui32QueryId], INVALIDATE); -+ RGXFwSharedMemCacheOpValue(psDevInfo->pui64EndTimeById[ui32QueryId], INVALIDATE); -+ * pui64StartTime = psDevInfo->pui64StartTimeById[ui32QueryId]; -+ * pui64EndTime = psDevInfo->pui64EndTimeById[ui32QueryId]; -+ -+ eError = PVRSRV_OK; -+ } -+ else -+ { -+ eError = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; -+ } -+ -+#if !defined(PVRSRV_USE_BRIDGE_LOCK) -+ OSLockRelease(psDevInfo->hTimerQueryLock); -+#endif -+ return eError; -+} -+ -+ -+ -+/****************************************************************************** -+ NOT BRIDGED/EXPORTED FUNCS -+******************************************************************************/ -+ -+void -+RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO * psDevInfo, -+ PRGXFWIF_TIMESTAMP_ADDR * ppPreAddr, -+ PRGXFWIF_TIMESTAMP_ADDR * ppPostAddr, -+ PRGXFWIF_UFO_ADDR * ppUpdate) -+{ -+ if (ppPreAddr != NULL) -+ { -+ if (psDevInfo->bSaveStart) -+ { -+ /* drop the SaveStart on the first Kick */ -+ psDevInfo->bSaveStart = IMG_FALSE; -+ -+ RGXSetFirmwareAddress(ppPreAddr, -+ psDevInfo->psStartTimeMemDesc, -+ sizeof(IMG_UINT64) * psDevInfo->ui32ActiveQueryId, -+ RFW_FWADDR_NOREF_FLAG); -+ } -+ else -+ { -+ ppPreAddr->ui32Addr = 0; -+ } -+ } -+ -+ if (ppPostAddr != NULL && ppUpdate != NULL) -+ { -+ if (psDevInfo->bSaveEnd) -+ { -+ RGXSetFirmwareAddress(ppPostAddr, -+ psDevInfo->psEndTimeMemDesc, -+ sizeof(IMG_UINT64) * psDevInfo->ui32ActiveQueryId, -+ RFW_FWADDR_NOREF_FLAG); -+ -+ psDevInfo->aui32ScheduledOnId[psDevInfo->ui32ActiveQueryId]++; -+ -+ RGXSetFirmwareAddress(ppUpdate, -+ psDevInfo->psCompletedMemDesc, -+ sizeof(IMG_UINT32) * psDevInfo->ui32ActiveQueryId, -+ RFW_FWADDR_NOREF_FLAG); -+ } -+ else -+ { -+ ppUpdate->ui32Addr = 0; -+ ppPostAddr->ui32Addr = 0; -+ } -+ } -+} -+ -+ -+/****************************************************************************** -+ End of file (rgxtimerquery.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxtimerquery.h b/drivers/gpu/drm/img-rogue/rgxtimerquery.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxtimerquery.h -@@ -0,0 +1,117 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX Timer queries -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the RGX Timer queries functionality -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
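For reference, a minimal sketch of how a caller might drive the bridged timer-query interface implemented above (a hypothetical helper, assuming valid psConnection/psDeviceNode services handles; the kick being timed is elided and is not part of this patch):

    #include "rgxtimerquery.h"

    /* Hypothetical caller: time whatever is kicked between Begin() and End()
     * using timer-query slot 0, then poll until the firmware has caught up. */
    static PVRSRV_ERROR TimeOneKick(CONNECTION_DATA *psConnection,
                                    PVRSRV_DEVICE_NODE *psDeviceNode,
                                    IMG_UINT64 *pui64Start,
                                    IMG_UINT64 *pui64End)
    {
        const IMG_UINT32 ui32QueryId = 0; /* must be < RGX_MAX_TIMER_QUERIES */
        PVRSRV_ERROR eError;

        eError = PVRSRVRGXBeginTimerQueryKM(psConnection, psDeviceNode, ui32QueryId);
        if (eError != PVRSRV_OK)
        {
            return eError;
        }

        /* ... submit the kick(s) to be timed here ... */

        eError = PVRSRVRGXEndTimerQueryKM(psConnection, psDeviceNode);
        if (eError != PVRSRV_OK)
        {
            return eError;
        }

        /* PVRSRV_ERROR_RESOURCE_UNAVAILABLE means the firmware has not yet
         * completed everything scheduled since Begin(), so simply retry. */
        do
        {
            eError = PVRSRVRGXQueryTimerKM(psConnection, psDeviceNode, ui32QueryId,
                                           pui64Start, pui64End);
        } while (eError == PVRSRV_ERROR_RESOURCE_UNAVAILABLE);

        return eError;
    }
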
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(RGX_TIMERQUERIES_H) -+#define RGX_TIMERQUERIES_H -+ -+#include "pvrsrv_error.h" -+#include "img_types.h" -+#include "device.h" -+#include "rgxdevice.h" -+ -+#include "connection_server.h" -+ -+/*************************************************************************/ /*! -+@Function PVRSRVRGXBeginTimerQueryKM -+@Description Opens a new timer query. -+ -+@Input ui32QueryId an identifier between [ 0 and RGX_MAX_TIMER_QUERIES - 1 ] -+@Return PVRSRV_OK on success. -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVRGXBeginTimerQueryKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT32 ui32QueryId); -+ -+ -+/*************************************************************************/ /*! -+@Function PVRSRVRGXEndTimerQueryKM -+@Description Closes a timer query -+ -+ The lack of ui32QueryId argument expresses the fact that there -+ can't be overlapping queries open. -+@Return PVRSRV_OK on success. -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVRGXEndTimerQueryKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode); -+ -+ -+ -+/*************************************************************************/ /*! -+@Function PVRSRVRGXQueryTimerKM -+@Description Queries the state of the specified timer -+ -+@Input ui32QueryId an identifier between [ 0 and RGX_MAX_TIMER_QUERIES - 1 ] -+@Out pui64StartTime -+@Out pui64EndTime -+@Return PVRSRV_OK on success. 
-+ PVRSRV_ERROR_RESOURCE_UNAVAILABLE if the device is still busy with -+ operations from the queried period -+ other error code otherwise -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVRGXQueryTimerKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT32 ui32QueryId, -+ IMG_UINT64 * pui64StartTime, -+ IMG_UINT64 * pui64EndTime); -+ -+ -+ -+/****************************************************************************** -+ NON BRIDGED/EXPORTED interface -+******************************************************************************/ -+ -+/* get the relevant data from the Kick to the helper*/ -+void -+RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO * psDevInfo, -+ PRGXFWIF_TIMESTAMP_ADDR * ppPreAddr, -+ PRGXFWIF_TIMESTAMP_ADDR * ppPostAddr, -+ PRGXFWIF_UFO_ADDR * ppUpdate); -+ -+#endif /* RGX_TIMERQUERIES_H */ -+ -+/****************************************************************************** -+ End of file (rgxtimerquery.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxtransfer.c b/drivers/gpu/drm/img-rogue/rgxtransfer.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxtransfer.c -@@ -0,0 +1,1839 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device specific transfer queue routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device specific functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "pdump_km.h" -+#include "rgxdevice.h" -+#include "rgxccb.h" -+#include "rgxutils.h" -+#include "rgxfwcmnctx.h" -+#include "rgxtransfer.h" -+#include "rgx_tq_shared.h" -+#include "rgxmem.h" -+#include "allocmem.h" -+#include "devicemem.h" -+#include "devicemem_pdump.h" -+#include "osfunc.h" -+#include "pvr_debug.h" -+#include "pvrsrv.h" -+#include "rgx_fwif_resetframework.h" -+#include "rgx_memallocflags.h" -+#include "rgxhwperf.h" -+#include "ospvr_gputrace.h" -+#include "htbserver.h" -+#include "rgxshader.h" -+ -+#include "pdump_km.h" -+ -+#include "sync_server.h" -+#include "sync_internal.h" -+#include "sync.h" -+#include "rgx_bvnc_defs_km.h" -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+#include "pvr_buffer_sync.h" -+#endif -+ -+#include "sync_checkpoint.h" -+#include "sync_checkpoint_internal.h" -+ -+#include "rgxtimerquery.h" -+ -+/* Enable this to dump the compiled list of UFOs prior to kick call */ -+#define ENABLE_TQ_UFO_DUMP 0 -+ -+//#define TRANSFER_CHECKPOINT_DEBUG 1 -+ -+#if defined(TRANSFER_CHECKPOINT_DEBUG) -+#define CHKPT_DBG(X) PVR_DPF(X) -+#else -+#define CHKPT_DBG(X) -+#endif -+ -+typedef struct { -+ DEVMEM_MEMDESC *psFWContextStateMemDesc; -+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; -+ IMG_INT32 i32Priority; -+#if defined(SUPPORT_BUFFER_SYNC) -+ struct pvr_buffer_sync_context *psBufferSyncContext; -+#endif -+} RGX_SERVER_TQ_3D_DATA; -+ -+typedef struct { -+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; -+ IMG_INT32 i32Priority; -+#if defined(SUPPORT_BUFFER_SYNC) -+ struct pvr_buffer_sync_context *psBufferSyncContext; -+#endif -+} RGX_SERVER_TQ_2D_DATA; -+ -+struct _RGX_SERVER_TQ_CONTEXT_ { -+ PVRSRV_DEVICE_NODE *psDeviceNode; -+ DEVMEM_MEMDESC *psFWFrameworkMemDesc; -+ DEVMEM_MEMDESC *psFWTransferContextMemDesc; -+ IMG_UINT32 ui32Flags; -+#define RGX_SERVER_TQ_CONTEXT_FLAGS_2D (1<<0) -+#define RGX_SERVER_TQ_CONTEXT_FLAGS_3D (1<<1) -+ RGX_SERVER_TQ_3D_DATA s3DData; -+ RGX_SERVER_TQ_2D_DATA s2DData; -+ DLLIST_NODE sListNode; -+ ATOMIC_T hIntJobRef; -+ IMG_UINT32 ui32PDumpFlags; -+ /* per-prepare sync address lists */ -+ SYNC_ADDR_LIST asSyncAddrListFence[TQ_MAX_PREPARES_PER_SUBMIT]; -+ SYNC_ADDR_LIST asSyncAddrListUpdate[TQ_MAX_PREPARES_PER_SUBMIT]; -+ POS_LOCK hLock; -+}; -+ -+/* -+ Static functions used by transfer context code -+*/ -+static PVRSRV_ERROR _Create3DTransferContext(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEM_MEMDESC *psAllocatedMemDesc, -+ IMG_UINT32 ui32AllocatedOffset, -+ DEVMEM_MEMDESC *psFWMemContextMemDesc, -+ IMG_INT32 i32Priority, -+ RGX_COMMON_CONTEXT_INFO *psInfo, -+ RGX_SERVER_TQ_3D_DATA *ps3DData, -+ IMG_UINT32 ui32CCBAllocSizeLog2, -+ IMG_UINT32 ui32CCBMaxAllocSizeLog2, -+ IMG_UINT32 ui32ContextFlags, -+ IMG_UINT64 ui64RobustnessAddress) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ PVRSRV_ERROR eError; -+ IMG_UINT ui3DRegISPStateStoreSize = 0; -+ IMG_UINT 
uiNumISPStoreRegs = 1; /* default value 1 expected */ -+ /* -+ Allocate device memory for the firmware GPU context suspend state. -+ Note: the FW reads/writes the state to memory by accessing the GPU register interface. -+ */ -+ PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware TQ/3D context suspend state"); -+ -+ if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) -+ { -+ uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, -+ RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX); -+ } -+ -+ /* Calculate the size of the 3DCTX ISP state */ -+ ui3DRegISPStateStoreSize = sizeof(RGXFWIF_3DCTX_STATE) + -+ uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0]); -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ ps3DData->psBufferSyncContext = -+ pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, -+ "rogue-tq3d"); -+ if (IS_ERR(ps3DData->psBufferSyncContext)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: failed to create buffer_sync context (err=%ld)", -+ __func__, PTR_ERR(ps3DData->psBufferSyncContext))); -+ -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto fail_buffer_sync_context_create; -+ } -+#endif -+ -+ eError = DevmemFwAllocate(psDevInfo, -+ ui3DRegISPStateStoreSize, -+ RGX_FWCOMCTX_ALLOCFLAGS, -+ "FwTQ3DContext", -+ &ps3DData->psFWContextStateMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_contextswitchstate; -+ } -+ -+ eError = FWCommonContextAllocate(psConnection, -+ psDeviceNode, -+ REQ_TYPE_TQ_3D, -+ RGXFWIF_DM_3D, -+ NULL, -+ psAllocatedMemDesc, -+ ui32AllocatedOffset, -+ psFWMemContextMemDesc, -+ ps3DData->psFWContextStateMemDesc, -+ ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TQ3D_CCB_SIZE_LOG2, -+ ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TQ3D_CCB_MAX_SIZE_LOG2, -+ ui32ContextFlags, -+ i32Priority, -+ UINT_MAX, /* max deadline MS */ -+ ui64RobustnessAddress, -+ psInfo, -+ &ps3DData->psServerCommonContext); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_contextalloc; -+ } -+ -+ -+ PDUMPCOMMENT(psDeviceNode, "Dump 3D context suspend state buffer"); -+ DevmemPDumpLoadMem(ps3DData->psFWContextStateMemDesc, 0, sizeof(RGXFWIF_3DCTX_STATE), PDUMP_FLAGS_CONTINUOUS); -+ -+ ps3DData->i32Priority = i32Priority; -+ return PVRSRV_OK; -+ -+fail_contextalloc: -+ DevmemFwUnmapAndFree(psDevInfo, ps3DData->psFWContextStateMemDesc); -+fail_contextswitchstate: -+#if defined(SUPPORT_BUFFER_SYNC) -+ pvr_buffer_sync_context_destroy(ps3DData->psBufferSyncContext); -+ ps3DData->psBufferSyncContext = NULL; -+fail_buffer_sync_context_create: -+#endif -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+ -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+static PVRSRV_ERROR _Create2DTransferContext(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ DEVMEM_MEMDESC *psFWMemContextMemDesc, -+ IMG_INT32 i32Priority, -+ RGX_COMMON_CONTEXT_INFO *psInfo, -+ RGX_SERVER_TQ_2D_DATA *ps2DData, -+ IMG_UINT32 ui32CCBAllocSizeLog2, -+ IMG_UINT32 ui32CCBMaxAllocSizeLog2, -+ IMG_UINT32 ui32ContextFlags, -+ IMG_UINT64 ui64RobustnessAddress) -+{ -+ PVRSRV_ERROR eError; -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ ps2DData->psBufferSyncContext = -+ pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, -+ "rogue-tqtla"); -+ if (IS_ERR(ps2DData->psBufferSyncContext)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: failed to create buffer_sync context (err=%ld)", -+ __func__, PTR_ERR(ps2DData->psBufferSyncContext))); -+ -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto fail_buffer_sync_context_create; -+ } -+#endif -+ -+ eError = 
FWCommonContextAllocate(psConnection, -+ psDeviceNode, -+ REQ_TYPE_TQ_2D, -+ RGXFWIF_DM_2D, -+ NULL, -+ NULL, -+ 0, -+ psFWMemContextMemDesc, -+ NULL, -+ ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TQ2D_CCB_SIZE_LOG2, -+ ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TQ2D_CCB_MAX_SIZE_LOG2, -+ ui32ContextFlags, -+ i32Priority, -+ UINT_MAX, /* max deadline MS */ -+ ui64RobustnessAddress, -+ psInfo, -+ &ps2DData->psServerCommonContext); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_contextalloc; -+ } -+ -+ ps2DData->i32Priority = i32Priority; -+ return PVRSRV_OK; -+ -+fail_contextalloc: -+#if defined(SUPPORT_BUFFER_SYNC) -+ pvr_buffer_sync_context_destroy(ps2DData->psBufferSyncContext); -+ ps2DData->psBufferSyncContext = NULL; -+fail_buffer_sync_context_create: -+#endif -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+ -+static PVRSRV_ERROR _Destroy2DTransferContext(RGX_SERVER_TQ_2D_DATA *ps2DData, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Check if the FW has finished with this resource ... */ -+ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, -+ ps2DData->psServerCommonContext, -+ RGXFWIF_DM_2D, -+ ui32PDumpFlags); -+ if (RGXIsErrorAndDeviceRecoverable(psDeviceNode, &eError)) -+ { -+ return eError; -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ } -+ -+ /* ... it has so we can free its resources */ -+ FWCommonContextFree(ps2DData->psServerCommonContext); -+ ps2DData->psServerCommonContext = NULL; -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ pvr_buffer_sync_context_destroy(ps2DData->psBufferSyncContext); -+ ps2DData->psBufferSyncContext = NULL; -+#endif -+ -+ return eError; -+} -+#endif /* #if defined(RGX_FEATURE_TLA_BIT_MASK) */ -+ -+static PVRSRV_ERROR _Destroy3DTransferContext(RGX_SERVER_TQ_3D_DATA *ps3DData, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Check if the FW has finished with this resource ... */ -+ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, -+ ps3DData->psServerCommonContext, -+ RGXFWIF_DM_3D, -+ ui32PDumpFlags); -+ if (PVRSRVIsRetryError(eError)) -+ { -+ return eError; -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ } -+ -+ /* ... 
it has so we can free its resources */ -+ DevmemFwUnmapAndFree(psDeviceNode->pvDevice, ps3DData->psFWContextStateMemDesc); -+ FWCommonContextFree(ps3DData->psServerCommonContext); -+ ps3DData->psServerCommonContext = NULL; -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ pvr_buffer_sync_context_destroy(ps3DData->psBufferSyncContext); -+ ps3DData->psBufferSyncContext = NULL; -+#endif -+ -+ return eError; -+} -+ -+ -+/* -+ * PVRSRVCreateTransferContextKM -+ */ -+PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_INT32 i32Priority, -+ IMG_UINT32 ui32FrameworkCommandSize, -+ IMG_PBYTE pabyFrameworkCommand, -+ IMG_HANDLE hMemCtxPrivData, -+ IMG_UINT32 ui32PackedCCBSizeU8888, -+ IMG_UINT32 ui32ContextFlags, -+ IMG_UINT64 ui64RobustnessAddress, -+ RGX_SERVER_TQ_CONTEXT **ppsTransferContext) -+{ -+ RGX_SERVER_TQ_CONTEXT *psTransferContext; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); -+ RGX_COMMON_CONTEXT_INFO sInfo = {NULL}; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ /* Allocate the server side structure */ -+ *ppsTransferContext = NULL; -+ psTransferContext = OSAllocZMem(sizeof(*psTransferContext)); -+ if (psTransferContext == NULL) -+ { -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ /* -+ Create the FW transfer context, this has the TQ common -+ context embedded within it -+ */ -+ eError = DevmemFwAllocate(psDevInfo, -+ sizeof(RGXFWIF_FWTRANSFERCONTEXT), -+ RGX_FWCOMCTX_ALLOCFLAGS, -+ "FwTransferContext", -+ &psTransferContext->psFWTransferContextMemDesc); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_fwtransfercontext; -+ } -+ -+ eError = OSLockCreate(&psTransferContext->hLock); -+ -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_createlock; -+ } -+ -+ psTransferContext->psDeviceNode = psDeviceNode; -+ -+ if (ui32FrameworkCommandSize) -+ { -+ /* -+ * Create the FW framework buffer -+ */ -+ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, -+ &psTransferContext->psFWFrameworkMemDesc, -+ ui32FrameworkCommandSize); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to allocate firmware GPU framework state (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_frameworkcreate; -+ } -+ -+ /* Copy the Framework client data into the framework buffer */ -+ eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode, -+ psTransferContext->psFWFrameworkMemDesc, -+ pabyFrameworkCommand, -+ ui32FrameworkCommandSize); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to populate the framework buffer (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_frameworkcopy; -+ } -+ -+ sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc; -+ } -+ -+ eError = _Create3DTransferContext(psConnection, -+ psDeviceNode, -+ psTransferContext->psFWTransferContextMemDesc, -+ offsetof(RGXFWIF_FWTRANSFERCONTEXT, sTQContext), -+ psFWMemContextMemDesc, -+ i32Priority, -+ &sInfo, -+ &psTransferContext->s3DData, -+ U32toU8_Unpack3(ui32PackedCCBSizeU8888), -+ U32toU8_Unpack4(ui32PackedCCBSizeU8888), -+ ui32ContextFlags, -+ ui64RobustnessAddress); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_3dtransfercontext; -+ } -+ psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_3D; -+ -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)) 
-+ { -+ eError = _Create2DTransferContext(psConnection, -+ psDeviceNode, -+ psFWMemContextMemDesc, -+ i32Priority, -+ &sInfo, -+ &psTransferContext->s2DData, -+ U32toU8_Unpack1(ui32PackedCCBSizeU8888), -+ U32toU8_Unpack2(ui32PackedCCBSizeU8888), -+ ui32ContextFlags, -+ ui64RobustnessAddress); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_2dtransfercontext; -+ } -+ psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_2D; -+ } -+#endif -+ -+ { -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ -+ OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock); -+ dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode)); -+ OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock); -+ *ppsTransferContext = psTransferContext; -+ } -+ -+ return PVRSRV_OK; -+ -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+fail_2dtransfercontext: -+ _Destroy3DTransferContext(&psTransferContext->s3DData, -+ psTransferContext->psDeviceNode, -+ psTransferContext->ui32PDumpFlags); -+#endif -+fail_3dtransfercontext: -+fail_frameworkcopy: -+ if (psTransferContext->psFWFrameworkMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc); -+ } -+fail_frameworkcreate: -+ OSLockDestroy(psTransferContext->hLock); -+fail_createlock: -+ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc); -+fail_fwtransfercontext: -+ OSFreeMem(psTransferContext); -+ PVR_ASSERT(eError != PVRSRV_OK); -+ *ppsTransferContext = NULL; -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice; -+ IMG_UINT32 i; -+ -+ /* remove node from list before calling destroy - as destroy, if successful -+ * will invalidate the node -+ * must be re-added if destroy fails -+ */ -+ OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock); -+ dllist_remove_node(&(psTransferContext->sListNode)); -+ OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock); -+ -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ if ((psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && -+ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) -+ { -+ eError = _Destroy2DTransferContext(&psTransferContext->s2DData, -+ psTransferContext->psDeviceNode, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_destroy2d; -+ } -+ /* We've freed the 2D context, don't try to free it again */ -+ psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_2D; -+ } -+#endif -+ -+ if (psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D) -+ { -+ eError = _Destroy3DTransferContext(&psTransferContext->s3DData, -+ psTransferContext->psDeviceNode, -+ PDUMP_FLAGS_CONTINUOUS); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_destroy3d; -+ } -+ /* We've freed the 3D context, don't try to free it again */ -+ psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_3D; -+ } -+ -+ /* free any resources within the per-prepare UFO address stores */ -+ for (i = 0; i < TQ_MAX_PREPARES_PER_SUBMIT; i++) -+ { -+ SyncAddrListDeinit(&psTransferContext->asSyncAddrListFence[i]); -+ SyncAddrListDeinit(&psTransferContext->asSyncAddrListUpdate[i]); -+ } -+ -+ if (psTransferContext->psFWFrameworkMemDesc) -+ { -+ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc); -+ } -+ -+ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc); -+ -+ OSLockDestroy(psTransferContext->hLock); -+ -+ OSFreeMem(psTransferContext); -+ -+ 
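The destroy path above is written so the call can be safely retried when the firmware reports the context is still busy: each sub-context flag (RGX_SERVER_TQ_CONTEXT_FLAGS_2D / _3D) is cleared only after that part has actually been freed, so a retried destroy skips the parts already torn down, and the context is re-added to the device list if teardown fails (see the failure labels that follow). A reduced, self-contained sketch of that pattern, with hypothetical names that are not part of the driver:

    /* Hypothetical illustration of the retry-safe teardown pattern used above:
     * a part's flag is cleared only once that part is really gone, so a
     * retried destroy never frees the same part twice. */
    typedef int (*PFN_FREE_PART)(void *pvCtx);   /* returns 0 on success, else retry */

    typedef struct
    {
        unsigned int uiFlags;
        void        *pvCtx;
    } DESTROY_STATE;

    #define PART_A (1U << 0)
    #define PART_B (1U << 1)

    static int DestroyWithRetry(DESTROY_STATE *psState,
                                PFN_FREE_PART pfnFreeA,
                                PFN_FREE_PART pfnFreeB)
    {
        if (psState->uiFlags & PART_A)
        {
            int err = pfnFreeA(psState->pvCtx);
            if (err != 0)
            {
                return err;                  /* caller retries the whole destroy */
            }
            psState->uiFlags &= ~PART_A;     /* never free part A twice */
        }

        if (psState->uiFlags & PART_B)
        {
            int err = pfnFreeB(psState->pvCtx);
            if (err != 0)
            {
                return err;
            }
            psState->uiFlags &= ~PART_B;
        }

        return 0;
    }
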
return PVRSRV_OK; -+ -+fail_destroy3d: -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ -+fail_destroy2d: -+#endif -+ OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock); -+ dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode)); -+ OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock); -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+/* -+ * PVRSRVSubmitTQ3DKickKM -+ */ -+PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, -+ IMG_UINT32 ui32PrepareCount, -+ IMG_UINT32 *paui32ClientUpdateCount, -+ SYNC_PRIMITIVE_BLOCK ***papauiClientUpdateUFODevVarBlock, -+ IMG_UINT32 **papaui32ClientUpdateSyncOffset, -+ IMG_UINT32 **papaui32ClientUpdateValue, -+ PVRSRV_FENCE iCheckFence, -+ PVRSRV_TIMELINE i2DUpdateTimeline, -+ PVRSRV_FENCE *pi2DUpdateFence, -+ PVRSRV_TIMELINE i3DUpdateTimeline, -+ PVRSRV_FENCE *pi3DUpdateFence, -+ IMG_CHAR szFenceName[32], -+ IMG_UINT32 *paui32FWCommandSize, -+ IMG_UINT8 **papaui8FWCommand, -+ IMG_UINT32 *pui32TQPrepareFlags, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32SyncPMRCount, -+ IMG_UINT32 *paui32SyncPMRFlags, -+ PMR **ppsSyncPMRs) -+{ -+ PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+ RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelper; -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ RGX_CCB_CMD_HELPER_DATA *pas2DCmdHelper; -+#endif -+ IMG_UINT32 ui323DCmdCount = 0; -+ IMG_UINT32 ui323DCmdLast = 0; -+ IMG_UINT32 ui323DCmdOffset = 0; -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ IMG_UINT32 ui322DCmdCount = 0; -+ IMG_UINT32 ui322DCmdLast = 0; -+ IMG_UINT32 ui322DCmdOffset = 0; -+#endif -+ IMG_UINT32 ui32PDumpFlags = PDUMP_FLAGS_NONE; -+ IMG_UINT32 i; -+ IMG_UINT64 uiCheckFenceUID = 0; -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ IMG_UINT64 ui2DUpdateFenceUID = 0; -+#endif -+ IMG_UINT64 ui3DUpdateFenceUID = 0; -+ -+ PSYNC_CHECKPOINT ps3DUpdateSyncCheckpoint = NULL; -+ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; -+ IMG_UINT32 ui32FenceSyncCheckpointCount = 0; -+ IMG_UINT32 *pui323DIntAllocatedUpdateValues = NULL; -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ PSYNC_CHECKPOINT ps2DUpdateSyncCheckpoint = NULL; -+ IMG_UINT32 *pui322DIntAllocatedUpdateValues = NULL; -+ PVRSRV_CLIENT_SYNC_PRIM *ps2DFenceTimelineUpdateSync = NULL; -+ IMG_UINT32 ui322DFenceTimelineUpdateValue = 0; -+ void *pv2DUpdateFenceFinaliseData = NULL; -+#endif -+ PVRSRV_CLIENT_SYNC_PRIM *ps3DFenceTimelineUpdateSync = NULL; -+ IMG_UINT32 ui323DFenceTimelineUpdateValue = 0; -+ void *pv3DUpdateFenceFinaliseData = NULL; -+#if defined(SUPPORT_BUFFER_SYNC) -+ PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL; -+ struct pvr_buffer_sync_append_data *psBufferSyncData = NULL; -+ PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL; -+ IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0; -+#endif /* defined(SUPPORT_BUFFER_SYNC) */ -+ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_ERROR eError2; -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ PVRSRV_FENCE i2DUpdateFence = PVRSRV_NO_FENCE; -+#endif -+ PVRSRV_FENCE i3DUpdateFence = PVRSRV_NO_FENCE; -+ IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); -+ IMG_UINT32 ui32PreparesDone = 0; -+ -+ -+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr; -+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr; -+ PRGXFWIF_UFO_ADDR pRMWUFOAddr; -+ -+#if !defined(RGX_FEATURE_TLA_BIT_MASK) -+ PVR_UNREFERENCED_PARAMETER(i2DUpdateTimeline); -+ PVR_UNREFERENCED_PARAMETER(pi2DUpdateFence); -+#endif -+ -+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) 
psDeviceNode->pvDevice, -+ &pPreAddr, -+ &pPostAddr, -+ &pRMWUFOAddr); -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ if (i2DUpdateTimeline != PVRSRV_NO_TIMELINE && !pi2DUpdateFence) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+#endif -+ if (i3DUpdateTimeline != PVRSRV_NO_TIMELINE && !pi3DUpdateFence) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* Validate sync prim fence/update value ptrs -+ * for each prepare. -+ */ -+ { -+ IMG_UINT32 ui32Prepare; -+ IMG_UINT32 *pui32UpdateCount = paui32ClientUpdateCount; -+ IMG_UINT32 **papui32UpdateValue = papaui32ClientUpdateValue; -+ -+ /* Check that we have not been given a null ptr for -+ * update count parameters. -+ */ -+ PVR_LOG_RETURN_IF_FALSE((paui32ClientUpdateCount != NULL), -+ "paui32ClientUpdateCount NULL", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ for (ui32Prepare=0; ui32Prepare 0) -+ { -+ PVR_LOG_RETURN_IF_FALSE(*papui32UpdateValue != NULL, -+ "paui32ClientUpdateValue NULL but " -+ "ui32ClientUpdateCount > 0", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ /* Advance local ptr to update values ptr for next prepare. */ -+ papui32UpdateValue++; -+ /* Advance local ptr to update count for next prepare. */ -+ pui32UpdateCount++; -+ } -+ } -+ -+ /* Ensure the string is null-terminated (Required for safety) */ -+ szFenceName[31] = '\0'; -+ -+ if ((ui32PrepareCount == 0) || (ui32PrepareCount > TQ_MAX_PREPARES_PER_SUBMIT)) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (ui32SyncPMRCount != 0) -+ { -+ if (!ppsSyncPMRs) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ /* PMR sync is valid only when there is no batching */ -+ if ((ui32PrepareCount != 1)) -+#endif -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ } -+ -+ OSLockAcquire(psTransferContext->hLock); -+ -+ /* We can't allocate the required amount of stack space on all consumer architectures */ -+ pas3DCmdHelper = OSAllocMem(sizeof(*pas3DCmdHelper) * ui32PrepareCount); -+ if (pas3DCmdHelper == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto fail_alloc3dhelper; -+ } -+ -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ pas2DCmdHelper = OSAllocMem(sizeof(*pas2DCmdHelper) * ui32PrepareCount); -+ if (pas2DCmdHelper == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto fail_alloc2dhelper; -+ } -+#endif -+ -+ if (iCheckFence != PVRSRV_NO_FENCE) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psDeviceNode->hSyncCheckpointContext)); -+ /* Resolve the sync checkpoints that make up the input fence */ -+ eError = SyncCheckpointResolveFence(psDeviceNode->hSyncCheckpointContext, -+ iCheckFence, -+ &ui32FenceSyncCheckpointCount, -+ &apsFenceSyncCheckpoints, -+ &uiCheckFenceUID, -+ ui32PDumpFlags); -+ if (eError != PVRSRV_OK) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError)); -+ goto fail_resolve_fencesync_input_fence; -+ } -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints)); -+#if defined(TRANSFER_CHECKPOINT_DEBUG) -+ if (ui32FenceSyncCheckpointCount > 0) -+ { -+ IMG_UINT32 ii; -+ for (ii=0; ii", __func__, ii, (void*)psNextCheckpoint)); -+ } -+ } -+#endif -+ } -+ /* -+ Ensure we do the right thing for server syncs which cross call boundaries -+ */ -+ for (i=0;iasSyncAddrListFence[i]; -+ SYNC_ADDR_LIST *psSyncAddrListUpdate = 
&psTransferContext->asSyncAddrListUpdate[i]; -+ IMG_UINT32 ui32IntClientFenceCount = 0U; -+ IMG_UINT32 ui32IntClientUpdateCount = paui32ClientUpdateCount[i]; -+ IMG_UINT32 *paui32IntUpdateValue = papaui32ClientUpdateValue[i]; -+#if defined(SUPPORT_BUFFER_SYNC) -+ struct pvr_buffer_sync_context *psBufferSyncContext; -+#endif -+ -+ PVRSRV_FENCE *piUpdateFence = NULL; -+ PVRSRV_TIMELINE iUpdateTimeline = PVRSRV_NO_TIMELINE; -+ void **ppvUpdateFenceFinaliseData = NULL; -+ PSYNC_CHECKPOINT * ppsUpdateSyncCheckpoint = NULL; -+ PVRSRV_CLIENT_SYNC_PRIM **ppsFenceTimelineUpdateSync = NULL; -+ IMG_UINT32 *pui32FenceTimelineUpdateValue = NULL; -+ IMG_UINT32 **ppui32IntAllocatedUpdateValues = NULL; -+ IMG_BOOL bCheckFence = IMG_FALSE; -+ IMG_BOOL bUpdateFence = IMG_FALSE; -+ IMG_UINT64 *puiUpdateFenceUID = NULL; -+ -+ IMG_BOOL bCCBStateOpen = IMG_FALSE; -+ -+ if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 3D)) -+ { -+ psServerCommonCtx = psTransferContext->s3DData.psServerCommonContext; -+ psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx); -+ pszCommandName = "TQ-3D"; -+ psCmdHelper = &pas3DCmdHelper[ui323DCmdCount++]; -+ eType = RGXFWIF_CCB_CMD_TYPE_TQ_3D; -+#if defined(SUPPORT_BUFFER_SYNC) -+ psBufferSyncContext = psTransferContext->s3DData.psBufferSyncContext; -+#endif -+ bCheckFence = ui323DCmdCount == 1; -+ bUpdateFence = ui323DCmdCount == ui323DCmdLast -+ && i3DUpdateTimeline != PVRSRV_NO_TIMELINE; -+ -+ if (bUpdateFence) -+ { -+ piUpdateFence = &i3DUpdateFence; -+ iUpdateTimeline = i3DUpdateTimeline; -+ ppvUpdateFenceFinaliseData = &pv3DUpdateFenceFinaliseData; -+ ppsUpdateSyncCheckpoint = &ps3DUpdateSyncCheckpoint; -+ ppsFenceTimelineUpdateSync = &ps3DFenceTimelineUpdateSync; -+ pui32FenceTimelineUpdateValue = &ui323DFenceTimelineUpdateValue; -+ ppui32IntAllocatedUpdateValues = &pui323DIntAllocatedUpdateValues; -+ puiUpdateFenceUID = &ui3DUpdateFenceUID; -+ } -+ } -+ else -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 2D) && -+ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) -+ { -+ psServerCommonCtx = psTransferContext->s2DData.psServerCommonContext; -+ psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx); -+ pszCommandName = "TQ-2D"; -+ psCmdHelper = &pas2DCmdHelper[ui322DCmdCount++]; -+ eType = RGXFWIF_CCB_CMD_TYPE_TQ_2D; -+#if defined(SUPPORT_BUFFER_SYNC) -+ psBufferSyncContext = psTransferContext->s2DData.psBufferSyncContext; -+#endif -+ bCheckFence = ui322DCmdCount == 1; -+ bUpdateFence = ui322DCmdCount == ui322DCmdLast -+ && i2DUpdateTimeline != PVRSRV_NO_TIMELINE; -+ -+ if (bUpdateFence) -+ { -+ piUpdateFence = &i2DUpdateFence; -+ iUpdateTimeline = i2DUpdateTimeline; -+ ppvUpdateFenceFinaliseData = &pv2DUpdateFenceFinaliseData; -+ ppsUpdateSyncCheckpoint = &ps2DUpdateSyncCheckpoint; -+ ppsFenceTimelineUpdateSync = &ps2DFenceTimelineUpdateSync; -+ pui32FenceTimelineUpdateValue = &ui322DFenceTimelineUpdateValue; -+ ppui32IntAllocatedUpdateValues = &pui322DIntAllocatedUpdateValues; -+ puiUpdateFenceUID = &ui2DUpdateFenceUID; -+ } -+ } -+ else -+#endif -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto fail_prepare_loop; -+ } -+ -+ if (i == 0) -+ { -+ ui32PDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ? 
PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE; -+ PDUMPCOMMENTWITHFLAGS(psDeviceNode, ui32PDumpFlags, -+ "%s Command Server Submit on FWCtx %08x", pszCommandName, FWCommonContextGetFWAddress(psServerCommonCtx).ui32Addr); -+ psTransferContext->ui32PDumpFlags |= ui32PDumpFlags; -+ } -+ else -+ { -+ IMG_UINT32 ui32NewPDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ? PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE; -+ if (ui32NewPDumpFlags != ui32PDumpFlags) -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ PVR_DPF((PVR_DBG_ERROR, "%s: Mixing of continuous and non-continuous command in a batch is not permitted", __func__)); -+ goto fail_prepare_loop; -+ } -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psTransferContext->sSyncAddrListFence, %d fences)", __func__, ui32IntClientFenceCount)); -+ eError = SyncAddrListPopulate(psSyncAddrListFence, -+ 0, -+ NULL, -+ NULL); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_prepare_loop; -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psTransferContext->asSyncAddrListUpdate[], %d updates)", __func__, ui32IntClientUpdateCount)); -+ eError = SyncAddrListPopulate(psSyncAddrListUpdate, -+ ui32IntClientUpdateCount, -+ papauiClientUpdateUFODevVarBlock[i], -+ papaui32ClientUpdateSyncOffset[i]); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_prepare_loop; -+ } -+ if (!pauiIntUpdateUFOAddress) -+ { -+ pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs; -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: (after sync prims) ui32IntClientUpdateCount=%d", __func__, ui32IntClientUpdateCount)); -+ if (ui32SyncPMRCount) -+ { -+#if defined(SUPPORT_BUFFER_SYNC) -+ int err; -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling pvr_buffer_sync_resolve_and_create_fences", __func__)); -+ err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext, -+ psTransferContext->psDeviceNode->hSyncCheckpointContext, -+ ui32SyncPMRCount, -+ ppsSyncPMRs, -+ paui32SyncPMRFlags, -+ &ui32BufferFenceSyncCheckpointCount, -+ &apsBufferFenceSyncCheckpoints, -+ &psBufferUpdateSyncCheckpoint, -+ &psBufferSyncData); -+ if (err) -+ { -+ switch (err) -+ { -+ case -EINTR: -+ eError = PVRSRV_ERROR_RETRY; -+ break; -+ case -ENOMEM: -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ break; -+ default: -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ break; -+ } -+ -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: pvr_buffer_sync_resolve_and_create_fences failed (%s)", __func__, PVRSRVGetErrorString(eError))); -+ } -+ goto fail_resolve_buffersync_input_fence; -+ } -+ -+ /* Append buffer sync fences */ -+ if (ui32BufferFenceSyncCheckpointCount > 0) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)psSyncAddrListFence , (void*)pauiIntFenceUFOAddress)); -+ SyncAddrListAppendAndDeRefCheckpoints(psSyncAddrListFence, -+ ui32BufferFenceSyncCheckpointCount, -+ apsBufferFenceSyncCheckpoints); -+ if (!pauiIntFenceUFOAddress) -+ { -+ pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs; -+ } -+ ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount; -+ } -+ -+ if (psBufferUpdateSyncCheckpoint) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 buffer sync checkpoint<%p> to TQ Update (&psTransferContext->asSyncAddrListUpdate[i]=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)psBufferUpdateSyncCheckpoint, (void*)psSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress)); -+ /* Append the update (from 
output fence) */ -+ SyncAddrListAppendCheckpoints(psSyncAddrListUpdate, -+ 1, -+ &psBufferUpdateSyncCheckpoint); -+ if (!pauiIntUpdateUFOAddress) -+ { -+ pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs; -+ } -+ ui32IntClientUpdateCount++; -+ } -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: (after buffer_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount)); -+#else /* defined(SUPPORT_BUFFER_SYNC) */ -+ PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount)); -+ PVR_DPF((PVR_DBG_ERROR, "%s: <--EXIT(%d)", __func__, PVRSRV_ERROR_INVALID_PARAMS)); -+ OSLockRelease(psTransferContext->hLock); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+#endif /* defined(SUPPORT_BUFFER_SYNC) */ -+ } -+ -+ /* Create the output fence (if required) */ -+ if (bUpdateFence) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (piUpdateFence=%p, iUpdateTimeline=%d, psTranserContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __func__, piUpdateFence, iUpdateTimeline, (void*)psDeviceNode->hSyncCheckpointContext)); -+ eError = SyncCheckpointCreateFence(psDeviceNode, -+ szFenceName, -+ iUpdateTimeline, -+ psDeviceNode->hSyncCheckpointContext, -+ piUpdateFence, -+ puiUpdateFenceUID, -+ ppvUpdateFenceFinaliseData, -+ ppsUpdateSyncCheckpoint, -+ (void*)ppsFenceTimelineUpdateSync, -+ pui32FenceTimelineUpdateValue, -+ ui32PDumpFlags); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: SyncCheckpointCreateFence failed (%s)", -+ __func__, -+ PVRSRVGetErrorString(eError))); -+ goto fail_prepare_loop; -+ } -+ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence (piUpdateFence=%p)", __func__, piUpdateFence)); -+ -+ /* Append the sync prim update for the timeline (if required) */ -+ if (*ppsFenceTimelineUpdateSync) -+ { -+ IMG_UINT32 *pui32TimelineUpdateWp = NULL; -+ -+ /* Allocate memory to hold the list of update values (including our timeline update) */ -+ *ppui32IntAllocatedUpdateValues = OSAllocMem(sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); -+ if (!*ppui32IntAllocatedUpdateValues) -+ { -+ /* Failed to allocate memory */ -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto fail_prepare_loop; -+ } -+ OSCachedMemSet(*ppui32IntAllocatedUpdateValues, 0xbb, sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); -+#if defined(SUPPORT_BUFFER_SYNC) -+ if (psBufferUpdateSyncCheckpoint) -+ { -+ /* Copy the update values into the new memory, then append our timeline update value */ -+ OSCachedMemCopy(*ppui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount-1)); -+ pui32TimelineUpdateWp = *ppui32IntAllocatedUpdateValues + (ui32IntClientUpdateCount-1); -+ } -+ else -+#endif -+ { -+ /* Copy the update values into the new memory, then append our timeline update value */ -+ OSCachedMemCopy(*ppui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(**ppui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount); -+ pui32TimelineUpdateWp = *ppui32IntAllocatedUpdateValues + ui32IntClientUpdateCount; -+ } -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Appending the additional update value 0x%x)", __func__, *pui32FenceTimelineUpdateValue)); -+ /* Now set the additional update value */ -+ *pui32TimelineUpdateWp = *pui32FenceTimelineUpdateValue; -+#if defined(TRANSFER_CHECKPOINT_DEBUG) -+ if (ui32IntClientUpdateCount > 0) -+ { -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = 
(IMG_UINT32*)*ppui32IntAllocatedUpdateValues; -+ -+ for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+ } -+#endif -+ /* Now append the timeline sync prim addr to the transfer context update list */ -+ SyncAddrListAppendSyncPrim(psSyncAddrListUpdate, -+ *ppsFenceTimelineUpdateSync); -+ ui32IntClientUpdateCount++; -+#if defined(TRANSFER_CHECKPOINT_DEBUG) -+ if (ui32IntClientUpdateCount > 0) -+ { -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)*ppui32IntAllocatedUpdateValues; -+ -+ for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+ } -+#endif -+ /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: set paui32IntUpdateValue<%p> to point to *ppui32IntAllocatedUpdateValues<%p>", __func__, (void*)paui32IntUpdateValue, (void*)*ppui32IntAllocatedUpdateValues)); -+ paui32IntUpdateValue = *ppui32IntAllocatedUpdateValues; -+ } -+ } -+ -+ if (bCheckFence && ui32FenceSyncCheckpointCount) -+ { -+ /* Append the checks (from input fence) */ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)psSyncAddrListFence)); -+ SyncAddrListAppendCheckpoints(psSyncAddrListFence, -+ ui32FenceSyncCheckpointCount, -+ apsFenceSyncCheckpoints); -+ if (!pauiIntFenceUFOAddress) -+ { -+ pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs; -+ } -+ ui32IntClientFenceCount += ui32FenceSyncCheckpointCount; -+ -+#if defined(TRANSFER_CHECKPOINT_DEBUG) -+ if (ui32IntClientFenceCount > 0) -+ { -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress; -+ -+ for (iii=0; iiipasFWAddrs[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+ } -+#endif -+ } -+ if (bUpdateFence && *ppsUpdateSyncCheckpoint) -+ { -+ /* Append the update (from output fence) */ -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to TQ Update (psSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->asSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress)); -+ SyncAddrListAppendCheckpoints(psSyncAddrListUpdate, -+ 1, -+ ppsUpdateSyncCheckpoint); -+ if (!pauiIntUpdateUFOAddress) -+ { -+ pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs; -+ } -+ ui32IntClientUpdateCount++; -+#if defined(TRANSFER_CHECKPOINT_DEBUG) -+ { -+ IMG_UINT32 iii; -+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress; -+ -+ for (iii=0; iii) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); -+ pui32Tmp++; -+ } -+ } -+#endif -+ } -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount)); -+ -+#if (ENABLE_TQ_UFO_DUMP == 1) -+ PVR_DPF((PVR_DBG_ERROR, "%s: dumping TQ fence/updates syncs...", __func__)); -+ { -+ IMG_UINT32 ii; -+ PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress; -+ PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress; -+ IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue; -+ -+ /* Dump Fence syncs and Update syncs */ -+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TQ fence syncs (&psTransferContext->asSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->asSyncAddrListFence, (void*)pauiIntFenceUFOAddress)); -+ for (ii=0; iiui32Addr & 0x1); -+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. 
FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr)); -+ -+ psTmpIntFenceUFOAddress++; -+ } -+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TQ update syncs (&psTransferContext->asSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->asSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress)); -+ for (ii=0; iiui32Addr & 0x1) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr)); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue)); -+ pui32TmpIntUpdateValue++; -+ } -+ psTmpIntUpdateUFOAddress++; -+ } -+ } -+#endif -+ -+ ui32PreparesDone++; -+ -+ /* -+ Create the command helper data for this command -+ */ -+ RGXCmdHelperInitCmdCCB(psDevInfo, -+ psClientCCB, -+ 0, -+ ui32IntClientFenceCount, -+ pauiIntFenceUFOAddress, -+ NULL, /* fence value */ -+ ui32IntClientUpdateCount, -+ pauiIntUpdateUFOAddress, -+ paui32IntUpdateValue, -+ paui32FWCommandSize[i], -+ papaui8FWCommand[i], -+ &pPreAddr, -+ &pPostAddr, -+ &pRMWUFOAddr, -+ eType, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ ui32PDumpFlags, -+ NULL, -+ pszCommandName, -+ bCCBStateOpen, -+ psCmdHelper); -+ } -+ -+ /* -+ Acquire space for all the commands in one go -+ */ -+ if (ui323DCmdCount) -+ { -+ eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount, -+ &pas3DCmdHelper[0]); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_cmdacquire; -+ } -+ } -+ -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ if ((ui322DCmdCount) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) -+ { -+ eError = RGXCmdHelperAcquireCmdCCB(ui322DCmdCount, -+ &pas2DCmdHelper[0]); -+ if (eError != PVRSRV_OK) -+ { -+ goto fail_cmdacquire; -+ } -+ } -+#endif -+ -+ /* -+ We should acquire the kernel CCB(s) space here as the schedule could fail -+ and we would have to roll back all the syncs -+ */ -+ -+ eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ -+ /* If system is found powered OFF, Retry scheduling the command */ -+ if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) -+ { -+ eError = PVRSRV_ERROR_RETRY; -+ } -+ goto fail_acquirepowerlock; -+ } -+ -+ if (ui323DCmdCount) -+ { -+ ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext)); -+ RGXCmdHelperReleaseCmdCCB(ui323DCmdCount, -+ &pas3DCmdHelper[0], -+ "TQ_3D", -+ FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr); -+ } -+ -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ if ((ui322DCmdCount) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) -+ { -+ ui322DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext)); -+ RGXCmdHelperReleaseCmdCCB(ui322DCmdCount, -+ &pas2DCmdHelper[0], -+ "TQ_2D", -+ FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr); -+ } -+#endif -+ -+ if (ui323DCmdCount) -+ { -+ RGXFWIF_KCCB_CMD s3DKCCBCmd; -+ IMG_UINT32 ui32FWCtx = 
FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr; -+ RGX_CLIENT_CCB *ps3DTQCCB = FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext); -+ -+ /* Take one of the helper data structs and extract the common cmd struct, -+ * this is used to obtain the frame num. Each command should share the same -+ * frame number so we can just get the first. -+ */ -+#if defined(PVRSRV_ENABLE_HTB) -+ RGX_CCB_CMD_HELPER_DATA *psCmdHelper = &pas3DCmdHelper[0]; -+ CMD_COMMON *psTransferCmdCmn = IMG_OFFSET_ADDR(psCmdHelper->pui8DMCmd, 0); -+#endif -+ -+ /* Construct the kernel 3D CCB command. */ -+ s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; -+ s3DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext); -+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(ps3DTQCCB); -+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(ps3DTQCCB); -+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; -+#endif -+ -+#if defined(PVRSRV_ENABLE_HTB) -+ HTBLOGK(HTB_SF_MAIN_KICK_3D, -+ s3DKCCBCmd.uCmdData.sCmdKickData.psContext, -+ ui323DCmdOffset, -+ psTransferCmdCmn->ui32FrameNum, -+ ui32ExtJobRef, -+ ui32IntJobRef -+ ); -+#endif -+ -+ RGXSRV_HWPERF_ENQ(psTransferContext, -+ OSGetCurrentClientProcessIDKM(), -+ ui32FWCtx, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ RGX_HWPERF_KICK_TYPE2_TQ3D, -+ iCheckFence, -+ i3DUpdateFence, -+ i3DUpdateTimeline, -+ uiCheckFenceUID, -+ ui3DUpdateFenceUID, -+ NO_DEADLINE, -+ NO_CYCEST); -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError2 = RGXScheduleCommandWithoutPowerLock(psDevInfo, -+ RGXFWIF_DM_3D, -+ &s3DKCCBCmd, -+ ui32PDumpFlags); -+ if (eError2 != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ if (eError2 != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSubmitTransferKM failed to schedule kernel CCB command. (0x%x)", eError2)); -+ eError = eError2; -+ goto fail_cmdsubmit; -+ } -+ -+ PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWCtx, ui32ExtJobRef, -+ ui32IntJobRef, RGX_HWPERF_KICK_TYPE2_TQ3D); -+ } -+ -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ if ((ui322DCmdCount) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) -+ { -+ RGXFWIF_KCCB_CMD s2DKCCBCmd; -+ IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr; -+ RGX_CLIENT_CCB *ps2DTQCCB = FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext); -+ -+ /* Take one of the helper data structs and extract the common cmd struct, -+ * this is used to obtain the frame num. Each command should share the same -+ * frame number so we can just get the first. -+ */ -+#if defined(PVRSRV_ENABLE_HTB) -+ RGX_CCB_CMD_HELPER_DATA *psCmdHelper = &pas2DCmdHelper[0]; -+ CMD_COMMON *psTransferCmdCmn = IMG_OFFSET_ADDR(psCmdHelper->pui8DMCmd, 0); -+#endif -+ -+ /* Construct the kernel 2D CCB command. 
*/ -+ s2DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; -+ s2DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext); -+ s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(ps2DTQCCB); -+ s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(ps2DTQCCB); -+ s2DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; -+ -+#if defined(PVRSRV_ENABLE_HTB) -+ HTBLOGK(HTB_SF_MAIN_KICK_2D, -+ s2DKCCBCmd.uCmdData.sCmdKickData.psContext, -+ ui322DCmdOffset, -+ psTransferCmdCmn->ui32FrameNum, -+ ui32ExtJobRef, -+ ui32IntJobRef); -+#endif -+ -+ RGXSRV_HWPERF_ENQ(psTransferContext, -+ OSGetCurrentClientProcessIDKM(), -+ ui32FWCtx, -+ ui32ExtJobRef, -+ ui32IntJobRef, -+ RGX_HWPERF_KICK_TYPE2_TQ2D, -+ iCheckFence, -+ i2DUpdateFence, -+ i2DUpdateTimeline, -+ uiCheckFenceUID, -+ ui2DUpdateFenceUID, -+ NO_DEADLINE, -+ NO_CYCEST); -+ -+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+ { -+ eError2 = RGXScheduleCommandWithoutPowerLock(psDevInfo, -+ RGXFWIF_DM_2D, -+ &s2DKCCBCmd, -+ ui32PDumpFlags); -+ if (eError2 != PVRSRV_ERROR_RETRY) -+ { -+ break; -+ } -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ if (eError2 != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSubmitTransferKM failed to schedule kernel CCB command. (0x%x)", eError2)); -+ eError = eError2; -+ goto fail_cmdsubmit; -+ } -+ -+ PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWCtx, ui32ExtJobRef, -+ ui32IntJobRef, RGX_HWPERF_KICK_TYPE2_TQ2D); -+ } -+#endif -+ -+ /* -+ * Now check eError (which may have returned an error from our earlier calls -+ * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first -+ * so we check it now... -+ */ -+ if (eError != PVRSRV_OK ) -+ { -+ goto fail_cmdsubmit; -+ } -+ -+ PVRSRVPowerUnlock(psDevInfo->psDeviceNode); -+ -+#if defined(NO_HARDWARE) -+ /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ if (ps2DUpdateSyncCheckpoint) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling TLA NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)ps2DUpdateSyncCheckpoint, SyncCheckpointGetId(ps2DUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(ps2DUpdateSyncCheckpoint))); -+ SyncCheckpointSignalNoHW(ps2DUpdateSyncCheckpoint); -+ } -+ if (ps2DFenceTimelineUpdateSync) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating TLA NOHW sync prim<%p> to %d", __func__, (void*)ps2DFenceTimelineUpdateSync, ui322DFenceTimelineUpdateValue)); -+ SyncPrimNoHwUpdate(ps2DFenceTimelineUpdateSync, ui322DFenceTimelineUpdateValue); -+ } -+#endif -+ if (ps3DUpdateSyncCheckpoint) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling TQ3D NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)ps3DUpdateSyncCheckpoint, SyncCheckpointGetId(ps3DUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(ps3DUpdateSyncCheckpoint))); -+ SyncCheckpointSignalNoHW(ps3DUpdateSyncCheckpoint); -+ } -+ if (ps3DFenceTimelineUpdateSync) -+ { -+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating TQ3D NOHW sync prim<%p> to %d", __func__, (void*)ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue)); -+ SyncPrimNoHwUpdate(ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue); -+ } -+ SyncCheckpointNoHWUpdateTimelines(NULL); -+#endif /* defined(NO_HARDWARE) */ -+ -+#if defined(SUPPORT_BUFFER_SYNC) -+ if (psBufferSyncData) -+ { -+ pvr_buffer_sync_kick_succeeded(psBufferSyncData); -+ } -+ if (apsBufferFenceSyncCheckpoints) -+ { -+ 
kfree(apsBufferFenceSyncCheckpoints); -+ } -+#endif /* defined(SUPPORT_BUFFER_SYNC) */ -+ -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ if (pi2DUpdateFence) -+ { -+ *pi2DUpdateFence = i2DUpdateFence; -+ } -+#endif -+ if (pi3DUpdateFence) -+ { -+ *pi3DUpdateFence = i3DUpdateFence; -+ } -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ if (pv2DUpdateFenceFinaliseData && (i2DUpdateFence != PVRSRV_NO_FENCE)) -+ { -+ SyncCheckpointFinaliseFence(psDeviceNode, i2DUpdateFence, pv2DUpdateFenceFinaliseData, -+ ps2DUpdateSyncCheckpoint, szFenceName); -+ } -+#endif -+ if (pv3DUpdateFenceFinaliseData && (i3DUpdateFence != PVRSRV_NO_FENCE)) -+ { -+ SyncCheckpointFinaliseFence(psDeviceNode, i3DUpdateFence, pv3DUpdateFenceFinaliseData, -+ ps3DUpdateSyncCheckpoint, szFenceName); -+ } -+ -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ OSFreeMem(pas2DCmdHelper); -+#endif -+ OSFreeMem(pas3DCmdHelper); -+ -+ /* Drop the references taken on the sync checkpoints in the -+ * resolved input fence */ -+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, -+ apsFenceSyncCheckpoints); -+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ -+ if (apsFenceSyncCheckpoints) -+ { -+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); -+ } -+ /* Free memory allocated to hold the internal list of update values */ -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ if (pui322DIntAllocatedUpdateValues) -+ { -+ OSFreeMem(pui322DIntAllocatedUpdateValues); -+ pui322DIntAllocatedUpdateValues = NULL; -+ } -+#endif -+ if (pui323DIntAllocatedUpdateValues) -+ { -+ OSFreeMem(pui323DIntAllocatedUpdateValues); -+ pui323DIntAllocatedUpdateValues = NULL; -+ } -+ -+ OSLockRelease(psTransferContext->hLock); -+ return PVRSRV_OK; -+ -+/* -+ No resources are created in this function so there is nothing to free -+ unless we had to merge syncs. -+ If we fail after the client CCB acquire there is still nothing to do -+ as only the client CCB release will modify the client CCB -+*/ -+fail_cmdsubmit: -+ PVRSRVPowerUnlock(psDevInfo->psDeviceNode); -+fail_acquirepowerlock: -+fail_cmdacquire: -+fail_prepare_loop: -+ -+ PVR_ASSERT(eError != PVRSRV_OK); -+ -+ for (i=0;i<ui32PreparesDone;i++) -+ { -+ SyncAddrListRollbackCheckpoints(psDeviceNode, &psTransferContext->asSyncAddrListFence[i]); -+ SyncAddrListRollbackCheckpoints(psDeviceNode, &psTransferContext->asSyncAddrListUpdate[i]); -+ } -+#if defined(SUPPORT_BUFFER_SYNC) -+ if (ui32PreparesDone > 0) -+ { -+ /* Prevent duplicate rollback in case of buffer sync.
*/ -+ psBufferUpdateSyncCheckpoint = NULL; -+ } -+#endif -+ -+ /* Free memory allocated to hold the internal list of update values */ -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ if (pui322DIntAllocatedUpdateValues) -+ { -+ OSFreeMem(pui322DIntAllocatedUpdateValues); -+ pui322DIntAllocatedUpdateValues = NULL; -+ } -+#endif -+ if (pui323DIntAllocatedUpdateValues) -+ { -+ OSFreeMem(pui323DIntAllocatedUpdateValues); -+ pui323DIntAllocatedUpdateValues = NULL; -+ } -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ if (i2DUpdateFence != PVRSRV_NO_FENCE) -+ { -+ SyncCheckpointRollbackFenceData(i2DUpdateFence, pv2DUpdateFenceFinaliseData); -+ } -+#endif -+ if (i3DUpdateFence != PVRSRV_NO_FENCE) -+ { -+ SyncCheckpointRollbackFenceData(i3DUpdateFence, pv3DUpdateFenceFinaliseData); -+ } -+#if defined(SUPPORT_BUFFER_SYNC) -+ if (psBufferUpdateSyncCheckpoint) -+ { -+ SyncAddrListRollbackCheckpoints(psDeviceNode, &psTransferContext->asSyncAddrListUpdate[0]); -+ } -+ if (psBufferSyncData) -+ { -+ pvr_buffer_sync_kick_failed(psBufferSyncData); -+ } -+ if (apsBufferFenceSyncCheckpoints) -+ { -+ kfree(apsBufferFenceSyncCheckpoints); -+ } -+fail_resolve_buffersync_input_fence: -+#endif /* defined(SUPPORT_BUFFER_SYNC) */ -+ -+ /* Drop the references taken on the sync checkpoints in the -+ * resolved input fence */ -+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, -+ apsFenceSyncCheckpoints); -+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ -+ if (apsFenceSyncCheckpoints) -+ { -+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); -+ } -+fail_resolve_fencesync_input_fence: -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ OSFreeMem(pas2DCmdHelper); -+fail_alloc2dhelper: -+#endif -+ OSFreeMem(pas3DCmdHelper); -+fail_alloc3dhelper: -+ -+ OSLockRelease(psTransferContext->hLock); -+ return eError; -+} -+ -+ -+PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE * psDevNode, -+ RGX_SERVER_TQ_CONTEXT *psTransferContext, -+ IMG_INT32 i32Priority) -+{ -+ PVRSRV_ERROR eError; -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; -+#endif -+ PVR_UNREFERENCED_PARAMETER(psDevNode); -+ -+ OSLockAcquire(psTransferContext->hLock); -+ -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ if ((psTransferContext->s2DData.i32Priority != i32Priority) && -+ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) -+ { -+ eError = ContextSetPriority(psTransferContext->s2DData.psServerCommonContext, -+ psConnection, -+ psTransferContext->psDeviceNode->pvDevice, -+ i32Priority, -+ RGXFWIF_DM_2D); -+ if (eError != PVRSRV_OK) -+ { -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 2D part of the transfercontext (%s)", __func__, PVRSRVGetErrorString(eError))); -+ } -+ goto fail_2dcontext; -+ } -+ psTransferContext->s2DData.i32Priority = i32Priority; -+ } -+#endif -+ -+ if (psTransferContext->s3DData.i32Priority != i32Priority) -+ { -+ eError = ContextSetPriority(psTransferContext->s3DData.psServerCommonContext, -+ psConnection, -+ psTransferContext->psDeviceNode->pvDevice, -+ i32Priority, -+ RGXFWIF_DM_3D); -+ if (eError != PVRSRV_OK) -+ { -+ if (eError != PVRSRV_ERROR_RETRY) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 3D part of the transfercontext (%s)", __func__, PVRSRVGetErrorString(eError))); -+ } -+ goto fail_3dcontext; -+ } -+ psTransferContext->s3DData.i32Priority = i32Priority; -+ } -+ -+ 
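/*
 * The kick paths earlier in this file retry RGXScheduleCommandWithoutPowerLock
 * for as long as it returns PVRSRV_ERROR_RETRY, sleeping between attempts
 * until a fixed time budget is exhausted. Below is a minimal, self-contained
 * sketch of that bounded-retry shape in plain C. All names and timing values
 * are placeholders chosen for illustration; this is not the driver's
 * LOOP_UNTIL_TIMEOUT/OSWaitus implementation.
 */
#include <stdio.h>
#include <unistd.h>

/* Placeholder timing values, for illustration only. */
#define EXAMPLE_TIME_BUDGET_US 500000u
#define EXAMPLE_TRY_COUNT      100u

typedef enum { EX_OK, EX_RETRY, EX_FAIL } example_err_t;

/* Stand-in for a schedule call that needs a few attempts before it succeeds. */
static example_err_t example_schedule_command(void)
{
	static int iCallsLeft = 3;
	return (iCallsLeft-- > 0) ? EX_RETRY : EX_OK;
}

/* Keep retrying while the callee asks for a retry, up to a fixed time budget. */
static example_err_t example_schedule_with_timeout(void)
{
	example_err_t eErr = EX_RETRY;
	unsigned int uiWaitedUs = 0;
	const unsigned int uiStepUs = EXAMPLE_TIME_BUDGET_US / EXAMPLE_TRY_COUNT;

	while (uiWaitedUs < EXAMPLE_TIME_BUDGET_US)
	{
		eErr = example_schedule_command();
		if (eErr != EX_RETRY)
		{
			break;
		}
		usleep(uiStepUs);
		uiWaitedUs += uiStepUs;
	}
	return eErr;
}

int main(void)
{
	printf("schedule result: %d\n", example_schedule_with_timeout());
	return 0;
}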
OSLockRelease(psTransferContext->hLock); -+ return PVRSRV_OK; -+ -+fail_3dcontext: -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ -+fail_2dcontext: -+#endif -+ OSLockRelease(psTransferContext->hLock); -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+PVRSRV_ERROR PVRSRVRGXSetTransferContextPropertyKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, -+ RGX_CONTEXT_PROPERTY eContextProperty, -+ IMG_UINT64 ui64Input, -+ IMG_UINT64 *pui64Output) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ switch (eContextProperty) -+ { -+ case RGX_CONTEXT_PROPERTY_FLAGS: -+ { -+ IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input; -+ -+ OSLockAcquire(psTransferContext->hLock); -+ eError = FWCommonContextSetFlags(psTransferContext->s2DData.psServerCommonContext, -+ ui32ContextFlags); -+ if (eError == PVRSRV_OK) -+ { -+ eError = FWCommonContextSetFlags(psTransferContext->s3DData.psServerCommonContext, -+ ui32ContextFlags); -+ } -+ OSLockRelease(psTransferContext->hLock); -+ break; -+ } -+ -+ default: -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); -+ eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ } -+ } -+ -+ return eError; -+} -+ -+void DumpTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ IMG_UINT32 ui32VerbLevel) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ -+ OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock); -+ -+ dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext) -+ { -+ RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx = -+ IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode); -+ -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && -+ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) -+ { -+ DumpFWCommonContextInfo(psCurrentServerTransferCtx->s2DData.psServerCommonContext, -+ pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); -+ } -+#endif -+ -+ if (psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D) -+ { -+ DumpFWCommonContextInfo(psCurrentServerTransferCtx->s3DData.psServerCommonContext, -+ pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); -+ } -+ } -+ -+ OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock); -+} -+ -+IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ IMG_UINT32 ui32ContextBitMask = 0; -+ -+ OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock); -+ -+ dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext) -+ { -+ RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx = -+ IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode); -+ -+#if defined(RGX_FEATURE_TLA_BIT_MASK) -+ if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && -+ (NULL != psCurrentServerTransferCtx->s2DData.psServerCommonContext) && -+ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) -+ { -+ if (CheckStalledClientCommonContext(psCurrentServerTransferCtx->s2DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ2D) == PVRSRV_ERROR_CCCB_STALLED) -+ { -+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_TQ2D; -+ } -+ } -+#endif -+ -+ if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D) && (NULL != psCurrentServerTransferCtx->s3DData.psServerCommonContext)) -+ { -+ if ((CheckStalledClientCommonContext(psCurrentServerTransferCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ3D) == PVRSRV_ERROR_CCCB_STALLED)) -+ { -+ ui32ContextBitMask |= 
RGX_KICK_TYPE_DM_TQ3D; -+ } -+ } -+ } -+ -+ OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock); -+ return ui32ContextBitMask; -+} -+ -+PVRSRV_ERROR PVRSRVRGXTQGetSharedMemoryKM( -+ CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ PMR ** ppsCLIPMRMem, -+ PMR ** ppsUSCPMRMem) -+{ -+ PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR PVRSRVRGXTQReleaseSharedMemoryKM(PMR * psPMRMem) -+{ -+ PVR_UNREFERENCED_PARAMETER(psPMRMem); -+ -+ return PVRSRV_OK; -+} -+ -+/**************************************************************************//** -+ End of file (rgxtransfer.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxtransfer.h b/drivers/gpu/drm/img-rogue/rgxtransfer.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxtransfer.h -@@ -0,0 +1,159 @@ -+/*************************************************************************/ /*! -+@File -+@Title RGX Transfer queue Functionality -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the RGX Transfer queue Functionality -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#if !defined(RGXTRANSFER_H) -+#define RGXTRANSFER_H -+ -+#include "devicemem.h" -+#include "device.h" -+#include "rgxdevice.h" -+#include "rgxfwutils.h" -+#include "rgx_fwif_resetframework.h" -+#include "rgxdebug_common.h" -+#include "pvr_notifier.h" -+ -+#include "sync_server.h" -+#include "connection_server.h" -+ -+typedef struct _RGX_SERVER_TQ_CONTEXT_ RGX_SERVER_TQ_CONTEXT; -+ -+/*! -+******************************************************************************* -+ -+ @Function PVRSRVRGXCreateTransferContextKM -+ -+ @Description -+ Server-side implementation of RGXCreateTransferContext -+ -+ @Input pvDeviceNode - device node -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_INT32 i32Priority, -+ IMG_UINT32 ui32FrameworkCommandSize, -+ IMG_PBYTE pabyFrameworkCommand, -+ IMG_HANDLE hMemCtxPrivData, -+ IMG_UINT32 ui32PackedCCBSizeU8888, -+ IMG_UINT32 ui32ContextFlags, -+ IMG_UINT64 ui64RobustnessAddress, -+ RGX_SERVER_TQ_CONTEXT **ppsTransferContext); -+ -+ -+/*! -+******************************************************************************* -+ -+ @Function PVRSRVRGXDestroyTransferContextKM -+ -+ @Description -+ Server-side implementation of RGXDestroyTransferContext -+ -+ @Input psTransferContext - Transfer context -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext); -+ -+/*! -+******************************************************************************* -+ -+ @Function PVRSRVSubmitTransferKM -+ -+ @Description -+ Schedules one or more 2D or 3D HW commands on the firmware -+ -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, -+ IMG_UINT32 ui32PrepareCount, -+ IMG_UINT32 *paui32ClientUpdateCount, -+ SYNC_PRIMITIVE_BLOCK ***papauiClientUpdateUFODevVarBlock, -+ IMG_UINT32 **papaui32ClientUpdateSyncOffset, -+ IMG_UINT32 **papaui32ClientUpdateValue, -+ PVRSRV_FENCE iCheckFence, -+ PVRSRV_TIMELINE i2DUpdateTimeline, -+ PVRSRV_FENCE *pi2DUpdateFence, -+ PVRSRV_TIMELINE i3DUpdateTimeline, -+ PVRSRV_FENCE *pi3DUpdateFence, -+ IMG_CHAR szFenceName[32], -+ IMG_UINT32 *paui32FWCommandSize, -+ IMG_UINT8 **papaui8FWCommand, -+ IMG_UINT32 *pui32TQPrepareFlags, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32SyncPMRCount, -+ IMG_UINT32 *paui32SyncPMRFlags, -+ PMR **ppsSyncPMRs); -+ -+PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE * psDevNode, -+ RGX_SERVER_TQ_CONTEXT *psTransferContext, -+ IMG_INT32 i32Priority); -+ -+PVRSRV_ERROR PVRSRVRGXSetTransferContextPropertyKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, -+ RGX_CONTEXT_PROPERTY eContextProperty, -+ IMG_UINT64 ui64Input, -+ IMG_UINT64 *pui64Output); -+ -+/* Debug - Dump debug info of transfer contexts on this device */ -+void DumpTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile, -+ IMG_UINT32 ui32VerbLevel); -+ -+/* Debug/Watchdog - check if client transfer contexts are stalled */ -+IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); -+ 
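/*
 * CheckForStalledClientTransferCtxt (declared above and implemented earlier in
 * rgxtransfer.c) walks the device's transfer-context list and ORs a bit into
 * its return value for each data master whose client CCB is stalled
 * (RGX_KICK_TYPE_DM_TQ2D, RGX_KICK_TYPE_DM_TQ3D). The self-contained sketch
 * below illustrates only that accumulate-and-test bitmask idea; the flag
 * values, types and function names are invented for the example and are not
 * the driver's RGX_KICK_TYPE_DM_* definitions.
 */
#include <stdio.h>

/* Illustrative bit values only; the real RGX_KICK_TYPE_DM_* constants are
 * defined elsewhere in the driver. */
#define EXAMPLE_KICK_TQ2D (1u << 0)
#define EXAMPLE_KICK_TQ3D (1u << 1)

struct example_tq_ctx {
	int bStalled2D;
	int bStalled3D;
};

/* Accumulate one bit per stalled data master across all contexts. */
static unsigned int example_check_stalled(const struct example_tq_ctx *pasCtx,
                                          unsigned int uiCount)
{
	unsigned int uiMask = 0;
	unsigned int i;

	for (i = 0; i < uiCount; i++)
	{
		if (pasCtx[i].bStalled2D) uiMask |= EXAMPLE_KICK_TQ2D;
		if (pasCtx[i].bStalled3D) uiMask |= EXAMPLE_KICK_TQ3D;
	}
	return uiMask;
}

int main(void)
{
	struct example_tq_ctx asCtx[2] = { { 0, 1 }, { 0, 0 } };
	unsigned int uiMask = example_check_stalled(asCtx, 2);

	if (uiMask & EXAMPLE_KICK_TQ3D)
	{
		printf("a TQ3D client CCB is stalled (mask=0x%x)\n", uiMask);
	}
	return 0;
}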
-+PVRSRV_ERROR PVRSRVRGXTQGetSharedMemoryKM( -+ CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ PMR ** ppsCLIPMRMem, -+ PMR ** ppsUSCPMRMem); -+ -+PVRSRV_ERROR PVRSRVRGXTQReleaseSharedMemoryKM(PMR * psUSCPMRMem); -+ -+#endif /* RGXTRANSFER_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxtransfer_shader.h b/drivers/gpu/drm/img-rogue/rgxtransfer_shader.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxtransfer_shader.h -@@ -0,0 +1,71 @@ -+/*************************************************************************/ /*! -+@File rgxtransfer_shader.h -+@Title TQ binary shader file info -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description This header holds info about TQ binary shader file generated -+ by the TQ shader factory. This header is need by shader factory -+ when generating the file; by services KM when reading and -+ loading the file into memory; and by services UM when -+ constructing blits using the shaders. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#if !defined(RGXSHADERHEADER_H) -+#define RGXSHADERHEADER_H -+ -+#include "pvrversion.h" -+ -+typedef struct _RGX_SHADER_HEADER_ -+{ -+ IMG_UINT32 ui32Version; -+ IMG_UINT32 ui32NumFragment; -+ IMG_UINT32 ui32SizeFragment; -+ IMG_UINT32 ui32NumTDMFragment; -+ IMG_UINT32 ui32SizeTDMFragment; -+ IMG_UINT32 ui32SizeClientMem; -+} RGX_SHADER_HEADER; -+ -+/* TQ shaders version is used to check compatibility between the -+ binary TQ shaders file and the DDK. This number should be incremented -+ if a change to the TQ shader factory breaks compatibility. */ -+#define RGX_TQ_SHADERS_VERSION 1U -+ -+#define RGX_TQ_SHADERS_VERSION_PACK \ -+ (((RGX_TQ_SHADERS_VERSION & 0xFFU) << 16) | ((PVRVERSION_MAJ & 0xFFU) << 8) | ((PVRVERSION_MIN & 0xFFU) << 0)) -+ -+#endif /* RGXSHADERHEADER_H */ -diff --git a/drivers/gpu/drm/img-rogue/rgxutils.c b/drivers/gpu/drm/img-rogue/rgxutils.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxutils.c -@@ -0,0 +1,306 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device specific utility routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Device specific functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#include "rgx_fwif_km.h" -+#include "pdump_km.h" -+#include "osfunc.h" -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "rgxutils.h" -+#include "power.h" -+#include "pvrsrv.h" -+#include "sync_internal.h" -+#include "rgxfwutils.h" -+ -+ -+PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *pvPrivateData, -+ IMG_UINT32 *pui32State) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_UNREFERENCED_PARAMETER(pvPrivateData); -+ -+ if (!psDeviceNode) -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ psDevInfo = psDeviceNode->pvDevice; -+ *pui32State = psDevInfo->eActivePMConf; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *pvPrivateData, -+ IMG_UINT32 ui32State) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+#if !defined(NO_HARDWARE) -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+#endif -+ -+ PVR_UNREFERENCED_PARAMETER(pvPrivateData); -+ -+ if (!psDeviceNode || !psDeviceNode->pvDevice) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (RGX_ACTIVEPM_FORCE_OFF != ui32State) -+ { -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+ } -+ -+#if !defined(NO_HARDWARE) -+ psDevInfo = psDeviceNode->pvDevice; -+ -+ if (psDevInfo->pvAPMISRData) -+ { -+ psDevInfo->eActivePMConf = RGX_ACTIVEPM_FORCE_OFF; -+ psDevInfo->pvAPMISRData = NULL; -+ eError = PVRSRVSetDeviceDefaultPowerState((PPVRSRV_DEVICE_NODE)psDeviceNode, -+ PVRSRV_DEV_POWER_STATE_ON); -+ } -+#endif -+ -+ return eError; -+} -+ -+PVRSRV_ERROR RGXQueryPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *pvPrivateData, -+ IMG_BOOL *pbDisabled) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_UNREFERENCED_PARAMETER(pvPrivateData); -+ -+ if (!psDeviceNode || !psDeviceNode->pvDevice) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevInfo = psDeviceNode->pvDevice; -+ -+ *pbDisabled = !psDevInfo->bPDPEnabled; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR RGXSetPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *pvPrivateData, -+ IMG_BOOL bDisable) -+{ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ PVR_UNREFERENCED_PARAMETER(pvPrivateData); -+ -+ if (!psDeviceNode || !psDeviceNode->pvDevice) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psDevInfo = psDeviceNode->pvDevice; -+ -+ psDevInfo->bPDPEnabled = !bDisable; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 *pui32DeviceFlags) -+{ -+ if (!pui32DeviceFlags || !psDevInfo) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ *pui32DeviceFlags = psDevInfo->ui32DeviceFlags; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32Config, -+ IMG_BOOL bSetNotClear) -+{ -+ if (!psDevInfo) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if ((ui32Config & ~RGXKM_DEVICE_STATE_MASK) != 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Bits outside of device state mask set (input: 0x%x, mask: 0x%x)", -+ __func__, ui32Config, RGXKM_DEVICE_STATE_MASK)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (bSetNotClear) -+ { -+ psDevInfo->ui32DeviceFlags |= ui32Config; -+ } -+ else -+ { -+ psDevInfo->ui32DeviceFlags &= ~ui32Config; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+inline const char * RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM) -+{ -+ PVR_ASSERT(eKickTypeDM < RGX_KICK_TYPE_DM_LAST); -+ -+ switch (eKickTypeDM) { -+ case RGX_KICK_TYPE_DM_GP: -+ return "GP "; -+ case 
RGX_KICK_TYPE_DM_TDM_2D: -+ return "TDM/2D "; -+ case RGX_KICK_TYPE_DM_TA: -+ return "TA "; -+ case RGX_KICK_TYPE_DM_3D: -+ return "3D "; -+ case RGX_KICK_TYPE_DM_CDM: -+ return "CDM "; -+ case RGX_KICK_TYPE_DM_RTU: -+ return "RTU "; -+ case RGX_KICK_TYPE_DM_SHG: -+ return "SHG "; -+ case RGX_KICK_TYPE_DM_TQ2D: -+ return "TQ2D "; -+ case RGX_KICK_TYPE_DM_TQ3D: -+ return "TQ3D "; -+ default: -+ return "Invalid DM "; -+ } -+} -+ -+PHYS_HEAP_POLICY RGXPhysHeapGetLMAPolicy(PHYS_HEAP_USAGE_FLAGS ui32UsageFlags) -+{ -+ PHYS_HEAP_POLICY ui32Policy; -+ -+ if (OSIsMapPhysNonContigSupported()) -+ { -+ ui32Policy = PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG; -+ -+ if (BITMASK_ANY(ui32UsageFlags, -+ (PHYS_HEAP_USAGE_FW_SHARED | -+ PHYS_HEAP_USAGE_FW_PRIVATE | -+ PHYS_HEAP_USAGE_FW_PREMAP_PT | -+ PHYS_HEAP_USAGE_FW_CODE | -+ PHYS_HEAP_USAGE_FW_PRIV_DATA))) -+ { -+ if (PVRSRV_VZ_MODE_IS(GUEST)) -+ { -+ /* Guest Firmware heaps are always premepped */ -+ ui32Policy = PHYS_HEAP_POLICY_DEFAULT; -+ } -+#if defined(RGX_PREMAP_FW_HEAPS) -+ else if (PVRSRV_VZ_MODE_IS(HOST)) -+ { -+ /* All Firmware heaps are premapped under AutoVz*/ -+ ui32Policy = PHYS_HEAP_POLICY_DEFAULT; -+ } -+#endif -+ } -+ -+ if (BITMASK_ANY(ui32UsageFlags, PHYS_HEAP_USAGE_FW_PREMAP)) -+ { -+ ui32Policy = PHYS_HEAP_POLICY_DEFAULT; -+ } -+ } -+ else -+ { -+ ui32Policy = PHYS_HEAP_POLICY_DEFAULT; -+ } -+ -+ return ui32Policy; -+} -+ -+IMG_BOOL RGXIsErrorAndDeviceRecoverable(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_ERROR *peError) -+{ -+ IMG_BOOL bRecoverable = IMG_TRUE; -+ -+ if (*peError == PVRSRV_OK) -+ { -+ /* No recovery required */ -+ return IMG_FALSE; -+ } -+ -+ if (!PVRSRVIsStatusRecoverable(OSAtomicRead(&psDeviceNode->eHealthStatus))) -+ { -+ bRecoverable = IMG_FALSE; -+ } -+ else -+ { -+ RGXUpdateHealthStatus(psDeviceNode, IMG_FALSE); -+ -+ if (!PVRSRVIsStatusRecoverable(OSAtomicRead(&psDeviceNode->eHealthStatus))) -+ { -+ bRecoverable = IMG_FALSE; -+ } -+ } -+ -+ if (bRecoverable && !PVRSRVIsRetryError(*peError)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Device is recoverable. Changing error type (%s) to retry.", -+ __func__, PVRSRVGetErrorString(*peError))); -+ *peError = PVRSRV_ERROR_RETRY; -+ } -+ -+ if (!bRecoverable && PVRSRVIsRetryError(*peError)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Device is not recoverable. Error type should not be retry (%s).", -+ __func__, PVRSRVGetErrorString(*peError))); -+ *peError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return bRecoverable; -+} -+ -+/****************************************************************************** -+ End of file (rgxutils.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/rgxutils.h b/drivers/gpu/drm/img-rogue/rgxutils.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rgxutils.h -@@ -0,0 +1,211 @@ -+/*************************************************************************/ /*! -+@File -+@Title Device specific utility routines declarations -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Inline functions/structures specific to RGX -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "device.h" -+#include "rgxdevice.h" -+#include "rgxdebug_common.h" -+#include "pvr_notifier.h" -+#include "pvrsrv.h" -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXQueryAPMState -+ -+ @Description Query the state of the APM configuration -+ -+ @Input psDeviceNode : The device node -+ -+ @Input pvPrivateData: Unused (required for AppHint callback) -+ -+ @Output pui32State : The APM configuration state -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *pvPrivateData, -+ IMG_UINT32 *pui32State); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXSetAPMState -+ -+ @Description Set the APM configuration state. Currently only 'OFF' is -+ supported -+ -+ @Input psDeviceNode : The device node -+ -+ @Input pvPrivateData: Unused (required for AppHint callback) -+ -+ @Input ui32State : The requested APM configuration state -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *pvPrivateData, -+ IMG_UINT32 ui32State); -+ -+/*! 
-+****************************************************************************** -+ -+ @Function RGXQueryPdumpPanicDisable -+ -+ @Description Get the PDump Panic Enable configuration state. -+ -+ @Input psDeviceNode : The device node -+ -+ @Input pvPrivateData: Unused (required for AppHint callback) -+ -+ @Input pbDisabled : IMG_TRUE if PDump Panic is disabled -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXQueryPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *pvPrivateData, -+ IMG_BOOL *pbDisabled); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXSetPdumpPanicDisable -+ -+ @Description Set the PDump Panic Enable flag -+ -+ @Input psDeviceNode : The device node -+ -+ @Input pvPrivateData: Unused (required for AppHint callback) -+ -+ @Input bDisable : The requested configuration state -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXSetPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode, -+ const void *pvPrivateData, -+ IMG_BOOL bDisable); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXGetDeviceFlags -+ -+ @Description Get the device flags for a given device -+ -+ @Input psDevInfo : The device descriptor query -+ -+ @Output pui32DeviceFlags : The current state of the device flags -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 *pui32DeviceFlags); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXSetDeviceFlags -+ -+ @Description Set the device flags for a given device -+ -+ @Input psDevInfo : The device descriptor to modify -+ -+ @Input ui32Config : The device flags to modify -+ -+ @Input bSetNotClear : Set or clear the specified flags -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo, -+ IMG_UINT32 ui32Config, -+ IMG_BOOL bSetNotClear); -+ -+/*! -+****************************************************************************** -+ -+ @Function RGXStringifyKickTypeDM -+ -+ @Description Gives the kick type DM name stringified -+ -+ @Input Kick type DM -+ -+ @Return Array containing the kick type DM name -+ -+******************************************************************************/ -+const char* RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM); -+ -+/*************************************************************************/ /*! -+ -+@Function RGXPhysHeapGetLMAPolicy -+ -+@Description Returns the optimal LMA allocation policy based on a heap's -+ usage flags -+ -+@Input ui32UsageFlags Flags specifying a heap's intended use -+ -+@Return PHYS_HEAP_POLICY The recommended LMA policy -+ -+*/ /**************************************************************************/ -+PHYS_HEAP_POLICY RGXPhysHeapGetLMAPolicy(PHYS_HEAP_USAGE_FLAGS ui32UsageFlags); -+ -+#define RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(bitmask, eKickTypeDM) bitmask & eKickTypeDM ? RGXStringifyKickTypeDM(eKickTypeDM) : "" -+ -+/*************************************************************************/ /*! 
-+@Function RGXIsErrorAndDeviceRecoverable -+@Description This function is used to check if device (and firmware) is in -+ a state that can be recovered from without a full reset of the -+ device. -+@Input psDeviceNode The device node. -+@Input peError Pointer to error. Can be changed to retry type. -+@Return IMG_BOOL Return true if device is recoverable. -+*/ /**************************************************************************/ -+IMG_BOOL RGXIsErrorAndDeviceRecoverable(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_ERROR *peError); -+ -+/****************************************************************************** -+ End of file (rgxutils.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/ri_server.c b/drivers/gpu/drm/img-rogue/ri_server.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/ri_server.c -@@ -0,0 +1,2161 @@ -+/*************************************************************************/ /*! -+@File ri_server.c -+@Title Resource Information (RI) server implementation -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Resource Information (RI) server functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#if defined(__linux__) && defined(__KERNEL__) -+ #include -+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) -+ #include -+ #else -+ #include -+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ -+#else -+ #include -+#endif /* __linux__ */ -+#include "img_defs.h" -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "pvrsrv_error.h" -+#include "osfunc.h" -+ -+#include "srvkm.h" -+#include "lock.h" -+ -+/* services/include */ -+#include "pvr_ricommon.h" -+ -+/* services/server/include/ */ -+#include "ri_server.h" -+ -+/* services/include/shared/ */ -+#include "hash.h" -+/* services/shared/include/ */ -+#include "dllist.h" -+ -+#include "pmr.h" -+#include "physheap.h" -+ -+/* include/device.h */ -+#include "device.h" -+ -+#if !defined(RI_UNIT_TEST) -+#include "pvrsrv.h" -+#endif -+ -+ -+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) -+ -+#define USE_RI_LOCK 1 -+ -+/* -+ * Initial size use for Hash table. (Used to index the RI list entries). -+ */ -+#define _RI_INITIAL_HASH_TABLE_SIZE 64 -+ -+/* -+ * Values written to the 'valid' field of RI structures when created and -+ * cleared prior to being destroyed. The code can then check this value -+ * before accessing the provided pointer contents as a valid RI structure. -+ */ -+#define _VALID_RI_LIST_ENTRY 0x66bccb66 -+#define _VALID_RI_SUBLIST_ENTRY 0x77cddc77 -+#define _INVALID 0x00000000 -+ -+/* -+ * If this define is set to 1, details of the linked lists (addresses, -+ * prev/next ptrs, etc) are also output when function RIDumpList() is called. -+ */ -+#define _DUMP_LINKEDLIST_INFO 0 -+ -+ -+typedef IMG_UINT64 _RI_BASE_T; -+ -+ -+/* No +1 in SIZE macros since sizeof includes \0 byte in size */ -+ -+#define RI_PROC_BUF_SIZE 16 -+ -+#define RI_DEV_ID_BUF_SIZE 4 -+ -+#define RI_MEMDESC_SUM_FRMT "PID %d %s MEMDESCs Alloc'd:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) + "\ -+ "Imported:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) = "\ -+ "Total:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K)\n" -+#define RI_MEMDESC_SUM_BUF_SIZE (sizeof(RI_MEMDESC_SUM_FRMT)+5+RI_PROC_BUF_SIZE+30+60) -+ -+ -+#define RI_PMR_SUM_FRMT "PID %d %s PMRs Alloc'd:0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K "\ -+ "[Physical: 0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K]\n" -+#define RI_PMR_SUM_BUF_SIZE (sizeof(RI_PMR_SUM_FRMT)+(20+40)) -+ -+#define RI_PMR_ENTRY_FRMT "%%sPID:%%-5d DEV:%%s <%%p>\t%%-%ds\t%%-%ds\t0x%%010" IMG_UINT64_FMTSPECx "\t[0x%%010" IMG_UINT64_FMTSPECx "]\t%%c" -+#define RI_PMR_ENTRY_BUF_SIZE (sizeof(RI_PMR_ENTRY_FRMT)+(3+5+RI_DEV_ID_BUF_SIZE+16+(PVR_ANNOTATION_MAX_LEN/2)+PHYS_HEAP_NAME_SIZE+10+10)) -+#define RI_PMR_ENTRY_FRMT_SIZE (sizeof(RI_PMR_ENTRY_FRMT)) -+ -+/* Use %5d rather than %d so the output aligns in server/kernel.log, debugFS sees extra spaces */ -+#define RI_MEMDESC_ENTRY_PROC_FRMT "[%5d:%s]" -+#define RI_MEMDESC_ENTRY_PROC_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_PROC_FRMT)+5+16) -+ -+#define RI_SYS_ALLOC_IMPORT_FRMT "{Import from PID %d}" -+#define RI_SYS_ALLOC_IMPORT_FRMT_SIZE (sizeof(RI_SYS_ALLOC_IMPORT_FRMT)+5) -+static IMG_CHAR g_szSysAllocImport[RI_SYS_ALLOC_IMPORT_FRMT_SIZE]; -+ -+#define RI_MEMDESC_ENTRY_IMPORT_FRMT "{Import from PID %d}" -+#define RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_IMPORT_FRMT)+5) -+ -+#define RI_MEMDESC_ENTRY_FRMT "%%sPID:%%-5d DEV:%%s 0x%%010" IMG_UINT64_FMTSPECx "\t%%-%ds %%s\t0x%%010" IMG_UINT64_FMTSPECx "\t<%%p> %%s%%s%%c" -+#define 
RI_MEMDESC_ENTRY_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_FRMT)+(3+5+RI_DEV_ID_BUF_SIZE+10+PVR_ANNOTATION_MAX_LEN+RI_MEMDESC_ENTRY_PROC_BUF_SIZE+16+\ -+ RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE+RI_SYS_ALLOC_IMPORT_FRMT_SIZE)) -+#define RI_MEMDESC_ENTRY_FRMT_SIZE (sizeof(RI_MEMDESC_ENTRY_FRMT)) -+ -+ -+#define RI_FRMT_SIZE_MAX (MAX(RI_MEMDESC_ENTRY_BUF_SIZE,\ -+ MAX(RI_PMR_ENTRY_BUF_SIZE,\ -+ MAX(RI_MEMDESC_SUM_BUF_SIZE,\ -+ RI_PMR_SUM_BUF_SIZE)))) -+ -+ -+ -+ -+/* Structure used to make linked sublist of memory allocations (MEMDESC) */ -+struct _RI_SUBLIST_ENTRY_ -+{ -+ DLLIST_NODE sListNode; -+ struct _RI_LIST_ENTRY_ *psRI; -+ IMG_UINT32 valid; -+ IMG_BOOL bIsImport; -+ IMG_BOOL bIsSuballoc; -+ IMG_PID pid; -+ IMG_UINT32 ui32DevID; -+ IMG_CHAR ai8ProcName[RI_PROC_BUF_SIZE]; -+ IMG_DEV_VIRTADDR sVAddr; -+ IMG_UINT64 ui64Offset; -+ IMG_UINT64 ui64Size; -+ IMG_CHAR ai8TextB[DEVMEM_ANNOTATION_MAX_LEN+1]; -+ DLLIST_NODE sProcListNode; -+}; -+ -+/* -+ * Structure used to make linked list of PMRs. Sublists of allocations -+ * (MEMDESCs) made from these PMRs are chained off these entries. -+ */ -+struct _RI_LIST_ENTRY_ -+{ -+ DLLIST_NODE sListNode; -+ DLLIST_NODE sSysAllocListNode; -+ DLLIST_NODE sSubListFirst; -+ IMG_UINT32 valid; -+ PMR *psPMR; -+ IMG_PID pid; -+ IMG_UINT32 ui32DevID; -+ IMG_CHAR ai8ProcName[RI_PROC_BUF_SIZE]; -+ IMG_UINT16 ui16SubListCount; -+ IMG_UINT16 ui16MaxSubListCount; -+ IMG_UINT32 ui32RIPMRFlags; /* Flags used to indicate the type of allocation */ -+ IMG_UINT32 ui32Flags; /* Flags used to indicate if PMR appears in ri debugfs output */ -+}; -+ -+typedef struct _RI_LIST_ENTRY_ RI_LIST_ENTRY; -+typedef struct _RI_SUBLIST_ENTRY_ RI_SUBLIST_ENTRY; -+ -+static IMG_UINT16 g_ui16RICount; -+static HASH_TABLE *g_pRIHashTable; -+static IMG_UINT16 g_ui16ProcCount; -+static HASH_TABLE *g_pProcHashTable; -+ -+static POS_LOCK g_hRILock; -+ -+/* Linked list of PMR allocations made against the PVR_SYS_ALLOC_PID and lock -+ * to prevent concurrent access to it. -+ */ -+static POS_LOCK g_hSysAllocPidListLock; -+static DLLIST_NODE g_sSysAllocPidListHead; -+ -+/* -+ * Flag used to indicate if RILock should be destroyed when final PMR entry is -+ * deleted, i.e. if RIDeInitKM() has already been called before that point but -+ * the handle manager has deferred deletion of RI entries. 
-+ */ -+static IMG_BOOL bRIDeInitDeferred = IMG_FALSE; -+ -+/* -+ * Used as head of linked-list of PMR RI entries - this is useful when we wish -+ * to iterate all PMR list entries (when we don't have a PMR ref) -+ */ -+static DLLIST_NODE sListFirst; -+ -+/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */ -+static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString); -+/* Function used to produce string containing info for PMR RI entries (used for both debugfs and kernel log output) */ -+static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString); -+ -+static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v, void* pvPriv); -+static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v, void* pvPriv); -+static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v, void* pvPriv); -+static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid); -+#define _RIOutput(x) PVR_LOG(x) -+ -+#define RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS 0x1 -+#define RI_FLAG_SYSALLOC_PMR 0x2 -+ -+static IMG_UINT32 -+_ProcHashFunc(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen); -+ -+static IMG_UINT32 -+_ProcHashFunc(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen) -+{ -+ IMG_UINT32 *p = (IMG_UINT32 *)pKey; -+ IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32); -+ IMG_UINT32 ui; -+ IMG_UINT32 uHashKey = 0; -+ -+ PVR_UNREFERENCED_PARAMETER(uHashTabLen); -+ -+ for (ui = 0; ui < uKeyLen; ui++) -+ { -+ IMG_UINT32 uHashPart = *p++; -+ -+ uHashPart += (uHashPart << 12); -+ uHashPart ^= (uHashPart >> 22); -+ uHashPart += (uHashPart << 4); -+ uHashPart ^= (uHashPart >> 9); -+ uHashPart += (uHashPart << 10); -+ uHashPart ^= (uHashPart >> 2); -+ uHashPart += (uHashPart << 7); -+ uHashPart ^= (uHashPart >> 12); -+ -+ uHashKey += uHashPart; -+ } -+ -+ return uHashKey; -+} -+ -+static IMG_BOOL -+_ProcHashComp(size_t uKeySize, void *pKey1, void *pKey2); -+ -+static IMG_BOOL -+_ProcHashComp(size_t uKeySize, void *pKey1, void *pKey2) -+{ -+ IMG_UINT32 *p1 = (IMG_UINT32 *)pKey1; -+ IMG_UINT32 *p2 = (IMG_UINT32 *)pKey2; -+ IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32); -+ IMG_UINT32 ui; -+ -+ for (ui = 0; ui < uKeyLen; ui++) -+ { -+ if (*p1++ != *p2++) -+ return IMG_FALSE; -+ } -+ -+ return IMG_TRUE; -+} -+ -+static void _RILock(void) -+{ -+#if (USE_RI_LOCK == 1) -+ OSLockAcquire(g_hRILock); -+#endif -+} -+ -+static void _RIUnlock(void) -+{ -+#if (USE_RI_LOCK == 1) -+ OSLockRelease(g_hRILock); -+#endif -+} -+ -+/* This value maintains a count of the number of PMRs attributed to the -+ * PVR_SYS_ALLOC_PID. Access to this value is protected by g_hRILock, so it -+ * does not need to be an ATOMIC_T. 
-+ */ -+static IMG_UINT32 g_ui32SysAllocPMRCount; -+ -+ -+PVRSRV_ERROR RIInitKM(void) -+{ -+ IMG_INT iCharsWritten; -+ PVRSRV_ERROR eError; -+ -+ bRIDeInitDeferred = IMG_FALSE; -+ -+ iCharsWritten = OSSNPrintf(g_szSysAllocImport, -+ RI_SYS_ALLOC_IMPORT_FRMT_SIZE, -+ RI_SYS_ALLOC_IMPORT_FRMT, -+ PVR_SYS_ALLOC_PID); -+ PVR_LOG_IF_FALSE((iCharsWritten>0 && iCharsWritten<(IMG_INT32)RI_SYS_ALLOC_IMPORT_FRMT_SIZE), -+ "OSSNPrintf failed to initialise g_szSysAllocImport"); -+ -+ eError = OSLockCreate(&g_hSysAllocPidListLock); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: OSLockCreate (g_hSysAllocPidListLock) failed (returned %d)", -+ __func__, -+ eError)); -+ } -+ dllist_init(&(g_sSysAllocPidListHead)); -+#if (USE_RI_LOCK == 1) -+ eError = OSLockCreate(&g_hRILock); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: OSLockCreate (g_hRILock) failed (returned %d)", -+ __func__, -+ eError)); -+ } -+#endif -+ return eError; -+} -+void RIDeInitKM(void) -+{ -+#if (USE_RI_LOCK == 1) -+ if (g_ui16RICount > 0) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: called with %d entries remaining - deferring OSLockDestroy()", -+ __func__, -+ g_ui16RICount)); -+ bRIDeInitDeferred = IMG_TRUE; -+ } -+ else -+ { -+ OSLockDestroy(g_hRILock); -+ OSLockDestroy(g_hSysAllocPidListLock); -+ } -+#endif -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RILockAcquireKM -+ -+ @Description -+ Acquires the RI Lock (which protects the integrity of the RI -+ linked lists). Caller will be suspended until lock is acquired. -+ -+ @Return None -+ -+******************************************************************************/ -+void RILockAcquireKM(void) -+{ -+ _RILock(); -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RILockReleaseKM -+ -+ @Description -+ Releases the RI Lock (which protects the integrity of the RI -+ linked lists). -+ -+ @Return None -+ -+******************************************************************************/ -+void RILockReleaseKM(void) -+{ -+ _RIUnlock(); -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RIWritePMREntryWithOwnerKM -+ -+ @Description -+ Writes a new Resource Information list entry. -+ The new entry will be inserted at the head of the list of -+ PMR RI entries and assigned the values provided. -+ -+ @input psPMR - Reference (handle) to the PMR to which this reference relates -+ -+ @input ui32Owner - PID of the process which owns the allocation. This -+ may not be the current process (e.g. 
a request to -+ grow a buffer may happen in the context of a kernel -+ thread, or we may import further resource for a -+ suballocation made from the FW heap which can then -+ also be utilized by other processes) -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR, -+ IMG_PID ui32Owner) -+{ -+ PMR *pPMRHashKey = psPMR; -+ RI_LIST_ENTRY *psRIEntry; -+ uintptr_t hashData; -+ -+ /* if Hash table has not been created, create it now */ -+ if (!g_pRIHashTable) -+ { -+ g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default); -+ g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp); -+ } -+ PVR_RETURN_IF_NOMEM(g_pRIHashTable); -+ PVR_RETURN_IF_NOMEM(g_pProcHashTable); -+ -+ PVR_RETURN_IF_INVALID_PARAM(psPMR); -+ -+ /* Acquire RI Lock */ -+ _RILock(); -+ -+ /* Look-up psPMR in Hash Table */ -+ hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); -+ psRIEntry = (RI_LIST_ENTRY *)hashData; -+ if (!psRIEntry) -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psPMR); -+ -+ /* -+ * If failed to find a matching existing entry, create a new one -+ */ -+ psRIEntry = (RI_LIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_LIST_ENTRY)); -+ if (!psRIEntry) -+ { -+ /* Release RI Lock */ -+ _RIUnlock(); -+ /* Error - no memory to allocate for new RI entry */ -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ else -+ { -+ PMR_FLAGS_T uiPMRFlags = PMR_Flags(psPMR); -+ -+ /* -+ * Add new RI Entry -+ */ -+ if (g_ui16RICount == 0) -+ { -+ /* Initialise PMR entry linked-list head */ -+ dllist_init(&sListFirst); -+ } -+ g_ui16RICount++; -+ -+ dllist_init (&(psRIEntry->sSysAllocListNode)); -+ dllist_init (&(psRIEntry->sSubListFirst)); -+ psRIEntry->ui16SubListCount = 0; -+ psRIEntry->ui16MaxSubListCount = 0; -+ psRIEntry->valid = _VALID_RI_LIST_ENTRY; -+ -+ /* Check if this PMR should be accounted for under the -+ * PVR_SYS_ALLOC_PID debugFS entry. This should happen if -+ * we are in the driver init phase, the flags indicate -+ * this is a FW Main allocation (made from FW heap) -+ * or the owner PID is PVR_SYS_ALLOC_PID. -+ * Also record host dev node allocs on the system PID. 
-+ */ -+ if (psDeviceNode->eDevState < PVRSRV_DEVICE_STATE_ACTIVE || -+ PVRSRV_CHECK_FW_MAIN(uiPMRFlags) || -+ ui32Owner == PVR_SYS_ALLOC_PID || -+ psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode) -+ { -+ psRIEntry->ui32RIPMRFlags = RI_FLAG_SYSALLOC_PMR; -+ OSSNPrintf(psRIEntry->ai8ProcName, -+ RI_PROC_BUF_SIZE, -+ "SysProc"); -+ psRIEntry->pid = PVR_SYS_ALLOC_PID; -+ OSLockAcquire(g_hSysAllocPidListLock); -+ /* Add this psRIEntry to the list of entries for PVR_SYS_ALLOC_PID */ -+ dllist_add_to_tail(&g_sSysAllocPidListHead, (PDLLIST_NODE)&(psRIEntry->sSysAllocListNode)); -+ OSLockRelease(g_hSysAllocPidListLock); -+ g_ui32SysAllocPMRCount++; -+ } -+ else -+ { -+ psRIEntry->ui32RIPMRFlags = 0; -+ psRIEntry->pid = ui32Owner; -+ } -+ -+ OSSNPrintf(psRIEntry->ai8ProcName, -+ RI_PROC_BUF_SIZE, -+ "%s", -+ OSGetCurrentClientProcessNameKM()); -+ /* Add PMR entry to linked-list of all PMR entries */ -+ dllist_init (&(psRIEntry->sListNode)); -+ dllist_add_to_tail(&sListFirst, (PDLLIST_NODE)&(psRIEntry->sListNode)); -+ } -+ -+ psRIEntry->psPMR = psPMR; -+ psRIEntry->ui32Flags = 0; -+ psRIEntry->ui32DevID = psDeviceNode->sDevId.ui32InternalID; -+ -+ /* Create index entry in Hash Table */ -+ HASH_Insert_Extended (g_pRIHashTable, (void *)&pPMRHashKey, (uintptr_t)psRIEntry); -+ -+ /* Store phRIHandle in PMR structure, so it can delete the associated RI entry when it destroys the PMR */ -+ PMRStoreRIHandle(psPMR, psRIEntry); -+ } -+ /* Release RI Lock */ -+ _RIUnlock(); -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RIWritePMREntryKM -+ -+ @Description -+ Writes a new Resource Information list entry. -+ The new entry will be inserted at the head of the list of -+ PMR RI entries and assigned the values provided. -+ -+ @input psPMR - Reference (handle) to the PMR to which this reference relates -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR) -+{ -+ return RIWritePMREntryWithOwnerKM(psPMR, -+ OSGetCurrentClientProcessIDKM()); -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RIWriteMEMDESCEntryKM -+ -+ @Description -+ Writes a new Resource Information sublist entry. -+ The new entry will be inserted at the head of the sublist of -+ the indicated PMR list entry, and assigned the values provided. 
-+ -+ @input psPMR - Reference (handle) to the PMR to which this MEMDESC RI entry relates -+ @input ui32TextBSize - Length of string provided in psz8TextB parameter -+ @input psz8TextB - String describing this secondary reference (may be null) -+ @input ui64Offset - Offset from the start of the PMR at which this allocation begins -+ @input ui64Size - Size of this allocation -+ @input bIsImport - Flag indicating if this is an allocation or an import -+ @input bIsSuballoc - Flag indicating if this is a sub-allocation -+ @output phRIHandle - Handle to the created RI entry -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR, -+ IMG_UINT32 ui32TextBSize, -+ const IMG_CHAR *psz8TextB, -+ IMG_UINT64 ui64Offset, -+ IMG_UINT64 ui64Size, -+ IMG_BOOL bIsImport, -+ IMG_BOOL bIsSuballoc, -+ RI_HANDLE *phRIHandle) -+{ -+ RI_SUBLIST_ENTRY *psRISubEntry; -+ RI_LIST_ENTRY *psRIEntry; -+ PMR *pPMRHashKey = psPMR; -+ uintptr_t hashData; -+ IMG_PID pid; -+ -+ /* Check Hash tables have been created (meaning at least one PMR has been defined) */ -+ PVR_RETURN_IF_INVALID_PARAM(g_pRIHashTable); -+ PVR_RETURN_IF_INVALID_PARAM(g_pProcHashTable); -+ -+ PVR_RETURN_IF_INVALID_PARAM(psPMR); -+ PVR_RETURN_IF_INVALID_PARAM(phRIHandle); -+ -+ /* Acquire RI Lock */ -+ _RILock(); -+ -+ *phRIHandle = NULL; -+ -+ /* Look-up psPMR in Hash Table */ -+ hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); -+ psRIEntry = (RI_LIST_ENTRY *)hashData; -+ if (!psRIEntry) -+ { -+ /* Release RI Lock */ -+ _RIUnlock(); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_SUBLIST_ENTRY)); -+ if (!psRISubEntry) -+ { -+ /* Release RI Lock */ -+ _RIUnlock(); -+ /* Error - no memory to allocate for new RI sublist entry */ -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ else -+ { -+ /* -+ * Insert new entry in sublist -+ */ -+ PDLLIST_NODE currentNode = dllist_get_next_node(&(psRIEntry->sSubListFirst)); -+ -+ /* -+ * Insert new entry before currentNode -+ */ -+ if (!currentNode) -+ { -+ currentNode = &(psRIEntry->sSubListFirst); -+ } -+ dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sListNode)); -+ -+ psRISubEntry->psRI = psRIEntry; -+ -+ /* Increment number of entries in sublist */ -+ psRIEntry->ui16SubListCount++; -+ if (psRIEntry->ui16SubListCount > psRIEntry->ui16MaxSubListCount) -+ { -+ psRIEntry->ui16MaxSubListCount = psRIEntry->ui16SubListCount; -+ } -+ psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY; -+ } -+ -+ /* If allocation is made during device or driver initialisation, -+ * track the MEMDESC entry under PVR_SYS_ALLOC_PID, otherwise use -+ * the current PID. -+ * Record host dev node allocations on the system PID. -+ */ -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psRISubEntry->psRI->psPMR); -+ -+ if (psDeviceNode->eDevState < PVRSRV_DEVICE_STATE_ACTIVE || -+ psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode) -+ { -+ psRISubEntry->pid = psRISubEntry->psRI->pid; -+ } -+ else -+ { -+ psRISubEntry->pid = OSGetCurrentClientProcessIDKM(); -+ } -+ -+ if (ui32TextBSize > sizeof(psRISubEntry->ai8TextB)-1) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: TextBSize too long (%u). 
Text will be truncated " -+ "to %zu characters", __func__, -+ ui32TextBSize, sizeof(psRISubEntry->ai8TextB)-1)); -+ } -+ -+ /* copy ai8TextB field data */ -+ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, sizeof(psRISubEntry->ai8TextB), "%s", psz8TextB); -+ -+ psRISubEntry->ui32DevID = psDeviceNode->sDevId.ui32InternalID; -+ psRISubEntry->ui64Offset = ui64Offset; -+ psRISubEntry->ui64Size = ui64Size; -+ psRISubEntry->bIsImport = bIsImport; -+ psRISubEntry->bIsSuballoc = bIsSuballoc; -+ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM()); -+ dllist_init (&(psRISubEntry->sProcListNode)); -+ } -+ -+ /* -+ * Now insert this MEMDESC into the proc list -+ */ -+ /* look-up pid in Hash Table */ -+ pid = psRISubEntry->pid; -+ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid); -+ if (!hashData) -+ { -+ /* -+ * No allocations for this pid yet -+ */ -+ HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode)); -+ /* Increment number of entries in proc hash table */ -+ g_ui16ProcCount++; -+ } -+ else -+ { -+ /* -+ * Insert allocation into pid allocations linked list -+ */ -+ PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData; -+ -+ /* -+ * Insert new entry -+ */ -+ dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode)); -+ } -+ *phRIHandle = (RI_HANDLE)psRISubEntry; -+ /* Release RI Lock */ -+ _RIUnlock(); -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RIWriteProcListEntryKM -+ -+ @Description -+ Write a new entry in the process list directly. We have to do this -+ because there might be no, multiple or changing PMR handles. -+ -+ In the common case we have a PMR that will be added to the PMR list -+ and one or several MemDescs that are associated to it in a sub-list. -+ Additionally these MemDescs will be inserted in the per-process list. -+ -+ There might be special descriptors from e.g. new user APIs that -+ are associated with no or multiple PMRs and not just one. -+ These can be now added to the per-process list (as RI_SUBLIST_ENTRY) -+ directly with this function and won't be listed in the PMR list (RIEntry) -+ because there might be no PMR. -+ -+ To remove entries from the per-process list, just use -+ RIDeleteMEMDESCEntryKM(). 
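Both the MEMDESC path and the direct proc-list path above end up threading the new sub-entry onto a per-PID circular doubly-linked list; the node stored in the process hash table is simply one node of that ring. A self-contained sketch of the intrusive-list bookkeeping is shown below; dnode, dlist_init, dlist_add_tail and alloc_rec are illustrative names rather than the driver's dllist API.

#include <stdio.h>

/* Minimal intrusive circular doubly-linked list. */
struct dnode { struct dnode *prev, *next; };

static void dlist_init(struct dnode *head) { head->prev = head->next = head; }

static void dlist_add_tail(struct dnode *head, struct dnode *node)
{
    node->prev = head->prev;
    node->next = head;
    head->prev->next = node;
    head->prev = node;
}

struct alloc_rec { struct dnode link; unsigned long long size; };

int main(void)
{
    struct dnode per_pid_head;            /* value kept in the per-PID hash */
    struct alloc_rec a = { .size = 4096 }, b = { .size = 8192 };
    unsigned long long total = 0;

    dlist_init(&per_pid_head);
    dlist_add_tail(&per_pid_head, &a.link);   /* first allocation for the PID */
    dlist_add_tail(&per_pid_head, &b.link);   /* subsequent allocation        */

    /* Walk the ring back to the head node, summing sizes. */
    for (struct dnode *n = per_pid_head.next; n != &per_pid_head; n = n->next)
        total += ((struct alloc_rec *)n)->size; /* link is the first member */

    printf("total=%llu\n", total);            /* prints 12288 */
    return 0;
}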
-+ -+ @input psz8TextB - String describing this secondary reference (may be null) -+ @input ui64Size - Size of this allocation -+ @input ui64DevVAddr - Virtual address of this entry -+ @output phRIHandle - Handle to the created RI entry -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RIWriteProcListEntryKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32TextBSize, -+ const IMG_CHAR *psz8TextB, -+ IMG_UINT64 ui64Size, -+ IMG_UINT64 ui64DevVAddr, -+ RI_HANDLE *phRIHandle) -+{ -+ uintptr_t hashData = 0; -+ IMG_PID pid; -+ RI_SUBLIST_ENTRY *psRISubEntry = NULL; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ if (!g_pRIHashTable) -+ { -+ g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default); -+ g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp); -+ -+ if (!g_pRIHashTable || !g_pProcHashTable) -+ { -+ /* Error - no memory to allocate for Hash table(s) */ -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ } -+ -+ /* Acquire RI Lock */ -+ _RILock(); -+ -+ *phRIHandle = NULL; -+ -+ psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_SUBLIST_ENTRY)); -+ if (!psRISubEntry) -+ { -+ /* Release RI Lock */ -+ _RIUnlock(); -+ /* Error - no memory to allocate for new RI sublist entry */ -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY; -+ -+ psRISubEntry->pid = OSGetCurrentClientProcessIDKM(); -+ psRISubEntry->ui32DevID = psDeviceNode->sDevId.ui32InternalID; -+ -+ if (ui32TextBSize > sizeof(psRISubEntry->ai8TextB)-1) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: TextBSize too long (%u). Text will be truncated " -+ "to %zu characters", __func__, -+ ui32TextBSize, sizeof(psRISubEntry->ai8TextB)-1)); -+ } -+ -+ /* copy ai8TextB field data */ -+ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, sizeof(psRISubEntry->ai8TextB), "%s", psz8TextB); -+ -+ psRISubEntry->ui64Offset = 0; -+ psRISubEntry->ui64Size = ui64Size; -+ psRISubEntry->sVAddr.uiAddr = ui64DevVAddr; -+ psRISubEntry->bIsImport = IMG_FALSE; -+ psRISubEntry->bIsSuballoc = IMG_FALSE; -+ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM()); -+ dllist_init (&(psRISubEntry->sProcListNode)); -+ -+ /* -+ * Now insert this MEMDESC into the proc list -+ */ -+ /* look-up pid in Hash Table */ -+ pid = psRISubEntry->pid; -+ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid); -+ if (!hashData) -+ { -+ /* -+ * No allocations for this pid yet -+ */ -+ HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode)); -+ /* Increment number of entries in proc hash table */ -+ g_ui16ProcCount++; -+ } -+ else -+ { -+ /* -+ * Insert allocation into pid allocations linked list -+ */ -+ PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData; -+ -+ /* -+ * Insert new entry -+ */ -+ dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode)); -+ } -+ *phRIHandle = (RI_HANDLE)psRISubEntry; -+ /* Release RI Lock */ -+ _RIUnlock(); -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RIUpdateMEMDESCAddrKM -+ -+ @Description -+ Update a Resource Information entry. 
-+ -+ @input hRIHandle - Handle of object whose reference info is to be updated -+ @input sVAddr - New address for the RI entry -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle, -+ IMG_DEV_VIRTADDR sVAddr) -+{ -+ RI_SUBLIST_ENTRY *psRISubEntry; -+ -+ PVR_RETURN_IF_INVALID_PARAM(hRIHandle); -+ -+ psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle; -+ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) -+ { -+ /* Pointer does not point to valid structure */ -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* Acquire RI lock*/ -+ _RILock(); -+ -+ psRISubEntry->sVAddr.uiAddr = sVAddr.uiAddr; -+ -+ /* Release RI lock */ -+ _RIUnlock(); -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RIDeletePMREntryKM -+ -+ @Description -+ Delete a Resource Information entry. -+ -+ @input hRIHandle - Handle of object whose reference info is to be deleted -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle) -+{ -+ RI_LIST_ENTRY *psRIEntry; -+ PMR *pPMRHashKey; -+ PVRSRV_ERROR eResult = PVRSRV_OK; -+ -+ PVR_RETURN_IF_INVALID_PARAM(hRIHandle); -+ -+ psRIEntry = (RI_LIST_ENTRY *)hRIHandle; -+ -+ if (psRIEntry->valid != _VALID_RI_LIST_ENTRY) -+ { -+ /* Pointer does not point to valid structure */ -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ if (psRIEntry->ui16SubListCount == 0) -+ { -+ /* Acquire RI lock*/ -+ _RILock(); -+ -+ /* Remove the HASH table index entry */ -+ pPMRHashKey = psRIEntry->psPMR; -+ HASH_Remove_Extended(g_pRIHashTable, (void *)&pPMRHashKey); -+ -+ psRIEntry->valid = _INVALID; -+ -+ /* Remove PMR entry from linked-list of PMR entries */ -+ dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sListNode)); -+ -+ if (psRIEntry->ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR) -+ { -+ dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sSysAllocListNode)); -+ g_ui32SysAllocPMRCount--; -+ } -+ -+ /* Now, free the memory used to store the RI entry */ -+ OSFreeMemNoStats(psRIEntry); -+ psRIEntry = NULL; -+ -+ /* -+ * Decrement number of RI entries - if this is now zero, -+ * we can delete the RI hash table -+ */ -+ if (--g_ui16RICount == 0) -+ { -+ HASH_Delete(g_pRIHashTable); -+ g_pRIHashTable = NULL; -+ -+ _RIUnlock(); -+ -+ /* If deInit has been deferred, we can now destroy the RI Lock */ -+ if (bRIDeInitDeferred) -+ { -+ OSLockDestroy(g_hRILock); -+ } -+ } -+ else -+ { -+ /* Release RI lock*/ -+ _RIUnlock(); -+ } -+ /* -+ * Make the handle NULL once PMR RI entry is deleted -+ */ -+ hRIHandle = NULL; -+ } -+ else -+ { -+ eResult = PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP; -+ } -+ -+ return eResult; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RIDeleteMEMDESCEntryKM -+ -+ @Description -+ Delete a Resource Information entry. -+ Entry can be from RIEntry list or ProcList. 
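RIDeletePMREntryKM() above also tears the whole container down when the last entry goes away: the final delete removes the hash table itself and, if de-initialisation was deferred, destroys the lock. The sketch below shows that last-delete-destroys-the-container pattern with a plain counted array standing in for the hash table; registry, reg_create and reg_delete are illustrative names.

#include <stdio.h>
#include <stdlib.h>

/* Once the entry count reaches zero the container itself is freed and the
 * caller's handle is cleared. */
struct registry {
    void **slots;
    unsigned count;
};

static struct registry *reg_create(unsigned capacity)
{
    struct registry *r = calloc(1, sizeof(*r));
    if (r)
        r->slots = calloc(capacity, sizeof(*r->slots));
    return r;
}

/* Returns 1 if this call removed the final entry and freed the registry. */
static int reg_delete(struct registry **pr, unsigned idx)
{
    struct registry *r = *pr;

    r->slots[idx] = NULL;
    if (--r->count == 0) {
        free(r->slots);
        free(r);
        *pr = NULL;              /* handle must not be used again */
        return 1;
    }
    return 0;
}

int main(void)
{
    struct registry *r = reg_create(4);
    int a, b;

    if (!r || !r->slots)
        return 1;
    r->slots[0] = &a; r->slots[1] = &b;
    r->count = 2;

    printf("last=%d\n", reg_delete(&r, 0));   /* last=0 */
    printf("last=%d\n", reg_delete(&r, 1));   /* last=1, registry freed */
    return 0;
}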
-+ -+ @input hRIHandle - Handle of object whose reference info is to be deleted -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle) -+{ -+ RI_LIST_ENTRY *psRIEntry = NULL; -+ RI_SUBLIST_ENTRY *psRISubEntry; -+ uintptr_t hashData; -+ IMG_PID pid; -+ -+ PVR_RETURN_IF_INVALID_PARAM(hRIHandle); -+ -+ psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle; -+ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) -+ { -+ /* Pointer does not point to valid structure */ -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ /* Acquire RI lock*/ -+ _RILock(); -+ -+ /* For entries which do have a parent PMR remove the node from the sublist */ -+ if (psRISubEntry->psRI) -+ { -+ psRIEntry = (RI_LIST_ENTRY *)psRISubEntry->psRI; -+ -+ /* Now, remove entry from the sublist */ -+ dllist_remove_node(&(psRISubEntry->sListNode)); -+ } -+ -+ psRISubEntry->valid = _INVALID; -+ -+ /* Remove the entry from the proc allocations linked list */ -+ pid = psRISubEntry->pid; -+ /* If this is the only allocation for this pid, just remove it from the hash table */ -+ if (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) -+ { -+ HASH_Remove_Extended(g_pProcHashTable, (void *)&pid); -+ /* Decrement number of entries in proc hash table, and delete the hash table if there are now none */ -+ if (--g_ui16ProcCount == 0) -+ { -+ HASH_Delete(g_pProcHashTable); -+ g_pProcHashTable = NULL; -+ } -+ } -+ else -+ { -+ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid); -+ if ((PDLLIST_NODE)hashData == &(psRISubEntry->sProcListNode)) -+ { -+ HASH_Remove_Extended(g_pProcHashTable, (void *)&pid); -+ HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)dllist_get_next_node(&(psRISubEntry->sProcListNode))); -+ } -+ } -+ dllist_remove_node(&(psRISubEntry->sProcListNode)); -+ -+ /* Now, free the memory used to store the sublist entry */ -+ OSFreeMemNoStats(psRISubEntry); -+ psRISubEntry = NULL; -+ -+ /* -+ * Decrement number of entries in sublist if this MemDesc had a parent entry. -+ */ -+ if (psRIEntry) -+ { -+ psRIEntry->ui16SubListCount--; -+ } -+ -+ /* Release RI lock*/ -+ _RIUnlock(); -+ -+ /* -+ * Make the handle NULL once MEMDESC RI entry is deleted -+ */ -+ hRIHandle = NULL; -+ -+ return PVRSRV_OK; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RIDeleteListKM -+ -+ @Description -+ Delete all Resource Information entries and free associated -+ memory. -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RIDeleteListKM(void) -+{ -+ PVRSRV_ERROR eResult = PVRSRV_OK; -+ -+ _RILock(); -+ -+ if (g_pRIHashTable) -+ { -+ eResult = HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DeleteAllEntries, NULL); -+ if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE) -+ { -+ /* -+ * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when -+ * the hash table gets deleted as a result of deleting the final PMR entry, -+ * so this is not a real error condition... -+ */ -+ eResult = PVRSRV_OK; -+ } -+ } -+ -+ /* After the run through the RIHashTable that holds the PMR entries there might be -+ * still entries left in the per-process hash table because they were added with -+ * RIWriteProcListEntryKM() and have no PMR parent associated. 
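The PVRSRV_ERROR_RESOURCE_UNAVAILABLE handling above is an iterator-stop sentinel: the per-entry callback returns a reserved code once the table it is walking has been destroyed underneath it, and the caller translates that code back to success. A minimal sketch of the same idiom, with OK, STOP_ITERATION, iterate and delete_entry as illustrative names:

#include <stdio.h>

enum { OK = 0, STOP_ITERATION = -1 };

typedef int (*visit_fn)(int value, void *ctx);

static int iterate(const int *values, int n, visit_fn visit, void *ctx)
{
    for (int i = 0; i < n; i++) {
        int rc = visit(values[i], ctx);
        if (rc != OK)
            return rc;          /* propagate the stop/error code */
    }
    return OK;
}

static int delete_entry(int value, void *ctx)
{
    int *remaining = ctx;

    printf("deleting entry %d\n", value);
    if (--(*remaining) == 0)
        return STOP_ITERATION;  /* container gone: stop, not a real error */
    return OK;
}

int main(void)
{
    int values[] = { 10, 20, 30 };
    int remaining = 3;
    int rc = iterate(values, 3, delete_entry, &remaining);

    if (rc == STOP_ITERATION)
        rc = OK;                /* same translation RIDeleteListKM() applies */
    return rc;
}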
-+ */ -+ if (g_pProcHashTable) -+ { -+ eResult = HASH_Iterate(g_pProcHashTable, (HASH_pfnCallback) _DeleteAllProcEntries, NULL); -+ if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE) -+ { -+ /* -+ * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when -+ * the hash table gets deleted as a result of deleting the final PMR entry, -+ * so this is not a real error condition... -+ */ -+ eResult = PVRSRV_OK; -+ } -+ } -+ -+ _RIUnlock(); -+ -+ return eResult; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RIDumpListKM -+ -+ @Description -+ Dumps out the contents of the RI List entry for the -+ specified PMR, and all MEMDESC allocation entries -+ in the associated sub linked list. -+ At present, output is directed to Kernel log -+ via PVR_DPF. -+ -+ @input psPMR - PMR for which RI entry details are to be output -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RIDumpListKM(PMR *psPMR) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* Acquire RI lock*/ -+ _RILock(); -+ -+ eError = _DumpList(psPMR, 0); -+ -+ /* Release RI lock*/ -+ _RIUnlock(); -+ -+ return eError; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RIGetListEntryKM -+ -+ @Description -+ Returns pointer to a formatted string with details of the specified -+ list entry. If no entry exists (e.g. it may have been deleted -+ since the previous call), NULL is returned. -+ -+ @input pid - pid for which RI entry details are to be output -+ @input ppHandle - handle to the entry, if NULL, the first entry will be -+ returned. -+ @output pszEntryString - string to be output for the entry -+ @output hEntry - hEntry will be returned pointing to the next entry -+ (or NULL if there is no next entry) -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+IMG_BOOL RIGetListEntryKM(IMG_PID pid, -+ IMG_HANDLE **ppHandle, -+ IMG_CHAR **ppszEntryString) -+{ -+ RI_SUBLIST_ENTRY *psRISubEntry = NULL; -+ RI_LIST_ENTRY *psRIEntry = NULL; -+ uintptr_t hashData = 0; -+ IMG_PID hashKey = pid; -+ -+ static IMG_CHAR acStringBuffer[RI_FRMT_SIZE_MAX]; -+ -+ static IMG_UINT64 ui64TotalMemdescAlloc; -+ static IMG_UINT64 ui64TotalImport; -+ static IMG_UINT64 ui64TotalPMRAlloc; -+ static IMG_UINT64 ui64TotalPMRBacked; -+ static enum { -+ RI_GET_STATE_MEMDESCS_LIST_START, -+ RI_GET_STATE_MEMDESCS_SUMMARY, -+ RI_GET_STATE_PMR_LIST, -+ RI_GET_STATE_PMR_SUMMARY, -+ RI_GET_STATE_END, -+ RI_GET_STATE_LAST -+ } g_bNextGetState = RI_GET_STATE_MEMDESCS_LIST_START; -+ -+ static DLLIST_NODE *psNode; -+ static DLLIST_NODE *psSysAllocNode; -+ static IMG_CHAR szProcName[RI_PROC_BUF_SIZE]; -+ static IMG_UINT32 ui32ProcessedSysAllocPMRCount; -+ -+ acStringBuffer[0] = '\0'; -+ -+ switch (g_bNextGetState) -+ { -+ case RI_GET_STATE_MEMDESCS_LIST_START: -+ /* look-up pid in Hash Table, to obtain first entry for pid */ -+ hashData = HASH_Retrieve_Extended(g_pProcHashTable, (void *)&hashKey); -+ if (hashData) -+ { -+ if (*ppHandle) -+ { -+ psRISubEntry = (RI_SUBLIST_ENTRY *)*ppHandle; -+ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) -+ { -+ psRISubEntry = NULL; -+ } -+ } -+ else -+ { -+ psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode); -+ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) -+ { -+ psRISubEntry = NULL; -+ } -+ } -+ } -+ -+ if (psRISubEntry) -+ { -+ PDLLIST_NODE 
psNextProcListNode = dllist_get_next_node(&psRISubEntry->sProcListNode); -+ -+ if (psRISubEntry->bIsImport) -+ { -+ ui64TotalImport += psRISubEntry->ui64Size; -+ } -+ else -+ { -+ ui64TotalMemdescAlloc += psRISubEntry->ui64Size; -+ } -+ -+ _GenerateMEMDESCEntryString(psRISubEntry, -+ IMG_TRUE, -+ RI_MEMDESC_ENTRY_BUF_SIZE, -+ acStringBuffer); -+ -+ if (szProcName[0] == '\0') -+ { -+ OSStringLCopy(szProcName, (pid == PVR_SYS_ALLOC_PID) ? -+ PVRSRV_MODNAME : psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE); -+ } -+ -+ -+ *ppszEntryString = acStringBuffer; -+ *ppHandle = (IMG_HANDLE)IMG_CONTAINER_OF(psNextProcListNode, RI_SUBLIST_ENTRY, sProcListNode); -+ -+ if (psNextProcListNode == NULL || -+ psNextProcListNode == (PDLLIST_NODE)hashData) -+ { -+ g_bNextGetState = RI_GET_STATE_MEMDESCS_SUMMARY; -+ } -+ /* else continue to list MEMDESCs */ -+ } -+ else -+ { -+ if (ui64TotalMemdescAlloc == 0) -+ { -+ acStringBuffer[0] = '\0'; -+ *ppszEntryString = acStringBuffer; -+ g_bNextGetState = RI_GET_STATE_MEMDESCS_SUMMARY; -+ } -+ /* else continue to list MEMDESCs */ -+ } -+ break; -+ -+ case RI_GET_STATE_MEMDESCS_SUMMARY: -+ OSSNPrintf(acStringBuffer, -+ RI_MEMDESC_SUM_BUF_SIZE, -+ RI_MEMDESC_SUM_FRMT, -+ pid, -+ szProcName, -+ ui64TotalMemdescAlloc, -+ ui64TotalMemdescAlloc >> 10, -+ ui64TotalImport, -+ ui64TotalImport >> 10, -+ (ui64TotalMemdescAlloc + ui64TotalImport), -+ (ui64TotalMemdescAlloc + ui64TotalImport) >> 10); -+ -+ *ppszEntryString = acStringBuffer; -+ ui64TotalMemdescAlloc = 0; -+ ui64TotalImport = 0; -+ szProcName[0] = '\0'; -+ -+ g_bNextGetState = RI_GET_STATE_PMR_LIST; -+ break; -+ -+ case RI_GET_STATE_PMR_LIST: -+ if (pid == PVR_SYS_ALLOC_PID) -+ { -+ OSLockAcquire(g_hSysAllocPidListLock); -+ acStringBuffer[0] = '\0'; -+ if (!psSysAllocNode) -+ { -+ psSysAllocNode = &g_sSysAllocPidListHead; -+ ui32ProcessedSysAllocPMRCount = 0; -+ } -+ psSysAllocNode = dllist_get_next_node(psSysAllocNode); -+ -+ if (szProcName[0] == '\0') -+ { -+ OSStringLCopy(szProcName, PVRSRV_MODNAME, RI_PROC_BUF_SIZE); -+ } -+ if (psSysAllocNode != NULL && psSysAllocNode != &g_sSysAllocPidListHead) -+ { -+ IMG_DEVMEM_SIZE_T uiPMRPhysicalBacking, uiPMRLogicalSize = 0; -+ -+ psRIEntry = IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sSysAllocListNode); -+ _GeneratePMREntryString(psRIEntry, -+ IMG_TRUE, -+ RI_PMR_ENTRY_BUF_SIZE, -+ acStringBuffer); -+ PMR_LogicalSize(psRIEntry->psPMR, -+ &uiPMRLogicalSize); -+ ui64TotalPMRAlloc += uiPMRLogicalSize; -+ PMR_PhysicalSize(psRIEntry->psPMR, &uiPMRPhysicalBacking); -+ ui64TotalPMRBacked += uiPMRPhysicalBacking; -+ -+ ui32ProcessedSysAllocPMRCount++; -+ if (ui32ProcessedSysAllocPMRCount > g_ui32SysAllocPMRCount+1) -+ { -+ g_bNextGetState = RI_GET_STATE_PMR_SUMMARY; -+ } -+ /* else continue to list PMRs */ -+ } -+ else -+ { -+ g_bNextGetState = RI_GET_STATE_PMR_SUMMARY; -+ } -+ *ppszEntryString = (IMG_CHAR *)acStringBuffer; -+ OSLockRelease(g_hSysAllocPidListLock); -+ } -+ else -+ { -+ IMG_BOOL bPMRToDisplay = IMG_FALSE; -+ -+ /* Iterate through the 'touched' PMRs and display details */ -+ if (!psNode) -+ { -+ psNode = dllist_get_next_node(&sListFirst); -+ } -+ else -+ { -+ psNode = dllist_get_next_node(psNode); -+ } -+ -+ while ((psNode != NULL && psNode != &sListFirst) && -+ !bPMRToDisplay) -+ { -+ psRIEntry = IMG_CONTAINER_OF(psNode, RI_LIST_ENTRY, sListNode); -+ if (psRIEntry->pid == pid) -+ { -+ IMG_DEVMEM_SIZE_T uiPMRPhysicalBacking, uiPMRLogicalSize = 0; -+ -+ /* This PMR was 'touched', so display details and unflag it*/ -+ 
_GeneratePMREntryString(psRIEntry, -+ IMG_TRUE, -+ RI_PMR_ENTRY_BUF_SIZE, -+ acStringBuffer); -+ PMR_LogicalSize(psRIEntry->psPMR, &uiPMRLogicalSize); -+ ui64TotalPMRAlloc += uiPMRLogicalSize; -+ PMR_PhysicalSize(psRIEntry->psPMR, &uiPMRPhysicalBacking); -+ ui64TotalPMRBacked += uiPMRPhysicalBacking; -+ -+ /* Remember the name of the process for 1 PMR for the summary */ -+ if (szProcName[0] == '\0') -+ { -+ OSStringLCopy(szProcName, psRIEntry->ai8ProcName, RI_PROC_BUF_SIZE); -+ } -+ bPMRToDisplay = IMG_TRUE; -+ } -+ else -+ { -+ psNode = dllist_get_next_node(psNode); -+ } -+ } -+ -+ if (psNode == NULL || (psNode == &sListFirst)) -+ { -+ g_bNextGetState = RI_GET_STATE_PMR_SUMMARY; -+ } -+ /* else continue listing PMRs */ -+ } -+ break; -+ -+ case RI_GET_STATE_PMR_SUMMARY: -+ OSSNPrintf(acStringBuffer, -+ RI_PMR_SUM_BUF_SIZE, -+ RI_PMR_SUM_FRMT, -+ pid, -+ szProcName, -+ ui64TotalPMRAlloc, -+ ui64TotalPMRAlloc >> 10, -+ ui64TotalPMRBacked, -+ ui64TotalPMRBacked >> 10); -+ -+ *ppszEntryString = acStringBuffer; -+ ui64TotalPMRAlloc = 0; -+ ui64TotalPMRBacked = 0; -+ szProcName[0] = '\0'; -+ psSysAllocNode = NULL; -+ -+ g_bNextGetState = RI_GET_STATE_END; -+ break; -+ -+ default: -+ PVR_DPF((PVR_DBG_ERROR, "%s: Bad %d)",__func__, g_bNextGetState)); -+ -+ __fallthrough; -+ case RI_GET_STATE_END: -+ /* Reset state ready for the next gpu_mem_area file to display */ -+ *ppszEntryString = NULL; -+ *ppHandle = NULL; -+ psNode = NULL; -+ szProcName[0] = '\0'; -+ -+ g_bNextGetState = RI_GET_STATE_MEMDESCS_LIST_START; -+ return IMG_FALSE; -+ break; -+ } -+ -+ return IMG_TRUE; -+} -+ -+/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */ -+static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, -+ IMG_BOOL bDebugFs, -+ IMG_UINT16 ui16MaxStrLen, -+ IMG_CHAR *pszEntryString) -+{ -+ IMG_CHAR szProc[RI_MEMDESC_ENTRY_PROC_BUF_SIZE]; -+ IMG_CHAR szImport[RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE]; -+ IMG_CHAR szEntryFormat[RI_MEMDESC_ENTRY_FRMT_SIZE]; -+ const IMG_CHAR *pszAnnotationText; -+ IMG_PID uiRIPid = 0; -+ PMR* psRIPMR = NULL; -+ IMG_UINT32 ui32RIPMRFlags = 0; -+ IMG_BOOL bHostDevice = psRISubEntry->ui32DevID == PVRSRV_HOST_DEVICE_ID; -+ IMG_CHAR szDeviceID[RI_DEV_ID_BUF_SIZE]; -+ -+ if (psRISubEntry->psRI != NULL) -+ { -+ uiRIPid = psRISubEntry->psRI->pid; -+ psRIPMR = psRISubEntry->psRI->psPMR; -+ ui32RIPMRFlags = psRISubEntry->psRI->ui32RIPMRFlags; -+ } -+ -+ OSSNPrintf(szEntryFormat, -+ RI_MEMDESC_ENTRY_FRMT_SIZE, -+ RI_MEMDESC_ENTRY_FRMT, -+ DEVMEM_ANNOTATION_MAX_LEN); -+ -+ if (!bDebugFs) -+ { -+ /* we don't include process ID info for debugfs output */ -+ OSSNPrintf(szProc, -+ RI_MEMDESC_ENTRY_PROC_BUF_SIZE, -+ RI_MEMDESC_ENTRY_PROC_FRMT, -+ psRISubEntry->pid, -+ psRISubEntry->ai8ProcName); -+ } -+ -+ if (!bHostDevice) -+ { -+ OSSNPrintf(szDeviceID, -+ sizeof(szDeviceID), -+ "%-3d", -+ psRISubEntry->ui32DevID); -+ } -+ -+ if (psRISubEntry->bIsImport && psRIPMR) -+ { -+ OSSNPrintf((IMG_CHAR *)&szImport, -+ RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE, -+ RI_MEMDESC_ENTRY_IMPORT_FRMT, -+ uiRIPid); -+ /* Set pszAnnotationText to that of the 'parent' PMR RI entry */ -+ pszAnnotationText = PMR_GetAnnotation(psRIPMR); -+ } -+ else if (!psRISubEntry->bIsSuballoc && psRIPMR) -+ { -+ /* Set pszAnnotationText to that of the 'parent' PMR RI entry */ -+ pszAnnotationText = PMR_GetAnnotation(psRIPMR); -+ } -+ else -+ { -+ /* Set pszAnnotationText to that of the MEMDESC RI entry */ -+ pszAnnotationText = psRISubEntry->ai8TextB; -+ } -+ -+ /* 
Don't print memdescs if they are local imports -+ * (i.e. imported PMRs allocated by this process) -+ */ -+ if (bDebugFs && -+ ((psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset) == 0) && -+ (psRISubEntry->bIsImport && ((psRISubEntry->pid == uiRIPid) -+ || (psRISubEntry->pid == PVR_SYS_ALLOC_PID)))) -+ { -+ /* Don't print this entry */ -+ pszEntryString[0] = '\0'; -+ } -+ else -+ { -+ OSSNPrintf(pszEntryString, -+ ui16MaxStrLen, -+ szEntryFormat, -+ (bDebugFs ? "" : " "), -+ psRISubEntry->pid, -+ (bHostDevice ? "- " : szDeviceID), -+ (psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset), -+ pszAnnotationText, -+ (bDebugFs ? "" : (char *)szProc), -+ psRISubEntry->ui64Size, -+ psRIPMR, -+ (psRISubEntry->bIsImport ? (char *)&szImport : ""), -+ (!psRISubEntry->bIsImport && (ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR) && (psRISubEntry->pid != PVR_SYS_ALLOC_PID)) ? g_szSysAllocImport : "", -+ (bDebugFs ? '\n' : ' ')); -+ } -+} -+ -+/* Function used to produce string containing info for PMR RI entries (used for debugfs and kernel log output) */ -+static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry, -+ IMG_BOOL bDebugFs, -+ IMG_UINT16 ui16MaxStrLen, -+ IMG_CHAR *pszEntryString) -+{ -+ const IMG_CHAR* pszAnnotationText; -+ const IMG_CHAR* pszHeapText; -+ -+ IMG_DEVMEM_SIZE_T uiLogicalSize = 0; -+ IMG_DEVMEM_SIZE_T uiPhysicalSize = 0; -+ IMG_CHAR szEntryFormat[RI_PMR_ENTRY_FRMT_SIZE]; -+ IMG_BOOL bHostDevice = psRIEntry->ui32DevID == PVRSRV_HOST_DEVICE_ID; -+ IMG_CHAR szDeviceID[RI_DEV_ID_BUF_SIZE]; -+ -+ PMR_LogicalSize(psRIEntry->psPMR, &uiLogicalSize); -+ -+ PMR_PhysicalSize(psRIEntry->psPMR, &uiPhysicalSize); -+ -+ OSSNPrintf(szEntryFormat, -+ RI_PMR_ENTRY_FRMT_SIZE, -+ RI_PMR_ENTRY_FRMT, -+ (DEVMEM_ANNOTATION_MAX_LEN/2), -+ PHYS_HEAP_NAME_SIZE); -+ -+ /* Set pszAnnotationText to that PMR RI entry */ -+ pszAnnotationText = (IMG_PCHAR) PMR_GetAnnotation(psRIEntry->psPMR); -+ -+ /* Acquire PhysHeap Name to that PMR RI entry */ -+ pszHeapText = PhysHeapName((PMR_PhysHeap(psRIEntry->psPMR))); -+ -+ if (!bHostDevice) -+ { -+ OSSNPrintf(szDeviceID, -+ sizeof(szDeviceID), -+ "%-3d", -+ psRIEntry->ui32DevID); -+ } -+ -+ OSSNPrintf(pszEntryString, -+ ui16MaxStrLen, -+ szEntryFormat, -+ (bDebugFs ? "" : " "), -+ psRIEntry->pid, -+ (bHostDevice ? "- " : szDeviceID), -+ (void*)psRIEntry->psPMR, -+ pszAnnotationText, -+ pszHeapText, -+ uiLogicalSize, -+ uiPhysicalSize, -+ (bDebugFs ? '\n' : ' ')); -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function _DumpList -+ -+ @Description -+ Dumps out RI List entries according to parameters passed. -+ -+ @input psPMR - If not NULL, function will output the RI entries for -+ the specified PMR only -+ @input pid - If non-zero, the function will only output MEMDESC RI -+ entries made by the process with ID pid. -+ If zero, all MEMDESC RI entries will be output. 
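RIGetListEntryKM() above is written as a small resumable state machine: its cursor and running totals live in static variables, so a debugfs/seq-file style reader can pull one formatted line per call, and the state resets once the end marker is reached. The sketch below shows the same one-line-per-call pattern in plain C; dump_state, get_next_line and the item data are illustrative, not the driver's structures.

#include <stdio.h>
#include <stddef.h>

enum dump_state { DUMP_ITEMS, DUMP_SUMMARY, DUMP_END };

/* Produce one line per call; returns 0 when there is nothing left to print. */
static int get_next_line(char *buf, size_t len)
{
    static enum dump_state state = DUMP_ITEMS;
    static int item = 0, total = 0;
    static const int items[] = { 4096, 8192, 16384 };

    switch (state) {
    case DUMP_ITEMS:
        snprintf(buf, len, "item %d: %d bytes", item, items[item]);
        total += items[item];
        if (++item == (int)(sizeof(items) / sizeof(items[0])))
            state = DUMP_SUMMARY;
        return 1;
    case DUMP_SUMMARY:
        snprintf(buf, len, "total: %d bytes", total);
        state = DUMP_END;
        return 1;
    case DUMP_END:
    default:
        item = 0; total = 0;        /* reset for the next reader */
        state = DUMP_ITEMS;
        return 0;
    }
}

int main(void)
{
    char line[64];

    while (get_next_line(line, sizeof(line)))
        puts(line);
    return 0;
}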
-+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid) -+{ -+ RI_LIST_ENTRY *psRIEntry = NULL; -+ RI_SUBLIST_ENTRY *psRISubEntry = NULL; -+ IMG_UINT16 ui16SubEntriesParsed = 0; -+ uintptr_t hashData = 0; -+ IMG_PID hashKey; -+ PMR *pPMRHashKey = psPMR; -+ IMG_BOOL bDisplayedThisPMR = IMG_FALSE; -+ IMG_UINT64 ui64LogicalSize = 0; -+ -+ PVR_RETURN_IF_INVALID_PARAM(psPMR); -+ -+ if (g_pRIHashTable && g_pProcHashTable) -+ { -+ if (pid != 0) -+ { -+ /* look-up pid in Hash Table */ -+ hashKey = pid; -+ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey); -+ if (hashData) -+ { -+ psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode); -+ if (psRISubEntry) -+ { -+ psRIEntry = psRISubEntry->psRI; -+ } -+ } -+ } -+ else -+ { -+ /* Look-up psPMR in Hash Table */ -+ hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); -+ psRIEntry = (RI_LIST_ENTRY *)hashData; -+ } -+ if (!psRIEntry) -+ { -+ /* No entry found in hash table */ -+ return PVRSRV_ERROR_NOT_FOUND; -+ } -+ while (psRIEntry) -+ { -+ bDisplayedThisPMR = IMG_FALSE; -+ /* Output details for RI entry */ -+ if (!pid) -+ { -+ PMR_LogicalSize(psPMR, (IMG_DEVMEM_SIZE_T*)&ui64LogicalSize); -+ -+ _RIOutput (("%s <%p> suballocs:%d size:0x%010" IMG_UINT64_FMTSPECx, -+ PMR_GetAnnotation(psRIEntry->psPMR), -+ psRIEntry->psPMR, -+ (IMG_UINT)psRIEntry->ui16SubListCount, -+ ui64LogicalSize)); -+ bDisplayedThisPMR = IMG_TRUE; -+ } -+ ui16SubEntriesParsed = 0; -+ if (psRIEntry->ui16SubListCount) -+ { -+#if _DUMP_LINKEDLIST_INFO -+ _RIOutput (("RI LIST: {sSubListFirst.psNextNode:0x%p}\n", -+ psRIEntry->sSubListFirst.psNextNode)); -+#endif /* _DUMP_LINKEDLIST_INFO */ -+ if (!pid) -+ { -+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), -+ RI_SUBLIST_ENTRY, sListNode); -+ } -+ /* Traverse RI sublist and output details for each entry */ -+ while (psRISubEntry) -+ { -+ if (psRIEntry) -+ { -+ if ((ui16SubEntriesParsed >= psRIEntry->ui16SubListCount)) -+ { -+ break; -+ } -+ if (!bDisplayedThisPMR) -+ { -+ PMR_LogicalSize(psPMR, (IMG_DEVMEM_SIZE_T*)&ui64LogicalSize); -+ -+ _RIOutput (("%s <%p> suballocs:%d size:0x%010" IMG_UINT64_FMTSPECx, -+ PMR_GetAnnotation(psRIEntry->psPMR), -+ psRIEntry->psPMR, -+ (IMG_UINT)psRIEntry->ui16SubListCount, -+ ui64LogicalSize)); -+ bDisplayedThisPMR = IMG_TRUE; -+ } -+ } -+#if _DUMP_LINKEDLIST_INFO -+ _RIOutput (("RI LIST: [this subentry:0x%p]\n",psRISubEntry)); -+ _RIOutput (("RI LIST: psRI:0x%p\n",psRISubEntry->psRI)); -+#endif /* _DUMP_LINKEDLIST_INFO */ -+ -+ { -+ IMG_CHAR szEntryString[RI_MEMDESC_ENTRY_BUF_SIZE]; -+ -+ _GenerateMEMDESCEntryString(psRISubEntry, -+ IMG_FALSE, -+ RI_MEMDESC_ENTRY_BUF_SIZE, -+ szEntryString); -+ _RIOutput (("%s",szEntryString)); -+ } -+ -+ if (pid) -+ { -+ if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) || -+ (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData)) -+ { -+ psRISubEntry = NULL; -+ } -+ else -+ { -+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)), -+ RI_SUBLIST_ENTRY, sProcListNode); -+ if (psRISubEntry) -+ { -+ if (psRIEntry != psRISubEntry->psRI) -+ { -+ /* -+ * The next MEMDESC in the process linked list is in a different PMR -+ */ -+ psRIEntry = psRISubEntry->psRI; -+ bDisplayedThisPMR = IMG_FALSE; -+ } -+ } -+ } -+ } -+ else -+ { -+ ui16SubEntriesParsed++; -+ psRISubEntry = 
IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)), -+ RI_SUBLIST_ENTRY, sListNode); -+ } -+ } -+ } -+ if (!pid && psRIEntry) -+ { -+ if (ui16SubEntriesParsed != psRIEntry->ui16SubListCount) -+ { -+ /* -+ * Output error message as sublist does not contain the -+ * number of entries indicated by sublist count -+ */ -+ _RIOutput (("RI ERROR: RI sublist contains %d entries, not %d entries\n", -+ ui16SubEntriesParsed, psRIEntry->ui16SubListCount)); -+ } -+ else if (psRIEntry->ui16SubListCount && !dllist_get_next_node(&(psRIEntry->sSubListFirst))) -+ { -+ /* -+ * Output error message as sublist is empty but sublist count -+ * is not zero -+ */ -+ _RIOutput (("RI ERROR: ui16SubListCount=%d for empty RI sublist\n", -+ psRIEntry->ui16SubListCount)); -+ } -+ } -+ psRIEntry = NULL; -+ } -+ } -+ return PVRSRV_OK; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RIDumpAllKM -+ -+ @Description -+ Dumps out the contents of all RI List entries (i.e. for all -+ MEMDESC allocations for each PMR). -+ At present, output is directed to Kernel log -+ via PVR_DPF. -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RIDumpAllKM(void) -+{ -+ if (g_pRIHashTable) -+ { -+ return HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DumpAllEntries, NULL); -+ } -+ return PVRSRV_OK; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RIDumpProcessKM -+ -+ @Description -+ Dumps out the contents of all MEMDESC RI List entries (for every -+ PMR) which have been allocate by the specified process only. -+ At present, output is directed to Kernel log -+ via PVR_DPF. -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid) -+{ -+ PVRSRV_ERROR eError; -+ IMG_UINT32 dummyPMR; -+ -+ if (!g_pProcHashTable) -+ { -+ return PVRSRV_OK; -+ } -+ -+ /* Acquire RI lock*/ -+ _RILock(); -+ -+ eError = _DumpList((PMR *)&dummyPMR, pid); -+ -+ /* Release RI lock*/ -+ _RIUnlock(); -+ -+ return eError; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function _TotalAllocsForProcess -+ -+ @Description -+ Totals all PMR physical backing for given process. -+ -+ @input pid - ID of process. -+ -+ @input ePhysHeapType - type of Physical Heap for which to total allocs -+ -+ @Return Size of all physical backing for PID's PMRs allocated from the -+ specified heap type (in bytes). 
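The totalling loop that follows accumulates 64-bit physical sizes into a signed 32-bit result and warns once the sum no longer fits. The sketch below shows the same guarded accumulation; note that where the driver code warns and then wraps, the sketch clamps at INT32_MAX so the arithmetic stays well defined. add_backing is an illustrative name.

#include <stdio.h>
#include <stdint.h>

/* Add a 64-bit physical size to a signed 32-bit running total, warning and
 * clamping when the total would exceed INT32_MAX. */
static int32_t add_backing(int32_t total, uint64_t phys_size)
{
    if ((uint64_t)total + phys_size > (uint64_t)INT32_MAX) {
        fprintf(stderr, "warning: 32-bit total would overflow; clamping\n");
        return INT32_MAX;
    }
    return total + (int32_t)phys_size;
}

int main(void)
{
    int32_t total = 0;

    total = add_backing(total, 4096);
    total = add_backing(total, 3ULL * 1024 * 1024 * 1024); /* warns, clamps */
    printf("total=%d\n", total);
    return 0;
}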
-+ -+******************************************************************************/ -+static IMG_INT32 _TotalAllocsForProcess(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType) -+{ -+ RI_LIST_ENTRY *psRIEntry = NULL; -+ RI_SUBLIST_ENTRY *psInitialRISubEntry = NULL; -+ RI_SUBLIST_ENTRY *psRISubEntry = NULL; -+ uintptr_t hashData = 0; -+ IMG_PID hashKey; -+ IMG_INT32 i32TotalPhysical = 0; -+ -+ if (g_pRIHashTable && g_pProcHashTable) -+ { -+ if (pid == PVR_SYS_ALLOC_PID) -+ { -+ IMG_UINT32 ui32ProcessedSysAllocPMRCount = 0; -+ DLLIST_NODE *psSysAllocNode = NULL; -+ -+ OSLockAcquire(g_hSysAllocPidListLock); -+ psSysAllocNode = dllist_get_next_node(&g_sSysAllocPidListHead); -+ while (psSysAllocNode && psSysAllocNode != &g_sSysAllocPidListHead) -+ { -+ psRIEntry = IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sSysAllocListNode); -+ ui32ProcessedSysAllocPMRCount++; -+ if (PhysHeapGetType(PMR_PhysHeap(psRIEntry->psPMR)) == ePhysHeapType) -+ { -+ IMG_UINT64 ui64PhysicalSize; -+ -+ PMR_PhysicalSize(psRIEntry->psPMR, (IMG_DEVMEM_SIZE_T*)&ui64PhysicalSize); -+ if (((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > 0x7fffffff)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeding size for i32",__func__)); -+ } -+ i32TotalPhysical += (IMG_INT32)(ui64PhysicalSize & 0x00000000ffffffff); -+ } -+ psSysAllocNode = dllist_get_next_node(psSysAllocNode); -+ } -+ OSLockRelease(g_hSysAllocPidListLock); -+ } -+ else -+ { -+ if (pid != 0) -+ { -+ /* look-up pid in Hash Table */ -+ hashKey = pid; -+ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey); -+ if (hashData) -+ { -+ psInitialRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode); -+ psRISubEntry = psInitialRISubEntry; -+ if (psRISubEntry) -+ { -+ psRIEntry = psRISubEntry->psRI; -+ } -+ } -+ } -+ -+ while (psRISubEntry && psRIEntry) -+ { -+ if (!psRISubEntry->bIsImport && !(psRIEntry->ui32RIPMRFlags & RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS) && -+ (pid == PVR_SYS_ALLOC_PID || !(psRIEntry->ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR)) && -+ (PhysHeapGetType(PMR_PhysHeap(psRIEntry->psPMR)) == ePhysHeapType)) -+ { -+ IMG_UINT64 ui64PhysicalSize; -+ -+ -+ PMR_PhysicalSize(psRIEntry->psPMR, (IMG_DEVMEM_SIZE_T*)&ui64PhysicalSize); -+ if (((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > 0x7fffffff)) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeding size for i32",__func__)); -+ } -+ i32TotalPhysical += (IMG_INT32)(ui64PhysicalSize & 0x00000000ffffffff); -+ psRIEntry->ui32RIPMRFlags |= RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS; -+ } -+ if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) || -+ (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData)) -+ { -+ psRISubEntry = NULL; -+ psRIEntry = NULL; -+ } -+ else -+ { -+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)), -+ RI_SUBLIST_ENTRY, sProcListNode); -+ if (psRISubEntry) -+ { -+ psRIEntry = psRISubEntry->psRI; -+ } -+ } -+ } -+ psRISubEntry = psInitialRISubEntry; -+ if (psRISubEntry) -+ { -+ psRIEntry = psRISubEntry->psRI; -+ } -+ while (psRISubEntry && psRIEntry) -+ { -+ psRIEntry->ui32RIPMRFlags &= ~RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS; -+ if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) || -+ (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData)) -+ { -+ psRISubEntry = NULL; -+ psRIEntry = NULL; -+ } -+ else -+ { -+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)), -+ 
RI_SUBLIST_ENTRY, sProcListNode); -+ if (psRISubEntry) -+ { -+ psRIEntry = psRISubEntry->psRI; -+ } -+ } -+ } -+ } -+ } -+ return i32TotalPhysical; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RITotalAllocProcessKM -+ -+ @Description -+ Returns the total of allocated GPU memory (backing for PMRs) -+ which has been allocated from the specific heap by the specified -+ process only. -+ -+ @Return Amount of physical backing allocated (in bytes) -+ -+******************************************************************************/ -+IMG_INT32 RITotalAllocProcessKM(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType) -+{ -+ IMG_INT32 i32BackingTotal = 0; -+ -+ if (g_pProcHashTable) -+ { -+ /* Acquire RI lock*/ -+ _RILock(); -+ -+ i32BackingTotal = _TotalAllocsForProcess(pid, ePhysHeapType); -+ -+ /* Release RI lock*/ -+ _RIUnlock(); -+ } -+ return i32BackingTotal; -+} -+ -+#if defined(DEBUG) -+/*! -+******************************************************************************* -+ -+ @Function _DumpProcessList -+ -+ @Description -+ Dumps out RI List entries according to parameters passed. -+ -+ @input psPMR - If not NULL, function will output the RI entries for -+ the specified PMR only -+ @input pid - If non-zero, the function will only output MEMDESC RI -+ entries made by the process with ID pid. -+ If zero, all MEMDESC RI entries will be output. -+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+static PVRSRV_ERROR _DumpProcessList(PMR *psPMR, -+ IMG_PID pid, -+ IMG_UINT64 ui64Offset, -+ IMG_DEV_VIRTADDR *psDevVAddr) -+{ -+ RI_LIST_ENTRY *psRIEntry = NULL; -+ RI_SUBLIST_ENTRY *psRISubEntry = NULL; -+ IMG_UINT16 ui16SubEntriesParsed = 0; -+ uintptr_t hashData = 0; -+ PMR *pPMRHashKey = psPMR; -+ -+ psDevVAddr->uiAddr = 0; -+ -+ PVR_RETURN_IF_INVALID_PARAM(psPMR); -+ -+ if (g_pRIHashTable && g_pProcHashTable) -+ { -+ PVR_ASSERT(psPMR && pid); -+ -+ /* Look-up psPMR in Hash Table */ -+ hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); -+ psRIEntry = (RI_LIST_ENTRY *)hashData; -+ -+ if (!psRIEntry) -+ { -+ /* No entry found in hash table */ -+ return PVRSRV_ERROR_NOT_FOUND; -+ } -+ -+ if (psRIEntry->ui16SubListCount) -+ { -+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), -+ RI_SUBLIST_ENTRY, sListNode); -+ -+ /* Traverse RI sublist and output details for each entry */ -+ while (psRISubEntry && (ui16SubEntriesParsed < psRIEntry->ui16SubListCount)) -+ { -+ if (pid == psRISubEntry->pid) -+ { -+ IMG_UINT64 ui64StartOffset = psRISubEntry->ui64Offset; -+ IMG_UINT64 ui64EndOffset = psRISubEntry->ui64Offset + psRISubEntry->ui64Size; -+ -+ if (ui64Offset >= ui64StartOffset && ui64Offset < ui64EndOffset) -+ { -+ psDevVAddr->uiAddr = psRISubEntry->sVAddr.uiAddr; -+ return PVRSRV_OK; -+ } -+ } -+ -+ ui16SubEntriesParsed++; -+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)), -+ RI_SUBLIST_ENTRY, sListNode); -+ } -+ } -+ } -+ -+ return PVRSRV_ERROR_INVALID_PARAMS; -+} -+ -+/*! -+******************************************************************************* -+ -+ @Function RIDumpProcessListKM -+ -+ @Description -+ Dumps out selected contents of all MEMDESC RI List entries (for a -+ PMR) which have been allocate by the specified process only. 
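_DumpProcessList() above is essentially an offset-to-device-VA lookup: it walks a PMR's suballocation records and returns the virtual address of the record whose [offset, offset + size) range contains the queried offset. A self-contained sketch of that range search follows; struct suballoc and find_dev_vaddr are illustrative names.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct suballoc {
    uint64_t offset;    /* start offset within the PMR */
    uint64_t size;      /* size of the suballocation   */
    uint64_t dev_vaddr; /* device virtual address      */
};

/* Return 0 and write the device VA if some record covers the offset. */
static int find_dev_vaddr(const struct suballoc *subs, size_t n,
                          uint64_t offset, uint64_t *out_vaddr)
{
    for (size_t i = 0; i < n; i++) {
        if (offset >= subs[i].offset &&
            offset < subs[i].offset + subs[i].size) {
            *out_vaddr = subs[i].dev_vaddr;
            return 0;
        }
    }
    return -1;          /* no suballocation covers this offset */
}

int main(void)
{
    const struct suballoc subs[] = {
        { 0x0000, 0x1000, 0x8000000000ULL },
        { 0x1000, 0x2000, 0x8000001000ULL },
    };
    uint64_t va = 0;

    if (find_dev_vaddr(subs, 2, 0x1800, &va) == 0)
        printf("offset 0x1800 -> VA 0x%llx\n", (unsigned long long)va);
    return 0;
}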
-+ -+ @Return PVRSRV_ERROR -+ -+******************************************************************************/ -+PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR, -+ IMG_PID pid, -+ IMG_UINT64 ui64Offset, -+ IMG_DEV_VIRTADDR *psDevVAddr) -+{ -+ PVRSRV_ERROR eError; -+ -+ if (!g_pProcHashTable) -+ { -+ return PVRSRV_OK; -+ } -+ -+ /* Acquire RI lock*/ -+ _RILock(); -+ -+ eError = _DumpProcessList(psPMR, -+ pid, -+ ui64Offset, -+ psDevVAddr); -+ -+ /* Release RI lock*/ -+ _RIUnlock(); -+ -+ return eError; -+} -+#endif -+ -+static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v, void* pvPriv) -+{ -+ RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v; -+ -+ PVR_UNREFERENCED_PARAMETER (k); -+ PVR_UNREFERENCED_PARAMETER (pvPriv); -+ -+ return RIDumpListKM(psRIEntry->psPMR); -+} -+ -+static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v, void* pvPriv) -+{ -+ RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v; -+ RI_SUBLIST_ENTRY *psRISubEntry; -+ PVRSRV_ERROR eResult = PVRSRV_OK; -+ -+ PVR_UNREFERENCED_PARAMETER (k); -+ PVR_UNREFERENCED_PARAMETER (pvPriv); -+ -+ while ((eResult == PVRSRV_OK) && (psRIEntry->ui16SubListCount > 0)) -+ { -+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), RI_SUBLIST_ENTRY, sListNode); -+ eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE)psRISubEntry); -+ } -+ if (eResult == PVRSRV_OK) -+ { -+ eResult = RIDeletePMREntryKM((RI_HANDLE)psRIEntry); -+ /* -+ * If we've deleted the Hash table, return -+ * an error to stop the iterator... -+ */ -+ if (!g_pRIHashTable) -+ { -+ eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; -+ } -+ } -+ return eResult; -+} -+ -+static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v, void* pvPriv) -+{ -+ RI_SUBLIST_ENTRY *psRISubEntry = (RI_SUBLIST_ENTRY *)v; -+ PVRSRV_ERROR eResult; -+ -+ PVR_UNREFERENCED_PARAMETER (k); -+ PVR_UNREFERENCED_PARAMETER (pvPriv); -+ -+ eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE) psRISubEntry); -+ if (eResult == PVRSRV_OK && !g_pProcHashTable) -+ { -+ /* -+ * If we've deleted the Hash table, return -+ * an error to stop the iterator... -+ */ -+ eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; -+ } -+ -+ return eResult; -+} -+ -+#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ -diff --git a/drivers/gpu/drm/img-rogue/ri_server.h b/drivers/gpu/drm/img-rogue/ri_server.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/ri_server.h -@@ -0,0 +1,110 @@ -+/*************************************************************************/ /*! -+@File ri_server.h -+@Title Resource Information abstraction -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Resource Information (RI) functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RI_SERVER_H -+#define RI_SERVER_H -+ -+#include "img_defs.h" -+#include "ri_typedefs.h" -+#include "pmr.h" -+#include "pvrsrv_error.h" -+#include "physheap.h" -+#include "connection_server.h" -+#include "device.h" -+ -+PVRSRV_ERROR RIInitKM(void); -+void RIDeInitKM(void); -+ -+void RILockAcquireKM(void); -+void RILockReleaseKM(void); -+ -+PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR); -+ -+PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR, -+ IMG_PID ui32Owner); -+ -+PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR, -+ IMG_UINT32 ui32TextBSize, -+ const IMG_CHAR *psz8TextB, -+ IMG_UINT64 uiOffset, -+ IMG_UINT64 uiSize, -+ IMG_BOOL bIsImport, -+ IMG_BOOL bIsSuballoc, -+ RI_HANDLE *phRIHandle); -+ -+PVRSRV_ERROR RIWriteProcListEntryKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32TextBSize, -+ const IMG_CHAR *psz8TextB, -+ IMG_UINT64 ui64Size, -+ IMG_UINT64 ui64DevVAddr, -+ RI_HANDLE *phRIHandle); -+ -+PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle, -+ IMG_DEV_VIRTADDR sVAddr); -+ -+PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle); -+PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle); -+ -+PVRSRV_ERROR RIDeleteListKM(void); -+ -+PVRSRV_ERROR RIDumpListKM(PMR *psPMR); -+ -+PVRSRV_ERROR RIDumpAllKM(void); -+ -+PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid); -+ -+#if defined(DEBUG) -+PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR, -+ IMG_PID pid, -+ IMG_UINT64 ui64Offset, -+ IMG_DEV_VIRTADDR *psDevVAddr); -+#endif -+ -+IMG_BOOL RIGetListEntryKM(IMG_PID pid, -+ IMG_HANDLE **ppHandle, -+ IMG_CHAR **ppszEntryString); -+ -+IMG_INT32 RITotalAllocProcessKM(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType); -+ -+#endif /* RI_SERVER_H */ -diff --git a/drivers/gpu/drm/img-rogue/ri_typedefs.h b/drivers/gpu/drm/img-rogue/ri_typedefs.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/ri_typedefs.h -@@ -0,0 +1,52 @@ -+/*************************************************************************/ /*! 
-+@File -+@Title Resource Information (RI) Management -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Client side part of RI management -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef RI_TYPEDEFS_H -+#define RI_TYPEDEFS_H -+ -+#include "img_types.h" -+ -+typedef struct RI_SUBLIST_ENTRY RI_ENTRY; -+typedef RI_ENTRY* RI_HANDLE; -+ -+#endif /* #ifndef RI_TYPEDEFS_H */ -diff --git a/drivers/gpu/drm/img-rogue/rogue_trace_events.h b/drivers/gpu/drm/img-rogue/rogue_trace_events.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/rogue_trace_events.h -@@ -0,0 +1,593 @@ -+/*************************************************************************/ /*! -+@File -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+#undef TRACE_SYSTEM -+#define TRACE_SYSTEM rogue -+ -+#if !defined(ROGUE_TRACE_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ) -+#define ROGUE_TRACE_EVENTS_H -+ -+#include -+#include -+#include -+#include -+ -+#define show_secs_from_ns(ns) \ -+ ({ \ -+ u64 t = ns + (NSEC_PER_USEC / 2); \ -+ do_div(t, NSEC_PER_SEC); \ -+ t; \ -+ }) -+ -+#define show_usecs_from_ns(ns) \ -+ ({ \ -+ u64 t = ns + (NSEC_PER_USEC / 2); \ -+ u32 rem; \ -+ do_div(t, NSEC_PER_USEC); \ -+ rem = do_div(t, USEC_PER_SEC); \ -+ }) -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) -+int trace_fence_update_enabled_callback(void); -+#else -+void trace_fence_update_enabled_callback(void); -+#endif -+void trace_fence_update_disabled_callback(void); -+ -+TRACE_EVENT_FN(rogue_fence_update, -+ -+ TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 gpu_id, u32 ctx_id, u32 offset, -+ u32 sync_fwaddr, u32 sync_value), -+ -+ TP_ARGS(comm, cmd, dm, gpu_id, ctx_id, offset, sync_fwaddr, sync_value), -+ -+ TP_STRUCT__entry( -+ __string( comm, comm ) -+ __string( cmd, cmd ) -+ __string( dm, dm ) -+ __field( u32, gpu_id ) -+ __field( u32, ctx_id ) -+ __field( u32, offset ) -+ __field( u32, sync_fwaddr ) -+ __field( u32, sync_value ) -+ ), -+ -+ TP_fast_assign( -+ __assign_str(comm, comm); -+ __assign_str(cmd, cmd); -+ __assign_str(dm, dm); -+ __entry->gpu_id = gpu_id; -+ __entry->ctx_id = ctx_id; -+ __entry->offset = offset; -+ __entry->sync_fwaddr = sync_fwaddr; -+ __entry->sync_value = sync_value; -+ ), -+ -+ TP_printk("comm=%s cmd=%s dm=%s gpu=%lu ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx", -+ __get_str(comm), -+ __get_str(cmd), -+ __get_str(dm), -+ (unsigned long) __entry->gpu_id, -+ (unsigned long) __entry->ctx_id, -+ (unsigned long) __entry->offset, -+ (unsigned long) __entry->sync_fwaddr, -+ (unsigned long) __entry->sync_value -+ ), -+ -+ trace_fence_update_enabled_callback, -+ trace_fence_update_disabled_callback -+); -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) -+int trace_fence_check_enabled_callback(void); -+#else -+void trace_fence_check_enabled_callback(void); -+#endif -+void trace_fence_check_disabled_callback(void); -+ -+TRACE_EVENT_FN(rogue_fence_check, -+ -+ TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 gpu_id, u32 ctx_id, u32 offset, -+ u32 sync_fwaddr, u32 sync_value), -+ -+ TP_ARGS(comm, cmd, dm, gpu_id, ctx_id, offset, sync_fwaddr, sync_value), -+ -+ TP_STRUCT__entry( -+ __string( comm, comm ) -+ __string( cmd, cmd ) -+ __string( dm, dm ) -+ __field( u32, gpu_id ) -+ __field( u32, ctx_id ) -+ __field( u32, offset ) -+ __field( u32, sync_fwaddr ) -+ __field( u32, sync_value ) -+ ), -+ -+ TP_fast_assign( -+ __assign_str(comm, comm); -+ __assign_str(cmd, cmd); -+ __assign_str(dm, dm); -+ __entry->gpu_id = gpu_id; -+ __entry->ctx_id = ctx_id; -+ __entry->offset = offset; -+ __entry->sync_fwaddr = sync_fwaddr; -+ __entry->sync_value = sync_value; -+ ), -+ -+ TP_printk("comm=%s cmd=%s dm=%s gpu=%lu ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx", -+ __get_str(comm), -+ __get_str(cmd), -+ __get_str(dm), -+ (unsigned long)__entry->gpu_id, -+ (unsigned long)__entry->ctx_id, -+ (unsigned long)__entry->offset, -+ (unsigned long)__entry->sync_fwaddr, -+ (unsigned long)__entry->sync_value -+ ), -+ -+ trace_fence_check_enabled_callback, -+ trace_fence_check_disabled_callback -+); -+ -+TRACE_EVENT(rogue_job_enqueue, -+ -+ TP_PROTO(u32 gpu_id, u32 ctx_id, u32 int_id, u32 ext_id, -+ 
const char *kick_type), -+ -+ TP_ARGS(gpu_id, ctx_id, int_id, ext_id, kick_type), -+ -+ TP_STRUCT__entry( -+ __field(u32, gpu_id) -+ __field(u32, ctx_id) -+ __field(u32, int_id) -+ __field(u32, ext_id) -+ __string(kick_type, kick_type) -+ ), -+ -+ TP_fast_assign( -+ __entry->gpu_id = gpu_id; -+ __entry->ctx_id = ctx_id; -+ __entry->int_id = int_id; -+ __entry->ext_id = ext_id; -+ __assign_str(kick_type, kick_type); -+ ), -+ -+ TP_printk("gpu=%lu, ctx_id=%lu int_id=%lu ext_id=%lu kick_type=%s", -+ (unsigned long) __entry->gpu_id, -+ (unsigned long) __entry->ctx_id, -+ (unsigned long) __entry->int_id, -+ (unsigned long) __entry->ext_id, -+ __get_str(kick_type) -+ ) -+); -+ -+TRACE_EVENT(rogue_sched_switch, -+ -+ TP_PROTO(const char *work_type, u32 switch_type, u64 timestamp, u32 gpu_id, u32 next_ctx_id, -+ u32 next_prio, u32 next_int_id, u32 next_ext_id), -+ -+ TP_ARGS(work_type, switch_type, timestamp, gpu_id, next_ctx_id, next_prio, next_int_id, -+ next_ext_id), -+ -+ TP_STRUCT__entry( -+ __string(work_type, work_type) -+ __field(u32, switch_type) -+ __field(u64, timestamp) -+ __field(u32, gpu_id) -+ __field(u32, next_ctx_id) -+ __field(u32, next_prio) -+ __field(u32, next_int_id) -+ __field(u32, next_ext_id) -+ ), -+ -+ TP_fast_assign( -+ __assign_str(work_type, work_type); -+ __entry->switch_type = switch_type; -+ __entry->timestamp = timestamp; -+ __entry->gpu_id = gpu_id; -+ __entry->next_ctx_id = next_ctx_id; -+ __entry->next_prio = next_prio; -+ __entry->next_int_id = next_int_id; -+ __entry->next_ext_id = next_ext_id; -+ ), -+ -+ TP_printk("ts=%llu.%06lu gpu=%lu next_ctx_id=%lu next_int_id=%lu next_ext_id=%lu" -+ " next_prio=%lu work_type=%s switch_type=%s", -+ (unsigned long long) show_secs_from_ns(__entry->timestamp), -+ (unsigned long) show_usecs_from_ns(__entry->timestamp), -+ (unsigned long) __entry->gpu_id, -+ (unsigned long) __entry->next_ctx_id, -+ (unsigned long) __entry->next_int_id, -+ (unsigned long) __entry->next_ext_id, -+ (unsigned long) __entry->next_prio, -+ __get_str(work_type), -+ __print_symbolic(__entry->switch_type, -+ /* These values are from ospvr_gputrace.h. 
*/ -+ { 1, "begin" }, -+ { 2, "end" }) -+ ) -+); -+ -+TRACE_EVENT(rogue_create_fw_context, -+ -+ TP_PROTO(const char *comm, const char *dm, u32 gpu_id, u32 ctx_id), -+ -+ TP_ARGS(comm, dm, gpu_id, ctx_id), -+ -+ TP_STRUCT__entry( -+ __string( comm, comm ) -+ __string( dm, dm ) -+ __field( u32, gpu_id ) -+ __field( u32, ctx_id ) -+ ), -+ -+ TP_fast_assign( -+ __assign_str(comm, comm); -+ __assign_str(dm, dm); -+ __entry->gpu_id = gpu_id; -+ __entry->ctx_id = ctx_id; -+ ), -+ -+ TP_printk("comm=%s dm=%s gpu=%lu ctx_id=%lu", -+ __get_str(comm), -+ __get_str(dm), -+ (unsigned long) __entry->gpu_id, -+ (unsigned long) __entry->ctx_id -+ ) -+); -+ -+void PVRGpuTraceEnableUfoCallback(void); -+void PVRGpuTraceDisableUfoCallback(void); -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) -+int PVRGpuTraceEnableUfoCallbackWrapper(void); -+#else -+#define PVRGpuTraceEnableUfoCallbackWrapper \ -+ PVRGpuTraceEnableUfoCallback -+#endif -+ -+TRACE_EVENT_FN(rogue_ufo_update, -+ -+ TP_PROTO(u64 timestamp, u32 gpu_id, u32 ctx_id, u32 int_id, u32 ext_id, -+ u32 fwaddr, u32 old_value, u32 new_value), -+ -+ TP_ARGS(timestamp, gpu_id, ctx_id, int_id, ext_id, fwaddr, old_value, -+ new_value), -+ -+ TP_STRUCT__entry( -+ __field( u64, timestamp ) -+ __field( u32, gpu_id ) -+ __field( u32, ctx_id ) -+ __field( u32, int_id ) -+ __field( u32, ext_id ) -+ __field( u32, fwaddr ) -+ __field( u32, old_value ) -+ __field( u32, new_value ) -+ ), -+ -+ TP_fast_assign( -+ __entry->timestamp = timestamp; -+ __entry->gpu_id = gpu_id; -+ __entry->ctx_id = ctx_id; -+ __entry->int_id = int_id; -+ __entry->ext_id = ext_id; -+ __entry->fwaddr = fwaddr; -+ __entry->old_value = old_value; -+ __entry->new_value = new_value; -+ ), -+ -+ TP_printk("ts=%llu.%06lu gpu=%lu ctx_id=%lu int_id=%lu ext_id=%lu" -+ " fwaddr=%#lx old_value=%#lx new_value=%#lx", -+ (unsigned long long)show_secs_from_ns(__entry->timestamp), -+ (unsigned long)show_usecs_from_ns(__entry->timestamp), -+ (unsigned long) __entry->gpu_id, -+ (unsigned long) __entry->ctx_id, -+ (unsigned long) __entry->int_id, -+ (unsigned long) __entry->ext_id, -+ (unsigned long) __entry->fwaddr, -+ (unsigned long) __entry->old_value, -+ (unsigned long) __entry->new_value -+ ), -+ -+ PVRGpuTraceEnableUfoCallbackWrapper, -+ PVRGpuTraceDisableUfoCallback -+); -+ -+TRACE_EVENT_FN(rogue_ufo_check_fail, -+ -+ TP_PROTO(u64 timestamp, u32 gpu_id, u32 ctx_id, u32 int_id, u32 ext_id, -+ u32 fwaddr, u32 value, u32 required), -+ -+ TP_ARGS(timestamp, gpu_id, ctx_id, int_id, ext_id, fwaddr, value, required), -+ -+ TP_STRUCT__entry( -+ __field( u64, timestamp ) -+ __field( u32, gpu_id ) -+ __field( u32, ctx_id ) -+ __field( u32, int_id ) -+ __field( u32, ext_id ) -+ __field( u32, fwaddr ) -+ __field( u32, value ) -+ __field( u32, required ) -+ ), -+ -+ TP_fast_assign( -+ __entry->timestamp = timestamp; -+ __entry->gpu_id = gpu_id; -+ __entry->ctx_id = ctx_id; -+ __entry->int_id = int_id; -+ __entry->ext_id = ext_id; -+ __entry->fwaddr = fwaddr; -+ __entry->value = value; -+ __entry->required = required; -+ ), -+ -+ TP_printk("ts=%llu.%06lu gpu=%lu ctx_id=%lu int_id=%lu ext_id=%lu" -+ " fwaddr=%#lx value=%#lx required=%#lx", -+ (unsigned long long)show_secs_from_ns(__entry->timestamp), -+ (unsigned long)show_usecs_from_ns(__entry->timestamp), -+ (unsigned long) __entry->gpu_id, -+ (unsigned long) __entry->ctx_id, -+ (unsigned long) __entry->int_id, -+ (unsigned long) __entry->ext_id, -+ (unsigned long) __entry->fwaddr, -+ (unsigned long) __entry->value, -+ (unsigned long) __entry->required -+ ), -+ 
-+ PVRGpuTraceEnableUfoCallbackWrapper, -+ PVRGpuTraceDisableUfoCallback -+); -+ -+TRACE_EVENT_FN(rogue_ufo_pr_check_fail, -+ -+ TP_PROTO(u64 timestamp, u32 gpu_id, u32 ctx_id, u32 int_id, u32 ext_id, -+ u32 fwaddr, u32 value, u32 required), -+ -+ TP_ARGS(timestamp, gpu_id, ctx_id, int_id, ext_id, fwaddr, value, required), -+ -+ TP_STRUCT__entry( -+ __field( u64, timestamp ) -+ __field( u32, gpu_id ) -+ __field( u32, ctx_id ) -+ __field( u32, int_id ) -+ __field( u32, ext_id ) -+ __field( u32, fwaddr ) -+ __field( u32, value ) -+ __field( u32, required ) -+ ), -+ -+ TP_fast_assign( -+ __entry->timestamp = timestamp; -+ __entry->gpu_id = gpu_id; -+ __entry->ctx_id = ctx_id; -+ __entry->int_id = int_id; -+ __entry->ext_id = ext_id; -+ __entry->fwaddr = fwaddr; -+ __entry->value = value; -+ __entry->required = required; -+ ), -+ -+ TP_printk("ts=%llu.%06lu gpu=%lu ctx_id=%lu int_id=%lu ext_id=%lu" -+ " fwaddr=%#lx value=%#lx required=%#lx", -+ (unsigned long long)show_secs_from_ns(__entry->timestamp), -+ (unsigned long)show_usecs_from_ns(__entry->timestamp), -+ (unsigned long) __entry->gpu_id, -+ (unsigned long) __entry->ctx_id, -+ (unsigned long) __entry->int_id, -+ (unsigned long) __entry->ext_id, -+ (unsigned long) __entry->fwaddr, -+ (unsigned long) __entry->value, -+ (unsigned long) __entry->required -+ ), -+ -+ PVRGpuTraceEnableUfoCallbackWrapper, -+ PVRGpuTraceDisableUfoCallback -+); -+ -+TRACE_EVENT_FN(rogue_ufo_check_success, -+ -+ TP_PROTO(u64 timestamp, u32 gpu_id, u32 ctx_id, u32 int_id, u32 ext_id, -+ u32 fwaddr, u32 value), -+ -+ TP_ARGS(timestamp, gpu_id, ctx_id, int_id, ext_id, fwaddr, value), -+ -+ TP_STRUCT__entry( -+ __field( u64, timestamp ) -+ __field( u32, gpu_id ) -+ __field( u32, ctx_id ) -+ __field( u32, int_id ) -+ __field( u32, ext_id ) -+ __field( u32, fwaddr ) -+ __field( u32, value ) -+ ), -+ -+ TP_fast_assign( -+ __entry->timestamp = timestamp; -+ __entry->gpu_id = gpu_id; -+ __entry->ctx_id = ctx_id; -+ __entry->int_id = int_id; -+ __entry->ext_id = ext_id; -+ __entry->fwaddr = fwaddr; -+ __entry->value = value; -+ ), -+ -+ TP_printk("ts=%llu.%06lu gpu=%lu ctx_id=%lu int_id=%lu ext_id=%lu" -+ " fwaddr=%#lx value=%#lx", -+ (unsigned long long)show_secs_from_ns(__entry->timestamp), -+ (unsigned long)show_usecs_from_ns(__entry->timestamp), -+ (unsigned long) __entry->gpu_id, -+ (unsigned long) __entry->ctx_id, -+ (unsigned long) __entry->int_id, -+ (unsigned long) __entry->ext_id, -+ (unsigned long) __entry->fwaddr, -+ (unsigned long) __entry->value -+ ), -+ -+ PVRGpuTraceEnableUfoCallbackWrapper, -+ PVRGpuTraceDisableUfoCallback -+); -+ -+TRACE_EVENT_FN(rogue_ufo_pr_check_success, -+ -+ TP_PROTO(u64 timestamp, u32 gpu_id, u32 ctx_id, u32 int_id, u32 ext_id, -+ u32 fwaddr, u32 value), -+ -+ TP_ARGS(timestamp, gpu_id, ctx_id, int_id, ext_id, fwaddr, value), -+ -+ TP_STRUCT__entry( -+ __field( u64, timestamp ) -+ __field( u32, gpu_id ) -+ __field( u32, ctx_id ) -+ __field( u32, int_id ) -+ __field( u32, ext_id ) -+ __field( u32, fwaddr ) -+ __field( u32, value ) -+ ), -+ -+ TP_fast_assign( -+ __entry->timestamp = timestamp; -+ __entry->gpu_id = gpu_id; -+ __entry->ctx_id = ctx_id; -+ __entry->int_id = int_id; -+ __entry->ext_id = ext_id; -+ __entry->fwaddr = fwaddr; -+ __entry->value = value; -+ ), -+ -+ TP_printk("ts=%llu.%06lu gpu=%lu ctx_id=%lu int_id=%lu ext_id=%lu" -+ " fwaddr=%#lx value=%#lx", -+ (unsigned long long)show_secs_from_ns(__entry->timestamp), -+ (unsigned long)show_usecs_from_ns(__entry->timestamp), -+ (unsigned long) __entry->gpu_id, -+ (unsigned 
long) __entry->ctx_id, -+ (unsigned long) __entry->int_id, -+ (unsigned long) __entry->ext_id, -+ (unsigned long) __entry->fwaddr, -+ (unsigned long) __entry->value -+ ), -+ -+ PVRGpuTraceEnableUfoCallbackWrapper, -+ PVRGpuTraceDisableUfoCallback -+); -+ -+TRACE_EVENT(rogue_events_lost, -+ -+ TP_PROTO(u32 event_source, u32 gpu_id, u32 last_ordinal, u32 curr_ordinal), -+ -+ TP_ARGS(event_source, gpu_id, last_ordinal, curr_ordinal), -+ -+ TP_STRUCT__entry( -+ __field( u32, event_source ) -+ __field( u32, gpu_id ) -+ __field( u32, last_ordinal ) -+ __field( u32, curr_ordinal ) -+ ), -+ -+ TP_fast_assign( -+ __entry->event_source = event_source; -+ __entry->gpu_id = gpu_id; -+ __entry->last_ordinal = last_ordinal; -+ __entry->curr_ordinal = curr_ordinal; -+ ), -+ -+ TP_printk("event_source=%s gpu=%u last_ordinal=%u curr_ordinal=%u", -+ __print_symbolic(__entry->event_source, {0, "GPU"}, {1, "Host"}), -+ __entry->gpu_id, -+ __entry->last_ordinal, -+ __entry->curr_ordinal) -+); -+ -+void PVRGpuTraceEnableFirmwareActivityCallback(void); -+void PVRGpuTraceDisableFirmwareActivityCallback(void); -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) -+int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void); -+#else -+#define PVRGpuTraceEnableFirmwareActivityCallbackWrapper \ -+ PVRGpuTraceEnableFirmwareActivityCallback -+#endif -+ -+TRACE_EVENT_FN(rogue_firmware_activity, -+ -+ TP_PROTO(u64 timestamp, u32 gpu_id, const char *task, u32 fw_event), -+ -+ TP_ARGS(timestamp, gpu_id, task, fw_event), -+ -+ TP_STRUCT__entry( -+ __field( u64, timestamp ) -+ __field( u32, gpu_id ) -+ __string( task, task ) -+ __field( u32, fw_event ) -+ ), -+ -+ TP_fast_assign( -+ __entry->timestamp = timestamp; -+ __entry->gpu_id = gpu_id, -+ __assign_str(task, task); -+ __entry->fw_event = fw_event; -+ ), -+ -+ TP_printk("ts=%llu.%06lu gpu=%lu task=%s event=%s", -+ (unsigned long long) show_secs_from_ns(__entry->timestamp), -+ (unsigned long) show_usecs_from_ns(__entry->timestamp), -+ (unsigned long) __entry->gpu_id, -+ __get_str(task), -+ __print_symbolic(__entry->fw_event, -+ /* These values are from ospvr_gputrace.h. */ -+ { 1, "begin" }, -+ { 2, "end" })), -+ -+ PVRGpuTraceEnableFirmwareActivityCallbackWrapper, -+ PVRGpuTraceDisableFirmwareActivityCallback -+); -+ -+#undef show_secs_from_ns -+#undef show_usecs_from_ns -+ -+#endif /* ROGUE_TRACE_EVENTS_H */ -+ -+#undef TRACE_INCLUDE_PATH -+#undef TRACE_INCLUDE_FILE -+#define TRACE_INCLUDE_PATH . -+ -+/* This is needed because the name of this file doesn't match TRACE_SYSTEM. */ -+#define TRACE_INCLUDE_FILE rogue_trace_events -+ -+/* This part must be outside protection */ -+#include -diff --git a/drivers/gpu/drm/img-rogue/server_cache_bridge.c b/drivers/gpu/drm/img-rogue/server_cache_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_cache_bridge.c -@@ -0,0 +1,449 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for cache -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for cache -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "cache_km.h" -+ -+#include "common_cache_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static_assert(CACHE_BATCH_MAX <= IMG_UINT32_MAX, -+ "CACHE_BATCH_MAX must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psCacheOpQueueIN_UI8, -+ IMG_UINT8 * psCacheOpQueueOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_CACHEOPQUEUE *psCacheOpQueueIN = -+ (PVRSRV_BRIDGE_IN_CACHEOPQUEUE *) IMG_OFFSET_ADDR(psCacheOpQueueIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *psCacheOpQueueOUT = -+ (PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *) IMG_OFFSET_ADDR(psCacheOpQueueOUT_UI8, 0); -+ -+ PMR **psPMRInt = NULL; -+ IMG_HANDLE *hPMRInt2 = NULL; -+ IMG_UINT64 *ui64AddressInt = NULL; -+ IMG_DEVMEM_OFFSET_T *uiOffsetInt = NULL; -+ IMG_DEVMEM_SIZE_T *uiSizeInt = NULL; -+ PVRSRV_CACHE_OP *iuCacheOpInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *)) + -+ ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) + -+ ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) + -+ ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) + -+ ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) + -+ ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) + 0; -+ -+ if (unlikely(psCacheOpQueueIN->ui32NumCacheOps > CACHE_BATCH_MAX)) -+ { -+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto CacheOpQueue_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto CacheOpQueue_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psCacheOpQueueIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psCacheOpQueueIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto CacheOpQueue_exit; -+ } -+ } -+ } -+ -+ if (psCacheOpQueueIN->ui32NumCacheOps != 0) -+ { -+ psPMRInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ OSCachedMemSet(psPMRInt, 0, psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *)); -+ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *); -+ hPMRInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE); -+ } -+ -+ /* Copy the data over */ -+ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, hPMRInt2, (const void __user *)psCacheOpQueueIN->phPMR, -+ psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) != PVRSRV_OK) -+ { -+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto CacheOpQueue_exit; -+ } -+ } -+ if (psCacheOpQueueIN->ui32NumCacheOps != 0) -+ { -+ ui64AddressInt = (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64); -+ } -+ -+ /* Copy the data over */ -+ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui64AddressInt, (const void __user *)psCacheOpQueueIN->pui64Address, -+ psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) != PVRSRV_OK) -+ { -+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto CacheOpQueue_exit; -+ } -+ } -+ if (psCacheOpQueueIN->ui32NumCacheOps != 0) -+ { -+ uiOffsetInt = -+ (IMG_DEVMEM_OFFSET_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T); -+ } -+ -+ /* Copy the data over */ -+ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiOffsetInt, (const void __user *)psCacheOpQueueIN->puiOffset, -+ psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) != PVRSRV_OK) -+ { -+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto CacheOpQueue_exit; -+ } -+ } -+ if (psCacheOpQueueIN->ui32NumCacheOps != 0) -+ { -+ uiSizeInt = (IMG_DEVMEM_SIZE_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T); -+ } -+ -+ /* Copy the data over */ -+ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiSizeInt, (const void __user *)psCacheOpQueueIN->puiSize, -+ psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) != PVRSRV_OK) -+ { -+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto CacheOpQueue_exit; -+ } -+ } -+ if (psCacheOpQueueIN->ui32NumCacheOps != 0) -+ { -+ iuCacheOpInt = -+ (PVRSRV_CACHE_OP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP); -+ } -+ -+ /* Copy the data over */ -+ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, iuCacheOpInt, (const void __user 
*)psCacheOpQueueIN->piuCacheOp, -+ psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) != PVRSRV_OK) -+ { -+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto CacheOpQueue_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++) -+ { -+ /* Look up the address from the handle */ -+ psCacheOpQueueOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRInt[i], -+ hPMRInt2[i], -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psCacheOpQueueOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto CacheOpQueue_exit; -+ } -+ } -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psCacheOpQueueOUT->eError = -+ CacheOpQueue(psConnection, OSGetDevNode(psConnection), -+ psCacheOpQueueIN->ui32NumCacheOps, -+ psPMRInt, -+ ui64AddressInt, -+ uiOffsetInt, uiSizeInt, iuCacheOpInt, psCacheOpQueueIN->ui32OpTimeline); -+ -+CacheOpQueue_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ if (hPMRInt2) -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++) -+ { -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRInt && psPMRInt[i]) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMRInt2[i], -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ } -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psCacheOpQueueOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeCacheOpExec(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psCacheOpExecIN_UI8, -+ IMG_UINT8 * psCacheOpExecOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_CACHEOPEXEC *psCacheOpExecIN = -+ (PVRSRV_BRIDGE_IN_CACHEOPEXEC *) IMG_OFFSET_ADDR(psCacheOpExecIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_CACHEOPEXEC *psCacheOpExecOUT = -+ (PVRSRV_BRIDGE_OUT_CACHEOPEXEC *) IMG_OFFSET_ADDR(psCacheOpExecOUT_UI8, 0); -+ -+ IMG_HANDLE hPMR = psCacheOpExecIN->hPMR; -+ PMR *psPMRInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psCacheOpExecOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRInt, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psCacheOpExecOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto CacheOpExec_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psCacheOpExecOUT->eError = -+ CacheOpValExec(psPMRInt, -+ psCacheOpExecIN->ui64Address, -+ psCacheOpExecIN->uiOffset, -+ psCacheOpExecIN->uiSize, psCacheOpExecIN->iuCacheOp); -+ -+CacheOpExec_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeCacheOpLog(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psCacheOpLogIN_UI8, -+ IMG_UINT8 * psCacheOpLogOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_CACHEOPLOG *psCacheOpLogIN = -+ (PVRSRV_BRIDGE_IN_CACHEOPLOG *) IMG_OFFSET_ADDR(psCacheOpLogIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_CACHEOPLOG *psCacheOpLogOUT = -+ (PVRSRV_BRIDGE_OUT_CACHEOPLOG *) IMG_OFFSET_ADDR(psCacheOpLogOUT_UI8, 0); -+ -+ IMG_HANDLE hPMR = psCacheOpLogIN->hPMR; -+ PMR *psPMRInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psCacheOpLogOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRInt, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psCacheOpLogOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto CacheOpLog_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psCacheOpLogOUT->eError = -+ CacheOpLog(psPMRInt, -+ psCacheOpLogIN->ui64Address, -+ psCacheOpLogIN->uiOffset, -+ psCacheOpLogIN->uiSize, -+ psCacheOpLogIN->i64StartTime, -+ psCacheOpLogIN->i64EndTime, psCacheOpLogIN->iuCacheOp); -+ -+CacheOpLog_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+PVRSRV_ERROR InitCACHEBridge(void); -+void DeinitCACHEBridge(void); -+ -+/* -+ * Register all CACHE functions with services -+ */ -+PVRSRV_ERROR InitCACHEBridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE, -+ PVRSRVBridgeCacheOpQueue, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPEXEC, -+ PVRSRVBridgeCacheOpExec, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPLOG, -+ PVRSRVBridgeCacheOpLog, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all cache functions with services -+ */ -+void DeinitCACHEBridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPEXEC); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPLOG); -+ -+} -diff --git a/drivers/gpu/drm/img-rogue/server_cmm_bridge.c b/drivers/gpu/drm/img-rogue/server_cmm_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_cmm_bridge.c -@@ -0,0 +1,409 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for cmm -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for cmm -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "pmr.h" -+#include "devicemem_server.h" -+ -+#include "common_cmm_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+#if !defined(EXCLUDE_CMM_BRIDGE) -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static PVRSRV_ERROR _DevmemIntExportCtxpsContextExportIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = DevmemIntUnexportCtx((DEVMEMINT_CTX_EXPORT *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemIntExportCtx(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemIntExportCtxIN_UI8, -+ IMG_UINT8 * psDevmemIntExportCtxOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *) IMG_OFFSET_ADDR(psDevmemIntExportCtxIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *) IMG_OFFSET_ADDR(psDevmemIntExportCtxOUT_UI8, -+ 0); -+ -+ IMG_HANDLE hContext = psDevmemIntExportCtxIN->hContext; -+ DEVMEMINT_CTX *psContextInt = NULL; -+ IMG_HANDLE hPMR = psDevmemIntExportCtxIN->hPMR; -+ PMR *psPMRInt = NULL; -+ DEVMEMINT_CTX_EXPORT *psContextExportInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDevmemIntExportCtxOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psContextInt, -+ hContext, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); -+ if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntExportCtx_exit; -+ } -+ -+ /* Look up the address from the handle */ -+ psDevmemIntExportCtxOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRInt, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntExportCtx_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntExportCtxOUT->eError = -+ DevmemIntExportCtx(psContextInt, psPMRInt, &psContextExportInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)) -+ { -+ goto DevmemIntExportCtx_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntExportCtxOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psDevmemIntExportCtxOUT-> -+ hContextExport, -+ (void *)psContextExportInt, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE, -+ (PFN_HANDLE_RELEASE) & -+ _DevmemIntExportCtxpsContextExportIntRelease); -+ if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntExportCtx_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+DevmemIntExportCtx_exit: -+ -+ /* Lock over handle lookup cleanup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hContext, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); -+ } -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psDevmemIntExportCtxOUT->eError != PVRSRV_OK) -+ { -+ if (psContextExportInt) -+ { -+ LockHandle(KERNEL_HANDLE_BASE); -+ DevmemIntUnexportCtx(psContextExportInt); -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemIntUnexportCtx(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemIntUnexportCtxIN_UI8, -+ IMG_UINT8 * psDevmemIntUnexportCtxOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *) IMG_OFFSET_ADDR(psDevmemIntUnexportCtxIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *) -+ IMG_OFFSET_ADDR(psDevmemIntUnexportCtxOUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntUnexportCtxOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psDevmemIntUnexportCtxIN->hContextExport, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT); -+ if (unlikely((psDevmemIntUnexportCtxOUT->eError != PVRSRV_OK) && -+ (psDevmemIntUnexportCtxOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psDevmemIntUnexportCtxOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psDevmemIntUnexportCtxOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntUnexportCtx_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+DevmemIntUnexportCtx_exit: -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _DevmemIntAcquireRemoteCtxpsContextIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemIntAcquireRemoteCtx(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemIntAcquireRemoteCtxIN_UI8, -+ IMG_UINT8 * psDevmemIntAcquireRemoteCtxOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX *psDevmemIntAcquireRemoteCtxIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX *) -+ IMG_OFFSET_ADDR(psDevmemIntAcquireRemoteCtxIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX *psDevmemIntAcquireRemoteCtxOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX *) -+ IMG_OFFSET_ADDR(psDevmemIntAcquireRemoteCtxOUT_UI8, 0); -+ -+ IMG_HANDLE hPMR = psDevmemIntAcquireRemoteCtxIN->hPMR; -+ PMR *psPMRInt = NULL; -+ DEVMEMINT_CTX *psContextInt = NULL; -+ IMG_HANDLE hPrivDataInt = NULL; -+ -+ psDevmemIntAcquireRemoteCtxOUT->hContext = NULL; -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDevmemIntAcquireRemoteCtxOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRInt, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntAcquireRemoteCtx_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntAcquireRemoteCtxOUT->eError = -+ DevmemIntAcquireRemoteCtx(psPMRInt, &psContextInt, &hPrivDataInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) -+ { -+ goto DevmemIntAcquireRemoteCtx_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntAcquireRemoteCtxOUT->eError = -+ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psDevmemIntAcquireRemoteCtxOUT->hContext, -+ (void *)psContextInt, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE, -+ (PFN_HANDLE_RELEASE) & -+ _DevmemIntAcquireRemoteCtxpsContextIntRelease); -+ if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntAcquireRemoteCtx_exit; -+ } -+ -+ psDevmemIntAcquireRemoteCtxOUT->eError = -+ PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, -+ &psDevmemIntAcquireRemoteCtxOUT->hPrivData, -+ (void *)hPrivDataInt, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE, -+ psDevmemIntAcquireRemoteCtxOUT->hContext); -+ if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntAcquireRemoteCtx_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+DevmemIntAcquireRemoteCtx_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK) -+ { -+ if (psDevmemIntAcquireRemoteCtxOUT->hContext) -+ { -+ PVRSRV_ERROR eError; -+ -+ /* Lock over handle creation cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) -+ psDevmemIntAcquireRemoteCtxOUT-> -+ hContext, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); -+ if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", __func__, PVRSRVGetErrorString(eError))); -+ } -+ /* Releasing the handle should free/destroy/release the resource. -+ * This should never fail... */ -+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); -+ -+ /* Avoid freeing/destroying/releasing the resource a second time below */ -+ psContextInt = NULL; -+ /* Release now we have cleaned up creation handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ } -+ -+ if (psContextInt) -+ { -+ DevmemIntCtxDestroy(psContextInt); -+ } -+ } -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+#endif /* EXCLUDE_CMM_BRIDGE */ -+ -+#if !defined(EXCLUDE_CMM_BRIDGE) -+PVRSRV_ERROR InitCMMBridge(void); -+void DeinitCMMBridge(void); -+ -+/* -+ * Register all CMM functions with services -+ */ -+PVRSRV_ERROR InitCMMBridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX, -+ PVRSRVBridgeDevmemIntExportCtx, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX, -+ PVRSRVBridgeDevmemIntUnexportCtx, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX, -+ PVRSRVBridgeDevmemIntAcquireRemoteCtx, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all cmm functions with services -+ */ -+void DeinitCMMBridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX); -+ -+} -+#else /* EXCLUDE_CMM_BRIDGE */ -+/* This bridge is conditional on EXCLUDE_CMM_BRIDGE - when defined, -+ * do not populate the dispatch table with its functions -+ */ -+#define InitCMMBridge() \ -+ PVRSRV_OK -+ -+#define DeinitCMMBridge() -+ -+#endif /* EXCLUDE_CMM_BRIDGE */ -diff --git a/drivers/gpu/drm/img-rogue/server_devicememhistory_bridge.c b/drivers/gpu/drm/img-rogue/server_devicememhistory_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_devicememhistory_bridge.c -@@ -0,0 +1,826 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for devicememhistory -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for devicememhistory -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "pmr.h" -+#include "devicemem_history_server.h" -+ -+#include "common_devicememhistory_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+#include "lock.h" -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, -+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevicememHistoryMapIN_UI8, -+ IMG_UINT8 * psDevicememHistoryMapOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *psDevicememHistoryMapIN = -+ (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *) IMG_OFFSET_ADDR(psDevicememHistoryMapIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *psDevicememHistoryMapOUT = -+ (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *) IMG_OFFSET_ADDR(psDevicememHistoryMapOUT_UI8, -+ 0); -+ -+ IMG_HANDLE hPMR = psDevicememHistoryMapIN->hPMR; -+ PMR *psPMRInt = NULL; -+ IMG_CHAR *uiTextInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto DevicememHistoryMap_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psDevicememHistoryMapIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDevicememHistoryMapIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto DevicememHistoryMap_exit; -+ } -+ } -+ } -+ -+ { -+ uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiTextInt, (const void __user *)psDevicememHistoryMapIN->puiText, -+ DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto DevicememHistoryMap_exit; -+ } -+ ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDevicememHistoryMapOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRInt, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psDevicememHistoryMapOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevicememHistoryMap_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDevicememHistoryMapOUT->eError = -+ DevicememHistoryMapKM(psPMRInt, -+ psDevicememHistoryMapIN->uiOffset, -+ psDevicememHistoryMapIN->sDevVAddr, -+ psDevicememHistoryMapIN->uiSize, -+ uiTextInt, -+ psDevicememHistoryMapIN->ui32Log2PageSize, -+ psDevicememHistoryMapIN->ui32AllocationIndex, -+ &psDevicememHistoryMapOUT->ui32AllocationIndexOut); -+ -+DevicememHistoryMap_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psDevicememHistoryMapOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, -+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevicememHistoryUnmapIN_UI8, -+ IMG_UINT8 * psDevicememHistoryUnmapOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapIN = -+ (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *) -+ IMG_OFFSET_ADDR(psDevicememHistoryUnmapIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapOUT = -+ (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *) -+ IMG_OFFSET_ADDR(psDevicememHistoryUnmapOUT_UI8, 0); -+ -+ IMG_HANDLE hPMR = psDevicememHistoryUnmapIN->hPMR; -+ PMR *psPMRInt = NULL; -+ IMG_CHAR *uiTextInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto DevicememHistoryUnmap_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psDevicememHistoryUnmapIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDevicememHistoryUnmapIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto DevicememHistoryUnmap_exit; -+ } -+ } -+ } -+ -+ { -+ uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiTextInt, (const void __user *)psDevicememHistoryUnmapIN->puiText, -+ DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto DevicememHistoryUnmap_exit; -+ } -+ ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; -+ } -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDevicememHistoryUnmapOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRInt, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psDevicememHistoryUnmapOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevicememHistoryUnmap_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDevicememHistoryUnmapOUT->eError = -+ DevicememHistoryUnmapKM(psPMRInt, -+ psDevicememHistoryUnmapIN->uiOffset, -+ psDevicememHistoryUnmapIN->sDevVAddr, -+ psDevicememHistoryUnmapIN->uiSize, -+ uiTextInt, -+ psDevicememHistoryUnmapIN->ui32Log2PageSize, -+ psDevicememHistoryUnmapIN->ui32AllocationIndex, -+ &psDevicememHistoryUnmapOUT->ui32AllocationIndexOut); -+ -+DevicememHistoryUnmap_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psDevicememHistoryUnmapOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, -+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevicememHistoryMapVRangeIN_UI8, -+ IMG_UINT8 * psDevicememHistoryMapVRangeOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE *psDevicememHistoryMapVRangeIN = -+ (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE *) -+ IMG_OFFSET_ADDR(psDevicememHistoryMapVRangeIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE *psDevicememHistoryMapVRangeOUT = -+ (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE *) -+ IMG_OFFSET_ADDR(psDevicememHistoryMapVRangeOUT_UI8, 0); -+ -+ IMG_CHAR *uiTextInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto DevicememHistoryMapVRange_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psDevicememHistoryMapVRangeIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDevicememHistoryMapVRangeIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto DevicememHistoryMapVRange_exit; -+ } -+ } -+ } -+ -+ { -+ uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiTextInt, (const void __user *)psDevicememHistoryMapVRangeIN->puiText, -+ DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto DevicememHistoryMapVRange_exit; -+ } -+ ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; -+ } -+ -+ psDevicememHistoryMapVRangeOUT->eError = -+ DevicememHistoryMapVRangeKM(psConnection, OSGetDevNode(psConnection), -+ psDevicememHistoryMapVRangeIN->sBaseDevVAddr, -+ psDevicememHistoryMapVRangeIN->ui32ui32StartPage, -+ psDevicememHistoryMapVRangeIN->ui32NumPages, -+ psDevicememHistoryMapVRangeIN->uiAllocSize, -+ uiTextInt, -+ psDevicememHistoryMapVRangeIN->ui32Log2PageSize, -+ psDevicememHistoryMapVRangeIN->ui32AllocationIndex, -+ &psDevicememHistoryMapVRangeOUT->ui32AllocationIndexOut); -+ -+DevicememHistoryMapVRange_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psDevicememHistoryMapVRangeOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, -+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevicememHistoryUnmapVRangeIN_UI8, -+ IMG_UINT8 * psDevicememHistoryUnmapVRangeOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE *psDevicememHistoryUnmapVRangeIN = -+ (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE *) -+ IMG_OFFSET_ADDR(psDevicememHistoryUnmapVRangeIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE *psDevicememHistoryUnmapVRangeOUT = -+ (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE *) -+ IMG_OFFSET_ADDR(psDevicememHistoryUnmapVRangeOUT_UI8, 0); -+ -+ IMG_CHAR *uiTextInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psDevicememHistoryUnmapVRangeOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto DevicememHistoryUnmapVRange_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psDevicememHistoryUnmapVRangeIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = -+ (IMG_BYTE *) (void *)psDevicememHistoryUnmapVRangeIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psDevicememHistoryUnmapVRangeOUT->eError = -+ PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto DevicememHistoryUnmapVRange_exit; -+ } -+ } -+ } -+ -+ { -+ uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiTextInt, (const void __user *)psDevicememHistoryUnmapVRangeIN->puiText, -+ DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psDevicememHistoryUnmapVRangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto DevicememHistoryUnmapVRange_exit; -+ } -+ ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; -+ } -+ -+ psDevicememHistoryUnmapVRangeOUT->eError = -+ DevicememHistoryUnmapVRangeKM(psConnection, OSGetDevNode(psConnection), -+ psDevicememHistoryUnmapVRangeIN->sBaseDevVAddr, -+ psDevicememHistoryUnmapVRangeIN->ui32ui32StartPage, -+ psDevicememHistoryUnmapVRangeIN->ui32NumPages, -+ psDevicememHistoryUnmapVRangeIN->uiAllocSize, -+ uiTextInt, -+ psDevicememHistoryUnmapVRangeIN->ui32Log2PageSize, -+ psDevicememHistoryUnmapVRangeIN->ui32AllocationIndex, -+ &psDevicememHistoryUnmapVRangeOUT-> -+ ui32AllocationIndexOut); -+ -+DevicememHistoryUnmapVRange_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psDevicememHistoryUnmapVRangeOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, -+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); -+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX, -+ "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); -+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX, -+ "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevicememHistorySparseChangeIN_UI8, -+ IMG_UINT8 * psDevicememHistorySparseChangeOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE *psDevicememHistorySparseChangeIN = -+ (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE *) -+ IMG_OFFSET_ADDR(psDevicememHistorySparseChangeIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE *psDevicememHistorySparseChangeOUT = -+ (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE *) -+ IMG_OFFSET_ADDR(psDevicememHistorySparseChangeOUT_UI8, 0); -+ -+ IMG_HANDLE hPMR = psDevicememHistorySparseChangeIN->hPMR; -+ PMR *psPMRInt = NULL; -+ IMG_CHAR *uiTextInt = NULL; -+ IMG_UINT32 *ui32AllocPageIndicesInt = NULL; -+ 
IMG_UINT32 *ui32FreePageIndicesInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + -+ ((IMG_UINT64) psDevicememHistorySparseChangeIN->ui32AllocPageCount * -+ sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psDevicememHistorySparseChangeIN->ui32FreePageCount * -+ sizeof(IMG_UINT32)) + 0; -+ -+ if (unlikely -+ (psDevicememHistorySparseChangeIN->ui32AllocPageCount > -+ PMR_MAX_SUPPORTED_4K_PAGE_COUNT)) -+ { -+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto DevicememHistorySparseChange_exit; -+ } -+ -+ if (unlikely -+ (psDevicememHistorySparseChangeIN->ui32FreePageCount > PMR_MAX_SUPPORTED_4K_PAGE_COUNT)) -+ { -+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto DevicememHistorySparseChange_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto DevicememHistorySparseChange_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psDevicememHistorySparseChangeIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = -+ (IMG_BYTE *) (void *)psDevicememHistorySparseChangeIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psDevicememHistorySparseChangeOUT->eError = -+ PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto DevicememHistorySparseChange_exit; -+ } -+ } -+ } -+ -+ { -+ uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiTextInt, -+ (const void __user *)psDevicememHistorySparseChangeIN->puiText, -+ DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto DevicememHistorySparseChange_exit; -+ } -+ ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; -+ } -+ if (psDevicememHistorySparseChangeIN->ui32AllocPageCount != 0) -+ { -+ ui32AllocPageIndicesInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32AllocPageIndicesInt, -+ (const void __user *)psDevicememHistorySparseChangeIN->pui32AllocPageIndices, -+ psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32)) != -+ PVRSRV_OK) -+ { -+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto DevicememHistorySparseChange_exit; -+ } -+ } -+ if 
(psDevicememHistorySparseChangeIN->ui32FreePageCount != 0) -+ { -+ ui32FreePageIndicesInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32FreePageIndicesInt, -+ (const void __user *)psDevicememHistorySparseChangeIN->pui32FreePageIndices, -+ psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32)) != -+ PVRSRV_OK) -+ { -+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto DevicememHistorySparseChange_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDevicememHistorySparseChangeOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRInt, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psDevicememHistorySparseChangeOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevicememHistorySparseChange_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDevicememHistorySparseChangeOUT->eError = -+ DevicememHistorySparseChangeKM(psPMRInt, -+ psDevicememHistorySparseChangeIN->uiOffset, -+ psDevicememHistorySparseChangeIN->sDevVAddr, -+ psDevicememHistorySparseChangeIN->uiSize, -+ uiTextInt, -+ psDevicememHistorySparseChangeIN->ui32Log2PageSize, -+ psDevicememHistorySparseChangeIN->ui32AllocPageCount, -+ ui32AllocPageIndicesInt, -+ psDevicememHistorySparseChangeIN->ui32FreePageCount, -+ ui32FreePageIndicesInt, -+ psDevicememHistorySparseChangeIN->ui32AllocationIndex, -+ &psDevicememHistorySparseChangeOUT-> -+ ui32AllocationIndexOut); -+ -+DevicememHistorySparseChange_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psDevicememHistorySparseChangeOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+static POS_LOCK pDEVICEMEMHISTORYBridgeLock; -+ -+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void); -+void DeinitDEVICEMEMHISTORYBridge(void); -+ -+/* -+ * Register all DEVICEMEMHISTORY functions with services -+ */ -+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void) -+{ -+ PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pDEVICEMEMHISTORYBridgeLock), "OSLockCreate"); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, -+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP, -+ PVRSRVBridgeDevicememHistoryMap, pDEVICEMEMHISTORYBridgeLock); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, -+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP, -+ PVRSRVBridgeDevicememHistoryUnmap, pDEVICEMEMHISTORYBridgeLock); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, -+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE, -+ PVRSRVBridgeDevicememHistoryMapVRange, pDEVICEMEMHISTORYBridgeLock); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, -+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE, -+ PVRSRVBridgeDevicememHistoryUnmapVRange, pDEVICEMEMHISTORYBridgeLock); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, -+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE, -+ PVRSRVBridgeDevicememHistorySparseChange, -+ pDEVICEMEMHISTORYBridgeLock); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all devicememhistory functions with services -+ */ -+void DeinitDEVICEMEMHISTORYBridge(void) -+{ -+ OSLockDestroy(pDEVICEMEMHISTORYBridgeLock); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, -+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, -+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, -+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, -+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, -+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE); -+ -+} -diff --git a/drivers/gpu/drm/img-rogue/server_di_bridge.c b/drivers/gpu/drm/img-rogue/server_di_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_di_bridge.c -@@ -0,0 +1,618 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for di -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for di -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "di_impl_brg.h" -+ -+#include "common_di_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static PVRSRV_ERROR _DICreateContextpsContextIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = DIDestroyContextKM((DI_CONTEXT *) pvData); -+ return eError; -+} -+ -+static_assert(PRVSRVTL_MAX_STREAM_NAME_SIZE <= IMG_UINT32_MAX, -+ "PRVSRVTL_MAX_STREAM_NAME_SIZE must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeDICreateContext(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDICreateContextIN_UI8, -+ IMG_UINT8 * psDICreateContextOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DICREATECONTEXT *psDICreateContextIN = -+ (PVRSRV_BRIDGE_IN_DICREATECONTEXT *) IMG_OFFSET_ADDR(psDICreateContextIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DICREATECONTEXT *psDICreateContextOUT = -+ (PVRSRV_BRIDGE_OUT_DICREATECONTEXT *) IMG_OFFSET_ADDR(psDICreateContextOUT_UI8, 0); -+ -+ IMG_CHAR *puiStreamNameInt = NULL; -+ DI_CONTEXT *psContextInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) + 0; -+ -+ PVR_UNREFERENCED_PARAMETER(psDICreateContextIN); -+ -+ psDICreateContextOUT->puiStreamName = psDICreateContextIN->puiStreamName; -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psDICreateContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto DICreateContext_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psDICreateContextIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDICreateContextIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psDICreateContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto DICreateContext_exit; -+ } -+ } -+ } -+ -+ if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL) -+ { -+ puiStreamNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR); -+ } -+ -+ psDICreateContextOUT->eError = DICreateContextKM(puiStreamNameInt, &psContextInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psDICreateContextOUT->eError != PVRSRV_OK)) -+ { -+ goto DICreateContext_exit; -+ } -+ -+ /* Lock over handle creation. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ psDICreateContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psDICreateContextOUT->hContext, -+ (void *)psContextInt, -+ PVRSRV_HANDLE_TYPE_DI_CONTEXT, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE, -+ (PFN_HANDLE_RELEASE) & -+ _DICreateContextpsContextIntRelease); -+ if (unlikely(psDICreateContextOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DICreateContext_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ /* If dest ptr is non-null and we have data to copy */ -+ if ((puiStreamNameInt) && ((PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) > 0)) -+ { -+ if (unlikely -+ (OSCopyToUser -+ (NULL, (void __user *)psDICreateContextOUT->puiStreamName, puiStreamNameInt, -+ (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR))) != PVRSRV_OK)) -+ { -+ psDICreateContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto DICreateContext_exit; -+ } -+ } -+ -+DICreateContext_exit: -+ -+ if (psDICreateContextOUT->eError != PVRSRV_OK) -+ { -+ if (psContextInt) -+ { -+ DIDestroyContextKM(psContextInt); -+ } -+ } -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psDICreateContextOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeDIDestroyContext(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDIDestroyContextIN_UI8, -+ IMG_UINT8 * psDIDestroyContextOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT *psDIDestroyContextIN = -+ (PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT *) IMG_OFFSET_ADDR(psDIDestroyContextIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT *psDIDestroyContextOUT = -+ (PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT *) IMG_OFFSET_ADDR(psDIDestroyContextOUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psDIDestroyContextOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psDIDestroyContextIN->hContext, -+ PVRSRV_HANDLE_TYPE_DI_CONTEXT); -+ if (unlikely((psDIDestroyContextOUT->eError != PVRSRV_OK) && -+ (psDIDestroyContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psDIDestroyContextOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", __func__, PVRSRVGetErrorString(psDIDestroyContextOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto DIDestroyContext_exit; -+ } -+ -+ /* Release now we have destroyed handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+DIDestroyContext_exit: -+ -+ return 0; -+} -+ -+static_assert(DI_IMPL_BRG_PATH_LEN <= IMG_UINT32_MAX, -+ "DI_IMPL_BRG_PATH_LEN must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeDIReadEntry(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDIReadEntryIN_UI8, -+ IMG_UINT8 * psDIReadEntryOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DIREADENTRY *psDIReadEntryIN = -+ (PVRSRV_BRIDGE_IN_DIREADENTRY *) IMG_OFFSET_ADDR(psDIReadEntryIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DIREADENTRY *psDIReadEntryOUT = -+ (PVRSRV_BRIDGE_OUT_DIREADENTRY *) IMG_OFFSET_ADDR(psDIReadEntryOUT_UI8, 0); -+ -+ IMG_HANDLE hContext = psDIReadEntryIN->hContext; -+ DI_CONTEXT *psContextInt = NULL; -+ IMG_CHAR *uiEntryPathInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) + 0; -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psDIReadEntryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto DIReadEntry_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psDIReadEntryIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDIReadEntryIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psDIReadEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto DIReadEntry_exit; -+ } -+ } -+ } -+ -+ { -+ uiEntryPathInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiEntryPathInt, (const void __user *)psDIReadEntryIN->puiEntryPath, -+ DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psDIReadEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto DIReadEntry_exit; -+ } -+ ((IMG_CHAR *) uiEntryPathInt)[(DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDIReadEntryOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psContextInt, -+ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT, IMG_TRUE); -+ if (unlikely(psDIReadEntryOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DIReadEntry_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDIReadEntryOUT->eError = -+ DIReadEntryKM(psContextInt, -+ uiEntryPathInt, psDIReadEntryIN->ui64Offset, psDIReadEntryIN->ui64Size); -+ -+DIReadEntry_exit: -+ -+ /* Lock over handle lookup cleanup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psDIReadEntryOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static_assert(DI_IMPL_BRG_PATH_LEN <= IMG_UINT32_MAX, -+ "DI_IMPL_BRG_PATH_LEN must not be larger than IMG_UINT32_MAX"); -+static_assert(DI_IMPL_BRG_PATH_LEN <= IMG_UINT32_MAX, -+ "DI_IMPL_BRG_PATH_LEN must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeDIWriteEntry(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDIWriteEntryIN_UI8, -+ IMG_UINT8 * psDIWriteEntryOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DIWRITEENTRY *psDIWriteEntryIN = -+ (PVRSRV_BRIDGE_IN_DIWRITEENTRY *) IMG_OFFSET_ADDR(psDIWriteEntryIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DIWRITEENTRY *psDIWriteEntryOUT = -+ (PVRSRV_BRIDGE_OUT_DIWRITEENTRY *) IMG_OFFSET_ADDR(psDIWriteEntryOUT_UI8, 0); -+ -+ IMG_HANDLE hContext = psDIWriteEntryIN->hContext; -+ DI_CONTEXT *psContextInt = NULL; -+ IMG_CHAR *uiEntryPathInt = NULL; -+ IMG_CHAR *uiValueInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) + -+ ((IMG_UINT64) psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR)) + 0; -+ -+ if (unlikely(psDIWriteEntryIN->ui32ValueSize > DI_IMPL_BRG_PATH_LEN)) -+ { -+ psDIWriteEntryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto DIWriteEntry_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psDIWriteEntryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto DIWriteEntry_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psDIWriteEntryIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDIWriteEntryIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psDIWriteEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto DIWriteEntry_exit; -+ } -+ } -+ } -+ -+ { -+ uiEntryPathInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiEntryPathInt, (const void __user *)psDIWriteEntryIN->puiEntryPath, -+ DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psDIWriteEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto DIWriteEntry_exit; -+ } -+ ((IMG_CHAR *) uiEntryPathInt)[(DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; -+ } -+ if (psDIWriteEntryIN->ui32ValueSize != 0) -+ { -+ uiValueInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiValueInt, (const void __user *)psDIWriteEntryIN->puiValue, -+ psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psDIWriteEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto DIWriteEntry_exit; -+ } -+ ((IMG_CHAR *) uiValueInt)[(psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR)) - -+ 1] = '\0'; -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDIWriteEntryOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psContextInt, -+ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT, IMG_TRUE); -+ if (unlikely(psDIWriteEntryOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DIWriteEntry_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDIWriteEntryOUT->eError = -+ DIWriteEntryKM(psContextInt, -+ uiEntryPathInt, psDIWriteEntryIN->ui32ValueSize, uiValueInt); -+ -+DIWriteEntry_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psDIWriteEntryOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeDIListAllEntries(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDIListAllEntriesIN_UI8, -+ IMG_UINT8 * psDIListAllEntriesOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DILISTALLENTRIES *psDIListAllEntriesIN = -+ (PVRSRV_BRIDGE_IN_DILISTALLENTRIES *) IMG_OFFSET_ADDR(psDIListAllEntriesIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DILISTALLENTRIES *psDIListAllEntriesOUT = -+ (PVRSRV_BRIDGE_OUT_DILISTALLENTRIES *) IMG_OFFSET_ADDR(psDIListAllEntriesOUT_UI8, 0); -+ -+ IMG_HANDLE hContext = psDIListAllEntriesIN->hContext; -+ DI_CONTEXT *psContextInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDIListAllEntriesOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psContextInt, -+ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT, IMG_TRUE); -+ if (unlikely(psDIListAllEntriesOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DIListAllEntries_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDIListAllEntriesOUT->eError = DIListAllEntriesKM(psContextInt); -+ -+DIListAllEntries_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+PVRSRV_ERROR InitDIBridge(void); -+void DeinitDIBridge(void); -+ -+/* -+ * Register all DI functions with services -+ */ -+PVRSRV_ERROR InitDIBridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DICREATECONTEXT, -+ PVRSRVBridgeDICreateContext, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIDESTROYCONTEXT, -+ PVRSRVBridgeDIDestroyContext, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIREADENTRY, -+ PVRSRVBridgeDIReadEntry, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIWRITEENTRY, -+ PVRSRVBridgeDIWriteEntry, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DILISTALLENTRIES, -+ PVRSRVBridgeDIListAllEntries, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all di functions with services -+ */ -+void DeinitDIBridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DICREATECONTEXT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIDESTROYCONTEXT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIREADENTRY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIWRITEENTRY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DILISTALLENTRIES); -+ -+} -diff --git a/drivers/gpu/drm/img-rogue/server_dmabuf_bridge.c b/drivers/gpu/drm/img-rogue/server_dmabuf_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_dmabuf_bridge.c -@@ -0,0 +1,670 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for dmabuf -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for dmabuf -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "physmem_dmabuf.h" -+#include "pmr.h" -+ -+#include "common_dmabuf_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static PVRSRV_ERROR _PhysmemImportDmaBufpsPMRPtrIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PMRUnrefPMR((PMR *) pvData); -+ return eError; -+} -+ -+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, -+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psPhysmemImportDmaBufIN_UI8, -+ IMG_UINT8 * psPhysmemImportDmaBufOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufIN = -+ (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemImportDmaBufIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufOUT = -+ (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemImportDmaBufOUT_UI8, -+ 0); -+ -+ IMG_CHAR *uiNameInt = NULL; -+ PMR *psPMRPtrInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) + 0; -+ -+ if (unlikely(psPhysmemImportDmaBufIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN)) -+ { -+ psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto PhysmemImportDmaBuf_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto PhysmemImportDmaBuf_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psPhysmemImportDmaBufIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemImportDmaBufIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto PhysmemImportDmaBuf_exit; -+ } -+ } -+ } -+ -+ if (psPhysmemImportDmaBufIN->ui32NameSize != 0) -+ { -+ uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiNameInt, (const void __user *)psPhysmemImportDmaBufIN->puiName, -+ psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto PhysmemImportDmaBuf_exit; -+ } -+ ((IMG_CHAR *) uiNameInt)[(psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) -+ - 1] = '\0'; -+ } -+ -+ psPhysmemImportDmaBufOUT->eError = -+ PhysmemImportDmaBuf(psConnection, OSGetDevNode(psConnection), -+ psPhysmemImportDmaBufIN->ifd, -+ psPhysmemImportDmaBufIN->uiFlags, -+ psPhysmemImportDmaBufIN->ui32NameSize, -+ uiNameInt, -+ &psPMRPtrInt, -+ &psPhysmemImportDmaBufOUT->uiSize, -+ &psPhysmemImportDmaBufOUT->uiAlign); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)) -+ { -+ goto PhysmemImportDmaBuf_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psPhysmemImportDmaBufOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psPhysmemImportDmaBufOUT-> -+ hPMRPtr, (void *)psPMRPtrInt, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _PhysmemImportDmaBufpsPMRPtrIntRelease); -+ if (unlikely(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto PhysmemImportDmaBuf_exit; -+ } -+ -+ /* Release now we have created handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+PhysmemImportDmaBuf_exit: -+ -+ if (psPhysmemImportDmaBufOUT->eError != PVRSRV_OK) -+ { -+ if (psPMRPtrInt) -+ { -+ LockHandle(KERNEL_HANDLE_BASE); -+ PMRUnrefPMR(psPMRPtrInt); -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ } -+ } -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psPhysmemImportDmaBufOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _PhysmemImportDmaBufLockedpsPMRPtrIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PMRUnrefUnlockPMR((PMR *) pvData); -+ return eError; -+} -+ -+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, -+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgePhysmemImportDmaBufLocked(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psPhysmemImportDmaBufLockedIN_UI8, -+ IMG_UINT8 * psPhysmemImportDmaBufLockedOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED *psPhysmemImportDmaBufLockedIN = -+ (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED *) -+ IMG_OFFSET_ADDR(psPhysmemImportDmaBufLockedIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED *psPhysmemImportDmaBufLockedOUT = -+ (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED *) -+ IMG_OFFSET_ADDR(psPhysmemImportDmaBufLockedOUT_UI8, 0); -+ -+ IMG_CHAR *uiNameInt = NULL; -+ PMR *psPMRPtrInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) + 0; -+ -+ if (unlikely(psPhysmemImportDmaBufLockedIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN)) -+ { -+ psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto PhysmemImportDmaBufLocked_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto PhysmemImportDmaBufLocked_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psPhysmemImportDmaBufLockedIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemImportDmaBufLockedIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto PhysmemImportDmaBufLocked_exit; -+ } -+ } -+ } -+ -+ if (psPhysmemImportDmaBufLockedIN->ui32NameSize != 0) -+ { -+ uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiNameInt, (const void __user *)psPhysmemImportDmaBufLockedIN->puiName, -+ psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto PhysmemImportDmaBufLocked_exit; -+ } -+ ((IMG_CHAR *) -+ uiNameInt)[(psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) - 1] = -+ '\0'; -+ } -+ -+ psPhysmemImportDmaBufLockedOUT->eError = -+ PhysmemImportDmaBufLocked(psConnection, OSGetDevNode(psConnection), -+ psPhysmemImportDmaBufLockedIN->ifd, -+ psPhysmemImportDmaBufLockedIN->uiFlags, -+ psPhysmemImportDmaBufLockedIN->ui32NameSize, -+ uiNameInt, -+ &psPMRPtrInt, -+ &psPhysmemImportDmaBufLockedOUT->uiSize, -+ &psPhysmemImportDmaBufLockedOUT->uiAlign); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK)) -+ { -+ goto PhysmemImportDmaBufLocked_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psPhysmemImportDmaBufLockedOUT->eError = -+ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psPhysmemImportDmaBufLockedOUT->hPMRPtr, (void *)psPMRPtrInt, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _PhysmemImportDmaBufLockedpsPMRPtrIntRelease); -+ if (unlikely(psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto PhysmemImportDmaBufLocked_exit; -+ } -+ -+ /* Release now we have created handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+PhysmemImportDmaBufLocked_exit: -+ -+ if (psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK) -+ { -+ if (psPMRPtrInt) -+ { -+ LockHandle(KERNEL_HANDLE_BASE); -+ PMRUnrefUnlockPMR(psPMRPtrInt); -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ } -+ } -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psPhysmemImportDmaBufLockedOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgePhysmemExportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psPhysmemExportDmaBufIN_UI8, -+ IMG_UINT8 * psPhysmemExportDmaBufOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufIN = -+ (PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemExportDmaBufIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufOUT = -+ (PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemExportDmaBufOUT_UI8, -+ 0); -+ -+ IMG_HANDLE hPMR = psPhysmemExportDmaBufIN->hPMR; -+ PMR *psPMRInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psPhysmemExportDmaBufOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRInt, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psPhysmemExportDmaBufOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto PhysmemExportDmaBuf_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psPhysmemExportDmaBufOUT->eError = -+ PhysmemExportDmaBuf(psConnection, OSGetDevNode(psConnection), -+ psPMRInt, &psPhysmemExportDmaBufOUT->iFd); -+ -+PhysmemExportDmaBuf_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _PhysmemImportSparseDmaBufpsPMRPtrIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PMRUnrefPMR((PMR *) pvData); -+ return eError; -+} -+ -+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX, -+ "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); -+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, -+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psPhysmemImportSparseDmaBufIN_UI8, -+ IMG_UINT8 * psPhysmemImportSparseDmaBufOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF *psPhysmemImportSparseDmaBufIN = -+ (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF *) -+ IMG_OFFSET_ADDR(psPhysmemImportSparseDmaBufIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF *psPhysmemImportSparseDmaBufOUT = -+ (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF *) -+ IMG_OFFSET_ADDR(psPhysmemImportSparseDmaBufOUT_UI8, 0); -+ -+ IMG_UINT32 *ui32MappingTableInt = NULL; -+ IMG_CHAR *uiNameInt = NULL; -+ PMR *psPMRPtrInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) + 0; -+ -+ if (unlikely -+ (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks > PMR_MAX_SUPPORTED_4K_PAGE_COUNT)) -+ { -+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto PhysmemImportSparseDmaBuf_exit; -+ } -+ -+ if (unlikely(psPhysmemImportSparseDmaBufIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN)) -+ { -+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto PhysmemImportSparseDmaBuf_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto PhysmemImportSparseDmaBuf_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psPhysmemImportSparseDmaBufIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemImportSparseDmaBufIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto PhysmemImportSparseDmaBuf_exit; -+ } -+ } -+ } -+ -+ if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks != 0) -+ { -+ ui32MappingTableInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32MappingTableInt, -+ (const void __user *)psPhysmemImportSparseDmaBufIN->pui32MappingTable, -+ psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) != -+ PVRSRV_OK) -+ { -+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto PhysmemImportSparseDmaBuf_exit; -+ } -+ } -+ if (psPhysmemImportSparseDmaBufIN->ui32NameSize != 0) -+ { -+ uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiNameInt, (const void __user *)psPhysmemImportSparseDmaBufIN->puiName, -+ psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto PhysmemImportSparseDmaBuf_exit; -+ } -+ ((IMG_CHAR *) -+ uiNameInt)[(psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) - 1] = -+ '\0'; -+ } -+ -+ psPhysmemImportSparseDmaBufOUT->eError = -+ PhysmemImportSparseDmaBuf(psConnection, OSGetDevNode(psConnection), -+ psPhysmemImportSparseDmaBufIN->ifd, -+ psPhysmemImportSparseDmaBufIN->uiFlags, -+ psPhysmemImportSparseDmaBufIN->uiChunkSize, -+ psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks, -+ psPhysmemImportSparseDmaBufIN->ui32NumVirtChunks, -+ ui32MappingTableInt, -+ psPhysmemImportSparseDmaBufIN->ui32NameSize, -+ uiNameInt, -+ &psPMRPtrInt, -+ &psPhysmemImportSparseDmaBufOUT->uiSize, -+ &psPhysmemImportSparseDmaBufOUT->uiAlign); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)) -+ { -+ goto PhysmemImportSparseDmaBuf_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psPhysmemImportSparseDmaBufOUT->eError = -+ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psPhysmemImportSparseDmaBufOUT->hPMRPtr, (void *)psPMRPtrInt, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _PhysmemImportSparseDmaBufpsPMRPtrIntRelease); -+ if (unlikely(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto PhysmemImportSparseDmaBuf_exit; -+ } -+ -+ /* Release now we have created handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+PhysmemImportSparseDmaBuf_exit: -+ -+ if (psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK) -+ { -+ if (psPMRPtrInt) -+ { -+ LockHandle(KERNEL_HANDLE_BASE); -+ PMRUnrefPMR(psPMRPtrInt); -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ } -+ } -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psPhysmemImportSparseDmaBufOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+PVRSRV_ERROR InitDMABUFBridge(void); -+void DeinitDMABUFBridge(void); -+ -+/* -+ * Register all DMABUF functions with services -+ */ -+PVRSRV_ERROR InitDMABUFBridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF, -+ PVRSRVBridgePhysmemImportDmaBuf, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED, -+ PVRSRVBridgePhysmemImportDmaBufLocked, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF, -+ PVRSRVBridgePhysmemExportDmaBuf, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF, -+ PVRSRVBridgePhysmemImportSparseDmaBuf, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all dmabuf functions with services -+ */ -+void DeinitDMABUFBridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, -+ PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, -+ PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF); -+ -+} -diff --git a/drivers/gpu/drm/img-rogue/server_htbuffer_bridge.c b/drivers/gpu/drm/img-rogue/server_htbuffer_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_htbuffer_bridge.c -@@ -0,0 +1,225 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for htbuffer -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for htbuffer -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "htbserver.h" -+ -+#include "common_htbuffer_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+#include "lock.h" -+ -+#if !defined(EXCLUDE_HTBUFFER_BRIDGE) -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static_assert(HTB_FLAG_NUM_EL <= IMG_UINT32_MAX, -+ "HTB_FLAG_NUM_EL must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psHTBControlIN_UI8, -+ IMG_UINT8 * psHTBControlOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_HTBCONTROL *psHTBControlIN = -+ (PVRSRV_BRIDGE_IN_HTBCONTROL *) IMG_OFFSET_ADDR(psHTBControlIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_HTBCONTROL *psHTBControlOUT = -+ (PVRSRV_BRIDGE_OUT_HTBCONTROL *) IMG_OFFSET_ADDR(psHTBControlOUT_UI8, 0); -+ -+ IMG_UINT32 *ui32GroupEnableInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) + 0; -+ -+ if (unlikely(psHTBControlIN->ui32NumGroups > HTB_FLAG_NUM_EL)) -+ { -+ psHTBControlOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto HTBControl_exit; -+ } -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psHTBControlOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto HTBControl_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psHTBControlIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHTBControlIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psHTBControlOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto HTBControl_exit; -+ } -+ } -+ } -+ -+ if (psHTBControlIN->ui32NumGroups != 0) -+ { -+ ui32GroupEnableInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32GroupEnableInt, -+ (const void __user *)psHTBControlIN->pui32GroupEnable, -+ psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psHTBControlOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto HTBControl_exit; -+ } -+ } -+ -+ psHTBControlOUT->eError = -+ HTBControlKM(psHTBControlIN->ui32NumGroups, -+ ui32GroupEnableInt, -+ psHTBControlIN->ui32LogLevel, -+ psHTBControlIN->ui32EnablePID, -+ psHTBControlIN->ui32LogMode, psHTBControlIN->ui32OpMode); -+ -+HTBControl_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psHTBControlOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+static POS_LOCK pHTBUFFERBridgeLock; -+ -+#endif /* EXCLUDE_HTBUFFER_BRIDGE */ -+ -+#if !defined(EXCLUDE_HTBUFFER_BRIDGE) -+PVRSRV_ERROR InitHTBUFFERBridge(void); -+void DeinitHTBUFFERBridge(void); -+ -+/* -+ * Register all HTBUFFER functions with services -+ */ -+PVRSRV_ERROR InitHTBUFFERBridge(void) -+{ -+ PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pHTBUFFERBridgeLock), "OSLockCreate"); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL, -+ PVRSRVBridgeHTBControl, pHTBUFFERBridgeLock); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all htbuffer functions with services -+ */ -+void DeinitHTBUFFERBridge(void) -+{ -+ OSLockDestroy(pHTBUFFERBridgeLock); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL); -+ -+} -+#else /* EXCLUDE_HTBUFFER_BRIDGE */ -+/* This bridge is conditional on EXCLUDE_HTBUFFER_BRIDGE - when defined, -+ * do not populate the dispatch table with its functions -+ */ -+#define InitHTBUFFERBridge() \ -+ PVRSRV_OK -+ -+#define DeinitHTBUFFERBridge() -+ -+#endif /* EXCLUDE_HTBUFFER_BRIDGE */ -diff --git a/drivers/gpu/drm/img-rogue/server_mm_bridge.c b/drivers/gpu/drm/img-rogue/server_mm_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_mm_bridge.c -@@ -0,0 +1,3186 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for mm -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for mm -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "pvrsrv_memalloc_physheap.h" -+#include "devicemem.h" -+#include "devicemem_server.h" -+#include "pmr.h" -+#include "devicemem_heapcfg.h" -+#include "physmem.h" -+#include "devicemem_utils.h" -+#include "process_stats.h" -+ -+#include "common_mm_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+#if defined(SUPPORT_INSECURE_EXPORT) -+static PVRSRV_ERROR ReleasePMRExport(void *pvData) -+{ -+ PVR_UNREFERENCED_PARAMETER(pvData); -+ -+ return PVRSRV_OK; -+} -+#endif -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+#if defined(SUPPORT_INSECURE_EXPORT) -+static PVRSRV_ERROR _PMRExportPMRpsPMRExportIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PMRUnexportPMR((PMR_EXPORT *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgePMRExportPMR(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psPMRExportPMRIN_UI8, -+ IMG_UINT8 * psPMRExportPMROUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_PMREXPORTPMR *psPMRExportPMRIN = -+ (PVRSRV_BRIDGE_IN_PMREXPORTPMR *) IMG_OFFSET_ADDR(psPMRExportPMRIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_PMREXPORTPMR *psPMRExportPMROUT = -+ (PVRSRV_BRIDGE_OUT_PMREXPORTPMR *) IMG_OFFSET_ADDR(psPMRExportPMROUT_UI8, 0); -+ -+ IMG_HANDLE hPMR = psPMRExportPMRIN->hPMR; -+ PMR *psPMRInt = NULL; -+ PMR_EXPORT *psPMRExportInt = NULL; -+ IMG_HANDLE hPMRExportInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psPMRExportPMROUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRInt, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto PMRExportPMR_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psPMRExportPMROUT->eError = -+ PMRExportPMR(psPMRInt, -+ &psPMRExportInt, -+ &psPMRExportPMROUT->ui64Size, -+ &psPMRExportPMROUT->ui32Log2Contig, &psPMRExportPMROUT->ui64Password); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) -+ { -+ goto PMRExportPMR_exit; -+ } -+ -+ /* -+ * For cases where we need a cross process handle we actually allocate two. -+ * -+ * The first one is a connection specific handle and it gets given the real -+ * release function. This handle does *NOT* get returned to the caller. It's -+ * purpose is to release any leaked resources when we either have a bad or -+ * abnormally terminated client. If we didn't do this then the resource -+ * wouldn't be freed until driver unload. If the resource is freed normally, -+ * this handle can be looked up via the cross process handle and then -+ * released accordingly. -+ * -+ * The second one is a cross process handle and it gets given a noop release -+ * function. This handle does get returned to the caller. -+ */ -+ -+ /* Lock over handle creation. 
*/ -+ LockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ -+ psPMRExportPMROUT->eError = -+ PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, -+ &hPMRExportInt, (void *)psPMRExportInt, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & _PMRExportPMRpsPMRExportIntRelease); -+ if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ goto PMRExportPMR_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ -+ /* Lock over handle creation. */ -+ LockHandle(KERNEL_HANDLE_BASE); -+ psPMRExportPMROUT->eError = PVRSRVAllocHandleUnlocked(KERNEL_HANDLE_BASE, -+ &psPMRExportPMROUT->hPMRExport, -+ (void *)psPMRExportInt, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ ReleasePMRExport); -+ if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ goto PMRExportPMR_exit; -+ } -+ /* Release now we have created handles. */ -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ -+PMRExportPMR_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psPMRExportPMROUT->eError != PVRSRV_OK) -+ { -+ if (psPMRExportPMROUT->hPMRExport) -+ { -+ PVRSRV_ERROR eError; -+ -+ /* Lock over handle creation cleanup. */ -+ LockHandle(KERNEL_HANDLE_BASE); -+ -+ eError = PVRSRVDestroyHandleUnlocked(KERNEL_HANDLE_BASE, -+ (IMG_HANDLE) psPMRExportPMROUT-> -+ hPMRExport, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); -+ if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", __func__, PVRSRVGetErrorString(eError))); -+ } -+ /* Releasing the handle should free/destroy/release the resource. -+ * This should never fail... */ -+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); -+ -+ /* Release now we have cleaned up creation handles. */ -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ -+ } -+ -+ if (hPMRExportInt) -+ { -+ PVRSRV_ERROR eError; -+ /* Lock over handle creation cleanup. */ -+ LockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ -+ eError = -+ PVRSRVDestroyHandleUnlocked(psConnection->psProcessHandleBase-> -+ psHandleBase, hPMRExportInt, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); -+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", __func__, PVRSRVGetErrorString(eError))); -+ } -+ /* Releasing the handle should free/destroy/release the resource. -+ * This should never fail... */ -+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); -+ -+ /* Avoid freeing/destroying/releasing the resource a second time below */ -+ psPMRExportInt = NULL; -+ /* Release now we have cleaned up creation handles. 
*/ -+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ } -+ -+ if (psPMRExportInt) -+ { -+ LockHandle(KERNEL_HANDLE_BASE); -+ PMRUnexportPMR(psPMRExportInt); -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ } -+ } -+ -+ return 0; -+} -+ -+#else -+#define PVRSRVBridgePMRExportPMR NULL -+#endif -+ -+#if defined(SUPPORT_INSECURE_EXPORT) -+ -+static IMG_INT -+PVRSRVBridgePMRUnexportPMR(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psPMRUnexportPMRIN_UI8, -+ IMG_UINT8 * psPMRUnexportPMROUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *psPMRUnexportPMRIN = -+ (PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *) IMG_OFFSET_ADDR(psPMRUnexportPMRIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *psPMRUnexportPMROUT = -+ (PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *) IMG_OFFSET_ADDR(psPMRUnexportPMROUT_UI8, 0); -+ -+ PMR_EXPORT *psPMRExportInt = NULL; -+ IMG_HANDLE hPMRExportInt = NULL; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(KERNEL_HANDLE_BASE); -+ psPMRUnexportPMROUT->eError = -+ PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE, -+ (void **)&psPMRExportInt, -+ (IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, IMG_FALSE); -+ if (unlikely(psPMRUnexportPMROUT->eError != PVRSRV_OK)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); -+ } -+ PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK); -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ /* -+ * Find the connection specific handle that represents the same data -+ * as the cross process handle as releasing it will actually call the -+ * data's real release function (see the function where the cross -+ * process handle is allocated for more details). -+ */ -+ psPMRUnexportPMROUT->eError = -+ PVRSRVFindHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, -+ &hPMRExportInt, -+ psPMRExportInt, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); -+ if (unlikely(psPMRUnexportPMROUT->eError != PVRSRV_OK)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); -+ } -+ PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK); -+ -+ psPMRUnexportPMROUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase, -+ hPMRExportInt, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); -+ if (unlikely((psPMRUnexportPMROUT->eError != PVRSRV_OK) && -+ (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); -+ } -+ PVR_ASSERT((psPMRUnexportPMROUT->eError == PVRSRV_OK) || -+ (psPMRUnexportPMROUT->eError == PVRSRV_ERROR_RETRY)); -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ -+ /* Lock over handle destruction. 
*/ -+ LockHandle(KERNEL_HANDLE_BASE); -+ -+ psPMRUnexportPMROUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(KERNEL_HANDLE_BASE, -+ (IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); -+ if (unlikely((psPMRUnexportPMROUT->eError != PVRSRV_OK) && -+ (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ goto PMRUnexportPMR_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ -+PMRUnexportPMR_exit: -+ -+ return 0; -+} -+ -+#else -+#define PVRSRVBridgePMRUnexportPMR NULL -+#endif -+ -+static IMG_INT -+PVRSRVBridgePMRGetUID(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psPMRGetUIDIN_UI8, -+ IMG_UINT8 * psPMRGetUIDOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_PMRGETUID *psPMRGetUIDIN = -+ (PVRSRV_BRIDGE_IN_PMRGETUID *) IMG_OFFSET_ADDR(psPMRGetUIDIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_PMRGETUID *psPMRGetUIDOUT = -+ (PVRSRV_BRIDGE_OUT_PMRGETUID *) IMG_OFFSET_ADDR(psPMRGetUIDOUT_UI8, 0); -+ -+ IMG_HANDLE hPMR = psPMRGetUIDIN->hPMR; -+ PMR *psPMRInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psPMRGetUIDOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRInt, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psPMRGetUIDOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto PMRGetUID_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psPMRGetUIDOUT->eError = PMRGetUID(psPMRInt, &psPMRGetUIDOUT->ui64UID); -+ -+PMRGetUID_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _PMRMakeLocalImportHandlepsExtMemIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PMRUnmakeLocalImportHandle((PMR *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgePMRMakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psPMRMakeLocalImportHandleIN_UI8, -+ IMG_UINT8 * psPMRMakeLocalImportHandleOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleIN = -+ (PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *) -+ IMG_OFFSET_ADDR(psPMRMakeLocalImportHandleIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleOUT = -+ (PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE *) -+ IMG_OFFSET_ADDR(psPMRMakeLocalImportHandleOUT_UI8, 0); -+ -+ IMG_HANDLE hBuffer = psPMRMakeLocalImportHandleIN->hBuffer; -+ PMR *psBufferInt = NULL; -+ PMR *psExtMemInt = NULL; -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psPMRMakeLocalImportHandleOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psBufferInt, -+ hBuffer, -+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, IMG_TRUE); -+ if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto PMRMakeLocalImportHandle_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psPMRMakeLocalImportHandleOUT->eError = PMRMakeLocalImportHandle(psBufferInt, &psExtMemInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)) -+ { -+ goto PMRMakeLocalImportHandle_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ -+ psPMRMakeLocalImportHandleOUT->eError = -+ PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, -+ &psPMRMakeLocalImportHandleOUT->hExtMem, (void *)psExtMemInt, -+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _PMRMakeLocalImportHandlepsExtMemIntRelease); -+ if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ goto PMRMakeLocalImportHandle_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ -+PMRMakeLocalImportHandle_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psBufferInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hBuffer, PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK) -+ { -+ if (psExtMemInt) -+ { -+ LockHandle(KERNEL_HANDLE_BASE); -+ PMRUnmakeLocalImportHandle(psExtMemInt); -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgePMRUnmakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psPMRUnmakeLocalImportHandleIN_UI8, -+ IMG_UINT8 * psPMRUnmakeLocalImportHandleOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE *psPMRUnmakeLocalImportHandleIN = -+ (PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE *) -+ IMG_OFFSET_ADDR(psPMRUnmakeLocalImportHandleIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE *psPMRUnmakeLocalImportHandleOUT = -+ (PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE *) -+ IMG_OFFSET_ADDR(psPMRUnmakeLocalImportHandleOUT_UI8, 0); -+ -+ /* Lock over handle destruction. 
*/ -+ LockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ -+ psPMRUnmakeLocalImportHandleOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase, -+ (IMG_HANDLE) psPMRUnmakeLocalImportHandleIN->hExtMem, -+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT); -+ if (unlikely((psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_OK) && -+ (psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psPMRUnmakeLocalImportHandleOUT->eError))); -+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ goto PMRUnmakeLocalImportHandle_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ -+PMRUnmakeLocalImportHandle_exit: -+ -+ return 0; -+} -+ -+#if defined(SUPPORT_INSECURE_EXPORT) -+static PVRSRV_ERROR _PMRImportPMRpsPMRIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PMRUnrefPMR((PMR *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgePMRImportPMR(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psPMRImportPMRIN_UI8, -+ IMG_UINT8 * psPMRImportPMROUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_PMRIMPORTPMR *psPMRImportPMRIN = -+ (PVRSRV_BRIDGE_IN_PMRIMPORTPMR *) IMG_OFFSET_ADDR(psPMRImportPMRIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *psPMRImportPMROUT = -+ (PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *) IMG_OFFSET_ADDR(psPMRImportPMROUT_UI8, 0); -+ -+ IMG_HANDLE hPMRExport = psPMRImportPMRIN->hPMRExport; -+ PMR_EXPORT *psPMRExportInt = NULL; -+ PMR *psPMRInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(KERNEL_HANDLE_BASE); -+ -+ /* Look up the address from the handle */ -+ psPMRImportPMROUT->eError = -+ PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE, -+ (void **)&psPMRExportInt, -+ hPMRExport, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, IMG_TRUE); -+ if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ goto PMRImportPMR_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ -+ psPMRImportPMROUT->eError = -+ PhysmemImportPMR(psConnection, OSGetDevNode(psConnection), -+ psPMRExportInt, -+ psPMRImportPMRIN->ui64uiPassword, -+ psPMRImportPMRIN->ui64uiSize, -+ psPMRImportPMRIN->ui32uiLog2Contig, &psPMRInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK)) -+ { -+ goto PMRImportPMR_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psPMRImportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psPMRImportPMROUT->hPMR, -+ (void *)psPMRInt, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _PMRImportPMRpsPMRIntRelease); -+ if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto PMRImportPMR_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+PMRImportPMR_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(KERNEL_HANDLE_BASE); -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRExportInt) -+ { -+ PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE, -+ hPMRExport, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ -+ if (psPMRImportPMROUT->eError != PVRSRV_OK) -+ { -+ if (psPMRInt) -+ { -+ LockHandle(KERNEL_HANDLE_BASE); -+ PMRUnrefPMR(psPMRInt); -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ } -+ } -+ -+ return 0; -+} -+ -+#else -+#define PVRSRVBridgePMRImportPMR NULL -+#endif -+ -+static PVRSRV_ERROR _PMRLocalImportPMRpsPMRIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PMRUnrefPMR((PMR *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgePMRLocalImportPMR(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psPMRLocalImportPMRIN_UI8, -+ IMG_UINT8 * psPMRLocalImportPMROUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *psPMRLocalImportPMRIN = -+ (PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *) IMG_OFFSET_ADDR(psPMRLocalImportPMRIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *psPMRLocalImportPMROUT = -+ (PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *) IMG_OFFSET_ADDR(psPMRLocalImportPMROUT_UI8, 0); -+ -+ IMG_HANDLE hExtHandle = psPMRLocalImportPMRIN->hExtHandle; -+ PMR *psExtHandleInt = NULL; -+ PMR *psPMRInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psPMRLocalImportPMROUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, -+ (void **)&psExtHandleInt, -+ hExtHandle, PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, IMG_TRUE); -+ if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ goto PMRLocalImportPMR_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ -+ psPMRLocalImportPMROUT->eError = -+ PMRLocalImportPMR(psExtHandleInt, -+ &psPMRInt, -+ &psPMRLocalImportPMROUT->uiSize, &psPMRLocalImportPMROUT->uiAlign); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK)) -+ { -+ goto PMRLocalImportPMR_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psPMRLocalImportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psPMRLocalImportPMROUT->hPMR, -+ (void *)psPMRInt, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _PMRLocalImportPMRpsPMRIntRelease); -+ if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto PMRLocalImportPMR_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+PMRLocalImportPMR_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psExtHandleInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, -+ hExtHandle, PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ -+ if (psPMRLocalImportPMROUT->eError != PVRSRV_OK) -+ { -+ if (psPMRInt) -+ { -+ LockHandle(KERNEL_HANDLE_BASE); -+ PMRUnrefPMR(psPMRInt); -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgePMRUnrefPMR(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psPMRUnrefPMRIN_UI8, -+ IMG_UINT8 * psPMRUnrefPMROUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_PMRUNREFPMR *psPMRUnrefPMRIN = -+ (PVRSRV_BRIDGE_IN_PMRUNREFPMR *) IMG_OFFSET_ADDR(psPMRUnrefPMRIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_PMRUNREFPMR *psPMRUnrefPMROUT = -+ (PVRSRV_BRIDGE_OUT_PMRUNREFPMR *) IMG_OFFSET_ADDR(psPMRUnrefPMROUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psPMRUnrefPMROUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psPMRUnrefPMRIN->hPMR, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ if (unlikely((psPMRUnrefPMROUT->eError != PVRSRV_OK) && -+ (psPMRUnrefPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psPMRUnrefPMROUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnrefPMROUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto PMRUnrefPMR_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+PMRUnrefPMR_exit: -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgePMRUnrefUnlockPMR(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psPMRUnrefUnlockPMRIN_UI8, -+ IMG_UINT8 * psPMRUnrefUnlockPMROUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMRIN = -+ (PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *) IMG_OFFSET_ADDR(psPMRUnrefUnlockPMRIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMROUT = -+ (PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *) IMG_OFFSET_ADDR(psPMRUnrefUnlockPMROUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psPMRUnrefUnlockPMROUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psPMRUnrefUnlockPMRIN->hPMR, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ if (unlikely((psPMRUnrefUnlockPMROUT->eError != PVRSRV_OK) && -+ (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnrefUnlockPMROUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto PMRUnrefUnlockPMR_exit; -+ } -+ -+ /* Release now we have destroyed handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+PMRUnrefUnlockPMR_exit: -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _PhysmemNewRamBackedPMRpsPMRPtrIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PMRUnrefPMR((PMR *) pvData); -+ return eError; -+} -+ -+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX, -+ "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); -+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, -+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psPhysmemNewRamBackedPMRIN_UI8, -+ IMG_UINT8 * psPhysmemNewRamBackedPMROUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMRIN = -+ (PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *) -+ IMG_OFFSET_ADDR(psPhysmemNewRamBackedPMRIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMROUT = -+ (PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *) -+ IMG_OFFSET_ADDR(psPhysmemNewRamBackedPMROUT_UI8, 0); -+ -+ IMG_UINT32 *ui32MappingTableInt = NULL; -+ IMG_CHAR *uiAnnotationInt = NULL; -+ PMR *psPMRPtrInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) + 0; -+ -+ if (unlikely -+ (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks > PMR_MAX_SUPPORTED_4K_PAGE_COUNT)) -+ { -+ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto PhysmemNewRamBackedPMR_exit; -+ } -+ -+ if (unlikely(psPhysmemNewRamBackedPMRIN->ui32AnnotationLength > DEVMEM_ANNOTATION_MAX_LEN)) -+ { -+ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto PhysmemNewRamBackedPMR_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto PhysmemNewRamBackedPMR_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psPhysmemNewRamBackedPMRIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemNewRamBackedPMRIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto PhysmemNewRamBackedPMR_exit; -+ } -+ } -+ } -+ -+ if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks != 0) -+ { -+ ui32MappingTableInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32MappingTableInt, -+ (const void __user *)psPhysmemNewRamBackedPMRIN->pui32MappingTable, -+ psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) != -+ PVRSRV_OK) -+ { -+ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto PhysmemNewRamBackedPMR_exit; -+ } -+ } -+ if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength != 0) -+ { -+ uiAnnotationInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiAnnotationInt, -+ (const void __user *)psPhysmemNewRamBackedPMRIN->puiAnnotation, -+ psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) != -+ PVRSRV_OK) -+ { -+ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto PhysmemNewRamBackedPMR_exit; -+ } -+ ((IMG_CHAR *) -+ uiAnnotationInt)[(psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * -+ sizeof(IMG_CHAR)) - 1] = '\0'; -+ } -+ -+ psPhysmemNewRamBackedPMROUT->eError = -+ PhysmemNewRamBackedPMR(psConnection, OSGetDevNode(psConnection), -+ psPhysmemNewRamBackedPMRIN->uiSize, -+ psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks, -+ psPhysmemNewRamBackedPMRIN->ui32NumVirtChunks, -+ ui32MappingTableInt, -+ psPhysmemNewRamBackedPMRIN->ui32Log2PageSize, -+ psPhysmemNewRamBackedPMRIN->uiFlags, -+ psPhysmemNewRamBackedPMRIN->ui32AnnotationLength, -+ uiAnnotationInt, -+ psPhysmemNewRamBackedPMRIN->ui32PID, -+ &psPMRPtrInt, -+ psPhysmemNewRamBackedPMRIN->ui32PDumpFlags, -+ &psPhysmemNewRamBackedPMROUT->uiOutFlags); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)) -+ { -+ goto PhysmemNewRamBackedPMR_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psPhysmemNewRamBackedPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psPhysmemNewRamBackedPMROUT-> -+ hPMRPtr, -+ (void *)psPMRPtrInt, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _PhysmemNewRamBackedPMRpsPMRPtrIntRelease); -+ if (unlikely(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto PhysmemNewRamBackedPMR_exit; -+ } -+ -+ /* Release now we have created handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+PhysmemNewRamBackedPMR_exit: -+ -+ if (psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK) -+ { -+ if (psPMRPtrInt) -+ { -+ LockHandle(KERNEL_HANDLE_BASE); -+ PMRUnrefPMR(psPMRPtrInt); -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ } -+ } -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psPhysmemNewRamBackedPMROUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _DevmemIntCtxCreatepsDevMemServerContextIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemIntCtxCreate(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemIntCtxCreateIN_UI8, -+ IMG_UINT8 * psDevmemIntCtxCreateOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateOUT_UI8, -+ 0); -+ -+ DEVMEMINT_CTX *psDevMemServerContextInt = NULL; -+ IMG_HANDLE hPrivDataInt = NULL; -+ -+ psDevmemIntCtxCreateOUT->hDevMemServerContext = NULL; -+ -+ psDevmemIntCtxCreateOUT->eError = -+ DevmemIntCtxCreate(psConnection, OSGetDevNode(psConnection), -+ psDevmemIntCtxCreateIN->bbKernelMemoryCtx, -+ &psDevMemServerContextInt, -+ &hPrivDataInt, &psDevmemIntCtxCreateOUT->ui32CPUCacheLineSize); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) -+ { -+ goto DevmemIntCtxCreate_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntCtxCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psDevmemIntCtxCreateOUT-> -+ hDevMemServerContext, -+ (void *) -+ psDevMemServerContextInt, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _DevmemIntCtxCreatepsDevMemServerContextIntRelease); -+ if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntCtxCreate_exit; -+ } -+ -+ psDevmemIntCtxCreateOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, -+ &psDevmemIntCtxCreateOUT-> -+ hPrivData, -+ (void *)hPrivDataInt, -+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ psDevmemIntCtxCreateOUT-> -+ hDevMemServerContext); -+ if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntCtxCreate_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+DevmemIntCtxCreate_exit: -+ -+ if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK) -+ { -+ if (psDevmemIntCtxCreateOUT->hDevMemServerContext) -+ { -+ PVRSRV_ERROR eError; -+ -+ /* Lock over handle creation cleanup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psDevmemIntCtxCreateOUT-> -+ hDevMemServerContext, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); -+ if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", __func__, PVRSRVGetErrorString(eError))); -+ } -+ /* Releasing the handle should free/destroy/release the resource. -+ * This should never fail... */ -+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); -+ -+ /* Avoid freeing/destroying/releasing the resource a second time below */ -+ psDevMemServerContextInt = NULL; -+ /* Release now we have cleaned up creation handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ } -+ -+ if (psDevMemServerContextInt) -+ { -+ DevmemIntCtxDestroy(psDevMemServerContextInt); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemIntCtxDestroy(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemIntCtxDestroyIN_UI8, -+ IMG_UINT8 * psDevmemIntCtxDestroyOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyOUT_UI8, -+ 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntCtxDestroyOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psDevmemIntCtxDestroyIN-> -+ hDevmemServerContext, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); -+ if (unlikely -+ ((psDevmemIntCtxDestroyOUT->eError != PVRSRV_OK) -+ && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) -+ && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psDevmemIntCtxDestroyOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntCtxDestroy_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+DevmemIntCtxDestroy_exit: -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = DevmemIntHeapDestroy((DEVMEMINT_HEAP *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemIntHeapCreateIN_UI8, -+ IMG_UINT8 * psDevmemIntHeapCreateOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateOUT_UI8, -+ 0); -+ -+ IMG_HANDLE hDevmemCtx = psDevmemIntHeapCreateIN->hDevmemCtx; -+ DEVMEMINT_CTX *psDevmemCtxInt = NULL; -+ DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL; -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDevmemIntHeapCreateOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psDevmemCtxInt, -+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); -+ if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntHeapCreate_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntHeapCreateOUT->eError = -+ DevmemIntHeapCreate(psDevmemCtxInt, -+ psDevmemIntHeapCreateIN->ui32HeapConfigIndex, -+ psDevmemIntHeapCreateIN->ui32HeapIndex, -+ psDevmemIntHeapCreateIN->sHeapBaseAddr, -+ psDevmemIntHeapCreateIN->ui32Log2DataPageSize, &psDevmemHeapPtrInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) -+ { -+ goto DevmemIntHeapCreate_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntHeapCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psDevmemIntHeapCreateOUT-> -+ hDevmemHeapPtr, -+ (void *)psDevmemHeapPtrInt, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease); -+ if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntHeapCreate_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+DevmemIntHeapCreate_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psDevmemCtxInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK) -+ { -+ if (psDevmemHeapPtrInt) -+ { -+ DevmemIntHeapDestroy(psDevmemHeapPtrInt); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemIntHeapDestroy(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemIntHeapDestroyIN_UI8, -+ IMG_UINT8 * psDevmemIntHeapDestroyOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *) IMG_OFFSET_ADDR(psDevmemIntHeapDestroyIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *) -+ IMG_OFFSET_ADDR(psDevmemIntHeapDestroyOUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntHeapDestroyOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psDevmemIntHeapDestroyIN->hDevmemHeap, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); -+ if (unlikely((psDevmemIntHeapDestroyOUT->eError != PVRSRV_OK) && -+ (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psDevmemIntHeapDestroyOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntHeapDestroy_exit; -+ } -+ -+ /* Release now we have destroyed handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+DevmemIntHeapDestroy_exit: -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _DevmemIntMapPMRpsMappingIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = DevmemIntUnmapPMR((DEVMEMINT_MAPPING *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemIntMapPMR(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemIntMapPMRIN_UI8, -+ IMG_UINT8 * psDevmemIntMapPMROUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *psDevmemIntMapPMRIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMRIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *psDevmemIntMapPMROUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMROUT_UI8, 0); -+ -+ IMG_HANDLE hDevmemServerHeap = psDevmemIntMapPMRIN->hDevmemServerHeap; -+ DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; -+ IMG_HANDLE hReservation = psDevmemIntMapPMRIN->hReservation; -+ DEVMEMINT_RESERVATION *psReservationInt = NULL; -+ IMG_HANDLE hPMR = psDevmemIntMapPMRIN->hPMR; -+ PMR *psPMRInt = NULL; -+ DEVMEMINT_MAPPING *psMappingInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDevmemIntMapPMROUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psDevmemServerHeapInt, -+ hDevmemServerHeap, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); -+ if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntMapPMR_exit; -+ } -+ -+ /* Look up the address from the handle */ -+ psDevmemIntMapPMROUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psReservationInt, -+ hReservation, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); -+ if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntMapPMR_exit; -+ } -+ -+ /* Look up the address from the handle */ -+ psDevmemIntMapPMROUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRInt, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntMapPMR_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntMapPMROUT->eError = -+ DevmemIntMapPMR(psDevmemServerHeapInt, -+ psReservationInt, -+ psPMRInt, psDevmemIntMapPMRIN->uiMapFlags, &psMappingInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) -+ { -+ goto DevmemIntMapPMR_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntMapPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psDevmemIntMapPMROUT->hMapping, -+ (void *)psMappingInt, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _DevmemIntMapPMRpsMappingIntRelease); -+ if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntMapPMR_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+DevmemIntMapPMR_exit: -+ -+ /* Lock over handle lookup cleanup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psDevmemServerHeapInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); -+ } -+ -+ /* Unreference the previously looked up handle */ -+ if (psReservationInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); -+ } -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psDevmemIntMapPMROUT->eError != PVRSRV_OK) -+ { -+ if (psMappingInt) -+ { -+ DevmemIntUnmapPMR(psMappingInt); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemIntUnmapPMR(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemIntUnmapPMRIN_UI8, -+ IMG_UINT8 * psDevmemIntUnmapPMROUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMRIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMRIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMROUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMROUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntUnmapPMROUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psDevmemIntUnmapPMRIN->hMapping, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); -+ if (unlikely((psDevmemIntUnmapPMROUT->eError != PVRSRV_OK) && -+ (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", __func__, PVRSRVGetErrorString(psDevmemIntUnmapPMROUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntUnmapPMR_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+DevmemIntUnmapPMR_exit: -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _DevmemIntReserveRangepsReservationIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = DevmemIntUnreserveRange((DEVMEMINT_RESERVATION *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemIntReserveRange(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemIntReserveRangeIN_UI8, -+ IMG_UINT8 * psDevmemIntReserveRangeOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *) -+ IMG_OFFSET_ADDR(psDevmemIntReserveRangeIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *) -+ IMG_OFFSET_ADDR(psDevmemIntReserveRangeOUT_UI8, 0); -+ -+ IMG_HANDLE hDevmemServerHeap = psDevmemIntReserveRangeIN->hDevmemServerHeap; -+ DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; -+ DEVMEMINT_RESERVATION *psReservationInt = NULL; -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDevmemIntReserveRangeOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psDevmemServerHeapInt, -+ hDevmemServerHeap, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); -+ if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntReserveRange_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntReserveRangeOUT->eError = -+ DevmemIntReserveRange(psDevmemServerHeapInt, -+ psDevmemIntReserveRangeIN->sAddress, -+ psDevmemIntReserveRangeIN->uiLength, &psReservationInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) -+ { -+ goto DevmemIntReserveRange_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntReserveRangeOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psDevmemIntReserveRangeOUT-> -+ hReservation, -+ (void *)psReservationInt, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _DevmemIntReserveRangepsReservationIntRelease); -+ if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntReserveRange_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+DevmemIntReserveRange_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psDevmemServerHeapInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK) -+ { -+ if (psReservationInt) -+ { -+ DevmemIntUnreserveRange(psReservationInt); -+ } -+ } -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _DevmemIntReserveRangeAndMapPMRpsMappingIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = DevmemIntUnreserveRangeAndUnmapPMR((DEVMEMINT_MAPPING *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemIntReserveRangeAndMapPMR(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemIntReserveRangeAndMapPMRIN_UI8, -+ IMG_UINT8 * psDevmemIntReserveRangeAndMapPMROUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGEANDMAPPMR *psDevmemIntReserveRangeAndMapPMRIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGEANDMAPPMR *) -+ IMG_OFFSET_ADDR(psDevmemIntReserveRangeAndMapPMRIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGEANDMAPPMR *psDevmemIntReserveRangeAndMapPMROUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGEANDMAPPMR *) -+ IMG_OFFSET_ADDR(psDevmemIntReserveRangeAndMapPMROUT_UI8, 0); -+ -+ IMG_HANDLE hDevmemServerHeap = psDevmemIntReserveRangeAndMapPMRIN->hDevmemServerHeap; -+ DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; -+ IMG_HANDLE hPMR = psDevmemIntReserveRangeAndMapPMRIN->hPMR; -+ PMR *psPMRInt = NULL; -+ DEVMEMINT_MAPPING *psMappingInt = NULL; -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDevmemIntReserveRangeAndMapPMROUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psDevmemServerHeapInt, -+ hDevmemServerHeap, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); -+ if (unlikely(psDevmemIntReserveRangeAndMapPMROUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntReserveRangeAndMapPMR_exit; -+ } -+ -+ /* Look up the address from the handle */ -+ psDevmemIntReserveRangeAndMapPMROUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRInt, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psDevmemIntReserveRangeAndMapPMROUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntReserveRangeAndMapPMR_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntReserveRangeAndMapPMROUT->eError = -+ DevmemIntReserveRangeAndMapPMR(psDevmemServerHeapInt, -+ psDevmemIntReserveRangeAndMapPMRIN->sAddress, -+ psDevmemIntReserveRangeAndMapPMRIN->uiLength, -+ psPMRInt, -+ psDevmemIntReserveRangeAndMapPMRIN->uiMapFlags, -+ &psMappingInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psDevmemIntReserveRangeAndMapPMROUT->eError != PVRSRV_OK)) -+ { -+ goto DevmemIntReserveRangeAndMapPMR_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntReserveRangeAndMapPMROUT->eError = -+ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psDevmemIntReserveRangeAndMapPMROUT->hMapping, -+ (void *)psMappingInt, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _DevmemIntReserveRangeAndMapPMRpsMappingIntRelease); -+ if (unlikely(psDevmemIntReserveRangeAndMapPMROUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntReserveRangeAndMapPMR_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+DevmemIntReserveRangeAndMapPMR_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psDevmemServerHeapInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); -+ } -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psDevmemIntReserveRangeAndMapPMROUT->eError != PVRSRV_OK) -+ { -+ if (psMappingInt) -+ { -+ DevmemIntUnreserveRangeAndUnmapPMR(psMappingInt); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemIntUnreserveRangeAndUnmapPMR(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * -+ psDevmemIntUnreserveRangeAndUnmapPMRIN_UI8, -+ IMG_UINT8 * -+ psDevmemIntUnreserveRangeAndUnmapPMROUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGEANDUNMAPPMR *psDevmemIntUnreserveRangeAndUnmapPMRIN -+ = -+ (PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGEANDUNMAPPMR *) -+ IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeAndUnmapPMRIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGEANDUNMAPPMR -+ *psDevmemIntUnreserveRangeAndUnmapPMROUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGEANDUNMAPPMR *) -+ IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeAndUnmapPMROUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntUnreserveRangeAndUnmapPMROUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psDevmemIntUnreserveRangeAndUnmapPMRIN-> -+ hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); -+ if (unlikely -+ ((psDevmemIntUnreserveRangeAndUnmapPMROUT->eError != PVRSRV_OK) -+ && (psDevmemIntUnreserveRangeAndUnmapPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) -+ && (psDevmemIntUnreserveRangeAndUnmapPMROUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, -+ PVRSRVGetErrorString(psDevmemIntUnreserveRangeAndUnmapPMROUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntUnreserveRangeAndUnmapPMR_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+DevmemIntUnreserveRangeAndUnmapPMR_exit: -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemIntUnreserveRangeIN_UI8, -+ IMG_UINT8 * psDevmemIntUnreserveRangeOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *) -+ IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *) -+ IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeOUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntUnreserveRangeOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psDevmemIntUnreserveRangeIN-> -+ hReservation, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); -+ if (unlikely -+ ((psDevmemIntUnreserveRangeOUT->eError != PVRSRV_OK) -+ && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) -+ && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psDevmemIntUnreserveRangeOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntUnreserveRange_exit; -+ } -+ -+ /* Release now we have destroyed handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+DevmemIntUnreserveRange_exit: -+ -+ return 0; -+} -+ -+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX, -+ "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); -+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX, -+ "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psChangeSparseMemIN_UI8, -+ IMG_UINT8 * psChangeSparseMemOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *psChangeSparseMemIN = -+ (PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *psChangeSparseMemOUT = -+ (PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemOUT_UI8, 0); -+ -+ IMG_HANDLE hSrvDevMemHeap = psChangeSparseMemIN->hSrvDevMemHeap; -+ DEVMEMINT_HEAP *psSrvDevMemHeapInt = NULL; -+ IMG_HANDLE hPMR = psChangeSparseMemIN->hPMR; -+ PMR *psPMRInt = NULL; -+ IMG_UINT32 *ui32AllocPageIndicesInt = NULL; -+ IMG_UINT32 *ui32FreePageIndicesInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) + 0; -+ -+ if (unlikely(psChangeSparseMemIN->ui32AllocPageCount > PMR_MAX_SUPPORTED_4K_PAGE_COUNT)) -+ { -+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto ChangeSparseMem_exit; -+ } -+ -+ if (unlikely(psChangeSparseMemIN->ui32FreePageCount > PMR_MAX_SUPPORTED_4K_PAGE_COUNT)) -+ { -+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto ChangeSparseMem_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto ChangeSparseMem_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psChangeSparseMemIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psChangeSparseMemIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto ChangeSparseMem_exit; -+ } -+ } -+ } -+ -+ if (psChangeSparseMemIN->ui32AllocPageCount != 0) -+ { -+ ui32AllocPageIndicesInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32AllocPageIndicesInt, -+ (const void __user *)psChangeSparseMemIN->pui32AllocPageIndices, -+ psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto ChangeSparseMem_exit; -+ } -+ } -+ if (psChangeSparseMemIN->ui32FreePageCount != 0) -+ { -+ ui32FreePageIndicesInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32FreePageIndicesInt, -+ (const void __user *)psChangeSparseMemIN->pui32FreePageIndices, -+ psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto ChangeSparseMem_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psChangeSparseMemOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psSrvDevMemHeapInt, -+ hSrvDevMemHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); -+ if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto ChangeSparseMem_exit; -+ } -+ -+ /* Look up the address from the handle */ -+ psChangeSparseMemOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRInt, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto ChangeSparseMem_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psChangeSparseMemOUT->eError = -+ DevmemIntChangeSparse(psSrvDevMemHeapInt, -+ psPMRInt, -+ psChangeSparseMemIN->ui32AllocPageCount, -+ ui32AllocPageIndicesInt, -+ psChangeSparseMemIN->ui32FreePageCount, -+ ui32FreePageIndicesInt, -+ psChangeSparseMemIN->ui32SparseFlags, -+ psChangeSparseMemIN->uiFlags, -+ psChangeSparseMemIN->sDevVAddr, -+ psChangeSparseMemIN->ui64CPUVAddr); -+ -+ChangeSparseMem_exit: -+ -+ /* Lock over handle lookup cleanup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psSrvDevMemHeapInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hSrvDevMemHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); -+ } -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psChangeSparseMemOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemIsVDevAddrValid(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemIsVDevAddrValidIN_UI8, -+ IMG_UINT8 * psDevmemIsVDevAddrValidOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *) -+ IMG_OFFSET_ADDR(psDevmemIsVDevAddrValidIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *) -+ IMG_OFFSET_ADDR(psDevmemIsVDevAddrValidOUT_UI8, 0); -+ -+ IMG_HANDLE hDevmemCtx = psDevmemIsVDevAddrValidIN->hDevmemCtx; -+ DEVMEMINT_CTX *psDevmemCtxInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDevmemIsVDevAddrValidOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psDevmemCtxInt, -+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); -+ if (unlikely(psDevmemIsVDevAddrValidOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIsVDevAddrValid_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDevmemIsVDevAddrValidOUT->eError = -+ DevmemIntIsVDevAddrValid(psConnection, OSGetDevNode(psConnection), -+ psDevmemCtxInt, psDevmemIsVDevAddrValidIN->sAddress); -+ -+DevmemIsVDevAddrValid_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psDevmemCtxInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+#if defined(RGX_FEATURE_FBCDC) -+ -+static IMG_INT -+PVRSRVBridgeDevmemInvalidateFBSCTable(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemInvalidateFBSCTableIN_UI8, -+ IMG_UINT8 * psDevmemInvalidateFBSCTableOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE *psDevmemInvalidateFBSCTableIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE *) -+ IMG_OFFSET_ADDR(psDevmemInvalidateFBSCTableIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE *psDevmemInvalidateFBSCTableOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE *) -+ IMG_OFFSET_ADDR(psDevmemInvalidateFBSCTableOUT_UI8, 0); -+ -+ IMG_HANDLE hDevmemCtx = psDevmemInvalidateFBSCTableIN->hDevmemCtx; -+ DEVMEMINT_CTX *psDevmemCtxInt = NULL; -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDevmemInvalidateFBSCTableOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psDevmemCtxInt, -+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); -+ if (unlikely(psDevmemInvalidateFBSCTableOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemInvalidateFBSCTable_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDevmemInvalidateFBSCTableOUT->eError = -+ DevmemIntInvalidateFBSCTable(psDevmemCtxInt, -+ psDevmemInvalidateFBSCTableIN->ui64FBSCEntries); -+ -+DevmemInvalidateFBSCTable_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psDevmemCtxInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+#else -+#define PVRSRVBridgeDevmemInvalidateFBSCTable NULL -+#endif -+ -+static IMG_INT -+PVRSRVBridgeHeapCfgHeapConfigCount(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psHeapCfgHeapConfigCountIN_UI8, -+ IMG_UINT8 * psHeapCfgHeapConfigCountOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountIN = -+ (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *) -+ IMG_OFFSET_ADDR(psHeapCfgHeapConfigCountIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountOUT = -+ (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *) -+ IMG_OFFSET_ADDR(psHeapCfgHeapConfigCountOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psHeapCfgHeapConfigCountIN); -+ -+ psHeapCfgHeapConfigCountOUT->eError = -+ HeapCfgHeapConfigCount(psConnection, OSGetDevNode(psConnection), -+ &psHeapCfgHeapConfigCountOUT->ui32NumHeapConfigs); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeHeapCfgHeapCount(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psHeapCfgHeapCountIN_UI8, -+ IMG_UINT8 * psHeapCfgHeapCountOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountIN = -+ (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *) IMG_OFFSET_ADDR(psHeapCfgHeapCountIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountOUT = -+ (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *) IMG_OFFSET_ADDR(psHeapCfgHeapCountOUT_UI8, 0); -+ -+ psHeapCfgHeapCountOUT->eError = -+ HeapCfgHeapCount(psConnection, OSGetDevNode(psConnection), -+ psHeapCfgHeapCountIN->ui32HeapConfigIndex, -+ &psHeapCfgHeapCountOUT->ui32NumHeaps); -+ -+ return 0; -+} -+ -+static_assert(DEVMEM_HEAPNAME_MAXLENGTH <= IMG_UINT32_MAX, -+ "DEVMEM_HEAPNAME_MAXLENGTH must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psHeapCfgHeapConfigNameIN_UI8, -+ IMG_UINT8 * psHeapCfgHeapConfigNameOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameIN = -+ (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *) -+ IMG_OFFSET_ADDR(psHeapCfgHeapConfigNameIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameOUT = -+ (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *) -+ IMG_OFFSET_ADDR(psHeapCfgHeapConfigNameOUT_UI8, 0); -+ -+ IMG_CHAR *puiHeapConfigNameInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE 
*pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR)) + -+ 0; -+ -+ if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz > DEVMEM_HEAPNAME_MAXLENGTH) -+ { -+ psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto HeapCfgHeapConfigName_exit; -+ } -+ -+ psHeapCfgHeapConfigNameOUT->puiHeapConfigName = -+ psHeapCfgHeapConfigNameIN->puiHeapConfigName; -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto HeapCfgHeapConfigName_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psHeapCfgHeapConfigNameIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHeapCfgHeapConfigNameIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto HeapCfgHeapConfigName_exit; -+ } -+ } -+ } -+ -+ if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz != 0) -+ { -+ puiHeapConfigNameInt = -+ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR); -+ } -+ -+ psHeapCfgHeapConfigNameOUT->eError = -+ HeapCfgHeapConfigName(psConnection, OSGetDevNode(psConnection), -+ psHeapCfgHeapConfigNameIN->ui32HeapConfigIndex, -+ psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz, -+ puiHeapConfigNameInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psHeapCfgHeapConfigNameOUT->eError != PVRSRV_OK)) -+ { -+ goto HeapCfgHeapConfigName_exit; -+ } -+ -+ /* If dest ptr is non-null and we have data to copy */ -+ if ((puiHeapConfigNameInt) && -+ ((psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR)) > 0)) -+ { -+ if (unlikely -+ (OSCopyToUser -+ (NULL, (void __user *)psHeapCfgHeapConfigNameOUT->puiHeapConfigName, -+ puiHeapConfigNameInt, -+ (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR))) != -+ PVRSRV_OK)) -+ { -+ psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto HeapCfgHeapConfigName_exit; -+ } -+ } -+ -+HeapCfgHeapConfigName_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psHeapCfgHeapConfigNameOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static_assert(DEVMEM_HEAPNAME_MAXLENGTH <= IMG_UINT32_MAX, -+ "DEVMEM_HEAPNAME_MAXLENGTH must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psHeapCfgHeapDetailsIN_UI8, -+ IMG_UINT8 * psHeapCfgHeapDetailsOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ 
PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsIN = -+ (PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *) IMG_OFFSET_ADDR(psHeapCfgHeapDetailsIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsOUT = -+ (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *) IMG_OFFSET_ADDR(psHeapCfgHeapDetailsOUT_UI8, -+ 0); -+ -+ IMG_CHAR *puiHeapNameOutInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) + 0; -+ -+ if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz > DEVMEM_HEAPNAME_MAXLENGTH) -+ { -+ psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto HeapCfgHeapDetails_exit; -+ } -+ -+ psHeapCfgHeapDetailsOUT->puiHeapNameOut = psHeapCfgHeapDetailsIN->puiHeapNameOut; -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto HeapCfgHeapDetails_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psHeapCfgHeapDetailsIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHeapCfgHeapDetailsIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto HeapCfgHeapDetails_exit; -+ } -+ } -+ } -+ -+ if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz != 0) -+ { -+ puiHeapNameOutInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR); -+ } -+ -+ psHeapCfgHeapDetailsOUT->eError = -+ HeapCfgHeapDetails(psConnection, OSGetDevNode(psConnection), -+ psHeapCfgHeapDetailsIN->ui32HeapConfigIndex, -+ psHeapCfgHeapDetailsIN->ui32HeapIndex, -+ psHeapCfgHeapDetailsIN->ui32HeapNameBufSz, -+ puiHeapNameOutInt, -+ &psHeapCfgHeapDetailsOUT->sDevVAddrBase, -+ &psHeapCfgHeapDetailsOUT->uiHeapLength, -+ &psHeapCfgHeapDetailsOUT->uiReservedRegionLength, -+ &psHeapCfgHeapDetailsOUT->ui32Log2DataPageSizeOut, -+ &psHeapCfgHeapDetailsOUT->ui32Log2ImportAlignmentOut); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psHeapCfgHeapDetailsOUT->eError != PVRSRV_OK)) -+ { -+ goto HeapCfgHeapDetails_exit; -+ } -+ -+ /* If dest ptr is non-null and we have data to copy */ -+ if ((puiHeapNameOutInt) && -+ ((psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) > 0)) -+ { -+ if (unlikely -+ (OSCopyToUser -+ (NULL, (void __user *)psHeapCfgHeapDetailsOUT->puiHeapNameOut, -+ puiHeapNameOutInt, -+ (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR))) != PVRSRV_OK)) -+ { -+ psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto HeapCfgHeapDetails_exit; -+ } -+ } -+ -+HeapCfgHeapDetails_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psHeapCfgHeapDetailsOUT->eError == PVRSRV_OK) -+ 
PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemIntRegisterPFNotifyKM(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemIntRegisterPFNotifyKMIN_UI8, -+ IMG_UINT8 * psDevmemIntRegisterPFNotifyKMOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *) -+ IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *) -+ IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMOUT_UI8, 0); -+ -+ IMG_HANDLE hDevmemCtx = psDevmemIntRegisterPFNotifyKMIN->hDevmemCtx; -+ DEVMEMINT_CTX *psDevmemCtxInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDevmemIntRegisterPFNotifyKMOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psDevmemCtxInt, -+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); -+ if (unlikely(psDevmemIntRegisterPFNotifyKMOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemIntRegisterPFNotifyKM_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDevmemIntRegisterPFNotifyKMOUT->eError = -+ DevmemIntRegisterPFNotifyKM(psDevmemCtxInt, psDevmemIntRegisterPFNotifyKMIN->bRegister); -+ -+DevmemIntRegisterPFNotifyKM_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psDevmemCtxInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static_assert(PVRSRV_PHYS_HEAP_LAST <= IMG_UINT32_MAX, -+ "PVRSRV_PHYS_HEAP_LAST must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgePhysHeapGetMemInfo(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psPhysHeapGetMemInfoIN_UI8, -+ IMG_UINT8 * psPhysHeapGetMemInfoOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO *psPhysHeapGetMemInfoIN = -+ (PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfoIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO *psPhysHeapGetMemInfoOUT = -+ (PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfoOUT_UI8, -+ 0); -+ -+ PVRSRV_PHYS_HEAP *eaPhysHeapIDInt = NULL; -+ PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStatsInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) + -+ ((IMG_UINT64) psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS)) + -+ 0; -+ -+ if (unlikely(psPhysHeapGetMemInfoIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST)) -+ { -+ psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto PhysHeapGetMemInfo_exit; -+ } -+ -+ psPhysHeapGetMemInfoOUT->pasapPhysHeapMemStats = -+ psPhysHeapGetMemInfoIN->pasapPhysHeapMemStats; -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto PhysHeapGetMemInfo_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psPhysHeapGetMemInfoIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysHeapGetMemInfoIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto PhysHeapGetMemInfo_exit; -+ } -+ } -+ } -+ -+ if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount != 0) -+ { -+ eaPhysHeapIDInt = -+ (PVRSRV_PHYS_HEAP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP); -+ } -+ -+ /* Copy the data over */ -+ if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, eaPhysHeapIDInt, -+ (const void __user *)psPhysHeapGetMemInfoIN->peaPhysHeapID, -+ psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) != -+ PVRSRV_OK) -+ { -+ psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto PhysHeapGetMemInfo_exit; -+ } -+ } -+ if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount != 0) -+ { -+ pasapPhysHeapMemStatsInt = -+ (PHYS_HEAP_MEM_STATS *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS); -+ } -+ -+ psPhysHeapGetMemInfoOUT->eError = -+ PVRSRVPhysHeapGetMemInfoKM(psConnection, OSGetDevNode(psConnection), -+ psPhysHeapGetMemInfoIN->ui32PhysHeapCount, -+ eaPhysHeapIDInt, pasapPhysHeapMemStatsInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psPhysHeapGetMemInfoOUT->eError != PVRSRV_OK)) -+ { -+ goto PhysHeapGetMemInfo_exit; -+ } -+ -+ /* If dest ptr is non-null and we have data to copy */ -+ if ((pasapPhysHeapMemStatsInt) && -+ ((psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS)) > 0)) -+ { -+ if (unlikely -+ (OSCopyToUser -+ (NULL, (void __user *)psPhysHeapGetMemInfoOUT->pasapPhysHeapMemStats, -+ pasapPhysHeapMemStatsInt, -+ (psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS))) != -+ PVRSRV_OK)) -+ { -+ psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto PhysHeapGetMemInfo_exit; -+ } -+ } -+ -+PhysHeapGetMemInfo_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psPhysHeapGetMemInfoOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeGetDefaultPhysicalHeap(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psGetDefaultPhysicalHeapIN_UI8, -+ IMG_UINT8 * psGetDefaultPhysicalHeapOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP *psGetDefaultPhysicalHeapIN = -+ (PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP *) -+ IMG_OFFSET_ADDR(psGetDefaultPhysicalHeapIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP *psGetDefaultPhysicalHeapOUT = -+ (PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP *) -+ IMG_OFFSET_ADDR(psGetDefaultPhysicalHeapOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psGetDefaultPhysicalHeapIN); -+ -+ psGetDefaultPhysicalHeapOUT->eError = -+ PVRSRVGetDefaultPhysicalHeapKM(psConnection, OSGetDevNode(psConnection), -+ &psGetDefaultPhysicalHeapOUT->eHeap); -+ -+ return 
0; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemGetFaultAddress(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemGetFaultAddressIN_UI8, -+ IMG_UINT8 * psDevmemGetFaultAddressOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *) -+ IMG_OFFSET_ADDR(psDevmemGetFaultAddressIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *) -+ IMG_OFFSET_ADDR(psDevmemGetFaultAddressOUT_UI8, 0); -+ -+ IMG_HANDLE hDevmemCtx = psDevmemGetFaultAddressIN->hDevmemCtx; -+ DEVMEMINT_CTX *psDevmemCtxInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDevmemGetFaultAddressOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psDevmemCtxInt, -+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); -+ if (unlikely(psDevmemGetFaultAddressOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemGetFaultAddress_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDevmemGetFaultAddressOUT->eError = -+ DevmemIntGetFaultAddress(psConnection, OSGetDevNode(psConnection), -+ psDevmemCtxInt, &psDevmemGetFaultAddressOUT->sFaultAddress); -+ -+DevmemGetFaultAddress_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psDevmemCtxInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+#if defined(PVRSRV_ENABLE_PROCESS_STATS) -+ -+static IMG_INT -+PVRSRVBridgePVRSRVStatsUpdateOOMStat(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psPVRSRVStatsUpdateOOMStatIN_UI8, -+ IMG_UINT8 * psPVRSRVStatsUpdateOOMStatOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT *psPVRSRVStatsUpdateOOMStatIN = -+ (PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT *) -+ IMG_OFFSET_ADDR(psPVRSRVStatsUpdateOOMStatIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT *psPVRSRVStatsUpdateOOMStatOUT = -+ (PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT *) -+ IMG_OFFSET_ADDR(psPVRSRVStatsUpdateOOMStatOUT_UI8, 0); -+ -+ psPVRSRVStatsUpdateOOMStatOUT->eError = -+ PVRSRVStatsUpdateOOMStat(psConnection, OSGetDevNode(psConnection), -+ psPVRSRVStatsUpdateOOMStatIN->ui32ui32StatType, -+ psPVRSRVStatsUpdateOOMStatIN->ui32pid); -+ -+ return 0; -+} -+ -+#else -+#define PVRSRVBridgePVRSRVStatsUpdateOOMStat NULL -+#endif -+ -+static PVRSRV_ERROR _DevmemXIntReserveRangepsReservationIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = DevmemXIntUnreserveRange((DEVMEMXINT_RESERVATION *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemXIntReserveRange(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemXIntReserveRangeIN_UI8, -+ IMG_UINT8 * psDevmemXIntReserveRangeOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE *psDevmemXIntReserveRangeIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE *) -+ IMG_OFFSET_ADDR(psDevmemXIntReserveRangeIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE *psDevmemXIntReserveRangeOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE *) -+ IMG_OFFSET_ADDR(psDevmemXIntReserveRangeOUT_UI8, 0); -+ -+ IMG_HANDLE hDevmemServerHeap = psDevmemXIntReserveRangeIN->hDevmemServerHeap; -+ DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; -+ DEVMEMXINT_RESERVATION *psReservationInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDevmemXIntReserveRangeOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psDevmemServerHeapInt, -+ hDevmemServerHeap, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); -+ if (unlikely(psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemXIntReserveRange_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDevmemXIntReserveRangeOUT->eError = -+ DevmemXIntReserveRange(psDevmemServerHeapInt, -+ psDevmemXIntReserveRangeIN->sAddress, -+ psDevmemXIntReserveRangeIN->uiLength, &psReservationInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK)) -+ { -+ goto DevmemXIntReserveRange_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psDevmemXIntReserveRangeOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psDevmemXIntReserveRangeOUT-> -+ hReservation, -+ (void *)psReservationInt, -+ PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _DevmemXIntReserveRangepsReservationIntRelease); -+ if (unlikely(psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemXIntReserveRange_exit; -+ } -+ -+ /* Release now we have created handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+DevmemXIntReserveRange_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psDevmemServerHeapInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK) -+ { -+ if (psReservationInt) -+ { -+ DevmemXIntUnreserveRange(psReservationInt); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemXIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemXIntUnreserveRangeIN_UI8, -+ IMG_UINT8 * psDevmemXIntUnreserveRangeOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE *psDevmemXIntUnreserveRangeIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE *) -+ IMG_OFFSET_ADDR(psDevmemXIntUnreserveRangeIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE *psDevmemXIntUnreserveRangeOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE *) -+ IMG_OFFSET_ADDR(psDevmemXIntUnreserveRangeOUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psDevmemXIntUnreserveRangeOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psDevmemXIntUnreserveRangeIN-> -+ hReservation, -+ PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION); -+ if (unlikely -+ ((psDevmemXIntUnreserveRangeOUT->eError != PVRSRV_OK) -+ && (psDevmemXIntUnreserveRangeOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) -+ && (psDevmemXIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psDevmemXIntUnreserveRangeOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemXIntUnreserveRange_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+DevmemXIntUnreserveRange_exit: -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemXIntMapPages(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemXIntMapPagesIN_UI8, -+ IMG_UINT8 * psDevmemXIntMapPagesOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES *psDevmemXIntMapPagesIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemXIntMapPagesIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES *psDevmemXIntMapPagesOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemXIntMapPagesOUT_UI8, -+ 0); -+ -+ IMG_HANDLE hReservation = psDevmemXIntMapPagesIN->hReservation; -+ DEVMEMXINT_RESERVATION *psReservationInt = NULL; -+ IMG_HANDLE hPMR = psDevmemXIntMapPagesIN->hPMR; -+ PMR *psPMRInt = NULL; -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDevmemXIntMapPagesOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psReservationInt, -+ hReservation, -+ PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, IMG_TRUE); -+ if (unlikely(psDevmemXIntMapPagesOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemXIntMapPages_exit; -+ } -+ -+ /* Look up the address from the handle */ -+ psDevmemXIntMapPagesOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRInt, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psDevmemXIntMapPagesOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemXIntMapPages_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDevmemXIntMapPagesOUT->eError = -+ DevmemXIntMapPages(psReservationInt, -+ psPMRInt, -+ psDevmemXIntMapPagesIN->ui32PageCount, -+ psDevmemXIntMapPagesIN->ui32PhysPageOffset, -+ psDevmemXIntMapPagesIN->uiFlags, -+ psDevmemXIntMapPagesIN->ui32VirtPageOffset); -+ -+DevmemXIntMapPages_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psReservationInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hReservation, -+ PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION); -+ } -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemXIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemXIntUnmapPagesIN_UI8, -+ IMG_UINT8 * psDevmemXIntUnmapPagesOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES *psDevmemXIntUnmapPagesIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES *) IMG_OFFSET_ADDR(psDevmemXIntUnmapPagesIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES *psDevmemXIntUnmapPagesOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES *) -+ IMG_OFFSET_ADDR(psDevmemXIntUnmapPagesOUT_UI8, 0); -+ -+ IMG_HANDLE hReservation = psDevmemXIntUnmapPagesIN->hReservation; -+ DEVMEMXINT_RESERVATION *psReservationInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDevmemXIntUnmapPagesOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psReservationInt, -+ hReservation, -+ PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, IMG_TRUE); -+ if (unlikely(psDevmemXIntUnmapPagesOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemXIntUnmapPages_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDevmemXIntUnmapPagesOUT->eError = -+ DevmemXIntUnmapPages(psReservationInt, -+ psDevmemXIntUnmapPagesIN->ui32VirtPageOffset, -+ psDevmemXIntUnmapPagesIN->ui32PageCount); -+ -+DevmemXIntUnmapPages_exit: -+ -+ /* Lock over handle lookup cleanup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psReservationInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hReservation, -+ PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeDevmemXIntMapVRangeToBackingPage(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDevmemXIntMapVRangeToBackingPageIN_UI8, -+ IMG_UINT8 * psDevmemXIntMapVRangeToBackingPageOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DEVMEMXINTMAPVRANGETOBACKINGPAGE *psDevmemXIntMapVRangeToBackingPageIN = -+ (PVRSRV_BRIDGE_IN_DEVMEMXINTMAPVRANGETOBACKINGPAGE *) -+ IMG_OFFSET_ADDR(psDevmemXIntMapVRangeToBackingPageIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPVRANGETOBACKINGPAGE *psDevmemXIntMapVRangeToBackingPageOUT = -+ (PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPVRANGETOBACKINGPAGE *) -+ IMG_OFFSET_ADDR(psDevmemXIntMapVRangeToBackingPageOUT_UI8, 0); -+ -+ IMG_HANDLE hReservation = psDevmemXIntMapVRangeToBackingPageIN->hReservation; -+ DEVMEMXINT_RESERVATION *psReservationInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psDevmemXIntMapVRangeToBackingPageOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psReservationInt, -+ hReservation, -+ PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, IMG_TRUE); -+ if (unlikely(psDevmemXIntMapVRangeToBackingPageOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto DevmemXIntMapVRangeToBackingPage_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psDevmemXIntMapVRangeToBackingPageOUT->eError = -+ DevmemXIntMapVRangeToBackingPage(psReservationInt, -+ psDevmemXIntMapVRangeToBackingPageIN->ui32PageCount, -+ psDevmemXIntMapVRangeToBackingPageIN->uiFlags, -+ psDevmemXIntMapVRangeToBackingPageIN-> -+ ui32VirtPageOffset); -+ -+DevmemXIntMapVRangeToBackingPage_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psReservationInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hReservation, -+ PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+PVRSRV_ERROR InitMMBridge(void); -+void DeinitMMBridge(void); -+ -+/* -+ * Register all MM functions with services -+ */ -+PVRSRV_ERROR InitMMBridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR, -+ PVRSRVBridgePMRExportPMR, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR, -+ PVRSRVBridgePMRUnexportPMR, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID, PVRSRVBridgePMRGetUID, -+ NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE, -+ PVRSRVBridgePMRMakeLocalImportHandle, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE, -+ PVRSRVBridgePMRUnmakeLocalImportHandle, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR, -+ PVRSRVBridgePMRImportPMR, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR, -+ PVRSRVBridgePMRLocalImportPMR, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR, -+ PVRSRVBridgePMRUnrefPMR, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR, -+ PVRSRVBridgePMRUnrefUnlockPMR, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR, -+ PVRSRVBridgePhysmemNewRamBackedPMR, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE, -+ PVRSRVBridgeDevmemIntCtxCreate, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY, -+ PVRSRVBridgeDevmemIntCtxDestroy, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE, -+ PVRSRVBridgeDevmemIntHeapCreate, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY, -+ PVRSRVBridgeDevmemIntHeapDestroy, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR, -+ PVRSRVBridgeDevmemIntMapPMR, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR, -+ PVRSRVBridgeDevmemIntUnmapPMR, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE, -+ PVRSRVBridgeDevmemIntReserveRange, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGEANDMAPPMR, -+ PVRSRVBridgeDevmemIntReserveRangeAndMapPMR, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGEANDUNMAPPMR, -+ PVRSRVBridgeDevmemIntUnreserveRangeAndUnmapPMR, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE, -+ PVRSRVBridgeDevmemIntUnreserveRange, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM, -+ PVRSRVBridgeChangeSparseMem, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID, -+ PVRSRVBridgeDevmemIsVDevAddrValid, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE, -+ PVRSRVBridgeDevmemInvalidateFBSCTable, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT, -+ PVRSRVBridgeHeapCfgHeapConfigCount, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT, -+ PVRSRVBridgeHeapCfgHeapCount, NULL); -+ -+ 
SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME, -+ PVRSRVBridgeHeapCfgHeapConfigName, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS, -+ PVRSRVBridgeHeapCfgHeapDetails, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM, -+ PVRSRVBridgeDevmemIntRegisterPFNotifyKM, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO, -+ PVRSRVBridgePhysHeapGetMemInfo, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP, -+ PVRSRVBridgeGetDefaultPhysicalHeap, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS, -+ PVRSRVBridgeDevmemGetFaultAddress, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVSTATSUPDATEOOMSTAT, -+ PVRSRVBridgePVRSRVStatsUpdateOOMStat, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTRESERVERANGE, -+ PVRSRVBridgeDevmemXIntReserveRange, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNRESERVERANGE, -+ PVRSRVBridgeDevmemXIntUnreserveRange, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTMAPPAGES, -+ PVRSRVBridgeDevmemXIntMapPages, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNMAPPAGES, -+ PVRSRVBridgeDevmemXIntUnmapPages, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTMAPVRANGETOBACKINGPAGE, -+ PVRSRVBridgeDevmemXIntMapVRangeToBackingPage, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all mm functions with services -+ */ -+void DeinitMMBridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGEANDMAPPMR); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, -+ PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGEANDUNMAPPMR); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM); -+ -+ 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVSTATSUPDATEOOMSTAT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTRESERVERANGE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNRESERVERANGE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTMAPPAGES); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNMAPPAGES); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, -+ PVRSRV_BRIDGE_MM_DEVMEMXINTMAPVRANGETOBACKINGPAGE); -+ -+} -diff --git a/drivers/gpu/drm/img-rogue/server_mmextmem_bridge.c b/drivers/gpu/drm/img-rogue/server_mmextmem_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_mmextmem_bridge.c -@@ -0,0 +1,164 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for mmextmem -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for mmextmem -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "devicemem_server.h" -+#include "pmr.h" -+#include "devicemem_heapcfg.h" -+#include "physmem.h" -+#include "physmem_extmem.h" -+ -+#include "common_mmextmem_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static PVRSRV_ERROR _PhysmemWrapExtMempsPMRPtrIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PMRUnrefPMR((PMR *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgePhysmemWrapExtMem(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psPhysmemWrapExtMemIN_UI8, -+ IMG_UINT8 * psPhysmemWrapExtMemOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_PHYSMEMWRAPEXTMEM *psPhysmemWrapExtMemIN = -+ (PVRSRV_BRIDGE_IN_PHYSMEMWRAPEXTMEM *) IMG_OFFSET_ADDR(psPhysmemWrapExtMemIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_PHYSMEMWRAPEXTMEM *psPhysmemWrapExtMemOUT = -+ (PVRSRV_BRIDGE_OUT_PHYSMEMWRAPEXTMEM *) IMG_OFFSET_ADDR(psPhysmemWrapExtMemOUT_UI8, 0); -+ -+ PMR *psPMRPtrInt = NULL; -+ -+ psPhysmemWrapExtMemOUT->eError = -+ PhysmemWrapExtMem(psConnection, OSGetDevNode(psConnection), -+ psPhysmemWrapExtMemIN->uiSize, -+ psPhysmemWrapExtMemIN->ui64CpuVAddr, -+ psPhysmemWrapExtMemIN->uiFlags, &psPMRPtrInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psPhysmemWrapExtMemOUT->eError != PVRSRV_OK)) -+ { -+ goto PhysmemWrapExtMem_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psPhysmemWrapExtMemOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psPhysmemWrapExtMemOUT->hPMRPtr, -+ (void *)psPMRPtrInt, -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _PhysmemWrapExtMempsPMRPtrIntRelease); -+ if (unlikely(psPhysmemWrapExtMemOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto PhysmemWrapExtMem_exit; -+ } -+ -+ /* Release now we have created handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+PhysmemWrapExtMem_exit: -+ -+ if (psPhysmemWrapExtMemOUT->eError != PVRSRV_OK) -+ { -+ if (psPMRPtrInt) -+ { -+ LockHandle(KERNEL_HANDLE_BASE); -+ PMRUnrefPMR(psPMRPtrInt); -+ UnlockHandle(KERNEL_HANDLE_BASE); -+ } -+ } -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+PVRSRV_ERROR InitMMEXTMEMBridge(void); -+void DeinitMMEXTMEMBridge(void); -+ -+/* -+ * Register all MMEXTMEM functions with services -+ */ -+PVRSRV_ERROR InitMMEXTMEMBridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_MMEXTMEM, PVRSRV_BRIDGE_MMEXTMEM_PHYSMEMWRAPEXTMEM, -+ PVRSRVBridgePhysmemWrapExtMem, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all mmextmem functions with services -+ */ -+void DeinitMMEXTMEMBridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MMEXTMEM, PVRSRV_BRIDGE_MMEXTMEM_PHYSMEMWRAPEXTMEM); -+ -+} -diff --git a/drivers/gpu/drm/img-rogue/server_pvrtl_bridge.c b/drivers/gpu/drm/img-rogue/server_pvrtl_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_pvrtl_bridge.c -@@ -0,0 +1,814 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for pvrtl -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for pvrtl -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "tlserver.h" -+ -+#include "common_pvrtl_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static PVRSRV_ERROR _TLOpenStreampsSDIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = TLServerCloseStreamKM((TL_STREAM_DESC *) pvData); -+ return eError; -+} -+ -+static_assert(PRVSRVTL_MAX_STREAM_NAME_SIZE <= IMG_UINT32_MAX, -+ "PRVSRVTL_MAX_STREAM_NAME_SIZE must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psTLOpenStreamIN_UI8, -+ IMG_UINT8 * psTLOpenStreamOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_TLOPENSTREAM *psTLOpenStreamIN = -+ (PVRSRV_BRIDGE_IN_TLOPENSTREAM *) IMG_OFFSET_ADDR(psTLOpenStreamIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_TLOPENSTREAM *psTLOpenStreamOUT = -+ (PVRSRV_BRIDGE_OUT_TLOPENSTREAM *) IMG_OFFSET_ADDR(psTLOpenStreamOUT_UI8, 0); -+ -+ IMG_CHAR *uiNameInt = NULL; -+ TL_STREAM_DESC *psSDInt = NULL; -+ PMR *psTLPMRInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) + 0; -+ -+ psTLOpenStreamOUT->hSD = NULL; -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psTLOpenStreamOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto TLOpenStream_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psTLOpenStreamIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psTLOpenStreamIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psTLOpenStreamOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto TLOpenStream_exit; -+ } -+ } -+ } -+ -+ { -+ uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiNameInt, (const void __user *)psTLOpenStreamIN->puiName, -+ PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psTLOpenStreamOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto TLOpenStream_exit; -+ } -+ ((IMG_CHAR *) uiNameInt)[(PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) - 1] = -+ '\0'; -+ } -+ -+ psTLOpenStreamOUT->eError = -+ TLServerOpenStreamKM(uiNameInt, psTLOpenStreamIN->ui32Mode, &psSDInt, &psTLPMRInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK)) -+ { -+ goto TLOpenStream_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psTLOpenStreamOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psTLOpenStreamOUT->hSD, -+ (void *)psSDInt, -+ PVRSRV_HANDLE_TYPE_PVR_TL_SD, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _TLOpenStreampsSDIntRelease); -+ if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto TLOpenStream_exit; -+ } -+ -+ psTLOpenStreamOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, -+ &psTLOpenStreamOUT->hTLPMR, -+ (void *)psTLPMRInt, -+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ psTLOpenStreamOUT->hSD); -+ if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto TLOpenStream_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+TLOpenStream_exit: -+ -+ if (psTLOpenStreamOUT->eError != PVRSRV_OK) -+ { -+ if (psTLOpenStreamOUT->hSD) -+ { -+ PVRSRV_ERROR eError; -+ -+ /* Lock over handle creation cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psTLOpenStreamOUT->hSD, -+ PVRSRV_HANDLE_TYPE_PVR_TL_SD); -+ if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", __func__, PVRSRVGetErrorString(eError))); -+ } -+ /* Releasing the handle should free/destroy/release the resource. -+ * This should never fail... */ -+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); -+ -+ /* Avoid freeing/destroying/releasing the resource a second time below */ -+ psSDInt = NULL; -+ /* Release now we have cleaned up creation handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ } -+ -+ if (psSDInt) -+ { -+ TLServerCloseStreamKM(psSDInt); -+ } -+ } -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psTLOpenStreamOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeTLCloseStream(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psTLCloseStreamIN_UI8, -+ IMG_UINT8 * psTLCloseStreamOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_TLCLOSESTREAM *psTLCloseStreamIN = -+ (PVRSRV_BRIDGE_IN_TLCLOSESTREAM *) IMG_OFFSET_ADDR(psTLCloseStreamIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *psTLCloseStreamOUT = -+ (PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *) IMG_OFFSET_ADDR(psTLCloseStreamOUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psTLCloseStreamOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psTLCloseStreamIN->hSD, -+ PVRSRV_HANDLE_TYPE_PVR_TL_SD); -+ if (unlikely((psTLCloseStreamOUT->eError != PVRSRV_OK) && -+ (psTLCloseStreamOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psTLCloseStreamOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", __func__, PVRSRVGetErrorString(psTLCloseStreamOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto TLCloseStream_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+TLCloseStream_exit: -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeTLAcquireData(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psTLAcquireDataIN_UI8, -+ IMG_UINT8 * psTLAcquireDataOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_TLACQUIREDATA *psTLAcquireDataIN = -+ (PVRSRV_BRIDGE_IN_TLACQUIREDATA *) IMG_OFFSET_ADDR(psTLAcquireDataIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_TLACQUIREDATA *psTLAcquireDataOUT = -+ (PVRSRV_BRIDGE_OUT_TLACQUIREDATA *) IMG_OFFSET_ADDR(psTLAcquireDataOUT_UI8, 0); -+ -+ IMG_HANDLE hSD = psTLAcquireDataIN->hSD; -+ TL_STREAM_DESC *psSDInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psTLAcquireDataOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psSDInt, -+ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); -+ if (unlikely(psTLAcquireDataOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto TLAcquireData_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psTLAcquireDataOUT->eError = -+ TLServerAcquireDataKM(psSDInt, -+ &psTLAcquireDataOUT->ui32ReadOffset, -+ &psTLAcquireDataOUT->ui32ReadLen); -+ -+TLAcquireData_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psSDInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeTLReleaseData(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psTLReleaseDataIN_UI8, -+ IMG_UINT8 * psTLReleaseDataOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_TLRELEASEDATA *psTLReleaseDataIN = -+ (PVRSRV_BRIDGE_IN_TLRELEASEDATA *) IMG_OFFSET_ADDR(psTLReleaseDataIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_TLRELEASEDATA *psTLReleaseDataOUT = -+ (PVRSRV_BRIDGE_OUT_TLRELEASEDATA *) IMG_OFFSET_ADDR(psTLReleaseDataOUT_UI8, 0); -+ -+ IMG_HANDLE hSD = psTLReleaseDataIN->hSD; -+ TL_STREAM_DESC *psSDInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psTLReleaseDataOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psSDInt, -+ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); -+ if (unlikely(psTLReleaseDataOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto TLReleaseData_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psTLReleaseDataOUT->eError = -+ TLServerReleaseDataKM(psSDInt, -+ psTLReleaseDataIN->ui32ReadOffset, -+ psTLReleaseDataIN->ui32ReadLen); -+ -+TLReleaseData_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psSDInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static_assert(PRVSRVTL_MAX_STREAM_NAME_SIZE <= IMG_UINT32_MAX, -+ "PRVSRVTL_MAX_STREAM_NAME_SIZE must not be larger than IMG_UINT32_MAX"); -+static_assert(PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER <= IMG_UINT32_MAX, -+ "PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psTLDiscoverStreamsIN_UI8, -+ IMG_UINT8 * psTLDiscoverStreamsOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *psTLDiscoverStreamsIN = -+ (PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *) IMG_OFFSET_ADDR(psTLDiscoverStreamsIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *psTLDiscoverStreamsOUT = -+ (PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *) IMG_OFFSET_ADDR(psTLDiscoverStreamsOUT_UI8, 0); -+ -+ IMG_CHAR *uiNamePatternInt = NULL; -+ IMG_CHAR *puiStreamsInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) + -+ ((IMG_UINT64) psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) + 0; -+ -+ if (psTLDiscoverStreamsIN->ui32Size > PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER) -+ { -+ psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto TLDiscoverStreams_exit; -+ } -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ psTLDiscoverStreamsOUT->puiStreams = psTLDiscoverStreamsIN->puiStreams; -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto TLDiscoverStreams_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer 
for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psTLDiscoverStreamsIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psTLDiscoverStreamsIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto TLDiscoverStreams_exit; -+ } -+ } -+ } -+ -+ { -+ uiNamePatternInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiNamePatternInt, -+ (const void __user *)psTLDiscoverStreamsIN->puiNamePattern, -+ PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto TLDiscoverStreams_exit; -+ } -+ ((IMG_CHAR *) uiNamePatternInt)[(PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) - -+ 1] = '\0'; -+ } -+ if (psTLDiscoverStreamsIN->ui32Size != 0) -+ { -+ puiStreamsInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR); -+ } -+ -+ psTLDiscoverStreamsOUT->eError = -+ TLServerDiscoverStreamsKM(uiNamePatternInt, -+ psTLDiscoverStreamsIN->ui32Size, -+ puiStreamsInt, &psTLDiscoverStreamsOUT->ui32NumFound); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psTLDiscoverStreamsOUT->eError != PVRSRV_OK)) -+ { -+ goto TLDiscoverStreams_exit; -+ } -+ -+ /* If dest ptr is non-null and we have data to copy */ -+ if ((puiStreamsInt) && ((psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) > 0)) -+ { -+ if (unlikely -+ (OSCopyToUser -+ (NULL, (void __user *)psTLDiscoverStreamsOUT->puiStreams, puiStreamsInt, -+ (psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR))) != PVRSRV_OK)) -+ { -+ psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto TLDiscoverStreams_exit; -+ } -+ } -+ -+TLDiscoverStreams_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psTLDiscoverStreamsOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeTLReserveStream(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psTLReserveStreamIN_UI8, -+ IMG_UINT8 * psTLReserveStreamOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_TLRESERVESTREAM *psTLReserveStreamIN = -+ (PVRSRV_BRIDGE_IN_TLRESERVESTREAM *) IMG_OFFSET_ADDR(psTLReserveStreamIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *psTLReserveStreamOUT = -+ (PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *) IMG_OFFSET_ADDR(psTLReserveStreamOUT_UI8, 0); -+ -+ IMG_HANDLE hSD = psTLReserveStreamIN->hSD; -+ TL_STREAM_DESC *psSDInt = NULL; -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psTLReserveStreamOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psSDInt, -+ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); -+ if (unlikely(psTLReserveStreamOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto TLReserveStream_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psTLReserveStreamOUT->eError = -+ TLServerReserveStreamKM(psSDInt, -+ &psTLReserveStreamOUT->ui32BufferOffset, -+ psTLReserveStreamIN->ui32Size, -+ psTLReserveStreamIN->ui32SizeMin, -+ &psTLReserveStreamOUT->ui32Available); -+ -+TLReserveStream_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psSDInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeTLCommitStream(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psTLCommitStreamIN_UI8, -+ IMG_UINT8 * psTLCommitStreamOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_TLCOMMITSTREAM *psTLCommitStreamIN = -+ (PVRSRV_BRIDGE_IN_TLCOMMITSTREAM *) IMG_OFFSET_ADDR(psTLCommitStreamIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *psTLCommitStreamOUT = -+ (PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *) IMG_OFFSET_ADDR(psTLCommitStreamOUT_UI8, 0); -+ -+ IMG_HANDLE hSD = psTLCommitStreamIN->hSD; -+ TL_STREAM_DESC *psSDInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psTLCommitStreamOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psSDInt, -+ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); -+ if (unlikely(psTLCommitStreamOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto TLCommitStream_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psTLCommitStreamOUT->eError = -+ TLServerCommitStreamKM(psSDInt, psTLCommitStreamIN->ui32ReqSize); -+ -+TLCommitStream_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psSDInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static_assert(PVRSRVTL_MAX_PACKET_SIZE <= IMG_UINT32_MAX, -+ "PVRSRVTL_MAX_PACKET_SIZE must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psTLWriteDataIN_UI8, -+ IMG_UINT8 * psTLWriteDataOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_TLWRITEDATA *psTLWriteDataIN = -+ (PVRSRV_BRIDGE_IN_TLWRITEDATA *) IMG_OFFSET_ADDR(psTLWriteDataIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_TLWRITEDATA *psTLWriteDataOUT = -+ (PVRSRV_BRIDGE_OUT_TLWRITEDATA *) IMG_OFFSET_ADDR(psTLWriteDataOUT_UI8, 0); -+ -+ IMG_HANDLE hSD = psTLWriteDataIN->hSD; -+ TL_STREAM_DESC *psSDInt = NULL; -+ IMG_BYTE *ui8DataInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) + 0; -+ -+ if (unlikely(psTLWriteDataIN->ui32Size > PVRSRVTL_MAX_PACKET_SIZE)) -+ { -+ psTLWriteDataOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto TLWriteData_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psTLWriteDataOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto TLWriteData_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psTLWriteDataIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psTLWriteDataIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psTLWriteDataOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto TLWriteData_exit; -+ } -+ } -+ } -+ -+ if (psTLWriteDataIN->ui32Size != 0) -+ { -+ ui8DataInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE); -+ } -+ -+ /* Copy the data over */ -+ if (psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui8DataInt, (const void __user *)psTLWriteDataIN->pui8Data, -+ psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) != PVRSRV_OK) -+ { -+ psTLWriteDataOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto TLWriteData_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psTLWriteDataOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psSDInt, -+ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); -+ if (unlikely(psTLWriteDataOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto TLWriteData_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psTLWriteDataOUT->eError = -+ TLServerWriteDataKM(psSDInt, psTLWriteDataIN->ui32Size, ui8DataInt); -+ -+TLWriteData_exit: -+ -+ /* Lock over handle lookup cleanup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psSDInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psTLWriteDataOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+PVRSRV_ERROR InitPVRTLBridge(void); -+void DeinitPVRTLBridge(void); -+ -+/* -+ * Register all PVRTL functions with services -+ */ -+PVRSRV_ERROR InitPVRTLBridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM, -+ PVRSRVBridgeTLOpenStream, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM, -+ PVRSRVBridgeTLCloseStream, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA, -+ PVRSRVBridgeTLAcquireData, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA, -+ PVRSRVBridgeTLReleaseData, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS, -+ PVRSRVBridgeTLDiscoverStreams, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM, -+ PVRSRVBridgeTLReserveStream, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM, -+ PVRSRVBridgeTLCommitStream, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLWRITEDATA, -+ PVRSRVBridgeTLWriteData, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all pvrtl functions with services -+ */ -+void DeinitPVRTLBridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLWRITEDATA); -+ -+} -diff --git a/drivers/gpu/drm/img-rogue/server_rgxbreakpoint_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxbreakpoint_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_rgxbreakpoint_bridge.c -@@ -0,0 +1,371 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for rgxbreakpoint -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for rgxbreakpoint -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "rgxbreakpoint.h" -+ -+#include "common_rgxbreakpoint_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+#if !defined(EXCLUDE_RGXBREAKPOINT_BRIDGE) -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static IMG_INT -+PVRSRVBridgeRGXSetBreakpoint(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXSetBreakpointIN_UI8, -+ IMG_UINT8 * psRGXSetBreakpointOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT *psRGXSetBreakpointIN = -+ (PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT *) IMG_OFFSET_ADDR(psRGXSetBreakpointIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT *psRGXSetBreakpointOUT = -+ (PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT *) IMG_OFFSET_ADDR(psRGXSetBreakpointOUT_UI8, 0); -+ -+ IMG_HANDLE hPrivData = psRGXSetBreakpointIN->hPrivData; -+ IMG_HANDLE hPrivDataInt = NULL; -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXSetBreakpointOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&hPrivDataInt, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); -+ if (unlikely(psRGXSetBreakpointOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXSetBreakpoint_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXSetBreakpointOUT->eError = -+ PVRSRVRGXSetBreakpointKM(psConnection, OSGetDevNode(psConnection), -+ hPrivDataInt, -+ psRGXSetBreakpointIN->eFWDataMaster, -+ psRGXSetBreakpointIN->ui64TempSpillingAddr, -+ psRGXSetBreakpointIN->ui32BreakpointAddr, -+ psRGXSetBreakpointIN->ui32HandlerAddr, -+ psRGXSetBreakpointIN->ui32DM); -+ -+RGXSetBreakpoint_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (hPrivDataInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXClearBreakpoint(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXClearBreakpointIN_UI8, -+ IMG_UINT8 * psRGXClearBreakpointOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT *psRGXClearBreakpointIN = -+ (PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT *) IMG_OFFSET_ADDR(psRGXClearBreakpointIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT *psRGXClearBreakpointOUT = -+ (PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT *) IMG_OFFSET_ADDR(psRGXClearBreakpointOUT_UI8, -+ 0); -+ -+ IMG_HANDLE hPrivData = psRGXClearBreakpointIN->hPrivData; -+ IMG_HANDLE hPrivDataInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXClearBreakpointOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&hPrivDataInt, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); -+ if (unlikely(psRGXClearBreakpointOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXClearBreakpoint_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXClearBreakpointOUT->eError = -+ PVRSRVRGXClearBreakpointKM(psConnection, OSGetDevNode(psConnection), hPrivDataInt); -+ -+RGXClearBreakpoint_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (hPrivDataInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXEnableBreakpoint(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXEnableBreakpointIN_UI8, -+ IMG_UINT8 * psRGXEnableBreakpointOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT *psRGXEnableBreakpointIN = -+ (PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT *) IMG_OFFSET_ADDR(psRGXEnableBreakpointIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT *psRGXEnableBreakpointOUT = -+ (PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT *) IMG_OFFSET_ADDR(psRGXEnableBreakpointOUT_UI8, -+ 0); -+ -+ IMG_HANDLE hPrivData = psRGXEnableBreakpointIN->hPrivData; -+ IMG_HANDLE hPrivDataInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXEnableBreakpointOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&hPrivDataInt, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); -+ if (unlikely(psRGXEnableBreakpointOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXEnableBreakpoint_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXEnableBreakpointOUT->eError = -+ PVRSRVRGXEnableBreakpointKM(psConnection, OSGetDevNode(psConnection), hPrivDataInt); -+ -+RGXEnableBreakpoint_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (hPrivDataInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXDisableBreakpoint(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXDisableBreakpointIN_UI8, -+ IMG_UINT8 * psRGXDisableBreakpointOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointIN = -+ (PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT *) IMG_OFFSET_ADDR(psRGXDisableBreakpointIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointOUT = -+ (PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT *) -+ IMG_OFFSET_ADDR(psRGXDisableBreakpointOUT_UI8, 0); -+ -+ IMG_HANDLE hPrivData = psRGXDisableBreakpointIN->hPrivData; -+ IMG_HANDLE hPrivDataInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXDisableBreakpointOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&hPrivDataInt, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); -+ if (unlikely(psRGXDisableBreakpointOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXDisableBreakpoint_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXDisableBreakpointOUT->eError = -+ PVRSRVRGXDisableBreakpointKM(psConnection, OSGetDevNode(psConnection), hPrivDataInt); -+ -+RGXDisableBreakpoint_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (hPrivDataInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXOverallocateBPRegisters(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXOverallocateBPRegistersIN_UI8, -+ IMG_UINT8 * psRGXOverallocateBPRegistersOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS *psRGXOverallocateBPRegistersIN = -+ (PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS *) -+ IMG_OFFSET_ADDR(psRGXOverallocateBPRegistersIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS *psRGXOverallocateBPRegistersOUT = -+ (PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS *) -+ IMG_OFFSET_ADDR(psRGXOverallocateBPRegistersOUT_UI8, 0); -+ -+ psRGXOverallocateBPRegistersOUT->eError = -+ PVRSRVRGXOverallocateBPRegistersKM(psConnection, OSGetDevNode(psConnection), -+ psRGXOverallocateBPRegistersIN->ui32TempRegs, -+ psRGXOverallocateBPRegistersIN->ui32SharedRegs); -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+#endif /* EXCLUDE_RGXBREAKPOINT_BRIDGE */ -+ -+#if !defined(EXCLUDE_RGXBREAKPOINT_BRIDGE) -+PVRSRV_ERROR InitRGXBREAKPOINTBridge(void); -+void DeinitRGXBREAKPOINTBridge(void); -+ -+/* -+ * Register all RGXBREAKPOINT functions with services -+ */ -+PVRSRV_ERROR InitRGXBREAKPOINTBridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, -+ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT, -+ PVRSRVBridgeRGXSetBreakpoint, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, -+ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT, -+ PVRSRVBridgeRGXClearBreakpoint, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, -+ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT, -+ PVRSRVBridgeRGXEnableBreakpoint, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, -+ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT, -+ PVRSRVBridgeRGXDisableBreakpoint, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, -+ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS, -+ PVRSRVBridgeRGXOverallocateBPRegisters, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all rgxbreakpoint functions with services -+ */ -+void DeinitRGXBREAKPOINTBridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, -+ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, -+ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, -+ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, -+ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, -+ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS); -+ -+} -+#else /* EXCLUDE_RGXBREAKPOINT_BRIDGE */ -+/* This bridge is conditional on EXCLUDE_RGXBREAKPOINT_BRIDGE - when defined, -+ * do not populate the dispatch table with its functions -+ */ -+#define InitRGXBREAKPOINTBridge() \ -+ PVRSRV_OK -+ -+#define DeinitRGXBREAKPOINTBridge() -+ -+#endif /* EXCLUDE_RGXBREAKPOINT_BRIDGE */ -diff --git a/drivers/gpu/drm/img-rogue/server_rgxcmp_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxcmp_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_rgxcmp_bridge.c -@@ -0,0 +1,1314 @@ -+/******************************************************************************* -+@File -+@Title Server 
bridge for rgxcmp -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for rgxcmp -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "rgxcompute.h" -+ -+#include "common_rgxcmp_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+#include "rgx_bvnc_defs_km.h" -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static PVRSRV_ERROR _RGXCreateComputeContextpsComputeContextIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PVRSRVRGXDestroyComputeContextKM((RGX_SERVER_COMPUTE_CONTEXT *) pvData); -+ return eError; -+} -+ -+static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX, -+ "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -+static_assert(RGXFWIF_STATIC_COMPUTECONTEXT_SIZE <= IMG_UINT32_MAX, -+ "RGXFWIF_STATIC_COMPUTECONTEXT_SIZE must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXCreateComputeContextIN_UI8, -+ IMG_UINT8 * psRGXCreateComputeContextOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextIN = -+ (PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *) -+ IMG_OFFSET_ADDR(psRGXCreateComputeContextIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextOUT = -+ (PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *) -+ IMG_OFFSET_ADDR(psRGXCreateComputeContextOUT_UI8, 0); -+ -+ IMG_BYTE *ui8FrameworkCmdInt = NULL; -+ IMG_HANDLE hPrivData = psRGXCreateComputeContextIN->hPrivData; -+ IMG_HANDLE hPrivDataInt = NULL; -+ IMG_BYTE *ui8StaticComputeContextStateInt = NULL; -+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psRGXCreateComputeContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) + -+ ((IMG_UINT64) psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize * -+ sizeof(IMG_BYTE)) + 0; -+ -+ if (unlikely(psRGXCreateComputeContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE)) -+ { -+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXCreateComputeContext_exit; -+ } -+ -+ if (unlikely -+ (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize > -+ RGXFWIF_STATIC_COMPUTECONTEXT_SIZE)) -+ { -+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXCreateComputeContext_exit; -+ } -+ -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); -+ -+ /* Check that device supports the required feature */ -+ if ((psDeviceNode->pfnCheckDeviceFeature) && -+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, -+ RGX_FEATURE_COMPUTE_BIT_MASK)) -+ { -+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ -+ goto RGXCreateComputeContext_exit; -+ } -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXCreateComputeContext_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXCreateComputeContextIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateComputeContextIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXCreateComputeContext_exit; -+ } -+ } -+ } -+ -+ if (psRGXCreateComputeContextIN->ui32FrameworkCmdSize != 0) -+ { -+ ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psRGXCreateComputeContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXCreateComputeContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui8FrameworkCmdInt, -+ (const void __user *)psRGXCreateComputeContextIN->pui8FrameworkCmd, -+ psRGXCreateComputeContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) != -+ PVRSRV_OK) -+ { -+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXCreateComputeContext_exit; -+ } -+ } -+ if (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize != 0) -+ { -+ ui8StaticComputeContextStateInt = -+ (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize * -+ sizeof(IMG_BYTE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize * sizeof(IMG_BYTE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui8StaticComputeContextStateInt, -+ (const void __user *)psRGXCreateComputeContextIN-> -+ pui8StaticComputeContextState, -+ psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize * -+ sizeof(IMG_BYTE)) != PVRSRV_OK) -+ { -+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXCreateComputeContext_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXCreateComputeContextOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&hPrivDataInt, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); -+ if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXCreateComputeContext_exit; -+ } -+ /* Release now we have looked up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXCreateComputeContextOUT->eError = -+ PVRSRVRGXCreateComputeContextKM(psConnection, OSGetDevNode(psConnection), -+ psRGXCreateComputeContextIN->i32Priority, -+ psRGXCreateComputeContextIN->ui32FrameworkCmdSize, -+ ui8FrameworkCmdInt, -+ hPrivDataInt, -+ psRGXCreateComputeContextIN-> -+ ui32StaticComputeContextStateSize, -+ ui8StaticComputeContextStateInt, -+ psRGXCreateComputeContextIN->ui32PackedCCBSizeU88, -+ psRGXCreateComputeContextIN->ui32ContextFlags, -+ psRGXCreateComputeContextIN->ui64RobustnessAddress, -+ psRGXCreateComputeContextIN->ui32MaxDeadlineMS, -+ &psComputeContextInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)) -+ { -+ goto RGXCreateComputeContext_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXCreateComputeContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psRGXCreateComputeContextOUT-> -+ hComputeContext, -+ (void *) -+ psComputeContextInt, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _RGXCreateComputeContextpsComputeContextIntRelease); -+ if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXCreateComputeContext_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXCreateComputeContext_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (hPrivDataInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK) -+ { -+ if (psComputeContextInt) -+ { -+ PVRSRVRGXDestroyComputeContextKM(psComputeContextInt); -+ } -+ } -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXCreateComputeContextOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXDestroyComputeContext(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXDestroyComputeContextIN_UI8, -+ IMG_UINT8 * psRGXDestroyComputeContextOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextIN = -+ (PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *) -+ IMG_OFFSET_ADDR(psRGXDestroyComputeContextIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextOUT = -+ (PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT *) -+ IMG_OFFSET_ADDR(psRGXDestroyComputeContextOUT_UI8, 0); -+ -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); -+ -+ /* Check that device supports the required feature */ -+ if ((psDeviceNode->pfnCheckDeviceFeature) && -+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, -+ RGX_FEATURE_COMPUTE_BIT_MASK)) -+ { -+ psRGXDestroyComputeContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ -+ goto RGXDestroyComputeContext_exit; -+ } -+ } -+ -+ /* Lock over handle destruction. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXDestroyComputeContextOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psRGXDestroyComputeContextIN-> -+ hComputeContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); -+ if (unlikely -+ ((psRGXDestroyComputeContextOUT->eError != PVRSRV_OK) -+ && (psRGXDestroyComputeContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) -+ && (psRGXDestroyComputeContextOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psRGXDestroyComputeContextOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXDestroyComputeContext_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXDestroyComputeContext_exit: -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXFlushComputeData(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXFlushComputeDataIN_UI8, -+ IMG_UINT8 * psRGXFlushComputeDataOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataIN = -+ (PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *) IMG_OFFSET_ADDR(psRGXFlushComputeDataIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataOUT = -+ (PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *) IMG_OFFSET_ADDR(psRGXFlushComputeDataOUT_UI8, -+ 0); -+ -+ IMG_HANDLE hComputeContext = psRGXFlushComputeDataIN->hComputeContext; -+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; -+ -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); -+ -+ /* Check that device supports the required feature */ -+ if ((psDeviceNode->pfnCheckDeviceFeature) && -+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, -+ RGX_FEATURE_COMPUTE_BIT_MASK)) -+ { -+ psRGXFlushComputeDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ -+ goto RGXFlushComputeData_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXFlushComputeDataOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psComputeContextInt, -+ hComputeContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); -+ if (unlikely(psRGXFlushComputeDataOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXFlushComputeData_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXFlushComputeDataOUT->eError = PVRSRVRGXFlushComputeDataKM(psComputeContextInt); -+ -+RGXFlushComputeData_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psComputeContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hComputeContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXSetComputeContextPriority(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXSetComputeContextPriorityIN_UI8, -+ IMG_UINT8 * psRGXSetComputeContextPriorityOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityIN = -+ (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY *) -+ IMG_OFFSET_ADDR(psRGXSetComputeContextPriorityIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityOUT = -+ (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY *) -+ IMG_OFFSET_ADDR(psRGXSetComputeContextPriorityOUT_UI8, 0); -+ -+ IMG_HANDLE hComputeContext = psRGXSetComputeContextPriorityIN->hComputeContext; -+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; -+ -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); -+ -+ /* Check that device supports the required feature */ -+ if ((psDeviceNode->pfnCheckDeviceFeature) && -+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, -+ RGX_FEATURE_COMPUTE_BIT_MASK)) -+ { -+ psRGXSetComputeContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ -+ goto RGXSetComputeContextPriority_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXSetComputeContextPriorityOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psComputeContextInt, -+ hComputeContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); -+ if (unlikely(psRGXSetComputeContextPriorityOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXSetComputeContextPriority_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXSetComputeContextPriorityOUT->eError = -+ PVRSRVRGXSetComputeContextPriorityKM(psConnection, OSGetDevNode(psConnection), -+ psComputeContextInt, -+ psRGXSetComputeContextPriorityIN->i32Priority); -+ -+RGXSetComputeContextPriority_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psComputeContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hComputeContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXNotifyComputeWriteOffsetUpdateIN_UI8, -+ IMG_UINT8 * -+ psRGXNotifyComputeWriteOffsetUpdateOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *psRGXNotifyComputeWriteOffsetUpdateIN = -+ (PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *) -+ IMG_OFFSET_ADDR(psRGXNotifyComputeWriteOffsetUpdateIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *psRGXNotifyComputeWriteOffsetUpdateOUT -+ = -+ (PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *) -+ IMG_OFFSET_ADDR(psRGXNotifyComputeWriteOffsetUpdateOUT_UI8, 0); -+ -+ IMG_HANDLE hComputeContext = psRGXNotifyComputeWriteOffsetUpdateIN->hComputeContext; -+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; -+ -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); -+ -+ /* Check that device supports the required feature */ -+ if ((psDeviceNode->pfnCheckDeviceFeature) && -+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, -+ RGX_FEATURE_COMPUTE_BIT_MASK)) -+ { -+ psRGXNotifyComputeWriteOffsetUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ -+ goto RGXNotifyComputeWriteOffsetUpdate_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXNotifyComputeWriteOffsetUpdateOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psComputeContextInt, -+ hComputeContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); -+ if (unlikely(psRGXNotifyComputeWriteOffsetUpdateOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXNotifyComputeWriteOffsetUpdate_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXNotifyComputeWriteOffsetUpdateOUT->eError = -+ PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(psComputeContextInt); -+ -+RGXNotifyComputeWriteOffsetUpdate_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psComputeContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hComputeContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, -+ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); -+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, -+ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); -+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, -+ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, -+ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXKickCDM2IN_UI8, -+ IMG_UINT8 * psRGXKickCDM2OUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXKICKCDM2 *psRGXKickCDM2IN = -+ (PVRSRV_BRIDGE_IN_RGXKICKCDM2 *) IMG_OFFSET_ADDR(psRGXKickCDM2IN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *psRGXKickCDM2OUT = -+ (PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *) IMG_OFFSET_ADDR(psRGXKickCDM2OUT_UI8, 0); -+ -+ IMG_HANDLE hComputeContext = psRGXKickCDM2IN->hComputeContext; -+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; -+ SYNC_PRIMITIVE_BLOCK **psClientUpdateUFOSyncPrimBlockInt = NULL; -+ IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL; -+ IMG_UINT32 *ui32ClientUpdateOffsetInt = NULL; -+ IMG_UINT32 *ui32ClientUpdateValueInt = NULL; -+ IMG_CHAR *uiUpdateFenceNameInt = NULL; -+ IMG_BYTE *ui8DMCmdInt = NULL; -+ IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; -+ PMR **psSyncPMRsInt = NULL; -+ IMG_HANDLE *hSyncPMRsInt2 = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) + -+ ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + -+ ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + -+ ((IMG_UINT64) psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) + -+ ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *)) + -+ ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; -+ -+ if (unlikely(psRGXKickCDM2IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS)) -+ { -+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXKickCDM2_exit; -+ } -+ -+ if (unlikely(psRGXKickCDM2IN->ui32CmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) -+ { -+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXKickCDM2_exit; -+ } -+ -+ if (unlikely(psRGXKickCDM2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) -+ { -+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXKickCDM2_exit; -+ } -+ -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); -+ -+ /* Check that device supports the required feature */ -+ if ((psDeviceNode->pfnCheckDeviceFeature) && -+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, -+ RGX_FEATURE_COMPUTE_BIT_MASK)) -+ { -+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ -+ goto RGXKickCDM2_exit; -+ } -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXKickCDM2_exit; -+ } -+ -+ ui32BufferSize = 
(IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXKickCDM2IN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickCDM2IN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXKickCDM2_exit; -+ } -+ } -+ } -+ -+ if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) -+ { -+ psClientUpdateUFOSyncPrimBlockInt = -+ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ OSCachedMemSet(psClientUpdateUFOSyncPrimBlockInt, 0, -+ psRGXKickCDM2IN->ui32ClientUpdateCount * -+ sizeof(SYNC_PRIMITIVE_BLOCK *)); -+ ui32NextOffset += -+ psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); -+ hClientUpdateUFOSyncPrimBlockInt2 = -+ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, hClientUpdateUFOSyncPrimBlockInt2, -+ (const void __user *)psRGXKickCDM2IN->phClientUpdateUFOSyncPrimBlock, -+ psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) -+ { -+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickCDM2_exit; -+ } -+ } -+ if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) -+ { -+ ui32ClientUpdateOffsetInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32ClientUpdateOffsetInt, -+ (const void __user *)psRGXKickCDM2IN->pui32ClientUpdateOffset, -+ psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickCDM2_exit; -+ } -+ } -+ if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) -+ { -+ ui32ClientUpdateValueInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32ClientUpdateValueInt, -+ (const void __user *)psRGXKickCDM2IN->pui32ClientUpdateValue, -+ psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickCDM2_exit; -+ } -+ } -+ -+ { -+ uiUpdateFenceNameInt = -+ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiUpdateFenceNameInt, -+ (const void __user 
*)psRGXKickCDM2IN->puiUpdateFenceName, -+ PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickCDM2_exit; -+ } -+ ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - -+ 1] = '\0'; -+ } -+ if (psRGXKickCDM2IN->ui32CmdSize != 0) -+ { -+ ui8DMCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui8DMCmdInt, (const void __user *)psRGXKickCDM2IN->pui8DMCmd, -+ psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) -+ { -+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickCDM2_exit; -+ } -+ } -+ if (psRGXKickCDM2IN->ui32SyncPMRCount != 0) -+ { -+ ui32SyncPMRFlagsInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32SyncPMRFlagsInt, -+ (const void __user *)psRGXKickCDM2IN->pui32SyncPMRFlags, -+ psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickCDM2_exit; -+ } -+ } -+ if (psRGXKickCDM2IN->ui32SyncPMRCount != 0) -+ { -+ psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ OSCachedMemSet(psSyncPMRsInt, 0, psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *)); -+ ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *); -+ hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickCDM2IN->phSyncPMRs, -+ psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) -+ { -+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickCDM2_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXKickCDM2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psComputeContextInt, -+ hComputeContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); -+ if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXKickCDM2_exit; -+ } -+ -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++) -+ { -+ /* Look up the address from the handle */ -+ psRGXKickCDM2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **) -+ &psClientUpdateUFOSyncPrimBlockInt[i], -+ hClientUpdateUFOSyncPrimBlockInt2[i], -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, -+ IMG_TRUE); -+ if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXKickCDM2_exit; -+ } -+ } -+ } -+ -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXKickCDM2IN->ui32SyncPMRCount; i++) -+ { -+ /* Look up the address from the handle */ -+ psRGXKickCDM2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psSyncPMRsInt[i], -+ hSyncPMRsInt2[i], -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXKickCDM2_exit; -+ } -+ } -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXKickCDM2OUT->eError = -+ PVRSRVRGXKickCDMKM(psComputeContextInt, -+ psRGXKickCDM2IN->ui32ClientUpdateCount, -+ psClientUpdateUFOSyncPrimBlockInt, -+ ui32ClientUpdateOffsetInt, -+ ui32ClientUpdateValueInt, -+ psRGXKickCDM2IN->hCheckFenceFd, -+ psRGXKickCDM2IN->hUpdateTimeline, -+ &psRGXKickCDM2OUT->hUpdateFence, -+ uiUpdateFenceNameInt, -+ psRGXKickCDM2IN->ui32CmdSize, -+ ui8DMCmdInt, -+ psRGXKickCDM2IN->ui32PDumpFlags, -+ psRGXKickCDM2IN->ui32ExtJobRef, -+ psRGXKickCDM2IN->ui32SyncPMRCount, -+ ui32SyncPMRFlagsInt, -+ psSyncPMRsInt, -+ psRGXKickCDM2IN->ui32NumOfWorkgroups, -+ psRGXKickCDM2IN->ui32NumOfWorkitems, -+ psRGXKickCDM2IN->ui64DeadlineInus); -+ -+RGXKickCDM2_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psComputeContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hComputeContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); -+ } -+ -+ if (hClientUpdateUFOSyncPrimBlockInt2) -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++) -+ { -+ -+ /* Unreference the previously looked up handle */ -+ if (psClientUpdateUFOSyncPrimBlockInt -+ && psClientUpdateUFOSyncPrimBlockInt[i]) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hClientUpdateUFOSyncPrimBlockInt2[i], -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); -+ } -+ } -+ } -+ -+ if (hSyncPMRsInt2) -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXKickCDM2IN->ui32SyncPMRCount; i++) -+ { -+ -+ /* Unreference the previously looked up handle */ -+ if (psSyncPMRsInt && psSyncPMRsInt[i]) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hSyncPMRsInt2[i], -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ } -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXKickCDM2OUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXSetComputeContextProperty(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXSetComputeContextPropertyIN_UI8, -+ IMG_UINT8 * psRGXSetComputeContextPropertyOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyIN = -+ (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *) -+ IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyOUT = -+ (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *) -+ IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyOUT_UI8, 0); -+ -+ IMG_HANDLE hComputeContext = psRGXSetComputeContextPropertyIN->hComputeContext; -+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; -+ -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); -+ -+ /* Check that device supports the required feature */ -+ if ((psDeviceNode->pfnCheckDeviceFeature) && -+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, -+ RGX_FEATURE_COMPUTE_BIT_MASK)) -+ { -+ psRGXSetComputeContextPropertyOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ -+ goto RGXSetComputeContextProperty_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXSetComputeContextPropertyOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psComputeContextInt, -+ hComputeContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); -+ if (unlikely(psRGXSetComputeContextPropertyOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXSetComputeContextProperty_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXSetComputeContextPropertyOUT->eError = -+ PVRSRVRGXSetComputeContextPropertyKM(psComputeContextInt, -+ psRGXSetComputeContextPropertyIN->ui32Property, -+ psRGXSetComputeContextPropertyIN->ui64Input, -+ &psRGXSetComputeContextPropertyOUT->ui64Output); -+ -+RGXSetComputeContextProperty_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psComputeContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hComputeContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXGetLastDeviceError(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXGetLastDeviceErrorIN_UI8, -+ IMG_UINT8 * psRGXGetLastDeviceErrorOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorIN = -+ (PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *) -+ IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorOUT = -+ (PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *) -+ IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorOUT_UI8, 0); -+ -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); -+ -+ /* Check that device supports the required feature */ -+ if ((psDeviceNode->pfnCheckDeviceFeature) && -+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, -+ RGX_FEATURE_COMPUTE_BIT_MASK)) -+ { -+ psRGXGetLastDeviceErrorOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ -+ goto RGXGetLastDeviceError_exit; -+ } -+ } -+ -+ PVR_UNREFERENCED_PARAMETER(psRGXGetLastDeviceErrorIN); -+ -+ psRGXGetLastDeviceErrorOUT->eError = -+ PVRSRVRGXGetLastDeviceErrorKM(psConnection, OSGetDevNode(psConnection), -+ &psRGXGetLastDeviceErrorOUT->ui32Error); -+ -+RGXGetLastDeviceError_exit: -+ -+ return 0; -+} -+ -+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, -+ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRGXKickTimestampQuery(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXKickTimestampQueryIN_UI8, -+ IMG_UINT8 * psRGXKickTimestampQueryOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY *psRGXKickTimestampQueryIN = -+ (PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY *) -+ IMG_OFFSET_ADDR(psRGXKickTimestampQueryIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY *psRGXKickTimestampQueryOUT = -+ (PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY *) -+ IMG_OFFSET_ADDR(psRGXKickTimestampQueryOUT_UI8, 0); -+ -+ IMG_HANDLE hComputeContext = psRGXKickTimestampQueryIN->hComputeContext; -+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; -+ IMG_BYTE *ui8DMCmdInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE)) + 0; -+ -+ if (unlikely(psRGXKickTimestampQueryIN->ui32CmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) -+ { -+ psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXKickTimestampQuery_exit; -+ } -+ -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); -+ -+ /* Check that device supports the required feature */ -+ if ((psDeviceNode->pfnCheckDeviceFeature) && -+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, -+ RGX_FEATURE_COMPUTE_BIT_MASK)) -+ { -+ psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ -+ goto RGXKickTimestampQuery_exit; -+ } -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXKickTimestampQuery_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXKickTimestampQueryIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickTimestampQueryIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXKickTimestampQuery_exit; -+ } -+ } -+ } -+ -+ if (psRGXKickTimestampQueryIN->ui32CmdSize != 0) -+ { -+ ui8DMCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui8DMCmdInt, (const void __user *)psRGXKickTimestampQueryIN->pui8DMCmd, -+ psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) -+ { -+ psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickTimestampQuery_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXKickTimestampQueryOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psComputeContextInt, -+ hComputeContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); -+ if (unlikely(psRGXKickTimestampQueryOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXKickTimestampQuery_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXKickTimestampQueryOUT->eError = -+ PVRSRVRGXKickTimestampQueryKM(psComputeContextInt, -+ psRGXKickTimestampQueryIN->hCheckFenceFd, -+ psRGXKickTimestampQueryIN->ui32CmdSize, -+ ui8DMCmdInt, psRGXKickTimestampQueryIN->ui32ExtJobRef); -+ -+RGXKickTimestampQuery_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psComputeContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hComputeContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXKickTimestampQueryOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+PVRSRV_ERROR InitRGXCMPBridge(void); -+void DeinitRGXCMPBridge(void); -+ -+/* -+ * Register all RGXCMP functions with services -+ */ -+PVRSRV_ERROR InitRGXCMPBridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT, -+ PVRSRVBridgeRGXCreateComputeContext, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT, -+ PVRSRVBridgeRGXDestroyComputeContext, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA, -+ PVRSRVBridgeRGXFlushComputeData, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, -+ PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY, -+ PVRSRVBridgeRGXSetComputeContextPriority, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, -+ PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE, -+ PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2, -+ PVRSRVBridgeRGXKickCDM2, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, -+ PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY, -+ PVRSRVBridgeRGXSetComputeContextProperty, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR, -+ PVRSRVBridgeRGXGetLastDeviceError, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKTIMESTAMPQUERY, -+ PVRSRVBridgeRGXKickTimestampQuery, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all rgxcmp functions with services -+ */ -+void DeinitRGXCMPBridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, -+ PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, -+ PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, -+ PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, -+ PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKTIMESTAMPQUERY); -+ -+} -diff --git a/drivers/gpu/drm/img-rogue/server_rgxfwdbg_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxfwdbg_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_rgxfwdbg_bridge.c -@@ -0,0 +1,545 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for rgxfwdbg -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@Description Implements the server side of the bridge for rgxfwdbg -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "devicemem_server.h" -+#include "rgxfwdbg.h" -+#include "pmr.h" -+#include "rgxtimecorr.h" -+ -+#include "common_rgxfwdbg_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static IMG_INT -+PVRSRVBridgeRGXFWDebugSetFWLog(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXFWDebugSetFWLogIN_UI8, -+ IMG_UINT8 * psRGXFWDebugSetFWLogOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG *psRGXFWDebugSetFWLogIN = -+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG *) IMG_OFFSET_ADDR(psRGXFWDebugSetFWLogIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG *psRGXFWDebugSetFWLogOUT = -+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG *) IMG_OFFSET_ADDR(psRGXFWDebugSetFWLogOUT_UI8, -+ 0); -+ -+ psRGXFWDebugSetFWLogOUT->eError = -+ PVRSRVRGXFWDebugSetFWLogKM(psConnection, OSGetDevNode(psConnection), -+ psRGXFWDebugSetFWLogIN->ui32RGXFWLogType); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXFWDebugDumpFreelistPageList(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXFWDebugDumpFreelistPageListIN_UI8, -+ IMG_UINT8 * psRGXFWDebugDumpFreelistPageListOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST *psRGXFWDebugDumpFreelistPageListIN = -+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST *) -+ IMG_OFFSET_ADDR(psRGXFWDebugDumpFreelistPageListIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST *psRGXFWDebugDumpFreelistPageListOUT = -+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST *) -+ IMG_OFFSET_ADDR(psRGXFWDebugDumpFreelistPageListOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psRGXFWDebugDumpFreelistPageListIN); -+ -+ psRGXFWDebugDumpFreelistPageListOUT->eError = -+ PVRSRVRGXFWDebugDumpFreelistPageListKM(psConnection, OSGetDevNode(psConnection)); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXFWDebugSuspendDevice(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXFWDebugSuspendDeviceIN_UI8, -+ IMG_UINT8 * psRGXFWDebugSuspendDeviceOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXFWDEBUGSUSPENDDEVICE *psRGXFWDebugSuspendDeviceIN = -+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSUSPENDDEVICE *) -+ IMG_OFFSET_ADDR(psRGXFWDebugSuspendDeviceIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSUSPENDDEVICE *psRGXFWDebugSuspendDeviceOUT = -+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSUSPENDDEVICE *) -+ IMG_OFFSET_ADDR(psRGXFWDebugSuspendDeviceOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psRGXFWDebugSuspendDeviceIN); -+ -+ psRGXFWDebugSuspendDeviceOUT->eError = -+ PVRSRVRGXFWDebugSuspendDeviceKM(psConnection, OSGetDevNode(psConnection)); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXFWDebugResumeDevice(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXFWDebugResumeDeviceIN_UI8, -+ IMG_UINT8 * psRGXFWDebugResumeDeviceOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXFWDEBUGRESUMEDEVICE *psRGXFWDebugResumeDeviceIN = -+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGRESUMEDEVICE *) -+ IMG_OFFSET_ADDR(psRGXFWDebugResumeDeviceIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGRESUMEDEVICE *psRGXFWDebugResumeDeviceOUT = -+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGRESUMEDEVICE 
*) -+ IMG_OFFSET_ADDR(psRGXFWDebugResumeDeviceOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psRGXFWDebugResumeDeviceIN); -+ -+ psRGXFWDebugResumeDeviceOUT->eError = -+ PVRSRVRGXFWDebugResumeDeviceKM(psConnection, OSGetDevNode(psConnection)); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXFWDebugSetVzConnectionCooldownPeriodInSec(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * -+ psRGXFWDebugSetVzConnectionCooldownPeriodInSecIN_UI8, -+ IMG_UINT8 * -+ psRGXFWDebugSetVzConnectionCooldownPeriodInSecOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC -+ *psRGXFWDebugSetVzConnectionCooldownPeriodInSecIN = -+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC *) -+ IMG_OFFSET_ADDR(psRGXFWDebugSetVzConnectionCooldownPeriodInSecIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC -+ *psRGXFWDebugSetVzConnectionCooldownPeriodInSecOUT = -+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC *) -+ IMG_OFFSET_ADDR(psRGXFWDebugSetVzConnectionCooldownPeriodInSecOUT_UI8, 0); -+ -+ psRGXFWDebugSetVzConnectionCooldownPeriodInSecOUT->eError = -+ PVRSRVRGXFWDebugSetVzConnectionCooldownPeriodInSecKM(psConnection, -+ OSGetDevNode(psConnection), -+ psRGXFWDebugSetVzConnectionCooldownPeriodInSecIN-> -+ ui32ui32VzConnectionCooldownPeriodInSec); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXFWDebugSetHCSDeadline(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXFWDebugSetHCSDeadlineIN_UI8, -+ IMG_UINT8 * psRGXFWDebugSetHCSDeadlineOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE *psRGXFWDebugSetHCSDeadlineIN = -+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE *) -+ IMG_OFFSET_ADDR(psRGXFWDebugSetHCSDeadlineIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE *psRGXFWDebugSetHCSDeadlineOUT = -+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE *) -+ IMG_OFFSET_ADDR(psRGXFWDebugSetHCSDeadlineOUT_UI8, 0); -+ -+ psRGXFWDebugSetHCSDeadlineOUT->eError = -+ PVRSRVRGXFWDebugSetHCSDeadlineKM(psConnection, OSGetDevNode(psConnection), -+ psRGXFWDebugSetHCSDeadlineIN->ui32RGXHCSDeadline); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXFWDebugSetDriverPriority(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXFWDebugSetDriverPriorityIN_UI8, -+ IMG_UINT8 * psRGXFWDebugSetDriverPriorityOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY *psRGXFWDebugSetDriverPriorityIN = -+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY *) -+ IMG_OFFSET_ADDR(psRGXFWDebugSetDriverPriorityIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY *psRGXFWDebugSetDriverPriorityOUT = -+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY *) -+ IMG_OFFSET_ADDR(psRGXFWDebugSetDriverPriorityOUT_UI8, 0); -+ -+ psRGXFWDebugSetDriverPriorityOUT->eError = -+ PVRSRVRGXFWDebugSetDriverPriorityKM(psConnection, OSGetDevNode(psConnection), -+ psRGXFWDebugSetDriverPriorityIN->ui32DriverID, -+ psRGXFWDebugSetDriverPriorityIN->ui32Priority); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXFWDebugSetDriverTimeSlice(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXFWDebugSetDriverTimeSliceIN_UI8, -+ IMG_UINT8 * psRGXFWDebugSetDriverTimeSliceOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICE *psRGXFWDebugSetDriverTimeSliceIN = -+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICE *) -+ IMG_OFFSET_ADDR(psRGXFWDebugSetDriverTimeSliceIN_UI8, 0); -+ 
PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICE *psRGXFWDebugSetDriverTimeSliceOUT = -+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICE *) -+ IMG_OFFSET_ADDR(psRGXFWDebugSetDriverTimeSliceOUT_UI8, 0); -+ -+ psRGXFWDebugSetDriverTimeSliceOUT->eError = -+ PVRSRVRGXFWDebugSetDriverTimeSliceKM(psConnection, OSGetDevNode(psConnection), -+ psRGXFWDebugSetDriverTimeSliceIN->ui32DriverID, -+ psRGXFWDebugSetDriverTimeSliceIN->ui32TimeSlice); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXFWDebugSetDriverTimeSliceInterval(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * -+ psRGXFWDebugSetDriverTimeSliceIntervalIN_UI8, -+ IMG_UINT8 * -+ psRGXFWDebugSetDriverTimeSliceIntervalOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL -+ *psRGXFWDebugSetDriverTimeSliceIntervalIN = -+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL *) -+ IMG_OFFSET_ADDR(psRGXFWDebugSetDriverTimeSliceIntervalIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL -+ *psRGXFWDebugSetDriverTimeSliceIntervalOUT = -+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL *) -+ IMG_OFFSET_ADDR(psRGXFWDebugSetDriverTimeSliceIntervalOUT_UI8, 0); -+ -+ psRGXFWDebugSetDriverTimeSliceIntervalOUT->eError = -+ PVRSRVRGXFWDebugSetDriverTimeSliceIntervalKM(psConnection, OSGetDevNode(psConnection), -+ psRGXFWDebugSetDriverTimeSliceIntervalIN-> -+ ui32TimeSliceInterval); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXFWDebugSetDriverIsolationGroup(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXFWDebugSetDriverIsolationGroupIN_UI8, -+ IMG_UINT8 * -+ psRGXFWDebugSetDriverIsolationGroupOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP *psRGXFWDebugSetDriverIsolationGroupIN = -+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP *) -+ IMG_OFFSET_ADDR(psRGXFWDebugSetDriverIsolationGroupIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP *psRGXFWDebugSetDriverIsolationGroupOUT -+ = -+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP *) -+ IMG_OFFSET_ADDR(psRGXFWDebugSetDriverIsolationGroupOUT_UI8, 0); -+ -+ psRGXFWDebugSetDriverIsolationGroupOUT->eError = -+ PVRSRVRGXFWDebugSetDriverIsolationGroupKM(psConnection, OSGetDevNode(psConnection), -+ psRGXFWDebugSetDriverIsolationGroupIN-> -+ ui32DriverID, -+ psRGXFWDebugSetDriverIsolationGroupIN-> -+ ui32IsolationGroup); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXFWDebugSetOSNewOnlineState(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXFWDebugSetOSNewOnlineStateIN_UI8, -+ IMG_UINT8 * psRGXFWDebugSetOSNewOnlineStateOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE *psRGXFWDebugSetOSNewOnlineStateIN = -+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE *) -+ IMG_OFFSET_ADDR(psRGXFWDebugSetOSNewOnlineStateIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE *psRGXFWDebugSetOSNewOnlineStateOUT = -+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE *) -+ IMG_OFFSET_ADDR(psRGXFWDebugSetOSNewOnlineStateOUT_UI8, 0); -+ -+ psRGXFWDebugSetOSNewOnlineStateOUT->eError = -+ PVRSRVRGXFWDebugSetOSNewOnlineStateKM(psConnection, OSGetDevNode(psConnection), -+ psRGXFWDebugSetOSNewOnlineStateIN->ui32DriverID, -+ psRGXFWDebugSetOSNewOnlineStateIN-> -+ ui32OSNewState); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXFWDebugMapGuestHeap(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXFWDebugMapGuestHeapIN_UI8, -+ IMG_UINT8 * psRGXFWDebugMapGuestHeapOUT_UI8, -+ 
CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP *psRGXFWDebugMapGuestHeapIN = -+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP *) -+ IMG_OFFSET_ADDR(psRGXFWDebugMapGuestHeapIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGMAPGUESTHEAP *psRGXFWDebugMapGuestHeapOUT = -+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGMAPGUESTHEAP *) -+ IMG_OFFSET_ADDR(psRGXFWDebugMapGuestHeapOUT_UI8, 0); -+ -+ psRGXFWDebugMapGuestHeapOUT->eError = -+ PVRSRVRGXFWDebugMapGuestHeapKM(psConnection, OSGetDevNode(psConnection), -+ psRGXFWDebugMapGuestHeapIN->ui32DriverID, -+ psRGXFWDebugMapGuestHeapIN->ui64ui64GuestHeapBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXFWDebugPHRConfigure(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXFWDebugPHRConfigureIN_UI8, -+ IMG_UINT8 * psRGXFWDebugPHRConfigureOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE *psRGXFWDebugPHRConfigureIN = -+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE *) -+ IMG_OFFSET_ADDR(psRGXFWDebugPHRConfigureIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE *psRGXFWDebugPHRConfigureOUT = -+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE *) -+ IMG_OFFSET_ADDR(psRGXFWDebugPHRConfigureOUT_UI8, 0); -+ -+ psRGXFWDebugPHRConfigureOUT->eError = -+ PVRSRVRGXFWDebugPHRConfigureKM(psConnection, OSGetDevNode(psConnection), -+ psRGXFWDebugPHRConfigureIN->ui32ui32PHRMode); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXFWDebugWdgConfigure(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXFWDebugWdgConfigureIN_UI8, -+ IMG_UINT8 * psRGXFWDebugWdgConfigureOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE *psRGXFWDebugWdgConfigureIN = -+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE *) -+ IMG_OFFSET_ADDR(psRGXFWDebugWdgConfigureIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE *psRGXFWDebugWdgConfigureOUT = -+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE *) -+ IMG_OFFSET_ADDR(psRGXFWDebugWdgConfigureOUT_UI8, 0); -+ -+ psRGXFWDebugWdgConfigureOUT->eError = -+ PVRSRVRGXFWDebugWdgConfigureKM(psConnection, OSGetDevNode(psConnection), -+ psRGXFWDebugWdgConfigureIN->ui32ui32WdgPeriodUs); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXCurrentTime(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXCurrentTimeIN_UI8, -+ IMG_UINT8 * psRGXCurrentTimeOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXCURRENTTIME *psRGXCurrentTimeIN = -+ (PVRSRV_BRIDGE_IN_RGXCURRENTTIME *) IMG_OFFSET_ADDR(psRGXCurrentTimeIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *psRGXCurrentTimeOUT = -+ (PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *) IMG_OFFSET_ADDR(psRGXCurrentTimeOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psRGXCurrentTimeIN); -+ -+ psRGXCurrentTimeOUT->eError = -+ PVRSRVRGXCurrentTime(psConnection, OSGetDevNode(psConnection), -+ &psRGXCurrentTimeOUT->ui64Time); -+ -+ return 0; -+} -+ -+#if defined(SUPPORT_VALIDATION) -+ -+static IMG_INT -+PVRSRVBridgeRGXFWDebugInjectFault(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXFWDebugInjectFaultIN_UI8, -+ IMG_UINT8 * psRGXFWDebugInjectFaultOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXFWDEBUGINJECTFAULT *psRGXFWDebugInjectFaultIN = -+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGINJECTFAULT *) -+ IMG_OFFSET_ADDR(psRGXFWDebugInjectFaultIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT *psRGXFWDebugInjectFaultOUT = -+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT *) -+ IMG_OFFSET_ADDR(psRGXFWDebugInjectFaultOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psRGXFWDebugInjectFaultIN); -+ -+ 
psRGXFWDebugInjectFaultOUT->eError = -+ PVRSRVRGXFWDebugInjectFaultKM(psConnection, OSGetDevNode(psConnection)); -+ -+ return 0; -+} -+ -+#else -+#define PVRSRVBridgeRGXFWDebugInjectFault NULL -+#endif -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+PVRSRV_ERROR InitRGXFWDBGBridge(void); -+void DeinitRGXFWDBGBridge(void); -+ -+/* -+ * Register all RGXFWDBG functions with services -+ */ -+PVRSRV_ERROR InitRGXFWDBGBridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG, -+ PVRSRVBridgeRGXFWDebugSetFWLog, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST, -+ PVRSRVBridgeRGXFWDebugDumpFreelistPageList, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSUSPENDDEVICE, -+ PVRSRVBridgeRGXFWDebugSuspendDevice, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGRESUMEDEVICE, -+ PVRSRVBridgeRGXFWDebugResumeDevice, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC, -+ PVRSRVBridgeRGXFWDebugSetVzConnectionCooldownPeriodInSec, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE, -+ PVRSRVBridgeRGXFWDebugSetHCSDeadline, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERPRIORITY, -+ PVRSRVBridgeRGXFWDebugSetDriverPriority, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICE, -+ PVRSRVBridgeRGXFWDebugSetDriverTimeSlice, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL, -+ PVRSRVBridgeRGXFWDebugSetDriverTimeSliceInterval, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERISOLATIONGROUP, -+ PVRSRVBridgeRGXFWDebugSetDriverIsolationGroup, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE, -+ PVRSRVBridgeRGXFWDebugSetOSNewOnlineState, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGMAPGUESTHEAP, -+ PVRSRVBridgeRGXFWDebugMapGuestHeap, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE, -+ PVRSRVBridgeRGXFWDebugPHRConfigure, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE, -+ PVRSRVBridgeRGXFWDebugWdgConfigure, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME, -+ PVRSRVBridgeRGXCurrentTime, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGINJECTFAULT, -+ PVRSRVBridgeRGXFWDebugInjectFault, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all rgxfwdbg functions with services -+ */ -+void DeinitRGXFWDBGBridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSUSPENDDEVICE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGRESUMEDEVICE); -+ -+ 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETVZCONNECTIONCOOLDOWNPERIODINSEC); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERPRIORITY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERTIMESLICEINTERVAL); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERISOLATIONGROUP); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGMAPGUESTHEAP); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, -+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGINJECTFAULT); -+ -+} -diff --git a/drivers/gpu/drm/img-rogue/server_rgxhwperf_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxhwperf_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_rgxhwperf_bridge.c -@@ -0,0 +1,1016 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for rgxhwperf -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for rgxhwperf -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "rgxhwperf.h" -+#include "rgx_fwif_km.h" -+ -+#include "common_rgxhwperf_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static IMG_INT -+PVRSRVBridgeRGXCtrlHWPerf(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXCtrlHWPerfIN_UI8, -+ IMG_UINT8 * psRGXCtrlHWPerfOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *psRGXCtrlHWPerfIN = -+ (PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *) IMG_OFFSET_ADDR(psRGXCtrlHWPerfIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *psRGXCtrlHWPerfOUT = -+ (PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *) IMG_OFFSET_ADDR(psRGXCtrlHWPerfOUT_UI8, 0); -+ -+ psRGXCtrlHWPerfOUT->eError = -+ PVRSRVRGXCtrlHWPerfKM(psConnection, OSGetDevNode(psConnection), -+ psRGXCtrlHWPerfIN->ui32StreamId, -+ psRGXCtrlHWPerfIN->bToggle, psRGXCtrlHWPerfIN->ui64Mask); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXGetHWPerfBvncFeatureFlagsIN_UI8, -+ IMG_UINT8 * psRGXGetHWPerfBvncFeatureFlagsOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS *psRGXGetHWPerfBvncFeatureFlagsIN = -+ (PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS *) -+ IMG_OFFSET_ADDR(psRGXGetHWPerfBvncFeatureFlagsIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS *psRGXGetHWPerfBvncFeatureFlagsOUT = -+ (PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS *) -+ IMG_OFFSET_ADDR(psRGXGetHWPerfBvncFeatureFlagsOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psRGXGetHWPerfBvncFeatureFlagsIN); -+ -+ psRGXGetHWPerfBvncFeatureFlagsOUT->eError = -+ PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(psConnection, OSGetDevNode(psConnection), -+ &psRGXGetHWPerfBvncFeatureFlagsOUT->sBVNC); -+ -+ return 0; -+} -+ -+static_assert(RGXFWIF_HWPERF_CTRL_BLKS_MAX <= IMG_UINT32_MAX, -+ "RGXFWIF_HWPERF_CTRL_BLKS_MAX must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRGXConfigMuxHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXConfigMuxHWPerfCountersIN_UI8, -+ IMG_UINT8 * psRGXConfigMuxHWPerfCountersOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS *psRGXConfigMuxHWPerfCountersIN = -+ (PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS *) -+ IMG_OFFSET_ADDR(psRGXConfigMuxHWPerfCountersIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS *psRGXConfigMuxHWPerfCountersOUT = -+ (PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS *) -+ IMG_OFFSET_ADDR(psRGXConfigMuxHWPerfCountersOUT_UI8, 0); -+ -+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psBlockConfigsInt = 
NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen * -+ sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)) + 0; -+ -+ if (unlikely(psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX)) -+ { -+ psRGXConfigMuxHWPerfCountersOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXConfigMuxHWPerfCounters_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXConfigMuxHWPerfCountersOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXConfigMuxHWPerfCounters_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXConfigMuxHWPerfCountersIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = -+ (IMG_BYTE *) (void *)psRGXConfigMuxHWPerfCountersIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXConfigMuxHWPerfCountersOUT->eError = -+ PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXConfigMuxHWPerfCounters_exit; -+ } -+ } -+ } -+ -+ if (psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen != 0) -+ { -+ psBlockConfigsInt = -+ (RGX_HWPERF_CONFIG_MUX_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, -+ ui32NextOffset); -+ ui32NextOffset += -+ psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen * -+ sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, psBlockConfigsInt, -+ (const void __user *)psRGXConfigMuxHWPerfCountersIN->psBlockConfigs, -+ psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen * -+ sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)) != PVRSRV_OK) -+ { -+ psRGXConfigMuxHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXConfigMuxHWPerfCounters_exit; -+ } -+ } -+ -+ psRGXConfigMuxHWPerfCountersOUT->eError = -+ PVRSRVRGXConfigMuxHWPerfCountersKM(psConnection, OSGetDevNode(psConnection), -+ psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen, -+ psBlockConfigsInt); -+ -+RGXConfigMuxHWPerfCounters_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXConfigMuxHWPerfCountersOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static_assert(RGXFWIF_HWPERF_CTRL_BLKS_MAX <= IMG_UINT32_MAX, -+ "RGXFWIF_HWPERF_CTRL_BLKS_MAX must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRGXControlHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXControlHWPerfBlocksIN_UI8, -+ IMG_UINT8 * psRGXControlHWPerfBlocksOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS *psRGXControlHWPerfBlocksIN = -+ (PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS *) -+ 
IMG_OFFSET_ADDR(psRGXControlHWPerfBlocksIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS *psRGXControlHWPerfBlocksOUT = -+ (PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS *) -+ IMG_OFFSET_ADDR(psRGXControlHWPerfBlocksOUT_UI8, 0); -+ -+ IMG_UINT16 *ui16BlockIDsInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16)) + 0; -+ -+ if (unlikely(psRGXControlHWPerfBlocksIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX)) -+ { -+ psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXControlHWPerfBlocks_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXControlHWPerfBlocks_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXControlHWPerfBlocksIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXControlHWPerfBlocksIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXControlHWPerfBlocks_exit; -+ } -+ } -+ } -+ -+ if (psRGXControlHWPerfBlocksIN->ui32ArrayLen != 0) -+ { -+ ui16BlockIDsInt = (IMG_UINT16 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui16BlockIDsInt, -+ (const void __user *)psRGXControlHWPerfBlocksIN->pui16BlockIDs, -+ psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16)) != PVRSRV_OK) -+ { -+ psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXControlHWPerfBlocks_exit; -+ } -+ } -+ -+ psRGXControlHWPerfBlocksOUT->eError = -+ PVRSRVRGXControlHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection), -+ psRGXControlHWPerfBlocksIN->bEnable, -+ psRGXControlHWPerfBlocksIN->ui32ArrayLen, -+ ui16BlockIDsInt); -+ -+RGXControlHWPerfBlocks_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXControlHWPerfBlocksOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static_assert(RGX_HWPERF_MAX_CUSTOM_CNTRS <= IMG_UINT32_MAX, -+ "RGX_HWPERF_MAX_CUSTOM_CNTRS must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRGXConfigCustomCounters(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXConfigCustomCountersIN_UI8, -+ IMG_UINT8 * psRGXConfigCustomCountersOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS 
*psRGXConfigCustomCountersIN = -+ (PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS *) -+ IMG_OFFSET_ADDR(psRGXConfigCustomCountersIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS *psRGXConfigCustomCountersOUT = -+ (PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS *) -+ IMG_OFFSET_ADDR(psRGXConfigCustomCountersOUT_UI8, 0); -+ -+ IMG_UINT32 *ui32CustomCounterIDsInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32)) + -+ 0; -+ -+ if (unlikely -+ (psRGXConfigCustomCountersIN->ui16NumCustomCounters > RGX_HWPERF_MAX_CUSTOM_CNTRS)) -+ { -+ psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXConfigCustomCounters_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXConfigCustomCounters_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXConfigCustomCountersIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXConfigCustomCountersIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXConfigCustomCounters_exit; -+ } -+ } -+ } -+ -+ if (psRGXConfigCustomCountersIN->ui16NumCustomCounters != 0) -+ { -+ ui32CustomCounterIDsInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32CustomCounterIDsInt, -+ (const void __user *)psRGXConfigCustomCountersIN->pui32CustomCounterIDs, -+ psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32)) != -+ PVRSRV_OK) -+ { -+ psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXConfigCustomCounters_exit; -+ } -+ } -+ -+ psRGXConfigCustomCountersOUT->eError = -+ PVRSRVRGXConfigCustomCountersKM(psConnection, OSGetDevNode(psConnection), -+ psRGXConfigCustomCountersIN->ui16CustomBlockID, -+ psRGXConfigCustomCountersIN->ui16NumCustomCounters, -+ ui32CustomCounterIDsInt); -+ -+RGXConfigCustomCounters_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXConfigCustomCountersOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static_assert(RGXFWIF_HWPERF_CTRL_BLKS_MAX <= IMG_UINT32_MAX, -+ "RGXFWIF_HWPERF_CTRL_BLKS_MAX must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT 
-+PVRSRVBridgeRGXConfigureHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXConfigureHWPerfBlocksIN_UI8, -+ IMG_UINT8 * psRGXConfigureHWPerfBlocksOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS *psRGXConfigureHWPerfBlocksIN = -+ (PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS *) -+ IMG_OFFSET_ADDR(psRGXConfigureHWPerfBlocksIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS *psRGXConfigureHWPerfBlocksOUT = -+ (PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS *) -+ IMG_OFFSET_ADDR(psRGXConfigureHWPerfBlocksOUT_UI8, 0); -+ -+ RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigsInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * -+ sizeof(RGX_HWPERF_CONFIG_CNTBLK)) + 0; -+ -+ if (unlikely(psRGXConfigureHWPerfBlocksIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX)) -+ { -+ psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXConfigureHWPerfBlocks_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXConfigureHWPerfBlocks_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXConfigureHWPerfBlocksIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXConfigureHWPerfBlocksIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXConfigureHWPerfBlocks_exit; -+ } -+ } -+ } -+ -+ if (psRGXConfigureHWPerfBlocksIN->ui32ArrayLen != 0) -+ { -+ psBlockConfigsInt = -+ (RGX_HWPERF_CONFIG_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, psBlockConfigsInt, -+ (const void __user *)psRGXConfigureHWPerfBlocksIN->psBlockConfigs, -+ psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * -+ sizeof(RGX_HWPERF_CONFIG_CNTBLK)) != PVRSRV_OK) -+ { -+ psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXConfigureHWPerfBlocks_exit; -+ } -+ } -+ -+ psRGXConfigureHWPerfBlocksOUT->eError = -+ PVRSRVRGXConfigureHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection), -+ psRGXConfigureHWPerfBlocksIN->ui32CtrlWord, -+ psRGXConfigureHWPerfBlocksIN->ui32ArrayLen, -+ psBlockConfigsInt); -+ -+RGXConfigureHWPerfBlocks_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXConfigureHWPerfBlocksOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && 
pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static_assert(1 <= IMG_UINT32_MAX, "1 must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRGXGetConfiguredHWPerfMuxCounters(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXGetConfiguredHWPerfMuxCountersIN_UI8, -+ IMG_UINT8 * -+ psRGXGetConfiguredHWPerfMuxCountersOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFMUXCOUNTERS *psRGXGetConfiguredHWPerfMuxCountersIN = -+ (PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFMUXCOUNTERS *) -+ IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfMuxCountersIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFMUXCOUNTERS *psRGXGetConfiguredHWPerfMuxCountersOUT -+ = -+ (PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFMUXCOUNTERS *) -+ IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfMuxCountersOUT_UI8, 0); -+ -+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psConfiguredMuxCountersInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) 1 * sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)) + 0; -+ -+ psRGXGetConfiguredHWPerfMuxCountersOUT->psConfiguredMuxCounters = -+ psRGXGetConfiguredHWPerfMuxCountersIN->psConfiguredMuxCounters; -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXGetConfiguredHWPerfMuxCountersOUT->eError = -+ PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXGetConfiguredHWPerfMuxCounters_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXGetConfiguredHWPerfMuxCountersIN), -+ sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = -+ (IMG_BYTE *) (void *)psRGXGetConfiguredHWPerfMuxCountersIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXGetConfiguredHWPerfMuxCountersOUT->eError = -+ PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXGetConfiguredHWPerfMuxCounters_exit; -+ } -+ } -+ } -+ -+ if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL) -+ { -+ psConfiguredMuxCountersInt = -+ (RGX_HWPERF_CONFIG_MUX_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, -+ ui32NextOffset); -+ ui32NextOffset += 1 * sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK); -+ } -+ -+ psRGXGetConfiguredHWPerfMuxCountersOUT->eError = -+ PVRSRVRGXGetConfiguredHWPerfMuxCountersKM(psConnection, OSGetDevNode(psConnection), -+ psRGXGetConfiguredHWPerfMuxCountersIN-> -+ ui32BlockID, psConfiguredMuxCountersInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psRGXGetConfiguredHWPerfMuxCountersOUT->eError != PVRSRV_OK)) -+ { -+ goto RGXGetConfiguredHWPerfMuxCounters_exit; -+ } -+ -+ /* If dest ptr is non-null and we have data to copy */ -+ if ((psConfiguredMuxCountersInt) && ((1 * sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)) > 0)) -+ { -+ if (unlikely -+ (OSCopyToUser -+ (NULL, -+ (void __user *)psRGXGetConfiguredHWPerfMuxCountersOUT-> -+ psConfiguredMuxCounters, psConfiguredMuxCountersInt, -+ (1 * sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK))) != PVRSRV_OK)) -+ { -+ psRGXGetConfiguredHWPerfMuxCountersOUT->eError = -+ PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXGetConfiguredHWPerfMuxCounters_exit; -+ } -+ } -+ -+RGXGetConfiguredHWPerfMuxCounters_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXGetConfiguredHWPerfMuxCountersOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static_assert(1 <= IMG_UINT32_MAX, "1 must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRGXGetConfiguredHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXGetConfiguredHWPerfCountersIN_UI8, -+ IMG_UINT8 * psRGXGetConfiguredHWPerfCountersOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS *psRGXGetConfiguredHWPerfCountersIN = -+ (PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS *) -+ IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfCountersIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS *psRGXGetConfiguredHWPerfCountersOUT = -+ (PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS *) -+ IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfCountersOUT_UI8, 0); -+ -+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCountersInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) 1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) + 0; -+ -+ psRGXGetConfiguredHWPerfCountersOUT->psConfiguredCounters = -+ psRGXGetConfiguredHWPerfCountersIN->psConfiguredCounters; -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXGetConfiguredHWPerfCountersOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXGetConfiguredHWPerfCounters_exit; -+ } -+ -+ ui32BufferSize 
= (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXGetConfiguredHWPerfCountersIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = -+ (IMG_BYTE *) (void *)psRGXGetConfiguredHWPerfCountersIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXGetConfiguredHWPerfCountersOUT->eError = -+ PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXGetConfiguredHWPerfCounters_exit; -+ } -+ } -+ } -+ -+ if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL) -+ { -+ psConfiguredCountersInt = -+ (RGX_HWPERF_CONFIG_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += 1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK); -+ } -+ -+ psRGXGetConfiguredHWPerfCountersOUT->eError = -+ PVRSRVRGXGetConfiguredHWPerfCountersKM(psConnection, OSGetDevNode(psConnection), -+ psRGXGetConfiguredHWPerfCountersIN->ui32BlockID, -+ psConfiguredCountersInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psRGXGetConfiguredHWPerfCountersOUT->eError != PVRSRV_OK)) -+ { -+ goto RGXGetConfiguredHWPerfCounters_exit; -+ } -+ -+ /* If dest ptr is non-null and we have data to copy */ -+ if ((psConfiguredCountersInt) && ((1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) > 0)) -+ { -+ if (unlikely -+ (OSCopyToUser -+ (NULL, -+ (void __user *)psRGXGetConfiguredHWPerfCountersOUT->psConfiguredCounters, -+ psConfiguredCountersInt, -+ (1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK))) != PVRSRV_OK)) -+ { -+ psRGXGetConfiguredHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXGetConfiguredHWPerfCounters_exit; -+ } -+ } -+ -+RGXGetConfiguredHWPerfCounters_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXGetConfiguredHWPerfCountersOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXGetEnabledHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXGetEnabledHWPerfBlocksIN_UI8, -+ IMG_UINT8 * psRGXGetEnabledHWPerfBlocksOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS *psRGXGetEnabledHWPerfBlocksIN = -+ (PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS *) -+ IMG_OFFSET_ADDR(psRGXGetEnabledHWPerfBlocksIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS *psRGXGetEnabledHWPerfBlocksOUT = -+ (PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS *) -+ IMG_OFFSET_ADDR(psRGXGetEnabledHWPerfBlocksOUT_UI8, 0); -+ -+ IMG_UINT32 *pui32EnabledBlockIDsInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32)) + 0; -+ -+ psRGXGetEnabledHWPerfBlocksOUT->pui32EnabledBlockIDs = -+ psRGXGetEnabledHWPerfBlocksIN->pui32EnabledBlockIDs; -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) 
-+ { -+ psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXGetEnabledHWPerfBlocks_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXGetEnabledHWPerfBlocksIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXGetEnabledHWPerfBlocksIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXGetEnabledHWPerfBlocks_exit; -+ } -+ } -+ } -+ -+ if (psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen != 0) -+ { -+ pui32EnabledBlockIDsInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32); -+ } -+ -+ psRGXGetEnabledHWPerfBlocksOUT->eError = -+ PVRSRVRGXGetEnabledHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection), -+ psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen, -+ &psRGXGetEnabledHWPerfBlocksOUT->ui32BlockCount, -+ pui32EnabledBlockIDsInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psRGXGetEnabledHWPerfBlocksOUT->eError != PVRSRV_OK)) -+ { -+ goto RGXGetEnabledHWPerfBlocks_exit; -+ } -+ -+ /* If dest ptr is non-null and we have data to copy */ -+ if ((pui32EnabledBlockIDsInt) && -+ ((psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32)) > 0)) -+ { -+ if (unlikely -+ (OSCopyToUser -+ (NULL, (void __user *)psRGXGetEnabledHWPerfBlocksOUT->pui32EnabledBlockIDs, -+ pui32EnabledBlockIDsInt, -+ (psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32))) != -+ PVRSRV_OK)) -+ { -+ psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXGetEnabledHWPerfBlocks_exit; -+ } -+ } -+ -+RGXGetEnabledHWPerfBlocks_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXGetEnabledHWPerfBlocksOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+#if defined(PVRSRV_FORCE_HWPERF_TO_SCHED_CLK) -+ -+static IMG_INT -+PVRSRVBridgeRGXGetHWPerfTimeStamp(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXGetHWPerfTimeStampIN_UI8, -+ IMG_UINT8 * psRGXGetHWPerfTimeStampOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXGETHWPERFTIMESTAMP *psRGXGetHWPerfTimeStampIN = -+ (PVRSRV_BRIDGE_IN_RGXGETHWPERFTIMESTAMP *) -+ IMG_OFFSET_ADDR(psRGXGetHWPerfTimeStampIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXGETHWPERFTIMESTAMP *psRGXGetHWPerfTimeStampOUT = -+ (PVRSRV_BRIDGE_OUT_RGXGETHWPERFTIMESTAMP *) -+ IMG_OFFSET_ADDR(psRGXGetHWPerfTimeStampOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psRGXGetHWPerfTimeStampIN); -+ -+ psRGXGetHWPerfTimeStampOUT->eError = -+ PVRSRVRGXGetHWPerfTimeStampKM(psConnection, OSGetDevNode(psConnection), -+ &psRGXGetHWPerfTimeStampOUT->ui64TimeStamp); -+ -+ return 0; -+} -+ -+#else -+#define 
PVRSRVBridgeRGXGetHWPerfTimeStamp NULL
-+#endif
-+
-+/* ***************************************************************************
-+ * Server bridge dispatch related glue
-+ */
-+
-+PVRSRV_ERROR InitRGXHWPERFBridge(void);
-+void DeinitRGXHWPERFBridge(void);
-+
-+/*
-+ * Register all RGXHWPERF functions with services
-+ */
-+PVRSRV_ERROR InitRGXHWPERFBridge(void)
-+{
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF,
-+ PVRSRVBridgeRGXCtrlHWPerf, NULL);
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS,
-+ PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags, NULL);
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGMUXHWPERFCOUNTERS,
-+ PVRSRVBridgeRGXConfigMuxHWPerfCounters, NULL);
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS,
-+ PVRSRVBridgeRGXControlHWPerfBlocks, NULL);
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS,
-+ PVRSRVBridgeRGXConfigCustomCounters, NULL);
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS,
-+ PVRSRVBridgeRGXConfigureHWPerfBlocks, NULL);
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFMUXCOUNTERS,
-+ PVRSRVBridgeRGXGetConfiguredHWPerfMuxCounters, NULL);
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS,
-+ PVRSRVBridgeRGXGetConfiguredHWPerfCounters, NULL);
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS,
-+ PVRSRVBridgeRGXGetEnabledHWPerfBlocks, NULL);
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFTIMESTAMP,
-+ PVRSRVBridgeRGXGetHWPerfTimeStamp, NULL);
-+
-+ return PVRSRV_OK;
-+}
-+
-+/*
-+ * Unregister all rgxhwperf functions with services
-+ */
-+void DeinitRGXHWPERFBridge(void)
-+{
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF);
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS);
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGMUXHWPERFCOUNTERS);
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS);
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS);
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS);
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFMUXCOUNTERS);
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS);
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS);
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
-+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFTIMESTAMP);
-+
-+}
-diff --git a/drivers/gpu/drm/img-rogue/server_rgxkicksync_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxkicksync_bridge.c
-new file mode 100644
-index 000000000000..111111111111
---- /dev/null
-+++ b/drivers/gpu/drm/img-rogue/server_rgxkicksync_bridge.c
-@@ -0,0 +1,586 @@
-+/*******************************************************************************
-+@File
-+@Title Server bridge for
rgxkicksync -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for rgxkicksync -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "rgxkicksync.h" -+ -+#include "common_rgxkicksync_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE) -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static PVRSRV_ERROR _RGXCreateKickSyncContextpsKickSyncContextIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PVRSRVRGXDestroyKickSyncContextKM((RGX_SERVER_KICKSYNC_CONTEXT *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXCreateKickSyncContext(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXCreateKickSyncContextIN_UI8, -+ IMG_UINT8 * psRGXCreateKickSyncContextOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextIN = -+ (PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *) -+ IMG_OFFSET_ADDR(psRGXCreateKickSyncContextIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextOUT = -+ (PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT *) -+ IMG_OFFSET_ADDR(psRGXCreateKickSyncContextOUT_UI8, 0); -+ -+ IMG_HANDLE hPrivData = psRGXCreateKickSyncContextIN->hPrivData; -+ IMG_HANDLE hPrivDataInt = NULL; -+ RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXCreateKickSyncContextOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&hPrivDataInt, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); -+ if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXCreateKickSyncContext_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXCreateKickSyncContextOUT->eError = -+ PVRSRVRGXCreateKickSyncContextKM(psConnection, OSGetDevNode(psConnection), -+ hPrivDataInt, -+ psRGXCreateKickSyncContextIN->ui32PackedCCBSizeU88, -+ psRGXCreateKickSyncContextIN->ui32ContextFlags, -+ &psKickSyncContextInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)) -+ { -+ goto RGXCreateKickSyncContext_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXCreateKickSyncContextOUT->eError = -+ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psRGXCreateKickSyncContextOUT->hKickSyncContext, -+ (void *)psKickSyncContextInt, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _RGXCreateKickSyncContextpsKickSyncContextIntRelease); -+ if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXCreateKickSyncContext_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXCreateKickSyncContext_exit: -+ -+ /* Lock over handle lookup cleanup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (hPrivDataInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK) -+ { -+ if (psKickSyncContextInt) -+ { -+ PVRSRVRGXDestroyKickSyncContextKM(psKickSyncContextInt); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXDestroyKickSyncContext(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXDestroyKickSyncContextIN_UI8, -+ IMG_UINT8 * psRGXDestroyKickSyncContextOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT *psRGXDestroyKickSyncContextIN = -+ (PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT *) -+ IMG_OFFSET_ADDR(psRGXDestroyKickSyncContextIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT *psRGXDestroyKickSyncContextOUT = -+ (PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT *) -+ IMG_OFFSET_ADDR(psRGXDestroyKickSyncContextOUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXDestroyKickSyncContextOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psRGXDestroyKickSyncContextIN-> -+ hKickSyncContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT); -+ if (unlikely -+ ((psRGXDestroyKickSyncContextOUT->eError != PVRSRV_OK) -+ && (psRGXDestroyKickSyncContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) -+ && (psRGXDestroyKickSyncContextOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psRGXDestroyKickSyncContextOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXDestroyKickSyncContext_exit; -+ } -+ -+ /* Release now we have destroyed handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXDestroyKickSyncContext_exit: -+ -+ return 0; -+} -+ -+static_assert(PVRSRV_MAX_DEV_VARS <= IMG_UINT32_MAX, -+ "PVRSRV_MAX_DEV_VARS must not be larger than IMG_UINT32_MAX"); -+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, -+ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRGXKickSync2(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXKickSync2IN_UI8, -+ IMG_UINT8 * psRGXKickSync2OUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXKICKSYNC2 *psRGXKickSync2IN = -+ (PVRSRV_BRIDGE_IN_RGXKICKSYNC2 *) IMG_OFFSET_ADDR(psRGXKickSync2IN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXKICKSYNC2 *psRGXKickSync2OUT = -+ (PVRSRV_BRIDGE_OUT_RGXKICKSYNC2 *) IMG_OFFSET_ADDR(psRGXKickSync2OUT_UI8, 0); -+ -+ IMG_HANDLE hKickSyncContext = psRGXKickSync2IN->hKickSyncContext; -+ RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL; -+ SYNC_PRIMITIVE_BLOCK **psUpdateUFODevVarBlockInt = NULL; -+ IMG_HANDLE *hUpdateUFODevVarBlockInt2 = NULL; -+ IMG_UINT32 *ui32UpdateDevVarOffsetInt = NULL; -+ IMG_UINT32 *ui32UpdateValueInt = NULL; -+ IMG_CHAR *uiUpdateFenceNameInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psRGXKickSync2IN->ui32ClientUpdateCount * -+ sizeof(SYNC_PRIMITIVE_BLOCK *)) + -+ ((IMG_UINT64) psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + -+ ((IMG_UINT64) psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + 0; -+ -+ if (unlikely(psRGXKickSync2IN->ui32ClientUpdateCount > PVRSRV_MAX_DEV_VARS)) -+ { -+ psRGXKickSync2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXKickSync2_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXKickSync2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXKickSync2_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXKickSync2IN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickSync2IN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXKickSync2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXKickSync2_exit; -+ } -+ } -+ } -+ -+ if (psRGXKickSync2IN->ui32ClientUpdateCount != 0) -+ { -+ psUpdateUFODevVarBlockInt = -+ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ OSCachedMemSet(psUpdateUFODevVarBlockInt, 0, -+ psRGXKickSync2IN->ui32ClientUpdateCount * -+ sizeof(SYNC_PRIMITIVE_BLOCK *)); -+ ui32NextOffset += -+ psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); -+ hUpdateUFODevVarBlockInt2 = -+ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, hUpdateUFODevVarBlockInt2, -+ (const void __user *)psRGXKickSync2IN->phUpdateUFODevVarBlock, -+ psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) -+ { -+ psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickSync2_exit; -+ } -+ } -+ if (psRGXKickSync2IN->ui32ClientUpdateCount != 0) -+ { -+ ui32UpdateDevVarOffsetInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32UpdateDevVarOffsetInt, -+ (const void __user *)psRGXKickSync2IN->pui32UpdateDevVarOffset, -+ psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickSync2_exit; -+ } -+ } -+ if (psRGXKickSync2IN->ui32ClientUpdateCount != 0) -+ { -+ ui32UpdateValueInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32UpdateValueInt, -+ (const void __user *)psRGXKickSync2IN->pui32UpdateValue, -+ psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickSync2_exit; -+ } -+ } -+ -+ { -+ uiUpdateFenceNameInt = -+ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiUpdateFenceNameInt, -+ (const void __user *)psRGXKickSync2IN->puiUpdateFenceName, -+ PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickSync2_exit; -+ } -+ ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - -+ 1] = '\0'; -+ } -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXKickSync2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psKickSyncContextInt, -+ hKickSyncContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, IMG_TRUE); -+ if (unlikely(psRGXKickSync2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXKickSync2_exit; -+ } -+ -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXKickSync2IN->ui32ClientUpdateCount; i++) -+ { -+ /* Look up the address from the handle */ -+ psRGXKickSync2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psUpdateUFODevVarBlockInt[i], -+ hUpdateUFODevVarBlockInt2[i], -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, -+ IMG_TRUE); -+ if (unlikely(psRGXKickSync2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXKickSync2_exit; -+ } -+ } -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXKickSync2OUT->eError = -+ PVRSRVRGXKickSyncKM(psKickSyncContextInt, -+ psRGXKickSync2IN->ui32ClientUpdateCount, -+ psUpdateUFODevVarBlockInt, -+ ui32UpdateDevVarOffsetInt, -+ ui32UpdateValueInt, -+ psRGXKickSync2IN->hCheckFenceFD, -+ psRGXKickSync2IN->hTimelineFenceFD, -+ &psRGXKickSync2OUT->hUpdateFenceFD, -+ uiUpdateFenceNameInt, psRGXKickSync2IN->ui32ExtJobRef); -+ -+RGXKickSync2_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psKickSyncContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hKickSyncContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT); -+ } -+ -+ if (hUpdateUFODevVarBlockInt2) -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXKickSync2IN->ui32ClientUpdateCount; i++) -+ { -+ -+ /* Unreference the previously looked up handle */ -+ if (psUpdateUFODevVarBlockInt && psUpdateUFODevVarBlockInt[i]) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hUpdateUFODevVarBlockInt2[i], -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); -+ } -+ } -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXKickSync2OUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXSetKickSyncContextProperty(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXSetKickSyncContextPropertyIN_UI8, -+ IMG_UINT8 * psRGXSetKickSyncContextPropertyOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY *psRGXSetKickSyncContextPropertyIN = -+ (PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY *) -+ IMG_OFFSET_ADDR(psRGXSetKickSyncContextPropertyIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY *psRGXSetKickSyncContextPropertyOUT = -+ (PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY *) -+ IMG_OFFSET_ADDR(psRGXSetKickSyncContextPropertyOUT_UI8, 0); -+ -+ IMG_HANDLE hKickSyncContext = psRGXSetKickSyncContextPropertyIN->hKickSyncContext; -+ RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL; -+ -+ /* Lock over handle lookup. 
*/
-+ LockHandle(psConnection->psHandleBase);
-+
-+ /* Look up the address from the handle */
-+ psRGXSetKickSyncContextPropertyOUT->eError =
-+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
-+ (void **)&psKickSyncContextInt,
-+ hKickSyncContext,
-+ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, IMG_TRUE);
-+ if (unlikely(psRGXSetKickSyncContextPropertyOUT->eError != PVRSRV_OK))
-+ {
-+ UnlockHandle(psConnection->psHandleBase);
-+ goto RGXSetKickSyncContextProperty_exit;
-+ }
-+ /* Release now we have looked up handles. */
-+ UnlockHandle(psConnection->psHandleBase);
-+
-+ psRGXSetKickSyncContextPropertyOUT->eError =
-+ PVRSRVRGXSetKickSyncContextPropertyKM(psKickSyncContextInt,
-+ psRGXSetKickSyncContextPropertyIN->ui32Property,
-+ psRGXSetKickSyncContextPropertyIN->ui64Input,
-+ &psRGXSetKickSyncContextPropertyOUT->ui64Output);
-+
-+RGXSetKickSyncContextProperty_exit:
-+
-+ /* Lock over handle lookup cleanup. */
-+ LockHandle(psConnection->psHandleBase);
-+
-+ /* Unreference the previously looked up handle */
-+ if (psKickSyncContextInt)
-+ {
-+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
-+ hKickSyncContext,
-+ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT);
-+ }
-+ /* Release now we have cleaned up look up handles. */
-+ UnlockHandle(psConnection->psHandleBase);
-+
-+ return 0;
-+}
-+
-+/* ***************************************************************************
-+ * Server bridge dispatch related glue
-+ */
-+
-+#endif /* SUPPORT_RGXKICKSYNC_BRIDGE */
-+
-+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
-+PVRSRV_ERROR InitRGXKICKSYNCBridge(void);
-+void DeinitRGXKICKSYNCBridge(void);
-+
-+/*
-+ * Register all RGXKICKSYNC functions with services
-+ */
-+PVRSRV_ERROR InitRGXKICKSYNCBridge(void)
-+{
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
-+ PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT,
-+ PVRSRVBridgeRGXCreateKickSyncContext, NULL);
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
-+ PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT,
-+ PVRSRVBridgeRGXDestroyKickSyncContext, NULL);
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2,
-+ PVRSRVBridgeRGXKickSync2, NULL);
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
-+ PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY,
-+ PVRSRVBridgeRGXSetKickSyncContextProperty, NULL);
-+
-+ return PVRSRV_OK;
-+}
-+
-+/*
-+ * Unregister all rgxkicksync functions with services
-+ */
-+void DeinitRGXKICKSYNCBridge(void)
-+{
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
-+ PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT);
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
-+ PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT);
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2);
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
-+ PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY);
-+
-+}
-+#else /* SUPPORT_RGXKICKSYNC_BRIDGE */
-+/* This bridge is conditional on SUPPORT_RGXKICKSYNC_BRIDGE - when not defined,
-+ * do not populate the dispatch table with its functions
-+ */
-+#define InitRGXKICKSYNCBridge() \
-+ PVRSRV_OK
-+
-+#define DeinitRGXKICKSYNCBridge()
-+
-+#endif /* SUPPORT_RGXKICKSYNC_BRIDGE */
-diff --git a/drivers/gpu/drm/img-rogue/server_rgxregconfig_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxregconfig_bridge.c
-new file mode 100644
-index 000000000000..111111111111
---- /dev/null
-+++ b/drivers/gpu/drm/img-rogue/server_rgxregconfig_bridge.c
-@@ -0,0 +1,239 @@
-+/******************************************************************************* -+@File -+@Title Server bridge for rgxregconfig -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for rgxregconfig -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "rgxregconfig.h" -+ -+#include "common_rgxregconfig_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static IMG_INT -+PVRSRVBridgeRGXSetRegConfigType(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXSetRegConfigTypeIN_UI8, -+ IMG_UINT8 * psRGXSetRegConfigTypeOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeIN = -+ (PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *) IMG_OFFSET_ADDR(psRGXSetRegConfigTypeIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeOUT = -+ (PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *) IMG_OFFSET_ADDR(psRGXSetRegConfigTypeOUT_UI8, -+ 0); -+ -+ psRGXSetRegConfigTypeOUT->eError = -+ PVRSRVRGXSetRegConfigTypeKM(psConnection, OSGetDevNode(psConnection), -+ psRGXSetRegConfigTypeIN->ui8RegPowerIsland); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXAddRegconfig(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXAddRegconfigIN_UI8, -+ IMG_UINT8 * psRGXAddRegconfigOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *psRGXAddRegconfigIN = -+ (PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *) IMG_OFFSET_ADDR(psRGXAddRegconfigIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *psRGXAddRegconfigOUT = -+ (PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *) IMG_OFFSET_ADDR(psRGXAddRegconfigOUT_UI8, 0); -+ -+ psRGXAddRegconfigOUT->eError = -+ PVRSRVRGXAddRegConfigKM(psConnection, OSGetDevNode(psConnection), -+ psRGXAddRegconfigIN->ui32RegAddr, -+ psRGXAddRegconfigIN->ui64RegValue, -+ psRGXAddRegconfigIN->ui64RegMask); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXClearRegConfig(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXClearRegConfigIN_UI8, -+ IMG_UINT8 * psRGXClearRegConfigOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *psRGXClearRegConfigIN = -+ (PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *) IMG_OFFSET_ADDR(psRGXClearRegConfigIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *psRGXClearRegConfigOUT = -+ (PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *) IMG_OFFSET_ADDR(psRGXClearRegConfigOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psRGXClearRegConfigIN); -+ -+ psRGXClearRegConfigOUT->eError = -+ PVRSRVRGXClearRegConfigKM(psConnection, OSGetDevNode(psConnection)); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXEnableRegConfig(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXEnableRegConfigIN_UI8, -+ IMG_UINT8 * psRGXEnableRegConfigOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *psRGXEnableRegConfigIN = -+ (PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXEnableRegConfigIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *psRGXEnableRegConfigOUT = -+ (PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXEnableRegConfigOUT_UI8, -+ 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psRGXEnableRegConfigIN); -+ -+ psRGXEnableRegConfigOUT->eError = -+ PVRSRVRGXEnableRegConfigKM(psConnection, OSGetDevNode(psConnection)); -+ -+ return 0; -+} -+ -+static IMG_INT 
-+PVRSRVBridgeRGXDisableRegConfig(IMG_UINT32 ui32DispatchTableEntry,
-+ IMG_UINT8 * psRGXDisableRegConfigIN_UI8,
-+ IMG_UINT8 * psRGXDisableRegConfigOUT_UI8,
-+ CONNECTION_DATA * psConnection)
-+{
-+ PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *psRGXDisableRegConfigIN =
-+ (PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXDisableRegConfigIN_UI8,
-+ 0);
-+ PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *psRGXDisableRegConfigOUT =
-+ (PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXDisableRegConfigOUT_UI8,
-+ 0);
-+
-+ PVR_UNREFERENCED_PARAMETER(psRGXDisableRegConfigIN);
-+
-+ psRGXDisableRegConfigOUT->eError =
-+ PVRSRVRGXDisableRegConfigKM(psConnection, OSGetDevNode(psConnection));
-+
-+ return 0;
-+}
-+
-+/* ***************************************************************************
-+ * Server bridge dispatch related glue
-+ */
-+
-+#endif /* EXCLUDE_RGXREGCONFIG_BRIDGE */
-+
-+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
-+PVRSRV_ERROR InitRGXREGCONFIGBridge(void);
-+void DeinitRGXREGCONFIGBridge(void);
-+
-+/*
-+ * Register all RGXREGCONFIG functions with services
-+ */
-+PVRSRV_ERROR InitRGXREGCONFIGBridge(void)
-+{
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
-+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE,
-+ PVRSRVBridgeRGXSetRegConfigType, NULL);
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
-+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG,
-+ PVRSRVBridgeRGXAddRegconfig, NULL);
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
-+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG,
-+ PVRSRVBridgeRGXClearRegConfig, NULL);
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
-+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG,
-+ PVRSRVBridgeRGXEnableRegConfig, NULL);
-+
-+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
-+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG,
-+ PVRSRVBridgeRGXDisableRegConfig, NULL);
-+
-+ return PVRSRV_OK;
-+}
-+
-+/*
-+ * Unregister all rgxregconfig functions with services
-+ */
-+void DeinitRGXREGCONFIGBridge(void)
-+{
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
-+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE);
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
-+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG);
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
-+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG);
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
-+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG);
-+
-+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
-+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG);
-+
-+}
-+#else /* EXCLUDE_RGXREGCONFIG_BRIDGE */
-+/* This bridge is conditional on EXCLUDE_RGXREGCONFIG_BRIDGE - when defined,
-+ * do not populate the dispatch table with its functions
-+ */
-+#define InitRGXREGCONFIGBridge() \
-+ PVRSRV_OK
-+
-+#define DeinitRGXREGCONFIGBridge()
-+
-+#endif /* EXCLUDE_RGXREGCONFIG_BRIDGE */
-diff --git a/drivers/gpu/drm/img-rogue/server_rgxta3d_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxta3d_bridge.c
-new file mode 100644
-index 000000000000..111111111111
---- /dev/null
-+++ b/drivers/gpu/drm/img-rogue/server_rgxta3d_bridge.c
-@@ -0,0 +1,2447 @@
-+/*******************************************************************************
-+@File
-+@Title Server bridge for rgxta3d
-+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
-+@Description Implements the server side of the bridge for rgxta3d
-+@License Dual MIT/GPLv2
-+
-+The contents of this file are subject to the MIT license as set out below.
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "rgxta3d.h" -+ -+#include "common_rgxta3d_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static PVRSRV_ERROR _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = RGXDestroyHWRTDataSet((RGX_KM_HW_RT_DATASET *) pvData); -+ return eError; -+} -+ -+static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, -+ "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX"); -+static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, -+ "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); -+static_assert(RGXMKIF_NUM_RTDATA_FREELISTS <= IMG_UINT32_MAX, -+ "RGXMKIF_NUM_RTDATA_FREELISTS must not be larger than IMG_UINT32_MAX"); -+static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, -+ "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX"); -+static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, -+ "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); -+static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, -+ "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); -+static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, -+ "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX"); -+static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, -+ "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRGXCreateHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXCreateHWRTDataSetIN_UI8, -+ IMG_UINT8 * psRGXCreateHWRTDataSetOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetIN = -+ (PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *) IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetOUT = -+ (PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *) -+ IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetOUT_UI8, 0); -+ -+ IMG_DEV_VIRTADDR *sVHeapTableDevVAddrInt = NULL; -+ IMG_DEV_VIRTADDR *sPMMlistDevVAddrInt = NULL; -+ RGX_FREELIST **psapsFreeListsInt = NULL; -+ IMG_HANDLE *hapsFreeListsInt2 = NULL; -+ IMG_DEV_VIRTADDR *sTailPtrsDevVAddrInt = NULL; -+ IMG_DEV_VIRTADDR *sMacrotileArrayDevVAddrInt = NULL; -+ IMG_DEV_VIRTADDR *sRgnHeaderDevVAddrInt = NULL; -+ IMG_DEV_VIRTADDR *sRTCDevVAddrInt = NULL; -+ RGX_KM_HW_RT_DATASET **psKmHwRTDataSetInt = NULL; -+ IMG_HANDLE *hKmHwRTDataSetInt2 = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) + -+ ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + -+ ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *)) + -+ ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) + -+ ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) + -+ ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + -+ ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + -+ ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) + -+ ((IMG_UINT64) 
RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) + -+ ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE)) + 0; -+ -+ psRGXCreateHWRTDataSetOUT->phKmHwRTDataSet = psRGXCreateHWRTDataSetIN->phKmHwRTDataSet; -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXCreateHWRTDataSet_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXCreateHWRTDataSetIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateHWRTDataSetIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXCreateHWRTDataSet_exit; -+ } -+ } -+ } -+ -+ { -+ sVHeapTableDevVAddrInt = -+ (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); -+ } -+ -+ /* Copy the data over */ -+ if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, sVHeapTableDevVAddrInt, -+ (const void __user *)psRGXCreateHWRTDataSetIN->psVHeapTableDevVAddr, -+ RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) -+ { -+ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXCreateHWRTDataSet_exit; -+ } -+ } -+ -+ { -+ sPMMlistDevVAddrInt = -+ (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); -+ } -+ -+ /* Copy the data over */ -+ if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, sPMMlistDevVAddrInt, -+ (const void __user *)psRGXCreateHWRTDataSetIN->psPMMlistDevVAddr, -+ RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) -+ { -+ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXCreateHWRTDataSet_exit; -+ } -+ } -+ -+ { -+ psapsFreeListsInt = -+ (RGX_FREELIST **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ OSCachedMemSet(psapsFreeListsInt, 0, -+ RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *)); -+ ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *); -+ hapsFreeListsInt2 = -+ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE); -+ } -+ -+ /* Copy the data over */ -+ if (RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, hapsFreeListsInt2, -+ (const void __user *)psRGXCreateHWRTDataSetIN->phapsFreeLists, -+ RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK) -+ { -+ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXCreateHWRTDataSet_exit; -+ } -+ } -+ -+ { -+ sTailPtrsDevVAddrInt = -+ (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); -+ } -+ -+ /* Copy the data over */ -+ if (RGXMKIF_NUM_GEOMDATAS 
* sizeof(IMG_DEV_VIRTADDR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, sTailPtrsDevVAddrInt, -+ (const void __user *)psRGXCreateHWRTDataSetIN->psTailPtrsDevVAddr, -+ RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) -+ { -+ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXCreateHWRTDataSet_exit; -+ } -+ } -+ -+ { -+ sMacrotileArrayDevVAddrInt = -+ (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); -+ } -+ -+ /* Copy the data over */ -+ if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, sMacrotileArrayDevVAddrInt, -+ (const void __user *)psRGXCreateHWRTDataSetIN->psMacrotileArrayDevVAddr, -+ RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) -+ { -+ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXCreateHWRTDataSet_exit; -+ } -+ } -+ -+ { -+ sRgnHeaderDevVAddrInt = -+ (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); -+ } -+ -+ /* Copy the data over */ -+ if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, sRgnHeaderDevVAddrInt, -+ (const void __user *)psRGXCreateHWRTDataSetIN->psRgnHeaderDevVAddr, -+ RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) -+ { -+ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXCreateHWRTDataSet_exit; -+ } -+ } -+ -+ { -+ sRTCDevVAddrInt = -+ (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); -+ } -+ -+ /* Copy the data over */ -+ if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, sRTCDevVAddrInt, -+ (const void __user *)psRGXCreateHWRTDataSetIN->psRTCDevVAddr, -+ RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) -+ { -+ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXCreateHWRTDataSet_exit; -+ } -+ } -+ if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL) -+ { -+ psKmHwRTDataSetInt = -+ (RGX_KM_HW_RT_DATASET **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ OSCachedMemSet(psKmHwRTDataSetInt, 0, -+ RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)); -+ ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *); -+ hKmHwRTDataSetInt2 = -+ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE); -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++) -+ { -+ /* Look up the address from the handle */ -+ psRGXCreateHWRTDataSetOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psapsFreeListsInt[i], -+ hapsFreeListsInt2[i], -+ PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE); -+ if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXCreateHWRTDataSet_exit; -+ } -+ } -+ } -+ /* Release now we have looked up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXCreateHWRTDataSetOUT->eError = -+ RGXCreateHWRTDataSet(psConnection, OSGetDevNode(psConnection), -+ sVHeapTableDevVAddrInt, -+ sPMMlistDevVAddrInt, -+ psapsFreeListsInt, -+ psRGXCreateHWRTDataSetIN->ui32PPPScreen, -+ psRGXCreateHWRTDataSetIN->ui64MultiSampleCtl, -+ psRGXCreateHWRTDataSetIN->ui64FlippedMultiSampleCtl, -+ psRGXCreateHWRTDataSetIN->ui32TPCStride, -+ sTailPtrsDevVAddrInt, -+ psRGXCreateHWRTDataSetIN->ui32TPCSize, -+ psRGXCreateHWRTDataSetIN->ui32TEScreen, -+ psRGXCreateHWRTDataSetIN->ui32TEAA, -+ psRGXCreateHWRTDataSetIN->ui32TEMTILE1, -+ psRGXCreateHWRTDataSetIN->ui32TEMTILE2, -+ psRGXCreateHWRTDataSetIN->ui32MTileStride, -+ psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerX, -+ psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerY, -+ psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperX, -+ psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperY, -+ psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleX, -+ psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleY, -+ sMacrotileArrayDevVAddrInt, -+ sRgnHeaderDevVAddrInt, -+ sRTCDevVAddrInt, -+ psRGXCreateHWRTDataSetIN->ui32RgnHeaderSize, -+ psRGXCreateHWRTDataSetIN->ui32ISPMtileSize, -+ psRGXCreateHWRTDataSetIN->ui16MaxRTs, psKmHwRTDataSetInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) -+ { -+ goto RGXCreateHWRTDataSet_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ if (hKmHwRTDataSetInt2) -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++) -+ { -+ -+ psRGXCreateHWRTDataSetOUT->eError = -+ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &hKmHwRTDataSetInt2[i], -+ (void *)psKmHwRTDataSetInt[i], -+ PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease); -+ if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXCreateHWRTDataSet_exit; -+ } -+ -+ } -+ } -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ /* If dest ptr is non-null and we have data to copy */ -+ if ((hKmHwRTDataSetInt2) && ((RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) > 0)) -+ { -+ if (unlikely -+ (OSCopyToUser -+ (NULL, (void __user *)psRGXCreateHWRTDataSetOUT->phKmHwRTDataSet, -+ hKmHwRTDataSetInt2, -+ (RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *))) != PVRSRV_OK)) -+ { -+ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXCreateHWRTDataSet_exit; -+ } -+ } -+ -+RGXCreateHWRTDataSet_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ if (hapsFreeListsInt2) -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++) -+ { -+ -+ /* Unreference the previously looked up handle */ -+ if (psapsFreeListsInt && psapsFreeListsInt[i]) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hapsFreeListsInt2[i], -+ PVRSRV_HANDLE_TYPE_RGX_FREELIST); -+ } -+ } -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK) -+ { -+ { -+ IMG_UINT32 i; -+ -+ if (hKmHwRTDataSetInt2) -+ { -+ for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++) -+ { -+ if (hKmHwRTDataSetInt2[i]) -+ { -+ RGXDestroyHWRTDataSet(hKmHwRTDataSetInt2[i]); -+ } -+ } -+ } -+ } -+ } -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXCreateHWRTDataSetOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXDestroyHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXDestroyHWRTDataSetIN_UI8, -+ IMG_UINT8 * psRGXDestroyHWRTDataSetOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetIN = -+ (PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *) -+ IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetOUT = -+ (PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *) -+ IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetOUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXDestroyHWRTDataSetOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psRGXDestroyHWRTDataSetIN-> -+ hKmHwRTDataSet, -+ PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); -+ if (unlikely -+ ((psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_OK) -+ && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) -+ && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psRGXDestroyHWRTDataSetOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXDestroyHWRTDataSet_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXDestroyHWRTDataSet_exit: -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _RGXCreateZSBufferpssZSBufferKMIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = RGXDestroyZSBufferKM((RGX_ZSBUFFER_DATA *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXCreateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXCreateZSBufferIN_UI8, -+ IMG_UINT8 * psRGXCreateZSBufferOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *psRGXCreateZSBufferIN = -+ (PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *psRGXCreateZSBufferOUT = -+ (PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferOUT_UI8, 0); -+ -+ IMG_HANDLE hReservation = psRGXCreateZSBufferIN->hReservation; -+ DEVMEMINT_RESERVATION *psReservationInt = NULL; -+ IMG_HANDLE hPMR = psRGXCreateZSBufferIN->hPMR; -+ PMR *psPMRInt = NULL; -+ RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXCreateZSBufferOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psReservationInt, -+ hReservation, -+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); -+ if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXCreateZSBuffer_exit; -+ } -+ -+ /* Look up the address from the handle */ -+ psRGXCreateZSBufferOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRInt, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXCreateZSBuffer_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXCreateZSBufferOUT->eError = -+ RGXCreateZSBufferKM(psConnection, OSGetDevNode(psConnection), -+ psReservationInt, -+ psPMRInt, psRGXCreateZSBufferIN->uiMapFlags, &pssZSBufferKMInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) -+ { -+ goto RGXCreateZSBuffer_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXCreateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psRGXCreateZSBufferOUT-> -+ hsZSBufferKM, -+ (void *)pssZSBufferKMInt, -+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _RGXCreateZSBufferpssZSBufferKMIntRelease); -+ if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXCreateZSBuffer_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXCreateZSBuffer_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psReservationInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); -+ } -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK) -+ { -+ if (pssZSBufferKMInt) -+ { -+ RGXDestroyZSBufferKM(pssZSBufferKMInt); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXDestroyZSBuffer(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXDestroyZSBufferIN_UI8, -+ IMG_UINT8 * psRGXDestroyZSBufferOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferIN = -+ (PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferOUT = -+ (PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferOUT_UI8, -+ 0); -+ -+ /* Lock over handle destruction. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXDestroyZSBufferOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psRGXDestroyZSBufferIN-> -+ hsZSBufferMemDesc, -+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); -+ if (unlikely -+ ((psRGXDestroyZSBufferOUT->eError != PVRSRV_OK) -+ && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) -+ && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psRGXDestroyZSBufferOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXDestroyZSBuffer_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXDestroyZSBuffer_exit: -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _RGXPopulateZSBufferpssPopulationIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = RGXUnpopulateZSBufferKM((RGX_POPULATION *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXPopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXPopulateZSBufferIN_UI8, -+ IMG_UINT8 * psRGXPopulateZSBufferOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferIN = -+ (PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferOUT = -+ (PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferOUT_UI8, -+ 0); -+ -+ IMG_HANDLE hsZSBufferKM = psRGXPopulateZSBufferIN->hsZSBufferKM; -+ RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; -+ RGX_POPULATION *pssPopulationInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXPopulateZSBufferOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&pssZSBufferKMInt, -+ hsZSBufferKM, -+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); -+ if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXPopulateZSBuffer_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXPopulateZSBufferOUT->eError = -+ RGXPopulateZSBufferKM(pssZSBufferKMInt, &pssPopulationInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) -+ { -+ goto RGXPopulateZSBuffer_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXPopulateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psRGXPopulateZSBufferOUT-> -+ hsPopulation, -+ (void *)pssPopulationInt, -+ PVRSRV_HANDLE_TYPE_RGX_POPULATION, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _RGXPopulateZSBufferpssPopulationIntRelease); -+ if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXPopulateZSBuffer_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXPopulateZSBuffer_exit: -+ -+ /* Lock over handle lookup cleanup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (pssZSBufferKMInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hsZSBufferKM, PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK) -+ { -+ if (pssPopulationInt) -+ { -+ RGXUnpopulateZSBufferKM(pssPopulationInt); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXUnpopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXUnpopulateZSBufferIN_UI8, -+ IMG_UINT8 * psRGXUnpopulateZSBufferOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferIN = -+ (PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *) -+ IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferOUT = -+ (PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *) -+ IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferOUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXUnpopulateZSBufferOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psRGXUnpopulateZSBufferIN->hsPopulation, -+ PVRSRV_HANDLE_TYPE_RGX_POPULATION); -+ if (unlikely((psRGXUnpopulateZSBufferOUT->eError != PVRSRV_OK) && -+ (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psRGXUnpopulateZSBufferOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXUnpopulateZSBuffer_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXUnpopulateZSBuffer_exit: -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _RGXCreateFreeListpsCleanupCookieIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = RGXDestroyFreeList((RGX_FREELIST *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXCreateFreeList(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXCreateFreeListIN_UI8, -+ IMG_UINT8 * psRGXCreateFreeListOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *psRGXCreateFreeListIN = -+ (PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *psRGXCreateFreeListOUT = -+ (PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListOUT_UI8, 0); -+ -+ IMG_HANDLE hMemCtxPrivData = psRGXCreateFreeListIN->hMemCtxPrivData; -+ IMG_HANDLE hMemCtxPrivDataInt = NULL; -+ IMG_HANDLE hsGlobalFreeList = psRGXCreateFreeListIN->hsGlobalFreeList; -+ RGX_FREELIST *pssGlobalFreeListInt = NULL; -+ IMG_HANDLE hsFreeListPMR = psRGXCreateFreeListIN->hsFreeListPMR; -+ PMR *pssFreeListPMRInt = NULL; -+ RGX_FREELIST *psCleanupCookieInt = NULL; -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXCreateFreeListOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&hMemCtxPrivDataInt, -+ hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); -+ if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXCreateFreeList_exit; -+ } -+ -+ if (psRGXCreateFreeListIN->hsGlobalFreeList) -+ { -+ /* Look up the address from the handle */ -+ psRGXCreateFreeListOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&pssGlobalFreeListInt, -+ hsGlobalFreeList, -+ PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE); -+ if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXCreateFreeList_exit; -+ } -+ } -+ -+ /* Look up the address from the handle */ -+ psRGXCreateFreeListOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&pssFreeListPMRInt, -+ hsFreeListPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXCreateFreeList_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXCreateFreeListOUT->eError = -+ RGXCreateFreeList(psConnection, OSGetDevNode(psConnection), -+ hMemCtxPrivDataInt, -+ psRGXCreateFreeListIN->ui32MaxFLPages, -+ psRGXCreateFreeListIN->ui32InitFLPages, -+ psRGXCreateFreeListIN->ui32GrowFLPages, -+ psRGXCreateFreeListIN->ui32GrowParamThreshold, -+ pssGlobalFreeListInt, -+ psRGXCreateFreeListIN->bbFreeListCheck, -+ psRGXCreateFreeListIN->spsFreeListDevVAddr, -+ pssFreeListPMRInt, -+ psRGXCreateFreeListIN->uiPMROffset, &psCleanupCookieInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) -+ { -+ goto RGXCreateFreeList_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXCreateFreeListOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psRGXCreateFreeListOUT-> -+ hCleanupCookie, -+ (void *)psCleanupCookieInt, -+ PVRSRV_HANDLE_TYPE_RGX_FREELIST, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _RGXCreateFreeListpsCleanupCookieIntRelease); -+ if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXCreateFreeList_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXCreateFreeList_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (hMemCtxPrivDataInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); -+ } -+ -+ if (psRGXCreateFreeListIN->hsGlobalFreeList) -+ { -+ -+ /* Unreference the previously looked up handle */ -+ if (pssGlobalFreeListInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hsGlobalFreeList, -+ PVRSRV_HANDLE_TYPE_RGX_FREELIST); -+ } -+ } -+ -+ /* Unreference the previously looked up handle */ -+ if (pssFreeListPMRInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hsFreeListPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psRGXCreateFreeListOUT->eError != PVRSRV_OK) -+ { -+ if (psCleanupCookieInt) -+ { -+ RGXDestroyFreeList(psCleanupCookieInt); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXDestroyFreeList(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXDestroyFreeListIN_UI8, -+ IMG_UINT8 * psRGXDestroyFreeListOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *psRGXDestroyFreeListIN = -+ (PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *psRGXDestroyFreeListOUT = -+ (PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListOUT_UI8, -+ 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXDestroyFreeListOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psRGXDestroyFreeListIN->hCleanupCookie, -+ PVRSRV_HANDLE_TYPE_RGX_FREELIST); -+ if (unlikely((psRGXDestroyFreeListOUT->eError != PVRSRV_OK) && -+ (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psRGXDestroyFreeListOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXDestroyFreeList_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXDestroyFreeList_exit: -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _RGXCreateRenderContextpsRenderContextIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PVRSRVRGXDestroyRenderContextKM((RGX_SERVER_RENDER_CONTEXT *) pvData); -+ return eError; -+} -+ -+static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX, -+ "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -+static_assert(RGXFWIF_STATIC_RENDERCONTEXT_SIZE <= IMG_UINT32_MAX, -+ "RGXFWIF_STATIC_RENDERCONTEXT_SIZE must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXCreateRenderContextIN_UI8, -+ IMG_UINT8 * psRGXCreateRenderContextOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextIN = -+ (PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *) -+ IMG_OFFSET_ADDR(psRGXCreateRenderContextIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextOUT = -+ (PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *) -+ IMG_OFFSET_ADDR(psRGXCreateRenderContextOUT_UI8, 0); -+ -+ IMG_BYTE *ui8FrameworkCmdInt = NULL; -+ IMG_HANDLE hPrivData = psRGXCreateRenderContextIN->hPrivData; -+ IMG_HANDLE hPrivDataInt = NULL; -+ IMG_BYTE *ui8StaticRenderContextStateInt = NULL; -+ RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) + -+ ((IMG_UINT64) psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * -+ sizeof(IMG_BYTE)) + 0; -+ -+ if (unlikely(psRGXCreateRenderContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE)) -+ { -+ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXCreateRenderContext_exit; -+ } -+ -+ if (unlikely -+ 
(psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize > -+ RGXFWIF_STATIC_RENDERCONTEXT_SIZE)) -+ { -+ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXCreateRenderContext_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXCreateRenderContext_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXCreateRenderContextIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateRenderContextIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXCreateRenderContext_exit; -+ } -+ } -+ } -+ -+ if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize != 0) -+ { -+ ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui8FrameworkCmdInt, -+ (const void __user *)psRGXCreateRenderContextIN->pui8FrameworkCmd, -+ psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) != -+ PVRSRV_OK) -+ { -+ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXCreateRenderContext_exit; -+ } -+ } -+ if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize != 0) -+ { -+ ui8StaticRenderContextStateInt = -+ (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui8StaticRenderContextStateInt, -+ (const void __user *)psRGXCreateRenderContextIN->pui8StaticRenderContextState, -+ psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * -+ sizeof(IMG_BYTE)) != PVRSRV_OK) -+ { -+ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXCreateRenderContext_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXCreateRenderContextOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&hPrivDataInt, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); -+ if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXCreateRenderContext_exit; -+ } -+ /* Release now we have looked up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXCreateRenderContextOUT->eError = -+ PVRSRVRGXCreateRenderContextKM(psConnection, OSGetDevNode(psConnection), -+ psRGXCreateRenderContextIN->i32Priority, -+ psRGXCreateRenderContextIN->sVDMCallStackAddr, -+ psRGXCreateRenderContextIN->ui32ui32CallStackDepth, -+ psRGXCreateRenderContextIN->ui32FrameworkCmdSize, -+ ui8FrameworkCmdInt, -+ hPrivDataInt, -+ psRGXCreateRenderContextIN-> -+ ui32StaticRenderContextStateSize, -+ ui8StaticRenderContextStateInt, -+ psRGXCreateRenderContextIN->ui32PackedCCBSizeU8888, -+ psRGXCreateRenderContextIN->ui32ContextFlags, -+ psRGXCreateRenderContextIN->ui64RobustnessAddress, -+ psRGXCreateRenderContextIN->ui32MaxTADeadlineMS, -+ psRGXCreateRenderContextIN->ui32Max3DDeadlineMS, -+ &psRenderContextInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) -+ { -+ goto RGXCreateRenderContext_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXCreateRenderContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psRGXCreateRenderContextOUT-> -+ hRenderContext, -+ (void *)psRenderContextInt, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _RGXCreateRenderContextpsRenderContextIntRelease); -+ if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXCreateRenderContext_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXCreateRenderContext_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (hPrivDataInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK) -+ { -+ if (psRenderContextInt) -+ { -+ PVRSRVRGXDestroyRenderContextKM(psRenderContextInt); -+ } -+ } -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXCreateRenderContextOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXDestroyRenderContext(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXDestroyRenderContextIN_UI8, -+ IMG_UINT8 * psRGXDestroyRenderContextOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextIN = -+ (PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *) -+ IMG_OFFSET_ADDR(psRGXDestroyRenderContextIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextOUT = -+ (PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *) -+ IMG_OFFSET_ADDR(psRGXDestroyRenderContextOUT_UI8, 0); -+ -+ /* Lock over handle destruction. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXDestroyRenderContextOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psRGXDestroyRenderContextIN-> -+ hCleanupCookie, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); -+ if (unlikely -+ ((psRGXDestroyRenderContextOUT->eError != PVRSRV_OK) -+ && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) -+ && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psRGXDestroyRenderContextOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXDestroyRenderContext_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXDestroyRenderContext_exit: -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXSendZSStoreDisable(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXSendZSStoreDisableIN_UI8, -+ IMG_UINT8 * psRGXSendZSStoreDisableOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXSENDZSSTOREDISABLE *psRGXSendZSStoreDisableIN = -+ (PVRSRV_BRIDGE_IN_RGXSENDZSSTOREDISABLE *) -+ IMG_OFFSET_ADDR(psRGXSendZSStoreDisableIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXSENDZSSTOREDISABLE *psRGXSendZSStoreDisableOUT = -+ (PVRSRV_BRIDGE_OUT_RGXSENDZSSTOREDISABLE *) -+ IMG_OFFSET_ADDR(psRGXSendZSStoreDisableOUT_UI8, 0); -+ -+ IMG_HANDLE hRenderContext = psRGXSendZSStoreDisableIN->hRenderContext; -+ RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXSendZSStoreDisableOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psRenderContextInt, -+ hRenderContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); -+ if (unlikely(psRGXSendZSStoreDisableOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXSendZSStoreDisable_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXSendZSStoreDisableOUT->eError = -+ PVRSRVRGXSendZSStoreDisableKM(psConnection, OSGetDevNode(psConnection), -+ psRenderContextInt, -+ psRGXSendZSStoreDisableIN->bDisableDepthStore, -+ psRGXSendZSStoreDisableIN->bDisableStencilStore, -+ psRGXSendZSStoreDisableIN->i32ExtJobRefToDisableZSStore); -+ -+RGXSendZSStoreDisable_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psRenderContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hRenderContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXSetRenderContextPriority(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXSetRenderContextPriorityIN_UI8, -+ IMG_UINT8 * psRGXSetRenderContextPriorityOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityIN = -+ (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *) -+ IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityOUT = -+ (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *) -+ IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityOUT_UI8, 0); -+ -+ IMG_HANDLE hRenderContext = psRGXSetRenderContextPriorityIN->hRenderContext; -+ RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXSetRenderContextPriorityOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psRenderContextInt, -+ hRenderContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); -+ if (unlikely(psRGXSetRenderContextPriorityOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXSetRenderContextPriority_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXSetRenderContextPriorityOUT->eError = -+ PVRSRVRGXSetRenderContextPriorityKM(psConnection, OSGetDevNode(psConnection), -+ psRenderContextInt, -+ psRGXSetRenderContextPriorityIN->i32Priority); -+ -+RGXSetRenderContextPriority_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psRenderContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hRenderContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXRenderContextStalled(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXRenderContextStalledIN_UI8, -+ IMG_UINT8 * psRGXRenderContextStalledOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledIN = -+ (PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *) -+ IMG_OFFSET_ADDR(psRGXRenderContextStalledIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledOUT = -+ (PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *) -+ IMG_OFFSET_ADDR(psRGXRenderContextStalledOUT_UI8, 0); -+ -+ IMG_HANDLE hRenderContext = psRGXRenderContextStalledIN->hRenderContext; -+ RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXRenderContextStalledOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psRenderContextInt, -+ hRenderContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); -+ if (unlikely(psRGXRenderContextStalledOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXRenderContextStalled_exit; -+ } -+ /* Release now we have looked up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXRenderContextStalledOUT->eError = RGXRenderContextStalledKM(psRenderContextInt); -+ -+RGXRenderContextStalled_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psRenderContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hRenderContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, -+ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); -+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, -+ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); -+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, -+ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); -+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, -+ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); -+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, -+ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); -+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, -+ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, -+ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, -+ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, -+ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRGXKickTA3D2(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXKickTA3D2IN_UI8, -+ IMG_UINT8 * psRGXKickTA3D2OUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *psRGXKickTA3D2IN = -+ (PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2IN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *psRGXKickTA3D2OUT = -+ (PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2OUT_UI8, 0); -+ -+ IMG_HANDLE hRenderContext = psRGXKickTA3D2IN->hRenderContext; -+ RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; -+ SYNC_PRIMITIVE_BLOCK **psClientTAFenceSyncPrimBlockInt = NULL; -+ IMG_HANDLE *hClientTAFenceSyncPrimBlockInt2 = NULL; -+ IMG_UINT32 *ui32ClientTAFenceSyncOffsetInt = NULL; -+ IMG_UINT32 *ui32ClientTAFenceValueInt = NULL; -+ SYNC_PRIMITIVE_BLOCK **psClientTAUpdateSyncPrimBlockInt = NULL; -+ IMG_HANDLE *hClientTAUpdateSyncPrimBlockInt2 = NULL; -+ IMG_UINT32 *ui32ClientTAUpdateSyncOffsetInt = NULL; -+ IMG_UINT32 *ui32ClientTAUpdateValueInt = NULL; -+ SYNC_PRIMITIVE_BLOCK **psClient3DUpdateSyncPrimBlockInt = NULL; -+ IMG_HANDLE *hClient3DUpdateSyncPrimBlockInt2 = NULL; -+ IMG_UINT32 *ui32Client3DUpdateSyncOffsetInt = NULL; -+ IMG_UINT32 *ui32Client3DUpdateValueInt = NULL; -+ IMG_HANDLE hPRFenceUFOSyncPrimBlock = psRGXKickTA3D2IN->hPRFenceUFOSyncPrimBlock; -+ SYNC_PRIMITIVE_BLOCK *psPRFenceUFOSyncPrimBlockInt = NULL; -+ IMG_CHAR *uiUpdateFenceNameInt = NULL; -+ IMG_CHAR *uiUpdateFenceName3DInt = NULL; -+ IMG_BYTE *ui8TACmdInt = NULL; -+ IMG_BYTE *ui83DPRCmdInt = NULL; -+ IMG_BYTE *ui83DCmdInt = NULL; -+ IMG_HANDLE hKMHWRTDataSet = psRGXKickTA3D2IN->hKMHWRTDataSet; -+ RGX_KM_HW_RT_DATASET *psKMHWRTDataSetInt = NULL; -+ IMG_HANDLE hZSBuffer = 
psRGXKickTA3D2IN->hZSBuffer; -+ RGX_ZSBUFFER_DATA *psZSBufferInt = NULL; -+ IMG_HANDLE hMSAAScratchBuffer = psRGXKickTA3D2IN->hMSAAScratchBuffer; -+ RGX_ZSBUFFER_DATA *psMSAAScratchBufferInt = NULL; -+ IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; -+ PMR **psSyncPMRsInt = NULL; -+ IMG_HANDLE *hSyncPMRsInt2 = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * -+ sizeof(SYNC_PRIMITIVE_BLOCK *)) + -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) + -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * -+ sizeof(SYNC_PRIMITIVE_BLOCK *)) + -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) + -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * -+ sizeof(SYNC_PRIMITIVE_BLOCK *)) + -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) + -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + -+ ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) + -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) + -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) + -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)) + -+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; -+ -+ if (unlikely(psRGXKickTA3D2IN->ui32ClientTAFenceCount > PVRSRV_MAX_SYNCS)) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXKickTA3D2_exit; -+ } -+ -+ if (unlikely(psRGXKickTA3D2IN->ui32ClientTAUpdateCount > PVRSRV_MAX_SYNCS)) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXKickTA3D2_exit; -+ } -+ -+ if (unlikely(psRGXKickTA3D2IN->ui32Client3DUpdateCount > PVRSRV_MAX_SYNCS)) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXKickTA3D2_exit; -+ } -+ -+ if (unlikely(psRGXKickTA3D2IN->ui32TACmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXKickTA3D2_exit; -+ } -+ -+ if (unlikely(psRGXKickTA3D2IN->ui323DPRCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXKickTA3D2_exit; -+ } -+ -+ if (unlikely(psRGXKickTA3D2IN->ui323DCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXKickTA3D2_exit; -+ } -+ -+ if (unlikely(psRGXKickTA3D2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXKickTA3D2_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXKickTA3D2OUT->eError = 
PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXKickTA3D2_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXKickTA3D2IN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickTA3D2IN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ } -+ -+ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) -+ { -+ psClientTAFenceSyncPrimBlockInt = -+ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ OSCachedMemSet(psClientTAFenceSyncPrimBlockInt, 0, -+ psRGXKickTA3D2IN->ui32ClientTAFenceCount * -+ sizeof(SYNC_PRIMITIVE_BLOCK *)); -+ ui32NextOffset += -+ psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *); -+ hClientTAFenceSyncPrimBlockInt2 = -+ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, hClientTAFenceSyncPrimBlockInt2, -+ (const void __user *)psRGXKickTA3D2IN->phClientTAFenceSyncPrimBlock, -+ psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) -+ { -+ ui32ClientTAFenceSyncOffsetInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32ClientTAFenceSyncOffsetInt, -+ (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceSyncOffset, -+ psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) -+ { -+ ui32ClientTAFenceValueInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32ClientTAFenceValueInt, -+ (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceValue, -+ psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) -+ { -+ psClientTAUpdateSyncPrimBlockInt = -+ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ 
OSCachedMemSet(psClientTAUpdateSyncPrimBlockInt, 0, -+ psRGXKickTA3D2IN->ui32ClientTAUpdateCount * -+ sizeof(SYNC_PRIMITIVE_BLOCK *)); -+ ui32NextOffset += -+ psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); -+ hClientTAUpdateSyncPrimBlockInt2 = -+ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, hClientTAUpdateSyncPrimBlockInt2, -+ (const void __user *)psRGXKickTA3D2IN->phClientTAUpdateSyncPrimBlock, -+ psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) -+ { -+ ui32ClientTAUpdateSyncOffsetInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32ClientTAUpdateSyncOffsetInt, -+ (const void __user *)psRGXKickTA3D2IN->pui32ClientTAUpdateSyncOffset, -+ psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) -+ { -+ ui32ClientTAUpdateValueInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32ClientTAUpdateValueInt, -+ (const void __user *)psRGXKickTA3D2IN->pui32ClientTAUpdateValue, -+ psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) -+ { -+ psClient3DUpdateSyncPrimBlockInt = -+ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ OSCachedMemSet(psClient3DUpdateSyncPrimBlockInt, 0, -+ psRGXKickTA3D2IN->ui32Client3DUpdateCount * -+ sizeof(SYNC_PRIMITIVE_BLOCK *)); -+ ui32NextOffset += -+ psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); -+ hClient3DUpdateSyncPrimBlockInt2 = -+ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, hClient3DUpdateSyncPrimBlockInt2, -+ (const void __user *)psRGXKickTA3D2IN->phClient3DUpdateSyncPrimBlock, -+ psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) -+ { -+ ui32Client3DUpdateSyncOffsetInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy 
the data over */ -+ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32Client3DUpdateSyncOffsetInt, -+ (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateSyncOffset, -+ psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) -+ { -+ ui32Client3DUpdateValueInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32Client3DUpdateValueInt, -+ (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateValue, -+ psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ -+ { -+ uiUpdateFenceNameInt = -+ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiUpdateFenceNameInt, -+ (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName, -+ PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickTA3D2_exit; -+ } -+ ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - -+ 1] = '\0'; -+ } -+ -+ { -+ uiUpdateFenceName3DInt = -+ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiUpdateFenceName3DInt, -+ (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName3D, -+ PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickTA3D2_exit; -+ } -+ ((IMG_CHAR *) uiUpdateFenceName3DInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - -+ 1] = '\0'; -+ } -+ if (psRGXKickTA3D2IN->ui32TACmdSize != 0) -+ { -+ ui8TACmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui8TACmdInt, (const void __user *)psRGXKickTA3D2IN->pui8TACmd, -+ psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ if (psRGXKickTA3D2IN->ui323DPRCmdSize != 0) -+ { -+ ui83DPRCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui83DPRCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DPRCmd, -+ psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickTA3D2_exit; -+ 
} -+ } -+ if (psRGXKickTA3D2IN->ui323DCmdSize != 0) -+ { -+ ui83DCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui83DCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DCmd, -+ psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) -+ { -+ ui32SyncPMRFlagsInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32SyncPMRFlagsInt, -+ (const void __user *)psRGXKickTA3D2IN->pui32SyncPMRFlags, -+ psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) -+ { -+ psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ OSCachedMemSet(psSyncPMRsInt, 0, -+ psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)); -+ ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *); -+ hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickTA3D2IN->phSyncPMRs, -+ psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) -+ { -+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXKickTA3D2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psRenderContextInt, -+ hRenderContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); -+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXKickTA3D2_exit; -+ } -+ -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++) -+ { -+ /* Look up the address from the handle */ -+ psRGXKickTA3D2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psClientTAFenceSyncPrimBlockInt[i], -+ hClientTAFenceSyncPrimBlockInt2[i], -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, -+ IMG_TRUE); -+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ } -+ -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++) -+ { -+ /* Look up the address from the handle */ -+ psRGXKickTA3D2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **) -+ &psClientTAUpdateSyncPrimBlockInt[i], -+ hClientTAUpdateSyncPrimBlockInt2[i], -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, -+ IMG_TRUE); -+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ } -+ -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++) -+ { -+ /* Look up the address from the handle */ -+ psRGXKickTA3D2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **) -+ &psClient3DUpdateSyncPrimBlockInt[i], -+ hClient3DUpdateSyncPrimBlockInt2[i], -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, -+ IMG_TRUE); -+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ } -+ -+ /* Look up the address from the handle */ -+ psRGXKickTA3D2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPRFenceUFOSyncPrimBlockInt, -+ hPRFenceUFOSyncPrimBlock, -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); -+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXKickTA3D2_exit; -+ } -+ -+ if (psRGXKickTA3D2IN->hKMHWRTDataSet) -+ { -+ /* Look up the address from the handle */ -+ psRGXKickTA3D2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psKMHWRTDataSetInt, -+ hKMHWRTDataSet, -+ PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, IMG_TRUE); -+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ -+ if (psRGXKickTA3D2IN->hZSBuffer) -+ { -+ /* Look up the address from the handle */ -+ psRGXKickTA3D2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psZSBufferInt, -+ hZSBuffer, -+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); -+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ -+ if (psRGXKickTA3D2IN->hMSAAScratchBuffer) -+ { -+ /* Look up the address from the handle */ -+ psRGXKickTA3D2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psMSAAScratchBufferInt, -+ hMSAAScratchBuffer, -+ 
PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); -+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) -+ { -+ /* Look up the address from the handle */ -+ psRGXKickTA3D2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psSyncPMRsInt[i], -+ hSyncPMRsInt2[i], -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXKickTA3D2_exit; -+ } -+ } -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXKickTA3D2OUT->eError = -+ PVRSRVRGXKickTA3DKM(psRenderContextInt, -+ psRGXKickTA3D2IN->ui32ClientTAFenceCount, -+ psClientTAFenceSyncPrimBlockInt, -+ ui32ClientTAFenceSyncOffsetInt, -+ ui32ClientTAFenceValueInt, -+ psRGXKickTA3D2IN->ui32ClientTAUpdateCount, -+ psClientTAUpdateSyncPrimBlockInt, -+ ui32ClientTAUpdateSyncOffsetInt, -+ ui32ClientTAUpdateValueInt, -+ psRGXKickTA3D2IN->ui32Client3DUpdateCount, -+ psClient3DUpdateSyncPrimBlockInt, -+ ui32Client3DUpdateSyncOffsetInt, -+ ui32Client3DUpdateValueInt, -+ psPRFenceUFOSyncPrimBlockInt, -+ psRGXKickTA3D2IN->ui32PRFenceUFOSyncOffset, -+ psRGXKickTA3D2IN->ui32PRFenceValue, -+ psRGXKickTA3D2IN->hCheckFence, -+ psRGXKickTA3D2IN->hUpdateTimeline, -+ &psRGXKickTA3D2OUT->hUpdateFence, -+ uiUpdateFenceNameInt, -+ psRGXKickTA3D2IN->hCheckFence3D, -+ psRGXKickTA3D2IN->hUpdateTimeline3D, -+ &psRGXKickTA3D2OUT->hUpdateFence3D, -+ uiUpdateFenceName3DInt, -+ psRGXKickTA3D2IN->ui32TACmdSize, -+ ui8TACmdInt, -+ psRGXKickTA3D2IN->ui323DPRCmdSize, -+ ui83DPRCmdInt, -+ psRGXKickTA3D2IN->ui323DCmdSize, -+ ui83DCmdInt, -+ psRGXKickTA3D2IN->ui32ExtJobRef, -+ psRGXKickTA3D2IN->bbKickTA, -+ psRGXKickTA3D2IN->bbKickPR, -+ psRGXKickTA3D2IN->bbKick3D, -+ psRGXKickTA3D2IN->bbAbort, -+ psRGXKickTA3D2IN->ui32PDumpFlags, -+ psKMHWRTDataSetInt, -+ psZSBufferInt, -+ psMSAAScratchBufferInt, -+ psRGXKickTA3D2IN->ui32SyncPMRCount, -+ ui32SyncPMRFlagsInt, -+ psSyncPMRsInt, -+ psRGXKickTA3D2IN->ui32RenderTargetSize, -+ psRGXKickTA3D2IN->ui32NumberOfDrawCalls, -+ psRGXKickTA3D2IN->ui32NumberOfIndices, -+ psRGXKickTA3D2IN->ui32NumberOfMRTs, psRGXKickTA3D2IN->ui64Deadline); -+ -+RGXKickTA3D2_exit: -+ -+ /* Lock over handle lookup cleanup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psRenderContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hRenderContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); -+ } -+ -+ if (hClientTAFenceSyncPrimBlockInt2) -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++) -+ { -+ -+ /* Unreference the previously looked up handle */ -+ if (psClientTAFenceSyncPrimBlockInt && psClientTAFenceSyncPrimBlockInt[i]) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hClientTAFenceSyncPrimBlockInt2[i], -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); -+ } -+ } -+ } -+ -+ if (hClientTAUpdateSyncPrimBlockInt2) -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++) -+ { -+ -+ /* Unreference the previously looked up handle */ -+ if (psClientTAUpdateSyncPrimBlockInt && psClientTAUpdateSyncPrimBlockInt[i]) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hClientTAUpdateSyncPrimBlockInt2[i], -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); -+ } -+ } -+ } -+ -+ if (hClient3DUpdateSyncPrimBlockInt2) -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++) -+ { -+ -+ /* Unreference the previously looked up handle */ -+ if (psClient3DUpdateSyncPrimBlockInt && psClient3DUpdateSyncPrimBlockInt[i]) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hClient3DUpdateSyncPrimBlockInt2[i], -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); -+ } -+ } -+ } -+ -+ /* Unreference the previously looked up handle */ -+ if (psPRFenceUFOSyncPrimBlockInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPRFenceUFOSyncPrimBlock, -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); -+ } -+ -+ if (psRGXKickTA3D2IN->hKMHWRTDataSet) -+ { -+ -+ /* Unreference the previously looked up handle */ -+ if (psKMHWRTDataSetInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hKMHWRTDataSet, -+ PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); -+ } -+ } -+ -+ if (psRGXKickTA3D2IN->hZSBuffer) -+ { -+ -+ /* Unreference the previously looked up handle */ -+ if (psZSBufferInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hZSBuffer, -+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); -+ } -+ } -+ -+ if (psRGXKickTA3D2IN->hMSAAScratchBuffer) -+ { -+ -+ /* Unreference the previously looked up handle */ -+ if (psMSAAScratchBufferInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hMSAAScratchBuffer, -+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); -+ } -+ } -+ -+ if (hSyncPMRsInt2) -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) -+ { -+ -+ /* Unreference the previously looked up handle */ -+ if (psSyncPMRsInt && psSyncPMRsInt[i]) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hSyncPMRsInt2[i], -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ } -+ } -+ /* Release now we have cleaned up look up handles. 
*/
-+	UnlockHandle(psConnection->psHandleBase);
-+
-+	/* Allocated space should be equal to the last updated offset */
-+#ifdef PVRSRV_NEED_PVR_ASSERT
-+	if (psRGXKickTA3D2OUT->eError == PVRSRV_OK)
-+		PVR_ASSERT(ui32BufferSize == ui32NextOffset);
-+#endif /* PVRSRV_NEED_PVR_ASSERT */
-+
-+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
-+		OSFreeMemNoStats(pArrayArgsBuffer);
-+
-+	return 0;
-+}
-+
-+static IMG_INT
-+PVRSRVBridgeRGXSetRenderContextProperty(IMG_UINT32 ui32DispatchTableEntry,
-+					IMG_UINT8 * psRGXSetRenderContextPropertyIN_UI8,
-+					IMG_UINT8 * psRGXSetRenderContextPropertyOUT_UI8,
-+					CONNECTION_DATA * psConnection)
-+{
-+	PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyIN =
-+	    (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *)
-+	    IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyIN_UI8, 0);
-+	PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyOUT =
-+	    (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *)
-+	    IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyOUT_UI8, 0);
-+
-+	IMG_HANDLE hRenderContext = psRGXSetRenderContextPropertyIN->hRenderContext;
-+	RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL;
-+
-+	/* Lock over handle lookup. */
-+	LockHandle(psConnection->psHandleBase);
-+
-+	/* Look up the address from the handle */
-+	psRGXSetRenderContextPropertyOUT->eError =
-+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
-+				       (void **)&psRenderContextInt,
-+				       hRenderContext,
-+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE);
-+	if (unlikely(psRGXSetRenderContextPropertyOUT->eError != PVRSRV_OK))
-+	{
-+		UnlockHandle(psConnection->psHandleBase);
-+		goto RGXSetRenderContextProperty_exit;
-+	}
-+	/* Release now we have looked up handles. */
-+	UnlockHandle(psConnection->psHandleBase);
-+
-+	psRGXSetRenderContextPropertyOUT->eError =
-+	    PVRSRVRGXSetRenderContextPropertyKM(psRenderContextInt,
-+						psRGXSetRenderContextPropertyIN->ui32Property,
-+						psRGXSetRenderContextPropertyIN->ui64Input,
-+						&psRGXSetRenderContextPropertyOUT->ui64Output);
-+
-+RGXSetRenderContextProperty_exit:
-+
-+	/* Lock over handle lookup cleanup. */
-+	LockHandle(psConnection->psHandleBase);
-+
-+	/* Unreference the previously looked up handle */
-+	if (psRenderContextInt)
-+	{
-+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
-+					    hRenderContext,
-+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
-+	}
-+	/* Release now we have cleaned up look up handles.
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+PVRSRV_ERROR InitRGXTA3DBridge(void); -+void DeinitRGXTA3DBridge(void); -+ -+/* -+ * Register all RGXTA3D functions with services -+ */ -+PVRSRV_ERROR InitRGXTA3DBridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET, -+ PVRSRVBridgeRGXCreateHWRTDataSet, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET, -+ PVRSRVBridgeRGXDestroyHWRTDataSet, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER, -+ PVRSRVBridgeRGXCreateZSBuffer, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER, -+ PVRSRVBridgeRGXDestroyZSBuffer, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER, -+ PVRSRVBridgeRGXPopulateZSBuffer, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER, -+ PVRSRVBridgeRGXUnpopulateZSBuffer, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST, -+ PVRSRVBridgeRGXCreateFreeList, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST, -+ PVRSRVBridgeRGXDestroyFreeList, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT, -+ PVRSRVBridgeRGXCreateRenderContext, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT, -+ PVRSRVBridgeRGXDestroyRenderContext, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSENDZSSTOREDISABLE, -+ PVRSRVBridgeRGXSendZSStoreDisable, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, -+ PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY, -+ PVRSRVBridgeRGXSetRenderContextPriority, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED, -+ PVRSRVBridgeRGXRenderContextStalled, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2, -+ PVRSRVBridgeRGXKickTA3D2, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, -+ PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY, -+ PVRSRVBridgeRGXSetRenderContextProperty, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all rgxta3d functions with services -+ */ -+void DeinitRGXTA3DBridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, -+ PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, -+ 
PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSENDZSSTOREDISABLE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, -+ PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, -+ PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, -+ PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY); -+ -+} -diff --git a/drivers/gpu/drm/img-rogue/server_rgxtimerquery_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxtimerquery_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_rgxtimerquery_bridge.c -@@ -0,0 +1,167 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for rgxtimerquery -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for rgxtimerquery -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "rgxtimerquery.h" -+ -+#include "common_rgxtimerquery_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static IMG_INT -+PVRSRVBridgeRGXBeginTimerQuery(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXBeginTimerQueryIN_UI8, -+ IMG_UINT8 * psRGXBeginTimerQueryOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryIN = -+ (PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY *) IMG_OFFSET_ADDR(psRGXBeginTimerQueryIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryOUT = -+ (PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY *) IMG_OFFSET_ADDR(psRGXBeginTimerQueryOUT_UI8, -+ 0); -+ -+ psRGXBeginTimerQueryOUT->eError = -+ PVRSRVRGXBeginTimerQueryKM(psConnection, OSGetDevNode(psConnection), -+ psRGXBeginTimerQueryIN->ui32QueryId); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXEndTimerQuery(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXEndTimerQueryIN_UI8, -+ IMG_UINT8 * psRGXEndTimerQueryOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY *psRGXEndTimerQueryIN = -+ (PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY *) IMG_OFFSET_ADDR(psRGXEndTimerQueryIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY *psRGXEndTimerQueryOUT = -+ (PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY *) IMG_OFFSET_ADDR(psRGXEndTimerQueryOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psRGXEndTimerQueryIN); -+ -+ psRGXEndTimerQueryOUT->eError = -+ PVRSRVRGXEndTimerQueryKM(psConnection, OSGetDevNode(psConnection)); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXQueryTimer(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXQueryTimerIN_UI8, -+ IMG_UINT8 * psRGXQueryTimerOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXQUERYTIMER *psRGXQueryTimerIN = -+ (PVRSRV_BRIDGE_IN_RGXQUERYTIMER *) IMG_OFFSET_ADDR(psRGXQueryTimerIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXQUERYTIMER *psRGXQueryTimerOUT = -+ (PVRSRV_BRIDGE_OUT_RGXQUERYTIMER *) IMG_OFFSET_ADDR(psRGXQueryTimerOUT_UI8, 0); -+ -+ psRGXQueryTimerOUT->eError = -+ PVRSRVRGXQueryTimerKM(psConnection, OSGetDevNode(psConnection), -+ psRGXQueryTimerIN->ui32QueryId, -+ &psRGXQueryTimerOUT->ui64StartTime, -+ &psRGXQueryTimerOUT->ui64EndTime); -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+PVRSRV_ERROR InitRGXTIMERQUERYBridge(void); -+void DeinitRGXTIMERQUERYBridge(void); -+ -+/* -+ * Register all RGXTIMERQUERY functions with services -+ */ -+PVRSRV_ERROR InitRGXTIMERQUERYBridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, -+ PVRSRV_BRIDGE_RGXTIMERQUERY_RGXBEGINTIMERQUERY, -+ PVRSRVBridgeRGXBeginTimerQuery, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, -+ PVRSRV_BRIDGE_RGXTIMERQUERY_RGXENDTIMERQUERY, -+ PVRSRVBridgeRGXEndTimerQuery, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, -+ PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER, PVRSRVBridgeRGXQueryTimer, -+ NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all rgxtimerquery functions with services -+ */ -+void 
DeinitRGXTIMERQUERYBridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, -+ PVRSRV_BRIDGE_RGXTIMERQUERY_RGXBEGINTIMERQUERY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, -+ PVRSRV_BRIDGE_RGXTIMERQUERY_RGXENDTIMERQUERY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, -+ PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER); -+ -+} -diff --git a/drivers/gpu/drm/img-rogue/server_rgxtq2_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxtq2_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_rgxtq2_bridge.c -@@ -0,0 +1,1194 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for rgxtq2 -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for rgxtq2 -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "rgxtdmtransfer.h" -+ -+#include "common_rgxtq2_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+#include "rgx_bvnc_defs_km.h" -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static PVRSRV_ERROR _RGXTDMCreateTransferContextpsTransferContextIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PVRSRVRGXTDMDestroyTransferContextKM((RGX_SERVER_TQ_TDM_CONTEXT *) pvData); -+ return eError; -+} -+ -+static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX, -+ "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXTDMCreateTransferContextIN_UI8, -+ IMG_UINT8 * psRGXTDMCreateTransferContextOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT *psRGXTDMCreateTransferContextIN = -+ (PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT *) -+ IMG_OFFSET_ADDR(psRGXTDMCreateTransferContextIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT *psRGXTDMCreateTransferContextOUT = -+ (PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT *) -+ IMG_OFFSET_ADDR(psRGXTDMCreateTransferContextOUT_UI8, 0); -+ -+ IMG_BYTE *ui8FrameworkCmdInt = NULL; -+ IMG_HANDLE hPrivData = psRGXTDMCreateTransferContextIN->hPrivData; -+ IMG_HANDLE hPrivDataInt = NULL; -+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize * -+ sizeof(IMG_BYTE)) + 0; -+ -+ if (unlikely(psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE)) -+ { -+ psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXTDMCreateTransferContext_exit; -+ } -+ -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); -+ -+ /* Check that device supports the required feature */ -+ if ((psDeviceNode->pfnCheckDeviceFeature) && -+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, -+ RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) -+ { -+ psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ -+ goto RGXTDMCreateTransferContext_exit; -+ } -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXTDMCreateTransferContext_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXTDMCreateTransferContextIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = -+ (IMG_BYTE *) (void *)psRGXTDMCreateTransferContextIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXTDMCreateTransferContextOUT->eError = -+ PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXTDMCreateTransferContext_exit; -+ } -+ } -+ } -+ -+ if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize != 0) -+ { -+ ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui8FrameworkCmdInt, -+ (const void __user *)psRGXTDMCreateTransferContextIN->pui8FrameworkCmd, -+ psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) != -+ PVRSRV_OK) -+ { -+ psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXTDMCreateTransferContext_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXTDMCreateTransferContextOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&hPrivDataInt, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); -+ if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXTDMCreateTransferContext_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXTDMCreateTransferContextOUT->eError = -+ PVRSRVRGXTDMCreateTransferContextKM(psConnection, OSGetDevNode(psConnection), -+ psRGXTDMCreateTransferContextIN->i32Priority, -+ psRGXTDMCreateTransferContextIN-> -+ ui32FrameworkCmdSize, ui8FrameworkCmdInt, -+ hPrivDataInt, -+ psRGXTDMCreateTransferContextIN-> -+ ui32PackedCCBSizeU88, -+ psRGXTDMCreateTransferContextIN->ui32ContextFlags, -+ psRGXTDMCreateTransferContextIN-> -+ ui64RobustnessAddress, &psTransferContextInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)) -+ { -+ goto RGXTDMCreateTransferContext_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXTDMCreateTransferContextOUT->eError = -+ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psRGXTDMCreateTransferContextOUT->hTransferContext, -+ (void *)psTransferContextInt, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _RGXTDMCreateTransferContextpsTransferContextIntRelease); -+ if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXTDMCreateTransferContext_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXTDMCreateTransferContext_exit: -+ -+ /* Lock over handle lookup cleanup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (hPrivDataInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK) -+ { -+ if (psTransferContextInt) -+ { -+ PVRSRVRGXTDMDestroyTransferContextKM(psTransferContextInt); -+ } -+ } -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXTDMCreateTransferContextOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXTDMDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXTDMDestroyTransferContextIN_UI8, -+ IMG_UINT8 * psRGXTDMDestroyTransferContextOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT *psRGXTDMDestroyTransferContextIN = -+ (PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT *) -+ IMG_OFFSET_ADDR(psRGXTDMDestroyTransferContextIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT *psRGXTDMDestroyTransferContextOUT = -+ (PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT *) -+ IMG_OFFSET_ADDR(psRGXTDMDestroyTransferContextOUT_UI8, 0); -+ -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); -+ -+ /* Check that device supports the required feature */ -+ if ((psDeviceNode->pfnCheckDeviceFeature) && -+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, -+ RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) -+ { -+ psRGXTDMDestroyTransferContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ -+ goto RGXTDMDestroyTransferContext_exit; -+ } -+ } -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXTDMDestroyTransferContextOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psRGXTDMDestroyTransferContextIN-> -+ hTransferContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); -+ if (unlikely -+ ((psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_OK) -+ && (psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) -+ && (psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, -+ PVRSRVGetErrorString(psRGXTDMDestroyTransferContextOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXTDMDestroyTransferContext_exit; -+ } -+ -+ /* Release now we have destroyed handles. 
*/
-+	UnlockHandle(psConnection->psHandleBase);
-+
-+RGXTDMDestroyTransferContext_exit:
-+
-+	return 0;
-+}
-+
-+static IMG_INT
-+PVRSRVBridgeRGXTDMSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry,
-+					     IMG_UINT8 * psRGXTDMSetTransferContextPriorityIN_UI8,
-+					     IMG_UINT8 * psRGXTDMSetTransferContextPriorityOUT_UI8,
-+					     CONNECTION_DATA * psConnection)
-+{
-+	PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY *psRGXTDMSetTransferContextPriorityIN =
-+	    (PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY *)
-+	    IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPriorityIN_UI8, 0);
-+	PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY *psRGXTDMSetTransferContextPriorityOUT =
-+	    (PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY *)
-+	    IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPriorityOUT_UI8, 0);
-+
-+	IMG_HANDLE hTransferContext = psRGXTDMSetTransferContextPriorityIN->hTransferContext;
-+	RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL;
-+
-+	{
-+		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
-+
-+		/* Check that device supports the required feature */
-+		if ((psDeviceNode->pfnCheckDeviceFeature) &&
-+		    !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
-+							 RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
-+		{
-+			psRGXTDMSetTransferContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
-+
-+			goto RGXTDMSetTransferContextPriority_exit;
-+		}
-+	}
-+
-+	/* Lock over handle lookup. */
-+	LockHandle(psConnection->psHandleBase);
-+
-+	/* Look up the address from the handle */
-+	psRGXTDMSetTransferContextPriorityOUT->eError =
-+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
-+				       (void **)&psTransferContextInt,
-+				       hTransferContext,
-+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE);
-+	if (unlikely(psRGXTDMSetTransferContextPriorityOUT->eError != PVRSRV_OK))
-+	{
-+		UnlockHandle(psConnection->psHandleBase);
-+		goto RGXTDMSetTransferContextPriority_exit;
-+	}
-+	/* Release now we have looked up handles. */
-+	UnlockHandle(psConnection->psHandleBase);
-+
-+	psRGXTDMSetTransferContextPriorityOUT->eError =
-+	    PVRSRVRGXTDMSetTransferContextPriorityKM(psConnection, OSGetDevNode(psConnection),
-+						     psTransferContextInt,
-+						     psRGXTDMSetTransferContextPriorityIN->
-+						     i32Priority);
-+
-+RGXTDMSetTransferContextPriority_exit:
-+
-+	/* Lock over handle lookup cleanup. */
-+	LockHandle(psConnection->psHandleBase);
-+
-+	/* Unreference the previously looked up handle */
-+	if (psTransferContextInt)
-+	{
-+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
-+					    hTransferContext,
-+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
-+	}
-+	/* Release now we have cleaned up look up handles.
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXTDMNotifyWriteOffsetUpdateIN_UI8, -+ IMG_UINT8 * psRGXTDMNotifyWriteOffsetUpdateOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE *psRGXTDMNotifyWriteOffsetUpdateIN = -+ (PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE *) -+ IMG_OFFSET_ADDR(psRGXTDMNotifyWriteOffsetUpdateIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE *psRGXTDMNotifyWriteOffsetUpdateOUT = -+ (PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE *) -+ IMG_OFFSET_ADDR(psRGXTDMNotifyWriteOffsetUpdateOUT_UI8, 0); -+ -+ IMG_HANDLE hTransferContext = psRGXTDMNotifyWriteOffsetUpdateIN->hTransferContext; -+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; -+ -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); -+ -+ /* Check that device supports the required feature */ -+ if ((psDeviceNode->pfnCheckDeviceFeature) && -+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, -+ RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) -+ { -+ psRGXTDMNotifyWriteOffsetUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ -+ goto RGXTDMNotifyWriteOffsetUpdate_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXTDMNotifyWriteOffsetUpdateOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psTransferContextInt, -+ hTransferContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE); -+ if (unlikely(psRGXTDMNotifyWriteOffsetUpdateOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXTDMNotifyWriteOffsetUpdate_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXTDMNotifyWriteOffsetUpdateOUT->eError = -+ PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(psTransferContextInt, -+ psRGXTDMNotifyWriteOffsetUpdateIN-> -+ ui32PDumpFlags); -+ -+RGXTDMNotifyWriteOffsetUpdate_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psTransferContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hTransferContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, -+ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); -+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, -+ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); -+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, -+ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, -+ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRGXTDMSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXTDMSubmitTransfer2IN_UI8, -+ IMG_UINT8 * psRGXTDMSubmitTransfer2OUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2 *psRGXTDMSubmitTransfer2IN = -+ (PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2 *) -+ IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer2IN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2 *psRGXTDMSubmitTransfer2OUT = -+ (PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2 *) -+ IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer2OUT_UI8, 0); -+ -+ IMG_HANDLE hTransferContext = psRGXTDMSubmitTransfer2IN->hTransferContext; -+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; -+ SYNC_PRIMITIVE_BLOCK **psUpdateUFOSyncPrimBlockInt = NULL; -+ IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL; -+ IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL; -+ IMG_UINT32 *ui32UpdateValueInt = NULL; -+ IMG_CHAR *uiUpdateFenceNameInt = NULL; -+ IMG_UINT8 *ui8FWCommandInt = NULL; -+ IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; -+ PMR **psSyncPMRsInt = NULL; -+ IMG_HANDLE *hSyncPMRsInt2 = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * -+ sizeof(SYNC_PRIMITIVE_BLOCK *)) + -+ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + -+ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + -+ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8)) + -+ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)) + -+ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; -+ -+ if (unlikely(psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS)) -+ { -+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXTDMSubmitTransfer2_exit; -+ } -+ -+ if (unlikely -+ (psRGXTDMSubmitTransfer2IN->ui32CommandSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) -+ { -+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXTDMSubmitTransfer2_exit; -+ } -+ -+ if (unlikely(psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) -+ { -+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXTDMSubmitTransfer2_exit; -+ } -+ -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); -+ -+ /* Check that device supports the required feature */ -+ if ((psDeviceNode->pfnCheckDeviceFeature) && -+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, -+ 
RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) -+ { -+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ -+ goto RGXTDMSubmitTransfer2_exit; -+ } -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXTDMSubmitTransfer2_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXTDMSubmitTransfer2IN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXTDMSubmitTransfer2IN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXTDMSubmitTransfer2_exit; -+ } -+ } -+ } -+ -+ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0) -+ { -+ psUpdateUFOSyncPrimBlockInt = -+ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ OSCachedMemSet(psUpdateUFOSyncPrimBlockInt, 0, -+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * -+ sizeof(SYNC_PRIMITIVE_BLOCK *)); -+ ui32NextOffset += -+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * -+ sizeof(SYNC_PRIMITIVE_BLOCK *); -+ hUpdateUFOSyncPrimBlockInt2 = -+ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, hUpdateUFOSyncPrimBlockInt2, -+ (const void __user *)psRGXTDMSubmitTransfer2IN->phUpdateUFOSyncPrimBlock, -+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != -+ PVRSRV_OK) -+ { -+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXTDMSubmitTransfer2_exit; -+ } -+ } -+ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0) -+ { -+ ui32UpdateSyncOffsetInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32UpdateSyncOffsetInt, -+ (const void __user *)psRGXTDMSubmitTransfer2IN->pui32UpdateSyncOffset, -+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != -+ PVRSRV_OK) -+ { -+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXTDMSubmitTransfer2_exit; -+ } -+ } -+ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0) -+ { -+ ui32UpdateValueInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32UpdateValueInt, -+ (const void __user 
*)psRGXTDMSubmitTransfer2IN->pui32UpdateValue, -+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != -+ PVRSRV_OK) -+ { -+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXTDMSubmitTransfer2_exit; -+ } -+ } -+ -+ { -+ uiUpdateFenceNameInt = -+ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiUpdateFenceNameInt, -+ (const void __user *)psRGXTDMSubmitTransfer2IN->puiUpdateFenceName, -+ PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXTDMSubmitTransfer2_exit; -+ } -+ ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - -+ 1] = '\0'; -+ } -+ if (psRGXTDMSubmitTransfer2IN->ui32CommandSize != 0) -+ { -+ ui8FWCommandInt = (IMG_UINT8 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui8FWCommandInt, -+ (const void __user *)psRGXTDMSubmitTransfer2IN->pui8FWCommand, -+ psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8)) != PVRSRV_OK) -+ { -+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXTDMSubmitTransfer2_exit; -+ } -+ } -+ if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount != 0) -+ { -+ ui32SyncPMRFlagsInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32SyncPMRFlagsInt, -+ (const void __user *)psRGXTDMSubmitTransfer2IN->pui32SyncPMRFlags, -+ psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXTDMSubmitTransfer2_exit; -+ } -+ } -+ if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount != 0) -+ { -+ psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ OSCachedMemSet(psSyncPMRsInt, 0, -+ psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)); -+ ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *); -+ hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, hSyncPMRsInt2, -+ (const void __user *)psRGXTDMSubmitTransfer2IN->phSyncPMRs, -+ psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) -+ { -+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXTDMSubmitTransfer2_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXTDMSubmitTransfer2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psTransferContextInt, -+ hTransferContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE); -+ if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXTDMSubmitTransfer2_exit; -+ } -+ -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount; i++) -+ { -+ /* Look up the address from the handle */ -+ psRGXTDMSubmitTransfer2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psUpdateUFOSyncPrimBlockInt[i], -+ hUpdateUFOSyncPrimBlockInt2[i], -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, -+ IMG_TRUE); -+ if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXTDMSubmitTransfer2_exit; -+ } -+ } -+ } -+ -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount; i++) -+ { -+ /* Look up the address from the handle */ -+ psRGXTDMSubmitTransfer2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psSyncPMRsInt[i], -+ hSyncPMRsInt2[i], -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXTDMSubmitTransfer2_exit; -+ } -+ } -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXTDMSubmitTransfer2OUT->eError = -+ PVRSRVRGXTDMSubmitTransferKM(psTransferContextInt, -+ psRGXTDMSubmitTransfer2IN->ui32PDumpFlags, -+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount, -+ psUpdateUFOSyncPrimBlockInt, -+ ui32UpdateSyncOffsetInt, -+ ui32UpdateValueInt, -+ psRGXTDMSubmitTransfer2IN->hCheckFenceFD, -+ psRGXTDMSubmitTransfer2IN->hUpdateTimeline, -+ &psRGXTDMSubmitTransfer2OUT->hUpdateFence, -+ uiUpdateFenceNameInt, -+ psRGXTDMSubmitTransfer2IN->ui32CommandSize, -+ ui8FWCommandInt, -+ psRGXTDMSubmitTransfer2IN->ui32ExternalJobReference, -+ psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount, -+ ui32SyncPMRFlagsInt, -+ psSyncPMRsInt, -+ psRGXTDMSubmitTransfer2IN->ui32Characteristic1, -+ psRGXTDMSubmitTransfer2IN->ui32Characteristic2, -+ psRGXTDMSubmitTransfer2IN->ui64DeadlineInus); -+ -+RGXTDMSubmitTransfer2_exit: -+ -+ /* Lock over handle lookup cleanup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psTransferContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hTransferContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); -+ } -+ -+ if (hUpdateUFOSyncPrimBlockInt2) -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount; i++) -+ { -+ -+ /* Unreference the previously looked up handle */ -+ if (psUpdateUFOSyncPrimBlockInt && psUpdateUFOSyncPrimBlockInt[i]) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hUpdateUFOSyncPrimBlockInt2[i], -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); -+ } -+ } -+ } -+ -+ if (hSyncPMRsInt2) -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount; i++) -+ { -+ -+ /* Unreference the previously looked up handle */ -+ if (psSyncPMRsInt && psSyncPMRsInt[i]) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hSyncPMRsInt2[i], -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ } -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXTDMSubmitTransfer2OUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _RGXTDMGetSharedMemorypsCLIPMRMemIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PVRSRVRGXTDMReleaseSharedMemoryKM((PMR *) pvData); -+ return eError; -+} -+ -+static PVRSRV_ERROR _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PVRSRVRGXTDMReleaseSharedMemoryKM((PMR *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXTDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXTDMGetSharedMemoryIN_UI8, -+ IMG_UINT8 * psRGXTDMGetSharedMemoryOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY *psRGXTDMGetSharedMemoryIN = -+ (PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY *) -+ IMG_OFFSET_ADDR(psRGXTDMGetSharedMemoryIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY *psRGXTDMGetSharedMemoryOUT = -+ (PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY *) -+ IMG_OFFSET_ADDR(psRGXTDMGetSharedMemoryOUT_UI8, 0); -+ -+ PMR *psCLIPMRMemInt = NULL; -+ PMR *psUSCPMRMemInt = NULL; -+ -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); -+ -+ /* Check that device supports the required feature */ -+ if ((psDeviceNode->pfnCheckDeviceFeature) && -+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, -+ RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) -+ { -+ psRGXTDMGetSharedMemoryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ -+ goto RGXTDMGetSharedMemory_exit; -+ } -+ } -+ -+ PVR_UNREFERENCED_PARAMETER(psRGXTDMGetSharedMemoryIN); -+ -+ psRGXTDMGetSharedMemoryOUT->eError = -+ PVRSRVRGXTDMGetSharedMemoryKM(psConnection, OSGetDevNode(psConnection), -+ &psCLIPMRMemInt, &psUSCPMRMemInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) -+ { -+ goto RGXTDMGetSharedMemory_exit; -+ } -+ -+ /* Lock over handle creation. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXTDMGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psRGXTDMGetSharedMemoryOUT-> -+ hCLIPMRMem, -+ (void *)psCLIPMRMemInt, -+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _RGXTDMGetSharedMemorypsCLIPMRMemIntRelease); -+ if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXTDMGetSharedMemory_exit; -+ } -+ -+ psRGXTDMGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psRGXTDMGetSharedMemoryOUT-> -+ hUSCPMRMem, -+ (void *)psUSCPMRMemInt, -+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease); -+ if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXTDMGetSharedMemory_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXTDMGetSharedMemory_exit: -+ -+ if (psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK) -+ { -+ if (psCLIPMRMemInt) -+ { -+ PVRSRVRGXTDMReleaseSharedMemoryKM(psCLIPMRMemInt); -+ } -+ if (psUSCPMRMemInt) -+ { -+ PVRSRVRGXTDMReleaseSharedMemoryKM(psUSCPMRMemInt); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXTDMReleaseSharedMemory(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXTDMReleaseSharedMemoryIN_UI8, -+ IMG_UINT8 * psRGXTDMReleaseSharedMemoryOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY *psRGXTDMReleaseSharedMemoryIN = -+ (PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY *) -+ IMG_OFFSET_ADDR(psRGXTDMReleaseSharedMemoryIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY *psRGXTDMReleaseSharedMemoryOUT = -+ (PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY *) -+ IMG_OFFSET_ADDR(psRGXTDMReleaseSharedMemoryOUT_UI8, 0); -+ -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); -+ -+ /* Check that device supports the required feature */ -+ if ((psDeviceNode->pfnCheckDeviceFeature) && -+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, -+ RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) -+ { -+ psRGXTDMReleaseSharedMemoryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ -+ goto RGXTDMReleaseSharedMemory_exit; -+ } -+ } -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXTDMReleaseSharedMemoryOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psRGXTDMReleaseSharedMemoryIN->hPMRMem, -+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE); -+ if (unlikely((psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_OK) && -+ (psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psRGXTDMReleaseSharedMemoryOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXTDMReleaseSharedMemory_exit; -+ } -+ -+ /* Release now we have destroyed handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXTDMReleaseSharedMemory_exit: -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXTDMSetTransferContextProperty(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXTDMSetTransferContextPropertyIN_UI8, -+ IMG_UINT8 * psRGXTDMSetTransferContextPropertyOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY *psRGXTDMSetTransferContextPropertyIN = -+ (PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY *) -+ IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPropertyIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY *psRGXTDMSetTransferContextPropertyOUT = -+ (PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY *) -+ IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPropertyOUT_UI8, 0); -+ -+ IMG_HANDLE hTransferContext = psRGXTDMSetTransferContextPropertyIN->hTransferContext; -+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; -+ -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); -+ -+ /* Check that device supports the required feature */ -+ if ((psDeviceNode->pfnCheckDeviceFeature) && -+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, -+ RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) -+ { -+ psRGXTDMSetTransferContextPropertyOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ -+ goto RGXTDMSetTransferContextProperty_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXTDMSetTransferContextPropertyOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psTransferContextInt, -+ hTransferContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE); -+ if (unlikely(psRGXTDMSetTransferContextPropertyOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXTDMSetTransferContextProperty_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXTDMSetTransferContextPropertyOUT->eError = -+ PVRSRVRGXTDMSetTransferContextPropertyKM(psTransferContextInt, -+ psRGXTDMSetTransferContextPropertyIN-> -+ ui32Property, -+ psRGXTDMSetTransferContextPropertyIN-> -+ ui64Input, -+ &psRGXTDMSetTransferContextPropertyOUT-> -+ ui64Output); -+ -+RGXTDMSetTransferContextProperty_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psTransferContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hTransferContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+PVRSRV_ERROR InitRGXTQ2Bridge(void); -+void DeinitRGXTQ2Bridge(void); -+ -+/* -+ * Register all RGXTQ2 functions with services -+ */ -+PVRSRV_ERROR InitRGXTQ2Bridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, -+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT, -+ PVRSRVBridgeRGXTDMCreateTransferContext, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, -+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT, -+ PVRSRVBridgeRGXTDMDestroyTransferContext, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, -+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY, -+ PVRSRVBridgeRGXTDMSetTransferContextPriority, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, -+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE, -+ PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2, -+ PVRSRVBridgeRGXTDMSubmitTransfer2, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY, -+ PVRSRVBridgeRGXTDMGetSharedMemory, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY, -+ PVRSRVBridgeRGXTDMReleaseSharedMemory, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, -+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY, -+ PVRSRVBridgeRGXTDMSetTransferContextProperty, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all rgxtq2 functions with services -+ */ -+void DeinitRGXTQ2Bridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, -+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, -+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, -+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, -+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, -+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, -+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY); -+ -+} -diff --git a/drivers/gpu/drm/img-rogue/server_rgxtq_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxtq_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_rgxtq_bridge.c -@@ -0,0 +1,1288 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for rgxtq -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for rgxtq -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "rgxtransfer.h" -+#include "rgx_tq_shared.h" -+ -+#include "common_rgxtq_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+#if defined(SUPPORT_RGXTQ_BRIDGE) -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static PVRSRV_ERROR _RGXCreateTransferContextpsTransferContextIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PVRSRVRGXDestroyTransferContextKM((RGX_SERVER_TQ_CONTEXT *) pvData); -+ return eError; -+} -+ -+static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX, -+ "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRGXCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXCreateTransferContextIN_UI8, -+ IMG_UINT8 * psRGXCreateTransferContextOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT *psRGXCreateTransferContextIN = -+ (PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT *) -+ IMG_OFFSET_ADDR(psRGXCreateTransferContextIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT *psRGXCreateTransferContextOUT = -+ (PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT *) -+ IMG_OFFSET_ADDR(psRGXCreateTransferContextOUT_UI8, 0); -+ -+ IMG_BYTE *ui8FrameworkCmdInt = NULL; -+ IMG_HANDLE hPrivData = psRGXCreateTransferContextIN->hPrivData; -+ IMG_HANDLE hPrivDataInt = NULL; -+ RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) + 0; -+ -+ if (unlikely(psRGXCreateTransferContextIN->ui32FrameworkCmdize > RGXFWIF_RF_CMD_SIZE)) -+ { -+ psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXCreateTransferContext_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXCreateTransferContext_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXCreateTransferContextIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateTransferContextIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXCreateTransferContext_exit; -+ } -+ } -+ } -+ -+ if (psRGXCreateTransferContextIN->ui32FrameworkCmdize != 0) -+ { -+ ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui8FrameworkCmdInt, -+ (const void __user *)psRGXCreateTransferContextIN->pui8FrameworkCmd, -+ psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != -+ PVRSRV_OK) -+ { -+ psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXCreateTransferContext_exit; -+ } -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXCreateTransferContextOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&hPrivDataInt, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); -+ if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXCreateTransferContext_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXCreateTransferContextOUT->eError = -+ PVRSRVRGXCreateTransferContextKM(psConnection, OSGetDevNode(psConnection), -+ psRGXCreateTransferContextIN->i32Priority, -+ psRGXCreateTransferContextIN->ui32FrameworkCmdize, -+ ui8FrameworkCmdInt, -+ hPrivDataInt, -+ psRGXCreateTransferContextIN->ui32PackedCCBSizeU8888, -+ psRGXCreateTransferContextIN->ui32ContextFlags, -+ psRGXCreateTransferContextIN->ui64RobustnessAddress, -+ &psTransferContextInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) -+ { -+ goto RGXCreateTransferContext_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXCreateTransferContextOUT->eError = -+ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psRGXCreateTransferContextOUT->hTransferContext, -+ (void *)psTransferContextInt, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _RGXCreateTransferContextpsTransferContextIntRelease); -+ if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXCreateTransferContext_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXCreateTransferContext_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (hPrivDataInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psRGXCreateTransferContextOUT->eError != PVRSRV_OK) -+ { -+ if (psTransferContextInt) -+ { -+ PVRSRVRGXDestroyTransferContextKM(psTransferContextInt); -+ } -+ } -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXCreateTransferContextOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXDestroyTransferContextIN_UI8, -+ IMG_UINT8 * psRGXDestroyTransferContextOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT *psRGXDestroyTransferContextIN = -+ (PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT *) -+ IMG_OFFSET_ADDR(psRGXDestroyTransferContextIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT *psRGXDestroyTransferContextOUT = -+ (PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT *) -+ IMG_OFFSET_ADDR(psRGXDestroyTransferContextOUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXDestroyTransferContextOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psRGXDestroyTransferContextIN-> -+ hTransferContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); -+ if (unlikely -+ ((psRGXDestroyTransferContextOUT->eError != PVRSRV_OK) -+ && (psRGXDestroyTransferContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) -+ && (psRGXDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psRGXDestroyTransferContextOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXDestroyTransferContext_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXDestroyTransferContext_exit: -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXSetTransferContextPriorityIN_UI8, -+ IMG_UINT8 * psRGXSetTransferContextPriorityOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY *psRGXSetTransferContextPriorityIN = -+ (PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY *) -+ IMG_OFFSET_ADDR(psRGXSetTransferContextPriorityIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY *psRGXSetTransferContextPriorityOUT = -+ (PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY *) -+ IMG_OFFSET_ADDR(psRGXSetTransferContextPriorityOUT_UI8, 0); -+ -+ IMG_HANDLE hTransferContext = psRGXSetTransferContextPriorityIN->hTransferContext; -+ RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXSetTransferContextPriorityOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psTransferContextInt, -+ hTransferContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, IMG_TRUE); -+ if (unlikely(psRGXSetTransferContextPriorityOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXSetTransferContextPriority_exit; -+ } -+ /* Release now we have looked up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXSetTransferContextPriorityOUT->eError = -+ PVRSRVRGXSetTransferContextPriorityKM(psConnection, OSGetDevNode(psConnection), -+ psTransferContextInt, -+ psRGXSetTransferContextPriorityIN->i32Priority); -+ -+RGXSetTransferContextPriority_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psTransferContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hTransferContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, -+ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); -+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, -+ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); -+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, -+ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); -+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, -+ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRGXSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXSubmitTransfer2IN_UI8, -+ IMG_UINT8 * psRGXSubmitTransfer2OUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2 *psRGXSubmitTransfer2IN = -+ (PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2 *) IMG_OFFSET_ADDR(psRGXSubmitTransfer2IN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2 *psRGXSubmitTransfer2OUT = -+ (PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2 *) IMG_OFFSET_ADDR(psRGXSubmitTransfer2OUT_UI8, -+ 0); -+ -+ IMG_HANDLE hTransferContext = psRGXSubmitTransfer2IN->hTransferContext; -+ RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL; -+ IMG_UINT32 *ui32ClientUpdateCountInt = NULL; -+ SYNC_PRIMITIVE_BLOCK ***psUpdateUFOSyncPrimBlockInt = NULL; -+ IMG_HANDLE **hUpdateUFOSyncPrimBlockInt2 = NULL; -+ IMG_UINT32 **ui32UpdateSyncOffsetInt = NULL; -+ IMG_UINT32 **ui32UpdateValueInt = NULL; -+ IMG_CHAR *uiUpdateFenceNameInt = NULL; -+ IMG_UINT32 *ui32CommandSizeInt = NULL; -+ IMG_UINT8 **ui8FWCommandInt = NULL; -+ IMG_UINT32 *ui32TQPrepareFlagsInt = NULL; -+ IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; -+ PMR **psSyncPMRsInt = NULL; -+ IMG_HANDLE *hSyncPMRsInt2 = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BYTE *pArrayArgsBuffer2 = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + -+ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + -+ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)) + -+ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; -+ IMG_UINT32 ui32BufferSize2 = 0; -+ IMG_UINT32 ui32NextOffset2 = 0; -+ -+ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) -+ { -+ -+ ui64BufferSize += -+ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount * -+ sizeof(SYNC_PRIMITIVE_BLOCK **)); -+ ui64BufferSize += -+ ((IMG_UINT64) 
psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_HANDLE **)); -+ ui64BufferSize += -+ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *)); -+ ui64BufferSize += -+ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *)); -+ ui64BufferSize += -+ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT8 *)); -+ } -+ -+ if (unlikely(psRGXSubmitTransfer2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) -+ { -+ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXSubmitTransfer2_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXSubmitTransfer2_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRGXSubmitTransfer2IN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXSubmitTransfer2IN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXSubmitTransfer2_exit; -+ } -+ } -+ } -+ -+ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) -+ { -+ ui32ClientUpdateCountInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32ClientUpdateCountInt, -+ (const void __user *)psRGXSubmitTransfer2IN->pui32ClientUpdateCount, -+ psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXSubmitTransfer2_exit; -+ } -+ } -+ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) -+ { -+ /* Assigning psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer for first dimension */ -+ psUpdateUFOSyncPrimBlockInt = -+ (SYNC_PRIMITIVE_BLOCK ***) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += -+ psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **); -+ /* Assigning hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer for first dimension */ -+ hUpdateUFOSyncPrimBlockInt2 = -+ (IMG_HANDLE **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_HANDLE); -+ } -+ -+ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) -+ { -+ /* Assigning ui32UpdateSyncOffsetInt to the right offset in the pool buffer for first dimension */ -+ ui32UpdateSyncOffsetInt = -+ (IMG_UINT32 **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *); -+ } -+ -+ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) -+ { -+ /* Assigning ui32UpdateValueInt to the right offset in the pool buffer for first dimension */ -+ ui32UpdateValueInt = -+ (IMG_UINT32 **) 
IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *); -+ } -+ -+ { -+ uiUpdateFenceNameInt = -+ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiUpdateFenceNameInt, -+ (const void __user *)psRGXSubmitTransfer2IN->puiUpdateFenceName, -+ PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXSubmitTransfer2_exit; -+ } -+ ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - -+ 1] = '\0'; -+ } -+ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) -+ { -+ ui32CommandSizeInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32CommandSizeInt, -+ (const void __user *)psRGXSubmitTransfer2IN->pui32CommandSize, -+ psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXSubmitTransfer2_exit; -+ } -+ } -+ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) -+ { -+ /* Assigning ui8FWCommandInt to the right offset in the pool buffer for first dimension */ -+ ui8FWCommandInt = (IMG_UINT8 **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT8 *); -+ } -+ -+ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) -+ { -+ ui32TQPrepareFlagsInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32TQPrepareFlagsInt, -+ (const void __user *)psRGXSubmitTransfer2IN->pui32TQPrepareFlags, -+ psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXSubmitTransfer2_exit; -+ } -+ } -+ if (psRGXSubmitTransfer2IN->ui32SyncPMRCount != 0) -+ { -+ ui32SyncPMRFlagsInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32SyncPMRFlagsInt, -+ (const void __user *)psRGXSubmitTransfer2IN->pui32SyncPMRFlags, -+ psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXSubmitTransfer2_exit; -+ } -+ } -+ if (psRGXSubmitTransfer2IN->ui32SyncPMRCount != 0) -+ { -+ psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ OSCachedMemSet(psSyncPMRsInt, 0, -+ psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)); -+ ui32NextOffset += psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *); -+ hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ 
ui32NextOffset += psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); -+ } -+ -+ /* Copy the data over */ -+ if (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, hSyncPMRsInt2, (const void __user *)psRGXSubmitTransfer2IN->phSyncPMRs, -+ psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) -+ { -+ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXSubmitTransfer2_exit; -+ } -+ } -+ -+ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) -+ { -+ IMG_UINT32 i; -+ ui64BufferSize = 0; -+ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) -+ { -+ ui64BufferSize += -+ ((IMG_UINT64) ui32ClientUpdateCountInt[i] * -+ sizeof(SYNC_PRIMITIVE_BLOCK *)); -+ ui64BufferSize += -+ ((IMG_UINT64) ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE *)); -+ ui64BufferSize += -+ ((IMG_UINT64) ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)); -+ ui64BufferSize += -+ ((IMG_UINT64) ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)); -+ ui64BufferSize += ((IMG_UINT64) ui32CommandSizeInt[i] * sizeof(IMG_UINT8)); -+ } -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RGXSubmitTransfer2_exit; -+ } -+ ui32BufferSize2 = (IMG_UINT32) ui64BufferSize; -+ } -+ -+ if (ui32BufferSize2 != 0) -+ { -+ pArrayArgsBuffer2 = OSAllocMemNoStats(ui32BufferSize2); -+ -+ if (!pArrayArgsBuffer2) -+ { -+ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RGXSubmitTransfer2_exit; -+ } -+ } -+ -+ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) -+ { -+ IMG_UINT32 i; -+ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) -+ { -+ if (ui32ClientUpdateCountInt[i] > PVRSRV_MAX_SYNCS) -+ { -+ psRGXSubmitTransfer2OUT->eError = -+ PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXSubmitTransfer2_exit; -+ } -+ -+ /* Assigning each psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer (this is the second dimension) */ -+ psUpdateUFOSyncPrimBlockInt[i] = -+ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer2, -+ ui32NextOffset2); -+ OSCachedMemSet(psUpdateUFOSyncPrimBlockInt[i], 0, -+ ui32ClientUpdateCountInt[i] * -+ sizeof(SYNC_PRIMITIVE_BLOCK *)); -+ ui32NextOffset2 += -+ ui32ClientUpdateCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *); -+ /* Assigning each hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer (this is the second dimension) */ -+ hUpdateUFOSyncPrimBlockInt2[i] = -+ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2); -+ ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE); -+ } -+ } -+ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) -+ { -+ IMG_UINT32 i; -+ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) -+ { -+ /* Assigning each ui32UpdateSyncOffsetInt to the right offset in the pool buffer (this is the second dimension) */ -+ ui32UpdateSyncOffsetInt[i] = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2); -+ ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32); -+ } -+ } -+ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) -+ { -+ IMG_UINT32 i; -+ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) -+ { -+ /* Assigning each ui32UpdateValueInt to the right offset in the pool buffer (this is the second dimension) */ -+ ui32UpdateValueInt[i] = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2); -+ ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32); 
-+ } -+ } -+ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) -+ { -+ IMG_UINT32 i; -+ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) -+ { -+ if (ui32CommandSizeInt[i] > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE) -+ { -+ psRGXSubmitTransfer2OUT->eError = -+ PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RGXSubmitTransfer2_exit; -+ } -+ -+ /* Assigning each ui8FWCommandInt to the right offset in the pool buffer (this is the second dimension) */ -+ ui8FWCommandInt[i] = -+ (IMG_UINT8 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2); -+ ui32NextOffset2 += ui32CommandSizeInt[i] * sizeof(IMG_UINT8); -+ } -+ } -+ -+ { -+ IMG_UINT32 i; -+ IMG_HANDLE **psPtr; -+ -+ /* Loop over all the pointers in the array copying the data into the kernel */ -+ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) -+ { -+ /* Copy the pointer over from the client side */ -+ if (OSCopyFromUser -+ (NULL, &psPtr, -+ (const void __user *)&psRGXSubmitTransfer2IN-> -+ phUpdateUFOSyncPrimBlock[i], sizeof(IMG_HANDLE **)) != PVRSRV_OK) -+ { -+ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXSubmitTransfer2_exit; -+ } -+ -+ /* Copy the data over */ -+ if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE)) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, (hUpdateUFOSyncPrimBlockInt2[i]), -+ (const void __user *)psPtr, -+ (ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE))) != -+ PVRSRV_OK) -+ { -+ psRGXSubmitTransfer2OUT->eError = -+ PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXSubmitTransfer2_exit; -+ } -+ } -+ } -+ } -+ -+ { -+ IMG_UINT32 i; -+ IMG_UINT32 **psPtr; -+ -+ /* Loop over all the pointers in the array copying the data into the kernel */ -+ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) -+ { -+ /* Copy the pointer over from the client side */ -+ if (OSCopyFromUser -+ (NULL, &psPtr, -+ (const void __user *)&psRGXSubmitTransfer2IN->pui32UpdateSyncOffset[i], -+ sizeof(IMG_UINT32 **)) != PVRSRV_OK) -+ { -+ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXSubmitTransfer2_exit; -+ } -+ -+ /* Copy the data over */ -+ if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, (ui32UpdateSyncOffsetInt[i]), (const void __user *)psPtr, -+ (ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32))) != -+ PVRSRV_OK) -+ { -+ psRGXSubmitTransfer2OUT->eError = -+ PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXSubmitTransfer2_exit; -+ } -+ } -+ } -+ } -+ -+ { -+ IMG_UINT32 i; -+ IMG_UINT32 **psPtr; -+ -+ /* Loop over all the pointers in the array copying the data into the kernel */ -+ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) -+ { -+ /* Copy the pointer over from the client side */ -+ if (OSCopyFromUser -+ (NULL, &psPtr, -+ (const void __user *)&psRGXSubmitTransfer2IN->pui32UpdateValue[i], -+ sizeof(IMG_UINT32 **)) != PVRSRV_OK) -+ { -+ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXSubmitTransfer2_exit; -+ } -+ -+ /* Copy the data over */ -+ if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, (ui32UpdateValueInt[i]), (const void __user *)psPtr, -+ (ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32))) != -+ PVRSRV_OK) -+ { -+ psRGXSubmitTransfer2OUT->eError = -+ PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXSubmitTransfer2_exit; -+ } -+ } -+ } -+ } -+ -+ { -+ IMG_UINT32 i; -+ IMG_UINT8 **psPtr; -+ -+ /* Loop over all the pointers in the array copying the data into the kernel */ -+ for (i = 0; i < 
psRGXSubmitTransfer2IN->ui32PrepareCount; i++) -+ { -+ /* Copy the pointer over from the client side */ -+ if (OSCopyFromUser -+ (NULL, &psPtr, -+ (const void __user *)&psRGXSubmitTransfer2IN->pui8FWCommand[i], -+ sizeof(IMG_UINT8 **)) != PVRSRV_OK) -+ { -+ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXSubmitTransfer2_exit; -+ } -+ -+ /* Copy the data over */ -+ if ((ui32CommandSizeInt[i] * sizeof(IMG_UINT8)) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, (ui8FWCommandInt[i]), (const void __user *)psPtr, -+ (ui32CommandSizeInt[i] * sizeof(IMG_UINT8))) != PVRSRV_OK) -+ { -+ psRGXSubmitTransfer2OUT->eError = -+ PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RGXSubmitTransfer2_exit; -+ } -+ } -+ } -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXSubmitTransfer2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psTransferContextInt, -+ hTransferContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, IMG_TRUE); -+ if (unlikely(psRGXSubmitTransfer2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXSubmitTransfer2_exit; -+ } -+ -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) -+ { -+ IMG_UINT32 j; -+ for (j = 0; j < ui32ClientUpdateCountInt[i]; j++) -+ { -+ /* Look up the address from the handle */ -+ psRGXSubmitTransfer2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **) -+ &psUpdateUFOSyncPrimBlockInt[i][j], -+ hUpdateUFOSyncPrimBlockInt2[i][j], -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, -+ IMG_TRUE); -+ if (unlikely(psRGXSubmitTransfer2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXSubmitTransfer2_exit; -+ } -+ } -+ } -+ } -+ -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXSubmitTransfer2IN->ui32SyncPMRCount; i++) -+ { -+ /* Look up the address from the handle */ -+ psRGXSubmitTransfer2OUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psSyncPMRsInt[i], -+ hSyncPMRsInt2[i], -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psRGXSubmitTransfer2OUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXSubmitTransfer2_exit; -+ } -+ } -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXSubmitTransfer2OUT->eError = -+ PVRSRVRGXSubmitTransferKM(psTransferContextInt, -+ psRGXSubmitTransfer2IN->ui32PrepareCount, -+ ui32ClientUpdateCountInt, -+ psUpdateUFOSyncPrimBlockInt, -+ ui32UpdateSyncOffsetInt, -+ ui32UpdateValueInt, -+ psRGXSubmitTransfer2IN->hCheckFenceFD, -+ psRGXSubmitTransfer2IN->h2DUpdateTimeline, -+ &psRGXSubmitTransfer2OUT->h2DUpdateFence, -+ psRGXSubmitTransfer2IN->h3DUpdateTimeline, -+ &psRGXSubmitTransfer2OUT->h3DUpdateFence, -+ uiUpdateFenceNameInt, -+ ui32CommandSizeInt, -+ ui8FWCommandInt, -+ ui32TQPrepareFlagsInt, -+ psRGXSubmitTransfer2IN->ui32ExtJobRef, -+ psRGXSubmitTransfer2IN->ui32SyncPMRCount, -+ ui32SyncPMRFlagsInt, psSyncPMRsInt); -+ -+RGXSubmitTransfer2_exit: -+ -+ /* Lock over handle lookup cleanup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psTransferContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hTransferContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); -+ } -+ -+ if (hUpdateUFOSyncPrimBlockInt2) -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) -+ { -+ IMG_UINT32 j; -+ for (j = 0; j < ui32ClientUpdateCountInt[i]; j++) -+ { -+ -+ /* Unreference the previously looked up handle */ -+ if (psUpdateUFOSyncPrimBlockInt && psUpdateUFOSyncPrimBlockInt[i] -+ && psUpdateUFOSyncPrimBlockInt[i][j]) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hUpdateUFOSyncPrimBlockInt2[i] -+ [j], -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); -+ } -+ } -+ } -+ } -+ -+ if (hSyncPMRsInt2) -+ { -+ IMG_UINT32 i; -+ -+ for (i = 0; i < psRGXSubmitTransfer2IN->ui32SyncPMRCount; i++) -+ { -+ -+ /* Unreference the previously looked up handle */ -+ if (psSyncPMRsInt && psSyncPMRsInt[i]) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hSyncPMRsInt2[i], -+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ } -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXSubmitTransfer2OUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRGXSubmitTransfer2OUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize2 == ui32NextOffset2); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (pArrayArgsBuffer2) -+ OSFreeMemNoStats(pArrayArgsBuffer2); -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _RGXTQGetSharedMemorypsCLIPMRMemIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PVRSRVRGXTQReleaseSharedMemoryKM((PMR *) pvData); -+ return eError; -+} -+ -+static PVRSRV_ERROR _RGXTQGetSharedMemorypsUSCPMRMemIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PVRSRVRGXTQReleaseSharedMemoryKM((PMR *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXTQGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXTQGetSharedMemoryIN_UI8, -+ IMG_UINT8 * psRGXTQGetSharedMemoryOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXTQGETSHAREDMEMORY *psRGXTQGetSharedMemoryIN = -+ (PVRSRV_BRIDGE_IN_RGXTQGETSHAREDMEMORY *) IMG_OFFSET_ADDR(psRGXTQGetSharedMemoryIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_RGXTQGETSHAREDMEMORY *psRGXTQGetSharedMemoryOUT = -+ (PVRSRV_BRIDGE_OUT_RGXTQGETSHAREDMEMORY *) -+ IMG_OFFSET_ADDR(psRGXTQGetSharedMemoryOUT_UI8, 0); -+ -+ PMR *psCLIPMRMemInt = NULL; -+ PMR *psUSCPMRMemInt = NULL; -+ -+ PVR_UNREFERENCED_PARAMETER(psRGXTQGetSharedMemoryIN); -+ -+ psRGXTQGetSharedMemoryOUT->eError = -+ PVRSRVRGXTQGetSharedMemoryKM(psConnection, OSGetDevNode(psConnection), -+ &psCLIPMRMemInt, &psUSCPMRMemInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psRGXTQGetSharedMemoryOUT->eError != PVRSRV_OK)) -+ { -+ goto RGXTQGetSharedMemory_exit; -+ } -+ -+ /* Lock over handle creation. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXTQGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psRGXTQGetSharedMemoryOUT-> -+ hCLIPMRMem, -+ (void *)psCLIPMRMemInt, -+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _RGXTQGetSharedMemorypsCLIPMRMemIntRelease); -+ if (unlikely(psRGXTQGetSharedMemoryOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXTQGetSharedMemory_exit; -+ } -+ -+ psRGXTQGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psRGXTQGetSharedMemoryOUT-> -+ hUSCPMRMem, -+ (void *)psUSCPMRMemInt, -+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _RGXTQGetSharedMemorypsUSCPMRMemIntRelease); -+ if (unlikely(psRGXTQGetSharedMemoryOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXTQGetSharedMemory_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXTQGetSharedMemory_exit: -+ -+ if (psRGXTQGetSharedMemoryOUT->eError != PVRSRV_OK) -+ { -+ if (psCLIPMRMemInt) -+ { -+ PVRSRVRGXTQReleaseSharedMemoryKM(psCLIPMRMemInt); -+ } -+ if (psUSCPMRMemInt) -+ { -+ PVRSRVRGXTQReleaseSharedMemoryKM(psUSCPMRMemInt); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXTQReleaseSharedMemory(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXTQReleaseSharedMemoryIN_UI8, -+ IMG_UINT8 * psRGXTQReleaseSharedMemoryOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXTQRELEASESHAREDMEMORY *psRGXTQReleaseSharedMemoryIN = -+ (PVRSRV_BRIDGE_IN_RGXTQRELEASESHAREDMEMORY *) -+ IMG_OFFSET_ADDR(psRGXTQReleaseSharedMemoryIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXTQRELEASESHAREDMEMORY *psRGXTQReleaseSharedMemoryOUT = -+ (PVRSRV_BRIDGE_OUT_RGXTQRELEASESHAREDMEMORY *) -+ IMG_OFFSET_ADDR(psRGXTQReleaseSharedMemoryOUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRGXTQReleaseSharedMemoryOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psRGXTQReleaseSharedMemoryIN->hPMRMem, -+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE); -+ if (unlikely((psRGXTQReleaseSharedMemoryOUT->eError != PVRSRV_OK) && -+ (psRGXTQReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psRGXTQReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psRGXTQReleaseSharedMemoryOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXTQReleaseSharedMemory_exit; -+ } -+ -+ /* Release now we have destroyed handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RGXTQReleaseSharedMemory_exit: -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRGXSetTransferContextProperty(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRGXSetTransferContextPropertyIN_UI8, -+ IMG_UINT8 * psRGXSetTransferContextPropertyOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY *psRGXSetTransferContextPropertyIN = -+ (PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY *) -+ IMG_OFFSET_ADDR(psRGXSetTransferContextPropertyIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY *psRGXSetTransferContextPropertyOUT = -+ (PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY *) -+ IMG_OFFSET_ADDR(psRGXSetTransferContextPropertyOUT_UI8, 0); -+ -+ IMG_HANDLE hTransferContext = psRGXSetTransferContextPropertyIN->hTransferContext; -+ RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRGXSetTransferContextPropertyOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psTransferContextInt, -+ hTransferContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, IMG_TRUE); -+ if (unlikely(psRGXSetTransferContextPropertyOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RGXSetTransferContextProperty_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRGXSetTransferContextPropertyOUT->eError = -+ PVRSRVRGXSetTransferContextPropertyKM(psTransferContextInt, -+ psRGXSetTransferContextPropertyIN->ui32Property, -+ psRGXSetTransferContextPropertyIN->ui64Input, -+ &psRGXSetTransferContextPropertyOUT->ui64Output); -+ -+RGXSetTransferContextProperty_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psTransferContextInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hTransferContext, -+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+#endif /* SUPPORT_RGXTQ_BRIDGE */ -+ -+#if defined(SUPPORT_RGXTQ_BRIDGE) -+PVRSRV_ERROR InitRGXTQBridge(void); -+void DeinitRGXTQBridge(void); -+ -+/* -+ * Register all RGXTQ functions with services -+ */ -+PVRSRV_ERROR InitRGXTQBridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT, -+ PVRSRVBridgeRGXCreateTransferContext, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT, -+ PVRSRVBridgeRGXDestroyTransferContext, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, -+ PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY, -+ PVRSRVBridgeRGXSetTransferContextPriority, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2, -+ PVRSRVBridgeRGXSubmitTransfer2, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXTQGETSHAREDMEMORY, -+ PVRSRVBridgeRGXTQGetSharedMemory, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXTQRELEASESHAREDMEMORY, -+ PVRSRVBridgeRGXTQReleaseSharedMemory, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, -+ PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY, -+ PVRSRVBridgeRGXSetTransferContextProperty, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all rgxtq functions with services -+ */ -+void DeinitRGXTQBridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, -+ PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXTQGETSHAREDMEMORY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXTQRELEASESHAREDMEMORY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, -+ PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY); -+ -+} -+#else /* SUPPORT_RGXTQ_BRIDGE */ -+/* This bridge is conditional on SUPPORT_RGXTQ_BRIDGE - when not defined, -+ * do not populate the dispatch table with its functions -+ */ -+#define InitRGXTQBridge() \ -+ PVRSRV_OK -+ -+#define DeinitRGXTQBridge() -+ -+#endif /* SUPPORT_RGXTQ_BRIDGE */ -diff --git a/drivers/gpu/drm/img-rogue/server_ri_bridge.c b/drivers/gpu/drm/img-rogue/server_ri_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_ri_bridge.c -@@ -0,0 +1,745 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for ri -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for ri -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "ri_server.h" -+ -+#include "common_ri_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static IMG_INT -+PVRSRVBridgeRIWritePMREntry(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRIWritePMREntryIN_UI8, -+ IMG_UINT8 * psRIWritePMREntryOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *psRIWritePMREntryIN = -+ (PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *) IMG_OFFSET_ADDR(psRIWritePMREntryIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *psRIWritePMREntryOUT = -+ (PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *) IMG_OFFSET_ADDR(psRIWritePMREntryOUT_UI8, 0); -+ -+ IMG_HANDLE hPMRHandle = psRIWritePMREntryIN->hPMRHandle; -+ PMR *psPMRHandleInt = NULL; -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRIWritePMREntryOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRHandleInt, -+ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psRIWritePMREntryOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RIWritePMREntry_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRIWritePMREntryOUT->eError = RIWritePMREntryKM(psPMRHandleInt); -+ -+RIWritePMREntry_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRHandleInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _RIWriteMEMDESCEntrypsRIHandleIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = RIDeleteMEMDESCEntryKM((RI_HANDLE) pvData); -+ return eError; -+} -+ -+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, -+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRIWriteMEMDESCEntryIN_UI8, -+ IMG_UINT8 * psRIWriteMEMDESCEntryOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryIN = -+ (PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *) IMG_OFFSET_ADDR(psRIWriteMEMDESCEntryIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryOUT = -+ (PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *) IMG_OFFSET_ADDR(psRIWriteMEMDESCEntryOUT_UI8, -+ 0); -+ -+ IMG_HANDLE hPMRHandle = psRIWriteMEMDESCEntryIN->hPMRHandle; -+ PMR *psPMRHandleInt = NULL; -+ IMG_CHAR *uiTextBInt = NULL; -+ RI_HANDLE psRIHandleInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) + 0; -+ -+ if (unlikely(psRIWriteMEMDESCEntryIN->ui32TextBSize > DEVMEM_ANNOTATION_MAX_LEN)) -+ { -+ psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RIWriteMEMDESCEntry_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RIWriteMEMDESCEntry_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRIWriteMEMDESCEntryIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRIWriteMEMDESCEntryIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RIWriteMEMDESCEntry_exit; -+ } -+ } -+ } -+ -+ if (psRIWriteMEMDESCEntryIN->ui32TextBSize != 0) -+ { -+ uiTextBInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiTextBInt, (const void __user *)psRIWriteMEMDESCEntryIN->puiTextB, -+ psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RIWriteMEMDESCEntry_exit; -+ } -+ ((IMG_CHAR *) -+ uiTextBInt)[(psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) - 1] = -+ '\0'; -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRIWriteMEMDESCEntryOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRHandleInt, -+ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RIWriteMEMDESCEntry_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRIWriteMEMDESCEntryOUT->eError = -+ RIWriteMEMDESCEntryKM(psPMRHandleInt, -+ psRIWriteMEMDESCEntryIN->ui32TextBSize, -+ uiTextBInt, -+ psRIWriteMEMDESCEntryIN->ui64Offset, -+ psRIWriteMEMDESCEntryIN->ui64Size, -+ psRIWriteMEMDESCEntryIN->bIsImport, -+ psRIWriteMEMDESCEntryIN->bIsSuballoc, &psRIHandleInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)) -+ { -+ goto RIWriteMEMDESCEntry_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRIWriteMEMDESCEntryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psRIWriteMEMDESCEntryOUT-> -+ hRIHandle, -+ (void *)psRIHandleInt, -+ PVRSRV_HANDLE_TYPE_RI_HANDLE, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _RIWriteMEMDESCEntrypsRIHandleIntRelease); -+ if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RIWriteMEMDESCEntry_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RIWriteMEMDESCEntry_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRHandleInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK) -+ { -+ if (psRIHandleInt) -+ { -+ RIDeleteMEMDESCEntryKM(psRIHandleInt); -+ } -+ } -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRIWriteMEMDESCEntryOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _RIWriteProcListEntrypsRIHandleIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = RIDeleteMEMDESCEntryKM((RI_HANDLE) pvData); -+ return eError; -+} -+ -+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, -+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRIWriteProcListEntryIN_UI8, -+ IMG_UINT8 * psRIWriteProcListEntryOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryIN = -+ (PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *) IMG_OFFSET_ADDR(psRIWriteProcListEntryIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryOUT = -+ (PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *) -+ IMG_OFFSET_ADDR(psRIWriteProcListEntryOUT_UI8, 0); -+ -+ IMG_CHAR *uiTextBInt = NULL; -+ RI_HANDLE psRIHandleInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) + 0; -+ -+ if (unlikely(psRIWriteProcListEntryIN->ui32TextBSize > DEVMEM_ANNOTATION_MAX_LEN)) -+ { -+ psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto RIWriteProcListEntry_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto RIWriteProcListEntry_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psRIWriteProcListEntryIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRIWriteProcListEntryIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto RIWriteProcListEntry_exit; -+ } -+ } -+ } -+ -+ if (psRIWriteProcListEntryIN->ui32TextBSize != 0) -+ { -+ uiTextBInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiTextBInt, (const void __user *)psRIWriteProcListEntryIN->puiTextB, -+ psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto RIWriteProcListEntry_exit; -+ } -+ ((IMG_CHAR *) -+ uiTextBInt)[(psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) - 1] = -+ '\0'; -+ } -+ -+ psRIWriteProcListEntryOUT->eError = -+ RIWriteProcListEntryKM(psConnection, OSGetDevNode(psConnection), -+ psRIWriteProcListEntryIN->ui32TextBSize, -+ uiTextBInt, -+ psRIWriteProcListEntryIN->ui64Size, -+ psRIWriteProcListEntryIN->ui64DevVAddr, &psRIHandleInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psRIWriteProcListEntryOUT->eError != PVRSRV_OK)) -+ { -+ goto RIWriteProcListEntry_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRIWriteProcListEntryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psRIWriteProcListEntryOUT-> -+ hRIHandle, -+ (void *)psRIHandleInt, -+ PVRSRV_HANDLE_TYPE_RI_HANDLE, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _RIWriteProcListEntrypsRIHandleIntRelease); -+ if (unlikely(psRIWriteProcListEntryOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RIWriteProcListEntry_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RIWriteProcListEntry_exit: -+ -+ if (psRIWriteProcListEntryOUT->eError != PVRSRV_OK) -+ { -+ if (psRIHandleInt) -+ { -+ RIDeleteMEMDESCEntryKM(psRIHandleInt); -+ } -+ } -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psRIWriteProcListEntryOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRIUpdateMEMDESCAddr(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRIUpdateMEMDESCAddrIN_UI8, -+ IMG_UINT8 * psRIUpdateMEMDESCAddrOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrIN = -+ (PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *) IMG_OFFSET_ADDR(psRIUpdateMEMDESCAddrIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrOUT = -+ (PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *) IMG_OFFSET_ADDR(psRIUpdateMEMDESCAddrOUT_UI8, -+ 0); -+ -+ IMG_HANDLE hRIHandle = psRIUpdateMEMDESCAddrIN->hRIHandle; -+ RI_HANDLE psRIHandleInt = NULL; -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRIUpdateMEMDESCAddrOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psRIHandleInt, -+ hRIHandle, PVRSRV_HANDLE_TYPE_RI_HANDLE, IMG_TRUE); -+ if (unlikely(psRIUpdateMEMDESCAddrOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RIUpdateMEMDESCAddr_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRIUpdateMEMDESCAddrOUT->eError = -+ RIUpdateMEMDESCAddrKM(psRIHandleInt, psRIUpdateMEMDESCAddrIN->sAddr); -+ -+RIUpdateMEMDESCAddr_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psRIHandleInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hRIHandle, PVRSRV_HANDLE_TYPE_RI_HANDLE); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRIDeleteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRIDeleteMEMDESCEntryIN_UI8, -+ IMG_UINT8 * psRIDeleteMEMDESCEntryOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryIN = -+ (PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *) IMG_OFFSET_ADDR(psRIDeleteMEMDESCEntryIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryOUT = -+ (PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *) -+ IMG_OFFSET_ADDR(psRIDeleteMEMDESCEntryOUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psRIDeleteMEMDESCEntryOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psRIDeleteMEMDESCEntryIN->hRIHandle, -+ PVRSRV_HANDLE_TYPE_RI_HANDLE); -+ if (unlikely((psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_OK) && -+ (psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psRIDeleteMEMDESCEntryOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto RIDeleteMEMDESCEntry_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+RIDeleteMEMDESCEntry_exit: -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRIDumpList(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRIDumpListIN_UI8, -+ IMG_UINT8 * psRIDumpListOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RIDUMPLIST *psRIDumpListIN = -+ (PVRSRV_BRIDGE_IN_RIDUMPLIST *) IMG_OFFSET_ADDR(psRIDumpListIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RIDUMPLIST *psRIDumpListOUT = -+ (PVRSRV_BRIDGE_OUT_RIDUMPLIST *) IMG_OFFSET_ADDR(psRIDumpListOUT_UI8, 0); -+ -+ IMG_HANDLE hPMRHandle = psRIDumpListIN->hPMRHandle; -+ PMR *psPMRHandleInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRIDumpListOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRHandleInt, -+ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psRIDumpListOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RIDumpList_exit; -+ } -+ /* Release now we have looked up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRIDumpListOUT->eError = RIDumpListKM(psPMRHandleInt); -+ -+RIDumpList_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRHandleInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRIDumpAll(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRIDumpAllIN_UI8, -+ IMG_UINT8 * psRIDumpAllOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RIDUMPALL *psRIDumpAllIN = -+ (PVRSRV_BRIDGE_IN_RIDUMPALL *) IMG_OFFSET_ADDR(psRIDumpAllIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RIDUMPALL *psRIDumpAllOUT = -+ (PVRSRV_BRIDGE_OUT_RIDUMPALL *) IMG_OFFSET_ADDR(psRIDumpAllOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVR_UNREFERENCED_PARAMETER(psRIDumpAllIN); -+ -+ psRIDumpAllOUT->eError = RIDumpAllKM(); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRIDumpProcess(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRIDumpProcessIN_UI8, -+ IMG_UINT8 * psRIDumpProcessOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RIDUMPPROCESS *psRIDumpProcessIN = -+ (PVRSRV_BRIDGE_IN_RIDUMPPROCESS *) IMG_OFFSET_ADDR(psRIDumpProcessIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *psRIDumpProcessOUT = -+ (PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *) IMG_OFFSET_ADDR(psRIDumpProcessOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ psRIDumpProcessOUT->eError = RIDumpProcessKM(psRIDumpProcessIN->ui32Pid); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeRIWritePMREntryWithOwner(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psRIWritePMREntryWithOwnerIN_UI8, -+ IMG_UINT8 * psRIWritePMREntryWithOwnerOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER *psRIWritePMREntryWithOwnerIN = -+ (PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER *) -+ IMG_OFFSET_ADDR(psRIWritePMREntryWithOwnerIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER *psRIWritePMREntryWithOwnerOUT = -+ (PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER *) -+ IMG_OFFSET_ADDR(psRIWritePMREntryWithOwnerOUT_UI8, 0); -+ -+ IMG_HANDLE hPMRHandle = psRIWritePMREntryWithOwnerIN->hPMRHandle; -+ PMR *psPMRHandleInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psRIWritePMREntryWithOwnerOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psPMRHandleInt, -+ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); -+ if (unlikely(psRIWritePMREntryWithOwnerOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto RIWritePMREntryWithOwner_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psRIWritePMREntryWithOwnerOUT->eError = -+ RIWritePMREntryWithOwnerKM(psPMRHandleInt, psRIWritePMREntryWithOwnerIN->ui32Owner); -+ -+RIWritePMREntryWithOwner_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psPMRHandleInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+PVRSRV_ERROR InitRIBridge(void); -+void DeinitRIBridge(void); -+ -+/* -+ * Register all RI functions with services -+ */ -+PVRSRV_ERROR InitRIBridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY, -+ PVRSRVBridgeRIWritePMREntry, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY, -+ PVRSRVBridgeRIWriteMEMDESCEntry, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY, -+ PVRSRVBridgeRIWriteProcListEntry, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR, -+ PVRSRVBridgeRIUpdateMEMDESCAddr, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY, -+ PVRSRVBridgeRIDeleteMEMDESCEntry, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST, PVRSRVBridgeRIDumpList, -+ NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL, PVRSRVBridgeRIDumpAll, -+ NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS, -+ PVRSRVBridgeRIDumpProcess, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER, -+ PVRSRVBridgeRIWritePMREntryWithOwner, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all ri functions with services -+ */ -+void DeinitRIBridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER); -+ -+} -diff --git a/drivers/gpu/drm/img-rogue/server_srvcore_bridge.c b/drivers/gpu/drm/img-rogue/server_srvcore_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_srvcore_bridge.c -@@ -0,0 +1,1053 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for srvcore -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for srvcore -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "srvcore.h" -+#include "info_page.h" -+#include "proc_stats.h" -+#include "rgx_fwif_alignchecks.h" -+ -+#include "common_srvcore_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static IMG_INT -+PVRSRVBridgeConnect(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psConnectIN_UI8, -+ IMG_UINT8 * psConnectOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_CONNECT *psConnectIN = -+ (PVRSRV_BRIDGE_IN_CONNECT *) IMG_OFFSET_ADDR(psConnectIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_CONNECT *psConnectOUT = -+ (PVRSRV_BRIDGE_OUT_CONNECT *) IMG_OFFSET_ADDR(psConnectOUT_UI8, 0); -+ -+ psConnectOUT->eError = -+ PVRSRVConnectKM(psConnection, OSGetDevNode(psConnection), -+ psConnectIN->ui32Flags, -+ psConnectIN->ui32ClientBuildOptions, -+ psConnectIN->ui32ClientDDKVersion, -+ psConnectIN->ui32ClientDDKBuild, -+ &psConnectOUT->ui8KernelArch, -+ &psConnectOUT->ui32CapabilityFlags, &psConnectOUT->ui64PackedBvnc); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeDisconnect(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDisconnectIN_UI8, -+ IMG_UINT8 * psDisconnectOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DISCONNECT *psDisconnectIN = -+ (PVRSRV_BRIDGE_IN_DISCONNECT *) IMG_OFFSET_ADDR(psDisconnectIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DISCONNECT *psDisconnectOUT = -+ (PVRSRV_BRIDGE_OUT_DISCONNECT *) IMG_OFFSET_ADDR(psDisconnectOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ PVR_UNREFERENCED_PARAMETER(psDisconnectIN); -+ -+ psDisconnectOUT->eError = PVRSRVDisconnectKM(); -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _AcquireGlobalEventObjecthGlobalEventObjectIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PVRSRVReleaseGlobalEventObjectKM((IMG_HANDLE) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeAcquireGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psAcquireGlobalEventObjectIN_UI8, -+ IMG_UINT8 * psAcquireGlobalEventObjectOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectIN = -+ (PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *) -+ IMG_OFFSET_ADDR(psAcquireGlobalEventObjectIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectOUT = -+ (PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT *) -+ IMG_OFFSET_ADDR(psAcquireGlobalEventObjectOUT_UI8, 0); -+ -+ IMG_HANDLE hGlobalEventObjectInt = NULL; -+ -+ PVR_UNREFERENCED_PARAMETER(psAcquireGlobalEventObjectIN); -+ -+ psAcquireGlobalEventObjectOUT->eError = -+ PVRSRVAcquireGlobalEventObjectKM(&hGlobalEventObjectInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)) -+ { -+ goto AcquireGlobalEventObject_exit; -+ } -+ -+ /* Lock over handle creation. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ psAcquireGlobalEventObjectOUT->eError = -+ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psAcquireGlobalEventObjectOUT->hGlobalEventObject, -+ (void *)hGlobalEventObjectInt, -+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _AcquireGlobalEventObjecthGlobalEventObjectIntRelease); -+ if (unlikely(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto AcquireGlobalEventObject_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+AcquireGlobalEventObject_exit: -+ -+ if (psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK) -+ { -+ if (hGlobalEventObjectInt) -+ { -+ PVRSRVReleaseGlobalEventObjectKM(hGlobalEventObjectInt); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeReleaseGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psReleaseGlobalEventObjectIN_UI8, -+ IMG_UINT8 * psReleaseGlobalEventObjectOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectIN = -+ (PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *) -+ IMG_OFFSET_ADDR(psReleaseGlobalEventObjectIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectOUT = -+ (PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT *) -+ IMG_OFFSET_ADDR(psReleaseGlobalEventObjectOUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psReleaseGlobalEventObjectOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psReleaseGlobalEventObjectIN-> -+ hGlobalEventObject, -+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT); -+ if (unlikely -+ ((psReleaseGlobalEventObjectOUT->eError != PVRSRV_OK) -+ && (psReleaseGlobalEventObjectOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) -+ && (psReleaseGlobalEventObjectOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psReleaseGlobalEventObjectOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto ReleaseGlobalEventObject_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ReleaseGlobalEventObject_exit: -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _EventObjectOpenhOSEventIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = OSEventObjectClose((IMG_HANDLE) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeEventObjectOpen(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psEventObjectOpenIN_UI8, -+ IMG_UINT8 * psEventObjectOpenOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *psEventObjectOpenIN = -+ (PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *) IMG_OFFSET_ADDR(psEventObjectOpenIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *psEventObjectOpenOUT = -+ (PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *) IMG_OFFSET_ADDR(psEventObjectOpenOUT_UI8, 0); -+ -+ IMG_HANDLE hEventObject = psEventObjectOpenIN->hEventObject; -+ IMG_HANDLE hEventObjectInt = NULL; -+ IMG_HANDLE hOSEventInt = NULL; -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psEventObjectOpenOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&hEventObjectInt, -+ hEventObject, -+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT, IMG_TRUE); -+ if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto EventObjectOpen_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psEventObjectOpenOUT->eError = OSEventObjectOpen(hEventObjectInt, &hOSEventInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK)) -+ { -+ goto EventObjectOpen_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psEventObjectOpenOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psEventObjectOpenOUT->hOSEvent, -+ (void *)hOSEventInt, -+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _EventObjectOpenhOSEventIntRelease); -+ if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto EventObjectOpen_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+EventObjectOpen_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (hEventObjectInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hEventObject, PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psEventObjectOpenOUT->eError != PVRSRV_OK) -+ { -+ if (hOSEventInt) -+ { -+ OSEventObjectClose(hOSEventInt); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeEventObjectWait(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psEventObjectWaitIN_UI8, -+ IMG_UINT8 * psEventObjectWaitOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *psEventObjectWaitIN = -+ (PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *) IMG_OFFSET_ADDR(psEventObjectWaitIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *psEventObjectWaitOUT = -+ (PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *) IMG_OFFSET_ADDR(psEventObjectWaitOUT_UI8, 0); -+ -+ IMG_HANDLE hOSEventKM = psEventObjectWaitIN->hOSEventKM; -+ IMG_HANDLE hOSEventKMInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psEventObjectWaitOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&hOSEventKMInt, -+ hOSEventKM, -+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, IMG_TRUE); -+ if (unlikely(psEventObjectWaitOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto EventObjectWait_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psEventObjectWaitOUT->eError = OSEventObjectWait(hOSEventKMInt); -+ -+EventObjectWait_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (hOSEventKMInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hOSEventKM, PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeEventObjectClose(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psEventObjectCloseIN_UI8, -+ IMG_UINT8 * psEventObjectCloseOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *psEventObjectCloseIN = -+ (PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *) IMG_OFFSET_ADDR(psEventObjectCloseIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *psEventObjectCloseOUT = -+ (PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *) IMG_OFFSET_ADDR(psEventObjectCloseOUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psEventObjectCloseOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psEventObjectCloseIN->hOSEventKM, -+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT); -+ if (unlikely((psEventObjectCloseOUT->eError != PVRSRV_OK) && -+ (psEventObjectCloseOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psEventObjectCloseOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: %s", __func__, PVRSRVGetErrorString(psEventObjectCloseOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto EventObjectClose_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+EventObjectClose_exit: -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeDumpDebugInfo(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psDumpDebugInfoIN_UI8, -+ IMG_UINT8 * psDumpDebugInfoOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *psDumpDebugInfoIN = -+ (PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *) IMG_OFFSET_ADDR(psDumpDebugInfoIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *psDumpDebugInfoOUT = -+ (PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *) IMG_OFFSET_ADDR(psDumpDebugInfoOUT_UI8, 0); -+ -+ psDumpDebugInfoOUT->eError = -+ PVRSRVDumpDebugInfoKM(psConnection, OSGetDevNode(psConnection), -+ psDumpDebugInfoIN->ui32VerbLevel); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeGetDevClockSpeed(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psGetDevClockSpeedIN_UI8, -+ IMG_UINT8 * psGetDevClockSpeedOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *psGetDevClockSpeedIN = -+ (PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *) IMG_OFFSET_ADDR(psGetDevClockSpeedIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *psGetDevClockSpeedOUT = -+ (PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *) IMG_OFFSET_ADDR(psGetDevClockSpeedOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psGetDevClockSpeedIN); -+ -+ psGetDevClockSpeedOUT->eError = -+ PVRSRVGetDevClockSpeedKM(psConnection, OSGetDevNode(psConnection), -+ &psGetDevClockSpeedOUT->ui32ClockSpeed); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeHWOpTimeout(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psHWOpTimeoutIN_UI8, -+ IMG_UINT8 * psHWOpTimeoutOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_HWOPTIMEOUT *psHWOpTimeoutIN = -+ (PVRSRV_BRIDGE_IN_HWOPTIMEOUT *) IMG_OFFSET_ADDR(psHWOpTimeoutIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *psHWOpTimeoutOUT = -+ (PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *) IMG_OFFSET_ADDR(psHWOpTimeoutOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psHWOpTimeoutIN); -+ -+ psHWOpTimeoutOUT->eError = PVRSRVHWOpTimeoutKM(psConnection, OSGetDevNode(psConnection)); -+ -+ return 0; -+} -+ -+static_assert(RGXFW_ALIGN_CHECKS_UM_MAX <= IMG_UINT32_MAX, -+ "RGXFW_ALIGN_CHECKS_UM_MAX must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT 
-+PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psAlignmentCheckIN_UI8, -+ IMG_UINT8 * psAlignmentCheckOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_ALIGNMENTCHECK *psAlignmentCheckIN = -+ (PVRSRV_BRIDGE_IN_ALIGNMENTCHECK *) IMG_OFFSET_ADDR(psAlignmentCheckIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *psAlignmentCheckOUT = -+ (PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *) IMG_OFFSET_ADDR(psAlignmentCheckOUT_UI8, 0); -+ -+ IMG_UINT32 *ui32AlignChecksInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) + 0; -+ -+ if (unlikely(psAlignmentCheckIN->ui32AlignChecksSize > RGXFW_ALIGN_CHECKS_UM_MAX)) -+ { -+ psAlignmentCheckOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto AlignmentCheck_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psAlignmentCheckOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto AlignmentCheck_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psAlignmentCheckIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psAlignmentCheckIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psAlignmentCheckOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto AlignmentCheck_exit; -+ } -+ } -+ } -+ -+ if (psAlignmentCheckIN->ui32AlignChecksSize != 0) -+ { -+ ui32AlignChecksInt = -+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32); -+ } -+ -+ /* Copy the data over */ -+ if (psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, ui32AlignChecksInt, -+ (const void __user *)psAlignmentCheckIN->pui32AlignChecks, -+ psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) != PVRSRV_OK) -+ { -+ psAlignmentCheckOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto AlignmentCheck_exit; -+ } -+ } -+ -+ psAlignmentCheckOUT->eError = -+ PVRSRVAlignmentCheckKM(psConnection, OSGetDevNode(psConnection), -+ psAlignmentCheckIN->ui32AlignChecksSize, ui32AlignChecksInt); -+ -+AlignmentCheck_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psAlignmentCheckOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeGetDeviceStatus(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psGetDeviceStatusIN_UI8, -+ IMG_UINT8 * psGetDeviceStatusOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_GETDEVICESTATUS *psGetDeviceStatusIN = -+ (PVRSRV_BRIDGE_IN_GETDEVICESTATUS *) IMG_OFFSET_ADDR(psGetDeviceStatusIN_UI8, 0); -+ 
PVRSRV_BRIDGE_OUT_GETDEVICESTATUS *psGetDeviceStatusOUT = -+ (PVRSRV_BRIDGE_OUT_GETDEVICESTATUS *) IMG_OFFSET_ADDR(psGetDeviceStatusOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psGetDeviceStatusIN); -+ -+ psGetDeviceStatusOUT->eError = -+ PVRSRVGetDeviceStatusKM(psConnection, OSGetDevNode(psConnection), -+ &psGetDeviceStatusOUT->ui32DeviceSatus); -+ -+ return 0; -+} -+ -+static_assert(8 <= IMG_UINT32_MAX, "8 must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeGetMultiCoreInfo(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psGetMultiCoreInfoIN_UI8, -+ IMG_UINT8 * psGetMultiCoreInfoOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_GETMULTICOREINFO *psGetMultiCoreInfoIN = -+ (PVRSRV_BRIDGE_IN_GETMULTICOREINFO *) IMG_OFFSET_ADDR(psGetMultiCoreInfoIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_GETMULTICOREINFO *psGetMultiCoreInfoOUT = -+ (PVRSRV_BRIDGE_OUT_GETMULTICOREINFO *) IMG_OFFSET_ADDR(psGetMultiCoreInfoOUT_UI8, 0); -+ -+ IMG_UINT64 *pui64CapsInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64)) + 0; -+ -+ if (psGetMultiCoreInfoIN->ui32CapsSize > 8) -+ { -+ psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto GetMultiCoreInfo_exit; -+ } -+ -+ psGetMultiCoreInfoOUT->pui64Caps = psGetMultiCoreInfoIN->pui64Caps; -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto GetMultiCoreInfo_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psGetMultiCoreInfoIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psGetMultiCoreInfoIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto GetMultiCoreInfo_exit; -+ } -+ } -+ } -+ -+ if (psGetMultiCoreInfoIN->ui32CapsSize != 0) -+ { -+ pui64CapsInt = (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64); -+ } -+ -+ psGetMultiCoreInfoOUT->eError = -+ PVRSRVGetMultiCoreInfoKM(psConnection, OSGetDevNode(psConnection), -+ psGetMultiCoreInfoIN->ui32CapsSize, -+ &psGetMultiCoreInfoOUT->ui32NumCores, pui64CapsInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psGetMultiCoreInfoOUT->eError != PVRSRV_OK)) -+ { -+ goto GetMultiCoreInfo_exit; -+ } -+ -+ /* If dest ptr is non-null and we have data to copy */ -+ if ((pui64CapsInt) && ((psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64)) > 0)) -+ { -+ if (unlikely -+ (OSCopyToUser -+ (NULL, (void __user *)psGetMultiCoreInfoOUT->pui64Caps, pui64CapsInt, -+ (psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64))) != PVRSRV_OK)) -+ { -+ psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto GetMultiCoreInfo_exit; -+ } -+ } -+ -+GetMultiCoreInfo_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psGetMultiCoreInfoOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeEventObjectWaitTimeout(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psEventObjectWaitTimeoutIN_UI8, -+ IMG_UINT8 * psEventObjectWaitTimeoutOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutIN = -+ (PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *) -+ IMG_OFFSET_ADDR(psEventObjectWaitTimeoutIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutOUT = -+ (PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *) -+ IMG_OFFSET_ADDR(psEventObjectWaitTimeoutOUT_UI8, 0); -+ -+ IMG_HANDLE hOSEventKM = psEventObjectWaitTimeoutIN->hOSEventKM; -+ IMG_HANDLE hOSEventKMInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psEventObjectWaitTimeoutOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&hOSEventKMInt, -+ hOSEventKM, -+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, IMG_TRUE); -+ if (unlikely(psEventObjectWaitTimeoutOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto EventObjectWaitTimeout_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psEventObjectWaitTimeoutOUT->eError = -+ OSEventObjectWaitTimeout(hOSEventKMInt, psEventObjectWaitTimeoutIN->ui64uiTimeoutus); -+ -+EventObjectWaitTimeout_exit: -+ -+ /* Lock over handle lookup cleanup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (hOSEventKMInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hOSEventKM, PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+static_assert(PVRSRV_PROCESS_STAT_TYPE_COUNT <= IMG_UINT32_MAX, -+ "PVRSRV_PROCESS_STAT_TYPE_COUNT must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psFindProcessMemStatsIN_UI8, -+ IMG_UINT8 * psFindProcessMemStatsOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS *psFindProcessMemStatsIN = -+ (PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS *) IMG_OFFSET_ADDR(psFindProcessMemStatsIN_UI8, -+ 0); -+ PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *psFindProcessMemStatsOUT = -+ (PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *) IMG_OFFSET_ADDR(psFindProcessMemStatsOUT_UI8, -+ 0); -+ -+ IMG_UINT64 *pui64MemStatsArrayInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64)) + 0; -+ -+ if (psFindProcessMemStatsIN->ui32ArrSize > PVRSRV_PROCESS_STAT_TYPE_COUNT) -+ { -+ psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto FindProcessMemStats_exit; -+ } -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ psFindProcessMemStatsOUT->pui64MemStatsArray = psFindProcessMemStatsIN->pui64MemStatsArray; -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto FindProcessMemStats_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psFindProcessMemStatsIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psFindProcessMemStatsIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto FindProcessMemStats_exit; -+ } -+ } -+ } -+ -+ if (psFindProcessMemStatsIN->ui32ArrSize != 0) -+ { -+ pui64MemStatsArrayInt = -+ (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64); -+ } -+ -+ psFindProcessMemStatsOUT->eError = -+ PVRSRVFindProcessMemStatsKM(psFindProcessMemStatsIN->ui32PID, -+ psFindProcessMemStatsIN->ui32ArrSize, -+ psFindProcessMemStatsIN->bbAllProcessStats, -+ pui64MemStatsArrayInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psFindProcessMemStatsOUT->eError != PVRSRV_OK)) -+ { -+ goto FindProcessMemStats_exit; -+ } -+ -+ /* If dest ptr is non-null and we have data to copy */ -+ if ((pui64MemStatsArrayInt) && -+ ((psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64)) > 0)) -+ { -+ if (unlikely -+ (OSCopyToUser -+ (NULL, (void __user *)psFindProcessMemStatsOUT->pui64MemStatsArray, -+ pui64MemStatsArrayInt, -+ (psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64))) != PVRSRV_OK)) -+ { -+ psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto FindProcessMemStats_exit; -+ } -+ } -+ -+FindProcessMemStats_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psFindProcessMemStatsOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _AcquireInfoPagepsPMRIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PVRSRVReleaseInfoPageKM((PMR *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeAcquireInfoPage(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psAcquireInfoPageIN_UI8, -+ IMG_UINT8 * psAcquireInfoPageOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE *psAcquireInfoPageIN = -+ (PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE *) IMG_OFFSET_ADDR(psAcquireInfoPageIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE *psAcquireInfoPageOUT = -+ (PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE *) IMG_OFFSET_ADDR(psAcquireInfoPageOUT_UI8, 0); -+ -+ PMR *psPMRInt = NULL; -+ -+ PVR_UNREFERENCED_PARAMETER(psAcquireInfoPageIN); -+ -+ psAcquireInfoPageOUT->eError = PVRSRVAcquireInfoPageKM(&psPMRInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psAcquireInfoPageOUT->eError != PVRSRV_OK)) -+ { -+ goto AcquireInfoPage_exit; -+ } -+ -+ /* Lock over handle creation. 
*/ -+ LockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ -+ psAcquireInfoPageOUT->eError = -+ PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, -+ &psAcquireInfoPageOUT->hPMR, (void *)psPMRInt, -+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & _AcquireInfoPagepsPMRIntRelease); -+ if (unlikely(psAcquireInfoPageOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ goto AcquireInfoPage_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ -+AcquireInfoPage_exit: -+ -+ if (psAcquireInfoPageOUT->eError != PVRSRV_OK) -+ { -+ if (psPMRInt) -+ { -+ PVRSRVReleaseInfoPageKM(psPMRInt); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeReleaseInfoPage(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psReleaseInfoPageIN_UI8, -+ IMG_UINT8 * psReleaseInfoPageOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_RELEASEINFOPAGE *psReleaseInfoPageIN = -+ (PVRSRV_BRIDGE_IN_RELEASEINFOPAGE *) IMG_OFFSET_ADDR(psReleaseInfoPageIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE *psReleaseInfoPageOUT = -+ (PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE *) IMG_OFFSET_ADDR(psReleaseInfoPageOUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ -+ psReleaseInfoPageOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase, -+ (IMG_HANDLE) psReleaseInfoPageIN->hPMR, -+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT); -+ if (unlikely((psReleaseInfoPageOUT->eError != PVRSRV_OK) && -+ (psReleaseInfoPageOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psReleaseInfoPageOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", __func__, PVRSRVGetErrorString(psReleaseInfoPageOUT->eError))); -+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ goto ReleaseInfoPage_exit; -+ } -+ -+ /* Release now we have destroyed handles. 
*/ -+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); -+ -+ReleaseInfoPage_exit: -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+PVRSRV_ERROR InitSRVCOREBridge(void); -+void DeinitSRVCOREBridge(void); -+ -+/* -+ * Register all SRVCORE functions with services -+ */ -+PVRSRV_ERROR InitSRVCOREBridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT, -+ PVRSRVBridgeConnect, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DISCONNECT, -+ PVRSRVBridgeDisconnect, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT, -+ PVRSRVBridgeAcquireGlobalEventObject, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT, -+ PVRSRVBridgeReleaseGlobalEventObject, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN, -+ PVRSRVBridgeEventObjectOpen, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT, -+ PVRSRVBridgeEventObjectWait, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE, -+ PVRSRVBridgeEventObjectClose, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO, -+ PVRSRVBridgeDumpDebugInfo, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED, -+ PVRSRVBridgeGetDevClockSpeed, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT, -+ PVRSRVBridgeHWOpTimeout, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK, -+ PVRSRVBridgeAlignmentCheck, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS, -+ PVRSRVBridgeGetDeviceStatus, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO, -+ PVRSRVBridgeGetMultiCoreInfo, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT, -+ PVRSRVBridgeEventObjectWaitTimeout, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS, -+ PVRSRVBridgeFindProcessMemStats, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE, -+ PVRSRVBridgeAcquireInfoPage, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE, -+ PVRSRVBridgeReleaseInfoPage, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all srvcore functions with services -+ */ -+void DeinitSRVCOREBridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DISCONNECT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, -+ PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, -+ PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO); -+ -+ 
UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, -+ PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE); -+ -+} -diff --git a/drivers/gpu/drm/img-rogue/server_sync_bridge.c b/drivers/gpu/drm/img-rogue/server_sync_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_sync_bridge.c -@@ -0,0 +1,738 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for sync -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for sync -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "sync.h" -+#include "sync_server.h" -+#include "pdump.h" -+#include "pvrsrv_sync_km.h" -+#include "sync_fallback_server.h" -+#include "sync_checkpoint.h" -+ -+#include "common_sync_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static PVRSRV_ERROR _AllocSyncPrimitiveBlockpsSyncHandleIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PVRSRVFreeSyncPrimitiveBlockKM((SYNC_PRIMITIVE_BLOCK *) pvData); -+ return eError; -+} -+ -+static IMG_INT -+PVRSRVBridgeAllocSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psAllocSyncPrimitiveBlockIN_UI8, -+ IMG_UINT8 * psAllocSyncPrimitiveBlockOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockIN = -+ (PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *) -+ IMG_OFFSET_ADDR(psAllocSyncPrimitiveBlockIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockOUT = -+ (PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *) -+ IMG_OFFSET_ADDR(psAllocSyncPrimitiveBlockOUT_UI8, 0); -+ -+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; -+ PMR *pshSyncPMRInt = NULL; -+ -+ PVR_UNREFERENCED_PARAMETER(psAllocSyncPrimitiveBlockIN); -+ -+ psAllocSyncPrimitiveBlockOUT->hSyncHandle = NULL; -+ -+ psAllocSyncPrimitiveBlockOUT->eError = -+ PVRSRVAllocSyncPrimitiveBlockKM(psConnection, OSGetDevNode(psConnection), -+ &psSyncHandleInt, -+ &psAllocSyncPrimitiveBlockOUT->ui32SyncPrimVAddr, -+ &psAllocSyncPrimitiveBlockOUT->ui32SyncPrimBlockSize, -+ &pshSyncPMRInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)) -+ { -+ goto AllocSyncPrimitiveBlock_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psAllocSyncPrimitiveBlockOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psAllocSyncPrimitiveBlockOUT-> -+ hSyncHandle, -+ (void *)psSyncHandleInt, -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, -+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, -+ (PFN_HANDLE_RELEASE) & -+ _AllocSyncPrimitiveBlockpsSyncHandleIntRelease); -+ if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto AllocSyncPrimitiveBlock_exit; -+ } -+ -+ psAllocSyncPrimitiveBlockOUT->eError = -+ PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, -+ &psAllocSyncPrimitiveBlockOUT->hhSyncPMR, -+ (void *)pshSyncPMRInt, -+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE, -+ psAllocSyncPrimitiveBlockOUT->hSyncHandle); -+ if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto AllocSyncPrimitiveBlock_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+AllocSyncPrimitiveBlock_exit: -+ -+ if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK) -+ { -+ if (psAllocSyncPrimitiveBlockOUT->hSyncHandle) -+ { -+ PVRSRV_ERROR eError; -+ -+ /* Lock over handle creation cleanup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) -+ psAllocSyncPrimitiveBlockOUT-> -+ hSyncHandle, -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); -+ if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", __func__, PVRSRVGetErrorString(eError))); -+ } -+ /* Releasing the handle should free/destroy/release the resource. -+ * This should never fail... */ -+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); -+ -+ /* Avoid freeing/destroying/releasing the resource a second time below */ -+ psSyncHandleInt = NULL; -+ /* Release now we have cleaned up creation handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ } -+ -+ if (psSyncHandleInt) -+ { -+ PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt); -+ } -+ } -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeFreeSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psFreeSyncPrimitiveBlockIN_UI8, -+ IMG_UINT8 * psFreeSyncPrimitiveBlockOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockIN = -+ (PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *) -+ IMG_OFFSET_ADDR(psFreeSyncPrimitiveBlockIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockOUT = -+ (PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *) -+ IMG_OFFSET_ADDR(psFreeSyncPrimitiveBlockOUT_UI8, 0); -+ -+ /* Lock over handle destruction. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psFreeSyncPrimitiveBlockOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psFreeSyncPrimitiveBlockIN->hSyncHandle, -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); -+ if (unlikely((psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_OK) && -+ (psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psFreeSyncPrimitiveBlockOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto FreeSyncPrimitiveBlock_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+FreeSyncPrimitiveBlock_exit: -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeSyncPrimSet(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psSyncPrimSetIN_UI8, -+ IMG_UINT8 * psSyncPrimSetOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_SYNCPRIMSET *psSyncPrimSetIN = -+ (PVRSRV_BRIDGE_IN_SYNCPRIMSET *) IMG_OFFSET_ADDR(psSyncPrimSetIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_SYNCPRIMSET *psSyncPrimSetOUT = -+ (PVRSRV_BRIDGE_OUT_SYNCPRIMSET *) IMG_OFFSET_ADDR(psSyncPrimSetOUT_UI8, 0); -+ -+ IMG_HANDLE hSyncHandle = psSyncPrimSetIN->hSyncHandle; -+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psSyncPrimSetOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psSyncHandleInt, -+ hSyncHandle, -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); -+ if (unlikely(psSyncPrimSetOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto SyncPrimSet_exit; -+ } -+ /* Release now we have looked up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psSyncPrimSetOUT->eError = -+ PVRSRVSyncPrimSetKM(psSyncHandleInt, -+ psSyncPrimSetIN->ui32Index, psSyncPrimSetIN->ui32Value); -+ -+SyncPrimSet_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psSyncHandleInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+#if defined(PDUMP) -+ -+static IMG_INT -+PVRSRVBridgeSyncPrimPDump(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psSyncPrimPDumpIN_UI8, -+ IMG_UINT8 * psSyncPrimPDumpOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *psSyncPrimPDumpIN = -+ (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *) IMG_OFFSET_ADDR(psSyncPrimPDumpIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *psSyncPrimPDumpOUT = -+ (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *) IMG_OFFSET_ADDR(psSyncPrimPDumpOUT_UI8, 0); -+ -+ IMG_HANDLE hSyncHandle = psSyncPrimPDumpIN->hSyncHandle; -+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psSyncPrimPDumpOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psSyncHandleInt, -+ hSyncHandle, -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); -+ if (unlikely(psSyncPrimPDumpOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto SyncPrimPDump_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psSyncPrimPDumpOUT->eError = -+ PVRSRVSyncPrimPDumpKM(psSyncHandleInt, psSyncPrimPDumpIN->ui32Offset); -+ -+SyncPrimPDump_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psSyncHandleInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+#else -+#define PVRSRVBridgeSyncPrimPDump NULL -+#endif -+ -+#if defined(PDUMP) -+ -+static IMG_INT -+PVRSRVBridgeSyncPrimPDumpValue(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psSyncPrimPDumpValueIN_UI8, -+ IMG_UINT8 * psSyncPrimPDumpValueOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueIN = -+ (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *) IMG_OFFSET_ADDR(psSyncPrimPDumpValueIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueOUT = -+ (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *) IMG_OFFSET_ADDR(psSyncPrimPDumpValueOUT_UI8, -+ 0); -+ -+ IMG_HANDLE hSyncHandle = psSyncPrimPDumpValueIN->hSyncHandle; -+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; -+ -+ /* Lock over handle lookup. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psSyncPrimPDumpValueOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psSyncHandleInt, -+ hSyncHandle, -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); -+ if (unlikely(psSyncPrimPDumpValueOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto SyncPrimPDumpValue_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psSyncPrimPDumpValueOUT->eError = -+ PVRSRVSyncPrimPDumpValueKM(psSyncHandleInt, -+ psSyncPrimPDumpValueIN->ui32Offset, -+ psSyncPrimPDumpValueIN->ui32Value); -+ -+SyncPrimPDumpValue_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psSyncHandleInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+#else -+#define PVRSRVBridgeSyncPrimPDumpValue NULL -+#endif -+ -+#if defined(PDUMP) -+ -+static IMG_INT -+PVRSRVBridgeSyncPrimPDumpPol(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psSyncPrimPDumpPolIN_UI8, -+ IMG_UINT8 * psSyncPrimPDumpPolOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolIN = -+ (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *) IMG_OFFSET_ADDR(psSyncPrimPDumpPolIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolOUT = -+ (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *) IMG_OFFSET_ADDR(psSyncPrimPDumpPolOUT_UI8, 0); -+ -+ IMG_HANDLE hSyncHandle = psSyncPrimPDumpPolIN->hSyncHandle; -+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psSyncPrimPDumpPolOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psSyncHandleInt, -+ hSyncHandle, -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); -+ if (unlikely(psSyncPrimPDumpPolOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto SyncPrimPDumpPol_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psSyncPrimPDumpPolOUT->eError = -+ PVRSRVSyncPrimPDumpPolKM(psSyncHandleInt, -+ psSyncPrimPDumpPolIN->ui32Offset, -+ psSyncPrimPDumpPolIN->ui32Value, -+ psSyncPrimPDumpPolIN->ui32Mask, -+ psSyncPrimPDumpPolIN->eOperator, -+ psSyncPrimPDumpPolIN->uiPDumpFlags); -+ -+SyncPrimPDumpPol_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psSyncHandleInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+#else -+#define PVRSRVBridgeSyncPrimPDumpPol NULL -+#endif -+ -+#if defined(PDUMP) -+ -+static IMG_INT -+PVRSRVBridgeSyncPrimPDumpCBP(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psSyncPrimPDumpCBPIN_UI8, -+ IMG_UINT8 * psSyncPrimPDumpCBPOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPIN = -+ (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *) IMG_OFFSET_ADDR(psSyncPrimPDumpCBPIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPOUT = -+ (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *) IMG_OFFSET_ADDR(psSyncPrimPDumpCBPOUT_UI8, 0); -+ -+ IMG_HANDLE hSyncHandle = psSyncPrimPDumpCBPIN->hSyncHandle; -+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psSyncPrimPDumpCBPOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&psSyncHandleInt, -+ hSyncHandle, -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); -+ if (unlikely(psSyncPrimPDumpCBPOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto SyncPrimPDumpCBP_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psSyncPrimPDumpCBPOUT->eError = -+ PVRSRVSyncPrimPDumpCBPKM(psSyncHandleInt, -+ psSyncPrimPDumpCBPIN->ui32Offset, -+ psSyncPrimPDumpCBPIN->uiWriteOffset, -+ psSyncPrimPDumpCBPIN->uiPacketSize, -+ psSyncPrimPDumpCBPIN->uiBufferSize); -+ -+SyncPrimPDumpCBP_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (psSyncHandleInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); -+ } -+ /* Release now we have cleaned up look up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ return 0; -+} -+ -+#else -+#define PVRSRVBridgeSyncPrimPDumpCBP NULL -+#endif -+ -+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, -+ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psSyncAllocEventIN_UI8, -+ IMG_UINT8 * psSyncAllocEventOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_SYNCALLOCEVENT *psSyncAllocEventIN = -+ (PVRSRV_BRIDGE_IN_SYNCALLOCEVENT *) IMG_OFFSET_ADDR(psSyncAllocEventIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *psSyncAllocEventOUT = -+ (PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *) IMG_OFFSET_ADDR(psSyncAllocEventOUT_UI8, 0); -+ -+ IMG_CHAR *uiClassNameInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) + 0; -+ -+ if (unlikely(psSyncAllocEventIN->ui32ClassNameSize > PVRSRV_SYNC_NAME_LENGTH)) -+ { -+ psSyncAllocEventOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto SyncAllocEvent_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psSyncAllocEventOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto SyncAllocEvent_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psSyncAllocEventIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncAllocEventIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psSyncAllocEventOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto SyncAllocEvent_exit; -+ } -+ } -+ } -+ -+ if (psSyncAllocEventIN->ui32ClassNameSize != 0) -+ { -+ uiClassNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiClassNameInt, (const void __user *)psSyncAllocEventIN->puiClassName, -+ psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psSyncAllocEventOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto SyncAllocEvent_exit; -+ } -+ ((IMG_CHAR *) -+ uiClassNameInt)[(psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) - 1] = -+ '\0'; -+ } -+ -+ psSyncAllocEventOUT->eError = -+ PVRSRVSyncAllocEventKM(psConnection, OSGetDevNode(psConnection), -+ psSyncAllocEventIN->bServerSync, -+ psSyncAllocEventIN->ui32FWAddr, -+ psSyncAllocEventIN->ui32ClassNameSize, uiClassNameInt); -+ -+SyncAllocEvent_exit: -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psSyncAllocEventOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+static IMG_INT -+PVRSRVBridgeSyncFreeEvent(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psSyncFreeEventIN_UI8, -+ IMG_UINT8 * psSyncFreeEventOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_SYNCFREEEVENT *psSyncFreeEventIN = -+ (PVRSRV_BRIDGE_IN_SYNCFREEEVENT *) IMG_OFFSET_ADDR(psSyncFreeEventIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_SYNCFREEEVENT *psSyncFreeEventOUT = -+ (PVRSRV_BRIDGE_OUT_SYNCFREEEVENT *) IMG_OFFSET_ADDR(psSyncFreeEventOUT_UI8, 0); -+ -+ psSyncFreeEventOUT->eError = -+ PVRSRVSyncFreeEventKM(psConnection, OSGetDevNode(psConnection), -+ psSyncFreeEventIN->ui32FWAddr); -+ -+ return 0; -+} -+ -+#if defined(PDUMP) -+ -+static IMG_INT -+PVRSRVBridgeSyncCheckpointSignalledPDumpPol(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psSyncCheckpointSignalledPDumpPolIN_UI8, -+ IMG_UINT8 * psSyncCheckpointSignalledPDumpPolOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *psSyncCheckpointSignalledPDumpPolIN = -+ (PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *) -+ IMG_OFFSET_ADDR(psSyncCheckpointSignalledPDumpPolIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *psSyncCheckpointSignalledPDumpPolOUT = -+ (PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *) -+ IMG_OFFSET_ADDR(psSyncCheckpointSignalledPDumpPolOUT_UI8, 0); -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ psSyncCheckpointSignalledPDumpPolOUT->eError = -+ PVRSRVSyncCheckpointSignalledPDumpPolKM(psSyncCheckpointSignalledPDumpPolIN->hFence); -+ 
-+ return 0; -+} -+ -+#else -+#define PVRSRVBridgeSyncCheckpointSignalledPDumpPol NULL -+#endif -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+PVRSRV_ERROR InitSYNCBridge(void); -+void DeinitSYNCBridge(void); -+ -+/* -+ * Register all SYNC functions with services -+ */ -+PVRSRV_ERROR InitSYNCBridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK, -+ PVRSRVBridgeAllocSyncPrimitiveBlock, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK, -+ PVRSRVBridgeFreeSyncPrimitiveBlock, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET, -+ PVRSRVBridgeSyncPrimSet, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP, -+ PVRSRVBridgeSyncPrimPDump, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE, -+ PVRSRVBridgeSyncPrimPDumpValue, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL, -+ PVRSRVBridgeSyncPrimPDumpPol, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP, -+ PVRSRVBridgeSyncPrimPDumpCBP, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT, -+ PVRSRVBridgeSyncAllocEvent, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT, -+ PVRSRVBridgeSyncFreeEvent, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, -+ PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL, -+ PVRSRVBridgeSyncCheckpointSignalledPDumpPol, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all sync functions with services -+ */ -+void DeinitSYNCBridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, -+ PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL); -+ -+} -diff --git a/drivers/gpu/drm/img-rogue/server_synctracking_bridge.c b/drivers/gpu/drm/img-rogue/server_synctracking_bridge.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/server_synctracking_bridge.c -@@ -0,0 +1,325 @@ -+/******************************************************************************* -+@File -+@Title Server bridge for synctracking -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side of the bridge for synctracking -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*******************************************************************************/ -+ -+#include -+ -+#include "img_defs.h" -+ -+#include "sync.h" -+#include "sync_server.h" -+ -+#include "common_synctracking_bridge.h" -+ -+#include "allocmem.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+#include "srvcore.h" -+#include "handle.h" -+ -+#include -+ -+/* *************************************************************************** -+ * Server-side bridge entry points -+ */ -+ -+static IMG_INT -+PVRSRVBridgeSyncRecordRemoveByHandle(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psSyncRecordRemoveByHandleIN_UI8, -+ IMG_UINT8 * psSyncRecordRemoveByHandleOUT_UI8, -+ CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleIN = -+ (PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *) -+ IMG_OFFSET_ADDR(psSyncRecordRemoveByHandleIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleOUT = -+ (PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE *) -+ IMG_OFFSET_ADDR(psSyncRecordRemoveByHandleOUT_UI8, 0); -+ -+ /* Lock over handle destruction. 
*/ -+ LockHandle(psConnection->psHandleBase); -+ -+ psSyncRecordRemoveByHandleOUT->eError = -+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, -+ (IMG_HANDLE) psSyncRecordRemoveByHandleIN->hhRecord, -+ PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE); -+ if (unlikely((psSyncRecordRemoveByHandleOUT->eError != PVRSRV_OK) && -+ (psSyncRecordRemoveByHandleOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && -+ (psSyncRecordRemoveByHandleOUT->eError != PVRSRV_ERROR_RETRY))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s", -+ __func__, PVRSRVGetErrorString(psSyncRecordRemoveByHandleOUT->eError))); -+ UnlockHandle(psConnection->psHandleBase); -+ goto SyncRecordRemoveByHandle_exit; -+ } -+ -+ /* Release now we have destroyed handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+SyncRecordRemoveByHandle_exit: -+ -+ return 0; -+} -+ -+static PVRSRV_ERROR _SyncRecordAddpshRecordIntRelease(void *pvData) -+{ -+ PVRSRV_ERROR eError; -+ eError = PVRSRVSyncRecordRemoveByHandleKM((SYNC_RECORD_HANDLE) pvData); -+ return eError; -+} -+ -+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, -+ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); -+ -+static IMG_INT -+PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 * psSyncRecordAddIN_UI8, -+ IMG_UINT8 * psSyncRecordAddOUT_UI8, CONNECTION_DATA * psConnection) -+{ -+ PVRSRV_BRIDGE_IN_SYNCRECORDADD *psSyncRecordAddIN = -+ (PVRSRV_BRIDGE_IN_SYNCRECORDADD *) IMG_OFFSET_ADDR(psSyncRecordAddIN_UI8, 0); -+ PVRSRV_BRIDGE_OUT_SYNCRECORDADD *psSyncRecordAddOUT = -+ (PVRSRV_BRIDGE_OUT_SYNCRECORDADD *) IMG_OFFSET_ADDR(psSyncRecordAddOUT_UI8, 0); -+ -+ SYNC_RECORD_HANDLE pshRecordInt = NULL; -+ IMG_HANDLE hhServerSyncPrimBlock = psSyncRecordAddIN->hhServerSyncPrimBlock; -+ SYNC_PRIMITIVE_BLOCK *pshServerSyncPrimBlockInt = NULL; -+ IMG_CHAR *uiClassNameInt = NULL; -+ -+ IMG_UINT32 ui32NextOffset = 0; -+ IMG_BYTE *pArrayArgsBuffer = NULL; -+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; -+ -+ IMG_UINT32 ui32BufferSize = 0; -+ IMG_UINT64 ui64BufferSize = -+ ((IMG_UINT64) psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) + 0; -+ -+ if (unlikely(psSyncRecordAddIN->ui32ClassNameSize > PVRSRV_SYNC_NAME_LENGTH)) -+ { -+ psSyncRecordAddOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; -+ goto SyncRecordAdd_exit; -+ } -+ -+ if (ui64BufferSize > IMG_UINT32_MAX) -+ { -+ psSyncRecordAddOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; -+ goto SyncRecordAdd_exit; -+ } -+ -+ ui32BufferSize = (IMG_UINT32) ui64BufferSize; -+ -+ if (ui32BufferSize != 0) -+ { -+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ -+ IMG_UINT32 ui32InBufferOffset = -+ PVR_ALIGN(sizeof(*psSyncRecordAddIN), sizeof(unsigned long)); -+ IMG_UINT32 ui32InBufferExcessSize = -+ ui32InBufferOffset >= -+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; -+ -+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; -+ if (bHaveEnoughSpace) -+ { -+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncRecordAddIN; -+ -+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; -+ } -+ else -+ { -+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); -+ -+ if (!pArrayArgsBuffer) -+ { -+ psSyncRecordAddOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto SyncRecordAdd_exit; -+ } -+ } -+ } -+ -+ if (psSyncRecordAddIN->ui32ClassNameSize != 0) -+ { -+ uiClassNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); -+ ui32NextOffset += psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR); -+ } -+ -+ /* Copy the data over */ -+ if (psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0) -+ { -+ if (OSCopyFromUser -+ (NULL, uiClassNameInt, (const void __user *)psSyncRecordAddIN->puiClassName, -+ psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) -+ { -+ psSyncRecordAddOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; -+ -+ goto SyncRecordAdd_exit; -+ } -+ ((IMG_CHAR *) -+ uiClassNameInt)[(psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) - 1] = -+ '\0'; -+ } -+ -+ /* Lock over handle lookup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Look up the address from the handle */ -+ psSyncRecordAddOUT->eError = -+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, -+ (void **)&pshServerSyncPrimBlockInt, -+ hhServerSyncPrimBlock, -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); -+ if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto SyncRecordAdd_exit; -+ } -+ /* Release now we have looked up handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ psSyncRecordAddOUT->eError = -+ PVRSRVSyncRecordAddKM(psConnection, OSGetDevNode(psConnection), -+ &pshRecordInt, -+ pshServerSyncPrimBlockInt, -+ psSyncRecordAddIN->ui32ui32FwBlockAddr, -+ psSyncRecordAddIN->ui32ui32SyncOffset, -+ psSyncRecordAddIN->bbServerSync, -+ psSyncRecordAddIN->ui32ClassNameSize, uiClassNameInt); -+ /* Exit early if bridged call fails */ -+ if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK)) -+ { -+ goto SyncRecordAdd_exit; -+ } -+ -+ /* Lock over handle creation. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ psSyncRecordAddOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, -+ &psSyncRecordAddOUT->hhRecord, -+ (void *)pshRecordInt, -+ PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE, -+ PVRSRV_HANDLE_ALLOC_FLAG_NONE, -+ (PFN_HANDLE_RELEASE) & -+ _SyncRecordAddpshRecordIntRelease); -+ if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK)) -+ { -+ UnlockHandle(psConnection->psHandleBase); -+ goto SyncRecordAdd_exit; -+ } -+ -+ /* Release now we have created handles. */ -+ UnlockHandle(psConnection->psHandleBase); -+ -+SyncRecordAdd_exit: -+ -+ /* Lock over handle lookup cleanup. */ -+ LockHandle(psConnection->psHandleBase); -+ -+ /* Unreference the previously looked up handle */ -+ if (pshServerSyncPrimBlockInt) -+ { -+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, -+ hhServerSyncPrimBlock, -+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); -+ } -+ /* Release now we have cleaned up look up handles. 
*/ -+ UnlockHandle(psConnection->psHandleBase); -+ -+ if (psSyncRecordAddOUT->eError != PVRSRV_OK) -+ { -+ if (pshRecordInt) -+ { -+ PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt); -+ } -+ } -+ -+ /* Allocated space should be equal to the last updated offset */ -+#ifdef PVRSRV_NEED_PVR_ASSERT -+ if (psSyncRecordAddOUT->eError == PVRSRV_OK) -+ PVR_ASSERT(ui32BufferSize == ui32NextOffset); -+#endif /* PVRSRV_NEED_PVR_ASSERT */ -+ -+ if (!bHaveEnoughSpace && pArrayArgsBuffer) -+ OSFreeMemNoStats(pArrayArgsBuffer); -+ -+ return 0; -+} -+ -+/* *************************************************************************** -+ * Server bridge dispatch related glue -+ */ -+ -+PVRSRV_ERROR InitSYNCTRACKINGBridge(void); -+void DeinitSYNCTRACKINGBridge(void); -+ -+/* -+ * Register all SYNCTRACKING functions with services -+ */ -+PVRSRV_ERROR InitSYNCTRACKINGBridge(void) -+{ -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, -+ PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE, -+ PVRSRVBridgeSyncRecordRemoveByHandle, NULL); -+ -+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD, -+ PVRSRVBridgeSyncRecordAdd, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Unregister all synctracking functions with services -+ */ -+void DeinitSYNCTRACKINGBridge(void) -+{ -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, -+ PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE); -+ -+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, -+ PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD); -+ -+} -diff --git a/drivers/gpu/drm/img-rogue/services_kernel_client.h b/drivers/gpu/drm/img-rogue/services_kernel_client.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/services_kernel_client.h -@@ -0,0 +1,289 @@ -+/*************************************************************************/ /*! -+@File services_kernel_client.h -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+/* This file contains a partial redefinition of the PowerVR Services 5 -+ * interface for use by components which are checkpatch clean. This -+ * header is included by the unrefined, non-checkpatch clean headers -+ * to ensure that prototype/typedef/macro changes break the build. -+ */ -+ -+#ifndef __SERVICES_KERNEL_CLIENT__ -+#define __SERVICES_KERNEL_CLIENT__ -+ -+#include "pvrsrv_error.h" -+ -+#include -+ -+#include "pvrsrv_sync_km.h" -+#include "sync_checkpoint_external.h" -+ -+/* included for the define PVRSRV_LINUX_DEV_INIT_ON_PROBE */ -+#include "pvr_drm.h" -+ -+#ifndef __pvrsrv_defined_struct_enum__ -+ -+/* sync_external.h */ -+ -+struct PVRSRV_CLIENT_SYNC_PRIM_TAG { -+ volatile __u32 *pui32LinAddr; -+}; -+ -+struct PVRSRV_CLIENT_SYNC_PRIM_OP { -+ __u32 ui32Flags; -+ struct pvrsrv_sync_prim *psSync; -+ __u32 ui32FenceValue; -+ __u32 ui32UpdateValue; -+}; -+ -+#else /* __pvrsrv_defined_struct_enum__ */ -+ -+struct PVRSRV_CLIENT_SYNC_PRIM_TAG; -+struct PVRSRV_CLIENT_SYNC_PRIM_OP; -+ -+#endif /* __pvrsrv_defined_struct_enum__ */ -+ -+struct _PMR_; -+struct _PVRSRV_DEVICE_NODE_; -+struct dma_buf; -+struct SYNC_PRIM_CONTEXT_TAG; -+ -+/* pvr_notifier.h */ -+ -+#ifndef CMDCOMPNOTIFY_PFN -+typedef void (*PFN_CMDCOMP_NOTIFY)(void *hCmdCompHandle); -+#define CMDCOMPNOTIFY_PFN -+#endif -+enum PVRSRV_ERROR_TAG PVRSRVRegisterCmdCompleteNotify(void **phNotify, -+ PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify, void *hPrivData); -+enum PVRSRV_ERROR_TAG PVRSRVUnregisterCmdCompleteNotify(void *hNotify); -+void PVRSRVCheckStatus(void *hCmdCompCallerHandle); -+ -+#define DEBUG_REQUEST_DC 0 -+#define DEBUG_REQUEST_SYNCTRACKING 1 -+#define DEBUG_REQUEST_SRV 2 -+#define DEBUG_REQUEST_SYS 3 -+#define DEBUG_REQUEST_RGX 4 -+#define DEBUG_REQUEST_ANDROIDSYNC 5 -+#define DEBUG_REQUEST_LINUXFENCE 6 -+#define DEBUG_REQUEST_SYNCCHECKPOINT 7 -+#define DEBUG_REQUEST_HTB 8 -+#define DEBUG_REQUEST_APPHINT 9 -+#define DEBUG_REQUEST_FALLBACKSYNC 10 -+ -+#define DEBUG_REQUEST_VERBOSITY_LOW 0 -+#define DEBUG_REQUEST_VERBOSITY_MEDIUM 1 -+#define DEBUG_REQUEST_VERBOSITY_HIGH 2 -+#define DEBUG_REQUEST_VERBOSITY_MAX DEBUG_REQUEST_VERBOSITY_HIGH -+ -+#define DD_VERB_LVL_ENABLED(_verbLvl, _verbLvlChk) ((_verbLvl) >= (_verbLvlChk)) -+ -+#ifndef DBGNOTIFY_PFNS -+typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile, -+ const char *fmt, ...) 
__printf(2, 3); -+typedef void (*PFN_DBGREQ_NOTIFY) (void *hDebugRequestHandle, -+ __u32 ui32VerbLevel, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile); -+#define DBGNOTIFY_PFNS -+#endif -+enum PVRSRV_ERROR_TAG PVRSRVRegisterDeviceDbgRequestNotify(void **phNotify, -+ struct _PVRSRV_DEVICE_NODE_ *psDevNode, -+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, -+ __u32 ui32RequesterID, -+ void *hDbgRequestHandle); -+enum PVRSRV_ERROR_TAG PVRSRVUnregisterDeviceDbgRequestNotify(void *hNotify); -+enum PVRSRV_ERROR_TAG PVRSRVRegisterDriverDbgRequestNotify(void **phNotify, -+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, -+ __u32 ui32RequesterID, -+ void *hDbgRequestHandle); -+enum PVRSRV_ERROR_TAG PVRSRVUnregisterDriverDbgRequestNotify(void *hNotify); -+ -+/* physmem_dmabuf.h */ -+ -+struct dma_buf *PhysmemGetDmaBuf(struct _PMR_ *psPMR); -+ -+/* pvrsrv.h */ -+ -+enum PVRSRV_ERROR_TAG PVRSRVAcquireGlobalEventObjectKM(void **phGlobalEventObject); -+enum PVRSRV_ERROR_TAG PVRSRVReleaseGlobalEventObjectKM(void *hGlobalEventObject); -+ -+/* sync.h */ -+ -+enum PVRSRV_ERROR_TAG SyncPrimContextCreate( -+ struct _PVRSRV_DEVICE_NODE_ *psDevConnection, -+ struct SYNC_PRIM_CONTEXT_TAG **phSyncPrimContext); -+void SyncPrimContextDestroy(struct SYNC_PRIM_CONTEXT_TAG *hSyncPrimContext); -+ -+enum PVRSRV_ERROR_TAG SyncPrimAlloc(struct SYNC_PRIM_CONTEXT_TAG *hSyncPrimContext, -+ struct PVRSRV_CLIENT_SYNC_PRIM_TAG **ppsSync, const char *pszClassName); -+enum PVRSRV_ERROR_TAG SyncPrimFree(struct PVRSRV_CLIENT_SYNC_PRIM_TAG *psSync); -+enum PVRSRV_ERROR_TAG SyncPrimGetFirmwareAddr( -+ struct PVRSRV_CLIENT_SYNC_PRIM_TAG *psSync, -+ __u32 *sync_addr); -+ -+/* osfunc.h */ -+enum PVRSRV_ERROR_TAG OSEventObjectWait(void *hOSEventKM); -+enum PVRSRV_ERROR_TAG OSEventObjectOpen(void *hEventObject, void **phOSEventKM); -+enum PVRSRV_ERROR_TAG OSEventObjectClose(void *hOSEventKM); -+__u32 OSGetCurrentClientProcessIDKM(void); -+__u32 OSStringUINT32ToStr(char *pszBuf, size_t uSize, __u32 ui32Num); -+ -+/* srvkm.h */ -+ -+enum PVRSRV_ERROR_TAG PVRSRVCommonDeviceCreate(void *pvOSDevice, -+ int i32KernelDeviceID, -+ struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode); -+void PVRSRVCommonDeviceDestroy( -+ struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); -+const char *PVRSRVGetErrorString(enum PVRSRV_ERROR_TAG eError); -+#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_PROBE) -+enum PVRSRV_ERROR_TAG PVRSRVCommonDeviceInitialise( -+ struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); -+#endif -+ -+#ifndef CHECKPOINT_PFNS -+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, PVRSRV_FENCE fence, u32 *nr_checkpoints, PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid); -+ -+#ifndef CHECKPOINT_PFNS -+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)( -+ struct _PVRSRV_DEVICE_NODE_ *device, -+ const char *fence_name, -+ PVRSRV_TIMELINE timeline, -+ PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, -+ PVRSRV_FENCE *new_fence, -+ u64 *fence_uid, -+ void **fence_finalise_data, -+ PSYNC_CHECKPOINT *new_checkpoint_handle, -+ void **timeline_update_sync, -+ __u32 *timeline_update_value); -+#endif -+ -+#ifndef CHECKPOINT_PFNS -+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN)(PVRSRV_FENCE fence_to_rollback, void *finalise_data); -+#endif -+ -+#ifndef CHECKPOINT_PFNS -+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN)(PVRSRV_FENCE fence_to_finalise, void *finalise_data); -+#endif -+ -+#ifndef CHECKPOINT_PFNS -+typedef __u32 
(*PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN)(__u32 num_ufos, __u32 *vaddrs); -+#endif -+ -+#ifndef CHECKPOINT_PFNS -+typedef bool (*PFN_SYNC_CHECKPOINT_UFO_HAS_SIGNALLED_FN)( -+ __u32 ui32FwAddr, __u32 ui32Value); -+typedef enum PVRSRV_ERROR_TAG (*PFN_SYNC_CHECKPOINT_SIGNAL_WAITERS_FN)(void); -+typedef void(*PFN_SYNC_CHECKPOINT_CHECK_STATE_FN)(void); -+#if defined(PDUMP) -+typedef PVRSRV_ERROR(*PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN)(PVRSRV_FENCE iFence, -+ IMG_UINT32 *puiNumCheckpoints, -+ PSYNC_CHECKPOINT **papsCheckpoints); -+#endif -+#endif -+ -+/* This is the function that kick code will call in a NO_HARDWARE build only after -+ * sync checkpoints have been manually signalled, to allow the OS native sync -+ * implementation to update its timelines (as the usual callback notification -+ * of signalled checkpoints is not supported for NO_HARDWARE). -+ */ -+#ifndef CHECKPOINT_PFNS -+typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data); -+typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr); -+ -+#define SYNC_CHECKPOINT_IMPL_MAX_STRLEN 20 -+ -+typedef struct { -+ PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve; -+ PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate; -+ PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback; -+ PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise; -+ PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines; -+ PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem; -+ PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN pfnDumpInfoOnStalledUFOs; -+ char pszImplName[SYNC_CHECKPOINT_IMPL_MAX_STRLEN]; -+#if defined(PDUMP) -+ PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN pfnSyncFenceGetCheckpoints; -+#endif -+} PFN_SYNC_CHECKPOINT_STRUCT; -+ -+enum PVRSRV_ERROR_TAG SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_STRUCT *psSyncCheckpointPfns); -+ -+#define CHECKPOINT_PFNS -+#endif -+ -+/* sync_checkpoint.h */ -+enum PVRSRV_ERROR_TAG SyncCheckpointContextCreate(struct _PVRSRV_DEVICE_NODE_ *psDevConnection, PSYNC_CHECKPOINT_CONTEXT *phSyncCheckpointContext); -+enum PVRSRV_ERROR_TAG SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext); -+void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext); -+void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext); -+enum PVRSRV_ERROR_TAG SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, PVRSRV_TIMELINE timeline, PVRSRV_FENCE fence, const char *pszCheckpointName, PSYNC_CHECKPOINT *ppsSyncCheckpoint); -+void SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags); -+void SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags); -+bool SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags); -+bool SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags); -+enum PVRSRV_ERROR_TAG SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint); -+enum PVRSRV_ERROR_TAG SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint); -+void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint); -+__u32 SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint); -+void SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint); -+__u32 SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint); -+__u32 SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint); -+__u32 SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint); -+PVRSRV_TIMELINE 
SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint); -+const char *SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint); -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) -+struct _PVRSRV_DEVICE_NODE_ *SyncCheckpointGetAssociatedDevice(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext); -+#endif -+ -+#endif -+ -+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_BUFFER_SYNC) -+/*************************************************************************/ /*! -+@Function NativeSyncGetFenceStatusWq -+@Description Called to get the Foreign Fence status workqueue used in -+ Fence sync and Buffer sync. -+@Return struct workqueue_struct ptr on success, NULL otherwise. -+*/ /**************************************************************************/ -+struct workqueue_struct *NativeSyncGetFenceStatusWq(void); -+#endif -+ -+#endif /* __SERVICES_KERNEL_CLIENT__ */ -diff --git a/drivers/gpu/drm/img-rogue/services_km.h b/drivers/gpu/drm/img-rogue/services_km.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/services_km.h -@@ -0,0 +1,180 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services API Kernel mode Header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Exported services API details -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#ifndef SERVICES_KM_H -+#define SERVICES_KM_H -+ -+#include "img_types.h" -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+#include "virt_validation_defs.h" -+#endif -+ -+/*! 4k page size definition */ -+#define PVRSRV_4K_PAGE_SIZE 4096U /*!< Size of a 4K Page */ -+#define PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT 12 /*!< Amount to shift an address by so that -+ it is always page-aligned */ -+/*! 16k page size definition */ -+#define PVRSRV_16K_PAGE_SIZE 16384U /*!< Size of a 16K Page */ -+#define PVRSRV_16K_PAGE_SIZE_ALIGNSHIFT 14 /*!< Amount to shift an address by so that -+ it is always page-aligned */ -+/*! 64k page size definition */ -+#define PVRSRV_64K_PAGE_SIZE 65536U /*!< Size of a 64K Page */ -+#define PVRSRV_64K_PAGE_SIZE_ALIGNSHIFT 16 /*!< Amount to shift an address by so that -+ it is always page-aligned */ -+/*! 256k page size definition */ -+#define PVRSRV_256K_PAGE_SIZE 262144U /*!< Size of a 256K Page */ -+#define PVRSRV_256K_PAGE_SIZE_ALIGNSHIFT 18 /*!< Amount to shift an address by so that -+ it is always page-aligned */ -+/*! 1MB page size definition */ -+#define PVRSRV_1M_PAGE_SIZE 1048576U /*!< Size of a 1M Page */ -+#define PVRSRV_1M_PAGE_SIZE_ALIGNSHIFT 20 /*!< Amount to shift an address by so that -+ it is always page-aligned */ -+/*! 2MB page size definition */ -+#define PVRSRV_2M_PAGE_SIZE 2097152U /*!< Size of a 2M Page */ -+#define PVRSRV_2M_PAGE_SIZE_ALIGNSHIFT 21 /*!< Amount to shift an address by so that -+ it is always page-aligned */ -+ -+/*! -+ * @AddToGroup SRVConnectInterfaces -+ * @{ -+ */ -+ -+#ifndef PVRSRV_DEV_CONNECTION_TYPEDEF -+#define PVRSRV_DEV_CONNECTION_TYPEDEF -+/*! -+ * Forward declaration (look on connection.h) -+ */ -+typedef struct PVRSRV_DEV_CONNECTION_TAG PVRSRV_DEV_CONNECTION; -+#endif -+ -+/*! -+ * @Anchor SRV_FLAGS -+ * @Name SRV_FLAGS: Services connection flags -+ * Allows to define per-client policy for Services. -+ * @{ -+ */ -+ -+/* -+ * Use of the 32-bit connection flags mask -+ * ( X = taken/in use, - = available/unused ) -+ * -+ * 31 27 20 6 4 0 -+ * | | | | | | -+ * X---XXXXXXXX-------------XXX---- -+ */ -+ -+#define SRV_NO_HWPERF_CLIENT_STREAM (1UL << 4) /*!< Don't create HWPerf for this connection */ -+#define SRV_FLAGS_CLIENT_64BIT_COMPAT (1UL << 5) /*!< This flags gets set if the client is 64 Bit compatible. */ -+#define SRV_FLAGS_CLIENT_SLR_DISABLED (1UL << 6) /*!< This flag is set if the client does not want Sync Lockup Recovery (SLR) enabled. */ -+#define SRV_FLAGS_PDUMPCTRL (1UL << 31) /*!< PDump Ctrl client flag */ -+ -+/*! @} SRV_FLAGS */ -+ -+/*! @} End of SRVConnectInterfaces */ -+ -+/* -+ * Bits 20 - 27 are used to pass information needed for validation -+ * of the GPU Virtualisation Validation mechanism. In particular: -+ * -+ * Bits: -+ * [20 - 22]: OSid of the memory region that will be used for allocations -+ * [23 - 25]: OSid that will be emitted by the Firmware for all memory accesses -+ * regarding that memory context. 
-+ * [26]: If the AXI Protection register will be set to secure for that OSid -+ * [27]: If the Emulator Wrapper Register checking for protection violation -+ * will be set to secure for that OSid -+ */ -+ -+#define VIRTVAL_FLAG_OSID_SHIFT (20) -+#define SRV_VIRTVAL_FLAG_OSID_MASK (7U << VIRTVAL_FLAG_OSID_SHIFT) -+ -+#define VIRTVAL_FLAG_OSIDREG_SHIFT (23) -+#define SRV_VIRTVAL_FLAG_OSIDREG_MASK (7U << VIRTVAL_FLAG_OSIDREG_SHIFT) -+ -+#define VIRTVAL_FLAG_AXIPREG_SHIFT (26) -+#define SRV_VIRTVAL_FLAG_AXIPREG_MASK (1U << VIRTVAL_FLAG_AXIPREG_SHIFT) -+ -+#define VIRTVAL_FLAG_AXIPTD_SHIFT (27) -+#define SRV_VIRTVAL_FLAG_AXIPTD_MASK (1U << VIRTVAL_FLAG_AXIPTD_SHIFT) -+ -+ -+/* Size of pointer on a 64 bit machine */ -+#define POINTER_SIZE_64BIT (8U) -+ -+ -+/* -+ Pdump flags which are accessible to Services clients -+*/ -+#define PDUMP_NONE 0x00000000U /* -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "power.h" -+#include "spacemit_init.h" -+#include "pvrsrv_device.h" -+#include "syscommon.h" -+#include -+#include -+#include -+#include "rgxdevice.h" -+#include "sysconfig.h" -+ -+static struct st_context *g_platform = NULL; -+ -+#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) -+void stSetFrequency(IMG_HANDLE hSysData, IMG_UINT32 ui32Frequency) -+{ -+ int ret = 0; -+ unsigned int old_freq; -+ -+ if (NULL == g_platform) -+ panic("oops"); -+ -+ old_freq = clk_get_rate(g_platform->gpu_clk); -+ mutex_lock(&g_platform->set_power_state); -+ if (g_platform->bEnablePd) -+ ret = clk_set_rate(g_platform->gpu_clk, ui32Frequency); -+ mutex_unlock(&g_platform->set_power_state); -+ PVR_DPF((PVR_DBG_VERBOSE, "gpu clock change frequency: %d",ui32Frequency)); -+ if (ret) { -+ PVR_DPF((PVR_DBG_ERROR, "failed to set gpu clock rate: %d", ret)); -+ return; -+ } -+} -+ -+void stSetVoltage(IMG_HANDLE hSysData, IMG_UINT32 ui32Volt) -+{ -+ if (NULL == g_platform) -+ panic("oops"); -+ PVR_DPF((PVR_DBG_VERBOSE, "entry %s", __func__)); -+} -+#endif -+ -+#if defined(CONFIG_DEVFREQ_THERMAL) && defined(SUPPORT_LINUX_DVFS) -+ -+#define FALLBACK_STATIC_TEMPERATURE 55000 -+ -+static u32 dynamic_coefficient; -+static u32 static_coefficient; -+static s32 ts[4]; -+static struct thermal_zone_device *gpu_tz; -+ -+static unsigned long model_static_power(struct devfreq *df, unsigned long voltage) -+{ -+ int temperature; -+ unsigned long temp; -+ unsigned long temp_squared, temp_cubed, temp_scaling_factor; -+ const unsigned long voltage_cubed = (voltage * voltage * voltage) >> 10; -+ -+ if (gpu_tz) { -+ int ret; -+ -+ ret = gpu_tz->ops->get_temp(gpu_tz, &temperature); -+ if (ret) { -+ pr_warn_ratelimited("Error reading temperature for gpu thermal zone: %d\n", -+ ret); -+ temperature = FALLBACK_STATIC_TEMPERATURE; -+ } -+ } else { -+ temperature = FALLBACK_STATIC_TEMPERATURE; -+ } -+ -+ /* Calculate the temperature scaling factor. To be applied to the -+ * voltage scaled power. -+ */ -+ temp = temperature / 1000; -+ temp_squared = temp * temp; -+ temp_cubed = temp_squared * temp; -+ temp_scaling_factor = -+ (ts[3] * temp_cubed) -+ + (ts[2] * temp_squared) -+ + (ts[1] * temp) -+ + ts[0]; -+ -+ return (((static_coefficient * voltage_cubed) >> 20) -+ * temp_scaling_factor) -+ / 1000000; -+} -+ -+static unsigned long model_dynamic_power(struct devfreq *df, unsigned long freq, -+ unsigned long voltage) -+{ -+ /* The inputs: freq (f) is in Hz, and voltage (v) in mV. -+ * The coefficient (c) is in mW/(MHz mV mV). 
-+ * -+ * This function calculates the dynamic power after this formula: -+ * Pdyn (mW) = c (mW/(MHz*mV*mV)) * v (mV) * v (mV) * f (MHz) -+ */ -+ const unsigned long v2 = (voltage * voltage) / 1000; /* m*(V*V) */ -+ const unsigned long f_mhz = freq / 1000000; /* MHz */ -+ -+ return (dynamic_coefficient * v2 * f_mhz) / 1000000; /* mW */ -+} -+ -+struct devfreq_cooling_power spacemit_power_model_simple_ops = { -+ .get_static_power = model_static_power, -+ .get_dynamic_power = model_dynamic_power, -+}; -+ -+int spacemit_power_model_simple_init(struct device *dev) -+{ -+ struct device_node *power_model_node; -+ const char *tz_name; -+ -+ power_model_node = of_get_child_by_name(dev->of_node, -+ "power_model"); -+ if (!power_model_node) { -+ dev_err(dev, "could not find power_model node\n"); -+ return -ENODEV; -+ } -+ if (!of_device_is_compatible(power_model_node, -+ "img,pvr-simple-power-model")) { -+ dev_err(dev, "power_model incompatible with simple power model\n"); -+ return -ENODEV; -+ } -+ -+ if (of_property_read_string(power_model_node, "thermal-zone", -+ &tz_name)) { -+ dev_err(dev, "ts in power_model not available\n"); -+ return -EINVAL; -+ } -+ -+ gpu_tz = thermal_zone_get_zone_by_name(tz_name); -+ if (IS_ERR(gpu_tz)) { -+ pr_warn_ratelimited("Error getting gpu thermal zone (%ld), not yet ready?\n", -+ PTR_ERR(gpu_tz)); -+ gpu_tz = NULL; -+ -+ return -EPROBE_DEFER; -+ } -+ -+ if (of_property_read_u32(power_model_node, "dynamic-coefficient", -+ &dynamic_coefficient)) { -+ dev_err(dev, "dynamic-coefficient in power_model not available\n"); -+ return -EINVAL; -+ } -+ if (of_property_read_u32(power_model_node, "static-coefficient", -+ &static_coefficient)) { -+ dev_err(dev, "static-coefficient in power_model not available\n"); -+ return -EINVAL; -+ } -+ if (of_property_read_u32_array(power_model_node, "ts", (u32 *)ts, 4)) { -+ dev_err(dev, "ts in power_model not available\n"); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+#endif -+static void RgxEnableClock(struct st_context *platform) -+{ -+ if (!platform->gpu_clk) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: gpu_clk is null", __func__)); -+ return; -+ } -+ -+ if (!platform->gpu_active) { -+ //clk_prepare_enable(platform->gpu_clk); -+ reset_control_deassert(platform->gpu_reset); -+ platform->gpu_active = IMG_TRUE; -+ PVR_DPF((PVR_DBG_VERBOSE, "gpu clock on gpu_active:%d", platform->gpu_active)); -+ } else { -+ PVR_DPF((PVR_DBG_VERBOSE, "gpu clock already on!")); -+ } -+} -+ -+static void RgxDisableClock(struct st_context *platform) -+{ -+ -+ if (!platform->gpu_clk) { -+ PVR_DPF((PVR_DBG_ERROR, "%s: gpu_clk is null", __func__)); -+ return; -+ } -+ -+ if (platform->gpu_active) { -+ reset_control_assert(platform->gpu_reset); -+ //clk_disable_unprepare(platform->gpu_clk); -+ platform->gpu_active = IMG_FALSE; -+ PVR_DPF((PVR_DBG_VERBOSE, "gpu clock off gpu_active:%d", platform->gpu_active)); -+ } else { -+ PVR_DPF((PVR_DBG_VERBOSE, "gpu clock already off!")); -+ } -+} -+ -+static void RgxEnablePower(struct st_context *platform) -+{ -+ struct device *dev = (struct device *)platform->dev_config->pvOSDevice; -+ if (!platform->bEnablePd) { -+ pm_runtime_get_sync(dev); -+ platform->bEnablePd = IMG_TRUE; -+ PVR_DPF((PVR_DBG_VERBOSE, "gpu power on bEnablePd:%d", platform->bEnablePd)); -+ } else { -+ PVR_DPF((PVR_DBG_VERBOSE, "gpu already power on!")); -+ } -+} -+ -+static void RgxDisablePower(struct st_context *platform) -+{ -+ struct device *dev = (struct device *)platform->dev_config->pvOSDevice; -+ if (platform->bEnablePd) { -+ pm_runtime_put(dev); -+ 
platform->bEnablePd = IMG_FALSE; -+ PVR_DPF((PVR_DBG_VERBOSE, "gpu power off bEnablePd:%d", platform->bEnablePd)); -+ } else { -+ PVR_DPF((PVR_DBG_VERBOSE, "gpu already power off!")); -+ } -+} -+ -+void RgxResume(struct st_context *platform) -+{ -+ PVR_DPF((PVR_DBG_VERBOSE, "%s", __func__)); -+ RgxEnablePower(platform); -+ RgxEnableClock(platform); -+ } -+ -+void RgxSuspend(struct st_context *platform) -+{ -+ PVR_DPF((PVR_DBG_VERBOSE, "%s", __func__)); -+ RgxDisableClock(platform); -+ RgxDisablePower(platform); -+} -+ -+PVRSRV_ERROR STPrePowerState(IMG_HANDLE hSysData, -+ PVRSRV_SYS_POWER_STATE eNewPowerState, -+ PVRSRV_SYS_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags) -+{ -+ struct st_context *platform = (struct st_context *)hSysData; -+ -+ PVR_DPF((PVR_DBG_VERBOSE, "STPrePowerState new:%d, curr:%d", eNewPowerState, eCurrentPowerState)); -+ mutex_lock(&platform->set_power_state); -+ if ((PVRSRV_SYS_POWER_STATE_ON == eNewPowerState) && -+ (PVRSRV_SYS_POWER_STATE_OFF == eCurrentPowerState)) -+ RgxResume(platform); -+ mutex_unlock(&platform->set_power_state); -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR STPostPowerState(IMG_HANDLE hSysData, -+ PVRSRV_SYS_POWER_STATE eNewPowerState, -+ PVRSRV_SYS_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags) -+{ -+ struct st_context *platform = (struct st_context *)hSysData; -+ -+ PVR_DPF((PVR_DBG_VERBOSE, "STPostPowerState new:%d, curr:%d", eNewPowerState, eCurrentPowerState)); -+ mutex_lock(&platform->set_power_state); -+ if ((PVRSRV_SYS_POWER_STATE_OFF == eNewPowerState) && -+ (PVRSRV_SYS_POWER_STATE_ON == eCurrentPowerState)) -+ RgxSuspend(platform); -+ mutex_unlock(&platform->set_power_state); -+ return PVRSRV_OK; -+} -+ -+void RgxStUnInit(struct st_context *platform) -+{ -+ struct device *dev = (struct device *)platform->dev_config->pvOSDevice; -+ -+ RgxSuspend(platform); -+ -+ if (platform->gpu_clk) { -+ devm_clk_put(dev, platform->gpu_clk); -+ platform->gpu_clk = NULL; -+ } -+ -+ pm_runtime_disable(dev); -+ devm_kfree(dev, platform); -+} -+ -+struct st_context *RgxStInit(PVRSRV_DEVICE_CONFIG* psDevConfig) -+{ -+ struct device *dev = (struct device *)psDevConfig->pvOSDevice; -+ struct st_context *platform; -+ RGX_DATA* psRGXData = (RGX_DATA*)psDevConfig->hDevData; -+ -+ platform = devm_kzalloc(dev, sizeof(struct st_context), GFP_KERNEL); -+ if (NULL == platform) { -+ PVR_DPF((PVR_DBG_ERROR, "RgxRkInit: Failed to kzalloc rk_context")); -+ return NULL; -+ } -+ g_platform = platform; -+ if (!dev->dma_mask) -+ dev->dma_mask = &dev->coherent_dma_mask; -+ -+ platform->dev_config = psDevConfig; -+ platform->gpu_active = IMG_FALSE; -+#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) -+ psDevConfig->sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE; -+ psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetFrequency = stSetFrequency; -+ psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetVoltage = stSetVoltage; -+ psDevConfig->sDVFS.sDVFSDeviceCfg.ui32PollMs = 300; -+ psDevConfig->sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90; -+ psDevConfig->sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10; -+#if defined(CONFIG_DEVFREQ_THERMAL) && defined(SUPPORT_LINUX_DVFS) -+ psDevConfig->sDVFS.sDVFSDeviceCfg.psPowerOps = &spacemit_power_model_simple_ops; -+ if (spacemit_power_model_simple_init(dev)) { -+ PVR_DPF((PVR_DBG_ERROR, "RgxStInit: spacemit_power_model_simple_init fail")); -+ goto fail; -+ } -+#endif -+#endif -+ platform->bEnablePd = IMG_FALSE; -+ pm_runtime_enable(dev); -+ -+ platform->gpu_reset = devm_reset_control_get_exclusive(dev, NULL); -+ if 
(IS_ERR_OR_NULL(platform->gpu_reset)) { -+ PVR_DPF((PVR_DBG_ERROR, "RgxStInit: Failed to find gpu reset source")); -+ goto fail; -+ } -+ -+ platform->gpu_clk = devm_clk_get(dev, "gpu_clk"); -+ if (IS_ERR_OR_NULL(platform->gpu_clk)) { -+ PVR_DPF((PVR_DBG_ERROR, "RgxStInit: Failed to find gpu clk source")); -+ goto fail; -+ } -+ -+ reset_control_deassert(platform->gpu_reset); -+ -+ clk_prepare_enable(platform->gpu_clk); -+ clk_set_rate(platform->gpu_clk, RGX_ST_CORE_CLOCK_SPEED); -+ -+ if (psRGXData && psRGXData->psRGXTimingInfo) -+ { -+ psRGXData->psRGXTimingInfo->ui32CoreClockSpeed = clk_get_rate(platform->gpu_clk); -+ } -+ -+ reset_control_assert(platform->gpu_reset); -+ -+ mutex_init(&platform->set_power_state); -+ -+ return platform; -+ -+fail: -+ devm_kfree(dev, platform); -+ return NULL; -+} -diff --git a/drivers/gpu/drm/img-rogue/spacemit/spacemit_init.h b/drivers/gpu/drm/img-rogue/spacemit/spacemit_init.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/spacemit/spacemit_init.h -@@ -0,0 +1,34 @@ -+#if !defined(__SPACEMIT_INIT_H__) -+#define __SPACEMIT_INIT_H__ -+ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_device.h" -+#include "servicesext.h" -+#include -+ -+struct st_context { -+ PVRSRV_DEVICE_CONFIG *dev_config; -+ struct clk *gpu_clk; -+ struct reset_control *gpu_reset; -+ /* mutex protect for set power state */ -+ struct mutex set_power_state; -+ IMG_BOOL gpu_active; -+ IMG_BOOL bEnablePd; -+}; -+ -+struct st_context * RgxStInit(PVRSRV_DEVICE_CONFIG* psDevConfig); -+void RgxStUnInit(struct st_context *platform); -+void RgxResume(struct st_context *platform); -+void RgxSuspend(struct st_context *platform); -+PVRSRV_ERROR STPrePowerState(IMG_HANDLE hSysData, -+ PVRSRV_SYS_POWER_STATE eNewPowerState, -+ PVRSRV_SYS_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags); -+PVRSRV_ERROR STPostPowerState(IMG_HANDLE hSysData, -+ PVRSRV_SYS_POWER_STATE eNewPowerState, -+ PVRSRV_SYS_POWER_STATE eCurrentPowerState, -+ PVRSRV_POWER_FLAGS ePwrFlags); -+void stSetFrequency(IMG_HANDLE hSysData, IMG_UINT32 ui32Frequency); -+void stSetVoltage(IMG_HANDLE hSysData, IMG_UINT32 ui32Voltage); -+#endif -diff --git a/drivers/gpu/drm/img-rogue/spacemit/sysconfig.c b/drivers/gpu/drm/img-rogue/spacemit/sysconfig.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/spacemit/sysconfig.c -@@ -0,0 +1,342 @@ -+/*************************************************************************/ /*! -+@File -+@Title System Configuration -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description System Configuration functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include -+ -+#include "interrupt_support.h" -+#include "pvrsrv_device.h" -+#include "syscommon.h" -+#include "sysconfig.h" -+#include "physheap.h" -+#if defined(SUPPORT_ION) -+#include "ion_support.h" -+#endif -+#include "spacemit_init.h" -+#include "vz_vmm_pvz.h" -+ -+static RGX_TIMING_INFORMATION gsRGXTimingInfo; -+static RGX_DATA gsRGXData; -+static PVRSRV_DEVICE_CONFIG gsDevices[1]; -+static PHYS_HEAP_FUNCTIONS gsPhysHeapFuncs; -+static PHYS_HEAP_CONFIG gsPhysHeapConfig[3]; -+ -+/* -+ CPU to Device physical address translation -+*/ -+static -+void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_DEV_PHYADDR *psDevPAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr) -+{ -+ PVR_UNREFERENCED_PARAMETER(hPrivData); -+ -+ /* Optimise common case */ -+ psDevPAddr[0].uiAddr = phys_cpu2gpu(psCpuPAddr[0].uiAddr); -+ if (ui32NumOfAddr > 1) -+ { -+ IMG_UINT32 ui32Idx; -+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) -+ { -+ psDevPAddr[ui32Idx].uiAddr = phys_cpu2gpu(psCpuPAddr[ui32Idx].uiAddr); -+ } -+ } -+} -+ -+/* -+ Device to CPU physical address translation -+*/ -+static -+void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, -+ IMG_UINT32 ui32NumOfAddr, -+ IMG_CPU_PHYADDR *psCpuPAddr, -+ IMG_DEV_PHYADDR *psDevPAddr) -+{ -+ PVR_UNREFERENCED_PARAMETER(hPrivData); -+ -+ /* Optimise common case */ -+ psCpuPAddr[0].uiAddr = phys_gpu2cpu(psDevPAddr[0].uiAddr); -+ if (ui32NumOfAddr > 1) -+ { -+ IMG_UINT32 ui32Idx; -+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx) -+ { -+ psCpuPAddr[ui32Idx].uiAddr = phys_gpu2cpu(psDevPAddr[ui32Idx].uiAddr); -+ } -+ } -+} -+ -+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) -+{ -+ IMG_UINT32 ui32NextPhysHeapID = 0; -+ int iIrq; -+ struct resource *psDevMemRes = NULL; -+ struct platform_device *psDev; -+ -+ psDev = to_platform_device((struct device *)pvOSDevice); -+ -+ if (gsDevices[0].pvOSDevice) -+ { -+ return PVRSRV_ERROR_INVALID_DEVICE; -+ } -+ -+ /* -+ * Setup information about physical memory heap(s) we have -+ */ -+ gsPhysHeapFuncs.pfnCpuPAddrToDevPAddr = 
UMAPhysHeapCpuPAddrToDevPAddr; -+ gsPhysHeapFuncs.pfnDevPAddrToCpuPAddr = UMAPhysHeapDevPAddrToCpuPAddr; -+ -+ gsPhysHeapConfig[0].pszPDumpMemspaceName = "SYSMEM"; -+ gsPhysHeapConfig[0].eType = PHYS_HEAP_TYPE_UMA; -+ gsPhysHeapConfig[0].psMemFuncs = &gsPhysHeapFuncs; -+ gsPhysHeapConfig[0].hPrivData = NULL; -+ gsPhysHeapConfig[0].ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL; -+ ui32NextPhysHeapID += 1; -+ -+ /* -+ * Setup RGX specific timing data -+ */ -+ gsRGXTimingInfo.ui32CoreClockSpeed = RGX_ST_CORE_CLOCK_SPEED; -+ gsRGXTimingInfo.bEnableActivePM = IMG_FALSE; -+ gsRGXTimingInfo.bEnableRDPowIsland = IMG_TRUE; -+ gsRGXTimingInfo.ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS; -+ -+ /* -+ * Setup RGX specific data -+ */ -+ gsRGXData.psRGXTimingInfo = &gsRGXTimingInfo; -+ -+ /* -+ * Setup RGX device -+ */ -+ gsDevices[0].pvOSDevice = pvOSDevice; -+ gsDevices[0].pszName = "spacemit"; -+ -+ /* Device setup information */ -+ -+ psDevMemRes = platform_get_resource(psDev, IORESOURCE_MEM, 0); -+ if (psDevMemRes) -+ { -+ gsDevices[0].sRegsCpuPBase.uiAddr = psDevMemRes->start; -+ gsDevices[0].ui32RegsSize = (unsigned int)(psDevMemRes->end - psDevMemRes->start); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: platform_get_resource failed", __func__)); -+ gsDevices[0].sRegsCpuPBase.uiAddr = ST_GPU_PBASE; -+ gsDevices[0].ui32RegsSize = ST_GPU_SIZE; -+ } -+ -+ iIrq = platform_get_irq(psDev, 0); -+ if (iIrq >= 0) -+ { -+ gsDevices[0].ui32IRQ = (IMG_UINT32) iIrq; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: platform_get_irq failed (%d)", __func__, -iIrq)); -+ gsDevices[0].ui32IRQ = ST_IRQ_GPU; -+ } -+ -+ gsDevices[0].eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_EMULATED; -+ -+ /* Device's physical heaps */ -+ gsDevices[0].pasPhysHeaps = &gsPhysHeapConfig[0]; -+ gsDevices[0].ui32PhysHeapCount = ui32NextPhysHeapID; -+ gsDevices[0].eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL; -+ -+ /* No power management on ST system */ -+ gsDevices[0].pfnPrePowerState = STPrePowerState; -+ gsDevices[0].pfnPostPowerState = STPostPowerState; -+ -+ /* No clock frequency either */ -+ gsDevices[0].pfnClockFreqGet = NULL; -+ -+ //gsDevices[0].pfnCheckMemAllocSize = NULL; -+ -+ gsDevices[0].hDevData = &gsRGXData; -+ -+ gsDevices[0].bHasFBCDCVersion31 = IMG_FALSE; -+ gsDevices[0].bDevicePA0IsValid = IMG_FALSE; -+ -+ /* device error notify callback function */ -+ gsDevices[0].pfnSysDevErrorNotify = NULL; -+ -+ /* TODO: ST Init */ -+ gsDevices[0].hSysData = (IMG_HANDLE)RgxStInit(&gsDevices[0]); -+ if (!gsDevices[0].hSysData) -+ { -+ gsDevices[0].pvOSDevice = NULL; -+ return PVRSRV_ERROR_OUT_OF_MEMORY; -+ } -+ -+ /* Setup other system specific stuff */ -+#if defined(SUPPORT_ION) -+ IonInit(NULL); -+#endif -+ -+ /* Virtualization support services needs to know which heap ID corresponds to FW */ -+ gsPhysHeapConfig[ui32NextPhysHeapID].pszPDumpMemspaceName = "SYSMEM"; -+ gsPhysHeapConfig[ui32NextPhysHeapID].eType = PHYS_HEAP_TYPE_UMA; -+ gsPhysHeapConfig[ui32NextPhysHeapID].psMemFuncs = &gsPhysHeapFuncs; -+ gsPhysHeapConfig[ui32NextPhysHeapID].hPrivData = NULL; -+ gsPhysHeapConfig[ui32NextPhysHeapID].ui32UsageFlags = PHYS_HEAP_USAGE_FW_SHARED; -+ gsDevices[0].ui32PhysHeapCount = ++ui32NextPhysHeapID; -+ -+ *ppsDevConfig = &gsDevices[0]; -+ return PVRSRV_OK; -+} -+ -+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDevConfig); -+ -+ /* TODO:ST UnInit */ -+ RgxStUnInit(psDevConfig->hSysData); -+ psDevConfig->hSysData = NULL; -+ -+#if defined(SUPPORT_ION) -+ IonDeinit(); -+#endif -+ -+ 
psDevConfig->pvOSDevice = NULL; -+} -+ -+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, -+ IMG_UINT32 ui32IRQ, -+ const IMG_CHAR *pszName, -+ PFN_LISR pfnLISR, -+ void *pvData, -+ IMG_HANDLE *phLISRData) -+{ -+ PVR_UNREFERENCED_PARAMETER(hSysData); -+ return OSInstallSystemLISR(phLISRData, ui32IRQ, pszName, pfnLISR, pvData, -+ SYS_IRQ_FLAG_TRIGGER_DEFAULT); -+} -+ -+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData) -+{ -+ return OSUninstallSystemLISR(hLISRData); -+} -+ -+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ PVR_UNREFERENCED_PARAMETER(psDevConfig); -+ PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); -+ PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ * Convert gpu physical address to cpu physical address: -+ * spacemit_k1x: gpu(0-2g) --> cpu(0-2g); -+ * gpu(2-xg) --> cpu(2g-(x+2)g); note: x>=2g -+ * CPU -+ * 0x04 8000 0000 +--> +----------+ -+ * | | | -+ * DEVICE DRAM | | | -+ * 0x04 0000 0000+---------+ <------+ +----------+ +---+ | | -+ * | | | | | | -+ * | | | | | DDR | -+ * | | | | | | -+ * | | | | | | -+ * | | | | | | -+ * 0x01 0000 0000| | | | +--> +----------+ -+ * | | | | | | | -+ * | | | | | | IO | -+ * 0x00 8000 0000+---------+ <------+ +----------+ +---+--> +----------+ -+ * | | | | | | -+ * | | | | | DDR | -+ * 0x00 0000 0000+---------+ <------+ +----------+ +------> +----------+ -+ * -+ */ -+unsigned long phys_gpu2cpu(unsigned long phys_addr) -+{ -+#ifdef CONFIG_SOC_SPACEMIT_K1X -+ if (phys_addr >= 0x80000000UL) { -+ phys_addr += 0x80000000UL; -+ } -+#endif -+ -+ return phys_addr; -+} -+ -+/* -+ * Convert cpu physical address to gpu physical address: -+ * spacemit_k1x: cpu(0-2g) --> gpu(0-2g); -+ * cpu(4-xg) --> gpu(2-(x-2)g); note: x>=4g -+ * CPU -+ * 0x04 8000 0000 +--> +----------+ -+ * | | | -+ * DEVICE DRAM | | | -+ * 0x04 0000 0000+---------+ <------+ +----------+ +---+ | | -+ * | | | | | | -+ * | | | | | DDR | -+ * | | | | | | -+ * | | | | | | -+ * | | | | | | -+ * 0x01 0000 0000| | | | +--> +----------+ -+ * | | | | | | | -+ * | | | | | | IO | -+ * 0x00 8000 0000+---------+ <------+ +----------+ +---+--> +----------+ -+ * | | | | | | -+ * | | | | | DDR | -+ * 0x00 0000 0000+---------+ <------+ +----------+ +------> +----------+ -+ * -+ */ -+unsigned long phys_cpu2gpu(unsigned long phys_addr) -+{ -+#ifdef CONFIG_SOC_SPACEMIT_K1X -+ if (phys_addr >= 0x100000000UL) { -+ phys_addr -= 0x80000000UL; -+ } -+#endif -+ -+ return phys_addr; -+} -+/****************************************************************************** -+ End of file (sysconfig.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/spacemit/sysconfig.h b/drivers/gpu/drm/img-rogue/spacemit/sysconfig.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/spacemit/sysconfig.h -@@ -0,0 +1,63 @@ -+/*************************************************************************/ /*! -+@File -+@Title System Description Header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description This header provides system-specific declarations and macros -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "pvrsrv_device.h" -+#include "rgxdevice.h" -+ -+#if !defined(__SYSCCONFIG_H__) -+#define __SYSCCONFIG_H__ -+ -+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (100) -+//for st soc -+#define RGX_ST_CORE_CLOCK_SPEED (614*1000*1000) -+#define ST_GPU_PBASE 0xcac00000 -+#define ST_GPU_SIZE 0x10000 -+#define ST_IRQ_GPU 59 -+ -+unsigned long phys_gpu2cpu(unsigned long phys_addr); -+unsigned long phys_cpu2gpu(unsigned long phys_addr); -+/***************************************************************************** -+ * system specific data structures -+ *****************************************************************************/ -+ -+#endif /* __SYSCCONFIG_H__ */ -diff --git a/drivers/gpu/drm/img-rogue/spacemit/sysinfo.h b/drivers/gpu/drm/img-rogue/spacemit/sysinfo.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/spacemit/sysinfo.h -@@ -0,0 +1,58 @@ -+/*************************************************************************/ /*! -+@Title System Description Header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description This header provides system-specific declarations and macros -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(__SYSINFO_H__) -+#define __SYSINFO_H__ -+ -+/*!< System specific poll/timeout details */ -+#define MAX_HW_TIME_US (1000000) -+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (3000) -+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) -+#define WAIT_TRY_COUNT (10000) -+ -+#define SYS_RGX_OF_COMPATIBLE "img,rgx" -+ -+#if defined(__linux__) -+#define SYS_RGX_DEV_NAME "spacemit" -+#endif -+ -+#endif /* !defined(__SYSINFO_H__) */ -diff --git a/drivers/gpu/drm/img-rogue/srvcore.c b/drivers/gpu/drm/img-rogue/srvcore.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/srvcore.c -@@ -0,0 +1,1643 @@ -+/*************************************************************************/ /*! -+@File -+@Title PVR Common Bridge Module (kernel side) -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements core PVRSRV API, server side -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "img_defs.h" -+#include "img_types_check.h" -+#include "pvr_debug.h" -+#include "ra.h" -+#include "pvr_bridge.h" -+#include "connection_server.h" -+#include "device.h" -+#include "htbserver.h" -+ -+#include "pdump_km.h" -+ -+#include "srvkm.h" -+#include "allocmem.h" -+#include "devicemem.h" -+#include "log2.h" -+ -+#include "srvcore.h" -+#include "pvrsrv.h" -+#include "power.h" -+ -+#include "os_apphint.h" -+ -+#if defined(SUPPORT_RGX) -+#include "rgxdevice.h" -+#include "rgxinit.h" -+#include "rgx_compat_bvnc.h" -+#endif -+ -+#include "rgx_options.h" -+#include "pvrversion.h" -+#include "lock.h" -+#include "osfunc.h" -+#include "device_connection.h" -+#include "process_stats.h" -+#include "pvrsrv_pool.h" -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+#include "physmem_lma.h" -+#include "services_km.h" -+#endif -+ -+#include "pvrsrv_tlstreams.h" -+#include "tlstream.h" -+ -+#if defined(PVRSRV_MISSING_NO_SPEC_IMPL) -+#pragma message ("There is no implementation of OSConfineArrayIndexNoSpeculation() - see osfunc.h") -+#endif -+ -+/* For the purpose of maintainability, it is intended that this file should not -+ * contain any OS specific #ifdefs. Please find a way to add e.g. 
-+ * an osfunc.c abstraction or override the entire function in question within -+ * env,*,pvr_bridge_k.c -+ */ -+ -+PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT] = { {.pfFunction = DummyBW,} ,}; -+ -+#define PVR_DISPATCH_OFFSET_FIRST_FUNC 0 -+#define PVR_DISPATCH_OFFSET_LAST_FUNC 1 -+#define PVR_DISPATCH_OFFSET_ARRAY_MAX 2 -+ -+#define PVRSRV_CLIENT_TL_STREAM_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE -+ -+static IMG_UINT16 g_BridgeDispatchTableStartOffsets[BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT][PVR_DISPATCH_OFFSET_ARRAY_MAX]; -+ -+ -+#define PVRSRV_MAX_POOLED_BRIDGE_BUFFERS 8 /*!< Initial number of pooled bridge buffers */ -+ -+static PVRSRV_POOL *g_psBridgeBufferPool; /*! Pool of bridge buffers */ -+ -+ -+#if defined(DEBUG_BRIDGE_KM) -+/* a lock used for protecting bridge call timing calculations -+ * for calls which do not acquire a lock -+ */ -+static POS_LOCK g_hStatsLock; -+PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats; -+ -+void BridgeGlobalStatsLock(void) -+{ -+ OSLockAcquire(g_hStatsLock); -+} -+ -+void BridgeGlobalStatsUnlock(void) -+{ -+ OSLockRelease(g_hStatsLock); -+} -+#endif -+ -+void BridgeDispatchTableStartOffsetsInit(void) -+{ -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEFAULT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEFAULT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SRVCORE][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SRVCORE][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNC_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RESERVED1][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RESERVED1_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RESERVED1][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RESERVED1_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RESERVED2][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RESERVED2_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RESERVED2][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RESERVED2_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPCTRL][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPCTRL][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MM_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MM_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMPLAT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMPLAT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_CMM_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_CMM_DISPATCH_LAST; -+ 
g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMP][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMP][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMABUF][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMABUF][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DC_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DC_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CACHE][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CACHE][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_CACHE_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SMM_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SMM_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PVRTL][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PVRTL][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RI][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RI_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RI][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RI_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_VALIDATION][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_VALIDATION][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TUTILS][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TUTILS][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEVICEMEMHISTORY][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEVICEMEMHISTORY][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_HTBUFFER][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_HTBUFFER][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DCPLAT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DCPLAT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMEXTMEM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST; -+ 
g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMEXTMEM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCTRACKING][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCTRACKING][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCFALLBACK][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCFALLBACK][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DI][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DI_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DI][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DI_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMA][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DMA_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMA][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DMA_DISPATCH_LAST; -+#if defined(SUPPORT_RGX) -+ /* Need a gap here to start next entry at element 128 */ -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXCMP][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXCMP][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTA3D][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTA3D][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXBREAKPOINT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXBREAKPOINT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXFWDBG][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXFWDBG][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXPDUMP][PVR_DISPATCH_OFFSET_FIRST_FUNC]= PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXPDUMP][PVR_DISPATCH_OFFSET_LAST_FUNC]= PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXHWPERF][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXHWPERF][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXREGCONFIG][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXREGCONFIG][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXKICKSYNC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST; -+ 
g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXKICKSYNC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ2][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ2][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTIMERQUERY][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTIMERQUERY][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXRAY][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST; -+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXRAY][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST; -+#endif -+} -+ -+#if defined(DEBUG_BRIDGE_KM) -+ -+#if defined(INTEGRITY_OS) -+void PVRSRVPrintBridgeStats(void) -+{ -+ IMG_UINT32 ui32Index; -+ IMG_UINT32 ui32Remainder; -+ -+ BridgeGlobalStatsLock(); -+ -+ printf("Total Bridge call count = %u\n" -+ "Total number of bytes copied via copy_from_user = %u\n" -+ "Total number of bytes copied via copy_to_user = %u\n" -+ "Total number of bytes copied via copy_*_user = %u\n\n" -+ "%3s: %-60s | %-48s | %10s | %20s | %20s | %20s | %20s\n", -+ g_BridgeGlobalStats.ui32IOCTLCount, -+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes, -+ g_BridgeGlobalStats.ui32TotalCopyToUserBytes, -+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes + g_BridgeGlobalStats.ui32TotalCopyToUserBytes, -+ "#", -+ "Bridge Name", -+ "Wrapper Function", -+ "Call Count", -+ "copy_from_user (B)", -+ "copy_to_user (B)", -+ "Total Time (us)", -+ "Max Time (us)"); -+ -+ /* Is the item asked for (starts at 0) a valid table index? */ -+ for ( ui32Index=0; ui32Index < BRIDGE_DISPATCH_TABLE_ENTRY_COUNT; ui32Index++ ) -+ { -+ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry = &g_BridgeDispatchTable[ui32Index]; -+ printf("%3d: %-60s %-48s %-10u %-20u %-20u %-20llu %-20llu\n", -+ (IMG_UINT32)(((size_t)psEntry-(size_t)g_BridgeDispatchTable)/sizeof(*g_BridgeDispatchTable)), -+ psEntry->pszIOCName, -+ (psEntry->pfFunction != NULL) ? 
psEntry->pszFunctionName : "(null)", -+ psEntry->ui32CallCount, -+ psEntry->ui32CopyFromUserTotalBytes, -+ psEntry->ui32CopyToUserTotalBytes, -+ (unsigned long long) OSDivide64r64(psEntry->ui64TotalTimeNS, 1000, &ui32Remainder), -+ (unsigned long long) OSDivide64r64(psEntry->ui64MaxTimeNS, 1000, &ui32Remainder)); -+ -+ -+ } -+ -+ BridgeGlobalStatsUnlock(); -+} -+#endif -+ -+PVRSRV_ERROR -+CopyFromUserWrapper(CONNECTION_DATA *psConnection, -+ IMG_UINT32 ui32DispatchTableEntry, -+ void *pvDest, -+ void __user *pvSrc, -+ IMG_UINT32 ui32Size) -+{ -+ BridgeGlobalStatsLock(); -+ g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyFromUserTotalBytes+=ui32Size; -+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size; -+ BridgeGlobalStatsUnlock(); -+ -+ return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size); -+} -+PVRSRV_ERROR -+CopyToUserWrapper(CONNECTION_DATA *psConnection, -+ IMG_UINT32 ui32DispatchTableEntry, -+ void __user *pvDest, -+ void *pvSrc, -+ IMG_UINT32 ui32Size) -+{ -+ BridgeGlobalStatsLock(); -+ g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyToUserTotalBytes+=ui32Size; -+ g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size; -+ BridgeGlobalStatsUnlock(); -+ -+ return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size); -+} -+#else -+INLINE PVRSRV_ERROR -+CopyFromUserWrapper(CONNECTION_DATA *psConnection, -+ IMG_UINT32 ui32DispatchTableEntry, -+ void *pvDest, -+ void __user *pvSrc, -+ IMG_UINT32 ui32Size) -+{ -+ PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry); -+ return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size); -+} -+INLINE PVRSRV_ERROR -+CopyToUserWrapper(CONNECTION_DATA *psConnection, -+ IMG_UINT32 ui32DispatchTableEntry, -+ void __user *pvDest, -+ void *pvSrc, -+ IMG_UINT32 ui32Size) -+{ -+ PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry); -+ return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size); -+} -+#endif -+ -+/**************************************************************************/ /*! -+@Function DeviceDefaultPhysHeapFreeMemCheck -+ -+@Description Check if the required amount of free space is available in the -+ Default PhysHeap for a connection to be made. -+ -+@Input psDeviceNode The device the connection is being -+ made on. -+@Input ui32MinMemInMBs The minimum memory required to be -+ available in the Default PhysHeap. -+ -+@Return PVRSRV_OK if successful else a PVRSRV_ERROR. -+*/ /***************************************************************************/ -+static PVRSRV_ERROR DeviceDefaultPhysHeapFreeMemCheck(PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32MinMemInMBs) -+{ -+ PHYS_HEAP *psDefaultHeap = NULL; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL, "psDeviceNode"); -+ -+ psDefaultHeap = psDeviceNode->apsPhysHeap[psDeviceNode->psDevConfig->eDefaultHeap]; -+ if (psDefaultHeap == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to get device's default PhysHeap")); -+ return PVRSRV_ERROR_INVALID_HEAP; -+ } -+ -+ if (PhysHeapGetType(psDefaultHeap) == PHYS_HEAP_TYPE_LMA) -+ { -+ IMG_UINT64 ui64FreePhysHeapMem; -+ -+ eError = PhysHeapFreeMemCheck(psDefaultHeap, -+ MB2B(ui32MinMemInMBs), -+ &ui64FreePhysHeapMem); -+ if (eError == PVRSRV_ERROR_INSUFFICIENT_PHYS_HEAP_MEMORY) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Default PhysHeap contains less than the " -+ "minimum free space required to acquire a connection. 
" -+ "Free space: %"IMG_UINT64_FMTSPEC"MB " -+ "Minimum required: %uMB", -+ B2MB(ui64FreePhysHeapMem), -+ ui32MinMemInMBs)); -+ } -+ } -+ -+ return eError; -+} -+ -+/**************************************************************************/ /*! -+@Function CheckConnectionPhysHeapMem -+ -+@Description Check if there is enough memory in the PhysHeaps to allow a -+ connection to be made. -+ -+@Input psConnection The connection being made. -+ -+@Return PVRSRV_OK if successful else a PVRSRV_ERROR. -+*/ /***************************************************************************/ -+static PVRSRV_ERROR CheckConnectionPhysHeapMem(CONNECTION_DATA *psConnection) -+{ -+ IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_PHYSHEAPMINMEMONCONNECTION; -+ IMG_UINT32 ui32AppHintPhysHeapMinMemOnConnection = 0; -+ void *pvAppHintState = NULL; -+ PVRSRV_DEVICE_NODE *psDeviceNode = NULL; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_RETURN_IF_INVALID_PARAM(psConnection); -+ -+ OSCreateAppHintState(&pvAppHintState); -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, PhysHeapMinMemOnConnection, -+ &ui32AppHintDefault, &ui32AppHintPhysHeapMinMemOnConnection); -+ OSFreeAppHintState(pvAppHintState); -+ -+ psDeviceNode = OSGetDevNode(psConnection); -+ -+ if (ui32AppHintPhysHeapMinMemOnConnection != 0) -+ { -+ eError = DeviceDefaultPhysHeapFreeMemCheck(psDeviceNode, -+ ui32AppHintPhysHeapMinMemOnConnection); -+ PVR_LOG_RETURN_IF_ERROR(eError, "DeviceDefaultPhysHeapFreeMemCheck"); -+ -+ if (psDeviceNode->pfnCheckForSufficientFWPhysMem != NULL -+ && RGX_FW_PHYSHEAP_MINMEM_ON_CONNECTION > 0) -+ { -+ eError = psDeviceNode->pfnCheckForSufficientFWPhysMem(psDeviceNode); -+ PVR_LOG_RETURN_IF_ERROR(eError, "pfnCheckForSufficientFWPhysMem"); -+ } -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+PVRSRVConnectKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT32 ui32Flags, -+ IMG_UINT32 ui32ClientBuildOptions, -+ IMG_UINT32 ui32ClientDDKVersion, -+ IMG_UINT32 ui32ClientDDKBuild, -+ IMG_UINT8 *pui8KernelArch, -+ IMG_UINT32 *pui32CapabilityFlags, -+ IMG_UINT64 *ui64PackedBvnc) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 ui32ServerBuildOptions; -+ IMG_UINT32 ui32DDKVersion, ui32DDKBuild; -+ PVRSRV_DATA *psSRVData = NULL; -+ IMG_UINT64 ui64ProcessVASpaceSize = OSGetCurrentProcessVASpaceSize(); -+ static IMG_BOOL bIsFirstConnection=IMG_FALSE; -+#if defined(SUPPORT_RGX) -+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; -+#endif -+ -+ /* Check the minimum free PhysHeap memory is available before allowing -+ * the connection to succeed */ -+ eError = CheckConnectionPhysHeapMem(psConnection); -+ PVR_RETURN_IF_ERROR(eError); -+ -+#if defined(SUPPORT_RGX) -+ /* Gather BVNC information to output to UM */ -+ -+ *ui64PackedBvnc = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, -+ psDevInfo->sDevFeatureCfg.ui32V, -+ psDevInfo->sDevFeatureCfg.ui32N, -+ psDevInfo->sDevFeatureCfg.ui32C); -+#else -+ *ui64PackedBvnc = 0; -+#endif /* defined(SUPPORT_RGX)*/ -+ -+ /* Clear the flags */ -+ *pui32CapabilityFlags = 0; -+ -+ psSRVData = PVRSRVGetPVRSRVData(); -+ -+ psConnection->ui32ClientFlags = ui32Flags; -+ -+ /*Set flags to pass back to the client showing which cache coherency is available.*/ -+ /* Is the system snooping of caches emulated in software? 
*/ -+ if (PVRSRVSystemSnoopingIsEmulated(psDeviceNode->psDevConfig)) -+ { -+ *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_EMULATE_FLAG; -+ } -+ else -+ { -+ /*Set flags to pass back to the client showing which cache coherency is available.*/ -+ /*Is the system CPU cache coherent?*/ -+ if (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig)) -+ { -+ *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_DEVICE_FLAG; -+ } -+ /*Is the system device cache coherent?*/ -+ if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) -+ { -+ *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_CPU_FLAG; -+ } -+ } -+ -+ /* Has the system device non-mappable local memory?*/ -+ if (PVRSRVSystemHasNonMappableLocalMemory(psDeviceNode->psDevConfig)) -+ { -+ *pui32CapabilityFlags |= PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG; -+ } -+ -+ /* Is system using FBCDC v31? */ -+ if (psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode)) -+ { -+ *pui32CapabilityFlags |= PVRSRV_FBCDC_V3_1_USED; -+ } -+ -+ /* Set flags to indicate shared-virtual-memory (SVM) allocation availability */ -+ if (! psDeviceNode->ui64GeneralSVMHeapTopVA || ! ui64ProcessVASpaceSize) -+ { -+ *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED; -+ } -+ else -+ { -+ if (ui64ProcessVASpaceSize <= psDeviceNode->ui64GeneralSVMHeapTopVA) -+ { -+ *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED; -+ } -+ else -+ { -+ /* This can happen when processor has more virtual address bits -+ than device (i.e. alloc is not always guaranteed to succeed) */ -+ *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL; -+ } -+ } -+ -+ /* Is the system DMA capable? */ -+ if (psDeviceNode->bHasSystemDMA) -+ { -+ *pui32CapabilityFlags |= PVRSRV_SYSTEM_DMA_USED; -+ } -+ -+#if defined(SUPPORT_RGX) && defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) -+ /* For GPUs with lossy TFBC support, is system using lossy control group 1? */ -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT)) -+ { -+ if (psDeviceNode->pfnGetTFBCLossyGroup(psDeviceNode) == 1) -+ { -+ *pui32CapabilityFlags |= PVRSRV_TFBC_LOSSY_GROUP_1; -+ } -+ } -+#endif -+ -+#if defined(SUPPORT_CUSTOM_OSID_EMISSION) -+{ -+ IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0; -+ IMG_BOOL bOSidAxiProtReg = IMG_FALSE; -+ -+ ui32OSid = (ui32Flags & SRV_VIRTVAL_FLAG_OSID_MASK) >> (VIRTVAL_FLAG_OSID_SHIFT); -+ ui32OSidReg = (ui32Flags & SRV_VIRTVAL_FLAG_OSIDREG_MASK) >> (VIRTVAL_FLAG_OSIDREG_SHIFT); -+ -+#if defined(EMULATOR) -+{ -+ /* AXI_ACELITE is only supported on rogue cores - volcanic cores all support full ACE -+ * and don't want to compile the code below (RGX_FEATURE_AXI_ACELITE_BIT_MASK is not -+ * defined for volcanic cores). 
-+ */ -+ -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; -+ -+#if defined(RGX_FEATURE_AXI_ACELITE_BIT_MASK) -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE)) -+#else -+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACE)) -+#endif -+ { -+ IMG_UINT32 ui32OSidAxiProtReg = 0, ui32OSidAxiProtTD = 0; -+ -+ ui32OSidAxiProtReg = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPREG_MASK) >> (VIRTVAL_FLAG_AXIPREG_SHIFT); -+ ui32OSidAxiProtTD = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPTD_MASK) >> (VIRTVAL_FLAG_AXIPTD_SHIFT); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "[AxiProt & Virt]: Setting bOSidAxiProt of Emulator's Trusted Device for Catbase %d to %s", -+ ui32OSidReg, -+ (ui32OSidAxiProtTD == 1)?"TRUE":"FALSE")); -+ -+ bOSidAxiProtReg = ui32OSidAxiProtReg == 1; -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "[AxiProt & Virt]: Setting bOSidAxiProt of FW's Register for Catbase %d to %s", -+ ui32OSidReg, -+ bOSidAxiProtReg?"TRUE":"FALSE")); -+ -+ SetAxiProtOSid(psDeviceNode->psDevConfig->hSysData, ui32OSidReg, ui32OSidAxiProtTD); -+ } -+} -+#endif /* defined(EMULATOR) */ -+ -+ /* We now know the OSid, OSidReg and bOSidAxiProtReg setting for this -+ * connection. We can access these from wherever we have a connection -+ * reference and do not need to traverse an arbitrary linked-list to -+ * obtain them. The settings are process-specific. -+ */ -+ psConnection->ui32OSid = ui32OSid; -+ psConnection->ui32OSidReg = ui32OSidReg; -+ psConnection->bOSidAxiProtReg = bOSidAxiProtReg; -+ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "[GPU Virtualization Validation]: OSIDs: %d, %d", -+ ui32OSid, -+ ui32OSidReg)); -+} -+#endif /* defined(SUPPORT_CUSTOM_OSID_EMISSION) */ -+ -+#if defined(SUPPORT_WORKLOAD_ESTIMATION) -+ /* Only enabled if enabled in the UM */ -+ if (!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_WORKLOAD_ESTIMATION_EN)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Workload Estimation disabled. Not enabled in UM", -+ __func__)); -+ } -+#endif -+ -+#if defined(SUPPORT_PDVFS) -+ /* Only enabled if enabled in the UM */ -+ if (!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_PDVFS_EN)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Proactive DVFS disabled. Not enabled in UM", -+ __func__)); -+ } -+#endif -+ -+ ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN); -+ ui32DDKBuild = PVRVERSION_BUILD; -+ -+ if (ui32Flags & SRV_FLAGS_CLIENT_64BIT_COMPAT) -+ { -+ psSRVData->sDriverInfo.ui8UMSupportedArch |= BUILD_ARCH_64BIT; -+ } -+ else -+ { -+ psSRVData->sDriverInfo.ui8UMSupportedArch |= BUILD_ARCH_32BIT; -+ } -+ -+ if (IMG_FALSE == bIsFirstConnection) -+ { -+ psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildOptions = (RGX_BUILD_OPTIONS_KM); -+ psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildOptions = ui32ClientBuildOptions; -+ -+ psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildVersion = ui32DDKVersion; -+ psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildVersion = ui32ClientDDKVersion; -+ -+ psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildRevision = ui32DDKBuild; -+ psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildRevision = ui32ClientDDKBuild; -+ -+ psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType = -+ ((RGX_BUILD_OPTIONS_KM) & OPTIONS_DEBUG_EN) ? BUILD_TYPE_DEBUG : BUILD_TYPE_RELEASE; -+ -+ psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType = -+ (ui32ClientBuildOptions & OPTIONS_DEBUG_EN) ? 
BUILD_TYPE_DEBUG : BUILD_TYPE_RELEASE; -+ -+ if (sizeof(void *) == POINTER_SIZE_64BIT) -+ { -+ psSRVData->sDriverInfo.ui8KMBitArch |= BUILD_ARCH_64BIT; -+ } -+ else -+ { -+ psSRVData->sDriverInfo.ui8KMBitArch |= BUILD_ARCH_32BIT; -+ } -+ } -+ -+ /* Masking out every option that is not kernel specific*/ -+ ui32ClientBuildOptions &= RGX_BUILD_OPTIONS_MASK_KM; -+ -+ /* -+ * Validate the build options -+ */ -+ ui32ServerBuildOptions = (RGX_BUILD_OPTIONS_KM); -+ if (ui32ServerBuildOptions != ui32ClientBuildOptions) -+ { -+ IMG_UINT32 ui32ServerBuildOptionsMismatch = ui32ServerBuildOptions ^ ui32ClientBuildOptions; -+ IMG_UINT32 ui32ClientBuildOptionsMismatch = ui32ServerBuildOptionsMismatch; -+ -+#if !defined(PVRSRV_STRICT_COMPAT_CHECK) -+ /*Mask the debug flag option out as we do support combinations of debug vs release in um & km*/ -+ ui32ServerBuildOptionsMismatch &= KM_OPTIONS_STRICT; -+ ui32ClientBuildOptionsMismatch &= UM_OPTIONS_STRICT; -+#endif -+ if ( (ui32ClientBuildOptions & ui32ClientBuildOptionsMismatch) != 0) -+ { -+ PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; " -+ "extra options present in client-side driver: (0x%x). Please check rgx_options.h", -+ __func__, -+ ui32ClientBuildOptions & ui32ClientBuildOptionsMismatch)); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH, chk_exit); -+ } -+ -+ if ( (ui32ServerBuildOptions & ui32ServerBuildOptionsMismatch) != 0) -+ { -+ PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; " -+ "extra options present in KM driver: (0x%x). Please check rgx_options.h", -+ __func__, -+ ui32ServerBuildOptions & ui32ServerBuildOptionsMismatch )); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH, chk_exit); -+ } -+ if (IMG_FALSE == bIsFirstConnection) -+ { -+ PVR_LOG(("%s: COMPAT_TEST: Client-side (0x%04x) (%s) and KM driver (0x%04x) (%s) build options differ.", -+ __func__, -+ ui32ClientBuildOptions, -+ (psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType)?"release":"debug", -+ ui32ServerBuildOptions, -+ (psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType)?"release":"debug")); -+ }else{ -+ PVR_DPF((PVR_DBG_WARNING, "%s: COMPAT_TEST: Client-side (0x%04x) and KM driver (0x%04x) build options differ.", -+ __func__, -+ ui32ClientBuildOptions, -+ ui32ServerBuildOptions)); -+ -+ } -+ if (!psSRVData->sDriverInfo.bIsNoMatch) -+ psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: Client-side and KM driver build options match. [ OK ]", __func__)); -+ } -+ -+ /* -+ * Validate DDK version -+ */ -+ if (ui32ClientDDKVersion != ui32DDKVersion) -+ { -+ if (!psSRVData->sDriverInfo.bIsNoMatch) -+ psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE; -+ PVR_LOG(("(FAIL) %s: Incompatible driver DDK version (%u.%u) / client DDK version (%u.%u).", -+ __func__, -+ PVRVERSION_MAJ, PVRVERSION_MIN, -+ PVRVERSION_UNPACK_MAJ(ui32ClientDDKVersion), -+ PVRVERSION_UNPACK_MIN(ui32ClientDDKVersion))); -+ PVR_DBG_BREAK; -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DDK_VERSION_MISMATCH, chk_exit); -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK version (%u.%u) and client DDK version (%u.%u) match. [ OK ]", -+ __func__, -+ PVRVERSION_MAJ, PVRVERSION_MIN, PVRVERSION_MAJ, PVRVERSION_MIN)); -+ } -+ -+ /* Create stream for every connection except for the special clients -+ * that don't need it e.g.: recipients of HWPerf data. 
*/ -+ if (!(psConnection->ui32ClientFlags & SRV_NO_HWPERF_CLIENT_STREAM)) -+ { -+ IMG_CHAR acStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; -+ OSSNPrintf(acStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE, -+ PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC, -+ psDeviceNode->sDevId.i32KernelDeviceID, -+ psConnection->pid); -+ -+ eError = TLStreamCreate(&psConnection->hClientTLStream, -+ acStreamName, -+ PVRSRV_CLIENT_TL_STREAM_SIZE_DEFAULT, -+ TL_OPMODE_DROP_NEWER | -+ TL_FLAG_ALLOCATE_ON_FIRST_OPEN, -+ NULL, NULL, NULL, NULL); -+ if (eError != PVRSRV_OK && eError != PVRSRV_ERROR_ALREADY_EXISTS) -+ { -+ PVR_LOG_ERROR(eError, "TLStreamCreate"); -+ psConnection->hClientTLStream = NULL; -+ } -+ else if (eError == PVRSRV_OK) -+ { -+ /* Set "tlctrl" stream as a notification channel. This channel is -+ * is used to notify recipients about stream open/close (by writer) -+ * actions (and possibly other actions in the future). */ -+ eError = TLStreamSetNotifStream(psConnection->hClientTLStream, -+ psSRVData->hTLCtrlStream); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "TLStreamSetNotifStream"); -+ TLStreamClose(psConnection->hClientTLStream); -+ psConnection->hClientTLStream = NULL; -+ } -+ } -+ -+ /* Reset error status. We don't want to propagate any errors from here. */ -+ eError = PVRSRV_OK; -+ PVR_DPF((PVR_DBG_MESSAGE, "Created stream \"%s\".", acStreamName)); -+ } -+ -+ /* -+ * Validate DDK build -+ */ -+ if (ui32ClientDDKBuild != ui32DDKBuild) -+ { -+ if (!psSRVData->sDriverInfo.bIsNoMatch) -+ psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE; -+ PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch in driver DDK revision (%d) / client DDK revision (%d).", -+ __func__, ui32DDKBuild, ui32ClientDDKBuild)); -+#if defined(PVRSRV_STRICT_COMPAT_CHECK) -+ PVR_DBG_BREAK; -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DDK_BUILD_MISMATCH, chk_exit); -+#endif -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK revision (%d) and client DDK revision (%d) match. [ OK ]", -+ __func__, ui32DDKBuild, ui32ClientDDKBuild)); -+ } -+ -+#if defined(PDUMP) -+ /* Success so far so is it the PDump client that is connecting? */ -+ if (ui32Flags & SRV_FLAGS_PDUMPCTRL) -+ { -+ if (psDeviceNode->sDevId.ui32InternalID == psSRVData->ui32PDumpBoundDevice) -+ { -+ PDumpConnectionNotify(psDeviceNode); -+ } -+ else -+ { -+ eError = PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE; -+ PVR_DPF((PVR_DBG_ERROR, "%s: PDump requested for device %u but only permitted for device %u", -+ __func__, psDeviceNode->sDevId.ui32InternalID, psSRVData->ui32PDumpBoundDevice)); -+ goto chk_exit; -+ } -+ } -+ else -+ { -+ /* Warn if the app is connecting to a device PDump won't be able to capture */ -+ if (psDeviceNode->sDevId.ui32InternalID != psSRVData->ui32PDumpBoundDevice) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: NB. 
App running on device %d won't be captured by PDump (must be on device %u)", -+ __func__, psDeviceNode->sDevId.ui32InternalID, psSRVData->ui32PDumpBoundDevice)); -+ } -+ } -+#endif -+ -+ PVR_ASSERT(pui8KernelArch != NULL); -+ -+ if (psSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT) -+ { -+ *pui8KernelArch = 64; -+ } -+ else -+ { -+ *pui8KernelArch = 32; -+ } -+ -+ bIsFirstConnection = IMG_TRUE; -+ -+#if defined(DEBUG_BRIDGE_KM) -+ { -+ int ii; -+ -+ /* dump dispatch table offset lookup table */ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: g_BridgeDispatchTableStartOffsets[0-%lu] entries:", __func__, BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT - 1)); -+ for (ii=0; ii < BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT; ii++) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "g_BridgeDispatchTableStartOffsets[%d]: %u", ii, g_BridgeDispatchTableStartOffsets[ii][PVR_DISPATCH_OFFSET_FIRST_FUNC])); -+ } -+ } -+#endif -+ -+#if defined(PDUMP) -+ if (!(ui32Flags & SRV_FLAGS_PDUMPCTRL)) -+ { -+ IMG_UINT64 ui64PDumpState = 0; -+ -+ PDumpGetStateKM(&ui64PDumpState); -+ if (ui64PDumpState & PDUMP_STATE_CONNECTED) -+ { -+ *pui32CapabilityFlags |= PVRSRV_PDUMP_IS_RECORDING; -+ } -+ } -+#endif -+ -+chk_exit: -+ return eError; -+} -+ -+PVRSRV_ERROR -+PVRSRVDisconnectKM(void) -+{ -+#if defined(INTEGRITY_OS) && defined(DEBUG_BRIDGE_KM) -+ PVRSRVPrintBridgeStats(); -+#endif -+ /* just return OK, per-process data is cleaned up by resmgr */ -+ -+ return PVRSRV_OK; -+} -+ -+/**************************************************************************/ /*! -+@Function PVRSRVAcquireGlobalEventObjectKM -+@Description Acquire the global event object. -+@Output phGlobalEventObject On success, points to the global event -+ object handle -+@Return PVRSRV_ERROR PVRSRV_OK on success or an error -+ otherwise -+*/ /***************************************************************************/ -+PVRSRV_ERROR -+PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ -+ *phGlobalEventObject = psPVRSRVData->hGlobalEventObject; -+ -+ return PVRSRV_OK; -+} -+ -+/**************************************************************************/ /*! -+@Function PVRSRVReleaseGlobalEventObjectKM -+@Description Release the global event object. -+@Output hGlobalEventObject Global event object handle -+@Return PVRSRV_ERROR PVRSRV_OK on success or an error otherwise -+*/ /***************************************************************************/ -+PVRSRV_ERROR -+PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject) -+{ -+ PVR_ASSERT(PVRSRVGetPVRSRVData()->hGlobalEventObject == hGlobalEventObject); -+ -+ return PVRSRV_OK; -+} -+ -+static void _DumpDebugUMReqPrintWrapper(void *pvPriv, const IMG_CHAR *pszFmt, ...) 
-+{ -+ va_list pvArgs; -+ PVR_UNREFERENCED_PARAMETER(pvPriv); -+ -+ va_start(pvArgs, pszFmt); -+ PVRSRVReleasePrintfVArgs(pszFmt, pvArgs); -+ va_end(pvArgs); -+} -+ -+/* -+ PVRSRVDumpDebugInfoKM -+*/ -+PVRSRV_ERROR -+PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32VerbLevel) -+{ -+ if (ui32VerbLevel > DEBUG_REQUEST_VERBOSITY_MAX) -+ { -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ PVR_LOG(("User requested PVR debug info")); -+ -+ PVRSRVDebugRequest(psDeviceNode, ui32VerbLevel, _DumpDebugUMReqPrintWrapper, NULL); -+ -+ return PVRSRV_OK; -+} -+ -+/* -+ PVRSRVGetDevClockSpeedKM -+*/ -+PVRSRV_ERROR -+PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_PUINT32 pui32RGXClockSpeed) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVR_ASSERT(psDeviceNode->pfnDeviceClockSpeed != NULL); -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ eError = psDeviceNode->pfnDeviceClockSpeed(psDeviceNode, pui32RGXClockSpeed); -+ PVR_WARN_IF_ERROR(eError, "pfnDeviceClockSpeed"); -+ -+ return eError; -+} -+ -+ -+/* -+ PVRSRVHWOpTimeoutKM -+*/ -+PVRSRV_ERROR -+PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode) -+{ -+#if defined(PVRSRV_RESET_ON_HWTIMEOUT) -+ PVR_LOG(("User requested OS reset")); -+ OSPanic(); -+#endif -+ PVR_LOG(("HW operation timeout, dump server info")); -+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); -+ return PVRSRV_OK; -+} -+ -+ -+IMG_INT -+DummyBW(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 *psBridgeIn, -+ IMG_UINT8 *psBridgeOut, -+ CONNECTION_DATA *psConnection) -+{ -+ PVR_UNREFERENCED_PARAMETER(psBridgeIn); -+ PVR_UNREFERENCED_PARAMETER(psBridgeOut); -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+#if defined(DEBUG_BRIDGE_KM) -+ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u (%s) mapped to " -+ "Dummy Wrapper (probably not what you want!)", -+ __func__, ui32DispatchTableEntry, g_BridgeDispatchTable[ui32DispatchTableEntry].pszIOCName)); -+#else -+ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u mapped to " -+ "Dummy Wrapper (probably not what you want!)", -+ __func__, ui32DispatchTableEntry)); -+#endif -+ return PVRSRV_ERROR_BRIDGE_ENOTTY; -+} -+ -+PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32AlignChecksSize, -+ IMG_UINT32 aui32AlignChecks[]) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+#if !defined(NO_HARDWARE) -+ -+ PVR_ASSERT(psDeviceNode->pfnAlignmentCheck != NULL); -+ return psDeviceNode->pfnAlignmentCheck(psDeviceNode, ui32AlignChecksSize, -+ aui32AlignChecks); -+ -+#else -+ -+ PVR_UNREFERENCED_PARAMETER(psDeviceNode); -+ PVR_UNREFERENCED_PARAMETER(ui32AlignChecksSize); -+ PVR_UNREFERENCED_PARAMETER(aui32AlignChecks); -+ -+ return PVRSRV_OK; -+ -+#endif /* !defined(NO_HARDWARE) */ -+ -+} -+ -+PVRSRV_ERROR PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 *pui32DeviceStatus) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ /* First try to update the status. 
*/ -+ if (psDeviceNode->pfnUpdateHealthStatus != NULL) -+ { -+ PVRSRV_ERROR eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, -+ IMG_FALSE); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetDeviceStatusKM: Failed to " -+ "check for device status (%d)", eError)); -+ -+ /* Return unknown status and error because we don't know what -+ * happened and if the status is valid. */ -+ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN; -+ return eError; -+ } -+ } -+ -+ switch (OSAtomicRead(&psDeviceNode->eHealthStatus)) -+ { -+ case PVRSRV_DEVICE_HEALTH_STATUS_OK: -+ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_OK; -+ return PVRSRV_OK; -+ case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: -+ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_NOT_RESPONDING; -+ return PVRSRV_OK; -+ case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: -+ case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: -+ case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: -+ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_DEVICE_ERROR; -+ return PVRSRV_OK; -+ default: -+ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN; -+ return PVRSRV_ERROR_INTERNAL_ERROR; -+ } -+} -+ -+PVRSRV_ERROR PVRSRVGetMultiCoreInfoKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32CapsSize, -+ IMG_UINT32 *pui32NumCores, -+ IMG_UINT64 *pui64Caps) -+{ -+ PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ if (ui32CapsSize > 0) -+ { -+ /* Clear the buffer to ensure no uninitialised data is returned to UM -+ * if the pfn call below does not write to the whole array, or is null. -+ */ -+ memset(pui64Caps, 0x00, (ui32CapsSize * sizeof(IMG_UINT64))); -+ } -+ -+ if (psDeviceNode->pfnGetMultiCoreInfo != NULL) -+ { -+ eError = psDeviceNode->pfnGetMultiCoreInfo(psDeviceNode, ui32CapsSize, pui32NumCores, pui64Caps); -+ } -+ return eError; -+} -+ -+ -+/*! -+ * ***************************************************************************** -+ * @brief A wrapper for removing entries in the g_BridgeDispatchTable array. -+ * All this does is zero the entry to allow for a full table re-population -+ * later. -+ * -+ * @param ui32BridgeGroup -+ * @param ui32Index -+ * -+ * @return -+ ********************************************************************************/ -+void -+UnsetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, IMG_UINT32 ui32Index) -+{ -+ ui32Index += g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC]; -+ -+ g_BridgeDispatchTable[ui32Index].pfFunction = NULL; -+ g_BridgeDispatchTable[ui32Index].hBridgeLock = NULL; -+#if defined(DEBUG_BRIDGE_KM) -+ g_BridgeDispatchTable[ui32Index].pszIOCName = NULL; -+ g_BridgeDispatchTable[ui32Index].pszFunctionName = NULL; -+ g_BridgeDispatchTable[ui32Index].pszBridgeLockName = NULL; -+ g_BridgeDispatchTable[ui32Index].ui32CallCount = 0; -+ g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0; -+ g_BridgeDispatchTable[ui32Index].ui64TotalTimeNS = 0; -+ g_BridgeDispatchTable[ui32Index].ui64MaxTimeNS = 0; -+#endif -+} -+ -+/*! -+ * ***************************************************************************** -+ * @brief A wrapper for filling in the g_BridgeDispatchTable array that does -+ * error checking. 
-+ * -+ * @param ui32Index -+ * @param pszIOCName -+ * @param pfFunction -+ * @param pszFunctionName -+ * -+ * @return -+ ********************************************************************************/ -+void -+_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, -+ IMG_UINT32 ui32Index, -+ const IMG_CHAR *pszIOCName, -+ BridgeWrapperFunction pfFunction, -+ const IMG_CHAR *pszFunctionName, -+ POS_LOCK hBridgeLock, -+ const IMG_CHAR *pszBridgeLockName) -+{ -+ static IMG_UINT32 ui32PrevIndex = IMG_UINT32_MAX; /* -1 */ -+ -+#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM) -+ PVR_UNREFERENCED_PARAMETER(pszFunctionName); -+ PVR_UNREFERENCED_PARAMETER(pszBridgeLockName); -+#endif -+ -+ ui32Index += g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC]; -+ -+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) -+ /* Enable this to dump out the dispatch table entries */ -+ PVR_DPF((PVR_DBG_WARNING, "%s: g_BridgeDispatchTableStartOffsets[%d]=%d", __func__, ui32BridgeGroup, g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC])); -+ PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s %s", __func__, ui32Index, pszIOCName, pszFunctionName, pszBridgeLockName)); -+#endif -+ -+ /* Any gaps are sub-optimal in-terms of memory usage, but we are mainly -+ * interested in spotting any large gap of wasted memory that could be -+ * accidentally introduced. -+ * -+ * This will currently flag up any gaps > 5 entries. -+ * -+ * NOTE: This shouldn't be debug only since switching from debug->release -+ * etc is likely to modify the available ioctls and thus be a point where -+ * mistakes are exposed. This isn't run at a performance critical time. -+ */ -+ if ((ui32PrevIndex != IMG_UINT32_MAX) && -+ ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) || -+ (ui32Index <= ui32PrevIndex))) -+ { -+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: There is a gap in the dispatch table between indices %u (%s) and %u (%s)", -+ __func__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName, -+ ui32Index, pszIOCName)); -+#else -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: There is a gap in the dispatch table between indices %u and %u (%s)", -+ __func__, (IMG_UINT)ui32PrevIndex, (IMG_UINT)ui32Index, pszIOCName)); -+#endif -+ } -+ -+ if (ui32Index >= BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Index %u (%s) out of range", -+ __func__, (IMG_UINT)ui32Index, pszIOCName)); -+ -+#if defined(DEBUG_BRIDGE_KM) -+ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE_DISPATCH_TABLE_ENTRY_COUNT = %lu", -+ __func__, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)); -+#if defined(SUPPORT_RGX) -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST = %lu", -+ __func__, PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST)); -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST = %lu", -+ __func__, PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST)); -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST = %lu", -+ __func__, PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST)); -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST = %lu", -+ __func__, PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST)); -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST = %lu", -+ __func__, PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST)); -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST = %lu", -+ __func__, PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST)); -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST = 
%lu", -+ __func__, PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST)); -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST = %lu", -+ __func__, PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST)); -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST = %lu", -+ __func__, PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST)); -+ -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_DISPATCH_LAST = %lu", -+ __func__, PVRSRV_BRIDGE_RGX_DISPATCH_LAST)); -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_LAST = %lu", -+ __func__, PVRSRV_BRIDGE_RGX_LAST)); -+#endif -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_LAST = %lu", -+ __func__, PVRSRV_BRIDGE_LAST)); -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST = %lu", -+ __func__, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST)); -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST = %lu", -+ __func__, PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST)); -+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST = %lu", -+ __func__, PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST)); -+#endif -+ -+ OSPanic(); -+ } -+ -+ /* Panic if the previous entry has been overwritten as this is not allowed! -+ * NOTE: This shouldn't be debug only since switching from debug->release -+ * etc is likely to modify the available ioctls and thus be a point where -+ * mistakes are exposed. This isn't run at a performance critical time. -+ */ -+ if (g_BridgeDispatchTable[ui32Index].pfFunction) -+ { -+ if (g_BridgeDispatchTable[ui32Index].pfFunction != pfFunction) -+ { -+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Adding dispatch table entry for %s clobbers an existing entry for %s (current pfn=<%p>, new pfn=<%p>)", -+ __func__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName, -+ (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction)); -+#else -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Adding dispatch table entry for %s clobbers an existing entry (index=%u). 
(current pfn=<%p>, new pfn=<%p>)", -+ __func__, pszIOCName, ui32Index, -+ (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction)); -+ PVR_DPF((PVR_DBG_WARNING, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.")); -+#endif -+ OSPanic(); -+ } -+ } -+ else -+ { -+ g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction; -+ g_BridgeDispatchTable[ui32Index].hBridgeLock = hBridgeLock; -+#if defined(DEBUG_BRIDGE_KM) -+ g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName; -+ g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName; -+ g_BridgeDispatchTable[ui32Index].pszBridgeLockName = pszBridgeLockName; -+ g_BridgeDispatchTable[ui32Index].ui32CallCount = 0; -+ g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0; -+ g_BridgeDispatchTable[ui32Index].ui64TotalTimeNS = 0; -+ g_BridgeDispatchTable[ui32Index].ui64MaxTimeNS = 0; -+#endif -+ } -+ -+ ui32PrevIndex = ui32Index; -+} -+ -+static PVRSRV_ERROR _BridgeBufferAlloc(void *pvPrivData, void **pvOut) -+{ -+ PVR_UNREFERENCED_PARAMETER(pvPrivData); -+ -+ *pvOut = OSAllocZMem(PVRSRV_MAX_BRIDGE_IN_SIZE + -+ PVRSRV_MAX_BRIDGE_OUT_SIZE); -+ PVR_RETURN_IF_NOMEM(*pvOut); -+ -+ return PVRSRV_OK; -+} -+ -+static void _BridgeBufferFree(void *pvPrivData, void *pvFreeData) -+{ -+ PVR_UNREFERENCED_PARAMETER(pvPrivData); -+ -+ OSFreeMem(pvFreeData); -+} -+ -+PVRSRV_ERROR BridgeDispatcherInit(void) -+{ -+ PVRSRV_ERROR eError; -+ -+#if defined(DEBUG_BRIDGE_KM) -+ eError = OSLockCreate(&g_hStatsLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", errorLockCreateFailed); -+#endif -+ -+ eError = PVRSRVPoolCreate(_BridgeBufferAlloc, -+ _BridgeBufferFree, -+ PVRSRV_MAX_POOLED_BRIDGE_BUFFERS, -+ "Bridge buffer pool", -+ NULL, -+ &g_psBridgeBufferPool); -+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPoolCreate", erroPoolCreateFailed); -+ -+ return PVRSRV_OK; -+ -+erroPoolCreateFailed: -+#if defined(DEBUG_BRIDGE_KM) -+ OSLockDestroy(g_hStatsLock); -+ g_hStatsLock = NULL; -+errorLockCreateFailed: -+#endif -+ return eError; -+} -+ -+void BridgeDispatcherDeinit(void) -+{ -+ if (g_psBridgeBufferPool) -+ { -+ PVRSRVPoolDestroy(g_psBridgeBufferPool); -+ g_psBridgeBufferPool = NULL; -+ } -+ -+#if defined(DEBUG_BRIDGE_KM) -+ if (g_hStatsLock) -+ { -+ OSLockDestroy(g_hStatsLock); -+ g_hStatsLock = NULL; -+ } -+#endif -+} -+ -+PVRSRV_ERROR BridgedDispatchKM(CONNECTION_DATA * psConnection, -+ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM) -+{ -+ -+ void * psBridgeIn=NULL; -+ void * psBridgeOut=NULL; -+ BridgeWrapperFunction pfBridgeHandler; -+ IMG_UINT32 ui32DispatchTableEntry, ui32GroupBoundary; -+ PVRSRV_ERROR err = PVRSRV_OK; -+#if !defined(INTEGRITY_OS) -+ PVRSRV_POOL_TOKEN hBridgeBufferPoolToken = NULL; -+#endif -+ IMG_UINT32 ui32Timestamp = OSClockus(); -+#if defined(DEBUG_BRIDGE_KM) -+ IMG_UINT64 ui64TimeStart; -+ IMG_UINT64 ui64TimeEnd; -+ IMG_UINT64 ui64TimeDiff; -+#endif -+ IMG_UINT32 ui32DispatchTableIndex, ui32DispatchTableEntryIndex; -+ -+#if defined(DEBUG_BRIDGE_KM_STOP_AT_DISPATCH) -+ PVR_DBG_BREAK; -+#endif -+ -+#if !defined(PVRSRV_ENABLE_HTB) -+ PVR_UNREFERENCED_PARAMETER(ui32Timestamp); -+#endif -+ -+ if (psBridgePackageKM->ui32BridgeID >= BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Out of range dispatch table group ID: %d", -+ __func__, psBridgePackageKM->ui32BridgeID)); -+ PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EINVAL, return_error); -+ } -+ -+ ui32DispatchTableIndex = OSConfineArrayIndexNoSpeculation(psBridgePackageKM->ui32BridgeID, 
BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT); -+ -+ ui32DispatchTableEntry = g_BridgeDispatchTableStartOffsets[ui32DispatchTableIndex][PVR_DISPATCH_OFFSET_FIRST_FUNC]; -+ ui32GroupBoundary = g_BridgeDispatchTableStartOffsets[ui32DispatchTableIndex][PVR_DISPATCH_OFFSET_LAST_FUNC]; -+ -+ /* bridge function is not implemented in this build */ -+ if (0 == ui32DispatchTableEntry) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)", -+ __func__, -+ ui32DispatchTableEntry, -+ ui32GroupBoundary, -+ psBridgePackageKM->ui32BridgeID, -+ psBridgePackageKM->ui32FunctionID)); -+ /* this points to DummyBW() which returns PVRSRV_ERROR_ENOTTY */ -+ err = g_BridgeDispatchTable[ui32DispatchTableEntry].pfFunction(ui32DispatchTableEntry, -+ psBridgeIn, -+ psBridgeOut, -+ psConnection); -+ goto return_error; -+ } -+ if ((ui32DispatchTableEntry + psBridgePackageKM->ui32FunctionID) > ui32GroupBoundary) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)", -+ __func__, -+ ui32DispatchTableEntry, -+ ui32GroupBoundary, -+ psBridgePackageKM->ui32BridgeID, -+ psBridgePackageKM->ui32FunctionID)); -+ PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EINVAL, return_error); -+ } -+ ui32DispatchTableEntry += psBridgePackageKM->ui32FunctionID; -+ ui32DispatchTableEntryIndex = OSConfineArrayIndexNoSpeculation(ui32DispatchTableEntry, ui32GroupBoundary+1); -+ if (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT <= ui32DispatchTableEntry) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Dispatch table entry=%d, entry count = %lu," -+ " (bridge module %d, function %d)", __func__, -+ ui32DispatchTableEntry, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT, -+ psBridgePackageKM->ui32BridgeID, -+ psBridgePackageKM->ui32FunctionID)); -+ PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EINVAL, return_error); -+ } -+ -+#if defined(DEBUG_BRIDGE_KM) -+ BridgeGlobalStatsLock(); -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Dispatch table entry index=%d, (bridge module %d, function %d)", -+ __func__, -+ ui32DispatchTableEntryIndex, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID)); -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: %s", -+ __func__, -+ g_BridgeDispatchTable[ui32DispatchTableEntryIndex].pszIOCName)); -+ -+ g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui32CallCount++; -+ g_BridgeGlobalStats.ui32IOCTLCount++; -+ BridgeGlobalStatsUnlock(); -+#endif -+ -+ if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock != NULL) -+ { -+ OSLockAcquire(g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock); -+ } -+#if !defined(INTEGRITY_OS) -+ /* try to acquire a bridge buffer from the pool */ -+ -+ err = PVRSRVPoolGet(g_psBridgeBufferPool, -+ &hBridgeBufferPoolToken, -+ &psBridgeIn); -+ PVR_LOG_GOTO_IF_ERROR(err, "PVRSRVPoolGet", unlock_and_return_error); -+ -+ psBridgeOut = ((IMG_BYTE *) psBridgeIn) + PVRSRV_MAX_BRIDGE_IN_SIZE; -+#endif -+ -+#if defined(DEBUG_BRIDGE_KM) -+ ui64TimeStart = OSClockns64(); -+#endif -+ -+ if (psBridgePackageKM->ui32InBufferSize > PVRSRV_MAX_BRIDGE_IN_SIZE) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Bridge input buffer too small " -+ "(data size %u, buffer size %u)!", __func__, -+ psBridgePackageKM->ui32InBufferSize, PVRSRV_MAX_BRIDGE_IN_SIZE)); -+ PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_ERANGE, unlock_and_return_error); -+ } -+ -+#if !defined(INTEGRITY_OS) -+ if (psBridgePackageKM->ui32OutBufferSize > PVRSRV_MAX_BRIDGE_OUT_SIZE) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Bridge output buffer too small " -+ "(data size %u, buffer 
size %u)!", __func__, -+ psBridgePackageKM->ui32OutBufferSize, PVRSRV_MAX_BRIDGE_OUT_SIZE)); -+ PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_ERANGE, unlock_and_return_error); -+ } -+ -+ if ((CopyFromUserWrapper (psConnection, -+ ui32DispatchTableEntryIndex, -+ psBridgeIn, -+ psBridgePackageKM->pvParamIn, -+ psBridgePackageKM->ui32InBufferSize) != PVRSRV_OK) -+#if defined(__QNXNTO__) -+/* For Neutrino, the output bridge buffer acts as an input as well */ -+ || (CopyFromUserWrapper(psConnection, -+ ui32DispatchTableEntryIndex, -+ psBridgeOut, -+ (void *)((uintptr_t)psBridgePackageKM->pvParamIn + psBridgePackageKM->ui32InBufferSize), -+ psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK) -+#endif -+ ) /* end of if-condition */ -+ { -+ PVR_LOG_GOTO_WITH_ERROR("CopyFromUserWrapper", err, PVRSRV_ERROR_BRIDGE_EFAULT, unlock_and_return_error); -+ } -+#else -+ psBridgeIn = psBridgePackageKM->pvParamIn; -+ psBridgeOut = psBridgePackageKM->pvParamOut; -+#endif -+ -+ pfBridgeHandler = -+ (BridgeWrapperFunction)g_BridgeDispatchTable[ui32DispatchTableEntryIndex].pfFunction; -+ -+ if (pfBridgeHandler == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: ui32DispatchTableEntry = %d is not a registered function!", -+ __func__, ui32DispatchTableEntry)); -+ PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EFAULT, unlock_and_return_error); -+ } -+ -+ /* pfBridgeHandler functions do not fail and return an IMG_INT. -+ * The value returned is either 0 or PVRSRV_OK (0). -+ * In the event this changes an error may be +ve or -ve, -+ * so try to return something consistent here. -+ */ -+ if (0 != pfBridgeHandler(ui32DispatchTableEntryIndex, -+ psBridgeIn, -+ psBridgeOut, -+ psConnection) -+ ) -+ { -+ PVR_LOG_GOTO_WITH_ERROR("pfBridgeHandler", err, PVRSRV_ERROR_BRIDGE_EPERM, unlock_and_return_error); -+ } -+ -+ /* -+ This should always be true as a.t.m. all bridge calls have to -+ return an error message, but this could change so we do this -+ check to be safe. 
-+ */ -+#if !defined(INTEGRITY_OS) -+ if (psBridgePackageKM->ui32OutBufferSize > 0) -+ { -+ if (CopyToUserWrapper (psConnection, -+ ui32DispatchTableEntryIndex, -+ psBridgePackageKM->pvParamOut, -+ psBridgeOut, -+ psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK) -+ { -+ PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EFAULT, unlock_and_return_error); -+ } -+ } -+#endif -+ -+#if defined(DEBUG_BRIDGE_KM) -+ ui64TimeEnd = OSClockns64(); -+ -+ ui64TimeDiff = ui64TimeEnd - ui64TimeStart; -+ -+ BridgeGlobalStatsLock(); -+ -+ g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64TotalTimeNS += ui64TimeDiff; -+ -+ if (ui64TimeDiff > g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64MaxTimeNS) -+ { -+ g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64MaxTimeNS = ui64TimeDiff; -+ } -+ -+ BridgeGlobalStatsUnlock(); -+#endif -+ -+unlock_and_return_error: -+ -+ if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock != NULL) -+ { -+ OSLockRelease(g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock); -+ } -+ -+#if !defined(INTEGRITY_OS) -+ if (hBridgeBufferPoolToken != NULL) -+ { -+ err = PVRSRVPoolPut(g_psBridgeBufferPool, -+ hBridgeBufferPoolToken); -+ PVR_LOG_IF_ERROR(err, "PVRSRVPoolPut"); -+ } -+#endif -+ -+return_error: -+ if (err) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: returning (err = %d)", __func__, err)); -+ } -+ /* ignore transport layer bridge to avoid HTB flooding */ -+ if (psBridgePackageKM->ui32BridgeID != PVRSRV_BRIDGE_PVRTL) -+ { -+ if (err) -+ { -+ HTBLOGK(HTB_SF_BRG_BRIDGE_CALL_ERR, ui32Timestamp, -+ psBridgePackageKM->ui32BridgeID, -+ psBridgePackageKM->ui32FunctionID, err); -+ } -+ else -+ { -+ HTBLOGK(HTB_SF_BRG_BRIDGE_CALL, ui32Timestamp, -+ psBridgePackageKM->ui32BridgeID, -+ psBridgePackageKM->ui32FunctionID); -+ } -+ } -+ -+ return err; -+} -+ -+PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT64 *pui64MemStatArray) -+{ -+#if !defined(__QNXNTO__) -+ return PVRSRVFindProcessMemStats(pid, -+ ui32ArrSize, -+ bAllProcessStats, -+ pui64MemStatArray); -+#else -+ PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform")); -+ -+ return PVRSRV_ERROR_NOT_SUPPORTED; -+#endif -+ -+} -+ -+void PVRSRVBlockIfFrozen(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ /* Short-circuit if we're not marked as frozen */ -+ if (OSAtomicRead(&psDevNode->eFrozen) == 0) -+ { -+ return; -+ } -+ -+ while ((psDevNode->eDevState == PVRSRV_DEVICE_STATE_FROZEN) && -+ ((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_TIMEOUT))) -+ { -+ IMG_HANDLE hEvent; -+ -+ eError = OSEventObjectOpen(psDevNode->hDeviceThreadEvObj, &hEvent); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Failed to open event object (%d)", __func__, -+ eError)); -+ -+ /* Continue with the loop by resetting to PVRSRV_OK. This makes it -+ * a busy wait (with a 10ms delay) if we cannot grab an EvObj -+ * reference. -+ */ -+ eError = PVRSRV_OK; -+ OSSleepms(10U); /* Allow failure (OOM etc.) to resolve */ -+ continue; -+ } -+ -+ if (hEvent != NULL) -+ { -+ /* Register that we've got an interest in the device */ -+ (void) OSAtomicIncrement(&psDevNode->iFreezeCount); -+ -+ eError = OSEventObjectWait(hEvent); -+ -+ OSEventObjectClose(hEvent); -+ -+ (void) OSAtomicDecrement(&psDevNode->iFreezeCount); -+ } -+ } -+ -+ /* We expect to exit the above loop only when the device is no longer -+ * FROZEN. If we are still marked as frozen that is an unexpected error -+ * so log it. 
-+ */ -+ if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_FROZEN) -+ { -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectWait"); -+ if (eError != PVRSRV_OK) -+ { -+ OSDumpStack(); -+ } -+ } -+} -diff --git a/drivers/gpu/drm/img-rogue/srvcore.h b/drivers/gpu/drm/img-rogue/srvcore.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/srvcore.h -@@ -0,0 +1,240 @@ -+/**************************************************************************/ /*! -+@File -+@Title PVR Bridge Functionality -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header for the PVR Bridge code -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /***************************************************************************/ -+ -+#ifndef SRVCORE_H -+#define SRVCORE_H -+ -+#include "lock_types.h" -+#include "connection_server.h" -+#include "pvr_debug.h" -+ -+#include "pvr_bridge.h" -+#if defined(SUPPORT_RGX) -+#include "rgx_bridge.h" -+#endif -+ -+PVRSRV_ERROR -+CopyFromUserWrapper(CONNECTION_DATA *psConnection, -+ IMG_UINT32 ui32DispatchTableEntry, -+ void *pvDest, -+ void __user *pvSrc, -+ IMG_UINT32 ui32Size); -+PVRSRV_ERROR -+CopyToUserWrapper(CONNECTION_DATA *psConnection, -+ IMG_UINT32 ui32DispatchTableEntry, -+ void __user *pvDest, -+ void *pvSrc, -+ IMG_UINT32 ui32Size); -+ -+IMG_INT -+DummyBW(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 *psBridgeIn, -+ IMG_UINT8 *psBridgeOut, -+ CONNECTION_DATA *psConnection); -+ -+typedef PVRSRV_ERROR (*ServerResourceDestroyFunction)(IMG_HANDLE, IMG_HANDLE); -+ -+typedef IMG_INT (*BridgeWrapperFunction)(IMG_UINT32 ui32DispatchTableEntry, -+ IMG_UINT8 *psBridgeIn, -+ IMG_UINT8 *psBridgeOut, -+ CONNECTION_DATA *psConnection); -+ -+typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY -+{ -+ BridgeWrapperFunction pfFunction; /*!< The wrapper function that validates the ioctl -+ arguments before calling into srvkm proper */ -+ POS_LOCK hBridgeLock; /*!< The bridge lock which needs to be acquired -+ before calling the above wrapper */ -+#if defined(DEBUG_BRIDGE_KM) -+ const IMG_CHAR *pszIOCName; /*!< Name of the ioctl: e.g. "PVRSRV_BRIDGE_CONNECT_SERVICES" */ -+ const IMG_CHAR *pszFunctionName; /*!< Name of the wrapper function: e.g. "PVRSRVConnectBW" */ -+ const IMG_CHAR *pszBridgeLockName; /*!< Name of bridge lock which will be acquired */ -+ IMG_UINT32 ui32CallCount; /*!< The total number of times the ioctl has been called */ -+ IMG_UINT32 ui32CopyFromUserTotalBytes; /*!< The total number of bytes copied from -+ userspace within this ioctl */ -+ IMG_UINT32 ui32CopyToUserTotalBytes; /*!< The total number of bytes copied from -+ userspace within this ioctl */ -+ IMG_UINT64 ui64TotalTimeNS; /*!< The total amount of time spent in this bridge function */ -+ IMG_UINT64 ui64MaxTimeNS; /*!< The maximum amount of time for a single call to this bridge function */ -+#endif -+}PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY; -+ -+#if defined(SUPPORT_RGX) -+ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_RGX_DISPATCH_LAST+1) -+ #define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT (PVRSRV_BRIDGE_RGX_LAST+1) -+#else -+ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_DISPATCH_LAST+1) -+ #define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT (PVRSRV_BRIDGE_LAST+1) -+#endif -+ -+extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT]; -+ -+void BridgeDispatchTableStartOffsetsInit(void); -+ -+void -+_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, -+ IMG_UINT32 ui32Index, -+ const IMG_CHAR *pszIOCName, -+ BridgeWrapperFunction pfFunction, -+ const IMG_CHAR *pszFunctionName, -+ POS_LOCK hBridgeLock, -+ const IMG_CHAR* pszBridgeLockName); -+void -+UnsetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, -+ IMG_UINT32 ui32Index); -+ -+ -+/* PRQA S 0884,3410 2*/ /* macro relies on the lack of brackets */ -+#define SetDispatchTableEntry(ui32BridgeGroup, ui32Index, pfFunction,\ -+ hBridgeLock) \ -+ _SetDispatchTableEntry(ui32BridgeGroup, ui32Index, #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction,\ -+ (POS_LOCK)hBridgeLock, #hBridgeLock) -+ -+#define DISPATCH_TABLE_GAP_THRESHOLD 5 -+ -+ -+#if defined(DEBUG_BRIDGE_KM) -+typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS -+{ -+ 
IMG_UINT32 ui32IOCTLCount; -+ IMG_UINT32 ui32TotalCopyFromUserBytes; -+ IMG_UINT32 ui32TotalCopyToUserBytes; -+} PVRSRV_BRIDGE_GLOBAL_STATS; -+ -+void BridgeGlobalStatsLock(void); -+void BridgeGlobalStatsUnlock(void); -+ -+/* OS specific code may want to report the stats held here and within the -+ * BRIDGE_DISPATCH_TABLE_ENTRYs (E.g. on Linux we report these via a -+ * debugfs entry /(sys/kernel/debug|proc)/pvr/bridge_stats) */ -+extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats; -+#endif -+ -+PVRSRV_ERROR BridgeDispatcherInit(void); -+void BridgeDispatcherDeinit(void); -+ -+PVRSRV_ERROR -+BridgedDispatchKM(CONNECTION_DATA * psConnection, -+ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM); -+ -+PVRSRV_ERROR -+PVRSRVConnectKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT32 ui32Flags, -+ IMG_UINT32 ui32ClientBuildOptions, -+ IMG_UINT32 ui32ClientDDKVersion, -+ IMG_UINT32 ui32ClientDDKBuild, -+ IMG_UINT8 *pui8KernelArch, -+ IMG_UINT32 *ui32CapabilityFlags, -+ IMG_UINT64 *ui64PackedBvnc); -+ -+PVRSRV_ERROR -+PVRSRVDisconnectKM(void); -+ -+PVRSRV_ERROR -+PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject); -+ -+PVRSRV_ERROR -+PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject); -+ -+PVRSRV_ERROR -+PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32VerbLevel); -+ -+PVRSRV_ERROR -+PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_PUINT32 pui32RGXClockSpeed); -+ -+PVRSRV_ERROR -+PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE * psDeviceNode, -+ IMG_UINT32 ui32FWAlignChecksSize, -+ IMG_UINT32 aui32FWAlignChecks[]); -+ -+PVRSRV_ERROR PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 *pui32DeviceStatus); -+ -+PVRSRV_ERROR PVRSRVGetMultiCoreInfoKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ IMG_UINT32 ui32CapsSize, -+ IMG_UINT32 *pui32NumCores, -+ IMG_UINT64 *pui64Caps); -+ -+PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, -+ IMG_UINT32 ui32ArrSize, -+ IMG_BOOL bAllProcessStats, -+ IMG_UINT64 *pui64MemoryStats); -+ -+static INLINE -+PVRSRV_ERROR DestroyServerResource(const SHARED_DEV_CONNECTION hConnection, -+ IMG_HANDLE hEvent, -+ ServerResourceDestroyFunction pfnDestroyCall, -+ IMG_HANDLE hResource) -+{ -+ PVR_UNREFERENCED_PARAMETER(hEvent); -+ -+ return pfnDestroyCall(GetBridgeHandle(hConnection), hResource); -+} -+ -+/*************************************************************************/ /*! -+@Function PVRSRVBlockIfFrozen -+@Description Puts caller into a blocking wait (using event-objects) if the -+ specified device is in a FROZEN state. Routine completes and -+ returns control to the caller once the underlying device state -+ clears its FROZEN state. -+@Input psDeviceNode Device Node reference -+@Return Nothing. Execution blocks if device is frozen. 
-+*/ /**************************************************************************/ -+void PVRSRVBlockIfFrozen(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+#endif /* SRVCORE_H */ -+ -+/****************************************************************************** -+ End of file (srvcore.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/srvinit.h b/drivers/gpu/drm/img-rogue/srvinit.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/srvinit.h -@@ -0,0 +1,68 @@ -+/*************************************************************************/ /*! -+@File -+@Title Initialisation server internal header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Defines the connections between the various parts of the -+ initialisation server. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+ -+#ifndef SRVINIT_H -+#define SRVINIT_H -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+#include "device_connection.h" -+#include "device.h" -+ -+#if defined(SUPPORT_RGX) -+PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode); -+#endif -+ -+#if defined(__cplusplus) -+} -+#endif -+#endif /* SRVINIT_H */ -+ -+/****************************************************************************** -+ End of file (srvinit.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/srvkm.h b/drivers/gpu/drm/img-rogue/srvkm.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/srvkm.h -@@ -0,0 +1,144 @@ -+/**************************************************************************/ /*! -+@File -+@Title Services kernel module internal header file -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+#ifndef SRVKM_H -+#define SRVKM_H -+ -+#include "servicesext.h" -+ -+#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__) -+#define __pvrsrv_defined_struct_enum__ -+#include <services_kernel_client.h> -+#endif -+ -+struct _PVRSRV_DEVICE_NODE_; -+ -+/*************************************************************************/ /*!
-+@Function PVRSRVCommonDriverInit -+@Description Performs one time driver initialisation of Services Common and -+ Device layers. -+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise -+*/ /**************************************************************************/ -+PVRSRV_ERROR PVRSRVCommonDriverInit(void); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVCommonDriverInit -+@Description Performs one time driver de-initialisation of Services. -+@Return void -+*/ /**************************************************************************/ -+void PVRSRVCommonDriverDeInit(void); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVCommonDeviceCreate -+@Description Creates and initialises a common layer Services device node -+ for an OS native device. First stage device discovery. -+@Input pvOSDevice OS native device -+@Input i32KernelDeviceID A unique identifier which helps recognise this -+ Device in the UM space provided by the OS. -+@Output ppsDeviceNode Points to the new device node on success -+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+PVRSRVCommonDeviceCreate(void *pvOSDevice, IMG_INT32 i32KernelDeviceID, -+ struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVCommonDeviceInitialise -+@Description Initialises the device layer specifics (e.g. boot FW etc) -+ for the supplied device node, created previously by -+ PVRSRVCommonDeviceCreate. The device is ready for use when this -+ second stage device initialisation returns successfully. -+@Input psDeviceNode Device node of the device to be initialised -+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise -+*/ /**************************************************************************/ -+PVRSRV_ERROR PVRSRVCommonDeviceInitialise(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); -+ -+/*************************************************************************/ /*! -+@Function PVRSRVCommonDeviceDestroy -+@Description Destroys a PVR Services device node. -+@Input psDeviceNode Device node to destroy -+*/ /**************************************************************************/ -+void -+PVRSRVCommonDeviceDestroy(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); -+ -+/****************** -+HIGHER LEVEL MACROS -+*******************/ -+ -+/*---------------------------------------------------------------------------- -+Repeats the body of the loop for a certain minimum time, or until the body -+exits by its own means (break, return, goto, etc.) -+ -+Example of usage: -+ -+LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) -+{ -+ if (psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset) -+ { -+ bTimeout = IMG_FALSE; -+ break; -+ } -+ -+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); -+} END_LOOP_UNTIL_TIMEOUT(); -+ -+-----------------------------------------------------------------------------*/ -+ -+/* uiNotLastLoop will remain at 1 until the timeout has expired, at which time -+ * it will be decremented and the loop executed one final time. This is -+ * necessary when preemption is enabled. 
-+ */ -+/* PRQA S 3411,3431 12 */ /* critical format, leave alone */ -+#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \ -+{\ -+ IMG_UINT32 uiOffset, uiStart, uiCurrent; \ -+ IMG_INT32 iNotLastLoop; \ -+ for (uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, iNotLastLoop = 1;\ -+ ((uiCurrent - uiStart + uiOffset) < (TIMEOUT)) || iNotLastLoop--; \ -+ uiCurrent = OSClockus(), \ -+ uiOffset = uiCurrent < uiStart ? IMG_UINT32_MAX - uiStart : uiOffset, \ -+ uiStart = uiCurrent < uiStart ? 0 : uiStart) -+ -+#define END_LOOP_UNTIL_TIMEOUT() \ -+} -+ -+#endif /* SRVKM_H */ -diff --git a/drivers/gpu/drm/img-rogue/sync.c b/drivers/gpu/drm/img-rogue/sync.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/sync.c -@@ -0,0 +1,824 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services synchronisation interface -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements client side code for services synchronisation -+ interface -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ */ /**************************************************************************/ -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "client_sync_bridge.h" -+#include "client_synctracking_bridge.h" -+#include "info_page_client.h" -+#include "pvr_bridge.h" -+#include "allocmem.h" -+#include "osfunc.h" -+#include "devicemem.h" -+#include "devicemem_pdump.h" -+#include "pvr_debug.h" -+#include "dllist.h" -+#include "sync.h" -+#include "sync_internal.h" -+#include "lock.h" -+#include "log2.h" -+#if defined(__KERNEL__) -+#include "pvrsrv.h" -+#include "srvcore.h" -+#else -+#include "srvcore_intern.h" -+#endif -+ -+ -+#define SYNC_BLOCK_LIST_CHUNCK_SIZE 10 -+ -+/* -+ This defines the maximum amount of synchronisation memory -+ that can be allocated per SyncPrim context. -+ In reality this number is meaningless as we would run out -+ of synchronisation memory before we reach this limit, but -+ we need to provide a size to the span RA. -+ */ -+#define MAX_SYNC_MEM (4 * 1024 * 1024) -+ -+/* forward declaration */ -+static PVRSRV_ERROR -+_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value); -+ -+/* -+ Internal interfaces for management of SYNC_PRIM_CONTEXT -+ */ -+static void -+_SyncPrimContextUnref(SYNC_PRIM_CONTEXT *psContext) -+{ -+ if (!OSAtomicRead(&psContext->hRefCount)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: context already freed", __func__)); -+ } -+ else if (0 == OSAtomicDecrement(&psContext->hRefCount)) -+ { -+ /* SyncPrimContextDestroy only when no longer referenced */ -+ RA_Delete(psContext->psSpanRA); -+ RA_Delete(psContext->psSubAllocRA); -+ OSFreeMem(psContext); -+ } -+} -+ -+static void -+_SyncPrimContextRef(SYNC_PRIM_CONTEXT *psContext) -+{ -+ if (!OSAtomicRead(&psContext->hRefCount)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: context use after free", __func__)); -+ } -+ else -+ { -+ OSAtomicIncrement(&psContext->hRefCount); -+ } -+} -+ -+/* -+ Internal interfaces for management of synchronisation block memory -+ */ -+static PVRSRV_ERROR -+AllocSyncPrimitiveBlock(SYNC_PRIM_CONTEXT *psContext, -+ SYNC_PRIM_BLOCK **ppsSyncBlock) -+{ -+ SYNC_PRIM_BLOCK *psSyncBlk; -+ IMG_HANDLE hSyncPMR; -+ IMG_HANDLE hSyncImportHandle; -+ IMG_DEVMEM_SIZE_T uiImportSize; -+ PVRSRV_ERROR eError; -+ -+ psSyncBlk = OSAllocMem(sizeof(SYNC_PRIM_BLOCK)); -+ PVR_GOTO_IF_NOMEM(psSyncBlk, eError, fail_alloc); -+ -+ psSyncBlk->psContext = psContext; -+ -+ /* Allocate sync prim block */ -+ eError = BridgeAllocSyncPrimitiveBlock(GetBridgeHandle(psContext->hDevConnection), -+ &psSyncBlk->hServerSyncPrimBlock, -+ &psSyncBlk->ui32FirmwareAddr, -+ &psSyncBlk->ui32SyncBlockSize, -+ &hSyncPMR); -+ PVR_GOTO_IF_ERROR(eError, fail_blockalloc); -+ -+ /* Make it mappable by the client */ -+ eError = DevmemMakeLocalImportHandle(psContext->hDevConnection, -+ hSyncPMR, -+ &hSyncImportHandle); -+ PVR_GOTO_IF_ERROR(eError, fail_export); -+ -+ /* Get CPU mapping of the memory block */ -+ eError = DevmemLocalImport(psContext->hDevConnection, -+ hSyncImportHandle, -+ PVRSRV_MEMALLOCFLAG_CPU_READABLE, -+ &psSyncBlk->hMemDesc, -+ &uiImportSize, -+ "SyncPrimitiveBlock"); -+ -+ /* -+ Regardless of success or failure we "undo" the export -+ */ -+ DevmemUnmakeLocalImportHandle(psContext->hDevConnection, -+ hSyncImportHandle); -+ -+ PVR_GOTO_IF_ERROR(eError, fail_import); -+ -+ eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc, -+ (void **) &psSyncBlk->pui32LinAddr); -+ PVR_GOTO_IF_ERROR(eError, fail_cpuvaddr); -+ -+ *ppsSyncBlock = psSyncBlk; -+ return PVRSRV_OK; -+ -+fail_cpuvaddr: -+ DevmemFree(psSyncBlk->hMemDesc); 
-+fail_import: -+fail_export: -+ BridgeFreeSyncPrimitiveBlock(GetBridgeHandle(psContext->hDevConnection), -+ psSyncBlk->hServerSyncPrimBlock); -+fail_blockalloc: -+ OSFreeMem(psSyncBlk); -+fail_alloc: -+ return eError; -+} -+ -+static void -+FreeSyncPrimitiveBlock(SYNC_PRIM_BLOCK *psSyncBlk) -+{ -+ SYNC_PRIM_CONTEXT *psContext = psSyncBlk->psContext; -+ -+ DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc); -+ DevmemFree(psSyncBlk->hMemDesc); -+ (void) DestroyServerResource(psContext->hDevConnection, -+ NULL, -+ BridgeFreeSyncPrimitiveBlock, -+ psSyncBlk->hServerSyncPrimBlock); -+ OSFreeMem(psSyncBlk); -+} -+ -+static PVRSRV_ERROR -+SyncPrimBlockImport(RA_PERARENA_HANDLE hArena, -+ RA_LENGTH_T uSize, -+ RA_FLAGS_T uFlags, -+ RA_LENGTH_T uBaseAlignment, -+ const IMG_CHAR *pszAnnotation, -+ RA_BASE_T *puiBase, -+ RA_LENGTH_T *puiActualSize, -+ RA_PERISPAN_HANDLE *phImport) -+{ -+ SYNC_PRIM_CONTEXT *psContext = hArena; -+ SYNC_PRIM_BLOCK *psSyncBlock = NULL; -+ RA_LENGTH_T uiSpanSize; -+ PVRSRV_ERROR eError; -+ PVR_UNREFERENCED_PARAMETER(uFlags); -+ PVR_UNREFERENCED_PARAMETER(uBaseAlignment); -+ -+ /* Check we've not been called with an unexpected size */ -+ PVR_LOG_GOTO_IF_INVALID_PARAM(hArena, eError, e0); -+ PVR_LOG_GOTO_IF_INVALID_PARAM(uSize == sizeof(IMG_UINT32), eError, e0); -+ -+ /* -+ Ensure the synprim context doesn't go away while we have sync blocks -+ attached to it -+ */ -+ _SyncPrimContextRef(psContext); -+ -+ /* Allocate the block of memory */ -+ eError = AllocSyncPrimitiveBlock(psContext, &psSyncBlock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "AllocSyncPrimitiveBlock", fail_syncblockalloc); -+ -+ /* Allocate a span for it */ -+ eError = RA_Alloc(psContext->psSpanRA, -+ psSyncBlock->ui32SyncBlockSize, -+ RA_NO_IMPORT_MULTIPLIER, -+ 0, -+ psSyncBlock->ui32SyncBlockSize, -+ pszAnnotation, -+ &psSyncBlock->uiSpanBase, -+ &uiSpanSize, -+ NULL); -+ PVR_GOTO_IF_ERROR(eError, fail_spanalloc); -+ -+ /* -+ There is no reason the span RA should return an allocation larger -+ then we request -+ */ -+ PVR_ASSERT(uiSpanSize == psSyncBlock->ui32SyncBlockSize); -+ -+ *puiBase = psSyncBlock->uiSpanBase; -+ *puiActualSize = psSyncBlock->ui32SyncBlockSize; -+ *phImport = psSyncBlock; -+ return PVRSRV_OK; -+ -+fail_spanalloc: -+ FreeSyncPrimitiveBlock(psSyncBlock); -+fail_syncblockalloc: -+ _SyncPrimContextUnref(psContext); -+e0: -+ return eError; -+} -+ -+static void -+SyncPrimBlockUnimport(RA_PERARENA_HANDLE hArena, -+ RA_BASE_T uiBase, -+ RA_PERISPAN_HANDLE hImport) -+{ -+ SYNC_PRIM_CONTEXT *psContext = hArena; -+ SYNC_PRIM_BLOCK *psSyncBlock = hImport; -+ -+ if (!psContext || !psSyncBlock || uiBase != psSyncBlock->uiSpanBase) -+ { -+ /* Invalid input params */ -+ return; -+ } -+ -+ /* Free the span this import is using */ -+ RA_Free(psContext->psSpanRA, uiBase); -+ -+ /* Free the syncpim block */ -+ FreeSyncPrimitiveBlock(psSyncBlock); -+ -+ /* Drop our reference to the syncprim context */ -+ _SyncPrimContextUnref(psContext); -+} -+ -+static INLINE IMG_UINT32 SyncPrimGetOffset(SYNC_PRIM *psSyncInt) -+{ -+ IMG_UINT64 ui64Temp = psSyncInt->uiSpanAddr - psSyncInt->psSyncBlock->uiSpanBase; -+ -+ PVR_ASSERT(ui64Temp<IMG_UINT32_MAX); -+ -+ return TRUNCATE_64BITS_TO_32BITS(ui64Temp); -+} -+ -+static void SyncPrimGetCPULinAddr(SYNC_PRIM *psSyncInt) -+{ -+ SYNC_PRIM_BLOCK *psSyncBlock = psSyncInt->psSyncBlock; -+ -+ psSyncInt->sCommon.pui32LinAddr = psSyncBlock->pui32LinAddr + -+ (SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32)); -+} -+ -+static void SyncPrimLocalFree(SYNC_PRIM *psSyncInt, IMG_BOOL bFreeFirstSyncPrim) -+{ -+ SYNC_PRIM_BLOCK *psSyncBlock; -+ SYNC_PRIM_CONTEXT *psContext; -+ -+ psSyncBlock = psSyncInt->psSyncBlock; -+ psContext = psSyncBlock->psContext; -+ -+#if 
!defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST) -+ PVR_UNREFERENCED_PARAMETER(bFreeFirstSyncPrim); -+#else -+ /* Defer freeing the first allocated sync prim in the sync context */ -+ if (psSyncInt != psContext->hFirstSyncPrim || bFreeFirstSyncPrim) -+#endif -+ { -+ PVRSRV_ERROR eError; -+ SHARED_DEV_CONNECTION hDevConnection = -+ psSyncInt->psSyncBlock->psContext->hDevConnection; -+ -+ if (GetInfoPageDebugFlags(hDevConnection) & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) -+ { -+ if (psSyncInt->hRecord) -+ { -+ /* remove this sync record */ -+ eError = DestroyServerResource(hDevConnection, -+ NULL, -+ BridgeSyncRecordRemoveByHandle, -+ psSyncInt->hRecord); -+ PVR_LOG_IF_ERROR(eError, "BridgeSyncRecordRemoveByHandle"); -+ } -+ } -+ else -+ { -+ IMG_UINT32 ui32FWAddr = psSyncBlock->ui32FirmwareAddr + -+ SyncPrimGetOffset(psSyncInt); -+ -+ eError = BridgeSyncFreeEvent(GetBridgeHandle(hDevConnection), ui32FWAddr); -+ PVR_LOG_IF_ERROR(eError, "BridgeSyncFreeEvent"); -+ } -+#if defined(PVRSRV_ENABLE_SYNC_POISONING) -+ (void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_POISON_VALUE); -+#else -+ /* reset the sync prim value as it is freed. -+ * this guarantees the client sync allocated to the client will -+ * have a value of zero and the client does not need to -+ * explicitly initialise the sync value to zero. -+ * the allocation of the backing memory for the sync prim block -+ * is done with ZERO_ON_ALLOC so the memory is initially all zero. -+ */ -+ (void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_RESET_VALUE); -+#endif -+ -+ RA_Free(psContext->psSubAllocRA, psSyncInt->uiSpanAddr); -+ OSFreeMem(psSyncInt); -+ _SyncPrimContextUnref(psContext); -+ } -+} -+ -+static void SyncPrimLocalUnref(SYNC_PRIM *psSyncInt) -+{ -+ if (!OSAtomicRead(&psSyncInt->hRefCount)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalUnref sync already freed")); -+ } -+ else if (0 == OSAtomicDecrement(&psSyncInt->hRefCount)) -+ { -+ SyncPrimLocalFree(psSyncInt, IMG_FALSE); -+ } -+} -+ -+static IMG_UINT32 SyncPrimGetFirmwareAddrLocal(SYNC_PRIM *psSyncInt) -+{ -+ SYNC_PRIM_BLOCK *psSyncBlock; -+ -+ psSyncBlock = psSyncInt->psSyncBlock; -+ return psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psSyncInt); -+} -+ -+static INLINE IMG_UINT32 _Log2(IMG_UINT32 ui32Align) -+{ -+ PVR_ASSERT(IsPower2(ui32Align)); -+ return ExactLog2(ui32Align); -+} -+ -+/* -+ External interfaces -+ */ -+ -+IMG_INTERNAL PVRSRV_ERROR -+SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection, -+ PSYNC_PRIM_CONTEXT *phSyncPrimContext) -+{ -+ SYNC_PRIM_CONTEXT *psContext; -+ PVRSRV_ERROR eError; -+ -+ psContext = OSAllocMem(sizeof(SYNC_PRIM_CONTEXT)); -+ PVR_GOTO_IF_NOMEM(psContext, eError, fail_alloc); -+ -+ psContext->hDevConnection = hDevConnection; -+ -+ OSSNPrintf(psContext->azName, SYNC_PRIM_NAME_SIZE, "Sync Prim RA-%p", psContext); -+ OSSNPrintf(psContext->azSpanName, SYNC_PRIM_NAME_SIZE, "Sync Prim span RA-%p", psContext); -+ -+ /* -+ Create the RA for sub-allocations of the SynPrim's -+ -+ Note: -+ The import size doesn't matter here as the server will pass -+ back the blocksize when does the import which overrides -+ what we specify here. -+ */ -+ -+ psContext->psSubAllocRA = RA_Create(psContext->azName, -+ /* Params for imports */ -+ _Log2(sizeof(IMG_UINT32)), -+ RA_LOCKCLASS_2, -+ SyncPrimBlockImport, -+ SyncPrimBlockUnimport, -+ psContext, -+ RA_POLICY_DEFAULT); -+ PVR_GOTO_IF_NOMEM(psContext->psSubAllocRA, eError, fail_suballoc); -+ -+ /* -+ Create the span-management RA -+ -+ The RA requires that we work with linear spans. 
For our use -+ here we don't require this behaviour as we're always working -+ within offsets of blocks (imports). However, we need to keep -+ the RA happy so we create the "span" management RA which -+ ensures that all are imports are added to the RA in a linear -+ fashion -+ */ -+ psContext->psSpanRA = RA_Create(psContext->azSpanName, -+ /* Params for imports */ -+ 0, -+ RA_LOCKCLASS_1, -+ NULL, -+ NULL, -+ NULL, -+ RA_POLICY_DEFAULT); -+ PVR_GOTO_IF_NOMEM(psContext->psSpanRA, eError, fail_span); -+ -+ if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_MEM, 0, NULL)) -+ { -+ RA_Delete(psContext->psSpanRA); -+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, fail_span); -+ } -+ -+#if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST) -+ psContext->hFirstSyncPrim = NULL; -+#endif -+ -+ OSAtomicWrite(&psContext->hRefCount, 1); -+ -+ *phSyncPrimContext = psContext; -+ return PVRSRV_OK; -+fail_span: -+ RA_Delete(psContext->psSubAllocRA); -+fail_suballoc: -+ OSFreeMem(psContext); -+fail_alloc: -+ return eError; -+} -+ -+IMG_INTERNAL void SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext) -+{ -+ SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext; -+ -+#if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST) -+ /* Free the first sync prim that was allocated as part of this context */ -+ if (psContext->hFirstSyncPrim) -+ { -+ SyncPrimLocalFree((SYNC_PRIM *)psContext->hFirstSyncPrim, IMG_TRUE); -+ psContext->hFirstSyncPrim = NULL; -+ } -+#endif -+ -+ if (1 != OSAtomicRead(&psContext->hRefCount)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s attempted with active references, may be the result of a race", __func__)); -+ } -+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) -+#if defined(__KERNEL__) -+ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Forcing context destruction due to bad driver state", __func__)); -+ OSAtomicWrite(&psContext->hRefCount, 1); -+ } -+#endif -+#endif -+ _SyncPrimContextUnref(psContext); -+} -+ -+static PVRSRV_ERROR _SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext, -+ PVRSRV_CLIENT_SYNC_PRIM **ppsSync, -+ const IMG_CHAR *pszClassName, -+ IMG_BOOL bServerSync) -+{ -+ SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext; -+ SYNC_PRIM_BLOCK *psSyncBlock; -+ SYNC_PRIM *psNewSync; -+ PVRSRV_ERROR eError; -+ RA_BASE_T uiSpanAddr; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(hSyncPrimContext, "hSyncPrimeContext"); -+ -+ psNewSync = OSAllocMem(sizeof(SYNC_PRIM)); -+ PVR_GOTO_IF_NOMEM(psNewSync, eError, fail_alloc); -+ -+ eError = RA_Alloc(psContext->psSubAllocRA, -+ sizeof(IMG_UINT32), -+ RA_NO_IMPORT_MULTIPLIER, -+ 0, -+ sizeof(IMG_UINT32), -+ "Sync_Prim", -+ &uiSpanAddr, -+ NULL, -+ (RA_PERISPAN_HANDLE *) &psSyncBlock); -+ PVR_GOTO_IF_ERROR(eError, fail_raalloc); -+ -+ OSAtomicWrite(&psNewSync->hRefCount, 1); -+ psNewSync->uiSpanAddr = uiSpanAddr; -+ psNewSync->psSyncBlock = psSyncBlock; -+ SyncPrimGetCPULinAddr(psNewSync); -+ *ppsSync = &psNewSync->sCommon; -+ _SyncPrimContextRef(psContext); -+#if defined(PVRSRV_ENABLE_SYNC_POISONING) -+ (void) _SyncPrimSetValue(psNewSync, LOCAL_SYNC_PRIM_RESET_VALUE); -+#endif -+ -+#if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST) -+ /* If this is the first sync prim allocated in the context, keep a handle to it */ -+ if (psSyncBlock->uiSpanBase == 0 && psNewSync->uiSpanAddr == 0) -+ { -+ psContext->hFirstSyncPrim = psNewSync; -+ } -+#endif -+ -+ if (GetInfoPageDebugFlags(psSyncBlock->psContext->hDevConnection) & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) -+ { -+ IMG_CHAR szClassName[PVRSRV_SYNC_NAME_LENGTH]; -+ size_t uiSize; -+ -+ 
if (pszClassName) -+ { -+ uiSize = OSStringNLength(pszClassName, PVRSRV_SYNC_NAME_LENGTH); -+ /* Copy the class name annotation into a fixed-size array */ -+ OSCachedMemCopy(szClassName, pszClassName, uiSize); -+ if (uiSize == PVRSRV_SYNC_NAME_LENGTH) -+ szClassName[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; -+ else -+ szClassName[uiSize++] = '\0'; -+ } -+ else -+ { -+ /* No class name annotation */ -+ uiSize = 0; -+ szClassName[0] = '\0'; -+ } -+ -+ /* record this sync */ -+ eError = BridgeSyncRecordAdd( -+ GetBridgeHandle(psSyncBlock->psContext->hDevConnection), -+ &psNewSync->hRecord, -+ psSyncBlock->hServerSyncPrimBlock, -+ psSyncBlock->ui32FirmwareAddr, -+ SyncPrimGetOffset(psNewSync), -+ bServerSync, -+ uiSize, -+ szClassName); -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to add SyncRecord \"%s\" (%s)", -+ __func__, -+ szClassName, -+ PVRSRVGETERRORSTRING(eError))); -+ psNewSync->hRecord = NULL; -+ } -+ } -+ else -+ { -+ size_t uiSize; -+ -+ uiSize = OSStringNLength(pszClassName, PVRSRV_SYNC_NAME_LENGTH); -+ -+ if (uiSize < PVRSRV_SYNC_NAME_LENGTH) -+ uiSize++; -+ /* uiSize now reflects size used for pszClassName + NUL byte */ -+ -+ eError = BridgeSyncAllocEvent(GetBridgeHandle(hSyncPrimContext->hDevConnection), -+ bServerSync, -+ psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psNewSync), -+ uiSize, -+ pszClassName); -+ PVR_LOG_IF_ERROR(eError, "BridgeSyncAllocEvent"); -+ } -+ -+ return PVRSRV_OK; -+ -+fail_raalloc: -+ OSFreeMem(psNewSync); -+fail_alloc: -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext, -+ PVRSRV_CLIENT_SYNC_PRIM **ppsSync, -+ const IMG_CHAR *pszClassName) -+{ -+ return _SyncPrimAlloc(hSyncPrimContext, -+ ppsSync, -+ pszClassName, -+ IMG_FALSE); -+} -+ -+static PVRSRV_ERROR -+_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value) -+{ -+ PVRSRV_ERROR eError; -+ -+ SYNC_PRIM_BLOCK *psSyncBlock; -+ SYNC_PRIM_CONTEXT *psContext; -+ -+ psSyncBlock = psSyncInt->psSyncBlock; -+ psContext = psSyncBlock->psContext; -+ -+ eError = BridgeSyncPrimSet(GetBridgeHandle(psContext->hDevConnection), -+ psSyncBlock->hServerSyncPrimBlock, -+ SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32), -+ ui32Value); -+ -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ SYNC_PRIM *psSyncInt; -+ -+ PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); -+ -+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); -+ -+ SyncPrimLocalUnref(psSyncInt); -+ -+ -+err_out: -+ return eError; -+} -+ -+#if defined(NO_HARDWARE) -+IMG_INTERNAL PVRSRV_ERROR -+SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ SYNC_PRIM *psSyncInt; -+ -+ PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); -+ -+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); -+ -+ eError = _SyncPrimSetValue(psSyncInt, ui32Value); -+ -+err_out: -+ return eError; -+} -+#endif -+ -+IMG_INTERNAL PVRSRV_ERROR -+SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ SYNC_PRIM *psSyncInt; -+ -+ PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); -+ -+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); -+ -+ eError = _SyncPrimSetValue(psSyncInt, ui32Value); -+ -+#if defined(PDUMP) -+ SyncPrimPDump(psSync); -+#endif -+err_out: -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM 
*psSync, -+ IMG_HANDLE *phBlock, -+ IMG_UINT32 *pui32Offset) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ SYNC_PRIM *psSyncInt; -+ -+ PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); -+ PVR_LOG_GOTO_IF_INVALID_PARAM(phBlock, eError, err_out); -+ PVR_LOG_GOTO_IF_INVALID_PARAM(pui32Offset, eError, err_out); -+ -+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); -+ -+ *phBlock = psSyncInt->psSyncBlock->hServerSyncPrimBlock; -+ *pui32Offset = psSyncInt->uiSpanAddr - psSyncInt->psSyncBlock->uiSpanBase; -+ -+err_out: -+ return eError; -+} -+ -+IMG_INTERNAL PVRSRV_ERROR -+SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ SYNC_PRIM *psSyncInt; -+ -+ *pui32FwAddr = 0; -+ PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); -+ -+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); -+ *pui32FwAddr = SyncPrimGetFirmwareAddrLocal(psSyncInt); -+ -+err_out: -+ return eError; -+} -+ -+#if defined(PDUMP) -+IMG_INTERNAL void SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync) -+{ -+ SYNC_PRIM *psSyncInt; -+ SYNC_PRIM_BLOCK *psSyncBlock; -+ SYNC_PRIM_CONTEXT *psContext; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psSync != NULL); -+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); -+ -+ psSyncBlock = psSyncInt->psSyncBlock; -+ psContext = psSyncBlock->psContext; -+ -+ eError = BridgeSyncPrimPDump(GetBridgeHandle(psContext->hDevConnection), -+ psSyncBlock->hServerSyncPrimBlock, -+ SyncPrimGetOffset(psSyncInt)); -+ PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDump"); -+ PVR_ASSERT(eError == PVRSRV_OK); -+} -+ -+IMG_INTERNAL void SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value) -+{ -+ SYNC_PRIM *psSyncInt; -+ SYNC_PRIM_BLOCK *psSyncBlock; -+ SYNC_PRIM_CONTEXT *psContext; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psSync != NULL); -+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); -+ -+ psSyncBlock = psSyncInt->psSyncBlock; -+ psContext = psSyncBlock->psContext; -+ -+ eError = BridgeSyncPrimPDumpValue(GetBridgeHandle(psContext->hDevConnection), -+ psSyncBlock->hServerSyncPrimBlock, -+ SyncPrimGetOffset(psSyncInt), -+ ui32Value); -+ PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDumpValue"); -+ PVR_ASSERT(eError == PVRSRV_OK); -+} -+ -+IMG_INTERNAL void SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ SYNC_PRIM *psSyncInt; -+ SYNC_PRIM_BLOCK *psSyncBlock; -+ SYNC_PRIM_CONTEXT *psContext; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psSync != NULL); -+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); -+ -+ psSyncBlock = psSyncInt->psSyncBlock; -+ psContext = psSyncBlock->psContext; -+ -+ eError = BridgeSyncPrimPDumpPol(GetBridgeHandle(psContext->hDevConnection), -+ psSyncBlock->hServerSyncPrimBlock, -+ SyncPrimGetOffset(psSyncInt), -+ ui32Value, -+ ui32Mask, -+ eOperator, -+ ui32PDumpFlags); -+ PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDumpPol"); -+ PVR_ASSERT(eError == PVRSRV_OK); -+} -+ -+IMG_INTERNAL void SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync, -+ IMG_UINT64 uiWriteOffset, -+ IMG_UINT64 uiPacketSize, -+ IMG_UINT64 uiBufferSize) -+{ -+ SYNC_PRIM *psSyncInt; -+ SYNC_PRIM_BLOCK *psSyncBlock; -+ SYNC_PRIM_CONTEXT *psContext; -+ PVRSRV_ERROR eError; -+ -+ PVR_ASSERT(psSync != NULL); -+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); -+ -+ psSyncBlock = psSyncInt->psSyncBlock; -+ psContext = psSyncBlock->psContext; -+ -+#if defined(__linux__) && 
defined(__i386__) -+ PVR_ASSERT(uiWriteOffsethDevConnection), -+ psSyncBlock->hServerSyncPrimBlock, -+ SyncPrimGetOffset(psSyncInt), -+ TRUNCATE_64BITS_TO_32BITS(uiWriteOffset), -+ TRUNCATE_64BITS_TO_32BITS(uiPacketSize), -+ TRUNCATE_64BITS_TO_32BITS(uiBufferSize)); -+ PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDumpCBP"); -+ PVR_ASSERT(eError == PVRSRV_OK); -+} -+ -+#endif -diff --git a/drivers/gpu/drm/img-rogue/sync.h b/drivers/gpu/drm/img-rogue/sync.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/sync.h -@@ -0,0 +1,292 @@ -+/*************************************************************************/ /*! -+@File -+@Title Synchronisation interface header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Defines the client side interface for synchronisation -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef SYNC_H -+#define SYNC_H -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+#include "sync_prim_internal.h" -+#include "pdumpdefs.h" -+#include "dllist.h" -+#include "pvr_debug.h" -+ -+#include "device_connection.h" -+ -+#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__) -+#define __pvrsrv_defined_struct_enum__ -+#include -+#endif -+ -+/*************************************************************************/ /*! 
-+@Function SyncPrimContextCreate -+ -+@Description Create a new synchronisation context -+ -+@Input hBridge Bridge handle -+ -+@Input hDeviceNode Device node handle -+ -+@Output hSyncPrimContext Handle to the created synchronisation -+ primitive context -+ -+@Return PVRSRV_OK if the synchronisation primitive context was -+ successfully created -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection, -+ PSYNC_PRIM_CONTEXT *hSyncPrimContext); -+ -+/*************************************************************************/ /*! -+@Function SyncPrimContextDestroy -+ -+@Description Destroy a synchronisation context -+ -+@Input hSyncPrimContext Handle to the synchronisation -+ primitive context to destroy -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void -+SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext); -+ -+/*************************************************************************/ /*! -+@Function SyncPrimAlloc -+ -+@Description Allocate a new synchronisation primitive on the specified -+ synchronisation context -+ -+@Input hSyncPrimContext Handle to the synchronisation -+ primitive context -+ -+@Output ppsSync Created synchronisation primitive -+ -+@Input pszClassName Sync source annotation -+ -+@Return PVRSRV_OK if the synchronisation primitive was -+ successfully created -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext, -+ PVRSRV_CLIENT_SYNC_PRIM **ppsSync, -+ const IMG_CHAR *pszClassName); -+ -+/*************************************************************************/ /*! -+@Function SyncPrimFree -+ -+@Description Free a synchronisation primitive -+ -+@Input psSync The synchronisation primitive to free -+ -+@Return PVRSRV_OK if the synchronisation primitive was -+ successfully freed -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync); -+ -+/*************************************************************************/ /*! -+@Function SyncPrimSet -+ -+@Description Set the synchronisation primitive to a value -+ -+@Input psSync The synchronisation primitive to set -+ -+@Input ui32Value Value to set it to -+ -+@Return PVRSRV_OK on success -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value); -+ -+#if defined(NO_HARDWARE) -+ -+/*************************************************************************/ /*! -+@Function SyncPrimNoHwUpdate -+ -+@Description Updates the synchronisation primitive value (in NoHardware drivers) -+ -+@Input psSync The synchronisation primitive to update -+ -+@Input ui32Value Value to update it to -+ -+@Return PVRSRV_OK on success -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value); -+#endif -+ -+#if defined(PDUMP) -+/*************************************************************************/ /*! 
-+@Function SyncPrimPDump -+ -+@Description PDump the current value of the synchronisation primitive -+ -+@Input psSync The synchronisation primitive to PDump -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void -+SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync); -+ -+/*************************************************************************/ /*! -+@Function SyncPrimPDumpValue -+ -+@Description PDump the ui32Value as the value of the synchronisation -+ primitive (regardless of the current value). -+ -+@Input psSync The synchronisation primitive to PDump -+@Input ui32Value Value to give to the sync prim on the pdump -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void -+SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value); -+ -+/*************************************************************************/ /*! -+@Function SyncPrimPDumpPol -+ -+@Description Do a PDump poll of the synchronisation primitive -+ -+@Input psSync The synchronisation primitive to PDump -+ -+@Input ui32Value Value to poll for -+ -+@Input ui32Mask PDump mask operator -+ -+@Input ui32PDumpFlags PDump flags -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void -+SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ IMG_UINT32 ui32PDumpFlags); -+ -+/*************************************************************************/ /*! -+@Function SyncPrimPDumpCBP -+ -+@Description Do a PDump CB poll using the synchronisation primitive -+ -+@Input psSync The synchronisation primitive to PDump -+ -+@Input uiWriteOffset Current write offset of buffer -+ -+@Input uiPacketSize Size of the packet to write into CB -+ -+@Input uiBufferSize Size of the CB -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void -+SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync, -+ IMG_UINT64 uiWriteOffset, -+ IMG_UINT64 uiPacketSize, -+ IMG_UINT64 uiBufferSize); -+ -+#else -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(SyncPrimPDumpValue) -+#endif -+static INLINE void -+SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value) -+{ -+ PVR_UNREFERENCED_PARAMETER(psSync); -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(SyncPrimPDump) -+#endif -+static INLINE void -+SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync) -+{ -+ PVR_UNREFERENCED_PARAMETER(psSync); -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(SyncPrimPDumpPol) -+#endif -+static INLINE void -+SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync, -+ IMG_UINT32 ui32Value, -+ IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ IMG_UINT32 ui32PDumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psSync); -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+ PVR_UNREFERENCED_PARAMETER(ui32Mask); -+ PVR_UNREFERENCED_PARAMETER(eOperator); -+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(SyncPrimPDumpCBP) -+#endif -+static INLINE void -+SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync, -+ IMG_UINT64 uiWriteOffset, -+ IMG_UINT64 uiPacketSize, -+ IMG_UINT64 uiBufferSize) -+{ -+ PVR_UNREFERENCED_PARAMETER(psSync); -+ PVR_UNREFERENCED_PARAMETER(uiWriteOffset); -+ PVR_UNREFERENCED_PARAMETER(uiPacketSize); -+ PVR_UNREFERENCED_PARAMETER(uiBufferSize); -+} -+#endif /* PDUMP */ -+#endif /* SYNC_H */ 
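For reference, a minimal usage sketch of the client interface declared in sync.h above (illustrative only, not part of the patch): it assumes a valid SHARED_DEV_CONNECTION obtained elsewhere (called hDevConnection here), and the helper name DemoSyncPrimUsage is hypothetical. It exercises only the calls declared in the header: SyncPrimContextCreate, SyncPrimAlloc, SyncPrimSet, SyncPrimFree and SyncPrimContextDestroy.

/* Minimal sketch, assuming a valid SHARED_DEV_CONNECTION (hDevConnection)
 * obtained elsewhere; the helper name is hypothetical and this fragment is
 * not part of the patch itself.
 */
static PVRSRV_ERROR DemoSyncPrimUsage(SHARED_DEV_CONNECTION hDevConnection)
{
	PSYNC_PRIM_CONTEXT hSyncPrimContext;
	PVRSRV_CLIENT_SYNC_PRIM *psSync;
	PVRSRV_ERROR eError;

	/* Create a sync primitive context on the device connection */
	eError = SyncPrimContextCreate(hDevConnection, &hSyncPrimContext);
	if (eError != PVRSRV_OK)
		return eError;

	/* Allocate one sync primitive with a class-name annotation */
	eError = SyncPrimAlloc(hSyncPrimContext, &psSync, "demo");
	if (eError != PVRSRV_OK)
		goto err_destroy_context;

	/* Host-side update of the primitive's value */
	eError = SyncPrimSet(psSync, 1);

	/* Drop the reference; the sub-allocation is returned to the context's RA */
	(void) SyncPrimFree(psSync);

err_destroy_context:
	SyncPrimContextDestroy(hSyncPrimContext);
	return eError;
}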
-diff --git a/drivers/gpu/drm/img-rogue/sync_checkpoint.c b/drivers/gpu/drm/img-rogue/sync_checkpoint.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/sync_checkpoint.c -@@ -0,0 +1,3238 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services synchronisation checkpoint interface -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Server side code for services synchronisation interface -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ */ /**************************************************************************/ -+ -+#include "img_defs.h" -+#include "img_types.h" -+#include "allocmem.h" -+#include "devicemem.h" -+#include "devicemem_pdump.h" -+#include "pvr_debug.h" -+#include "pvr_notifier.h" -+#include "osfunc.h" -+#include "dllist.h" -+#include "sync.h" -+#include "sync_checkpoint_external.h" -+#include "sync_checkpoint.h" -+#include "sync_checkpoint_internal.h" -+#include "sync_checkpoint_init.h" -+#include "lock.h" -+#include "log2.h" -+#include "pvrsrv.h" -+#include "pdump_km.h" -+#include "info_page.h" -+#include "os_apphint.h" -+#include "rgxfwutils.h" -+ -+#include "pvrsrv_sync_km.h" -+#include "rgxhwperf.h" -+ -+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) -+#include "rgxsoctimer.h" -+#endif -+ -+#if defined(PVRSRV_NEED_PVR_DPF) -+ -+/* Enable this to turn on debug relating to the creation and -+ resolution of contexts */ -+#define ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG 0 -+ -+/* Enable this to turn on debug relating to the creation and -+ resolution of fences */ -+#define ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG 0 -+ -+/* Enable this to turn on debug relating to the sync checkpoint -+ allocation and freeing */ -+#define ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG 0 -+ -+/* Enable this to turn on debug relating to the sync checkpoint -+ enqueuing and signalling */ -+#define ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG 0 -+ -+/* Enable this to turn on debug relating to the sync checkpoint pool */ -+#define ENABLE_SYNC_CHECKPOINT_POOL_DEBUG 0 -+ -+/* Enable this to turn on debug relating to sync checkpoint UFO -+ lookup */ -+#define ENABLE_SYNC_CHECKPOINT_UFO_DEBUG 0 -+ -+/* Enable this to turn on sync checkpoint deferred cleanup debug -+ * (for syncs we have been told to free but which have some -+ * outstanding FW operations remaining (enqueued in CCBs) -+ */ -+#define ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG 0 -+ -+#else -+ -+#define ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG 0 -+#define ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG 0 -+#define ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG 0 -+#define ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG 0 -+#define ENABLE_SYNC_CHECKPOINT_POOL_DEBUG 0 -+#define ENABLE_SYNC_CHECKPOINT_UFO_DEBUG 0 -+#define ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG 0 -+ -+#endif -+ -+/* Maximum number of deferred sync checkpoint signal/error received for atomic context */ -+#define SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL 500 -+ -+/* Set the size of the sync checkpoint pool (not used if 0). -+ * A pool will be maintained for each sync checkpoint context. -+ * SYNC_CHECKPOINT_POOL_LIMIT must be a power of 2 (POT), -+ * as the pool wrap mask is calculated using it. -+ */ -+#define SYNC_CHECKPOINT_POOL_LIMIT 1024 -+ -+/* The 'sediment' value represents the minimum number of -+ * sync checkpoints which must be in the pool before one -+ * will be allocated from the pool rather than from memory. -+ * This effectively helps avoid re-use of a sync checkpoint -+ * just after it has been returned to the pool, making -+ * debugging somewhat easier to understand. -+ */ -+#if defined(PDUMP) -+#define SYNC_CHECKPOINT_POOL_SEDIMENT 20 -+#else -+#define SYNC_CHECKPOINT_POOL_SEDIMENT 0 -+#endif -+ -+#if (SYNC_CHECKPOINT_POOL_LIMIT & (SYNC_CHECKPOINT_POOL_LIMIT - 1)) != 0 -+#error "SYNC_CHECKPOINT_POOL_LIMIT must be power of 2." -+#endif -+ -+/* -+ This defines the maximum amount of synchronisation memory -+ that can be allocated per sync checkpoint context. 
-+ In reality this number is meaningless as we would run out -+ of synchronisation memory before we reach this limit, but -+ we need to provide a size to the span RA. -+ */ -+#define MAX_SYNC_CHECKPOINT_MEM (4 * 1024 * 1024) -+ -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+/* Flags used to indicate state of pool */ -+#define SYNC_CHECKPOINT_POOL_FULL (1) -+#define SYNC_CHECKPOINT_POOL_VALID (1 << 7) -+#define CHECKPOINT_POOL_FULL(ctxctl) \ -+ ctxctl->ui8PoolStateFlags & SYNC_CHECKPOINT_POOL_FULL -+#define CHECKPOINT_POOL_VALID(ctxctl) \ -+ ctxctl->ui8PoolStateFlags & SYNC_CHECKPOINT_POOL_VALID -+#define SET_CHECKPOINT_POOL_FULL(ctxctl) \ -+ ctxctl->ui8PoolStateFlags |= SYNC_CHECKPOINT_POOL_FULL -+#define SET_CHECKPOINT_POOL_VALID(ctxctl) \ -+ ctxctl->ui8PoolStateFlags |= SYNC_CHECKPOINT_POOL_VALID -+#define CLEAR_CHECKPOINT_POOL_FULL(ctxctl) \ -+ ctxctl->ui8PoolStateFlags &= ~SYNC_CHECKPOINT_POOL_FULL -+#define CLEAR_CHECKPOINT_POOL_VALID(ctxctl) \ -+ ctxctl->ui8PoolStateFlags &= ~SYNC_CHECKPOINT_POOL_VALID -+#endif -+struct _SYNC_CHECKPOINT_CONTEXT_CTL_ -+{ -+ SHARED_DEV_CONNECTION psDeviceNode; -+ /* -+ * Used as head of linked-list of sync checkpoints for which -+ * SyncCheckpointFree() has been called, but have outstanding -+ * FW operations (enqueued in CCBs) -+ * This list will be check whenever a SyncCheckpointFree() is -+ * called, and when SyncCheckpointContextDestroy() is called. -+ */ -+ DLLIST_NODE sDeferredCleanupListHead; -+ /* Lock to protect the deferred cleanup list */ -+ POS_SPINLOCK hDeferredCleanupListLock; -+ -+ /* Counters to provide stats for number of checkpoints used at any one time */ -+ IMG_UINT32 ui32CurrentInUseSyncCheckpoints; -+ IMG_UINT32 ui32MaxInUseSyncCheckpoints; -+ /* Lock to protect the checkpoint stats */ -+ POS_SPINLOCK hSyncCheckpointStatsLock; -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+ IMG_UINT32 ui32SyncCheckpointPoolSize; /*! Allocated size of the pool */ -+ IMG_UINT32 ui32SyncCheckpointPoolCount; /*! Number of checkpoints currently in pool */ -+ IMG_UINT32 ui32SyncCheckpointPoolWp; /*! Pool write pointer */ -+ IMG_UINT32 ui32SyncCheckpointPoolRp; /*! Pool read pointer */ -+ POS_SPINLOCK hSyncCheckpointPoolLock; /*! Lock to protect access to pool control data */ -+ IMG_UINT8 ui8PoolStateFlags; /*! Flags to indicate state of pool */ -+ /*! Array of SYNC_CHECKPOINTs. Must be last member in structure */ -+ SYNC_CHECKPOINT *apsSyncCheckpointPool[1]; /*! The allocated checkpoint pool */ -+#endif -+}; /*_SYNC_CHECKPOINT_CONTEXT_CTL is already typedef-ed in sync_checkpoint_internal.h */ -+ -+struct SYNC_CHECKPOINT_CONTEXT_TAG -+{ -+#if defined(PDUMP) -+ DLLIST_NODE sSyncCheckpointBlockListHead; /*!< List head for the sync chkpt blocks in this context*/ -+ DLLIST_NODE sListNode; /*!< List node for the sync chkpt context list*/ -+ POS_LOCK hSyncCheckpointBlockListLock; /*!< sync chkpt blocks list lock*/ -+#endif -+ RA_ARENA *psSpanRA; /*!< RA used for span management of SubAllocRA */ -+ RA_ARENA *psSubAllocRA; /*!< RA context */ -+ _PSYNC_CHECKPOINT_CONTEXT_CTL psContextCtl; -+ ATOMIC_T hCheckpointCount; /*!< Checkpoint count for this context */ -+ ATOMIC_T hRefCount; /*!< Ref count for this context */ -+}; /*_SYNC_CHECKPOINT_CONTEXT is already typedef-ed in sync_checkpoint_internal.h */ -+ -+/* this is the max number of sync checkpoint records we will search or dump -+ * at any time. -+ */ -+#define SYNC_CHECKPOINT_RECORD_LIMIT 20000 -+ -+#define DECREMENT_WITH_WRAP(value, sz) ((value) ? 
((value) - 1) : ((sz) - 1)) -+ -+struct SYNC_CHECKPOINT_RECORD -+{ -+ SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock; /*!< handle to SYNC_CHECKPOINT_BLOCK */ -+ IMG_UINT32 ui32SyncOffset; /*!< offset to sync in block */ -+ IMG_UINT32 ui32FwBlockAddr; -+ IMG_PID uiPID; -+ IMG_UINT32 ui32UID; -+ IMG_UINT64 ui64OSTime; -+ DLLIST_NODE sNode; -+ IMG_CHAR szClassName[PVRSRV_SYNC_NAME_LENGTH]; -+ PSYNC_CHECKPOINT pSyncCheckpt; -+}; -+ -+static PFN_SYNC_CHECKPOINT_STRUCT *g_psSyncCheckpointPfnStruct = NULL; -+ -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+static SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext); -+static IMG_BOOL _PutCheckpointInPool(SYNC_CHECKPOINT *psSyncCheckpoint); -+static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext); -+#endif -+ -+#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1) -+static IMG_UINT32 gui32NumSyncCheckpointContexts = 0; -+#endif -+ -+/* Defined values to indicate status of sync checkpoint, which is -+ * stored in the memory of the structure */ -+#define SYNC_CHECKPOINT_PATTERN_IN_USE 0x1a1aa -+#define SYNC_CHECKPOINT_PATTERN_IN_POOL 0x2b2bb -+#define SYNC_CHECKPOINT_PATTERN_FREED 0x3c3cc -+ -+#if defined(SUPPORT_RGX) -+static inline void RGXSRVHWPerfSyncCheckpointUFOIsSignalled(PVRSRV_RGXDEV_INFO *psDevInfo, -+ SYNC_CHECKPOINT *psSyncCheckpointInt, IMG_UINT32 ui32FenceSyncFlags) -+{ -+ if (RGXHWPerfHostIsEventEnabled(psDevInfo, RGX_HWPERF_HOST_UFO) -+ && !(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) -+ { -+ RGX_HWPERF_UFO_EV eEv; -+ RGX_HWPERF_UFO_DATA_ELEMENT sSyncData; -+ -+ if (psSyncCheckpointInt) -+ { -+ IMG_UINT32 ui32State = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; -+ -+ if ((ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) || -+ (ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED)) -+ { -+ sSyncData.sCheckSuccess.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt); -+ sSyncData.sCheckSuccess.ui32Value = ui32State; -+ eEv = RGX_HWPERF_UFO_EV_CHECK_SUCCESS; -+ } -+ else -+ { -+ sSyncData.sCheckFail.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt); -+ sSyncData.sCheckFail.ui32Value = ui32State; -+ sSyncData.sCheckFail.ui32Required = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; -+ eEv = RGX_HWPERF_UFO_EV_CHECK_FAIL; -+ } -+ RGXHWPerfHostPostUfoEvent(psDevInfo, eEv, &sSyncData, -+ (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE); -+ } -+ } -+} -+ -+static inline void RGXSRVHWPerfSyncCheckpointUFOUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, -+ SYNC_CHECKPOINT *psSyncCheckpointInt, IMG_UINT32 ui32FenceSyncFlags) -+{ -+ if (RGXHWPerfHostIsEventEnabled(psDevInfo, RGX_HWPERF_HOST_UFO) -+ && !(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) -+ { -+ RGX_HWPERF_UFO_DATA_ELEMENT sSyncData; -+ -+ if (psSyncCheckpointInt) -+ { -+ sSyncData.sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt); -+ sSyncData.sUpdate.ui32OldValue = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; -+ sSyncData.sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; -+ RGXHWPerfHostPostUfoEvent(psDevInfo, RGX_HWPERF_UFO_EV_UPDATE, &sSyncData, -+ (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? 
IMG_FALSE : IMG_TRUE); -+ } -+ } -+} -+#endif -+ -+static PVRSRV_ERROR -+_SyncCheckpointRecordAdd(PSYNC_CHECKPOINT_RECORD_HANDLE *phRecord, -+ SYNC_CHECKPOINT_BLOCK *hSyncCheckpointBlock, -+ IMG_UINT32 ui32FwBlockAddr, -+ IMG_UINT32 ui32SyncOffset, -+ IMG_UINT32 ui32UID, -+ IMG_UINT32 ui32ClassNameSize, -+ const IMG_CHAR *pszClassName, PSYNC_CHECKPOINT pSyncCheckpt); -+static PVRSRV_ERROR -+_SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord); -+static void _SyncCheckpointState(PDLLIST_NODE psNode, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile); -+static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, -+ IMG_UINT32 ui32VerbLevel, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile); -+static PVRSRV_ERROR _SyncCheckpointRecordListInit(PVRSRV_DEVICE_NODE *psDevNode); -+static void _SyncCheckpointRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode); -+ -+#if defined(PDUMP) -+static void -+MISRHandler_PdumpDeferredSyncSignalPoster(void *pvData); -+static PVRSRV_ERROR _SyncCheckpointAllocPDump(PVRSRV_DEVICE_NODE *psDevNode, SYNC_CHECKPOINT *psSyncCheckpoint); -+static PVRSRV_ERROR _SyncCheckpointUpdatePDump(PPVRSRV_DEVICE_NODE psDevNode, SYNC_CHECKPOINT *psSyncCheckpoint, IMG_UINT32 ui32Status, IMG_UINT32 ui32FenceSyncFlags); -+static PVRSRV_ERROR _SyncCheckpointPDumpTransition(void *pvData, PDUMP_TRANSITION_EVENT eEvent); -+#endif -+ -+/* Unique incremental ID assigned to sync checkpoints when allocated */ -+static IMG_UINT32 g_SyncCheckpointUID; -+ -+static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext); -+ -+void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext) -+{ -+ _SYNC_CHECKPOINT_CONTEXT *psContextInt = (_SYNC_CHECKPOINT_CONTEXT *) psContext; -+ _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContextInt->psContextCtl; -+ IMG_UINT32 ui32RefCt = OSAtomicRead(&psContextInt->hRefCount); -+ -+ if (ui32RefCt == 0) -+ { -+ PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT, -+ "SyncCheckpointContextUnref context already freed"); -+ } -+ else if (OSAtomicDecrement(&psContextInt->hRefCount) == 0) -+ { -+#if defined(PDUMP) -+ PPVRSRV_DEVICE_NODE psDeviceNode = psCtxCtl->psDeviceNode; -+#endif -+ -+ /* SyncCheckpointContextDestroy only when no longer referenced */ -+ OSSpinLockDestroy(psCtxCtl->hDeferredCleanupListLock); -+ psCtxCtl->hDeferredCleanupListLock = NULL; -+ -+ OSSpinLockDestroy(psCtxCtl->hSyncCheckpointStatsLock); -+ psCtxCtl->hSyncCheckpointStatsLock = NULL; -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+ if (psCtxCtl->ui32SyncCheckpointPoolCount) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s called for context<%p> with %d sync checkpoints still" -+ " in the pool", -+ __func__, -+ (void *) psContext, -+ psCtxCtl->ui32SyncCheckpointPoolCount)); -+ } -+ CLEAR_CHECKPOINT_POOL_FULL(psCtxCtl); -+ OSSpinLockDestroy(psCtxCtl->hSyncCheckpointPoolLock); -+ psCtxCtl->hSyncCheckpointPoolLock = NULL; -+#endif -+ OSFreeMem(psContextInt->psContextCtl); -+ RA_Delete(psContextInt->psSpanRA); -+ RA_Delete(psContextInt->psSubAllocRA); -+ -+#if defined(PDUMP) -+ PVR_ASSERT(dllist_is_empty(&psContext->sSyncCheckpointBlockListHead)); -+ -+ OSLockAcquire(psDeviceNode->hSyncCheckpointContextListLock); -+ dllist_remove_node(&psContext->sListNode); -+ OSLockRelease(psDeviceNode->hSyncCheckpointContextListLock); -+ -+ OSLockDestroy(psContext->hSyncCheckpointBlockListLock); -+#endif -+ -+ OSFreeMem(psContext); -+ } -+} -+ -+void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext) -+{ -+ 
_SYNC_CHECKPOINT_CONTEXT *psContextInt = (_SYNC_CHECKPOINT_CONTEXT *)psContext; -+ IMG_UINT32 ui32RefCt = OSAtomicRead(&psContextInt->hRefCount); -+ -+ if (ui32RefCt == 0) -+ { -+ PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT, -+ "SyncCheckpointContextRef context use after free"); -+ } -+ else -+ { -+ OSAtomicIncrement(&psContextInt->hRefCount); -+ } -+} -+ -+/* -+ Internal interfaces for management of synchronisation block memory -+ */ -+static PVRSRV_ERROR -+_AllocSyncCheckpointBlock(_SYNC_CHECKPOINT_CONTEXT *psContext, -+ SYNC_CHECKPOINT_BLOCK **ppsSyncBlock) -+{ -+ PVRSRV_DEVICE_NODE *psDevNode; -+ SYNC_CHECKPOINT_BLOCK *psSyncBlk; -+ PVRSRV_ERROR eError; -+ -+ psSyncBlk = OSAllocMem(sizeof(*psSyncBlk)); -+ PVR_LOG_GOTO_IF_NOMEM(psSyncBlk, eError, fail_alloc); -+ -+ psSyncBlk->psContext = psContext; -+ -+ /* Allocate sync checkpoint block */ -+ psDevNode = psContext->psContextCtl->psDeviceNode; -+ PVR_LOG_GOTO_IF_INVALID_PARAM(psDevNode, eError, fail_alloc_ufo_block); -+ -+ eError = psDevNode->pfnAllocUFOBlock(psDevNode, -+ &psSyncBlk->hMemDesc, -+ &psSyncBlk->ui32FirmwareAddr, -+ &psSyncBlk->ui32SyncBlockSize); -+ PVR_LOG_GOTO_IF_ERROR(eError, "pfnAllocUFOBlock", fail_alloc_ufo_block); -+ -+ eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc, -+ (void **) &psSyncBlk->pui32LinAddr); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail_devmem_acquire); -+ -+ OSAtomicWrite(&psSyncBlk->hRefCount, 1); -+ -+ OSLockCreate(&psSyncBlk->hLock); -+ -+ PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS, -+ "Allocated Sync Checkpoint UFO block (FirmwareVAddr = 0x%08x)", -+ psSyncBlk->ui32FirmwareAddr); -+#if defined(PDUMP) -+ OSLockAcquire(psContext->hSyncCheckpointBlockListLock); -+ dllist_add_to_tail(&psContext->sSyncCheckpointBlockListHead, &psSyncBlk->sListNode); -+ OSLockRelease(psContext->hSyncCheckpointBlockListLock); -+#endif -+ -+ *ppsSyncBlock = psSyncBlk; -+ return PVRSRV_OK; -+ -+fail_devmem_acquire: -+ psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc); -+fail_alloc_ufo_block: -+ OSFreeMem(psSyncBlk); -+fail_alloc: -+ return eError; -+} -+ -+static void -+_FreeSyncCheckpointBlock(SYNC_CHECKPOINT_BLOCK *psSyncBlk) -+{ -+ OSLockAcquire(psSyncBlk->hLock); -+ if (0 == OSAtomicDecrement(&psSyncBlk->hRefCount)) -+ { -+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncBlk->psContext; -+ PVRSRV_DEVICE_NODE *psDevNode = psContext->psContextCtl->psDeviceNode; -+ -+#if defined(PDUMP) -+ OSLockAcquire(psSyncBlk->psContext->hSyncCheckpointBlockListLock); -+ dllist_remove_node(&psSyncBlk->sListNode); -+ OSLockRelease(psSyncBlk->psContext->hSyncCheckpointBlockListLock); -+#endif -+ DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc); -+ psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc); -+ OSLockRelease(psSyncBlk->hLock); -+ OSLockDestroy(psSyncBlk->hLock); -+ psSyncBlk->hLock = NULL; -+ OSFreeMem(psSyncBlk); -+ } -+ else -+ { -+ OSLockRelease(psSyncBlk->hLock); -+ } -+} -+ -+static PVRSRV_ERROR -+_SyncCheckpointBlockImport(RA_PERARENA_HANDLE hArena, -+ RA_LENGTH_T uSize, -+ RA_FLAGS_T uFlags, -+ RA_LENGTH_T uBaseAlignment, -+ const IMG_CHAR *pszAnnotation, -+ RA_BASE_T *puiBase, -+ RA_LENGTH_T *puiActualSize, -+ RA_PERISPAN_HANDLE *phImport) -+{ -+ _SYNC_CHECKPOINT_CONTEXT *psContext = hArena; -+ SYNC_CHECKPOINT_BLOCK *psSyncBlock = NULL; -+ RA_LENGTH_T uiSpanSize; -+ PVRSRV_ERROR eError; -+ PVR_UNREFERENCED_PARAMETER(uFlags); -+ PVR_UNREFERENCED_PARAMETER(uBaseAlignment); -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM((hArena != NULL), "hArena"); -+ -+ /* 
Check we've not be called with an unexpected size */ -+ PVR_LOG_RETURN_IF_INVALID_PARAM((uSize == sizeof(SYNC_CHECKPOINT_FW_OBJ)), "uSize"); -+ -+ /* -+ Ensure the sync checkpoint context doesn't go away while we have -+ sync blocks attached to it. -+ */ -+ SyncCheckpointContextRef((PSYNC_CHECKPOINT_CONTEXT)psContext); -+ -+ /* Allocate the block of memory */ -+ eError = _AllocSyncCheckpointBlock(psContext, &psSyncBlock); -+ PVR_GOTO_IF_ERROR(eError, fail_syncblockalloc); -+ -+ /* Allocate a span for it */ -+ eError = RA_Alloc(psContext->psSpanRA, -+ psSyncBlock->ui32SyncBlockSize, -+ RA_NO_IMPORT_MULTIPLIER, -+ 0, -+ psSyncBlock->ui32SyncBlockSize, -+ pszAnnotation, -+ &psSyncBlock->uiSpanBase, -+ &uiSpanSize, -+ NULL); -+ PVR_GOTO_IF_ERROR(eError, fail_spanalloc); -+ -+ /* -+ There is no reason the span RA should return an allocation larger -+ then we request -+ */ -+ PVR_LOG_IF_FALSE((uiSpanSize == psSyncBlock->ui32SyncBlockSize), -+ "uiSpanSize invalid"); -+ -+ *puiBase = psSyncBlock->uiSpanBase; -+ *puiActualSize = psSyncBlock->ui32SyncBlockSize; -+ *phImport = psSyncBlock; -+ return PVRSRV_OK; -+ -+fail_spanalloc: -+ _FreeSyncCheckpointBlock(psSyncBlock); -+fail_syncblockalloc: -+ SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext); -+ -+ return eError; -+} -+ -+static void -+_SyncCheckpointBlockUnimport(RA_PERARENA_HANDLE hArena, -+ RA_BASE_T uiBase, -+ RA_PERISPAN_HANDLE hImport) -+{ -+ _SYNC_CHECKPOINT_CONTEXT *psContext = hArena; -+ SYNC_CHECKPOINT_BLOCK *psSyncBlock = hImport; -+ -+ PVR_LOG_RETURN_VOID_IF_FALSE((psContext != NULL), "hArena invalid"); -+ PVR_LOG_RETURN_VOID_IF_FALSE((psSyncBlock != NULL), "hImport invalid"); -+ PVR_LOG_RETURN_VOID_IF_FALSE((uiBase == psSyncBlock->uiSpanBase), "uiBase invalid"); -+ -+ /* Free the span this import is using */ -+ RA_Free(psContext->psSpanRA, uiBase); -+ -+ /* Free the sync checkpoint block */ -+ _FreeSyncCheckpointBlock(psSyncBlock); -+ -+ /* Drop our reference to the sync checkpoint context */ -+ SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext); -+} -+ -+static INLINE IMG_UINT32 _SyncCheckpointGetOffset(SYNC_CHECKPOINT *psSyncInt) -+{ -+ IMG_UINT64 ui64Temp; -+ -+ ui64Temp = (IMG_UINT64)psSyncInt->uiAllocatedAddr - -+ (IMG_UINT64)psSyncInt->psSyncCheckpointBlock->uiSpanBase; -+ PVR_ASSERT(ui64TemppfnFenceResolve)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", -+ __func__)); -+ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; -+ PVR_LOG_ERROR(eError, "g_pfnFenceResolve is NULL"); -+ return eError; -+ } -+ -+ if (papsSyncCheckpoints) -+ { -+ eError = g_psSyncCheckpointPfnStruct->pfnFenceResolve( -+ psSyncCheckpointContext, -+ hFence, -+ pui32NumSyncCheckpoints, -+ papsSyncCheckpoints, -+ pui64FenceUID); -+ } -+ else -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ PVR_LOG_RETURN_IF_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceResolve"); -+ -+#if defined(PDUMP) -+ if (*papsSyncCheckpoints) -+ { -+ for (i = 0; i < *pui32NumSyncCheckpoints; i++) -+ { -+ psSyncCheckpoint = (SYNC_CHECKPOINT *)(*papsSyncCheckpoints)[i]; -+ psSyncCheckpoint->ui32PDumpFlags = ui32PDumpFlags; -+ } -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); -+#endif -+ -+ if (*pui32NumSyncCheckpoints > MAX_SYNC_CHECKPOINTS_PER_FENCE) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: g_psSyncCheckpointPfnStruct->pfnFenceResolve() returned too many checkpoints (%u > MAX_SYNC_CHECKPOINTS_PER_FENCE=%u)", -+ __func__, *pui32NumSyncCheckpoints, MAX_SYNC_CHECKPOINTS_PER_FENCE)); 
-+ -+ /* Free resources after error */ -+ if (*papsSyncCheckpoints) -+ { -+ for (i = 0; i < *pui32NumSyncCheckpoints; i++) -+ { -+ SyncCheckpointDropRef((*papsSyncCheckpoints)[i]); -+ } -+ -+ SyncCheckpointFreeCheckpointListMem(*papsSyncCheckpoints); -+ } -+ -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) -+ { -+ IMG_UINT32 ii; -+ -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: g_psSyncCheckpointPfnStruct->pfnFenceResolve() for fence %d returned the following %d checkpoints:", -+ __func__, -+ hFence, -+ *pui32NumSyncCheckpoints)); -+ -+ for (ii=0; ii<*pui32NumSyncCheckpoints; ii++) -+ { -+ PSYNC_CHECKPOINT psNextCheckpoint = *(*papsSyncCheckpoints + ii); -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: *papsSyncCheckpoints[%d]:<%p>", -+ __func__, -+ ii, -+ (void*)psNextCheckpoint)); -+ } -+ } -+#endif -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+SyncCheckpointCreateFence(PVRSRV_DEVICE_NODE *psDevNode, -+ const IMG_CHAR *pszFenceName, -+ PVRSRV_TIMELINE hTimeline, -+ PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, -+ PVRSRV_FENCE *phNewFence, -+ IMG_UINT64 *puiUpdateFenceUID, -+ void **ppvFenceFinaliseData, -+ PSYNC_CHECKPOINT *psNewSyncCheckpoint, -+ void **ppvTimelineUpdateSyncPrim, -+ IMG_UINT32 *pui32TimelineUpdateValue, -+ PDUMP_FLAGS_T ui32PDumpFlags) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_UNREFERENCED_PARAMETER(psDevNode); -+ -+ if (unlikely(!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceCreate)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", -+ __func__)); -+ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; -+ PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceCreate is NULL"); -+ } -+ else -+ { -+ eError = g_psSyncCheckpointPfnStruct->pfnFenceCreate( -+ psDevNode, -+ pszFenceName, -+ hTimeline, -+ psSyncCheckpointContext, -+ phNewFence, -+ puiUpdateFenceUID, -+ ppvFenceFinaliseData, -+ psNewSyncCheckpoint, -+ ppvTimelineUpdateSyncPrim, -+ pui32TimelineUpdateValue); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s failed to create new fence<%p> for timeline<%d> using " -+ "sync checkpoint context<%p>, psNewSyncCheckpoint=<%p>, eError=%s", -+ __func__, -+ (void*)phNewFence, -+ hTimeline, -+ (void*)psSyncCheckpointContext, -+ (void*)psNewSyncCheckpoint, -+ PVRSRVGetErrorString(eError))); -+ } -+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s created new fence<%d> for timeline<%d> using " -+ "sync checkpoint context<%p>, new sync_checkpoint=<%p>", -+ __func__, -+ *phNewFence, -+ hTimeline, -+ (void*)psSyncCheckpointContext, -+ (void*)*psNewSyncCheckpoint)); -+ } -+#endif -+ -+#if defined(PDUMP) -+ if (eError == PVRSRV_OK) -+ { -+ SYNC_CHECKPOINT *psSyncCheckpoint = (SYNC_CHECKPOINT*)(*psNewSyncCheckpoint); -+ if (psSyncCheckpoint) -+ { -+ psSyncCheckpoint->ui32PDumpFlags = ui32PDumpFlags; -+ } -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); -+#endif -+ } -+ return eError; -+} -+ -+PVRSRV_ERROR -+SyncCheckpointRollbackFenceData(PVRSRV_FENCE hFence, void *pvFinaliseData) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceDataRollback) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", -+ __func__)); -+ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; -+ PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceDataRollback is NULL"); -+ } -+ else -+ { 
-+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: called to rollback fence data <%p>", -+ __func__, -+ pvFinaliseData)); -+#endif -+ eError = g_psSyncCheckpointPfnStruct->pfnFenceDataRollback( -+ hFence, pvFinaliseData); -+ PVR_LOG_IF_ERROR(eError, -+ "g_psSyncCheckpointPfnStruct->pfnFenceDataRollback returned error"); -+ } -+ return eError; -+} -+ -+PVRSRV_ERROR -+SyncCheckpointFinaliseFence(PPVRSRV_DEVICE_NODE psDevNode, -+ PVRSRV_FENCE hFence, -+ void *pvFinaliseData, -+ PSYNC_CHECKPOINT psSyncCheckpoint, -+ const IMG_CHAR *pszName) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceFinalise) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: Warning (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED) (this is permitted)", -+ __func__)); -+ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; -+ } -+ else -+ { -+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: called to finalise fence <%d>", -+ __func__, -+ hFence)); -+#endif -+ eError = g_psSyncCheckpointPfnStruct->pfnFenceFinalise(hFence, pvFinaliseData); -+ PVR_LOG_IF_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceFinalise returned error"); -+ -+ RGXSRV_HWPERF_ALLOC_FENCE(psDevNode, OSGetCurrentClientProcessIDKM(), hFence, -+ SyncCheckpointGetFirmwareAddr(psSyncCheckpoint), -+ pszName, OSStringLength(pszName)); -+ } -+ return eError; -+} -+ -+void -+SyncCheckpointFreeCheckpointListMem(void *pvCheckpointListMem) -+{ -+ if (g_psSyncCheckpointPfnStruct->pfnFreeCheckpointListMem) -+ { -+ g_psSyncCheckpointPfnStruct->pfnFreeCheckpointListMem(pvCheckpointListMem); -+ } -+} -+ -+PVRSRV_ERROR -+SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", -+ __func__)); -+ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; -+ PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines is NULL"); -+ } -+ else -+ { -+ g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines(pvPrivateData); -+ } -+ return eError; -+ -+} -+ -+PVRSRV_ERROR -+SyncCheckpointDumpInfoOnStalledUFOs(IMG_UINT32 ui32NumUFOs, IMG_UINT32 *pui32Vaddrs, IMG_UINT32 *pui32NumSyncOwnedUFOs) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_LOG_RETURN_IF_FALSE((pui32NumSyncOwnedUFOs != NULL), "pui32NumSyncOwnedUFOs invalid", PVRSRV_ERROR_INVALID_PARAMS); -+ -+ if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs) -+ { -+ *pui32NumSyncOwnedUFOs = 0; -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", -+ __func__)); -+ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; -+ PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs is NULL"); -+ } -+ else -+ { -+ *pui32NumSyncOwnedUFOs = g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs(ui32NumUFOs, pui32Vaddrs); -+ PVR_LOG(("%d sync checkpoint%s owned by %s in stalled context", -+ *pui32NumSyncOwnedUFOs, *pui32NumSyncOwnedUFOs==1 ? 
"" : "s", -+ g_psSyncCheckpointPfnStruct->pszImplName)); -+ } -+ return eError; -+} -+ -+static PVRSRV_ERROR -+_AllocSyncCheckpoint(_SYNC_CHECKPOINT_CONTEXT *psContext, -+ SYNC_CHECKPOINT **ppsSyncCheckpoint) -+{ -+ SYNC_CHECKPOINT *psNewSyncCheckpoint = NULL; -+ PVRSRV_ERROR eError; -+ -+ /* Allocate sync checkpoint */ -+ psNewSyncCheckpoint = OSAllocMem(sizeof(*psNewSyncCheckpoint)); -+ PVR_LOG_RETURN_IF_NOMEM(psNewSyncCheckpoint, "OSAllocMem"); /* Sets OOM error code */ -+ -+ eError = RA_Alloc(psContext->psSubAllocRA, -+ sizeof(*psNewSyncCheckpoint->psSyncCheckpointFwObj), -+ RA_NO_IMPORT_MULTIPLIER, -+ 0, -+ sizeof(IMG_UINT32), -+ NULL, -+ &psNewSyncCheckpoint->uiAllocatedAddr, -+ NULL, -+ (RA_PERISPAN_HANDLE *) &psNewSyncCheckpoint->psSyncCheckpointBlock); -+ -+ PVR_LOG_GOTO_IF_ERROR(eError, "RA_Alloc", fail_ra_alloc); -+ -+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s CALLED RA_Alloc(), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx", -+ __func__, -+ (void*)psContext->psSubAllocRA, -+ psNewSyncCheckpoint->uiAllocatedAddr)); -+#endif -+ psNewSyncCheckpoint->psSyncCheckpointFwObj = -+ (volatile SYNC_CHECKPOINT_FW_OBJ*)(void *)(psNewSyncCheckpoint->psSyncCheckpointBlock->pui32LinAddr + -+ (_SyncCheckpointGetOffset(psNewSyncCheckpoint)/sizeof(IMG_UINT32))); -+ psNewSyncCheckpoint->ui32FWAddr = psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + -+ _SyncCheckpointGetOffset(psNewSyncCheckpoint) + 1; -+ OSAtomicIncrement(&psNewSyncCheckpoint->psSyncCheckpointBlock->psContext->hCheckpointCount); -+ -+#if defined(DEBUG) -+ psNewSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE; -+#endif -+ psNewSyncCheckpoint->sListNode.psPrevNode = NULL; -+ psNewSyncCheckpoint->sListNode.psNextNode = NULL; -+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s called to allocate new sync checkpoint<%p> for context<%p>", -+ __func__, (void*)psNewSyncCheckpoint, (void*)psContext)); -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s psSyncCheckpointFwObj<%p>", -+ __func__, (void*)psNewSyncCheckpoint->psSyncCheckpointFwObj)); -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s psSyncCheckpoint FwAddr=0x%x", -+ __func__, SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint))); -+#endif -+ *ppsSyncCheckpoint = psNewSyncCheckpoint; -+ return PVRSRV_OK; -+ -+fail_ra_alloc: -+ OSFreeMem(psNewSyncCheckpoint); -+ return eError; -+} -+ -+/* Poisons and frees the checkpoint -+ * Decrements context refcount. 
*/ -+static void _FreeSyncCheckpoint(SYNC_CHECKPOINT *psSyncCheckpoint) -+{ -+ _SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext; -+ -+ psSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = 0; -+ psSyncCheckpoint->psSyncCheckpointFwObj = NULL; -+#if defined(DEBUG) -+ psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_FREED; -+#endif -+ -+ RA_Free(psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA, -+ psSyncCheckpoint->uiAllocatedAddr); -+ psSyncCheckpoint->psSyncCheckpointBlock = NULL; -+ -+ OSFreeMem(psSyncCheckpoint); -+ -+ OSAtomicDecrement(&psContext->hCheckpointCount); -+} -+ -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+static PVRSRV_ERROR -+_PrepopulateSyncCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext, -+ IMG_UINT32 ui32InitPoolSize) -+{ -+ IMG_UINT32 ui32SyncCheckpoint; -+ SYNC_CHECKPOINT *psNewSyncCheckpoint; -+ PVRSRV_ERROR eError; -+ -+ /* Allocate sync checkpoints and place in the pool */ -+ for (ui32SyncCheckpoint=0; ui32SyncCheckpointpsContextCtl->ui32SyncCheckpointPoolCount, -+ psContext->psContextCtl->ui32SyncCheckpointPoolSize)); -+#endif -+ return PVRSRV_OK; -+} -+#endif /* if (SYNC_CHECKPOINT_POOL_LIMIT > 0) */ -+ -+PVRSRV_ERROR -+SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode, -+ PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext) -+{ -+ _SYNC_CHECKPOINT_CONTEXT *psContext = NULL; -+ _SYNC_CHECKPOINT_CONTEXT_CTL *psContextCtl = NULL; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_CHAR azTempName[PVRSRV_SYNC_NAME_LENGTH] = {0}; -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+ void *pvAppHintState = NULL; -+ const IMG_UINT32 ui32DefaultMaxPoolLog2Size = 8; -+#if defined(PDUMP) -+ /* Pdumps start with an empty pool to avoid extra work allocating checkpoints which might not be used. 
*/ -+ const IMG_UINT32 ui32DefaultInitPoolLog2Size = 0; -+#else -+ const IMG_UINT32 ui32DefaultInitPoolLog2Size = 7; -+#endif -+ IMG_UINT32 ui32MaxPoolLog2Size; -+ IMG_UINT32 ui32InitPoolLog2Size; -+ IMG_UINT32 ui32InitPoolSize = 0; -+#endif -+ IMG_UINT32 ui32MaxPoolSize = 0; -+ -+ PVR_LOG_RETURN_IF_FALSE((ppsSyncCheckpointContext != NULL), -+ "ppsSyncCheckpointContext invalid", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+ /* Read AppHints to determine the size of the sync checkpoint pool, if specified */ -+ OSCreateAppHintState(&pvAppHintState); -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, SyncCheckpointPoolMaxLog2, -+ &ui32DefaultMaxPoolLog2Size, &ui32MaxPoolLog2Size); -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, SyncCheckpointPoolInitLog2, -+ &ui32DefaultInitPoolLog2Size, &ui32InitPoolLog2Size); -+ OSFreeAppHintState(pvAppHintState); -+ -+ if (ui32MaxPoolLog2Size > 0) -+ { -+ ui32MaxPoolSize = 1 << ui32MaxPoolLog2Size; -+ if (ui32MaxPoolSize > SYNC_CHECKPOINT_POOL_LIMIT) -+ { -+ ui32MaxPoolSize = SYNC_CHECKPOINT_POOL_LIMIT; -+ } -+ } -+#endif -+ -+ psContext = OSAllocMem(sizeof(*psContext)); -+ PVR_LOG_GOTO_IF_NOMEM(psContext, eError, fail_alloc); /* Sets OOM error code */ -+ -+ /* psContextCtl includes allocation for the sync checkpoint pool) */ -+ psContextCtl = OSAllocMem(sizeof(*psContextCtl) + (sizeof(SYNC_CHECKPOINT*) * ui32MaxPoolSize)); -+ PVR_LOG_GOTO_IF_NOMEM(psContextCtl, eError, fail_alloc2); /* Sets OOM error code */ -+ -+ eError = OSSpinLockCreate(&psContextCtl->hDeferredCleanupListLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate:1", fail_create_deferred_cleanup_lock); -+ -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+ eError = OSSpinLockCreate(&psContextCtl->hSyncCheckpointPoolLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate:2", fail_create_pool_lock); -+#endif -+ -+ dllist_init(&psContextCtl->sDeferredCleanupListHead); -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+ psContextCtl->ui32SyncCheckpointPoolCount = 0; -+ psContextCtl->ui32SyncCheckpointPoolWp = 0; -+ psContextCtl->ui32SyncCheckpointPoolRp = 0; -+ psContextCtl->ui8PoolStateFlags = SYNC_CHECKPOINT_POOL_VALID; -+#endif -+ psContextCtl->psDeviceNode = (SHARED_DEV_CONNECTION)psDevNode; -+ -+ /* -+ Create the RA for sub-allocations of the sync checkpoints -+ -+ Note: -+ The import size doesn't matter here as the server will pass -+ back the blocksize when it does the import which overrides -+ what we specify here. -+ */ -+ OSSNPrintf(azTempName, PVRSRV_SYNC_NAME_LENGTH,"Sync Prim RA-%p", psContext); -+ psContext->psSubAllocRA = RA_Create(azTempName, -+ /* Params for imports */ -+ _Log2(sizeof(IMG_UINT32)), -+ RA_LOCKCLASS_2, -+ _SyncCheckpointBlockImport, -+ _SyncCheckpointBlockUnimport, -+ psContext, -+ RA_POLICY_DEFAULT); -+ PVR_LOG_GOTO_IF_NOMEM(psContext->psSubAllocRA, eError, fail_suballoc); -+ -+ /* -+ Create the span-management RA -+ -+ The RA requires that we work with linear spans. For our use -+ here we don't require this behaviour as we're always working -+ within offsets of blocks (imports). 
However, we need to keep -+ the RA happy so we create the "span" management RA which -+ ensures that all are imports are added to the RA in a linear -+ fashion -+ */ -+ OSSNPrintf(azTempName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim span RA-%p", psContext); -+ psContext->psSpanRA = RA_Create(azTempName, -+ /* Params for imports */ -+ 0, -+ RA_LOCKCLASS_1, -+ NULL, -+ NULL, -+ NULL, -+ RA_POLICY_DEFAULT); -+ PVR_LOG_GOTO_IF_NOMEM(psContext->psSpanRA, eError, fail_span); -+ -+ if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_CHECKPOINT_MEM, 0, NULL)) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call to RA_Add(span) failed"); -+ goto fail_span_add; -+ } -+ -+ OSAtomicWrite(&psContext->hRefCount, 1); -+ OSAtomicWrite(&psContext->hCheckpointCount, 0); -+ -+ psContext->psContextCtl = psContextCtl; -+ -+ *ppsSyncCheckpointContext = (PSYNC_CHECKPOINT_CONTEXT)psContext; -+#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: created psSyncCheckpointContext=<%p> (%d contexts exist)", -+ __func__, -+ (void*)*ppsSyncCheckpointContext, -+ ++gui32NumSyncCheckpointContexts)); -+#endif -+ -+#if defined(PDUMP) -+ dllist_init(&psContext->sSyncCheckpointBlockListHead); -+ -+ eError = OSLockCreate(&psContext->hSyncCheckpointBlockListLock); -+ PVR_GOTO_IF_ERROR(eError, fail_span_add); -+ -+ OSLockAcquire(psDevNode->hSyncCheckpointContextListLock); -+ dllist_add_to_tail(&psDevNode->sSyncCheckpointContextListHead, &psContext->sListNode); -+ OSLockRelease(psDevNode->hSyncCheckpointContextListLock); -+#endif -+ -+ psContextCtl->ui32CurrentInUseSyncCheckpoints = 0; -+ psContextCtl->ui32MaxInUseSyncCheckpoints = 0; -+ eError = OSSpinLockCreate(&psContextCtl->hSyncCheckpointStatsLock); -+ PVR_GOTO_IF_ERROR(eError, fail_span_stat); -+ -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+ /* Pre-populate the sync checkpoint pool, if specified */ -+ psContextCtl->ui32SyncCheckpointPoolSize = ui32MaxPoolSize; -+ -+ /* Ensure ui32MaxPoolSize is a POT and does not exceed SYNC_CHECKPOINT_POOL_LIMIT, -+ * and ui32InitPoolSize does not exceed ui32MaxPoolSize. 
-+ */ -+ if (psContextCtl->ui32SyncCheckpointPoolSize > SYNC_CHECKPOINT_POOL_LIMIT) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: AppHint SyncCheckpointPoolMaxLog2(%d) would exceed " -+ "SYNC_CHECKPOINT_POOL_LIMIT(%d) - limiting to %d", -+ __func__, ui32MaxPoolLog2Size, -+ SYNC_CHECKPOINT_POOL_LIMIT, SYNC_CHECKPOINT_POOL_LIMIT)); -+ psContextCtl->ui32SyncCheckpointPoolSize = SYNC_CHECKPOINT_POOL_LIMIT; -+ } -+ -+ if (ui32InitPoolLog2Size > 0) -+ { -+ ui32InitPoolSize = 1 << ui32InitPoolLog2Size; -+ } -+ if (ui32InitPoolSize > psContextCtl->ui32SyncCheckpointPoolSize) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s: AppHint SyncCheckpointPoolInitLog2(%d) would exceed " -+ "ui32SyncCheckpointPoolSize(%d) - limiting to %d", -+ __func__, ui32InitPoolLog2Size, -+ psContextCtl->ui32SyncCheckpointPoolSize, -+ psContextCtl->ui32SyncCheckpointPoolSize)); -+ ui32InitPoolSize = psContextCtl->ui32SyncCheckpointPoolSize; -+ } -+ -+ if (ui32InitPoolSize > 0) -+ { -+ eError = _PrepopulateSyncCheckpointPool(psContext, ui32InitPoolSize); -+ PVR_LOG_RETURN_IF_ERROR_VA(eError, "_PrepopulateSyncCheckpointPool(%d)", ui32InitPoolSize); -+ } -+#endif -+ -+ return PVRSRV_OK; -+fail_span_stat: -+#if defined(PDUMP) -+ OSLockDestroy(psContext->hSyncCheckpointBlockListLock); -+ psContext->hSyncCheckpointBlockListLock = NULL; -+#endif -+fail_span_add: -+ RA_Delete(psContext->psSpanRA); -+fail_span: -+ RA_Delete(psContext->psSubAllocRA); -+fail_suballoc: -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+ OSSpinLockDestroy(psContextCtl->hSyncCheckpointPoolLock); -+ psContextCtl->hSyncCheckpointPoolLock = NULL; -+fail_create_pool_lock: -+#endif -+ OSSpinLockDestroy(psContextCtl->hDeferredCleanupListLock); -+ psContextCtl->hDeferredCleanupListLock = NULL; -+fail_create_deferred_cleanup_lock: -+ OSFreeMem(psContextCtl); -+fail_alloc2: -+ OSFreeMem(psContext); -+fail_alloc: -+ return eError; -+} -+ -+PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointContext; -+ PVRSRV_DEVICE_NODE *psDevNode; -+ IMG_INT iRf = 0; -+ -+ PVR_LOG_RETURN_IF_FALSE((psSyncCheckpointContext != NULL), -+ "psSyncCheckpointContext invalid", -+ PVRSRV_ERROR_INVALID_PARAMS); -+ -+ psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psContextCtl->psDeviceNode; -+ -+#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s: destroying psSyncCheckpointContext=<%p> (now have %d contexts)", -+ __func__, -+ (void*)psSyncCheckpointContext, -+ --gui32NumSyncCheckpointContexts)); -+#endif -+ -+ _CheckDeferredCleanupList(psContext); -+ -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+ if (psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0) -+ { -+ IMG_UINT32 ui32NumFreedFromPool = _CleanCheckpointPool(psContext); -+ -+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s freed %d sync checkpoints that were still in the pool for context<%p>", -+ __func__, -+ ui32NumFreedFromPool, -+ (void*)psContext)); -+#else -+ PVR_UNREFERENCED_PARAMETER(ui32NumFreedFromPool); -+#endif -+ } -+#endif -+ -+ iRf = OSAtomicRead(&psContext->hCheckpointCount); -+ -+ if (iRf != 0) -+ { -+ OS_SPINLOCK_FLAGS uiFlags; -+ -+ /* Note, this is not a permanent error as the caller may retry later */ -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s <%p> attempted with active references (iRf=%d), " -+ "may be the result of a race", -+ __func__, -+ (void*)psContext, -+ iRf)); -+ -+ eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT; 
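The destroy path here refuses to tear the context down while checkpoints still hold it live: it logs the surviving checkpoints and returns a retryable error rather than forcing the free. A minimal standalone sketch of that refcount-gated, retryable destroy pattern, using C11 atomics and illustrative names (ctx_t, CTX_BUSY) rather than the driver's own API:

/* Sketch only: destruction is refused, not forced, while checkpoints
 * allocated from the context remain; the caller may retry later. */
#include <stdatomic.h>
#include <stdlib.h>

typedef struct ctx {
    atomic_int checkpoint_count;   /* live checkpoints allocated from this context */
    atomic_int refcount;           /* references to the context itself */
} ctx_t;

enum { CTX_OK = 0, CTX_BUSY = 1 };

static void ctx_put(ctx_t *ctx)
{
    /* Dropping the last reference frees the context. */
    if (atomic_fetch_sub(&ctx->refcount, 1) == 1)
        free(ctx);
}

int ctx_destroy(ctx_t *ctx)
{
    if (atomic_load(&ctx->checkpoint_count) != 0)
        return CTX_BUSY;           /* not fatal: caller may retry once work drains */

    ctx_put(ctx);                  /* drop the creation reference */
    return CTX_OK;
}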
-+ -+ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); -+ { -+ DLLIST_NODE *psNode, *psNext; -+ -+ dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) -+ { -+ SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sListNode); -+ bool bDeferredFree = dllist_node_is_in_list(&psSyncCheckpoint->sDeferredFreeListNode); -+#if defined(DEBUG) -+ IMG_UINT32 ui32State = psSyncCheckpoint->psSyncCheckpointFwObj->ui32State; -+#endif -+ -+ /* Line below avoids build error in release builds (where PVR_DPF is not defined) */ -+ PVR_UNREFERENCED_PARAMETER(bDeferredFree); -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s syncCheckpoint<%p> ID=%d, %s, refs=%d, state=%s, fwaddr=%#08x, enqCount:%d, FWCount:%d %s", -+ __func__, -+ (void*)psSyncCheckpoint, -+ psSyncCheckpoint->ui32UID, -+ psSyncCheckpoint->azName, -+ OSAtomicRead(&psSyncCheckpoint->hRefCount), -+ ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED ? -+ "PVRSRV_SYNC_CHECKPOINT_SIGNALLED" : -+ ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE ? -+ "PVRSRV_SYNC_CHECKPOINT_ACTIVE" : "PVRSRV_SYNC_CHECKPOINT_ERRORED", -+ psSyncCheckpoint->ui32FWAddr, -+ OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount), -+ psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount, -+ bDeferredFree ? "(deferred free)" : "")); -+ -+#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1) -+ gui32NumSyncCheckpointContexts++; -+#endif -+ } -+ } -+ OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); -+ } -+ else -+ { -+ SyncCheckpointContextUnref(psSyncCheckpointContext); -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, -+ PVRSRV_TIMELINE hTimeline, -+ PVRSRV_FENCE hFence, -+ const IMG_CHAR *pszCheckpointName, -+ PSYNC_CHECKPOINT *ppsSyncCheckpoint) -+{ -+ SYNC_CHECKPOINT *psNewSyncCheckpoint = NULL; -+ _SYNC_CHECKPOINT_CONTEXT *psSyncContextInt = (_SYNC_CHECKPOINT_CONTEXT*)psSyncContext; -+ PVRSRV_DEVICE_NODE *psDevNode; -+ OS_SPINLOCK_FLAGS uiFlags; -+ PVRSRV_ERROR eError; -+ -+ PVR_LOG_RETURN_IF_FALSE((psSyncContext != NULL), "psSyncContext invalid", PVRSRV_ERROR_INVALID_PARAMS); -+ PVR_LOG_RETURN_IF_FALSE((ppsSyncCheckpoint != NULL), "ppsSyncCheckpoint invalid", PVRSRV_ERROR_INVALID_PARAMS); -+ -+ psDevNode = (PVRSRV_DEVICE_NODE *)psSyncContextInt->psContextCtl->psDeviceNode; -+ -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) -+ PVR_DPF((PVR_DBG_WARNING, "%s Entry, Getting checkpoint from pool", -+ __func__)); -+#endif -+ psNewSyncCheckpoint = _GetCheckpointFromPool(psSyncContextInt); -+ if (!psNewSyncCheckpoint) -+ { -+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s checkpoint pool empty - will have to allocate", -+ __func__)); -+#endif -+ } -+#endif -+ /* If pool is empty (or not defined) alloc the new sync checkpoint */ -+ if (!psNewSyncCheckpoint) -+ { -+ eError = _AllocSyncCheckpoint(psSyncContextInt, &psNewSyncCheckpoint); -+ PVR_LOG_GOTO_IF_NOMEM(psNewSyncCheckpoint, eError, fail_alloc); /* Sets OOM error code */ -+ } -+ -+ OSSpinLockAcquire(psSyncContextInt->psContextCtl->hSyncCheckpointStatsLock, uiFlags); -+ if (++psSyncContextInt->psContextCtl->ui32CurrentInUseSyncCheckpoints > psSyncContextInt->psContextCtl->ui32MaxInUseSyncCheckpoints) -+ { -+ psSyncContextInt->psContextCtl->ui32MaxInUseSyncCheckpoints = psSyncContextInt->psContextCtl->ui32CurrentInUseSyncCheckpoints; -+ } -+ 
OSSpinLockRelease(psSyncContextInt->psContextCtl->hSyncCheckpointStatsLock, uiFlags); -+ -+ psNewSyncCheckpoint->hTimeline = hTimeline; -+ OSAtomicWrite(&psNewSyncCheckpoint->hRefCount, 1); -+ OSAtomicWrite(&psNewSyncCheckpoint->hEnqueuedCCBCount, 0); -+ psNewSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount = 0; -+ psNewSyncCheckpoint->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_ACTIVE; -+ psNewSyncCheckpoint->uiProcess = OSGetCurrentClientProcessIDKM(); -+ OSCachedMemSet(&psNewSyncCheckpoint->sDeferredFreeListNode, 0, sizeof(psNewSyncCheckpoint->sDeferredFreeListNode)); -+ -+ if (pszCheckpointName) -+ { -+ /* Copy over the checkpoint name annotation */ -+ OSStringLCopy(psNewSyncCheckpoint->azName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH); -+ } -+ else -+ { -+ /* No sync checkpoint name annotation */ -+ psNewSyncCheckpoint->azName[0] = '\0'; -+ } -+ -+ /* Store sync checkpoint FW address in PRGXFWIF_UFO_ADDR struct */ -+ psNewSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint); -+ -+ /* Assign unique ID to this sync checkpoint */ -+ psNewSyncCheckpoint->ui32UID = g_SyncCheckpointUID++; -+ -+#if defined(PDUMP) -+ /* Flushing deferred fence signals to pdump */ -+ MISRHandler_PdumpDeferredSyncSignalPoster(psDevNode); -+ -+ _SyncCheckpointAllocPDump(psDevNode, psNewSyncCheckpoint); -+#endif -+ -+ RGXSRV_HWPERF_ALLOC_SYNC_CP(psDevNode, psNewSyncCheckpoint->hTimeline, -+ OSGetCurrentClientProcessIDKM(), -+ hFence, -+ psNewSyncCheckpoint->ui32FWAddr, -+ psNewSyncCheckpoint->azName, -+ sizeof(psNewSyncCheckpoint->azName)); -+ -+ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) -+ { -+ IMG_CHAR szChkptName[PVRSRV_SYNC_NAME_LENGTH]; -+ -+ if (pszCheckpointName) -+ { -+ /* Copy the checkpoint name annotation into a fixed-size array */ -+ OSStringLCopy(szChkptName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH); -+ } -+ else -+ { -+ /* No checkpoint name annotation */ -+ szChkptName[0] = 0; -+ } -+ /* record this sync */ -+ eError = _SyncCheckpointRecordAdd(&psNewSyncCheckpoint->hRecord, -+ psNewSyncCheckpoint->psSyncCheckpointBlock, -+ psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr, -+ _SyncCheckpointGetOffset(psNewSyncCheckpoint), -+ psNewSyncCheckpoint->ui32UID, -+ OSStringNLength(szChkptName, PVRSRV_SYNC_NAME_LENGTH), -+ szChkptName, (PSYNC_CHECKPOINT)psNewSyncCheckpoint); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync checkpoint record \"%s\" (%s)", -+ __func__, -+ szChkptName, -+ PVRSRVGetErrorString(eError))); -+ psNewSyncCheckpoint->hRecord = NULL; -+ /* note the error but continue without affecting driver operation */ -+ } -+ } -+ -+ /* Add the sync checkpoint to the device list */ -+ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); -+ dllist_add_to_head(&psDevNode->sSyncCheckpointSyncsList, -+ &psNewSyncCheckpoint->sListNode); -+ OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); -+ -+ *ppsSyncCheckpoint = (PSYNC_CHECKPOINT)psNewSyncCheckpoint; -+ -+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s Exit(Ok), psNewSyncCheckpoint->ui32UID=%d <%p>", -+ __func__, -+ psNewSyncCheckpoint->ui32UID, -+ (void*)psNewSyncCheckpoint)); -+#endif -+ return PVRSRV_OK; -+ -+fail_alloc: -+ return eError; -+} -+ -+static void SyncCheckpointUnref(SYNC_CHECKPOINT *psSyncCheckpointInt) -+{ -+ _SYNC_CHECKPOINT_CONTEXT *psContext; -+ PVRSRV_DEVICE_NODE *psDevNode; -+ OS_SPINLOCK_FLAGS 
uiFlags; -+ -+ psContext = psSyncCheckpointInt->psSyncCheckpointBlock->psContext; -+ psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psContextCtl->psDeviceNode; -+ -+ /* -+ * Without this reference, the context may be destroyed as soon -+ * as _FreeSyncCheckpoint is called, but the context is still -+ * needed when _CheckDeferredCleanupList is called at the end -+ * of this function. -+ */ -+ SyncCheckpointContextRef((PSYNC_CHECKPOINT_CONTEXT)psContext); -+ -+#if defined(DEBUG) -+ PVR_ASSERT(psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE); -+#endif -+ if (!OSAtomicRead(&psSyncCheckpointInt->hRefCount)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "SyncCheckpointUnref sync checkpoint already freed")); -+ } -+ else if (0 == OSAtomicDecrement(&psSyncCheckpointInt->hRefCount)) -+ { -+ /* If the firmware has serviced all enqueued references to the sync checkpoint, free it */ -+ if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount == -+ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount))) -+ { -+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s No outstanding FW ops and hRef is zero, deleting SyncCheckpoint..", -+ __func__)); -+#endif -+ PVRSRV_ERROR eError; -+ if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) -+ && psSyncCheckpointInt->hRecord) -+ { -+ /* remove this sync record */ -+ eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord); -+ PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove"); -+ } -+ -+ /* Remove the sync checkpoint from the global list */ -+ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); -+ dllist_remove_node(&psSyncCheckpointInt->sListNode); -+ OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); -+ -+ RGXSRV_HWPERF_FREE(psDevNode, SYNC_CP, psSyncCheckpointInt->ui32FWAddr); -+ -+ OSSpinLockAcquire(psContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags); -+ psContext->psContextCtl->ui32CurrentInUseSyncCheckpoints--; -+ OSSpinLockRelease(psContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags); -+ -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s attempting to return sync checkpoint to the pool", -+ __func__)); -+#endif -+ if (!_PutCheckpointInPool(psSyncCheckpointInt)) -+#endif -+ { -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s pool is full, so just free it", -+ __func__)); -+#endif -+#endif -+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), psSubAllocRA=<%p>, ui32AllocatedAddr=0x%llx", -+ __func__, -+ psSyncCheckpointInt->ui32UID, -+ (void*)psSyncCheckpointInt, -+ (void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA, -+ psSyncCheckpointInt->uiAllocatedAddr)); -+#endif -+ _FreeSyncCheckpoint(psSyncCheckpointInt); -+ } -+ } -+ else -+ { -+ OS_SPINLOCK_FLAGS uiFlags; -+#if ((ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s Outstanding FW ops hEnqueuedCCBCount=%d != FwObj->ui32FwRefCount=%d " -+ "- DEFERRING CLEANUP psSyncCheckpoint(ID:%d)<%p>", -+ __func__, -+ OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount), -+ 
psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount, -+ psSyncCheckpointInt->ui32UID, -+ (void*)psSyncCheckpointInt)); -+#endif -+ /* Add the sync checkpoint to the deferred free list */ -+ OSSpinLockAcquire(psContext->psContextCtl->hDeferredCleanupListLock, uiFlags); -+ dllist_add_to_tail(&psContext->psContextCtl->sDeferredCleanupListHead, -+ &psSyncCheckpointInt->sDeferredFreeListNode); -+ OSSpinLockRelease(psContext->psContextCtl->hDeferredCleanupListLock, uiFlags); -+ } -+ } -+ else -+ { -+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s psSyncCheckpoint(ID:%d)<%p>, hRefCount decremented to %d", -+ __func__, -+ psSyncCheckpointInt->ui32UID, -+ (void*)psSyncCheckpointInt, -+ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount)))); -+#endif -+ } -+ -+ /* See if any sync checkpoints in the deferred cleanup list can be freed */ -+ _CheckDeferredCleanupList(psContext); -+ -+ SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext); -+} -+ -+void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint) -+{ -+ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; -+ -+ PVR_LOG_RETURN_VOID_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); -+ -+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) -+#if defined(DEBUG) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s Entry, psSyncCheckpoint(ID:%d)<%p>, hRefCount=%d, psSyncCheckpoint->ui32ValidationCheck=0x%x", -+ __func__, -+ psSyncCheckpointInt->ui32UID, -+ (void*)psSyncCheckpoint, -+ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount)), -+ psSyncCheckpointInt->ui32ValidationCheck)); -+#else -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s Entry, psSyncCheckpoint(ID:%d)<%p>, hRefCount=%d", -+ __func__, -+ psSyncCheckpointInt->ui32UID, -+ (void*)psSyncCheckpoint, -+ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount)))); -+#endif -+#endif -+ SyncCheckpointUnref(psSyncCheckpointInt); -+} -+ -+void -+SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags) -+{ -+ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; -+ -+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); -+ -+ if (psSyncCheckpointInt) -+ { -+ IMG_UINT32 ui32State = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; -+ -+ PVR_LOG_IF_FALSE((ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), -+ "psSyncCheckpoint already signalled"); -+ -+ if (ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) -+ { -+#if defined(SUPPORT_RGX) -+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psContext->psContextCtl->psDeviceNode->pvDevice; -+ -+ RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags); -+#endif -+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; -+ -+#if defined(SUPPORT_RGX) && defined(PDUMP) -+ _SyncCheckpointUpdatePDump(psContext->psContextCtl->psDeviceNode, psSyncCheckpointInt, PVRSRV_SYNC_CHECKPOINT_SIGNALLED, ui32FenceSyncFlags); -+#endif -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s asked to set PVRSRV_SYNC_CHECKPOINT_SIGNALLED(%d) for (psSyncCheckpointInt->ui32UID=%d), " -+ "when value is already %d", -+ __func__, -+ PVRSRV_SYNC_CHECKPOINT_SIGNALLED, -+ psSyncCheckpointInt->ui32UID, -+ ui32State)); -+ } -+ } -+} -+ -+void -+SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint) -+{ -+ SYNC_CHECKPOINT *psSyncCheckpointInt = 
(SYNC_CHECKPOINT*)psSyncCheckpoint; -+ -+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); -+ -+ if (psSyncCheckpointInt) -+ { -+ IMG_UINT32 ui32State = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; -+ -+ PVR_LOG_IF_FALSE((ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), -+ "psSyncCheckpoint already signalled"); -+ -+ if (ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) -+ { -+#if defined(SUPPORT_RGX) -+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psContext->psContextCtl->psDeviceNode->pvDevice; -+ -+ RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, PVRSRV_FENCE_FLAG_NONE); -+#endif -+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; -+ } -+ else -+ { -+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s asked to set PVRSRV_SYNC_CHECKPOINT_SIGNALLED(%d) for (psSyncCheckpointInt->ui32UID=%d), " -+ "when value is already %d", -+ __func__, -+ PVRSRV_SYNC_CHECKPOINT_SIGNALLED, -+ psSyncCheckpointInt->ui32UID, -+ ui32State)); -+#endif -+ } -+ } -+} -+ -+void -+SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags) -+{ -+ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; -+ -+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); -+ -+ if (psSyncCheckpointInt) -+ { -+ IMG_UINT32 ui32State = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; -+ -+ PVR_LOG_IF_FALSE((ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), -+ "psSyncCheckpoint already signalled"); -+ -+ if (ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) -+ { -+#if defined(SUPPORT_RGX) -+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psContext->psContextCtl->psDeviceNode->pvDevice; -+ if (!(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) -+ { -+ RGX_HWPERF_UFO_DATA_ELEMENT sSyncData; -+ -+ sSyncData.sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr(psSyncCheckpoint); -+ sSyncData.sUpdate.ui32OldValue = ui32State; -+ sSyncData.sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_ERRORED; -+ -+ RGXSRV_HWPERF_UFO(psDevInfo, RGX_HWPERF_UFO_EV_UPDATE, &sSyncData, -+ (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? 
IMG_FALSE : IMG_TRUE); -+ } -+#endif -+ -+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_ERRORED; -+ -+#if defined(SUPPORT_RGX) && defined(PDUMP) -+ _SyncCheckpointUpdatePDump(psContext->psContextCtl->psDeviceNode, psSyncCheckpointInt, PVRSRV_SYNC_CHECKPOINT_ERRORED, ui32FenceSyncFlags); -+#endif -+ } -+ } -+} -+ -+IMG_BOOL SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags) -+{ -+ IMG_BOOL bRet = IMG_FALSE; -+ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; -+ -+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); -+ -+ if (psSyncCheckpointInt) -+ { -+ IMG_UINT32 ui32State = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; -+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext; -+#if defined(SUPPORT_RGX) -+ PVRSRV_RGXDEV_INFO *psDevInfo = psContext->psContextCtl->psDeviceNode->pvDevice; -+ -+ RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags); -+#endif -+ -+ switch (ui32State) -+ { -+ case PVRSRV_SYNC_CHECKPOINT_UNDEF: -+ case PVRSRV_SYNC_CHECKPOINT_ACTIVE: -+ { -+ break; -+ } -+ case PVRSRV_SYNC_CHECKPOINT_SIGNALLED: -+ case PVRSRV_SYNC_CHECKPOINT_ERRORED: -+ { -+ bRet = IMG_TRUE; -+ break; -+ } -+ default: -+ { -+ -+ PVRSRV_DEVICE_NODE *psDeviceNode = psContext->psContextCtl->psDeviceNode; -+ -+#if defined(SUPPORT_RGX) -+ RGXUpdateHealthStatus(psDeviceNode, IMG_FALSE); -+#endif -+ if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_PCI_ERROR) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: PCI error - state=0x%x, ID=%d, %s, fwaddr=%#08x", -+ __func__, ui32State, psSyncCheckpoint->ui32UID, -+ psSyncCheckpoint->azName, psSyncCheckpoint->ui32FWAddr)); -+ bRet = IMG_TRUE; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: unknown state (0x%x) - ID=%d, %s, fwaddr=%#08x", -+ __func__, ui32State, psSyncCheckpoint->ui32UID, -+ psSyncCheckpoint->azName, psSyncCheckpoint->ui32FWAddr)); -+ } -+ break; -+ } -+ -+ } -+ -+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s called for psSyncCheckpoint<%p>, returning %d", -+ __func__, -+ (void*)psSyncCheckpoint, -+ bRet)); -+#endif -+ } -+ return bRet; -+} -+ -+IMG_BOOL -+SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags) -+{ -+ IMG_BOOL bRet = IMG_FALSE; -+ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; -+ -+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); -+ -+ if (psSyncCheckpointInt) -+ { -+#if defined(SUPPORT_RGX) -+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext; -+ PVRSRV_RGXDEV_INFO *psDevInfo = psContext->psContextCtl->psDeviceNode->pvDevice; -+ -+ RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags); -+#endif -+ bRet = (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED); -+ -+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s called for psSyncCheckpoint<%p>, returning %d", -+ __func__, -+ (void*)psSyncCheckpoint, -+ bRet)); -+#endif -+ } -+ return bRet; -+} -+ -+const IMG_CHAR * -+SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint) -+{ -+ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; -+ -+ PVR_LOG_RETURN_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint 
invalid", "Null"); -+ -+ switch (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State) -+ { -+ case PVRSRV_SYNC_CHECKPOINT_SIGNALLED: -+ return "Signalled"; -+ case PVRSRV_SYNC_CHECKPOINT_ACTIVE: -+ return "Active"; -+ case PVRSRV_SYNC_CHECKPOINT_ERRORED: -+ return "Errored"; -+ case PVRSRV_SYNC_CHECKPOINT_UNDEF: -+ return "Undefined"; -+ default: -+ return "Unknown"; -+ } -+} -+ -+PVRSRV_ERROR -+SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint) -+{ -+ PVRSRV_ERROR eRet = PVRSRV_OK; -+ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psSyncCheckpoint, "psSyncCheckpoint"); -+ -+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)", -+ __func__, -+ psSyncCheckpointInt, -+ OSAtomicRead(&psSyncCheckpointInt->hRefCount), -+ OSAtomicRead(&psSyncCheckpointInt->hRefCount)+1, -+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); -+#endif -+ OSAtomicIncrement(&psSyncCheckpointInt->hRefCount); -+ -+ return eRet; -+} -+ -+PVRSRV_ERROR -+SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint) -+{ -+ PVRSRV_ERROR eRet = PVRSRV_OK; -+ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; -+ -+ PVR_LOG_RETURN_IF_INVALID_PARAM(psSyncCheckpoint, "psSyncCheckpoint"); -+ -+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)", -+ __func__, -+ psSyncCheckpointInt, -+ OSAtomicRead(&psSyncCheckpointInt->hRefCount), -+ OSAtomicRead(&psSyncCheckpointInt->hRefCount)-1, -+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); -+#endif -+ SyncCheckpointUnref(psSyncCheckpointInt); -+ -+ return eRet; -+} -+ -+void -+SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint) -+{ -+ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; -+ -+ PVR_LOG_RETURN_VOID_IF_FALSE(psSyncCheckpointInt != NULL, "psSyncCheckpoint"); -+ -+#if defined(NO_HARDWARE) -+ PVR_UNREFERENCED_PARAMETER(psSyncCheckpointInt); -+#else /* !defined(NO_HARDWARE) */ -+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)", -+ __func__, -+ (void*)psSyncCheckpointInt, -+ OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount), -+ OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)+1, -+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); -+#endif -+ OSAtomicIncrement(&psSyncCheckpointInt->hEnqueuedCCBCount); -+#endif -+} -+ -+PRGXFWIF_UFO_ADDR* -+SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint) -+{ -+ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; -+ -+ PVR_LOG_GOTO_IF_FALSE((psSyncCheckpointInt != NULL), "psSyncCheckpoint invalid", invalid_chkpt); -+ -+#if defined(DEBUG) -+ if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE) -+#endif -+ { -+ return &psSyncCheckpointInt->sCheckpointUFOAddr; -+ } -+#if defined(DEBUG) -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x", -+ __func__, -+ (void*)psSyncCheckpointInt, -+ psSyncCheckpointInt->ui32ValidationCheck)); -+ } -+#endif -+ -+invalid_chkpt: -+ return NULL; -+} -+ -+IMG_UINT32 -+SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint) -+{ -+ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; -+ IMG_UINT32 ui32Ret = 0; -+ -+ 
PVR_LOG_GOTO_IF_FALSE((psSyncCheckpointInt != NULL), "psSyncCheckpoint invalid", invalid_chkpt); -+ -+#if defined(DEBUG) -+ if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE) -+#endif -+ { -+ ui32Ret = psSyncCheckpointInt->ui32FWAddr; -+ } -+#if defined(DEBUG) -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x", -+ __func__, -+ (void*)psSyncCheckpointInt, -+ psSyncCheckpointInt->ui32ValidationCheck)); -+ } -+#endif -+ -+invalid_chkpt: -+ return ui32Ret; -+} -+ -+IMG_UINT32 -+SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint) -+{ -+ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; -+ IMG_UINT32 ui32Ret = 0U; -+ -+ PVR_LOG_GOTO_IF_FALSE((psSyncCheckpointInt != NULL), "psSyncCheckpoint invalid", invalid_chkpt); -+ -+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s returning ID for sync checkpoint<%p>", -+ __func__, -+ (void*)psSyncCheckpointInt)); -+#if defined(DEBUG) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s (validationCheck=0x%x)", -+ __func__, -+ psSyncCheckpointInt->ui32ValidationCheck)); -+#endif -+#endif -+ ui32Ret = psSyncCheckpointInt->ui32UID; -+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s (ui32UID=0x%x)", -+ __func__, -+ psSyncCheckpointInt->ui32UID)); -+#endif -+ -+invalid_chkpt: -+ return ui32Ret; -+} -+ -+PVRSRV_TIMELINE -+SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint) -+{ -+ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; -+ -+ PVR_LOG_GOTO_IF_FALSE((psSyncCheckpointInt != NULL), "psSyncCheckpoint invalid", invalid_chkpt); -+ -+ return psSyncCheckpointInt->hTimeline; -+ -+invalid_chkpt: -+ return 0; -+} -+ -+ -+IMG_UINT32 -+SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint) -+{ -+ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; -+ PVR_LOG_RETURN_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0); -+ -+ return OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount); -+} -+ -+IMG_UINT32 -+SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint) -+{ -+ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; -+ PVR_LOG_RETURN_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0); -+ -+ return OSAtomicRead(&psSyncCheckpointInt->hRefCount); -+} -+ -+IMG_PID -+SyncCheckpointGetCreator(PSYNC_CHECKPOINT psSyncCheckpoint) -+{ -+ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; -+ PVR_LOG_RETURN_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0); -+ -+ return psSyncCheckpointInt->uiProcess; -+} -+ -+IMG_UINT32 SyncCheckpointStateFromUFO(PPVRSRV_DEVICE_NODE psDevNode, -+ IMG_UINT32 ui32FwAddr) -+{ -+ SYNC_CHECKPOINT *psSyncCheckpointInt; -+ PDLLIST_NODE psNode, psNext; -+ IMG_UINT32 ui32State = 0; -+ OS_SPINLOCK_FLAGS uiFlags; -+ -+ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); -+ dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) -+ { -+ psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sListNode); -+ if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt)) -+ { -+ ui32State = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; -+ break; -+ } -+ } -+ OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); -+ return ui32State; -+} -+ -+void SyncCheckpointErrorFromUFO(PPVRSRV_DEVICE_NODE psDevNode, -+ IMG_UINT32 ui32FwAddr) -+{ -+ SYNC_CHECKPOINT 
*psSyncCheckpointInt; -+ PDLLIST_NODE psNode, psNext; -+ OS_SPINLOCK_FLAGS uiFlags; -+ -+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s called to error UFO with ui32FWAddr=%d", -+ __func__, -+ ui32FwAddr)); -+#endif -+ -+ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); -+ dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) -+ { -+ psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sListNode); -+ if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt)) -+ { -+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s calling SyncCheckpointError for sync checkpoint <%p>", -+ __func__, -+ (void*)psSyncCheckpointInt)); -+#endif -+ /* Mark as errored */ -+ SyncCheckpointError((PSYNC_CHECKPOINT)psSyncCheckpointInt, PVRSRV_FENCE_FLAG_NONE); -+ break; -+ } -+ } -+ OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); -+} -+ -+void SyncCheckpointRollbackFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr) -+{ -+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s called to rollback UFO with ui32FWAddr=0x%x", -+ __func__, -+ ui32FwAddr)); -+#endif -+#if !defined(NO_HARDWARE) -+ { -+ SYNC_CHECKPOINT *psSyncCheckpointInt = NULL; -+ PDLLIST_NODE psNode = NULL, psNext = NULL; -+ OS_SPINLOCK_FLAGS uiFlags; -+ -+ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); -+ dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) -+ { -+ psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sListNode); -+ if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt)) -+ { -+#if ((ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)) || (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s called for psSyncCheckpointInt<%p> %d->%d", -+ __func__, -+ (void *) psSyncCheckpointInt, -+ OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount), -+ OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount) - 1)); -+#endif -+ OSAtomicDecrement(&psSyncCheckpointInt->hEnqueuedCCBCount); -+ break; -+ } -+ } -+ OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); -+ } -+#endif -+} -+ -+static void _SyncCheckpointState(PDLLIST_NODE psNode, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sListNode); -+ -+ if (psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) -+ { -+ PVR_DUMPDEBUG_LOG("\t- ID = %d, FWAddr = 0x%08x, r%d:e%d:f%d: %s", -+ psSyncCheckpoint->ui32UID, -+ psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + -+ _SyncCheckpointGetOffset(psSyncCheckpoint), -+ OSAtomicRead(&psSyncCheckpoint->hRefCount), -+ OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount), -+ psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount, -+ psSyncCheckpoint->azName); -+ } -+} -+ -+static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, -+ IMG_UINT32 ui32VerbLevel, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle; -+ DLLIST_NODE *psNode, *psNext; -+ OS_SPINLOCK_FLAGS uiFlags; -+ -+ if (psDevNode->hSyncCheckpointContext == NULL) return; -+ -+ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) -+ { -+ PVR_DUMPDEBUG_LOG("------[ Active Sync Checkpoints ]------"); -+ -+ 
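The UFO helpers and the debug dump above all follow the same shape: walk the device's intrusive checkpoint list under a lock and resolve each node back to its containing checkpoint. A standalone sketch of that lookup-by-firmware-address walk; a userspace mutex stands in for the driver's spinlock, and list_head/chk_t are illustrative types:

/* Sketch: intrusive circular list walked under a lock, entries recovered
 * with container_of(), matching entry's state returned. */
#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

typedef struct chk {
    uint32_t fw_addr;
    uint32_t state;
    struct list_head node;     /* linkage into the per-device list */
} chk_t;

/* Per-device list of live checkpoints and the lock protecting it. */
static struct list_head g_list = { &g_list, &g_list };
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

uint32_t state_from_fw_addr(uint32_t fw_addr)
{
    uint32_t state = 0;        /* 0 = not found / undefined */
    struct list_head *p;

    pthread_mutex_lock(&g_lock);
    for (p = g_list.next; p != &g_list; p = p->next) {
        chk_t *c = container_of(p, chk_t, node);
        if (c->fw_addr == fw_addr) {
            state = c->state;
            break;
        }
    }
    pthread_mutex_unlock(&g_lock);
    return state;
}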
OSSpinLockAcquire(psDevNode->hSyncCheckpointContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags); -+ PVR_DUMPDEBUG_LOG("(SyncCP Counts: InUse:%d Max:%d)", -+ psDevNode->hSyncCheckpointContext->psContextCtl->ui32CurrentInUseSyncCheckpoints, -+ psDevNode->hSyncCheckpointContext->psContextCtl->ui32MaxInUseSyncCheckpoints); -+ OSSpinLockRelease(psDevNode->hSyncCheckpointContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags); -+ -+ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); -+ dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) -+ { -+ _SyncCheckpointState(psNode, pfnDumpDebugPrintf, pvDumpDebugFile); -+ } -+ OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); -+ } -+} -+ -+PVRSRV_ERROR -+SyncCheckpointInit(PPVRSRV_DEVICE_NODE psDevNode) -+{ -+ PVRSRV_ERROR eError; -+#if defined(PDUMP) -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ psDevInfo = psDevNode->pvDevice; -+#endif -+ -+ eError = OSSpinLockCreate(&psDevNode->hSyncCheckpointListLock); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ dllist_init(&psDevNode->sSyncCheckpointSyncsList); -+ -+ eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDevNode->hSyncCheckpointNotify, -+ psDevNode, -+ _SyncCheckpointDebugRequest, -+ DEBUG_REQUEST_SYNCCHECKPOINT, -+ (PVRSRV_DBGREQ_HANDLE)psDevNode); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) -+ { -+ _SyncCheckpointRecordListInit(psDevNode); -+ } -+ -+#if defined(PDUMP) -+ eError = OSSpinLockCreate(&psDevInfo->hSyncCheckpointSignalSpinLock); -+ if (eError != PVRSRV_OK) -+ { -+ psDevInfo->hSyncCheckpointSignalSpinLock = NULL; -+ goto e1; -+ } -+ -+ eError = OSLockCreate(&psDevNode->hSyncCheckpointSignalLock); -+ if (eError != PVRSRV_OK) -+ { -+ psDevNode->hSyncCheckpointSignalLock = NULL; -+ goto e2; -+ } -+ -+ psDevNode->pui8DeferredSyncCPSignal = OSAllocMem(SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL -+ * sizeof(_SYNC_CHECKPOINT_DEFERRED_SIGNAL)); -+ PVR_GOTO_IF_NOMEM(psDevNode->pui8DeferredSyncCPSignal, eError, e3); -+ -+ psDevNode->ui16SyncCPWriteIdx = 0; -+ psDevNode->ui16SyncCPReadIdx = 0; -+ -+ eError = OSInstallMISR(&psDevNode->pvSyncCPMISR, -+ MISRHandler_PdumpDeferredSyncSignalPoster, -+ psDevNode, -+ "RGX_PdumpDeferredSyncSignalPoster"); -+ PVR_GOTO_IF_ERROR(eError, e4); -+ -+ eError = OSLockCreate(&psDevNode->hSyncCheckpointContextListLock); -+ if (eError != PVRSRV_OK) -+ { -+ psDevNode->hSyncCheckpointContextListLock = NULL; -+ goto e5; -+ } -+ -+ -+ dllist_init(&psDevNode->sSyncCheckpointContextListHead); -+ -+ eError = PDumpRegisterTransitionCallbackFenceSync(psDevNode, -+ _SyncCheckpointPDumpTransition, -+ &psDevNode->hTransition); -+ if (eError != PVRSRV_OK) -+ { -+ psDevNode->hTransition = NULL; -+ goto e6; -+ } -+#endif -+ -+ return PVRSRV_OK; -+ -+#if defined(PDUMP) -+e6: -+ OSLockDestroy(psDevNode->hSyncCheckpointContextListLock); -+ psDevNode->hSyncCheckpointContextListLock = NULL; -+e5: -+ (void) OSUninstallMISR(psDevNode->pvSyncCPMISR); -+ psDevNode->pvSyncCPMISR = NULL; -+e4: -+ if (psDevNode->pui8DeferredSyncCPSignal) -+ { -+ OSFreeMem(psDevNode->pui8DeferredSyncCPSignal); -+ psDevNode->pui8DeferredSyncCPSignal = NULL; -+ } -+e3: -+ OSLockDestroy(psDevNode->hSyncCheckpointSignalLock); -+ psDevNode->hSyncCheckpointSignalLock = NULL; -+e2: -+ OSSpinLockDestroy(psDevInfo->hSyncCheckpointSignalSpinLock); -+ psDevInfo->hSyncCheckpointSignalSpinLock = NULL; -+e1: -+ _SyncCheckpointRecordListDeinit(psDevNode); -+#endif -+e0: -+ OSSpinLockDestroy(psDevNode->hSyncCheckpointListLock); 
-+ psDevNode->hSyncCheckpointListLock = NULL; -+ -+ return eError; -+} -+ -+void SyncCheckpointDeinit(PPVRSRV_DEVICE_NODE psDevNode) -+{ -+#if defined(PDUMP) -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ psDevInfo = psDevNode->pvDevice; -+ PDumpUnregisterTransitionCallbackFenceSync(psDevNode->hTransition); -+ psDevNode->hTransition = NULL; -+ -+ if (psDevNode->hSyncCheckpointContextListLock) -+ { -+ OSLockDestroy(psDevNode->hSyncCheckpointContextListLock); -+ psDevNode->hSyncCheckpointContextListLock = NULL; -+ } -+ -+ if (psDevNode->pvSyncCPMISR) -+ { -+ (void) OSUninstallMISR(psDevNode->pvSyncCPMISR); -+ psDevNode->pvSyncCPMISR = NULL; -+ } -+ -+ if (psDevNode->pui8DeferredSyncCPSignal) -+ { -+ OSFreeMem(psDevNode->pui8DeferredSyncCPSignal); -+ psDevNode->pui8DeferredSyncCPSignal = NULL; -+ } -+ if (psDevNode->hSyncCheckpointSignalLock) -+ { -+ OSLockDestroy(psDevNode->hSyncCheckpointSignalLock); -+ psDevNode->hSyncCheckpointSignalLock = NULL; -+ } -+ if (psDevInfo->hSyncCheckpointSignalSpinLock) -+ { -+ OSSpinLockDestroy(psDevInfo->hSyncCheckpointSignalSpinLock); -+ psDevInfo->hSyncCheckpointSignalSpinLock = NULL; -+ } -+#endif -+ -+ PVRSRVUnregisterDeviceDbgRequestNotify(psDevNode->hSyncCheckpointNotify); -+ psDevNode->hSyncCheckpointNotify = NULL; -+ OSSpinLockDestroy(psDevNode->hSyncCheckpointListLock); -+ psDevNode->hSyncCheckpointListLock = NULL; -+ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) -+ { -+ _SyncCheckpointRecordListDeinit(psDevNode); -+ } -+} -+ -+void SyncCheckpointRecordLookup(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr, -+ IMG_CHAR * pszSyncInfo, size_t len) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ IMG_BOOL bFound = IMG_FALSE; -+ -+ if (!pszSyncInfo) -+ { -+ return; -+ } -+ -+ pszSyncInfo[0] = '\0'; -+ -+ OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); -+ dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext) -+ { -+ struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec = -+ IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode); -+ if ((psSyncCheckpointRec->ui32FwBlockAddr + psSyncCheckpointRec->ui32SyncOffset + 1) == ui32FwAddr) -+ { -+ SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock = psSyncCheckpointRec->psSyncCheckpointBlock; -+ if (psSyncCheckpointBlock && psSyncCheckpointBlock->pui32LinAddr) -+ { -+ void *pSyncCheckpointAddr = IMG_OFFSET_ADDR(psSyncCheckpointBlock->pui32LinAddr, -+ psSyncCheckpointRec->ui32SyncOffset); -+ OSSNPrintf(pszSyncInfo, len, "%s Checkpoint:%05u (%s)", -+ (*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ? -+ "SIGNALLED" : -+ ((*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_ERRORED) ? 
-+ "ERRORED" : "ACTIVE"), -+ psSyncCheckpointRec->uiPID, -+ psSyncCheckpointRec->szClassName); -+ } -+ else -+ { -+ OSSNPrintf(pszSyncInfo, len, "Checkpoint:%05u (%s)", -+ psSyncCheckpointRec->uiPID, -+ psSyncCheckpointRec->szClassName); -+ } -+ -+ bFound = IMG_TRUE; -+ break; -+ } -+ } -+ OSLockRelease(psDevNode->hSyncCheckpointRecordLock); -+ -+ if (!bFound && (psDevNode->ui32SyncCheckpointRecordCountHighWatermark == SYNC_CHECKPOINT_RECORD_LIMIT)) -+ { -+ OSSNPrintf(pszSyncInfo, len, "(Record may be lost)"); -+ } -+} -+ -+static PVRSRV_ERROR -+_SyncCheckpointRecordAdd( -+ PSYNC_CHECKPOINT_RECORD_HANDLE * phRecord, -+ SYNC_CHECKPOINT_BLOCK *hSyncCheckpointBlock, -+ IMG_UINT32 ui32FwBlockAddr, -+ IMG_UINT32 ui32SyncOffset, -+ IMG_UINT32 ui32UID, -+ IMG_UINT32 ui32ClassNameSize, -+ const IMG_CHAR *pszClassName, PSYNC_CHECKPOINT pSyncCheckpt) -+{ -+ struct SYNC_CHECKPOINT_RECORD * psSyncRec; -+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)hSyncCheckpointBlock->psContext; -+ PVRSRV_DEVICE_NODE *psDevNode = psContext->psContextCtl->psDeviceNode; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_RETURN_IF_INVALID_PARAM(phRecord); -+ -+ *phRecord = NULL; -+ -+ psSyncRec = OSAllocMem(sizeof(*psSyncRec)); -+ PVR_LOG_GOTO_IF_NOMEM(psSyncRec, eError, fail_alloc); /* Sets OOM error code */ -+ -+ psSyncRec->psSyncCheckpointBlock = hSyncCheckpointBlock; -+ psSyncRec->ui32SyncOffset = ui32SyncOffset; -+ psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr; -+ psSyncRec->ui64OSTime = OSClockns64(); -+ psSyncRec->uiPID = OSGetCurrentProcessID(); -+ psSyncRec->ui32UID = ui32UID; -+ psSyncRec->pSyncCheckpt = pSyncCheckpt; -+ if (pszClassName) -+ { -+ if (ui32ClassNameSize >= PVRSRV_SYNC_NAME_LENGTH) -+ ui32ClassNameSize = PVRSRV_SYNC_NAME_LENGTH; -+ /* Copy over the class name annotation */ -+ OSStringLCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize); -+ } -+ else -+ { -+ /* No class name annotation */ -+ psSyncRec->szClassName[0] = 0; -+ } -+ -+ OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); -+ if (psDevNode->ui32SyncCheckpointRecordCount < SYNC_CHECKPOINT_RECORD_LIMIT) -+ { -+ dllist_add_to_head(&psDevNode->sSyncCheckpointRecordList, &psSyncRec->sNode); -+ psDevNode->ui32SyncCheckpointRecordCount++; -+ -+ if (psDevNode->ui32SyncCheckpointRecordCount > psDevNode->ui32SyncCheckpointRecordCountHighWatermark) -+ { -+ psDevNode->ui32SyncCheckpointRecordCountHighWatermark = psDevNode->ui32SyncCheckpointRecordCount; -+ } -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync checkpoint record \"%s\". 
%u records already exist.", -+ __func__, -+ pszClassName, -+ psDevNode->ui32SyncCheckpointRecordCount)); -+ OSFreeMem(psSyncRec); -+ psSyncRec = NULL; -+ eError = PVRSRV_ERROR_TOOMANYBUFFERS; -+ } -+ OSLockRelease(psDevNode->hSyncCheckpointRecordLock); -+ -+ *phRecord = (PSYNC_CHECKPOINT_RECORD_HANDLE)psSyncRec; -+ -+fail_alloc: -+ return eError; -+} -+ -+static PVRSRV_ERROR -+_SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord) -+{ -+ struct SYNC_CHECKPOINT_RECORD **ppFreedSync; -+ struct SYNC_CHECKPOINT_RECORD *pSync = (struct SYNC_CHECKPOINT_RECORD*)hRecord; -+ _SYNC_CHECKPOINT_CONTEXT *psContext = pSync->psSyncCheckpointBlock->psContext; -+ PVRSRV_DEVICE_NODE *psDevNode; -+ -+ PVR_RETURN_IF_INVALID_PARAM(hRecord); -+ -+ psDevNode = psContext->psContextCtl->psDeviceNode; -+ -+ OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); -+ -+ dllist_remove_node(&pSync->sNode); -+ -+ if (psDevNode->uiSyncCheckpointRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: psDevNode->uiSyncCheckpointRecordFreeIdx out of range", -+ __func__)); -+ psDevNode->uiSyncCheckpointRecordFreeIdx = 0; -+ } -+ ppFreedSync = &psDevNode->apsSyncCheckpointRecordsFreed[psDevNode->uiSyncCheckpointRecordFreeIdx]; -+ psDevNode->uiSyncCheckpointRecordFreeIdx = -+ (psDevNode->uiSyncCheckpointRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; -+ -+ if (*ppFreedSync) -+ { -+ OSFreeMem(*ppFreedSync); -+ } -+ pSync->psSyncCheckpointBlock = NULL; -+ pSync->ui64OSTime = OSClockns64(); -+ *ppFreedSync = pSync; -+ -+ psDevNode->ui32SyncCheckpointRecordCount--; -+ -+ OSLockRelease(psDevNode->hSyncCheckpointRecordLock); -+ -+ return PVRSRV_OK; -+} -+ -+#define NS_IN_S (1000000000UL) -+static void _SyncCheckpointRecordPrint(struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec, -+ IMG_UINT64 ui64TimeNow, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ SYNC_CHECKPOINT *psSyncCheckpoint = (SYNC_CHECKPOINT *)psSyncCheckpointRec->pSyncCheckpt; -+ SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock = psSyncCheckpointRec->psSyncCheckpointBlock; -+ IMG_UINT64 ui64DeltaS; -+ IMG_UINT32 ui32DeltaF; -+ IMG_UINT64 ui64Delta = ui64TimeNow - psSyncCheckpointRec->ui64OSTime; -+ ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF); -+ -+ if (psSyncCheckpointBlock && psSyncCheckpointBlock->pui32LinAddr) -+ { -+ void *pSyncCheckpointAddr; -+ pSyncCheckpointAddr = IMG_OFFSET_ADDR(psSyncCheckpointBlock->pui32LinAddr, -+ psSyncCheckpointRec->ui32SyncOffset); -+ -+ PVR_DUMPDEBUG_LOG("\t%05u %05" IMG_UINT64_FMTSPEC ".%09u %010u FWAddr=0x%08x (r%d:e%d:f%d) State=%s (%s)", -+ psSyncCheckpointRec->uiPID, -+ ui64DeltaS, ui32DeltaF, psSyncCheckpointRec->ui32UID, -+ (psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset), -+ OSAtomicRead(&psSyncCheckpoint->hRefCount), -+ OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount), -+ psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount, -+ (*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ? -+ "SIGNALLED" : -+ ((*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_ERRORED) ? 
-+ "ERRORED" : "ACTIVE"), -+ psSyncCheckpointRec->szClassName); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("\t%05u %05" IMG_UINT64_FMTSPEC ".%09u %010u FWAddr=0x%08x State= (%s)", -+ psSyncCheckpointRec->uiPID, -+ ui64DeltaS, ui32DeltaF, psSyncCheckpointRec->ui32UID, -+ (psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset), -+ psSyncCheckpointRec->szClassName -+ ); -+ } -+} -+ -+static void _SyncCheckpointRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, -+ IMG_UINT32 ui32VerbLevel, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle; -+ IMG_UINT64 ui64TimeNowS; -+ IMG_UINT32 ui32TimeNowF; -+ IMG_UINT64 ui64TimeNow = OSClockns64(); -+ DLLIST_NODE *psNode, *psNext; -+ -+ ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF); -+ -+ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) -+ { -+ IMG_UINT32 i; -+ -+ OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); -+ -+ PVR_DUMPDEBUG_LOG("Dumping allocated sync checkpoints. Allocated: %u High watermark: %u (time ref %05" IMG_UINT64_FMTSPEC ".%09u)", -+ psDevNode->ui32SyncCheckpointRecordCount, -+ psDevNode->ui32SyncCheckpointRecordCountHighWatermark, -+ ui64TimeNowS, -+ ui32TimeNowF); -+ if (psDevNode->ui32SyncCheckpointRecordCountHighWatermark == SYNC_CHECKPOINT_RECORD_LIMIT) -+ { -+ PVR_DUMPDEBUG_LOG("Warning: Record limit (%u) was reached. Some sync checkpoints may not have been recorded in the debug information.", -+ SYNC_CHECKPOINT_RECORD_LIMIT); -+ } -+ PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-10s %-17s %-14s (%s)", -+ "PID", "Time Delta (s)", "UID", "Address", "State", "Annotation"); -+ -+ dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext) -+ { -+ struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec = -+ IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode); -+ _SyncCheckpointRecordPrint(psSyncCheckpointRec, ui64TimeNow, -+ pfnDumpDebugPrintf, pvDumpDebugFile); -+ } -+ -+ PVR_DUMPDEBUG_LOG("Dumping all recently freed sync checkpoints @ %05" IMG_UINT64_FMTSPEC ".%09u", -+ ui64TimeNowS, -+ ui32TimeNowF); -+ PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-10s %-17s %-14s (%s)", -+ "PID", "Time Delta (s)", "UID", "Address", "State", "Annotation"); -+ for (i = DECREMENT_WITH_WRAP(psDevNode->uiSyncCheckpointRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN); -+ i != psDevNode->uiSyncCheckpointRecordFreeIdx; -+ i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)) -+ { -+ if (psDevNode->apsSyncCheckpointRecordsFreed[i]) -+ { -+ _SyncCheckpointRecordPrint(psDevNode->apsSyncCheckpointRecordsFreed[i], -+ ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile); -+ } -+ else -+ { -+ break; -+ } -+ } -+ OSLockRelease(psDevNode->hSyncCheckpointRecordLock); -+ } -+} -+#undef NS_IN_S -+static PVRSRV_ERROR _SyncCheckpointRecordListInit(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = OSLockCreate(&psDevNode->hSyncCheckpointRecordLock); -+ PVR_GOTO_IF_ERROR(eError, fail_lock_create); -+ dllist_init(&psDevNode->sSyncCheckpointRecordList); -+ -+ psDevNode->ui32SyncCheckpointRecordCount = 0; -+ psDevNode->ui32SyncCheckpointRecordCountHighWatermark = 0; -+ -+ eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDevNode->hSyncCheckpointRecordNotify, -+ psDevNode, -+ _SyncCheckpointRecordRequest, -+ DEBUG_REQUEST_SYNCCHECKPOINT, -+ (PVRSRV_DBGREQ_HANDLE)psDevNode); -+ PVR_GOTO_IF_ERROR(eError, fail_dbg_register); -+ -+ return PVRSRV_OK; -+ -+fail_dbg_register: -+ 
OSLockDestroy(psDevNode->hSyncCheckpointRecordLock); -+fail_lock_create: -+ return eError; -+} -+ -+static void _SyncCheckpointRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ int i; -+ -+ OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); -+ dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext) -+ { -+ struct SYNC_CHECKPOINT_RECORD *pSyncCheckpointRec = -+ IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode); -+ -+ dllist_remove_node(psNode); -+ OSFreeMem(pSyncCheckpointRec); -+ } -+ -+ for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++) -+ { -+ if (psDevNode->apsSyncCheckpointRecordsFreed[i]) -+ { -+ OSFreeMem(psDevNode->apsSyncCheckpointRecordsFreed[i]); -+ psDevNode->apsSyncCheckpointRecordsFreed[i] = NULL; -+ } -+ } -+ OSLockRelease(psDevNode->hSyncCheckpointRecordLock); -+ -+ if (psDevNode->hSyncCheckpointRecordNotify) -+ { -+ PVRSRVUnregisterDeviceDbgRequestNotify(psDevNode->hSyncCheckpointRecordNotify); -+ } -+ OSLockDestroy(psDevNode->hSyncCheckpointRecordLock); -+} -+ -+#if defined(PDUMP) -+ -+static PVRSRV_ERROR -+_SyncCheckpointAllocPDump(PVRSRV_DEVICE_NODE *psDevNode, SYNC_CHECKPOINT *psSyncCheckpoint) -+{ -+ PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS, -+ "Allocated Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)", -+ psSyncCheckpoint->azName, -+ psSyncCheckpoint->ui32UID, psSyncCheckpoint->hTimeline, -+ psSyncCheckpoint->sCheckpointUFOAddr.ui32Addr); -+ -+ DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc, -+ _SyncCheckpointGetOffset(psSyncCheckpoint), -+ PVRSRV_SYNC_CHECKPOINT_ACTIVE, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR -+_SyncCheckpointUpdatePDump(PPVRSRV_DEVICE_NODE psDevNode, SYNC_CHECKPOINT *psSyncCheckpoint, IMG_UINT32 ui32Status, IMG_UINT32 ui32FenceSyncFlags) -+{ -+ IMG_BOOL bSleepAllowed = (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ psDevInfo = psDevNode->pvDevice; -+ /* -+ We might be ask to PDump sync state outside of capture range -+ (e.g. texture uploads) so make this continuous. 
-+ */ -+ if (bSleepAllowed) -+ { -+ if (ui32Status == PVRSRV_SYNC_CHECKPOINT_ERRORED) -+ { -+ PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS, -+ "Errored Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)", -+ psSyncCheckpoint->azName, -+ psSyncCheckpoint->ui32UID, psSyncCheckpoint->hTimeline, -+ (psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + -+ _SyncCheckpointGetOffset(psSyncCheckpoint))); -+ } -+ else -+ { -+ PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS, -+ "Signalled Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)", -+ psSyncCheckpoint->azName, -+ psSyncCheckpoint->ui32UID, psSyncCheckpoint->hTimeline, -+ (psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + -+ _SyncCheckpointGetOffset(psSyncCheckpoint))); -+ } -+ -+ DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc, -+ _SyncCheckpointGetOffset(psSyncCheckpoint), -+ ui32Status, -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+ else -+ { -+ _SYNC_CHECKPOINT_DEFERRED_SIGNAL *psSyncData; -+ OS_SPINLOCK_FLAGS uiFlags; -+ IMG_UINT16 ui16NewWriteIdx; -+ -+ OSSpinLockAcquire(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); -+ -+ ui16NewWriteIdx = GET_CP_CB_NEXT_IDX(psDevNode->ui16SyncCPWriteIdx); -+ if (ui16NewWriteIdx == psDevNode->ui16SyncCPReadIdx) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: ERROR Deferred SyncCheckpointSignal CB is full)", -+ __func__)); -+ } -+ else -+ { -+ psSyncData = GET_CP_CB_BASE(psDevNode->ui16SyncCPWriteIdx); -+ psSyncData->asSyncCheckpoint = *psSyncCheckpoint; -+ psSyncData->ui32Status = ui32Status; -+ psDevNode->ui16SyncCPWriteIdx = ui16NewWriteIdx; -+ } -+ -+ OSSpinLockRelease(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); -+ -+ OSScheduleMISR(psDevNode->pvSyncCPMISR); -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static void -+MISRHandler_PdumpDeferredSyncSignalPoster(void *pvData) -+{ -+ PPVRSRV_DEVICE_NODE psDevNode = (PPVRSRV_DEVICE_NODE) pvData; -+ OS_SPINLOCK_FLAGS uiFlags; -+ IMG_UINT16 ui16ReadIdx, ui16WriteIdx; -+ _SYNC_CHECKPOINT_DEFERRED_SIGNAL *psSyncData; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ psDevInfo = psDevNode->pvDevice; -+ -+ OSLockAcquire(psDevNode->hSyncCheckpointSignalLock); -+ -+ OSSpinLockAcquire(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); -+ /* Snapshot current write and read offset of CB */ -+ ui16WriteIdx = psDevNode->ui16SyncCPWriteIdx; -+ ui16ReadIdx = psDevNode->ui16SyncCPReadIdx; -+ -+ OSSpinLockRelease(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); -+ /* CB is empty */ -+ if (ui16WriteIdx == ui16ReadIdx) -+ { -+ OSLockRelease(psDevNode->hSyncCheckpointSignalLock); -+ return; -+ } -+ do -+ { -+ /* Read item in the CB and flush it to pdump */ -+ psSyncData = GET_CP_CB_BASE(ui16ReadIdx); -+ _SyncCheckpointUpdatePDump(psDevNode, &psSyncData->asSyncCheckpoint, psSyncData->ui32Status, PVRSRV_FENCE_FLAG_NONE); -+ ui16ReadIdx = GET_CP_CB_NEXT_IDX(psDevNode->ui16SyncCPReadIdx); -+ /* Increment read offset in CB as one item is flushed to pdump */ -+ OSSpinLockAcquire(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); -+ psDevNode->ui16SyncCPReadIdx = ui16ReadIdx; -+ OSSpinLockRelease(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); -+ /* Call to this function will flush all the items present in CB -+ * when this function is called i.e. 
use snapshot of WriteOffset -+ * taken at the beginning in this function and iterate till Write != Read */ -+ } while (ui16WriteIdx != ui16ReadIdx); -+ -+ OSLockRelease(psDevNode->hSyncCheckpointSignalLock); -+} -+ -+#if defined(PDUMP) -+PVRSRV_ERROR PVRSRVSyncCheckpointSignalledPDumpPolKM(PVRSRV_FENCE hFence) -+{ -+ PVRSRV_ERROR eError; -+ PSYNC_CHECKPOINT *apsCheckpoints = NULL; -+ SYNC_CHECKPOINT *psSyncCheckpoint = NULL; -+ IMG_UINT32 i, uiNumCheckpoints = 0; -+ _SYNC_CHECKPOINT_CONTEXT *psContext; -+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) && defined(NO_HARDWARE) && defined(PDUMP) -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+#endif -+ -+ if (hFence != PVRSRV_NO_FENCE) -+ { -+ eError = g_psSyncCheckpointPfnStruct->pfnSyncFenceGetCheckpoints(hFence, &uiNumCheckpoints, &apsCheckpoints); -+ } -+ else -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ PVR_LOG_RETURN_IF_ERROR(eError, "g_pfnFenceGetCheckpoints"); -+ -+ if (uiNumCheckpoints) -+ { -+ /* Flushing deferred fence signals to pdump */ -+ psSyncCheckpoint = (SYNC_CHECKPOINT *)apsCheckpoints[0]; -+ psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpoint->psSyncCheckpointBlock->psContext; -+ MISRHandler_PdumpDeferredSyncSignalPoster(psContext->psContextCtl->psDeviceNode); -+ } -+ -+ for (i=0; i < uiNumCheckpoints; i++) -+ { -+ psSyncCheckpoint = (SYNC_CHECKPOINT *)apsCheckpoints[i]; -+ if (psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) -+ { -+ psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpoint->psSyncCheckpointBlock->psContext; -+ PDUMPCOMMENTWITHFLAGS(psContext->psContextCtl->psDeviceNode, -+ psSyncCheckpoint->ui32PDumpFlags, -+ "Wait for Fence %s (ID:%d)", -+ psSyncCheckpoint->azName, -+ psSyncCheckpoint->ui32UID); -+ -+ eError = DevmemPDumpDevmemPol32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc, -+ _SyncCheckpointGetOffset(psSyncCheckpoint), -+ PVRSRV_SYNC_CHECKPOINT_SIGNALLED, -+ 0xFFFFFFFF, -+ PDUMP_POLL_OPERATOR_EQUAL, -+ psSyncCheckpoint->ui32PDumpFlags); -+ PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32"); -+ } -+ } -+ -+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) && defined(NO_HARDWARE) && defined(PDUMP) -+ /* Sampling of USC timers can only be done after synchronisation for a 3D kick is over */ -+ if (uiNumCheckpoints) -+ { -+ psSyncCheckpoint = (SYNC_CHECKPOINT *)apsCheckpoints[0]; -+ psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpoint->psSyncCheckpointBlock->psContext; -+ psDevInfo = psContext->psContextCtl->psDeviceNode->pvDevice; -+ if (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) -+ { -+ RGXValidateSOCUSCTimer(psDevInfo, PDUMP_CONT, 0, 0, NULL); -+ } -+ } -+#endif -+ -+ /* Free the memory that was allocated for the sync checkpoint list returned */ -+ if (apsCheckpoints) -+ { -+ SyncCheckpointFreeCheckpointListMem(apsCheckpoints); -+ } -+ -+ return PVRSRV_OK; -+} -+#endif /* #if defined(PDUMP) */ -+ -+static PVRSRV_ERROR -+_SyncCheckpointPDumpTransition(void *pvData, PDUMP_TRANSITION_EVENT eEvent) -+{ -+ _SYNC_CHECKPOINT_CONTEXT *psContext; -+ DLLIST_NODE *psNode, *psNext; -+ DLLIST_NODE *psNode1, *psNext1; -+ PPVRSRV_DEVICE_NODE psDevNode = (PPVRSRV_DEVICE_NODE) pvData; -+ -+ if ((eEvent == PDUMP_TRANSITION_EVENT_RANGE_ENTERED) || (eEvent == PDUMP_TRANSITION_EVENT_BLOCK_STARTED)) -+ { -+ OSLockAcquire(psDevNode->hSyncCheckpointContextListLock); -+ dllist_foreach_node(&psDevNode->sSyncCheckpointContextListHead, psNode, psNext) -+ { -+ psContext = IMG_CONTAINER_OF(psNode, 
_SYNC_CHECKPOINT_CONTEXT, sListNode); -+ -+ OSLockAcquire(psContext->hSyncCheckpointBlockListLock); -+ dllist_foreach_node(&psContext->sSyncCheckpointBlockListHead, psNode1, psNext1) -+ { -+ SYNC_CHECKPOINT_BLOCK *psSyncBlk = -+ IMG_CONTAINER_OF(psNode1, SYNC_CHECKPOINT_BLOCK, sListNode); -+ DevmemPDumpLoadMem(psSyncBlk->hMemDesc, -+ 0, -+ psSyncBlk->ui32SyncBlockSize, -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+ OSLockRelease(psContext->hSyncCheckpointBlockListLock); -+ } -+ OSLockRelease(psDevNode->hSyncCheckpointContextListLock); -+ } -+ -+ return PVRSRV_OK; -+} -+#endif -+ -+static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext) -+{ -+ _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl; -+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE*)psCtxCtl->psDeviceNode; -+ DECLARE_DLLIST(sCleanupList); -+ DLLIST_NODE *psNode, *psNext; -+ OS_SPINLOCK_FLAGS uiFlags; -+ PVRSRV_ERROR eError; -+ -+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, "%s called", __func__)); -+#endif -+ -+ /* Check the deferred cleanup list and free any sync checkpoints we can */ -+ OSSpinLockAcquire(psCtxCtl->hDeferredCleanupListLock, uiFlags); -+ -+ if (dllist_is_empty(&psCtxCtl->sDeferredCleanupListHead)) -+ { -+ OSSpinLockRelease(psCtxCtl->hDeferredCleanupListLock, uiFlags); -+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, "%s: Defer free list is empty", __func__)); -+#endif -+ /* if list is empty then we have nothing to do here */ -+ return; -+ } -+ -+ dllist_foreach_node(&psCtxCtl->sDeferredCleanupListHead, psNode, psNext) -+ { -+ SYNC_CHECKPOINT *psSyncCheckpointInt = -+ IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sDeferredFreeListNode); -+ -+ if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount == -+ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount))) -+ { -+ if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) -+ && psSyncCheckpointInt->hRecord) -+ { -+ /* remove this sync record */ -+ eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord); -+ PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove"); -+ } -+ -+ /* Move the sync checkpoint from the deferred free list to local list */ -+ dllist_remove_node(&psSyncCheckpointInt->sDeferredFreeListNode); -+ /* It's not an ideal solution to traverse list of checkpoints-to-free -+ * twice but it allows us to avoid holding the lock for too long */ -+ dllist_add_to_tail(&sCleanupList, &psSyncCheckpointInt->sDeferredFreeListNode); -+ } -+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) -+ else -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpoint '%s'' (ID:%d)<%p>), " -+ "still pending (enq=%d,FWRef=%d)", __func__, -+ psSyncCheckpointInt->azName, psSyncCheckpointInt->ui32UID, -+ (void*)psSyncCheckpointInt, -+ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)), -+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); -+ } -+#endif -+ } -+ -+ OSSpinLockRelease(psCtxCtl->hDeferredCleanupListLock, uiFlags); -+ -+ dllist_foreach_node(&sCleanupList, psNode, psNext) { -+ SYNC_CHECKPOINT *psSyncCheckpointInt = -+ IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sDeferredFreeListNode); -+ -+ /* Remove the sync checkpoint from the global list */ -+ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); -+ dllist_remove_node(&psSyncCheckpointInt->sListNode); -+ OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); -+ -+ RGXSRV_HWPERF_FREE(psDevNode, SYNC_CP, 
psSyncCheckpointInt->ui32FWAddr); -+ -+ /* Unref the checkpoint in use */ -+ OSSpinLockAcquire(psContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags); -+ psContext->psContextCtl->ui32CurrentInUseSyncCheckpoints--; -+ OSSpinLockRelease(psContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags); -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s attempting to return sync(ID:%d),%p> to pool", -+ __func__, -+ psSyncCheckpointInt->ui32UID, -+ (void *) psSyncCheckpointInt)); -+#endif -+ if (!_PutCheckpointInPool(psSyncCheckpointInt)) -+#endif -+ { -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, "%s pool is full, so just free it", -+ __func__)); -+#endif -+#endif -+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+ } -+ else -+ { -+#endif -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s psSyncCheckpoint '%s'' (ID:%d)<%p>), still pending (enq=%d,FWRef=%d)", -+ __func__, -+ psSyncCheckpointInt->azName, -+ psSyncCheckpointInt->ui32UID, -+ (void*)psSyncCheckpointInt, -+ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)), -+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); -+#endif -+ _FreeSyncCheckpoint(psSyncCheckpointInt); -+ } -+ } -+} -+ -+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0) -+static SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext) -+{ -+ _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl; -+ SYNC_CHECKPOINT *psSyncCheckpoint = NULL; -+ OS_SPINLOCK_FLAGS uiFlags; -+ -+ if (psCtxCtl->ui32SyncCheckpointPoolSize == 0) -+ { -+ goto pool_not_used; -+ } -+ -+ /* Acquire sync checkpoint pool lock */ -+ OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); -+ -+ /* Check if we can allocate from the pool */ -+ if (CHECKPOINT_POOL_VALID(psCtxCtl) && -+ (psCtxCtl->ui32SyncCheckpointPoolCount > SYNC_CHECKPOINT_POOL_SEDIMENT)) -+ { -+ /* Get the next sync checkpoint from the pool */ -+ psSyncCheckpoint = psCtxCtl->apsSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp]; -+ psCtxCtl->ui32SyncCheckpointPoolRp = -+ (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & (psCtxCtl->ui32SyncCheckpointPoolSize-1); -+ psCtxCtl->ui32SyncCheckpointPoolCount--; -+ CLEAR_CHECKPOINT_POOL_FULL(psCtxCtl); -+#if defined(DEBUG) -+ psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE; -+#endif -+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s checkpoint(old ID:%d)<-POOL(%d/%d), psContext=<%p>, " -+ "poolRp=%d, poolWp=%d", -+ __func__, -+ psSyncCheckpoint->ui32UID, -+ psCtxCtl->ui32SyncCheckpointPoolCount, -+ psCtxCtl->ui32SyncCheckpointPoolSize, -+ (void *) psContext, -+ psCtxCtl->ui32SyncCheckpointPoolRp, -+ psCtxCtl->ui32SyncCheckpointPoolWp)); -+#endif -+ } -+ /* Release sync checkpoint pool lock */ -+ OSSpinLockRelease(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); -+ -+pool_not_used: -+ return psSyncCheckpoint; -+} -+ -+static IMG_BOOL _PutCheckpointInPool(SYNC_CHECKPOINT *psSyncCheckpoint) -+{ -+ _SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext; -+ _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl; -+ IMG_BOOL bReturnedToPool = IMG_FALSE; -+ OS_SPINLOCK_FLAGS uiFlags; -+ -+ if (psCtxCtl->ui32SyncCheckpointPoolSize == 0) -+ { -+ return IMG_FALSE; -+ } -+ -+ /* Acquire sync checkpoint pool lock */ -+ 
OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); -+ -+ /* Check if pool has space */ -+ if (CHECKPOINT_POOL_VALID(psCtxCtl) && !(CHECKPOINT_POOL_FULL(psCtxCtl))) -+ { -+ /* Put the sync checkpoint into the next write slot in the pool */ -+ psCtxCtl->apsSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolWp] = psSyncCheckpoint; -+ psCtxCtl->ui32SyncCheckpointPoolWp = -+ (psCtxCtl->ui32SyncCheckpointPoolWp + 1) & (psCtxCtl->ui32SyncCheckpointPoolSize-1); -+ psCtxCtl->ui32SyncCheckpointPoolCount++; -+ /* Update if the checkpoint that was just added filled up the pool */ -+ if (psCtxCtl->ui32SyncCheckpointPoolWp == psCtxCtl->ui32SyncCheckpointPoolRp) -+ { -+ SET_CHECKPOINT_POOL_FULL(psCtxCtl); -+ } -+ bReturnedToPool = IMG_TRUE; -+ psSyncCheckpoint->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_UNDEF; -+#if defined(DEBUG) -+ psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_POOL; -+#endif -+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s checkpoint(ID:%d)->POOL(%d/%d), poolRp=%d, poolWp=%d", -+ __func__, -+ psSyncCheckpoint->ui32UID, -+ psCtxCtl->ui32SyncCheckpointPoolCount, -+ psCtxCtl->ui32SyncCheckpointPoolSize, -+ psCtxCtl->ui32SyncCheckpointPoolRp, -+ psCtxCtl->ui32SyncCheckpointPoolWp)); -+#endif -+ } -+ /* Release sync checkpoint pool lock */ -+ OSSpinLockRelease(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); -+ -+ return bReturnedToPool; -+} -+ -+static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext) -+{ -+ _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl; -+ SYNC_CHECKPOINT *psCheckpoint = NULL; -+ DECLARE_DLLIST(sCleanupList); -+ DLLIST_NODE *psThis, *psNext; -+ OS_SPINLOCK_FLAGS uiFlags; -+ IMG_UINT32 ui32ItemsFreed = 0, ui32NullScpCount = 0, ui32PoolCount; -+ -+ /* Acquire sync checkpoint pool lock */ -+ OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); -+ -+ ui32PoolCount = psCtxCtl->ui32SyncCheckpointPoolCount; -+ -+ /* While the pool still contains sync checkpoints, free them */ -+ while (CHECKPOINT_POOL_VALID(psCtxCtl) && psCtxCtl->ui32SyncCheckpointPoolCount > 0) -+ { -+ /* Get the sync checkpoint from the next read slot in the pool */ -+ psCheckpoint = psCtxCtl->apsSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp]; -+ psCtxCtl->ui32SyncCheckpointPoolRp = -+ (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & (psCtxCtl->ui32SyncCheckpointPoolSize-1); -+ psCtxCtl->ui32SyncCheckpointPoolCount--; -+ -+ CLEAR_CHECKPOINT_POOL_FULL(psCtxCtl); -+ -+ if (psCheckpoint) -+ { -+ PVR_ASSERT(!dllist_node_is_in_list(&psCheckpoint->sListNode)); -+ /* before checkpoints are added to the pool they are removed -+ * from the list so it's safe to use sListNode here */ -+ dllist_add_to_head(&sCleanupList, &psCheckpoint->sListNode); -+ } -+ else -+ { -+ ui32NullScpCount++; -+ } -+ } -+ -+ /* Release sync checkpoint pool lock */ -+ OSSpinLockRelease(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); -+ -+ /* go through the local list and free all of the sync checkpoints */ -+ -+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) -+ PVR_DPF((PVR_DBG_WARNING, "%s psContext=<%p>, ui8PoolStateFlags=0x%x, " -+ "uiSyncCheckpointPoolCount=%d", __func__, (void *) psContext, -+ psCtxCtl->ui8PoolStateFlags, psCtxCtl->ui32SyncCheckpointPoolCount)); -+ -+ if (ui32NullScpCount > 0) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s pool contained %u NULL entries", __func__, -+ ui32NullScpCount)); -+ } -+#endif -+ -+ dllist_foreach_node(&sCleanupList, psThis, psNext) -+ { -+ psCheckpoint = IMG_CONTAINER_OF(psThis, 
SYNC_CHECKPOINT, sListNode); -+ -+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) -+#if defined(DEBUG) -+ if (psCheckpoint->ui32ValidationCheck != SYNC_CHECKPOINT_PATTERN_IN_POOL) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "%s pool contains invalid entry " -+ "(ui32ValidationCheck=0x%x)", __func__, -+ psCheckpoint->ui32ValidationCheck)); -+ } -+#endif -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s psSyncCheckpoint(ID:%d)", -+ __func__, psCheckpoint->ui32UID)); -+#if defined(DEBUG) -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s psSyncCheckpoint->ui32ValidationCheck=0x%x", -+ __func__, psCheckpoint->ui32ValidationCheck)); -+#endif -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s psSyncCheckpoint->uiAllocatedAddr=0x%llx", -+ __func__, psCheckpoint->uiAllocatedAddr)); -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s psSyncCheckpoint->psSyncCheckpointBlock=<%p>", -+ __func__, (void *) psCheckpoint->psSyncCheckpointBlock)); -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s psSyncCheckpoint->psSyncCheckpointBlock->psContext=<%p>", -+ __func__, (void *) psCheckpoint->psSyncCheckpointBlock->psContext)); -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA=<%p>", -+ __func__, (void *) psCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA)); -+ -+ PVR_DPF((PVR_DBG_WARNING, -+ "%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), " -+ "psSubAllocRA=<%p>, ui32SpanAddr=0x%llx", -+ __func__, -+ psCheckpoint->ui32UID, -+ (void *) psCheckpoint, -+ (void *) psCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA, -+ psCheckpoint->uiAllocatedAddr)); -+#endif -+ -+ dllist_remove_node(psThis); -+ -+ _FreeSyncCheckpoint(psCheckpoint); -+ ui32ItemsFreed++; -+ } -+ -+ return ui32ItemsFreed; -+} -+#endif /* (SYNC_CHECKPOINT_POOL_LIMIT > 0) */ -diff --git a/drivers/gpu/drm/img-rogue/sync_checkpoint.h b/drivers/gpu/drm/img-rogue/sync_checkpoint.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/sync_checkpoint.h -@@ -0,0 +1,666 @@ -+/*************************************************************************/ /*! -+@File -+@Title Synchronisation checkpoint interface header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Defines the client side interface for synchronisation -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef SYNC_CHECKPOINT_H -+#define SYNC_CHECKPOINT_H -+ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_sync_km.h" -+#include "pdumpdefs.h" -+#include "pdump.h" -+#include "dllist.h" -+#include "pvr_debug.h" -+#include "device_connection.h" -+#include "opaque_types.h" -+ -+#ifndef CHECKPOINT_TYPES -+#define CHECKPOINT_TYPES -+typedef struct SYNC_CHECKPOINT_CONTEXT_TAG *PSYNC_CHECKPOINT_CONTEXT; -+ -+typedef struct SYNC_CHECKPOINT_TAG *PSYNC_CHECKPOINT; -+#endif -+ -+/* definitions for functions to be implemented by OS-specific sync - the OS-specific sync code -+ will call SyncCheckpointRegisterFunctions() when initialised, in order to register functions -+ we can then call */ -+#ifndef CHECKPOINT_PFNS -+#define CHECKPOINT_PFNS -+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, -+ PVRSRV_FENCE fence, -+ IMG_UINT32 *nr_checkpoints, -+ PSYNC_CHECKPOINT **checkpoint_handles, -+ IMG_UINT64 *pui64FenceUID); -+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)(PPVRSRV_DEVICE_NODE device, -+ const IMG_CHAR *fence_name, -+ PVRSRV_TIMELINE timeline, -+ PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, -+ PVRSRV_FENCE *new_fence, -+ IMG_UINT64 *pui64FenceUID, -+ void **ppvFenceFinaliseData, -+ PSYNC_CHECKPOINT *new_checkpoint_handle, -+ IMG_HANDLE *timeline_update_sync, -+ IMG_UINT32 *timeline_update_value); -+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN)(PVRSRV_FENCE fence_to_rollback, void *finalise_data); -+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN)(PVRSRV_FENCE fence_to_finalise, void *finalise_data); -+typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data); -+typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr); -+typedef IMG_UINT32 (*PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN)(IMG_UINT32 num_ufos, IMG_UINT32 *vaddrs); -+#if defined(PDUMP) -+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN)(PVRSRV_FENCE iFence, -+ IMG_UINT32 *puiNumCheckpoints, -+ PSYNC_CHECKPOINT **papsCheckpoints); -+#endif -+ -+#define SYNC_CHECKPOINT_IMPL_MAX_STRLEN 20 -+ -+typedef struct -+{ -+ PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve; -+ PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate; -+ PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback; -+ PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise; -+ PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines; -+ PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem; -+ PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN pfnDumpInfoOnStalledUFOs; -+ IMG_CHAR 
pszImplName[SYNC_CHECKPOINT_IMPL_MAX_STRLEN]; -+#if defined(PDUMP) -+ PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN pfnSyncFenceGetCheckpoints; -+#endif -+} PFN_SYNC_CHECKPOINT_STRUCT; -+ -+PVRSRV_ERROR SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_STRUCT *psSyncCheckpointPfns); -+ -+#endif /* ifndef CHECKPOINT_PFNS */ -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointContextCreate -+ -+@Description Create a new synchronisation checkpoint context -+ -+@Input psDevNode Device node -+ -+@Output ppsSyncCheckpointContext Handle to the created synchronisation -+ checkpoint context -+ -+@Return PVRSRV_OK if the synchronisation checkpoint context was -+ successfully created -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode, -+ PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointContextDestroy -+ -+@Description Destroy a synchronisation checkpoint context -+ -+@Input psSyncCheckpointContext Handle to the synchronisation -+ checkpoint context to destroy -+ -+@Return PVRSRV_OK if the synchronisation checkpoint context was -+ successfully destroyed. -+ PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT if the context still -+ has sync checkpoints defined -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointContextRef -+ -+@Description Takes a reference on a synchronisation checkpoint context -+ -+@Input psContext Handle to the synchronisation checkpoint context -+ on which a ref is to be taken -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointContextUnref -+ -+@Description Drops a reference taken on a synchronisation checkpoint -+ context -+ -+@Input psContext Handle to the synchronisation checkpoint context -+ on which the ref is to be dropped -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointAlloc -+ -+@Description Allocate a new synchronisation checkpoint on the specified -+ synchronisation checkpoint context -+ -+@Input hSyncCheckpointContext Handle to the synchronisation -+ checkpoint context -+ -+@Input hTimeline Timeline on which this sync -+ checkpoint is being created -+ -+@Input hFence Fence as passed into pfnFenceResolve -+ API, when the API encounters a non-PVR -+ fence as part of its input fence. From -+ all other places this argument must be -+ PVRSRV_NO_FENCE. 
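[Editor's illustration] The PFN_SYNC_CHECKPOINT_STRUCT above is the table an OS-native sync backend fills in and hands to SyncCheckpointRegisterFunctions(), so the checkpoint layer can resolve and create fences through it. A minimal sketch of that registration follows; the Native* callback names and the init wrapper are hypothetical placeholders assumed to be implemented elsewhere in the backend with the PFN_* signatures, and only the struct fields and the register call come from this header.

/* Sketch only: assumes the driver build environment (sync_checkpoint.h).
 * The Native* functions are hypothetical backend implementations whose
 * prototypes match the PFN_SYNC_CHECKPOINT_* typedefs above. */
#include "sync_checkpoint.h"

static PFN_SYNC_CHECKPOINT_STRUCT gsNativeSyncFuncs = {
	.pfnFenceResolve          = NativeFenceResolve,       /* fence -> checkpoint list          */
	.pfnFenceCreate           = NativeFenceCreate,        /* new fence + backing checkpoint    */
	.pfnFenceDataRollback     = NativeFenceRollback,      /* undo a failed fence creation      */
	.pfnFenceFinalise         = NativeFenceFinalise,      /* optional, may be left unset       */
	.pfnFreeCheckpointListMem = NativeFreeCheckpointList,
	.pfnDumpInfoOnStalledUFOs = NativeDumpStalledUFOs,
	.pszImplName              = "native_sync",            /* < SYNC_CHECKPOINT_IMPL_MAX_STRLEN */
};

static PVRSRV_ERROR NativeSyncRegister(void)
{
	/* Called once at backend init, before any fence is resolved or created. */
	return SyncCheckpointRegisterFunctions(&gsNativeSyncFuncs);
}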
-+ -+@Input pszClassName Sync checkpoint source annotation -+ (will be truncated to at most -+ PVRSRV_SYNC_NAME_LENGTH chars) -+ -+@Output ppsSyncCheckpoint Created synchronisation checkpoint -+ -+@Return PVRSRV_OK if the synchronisation checkpoint was -+ successfully created -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, -+ PVRSRV_TIMELINE hTimeline, -+ PVRSRV_FENCE hFence, -+ const IMG_CHAR *pszCheckpointName, -+ PSYNC_CHECKPOINT *ppsSyncCheckpoint); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointFree -+ -+@Description Free a synchronisation checkpoint -+ The reference count held for the synchronisation checkpoint -+ is decremented - if it has becomes zero, it is also freed. -+ -+@Input psSyncCheckpoint The synchronisation checkpoint to free -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void -+SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointSignal -+ -+@Description Signal the synchronisation checkpoint -+ -+@Input psSyncCheckpoint The synchronisation checkpoint to signal -+ -+@Input ui32FenceSyncFlags Flags used for controlling HWPerf behavior -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void -+SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointSignalNoHW -+ -+@Description Signal the synchronisation checkpoint in NO_HARWARE build -+ -+@Input psSyncCheckpoint The synchronisation checkpoint to signal -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void -+SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointError -+ -+@Description Error the synchronisation checkpoint -+ -+@Input psSyncCheckpoint The synchronisation checkpoint to error -+ -+@Input ui32FenceSyncFlags Flags used for controlling HWPerf behavior -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void -+SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointStateFromUFO -+ -+@Description Returns the current state of the synchronisation checkpoint -+ which has the given UFO firmware address -+ -+@Input psDevNode The device owning the sync -+ checkpoint -+ -+@Input ui32FwAddr The firmware address of the sync -+ checkpoint -+ -+@Return The current state (32-bit value) of the sync checkpoint -+*/ -+/*****************************************************************************/ -+IMG_UINT32 SyncCheckpointStateFromUFO(PPVRSRV_DEVICE_NODE psDevNode, -+ IMG_UINT32 ui32FwAddr); -+ -+/*************************************************************************/ /*! 
-+@Function SyncCheckpointErrorFromUFO -+ -+@Description Error the synchronisation checkpoint which has the -+ given UFO firmware address -+ -+@Input psDevNode The device owning the sync -+ checkpoint to be errored -+ -+@Input ui32FwAddr The firmware address of the sync -+ checkpoint to be errored -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void -+SyncCheckpointErrorFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointRollbackFromUFO -+ -+@Description Drop the enqueued count reference taken on the synchronisation -+ checkpoint on behalf of the firmware. -+ Called in the event of a DM Kick failing. -+ -+@Input psDevNode The device owning the sync -+ checkpoint to be rolled back -+ -+@Input ui32FwAddr The firmware address of the sync -+ checkpoint to be rolled back -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void -+SyncCheckpointRollbackFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointIsSignalled -+ -+@Description Returns IMG_TRUE if the synchronisation checkpoint is -+ signalled or errored -+ -+@Input psSyncCheckpoint The synchronisation checkpoint to test -+ -+@Input ui32FenceSyncFlags Flags used for controlling HWPerf behavior -+ -+@Return None -+*/ -+/*****************************************************************************/ -+IMG_BOOL -+SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, -+ IMG_UINT32 ui32FenceSyncFlags); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointIsErrored -+ -+@Description Returns IMG_TRUE if the synchronisation checkpoint is -+ errored -+ -+@Input psSyncCheckpoint The synchronisation checkpoint to test -+ -+@Input ui32FenceSyncFlags Flags used for controlling HWPerf behavior -+ -+@Return None -+*/ -+/*****************************************************************************/ -+IMG_BOOL -+SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, -+ IMG_UINT32 ui32FenceSyncFlags); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointTakeRef -+ -+@Description Take a reference on a synchronisation checkpoint -+ -+@Input psSyncCheckpoint Synchronisation checkpoint to take a -+ reference on -+ -+@Return PVRSRV_OK if a reference was taken on the synchronisation -+ primitive -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointDropRef -+ -+@Description Drop a reference on a synchronisation checkpoint -+ -+@Input psSyncCheckpoint Synchronisation checkpoint to drop a -+ reference on -+ -+@Return PVRSRV_OK if a reference was dropped on the synchronisation -+ primitive -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointResolveFence -+ -+@Description Resolve a fence, returning a list of the sync checkpoints -+ that fence contains. 
-+ This function in turn calls a function provided by the -+ OS native sync implementation. -+ -+@Input psSyncCheckpointContext The sync checkpoint context -+ on which checkpoints should be -+ created (in the event of the fence -+ having a native sync pt with no -+ associated sync checkpoint) -+ -+@Input hFence The fence to be resolved -+ -+@Output pui32NumSyncCheckpoints The number of sync checkpoints the -+ fence contains. Can return 0 if -+ passed a null (-1) fence. -+ -+@Output papsSyncCheckpoints List of sync checkpoints the fence -+ contains -+ -+@Output puiFenceUID Unique ID of the resolved fence -+ -+@Return PVRSRV_OK if a valid fence was provided. -+ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native -+ sync has not registered a callback function. -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+SyncCheckpointResolveFence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, -+ PVRSRV_FENCE hFence, -+ IMG_UINT32 *pui32NumSyncCheckpoints, -+ PSYNC_CHECKPOINT **papsSyncCheckpoints, -+ IMG_UINT64 *puiFenceUID, -+ PDUMP_FLAGS_T ui32PDumpFlags); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointCreateFence -+ -+@Description Create a fence containing a single sync checkpoint. -+ Return the fence and a ptr to sync checkpoint it contains. -+ This function in turn calls a function provided by the -+ OS native sync implementation. -+ -+@Input pszFenceName String to assign to the new fence -+ (for debugging purposes) -+ -+@Input hTimeline Timeline on which the new fence is -+ to be created -+ -+@Input psSyncCheckpointContext Sync checkpoint context to be used -+ when creating the new fence -+ -+@Output phNewFence The newly created fence -+ -+@Output pui64FenceUID Unique ID of the created fence -+ -+@Output ppvFenceFinaliseData Any data needed to finalise the fence -+ in a later call to the function -+ SyncCheckpointFinaliseFence() -+ -+@Output psNewSyncCheckpoint The sync checkpoint contained in -+ the new fence -+ -+@Return PVRSRV_OK if a valid fence was provided. -+ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native -+ sync has not registered a callback function. -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+SyncCheckpointCreateFence(PPVRSRV_DEVICE_NODE psDeviceNode, -+ const IMG_CHAR *pszFenceName, -+ PVRSRV_TIMELINE hTimeline, -+ PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, -+ PVRSRV_FENCE *phNewFence, -+ IMG_UINT64 *pui64FenceUID, -+ void **ppvFenceFinaliseData, -+ PSYNC_CHECKPOINT *psNewSyncCheckpoint, -+ void **ppvTimelineUpdateSyncPrim, -+ IMG_UINT32 *pui32TimelineUpdateValue, -+ PDUMP_FLAGS_T ui32PDumpFlags); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointRollbackFenceData -+ -+@Description 'Rolls back' the fence specified (destroys the fence and -+ takes any other required actions to undo the fence -+ creation (eg if the implementation wishes to revert the -+ incrementing of the fence's timeline, etc). -+ This function in turn calls a function provided by the -+ OS native sync implementation. -+ -+@Input hFence Fence to be 'rolled back' -+ -+@Input pvFinaliseData Data needed to finalise the -+ fence -+ -+@Return PVRSRV_OK if a valid fence was provided. -+ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native -+ sync has not registered a callback function. 
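[Editor's illustration] For context, a minimal sketch of the resolve-then-free contract documented above, as a server-side caller might use it: the returned checkpoint array is owned by the sync implementation and is handed back via SyncCheckpointFreeCheckpointListMem(). EnqueueCheckForCheckpoint() and the wrapper function are hypothetical stand-ins for the real kick code; only the two SyncCheckpoint* calls and the PVR_LOG_RETURN_IF_ERROR macro come from this driver.

/* Sketch only: assumes the driver build environment. EnqueueCheckForCheckpoint()
 * is a hypothetical placeholder for the CCB command construction in the kick path. */
static PVRSRV_ERROR KickWaitOnInputFence(PSYNC_CHECKPOINT_CONTEXT psSyncContext,
                                         PVRSRV_FENCE hInFence,
                                         PDUMP_FLAGS_T ui32PDumpFlags)
{
	PSYNC_CHECKPOINT *apsCheckpoints = NULL;
	IMG_UINT32 ui32NumCheckpoints = 0, i;
	IMG_UINT64 ui64FenceUID = 0;
	PVRSRV_ERROR eError;

	eError = SyncCheckpointResolveFence(psSyncContext, hInFence,
	                                    &ui32NumCheckpoints, &apsCheckpoints,
	                                    &ui64FenceUID, ui32PDumpFlags);
	PVR_LOG_RETURN_IF_ERROR(eError, "SyncCheckpointResolveFence");

	/* ui32NumCheckpoints may legitimately be 0 for a null (-1) fence. */
	for (i = 0; i < ui32NumCheckpoints; i++)
	{
		EnqueueCheckForCheckpoint(apsCheckpoints[i]); /* hypothetical */
	}

	/* The list memory belongs to the sync implementation; return it. */
	if (apsCheckpoints)
	{
		SyncCheckpointFreeCheckpointListMem(apsCheckpoints);
	}

	return PVRSRV_OK;
}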
-+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+SyncCheckpointRollbackFenceData(PVRSRV_FENCE hFence, void *pvFinaliseData); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointFinaliseFence -+ -+@Description 'Finalise' the fence specified (performs any actions the -+ underlying implementation may need to perform just prior -+ to the fence being returned to the client. -+ This function in turn calls a function provided by the -+ OS native sync implementation - if the native sync -+ implementation does not need to perform any actions at -+ this time, this function does not need to be registered. -+ -+@Input psDevNode Device node -+ -+@Input hFence Fence to be 'finalised' -+ -+@Input pvFinaliseData Data needed to finalise the fence -+ -+@Input psSyncCheckpoint Base sync checkpoint that this fence -+ is formed of -+ -+@Input pszName Fence annotation -+ -+@Return PVRSRV_OK if a valid fence and finalise data were provided. -+ PVRSRV_ERROR_INVALID_PARAMS if an invalid fence or finalise -+ data were provided. -+ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native -+ sync has not registered a callback function (permitted). -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+SyncCheckpointFinaliseFence(PPVRSRV_DEVICE_NODE psDevNode, -+ PVRSRV_FENCE hFence, -+ void *pvFinaliseData, -+ PSYNC_CHECKPOINT psSyncCheckpoint, -+ const IMG_CHAR *pszName); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointFreeCheckpointListMem -+ -+@Description Free memory the memory which was allocated by the sync -+ implementation and used to return the list of sync -+ checkpoints when resolving a fence. -+ to the fence being returned to the client. -+ This function in turn calls a free function registered by -+ the sync implementation (if a function has been registered). -+ -+@Input pvCheckpointListMem Pointer to the memory to be freed -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void -+SyncCheckpointFreeCheckpointListMem(void *pvCheckpointListMem); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointNoHWUpdateTimelines -+ -+@Description Called by the DDK in a NO_HARDWARE build only. -+ After syncs have been manually signalled by the DDK, this -+ function is called to allow the OS native sync implementation -+ to update its timelines (as the usual callback notification -+ of signalled checkpoints is not supported for NO_HARDWARE). -+ This function in turn calls a function provided by the -+ OS native sync implementation. -+ -+@Input pvPrivateData Any data the OS native sync -+ implementation might require. -+ -+@Return PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native -+ sync has not registered a callback function, otherwise -+ PVRSRV_OK. -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointDumpInfoOnStalledUFOs -+ -+@Description Called by the DDK in the event of the health check watchdog -+ examining the CCBs and determining that one has failed to -+ progress after 10 second when the GPU is idle due to waiting -+ on one or more UFO fences. 
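[Editor's illustration] To make the create/rollback/finalise contract of the preceding functions concrete, a sketch of an output-fence path follows: the fence is rolled back if anything fails after creation, and is finalised only once it is definitely being returned to the caller. SubmitWork() and the wrapper itself are hypothetical; the three SyncCheckpoint* calls and their parameter order are taken from this header.

/* Sketch only: assumes the driver build environment. SubmitWork() stands in
 * for whatever kick/queueing step consumes the newly created checkpoint. */
static PVRSRV_ERROR CreateOutputFence(PPVRSRV_DEVICE_NODE psDevNode,
                                      PSYNC_CHECKPOINT_CONTEXT psSyncContext,
                                      PVRSRV_TIMELINE hTimeline,
                                      PDUMP_FLAGS_T ui32PDumpFlags,
                                      PVRSRV_FENCE *phOutFence)
{
	PSYNC_CHECKPOINT psNewCheckpoint = NULL;
	void *pvFinaliseData = NULL;
	void *pvTimelineUpdateSync = NULL;
	IMG_UINT32 ui32TimelineUpdateValue = 0;
	IMG_UINT64 ui64FenceUID = 0;
	PVRSRV_ERROR eError;

	eError = SyncCheckpointCreateFence(psDevNode, "output-fence", hTimeline,
	                                   psSyncContext, phOutFence, &ui64FenceUID,
	                                   &pvFinaliseData, &psNewCheckpoint,
	                                   &pvTimelineUpdateSync, &ui32TimelineUpdateValue,
	                                   ui32PDumpFlags);
	PVR_LOG_RETURN_IF_ERROR(eError, "SyncCheckpointCreateFence");

	eError = SubmitWork(psNewCheckpoint); /* hypothetical */
	if (eError != PVRSRV_OK)
	{
		/* Undo the fence (and any timeline increment) before failing. */
		SyncCheckpointRollbackFenceData(*phOutFence, pvFinaliseData);
		return eError;
	}

	/* Finalise only when the fence is really going back to the client. */
	return SyncCheckpointFinaliseFence(psDevNode, *phOutFence, pvFinaliseData,
	                                   psNewCheckpoint, "output-fence");
}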
-+ The DDK will pass a list of UFOs on which the CCB is waiting -+ and the sync implementation will check them to see if any -+ relate to sync points it has created. If so, the -+ implementation should dump debug information on those sync -+ points to the kernel log or other suitable output (which will -+ allow the unsignalled syncs to be identified). -+ The function shall return the number of syncs in the provided -+ array that were syncs which it had created. -+ -+@Input ui32NumUFOs The number of UFOs in the array passed -+ in the pui32VAddrs parameter. -+ pui32Vaddr The array of UFOs the CCB is waiting on. -+ -+@Output pui32NumSyncOwnedUFOs The number of UFOs in pui32Vaddr which -+ relate to syncs created by the sync -+ implementation. -+ -+@Return PVRSRV_OK if a valid pointer is provided in pui32NumSyncOwnedUFOs. -+ PVRSRV_ERROR_INVALID_PARAMS if a NULL value is provided in -+ pui32NumSyncOwnedUFOs. -+ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native -+ sync has not registered a callback function. -+ -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+SyncCheckpointDumpInfoOnStalledUFOs(IMG_UINT32 ui32NumUFOs, -+ IMG_UINT32 *pui32Vaddrs, -+ IMG_UINT32 *pui32NumSyncOwnedUFOs); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointGetStateString -+ -+@Description Called to get a string representing the current state of a -+ sync checkpoint. -+ -+@Input psSyncCheckpoint Synchronisation checkpoint to get the -+ state for. -+ -+@Return The string representing the current state of this checkpoint -+*/ -+/*****************************************************************************/ -+const IMG_CHAR * -+SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointRecordLookup -+ -+@Description Returns a debug string with information about the -+ sync checkpoint. -+ -+@Input psDevNode The device owning the sync -+ checkpoint to lookup -+ -+@Input ui32FwAddr The firmware address of the sync -+ checkpoint to lookup -+ -+@Input pszSyncInfo Character array to write to -+ -+@Input len Len of the character array -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void -+SyncCheckpointRecordLookup(PPVRSRV_DEVICE_NODE psDevNode, -+ IMG_UINT32 ui32FwAddr, -+ IMG_CHAR * pszSyncInfo, size_t len); -+ -+#if defined(PDUMP) -+/*************************************************************************/ /*! -+@Function PVRSRVSyncCheckpointFencePDumpPolKM -+ -+@Description Called to insert a poll into the PDump script on a given -+ Fence being signalled or errored. -+ -+@Input hFence Fence for PDump to poll on -+ -+@Return PVRSRV_OK if a valid sync checkpoint was provided. -+*/ -+/*****************************************************************************/ -+ -+PVRSRV_ERROR PVRSRVSyncCheckpointSignalledPDumpPolKM(PVRSRV_FENCE hFence); -+ -+#endif -+ -+#endif /* SYNC_CHECKPOINT_H */ -diff --git a/drivers/gpu/drm/img-rogue/sync_checkpoint_external.h b/drivers/gpu/drm/img-rogue/sync_checkpoint_external.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/sync_checkpoint_external.h -@@ -0,0 +1,83 @@ -+/*************************************************************************/ /*! 
-+@File -+@Title Services external synchronisation checkpoint interface header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Defines synchronisation checkpoint structures that are visible -+ internally and externally -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef SYNC_CHECKPOINT_EXTERNAL_H -+#define SYNC_CHECKPOINT_EXTERNAL_H -+ -+#include "img_types.h" -+ -+#ifndef CHECKPOINT_TYPES -+#define CHECKPOINT_TYPES -+typedef struct SYNC_CHECKPOINT_CONTEXT_TAG *PSYNC_CHECKPOINT_CONTEXT; -+ -+typedef struct SYNC_CHECKPOINT_TAG *PSYNC_CHECKPOINT; -+#endif -+ -+/* PVRSRV_SYNC_CHECKPOINT states. -+ * The OS native sync implementation should call pfnIsSignalled() to determine if a -+ * PVRSRV_SYNC_CHECKPOINT has signalled (which will return an IMG_BOOL), but can set the -+ * state for a PVRSRV_SYNC_CHECKPOINT (which is currently in the NOT_SIGNALLED state) -+ * where that PVRSRV_SYNC_CHECKPOINT is representing a foreign sync. 
-+ */ -+typedef IMG_UINT32 PVRSRV_SYNC_CHECKPOINT_STATE; -+ -+#define PVRSRV_SYNC_CHECKPOINT_UNDEF 0x000U -+#define PVRSRV_SYNC_CHECKPOINT_ACTIVE 0xac1U /*!< checkpoint has not signalled */ -+#define PVRSRV_SYNC_CHECKPOINT_SIGNALLED 0x519U /*!< checkpoint has signalled */ -+#define PVRSRV_SYNC_CHECKPOINT_ERRORED 0xeffU /*!< checkpoint has been errored */ -+ -+ -+#define PVRSRV_UFO_IS_SYNC_CHECKPOINT_FWADDR(fwaddr) (((fwaddr) & 0x1U) != 0U) -+#define PVRSRV_UFO_IS_SYNC_CHECKPOINT(ufoptr) (PVRSRV_UFO_IS_SYNC_CHECKPOINT_FWADDR((ufoptr)->puiAddrUFO.ui32Addr)) -+ -+/* Maximum number of sync checkpoints the firmware supports in one fence */ -+#define MAX_SYNC_CHECKPOINTS_PER_FENCE 32U -+ -+/*! -+ * Define to be used with SyncCheckpointAlloc() to indicate a checkpoint which -+ * represents a foreign sync point or collection of foreign sync points. -+ */ -+#define SYNC_CHECKPOINT_FOREIGN_CHECKPOINT ((PVRSRV_TIMELINE) - 2U) -+ -+#endif /* SYNC_CHECKPOINT_EXTERNAL_H */ -diff --git a/drivers/gpu/drm/img-rogue/sync_checkpoint_init.h b/drivers/gpu/drm/img-rogue/sync_checkpoint_init.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/sync_checkpoint_init.h -@@ -0,0 +1,82 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services synchronisation checkpoint initialisation interface -+ header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Defines synchronisation checkpoint structures that are visible -+ internally and externally -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
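[Editor's illustration] In sync_checkpoint_external.h above, the low bit of a UFO firmware address is what marks it as referring to a sync checkpoint, and the 32-bit state word takes one of the four values just defined. The snippet below is a standalone model of that decoding, using plain C types rather than the IMG_* typedefs; the example address is arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Values mirrored from sync_checkpoint_external.h */
#define PVRSRV_SYNC_CHECKPOINT_UNDEF     0x000u
#define PVRSRV_SYNC_CHECKPOINT_ACTIVE    0xac1u  /* not yet signalled */
#define PVRSRV_SYNC_CHECKPOINT_SIGNALLED 0x519u
#define PVRSRV_SYNC_CHECKPOINT_ERRORED   0xeffu

/* A UFO firmware address with bit 0 set refers to a sync checkpoint. */
static int is_checkpoint_fwaddr(uint32_t fwaddr) { return (fwaddr & 0x1u) != 0u; }

static const char *state_string(uint32_t state)
{
	switch (state)
	{
		case PVRSRV_SYNC_CHECKPOINT_ACTIVE:    return "Active";
		case PVRSRV_SYNC_CHECKPOINT_SIGNALLED: return "Signalled";
		case PVRSRV_SYNC_CHECKPOINT_ERRORED:   return "Errored";
		default:                               return "Undefined";
	}
}

int main(void)
{
	uint32_t fwaddr = 0x00402031u;                                /* arbitrary example */
	printf("checkpoint? %d\n", is_checkpoint_fwaddr(fwaddr));     /* prints 1 */
	printf("state: %s\n", state_string(PVRSRV_SYNC_CHECKPOINT_SIGNALLED));
	return 0;
}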
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef SYNC_CHECKPOINT_INIT_H -+#define SYNC_CHECKPOINT_INIT_H -+ -+#include "device.h" -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointInit -+ -+@Description Initialise the sync checkpoint driver by giving it the -+ device node (needed to determine the pfnUFOAlloc function -+ to call in order to allocate sync block memory). -+ -+@Input psDevNode Device for which sync checkpoints -+ are being initialised -+ -+@Return PVRSRV_OK initialised successfully, -+ PVRSRV_ERROR_ otherwise -+*/ -+/*****************************************************************************/ -+PVRSRV_ERROR -+SyncCheckpointInit(PVRSRV_DEVICE_NODE *psDevNode); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointDeinit -+ -+@Description Deinitialise the sync checkpoint driver. -+ Frees resources allocated during initialisation. -+ -+@Input psDevNode Device for which sync checkpoints -+ are being de-initialised -+ -+@Return None -+*/ -+/*****************************************************************************/ -+void SyncCheckpointDeinit(PVRSRV_DEVICE_NODE *psDevNode); -+ -+#endif /* SYNC_CHECKPOINT_INIT_H */ -diff --git a/drivers/gpu/drm/img-rogue/sync_checkpoint_internal.h b/drivers/gpu/drm/img-rogue/sync_checkpoint_internal.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/sync_checkpoint_internal.h -@@ -0,0 +1,274 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services internal synchronisation checkpoint interface header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Defines the internal server interface for services -+ synchronisation checkpoints. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef SYNC_CHECKPOINT_INTERNAL_H -+#define SYNC_CHECKPOINT_INTERNAL_H -+ -+#include "img_types.h" -+#include "opaque_types.h" -+#include "sync_checkpoint_external.h" -+#include "sync_checkpoint.h" -+#include "ra.h" -+#include "dllist.h" -+#include "lock.h" -+#include "devicemem.h" -+#include "rgx_fwif_shared.h" -+#include "rgx_fwif_km.h" -+ -+struct SYNC_CHECKPOINT_RECORD; -+ -+/* -+ Private structures -+*/ -+ -+typedef struct _SYNC_CHECKPOINT_CONTEXT_CTL_ _SYNC_CHECKPOINT_CONTEXT_CTL, *_PSYNC_CHECKPOINT_CONTEXT_CTL; -+ -+typedef struct SYNC_CHECKPOINT_CONTEXT_TAG _SYNC_CHECKPOINT_CONTEXT; -+ -+typedef struct _SYNC_CHECKPOINT_BLOCK_ -+{ -+ ATOMIC_T hRefCount; /*!< Ref count for this sync block */ -+ POS_LOCK hLock; -+ _SYNC_CHECKPOINT_CONTEXT *psContext; /*!< Our copy of the services connection */ -+ IMG_UINT32 ui32SyncBlockSize; /*!< Size of the sync checkpoint block */ -+ IMG_UINT32 ui32FirmwareAddr; /*!< Firmware address */ -+ DEVMEM_MEMDESC *hMemDesc; /*!< DevMem allocation for block */ -+ volatile IMG_UINT32 *pui32LinAddr; /*!< Server-code CPU mapping */ -+ RA_BASE_T uiSpanBase; /*!< Base of this import (FW DevMem) in the span RA */ -+#if defined(PDUMP) -+ DLLIST_NODE sListNode; /*!< List node for the sync chkpt blocks */ -+#endif -+} SYNC_CHECKPOINT_BLOCK; -+ -+typedef struct SYNC_CHECKPOINT_RECORD* PSYNC_CHECKPOINT_RECORD_HANDLE; -+ -+typedef struct SYNC_CHECKPOINT_TAG -+{ -+ /* A sync checkpoint is assigned a unique ID, to avoid any confusion should -+ * the same memory be re-used later for a different checkpoint -+ */ -+ IMG_UINT32 ui32UID; /*!< Unique ID assigned to sync checkpoint (to distinguish checkpoints if memory is re-used)*/ -+ ATOMIC_T hRefCount; /*!< Ref count for this sync */ -+ ATOMIC_T hEnqueuedCCBCount; /*!< Num times sync has been put in CCBs */ -+ SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock; /*!< Synchronisation block this checkpoint is allocated on */ -+ RA_BASE_T uiAllocatedAddr; /*!< Allocated address of the sync */ -+ volatile SYNC_CHECKPOINT_FW_OBJ *psSyncCheckpointFwObj; /*!< CPU view of the data held in the sync block */ -+ PRGXFWIF_UFO_ADDR sCheckpointUFOAddr; /*!< PRGXFWIF_UFO_ADDR struct used to pass update address to FW */ -+ IMG_CHAR azName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the checkpoint */ -+ PVRSRV_TIMELINE 
hTimeline; /*!< Timeline on which this sync checkpoint was created */ -+ IMG_PID uiProcess; /*!< The Process ID of the process which created this sync checkpoint */ -+ PSYNC_CHECKPOINT_RECORD_HANDLE hRecord; /*!< Sync record handle */ -+ DLLIST_NODE sListNode; /*!< List node for the global sync chkpt list */ -+ DLLIST_NODE sDeferredFreeListNode; /*!< List node for the deferred free sync chkpt list */ -+ IMG_UINT32 ui32FWAddr; /*!< FWAddr stored at sync checkpoint alloc time */ -+#if defined(PDUMP) -+ PDUMP_FLAGS_T ui32PDumpFlags; /*!< Pdump Capture mode to be used for POL*/ -+#endif -+#if defined(DEBUG) -+ IMG_UINT32 ui32ValidationCheck; /*!< Structure validity pattern */ -+#endif -+} SYNC_CHECKPOINT; -+ -+ -+typedef struct _SYNC_CHECKPOINT_SIGNAL_ -+{ -+ SYNC_CHECKPOINT asSyncCheckpoint; /*!< Store sync checkpt for deferred signal */ -+ IMG_UINT32 ui32Status; /*!< sync checkpt status signal/errored */ -+} _SYNC_CHECKPOINT_DEFERRED_SIGNAL; -+ -+#define GET_CP_CB_NEXT_IDX(_curridx) (((_curridx) + 1) % SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL) -+#define GET_CP_CB_BASE(_idx) (IMG_OFFSET_ADDR(psDevNode->pui8DeferredSyncCPSignal, \ -+ ((_idx) * sizeof(_SYNC_CHECKPOINT_DEFERRED_SIGNAL)))) -+ -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointGetFirmwareAddr -+ -+@Description . -+ -+@Input psSyncCheckpoint Synchronisation checkpoint to get -+ the firmware address of -+ -+@Return The firmware address of the sync checkpoint -+ -+*/ -+/*****************************************************************************/ -+IMG_UINT32 -+SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointCCBEnqueued -+ -+@Description Increment the CCB enqueued reference count for a -+ synchronisation checkpoint. This indicates how many FW -+ operations (checks/update) have been placed into CCBs for the -+ sync checkpoint. -+ When the FW services these operation, it increments its own -+ reference count. When these two values are equal, we know -+ there are not outstanding FW operating for the checkpoint -+ in any CCB. -+ -+@Input psSyncCheckpoint Synchronisation checkpoint for which -+ to increment the enqueued reference -+ count -+ -+@Return None -+ -+*/ -+/*****************************************************************************/ -+void -+SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointGetEnqueuedCount -+ -+@Description . -+ -+@Input psSyncCheckpoint Synchronisation checkpoint to get -+ the enqueued count of -+ -+@Return The enqueued count of the sync checkpoint -+ (i.e. the number of FW operations (checks or updates) -+ currently enqueued in CCBs for the sync checkpoint) -+ -+*/ -+/*****************************************************************************/ -+IMG_UINT32 -+SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointGetReferenceCount -+ -+@Description . 
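[Editor's illustration] The GET_CP_CB_* macros defined above implement the classic one-slot-free circular buffer used by the deferred PDump signal path: the buffer is empty when the write index equals the read index, and it is treated as full when advancing the write index would collide with the read index; the same wrap-around arithmetic (with a power-of-two mask) backs the checkpoint pool earlier in this patch. Below is a standalone model with a small capacity; SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL is defined elsewhere in the driver, so a stand-in value is used.

#include <assert.h>
#include <stdint.h>

#define CB_CAPACITY 8u   /* stand-in for SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL */
#define CB_NEXT(idx) (((idx) + 1u) % CB_CAPACITY)

/* Producer side: mirrors the "is full" test in _SyncCheckpointUpdatePDump(). */
static int cb_try_push(uint16_t *write_idx, uint16_t read_idx)
{
	uint16_t next = CB_NEXT(*write_idx);
	if (next == read_idx)
		return 0;          /* full: one slot is always left unused */
	*write_idx = next;     /* slot at the old write index now holds data */
	return 1;
}

/* Consumer side: mirrors the MISR handler draining up to a snapshotted write index. */
static unsigned cb_drain(uint16_t *read_idx, uint16_t write_snapshot)
{
	unsigned drained = 0;
	while (*read_idx != write_snapshot)
	{
		*read_idx = CB_NEXT(*read_idx);
		drained++;
	}
	return drained;
}

int main(void)
{
	uint16_t rd = 0, wr = 0;
	unsigned pushed = 0;
	while (cb_try_push(&wr, rd))
		pushed++;
	assert(pushed == CB_CAPACITY - 1u);   /* capacity-1 usable slots */
	assert(cb_drain(&rd, wr) == pushed);  /* consumer sees every deferred entry */
	return 0;
}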
-+ -+@Input psSyncCheckpoint Synchronisation checkpoint to get -+ the reference count of -+ -+@Return The host reference count of the sync checkpoint -+ -+*/ -+/*****************************************************************************/ -+IMG_UINT32 -+SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointGetCreator -+ -+@Description . -+ -+@Input psSyncCheckpoint Synchronisation checkpoint to get -+ the creating process of -+ -+@Return The process id of the process which created this sync checkpoint. -+ -+*/ -+/*****************************************************************************/ -+IMG_PID -+SyncCheckpointGetCreator(PSYNC_CHECKPOINT psSyncCheckpoint); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointGetId -+ -+@Description . -+ -+@Input psSyncCheckpoint Synchronisation checkpoint to get -+ the unique Id of -+ -+@Return The unique Id of the sync checkpoint -+ -+*/ -+/*****************************************************************************/ -+IMG_UINT32 -+SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointGetTimeline -+ -+@Description . -+ -+@Input psSyncCheckpoint Synchronisation checkpoint to get -+ the parent timeline of -+ -+@Return The parent timeline of the sync checkpoint -+ -+*/ -+/*****************************************************************************/ -+PVRSRV_TIMELINE -+SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint); -+ -+/*************************************************************************/ /*! -+@Function SyncCheckpointGetRGXFWIFUFOAddr -+ -+@Description . -+ -+@Input psSyncCheckpoint Synchronisation checkpoint to get -+ the PRGXFWIF_UFO_ADDR of -+ -+@Return The PRGXFWIF_UFO_ADDR of the sync checkpoint, used when -+ providing the update in server kick code. -+ -+*/ -+/*****************************************************************************/ -+PRGXFWIF_UFO_ADDR* -+SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint); -+ -+#if !defined(SUPPORT_NATIVE_FENCE_SYNC) -+/*************************************************************************/ /*! -+@Function SyncCheckpointGetAssociatedDevice -+ -+@Description . -+ -+@Input psSyncCheckpointContext Synchronisation Checkpoint context -+ to get the device node of -+ -+@Return The PVRSRV_DEVICE_NODE of the device on which the sync -+ checkpoint context was created. -+ -+*/ -+/*****************************************************************************/ -+PPVRSRV_DEVICE_NODE -+SyncCheckpointGetAssociatedDevice(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext); -+#endif /* !defined(SUPPORT_NATIVE_FENCE_SYNC) */ -+ -+#endif /* SYNC_CHECKPOINT_INTERNAL_H */ -diff --git a/drivers/gpu/drm/img-rogue/sync_fallback_server.h b/drivers/gpu/drm/img-rogue/sync_fallback_server.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/sync_fallback_server.h -@@ -0,0 +1,204 @@ -+/**************************************************************************/ /*! -+@File -+@Title Fallback sync interface -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
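
For reference, the checkpoint bookkeeping declared in sync_checkpoint_internal.h above pairs a host-side enqueued count with the count the firmware maintains as it services operations (when the two are equal, no CCB still holds un-serviced work for the checkpoint, as the SyncCheckpointCCBEnqueued documentation states), and GET_CP_CB_NEXT_IDX advances a wrap-around index into the deferred-signal buffer. A minimal standalone sketch of both ideas, using invented toy_* names and a made-up buffer size rather than the driver's types, is:

#include <stdio.h>

#define MAX_DEFERRED_SIGNAL 8  /* stand-in for SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL */

/* Illustrative checkpoint: the host bumps enqueued_count each time the
 * checkpoint is placed into a CCB; the FW bumps its own count as it
 * services those operations. */
struct toy_checkpoint {
    unsigned int enqueued_count;    /* host-side: ops placed into CCBs */
    unsigned int fw_serviced_count; /* FW-side: ops already processed  */
};

/* No outstanding FW work when both counts match. */
static int toy_checkpoint_idle(const struct toy_checkpoint *cp)
{
    return cp->enqueued_count == cp->fw_serviced_count;
}

/* Wrap-around index advance, the same shape as GET_CP_CB_NEXT_IDX. */
static unsigned int next_idx(unsigned int cur)
{
    return (cur + 1u) % MAX_DEFERRED_SIGNAL;
}

int main(void)
{
    struct toy_checkpoint cp = { 0, 0 };

    cp.enqueued_count++;                            /* host queues one FW op */
    printf("idle=%d\n", toy_checkpoint_idle(&cp));  /* 0: op outstanding     */
    cp.fw_serviced_count++;                         /* FW services it        */
    printf("idle=%d\n", toy_checkpoint_idle(&cp));  /* 1: counts match       */
    printf("next of 7 is %u\n", next_idx(7));       /* wraps back to 0       */
    return 0;
}
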
-+*/ /***************************************************************************/ -+ -+#ifndef SYNC_FALLBACK_SERVER_H -+#define SYNC_FALLBACK_SERVER_H -+ -+#include "img_types.h" -+#include "sync_checkpoint.h" -+#include "device.h" -+#include "connection_server.h" -+ -+ -+typedef struct _PVRSRV_TIMELINE_SERVER_ PVRSRV_TIMELINE_SERVER; -+typedef struct _PVRSRV_FENCE_SERVER_ PVRSRV_FENCE_SERVER; -+typedef struct _PVRSRV_FENCE_EXPORT_ PVRSRV_FENCE_EXPORT; -+ -+typedef struct _PVRSRV_SYNC_PT_ PVRSRV_SYNC_PT; -+ -+#define SYNC_FB_TIMELINE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH -+#define SYNC_FB_FENCE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH -+ -+/*****************************************************************************/ -+/* */ -+/* SW SPECIFIC FUNCTIONS */ -+/* */ -+/*****************************************************************************/ -+ -+PVRSRV_ERROR SyncFbTimelineCreateSW(IMG_UINT32 uiTimelineNameSize, -+ const IMG_CHAR *pszTimelineName, -+ PVRSRV_TIMELINE_SERVER **ppsTimeline); -+ -+PVRSRV_ERROR SyncFbFenceCreateSW(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDeviceNode, -+ PVRSRV_TIMELINE_SERVER *psTimeline, -+ IMG_UINT32 uiFenceNameSize, -+ const IMG_CHAR *pszFenceName, -+ PVRSRV_FENCE_SERVER **ppsOutputFence, -+ IMG_UINT64 *pui64SyncPtIdx); -+PVRSRV_ERROR SyncFbSWTimelineFenceCreateKM(PVRSRV_DEVICE_NODE *psDeviceNode, -+ PVRSRV_TIMELINE iSWTimeline, -+ const IMG_CHAR *pszFenceName, -+ PVRSRV_FENCE *piOutputFence, -+ IMG_UINT64* pui64SyncPtIdx); -+ -+PVRSRV_ERROR SyncFbTimelineAdvanceSW(PVRSRV_TIMELINE_SERVER *psTimeline, -+ IMG_UINT64 *pui64SyncPtIdx); -+PVRSRV_ERROR SyncFbSWTimelineAdvanceKM(void *pvSWTimelineObj, -+ IMG_UINT64* pui64SyncPtIdx); -+ -+/*****************************************************************************/ -+/* */ -+/* PVR SPECIFIC FUNCTIONS */ -+/* */ -+/*****************************************************************************/ -+ -+PVRSRV_ERROR SyncFbTimelineCreatePVR(IMG_UINT32 uiTimelineNameSize, -+ const IMG_CHAR *pszTimelineName, -+ PVRSRV_TIMELINE_SERVER **ppsTimeline); -+ -+PVRSRV_ERROR SyncFbFenceCreatePVR(PPVRSRV_DEVICE_NODE psDeviceNode, -+ const IMG_CHAR *pszName, -+ PVRSRV_TIMELINE iTl, -+ PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext, -+ PVRSRV_FENCE *piOutFence, -+ IMG_UINT64 *puiFenceUID, -+ void **ppvFenceFinaliseData, -+ PSYNC_CHECKPOINT *ppsOutCheckpoint, -+ void **ppvTimelineUpdateSync, -+ IMG_UINT32 *puiTimelineUpdateValue); -+ -+PVRSRV_ERROR SyncFbFenceResolvePVR(PSYNC_CHECKPOINT_CONTEXT psContext, -+ PVRSRV_FENCE iFence, -+ IMG_UINT32 *puiNumCheckpoints, -+ PSYNC_CHECKPOINT **papsCheckpoints, -+ IMG_UINT64 *puiFenceUID); -+ -+/*****************************************************************************/ -+/* */ -+/* GENERIC FUNCTIONS */ -+/* */ -+/*****************************************************************************/ -+ -+PVRSRV_ERROR SyncFbGetFenceObj(PVRSRV_FENCE iFence, -+ void **ppvFenceObj); -+ -+PVRSRV_ERROR SyncFbSWGetTimelineObj(PVRSRV_TIMELINE iSWTimeline, -+ void **ppvSWTimelineObj); -+ -+PVRSRV_ERROR SyncFbTimelineRelease(PVRSRV_TIMELINE_SERVER *psTl); -+ -+PVRSRV_ERROR SyncFbFenceRelease(PVRSRV_FENCE_SERVER *psFence); -+PVRSRV_ERROR SyncFbFenceReleaseKM(void *pvFenceObj); -+ -+PVRSRV_ERROR SyncFbFenceDup(PVRSRV_FENCE_SERVER *psInFence, -+ PVRSRV_FENCE_SERVER **ppsOutFence); -+ -+PVRSRV_ERROR SyncFbFenceMerge(PVRSRV_FENCE_SERVER *psInFence1, -+ PVRSRV_FENCE_SERVER *psInFence2, -+ IMG_UINT32 uiFenceNameSize, -+ const IMG_CHAR *pszFenceName, -+ PVRSRV_FENCE_SERVER **ppsOutFence); -+ -+PVRSRV_ERROR 
SyncFbFenceWait(PVRSRV_FENCE_SERVER *psFence, -+ IMG_UINT32 uiTimeout); -+ -+PVRSRV_ERROR SyncFbFenceDump(PVRSRV_FENCE_SERVER *psFence, -+ IMG_UINT32 uiLine, -+ IMG_UINT32 uiFileNameLength, -+ const IMG_CHAR *pszFile, -+ IMG_UINT32 uiModuleLength, -+ const IMG_CHAR *pszModule, -+ IMG_UINT32 uiDescLength, -+ const IMG_CHAR *pszDesc); -+ -+PVRSRV_ERROR SyncFbDumpFenceKM(void *pvSWFenceObj, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile); -+ -+PVRSRV_ERROR SyncFbSWDumpTimelineKM(void *pvSWTimelineObj, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile); -+ -+PVRSRV_ERROR SyncFbRegisterSyncFunctions(void); -+ -+PVRSRV_ERROR SyncFbRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+PVRSRV_ERROR SyncFbDeregisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+IMG_UINT32 SyncFbDumpInfoOnStalledUFOs(IMG_UINT32 nr_ufos, IMG_UINT32 *vaddrs); -+ -+IMG_BOOL SyncFbCheckpointHasSignalled(IMG_UINT32 ui32FwAddr, IMG_UINT32 ui32Value); -+ -+/*****************************************************************************/ -+/* */ -+/* IMPORT/EXPORT FUNCTIONS */ -+/* */ -+/*****************************************************************************/ -+ -+#if defined(SUPPORT_INSECURE_EXPORT) -+PVRSRV_ERROR SyncFbFenceExportInsecure(PVRSRV_FENCE_SERVER *psFence, -+ PVRSRV_FENCE_EXPORT **ppExport); -+ -+PVRSRV_ERROR SyncFbFenceExportDestroyInsecure(PVRSRV_FENCE_EXPORT *psExport); -+ -+PVRSRV_ERROR SyncFbFenceImportInsecure(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevice, -+ PVRSRV_FENCE_EXPORT *psImport, -+ PVRSRV_FENCE_SERVER **psFence); -+#endif /* defined(SUPPORT_INSECURE_EXPORT) */ -+ -+PVRSRV_ERROR SyncFbFenceExportSecure(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE * psDevNode, -+ PVRSRV_FENCE_SERVER *psFence, -+ IMG_SECURE_TYPE *phSecure, -+ PVRSRV_FENCE_EXPORT **ppsExport, -+ CONNECTION_DATA **ppsSecureConnection); -+ -+PVRSRV_ERROR SyncFbFenceExportDestroySecure(PVRSRV_FENCE_EXPORT *psExport); -+ -+PVRSRV_ERROR SyncFbFenceImportSecure(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevice, -+ IMG_SECURE_TYPE hSecure, -+ PVRSRV_FENCE_SERVER **psFence); -+ -+#endif /* SYNC_FALLBACK_SERVER_H */ -diff --git a/drivers/gpu/drm/img-rogue/sync_internal.h b/drivers/gpu/drm/img-rogue/sync_internal.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/sync_internal.h -@@ -0,0 +1,112 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services internal synchronisation interface header -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Defines the internal client side interface for services -+ synchronisation -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. 
-+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef SYNC_INTERNAL -+#define SYNC_INTERNAL -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "ra.h" -+#include "dllist.h" -+#include "lock.h" -+#include "devicemem.h" -+#include "sync_prim_internal.h" -+ -+#define LOCAL_SYNC_PRIM_RESET_VALUE 0 -+#define LOCAL_SYNC_PRIM_POISON_VALUE 0xa5a5a5a5u -+ -+/* -+ Debug feature to protect against GP DM page faults when -+ sync prims are freed by client before work is completed. 
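
The debug feature described in the comment above works by never letting the block's allocation count reach zero while the context is alive: the first sub-allocation is held back until teardown, so the shared block cannot be freed (and later faulted on by the GP data master) while outstanding work may still reference it. A rough sketch of that retain-until-destroy pattern, with invented names and an arbitrary block size, is:

#include <stdlib.h>

/* Illustrative context that sub-allocates small sync words from one block. */
struct toy_sync_context {
    unsigned int *block;       /* backing block shared by all sync words */
    unsigned int *first_sync;  /* first allocation, retained on purpose  */
    unsigned int  next_slot;
};

static struct toy_sync_context *toy_context_create(void)
{
    struct toy_sync_context *ctx = calloc(1, sizeof(*ctx));
    if (ctx == NULL)
        return NULL;
    ctx->block = calloc(64, sizeof(*ctx->block));
    if (ctx->block == NULL) {
        free(ctx);
        return NULL;
    }
    /* Keep the very first word for the lifetime of the context so the
     * backing block is never handed back while work may still touch it. */
    ctx->first_sync = &ctx->block[ctx->next_slot++];
    return ctx;
}

static unsigned int *toy_sync_alloc(struct toy_sync_context *ctx)
{
    return ctx->next_slot < 64 ? &ctx->block[ctx->next_slot++] : NULL;
}

static void toy_context_destroy(struct toy_sync_context *ctx)
{
    /* Only now are the retained first allocation and the block released. */
    free(ctx->block);
    free(ctx);
}

int main(void)
{
    struct toy_sync_context *ctx = toy_context_create();
    if (ctx != NULL) {
        unsigned int *sync = toy_sync_alloc(ctx);
        if (sync != NULL)
            *sync = 0;  /* reset value, cf. LOCAL_SYNC_PRIM_RESET_VALUE */
        toy_context_destroy(ctx);
    }
    return 0;
}
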
-+*/ -+#define LOCAL_SYNC_BLOCK_RETAIN_FIRST -+ -+/* -+ Private structure's -+*/ -+#define SYNC_PRIM_NAME_SIZE 50 -+typedef struct SYNC_PRIM_CONTEXT_TAG -+{ -+ SHARED_DEV_CONNECTION hDevConnection; -+ IMG_CHAR azName[SYNC_PRIM_NAME_SIZE]; /*!< Name of the RA */ -+ RA_ARENA *psSubAllocRA; /*!< RA context */ -+ IMG_CHAR azSpanName[SYNC_PRIM_NAME_SIZE];/*!< Name of the span RA */ -+ RA_ARENA *psSpanRA; /*!< RA used for span management of SubAllocRA */ -+ ATOMIC_T hRefCount; /*!< Ref count for this context */ -+#if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST) -+ IMG_HANDLE hFirstSyncPrim; /*!< Handle to the first allocated sync prim */ -+#endif -+} SYNC_PRIM_CONTEXT; -+ -+typedef struct SYNC_PRIM_BLOCK_TAG -+{ -+ SYNC_PRIM_CONTEXT *psContext; /*!< Our copy of the services connection */ -+ IMG_HANDLE hServerSyncPrimBlock; /*!< Server handle for this block */ -+ IMG_UINT32 ui32SyncBlockSize; /*!< Size of the sync prim block */ -+ IMG_UINT32 ui32FirmwareAddr; /*!< Firmware address */ -+ DEVMEM_MEMDESC *hMemDesc; /*!< Host mapping handle */ -+ IMG_UINT32 __iomem *pui32LinAddr; /*!< User CPU mapping */ -+ IMG_UINT64 uiSpanBase; /*!< Base of this import in the span RA */ -+ DLLIST_NODE sListNode; /*!< List node for the sync block list */ -+} SYNC_PRIM_BLOCK; -+ -+typedef struct SYNC_PRIM_TAG -+{ -+ PVRSRV_CLIENT_SYNC_PRIM sCommon; /*!< Client visible part of the sync prim */ -+ ATOMIC_T hRefCount; /*!< Ref count for this sync */ -+ SYNC_PRIM_BLOCK *psSyncBlock; /*!< Synchronisation block this primitive is allocated on */ -+ IMG_UINT64 uiSpanAddr; /*!< Span address of the sync */ -+ IMG_HANDLE hRecord; /*!< Sync record handle */ -+} SYNC_PRIM; -+ -+ -+IMG_INTERNAL PVRSRV_ERROR -+SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr); -+ -+IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM *psSync, -+ IMG_HANDLE *phBlock, -+ IMG_UINT32 *pui32Offset); -+ -+ -+#endif /* SYNC_INTERNAL */ -diff --git a/drivers/gpu/drm/img-rogue/sync_prim_internal.h b/drivers/gpu/drm/img-rogue/sync_prim_internal.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/sync_prim_internal.h -@@ -0,0 +1,84 @@ -+/*************************************************************************/ /*! -+@File -+@Title Services internal synchronisation typedef header -+@Description Defines synchronisation types that are used internally -+ only -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
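
The structures in sync_internal.h above give each block both a CPU mapping and a 32-bit firmware base address, so an individual primitive's firmware address is simply the block base plus its byte offset, once the offset has been checked for word alignment and for lying inside the block; the server code later in this patch performs the same check before handing a UFO address to the firmware. A small self-contained sketch of that arithmetic, with placeholder toy_* names:

#include <stdint.h>
#include <stdio.h>

/* Illustrative block: a chunk of sync memory with a firmware base address. */
struct toy_sync_block {
    uint32_t fw_base_addr;  /* address the firmware uses for word 0 */
    uint32_t block_size;    /* size of the block in bytes           */
};

/* Translate a byte offset inside the block into a firmware address.
 * The offset must be word aligned and within the block. */
static int toy_offset_to_fw_addr(const struct toy_sync_block *blk,
                                 uint32_t offset, uint32_t *fw_addr_out)
{
    if (offset >= blk->block_size || (offset % sizeof(uint32_t)) != 0)
        return -1;  /* out of range or misaligned */

    *fw_addr_out = blk->fw_base_addr + offset;
    return 0;
}

int main(void)
{
    struct toy_sync_block blk = { 0x10000000u, 4096u };
    uint32_t addr;

    if (toy_offset_to_fw_addr(&blk, 8u, &addr) == 0)
        printf("sync at offset 8 -> FW addr 0x%08x\n", (unsigned)addr);
    if (toy_offset_to_fw_addr(&blk, 6u, &addr) != 0)
        printf("offset 6 rejected (not word aligned)\n");
    return 0;
}
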
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef SYNC_INTERNAL_H -+#define SYNC_INTERNAL_H -+ -+#if defined(__cplusplus) -+extern "C" { -+#endif -+ -+#include -+ -+/* These are included here as the typedefs are required -+ * internally. -+ */ -+ -+typedef struct SYNC_PRIM_CONTEXT_TAG *PSYNC_PRIM_CONTEXT; -+typedef struct PVRSRV_CLIENT_SYNC_PRIM_TAG -+{ -+ volatile uint32_t __iomem *pui32LinAddr; /*!< User pointer to the primitive */ -+} PVRSRV_CLIENT_SYNC_PRIM; -+ -+/*! -+ * Bundled information for a sync prim operation -+ * -+ * Structure: #PVRSRV_CLIENT_SYNC_PRIM_OP -+ * Typedef: ::PVRSRV_CLIENT_SYNC_PRIM_OP -+ */ -+typedef struct PVRSRV_CLIENT_SYNC_PRIM_OP_TAG -+{ -+ #define PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK (1U << 0) -+ #define PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE (1U << 1) -+ #define PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE (PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE | (1U<<2)) -+ uint32_t ui32Flags; /*!< Operation flags: PVRSRV_CLIENT_SYNC_PRIM_OP_XXX */ -+ PVRSRV_CLIENT_SYNC_PRIM *psSync; /*!< Pointer to the client sync primitive */ -+ uint32_t ui32FenceValue; /*!< The Fence value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK is set) */ -+ uint32_t ui32UpdateValue; /*!< The Update value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE is set) */ -+} PVRSRV_CLIENT_SYNC_PRIM_OP; -+ -+#if defined(__cplusplus) -+} -+#endif -+#endif /* SYNC_INTERNAL_H */ -diff --git a/drivers/gpu/drm/img-rogue/sync_server.c b/drivers/gpu/drm/img-rogue/sync_server.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/sync_server.c -@@ -0,0 +1,1220 @@ -+/*************************************************************************/ /*! -+@File sync_server.c -+@Title Server side synchronisation functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements the server side functions that for synchronisation -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#include "img_types.h" -+#include "img_defs.h" -+#include "sync_server.h" -+#include "allocmem.h" -+#include "device.h" -+#include "devicemem.h" -+#include "devicemem_pdump.h" -+#include "osfunc.h" -+#include "pdump.h" -+#include "pvr_debug.h" -+#include "pvr_notifier.h" -+#include "pdump_km.h" -+#include "sync.h" -+#include "sync_internal.h" -+#include "connection_server.h" -+#include "htbserver.h" -+#include "rgxhwperf.h" -+#include "info_page.h" -+ -+#include "sync_checkpoint_internal.h" -+#include "sync_checkpoint.h" -+ -+/* Include this to obtain MAX_SYNC_CHECKPOINTS_PER_FENCE */ -+#include "sync_checkpoint_external.h" -+ -+/* Include this to obtain PVRSRV_MAX_DEV_VARS */ -+#include "pvrsrv_devvar.h" -+ -+#if defined(SUPPORT_SECURE_EXPORT) -+#include "ossecure_export.h" -+#endif -+ -+/* Set this to enable debug relating to the construction and maintenance of the sync address list */ -+#define SYNC_ADDR_LIST_DEBUG 0 -+ -+/* Set maximum number of FWAddrs that can be accommodated in a SYNC_ADDR_LIST. -+ * This should allow for PVRSRV_MAX_DEV_VARS dev vars plus -+ * MAX_SYNC_CHECKPOINTS_PER_FENCE sync checkpoints for check fences. -+ * The same SYNC_ADDR_LIST is also used to hold UFOs for updates. While this -+ * may need to accommodate the additional sync prim update returned by Native -+ * sync implementation (used for timeline debug), the size calculated from -+ * PVRSRV_MAX_DEV_VARS+MAX_SYNC_CHECKPOINTS_PER_FENCE should be ample. 
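
Per the comment above, the UFO address list is sized for PVRSRV_MAX_DEV_VARS plus MAX_SYNC_CHECKPOINTS_PER_FENCE entries, with the backing store allocated lazily at that fixed cap and requests beyond it rejected rather than reallocated. A compact sketch of the same lazy-allocate-up-to-a-cap pattern, using an arbitrary placeholder capacity instead of the driver's constants:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define TOY_MAX_ADDRS 32  /* stand-in for the dev-vars + checkpoints cap */

struct toy_addr_list {
    uint32_t  count;  /* number of addresses currently held            */
    uint32_t *addrs;  /* allocated on first use, fixed capacity        */
};

static void toy_list_init(struct toy_addr_list *l)
{
    l->count = 0;
    l->addrs = NULL;
}

/* Grow the logical size; the backing store is allocated once at the cap,
 * so "growing" past the cap is an error rather than a reallocation. */
static int toy_list_grow(struct toy_addr_list *l, uint32_t needed)
{
    if (needed > TOY_MAX_ADDRS)
        return -1;
    if (l->addrs == NULL) {
        l->addrs = calloc(TOY_MAX_ADDRS, sizeof(*l->addrs));
        if (l->addrs == NULL)
            return -1;
    }
    if (needed > l->count)
        l->count = needed;
    return 0;
}

static void toy_list_deinit(struct toy_addr_list *l)
{
    free(l->addrs);
    l->addrs = NULL;
    l->count = 0;
}

int main(void)
{
    struct toy_addr_list l;

    toy_list_init(&l);
    if (toy_list_grow(&l, 4) == 0)
        memset(l.addrs, 0, 4 * sizeof(*l.addrs));  /* room for 4 UFO addrs */
    toy_list_deinit(&l);
    return 0;
}
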
-+ */ -+#define PVRSRV_MAX_SYNC_ADDR_LIST_SIZE (PVRSRV_MAX_DEV_VARS+MAX_SYNC_CHECKPOINTS_PER_FENCE) -+/* Check that helper functions will not be preparing longer lists of -+ * UFOs than the FW can handle. -+ */ -+static_assert(PVRSRV_MAX_SYNC_ADDR_LIST_SIZE <= RGXFWIF_CCB_CMD_MAX_UFOS, -+ "PVRSRV_MAX_SYNC_ADDR_LIST_SIZE > RGXFWIF_CCB_CMD_MAX_UFOS."); -+ -+/* Max number of syncs allowed in a sync prim op */ -+#define SYNC_PRIM_OP_MAX_SYNCS 1024 -+ -+struct _SYNC_PRIMITIVE_BLOCK_ -+{ -+ PVRSRV_DEVICE_NODE *psDevNode; -+ DEVMEM_MEMDESC *psMemDesc; -+ IMG_UINT32 *pui32LinAddr; -+ IMG_UINT32 ui32BlockSize; /*!< Size of the Sync Primitive Block */ -+ ATOMIC_T sRefCount; -+ DLLIST_NODE sConnectionNode; -+ SYNC_CONNECTION_DATA *psSyncConnectionData; /*!< Link back to the sync connection data if there is one */ -+ PRGXFWIF_UFO_ADDR uiFWAddr; /*!< The firmware address of the sync prim block */ -+}; -+ -+struct _SYNC_CONNECTION_DATA_ -+{ -+ DLLIST_NODE sListHead; /*!< list of sync block associated with / created against this connection */ -+ ATOMIC_T sRefCount; /*!< number of references to this object */ -+ POS_LOCK hLock; /*!< lock protecting the list of sync blocks */ -+}; -+ -+#define DECREMENT_WITH_WRAP(value, sz) ((value) ? ((value) - 1) : ((sz) - 1)) -+ -+/* this is the max number of syncs we will search or dump -+ * at any time. -+ */ -+#define SYNC_RECORD_LIMIT 20000 -+ -+enum SYNC_RECORD_TYPE -+{ -+ SYNC_RECORD_TYPE_UNKNOWN = 0, -+ SYNC_RECORD_TYPE_CLIENT, -+ SYNC_RECORD_TYPE_SERVER, -+}; -+ -+struct SYNC_RECORD -+{ -+ PVRSRV_DEVICE_NODE *psDevNode; -+ SYNC_PRIMITIVE_BLOCK *psServerSyncPrimBlock; /*!< handle to _SYNC_PRIMITIVE_BLOCK_ */ -+ IMG_UINT32 ui32SyncOffset; /*!< offset to sync in block */ -+ IMG_UINT32 ui32FwBlockAddr; -+ IMG_PID uiPID; -+ IMG_UINT64 ui64OSTime; -+ enum SYNC_RECORD_TYPE eRecordType; -+ DLLIST_NODE sNode; -+ IMG_CHAR szClassName[PVRSRV_SYNC_NAME_LENGTH]; -+}; -+ -+#if defined(SYNC_DEBUG) || defined(REFCOUNT_DEBUG) -+#define SYNC_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__) -+#else -+#define SYNC_REFCOUNT_PRINT(fmt, ...) -+#endif -+ -+#if defined(SYNC_DEBUG) -+#define SYNC_UPDATES_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__) -+#else -+#define SYNC_UPDATES_PRINT(fmt, ...) -+#endif -+ -+/*! -+***************************************************************************** -+ @Function : SyncPrimitiveBlockToFWAddr -+ -+ @Description : Given a pointer to a sync primitive block and an offset, -+ returns the firmware address of the sync. -+ -+ @Input psSyncPrimBlock : Sync primitive block which contains the sync -+ @Input ui32Offset : Offset of sync within the sync primitive block -+ @Output psAddrOut : Absolute FW address of the sync is written out through -+ this pointer -+ @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input -+ parameters are invalid. -+*****************************************************************************/ -+ -+PVRSRV_ERROR -+SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock, -+ IMG_UINT32 ui32Offset, -+ PRGXFWIF_UFO_ADDR *psAddrOut) -+{ -+ /* check offset is legal */ -+ if (unlikely((ui32Offset >= psSyncPrimBlock->ui32BlockSize) || -+ (ui32Offset % sizeof(IMG_UINT32)))) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimitiveBlockToFWAddr: parameters check failed")); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ psAddrOut->ui32Addr = psSyncPrimBlock->uiFWAddr.ui32Addr + ui32Offset; -+ return PVRSRV_OK; -+} -+ -+/*! 
-+***************************************************************************** -+ @Function : SyncAddrListGrow -+ -+ @Description : Grow the SYNC_ADDR_LIST so it can accommodate the given -+ number of syncs, up to a maximum of PVRSRV_MAX_SYNC_PRIMS. -+ -+ @Input psList : The SYNC_ADDR_LIST to grow -+ @Input ui32NumSyncs : The number of sync addresses to be able to hold -+ @Return : PVRSRV_OK on success -+*****************************************************************************/ -+ -+static PVRSRV_ERROR SyncAddrListGrow(SYNC_ADDR_LIST *psList, IMG_UINT32 ui32NumSyncs) -+{ -+ if (unlikely(ui32NumSyncs > PVRSRV_MAX_SYNC_ADDR_LIST_SIZE)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: ui32NumSyncs=%u > PVRSRV_MAX_SYNC_ADDR_LIST_SIZE=%u", __func__, ui32NumSyncs, PVRSRV_MAX_SYNC_ADDR_LIST_SIZE)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+#if (SYNC_ADDR_LIST_DEBUG == 1) -+ PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs)); -+#endif -+ if (ui32NumSyncs > psList->ui32NumSyncs) -+ { -+ if (psList->pasFWAddrs == NULL) -+ { -+ psList->pasFWAddrs = OSAllocMem(sizeof(PRGXFWIF_UFO_ADDR) * PVRSRV_MAX_SYNC_ADDR_LIST_SIZE); -+ PVR_RETURN_IF_NOMEM(psList->pasFWAddrs); -+ } -+ -+ psList->ui32NumSyncs = ui32NumSyncs; -+ } -+ -+#if (SYNC_ADDR_LIST_DEBUG == 1) -+ PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs)); -+#endif -+ return PVRSRV_OK; -+} -+ -+/*! -+***************************************************************************** -+ @Function : SyncAddrListInit -+ -+ @Description : Initialise a SYNC_ADDR_LIST structure ready for use -+ -+ @Input psList : The SYNC_ADDR_LIST structure to initialise -+ @Return : None -+*****************************************************************************/ -+ -+void -+SyncAddrListInit(SYNC_ADDR_LIST *psList) -+{ -+ psList->ui32NumSyncs = 0; -+ psList->pasFWAddrs = NULL; -+} -+ -+/*! -+***************************************************************************** -+ @Function : SyncAddrListDeinit -+ -+ @Description : Frees any resources associated with the given SYNC_ADDR_LIST -+ -+ @Input psList : The SYNC_ADDR_LIST structure to deinitialise -+ @Return : None -+*****************************************************************************/ -+ -+void -+SyncAddrListDeinit(SYNC_ADDR_LIST *psList) -+{ -+ if (psList->pasFWAddrs != NULL) -+ { -+ OSFreeMem(psList->pasFWAddrs); -+ } -+} -+ -+/*! -+***************************************************************************** -+ @Function : SyncAddrListPopulate -+ -+ @Description : Populate the given SYNC_ADDR_LIST with the FW addresses -+ of the syncs given by the SYNC_PRIMITIVE_BLOCKs and sync offsets -+ -+ @Input ui32NumSyncs : The number of syncs being passed in -+ @Input apsSyncPrimBlock: Array of pointers to SYNC_PRIMITIVE_BLOCK structures -+ in which the syncs are based -+ @Input paui32SyncOffset: Array of offsets within each of the sync primitive blocks -+ where the syncs are located -+ @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input -+ parameters are invalid. 
-+*****************************************************************************/ -+ -+PVRSRV_ERROR -+SyncAddrListPopulate(SYNC_ADDR_LIST *psList, -+ IMG_UINT32 ui32NumSyncs, -+ SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock, -+ IMG_UINT32 *paui32SyncOffset) -+{ -+ IMG_UINT32 i; -+ PVRSRV_ERROR eError; -+ -+#if (SYNC_ADDR_LIST_DEBUG == 1) -+ PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs)); -+#endif -+ if (ui32NumSyncs > psList->ui32NumSyncs) -+ { -+ eError = SyncAddrListGrow(psList, ui32NumSyncs); -+ -+ PVR_RETURN_IF_ERROR(eError); -+ } -+ -+ psList->ui32NumSyncs = ui32NumSyncs; -+ -+ for (i = 0; i < ui32NumSyncs; i++) -+ { -+ eError = SyncPrimitiveBlockToFWAddr(apsSyncPrimBlock[i], -+ paui32SyncOffset[i], -+ &psList->pasFWAddrs[i]); -+ -+ PVR_RETURN_IF_ERROR(eError); -+ } -+ -+#if (SYNC_ADDR_LIST_DEBUG == 1) -+ PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs)); -+#endif -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+SyncAddrListAppendSyncPrim(SYNC_ADDR_LIST *psList, -+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 ui32FwAddr = 0; -+ -+#if (SYNC_ADDR_LIST_DEBUG == 1) -+ PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs)); -+#endif -+ /* Ensure there's room in psList for the additional sync prim update */ -+ eError = SyncAddrListGrow(psList, psList->ui32NumSyncs + 1); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ SyncPrimGetFirmwareAddr(psSyncPrim, &ui32FwAddr); -+#if (SYNC_ADDR_LIST_DEBUG == 1) -+ PVR_DPF((PVR_DBG_ERROR, "%s: Appending sync prim <%p> UFO addr (0x%x) to psList[->pasFWAddrss[%d]", __func__, (void*)psSyncPrim, ui32FwAddr, psList->ui32NumSyncs-1)); -+#endif -+ psList->pasFWAddrs[psList->ui32NumSyncs-1].ui32Addr = ui32FwAddr; -+ -+#if (SYNC_ADDR_LIST_DEBUG == 1) -+ { -+ IMG_UINT32 iii; -+ -+ PVR_DPF((PVR_DBG_ERROR, "%s: psList->ui32NumSyncs=%d", __func__, psList->ui32NumSyncs)); -+ for (iii=0; iiiui32NumSyncs; iii++) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: psList->pasFWAddrs[%d].ui32Addr=0x%x", __func__, iii, psList->pasFWAddrs[iii].ui32Addr)); -+ } -+ } -+#endif -+e0: -+#if (SYNC_ADDR_LIST_DEBUG == 1) -+ PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d", __func__, (void*)psList, psList->ui32NumSyncs)); -+#endif -+ return eError; -+} -+ -+ -+static PVRSRV_ERROR -+_AppendCheckpoints(SYNC_ADDR_LIST *psList, -+ IMG_UINT32 ui32NumCheckpoints, -+ PSYNC_CHECKPOINT *apsSyncCheckpoint, -+ IMG_BOOL bDeRefCheckpoints) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ IMG_UINT32 ui32SyncCheckpointIndex; -+ IMG_UINT32 ui32RollbackSize = psList->ui32NumSyncs; -+ -+#if (SYNC_ADDR_LIST_DEBUG == 1) -+ PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints)); -+#endif -+ /* Ensure there's room in psList for the sync checkpoints */ -+ eError = SyncAddrListGrow(psList, psList->ui32NumSyncs + ui32NumCheckpoints); -+ if (unlikely(eError != PVRSRV_OK)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: * * * * ERROR * * * * Trying to SyncAddrListGrow(psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints)); -+ goto e0; -+ } -+ -+#if (SYNC_ADDR_LIST_DEBUG == 1) -+ PVR_DPF((PVR_DBG_ERROR, "%s: (ui32NumCheckpoints=%d) 
(psList->ui32NumSyncs is now %d) array already contains %d FWAddrs:", __func__, ui32NumCheckpoints, psList->ui32NumSyncs, ui32RollbackSize)); -+ if (ui32RollbackSize > 0) -+ { -+ { -+ IMG_UINT32 kk; -+ for (kk=0; kkpsList->pasFWAddrs[%d].ui32Addr = %u(0x%x)", __func__, -+ (void*)&psList->pasFWAddrs[kk], kk, -+ psList->pasFWAddrs[kk].ui32Addr, psList->pasFWAddrs[kk].ui32Addr)); -+ } -+ } -+ } -+ PVR_DPF((PVR_DBG_ERROR, "%s: apsSyncCheckpoint=<%p>, apsSyncCheckpoint[0] = <%p>", __func__, (void*)apsSyncCheckpoint, (void*)apsSyncCheckpoint[0])); -+#endif -+ for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndexpasFWAddrs[ui32RollbackSize + ui32SyncCheckpointIndex].ui32Addr = SyncCheckpointGetFirmwareAddr(apsSyncCheckpoint[ui32SyncCheckpointIndex]); -+#if (SYNC_ADDR_LIST_DEBUG == 1) -+ PVR_DPF((PVR_DBG_ERROR, "%s: SyncCheckpointCCBEnqueued(<%p>)", __func__, (void*)apsSyncCheckpoint[ui32SyncCheckpointIndex])); -+ PVR_DPF((PVR_DBG_ERROR, "%s: ID:%d", __func__, SyncCheckpointGetId((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]))); -+#endif -+ SyncCheckpointCCBEnqueued((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]); -+ if (bDeRefCheckpoints) -+ { -+ /* Drop the reference that was taken internally by the OS implementation of resolve_fence() */ -+ SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]); -+ } -+ } -+#if (SYNC_ADDR_LIST_DEBUG == 1) -+ if (psList->ui32NumSyncs > 0) -+ { -+ IMG_UINT32 kk; -+ for (kk=0; kkui32NumSyncs; kk++) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: <%p>psList->pasFWAddrs[%d].ui32Addr = %u(0x%x)", __func__, -+ (void*)&psList->pasFWAddrs[kk], kk, -+ psList->pasFWAddrs[kk].ui32Addr, psList->pasFWAddrs[kk].ui32Addr)); -+ } -+ } -+#endif -+ return eError; -+ -+e0: -+ for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints)); -+#endif -+ return eError; -+} -+ -+/*! -+***************************************************************************** -+ @Function : SyncAddrListAppendCheckpoints -+ -+ @Description : Append the FW addresses of the sync checkpoints given in -+ the PSYNC_CHECKPOINTs array to the given SYNC_ADDR_LIST -+ -+ @Input ui32NumSyncCheckpoints : The number of sync checkpoints -+ being passed in -+ @Input apsSyncCheckpoint : Array of PSYNC_CHECKPOINTs whose details -+ are to be appended to the SYNC_ADDR_LIST -+ @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input -+ parameters are invalid. -+*****************************************************************************/ -+PVRSRV_ERROR -+SyncAddrListAppendCheckpoints(SYNC_ADDR_LIST *psList, -+ IMG_UINT32 ui32NumCheckpoints, -+ PSYNC_CHECKPOINT *apsSyncCheckpoint) -+{ -+ return _AppendCheckpoints(psList, ui32NumCheckpoints, apsSyncCheckpoint, IMG_FALSE); -+} -+ -+/*! -+***************************************************************************** -+ @Function : SyncAddrListAppendAndDeRefCheckpoints -+ -+ @Description : Append the FW addresses of the sync checkpoints given in -+ the PSYNC_CHECKPOINTs array to the given SYNC_ADDR_LIST. -+ A reference is dropped for each of the checkpoints. -+ -+ @Input ui32NumSyncCheckpoints : The number of sync checkpoints -+ being passed in -+ @Input apsSyncCheckpoint : Array of PSYNC_CHECKPOINTs whose details -+ are to be appended to the SYNC_ADDR_LIST -+ @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input -+ parameters are invalid. 
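
_AppendCheckpoints above grows the list, writes each checkpoint's firmware address after the entries already present, and on failure rolls the count back so the caller sees the list unchanged; it also handles the CCB-enqueue accounting and optional reference drops, which are omitted here. A simplified sketch of the append-or-rollback shape, with invented names and a fixed capacity:

#include <stdint.h>

#define TOY_CAPACITY 16

struct toy_list {
    uint32_t count;
    uint32_t addrs[TOY_CAPACITY];
};

/* Pretend lookup of a checkpoint's firmware address. */
static uint32_t toy_checkpoint_fw_addr(uint32_t checkpoint_id)
{
    return 0x20000000u + (checkpoint_id * 4u);
}

/* Append n checkpoint addresses; on failure restore the original count
 * so the caller sees the list unchanged (the rollback in _AppendCheckpoints). */
static int toy_append_checkpoints(struct toy_list *l,
                                  const uint32_t *checkpoint_ids, uint32_t n)
{
    uint32_t rollback_count = l->count;
    uint32_t i;

    if (l->count + n > TOY_CAPACITY) {
        l->count = rollback_count;  /* nothing written; be explicit anyway */
        return -1;
    }

    for (i = 0; i < n; i++)
        l->addrs[rollback_count + i] = toy_checkpoint_fw_addr(checkpoint_ids[i]);

    l->count = rollback_count + n;
    return 0;
}

int main(void)
{
    struct toy_list l = { 0, { 0 } };
    uint32_t ids[2] = { 3, 7 };

    return toy_append_checkpoints(&l, ids, 2) == 0 ? 0 : 1;
}
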
-+*****************************************************************************/ -+PVRSRV_ERROR -+SyncAddrListAppendAndDeRefCheckpoints(SYNC_ADDR_LIST *psList, -+ IMG_UINT32 ui32NumCheckpoints, -+ PSYNC_CHECKPOINT *apsSyncCheckpoint) -+{ -+ return _AppendCheckpoints(psList, ui32NumCheckpoints, apsSyncCheckpoint, IMG_TRUE); -+} -+ -+void -+SyncAddrListDeRefCheckpoints(IMG_UINT32 ui32NumCheckpoints, -+ PSYNC_CHECKPOINT *apsSyncCheckpoint) -+{ -+ IMG_UINT32 ui32SyncCheckpointIndex; -+ -+ for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex)", __func__, (void*)psList)); -+#endif -+ if (psList) -+ { -+#if (SYNC_ADDR_LIST_DEBUG == 1) -+ PVR_DPF((PVR_DBG_ERROR, "%s: psList->ui32NumSyncs=%d", __func__, psList->ui32NumSyncs)); -+#endif -+ for (ui32SyncIndex=0; ui32SyncIndexui32NumSyncs; ui32SyncIndex++) -+ { -+ if (psList->pasFWAddrs[ui32SyncIndex].ui32Addr & 0x1) -+ { -+ SyncCheckpointRollbackFromUFO(psDevNode, psList->pasFWAddrs[ui32SyncIndex].ui32Addr); -+ } -+ } -+ } -+ return eError; -+} -+ -+PVRSRV_ERROR -+PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ SYNC_RECORD_HANDLE *phRecord, -+ SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock, -+ IMG_UINT32 ui32FwBlockAddr, -+ IMG_UINT32 ui32SyncOffset, -+ IMG_BOOL bServerSync, -+ IMG_UINT32 ui32ClassNameSize, -+ const IMG_CHAR *pszClassName) -+{ -+ struct SYNC_RECORD * psSyncRec; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ -+ RGXSRV_HWPERF_ALLOC(psDevNode, SYNC, -+ ui32FwBlockAddr + ui32SyncOffset, -+ pszClassName, -+ ui32ClassNameSize); -+ -+ PVR_RETURN_IF_INVALID_PARAM(phRecord); -+ -+ *phRecord = NULL; -+ -+ psSyncRec = OSAllocMem(sizeof(*psSyncRec)); -+ PVR_GOTO_IF_NOMEM(psSyncRec, eError, fail_alloc); -+ -+ psSyncRec->psDevNode = psDevNode; -+ psSyncRec->psServerSyncPrimBlock = hServerSyncPrimBlock; -+ psSyncRec->ui32SyncOffset = ui32SyncOffset; -+ psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr; -+ psSyncRec->ui64OSTime = OSClockns64(); -+ psSyncRec->uiPID = OSGetCurrentProcessID(); -+ psSyncRec->eRecordType = bServerSync? SYNC_RECORD_TYPE_SERVER: SYNC_RECORD_TYPE_CLIENT; -+ -+ if (pszClassName) -+ { -+ if (ui32ClassNameSize >= PVRSRV_SYNC_NAME_LENGTH) -+ ui32ClassNameSize = PVRSRV_SYNC_NAME_LENGTH; -+ /* Copy over the class name annotation */ -+ OSStringLCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize); -+ } -+ else -+ { -+ /* No class name annotation */ -+ psSyncRec->szClassName[0] = 0; -+ } -+ -+ OSLockAcquire(psDevNode->hSyncServerRecordLock); -+ if (psDevNode->ui32SyncServerRecordCount < SYNC_RECORD_LIMIT) -+ { -+ dllist_add_to_head(&psDevNode->sSyncServerRecordList, &psSyncRec->sNode); -+ psDevNode->ui32SyncServerRecordCount++; -+ -+ if (psDevNode->ui32SyncServerRecordCount > psDevNode->ui32SyncServerRecordCountHighWatermark) -+ { -+ psDevNode->ui32SyncServerRecordCountHighWatermark = psDevNode->ui32SyncServerRecordCount; -+ } -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync record \"%s\". 
%u records already exist.", -+ __func__, -+ pszClassName, -+ psDevNode->ui32SyncServerRecordCount)); -+ OSFreeMem(psSyncRec); -+ psSyncRec = NULL; -+ eError = PVRSRV_ERROR_TOOMANYBUFFERS; -+ } -+ OSLockRelease(psDevNode->hSyncServerRecordLock); -+ -+ *phRecord = (SYNC_RECORD_HANDLE)psSyncRec; -+ -+fail_alloc: -+ return eError; -+} -+ -+PVRSRV_ERROR -+PVRSRVSyncRecordRemoveByHandleKM( -+ SYNC_RECORD_HANDLE hRecord) -+{ -+ struct SYNC_RECORD **ppFreedSync; -+ struct SYNC_RECORD *pSync = (struct SYNC_RECORD*)hRecord; -+ PVRSRV_DEVICE_NODE *psDevNode; -+ -+ PVR_RETURN_IF_INVALID_PARAM(hRecord); -+ -+ psDevNode = pSync->psDevNode; -+ -+ OSLockAcquire(psDevNode->hSyncServerRecordLock); -+ -+ RGXSRV_HWPERF_FREE(psDevNode, SYNC, pSync->ui32FwBlockAddr + pSync->ui32SyncOffset); -+ -+ dllist_remove_node(&pSync->sNode); -+ -+ if (psDevNode->uiSyncServerRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: freed sync record index out of range", -+ __func__)); -+ psDevNode->uiSyncServerRecordFreeIdx = 0; -+ } -+ ppFreedSync = &psDevNode->apsSyncServerRecordsFreed[psDevNode->uiSyncServerRecordFreeIdx]; -+ psDevNode->uiSyncServerRecordFreeIdx = -+ (psDevNode->uiSyncServerRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; -+ -+ if (*ppFreedSync) -+ { -+ OSFreeMem(*ppFreedSync); -+ } -+ pSync->psServerSyncPrimBlock = NULL; -+ pSync->ui64OSTime = OSClockns64(); -+ *ppFreedSync = pSync; -+ -+ psDevNode->ui32SyncServerRecordCount--; -+ -+ OSLockRelease(psDevNode->hSyncServerRecordLock); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+PVRSRVSyncAllocEventKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_BOOL bServerSync, -+ IMG_UINT32 ui32FWAddr, -+ IMG_UINT32 ui32ClassNameSize, -+ const IMG_CHAR *pszClassName) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ RGXSRV_HWPERF_ALLOC(psDevNode, SYNC, ui32FWAddr, pszClassName, ui32ClassNameSize); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+PVRSRVSyncFreeEventKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT32 ui32FWAddr) -+{ -+ PVR_UNREFERENCED_PARAMETER(psConnection); -+ RGXSRV_HWPERF_FREE(psDevNode, SYNC, ui32FWAddr); -+ -+ return PVRSRV_OK; -+} -+ -+static -+void _SyncConnectionRef(SYNC_CONNECTION_DATA *psSyncConnectionData) -+{ -+ IMG_INT iRefCount = OSAtomicIncrement(&psSyncConnectionData->sRefCount); -+ -+ SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d", -+ __func__, psSyncConnectionData, iRefCount); -+ PVR_UNREFERENCED_PARAMETER(iRefCount); -+} -+ -+static -+void _SyncConnectionUnref(SYNC_CONNECTION_DATA *psSyncConnectionData) -+{ -+ IMG_INT iRefCount = OSAtomicDecrement(&psSyncConnectionData->sRefCount); -+ if (iRefCount == 0) -+ { -+ SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d", -+ __func__, psSyncConnectionData, iRefCount); -+ -+ PVR_ASSERT(dllist_is_empty(&psSyncConnectionData->sListHead)); -+ OSLockDestroy(psSyncConnectionData->hLock); -+ OSFreeMem(psSyncConnectionData); -+ } -+ else -+ { -+ SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d", -+ __func__, psSyncConnectionData, iRefCount); -+ PVR_ASSERT(iRefCount > 0); -+ } -+} -+ -+static -+void _SyncConnectionAddBlock(CONNECTION_DATA *psConnection, SYNC_PRIMITIVE_BLOCK *psBlock) -+{ -+ if (psConnection) -+ { -+ SYNC_CONNECTION_DATA *psSyncConnectionData = psConnection->psSyncConnectionData; -+ -+ /* -+ Make sure the connection doesn't go away. It doesn't matter that we will release -+ the lock between as the refcount and list don't have to be atomic w.r.t. 
to each other -+ */ -+ _SyncConnectionRef(psSyncConnectionData); -+ -+ OSLockAcquire(psSyncConnectionData->hLock); -+ dllist_add_to_head(&psSyncConnectionData->sListHead, &psBlock->sConnectionNode); -+ OSLockRelease(psSyncConnectionData->hLock); -+ psBlock->psSyncConnectionData = psSyncConnectionData; -+ } -+ else -+ { -+ psBlock->psSyncConnectionData = NULL; -+ } -+} -+ -+static -+void _SyncConnectionRemoveBlock(SYNC_PRIMITIVE_BLOCK *psBlock) -+{ -+ SYNC_CONNECTION_DATA *psSyncConnectionData = psBlock->psSyncConnectionData; -+ -+ if (psBlock->psSyncConnectionData) -+ { -+ OSLockAcquire(psSyncConnectionData->hLock); -+ dllist_remove_node(&psBlock->sConnectionNode); -+ OSLockRelease(psSyncConnectionData->hLock); -+ -+ _SyncConnectionUnref(psBlock->psSyncConnectionData); -+ } -+} -+ -+static inline -+void _DoPrimBlockFree(SYNC_PRIMITIVE_BLOCK *psSyncBlk) -+{ -+ PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode; -+ -+ SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d (remove)", -+ __func__, psSyncBlk, OSAtomicRead(&psSyncBlk->sRefCount)); -+ -+ PVR_ASSERT(OSAtomicRead(&psSyncBlk->sRefCount) == 1); -+ -+ _SyncConnectionRemoveBlock(psSyncBlk); -+ DevmemReleaseCpuVirtAddr(psSyncBlk->psMemDesc); -+ psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->psMemDesc); -+ OSFreeMem(psSyncBlk); -+} -+ -+PVRSRV_ERROR -+PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE * psDevNode, -+ SYNC_PRIMITIVE_BLOCK **ppsSyncBlk, -+ IMG_UINT32 *puiSyncPrimVAddr, -+ IMG_UINT32 *puiSyncPrimBlockSize, -+ PMR **ppsSyncPMR) -+{ -+ SYNC_PRIMITIVE_BLOCK *psNewSyncBlk; -+ PVRSRV_ERROR eError; -+ -+ psNewSyncBlk = OSAllocMem(sizeof(SYNC_PRIMITIVE_BLOCK)); -+ PVR_GOTO_IF_NOMEM(psNewSyncBlk, eError, e0); -+ -+ psNewSyncBlk->psDevNode = psDevNode; -+ -+ PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS, "Allocate UFO block"); -+ -+ eError = psDevNode->pfnAllocUFOBlock(psDevNode, -+ &psNewSyncBlk->psMemDesc, -+ &psNewSyncBlk->uiFWAddr.ui32Addr, -+ &psNewSyncBlk->ui32BlockSize); -+ PVR_GOTO_IF_ERROR(eError, e1); -+ -+ *puiSyncPrimVAddr = psNewSyncBlk->uiFWAddr.ui32Addr; -+ -+ eError = DevmemAcquireCpuVirtAddr(psNewSyncBlk->psMemDesc, -+ (void **) &psNewSyncBlk->pui32LinAddr); -+ PVR_GOTO_IF_ERROR(eError, e2); -+ -+ eError = DevmemLocalGetImportHandle(psNewSyncBlk->psMemDesc, (void **) ppsSyncPMR); -+ -+ PVR_GOTO_IF_ERROR(eError, e3); -+ -+ OSAtomicWrite(&psNewSyncBlk->sRefCount, 1); -+ -+ /* If there is a connection pointer then add the new block onto it's list */ -+ _SyncConnectionAddBlock(psConnection, psNewSyncBlk); -+ -+ *ppsSyncBlk = psNewSyncBlk; -+ *puiSyncPrimBlockSize = psNewSyncBlk->ui32BlockSize; -+ -+ PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS, -+ "Allocated UFO block (FirmwareVAddr = 0x%08x)", -+ *puiSyncPrimVAddr); -+ -+ return PVRSRV_OK; -+ -+e3: -+ DevmemReleaseCpuVirtAddr(psNewSyncBlk->psMemDesc); -+e2: -+ psDevNode->pfnFreeUFOBlock(psDevNode, psNewSyncBlk->psMemDesc); -+e1: -+ OSFreeMem(psNewSyncBlk); -+e0: -+ return eError; -+} -+ -+PVRSRV_ERROR -+PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk) -+{ -+ -+ /* This function is an alternative to the above without reference counting. 
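
The _SyncConnection* helpers above take a reference on the connection data before linking a block into its list and drop it again on removal, so the connection bookkeeping cannot disappear while a block still points at it. A standalone sketch of that ref-then-link pattern, using a plain counter and a singly linked list in place of the driver's ATOMIC_T and DLLIST (and with no locking, which the real code needs):

#include <stdlib.h>

struct toy_block;

/* Per-connection bookkeeping: reference count plus a list of blocks. */
struct toy_connection {
    int               refcount;
    struct toy_block *blocks;  /* head of singly linked list */
};

struct toy_block {
    struct toy_block      *next;
    struct toy_connection *conn;  /* back-pointer, as psSyncConnectionData */
};

static void toy_conn_ref(struct toy_connection *c)   { c->refcount++; }

static void toy_conn_unref(struct toy_connection *c)
{
    if (--c->refcount == 0)
        free(c);  /* last user gone, connection data freed */
}

/* Take a reference before linking so the connection cannot vanish
 * while the block still points at it. */
static void toy_conn_add_block(struct toy_connection *c, struct toy_block *b)
{
    toy_conn_ref(c);
    b->next   = c->blocks;
    c->blocks = b;
    b->conn   = c;
}

static void toy_conn_remove_block(struct toy_block *b)
{
    struct toy_connection *c  = b->conn;
    struct toy_block     **pp = &c->blocks;

    while (*pp && *pp != b)
        pp = &(*pp)->next;
    if (*pp)
        *pp = b->next;
    toy_conn_unref(c);
}

int main(void)
{
    struct toy_connection *c = calloc(1, sizeof(*c));
    struct toy_block b = { 0 };

    if (c == NULL)
        return 1;
    c->refcount = 1;            /* reference held by the connection itself */
    toy_conn_add_block(c, &b);
    toy_conn_remove_block(&b);
    toy_conn_unref(c);          /* drops the initial reference, frees c */
    return 0;
}
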
-+ * With the removal of sync prim ops for server syncs we no longer have to -+ * reference count prim blocks as the reference will never be incremented / -+ * decremented by a prim op */ -+ _DoPrimBlockFree(psSyncBlk); -+ return PVRSRV_OK; -+} -+ -+static INLINE IMG_BOOL _CheckSyncIndex(SYNC_PRIMITIVE_BLOCK *psSyncBlk, -+ IMG_UINT32 ui32Index) -+{ -+ return ((ui32Index * sizeof(IMG_UINT32)) < psSyncBlk->ui32BlockSize); -+} -+ -+PVRSRV_ERROR -+PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index, -+ IMG_UINT32 ui32Value) -+{ -+ if (_CheckSyncIndex(psSyncBlk, ui32Index)) -+ { -+ psSyncBlk->pui32LinAddr[ui32Index] = ui32Value; -+ return PVRSRV_OK; -+ } -+ else -+ { -+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncPrimSetKM: Index %u out of range for " -+ "0x%08X byte sync block (value 0x%08X)", -+ ui32Index, -+ psSyncBlk->ui32BlockSize, -+ ui32Value)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+} -+ -+#if defined(PDUMP) -+PVRSRV_ERROR -+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value) -+{ -+ /* -+ We might be ask to PDump sync state outside of capture range -+ (e.g. texture uploads) so make this continuous. -+ */ -+ DevmemPDumpLoadMemValue32(psSyncBlk->psMemDesc, -+ ui32Offset, -+ ui32Value, -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset) -+{ -+ /* -+ We might be ask to PDump sync state outside of capture range -+ (e.g. texture uploads) so make this continuous. -+ */ -+ DevmemPDumpLoadMem(psSyncBlk->psMemDesc, -+ ui32Offset, -+ sizeof(IMG_UINT32), -+ PDUMP_FLAGS_CONTINUOUS); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ PDUMP_FLAGS_T ui32PDumpFlags) -+{ -+ DevmemPDumpDevmemPol32(psSyncBlk->psMemDesc, -+ ui32Offset, -+ ui32Value, -+ ui32Mask, -+ eOperator, -+ ui32PDumpFlags); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset, -+ IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize, -+ IMG_UINT64 uiBufferSize) -+{ -+ DevmemPDumpCBP(psSyncBlk->psMemDesc, -+ ui32Offset, -+ uiWriteOffset, -+ uiPacketSize, -+ uiBufferSize); -+ return PVRSRV_OK; -+} -+#endif -+ -+/* SyncRegisterConnection */ -+PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData) -+{ -+ SYNC_CONNECTION_DATA *psSyncConnectionData; -+ PVRSRV_ERROR eError; -+ -+ psSyncConnectionData = OSAllocMem(sizeof(SYNC_CONNECTION_DATA)); -+ if (psSyncConnectionData == NULL) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto fail_alloc; -+ } -+ -+ eError = OSLockCreate(&psSyncConnectionData->hLock); -+ PVR_GOTO_IF_ERROR(eError, fail_lockcreate); -+ dllist_init(&psSyncConnectionData->sListHead); -+ OSAtomicWrite(&psSyncConnectionData->sRefCount, 1); -+ -+ *ppsSyncConnectionData = psSyncConnectionData; -+ return PVRSRV_OK; -+ -+fail_lockcreate: -+ OSFreeMem(psSyncConnectionData); -+fail_alloc: -+ PVR_ASSERT(eError != PVRSRV_OK); -+ return eError; -+} -+ -+/* SyncUnregisterConnection */ -+void SyncUnregisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData) -+{ -+ _SyncConnectionUnref(psSyncConnectionData); -+} -+ -+void SyncConnectionPDumpSyncBlocks(PVRSRV_DEVICE_NODE *psDevNode, void *hSyncPrivData, PDUMP_TRANSITION_EVENT eEvent) -+{ -+ if ((eEvent == PDUMP_TRANSITION_EVENT_RANGE_ENTERED) || (eEvent == 
PDUMP_TRANSITION_EVENT_BLOCK_STARTED)) -+ { -+ SYNC_CONNECTION_DATA *psSyncConnectionData = hSyncPrivData; -+ DLLIST_NODE *psNode, *psNext; -+ -+ OSLockAcquire(psSyncConnectionData->hLock); -+ -+ PDUMPCOMMENT(psDevNode, "Dump client Sync Prim state"); -+ dllist_foreach_node(&psSyncConnectionData->sListHead, psNode, psNext) -+ { -+ SYNC_PRIMITIVE_BLOCK *psSyncBlock = -+ IMG_CONTAINER_OF(psNode, SYNC_PRIMITIVE_BLOCK, sConnectionNode); -+ -+ DevmemPDumpLoadMem(psSyncBlock->psMemDesc, -+ 0, -+ psSyncBlock->ui32BlockSize, -+ PDUMP_FLAGS_CONTINUOUS); -+ } -+ -+ OSLockRelease(psSyncConnectionData->hLock); -+ } -+} -+ -+void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr, -+ IMG_CHAR * pszSyncInfo, size_t len) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ IMG_INT iEnd; -+ IMG_BOOL bFound = IMG_FALSE; -+ -+ if (!pszSyncInfo) -+ { -+ return; -+ } -+ -+ OSLockAcquire(psDevNode->hSyncServerRecordLock); -+ pszSyncInfo[0] = '\0'; -+ -+ dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext) -+ { -+ struct SYNC_RECORD *psSyncRec = -+ IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode); -+ if ((psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset) == ui32FwAddr -+ && SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType -+ && psSyncRec->psServerSyncPrimBlock -+ && psSyncRec->psServerSyncPrimBlock->pui32LinAddr -+ ) -+ { -+ IMG_UINT32 *pui32SyncAddr; -+ pui32SyncAddr = psSyncRec->psServerSyncPrimBlock->pui32LinAddr -+ + (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32)); -+ iEnd = OSSNPrintf(pszSyncInfo, len, "Cur=0x%08x %s:%05u (%s)", -+ *pui32SyncAddr, -+ ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"), -+ psSyncRec->uiPID, -+ psSyncRec->szClassName -+ ); -+ if (iEnd >= 0 && iEnd < len) -+ { -+ pszSyncInfo[iEnd] = '\0'; -+ } -+ bFound = IMG_TRUE; -+ break; -+ } -+ } -+ -+ OSLockRelease(psDevNode->hSyncServerRecordLock); -+ -+ if (!bFound && (psDevNode->ui32SyncServerRecordCountHighWatermark == SYNC_RECORD_LIMIT)) -+ { -+ OSSNPrintf(pszSyncInfo, len, "(Record may be lost)"); -+ } -+} -+ -+#define NS_IN_S (1000000000UL) -+static void _SyncRecordPrint(struct SYNC_RECORD *psSyncRec, -+ IMG_UINT64 ui64TimeNow, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ SYNC_PRIMITIVE_BLOCK *psSyncBlock = psSyncRec->psServerSyncPrimBlock; -+ -+ if (SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType) -+ { -+ IMG_UINT64 ui64DeltaS; -+ IMG_UINT32 ui32DeltaF; -+ IMG_UINT64 ui64Delta = ui64TimeNow - psSyncRec->ui64OSTime; -+ ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF); -+ -+ if (psSyncBlock && psSyncBlock->pui32LinAddr) -+ { -+ IMG_UINT32 *pui32SyncAddr; -+ pui32SyncAddr = psSyncBlock->pui32LinAddr -+ + (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32)); -+ -+ PVR_DUMPDEBUG_LOG("\t%s %05u %05" IMG_UINT64_FMTSPEC ".%09u FWAddr=0x%08x Val=0x%08x (%s)", -+ ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"), -+ psSyncRec->uiPID, -+ ui64DeltaS, ui32DeltaF, -+ (psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset), -+ *pui32SyncAddr, -+ psSyncRec->szClassName -+ ); -+ } -+ else -+ { -+ PVR_DUMPDEBUG_LOG("\t%s %05u %05" IMG_UINT64_FMTSPEC ".%09u FWAddr=0x%08x Val= (%s)", -+ ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"), -+ psSyncRec->uiPID, -+ ui64DeltaS, ui32DeltaF, -+ (psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset), -+ psSyncRec->szClassName -+ ); -+ } -+ } -+} -+ -+static void _SyncRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, -+ IMG_UINT32 ui32VerbLevel, -+ DUMPDEBUG_PRINTF_FUNC 
*pfnDumpDebugPrintf, -+ void *pvDumpDebugFile) -+{ -+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle; -+ IMG_UINT64 ui64TimeNowS; -+ IMG_UINT32 ui32TimeNowF; -+ IMG_UINT64 ui64TimeNow = OSClockns64(); -+ DLLIST_NODE *psNode, *psNext; -+ -+ ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF); -+ -+ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) -+ { -+ IMG_UINT32 i; -+ OSLockAcquire(psDevNode->hSyncServerRecordLock); -+ -+ PVR_DUMPDEBUG_LOG("Dumping all allocated syncs. Allocated: %u High watermark: %u @ %05" IMG_UINT64_FMTSPEC ".%09u", -+ psDevNode->ui32SyncServerRecordCount, -+ psDevNode->ui32SyncServerRecordCountHighWatermark, -+ ui64TimeNowS, -+ ui32TimeNowF); -+ if (psDevNode->ui32SyncServerRecordCountHighWatermark == SYNC_RECORD_LIMIT) -+ { -+ PVR_DUMPDEBUG_LOG("Warning: Record limit (%u) was reached. Some sync checkpoints may not have been recorded in the debug information.", -+ SYNC_RECORD_LIMIT); -+ } -+ -+ PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)", -+ "Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation"); -+ -+ dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext) -+ { -+ struct SYNC_RECORD *psSyncRec = -+ IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode); -+ _SyncRecordPrint(psSyncRec, ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile); -+ } -+ -+ PVR_DUMPDEBUG_LOG("Dumping all recently freed syncs @ %05" IMG_UINT64_FMTSPEC ".%09u", -+ ui64TimeNowS, ui32TimeNowF); -+ PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)", -+ "Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation"); -+ for (i = DECREMENT_WITH_WRAP(psDevNode->uiSyncServerRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN); -+ i != psDevNode->uiSyncServerRecordFreeIdx; -+ i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)) -+ { -+ if (psDevNode->apsSyncServerRecordsFreed[i]) -+ { -+ _SyncRecordPrint(psDevNode->apsSyncServerRecordsFreed[i], -+ ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile); -+ } -+ else -+ { -+ break; -+ } -+ } -+ -+ OSLockRelease(psDevNode->hSyncServerRecordLock); -+ } -+} -+#undef NS_IN_S -+ -+static PVRSRV_ERROR SyncRecordListInit(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ PVRSRV_ERROR eError; -+ -+ psDevNode->ui32SyncServerRecordCount = 0; -+ psDevNode->ui32SyncServerRecordCountHighWatermark = 0; -+ -+ eError = OSLockCreate(&psDevNode->hSyncServerRecordLock); -+ PVR_GOTO_IF_ERROR(eError, fail_lock_create); -+ dllist_init(&psDevNode->sSyncServerRecordList); -+ -+ eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDevNode->hSyncServerRecordNotify, -+ psDevNode, -+ _SyncRecordRequest, -+ DEBUG_REQUEST_SYNCTRACKING, -+ psDevNode); -+ -+ PVR_GOTO_IF_ERROR(eError, fail_dbg_register); -+ -+ return PVRSRV_OK; -+ -+fail_dbg_register: -+ OSLockDestroy(psDevNode->hSyncServerRecordLock); -+fail_lock_create: -+ return eError; -+} -+ -+static void SyncRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ DLLIST_NODE *psNode, *psNext; -+ int i; -+ -+ OSLockAcquire(psDevNode->hSyncServerRecordLock); -+ dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext) -+ { -+ struct SYNC_RECORD *pSyncRec = -+ IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode); -+ -+ dllist_remove_node(psNode); -+ OSFreeMem(pSyncRec); -+ } -+ -+ for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++) -+ { -+ if (psDevNode->apsSyncServerRecordsFreed[i]) -+ { -+ OSFreeMem(psDevNode->apsSyncServerRecordsFreed[i]); -+ psDevNode->apsSyncServerRecordsFreed[i] = NULL; -+ } -+ } -+ 
OSLockRelease(psDevNode->hSyncServerRecordLock); -+ -+ if (psDevNode->hSyncServerRecordNotify) -+ { -+ PVRSRVUnregisterDeviceDbgRequestNotify(psDevNode->hSyncServerRecordNotify); -+ } -+ OSLockDestroy(psDevNode->hSyncServerRecordLock); -+} -+ -+PVRSRV_ERROR SyncServerInit(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ PVRSRV_ERROR eError; -+ -+ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) -+ { -+ eError = SyncRecordListInit(psDevNode); -+ PVR_GOTO_IF_ERROR(eError, fail_record_list); -+ } -+ -+ return PVRSRV_OK; -+ -+fail_record_list: -+ return eError; -+} -+ -+void SyncServerDeinit(PVRSRV_DEVICE_NODE *psDevNode) -+{ -+ -+ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) -+ { -+ SyncRecordListDeinit(psDevNode); -+ } -+} -diff --git a/drivers/gpu/drm/img-rogue/sync_server.h b/drivers/gpu/drm/img-rogue/sync_server.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/sync_server.h -@@ -0,0 +1,249 @@ -+/**************************************************************************/ /*! -+@File -+@Title Server side synchronisation interface -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Describes the server side synchronisation functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /***************************************************************************/ -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv.h" -+#include "device.h" -+#include "devicemem.h" -+#include "pdump.h" -+#include "pvrsrv_error.h" -+#include "connection_server.h" -+#include "pdump_km.h" -+ -+#ifndef SYNC_SERVER_H -+#define SYNC_SERVER_H -+ -+typedef struct _SYNC_PRIMITIVE_BLOCK_ SYNC_PRIMITIVE_BLOCK; -+typedef struct _SYNC_CONNECTION_DATA_ SYNC_CONNECTION_DATA; -+typedef struct SYNC_RECORD* SYNC_RECORD_HANDLE; -+ -+typedef struct _SYNC_ADDR_LIST_ -+{ -+ IMG_UINT32 ui32NumSyncs; -+ PRGXFWIF_UFO_ADDR *pasFWAddrs; -+} SYNC_ADDR_LIST; -+ -+PVRSRV_ERROR -+SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock, -+ IMG_UINT32 ui32Offset, -+ PRGXFWIF_UFO_ADDR *psAddrOut); -+ -+void -+SyncAddrListInit(SYNC_ADDR_LIST *psList); -+ -+void -+SyncAddrListDeinit(SYNC_ADDR_LIST *psList); -+ -+PVRSRV_ERROR -+SyncAddrListPopulate(SYNC_ADDR_LIST *psList, -+ IMG_UINT32 ui32NumSyncs, -+ SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock, -+ IMG_UINT32 *paui32SyncOffset); -+ -+PVRSRV_ERROR -+SyncAddrListAppendSyncPrim(SYNC_ADDR_LIST *psList, -+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim); -+PVRSRV_ERROR -+SyncAddrListAppendCheckpoints(SYNC_ADDR_LIST *psList, -+ IMG_UINT32 ui32NumCheckpoints, -+ PSYNC_CHECKPOINT *apsSyncCheckpoint); -+ -+PVRSRV_ERROR -+SyncAddrListAppendAndDeRefCheckpoints(SYNC_ADDR_LIST *psList, -+ IMG_UINT32 ui32NumCheckpoints, -+ PSYNC_CHECKPOINT *apsSyncCheckpoint); -+ -+void -+SyncAddrListDeRefCheckpoints(IMG_UINT32 ui32NumCheckpoints, -+ PSYNC_CHECKPOINT *apsSyncCheckpoint); -+ -+PVRSRV_ERROR -+SyncAddrListRollbackCheckpoints(PVRSRV_DEVICE_NODE *psDevNode, SYNC_ADDR_LIST *psList); -+ -+PVRSRV_ERROR -+PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE * psDevNode, -+ SYNC_PRIMITIVE_BLOCK **ppsSyncBlk, -+ IMG_UINT32 *puiSyncPrimVAddr, -+ IMG_UINT32 *puiSyncPrimBlockSize, -+ PMR **ppsSyncPMR); -+ -+PVRSRV_ERROR -+PVRSRVExportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, -+ DEVMEM_EXPORTCOOKIE **psExportCookie); -+ -+PVRSRV_ERROR -+PVRSRVUnexportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk); -+ -+PVRSRV_ERROR -+PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *ppsSyncBlk); -+ -+PVRSRV_ERROR -+PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index, -+ IMG_UINT32 ui32Value); -+ -+PVRSRV_ERROR -+PVRSRVSyncAllocEventKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_BOOL bServerSync, -+ IMG_UINT32 ui32FWAddr, -+ IMG_UINT32 ui32ClassNameSize, -+ const IMG_CHAR *pszClassName); -+ -+PVRSRV_ERROR -+PVRSRVSyncFreeEventKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ IMG_UINT32 ui32FWAddr); -+ -+PVRSRV_ERROR -+PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection, -+ PVRSRV_DEVICE_NODE *psDevNode, -+ SYNC_RECORD_HANDLE *phRecord, -+ SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock, -+ IMG_UINT32 ui32FwBlockAddr, -+ IMG_UINT32 ui32SyncOffset, -+ IMG_BOOL bServerSync, -+ IMG_UINT32 ui32ClassNameSize, -+ const IMG_CHAR *pszClassName); -+ -+PVRSRV_ERROR -+PVRSRVSyncRecordRemoveByHandleKM( -+ SYNC_RECORD_HANDLE hRecord); -+void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr, -+ IMG_CHAR * pszSyncInfo, size_t len); -+ -+PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData); -+void SyncUnregisterConnection(SYNC_CONNECTION_DATA *ppsSyncConnectionData); -+void SyncConnectionPDumpSyncBlocks(PVRSRV_DEVICE_NODE *psDevNode, void *hSyncPrivData, 
PDUMP_TRANSITION_EVENT eEvent); -+ -+/*! -+****************************************************************************** -+@Function SyncServerInit -+ -+@Description Per-device initialisation for the ServerSync module -+******************************************************************************/ -+PVRSRV_ERROR SyncServerInit(PVRSRV_DEVICE_NODE *psDevNode); -+void SyncServerDeinit(PVRSRV_DEVICE_NODE *psDevNode); -+ -+ -+#if defined(PDUMP) -+PVRSRV_ERROR -+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset); -+ -+PVRSRV_ERROR -+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Value); -+ -+PVRSRV_ERROR -+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ PDUMP_FLAGS_T uiDumpFlags); -+ -+PVRSRV_ERROR -+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset, -+ IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize, -+ IMG_UINT64 uiBufferSize); -+ -+#else /* PDUMP */ -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PVRSRVSyncPrimPDumpKM) -+#endif -+static INLINE PVRSRV_ERROR -+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset) -+{ -+ PVR_UNREFERENCED_PARAMETER(psSyncBlk); -+ PVR_UNREFERENCED_PARAMETER(ui32Offset); -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PVRSRVSyncPrimPDumpValueKM) -+#endif -+static INLINE PVRSRV_ERROR -+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Value) -+{ -+ PVR_UNREFERENCED_PARAMETER(psSyncBlk); -+ PVR_UNREFERENCED_PARAMETER(ui32Offset); -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PVRSRVSyncPrimPDumpPolKM) -+#endif -+static INLINE PVRSRV_ERROR -+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, -+ IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask, -+ PDUMP_POLL_OPERATOR eOperator, -+ PDUMP_FLAGS_T uiDumpFlags) -+{ -+ PVR_UNREFERENCED_PARAMETER(psSyncBlk); -+ PVR_UNREFERENCED_PARAMETER(ui32Offset); -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+ PVR_UNREFERENCED_PARAMETER(ui32Mask); -+ PVR_UNREFERENCED_PARAMETER(eOperator); -+ PVR_UNREFERENCED_PARAMETER(uiDumpFlags); -+ return PVRSRV_OK; -+} -+ -+#ifdef INLINE_IS_PRAGMA -+#pragma inline(PVRSRVSyncPrimPDumpCBPKM) -+#endif -+static INLINE PVRSRV_ERROR -+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset, -+ IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize, -+ IMG_UINT64 uiBufferSize) -+{ -+ PVR_UNREFERENCED_PARAMETER(psSyncBlk); -+ PVR_UNREFERENCED_PARAMETER(ui32Offset); -+ PVR_UNREFERENCED_PARAMETER(uiWriteOffset); -+ PVR_UNREFERENCED_PARAMETER(uiPacketSize); -+ PVR_UNREFERENCED_PARAMETER(uiBufferSize); -+ return PVRSRV_OK; -+} -+#endif /* PDUMP */ -+#endif /*SYNC_SERVER_H */ -diff --git a/drivers/gpu/drm/img-rogue/syscommon.h b/drivers/gpu/drm/img-rogue/syscommon.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/syscommon.h -@@ -0,0 +1,175 @@ -+/**************************************************************************/ /*! -+@File -+@Title Common System APIs and structures -+@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved -+@Description This header provides common system-specific declarations and -+ macros that are supported by all systems -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /***************************************************************************/ -+ -+#if !defined(SYSCOMMON_H) -+#define SYSCOMMON_H -+ -+#include "img_types.h" -+#include "pvr_notifier.h" -+#include "pvrsrv_device.h" -+#include "pvrsrv_error.h" -+ -+/*************************************************************************/ /*! -+@Description Pointer to a Low-level Interrupt Service Routine (LISR). -+@Input pvData Private data provided to the LISR. -+@Return True if interrupt handled, false otherwise. -+*/ /**************************************************************************/ -+typedef IMG_BOOL (*PFN_LISR)(void *pvData); -+ -+/**************************************************************************/ /*! -+@Function SysDevInit -+@Description System specific device initialisation function. -+@Input pvOSDevice pointer to the OS device reference -+@Input ppsDevConfig returned device configuration info -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /***************************************************************************/ -+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig); -+ -+/**************************************************************************/ /*! -+@Function SysDevDeInit -+@Description System specific device deinitialisation function. 
-+@Input psDevConfig device configuration info of the device to be -+ deinitialised -+@Return None. -+*/ /***************************************************************************/ -+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig); -+ -+/**************************************************************************/ /*! -+@Function SysDebugInfo -+@Description Dump system specific device debug information. -+@Input psDevConfig pointer to device configuration info -+@Input pfnDumpDebugPrintf the 'printf' function to be called to -+ display the debug info -+@Input pvDumpDebugFile optional file identifier to be passed to -+ the 'printf' function if required -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /***************************************************************************/ -+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, -+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, -+ void *pvDumpDebugFile); -+ -+/**************************************************************************/ /*! -+@Function SysInstallDeviceLISR -+@Description Installs the system Low-level Interrupt Service Routine (LISR) -+ which handles low-level processing of interrupts from the device -+ (GPU). -+ The LISR will be invoked when the device raises an interrupt. An -+ LISR may not be descheduled, so code which needs to do so should -+ be placed in an MISR. -+ The installed LISR will schedule any MISRs once it has completed -+ its interrupt processing, by calling OSScheduleMISR(). -+@Input hSysData pointer to the system data of the device -+@Input ui32IRQ the IRQ on which the LISR is to be installed -+@Input pszName name of the module installing the LISR -+@Input pfnLISR pointer to the function to be installed as the -+ LISR -+@Input pvData private data provided to the LISR -+@Output phLISRData handle to the installed LISR (to be used for a -+ subsequent uninstall) -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /***************************************************************************/ -+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, -+ IMG_UINT32 ui32IRQ, -+ const IMG_CHAR *pszName, -+ PFN_LISR pfnLISR, -+ void *pvData, -+ IMG_HANDLE *phLISRData); -+ -+/**************************************************************************/ /*! -+@Function SysUninstallDeviceLISR -+@Description Uninstalls the system Low-level Interrupt Service Routine (LISR) -+ which handles low-level processing of interrupts from the device -+ (GPU). -+@Input hLISRData handle of the LISR to be uninstalled -+@Return PVRSRV_OK on success, a failure code otherwise. -+*/ /***************************************************************************/ -+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData); -+ -+/**************************************************************************/ /*! -+@Function SysRGXErrorNotify -+@Description Error reporting callback function, registered as the -+ pfnSysDevErrorNotify member of the PVRSRV_DEVICE_CONFIG -+ struct. System layer will be notified of device errors and -+ resets via this callback. -+ NB. implementers should ensure that the minimal amount of -+ work is done in this callback function, as it will be -+ executed in the main RGX MISR. (e.g. any blocking or lengthy -+ work should be performed by a worker queue/thread instead). -+@Input hSysData pointer to the system data of the device -+@Output psErrorData structure containing details of the reported error -+@Return None. 
-+*/ /***************************************************************************/ -+void SysRGXErrorNotify(IMG_HANDLE hSysData, -+ PVRSRV_ROBUSTNESS_NOTIFY_DATA *psErrorData); -+ -+/**************************************************************************/ /*! -+@Function SysRestrictGpuLocalPhysheap -+@Description If the Restriction apphint has been set, validate the -+ restriction value and return the new GPU_LOCAL heap size. -+ -+@Input uiHeapSize Current syslayer detected GPU_LOCAL heap size. -+@Return IMG_UINT64 New GPU_LOCAL heap size in bytes. -+*/ /***************************************************************************/ -+IMG_UINT64 SysRestrictGpuLocalPhysheap(IMG_UINT64 uiHeapSize); -+ -+/**************************************************************************/ /*! -+@Function SysRestrictGpuLocalAddPrivateHeap -+@Description Determine if the restriction apphint has been set. -+ -+@Return IMG_BOOL IMG_TRUE if the restriction apphint has been -+ set. -+*/ /***************************************************************************/ -+IMG_BOOL SysRestrictGpuLocalAddPrivateHeap(void); -+ -+/**************************************************************************/ /*! -+@Function SysDefaultToCpuLocalHeap -+@Description Determine if the Default Heap should be CPU_LOCAL -+ Can only be used on TC_MEMORY_HYBRID systems. -+ -+@Return IMG_BOOL IMG_TRUE if the Default heap apphint has been -+ set. -+*/ /***************************************************************************/ -+IMG_BOOL SysDefaultToCpuLocalHeap(void); -+ -+#endif /* !defined(SYSCOMMON_H) */ -diff --git a/drivers/gpu/drm/img-rogue/sysconfig_cmn.c b/drivers/gpu/drm/img-rogue/sysconfig_cmn.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/sysconfig_cmn.c -@@ -0,0 +1,210 @@ -+/*************************************************************************/ /*! -+@File -+@Title Sysconfig layer common to all platforms -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implements system layer functions common to all platforms -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. 
-+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv.h" -+#include "pvrsrv_device.h" -+#include "syscommon.h" -+#include "pvr_debug.h" -+#include "os_apphint.h" -+//#include "physmem.h" -+ -+void SysRGXErrorNotify(IMG_HANDLE hSysData, -+ PVRSRV_ROBUSTNESS_NOTIFY_DATA *psErrorData) -+{ -+ PVR_UNREFERENCED_PARAMETER(hSysData); -+ -+#if defined(PVRSRV_NEED_PVR_DPF) -+ { -+ IMG_UINT32 ui32DgbLvl; -+ -+ switch (psErrorData->eResetReason) -+ { -+ case RGX_CONTEXT_RESET_REASON_NONE: -+ case RGX_CONTEXT_RESET_REASON_GUILTY_LOCKUP: -+ case RGX_CONTEXT_RESET_REASON_INNOCENT_LOCKUP: -+ case RGX_CONTEXT_RESET_REASON_GUILTY_OVERRUNING: -+ case RGX_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING: -+ case RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH: -+ case RGX_CONTEXT_RESET_REASON_GPU_ECC_OK: -+ case RGX_CONTEXT_RESET_REASON_FW_ECC_OK: -+ { -+ ui32DgbLvl = PVR_DBG_MESSAGE; -+ break; -+ } -+ case RGX_CONTEXT_RESET_REASON_GPU_ECC_HWR: -+ case RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR: -+ { -+ ui32DgbLvl = PVR_DBG_WARNING; -+ break; -+ } -+ case RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM: -+ case RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM: -+ case RGX_CONTEXT_RESET_REASON_FW_ECC_ERR: -+ case RGX_CONTEXT_RESET_REASON_FW_WATCHDOG: -+ case RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT: -+ case RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR: -+ case RGX_CONTEXT_PVRIC_SIGNATURE_MISMATCH: -+ { -+ ui32DgbLvl = PVR_DBG_ERROR; -+ break; -+ } -+ default: -+ { -+ PVR_ASSERT(false && "Unhandled reset reason"); -+ ui32DgbLvl = PVR_DBG_ERROR; -+ break; -+ } -+ } -+ -+ if (psErrorData->pid > 0) -+ { -+ PVRSRVDebugPrintf(ui32DgbLvl, __FILE__, __LINE__, " PID %d experienced error %d", -+ psErrorData->pid, psErrorData->eResetReason); -+ } -+ else -+ { -+ PVRSRVDebugPrintf(ui32DgbLvl, __FILE__, __LINE__, " Device experienced error %d", -+ psErrorData->eResetReason); -+ } -+ -+ switch (psErrorData->eResetReason) -+ { -+ case RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM: -+ case RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM: -+ { -+ PVRSRVDebugPrintf(ui32DgbLvl, __FILE__, __LINE__, " ExtJobRef 0x%x, DM %d", -+ psErrorData->uErrData.sChecksumErrData.ui32ExtJobRef, -+ psErrorData->uErrData.sChecksumErrData.eDM); -+ break; -+ } -+ default: -+ { -+ break; -+ } -+ } -+ } -+#else -+ PVR_UNREFERENCED_PARAMETER(psErrorData); -+#endif /* PVRSRV_NEED_PVR_DPF */ -+} -+ -+IMG_UINT64 SysRestrictGpuLocalPhysheap(IMG_UINT64 uiHeapSize) -+{ -+#if defined(SUPPORT_VALIDATION) -+ void *pvAppHintState = NULL; -+ IMG_UINT32 uiCurrentHeapSizeMB = B2MB(uiHeapSize); -+ IMG_UINT32 uiForcedHeapSizeMB = 0; -+ IMG_UINT64 uiForcedHeapSizeBytes = 0; -+ -+ OSCreateAppHintState(&pvAppHintState); -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, -+ RestrictGpuLocalPhysHeapSizeMB, &uiCurrentHeapSizeMB, -+ &uiForcedHeapSizeMB); -+ OSFreeAppHintState(pvAppHintState); -+ -+ 
uiForcedHeapSizeBytes = MB2B((IMG_UINT64)uiForcedHeapSizeMB); -+ -+ if (uiForcedHeapSizeMB == 0) -+ { -+ /* Apphint wasn't set, just return current heapsize */ -+ return uiHeapSize; -+ } -+ -+ if (uiForcedHeapSizeBytes > uiHeapSize) -+ { -+ PVR_DPF((PVR_DBG_WARNING,"GPU_LOCAL Forced heap value greater than possible heap size. " -+ "Given: %llu Available: %llu. Reverting to default.", -+ uiForcedHeapSizeBytes, uiHeapSize)); -+ } -+ else -+ { -+ PVR_LOG(("RestrictGpuLocalPhysHeapSizeMB applied GPU_LOCAL Size Bytes: %llu", uiForcedHeapSizeBytes)); -+ } -+ -+ return uiForcedHeapSizeBytes; -+#else -+ return uiHeapSize; -+#endif -+} -+ -+IMG_BOOL SysRestrictGpuLocalAddPrivateHeap(void) -+{ -+#if defined(SUPPORT_VALIDATION) -+ void *pvAppHintState = NULL; -+ IMG_UINT32 uiCurrentHeapSizeMB = 0; -+ IMG_UINT32 uiForcedHeapSizeMB = 0; -+ -+ OSCreateAppHintState(&pvAppHintState); -+ OSGetAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, -+ RestrictGpuLocalPhysHeapSizeMB, &uiCurrentHeapSizeMB, -+ &uiForcedHeapSizeMB); -+ OSFreeAppHintState(pvAppHintState); -+ -+ return uiForcedHeapSizeMB ? IMG_TRUE : IMG_FALSE; -+#else -+ return IMG_FALSE; -+#endif -+} -+ -+IMG_BOOL SysDefaultToCpuLocalHeap(void) -+{ -+//#if (TC_MEMORY_CONFIG == TC_MEMORY_HYBRID) -+ void *pvAppHintState = NULL; -+ IMG_BOOL bAppHintDefault = IMG_FALSE; -+ IMG_BOOL bSetToCPULocal = IMG_FALSE; -+ -+ OSCreateAppHintState(&pvAppHintState); -+ OSGetAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, -+ PhysHeapHybridDefault2CpuLocal, &bAppHintDefault, &bSetToCPULocal); -+ OSFreeAppHintState(pvAppHintState); -+ -+ return bSetToCPULocal; -+//#else -+// return IMG_FALSE; -+//#endif -+} -+/****************************************************************************** -+ End of file (sysconfig_cmn.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/sysvalidation.h b/drivers/gpu/drm/img-rogue/sysvalidation.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/sysvalidation.h -@@ -0,0 +1,63 @@ -+/*************************************************************************/ /*! -+@File -+@Title Validation System APIs and structures -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description This header provides system-specific declarations and macros -+ needed for hardware validation -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#if !defined(SYSVALIDATION_H) -+#define SYSVALIDATION_H -+ -+#if defined(SUPPORT_GPUVIRT_VALIDATION) -+#include "img_types.h" -+#include "rgxdefs_km.h" -+#include "virt_validation_defs.h" -+ -+void SysInitVirtInitialization(IMG_HANDLE hSysData, -+ IMG_UINT64 aui64OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], -+ IMG_UINT64 aui64OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]); -+ -+#if defined(EMULATOR) -+void SysSetAxiProtOSid(IMG_HANDLE hSysData, IMG_UINT32 ui32OSid, IMG_BOOL bState); -+void SysSetTrustedDeviceAceEnabled(IMG_HANDLE hSysData); -+#endif -+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ -+ -+#endif /* !defined(SYSVALIDATION_H) */ -diff --git a/drivers/gpu/drm/img-rogue/tlclient.c b/drivers/gpu/drm/img-rogue/tlclient.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/tlclient.c -@@ -0,0 +1,499 @@ -+/*************************************************************************/ /*! -+@File tlclient.c -+@Title Services Transport Layer shared API -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Transport layer common API used in both clients and server -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+/* DESIGN NOTE -+ * This transport layer consumer-role API was created as a shared API when a -+ * client wanted to read the data of a TL stream from within the KM server -+ * driver. This was in addition to the existing clients supported externally -+ * by the UM client library component via PVR API layer. -+ * This shared API is thus used by the PVR TL API in the client library and -+ * by clients internal to the server driver module. It depends on -+ * client entry points of the TL and DEVMEM bridge modules. These entry points -+ * encapsulate from the TL shared API whether a direct bridge or an indirect -+ * (ioctl) bridge is used. -+ * One reason for needing this layer centres around the fact that some of the -+ * API functions make multiple bridge calls and the logic that glues these -+ * together is common regardless of client location. Further this layer has -+ * allowed the defensive coding that checks parameters to move into the PVR -+ * API layer where untrusted clients enter giving a more efficient KM code path. -+ */ -+ -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+#include "pvr_debug.h" -+#include "osfunc.h" -+ -+#include "allocmem.h" -+#include "devicemem.h" -+ -+#include "tlclient.h" -+#include "pvrsrv_tlcommon.h" -+#include "client_pvrtl_bridge.h" -+ -+#if defined(__KERNEL__) -+#include "srvcore.h" -+#else -+#include "srvcore_intern.h" -+#endif -+ -+/* Defines/Constants -+ */ -+ -+#define NO_ACQUIRE 0xffffffffU -+ -+/* User-side stream descriptor structure. -+ */ -+typedef struct _TL_STREAM_DESC_ -+{ -+ /* Handle on kernel-side stream descriptor*/ -+ IMG_HANDLE hServerSD; -+ -+ /* Stream data buffer variables */ -+ DEVMEM_MEMDESC* psUMmemDesc; -+ IMG_PBYTE pBaseAddr; -+ -+ /* Offset in bytes into the circular buffer and valid only after -+ * an Acquire call and undefined after a release. */ -+ IMG_UINT32 uiReadOffset; -+ -+ /* Always a positive integer when the Acquire call returns and a release -+ * is outstanding. Undefined at all other times. */ -+ IMG_UINT32 uiReadLen; -+ -+ /* Counter indicating how many writes to a stream failed. -+ * It's used to reduce number of errors in output log. */ -+ IMG_UINT32 ui32WritesFailed; -+ -+ /* Name of the stream. 
*/ -+ IMG_CHAR szName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; -+} TL_STREAM_DESC, *PTL_STREAM_DESC; -+ -+ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientOpenStream(SHARED_DEV_CONNECTION hDevConnection, -+ const IMG_CHAR* pszName, -+ IMG_UINT32 ui32Mode, -+ IMG_HANDLE* phSD) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ TL_STREAM_DESC *psSD = NULL; -+ IMG_HANDLE hTLPMR; -+ IMG_HANDLE hTLImportHandle; -+ IMG_DEVMEM_SIZE_T uiImportSize; -+ PVRSRV_MEMALLOCFLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE; -+ -+ PVR_ASSERT(hDevConnection); -+ PVR_ASSERT(pszName); -+ PVR_ASSERT(phSD); -+ *phSD = NULL; -+ -+ /* Allocate memory for the stream descriptor object, initialise with -+ * "no data read" yet. */ -+ psSD = OSAllocZMem(sizeof(TL_STREAM_DESC)); -+ PVR_LOG_GOTO_IF_NOMEM(psSD, eError, e0); -+ psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE; -+ -+ /* Send open stream request to kernel server to get stream handle and -+ * buffer cookie so we can get access to the buffer in this process. */ -+ eError = BridgeTLOpenStream(GetBridgeHandle(hDevConnection), pszName, -+ ui32Mode, &psSD->hServerSD, &hTLPMR); -+ if (eError != PVRSRV_OK) -+ { -+ if ((ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT) && -+ (eError == PVRSRV_ERROR_TIMEOUT)) -+ { -+ goto e1; -+ } -+ PVR_LOG_GOTO_IF_ERROR(eError, "BridgeTLOpenStream", e1); -+ } -+ -+ /* Convert server export cookie into a cookie for use by this client */ -+ eError = DevmemMakeLocalImportHandle(hDevConnection, -+ hTLPMR, &hTLImportHandle); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemMakeLocalImportHandle", e2); -+ -+ uiMemFlags |= ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ? -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE : 0ULL; -+ /* Now convert client cookie into a client handle on the buffer's -+ * physical memory region */ -+ eError = DevmemLocalImport(hDevConnection, -+ hTLImportHandle, -+ uiMemFlags, -+ &psSD->psUMmemDesc, -+ &uiImportSize, -+ "TLBuffer"); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemImport", e3); -+ -+ /* Now map the memory into the virtual address space of this process. 
*/ -+ eError = DevmemAcquireCpuVirtAddr(psSD->psUMmemDesc, (void **) -+ &psSD->pBaseAddr); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e4); -+ -+ /* Ignore error, not much that can be done */ -+ (void) DevmemUnmakeLocalImportHandle(hDevConnection, -+ hTLImportHandle); -+ -+ /* Copy stream name */ -+ OSStringLCopy(psSD->szName, pszName, PRVSRVTL_MAX_STREAM_NAME_SIZE); -+ -+ /* Return client descriptor handle to caller */ -+ *phSD = psSD; -+ return PVRSRV_OK; -+ -+/* Clean up post buffer setup */ -+e4: -+ DevmemFree(psSD->psUMmemDesc); -+e3: -+ (void) DevmemUnmakeLocalImportHandle(hDevConnection, -+ &hTLImportHandle); -+/* Clean up post stream open */ -+e2: -+ BridgeTLCloseStream(GetBridgeHandle(hDevConnection), psSD->hServerSD); -+ -+/* Clean up post allocation of the descriptor object */ -+e1: -+ OSFreeMem(psSD); -+ -+e0: -+ return eError; -+} -+ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientCloseStream(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hSD) -+{ -+ PVRSRV_ERROR eError; -+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; -+ -+ PVR_ASSERT(hDevConnection); -+ PVR_ASSERT(hSD); -+ -+ /* Check the caller provided connection is valid */ -+ if (!psSD->hServerSD) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: descriptor already " -+ "closed/not open", __func__)); -+ return PVRSRV_ERROR_HANDLE_NOT_FOUND; -+ } -+ -+ /* Check if acquire is outstanding, perform release if it is, ignore result -+ * as there is not much we can do if it is an error other than close */ -+ if (psSD->uiReadLen != NO_ACQUIRE) -+ { -+ (void) BridgeTLReleaseData(GetBridgeHandle(hDevConnection), -+ psSD->hServerSD, psSD->uiReadOffset, psSD->uiReadLen); -+ psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE; -+ } -+ -+ /* Clean up DevMem resources used for this stream in this client */ -+ DevmemReleaseCpuVirtAddr(psSD->psUMmemDesc); -+ -+ DevmemFree(psSD->psUMmemDesc); -+ -+ /* Send close to server to clean up kernel mode resources for this -+ * handle and release the memory. */ -+ eError = DestroyServerResource(hDevConnection, -+ NULL, -+ BridgeTLCloseStream, -+ psSD->hServerSD); -+ PVR_LOG_IF_ERROR(eError, "BridgeTLCloseStream"); -+ -+ if (psSD->ui32WritesFailed != 0) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s() %u writes failed to stream %s (%c)", -+ __func__, psSD->ui32WritesFailed, psSD->szName, -+ psSD->ui32WritesFailed == IMG_UINT32_MAX ? 
'T' : 'F')); -+ } -+ -+ OSCachedMemSet(psSD, 0x00, sizeof(TL_STREAM_DESC)); -+ OSFreeMem(psSD); -+ -+ return eError; -+} -+ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientDiscoverStreams(SHARED_DEV_CONNECTION hDevConnection, -+ const IMG_CHAR *pszNamePattern, -+ IMG_CHAR aszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE], -+ IMG_UINT32 *pui32NumFound) -+{ -+ PVR_ASSERT(hDevConnection); -+ PVR_ASSERT(pszNamePattern); -+ PVR_ASSERT(pui32NumFound); -+ -+ return BridgeTLDiscoverStreams(GetBridgeHandle(hDevConnection), -+ pszNamePattern, -+ /* we need to treat this as one dimensional array */ -+ *pui32NumFound * PRVSRVTL_MAX_STREAM_NAME_SIZE, -+ (IMG_CHAR *) aszStreams, -+ pui32NumFound); -+} -+ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientReserveStream(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hSD, -+ IMG_UINT8 **ppui8Data, -+ IMG_UINT32 ui32Size) -+{ -+ PVRSRV_ERROR eError; -+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; -+ IMG_UINT32 ui32BufferOffset, ui32Unused; -+ -+ PVR_ASSERT(hDevConnection); -+ PVR_ASSERT(hSD); -+ PVR_ASSERT(ppui8Data); -+ PVR_ASSERT(ui32Size); -+ -+ eError = BridgeTLReserveStream(GetBridgeHandle(hDevConnection), -+ psSD->hServerSD, &ui32BufferOffset, ui32Size, ui32Size, &ui32Unused); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ *ppui8Data = psSD->pBaseAddr + ui32BufferOffset; -+ -+ return PVRSRV_OK; -+} -+ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientReserveStream2(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hSD, -+ IMG_UINT8 **ppui8Data, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32SizeMin, -+ IMG_UINT32 *pui32Available) -+{ -+ PVRSRV_ERROR eError; -+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; -+ IMG_UINT32 ui32BufferOffset; -+ -+ PVR_ASSERT(hDevConnection); -+ PVR_ASSERT(hSD); -+ PVR_ASSERT(ppui8Data); -+ PVR_ASSERT(ui32Size); -+ -+ eError = BridgeTLReserveStream(GetBridgeHandle(hDevConnection), -+ psSD->hServerSD, &ui32BufferOffset, ui32Size, ui32SizeMin, -+ pui32Available); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ *ppui8Data = psSD->pBaseAddr + ui32BufferOffset; -+ -+ return PVRSRV_OK; -+} -+ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientCommitStream(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hSD, -+ IMG_UINT32 ui32Size) -+{ -+ PVRSRV_ERROR eError; -+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; -+ -+ PVR_ASSERT(hDevConnection); -+ PVR_ASSERT(hSD); -+ PVR_ASSERT(ui32Size); -+ -+ eError = BridgeTLCommitStream(GetBridgeHandle(hDevConnection), -+ psSD->hServerSD, ui32Size); -+ PVR_RETURN_IF_ERROR(eError); -+ -+ return PVRSRV_OK; -+} -+ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientAcquireData(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hSD, -+ IMG_PBYTE* ppPacketBuf, -+ IMG_UINT32* pui32BufLen) -+{ -+ PVRSRV_ERROR eError; -+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; -+ -+ PVR_ASSERT(hDevConnection); -+ PVR_ASSERT(hSD); -+ PVR_ASSERT(ppPacketBuf); -+ PVR_ASSERT(pui32BufLen); -+ -+ /* In case of non-blocking acquires, which can return no data, and -+ * error paths ensure we clear the output parameters first. 
*/ -+ *ppPacketBuf = NULL; -+ *pui32BufLen = 0; -+ -+ /* Check Acquire has not been called twice in a row without a release */ -+ if (psSD->uiReadOffset != NO_ACQUIRE) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: acquire already " -+ "outstanding, ReadOffset(%d), ReadLength(%d)", -+ __func__, psSD->uiReadOffset, psSD->uiReadLen)); -+ return PVRSRV_ERROR_RETRY; -+ } -+ -+ /* Ask the kernel server for the next chunk of data to read */ -+ eError = BridgeTLAcquireData(GetBridgeHandle(hDevConnection), -+ psSD->hServerSD, &psSD->uiReadOffset, &psSD->uiReadLen); -+ if (eError != PVRSRV_OK) -+ { -+ /* Mask reporting of the errors seen under normal operation */ -+ if ((eError != PVRSRV_ERROR_TIMEOUT) && -+ (eError != PVRSRV_ERROR_STREAM_READLIMIT_REACHED)) -+ { -+ PVR_LOG_ERROR(eError, "BridgeTLAcquireData"); -+ } -+ psSD->uiReadOffset = psSD->uiReadLen = NO_ACQUIRE; -+ return eError; -+ } -+ /* else PVRSRV_OK */ -+ -+ /* Return the data offset and length to the caller if bytes are available -+ * to be read. Could be zero for non-blocking mode so pass back cleared -+ * values above */ -+ if (psSD->uiReadLen) -+ { -+ *ppPacketBuf = psSD->pBaseAddr + psSD->uiReadOffset; -+ *pui32BufLen = psSD->uiReadLen; -+ } -+ -+ return PVRSRV_OK; -+} -+ -+static PVRSRV_ERROR _TLClientReleaseDataLen( -+ SHARED_DEV_CONNECTION hDevConnection, -+ TL_STREAM_DESC* psSD, -+ IMG_UINT32 uiReadLen) -+{ -+ PVRSRV_ERROR eError; -+ -+ /* the previous acquire did not return any data, this is a no-operation */ -+ if (psSD->uiReadLen == 0) -+ { -+ return PVRSRV_OK; -+ } -+ -+ /* Check release has not been called twice in a row without an acquire */ -+ if (psSD->uiReadOffset == NO_ACQUIRE) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: no acquire to release", __func__)); -+ return PVRSRV_ERROR_RETRY; -+ } -+ -+ /* Inform the kernel to release the data from the buffer */ -+ eError = BridgeTLReleaseData(GetBridgeHandle(hDevConnection), -+ psSD->hServerSD, -+ psSD->uiReadOffset, uiReadLen); -+ PVR_LOG_IF_ERROR(eError, "BridgeTLReleaseData"); -+ -+ /* Reset state to indicate no outstanding acquire */ -+ psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE; -+ -+ return eError; -+} -+ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientReleaseData(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hSD) -+{ -+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; -+ -+ PVR_ASSERT(hDevConnection); -+ PVR_ASSERT(hSD); -+ -+ return _TLClientReleaseDataLen(hDevConnection, psSD, psSD->uiReadLen); -+} -+ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientReleaseDataLess(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hSD, IMG_UINT32 uiActualReadLen) -+{ -+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; -+ -+ PVR_ASSERT(hDevConnection); -+ PVR_ASSERT(hSD); -+ -+ /* Check the specified size is within the size returned by Acquire */ -+ if (uiActualReadLen > psSD->uiReadLen) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: no acquire to release", __func__)); -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ -+ return _TLClientReleaseDataLen(hDevConnection, psSD, uiActualReadLen); -+} -+ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientWriteData(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hSD, -+ IMG_UINT32 ui32Size, -+ IMG_BYTE *pui8Data) -+{ -+ PVRSRV_ERROR eError; -+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; -+ -+ PVR_ASSERT(hDevConnection); -+ PVR_ASSERT(hSD); -+ PVR_ASSERT(ui32Size); -+ PVR_ASSERT(pui8Data); -+ -+ eError = BridgeTLWriteData(GetBridgeHandle(hDevConnection), -+ psSD->hServerSD, ui32Size, pui8Data); -+ -+ if (eError == PVRSRV_ERROR_STREAM_FULL) -+ { -+ if (psSD->ui32WritesFailed == 0) -+ { -+ 
PVR_LOG_ERROR(eError, "BridgeTLWriteData"); -+ } -+ if (psSD->ui32WritesFailed != IMG_UINT32_MAX) -+ { -+ psSD->ui32WritesFailed++; -+ } -+ } -+ else if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "BridgeTLWriteData"); -+ } -+ -+ return eError; -+} -+ -+/****************************************************************************** -+ End of file (tlclient.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/tlclient.h b/drivers/gpu/drm/img-rogue/tlclient.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/tlclient.h -@@ -0,0 +1,257 @@ -+/*************************************************************************/ /*! -+@File tlclient.h -+@Title Services Transport Layer shared API -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Transport layer common API used in both clients and server -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef TLCLIENT_H -+#define TLCLIENT_H -+ -+ -+#include "img_defs.h" -+#include "pvrsrv_tlcommon.h" -+#include "pvrsrv_error.h" -+ -+ -+/* This value is used for the hSrvHandle argument in the client API when -+ * called directly from the kernel which will lead to a direct bridge access. 
-+ */ -+#define DIRECT_BRIDGE_HANDLE ((IMG_HANDLE)0xDEADBEEFU) -+ -+ -+/*************************************************************************/ /*! -+ @Function TLClientOpenStream -+ @Description Open a descriptor onto an existing kernel transport stream. -+ @Input hDevConnection Address of a pointer to a connection object -+ @Input pszName Address of the stream name string, no longer -+ than PRVSRVTL_MAX_STREAM_NAME_SIZE. -+ @Input ui32Mode Unused -+ @Output phSD Address of a pointer to an stream object -+ @Return PVRSRV_ERROR_NOT_FOUND when named stream not found -+ @Return PVRSRV_ERROR_ALREADY_OPEN stream already open by another -+ @Return PVRSRV_ERROR_STREAM_ERROR internal driver state error -+ @Return PVRSRV_ERROR_TIMEOUT timed out, stream not found -+ @Return PVRSRV_ERROR for other system codes -+*/ /**************************************************************************/ -+ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientOpenStream(SHARED_DEV_CONNECTION hDevConnection, -+ const IMG_CHAR* pszName, -+ IMG_UINT32 ui32Mode, -+ IMG_HANDLE* phSD); -+ -+ -+/*************************************************************************/ /*! -+ @Function TLClientCloseStream -+ @Description Close and release the stream connection to Services kernel -+ server transport layer. Any outstanding Acquire will be -+ released. -+ @Input hDevConnection Address of a pointer to a connection object -+ @Input hSD Handle of the stream object to close -+ @Return PVRSRV_ERROR_HANDLE_NOT_FOUND when SD handle is not known -+ @Return PVRSRV_ERROR_STREAM_ERROR internal driver state error -+ @Return PVRSRV_ERROR for system codes -+*/ /**************************************************************************/ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientCloseStream(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hSD); -+ -+/*************************************************************************/ /*! -+ @Function TLClientDiscoverStreams -+ @Description Finds all streams that's name starts with pszNamePattern and -+ ends with a number. -+ @Input hDevConnection Address of a pointer to a connection object -+ @Input pszNamePattern Name pattern. Must be beginning of a string. -+ @Output aszStreams Array of numbers from end of the discovered -+ names. -+ @inOut pui32NumFound When input, max number that can fit into -+ pui32Streams. When output, number of -+ discovered streams. -+ @Return PVRSRV_ERROR for system codes -+*/ /**************************************************************************/ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientDiscoverStreams(SHARED_DEV_CONNECTION hDevConnection, -+ const IMG_CHAR *pszNamePattern, -+ IMG_CHAR aszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE], -+ IMG_UINT32 *pui32NumFound); -+ -+/*************************************************************************/ /*! -+ @Function TLClientReserveStream -+ @Description Reserves a region with given size in the stream. If the stream -+ is already reserved the function will return an error. -+ @Input hDevConnection Address of a pointer to a connection object -+ @Input hSD Handle of the stream object to close -+ @Output ppui8Data pointer to the buffer -+ @Input ui32Size size of the data -+ @Return -+*/ /**************************************************************************/ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientReserveStream(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hSD, -+ IMG_UINT8 **ppui8Data, -+ IMG_UINT32 ui32Size); -+ -+/*************************************************************************/ /*! 
-+ @Function TLClientStreamReserve2 -+ @Description Reserves a region with given size in the stream. If the stream -+ is already reserved the function will return an error. -+ @Input hDevConnection Address of a pointer to a connection object -+ @Input hSD Handle of the stream object to close -+ @Output ppui8Data pointer to the buffer -+ @Input ui32Size size of the data -+ @Input ui32SizeMin minimum size of the data -+ @Input ui32Available available space in buffer -+ @Return -+*/ /**************************************************************************/ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientReserveStream2(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hSD, -+ IMG_UINT8 **ppui8Data, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32SizeMin, -+ IMG_UINT32 *pui32Available); -+ -+/*************************************************************************/ /*! -+ @Function TLClientStreamCommit -+ @Description Commits previously reserved region in the stream and therefore -+ allows next reserves. -+ This function call has to be preceded by the call to -+ TLClientReserveStream or TLClientReserveStream2. -+ @Input hDevConnection Address of a pointer to a connection object -+ @Input hSD Handle of the stream object to close -+ @Input ui32Size Size of the data -+ @Return -+*/ /**************************************************************************/ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientCommitStream(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hSD, -+ IMG_UINT32 ui32Size); -+ -+/*************************************************************************/ /*! -+ @Function TLClientAcquireData -+ @Description When there is data available in the stream buffer this call -+ returns with the address and length of the data buffer the -+ client can safely read. This buffer may contain one or more -+ packets of data. -+ If no data is available then this call blocks until it becomes -+ available. However if the stream has been destroyed while -+ waiting then a resource unavailable error will be returned to -+ the caller. Clients must pair this call with a ReleaseData -+ call. -+ @Input hDevConnection Address of a pointer to a connection object -+ @Input hSD Handle of the stream object to read -+ @Output ppPacketBuf Address of a pointer to an byte buffer. On exit -+ pointer contains address of buffer to read from -+ @Output puiBufLen Pointer to an integer. On exit it is the size -+ of the data to read from the packet buffer -+ @Return PVRSRV_ERROR_RESOURCE_UNAVAILABLE when stream no longer exists -+ @Return PVRSRV_ERROR_HANDLE_NOT_FOUND when SD handle not known -+ @Return PVRSRV_ERROR_STREAM_ERROR internal driver state error -+ @Return PVRSRV_ERROR_RETRY release not called beforehand -+ @Return PVRSRV_ERROR_TIMEOUT block timed out, no data -+ @Return PVRSRV_ERROR for other system codes -+*/ /**************************************************************************/ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientAcquireData(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hSD, -+ IMG_PBYTE* ppPacketBuf, -+ IMG_UINT32* puiBufLen); -+ -+ -+/*************************************************************************/ /*! -+ @Function TLClientReleaseData -+ @Description Called after client has read the stream data out of the buffer -+ The data is subsequently flushed from the stream buffer to make -+ room for more data packets from the stream source. 
-+ @Input hDevConnection Address of a pointer to a connection object -+ @Input hSD Handle of the stream object to read -+ @Return PVRSRV_ERROR_RESOURCE_UNAVAILABLE when stream no longer exists -+ @Return PVRSRV_ERROR_HANDLE_NOT_FOUND when SD handle not known to TL -+ @Return PVRSRV_ERROR_STREAM_ERROR internal driver state error -+ @Return PVRSRV_ERROR_RETRY acquire not called beforehand -+ @Return PVRSRV_ERROR for system codes -+*/ /**************************************************************************/ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientReleaseData(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hSD); -+ -+/*************************************************************************/ /*! -+ @Function TLClientReleaseDataLess -+ @Description Called after client has read only some data out of the buffer -+ and wishes to complete the read early i.e. does not want to -+ read the full data that the acquire call returned e.g read just -+ one packet from the stream. -+ The data is subsequently flushed from the stream buffer to make -+ room for more data packets from the stream source. -+ @Input hDevConnection Address of a pointer to a connection object -+ @Input hSD Handle of the stream object to read -+ @Input uiActualReadLen Size of data read, in bytes. Must be on a TL -+ packet boundary. -+ @Return PVRSRV_ERROR_INVALID_PARAMS when read length too big -+ @Return PVRSRV_ERROR_RESOURCE_UNAVAILABLE when stream no longer exists -+ @Return PVRSRV_ERROR_HANDLE_NOT_FOUND when SD handle not known to TL -+ @Return PVRSRV_ERROR_STREAM_ERROR internal driver state error -+ @Return PVRSRV_ERROR_RETRY acquire not called beforehand -+ @Return PVRSRV_ERROR for system codes -+*/ /**************************************************************************/ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientReleaseDataLess(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hSD, IMG_UINT32 uiActualReadLen); -+ -+/*************************************************************************/ /*! -+ @Function TLClientWriteData -+ @Description Writes data to the stream. -+ @Input hDevConnection Address of a pointer to a connection object -+ @Input hSD Handle of the stream object to read -+ @Input ui32Size Size of the data -+ @Input pui8Data Pointer to data -+*/ /**************************************************************************/ -+IMG_INTERNAL -+PVRSRV_ERROR TLClientWriteData(SHARED_DEV_CONNECTION hDevConnection, -+ IMG_HANDLE hSD, -+ IMG_UINT32 ui32Size, -+ IMG_BYTE *pui8Data); -+ -+ -+#endif /* TLCLIENT_H */ -+ -+/****************************************************************************** -+ End of file (tlclient.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/tlintern.c b/drivers/gpu/drm/img-rogue/tlintern.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/tlintern.c -@@ -0,0 +1,442 @@ -+/*************************************************************************/ /*! -+@File -+@Title Transport Layer kernel side API implementation. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Transport Layer functions available to driver components in -+ the driver. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+//#define PVR_DPF_FUNCTION_TRACE_ON 1 -+#undef PVR_DPF_FUNCTION_TRACE_ON -+#include "pvr_debug.h" -+ -+#include "allocmem.h" -+#include "pvrsrv_error.h" -+#include "osfunc.h" -+#include "devicemem.h" -+ -+#include "pvrsrv_tlcommon.h" -+#include "tlintern.h" -+ -+/* -+ * Make functions -+ */ -+PTL_STREAM_DESC -+TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3) -+{ -+ PTL_STREAM_DESC ps = OSAllocZMem(sizeof(TL_STREAM_DESC)); -+ if (ps == NULL) -+ { -+ return NULL; -+ } -+ ps->psNode = f1; -+ ps->ui32Flags = f2; -+ ps->hReadEvent = f3; -+ ps->uiRefCount = 1; -+ -+ if (f2 & PVRSRV_STREAM_FLAG_READ_LIMIT) -+ { -+ ps->ui32ReadLimit = f1->psStream->ui32Write; -+ } -+ return ps; -+} -+ -+PTL_SNODE -+TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4) -+{ -+ PTL_SNODE ps = OSAllocZMem(sizeof(TL_SNODE)); -+ if (ps == NULL) -+ { -+ return NULL; -+ } -+ ps->hReadEventObj = f2; -+ ps->psStream = f3; -+ ps->psRDesc = f4; -+ f3->psNode = ps; -+ return ps; -+} -+ -+/* -+ * Transport Layer Global top variables and functions -+ */ -+static TL_GLOBAL_DATA sTLGlobalData; -+ -+TL_GLOBAL_DATA *TLGGD(void) /* TLGetGlobalData() */ -+{ -+ return &sTLGlobalData; -+} -+ -+/* TLInit must only be called once at driver initialisation. -+ * An assert is provided to check this condition on debug builds. 
-+ */ -+PVRSRV_ERROR -+TLInit(void) -+{ -+ PVRSRV_ERROR eError; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(sTLGlobalData.hTLGDLock == NULL && sTLGlobalData.hTLEventObj == NULL); -+ -+ /* Allocate a lock for TL global data, to be used while updating the TL data. -+ * This is for making TL global data multi-thread safe */ -+ eError = OSLockCreate(&sTLGlobalData.hTLGDLock); -+ PVR_GOTO_IF_ERROR(eError, e0); -+ -+ /* Allocate the event object used to signal global TL events such as -+ * a new stream created */ -+ eError = OSEventObjectCreate("TLGlobalEventObj", &sTLGlobalData.hTLEventObj); -+ PVR_GOTO_IF_ERROR(eError, e1); -+ -+ PVR_DPF_RETURN_OK; -+ -+/* Don't allow the driver to start up on error */ -+e1: -+ OSLockDestroy (sTLGlobalData.hTLGDLock); -+ sTLGlobalData.hTLGDLock = NULL; -+e0: -+ PVR_DPF_RETURN_RC (eError); -+} -+ -+static void RemoveAndFreeStreamNode(PTL_SNODE psRemove) -+{ -+ TL_GLOBAL_DATA* psGD = TLGGD(); -+ PTL_SNODE* last; -+ PTL_SNODE psn; -+ PVRSRV_ERROR eError; -+ -+ PVR_DPF_ENTERED; -+ -+ /* Unlink the stream node from the master list */ -+ PVR_ASSERT(psGD->psHead); -+ last = &psGD->psHead; -+ for (psn = psGD->psHead; psn; psn=psn->psNext) -+ { -+ if (psn == psRemove) -+ { -+ /* Other calling code may have freed and zeroed the pointers */ -+ if (psn->psRDesc) -+ { -+ OSFreeMem(psn->psRDesc); -+ psn->psRDesc = NULL; -+ } -+ if (psn->psStream) -+ { -+ OSFreeMem(psn->psStream); -+ psn->psStream = NULL; -+ } -+ *last = psn->psNext; -+ break; -+ } -+ last = &psn->psNext; -+ } -+ -+ /* Release the event list object owned by the stream node */ -+ if (psRemove->hReadEventObj) -+ { -+ eError = OSEventObjectDestroy(psRemove->hReadEventObj); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); -+ -+ psRemove->hReadEventObj = NULL; -+ } -+ -+ /* Release the memory of the stream node */ -+ OSFreeMem(psRemove); -+ -+ PVR_DPF_RETURN; -+} -+ -+static void FreeGlobalData(void) -+{ -+ PTL_SNODE psCurrent = sTLGlobalData.psHead; -+ PTL_SNODE psNext; -+ PVRSRV_ERROR eError; -+ -+ PVR_DPF_ENTERED; -+ -+ /* Clean up the SNODE list */ -+ if (psCurrent) -+ { -+ while (psCurrent) -+ { -+ psNext = psCurrent->psNext; -+ -+ /* Other calling code may have freed and zeroed the pointers */ -+ if (psCurrent->psRDesc) -+ { -+ OSFreeMem(psCurrent->psRDesc); -+ psCurrent->psRDesc = NULL; -+ } -+ if (psCurrent->psStream) -+ { -+ OSFreeMem(psCurrent->psStream); -+ psCurrent->psStream = NULL; -+ } -+ -+ /* Release the event list object owned by the stream node */ -+ if (psCurrent->hReadEventObj) -+ { -+ eError = OSEventObjectDestroy(psCurrent->hReadEventObj); -+ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); -+ -+ psCurrent->hReadEventObj = NULL; -+ } -+ -+ OSFreeMem(psCurrent); -+ psCurrent = psNext; -+ } -+ -+ sTLGlobalData.psHead = NULL; -+ } -+ -+ PVR_DPF_RETURN; -+} -+ -+void -+TLDeInit(void) -+{ -+ PVR_DPF_ENTERED; -+ -+ if (sTLGlobalData.uiClientCnt) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "TLDeInit transport layer but %d client streams are still connected", sTLGlobalData.uiClientCnt)); -+ sTLGlobalData.uiClientCnt = 0; -+ } -+ -+ FreeGlobalData(); -+ -+ /* Clean up the TL global event object */ -+ if (sTLGlobalData.hTLEventObj) -+ { -+ OSEventObjectDestroy(sTLGlobalData.hTLEventObj); -+ sTLGlobalData.hTLEventObj = NULL; -+ } -+ -+ /* Destroy the TL global data lock */ -+ if (sTLGlobalData.hTLGDLock) -+ { -+ OSLockDestroy (sTLGlobalData.hTLGDLock); -+ sTLGlobalData.hTLGDLock = NULL; -+ } -+ -+ PVR_DPF_RETURN; -+} -+ -+void TLAddStreamNode(PTL_SNODE psAdd) -+{ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psAdd); -+ 
psAdd->psNext = TLGGD()->psHead; -+ TLGGD()->psHead = psAdd; -+ -+ PVR_DPF_RETURN; -+} -+ -+PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName) -+{ -+ TL_GLOBAL_DATA* psGD = TLGGD(); -+ PTL_SNODE psn; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(pszName); -+ -+ for (psn = psGD->psHead; psn; psn=psn->psNext) -+ { -+ if (psn->psStream && OSStringNCompare(psn->psStream->szName, pszName, PRVSRVTL_MAX_STREAM_NAME_SIZE)==0) -+ { -+ PVR_DPF_RETURN_VAL(psn); -+ } -+ } -+ -+ PVR_DPF_RETURN_VAL(NULL); -+} -+ -+PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc) -+{ -+ TL_GLOBAL_DATA* psGD = TLGGD(); -+ PTL_SNODE psn; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psDesc); -+ -+ for (psn = psGD->psHead; psn; psn=psn->psNext) -+ { -+ if (psn->psRDesc == psDesc || psn->psWDesc == psDesc) -+ { -+ PVR_DPF_RETURN_VAL(psn); -+ } -+ } -+ PVR_DPF_RETURN_VAL(NULL); -+} -+ -+IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern, -+ IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE], -+ IMG_UINT32 ui32Max) -+{ -+ TL_GLOBAL_DATA *psGD = TLGGD(); -+ PTL_SNODE psn; -+ IMG_UINT32 ui32Count = 0; -+ size_t uiLen; -+ -+ PVR_ASSERT(pszNamePattern); -+ -+ if ((uiLen = OSStringLength(pszNamePattern)) == 0) -+ return 0; -+ -+ for (psn = psGD->psHead; psn; psn = psn->psNext) -+ { -+ if (OSStringNCompare(pszNamePattern, psn->psStream->szName, uiLen) != 0) -+ continue; -+ -+ /* If aaszStreams is NULL we only count how many string match -+ * the given pattern. If it's a valid pointer we also return -+ * the names. */ -+ if (aaszStreams != NULL) -+ { -+ if (ui32Count >= ui32Max) -+ break; -+ -+ /* all of names are shorter than MAX and null terminated */ -+ OSStringLCopy(aaszStreams[ui32Count], psn->psStream->szName, -+ PRVSRVTL_MAX_STREAM_NAME_SIZE); -+ } -+ -+ ui32Count++; -+ } -+ -+ return ui32Count; -+} -+ -+PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc) -+{ -+ PTL_SNODE psn; -+ -+ PVR_DPF_ENTERED; -+ -+ psn = TLFindStreamNodeByDesc(psDesc); -+ if (psn == NULL) -+ PVR_DPF_RETURN_VAL(NULL); -+ -+ PVR_ASSERT(psDesc == psn->psWDesc); -+ -+ psn->uiWRefCount++; -+ psDesc->uiRefCount++; -+ -+ PVR_DPF_RETURN_VAL(psn); -+} -+ -+void TLReturnStreamNode(PTL_SNODE psNode) -+{ -+ psNode->uiWRefCount--; -+ psNode->psWDesc->uiRefCount--; -+ -+ PVR_ASSERT(psNode->uiWRefCount > 0); -+ PVR_ASSERT(psNode->psWDesc->uiRefCount > 0); -+} -+ -+IMG_BOOL TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove) -+{ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psRemove); -+ -+ /* If there is a client connected to this stream, defer stream's deletion */ -+ if (psRemove->psRDesc != NULL || psRemove->psWDesc != NULL) -+ { -+ PVR_DPF_RETURN_VAL(IMG_FALSE); -+ } -+ -+ /* Remove stream from TL_GLOBAL_DATA's list and free stream node */ -+ psRemove->psStream = NULL; -+ RemoveAndFreeStreamNode(psRemove); -+ -+ PVR_DPF_RETURN_VAL(IMG_TRUE); -+} -+ -+IMG_BOOL TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psNodeToRemove, -+ PTL_STREAM_DESC psSD) -+{ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psNodeToRemove); -+ PVR_ASSERT(psSD); -+ -+ /* Decrement reference count. For descriptor obtained by reader it must -+ * reach 0 (only single reader allowed) and for descriptors obtained by -+ * writers it must reach value greater or equal to 0 (multiple writers -+ * model). */ -+ psSD->uiRefCount--; -+ -+ if (psSD == psNodeToRemove->psRDesc) -+ { -+ PVR_ASSERT(0 == psSD->uiRefCount); -+ /* Remove stream descriptor (i.e. 
stream reader context) */ -+ psNodeToRemove->psRDesc = NULL; -+ } -+ else if (psSD == psNodeToRemove->psWDesc) -+ { -+ PVR_ASSERT(0 <= psSD->uiRefCount); -+ -+ psNodeToRemove->uiWRefCount--; -+ -+ /* Remove stream descriptor if reference == 0 */ -+ if (0 == psSD->uiRefCount) -+ { -+ psNodeToRemove->psWDesc = NULL; -+ } -+ } -+ -+ /* Do not Free Stream Node if there is a write reference (a producer -+ * context) to the stream */ -+ if (NULL != psNodeToRemove->psRDesc || NULL != psNodeToRemove->psWDesc || -+ 0 != psNodeToRemove->uiWRefCount) -+ { -+ PVR_DPF_RETURN_VAL(IMG_FALSE); -+ } -+ -+ /* Make stream pointer NULL to prevent it from being destroyed in -+ * RemoveAndFreeStreamNode. Cleanup of stream should be done by the -+ * calling context */ -+ psNodeToRemove->psStream = NULL; -+ RemoveAndFreeStreamNode(psNodeToRemove); -+ -+ PVR_DPF_RETURN_VAL(IMG_TRUE); -+} -diff --git a/drivers/gpu/drm/img-rogue/tlintern.h b/drivers/gpu/drm/img-rogue/tlintern.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/tlintern.h -@@ -0,0 +1,345 @@ -+/*************************************************************************/ /*! -+@File -+@Title Transport Layer internals -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Transport Layer header used by TL internally -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
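RemoveAndFreeStreamNode() above unlinks an SNODE by walking the singly-linked list with a pointer to the previous link, so the head element needs no special case. The same idiom in isolation, with generic types (illustrative only, not driver code):

struct node { struct node *next; };

static void unlink_node(struct node **head, struct node *remove)
{
	struct node **last = head;      /* address of the link that points at cur */
	struct node *cur;

	for (cur = *head; cur != NULL; cur = cur->next)
	{
		if (cur == remove)
		{
			*last = cur->next;      /* splice the node out of the list */
			break;
		}
		last = &cur->next;
	}
}

Whether the node was the head or an interior element, the splice is the single assignment through *last.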
-+*/ /**************************************************************************/ -+#ifndef TLINTERN_H -+#define TLINTERN_H -+ -+ -+#include "devicemem_typedefs.h" -+#include "pvrsrv_tlcommon.h" -+#include "lock.h" -+#include "tlstream.h" -+ -+/* Forward declarations */ -+typedef struct _TL_SNODE_* PTL_SNODE; -+ -+/* To debug buffer utilisation enable this macro here and define -+ * PVRSRV_NEED_PVR_TRACE in the server pvr_debug.c and in tutils.c -+ * before the inclusion of pvr_debug.h. -+ * Issue pvrtutils 6 on target to see stream buffer utilisation. */ -+//#define TL_BUFFER_STATS 1 -+ -+/*! TL stream structure container. -+ * pbyBuffer holds the circular buffer. -+ * ui32Read points to the beginning of the buffer, ie to where data to -+ * Read begin. -+ * ui32Write points to the end of data that have been committed, ie this is -+ * where new data will be written. -+ * ui32Pending number of bytes reserved in last reserve call which have not -+ * yet been submitted. Therefore these data are not ready to -+ * be transported. -+ * hStreamWLock - provides atomic protection for the ui32Pending & ui32Write -+ * members of the structure for when they are checked and/or -+ * updated in the context of a stream writer (producer) -+ * calling DoTLStreamReserve() & TLStreamCommit(). -+ * - Reader context is not multi-threaded, only one client per -+ * stream is allowed. Also note the read context may be in an -+ * ISR which prevents a design where locks can be held in the -+ * AcquireData/ReleaseData() calls. Thus this lock only -+ * protects the stream members from simultaneous writers. -+ * -+ * ui32Read < ui32Write <= ui32Pending -+ * where < and <= operators are overloaded to make sense in a circular way. -+ */ -+typedef struct _TL_STREAM_ -+{ -+ IMG_CHAR szName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; /*!< String name identifier */ -+ TL_OPMODE eOpMode; /*!< Mode of Operation of TL Buffer */ -+ -+ IMG_BOOL bWaitForEmptyOnDestroy; /*!< Flag: On destroying a non-empty stream block until -+ * stream is drained. */ -+ IMG_BOOL bNoSignalOnCommit; /*!< Flag: Used to avoid the TL signalling waiting consumers -+ * that new data is available on every commit. Producers -+ * using this flag will need to manually signal when -+ * appropriate using the TLStreamSync() API */ -+ -+ void (*pfOnReaderOpenCallback)(void *pvArg); /*!< Optional on reader connect callback */ -+ void *pvOnReaderOpenUserData; /*!< On reader connect user data */ -+ void (*pfProducerCallback)(void); /*!< Optional producer callback of type TL_STREAM_SOURCECB */ -+ void *pvProducerUserData; /*!< Producer callback user data */ -+ -+ struct _TL_STREAM_ *psNotifStream; /*!< Pointer to the stream to which notification will be sent */ -+ -+ volatile IMG_UINT32 ui32Read; /*!< Pointer to the beginning of available data */ -+ volatile IMG_UINT32 ui32Write; /*!< Pointer to already committed data which are ready to be -+ * copied to user space */ -+ IMG_UINT32 ui32Pending; /*!< Count pending bytes reserved in buffer */ -+ IMG_UINT32 ui32Size; /*!< Buffer size */ -+ IMG_UINT32 ui32ThresholdUsageForSignal; /*!< Buffer usage threshold at which a TL writer signals a blocked/ -+ * waiting reader when transitioning from empty->non-empty */ -+ IMG_UINT32 ui32MaxPacketSize; /*! 
Max TL packet size */ -+ IMG_BYTE *pbyBuffer; /*!< Actual data buffer */ -+ -+ PTL_SNODE psNode; /*!< Ptr to parent stream node */ -+ DEVMEM_MEMDESC *psStreamMemDesc; /*!< MemDescriptor used to allocate buffer space through PMR */ -+ -+ IMG_HANDLE hProducerEvent; /*!< Handle to wait on if there is not enough space */ -+ IMG_HANDLE hProducerEventObj; /*!< Handle to signal blocked reserve calls */ -+ IMG_BOOL bSignalPending; /*!< Tracks if a "signal" is pending to be sent to a blocked/ -+ * waiting reader */ -+ -+ POS_LOCK hStreamWLock; /*!< Writers Lock for ui32Pending & ui32Write*/ -+ POS_LOCK hReadLock; /*!< Readers Lock for bReadPending & ui32Read*/ -+ IMG_BOOL bReadPending; /*!< Tracks if a read operation is pending or not*/ -+ IMG_BOOL bNoWrapPermanent; /*!< Flag: Prevents buffer wrap and subsequent data loss -+ * as well as resetting the read position on close. */ -+ -+#if defined(TL_BUFFER_STATS) -+ IMG_UINT32 ui32CntReadFails; /*!< Tracks how many times reader failed to acquire read lock */ -+ IMG_UINT32 ui32CntReadSuccesses; /*!< Tracks how many times reader acquires read lock successfully */ -+ IMG_UINT32 ui32CntWriteSuccesses; /*!< Tracks how many times writer acquires read lock successfully */ -+ IMG_UINT32 ui32CntWriteWaits; /*!< Tracks how many times writer had to wait to acquire read lock */ -+ IMG_UINT32 ui32CntNumWriteSuccess; /*!< Tracks how many write operations were successful*/ -+ IMG_UINT32 ui32BufferUt; /*!< Buffer utilisation high watermark, see TL_BUFFER_STATS above */ -+ IMG_UINT32 ui32MaxReserveWatermark; /*!< Max stream reserve size that was ever requested by a writer */ -+ IMG_UINT32 ui32SignalsSent; /*!< Number of signals that were actually sent by the write API */ -+ ATOMIC_T bNoReaderSinceFirstReserve; /*!< Tracks if a read has been done since the buffer was last found empty */ -+ IMG_UINT32 ui32TimeStart; /*!< Time at which a write (Reserve call) was done into an empty buffer. -+ * Guarded by hStreamWLock. */ -+ IMG_UINT32 ui32MinTimeToFullInUs; /*!< Minimum time taken to (nearly) fully fill an empty buffer. Guarded -+ * by hStreamWLock. */ -+ /* Behaviour counters, protected by hStreamLock in case of -+ * multi-threaded access */ -+ IMG_UINT32 ui32NumCommits; /*!< Counters used to analysing stream performance, see ++ loc */ -+ IMG_UINT32 ui32SignalNotSent; /*!< Counters used to analysing stream performance, see ++ loc */ -+ IMG_UINT32 ui32ManSyncs; /*!< Counters used to analysing stream performance, see ++ loc */ -+ IMG_UINT32 ui32ProducerByteCount; /*!< Counters used to analysing stream performance, see ++ loc */ -+ -+ /* Not protected by the lock, inc in the reader thread which is currently singular */ -+ IMG_UINT32 ui32AcquireRead1; /*!< Counters used to analysing stream performance, see ++ loc */ -+ IMG_UINT32 ui32AcquireRead2; /*!< Counters used to analysing stream performance, see ++ loc */ -+#endif -+ -+} TL_STREAM, *PTL_STREAM; -+ -+/* there need to be enough space reserved in the buffer for 2 minimal packets -+ * and it needs to be aligned the same way the buffer is or there will be a -+ * compile error.*/ -+#define BUFFER_RESERVED_SPACE (2 * PVRSRVTL_PACKET_ALIGNMENT) -+ -+/* ensure the space reserved follows the buffer's alignment */ -+static_assert(!(BUFFER_RESERVED_SPACE&(PVRSRVTL_PACKET_ALIGNMENT-1)), -+ "BUFFER_RESERVED_SPACE must be a multiple of PVRSRVTL_PACKET_ALIGNMENT"); -+ -+/* Define the largest value that a uint that matches the -+ * PVRSRVTL_PACKET_ALIGNMENT size can hold */ -+#define MAX_UINT 0xffffFFFF -+ -+/*! 
Defines the value used for TL_STREAM.ui32Pending when no reserve is -+ * outstanding on the stream. */ -+#define NOTHING_PENDING IMG_UINT32_MAX -+ -+ -+/* -+ * Transport Layer Stream Descriptor types/defs -+ */ -+typedef struct _TL_STREAM_DESC_ -+{ -+ PTL_SNODE psNode; /*!< Ptr to parent stream node */ -+ IMG_UINT32 ui32Flags; /*!< Flags supplied by client on stream open */ -+ IMG_HANDLE hReadEvent; /*!< For wait call (only used/set in reader descriptors) */ -+ IMG_INT uiRefCount; /*!< Reference count to the SD */ -+ -+#if defined(TL_BUFFER_STATS) -+ /* Behaviour counters, no multi-threading protection need as they are -+ * incremented in a single thread due to only supporting one reader -+ * at present */ -+ IMG_UINT32 ui32AcquireCount; /*!< Counters used to analysing stream performance, see ++ loc */ -+ IMG_UINT32 ui32NoData; /*!< Counters used to analysing stream performance, see ++ loc */ -+ IMG_UINT32 ui32NoDataSleep; /*!< Counters used to analysing stream performance, see ++ loc */ -+ IMG_UINT32 ui32Signalled; /*!< Counters used to analysing stream performance, see ++ loc */ -+ IMG_UINT32 ui32TimeoutEmpty; /*!< Counters used to analysing stream performance, see ++ loc */ -+ IMG_UINT32 ui32TimeoutData; /*!< Counters used to analysing stream performance, see ++ loc */ -+#endif -+ IMG_UINT32 ui32ReadLimit; /*!< Limit buffer reads to data present in the -+ buffer at the time of stream open. */ -+ IMG_UINT32 ui32ReadLen; /*!< Size of data returned by initial Acquire */ -+} TL_STREAM_DESC, *PTL_STREAM_DESC; -+ -+PTL_STREAM_DESC TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3); -+ -+#define TL_STREAM_KM_FLAG_MASK 0xFFFF0000 -+#define TL_STREAM_FLAG_TEST 0x10000000 -+#define TL_STREAM_FLAG_WRAPREAD 0x00010000 -+ -+#define TL_STREAM_UM_FLAG_MASK 0x0000FFFF -+ -+#if defined(TL_BUFFER_STATS) -+# define TL_COUNTER_INC(a) ((a)++) -+# define TL_COUNTER_ADD(a,b) ((a) += (b)) -+#else -+# define TL_COUNTER_INC(a) (void)(0) -+# define TL_COUNTER_ADD(a,b) (void)(0) -+#endif -+/* -+ * Transport Layer stream list node -+ */ -+typedef struct _TL_SNODE_ -+{ -+ struct _TL_SNODE_* psNext; /*!< Linked list next element */ -+ IMG_HANDLE hReadEventObj; /*!< Readers 'wait for data' event */ -+ PTL_STREAM psStream; /*!< TL Stream object */ -+ IMG_INT uiWRefCount; /*!< Stream writer reference count */ -+ PTL_STREAM_DESC psRDesc; /*!< Stream reader 0 or ptr only */ -+ PTL_STREAM_DESC psWDesc; /*!< Stream writer 0 or ptr only */ -+} TL_SNODE; -+ -+PTL_SNODE TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4); -+ -+/* -+ * Transport Layer global top types and variables -+ * Use access function to obtain pointer. -+ * -+ * hTLGDLock - provides atomicity over read/check/write operations and -+ * sequence of operations on uiClientCnt, psHead list of SNODEs and -+ * the immediate members in a list element SNODE structure. -+ * - This larger scope of responsibility for this lock helps avoid -+ * the need for a lock in the SNODE structure. -+ * - Lock held in the client (reader) context when streams are -+ * opened/closed and in the server (writer) context when streams -+ * are created/open/closed. -+ */ -+typedef struct _TL_GDATA_ -+{ -+ IMG_HANDLE hTLEventObj; /* Global TL signal object, new streams, etc */ -+ -+ IMG_UINT uiClientCnt; /* Counter to track the number of client stream connections. 
*/ -+ PTL_SNODE psHead; /* List of TL streams and associated client handle */ -+ -+ POS_LOCK hTLGDLock; /* Lock for structure AND psHead SNODE list */ -+} TL_GLOBAL_DATA, *PTL_GLOBAL_DATA; -+ -+/* -+ * Transport Layer Internal Kernel-Mode Server API -+ */ -+TL_GLOBAL_DATA* TLGGD(void); /* TLGetGlobalData() */ -+ -+PVRSRV_ERROR TLInit(void); -+void TLDeInit(void); -+ -+void TLAddStreamNode(PTL_SNODE psAdd); -+PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName); -+PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc); -+IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern, -+ IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE], -+ IMG_UINT32 ui32Max); -+PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc); -+void TLReturnStreamNode(PTL_SNODE psNode); -+ -+/****************************************************************************** -+ Function Name : TLTryRemoveStreamAndFreeStreamNode -+ -+ Inputs : PTL_SNODE Pointer to the TL_SNODE whose stream is requested -+ to be removed from TL_GLOBAL_DATA's list -+ -+ Return Value : IMG_TRUE - If the stream was made NULL and this -+ TL_SNODE was removed from the -+ TL_GLOBAL_DATA's list -+ -+ IMG_FALSE - If the stream wasn't made NULL as there -+ is a client connected to this stream -+ -+ Description : If there is no client currently connected to this stream then, -+ This function removes this TL_SNODE from the -+ TL_GLOBAL_DATA's list. The caller is responsible for the -+ cleanup of the TL_STREAM whose TL_SNODE may be removed -+ -+ Otherwise, this function does nothing -+******************************************************************************/ -+IMG_BOOL TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove); -+ -+/****************************************************************************** -+ Function Name : TLUnrefDescAndTryFreeStreamNode -+ -+ Inputs : PTL_SNODE Pointer to the TL_SNODE whose descriptor is -+ requested to be removed -+ : PTL_STREAM_DESC Pointer to the STREAM_DESC -+ -+ Return Value : IMG_TRUE - If this TL_SNODE was removed from the -+ TL_GLOBAL_DATA's list -+ -+ IMG_FALSE - Otherwise -+ -+ Description : This function removes the stream descriptor from this TL_SNODE -+ and, if there is no writer (producer context) currently bound to this -+ stream, this function removes this TL_SNODE from the TL_GLOBAL_DATA's -+ list. The caller is responsible for the cleanup of the TL_STREAM -+ whose TL_SNODE may be removed -+******************************************************************************/ -+IMG_BOOL TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psRemove, PTL_STREAM_DESC psSD); -+ -+/* -+ * Transport Layer stream interface to server part declared here to avoid -+ * circular dependency. 
-+ */ -+IMG_UINT32 TLStreamAcquireReadPos(PTL_STREAM psStream, -+ IMG_BOOL bDisableCallback, -+ IMG_UINT32* puiReadOffset); -+PVRSRV_ERROR TLStreamAdvanceReadPos(PTL_STREAM psStream, -+ IMG_UINT32 uiReadLen, -+ IMG_UINT32 uiOrigReadLen); -+void TLStreamResetReadPos(PTL_STREAM psStream); -+ -+DEVMEM_MEMDESC* TLStreamGetBufferPointer(PTL_STREAM psStream); -+IMG_BOOL TLStreamOutOfData(IMG_HANDLE psStream); -+ -+/****************************************************************************** -+ Function Name : TLStreamDestroy -+ -+ Inputs : PTL_STREAM Pointer to the TL_STREAM to be destroyed -+ -+ Description : This function performs all the clean-up operations required for -+ destruction of this stream -+******************************************************************************/ -+void TLStreamDestroy(PTL_STREAM psStream); -+ -+/* -+ * Test related functions -+ */ -+PVRSRV_ERROR TUtilsInit(PVRSRV_DEVICE_NODE *psDeviceNode); -+PVRSRV_ERROR TUtilsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode); -+ -+ -+#endif /* TLINTERN_H */ -+/****************************************************************************** -+ End of file (tlintern.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/tlserver.c b/drivers/gpu/drm/img-rogue/tlserver.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/tlserver.c -@@ -0,0 +1,747 @@ -+/*************************************************************************/ /*! -+@File -+@Title KM server Transport Layer implementation -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Main bridge APIs for Transport Layer client functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
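The TL_STREAM structure documented above drives the circular buffer entirely through the ui32Read/ui32Write indices ("ui32Read < ui32Write <= ui32Pending" under circular comparison), so the number of committed bytes depends on whether the write index has wrapped past the read index. A stand-alone sketch of that arithmetic, mirroring what TLStreamGetUT() in tlstream.c computes; the helper name is illustrative:

static IMG_UINT32 SketchBytesInUse(IMG_UINT32 ui32Read, IMG_UINT32 ui32Write,
                                   IMG_UINT32 ui32Size)
{
	if (ui32Write >= ui32Read)
	{
		return ui32Write - ui32Read;            /* no wrap */
	}
	return ui32Size - ui32Read + ui32Write;     /* write wrapped past read */
}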
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "img_defs.h" -+ -+/*#define PVR_DPF_FUNCTION_TRACE_ON 1*/ -+#undef PVR_DPF_FUNCTION_TRACE_ON -+#include "pvr_debug.h" -+ -+#include "connection_server.h" -+#include "allocmem.h" -+#include "devicemem.h" -+ -+#include "tlintern.h" -+#include "tlstream.h" -+#include "tlserver.h" -+ -+#include "pvrsrv_tlstreams.h" -+#define NO_STREAM_WAIT_PERIOD_US 2000000ULL -+#define NO_DATA_WAIT_PERIOD_US 500000ULL -+#define NO_ACQUIRE 0xffffffffU -+ -+ -+/* -+ * Transport Layer Client API Kernel-Mode bridge implementation -+ */ -+PVRSRV_ERROR -+TLServerOpenStreamKM(const IMG_CHAR* pszName, -+ IMG_UINT32 ui32Mode, -+ PTL_STREAM_DESC* ppsSD, -+ PMR** ppsTLPMR) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_ERROR eErrorEO = PVRSRV_OK; -+ PTL_SNODE psNode; -+ PTL_STREAM psStream; -+ TL_STREAM_DESC *psNewSD = NULL; -+ IMG_HANDLE hEvent; -+ IMG_BOOL bIsWriteOnly = ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ? -+ IMG_TRUE : IMG_FALSE; -+ IMG_BOOL bResetOnOpen = ui32Mode & PVRSRV_STREAM_FLAG_RESET_ON_OPEN ? -+ IMG_TRUE : IMG_FALSE; -+ IMG_BOOL bNoOpenCB = ui32Mode & PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK ? -+ IMG_TRUE : IMG_FALSE; -+ PTL_GLOBAL_DATA psGD = TLGGD(); -+ -+#if defined(PVR_DPF_FUNCTION_TRACE_ON) -+ PVR_DPF((PVR_DBG_CALLTRACE, "--> %s:%d entered (%s, %x)", __func__, __LINE__, pszName, ui32Mode)); -+#endif -+ -+ PVR_ASSERT(pszName); -+ -+ /* Acquire TL_GLOBAL_DATA lock here, as if the following TLFindStreamNodeByName -+ * returns NON NULL PTL_SNODE, we try updating the global data client count and -+ * PTL_SNODE's psRDesc and we want to make sure the TL_SNODE is valid (eg. 
has -+ * not been deleted) while we are updating it -+ */ -+ OSLockAcquire (psGD->hTLGDLock); -+ -+ psNode = TLFindStreamNodeByName(pszName); -+ if ((psNode == NULL) && (ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT)) -+ { /* Blocking code to wait for stream to be created if it does not exist */ -+ eError = OSEventObjectOpen(psGD->hTLEventObj, &hEvent); -+ PVR_LOG_GOTO_IF_ERROR (eError, "OSEventObjectOpen", e0); -+ -+ do -+ { -+ if ((psNode = TLFindStreamNodeByName(pszName)) == NULL) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "Stream %s does not exist, waiting...", pszName)); -+ -+ /* Release TL_GLOBAL_DATA lock before sleeping */ -+ OSLockRelease (psGD->hTLGDLock); -+ -+ /* Will exit OK or with timeout, both cases safe to ignore */ -+ eErrorEO = OSEventObjectWaitTimeout(hEvent, NO_STREAM_WAIT_PERIOD_US); -+ -+ /* Acquire lock after waking up */ -+ OSLockAcquire (psGD->hTLGDLock); -+ } -+ } -+ while ((psNode == NULL) && (eErrorEO == PVRSRV_OK)); -+ -+ eError = OSEventObjectClose(hEvent); -+ PVR_LOG_GOTO_IF_ERROR (eError, "OSEventObjectClose", e0); -+ } -+ -+ /* Make sure we have found a stream node after wait/search */ -+ if (psNode == NULL) -+ { -+ /* Did we exit the wait with timeout, inform caller */ -+ if (eErrorEO == PVRSRV_ERROR_TIMEOUT) -+ { -+ eError = eErrorEO; -+ } -+ else -+ { -+ eError = PVRSRV_ERROR_NOT_FOUND; -+ PVR_DPF((PVR_DBG_ERROR, "Stream \"%s\" does not exist", pszName)); -+ } -+ goto e0; -+ } -+ -+ psStream = psNode->psStream; -+ -+ /* Allocate memory for the stream. The memory will be allocated with the -+ * first call. */ -+ eError = TLAllocSharedMemIfNull(psStream); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate memory for stream" -+ " \"%s\"", pszName)); -+ goto e0; -+ } -+ -+ if (bIsWriteOnly) -+ { -+ -+ /* If psWDesc == NULL it means that this is the first attempt -+ * to open stream for write. If yes create the descriptor or increment -+ * reference count otherwise. */ -+ if (psNode->psWDesc == NULL) -+ { -+ psNewSD = TLMakeStreamDesc(psNode, ui32Mode, NULL); -+ psNode->psWDesc = psNewSD; -+ } -+ else -+ { -+ psNewSD = psNode->psWDesc; -+ psNode->psWDesc->uiRefCount++; -+ } -+ -+ PVR_LOG_GOTO_IF_NOMEM(psNewSD, eError, e0); -+ -+ psNode->uiWRefCount++; -+ } -+ else -+ { -+ /* Only one reader per stream supported */ -+ if (psNode->psRDesc != NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Cannot open \"%s\" stream, stream already" -+ " opened", pszName)); -+ eError = PVRSRV_ERROR_ALREADY_OPEN; -+ goto e0; -+ } -+ -+ /* Create an event handle for this client to wait on when no data in -+ * stream buffer. */ -+ eError = OSEventObjectOpen(psNode->hReadEventObj, &hEvent); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_LOG_ERROR(eError, "OSEventObjectOpen"); -+ eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT; -+ goto e0; -+ } -+ -+ psNewSD = TLMakeStreamDesc(psNode, ui32Mode, hEvent); -+ psNode->psRDesc = psNewSD; -+ -+ if (!psNewSD) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Not possible to make a new stream descriptor")); -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto e1; -+ } -+ -+ PVR_DPF((PVR_DBG_VERBOSE, -+ "TLServerOpenStreamKM evList=%p, evObj=%p", -+ psNode->hReadEventObj, -+ psNode->psRDesc->hReadEvent)); -+ } -+ -+ /* Copy the import handle back to the user mode API to enable access to -+ * the stream buffer from user-mode process. */ -+ eError = DevmemLocalGetImportHandle(TLStreamGetBufferPointer(psStream), -+ (void**) ppsTLPMR); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemLocalGetImportHandle", e2); -+ -+ psGD->uiClientCnt++; -+ -+ /* Global data updated. 
Now release global lock */ -+ OSLockRelease (psGD->hTLGDLock); -+ -+ *ppsSD = psNewSD; -+ -+ if (bResetOnOpen) -+ { -+ TLStreamReset(psStream); -+ } -+ -+ /* This callback is executed only on reader open. There are some actions -+ * executed on reader open that don't make much sense for writers e.g. -+ * injection on time synchronisation packet into the stream. */ -+ if (!bIsWriteOnly && psStream->pfOnReaderOpenCallback != NULL && !bNoOpenCB) -+ { -+ psStream->pfOnReaderOpenCallback(psStream->pvOnReaderOpenUserData); -+ } -+ -+ /* psNode->uiWRefCount is set to '1' on stream create so the first open -+ * is '2'. */ -+ if (bIsWriteOnly && psStream->psNotifStream != NULL && -+ psNode->uiWRefCount == 2) -+ { -+ TLStreamMarkStreamOpen(psStream); -+ } -+ -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Stream %s opened for %s", __func__, pszName, -+ ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ? "write" : "read")); -+ -+ PVR_DPF_RETURN_OK; -+ -+e2: -+ OSFreeMem(psNewSD); -+e1: -+ if (!bIsWriteOnly) -+ OSEventObjectClose(hEvent); -+e0: -+ OSLockRelease (psGD->hTLGDLock); -+ PVR_DPF_RETURN_RC (eError); -+} -+ -+PVRSRV_ERROR -+TLServerCloseStreamKM(PTL_STREAM_DESC psSD) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PTL_GLOBAL_DATA psGD = TLGGD(); -+ PTL_SNODE psNode; -+ PTL_STREAM psStream; -+ IMG_BOOL bDestroyStream; -+ IMG_BOOL bIsWriteOnly = psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO ? -+ IMG_TRUE : IMG_FALSE; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psSD); -+ -+ /* Quick exit if there are no streams */ -+ if (psGD->psHead == NULL) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); -+ } -+ -+ /* Check stream still valid */ -+ psNode = TLFindStreamNodeByDesc(psSD); -+ if ((psNode == NULL) || (psNode != psSD->psNode)) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); -+ } -+ -+ /* Since the descriptor is valid, the stream should not have been made NULL */ -+ PVR_ASSERT (psNode->psStream); -+ -+ /* Save the stream's reference in-case its destruction is required after this -+ * client is removed */ -+ psStream = psNode->psStream; -+ -+ /* Acquire TL_GLOBAL_DATA lock as the following TLRemoveDescAndTryFreeStreamNode -+ * call will update the TL_SNODE's descriptor value */ -+ OSLockAcquire (psGD->hTLGDLock); -+ -+ /* Close event handle because event object list might be destroyed in -+ * TLUnrefDescAndTryFreeStreamNode(). */ -+ if (!bIsWriteOnly) -+ { -+ /* Reset the read position on close if the stream requires it. */ -+ TLStreamResetReadPos(psStream); -+ -+ /* Close and free the event handle resource used by this descriptor */ -+ eError = OSEventObjectClose(psSD->hReadEvent); -+ if (eError != PVRSRV_OK) -+ { -+ /* Log error but continue as it seems best */ -+ PVR_LOG_ERROR(eError, "OSEventObjectClose"); -+ eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT; -+ } -+ } -+ else if (psNode->uiWRefCount == 2 && psStream->psNotifStream != NULL) -+ { -+ /* psNode->uiWRefCount is set to '1' on stream create so the last close -+ * before destruction is '2'. */ -+ TLStreamMarkStreamClose(psStream); -+ } -+ -+ /* Remove descriptor from stream object/list */ -+ bDestroyStream = TLUnrefDescAndTryFreeStreamNode (psNode, psSD); -+ -+ /* Check the counter is sensible after input data validated. 
*/ -+ PVR_ASSERT(psGD->uiClientCnt > 0); -+ psGD->uiClientCnt--; -+ -+ OSLockRelease (psGD->hTLGDLock); -+ -+ /* Destroy the stream if its TL_SNODE was removed from TL_GLOBAL_DATA */ -+ if (bDestroyStream) -+ { -+ TLStreamDestroy (psStream); -+ psStream = NULL; -+ } -+ -+ PVR_DPF((PVR_DBG_VERBOSE, "%s: Stream closed", __func__)); -+ -+ /* Free the descriptor if ref count reaches 0. */ -+ if (psSD->uiRefCount == 0) -+ { -+ /* Free the stream descriptor object */ -+ OSFreeMem(psSD); -+ } -+ -+ PVR_DPF_RETURN_RC(eError); -+} -+ -+PVRSRV_ERROR -+TLServerReserveStreamKM(PTL_STREAM_DESC psSD, -+ IMG_UINT32* ui32BufferOffset, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32SizeMin, -+ IMG_UINT32* pui32Available) -+{ -+ TL_GLOBAL_DATA* psGD = TLGGD(); -+ PTL_SNODE psNode; -+ IMG_UINT8* pui8Buffer = NULL; -+ PVRSRV_ERROR eError; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psSD); -+ -+ if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO)) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ /* Quick exit if there are no streams */ -+ if (psGD->psHead == NULL) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); -+ } -+ -+ /* Acquire the global lock. We have to be sure that no one modifies -+ * the list while we are looking for our stream. */ -+ OSLockAcquire(psGD->hTLGDLock); -+ /* Check stream still valid */ -+ psNode = TLFindAndGetStreamNodeByDesc(psSD); -+ OSLockRelease(psGD->hTLGDLock); -+ -+ if ((psNode == NULL) || (psNode != psSD->psNode)) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); -+ } -+ -+ -+ /* Since we have a valid stream descriptor, the stream should not have been -+ * made NULL by any producer context. */ -+ PVR_ASSERT (psNode->psStream); -+ -+ /* The TL writers that currently land here are at a very low to none risk -+ * to breach max TL packet size constraint (even if there is no reader -+ * connected to the TL stream and hence eventually will cause the TL stream -+ * to be full). Hence no need to know the status of TL stream reader -+ * connection. -+ */ -+ eError = TLStreamReserve2(psNode->psStream, &pui8Buffer, ui32Size, -+ ui32SizeMin, pui32Available, NULL); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "Failed to reserve %u (%u, %u) bytes in the stream, error %s.", -+ ui32Size, ui32SizeMin, *pui32Available, PVRSRVGETERRORSTRING(eError))); -+ } -+ else if (pui8Buffer == NULL) -+ { -+ PVR_DPF((PVR_DBG_WARNING, "Not enough space in the stream.")); -+ eError = PVRSRV_ERROR_STREAM_FULL; -+ } -+ else -+ { -+ *ui32BufferOffset = pui8Buffer - psNode->psStream->pbyBuffer; -+ PVR_ASSERT(*ui32BufferOffset < psNode->psStream->ui32Size); -+ } -+ -+ OSLockAcquire(psGD->hTLGDLock); -+ TLReturnStreamNode(psNode); -+ OSLockRelease(psGD->hTLGDLock); -+ -+ PVR_DPF_RETURN_RC(eError); -+} -+ -+PVRSRV_ERROR -+TLServerCommitStreamKM(PTL_STREAM_DESC psSD, -+ IMG_UINT32 ui32Size) -+{ -+ TL_GLOBAL_DATA* psGD = TLGGD(); -+ PTL_SNODE psNode; -+ PVRSRV_ERROR eError; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psSD); -+ -+ if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO)) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ /* Quick exit if there are no streams */ -+ if (psGD->psHead == NULL) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); -+ } -+ -+ /* Acquire the global lock. We have to be sure that no one modifies -+ * the list while we are looking for our stream. 
*/ -+ OSLockAcquire(psGD->hTLGDLock); -+ /* Check stream still valid */ -+ psNode = TLFindAndGetStreamNodeByDesc(psSD); -+ OSLockRelease(psGD->hTLGDLock); -+ -+ if ((psNode == NULL) || (psNode != psSD->psNode)) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); -+ } -+ -+ /* Since we have a valid stream descriptor, the stream should not have been -+ * made NULL by any producer context. */ -+ PVR_ASSERT (psNode->psStream); -+ -+ eError = TLStreamCommit(psNode->psStream, ui32Size); -+ PVR_LOG_IF_ERROR(eError, "TLStreamCommit"); -+ -+ OSLockAcquire(psGD->hTLGDLock); -+ TLReturnStreamNode(psNode); -+ OSLockRelease(psGD->hTLGDLock); -+ -+ PVR_DPF_RETURN_RC(eError); -+} -+ -+PVRSRV_ERROR -+TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern, -+ IMG_UINT32 ui32Size, -+ IMG_CHAR *pszStreams, -+ IMG_UINT32 *pui32NumFound) -+{ -+ PTL_SNODE psNode = NULL; -+ IMG_CHAR (*paszStreams)[PRVSRVTL_MAX_STREAM_NAME_SIZE] = -+ (IMG_CHAR (*)[PRVSRVTL_MAX_STREAM_NAME_SIZE]) (void *)pszStreams; -+ -+ if (*pszNamePattern == '\0') -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ if (ui32Size % PRVSRVTL_MAX_STREAM_NAME_SIZE != 0) -+ return PVRSRV_ERROR_INVALID_PARAMS; -+ -+ /* Quick exit if there are no streams */ -+ if (TLGGD()->psHead == NULL) -+ { -+ *pui32NumFound = 0; -+ return PVRSRV_OK; -+ } -+ -+ OSLockAcquire(TLGGD()->hTLGDLock); -+ -+ *pui32NumFound = TLDiscoverStreamNodes(pszNamePattern, paszStreams, -+ ui32Size / PRVSRVTL_MAX_STREAM_NAME_SIZE); -+ -+ /* Find "tlctrl" stream and reset it */ -+ psNode = TLFindStreamNodeByName(PVRSRV_TL_CTLR_STREAM); -+ if (psNode != NULL) -+ TLStreamReset(psNode->psStream); -+ -+ OSLockRelease(TLGGD()->hTLGDLock); -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+TLServerAcquireDataKM(PTL_STREAM_DESC psSD, -+ IMG_UINT32* puiReadOffset, -+ IMG_UINT32* puiReadLen) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ TL_GLOBAL_DATA* psGD = TLGGD(); -+ IMG_UINT32 uiTmpOffset; -+ IMG_UINT32 uiTmpLen = 0; -+ PTL_SNODE psNode; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psSD); -+ -+ TL_COUNTER_INC(psSD->ui32AcquireCount); -+ -+ /* Quick exit if there are no streams */ -+ if (psGD->psHead == NULL) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); -+ } -+ -+ /* Check stream still valid */ -+ psNode = TLFindStreamNodeByDesc(psSD); -+ if ((psNode == NULL) || (psNode != psSD->psNode)) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); -+ } -+ -+ /* If we are here, the stream will never be made NULL until this context itself -+ * calls TLRemoveDescAndTryFreeStreamNode(). This is because the producer will -+ * fail to make the stream NULL (by calling TLTryRemoveStreamAndFreeStreamNode) -+ * when a valid stream descriptor is present (i.e. a client is connected). -+ * Hence, no checks for stream being NON NULL are required after this. 
*/ -+ PVR_ASSERT (psNode->psStream); -+ -+ psSD->ui32ReadLen = 0; /* Handle NULL read returns */ -+ -+ do -+ { -+ uiTmpLen = TLStreamAcquireReadPos(psNode->psStream, psSD->ui32Flags & PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK, &uiTmpOffset); -+ -+ /* Check we have not already exceeded read limit with just offset -+ * regardless of data length to ensure the client sees the RC */ -+ if (psSD->ui32Flags & PVRSRV_STREAM_FLAG_READ_LIMIT) -+ { -+ /* Check to see if we are reading beyond the read limit */ -+ if (uiTmpOffset >= psSD->ui32ReadLimit) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_READLIMIT_REACHED); -+ } -+ } -+ -+ if (uiTmpLen > 0) -+ { /* Data found */ -+ -+ /* Check we have not already exceeded read limit offset+len */ -+ if (psSD->ui32Flags & PVRSRV_STREAM_FLAG_READ_LIMIT) -+ { -+ /* Adjust the read length if it goes beyond the read limit -+ * limit always guaranteed to be on packet */ -+ if ((uiTmpOffset + uiTmpLen) >= psSD->ui32ReadLimit) -+ { -+ uiTmpLen = psSD->ui32ReadLimit - uiTmpOffset; -+ } -+ } -+ -+ *puiReadOffset = uiTmpOffset; -+ *puiReadLen = uiTmpLen; -+ psSD->ui32ReadLen = uiTmpLen; /* Save the original data length in the stream desc */ -+ PVR_DPF_RETURN_OK; -+ } -+ else if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING)) -+ { /* No data found blocking */ -+ -+ /* Instead of doing a complete sleep for `NO_DATA_WAIT_PERIOD_US` us, we sleep in chunks -+ * of 168 ms. In a "deferred" signal scenario from writer, this gives us a chance to -+ * wake-up (timeout) early and continue reading in-case some data is available */ -+ IMG_UINT64 ui64WaitInChunksUs = MIN(NO_DATA_WAIT_PERIOD_US, 168000ULL); -+ IMG_BOOL bDataFound = IMG_FALSE; -+ -+ TL_COUNTER_INC(psSD->ui32NoDataSleep); -+ -+ LOOP_UNTIL_TIMEOUT(NO_DATA_WAIT_PERIOD_US) -+ { -+ eError = OSEventObjectWaitTimeout(psSD->hReadEvent, ui64WaitInChunksUs); -+ if (eError == PVRSRV_OK) -+ { -+ bDataFound = IMG_TRUE; -+ TL_COUNTER_INC(psSD->ui32Signalled); -+ break; -+ } -+ else if (eError == PVRSRV_ERROR_TIMEOUT) -+ { -+ if (TLStreamOutOfData(psNode->psStream)) -+ { -+ /* Return on timeout if stream empty, else let while exit and return data */ -+ continue; -+ } -+ else -+ { -+ bDataFound = IMG_TRUE; -+ TL_COUNTER_INC(psSD->ui32TimeoutData); -+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Data found at timeout. 
Current BuffUt = %u", -+ __func__, TLStreamGetUT(psNode->psStream))); -+ break; -+ } -+ } -+ else -+ { /* Some other system error with event objects */ -+ PVR_DPF_RETURN_RC(eError); -+ } -+ } END_LOOP_UNTIL_TIMEOUT(); -+ -+ if (bDataFound) -+ { -+ continue; -+ } -+ else -+ { -+ TL_COUNTER_INC(psSD->ui32TimeoutEmpty); -+ return PVRSRV_ERROR_TIMEOUT; -+ } -+ } -+ else -+ { /* No data non-blocking */ -+ TL_COUNTER_INC(psSD->ui32NoData); -+ -+ /* When no-data in non-blocking mode, uiReadOffset should be set to NO_ACQUIRE -+ * signifying there's no need of Release call */ -+ *puiReadOffset = NO_ACQUIRE; -+ *puiReadLen = 0; -+ PVR_DPF_RETURN_OK; -+ } -+ } -+ while (1); -+} -+ -+PVRSRV_ERROR -+TLServerReleaseDataKM(PTL_STREAM_DESC psSD, -+ IMG_UINT32 uiReadOffset, -+ IMG_UINT32 uiReadLen) -+{ -+ TL_GLOBAL_DATA* psGD = TLGGD(); -+ PTL_SNODE psNode; -+ -+ PVR_DPF_ENTERED; -+ -+ /* Unreferenced in release builds */ -+ PVR_UNREFERENCED_PARAMETER(uiReadOffset); -+ -+ PVR_ASSERT(psSD); -+ -+ /* Quick exit if there are no streams */ -+ if (psGD->psHead == NULL) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); -+ } -+ -+ if ((uiReadLen % PVRSRVTL_PACKET_ALIGNMENT != 0)) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ /* Check stream still valid */ -+ psNode = TLFindStreamNodeByDesc(psSD); -+ if ((psNode == NULL) || (psNode != psSD->psNode)) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); -+ } -+ -+ /* Since we have a valid stream descriptor, the stream should not have been -+ * made NULL by any producer context. */ -+ PVR_ASSERT (psNode->psStream); -+ -+ PVR_DPF((PVR_DBG_VERBOSE, "TLReleaseDataKM uiReadOffset=%d, uiReadLen=%d", uiReadOffset, uiReadLen)); -+ -+ /* Move read position on to free up space in stream buffer */ -+ PVR_DPF_RETURN_RC(TLStreamAdvanceReadPos(psNode->psStream, uiReadLen, psSD->ui32ReadLen)); -+} -+ -+PVRSRV_ERROR -+TLServerWriteDataKM(PTL_STREAM_DESC psSD, -+ IMG_UINT32 ui32Size, -+ IMG_BYTE* pui8Data) -+{ -+ TL_GLOBAL_DATA* psGD = TLGGD(); -+ PTL_SNODE psNode; -+ PVRSRV_ERROR eError; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psSD); -+ -+ if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO)) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ /* Quick exit if there are no streams */ -+ if (psGD->psHead == NULL) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); -+ } -+ -+ OSLockAcquire(psGD->hTLGDLock); -+ /* Check stream still valid */ -+ psNode = TLFindAndGetStreamNodeByDesc(psSD); -+ OSLockRelease(psGD->hTLGDLock); -+ -+ if ((psNode == NULL) || (psNode != psSD->psNode)) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); -+ } -+ -+ /* Since we have a valid stream descriptor, the stream should not have been -+ * made NULL by any producer context. 
*/ -+ PVR_ASSERT (psNode->psStream); -+ -+ eError = TLStreamWrite(psNode->psStream, pui8Data, ui32Size); -+ /* propagate error up but don't print anything here */ -+ -+ OSLockAcquire(psGD->hTLGDLock); -+ TLReturnStreamNode(psNode); -+ OSLockRelease(psGD->hTLGDLock); -+ -+ PVR_DPF_RETURN_RC(eError); -+} -+ -+/****************************************************************************** -+ End of file (tlserver.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/tlserver.h b/drivers/gpu/drm/img-rogue/tlserver.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/tlserver.h -@@ -0,0 +1,97 @@ -+/*************************************************************************/ /*! -+@File -+@Title KM server Transport Layer implementation -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Main bridge APIs for Transport Layer client functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
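TLServerAcquireDataKM() above deliberately splits its NO_DATA_WAIT_PERIOD_US budget into roughly 168 ms slices: on each timeout it re-checks the stream, so data written without a signal (the deferred-signal case) is still picked up promptly. Stripped of the TL specifics, the pattern is roughly as follows; OSEventObjectWaitTimeout() is the real driver primitive, while the condition callback and helper name are illustrative:

static IMG_BOOL SketchChunkedWait(IMG_HANDLE hEvent,
                                  IMG_UINT64 ui64TotalUs,
                                  IMG_UINT64 ui64ChunkUs,
                                  IMG_BOOL (*pfnDataReady)(void *pvArg),
                                  void *pvArg)
{
	IMG_UINT64 ui64WaitedUs = 0;

	while (ui64WaitedUs < ui64TotalUs)
	{
		PVRSRV_ERROR eError = OSEventObjectWaitTimeout(hEvent, ui64ChunkUs);

		if (eError == PVRSRV_OK)
		{
			return IMG_TRUE;               /* a writer signalled new data */
		}
		if (eError != PVRSRV_ERROR_TIMEOUT)
		{
			return IMG_FALSE;              /* unexpected error: give up */
		}
		if (pfnDataReady(pvArg))
		{
			return IMG_TRUE;               /* data arrived without a signal */
		}
		ui64WaitedUs += ui64ChunkUs;
	}
	return IMG_FALSE;                      /* budget exhausted, still empty */
}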
-+*/ /**************************************************************************/ -+ -+#ifndef TLSERVER_H -+#define TLSERVER_H -+ -+#include "img_defs.h" -+#include "pvr_debug.h" -+#include "connection_server.h" -+ -+#include "tlintern.h" -+ -+/* -+ * Transport Layer Client API Kernel-Mode bridge implementation -+ */ -+ -+PVRSRV_ERROR TLServerConnectKM(CONNECTION_DATA *psConnection); -+PVRSRV_ERROR TLServerDisconnectKM(CONNECTION_DATA *psConnection); -+ -+PVRSRV_ERROR TLServerOpenStreamKM(const IMG_CHAR* pszName, -+ IMG_UINT32 ui32Mode, -+ PTL_STREAM_DESC* ppsSD, -+ PMR** ppsTLPMR); -+ -+PVRSRV_ERROR TLServerCloseStreamKM(PTL_STREAM_DESC psSD); -+ -+PVRSRV_ERROR TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern, -+ IMG_UINT32 ui32Max, -+ IMG_CHAR *pszStreams, -+ IMG_UINT32 *pui32NumFound); -+ -+PVRSRV_ERROR TLServerReserveStreamKM(PTL_STREAM_DESC psSD, -+ IMG_UINT32* ui32BufferOffset, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32SizeMin, -+ IMG_UINT32* pui32Available); -+ -+PVRSRV_ERROR TLServerCommitStreamKM(PTL_STREAM_DESC psSD, -+ IMG_UINT32 ui32Size); -+ -+PVRSRV_ERROR TLServerAcquireDataKM(PTL_STREAM_DESC psSD, -+ IMG_UINT32* puiReadOffset, -+ IMG_UINT32* puiReadLen); -+ -+PVRSRV_ERROR TLServerReleaseDataKM(PTL_STREAM_DESC psSD, -+ IMG_UINT32 uiReadOffset, -+ IMG_UINT32 uiReadLen); -+ -+PVRSRV_ERROR TLServerWriteDataKM(PTL_STREAM_DESC psSD, -+ IMG_UINT32 ui32Size, -+ IMG_BYTE *pui8Data); -+ -+#endif /* TLSERVER_H */ -+ -+/****************************************************************************** -+ End of file (tlserver.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/tlstream.c b/drivers/gpu/drm/img-rogue/tlstream.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/tlstream.c -@@ -0,0 +1,1625 @@ -+/*************************************************************************/ /*! -+@File -+@Title Transport Layer kernel side API implementation. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Transport Layer API implementation. -+ These functions are provided to driver components. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. 
If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+//#define PVR_DPF_FUNCTION_TRACE_ON 1 -+#undef PVR_DPF_FUNCTION_TRACE_ON -+#include "pvr_debug.h" -+ -+#include "allocmem.h" -+#include "devicemem.h" -+#include "pvrsrv_error.h" -+#include "osfunc.h" -+#include "log2.h" -+ -+#include "tlintern.h" -+#include "tlstream.h" -+ -+#include "pvrsrv.h" -+ -+#define EVENT_OBJECT_TIMEOUT_US 1000000ULL -+#define READ_PENDING_TIMEOUT_US 100000ULL -+ -+/*! Compute maximum TL packet size for this stream. Max packet size will be -+ * minimum of PVRSRVTL_MAX_PACKET_SIZE and (BufferSize / 2.5). This computation -+ * is required to avoid a corner case that was observed when TL buffer size is -+ * smaller than twice of TL max packet size and read, write index are positioned -+ * in such a way that the TL packet (write packet + padding packet) size is may -+ * be bigger than the buffer size itself. -+ */ -+#define GET_TL_MAX_PACKET_SIZE( bufSize ) PVRSRVTL_ALIGN( MIN( PVRSRVTL_MAX_PACKET_SIZE, ( 2 * bufSize ) / 5 ) ) -+ -+/* Given the state of the buffer it returns a number of bytes that the client -+ * can use for a successful allocation. */ -+static INLINE IMG_UINT32 suggestAllocSize(IMG_UINT32 ui32LRead, -+ IMG_UINT32 ui32LWrite, -+ IMG_UINT32 ui32CBSize, -+ IMG_UINT32 ui32ReqSizeMin, -+ IMG_UINT32 ui32MaxPacketSize) -+{ -+ IMG_UINT32 ui32AvSpace = 0; -+ -+ /* This could be written in fewer lines using the ? operator but it -+ would not be kind to potential readers of this source at all. */ -+ if (ui32LRead > ui32LWrite) /* Buffer WRAPPED */ -+ { -+ if ((ui32LRead - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE)) -+ { -+ ui32AvSpace = ui32LRead - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE; -+ } -+ } -+ else /* Normal, no wrap */ -+ { -+ if ((ui32CBSize - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE)) -+ { -+ ui32AvSpace = ui32CBSize - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE; -+ } -+ else if ((ui32LRead - 0) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE)) -+ { -+ ui32AvSpace = ui32LRead - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE; -+ } -+ } -+ /* The max size of a TL packet currently is UINT16. adjust accordingly */ -+ return MIN(ui32AvSpace, ui32MaxPacketSize); -+} -+ -+/* Returns bytes left in the buffer. Negative if there is not any. -+ * two 8b aligned values are reserved, one for the write failed buffer flag -+ * and one to be able to distinguish the buffer full state to the buffer -+ * empty state. 
-+ * Always returns free space -8 even when the "write failed" packet may be -+ * already in the stream before this write. */ -+static INLINE IMG_INT -+circbufSpaceLeft(IMG_UINT32 ui32Read, IMG_UINT32 ui32Write, IMG_UINT32 ui32size) -+{ -+ /* We need to reserve 8b (one packet) in the buffer to be able to tell empty -+ * buffers from full buffers and one more for packet write fail packet */ -+ if (ui32Read > ui32Write) -+ { -+ return (IMG_INT)ui32Read - (IMG_INT)ui32Write - (IMG_INT)BUFFER_RESERVED_SPACE; -+ } -+ else -+ { -+ return (IMG_INT)ui32size - ((IMG_INT)ui32Write - (IMG_INT)ui32Read) - (IMG_INT)BUFFER_RESERVED_SPACE; -+ } -+} -+ -+IMG_UINT32 TLStreamGetUT(IMG_HANDLE hStream) -+{ -+ PTL_STREAM psStream = (PTL_STREAM) hStream; -+ IMG_UINT32 ui32LRead = psStream->ui32Read, ui32LWrite = psStream->ui32Write; -+ -+ if (ui32LWrite >= ui32LRead) -+ { -+ return (ui32LWrite-ui32LRead); -+ } -+ else -+ { -+ return (psStream->ui32Size-ui32LRead+ui32LWrite); -+ } -+} -+ -+PVRSRV_ERROR TLAllocSharedMemIfNull(IMG_HANDLE hStream) -+{ -+ PTL_STREAM psStream = (PTL_STREAM) hStream; -+ PVRSRV_ERROR eError; -+ -+ /* CPU Local memory used as these buffers are not accessed by the device. -+ * CPU Uncached write combine memory used to improve write performance, -+ * memory barrier added in TLStreamCommit to ensure data written to memory -+ * before CB write point is updated before consumption by the reader. -+ */ -+ IMG_CHAR pszBufferLabel[PRVSRVTL_MAX_STREAM_NAME_SIZE + 20]; -+ PVRSRV_MEMALLOCFLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | -+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | -+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | -+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | -+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL); /* TL for now is only used by host driver, so cpulocal mem suffices */ -+ -+ /* Exit if memory has already been allocated. */ -+ if (psStream->pbyBuffer != NULL) -+ return PVRSRV_OK; -+ -+ OSSNPrintf(pszBufferLabel, sizeof(pszBufferLabel), "TLStreamBuf-%s", -+ psStream->szName); -+ -+ -+ /* Use HostMemDeviceNode instead of psStream->psDevNode to benefit from faster -+ * accesses to CPU local memory. 
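The read/write index arithmetic used by circbufSpaceLeft() and TLStreamGetUT() above can be checked with a small host-side model. This is a standalone sketch; BUFFER_RESERVED_SPACE is assumed to be 16 bytes, matching the comment's "two 8b aligned values" (one to tell a full buffer from an empty one, one for the write-failed marker).

#include <stdio.h>

#define RESERVED_SPACE 16   /* assumed: two 8-byte reserved slots, per the comment */

/* Bytes a writer may still claim; mirrors circbufSpaceLeft(). */
static int space_left(unsigned rd, unsigned wr, unsigned size)
{
    if (rd > wr)                                 /* writer has wrapped behind the reader */
        return (int)rd - (int)wr - RESERVED_SPACE;
    return (int)size - ((int)wr - (int)rd) - RESERVED_SPACE;
}

/* Unread bytes currently held; mirrors TLStreamGetUT(). */
static unsigned utilisation(unsigned rd, unsigned wr, unsigned size)
{
    return (wr >= rd) ? (wr - rd) : (size - rd + wr);
}

int main(void)
{
    unsigned size = 1024;
    printf("empty:   left=%d used=%u\n", space_left(0, 0, size),     utilisation(0, 0, size));
    printf("wrapped: left=%d used=%u\n", space_left(900, 100, size), utilisation(900, 100, size));
    return 0;
}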
When the framework to access CPU_LOCAL device -+ * memory from GPU is fixed, we'll switch back to use psStream->psDevNode for -+ * TL buffers */ -+ eError = DevmemAllocateExportable((IMG_HANDLE)PVRSRVGetPVRSRVData()->psHostMemDeviceNode, -+ (IMG_DEVMEM_SIZE_T) psStream->ui32Size, -+ (IMG_DEVMEM_ALIGN_T) OSGetPageSize(), -+ ExactLog2(OSGetPageSize()), -+ uiMemFlags, -+ pszBufferLabel, -+ &psStream->psStreamMemDesc); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAllocateExportable", e0); -+ -+ eError = DevmemAcquireCpuVirtAddr(psStream->psStreamMemDesc, -+ (void**) &psStream->pbyBuffer); -+ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e1); -+ -+ return PVRSRV_OK; -+ -+e1: -+ DevmemFree(psStream->psStreamMemDesc); -+e0: -+ return eError; -+} -+ -+void TLFreeSharedMem(IMG_HANDLE hStream) -+{ -+ PTL_STREAM psStream = (PTL_STREAM) hStream; -+ -+ if (psStream->pbyBuffer != NULL) -+ { -+ DevmemReleaseCpuVirtAddr(psStream->psStreamMemDesc); -+ psStream->pbyBuffer = NULL; -+ } -+ if (psStream->psStreamMemDesc != NULL) -+ { -+ DevmemFree(psStream->psStreamMemDesc); -+ psStream->psStreamMemDesc = NULL; -+ } -+} -+ -+/* Special space left routine for TL_FLAG_PERMANENT_NO_WRAP streams */ -+static INLINE IMG_UINT -+bufSpaceLeft(IMG_UINT32 ui32Read, IMG_UINT32 ui32Write, IMG_UINT32 ui32size) -+{ -+ /* buffers from full buffers and one more for packet write fail packet */ -+ PVR_ASSERT(ui32Read<=ui32Write); -+ return ui32size - ui32Write; -+} -+ -+/******************************************************************************* -+ * TL Server public API implementation. -+ ******************************************************************************/ -+PVRSRV_ERROR -+TLStreamCreate(IMG_HANDLE *phStream, -+ const IMG_CHAR *szStreamName, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32StreamFlags, -+ TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB, -+ void *pvOnReaderOpenUD, -+ TL_STREAM_SOURCECB pfProducerCB, -+ void *pvProducerUD) -+{ -+ PTL_STREAM psTmp; -+ PVRSRV_ERROR eError; -+ IMG_HANDLE hEventList; -+ PTL_SNODE psn; -+ TL_OPMODE eOpMode; -+ -+ PVR_DPF_ENTERED; -+ /* Parameter checks: non NULL handler required */ -+ if (NULL == phStream) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ if (szStreamName == NULL || *szStreamName == '\0' || -+ OSStringLength(szStreamName) >= PRVSRVTL_MAX_STREAM_NAME_SIZE) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ eOpMode = ui32StreamFlags & TL_OPMODE_MASK; -+ if (( eOpMode <= TL_OPMODE_UNDEF ) || ( eOpMode >= TL_OPMODE_LAST )) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "OpMode for TL stream is invalid")); -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ /* Acquire TL_GLOBAL_DATA lock here because, if the following TLFindStreamNodeByName() -+ * returns NULL, a new TL_SNODE will be added to TL_GLOBAL_DATA's TL_SNODE list */ -+ OSLockAcquire (TLGGD()->hTLGDLock); -+ -+ /* Check if there already exists a stream with this name. */ -+ psn = TLFindStreamNodeByName( szStreamName ); -+ if (NULL != psn) -+ { -+ eError = PVRSRV_ERROR_ALREADY_EXISTS; -+ goto e0; -+ } -+ -+ /* Allocate stream structure container (stream struct) for the new stream */ -+ psTmp = OSAllocZMem(sizeof(TL_STREAM)); -+ if (NULL == psTmp) -+ { -+ eError = PVRSRV_ERROR_OUT_OF_MEMORY; -+ goto e0; -+ } -+ -+ OSStringLCopy(psTmp->szName, szStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE); -+ -+ if (ui32StreamFlags & TL_FLAG_FORCE_FLUSH) -+ { -+ psTmp->bWaitForEmptyOnDestroy = IMG_TRUE; -+ } -+ -+ psTmp->bNoSignalOnCommit = (ui32StreamFlags&TL_FLAG_NO_SIGNAL_ON_COMMIT) ? 
IMG_TRUE : IMG_FALSE; -+ psTmp->bNoWrapPermanent = (ui32StreamFlags&TL_FLAG_PERMANENT_NO_WRAP) ? IMG_TRUE : IMG_FALSE; -+ -+ psTmp->eOpMode = eOpMode; -+ if (psTmp->eOpMode == TL_OPMODE_BLOCK) -+ { -+ /* Only allow drop properties to be mixed with no-wrap type streams -+ * since space does not become available when reads take place hence -+ * no point blocking. -+ */ -+ if (psTmp->bNoWrapPermanent) -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto e1; -+ } -+ } -+ -+ /* Additional synchronisation object required for some streams e.g. blocking */ -+ eError = OSEventObjectCreate(NULL, &psTmp->hProducerEventObj); -+ PVR_GOTO_IF_ERROR(eError, e1); -+ /* Create an event handle for this kind of stream */ -+ eError = OSEventObjectOpen(psTmp->hProducerEventObj, &psTmp->hProducerEvent); -+ PVR_GOTO_IF_ERROR(eError, e2); -+ -+ psTmp->pfOnReaderOpenCallback = pfOnReaderOpenCB; -+ psTmp->pvOnReaderOpenUserData = pvOnReaderOpenUD; -+ /* Remember producer supplied CB and data for later */ -+ psTmp->pfProducerCallback = (void(*)(void))pfProducerCB; -+ psTmp->pvProducerUserData = pvProducerUD; -+ -+ psTmp->psNotifStream = NULL; -+ -+ /* Round the requested bytes to a multiple of array elements' size, eg round 3 to 4 */ -+ psTmp->ui32Size = PVRSRVTL_ALIGN(ui32Size); -+ -+ /* Signalling from TLStreamCommit is deferred until buffer is slightly (~12%) filled */ -+ psTmp->ui32ThresholdUsageForSignal = psTmp->ui32Size >> 3; -+ psTmp->ui32MaxPacketSize = GET_TL_MAX_PACKET_SIZE(psTmp->ui32Size); -+ psTmp->ui32Read = 0; -+ psTmp->ui32Write = 0; -+ psTmp->ui32Pending = NOTHING_PENDING; -+ psTmp->bReadPending = IMG_FALSE; -+ psTmp->bSignalPending = IMG_FALSE; -+ -+#if defined(TL_BUFFER_STATS) -+ OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 0); -+ /* Setting MAX possible value for "minimum" time to full, -+ * helps in the logic which calculates this time */ -+ psTmp->ui32MinTimeToFullInUs = IMG_UINT32_MAX; -+#endif -+ -+ /* Memory will be allocated on first connect to the stream */ -+ if (!(ui32StreamFlags & TL_FLAG_ALLOCATE_ON_FIRST_OPEN)) -+ { -+ /* Allocate memory for the circular buffer and export it to user space. */ -+ eError = TLAllocSharedMemIfNull(psTmp); -+ PVR_LOG_GOTO_IF_ERROR(eError, "TLAllocSharedMem", e3); -+ } -+ -+ /* Synchronisation object to synchronise with user side data transfers. */ -+ eError = OSEventObjectCreate(psTmp->szName, &hEventList); -+ PVR_GOTO_IF_ERROR(eError, e4); -+ -+ eError = OSLockCreate (&psTmp->hStreamWLock); -+ PVR_GOTO_IF_ERROR(eError, e5); -+ -+ eError = OSLockCreate (&psTmp->hReadLock); -+ PVR_GOTO_IF_ERROR(eError, e6); -+ -+ /* Now remember the stream in the global TL structures */ -+ psn = TLMakeSNode(hEventList, (TL_STREAM *)psTmp, NULL); -+ PVR_GOTO_IF_NOMEM(psn, eError, e7); -+ -+ /* Stream node created, now reset the write reference count to 1 -+ * (i.e. 
this context's reference) */ -+ psn->uiWRefCount = 1; -+ -+ TLAddStreamNode(psn); -+ -+ /* Release TL_GLOBAL_DATA lock as the new TL_SNODE is now added to the list */ -+ OSLockRelease (TLGGD()->hTLGDLock); -+ -+ /* Best effort signal, client wait timeout will ultimately let it find the -+ * new stream if this fails, acceptable to avoid clean-up as it is tricky -+ * at this point */ -+ (void) OSEventObjectSignal(TLGGD()->hTLEventObj); -+ -+ /* Pass the newly created stream handle back to caller */ -+ *phStream = (IMG_HANDLE)psTmp; -+ PVR_DPF_RETURN_OK; -+ -+e7: -+ OSLockDestroy(psTmp->hReadLock); -+e6: -+ OSLockDestroy(psTmp->hStreamWLock); -+e5: -+ OSEventObjectDestroy(hEventList); -+e4: -+ TLFreeSharedMem(psTmp); -+e3: -+ OSEventObjectClose(psTmp->hProducerEvent); -+e2: -+ OSEventObjectDestroy(psTmp->hProducerEventObj); -+e1: -+ OSFreeMem(psTmp); -+e0: -+ OSLockRelease (TLGGD()->hTLGDLock); -+ -+ PVR_DPF_RETURN_RC(eError); -+} -+ -+void TLStreamReset(IMG_HANDLE hStream) -+{ -+ PTL_STREAM psStream = (PTL_STREAM) hStream; -+ -+ PVR_ASSERT(psStream != NULL); -+ -+ OSLockAcquire(psStream->hStreamWLock); -+ -+ while (psStream->ui32Pending != NOTHING_PENDING) -+ { -+ PVRSRV_ERROR eError; -+ -+ /* We're in the middle of a write so we cannot reset the stream. -+ * We are going to wait until the data is committed. Release lock while -+ * we're here. */ -+ OSLockRelease(psStream->hStreamWLock); -+ -+ /* Event when psStream->bNoSignalOnCommit is set we can still use -+ * the timeout capability of event object API (time in us). */ -+ eError = OSEventObjectWaitTimeout(psStream->psNode->hReadEventObj, 100); -+ if (eError != PVRSRV_ERROR_TIMEOUT && eError != PVRSRV_OK) -+ { -+ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectWaitTimeout"); -+ } -+ -+ OSLockAcquire(psStream->hStreamWLock); -+ -+ /* Either timeout occurred or the stream has been signalled. -+ * If former we have to check if the data was committed and if latter -+ * if the stream hasn't been re-reserved. Either way we have to go -+ * back to the condition. -+ * If the stream has been released we'll exit with the lock held so -+ * we can finally go and reset the stream. 
*/ -+ } -+ -+ psStream->ui32Read = 0; -+ psStream->ui32Write = 0; -+ /* we know that ui32Pending already has correct value (no need to set) */ -+ -+ OSLockRelease(psStream->hStreamWLock); -+} -+ -+PVRSRV_ERROR -+TLStreamSetNotifStream(IMG_HANDLE hStream, IMG_HANDLE hNotifStream) -+{ -+ PTL_STREAM psStream = (PTL_STREAM) hStream; -+ -+ if (hStream == NULL || hNotifStream == NULL) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ psStream->psNotifStream = (PTL_STREAM) hNotifStream; -+ -+ return PVRSRV_OK; -+} -+ -+PVRSRV_ERROR -+TLStreamReconfigure( -+ IMG_HANDLE hStream, -+ IMG_UINT32 ui32StreamFlags) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PTL_STREAM psTmp; -+ TL_OPMODE eOpMode; -+ -+ PVR_DPF_ENTERED; -+ -+ if (NULL == hStream) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ eOpMode = ui32StreamFlags & TL_OPMODE_MASK; -+ if (( eOpMode <= TL_OPMODE_UNDEF ) || ( eOpMode >= TL_OPMODE_LAST )) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "OpMode for TL stream is invalid")); -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ psTmp = (PTL_STREAM)hStream; -+ -+ /* Prevent the TL Stream buffer from being written to -+ * while its mode is being reconfigured -+ */ -+ OSLockAcquire (psTmp->hStreamWLock); -+ if (NOTHING_PENDING != psTmp->ui32Pending) -+ { -+ OSLockRelease (psTmp->hStreamWLock); -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY); -+ } -+ psTmp->ui32Pending = 0; -+ OSLockRelease (psTmp->hStreamWLock); -+ -+ psTmp->eOpMode = eOpMode; -+ if (psTmp->eOpMode == TL_OPMODE_BLOCK) -+ { -+ /* Only allow drop properties to be mixed with no-wrap type streams -+ * since space does not become available when reads take place hence -+ * no point blocking. -+ */ -+ if (psTmp->bNoWrapPermanent) -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto e1; -+ } -+ } -+ -+ OSLockAcquire (psTmp->hStreamWLock); -+ psTmp->ui32Pending = NOTHING_PENDING; -+ OSLockRelease (psTmp->hStreamWLock); -+e1: -+ PVR_DPF_RETURN_RC(eError); -+} -+ -+PVRSRV_ERROR -+TLStreamOpen(IMG_HANDLE *phStream, -+ const IMG_CHAR *szStreamName) -+{ -+ PTL_SNODE psTmpSNode; -+ -+ PVR_DPF_ENTERED; -+ -+ if (NULL == phStream || NULL == szStreamName) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ /* Acquire the TL_GLOBAL_DATA lock first to ensure, -+ * the TL_STREAM while returned and being modified, -+ * is not deleted by some other context */ -+ OSLockAcquire (TLGGD()->hTLGDLock); -+ -+ /* Search for a stream node with a matching stream name */ -+ psTmpSNode = TLFindStreamNodeByName(szStreamName); -+ -+ if (NULL == psTmpSNode) -+ { -+ OSLockRelease (TLGGD()->hTLGDLock); -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_FOUND); -+ } -+ -+ if (psTmpSNode->psStream->psNotifStream != NULL && -+ psTmpSNode->uiWRefCount == 1) -+ { -+ TLStreamMarkStreamOpen(psTmpSNode->psStream); -+ } -+ -+ /* The TL_SNODE->uiWRefCount governs the presence of this node in the -+ * TL_GLOBAL_DATA list i.e. when uiWRefCount falls to zero we try removing -+ * this node from the TL_GLOBAL_DATA list. 
Hence, is protected using the -+ * TL_GLOBAL_DATA lock and not TL_STREAM lock */ -+ psTmpSNode->uiWRefCount++; -+ -+ OSLockRelease (TLGGD()->hTLGDLock); -+ -+ /* Return the stream handle to the caller */ -+ *phStream = (IMG_HANDLE)psTmpSNode->psStream; -+ -+ PVR_DPF_RETURN_VAL(PVRSRV_OK); -+} -+ -+void -+TLStreamClose(IMG_HANDLE hStream) -+{ -+ PTL_STREAM psTmp; -+ IMG_BOOL bDestroyStream; -+ -+ PVR_DPF_ENTERED; -+ -+ if (NULL == hStream) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "TLStreamClose failed as NULL stream handler passed, nothing done.")); -+ PVR_DPF_RETURN; -+ } -+ -+ psTmp = (PTL_STREAM)hStream; -+ -+ /* Acquire TL_GLOBAL_DATA lock for updating the reference count as this will be required -+ * in-case this TL_STREAM node is to be deleted */ -+ OSLockAcquire (TLGGD()->hTLGDLock); -+ -+ /* Decrement write reference counter of the stream */ -+ psTmp->psNode->uiWRefCount--; -+ -+ if (0 != psTmp->psNode->uiWRefCount) -+ { -+ /* The stream is still being used in other context(s) do not destroy -+ * anything */ -+ -+ /* uiWRefCount == 1 means that stream was closed for write. Next -+ * close is pairing TLStreamCreate(). Send notification to indicate -+ * that no writer are connected to the stream any more. */ -+ if (psTmp->psNotifStream != NULL && psTmp->psNode->uiWRefCount == 1) -+ { -+ TLStreamMarkStreamClose(psTmp); -+ } -+ -+ OSLockRelease (TLGGD()->hTLGDLock); -+ PVR_DPF_RETURN; -+ } -+ else -+ { -+ /* Now we try removing this TL_STREAM from TL_GLOBAL_DATA */ -+ -+ if (psTmp->bWaitForEmptyOnDestroy) -+ { -+ /* We won't require the TL_STREAM lock to be acquired here for accessing its read -+ * and write offsets. REASON: We are here because there is no producer context -+ * referencing this TL_STREAM, hence its ui32Write offset won't be changed now. -+ * Also, the update of ui32Read offset is not protected by locks */ -+ while (psTmp->ui32Read != psTmp->ui32Write) -+ { -+ /* Release lock before sleeping */ -+ OSLockRelease (TLGGD()->hTLGDLock); -+ -+ OSEventObjectWaitTimeout(psTmp->hProducerEvent, EVENT_OBJECT_TIMEOUT_US); -+ -+ OSLockAcquire (TLGGD()->hTLGDLock); -+ -+ /* Ensure destruction of stream is still required */ -+ if (0 != psTmp->psNode->uiWRefCount) -+ { -+ OSLockRelease (TLGGD()->hTLGDLock); -+ PVR_DPF_RETURN; -+ } -+ } -+ } -+ -+ /* Try removing the stream from TL_GLOBAL_DATA */ -+ bDestroyStream = TLTryRemoveStreamAndFreeStreamNode (psTmp->psNode); -+ -+ OSLockRelease (TLGGD()->hTLGDLock); -+ -+ if (bDestroyStream) -+ { -+ /* Destroy the stream if it was removed from TL_GLOBAL_DATA */ -+ TLStreamDestroy (psTmp); -+ psTmp = NULL; -+ } -+ PVR_DPF_RETURN; -+ } -+} -+ -+/* -+ * DoTLSetPacketHeader -+ * -+ * Ensure that whenever we update a Header we always add the RESERVED field -+ */ -+static inline void DoTLSetPacketHeader(PVRSRVTL_PPACKETHDR, IMG_UINT32); -+static inline void -+DoTLSetPacketHeader(PVRSRVTL_PPACKETHDR pHdr, -+ IMG_UINT32 ui32Val) -+{ -+ PVR_ASSERT(((size_t)pHdr & (size_t)(PVRSRVTL_PACKET_ALIGNMENT - 1)) == 0); -+ -+ /* Check that this is a correctly aligned packet header. 
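Taken together, TLStreamCreate(), TLStreamWrite() (defined further down in this file) and TLStreamClose() give the usual producer pattern. The fragment below is a hedged kernel-side sketch that only uses signatures present in this patch; the stream name, size and surrounding context are illustrative, and it builds only inside the driver tree.

#include "tlstream.h"

/* Hypothetical producer: create a drop-newest stream, emit one record,
 * then drop the write reference again. */
static PVRSRV_ERROR ExampleEmitRecord(void)
{
    IMG_HANDLE hStream;
    IMG_UINT8 aui8Payload[] = { 0xde, 0xad, 0xbe, 0xef };
    PVRSRV_ERROR eError;

    eError = TLStreamCreate(&hStream,
                            "example_stream",          /* illustrative name */
                            4096,                      /* illustrative size */
                            TL_OPMODE_DROP_NEWER,      /* reject new data when full */
                            NULL, NULL,                /* no on-reader-open callback */
                            NULL, NULL);               /* no producer callback */
    if (eError != PVRSRV_OK)
        return eError;

    /* TLStreamWrite() is Reserve + copy + Commit in a single call. */
    eError = TLStreamWrite(hStream, aui8Payload, sizeof(aui8Payload));

    TLStreamClose(hStream);
    return eError;
}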
*/ -+ if (((size_t)pHdr & (size_t)(PVRSRVTL_PACKET_ALIGNMENT - 1)) != 0) -+ { -+ /* Should return an error because the header is misaligned */ -+ PVR_DPF((PVR_DBG_ERROR, "%s: Misaligned header @ %p", __func__, pHdr)); -+ pHdr->uiTypeSize = ui32Val; -+ } -+ else -+ { -+ pHdr->uiTypeSize = ui32Val; -+ pHdr->uiReserved = PVRSRVTL_PACKETHDR_RESERVED; -+ } -+} -+ -+static PVRSRV_ERROR -+DoTLStreamReserve(IMG_HANDLE hStream, -+ IMG_UINT8 **ppui8Data, -+ IMG_UINT32 ui32ReqSize, -+ IMG_UINT32 ui32ReqSizeMin, -+ PVRSRVTL_PACKETTYPE ePacketType, -+ IMG_UINT32* pui32AvSpace, -+ IMG_UINT32* pui32Flags) -+{ -+ PTL_STREAM psTmp; -+ IMG_UINT32 *pui32Buf, ui32LRead, ui32LWrite, ui32LPending, lReqSizeAligned, lReqSizeActual, ui32CreateFreeSpace; -+ IMG_UINT32 ui32InputFlags = 0; -+ IMG_INT pad, iFreeSpace; -+ IMG_UINT8 *pui8IncrRead = NULL; -+ PVRSRVTL_PPACKETHDR pHdr; -+ -+ PVR_DPF_ENTERED; -+ if (pui32AvSpace) *pui32AvSpace = 0; -+ if (pui32Flags) -+ { -+ ui32InputFlags = *pui32Flags; -+ *pui32Flags = 0; -+ } -+ -+ if (NULL == hStream) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ psTmp = (PTL_STREAM)hStream; -+ -+ /* Assert used as the packet type parameter is currently only provided -+ * by the TL APIs, not the calling client */ -+ PVR_ASSERT((PVRSRVTL_PACKETTYPE_UNDEF < ePacketType) && (PVRSRVTL_PACKETTYPE_LAST >= ePacketType)); -+ -+ /* The buffer is only used in "rounded" (aligned) chunks */ -+ lReqSizeAligned = PVRSRVTL_ALIGN(ui32ReqSize); -+ -+ /* Lock the stream before reading it's pending value, because if pending is set -+ * to NOTHING_PENDING, we update the pending value such that subsequent calls to -+ * this function from other context(s) fail with PVRSRV_ERROR_NOT_READY */ -+ OSLockAcquire (psTmp->hStreamWLock); -+ -+#if defined(TL_BUFFER_STATS) -+ /* If writing into an empty buffer, start recording time-to-full */ -+ if (psTmp->ui32Read == psTmp->ui32Write) -+ { -+ OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 1); -+ psTmp->ui32TimeStart = OSClockus(); -+ } -+ -+ if (ui32ReqSize > psTmp->ui32MaxReserveWatermark) -+ { -+ psTmp->ui32MaxReserveWatermark = ui32ReqSize; -+ } -+#endif -+ -+ /* Get a local copy of the stream buffer parameters */ -+ ui32LRead = psTmp->ui32Read; -+ ui32LWrite = psTmp->ui32Write; -+ ui32LPending = psTmp->ui32Pending; -+ -+ /* Multiple pending reserves are not supported. */ -+ if (NOTHING_PENDING != ui32LPending) -+ { -+ OSLockRelease (psTmp->hStreamWLock); -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY); -+ } -+ -+ if (psTmp->ui32MaxPacketSize < lReqSizeAligned) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "Requested Size: %u > TL Max Packet size: %u", lReqSizeAligned, psTmp->ui32MaxPacketSize)); -+ psTmp->ui32Pending = NOTHING_PENDING; -+ if (pui32AvSpace) -+ { -+ *pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin, psTmp->ui32MaxPacketSize); -+ if (*pui32AvSpace == 0 && psTmp->eOpMode == TL_OPMODE_DROP_OLDEST) -+ { -+ *pui32AvSpace = psTmp->ui32MaxPacketSize; -+ PVR_DPF((PVR_DBG_MESSAGE, "Opmode is Drop_Oldest, so Available Space changed to: %u", *pui32AvSpace)); -+ } -+ } -+ OSLockRelease (psTmp->hStreamWLock); -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED); -+ } -+ -+ /* Prevent other threads from entering this region before we are done -+ * updating the pending value and write offset (in case of padding). 
This -+ * is not exactly a lock but a signal for other contexts that there is a -+ * TLStreamCommit operation pending on this stream */ -+ psTmp->ui32Pending = 0; -+ -+ OSLockRelease (psTmp->hStreamWLock); -+ -+ /* If there is enough contiguous space following the current Write -+ * position then no padding is required */ -+ if ( psTmp->ui32Size -+ < ui32LWrite + lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) ) -+ { -+ pad = psTmp->ui32Size - ui32LWrite; -+ } -+ else -+ { -+ pad = 0; -+ } -+ -+ lReqSizeActual = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) + pad; -+ if (psTmp->bNoWrapPermanent) -+ { -+ iFreeSpace = bufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size); -+ } -+ else -+ { -+ iFreeSpace = circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size); -+ } -+ -+ if (iFreeSpace < (IMG_INT) lReqSizeActual) -+ { -+ /* If this is a blocking reserve and there is not enough space then wait. */ -+ if (psTmp->eOpMode == TL_OPMODE_BLOCK) -+ { -+ /* Stream create should stop us entering here when -+ * psTmp->bNoWrapPermanent is true as it does not make sense to -+ * block on permanent data streams. */ -+ PVR_ASSERT(psTmp->bNoWrapPermanent == IMG_FALSE); -+ while ( ( circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size) -+ <(IMG_INT) lReqSizeActual ) ) -+ { -+ /* The TL bridge is lockless now, so changing to OSEventObjectWait() */ -+ OSEventObjectWait(psTmp->hProducerEvent); -+ // update local copies. -+ ui32LRead = psTmp->ui32Read; -+ ui32LWrite = psTmp->ui32Write; -+ } -+ } -+ /* Data overwriting, also insert PACKETS_DROPPED flag into existing packet */ -+ else if (psTmp->eOpMode == TL_OPMODE_DROP_OLDEST) -+ { -+ OSLockAcquire(psTmp->hReadLock); -+ -+ while (psTmp->bReadPending) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "Waiting for the pending read operation to complete.")); -+ OSLockRelease(psTmp->hReadLock); -+#if defined(TL_BUFFER_STATS) -+ TL_COUNTER_INC(psTmp->ui32CntWriteWaits); -+#endif -+ (void) OSEventObjectWaitTimeout(psTmp->hProducerEvent, READ_PENDING_TIMEOUT_US); -+ OSLockAcquire(psTmp->hReadLock); -+ } -+ -+#if defined(TL_BUFFER_STATS) -+ TL_COUNTER_INC(psTmp->ui32CntWriteSuccesses); -+#endif -+ ui32LRead = psTmp->ui32Read; -+ -+ if ( circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size) -+ < (IMG_INT) lReqSizeActual ) -+ { -+ ui32CreateFreeSpace = 5 * (psTmp->ui32Size / 100); -+ if (ui32CreateFreeSpace < lReqSizeActual) -+ { -+ ui32CreateFreeSpace = lReqSizeActual; -+ } -+ -+ while (ui32CreateFreeSpace > (IMG_UINT32)circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size)) -+ { -+ pui8IncrRead = &psTmp->pbyBuffer[ui32LRead]; -+ ui32LRead += (sizeof(PVRSRVTL_PACKETHDR) + PVRSRVTL_ALIGN( GET_PACKET_DATA_LEN(pui8IncrRead) )); -+ -+ /* Check if buffer needs to wrap */ -+ if (ui32LRead >= psTmp->ui32Size) -+ { -+ ui32LRead = 0; -+ } -+ } -+ psTmp->ui32Read = ui32LRead; -+ pui8IncrRead = &psTmp->pbyBuffer[psTmp->ui32Read]; -+ -+ pHdr = GET_PACKET_HDR(pui8IncrRead); -+ DoTLSetPacketHeader(pHdr, SET_PACKETS_DROPPED(pHdr)); -+ } -+ /* else fall through as there is enough space now to write the data */ -+ -+ OSLockRelease(psTmp->hReadLock); -+ /* If we accepted a flag var set the OVERWRITE bit*/ -+ if (pui32Flags) *pui32Flags |= TL_FLAG_OVERWRITE_DETECTED; -+ } -+ /* No data overwriting, insert write_failed flag and return */ -+ else if (psTmp->eOpMode == TL_OPMODE_DROP_NEWER) -+ { -+ /* Caller should not try to use ppui8Data, -+ * NULLify to give user a chance of avoiding memory corruption */ -+ *ppui8Data = NULL; -+ -+ /* This flag should not be inserted two consecutive times, so -+ * 
check the last ui32 in case it was a packet drop packet. */ -+ pui32Buf = ui32LWrite -+ ? -+ (void *)&psTmp->pbyBuffer[ui32LWrite - sizeof(PVRSRVTL_PACKETHDR)] -+ : // Previous four bytes are not guaranteed to be a packet header... -+ (void *)&psTmp->pbyBuffer[psTmp->ui32Size - PVRSRVTL_PACKET_ALIGNMENT]; -+ -+ pHdr = GET_PACKET_HDR(pui32Buf); -+ if ( PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED -+ != -+ GET_PACKET_TYPE( pHdr ) && (ui32InputFlags & TL_FLAG_NO_WRITE_FAILED) == 0) -+ { -+ /* Insert size-stamped packet header */ -+ pui32Buf = (void *)&psTmp->pbyBuffer[ui32LWrite]; -+ pHdr = GET_PACKET_HDR(pui32Buf); -+ DoTLSetPacketHeader(pHdr, PVRSRVTL_SET_PACKET_WRITE_FAILED); -+ ui32LWrite += sizeof(PVRSRVTL_PACKETHDR); -+ ui32LWrite %= psTmp->ui32Size; -+ iFreeSpace -= sizeof(PVRSRVTL_PACKETHDR); -+ } -+ -+ OSLockAcquire (psTmp->hStreamWLock); -+ psTmp->ui32Write = ui32LWrite; -+ psTmp->ui32Pending = NOTHING_PENDING; -+ OSLockRelease (psTmp->hStreamWLock); -+ -+ if (pui32AvSpace) -+ { -+ *pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin, psTmp->ui32MaxPacketSize); -+ } -+ -+ /* Inform call of permanent stream misuse, no space left, -+ * the size of the stream will need to be increased. */ -+ if (psTmp->bNoWrapPermanent) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE); -+ } -+ -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_FULL); -+ } -+ } -+ -+ /* The easy case: buffer has enough space to hold the requested packet (data + header) */ -+ -+ /* Should we treat the buffer as non-circular buffer? */ -+ if (psTmp->bNoWrapPermanent) -+ { -+ iFreeSpace = bufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size); -+ } -+ else -+ { -+ iFreeSpace = circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size); -+ } -+ -+ if (iFreeSpace >= (IMG_INT) lReqSizeActual) -+ { -+ if (pad) -+ { -+ /* Inserting padding packet. 
*/ -+ pui32Buf = (void *)&psTmp->pbyBuffer[ui32LWrite]; -+ pHdr = GET_PACKET_HDR(pui32Buf); -+ DoTLSetPacketHeader(pHdr, -+ PVRSRVTL_SET_PACKET_PADDING(pad-sizeof(PVRSRVTL_PACKETHDR))); -+ -+ /* CAUTION: the used pad value should always result in a properly -+ * aligned ui32LWrite pointer, which in this case is 0 */ -+ ui32LWrite = (ui32LWrite + pad) % psTmp->ui32Size; -+ /* Detect unaligned pad value */ -+ PVR_ASSERT(ui32LWrite == 0); -+ } -+ /* Insert size-stamped packet header */ -+ pui32Buf = (void *) &psTmp->pbyBuffer[ui32LWrite]; -+ -+ pHdr = GET_PACKET_HDR(pui32Buf); -+ DoTLSetPacketHeader(pHdr, -+ PVRSRVTL_SET_PACKET_HDR(ui32ReqSize, ePacketType)); -+ -+ /* return the next position in the buffer to the user */ -+ *ppui8Data = &psTmp->pbyBuffer[ ui32LWrite+sizeof(PVRSRVTL_PACKETHDR) ]; -+ -+ /* update pending offset: size stamp + data */ -+ ui32LPending = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR); -+ } -+ else -+ { -+ OSLockAcquire (psTmp->hStreamWLock); -+ psTmp->ui32Pending = NOTHING_PENDING; -+ OSLockRelease (psTmp->hStreamWLock); -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); -+ } -+ -+ /* Acquire stream lock for updating stream parameters */ -+ OSLockAcquire (psTmp->hStreamWLock); -+ psTmp->ui32Write = ui32LWrite; -+ psTmp->ui32Pending = ui32LPending; -+ OSLockRelease (psTmp->hStreamWLock); -+ -+#if defined(TL_BUFFER_STATS) -+ TL_COUNTER_INC(psTmp->ui32CntNumWriteSuccess); -+#endif -+ -+ PVR_DPF_RETURN_OK; -+} -+ -+PVRSRV_ERROR -+TLStreamReserve(IMG_HANDLE hStream, -+ IMG_UINT8 **ppui8Data, -+ IMG_UINT32 ui32Size) -+{ -+ return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32Size, PVRSRVTL_PACKETTYPE_DATA, NULL, NULL); -+} -+ -+PVRSRV_ERROR -+TLStreamReserve2(IMG_HANDLE hStream, -+ IMG_UINT8 **ppui8Data, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32SizeMin, -+ IMG_UINT32* pui32Available, -+ IMG_BOOL* pbIsReaderConnected) -+{ -+ PVRSRV_ERROR eError; -+ -+ eError = DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32SizeMin, PVRSRVTL_PACKETTYPE_DATA, pui32Available, NULL); -+ if (eError != PVRSRV_OK && pbIsReaderConnected != NULL) -+ { -+ *pbIsReaderConnected = TLStreamIsOpenForReading(hStream); -+ } -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+TLStreamReserveReturnFlags(IMG_HANDLE hStream, -+ IMG_UINT8 **ppui8Data, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32* pui32Flags) -+{ -+ return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32Size, PVRSRVTL_PACKETTYPE_DATA, NULL, pui32Flags); -+} -+ -+PVRSRV_ERROR -+TLStreamCommit(IMG_HANDLE hStream, IMG_UINT32 ui32ReqSize) -+{ -+ PTL_STREAM psTmp; -+ IMG_UINT32 ui32LRead, ui32OldWrite, ui32LWrite, ui32LPending; -+ PVRSRV_ERROR eError; -+ -+#if defined(TL_BUFFER_STATS) -+ IMG_UINT32 ui32UnreadBytes; -+#endif -+ -+ PVR_DPF_ENTERED; -+ -+ if (NULL == hStream) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ psTmp = (PTL_STREAM)hStream; -+ -+ /* Get a local copy of the stream buffer parameters */ -+ ui32LRead = psTmp->ui32Read; -+ ui32LWrite = psTmp->ui32Write; -+ ui32LPending = psTmp->ui32Pending; -+ -+ ui32OldWrite = ui32LWrite; -+ -+ // Space in buffer is aligned -+ ui32ReqSize = PVRSRVTL_ALIGN(ui32ReqSize) + sizeof(PVRSRVTL_PACKETHDR); -+ -+ /* Check pending reserver and ReqSize + packet header size. */ -+ if ((ui32LPending == NOTHING_PENDING) || (ui32ReqSize > ui32LPending)) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE); -+ } -+ -+ /* Update pointer to written data. 
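The padding decision in the reserve path above (a packet must never straddle the end of the buffer) can be illustrated in isolation. Standalone sketch; the 8-byte header size and alignment are assumed values used only for the printout.

#include <stdio.h>

#define HDR_SIZE   8u    /* assumed sizeof(PVRSRVTL_PACKETHDR), for illustration */
#define ALIGN8(x)  (((x) + 7u) & ~7u)

/* Mirrors the reserve-path decision: if header + aligned data would run past
 * the end of the buffer, a padding packet fills the tail and the write
 * position wraps to offset 0. */
static void plan_reserve(unsigned size, unsigned write, unsigned req)
{
    unsigned aligned = ALIGN8(req);
    unsigned pad = 0;

    if (size < write + aligned + HDR_SIZE)
        pad = size - write;            /* tail too small: pad up to the end */

    printf("size=%u write=%u req=%u -> pad=%u, data lands at %u\n",
           size, write, req, pad, (write + pad) % size);
}

int main(void)
{
    plan_reserve(1024, 100, 64);       /* fits in place, no padding */
    plan_reserve(1024, 1000, 64);      /* would straddle the end: pad 24, wrap to 0 */
    return 0;
}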
*/ -+ ui32LWrite = (ui32LWrite + ui32ReqSize) % psTmp->ui32Size; -+ -+ /* and reset LPending to 0 since data are now submitted */ -+ ui32LPending = NOTHING_PENDING; -+ -+#if defined(TL_BUFFER_STATS) -+ /* Calculate new number of bytes unread */ -+ if (ui32LWrite > ui32LRead) -+ { -+ ui32UnreadBytes = (ui32LWrite-ui32LRead); -+ } -+ else if (ui32LWrite < ui32LRead) -+ { -+ ui32UnreadBytes = (psTmp->ui32Size-ui32LRead+ui32LWrite); -+ } -+ else -+ { /* else equal, ignore */ -+ ui32UnreadBytes = 0; -+ } -+ -+ /* Calculate high water mark for debug purposes */ -+ if (ui32UnreadBytes > psTmp->ui32BufferUt) -+ { -+ psTmp->ui32BufferUt = ui32UnreadBytes; -+ } -+#endif -+ -+ /* Memory barrier required to ensure prior data written by writer is -+ * flushed from WC buffer to main memory. */ -+ OSWriteMemoryBarrier(NULL); -+ -+ /* Acquire stream lock to ensure other context(s) (if any) -+ * wait on the lock (in DoTLStreamReserve) for consistent values -+ * of write offset and pending value */ -+ OSLockAcquire (psTmp->hStreamWLock); -+ -+ /* Update stream buffer parameters to match local copies */ -+ psTmp->ui32Write = ui32LWrite; -+ psTmp->ui32Pending = ui32LPending; -+ -+ /* Ensure write pointer is flushed */ -+ OSWriteMemoryBarrier(&psTmp->ui32Write); -+ -+ TL_COUNTER_ADD(psTmp->ui32ProducerByteCount, ui32ReqSize); -+ TL_COUNTER_INC(psTmp->ui32NumCommits); -+ -+#if defined(TL_BUFFER_STATS) -+ /* IF there has been no-reader since first reserve on an empty-buffer, -+ * AND current utilisation is considerably high (90%), calculate the -+ * time taken to fill up the buffer */ -+ if ((OSAtomicRead(&psTmp->bNoReaderSinceFirstReserve) == 1) && -+ (TLStreamGetUT(psTmp) >= 90 * psTmp->ui32Size/100)) -+ { -+ IMG_UINT32 ui32TimeToFullInUs = OSClockus() - psTmp->ui32TimeStart; -+ if (psTmp->ui32MinTimeToFullInUs > ui32TimeToFullInUs) -+ { -+ psTmp->ui32MinTimeToFullInUs = ui32TimeToFullInUs; -+ } -+ /* Following write ensures ui32MinTimeToFullInUs doesn't lose its -+ * real (expected) value in case there is no reader until next Commit call */ -+ OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 0); -+ } -+#endif -+ -+ if (!psTmp->bNoSignalOnCommit) -+ { -+ /* If we have transitioned from an empty buffer to a non-empty buffer, we -+ * must signal possibly waiting consumer. BUT, let the signal be "deferred" -+ * until buffer is at least 'ui32ThresholdUsageForSignal' bytes full. 
This -+ * avoids a race between OSEventObjectSignal and OSEventObjectWaitTimeout -+ * (in TLServerAcquireDataKM), where a "signal" might happen before "wait", -+ * resulting into signal being lost and stream-reader waiting even though -+ * buffer is no-more empty */ -+ if (ui32OldWrite == ui32LRead) -+ { -+ psTmp->bSignalPending = IMG_TRUE; -+ } -+ -+ if (psTmp->bSignalPending && (TLStreamGetUT(psTmp) >= psTmp->ui32ThresholdUsageForSignal)) -+ { -+ TL_COUNTER_INC(psTmp->ui32SignalsSent); -+ psTmp->bSignalPending = IMG_FALSE; -+ -+ /* Signal consumers that may be waiting */ -+ eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj); -+ if (eError != PVRSRV_OK) -+ { -+ OSLockRelease (psTmp->hStreamWLock); -+ PVR_DPF_RETURN_RC(eError); -+ } -+ } -+ else -+ { -+ TL_COUNTER_INC(psTmp->ui32SignalNotSent); -+ } -+ } -+ OSLockRelease (psTmp->hStreamWLock); -+ -+ PVR_DPF_RETURN_OK; -+} -+ -+PVRSRV_ERROR -+TLStreamWrite(IMG_HANDLE hStream, IMG_UINT8 *pui8Src, IMG_UINT32 ui32Size) -+{ -+ IMG_BYTE *pbyDest = NULL; -+ PVRSRV_ERROR eError; -+ -+ PVR_DPF_ENTERED; -+ -+ if (NULL == hStream) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ eError = TLStreamReserve(hStream, &pbyDest, ui32Size); -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF_RETURN_RC(eError); -+ } -+ else -+ { -+ OSDeviceMemCopy((void*)pbyDest, (void*)pui8Src, ui32Size); -+ eError = TLStreamCommit(hStream, ui32Size); -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF_RETURN_RC(eError); -+ } -+ } -+ -+ PVR_DPF_RETURN_OK; -+} -+ -+PVRSRV_ERROR -+TLStreamWriteRetFlags(IMG_HANDLE hStream, IMG_UINT8 *pui8Src, IMG_UINT32 ui32Size, IMG_UINT32 *pui32Flags){ -+ IMG_BYTE *pbyDest = NULL; -+ PVRSRV_ERROR eError; -+ -+ PVR_DPF_ENTERED; -+ -+ if (NULL == hStream) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ eError = TLStreamReserveReturnFlags(hStream, &pbyDest, ui32Size, pui32Flags); -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF_RETURN_RC(eError); -+ } -+ else -+ { -+ OSDeviceMemCopy((void*)pbyDest, (void*)pui8Src, ui32Size); -+ eError = TLStreamCommit(hStream, ui32Size); -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF_RETURN_RC(eError); -+ } -+ } -+ -+ PVR_DPF_RETURN_OK; -+} -+ -+void TLStreamInfo(IMG_HANDLE hStream, PTL_STREAM_INFO psInfo) -+{ -+ IMG_DEVMEM_SIZE_T actual_req_size; -+ IMG_DEVMEM_ALIGN_T align = 4; /* Low fake value so the real value can be obtained */ -+ -+ actual_req_size = 2; -+ /* ignore error as OSGetPageShift() should always return correct value */ -+ (void) DevmemExportalignAdjustSizeAndAlign(OSGetPageShift(), &actual_req_size, &align); -+ -+ psInfo->headerSize = sizeof(PVRSRVTL_PACKETHDR); -+ psInfo->minReservationSize = sizeof(IMG_UINT32); -+ psInfo->pageSize = (IMG_UINT32)(actual_req_size); -+ psInfo->pageAlign = (IMG_UINT32)(align); -+ psInfo->maxTLpacketSize = ((PTL_STREAM)hStream)->ui32MaxPacketSize; -+} -+ -+PVRSRV_ERROR -+TLStreamMarkEOS(IMG_HANDLE psStream, IMG_BOOL bRemoveOld) -+{ -+ PTL_STREAM psTmp; -+ PVRSRV_ERROR eError; -+ IMG_UINT8* pData; -+ -+ PVR_DPF_ENTERED; -+ -+ if (NULL == psStream) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ psTmp = (PTL_STREAM)psStream; -+ -+ /* Do not support EOS packets on permanent stream buffers at present, -+ * EOS is best used with streams where data is consumed. 
*/ -+ if (psTmp->bNoWrapPermanent) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE); -+ } -+ -+ if (bRemoveOld) -+ { -+ eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS_REMOVEOLD, NULL, NULL); -+ } -+ else -+ { -+ eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS, NULL, NULL); -+ } -+ -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF_RETURN_RC(eError); -+ } -+ -+ PVR_DPF_RETURN_RC(TLStreamCommit(psStream, 0)); -+} -+ -+ -+static PVRSRV_ERROR -+_TLStreamMarkOC(IMG_HANDLE hStream, PVRSRVTL_PACKETTYPE ePacketType) -+{ -+ PVRSRV_ERROR eError; -+ PTL_STREAM psStream = hStream; -+ IMG_UINT32 ui32Size; -+ IMG_UINT8 *pData; -+ -+ PVR_DPF_ENTERED; -+ -+ if (NULL == psStream) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ -+ if (NULL == psStream->psNotifStream) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_NOTIF_STREAM); -+ } -+ -+ ui32Size = OSStringLength(psStream->szName) + 1; -+ -+ eError = DoTLStreamReserve(psStream->psNotifStream, &pData, ui32Size, -+ ui32Size, ePacketType, NULL, NULL); -+ if (PVRSRV_OK != eError) -+ { -+ PVR_DPF_RETURN_RC(eError); -+ } -+ -+ OSDeviceMemCopy(pData, psStream->szName, ui32Size); -+ -+ PVR_DPF_RETURN_RC(TLStreamCommit(psStream->psNotifStream, ui32Size)); -+} -+ -+PVRSRV_ERROR -+TLStreamMarkStreamOpen(IMG_HANDLE psStream) -+{ -+ return _TLStreamMarkOC(psStream, PVRSRVTL_PACKETTYPE_STREAM_OPEN_FOR_WRITE); -+} -+ -+PVRSRV_ERROR -+TLStreamMarkStreamClose(IMG_HANDLE psStream) -+{ -+ return _TLStreamMarkOC(psStream, PVRSRVTL_PACKETTYPE_STREAM_CLOSE_FOR_WRITE); -+} -+ -+PVRSRV_ERROR -+TLStreamSync(IMG_HANDLE psStream) -+{ -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PTL_STREAM psTmp; -+ -+ PVR_DPF_ENTERED; -+ -+ if (NULL == psStream) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); -+ } -+ psTmp = (PTL_STREAM)psStream; -+ -+ /* If read client exists and has opened stream in blocking mode, -+ * signal when data is available to read. */ -+ if (psTmp->psNode->psRDesc && -+ (!(psTmp->psNode->psRDesc->ui32Flags & PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING)) && -+ psTmp->ui32Read != psTmp->ui32Write) -+ { -+ TL_COUNTER_INC(psTmp->ui32ManSyncs); -+ eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj); -+ } -+ -+ PVR_DPF_RETURN_RC(eError); -+} -+ -+IMG_BOOL -+TLStreamIsOpenForReading(IMG_HANDLE hStream) -+{ -+ PTL_STREAM psTmp; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(hStream); -+ psTmp = (PTL_STREAM)hStream; -+ -+ PVR_DPF_RETURN_VAL(psTmp->psNode->psRDesc != NULL); -+} -+ -+IMG_BOOL -+TLStreamOutOfData(IMG_HANDLE hStream) -+{ -+ PTL_STREAM psTmp; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(hStream); -+ psTmp = (PTL_STREAM)hStream; -+ -+ /* If both pointers are equal then the buffer is empty */ -+ PVR_DPF_RETURN_VAL(psTmp->ui32Read == psTmp->ui32Write); -+} -+ -+ -+PVRSRV_ERROR -+TLStreamResetProducerByteCount(IMG_HANDLE hStream, IMG_UINT32 ui32Value) -+{ -+ PTL_STREAM psTmp; -+ IMG_UINT32 ui32LRead, ui32LWrite; -+ PVRSRV_ERROR eErr = PVRSRV_OK; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(hStream); -+ psTmp = (PTL_STREAM)hStream; -+ ui32LRead = psTmp->ui32Read; -+ ui32LWrite = psTmp->ui32Write; -+ -+ if (ui32LRead != ui32LWrite) -+ { -+ eErr = PVRSRV_ERROR_STREAM_MISUSE; -+ } -+#if defined(TL_BUFFER_STATS) -+ psTmp->ui32ProducerByteCount = ui32Value; -+#else -+ PVR_UNREFERENCED_PARAMETER(ui32Value); -+#endif -+ PVR_DPF_RETURN_RC(eErr); -+} -+/* -+ * Internal stream APIs to server part of Transport Layer, declared in -+ * header tlintern.h. 
Direct pointers to stream objects are used here as -+ * these functions are internal. -+ */ -+IMG_UINT32 -+TLStreamAcquireReadPos(PTL_STREAM psStream, -+ IMG_BOOL bDisableCallback, -+ IMG_UINT32* puiReadOffset) -+{ -+ IMG_UINT32 uiReadLen = 0; -+ IMG_UINT32 ui32LRead, ui32LWrite; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psStream); -+ PVR_ASSERT(puiReadOffset); -+ -+ if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST) -+ { -+ if (!OSTryLockAcquire(psStream->hReadLock)) -+ { -+ /* -+ * This is a normal event when the system is under load. -+ * An example of how to produce this is to run testrunner / -+ * regression/ddk_test_seq2_host_fw_mem.conf with HTB / pvrhtbd -+ * configured as -+ * -+ * # pvrdebug -log trace -loggroups main,pow,debug \ -+ * -hostloggroups main,ctrl,sync,brg -hostlogtype dropoldest -+ * -+ * # pvrhtbd -hostloggroups main,ctrl,sync,brg -+ * -+ * We will see a small number of these collisions but as this is -+ * an expected calling path, and an expected return code, we drop -+ * the severity to just be a debug MESSAGE instead of WARNING -+ */ -+ PVR_DPF((PVR_DBG_MESSAGE, -+ "%s: Read lock on stream '%s' is acquired by some writer, " -+ "hence reader failed to acquire read lock.", __func__, -+ psStream->szName)); -+#if defined(TL_BUFFER_STATS) -+ TL_COUNTER_INC(psStream->ui32CntReadFails); -+#endif -+ PVR_DPF_RETURN_VAL(0); -+ } -+ } -+ -+#if defined(TL_BUFFER_STATS) -+ TL_COUNTER_INC(psStream->ui32CntReadSuccesses); -+#endif -+ -+ /* Grab a local copy */ -+ ui32LRead = psStream->ui32Read; -+ ui32LWrite = psStream->ui32Write; -+ -+ if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST) -+ { -+ psStream->bReadPending = IMG_TRUE; -+ OSLockRelease(psStream->hReadLock); -+ } -+ -+ /* No data available and CB defined - try and get data */ -+ if ((ui32LRead == ui32LWrite) && psStream->pfProducerCallback && !bDisableCallback) -+ { -+ PVRSRV_ERROR eRc; -+ IMG_UINT32 ui32Resp = 0; -+ -+ eRc = ((TL_STREAM_SOURCECB)psStream->pfProducerCallback)(psStream, TL_SOURCECB_OP_CLIENT_EOS, -+ &ui32Resp, psStream->pvProducerUserData); -+ PVR_LOG_IF_ERROR(eRc, "TLStream->pfProducerCallback"); -+ -+ ui32LWrite = psStream->ui32Write; -+ } -+ -+ /* No data available... */ -+ if (ui32LRead == ui32LWrite) -+ { -+ if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST) -+ { -+ psStream->bReadPending = IMG_FALSE; -+ } -+ PVR_DPF_RETURN_VAL(0); -+ } -+ -+#if defined(TL_BUFFER_STATS) -+ /* The moment reader knows it will see a non-zero data, it marks its presence in writer's eyes */ -+ OSAtomicWrite (&psStream->bNoReaderSinceFirstReserve, 0); -+#endif -+ -+ /* Data is available to read... */ -+ *puiReadOffset = ui32LRead; -+ -+ /*PVR_DPF((PVR_DBG_VERBOSE, -+ * "TLStreamAcquireReadPos Start before: Write:%d, Read:%d, size:%d", -+ * ui32LWrite, ui32LRead, psStream->ui32Size)); -+ */ -+ -+ if (ui32LRead > ui32LWrite) -+ { /* CB has wrapped around. 
*/ -+ PVR_ASSERT(!psStream->bNoWrapPermanent); -+ /* Return the first contiguous piece of memory, ie [ReadLen,EndOfBuffer] -+ * and let a subsequent AcquireReadPos read the rest of the Buffer */ -+ /*PVR_DPF((PVR_DBG_VERBOSE, "TLStreamAcquireReadPos buffer has wrapped"));*/ -+ uiReadLen = psStream->ui32Size - ui32LRead; -+ TL_COUNTER_INC(psStream->ui32AcquireRead2); -+ } -+ else -+ { /* CB has not wrapped */ -+ uiReadLen = ui32LWrite - ui32LRead; -+ TL_COUNTER_INC(psStream->ui32AcquireRead1); -+ } -+ -+ PVR_DPF_RETURN_VAL(uiReadLen); -+} -+ -+PVRSRV_ERROR -+TLStreamAdvanceReadPos(PTL_STREAM psStream, -+ IMG_UINT32 uiReadLen, -+ IMG_UINT32 uiOrigReadLen) -+{ -+ IMG_UINT32 uiNewReadPos; -+ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psStream); -+ -+ /* -+ * This API does not use Read lock as 'bReadPending' is sufficient -+ * to keep Read index safe by preventing a write from updating the -+ * index and 'bReadPending' itself is safe as it can only be modified -+ * by readers and there can be only one reader in action at a time. -+ */ -+ -+ /* Update the read offset by the length provided in a circular manner. -+ * Assuming the update to be atomic hence, avoiding use of locks -+ */ -+ uiNewReadPos = (psStream->ui32Read + uiReadLen) % psStream->ui32Size; -+ -+ /* Must validate length is on a packet boundary, for -+ * TLReleaseDataLess calls. -+ */ -+ if (uiReadLen != uiOrigReadLen) /* buffer not empty */ -+ { -+ PVRSRVTL_PPACKETHDR psHdr = GET_PACKET_HDR(psStream->pbyBuffer+uiNewReadPos); -+ PVRSRVTL_PACKETTYPE eType = GET_PACKET_TYPE(psHdr); -+ -+ if ((psHdr->uiReserved != PVRSRVTL_PACKETHDR_RESERVED) || -+ (eType == PVRSRVTL_PACKETTYPE_UNDEF) || -+ (eType >= PVRSRVTL_PACKETTYPE_LAST)) -+ { -+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_ALIGNMENT); -+ } -+ /* else OK, on a packet boundary */ -+ } -+ /* else no check needed */ -+ -+ psStream->ui32Read = uiNewReadPos; -+ -+ if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST) -+ { -+ psStream->bReadPending = IMG_FALSE; -+ } -+ -+ /* notify reserves that may be pending */ -+ /* The producer event object is used to signal the StreamReserve if the TL -+ * Buffer is in blocking mode and is full. -+ * Previously this event was only signalled if the buffer was created in -+ * blocking mode. Since the buffer mode can now change dynamically the event -+ * is signalled every time to avoid any potential race where the signal is -+ * required, but not produced. -+ */ -+ { -+ PVRSRV_ERROR eError; -+ eError = OSEventObjectSignal(psStream->hProducerEventObj); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_WARNING, -+ "Error in TLStreamAdvanceReadPos: OSEventObjectSignal returned:%u", -+ eError)); -+ /* We've failed to notify the producer event. This means there may -+ * be a delay in generating more data to be consumed until the next -+ * Write() generating action occurs. -+ */ -+ } -+ } -+ -+ PVR_DPF((PVR_DBG_VERBOSE, -+ "TLStreamAdvanceReadPos Read now at: %d", -+ psStream->ui32Read)); -+ PVR_DPF_RETURN_OK; -+} -+ -+void -+TLStreamResetReadPos(PTL_STREAM psStream) -+{ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psStream); -+ -+ if (psStream->bNoWrapPermanent) -+ { -+ -+ /* Update the read offset by the length provided in a circular manner. 
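As the wrapped-buffer branch above notes, a single acquire only ever returns the contiguous region up to the end of the buffer; a second acquire picks up the remainder after the wrap. A standalone model of that reader loop (indices only, no driver types):

#include <stdio.h>

/* Contiguous readable span starting at rd; mirrors TLStreamAcquireReadPos(). */
static unsigned acquire(unsigned rd, unsigned wr, unsigned size)
{
    if (rd == wr)  return 0;           /* empty */
    if (rd > wr)   return size - rd;   /* wrapped: read up to end of buffer first */
    return wr - rd;
}

int main(void)
{
    unsigned size = 1024, rd = 900, wr = 100;   /* writer has wrapped */
    unsigned len;

    while ((len = acquire(rd, wr, size)) != 0)
    {
        printf("consume %u bytes at offset %u\n", len, rd);
        rd = (rd + len) % size;        /* mirrors TLStreamAdvanceReadPos() */
    }
    return 0;
}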
-+ * Assuming the update to be atomic hence, avoiding use of locks */ -+ psStream->ui32Read = 0; -+ -+ PVR_DPF((PVR_DBG_VERBOSE, -+ "TLStreamResetReadPos Read now at: %d", -+ psStream->ui32Read)); -+ } -+ else -+ { -+ /* else for other stream types this is a no-op */ -+ PVR_DPF((PVR_DBG_VERBOSE, -+ "No need to reset read position of circular tlstream")); -+ } -+ -+ PVR_DPF_RETURN; -+} -+ -+void -+TLStreamDestroy (PTL_STREAM psStream) -+{ -+ PVR_ASSERT (psStream); -+ -+ OSLockDestroy (psStream->hStreamWLock); -+ OSLockDestroy (psStream->hReadLock); -+ -+ OSEventObjectClose(psStream->hProducerEvent); -+ OSEventObjectDestroy(psStream->hProducerEventObj); -+ -+ TLFreeSharedMem(psStream); -+ OSFreeMem(psStream); -+} -+ -+DEVMEM_MEMDESC* -+TLStreamGetBufferPointer(PTL_STREAM psStream) -+{ -+ PVR_DPF_ENTERED; -+ -+ PVR_ASSERT(psStream); -+ -+ PVR_DPF_RETURN_VAL(psStream->psStreamMemDesc); -+} -diff --git a/drivers/gpu/drm/img-rogue/tlstream.h b/drivers/gpu/drm/img-rogue/tlstream.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/tlstream.h -@@ -0,0 +1,600 @@ -+/*************************************************************************/ /*! -+@File -+@Title Transport Layer kernel side API. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description TL provides driver components with a way to copy data from kernel -+ space to user space (e.g. screen/file). -+ -+ Data can be passed to the Transport Layer through the -+ TL Stream (kernel space) API interface. -+ -+ The buffer provided to every stream is a modified version of a -+ circular buffer. Which CB version is created is specified by -+ relevant flags when creating a stream. Currently two types -+ of buffer are available: -+ - TL_OPMODE_DROP_NEWER: -+ When the buffer is full, incoming data are dropped -+ (instead of overwriting older data) and a marker is set -+ to let the user know that data have been lost. -+ - TL_OPMODE_BLOCK: -+ When the circular buffer is full, reserve/write calls block -+ until enough space is freed. -+ - TL_OPMODE_DROP_OLDEST: -+ When the circular buffer is full, the oldest packets in the -+ buffer are dropped and a flag is set in header of next packet -+ to let the user know that data have been lost. -+ -+ All size/space requests are in bytes. However, the actual -+ implementation uses native word sizes (i.e. 4 byte aligned). -+ -+ The user does not need to provide space for the stream buffer -+ as the TL handles memory allocations and usage. -+ -+ Inserting data to a stream's buffer can be done either: -+ - by using TLReserve/TLCommit: User is provided with a buffer -+ to write data to. -+ - or by using TLWrite: User provides a buffer with -+ data to be committed. The TL -+ copies the data from the -+ buffer into the stream buffer -+ and returns. -+ Users should be aware that there are implementation overheads -+ associated with every stream buffer. If you find that less -+ data are captured than expected then try increasing the -+ stream buffer size or use TLInfo to obtain buffer parameters -+ and calculate optimum required values at run time. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+#ifndef TLSTREAM_H -+#define TLSTREAM_H -+ -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+#include "pvrsrv_tlcommon.h" -+#include "device.h" -+ -+/*! Extract TL stream opmode from the given stream create flags. -+ * Last 3 bits of streamFlag is used for storing opmode, hence -+ * opmode mask is set as following. */ -+#define TL_OPMODE_MASK 0x7 -+ -+/* -+ * NOTE: This enum is used to directly access the HTB_OPMODE_xxx values -+ * within htbserver.c. -+ * As such we *MUST* keep the values matching in order of declaration. -+ */ -+/*! Opmode specifying circular buffer behaviour */ -+typedef enum -+{ -+ /*! Undefined operation mode */ -+ TL_OPMODE_UNDEF = 0, -+ -+ /*! Reject new data if the buffer is full, producer may then decide to -+ * drop the data or retry after some time. */ -+ TL_OPMODE_DROP_NEWER, -+ -+ /*! When buffer is full, advance the tail/read position to accept the new -+ * reserve call (size permitting), effectively overwriting the oldest -+ * data in the circular buffer. Not supported yet. */ -+ TL_OPMODE_DROP_OLDEST, -+ -+ /*! Block Reserve (subsequently Write) calls if there is not enough space -+ * until some space is freed via a client read operation. 
*/ -+ TL_OPMODE_BLOCK, -+ -+ /*!< For error checking */ -+ TL_OPMODE_LAST -+ -+} TL_OPMODE; -+ -+typedef enum { -+ /* Enum to be used in conjunction with new Flags feature */ -+ -+ /* Flag set when Drop Oldest is set and packets have been dropped */ -+ TL_FLAG_OVERWRITE_DETECTED = (1 << 0), -+ /* Prevents DoTLStreamReserve() from adding from injecting -+ * PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED */ -+ TL_FLAG_NO_WRITE_FAILED = (1 << 1), -+} TL_Flags; -+ -+static_assert(TL_OPMODE_LAST <= TL_OPMODE_MASK, -+ "TL_OPMODE_LAST must not exceed TL_OPMODE_MASK"); -+ -+/*! Flags specifying stream behaviour */ -+/*! Do not destroy stream if there still are data that have not been -+ * copied in user space. Block until the stream is emptied. */ -+#define TL_FLAG_FORCE_FLUSH (1U<<8) -+/*! Do not signal consumers on commit automatically when the stream buffer -+ * transitions from empty to non-empty. Producer responsible for signal when -+ * it chooses. */ -+#define TL_FLAG_NO_SIGNAL_ON_COMMIT (1U<<9) -+ -+/*! When a stream has this property it never wraps around and -+ * overwrites existing data, hence it is a fixed size persistent -+ * buffer, data written is permanent. Producers need to ensure -+ * the buffer is big enough for their needs. -+ * When a stream is opened for reading the client will always -+ * find the read position at the start of the buffer/data. */ -+#define TL_FLAG_PERMANENT_NO_WRAP (1U<<10) -+ -+/*! Defer allocation of stream's shared memory until first open. */ -+#define TL_FLAG_ALLOCATE_ON_FIRST_OPEN (1U<<11) -+ -+/*! Structure used to pass internal TL stream sizes information to users.*/ -+typedef struct _TL_STREAM_INFO_ -+{ -+ IMG_UINT32 headerSize; /*!< Packet header size in bytes */ -+ IMG_UINT32 minReservationSize; /*!< Minimum data size reserved in bytes */ -+ IMG_UINT32 pageSize; /*!< Page size in bytes */ -+ IMG_UINT32 pageAlign; /*!< Page alignment in bytes */ -+ IMG_UINT32 maxTLpacketSize; /*! Max allowed TL packet size*/ -+} TL_STREAM_INFO, *PTL_STREAM_INFO; -+ -+/*! Callback operations or notifications that a stream producer may handle -+ * when requested by the Transport Layer. -+ */ -+#define TL_SOURCECB_OP_CLIENT_EOS 0x01 /*!< Client has reached end of stream, -+ * can anymore data be supplied? -+ * ui32Resp ignored in this operation */ -+ -+/*! Function pointer type for the callback handler into the "producer" code -+ * that writes data to the TL stream. Producer should handle the notification -+ * or operation supplied in ui32ReqOp on stream hStream. The -+ * Operations and notifications are defined above in TL_SOURCECB_OP */ -+typedef PVRSRV_ERROR (*TL_STREAM_SOURCECB)(IMG_HANDLE hStream, -+ IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser); -+ -+typedef void (*TL_STREAM_ONREADEROPENCB)(void *pvArg); -+ -+/*************************************************************************/ /*! -+ @Function TLAllocSharedMemIfNull -+ @Description Allocates shared memory for the stream. -+ @Input hStream Stream handle. -+ @Return eError Internal services call returned eError error -+ number. -+ @Return PVRSRV_OK -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+TLAllocSharedMemIfNull(IMG_HANDLE hStream); -+ -+/*************************************************************************/ /*! -+ @Function TLFreeSharedMem -+ @Description Frees stream's shared memory. -+ @Input phStream Stream handle. 
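Since the op mode lives in the low three bits (TL_OPMODE_MASK) and the behaviour flags start at bit 8, a ui32StreamFlags value is simply the OR of one mode and any flags. Standalone sketch; the constant values are copied from the header above, and the combination chosen is only an example.

#include <stdio.h>

/* Values copied from tlstream.h above. */
#define TL_OPMODE_MASK              0x7u
#define TL_OPMODE_DROP_OLDEST       2u
#define TL_FLAG_NO_SIGNAL_ON_COMMIT (1u << 9)
#define TL_FLAG_PERMANENT_NO_WRAP   (1u << 10)

int main(void)
{
    /* Compose flags the way TLStreamCreate() expects to receive them. */
    unsigned flags = TL_OPMODE_DROP_OLDEST | TL_FLAG_NO_SIGNAL_ON_COMMIT;

    /* Decompose them the way the implementation does. */
    printf("op mode            : %u\n", flags & TL_OPMODE_MASK);
    printf("signal on commit   : %s\n",
           (flags & TL_FLAG_NO_SIGNAL_ON_COMMIT) ? "no (producer signals)" : "yes");
    printf("permanent (no wrap): %s\n",
           (flags & TL_FLAG_PERMANENT_NO_WRAP) ? "yes" : "no");
    return 0;
}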
-+*/ /**************************************************************************/ -+void -+TLFreeSharedMem(IMG_HANDLE hStream); -+ -+/*************************************************************************/ /*! -+ @Function TLStreamCreate -+ @Description Request the creation of a new stream and open a handle. -+ If creating a stream which should continue to exist after the -+ current context is finished, then TLStreamCreate must be -+ followed by a TLStreamOpen call. On any case, the number of -+ create/open calls must balance with the number of close calls -+ used. This ensures the resources of a stream are released when -+ it is no longer required. -+ @Output phStream Pointer to handle to store the new stream. -+ @Input szStreamName Name of stream, maximum length: -+ PRVSRVTL_MAX_STREAM_NAME_SIZE. -+ If a longer string is provided,creation fails. -+ @Input ui32Size Desired buffer size in bytes. -+ @Input ui32StreamFlags Used to configure buffer behaviour. See above. -+ @Input pfOnReaderOpenCB Optional callback called when a client -+ opens this stream, may be null. -+ @Input pvOnReaderOpenUD Optional user data for pfOnReaderOpenCB, -+ may be null. -+ @Input pfProducerCB Optional callback, may be null. -+ @Input pvProducerUD Optional user data for callback, may be null. -+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle or string name -+ exceeded MAX_STREAM_NAME_SIZE -+ @Return PVRSRV_ERROR_OUT_OF_MEMORY Failed to allocate space for -+ stream handle. -+ @Return PVRSRV_ERROR_DUPLICATE_VALUE There already exists a stream with -+ the same stream name string. -+ @Return eError Internal services call returned -+ eError error number. -+ @Return PVRSRV_OK -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+TLStreamCreate(IMG_HANDLE *phStream, -+ const IMG_CHAR *szStreamName, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32StreamFlags, -+ TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB, -+ void *pvOnReaderOpenUD, -+ TL_STREAM_SOURCECB pfProducerCB, -+ void *pvProducerUD); -+ -+/*************************************************************************/ /*! -+ @Function TLStreamOpen -+ @Description Attach to existing stream that has already been created by a -+ TLStreamCreate call. A handle is returned to the stream. -+ @Output phStream Pointer to handle to store the stream. -+ @Input szStreamName Name of stream, should match an already -+ existing stream name -+ @Return PVRSRV_ERROR_NOT_FOUND None of the streams matched the -+ requested stream name. -+ PVRSRV_ERROR_INVALID_PARAMS Non-NULL pointer to stream -+ handler is required. -+ @Return PVRSRV_OK Success. -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+TLStreamOpen(IMG_HANDLE *phStream, -+ const IMG_CHAR *szStreamName); -+ -+ -+/*************************************************************************/ /*! -+ @Function TLStreamReset -+ @Description Resets read and write pointers and pending flag. -+ @Output phStream Pointer to stream's handle -+*/ /**************************************************************************/ -+void TLStreamReset(IMG_HANDLE hStream); -+ -+/*************************************************************************/ /*! -+ @Function TLStreamSetNotifStream -+ @Description Registers a "notification stream" which will be used to -+ publish information about state change of the "hStream" -+ stream. Notification can inform about events such as stream -+ open/close, etc. -+ @Input hStream Handle to stream to update. 
-+ @Input hNotifStream Handle to the stream which will be used for -+ publishing notifications. -+ @Return PVRSRV_ERROR_INVALID_PARAMS If either of the parameters is -+ NULL -+ @Return PVRSRV_OK Success. -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+TLStreamSetNotifStream(IMG_HANDLE hStream, IMG_HANDLE hNotifStream); -+ -+/*************************************************************************/ /*! -+ @Function TLStreamReconfigure -+ @Description Request the stream flags controlling buffer behaviour to -+ be updated. -+ In the case where TL_OPMODE_BLOCK is to be used, -+ TLStreamCreate should be called without that flag and this -+ function used to change the stream mode once a consumer process -+ has been started. This avoids a deadlock scenario where the -+ TLStreaWrite/TLStreamReserve call will hold the Bridge Lock -+ while blocking if the TL buffer is full. -+ The TL_OPMODE_BLOCK should never drop the Bridge Lock -+ as this leads to another deadlock scenario where the caller to -+ TLStreamWrite/TLStreamReserve has already acquired another lock -+ (e.g. gHandleLock) which is not dropped. This then leads to that -+ thread acquiring locks out of order. -+ @Input hStream Handle to stream to update. -+ @Input ui32StreamFlags Flags that configure buffer behaviour. See above. -+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle or inconsistent -+ stream flags. -+ @Return PVRSRV_ERROR_NOT_READY Stream is currently being written to -+ try again later. -+ @Return eError Internal services call returned -+ eError error number. -+ @Return PVRSRV_OK -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+TLStreamReconfigure(IMG_HANDLE hStream, -+ IMG_UINT32 ui32StreamFlags); -+ -+/*************************************************************************/ /*! -+ @Function TLStreamClose -+ @Description Detach from the stream associated with the given handle. If -+ the current handle is the last one accessing the stream -+ (i.e. the number of TLStreamCreate+TLStreamOpen calls matches -+ the number of TLStreamClose calls) then the stream is also -+ deleted. -+ On return the handle is no longer valid. -+ @Input hStream Handle to stream that will be closed. -+ @Return None. -+*/ /**************************************************************************/ -+void -+TLStreamClose(IMG_HANDLE hStream); -+ -+/*************************************************************************/ /*! -+ @Function TLStreamReserve -+ @Description Reserve space in stream buffer. When successful every -+ TLStreamReserve call must be followed by a matching -+ TLStreamCommit call. While a TLStreamCommit call is pending -+ for a stream, subsequent TLStreamReserve calls for this -+ stream will fail. -+ @Input hStream Stream handle. -+ @Output ppui8Data Pointer to a pointer to a location in the -+ buffer. The caller can then use this address -+ in writing data into the stream. -+ @Input ui32Size Number of bytes to reserve in buffer. -+ @Return PVRSRV_INVALID_PARAMS NULL stream handler. -+ @Return PVRSRV_ERROR_NOT_READY There are data previously reserved -+ that are pending to be committed. -+ @Return PVRSRV_ERROR_STREAM_MISUSE Misusing the stream by trying to -+ reserve more space than the -+ buffer size. -+ @Return PVRSRV_ERROR_STREAM_FULL The reserve size requested -+ is larger than the free -+ space. 
-+ @Return PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED The reserve size -+ requested is larger -+ than max TL packet size -+ @Return PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE Permanent stream buffer -+ does not have enough space -+ for the reserve. -+ @Return PVRSRV_OK Success, output arguments valid. -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+TLStreamReserve(IMG_HANDLE hStream, -+ IMG_UINT8 **ppui8Data, -+ IMG_UINT32 ui32Size); -+ -+/*************************************************************************/ /*! -+ @Function TLStreamReserve2 -+ @Description Reserve space in stream buffer. When successful every -+ TLStreamReserve call must be followed by a matching -+ TLStreamCommit call. While a TLStreamCommit call is pending -+ for a stream, subsequent TLStreamReserve calls for this -+ stream will fail. -+ @Input hStream Stream handle. -+ @Output ppui8Data Pointer to a pointer to a location in the -+ buffer. The caller can then use this address -+ in writing data into the stream. -+ @Input ui32Size Ideal number of bytes to reserve in buffer. -+ @Input ui32SizeMin Minimum number of bytes to reserve in buffer. -+ @Input pui32Available Optional, but when present and the -+ RESERVE_TOO_BIG error is returned, a size -+ suggestion is returned in this argument which -+ the caller can attempt to reserve again for a -+ successful allocation. -+ @Output pbIsReaderConnected Let writing clients know if reader is -+ connected or not, in case of error. -+ @Return PVRSRV_INVALID_PARAMS NULL stream handler. -+ @Return PVRSRV_ERROR_NOT_READY There are data previously reserved -+ that are pending to be committed. -+ @Return PVRSRV_ERROR_STREAM_MISUSE Misusing the stream by trying to -+ reserve more space than the -+ buffer size. -+ @Return PVRSRV_ERROR_STREAM_FULL The reserve size requested -+ is larger than the free -+ space. -+ Check the pui32Available -+ value for the correct -+ reserve size to use. -+ @Return PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED The reserve size -+ requested is larger -+ than max TL packet size -+ @Return PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE Permanent stream buffer -+ does not have enough space -+ for the reserve. -+ @Return PVRSRV_OK Success, output arguments valid. -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+TLStreamReserve2(IMG_HANDLE hStream, -+ IMG_UINT8 **ppui8Data, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 ui32SizeMin, -+ IMG_UINT32* pui32Available, -+ IMG_BOOL* pbIsReaderConnected); -+ -+/*************************************************************************/ /*! -+ @Function TLStreamReserveReturnFlags -+ @Description Reserve space in stream buffer. When successful every -+ TLStreamReserve call must be followed by a matching -+ TLStreamCommit call. While a TLStreamCommit call is pending -+ for a stream, subsequent TLStreamReserve calls for this -+ stream will fail. -+ @Input hStream Stream handle. -+ @Output ppui8Data Pointer to a pointer to a location in the -+ buffer. The caller can then use this address -+ in writing data into the stream. -+ @Input ui32Size Ideal number of bytes to reserve in buffer. -+ @Output pui32Flags Output parameter to return flags generated within -+ the reserve function. 
-+*/ /**************************************************************************/ -+PVRSRV_ERROR -+TLStreamReserveReturnFlags(IMG_HANDLE hStream, -+ IMG_UINT8 **ppui8Data, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32* pui32Flags); -+ -+/*************************************************************************/ /*! -+ @Function TLStreamGetUT -+ @Description Returns the current stream utilisation in bytes -+ @Input hStream Stream handle. -+ @Return IMG_UINT32 Stream utilisation -+*/ /**************************************************************************/ -+IMG_UINT32 TLStreamGetUT(IMG_HANDLE hStream); -+ -+/*************************************************************************/ /*! -+ @Function TLStreamCommit -+ @Description Notify TL that data have been written in the stream buffer. -+ Should always follow and match TLStreamReserve call. -+ @Input hStream Stream handle. -+ @Input ui32Size Number of bytes that have been added to the -+ stream. -+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle. -+ @Return PVRSRV_ERROR_STREAM_MISUSE Commit results in more data -+ committed than the buffer size, -+ the stream is misused. -+ @Return eError Commit was successful but -+ internal services call returned -+ eError error number. -+ @Return PVRSRV_OK -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+TLStreamCommit(IMG_HANDLE hStream, -+ IMG_UINT32 ui32Size); -+ -+/*************************************************************************/ /*! -+ @Function TLStreamWrite -+ @Description Combined Reserve/Commit call. This function Reserves space in -+ the specified stream buffer, copies ui32Size bytes of data -+ from the array pui8Src points to and Commits in an "atomic" -+ style operation. -+ @Input hStream Stream handle. -+ @Input pui8Src Source to read data from. -+ @Input ui32Size Number of bytes to copy and commit. -+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handler. -+ @Return eError Error codes returned by either -+ Reserve or Commit. -+ @Return PVRSRV_OK -+ */ /**************************************************************************/ -+PVRSRV_ERROR -+TLStreamWrite(IMG_HANDLE hStream, -+ IMG_UINT8 *pui8Src, -+ IMG_UINT32 ui32Size); -+ -+/*************************************************************************/ /*! -+ @Function TLStreamWriteRetFlags -+ @Description Combined Reserve/Commit call. This function Reserves space in -+ the specified stream buffer, copies ui32Size bytes of data -+ from the array pui8Src points to and Commits in an "atomic" -+ style operation. Also accepts a pointer to a bit flag value -+ for returning write status flags. -+ @Input hStream Stream handle. -+ @Input pui8Src Source to read data from. -+ @Input ui32Size Number of bytes to copy and commit. -+ @Output pui32Flags Output parameter for write status info -+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handler. -+ @Return eError Error codes returned by either -+ Reserve or Commit. -+ @Return PVRSRV_OK -+ */ /**************************************************************************/ -+PVRSRV_ERROR -+TLStreamWriteRetFlags(IMG_HANDLE hStream, -+ IMG_UINT8 *pui8Src, -+ IMG_UINT32 ui32Size, -+ IMG_UINT32 *pui32Flags); -+ -+/*************************************************************************/ /*! -+ @Function TLStreamSync -+ @Description Signal the consumer to start acquiring data from the stream -+ buffer. Called by producers that use the flag -+ TL_FLAG_NO_SIGNAL_ON_COMMIT to manually control when -+ consumers starting reading the stream. 
-+ Used when multiple small writes need to be batched. -+ @Input hStream Stream handle. -+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle. -+ @Return eError Error codes returned by either -+ Reserve or Commit. -+ @Return PVRSRV_OK -+ */ /**************************************************************************/ -+PVRSRV_ERROR -+TLStreamSync(IMG_HANDLE hStream); -+ -+ -+/*************************************************************************/ /*! -+ @Function TLStreamMarkEOS -+ @Description Insert a EOS marker packet in the given stream. -+ @Input hStream Stream handle. -+ @Input bRemoveOld if TRUE, remove old stream record file before -+ splitting to new file. -+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handler. -+ @Return eError Error codes returned by either -+ Reserve or Commit. -+ @Return PVRSRV_OK Success. -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+TLStreamMarkEOS(IMG_HANDLE hStream, IMG_BOOL bRemoveOld); -+ -+/*************************************************************************/ /*! -+@Function TLStreamMarkStreamOpen -+@Description Puts *open* stream packet into hStream's notification stream, -+ if set, error otherwise." -+@Input hStream Stream handle. -+@Return PVRSRV_OK on success and error code on failure -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+TLStreamMarkStreamOpen(IMG_HANDLE hStream); -+ -+/*************************************************************************/ /*! -+@Function TLStreamMarkStreamClose -+@Description Puts *close* stream packet into hStream's notification stream, -+ if set, error otherwise." -+@Input hStream Stream handle. -+@Return PVRSRV_OK on success and error code on failure -+*/ /**************************************************************************/ -+PVRSRV_ERROR -+TLStreamMarkStreamClose(IMG_HANDLE hStream); -+ -+/*************************************************************************/ /*! -+ @Function TLStreamInfo -+ @Description Run time information about buffer elemental sizes. -+ It sets psInfo members accordingly. Users can use those values -+ to calculate the parameters they use in TLStreamCreate and -+ TLStreamReserve. -+ @Output psInfo pointer to stream info structure. -+ @Return None. -+*/ /**************************************************************************/ -+void -+TLStreamInfo(IMG_HANDLE hStream, PTL_STREAM_INFO psInfo); -+ -+/*************************************************************************/ /*! -+ @Function TLStreamIsOpenForReading -+ @Description Query if a stream has any readers connected. -+ @Input hStream Stream handle. -+ @Return IMG_BOOL True if at least one reader is connected, -+ false otherwise -+*/ /**************************************************************************/ -+IMG_BOOL -+TLStreamIsOpenForReading(IMG_HANDLE hStream); -+ -+/*************************************************************************/ /*! -+ @Function TLStreamOutOfData -+ @Description Query if the stream is empty (no data waiting to be read). -+ @Input hStream Stream handle. -+ @Return IMG_BOOL True if read==write, no data waiting, -+ false otherwise -+*/ /**************************************************************************/ -+IMG_BOOL TLStreamOutOfData(IMG_HANDLE hStream); -+ -+/*************************************************************************/ /*! -+ @Function TLStreamResetProducerByteCount -+ @Description Reset the producer byte counter on the specified stream. 
-+ @Input hStream Stream handle. -+ @Input IMG_UINT32 Value to reset counter to, often 0. -+ @Return PVRSRV_OK Success. -+ @Return PVRSRV_ERROR_STREAM_MISUSE Success but the read and write -+ positions did not match, -+ stream not empty. -+*/ /**************************************************************************/ -+ -+PVRSRV_ERROR -+TLStreamResetProducerByteCount(IMG_HANDLE hStream, IMG_UINT32 ui32Value); -+ -+#endif /* TLSTREAM_H */ -+/***************************************************************************** -+ End of file (tlstream.h) -+*****************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/trace_events.c b/drivers/gpu/drm/img-rogue/trace_events.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/trace_events.c -@@ -0,0 +1,269 @@ -+/*************************************************************************/ /*! -+@Title Linux trace event helper functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
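Stepping back to the transport-layer stream API declared in tlstream.h above: the contract is a paired reserve/commit protocol in which a producer creates or opens a stream, reserves space, fills it, commits exactly the bytes it reserved, and balances every create/open with a close. The following is a minimal illustrative producer sketch against that interface; the stream name, buffer size and payload bytes are invented for the example, the optional callbacks are left NULL, and one of the TL_OPMODE_* values would normally be OR-ed into the low bits of the flags as well.

#include "tlstream.h"

/* Illustrative producer: create a stream, reserve space, fill it, commit the
 * same number of bytes, then balance the create with a close. All names and
 * sizes here are hypothetical. */
static PVRSRV_ERROR ExampleEmitRecord(void)
{
	IMG_HANDLE hStream;
	IMG_UINT8 *pui8Dest;
	const IMG_UINT8 aui8Payload[] = { 0x01, 0x02, 0x03, 0x04 };
	IMG_UINT32 i;
	PVRSRV_ERROR eError;

	/* TL_FLAG_FORCE_FLUSH: do not destroy the stream while unread data
	 * remains. A TL_OPMODE_* value would normally be OR-ed in as well. */
	eError = TLStreamCreate(&hStream, "example_stream", 4096,
	                        TL_FLAG_FORCE_FLUSH,
	                        NULL, NULL,  /* no on-reader-open callback */
	                        NULL, NULL); /* no producer callback */
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* Every successful Reserve must be followed by a Commit of the bytes
	 * actually written; a second Reserve before the Commit would fail. */
	eError = TLStreamReserve(hStream, &pui8Dest, sizeof(aui8Payload));
	if (eError == PVRSRV_OK)
	{
		for (i = 0; i < sizeof(aui8Payload); i++)
		{
			pui8Dest[i] = aui8Payload[i];
		}
		eError = TLStreamCommit(hStream, sizeof(aui8Payload));
	}

	/* The last close deletes the stream once create/open and close balance. */
	TLStreamClose(hStream);
	return eError;
}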
-+*/ /**************************************************************************/ -+ -+#include -+#include -+ -+#if defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) -+#if !defined(CONFIG_TRACE_GPU_MEM) -+#define CREATE_TRACE_POINTS -+#include -+#undef CREATE_TRACE_POINTS -+#else /* !defined(CONFIG_TRACE_GPU_MEM) */ -+#include -+#endif /* !defined(CONFIG_TRACE_GPU_MEM) */ -+#endif /* defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) */ -+ -+#include "img_types.h" -+#include "trace_events.h" -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) -+#include "rogue_trace_events.h" -+#endif -+#include "sync_checkpoint_external.h" -+ -+#if defined(PVRSRV_TRACE_ROGUE_EVENTS) -+ -+static bool fence_update_event_enabled, fence_check_event_enabled; -+ -+bool trace_rogue_are_fence_updates_traced(void) -+{ -+ return fence_update_event_enabled; -+} -+ -+bool trace_rogue_are_fence_checks_traced(void) -+{ -+ return fence_check_event_enabled; -+} -+ -+/* -+ * Call backs referenced from rogue_trace_events.h. Note that these are not -+ * thread-safe, however, since running trace code when tracing is not enabled is -+ * simply a no-op, there is no harm in it. -+ */ -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) -+int trace_fence_update_enabled_callback(void) -+#else -+void trace_fence_update_enabled_callback(void) -+#endif -+{ -+ fence_update_event_enabled = true; -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) -+ return 0; -+#endif -+} -+ -+void trace_fence_update_disabled_callback(void) -+{ -+ fence_update_event_enabled = false; -+} -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) -+int trace_fence_check_enabled_callback(void) -+#else -+void trace_fence_check_enabled_callback(void) -+#endif -+{ -+ fence_check_event_enabled = true; -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) -+ return 0; -+#endif -+} -+ -+void trace_fence_check_disabled_callback(void) -+{ -+ fence_check_event_enabled = false; -+} -+ -+#if defined(SUPPORT_RGX) -+/* This is a helper that calls trace_rogue_fence_update for each fence in an -+ * array. 
-+ */ -+void trace_rogue_fence_updates(const char *cmd, const char *dm, -+ IMG_UINT32 ui32GpuId, -+ IMG_UINT32 ui32FWContext, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT uCount, -+ PRGXFWIF_UFO_ADDR *pauiAddresses, -+ IMG_UINT32 *paui32Values) -+{ -+ IMG_UINT i; -+ for (i = 0; i < uCount; i++) -+ { -+ trace_rogue_fence_update(current->comm, cmd, dm, ui32GpuId, ui32FWContext, ui32Offset, -+ pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED); -+ } -+} -+ -+void trace_rogue_fence_checks(const char *cmd, const char *dm, -+ IMG_UINT32 ui32GpuId, -+ IMG_UINT32 ui32FWContext, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT uCount, -+ PRGXFWIF_UFO_ADDR *pauiAddresses, -+ IMG_UINT32 *paui32Values) -+{ -+ IMG_UINT i; -+ for (i = 0; i < uCount; i++) -+ { -+ trace_rogue_fence_check(current->comm, cmd, dm, ui32GpuId, ui32FWContext, ui32Offset, -+ pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED); -+ } -+} -+ -+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp, -+ IMG_UINT32 ui32GpuId, -+ IMG_UINT32 ui32FWCtx, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ IMG_UINT32 ui32UFOCount, -+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData) -+{ -+ IMG_UINT i; -+ for (i = 0; i < ui32UFOCount; i++) -+ { -+ trace_rogue_ufo_update(ui64OSTimestamp, ui32GpuId, ui32FWCtx, -+ ui32IntJobRef, -+ ui32ExtJobRef, -+ puData->sUpdate.ui32FWAddr, -+ puData->sUpdate.ui32OldValue, -+ puData->sUpdate.ui32NewValue); -+ puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sUpdate)); -+ } -+} -+ -+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp, -+ IMG_UINT32 ui32GpuId, -+ IMG_UINT32 ui32FWCtx, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ IMG_BOOL bPrEvent, -+ IMG_UINT32 ui32UFOCount, -+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData) -+{ -+ IMG_UINT i; -+ for (i = 0; i < ui32UFOCount; i++) -+ { -+ if (bPrEvent) -+ { -+ trace_rogue_ufo_pr_check_success(ui64OSTimestamp, ui32GpuId, ui32FWCtx, -+ ui32IntJobRef, ui32ExtJobRef, -+ puData->sCheckSuccess.ui32FWAddr, -+ puData->sCheckSuccess.ui32Value); -+ } -+ else -+ { -+ trace_rogue_ufo_check_success(ui64OSTimestamp, ui32GpuId, ui32FWCtx, -+ ui32IntJobRef, ui32ExtJobRef, -+ puData->sCheckSuccess.ui32FWAddr, -+ puData->sCheckSuccess.ui32Value); -+ } -+ puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sCheckSuccess)); -+ } -+} -+ -+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp, -+ IMG_UINT32 ui32GpuId, -+ IMG_UINT32 ui32FWCtx, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ IMG_BOOL bPrEvent, -+ IMG_UINT32 ui32UFOCount, -+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData) -+{ -+ IMG_UINT i; -+ for (i = 0; i < ui32UFOCount; i++) -+ { -+ if (bPrEvent) -+ { -+ trace_rogue_ufo_pr_check_fail(ui64OSTimestamp, ui32GpuId, ui32FWCtx, -+ ui32IntJobRef, ui32ExtJobRef, -+ puData->sCheckFail.ui32FWAddr, -+ puData->sCheckFail.ui32Value, -+ puData->sCheckFail.ui32Required); -+ } -+ else -+ { -+ trace_rogue_ufo_check_fail(ui64OSTimestamp, ui32GpuId, ui32FWCtx, -+ ui32IntJobRef, ui32ExtJobRef, -+ puData->sCheckFail.ui32FWAddr, -+ puData->sCheckFail.ui32Value, -+ puData->sCheckFail.ui32Required); -+ } -+ puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sCheckFail)); -+ } -+} -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) -+ -+int PVRGpuTraceEnableUfoCallbackWrapper(void) -+{ -+ PVRGpuTraceEnableUfoCallback(); -+ return 0; -+} -+ -+int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void) -+{ -+ PVRGpuTraceEnableFirmwareActivityCallback(); -+ return 0; -+} -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) */ -+#endif /* 
defined(SUPPORT_RGX) */ -+#endif /* defined(PVRSRV_TRACE_ROGUE_EVENTS) */ -+ -+void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId, -+ IMG_UINT64 ui64Size) -+{ -+#if defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) -+ trace_gpu_mem_total(ui8GPUId, 0, ui64Size); -+#endif /* defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) */ -+} -+ -+void TracepointUpdateGPUMemPerProcess(IMG_UINT8 ui8GPUId, -+ IMG_UINT32 ui32Pid, -+ IMG_UINT64 ui64Size) -+{ -+#if defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) -+ trace_gpu_mem_total(ui8GPUId, ui32Pid, ui64Size); -+#endif /* defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) */ -+} -diff --git a/drivers/gpu/drm/img-rogue/trace_events.h b/drivers/gpu/drm/img-rogue/trace_events.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/trace_events.h -@@ -0,0 +1,196 @@ -+/*************************************************************************/ /*! -+@Title Linux trace events and event helper functions -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
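The fence helpers in trace_events.c above exist so that hot paths pay nothing while tracing is disabled: the enable/disable callbacks only flip a boolean, and producers are expected to consult trace_rogue_are_fence_updates_traced() (or the checks variant) before marshalling UFO address/value arrays. A hypothetical call site, with placeholder command and data-master strings and assuming a SUPPORT_RGX build, might look like this sketch:

#include "img_types.h"
#include "trace_events.h"

/* Hypothetical producer-side call site: the address/value arrays are only
 * worth assembling when somebody is listening, which is exactly what the
 * enabled-callback boolean above tracks. */
static void ExampleTraceFenceUpdates(IMG_UINT32 ui32GpuId,
                                     IMG_UINT32 ui32FWContext,
                                     IMG_UINT32 ui32Offset,
                                     IMG_UINT uCount,
                                     PRGXFWIF_UFO_ADDR *pauiAddresses,
                                     IMG_UINT32 *paui32Values)
{
	if (!trace_rogue_are_fence_updates_traced())
	{
		return; /* tracing disabled: skip all marshalling cost */
	}

	/* "CMD" and "DM" stand in for the real command and data-master names. */
	trace_rogue_fence_updates("CMD", "DM", ui32GpuId, ui32FWContext,
	                          ui32Offset, uCount,
	                          pauiAddresses, paui32Values);
}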
-+*/ /**************************************************************************/ -+ -+#if !defined(TRACE_EVENTS_H) -+#define TRACE_EVENTS_H -+ -+#include "rgx_fwif_km.h" -+#include "rgx_hwperf.h" -+ -+/* We need to make these functions do nothing if CONFIG_EVENT_TRACING isn't -+ * enabled, just like the actual trace event functions that the kernel -+ * defines for us. -+ */ -+ -+#if defined(CONFIG_EVENT_TRACING) && defined(PVRSRV_TRACE_ROGUE_EVENTS) -+bool trace_rogue_are_fence_checks_traced(void); -+ -+bool trace_rogue_are_fence_updates_traced(void); -+ -+#if defined(SUPPORT_RGX) -+void trace_rogue_fence_updates(const char *cmd, const char *dm, -+ IMG_UINT32 ui32GpuId, -+ IMG_UINT32 ui32FWContext, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT uCount, -+ PRGXFWIF_UFO_ADDR *pauiAddresses, -+ IMG_UINT32 *paui32Values); -+ -+void trace_rogue_fence_checks(const char *cmd, const char *dm, -+ IMG_UINT32 ui32GpuId, -+ IMG_UINT32 ui32FWContext, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT uCount, -+ PRGXFWIF_UFO_ADDR *pauiAddresses, -+ IMG_UINT32 *paui32Values); -+ -+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp, -+ IMG_UINT32 ui32GpuId, -+ IMG_UINT32 ui32FWCtx, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ IMG_UINT32 ui32UFOCount, -+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData); -+ -+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp, -+ IMG_UINT32 ui32GpuId, -+ IMG_UINT32 ui32FWCtx, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ IMG_BOOL bPrEvent, -+ IMG_UINT32 ui32UFOCount, -+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData); -+ -+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp, -+ IMG_UINT32 ui32GpuId, -+ IMG_UINT32 ui32FWCtx, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ IMG_BOOL bPrEvent, -+ IMG_UINT32 ui32UFOCount, -+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData); -+#endif /* if defined(SUPPORT_RGX) */ -+#else /* defined(CONFIG_EVENT_TRACING) && defined(PVRSRV_TRACE_ROGUE_EVENTS) */ -+static inline -+bool trace_rogue_are_fence_checks_traced(void) -+{ -+ return false; -+} -+ -+static inline -+bool trace_rogue_are_fence_updates_traced(void) -+{ -+ return false; -+} -+ -+#if defined(SUPPORT_RGX) -+static inline -+void trace_rogue_fence_updates(const char *cmd, const char *dm, -+ IMG_UINT32 ui32GpuId, -+ IMG_UINT32 ui32FWContext, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT uCount, -+ PRGXFWIF_UFO_ADDR *pauiAddresses, -+ IMG_UINT32 *paui32Values) -+{ -+} -+ -+static inline -+void trace_rogue_fence_checks(const char *cmd, const char *dm, -+ IMG_UINT32 ui32GpuId, -+ IMG_UINT32 ui32FWContext, -+ IMG_UINT32 ui32Offset, -+ IMG_UINT uCount, -+ PRGXFWIF_UFO_ADDR *pauiAddresses, -+ IMG_UINT32 *paui32Values) -+{ -+} -+ -+static inline -+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp, -+ IMG_UINT32 ui32GpuId, -+ IMG_UINT32 ui32FWCtx, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ IMG_UINT32 ui32UFOCount, -+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData) -+{ -+} -+ -+static inline -+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp, -+ IMG_UINT32 ui32GpuId, -+ IMG_UINT32 ui32FWCtx, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ IMG_BOOL bPrEvent, -+ IMG_UINT32 ui32UFOCount, -+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData) -+{ -+} -+ -+static inline -+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp, -+ IMG_UINT32 ui32GpuId, -+ IMG_UINT32 ui32FWCtx, -+ IMG_UINT32 ui32ExtJobRef, -+ IMG_UINT32 ui32IntJobRef, -+ IMG_BOOL bPrEvent, -+ IMG_UINT32 ui32UFOCount, -+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData) -+{ -+} 
-+#endif /* if defined(SUPPORT_RGX)*/ -+#endif /* defined(CONFIG_EVENT_TRACING) && defined(PVRSRV_TRACE_ROGUE_EVENTS) */ -+ -+#if defined(CONFIG_EVENT_TRACING) -+void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId, -+ IMG_UINT64 ui64Size); -+ -+void TracepointUpdateGPUMemPerProcess(IMG_UINT8 ui8GPUId, -+ IMG_UINT32 ui32Pid, -+ IMG_UINT64 ui64Size); -+#else -+static inline -+void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId, -+ IMG_UINT64 ui64Size) -+{ -+} -+ -+static inline -+void TracepointUpdateGPUMemPerProcess(IMG_UINT8 ui8GPUId, -+ IMG_UINT32 ui32Pid, -+ IMG_UINT64 ui64Size) -+{ -+} -+#endif /* defined(CONFIG_EVENT_TRACING) */ -+ -+#endif /* TRACE_EVENTS_H */ -diff --git a/drivers/gpu/drm/img-rogue/uniq_key_splay_tree.c b/drivers/gpu/drm/img-rogue/uniq_key_splay_tree.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/uniq_key_splay_tree.c -@@ -0,0 +1,280 @@ -+/*************************************************************************/ /*! -+@File -+@Title Provides splay-trees. -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Implementation of splay-trees. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
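Because trace_events.h above supplies empty static inline stubs whenever CONFIG_EVENT_TRACING (or PVRSRV_TRACE_ROGUE_EVENTS) is absent, call sites can invoke the helpers unconditionally and let the compiler discard them. A small, invented accounting helper illustrates the intent:

#include "trace_events.h"

/* Invented accounting helper: safe to call unconditionally, because the
 * header degrades these calls to empty static inlines when the kernel has
 * no CONFIG_EVENT_TRACING support. */
static void ExampleAccountGpuMem(IMG_UINT8 ui8GPUId, IMG_UINT32 ui32Pid,
                                 IMG_UINT64 ui64TotalBytes,
                                 IMG_UINT64 ui64ProcessBytes)
{
	TracepointUpdateGPUMemGlobal(ui8GPUId, ui64TotalBytes);
	TracepointUpdateGPUMemPerProcess(ui8GPUId, ui32Pid, ui64ProcessBytes);
}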
-+ */ /**************************************************************************/ -+ -+#include "allocmem.h" /* for OSMemAlloc / OSMemFree */ -+#include "osfunc.h" /* for OSMemFree */ -+#include "pvr_debug.h" -+#include "uniq_key_splay_tree.h" -+ -+/** -+ * PVRSRVSplay - perform a simple top down splay -+ * @ui32Flags: flags that must splayed to the root (if possible) -+ * @psTree: psTree The tree to splay. -+ * -+ * Return the resulting tree after the splay operation. -+ */ -+IMG_INTERNAL -+IMG_PSPLAY_TREE PVRSRVSplay (IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) -+{ -+ IMG_SPLAY_TREE sTmp1; -+ IMG_PSPLAY_TREE psLeft; -+ IMG_PSPLAY_TREE psRight; -+ IMG_PSPLAY_TREE psTmp2; -+ -+ if (psTree == NULL) -+ { -+ return NULL; -+ } -+ -+ sTmp1.psLeft = NULL; -+ sTmp1.psRight = NULL; -+ -+ psLeft = &sTmp1; -+ psRight = &sTmp1; -+ -+ for (;;) -+ { -+ if (uiFlags < psTree->uiFlags) -+ { -+ if (psTree->psLeft == NULL) -+ { -+ break; -+ } -+ -+ if (uiFlags < psTree->psLeft->uiFlags) -+ { -+ /* if we get to this point, we need to rotate right the tree */ -+ psTmp2 = psTree->psLeft; -+ psTree->psLeft = psTmp2->psRight; -+ psTmp2->psRight = psTree; -+ psTree = psTmp2; -+ if (psTree->psLeft == NULL) -+ { -+ break; -+ } -+ } -+ -+ /* if we get to this point, we need to link right */ -+ psRight->psLeft = psTree; -+ psRight = psTree; -+ psTree = psTree->psLeft; -+ } -+ else -+ { -+ if (uiFlags > psTree->uiFlags) -+ { -+ if (psTree->psRight == NULL) -+ { -+ break; -+ } -+ -+ if (uiFlags > psTree->psRight->uiFlags) -+ { -+ /* if we get to this point, we need to rotate left the tree */ -+ psTmp2 = psTree->psRight; -+ psTree->psRight = psTmp2->psLeft; -+ psTmp2->psLeft = psTree; -+ psTree = psTmp2; -+ if (psTree->psRight == NULL) -+ { -+ break; -+ } -+ } -+ -+ /* if we get to this point, we need to link left */ -+ psLeft->psRight = psTree; -+ psLeft = psTree; -+ psTree = psTree->psRight; -+ } -+ else -+ { -+ break; -+ } -+ } -+ } -+ -+ /* at this point re-assemble the tree */ -+ psLeft->psRight = psTree->psLeft; -+ psRight->psLeft = psTree->psRight; -+ psTree->psLeft = sTmp1.psRight; -+ psTree->psRight = sTmp1.psLeft; -+ return psTree; -+} -+ -+ -+/** -+ * PVRSRVInsert - insert a node into the Tree (unless it is already present, in -+ * which case it is equivalent to performing only a splay operation -+ * @ui32Flags: the key of the new node -+ * @psTree: tree into which one wants to add a new node -+ * -+ * Return the resulting tree after the splay operation. 
-+ */ -+IMG_INTERNAL -+IMG_PSPLAY_TREE PVRSRVInsert(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) -+{ -+ IMG_PSPLAY_TREE psNew; -+ -+ if (psTree != NULL) -+ { -+ psTree = PVRSRVSplay(uiFlags, psTree); -+ if (psTree->uiFlags == uiFlags) -+ { -+ return psTree; -+ } -+ } -+ -+ psNew = (IMG_PSPLAY_TREE) OSAllocMem(sizeof(IMG_SPLAY_TREE)); -+ if (psNew == NULL) -+ { -+ PVR_DPF ((PVR_DBG_ERROR, "Error: failed to allocate memory to add a node to the splay tree.")); -+ return NULL; -+ } -+ -+ psNew->uiFlags = uiFlags; -+ OSCachedMemSet(&(psNew->buckets[0]), 0, sizeof(psNew->buckets)); -+ -+#if defined(PVR_CTZLL) -+ psNew->bHasEltsMapping = ~(((IMG_ELTS_MAPPINGS) 1 << (sizeof(psNew->buckets) / (sizeof(psNew->buckets[0])))) - 1); -+#endif -+ -+ if (psTree == NULL) -+ { -+ psNew->psLeft = NULL; -+ psNew->psRight = NULL; -+ return psNew; -+ } -+ -+ if (uiFlags < psTree->uiFlags) -+ { -+ psNew->psLeft = psTree->psLeft; -+ psNew->psRight = psTree; -+ psTree->psLeft = NULL; -+ } -+ else -+ { -+ psNew->psRight = psTree->psRight; -+ psNew->psLeft = psTree; -+ psTree->psRight = NULL; -+ } -+ -+ return psNew; -+} -+ -+ -+/** -+ * PVRSRVDelete - delete a node from the tree (unless it is not there, in which -+ * case it is equivalent to a splay operation) -+ * @ui32Flags: value of the node to remove -+ * @psTree: tree into which the node must be removed -+ * -+ * Return the resulting tree. -+ */ -+IMG_INTERNAL -+IMG_PSPLAY_TREE PVRSRVDelete(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) -+{ -+ IMG_PSPLAY_TREE psTmp; -+ if (psTree == NULL) -+ { -+ return NULL; -+ } -+ -+ psTree = PVRSRVSplay(uiFlags, psTree); -+ if (uiFlags == psTree->uiFlags) -+ { -+ /* The value was present in the tree */ -+ if (psTree->psLeft == NULL) -+ { -+ psTmp = psTree->psRight; -+ } -+ else -+ { -+ psTmp = PVRSRVSplay(uiFlags, psTree->psLeft); -+ psTmp->psRight = psTree->psRight; -+ } -+ OSFreeMem(psTree); -+ return psTmp; -+ } -+ -+ /* The value was not present in the tree, so just return it as is -+ * (after the splay) */ -+ return psTree; -+} -+ -+/** -+ * PVRSRVFindNode - pick up the appropriate node for the given flags -+ * @ui32Flags: flags that must associated with the node -+ * @psTree: current splay tree node -+ * -+ * Return the resulting tree node after the search operation. -+ */ -+IMG_INTERNAL -+IMG_PSPLAY_TREE PVRSRVFindNode(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) -+{ -+ if (psTree == NULL) -+ { -+ return NULL; -+ } -+ -+ while (psTree) -+ { -+ if (uiFlags == psTree->uiFlags) -+ { -+ return psTree; -+ } -+ -+ if (uiFlags < psTree->uiFlags) -+ { -+ psTree = psTree->psLeft; -+ continue; -+ } -+ -+ if (uiFlags > psTree->uiFlags) -+ { -+ psTree = psTree->psRight; -+ continue; -+ } -+ } -+ -+ return NULL; -+} -diff --git a/drivers/gpu/drm/img-rogue/uniq_key_splay_tree.h b/drivers/gpu/drm/img-rogue/uniq_key_splay_tree.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/uniq_key_splay_tree.h -@@ -0,0 +1,90 @@ -+/*************************************************************************/ /*! -+@File -+@Title Splay trees interface -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Provides debug functionality -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef UNIQ_KEY_SPLAY_TREE_H_ -+#define UNIQ_KEY_SPLAY_TREE_H_ -+ -+#include "img_types.h" -+#include "pvr_intrinsics.h" -+ -+#if defined(PVR_CTZLL) -+ /* map the is_bucket_n_free to an int. -+ * This way, the driver can find the first non empty without loop -+ */ -+ typedef IMG_UINT64 IMG_ELTS_MAPPINGS; -+#endif -+ -+typedef IMG_UINT64 IMG_PSPLAY_FLAGS_T; -+ -+/* head of list of free boundary tags for indexed by pvr_log2 of the -+ boundary tag size */ -+ -+#define FREE_TABLE_LIMIT 40 -+ -+struct _BT_; -+ -+typedef struct img_splay_tree -+{ -+ /* left child/subtree */ -+ struct img_splay_tree * psLeft; -+ -+ /* right child/subtree */ -+ struct img_splay_tree * psRight; -+ -+ /* Flags to match on this span, used as the key. 
*/ -+ IMG_PSPLAY_FLAGS_T uiFlags; -+#if defined(PVR_CTZLL) -+ /* each bit of this int is a boolean telling if the corresponding -+ bucket is empty or not */ -+ IMG_ELTS_MAPPINGS bHasEltsMapping; -+#endif -+ struct _BT_ * buckets[FREE_TABLE_LIMIT]; -+} IMG_SPLAY_TREE, *IMG_PSPLAY_TREE; -+ -+IMG_PSPLAY_TREE PVRSRVSplay (IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree); -+IMG_PSPLAY_TREE PVRSRVInsert(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree); -+IMG_PSPLAY_TREE PVRSRVDelete(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree); -+IMG_PSPLAY_TREE PVRSRVFindNode(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree); -+ -+ -+#endif /* !UNIQ_KEY_SPLAY_TREE_H_ */ -diff --git a/drivers/gpu/drm/img-rogue/vmm_impl.h b/drivers/gpu/drm/img-rogue/vmm_impl.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/vmm_impl.h -@@ -0,0 +1,203 @@ -+/*************************************************************************/ /*! -+@File vmm_impl.h -+@Title Common VM manager API -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description This header provides common VM manager definitions that need to -+ be shared by system virtualization layer itself and modules that -+ implement the actual VM manager types. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
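The splay tree above keys each node on a 64-bit flags word and rotates the most recently accessed key towards the root, so repeated operations on the same key stay cheap; the buckets array on each node is where the allocator hangs its per-size free lists. A minimal usage sketch of the interface, with arbitrary key values and without the allocation-failure handling a real caller would need:

#include "uniq_key_splay_tree.h"

/* Arbitrary keys, no allocation-failure handling: purely illustrative. */
static void ExampleSplayUsage(void)
{
	IMG_PSPLAY_TREE psTree = NULL;
	IMG_PSPLAY_TREE psNode;

	/* Insert returns the new root; inserting an existing key only splays it. */
	psTree = PVRSRVInsert(0x1, psTree);
	psTree = PVRSRVInsert(0x4, psTree);
	psTree = PVRSRVInsert(0x2, psTree);

	/* FindNode walks the tree without re-balancing it. */
	psNode = PVRSRVFindNode(0x4, psTree);
	if (psNode != NULL)
	{
		/* psNode->buckets[] is where the caller keeps per-size free lists. */
	}

	/* Delete splays the key to the root, frees the node, returns the new root. */
	psTree = PVRSRVDelete(0x2, psTree);
	psTree = PVRSRVDelete(0x1, psTree);
	psTree = PVRSRVDelete(0x4, psTree);
}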
-+*/ /**************************************************************************/ -+ -+#ifndef VMM_IMPL_H -+#define VMM_IMPL_H -+ -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+typedef enum _VMM_CONF_PARAM_ -+{ -+ VMM_CONF_PRIO_DRV0 = 0, -+ VMM_CONF_PRIO_DRV1 = 1, -+ VMM_CONF_PRIO_DRV2 = 2, -+ VMM_CONF_PRIO_DRV3 = 3, -+ VMM_CONF_PRIO_DRV4 = 4, -+ VMM_CONF_PRIO_DRV5 = 5, -+ VMM_CONF_PRIO_DRV6 = 6, -+ VMM_CONF_PRIO_DRV7 = 7, -+ VMM_CONF_HCS_DEADLINE = 8, -+ VMM_CONF_ISOLATION_GROUP_DRV0 = 9, -+ VMM_CONF_ISOLATION_GROUP_DRV1 = 10, -+ VMM_CONF_ISOLATION_GROUP_DRV2 = 11, -+ VMM_CONF_ISOLATION_GROUP_DRV3 = 12, -+ VMM_CONF_ISOLATION_GROUP_DRV4 = 13, -+ VMM_CONF_ISOLATION_GROUP_DRV5 = 14, -+ VMM_CONF_ISOLATION_GROUP_DRV6 = 15, -+ VMM_CONF_ISOLATION_GROUP_DRV7 = 16, -+ VMM_CONF_TIME_SLICE_DRV0 = 17, -+ VMM_CONF_TIME_SLICE_DRV1 = 18, -+ VMM_CONF_TIME_SLICE_DRV2 = 19, -+ VMM_CONF_TIME_SLICE_DRV3 = 20, -+ VMM_CONF_TIME_SLICE_DRV4 = 21, -+ VMM_CONF_TIME_SLICE_DRV5 = 22, -+ VMM_CONF_TIME_SLICE_DRV6 = 23, -+ VMM_CONF_TIME_SLICE_DRV7 = 24, -+ VMM_CONF_TIME_SLICE_INTERVAL = 25, -+ VMM_CONF_VZ_CONNECTION_COOLDOWN_PERIOD = 26, -+} VMM_CONF_PARAM; -+ -+/* -+ Virtual machine manager (hypervisor) para-virtualization (PVZ) connection: -+ - Type is implemented by host and guest drivers -+ - Assumes synchronous function call semantics -+ - Unidirectional semantics -+ - For Host (vmm -> host) -+ - For Guest (guest -> vmm) -+ - Parameters can be IN/OUT/INOUT -+ -+ - Host pvz entries are pre-implemented by IMG -+ - For host implementation, see vmm_pvz_server.c -+ - Called by host side hypercall handler or VMM -+ -+ - Guest pvz entries are supplied by 3rd-party -+ - These are specific to hypervisor (VMM) type -+ - These implement the actual hypercalls mechanism -+ -+ Para-virtualization (PVZ) call runtime sequence: -+ 1 - Guest driver in guest VM calls PVZ function -+ 1.1 - Guest PVZ connection calls -+ 1.2 - Guest VM Manager type which -+ 1.2.1 - Performs any pre-processing like parameter packing, etc. -+ 1.2.2 - Issues hypercall (blocking synchronous call) -+ -+ 2 - VM Manager (hypervisor) receives hypercall -+ 2.1 - Hypercall handler: -+ 2.1.1 - Performs any pre-processing -+ 2.1.2 - If call terminates in VM Manager: perform action and return from hypercall -+ 2.1.3 - Otherwise forward to host driver (implementation specific call) -+ -+ 3 - Host driver receives call from VM Manager -+ 3.1 - Host VM manager type: -+ 3.1.1 - Performs any pre-processing like parameter unpacking, etc. -+ 3.1.2 - Acquires host driver PVZ handler and calls the appropriate entry -+ 3.2 - Host PVZ connection calls corresponding host system virtualisation layer -+ 3.3 - Host driver system virtualisation layer: -+ 3.3.1 - Perform action requested by guest driver -+ 3.3.2 - Return to host VM Manager type -+ 3.4 - Host VM Manager type: -+ 3.4.1 - Prepare to return from hypercall -+ 3.4.2 - Perform any post-processing like result packing, etc. -+ 3.4.3 - Issue return from hypercall -+ -+ 4 - VM Manager (hypervisor) -+ 4.1 - Perform any post-processing -+ 4.2 - Return control to guest driver -+ -+ 5 - Guest driver in guest VM -+ 5.1 - Perform any post-processing like parameter unpacking, etc. -+ 5.2 - Continue execution in guest VM -+ */ -+typedef struct _VMM_PVZ_CONNECTION_ -+{ -+ struct { -+ /* -+ This pair must be implemented if the guest is responsible -+ for allocating the physical heap that backs its firmware -+ allocations, this is the default configuration. 
The physical -+ heap is allocated within the guest VM IPA space and this -+ IPA Addr/Size must be translated into the host's IPA space -+ by the VM manager before forwarding request to host. -+ If not implemented, return PVRSRV_ERROR_NOT_IMPLEMENTED. -+ */ -+ PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT64 ui64Size, -+ IMG_UINT64 ui64PAddr); -+ -+ PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(void); -+ } sClientFuncTab; -+ -+ struct { -+ /* -+ Corresponding server side entries to handle guest PVZ calls -+ NOTE: -+ - Additional PVZ function ui32DriverID parameter -+ - Driver ID determination is responsibility of VM manager -+ - Actual Driver ID value must be supplied by VM manager -+ - This can be done either in client/VMM/host side -+ - Must be done before host pvz function(s) are called -+ - Host pvz function validates incoming Driver ID values -+ */ -+ PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DevID, -+ IMG_UINT64 ui64Size, -+ IMG_UINT64 ui64PAddr); -+ -+ PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DevID); -+ } sServerFuncTab; -+ -+ struct { -+ /* -+ This is used by the VM manager to report pertinent runtime guest VM -+ information to the host; these events may in turn be forwarded to -+ the firmware -+ */ -+ PVRSRV_ERROR (*pfnOnVmOnline)(IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DevID); -+ -+ PVRSRV_ERROR (*pfnOnVmOffline)(IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DevID); -+ -+ PVRSRV_ERROR (*pfnVMMConfigure)(VMM_CONF_PARAM eVMMParamType, -+ IMG_UINT32 ui32ParamValue, -+ IMG_UINT32 ui32DevID); -+ -+ } sVmmFuncTab; -+} VMM_PVZ_CONNECTION; -+ -+/*! -+******************************************************************************* -+ @Function VMMCreatePvzConnection() and VMMDestroyPvzConnection() -+ @Description Both the guest and VM manager call this in order to obtain a -+ PVZ connection to the VM and host respectively; that is, guest -+ calls it to obtain connection to VM, VM calls it to obtain a -+ connection to the host. -+ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code -+******************************************************************************/ -+PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection); -+void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection); -+ -+#endif /* VMM_IMPL_H */ -diff --git a/drivers/gpu/drm/img-rogue/vmm_pvz_client.c b/drivers/gpu/drm/img-rogue/vmm_pvz_client.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/vmm_pvz_client.c -@@ -0,0 +1,131 @@ -+/*************************************************************************/ /*! -+@File vmm_pvz_client.c -+@Title VM manager client para-virtualization -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header provides VMM client para-virtualization APIs -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
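vmm_impl.h above deliberately leaves the hypercall mechanism to a hypervisor-specific module, which hands the driver a populated VMM_PVZ_CONNECTION through VMMCreatePvzConnection(). The sketch below is one hypothetical shape for such a module on a static-carveout configuration, where the header allows the guest mapping entries to simply report PVRSRV_ERROR_NOT_IMPLEMENTED; the server and VMM function tables, and any real hypercall plumbing, are omitted.

#include "img_types.h"
#include "pvrsrv_error.h"
#include "vmm_impl.h"

/* Guest-side client entries: on a static-carveout system the header permits
 * these to report "not implemented" instead of issuing a hypercall. */
static PVRSRV_ERROR ExampleMapDevPhysHeap(IMG_UINT64 ui64Size,
                                          IMG_UINT64 ui64PAddr)
{
	(void)ui64Size;
	(void)ui64PAddr;
	return PVRSRV_ERROR_NOT_IMPLEMENTED;
}

static PVRSRV_ERROR ExampleUnmapDevPhysHeap(void)
{
	return PVRSRV_ERROR_NOT_IMPLEMENTED;
}

/* Hypothetical connection instance; sServerFuncTab and sVmmFuncTab would be
 * populated in the same way by a host-side VM manager module. */
static VMM_PVZ_CONNECTION gsExamplePvzConnection = {
	.sClientFuncTab = {
		.pfnMapDevPhysHeap   = ExampleMapDevPhysHeap,
		.pfnUnmapDevPhysHeap = ExampleUnmapDevPhysHeap,
	},
};

PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection)
{
	if (psPvzConnection == NULL)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}
	*psPvzConnection = &gsExamplePvzConnection;
	return PVRSRV_OK;
}

void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection)
{
	(void)psPvzConnection; /* static instance, nothing to release */
}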
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "pvrsrv.h" -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+ -+#include "vmm_impl.h" -+#include "vz_vmm_pvz.h" -+#include "vmm_pvz_client.h" -+ -+ -+static inline void -+PvzClientLockAcquire(void) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ OSLockAcquire(psPVRSRVData->hPvzConnectionLock); -+} -+ -+static inline void -+PvzClientLockRelease(void) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ OSLockRelease(psPVRSRVData->hPvzConnectionLock); -+} -+ -+/* -+ * =========================================================== -+ * The following client para-virtualization (pvz) functions -+ * are exclusively called by guests to initiate a pvz call -+ * to the host via hypervisor (guest -> vm manager -> host) -+ * =========================================================== -+ */ -+ -+PVRSRV_ERROR -+PvzClientMapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig) -+{ -+ PVRSRV_ERROR eError; -+ IMG_DEV_PHYADDR sDevPAddr; -+ VMM_PVZ_CONNECTION *psVmmPvz; -+ PHYS_HEAP *psFwPhysHeap = psDevConfig->psDevNode->apsPhysHeap[FIRST_PHYSHEAP_MAPPED_TO_FW_MAIN_DEVMEM]; -+ -+ eError = PhysHeapGetDevPAddr(psFwPhysHeap, &sDevPAddr); -+ -+#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES) -+{ -+ IMG_DEV_PHYADDR sDevPAddrTranslated; -+ -+ /* If required, perform a software translation between CPU and Device physical addresses. 
*/ -+ PhysHeapCpuPAddrToDevPAddr(psFwPhysHeap, 1, &sDevPAddrTranslated, (IMG_CPU_PHYADDR *)&sDevPAddr); -+ sDevPAddr.uiAddr = sDevPAddrTranslated.uiAddr; -+} -+#endif -+ -+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapGetDevPAddr"); -+ PVR_LOG_RETURN_IF_FALSE((sDevPAddr.uiAddr != 0), "PhysHeapGetDevPAddr", PVRSRV_ERROR_INVALID_PARAMS); -+ -+ psVmmPvz = PvzConnectionAcquire(); -+ PvzClientLockAcquire(); -+ -+ eError = psVmmPvz->sClientFuncTab.pfnMapDevPhysHeap(RGX_FIRMWARE_RAW_HEAP_SIZE, sDevPAddr.uiAddr); -+ -+ PvzClientLockRelease(); -+ PvzConnectionRelease(psVmmPvz); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+PvzClientUnmapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig) -+{ -+ PVRSRV_ERROR eError; -+ VMM_PVZ_CONNECTION *psVmmPvz = PvzConnectionAcquire(); -+ PVR_ASSERT(psVmmPvz); -+ -+ PvzClientLockAcquire(); -+ -+ PVR_ASSERT(psVmmPvz->sClientFuncTab.pfnUnmapDevPhysHeap); -+ -+ eError = psVmmPvz->sClientFuncTab.pfnUnmapDevPhysHeap(); -+ -+ PvzClientLockRelease(); -+ PvzConnectionRelease(psVmmPvz); -+ -+ return eError; -+} -+ -+/****************************************************************************** -+ End of file (vmm_pvz_client.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/vmm_pvz_client.h b/drivers/gpu/drm/img-rogue/vmm_pvz_client.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/vmm_pvz_client.h -@@ -0,0 +1,76 @@ -+/*************************************************************************/ /*! -+@File vmm_pvz_client.h -+@Title Guest VM manager client para-virtualization routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header provides guest VMM client para-virtualization APIs -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". 
-+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef VMM_PVZ_CLIENT_H -+#define VMM_PVZ_CLIENT_H -+ -+#include "pvrsrv.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+#include "vmm_impl.h" -+ -+/*! -+******************************************************************************* -+ @Function PvzClientMapDevPhysHeap -+ @Description The guest front-end to initiate a pfnMapDevPhysHeap PVZ call -+ to the host. -+ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code -+******************************************************************************/ -+PVRSRV_ERROR -+PvzClientMapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig); -+ -+/*! -+******************************************************************************* -+ @Function PvzClientUnmapDevPhysHeap -+ @Description The guest front-end to initiate a pfnUnmapDevPhysHeap PVZ call -+ to the host. -+ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code -+******************************************************************************/ -+PVRSRV_ERROR -+PvzClientUnmapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig); -+ -+#endif /* VMM_PVZ_CLIENT_H */ -+ -+/****************************************************************************** -+ End of file (vmm_pvz_client.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/vmm_pvz_server.c b/drivers/gpu/drm/img-rogue/vmm_pvz_server.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/vmm_pvz_server.c -@@ -0,0 +1,241 @@ -+/*************************************************************************/ /*! -+@File vmm_pvz_server.c -+@Title VM manager server para-virtualization handlers -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header provides VMM server para-virtz handler APIs -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
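The two client entry points declared above are only exercised with the dynamic PVZ bootstrap; when RGX_VZ_STATIC_CARVEOUT_FW_HEAPS is defined the host pre-maps the guest firmware heaps from a carveout and the server side rejects the hypercall. A minimal guest-side sketch of that split, where ExampleGuestDonateFwHeap() is a hypothetical wrapper and not part of the driver:

static PVRSRV_ERROR ExampleGuestDonateFwHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
{
#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
	/* Static bootstrap: the host already mapped this guest's FW heap from a
	 * static carveout, so there is nothing to donate. */
	PVR_UNREFERENCED_PARAMETER(psDevConfig);
	return PVRSRV_OK;
#else
	/* Dynamic bootstrap: hand the FW heap base and size to the host via the
	 * VM manager (guest -> vm manager -> host). */
	return PvzClientMapDevPhysHeap(psDevConfig);
#endif
}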
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "pvrsrv.h" -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+#include "rgxfwutils.h" -+ -+#include "vz_vm.h" -+#include "vmm_impl.h" -+#include "vz_vmm_pvz.h" -+#include "vmm_pvz_server.h" -+ -+static inline void -+PvzServerLockAcquire(void) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ OSLockAcquire(psPVRSRVData->hPvzConnectionLock); -+} -+ -+static inline void -+PvzServerLockRelease(void) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ OSLockRelease(psPVRSRVData->hPvzConnectionLock); -+} -+ -+#define VALIDATE_DRVID_DEVID(ui32DriverID, ui32DevID) do { \ -+ if ((ui32DriverID >= RGX_NUM_DRIVERS_SUPPORTED) || \ -+ (ui32DriverID < RGXFW_GUEST_DRIVER_ID_START)) \ -+ { \ -+ PVR_DPF((PVR_DBG_ERROR, \ -+ "%s: Invalid OSID %u. Supported Guest OSID range: %u - %u", \ -+ __func__, \ -+ ui32DriverID, \ -+ RGXFW_GUEST_DRIVER_ID_START, \ -+ RGX_NUM_DRIVERS_SUPPORTED-1)); \ -+ return PVRSRV_ERROR_INVALID_PARAMS; \ -+ } \ -+ if (PVRSRVGetDeviceInstance(ui32DevID) == NULL) \ -+ { \ -+ PVR_DPF((PVR_DBG_ERROR, \ -+ "%s: Invalid Device ID %u.", \ -+ __func__, \ -+ ui32DevID)); \ -+ return PVRSRV_ERROR_INVALID_PARAMS; \ -+ } \ -+} while (false); -+ -+/* -+ * =========================================================== -+ * The following server para-virtualization (pvz) functions -+ * are exclusively called by the VM manager (hypervisor) on -+ * behalf of guests to complete guest pvz calls -+ * (guest -> vm manager -> host) -+ * =========================================================== -+ */ -+ -+PVRSRV_ERROR -+PvzServerMapDevPhysHeap(IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DevID, -+ IMG_UINT64 ui64Size, -+ IMG_UINT64 ui64PAddr) -+{ -+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) -+ /* -+ * Reject hypercall if called on a system configured at build time to -+ * preallocate the Guest's firmware heaps from static carveout memory. -+ */ -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Host PVZ config: Does not match with Guest PVZ config." 
-+ " Host preallocates the Guest's FW physheap from static memory carveouts at startup.", __func__)); -+ return PVRSRV_ERROR_INVALID_PVZ_CONFIG; -+#else -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ VALIDATE_DRVID_DEVID(ui32DriverID, ui32DevID); -+ -+ PvzServerLockAcquire(); -+ -+#if defined(SUPPORT_RGX) -+ if (IsVmOnline(ui32DriverID, ui32DevID)) -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstance(ui32DevID); -+ IMG_DEV_PHYADDR sDevPAddr = {ui64PAddr}; -+ IMG_UINT32 sync; -+ -+ eError = RGXFwRawHeapAllocMap(psDeviceNode, ui32DriverID, sDevPAddr, ui64Size); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", e0); -+ -+ /* Invalidate MMU cache in preparation for a kick from this Guest */ -+ eError = psDeviceNode->pfnMMUCacheInvalidateKick(psDeviceNode, &sync); -+ PVR_LOG_GOTO_IF_ERROR(eError, "MMUCacheInvalidateKick", e0); -+ -+ /* Everything is ready for the firmware to start interacting with this OS */ -+ eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32DriverID, RGXFWIF_OS_ONLINE); -+ } -+e0: -+#endif /* defined(SUPPORT_RGX) */ -+ PvzServerLockRelease(); -+ -+ return eError; -+#endif -+} -+ -+PVRSRV_ERROR -+PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DevID) -+{ -+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) -+ /* -+ * Reject hypercall if called on a system configured at built time to -+ * preallocate the Guest's firmware heaps from static carveout memory. -+ */ -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: Host PVZ config: Does not match with Guest PVZ config\n" -+ " Host preallocates the Guest's FW physheap from static memory carveouts at startup.\n", __func__)); -+ return PVRSRV_ERROR_INVALID_PVZ_CONFIG; -+#else -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ VALIDATE_DRVID_DEVID(ui32DriverID, ui32DevID); -+ -+ PvzServerLockAcquire(); -+ -+#if defined(SUPPORT_RGX) -+ if (IsVmOnline(ui32DriverID, ui32DevID)) -+ { -+ PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstance(ui32DevID); -+ -+ /* Order firmware to offload this OS' data and stop accepting commands from it */ -+ eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32DriverID, RGXFWIF_OS_OFFLINE); -+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXFWSetFwOsState", e0); -+ -+ /* it is now safe to remove the Guest's memory mappings */ -+ RGXFwRawHeapUnmapFree(psDeviceNode, ui32DriverID); -+ } -+e0: -+#endif -+ -+ PvzServerLockRelease(); -+ -+ return eError; -+#endif -+} -+ -+/* -+ * ============================================================ -+ * The following server para-virtualization (pvz) functions -+ * are exclusively called by the VM manager (hypervisor) to -+ * pass side band information to the host (vm manager -> host) -+ * ============================================================ -+ */ -+ -+PVRSRV_ERROR -+PvzServerOnVmOnline(IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DevID) -+{ -+ PVRSRV_ERROR eError; -+ -+ VALIDATE_DRVID_DEVID(ui32DriverID, ui32DevID); -+ PvzServerLockAcquire(); -+ eError = PvzOnVmOnline(ui32DriverID, ui32DevID); -+ PvzServerLockRelease(); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+PvzServerOnVmOffline(IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DevID) -+{ -+ PVRSRV_ERROR eError; -+ -+ VALIDATE_DRVID_DEVID(ui32DriverID, ui32DevID); -+ PvzServerLockAcquire(); -+ eError = PvzOnVmOffline(ui32DriverID, ui32DevID); -+ PvzServerLockRelease(); -+ -+ return eError; -+} -+ -+PVRSRV_ERROR -+PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType, -+ IMG_UINT32 ui32ParamValue, -+ IMG_UINT32 ui32DevID) -+{ -+ PVRSRV_ERROR eError; -+ -+ VALIDATE_DRVID_DEVID(RGXFW_GUEST_DRIVER_ID_START, ui32DevID); 
-+ PvzServerLockAcquire(); -+ eError = PvzVMMConfigure(eVMMParamType, ui32ParamValue, ui32DevID); -+ PvzServerLockRelease(); -+ -+ return eError; -+} -+ -+/****************************************************************************** -+ End of file (vmm_pvz_server.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/vmm_pvz_server.h b/drivers/gpu/drm/img-rogue/vmm_pvz_server.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/vmm_pvz_server.h -@@ -0,0 +1,121 @@ -+/*************************************************************************/ /*! -+@File vmm_pvz_server.h -+@Title VM manager para-virtualization interface helper routines -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Header provides API(s) available to VM manager, this must be -+ called to close the loop during guest para-virtualization calls. -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef VMM_PVZ_SERVER_H -+#define VMM_PVZ_SERVER_H -+ -+#include "vmm_impl.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+ -+/*! -+******************************************************************************* -+ @Function PvzServerMapDevPhysHeap -+ @Description The VM manager calls this in response to guest PVZ interface -+ call pfnMapDevPhysHeap. -+ @Return PVRSRV_OK on success. 
Otherwise, a PVRSRV error code -+******************************************************************************/ -+PVRSRV_ERROR -+PvzServerMapDevPhysHeap(IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DevID, -+ IMG_UINT64 ui64Size, -+ IMG_UINT64 ui64PAddr); -+ -+/*! -+******************************************************************************* -+ @Function PvzServerUnmapDevPhysHeap -+ @Description The VM manager calls this in response to guest PVZ interface -+ call pfnUnmapDevPhysHeap. -+ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code -+******************************************************************************/ -+PVRSRV_ERROR -+PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DevID); -+ -+/*! -+******************************************************************************* -+ @Function PvzServerOnVmOnline -+ @Description The VM manager calls this when guest VM machine comes online. -+ The host driver will initialize the FW if it has not done so -+ already. -+ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code -+******************************************************************************/ -+PVRSRV_ERROR -+PvzServerOnVmOnline(IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DevID); -+ -+/*! -+******************************************************************************* -+ @Function PvzServerOnVmOffline -+ @Description The VM manager calls this when a guest VM machine is about to -+ go offline. The VM manager might have unmapped the GPU kick -+ register for such VM but not the GPU memory until the call -+ returns. Once the function returns, the FW does not hold any -+ reference for such VM and no workloads from it are running in -+ the GPU and it is safe to remove the memory for such VM. -+ @Return PVRSRV_OK on success. PVRSRV_ERROR_TIMEOUT if for some reason -+ the FW is taking too long to clean-up the resources of the -+ DriverID. Otherwise, a PVRSRV_ERROR code. -+******************************************************************************/ -+PVRSRV_ERROR -+PvzServerOnVmOffline(IMG_UINT32 ui32DriverID, -+ IMG_UINT32 ui32DevID); -+ -+/*! -+******************************************************************************* -+ @Function PvzServerVMMConfigure -+ @Description The VM manager calls this to configure several parameters like -+ HCS or isolation. -+ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code -+******************************************************************************/ -+PVRSRV_ERROR -+PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType, -+ IMG_UINT32 ui32ParamValue, -+ IMG_UINT32 ui32DevID); -+ -+#endif /* VMM_PVZ_SERVER_H */ -+ -+/****************************************************************************** -+ End of file (vmm_pvz_server.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/vmm_type_stub.c b/drivers/gpu/drm/img-rogue/vmm_type_stub.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/vmm_type_stub.c -@@ -0,0 +1,112 @@ -+/*************************************************************************/ /*! -+@File vmm_type_stub.c -+@Title Stub VM manager type -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description Sample stub (no-operation) VM manager implementation -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. 
-+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
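The stub backend that follows returns PVRSRV_ERROR_NOT_IMPLEMENTED from both client calls and simply routes the server and VMM tables to the PvzServer* handlers. A real platform port swaps the client half for functions that reach the host; a minimal sketch, where ExamplePlatformHypercall() and the call IDs are hypothetical stand-ins for whatever transport the hypervisor provides:

/* Hypothetical transport supplied by the hypervisor integration; not part of
 * this driver. */
PVRSRV_ERROR ExamplePlatformHypercall(IMG_UINT32 ui32CallID,
                                      IMG_UINT64 ui64Arg0,
                                      IMG_UINT64 ui64Arg1);

#define EXAMPLE_HCALL_MAP_FW_HEAP   1U  /* made-up call ID */
#define EXAMPLE_HCALL_UNMAP_FW_HEAP 2U  /* made-up call ID */

static PVRSRV_ERROR ExampleVMMMapDevPhysHeap(IMG_UINT64 ui64Size,
                                             IMG_UINT64 ui64Addr)
{
	/* Hand this guest's FW heap (device physical base and size) to the host. */
	return ExamplePlatformHypercall(EXAMPLE_HCALL_MAP_FW_HEAP, ui64Size, ui64Addr);
}

static PVRSRV_ERROR ExampleVMMUnmapDevPhysHeap(void)
{
	return ExamplePlatformHypercall(EXAMPLE_HCALL_UNMAP_FW_HEAP, 0, 0);
}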
-+*/ /**************************************************************************/ -+#include "pvrsrv.h" -+#include "img_types.h" -+#include "img_defs.h" -+#include "pvrsrv_error.h" -+#include "rgxheapconfig.h" -+ -+#include "vmm_impl.h" -+#include "vmm_pvz_server.h" -+ -+static PVRSRV_ERROR -+StubVMMMapDevPhysHeap(IMG_UINT64 ui64Size, -+ IMG_UINT64 ui64Addr) -+{ -+ PVR_UNREFERENCED_PARAMETER(ui64Size); -+ PVR_UNREFERENCED_PARAMETER(ui64Addr); -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+} -+ -+static PVRSRV_ERROR -+StubVMMUnmapDevPhysHeap(void) -+{ -+ return PVRSRV_ERROR_NOT_IMPLEMENTED; -+} -+ -+static VMM_PVZ_CONNECTION gsStubVmmPvz = -+{ -+ .sClientFuncTab = { -+ /* pfnMapDevPhysHeap */ -+ &StubVMMMapDevPhysHeap, -+ -+ /* pfnUnmapDevPhysHeap */ -+ &StubVMMUnmapDevPhysHeap -+ }, -+ -+ .sServerFuncTab = { -+ /* pfnMapDevPhysHeap */ -+ &PvzServerMapDevPhysHeap, -+ -+ /* pfnUnmapDevPhysHeap */ -+ &PvzServerUnmapDevPhysHeap -+ }, -+ -+ .sVmmFuncTab = { -+ /* pfnOnVmOnline */ -+ &PvzServerOnVmOnline, -+ -+ /* pfnOnVmOffline */ -+ &PvzServerOnVmOffline, -+ -+ /* pfnVMMConfigure */ -+ &PvzServerVMMConfigure -+ } -+}; -+ -+PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection) -+{ -+ PVR_LOG_RETURN_IF_FALSE((NULL != psPvzConnection), "VMMCreatePvzConnection", PVRSRV_ERROR_INVALID_PARAMS); -+ *psPvzConnection = &gsStubVmmPvz; -+ PVR_DPF((PVR_DBG_ERROR, "Using a stub VM manager type, no runtime VZ support")); -+ return PVRSRV_OK; -+} -+ -+void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection) -+{ -+ PVR_LOG_IF_FALSE((NULL != psPvzConnection), "VMMDestroyPvzConnection"); -+} -+ -+/****************************************************************************** -+ End of file (vmm_type_stub.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/vz_vm.h b/drivers/gpu/drm/img-rogue/vz_vm.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/vz_vm.h -@@ -0,0 +1,63 @@ -+/*************************************************************************/ /*! -+@File vz_vm.h -+@Title System virtualization VM support APIs -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description This header provides VM management support APIs -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef VZ_VM_H -+#define VZ_VM_H -+ -+#include "vmm_impl.h" -+ -+bool IsVmOnline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID); -+ -+PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID); -+ -+PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID); -+ -+PVRSRV_ERROR PvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, -+ IMG_UINT32 ui32ParamValue, -+ IMG_UINT32 ui32DevID); -+ -+#endif /* VZ_VM_H */ -+ -+/***************************************************************************** -+ End of file (vz_vm.h) -+*****************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/vz_vmm_pvz.c b/drivers/gpu/drm/img-rogue/vz_vmm_pvz.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/vz_vmm_pvz.c -@@ -0,0 +1,196 @@ -+/*************************************************************************/ /*! -+@File vz_vmm_pvz.c -+@Title VM manager para-virtualization APIs -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description VM manager para-virtualization management -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. 
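On the host side, the VM manager back-end drives guest lifecycle through PvzServerOnVmOnline/PvzServerOnVmOffline and tunes scheduling through PvzServerVMMConfigure, which land in the Pvz* helpers declared above. A sketch of the typical ordering when a guest VM boots; the driver ID handling is left to the caller and the HCS deadline value is an arbitrary illustration:

static PVRSRV_ERROR ExampleOnGuestVmBoot(IMG_UINT32 ui32GuestDriverID,
                                         IMG_UINT32 ui32DevID)
{
	PVRSRV_ERROR eError;

	/* Mark the guest online first; on the first online guest this also
	 * initialises the firmware if the host has not done so yet. */
	eError = PvzServerOnVmOnline(ui32GuestDriverID, ui32DevID);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* Optionally apply scheduling policy, e.g. a hard context switch (HCS)
	 * deadline for the firmware scheduler. */
	return PvzServerVMMConfigure(VMM_CONF_HCS_DEADLINE, 1000, ui32DevID);
}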
-+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#include "pvrsrv.h" -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv_error.h" -+#include "allocmem.h" -+#include "pvrsrv.h" -+#include "vz_vmm_pvz.h" -+ -+#if (RGX_NUM_DRIVERS_SUPPORTED > 1) -+static PVRSRV_ERROR -+PvzConnectionValidate(void) -+{ -+ VMM_PVZ_CONNECTION *psVmmPvz; -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ -+ /* -+ * Acquire the underlying VM manager PVZ connection & validate it. -+ */ -+ psVmmPvz = PvzConnectionAcquire(); -+ if (psVmmPvz == NULL) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: %s PVZ config: Unable to acquire PVZ connection", -+ __func__, PVRSRV_VZ_MODE_IS(GUEST) ? "Guest" : "Host")); -+ eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG; -+ goto e0; -+ } -+ -+ /* Log which PVZ setup type is being used by driver */ -+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) -+ /* -+ * Static PVZ bootstrap setup -+ * -+ * This setup uses carve-out memory, has no hypercall mechanism & does not support -+ * out-of-order initialisation of host/guest VMs/drivers. The host driver has all -+ * the information needed to initialize all Drivers firmware state when it's loaded -+ * and its PVZ layer must mark all guest Drivers as being online as part of its PVZ -+ * initialisation. Having no out-of-order initialisation support, the guest driver -+ * can only submit a workload to the device after the host driver has completely -+ * initialized the firmware, the VZ hypervisor/VM setup must guarantee this. -+ */ -+ PVR_LOG(("Using static PVZ bootstrap setup")); -+#else -+ /* -+ * Dynamic PVZ bootstrap setup -+ * -+ * This setup uses guest memory, has PVZ hypercall mechanism & supports out-of-order -+ * initialisation of host/guest VMs/drivers. The host driver initializes only its -+ * own Driver-0 firmware state when its loaded and each guest driver will use its PVZ -+ * interface to hypercall to the host driver to both synchronise its initialisation -+ * so it does not submit any workload to the firmware before the host driver has -+ * had a chance to initialize the firmware and to also initialize its own Driver-x -+ * firmware state. 
-+ */ -+ PVR_LOG(("Using dynamic PVZ bootstrap setup")); -+ -+ if (!PVRSRV_VZ_MODE_IS(GUEST) && -+ (psVmmPvz->sServerFuncTab.pfnMapDevPhysHeap == NULL || -+ psVmmPvz->sServerFuncTab.pfnUnmapDevPhysHeap == NULL)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, "%s: Host PVZ config: Functions for mapping a Guest's heaps not implemented\n", __func__)); -+ eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG; -+ } -+#endif -+ -+ PvzConnectionRelease(psVmmPvz); -+e0: -+ return eError; -+} -+#endif /* (RGX_NUM_DRIVERS_SUPPORTED > 1) */ -+ -+PVRSRV_ERROR PvzConnectionInit(void) -+{ -+ PVRSRV_ERROR eError; -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ -+#if (RGX_NUM_DRIVERS_SUPPORTED == 1) -+#if !defined(PVRSRV_NEED_PVR_DPF) -+ PVR_UNREFERENCED_PARAMETER(psPVRSRVData); -+# endif -+ PVR_DPF((PVR_DBG_ERROR, "This kernel driver does not support virtualization. Please rebuild with RGX_NUM_DRIVERS_SUPPORTED > 1")); -+ PVR_DPF((PVR_DBG_ERROR, "Halting initialisation, cannot transition to %s mode", -+ psPVRSRVData->eDriverMode == DRIVER_MODE_HOST ? "host" : "guest")); -+ eError = PVRSRV_ERROR_NOT_SUPPORTED; -+ goto e0; -+#else -+ -+ if ((psPVRSRVData->hPvzConnection != NULL) && -+ (psPVRSRVData->hPvzConnectionLock != NULL)) -+ { -+ eError = PVRSRV_OK; -+ PVR_DPF((PVR_DBG_MESSAGE, "PVzConnection already initialised.")); -+ goto e0; -+ } -+ -+ /* Create para-virtualization connection lock */ -+ eError = OSLockCreate(&psPVRSRVData->hPvzConnectionLock); -+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0); -+ -+ /* Create VM manager para-virtualization connection */ -+ eError = VMMCreatePvzConnection((VMM_PVZ_CONNECTION **)&psPVRSRVData->hPvzConnection); -+ if (eError != PVRSRV_OK) -+ { -+ OSLockDestroy(psPVRSRVData->hPvzConnectionLock); -+ psPVRSRVData->hPvzConnectionLock = NULL; -+ -+ PVR_LOG_ERROR(eError, "VMMCreatePvzConnection"); -+ goto e0; -+ } -+ -+ /* Ensure pvz connection is configured correctly */ -+ eError = PvzConnectionValidate(); -+ PVR_LOG_RETURN_IF_ERROR(eError, "PvzConnectionValidate"); -+#endif -+e0: -+ return eError; -+} -+ -+void PvzConnectionDeInit(void) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ -+ if ((psPVRSRVData->hPvzConnection == NULL) && -+ (psPVRSRVData->hPvzConnectionLock == NULL)) -+ { -+ PVR_DPF((PVR_DBG_MESSAGE, "PVzConnection already deinitialised.")); -+ return; -+ } -+ -+ VMMDestroyPvzConnection(psPVRSRVData->hPvzConnection); -+ psPVRSRVData->hPvzConnection = NULL; -+ -+ OSLockDestroy(psPVRSRVData->hPvzConnectionLock); -+ psPVRSRVData->hPvzConnectionLock = NULL; -+} -+ -+VMM_PVZ_CONNECTION* PvzConnectionAcquire(void) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ PVR_ASSERT(psPVRSRVData->hPvzConnection != NULL); -+ return psPVRSRVData->hPvzConnection; -+} -+ -+void PvzConnectionRelease(VMM_PVZ_CONNECTION *psParaVz) -+{ -+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); -+ /* Nothing to do, just validate the pointer we're passed back */ -+ PVR_ASSERT(psParaVz == psPVRSRVData->hPvzConnection); -+} -+ -+/****************************************************************************** -+ End of file (vz_vmm_pvz.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/vz_vmm_pvz.h b/drivers/gpu/drm/img-rogue/vz_vmm_pvz.h -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/vz_vmm_pvz.h -@@ -0,0 +1,79 @@ -+/*************************************************************************/ /*! 
-+@File vz_vmm_pvz.h -+@Title System virtualization VM manager management APIs -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description This header provides VM manager para-virtz management APIs -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -+*/ /**************************************************************************/ -+ -+#ifndef VZ_VMM_PVZ_H -+#define VZ_VMM_PVZ_H -+ -+#include "img_types.h" -+#include "vmm_impl.h" -+ -+/*! -+******************************************************************************* -+ @Function PvzConnectionInit() and PvzConnectionDeInit() -+ @Description PvzConnectionInit initializes the VM manager para-virt -+ which is used subsequently for communication between guest and -+ host; depending on the underlying VM setup, this could either -+ be a hyper-call or cross-VM call -+ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code -+******************************************************************************/ -+PVRSRV_ERROR PvzConnectionInit(void); -+void PvzConnectionDeInit(void); -+ -+/*! 
-+******************************************************************************* -+ @Function PvzConnectionAcquire() and PvzConnectionRelease() -+ @Description These are to acquire/release a handle to the VM manager -+ para-virtz connection to make a pvz call; on the client, use it -+ to make the actual pvz call and on the server handler / -+ VM manager, use it to complete the processing for the pvz call -+ or make a VM manager to host pvzbridge call -+@Return VMM_PVZ_CONNECTION* on success. Otherwise NULL -+******************************************************************************/ -+VMM_PVZ_CONNECTION* PvzConnectionAcquire(void); -+void PvzConnectionRelease(VMM_PVZ_CONNECTION *psPvzConnection); -+ -+#endif /* VZ_VMM_PVZ_H */ -+ -+/****************************************************************************** -+ End of file (vz_vmm_pvz.h) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/img-rogue/vz_vmm_vm.c b/drivers/gpu/drm/img-rogue/vz_vmm_vm.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/img-rogue/vz_vmm_vm.c -@@ -0,0 +1,294 @@ -+/*************************************************************************/ /*! -+@File vz_vmm_vm.c -+@Title System virtualization VM support APIs -+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved -+@Description System virtualization VM support functions -+@License Dual MIT/GPLv2 -+ -+The contents of this file are subject to the MIT license as set out below. -+ -+Permission is hereby granted, free of charge, to any person obtaining a copy -+of this software and associated documentation files (the "Software"), to deal -+in the Software without restriction, including without limitation the rights -+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -+copies of the Software, and to permit persons to whom the Software is -+furnished to do so, subject to the following conditions: -+ -+The above copyright notice and this permission notice shall be included in -+all copies or substantial portions of the Software. -+ -+Alternatively, the contents of this file may be used under the terms of -+the GNU General Public License Version 2 ("GPL") in which case the provisions -+of GPL are applicable instead of those above. -+ -+If you wish to allow use of your version of this file only under the terms of -+GPL, and not to allow others to use your version of this file under the terms -+of the MIT license, indicate your decision by deleting the provisions above -+and replace them with the notice and other provisions required by GPL as set -+out in the file called "GPL-COPYING" included in this distribution. If you do -+not delete the provisions above, a recipient may use your version of this file -+under the terms of either the MIT license or GPL. -+ -+This License is also included in this distribution in the file called -+"MIT-COPYING". -+ -+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS -+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR -+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+*/ /**************************************************************************/ -+#include "osfunc.h" -+#include "pvrsrv.h" -+#include "img_defs.h" -+#include "img_types.h" -+#include "pvrsrv.h" -+#include "pvrsrv_error.h" -+#include "vz_vm.h" -+#include "rgxfwutils.h" -+#include "rgxfwdbg.h" -+ -+bool IsVmOnline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID) -+{ -+ PVRSRV_DEVICE_NODE *psDevNode = PVRSRVGetDeviceInstance(ui32DevID); -+ -+ if (psDevNode == NULL) -+ { -+ return false; -+ } -+ else -+ { -+ return BIT_ISSET(psDevNode->ui32VmState, ui32DriverID); -+ } -+} -+ -+PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID) -+{ -+#if !defined(RGX_NUM_DRIVERS_SUPPORTED) || (RGX_NUM_DRIVERS_SUPPORTED == 1) -+ PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS; -+#else -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_DEVICE_NODE *psDevNode; -+ -+ psDevNode = PVRSRVGetDeviceInstance(ui32DevID); -+ -+ if (psDevNode == NULL) -+ { -+ eError = PVRSRV_ERROR_NO_DEVICENODE_FOUND; -+ goto e0; -+ } -+ else if (BIT_ISSET(psDevNode->ui32VmState, ui32DriverID)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: DriverID %u on Device %u is already enabled.", -+ __func__, ui32DriverID, ui32DevID)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto e0; -+ } -+ -+ if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_CREATED) -+ { -+ /* Firmware not initialized yet, do it here */ -+ eError = PVRSRVCommonDeviceInitialise(psDevNode); -+ if (eError != PVRSRV_OK) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: failed to initialize firmware (%s)", -+ __func__, PVRSRVGetErrorString(eError))); -+ goto e0; -+ } -+ } -+ -+ eError = RGXFWHealthCheckCmd(psDevNode->pvDevice); -+ if (eError != PVRSRV_OK) -+ { -+ goto e0; -+ } -+ -+ BIT_SET(psDevNode->ui32VmState, ui32DriverID); -+ -+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) -+ /* Everything is ready for the firmware to start interacting with this OS */ -+ eError = RGXFWSetFwOsState(psDevNode->pvDevice, ui32DriverID, RGXFWIF_OS_ONLINE); -+#endif -+ -+e0: -+#endif -+ return eError; -+} -+ -+PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID) -+{ -+#if !defined(RGX_NUM_DRIVERS_SUPPORTED) || (RGX_NUM_DRIVERS_SUPPORTED == 1) -+ PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS; -+#else -+ PVRSRV_ERROR eError = PVRSRV_OK; -+ PVRSRV_DEVICE_NODE *psDevNode; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ -+ psDevNode = PVRSRVGetDeviceInstance(ui32DevID); -+ -+ if (psDevNode == NULL) -+ { -+ eError = PVRSRV_ERROR_NO_DEVICENODE_FOUND; -+ goto e0; -+ } -+ else if (!BIT_ISSET(psDevNode->ui32VmState, ui32DriverID)) -+ { -+ PVR_DPF((PVR_DBG_ERROR, -+ "%s: DriverID %u on Device %u is already disabled.", -+ __func__, ui32DriverID, ui32DevID)); -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ goto e0; -+ } -+ -+ psDevInfo = psDevNode->pvDevice; -+ if (psDevInfo == NULL) -+ { -+ eError = PVRSRV_ERROR_INVALID_DEVINFO; -+ goto e0; -+ } -+ -+ eError = RGXFWSetFwOsState(psDevInfo, ui32DriverID, RGXFWIF_OS_OFFLINE); -+ if (eError == PVRSRV_OK) -+ { -+ BIT_UNSET(psDevNode->ui32VmState, ui32DriverID); -+ } -+ -+e0: -+#endif -+ return eError; -+} -+ -+PVRSRV_ERROR PvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, -+ IMG_UINT32 ui32ParamValue, -+ IMG_UINT32 ui32DevID) -+{ -+#if defined(SUPPORT_RGX) -+ PVRSRV_DEVICE_NODE *psDevNode; -+ PVRSRV_RGXDEV_INFO *psDevInfo; -+ PVRSRV_ERROR eError; -+ -+ psDevNode = PVRSRVGetDeviceInstance(ui32DevID); -+ if (psDevNode == NULL) -+ { -+ eError = PVRSRV_ERROR_NO_DEVICENODE_FOUND; -+ goto e0; -+ } -+ -+ psDevInfo = psDevNode->pvDevice; -+ if (psDevInfo == NULL) 
-+ { -+ eError = PVRSRV_ERROR_INVALID_DEVINFO; -+ goto e0; -+ } -+ -+ switch (eVMMParamType) -+ { -+ case VMM_CONF_PRIO_DRV0: -+ case VMM_CONF_PRIO_DRV1: -+ case VMM_CONF_PRIO_DRV2: -+ case VMM_CONF_PRIO_DRV3: -+ case VMM_CONF_PRIO_DRV4: -+ case VMM_CONF_PRIO_DRV5: -+ case VMM_CONF_PRIO_DRV6: -+ case VMM_CONF_PRIO_DRV7: -+ { -+ IMG_UINT32 ui32DriverID = eVMMParamType; -+ IMG_UINT32 ui32Prio = ui32ParamValue; -+ -+ if (ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED) -+ { -+ eError = PVRSRVRGXFWDebugSetDriverPriorityKM(NULL, psDevNode, ui32DriverID, ui32Prio); -+ } -+ else -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ break; -+ } -+ case VMM_CONF_HCS_DEADLINE: -+ { -+ IMG_UINT32 ui32HCSDeadline = ui32ParamValue; -+ eError = RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadline); -+ break; -+ } -+ case VMM_CONF_ISOLATION_GROUP_DRV0: -+ case VMM_CONF_ISOLATION_GROUP_DRV1: -+ case VMM_CONF_ISOLATION_GROUP_DRV2: -+ case VMM_CONF_ISOLATION_GROUP_DRV3: -+ case VMM_CONF_ISOLATION_GROUP_DRV4: -+ case VMM_CONF_ISOLATION_GROUP_DRV5: -+ case VMM_CONF_ISOLATION_GROUP_DRV6: -+ case VMM_CONF_ISOLATION_GROUP_DRV7: -+ { -+ IMG_UINT32 ui32DriverID = eVMMParamType - VMM_CONF_ISOLATION_GROUP_DRV0; -+ IMG_UINT32 ui32IsolationGroup = ui32ParamValue; -+ -+ if (ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED) -+ { -+ eError = PVRSRVRGXFWDebugSetDriverIsolationGroupKM(NULL, psDevNode, ui32DriverID, ui32IsolationGroup); -+ } -+ else -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ break; -+ } -+ case VMM_CONF_TIME_SLICE_DRV0: -+ case VMM_CONF_TIME_SLICE_DRV1: -+ case VMM_CONF_TIME_SLICE_DRV2: -+ case VMM_CONF_TIME_SLICE_DRV3: -+ case VMM_CONF_TIME_SLICE_DRV4: -+ case VMM_CONF_TIME_SLICE_DRV5: -+ case VMM_CONF_TIME_SLICE_DRV6: -+ case VMM_CONF_TIME_SLICE_DRV7: -+ { -+ IMG_UINT32 ui32DriverID = eVMMParamType - VMM_CONF_TIME_SLICE_DRV0; -+ IMG_UINT32 ui32TimeSlice = ui32ParamValue; -+ -+ if (ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED) -+ { -+ eError = PVRSRVRGXFWDebugSetDriverTimeSliceKM(NULL, psDevNode, ui32DriverID, ui32TimeSlice); -+ } -+ else -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ break; -+ } -+ case VMM_CONF_TIME_SLICE_INTERVAL: -+ { -+ IMG_UINT32 ui32TimeSliceInterval = ui32ParamValue; -+ -+ eError = PVRSRVRGXFWDebugSetDriverTimeSliceIntervalKM(NULL, psDevNode, ui32TimeSliceInterval); -+ break; -+ } -+ case VMM_CONF_VZ_CONNECTION_COOLDOWN_PERIOD: -+ { -+ IMG_UINT32 ui32VzConnectionCooldownPeriodInSec = ui32ParamValue; -+ eError = RGXFWSetVzConnectionCooldownPeriod(psDevInfo, ui32VzConnectionCooldownPeriodInSec); -+ break; -+ } -+ default: -+ { -+ eError = PVRSRV_ERROR_INVALID_PARAMS; -+ } -+ } -+ -+e0: -+ return eError; -+#else -+ PVR_UNREFERENCED_PARAMETER(eVMMParamType); -+ PVR_UNREFERENCED_PARAMETER(ui32ParamValue); -+ PVR_UNREFERENCED_PARAMETER(ui32DevID); -+ -+ return PVRSRV_ERROR_INVALID_PARAMS; -+#endif -+} -+ -+/****************************************************************************** -+ End of file (vz_vmm_vm.c) -+******************************************************************************/ -diff --git a/drivers/gpu/drm/spacemit/Kconfig b/drivers/gpu/drm/spacemit/Kconfig -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/spacemit/Kconfig -@@ -0,0 +1,36 @@ -+# SPDX-License-Identifier: GPL-2.0 -+ -+config DRM_SPACEMIT -+ tristate "DRM Support for Spacemit" -+ select DRM -+ select DRM_KMS_HELPER -+ select DRM_GEM_CMA_HELPER -+ select DRM_KMS_CMA_HELPER -+ select DRM_MIPI_DSI -+ select DRM_PANEL -+ select VIDEOMODE_HELPERS -+ select 
BACKLIGHT_CLASS_DEVICE -+ select GKI_FIX_WORKAROUND if DRM_SPACEMIT=m -+ default n -+ help -+ Choose this option if you have a Spacemit soc chipsets. -+ This driver provides Spacemit kernel mode -+ setting and buffer management. If M is selected the module will be called spacemit-drm. -+ -+config SPACEMIT_MIPI_PANEL -+ tristate "MIPI Panel Support For Spacemit" -+ depends on DRM_SPACEMIT -+ -+config DRM_LT8911EXB -+ tristate "Lontium LT8911EXB DSI to eDP" -+ default y -+ depends on DRM_SPACEMIT -+ select DRM_KMS_HELPER -+ select REGMAP_I2C -+ select DRM_PANEL -+ select DRM_MIPI_DSI -+ select AUXILIARY_BUS -+ select DRM_DP_AUX_BUS -+ -+ help -+ Support for Lontium LT8911EXB DSI to eDP driver. -\ No newline at end of file -diff --git a/drivers/gpu/drm/spacemit/Makefile b/drivers/gpu/drm/spacemit/Makefile -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/spacemit/Makefile -@@ -0,0 +1,32 @@ -+# SPDX-License-Identifier: GPL-2.0 -+ -+spacemit-drm-y := spacemit_drm.o \ -+ spacemit_cmdlist.o \ -+ spacemit_dpu.o \ -+ spacemit_planes.o \ -+ spacemit_hdmi.o \ -+ spacemit_dsi.o \ -+ spacemit_wb.o \ -+ spacemit_dphy.o \ -+ spacemit_lib.o \ -+ spacemit_gem.o \ -+ spacemit_dmmu.o \ -+ spacemit_bootloader.o \ -+ sysfs/sysfs_class.o \ -+ sysfs/sysfs_dpu.o \ -+ sysfs/sysfs_dsi.o \ -+ sysfs/sysfs_dphy.o \ -+ sysfs/sysfs_mipi_panel.o \ -+ dpu/dpu_debug.o \ -+ dpu/dpu_saturn.o \ -+ dpu/saturn_fbcmem.o \ -+ dsi/spacemit_dsi_drv.o \ -+ dsi/spacemit_dptc_drv.o \ -+ dphy/spacemit_dphy_drv.o -+ -+ -+obj-$(CONFIG_DRM_SPACEMIT) += spacemit-drm.o -+ -+obj-$(CONFIG_DRM_LT8911EXB) += lt8911exb.o -+ -+obj-$(CONFIG_SPACEMIT_MIPI_PANEL) += spacemit_mipi_panel.o -diff --git a/drivers/gpu/drm/spacemit/dphy/spacemit_dphy_drv.c b/drivers/gpu/drm/spacemit/dphy/spacemit_dphy_drv.c -new file mode 100644 -index 000000000000..111111111111 ---- /dev/null -+++ b/drivers/gpu/drm/spacemit/dphy/spacemit_dphy_drv.c -@@ -0,0 +1,481 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2023 Spacemit Co., Ltd. 
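For reference, the Kconfig and Makefile above build the display stack as one spacemit-drm module, with the Lontium LT8911EXB bridge and the MIPI panel driver as separate modules. A typical configuration fragment, with illustrative values assuming a modular build:

CONFIG_DRM_SPACEMIT=m
CONFIG_SPACEMIT_MIPI_PANEL=m
CONFIG_DRM_LT8911EXB=m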
-+ * -+ */ -+ -+#include -+#include -+#include -+ -+#include "../dsi/spacemit_dsi_hw.h" -+#include "../spacemit_dphy.h" -+#include "../dsi/spacemit_dptc_drv.h" -+ -+static unsigned int spacemit_dphy_lane[5] = {0, 0x1, 0x3, 0x7, 0xf}; -+ -+static void dphy_ana_reset(void __iomem *base_addr) -+{ -+ dsi_clear_bits(base_addr, DSI_PHY_ANA_PWR_CTRL, CFG_DPHY_ANA_RESET); -+ udelay(5); -+ dsi_set_bits(base_addr, DSI_PHY_ANA_PWR_CTRL, CFG_DPHY_ANA_RESET); -+} -+ -+static void dphy_set_power(void __iomem *base_addr, bool poweron) -+{ -+ if(poweron) { -+ dsi_set_bits(base_addr, DSI_PHY_ANA_PWR_CTRL, CFG_DPHY_ANA_RESET); -+ dsi_set_bits(base_addr, DSI_PHY_ANA_PWR_CTRL, CFG_DPHY_ANA_PU); -+ } else { -+ dsi_clear_bits(base_addr, DSI_PHY_ANA_PWR_CTRL, CFG_DPHY_ANA_PU); -+ dsi_clear_bits(base_addr, DSI_PHY_ANA_PWR_CTRL, CFG_DPHY_ANA_RESET); -+ } -+} -+ -+static void dphy_set_cont_clk(void __iomem *base_addr, bool cont_clk) -+{ -+#ifdef DPTC_DPHY_TEST -+ uint32_t tmp; -+ -+ if(cont_clk) { -+ tmp = dptc_dsi_read(0x04); -+ tmp |= CFG_DPHY_CONT_CLK; -+ //dptc_dsi_write(0x04, tmp); -+ } else { -+ tmp = dptc_dsi_read(0x04); -+ tmp &= (~CFG_DPHY_CONT_CLK); -+ //dptc_dsi_write(0x04, tmp); -+ } -+ dptc_dsi_write(0x04, 0x30001); -+#else -+ if(cont_clk) -+ dsi_set_bits(base_addr, DSI_PHY_CTRL_1, CFG_DPHY_CONT_CLK); -+ else -+ dsi_clear_bits(base_addr, DSI_PHY_CTRL_1, CFG_DPHY_CONT_CLK); -+ -+ dsi_set_bits(base_addr, DSI_PHY_CTRL_1, CFG_DPHY_ADD_VALID); -+ dsi_set_bits(base_addr, DSI_PHY_CTRL_1, CFG_DPHY_VDD_VALID); -+#endif -+} -+ -+static void dphy_set_lane_num(void __iomem *base_addr, uint32_t lane_num) -+{ -+#ifdef DPTC_DPHY_TEST -+ uint32_t tmp; -+ -+ tmp = dptc_dsi_read(0x08); -+ tmp &= ~CFG_DPHY_LANE_EN_MASK; -+ tmp |= spacemit_dphy_lane[lane_num] << CFG_DPHY_LANE_EN_SHIFT; -+ dptc_dsi_write(0x08, 0); -+ tmp = dptc_dsi_read(0x08); -+ dptc_dsi_write(0x08, 0x30); -+#endif -+ dsi_write_bits(base_addr, DSI_PHY_CTRL_2, -+ CFG_DPHY_LANE_EN_MASK, spacemit_dphy_lane[lane_num] << CFG_DPHY_LANE_EN_SHIFT); -+} -+ -+static void dphy_set_bit_clk_src(void __iomem *base_addr, uint32_t bit_clk_src, -+ uint32_t half_pll5) -+{ -+#ifdef DPTC_DPHY_TEST -+ uint32_t tmp; -+#endif -+ -+ if(bit_clk_src >= DPHY_BIT_CLK_SRC_MAX) { -+ pr_err("%s: Invalid bit clk src (%d)\n", __func__, bit_clk_src); -+ return; -+ } -+ -+#ifdef DPTC_DPHY_TEST -+ //if(bit_clk_src == DPHY_BIT_CLK_SRC_MUX) { -+ if(0) { -+ tmp = dptc_dsi_read(0x68); -+ tmp |= CFG_CLK_SEL; -+ dptc_dsi_write(0x68,tmp); -+ } else { -+ tmp = dptc_dsi_read(0x68); -+ tmp &= ~CFG_CLK_SEL; -+ dptc_dsi_write(0x68,tmp); -+ } -+ -+ //if(1 == half_pll5) { -+ if(0) { -+ tmp = dptc_dsi_read(0x68); -+ tmp |= CFG_CLK_DIV2; -+ dptc_dsi_write(0x68,tmp); -+ } else { -+ tmp = dptc_dsi_read(0x68); -+ tmp &= ~CFG_CLK_DIV2; -+ dptc_dsi_write(0x68,tmp); -+ } -+#else -+#if 0 -+ if(bit_clk_src == DPHY_BIT_CLK_SRC_MUX) -+ dsi_set_bits(base_addr, DSI_PHY_ANA_CTRL1, CFG_CLK_SEL); -+ else -+ dsi_clear_bits(base_addr, DSI_PHY_ANA_CTRL1, CFG_CLK_SEL); -+ -+ if(1 == half_pll5) -+ dsi_set_bits(base_addr, DSI_PHY_ANA_CTRL1, CFG_CLK_DIV2); -+ else -+ dsi_clear_bits(base_addr, DSI_PHY_ANA_CTRL1, CFG_CLK_DIV2); -+#else -+ /* -+ dsi_set_bits(base_addr, DSI_PHY_ANA_CTRL1, CFG_CLK_SEL); -+ dsi_clear_bits(base_addr, DSI_PHY_ANA_CTRL1, CFG_CLK_DIV2); -+ */ -+#endif -+#endif -+} -+ -+static void dphy_set_timing(struct spacemit_dphy_ctx *dphy_ctx) -+{ -+ uint32_t bitclk, lpx_clk, lpx_time, ta_get, ta_go; -+ int ui, wakeup, reg; -+ int hs_prep, hs_zero, hs_trail, hs_exit, ck_zero, ck_trail, ck_exit; -+ int esc_clk, 
esc_clk_t; -+ struct spacemit_dphy_timing *phy_timing; -+ uint32_t value; -+ -+ if(NULL == dphy_ctx) { -+ pr_err("%s: Invalid param!\n", __func__); -+ return; -+ } -+ -+ phy_timing = &(dphy_ctx->dphy_timing); -+ -+ DRM_DEBUG("%s() phy_freq %d esc_clk %d \n", __func__, dphy_ctx->phy_freq, dphy_ctx->esc_clk); -+ -+ esc_clk = dphy_ctx->esc_clk/1000; -+ esc_clk_t = 1000/esc_clk; -+ -+ bitclk = dphy_ctx->phy_freq / 1000; -+ ui = 1000/bitclk + 1; -+ -+ lpx_clk = (phy_timing->lpx_constant + phy_timing->lpx_ui * ui) / esc_clk_t + 1; -+ lpx_time = lpx_clk * esc_clk_t; -+ -+ /* Below is for NT35451 */ -+ ta_get = lpx_time * 5 / esc_clk_t - 1; -+ ta_go = lpx_time * 4 / esc_clk_t - 1; -+ -+ wakeup = phy_timing->wakeup_constant; -+ wakeup = wakeup / esc_clk_t + 1; -+ -+ hs_prep = phy_timing->hs_prep_constant + phy_timing->hs_prep_ui * ui; -+ hs_prep = hs_prep / esc_clk_t + 1; -+ -+ /* Our hardware added 3-byte clk automatically. -+ * 3-byte 3 * 8 * ui. -+ */ -+ hs_zero = phy_timing->hs_zero_constant + phy_timing->hs_zero_ui * ui - -+ (hs_prep + 1) * esc_clk_t; -+ hs_zero = (hs_zero - (3 * ui << 3)) / esc_clk_t + 4; -+ if (hs_zero < 0) -+ hs_zero = 0; -+ -+ hs_trail = phy_timing->hs_trail_constant + phy_timing->hs_trail_ui * ui; -+ hs_trail = ((8 * ui) >= hs_trail) ? (8 * ui) : hs_trail; -+ hs_trail = hs_trail / esc_clk_t + 1; -+ if (hs_trail > 3) -+ hs_trail -= 3; -+ else -+ hs_trail = 0; -+ -+ hs_exit = phy_timing->hs_exit_constant + phy_timing->hs_exit_ui * ui; -+ hs_exit = hs_exit / esc_clk_t + 1; -+ -+ ck_zero = phy_timing->ck_zero_constant + phy_timing->ck_zero_ui * ui - -+ (hs_prep + 1) * esc_clk_t; -+ ck_zero = ck_zero / esc_clk_t + 1; -+ -+ ck_trail = phy_timing->ck_trail_constant + phy_timing->ck_trail_ui * ui; -+ ck_trail = ck_trail / esc_clk_t + 1; -+ -+ ck_exit = hs_exit; -+ -+ reg = (hs_exit << CFG_DPHY_TIME_HS_EXIT_SHIFT) -+ | (hs_trail << CFG_DPHY_TIME_HS_TRAIL_SHIFT) -+ | (hs_zero << CFG_DPHY_TIME_HS_ZERO_SHIFT) -+ | (hs_prep << CFG_DPHY_TIME_HS_PREP_SHIFT); -+ -+ DRM_DEBUG("%s dphy time0 hs_exit %d hs_trail %d hs_zero %d hs_prep %d reg 0x%x\n", __func__, hs_exit, hs_trail, hs_zero, hs_prep, reg); -+#ifdef DPTC_DPHY_TEST -+ dptc_dsi_write(0x40 , 0x01000000); -+#else -+ dsi_write(dphy_ctx->base_addr, DSI_PHY_TIME_0, reg); -+ // dsi_write(dphy_ctx->base_addr, DSI_PHY_TIME_0, 0x06010603); -+#endif -+ -+ reg = (ta_get << CFG_DPHY_TIME_TA_GET_SHIFT) -+ | (ta_go << CFG_DPHY_TIME_TA_GO_SHIFT) -+ | (wakeup << CFG_DPHY_TIME_WAKEUP_SHIFT); -+ -+ DRM_INFO("%s dphy time1 ta_get %d ta_go %d wakeup %d reg 0x%x\n", __func__, ta_get, ta_go, wakeup, reg); -+#ifdef DPTC_DPHY_TEST -+ dptc_dsi_write(0x44, 0x0403001F); -+#else -+ dsi_write(dphy_ctx->base_addr, DSI_PHY_TIME_1, reg); -+ // dsi_write(dphy_ctx->base_addr, DSI_PHY_TIME_1, 0x130fcd98); -+#endif -+ reg = (ck_exit << CFG_DPHY_TIME_CLK_EXIT_SHIFT) -+ | (ck_trail << CFG_DPHY_TIME_CLK_TRAIL_SHIFT) -+ | (ck_zero << CFG_DPHY_TIME_CLK_ZERO_SHIFT) -+ | (lpx_clk << CFG_DPHY_TIME_CLK_LPX_SHIFT); -+ -+ DRM_INFO("%s dphy time2 ck_exit %d ck_trail %d ck_zero %d lpx_clk %d reg 0x%x\n", __func__, ck_exit, ck_trail, ck_zero, lpx_clk, reg); -+#ifdef DPTC_DPHY_TEST -+ dptc_dsi_write(0x48, 0x02010500); -+#else -+ dsi_write(dphy_ctx->base_addr, DSI_PHY_TIME_2, reg); -+ // dsi_write(dphy_ctx->base_addr, DSI_PHY_TIME_2, 0x06040c04); -+#endif -+ -+ reg = (lpx_clk << CFG_DPHY_TIME_LPX_SHIFT) -+ | phy_timing->req_ready << CFG_DPHY_TIME_REQRDY_SHIFT; -+ -+ DRM_INFO("%s dphy time3 lpx_clk %d req_ready %d reg 0x%x\n", __func__, lpx_clk, phy_timing->req_ready, reg); -+#ifdef 
DPTC_DPHY_TEST
-+ dptc_dsi_write(0x4c, 0x001F);
-+#else
-+ dsi_write(dphy_ctx->base_addr, DSI_PHY_TIME_3, reg);
-+ // dsi_write(dphy_ctx->base_addr, DSI_PHY_TIME_3, 0x43c);
-+#endif
-+ /* calculated timing on brownstone:
-+ * DSI_PHY_TIME_0 0x06080204
-+ * DSI_PHY_TIME_1 0x6d2bfff0
-+ * DSI_PHY_TIME_2 0x603130a
-+ * DSI_PHY_TIME_3 0xa3c
-+ */
-+
-+ value = dsi_read(dphy_ctx->base_addr, DSI_PHY_TIME_0);
-+ DRM_DEBUG("%s() DSI_PHY_TIME_0 offset 0x%x value 0x%x\n", __func__, DSI_PHY_TIME_0, value);
-+ value = dsi_read(dphy_ctx->base_addr, DSI_PHY_TIME_1);
-+ DRM_DEBUG("%s() DSI_PHY_TIME_1 offset 0x%x value 0x%x\n", __func__, DSI_PHY_TIME_1, value);
-+ value = dsi_read(dphy_ctx->base_addr, DSI_PHY_TIME_2);
-+ DRM_DEBUG("%s() DSI_PHY_TIME_2 offset 0x%x value 0x%x\n", __func__, DSI_PHY_TIME_2, value);
-+ value = dsi_read(dphy_ctx->base_addr, DSI_PHY_TIME_3);
-+ DRM_DEBUG("%s() DSI_PHY_TIME_3 offset 0x%x value 0x%x\n", __func__, DSI_PHY_TIME_3, value);
-+}
-+
-+static void dphy_get_setting(struct spacemit_dphy_ctx *dphy_ctx, struct device_node *np)
-+{
-+ struct spacemit_dphy_timing *dphy_timing = &dphy_ctx->dphy_timing;
-+ int ret;
-+
-+ if(NULL == dphy_timing){
-+ pr_err("%s: Invalid param\n",__func__);
-+ return;
-+ }
-+
-+ ret = of_property_read_u32(np, "hs_prep_constant", &dphy_timing->hs_prep_constant);
-+ if(0 != ret)
-+ dphy_timing->hs_prep_constant = HS_PREP_CONSTANT_DEFAULT;
-+
-+ ret = of_property_read_u32(np, "hs_prep_ui", &dphy_timing->hs_prep_ui);
-+ if(0 != ret)
-+ dphy_timing->hs_prep_ui = HS_PREP_UI_DEFAULT;
-+
-+ ret = of_property_read_u32(np, "hs_zero_constant", &dphy_timing->hs_zero_constant);
-+ if(0 != ret)
-+ dphy_timing->hs_zero_constant = HS_ZERO_CONSTANT_DEFAULT;
-+
-+ ret = of_property_read_u32(np, "hs_zero_ui", &dphy_timing->hs_zero_ui);
-+ if(0 != ret)
-+ dphy_timing->hs_zero_ui = HS_ZERO_UI_DEFAULT;
-+
-+ ret = of_property_read_u32(np, "hs_trail_constant", &dphy_timing->hs_trail_constant);
-+ if(0 != ret)
-+ dphy_timing->hs_trail_constant = HS_TRAIL_CONSTANT_DEFAULT;
-+
-+ ret = of_property_read_u32(np, "hs_trail_ui", &dphy_timing->hs_trail_ui);
-+ if(0 != ret)
-+ dphy_timing->hs_trail_ui = HS_TRAIL_UI_DEFAULT;
-+
-+ ret = of_property_read_u32(np, "hs_exit_constant", &dphy_timing->hs_exit_constant);
-+ if(0 != ret)
-+ dphy_timing->hs_exit_constant = HS_EXIT_CONSTANT_DEFAULT;
-+
-+ ret = of_property_read_u32(np, "hs_exit_ui", &dphy_timing->hs_exit_ui);
-+ if(0 != ret)
-+ dphy_timing->hs_exit_ui = HS_EXIT_UI_DEFAULT;
-+
-+ ret = of_property_read_u32(np, "ck_zero_constant", &dphy_timing->ck_zero_constant);
-+ if(0 != ret)
-+ dphy_timing->ck_zero_constant = CK_ZERO_CONSTANT_DEFAULT;
-+
-+ ret = of_property_read_u32(np, "ck_zero_ui", &dphy_timing->ck_zero_ui);
-+ if(0 != ret)
-+ dphy_timing->ck_zero_ui = CK_ZERO_UI_DEFAULT;
-+
-+ ret = of_property_read_u32(np, "ck_trail_constant", &dphy_timing->ck_trail_constant);
-+ if(0 != ret)
-+ dphy_timing->ck_trail_constant = CK_TRAIL_CONSTANT_DEFAULT;
-+
-+ ret = of_property_read_u32(np, "ck_zero_ui", &dphy_timing->ck_zero_ui);
-+ if(0 != ret)
-+ dphy_timing->ck_zero_ui = CK_TRAIL_UI_DEFAULT;
-+
-+ ret = of_property_read_u32(np, "req_ready", &dphy_timing->req_ready);
-+ if(0 != ret)
-+ dphy_timing->req_ready = REQ_READY_DEFAULT;
-+
-+ ret = of_property_read_u32(np, "wakeup_constant", &dphy_timing->wakeup_constant);
-+ if(0 != ret)
-+ dphy_timing->wakeup_constant = WAKEUP_CONSTANT_DEFAULT;
-+
-+ ret = of_property_read_u32(np, "wakeup_ui", &dphy_timing->wakeup_ui);
-+ if(0 != ret)
-+ dphy_timing->wakeup_ui = WAKEUP_UI_DEFAULT;
-+
-+ ret = of_property_read_u32(np, "lpx_constant", &dphy_timing->lpx_constant);
-+ if(0 != ret)
-+ dphy_timing->lpx_constant = LPX_CONSTANT_DEFAULT;
-+
-+ ret = of_property_read_u32(np, "lpx_ui", &dphy_timing->lpx_ui);
-+ if(0 != ret)
-+ dphy_timing->lpx_ui = LPX_UI_DEFAULT;
-+}
-+
-+
-+void spacemit_dphy_core_get_status(struct spacemit_dphy_ctx *dphy_ctx)
-+{
-+ pr_debug("%s\n", __func__);
-+
-+ if(NULL == dphy_ctx){
-+ pr_err("%s: Invalid param\n", __func__);
-+ return;
-+ }
-+
-+ dphy_ctx->dphy_status0 = dsi_read(dphy_ctx->base_addr, DSI_PHY_STATUS_0);
-+ dphy_ctx->dphy_status1 = dsi_read(dphy_ctx->base_addr, DSI_PHY_STATUS_1);
-+ dphy_ctx->dphy_status2 = dsi_read(dphy_ctx->base_addr, DSI_PHY_STATUS_2);
-+ pr_debug("%s: dphy_status0 = 0x%x\n", __func__, dphy_ctx->dphy_status0);
-+ pr_debug("%s: dphy_status1 = 0x%x\n", __func__, dphy_ctx->dphy_status1);
-+ pr_debug("%s: dphy_status2 = 0x%x\n", __func__, dphy_ctx->dphy_status2);
-+}
-+
-+void spacemit_dphy_core_reset(struct spacemit_dphy_ctx *dphy_ctx)
-+{
-+ pr_debug("%s\n", __func__);
-+
-+ if(NULL == dphy_ctx){
-+ pr_err("%s: Invalid param\n", __func__);
-+ return;
-+ }
-+
-+ dphy_ana_reset(dphy_ctx->base_addr);
-+}
-+
-+/**
-+ * spacemit_dphy_core_init - int spacemit dphy
-+ *
-+ * @dphy_ctx: pointer to the spacemit_dphy_ctx
-+ *
-+ * This function will be called by the dsi driver in order to init the dphy
-+ * This function will do phy power on, enable continous clk, set dphy timing
-+ * and set lane number.
-+ *
-+ * This function has no return value.
-+ *
-+ */
-+void spacemit_dphy_core_init(struct spacemit_dphy_ctx *dphy_ctx)
-+{
-+ pr_debug("%s\n", __func__);
-+
-+ if(NULL == dphy_ctx){
-+ pr_err("%s: Invalid param\n", __func__);
-+ return;
-+ }
-+
-+ if(DPHY_STATUS_UNINIT != dphy_ctx->status){
-+ pr_warn("%s: dphy_ctx has been initialized (%d)\n",
-+ __func__, dphy_ctx->status);
-+ return;
-+ }
-+
-+ /*use DPHY_BIT_CLK_SRC_MUX as default clk src*/
-+ dphy_set_bit_clk_src(dphy_ctx->base_addr, dphy_ctx->clk_src, dphy_ctx->half_pll5);
-+
-+ /* digital and analog power on */
-+ dphy_set_power(dphy_ctx->base_addr, true);
-+
-+ /* turn on DSI continuous clock for HS */
-+ dphy_set_cont_clk(dphy_ctx->base_addr, true);
-+
-+ /* set dphy */
-+ dphy_set_timing(dphy_ctx);
-+
-+ /* enable data lanes */
-+ dphy_set_lane_num(dphy_ctx->base_addr, dphy_ctx->lane_num);
-+
-+ dphy_ctx->status = DPHY_STATUS_INIT;
-+}
-+
-+/**
-+ * spacemit_dphy_core_uninit - unint spacemit dphy
-+ *
-+ * @dphy_ctx: pointer to the spacemit_dphy_ctx
-+ *
-+ * This function will be called by the dsi driver in order to unint the dphy
-+ * This function will disable continous clk, reset dphy, power down dphy
-+ *
-+ * This function has no return value.
-+ *
-+ */
-+void spacemit_dphy_core_uninit(struct spacemit_dphy_ctx *dphy_ctx)
-+{
-+ pr_debug("%s\n", __func__);
-+
-+ if(NULL == dphy_ctx){
-+ pr_err("%s: Invalid param\n", __func__);
-+ return;
-+ }
-+
-+ if(DPHY_STATUS_INIT != dphy_ctx->status){
-+ pr_warn("%s: dphy_ctx has not been initialized (%d)\n",
-+ __func__, dphy_ctx->status);
-+ return;
-+ }
-+
-+ dphy_set_cont_clk(dphy_ctx->base_addr, false);
-+ dphy_ana_reset(dphy_ctx->base_addr);
-+ dphy_set_power(dphy_ctx->base_addr, false);
-+
-+ dphy_ctx->status = DPHY_STATUS_UNINIT;
-+}
-+
-+int spacemit_dphy_core_parse_dt(struct spacemit_dphy_ctx *dphy_ctx, struct device_node *np)
-+{
-+ if (!dphy_ctx) {
-+ pr_err("%s: Param is NULL\n",__func__);
-+ return -1;
-+ }
-+
-+ dphy_get_setting(dphy_ctx, np);
-+
-+ return 0;
-+}
-+
-+
-+static struct dphy_core_ops dphy_core_ops = {
-+ .parse_dt = spacemit_dphy_core_parse_dt,
-+ .init = spacemit_dphy_core_init,
-+ .uninit = spacemit_dphy_core_uninit,
-+ .reset = spacemit_dphy_core_reset,
-+ .get_status = spacemit_dphy_core_get_status,
-+};
-+
-+static struct ops_entry entry = {
-+ .ver = "spacemit-dphy",
-+ .ops = &dphy_core_ops,
-+};
-+
-+static int __init dphy_core_register(void)
-+{
-+ return dphy_core_ops_register(&entry);
-+}
-+
-+subsys_initcall(dphy_core_register);
-+
-+MODULE_LICENSE("GPL v2");
-diff --git a/drivers/gpu/drm/spacemit/dpu/dpu_debug.c b/drivers/gpu/drm/spacemit/dpu/dpu_debug.c
-new file mode 100644
-index 000000000000..111111111111
---- /dev/null
-+++ b/drivers/gpu/drm/spacemit/dpu/dpu_debug.c
-@@ -0,0 +1,321 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2023 Spacemit Co., Ltd.
-+ *
-+ */
-+
-+#include
-+#include
-+#include
-+#include
-+#include "dpu_debug.h"
-+#include "dpu_trace.h"
-+#include "./../spacemit_dpu_reg.h"
-+#include "./../spacemit_drm.h"
-+
-+dpu_reg_enum SATURN_LE_DPU_REG_ENUM_LISTS[] = {
-+ E_DPU_TOP_REG,
-+ E_DPU_CTRL_REG,
-+ E_DPU_CTRL_REG,
-+ E_DPU_CMDLIST_REG,
-+ E_DPU_INT_REG,
-+ E_DMA_TOP_CTRL_REG,
-+ E_RDMA_LAYER0_REG,
-+ E_RDMA_LAYER1_REG,
-+ E_RDMA_LAYER2_REG,
-+ E_RDMA_LAYER3_REG,
-+ E_MMU_TBU0_REG,
-+ E_MMU_TBU2_REG,
-+ E_MMU_TBU4_REG,
-+ E_MMU_TBU6_REG,
-+ E_COMPOSER2_REG,
-+ E_SCALER0_REG,
-+ E_OUTCTRL2_REG
-+};
-+
-+static dpu_reg_dump_t dpu_reg_dump_array[] = {
-+ {E_DPU_TOP_REG, "DPU_TOP", DPU_TOP_BASE_ADDR, 218},
-+
-+ {E_DPU_CTRL_REG, "DPU_CTRL", DPU_CTRL_BASE_ADDR, 5},
-+ {E_DPU_CTRL_REG, "DPU_CTRL", DPU_CTRL_BASE_ADDR + 0x24, 8},
-+ {E_DPU_CTRL_REG, "DPU_CTRL", DPU_CTRL_BASE_ADDR + 0x54, 8},
-+ {E_DPU_CTRL_REG, "DPU_CTRL", DPU_CTRL_BASE_ADDR + 0x84, 19},
-+ {E_DPU_CTRL_REG, "DPU_CTRL", DPU_CTRL_BASE_ADDR + 0xe4, 8},
-+ {E_DPU_CTRL_REG, "DPU_CTRL", DPU_CTRL_BASE_ADDR + 0x114, 25},
-+
-+ {E_DPU_CRG_REG, "DPU_CRG", DPU_CRG_BASE_ADDR, 5},
-+
-+ {E_DPU_CMDLIST_REG, "DPU_CMDLIST", CMDLIST_BASE_ADDR, 44},
-+
-+ {E_DPU_INT_REG, "DPU_INT", DPU_INT_BASE_ADDR, 40},
-+
-+ {E_DMA_TOP_CTRL_REG, "DMA_TOP_CTRL", DMA_TOP_BASE_ADDR, 25},
-+
-+ {E_RDMA_LAYER0_REG, "RDMA_LAYER0", RDMA0_BASE_ADDR, 31},
-+ {E_RDMA_LAYER0_REG, "RDMA_LAYER0", RDMA0_BASE_ADDR + 0x80, 57},
-+
-+ {E_RDMA_LAYER1_REG, "RDMA_LAYER1", RDMA1_BASE_ADDR, 31},
-+ {E_RDMA_LAYER1_REG, "RDMA_LAYER1", RDMA1_BASE_ADDR + 0x80, 57},
-+
-+ {E_RDMA_LAYER2_REG, "RDMA_LAYER2", RDMA2_BASE_ADDR, 31},
-+ {E_RDMA_LAYER2_REG, "RDMA_LAYER2", RDMA2_BASE_ADDR + 0x80, 57},
-+
-+ {E_RDMA_LAYER3_REG, "RDMA_LAYER3" ,RDMA3_BASE_ADDR, 31},
-+ {E_RDMA_LAYER3_REG, "RDMA_LAYER3", RDMA3_BASE_ADDR + 0x80, 57},
-+
-+ {E_RDMA_LAYER4_REG, "RDMA_LAYER4", RDMA4_BASE_ADDR, 46},
-+
-+ {E_RDMA_LAYER5_REG, "RDMA_LAYER5", RDMA5_BASE_ADDR, 46},
-+
-+ {E_RDMA_LAYER6_REG, "RDMA_LAYER6", RDMA6_BASE_ADDR, 46},
-+
-+ {E_RDMA_LAYER7_REG, "RDMA_LAYER7", RDMA7_BASE_ADDR, 46},
-+
-+ {E_RDMA_LAYER8_REG, "RDMA_LAYER8", RDMA8_BASE_ADDR, 46},
-+
-+ {E_RDMA_LAYER9_REG, "RDMA_LAYER9", RDMA9_BASE_ADDR, 46},
-+
-+ {E_RDMA_LAYER10_REG, "RDMA_LAYER10", RDMA10_BASE_ADDR, 46},
-+
-+ {E_RDMA_LAYER11_REG, "RDMA_LAYER11", RDMA11_BASE_ADDR, 46},
-+
-+ {E_MMU_TBU0_REG, "MMU_TBU0", MMU_TBU_BASE_ADDR + 0 * MMU_TBU_SIZE, 13},
-+ {E_MMU_TBU1_REG, "MMU_TBU1", MMU_TBU_BASE_ADDR + 1 * MMU_TBU_SIZE, 13},
-+ {E_MMU_TBU2_REG, "MMU_TBU2", MMU_TBU_BASE_ADDR + 2 * MMU_TBU_SIZE, 13},
-+ {E_MMU_TBU3_REG, "MMU_TBU3", MMU_TBU_BASE_ADDR + 3 * MMU_TBU_SIZE, 13},
-+ {E_MMU_TBU4_REG, "MMU_TBU4", MMU_TBU_BASE_ADDR + 4 * MMU_TBU_SIZE, 13},
-+ {E_MMU_TBU5_REG, "MMU_TBU5", MMU_TBU_BASE_ADDR + 5 * MMU_TBU_SIZE, 13},
-+ {E_MMU_TBU6_REG, "MMU_TBU6", MMU_TBU_BASE_ADDR + 6 * MMU_TBU_SIZE, 13},
-+ {E_MMU_TBU7_REG, "MMU_TBU7", MMU_TBU_BASE_ADDR + 7 * MMU_TBU_SIZE, 13},
-+ {E_MMU_TBU8_REG, "MMU_TBU8", MMU_TBU_BASE_ADDR + 8 * MMU_TBU_SIZE, 13},
-+ {E_MMU_TOP_REG, "MMU_TOP", MMU_TOP_BASE_ADDR, 13},
-+
-+ {E_LP0_REG, "LP0", LP0_BASE_ADDR, 81},
-+ {E_LP1_REG, "LP1", LP1_BASE_ADDR, 81},
-+ {E_LP2_REG, "LP2", LP2_BASE_ADDR, 81},
-+ {E_LP3_REG, "LP3", LP3_BASE_ADDR, 81},
-+ {E_LP4_REG, "LP4", LP4_BASE_ADDR, 81},
-+ {E_LP5_REG, "LP5", LP5_BASE_ADDR, 81},
-+ {E_LP6_REG, "LP6", LP6_BASE_ADDR, 81},
-+ {E_LP7_REG, "LP7", LP7_BASE_ADDR, 81},
-+ {E_LP8_REG, "LP8", LP8_BASE_ADDR, 81},
-+ {E_LP9_REG, "LP9", LP9_BASE_ADDR, 81},
-+ {E_LP10_REG, "LP10", LP10_BASE_ADDR, 81},
-+ {E_LP11_REG, "LP11", LP11_BASE_ADDR, 81},
-+
-+ {E_LM0_REG, "LMERGE0", LMERGE0_BASE_ADDR, 4},
-+ {E_LM1_REG, "LMERGE1", LMERGE1_BASE_ADDR, 4},
-+ {E_LM2_REG, "LMERGE2", LMERGE2_BASE_ADDR, 4},
-+ {E_LM3_REG, "LMERGE3", LMERGE3_BASE_ADDR, 4},
-+ {E_LM4_REG, "LMERGE4", LMERGE4_BASE_ADDR, 4},
-+ {E_LM5_REG, "LMERGE5", LMERGE5_BASE_ADDR, 4},
-+ {E_LM6_REG, "LMERGE6", LMERGE6_BASE_ADDR, 4},
-+ {E_LM7_REG, "LMERGE7", LMERGE7_BASE_ADDR, 4},
-+ {E_LM8_REG, "LMERGE8", LMERGE8_BASE_ADDR, 4},
-+ {E_LM9_REG, "LMERGE9", LMERGE9_BASE_ADDR, 4},
-+ {E_LM10_REG, "LMERGE10", LMERGE10_BASE_ADDR, 4},
-+ {E_LM11_REG, "LMERGE11", LMERGE11_BASE_ADDR, 4},
-+
-+ {E_COMPOSER0_REG, "COMPOSER0", CMP0_BASE_ADDR, 146},
-+ {E_COMPOSER1_REG, "COMPOSER1", CMP1_BASE_ADDR, 146},
-+ {E_COMPOSER2_REG, "COMPOSER2", CMP2_BASE_ADDR, 146},
-+ {E_COMPOSER3_REG, "COMPOSER3", CMP3_BASE_ADDR, 146},
-+
-+ {E_SCALER0_REG, "SCALER0", SCALER0_ONLINE_BASE_ADDR, 121},
-+ {E_SCALER1_REG, "SCALER1", SCALER1_ONLINE_BASE_ADDR, 121},
-+
-+ {E_OUTCTRL0_REG, "OUTCTRL0", OUTCTRL0_BASE_ADDR, 55},
-+ {E_PP0_REG, "PP0", PP0_BASE_ADDR, 86},
-+ {E_OUTCTRL1_REG, "OUTCTRL1", OUTCTRL1_BASE_ADDR, 55},
-+ {E_PP1_REG, "PP1", PP1_BASE_ADDR, 86},
-+ {E_OUTCTRL2_REG, "OUTCTRL2", OUTCTRL2_BASE_ADDR, 55},
-+ {E_PP2_REG, "PP2", PP2_BASE_ADDR, 86},
-+ {E_OUTCTRL3_REG, "OUTCTRL3", OUTCTRL3_BASE_ADDR, 55},
-+ {E_PP3_REG, "PP3", PP3_BASE_ADDR, 86},
-+
-+ {E_WB_TOP_0_REG, "WB_TOP_0", WB0_TOP_BASE_ADDR, 54},
-+ {E_WB_TOP_1_REG, "WB_TOP_1", WB1_TOP_BASE_ADDR, 54},
-+};
-+
-+static void dump_dpu_regs_by_enum(void __iomem *io_base, phys_addr_t phy_base, dpu_reg_enum reg_enum, u8 trace_dump)
-+{
-+ int i;
-+ int j;
-+ uint32_t reg_num;
-+ void __iomem *io_addr;
-+ phys_addr_t phy_addr;
-+
-+ dpu_reg_dump_t *tmp = &dpu_reg_dump_array[0];
-+
-+ for (i = 0; i < ARRAY_SIZE(dpu_reg_dump_array); i++) {
-+ if (tmp->index == reg_enum) {
-+ reg_num = tmp->dump_reg_num;
-+ io_addr = io_base + tmp->module_offset;
-+ phy_addr = phy_base + tmp->module_offset;
-+ if (trace_dump) {
-+ trace_dpu_reg_info(tmp->module_name, phy_addr, reg_num);
-+ for (j = 0; j < reg_num; j++) {
-+ trace_dpu_reg_dump(phy_addr + j * 4, readl(io_addr + j * 4));
-+ }
-+ } else {
-+ printk(KERN_DEBUG "%d-%s, address:0x%08llx, num:%d\n", tmp->index, tmp->module_name, phy_addr, reg_num);
-+ for (j = 0; j < reg_num; j++) {
-+ printk(KERN_DEBUG "0x%08llx: 0x%08x\n", (phy_addr + j * 4), readl(io_addr + j * 4));
-+ }
-+ }
-+ }
-+ tmp++;
-+ }
-+}
-+
-+bool dpu_reg_enum_valid(dpu_reg_enum reg_enum)
-+{
-+ int size = ARRAY_SIZE(SATURN_LE_DPU_REG_ENUM_LISTS);
-+ int i = 0;
-+
-+ for (i = 0; i < size; i++) {
-+ if (reg_enum == SATURN_LE_DPU_REG_ENUM_LISTS[i])
-+ return true;
-+ }
-+
-+ return false;
-+}
-+
-+void dump_dpu_regs(struct spacemit_dpu *dpu, dpu_reg_enum reg_enum, u8 trace_dump)
-+{
-+ dpu_reg_enum tmp = E_DPU_TOP_REG;
-+ struct spacemit_drm_private *priv = dpu->crtc.dev->dev_private;
-+ struct spacemit_hw_device *hwdev = priv->hwdev;
-+ void __iomem* reg_io_base = hwdev->base;
-+ phys_addr_t reg_phy_base = hwdev->phy_addr;
-+
-+ if (reg_enum > E_DPU_DUMP_ALL) {
-+ pr_err("invalid dump regsiter enum\n");
-+ return;
-+ } else if (reg_enum == E_DPU_DUMP_ALL) {
-+ for (; tmp < E_DPU_DUMP_ALL; tmp++) {
-+ if (dpu_reg_enum_valid(tmp))
-+ dump_dpu_regs_by_enum(reg_io_base, reg_phy_base, tmp, trace_dump);
-+ }
-+ } else {
-+ dump_dpu_regs_by_enum(reg_io_base, reg_phy_base, reg_enum, trace_dump);
-+ }
-+}
-+
-+static void dpu_debug_mode(struct spacemit_hw_device *hwdev, int pipeline_id, bool enable)
-+{
-+ u32 base = DPU_CTRL_BASE_ADDR;
-+
-+ switch (pipeline_id) {
-+ case ONLINE0:
-+ dpu_write_reg(hwdev, DPU_CTL_REG, base, ctl0_dbg_mod, enable ? 1 : 0);
-+ break;
-+ case ONLINE1:
-+ dpu_write_reg(hwdev, DPU_CTL_REG, base, ctl1_dbg_mod, enable ? 1 : 0);
-+ break;
-+ case ONLINE2:
-+ dpu_write_reg(hwdev, DPU_CTL_REG, base, ctl2_dbg_mod, enable ? 1 : 0);
-+ break;
-+ case OFFLINE0:
-+ case OFFLINE1:
-+ default:
-+ DRM_ERROR("pipeline id is invalid!\n");
-+ break;
-+ }
-+}
-+#if IS_ENABLED(CONFIG_GKI_FIX_WORKAROUND)
-+static struct file *gki_filp_open(const char *filename, int flags, umode_t mode)
-+{
-+ return 0;
-+}
-+static ssize_t gki_kernel_write(struct file *file, const void *buf, size_t count,
-+ loff_t *pos)
-+{
-+ return 0;
-+}
-+#endif
-+#define DPU_BUFFER_DUMP_FILE "/mnt/dpu_buffer_dump"
-+int dpu_buffer_dump(struct drm_plane *plane) {
-+ unsigned int buffer_size = 0;
-+ int i = 0;
-+ void *mmu_tbl_vaddr = NULL;
-+ phys_addr_t dpu_buffer_paddr = 0;
-+ void __iomem *dpu_buffer_vaddr = NULL;
-+ loff_t pos = 0;
-+ static int dump_once = true;
-+ struct file *filep = NULL;
-+ struct spacemit_plane_state *spacemit_pstate = to_spacemit_plane_state(plane->state);
-+
-+ if (!dump_once)
-+ return 0;
-+
-+ mmu_tbl_vaddr = spacemit_pstate->mmu_tbl.va;
-+ buffer_size = plane->state->fb->obj[0]->size >> PAGE_SHIFT;
-+
-+#if IS_ENABLED(CONFIG_GKI_FIX_WORKAROUND)
-+ filep = gki_filp_open(DPU_BUFFER_DUMP_FILE, O_RDWR | O_APPEND | O_CREAT, 0644);
-+#else
-+ filep = filp_open(DPU_BUFFER_DUMP_FILE, O_RDWR | O_APPEND | O_CREAT, 0644);
-+#endif
-+
-+ if (IS_ERR(filep)) {
-+ printk("Open file %s error\n", DPU_BUFFER_DUMP_FILE);
-+ return -EINVAL;
-+ }
-+ for (i = 0; i < buffer_size; i++) {
-+ dpu_buffer_paddr = *(volatile u32 __force *)mmu_tbl_vaddr;
-+ dpu_buffer_paddr = dpu_buffer_paddr << PAGE_SHIFT;
-+ if (dpu_buffer_paddr >= 0x80000000UL) {
-+ dpu_buffer_paddr += 0x80000000UL;
-+ }
-+ dpu_buffer_vaddr = phys_to_virt((unsigned long)dpu_buffer_paddr);
-+ mmu_tbl_vaddr += 4;
-+#if IS_ENABLED(CONFIG_GKI_FIX_WORKAROUND)
-+ gki_kernel_write(filep, (void *)dpu_buffer_vaddr, PAGE_SIZE, &pos);
-+#else
-+ kernel_write(filep, (void *)dpu_buffer_vaddr, PAGE_SIZE, &pos);
-+#endif
-+ }
-+
-+ filp_close(filep, NULL);
-+ filep = NULL;
-+
-+ dump_once = false;
-+
-+ return 0;
-+}
-+
-+void dpu_dump_reg(struct spacemit_dpu *dpu)
-+{
-+ struct spacemit_drm_private *priv = dpu->crtc.dev->dev_private;
-+ struct spacemit_hw_device *hwdev = priv->hwdev;
-+
-+ if (!dpu->enable_dump_reg)
-+ return;
-+
-+ dpu_debug_mode(hwdev, ONLINE2, 1);
-+ dump_dpu_regs(dpu, E_DPU_DUMP_ALL, 1);
-+ dpu_debug_mode(hwdev, ONLINE2, 0);
-+}
-+
-+void dpu_dump_fps(struct spacemit_dpu *dpu)
-+{
-+ struct timespec64 cur_tm, tmp_tm;
-+
-+ if (!dpu->enable_dump_fps)
-+ return;
-+
-+ ktime_get_real_ts64(&cur_tm);
-+ tmp_tm = timespec64_sub(cur_tm, dpu->last_tm);
-+ dpu->last_tm.tv_sec = cur_tm.tv_sec;
-+ dpu->last_tm.tv_nsec = cur_tm.tv_nsec;
-+ if (tmp_tm.tv_sec == 0)
-+ trace_printk("fps: %ld\n", 1000000000 / (tmp_tm.tv_nsec / 1000));
-+}
-+
-+void dpu_underrun_wq_stop_trace(struct work_struct *work)
-+{
-+// #ifndef MODULE
-+// trace_set_clr_event("dpu", NULL, false);
-+// #endif
-+}
-diff --git a/drivers/gpu/drm/spacemit/dpu/dpu_debug.h b/drivers/gpu/drm/spacemit/dpu/dpu_debug.h
-new file mode 100644
-index 000000000000..111111111111
---- /dev/null
-+++ b/drivers/gpu/drm/spacemit/dpu/dpu_debug.h
-@@ -0,0 +1,108 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2023 Spacemit Co., Ltd.
-+ *
-+ */
-+
-+#ifndef _DPU_DEBUG_H_
-+#define _DPU_DEBUG_H_
-+
-+#include
-+#include "saturn_regs/reg_map.h"
-+#include "./../spacemit_dpu.h"
-+
-+typedef enum {
-+ E_DPU_TOP_REG = 0,
-+ E_DPU_CTRL_REG,
-+ E_DPU_CRG_REG,
-+ E_DPU_CMDLIST_REG,
-+ E_DPU_INT_REG,
-+
-+ E_DMA_TOP_CTRL_REG,
-+ E_RDMA_LAYER0_REG,
-+ E_RDMA_LAYER1_REG,
-+ E_RDMA_LAYER2_REG,
-+ E_RDMA_LAYER3_REG,
-+ E_RDMA_LAYER4_REG,
-+ E_RDMA_LAYER5_REG,
-+ E_RDMA_LAYER6_REG,
-+ E_RDMA_LAYER7_REG,
-+ E_RDMA_LAYER8_REG,
-+ E_RDMA_LAYER9_REG,
-+ E_RDMA_LAYER10_REG,
-+ E_RDMA_LAYER11_REG,
-+
-+ E_MMU_TBU0_REG,
-+ E_MMU_TBU1_REG,
-+ E_MMU_TBU2_REG,
-+ E_MMU_TBU3_REG,
-+ E_MMU_TBU4_REG,
-+ E_MMU_TBU5_REG,
-+ E_MMU_TBU6_REG,
-+ E_MMU_TBU7_REG,
-+ E_MMU_TBU8_REG,
-+ E_MMU_TOP_REG,
-+
-+ E_LP0_REG,
-+ E_LP1_REG,
-+ E_LP2_REG,
-+ E_LP3_REG,
-+ E_LP4_REG,
-+ E_LP5_REG,
-+ E_LP6_REG,
-+ E_LP7_REG,
-+ E_LP8_REG,
-+ E_LP9_REG,
-+ E_LP10_REG,
-+ E_LP11_REG,
-+
-+ E_LM0_REG,
-+ E_LM1_REG,
-+ E_LM2_REG,
-+ E_LM3_REG,
-+ E_LM4_REG,
-+ E_LM5_REG,
-+ E_LM6_REG,
-+ E_LM7_REG,
-+ E_LM8_REG,
-+ E_LM9_REG,
-+ E_LM10_REG,
-+ E_LM11_REG,
-+
-+ E_COMPOSER0_REG,
-+ E_COMPOSER1_REG,
-+ E_COMPOSER2_REG,
-+ E_COMPOSER3_REG,
-+
-+ E_SCALER0_REG,
-+ E_SCALER1_REG,
-+
-+ E_OUTCTRL0_REG,
-+ E_PP0_REG,
-+ E_OUTCTRL1_REG,
-+ E_PP1_REG,
-+ E_OUTCTRL2_REG,
-+ E_PP2_REG,
-+ E_OUTCTRL3_REG,
-+ E_PP3_REG,
-+
-+ E_WB_TOP_0_REG,
-+ E_WB_TOP_1_REG,
-+
-+ E_DPU_DUMP_ALL
-+}dpu_reg_enum;
-+
-+typedef struct dpu_reg_dump {
-+ dpu_reg_enum index;
-+ u8* module_name;
-+ uint32_t module_offset;
-+ uint32_t dump_reg_num;
-+}dpu_reg_dump_t;
-+
-+void dump_dpu_regs(struct spacemit_dpu *dpu, dpu_reg_enum reg_enum, u8 trace_dump);
-+void dpu_dump_reg(struct spacemit_dpu *dpu);
-+void dpu_dump_fps(struct spacemit_dpu *dpu);
-+int dpu_buffer_dump(struct drm_plane *plane);
-+void dpu_underrun_wq_stop_trace(struct work_struct *work);
-+
-+#endif
-diff --git a/drivers/gpu/drm/spacemit/dpu/dpu_saturn.c b/drivers/gpu/drm/spacemit/dpu/dpu_saturn.c
-new file mode 100644
-index 000000000000..111111111111
---- /dev/null
-+++ b/drivers/gpu/drm/spacemit/dpu/dpu_saturn.c
-@@ -0,0 +1,1859 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2023 Spacemit Co., Ltd.
-+ *
-+ */
-+
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include "dpu_saturn.h"
-+#include "saturn_fbcmem.h"
-+#include "../spacemit_cmdlist.h"
-+#include "../spacemit_dmmu.h"
-+#include "../spacemit_dpu_reg.h"
-+#include "../spacemit_drm.h"
-+#include "../spacemit_wb.h"
-+#include